Diffstat
-rw-r--r--.gitignore1
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/README3
-rw-r--r--Documentation/ABI/obsolete/sysfs-driver-hid-roccat-koneplus38
-rw-r--r--Documentation/ABI/obsolete/sysfs-driver-hid-roccat-kovaplus66
-rw-r--r--Documentation/ABI/obsolete/sysfs-driver-hid-roccat-pyra73
-rw-r--r--Documentation/ABI/stable/sysfs-devices-node96
-rw-r--r--Documentation/ABI/stable/sysfs-driver-ib_srp156
-rw-r--r--Documentation/ABI/stable/sysfs-transport-srp19
-rw-r--r--Documentation/ABI/testing/dev-kmsg2
-rw-r--r--Documentation/ABI/testing/ima_policy3
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci34
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd4
-rw-r--r--Documentation/ABI/testing/sysfs-devices-node7
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power2
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-isku8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus48
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus69
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-lua7
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra76
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-savu3
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ppi2
-rw-r--r--Documentation/ABI/testing/sysfs-profiling6
-rw-r--r--Documentation/DMA-API-HOWTO.txt126
-rw-r--r--Documentation/DMA-API.txt12
-rw-r--r--Documentation/DMA-attributes.txt9
-rw-r--r--Documentation/DocBook/drm.tmpl39
-rw-r--r--Documentation/DocBook/kernel-api.tmpl3
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml7
-rw-r--r--Documentation/DocBook/media/v4l/io.xml188
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml1
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-create-bufs.xml16
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml212
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-qbuf.xml17
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-reqbufs.xml47
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl85
-rw-r--r--Documentation/HOWTO2
-rw-r--r--Documentation/PCI/pci-iov-howto.txt48
-rw-r--r--Documentation/accounting/getdelays.c1
-rw-r--r--Documentation/acpi/initrd_table_override.txt94
-rw-r--r--Documentation/aoe/aoe.txt4
-rw-r--r--Documentation/arm/OMAP/DSS10
-rw-r--r--Documentation/backlight/lp855x-driver.txt10
-rw-r--r--Documentation/bus-devices/ti-gpmc.txt122
-rw-r--r--Documentation/cgroups/cpusets.txt2
-rw-r--r--Documentation/cgroups/memory.txt66
-rw-r--r--Documentation/cgroups/resource_counter.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-reset.txt11
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-system.txt11
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt20
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/coherency-fabric.txt21
-rw-r--r--Documentation/devicetree/bindings/arm/davinci/nand.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/spear/shirq.txt48
-rw-r--r--Documentation/devicetree/bindings/ata/exynos-sata-phy.txt14
-rw-r--r--Documentation/devicetree/bindings/ata/exynos-sata.txt17
-rw-r--r--Documentation/devicetree/bindings/clock/imx25-clock.txt162
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-core-clock.txt47
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt119
-rw-r--r--Documentation/devicetree/bindings/clock/zynq-7000.txt55
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec4.txt5
-rw-r--r--Documentation/devicetree/bindings/dma/mv-xor.txt40
-rw-r--r--Documentation/devicetree/bindings/drm/exynos/hdmi.txt22
-rw-r--r--Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt12
-rw-r--r--Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt12
-rw-r--r--Documentation/devicetree/bindings/drm/exynos/mixer.txt15
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-poweroff.txt22
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt191
-rw-r--r--Documentation/devicetree/bindings/hwmon/vexpress.txt23
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt27
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt81
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-ocores.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt20
-rw-r--r--Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt46
-rw-r--r--Documentation/devicetree/bindings/input/pwm-beeper.txt7
-rw-r--r--Documentation/devicetree/bindings/input/stmpe-keypad.txt39
-rw-r--r--Documentation/devicetree/bindings/input/tca8418_keypad.txt8
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/bu21013.txt28
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/mms114.txt34
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/stmpe.txt43
-rw-r--r--Documentation/devicetree/bindings/media/s5p-mfc.txt23
-rw-r--r--Documentation/devicetree/bindings/mfd/ab8500.txt27
-rw-r--r--Documentation/devicetree/bindings/mfd/stmpe.txt28
-rw-r--r--Documentation/devicetree/bindings/misc/atmel-ssc.txt15
-rw-r--r--Documentation/devicetree/bindings/mtd/denali-nand.txt23
-rw-r--r--Documentation/devicetree/bindings/mtd/flctl-nand.txt49
-rw-r--r--Documentation/devicetree/bindings/mtd/fsmc-nand.txt12
-rw-r--r--Documentation/devicetree/bindings/mtd/m25p80.txt29
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt3
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt23
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-mdio.txt35
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt39
-rw-r--r--Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt16
-rw-r--r--Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt16
-rw-r--r--Documentation/devicetree/bindings/power_supply/ab8500/charger.txt25
-rw-r--r--Documentation/devicetree/bindings/power_supply/ab8500/fg.txt58
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/raideng.txt81
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt23
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt23
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt31
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/spear-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/vt8500-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/regulator/gpio-regulator.txt37
-rw-r--r--Documentation/devicetree/bindings/regulator/max8925-regulator.txt40
-rw-r--r--Documentation/devicetree/bindings/regulator/max8997-regulator.txt146
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65217.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/vexpress.txt32
-rw-r--r--Documentation/devicetree/bindings/rtc/imxdi-rtc.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt19
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-omap.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/ak4104.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/cs4271.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt26
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt26
-rw-r--r--Documentation/devicetree/bindings/spi/omap-spi.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/spi-bus.txt22
-rw-r--r--Documentation/devicetree/bindings/spi/spi_atmel.txt26
-rw-r--r--Documentation/devicetree/bindings/timer/nvidia,tegra20-timer.txt21
-rw-r--r--Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-orion.txt15
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/bindings/video/exynos_dp.txt80
-rw-r--r--Documentation/devicetree/bindings/video/ssd1307fb.txt24
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel-wdt.txt15
-rw-r--r--Documentation/devicetree/bindings/watchdog/davinci-wdt.txt12
-rw-r--r--Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt10
-rw-r--r--Documentation/dma-buf-sharing.txt4
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/fault-injection/notifier-error-inject.txt4
-rw-r--r--Documentation/filesystems/00-INDEX4
-rw-r--r--Documentation/filesystems/Locking6
-rw-r--r--Documentation/filesystems/caching/backend-api.txt38
-rw-r--r--Documentation/filesystems/caching/netfs-api.txt46
-rw-r--r--Documentation/filesystems/caching/object.txt23
-rw-r--r--Documentation/filesystems/caching/operations.txt2
-rw-r--r--Documentation/filesystems/efivarfs.txt16
-rw-r--r--Documentation/filesystems/ext4.txt9
-rw-r--r--Documentation/filesystems/f2fs.txt421
-rw-r--r--Documentation/filesystems/nfs/nfs41-server.txt20
-rw-r--r--Documentation/filesystems/porting2
-rw-r--r--Documentation/filesystems/proc.txt130
-rw-r--r--Documentation/filesystems/vfat.txt9
-rw-r--r--Documentation/filesystems/vfs.txt11
-rw-r--r--Documentation/hid/uhid.txt2
-rw-r--r--Documentation/hwmon/it8710
-rw-r--r--Documentation/hwmon/pmbus2
-rw-r--r--Documentation/hwmon/vexpress34
-rw-r--r--Documentation/i2c/smbus-protocol40
-rw-r--r--Documentation/input/alps.txt2
-rw-r--r--Documentation/input/event-codes.txt11
-rw-r--r--Documentation/kbuild/modules.txt2
-rw-r--r--Documentation/kernel-doc-nano-HOWTO.txt13
-rw-r--r--Documentation/kernel-parameters.txt16
-rw-r--r--Documentation/kref.txt88
-rw-r--r--Documentation/memory-hotplug.txt5
-rw-r--r--Documentation/misc-devices/mei/mei-amt-version.c4
-rw-r--r--Documentation/power/power_supply_class.txt3
-rw-r--r--Documentation/powerpc/ptrace.txt16
-rw-r--r--Documentation/prctl/seccomp_filter.txt74
-rw-r--r--Documentation/scsi/hptiop.txt69
-rw-r--r--Documentation/security/00-INDEX2
-rw-r--r--Documentation/security/keys.txt17
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt3
-rw-r--r--Documentation/sparse.txt18
-rw-r--r--Documentation/video4linux/bttv/Cards2
-rw-r--r--Documentation/video4linux/bttv/Sound-FAQ2
-rw-r--r--Documentation/virtual/kvm/api.txt140
-rw-r--r--Documentation/vm/frontswap.txt2
-rw-r--r--Documentation/vm/transhuge.txt19
-rw-r--r--Documentation/x86/boot.txt3
-rw-r--r--Documentation/xtensa/atomctl.txt44
-rw-r--r--MAINTAINERS321
-rw-r--r--Makefile20
-rw-r--r--README4
-rw-r--r--arch/Kconfig29
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/include/asm/Kbuild9
-rw-r--r--arch/alpha/include/asm/a.out.h89
-rw-r--r--arch/alpha/include/asm/compiler.h115
-rw-r--r--arch/alpha/include/asm/console.h48
-rw-r--r--arch/alpha/include/asm/fpu.h124
-rw-r--r--arch/alpha/include/asm/mmzone.h2
-rw-r--r--arch/alpha/include/asm/pal.h50
-rw-r--r--arch/alpha/include/asm/param.h20
-rw-r--r--arch/alpha/include/asm/ptrace.h69
-rw-r--r--arch/alpha/include/asm/signal.h135
-rw-r--r--arch/alpha/include/asm/socket.h79
-rw-r--r--arch/alpha/include/asm/termios.h68
-rw-r--r--arch/alpha/include/asm/types.h13
-rw-r--r--arch/alpha/include/asm/unistd.h470
-rw-r--r--arch/alpha/include/uapi/asm/Kbuild40
-rw-r--r--arch/alpha/include/uapi/asm/a.out.h91
-rw-r--r--arch/alpha/include/uapi/asm/auxvec.h (renamed from arch/alpha/include/asm/auxvec.h)0
-rw-r--r--arch/alpha/include/uapi/asm/bitsperlong.h (renamed from arch/alpha/include/asm/bitsperlong.h)0
-rw-r--r--arch/alpha/include/uapi/asm/byteorder.h (renamed from arch/alpha/include/asm/byteorder.h)0
-rw-r--r--arch/alpha/include/uapi/asm/compiler.h117
-rw-r--r--arch/alpha/include/uapi/asm/console.h50
-rw-r--r--arch/alpha/include/uapi/asm/errno.h (renamed from arch/alpha/include/asm/errno.h)0
-rw-r--r--arch/alpha/include/uapi/asm/fcntl.h (renamed from arch/alpha/include/asm/fcntl.h)0
-rw-r--r--arch/alpha/include/uapi/asm/fpu.h123
-rw-r--r--arch/alpha/include/uapi/asm/gentrap.h (renamed from arch/alpha/include/asm/gentrap.h)0
-rw-r--r--arch/alpha/include/uapi/asm/ioctl.h (renamed from arch/alpha/include/asm/ioctl.h)0
-rw-r--r--arch/alpha/include/uapi/asm/ioctls.h (renamed from arch/alpha/include/asm/ioctls.h)0
-rw-r--r--arch/alpha/include/uapi/asm/ipcbuf.h (renamed from arch/alpha/include/asm/ipcbuf.h)0
-rw-r--r--arch/alpha/include/uapi/asm/kvm_para.h (renamed from arch/alpha/include/asm/kvm_para.h)0
-rw-r--r--arch/alpha/include/uapi/asm/mman.h (renamed from arch/alpha/include/asm/mman.h)0
-rw-r--r--arch/alpha/include/uapi/asm/msgbuf.h (renamed from arch/alpha/include/asm/msgbuf.h)0
-rw-r--r--arch/alpha/include/uapi/asm/pal.h52
-rw-r--r--arch/alpha/include/uapi/asm/param.h21
-rw-r--r--arch/alpha/include/uapi/asm/poll.h (renamed from arch/alpha/include/asm/poll.h)0
-rw-r--r--arch/alpha/include/uapi/asm/posix_types.h (renamed from arch/alpha/include/asm/posix_types.h)0
-rw-r--r--arch/alpha/include/uapi/asm/ptrace.h70
-rw-r--r--arch/alpha/include/uapi/asm/reg.h (renamed from arch/alpha/include/asm/reg.h)0
-rw-r--r--arch/alpha/include/uapi/asm/regdef.h (renamed from arch/alpha/include/asm/regdef.h)0
-rw-r--r--arch/alpha/include/uapi/asm/resource.h (renamed from arch/alpha/include/asm/resource.h)0
-rw-r--r--arch/alpha/include/uapi/asm/sembuf.h (renamed from arch/alpha/include/asm/sembuf.h)0
-rw-r--r--arch/alpha/include/uapi/asm/setup.h (renamed from arch/alpha/include/asm/setup.h)0
-rw-r--r--arch/alpha/include/uapi/asm/shmbuf.h (renamed from arch/alpha/include/asm/shmbuf.h)0
-rw-r--r--arch/alpha/include/uapi/asm/sigcontext.h (renamed from arch/alpha/include/asm/sigcontext.h)0
-rw-r--r--arch/alpha/include/uapi/asm/siginfo.h (renamed from arch/alpha/include/asm/siginfo.h)0
-rw-r--r--arch/alpha/include/uapi/asm/signal.h129
-rw-r--r--arch/alpha/include/uapi/asm/socket.h81
-rw-r--r--arch/alpha/include/uapi/asm/sockios.h (renamed from arch/alpha/include/asm/sockios.h)0
-rw-r--r--arch/alpha/include/uapi/asm/stat.h (renamed from arch/alpha/include/asm/stat.h)0
-rw-r--r--arch/alpha/include/uapi/asm/statfs.h (renamed from arch/alpha/include/asm/statfs.h)0
-rw-r--r--arch/alpha/include/uapi/asm/swab.h (renamed from arch/alpha/include/asm/swab.h)0
-rw-r--r--arch/alpha/include/uapi/asm/sysinfo.h (renamed from arch/alpha/include/asm/sysinfo.h)0
-rw-r--r--arch/alpha/include/uapi/asm/termbits.h (renamed from arch/alpha/include/asm/termbits.h)0
-rw-r--r--arch/alpha/include/uapi/asm/termios.h70
-rw-r--r--arch/alpha/include/uapi/asm/types.h16
-rw-r--r--arch/alpha/include/uapi/asm/unistd.h471
-rw-r--r--arch/alpha/kernel/pci_iommu.c12
-rw-r--r--arch/alpha/kernel/signal.c10
-rw-r--r--arch/arm/Kconfig30
-rw-r--r--arch/arm/Kconfig.debug82
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/boot/dts/Makefile26
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts25
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts56
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi63
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi57
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts44
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78230.dtsi12
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi19
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78460.dtsi34
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts125
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi91
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi14
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi22
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi32
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi22
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi8
-rw-r--r--arch/arm/boot/dts/ccu9540.dts72
-rw-r--r--arch/arm/boot/dts/cros5250-common.dtsi184
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi84
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts10
-rw-r--r--arch/arm/boot/dts/dove.dtsi81
-rw-r--r--arch/arm/boot/dts/evk-pro3.dts12
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi54
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts70
-rw-r--r--arch/arm/boot/dts/exynos4210-pinctrl.dtsi56
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts16
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts87
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos4212.dtsi28
-rw-r--r--arch/arm/boot/dts/exynos4412-smdk4412.dts45
-rw-r--r--arch/arm/boot/dts/exynos4412.dtsi28
-rw-r--r--arch/arm/boot/dts/exynos4x12-pinctrl.dtsi965
-rw-r--r--arch/arm/boot/dts/exynos4x12.dtsi69
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts56
-rw-r--r--arch/arm/boot/dts/exynos5250-snow.dts43
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi87
-rw-r--r--arch/arm/boot/dts/href.dtsi273
-rw-r--r--arch/arm/boot/dts/hrefprev60.dts48
-rw-r--r--arch/arm/boot/dts/hrefv60plus.dts217
-rw-r--r--arch/arm/boot/dts/imx23-olinuxino.dts23
-rw-r--r--arch/arm/boot/dts/imx23.dtsi13
-rw-r--r--arch/arm/boot/dts/imx25-karo-tx25.dts44
-rw-r--r--arch/arm/boot/dts/imx25.dtsi515
-rw-r--r--arch/arm/boot/dts/imx27-3ds.dts8
-rw-r--r--arch/arm/boot/dts/imx27-apf27.dts89
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycore.dts13
-rw-r--r--arch/arm/boot/dts/imx27.dtsi16
-rw-r--r--arch/arm/boot/dts/imx28-apf28.dts85
-rw-r--r--arch/arm/boot/dts/imx28-apf28dev.dts154
-rw-r--r--arch/arm/boot/dts/imx28-cfa10036.dts46
-rw-r--r--arch/arm/boot/dts/imx28-cfa10049.dts57
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts13
-rw-r--r--arch/arm/boot/dts/imx28-sps1.dts169
-rw-r--r--arch/arm/boot/dts/imx28.dtsi35
-rw-r--r--arch/arm/boot/dts/imx51.dtsi43
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts62
-rw-r--r--arch/arm/boot/dts/imx53.dtsi92
-rw-r--r--arch/arm/boot/dts/imx6q-sabreauto.dts64
-rw-r--r--arch/arm/boot/dts/imx6q-sabresd.dts18
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi114
-rw-r--r--arch/arm/boot/dts/kirkwood-6281.dtsi44
-rw-r--r--arch/arm/boot/dts/kirkwood-6282.dtsi45
-rw-r--r--arch/arm/boot/dts/kirkwood-98dx4122.dtsi31
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi140
-rw-r--r--arch/arm/boot/dts/kirkwood-dockstar.dts37
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts21
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts73
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts40
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts51
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts89
-rw-r--r--arch/arm/boot/dts/kirkwood-km_kirkwood.dts17
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxl.dtsi95
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts59
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts59
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219-6281.dts31
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219-6282.dts31
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi79
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi7
-rw-r--r--arch/arm/boot/dts/omap2420-h4.dts2
-rw-r--r--arch/arm/boot/dts/snowball.dts140
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi10
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts165
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi32
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts253
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi61
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi82
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts20
-rw-r--r--arch/arm/boot/dts/spear300.dtsi22
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts30
-rw-r--r--arch/arm/boot/dts/spear310.dtsi26
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts35
-rw-r--r--arch/arm/boot/dts/spear320-hmi.dts305
-rw-r--r--arch/arm/boot/dts/spear320.dtsi47
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi5
-rw-r--r--arch/arm/boot/dts/spear600-evb.dts46
-rw-r--r--arch/arm/boot/dts/spear600.dtsi24
-rw-r--r--arch/arm/boot/dts/stuib.dtsi78
-rw-r--r--arch/arm/boot/dts/sun4i-a10-cubieboard.dts (renamed from arch/arm/boot/dts/sun4i-cubieboard.dts)4
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi (renamed from arch/arm/boot/dts/sun4i.dtsi)0
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts (renamed from arch/arm/boot/dts/sun5i-olinuxino.dts)4
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi (renamed from arch/arm/boot/dts/sun5i.dtsi)0
-rw-r--r--arch/arm/boot/dts/tegra20-harmony.dts25
-rw-r--r--arch/arm/boot/dts/tegra20-plutux.dts6
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts6
-rw-r--r--arch/arm/boot/dts/tegra20-tamonten.dtsi88
-rw-r--r--arch/arm/boot/dts/tegra20-tec.dts9
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts54
-rw-r--r--arch/arm/boot/dts/tegra20-ventana.dts90
-rw-r--r--arch/arm/boot/dts/tegra20-whistler.dts21
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi158
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu-a02.dts6
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu-a04.dts6
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi37
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi170
-rw-r--r--arch/arm/boot/dts/twl4030.dtsi4
-rw-r--r--arch/arm/boot/dts/u9540.dts72
-rw-r--r--arch/arm/boot/dts/zynq-7000.dtsi166
-rw-r--r--arch/arm/boot/dts/zynq-ep107.dts63
-rw-r--r--arch/arm/boot/dts/zynq-zc702.dts44
-rw-r--r--arch/arm/configs/marzen_defconfig14
-rw-r--r--arch/arm/configs/multi_v7_defconfig3
-rw-r--r--arch/arm/configs/mvebu_defconfig17
-rw-r--r--arch/arm/configs/mxs_defconfig6
-rw-r--r--arch/arm/configs/nhk8815_defconfig2
-rw-r--r--arch/arm/configs/omap2plus_defconfig5
-rw-r--r--arch/arm/configs/socfpga_defconfig3
-rw-r--r--arch/arm/configs/u8500_defconfig1
-rw-r--r--arch/arm/include/asm/dma-mapping.h3
-rw-r--r--arch/arm/include/asm/unistd.h1
-rw-r--r--arch/arm/include/asm/xen/interface.h1
-rw-r--r--arch/arm/include/debug/imx.S20
-rw-r--r--arch/arm/include/debug/tegra.S223
-rw-r--r--arch/arm/include/debug/zynq.S (renamed from arch/arm/mach-zynq/include/mach/debug-macro.S)23
-rw-r--r--arch/arm/include/uapi/asm/signal.h7
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kernel/kprobes-test.c2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/swp_emulate.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S4
-rw-r--r--arch/arm/mach-at91/at91rm9200.c9
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9260.c3
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c2
-rw-r--r--arch/arm/mach-at91/at91sam9261.c9
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9263.c6
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c4
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c6
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c4
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c6
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c4
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c1
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c11
-rw-r--r--arch/arm/mach-davinci/Makefile.boot2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c25
-rw-r--r--arch/arm/mach-davinci/da830.c2
-rw-r--r--arch/arm/mach-davinci/da850.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c35
-rw-r--r--arch/arm/mach-davinci/pm_domain.c1
-rw-r--r--arch/arm/mach-dove/Kconfig2
-rw-r--r--arch/arm/mach-dove/common.c62
-rw-r--r--arch/arm/mach-ep93xx/include/mach/uncompress.h10
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/Makefile1
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c8
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c49
-rw-r--r--arch/arm/mach-exynos/common.c12
-rw-r--r--arch/arm/mach-exynos/common.h2
-rw-r--r--arch/arm/mach-exynos/cpuidle.c39
-rw-r--r--arch/arm/mach-exynos/dev-audio.c12
-rw-r--r--arch/arm/mach-exynos/dev-drm.c29
-rw-r--r--arch/arm/mach-exynos/hotplug.c45
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-clock.h19
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h3
-rw-r--r--arch/arm/mach-exynos/mach-armlex4210.c1
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c4
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c67
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c3
-rw-r--r--arch/arm/mach-exynos/mach-origen.c7
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c7
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c8
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c3
-rw-r--r--arch/arm/mach-exynos/platsmp.c2
-rw-r--r--arch/arm/mach-exynos/pm.c16
-rw-r--r--arch/arm/mach-exynos/pm_domains.c93
-rw-r--r--arch/arm/mach-imx/Kconfig18
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/clk-imx25.c119
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c16
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c1
-rw-r--r--arch/arm/mach-imx/common.h1
-rw-r--r--arch/arm/mach-imx/devices/platform-mx2-emma.c (renamed from arch/arm/plat-mxc/devices/platform-mx2-emma.c)4
-rw-r--r--arch/arm/mach-imx/imx25-dt.c48
-rw-r--r--arch/arm/mach-imx/lluart.c28
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c35
-rw-r--r--arch/arm/mach-imx/mx6q.h4
-rw-r--r--arch/arm/mach-kirkwood/Kconfig7
-rw-r--r--arch/arm/mach-kirkwood/board-dnskw.c54
-rw-r--r--arch/arm/mach-kirkwood/board-dockstar.c16
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c15
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c69
-rw-r--r--arch/arm/mach-kirkwood/board-goflexnet.c24
-rw-r--r--arch/arm/mach-kirkwood/board-ib62x0.c35
-rw-r--r--arch/arm/mach-kirkwood/board-iconnect.c18
-rw-r--r--arch/arm/mach-kirkwood/board-iomega_ix2_200.c26
-rw-r--r--arch/arm/mach-kirkwood/board-km_kirkwood.c13
-rw-r--r--arch/arm/mach-kirkwood/board-lsxl.c28
-rw-r--r--arch/arm/mach-kirkwood/board-mplcec4.c36
-rw-r--r--arch/arm/mach-kirkwood/board-ns2.c1
-rw-r--r--arch/arm/mach-kirkwood/board-nsa310.c4
-rw-r--r--arch/arm/mach-kirkwood/board-openblocks_a6.c5
-rw-r--r--arch/arm/mach-kirkwood/board-ts219.c26
-rw-r--r--arch/arm/mach-kirkwood/board-usi_topkick.c1
-rw-r--r--arch/arm/mach-kirkwood/common.c4
-rw-r--r--arch/arm/mach-mvebu/Kconfig8
-rw-r--r--arch/arm/mach-mvebu/Makefile4
-rw-r--r--arch/arm/mach-mvebu/addr-map.c5
-rw-r--r--arch/arm/mach-mvebu/armada-370-xp.c33
-rw-r--r--arch/arm/mach-mvebu/armada-370-xp.h7
-rw-r--r--arch/arm/mach-mvebu/coherency.c155
-rw-r--r--arch/arm/mach-mvebu/coherency.h24
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S49
-rw-r--r--arch/arm/mach-mvebu/common.h5
-rw-r--r--arch/arm/mach-mvebu/headsmp.S49
-rw-r--r--arch/arm/mach-mvebu/hotplug.c30
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c96
-rw-r--r--arch/arm/mach-mvebu/platsmp.c122
-rw-r--r--arch/arm/mach-mvebu/pmsu.c75
-rw-r--r--arch/arm/mach-mvebu/pmsu.h16
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c58
-rw-r--r--arch/arm/mach-mxs/timer.c10
-rw-r--r--arch/arm/mach-netx/xc.c2
-rw-r--r--arch/arm/mach-nomadik/Kconfig2
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c81
-rw-r--r--arch/arm/mach-nomadik/include/mach/fsmc.h29
-rw-r--r--arch/arm/mach-nomadik/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-omap1/Makefile2
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c14
-rw-r--r--arch/arm/mach-omap1/board-palmte.c2
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c2
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c2
-rw-r--r--arch/arm/mach-omap1/board-sx1.c2
-rw-r--r--arch/arm/mach-omap1/dma.c2
-rw-r--r--arch/arm/mach-omap1/fb.c80
-rw-r--r--arch/arm/mach-omap1/io.c2
-rw-r--r--arch/arm/mach-omap1/lcd_dma.c2
-rw-r--r--arch/arm/mach-omap1/mcbsp.c2
-rw-r--r--arch/arm/mach-omap1/pm.c2
-rw-r--r--arch/arm/mach-omap2/Kconfig8
-rw-r--r--arch/arm/mach-omap2/Makefile12
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c3
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c1
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c1
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c1
-rw-r--r--arch/arm/mach-omap2/board-h4.c83
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c42
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c1
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c8
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c14
-rw-r--r--arch/arm/mach-omap2/board-rx51.c5
-rw-r--r--arch/arm/mach-omap2/cclock2420_data.c1950
-rw-r--r--arch/arm/mach-omap2/cclock2430_data.c2065
-rw-r--r--arch/arm/mach-omap2/cclock33xx_data.c961
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c3595
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c2039
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_apll.c62
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_dpll.c8
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_dpllcore.c13
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_osc.c13
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_sys.c7
-rw-r--r--arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c9
-rw-r--r--arch/arm/mach-omap2/clkt34xx_dpll3m2.c8
-rw-r--r--arch/arm/mach-omap2/clkt_clksel.c194
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c28
-rw-r--r--arch/arm/mach-omap2/clkt_iclk.c30
-rw-r--r--arch/arm/mach-omap2/clock.c910
-rw-r--r--arch/arm/mach-omap2/clock.h339
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c1972
-rw-r--r--arch/arm/mach-omap2/clock2430.c8
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c2071
-rw-r--r--arch/arm/mach-omap2/clock2xxx.c1
-rw-r--r--arch/arm/mach-omap2/clock2xxx.h41
-rw-r--r--arch/arm/mach-omap2/clock33xx_data.c1109
-rw-r--r--arch/arm/mach-omap2/clock34xx.c51
-rw-r--r--arch/arm/mach-omap2/clock3517.c24
-rw-r--r--arch/arm/mach-omap2/clock36xx.c22
-rw-r--r--arch/arm/mach-omap2/clock36xx.h2
-rw-r--r--arch/arm/mach-omap2/clock3xxx.c6
-rw-r--r--arch/arm/mach-omap2/clock3xxx.h6
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c3613
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c3398
-rw-r--r--arch/arm/mach-omap2/clock_common_data.c22
-rw-r--r--arch/arm/mach-omap2/clockdomain.c90
-rw-r--r--arch/arm/mach-omap2/cm-regbits-24xx.h5
-rw-r--r--arch/arm/mach-omap2/cm-regbits-34xx.h31
-rw-r--r--arch/arm/mach-omap2/cm2xxx_3xxx.h1
-rw-r--r--arch/arm/mach-omap2/common.c3
-rw-r--r--arch/arm/mach-omap2/control.h3
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c14
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c28
-rw-r--r--arch/arm/mach-omap2/devices.c14
-rw-r--r--arch/arm/mach-omap2/display.c15
-rw-r--r--arch/arm/mach-omap2/dma.c2
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c226
-rw-r--r--arch/arm/mach-omap2/dpll44xx.c87
-rw-r--r--arch/arm/mach-omap2/drm.c1
-rw-r--r--arch/arm/mach-omap2/dss-common.c3
-rw-r--r--arch/arm/mach-omap2/fb.c (renamed from arch/arm/plat-omap/fb.c)53
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c26
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c143
-rw-r--r--arch/arm/mach-omap2/gpmc-smc91x.c43
-rw-r--r--arch/arm/mach-omap2/gpmc.c373
-rw-r--r--arch/arm/mach-omap2/gpmc.h113
-rw-r--r--arch/arm/mach-omap2/i2c.c20
-rw-r--r--arch/arm/mach-omap2/io.c18
-rw-r--r--arch/arm/mach-omap2/mcbsp.c2
-rw-r--r--arch/arm/mach-omap2/mux.c10
-rw-r--r--arch/arm/mach-omap2/mux.h20
-rw-r--r--arch/arm/mach-omap2/mux34xx.c2
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c167
-rw-r--r--arch/arm/mach-omap2/omap_device.c87
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c145
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c11
-rw-r--r--arch/arm/mach-omap2/omap_opp_data.h9
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c32
-rw-r--r--arch/arm/mach-omap2/omap_twl.c81
-rw-r--r--arch/arm/mach-omap2/opp4xxx_data.c98
-rw-r--r--arch/arm/mach-omap2/pm.c32
-rw-r--r--arch/arm/mach-omap2/pm.h10
-rw-r--r--arch/arm/mach-omap2/pm24xx.c6
-rw-r--r--arch/arm/mach-omap2/pm34xx.c3
-rw-r--r--arch/arm/mach-omap2/pm44xx.c7
-rw-r--r--arch/arm/mach-omap2/pmu.c2
-rw-r--r--arch/arm/mach-omap2/prm-regbits-24xx.h2
-rw-r--r--arch/arm/mach-omap2/prm-regbits-34xx.h1
-rw-r--r--arch/arm/mach-omap2/prm.h11
-rw-r--r--arch/arm/mach-omap2/prm2xxx.c6
-rw-r--r--arch/arm/mach-omap2/prm2xxx.h3
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.h1
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c20
-rw-r--r--arch/arm/mach-omap2/prm3xxx.h1
-rw-r--r--arch/arm/mach-omap2/prm44xx.c49
-rw-r--r--arch/arm/mach-omap2/prm44xx.h1
-rw-r--r--arch/arm/mach-omap2/prm_common.c45
-rw-r--r--arch/arm/mach-omap2/scrm44xx.h2
-rw-r--r--arch/arm/mach-omap2/serial.c5
-rw-r--r--arch/arm/mach-omap2/sr_device.c13
-rw-r--r--arch/arm/mach-omap2/timer.c8
-rw-r--r--arch/arm/mach-omap2/usb-host.c4
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c181
-rw-r--r--arch/arm/mach-omap2/vc.c453
-rw-r--r--arch/arm/mach-omap2/vc.h8
-rw-r--r--arch/arm/mach-omap2/vc3xxx_data.c22
-rw-r--r--arch/arm/mach-omap2/vc44xx_data.c28
-rw-r--r--arch/arm/mach-omap2/voltage.h44
-rw-r--r--arch/arm/mach-omap2/voltagedomains3xxx_data.c5
-rw-r--r--arch/arm/mach-omap2/voltagedomains44xx_data.c25
-rw-r--r--arch/arm/mach-omap2/vp.c19
-rw-r--r--arch/arm/mach-omap2/vp.h7
-rw-r--r--arch/arm/mach-omap2/vp3xxx_data.c10
-rw-r--r--arch/arm/mach-omap2/vp44xx_data.c15
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c6
-rw-r--r--arch/arm/mach-realview/include/mach/board-eb.h2
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/bast-map.h2
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h2
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/vr1000-map.h2
-rw-r--r--arch/arm/mach-s3c24xx/mach-gta02.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-h1940.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-mini2440.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-rx1950.c1
-rw-r--r--arch/arm/mach-s3c24xx/pm.c2
-rw-r--r--arch/arm/mach-s3c64xx/clock.c126
-rw-r--r--arch/arm/mach-s3c64xx/dev-audio.c11
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6440.c49
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6450.c61
-rw-r--r--arch/arm/mach-s5p64x0/dev-audio.c12
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6440.c1
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6450.c1
-rw-r--r--arch/arm/mach-s5pc100/clock.c48
-rw-r--r--arch/arm/mach-s5pc100/dev-audio.c16
-rw-r--r--arch/arm/mach-s5pc100/mach-smdkc100.c1
-rw-r--r--arch/arm/mach-s5pv210/dev-audio.c16
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkc110.c1
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c1
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c198
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c143
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c38
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c86
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c186
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c20
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h2
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c22
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c25
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c23
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mach-socfpga/Makefile1
-rw-r--r--arch/arm/mach-socfpga/core.h34
-rw-r--r--arch/arm/mach-socfpga/headsmp.S25
-rw-r--r--arch/arm/mach-socfpga/platsmp.c116
-rw-r--r--arch/arm/mach-socfpga/socfpga.c54
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear.h8
-rw-r--r--arch/arm/mach-spear13xx/spear1310.c16
-rw-r--r--arch/arm/mach-spear13xx/spear13xx.c2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h10
-rw-r--r--arch/arm/mach-spear3xx/spear300.c103
-rw-r--r--arch/arm/mach-spear3xx/spear310.c202
-rw-r--r--arch/arm/mach-spear3xx/spear320.c205
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c4
-rw-r--r--arch/arm/mach-sunxi/sunxi.c4
-rw-r--r--arch/arm/mach-tegra/Kconfig53
-rw-r--r--arch/arm/mach-tegra/common.c11
-rw-r--r--arch/arm/mach-tegra/include/mach/debug-macro.S100
-rw-r--r--arch/arm/mach-tegra/include/mach/irqs.h182
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h65
-rw-r--r--arch/arm/mach-tegra/io.c1
-rw-r--r--arch/arm/mach-tegra/iomap.h14
-rw-r--r--arch/arm/mach-tegra/irammap.h9
-rw-r--r--arch/arm/mach-tegra/pcie.c3
-rw-r--r--arch/arm/mach-tegra/tegra30_clocks.c4
-rw-r--r--arch/arm/mach-tegra/timer.c78
-rw-r--r--arch/arm/mach-u300/core.c48
-rw-r--r--arch/arm/mach-ux500/Kconfig2
-rw-r--r--arch/arm/mach-ux500/board-mop500-audio.c19
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c425
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500-stuib.c93
-rw-r--r--arch/arm/mach-ux500/board-mop500.c149
-rw-r--r--arch/arm/mach-ux500/board-mop500.h10
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c110
-rw-r--r--arch/arm/mach-ux500/cpu.c3
-rw-r--r--arch/arm/mach-ux500/devices-common.c1
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c4
-rw-r--r--arch/arm/mach-ux500/devices-db8500.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/msp.h2
-rw-r--r--arch/arm/mach-ux500/timer.c5
-rw-r--r--arch/arm/mach-ux500/usb.c4
-rw-r--r--arch/arm/mach-vexpress/reset.c141
-rw-r--r--arch/arm/mach-vt8500/Kconfig12
-rw-r--r--arch/arm/mach-vt8500/common.h3
-rw-r--r--arch/arm/mach-vt8500/include/mach/entry-macro.S26
-rw-r--r--arch/arm/mach-vt8500/irq.c108
-rw-r--r--arch/arm/mach-vt8500/vt8500.c1
-rw-r--r--arch/arm/mach-zynq/Kconfig13
-rw-r--r--arch/arm/mach-zynq/common.c63
-rw-r--r--arch/arm/mach-zynq/common.h4
-rw-r--r--arch/arm/mach-zynq/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-zynq/include/mach/irqs.h21
-rw-r--r--arch/arm/mach-zynq/include/mach/timex.h23
-rw-r--r--arch/arm/mach-zynq/include/mach/uart.h25
-rw-r--r--arch/arm/mach-zynq/include/mach/uncompress.h51
-rw-r--r--arch/arm/mach-zynq/include/mach/zynq_soc.h47
-rw-r--r--arch/arm/mach-zynq/timer.c298
-rw-r--r--arch/arm/mm/Kconfig4
-rw-r--r--arch/arm/mm/cache-v7.S6
-rw-r--r--arch/arm/mm/dma-mapping.c45
-rw-r--r--arch/arm/mm/proc-v7.S67
-rw-r--r--arch/arm/plat-nomadik/Kconfig29
-rw-r--r--arch/arm/plat-nomadik/Makefile5
-rw-r--r--arch/arm/plat-omap/Makefile3
-rw-r--r--arch/arm/plat-omap/debug-devices.c92
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-omap/dmtimer.c13
-rw-r--r--arch/arm/plat-omap/include/plat-omap/dma-omap.h377
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h4
-rw-r--r--arch/arm/plat-omap/include/plat/debug-devices.h2
-rw-r--r--arch/arm/plat-omap/include/plat/vram.h43
-rw-r--r--arch/arm/plat-orion/addr-map.c4
-rw-r--r--arch/arm/plat-orion/common.c192
-rw-r--r--arch/arm/plat-orion/include/plat/addr-map.h1
-rw-r--r--arch/arm/plat-orion/include/plat/common.h1
-rw-r--r--arch/arm/plat-orion/irq.c6
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-samsung/clock.c75
-rw-r--r--arch/arm/plat-samsung/devs.c10
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-core.h7
-rw-r--r--arch/arm/plat-samsung/include/plat/mfc.h11
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h2
-rw-r--r--arch/arm/plat-samsung/s5p-dev-mfc.c34
-rw-r--r--arch/arm/plat-samsung/s5p-irq-gpioint.c8
-rw-r--r--arch/arm/plat-spear/Makefile2
-rw-r--r--arch/arm/plat-spear/shirq.c118
-rw-r--r--arch/arm/xen/enlighten.c123
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/compat.h5
-rw-r--r--arch/arm64/include/asm/dma-mapping.h1
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/kernel/sys_compat.c15
-rw-r--r--arch/avr32/Kconfig2
-rw-r--r--arch/avr32/include/asm/ptrace.h1
-rw-r--r--arch/avr32/include/asm/unistd.h1
-rw-r--r--arch/avr32/include/uapi/asm/signal.h6
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/include/asm/Kbuild6
-rw-r--r--arch/blackfin/include/asm/bfin_sport.h128
-rw-r--r--arch/blackfin/include/asm/bfin_twi.h2
-rw-r--r--arch/blackfin/include/asm/fixed_code.h30
-rw-r--r--arch/blackfin/include/asm/pgtable.h2
-rw-r--r--arch/blackfin/include/asm/ptrace.h162
-rw-r--r--arch/blackfin/include/asm/uaccess.h41
-rw-r--r--arch/blackfin/include/asm/unistd.h431
-rw-r--r--arch/blackfin/include/mach-common/irq.h5
-rw-r--r--arch/blackfin/include/uapi/asm/Kbuild16
-rw-r--r--arch/blackfin/include/uapi/asm/bfin_sport.h136
-rw-r--r--arch/blackfin/include/uapi/asm/byteorder.h (renamed from arch/blackfin/include/asm/byteorder.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/cachectl.h (renamed from arch/blackfin/include/asm/cachectl.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/fcntl.h (renamed from arch/blackfin/include/asm/fcntl.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/fixed_code.h38
-rw-r--r--arch/blackfin/include/uapi/asm/ioctls.h (renamed from arch/blackfin/include/asm/ioctls.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/poll.h (renamed from arch/blackfin/include/asm/poll.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/posix_types.h (renamed from arch/blackfin/include/asm/posix_types.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/ptrace.h170
-rw-r--r--arch/blackfin/include/uapi/asm/sigcontext.h (renamed from arch/blackfin/include/asm/sigcontext.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/siginfo.h (renamed from arch/blackfin/include/asm/siginfo.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/signal.h (renamed from arch/blackfin/include/asm/signal.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/stat.h (renamed from arch/blackfin/include/asm/stat.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/swab.h (renamed from arch/blackfin/include/asm/swab.h)0
-rw-r--r--arch/blackfin/include/uapi/asm/unistd.h437
-rw-r--r--arch/blackfin/kernel/kgdb.c13
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h1
-rw-r--r--arch/blackfin/mach-bf609/Kconfig2
-rw-r--r--arch/blackfin/mach-bf609/include/mach/irq.h3
-rw-r--r--arch/blackfin/mach-bf609/pm.c3
-rw-r--r--arch/blackfin/mach-common/dpmc.c19
-rw-r--r--arch/blackfin/mach-common/ints-priority.c272
-rw-r--r--arch/blackfin/mm/sram-alloc.c2
-rw-r--r--arch/c6x/Kconfig2
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/dma-mapping.h1
-rw-r--r--arch/c6x/include/asm/mmu.h22
-rw-r--r--arch/c6x/include/uapi/asm/unistd.h1
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/cris/include/arch-v10/arch/Kbuild5
-rw-r--r--arch/cris/include/arch-v10/arch/irq.h2
-rw-r--r--arch/cris/include/arch-v32/arch/Kbuild3
-rw-r--r--arch/cris/include/arch-v32/arch/cryptocop.h116
-rw-r--r--arch/cris/include/arch-v32/arch/irq.h2
-rw-r--r--arch/cris/include/arch-v32/arch/spinlock.h2
-rw-r--r--arch/cris/include/asm/Kbuild5
-rw-r--r--arch/cris/include/asm/io.h39
-rw-r--r--arch/cris/include/asm/ptrace.h6
-rw-r--r--arch/cris/include/asm/signal.h121
-rw-r--r--arch/cris/include/asm/swab.h3
-rw-r--r--arch/cris/include/asm/termios.h43
-rw-r--r--arch/cris/include/asm/types.h5
-rw-r--r--arch/cris/include/asm/unistd.h343
-rw-r--r--arch/cris/include/uapi/arch-v10/arch/Kbuild4
-rw-r--r--arch/cris/include/uapi/arch-v10/arch/sv_addr.agh (renamed from arch/cris/include/arch-v10/arch/sv_addr.agh)0
-rw-r--r--arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h (renamed from arch/cris/include/arch-v10/arch/sv_addr_ag.h)0
-rw-r--r--arch/cris/include/uapi/arch-v10/arch/svinto.h (renamed from arch/cris/include/arch-v10/arch/svinto.h)0
-rw-r--r--arch/cris/include/uapi/arch-v10/arch/user.h (renamed from arch/cris/include/arch-v10/arch/user.h)0
-rw-r--r--arch/cris/include/uapi/arch-v32/arch/Kbuild2
-rw-r--r--arch/cris/include/uapi/arch-v32/arch/cryptocop.h122
-rw-r--r--arch/cris/include/uapi/arch-v32/arch/user.h (renamed from arch/cris/include/arch-v32/arch/user.h)0
-rw-r--r--arch/cris/include/uapi/asm/Kbuild34
-rw-r--r--arch/cris/include/uapi/asm/auxvec.h (renamed from arch/cris/include/asm/auxvec.h)0
-rw-r--r--arch/cris/include/uapi/asm/bitsperlong.h (renamed from arch/cris/include/asm/bitsperlong.h)0
-rw-r--r--arch/cris/include/uapi/asm/byteorder.h (renamed from arch/cris/include/asm/byteorder.h)0
-rw-r--r--arch/cris/include/uapi/asm/errno.h (renamed from arch/cris/include/asm/errno.h)0
-rw-r--r--arch/cris/include/uapi/asm/ethernet.h (renamed from arch/cris/include/asm/ethernet.h)0
-rw-r--r--arch/cris/include/uapi/asm/etraxgpio.h (renamed from arch/cris/include/asm/etraxgpio.h)0
-rw-r--r--arch/cris/include/uapi/asm/fcntl.h (renamed from arch/cris/include/asm/fcntl.h)0
-rw-r--r--arch/cris/include/uapi/asm/ioctl.h (renamed from arch/cris/include/asm/ioctl.h)0
-rw-r--r--arch/cris/include/uapi/asm/ioctls.h (renamed from arch/cris/include/asm/ioctls.h)0
-rw-r--r--arch/cris/include/uapi/asm/ipcbuf.h (renamed from arch/cris/include/asm/ipcbuf.h)0
-rw-r--r--arch/cris/include/uapi/asm/mman.h (renamed from arch/cris/include/asm/mman.h)0
-rw-r--r--arch/cris/include/uapi/asm/msgbuf.h (renamed from arch/cris/include/asm/msgbuf.h)0
-rw-r--r--arch/cris/include/uapi/asm/param.h (renamed from arch/cris/include/asm/param.h)0
-rw-r--r--arch/cris/include/uapi/asm/poll.h (renamed from arch/cris/include/asm/poll.h)0
-rw-r--r--arch/cris/include/uapi/asm/posix_types.h (renamed from arch/cris/include/asm/posix_types.h)0
-rw-r--r--arch/cris/include/uapi/asm/ptrace.h1
-rw-r--r--arch/cris/include/uapi/asm/resource.h (renamed from arch/cris/include/asm/resource.h)0
-rw-r--r--arch/cris/include/uapi/asm/rs485.h (renamed from arch/cris/include/asm/rs485.h)0
-rw-r--r--arch/cris/include/uapi/asm/sembuf.h (renamed from arch/cris/include/asm/sembuf.h)0
-rw-r--r--arch/cris/include/uapi/asm/setup.h (renamed from arch/cris/include/asm/setup.h)0
-rw-r--r--arch/cris/include/uapi/asm/shmbuf.h (renamed from arch/cris/include/asm/shmbuf.h)0
-rw-r--r--arch/cris/include/uapi/asm/sigcontext.h (renamed from arch/cris/include/asm/sigcontext.h)0
-rw-r--r--arch/cris/include/uapi/asm/siginfo.h (renamed from arch/cris/include/asm/siginfo.h)0
-rw-r--r--arch/cris/include/uapi/asm/signal.h116
-rw-r--r--arch/cris/include/uapi/asm/socket.h (renamed from arch/cris/include/asm/socket.h)0
-rw-r--r--arch/cris/include/uapi/asm/sockios.h (renamed from arch/cris/include/asm/sockios.h)0
-rw-r--r--arch/cris/include/uapi/asm/stat.h (renamed from arch/cris/include/asm/stat.h)0
-rw-r--r--arch/cris/include/uapi/asm/statfs.h (renamed from arch/cris/include/asm/statfs.h)0
-rw-r--r--arch/cris/include/uapi/asm/swab.h3
-rw-r--r--arch/cris/include/uapi/asm/sync_serial.h (renamed from arch/cris/include/asm/sync_serial.h)0
-rw-r--r--arch/cris/include/uapi/asm/termbits.h (renamed from arch/cris/include/asm/termbits.h)0
-rw-r--r--arch/cris/include/uapi/asm/termios.h45
-rw-r--r--arch/cris/include/uapi/asm/types.h1
-rw-r--r--arch/cris/include/uapi/asm/unistd.h344
-rw-r--r--arch/cris/kernel/asm-offsets.c6
-rw-r--r--arch/cris/kernel/module.c2
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/frv/include/asm/unistd.h1
-rw-r--r--arch/frv/kernel/setup.c12
-rw-r--r--arch/frv/mm/init.c2
-rw-r--r--arch/frv/mm/pgalloc.c2
-rw-r--r--arch/h8300/Kconfig3
-rw-r--r--arch/h8300/include/asm/Kbuild2
-rw-r--r--arch/h8300/include/asm/mmu.h10
-rw-r--r--arch/h8300/include/asm/param.h15
-rw-r--r--arch/h8300/include/asm/ptrace.h41
-rw-r--r--arch/h8300/include/asm/signal.h121
-rw-r--r--arch/h8300/include/asm/termios.h44
-rw-r--r--arch/h8300/include/asm/types.h5
-rw-r--r--arch/h8300/include/asm/unistd.h329
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild31
-rw-r--r--arch/h8300/include/uapi/asm/auxvec.h (renamed from arch/h8300/include/asm/auxvec.h)0
-rw-r--r--arch/h8300/include/uapi/asm/bitsperlong.h (renamed from arch/h8300/include/asm/bitsperlong.h)0
-rw-r--r--arch/h8300/include/uapi/asm/byteorder.h (renamed from arch/h8300/include/asm/byteorder.h)0
-rw-r--r--arch/h8300/include/uapi/asm/errno.h (renamed from arch/h8300/include/asm/errno.h)0
-rw-r--r--arch/h8300/include/uapi/asm/fcntl.h (renamed from arch/h8300/include/asm/fcntl.h)0
-rw-r--r--arch/h8300/include/uapi/asm/ioctl.h (renamed from arch/h8300/include/asm/ioctl.h)0
-rw-r--r--arch/h8300/include/uapi/asm/ioctls.h (renamed from arch/h8300/include/asm/ioctls.h)0
-rw-r--r--arch/h8300/include/uapi/asm/ipcbuf.h (renamed from arch/h8300/include/asm/ipcbuf.h)0
-rw-r--r--arch/h8300/include/uapi/asm/kvm_para.h (renamed from arch/blackfin/include/asm/kvm_para.h)0
-rw-r--r--arch/h8300/include/uapi/asm/mman.h (renamed from arch/h8300/include/asm/mman.h)0
-rw-r--r--arch/h8300/include/uapi/asm/msgbuf.h (renamed from arch/h8300/include/asm/msgbuf.h)0
-rw-r--r--arch/h8300/include/uapi/asm/param.h16
-rw-r--r--arch/h8300/include/uapi/asm/poll.h (renamed from arch/h8300/include/asm/poll.h)0
-rw-r--r--arch/h8300/include/uapi/asm/posix_types.h (renamed from arch/h8300/include/asm/posix_types.h)0
-rw-r--r--arch/h8300/include/uapi/asm/ptrace.h44
-rw-r--r--arch/h8300/include/uapi/asm/resource.h (renamed from arch/h8300/include/asm/resource.h)0
-rw-r--r--arch/h8300/include/uapi/asm/sembuf.h (renamed from arch/h8300/include/asm/sembuf.h)0
-rw-r--r--arch/h8300/include/uapi/asm/setup.h (renamed from arch/h8300/include/asm/setup.h)0
-rw-r--r--arch/h8300/include/uapi/asm/shmbuf.h (renamed from arch/h8300/include/asm/shmbuf.h)0
-rw-r--r--arch/h8300/include/uapi/asm/sigcontext.h (renamed from arch/h8300/include/asm/sigcontext.h)0
-rw-r--r--arch/h8300/include/uapi/asm/siginfo.h (renamed from arch/h8300/include/asm/siginfo.h)0
-rw-r--r--arch/h8300/include/uapi/asm/signal.h115
-rw-r--r--arch/h8300/include/uapi/asm/socket.h (renamed from arch/h8300/include/asm/socket.h)0
-rw-r--r--arch/h8300/include/uapi/asm/sockios.h (renamed from arch/h8300/include/asm/sockios.h)0
-rw-r--r--arch/h8300/include/uapi/asm/stat.h (renamed from arch/h8300/include/asm/stat.h)0
-rw-r--r--arch/h8300/include/uapi/asm/statfs.h (renamed from arch/h8300/include/asm/statfs.h)0
-rw-r--r--arch/h8300/include/uapi/asm/swab.h (renamed from arch/h8300/include/asm/swab.h)0
-rw-r--r--arch/h8300/include/uapi/asm/termbits.h (renamed from arch/h8300/include/asm/termbits.h)0
-rw-r--r--arch/h8300/include/uapi/asm/termios.h44
-rw-r--r--arch/h8300/include/uapi/asm/types.h1
-rw-r--r--arch/h8300/include/uapi/asm/unistd.h330
-rw-r--r--arch/hexagon/Kconfig2
-rw-r--r--arch/hexagon/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/dma-mapping.h1
-rw-r--r--arch/ia64/include/asm/ptrace.h5
-rw-r--r--arch/ia64/include/asm/unistd.h1
-rw-r--r--arch/ia64/include/uapi/asm/signal.h6
-rw-r--r--arch/ia64/kvm/kvm-ia64.c9
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/ptrace.h111
-rw-r--r--arch/m32r/include/asm/setup.h9
-rw-r--r--arch/m32r/include/asm/signal.h123
-rw-r--r--arch/m32r/include/asm/termios.h42
-rw-r--r--arch/m32r/include/asm/types.h5
-rw-r--r--arch/m32r/include/asm/unistd.h334
-rw-r--r--arch/m32r/include/uapi/asm/Kbuild30
-rw-r--r--arch/m32r/include/uapi/asm/auxvec.h (renamed from arch/m32r/include/asm/auxvec.h)0
-rw-r--r--arch/m32r/include/uapi/asm/bitsperlong.h (renamed from arch/m32r/include/asm/bitsperlong.h)0
-rw-r--r--arch/m32r/include/uapi/asm/byteorder.h (renamed from arch/m32r/include/asm/byteorder.h)0
-rw-r--r--arch/m32r/include/uapi/asm/errno.h (renamed from arch/m32r/include/asm/errno.h)0
-rw-r--r--arch/m32r/include/uapi/asm/fcntl.h (renamed from arch/m32r/include/asm/fcntl.h)0
-rw-r--r--arch/m32r/include/uapi/asm/ioctl.h (renamed from arch/m32r/include/asm/ioctl.h)0
-rw-r--r--arch/m32r/include/uapi/asm/ioctls.h (renamed from arch/m32r/include/asm/ioctls.h)0
-rw-r--r--arch/m32r/include/uapi/asm/ipcbuf.h (renamed from arch/m32r/include/asm/ipcbuf.h)0
-rw-r--r--arch/m32r/include/uapi/asm/mman.h (renamed from arch/m32r/include/asm/mman.h)0
-rw-r--r--arch/m32r/include/uapi/asm/msgbuf.h (renamed from arch/m32r/include/asm/msgbuf.h)0
-rw-r--r--arch/m32r/include/uapi/asm/param.h (renamed from arch/m32r/include/asm/param.h)0
-rw-r--r--arch/m32r/include/uapi/asm/poll.h (renamed from arch/m32r/include/asm/poll.h)0
-rw-r--r--arch/m32r/include/uapi/asm/posix_types.h (renamed from arch/m32r/include/asm/posix_types.h)0
-rw-r--r--arch/m32r/include/uapi/asm/ptrace.h117
-rw-r--r--arch/m32r/include/uapi/asm/resource.h (renamed from arch/m32r/include/asm/resource.h)0
-rw-r--r--arch/m32r/include/uapi/asm/sembuf.h (renamed from arch/m32r/include/asm/sembuf.h)0
-rw-r--r--arch/m32r/include/uapi/asm/setup.h11
-rw-r--r--arch/m32r/include/uapi/asm/shmbuf.h (renamed from arch/m32r/include/asm/shmbuf.h)0
-rw-r--r--arch/m32r/include/uapi/asm/sigcontext.h (renamed from arch/m32r/include/asm/sigcontext.h)0
-rw-r--r--arch/m32r/include/uapi/asm/siginfo.h (renamed from arch/m32r/include/asm/siginfo.h)0
-rw-r--r--arch/m32r/include/uapi/asm/signal.h117
-rw-r--r--arch/m32r/include/uapi/asm/socket.h (renamed from arch/m32r/include/asm/socket.h)0
-rw-r--r--arch/m32r/include/uapi/asm/sockios.h (renamed from arch/m32r/include/asm/sockios.h)0
-rw-r--r--arch/m32r/include/uapi/asm/stat.h (renamed from arch/m32r/include/asm/stat.h)0
-rw-r--r--arch/m32r/include/uapi/asm/statfs.h (renamed from arch/m32r/include/asm/statfs.h)0
-rw-r--r--arch/m32r/include/uapi/asm/swab.h (renamed from arch/m32r/include/asm/swab.h)0
-rw-r--r--arch/m32r/include/uapi/asm/termbits.h (renamed from arch/m32r/include/asm/termbits.h)0
-rw-r--r--arch/m32r/include/uapi/asm/termios.h (renamed from arch/m68k/include/uapi/asm/termios.h)7
-rw-r--r--arch/m32r/include/uapi/asm/types.h1
-rw-r--r--arch/m32r/include/uapi/asm/unistd.h335
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/Kconfig.bus4
-rw-r--r--arch/m68k/Kconfig.cpu6
-rw-r--r--arch/m68k/Kconfig.debug2
-rw-r--r--arch/m68k/Kconfig.devices6
-rw-r--r--arch/m68k/Makefile6
-rw-r--r--arch/m68k/include/asm/Kbuild4
-rw-r--r--arch/m68k/include/asm/hw_irq.h6
-rw-r--r--arch/m68k/include/asm/m5249sim.h269
-rw-r--r--arch/m68k/include/asm/m525xsim.h116
-rw-r--r--arch/m68k/include/asm/mcfclk.h9
-rw-r--r--arch/m68k/include/asm/mcfsim.h5
-rw-r--r--arch/m68k/include/asm/page_no.h2
-rw-r--r--arch/m68k/include/asm/ptrace.h1
-rw-r--r--arch/m68k/include/asm/shmparam.h6
-rw-r--r--arch/m68k/include/asm/spinlock.h6
-rw-r--r--arch/m68k/include/asm/termios.h50
-rw-r--r--arch/m68k/include/asm/unistd.h1
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild17
-rw-r--r--arch/m68k/include/uapi/asm/auxvec.h4
-rw-r--r--arch/m68k/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/m68k/include/uapi/asm/sembuf.h25
-rw-r--r--arch/m68k/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/m68k/include/uapi/asm/signal.h6
-rw-r--r--arch/m68k/include/uapi/asm/socket.h73
-rw-r--r--arch/m68k/include/uapi/asm/sockios.h13
-rw-r--r--arch/m68k/include/uapi/asm/termbits.h201
-rw-r--r--arch/m68k/kernel/traps.c2
-rw-r--r--arch/m68k/lib/memcpy.c3
-rw-r--r--arch/m68k/math-emu/fp_log.c2
-rw-r--r--arch/m68k/mm/init.c224
-rw-r--r--arch/m68k/mm/init_mm.c176
-rw-r--r--arch/m68k/mm/init_no.c145
-rw-r--r--arch/m68k/mm/mcfmmu.c4
-rw-r--r--arch/m68k/mm/motorola.c14
-rw-r--r--arch/m68k/mm/sun3mmu.c4
-rw-r--r--arch/m68k/platform/68000/Makefile18
-rw-r--r--arch/m68k/platform/68000/bootlogo-vz.h (renamed from arch/m68k/platform/68VZ328/bootlogo.h)0
-rw-r--r--arch/m68k/platform/68000/bootlogo.h (renamed from arch/m68k/platform/68328/bootlogo.h)0
-rw-r--r--arch/m68k/platform/68000/entry.S (renamed from arch/m68k/platform/68328/entry.S)0
-rw-r--r--arch/m68k/platform/68000/head.S240
-rw-r--r--arch/m68k/platform/68000/ints.c (renamed from arch/m68k/platform/68328/ints.c)2
-rw-r--r--arch/m68k/platform/68000/m68328.c (renamed from arch/m68k/platform/68328/config.c)2
-rw-r--r--arch/m68k/platform/68000/m68EZ328.c (renamed from arch/m68k/platform/68EZ328/config.c)2
-rw-r--r--arch/m68k/platform/68000/m68VZ328.c (renamed from arch/m68k/platform/68VZ328/config.c)4
-rw-r--r--arch/m68k/platform/68000/romvec.S (renamed from arch/m68k/platform/68328/romvec.S)2
-rw-r--r--arch/m68k/platform/68000/timers.c (renamed from arch/m68k/platform/68328/timers.c)2
-rw-r--r--arch/m68k/platform/68328/Makefile21
-rw-r--r--arch/m68k/platform/68328/head-de2.S128
-rw-r--r--arch/m68k/platform/68328/head-pilot.S207
-rw-r--r--arch/m68k/platform/68328/head-ram.S141
-rw-r--r--arch/m68k/platform/68328/head-rom.S105
-rw-r--r--arch/m68k/platform/68EZ328/Makefile5
-rw-r--r--arch/m68k/platform/68VZ328/Makefile5
-rw-r--r--arch/m68k/platform/coldfire/clk.c100
-rw-r--r--arch/m68k/platform/coldfire/intc-5249.c8
-rw-r--r--arch/m68k/platform/coldfire/m5206.c20
-rw-r--r--arch/m68k/platform/coldfire/m523x.c28
-rw-r--r--arch/m68k/platform/coldfire/m5249.c28
-rw-r--r--arch/m68k/platform/coldfire/m525x.c20
-rw-r--r--arch/m68k/platform/coldfire/m5272.c26
-rw-r--r--arch/m68k/platform/coldfire/m527x.c30
-rw-r--r--arch/m68k/platform/coldfire/m528x.c28
-rw-r--r--arch/m68k/platform/coldfire/m5307.c20
-rw-r--r--arch/m68k/platform/coldfire/m5407.c20
-rw-r--r--arch/m68k/platform/coldfire/m54xx.c26
-rw-r--r--arch/m68k/sun3/sun3ints.c29
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/Kbuild2
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h2
-rw-r--r--arch/microblaze/include/asm/elf.h97
-rw-r--r--arch/microblaze/include/asm/entry.h2
-rw-r--r--arch/microblaze/include/asm/ptrace.h63
-rw-r--r--arch/microblaze/include/asm/setup.h6
-rw-r--r--arch/microblaze/include/asm/uaccess.h3
-rw-r--r--arch/microblaze/include/asm/unistd.h391
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild32
-rw-r--r--arch/microblaze/include/uapi/asm/auxvec.h (renamed from arch/microblaze/include/asm/auxvec.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/bitsperlong.h (renamed from arch/microblaze/include/asm/bitsperlong.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/byteorder.h (renamed from arch/microblaze/include/asm/byteorder.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/elf.h121
-rw-r--r--arch/microblaze/include/uapi/asm/errno.h (renamed from arch/microblaze/include/asm/errno.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/fcntl.h (renamed from arch/microblaze/include/asm/fcntl.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/ioctl.h (renamed from arch/microblaze/include/asm/ioctl.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/ioctls.h (renamed from arch/microblaze/include/asm/ioctls.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/ipcbuf.h (renamed from arch/microblaze/include/asm/ipcbuf.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/kvm_para.h (renamed from arch/h8300/include/asm/kvm_para.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/mman.h (renamed from arch/microblaze/include/asm/mman.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/msgbuf.h (renamed from arch/microblaze/include/asm/msgbuf.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/param.h (renamed from arch/microblaze/include/asm/param.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/poll.h (renamed from arch/microblaze/include/asm/poll.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/posix_types.h (renamed from arch/microblaze/include/asm/posix_types.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/ptrace.h72
-rw-r--r--arch/microblaze/include/uapi/asm/resource.h (renamed from arch/microblaze/include/asm/resource.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/sembuf.h (renamed from arch/microblaze/include/asm/sembuf.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/setup.h19
-rw-r--r--arch/microblaze/include/uapi/asm/shmbuf.h (renamed from arch/microblaze/include/asm/shmbuf.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/sigcontext.h (renamed from arch/microblaze/include/asm/sigcontext.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/siginfo.h (renamed from arch/microblaze/include/asm/siginfo.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/signal.h (renamed from arch/microblaze/include/asm/signal.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/socket.h (renamed from arch/microblaze/include/asm/socket.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/sockios.h (renamed from arch/microblaze/include/asm/sockios.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/stat.h (renamed from arch/microblaze/include/asm/stat.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/statfs.h (renamed from arch/microblaze/include/asm/statfs.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/swab.h (renamed from arch/microblaze/include/asm/swab.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/termbits.h (renamed from arch/microblaze/include/asm/termbits.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/termios.h (renamed from arch/microblaze/include/asm/termios.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/types.h (renamed from arch/microblaze/include/asm/types.h)0
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h401
-rw-r--r--arch/microblaze/kernel/entry-nommu.S1
-rw-r--r--arch/microblaze/kernel/intc.c5
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/prom.c4
-rw-r--r--arch/microblaze/kernel/signal.c2
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/microblaze/lib/libgcc.h7
-rw-r--r--arch/microblaze/lib/muldi3.c28
-rw-r--r--arch/mips/Kconfig143
-rw-r--r--arch/mips/Makefile12
-rw-r--r--arch/mips/ar7/platform.c3
-rw-r--r--arch/mips/bcm47xx/Kconfig2
-rw-r--r--arch/mips/bcm47xx/Makefile2
-rw-r--r--arch/mips/bcm47xx/gpio.c102
-rw-r--r--arch/mips/bcm47xx/prom.c20
-rw-r--r--arch/mips/bcm47xx/setup.c11
-rw-r--r--arch/mips/bcm47xx/sprom.c780
-rw-r--r--arch/mips/bcm47xx/wgt634u.c8
-rw-r--r--arch/mips/bcm63xx/Makefile7
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c71
-rw-r--r--arch/mips/bcm63xx/clk.c34
-rw-r--r--arch/mips/bcm63xx/nvram.c107
-rw-r--r--arch/mips/bcm63xx/reset.c223
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c5
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c1
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S27
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c102
-rw-r--r--arch/mips/cavium-octeon/setup.c374
-rw-r--r--arch/mips/configs/ath79_defconfig111
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig98
-rw-r--r--arch/mips/configs/yosemite_defconfig94
-rw-r--r--arch/mips/fw/sni/Makefile2
-rw-r--r--arch/mips/include/asm/cpu.h6
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/include/asm/fw/arc/types.h8
-rw-r--r--arch/mips/include/asm/hazards.h25
-rw-r--r--arch/mips/include/asm/kexec.h27
-rw-r--r--arch/mips/include/asm/mach-ar7/war.h1
-rw-r--r--arch/mips/include/asm/mach-ath79/war.h1
-rw-r--r--arch/mips/include/asm/mach-au1x00/war.h1
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx.h4
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/gpio.h154
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/war.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h35
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h29
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h21
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h17
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/war.h1
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h1
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/war.h1
-rw-r--r--arch/mips/include/asm/mach-cobalt/war.h1
-rw-r--r--arch/mips/include/asm/mach-dec/war.h1
-rw-r--r--arch/mips/include/asm/mach-emma2rh/war.h1
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h6
-rw-r--r--arch/mips/include/asm/mach-ip22/war.h1
-rw-r--r--arch/mips/include/asm/mach-ip27/war.h1
-rw-r--r--arch/mips/include/asm/mach-ip28/war.h1
-rw-r--r--arch/mips/include/asm/mach-ip32/war.h1
-rw-r--r--arch/mips/include/asm/mach-jazz/war.h1
-rw-r--r--arch/mips/include/asm/mach-jz4740/war.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/war.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h3
-rw-r--r--arch/mips/include/asm/mach-lasat/war.h1
-rw-r--r--arch/mips/include/asm/mach-loongson/war.h1
-rw-r--r--arch/mips/include/asm/mach-loongson1/platform.h3
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-clk.h7
-rw-r--r--arch/mips/include/asm/mach-loongson1/war.h1
-rw-r--r--arch/mips/include/asm/mach-malta/war.h1
-rw-r--r--arch/mips/include/asm/mach-netlogic/irq.h4
-rw-r--r--arch/mips/include/asm/mach-netlogic/multi-node.h54
-rw-r--r--arch/mips/include/asm/mach-netlogic/war.h1
-rw-r--r--arch/mips/include/asm/mach-pnx833x/war.h1
-rw-r--r--arch/mips/include/asm/mach-pnx8550/war.h1
-rw-r--r--arch/mips/include/asm/mach-powertv/war.h1
-rw-r--r--arch/mips/include/asm/mach-rc32434/war.h1
-rw-r--r--arch/mips/include/asm/mach-rm/war.h1
-rw-r--r--arch/mips/include/asm/mach-sead3/war.h1
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h1
-rw-r--r--arch/mips/include/asm/mach-tx39xx/war.h1
-rw-r--r--arch/mips/include/asm/mach-tx49xx/war.h1
-rw-r--r--arch/mips/include/asm/mach-vr41xx/war.h1
-rw-r--r--arch/mips/include/asm/mach-wrppmc/war.h1
-rw-r--r--arch/mips/include/asm/mach-yosemite/cpu-feature-overrides.h48
-rw-r--r--arch/mips/include/asm/mach-yosemite/war.h25
-rw-r--r--arch/mips/include/asm/mipsregs.h10
-rw-r--r--arch/mips/include/asm/mmu_context.h6
-rw-r--r--arch/mips/include/asm/module.h2
-rw-r--r--arch/mips/include/asm/netlogic/common.h51
-rw-r--r--arch/mips/include/asm/netlogic/interrupt.h2
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h142
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h44
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h1
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h363
-rw-r--r--arch/mips/include/asm/netlogic/xlr/pic.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlr/xlr.h6
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-lmcx-defs.h3457
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h6
-rw-r--r--arch/mips/include/asm/octeon/octeon.h7
-rw-r--r--arch/mips/include/asm/page.h8
-rw-r--r--arch/mips/include/asm/pgtable-64.h2
-rw-r--r--arch/mips/include/asm/pgtable-bits.h131
-rw-r--r--arch/mips/include/asm/pgtable.h179
-rw-r--r--arch/mips/include/asm/pmc-sierra/msp71xx/war.h1
-rw-r--r--arch/mips/include/asm/processor.h4
-rw-r--r--arch/mips/include/asm/ptrace.h1
-rw-r--r--arch/mips/include/asm/sgiarcs.h8
-rw-r--r--arch/mips/include/asm/smp.h6
-rw-r--r--arch/mips/include/asm/smvp.h19
-rw-r--r--arch/mips/include/asm/sparsemem.h2
-rw-r--r--arch/mips/include/asm/time.h4
-rw-r--r--arch/mips/include/asm/titan_dep.h231
-rw-r--r--arch/mips/include/asm/unistd.h1
-rw-r--r--arch/mips/include/asm/war.h8
-rw-r--r--arch/mips/include/uapi/asm/signal.h6
-rw-r--r--arch/mips/kernel/Makefile8
-rw-r--r--arch/mips/kernel/asm-offsets.c4
-rw-r--r--arch/mips/kernel/crash.c71
-rw-r--r--arch/mips/kernel/crash_dump.c75
-rw-r--r--arch/mips/kernel/irq-rm9000.c106
-rw-r--r--arch/mips/kernel/machine_kexec.c33
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c4
-rw-r--r--arch/mips/kernel/mips_ksyms.c2
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c124
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/relocate_kernel.S107
-rw-r--r--arch/mips/kernel/scall64-n32.S6
-rw-r--r--arch/mips/kernel/setup.c56
-rw-r--r--arch/mips/kernel/signal.c13
-rw-r--r--arch/mips/kernel/smp.c17
-rw-r--r--arch/mips/kernel/traps.c25
-rw-r--r--arch/mips/lantiq/Kconfig4
-rw-r--r--arch/mips/lantiq/prom.c5
-rw-r--r--arch/mips/lantiq/xway/Makefile2
-rw-r--r--arch/mips/lantiq/xway/dma.c15
-rw-r--r--arch/mips/lantiq/xway/reset.c58
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c4
-rw-r--r--arch/mips/lantiq/xway/xrx200_phy_fw.c97
-rw-r--r--arch/mips/loongson1/Kconfig2
-rw-r--r--arch/mips/loongson1/common/clock.c159
-rw-r--r--arch/mips/loongson1/common/platform.c10
-rw-r--r--arch/mips/loongson1/ls1b/board.c5
-rw-r--r--arch/mips/math-emu/cp1emu.c15
-rw-r--r--arch/mips/mm/c-octeon.c67
-rw-r--r--arch/mips/mm/c-r4k.c23
-rw-r--r--arch/mips/mm/highmem.c3
-rw-r--r--arch/mips/mm/page.c9
-rw-r--r--arch/mips/mm/pgtable-64.c31
-rw-r--r--arch/mips/mm/tlb-r4k.c22
-rw-r--r--arch/mips/mm/tlbex.c123
-rw-r--r--arch/mips/netlogic/Kconfig28
-rw-r--r--arch/mips/netlogic/common/irq.c165
-rw-r--r--arch/mips/netlogic/common/smp.c89
-rw-r--r--arch/mips/netlogic/common/smpboot.S6
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c67
-rw-r--r--arch/mips/netlogic/xlp/setup.c50
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c83
-rw-r--r--arch/mips/netlogic/xlr/Makefile4
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c290
-rw-r--r--arch/mips/netlogic/xlr/fmn.c204
-rw-r--r--arch/mips/netlogic/xlr/setup.c37
-rw-r--r--arch/mips/netlogic/xlr/wakeup.c23
-rw-r--r--arch/mips/oprofile/Makefile2
-rw-r--r--arch/mips/oprofile/common.c5
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c29
-rw-r--r--arch/mips/oprofile/op_model_rm9000.c138
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/fixup-yosemite.c41
-rw-r--r--arch/mips/pci/ops-bridge.c24
-rw-r--r--arch/mips/pci/ops-titan-ht.c124
-rw-r--r--arch/mips/pci/ops-titan.c111
-rw-r--r--arch/mips/pci/pci-bcm63xx.c34
-rw-r--r--arch/mips/pci/pci-octeon.c5
-rw-r--r--arch/mips/pci/pci-xlr.c69
-rw-r--r--arch/mips/pci/pci-yosemite.c67
-rw-r--r--arch/mips/pmc-sierra/Kconfig4
-rw-r--r--arch/mips/pmc-sierra/Platform7
-rw-r--r--arch/mips/pmc-sierra/yosemite/Makefile7
-rw-r--r--arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c169
-rw-r--r--arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.h67
-rw-r--r--arch/mips/pmc-sierra/yosemite/ht-irq.c41
-rw-r--r--arch/mips/pmc-sierra/yosemite/ht.c404
-rw-r--r--arch/mips/pmc-sierra/yosemite/irq.c152
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c142
-rw-r--r--arch/mips/pmc-sierra/yosemite/py-console.c109
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c224
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.h32
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c185
-rw-r--r--arch/mips/powertv/init.c37
-rw-r--r--arch/mips/rb532/prom.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c8
-rw-r--r--arch/mips/sibyte/Kconfig2
-rw-r--r--arch/mips/sni/setup.c8
-rw-r--r--arch/mips/txx9/generic/pci.c2
-rw-r--r--arch/mips/wrppmc/pci.c6
-rw-r--r--arch/mn10300/Kconfig2
-rw-r--r--arch/mn10300/include/asm/unistd.h1
-rw-r--r--arch/mn10300/include/uapi/asm/signal.h6
-rw-r--r--arch/mn10300/mm/pgtable.c2
-rw-r--r--arch/openrisc/Kconfig4
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/io.h1
-rw-r--r--arch/openrisc/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h1
-rw-r--r--arch/openrisc/kernel/asm-offsets.c6
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/unistd.h1
-rw-r--r--arch/parisc/include/uapi/asm/signal.h6
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Makefile2
-rw-r--r--arch/powerpc/boot/dts/a3m071.dts144
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi6
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi85
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/bitops.h75
-rw-r--r--arch/powerpc/include/asm/cputable.h12
-rw-r--r--arch/powerpc/include/asm/dbell.h2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h83
-rw-r--r--arch/powerpc/include/asm/exception-64s.h97
-rw-r--r--arch/powerpc/include/asm/firmware.h4
-rw-r--r--arch/powerpc/include/asm/fsl_gtm.h2
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h4
-rw-r--r--arch/powerpc/include/asm/fsl_hcalls.h36
-rw-r--r--arch/powerpc/include/asm/hvcall.h23
-rw-r--r--arch/powerpc/include/asm/immap_qe.h2
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h12
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h33
-rw-r--r--arch/powerpc/include/asm/kvm_booke_hv_asm.h29
-rw-r--r--arch/powerpc/include/asm/kvm_host.h68
-rw-r--r--arch/powerpc/include/asm/kvm_para.h15
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h87
-rw-r--r--arch/powerpc/include/asm/machdep.h39
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h2
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h10
-rw-r--r--arch/powerpc/include/asm/mmu.h1
-rw-r--r--arch/powerpc/include/asm/oprofile_impl.h2
-rw-r--r--arch/powerpc/include/asm/pSeries_reconfig.h47
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h6
-rw-r--r--arch/powerpc/include/asm/prom.h16
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h2
-rw-r--r--arch/powerpc/include/asm/qe.h2
-rw-r--r--arch/powerpc/include/asm/qe_ic.h2
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/reg_booke.h7
-rw-r--r--arch/powerpc/include/asm/rtas.h5
-rw-r--r--arch/powerpc/include/asm/setup.h29
-rw-r--r--arch/powerpc/include/asm/smp.h8
-rw-r--r--arch/powerpc/include/asm/smu.h4
-rw-r--r--arch/powerpc/include/asm/systbl.h3
-rw-r--r--arch/powerpc/include/asm/ucc.h2
-rw-r--r--arch/powerpc/include/asm/ucc_fast.h2
-rw-r--r--arch/powerpc/include/asm/ucc_slow.h2
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h4
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/epapr_hcalls.h98
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h86
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h7
-rw-r--r--arch/powerpc/include/uapi/asm/setup.h31
-rw-r--r--arch/powerpc/include/uapi/asm/signal.h6
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S (renamed from arch/powerpc/kernel/cpu_setup_power7.S)32
-rw-r--r--arch/powerpc/kernel/cputable.c38
-rw-r--r--arch/powerpc/kernel/entry_64.S2
-rw-r--r--arch/powerpc/kernel/epapr_hcalls.S28
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c11
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S308
-rw-r--r--arch/powerpc/kernel/head_64.S6
-rw-r--r--arch/powerpc/kernel/idle.c3
-rw-r--r--arch/powerpc/kernel/iommu.c16
-rw-r--r--arch/powerpc/kernel/kvm.c2
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c8
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c2
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c5
-rw-r--r--arch/powerpc/kernel/prom.c7
-rw-r--r--arch/powerpc/kernel/prom_init.c11
-rw-r--r--arch/powerpc/kernel/ptrace.c90
-rw-r--r--arch/powerpc/kernel/rtas.c1
-rw-r--r--arch/powerpc/kernel/rtas_flash.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c46
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c17
-rw-r--r--arch/powerpc/kernel/udbg.c23
-rw-r--r--arch/powerpc/kvm/44x.c1
-rw-r--r--arch/powerpc/kvm/44x_emulate.c112
-rw-r--r--arch/powerpc/kvm/Kconfig4
-rw-r--r--arch/powerpc/kvm/Makefile5
-rw-r--r--arch/powerpc/kvm/book3s.c125
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c474
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c16
-rw-r--r--arch/powerpc/kvm/book3s_exports.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c655
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c144
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c143
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S142
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c5
-rw-r--r--arch/powerpc/kvm/book3s_pr.c294
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S18
-rw-r--r--arch/powerpc/kvm/booke.c346
-rw-r--r--arch/powerpc/kvm/booke.h1
-rw-r--r--arch/powerpc/kvm/booke_emulate.c36
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S145
-rw-r--r--arch/powerpc/kvm/e500.h11
-rw-r--r--arch/powerpc/kvm/e500_emulate.c14
-rw-r--r--arch/powerpc/kvm/e500_tlb.c132
-rw-r--r--arch/powerpc/kvm/emulate.c221
-rw-r--r--arch/powerpc/kvm/powerpc.c187
-rw-r--r--arch/powerpc/kvm/trace.h200
-rw-r--r--arch/powerpc/mm/fault.c27
-rw-r--r--arch/powerpc/mm/numa.c12
-rw-r--r--arch/powerpc/mm/slice.c2
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S15
-rw-r--r--arch/powerpc/perf/power7-pmu.c17
-rw-r--r--arch/powerpc/platforms/512x/Kconfig1
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c3
-rw-r--r--arch/powerpc/platforms/512x/mpc512x.h11
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c25
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c16
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c8
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c2
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c8
-rw-r--r--arch/powerpc/platforms/85xx/smp.c49
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c2
-rw-r--r--arch/powerpc/platforms/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/iommu.c2
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c2
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c5
-rw-r--r--arch/powerpc/platforms/powermac/pic.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c25
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c6
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c34
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c1
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c60
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c10
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c4
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h31
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c119
-rw-r--r--arch/powerpc/platforms/pseries/setup.c77
-rw-r--r--arch/powerpc/platforms/pseries/smp.c1
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c9
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c37
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c2
-rw-r--r--arch/powerpc/sysdev/pmi.c13
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.h2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_fast.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/usb.c2
-rw-r--r--arch/powerpc/xmon/Makefile2
-rw-r--r--arch/powerpc/xmon/nonstdio.c53
-rw-r--r--arch/powerpc/xmon/nonstdio.h6
-rw-r--r--arch/powerpc/xmon/start.c34
-rw-r--r--arch/powerpc/xmon/xmon.c26
-rw-r--r--arch/s390/Kbuild1
-rw-r--r--arch/s390/Kconfig72
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/crypto/aes_s390.c18
-rw-r--r--arch/s390/crypto/des_s390.c12
-rw-r--r--arch/s390/crypto/ghash_s390.c21
-rw-r--r--arch/s390/crypto/sha_common.c9
-rw-r--r--arch/s390/include/asm/bitops.h81
-rw-r--r--arch/s390/include/asm/ccwdev.h4
-rw-r--r--arch/s390/include/asm/ccwgroup.h3
-rw-r--r--arch/s390/include/asm/clp.h28
-rw-r--r--arch/s390/include/asm/compat.h3
-rw-r--r--arch/s390/include/asm/dma-mapping.h76
-rw-r--r--arch/s390/include/asm/dma.h19
-rw-r--r--arch/s390/include/asm/hw_irq.h22
-rw-r--r--arch/s390/include/asm/io.h55
-rw-r--r--arch/s390/include/asm/irq.h12
-rw-r--r--arch/s390/include/asm/isc.h1
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/pci.h195
-rw-r--r--arch/s390/include/asm/pci_clp.h182
-rw-r--r--arch/s390/include/asm/pci_debug.h36
-rw-r--r--arch/s390/include/asm/pci_dma.h196
-rw-r--r--arch/s390/include/asm/pci_insn.h280
-rw-r--r--arch/s390/include/asm/pci_io.h194
-rw-r--r--arch/s390/include/asm/pgtable.h22
-rw-r--r--arch/s390/include/asm/sclp.h2
-rw-r--r--arch/s390/include/asm/topology.h34
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/include/asm/vga.h6
-rw-r--r--arch/s390/include/uapi/asm/signal.h6
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/dis.c578
-rw-r--r--arch/s390/kernel/entry.S7
-rw-r--r--arch/s390/kernel/entry.h21
-rw-r--r--arch/s390/kernel/entry64.S36
-rw-r--r--arch/s390/kernel/head.S74
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/pgm_check.S152
-rw-r--r--arch/s390/kernel/setup.c39
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/topology.c113
-rw-r--r--arch/s390/kernel/traps.c52
-rw-r--r--arch/s390/kvm/interrupt.c19
-rw-r--r--arch/s390/kvm/kvm-s390.c7
-rw-r--r--arch/s390/mm/Makefile12
-rw-r--r--arch/s390/mm/dump_pagetables.c7
-rw-r--r--arch/s390/mm/fault.c31
-rw-r--r--arch/s390/mm/init.c29
-rw-r--r--arch/s390/mm/pageattr.c82
-rw-r--r--arch/s390/mm/pgtable.c16
-rw-r--r--arch/s390/mm/vmem.c46
-rw-r--r--arch/s390/net/bpf_jit_comp.c28
-rw-r--r--arch/s390/pci/Makefile6
-rw-r--r--arch/s390/pci/pci.c1170
-rw-r--r--arch/s390/pci/pci_clp.c325
-rw-r--r--arch/s390/pci/pci_debug.c193
-rw-r--r--arch/s390/pci/pci_dma.c512
-rw-r--r--arch/s390/pci/pci_event.c95
-rw-r--r--arch/s390/pci/pci_msi.c141
-rw-r--r--arch/s390/pci/pci_sysfs.c86
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/score/include/asm/kvm_para.h1
-rw-r--r--arch/score/include/asm/ptrace.h75
-rw-r--r--arch/score/include/asm/setup.h7
-rw-r--r--arch/score/include/uapi/asm/Kbuild31
-rw-r--r--arch/score/include/uapi/asm/auxvec.h (renamed from arch/score/include/asm/auxvec.h)0
-rw-r--r--arch/score/include/uapi/asm/bitsperlong.h (renamed from arch/score/include/asm/bitsperlong.h)0
-rw-r--r--arch/score/include/uapi/asm/byteorder.h (renamed from arch/score/include/asm/byteorder.h)0
-rw-r--r--arch/score/include/uapi/asm/errno.h (renamed from arch/score/include/asm/errno.h)0
-rw-r--r--arch/score/include/uapi/asm/fcntl.h (renamed from arch/score/include/asm/fcntl.h)0
-rw-r--r--arch/score/include/uapi/asm/ioctl.h (renamed from arch/score/include/asm/ioctl.h)0
-rw-r--r--arch/score/include/uapi/asm/ioctls.h (renamed from arch/score/include/asm/ioctls.h)0
-rw-r--r--arch/score/include/uapi/asm/ipcbuf.h (renamed from arch/score/include/asm/ipcbuf.h)0
-rw-r--r--arch/score/include/uapi/asm/kvm_para.h (renamed from arch/microblaze/include/asm/kvm_para.h)0
-rw-r--r--arch/score/include/uapi/asm/mman.h (renamed from arch/score/include/asm/mman.h)0
-rw-r--r--arch/score/include/uapi/asm/msgbuf.h (renamed from arch/score/include/asm/msgbuf.h)0
-rw-r--r--arch/score/include/uapi/asm/param.h (renamed from arch/score/include/asm/param.h)0
-rw-r--r--arch/score/include/uapi/asm/poll.h (renamed from arch/score/include/asm/poll.h)0
-rw-r--r--arch/score/include/uapi/asm/posix_types.h (renamed from arch/score/include/asm/posix_types.h)0
-rw-r--r--arch/score/include/uapi/asm/ptrace.h76
-rw-r--r--arch/score/include/uapi/asm/resource.h (renamed from arch/score/include/asm/resource.h)0
-rw-r--r--arch/score/include/uapi/asm/sembuf.h (renamed from arch/score/include/asm/sembuf.h)0
-rw-r--r--arch/score/include/uapi/asm/setup.h9
-rw-r--r--arch/score/include/uapi/asm/shmbuf.h (renamed from arch/score/include/asm/shmbuf.h)0
-rw-r--r--arch/score/include/uapi/asm/sigcontext.h (renamed from arch/score/include/asm/sigcontext.h)0
-rw-r--r--arch/score/include/uapi/asm/siginfo.h (renamed from arch/score/include/asm/siginfo.h)0
-rw-r--r--arch/score/include/uapi/asm/signal.h (renamed from arch/score/include/asm/signal.h)0
-rw-r--r--arch/score/include/uapi/asm/socket.h (renamed from arch/score/include/asm/socket.h)0
-rw-r--r--arch/score/include/uapi/asm/sockios.h (renamed from arch/score/include/asm/sockios.h)0
-rw-r--r--arch/score/include/uapi/asm/stat.h (renamed from arch/score/include/asm/stat.h)0
-rw-r--r--arch/score/include/uapi/asm/statfs.h (renamed from arch/score/include/asm/statfs.h)0
-rw-r--r--arch/score/include/uapi/asm/swab.h (renamed from arch/score/include/asm/swab.h)0
-rw-r--r--arch/score/include/uapi/asm/termbits.h (renamed from arch/score/include/asm/termbits.h)0
-rw-r--r--arch/score/include/uapi/asm/termios.h (renamed from arch/score/include/asm/termios.h)0
-rw-r--r--arch/score/include/uapi/asm/types.h (renamed from arch/score/include/asm/types.h)0
-rw-r--r--arch/score/include/uapi/asm/unistd.h (renamed from arch/score/include/asm/unistd.h)1
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/boards/board-espt.c2
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c6
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c6
-rw-r--r--arch/sh/boards/mach-kfr2r09/lcd_wqvga.c16
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c7
-rw-r--r--arch/sh/include/asm/dma-mapping.h1
-rw-r--r--arch/sh/include/asm/unistd.h1
-rw-r--r--arch/sh/include/mach-kfr2r09/mach/kfr2r09.h6
-rw-r--r--arch/sh/mm/Kconfig1
-rw-r--r--arch/sh/mm/fault.c19
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/crypto/aes_asm.S20
-rw-r--r--arch/sparc/crypto/aes_glue.c31
-rw-r--r--arch/sparc/crypto/camellia_glue.c3
-rw-r--r--arch/sparc/crypto/des_asm.S1
-rw-r--r--arch/sparc/crypto/des_glue.c6
-rw-r--r--arch/sparc/include/asm/dma-mapping.h1
-rw-r--r--arch/sparc/include/asm/hugetlb.h10
-rw-r--r--arch/sparc/include/asm/pgtable_64.h8
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/include/uapi/asm/signal.h6
-rw-r--r--arch/sparc/kernel/module.c4
-rw-r--r--arch/sparc/kernel/sys_sparc32.c14
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/include/asm/compat.h2
-rw-r--r--arch/tile/include/asm/dma-mapping.h1
-rw-r--r--arch/tile/include/asm/elf.h2
-rw-r--r--arch/tile/include/asm/ptrace.h4
-rw-r--r--arch/tile/include/asm/unistd.h2
-rw-r--r--arch/tile/include/uapi/asm/ptrace.h8
-rw-r--r--arch/tile/kernel/compat.c18
-rw-r--r--arch/tile/kernel/module.c2
-rw-r--r--arch/tile/kernel/pci.c4
-rw-r--r--arch/tile/kernel/pci_gx.c3
-rw-r--r--arch/tile/kernel/ptrace.c140
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/um/kernel/signal.c5
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/ptrace.h1
-rw-r--r--arch/unicore32/include/uapi/asm/unistd.h1
-rw-r--r--arch/unicore32/kernel/module.c3
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/boot/.gitignore1
-rw-r--r--arch/x86/boot/compressed/eboot.c118
-rw-r--r--arch/x86/crypto/Makefile5
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S1102
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c558
-rw-r--r--arch/x86/crypto/camellia_glue.c92
-rw-r--r--arch/x86/crypto/cast5-avx-x86_64-asm_64.S348
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c79
-rw-r--r--arch/x86/crypto/cast6-avx-x86_64-asm_64.S206
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c77
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c (renamed from arch/x86/crypto/crc32c-intel.c)81
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S460
-rw-r--r--arch/x86/crypto/glue_helper-asm-avx.S91
-rw-r--r--arch/x86/crypto/glue_helper.c12
-rw-r--r--arch/x86/crypto/serpent-avx-x86_64-asm_64.S166
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c49
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c12
-rw-r--r--arch/x86/crypto/twofish-avx-x86_64-asm_64.S208
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c73
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c20
-rw-r--r--arch/x86/ia32/ia32_signal.c55
-rw-r--r--arch/x86/ia32/ia32entry.S1
-rw-r--r--arch/x86/include/asm/Kbuild26
-rw-r--r--arch/x86/include/asm/boot.h9
-rw-r--r--arch/x86/include/asm/clocksource.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/crypto/camellia.h82
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h28
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h27
-rw-r--r--arch/x86/include/asm/crypto/twofish.h4
-rw-r--r--arch/x86/include/asm/debugreg.h79
-rw-r--r--arch/x86/include/asm/dma-mapping.h1
-rw-r--r--arch/x86/include/asm/e820.h74
-rw-r--r--arch/x86/include/asm/fixmap.h5
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h5
-rw-r--r--arch/x86/include/asm/ia32.h10
-rw-r--r--arch/x86/include/asm/ist.h17
-rw-r--r--arch/x86/include/asm/kexec.h3
-rw-r--r--arch/x86/include/asm/kvm_guest.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h24
-rw-r--r--arch/x86/include/asm/kvm_para.h99
-rw-r--r--arch/x86/include/asm/mce.h138
-rw-r--r--arch/x86/include/asm/msr.h11
-rw-r--r--arch/x86/include/asm/mtrr.h93
-rw-r--r--arch/x86/include/asm/numachip/numachip.h19
-rw-r--r--arch/x86/include/asm/paravirt.h2
-rw-r--r--arch/x86/include/asm/pci.h12
-rw-r--r--arch/x86/include/asm/pgtable.h17
-rw-r--r--arch/x86/include/asm/pgtable_types.h20
-rw-r--r--arch/x86/include/asm/posix_types.h10
-rw-r--r--arch/x86/include/asm/processor-flags.h97
-rw-r--r--arch/x86/include/asm/ptrace.h82
-rw-r--r--arch/x86/include/asm/pvclock.h47
-rw-r--r--arch/x86/include/asm/setup.h5
-rw-r--r--arch/x86/include/asm/sigcontext.h216
-rw-r--r--arch/x86/include/asm/signal.h140
-rw-r--r--arch/x86/include/asm/svm.h132
-rw-r--r--arch/x86/include/asm/sys_ia32.h2
-rw-r--r--arch/x86/include/asm/syscalls.h3
-rw-r--r--arch/x86/include/asm/unistd.h15
-rw-r--r--arch/x86/include/asm/vm86.h128
-rw-r--r--arch/x86/include/asm/vmx.h92
-rw-r--r--arch/x86/include/asm/vsyscall.h34
-rw-r--r--arch/x86/include/asm/xen/interface.h1
-rw-r--r--arch/x86/include/uapi/asm/Kbuild58
-rw-r--r--arch/x86/include/uapi/asm/a.out.h (renamed from arch/x86/include/asm/a.out.h)0
-rw-r--r--arch/x86/include/uapi/asm/auxvec.h (renamed from arch/x86/include/asm/auxvec.h)0
-rw-r--r--arch/x86/include/uapi/asm/bitsperlong.h (renamed from arch/x86/include/asm/bitsperlong.h)0
-rw-r--r--arch/x86/include/uapi/asm/boot.h10
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h (renamed from arch/x86/include/asm/bootparam.h)1
-rw-r--r--arch/x86/include/uapi/asm/byteorder.h (renamed from arch/x86/include/asm/byteorder.h)0
-rw-r--r--arch/x86/include/uapi/asm/debugreg.h80
-rw-r--r--arch/x86/include/uapi/asm/e820.h75
-rw-r--r--arch/x86/include/uapi/asm/errno.h (renamed from arch/x86/include/asm/errno.h)0
-rw-r--r--arch/x86/include/uapi/asm/fcntl.h (renamed from arch/x86/include/asm/fcntl.h)0
-rw-r--r--arch/x86/include/uapi/asm/hw_breakpoint.h1
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h (renamed from arch/x86/include/asm/hyperv.h)0
-rw-r--r--arch/x86/include/uapi/asm/ioctl.h (renamed from arch/x86/include/asm/ioctl.h)0
-rw-r--r--arch/x86/include/uapi/asm/ioctls.h (renamed from arch/x86/include/asm/ioctls.h)0
-rw-r--r--arch/x86/include/uapi/asm/ipcbuf.h (renamed from arch/x86/include/asm/ipcbuf.h)0
-rw-r--r--arch/x86/include/uapi/asm/ist.h29
-rw-r--r--arch/x86/include/uapi/asm/kvm.h (renamed from arch/x86/include/asm/kvm.h)0
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h100
-rw-r--r--arch/x86/include/uapi/asm/ldt.h (renamed from arch/x86/include/asm/ldt.h)0
-rw-r--r--arch/x86/include/uapi/asm/mce.h121
-rw-r--r--arch/x86/include/uapi/asm/mman.h (renamed from arch/x86/include/asm/mman.h)0
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h (renamed from arch/x86/include/asm/msgbuf.h)0
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h (renamed from arch/x86/include/asm/msr-index.h)38
-rw-r--r--arch/x86/include/uapi/asm/msr.h15
-rw-r--r--arch/x86/include/uapi/asm/mtrr.h117
-rw-r--r--arch/x86/include/uapi/asm/param.h (renamed from arch/x86/include/asm/param.h)0
-rw-r--r--arch/x86/include/uapi/asm/perf_regs.h (renamed from arch/x86/include/asm/perf_regs.h)0
-rw-r--r--arch/x86/include/uapi/asm/poll.h (renamed from arch/x86/include/asm/poll.h)0
-rw-r--r--arch/x86/include/uapi/asm/posix_types.h9
-rw-r--r--arch/x86/include/uapi/asm/posix_types_32.h (renamed from arch/x86/include/asm/posix_types_32.h)0
-rw-r--r--arch/x86/include/uapi/asm/posix_types_64.h (renamed from arch/x86/include/asm/posix_types_64.h)0
-rw-r--r--arch/x86/include/uapi/asm/posix_types_x32.h (renamed from arch/x86/include/asm/posix_types_x32.h)0
-rw-r--r--arch/x86/include/uapi/asm/prctl.h (renamed from arch/x86/include/asm/prctl.h)0
-rw-r--r--arch/x86/include/uapi/asm/processor-flags.h99
-rw-r--r--arch/x86/include/uapi/asm/ptrace-abi.h (renamed from arch/x86/include/asm/ptrace-abi.h)0
-rw-r--r--arch/x86/include/uapi/asm/ptrace.h78
-rw-r--r--arch/x86/include/uapi/asm/resource.h (renamed from arch/x86/include/asm/resource.h)0
-rw-r--r--arch/x86/include/uapi/asm/sembuf.h (renamed from arch/x86/include/asm/sembuf.h)0
-rw-r--r--arch/x86/include/uapi/asm/setup.h1
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h (renamed from arch/x86/include/asm/shmbuf.h)0
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h221
-rw-r--r--arch/x86/include/uapi/asm/sigcontext32.h (renamed from arch/x86/include/asm/sigcontext32.h)0
-rw-r--r--arch/x86/include/uapi/asm/siginfo.h (renamed from arch/x86/include/asm/siginfo.h)0
-rw-r--r--arch/x86/include/uapi/asm/signal.h139
-rw-r--r--arch/x86/include/uapi/asm/socket.h (renamed from arch/x86/include/asm/socket.h)0
-rw-r--r--arch/x86/include/uapi/asm/sockios.h (renamed from arch/x86/include/asm/sockios.h)0
-rw-r--r--arch/x86/include/uapi/asm/stat.h (renamed from arch/x86/include/asm/stat.h)0
-rw-r--r--arch/x86/include/uapi/asm/statfs.h (renamed from arch/x86/include/asm/statfs.h)0
-rw-r--r--arch/x86/include/uapi/asm/svm.h132
-rw-r--r--arch/x86/include/uapi/asm/swab.h (renamed from arch/x86/include/asm/swab.h)0
-rw-r--r--arch/x86/include/uapi/asm/termbits.h (renamed from arch/x86/include/asm/termbits.h)0
-rw-r--r--arch/x86/include/uapi/asm/termios.h (renamed from arch/x86/include/asm/termios.h)0
-rw-r--r--arch/x86/include/uapi/asm/types.h (renamed from arch/x86/include/asm/types.h)0
-rw-r--r--arch/x86/include/uapi/asm/ucontext.h (renamed from arch/x86/include/asm/ucontext.h)0
-rw-r--r--arch/x86/include/uapi/asm/unistd.h17
-rw-r--r--arch/x86/include/uapi/asm/vm86.h129
-rw-r--r--arch/x86/include/uapi/asm/vmx.h109
-rw-r--r--arch/x86/include/uapi/asm/vsyscall.h17
-rw-r--r--arch/x86/kernel/acpi/boot.c6
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c209
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c2
-rw-r--r--arch/x86/kernel/cpu/proc.c7
-rw-r--r--arch/x86/kernel/crash.c32
-rw-r--r--arch/x86/kernel/entry_32.S1
-rw-r--r--arch/x86/kernel/entry_64.S3
-rw-r--r--arch/x86/kernel/irqinit.c40
-rw-r--r--arch/x86/kernel/kvm.c20
-rw-r--r--arch/x86/kernel/kvmclock.c88
-rw-r--r--arch/x86/kernel/pvclock.c143
-rw-r--r--arch/x86/kernel/setup.c8
-rw-r--r--arch/x86/kernel/signal.c29
-rw-r--r--arch/x86/kernel/traps.c6
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c110
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/emulate.c5
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c65
-rw-r--r--arch/x86/kvm/paging_tmpl.h115
-rw-r--r--arch/x86/kvm/svm.c48
-rw-r--r--arch/x86/kvm/trace.h63
-rw-r--r--arch/x86/kvm/vmx.c203
-rw-r--r--arch/x86/kvm/x86.c548
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lguest/boot.c2
-rw-r--r--arch/x86/mm/fault.c23
-rw-r--r--arch/x86/mm/init_64.c4
-rw-r--r--arch/x86/mm/pgtable.c10
-rw-r--r--arch/x86/pci/Makefile1
-rw-r--r--arch/x86/pci/acpi.c46
-rw-r--r--arch/x86/pci/common.c35
-rw-r--r--arch/x86/pci/numachip.c129
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c2
-rw-r--r--arch/x86/platform/iris/iris.c67
-rw-r--r--arch/x86/syscalls/syscall_32.tbl3
-rw-r--r--arch/x86/syscalls/syscall_64.tbl5
-rw-r--r--arch/x86/um/Kconfig3
-rw-r--r--arch/x86/um/asm/ptrace.h1
-rw-r--r--arch/x86/um/signal.c9
-rw-r--r--arch/x86/um/sys_call_table_32.c1
-rw-r--r--arch/x86/um/sys_call_table_64.c1
-rw-r--r--arch/x86/vdso/vclock_gettime.c81
-rw-r--r--arch/x86/vdso/vgetcpu.c11
-rw-r--r--arch/x86/xen/Kconfig1
-rw-r--r--arch/x86/xen/enlighten.c109
-rw-r--r--arch/x86/xen/mmu.c17
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/Kconfig23
-rw-r--r--arch/xtensa/Kconfig.debug22
-rw-r--r--arch/xtensa/Makefile20
-rw-r--r--arch/xtensa/boot/Makefile25
-rw-r--r--arch/xtensa/boot/boot-elf/Makefile26
-rw-r--r--arch/xtensa/boot/boot-redboot/Makefile26
-rw-r--r--arch/xtensa/boot/boot-uboot/Makefile14
-rw-r--r--arch/xtensa/boot/dts/lx60.dts11
-rw-r--r--arch/xtensa/boot/dts/ml605.dts11
-rw-r--r--arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi26
-rw-r--r--arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi18
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi56
-rw-r--r--arch/xtensa/include/asm/atomic.h271
-rw-r--r--arch/xtensa/include/asm/barrier.h6
-rw-r--r--arch/xtensa/include/asm/bitops.h127
-rw-r--r--arch/xtensa/include/asm/bootparam.h20
-rw-r--r--arch/xtensa/include/asm/cacheasm.h1
-rw-r--r--arch/xtensa/include/asm/cacheflush.h3
-rw-r--r--arch/xtensa/include/asm/checksum.h19
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h74
-rw-r--r--arch/xtensa/include/asm/current.h2
-rw-r--r--arch/xtensa/include/asm/delay.h7
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h6
-rw-r--r--arch/xtensa/include/asm/elf.h10
-rw-r--r--arch/xtensa/include/asm/highmem.h1
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h55
-rw-r--r--arch/xtensa/include/asm/mmu.h2
-rw-r--r--arch/xtensa/include/asm/mmu_context.h2
-rw-r--r--arch/xtensa/include/asm/nommu.h3
-rw-r--r--arch/xtensa/include/asm/nommu_context.h2
-rw-r--r--arch/xtensa/include/asm/page.h20
-rw-r--r--arch/xtensa/include/asm/pci-bridge.h2
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h8
-rw-r--r--arch/xtensa/include/asm/platform.h1
-rw-r--r--arch/xtensa/include/asm/processor.h10
-rw-r--r--arch/xtensa/include/asm/prom.h6
-rw-r--r--arch/xtensa/include/asm/ptrace.h6
-rw-r--r--arch/xtensa/include/asm/regs.h5
-rw-r--r--arch/xtensa/include/asm/spinlock.h188
-rw-r--r--arch/xtensa/include/asm/syscall.h11
-rw-r--r--arch/xtensa/include/asm/traps.h23
-rw-r--r--arch/xtensa/include/asm/uaccess.h43
-rw-r--r--arch/xtensa/include/asm/unistd.h1
-rw-r--r--arch/xtensa/include/uapi/asm/signal.h6
-rw-r--r--arch/xtensa/kernel/Makefile8
-rw-r--r--arch/xtensa/kernel/align.S4
-rw-r--r--arch/xtensa/kernel/asm-offsets.c5
-rw-r--r--arch/xtensa/kernel/coprocessor.S25
-rw-r--r--arch/xtensa/kernel/entry.S67
-rw-r--r--arch/xtensa/kernel/head.S21
-rw-r--r--arch/xtensa/kernel/irq.c132
-rw-r--r--arch/xtensa/kernel/module.c2
-rw-r--r--arch/xtensa/kernel/platform.c1
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/ptrace.c3
-rw-r--r--arch/xtensa/kernel/setup.c279
-rw-r--r--arch/xtensa/kernel/signal.c8
-rw-r--r--arch/xtensa/kernel/syscall.c1
-rw-r--r--arch/xtensa/kernel/time.c7
-rw-r--r--arch/xtensa/kernel/traps.c18
-rw-r--r--arch/xtensa/kernel/vectors.S67
-rw-r--r--arch/xtensa/lib/checksum.S15
-rw-r--r--arch/xtensa/lib/memcopy.S6
-rw-r--r--arch/xtensa/lib/pci-auto.c9
-rw-r--r--arch/xtensa/lib/strncpy_user.S4
-rw-r--r--arch/xtensa/lib/strnlen_user.S1
-rw-r--r--arch/xtensa/lib/usercopy.S1
-rw-r--r--arch/xtensa/mm/cache.c27
-rw-r--r--arch/xtensa/mm/fault.c1
-rw-r--r--arch/xtensa/mm/init.c16
-rw-r--r--arch/xtensa/mm/misc.S51
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/mm/tlb.c9
-rw-r--r--arch/xtensa/platforms/iss/include/platform/serial.h15
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall.h7
-rw-r--r--arch/xtensa/platforms/xtfpga/Makefile9
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h69
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/lcd.h20
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/serial.h18
-rw-r--r--arch/xtensa/platforms/xtfpga/lcd.c76
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c301
-rw-r--r--arch/xtensa/variants/s6000/gpio.c4
-rw-r--r--block/Kconfig1
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c127
-rw-r--r--block/blk-exec.c4
-rw-r--r--block/blk-lib.c26
-rw-r--r--block/blk-settings.c6
-rw-r--r--block/blk-sysfs.c4
-rw-r--r--block/blk-throttle.c2
-rw-r--r--block/blk.h4
-rw-r--r--block/bsg-lib.c13
-rw-r--r--block/cfq-iosched.c3
-rw-r--r--block/deadline-iosched.c2
-rw-r--r--block/elevator.c16
-rw-r--r--block/genhd.c10
-rw-r--r--block/partitions/Kconfig4
-rw-r--r--block/partitions/efi.c7
-rw-r--r--block/partitions/msdos.c21
-rw-r--r--crypto/Kconfig42
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/cast5_generic.c277
-rw-r--r--crypto/cast6_generic.c280
-rw-r--r--crypto/cast_common.c290
-rw-r--r--crypto/tcrypt.c29
-rw-r--r--crypto/testmgr.c369
-rw-r--r--crypto/testmgr.h5876
-rw-r--r--crypto/vmac.c47
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/osl.c202
-rw-r--r--drivers/acpi/pci_bind.c12
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_root.c165
-rw-r--r--drivers/amba/tegra-ahb.c2
-rw-r--r--drivers/ata/ahci_platform.c46
-rw-r--r--drivers/ata/ata_piix.c444
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c31
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_arasan_cf.c7
-rw-r--r--drivers/ata/pata_cmd64x.c6
-rw-r--r--drivers/ata/pata_cs5536.c32
-rw-r--r--drivers/ata/pata_ep93xx.c6
-rw-r--r--drivers/ata/pata_imx.c2
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c11
-rw-r--r--drivers/ata/pata_mpc52xx.c17
-rw-r--r--drivers/ata/pata_octeon_cf.c423
-rw-r--r--drivers/ata/pata_of_platform.c8
-rw-r--r--drivers/ata/pata_palmld.c4
-rw-r--r--drivers/ata/pata_platform.c24
-rw-r--r--drivers/ata/pata_rdc.c3
-rw-r--r--[-rwxr-xr-x]drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_highbank.c14
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_promise.c15
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sx4.c14
-rw-r--r--drivers/atm/solos-pci.c186
-rw-r--r--drivers/base/core.c21
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/dma-buf.c2
-rw-r--r--drivers/base/dma-mapping.c4
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/bcma/Kconfig9
-rw-r--r--drivers/bcma/Makefile1
-rw-r--r--drivers/bcma/bcma_private.h10
-rw-r--r--drivers/bcma/driver_chipcommon.c81
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c3
-rw-r--r--drivers/bcma/driver_gpio.c98
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/block/aoe/aoe.h57
-rw-r--r--drivers/block/aoe/aoeblk.c104
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/aoe/aoecmd.c715
-rw-r--r--drivers/block/aoe/aoedev.c243
-rw-r--r--drivers/block/aoe/aoemain.c2
-rw-r--r--drivers/block/aoe/aoenet.c15
-rw-r--r--drivers/block/cciss.c21
-rw-r--r--drivers/block/drbd/Kconfig10
-rw-r--r--drivers/block/drbd/Makefile2
-rw-r--r--drivers/block/drbd/drbd_actlog.c702
-rw-r--r--drivers/block/drbd/drbd_bitmap.c249
-rw-r--r--drivers/block/drbd/drbd_int.h1365
-rw-r--r--drivers/block/drbd/drbd_interval.c207
-rw-r--r--drivers/block/drbd/drbd_interval.h40
-rw-r--r--drivers/block/drbd/drbd_main.c3781
-rw-r--r--drivers/block/drbd/drbd_nl.c3276
-rw-r--r--drivers/block/drbd/drbd_nla.c55
-rw-r--r--drivers/block/drbd/drbd_nla.h8
-rw-r--r--drivers/block/drbd/drbd_proc.c41
-rw-r--r--drivers/block/drbd/drbd_receiver.c3894
-rw-r--r--drivers/block/drbd/drbd_req.c1574
-rw-r--r--drivers/block/drbd/drbd_req.h187
-rw-r--r--drivers/block/drbd/drbd_state.c1856
-rw-r--r--drivers/block/drbd/drbd_state.h161
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c1237
-rw-r--r--drivers/block/drbd/drbd_wrappers.h11
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/rbd.c1389
-rw-r--r--drivers/block/rbd_types.h2
-rw-r--r--drivers/block/xen-blkback/blkback.c301
-rw-r--r--drivers/block/xen-blkback/common.h16
-rw-r--r--drivers/block/xen-blkback/xenbus.c23
-rw-r--r--drivers/block/xen-blkfront.c199
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/char/agp/intel-agp.h91
-rw-r--r--drivers/char/agp/intel-gtt.c320
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/ppdev.c6
-rw-r--r--drivers/char/random.c40
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c81
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h5
-rw-r--r--drivers/char/virtio_console.c329
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-nomadik.c1
-rw-r--r--drivers/clk/clk-zynq.c383
-rw-r--r--drivers/clk/mvebu/Kconfig8
-rw-r--r--drivers/clk/mvebu/Makefile3
-rw-r--r--drivers/clk/mvebu/clk-core.c675
-rw-r--r--drivers/clk/mvebu/clk-core.h18
-rw-r--r--drivers/clk/mvebu/clk-cpu.c186
-rw-r--r--drivers/clk/mvebu/clk-cpu.h22
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.c249
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.h22
-rw-r--r--drivers/clk/mvebu/clk.c27
-rw-r--r--drivers/clk/spear/spear1310_clock.c1
-rw-r--r--drivers/clk/ux500/u8500_clk.c11
-rw-r--r--drivers/clocksource/Kconfig17
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/nomadik-mtu.c (renamed from arch/arm/plat-nomadik/timer.c)19
-rw-r--r--drivers/clocksource/time-armada-370-xp.c11
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/nx/nx-842.c20
-rw-r--r--drivers/crypto/nx/nx.c1
-rw-r--r--drivers/crypto/omap-aes.c2
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/picoxcell_crypto.c1
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/talitos.c3
-rw-r--r--drivers/crypto/tegra-aes.c10
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c3
-rw-r--r--drivers/dma/dmatest.c49
-rw-r--r--drivers/dma/mv_xor.c429
-rw-r--r--drivers/dma/mv_xor.h36
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/ste_dma40.c3
-rw-r--r--drivers/dma/ste_dma40_ll.c2
-rw-r--r--drivers/edac/Kconfig33
-rw-r--r--drivers/edac/Makefile5
-rw-r--r--drivers/edac/octeon_edac-l2c.c208
-rw-r--r--drivers/edac/octeon_edac-lmc.c186
-rw-r--r--drivers/edac/octeon_edac-pc.c143
-rw-r--r--drivers/edac/octeon_edac-pci.c111
-rw-r--r--drivers/eisa/eisa.ids4
-rw-r--r--drivers/extcon/extcon-arizona.c1
-rw-r--r--drivers/extcon/extcon-class.c2
-rw-r--r--drivers/extcon/extcon-max77693.c36
-rw-r--r--drivers/extcon/extcon-max8997.c28
-rw-r--r--drivers/firewire/init_ohci1394_dma.c4
-rw-r--r--drivers/firewire/net.c15
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firewire/sbp2.c2
-rw-r--r--drivers/firmware/dmi_scan.c78
-rw-r--r--drivers/firmware/efivars.c511
-rw-r--r--drivers/gpio/Kconfig18
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-da9052.c6
-rw-r--r--drivers/gpio/gpio-ich.c1
-rw-r--r--drivers/gpio/gpio-mvebu.c17
-rw-r--r--drivers/gpio/gpio-samsung.c68
-rw-r--r--drivers/gpio/gpio-tps6586x.c9
-rw-r--r--drivers/gpio/gpio-twl4030.c12
-rw-r--r--drivers/gpio/gpio-viperboard.c517
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c13
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c63
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c161
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c)146
-rw-r--r--drivers/gpu/drm/drm_edid.c48
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c76
-rw-r--r--drivers/gpu/drm/drm_hashtab.c38
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_irq.c120
-rw-r--r--drivers/gpu/drm/drm_mm.c41
-rw-r--r--drivers/gpu/drm/drm_modes.c8
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c37
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/exynos/Kconfig30
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c115
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c150
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c117
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h43
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c94
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2001
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c200
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c495
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c435
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1870
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c59
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c150
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h85
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c2060
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h266
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c855
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c324
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c376
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h669
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h284
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-rotator.h73
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c10
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c365
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c20
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c66
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c98
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c136
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h480
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c333
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c66
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c420
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c98
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h312
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c763
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c45
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c62
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1091
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1968
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c961
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h123
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c227
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c11
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c90
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c671
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c318
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h37
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c128
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c101
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile38
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/core/falcon.c247
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c1144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h142
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c98
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c884
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c190
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c184
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h225
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/falcon.h81
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c182
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c78
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c92
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c393
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c141
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c261
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c764
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c136
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c321
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2547
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c403
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h120
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c530
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2141
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c149
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c220
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c769
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h131
-rw-r--r--drivers/gpu/drm/radeon/ni.c361
-rw-r--r--drivers/gpu/drm/radeon/nid.h86
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r600.c480
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c386
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h9
-rw-r--r--drivers/gpu/drm/radeon/r600d.h86
-rw-r--r--drivers/gpu/drm/radeon/radeon.h41
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c198
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h34
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c62
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c52
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c40
-rw-r--r--drivers/gpu/drm/radeon/rv515.c122
-rw-r--r--drivers/gpu/drm/radeon/rv770.c31
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h71
-rw-r--r--drivers/gpu/drm/radeon/si.c361
-rw-r--r--drivers/gpu/drm/radeon/sid.h119
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/tegra/Kconfig23
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/dc.c833
-rw-r--r--drivers/gpu/drm/tegra/dc.h388
-rw-r--r--drivers/gpu/drm/tegra/drm.c115
-rw-r--r--drivers/gpu/drm/tegra/drm.h216
-rw-r--r--drivers/gpu/drm/tegra/fb.c56
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1321
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h575
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/drm/tegra/output.c272
-rw-r--r--drivers/gpu/drm/tegra/rgb.c228
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c321
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c51
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h909
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c274
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c92
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c917
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2019
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c893
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c61
-rw-r--r--drivers/hid/hid-icade.c259
-rw-r--r--drivers/hid/hid-ids.h19
-rw-r--r--drivers/hid/hid-input.c97
-rw-r--r--drivers/hid/hid-multitouch.c109
-rw-r--r--drivers/hid/hid-picolcd_cir.c2
-rw-r--r--drivers/hid/hid-roccat-isku.c44
-rw-r--r--drivers/hid/hid-roccat-isku.h78
-rw-r--r--drivers/hid/hid-roccat-koneplus.c348
-rw-r--r--drivers/hid/hid-roccat-koneplus.h101
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c237
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h16
-rw-r--r--drivers/hid/hid-roccat-lua.c227
-rw-r--r--drivers/hid/hid-roccat-lua.h29
-rw-r--r--drivers/hid/hid-roccat-pyra.c342
-rw-r--r--drivers/hid/hid-roccat-pyra.h24
-rw-r--r--drivers/hid/hid-roccat-savu.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c36
-rw-r--r--drivers/hid/hidraw.c16
-rw-r--r--drivers/hid/i2c-hid/Kconfig18
-rw-r--r--drivers/hid/i2c-hid/Makefile5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c979
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/usbhid/hiddev.c10
-rw-r--r--drivers/hwmon/Kconfig8
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/hwmon-vid.c10
-rw-r--r--drivers/hwmon/hwmon.c26
-rw-r--r--drivers/hwmon/it87.c918
-rw-r--r--drivers/hwmon/lm73.c16
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c2
-rw-r--r--drivers/hwmon/vexpress.c229
-rw-r--r--drivers/hwmon/w83627ehf.c99
-rw-r--r--drivers/hwmon/w83627hf.c81
-rw-r--r--drivers/i2c/busses/Kconfig20
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c8
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c10
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c8
-rw-r--r--drivers/i2c/busses/i2c-amd756.c7
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c7
-rw-r--r--drivers/i2c/busses/i2c-at91.c348
-rw-r--r--drivers/i2c/busses/i2c-au1550.c6
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c300
-rw-r--r--drivers/i2c/busses/i2c-cpm.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c6
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c6
-rw-r--r--drivers/i2c/busses/i2c-elektor.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c14
-rw-r--r--drivers/i2c/busses/i2c-highlander.c6
-rw-r--r--drivers/i2c/busses/i2c-hydra.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c41
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c8
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c6
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c38
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c18
-rw-r--r--drivers/i2c/busses/i2c-mxs.c8
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c14
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c8
-rw-r--r--drivers/i2c/busses/i2c-ocores.c170
-rw-r--r--drivers/i2c/busses/i2c-octeon.c10
-rw-r--r--drivers/i2c/busses/i2c-omap.c232
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c6
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c6
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c8
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c6
-rw-r--r--drivers/i2c/busses/i2c-piix4.c37
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c6
-rw-r--r--drivers/i2c/busses/i2c-pnx.c6
-rw-r--r--drivers/i2c/busses/i2c-powermac.c16
-rw-r--r--drivers/i2c/busses/i2c-puv3.c6
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c12
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c215
-rw-r--r--drivers/i2c/busses/i2c-s6000.c8
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c8
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c152
-rw-r--r--drivers/i2c/busses/i2c-sirf.c8
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c4
-rw-r--r--drivers/i2c/busses/i2c-sis630.c8
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c8
-rw-r--r--drivers/i2c/busses/i2c-via.c6
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c480
-rw-r--r--drivers/i2c/busses/i2c-xiic.c8
-rw-r--r--drivers/i2c/busses/i2c-xlr.c6
-rw-r--r--drivers/i2c/busses/scx200_acb.c16
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c153
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c8
-rw-r--r--drivers/iio/adc/Kconfig14
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c260
-rw-r--r--drivers/iio/adc/viperboard_adc.c181
-rw-r--r--drivers/infiniband/core/cma.c9
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c797
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c210
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h33
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c20
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c10
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c27
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/user.h12
-rw-r--r--drivers/infiniband/hw/nes/nes.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c32
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c42
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c13
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c314
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c178
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/gameport/emu10k1-gp.c6
-rw-r--r--drivers/input/gameport/fm801-gp.c6
-rw-r--r--drivers/input/input-mt.c4
-rw-r--r--drivers/input/input.c181
-rw-r--r--drivers/input/joystick/as5011.c29
-rw-r--r--drivers/input/joystick/maplecontrol.c6
-rw-r--r--drivers/input/joystick/walkera0701.c7
-rw-r--r--drivers/input/joystick/xpad.c33
-rw-r--r--drivers/input/keyboard/Kconfig5
-rw-r--r--drivers/input/keyboard/adp5520-keys.c6
-rw-r--r--drivers/input/keyboard/adp5588-keys.c18
-rw-r--r--drivers/input/keyboard/adp5589-keys.c21
-rw-r--r--drivers/input/keyboard/bf54x-keys.c6
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c4
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c90
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c26
-rw-r--r--drivers/input/keyboard/hilkbd.c10
-rw-r--r--drivers/input/keyboard/imx_keypad.c9
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c6
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c6
-rw-r--r--drivers/input/keyboard/lm8323.c6
-rw-r--r--drivers/input/keyboard/lm8333.c6
-rw-r--r--drivers/input/keyboard/locomokbd.c8
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c129
-rw-r--r--drivers/input/keyboard/max7359_keypad.c6
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c6
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c12
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c38
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/omap4-keypad.c10
-rw-r--r--drivers/input/keyboard/opencores-kbd.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c10
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c6
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c6
-rw-r--r--drivers/input/keyboard/qt1070.c8
-rw-r--r--drivers/input/keyboard/qt2160.c31
-rw-r--r--drivers/input/keyboard/samsung-keypad.c109
-rw-r--r--drivers/input/keyboard/sh_keysc.c6
-rw-r--r--drivers/input/keyboard/spear-keyboard.c98
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c142
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c8
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c179
-rw-r--r--drivers/input/keyboard/tegra-kbc.c16
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c6
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c8
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c6
-rw-r--r--drivers/input/matrix-keymap.c23
-rw-r--r--drivers/input/misc/88pm80x_onkey.c6
-rw-r--r--drivers/input/misc/88pm860x_onkey.c6
-rw-r--r--drivers/input/misc/Kconfig39
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/ab8500-ponkey.c6
-rw-r--r--drivers/input/misc/ad714x-i2c.c6
-rw-r--r--drivers/input/misc/ad714x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c6
-rw-r--r--drivers/input/misc/adxl34x-spi.c6
-rw-r--r--drivers/input/misc/arizona-haptics.c255
-rw-r--r--drivers/input/misc/bfin_rotary.c6
-rw-r--r--drivers/input/misc/bma150.c28
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c6
-rw-r--r--drivers/input/misc/cobalt_btns.c6
-rw-r--r--drivers/input/misc/da9052_onkey.c28
-rw-r--r--drivers/input/misc/da9055_onkey.c171
-rw-r--r--drivers/input/misc/dm355evm_keys.c6
-rw-r--r--drivers/input/misc/gp2ap002a00f.c8
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c6
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/misc/kxtj9.c16
-rw-r--r--drivers/input/misc/m68kspkr.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c6
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c6
-rw-r--r--drivers/input/misc/mma8450.c6
-rw-r--r--drivers/input/misc/mpu3050.c8
-rw-r--r--drivers/input/misc/pcap_keys.c6
-rw-r--r--drivers/input/misc/pcf50633-input.c6
-rw-r--r--drivers/input/misc/pcf8574_keypad.c6
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c6
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c6
-rw-r--r--drivers/input/misc/pwm-beeper.c20
-rw-r--r--drivers/input/misc/rb532_button.c6
-rw-r--r--drivers/input/misc/retu-pwrbutton.c99
-rw-r--r--drivers/input/misc/rotary_encoder.c9
-rw-r--r--drivers/input/misc/sgi_btns.c6
-rw-r--r--drivers/input/misc/sparcspkr.c14
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c3
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c6
-rw-r--r--drivers/input/misc/wistron_btns.c20
-rw-r--r--drivers/input/misc/wm831x-on.c11
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c10
-rw-r--r--drivers/input/mouse/gpio_mouse.c6
-rw-r--r--drivers/input/mouse/maplemouse.c6
-rw-r--r--drivers/input/mouse/navpoint.c6
-rw-r--r--drivers/input/mouse/pxa930_trkball.c6
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/serio/Kconfig9
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/altera_ps2.c6
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/arc_ps2.c274
-rw-r--r--drivers/input/serio/ct82c710.c6
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c13
-rw-r--r--drivers/input/serio/i8042-io.h2
-rw-r--r--drivers/input/serio/i8042-sparcio.h6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h9
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/maceps2.c8
-rw-r--r--drivers/input/serio/pcips2.c6
-rw-r--r--drivers/input/serio/q40kbd.c6
-rw-r--r--drivers/input/serio/rpckbd.c6
-rw-r--r--drivers/input/serio/sa1111ps2.c12
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/serio/xilinx_ps2.c8
-rw-r--r--drivers/input/tablet/wacom_sys.c58
-rw-r--r--drivers/input/tablet/wacom_wac.c32
-rw-r--r--drivers/input/tablet/wacom_wac.h2
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c8
-rw-r--r--drivers/input/touchscreen/Kconfig18
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c6
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c6
-rw-r--r--drivers/input/touchscreen/ads7846.c10
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c6
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c8
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c125
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c19
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c6
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c6
-rw-r--r--drivers/input/touchscreen/da9034-ts.c6
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c69
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c28
-rw-r--r--drivers/input/touchscreen/eeti_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c8
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c479
-rw-r--r--drivers/input/touchscreen/htcpen.c6
-rw-r--r--drivers/input/touchscreen/ili210x.c6
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c14
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c6
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c6
-rw-r--r--drivers/input/touchscreen/max11801_ts.c8
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c6
-rw-r--r--drivers/input/touchscreen/mms114.c68
-rw-r--r--drivers/input/touchscreen/pcap_ts.c6
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c6
-rw-r--r--drivers/input/touchscreen/st1232.c8
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c133
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c398
-rw-r--r--drivers/input/touchscreen/ti_tscadc.c486
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c6
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c4
-rw-r--r--drivers/input/touchscreen/tsc2005.c8
-rw-r--r--drivers/input/touchscreen/tsc2007.c6
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c8
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c6
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c12
-rw-r--r--drivers/iommu/amd_iommu.c196
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/intel-iommu.c42
-rw-r--r--drivers/iommu/omap-iommu.c68
-rw-r--r--drivers/iommu/omap-iommu.h3
-rw-r--r--drivers/iommu/omap-iommu2.c36
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/tegra-smmu.c6
-rw-r--r--drivers/irqchip/Makefile7
-rw-r--r--drivers/irqchip/spear-shirq.c316
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c2
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c3
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-triggers.c25
-rw-r--r--drivers/leds/leds-88pm860x.c9
-rw-r--r--drivers/leds/leds-adp5520.c4
-rw-r--r--drivers/leds/leds-bd2802.c10
-rw-r--r--drivers/leds/leds-clevo-mail.c11
-rw-r--r--drivers/leds/leds-cobalt-qube.c11
-rw-r--r--drivers/leds/leds-cobalt-raq.c11
-rw-r--r--drivers/leds/leds-da903x.c10
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c38
-rw-r--r--drivers/leds/leds-lm355x.c4
-rw-r--r--drivers/leds/leds-lm3642.c12
-rw-r--r--drivers/leds/leds-lp3944.c2
-rw-r--r--drivers/leds/leds-lp5521.c13
-rw-r--r--drivers/leds/leds-lp5523.c24
-rw-r--r--drivers/leds/leds-lt3593.c20
-rw-r--r--drivers/leds/leds-net48xx.c2
-rw-r--r--drivers/leds/leds-netxbig.c2
-rw-r--r--drivers/leds/leds-ns2.c36
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/leds/leds-rb532.c2
-rw-r--r--drivers/leds/leds-renesas-tpu.c25
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-wm8350.c4
-rw-r--r--drivers/leds/leds-wrap.c2
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c2
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c14
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c14
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c13
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c13
-rw-r--r--drivers/md/dm-bio-prison.c25
-rw-r--r--drivers/md/dm-bio-prison.h1
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-delay.c5
-rw-r--r--drivers/md/dm-flakey.c21
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-ioctl.c64
-rw-r--r--drivers/md/dm-kcopyd.c18
-rw-r--r--drivers/md/dm-linear.c6
-rw-r--r--drivers/md/dm-raid.c8
-rw-r--r--drivers/md/dm-raid1.c75
-rw-r--r--drivers/md/dm-snap.c90
-rw-r--r--drivers/md/dm-stripe.c20
-rw-r--r--drivers/md/dm-table.c41
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c234
-rw-r--r--drivers/md/dm-verity.c25
-rw-r--r--drivers/md/dm-zero.c5
-rw-r--r--drivers/md/dm.c84
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md.c260
-rw-r--r--drivers/md/md.h28
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c16
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h16
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c50
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c20
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-btree.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c16
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c2
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid10.c15
-rw-r--r--drivers/md/raid5.c57
-rw-r--r--drivers/media/common/Kconfig7
-rw-r--r--drivers/media/common/b2c2/Kconfig5
-rw-r--r--drivers/media/common/siano/Kconfig18
-rw-r--r--drivers/media/common/siano/Makefile6
-rw-r--r--drivers/media/common/siano/smscoreapi.c2
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/siano/smsir.h9
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dmxdev.h1
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c10
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c8
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c26
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.h6
-rw-r--r--drivers/media/dvb-frontends/ds3000.c15
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/mt312.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c6
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c19
-rw-r--r--drivers/media/dvb-frontends/tda10071.c6
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c1
-rw-r--r--drivers/media/firewire/firedtv.h1
-rw-r--r--drivers/media/i2c/adp1653.c4
-rw-r--r--drivers/media/i2c/adv7183.c13
-rw-r--r--drivers/media/i2c/adv7604.c16
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c14
-rw-r--r--drivers/media/i2c/s5k4ecgx.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c219
-rw-r--r--drivers/media/i2c/smiapp-pll.h61
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c74
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg-defs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h2
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c88
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c55
-rw-r--r--drivers/media/i2c/vs6624.c13
-rw-r--r--drivers/media/mmc/siano/Kconfig3
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c7
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c1
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c45
-rw-r--r--drivers/media/pci/cx23885/cimax2.c17
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c8
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c1
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c1
-rw-r--r--drivers/media/pci/cx23885/netup-init.c1
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c44
-rw-r--r--drivers/media/pci/cx25821/cx25821-biffuncs.h6
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c54
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c47
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c14
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c7
-rw-r--r--drivers/media/pci/cx88/cx88-core.c12
-rw-r--r--drivers/media/pci/cx88/cx88-input.c8
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c18
-rw-r--r--drivers/media/pci/cx88/cx88.h4
-rw-r--r--drivers/media/pci/dm1105/dm1105.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/pci/mantis/mantis_input.c5
-rw-r--r--drivers/media/pci/mantis/mantis_uart.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1033.c6
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c4
-rw-r--r--drivers/media/pci/ngene/ngene-core.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c26
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c16
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c15
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/ttpci/av7110.h1
-rw-r--r--drivers/media/pci/ttpci/budget-av.c4
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c14
-rw-r--r--drivers/media/platform/coda.c4
-rw-r--r--drivers/media/platform/davinci/Kconfig2
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c8
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c16
-rw-r--r--drivers/media/platform/davinci/isif.c5
-rw-r--r--drivers/media/platform/davinci/vpbe.c10
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c303
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c9
-rw-r--r--drivers/media/platform/davinci/vpif.c8
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c34
-rw-r--r--drivers/media/platform/davinci/vpif_display.c28
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c24
-rw-r--r--drivers/media/platform/mem2mem_testdev.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c16
-rw-r--r--drivers/media/platform/omap/omap_vout.c57
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c38
-rw-r--r--drivers/media/platform/omap/omap_voutlib.h3
-rw-r--r--drivers/media/platform/omap3isp/isp.c85
-rw-r--r--drivers/media/platform/omap3isp/isp.h5
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c227
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h10
-rw-r--r--drivers/media/platform/omap3isp/isphist.c8
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c41
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h99
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c5
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c3
-rw-r--r--drivers/media/platform/s3c-camif/Makefile5
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c1672
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c662
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h393
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c606
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h269
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c11
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c16
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c16
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c13
-rw-r--r--drivers/media/platform/soc_camera/Kconfig1
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c14
-rw-r--r--drivers/media/platform/vivi.c8
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-cadet.c3
-rw-r--r--drivers/media/radio/radio-isa.c10
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c4
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c2
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c33
-rw-r--r--drivers/media/rc/fintek-cir.c6
-rw-r--r--drivers/media/rc/gpio-ir-recv.c2
-rw-r--r--drivers/media/rc/iguanair.c2
-rw-r--r--drivers/media/rc/imon.c40
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c4
-rw-r--r--drivers/media/rc/ir-lirc-codec.c4
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c4
-rw-r--r--drivers/media/rc/ir-nec-decoder.c4
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c14
-rw-r--r--drivers/media/rc/ir-rc5-sz-decoder.c6
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c8
-rw-r--r--drivers/media/rc/ir-rx51.c13
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c4
-rw-r--r--drivers/media/rc/ir-sony-decoder.c17
-rw-r--r--drivers/media/rc/ite-cir.c6
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c2
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c2
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/nuvoton-cir.c13
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-loopback.c2
-rw-r--r--drivers/media/rc/rc-main.c73
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/ttusbir.c2
-rw-r--r--drivers/media/rc/winbond-cir.c113
-rw-r--r--drivers/media/tuners/fc2580.c61
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c2
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c5
-rw-r--r--drivers/media/usb/au0828/au0828-video.c16
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c8
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c8
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c11
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c16
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c146
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c15
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c84
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c16
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/gspca.h2
-rw-r--r--drivers/media/usb/gspca/jeilinj.c6
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/usb/gspca/pac7302.c62
-rw-r--r--drivers/media/usb/gspca/sonixb.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c8
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/siano/Kconfig3
-rw-r--r--drivers/media/usb/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c23
-rw-r--r--drivers/media/usb/stk1160/stk1160.h5
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c5
-rw-r--r--drivers/media/usb/tlg2300/pd-dvb.c1
-rw-r--r--drivers/media/usb/tlg2300/pd-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c20
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c1
-rw-r--r--drivers/media/usb/usbvision/usbvision.h2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c10
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c10
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c77
-rw-r--r--drivers/media/usb/uvc/uvc_video.c1
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h8
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c3
-rw-r--r--drivers/media/v4l2-core/Kconfig3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c300
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c700
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c40
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c56
-rw-r--r--drivers/message/fusion/mptscsih.c1
-rw-r--r--drivers/message/i2o/README.ioctl12
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/message/i2o/i2o_config.c2
-rw-r--r--drivers/mfd/Kconfig64
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab8500-core.c140
-rw-r--r--drivers/mfd/arizona-core.c19
-rw-r--r--drivers/mfd/arizona-irq.c1
-rw-r--r--drivers/mfd/as3711.c217
-rw-r--r--drivers/mfd/da9052-core.c273
-rw-r--r--drivers/mfd/da9052-irq.c288
-rw-r--r--drivers/mfd/db8500-prcmu.c20
-rw-r--r--drivers/mfd/jz4740-adc.c20
-rw-r--r--drivers/mfd/lpc_ich.c16
-rw-r--r--drivers/mfd/max8997.c73
-rw-r--r--drivers/mfd/mc13xxx-core.c94
-rw-r--r--drivers/mfd/mc13xxx-i2c.c22
-rw-r--r--drivers/mfd/mc13xxx-spi.c29
-rw-r--r--drivers/mfd/mc13xxx.h18
-rw-r--r--drivers/mfd/mfd-core.c15
-rw-r--r--drivers/mfd/omap-usb-host.c3
-rw-r--r--drivers/mfd/rc5t583-irq.c2
-rw-r--r--drivers/mfd/retu-mfd.c264
-rw-r--r--drivers/mfd/rtsx_pcr.c1
-rw-r--r--drivers/mfd/sec-irq.c102
-rw-r--r--drivers/mfd/sta2x11-mfd.c536
-rw-r--r--drivers/mfd/stmpe-i2c.c8
-rw-r--r--drivers/mfd/stmpe.c208
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c274
-rw-r--r--drivers/mfd/tps6507x.c21
-rw-r--r--drivers/mfd/tps65090.c312
-rw-r--r--drivers/mfd/tps65217.c12
-rw-r--r--drivers/mfd/tps6586x.c179
-rw-r--r--drivers/mfd/tps65910-irq.c260
-rw-r--r--drivers/mfd/tps65910.c234
-rw-r--r--drivers/mfd/tps80031.c574
-rw-r--r--drivers/mfd/twl-core.c227
-rw-r--r--drivers/mfd/twl4030-irq.c10
-rw-r--r--drivers/mfd/twl4030-madc.c14
-rw-r--r--drivers/mfd/twl4030-power.c124
-rw-r--r--drivers/mfd/twl6030-irq.c4
-rw-r--r--drivers/mfd/twl6040-irq.c205
-rw-r--r--drivers/mfd/twl6040.c (renamed from drivers/mfd/twl6040-core.c)138
-rw-r--r--drivers/mfd/viperboard.c137
-rw-r--r--drivers/mfd/wm5102-tables.c38
-rw-r--r--drivers/mfd/wm8994-core.c52
-rw-r--r--drivers/misc/atmel-ssc.c135
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c34
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c4
-rw-r--r--drivers/mtd/ar7part.c7
-rw-r--r--drivers/mtd/bcm63xxpart.c32
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c16
-rw-r--r--drivers/mtd/cmdlinepart.c91
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/docprobe.c2
-rw-r--r--drivers/mtd/devices/m25p80.c48
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c14
-rw-r--r--drivers/mtd/devices/spear_smi.c23
-rw-r--r--drivers/mtd/devices/sst25l.c10
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c9
-rw-r--r--drivers/mtd/maps/ck804xrom.c6
-rw-r--r--drivers/mtd/maps/esb2rom.c6
-rw-r--r--drivers/mtd/maps/fortunet.c277
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ichxrom.c8
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c18
-rw-r--r--drivers/mtd/maps/lantiq-flash.c8
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c4
-rw-r--r--drivers/mtd/maps/pci.c6
-rw-r--r--drivers/mtd/maps/physmap_of.c19
-rw-r--r--drivers/mtd/maps/pismo.c18
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c6
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/maps/scb2_flash.c8
-rw-r--r--drivers/mtd/maps/sun_uflash.c6
-rw-r--r--drivers/mtd/maps/vmu-flash.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c51
-rw-r--r--drivers/mtd/mtdoops.c15
-rw-r--r--drivers/mtd/nand/Kconfig36
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c28
-rw-r--r--drivers/mtd/nand/au1550nd.c8
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/Makefile4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h22
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c108
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c413
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c8
-rw-r--r--drivers/mtd/nand/cafe_nand.c12
-rw-r--r--drivers/mtd/nand/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/davinci_nand.c13
-rw-r--r--drivers/mtd/nand/denali.c162
-rw-r--r--drivers/mtd/nand/denali.h5
-rw-r--r--drivers/mtd/nand/denali_dt.c167
-rw-r--r--drivers/mtd/nand/denali_pci.c144
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c73
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c17
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_upm.c8
-rw-r--r--drivers/mtd/nand/fsmc_nand.c106
-rw-r--r--drivers/mtd/nand/gpio.c34
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c10
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c41
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c14
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c6
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c8
-rw-r--r--drivers/mtd/nand/mxc_nand.c12
-rw-r--r--drivers/mtd/nand/nand_base.c114
-rw-r--r--drivers/mtd/nand/nandsim.c191
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/nomadik_nand.c235
-rw-r--r--drivers/mtd/nand/nuc900_nand.c6
-rw-r--r--drivers/mtd/nand/omap2.c3
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c9
-rw-r--r--drivers/mtd/nand/sh_flctl.c306
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/socrates_nand.c6
-rw-r--r--drivers/mtd/ofpart.c5
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c8
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/tests/mtd_nandbiterrs.c73
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c6
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c171
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c152
-rw-r--r--drivers/mtd/tests/mtd_readtest.c44
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c88
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c44
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c124
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c73
-rw-r--r--drivers/mtd/ubi/attach.c23
-rw-r--r--drivers/mtd/ubi/build.c12
-rw-r--r--drivers/mtd/ubi/debug.c34
-rw-r--r--drivers/mtd/ubi/debug.h57
-rw-r--r--drivers/mtd/ubi/fastmap.c6
-rw-r--r--drivers/mtd/ubi/gluebi.c28
-rw-r--r--drivers/mtd/ubi/io.c14
-rw-r--r--drivers/mtd/ubi/ubi.h40
-rw-r--r--drivers/mtd/ubi/upd.c6
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/mtd/ubi/wl.c7
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h136
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c469
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h459
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c59
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h20
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig24
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c228
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2847
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c153
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c28
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c14
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c18
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ethernet/ti/cpts.c5
-rw-r--r--drivers/net/ethernet/ti/cpts.h1
-rw-r--r--drivers/net/sungem_phy.c8
-rw-r--r--drivers/net/tun.c118
-rw-r--r--drivers/net/usb/cdc_ether.c45
-rw-r--r--drivers/net/usb/cdc_ncm.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c15
-rw-r--r--drivers/net/usb/usbnet.c25
-rw-r--r--drivers/net/virtio_net.c48
-rw-r--r--drivers/net/vxlan.c6
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c1
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h3
-rw-r--r--drivers/net/wimax/i2400m/usb.c6
-rw-r--r--drivers/net/wireless/Kconfig6
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c8
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h4
-rw-r--r--drivers/net/wireless/iwlegacy/common.c10
-rw-r--r--drivers/net/wireless/iwlegacy/common.h12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c2
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/base.c146
-rw-r--r--drivers/of/fdt.c10
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/hotplug/Kconfig11
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c252
-rw-r--r--drivers/pci/ioapic.c2
-rw-r--r--drivers/pci/iov.c87
-rw-r--r--drivers/pci/irq.c10
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/pci-driver.c73
-rw-r--r--drivers/pci/pci-stub.c2
-rw-r--r--drivers/pci/pci-sysfs.c155
-rw-r--r--drivers/pci/pci.c48
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h5
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c22
-rw-r--r--drivers/pci/pcie/aspm.c18
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c20
-rw-r--r--drivers/pci/probe.c42
-rw-r--r--drivers/pci/quirks.c46
-rw-r--r--drivers/pci/remove.c36
-rw-r--r--drivers/pci/rom.c11
-rw-r--r--drivers/pci/setup-bus.c22
-rw-r--r--drivers/pci/xen-pcifront.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c22
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c568
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c3
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/power/Kconfig25
-rw-r--r--drivers/power/Makefile5
-rw-r--r--drivers/power/ab8500_bmdata.c521
-rw-r--r--drivers/power/ab8500_btemp.c77
-rw-r--r--drivers/power/ab8500_charger.c84
-rw-r--r--drivers/power/ab8500_fg.c82
-rw-r--r--drivers/power/abx500_chargalg.c56
-rw-r--r--drivers/power/avs/smartreflex.c56
-rw-r--r--drivers/power/bq2415x_charger.c1670
-rw-r--r--drivers/power/bq27x00_battery.c8
-rw-r--r--drivers/power/charger-manager.c38
-rw-r--r--drivers/power/da9052-battery.c44
-rw-r--r--drivers/power/ds2782_battery.c4
-rw-r--r--drivers/power/generic-adc-battery.c5
-rw-r--r--drivers/power/jz4740-battery.c45
-rw-r--r--drivers/power/lp8788-charger.c75
-rw-r--r--drivers/power/max17042_battery.c3
-rw-r--r--drivers/power/max8925_power.c51
-rw-r--r--drivers/power/power_supply_core.c96
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gpio-poweroff.c129
-rw-r--r--drivers/power/rx51_battery.c251
-rw-r--r--drivers/power/twl4030_charger.c12
-rw-r--r--drivers/pwm/Kconfig39
-rw-r--r--drivers/pwm/Makefile5
-rw-r--r--drivers/pwm/core.c29
-rw-r--r--drivers/pwm/pwm-imx.c2
-rw-r--r--drivers/pwm/pwm-lpc32xx.c23
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-spear.c276
-rw-r--r--drivers/pwm/pwm-tiecap.c48
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c62
-rw-r--r--drivers/pwm/pwm-tipwmss.c139
-rw-r--r--drivers/pwm/pwm-tipwmss.h39
-rw-r--r--drivers/pwm/pwm-twl-led.c344
-rw-r--r--drivers/pwm/pwm-twl.c359
-rw-r--r--drivers/pwm/pwm-twl6030.c184
-rw-r--r--drivers/pwm/pwm-vt8500.c98
-rw-r--r--drivers/regulator/88pm8607.c6
-rw-r--r--drivers/regulator/Kconfig54
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/aat2870-regulator.c4
-rw-r--r--drivers/regulator/ab3100.c6
-rw-r--r--drivers/regulator/ab8500.c12
-rw-r--r--drivers/regulator/ad5398.c6
-rw-r--r--drivers/regulator/anatop-regulator.c34
-rw-r--r--drivers/regulator/arizona-ldo1.c136
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/as3711-regulator.c369
-rw-r--r--drivers/regulator/core.c42
-rw-r--r--drivers/regulator/da903x.c6
-rw-r--r--drivers/regulator/da9052-regulator.c16
-rw-r--r--drivers/regulator/da9055-regulator.c641
-rw-r--r--drivers/regulator/db8500-prcmu.c6
-rw-r--r--drivers/regulator/dbx500-prcmu.c4
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fan53555.c6
-rw-r--r--drivers/regulator/fixed.c6
-rw-r--r--drivers/regulator/gpio-regulator.c112
-rw-r--r--drivers/regulator/isl6271a-regulator.c6
-rw-r--r--drivers/regulator/lp3971.c8
-rw-r--r--drivers/regulator/lp3972.c8
-rw-r--r--drivers/regulator/lp872x.c4
-rw-r--r--drivers/regulator/lp8788-buck.c24
-rw-r--r--drivers/regulator/lp8788-ldo.c25
-rw-r--r--drivers/regulator/max1586.c50
-rw-r--r--drivers/regulator/max77686.c170
-rw-r--r--drivers/regulator/max8649.c6
-rw-r--r--drivers/regulator/max8660.c6
-rw-r--r--drivers/regulator/max8907-regulator.c6
-rw-r--r--drivers/regulator/max8925-regulator.c78
-rw-r--r--drivers/regulator/max8952.c6
-rw-r--r--drivers/regulator/max8973-regulator.c505
-rw-r--r--drivers/regulator/max8997.c187
-rw-r--r--drivers/regulator/max8998.c6
-rw-r--r--drivers/regulator/mc13783-regulator.c6
-rw-r--r--drivers/regulator/mc13892-regulator.c6
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c4
-rw-r--r--drivers/regulator/palmas-regulator.c166
-rw-r--r--drivers/regulator/pcap-regulator.c6
-rw-r--r--drivers/regulator/pcf50633-regulator.c182
-rw-r--r--drivers/regulator/rc5t583-regulator.c6
-rw-r--r--drivers/regulator/s2mps11.c16
-rw-r--r--drivers/regulator/s5m8767.c46
-rw-r--r--drivers/regulator/tps51632-regulator.c342
-rw-r--r--drivers/regulator/tps6105x-regulator.c6
-rw-r--r--drivers/regulator/tps62360-regulator.c8
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps65090-regulator.c254
-rw-r--r--drivers/regulator/tps65217-regulator.c6
-rw-r--r--drivers/regulator/tps6524x-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c189
-rw-r--r--drivers/regulator/tps65910-regulator.c13
-rw-r--r--drivers/regulator/tps65912-regulator.c6
-rw-r--r--drivers/regulator/tps80031-regulator.c788
-rw-r--r--drivers/regulator/twl-regulator.c6
-rw-r--r--drivers/regulator/vexpress.c147
-rw-r--r--drivers/regulator/virtual.c6
-rw-r--r--drivers/regulator/wm831x-dcdc.c31
-rw-r--r--drivers/regulator/wm831x-isink.c6
-rw-r--r--drivers/regulator/wm831x-ldo.c18
-rw-r--r--drivers/regulator/wm8400-regulator.c6
-rw-r--r--drivers/regulator/wm8994-regulator.c6
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c4
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c6
-rw-r--r--drivers/rtc/Kconfig31
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/rtc-da9055.c413
-rw-r--r--drivers/rtc/rtc-davinci.c21
-rw-r--r--drivers/rtc/rtc-dev.c19
-rw-r--r--drivers/rtc/rtc-imxdi.c12
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-omap.c80
-rw-r--r--drivers/rtc/rtc-pcf8523.c326
-rw-r--r--drivers/rtc/rtc-s3c.c54
-rw-r--r--drivers/rtc/rtc-spear.c91
-rw-r--r--drivers/rtc/rtc-test.c14
-rw-r--r--drivers/rtc/rtc-tps65910.c9
-rw-r--r--drivers/rtc/rtc-twl.c32
-rw-r--r--drivers/rtc/rtc-vt8500.c15
-rw-r--r--drivers/s390/block/dasd.c97
-rw-r--r--drivers/s390/block/dasd_devmap.c36
-rw-r--r--drivers/s390/block/dasd_eckd.c92
-rw-r--r--drivers/s390/block/dasd_fba.c23
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c11
-rw-r--r--drivers/s390/char/sclp.h3
-rw-r--r--drivers/s390/char/sclp_cmd.c81
-rw-r--r--drivers/s390/cio/ccwgroup.c26
-rw-r--r--drivers/s390/cio/chsc.c156
-rw-r--r--drivers/s390/cio/device.c11
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_ops.c17
-rw-r--r--drivers/s390/cio/device_pgid.c10
-rw-r--r--drivers/s390/cio/qdio_main.c52
-rw-r--r--drivers/s390/cio/qdio_setup.c9
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c68
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/aachba.c87
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/be2iscsi/be.h7
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c236
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h93
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c124
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1059
-rw-r--r--drivers/scsi/be2iscsi/be_main.h152
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c424
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h23
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c43
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/csiostor/Kconfig19
-rw-r--r--drivers/scsi/csiostor/Makefile11
-rw-r--r--drivers/scsi/csiostor/csio_attr.c796
-rw-r--r--drivers/scsi/csiostor/csio_defs.h121
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4395
-rw-r--r--drivers/scsi/csiostor/csio_hw.h665
-rw-r--r--drivers/scsi/csiostor/csio_init.c1274
-rw-r--r--drivers/scsi/csiostor/csio_init.h158
-rw-r--r--drivers/scsi/csiostor/csio_isr.c624
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2135
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h255
-rw-r--r--drivers/scsi/csiostor/csio_mb.c1750
-rw-r--r--drivers/scsi/csiostor/csio_mb.h278
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c913
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h141
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2555
-rw-r--r--drivers/scsi/csiostor/csio_scsi.h342
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1632
-rw-r--r--drivers/scsi/csiostor/csio_wr.h512
-rw-r--r--drivers/scsi/csiostor/t4fw_api_stor.h539
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/hptiop.c413
-rw-r--r--drivers/scsi/hptiop.h72
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c85
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mpt3sas/Kconfig67
-rw-r--r--drivers/scsi/mpt3sas/Makefile8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h1164
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h3323
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h560
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h1665
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h346
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h295
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h437
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h56
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4840
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h1139
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1650
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3297
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h418
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h219
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8167
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c2128
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c434
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h193
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h14
-rw-r--r--drivers/scsi/mvsas/mv_sas.h2
-rw-r--r--drivers/scsi/osd/osd_uld.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c72
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c153
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c83
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c6
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_pm.c98
-rw-r--r--drivers/scsi/scsi_sysfs.c11
-rw-r--r--drivers/scsi/scsi_transport_sas.c1
-rw-r--r--drivers/scsi/scsi_transport_srp.c51
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/virtio_scsi.c26
-rw-r--r--drivers/sh/clk/cpg.c1
-rw-r--r--drivers/spi/Kconfig31
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-altera.c6
-rw-r--r--drivers/spi/spi-ath79.c6
-rw-r--r--drivers/spi/spi-atmel.c21
-rw-r--r--drivers/spi/spi-bcm63xx.c22
-rw-r--r--drivers/spi/spi-bfin-sport.c8
-rw-r--r--drivers/spi/spi-bfin5xx.c4
-rw-r--r--drivers/spi/spi-bitbang.c27
-rw-r--r--drivers/spi/spi-clps711x.c296
-rw-r--r--drivers/spi/spi-coldfire-qspi.c6
-rw-r--r--drivers/spi/spi-davinci.c6
-rw-r--r--drivers/spi/spi-dw-mmio.c6
-rw-r--r--drivers/spi/spi-dw-pci.c6
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-ep93xx.c6
-rw-r--r--drivers/spi/spi-falcon.c6
-rw-r--r--drivers/spi/spi-fsl-espi.c8
-rw-r--r--drivers/spi/spi-fsl-lib.c4
-rw-r--r--drivers/spi/spi-fsl-spi.c14
-rw-r--r--drivers/spi/spi-gpio.c13
-rw-r--r--drivers/spi/spi-imx.c6
-rw-r--r--drivers/spi/spi-mpc512x-psc.c10
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c8
-rw-r--r--drivers/spi/spi-mpc52xx.c8
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-nuc900.c6
-rw-r--r--drivers/spi/spi-oc-tiny.c10
-rw-r--r--drivers/spi/spi-octeon.c6
-rw-r--r--drivers/spi/spi-omap-100k.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c76
-rw-r--r--drivers/spi/spi-orion.c27
-rw-r--r--drivers/spi/spi-pl022.c61
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c6
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c10
-rw-r--r--drivers/spi/spi-s3c24xx.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c48
-rw-r--r--drivers/spi/spi-sh-hspi.c51
-rw-r--r--drivers/spi/spi-sh-msiof.c6
-rw-r--r--drivers/spi/spi-sh.c6
-rw-r--r--drivers/spi/spi-sirf.c6
-rw-r--r--drivers/spi/spi-stmp.c664
-rw-r--r--drivers/spi/spi-tegra20-sflash.c665
-rw-r--r--drivers/spi/spi-tegra20-slink.c1358
-rw-r--r--drivers/spi/spi-ti-ssp.c6
-rw-r--r--drivers/spi/spi-tle62x0.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c12
-rw-r--r--drivers/spi/spi-xcomm.c6
-rw-r--r--drivers/spi/spi-xilinx.c6
-rw-r--r--drivers/spi/spi.c106
-rw-r--r--drivers/spi/spidev.c16
-rw-r--r--drivers/ssb/Kconfig9
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/driver_chipcommon.c78
-rw-r--r--drivers/ssb/driver_extif.c43
-rw-r--r--drivers/ssb/driver_gpio.c176
-rw-r--r--drivers/ssb/main.c7
-rw-r--r--drivers/ssb/ssb_private.h17
-rw-r--r--drivers/staging/android/binder.c3
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c4
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c42
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c2
-rw-r--r--drivers/staging/media/go7007/s2250-board.c13
-rw-r--r--drivers/staging/media/go7007/wis-ov7640.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7115.c20
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c13
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c13
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/omapdrm/omap_drv.c11
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c84
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c18
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c10
-rw-r--r--drivers/target/loopback/tcm_loop.h1
-rw-r--r--drivers/target/sbp/Kconfig2
-rw-r--r--drivers/target/sbp/sbp_target.c24
-rw-r--r--drivers/target/target_core_alua.c346
-rw-r--r--drivers/target/target_core_alua.h9
-rw-r--r--drivers/target/target_core_configfs.c705
-rw-r--r--drivers/target/target_core_device.c710
-rw-r--r--drivers/target/target_core_fabric_configfs.c37
-rw-r--r--drivers/target/target_core_fabric_lib.c3
-rw-r--r--drivers/target/target_core_file.c279
-rw-r--r--drivers/target/target_core_file.h2
-rw-r--r--drivers/target/target_core_hba.c9
-rw-r--r--drivers/target/target_core_iblock.c501
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h16
-rw-r--r--drivers/target/target_core_pr.c1225
-rw-r--r--drivers/target/target_core_pr.h10
-rw-r--r--drivers/target/target_core_pscsi.c349
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/target/target_core_rd.c126
-rw-r--r--drivers/target/target_core_rd.h1
-rw-r--r--drivers/target/target_core_sbc.c185
-rw-r--r--drivers/target/target_core_spc.c572
-rw-r--r--drivers/target/target_core_stat.c312
-rw-r--r--drivers/target/target_core_tmr.c9
-rw-r--r--drivers/target/target_core_tpg.c29
-rw-r--r--drivers/target/target_core_transport.c675
-rw-r--r--drivers/target/target_core_ua.c20
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/tty/Kconfig1
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_vio.c2
-rw-r--r--drivers/tty/ipwireless/setup_protocol.h2
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/bfin_uart.c2
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/gadget/at91_udc.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/host/ehci-orion.c38
-rw-r--r--drivers/usb/musb/musb_core.c12
-rw-r--r--drivers/usb/musb/musb_io.h21
-rw-r--r--drivers/usb/musb/tusb6010.c5
-rw-r--r--drivers/usb/musb/tusb6010_omap.c2
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/storage/realtek_cr.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c83
-rw-r--r--drivers/vfio/vfio.c34
-rw-r--r--drivers/vhost/tcm_vhost.c6
-rw-r--r--drivers/video/Kconfig25
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/backlight/88pm860x_bl.c18
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c7
-rw-r--r--drivers/video/backlight/backlight.c29
-rw-r--r--drivers/video/backlight/corgi_lcd.c20
-rw-r--r--drivers/video/backlight/da903x_bl.c15
-rw-r--r--drivers/video/backlight/da9052_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c4
-rw-r--r--drivers/video/backlight/hp680_bl.c4
-rw-r--r--drivers/video/backlight/ili9320.c14
-rw-r--r--drivers/video/backlight/ili9320.h2
-rw-r--r--drivers/video/backlight/jornada720_bl.c31
-rw-r--r--drivers/video/backlight/l4f00242t03.c3
-rw-r--r--drivers/video/backlight/lcd.c8
-rw-r--r--drivers/video/backlight/lm3630_bl.c2
-rw-r--r--drivers/video/backlight/lm3639_bl.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c17
-rw-r--r--drivers/video/backlight/locomolcd.c38
-rw-r--r--drivers/video/backlight/lp855x_bl.c51
-rw-r--r--drivers/video/backlight/max8925_bl.c11
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pandora_bl.c8
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c8
-rw-r--r--drivers/video/backlight/platform_lcd.c2
-rw-r--r--drivers/video/backlight/s6e63m0.c2
-rw-r--r--drivers/video/backlight/tdo24m.c33
-rw-r--r--drivers/video/backlight/tosa_bl.c7
-rw-r--r--drivers/video/backlight/tosa_lcd.c24
-rw-r--r--drivers/video/backlight/vgg2432a4.c10
-rw-r--r--drivers/video/console/newport_con.c11
-rw-r--r--drivers/video/console/softcursor.c3
-rw-r--r--drivers/video/da8xx-fb.c170
-rw-r--r--drivers/video/exynos/exynos_dp_core.c697
-rw-r--r--drivers/video/exynos/exynos_dp_core.h21
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c77
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h3
-rw-r--r--drivers/video/fsl-diu-fb.c201
-rw-r--r--drivers/video/gxt4500.c15
-rw-r--r--drivers/video/omap/lcdc.c2
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/omap2/Kconfig7
-rw-r--r--drivers/video/omap2/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c23
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c36
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c34
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c89
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c24
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c45
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c17
-rw-r--r--drivers/video/omap2/displays/panel-taal.c72
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c33
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c20
-rw-r--r--drivers/video/omap2/dss/Kconfig35
-rw-r--r--drivers/video/omap2/dss/Makefile7
-rw-r--r--drivers/video/omap2/dss/apply.c331
-rw-r--r--drivers/video/omap2/dss/core.c72
-rw-r--r--drivers/video/omap2/dss/dispc-compat.c667
-rw-r--r--drivers/video/omap2/dss/dispc-compat.h30
-rw-r--r--drivers/video/omap2/dss/dispc.c1063
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c321
-rw-r--r--drivers/video/omap2/dss/display.c386
-rw-r--r--drivers/video/omap2/dss/dpi.c126
-rw-r--r--drivers/video/omap2/dss/dsi.c247
-rw-r--r--drivers/video/omap2/dss/dss.c101
-rw-r--r--drivers/video/omap2/dss/dss.h124
-rw-r--r--drivers/video/omap2/dss/dss_features.c15
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c159
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c82
-rw-r--r--drivers/video/omap2/dss/manager.c39
-rw-r--r--drivers/video/omap2/dss/output.c90
-rw-r--r--drivers/video/omap2/dss/overlay.c17
-rw-r--r--drivers/video/omap2/dss/rfbi.c23
-rw-r--r--drivers/video/omap2/dss/sdi.c11
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h3
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c11
-rw-r--r--drivers/video/omap2/dss/venc.c11
-rw-r--r--drivers/video/omap2/dss/venc_panel.c19
-rw-r--r--drivers/video/omap2/omapfb/Kconfig1
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c46
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c204
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c4
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h20
-rw-r--r--drivers/video/omap2/vram.c514
-rw-r--r--drivers/video/s3c-fb.c24
-rw-r--r--drivers/video/sh_mipi_dsi.c69
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c74
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/ssd1307fb.c396
-rw-r--r--drivers/virt/Kconfig1
-rw-r--r--drivers/virt/fsl_hypervisor.c3
-rw-r--r--drivers/virtio/virtio.c30
-rw-r--r--drivers/virtio/virtio_balloon.c7
-rw-r--r--drivers/virtio/virtio_mmio.c30
-rw-r--r--drivers/virtio/virtio_pci.c20
-rw-r--r--drivers/virtio/virtio_ring.c46
-rw-r--r--drivers/w1/masters/Kconfig1
-rw-r--r--drivers/watchdog/Kconfig18
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/at91sam9_wdt.c11
-rw-r--r--drivers/watchdog/ath79_wdt.c13
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/da9052_wdt.c4
-rw-r--r--drivers/watchdog/da9055_wdt.c211
-rw-r--r--drivers/watchdog/davinci_wdt.c11
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/mpcore_wdt.c19
-rw-r--r--drivers/watchdog/omap_wdt.c314
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c6
-rw-r--r--drivers/watchdog/sp5100_tco.c321
-rw-r--r--drivers/watchdog/sp5100_tco.h46
-rw-r--r--drivers/watchdog/sp805_wdt.c11
-rw-r--r--drivers/watchdog/twl4030_wdt.c196
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile7
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/privcmd.c72
-rw-r--r--drivers/xen/swiotlb-xen.c25
-rw-r--r--drivers/xen/xen-acpi-pad.c182
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c120
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--fs/Kconfig5
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/inode.c15
-rw-r--r--fs/affs/file.c18
-rw-r--r--fs/affs/inode.c5
-rw-r--r--fs/attr.c11
-rw-r--r--fs/autofs4/autofs_i.h8
-rw-r--r--fs/autofs4/dev-ioctl.c4
-rw-r--r--fs/autofs4/expire.c9
-rw-r--r--fs/autofs4/inode.c24
-rw-r--r--fs/autofs4/root.c83
-rw-r--r--fs/autofs4/waitq.c5
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/bfs/file.c15
-rw-r--r--fs/binfmt_elf.c4
-rw-r--r--fs/binfmt_em86.c1
-rw-r--r--fs/binfmt_misc.c11
-rw-r--r--fs/binfmt_script.c8
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/acl.c2
-rw-r--r--fs/btrfs/backref.c16
-rw-r--r--fs/btrfs/btrfs_inode.h4
-rw-r--r--fs/btrfs/check-integrity.c31
-rw-r--r--fs/btrfs/compression.c6
-rw-r--r--fs/btrfs/ctree.c229
-rw-r--r--fs/btrfs/ctree.h184
-rw-r--r--fs/btrfs/delayed-inode.c11
-rw-r--r--fs/btrfs/dev-replace.c856
-rw-r--r--fs/btrfs/dev-replace.h44
-rw-r--r--fs/btrfs/dir-item.c59
-rw-r--r--fs/btrfs/disk-io.c142
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/extent-tree.c229
-rw-r--r--fs/btrfs/extent_io.c37
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/extent_map.c27
-rw-r--r--fs/btrfs/extent_map.h2
-rw-r--r--fs/btrfs/file-item.c21
-rw-r--r--fs/btrfs/file.c422
-rw-r--r--fs/btrfs/free-space-cache.c51
-rw-r--r--fs/btrfs/inode-map.c5
-rw-r--r--fs/btrfs/inode.c500
-rw-r--r--fs/btrfs/ioctl.c317
-rw-r--r--fs/btrfs/ioctl.h48
-rw-r--r--fs/btrfs/math.h44
-rw-r--r--fs/btrfs/ordered-data.c90
-rw-r--r--fs/btrfs/ordered-data.h9
-rw-r--r--fs/btrfs/print-tree.c3
-rw-r--r--fs/btrfs/reada.c31
-rw-r--r--fs/btrfs/relocation.c40
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/scrub.c1836
-rw-r--r--fs/btrfs/send.c8
-rw-r--r--fs/btrfs/super.c48
-rw-r--r--fs/btrfs/transaction.c170
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c477
-rw-r--r--fs/btrfs/volumes.c966
-rw-r--r--fs/btrfs/volumes.h35
-rw-r--r--fs/btrfs/xattr.c13
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/cachefiles/interface.c57
-rw-r--r--fs/cachefiles/internal.h2
-rw-r--r--fs/cachefiles/key.c2
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/cachefiles/rdwr.c114
-rw-r--r--fs/cachefiles/xattr.c2
-rw-r--r--fs/ceph/addr.c60
-rw-r--r--fs/ceph/caps.c18
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/ceph/file.c79
-rw-r--r--fs/ceph/inode.c15
-rw-r--r--fs/ceph/mds_client.c11
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/cifs/README2
-rw-r--r--fs/cifs/cifs_debug.h6
-rw-r--r--fs/cifs/cifsacl.c12
-rw-r--r--fs/cifs/cifsfs.c9
-rw-r--r--fs/cifs/connect.c9
-rw-r--r--fs/cifs/readdir.c19
-rw-r--r--fs/configfs/dir.c4
-rw-r--r--fs/dcache.c35
-rw-r--r--fs/ecryptfs/crypto.c2
-rw-r--r--fs/ecryptfs/kthread.c6
-rw-r--r--fs/ecryptfs/mmap.c12
-rw-r--r--fs/eventfd.c20
-rw-r--r--fs/eventpoll.c50
-rw-r--r--fs/exec.c55
-rw-r--r--fs/exofs/inode.c16
-rw-r--r--fs/exportfs/expfs.c23
-rw-r--r--fs/ext3/dir.c6
-rw-r--r--fs/ext3/inode.c3
-rw-r--r--fs/ext3/super.c3
-rw-r--r--fs/ext4/Kconfig15
-rw-r--r--fs/ext4/Makefile4
-rw-r--r--fs/ext4/acl.c6
-rw-r--r--fs/ext4/dir.c47
-rw-r--r--fs/ext4/ext4.h167
-rw-r--r--fs/ext4/ext4_extents.h40
-rw-r--r--fs/ext4/ext4_jbd2.h7
-rw-r--r--fs/ext4/extents.c502
-rw-r--r--fs/ext4/extents_status.c500
-rw-r--r--fs/ext4/extents_status.h45
-rw-r--r--fs/ext4/file.c346
-rw-r--r--fs/ext4/fsync.c8
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/indirect.c5
-rw-r--r--fs/ext4/inline.c1884
-rw-r--r--fs/ext4/inode.c728
-rw-r--r--fs/ext4/mballoc.c60
-rw-r--r--fs/ext4/migrate.c1
-rw-r--r--fs/ext4/move_extent.c1
-rw-r--r--fs/ext4/namei.c534
-rw-r--r--fs/ext4/page-io.c3
-rw-r--r--fs/ext4/resize.c17
-rw-r--r--fs/ext4/super.c87
-rw-r--r--fs/ext4/symlink.c4
-rw-r--r--fs/ext4/xattr.c110
-rw-r--r--fs/ext4/xattr.h158
-rw-r--r--fs/f2fs/Kconfig53
-rw-r--r--fs/f2fs/Makefile7
-rw-r--r--fs/f2fs/acl.c413
-rw-r--r--fs/f2fs/acl.h57
-rw-r--r--fs/f2fs/checkpoint.c794
-rw-r--r--fs/f2fs/data.c702
-rw-r--r--fs/f2fs/debug.c361
-rw-r--r--fs/f2fs/dir.c672
-rw-r--r--fs/f2fs/f2fs.h1083
-rw-r--r--fs/f2fs/file.c636
-rw-r--r--fs/f2fs/gc.c742
-rw-r--r--fs/f2fs/gc.h117
-rw-r--r--fs/f2fs/hash.c97
-rw-r--r--fs/f2fs/inode.c268
-rw-r--r--fs/f2fs/namei.c503
-rw-r--r--fs/f2fs/node.c1764
-rw-r--r--fs/f2fs/node.h353
-rw-r--r--fs/f2fs/recovery.c375
-rw-r--r--fs/f2fs/segment.c1791
-rw-r--r--fs/f2fs/segment.h618
-rw-r--r--fs/f2fs/super.c657
-rw-r--r--fs/f2fs/xattr.c440
-rw-r--r--fs/f2fs/xattr.h145
-rw-r--r--fs/fat/dir.c5
-rw-r--r--fs/fat/fat.h3
-rw-r--r--fs/fat/inode.c57
-rw-r--r--fs/fat/misc.c13
-rw-r--r--fs/fhandle.c6
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fscache/cache.c8
-rw-r--r--fs/fscache/cookie.c78
-rw-r--r--fs/fscache/internal.h15
-rw-r--r--fs/fscache/object-list.c2
-rw-r--r--fs/fscache/object.c101
-rw-r--r--fs/fscache/operation.c140
-rw-r--r--fs/fscache/page.c195
-rw-r--r--fs/fscache/stats.c17
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/dir.c20
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/fuse/fuse_i.h4
-rw-r--r--fs/fuse/inode.c23
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/bmap.c54
-rw-r--r--fs/gfs2/dir.c7
-rw-r--r--fs/gfs2/file.c14
-rw-r--r--fs/gfs2/glock.c40
-rw-r--r--fs/gfs2/glock.h54
-rw-r--r--fs/gfs2/glops.c19
-rw-r--r--fs/gfs2/incore.h6
-rw-r--r--fs/gfs2/inode.c209
-rw-r--r--fs/gfs2/lock_dlm.c20
-rw-r--r--fs/gfs2/ops_fstype.c3
-rw-r--r--fs/gfs2/quota.c10
-rw-r--r--fs/gfs2/rgrp.c139
-rw-r--r--fs/gfs2/rgrp.h3
-rw-r--r--fs/gfs2/trace_gfs2.h2
-rw-r--r--fs/gfs2/xattr.c2
-rw-r--r--fs/hfs/inode.c26
-rw-r--r--fs/hfsplus/bitmap.c13
-rw-r--r--fs/hfsplus/btree.c5
-rw-r--r--fs/hfsplus/extents.c24
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c27
-rw-r--r--fs/hfsplus/super.c15
-rw-r--r--fs/hpfs/file.c20
-rw-r--r--fs/hpfs/hpfs_fn.h1
-rw-r--r--fs/hpfs/inode.c5
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jbd2/journal.c1
-rw-r--r--fs/jbd2/transaction.c43
-rw-r--r--fs/jffs2/nodemgmt.c6
-rw-r--r--fs/jfs/file.c6
-rw-r--r--fs/jfs/inode.c20
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/lockd/clnt4xdr.c8
-rw-r--r--fs/lockd/clntproc.c3
-rw-r--r--fs/lockd/clntxdr.c8
-rw-r--r--fs/lockd/host.c15
-rw-r--r--fs/lockd/mon.c3
-rw-r--r--fs/logfs/inode.c2
-rw-r--r--fs/logfs/readwrite.c10
-rw-r--r--fs/minix/file.c6
-rw-r--r--fs/minix/inode.c17
-rw-r--r--fs/mount.h3
-rw-r--r--fs/namei.c118
-rw-r--r--fs/namespace.c214
-rw-r--r--fs/ncpfs/inode.c4
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/nfs/Makefile2
-rw-r--r--fs/nfs/blocklayout/blocklayout.c1
-rw-r--r--fs/nfs/cache_lib.c1
-rw-r--r--fs/nfs/callback.h4
-rw-r--r--fs/nfs/callback_proc.c17
-rw-r--r--fs/nfs/callback_xdr.c5
-rw-r--r--fs/nfs/client.c9
-rw-r--r--fs/nfs/dir.c28
-rw-r--r--fs/nfs/direct.c17
-rw-r--r--fs/nfs/file.c10
-rw-r--r--fs/nfs/fscache.c1
-rw-r--r--fs/nfs/fscache.h21
-rw-r--r--fs/nfs/idmap.c12
-rw-r--r--fs/nfs/inode.c30
-rw-r--r--fs/nfs/internal.h42
-rw-r--r--fs/nfs/mount_clnt.c7
-rw-r--r--fs/nfs/nfs2xdr.c4
-rw-r--r--fs/nfs/nfs3proc.c6
-rw-r--r--fs/nfs/nfs3xdr.c7
-rw-r--r--fs/nfs/nfs4_fs.h29
-rw-r--r--fs/nfs/nfs4client.c5
-rw-r--r--fs/nfs/nfs4file.c3
-rw-r--r--fs/nfs/nfs4filelayout.c45
-rw-r--r--fs/nfs/nfs4filelayoutdev.c3
-rw-r--r--fs/nfs/nfs4proc.c821
-rw-r--r--fs/nfs/nfs4session.c552
-rw-r--r--fs/nfs/nfs4session.h142
-rw-r--r--fs/nfs/nfs4state.c143
-rw-r--r--fs/nfs/nfs4super.c1
-rw-r--r--fs/nfs/nfs4xdr.c52
-rw-r--r--fs/nfs/objlayout/objlayout.c11
-rw-r--r--fs/nfs/pnfs.c17
-rw-r--r--fs/nfs/proc.c43
-rw-r--r--fs/nfs/super.c21
-rw-r--r--fs/nfs/write.c34
-rw-r--r--fs/nfsd/fault_inject.c113
-rw-r--r--fs/nfsd/fault_inject.h28
-rw-r--r--fs/nfsd/netns.h66
-rw-r--r--fs/nfsd/nfs2acl.c2
-rw-r--r--fs/nfsd/nfs3acl.c2
-rw-r--r--fs/nfsd/nfs3proc.c6
-rw-r--r--fs/nfsd/nfs3xdr.c47
-rw-r--r--fs/nfsd/nfs4callback.c69
-rw-r--r--fs/nfsd/nfs4proc.c74
-rw-r--r--fs/nfsd/nfs4recover.c561
-rw-r--r--fs/nfsd/nfs4state.c1015
-rw-r--r--fs/nfsd/nfs4xdr.c324
-rw-r--r--fs/nfsd/nfsctl.c100
-rw-r--r--fs/nfsd/nfsd.h36
-rw-r--r--fs/nfsd/nfsfh.c4
-rw-r--r--fs/nfsd/nfssvc.c203
-rw-r--r--fs/nfsd/nfsxdr.c11
-rw-r--r--fs/nfsd/state.h64
-rw-r--r--fs/nfsd/vfs.c52
-rw-r--r--fs/nfsd/xdr4.h15
-rw-r--r--fs/nilfs2/file.c1
-rw-r--r--fs/nilfs2/inode.c24
-rw-r--r--fs/nilfs2/nilfs.h1
-rw-r--r--fs/nilfs2/recovery.c3
-rw-r--r--fs/notify/Makefile2
-rw-r--r--fs/notify/dnotify/dnotify.c4
-rw-r--r--fs/notify/fanotify/Kconfig2
-rw-r--r--fs/notify/fanotify/fanotify.c6
-rw-r--r--fs/notify/fanotify/fanotify_user.c39
-rw-r--r--fs/notify/fdinfo.c179
-rw-r--r--fs/notify/fdinfo.h27
-rw-r--r--fs/notify/group.c47
-rw-r--r--fs/notify/inode_mark.c19
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c4
-rw-r--r--fs/notify/inotify/inotify_user.c36
-rw-r--r--fs/notify/mark.c91
-rw-r--r--fs/notify/notification.c3
-rw-r--r--fs/notify/vfsmount_mark.c14
-rw-r--r--fs/ntfs/file.c16
-rw-r--r--fs/ntfs/inode.c8
-rw-r--r--fs/ntfs/inode.h4
-rw-r--r--fs/ocfs2/extent_map.c12
-rw-r--r--fs/ocfs2/file.c24
-rw-r--r--fs/omfs/file.c22
-rw-r--r--fs/open.c99
-rw-r--r--fs/pnode.h1
-rw-r--r--fs/proc/Makefile1
-rw-r--r--fs/proc/array.c23
-rw-r--r--fs/proc/base.c176
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/proc/generic.c48
-rw-r--r--fs/proc/inode.c6
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/namespaces.c185
-rw-r--r--fs/proc/proc_devtree.c6
-rw-r--r--fs/proc/proc_sysctl.c7
-rw-r--r--fs/proc/root.c17
-rw-r--r--fs/proc/self.c59
-rw-r--r--fs/proc/task_mmu.c61
-rw-r--r--fs/pstore/ftrace.c4
-rw-r--r--fs/pstore/inode.c6
-rw-r--r--fs/pstore/ram.c42
-rw-r--r--fs/quota/quota.c4
-rw-r--r--fs/read_write.c42
-rw-r--r--fs/reiserfs/file.c3
-rw-r--r--fs/reiserfs/inode.c15
-rw-r--r--fs/reiserfs/reiserfs.h1
-rw-r--r--fs/seq_file.c4
-rw-r--r--fs/signalfd.c18
-rw-r--r--fs/stat.c16
-rw-r--r--fs/statfs.c9
-rw-r--r--fs/sysfs/mount.c1
-rw-r--r--fs/sysv/file.c5
-rw-r--r--fs/sysv/itree.c17
-rw-r--r--fs/ubifs/debug.c8
-rw-r--r--fs/ubifs/dir.c4
-rw-r--r--fs/udf/inode.c14
-rw-r--r--fs/ufs/inode.c15
-rw-r--r--fs/utimes.c6
-rw-r--r--fs/xattr.c72
-rw-r--r--include/Kbuild3
-rw-r--r--include/acpi/acpi_drivers.h4
-rw-r--r--include/asm-generic/io.h64
-rw-r--r--include/asm-generic/mmu.h6
-rw-r--r--include/asm-generic/pgtable.h136
-rw-r--r--include/crypto/cast5.h6
-rw-r--r--include/crypto/cast6.h6
-rw-r--r--include/crypto/cast_common.h9
-rw-r--r--include/crypto/vmac.h2
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h19
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_dp_helper.h39
-rw-r--r--include/drm/drm_hashtab.h14
-rw-r--r--include/drm/drm_mm.h25
-rw-r--r--include/drm/exynos_drm.h26
-rw-r--r--include/drm/intel-gtt.h7
-rw-r--r--include/drm/ttm/ttm_bo_api.h33
-rw-r--r--include/drm/ttm/ttm_bo_driver.h45
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h3
-rw-r--r--include/drm/ttm/ttm_memory.h2
-rw-r--r--include/drm/ttm/ttm_object.h4
-rw-r--r--include/linux/Kbuild5
-rw-r--r--include/linux/acpi.h16
-rw-r--r--include/linux/asn1.h2
-rw-r--r--include/linux/ata_platform.h2
-rw-r--r--include/linux/atmel-ssc.h6
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/backlight.h10
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h9
-rw-r--r--include/linux/binfmts.h7
-rw-r--r--include/linux/blkdev.h28
-rw-r--r--include/linux/bootmem.h3
-rw-r--r--include/linux/bsg-lib.h1
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/ceph/osdmap.h1
-rw-r--r--include/linux/ceph/rados.h2
-rw-r--r--include/linux/clk/mvebu.h22
-rw-r--r--include/linux/clk/zynq.h (renamed from arch/arm/mach-vt8500/include/mach/irqs.h)12
-rw-r--r--include/linux/compat.h65
-rw-r--r--include/linux/compiler-gcc4.h12
-rw-r--r--include/linux/compiler-intel.h7
-rw-r--r--include/linux/compiler.h11
-rw-r--r--include/linux/cpuset.h2
-rw-r--r--include/linux/cred.h19
-rw-r--r--include/linux/dcache.h8
-rw-r--r--include/linux/device-mapper.h55
-rw-r--r--include/linux/device.h7
-rw-r--r--include/linux/dlm.h2
-rw-r--r--include/linux/dma-attrs.h1
-rw-r--r--include/linux/dma-buf.h99
-rw-r--r--include/linux/dma-debug.h7
-rw-r--r--include/linux/drbd.h81
-rw-r--r--include/linux/drbd_genl.h378
-rw-r--r--include/linux/drbd_genl_api.h55
-rw-r--r--include/linux/drbd_limits.h90
-rw-r--r--include/linux/drbd_nl.h163
-rw-r--r--include/linux/drbd_tag_magic.h84
-rw-r--r--include/linux/dvb/Kbuild0
-rw-r--r--include/linux/dvb/dmx.h29
-rw-r--r--include/linux/dvb/video.h29
-rw-r--r--include/linux/earlycpio.h17
-rw-r--r--include/linux/efi.h77
-rw-r--r--include/linux/exportfs.h7
-rw-r--r--include/linux/extcon.h50
-rw-r--r--include/linux/f2fs_fs.h413
-rw-r--r--include/linux/fs.h24
-rw-r--r--include/linux/fscache-cache.h71
-rw-r--r--include/linux/fscache.h50
-rw-r--r--include/linux/fsl-diu-fb.h9
-rw-r--r--include/linux/fsnotify_backend.h31
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/genhd.h8
-rw-r--r--include/linux/genl_magic_func.h422
-rw-r--r--include/linux/genl_magic_struct.h277
-rw-r--r--include/linux/gfp.h6
-rw-r--r--include/linux/hash.h2
-rw-r--r--include/linux/hdlc/Kbuild0
-rw-r--r--include/linux/hid-sensor-ids.h1
-rw-r--r--include/linux/hid.h6
-rw-r--r--include/linux/hsi/Kbuild0
-rw-r--r--include/linux/huge_mm.h34
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/hugetlb_cgroup.h5
-rw-r--r--include/linux/i2c-omap.h2
-rw-r--r--include/linux/i2c/i2c-hid.h35
-rw-r--r--include/linux/i2c/i2c-sh_mobile.h1
-rw-r--r--include/linux/i2c/twl.h73
-rw-r--r--include/linux/idr.h11
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/init.h40
-rw-r--r--include/linux/input.h10
-rw-r--r--include/linux/input/bu21013.h10
-rw-r--r--include/linux/input/mt.h6
-rw-r--r--include/linux/input/ti_am335x_tsc.h23
-rw-r--r--include/linux/input/ti_tscadc.h17
-rw-r--r--include/linux/ipc_namespace.h9
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/irq.h10
-rw-r--r--include/linux/irqchip/spear-shirq.h (renamed from arch/arm/plat-spear/include/plat/shirq.h)49
-rw-r--r--include/linux/jbd2.h11
-rw-r--r--include/linux/kernel.h39
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kref.h21
-rw-r--r--include/linux/kvm_host.h53
-rw-r--r--include/linux/leds.h17
-rw-r--r--include/linux/libata.h5
-rw-r--r--include/linux/loop.h3
-rw-r--r--include/linux/lru_cache.h71
-rw-r--r--include/linux/memcontrol.h218
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/mempolicy.h21
-rw-r--r--include/linux/mfd/abx500.h34
-rw-r--r--include/linux/mfd/arizona/core.h4
-rw-r--r--include/linux/mfd/arizona/pdata.h6
-rw-r--r--include/linux/mfd/arizona/registers.h17
-rw-r--r--include/linux/mfd/as3711.h126
-rw-r--r--include/linux/mfd/da9052/da9052.h10
-rw-r--r--include/linux/mfd/da9055/core.h2
-rw-r--r--include/linux/mfd/da9055/pdata.h29
-rw-r--r--include/linux/mfd/da9055/reg.h2
-rw-r--r--include/linux/mfd/lp8788.h8
-rw-r--r--include/linux/mfd/max8997-private.h1
-rw-r--r--include/linux/mfd/max8997.h1
-rw-r--r--include/linux/mfd/rc5t583.h3
-rw-r--r--include/linux/mfd/retu.h22
-rw-r--r--include/linux/mfd/sta2x11-mfd.h198
-rw-r--r--include/linux/mfd/stmpe.h4
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h152
-rw-r--r--include/linux/mfd/tps65090.h104
-rw-r--r--include/linux/mfd/tps6586x.h4
-rw-r--r--include/linux/mfd/tps65910.h145
-rw-r--r--include/linux/mfd/tps80031.h637
-rw-r--r--include/linux/mfd/twl6040.h10
-rw-r--r--include/linux/mfd/viperboard.h110
-rw-r--r--include/linux/mfd/wm8994/core.h4
-rw-r--r--include/linux/mfd/wm8994/pdata.h5
-rw-r--r--include/linux/migrate.h46
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/device.h22
-rw-r--r--include/linux/mm.h40
-rw-r--r--include/linux/mm_types.h38
-rw-r--r--include/linux/mmzone.h54
-rw-r--r--include/linux/mnt_namespace.h3
-rw-r--r--include/linux/moduleparam.h6
-rw-r--r--include/linux/mtd/blktrans.h4
-rw-r--r--include/linux/mtd/doc2000.h22
-rw-r--r--include/linux/mtd/fsmc.h3
-rw-r--r--include/linux/mtd/gpmi-nand.h68
-rw-r--r--include/linux/mtd/map.h4
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/nand.h11
-rw-r--r--include/linux/mtd/sh_flctl.h14
-rw-r--r--include/linux/namei.h20
-rw-r--r--include/linux/netdevice.h4
-rw-r--r--include/linux/nfs_fs_sb.h47
-rw-r--r--include/linux/nfs_xdr.h155
-rw-r--r--include/linux/nodemask.h5
-rw-r--r--include/linux/nsproxy.h2
-rw-r--r--include/linux/of.h31
-rw-r--r--include/linux/of_i2c.h12
-rw-r--r--include/linux/of_platform.h1
-rw-r--r--include/linux/omap-dma.h366
-rw-r--r--include/linux/page-flags.h8
-rw-r--r--include/linux/pci.h22
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu-rwsem.h91
-rw-r--r--include/linux/pid.h1
-rw-r--r--include/linux/pid_namespace.h13
-rw-r--r--include/linux/platform_data/asoc-s3c.h6
-rw-r--r--include/linux/platform_data/clocksource-nomadik-mtu.h (renamed from arch/arm/plat-nomadik/include/plat/mtu.h)2
-rw-r--r--include/linux/platform_data/crypto-ux500.h2
-rw-r--r--include/linux/platform_data/davinci_asp.h4
-rw-r--r--include/linux/platform_data/dma-mv_xor.h11
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h (renamed from arch/arm/plat-nomadik/include/plat/ste_dma40.h)0
-rw-r--r--include/linux/platform_data/i2c-cbus-gpio.h27
-rw-r--r--include/linux/platform_data/iommu-omap.h9
-rw-r--r--include/linux/platform_data/lp855x.h9
-rw-r--r--include/linux/platform_data/mtd-nomadik-nand.h16
-rw-r--r--include/linux/platform_data/omap-twl4030.h26
-rw-r--r--include/linux/platform_data/serial-omap.h (renamed from arch/arm/plat-omap/include/plat/omap-serial.h)0
-rw-r--r--include/linux/platform_data/spi-clps711x.h21
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h4
-rw-r--r--include/linux/platform_data/ti_am335x_adc.h14
-rw-r--r--include/linux/platform_data/usb-omap.h3
-rw-r--r--include/linux/power/bq2415x_charger.h95
-rw-r--r--include/linux/power/smartreflex.h14
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/proc_fs.h29
-rw-r--r--include/linux/ptrace.h6
-rw-r--r--include/linux/pvclock_gtod.h9
-rw-r--r--include/linux/pwm.h3
-rw-r--r--include/linux/raid/Kbuild0
-rw-r--r--include/linux/raid/pq.h4
-rw-r--r--include/linux/random.h19
-rw-r--r--include/linux/regulator/consumer.h13
-rw-r--r--include/linux/regulator/driver.h5
-rw-r--r--include/linux/regulator/max8973-regulator.h72
-rw-r--r--include/linux/regulator/tps51632-regulator.h47
-rw-r--r--include/linux/regulator/tps65090-regulator.h50
-rw-r--r--include/linux/res_counter.h17
-rw-r--r--include/linux/rmap.h33
-rw-r--r--include/linux/sched.h44
-rw-r--r--include/linux/security.h13
-rw-r--r--include/linux/signal.h3
-rw-r--r--include/linux/slab.h57
-rw-r--r--include/linux/slab_def.h9
-rw-r--r--include/linux/slub_def.h9
-rw-r--r--include/linux/spi/spi-tegra.h40
-rw-r--r--include/linux/spi/spi.h3
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h3
-rw-r--r--include/linux/ssb/ssb_driver_extif.h1
-rw-r--r--include/linux/stmmac.h2
-rw-r--r--include/linux/string.h11
-rw-r--r--include/linux/sunrpc/cache.h6
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/svc.h6
-rw-r--r--include/linux/sunrpc/svcsock.h21
-rw-r--r--include/linux/swiotlb.h20
-rw-r--r--include/linux/syscalls.h20
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/usb/Kbuild0
-rw-r--r--include/linux/usb/usbnet.h3
-rw-r--r--include/linux/user_namespace.h10
-rw-r--r--include/linux/utsname.h7
-rw-r--r--include/linux/vgaarb.h4
-rw-r--r--include/linux/virtio.h25
-rw-r--r--include/linux/virtio_scsi.h28
-rw-r--r--include/linux/vm_event_item.h14
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/linux/wait.h164
-rw-r--r--include/linux/watchdog.h4
-rw-r--r--include/media/adp1653.h4
-rw-r--r--include/media/davinci/vpbe_display.h15
-rw-r--r--include/media/davinci/vpbe_osd.h2
-rw-r--r--include/media/ir-kbd-i2c.h2
-rw-r--r--include/media/mt9v022.h16
-rw-r--r--include/media/rc-core.h4
-rw-r--r--include/media/rc-map.h64
-rw-r--r--include/media/s3c_camif.h45
-rw-r--r--include/media/smiapp.h2
-rw-r--r--include/media/v4l2-event.h2
-rw-r--r--include/media/v4l2-fh.h2
-rw-r--r--include/media/v4l2-ioctl.h2
-rw-r--r--include/media/v4l2-mem2mem.h3
-rw-r--r--include/media/videobuf2-core.h38
-rw-r--r--include/media/videobuf2-memops.h5
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/irda/irlmp.h2
-rw-r--r--include/net/ndisc.h7
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/sock.h4
-rw-r--r--include/rdma/Kbuild6
-rw-r--r--include/rdma/rdma_netlink.h36
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/scsi_transport_srp.h8
-rw-r--r--include/sound/Kbuild10
-rw-r--r--include/sound/asequencer.h594
-rw-r--r--include/sound/asound.h935
-rw-r--r--include/sound/cs4271.h1
-rw-r--r--include/sound/emu10k1.h359
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/sound/sb16_csp.h104
-rw-r--r--include/sound/sh_fsi.h6
-rw-r--r--include/sound/soc-dai.h1
-rw-r--r--include/sound/soc.h1
-rw-r--r--include/sound/tlv320aic32x4.h1
-rw-r--r--include/sound/vx_core.h6
-rw-r--r--include/target/target_core_backend.h49
-rw-r--r--include/target/target_core_base.h212
-rw-r--r--include/target/target_core_fabric.h15
-rw-r--r--include/trace/events/btrfs.h3
-rw-r--r--include/trace/events/ext4.h150
-rw-r--r--include/trace/events/gfpflags.h1
-rw-r--r--include/trace/events/migrate.h51
-rw-r--r--include/uapi/asm-generic/signal.h6
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/drm.h1
-rw-r--r--include/uapi/drm/exynos_drm.h203
-rw-r--r--include/uapi/drm/i915_drm.h16
-rw-r--r--include/uapi/drm/radeon_drm.h6
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/ethtool.h16
-rw-r--r--include/uapi/linux/if_bridge.h3
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/kvm.h21
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/mempolicy.h15
-rw-r--r--include/uapi/linux/module.h8
-rw-r--r--include/uapi/linux/pci_regs.h25
-rw-r--r--include/uapi/linux/ptrace.h5
-rw-r--r--include/uapi/linux/signal.h2
-rw-r--r--include/uapi/linux/swab.h12
-rw-r--r--include/uapi/linux/videodev2.h37
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/rdma/Kbuild6
-rw-r--r--include/uapi/rdma/ib_user_cm.h (renamed from include/rdma/ib_user_cm.h)0
-rw-r--r--include/uapi/rdma/ib_user_mad.h (renamed from include/rdma/ib_user_mad.h)0
-rw-r--r--include/uapi/rdma/ib_user_sa.h (renamed from include/rdma/ib_user_sa.h)0
-rw-r--r--include/uapi/rdma/ib_user_verbs.h (renamed from include/rdma/ib_user_verbs.h)0
-rw-r--r--include/uapi/rdma/rdma_netlink.h37
-rw-r--r--include/uapi/rdma/rdma_user_cm.h (renamed from include/rdma/rdma_user_cm.h)0
-rw-r--r--include/uapi/sound/Kbuild10
-rw-r--r--include/uapi/sound/asequencer.h614
-rw-r--r--include/uapi/sound/asound.h971
-rw-r--r--include/uapi/sound/asound_fm.h (renamed from include/sound/asound_fm.h)0
-rw-r--r--include/uapi/sound/compress_offload.h (renamed from include/sound/compress_offload.h)0
-rw-r--r--include/uapi/sound/compress_params.h (renamed from include/sound/compress_params.h)0
-rw-r--r--include/uapi/sound/emu10k1.h373
-rw-r--r--include/uapi/sound/hdsp.h (renamed from include/sound/hdsp.h)0
-rw-r--r--include/uapi/sound/hdspm.h (renamed from include/sound/hdspm.h)0
-rw-r--r--include/uapi/sound/sb16_csp.h122
-rw-r--r--include/uapi/sound/sfnt_info.h (renamed from include/sound/sfnt_info.h)0
-rw-r--r--include/video/da8xx-fb.h25
-rw-r--r--include/video/omap-panel-tfp410.h2
-rw-r--r--include/video/omapdss.h91
-rw-r--r--include/video/samsung_fimd.h168
-rw-r--r--include/video/sh_mipi_dsi.h4
-rw-r--r--include/video/sh_mobile_lcdc.h1
-rw-r--r--include/xen/interface/event_channel.h13
-rw-r--r--include/xen/interface/memory.h44
-rw-r--r--include/xen/interface/platform.h17
-rw-r--r--include/xen/xen-ops.h9
-rw-r--r--init/Kconfig48
-rw-r--r--init/do_mounts.c61
-rw-r--r--init/main.c7
-rw-r--r--init/version.c2
-rw-r--r--ipc/msgutil.c2
-rw-r--r--ipc/namespace.c33
-rw-r--r--kernel/Makefile10
-rw-r--r--kernel/audit_tree.c10
-rw-r--r--kernel/audit_watch.c4
-rw-r--r--kernel/cgroup.c3
-rw-r--r--kernel/compat.c17
-rw-r--r--kernel/cpuset.c32
-rw-r--r--kernel/cred.c154
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/fork.c86
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/kcmp.c1
-rw-r--r--kernel/kmod.c6
-rw-r--r--kernel/kthread.c2
-rw-r--r--kernel/modsign_certificate.S19
-rw-r--r--kernel/modsign_pubkey.c21
-rw-r--r--kernel/module.c444
-rw-r--r--kernel/nsproxy.c36
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/pid.c75
-rw-r--r--kernel/pid_namespace.c117
-rw-r--r--kernel/posix-cpu-timers.c3
-rw-r--r--kernel/printk.c40
-rw-r--r--kernel/profile.c7
-rw-r--r--kernel/ptrace.c13
-rw-r--r--kernel/res_counter.c42
-rw-r--r--kernel/sched/core.c96
-rw-r--r--kernel/sched/fair.c248
-rw-r--r--kernel/sched/features.h11
-rw-r--r--kernel/sched/sched.h12
-rw-r--r--kernel/seccomp.c13
-rw-r--r--kernel/signal.c76
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl.c45
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/time/timekeeping.c50
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace.c62
-rw-r--r--kernel/trace/trace_functions.c2
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_uprobe.c8
-rw-r--r--kernel/user.c2
-rw-r--r--kernel/user_namespace.c147
-rw-r--r--kernel/utsname.c34
-rw-r--r--kernel/wait.c2
-rw-r--r--kernel/watchdog.c24
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug20
-rw-r--r--lib/Makefile8
-rw-r--r--lib/asn1_decoder.c8
-rw-r--r--lib/atomic64.c17
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/dma-debug.c66
-rw-r--r--lib/dynamic_debug.c9
-rw-r--r--lib/earlycpio.c145
-rw-r--r--lib/interval_tree_test_main.c7
-rw-r--r--lib/kstrtox.c64
-rw-r--r--lib/lru_cache.c359
-rw-r--r--lib/of-reconfig-notifier-error-inject.c (renamed from lib/pSeries-reconfig-notifier-error-inject.c)22
-rw-r--r--lib/percpu-rwsem.c165
-rw-r--r--lib/raid6/Makefile9
-rw-r--r--lib/raid6/algos.c12
-rw-r--r--lib/raid6/altivec.uc3
-rw-r--r--lib/raid6/avx2.c251
-rw-r--r--lib/raid6/mmx.c2
-rw-r--r--lib/raid6/recov_avx2.c323
-rw-r--r--lib/raid6/recov_ssse3.c4
-rw-r--r--lib/raid6/sse1.c2
-rw-r--r--lib/raid6/sse2.c8
-rw-r--r--lib/raid6/test/Makefile29
-rw-r--r--lib/raid6/x86.h14
-rw-r--r--lib/random32.c97
-rw-r--r--lib/rbtree_test.c8
-rw-r--r--lib/scatterlist.c3
-rw-r--r--lib/swiotlb.c269
-rw-r--r--lib/vsprintf.c109
-rw-r--r--mm/Kconfig19
-rw-r--r--mm/bootmem.c59
-rw-r--r--mm/compaction.c139
-rw-r--r--mm/highmem.c1
-rw-r--r--mm/huge_memory.c473
-rw-r--r--mm/hugetlb.c59
-rw-r--r--mm/hugetlb_cgroup.c19
-rw-r--r--mm/internal.h7
-rw-r--r--mm/kmemleak.c3
-rw-r--r--mm/ksm.c16
-rw-r--r--mm/memcontrol.c1290
-rw-r--r--mm/memory-failure.c7
-rw-r--r--mm/memory.c232
-rw-r--r--mm/memory_hotplug.c128
-rw-r--r--mm/mempolicy.c427
-rw-r--r--mm/migrate.c339
-rw-r--r--mm/mmap.c42
-rw-r--r--mm/mprotect.c151
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/nobootmem.c22
-rw-r--r--mm/oom_kill.c52
-rw-r--r--mm/page-writeback.c25
-rw-r--r--mm/page_alloc.c178
-rw-r--r--mm/page_cgroup.c2
-rw-r--r--mm/pagewalk.c2
-rw-r--r--mm/pgtable-generic.c9
-rw-r--r--mm/rmap.c78
-rw-r--r--mm/shmem.c96
-rw-r--r--mm/slab.c383
-rw-r--r--mm/slab.h190
-rw-r--r--mm/slab_common.c292
-rw-r--r--mm/slob.c48
-rw-r--r--mm/slub.c447
-rw-r--r--mm/truncate.c23
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmscan.c133
-rw-r--r--mm/vmstat.c28
-rw-r--r--net/9p/trans_virtio.c3
-rw-r--r--net/atm/atm_sysfs.c40
-rw-r--r--net/batman-adv/bat_iv_ogm.c2
-rw-r--r--net/bluetooth/hidp/core.c9
-rw-r--r--net/bridge/br_if.c8
-rw-r--r--net/bridge/br_mdb.c22
-rw-r--r--net/bridge/br_multicast.c15
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/can/proc.c2
-rw-r--r--net/ceph/ceph_common.c3
-rw-r--r--net/ceph/messenger.c130
-rw-r--r--net/ceph/osd_client.c93
-rw-r--r--net/ceph/osdmap.c47
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/net-sysfs.c4
-rw-r--r--net/core/net_namespace.c32
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dns_resolver/dns_key.c15
-rw-r--r--net/ipv4/arp.c10
-rw-r--r--net/ipv4/inet_connection_sock.c16
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_input.c14
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv6/Makefile2
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ndisc.c19
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mac80211/driver-ops.h2
-rw-r--r--net/mac802154/ieee802154_dev.c4
-rw-r--r--net/netfilter/nf_log.c2
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/nfc/llcp/sock.c4
-rw-r--r--net/rds/ib_cm.c11
-rw-r--r--net/rds/ib_recv.c9
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sctp/Kconfig27
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/probe.c3
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c17
-rw-r--r--net/sunrpc/backchannel_rqst.c9
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/clnt.c48
-rw-r--r--net/sunrpc/rpc_pipe.c9
-rw-r--r--net/sunrpc/rpcb_clnt.c6
-rw-r--r--net/sunrpc/sched.c71
-rw-r--r--net/sunrpc/svc.c20
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c104
-rw-r--r--net/sunrpc/xdr.c5
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c4
-rw-r--r--net/sunrpc/xprtsock.c19
-rw-r--r--net/wireless/reg.c7
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--scripts/Makefile.modsign32
-rwxr-xr-xscripts/checkpatch.pl150
-rw-r--r--scripts/coccinelle/api/d_find_alias.cocci80
-rw-r--r--scripts/coccinelle/misc/warn.cocci109
-rwxr-xr-xscripts/config1
-rw-r--r--scripts/headers_install.pl6
-rwxr-xr-xscripts/kernel-doc34
-rw-r--r--scripts/pnmtologo.c7
-rwxr-xr-xscripts/tags.sh57
-rw-r--r--security/capability.c6
-rw-r--r--security/commoncap.c25
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_api.c4
-rw-r--r--security/integrity/ima/ima_main.c21
-rw-r--r--security/integrity/ima/ima_policy.c3
-rw-r--r--security/keys/key.c6
-rw-r--r--security/keys/keyctl.c15
-rw-r--r--security/keys/keyring.c10
-rw-r--r--security/keys/process_keys.c96
-rw-r--r--security/keys/request_key.c21
-rw-r--r--security/security.c10
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/smack/Kconfig6
-rw-r--r--security/smack/smackfs.c17
-rw-r--r--security/yama/yama_lsm.c100
-rw-r--r--sound/Kconfig3
-rw-r--r--sound/arm/aaci.c18
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c2
-rw-r--r--sound/arm/pxa2xx-ac97.c6
-rw-r--r--sound/atmel/abdac.c8
-rw-r--r--sound/atmel/ac97c.c10
-rw-r--r--sound/core/oss/pcm_plugin.c6
-rw-r--r--sound/core/pcm.c3
-rw-r--r--sound/core/pcm_compat.c20
-rw-r--r--sound/core/pcm_lib.c57
-rw-r--r--sound/core/pcm_native.c4
-rw-r--r--sound/core/seq/seq_device.c2
-rw-r--r--sound/drivers/Kconfig2
-rw-r--r--sound/drivers/aloop.c52
-rw-r--r--sound/drivers/dummy.c87
-rw-r--r--sound/drivers/ml403-ac97cr.c10
-rw-r--r--sound/drivers/mpu401/mpu401.c18
-rw-r--r--sound/drivers/mtpav.c14
-rw-r--r--sound/drivers/mts64.c40
-rw-r--r--sound/drivers/pcsp/pcsp.c14
-rw-r--r--sound/drivers/pcsp/pcsp_input.c2
-rw-r--r--sound/drivers/pcsp/pcsp_input.h2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/drivers/pcsp/pcsp_mixer.c10
-rw-r--r--sound/drivers/portman2x4.c18
-rw-r--r--sound/drivers/serial-u16550.c32
-rw-r--r--sound/drivers/virmidi.c6
-rw-r--r--sound/drivers/vx/vx_hwdep.c139
-rw-r--r--sound/firewire/Kconfig13
-rw-r--r--sound/firewire/Makefile2
-rw-r--r--sound/firewire/scs1x.c527
-rw-r--r--sound/firewire/speakers.c8
-rw-r--r--sound/isa/Kconfig4
-rw-r--r--sound/isa/ad1816a/ad1816a.c18
-rw-r--r--sound/isa/ad1816a/ad1816a_lib.c19
-rw-r--r--sound/isa/ad1848/ad1848.c8
-rw-r--r--sound/isa/adlib.c8
-rw-r--r--sound/isa/als100.c22
-rw-r--r--sound/isa/azt2320.c26
-rw-r--r--sound/isa/cmi8328.c8
-rw-r--r--sound/isa/cmi8330.c42
-rw-r--r--sound/isa/cs423x/cs4231.c8
-rw-r--r--sound/isa/cs423x/cs4236.c50
-rw-r--r--sound/isa/es1688/es1688.c28
-rw-r--r--sound/isa/es18xx.c80
-rw-r--r--sound/isa/galaxy/galaxy.c26
-rw-r--r--sound/isa/gus/gusclassic.c15
-rw-r--r--sound/isa/gus/gusextreme.c24
-rw-r--r--sound/isa/gus/gusmax.c16
-rw-r--r--sound/isa/gus/interwave.c54
-rw-r--r--sound/isa/msnd/msnd.h2
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c44
-rw-r--r--sound/isa/msnd/msnd_pinnacle_mixer.c2
-rw-r--r--sound/isa/opl3sa2.c40
-rw-r--r--sound/isa/opti9xx/miro.c68
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c42
-rw-r--r--sound/isa/sb/emu8000.c30
-rw-r--r--sound/isa/sb/jazz16.c18
-rw-r--r--sound/isa/sb/sb16.c26
-rw-r--r--sound/isa/sb/sb8.c8
-rw-r--r--sound/isa/sc6000.c38
-rw-r--r--sound/isa/sscape.c32
-rw-r--r--sound/isa/wavefront/wavefront.c53
-rw-r--r--sound/isa/wavefront/wavefront_fx.c2
-rw-r--r--sound/isa/wavefront/wavefront_midi.c2
-rw-r--r--sound/isa/wavefront/wavefront_synth.c14
-rw-r--r--sound/mips/au1x00.c4
-rw-r--r--sound/mips/hal2.c14
-rw-r--r--sound/mips/sgio2audio.c28
-rw-r--r--sound/oss/ad1848.c2
-rw-r--r--sound/oss/kahlua.c10
-rw-r--r--sound/oss/sb_audio.c3
-rw-r--r--sound/parisc/harmony.c12
-rw-r--r--sound/pci/Kconfig5
-rw-r--r--sound/pci/ad1889.c18
-rw-r--r--sound/pci/ak4531_codec.c10
-rw-r--r--sound/pci/ali5451/ali5451.c32
-rw-r--r--sound/pci/als300.c14
-rw-r--r--sound/pci/als4000.c12
-rw-r--r--sound/pci/asihpi/asihpi.c55
-rw-r--r--sound/pci/asihpi/hpidspcd.c22
-rw-r--r--sound/pci/asihpi/hpioctl.c23
-rw-r--r--sound/pci/asihpi/hpioctl.h6
-rw-r--r--sound/pci/atiixp.c32
-rw-r--r--sound/pci/atiixp_modem.c20
-rw-r--r--sound/pci/au88x0/au88x0.c10
-rw-r--r--sound/pci/au88x0/au88x0_a3d.c6
-rw-r--r--sound/pci/au88x0/au88x0_core.c9
-rw-r--r--sound/pci/au88x0/au88x0_eq.c10
-rw-r--r--sound/pci/au88x0/au88x0_game.c2
-rw-r--r--sound/pci/au88x0/au88x0_mixer.c2
-rw-r--r--sound/pci/au88x0/au88x0_mpu401.c2
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c6
-rw-r--r--sound/pci/aw2/aw2-alsa.c28
-rw-r--r--sound/pci/azt3328.c22
-rw-r--r--sound/pci/bt87x.c22
-rw-r--r--sound/pci/ca0106/ca0106_main.c12
-rw-r--r--sound/pci/ca0106/ca0106_mixer.c26
-rw-r--r--sound/pci/ca0106/ca0106_proc.c2
-rw-r--r--sound/pci/ca0106/ca_midi.c2
-rw-r--r--sound/pci/cmipci.c48
-rw-r--r--sound/pci/cs4281.c30
-rw-r--r--sound/pci/cs46xx/cs46xx.c8
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c35
-rw-r--r--sound/pci/cs5530.c16
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c18
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h10
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c10
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/ctxfi/ctatc.c20
-rw-r--r--sound/pci/ctxfi/ctatc.h8
-rw-r--r--sound/pci/ctxfi/cthardware.c4
-rw-r--r--sound/pci/ctxfi/cthw20k1.c4
-rw-r--r--sound/pci/ctxfi/cthw20k2.c4
-rw-r--r--sound/pci/ctxfi/xfi.c6
-rw-r--r--sound/pci/echoaudio/echoaudio.c46
-rw-r--r--sound/pci/echoaudio/echoaudio.h4
-rw-r--r--sound/pci/echoaudio/midi.c4
-rw-r--r--sound/pci/emu10k1/emu10k1.c12
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c96
-rw-r--r--sound/pci/emu10k1/emu10k1_patch.c2
-rw-r--r--sound/pci/emu10k1/emu10k1x.c29
-rw-r--r--sound/pci/emu10k1/emufx.c25
-rw-r--r--sound/pci/emu10k1/emumixer.c22
-rw-r--r--sound/pci/emu10k1/emumpu401.c6
-rw-r--r--sound/pci/emu10k1/emupcm.c11
-rw-r--r--sound/pci/emu10k1/emuproc.c2
-rw-r--r--sound/pci/emu10k1/p16v.c8
-rw-r--r--sound/pci/emu10k1/timer.c2
-rw-r--r--sound/pci/ens1370.c52
-rw-r--r--sound/pci/es1938.c20
-rw-r--r--sound/pci/es1968.c44
-rw-r--r--sound/pci/fm801.c26
-rw-r--r--sound/pci/hda/Kconfig5
-rw-r--r--sound/pci/hda/Makefile1
-rw-r--r--sound/pci/hda/hda_auto_parser.c106
-rw-r--r--sound/pci/hda/hda_codec.c214
-rw-r--r--sound/pci/hda/hda_codec.h6
-rw-r--r--sound/pci/hda/hda_hwdep.c2
-rw-r--r--sound/pci/hda/hda_intel.c435
-rw-r--r--sound/pci/hda/hda_intel_trace.h62
-rw-r--r--sound/pci/hda/hda_jack.c93
-rw-r--r--sound/pci/hda/hda_jack.h6
-rw-r--r--sound/pci/hda/hda_local.h17
-rw-r--r--sound/pci/hda/patch_analog.c73
-rw-r--r--sound/pci/hda/patch_cirrus.c47
-rw-r--r--sound/pci/hda/patch_conexant.c99
-rw-r--r--sound/pci/hda/patch_hdmi.c68
-rw-r--r--sound/pci/hda/patch_realtek.c197
-rw-r--r--sound/pci/hda/patch_sigmatel.c152
-rw-r--r--sound/pci/hda/patch_via.c186
-rw-r--r--sound/pci/ice1712/Makefile2
-rw-r--r--sound/pci/ice1712/amp.c7
-rw-r--r--sound/pci/ice1712/aureon.c28
-rw-r--r--sound/pci/ice1712/delta.c45
-rw-r--r--sound/pci/ice1712/ews.c33
-rw-r--r--sound/pci/ice1712/hoontech.c27
-rw-r--r--sound/pci/ice1712/ice1712.c103
-rw-r--r--sound/pci/ice1712/ice1712.h12
-rw-r--r--sound/pci/ice1712/ice1724.c85
-rw-r--r--sound/pci/ice1712/juli.c26
-rw-r--r--sound/pci/ice1712/maya44.c21
-rw-r--r--sound/pci/ice1712/phase.c25
-rw-r--r--sound/pci/ice1712/pontis.c11
-rw-r--r--sound/pci/ice1712/prodigy192.c17
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c21
-rw-r--r--sound/pci/ice1712/psc724.c464
-rw-r--r--sound/pci/ice1712/psc724.h13
-rw-r--r--sound/pci/ice1712/quartet.c32
-rw-r--r--sound/pci/ice1712/revo.c29
-rw-r--r--sound/pci/ice1712/se.c31
-rw-r--r--sound/pci/ice1712/vt1720_mobo.c11
-rw-r--r--sound/pci/ice1712/wm8766.c361
-rw-r--r--sound/pci/ice1712/wm8766.h163
-rw-r--r--sound/pci/ice1712/wm8776.c633
-rw-r--r--sound/pci/ice1712/wm8776.h226
-rw-r--r--sound/pci/ice1712/wtm.c11
-rw-r--r--sound/pci/intel8x0.c56
-rw-r--r--sound/pci/intel8x0m.c30
-rw-r--r--sound/pci/korg1212/korg1212.c12
-rw-r--r--sound/pci/lola/lola.c14
-rw-r--r--sound/pci/lola/lola_clock.c2
-rw-r--r--sound/pci/lola/lola_mixer.c32
-rw-r--r--sound/pci/lola/lola_pcm.c4
-rw-r--r--sound/pci/lola/lola_proc.c2
-rw-r--r--sound/pci/lx6464es/lx6464es.c30
-rw-r--r--sound/pci/lx6464es/lx_core.c2
-rw-r--r--sound/pci/lx6464es/lx_core.h2
-rw-r--r--sound/pci/maestro3.c24
-rw-r--r--sound/pci/mixart/mixart.c12
-rw-r--r--sound/pci/mixart/mixart_hwdep.c76
-rw-r--r--sound/pci/nm256/nm256.c18
-rw-r--r--sound/pci/oxygen/oxygen.c10
-rw-r--r--sound/pci/oxygen/virtuoso.c11
-rw-r--r--sound/pci/oxygen/xonar_cs43xx.c4
-rw-r--r--sound/pci/oxygen/xonar_pcm179x.c4
-rw-r--r--sound/pci/oxygen/xonar_wm87x6.c10
-rw-r--r--sound/pci/pcxhr/pcxhr.c14
-rw-r--r--sound/pci/pcxhr/pcxhr_hwdep.c86
-rw-r--r--sound/pci/riptide/riptide.c20
-rw-r--r--sound/pci/rme32.c10
-rw-r--r--sound/pci/rme96.c14
-rw-r--r--sound/pci/rme9652/hdsp.c77
-rw-r--r--sound/pci/rme9652/hdspm.c439
-rw-r--r--sound/pci/rme9652/rme9652.c22
-rw-r--r--sound/pci/sis7019.c18
-rw-r--r--sound/pci/sonicvibes.c37
-rw-r--r--sound/pci/trident/trident.c8
-rw-r--r--sound/pci/trident/trident_main.c46
-rw-r--r--sound/pci/via82xx.c50
-rw-r--r--sound/pci/via82xx_modem.c26
-rw-r--r--sound/pci/vx222/vx222.c14
-rw-r--r--sound/pci/ymfpci/ymfpci.c12
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c50
-rw-r--r--sound/ppc/awacs.c54
-rw-r--r--sound/ppc/beep.c2
-rw-r--r--sound/ppc/burgundy.c22
-rw-r--r--sound/ppc/daca.c2
-rw-r--r--sound/ppc/keywest.c4
-rw-r--r--sound/ppc/pmac.c12
-rw-r--r--sound/ppc/powermac.c6
-rw-r--r--sound/ppc/snd_ps3.c12
-rw-r--r--sound/ppc/tumbler.c16
-rw-r--r--sound/sh/aica.c13
-rw-r--r--sound/sh/sh_dac_audio.c10
-rw-r--r--sound/soc/atmel/Kconfig13
-rw-r--r--sound/soc/atmel/Makefile4
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c240
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c401
-rw-r--r--sound/soc/atmel/atmel-pcm.c401
-rw-r--r--sound/soc/atmel/atmel-pcm.h34
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c168
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.h3
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c116
-rw-r--r--sound/soc/au1x/ac97c.c6
-rw-r--r--sound/soc/au1x/db1000.c6
-rw-r--r--sound/soc/au1x/db1200.c8
-rw-r--r--sound/soc/au1x/dbdma2.c6
-rw-r--r--sound/soc/au1x/dma.c6
-rw-r--r--sound/soc/au1x/i2sc.c6
-rw-r--r--sound/soc/au1x/psc-ac97.c6
-rw-r--r--sound/soc/au1x/psc-i2s.c6
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c6
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c6
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c6
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c6
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c6
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.c6
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c6
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c6
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1373.c4
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1701.c4
-rw-r--r--sound/soc/blackfin/bfin-eval-adav80x.c4
-rw-r--r--sound/soc/cirrus/edb93xx.c6
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c6
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c4
-rw-r--r--sound/soc/cirrus/ep93xx-pcm.c6
-rw-r--r--sound/soc/cirrus/simone.c6
-rw-r--r--sound/soc/cirrus/snappercl15.c6
-rw-r--r--sound/soc/codecs/88pm860x-codec.c6
-rw-r--r--sound/soc/codecs/Kconfig16
-rw-r--r--sound/soc/codecs/Makefile6
-rw-r--r--sound/soc/codecs/ab8500-codec.c8
-rw-r--r--sound/soc/codecs/ac97.c6
-rw-r--r--sound/soc/codecs/ad1836.c6
-rw-r--r--sound/soc/codecs/ad193x.c14
-rw-r--r--sound/soc/codecs/ad1980.c6
-rw-r--r--sound/soc/codecs/ad73311.c4
-rw-r--r--sound/soc/codecs/adau1373.c8
-rw-r--r--sound/soc/codecs/adau1701.c8
-rw-r--r--sound/soc/codecs/adav80x.c20
-rw-r--r--sound/soc/codecs/ads117x.c6
-rw-r--r--sound/soc/codecs/ak4104.c69
-rw-r--r--sound/soc/codecs/ak4535.c15
-rw-r--r--sound/soc/codecs/ak4641.c8
-rw-r--r--sound/soc/codecs/ak4642.c31
-rw-r--r--sound/soc/codecs/ak4671.c8
-rw-r--r--sound/soc/codecs/alc5623.c8
-rw-r--r--sound/soc/codecs/alc5632.c8
-rw-r--r--sound/soc/codecs/arizona.c53
-rw-r--r--sound/soc/codecs/arizona.h71
-rw-r--r--sound/soc/codecs/cq93vc.c2
-rw-r--r--sound/soc/codecs/cs4271.c35
-rw-r--r--sound/soc/codecs/cs42l52.c2
-rw-r--r--sound/soc/codecs/cs42l73.c124
-rw-r--r--sound/soc/codecs/da7210.c38
-rw-r--r--sound/soc/codecs/da732x.c8
-rw-r--r--sound/soc/codecs/da9055.c51
-rw-r--r--sound/soc/codecs/dfbmcs320.c6
-rw-r--r--sound/soc/codecs/dmic.c6
-rw-r--r--sound/soc/codecs/isabelle.c8
-rw-r--r--sound/soc/codecs/jz4740.c148
-rw-r--r--sound/soc/codecs/lm4857.c8
-rw-r--r--sound/soc/codecs/lm49453.c18
-rw-r--r--sound/soc/codecs/max9768.c15
-rw-r--r--sound/soc/codecs/max98088.c16
-rw-r--r--sound/soc/codecs/max98090.c577
-rw-r--r--sound/soc/codecs/max98095.c4
-rw-r--r--sound/soc/codecs/max9850.c8
-rw-r--r--sound/soc/codecs/max9877.c8
-rw-r--r--sound/soc/codecs/mc13783.c2
-rw-r--r--sound/soc/codecs/ml26124.c8
-rw-r--r--sound/soc/codecs/omap-hdmi.c6
-rw-r--r--sound/soc/codecs/pcm3008.c6
-rw-r--r--sound/soc/codecs/rt5631.c6
-rw-r--r--sound/soc/codecs/sgtl5000.c8
-rw-r--r--sound/soc/codecs/si476x.c255
-rw-r--r--sound/soc/codecs/sigmadsp.c2
-rw-r--r--sound/soc/codecs/sn95031.c6
-rw-r--r--sound/soc/codecs/ssm2602.c12
-rw-r--r--sound/soc/codecs/sta32x.c8
-rw-r--r--sound/soc/codecs/sta529.c8
-rw-r--r--sound/soc/codecs/stac9766.c6
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c32
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h3
-rw-r--r--sound/soc/codecs/tlv320dac33.c8
-rw-r--r--sound/soc/codecs/tpa6130a2.c31
-rw-r--r--sound/soc/codecs/twl4030.c6
-rw-r--r--sound/soc/codecs/twl6040.c6
-rw-r--r--sound/soc/codecs/uda134x.c6
-rw-r--r--sound/soc/codecs/uda1380.c8
-rw-r--r--sound/soc/codecs/wl1273.c6
-rw-r--r--sound/soc/codecs/wm0010.c425
-rw-r--r--sound/soc/codecs/wm1250-ev1.c10
-rw-r--r--sound/soc/codecs/wm2000.c10
-rw-r--r--sound/soc/codecs/wm2200.c277
-rw-r--r--sound/soc/codecs/wm5100.c10
-rw-r--r--sound/soc/codecs/wm5102.c144
-rw-r--r--sound/soc/codecs/wm5110.c75
-rw-r--r--sound/soc/codecs/wm8350.c10
-rw-r--r--sound/soc/codecs/wm8400.c20
-rw-r--r--sound/soc/codecs/wm8510.c17
-rw-r--r--sound/soc/codecs/wm8523.c8
-rw-r--r--sound/soc/codecs/wm8711.c14
-rw-r--r--sound/soc/codecs/wm8727.c6
-rw-r--r--sound/soc/codecs/wm8728.c14
-rw-r--r--sound/soc/codecs/wm8731.c14
-rw-r--r--sound/soc/codecs/wm8737.c14
-rw-r--r--sound/soc/codecs/wm8741.c10
-rw-r--r--sound/soc/codecs/wm8750.c100
-rw-r--r--sound/soc/codecs/wm8753.c47
-rw-r--r--sound/soc/codecs/wm8770.c223
-rw-r--r--sound/soc/codecs/wm8776.c14
-rw-r--r--sound/soc/codecs/wm8782.c6
-rw-r--r--sound/soc/codecs/wm8804.c31
-rw-r--r--sound/soc/codecs/wm8900.c14
-rw-r--r--sound/soc/codecs/wm8903.c8
-rw-r--r--sound/soc/codecs/wm8904.c8
-rw-r--r--sound/soc/codecs/wm8940.c8
-rw-r--r--sound/soc/codecs/wm8955.c19
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c79
-rw-r--r--sound/soc/codecs/wm8960.c10
-rw-r--r--sound/soc/codecs/wm8961.c8
-rw-r--r--sound/soc/codecs/wm8962.c32
-rw-r--r--sound/soc/codecs/wm8971.c88
-rw-r--r--sound/soc/codecs/wm8974.c8
-rw-r--r--sound/soc/codecs/wm8978.c24
-rw-r--r--sound/soc/codecs/wm8983.c14
-rw-r--r--sound/soc/codecs/wm8985.c44
-rw-r--r--sound/soc/codecs/wm8988.c28
-rw-r--r--sound/soc/codecs/wm8990.c8
-rw-r--r--sound/soc/codecs/wm8991.c8
-rw-r--r--sound/soc/codecs/wm8993.c22
-rw-r--r--sound/soc/codecs/wm8994.c349
-rw-r--r--sound/soc/codecs/wm8994.h13
-rw-r--r--sound/soc/codecs/wm8995.c54
-rw-r--r--sound/soc/codecs/wm8996.c8
-rw-r--r--sound/soc/codecs/wm9081.c30
-rw-r--r--sound/soc/codecs/wm9090.c24
-rw-r--r--sound/soc/codecs/wm9705.c6
-rw-r--r--sound/soc/codecs/wm9712.c6
-rw-r--r--sound/soc/codecs/wm9713.c6
-rw-r--r--sound/soc/codecs/wm_adsp.c699
-rw-r--r--sound/soc/codecs/wm_adsp.h59
-rw-r--r--sound/soc/codecs/wmfw.h128
-rw-r--r--sound/soc/davinci/davinci-evm.c5
-rw-r--r--sound/soc/davinci/davinci-mcasp.c152
-rw-r--r--sound/soc/davinci/davinci-mcasp.h15
-rw-r--r--sound/soc/davinci/davinci-pcm.c53
-rw-r--r--sound/soc/davinci/davinci-pcm.h2
-rw-r--r--sound/soc/fsl/Kconfig20
-rw-r--r--sound/soc/fsl/Makefile14
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c6
-rw-r--r--sound/soc/fsl/fsl_dma.c6
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/fsl/imx-audmux.c8
-rw-r--r--sound/soc/fsl/imx-mc13783.c6
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c6
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c6
-rw-r--r--sound/soc/fsl/imx-pcm.c4
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c7
-rw-r--r--sound/soc/fsl/imx-ssi.c4
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c8
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c8
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c4
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c6
-rw-r--r--sound/soc/fsl/p1022_ds.c4
-rw-r--r--sound/soc/fsl/p1022_rdk.c392
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c8
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c6
-rw-r--r--sound/soc/jz4740/jz4740-pcm.c6
-rw-r--r--sound/soc/jz4740/qi_lb60.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c22
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c241
-rw-r--r--sound/soc/kirkwood/kirkwood-openrd.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-t5325.c6
-rw-r--r--sound/soc/kirkwood/kirkwood.h11
-rw-r--r--sound/soc/mid-x86/mfld_machine.c6
-rw-r--r--sound/soc/mxs/mxs-pcm.c4
-rw-r--r--sound/soc/mxs/mxs-saif.c8
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c8
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c6
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c6
-rw-r--r--sound/soc/omap/ams-delta.c6
-rw-r--r--sound/soc/omap/mcbsp.c11
-rw-r--r--sound/soc/omap/mcbsp.h10
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c10
-rw-r--r--sound/soc/omap/omap-dmic.c6
-rw-r--r--sound/soc/omap/omap-hdmi-card.c6
-rw-r--r--sound/soc/omap/omap-hdmi.c6
-rw-r--r--sound/soc/omap/omap-mcbsp.c11
-rw-r--r--sound/soc/omap/omap-mcpdm.c6
-rw-r--r--sound/soc/omap/omap-pcm.c6
-rw-r--r--sound/soc/omap/omap-twl4030.c6
-rw-r--r--sound/soc/omap/zoom2.c7
-rw-r--r--sound/soc/pxa/brownstone.c6
-rw-r--r--sound/soc/pxa/corgi.c6
-rw-r--r--sound/soc/pxa/e740_wm9705.c6
-rw-r--r--sound/soc/pxa/e750_wm9705.c6
-rw-r--r--sound/soc/pxa/e800_wm9712.c6
-rw-r--r--sound/soc/pxa/hx4700.c6
-rw-r--r--sound/soc/pxa/imote2.c6
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c6
-rw-r--r--sound/soc/pxa/mmp-pcm.c6
-rw-r--r--sound/soc/pxa/mmp-sspa.c6
-rw-r--r--sound/soc/pxa/palm27x.c4
-rw-r--r--sound/soc/pxa/poodle.c6
-rw-r--r--sound/soc/pxa/pxa-ssp.c6
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c8
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c4
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c6
-rw-r--r--sound/soc/pxa/tosa.c6
-rw-r--r--sound/soc/pxa/ttc-dkb.c6
-rw-r--r--sound/soc/s6000/s6000-i2s.c6
-rw-r--r--sound/soc/s6000/s6000-pcm.c6
-rw-r--r--sound/soc/samsung/ac97.c22
-rw-r--r--sound/soc/samsung/bells.c228
-rw-r--r--sound/soc/samsung/dma.c24
-rw-r--r--sound/soc/samsung/dma.h3
-rw-r--r--sound/soc/samsung/goni_wm8994.c2
-rw-r--r--sound/soc/samsung/h1940_uda1380.c2
-rw-r--r--sound/soc/samsung/i2s.c33
-rw-r--r--sound/soc/samsung/idma.c6
-rw-r--r--sound/soc/samsung/jive_wm8750.c2
-rw-r--r--sound/soc/samsung/littlemill.c10
-rw-r--r--sound/soc/samsung/ln2440sbc_alc650.c2
-rw-r--r--sound/soc/samsung/lowland.c8
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c2
-rw-r--r--sound/soc/samsung/pcm.c27
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c2
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c26
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c26
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c6
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_hermes.c6
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c6
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c2
-rw-r--r--sound/soc/samsung/smartq_wm8987.c2
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c2
-rw-r--r--sound/soc/samsung/smdk_spdif.c2
-rw-r--r--sound/soc/samsung/smdk_wm8580.c6
-rw-r--r--sound/soc/samsung/smdk_wm8580pcm.c8
-rw-r--r--sound/soc/samsung/smdk_wm8994.c10
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c8
-rw-r--r--sound/soc/samsung/smdk_wm9713.c2
-rw-r--r--sound/soc/samsung/spdif.c28
-rw-r--r--sound/soc/samsung/speyside.c8
-rw-r--r--sound/soc/samsung/tobermory.c8
-rw-r--r--sound/soc/sh/dma-sh7760.c6
-rw-r--r--sound/soc/sh/fsi.c550
-rw-r--r--sound/soc/sh/hac.c6
-rw-r--r--sound/soc/sh/siu_dai.c6
-rw-r--r--sound/soc/sh/ssi.c6
-rw-r--r--sound/soc/soc-cache.c10
-rw-r--r--sound/soc/soc-compress.c2
-rw-r--r--sound/soc/soc-core.c237
-rw-r--r--sound/soc/soc-dapm.c134
-rw-r--r--sound/soc/soc-dmaengine-pcm.c2
-rw-r--r--sound/soc/soc-jack.c16
-rw-r--r--sound/soc/soc-pcm.c207
-rw-r--r--sound/soc/soc-utils.c6
-rw-r--r--sound/soc/spear/spear_pcm.c6
-rw-r--r--sound/soc/tegra/tegra20_das.c8
-rw-r--r--sound/soc/tegra/tegra20_i2s.c10
-rw-r--r--sound/soc/tegra/tegra20_spdif.c8
-rw-r--r--sound/soc/tegra/tegra30_ahub.c14
-rw-r--r--sound/soc/tegra/tegra30_i2s.c10
-rw-r--r--sound/soc/tegra/tegra_alc5632.c8
-rw-r--r--sound/soc/tegra/tegra_pcm.c4
-rw-r--r--sound/soc/tegra/tegra_wm8753.c8
-rw-r--r--sound/soc/tegra/tegra_wm8903.c8
-rw-r--r--sound/soc/tegra/trimslice.c8
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c6
-rw-r--r--sound/soc/txx9/txx9aclc.c6
-rw-r--r--sound/soc/ux500/mop500.c14
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c59
-rw-r--r--sound/soc/ux500/ux500_msp_dai.h1
-rw-r--r--sound/soc/ux500/ux500_pcm.c22
-rw-r--r--sound/soc/ux500/ux500_pcm.h3
-rw-r--r--sound/sound_core.c3
-rw-r--r--sound/sparc/amd7930.c16
-rw-r--r--sound/sparc/cs4231.c38
-rw-r--r--sound/sparc/dbri.c28
-rw-r--r--sound/spi/at73c213.c20
-rw-r--r--sound/usb/6fire/chip.c4
-rw-r--r--sound/usb/6fire/comm.c5
-rw-r--r--sound/usb/6fire/comm.h2
-rw-r--r--sound/usb/6fire/control.c8
-rw-r--r--sound/usb/6fire/control.h2
-rw-r--r--sound/usb/6fire/firmware.h2
-rw-r--r--sound/usb/6fire/midi.c2
-rw-r--r--sound/usb/6fire/midi.h2
-rw-r--r--sound/usb/6fire/pcm.c11
-rw-r--r--sound/usb/6fire/pcm.h2
-rw-r--r--sound/usb/Kconfig2
-rw-r--r--sound/usb/caiaq/control.c8
-rw-r--r--sound/usb/caiaq/device.c6
-rw-r--r--sound/usb/card.c7
-rw-r--r--sound/usb/card.h2
-rw-r--r--sound/usb/endpoint.c53
-rw-r--r--sound/usb/endpoint.h5
-rw-r--r--sound/usb/format.c10
-rw-r--r--sound/usb/midi.c95
-rw-r--r--sound/usb/mixer.c74
-rw-r--r--sound/usb/mixer.h1
-rw-r--r--sound/usb/mixer_quirks.c223
-rw-r--r--sound/usb/pcm.c175
-rw-r--r--sound/usb/quirks-table.h321
-rw-r--r--sound/usb/quirks.c93
-rw-r--r--sound/usb/stream.c230
-rw-r--r--sound/usb/usbaudio.h2
-rw-r--r--tools/firewire/nosy-dump.c4
-rw-r--r--tools/lguest/lguest.c84
-rw-r--r--tools/perf/Documentation/perf-record.txt2
-rw-r--r--tools/power/x86/turbostat/Makefile21
-rw-r--r--tools/power/x86/turbostat/turbostat.8103
-rw-r--r--tools/power/x86/turbostat/turbostat.c677
-rw-r--r--tools/power/x86/x86_energy_perf_policy/Makefile6
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c2
-rwxr-xr-xtools/testing/ktest/ktest.pl120
-rw-r--r--tools/testing/ktest/sample.conf46
-rw-r--r--tools/testing/selftests/breakpoints/Makefile2
-rw-r--r--tools/testing/selftests/cpu-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/kcmp/Makefile6
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c6
-rw-r--r--tools/testing/selftests/memory-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/mqueue/Makefile4
-rw-r--r--tools/testing/selftests/vm/Makefile2
-rw-r--r--tools/virtio/virtio_test.c4
-rw-r--r--usr/gen_init_cpio.c1
-rw-r--r--virt/kvm/assigned-dev.c36
-rw-r--r--virt/kvm/eventfd.c8
-rw-r--r--virt/kvm/iommu.c10
-rw-r--r--virt/kvm/irq_comm.c83
-rw-r--r--virt/kvm/kvm_main.c57
5245 files changed, 291611 insertions, 105239 deletions
diff --git a/.gitignore b/.gitignore
index 92bd0e45dfa..3b8b9b33be3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,7 +60,6 @@ modules.builtin
# Generated include files
#
include/config
-include/linux/version.h
include/generated
arch/*/include/generated
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index ceb1ff73546..8afe64fb200 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -136,8 +136,6 @@ fault-injection/
- dir with docs about the fault injection capabilities infrastructure.
fb/
- directory with info on the frame buffer graphics abstraction layer.
-feature-removal-schedule.txt
- - list of files and features that are going to be removed.
filesystems/
- info on the vfs and the various filesystems that Linux supports.
firmware_class/
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index 9feaf16f161..10069828568 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -36,9 +36,6 @@ The different levels of stability are:
the kernel, but are marked to be removed at some later point in
time. The description of the interface will document the reason
why it is obsolete and when it can be expected to be removed.
- The file Documentation/feature-removal-schedule.txt may describe
- some of these interfaces, giving a schedule for when they will
- be removed.
removed/
This directory contains a list of the old interfaces that have
diff --git a/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-koneplus b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-koneplus
index c2a270b45b0..833fd59926a 100644
--- a/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-koneplus
+++ b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-koneplus
@@ -8,3 +8,41 @@ Description: The integer value of this attribute ranges from 0-4.
When written, this file sets the number of the startup profile
and the mouse activates this profile immediately.
Please use actual_profile, it does the same thing.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/firmware_version
+Date: October 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+ further usage in other programs. To receive the real version
+ number the decimal point has to be shifted 2 positions to the
+ left. E.g. a returned value of 121 means 1.21
+ This file is readonly.
+ Please read binary attribute info which contains firmware version.
+Users: http://roccat.sourceforge.net
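
The conversion described in the firmware_version entry above, shifting the decimal point two places to the left, is just an integer division by 100. A minimal C sketch, not part of the patch, with the raw value hard-coded purely for illustration:

#include <stdio.h>

int main(void)
{
	int raw = 121;	/* value as read from the firmware_version attribute */

	/* shifting the decimal point two places left == dividing by 100 */
	printf("firmware %d.%02d\n", raw / 100, raw % 100);
	return 0;
}
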
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_buttons
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_buttons holds information about button layout.
+ When read, these files return the respective profile buttons.
+ The returned data is 77 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_buttons instead.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_settings
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_settings holds information like resolution, sensitivity
+ and light effects.
+ When read, these files return the respective profile settings.
+ The returned data is 43 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_settings instead.
+Users: http://roccat.sourceforge.net \ No newline at end of file
diff --git a/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-kovaplus b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-kovaplus
new file mode 100644
index 00000000000..4a98e02b6c6
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-kovaplus
@@ -0,0 +1,66 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_cpi
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-4.
+ When read, this attribute returns the number of the active
+ cpi level.
+ This file is readonly.
+ Has never been used. If bookkeeping is done, it's done in userland tools.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_x
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-10.
+ When read, this attribute returns the number of the actual
+ sensitivity in x direction.
+ This file is readonly.
+ Has never been used. If bookkeeping is done, it's done in userland tools.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_y
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1-10.
+ When read, this attribute returns the number of the actual
+ sensitivity in y direction.
+ This file is readonly.
+ Has never been used. If bookkeeping is done, it's done in userland tools.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/firmware_version
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+ further usage in other programs. To receive the real version
+ number the decimal point has to be shifted 2 positions to the
+ left. E.g. a returned value of 121 means 1.21
+ This file is readonly.
+ Obsoleted by binary sysfs attribute "info".
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_buttons
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_buttons holds information about button layout.
+ When read, these files return the respective profile buttons.
+ The returned data is 23 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_buttons instead.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_settings
+Date: January 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_settings holds information like resolution, sensitivity
+ and light effects.
+ When read, these files return the respective profile settings.
+ The returned data is 16 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_settings instead.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-pyra b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-pyra
new file mode 100644
index 00000000000..87ac87e9556
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-pyra
@@ -0,0 +1,73 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_cpi
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: It is possible to switch the cpi setting of the mouse with the
+ press of a button.
+ When read, this file returns the raw number of the actual cpi
+ setting reported by the mouse. This number has to be further
+ processed to receive the real dpi value.
+
+ VALUE DPI
+ 1 400
+ 2 800
+ 4 1600
+
+ This file is readonly.
+ Has never been used. If bookkeeping is done, it's done in userland tools.
+Users: http://roccat.sourceforge.net
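
The VALUE/DPI table in the actual_cpi entry above maps directly onto a small lookup. A minimal C sketch, not part of the patch; the helper name is made up for illustration:

/* Map the raw actual_cpi value to DPI according to the table above. */
static int pyra_raw_cpi_to_dpi(int raw)
{
	switch (raw) {
	case 1: return 400;
	case 2: return 800;
	case 4: return 1600;
	default: return -1;	/* raw value not listed in the table */
	}
}
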
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_profile
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the number of the actual profile in
+ range 0-4.
+ This file is readonly.
+ Please use binary attribute "settings" which provides this information.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/firmware_version
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+ further usage in other programs. To receive the real version
+ number the decimal point has to be shifted 2 positions to the
+ left. E.g. a returned value of 138 means 1.38
+ This file is readonly.
+ Please use binary attribute "info" which provides this information.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_buttons
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_buttons holds information about button layout.
+ When read, these files return the respective profile buttons.
+ The returned data is 19 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_buttons instead.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_settings
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split in settings and buttons.
+ profile_settings holds information like resolution, sensitivity
+ and light effects.
+ When read, these files return the respective profile settings.
+ The returned data is 13 bytes in size.
+ This file is readonly.
+ Write control to select profile and read profile_settings instead.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/startup_profile
+Date: August 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 0-4.
+ When read, this attribute returns the number of the profile
+ that's active when the mouse is powered on.
+ This file is readonly.
+ Please use binary attribute "settings" which provides this information.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index 49b82cad700..ce259c13c36 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -1,7 +1,101 @@
+What: /sys/devices/system/node/possible
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+		Nodes that could possibly become online at some point.
+
+What: /sys/devices/system/node/online
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Nodes that are online.
+
+What: /sys/devices/system/node/has_normal_memory
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Nodes that have regular memory.
+
+What: /sys/devices/system/node/has_cpu
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Nodes that have one or more CPUs.
+
+What: /sys/devices/system/node/has_high_memory
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Nodes that have regular or high memory.
+ Depends on CONFIG_HIGHMEM.
+
What: /sys/devices/system/node/nodeX
Date: October 2002
Contact: Linux Memory Management list <linux-mm@kvack.org>
Description:
When CONFIG_NUMA is enabled, this is a directory containing
information on node X such as what CPUs are local to the
- node.
+ node. Each file is detailed next.
+
+What: /sys/devices/system/node/nodeX/cpumap
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ The node's cpumap.
+
+What: /sys/devices/system/node/nodeX/cpulist
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+		The CPUs associated with the node.
+
+What: /sys/devices/system/node/nodeX/meminfo
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Provides information about the node's distribution and memory
+ utilization. Similar to /proc/meminfo, see Documentation/filesystems/proc.txt
+
+What: /sys/devices/system/node/nodeX/numastat
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ The node's hit/miss statistics, in units of pages.
+ See Documentation/numastat.txt
+
+What: /sys/devices/system/node/nodeX/distance
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ Distance between the node and all the other nodes
+ in the system.
+
+What: /sys/devices/system/node/nodeX/vmstat
+Date: October 2002
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ The node's zoned virtual memory statistics.
+ This is a superset of numastat.
+
+What: /sys/devices/system/node/nodeX/compact
+Date: February 2010
+Contact: Mel Gorman <mel@csn.ul.ie>
+Description:
+ When this file is written to, all memory within that node
+ will be compacted. When it completes, memory will be freed
+ into blocks which have as many contiguous pages as possible
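
Per the compact entry above, a write to the attribute triggers compaction of that node. A minimal userspace C sketch, not part of the patch; node0 and the value "1" are illustrative choices only:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/node/node0/compact", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* the write itself triggers compaction per the description above */
	fclose(f);
	return 0;
}
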
+
+What: /sys/devices/system/node/nodeX/scan_unevictable_pages
+Date: October 2008
+Contact: Lee Schermerhorn <lee.schermerhorn@hp.com>
+Description:
+ When set, it triggers scanning the node's unevictable lists
+		and moving any pages that have become evictable onto the respective
+ zone's inactive list. See mm/vmscan.c
+
+What: /sys/devices/system/node/nodeX/hugepages/hugepages-<size>/
+Date: December 2009
+Contact: Lee Schermerhorn <lee.schermerhorn@hp.com>
+Description:
+ The node's huge page size control/query attributes.
+ See Documentation/vm/hugetlbpage.txt \ No newline at end of file
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp
new file mode 100644
index 00000000000..481aae95c7d
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-ib_srp
@@ -0,0 +1,156 @@
+What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/add_target
+Date: January 2, 2006
+KernelVersion: 2.6.15
+Contact: linux-rdma@vger.kernel.org
+Description: Interface for making ib_srp connect to a new target.
+ One can request ib_srp to connect to a new target by writing
+ a comma-separated list of login parameters to this sysfs
+ attribute. The supported parameters are:
+ * id_ext, a 16-digit hexadecimal number specifying the eight
+ byte identifier extension in the 16-byte SRP target port
+ identifier. The target port identifier is sent by ib_srp
+ to the target in the SRP_LOGIN_REQ request.
+ * ioc_guid, a 16-digit hexadecimal number specifying the eight
+ byte I/O controller GUID portion of the 16-byte target port
+ identifier.
+ * dgid, a 32-digit hexadecimal number specifying the
+ destination GID.
+ * pkey, a four-digit hexadecimal number specifying the
+ InfiniBand partition key.
+ * service_id, a 16-digit hexadecimal number specifying the
+ InfiniBand service ID used to establish communication with
+ the SRP target. How to find out the value of the service ID
+ is specified in the documentation of the SRP target.
+ * max_sect, a decimal number specifying the maximum number of
+ 512-byte sectors to be transferred via a single SCSI command.
+ * max_cmd_per_lun, a decimal number specifying the maximum
+ number of outstanding commands for a single LUN.
+ * io_class, a hexadecimal number specifying the SRP I/O class.
+ Must be either 0xff00 (rev 10) or 0x0100 (rev 16a). The I/O
+ class defines the format of the SRP initiator and target
+ port identifiers.
+ * initiator_ext, a 16-digit hexadecimal number specifying the
+ identifier extension portion of the SRP initiator port
+ identifier. This data is sent by the initiator to the target
+ in the SRP_LOGIN_REQ request.
+ * cmd_sg_entries, a number in the range 1..255 that specifies
+ the maximum number of data buffer descriptors stored in the
+ SRP_CMD information unit itself. With allow_ext_sg=0 the
+ parameter cmd_sg_entries defines the maximum S/G list length
+ for a single SRP_CMD, and commands whose S/G list length
+ exceeds this limit after S/G list collapsing will fail.
+ * allow_ext_sg, whether ib_srp is allowed to include a partial
+ memory descriptor list in an SRP_CMD instead of the entire
+ list. If a partial memory descriptor list has been included
+ in an SRP_CMD the remaining memory descriptors are
+ communicated from initiator to target via an additional RDMA
+ transfer. Setting allow_ext_sg to 1 increases the maximum
+ amount of data that can be transferred between initiator and
+ target via a single SCSI command. Since not all SRP target
+ implementations support partial memory descriptor lists the
+ default value for this option is 0.
+ * sg_tablesize, a number in the range 1..2048 specifying the
+ maximum S/G list length the SCSI layer is allowed to pass to
+ ib_srp. Specifying a value that exceeds cmd_sg_entries is
+ only safe with partial memory descriptor list support enabled
+ (allow_ext_sg=1).
+
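
A minimal C sketch, not part of the patch, of writing a login-parameter string built from the parameters documented above to add_target. The srp-mlx4_0-1 path component and every parameter value are purely illustrative:

#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/class/infiniband_srp/srp-mlx4_0-1/add_target";
	FILE *f = fopen(attr, "w");

	if (!f)
		return 1;
	/* comma-separated login parameters, as documented above */
	fprintf(f, "id_ext=200100e08b000000,"
		   "ioc_guid=00a0b80200402bd1,"
		   "dgid=fe800000000000000002c90200402bd5,"
		   "pkey=ffff,"
		   "service_id=200100e08b000000\n");
	fclose(f);
	return 0;
}
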
+What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev
+Date: January 2, 2006
+KernelVersion: 2.6.15
+Contact: linux-rdma@vger.kernel.org
+Description: HCA name (<hca>).
+
+What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/port
+Date: January 2, 2006
+KernelVersion: 2.6.15
+Contact: linux-rdma@vger.kernel.org
+Description: HCA port number (<port_number>).
+
+What: /sys/class/scsi_host/host<n>/allow_ext_sg
+Date: May 19, 2011
+KernelVersion: 2.6.39
+Contact: linux-rdma@vger.kernel.org
+Description: Whether ib_srp is allowed to include a partial memory
+ descriptor list in an SRP_CMD when communicating with an SRP
+ target.
+
+What: /sys/class/scsi_host/host<n>/cmd_sg_entries
+Date: May 19, 2011
+KernelVersion: 2.6.39
+Contact: linux-rdma@vger.kernel.org
+Description: Maximum number of data buffer descriptors that may be sent to
+ the target in a single SRP_CMD request.
+
+What: /sys/class/scsi_host/host<n>/dgid
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: InfiniBand destination GID used for communication with the SRP
+ target. Differs from orig_dgid if port redirection has happened.
+
+What: /sys/class/scsi_host/host<n>/id_ext
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: Eight-byte identifier extension portion of the 16-byte target
+ port identifier.
+
+What: /sys/class/scsi_host/host<n>/ioc_guid
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: Eight-byte I/O controller GUID portion of the 16-byte target
+ port identifier.
+
+What: /sys/class/scsi_host/host<n>/local_ib_device
+Date: November 29, 2006
+KernelVersion: 2.6.19
+Contact: linux-rdma@vger.kernel.org
+Description: Name of the InfiniBand HCA used for communicating with the
+ SRP target.
+
+What: /sys/class/scsi_host/host<n>/local_ib_port
+Date: November 29, 2006
+KernelVersion: 2.6.19
+Contact: linux-rdma@vger.kernel.org
+Description: Number of the HCA port used for communicating with the
+ SRP target.
+
+What: /sys/class/scsi_host/host<n>/orig_dgid
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: InfiniBand destination GID specified in the parameters
+ written to the add_target sysfs attribute.
+
+What: /sys/class/scsi_host/host<n>/pkey
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: A 16-bit number representing the InfiniBand partition key used
+ for communication with the SRP target.
+
+What: /sys/class/scsi_host/host<n>/req_lim
+Date: October 20, 2010
+KernelVersion: 2.6.36
+Contact: linux-rdma@vger.kernel.org
+Description: Number of requests ib_srp can send to the target before it has
+ to wait for more credits. For more information see also the
+ SRP credit algorithm in the SRP specification.
+
+What: /sys/class/scsi_host/host<n>/service_id
+Date: June 17, 2006
+KernelVersion: 2.6.17
+Contact: linux-rdma@vger.kernel.org
+Description: InfiniBand service ID used for establishing communication with
+ the SRP target.
+
+What: /sys/class/scsi_host/host<n>/zero_req_lim
+Date: September 20, 2006
+KernelVersion: 2.6.18
+Contact: linux-rdma@vger.kernel.org
+Description: Number of times the initiator had to wait before sending a
+ request to the target because it ran out of credits. For more
+ information see also the SRP credit algorithm in the SRP
+ specification.
diff --git a/Documentation/ABI/stable/sysfs-transport-srp b/Documentation/ABI/stable/sysfs-transport-srp
new file mode 100644
index 00000000000..b36fb0dc13c
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-transport-srp
@@ -0,0 +1,19 @@
+What: /sys/class/srp_remote_ports/port-<h>:<n>/delete
+Date: June 1, 2012
+KernelVersion: 3.7
+Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
+Description: Instructs an SRP initiator to disconnect from a target and to
+ remove all LUNs imported from that target.
+
+What: /sys/class/srp_remote_ports/port-<h>:<n>/port_id
+Date: June 27, 2007
+KernelVersion: 2.6.24
+Contact: linux-scsi@vger.kernel.org
+Description: 16-byte local SRP port identifier in hexadecimal format. An
+ example: 4c:49:4e:55:58:20:56:49:4f:00:00:00:00:00:00:00.
+
+What: /sys/class/srp_remote_ports/port-<h>:<n>/roles
+Date: June 27, 2007
+KernelVersion: 2.6.24
+Contact: linux-scsi@vger.kernel.org
+Description: Role of the remote port. Either "SRP Initiator" or "SRP Target".
diff --git a/Documentation/ABI/testing/dev-kmsg b/Documentation/ABI/testing/dev-kmsg
index 7e7e07a82e0..bb820be4817 100644
--- a/Documentation/ABI/testing/dev-kmsg
+++ b/Documentation/ABI/testing/dev-kmsg
@@ -92,7 +92,7 @@ Description: The /dev/kmsg character device node provides userspace access
The flags field carries '-' by default. A 'c' indicates a
fragment of a line. All following fragments are flagged with
'+'. Note, that these hints about continuation lines are not
- neccessarily correct, and the stream could be interleaved with
+ necessarily correct, and the stream could be interleaved with
unrelated messages, but merging the lines in the output
usually produces better human readable results. A similar
logic is used internally when messages are printed to the
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 98694661354..ec0a38ef314 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -23,7 +23,7 @@ Description:
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
- base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
+ base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK][MODULE_CHECK]
mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
fsmagic:= hex value
uid:= decimal value
@@ -53,6 +53,7 @@ Description:
measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
measure func=FILE_CHECK mask=MAY_READ uid=0
+ measure func=MODULE_CHECK uid=0
appraise fowner=0
The default policy measures all executables in bprm_check,
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index dff1f48d252..1ce5ae329c0 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -222,3 +222,37 @@ Description:
satisfied too. Reading this attribute will show the current
value of d3cold_allowed bit. Writing this attribute will set
the value of d3cold_allowed bit.
+
+What: /sys/bus/pci/devices/.../sriov_totalvfs
+Date: November 2012
+Contact: Donald Dutile <ddutile@redhat.com>
+Description:
+ This file appears when a physical PCIe device supports SR-IOV.
+ Userspace applications can read this file to determine the
+ maximum number of Virtual Functions (VFs) a PCIe physical
+ function (PF) can support. Typically, this is the value reported
+ in the PF's SR-IOV extended capability structure's TotalVFs
+ element. Drivers have the ability at probe time to reduce the
+ value read from this file via the pci_sriov_set_totalvfs()
+ function.
+
+What: /sys/bus/pci/devices/.../sriov_numvfs
+Date: November 2012
+Contact: Donald Dutile <ddutile@redhat.com>
+Description:
+ This file appears when a physical PCIe device supports SR-IOV.
+ Userspace applications can read and write to this file to
+ determine and control the enablement or disablement of Virtual
+ Functions (VFs) on the physical function (PF). A read of this
+ file will return the number of VFs that are enabled on this PF.
+ A number written to this file will enable the specified
+ number of VFs. A userspace application would typically read the
+ file and check that the value is zero, and then write the number
+ of VFs that should be enabled on the PF; the value written
+ should be less than or equal to the value in the sriov_totalvfs
+ file. A userspace application wanting to disable the VFs would
+ write a zero to this file. The core ensures that valid values
+ are written to this file, and returns errors when values are not
+ valid. For example, writing a 2 to this file when sriov_numvfs
+ is not 0 and not 2 already will return an error. Writing a 10
+ when the value of sriov_totalvfs is 8 will return an error.
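
A minimal userspace C sketch, not part of the patch, of the read-then-write sequence described above. The device address 0000:01:00.0 and the requested count of 4 VFs are illustrative only:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs";
	FILE *f = fopen(path, "r");
	int cur = -1;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &cur) != 1)
		cur = -1;
	fclose(f);
	if (cur != 0)
		return 1;	/* VFs already enabled; write 0 first to disable them */

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "4\n");	/* must be <= the value in sriov_totalvfs */
	fclose(f);
	return 0;
}
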
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index 1cf2adf46b1..cd9213ccf3d 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -70,6 +70,10 @@ snap_*
A directory per each snapshot
+parent
+
+ Information identifying the pool, image, and snapshot id for
+ the parent image in a layered rbd image (format 2 only).
Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
-------------------------------------------------------------
diff --git a/Documentation/ABI/testing/sysfs-devices-node b/Documentation/ABI/testing/sysfs-devices-node
deleted file mode 100644
index 453a210c3ce..00000000000
--- a/Documentation/ABI/testing/sysfs-devices-node
+++ /dev/null
@@ -1,7 +0,0 @@
-What: /sys/devices/system/node/nodeX/compact
-Date: February 2010
-Contact: Mel Gorman <mel@csn.ul.ie>
-Description:
- When this file is written to, all memory within that node
- will be compacted. When it completes, memory will be freed
- into blocks which have as many contiguous pages as possible
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 7fc2997b23a..9d43e767084 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -164,7 +164,7 @@ Contact: Rafael J. Wysocki <rjw@sisk.pl>
Description:
The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
contains the total time the device has been preventing
- opportunistic transitions to sleep states from occuring.
+ opportunistic transitions to sleep states from occurring.
This attribute is read-only. If the device is not enabled to
wake up the system from sleep states, this attribute is not
present.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku b/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
index 189dc43891b..9eca5a182e6 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
@@ -117,6 +117,14 @@ Description: When written, this file lets one store macros with max 500
which profile and key to read.
Users: http://roccat.sourceforge.net
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/reset
+Date: November 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one reset the device.
+ The data has to be 3 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/control
Date: June 2011
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
index 65e6e5dd67e..7bd776f9c3c 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
@@ -9,15 +9,12 @@ Description: The integer value of this attribute ranges from 0-4.
and the mouse activates this profile immediately.
Users: http://roccat.sourceforge.net
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/firmware_version
-Date: October 2010
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/info
+Date: November 2012
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: When read, this file returns the raw integer version number of the
- firmware reported by the mouse. Using the integer value eases
- further usage in other programs. To receive the real version
- number the decimal point has to be shifted 2 positions to the
- left. E.g. a returned value of 121 means 1.21
- This file is readonly.
+Description: When read, this file returns general data like firmware version.
+ When written, the device can be reset.
+ The data is 8 bytes long.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/macro
@@ -42,18 +39,8 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_buttons
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_buttons holds information about button layout.
- When read, these files return the respective profile buttons.
- The returned data is 77 bytes in size.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile_settings
@@ -68,19 +55,8 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_settings
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_settings holds information like resolution, sensitivity
- and light effects.
- When read, these files return the respective profile settings.
- The returned data is 43 bytes in size.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/sensor
@@ -104,9 +80,9 @@ What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-
Date: October 2010
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
Description: When written a calibration process for the tracking control unit
- can be initiated/cancelled.
- The data has to be 3 bytes long.
- This file is writeonly.
+ can be initiated/cancelled. Also lets one read/write sensor
+ registers.
+ The data has to be 4 bytes long.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu_image
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus
index 20f937c9d84..a10404f15a5 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kovaplus
@@ -1,12 +1,3 @@
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_cpi
-Date: January 2011
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The integer value of this attribute ranges from 1-4.
- When read, this attribute returns the number of the active
- cpi level.
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_profile
Date: January 2011
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
@@ -18,33 +9,12 @@ Description: The integer value of this attribute ranges from 0-4.
active when the mouse is powered on.
Users: http://roccat.sourceforge.net
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_x
-Date: January 2011
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/info
+Date: November 2012
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The integer value of this attribute ranges from 1-10.
- When read, this attribute returns the number of the actual
- sensitivity in x direction.
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/actual_sensitivity_y
-Date: January 2011
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The integer value of this attribute ranges from 1-10.
- When read, this attribute returns the number of the actual
- sensitivity in y direction.
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/firmware_version
-Date: January 2011
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: When read, this file returns the raw integer version number of the
- firmware reported by the mouse. Using the integer value eases
- further usage in other programs. To receive the real version
- number the decimal point has to be shifted 2 positions to the
- left. E.g. a returned value of 121 means 1.21
- This file is readonly.
+Description: When read, this file returns general data like firmware version.
+ When written, the device can be reset.
+ The data is 6 bytes long.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile_buttons
@@ -58,18 +28,8 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_buttons
-Date: January 2011
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_buttons holds information about button layout.
- When read, these files return the respective profile buttons.
- The returned data is 23 bytes in size.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile_settings
@@ -84,17 +44,6 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kovaplus/roccatkovaplus<minor>/profile[1-5]_settings
-Date: January 2011
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_settings holds information like resolution, sensitivity
- and light effects.
- When read, these files return the respective profile settings.
- The returned data is 16 bytes in size.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-lua b/Documentation/ABI/testing/sysfs-driver-hid-roccat-lua
new file mode 100644
index 00000000000..31c6c4c8ba2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-lua
@@ -0,0 +1,7 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/control
+Date: October 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, cpi, button and light settings can be configured.
+ When read, actual cpi setting and sensor data are returned.
+ The data has to be 8 bytes long.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
index 3f8de50e4ff..9fa9de30d14 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
@@ -1,37 +1,9 @@
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_cpi
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: It is possible to switch the cpi setting of the mouse with the
- press of a button.
- When read, this file returns the raw number of the actual cpi
- setting reported by the mouse. This number has to be further
- processed to receive the real dpi value.
-
- VALUE DPI
- 1 400
- 2 800
- 4 1600
-
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_profile
-Date: August 2010
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/info
+Date: November 2012
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: When read, this file returns the number of the actual profile in
- range 0-4.
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/firmware_version
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: When read, this file returns the raw integer version number of the
- firmware reported by the mouse. Using the integer value eases
- further usage in other programs. To receive the real version
- number the decimal point has to be shifted 2 positions to the
- left. E.g. a returned value of 138 means 1.38
- This file is readonly.
+Description: When read, this file returns general data like firmware version.
+ When written, the device can be reset.
+ The data is 6 bytes long.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_settings
@@ -46,19 +18,8 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_settings
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_settings holds information like resolution, sensitivity
- and light effects.
- When read, these files return the respective profile settings.
- The returned data is 13 bytes in size.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_buttons
@@ -72,27 +33,8 @@ Description: The mouse can store 5 profiles which can be switched by the
The mouse will reject invalid data.
Which profile to write is determined by the profile number
contained in the data.
- This file is writeonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_buttons
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The mouse can store 5 profiles which can be switched by the
- press of a button. A profile is split in settings and buttons.
- profile_buttons holds information about button layout.
- When read, these files return the respective profile buttons.
- The returned data is 19 bytes in size.
- This file is readonly.
-Users: http://roccat.sourceforge.net
-
-What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/startup_profile
-Date: August 2010
-Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
-Description: The integer value of this attribute ranges from 0-4.
- When read, this attribute returns the number of the profile
- that's active when the mouse is powered on.
- This file is readonly.
+ Before reading this file, control has to be written to select
+ which profile to read.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/settings
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu b/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu
index b42922cf6b1..f1e02a98bd9 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu
@@ -40,8 +40,8 @@ What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-
Date:		May 2012
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
Description: When read, this file returns general data like firmware version.
+ When written, the device can be reset.
The data is 8 bytes long.
- This file is readonly.
Users: http://roccat.sourceforge.net
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/macro
@@ -74,4 +74,3 @@ Description: The mouse has a Avago ADNS-3090 sensor.
This file allows reading and writing of the mouse sensors registers.
The data has to be 4 bytes long.
Users: http://roccat.sourceforge.net
-
diff --git a/Documentation/ABI/testing/sysfs-driver-ppi b/Documentation/ABI/testing/sysfs-driver-ppi
index 97a003ee058..7d1435bc976 100644
--- a/Documentation/ABI/testing/sysfs-driver-ppi
+++ b/Documentation/ABI/testing/sysfs-driver-ppi
@@ -5,7 +5,7 @@ Contact: xiaoyan.zhang@intel.com
Description:
This folder includes the attributes related with PPI (Physical
Presence Interface). Only if TPM is supported by BIOS, this
- folder makes sence. The folder path can be got by command
+	folder makes sense. The folder path can be found with the command
'find /sys/ -name 'pcrs''. For the detail information of PPI,
please refer to the PPI specification from
http://www.trustedcomputinggroup.org/
diff --git a/Documentation/ABI/testing/sysfs-profiling b/Documentation/ABI/testing/sysfs-profiling
index b02d8b8c173..8a8e466eb2c 100644
--- a/Documentation/ABI/testing/sysfs-profiling
+++ b/Documentation/ABI/testing/sysfs-profiling
@@ -1,13 +1,13 @@
-What: /sys/kernel/profile
+What: /sys/kernel/profiling
Date: September 2008
Contact: Dave Hansen <dave@linux.vnet.ibm.com>
Description:
- /sys/kernel/profile is the runtime equivalent
+ /sys/kernel/profiling is the runtime equivalent
of the boot-time profile= option.
You can get the same effect running:
- echo 2 > /sys/kernel/profile
+ echo 2 > /sys/kernel/profiling
as you would by issuing profile=2 on the boot
command line.
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index a0b6250add7..4a4fb295cee 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -468,11 +468,46 @@ To map a single region, you do:
size_t size = buffer->len;
dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling;
+ }
and to unmap it:
dma_unmap_single(dev, dma_handle, size, direction);
+You should call dma_mapping_error() as dma_map_single() could fail and return
+an error. Not all DMA implementations support the dma_mapping_error() interface.
+However, it is good practice to call dma_mapping_error(), which
+will invoke the generic mapping error check interface. Doing so will ensure
+that the mapping code works correctly on all DMA implementations without
+any dependency on the specifics of the underlying implementation. Using the
+returned address without checking for errors could result in failures ranging
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying DMA
+implementation follow; these are applicable to dma_map_page() as well.
+
+Incorrect example 1:
+ dma_addr_t dma_handle;
+
+ dma_handle = dma_map_single(dev, addr, size, direction);
+ if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) {
+ goto map_error;
+ }
+
+Incorrect example 2:
+ dma_addr_t dma_handle;
+
+ dma_handle = dma_map_single(dev, addr, size, direction);
+ if (dma_handle == DMA_ERROR_CODE) {
+ goto map_error;
+ }
+
You should call dma_unmap_single when the DMA activity is finished, e.g.
from the interrupt which told you that the DMA transfer is done.
@@ -489,6 +524,14 @@ Specifically:
size_t size = buffer->len;
dma_handle = dma_map_page(dev, page, offset, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling;
+ }
...
@@ -496,6 +539,12 @@ Specifically:
Here, "offset" means byte offset within the given page.
+You should call dma_mapping_error() as dma_map_page() could fail and return
+an error, as outlined under the dma_map_single() discussion.
+
+You should call dma_unmap_page when the DMA activity is finished, e.g.
+from the interrupt which told you that the DMA transfer is done.
+
With scatterlists, you map a region gathered from several regions by:
int i, count = dma_map_sg(dev, sglist, nents, direction);
@@ -578,6 +627,14 @@ to use the dma_sync_*() interfaces.
dma_addr_t mapping;
mapping = dma_map_single(cp->dev, buffer, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(cp->dev, mapping)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling;
+ }
cp->rx_buf = buffer;
cp->rx_len = len;
@@ -658,6 +715,75 @@ failure can be determined by:
* delay and try again later or
* reset driver.
*/
+ goto map_error_handling;
+ }
+
+- unmap pages that are already mapped, when a mapping error occurs in the middle
+  of a multiple page mapping attempt. These examples are applicable to
+  dma_map_page() as well.
+
+Example 1:
+ dma_addr_t dma_handle1;
+ dma_addr_t dma_handle2;
+
+ dma_handle1 = dma_map_single(dev, addr, size, direction);
+ if (dma_mapping_error(dev, dma_handle1)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling1;
+ }
+ dma_handle2 = dma_map_single(dev, addr, size, direction);
+ if (dma_mapping_error(dev, dma_handle2)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling2;
+ }
+
+ ...
+
+ map_error_handling2:
+	dma_unmap_single(dev, dma_handle1, size, direction);
+ map_error_handling1:
+
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
+	    a mapping error is detected in the middle)
+
+ dma_addr_t dma_addr;
+ dma_addr_t array[DMA_BUFFERS];
+ int save_index = 0;
+
+ for (i = 0; i < DMA_BUFFERS; i++) {
+
+ ...
+
+ dma_addr = dma_map_single(dev, addr, size, direction);
+ if (dma_mapping_error(dev, dma_addr)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ goto map_error_handling;
+ }
+		array[i] = dma_addr;
+ save_index++;
+ }
+
+ ...
+
+ map_error_handling:
+
+ for (i = 0; i < save_index; i++) {
+
+ ...
+
+		dma_unmap_single(dev, array[i], size, direction);
}
Networking drivers must call dev_kfree_skb to free the socket buffer
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 66bd97a95f1..78a6c569d20 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -678,3 +678,15 @@ out of dma_debug_entries. These entries are preallocated at boot. The number
of preallocated entries is defined per architecture. If it is too low for you
boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
architectural default.
+
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+The dma-debug interface debug_dma_mapping_error() is used to debug drivers that
+fail to check DMA mapping errors on addresses returned by the dma_map_single()
+and dma_map_page() interfaces. This interface clears a flag set by
+debug_dma_map_page() to indicate that dma_mapping_error() has been called by
+the driver. When the driver does the unmap, debug_dma_unmap() checks the flag
+and, if it is still set, prints a warning message that includes the call trace
+leading up to the unmap. This interface can be called from dma_mapping_error()
+routines to enable DMA mapping error check debugging.
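+
+A minimal sketch of the driver-side check that keeps dma-debug quiet; this is
+not taken from an in-tree driver, and dev, addr and size are assumed to be
+already set up:
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, addr, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/*
+		 * Assuming the architecture's dma_mapping_error() calls
+		 * debug_dma_mapping_error() as described above, dma-debug
+		 * will not warn at dma_unmap_single() time for this mapping.
+		 */
+		return -ENOMEM;
+	}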
+
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac..e59480db9ee 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages if it can
+be mapped as a contiguous chunk into the device's DMA address space. By
+specifying this attribute the allocated buffer is forced to be contiguous
+also in physical memory.
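+
+A minimal usage sketch (illustrative only: the device pointer and size are
+assumptions, and the struct dma_attrs based calls shown here should be checked
+against the current dma_alloc_attrs() prototype):
+
+	DEFINE_DMA_ATTRS(attrs);
+	dma_addr_t dma_handle;
+	void *cpu_addr;
+
+	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+	/* buffer is physically contiguous, not only contiguous in DMA space */
+	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
+	if (!cpu_addr)
+		return -ENOMEM;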
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b0300529ab1..4ee2304f82f 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
the <methodname>page_flip</methodname> operation will be called with a
non-NULL <parameter>event</parameter> argument pointing to a
<structname>drm_pending_vblank_event</structname> instance. Upon page
- flip completion the driver must fill the
- <parameter>event</parameter>::<structfield>event</structfield>
- <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
- and <structfield>tv_usec</structfield> fields with the associated
- vertical blanking count and timestamp, add the event to the
- <parameter>drm_file</parameter> list of events to be signaled, and wake
- up any waiting process. This can be performed with
+ flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+  to fill in the event and send it to wake up any waiting processes.
+ This can be performed with
<programlisting><![CDATA[
- struct timeval now;
-
- event->event.sequence = drm_vblank_count_and_time(..., &now);
- event->event.tv_sec = now.tv_sec;
- event->event.tv_usec = now.tv_usec;
-
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&event->base.link, &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
+ ...
+ drm_send_vblank_event(dev, pipe, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
]]></programlisting>
</para>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
</sect1>
- <!-- Internals: mid-layer helper functions -->
+ <!-- Internals: kms helper functions -->
<sect1>
- <title>Mid-layer Helper Functions</title>
+ <title>Mode Setting Helper Functions</title>
<para>
The CRTC, encoder and connector functions provided by the drivers
implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
</listitem>
</itemizedlist>
</sect2>
+ <sect2>
+ <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+ </sect2>
+ <sect2>
+ <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+ </sect2>
+ <sect2>
+ <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+ </sect2>
</sect1>
<!-- Internals: vertical blanking -->
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 00687ee9d36..f75ab4c1b28 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -58,6 +58,9 @@
<sect1><title>String Conversions</title>
!Elib/vsprintf.c
+!Finclude/linux/kernel.h kstrtol
+!Finclude/linux/kernel.h kstrtoul
+!Elib/kstrtox.c
</sect1>
<sect1><title>String Manipulation</title>
<!-- All functions are exported at now
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 4fdf6b562d1..3dd9e78815d 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2586,6 +2586,13 @@ ioctls.</para>
<para>Vendor and device specific media bus pixel formats.
<xref linkend="v4l2-mbus-vendor-spec-fmts" />.</para>
</listitem>
+ <listitem>
+ <para>Importing DMABUF file descriptors as a new IO method described
+ in <xref linkend="dmabuf" />.</para>
+ </listitem>
+ <listitem>
+ <para>Exporting DMABUF files using &VIDIOC-EXPBUF; ioctl.</para>
+ </listitem>
</itemizedlist>
</section>
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index b5d1cbdc558..388a3403265 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -331,7 +331,7 @@ application until one or more buffers can be dequeued. By default
outgoing queue. When the <constant>O_NONBLOCK</constant> flag was
given to the &func-open; function, <constant>VIDIOC_DQBUF</constant>
returns immediately with an &EAGAIN; when no buffer is available. The
-&func-select; or &func-poll; function are always available.</para>
+&func-select; or &func-poll; functions are always available.</para>
<para>To start and stop capturing or output applications call the
&VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; ioctl. Note
@@ -472,6 +472,165 @@ rest should be evident.</para>
</footnote></para>
</section>
+ <section id="dmabuf">
+ <title>Streaming I/O (DMA buffer importing)</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+<para>The DMABUF framework provides a generic method for sharing buffers
+between multiple devices. Device drivers that support DMABUF can export a DMA
+buffer to userspace as a file descriptor (known as the exporter role), import a
+DMA buffer from userspace using a file descriptor previously exported for a
+different or the same device (known as the importer role), or both. This
+section describes the DMABUF importer role API in V4L2.</para>
+
+  <para>Refer to <link linkend="vidioc-expbuf"> DMABUF exporting </link> for
+details about exporting V4L2 buffers as DMABUF file descriptors.</para>
+
+<para>Input and output devices support the streaming I/O method when the
+<constant>V4L2_CAP_STREAMING</constant> flag in the
+<structfield>capabilities</structfield> field of &v4l2-capability; returned by
+the &VIDIOC-QUERYCAP; ioctl is set. Whether importing DMA buffers through
+DMABUF file descriptors is supported is determined by calling the
+&VIDIOC-REQBUFS; ioctl with the memory type set to
+<constant>V4L2_MEMORY_DMABUF</constant>.</para>
+
+ <para>This I/O method is dedicated to sharing DMA buffers between different
+devices, which may be V4L devices or other video-related devices (e.g. DRM).
+Buffers (planes) are allocated by a driver on behalf of an application. Next,
+these buffers are exported to the application as file descriptors using an API
+specific to the allocator driver. Only such file descriptors are
+exchanged. The descriptors and meta-information are passed in &v4l2-buffer; (or
+in &v4l2-plane; in the multi-planar API case). The driver must be switched
+into DMABUF I/O mode by calling the &VIDIOC-REQBUFS; ioctl with the desired
+buffer type.</para>
+
+ <example>
+ <title>Initiating streaming I/O with DMABUF file descriptors</title>
+
+ <programlisting>
+&v4l2-requestbuffers; reqbuf;
+
+memset(&amp;reqbuf, 0, sizeof (reqbuf));
+reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+reqbuf.memory = V4L2_MEMORY_DMABUF;
+reqbuf.count = 1;
+
+if (ioctl(fd, &VIDIOC-REQBUFS;, &amp;reqbuf) == -1) {
+ if (errno == EINVAL)
+ printf("Video capturing or DMABUF streaming is not supported\n");
+ else
+ perror("VIDIOC_REQBUFS");
+
+ exit(EXIT_FAILURE);
+}
+ </programlisting>
+ </example>
+
+ <para>The buffer (plane) file descriptor is passed on the fly with the
+&VIDIOC-QBUF; ioctl. In case of multiplanar buffers, every plane can be
+associated with a different DMABUF descriptor. Although buffers are commonly
+cycled, applications can pass a different DMABUF descriptor at each
+<constant>VIDIOC_QBUF</constant> call.</para>
+
+ <example>
+ <title>Queueing DMABUF using single plane API</title>
+
+ <programlisting>
+int buffer_queue(int v4lfd, int index, int dmafd)
+{
+ &v4l2-buffer; buf;
+
+ memset(&amp;buf, 0, sizeof buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_DMABUF;
+ buf.index = index;
+ buf.m.fd = dmafd;
+
+ if (ioctl(v4lfd, &VIDIOC-QBUF;, &amp;buf) == -1) {
+ perror("VIDIOC_QBUF");
+ return -1;
+ }
+
+ return 0;
+}
+ </programlisting>
+ </example>
+
+ <example>
+ <title>Queueing DMABUF using multi plane API</title>
+
+ <programlisting>
+int buffer_queue_mp(int v4lfd, int index, int dmafd[], int n_planes)
+{
+ &v4l2-buffer; buf;
+ &v4l2-plane; planes[VIDEO_MAX_PLANES];
+ int i;
+
+ memset(&amp;buf, 0, sizeof buf);
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buf.memory = V4L2_MEMORY_DMABUF;
+ buf.index = index;
+ buf.m.planes = planes;
+ buf.length = n_planes;
+
+ memset(&amp;planes, 0, sizeof planes);
+
+ for (i = 0; i &lt; n_planes; ++i)
+ buf.m.planes[i].m.fd = dmafd[i];
+
+ if (ioctl(v4lfd, &VIDIOC-QBUF;, &amp;buf) == -1) {
+ perror("VIDIOC_QBUF");
+ return -1;
+ }
+
+ return 0;
+}
+ </programlisting>
+ </example>
+
+ <para>Captured or displayed buffers are dequeued with the
+&VIDIOC-DQBUF; ioctl. The driver can unlock the buffer at any
+time between the completion of the DMA and this ioctl. The memory is
+also unlocked when &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; is called, or
+when the device is closed.</para>
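+
+  <para>A minimal sketch of dequeuing such a buffer follows; it assumes the
+single-planar API and a capture stream, and is illustrative rather than part
+of the normative description above. The <structfield>index</structfield> field
+of the returned &v4l2-buffer; identifies which of the previously enqueued
+buffers has completed.</para>
+
+  <example>
+   <title>Dequeueing a DMABUF buffer using the single plane API</title>
+
+   <programlisting>
+int buffer_dequeue(int v4lfd, int *index)
+{
+	&v4l2-buffer; buf;
+
+	memset(&amp;buf, 0, sizeof buf);
+	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	buf.memory = V4L2_MEMORY_DMABUF;
+
+	if (ioctl(v4lfd, &VIDIOC-DQBUF;, &amp;buf) == -1) {
+		perror("VIDIOC_DQBUF");
+		return -1;
+	}
+
+	*index = buf.index;
+
+	return 0;
+}
+   </programlisting>
+  </example>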
+
+ <para>For capturing applications it is customary to enqueue a
+number of empty buffers, to start capturing and enter the read loop.
+Here the application waits until a filled buffer can be dequeued, and
+re-enqueues the buffer when the data is no longer needed. Output
+applications fill and enqueue buffers; when enough buffers are stacked
+up, output is started. In the write loop, when the application
+runs out of free buffers it must wait until an empty buffer can be
+dequeued and reused. Two methods exist to suspend execution of the
+application until one or more buffers can be dequeued. By default
+<constant>VIDIOC_DQBUF</constant> blocks when no buffer is in the
+outgoing queue. When the <constant>O_NONBLOCK</constant> flag was
+given to the &func-open; function, <constant>VIDIOC_DQBUF</constant>
+returns immediately with an &EAGAIN; when no buffer is available. The
+&func-select; and &func-poll; functions are always available.</para>
+
+ <para>To start and stop capturing or displaying applications call the
+&VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; ioctls. Note that
+<constant>VIDIOC_STREAMOFF</constant> removes all buffers from both queues and
+unlocks all buffers as a side effect. Since there is no notion of doing
+anything "now" on a multitasking system, if an application needs to synchronize
+with another event it should examine the &v4l2-buffer;
+<structfield>timestamp</structfield> of captured buffers, or set the field
+before enqueuing buffers for output.</para>
+
+ <para>Drivers implementing DMABUF importing I/O must support the
+<constant>VIDIOC_REQBUFS</constant>, <constant>VIDIOC_QBUF</constant>,
+<constant>VIDIOC_DQBUF</constant>, <constant>VIDIOC_STREAMON</constant> and
+<constant>VIDIOC_STREAMOFF</constant> ioctls, and the
+<function>select()</function> and <function>poll()</function> functions.</para>
+
+ </section>
+
<section id="async">
<title>Asynchronous I/O</title>
@@ -673,6 +832,14 @@ memory, set by the application. See <xref linkend="userp" /> for details.
<structname>v4l2_buffer</structname> structure.</entry>
</row>
<row>
+ <entry></entry>
+ <entry>int</entry>
+ <entry><structfield>fd</structfield></entry>
+ <entry>For the single-plane API and when
+<structfield>memory</structfield> is <constant>V4L2_MEMORY_DMABUF</constant> this
+is the file descriptor associated with a DMABUF buffer.</entry>
+ </row>
+ <row>
<entry>__u32</entry>
<entry><structfield>length</structfield></entry>
<entry></entry>
@@ -744,6 +911,15 @@ should set this to 0.</entry>
</entry>
</row>
<row>
+ <entry></entry>
+ <entry>int</entry>
+ <entry><structfield>fd</structfield></entry>
+ <entry>When the memory type in the containing &v4l2-buffer; is
+ <constant>V4L2_MEMORY_DMABUF</constant>, this is a file
+ descriptor associated with a DMABUF buffer, similar to the
+ <structfield>fd</structfield> field in &v4l2-buffer;.</entry>
+ </row>
+ <row>
<entry>__u32</entry>
<entry><structfield>data_offset</structfield></entry>
<entry></entry>
@@ -923,7 +1099,7 @@ application. Drivers set or clear this flag when the
</row>
<row>
<entry><constant>V4L2_BUF_FLAG_NO_CACHE_INVALIDATE</constant></entry>
- <entry>0x0400</entry>
+ <entry>0x0800</entry>
<entry>Caches do not have to be invalidated for this buffer.
Typically applications shall use this flag if the data captured in the buffer
is not going to be touched by the CPU, instead the buffer will, probably, be
@@ -932,7 +1108,7 @@ passed on to a DMA-capable hardware unit for further processing or output.
</row>
<row>
<entry><constant>V4L2_BUF_FLAG_NO_CACHE_CLEAN</constant></entry>
- <entry>0x0800</entry>
+ <entry>0x1000</entry>
<entry>Caches do not have to be cleaned for this buffer.
Typically applications shall use this flag for output buffers if the data
in this buffer has not been created by the CPU but by some DMA-capable unit,
@@ -964,6 +1140,12 @@ pointer</link> I/O.</entry>
<entry>3</entry>
<entry>[to do]</entry>
</row>
+ <row>
+ <entry><constant>V4L2_MEMORY_DMABUF</constant></entry>
+ <entry>4</entry>
+ <entry>The buffer is used for <link linkend="dmabuf">DMA shared
+buffer</link> I/O.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 10ccde9d16d..4d110b1ad3e 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -543,6 +543,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-enuminput;
&sub-enumoutput;
&sub-enumstd;
+ &sub-expbuf;
&sub-g-audio;
&sub-g-audioout;
&sub-g-crop;
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index a8cda1acacd..cd994367243 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -6,7 +6,8 @@
<refnamediv>
<refname>VIDIOC_CREATE_BUFS</refname>
- <refpurpose>Create buffers for Memory Mapped or User Pointer I/O</refpurpose>
+ <refpurpose>Create buffers for Memory Mapped or User Pointer or DMA Buffer
+ I/O</refpurpose>
</refnamediv>
<refsynopsisdiv>
@@ -55,11 +56,11 @@
</note>
<para>This ioctl is used to create buffers for <link linkend="mmap">memory
-mapped</link> or <link linkend="userp">user pointer</link>
-I/O. It can be used as an alternative or in addition to the
-<constant>VIDIOC_REQBUFS</constant> ioctl, when a tighter control over buffers
-is required. This ioctl can be called multiple times to create buffers of
-different sizes.</para>
+mapped</link> or <link linkend="userp">user pointer</link> or <link
+linkend="dmabuf">DMA buffer</link> I/O. It can be used as an alternative or in
+addition to the <constant>VIDIOC_REQBUFS</constant> ioctl, when a tighter
+control over buffers is required. This ioctl can be called multiple times to
+create buffers of different sizes.</para>
<para>To allocate device buffers applications initialize relevant fields of
the <structname>v4l2_create_buffers</structname> structure. They set the
@@ -109,7 +110,8 @@ information.</para>
<entry>__u32</entry>
<entry><structfield>memory</structfield></entry>
<entry>Applications set this field to
-<constant>V4L2_MEMORY_MMAP</constant> or
+<constant>V4L2_MEMORY_MMAP</constant>,
+<constant>V4L2_MEMORY_DMABUF</constant> or
<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
/></entry>
</row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
new file mode 100644
index 00000000000..72dfbd20a80
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -0,0 +1,212 @@
+<refentry id="vidioc-expbuf">
+
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_EXPBUF</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_EXPBUF</refname>
+ <refpurpose>Export a buffer as a DMABUF file descriptor.</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_exportbuffer *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_EXPBUF</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+<para>This ioctl is an extension to the <link linkend="mmap">memory
+mapping</link> I/O method, therefore it is available only for
+<constant>V4L2_MEMORY_MMAP</constant> buffers. It can be used to export a
+buffer as a DMABUF file at any time after buffers have been allocated with the
+&VIDIOC-REQBUFS; ioctl.</para>
+
+<para> To export a buffer, applications fill &v4l2-exportbuffer;. The
+<structfield> type </structfield> field is set to the same buffer type as was
+previously used with &v4l2-requestbuffers;<structfield> type </structfield>.
+Applications must also set the <structfield> index </structfield> field. Valid
+index numbers range from zero to the number of buffers allocated with
+&VIDIOC-REQBUFS; (&v4l2-requestbuffers;<structfield> count </structfield>)
+minus one. For the multi-planar API, applications set the <structfield> plane
+</structfield> field to the index of the plane to be exported. Valid planes
+range from zero to the maximum number of valid planes for the currently active
+format. For the single-planar API, applications must set <structfield> plane
+</structfield> to zero. Additional flags may be posted in the <structfield>
+flags </structfield> field. Refer to the open() manual page for details.
+Currently only O_CLOEXEC is supported. All other fields must be set to zero.
+In the case of the multi-planar API, every plane is exported separately using
+multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
+
+<para> After calling <constant>VIDIOC_EXPBUF</constant> the <structfield> fd
+</structfield> field will be set by a driver. This is a DMABUF file
+descriptor. The application may pass it to other DMABUF-aware devices. Refer to
+<link linkend="dmabuf">DMABUF importing</link> for details about importing
+DMABUF files into V4L2 nodes. It is recommended to close a DMABUF file when it
+is no longer used to allow the associated memory to be reclaimed. </para>
+
+ </refsect1>
+ <refsect1>
+ <section>
+ <title>Examples</title>
+
+ <example>
+ <title>Exporting a buffer.</title>
+ <programlisting>
+int buffer_export(int v4lfd, &v4l2-buf-type; bt, int index, int *dmafd)
+{
+ &v4l2-exportbuffer; expbuf;
+
+ memset(&amp;expbuf, 0, sizeof(expbuf));
+ expbuf.type = bt;
+ expbuf.index = index;
+ if (ioctl(v4lfd, &VIDIOC-EXPBUF;, &amp;expbuf) == -1) {
+ perror("VIDIOC_EXPBUF");
+ return -1;
+ }
+
+ *dmafd = expbuf.fd;
+
+ return 0;
+}
+ </programlisting>
+ </example>
+
+ <example>
+ <title>Exporting a buffer using the multi-planar API.</title>
+ <programlisting>
+int buffer_export_mp(int v4lfd, &v4l2-buf-type; bt, int index,
+ int dmafd[], int n_planes)
+{
+ int i;
+
+ for (i = 0; i &lt; n_planes; ++i) {
+ &v4l2-exportbuffer; expbuf;
+
+ memset(&amp;expbuf, 0, sizeof(expbuf));
+ expbuf.type = bt;
+ expbuf.index = index;
+ expbuf.plane = i;
+ if (ioctl(v4lfd, &VIDIOC-EXPBUF;, &amp;expbuf) == -1) {
+ perror("VIDIOC_EXPBUF");
+ while (i)
+ close(dmafd[--i]);
+ return -1;
+ }
+ dmafd[i] = expbuf.fd;
+ }
+
+ return 0;
+}
+ </programlisting>
+ </example>
+ </section>
+ </refsect1>
+
+ <refsect1>
+ <table pgwide="1" frame="none" id="v4l2-exportbuffer">
+ <title>struct <structname>v4l2_exportbuffer</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry>Type of the buffer, same as &v4l2-format;
+<structfield>type</structfield> or &v4l2-requestbuffers;
+<structfield>type</structfield>, set by the application. See <xref
+linkend="v4l2-buf-type" /></entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>index</structfield></entry>
+ <entry>Number of the buffer, set by the application. This field is
+only used for <link linkend="mmap">memory mapping</link> I/O and can range from
+zero to the number of buffers allocated with the &VIDIOC-REQBUFS; and/or
+&VIDIOC-CREATE-BUFS; ioctls. </entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>plane</structfield></entry>
+ <entry>Index of the plane to be exported when using the
+multi-planar API. Otherwise this value must be set to zero. </entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>flags</structfield></entry>
+ <entry>Flags for the newly created file, currently only <constant>
+O_CLOEXEC </constant> is supported, refer to the manual of open() for more
+details.</entry>
+ </row>
+ <row>
+ <entry>__s32</entry>
+ <entry><structfield>fd</structfield></entry>
+ <entry>The DMABUF file descriptor associated with a buffer. Set by
+ the driver.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved[11]</structfield></entry>
+ <entry>Reserved field for future use. Must be set to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>A queue is not in MMAP mode or DMABUF exporting is not
+supported or <structfield> flags </structfield> or <structfield> type
+</structfield> or <structfield> index </structfield> or <structfield> plane
+</structfield> fields are invalid.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml b/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
index 2d37abefce1..3504a7f2f38 100644
--- a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
@@ -109,6 +109,23 @@ they cannot be swapped out to disk. Buffers remain locked until
dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl is
called, or until the device is closed.</para>
+ <para>To enqueue a <link linkend="dmabuf">DMABUF</link> buffer applications
+set the <structfield>memory</structfield> field to
+<constant>V4L2_MEMORY_DMABUF</constant> and the <structfield>m.fd</structfield>
+field to a file descriptor associated with a DMABUF buffer. When the
+multi-planar API is used the <structfield>m.fd</structfield> fields of the
+passed array of &v4l2-plane; have to be used instead. When
+<constant>VIDIOC_QBUF</constant> is called with a pointer to this structure the
+driver sets the <constant>V4L2_BUF_FLAG_QUEUED</constant> flag and clears the
+<constant>V4L2_BUF_FLAG_MAPPED</constant> and
+<constant>V4L2_BUF_FLAG_DONE</constant> flags in the
+<structfield>flags</structfield> field, or it returns an error code. This
+ioctl locks the buffer. Locking a buffer means passing it to a driver for a
+hardware access (usually DMA). If an application accesses (reads/writes) a
+locked buffer then the result is undefined. Buffers remain locked until
+dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl is called, or
+until the device is closed.</para>
+
<para>Applications call the <constant>VIDIOC_DQBUF</constant>
ioctl to dequeue a filled (capturing) or displayed (output) buffer
from the driver's outgoing queue. They just set the
diff --git a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml b/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
index 2b50ef2007f..78a06a9a5ec 100644
--- a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
@@ -48,28 +48,30 @@
<refsect1>
<title>Description</title>
- <para>This ioctl is used to initiate <link linkend="mmap">memory
-mapped</link> or <link linkend="userp">user pointer</link>
-I/O. Memory mapped buffers are located in device memory and must be
-allocated with this ioctl before they can be mapped into the
-application's address space. User buffers are allocated by
-applications themselves, and this ioctl is merely used to switch the
-driver into user pointer I/O mode and to setup some internal structures.</para>
+<para>This ioctl is used to initiate <link linkend="mmap">memory mapped</link>,
+<link linkend="userp">user pointer</link> or <link
+linkend="dmabuf">DMABUF</link> based I/O. Memory mapped buffers are located in
+device memory and must be allocated with this ioctl before they can be mapped
+into the application's address space. User buffers are allocated by
+applications themselves, and this ioctl is merely used to switch the driver
+into user pointer I/O mode and to setup some internal structures.
+Similarly, DMABUF buffers are allocated by applications through a device
+driver, and this ioctl only configures the driver into DMABUF I/O mode without
+performing any direct allocation.</para>
- <para>To allocate device buffers applications initialize all
-fields of the <structname>v4l2_requestbuffers</structname> structure.
-They set the <structfield>type</structfield> field to the respective
-stream or buffer type, the <structfield>count</structfield> field to
-the desired number of buffers, <structfield>memory</structfield>
-must be set to the requested I/O method and the <structfield>reserved</structfield> array
-must be zeroed. When the ioctl
-is called with a pointer to this structure the driver will attempt to allocate
-the requested number of buffers and it stores the actual number
-allocated in the <structfield>count</structfield> field. It can be
-smaller than the number requested, even zero, when the driver runs out
-of free memory. A larger number is also possible when the driver requires
-more buffers to function correctly. For example video output requires at least two buffers,
-one displayed and one filled by the application.</para>
+ <para>To allocate device buffers applications initialize all fields of the
+<structname>v4l2_requestbuffers</structname> structure. They set the
+<structfield>type</structfield> field to the respective stream or buffer type,
+the <structfield>count</structfield> field to the desired number of buffers,
+<structfield>memory</structfield> must be set to the requested I/O method and
+the <structfield>reserved</structfield> array must be zeroed. When the ioctl is
+called with a pointer to this structure the driver will attempt to allocate the
+requested number of buffers and it stores the actual number allocated in the
+<structfield>count</structfield> field. It can be smaller than the number
+requested, even zero, when the driver runs out of free memory. A larger number
+is also possible when the driver requires more buffers to function correctly.
+For example video output requires at least two buffers, one displayed and one
+filled by the application.</para>
<para>When the I/O method is not supported the ioctl
returns an &EINVAL;.</para>
@@ -102,7 +104,8 @@ as the &v4l2-format; <structfield>type</structfield> field. See <xref
<entry>__u32</entry>
<entry><structfield>memory</structfield></entry>
<entry>Applications set this field to
-<constant>V4L2_MEMORY_MMAP</constant> or
+<constant>V4L2_MEMORY_MMAP</constant>,
+<constant>V4L2_MEMORY_DMABUF</constant> or
<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
/>.</entry>
</row>
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index cab4ec58e46..fb32aead5a0 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -433,9 +433,9 @@
/* chip-specific constructor
* (see "Management of Cards and Components")
*/
- static int __devinit snd_mychip_create(struct snd_card *card,
- struct pci_dev *pci,
- struct mychip **rchip)
+ static int snd_mychip_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct mychip **rchip)
{
struct mychip *chip;
int err;
@@ -475,8 +475,8 @@
}
/* constructor -- see "Constructor" sub-section */
- static int __devinit snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -526,7 +526,7 @@
}
/* destructor -- see the "Destructor" sub-section */
- static void __devexit snd_mychip_remove(struct pci_dev *pci)
+ static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -542,9 +542,8 @@
<para>
The real constructor of PCI drivers is the <function>probe</function> callback.
The <function>probe</function> callback and other component-constructors which are called
- from the <function>probe</function> callback should be defined with
- the <parameter>__devinit</parameter> prefix. You
- cannot use the <parameter>__init</parameter> prefix for them,
+    from the <function>probe</function> callback cannot be marked with
+    the <parameter>__init</parameter> prefix
because any PCI device could be a hotplug device.
</para>
@@ -728,7 +727,7 @@
<informalexample>
<programlisting>
<![CDATA[
- static void __devexit snd_mychip_remove(struct pci_dev *pci)
+ static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1059,14 +1058,6 @@
</para>
<para>
- As further notes, the destructors (both
- <function>snd_mychip_dev_free</function> and
- <function>snd_mychip_free</function>) cannot be defined with
- the <parameter>__devexit</parameter> prefix, because they may be
- called from the constructor, too, at the false path.
- </para>
-
- <para>
For a device which allows hotplugging, you can use
<function>snd_card_free_when_closed</function>. This one will
postpone the destruction until all devices are closed.
@@ -1120,9 +1111,9 @@
}
/* chip-specific constructor */
- static int __devinit snd_mychip_create(struct snd_card *card,
- struct pci_dev *pci,
- struct mychip **rchip)
+ static int snd_mychip_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct mychip **rchip)
{
struct mychip *chip;
int err;
@@ -1200,7 +1191,7 @@
.name = KBUILD_MODNAME,
.id_table = snd_mychip_ids,
.probe = snd_mychip_probe,
- .remove = __devexit_p(snd_mychip_remove),
+ .remove = snd_mychip_remove,
};
/* module initialization */
@@ -1465,11 +1456,6 @@
</para>
<para>
- Again, remember that you cannot
- use the <parameter>__devexit</parameter> prefix for this destructor.
- </para>
-
- <para>
We didn't implement the hardware disabling part in the above.
If you need to do this, please note that the destructor may be
called even before the initialization of the chip is completed.
@@ -1619,7 +1605,7 @@
.name = KBUILD_MODNAME,
.id_table = snd_mychip_ids,
.probe = snd_mychip_probe,
- .remove = __devexit_p(snd_mychip_remove),
+ .remove = snd_mychip_remove,
};
]]>
</programlisting>
@@ -1630,11 +1616,7 @@
The <structfield>probe</structfield> and
<structfield>remove</structfield> functions have already
been defined in the previous sections.
- The <structfield>remove</structfield> function should
- be defined with the
- <function>__devexit_p()</function> macro, so that it's not
- defined for built-in (and non-hot-pluggable) case. The
- <structfield>name</structfield>
+ The <structfield>name</structfield>
field is the name string of this device. Note that you must not
use a slash <quote>/</quote> in this string.
</para>
@@ -1665,9 +1647,7 @@
<para>
Note that these module entries are tagged with
<parameter>__init</parameter> and
- <parameter>__exit</parameter> prefixes, not
- <parameter>__devinit</parameter> nor
- <parameter>__devexit</parameter>.
+ <parameter>__exit</parameter> prefixes.
</para>
<para>
@@ -1918,7 +1898,7 @@
*/
/* create a pcm device */
- static int __devinit snd_mychip_new_pcm(struct mychip *chip)
+ static int snd_mychip_new_pcm(struct mychip *chip)
{
struct snd_pcm *pcm;
int err;
@@ -1957,7 +1937,7 @@
<informalexample>
<programlisting>
<![CDATA[
- static int __devinit snd_mychip_new_pcm(struct mychip *chip)
+ static int snd_mychip_new_pcm(struct mychip *chip)
{
struct snd_pcm *pcm;
int err;
@@ -2124,7 +2104,7 @@
....
}
- static int __devinit snd_mychip_new_pcm(struct mychip *chip)
+ static int snd_mychip_new_pcm(struct mychip *chip)
{
struct snd_pcm *pcm;
....
@@ -3399,7 +3379,7 @@ struct _snd_pcm_runtime {
<title>Definition of a Control</title>
<programlisting>
<![CDATA[
- static struct snd_kcontrol_new my_control __devinitdata = {
+ static struct snd_kcontrol_new my_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.index = 0,
@@ -3415,13 +3395,6 @@ struct _snd_pcm_runtime {
</para>
<para>
- Most likely the control is created via
- <function>snd_ctl_new1()</function>, and in such a case, you can
- add the <parameter>__devinitdata</parameter> prefix to the
- definition as above.
- </para>
-
- <para>
The <structfield>iface</structfield> field specifies the control
type, <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>, which
is usually <constant>MIXER</constant>.
@@ -3847,10 +3820,8 @@ struct _snd_pcm_runtime {
<para>
<function>snd_ctl_new1()</function> allocates a new
- <structname>snd_kcontrol</structname> instance (that's why the definition
- of <parameter>my_control</parameter> can be with
- the <parameter>__devinitdata</parameter>
- prefix), and <function>snd_ctl_add</function> assigns the given
+ <structname>snd_kcontrol</structname> instance,
+ and <function>snd_ctl_add</function> assigns the given
control component to the card.
</para>
</section>
@@ -3896,7 +3867,7 @@ struct _snd_pcm_runtime {
<![CDATA[
static DECLARE_TLV_DB_SCALE(db_scale_my_control, -4050, 150, 0);
- static struct snd_kcontrol_new my_control __devinitdata = {
+ static struct snd_kcontrol_new my_control = {
...
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
@@ -5761,8 +5732,8 @@ struct _snd_pcm_runtime {
<informalexample>
<programlisting>
<![CDATA[
- static int __devinit snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
....
struct snd_card *card;
@@ -5787,8 +5758,8 @@ struct _snd_pcm_runtime {
<informalexample>
<programlisting>
<![CDATA[
- static int __devinit snd_mychip_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+ static int snd_mychip_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
....
struct snd_card *card;
@@ -5825,7 +5796,7 @@ struct _snd_pcm_runtime {
.name = KBUILD_MODNAME,
.id_table = snd_my_ids,
.probe = snd_my_probe,
- .remove = __devexit_p(snd_my_remove),
+ .remove = snd_my_remove,
#ifdef CONFIG_PM
.suspend = snd_my_suspend,
.resume = snd_my_resume,
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 59c080f084e..a9f288ff54f 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -462,7 +462,7 @@ Differences between the kernel community and corporate structures
The kernel community works differently than most traditional corporate
development environments. Here is a list of things that you can try to
-do to try to avoid problems:
+do to avoid problems:
Good things to say regarding your proposed changes:
- "This solves multiple problems."
- "This deletes 2000 lines of code."
diff --git a/Documentation/PCI/pci-iov-howto.txt b/Documentation/PCI/pci-iov-howto.txt
index fc73ef5d65b..cfaca7e6989 100644
--- a/Documentation/PCI/pci-iov-howto.txt
+++ b/Documentation/PCI/pci-iov-howto.txt
@@ -2,6 +2,9 @@
Copyright (C) 2009 Intel Corporation
Yu Zhao <yu.zhao@intel.com>
+ Update: November 2012
+	-- sysfs-based SR-IOV enable/disable support
+ Donald Dutile <ddutile@redhat.com>
1. Overview
@@ -24,10 +27,21 @@ real existing PCI device.
2.1 How can I enable SR-IOV capability
-The device driver (PF driver) will control the enabling and disabling
-of the capability via API provided by SR-IOV core. If the hardware
-has SR-IOV capability, loading its PF driver would enable it and all
-VFs associated with the PF.
+Multiple methods are available to enable SR-IOV.
+In the first method, the device driver (PF driver) controls the
+enabling and disabling of the capability via an API provided by the SR-IOV core.
+If the hardware has SR-IOV capability, loading its PF driver will
+enable it and all VFs associated with the PF. Some PF drivers require
+a module parameter to be set to determine the number of VFs to enable.
+In the second method, a write to the sysfs file sriov_numvfs
+enables and disables the VFs associated with a PCIe PF. This method
+allows per-PF VF enable/disable values, whereas the first method
+applies to all PFs of the same device. Additionally, the
+PCI SR-IOV core ensures that enable/disable operations are valid,
+which reduces duplication of the same checks across multiple
+drivers, e.g., checking that numvfs == 0 before enabling VFs and
+ensuring that numvfs <= totalvfs.
+The second method is the recommended method for new/future VF devices.
2.2 How can I use the Virtual Functions
@@ -40,13 +54,22 @@ requires device driver that is same as a normal PCI device's.
3.1 SR-IOV API
To enable SR-IOV capability:
+(a) For the first method, in the driver:
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
'nr_virtfn' is number of VFs to be enabled.
+(b) For the second method, from sysfs:
+ echo 'nr_virtfn' > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs
To disable SR-IOV capability:
+(a) For the first method, in the driver:
void pci_disable_sriov(struct pci_dev *dev);
+(b) For the second method, from sysfs:
+ echo 0 > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs
To notify SR-IOV core of Virtual Function Migration:
+(a) In the driver:
irqreturn_t pci_sriov_migration(struct pci_dev *dev);
3.2 Usage example
@@ -88,6 +111,22 @@ static void dev_shutdown(struct pci_dev *dev)
...
}
+static int dev_sriov_configure(struct pci_dev *dev, int numvfs)
+{
+ if (numvfs > 0) {
+ ...
+ pci_enable_sriov(dev, numvfs);
+ ...
+ return numvfs;
+ }
+ if (numvfs == 0) {
+ ....
+ pci_disable_sriov(dev);
+ ...
+ return 0;
+ }
+}
+
static struct pci_driver dev_driver = {
.name = "SR-IOV Physical Function driver",
.id_table = dev_id_table,
@@ -96,4 +135,5 @@ static struct pci_driver dev_driver = {
.suspend = dev_suspend,
.resume = dev_resume,
.shutdown = dev_shutdown,
+ .sriov_configure = dev_sriov_configure,
};
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 6f706aca204..f8ebcde43b1 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -51,7 +51,6 @@ int dbg;
int print_delays;
int print_io_accounting;
int print_task_context_switch_counts;
-__u64 stime, utime;
#define PRINTF(fmt, arg...) { \
if (dbg) { \
diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt
new file mode 100644
index 00000000000..35c3f541547
--- /dev/null
+++ b/Documentation/acpi/initrd_table_override.txt
@@ -0,0 +1,94 @@
+Overriding ACPI tables via initrd
+=================================
+
+1) Introduction (What is this about)
+2) What is this for
+3) How does it work
+4) References (Where to retrieve userspace tools)
+
+1) What is this about
+---------------------
+
+If the ACPI_INITRD_TABLE_OVERRIDE compile option is true, it is possible to
+override nearly any ACPI table provided by the BIOS with an instrumented,
+modified one.
+
+For a full list of ACPI tables that can be overridden, take a look at
+the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in drivers/acpi/osl.c.
+All ACPI tables that iasl (Intel's ACPI compiler and disassembler) knows about
+should be overridable, except:
+ - ACPI_SIG_RSDP (has a signature of 6 bytes)
+ - ACPI_SIG_FACS (does not have an ordinary ACPI table header)
+Support for both could be added as well.
+
+
+2) What is this for
+-------------------
+
+Please keep in mind that this is a debug option.
+ACPI tables should not get overridden for production use.
+If BIOS ACPI tables are overridden the kernel will get tainted with the
+TAINT_OVERRIDDEN_ACPI_TABLE flag.
+Complain to your platform/BIOS vendor if you find a bug which is so severe
+that a workaround is not accepted in the Linux kernel.
+
+Still, it can and should be enabled in any kernel, because:
+ - There is no functional change with non-instrumented initrds
+ - It provides a powerful feature to easily debug and test ACPI BIOS table
+ compatibility with the Linux kernel.
+
+
+3) How does it work
+-------------------
+
+# Extract the machine's ACPI tables:
+cd /tmp
+acpidump >acpidump
+acpixtract -a acpidump
+# Disassemble, modify and recompile them:
+iasl -d *.dat
+# For example add this statement into a _PRT (PCI Routing Table) function
+# of the DSDT:
+Store("HELLO WORLD", debug)
+iasl -sa dsdt.dsl
+# Add the raw ACPI tables to an uncompressed cpio archive.
+# They must be put into a /kernel/firmware/acpi directory inside the
+# cpio archive.
+# The uncompressed cpio archive must be the first.
+# Other, typically compressed cpio archives, must be
+# concatenated on top of the uncompressed one.
+mkdir -p kernel/firmware/acpi
+cp dsdt.aml kernel/firmware/acpi
+# A maximum of: #define ACPI_OVERRIDE_TABLES 10
+# tables are currently allowed (see osl.c):
+iasl -sa facp.dsl
+iasl -sa ssdt1.dsl
+cp facp.aml kernel/firmware/acpi
+cp ssdt1.aml kernel/firmware/acpi
+# Create the uncompressed cpio archive and concatenate the original initrd
+# on top:
+find kernel | cpio -H newc --create > /boot/instrumented_initrd
+cat /boot/initrd >>/boot/instrumented_initrd
+# reboot with increased acpi debug level, e.g. boot params:
+acpi.debug_level=0x2 acpi.debug_layer=0xFFFFFFFF
+# and check your syslog:
+[ 1.268089] ACPI: PCI Interrupt Routing Table [\_SB_.PCI0._PRT]
+[ 1.272091] [ACPI Debug] String [0x0B] "HELLO WORLD"
+
+iasl is able to disassemble and recompile quite a lot of different ACPI
+tables, including static ones.
+
+
+4) Where to retrieve userspace tools
+------------------------------------
+
+iasl and acpixtract are part of Intel's ACPICA project:
+http://acpica.org/
+and should be packaged by distributions (for example in the acpica package
+on SUSE).
+
+acpidump can be found in Len Brown's pmtools:
+ftp://kernel.org/pub/linux/kernel/people/lenb/acpi/utils/pmtools/acpidump
+This tool is also part of the acpica package on SUSE.
+Alternatively, the ACPI tables currently in use can be retrieved via sysfs in recent kernels:
+/sys/firmware/acpi/tables
diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt
index bfc9cb19abc..c71487d399d 100644
--- a/Documentation/aoe/aoe.txt
+++ b/Documentation/aoe/aoe.txt
@@ -125,7 +125,9 @@ DRIVER OPTIONS
The aoe_deadsecs module parameter determines the maximum number of
seconds that the driver will wait for an AoE device to provide a
response to an AoE command. After aoe_deadsecs seconds have
- elapsed, the AoE device will be marked as "down".
+ elapsed, the AoE device will be marked as "down". A value of zero
+ is supported for testing purposes and makes the aoe driver keep
+ trying AoE commands forever.
The aoe_maxout module parameter has a default of 128. This is the
maximum number of unresponded packets that will be sent to an AoE
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
index a564ceea9e9..4484e021290 100644
--- a/Documentation/arm/OMAP/DSS
+++ b/Documentation/arm/OMAP/DSS
@@ -285,7 +285,10 @@ FB0 +-- GFX ---- LCD ---- LCD
Misc notes
----------
-OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator.
+OMAP FB allocates the framebuffer memory using the standard DMA allocator. You
+can enable the Contiguous Memory Allocator (CONFIG_CMA) to improve the DMA
+allocator, and if CMA is enabled, you can use the "cma=" kernel parameter to
+increase the global memory area for CMA.
Using DSI DPLL to generate pixel clock it is possible produce the pixel clock
of 86.5MHz (max possible), and with that you get 1280x1024@57 output from DVI.
@@ -301,11 +304,6 @@ framebuffer parameters.
Kernel boot arguments
---------------------
-vram=<size>[,<physaddr>]
- - Amount of total VRAM to preallocate and optionally a physical start
- memory address. For example, "10M". omapfb allocates memory for
- framebuffers from VRAM.
-
omapfb.mode=<display>:<mode>[,...]
- Default video mode for specified displays. For example,
"dvi:800x400MR-24@60". See drivers/video/modedb.c.
diff --git a/Documentation/backlight/lp855x-driver.txt b/Documentation/backlight/lp855x-driver.txt
index f5e4caafab7..1529394cfe8 100644
--- a/Documentation/backlight/lp855x-driver.txt
+++ b/Documentation/backlight/lp855x-driver.txt
@@ -35,11 +35,8 @@ For supporting platform specific data, the lp855x platform data can be used.
* mode : Brightness control mode. PWM or register based.
* device_control : Value of DEVICE CONTROL register.
* initial_brightness : Initial value of backlight brightness.
-* pwm_data : Platform specific pwm generation functions.
+* period_ns : Platform specific PWM period value, in nanoseconds.
Only valid when brightness is pwm input mode.
- Functions should be implemented by PWM driver.
- - pwm_set_intensity() : set duty of PWM
- - pwm_get_intensity() : get current duty of PWM
* load_new_rom_data :
0 : use default configuration data
1 : update values of eeprom or eprom registers on loading driver
@@ -71,8 +68,5 @@ static struct lp855x_platform_data lp8556_pdata = {
.mode = PWM_BASED,
.device_control = PWM_CONFIG(LP8556),
.initial_brightness = INITIAL_BRT,
- .pwm_data = {
- .pwm_set_intensity = platform_pwm_set_intensity,
- .pwm_get_intensity = platform_pwm_get_intensity,
- },
+ .period_ns = 1000000,
};
diff --git a/Documentation/bus-devices/ti-gpmc.txt b/Documentation/bus-devices/ti-gpmc.txt
new file mode 100644
index 00000000000..cc9ce57e0a2
--- /dev/null
+++ b/Documentation/bus-devices/ti-gpmc.txt
@@ -0,0 +1,122 @@
+GPMC (General Purpose Memory Controller):
+=========================================
+
+GPMC is a unified memory controller dedicated to interfacing external
+memory devices like
+ * Asynchronous SRAM-like memories and application specific integrated
+   circuit devices.
+ * Asynchronous, synchronous, and page mode burst NOR flash devices
+ * NAND flash
+ * Pseudo-SRAM devices
+
+GPMC is found on Texas Instruments SoCs (OMAP based).
+IP details: http://www.ti.com/lit/pdf/spruh73 section 7.1
+
+
+GPMC generic timing calculation:
+================================
+
+GPMC has certain timings that have to be programmed for proper
+functioning of the peripheral, while the peripheral has another set of
+timings. To have the peripheral work with gpmc, peripheral timings have to
+be translated into a form gpmc can understand. The way they have to be
+translated depends on the connected peripheral. Also, certain gpmc
+timings depend on the gpmc clock frequency. Hence a
+generic timing routine was developed to meet the above requirements.
+
+The generic routine provides a generic method to calculate gpmc timings
+from gpmc peripheral timings. The struct gpmc_device_timings fields have to
+be updated with timings from the datasheet of the peripheral that is
+connected to gpmc. A few of the peripheral timings can be fed either
+in time or in cycles; provision to handle this scenario has been
+provided (refer to the struct gpmc_device_timings definition). It may
+happen that a timing as specified by the peripheral datasheet is not present
+in the timing structure; in this scenario, try to correlate the peripheral
+timing to one that is available. If that doesn't work, try to add a new
+field as required by the peripheral, teach the generic timing routine to
+handle it, and make sure that it does not break any of the existing users.
+There may also be cases where the peripheral datasheet doesn't mention
+certain fields of struct gpmc_device_timings; zero those entries.
+
+The generic timing routine has been verified to work properly on
+multiple OneNAND and tusb6010 peripherals.
+
+A word of caution: the generic timing routine has been developed based
+on an understanding of gpmc timings, peripheral timings and the available
+custom timing routines, a kind of reverse engineering done without most
+of the datasheets and hardware (to be exact, none of those supported in
+mainline that have a custom timing routine), and by simulation.
+
+gpmc timing dependency on peripheral timings:
+[<gpmc_timing>: <peripheral timing1>, <peripheral timing2> ...]
+
+1. common
+cs_on: t_ceasu
+adv_on: t_avdasu, t_ceavd
+
+2. sync common
+sync_clk: clk
+page_burst_access: t_bacc
+clk_activation: t_ces, t_avds
+
+3. read async muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu, t_aavdh
+access: t_iaa, t_oe, t_ce, t_aa
+rd_cycle: t_rd_cycle, t_cez_r, t_oez
+
+4. read async non-muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu
+access: t_iaa, t_oe, t_ce, t_aa
+rd_cycle: t_rd_cycle, t_cez_r, t_oez
+
+5. read sync muxed
+adv_rd_off: t_avdp_r, t_avdh
+oe_on: t_oeasu, t_ach, cyc_aavdh_oe
+access: t_iaa, cyc_iaa, cyc_oe
+rd_cycle: t_cez_r, t_oez, t_ce_rdyz
+
+6. read sync non-muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu
+access: t_iaa, cyc_iaa, cyc_oe
+rd_cycle: t_cez_r, t_oez, t_ce_rdyz
+
+7. write async muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu, t_aavdh, cyc_aavdh_we
+we_off: t_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_wr_cycle
+
+8. write async non-muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu
+we_off: t_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_wr_cycle
+
+9. write sync muxed
+adv_wr_off: t_avdp_w, t_avdh
+we_on, wr_data_mux_bus: t_weasu, t_rdyo, t_aavdh, cyc_aavdh_we
+we_off: t_wpl, cyc_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_ce_rdyz
+
+10. write sync non-muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu, t_rdyo
+we_off: t_wpl, cyc_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_ce_rdyz
+
+
+Note: Many gpmc timings are dependent on other gpmc timings (a few
+gpmc timings depend purely on other gpmc timings, which is why some of
+the gpmc timings are missing above), and this results in an indirect
+dependency of peripheral timings on gpmc timings other than those
+mentioned above; refer to the timing routine for more details. To know
+what these peripheral timings correspond to, please see the explanations
+in the struct gpmc_device_timings definition. For gpmc timings, refer to
+the IP details (link above).
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index cefd3d8bbd1..12e01d432bf 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -218,7 +218,7 @@ and name space for cpusets, with a minimum of additional kernel code.
The cpus and mems files in the root (top_cpuset) cpuset are
read-only. The cpus file automatically tracks the value of
cpu_online_mask using a CPU hotplug notifier, and the mems file
-automatically tracks the value of node_states[N_HIGH_MEMORY]--i.e.,
+automatically tracks the value of node_states[N_MEMORY]--i.e.,
nodes with memory--using the cpuset_track_online_nodes() hook.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index a25cb3fafeb..8b8c28b9864 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -71,6 +71,11 @@ Brief summary of control files.
memory.oom_control # set/show oom controls.
memory.numa_stat # show the number of memory usage per numa node
+ memory.kmem.limit_in_bytes # set/show hard limit for kernel memory
+ memory.kmem.usage_in_bytes # show current kernel memory allocation
+ memory.kmem.failcnt # show the number of kernel memory usage hits limits
+ memory.kmem.max_usage_in_bytes # show max kernel memory usage recorded
+
memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory
memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation
memory.kmem.tcp.failcnt # show the number of tcp buf memory usage hits limits
@@ -268,20 +273,73 @@ the amount of kernel memory used by the system. Kernel memory is fundamentally
different than user memory, since it can't be swapped out, which makes it
possible to DoS the system by consuming too much of this precious resource.
+Kernel memory won't be accounted at all until a limit on a group is set. This
+allows existing setups to continue working without disruption. The limit
+cannot be set if the cgroup has children, or if there are already tasks in the
+cgroup. Attempting to set the limit under those conditions will return -EBUSY.
+When use_hierarchy == 1 and a group is accounted, its children will
+automatically be accounted regardless of their limit value.
+
+After a group is first limited, it will keep being accounted until it
+is removed. The memory limitation itself can of course be removed by writing
+-1 to memory.kmem.limit_in_bytes. In this case, kmem will be accounted, but not
+limited.
+
Kernel memory limits are not imposed for the root cgroup. Usage for the root
-cgroup may or may not be accounted.
+cgroup may or may not be accounted. The memory used is accumulated into
+memory.kmem.usage_in_bytes, or in a separate counter when it makes sense
+(currently only for tcp).
+The "kmem" counter is fed into the main counter, so kmem charges will
+also be visible from the user counter.
Currently no soft limit is implemented for kernel memory. It is future work
to trigger slab reclaim when those limits are reached.
2.7.1 Current Kernel Memory resources accounted
+* stack pages: every process consumes some stack pages. By accounting into
+kernel memory, we prevent new processes from being created when the kernel
+memory usage is too high.
+
+* slab pages: pages allocated by the SLAB or SLUB allocator are tracked. A copy
+of each kmem_cache is created the first time the cache is touched
+from inside the memcg. The creation is done lazily, so some objects can still be
+skipped while the cache is being created. All objects in a slab page should
+belong to the same memcg. This only fails to hold when a task is migrated to a
+different memcg during the page allocation by the cache.
+
* sockets memory pressure: some sockets protocols have memory pressure
thresholds. The Memory Controller allows them to be controlled individually
per cgroup, instead of globally.
* tcp memory pressure: sockets memory pressure for the tcp protocol.
+2.7.3 Common use cases
+
+Because the "kmem" counter is fed to the main user counter, kernel memory can
+never be limited completely independently of user memory. Say "U" is the user
+limit, and "K" the kernel limit. There are three possible ways limits can be
+set:
+
+ U != 0, K = unlimited:
+ This is the standard memcg limitation mechanism already present before kmem
+ accounting. Kernel memory is completely ignored.
+
+ U != 0, K < U:
+ Kernel memory is a subset of the user memory. This setup is useful in
+ deployments where the total amount of memory per-cgroup is overcommitted.
+ Overcommitting kernel memory limits is definitely not recommended, since the
+ box can still run out of non-reclaimable memory.
+ In this case, the admin could set up K so that the sum of all groups is
+ never greater than the total memory, and freely set U at the cost of their
+ QoS.
+
+ U != 0, K >= U:
+ Because kmem charges will also be fed to the user counter, reclaim will be
+ triggered for the cgroup for both kinds of memory. This setup gives the
+ admin a unified view of memory, and it is also useful for people who just
+ want to track kernel memory usage.
+
3. User Interface
0. Configuration
@@ -290,6 +348,7 @@ a. Enable CONFIG_CGROUPS
b. Enable CONFIG_RESOURCE_COUNTERS
c. Enable CONFIG_MEMCG
d. Enable CONFIG_MEMCG_SWAP (to use swap extension)
+e. Enable CONFIG_MEMCG_KMEM (to use kmem extension)
1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
# mount -t tmpfs none /sys/fs/cgroup
@@ -406,6 +465,11 @@ About use_hierarchy, see Section 6.
Because rmdir() moves all pages to parent, some out-of-use page caches can be
moved to the parent. If you want to avoid that, force_empty will be useful.
+ Also, note that when memory.kmem.limit_in_bytes is set, the charges due to
+ kernel pages will still be seen. This is not considered a failure and the
+ write will still return success. In this case, it is expected that
+ memory.kmem.usage_in_bytes == memory.usage_in_bytes.
+
About use_hierarchy, see Section 6.
5.2 stat file
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index 0c4a344e78f..c4d99ed0b41 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -83,16 +83,17 @@ to work with it.
res_counter->lock internally (it must be called with res_counter->lock
held). The force parameter indicates whether we can bypass the limit.
- e. void res_counter_uncharge[_locked]
+ e. u64 res_counter_uncharge[_locked]
(struct res_counter *rc, unsigned long val)
When a resource is released (freed) it should be de-accounted
from the resource counter it was accounted to. This is called
- "uncharging".
+ "uncharging". The return value of this function indicates the amount
+ of charges still present in the counter.
The _locked routines imply that the res_counter->lock is taken.
- f. void res_counter_uncharge_until
+ f. u64 res_counter_uncharge_until
(struct res_counter *rc, struct res_counter *top,
      unsigned long val)
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-reset.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-reset.txt
new file mode 100644
index 00000000000..ecdb57d69db
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-reset.txt
@@ -0,0 +1,11 @@
+Altera SOCFPGA Reset Manager
+
+Required properties:
+- compatible : "altr,rst-mgr"
+- reg : Should contain 1 register range (address and length)
+
+Example:
+ rstmgr@ffd05000 {
+ compatible = "altr,rst-mgr";
+ reg = <0xffd05000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
new file mode 100644
index 00000000000..07c65e3cdcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
@@ -0,0 +1,11 @@
+Altera SOCFPGA System Manager
+
+Required properties:
+- compatible : "altr,sys-mgr"
+- reg : Should contain 1 register range (address and length)
+
+Example:
+ sysmgr@ffd08000 {
+ compatible = "altr,sys-mgr";
+ reg = <0xffd08000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
index 70c0dc5f00e..61df564c0d2 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -6,9 +6,15 @@ Required properties:
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: The number of cells to define the interrupts. Should be 1.
The cell is the IRQ number
+
- reg: Should contain PMIC registers location and length. First pair
for the main interrupt registers, second pair for the per-CPU
- interrupt registers
+  interrupt registers. For this last pair, to be compliant with SMP
+  support, the "virtual" registers must be used (for the record, these
+  registers automatically map to the interrupt controller registers of
+  the current CPU).
+
+
Example:
@@ -18,6 +24,6 @@ Example:
#address-cells = <1>;
#size-cells = <1>;
interrupt-controller;
- reg = <0xd0020000 0x1000>,
- <0xd0021000 0x1000>;
+ reg = <0xd0020a00 0x1d0>,
+ <0xd0021070 0x58>;
};
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
new file mode 100644
index 00000000000..926b4d6aae7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
@@ -0,0 +1,20 @@
+Power Management Service Unit (PMSU)
+-----------------------------------
+Available on Marvell SOCs: Armada 370 and Armada XP
+
+Required properties:
+
+- compatible: "marvell,armada-370-xp-pmsu"
+
+- reg: Should contain PMSU registers location and length. First pair
+ for the per-CPU SW Reset Control registers, second pair for the
+ Power Management Service Unit.
+
+Example:
+
+armada-370-xp-pmsu@d0022000 {
+ compatible = "marvell,armada-370-xp-pmsu";
+ reg = <0xd0022100 0x430>,
+ <0xd0020800 0x20>;
+};
+
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
index 8b6ea2267c9..64830118b01 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: Should be "marvell,armada-370-xp-timer"
- interrupts: Should contain the list of Global Timer interrupts
- reg: Should contain the base address of the Global Timer registers
+- clocks: clock driving the timer hardware
Optional properties:
- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
diff --git a/Documentation/devicetree/bindings/arm/coherency-fabric.txt b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
new file mode 100644
index 00000000000..17d8cd10755
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
@@ -0,0 +1,21 @@
+Coherency fabric
+----------------
+Available on Marvell SOCs: Armada 370 and Armada XP
+
+Required properties:
+
+- compatible: "marvell,coherency-fabric"
+
+- reg: Should contain coherency fabric registers location and
+ length. First pair for the coherency fabric registers, second pair
+  for the per-CPU fabric registers.
+
+Example:
+
+coherency-fabric@d0020200 {
+ compatible = "marvell,coherency-fabric";
+ reg = <0xd0020200 0xb0>,
+ <0xd0021810 0x1c>;
+
+};
+
diff --git a/Documentation/devicetree/bindings/arm/davinci/nand.txt b/Documentation/devicetree/bindings/arm/davinci/nand.txt
index 49fc7ada929..3545ea704b5 100644
--- a/Documentation/devicetree/bindings/arm/davinci/nand.txt
+++ b/Documentation/devicetree/bindings/arm/davinci/nand.txt
@@ -23,6 +23,9 @@ Recommended properties :
- ti,davinci-nand-buswidth: buswidth 8 or 16
- ti,davinci-nand-use-bbt: use flash based bad block table support.
+nand device bindings may contain additional sub-nodes describing
+partitions of the address space. See partition.txt for more detail.
+
Example(da850 EVM ):
nand_cs3@62000000 {
compatible = "ti,davinci-nand";
@@ -35,4 +38,9 @@ nand_cs3@62000000 {
ti,davinci-ecc-mode = "hw";
ti,davinci-ecc-bits = <4>;
ti,davinci-nand-use-bbt;
+
+ partition@180000 {
+ label = "ubifs";
+ reg = <0x180000 0x7e80000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 6528e215c5f..5216b419016 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -4,14 +4,13 @@ Exynos processors include support for multiple power domains which are used
to gate power to one or more peripherals on the processor.
Required Properties:
-- compatiable: should be one of the following.
+- compatible: should be one of the following.
* samsung,exynos4210-pd - for exynos4210 type power domain.
- reg: physical base address of the controller and length of memory mapped
region.
-Optional Properties:
-- samsung,exynos4210-pd-off: Specifies that the power domain is in turned-off
- state during boot and remains to be turned-off until explicitly turned-on.
+The node of a device using power domains must have a samsung,power-domain
+property defined with a phandle to the respective power domain.
Example:
@@ -19,3 +18,11 @@ Example:
compatible = "samsung,exynos4210-pd";
reg = <0x10023C00 0x10>;
};
+
+Example of the node using power domain:
+
+ node {
+ /* ... */
+ samsung,power-domain = <&lcd0>;
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index ac9e7516756..f79818711e8 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -41,6 +41,10 @@ i.MX6 Quad SABRE Smart Device Board
Required root node properties:
- compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+i.MX6 Quad SABRE Automotive Board
+Required root node properties:
+ - compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+
Generic i.MX boards
-------------------
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 7c3ee3aeb7b..cbef09b5c8a 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -10,6 +10,12 @@ Required properties:
"arm,pl310-cache"
"arm,l220-cache"
"arm,l210-cache"
+ "marvell,aurora-system-cache": Marvell Controller designed to be
+ compatible with the ARM one, with system cache mode (meaning
+ maintenance operations on L1 are broadcasted to the L2 and L2
+ performs the same operation).
+	"marvell,aurora-outer-cache": Marvell Controller designed to be
+ compatible with the ARM one with outer cache mode.
- cache-unified : Specifies the cache is a unified cache.
- cache-level : Should be set to 2 for a level 2 cache.
- reg : Physical base address and size of cache controller's memory mapped
@@ -29,6 +35,9 @@ Optional properties:
filter. Addresses in the filter window are directed to the M1 port. Other
addresses will go to the M0 port.
- interrupts : 1 combined interrupt.
+- cache-id-part: cache id part number to be used if it is not present
+ on hardware
+- wt-override: If present then L2 is forced to Write through mode
Example:
diff --git a/Documentation/devicetree/bindings/arm/spear/shirq.txt b/Documentation/devicetree/bindings/arm/spear/shirq.txt
new file mode 100644
index 00000000000..13fbb8866bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spear/shirq.txt
@@ -0,0 +1,48 @@
+* SPEAr Shared IRQ layer (shirq)
+
+SPEAr3xx architecture includes shared/multiplexed irqs for certain set
+of devices. The multiplexor provides a single interrupt to parent
+interrupt controller (VIC) on behalf of a group of devices.
+
+There can be multiple groups available on SPEAr3xx variants, but not
+exceeding 4. The number of devices in a group can differ; further, they
+may share the same set of status/mask registers spanning across different
+bit masks. Also, in some cases the group may not have enable or other
+registers. This makes the software a little complex.
+
+A single node in the device tree is used to describe the shared
+interrupt multiplexor (one node for all groups). A group in the
+interrupt controller shares config/control registers with other groups.
+For example, a 32-bit interrupt enable/disable config register can
+accommodate up to 4 interrupt groups.
+
+Required properties:
+ - compatible: should be either of
+ - "st,spear300-shirq"
+ - "st,spear310-shirq"
+ - "st,spear320-shirq"
+ - interrupt-controller: Identifies the node as an interrupt controller.
+ - #interrupt-cells: should be <1> which basically contains the offset
+ (starting from 0) of interrupts for all the groups.
+ - reg: Base address and size of shirq registers.
+ - interrupts: The list of interrupts generated by the groups which are
+ then connected to a parent interrupt controller. Each group is
+   associated with one of the interrupts, hence the number of interrupts
+   (to the parent) is equal to the number of groups. The format of the
+   interrupt specifier depends on the parent interrupt controller.
+
+Optional properties:
+ - interrupt-parent: pHandle of the parent interrupt controller, if not
+ inherited from the parent node.
+
+Example:
+
+The following is an example from the SPEAr320 SoC dtsi file.
+
+shirq: interrupt-controller@0xb3000000 {
+ compatible = "st,spear320-shirq";
+ reg = <0xb3000000 0x1000>;
+ interrupts = <28 29 30 1>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+};
diff --git a/Documentation/devicetree/bindings/ata/exynos-sata-phy.txt b/Documentation/devicetree/bindings/ata/exynos-sata-phy.txt
new file mode 100644
index 00000000000..37824fac688
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/exynos-sata-phy.txt
@@ -0,0 +1,14 @@
+* Samsung SATA PHY Controller
+
+SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers.
+Each SATA PHY controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains "samsung,exynos5-sata-phy"
+- reg : <registers mapping>
+
+Example:
+ sata@ffe07000 {
+ compatible = "samsung,exynos5-sata-phy";
+ reg = <0xffe07000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/exynos-sata.txt b/Documentation/devicetree/bindings/ata/exynos-sata.txt
new file mode 100644
index 00000000000..0849f1025e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/exynos-sata.txt
@@ -0,0 +1,17 @@
+* Samsung AHCI SATA Controller
+
+SATA nodes are defined to describe on-chip Serial ATA controllers.
+Each SATA controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains "samsung,exynos5-sata"
+- interrupts : <interrupt mapping for SATA IRQ>
+- reg : <registers mapping>
+- samsung,sata-freq : <frequency in MHz>
+
+Example:
+ sata@ffe08000 {
+ compatible = "samsung,exynos5-sata";
+ reg = <0xffe08000 0x1000>;
+		interrupts = <115>;
+		samsung,sata-freq = <66>;	/* assumed example value, in MHz */
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx25-clock.txt b/Documentation/devicetree/bindings/clock/imx25-clock.txt
new file mode 100644
index 00000000000..c2a3525ecb4
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx25-clock.txt
@@ -0,0 +1,162 @@
+* Clock bindings for Freescale i.MX25
+
+Required properties:
+- compatible: Should be "fsl,imx25-ccm"
+- reg: Address and length of the register set
+- interrupts: Should contain CCM interrupt
+- #clock-cells: Should be <1>
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. The following is a full list of i.MX25
+clocks and IDs.
+
+ Clock ID
+ ---------------------------
+ dummy 0
+ osc 1
+ mpll 2
+ upll 3
+ mpll_cpu_3_4 4
+ cpu_sel 5
+ cpu 6
+ ahb 7
+ usb_div 8
+ ipg 9
+ per0_sel 10
+ per1_sel 11
+ per2_sel 12
+ per3_sel 13
+ per4_sel 14
+ per5_sel 15
+ per6_sel 16
+ per7_sel 17
+ per8_sel 18
+ per9_sel 19
+ per10_sel 20
+ per11_sel 21
+ per12_sel 22
+ per13_sel 23
+ per14_sel 24
+ per15_sel 25
+ per0 26
+ per1 27
+ per2 28
+ per3 29
+ per4 30
+ per5 31
+ per6 32
+ per7 33
+ per8 34
+ per9 35
+ per10 36
+ per11 37
+ per12 38
+ per13 39
+ per14 40
+ per15 41
+ csi_ipg_per 42
+ epit_ipg_per 43
+ esai_ipg_per 44
+ esdhc1_ipg_per 45
+ esdhc2_ipg_per 46
+ gpt_ipg_per 47
+ i2c_ipg_per 48
+ lcdc_ipg_per 49
+ nfc_ipg_per 50
+ owire_ipg_per 51
+ pwm_ipg_per 52
+ sim1_ipg_per 53
+ sim2_ipg_per 54
+ ssi1_ipg_per 55
+ ssi2_ipg_per 56
+ uart_ipg_per 57
+ ata_ahb 58
+ reserved 59
+ csi_ahb 60
+ emi_ahb 61
+ esai_ahb 62
+ esdhc1_ahb 63
+ esdhc2_ahb 64
+ fec_ahb 65
+ lcdc_ahb 66
+ rtic_ahb 67
+ sdma_ahb 68
+ slcdc_ahb 69
+ usbotg_ahb 70
+ reserved 71
+ reserved 72
+ reserved 73
+ reserved 74
+ can1_ipg 75
+ can2_ipg 76
+ csi_ipg 77
+ cspi1_ipg 78
+ cspi2_ipg 79
+ cspi3_ipg 80
+ dryice_ipg 81
+ ect_ipg 82
+ epit1_ipg 83
+ epit2_ipg 84
+ reserved 85
+ esdhc1_ipg 86
+ esdhc2_ipg 87
+ fec_ipg 88
+ reserved 89
+ reserved 90
+ reserved 91
+ gpt1_ipg 92
+ gpt2_ipg 93
+ gpt3_ipg 94
+ gpt4_ipg 95
+ reserved 96
+ reserved 97
+ reserved 98
+ iim_ipg 99
+ reserved 100
+ reserved 101
+ kpp_ipg 102
+ lcdc_ipg 103
+ reserved 104
+ pwm1_ipg 105
+ pwm2_ipg 106
+ pwm3_ipg 107
+ pwm4_ipg 108
+ rngb_ipg 109
+ reserved 110
+ scc_ipg 111
+ sdma_ipg 112
+ sim1_ipg 113
+ sim2_ipg 114
+ slcdc_ipg 115
+ spba_ipg 116
+ ssi1_ipg 117
+ ssi2_ipg 118
+ tsc_ipg 119
+ uart1_ipg 120
+ uart2_ipg 121
+ uart3_ipg 122
+ uart4_ipg 123
+ uart5_ipg 124
+ reserved 125
+ wdt_ipg 126
+
+Examples:
+
+clks: ccm@53f80000 {
+ compatible = "fsl,imx25-ccm";
+ reg = <0x53f80000 0x4000>;
+ interrupts = <31>;
+ clock-output-names = ...
+ "uart_ipg",
+ "uart_serial",
+ ...;
+};
+
+uart1: serial@43f90000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x43f90000 0x4000>;
+ interrupts = <45>;
+ clocks = <&clks 79>, <&clks 50>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
new file mode 100644
index 00000000000..1e662948661
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -0,0 +1,47 @@
+* Core Clock bindings for Marvell MVEBU SoCs
+
+Marvell MVEBU SoCs usually allow determining core clock frequencies by
+reading the Sample-At-Reset (SAR) register. The core clock consumer should
+specify the desired clock by having the clock ID in its "clocks" phandle cell.
+
+The following is a list of provided IDs and clock names on Armada 370/XP:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = nbclk (L2 Cache clock)
+ 3 = hclk (DRAM control clock)
+ 4 = dramclk (DDR clock)
+
+The following is a list of provided IDs and clock names on Kirkwood and Dove:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU0 clock)
+ 2 = l2clk (L2 Cache clock derived from CPU0 clock)
+ 3 = ddrclk (DDR controller clock derived from CPU0 clock)
+
+Required properties:
+- compatible : shall be one of the following:
+ "marvell,armada-370-core-clock" - For Armada 370 SoC core clocks
+ "marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
+ "marvell,dove-core-clock" - for Dove SoC core clocks
+ "marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
+ "marvell,mv88f6180-core-clock" - for Kirkwood MV88f6180 SoC
+- reg : shall be the register address of the Sample-At-Reset (SAR) register
+- #clock-cells : from common clock binding; shall be set to 1
+
+Optional properties:
+- clock-output-names : from common clock binding; allows overwriting the
+  default clock output names ("tclk", "cpuclk", "l2clk", "ddrclk")
+
+Example:
+
+core_clk: core-clocks@d0214 {
+ compatible = "marvell,dove-core-clock";
+ reg = <0xd0214 0x4>;
+ #clock-cells = <1>;
+};
+
+spi0: spi@10600 {
+ compatible = "marvell,orion-spi";
+ /* ... */
+ /* get tclk from core clock provider */
+ clocks = <&core_clk 0>;
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
new file mode 100644
index 00000000000..feb83013071
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
@@ -0,0 +1,21 @@
+Device Tree Clock bindings for cpu clock of Marvell EBU platforms
+
+Required properties:
+- compatible : shall be one of the following:
+ "marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
+- reg : Address and length of the clock complex register set
+- #clock-cells : should be set to 1.
+- clocks : shall be the input parent clock phandle for the clock.
+
+Example:
+
+cpuclk: clock-complex@d0018700 {
+ #clock-cells = <1>;
+ compatible = "marvell,armada-xp-cpu-clock";
+ reg = <0xd0018700 0xA0>;
+ clocks = <&coreclk 1>;
+};
+
+cpu@0 {
+ compatible = "marvell,sheeva-v7";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
new file mode 100644
index 00000000000..7337005ef5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -0,0 +1,119 @@
+* Gated Clock bindings for Marvell Orion SoCs
+
+Marvell Dove and Kirkwood allow some peripheral clocks to be gated to save
+some power. The clock consumer should specify the desired clock by having
+the clock ID in its "clocks" phandle cell. The clock ID is directly mapped to
+the corresponding clock gating control bit in HW to ease manual clock lookup
+in datasheet.
+
+The following is a list of provided IDs for Armada 370:
+ID Clock Peripheral
+-----------------------------------
+0 Audio AC97 Cntrl
+1 pex0_en PCIe 0 Clock out
+2 pex1_en PCIe 1 Clock out
+3 ge1 Gigabit Ethernet 1
+4 ge0 Gigabit Ethernet 0
+5 pex0 PCIe Cntrl 0
+9 pex1 PCIe Cntrl 1
+15 sata0 SATA Host 0
+17 sdio SDHCI Host
+25 tdm Time Division Mplx
+28 ddr DDR Cntrl
+30	sata1	SATA Host 1
+
+The following is a list of provided IDs for Armada XP:
+ID Clock Peripheral
+-----------------------------------
+0 audio Audio Cntrl
+1 ge3 Gigabit Ethernet 3
+2 ge2 Gigabit Ethernet 2
+3 ge1 Gigabit Ethernet 1
+4 ge0 Gigabit Ethernet 0
+5 pex0 PCIe Cntrl 0
+6 pex1 PCIe Cntrl 1
+7 pex2 PCIe Cntrl 2
+8 pex3 PCIe Cntrl 3
+13 bp
+14 sata0lnk
+15 sata0 SATA Host 0
+16 lcd LCD Cntrl
+17 sdio SDHCI Host
+18 usb0 USB Host 0
+19 usb1 USB Host 1
+20 usb2 USB Host 2
+22 xor0 XOR DMA 0
+23 crypto CESA engine
+25 tdm Time Division Mplx
+28 xor1 XOR DMA 1
+29 sata1lnk
+30	sata1	SATA Host 1
+
+The following is a list of provided IDs for Dove:
+ID Clock Peripheral
+-----------------------------------
+0 usb0 USB Host 0
+1 usb1 USB Host 1
+2 ge Gigabit Ethernet
+3 sata SATA Host
+4 pex0 PCIe Cntrl 0
+5 pex1 PCIe Cntrl 1
+8 sdio0 SDHCI Host 0
+9 sdio1 SDHCI Host 1
+10 nand NAND Cntrl
+11 camera Camera Cntrl
+12 i2s0 I2S Cntrl 0
+13 i2s1 I2S Cntrl 1
+15 crypto CESA engine
+21 ac97 AC97 Cntrl
+22 pdma Peripheral DMA
+23 xor0 XOR DMA 0
+24 xor1 XOR DMA 1
+30	gephy	Gigabit Ethernet PHY
+Note: gephy(30) is implemented as a parent clock of ge(2)
+
+The following is a list of provided IDs for Kirkwood:
+ID Clock Peripheral
+-----------------------------------
+0 ge0 Gigabit Ethernet 0
+2 pex0 PCIe Cntrl 0
+3 usb0 USB Host 0
+4 sdio SDIO Cntrl
+5 tsu Transp. Stream Unit
+6 dunit SDRAM Cntrl
+7 runit Runit
+8 xor0 XOR DMA 0
+9 audio I2S Cntrl 0
+14 sata0 SATA Host 0
+15 sata1 SATA Host 1
+16 xor1 XOR DMA 1
+17 crypto CESA engine
+18 pex1 PCIe Cntrl 1
+19	ge1	Gigabit Ethernet 1
+20 tdm Time Division Mplx
+
+Required properties:
+- compatible : shall be one of the following:
+ "marvell,dove-gating-clock" - for Dove SoC clock gating
+ "marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
+- reg : shall be the register address of the Clock Gating Control register
+- #clock-cells : from common clock binding; shall be set to 1
+
+Optional properties:
+- clocks : default parent clock phandle (e.g. tclk)
+
+Example:
+
+gate_clk: clock-gating-control@d0038 {
+ compatible = "marvell,dove-gating-clock";
+ reg = <0xd0038 0x4>;
+ /* default parent clock is tclk */
+ clocks = <&core_clk 0>;
+ #clock-cells = <1>;
+};
+
+sdio0: sdio@92000 {
+ compatible = "marvell,dove-sdhci";
+ /* get clk gate bit 8 (sdio0) */
+ clocks = <&gate_clk 8>;
+};
diff --git a/Documentation/devicetree/bindings/clock/zynq-7000.txt b/Documentation/devicetree/bindings/clock/zynq-7000.txt
new file mode 100644
index 00000000000..23ae1db1bc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/zynq-7000.txt
@@ -0,0 +1,55 @@
+Device Tree Clock bindings for the Zynq 7000 EPP
+
+The Zynq EPP has several different clock providers, each with their own bindings.
+The purpose of this document is to document their usage.
+
+See clock_bindings.txt for more information on the generic clock bindings.
+See Chapter 25 of Zynq TRM for more information about Zynq clocks.
+
+== PLLs ==
+
+Used to describe the ARM_PLL, DDR_PLL, and IO_PLL.
+
+Required properties:
+- #clock-cells : shall be 0 (only one clock is output from this node)
+- compatible : "xlnx,zynq-pll"
+- reg : pair of u32 values, which are the address offsets within the SLCR
+ of the relevant PLL_CTRL register and PLL_CFG register respectively
+- clocks : phandle for parent clock. should be the phandle for ps_clk
+
+Optional properties:
+- clock-output-names : name of the output clock
+
+Example:
+ armpll: armpll {
+ #clock-cells = <0>;
+ compatible = "xlnx,zynq-pll";
+ clocks = <&ps_clk>;
+ reg = <0x100 0x110>;
+ clock-output-names = "armpll";
+ };
+
+== Peripheral clocks ==
+
+Describes clock node for the SDIO, SMC, SPI, QSPI, and UART clocks.
+
+Required properties:
+- #clock-cells : shall be 1
+- compatible : "xlnx,zynq-periph-clock"
+- reg : a single u32 value, describing the offset within the SLCR where
+ the CLK_CTRL register is found for this peripheral
+- clocks : phandle for parent clocks. should hold phandles for
+ the IO_PLL, ARM_PLL, and DDR_PLL in order
+- clock-output-names : names of the output clock(s). For peripherals that have
+ two output clocks (for example, the UART), two clocks
+ should be listed.
+
+Example:
+ uart_clk: uart_clk {
+ #clock-cells = <1>;
+ compatible = "xlnx,zynq-periph-clock";
+ clocks = <&iopll &armpll &ddrpll>;
+ reg = <0x154>;
+ clock-output-names = "uart0_ref_clk",
+ "uart1_ref_clk";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index bd7ce120bc1..fc9ce6f1688 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -54,7 +54,8 @@ PROPERTIES
- compatible
Usage: required
Value type: <string>
- Definition: Must include "fsl,sec-v4.0"
+      Definition: Must include "fsl,sec-v4.0". May also optionally
+      include the SEC ERA version(s) with which the device is
+      compatible.
- #address-cells
Usage: required
@@ -106,7 +107,7 @@ PROPERTIES
EXAMPLE
crypto@300000 {
- compatible = "fsl,sec-v4.0";
+ compatible = "fsl,sec-v4.0", "fsl,sec-era-v2.0";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x300000 0x10000>;
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
new file mode 100644
index 00000000000..7c6cb7fcecd
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -0,0 +1,40 @@
+* Marvell XOR engines
+
+Required properties:
+- compatible: Should be "marvell,orion-xor"
+- reg: Should contain registers location and length (two sets)
+ the first set is the low registers, the second set the high
+ registers for the XOR engine.
+- clocks: pointer to the reference clock
+
+The DT node must also contain sub-nodes for each XOR channel that the
+XOR engine has. Those sub-nodes have the following required
+properties:
+- interrupts: interrupt of the XOR channel
+
+And the following optional properties:
+- dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
+- dmacap,memset to indicate that the XOR channel is capable of memset operations
+- dmacap,xor to indicate that the XOR channel is capable of xor operations
+
+Example:
+
+xor@d0060900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd0060900 0x100
+ 0xd0060b00 0x100>;
+ clocks = <&coreclk 0>;
+ status = "okay";
+
+ xor00 {
+ interrupts = <51>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <52>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+};
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmi.txt b/Documentation/devicetree/bindings/drm/exynos/hdmi.txt
new file mode 100644
index 00000000000..589edee3739
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/exynos/hdmi.txt
@@ -0,0 +1,22 @@
+Device-Tree bindings for drm hdmi driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmi".
+- reg: physical base address of the hdmi and length of memory mapped
+ region.
+- interrupts: interrupt number to the cpu.
+- hpd-gpio: the following information about the hotplug gpio pin:
+ a) phandle of the gpio controller node.
+ b) pin number within the gpio controller.
+ c) pin function mode.
+ d) optional flags and pull up/down.
+ e) drive strength.
+
+Example:
+
+ hdmi {
+ compatible = "samsung,exynos5-hdmi";
+ reg = <0x14530000 0x100000>;
+ interrupts = <0 95 0>;
+ hpd-gpio = <&gpx3 7 0xf 1 3>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt b/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt
new file mode 100644
index 00000000000..fa166d94580
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt
@@ -0,0 +1,12 @@
+Device-Tree bindings for hdmiddc driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmiddc".
+- reg: I2C address of the hdmiddc device.
+
+Example:
+
+ hdmiddc {
+ compatible = "samsung,exynos5-hdmiddc";
+ reg = <0x50>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt b/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt
new file mode 100644
index 00000000000..858f4f9b902
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt
@@ -0,0 +1,12 @@
+Device-Tree bindings for hdmiphy driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-hdmiphy".
+- reg: I2C address of the hdmiphy device.
+
+Example:
+
+ hdmiphy {
+ compatible = "samsung,exynos5-hdmiphy";
+ reg = <0x38>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/exynos/mixer.txt b/Documentation/devicetree/bindings/drm/exynos/mixer.txt
new file mode 100644
index 00000000000..9b2ea034356
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/exynos/mixer.txt
@@ -0,0 +1,15 @@
+Device-Tree bindings for mixer driver
+
+Required properties:
+- compatible: value should be "samsung,exynos5-mixer".
+- reg: physical base address of the mixer and length of memory mapped
+ region.
+- interrupts: interrupt number to the cpu.
+
+Example:
+
+ mixer {
+ compatible = "samsung,exynos5-mixer";
+ reg = <0x14450000 0x10000>;
+ interrupts = <0 94 0>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-poweroff.txt b/Documentation/devicetree/bindings/gpio/gpio-poweroff.txt
new file mode 100644
index 00000000000..558cdf3c9ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-poweroff.txt
@@ -0,0 +1,22 @@
+GPIO line that should be set high/low to power off a device
+
+Required properties:
+- compatible : should be "gpio-poweroff".
+- gpios : The GPIO to set high/low, see "gpios property" in
+ Documentation/devicetree/bindings/gpio/gpio.txt. If the pin should be
+ low to power down the board, set it to "Active Low"; otherwise, set the
+ gpio to "Active High".
+
+Optional properties:
+- input : Initially configure the GPIO line as an input. Only reconfigure
+ it to an output when the pm_power_off function is called. If this optional
+ property is not specified, the GPIO is initialized as an output in its
+ inactive state.
+
+
+Examples:
+
+gpio-poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio 4 0>; /* GPIO 4 Active Low */
+};
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 00000000000..b4fa934ae3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+ in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+ range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-mpe"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-vi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-epp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-isp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr2d"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr3d"
+ - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dc"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+ Each display controller node has a child node, named "rgb", that represents
+ the RGB output associated with the controller. It can take the following
+ optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-hdmi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+ - vdd-supply: regulator for supply voltage
+ - pll-supply: regulator for PLL
+
+ Optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-tvo"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dsi"
+ - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+ ...
+
+ host1x {
+ compatible = "nvidia,tegra20-host1x", "simple-bus";
+ reg = <0x50000000 0x00024000>;
+ interrupts = <0 65 0x04 /* mpcore syncpt */
+ 0 67 0x04>; /* mpcore general */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x54000000 0x54000000 0x04000000>;
+
+ mpe {
+ compatible = "nvidia,tegra20-mpe";
+ reg = <0x54040000 0x00040000>;
+ interrupts = <0 68 0x04>;
+ };
+
+ vi {
+ compatible = "nvidia,tegra20-vi";
+ reg = <0x54080000 0x00040000>;
+ interrupts = <0 69 0x04>;
+ };
+
+ epp {
+ compatible = "nvidia,tegra20-epp";
+ reg = <0x540c0000 0x00040000>;
+ interrupts = <0 70 0x04>;
+ };
+
+ isp {
+ compatible = "nvidia,tegra20-isp";
+ reg = <0x54100000 0x00040000>;
+ interrupts = <0 71 0x04>;
+ };
+
+ gr2d {
+ compatible = "nvidia,tegra20-gr2d";
+ reg = <0x54140000 0x00040000>;
+ interrupts = <0 72 0x04>;
+ };
+
+ gr3d {
+ compatible = "nvidia,tegra20-gr3d";
+ reg = <0x54180000 0x00040000>;
+ };
+
+ dc@54200000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54200000 0x00040000>;
+ interrupts = <0 73 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ dc@54240000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54240000 0x00040000>;
+ interrupts = <0 74 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ hdmi {
+ compatible = "nvidia,tegra20-hdmi";
+ reg = <0x54280000 0x00040000>;
+ interrupts = <0 75 0x04>;
+ status = "disabled";
+ };
+
+ tvo {
+ compatible = "nvidia,tegra20-tvo";
+ reg = <0x542c0000 0x00040000>;
+ interrupts = <0 76 0x04>;
+ status = "disabled";
+ };
+
+ dsi {
+ compatible = "nvidia,tegra20-dsi";
+ reg = <0x54300000 0x00040000>;
+ status = "disabled";
+ };
+ };
+
+ ...
+};
diff --git a/Documentation/devicetree/bindings/hwmon/vexpress.txt b/Documentation/devicetree/bindings/hwmon/vexpress.txt
new file mode 100644
index 00000000000..9c27ed694bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/vexpress.txt
@@ -0,0 +1,23 @@
+Versatile Express hwmon sensors
+-------------------------------
+
+Required node properties:
+- "compatible" value : one of
+ "arm,vexpress-volt"
+ "arm,vexpress-amp"
+ "arm,vexpress-temp"
+ "arm,vexpress-power"
+ "arm,vexpress-energy"
+- "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
+ (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+ for more details)
+
+Optional node properties:
+- label : string describing the monitored value
+
+Example:
+ energy@0 {
+ compatible = "arm,vexpress-energy";
+ arm,vexpress-sysreg,func = <13 0>;
+ label = "A15 Jcore";
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
new file mode 100644
index 00000000000..8ce9cd2855b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
@@ -0,0 +1,27 @@
+Device tree bindings for i2c-cbus-gpio driver
+
+Required properties:
+ - compatible = "i2c-cbus-gpio";
+ - gpios: clk, dat, sel
+ - #address-cells = <1>;
+ - #size-cells = <0>;
+
+Optional properties:
+ - child nodes conforming to i2c bus binding
+
+Example:
+
+i2c@0 {
+ compatible = "i2c-cbus-gpio";
+ gpios = <&gpio 66 0 /* clk */
+ &gpio 65 0 /* dat */
+ &gpio 64 0 /* sel */
+ >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ retu-mfd: retu@1 {
+ compatible = "retu-mfd";
+ reg = <0x1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
new file mode 100644
index 00000000000..66709a82554
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
@@ -0,0 +1,81 @@
+GPIO-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses GPIOs to
+route the I2C signals.
+
+ +-----+ +-----+
+ | dev | | dev |
+ +------------+ +-----+ +-----+
+ | SoC | | |
+ | | /--------+--------+
+ | +------+ | +------+ child bus A, on GPIO value set to 0
+ | | I2C |-|--| Mux |
+ | +------+ | +--+---+ child bus B, on GPIO value set to 1
+ | | | \----------+--------+--------+
+ | +------+ | | | | |
+ | | GPIO |-|-----+ +-----+ +-----+ +-----+
+ | +------+ | | dev | | dev | | dev |
+ +------------+ +-----+ +-----+ +-----+
+
+Required properties:
+- compatible: i2c-mux-gpio
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+ port is connected to.
+- mux-gpios: list of gpios used to control the muxer
+* Standard I2C mux properties. See mux.txt in this directory.
+* I2C child bus nodes. See mux.txt in this directory.
+
+Optional properties:
+- idle-state: value to set the muxer to when idle. When no value is
+ given, it defaults to the last value used.
+
+For each i2c child node, an I2C child bus will be created. They will
+be numbered based on their order in the device tree.
+
+Whenever an access is made to a device on a child bus, the value set
+in the relevant node's reg property will be output using the list of
+GPIOs, the first in the list holding the least-significant value.
+
+If an idle state is defined, using the idle-state (optional) property,
+whenever an access is not being made to a device on a child bus, the
+GPIOs will be set according to the idle value.
+
+If an idle state is not defined, the most recently used value will be
+left programmed into hardware whenever no access is being made to a
+device on a child bus.
+
+Example:
+ i2cmux {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mux-gpios = <&gpio1 22 0 &gpio1 23 0>;
+ i2c-parent = <&i2c1>;
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ compatible = "solomon,ssd1307fb-i2c";
+ reg = <0x3c>;
+ pwms = <&pwm 4 3000>;
+ reset-gpios = <&gpio2 7 1>;
+ reset-active-low;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
index c15781f4dc8..1637c298a1b 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
@@ -1,7 +1,7 @@
Device tree configuration for i2c-ocores
Required properties:
-- compatible : "opencores,i2c-ocores"
+- compatible : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
- reg : bus address start and address range size of device
- interrupts : interrupt number
- clock-frequency : frequency of bus clock in Hz
diff --git a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
index b6cb5a12c67..e9611ace879 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt
@@ -13,11 +13,17 @@ Required properties:
- interrupts: interrupt number to the cpu.
- samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
+Required for all cases except "samsung,s3c2440-hdmiphy-i2c":
+ - Samsung GPIO variant (deprecated):
+ - gpios: The order of the gpios should be the following: <SDA, SCL>.
+ The gpio specifier depends on the gpio controller. Required in all
+ cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
+     lines are permanently wired to the respective client.
+ - Pinctrl variant (preferred, if available):
+ - pinctrl-0: Pin control group to be used for this controller.
+ - pinctrl-names: Should contain only one value - "default".
+
Optional properties:
- - gpios: The order of the gpios should be the following: <SDA, SCL>.
- The gpio specifier depends on the gpio controller. Required in all
- cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
- lines are permanently wired to the respective client
  - samsung,i2c-slave-addr: Slave address in multi-master environment. If not
specified, default value is 0.
- samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
@@ -31,8 +37,14 @@ Example:
interrupts = <345>;
samsung,i2c-sda-delay = <100>;
samsung,i2c-max-bus-freq = <100000>;
+ /* Samsung GPIO variant begins here */
gpios = <&gpd1 2 0 /* SDA */
&gpd1 3 0 /* SCL */>;
+ /* Samsung GPIO variant ends here */
+ /* Pinctrl variant begins here */
+ pinctrl-0 = <&i2c3_bus>;
+ pinctrl-names = "default";
+ /* Pinctrl variant ends here */
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
new file mode 100644
index 00000000000..ead641c65e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
@@ -0,0 +1,46 @@
+* GPIO driven matrix keypad device tree bindings
+
+GPIO driven matrix keypad is used to interface a SoC with a matrix keypad.
+The matrix keypad supports multiple row and column lines, a key can be
+placed at each intersection of a unique row and a unique column. The matrix
+keypad can sense a key-press and key-release by means of GPIO lines and
+report the event using GPIO interrupts to the cpu.
+
+Required Properties:
+- compatible: Should be "gpio-matrix-keypad"
+- row-gpios: List of gpios used as row lines. The gpio specifier
+ for this property depends on the gpio controller to
+ which these row lines are connected.
+- col-gpios: List of gpios used as column lines. The gpio specifier
+ for this property depends on the gpio controller to
+ which these column lines are connected.
+- linux,keymap: The definition can be found at
+ bindings/input/matrix-keymap.txt
+
+Optional Properties:
+- linux,no-autorepeat: do not enable the autorepeat feature.
+- linux,wakeup: use any event on keypad as wakeup event.
+- debounce-delay-ms: debounce interval in milliseconds
+- col-scan-delay-us: delay, measured in microseconds, that is needed
+ before we can scan keypad after activating column gpio
+
+Example:
+ matrix-keypad {
+ compatible = "gpio-matrix-keypad";
+ debounce-delay-ms = <5>;
+ col-scan-delay-us = <2>;
+
+ row-gpios = <&gpio2 25 0
+ &gpio2 26 0
+ &gpio2 27 0>;
+
+ col-gpios = <&gpio2 21 0
+ &gpio2 22 0>;
+
+ linux,keymap = <0x0000008B
+ 0x0100009E
+ 0x02000069
+ 0x0001006A
+ 0x0101001C
+ 0x0201006C>;
+ };
diff --git a/Documentation/devicetree/bindings/input/pwm-beeper.txt b/Documentation/devicetree/bindings/input/pwm-beeper.txt
new file mode 100644
index 00000000000..be332ae4f2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/pwm-beeper.txt
@@ -0,0 +1,7 @@
+* PWM beeper device tree bindings
+
+Registers a PWM device as beeper.
+
+Required properties:
+- compatible: should be "pwm-beeper"
+- pwms: phandle to the physical PWM device
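+
+Example (an illustrative sketch only; the PWM controller label and the PWM
+specifier cells are placeholders that depend on the board's PWM provider
+and its #pwm-cells value):
+
+	beeper {
+		compatible = "pwm-beeper";
+		/* channel 0 of the assumed &pwm controller, 1 ms period */
+		pwms = <&pwm 0 1000000>;
+	};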
diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
new file mode 100644
index 00000000000..1b97222e8a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
@@ -0,0 +1,39 @@
+* STMPE Keypad
+
+Required properties:
+ - compatible : "st,stmpe-keypad"
+ - linux,keymap : See ./matrix-keymap.txt
+
+Optional properties:
+ - debounce-interval : Debouncing interval time in milliseconds
+ - st,scan-count : Scanning cycles elapsed before key data is updated
+ - st,no-autorepeat : If specified device will not autorepeat
+
+Example:
+
+ stmpe_keypad {
+ compatible = "st,stmpe-keypad";
+
+ debounce-interval = <64>;
+ st,scan-count = <8>;
+ st,no-autorepeat;
+
+ linux,keymap = <0x205006b
+ 0x4010074
+ 0x3050072
+ 0x1030004
+ 0x502006a
+ 0x500000a
+ 0x5008b
+ 0x706001c
+ 0x405000b
+ 0x6070003
+ 0x3040067
+ 0x303006c
+ 0x60400e7
+ 0x602009e
+ 0x4020073
+ 0x5050002
+ 0x4030069
+ 0x3020008>;
+ };
diff --git a/Documentation/devicetree/bindings/input/tca8418_keypad.txt b/Documentation/devicetree/bindings/input/tca8418_keypad.txt
new file mode 100644
index 00000000000..2a1538f0053
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/tca8418_keypad.txt
@@ -0,0 +1,8 @@
+* TI TCA8418 I2C keypad controller
+
+Required properties:
+- compatible: "ti,tca8418"
+- reg: the I2C address
+- interrupts: IRQ line number, should trigger on falling edge
+- keypad,num-rows: The number of rows
+- keypad,num-columns: The number of columns
+- linux,keymap: Key definitions, see ./matrix-keymap.txt.
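+
+Example (an illustrative sketch only; the I2C address, interrupt specifier
+and keymap entries below are placeholders, not taken from a real board):
+
+	keypad@34 {
+		compatible = "ti,tca8418";
+		reg = <0x34>;
+		/* falling-edge interrupt; cells depend on the interrupt parent */
+		interrupts = <10 2>;
+		keypad,num-rows = <4>;
+		keypad,num-columns = <4>;
+		linux,keymap = <0x00000067	/* row 0, col 0, KEY_UP */
+				0x0001006c	/* row 0, col 1, KEY_DOWN */
+				0x0100001c>;	/* row 1, col 0, KEY_ENTER */
+	};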
diff --git a/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt b/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
new file mode 100644
index 00000000000..ca5a2c86480
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/bu21013.txt
@@ -0,0 +1,28 @@
+* Rohm BU21013 Touch Screen
+
+Required properties:
+ - compatible : "rohm,bu21013_tp"
+ - reg : I2C device address
+
+Optional properties:
+ - touch-gpio : GPIO pin registering a touch event
+ - <supply_name>-supply : Phandle to a regulator supply
+ - rohm,touch-max-x : Maximum outward permitted limit in the X axis
+ - rohm,touch-max-y : Maximum outward permitted limit in the Y axis
+ - rohm,flip-x : Flip touch coordinates on the X axis
+ - rohm,flip-y : Flip touch coordinates on the Y axis
+
+Example:
+
+ i2c@80110000 {
+ bu21013_tp@0x5c {
+ compatible = "rohm,bu21013_tp";
+ reg = <0x5c>;
+ touch-gpio = <&gpio2 20 0x4>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+
+ rohm,touch-max-x = <384>;
+ rohm,touch-max-y = <704>;
+ rohm,flip-y;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/mms114.txt b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt
new file mode 100644
index 00000000000..89d4c56c567
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt
@@ -0,0 +1,34 @@
+* MELFAS MMS114 touchscreen controller
+
+Required properties:
+- compatible: must be "melfas,mms114"
+- reg: I2C address of the chip
+- interrupts: interrupt to which the chip is connected
+- x-size: horizontal resolution of touchscreen
+- y-size: vertical resolution of touchscreen
+
+Optional properties:
+- contact-threshold:
+- moving-threshold:
+- x-invert: invert X axis
+- y-invert: invert Y axis
+
+Example:
+
+ i2c@00000000 {
+ /* ... */
+
+ touchscreen@48 {
+ compatible = "melfas,mms114";
+ reg = <0x48>;
+ interrupts = <39 0>;
+ x-size = <720>;
+ y-size = <1280>;
+ contact-threshold = <10>;
+ moving-threshold = <10>;
+ x-invert;
+ y-invert;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt b/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt
new file mode 100644
index 00000000000..127baa31a77
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt
@@ -0,0 +1,43 @@
+STMPE Touchscreen
+----------------
+
+Required properties:
+ - compatible: "st,stmpe-ts"
+
+Optional properties:
+- st,sample-time: ADC conversion time in number of clocks (0 -> 36 clocks,
+  1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks, 4 -> 80 clocks, 5 -> 96 clocks,
+  6 -> 144 clocks); 4 is recommended.
+- st,mod-12b: ADC bit mode (0 -> 10-bit ADC, 1 -> 12-bit ADC)
+- st,ref-sel: ADC reference source (0 -> internal reference, 1 -> external
+  reference)
+- st,adc-freq: ADC clock speed (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 or 3 -> 6.5 MHz)
+- st,ave-ctrl: Sample average control (0 -> 1 sample, 1 -> 2 samples, 2 -> 4
+  samples, 3 -> 8 samples)
+- st,touch-det-delay: Touch detect interrupt delay (0 -> 10 us, 1 -> 50 us,
+  2 -> 100 us, 3 -> 500 us, 4 -> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms);
+  3 is recommended.
+- st,settling: Panel driver settling time (0 -> 10 us, 1 -> 100 us, 2 -> 500 us,
+  3 -> 1 ms, 4 -> 5 ms, 5 -> 10 ms, 6 -> 50 ms, 7 -> 100 ms); 2 is recommended.
+- st,fraction-z: Length of the fractional part in z (fraction-z ([0..7]) = count
+  of the fractional part); 7 is recommended.
+- st,i-drive: Current limit of the touchscreen drivers (0 -> 20 mA typical,
+  35 mA max; 1 -> 50 mA typical, 80 mA max)
+
+The node name must be stmpe_touchscreen and it should be a child node of the
+stmpe node to which it belongs.
+
+Example:
+
+ stmpe_touchscreen {
+ compatible = "st,stmpe-ts";
+ st,sample-time = <4>;
+ st,mod-12b = <1>;
+ st,ref-sel = <0>;
+ st,adc-freq = <1>;
+ st,ave-ctrl = <1>;
+ st,touch-det-delay = <2>;
+ st,settling = <2>;
+ st,fraction-z = <7>;
+ st,i-drive = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/media/s5p-mfc.txt b/Documentation/devicetree/bindings/media/s5p-mfc.txt
new file mode 100644
index 00000000000..67ec3d4ccc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/s5p-mfc.txt
@@ -0,0 +1,23 @@
+* Samsung Multi Format Codec (MFC)
+
+The Multi Format Codec (MFC) is an IP block present in Samsung SoCs which
+supports high-resolution decoding and encoding.
+The MFC device driver is a V4L2 driver which can encode/decode
+raw and elementary video streams and supports all popular
+video codecs.
+
+Required properties:
+ - compatible : value should be one of the following
+ (a) "samsung,mfc-v5" for MFC v5 present in Exynos4 SoCs
+ (b) "samsung,mfc-v6" for MFC v6 present in Exynos5 SoCs
+
+ - reg : Physical base address of the IP registers and length of memory
+ mapped region.
+
+ - interrupts : MFC interrupt number to the CPU.
+
+ - samsung,mfc-r : Base address of the first memory bank used by MFC
+ for DMA contiguous memory allocation and its size.
+
+ - samsung,mfc-l : Base address of the second memory bank used by MFC
+ for DMA contiguous memory allocation and its size.
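+
+Example (a sketch only; the register base, interrupt number and memory
+bank regions below are illustrative assumptions, not values taken from a
+real board):
+
+	mfc: codec@13400000 {
+		compatible = "samsung,mfc-v5";
+		reg = <0x13400000 0x10000>;
+		interrupts = <0 94 0>;
+		samsung,mfc-r = <0x43000000 0x800000>;
+		samsung,mfc-l = <0x51000000 0x800000>;
+	};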
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
index ce83c8d3c00..13b707b7355 100644
--- a/Documentation/devicetree/bindings/mfd/ab8500.txt
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -24,7 +24,32 @@ ab8500-bm : : : Battery Manager
ab8500-btemp : : : Battery Temperature
ab8500-charger : : : Battery Charger
ab8500-codec : : : Audio Codec
-ab8500-fg : : : Fuel Gauge
+ab8500-fg : : vddadc : Fuel Gauge
+ : NCONV_ACCU : : Accumulate N Sample Conversion
+ : BATT_OVV : : Battery Over Voltage
+ : LOW_BAT_F : : LOW threshold battery voltage
+ : CC_INT_CALIB : : Coulomb Counter Internal Calibration
+ : CCEOC : : Coulomb Counter End of Conversion
+ab8500-btemp : : vtvout : Battery Temperature
+ : BAT_CTRL_INDB : : Battery Removal Indicator
+ : BTEMP_LOW : : Btemp < BtempLow, if battery temperature is lower than -10°C
+ : BTEMP_LOW_MEDIUM : : BtempLow < Btemp < BtempMedium, if battery temperature is between -10°C and 0°C
+ : BTEMP_MEDIUM_HIGH : : BtempMedium < Btemp < BtempHigh, if battery temperature is between 0°C and MaxTemp
+ : BTEMP_HIGH : : Btemp > BtempHigh, if battery temperature is higher than MaxTemp
+ab8500-charger : : vddadc : Charger interface
+ : MAIN_CH_UNPLUG_DET : : main charger unplug detection management (not in 8505)
+ : MAIN_CHARGE_PLUG_DET : : main charger plug detection management (not in 8505)
+ : MAIN_EXT_CH_NOT_OK : : main charger not OK
+ : MAIN_CH_TH_PROT_R : : Die temp is above main charger thermal protection threshold
+ : MAIN_CH_TH_PROT_F : : Die temp is below main charger thermal protection threshold
+ : VBUS_DET_F : : VBUS falling detected
+ : VBUS_DET_R : : VBUS rising detected
+ : USB_LINK_STATUS : : USB link status has changed
+ : USB_CH_TH_PROT_R : : Die temp is above usb charger thermal protection threshold
+ : USB_CH_TH_PROT_F : : Die temp is below usb charger thermal protection threshold
+ : USB_CHARGER_NOT_OKR : : Allowed USB charger not OK detection
+ : VBUS_OVV : : Overvoltage on Vbus ball detected (USB charging is stopped)
+ : CH_WD_EXP : : Charger watchdog expiration detected
ab8500-gpadc : HW_CONV_END : vddadc : Analogue to Digital Converter
SW_CONV_END : :
ab8500-gpio : : : GPIO Controller
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
new file mode 100644
index 00000000000..56edb552068
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/stmpe.txt
@@ -0,0 +1,28 @@
+* ST Microelectronics STMPE Multi-Functional Device
+
+STMPE is an MFD device which may expose the following inbuilt devices: gpio,
+keypad, touchscreen, adc, pwm, rotator.
+
+Required properties:
+ - compatible : "st,stmpe[610|801|811|1601|2401|2403]"
+ - reg : I2C/SPI address of the device
+
+Optional properties:
+ - interrupts : The interrupt outputs from the controller
+ - interrupt-controller : Marks the device node as an interrupt controller
+ - interrupt-parent : Specifies which IRQ controller we're connected to
+ - wakeup-source : Marks the input device as wakeup capable
+ - st,autosleep-timeout : Valid entries (ms): 4, 16, 32, 64, 128, 256, 512 and 1024
+
+Example:
+
+ stmpe1601: stmpe1601@40 {
+ compatible = "st,stmpe1601";
+ reg = <0x40>;
+ interrupts = <26 0x4>;
+ interrupt-parent = <&gpio6>;
+ interrupt-controller;
+
+ wakeup-source;
+ st,autosleep-timeout = <1024>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
new file mode 100644
index 00000000000..38e51ad2e07
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
@@ -0,0 +1,15 @@
+* Atmel SSC driver.
+
+Required properties:
+- compatible: "atmel,at91rm9200-ssc" or "atmel,at91sam9g45-ssc"
+ - atmel,at91rm9200-ssc: support pdc transfer
+ - atmel,at91sam9g45-ssc: support dma transfer
+- reg: Should contain SSC registers location and length
+- interrupts: Should contain SSC interrupt
+
+Example:
+ssc0: ssc@fffbc000 {
+ compatible = "atmel,at91rm9200-ssc";
+ reg = <0xfffbc000 0x4000>;
+ interrupts = <14 4 5>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
new file mode 100644
index 00000000000..b04d03a1d49
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -0,0 +1,23 @@
+* Denali NAND controller
+
+Required properties:
+ - compatible : should be "denali,denali-nand-dt"
+ - reg : should contain the location and length of the data and register regions.
+ - reg-names: Should contain the reg names "nand_data" and "denali_reg"
+ - interrupts : The interrupt number.
+ - dma-mask : DMA bit mask
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Examples:
+
+nand: nand@ff900000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "denali,denali-nand-dt";
+ reg = <0xff900000 0x100000>, <0xffb80000 0x10000>;
+ reg-names = "nand_data", "denali_reg";
+ interrupts = <0 144 4>;
+ dma-mask = <0xffffffff>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/flctl-nand.txt b/Documentation/devicetree/bindings/mtd/flctl-nand.txt
new file mode 100644
index 00000000000..427f46dc60a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/flctl-nand.txt
@@ -0,0 +1,49 @@
+FLCTL NAND controller
+
+Required properties:
+- compatible : "renesas,shmobile-flctl-sh7372"
+- reg : Address range of the FLCTL
+- interrupts : flste IRQ number
+- nand-bus-width : bus width to NAND chip
+
+Optional properties:
+- dmas: DMA specifier(s)
+- dma-names: name for each DMA specifier. Valid names are
+ "data_tx", "data_rx", "ecc_tx", "ecc_rx"
+
+The DMA fields are not used yet in the driver but are listed here for
+completeness of the binding.
+
+The device tree may optionally contain sub-nodes describing partitions of the
+address space. See partition.txt for more detail.
+
+Example:
+
+ flctl@e6a30000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "renesas,shmobile-flctl-sh7372";
+ reg = <0xe6a30000 0x100>;
+ interrupts = <0x0d80>;
+
+ nand-bus-width = <16>;
+
+ dmas = <&dmac 1 /* data_tx */
+ &dmac 2>; /* data_rx */
+ dma-names = "data_tx", "data_rx";
+
+ system@0 {
+ label = "system";
+ reg = <0x0 0x8000000>;
+ };
+
+ userdata@8000000 {
+ label = "userdata";
+ reg = <0x8000000 0x10000000>;
+ };
+
+ cache@18000000 {
+ label = "cache";
+ reg = <0x18000000 0x8000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
index e2c663b354d..e3ea32e7de3 100644
--- a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -3,9 +3,7 @@
Required properties:
- compatible : "st,spear600-fsmc-nand"
- reg : Address range of the mtd chip
-- reg-names: Should contain the reg names "fsmc_regs" and "nand_data"
-- st,ale-off : Chip specific offset to ALE
-- st,cle-off : Chip specific offset to CLE
+- reg-names: Should contain the reg names "fsmc_regs", "nand_data", "nand_addr" and "nand_cmd"
Optional properties:
- bank-width : Width (in bytes) of the device. If not present, the width
@@ -19,10 +17,10 @@ Example:
#address-cells = <1>;
#size-cells = <1>;
reg = <0xd1800000 0x1000 /* FSMC Register */
- 0xd2000000 0x4000>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
- st,ale-off = <0x20000>;
- st,cle-off = <0x10000>;
+ 0xd2000000 0x0010 /* NAND Base DATA */
+ 0xd2020000 0x0010 /* NAND Base ADDR */
+ 0xd2010000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
bank-width = <1>;
nand-skip-bbtscan;
diff --git a/Documentation/devicetree/bindings/mtd/m25p80.txt b/Documentation/devicetree/bindings/mtd/m25p80.txt
new file mode 100644
index 00000000000..6d3d5760947
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/m25p80.txt
@@ -0,0 +1,29 @@
+* MTD SPI driver for ST M25Pxx (and similar) serial flash chips
+
+Required properties:
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+ representing partitions.
+- compatible : Should be the manufacturer and the name of the chip. Bear in mind
+ the DT binding is not Linux-only, but in case of Linux, see the
+ "m25p_ids" table in drivers/mtd/devices/m25p80.c for the list of
+ supported chips.
+- reg : Chip-Select number
+- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
+
+Optional properties:
+- m25p,fast-read : Use the "fast read" opcode to read data from the chip instead
+ of the usual "read" opcode. This opcode is not supported by
+ all chips and support for it can not be detected at runtime.
+ Refer to your chips' datasheet to check if this is supported
+ by your chip.
+
+Example:
+
+ flash: m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ m25p,fast-read;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 94de19b8f16..dab7847fc80 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -23,6 +23,9 @@ file systems on embedded devices.
unaligned accesses as implemented in the JFFS2 code via memcpy().
By defining "no-unaligned-direct-access", the flash will not be
exposed directly to the MTD users (e.g. JFFS2) any more.
+ - linux,mtd-name: allows specifying the mtd name for retro compatibility with
+   physmap-flash drivers, as the boot loader passes the mtd partition via the
+   old device name physmap-flash.
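+   A hypothetical example (the value below is an illustration following the
+   old platform-device naming scheme, not a value required by the binding):
+     linux,mtd-name = "physmap-flash.0";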
For JEDEC compatible devices, the following additional properties
are defined:
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
new file mode 100644
index 00000000000..859a6fa7569
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -0,0 +1,23 @@
+* Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+
+Required properties:
+- compatible: should be "marvell,armada-370-neta".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device
+- phy: A phandle to a phy node defining the PHY address (as the reg
+ property, a single integer).
+- phy-mode: The interface between the SoC and the PHY (a string that
+ of_get_phy_mode() can understand)
+- clocks: a pointer to the reference clock for this device.
+
+Example:
+
+ethernet@d0070000 {
+ compatible = "marvell,armada-370-neta";
+ reg = <0xd0070000 0x2500>;
+ interrupts = <8>;
+ clocks = <&gate_clk 4>;
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+};
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
new file mode 100644
index 00000000000..34e7aafa321
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -0,0 +1,35 @@
+* Marvell MDIO Ethernet Controller interface
+
+The Ethernet controllers of the Marvell Kirkwood, Dove, Orion5x,
+MV78xx0, Armada 370 and Armada XP have an identical unit that provides
+an interface with the MDIO bus. This driver handles this MDIO
+interface.
+
+Required properties:
+- compatible: "marvell,orion-mdio"
+- reg: address and length of the SMI register
+
+The child nodes of the MDIO driver are the individual PHY devices
+connected to this MDIO bus. They must have a "reg" property giving the
+PHY address on the MDIO bus.
+
+Example at the SoC level:
+
+mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0xd0072004 0x4>;
+};
+
+And at the board level:
+
+mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
index 361bccb7ec8..95daf6335c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
@@ -7,8 +7,10 @@ Required properties:
- compatible: "marvell,88f6180-pinctrl",
"marvell,88f6190-pinctrl", "marvell,88f6192-pinctrl",
"marvell,88f6281-pinctrl", "marvell,88f6282-pinctrl"
+ "marvell,98dx4122-pinctrl"
This driver supports all kirkwood variants, i.e. 88f6180, 88f619x, and 88f628x.
+It also supports the 88f6281-based variant in the 98dx412x Bobcat SoCs.
Available mpp pins/groups and functions:
Note: brackets (x) are not part of the mpp name for marvell,function and given
@@ -277,3 +279,40 @@ mpp46 46 gpio, ts(mp10), tdm(fs), lcd(hsync)
mpp47 47 gpio, ts(mp11), tdm(drx), lcd(vsync)
mpp48 48 gpio, ts(mp12), tdm(dtx), lcd(d16)
mpp49 49 gpo, tdm(rx0ql), pex(clkreq), lcd(d17)
+
+* Marvell Bobcat 98dx4122
+
+name pins functions
+================================================================================
+mpp0 0 gpio, nand(io2), spi(cs)
+mpp1 1 gpo, nand(io3), spi(mosi)
+mpp2 2 gpo, nand(io4), spi(sck)
+mpp3 3 gpo, nand(io5), spi(miso)
+mpp4 4 gpio, nand(io6), uart0(rxd)
+mpp5 5 gpo, nand(io7), uart0(txd)
+mpp6 6 sysrst(out), spi(mosi)
+mpp7 7 gpo, pex(rsto), spi(cs)
+mpp8 8 gpio, twsi0(sda), uart0(rts), uart1(rts)
+mpp9 9 gpio, twsi(sck), uart0(cts), uart1(cts)
+mpp10 10 gpo, spi(sck), uart0(txd)
+mpp11 11 gpio, spi(miso), uart0(rxd)
+mpp13 13 gpio, uart1(txd)
+mpp14 14 gpio, uart1(rxd)
+mpp15 15 gpio, uart0(rts)
+mpp16 16 gpio, uart0(cts)
+mpp18 18 gpo, nand(io0)
+mpp19 19 gpo, nand(io1)
+mpp34 34 gpio
+mpp35 35 gpio
+mpp36 36 gpio
+mpp37 37 gpio
+mpp38 38 gpio
+mpp39 39 gpio
+mpp40 40 gpio
+mpp41 41 gpio
+mpp42 42 gpio
+mpp43 43 gpio
+mpp44 44 gpio
+mpp45 45 gpio
+mpp49 49 gpio
+
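+A sketch of how a board might reference the table above, using the
+marvell,pins/marvell,function properties of the common mvebu pinctrl
+binding (the chosen group and function are illustrative assumptions):
+
+pinctrl {
+	compatible = "marvell,98dx4122-pinctrl";
+
+	pmx_uart1: pmx-uart1 {
+		marvell,pins = "mpp13", "mpp14";
+		marvell,function = "uart1";
+	};
+};
+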
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt b/Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt
new file mode 100644
index 00000000000..0ba1bcc7f33
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/ab8500/btemp.txt
@@ -0,0 +1,16 @@
+=== AB8500 Battery Temperature Monitor Driver ===
+
+The properties below describe the node for the btemp driver.
+
+Required Properties:
+- compatible = Shall be: "stericsson,ab8500-btemp"
+- battery = Shall be battery specific information
+
+ Example:
+ ab8500_btemp {
+ compatible = "stericsson,ab8500-btemp";
+ battery = <&ab8500_battery>;
+ };
+
+For information on the battery-specific node, refer to:
+Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt b/Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt
new file mode 100644
index 00000000000..ef532837112
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/ab8500/chargalg.txt
@@ -0,0 +1,16 @@
+=== AB8500 Charging Algorithm Driver ===
+
+The properties below describe the node for the chargalg driver.
+
+Required Properties:
+- compatible = Shall be: "stericsson,ab8500-chargalg"
+- battery = Shall be battery specific information
+
+Example:
+ab8500_chargalg {
+ compatible = "stericsson,ab8500-chargalg";
+ battery = <&ab8500_battery>;
+};
+
+For information on the battery-specific node, refer to:
+Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/charger.txt b/Documentation/devicetree/bindings/power_supply/ab8500/charger.txt
new file mode 100644
index 00000000000..6bdbb08ea9e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/ab8500/charger.txt
@@ -0,0 +1,25 @@
+=== AB8500 Charger Driver ===
+
+Required Properties:
+- compatible = Shall be "stericsson,ab8500-charger"
+- battery = Shall be battery specific information
+ Example:
+ ab8500_charger {
+ compatible = "stericsson,ab8500-charger";
+ battery = <&ab8500_battery>;
+ };
+
+- vddadc-supply: Supply for USB and Main charger
+ Example:
+ ab8500-charger {
+ vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ };
+- autopower_cfg:
+ Boolean value indicating the presence of 'automatic poweron after powerloss'
+ Example:
+ ab8500-charger {
+ autopower_cfg;
+ };
+
+For information on the battery-specific node, refer to:
+Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
diff --git a/Documentation/devicetree/bindings/power_supply/ab8500/fg.txt b/Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
new file mode 100644
index 00000000000..ccafcb9112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/ab8500/fg.txt
@@ -0,0 +1,58 @@
+=== AB8500 Fuel Gauge Driver ===
+
+AB8500 is a mixed-signal multimedia and power management
+device comprising: a power and energy-management module,
+wall-charger, usb-charger, audio codec, general purpose adc,
+tvout, clock management and sim card interface.
+
+Fuel gauge support is part of the energy-management module; the other
+components of this module are:
+main-charger, usb-combo-charger and battery-temperature-monitoring.
+
+The properties below describe the node for the fuel gauge driver.
+
+Required Properties:
+- compatible = This shall be: "stericsson,ab8500-fg"
+- battery = Shall be battery specific information
+ Example:
+ ab8500_fg {
+ compatible = "stericsson,ab8500-fg";
+ battery = <&ab8500_battery>;
+ };
+
+dependent node:
+ ab8500_battery: ab8500_battery {
+ };
+ This node provides information on the 'thermistor interface' and the
+ 'battery technology type' used.
+
+Properties of this node are:
+thermistor-on-batctrl:
+ A boolean value indicating thermistor interface to battery
+
+ Note:
+ 'btemp' and 'batctrl' are the pins interfaced for battery temperature
+ measurement. The 'btemp' signal is used when the NTC (negative temperature
+ coefficient) resistor is external to the battery, whereas the
+ 'batctrl' pin is used when the NTC resistor is internal to the battery.
+
+ Example:
+ ab8500_battery: ab8500_battery {
+ thermistor-on-batctrl;
+ };
+ indicates: the NTC resistor is internal to the battery and 'batctrl' is used
+ for thermal measurement.
+
+ The absence of the property 'thermistor-on-batctrl' indicates that the
+ NTC resistor is external to the battery and the 'btemp' signal is used
+ for thermal measurement.
+
+battery-type:
+ This shall be the battery manufacturing technology type,
+ allowed types are:
+ "UNKNOWN" "NiMH" "LION" "LIPO" "LiFe" "NiCd" "LiMn"
+ Example:
+ ab8500_battery: ab8500_battery {
+ stericsson,battery-type = "LIPO";
+ };
+
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/raideng.txt b/Documentation/devicetree/bindings/powerpc/fsl/raideng.txt
new file mode 100644
index 00000000000..4ad29b9ac2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/raideng.txt
@@ -0,0 +1,81 @@
+* Freescale 85xx RAID Engine nodes
+
+RAID Engine nodes are defined to describe on-chip RAID accelerators. Each RAID
+Engine should have a separate node.
+
+Supported chips:
+P5020, P5040
+
+Required properties:
+
+- compatible: Should contain "fsl,raideng-v1.0" as the value
+ This identifies the RAID Engine block. The 1 in 1.0 represents the
+ major version number and the 0 the minor version number. The
+ version matches the hardware IP version.
+- reg: offset and length of the register set for the device
+- ranges: standard ranges property specifying the translation
+ between child address space and parent address space
+
+Example:
+ /* P5020 */
+ raideng: raideng@320000 {
+ compatible = "fsl,raideng-v1.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x320000 0x10000>;
+ ranges = <0 0x320000 0x10000>;
+ };
+
+
+There must be a sub-node for each job queue present in the RAID Engine.
+This node must be a sub-node of the main RAID Engine node.
+
+- compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
+ This identifies the job queue interface
+- reg: offset and length of the register set for job queue
+- ranges: standard ranges property specifying the translation
+ between child address space and parent address space
+
+Example:
+ /* P5020 */
+ raideng_jq0@1000 {
+ compatible = "fsl,raideng-v1.0-job-queue";
+ reg = <0x1000 0x1000>;
+ ranges = <0x0 0x1000 0x1000>;
+ };
+
+
+There must be a sub-node for each job ring present in the RAID Engine.
+This node must be a sub-node of the job queue node.
+
+- compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
+ This identifies job ring. Should contain either
+ "fsl,raideng-v1.0-hp-ring" or "fsl,raideng-v1.0-lp-ring"
+ depending upon whether ring has high or low priority
+- reg: offset and length of the register set for job ring
+- interrupts: interrupt mapping for job ring IRQ
+
+Optional property:
+
+- fsl,liodn: Specifies the LIODN to be used for the Job Ring. This
+ property is normally set by firmware. The value
+ is 12 bits wide and is the LIODN number for this JR.
+ This property is used by the IOMMU (PAMU) to distinguish
+ transactions from this JR and then be able to do address
+ translation & protection accordingly.
+
+Example:
+ /* P5020 */
+ raideng_jq0@1000 {
+ compatible = "fsl,raideng-v1.0-job-queue";
+ reg = <0x1000 0x1000>;
+ ranges = <0x0 0x1000 0x1000>;
+
+ raideng_jr0: jr@0 {
+ compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
+ reg = <0x0 0x400>;
+ interrupts = <139 2 0 0>;
+ interrupt-parent = <&mpic>;
+ fsl,liodn = <0x41>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
new file mode 100644
index 00000000000..131e8c11d26
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -0,0 +1,23 @@
+TI SOC ECAP based APWM controller
+
+Required properties:
+- compatible: Must be "ti,am33xx-ecap"
+- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
+ First cell specifies the per-chip index of the PWM to use, the second
+ cell is the period in nanoseconds and bit 0 in the third cell is used to
+ encode the polarity of the PWM output. Set bit 0 of the third cell in the
+ PWM specifier to 1 for inverse polarity and to 0 for normal polarity.
+- reg: physical base address and size of the registers map.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated to the ECAP:
+ "ecap<x>", <x> being the 0-based instance number from the HW spec
+
+Example:
+
+ecap0: ecap@0 {
+ compatible = "ti,am33xx-ecap";
+ #pwm-cells = <3>;
+ reg = <0x48300100 0x80>;
+ ti,hwmods = "ecap0";
+};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
new file mode 100644
index 00000000000..4fc7079d822
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -0,0 +1,23 @@
+TI SOC EHRPWM based PWM controller
+
+Required properties:
+- compatible : Must be "ti,am33xx-ehrpwm"
+- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
+ First cell specifies the per-chip index of the PWM to use, the second
+ cell is the period in nanoseconds and bit 0 in the third cell is used to
+ encode the polarity of the PWM output. Set bit 0 of the third cell in the
+ PWM specifier to 1 for inverse polarity and to 0 for normal polarity.
+- reg: physical base address and size of the registers map.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated to the EHRPWM:
+ "ehrpwm<x>", <x> being the 0-based instance number from the HW spec
+
+Example:
+
+ehrpwm0: ehrpwm@0 {
+ compatible = "ti,am33xx-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x48300200 0x100>;
+ ti,hwmods = "ehrpwm0";
+};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
new file mode 100644
index 00000000000..f7eae77f835
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
@@ -0,0 +1,31 @@
+TI SOC based PWM Subsystem
+
+Required properties:
+- compatible: Must be "ti,am33xx-pwmss";
+- reg: physical base address and size of the registers map.
+- #address-cells: Specifies the number of u32 entries needed in child nodes.
+  Should be set to 1.
+- #size-cells: Specifies the number of u32 entries needed to specify child node
+  sizes in the reg property. Should be set to 1.
+- ranges: describes the address mapping of a memory-mapped bus. Should be set
+  to the physical address map of the child's base address, the physical address
+  within the parent's address space and the length of the address map. For
+  am33xx, 3 sets of child register maps are present: the ECAP register space,
+  the EQEP register space and the EHRPWM register space.
+
+Child nodes should also be populated under the PWMSS DT node.
+
+Example:
+pwmss0: pwmss@48300000 {
+ compatible = "ti,am33xx-pwmss";
+ reg = <0x48300000 0x10>;
+ ti,hwmods = "epwmss0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ ranges = <0x48300100 0x48300100 0x80 /* ECAP */
+ 0x48300180 0x48300180 0x80 /* EQEP */
+ 0x48300200 0x48300200 0x80>; /* EHRPWM */
+
+ /* child nodes go here */
+};
diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt
index 73ec962bfe8..06e67247859 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm.txt
@@ -37,10 +37,21 @@ device:
pwm-names = "backlight";
};
+Note that in the example above, specifying the "pwm-names" is redundant
+because the name "backlight" would be used as fallback anyway.
+
pwm-specifier typically encodes the chip-relative PWM number and the PWM
-period in nanoseconds. Note that in the example above, specifying the
-"pwm-names" is redundant because the name "backlight" would be used as
-fallback anyway.
+period in nanoseconds.
+
+Optionally, the pwm-specifier can encode a number of flags in a third cell:
+- bit 0: PWM signal polarity (0: normal polarity, 1: inverse polarity)
+
+Example with the optional PWM specifier for inverse polarity:
+
+ bl: backlight {
+ pwms = <&pwm 0 5000000 1>;
+ pwm-names = "backlight";
+ };
2) PWM controller nodes
-----------------------
diff --git a/Documentation/devicetree/bindings/pwm/spear-pwm.txt b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
new file mode 100644
index 00000000000..3ac779d8338
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
@@ -0,0 +1,18 @@
+== ST SPEAr SoC PWM controller ==
+
+Required properties:
+- compatible: should be one of:
+ - "st,spear320-pwm"
+ - "st,spear1340-pwm"
+- reg: physical base address and length of the controller's registers
+- #pwm-cells: number of cells used to specify PWM which is fixed to 2 on
+ SPEAr. The first cell specifies the per-chip index of the PWM to use and
+ the second cell is the period in nanoseconds.
+
+Example:
+
+ pwm: pwm@a8000000 {
+ compatible ="st,spear320-pwm";
+ reg = <0xa8000000 0x1000>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
new file mode 100644
index 00000000000..2943ee5fce0
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
@@ -0,0 +1,17 @@
+Texas Instruments TWL series PWM drivers
+
+Supported PWMs:
+On TWL4030 series: PWM1 and PWM2
+On TWL6030 series: PWM0 and PWM1
+
+Required properties:
+- compatible: "ti,twl4030-pwm" or "ti,twl6030-pwm"
+- #pwm-cells: should be 2. The first cell specifies the per-chip index
+ of the PWM to use and the second cell is the period in nanoseconds.
+
+Example:
+
+twl_pwm: pwm {
+ compatible = "ti,twl6030-pwm";
+ #pwm-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
new file mode 100644
index 00000000000..cb64f3acc10
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
@@ -0,0 +1,17 @@
+Texas Instruments TWL series PWM drivers connected to LED terminals
+
+Supported PWMs:
+On TWL4030 series: PWMA and PWMB (connected to LEDA and LEDB terminals)
+On TWL6030 series: LED PWM (mainly used as charging indicator LED)
+
+Required properties:
+- compatible: "ti,twl4030-pwmled" or "ti,twl6030-pwmled"
+- #pwm-cells: should be 2. The first cell specifies the per-chip index
+ of the PWM to use and the second cell is the period in nanoseconds.
+
+Example:
+
+twl_pwmled: pwmled {
+ compatible = "ti,twl6030-pwmled";
+ #pwm-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
new file mode 100644
index 00000000000..bcc63678a9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
@@ -0,0 +1,17 @@
+VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
+
+Required properties:
+- compatible: should be "via,vt8500-pwm"
+- reg: physical base address and length of the controller's registers
+- #pwm-cells: should be 2. The first cell specifies the per-chip index
+ of the PWM to use and the second cell is the period in nanoseconds.
+- clocks: phandle to the PWM source clock
+
+Example:
+
+pwm1: pwm@d8220000 {
+ #pwm-cells = <2>;
+ compatible = "via,vt8500-pwm";
+ reg = <0xd8220000 0x1000>;
+ clocks = <&clkpwm>;
+};
diff --git a/Documentation/devicetree/bindings/regulator/gpio-regulator.txt b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
new file mode 100644
index 00000000000..63c659800c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
@@ -0,0 +1,37 @@
+GPIO controlled regulators
+
+Required properties:
+- compatible : Must be "regulator-gpio".
+- states : Selection of available voltages and GPIO configs.
+ If there are no states, then use a fixed regulator instead.
+
+Optional properties:
+- enable-gpio : GPIO to use to enable/disable the regulator.
+- gpios : GPIO group used to control voltage.
+- startup-delay-us : Startup time in microseconds.
+- enable-active-high : Polarity of GPIO is active high (default is low).
+
+Any property defined as part of the core regulator binding defined in
+regulator.txt can also be used.
+
+Example:
+
+ mmciv: gpio-regulator {
+ compatible = "regulator-gpio";
+
+ regulator-name = "mmci-gpio-supply";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2600000>;
+ regulator-boot-on;
+
+ enable-gpio = <&gpio0 23 0x4>;
+ gpios = <&gpio0 24 0x4
+ &gpio0 25 0x4>;
+ states = <1800000 0x3
+ 2200000 0x2
+ 2600000 0x1
+ 2900000 0x0>;
+
+ startup-delay-us = <100000>;
+ enable-active-high;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/max8925-regulator.txt b/Documentation/devicetree/bindings/regulator/max8925-regulator.txt
new file mode 100644
index 00000000000..0057695aae8
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8925-regulator.txt
@@ -0,0 +1,40 @@
+Max8925 Voltage regulators
+
+Required nodes:
+-nodes:
+ - SDV1 for SDV SDV1
+ - SDV2 for SDV SDV2
+ - SDV3 for SDV SDV3
+ - LDO1 for LDO LDO1
+ - LDO2 for LDO LDO2
+ - LDO3 for LDO LDO3
+ - LDO4 for LDO LDO4
+ - LDO5 for LDO LDO5
+ - LDO6 for LDO LDO6
+ - LDO7 for LDO LDO7
+ - LDO8 for LDO LDO8
+ - LDO9 for LDO LDO9
+ - LDO10 for LDO LDO10
+ - LDO11 for LDO LDO11
+ - LDO12 for LDO LDO12
+ - LDO13 for LDO LDO13
+ - LDO14 for LDO LDO14
+ - LDO15 for LDO LDO15
+ - LDO16 for LDO LDO16
+ - LDO17 for LDO LDO17
+ - LDO18 for LDO LDO18
+ - LDO19 for LDO LDO19
+ - LDO20 for LDO LDO20
+
+Optional properties:
+- Any optional property defined in bindings/regulator/regulator.txt
+
+Example:
+
+ SDV1 {
+ regulator-min-microvolt = <637500>;
+ regulator-max-microvolt = <1425000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
diff --git a/Documentation/devicetree/bindings/regulator/max8997-regulator.txt b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
new file mode 100644
index 00000000000..9fd69a18b0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8997-regulator.txt
@@ -0,0 +1,146 @@
+* Maxim MAX8997 Voltage and Current Regulator
+
+The Maxim MAX8997 is a multi-function device which includes voltage and
+current regulators, rtc, charger controller and other sub-blocks. It is
+interfaced to the host controller using an i2c interface. Each sub-block is
+addressed by the host system using a different i2c slave address. This document
+describes the bindings for the 'pmic' sub-block of max8997.
+
+Required properties:
+- compatible: Should be "maxim,max8997-pmic".
+- reg: Specifies the i2c slave address of the pmic block. It should be 0x66.
+
+- max8997,pmic-buck1-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+ units for buck1 when changing voltage using gpio dvs. Refer to [1] below
+ for additional information.
+
+- max8997,pmic-buck2-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+ units for buck2 when changing voltage using gpio dvs. Refer to [1] below
+ for additional information.
+
+- max8997,pmic-buck5-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+ units for buck5 when changing voltage using gpio dvs. Refer to [1] below
+ for additional information.
+
+[1] If none of the 'max8997,pmic-buck[1/2/5]-uses-gpio-dvs' optional
+  properties is specified, the 'max8997,pmic-buck[1/2/5]-dvs-voltage'
+  property should specify at least one voltage level (which would be a
+  safe operating voltage).
+
+  If any of the 'max8997,pmic-buck[1/2/5]-uses-gpio-dvs' optional
+  properties is specified, then all eight voltage values for the
+  'max8997,pmic-buck[1/2/5]-dvs-voltage' property should be specified.
+
+Optional properties:
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+ the interrupts from max8997 are delivered.
+- interrupts: Interrupt specifiers for two interrupt sources.
+ - First interrupt specifier is for 'irq1' interrupt.
+ - Second interrupt specifier is for 'alert' interrupt.
+- max8997,pmic-buck1-uses-gpio-dvs: 'buck1' can be controlled by gpio dvs.
+- max8997,pmic-buck2-uses-gpio-dvs: 'buck2' can be controlled by gpio dvs.
+- max8997,pmic-buck5-uses-gpio-dvs: 'buck5' can be controlled by gpio dvs.
+
+Additional properties required if either of the optional properties are used:
+- max8997,pmic-ignore-gpiodvs-side-effect: When GPIO-DVS mode is used for
+ multiple bucks, changing the voltage value of one of the bucks may affect
+ that of another buck, which is the side effect of the change (set_voltage).
+ Use this property to ignore such side effects and change the voltage.
+
+- max8997,pmic-buck125-default-dvs-idx: Default voltage setting selected from
+ the possible 8 options selectable by the dvs gpios. The value of this
+ property should be between 0 and 7. If not specified or if out of range, the
+ default value of this property is set to 0.
+
+- max8997,pmic-buck125-dvs-gpios: GPIO specifiers for the three host gpios used
+  for dvs. The format of the gpio specifier depends on the gpio controller.
+
+Regulators: The regulators of max8997 that have to be instantiated should be
+included in a sub-node named 'regulators'. Regulator nodes included in this
+sub-node should be of the format as listed below.
+
+ regulator_name {
+ standard regulator bindings here
+ };
+
+The following are the names of the regulators that the max8997 pmic block
+supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
+as per the datasheet of max8997.
+
+ - LDOn
+ - valid values for n are 1 to 18 and 21
+ - Example: LDO1, LDO2, LDO21
+ - BUCKn
+ - valid values for n are 1 to 7.
+ - Example: BUCK1, BUCK2, BUCK3, BUCK7
+
+ - ENVICHG: Battery Charging Current Monitor Output. This is a fixed
+ voltage type regulator
+
+ - ESAFEOUT1: (ldo19)
+ - ESAFEOUT2: (ldo20)
+
+ - CHARGER_CV: main battery charger voltage control
+ - CHARGER: main battery charger current control
+ - CHARGER_TOPOFF: end of charge current threshold level
+
+The bindings inside the regulator nodes use the standard regulator bindings
+which are documented elsewhere.
+
+Example:
+
+ max8997_pmic@66 {
+ compatible = "maxim,max8997-pmic";
+ interrupt-parent = <&wakeup_eint>;
+ reg = <0x66>;
+ interrupts = <4 0>, <3 0>;
+
+ max8997,pmic-buck1-uses-gpio-dvs;
+ max8997,pmic-buck2-uses-gpio-dvs;
+ max8997,pmic-buck5-uses-gpio-dvs;
+
+ max8997,pmic-ignore-gpiodvs-side-effect;
+ max8997,pmic-buck125-default-dvs-idx = <0>;
+
+ max8997,pmic-buck125-dvs-gpios = <&gpx0 0 1 0 0>, /* SET1 */
+ <&gpx0 1 1 0 0>, /* SET2 */
+ <&gpx0 2 1 0 0>; /* SET3 */
+
+ max8997,pmic-buck1-dvs-voltage = <1350000>, <1300000>,
+ <1250000>, <1200000>,
+ <1150000>, <1100000>,
+ <1000000>, <950000>;
+
+ max8997,pmic-buck2-dvs-voltage = <1100000>, <1100000>,
+ <1100000>, <1100000>,
+ <1000000>, <1000000>,
+ <1000000>, <1000000>;
+
+ max8997,pmic-buck5-dvs-voltage = <1200000>, <1200000>,
+ <1200000>, <1200000>,
+ <1200000>, <1200000>,
+ <1200000>, <1200000>;
+
+ regulators {
+ ldo1_reg: LDO1 {
+ regulator-name = "VDD_ABB_3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo2_reg: LDO2 {
+ regulator-name = "VDD_ALIVE_1.1V";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ buck1_reg: BUCK1 {
+ regulator-name = "VDD_ARM_1.2V";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d316fb895da..4f05d208c95 100644
--- a/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -11,6 +11,9 @@ Required properties:
using the standard binding for regulators found at
Documentation/devicetree/bindings/regulator/regulator.txt.
+Optional properties:
+- ti,pmic-shutdown-controller: Tells the PMIC to shut down on PWR_EN toggle.
+
The valid names for regulators are:
tps65217: dcdc1, dcdc2, dcdc3, ldo1, ldo2, ldo3 and ldo4
@@ -20,6 +23,7 @@ Example:
tps: tps@24 {
compatible = "ti,tps65217";
+ ti,pmic-shutdown-controller;
regulators {
dcdc1_reg: dcdc1 {
diff --git a/Documentation/devicetree/bindings/regulator/vexpress.txt b/Documentation/devicetree/bindings/regulator/vexpress.txt
new file mode 100644
index 00000000000..d775f72487a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/vexpress.txt
@@ -0,0 +1,32 @@
+Versatile Express voltage regulators
+------------------------------------
+
+Requires node properties:
+- "compatible" value: "arm,vexpress-volt"
+- "arm,vexpress-sysreg,func" when controlled via vexpress-sysreg
+ (see Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+ for more details)
+
+Required regulator properties:
+- "regulator-name"
+- "regulator-always-on"
+
+Optional regulator properties:
+- "regulator-min-microvolt"
+- "regulator-max-microvolt"
+
+See Documentation/devicetree/bindings/regulator/regulator.txt
+for more details about the regulator properties.
+
+When no "regulator-[min|max]-microvolt" properties are defined,
+the device is treated as a fixed (or rather "read-only") regulator.
+
+Example:
+ volt@0 {
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "Cores";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
new file mode 100644
index 00000000000..c9d80d7da14
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt
@@ -0,0 +1,17 @@
+* i.MX25 Real Time Clock controller
+
+This binding supports the following chips: i.MX25, i.MX53
+
+Required properties:
+- compatible: should be: "fsl,imx25-rtc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: rtc alarm interrupt
+
+Example:
+
+rtc@80056000 {
+ compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
+ reg = <0x80056000 2000>;
+ interrupts = <29>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt b/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt
new file mode 100644
index 00000000000..93f45e9dce7
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/nvidia,tegra20-rtc.txt
@@ -0,0 +1,19 @@
+NVIDIA Tegra20 real-time clock
+
+The Tegra RTC maintains seconds and milliseconds counters, and five alarm
+registers. The alarms and other interrupts may wake the system from low-power
+state.
+
+Required properties:
+
+- compatible : should be "nvidia,tegra20-rtc".
+- reg : Specifies base physical address and size of the registers.
+- interrupts : A single interrupt specifier.
+
+Example:
+
+timer {
+ compatible = "nvidia,tegra20-rtc";
+ reg = <0x7000e000 0x100>;
+ interrupts = <0 2 0x04>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
new file mode 100644
index 00000000000..b47aa415c82
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -0,0 +1,17 @@
+TI Real Time Clock
+
+Required properties:
+- compatible: "ti,da830-rtc"
+- reg: Address range of rtc register set
+- interrupts: rtc timer, alarm interrupts in order
+- interrupt-parent: phandle for the interrupt controller
+
+Example:
+
+rtc@1c23000 {
+ compatible = "ti,da830-rtc";
+ reg = <0x23000 0x1000>;
+ interrupts = <19
+ 19>;
+ interrupt-parent = <&intc>;
+};
diff --git a/Documentation/devicetree/bindings/sound/ak4104.txt b/Documentation/devicetree/bindings/sound/ak4104.txt
new file mode 100644
index 00000000000..b902ee39cf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ak4104.txt
@@ -0,0 +1,22 @@
+AK4104 S/PDIF transmitter
+
+This device supports SPI mode only.
+
+Required properties:
+
+ - compatible : "asahi-kasei,ak4104"
+
+ - reg : The chip select number on the SPI bus
+
+Optional properties:
+
+ - reset-gpio : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the device starts.
+
+Example:
+
+spdif: ak4104@0 {
+ compatible = "asahi-kasei,ak4104";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+};
diff --git a/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt b/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt
new file mode 100644
index 00000000000..9c5a9947b64
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt
@@ -0,0 +1,26 @@
+* Atmel at91sam9g20ek wm8731 audio complex
+
+Required properties:
+ - compatible: "atmel,at91sam9g20ek-wm8731-audio"
+ - atmel,model: The user-visible name of this sound complex.
+ - atmel,audio-routing: A list of the connections between audio components.
+ - atmel,ssc-controller: The phandle of the SSC controller
+ - atmel,audio-codec: The phandle of the WM8731 audio codec
+Optional properties:
+ - pinctrl-names, pinctrl-0: Please refer to pinctrl-bindings.txt
+
+Example:
+sound {
+ compatible = "atmel,at91sam9g20ek-wm8731-audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_mck>;
+
+ atmel,model = "wm8731 @ AT91SAMG20EK";
+
+ atmel,audio-routing =
+ "Ext Spk", "LHPOUT",
+ "Int MIC", "MICIN";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8731>;
+};
diff --git a/Documentation/devicetree/bindings/sound/cs4271.txt b/Documentation/devicetree/bindings/sound/cs4271.txt
index c81b5fd5a5b..a850fb9c88e 100644
--- a/Documentation/devicetree/bindings/sound/cs4271.txt
+++ b/Documentation/devicetree/bindings/sound/cs4271.txt
@@ -18,6 +18,8 @@ Optional properties:
- reset-gpio: a GPIO spec to define which pin is connected to the chip's
!RESET pin
+ - cirrus,amuteb-eq-bmutec: When given, the Codec's AMUTEB=BMUTEC flag
+ is enabled.
Examples:
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
index 65dec876cb2..fd40c852d7c 100644
--- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
+++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
@@ -12,7 +12,7 @@ Required properties:
Optional properties:
- ti,dmic: phandle for the OMAP dmic node if the machine have it connected
-- ti,jack_detection: Need to be set to <1> if the board capable to detect jack
+- ti,jack_detection: Needs to be present if the board is capable of detecting jack
insertion, removal.
Available audio endpoints for the audio-routing table:
@@ -59,7 +59,7 @@ sound {
compatible = "ti,abe-twl6040";
ti,model = "SDP4430";
- ti,jack-detection = <1>;
+ ti,jack-detection;
ti,mclk-freq = <38400000>;
ti,mcpdm = <&mcpdm>;
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
new file mode 100644
index 00000000000..7b53da5cb75
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
@@ -0,0 +1,26 @@
+NVIDIA Tegra20 SFLASH controller.
+
+Required properties:
+- compatible : should be "nvidia,tegra20-sflash".
+- reg: Should contain SFLASH registers location and length.
+- interrupts: Should contain SFLASH interrupts.
+- nvidia,dma-request-selector : The Tegra DMA controller's phandle and
+ request selector for this SFLASH controller.
+
+Recommended properties:
+- spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+spi@7000c380 {
+ compatible = "nvidia,tegra20-sflash";
+ reg = <0x7000c380 0x80>;
+ interrupts = <0 39 0x04>;
+ nvidia,dma-request-selector = <&apbdma 16>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+};
+
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
new file mode 100644
index 00000000000..eefe15e3d95
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
@@ -0,0 +1,26 @@
+NVIDIA Tegra20/Tegra30 SLINK controller.
+
+Required properties:
+- compatible : should be "nvidia,tegra20-slink", "nvidia,tegra30-slink".
+- reg: Should contain SLINK registers location and length.
+- interrupts: Should contain SLINK interrupts.
+- nvidia,dma-request-selector : The Tegra DMA controller's phandle and
+ request selector for this SLINK controller.
+
+Recommended properties:
+- spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+spi@7000d600 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000d600 0x200>;
+ interrupts = <0 82 0x04>;
+ nvidia,dma-request-selector = <&apbdma 16>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+};
+
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
index 81df374adbb..938809c6829 100644
--- a/Documentation/devicetree/bindings/spi/omap-spi.txt
+++ b/Documentation/devicetree/bindings/spi/omap-spi.txt
@@ -6,7 +6,9 @@ Required properties:
- "ti,omap4-spi" for OMAP4+.
- ti,spi-num-cs : Number of chipselect supported by the instance.
- ti,hwmods: Name of the hwmod associated to the McSPI
-
+- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
+ input. The default is D0 as input and
+ D1 as output.
Example:
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index d2c33d0f533..296015e3c63 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -12,6 +12,7 @@ The SPI master node requires the following properties:
- #size-cells - should be zero.
- compatible - name of SPI bus controller following generic names
recommended practice.
+- cs-gpios - (optional) gpio chip selects.
No other properties are required in the SPI bus node. It is assumed
that a driver for an SPI bus device will understand that it is an SPI bus.
However, the binding does not attempt to define the specific method for
@@ -24,6 +25,22 @@ support describing the chip select layout.
Optional property:
- num-cs : total number of chipselects
+If cs-gpios is used, the number of chip selects will be automatically increased
+to max(number of cs-gpios, hw cs).
+
+So if for example the controller has 2 CS lines, and the cs-gpios
+property looks like this:
+
+cs-gpios = <&gpio1 0 0> <0> <&gpio1 1 0> <&gpio1 2 0>;
+
+Then it should be configured so that num_chipselect = 4 with the
+following mapping:
+
+cs0 : &gpio1 0 0
+cs1 : native
+cs2 : &gpio1 1 0
+cs3 : &gpio1 2 0
+
SPI slave nodes must be children of the SPI master node and can
contain the following properties.
- reg - (required) chip select address of device.
@@ -36,6 +53,11 @@ contain the following properties.
shifted clock phase (CPHA) mode
- spi-cs-high - (optional) Empty property indicating device requires
chip select active high
+- spi-3wire - (optional) Empty property indicating device requires
+ 3-wire mode.
+
+If a gpio chipselect is used for the SPI slave, the gpio number will be passed
+to the driver via the cs_gpio field; see the example sketched below.
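+
+This is a sketch only (the flash device and its properties are illustrative
+assumptions): a slave sitting on the gpio chipselect cs2 from the mapping
+above simply uses the matching index in its reg property:
+
+	flash@2 {
+		compatible = "spansion,m25p80";
+		reg = <2>;		/* cs2 -> &gpio1 1 0 */
+		spi-max-frequency = <20000000>;
+	};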
SPI example for an MPC5200 SPI bus:
spi@f00 {
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
new file mode 100644
index 00000000000..07e04cdc0c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -0,0 +1,26 @@
+Atmel SPI device
+
+Required properties:
+- compatible : should be "atmel,at91rm9200-spi".
+- reg: Address and length of the register set for the device
+- interrupts: Should contain spi interrupt
+- cs-gpios: chipselects
+
+Example:
+
+spi1: spi@fffcc000 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffcc000 0x4000>;
+ interrupts = <13 4 5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <&pioB 3 0>;
+ status = "okay";
+
+ mmc-slot@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ gpios = <&pioC 4 0>; /* CD */
+ spi-max-frequency = <25000000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/timer/nvidia,tegra20-timer.txt b/Documentation/devicetree/bindings/timer/nvidia,tegra20-timer.txt
new file mode 100644
index 00000000000..e019fdc3877
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nvidia,tegra20-timer.txt
@@ -0,0 +1,21 @@
+NVIDIA Tegra20 timer
+
+The Tegra20 timer provides four 29-bit timer channels and a single 32-bit free
+running counter. The first two channels may also trigger a watchdog reset.
+
+Required properties:
+
+- compatible : should be "nvidia,tegra20-timer".
+- reg : Specifies base physical address and size of the registers.
+- interrupts : A list of 4 interrupts; one per timer channel.
+
+Example:
+
+timer {
+ compatible = "nvidia,tegra20-timer";
+ reg = <0x60005000 0x60>;
+ interrupts = <0 0 0x04
+ 0 1 0x04
+ 0 41 0x04
+ 0 42 0x04>;
+};
diff --git a/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt b/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt
new file mode 100644
index 00000000000..906109d4c59
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nvidia,tegra30-timer.txt
@@ -0,0 +1,23 @@
+NVIDIA Tegra30 timer
+
+The Tegra30 timer provides ten 29-bit timer channels, a single 32-bit free
+running counter, and 5 watchdog modules. The first two channels may also
+trigger a legacy watchdog reset.
+
+Required properties:
+
+- compatible : should be "nvidia,tegra30-timer", "nvidia,tegra20-timer".
+- reg : Specifies base physical address and size of the registers.
+- interrupts : A list of 6 interrupts; one per each of timer channels 1
+ through 5, and one for the shared interrupt for the remaining channels.
+
+timer {
+ compatible = "nvidia,tegra30-timer", "nvidia,tegra20-timer";
+ reg = <0x60005000 0x400>;
+ interrupts = <0 0 0x04
+ 0 1 0x04
+ 0 41 0x04
+ 0 42 0x04
+ 0 121 0x04
+ 0 122 0x04>;
+};
diff --git a/Documentation/devicetree/bindings/usb/ehci-orion.txt b/Documentation/devicetree/bindings/usb/ehci-orion.txt
new file mode 100644
index 00000000000..6bc09ec14c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-orion.txt
@@ -0,0 +1,15 @@
+* EHCI controller, Orion Marvell variants
+
+Required properties:
+- compatible: must be "marvell,orion-ehci"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The EHCI interrupt
+
+Example:
+
+ ehci@50000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x50000 0x1000>;
+ interrupts = <19>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 770a0193ca1..902b1b1f568 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -55,4 +55,5 @@ ti Texas Instruments
via VIA Technologies, Inc.
wlf Wolfson Microelectronics
wm Wondermedia Technologies, Inc.
+winbond	Winbond Electronics Corp.
xlnx Xilinx
diff --git a/Documentation/devicetree/bindings/video/exynos_dp.txt b/Documentation/devicetree/bindings/video/exynos_dp.txt
new file mode 100644
index 00000000000..c60da67a5d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/exynos_dp.txt
@@ -0,0 +1,80 @@
+The Exynos display port interface should be configured based on
+the type of panel connected to it.
+
+We use two nodes:
+ -dp-controller node
+ -dptx-phy node (defined inside the dp-controller node)
+
+For the DP-PHY initialization, we use the dptx-phy node.
+Required properties for dptx-phy:
+ -reg:
+ Base address of DP PHY register.
+ -samsung,enable-mask:
+ The bit-mask used to enable/disable DP PHY.
+
+For the Panel initialization, we read data from dp-controller node.
+Required properties for dp-controller:
+ -compatible:
+ should be "samsung,exynos5-dp".
+ -reg:
+ physical base address of the controller and length
+ of memory mapped region.
+ -interrupts:
+ interrupt combiner values.
+ -interrupt-parent:
+ phandle to Interrupt combiner node.
+ -samsung,color-space:
+ input video data format.
+ COLOR_RGB = 0, COLOR_YCBCR422 = 1, COLOR_YCBCR444 = 2
+ -samsung,dynamic-range:
+ dynamic range for input video data.
+ VESA = 0, CEA = 1
+ -samsung,ycbcr-coeff:
+ YCbCr co-efficients for input video.
+ COLOR_YCBCR601 = 0, COLOR_YCBCR709 = 1
+ -samsung,color-depth:
+ number of bits per colour component.
+ COLOR_6 = 0, COLOR_8 = 1, COLOR_10 = 2, COLOR_12 = 3
+ -samsung,link-rate:
+ link rate supported by the panel.
+ LINK_RATE_1_62GBPS = 0x6, LINK_RATE_2_70GBPS = 0x0A
+ -samsung,lane-count:
+ number of lanes supported by the panel.
+ LANE_COUNT1 = 1, LANE_COUNT2 = 2, LANE_COUNT4 = 4
+
+Optional properties for dp-controller:
+ -interlaced:
+ interlace scan mode.
+ Progressive if defined, Interlaced if not defined
+ -vsync-active-high:
+ VSYNC polarity configuration.
+ High if defined, Low if not defined
+ -hsync-active-high:
+ HSYNC polarity configuration.
+ High if defined, Low if not defined
+
+Example:
+
+SOC specific portion:
+ dp-controller {
+ compatible = "samsung,exynos5-dp";
+ reg = <0x145b0000 0x10000>;
+ interrupts = <10 3>;
+ interrupt-parent = <&combiner>;
+
+ dptx-phy {
+ reg = <0x10040720>;
+ samsung,enable-mask = <1>;
+ };
+
+ };
+
+Board Specific portion:
+ dp-controller {
+ samsung,color-space = <0>;
+ samsung,dynamic-range = <0>;
+ samsung,ycbcr-coeff = <0>;
+ samsung,color-depth = <1>;
+ samsung,link-rate = <0x0a>;
+ samsung,lane-count = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/video/ssd1307fb.txt b/Documentation/devicetree/bindings/video/ssd1307fb.txt
new file mode 100644
index 00000000000..3d0060cff06
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/ssd1307fb.txt
@@ -0,0 +1,24 @@
+* Solomon SSD1307 Framebuffer Driver
+
+Required properties:
+ - compatible: Should be "solomon,ssd1307fb-<bus>". The only supported bus for
+ now is i2c.
+ - reg: Should contain address of the controller on the I2C bus. Most likely
+ 0x3c or 0x3d
+ - pwm: Should contain the pwm to use according to the OF device tree PWM
+ specification [0]
+ - reset-gpios: Should contain the GPIO used to reset the OLED display
+
+Optional properties:
+ - reset-active-low: Set this if the reset gpio is active at physical low level.
+
+[0]: Documentation/devicetree/bindings/pwm/pwm.txt
+
+Examples:
+ssd1307: oled@3c {
+ compatible = "solomon,ssd1307fb-i2c";
+ reg = <0x3c>;
+ pwms = <&pwm 4 3000>;
+ reset-gpios = <&gpio2 7>;
+ reset-active-low;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
new file mode 100644
index 00000000000..2957ebb5aa7
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -0,0 +1,15 @@
+* Atmel Watchdog Timers
+
+** at91sam9-wdt
+
+Required properties:
+- compatible: must be "atmel,at91sam9260-wdt".
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+Example:
+
+ watchdog@fffffd40 {
+ compatible = "atmel,at91sam9260-wdt";
+ reg = <0xfffffd40 0x10>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
new file mode 100644
index 00000000000..75558ccd9a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
@@ -0,0 +1,12 @@
+DaVinci Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible : Should be "ti,davinci-wdt"
+- reg : Should contain WDT registers location and length
+
+Examples:
+
+wdt: wdt@2320000 {
+ compatible = "ti,davinci-wdt";
+ reg = <0x02320000 0x80>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt b/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt
new file mode 100644
index 00000000000..80a37193c0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/twl4030-wdt.txt
@@ -0,0 +1,10 @@
+Device tree bindings for twl4030-wdt driver (TWL4030 watchdog)
+
+Required properties:
+ compatible = "ti,twl4030-wdt";
+
+Example:
+
+watchdog {
+ compatible = "ti,twl4030-wdt";
+};
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index ad86fb86c9a..0188903bc9e 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -376,7 +376,7 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
leaving the cpu domain and flushing caches at fault time. Note that all the
dma_buf files share the same anon inode, hence the exporter needs to replace
the dma_buf file stored in vma->vm_file with it's own if pte shootdown is
- requred. This is because the kernel uses the underlying inode's address_space
+ required. This is because the kernel uses the underlying inode's address_space
for vma tracking (and hence pte tracking at shootdown time with
unmap_mapping_range).
@@ -388,7 +388,7 @@ Being able to mmap an export dma-buf buffer object has 2 main use-cases:
Exporters that shoot down mappings (for any reasons) shall not do any
synchronization at fault time with outstanding device operations.
Synchronization is an orthogonal issue to sharing the backing storage of a
- buffer and hence should not be handled by dma-buf itself. This is explictly
+ buffer and hence should not be handled by dma-buf itself. This is explicitly
mentioned here because many people seem to want something like this, but if
different exporters handle this differently, buffer sharing can fail in
interesting ways depending upon the exporter (if userspace starts depending
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 74c25c8d888..b89a739a327 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -181,7 +181,6 @@ modversions.h*
nconf
ncscope.*
offset.h
-offsets.h
oui.c*
page-types
parse.c
diff --git a/Documentation/fault-injection/notifier-error-inject.txt b/Documentation/fault-injection/notifier-error-inject.txt
index c83526c364e..09adabef513 100644
--- a/Documentation/fault-injection/notifier-error-inject.txt
+++ b/Documentation/fault-injection/notifier-error-inject.txt
@@ -1,7 +1,7 @@
Notifier error injection
========================
-Notifier error injection provides the ability to inject artifical errors to
+Notifier error injection provides the ability to inject artificial errors to
specified notifier chain callbacks. It is useful to test the error handling of
notifier call chain failures which is rarely executed. There are kernel
modules that can be used to test the following notifiers.
@@ -14,7 +14,7 @@ modules that can be used to test the following notifiers.
CPU notifier error injection module
-----------------------------------
This feature can be used to test the error handling of the CPU notifiers by
-injecting artifical errors to CPU notifier chain callbacks.
+injecting artificial errors to CPU notifier chain callbacks.
If the notifier call chain should be failed with some events notified, write
the error code to debugfs interface
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 8c624a18f67..8042050eb26 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -38,6 +38,8 @@ dnotify_test.c
- example program for dnotify
ecryptfs.txt
- docs on eCryptfs: stacked cryptographic filesystem for Linux.
+efivarfs.txt
+ - info for the efivarfs filesystem.
exofs.txt
- info, usage, mount options, design about EXOFS.
ext2.txt
@@ -48,6 +50,8 @@ ext4.txt
- info, mount options and specifications for the Ext4 filesystem.
files.txt
- info on file management in the Linux kernel.
+f2fs.txt
+ - info and mount options for the F2FS filesystem.
fuse.txt
- info on the Filesystem in User SpacE including mount options.
gfs2.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index e540a24e5d0..f48e0c6b4c4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -80,7 +80,6 @@ rename: yes (all) (see below)
readlink: no
follow_link: no
put_link: no
-truncate: yes (see below)
setattr: yes
permission: no (may not block if called in rcu-walk mode)
get_acl: no
@@ -96,11 +95,6 @@ atomic_open: yes
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
- ->truncate() is never called directly - it's a callback, not a
-method. It's called by vmtruncate() - deprecated library function used by
-->setattr(). Locking information above applies to that call (i.e. is
-inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
-passed).
See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.
diff --git a/Documentation/filesystems/caching/backend-api.txt b/Documentation/filesystems/caching/backend-api.txt
index 382d52cdaf2..d78bab9622c 100644
--- a/Documentation/filesystems/caching/backend-api.txt
+++ b/Documentation/filesystems/caching/backend-api.txt
@@ -308,6 +308,18 @@ performed on the denizens of the cache. These are held in a structure of type:
obtained by calling object->cookie->def->get_aux()/get_attr().
+ (*) Invalidate data object [mandatory]:
+
+ int (*invalidate_object)(struct fscache_operation *op)
+
+ This is called to invalidate a data object (as pointed to by op->object).
+ All the data stored for this object should be discarded and an
+ attr_changed operation should be performed. The caller will follow up
+ with an object update operation.
+
+ fscache_op_complete() must be called on op before returning.
+
+
(*) Discard object [mandatory]:
void (*drop_object)(struct fscache_object *object)
@@ -419,7 +431,10 @@ performed on the denizens of the cache. These are held in a structure of type:
If an I/O error occurs, fscache_io_error() should be called and -ENOBUFS
returned if possible or fscache_end_io() called with a suitable error
- code..
+ code.
+
+ fscache_put_retrieval() should be called after a page or pages are dealt
+ with. This will complete the operation when all pages are dealt with.
(*) Request pages be read from cache [mandatory]:
@@ -526,6 +541,27 @@ FS-Cache provides some utilities that a cache backend may make use of:
error value should be 0 if successful and an error otherwise.
+ (*) Record that one or more pages being retrieved or allocated have been dealt
+ with:
+
+ void fscache_retrieval_complete(struct fscache_retrieval *op,
+ int n_pages);
+
+ This is called to record the fact that one or more pages have been dealt
+ with and are no longer the concern of this operation. When the number of
+ pages remaining in the operation reaches 0, the operation will be
+ completed.
+
+
+ (*) Record operation completion:
+
+ void fscache_op_complete(struct fscache_operation *op);
+
+ This is called to record the completion of an operation. This deducts
+ this operation from the parent object's run state, potentially permitting
+ one or more pending operations to start running.
+
+
(*) Set highest store limit:
void fscache_set_store_limit(struct fscache_object *object,
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index 7cc6bf2871e..97e6c0ecc5e 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -35,8 +35,9 @@ This document contains the following sections:
(12) Index and data file update
(13) Miscellaneous cookie operations
(14) Cookie unregistration
- (15) Index and data file invalidation
- (16) FS-Cache specific page flags.
+ (15) Index invalidation
+ (16) Data file invalidation
+ (17) FS-Cache specific page flags.
=============================
@@ -767,13 +768,42 @@ the cookies for "child" indices, objects and pages have been relinquished
first.
-================================
-INDEX AND DATA FILE INVALIDATION
-================================
+==================
+INDEX INVALIDATION
+==================
+
+There is no direct way to invalidate an index subtree. To do this, the caller
+should relinquish and retire the cookie they have, and then acquire a new one.
+
+
+======================
+DATA FILE INVALIDATION
+======================
+
+Sometimes it will be necessary to invalidate an object that contains data.
+Typically this will be necessary when the server tells the netfs of a foreign
+change - at which point the netfs has to throw away all the state it had for an
+inode and reload from the server.
+
+To indicate that a cache object should be invalidated, the following function
+can be called:
+
+ void fscache_invalidate(struct fscache_cookie *cookie);
+
+This can be called with spinlocks held as it defers the work to a thread pool.
+All extant storage, retrieval and attribute change ops at this point are
+cancelled and discarded. Some future operations will be rejected until the
+cache has had a chance to insert a barrier in the operations queue. After
+that, operations will be queued again behind the invalidation operation.
+
+The invalidation operation will perform an attribute change operation and an
+auxiliary data update operation as it is very likely these will have changed.
+
+Using the following function, the netfs can wait for the invalidation operation
+to have reached a point at which it can start submitting ordinary operations
+once again:
-There is no direct way to invalidate an index subtree or a data file. To do
-this, the caller should relinquish and retire the cookie they have, and then
-acquire a new one.
+ void fscache_wait_on_invalidate(struct fscache_cookie *cookie);
===========================
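As a rough illustration of the two calls documented above (this sketch is not
taken from the kernel sources; mynetfs_inode and its cookie field are invented
names, and error handling is omitted), a netfs might react to a foreign change
roughly like this:

  /* Sketch only: invented netfs types, calls as documented above. */
  static void mynetfs_note_foreign_change(struct mynetfs_inode *ni)
  {
          /* Discard the cache object's data and queue the invalidation. */
          fscache_invalidate(ni->cookie);

          /* ... drop local pages, refetch attributes from the server ... */

          /* Wait until ordinary operations may be submitted again. */
          fscache_wait_on_invalidate(ni->cookie);
  }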
diff --git a/Documentation/filesystems/caching/object.txt b/Documentation/filesystems/caching/object.txt
index 58313348da8..100ff41127e 100644
--- a/Documentation/filesystems/caching/object.txt
+++ b/Documentation/filesystems/caching/object.txt
@@ -216,7 +216,14 @@ servicing netfs requests:
The normal running state. In this state, requests the netfs makes will be
passed on to the cache.
- (6) State FSCACHE_OBJECT_UPDATING.
+ (6) State FSCACHE_OBJECT_INVALIDATING.
+
+ The object is undergoing invalidation. When the state comes here, it
+ discards all pending read, write and attribute change operations as it is
+ going to clear out the cache entirely and reinitialise it. It will then
+ continue to the FSCACHE_OBJECT_UPDATING state.
+
+ (7) State FSCACHE_OBJECT_UPDATING.
The state machine comes here to update the object in the cache from the
netfs's records. This involves updating the auxiliary data that is used
@@ -225,13 +232,13 @@ servicing netfs requests:
And there are terminal states in which an object cleans itself up, deallocates
memory and potentially deletes stuff from disk:
- (7) State FSCACHE_OBJECT_LC_DYING.
+ (8) State FSCACHE_OBJECT_LC_DYING.
The object comes here if it is dying because of a lookup or creation
error. This would be due to a disk error or system error of some sort.
Temporary data is cleaned up, and the parent is released.
- (8) State FSCACHE_OBJECT_DYING.
+ (9) State FSCACHE_OBJECT_DYING.
The object comes here if it is dying due to an error, because its parent
cookie has been relinquished by the netfs or because the cache is being
@@ -241,27 +248,27 @@ memory and potentially deletes stuff from disk:
can destroy themselves. This object waits for all its children to go away
before advancing to the next state.
- (9) State FSCACHE_OBJECT_ABORT_INIT.
+(10) State FSCACHE_OBJECT_ABORT_INIT.
The object comes to this state if it was waiting on its parent in
FSCACHE_OBJECT_INIT, but its parent died. The object will destroy itself
so that the parent may proceed from the FSCACHE_OBJECT_DYING state.
-(10) State FSCACHE_OBJECT_RELEASING.
-(11) State FSCACHE_OBJECT_RECYCLING.
+(11) State FSCACHE_OBJECT_RELEASING.
+(12) State FSCACHE_OBJECT_RECYCLING.
The object comes to one of these two states when dying once it is rid of
all its children, if it is dying because the netfs relinquished its
cookie. In the first state, the cached data is expected to persist, and
in the second it will be deleted.
-(12) State FSCACHE_OBJECT_WITHDRAWING.
+(13) State FSCACHE_OBJECT_WITHDRAWING.
The object transits to this state if the cache decides it wants to
withdraw the object from service, perhaps to make space, but also due to
error or just because the whole cache is being withdrawn.
-(13) State FSCACHE_OBJECT_DEAD.
+(14) State FSCACHE_OBJECT_DEAD.
The object transits to this state when the in-memory object record is
ready to be deleted. The object processor shouldn't ever see an object in
diff --git a/Documentation/filesystems/caching/operations.txt b/Documentation/filesystems/caching/operations.txt
index b6b070c57cb..bee2a5f93d6 100644
--- a/Documentation/filesystems/caching/operations.txt
+++ b/Documentation/filesystems/caching/operations.txt
@@ -174,7 +174,7 @@ Operations are used through the following procedure:
necessary (the object might have died whilst the thread was waiting).
When it has finished doing its processing, it should call
- fscache_put_operation() on it.
+ fscache_op_complete() and fscache_put_operation() on it.
(4) The operation holds an effective lock upon the object, preventing other
exclusive ops conflicting until it is released. The operation can be
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
new file mode 100644
index 00000000000..c477af086e6
--- /dev/null
+++ b/Documentation/filesystems/efivarfs.txt
@@ -0,0 +1,16 @@
+
+efivarfs - a (U)EFI variable filesystem
+
+The efivarfs filesystem was created to address the shortcomings of
+using entries in sysfs to maintain EFI variables. The old sysfs EFI
+variables code only supported variables of up to 1024 bytes. This
+limitation existed in version 0.99 of the EFI specification, but was
+removed before any full releases. Since variables can now be larger
+than a single page, sysfs isn't the best interface for this.
+
+Variables can be created, deleted and modified with the efivarfs
+filesystem.
+
+efivarfs is typically mounted like this,
+
+ mount -t efivarfs none /sys/firmware/efi/efivars
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 104322bf378..34ea4f1fa6e 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -200,12 +200,9 @@ inode_readahead_blks=n This tuning parameter controls the maximum
table readahead algorithm will pre-read into
the buffer cache. The default value is 32 blocks.
-nouser_xattr Disables Extended User Attributes. If you have extended
- attribute support enabled in the kernel configuration
- (CONFIG_EXT4_FS_XATTR), extended attribute support
- is enabled by default on mount. See the attr(5) manual
- page and http://acl.bestbits.at/ for more information
- about extended attributes.
+nouser_xattr Disables Extended User Attributes. See the
+ attr(5) manual page and http://acl.bestbits.at/
+ for more information about extended attributes.
noacl This option disables POSIX Access Control List
support. If ACL support is enabled in the kernel
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
new file mode 100644
index 00000000000..8fbd8b46ee3
--- /dev/null
+++ b/Documentation/filesystems/f2fs.txt
@@ -0,0 +1,421 @@
+================================================================================
+WHAT IS Flash-Friendly File System (F2FS)?
+================================================================================
+
+NAND flash memory-based storage devices, such as SSDs, eMMC, and SD cards, are
+used in a wide variety of systems, ranging from mobile devices to servers. Since
+their characteristics differ from those of conventional rotating disks, a file
+system, the layer above the storage device, should be designed from scratch to
+reflect those differences.
+
+F2FS is a file system exploiting NAND flash memory-based storage devices, which
+is based on Log-structured File System (LFS). The design has been focused on
+addressing the fundamental issues in LFS, which are snowball effect of wandering
+tree and high cleaning overhead.
+
+Since a NAND flash memory-based storage device shows different characteristics
+depending on its internal geometry and flash memory management scheme (FTL),
+F2FS and its tools support various parameters, not only for configuring the
+on-disk layout, but also for selecting allocation and cleaning algorithms.
+
+The file system formatting tool, "mkfs.f2fs", is available from the following
+git tree:
+>> git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git
+
+For reporting bugs and sending patches, please use the following mailing list:
+>> linux-f2fs-devel@lists.sourceforge.net
+
+================================================================================
+BACKGROUND AND DESIGN ISSUES
+================================================================================
+
+Log-structured File System (LFS)
+--------------------------------
+"A log-structured file system writes all modifications to disk sequentially in
+a log-like structure, thereby speeding up both file writing and crash recovery.
+The log is the only structure on disk; it contains indexing information so that
+files can be read back from the log efficiently. In order to maintain large free
+areas on disk for fast writing, we divide the log into segments and use a
+segment cleaner to compress the live information from heavily fragmented
+segments." from Rosenblum, M. and Ousterhout, J. K., 1992, "The design and
+implementation of a log-structured file system", ACM Trans. Computer Systems
+10, 1, 26–52.
+
+Wandering Tree Problem
+----------------------
+In LFS, when file data is updated and written to the end of the log, its direct
+pointer block is updated because the data's location changed. The indirect
+pointer block is then updated because the direct pointer block moved, and in
+this manner the upper index structures such as the inode, inode map, and
+checkpoint block are also updated recursively. This is known as the wandering
+tree problem [1], and to improve performance the file system should eliminate
+or relax this update propagation as much as possible.
+
+[1] Bityutskiy, A. 2005. JFFS3 design issues. http://www.linux-mtd.infradead.org/
+
+Cleaning Overhead
+-----------------
+Since LFS is based on out-of-place writes, it produces many obsolete blocks
+scattered across the whole storage. To provide new empty log space, it needs
+to reclaim these obsolete blocks transparently to users. This job is called
+the cleaning process.
+
+The process consists of the following four operations.
+1. A victim segment is selected through referencing segment usage table.
+2. It loads parent index structures of all the data in the victim identified by
+ segment summary blocks.
+3. It checks the cross-reference between the data and its parent index structure.
+4. It moves valid data selectively.
+
+This cleaning job may cause unexpectedly long delays, so the most important goal
+is to hide these latencies from users. The cleaner should also reduce the
+amount of valid data to be moved, and move it quickly.
+
+================================================================================
+KEY FEATURES
+================================================================================
+
+Flash Awareness
+---------------
+- Enlarge the random write area for better performance, while preserving high
+  spatial locality
+- Align FS data structures to the operational units in the FTL on a best-effort
+  basis
+
+Wandering Tree Problem
+----------------------
+- Use a term, “node”, that represents inodes as well as various pointer blocks
+- Introduce Node Address Table (NAT) containing the locations of all the “node”
+ blocks; this will cut off the update propagation.
+
+Cleaning Overhead
+-----------------
+- Support a background cleaning process
+- Support greedy and cost-benefit algorithms for victim selection policies
+- Support multi-head logs for static/dynamic hot and cold data separation
+- Introduce adaptive logging for efficient block allocation
+
+================================================================================
+MOUNT OPTIONS
+================================================================================
+
+background_gc_off Turn off cleaning operations, namely garbage collection,
+ triggered in background when I/O subsystem is idle.
+disable_roll_forward Disable the roll-forward recovery routine
+discard Issue discard/TRIM commands when a segment is cleaned.
+no_heap Disable heap-style segment allocation, which finds free
+ segments for data from the beginning of the main area and
+ for nodes from the end of the main area.
+nouser_xattr Disable Extended User Attributes. Note: xattr is enabled
+ by default if CONFIG_F2FS_FS_XATTR is selected.
+noacl Disable POSIX Access Control List. Note: acl is enabled
+ by default if CONFIG_F2FS_FS_POSIX_ACL is selected.
+active_logs=%u Support configuring the number of active logs. In the
+ current design, f2fs supports only 2, 4, and 6 logs.
+ Default number is 6.
+disable_ext_identify Disable the extension list configured by mkfs, so f2fs
+ is not aware of cold files such as media files.
+
+================================================================================
+DEBUGFS ENTRIES
+================================================================================
+
+/sys/kernel/debug/f2fs/ contains information about all the partitions mounted as
+f2fs. Each file shows the whole f2fs information.
+
+/sys/kernel/debug/f2fs/status includes:
+ - major file system information managed by f2fs currently
+ - average SIT information about whole segments
+ - current memory footprint consumed by f2fs.
+
+================================================================================
+USAGE
+================================================================================
+
+1. Download userland tools and compile them.
+
+2. Skip this step if f2fs was built statically into the kernel.
+ Otherwise, insert the f2fs.ko module.
+ # insmod f2fs.ko
+
+3. Create a directory to be used as the mount point
+ # mkdir /mnt/f2fs
+
+4. Format the block device, and then mount as f2fs
+ # mkfs.f2fs -l label /dev/block_device
+ # mount -t f2fs /dev/block_device /mnt/f2fs
+
+Format options
+--------------
+-l [label] : Give a volume label of up to 256 unicode characters.
+-a [0 or 1] : Split the start location of each area for heap-based allocation.
+ 1 is set by default, which enables this behaviour.
+-o [int] : Set overprovision ratio in percent over volume size.
+ 5 is set by default.
+-s [int] : Set the number of segments per section.
+ 1 is set by default.
+-z [int] : Set the number of sections per zone.
+ 1 is set by default.
+-e [str] : Set basic extension list. e.g. "mp3,gif,mov"
+
+================================================================================
+DESIGN
+================================================================================
+
+On-disk Layout
+--------------
+
+F2FS divides the whole volume into a number of segments, each of which is fixed
+at 2MB in size. A section is composed of consecutive segments, and a zone
+consists of a set of sections. By default, the section and zone sizes are both
+set to the size of one segment, but users can easily modify the sizes with mkfs.
+
+F2FS splits the entire volume into six areas, and all areas except the
+superblock consist of multiple segments, as described below.
+
+ align with the zone size <-|
+ |-> align with the segment size
+ _________________________________________________________________________
+ | | | Node | Segment | Segment | |
+ | Superblock | Checkpoint | Address | Info. | Summary | Main |
+ | (SB) | (CP) | Table (NAT) | Table (SIT) | Area (SSA) | |
+ |____________|_____2______|______N______|______N______|______N_____|__N___|
+ . .
+ . .
+ . .
+ ._________________________________________.
+ |_Segment_|_..._|_Segment_|_..._|_Segment_|
+ . .
+ ._________._________
+ |_section_|__...__|_
+ . .
+ .________.
+ |__zone__|
+
+- Superblock (SB)
+ : It is located at the beginning of the partition. There are two copies to
+ guard against file system corruption. It contains basic partition information
+ and some default parameters of f2fs.
+
+- Checkpoint (CP)
+ : It contains file system information, bitmaps for valid NAT/SIT sets, orphan
+ inode lists, and summary entries of current active segments.
+
+- Node Address Table (NAT)
+ : It is composed of a block address table for all the node blocks stored in
+ Main area.
+
+- Segment Information Table (SIT)
+ : It contains segment information such as valid block count and bitmap for the
+ validity of all the blocks.
+
+- Segment Summary Area (SSA)
+ : It contains summary entries which contain the owner information of all the
+ data and node blocks stored in the Main area.
+
+- Main Area
+ : It contains file and directory data including their indices.
+
+In order to avoid misalignment between file system and flash-based storage, F2FS
+aligns the start block address of CP with the segment size. Also, it aligns the
+start block address of Main area with the zone size by reserving some segments
+in SSA area.
+
+Refer to the following survey for additional technical details.
+https://wiki.linaro.org/WorkingGroups/Kernel/Projects/FlashCardSurvey
+
+File System Metadata Structure
+------------------------------
+
+F2FS adopts the checkpointing scheme to maintain file system consistency. At
+mount time, F2FS first tries to find the last valid checkpoint data by scanning
+the CP area. In order to reduce the scanning time, F2FS uses only two copies of
+CP. One of them always holds the last valid data; this is called the shadow copy
+mechanism. In addition to CP, the NAT and SIT also adopt the shadow copy mechanism.
+
+For file system consistency, each CP indicates which NAT and SIT copies are
+valid, as shown below.
+
+ +--------+----------+---------+
+ | CP | NAT | SIT |
+ +--------+----------+---------+
+ . . . .
+ . . . .
+ . . . .
+ +-------+-------+--------+--------+--------+--------+
+ | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 |
+ +-------+-------+--------+--------+--------+--------+
+ | ^ ^
+ | | |
+ `----------------------------------------'
+
+Index Structure
+---------------
+
+The key data structure to manage the data locations is a "node". Similar to
+traditional file structures, F2FS has three types of node: inode, direct node,
+and indirect node. F2FS assigns 4KB to an inode block, which contains 923 data
+block indices, two direct node pointers, two indirect node pointers, and one
+double indirect node pointer, as described below. One direct node block contains
+1018 data blocks, and one indirect node block also contains 1018 node blocks.
+Thus,
+one inode block (i.e., a file) covers:
+
+ 4KB * (923 + 2 * 1018 + 2 * 1018 * 1018 + 1018 * 1018 * 1018) := 3.94TB.
+
+ Inode block (4KB)
+ |- data (923)
+ |- direct node (2)
+ | `- data (1018)
+ |- indirect node (2)
+ | `- direct node (1018)
+ | `- data (1018)
+ `- double indirect node (1)
+ `- indirect node (1018)
+ `- direct node (1018)
+ `- data (1018)
+
+Note that all the node blocks are mapped by the NAT, which means the location of
+each node is translated through the NAT table. With respect to the wandering
+tree problem, this allows F2FS to cut off the propagation of node updates
+caused by leaf data writes.
+
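The 3.94TB figure above follows directly from the pointer counts; the small
stand-alone C program below (an illustration only, not part of f2fs) reproduces
the arithmetic:

  #include <stdio.h>

  int main(void)
  {
          unsigned long long blocks =
                  923ULL +                     /* data indices in the inode      */
                  2ULL * 1018 +                /* two direct node blocks         */
                  2ULL * 1018 * 1018 +         /* two indirect node blocks       */
                  1018ULL * 1018 * 1018;       /* one double indirect node block */

          printf("max file size = %.2f TB\n",
                 blocks * 4096.0 / (1024.0 * 1024 * 1024 * 1024));
          return 0;
  }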
+Directory Structure
+-------------------
+
+A directory entry occupies 11 bytes, which consists of the following attributes.
+
+- hash hash value of the file name
+- ino inode number
+- len the length of file name
+- type file type such as directory, symlink, etc
+
+A dentry block consists of 214 dentry slots and file names. Therein a bitmap is
+used to represent whether each dentry is valid or not. A dentry block occupies
+4KB with the following composition.
+
+ Dentry Block(4 K) = bitmap (27 bytes) + reserved (3 bytes) +
+ dentries(11 * 214 bytes) + file name (8 * 214 bytes)
+
+ [Bucket]
+ +--------------------------------+
+ |dentry block 1 | dentry block 2 |
+ +--------------------------------+
+ . .
+ . .
+ . [Dentry Block Structure: 4KB] .
+ +--------+----------+----------+------------+
+ | bitmap | reserved | dentries | file names |
+ +--------+----------+----------+------------+
+ [Dentry Block: 4KB] . .
+ . .
+ . .
+ +------+------+-----+------+
+ | hash | ino | len | type |
+ +------+------+-----+------+
+ [Dentry Structure: 11 bytes]
+
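The dentry block composition above fills a 4KB block exactly; the trivial C
check below (illustration only) verifies the arithmetic:

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          int slots = 214;
          int total = 27 /* bitmap */ + 3 /* reserved */
                    + slots * 11 /* dentries */ + slots * 8 /* file names */;

          assert(total == 4096);
          printf("dentry block = %d bytes\n", total);
          return 0;
  }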
+F2FS implements multi-level hash tables for the directory structure. Each level
+has a hash table with a dedicated number of hash buckets, as shown below. Note
+that "A(2B)" means a bucket includes 2 data blocks.
+
+----------------------
+A : bucket
+B : block
+N : MAX_DIR_HASH_DEPTH
+----------------------
+
+level #0 | A(2B)
+ |
+level #1 | A(2B) - A(2B)
+ |
+level #2 | A(2B) - A(2B) - A(2B) - A(2B)
+ . | . . . .
+level #N/2 | A(2B) - A(2B) - A(2B) - A(2B) - A(2B) - ... - A(2B)
+ . | . . . .
+level #N | A(4B) - A(4B) - A(4B) - A(4B) - A(4B) - ... - A(4B)
+
+The number of blocks and buckets is determined by:
+
+ ,- 2, if n < MAX_DIR_HASH_DEPTH / 2,
+ # of blocks in level #n = |
+ `- 4, Otherwise
+
+ ,- 2^n, if n < MAX_DIR_HASH_DEPTH / 2,
+ # of buckets in level #n = |
+ `- 2^((MAX_DIR_HASH_DEPTH / 2) - 1), Otherwise
+
+When F2FS looks up a file name in a directory, the hash value of the file name
+is calculated first. Then, F2FS scans the hash table in level #0 to find the
+dentry consisting of the file name and its inode number. If it is not found,
+F2FS scans the next hash table in level #1. In this way, F2FS scans the hash
+tables in each level incrementally from 1 to N. In each level F2FS needs to
+scan only one bucket, determined by the following equation, which gives
+O(log(# of files)) complexity.
+
+ bucket number to scan in level #n = (hash value) % (# of buckets in level #n)
+
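As an illustration of the two formulas above, the helper below computes the
bucket and block counts per level and the bucket to scan for a given hash.
MAX_DEPTH and the hash value are arbitrary example inputs, not the kernel's
actual constants:

  #include <stdio.h>

  #define MAX_DEPTH 63    /* example value only */

  static unsigned int blocks_in_level(unsigned int n)
  {
          return n < MAX_DEPTH / 2 ? 2 : 4;
  }

  static unsigned int buckets_in_level(unsigned int n)
  {
          return n < MAX_DEPTH / 2 ? 1U << n : 1U << (MAX_DEPTH / 2 - 1);
  }

  int main(void)
  {
          unsigned int hash = 0x12345678;     /* example hash value */
          unsigned int n;

          for (n = 0; n <= 4; n++)
                  printf("level #%u: %u buckets of %u blocks, scan bucket %u\n",
                         n, buckets_in_level(n), blocks_in_level(n),
                         hash % buckets_in_level(n));
          return 0;
  }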
+In the case of file creation, F2FS finds empty consecutive slots that cover the
+file name. F2FS searches for empty slots in the hash tables of all levels from
+1 to N in the same way as the lookup operation.
+
+The following figure shows an example of two cases holding children.
+ --------------> Dir <--------------
+ | |
+ child child
+
+ child - child [hole] - child
+
+ child - child - child [hole] - [hole] - child
+
+ Case 1: Case 2:
+ Number of children = 6, Number of children = 3,
+ File size = 7 File size = 7
+
+Default Block Allocation
+------------------------
+
+At runtime, F2FS manages six active logs inside "Main" area: Hot/Warm/Cold node
+and Hot/Warm/Cold data.
+
+- Hot node contains direct node blocks of directories.
+- Warm node contains direct node blocks except hot node blocks.
+- Cold node contains indirect node blocks
+- Hot data contains dentry blocks
+- Warm data contains data blocks except hot and cold data blocks
+- Cold data contains multimedia data or migrated data blocks
+
+LFS has two schemes for free space management: threaded log and
+copy-and-compaction. The copy-and-compaction scheme, which is known as cleaning,
+is well-suited for devices showing very good sequential write performance, since
+free segments are always available for writing new data. However, it suffers
+from cleaning overhead under high utilization. Conversely, the threaded log
+scheme suffers from random writes, but needs no cleaning process. F2FS adopts a
+hybrid scheme: the copy-and-compaction scheme is used by default, but the
+policy is dynamically changed to the threaded log scheme according to the file
+system status.
+
+In order to align F2FS with the underlying flash-based storage, F2FS allocates
+segments in units of a section. F2FS expects the section size to be the same as
+the garbage collection unit size in the FTL. Furthermore, with respect to the
+mapping granularity in the FTL, F2FS allocates each section of the active logs
+from different zones as much as possible, since the FTL can write the data in
+the active logs into one allocation unit according to its mapping granularity.
+
+Cleaning process
+----------------
+
+F2FS does cleaning both on demand and in the background. On-demand cleaning is
+triggered when there are not enough free segments to serve VFS calls. The
+background cleaner runs as a kernel thread and triggers the cleaning job when the
+system is idle.
+
+F2FS supports two victim selection policies: greedy and cost-benefit algorithms.
+In the greedy algorithm, F2FS selects a victim segment having the smallest number
+of valid blocks. In the cost-benefit algorithm, F2FS selects a victim segment
+according to the segment age and the number of valid blocks, in order to address
+the log block thrashing problem of the greedy algorithm. F2FS uses the greedy
+algorithm for the on-demand cleaner, while the background cleaner uses the
+cost-benefit algorithm.
+
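The greedy policy above can be pictured with a few lines of C; this is only a
toy illustration of the selection rule (fewest valid blocks wins), not the
kernel's implementation:

  #include <stdio.h>

  static unsigned int greedy_victim(const unsigned int *valid_blocks,
                                    unsigned int nsegs)
  {
          unsigned int i, victim = 0;

          for (i = 1; i < nsegs; i++)
                  if (valid_blocks[i] < valid_blocks[victim])
                          victim = i;
          return victim;
  }

  int main(void)
  {
          unsigned int valid[] = { 512, 37, 490, 3, 128 };

          printf("victim segment = %u\n",
                 greedy_victim(valid, sizeof(valid) / sizeof(valid[0])));
          return 0;
  }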
+In order to identify whether the data in the victim segment are valid or not,
+F2FS manages a bitmap. Each bit represents the validity of a block, and the
+bitmap is composed of a bit stream covering all blocks in the main area.
diff --git a/Documentation/filesystems/nfs/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index 092fad92a3f..01c2db76979 100644
--- a/Documentation/filesystems/nfs/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -39,21 +39,10 @@ interoperability problems with future clients. Known issues:
from a linux client are possible, but we aren't really
conformant with the spec (for example, we don't use kerberos
on the backchannel correctly).
- - Incomplete backchannel support: incomplete backchannel gss
- support and no support for BACKCHANNEL_CTL mean that
- callbacks (hence delegations and layouts) may not be
- available and clients confused by the incomplete
- implementation may fail.
- We do not support SSV, which provides security for shared
client-server state (thus preventing unauthorized tampering
with locks and opens, for example). It is mandatory for
servers to support this, though no clients use it yet.
- - Mandatory operations which we do not support, such as
- DESTROY_CLIENTID, are not currently used by clients, but will be
- (and the spec recommends their uses in common cases), and
- clients should not be expected to know how to recover from the
- case where they are not supported. This will eventually cause
- interoperability failures.
In addition, some limitations are inherited from the current NFSv4
implementation:
@@ -89,7 +78,7 @@ Operations
| | MNI | or OPT) | |
+----------------------+------------+--------------+----------------+
| ACCESS | REQ | | Section 18.1 |
-NS | BACKCHANNEL_CTL | REQ | | Section 18.33 |
+I | BACKCHANNEL_CTL | REQ | | Section 18.33 |
I | BIND_CONN_TO_SESSION | REQ | | Section 18.34 |
| CLOSE | REQ | | Section 18.2 |
| COMMIT | REQ | | Section 18.3 |
@@ -99,7 +88,7 @@ NS*| DELEGPURGE | OPT | FDELG (REQ) | Section 18.5 |
| DELEGRETURN | OPT | FDELG, | Section 18.6 |
| | | DDELG, pNFS | |
| | | (REQ) | |
-NS | DESTROY_CLIENTID | REQ | | Section 18.50 |
+I | DESTROY_CLIENTID | REQ | | Section 18.50 |
I | DESTROY_SESSION | REQ | | Section 18.37 |
I | EXCHANGE_ID | REQ | | Section 18.35 |
I | FREE_STATEID | REQ | | Section 18.38 |
@@ -192,7 +181,6 @@ EXCHANGE_ID:
CREATE_SESSION:
* backchannel attributes are ignored
-* backchannel security parameters are ignored
SEQUENCE:
* no support for dynamic slot table renegotiation (optional)
@@ -202,7 +190,7 @@ Nonstandard compound limitations:
ca_maxrequestsize request and a ca_maxresponsesize reply, so we may
fail to live up to the promise we made in CREATE_SESSION fore channel
negotiation.
-* No more than one IO operation (read, write, readdir) allowed per
- compound.
+* No more than one read-like operation allowed per compound; encoding
+ replies that cross page boundaries (except for read data) not handled.
See also http://wiki.linux-nfs.org/wiki/index.php/Server_4.0_and_4.1_issues.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 0742feebc6e..0472c31c163 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -281,7 +281,7 @@ ext2_write_failed and callers for an example.
[mandatory]
- ->truncate is going away. The whole truncate sequence needs to be
+ ->truncate is gone. The whole truncate sequence needs to be
implemented in ->setattr, which is now mandatory for filesystems
implementing on-disk size changes. Start with a copy of the old inode_setattr
and vmtruncate, and then reorder the vmtruncate + foofs_vmtruncate sequence to
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 3844d21d6ca..fd8d0d594fc 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -41,6 +41,7 @@ Table of Contents
3.5 /proc/<pid>/mountinfo - Information about mounts
3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
3.7 /proc/<pid>/task/<tid>/children - Information about task children
+ 3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
4 Configuring procfs
4.1 Mount options
@@ -142,7 +143,7 @@ Table 1-1: Process specific entries in /proc
pagemap Page table
stack Report full stack trace, enable via CONFIG_STACKTRACE
smaps an extension based on maps, showing the memory consumption of
- each mapping
+ each mapping and flags associated with it
..............................................................................
For example, to get the status information of a process, all you have to do is
@@ -181,6 +182,7 @@ read the file /proc/PID/status:
CapPrm: 0000000000000000
CapEff: 0000000000000000
CapBnd: ffffffffffffffff
+ Seccomp: 0
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1
@@ -237,6 +239,7 @@ Table 1-2: Contents of the status files (as of 2.6.30-rc7)
CapPrm bitmap of permitted capabilities
CapEff bitmap of effective capabilities
CapBnd bitmap of capabilities bounding set
+ Seccomp seccomp mode, like prctl(PR_GET_SECCOMP, ...)
Cpus_allowed mask of CPUs on which this process may run
Cpus_allowed_list Same as previous, but in "list format"
Mems_allowed mask of memory nodes allowed to this process
@@ -415,8 +418,9 @@ Swap: 0 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Locked: 374 kB
+VmFlags: rd ex mr mw me de
-The first of these lines shows the same information as is displayed for the
+the first of these lines shows the same information as is displayed for the
mapping in /proc/PID/maps. The remaining lines show the size of the mapping
(size), the amount of the mapping that is currently resident in RAM (RSS), the
process' proportional share of this mapping (PSS), the number of clean and
@@ -430,6 +434,41 @@ and a page is modified, the file page is replaced by a private anonymous copy.
"Swap" shows how much would-be-anonymous memory is also used, but out on
swap.
+"VmFlags" field deserves a separate description. This member represents the kernel
+flags associated with the particular virtual memory area in two letter encoded
+manner. The codes are the following:
+ rd - readable
+ wr - writeable
+ ex - executable
+ sh - shared
+ mr - may read
+ mw - may write
+ me - may execute
+ ms - may share
+ gd - stack segment grows down
+ pf - pure PFN range
+ dw - disabled write to the mapped file
+ lo - pages are locked in memory
+ io - memory mapped I/O area
+ sr - sequential read advise provided
+ rr - random read advise provided
+ dc - do not copy area on fork
+ de - do not expand area on remapping
+ ac - area is accountable
+ nr - swap space is not reserved for the area
+ ht - area uses huge tlb pages
+ nl - non-linear mapping
+ ar - architecture specific flag
+ dd - do not include area into core dump
+ mm - mixed map area
+ hg - huge page advise flag
+ nh - no-huge page advise flag
+ mg - mergable advise flag
+
+Note that there is no guarantee that every flag and associated mnemonic will
+be present in all future kernel releases. Things change: flags may disappear
+or, conversely, new ones may be added.
+
This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
pids, so one needs to either stop or freeze processes being inspected
if precise results are needed.
+3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
+---------------------------------------------------------------
+This file provides information associated with an opened file. Regular
+files have at least two fields -- 'pos' and 'flags'. The 'pos' field represents
+the current offset of the opened file in decimal form [see lseek(2) for
+details] and 'flags' denotes the octal O_xxx mask the file has been
+created with [see open(2) for details].
+
+A typical output is
+
+ pos: 0
+ flags: 0100002
+
+Files such as eventfd, fsnotify, signalfd and epoll provide, in addition to the
+regular pos/flags pair, information particular to the objects they represent.
+
+ Eventfd files
+ ~~~~~~~~~~~~~
+ pos: 0
+ flags: 04002
+ eventfd-count: 5a
+
+ where 'eventfd-count' is the hex value of the counter.
+
+ Signalfd files
+ ~~~~~~~~~~~~~~
+ pos: 0
+ flags: 04002
+ sigmask: 0000000000000200
+
+ where 'sigmask' is the hex value of the signal mask associated
+ with the file.
+
+ Epoll files
+ ~~~~~~~~~~~
+ pos: 0
+ flags: 02
+ tfd: 5 events: 1d data: ffffffffffffffff
+
+ where 'tfd' is a target file descriptor number in decimal form,
+ 'events' is the event mask being watched and 'data' is the data
+ associated with the target [see epoll(7) for more details].
+
+ Fsnotify files
+ ~~~~~~~~~~~~~~
+ For inotify files the format is the following
+
+ pos: 0
+ flags: 02000000
+ inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:7e9e0000640d1b6d
+
+ where 'wd' is a watch descriptor in decimal form, i.e. an identifier
+ for the watched target, 'ino' and 'sdev' are the inode and device where
+ the target file resides, and 'mask' is the mask of events, all in hex
+ form [see inotify(7) for more details].
+
+ If the kernel was built with exportfs support, the path to the target
+ file is encoded as a file handle. The file handle is provided by three
+ fields 'fhandle-bytes', 'fhandle-type' and 'f_handle', all in hex
+ format.
+
+ If the kernel is built without exportfs support the file handle won't be
+ printed out.
+
+ If there is no inotify mark attached yet the 'inotify' line will be omitted.
+
+ For fanotify files the format is
+
+ pos: 0
+ flags: 02
+ fanotify flags:10 event-flags:0
+ fanotify mnt_id:12 mflags:40 mask:38 ignored_mask:40000003
+ fanotify ino:4f969 sdev:800013 mflags:0 mask:3b ignored_mask:40000000 fhandle-bytes:8 fhandle-type:1 f_handle:69f90400c275b5b4
+
+ where the fanotify 'flags' and 'event-flags' are the values used in the
+ fanotify_init call, 'mnt_id' is the mount point identifier and 'mflags'
+ holds the flags associated with the mark, which are tracked separately
+ from the event mask. 'ino' and 'sdev' are the target inode and device,
+ 'mask' is the event mask and 'ignored_mask' is the mask of events to be
+ ignored, all in hex format. Together 'mflags', 'mask' and 'ignored_mask'
+ provide the flags and mask used in the fanotify_mark call [see the
+ fsnotify manpage for details].
+
+ While the first three lines are mandatory and always printed, the rest is
+ optional and may be omitted if no marks have been created yet.
+
+
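A small user-space program can be used to inspect these entries; the sketch
below (not part of the kernel) opens /proc/self/status as an arbitrary example
file and then dumps the matching /proc/self/fdinfo/<fd> entry, which shows at
least the 'pos' and 'flags' fields described above:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>

  int main(void)
  {
          char path[64], buf[4096];
          ssize_t n;
          int fd, info;

          fd = open("/proc/self/status", O_RDONLY);  /* any open file will do */
          if (fd < 0)
                  return 1;

          snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
          info = open(path, O_RDONLY);
          if (info >= 0) {
                  while ((n = read(info, buf, sizeof(buf))) > 0)
                          fwrite(buf, 1, n, stdout);
                  close(info);
          }
          close(fd);
          return 0;
  }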
------------------------------------------------------------------------------
Configuring procfs
------------------------------------------------------------------------------
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index de1e6c4dccf..d230dd9c99b 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -111,6 +111,15 @@ tz=UTC -- Interpret timestamps as UTC rather than local time.
useful when mounting devices (like digital cameras)
that are set to UTC in order to avoid the pitfalls of
local time.
+time_offset=minutes
+ -- Set offset for conversion of timestamps from local time
+ used by FAT to UTC. I.e. <minutes> minutes will be subtracted
+ from each timestamp to convert it to UTC used internally by
+ Linux. This is useful when the time zone set in sys_tz is
+ not the time zone used by the filesystem. Note that this
+ option still does not provide correct time stamps in all
+ cases in the presence of DST - time stamps taken in a
+ different DST setting will be off by one hour.
showexec -- If set, the execute permission bits of the file will be
allowed only if the extension part of the name is .EXE,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 2ee133e030c..e3869098163 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -350,7 +350,6 @@ struct inode_operations {
int (*readlink) (struct dentry *, char __user *,int);
void * (*follow_link) (struct dentry *, struct nameidata *);
void (*put_link) (struct dentry *, struct nameidata *, void *);
- void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
int (*get_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
@@ -431,16 +430,6 @@ otherwise noted.
started might not be in the page cache at the end of the
walk).
- truncate: Deprecated. This will not be called if ->setsize is defined.
- Called by the VFS to change the size of a file. The
- i_size field of the inode is set to the desired size by the
- VFS before this method is called. This method is called by
- the truncate(2) system call and related functionality.
-
- Note: ->truncate and vmtruncate are deprecated. Do not add new
- instances/calls of these. Filesystems should be converted to do their
- truncate sequence via ->setattr().
-
permission: called by the VFS to check for access rights on a POSIX-like
filesystem.
diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt
index 4627c4241ec..3c741214dfb 100644
--- a/Documentation/hid/uhid.txt
+++ b/Documentation/hid/uhid.txt
@@ -108,7 +108,7 @@ the request was handled successfully.
UHID_FEATURE_ANSWER:
If you receive a UHID_FEATURE request you must answer with this request. You
must copy the "id" field from the request into the answer. Set the "err" field
- to 0 if no error occured or to EIO if an I/O error occurred.
+ to 0 if no error occurred or to EIO if an I/O error occurred.
If "err" is 0 then you should fill the buffer of the answer with the results
of the feature request and set "size" correspondingly.
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 87850d86c55..8386aadc0a8 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -209,3 +209,13 @@ doesn't use CPU cycles.
Trip points must be set properly before switching to automatic fan speed
control mode. The driver will perform basic integrity checks before
actually switching to automatic control mode.
+
+
+Temperature offset attributes
+-----------------------------
+
+The driver supports temp[1-3]_offset sysfs attributes to adjust the reported
+temperature for thermal diodes or diode-connected thermal transistors.
+If a temperature sensor is configured for thermistors, the attribute values
+are ignored. If the thermal sensor type is Intel PECI, the temperature offset
+must be programmed to the critical CPU temperature.
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index f90f99920cc..3d3a0f97f96 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -138,7 +138,7 @@ Sysfs entries
When probing the chip, the driver identifies which PMBus registers are
supported, and determines available sensors from this information.
-Attribute files only exist if respective sensors are suported by the chip.
+Attribute files only exist if respective sensors are supported by the chip.
Labels are provided to inform the user about the sensor associated with
a given sysfs entry.
diff --git a/Documentation/hwmon/vexpress b/Documentation/hwmon/vexpress
new file mode 100644
index 00000000000..557d6d5ad90
--- /dev/null
+++ b/Documentation/hwmon/vexpress
@@ -0,0 +1,34 @@
+Kernel driver vexpress
+======================
+
+Supported systems:
+ * ARM Ltd. Versatile Express platform
+ Prefix: 'vexpress'
+ Datasheets:
+ * "Hardware Description" sections of the Technical Reference Manuals
+ for the Versatile Express boards:
+ http://infocenter.arm.com/help/topic/com.arm.doc.subset.boards.express/index.html
+ * Section "4.4.14. System Configuration registers" of the V2M-P1 TRM:
+ http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0447-/index.html
+
+Author: Pawel Moll
+
+Description
+-----------
+
+Versatile Express platform (http://www.arm.com/versatileexpress/) is a
+reference & prototyping system for ARM Ltd. processors. It can be set up
+from a wide range of boards, each of them containing (apart from the main
+chip/FPGA) a number of microcontrollers responsible for platform
+configuration and control. These microcontrollers can also monitor the
+board and its environment through a number of internal and external sensors,
+providing information about power line voltages and currents, board
+temperature and power usage. Some of them also calculate consumed energy
+and provide a cumulative use counter.
+
+The configuration devices are _not_ memory mapped and must be accessed
+via a custom interface, abstracted by the "vexpress_config" API.
+
+As these devices are non-discoverable, they must be described in a Device
+Tree passed to the kernel. Details of the DT binding for them can be found
+in Documentation/devicetree/bindings/hwmon/vexpress.txt.
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 49f5b680809..d1f22618e14 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -23,6 +23,12 @@ don't match these function names. For some of the operations which pass a
single data byte, the functions using SMBus protocol operation names execute
a different protocol operation entirely.
+Each transaction type corresponds to a functionality flag. Before calling a
+transaction function, a device driver should always check (just once) for
+the corresponding functionality flag to ensure that the underlying I2C
+adapter supports the transaction in question. See
+<file:Documentation/i2c/functionality> for the details.
+
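In the kernel, the check described above is normally done with
i2c_check_functionality(). From user space the same information is available
through the I2C_FUNCS ioctl on an i2c-dev node; the sketch below assumes
/dev/i2c-1 exists (example adapter only) and merely reports whether SMBus Read
Word is supported:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/i2c.h>
  #include <linux/i2c-dev.h>

  int main(void)
  {
          unsigned long funcs = 0;
          int fd = open("/dev/i2c-1", O_RDWR);    /* example adapter only */

          if (fd < 0 || ioctl(fd, I2C_FUNCS, &funcs) < 0) {
                  perror("i2c");
                  return 1;
          }
          printf("SMBus Read Word %ssupported\n",
                 (funcs & I2C_FUNC_SMBUS_READ_WORD_DATA) ? "" : "not ");
          close(fd);
          return 0;
  }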
Key to symbols
==============
@@ -49,6 +55,8 @@ This sends a single bit to the device, at the place of the Rd/Wr bit.
A Addr Rd/Wr [A] P
+Functionality flag: I2C_FUNC_SMBUS_QUICK
+
SMBus Receive Byte: i2c_smbus_read_byte()
==========================================
@@ -60,6 +68,8 @@ the previous SMBus command.
S Addr Rd [A] [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE
+
SMBus Send Byte: i2c_smbus_write_byte()
========================================
@@ -69,6 +79,8 @@ to a device. See Receive Byte for more information.
S Addr Wr [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE
+
SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
@@ -78,6 +90,8 @@ The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
+
SMBus Read Word: i2c_smbus_read_word_data()
============================================
@@ -88,6 +102,8 @@ byte. But this time, the data is a complete word (16 bits).
S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
+
Note the convenience function i2c_smbus_read_word_swapped is
available for reads where the two data bytes are the other way
around (not SMBus compliant, but very popular.)
@@ -102,6 +118,8 @@ the Read Byte operation.
S Addr Wr [A] Comm [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE_DATA
+
SMBus Write Word: i2c_smbus_write_word_data()
==============================================
@@ -112,6 +130,8 @@ specified through the Comm byte.
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA
+
Note the convenience function i2c_smbus_write_word_swapped is
available for writes where the two data bytes are the other way
around (not SMBus compliant, but very popular.)
@@ -126,6 +146,8 @@ This command selects a device register (through the Comm byte), sends
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
+Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
+
SMBus Block Read: i2c_smbus_read_block_data()
==============================================
@@ -137,6 +159,8 @@ of data is specified by the device in the Count byte.
S Addr Wr [A] Comm [A]
S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
+
SMBus Block Write: i2c_smbus_write_block_data()
================================================
@@ -147,6 +171,8 @@ Comm byte. The amount of data is specified in the Count byte.
S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
+
SMBus Block Write - Block Read Process Call
===========================================
@@ -160,6 +186,8 @@ This command selects a device register (through the Comm byte), sends
S Addr Wr [A] Comm [A] Count [A] Data [A] ...
S Addr Rd [A] [Count] A [Data] ... A P
+Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
+
SMBus Host Notify
=================
@@ -229,15 +257,7 @@ designated register that is specified through the Comm byte.
S Addr Wr [A] Comm [A]
S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
-
-I2C Block Read (2 Comm bytes)
-=============================
-
-This command reads a block of bytes from a device, from a
-designated register that is specified through the two Comm bytes.
-
-S Addr Wr [A] Comm1 [A] Comm2 [A]
- S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
I2C Block Write: i2c_smbus_write_i2c_block_data()
@@ -249,3 +269,5 @@ Comm byte. Note that command lengths of 0, 2, or more bytes are
supported as they are indistinguishable from data.
S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P
+
+Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index ae8ba9a74ce..3262b6e4d68 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -133,7 +133,7 @@ number of contacts (f1 and f0 in the table below).
This packet only appears after a position packet with the mt bit set, and
usually only appears when there are two or more contacts (although
-occassionally it's seen with only a single contact).
+occasionally it's seen with only a single contact).
The final v3 packet type is the trackstick packet.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 53305bd0818..f1ea2c69648 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -196,6 +196,17 @@ EV_MSC:
EV_MSC events are used for input and output events that do not fall under other
categories.
+A few EV_MSC codes have special meaning:
+
+* MSC_TIMESTAMP:
+ - Used to report the number of microseconds since the last reset. This event
+ should be coded as a uint32 value, which is allowed to wrap around with
+ no special consequence. It is assumed that the time difference between two
+ consecutive events is reliable on a reasonable time scale (hours).
+ A reset to zero can happen, in which case the time since the last event is
+ unknown. If the device does not provide this information, the driver must
+ not provide it to user space.
+
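As a hedged sketch (not part of this patch), a driver whose hardware provides
a microsecond counter could forward it like this; "dev" and "hw_usec" are
hypothetical names for the driver's input device and counter value:

	/* Report the hardware timestamp alongside the other event data */
	input_event(dev, EV_MSC, MSC_TIMESTAMP, hw_usec);
	input_sync(dev);
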
EV_LED:
----------
EV_LED events are used for input and output to set and query the state of
diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt
index 3fb39e0116b..69372fb98cf 100644
--- a/Documentation/kbuild/modules.txt
+++ b/Documentation/kbuild/modules.txt
@@ -470,7 +470,7 @@ build.
Sometimes, an external module uses exported symbols from
another external module. kbuild needs to have full knowledge of
- all symbols to avoid spitting out warnings about undefined
+ all symbols to avoid splitting out warnings about undefined
symbols. Three solutions exist for this situation.
NOTE: The method with a top-level kbuild file is recommended
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 3d8a97747f7..99b57abddf8 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -64,6 +64,8 @@ Example kernel-doc function comment:
* comment lines.
*
* The longer description can have multiple paragraphs.
+ *
+ * Return: Describe the return value of foobar.
*/
The short description following the subject can span multiple lines
@@ -78,6 +80,8 @@ If a function parameter is "..." (varargs), it should be listed in
kernel-doc notation as:
* @...: description
+The return value, if any, should be described in a dedicated section
+named "Return".
Example kernel-doc data structure comment.
@@ -222,6 +226,9 @@ only a "*").
"section header:" names must be unique per function (or struct,
union, typedef, enum).
+Use the section header "Return" for sections describing the return value
+of a function.
+
Avoid putting a spurious blank line after the function name, or else the
description will be repeated!
@@ -237,21 +244,21 @@ patterns, which are highlighted appropriately.
NOTE 1: The multi-line descriptive text you provide does *not* recognize
line breaks, so if you try to format some text nicely, as in:
- Return codes
+ Return:
0 - cool
1 - invalid arg
2 - out of memory
this will all run together and produce:
- Return codes 0 - cool 1 - invalid arg 2 - out of memory
+ Return: 0 - cool 1 - invalid arg 2 - out of memory
NOTE 2: If the descriptive text you provide has lines that begin with
some phrase followed by a colon, each of those phrases will be taken as
a new section heading, which means you should similarly try to avoid text
like:
- Return codes:
+ Return:
0: cool
1: invalid arg
2: out of memory
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 20e248cc03a..363e348bff9 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -446,12 +446,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
possible to determine what the correct size should be.
This option provides an override for these situations.
- capability.disable=
- [SECURITY] Disable capabilities. This would normally
- be used only if an alternative security model is to be
- configured. Potentially dangerous and should only be
- used if you are entirely sure of the consequences.
-
ccw_timeout_log [S390]
See Documentation/s390/CommonIO for details.
@@ -1503,9 +1497,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
- [X86-32] Use together with memmap= to avoid physical
- address space collisions. Without memmap= PCI devices
- could be placed at addresses belonging to unused RAM.
+ [X86] Works as a limit on the max address. Use together
+ with memmap= to avoid physical address space collisions.
+ Without memmap= PCI devices could be placed at addresses
+ belonging to unused RAM.
mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel
memory.
@@ -2032,6 +2027,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
+ numa_balancing= [KNL,X86] Enable or disable automatic NUMA balancing.
+ Allowed values are enable and disable
+
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
one of ['zone', 'node', 'default'] can be specified
This can be set from sysctl after boot.
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 48ba715d5a6..ddf85a5dde0 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
and:
http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ mutex_unlock(&mutex);
+ return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry);
+}
+
+This is useful for removing the mutex lock around kref_put() in put_entry(), but
+it's important that kref_get_unless_zero is enclosed in the same critical
+section that finds the entry in the lookup table;
+otherwise kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example:
+
+struct my_data
+{
+ struct rcu_head rhead;
+ .
+ struct kref refcount;
+ .
+ .
+};
+
+static struct my_data *get_entry_rcu()
+{
+ struct my_data *entry = NULL;
+ rcu_read_lock();
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ rcu_read_unlock();
+ return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for a
+rcu grace period after release_entry_rcu was called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index c6f993d491b..8e5eacbdcfa 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -390,6 +390,7 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
+ int status_change_nid_high;
int status_change_nid;
}
@@ -397,7 +398,9 @@ start_pfn is start_pfn of online/offline memory.
nr_pages is # of pages of online/offline memory.
status_change_nid_normal is set node id when N_NORMAL_MEMORY of nodemask
is (will be) set/clear, if this is -1, then nodemask status is not changed.
-status_change_nid is set node id when N_HIGH_MEMORY of nodemask is (will be)
+status_change_nid_high is set node id when N_HIGH_MEMORY of nodemask
+is (will be) set/clear, if this is -1, then nodemask status is not changed.
+status_change_nid is set node id when N_MEMORY of nodemask is (will be)
set/clear. It means a new(memoryless) node gets new memory by online and a
node loses all memory. If this is -1, then nodemask status is not changed.
If status_changed_nid* >= 0, callback should create/discard structures for the
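For illustration (not part of this patch), a minimal notifier sketch that
inspects the new field might look as follows; the callback name and messages
are hypothetical:

static int example_mem_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	if (action == MEM_GOING_ONLINE && mn->status_change_nid >= 0)
		pr_info("node %d will get its first memory\n",
			mn->status_change_nid);
	if (action == MEM_GOING_ONLINE && mn->status_change_nid_high >= 0)
		pr_info("N_HIGH_MEMORY will be set for node %d\n",
			mn->status_change_nid_high);
	return NOTIFY_OK;
}

/* Registered via hotplug_memory_notifier(example_mem_callback, 0); */
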
diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/Documentation/misc-devices/mei/mei-amt-version.c
index 01804f21631..49e4f770864 100644
--- a/Documentation/misc-devices/mei/mei-amt-version.c
+++ b/Documentation/misc-devices/mei/mei-amt-version.c
@@ -214,7 +214,7 @@ out:
}
/***************************************************************************
- * Intel Advanced Management Technolgy ME Client
+ * Intel Advanced Management Technology ME Client
***************************************************************************/
#define AMT_MAJOR_VERSION 1
@@ -256,7 +256,7 @@ struct amt_code_versions {
} __attribute__((packed));
/***************************************************************************
- * Intel Advanced Management Technolgy Host Interface
+ * Intel Advanced Management Technology Host Interface
***************************************************************************/
struct amt_host_if_msg_header {
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 9c647bd7c5a..3f10b39b034 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -123,6 +123,9 @@ CONSTANT_CHARGE_VOLTAGE - constant charge voltage programmed by charger.
CONSTANT_CHARGE_VOLTAGE_MAX - maximum charge voltage supported by the
power supply object.
+CHARGE_CONTROL_LIMIT - current charge control limit setting
+CHARGE_CONTROL_LIMIT_MAX - maximum charge control limit setting
+
ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
CAPACITY - capacity in percents.
diff --git a/Documentation/powerpc/ptrace.txt b/Documentation/powerpc/ptrace.txt
index f4a5499b7bc..f2a7a391977 100644
--- a/Documentation/powerpc/ptrace.txt
+++ b/Documentation/powerpc/ptrace.txt
@@ -127,6 +127,22 @@ Some examples of using the structure to:
p.addr2 = (uint64_t) end_range;
p.condition_value = 0;
+- set a watchpoint in server processors (BookS)
+
+ p.version = 1;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
+ p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+ or
+ p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ p.addr = (uint64_t) begin_range;
+ /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where
+ * addr2 - addr <= 8 Bytes.
+ */
+ p.addr2 = (uint64_t) end_range;
+ p.condition_value = 0;
+
3. PTRACE_DELHWDEBUG
Takes an integer which identifies an existing breakpoint or watchpoint
diff --git a/Documentation/prctl/seccomp_filter.txt b/Documentation/prctl/seccomp_filter.txt
index 597c3c58137..1e469ef7577 100644
--- a/Documentation/prctl/seccomp_filter.txt
+++ b/Documentation/prctl/seccomp_filter.txt
@@ -95,12 +95,15 @@ SECCOMP_RET_KILL:
SECCOMP_RET_TRAP:
Results in the kernel sending a SIGSYS signal to the triggering
- task without executing the system call. The kernel will
- rollback the register state to just before the system call
- entry such that a signal handler in the task will be able to
- inspect the ucontext_t->uc_mcontext registers and emulate
- system call success or failure upon return from the signal
- handler.
+ task without executing the system call. siginfo->si_call_addr
+ will show the address of the system call instruction, and
+ siginfo->si_syscall and siginfo->si_arch will indicate which
+ syscall was attempted. The program counter will be as though
+ the syscall happened (i.e. it will not point to the syscall
+ instruction). The return value register will contain an arch-
+ dependent value -- if resuming execution, set it to something
+ sensible. (The architecture dependency is because replacing
+ it with -ENOSYS could overwrite some useful information.)
The SECCOMP_RET_DATA portion of the return value will be passed
as si_errno.
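
As a purely illustrative userspace sketch (not part of this patch), a SIGSYS
handler can record these fields; the si_syscall/si_arch/si_call_addr names
require a C library whose siginfo_t exposes them:

	#include <signal.h>

	static volatile sig_atomic_t last_nr;

	static void sigsys_handler(int sig, siginfo_t *info, void *uctx)
	{
		last_nr = info->si_syscall;   /* which syscall was attempted */
		(void)info->si_arch;          /* AUDIT_ARCH_* of the caller */
		(void)info->si_call_addr;     /* address of the syscall insn */
		(void)info->si_errno;         /* SECCOMP_RET_DATA from filter */
	}

	/* Installed with sigaction() and SA_SIGINFO:
	 *	struct sigaction sa = { .sa_sigaction = sigsys_handler,
	 *				.sa_flags = SA_SIGINFO };
	 *	sigaction(SIGSYS, &sa, NULL);
	 */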
@@ -123,6 +126,18 @@ SECCOMP_RET_TRACE:
the BPF program return value will be available to the tracer
via PTRACE_GETEVENTMSG.
+ The tracer can skip the system call by changing the syscall number
+ to -1. Alternatively, the tracer can change the system call
+ requested by changing the system call to a valid syscall number. If
+ the tracer asks to skip the system call, then the system call will
+ appear to return the value that the tracer puts in the return value
+ register.
+
+ The seccomp check will not be run again after the tracer is
+ notified. (This means that seccomp-based sandboxes MUST NOT
+ allow use of ptrace, even of other sandboxed processes, without
+ extreme care; ptracers can use this mechanism to escape.)
+
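As a hedged sketch for x86-64 only (not part of this patch), a tracer stopped
at a PTRACE_EVENT_SECCOMP stop could skip the call and choose the value the
tracee sees returned; "pid" is the stopped tracee:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/user.h>

	static void skip_syscall(pid_t pid, long retval)
	{
		struct user_regs_struct regs;

		ptrace(PTRACE_GETREGS, pid, 0, &regs);
		regs.orig_rax = -1;	/* ask the kernel to skip the syscall */
		regs.rax = retval;	/* value the tracee will see returned */
		ptrace(PTRACE_SETREGS, pid, 0, &regs);
	}
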
SECCOMP_RET_ALLOW:
Results in the system call being executed.
@@ -161,3 +176,50 @@ architecture supports both ptrace_event and seccomp, it will be able to
support seccomp filter with minor fixup: SIGSYS support and seccomp return
value checking. Then it must just add CONFIG_HAVE_ARCH_SECCOMP_FILTER
to its arch-specific Kconfig.
+
+
+
+Caveats
+-------
+
+The vDSO can cause some system calls to run entirely in userspace,
+leading to surprises when you run programs on different machines that
+fall back to real syscalls. To minimize these surprises on x86, make
+sure you test with
+/sys/devices/system/clocksource/clocksource0/current_clocksource set to
+something like acpi_pm.
+
+On x86-64, vsyscall emulation is enabled by default. (vsyscalls are
+legacy variants on vDSO calls.) Currently, emulated vsyscalls will honor
+seccomp, with a few oddities:
+
+- A return value of SECCOMP_RET_TRAP will set a si_call_addr pointing to
+ the vsyscall entry for the given call and not the address after the
+ 'syscall' instruction. Any code which wants to restart the call
+ should be aware that (a) a ret instruction has been emulated and (b)
+ trying to resume the syscall will again trigger the standard vsyscall
+ emulation security checks, making resuming the syscall mostly
+ pointless.
+
+- A return value of SECCOMP_RET_TRACE will signal the tracer as usual,
+ but the syscall may not be changed to another system call using the
+ orig_rax register. It may only be changed to -1 in order to skip the
+ currently emulated call. Any other change MAY terminate the process.
+ The rip value seen by the tracer will be the syscall entry address;
+ this is different from normal behavior. The tracer MUST NOT modify
+ rip or rsp. (Do not rely on other changes terminating the process.
+ They might work. For example, on some kernels, choosing a syscall
+ that only exists in future kernels will be correctly emulated (by
+ returning -ENOSYS).)
+
+To detect this quirky behavior, check for (addr & ~0x0C00) ==
+0xFFFFFFFFFF600000. (For SECCOMP_RET_TRACE, use rip. For
+SECCOMP_RET_TRAP, use siginfo->si_call_addr.) Do not check any other
+condition: future kernels may improve vsyscall emulation and current
+kernels in vsyscall=native mode will behave differently, but the
+instructions at 0xF...F600{0,4,8,C}00 will not be system calls in these
+cases.
+
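In C, the precedence of == over & makes explicit parentheses necessary; a
minimal helper for that check (an editorial sketch, not patch content) might
read:

	/* Sketch: true if addr lies in one of the emulated vsyscall entries */
	static int is_emulated_vsyscall(unsigned long addr)
	{
		return (addr & ~0xC00UL) == 0xFFFFFFFFFF600000UL;
	}
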
+Note that modern systems are unlikely to use vsyscalls at all -- they
+are a legacy feature and they are considerably slower than standard
+syscalls. New code will use the vDSO, and vDSO-issued system calls
+are indistinguishable from normal system calls.
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt
index 9605179711f..4a4f47e759c 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.txt
@@ -37,7 +37,7 @@ For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
0x40 Inbound Queue Port
0x44 Outbound Queue Port
-For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+For Marvell non-Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
BAR0 offset Register
0x20400 Inbound Doorbell Register
@@ -55,9 +55,31 @@ For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
0x40-0x1040 Inbound Queue
0x1040-0x2040 Outbound Queue
+For Marvell Frey IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
-I/O Request Workflow
-----------------------
+ BAR0 offset Register
+ 0x0 IOP configuration information.
+
+ BAR1 offset Register
+ 0x4000 Inbound List Base Address Low
+ 0x4004 Inbound List Base Address High
+ 0x4018 Inbound List Write Pointer
+ 0x402C Inbound List Configuration and Control
+ 0x4050 Outbound List Base Address Low
+ 0x4054 Outbound List Base Address High
+ 0x4058 Outbound List Copy Pointer Shadow Base Address Low
+ 0x405C Outbound List Copy Pointer Shadow Base Address High
+ 0x4088 Outbound List Interrupt Cause
+ 0x408C Outbound List Interrupt Enable
+ 0x1020C PCIe Function 0 Interrupt Enable
+ 0x10400 PCIe Function 0 to CPU Message A
+ 0x10420 CPU to PCIe Function 0 Message A
+ 0x10480 CPU to PCIe Function 0 Doorbell
+ 0x10484 CPU to PCIe Function 0 Doorbell Enable
+
+
+I/O Request Workflow of Non-Frey Marvell Adapters
+--------------------------------------------------
All queued requests are handled via inbound/outbound queue port.
A request packet can be allocated in either IOP or host memory.
@@ -101,6 +123,45 @@ register 0. An outbound message with the same value indicates the completion
of an inbound message.
+I/O Request Workflow of Marvell Frey
+--------------------------------------
+
+All queued requests are handled via inbound/outbound list.
+
+To send a request to the controller:
+
+ - Allocate a free request in host DMA coherent memory.
+
+ Requests allocated in host memory must be aligned on a 32-byte boundary.
+
+ - Fill the request with the index of the request in the flag.
+
+ Fill a free inbound list unit with the physical address and the size of
+ the request.
+
+ Set up the inbound list write pointer with the index of the previous unit,
+ wrapping to 0 if the index reaches the supported count of requests.
+
+ - Post the inbound list write pointer to the IOP.
+
+ - The IOP processes the request. When the request is completed, the flag of
+ the request, OR-ed with IOPMU_QUEUE_MASK_HOST_BITS, will be put into a
+ free outbound list unit and the index of the outbound list unit will be
+ put into the copy pointer shadow register. An outbound interrupt will be
+ generated.
+
+ - The host reads the outbound list copy pointer shadow register and compares
+ it with the previously saved read pointer N. If they differ, the host
+ reads the (N+1)th outbound list unit.
+
+ The host gets the index of the request from the (N+1)th outbound list
+ unit and completes the request.
+
+Non-queued requests (reset communication/reset/flush etc.) can be sent via the
+PCIe Function 0 to CPU Message A register. The CPU to PCIe Function 0 Message
+register containing the same value indicates the completion of the message.
+
+
User-level Interface
---------------------
@@ -112,7 +173,7 @@ The driver exposes following sysfs attributes:
-----------------------------------------------------------------------------
-Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
index eeed1de546d..414235c1fcf 100644
--- a/Documentation/security/00-INDEX
+++ b/Documentation/security/00-INDEX
@@ -12,6 +12,8 @@ apparmor.txt
- documentation on the AppArmor security extension.
credentials.txt
- documentation about credentials in Linux.
+keys-ecryptfs.txt
+ - description of the encryption keys for the ecryptfs filesystem.
keys-request-key.txt
- description of the kernel key request service.
keys-trusted-encrypted.txt
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7d9ca92022d..7b4145d0045 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -994,6 +994,23 @@ payload contents" for more information.
reference pointer if successful.
+(*) A keyring can be created by:
+
+ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ const struct cred *cred,
+ key_perm_t perm,
+ unsigned long flags,
+ struct key *dest);
+
+ This creates a keyring with the given attributes and returns it. If dest
+ is not NULL, the new keyring will be linked into the keyring to which it
+ points. No permission checks are made upon the destination keyring.
+
+ Error EDQUOT can be returned if the keyring would overload the quota (pass
+ KEY_ALLOC_NOT_IN_QUOTA in flags if the keyring shouldn't be accounted
+ towards the user's quota). Error ENOMEM can also be returned.
+
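    Purely as an illustrative sketch (not part of this patch), a call
    following the prototype above might look like this; the description
    string and permission mask are arbitrary, and the exact uid/gid types
    vary by kernel version:

	struct key *keyring;

	keyring = keyring_alloc(".example_ring",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				current_cred(),
				(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
				KEY_ALLOC_NOT_IN_QUOTA,	/* don't charge quota */
				NULL);			/* don't link anywhere */
	if (IS_ERR(keyring))
		return PTR_ERR(keyring);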
+
(*) To check the validity of a key, this function can be called:
int validate_key(struct key *key);
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index d90d8ec2853..b9cfd339a6f 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -1905,7 +1905,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
vid - Vendor ID for the device (optional)
pid - Product ID for the device (optional)
nrpacks - Max. number of packets per URB (default: 8)
- async_unlink - Use async unlink mode (default: yes)
device_setup - Device specific magic number (optional)
- Influence depends on the device
- Default: 0x0000
@@ -1917,8 +1916,6 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
NB: nrpacks parameter can be modified dynamically via sysfs.
Don't put the value over 20. Changing via sysfs has no sanity
check.
- NB: async_unlink=0 would cause Oops. It remains just for
- debugging purpose (if any).
NB: ignore_ctl_error=1 may help when you get an error at accessing
the mixer element such as URB error -22. This happens on some
buggy USB device or the controller.
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 4909d411635..eceab1308a8 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -49,6 +49,24 @@ be generated without __CHECK_ENDIAN__.
__bitwise - noisy stuff; in particular, __le*/__be* are that. We really
don't want to drown in noise unless we'd explicitly asked for it.
+Using sparse for lock checking
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following macros are undefined for gcc and defined during a sparse
+run to use the "context" tracking feature of sparse, applied to
+locking. These annotations tell sparse when a lock is held, with
+regard to the annotated function's entry and exit.
+
+__must_hold - The specified lock is held on function entry and exit.
+
+__acquires - The specified lock is held on function exit, but not entry.
+
+__releases - The specified lock is held on function entry, but not exit.
+
+If the function enters and exits without the lock held, acquiring and
+releasing the lock inside the function in a balanced way, no
+annotation is needed. The three annotations above are for cases where
+sparse would otherwise report a context imbalance.
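
A hedged sketch of how these are typically placed (struct foo and its
spinlock member are hypothetical):

static void foo_update(struct foo *f)
	__must_hold(&f->lock)
{
	f->count++;		/* caller already holds f->lock */
}

static void foo_lock(struct foo *f)
	__acquires(&f->lock)
{
	spin_lock(&f->lock);
}

static void foo_unlock(struct foo *f)
	__releases(&f->lock)
{
	spin_unlock(&f->lock);
}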
Getting sparse
~~~~~~~~~~~~~~
diff --git a/Documentation/video4linux/bttv/Cards b/Documentation/video4linux/bttv/Cards
index db833ced2cb..a8fb6e2d3c8 100644
--- a/Documentation/video4linux/bttv/Cards
+++ b/Documentation/video4linux/bttv/Cards
@@ -43,7 +43,7 @@ Very nice card if you only have satellite TV but several tuners connected
to the card via composite.
Many thanks to Matrix-Vision for giving us 2 cards for free which made
-Bt848a/Bt849 single crytal operation support possible!!!
+Bt848a/Bt849 single crystal operation support possible!!!
diff --git a/Documentation/video4linux/bttv/Sound-FAQ b/Documentation/video4linux/bttv/Sound-FAQ
index 395f6c6fdd9..d3f1d7783d1 100644
--- a/Documentation/video4linux/bttv/Sound-FAQ
+++ b/Documentation/video4linux/bttv/Sound-FAQ
card installed, you might want to check whether you can read these registers
values used by the windows driver. A tool to do this is available
from ftp://telepresence.dmem.strath.ac.uk/pub/bt848/winutil, but it
doesn't work with bt878 boards according to some reports I received.
-Another one with bt878 suport is available from
+Another one with bt878 support is available from
http://btwincap.sourceforge.net/Files/btspy2.00.zip
You might also dig around in the *.ini files of the Windows applications.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f6ec3a92e62..a4df5535996 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1194,12 +1194,15 @@ struct kvm_ppc_pvinfo {
This ioctl fetches PV specific information that need to be passed to the guest
using the device tree or other means from vm context.
-For now the only implemented piece of information distributed here is an array
-of 4 instructions that make up a hypercall.
+The hcall array defines 4 instructions that make up a hypercall.
If any additional field gets added to this structure later on, a bit for that
additional piece of information will be set in the flags bitmap.
+The flags bitmap is defined as:
+
+ /* the host supports the ePAPR idle hcall */
+ #define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
4.48 KVM_ASSIGN_PCI_DEVICE
@@ -1731,7 +1734,46 @@ registers, find a list below:
Arch | Register | Width (bits)
| |
PPC | KVM_REG_PPC_HIOR | 64
-
+ PPC | KVM_REG_PPC_IAC1 | 64
+ PPC | KVM_REG_PPC_IAC2 | 64
+ PPC | KVM_REG_PPC_IAC3 | 64
+ PPC | KVM_REG_PPC_IAC4 | 64
+ PPC | KVM_REG_PPC_DAC1 | 64
+ PPC | KVM_REG_PPC_DAC2 | 64
+ PPC | KVM_REG_PPC_DABR | 64
+ PPC | KVM_REG_PPC_DSCR | 64
+ PPC | KVM_REG_PPC_PURR | 64
+ PPC | KVM_REG_PPC_SPURR | 64
+ PPC | KVM_REG_PPC_DAR | 64
+ PPC | KVM_REG_PPC_DSISR | 32
+ PPC | KVM_REG_PPC_AMR | 64
+ PPC | KVM_REG_PPC_UAMOR | 64
+ PPC | KVM_REG_PPC_MMCR0 | 64
+ PPC | KVM_REG_PPC_MMCR1 | 64
+ PPC | KVM_REG_PPC_MMCRA | 64
+ PPC | KVM_REG_PPC_PMC1 | 32
+ PPC | KVM_REG_PPC_PMC2 | 32
+ PPC | KVM_REG_PPC_PMC3 | 32
+ PPC | KVM_REG_PPC_PMC4 | 32
+ PPC | KVM_REG_PPC_PMC5 | 32
+ PPC | KVM_REG_PPC_PMC6 | 32
+ PPC | KVM_REG_PPC_PMC7 | 32
+ PPC | KVM_REG_PPC_PMC8 | 32
+ PPC | KVM_REG_PPC_FPR0 | 64
+ ...
+ PPC | KVM_REG_PPC_FPR31 | 64
+ PPC | KVM_REG_PPC_VR0 | 128
+ ...
+ PPC | KVM_REG_PPC_VR31 | 128
+ PPC | KVM_REG_PPC_VSR0 | 128
+ ...
+ PPC | KVM_REG_PPC_VSR31 | 128
+ PPC | KVM_REG_PPC_FPSCR | 64
+ PPC | KVM_REG_PPC_VSCR | 32
+ PPC | KVM_REG_PPC_VPA_ADDR | 64
+ PPC | KVM_REG_PPC_VPA_SLB | 128
+ PPC | KVM_REG_PPC_VPA_DTL | 128
+ PPC | KVM_REG_PPC_EPCR | 32
4.69 KVM_GET_ONE_REG
@@ -1747,7 +1789,7 @@ kvm_one_reg struct passed in. On success, the register value can be found
at the memory location pointed to by "addr".
The list of registers accessible using this interface is identical to the
-list in 4.64.
+list in 4.68.
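
As an illustrative userspace sketch (not part of this patch), reading one
register might look like this; "vcpu_fd" is an already-created vcpu file
descriptor and <linux/kvm.h> is assumed to be included:

	__u64 val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_HIOR,	/* any id from the table above */
		.addr = (__u64)(unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		perror("KVM_GET_ONE_REG");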
4.70 KVM_KVMCLOCK_CTRL
@@ -1997,6 +2039,93 @@ return the hash table order in the parameter. (If the guest is using
the virtualized real-mode area (VRMA) facility, the kernel will
re-create the VMRA HPTEs on the next KVM_RUN of any vcpu.)
+4.77 KVM_S390_INTERRUPT
+
+Capability: basic
+Architectures: s390
+Type: vm ioctl, vcpu ioctl
+Parameters: struct kvm_s390_interrupt (in)
+Returns: 0 on success, -1 on error
+
+Allows userspace to inject an interrupt into the guest. Interrupts can be floating
+(vm ioctl) or per cpu (vcpu ioctl), depending on the interrupt type.
+
+Interrupt parameters are passed via kvm_s390_interrupt:
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+type can be one of the following:
+
+KVM_S390_SIGP_STOP (vcpu) - sigp stop
+KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
+KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
+KVM_S390_RESTART (vcpu) - restart
+KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt
+ parameters in parm and parm64
+KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm
+KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm
+KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm
+
+Note that the vcpu ioctl is asynchronous to vcpu execution.
+
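As an illustrative sketch (not part of this patch), injecting a floating
service interrupt from userspace might look like this; "vm_fd" is the VM file
descriptor and "sclp_parm" a hypothetical sclp parameter:

	struct kvm_s390_interrupt irq = {
		.type = KVM_S390_INT_SERVICE,
		.parm = sclp_parm,
	};

	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)
		perror("KVM_S390_INTERRUPT");
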
+4.78 KVM_PPC_GET_HTAB_FD
+
+Capability: KVM_CAP_PPC_HTAB_FD
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to struct kvm_get_htab_fd (in)
+Returns: file descriptor number (>= 0) on success, -1 on error
+
+This returns a file descriptor that can be used either to read out the
+entries in the guest's hashed page table (HPT), or to write entries to
+initialize the HPT. The returned fd can only be written to if the
+KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and
+can only be read if that bit is clear. The argument struct looks like
+this:
+
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+ __u64 flags;
+ __u64 start_index;
+ __u64 reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1)
+#define KVM_GET_HTAB_WRITE ((__u64)0x2)
+
+The `start_index' field gives the index in the HPT of the entry at
+which to start reading. It is ignored when writing.
+
+Reads on the fd will initially supply information about all
+"interesting" HPT entries. Interesting entries are those with the
+bolted bit set, if the KVM_GET_HTAB_BOLTED_ONLY bit is set, otherwise
+all entries. When the end of the HPT is reached, the read() will
+return. If read() is called again on the fd, it will start again from
+the beginning of the HPT, but will only return HPT entries that have
+changed since they were last read.
+
+Data read or written is structured as a header (8 bytes) followed by a
+series of valid HPT entries (16 bytes) each. The header indicates how
+many valid HPT entries there are and how many invalid entries follow
+the valid entries. The invalid entries are not represented explicitly
+in the stream. The header format is:
+
+struct kvm_get_htab_header {
+ __u32 index;
+ __u16 n_valid;
+ __u16 n_invalid;
+};
+
+Writes to the fd create HPT entries starting at the index given in the
+header; first `n_valid' valid entries with contents from the data
+written, then `n_invalid' invalid entries, invalidating any previously
+valid entries found.
+
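As a hedged userspace sketch (not part of this patch), obtaining the fd and
reading the first chunk of the stream could look like this; "vm_fd" is the VM
file descriptor:

	char buf[65536];
	struct kvm_get_htab_fd ghf = {
		.flags = 0,		/* read mode, all entries */
		.start_index = 0,
	};
	int htab_fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);

	if (htab_fd >= 0) {
		ssize_t len = read(htab_fd, buf, sizeof(buf));
		/* buf now holds kvm_get_htab_header records, each followed
		 * by its n_valid 16-byte HPT entries. */
	}
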
5. The kvm_run structure
------------------------
@@ -2109,7 +2238,8 @@ executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
-NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO and KVM_EXIT_OSI, the corresponding
+NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR
+ and KVM_EXIT_PAPR the corresponding
operations are complete (and guest state is consistent) only after userspace
has re-entered the kernel with KVM_RUN. The kernel side will first finish
incomplete operations and then check for pending signals. Userspace
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
index 5ef2d136642..c71a019be60 100644
--- a/Documentation/vm/frontswap.txt
+++ b/Documentation/vm/frontswap.txt
@@ -193,7 +193,7 @@ faster.
or maybe swap-over-nbd/NFS)?
No. First, the existing swap subsystem doesn't allow for any kind of
-swap hierarchy. Perhaps it could be rewritten to accomodate a hierarchy,
+swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy,
but this would require fairly drastic changes. Even if it were
rewritten, the existing swap subsystem uses the block I/O layer which
assumes a swap device is fixed size and any page in it is linearly
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index f734bb2a78d..8785fb87d9c 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -116,6 +116,13 @@ echo always >/sys/kernel/mm/transparent_hugepage/defrag
echo madvise >/sys/kernel/mm/transparent_hugepage/defrag
echo never >/sys/kernel/mm/transparent_hugepage/defrag
+By default the kernel tries to use the huge zero page on read page faults.
+It's possible to disable the huge zero page by writing 0, or enable it
+again by writing 1:
+
+echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/use_zero_page
+echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/use_zero_page
+
khugepaged will be automatically started when
transparent_hugepage/enabled is set to "always" or "madvise", and it'll
be automatically shutdown if it's set to "never".
@@ -197,6 +204,14 @@ thp_split is incremented every time a huge page is split into base
pages. This can happen for a variety of reasons but a common
reason is that a huge page is old and is being reclaimed.
+thp_zero_page_alloc is incremented every time a huge zero page is
+ successfully allocated. It includes allocations which were
+ dropped due to a race with another allocation. Note, it doesn't count
+ every map of the huge zero page, only its allocation.
+
+thp_zero_page_alloc_failed is incremented if kernel fails to allocate
+ huge zero page and falls back to using small pages.
+
As the system ages, allocating huge pages may be expensive as the
system uses memory compaction to copy data around memory to free a
huge page for use. There are some counters in /proc/vmstat to help
@@ -276,7 +291,7 @@ unaffected. libhugetlbfs will also work fine as usual.
== Graceful fallback ==
Code walking pagetables but unaware of huge pmds can simply call
-split_huge_page_pmd(mm, pmd) where the pmd is the one returned by
+split_huge_page_pmd(vma, addr, pmd) where the pmd is the one returned by
pmd_offset. It's trivial to make the code transparent hugepage aware
by just grepping for "pmd_offset" and adding split_huge_page_pmd where
missing after pmd_offset returns the pmd. Thanks to the graceful
@@ -299,7 +314,7 @@ diff --git a/mm/mremap.c b/mm/mremap.c
return NULL;
pmd = pmd_offset(pud, addr);
-+ split_huge_page_pmd(mm, pmd);
++ split_huge_page_pmd(vma, addr, pmd);
if (pmd_none_or_clear_bad(pmd))
return NULL;
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index f15cb74c4f7..406d82d5d2b 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -373,7 +373,7 @@ Protocol: 2.00+
1 Loadlin
2 bootsect-loader (0x20, all other values reserved)
3 Syslinux
- 4 Etherboot/gPXE
+ 4 Etherboot/gPXE/iPXE
5 ELILO
7 GRUB
8 U-Boot
@@ -381,6 +381,7 @@ Protocol: 2.00+
A Gujin
B Qemu
C Arcturus Networks uCbootloader
+ D kexec-tools
E Extended (see ext_loader_type)
F Special (0xFF = undefined)
10 Reserved
diff --git a/Documentation/xtensa/atomctl.txt b/Documentation/xtensa/atomctl.txt
new file mode 100644
index 00000000000..10a8d1ff35e
--- /dev/null
+++ b/Documentation/xtensa/atomctl.txt
@@ -0,0 +1,44 @@
+We have an Atomic Operation Control (ATOMCTL) Register.
+This register determines the effect of using a S32C1I instruction
+with various combinations of:
+
+ 1. With and without a Coherent Cache Controller which
+ can do Atomic Transactions to the memory internally.
+
+ 2. With and without an Intelligent Memory Controller which
+ can do Atomic Transactions itself.
+
+The Core comes up with a default value for the three types of cache ops:
+
+ 0x28: (WB: Internal, WT: Internal, BY:Exception)
+
+On the FPGA Cards we typically simulate an Intelligent Memory controller
+which can implement RCW transactions. For FPGA cards with an External
+Memory controller we let it do the atomic operations internally while
+doing a Cached (WB) transaction and use the Memory RCW for un-cached
+operations.
+
+For systems without a coherent cache controller (non-MX), we always
+use the memory controller's RCW, though non-MX controllers likely
+support the Internal Operation.
+
+CUSTOMER-WARNING:
+ Virtually all customers buy their memory controllers from vendors that
+ don't support atomic RCW memory transactions and will likely want to
+ configure this register to not use RCW.
+
+Developers might find using RCW in Bypass mode convenient when testing
+with the cache being bypassed; for example studying cache alias problems.
+
+See Section 4.3.12.4 of ISA; Bits:
+
+ WB WT BY
+ 5 4 | 3 2 | 1 0
+ 2 Bit
+ Field
+ Values WB - Write Back WT - Write Thru BY - Bypass
+--------- --------------- ----------------- ----------------
+ 0 Exception Exception Exception
+ 1 RCW Transaction RCW Transaction RCW Transaction
+ 2 Internal Operation Exception Reserved
+ 3 Reserved Reserved Reserved
diff --git a/MAINTAINERS b/MAINTAINERS
index 0bc485c4a88..fa309ab7ccb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -337,6 +337,13 @@ W: http://wireless.kernel.org/
S: Orphan
F: drivers/net/wireless/adm8211.*
+ADP1653 FLASH CONTROLLER DRIVER
+M: Sakari Ailus <sakari.ailus@iki.fi>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/adp1653.c
+F: include/media/adp1653.h
+
ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
M: Michael Hennerich <michael.hennerich@analog.com>
L: device-drivers-devel@blackfin.uclinux.org
@@ -1273,7 +1280,7 @@ F: Documentation/hwmon/asc7621
F: drivers/hwmon/asc7621.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
-M: Corentin Chary <corentincj@iksaif.net>
+M: Corentin Chary <corentin.chary@gmail.com>
L: acpi4asus-user@lists.sourceforge.net
L: platform-driver-x86@vger.kernel.org
W: http://acpi4asus.sf.net
@@ -1494,6 +1501,14 @@ F: include/linux/ax25.h
F: include/net/ax25.h
F: net/ax25/
+AZ6007 DVB DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/usb/dvb-usb-v2/az6007.c
+
B43 WIRELESS DRIVER
M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
@@ -1745,11 +1760,11 @@ F: Documentation/filesystems/btrfs.txt
F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@infradead.org>
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
-S: Maintained
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
F: Documentation/video4linux/bttv/
F: drivers/media/pci/bt8xx/bttv*
@@ -1778,7 +1793,7 @@ F: fs/cachefiles/
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
M: Jonathan Corbet <corbet@lwn.net>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: Documentation/video4linux/cafe_ccic
F: drivers/media/platform/marvell-ccic/
@@ -1914,7 +1929,7 @@ F: scripts/checkpatch.pl
CHINESE DOCUMENTATION
M: Harry Wei <harryxiyou@gmail.com>
-L: xiyoulinuxkernelgroup@googlegroups.com
+L: xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
F: Documentation/zh_CN/
@@ -2164,12 +2179,22 @@ CX18 VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
W: http://www.ivtvdriver.org/index.php/Cx18
S: Maintained
F: Documentation/video4linux/cx18.txt
F: drivers/media/pci/cx18/
+F: include/uapi/linux/ivtv*
+
+CX88 VIDEO4LINUX DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: Documentation/video4linux/cx88/
+F: drivers/media/pci/cx88/
CXD2820R MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
@@ -2524,6 +2549,15 @@ S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
+DRM DRIVERS FOR NVIDIA TEGRA
+M: Thierry Reding <thierry.reding@avionic-design.de>
+L: dri-devel@lists.freedesktop.org
+L: linux-tegra@vger.kernel.org
+T: git git://gitorious.org/thierryreding/linux.git
+S: Maintained
+F: drivers/gpu/drm/tegra/
+F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
DSCC4 DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -2726,6 +2760,15 @@ W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/amd64_edac*
+EDAC-CAVIUM
+M: Ralf Baechle <ralf@linux-mips.org>
+M: David Daney <david.daney@cavium.com>
+L: linux-edac@vger.kernel.org
+L: linux-mips@linux-mips.org
+W: bluesmoke.sourceforge.net
+S: Supported
+F: drivers/edac/octeon_edac*
+
EDAC-E752X
M: Mark Gross <mark.gross@intel.com>
M: Doug Thompson <dougthompson@xmission.com>
@@ -2856,6 +2899,14 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/
+EM28XX VIDEO4LINUX DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/usb/em28xx/
+
EMBEDDED LINUX
M: Paul Gortmaker <paul.gortmaker@windriver.com>
M: Matt Mackall <mpm@selenic.com>
@@ -2931,7 +2982,6 @@ L: linux-ext4@vger.kernel.org
S: Maintained
F: Documentation/filesystems/ext3.txt
F: fs/ext3/
-F: include/linux/ext3*
EXT4 FILE SYSTEM
M: "Theodore Ts'o" <tytso@mit.edu>
@@ -3054,6 +3104,14 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/firewire/
+FIREWIRE MEDIA DRIVERS (firedtv)
+M: Stefan Richter <stefanr@s5r6.in-berlin.de>
+L: linux-media@vger.kernel.org
+L: linux1394-devel@lists.sourceforge.net
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+S: Maintained
+F: drivers/media/firewire/
+
FIREWIRE SBP-2 TARGET
M: Chris Boot <bootc@bootc.net>
L: linux-scsi@vger.kernel.org
@@ -3070,7 +3128,8 @@ W: http://ieee1394.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
S: Maintained
F: drivers/firewire/
-F: include/linux/firewire*.h
+F: include/linux/firewire.h
+F: include/uapi/linux/firewire*.h
F: tools/firewire/
FIRMWARE LOADER (request_firmware)
@@ -3112,6 +3171,12 @@ F: drivers/video/
F: include/video/
F: include/linux/fb.h
+FREESCALE DIU FRAMEBUFFER DRIVER
+M: Timur Tabi <timur@freescale.com>
+L: linux-fbdev@vger.kernel.org
+S: Supported
+F: drivers/video/fsl-diu-fb.*
+
FREESCALE DMA DRIVER
M: Li Yang <leoli@freescale.com>
M: Zhang Wei <zw@zh-kernel.org>
@@ -3340,56 +3405,56 @@ F: drivers/net/ethernet/aeroflex/
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/finepix.c
GSPCA GL860 SUBDRIVER
M: Olivier Lorin <o.lorin@laposte.net>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/gl860/
GSPCA M5602 SUBDRIVER
M: Erik Andren <erik.andren@gmail.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/m5602/
GSPCA PAC207 SONIXB SUBDRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/pac207.c
GSPCA SN9C20X SUBDRIVER
M: Brian Johnson <brijohn@gmail.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/sn9c20x.c
GSPCA T613 SUBDRIVER
M: Leandro Costantino <lcostantino@gmail.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/t613.c
GSPCA USB WEBCAM DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/
STK1160 USB VIDEO CAPTURE DRIVER
M: Ezequiel Garcia <elezegarcia@gmail.com>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/stk1160/
@@ -3596,7 +3661,7 @@ S: Maintained
F: drivers/input/touchscreen/htcpen.c
HUGETLB FILESYSTEM
-M: William Irwin <wli@holomorphy.com>
+M: Nadia Yvette Chambers <nyc@holomorphy.com>
S: Maintained
F: fs/hugetlbfs/
@@ -3656,7 +3721,7 @@ I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
S: Maintained
-F: drivers/i2c/busses/i2c-stub.c
+F: drivers/i2c/i2c-stub.c
I2C SUBSYSTEM
M: Wolfram Sang <w.sang@pengutronix.de>
@@ -3787,6 +3852,12 @@ F: net/ieee802154/
F: net/mac802154/
F: drivers/ieee802154/
+IGUANAWORKS USB IR TRANSCEIVER
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/iguanair.c
+
IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@cam.ac.uk>
L: linux-iio@vger.kernel.org
@@ -4172,17 +4243,41 @@ S: Maintained
F: Documentation/hwmon/it87
F: drivers/hwmon/it87.c
+IT913X MEDIA DRIVER
+M: Malcolm Priestley <tvboxspy@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/usb/dvb-usb-v2/it913x*
+
+IT913X FE MEDIA DRIVER
+M: Malcolm Priestley <tvboxspy@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/dvb-frontends/it913x-fe*
+
IVTV VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://www.ivtvdriver.org
S: Maintained
F: Documentation/video4linux/*.ivtv
F: drivers/media/pci/ivtv/
F: include/linux/ivtv*
+IX2505V MEDIA DRIVER
+M: Malcolm Priestley <tvboxspy@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/dvb-frontends/ix2505v*
+
JC42.4 TEMPERATURE SENSOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: lm-sensors@lm-sensors.org
@@ -4219,7 +4314,6 @@ M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
S: Maintained
F: fs/jbd/
-F: include/linux/ext3_jbd.h
F: include/linux/jbd.h
JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
@@ -4314,10 +4408,10 @@ F: include/linux/kvm*
F: virt/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
-M: Joerg Roedel <joerg.roedel@amd.com>
+M: Joerg Roedel <joro@8bytes.org>
L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
-S: Supported
+S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
@@ -4325,6 +4419,7 @@ KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
M: Alexander Graf <agraf@suse.de>
L: kvm-ppc@vger.kernel.org
W: http://kvm.qumranet.com
+T: git git://github.com/agraf/linux-2.6.git
S: Supported
F: arch/powerpc/include/asm/kvm*
F: arch/powerpc/kvm/
@@ -4627,6 +4722,14 @@ S: Maintained
F: Documentation/hwmon/lm90
F: drivers/hwmon/lm90.c
+LME2510 MEDIA DRIVER
+M: Malcolm Priestley <tvboxspy@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/usb/dvb-usb-v2/lmedm04*
+
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
@@ -4654,13 +4757,16 @@ S: Maintained
F: fs/logfs/
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
-M: Eric Moore <Eric.Moore@lsi.com>
+M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
+M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
M: support@lsi.com
L: DL-MPTFusionLinux@lsi.com
L: linux-scsi@vger.kernel.org
W: http://www.lsilogic.com/support
S: Supported
F: drivers/message/fusion/
+F: drivers/scsi/mpt2sas/
+F: drivers/scsi/mpt3sas/
LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers
M: Matthew Wilcox <matthew@wil.cx>
@@ -4717,6 +4823,14 @@ W: http://www.tazenda.demon.co.uk/phil/linux-hp
S: Maintained
F: arch/m68k/hp300/
+M88RS2000 MEDIA DRIVER
+M: Malcolm Priestley <tvboxspy@gmail.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/dvb-frontends/m88rs2000*
+
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
@@ -4771,6 +4885,12 @@ S: Maintained
F: drivers/net/ethernet/marvell/mv643xx_eth.*
F: include/linux/mv643xx.h
+MARVELL MVNETA ETHERNET DRIVER
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/marvell/mvneta.*
+
MARVELL MWIFIEX WIRELESS DRIVER
M: Bing Zhao <bzhao@marvell.com>
L: linux-wireless@vger.kernel.org
@@ -4809,12 +4929,12 @@ F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <mchehab@infradead.org>
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
P: LinuxTV.org Project
L: linux-media@vger.kernel.org
W: http://linuxtv.org
Q: http://patchwork.kernel.org/project/linux-media/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
@@ -4822,8 +4942,13 @@ F: Documentation/DocBook/media/
F: drivers/media/
F: drivers/staging/media/
F: include/media/
-F: include/linux/dvb/
-F: include/linux/videodev*.h
+F: include/uapi/linux/dvb/
+F: include/uapi/linux/videodev2.h
+F: include/uapi/linux/media.h
+F: include/uapi/linux/v4l2-*
+F: include/uapi/linux/meye.h
+F: include/uapi/linux/ivtv*
+F: include/uapi/linux/uvcvideo.h
MEGARAID SCSI DRIVERS
M: Neela Syam Kolli <megaraidlinux@lsi.com>
@@ -4905,7 +5030,7 @@ W: http://popies.net/meye/
S: Orphan
F: Documentation/video4linux/meye.txt
F: drivers/media/pci/meye/
-F: include/linux/meye.h
+F: include/uapi/linux/meye.h
MOTOROLA IMX MMC/SD HOST CONTROLLER INTERFACE DRIVER
M: Pavel Pisa <ppisa@pikron.com>
@@ -4919,6 +5044,13 @@ S: Maintained
F: Documentation/serial/moxa-smartio
F: drivers/tty/mxser.*
+MR800 AVERMEDIA USB FM RADIO DRIVER
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-mr800.c
+
MSI LAPTOP SUPPORT
M: "Lee, Chun-Yi" <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
@@ -5253,6 +5385,15 @@ F: arch/arm/*omap*/
F: drivers/i2c/busses/i2c-omap.c
F: include/linux/i2c-omap.h
+OMAP DEVICE TREE SUPPORT
+M: Benoît Cousson <b-cousson@ti.com>
+M: Tony Lindgren <tony@atomide.com>
+L: linux-omap@vger.kernel.org
+L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/boot/dts/*omap*
+F: arch/arm/boot/dts/*am3*
+
OMAP CLOCK FRAMEWORK SUPPORT
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
@@ -5381,7 +5522,7 @@ F: drivers/char/pcmcia/cm4040_cs.*
OMNIVISION OV7670 SENSOR DRIVER
M: Jonathan Corbet <corbet@lwn.net>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/ov7670.c
@@ -5930,11 +6071,18 @@ M: Mike Isely <isely@pobox.com>
L: pvrusb2@isely.net (subscribers-only)
L: linux-media@vger.kernel.org
W: http://www.isely.net/pvrusb2/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: Documentation/video4linux/README.pvrusb2
F: drivers/media/usb/pvrusb2/
+PWC WEBCAM DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/usb/pwc/*
+
PWM SUBSYSTEM
M: Thierry Reding <thierry.reding@avionic-design.de>
L: linux-kernel@vger.kernel.org
@@ -6077,6 +6225,21 @@ S: Maintained
F: drivers/video/aty/radeon*
F: include/linux/radeonfb.h
+RADIOSHARK RADIO DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-shark.c
+
+RADIOSHARK2 RADIO DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-shark2.c
+F: drivers/media/radio/radio-tea5777.c
+
RAGE128 FRAMEBUFFER DISPLAY DRIVER
M: Paul Mackerras <paulus@samba.org>
L: linux-fbdev@vger.kernel.org
@@ -6317,10 +6480,19 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/mmc/host/s3cmci.*
+SAA7134 VIDEO4LINUX DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: Documentation/video4linux/saa7134/
+F: drivers/media/pci/saa7134/
+
SAA7146 VIDEO4LINUX-2 DRIVER
M: Michael Hunold <michael@mihu.de>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://www.mihu.de/linux/saa7146
S: Maintained
F: drivers/media/common/saa7146/
@@ -6328,7 +6500,7 @@ F: drivers/media/pci/saa7146/
F: include/media/saa7146*
SAMSUNG LAPTOP DRIVER
-M: Corentin Chary <corentincj@iksaif.net>
+M: Corentin Chary <corentin.chary@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/samsung-laptop.c
@@ -6355,6 +6527,14 @@ F: drivers/regulator/s5m*.c
F: drivers/rtc/rtc-sec.c
F: include/linux/mfd/samsung/
+SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER
+M: Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+L: linux-media@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/media/platform/s3c-camif/
+F: include/media/s3c_camif.h
+
SERIAL DRIVERS
M: Alan Cox <alan@linux.intel.com>
L: linux-serial@vger.kernel.org
@@ -6649,6 +6829,18 @@ S: Supported
F: arch/arm/mach-davinci
F: drivers/i2c/busses/i2c-davinci.c
+TI DAVINCI SERIES MEDIA DRIVER
+M: Manjunath Hadli <manjunath.hadli@ti.com>
+M: Prabhakar Lad <prabhakar.lad@ti.com>
+L: linux-media@vger.kernel.org
+L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Supported
+F: drivers/media/platform/davinci/
+F: include/media/davinci/
+
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -6715,6 +6907,15 @@ M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
F: drivers/net/ethernet/smsc/smc91x.*
+SMIA AND SMIA++ IMAGE SENSOR DRIVER
+M: Sakari Ailus <sakari.ailus@iki.fi>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/smiapp
+F: include/media/smiapp.h
+F: drivers/media/i2c/smiapp-pll.c
+F: drivers/media/i2c/smiapp-pll.h
+
SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: lm-sensors@lm-sensors.org
@@ -6773,7 +6974,7 @@ F: arch/ia64/sn/
SOC-CAMERA V4L2 SUBSYSTEM
M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
S: Maintained
F: include/media/soc*
F: drivers/media/i2c/soc_camera/
@@ -7263,6 +7464,22 @@ T: git git://linuxtv.org/mkrufky/tuners.git
S: Maintained
F: drivers/media/tuners/tda8290.*
+TEA5761 TUNER DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: drivers/media/tuners/tea5761.*
+
+TEA5767 TUNER DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/tuners/tea5767.*
+
TEAM DRIVER
M: Jiri Pirko <jpirko@redhat.com>
L: netdev@vger.kernel.org
@@ -7270,6 +7487,12 @@ S: Supported
F: drivers/net/team/
F: include/linux/if_team.h
+TECHNOTREND USB IR RECEIVER
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/ttusbir.c
+
TEGRA SUPPORT
M: Stephen Warren <swarren@wwwdotorg.org>
L: linux-tegra@vger.kernel.org
@@ -7331,6 +7554,13 @@ S: Maintained
F: sound/soc/codecs/lm49453*
F: sound/soc/codecs/isabelle*
+TI LP855x BACKLIGHT DRIVER
+M: Milo Kim <milo.kim@ti.com>
+S: Maintained
+F: Documentation/backlight/lp855x-driver.txt
+F: drivers/video/backlight/lp855x_bl.c
+F: include/linux/platform_data/lp855x.h
+
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -7422,6 +7652,14 @@ S: Maintained
F: include/linux/shmem_fs.h
F: mm/shmem.c
+TM6000 VIDEO4LINUX DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: drivers/media/usb/tm6000/
+
TPM DEVICE DRIVER
M: Kent Yoder <key@linux.vnet.ibm.com>
M: Rajiv Andrade <mail@srajiv.net>
@@ -7816,7 +8054,7 @@ USB SN9C1xx DRIVER
M: Luca Risolia <luca.risolia@studio.unibo.it>
L: linux-usb@vger.kernel.org
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://www.linux-projects.org
S: Maintained
F: Documentation/video4linux/sn9c102.txt
@@ -7852,10 +8090,11 @@ USB VIDEO CLASS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-uvc-devel@lists.sourceforge.net (subscribers-only)
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://www.ideasonboard.org/uvc/
S: Maintained
F: drivers/media/usb/uvc/
+F: include/uapi/linux/uvcvideo.h
USB WEBCAM GADGET
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -7887,7 +8126,7 @@ USB ZR364XX DRIVER
M: Antoine Jacquet <royale@zerezo.com>
L: linux-usb@vger.kernel.org
L: linux-media@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
+T: git git://linuxtv.org/media_tree.git
W: http://royale.zerezo.com/zr364xx/
S: Maintained
F: Documentation/video4linux/zr364xx.txt
@@ -8242,6 +8481,14 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
+XC2028/3028 TUNER DRIVER
+M: Mauro Carvalho Chehab <mchehab@redhat.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/tuners/tuner-xc2028.*
+
XEN HYPERVISOR INTERFACE
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Jeremy Fitzhardinge <jeremy@goop.org>
diff --git a/Makefile b/Makefile
index 540f7b240c7..80c5694b29f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 7
+PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc2
NAME = Terrified Chipmunk
# *DOCUMENTATION*
@@ -124,7 +124,7 @@ $(if $(KBUILD_OUTPUT),, \
PHONY += $(MAKECMDGOALS) sub-make
$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
- $(Q)@:
+ @:
sub-make: FORCE
$(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
@@ -981,6 +981,12 @@ _modinst_post: _modinst_
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst
$(call cmd,depmod)
+ifeq ($(CONFIG_MODULE_SIG), y)
+PHONY += modules_sign
+modules_sign:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modsign
+endif
+
else # CONFIG_MODULES
# Modules not configured
@@ -1021,11 +1027,14 @@ clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
-PHONY += $(clean-dirs) clean archclean
+PHONY += $(clean-dirs) clean archclean vmlinuxclean
$(clean-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
-clean: archclean
+vmlinuxclean:
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
+
+clean: archclean vmlinuxclean
# mrproper - Delete all generated files, including .config
#
@@ -1252,7 +1261,6 @@ scripts: ;
endif # KBUILD_EXTMOD
clean: $(clean-dirs)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
diff --git a/README b/README
index f32710a817f..a24ec89ba44 100644
--- a/README
+++ b/README
@@ -180,6 +180,10 @@ CONFIGURING the kernel:
with questions already answered.
Additionally updates the dependencies.
+ "make olddefconfig"
+ Like above, but sets new symbols to their default
+ values without prompting.
+
"make defconfig" Create a ./.config file by using the default
symbol values from either arch/$ARCH/defconfig
or arch/$ARCH/configs/${PLATFORM}_defconfig,
diff --git a/arch/Kconfig b/arch/Kconfig
index 34884faf98c..7f8f281f258 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -80,6 +80,7 @@ config UPROBES
bool "Transparent user-space probes (EXPERIMENTAL)"
depends on UPROBE_EVENT && PERF_EVENTS
default n
+ select PERCPU_RWSEM
help
Uprobes is the user-space counterpart to kprobes: they
enable instrumentation applications (such as 'perf probe')
@@ -112,6 +113,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
See Documentation/unaligned-memory-access.txt for more
information on the topic of unaligned memory accesses.
+config ARCH_USE_BUILTIN_BSWAP
+ bool
+ help
+ Modern versions of GCC (since 4.4) have builtin functions
+ for handling byte-swapping. Using these, instead of the old
+ inline assembler that the architecture code provides in the
+ __arch_bswapXX() macros, allows the compiler to see what's
+ happening and offers more opportunity for optimisation. In
+ particular, the compiler will be able to combine the byteswap
+ with a nearby load or store and use load-and-swap or
+ store-and-swap instructions if the architecture has them. It
+ should almost *never* result in code which is worse than the
+ hand-coded assembler in <asm/swab.h>. But just in case it
+ does, the use of the builtins is optional.
+
+ Any architecture with load-and-swap or store-and-swap
+ instructions should set this. And it shouldn't hurt to set it
+ on architectures that don't have such instructions.
+
config HAVE_SYSCALL_WRAPPERS
bool
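Aside on the ARCH_USE_BUILTIN_BSWAP help text in the hunk above: the builtins it refers to are ordinary GCC intrinsics (__builtin_bswap32/__builtin_bswap64, GCC 4.4 and later), so their effect is easy to see in a minimal user-space sketch. This is an illustration only, not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustration only (not part of the patch): because the compiler understands
 * __builtin_bswap32(), it is free to merge the swap with the adjacent load on
 * ISAs that have load-and-swap forms, which an opaque inline-asm
 * __arch_bswapXX() macro would prevent.
 */
static uint32_t read_be32(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));		/* unaligned-safe load */
	return __builtin_bswap32(v);		/* candidate for load-and-swap */
}

int main(void)
{
	const unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* Prints 0x12345678 on a little-endian host. */
	printf("0x%08x\n", read_be32(buf));
	return 0;
}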
@@ -271,12 +291,6 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
-config GENERIC_KERNEL_THREAD
- bool
-
-config GENERIC_KERNEL_EXECVE
- bool
-
config HAVE_ARCH_SECCOMP_FILTER
bool
help
@@ -342,6 +356,9 @@ config MODULES_USE_ELF_REL
Modules only use ELF REL relocations. Modules with ELF RELA
relocations will give an error.
+config GENERIC_SIGALTSTACK
+ bool
+
#
# ABI hall of shame
#
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5dd7f5db24d..9d5904cc771 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -20,10 +20,9 @@ config ALPHA
select GENERIC_CMOS_UPDATE
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
+ select GENERIC_SIGALTSTACK
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index dcfabb9f05a..a6e85f448c1 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,14 +1,5 @@
-include include/asm-generic/Kbuild.asm
generic-y += clkdev.h
-header-y += compiler.h
-header-y += console.h
-header-y += fpu.h
-header-y += gentrap.h
-header-y += pal.h
-header-y += reg.h
-header-y += regdef.h
-header-y += sysinfo.h
generic-y += exec.h
generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/a.out.h b/arch/alpha/include/asm/a.out.h
index acdc681231c..9abbd245530 100644
--- a/arch/alpha/include/asm/a.out.h
+++ b/arch/alpha/include/asm/a.out.h
@@ -1,94 +1,8 @@
#ifndef __ALPHA_A_OUT_H__
#define __ALPHA_A_OUT_H__
-#include <linux/types.h>
+#include <uapi/asm/a.out.h>
-/*
- * OSF/1 ECOFF header structs. ECOFF files consist of:
- * - a file header (struct filehdr),
- * - an a.out header (struct aouthdr),
- * - one or more section headers (struct scnhdr).
- * The filhdr's "f_nscns" field contains the
- * number of section headers.
- */
-
-struct filehdr
-{
- /* OSF/1 "file" header */
- __u16 f_magic, f_nscns;
- __u32 f_timdat;
- __u64 f_symptr;
- __u32 f_nsyms;
- __u16 f_opthdr, f_flags;
-};
-
-struct aouthdr
-{
- __u64 info; /* after that it looks quite normal.. */
- __u64 tsize;
- __u64 dsize;
- __u64 bsize;
- __u64 entry;
- __u64 text_start; /* with a few additions that actually make sense */
- __u64 data_start;
- __u64 bss_start;
- __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */
- __u64 gpvalue;
-};
-
-struct scnhdr
-{
- char s_name[8];
- __u64 s_paddr;
- __u64 s_vaddr;
- __u64 s_size;
- __u64 s_scnptr;
- __u64 s_relptr;
- __u64 s_lnnoptr;
- __u16 s_nreloc;
- __u16 s_nlnno;
- __u32 s_flags;
-};
-
-struct exec
-{
- /* OSF/1 "file" header */
- struct filehdr fh;
- struct aouthdr ah;
-};
-
-/*
- * Define's so that the kernel exec code can access the a.out header
- * fields...
- */
-#define a_info ah.info
-#define a_text ah.tsize
-#define a_data ah.dsize
-#define a_bss ah.bsize
-#define a_entry ah.entry
-#define a_textstart ah.text_start
-#define a_datastart ah.data_start
-#define a_bssstart ah.bss_start
-#define a_gprmask ah.gprmask
-#define a_fprmask ah.fprmask
-#define a_gpvalue ah.gpvalue
-
-#define N_TXTADDR(x) ((x).a_textstart)
-#define N_DATADDR(x) ((x).a_datastart)
-#define N_BSSADDR(x) ((x).a_bssstart)
-#define N_DRSIZE(x) 0
-#define N_TRSIZE(x) 0
-#define N_SYMSIZE(x) 0
-
-#define AOUTHSZ sizeof(struct aouthdr)
-#define SCNHSZ sizeof(struct scnhdr)
-#define SCNROUND 16
-
-#define N_TXTOFF(x) \
- ((long) N_MAGIC(x) == ZMAGIC ? 0 : \
- (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))
-
-#ifdef __KERNEL__
/* Assume that start addresses below 4G belong to a TASO application.
Unfortunately, there is no proper bit in the exec header to check.
@@ -98,5 +12,4 @@ struct exec
set_personality (((BFPM->taso || EX.ah.entry < 0x100000000L \
? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
-#endif /* __KERNEL__ */
#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h
index da6bb199839..a7720b96bcc 100644
--- a/arch/alpha/include/asm/compiler.h
+++ b/arch/alpha/include/asm/compiler.h
@@ -1,119 +1,8 @@
#ifndef __ALPHA_COMPILER_H
#define __ALPHA_COMPILER_H
-/*
- * Herein are macros we use when describing various patterns we want to GCC.
- * In all cases we can get better schedules out of the compiler if we hide
- * as little as possible inside inline assembly. However, we want to be
- * able to know what we'll get out before giving up inline assembly. Thus
- * these tests and macros.
- */
+#include <uapi/asm/compiler.h>
-#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
-# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift)
-# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift)
-# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift)
-# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift)
-# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift)
-# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift)
-# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b)
-#else
-# define __kernel_insbl(val, shift) \
- ({ unsigned long __kir; \
- __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_inswl(val, shift) \
- ({ unsigned long __kir; \
- __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_insql(val, shift) \
- ({ unsigned long __kir; \
- __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_inslh(val, shift) \
- ({ unsigned long __kir; \
- __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_extbl(val, shift) \
- ({ unsigned long __kir; \
- __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_extwl(val, shift) \
- ({ unsigned long __kir; \
- __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
- __kir; })
-# define __kernel_cmpbge(a, b) \
- ({ unsigned long __kir; \
- __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
- __kir; })
-#endif
-
-#ifdef __alpha_cix__
-# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
-# define __kernel_cttz(x) __builtin_ctzl(x)
-# define __kernel_ctlz(x) __builtin_clzl(x)
-# define __kernel_ctpop(x) __builtin_popcountl(x)
-# else
-# define __kernel_cttz(x) \
- ({ unsigned long __kir; \
- __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-# define __kernel_ctlz(x) \
- ({ unsigned long __kir; \
- __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-# define __kernel_ctpop(x) \
- ({ unsigned long __kir; \
- __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-# endif
-#else
-# define __kernel_cttz(x) \
- ({ unsigned long __kir; \
- __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-# define __kernel_ctlz(x) \
- ({ unsigned long __kir; \
- __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-# define __kernel_ctpop(x) \
- ({ unsigned long __kir; \
- __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
- __kir; })
-#endif
-
-
-/*
- * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX
- * extension is enabled. Previous versions did not define anything
- * we could test during compilation -- too bad, so sad.
- */
-
-#if defined(__alpha_bwx__)
-#define __kernel_ldbu(mem) (mem)
-#define __kernel_ldwu(mem) (mem)
-#define __kernel_stb(val,mem) ((mem) = (val))
-#define __kernel_stw(val,mem) ((mem) = (val))
-#else
-#define __kernel_ldbu(mem) \
- ({ unsigned char __kir; \
- __asm__(".arch ev56; \
- ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_ldwu(mem) \
- ({ unsigned short __kir; \
- __asm__(".arch ev56; \
- ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
- __kir; })
-#define __kernel_stb(val,mem) \
- __asm__(".arch ev56; \
- stb %1,%0" : "=m"(mem) : "r"(val))
-#define __kernel_stw(val,mem) \
- __asm__(".arch ev56; \
- stw %1,%0" : "=m"(mem) : "r"(val))
-#endif
-
-#ifdef __KERNEL__
/* Some idiots over in <linux/compiler.h> thought inline should imply
always_inline. This breaks stuff. We'll include this file whenever
we run into such problems. */
@@ -125,6 +14,4 @@
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
-#endif /* __KERNEL__ */
-
#endif /* __ALPHA_COMPILER_H */
diff --git a/arch/alpha/include/asm/console.h b/arch/alpha/include/asm/console.h
index a3ce4e62249..f2b584fe099 100644
--- a/arch/alpha/include/asm/console.h
+++ b/arch/alpha/include/asm/console.h
@@ -1,52 +1,8 @@
#ifndef __AXP_CONSOLE_H
#define __AXP_CONSOLE_H
-/*
- * Console callback routine numbers
- */
-#define CCB_GETC 0x01
-#define CCB_PUTS 0x02
-#define CCB_RESET_TERM 0x03
-#define CCB_SET_TERM_INT 0x04
-#define CCB_SET_TERM_CTL 0x05
-#define CCB_PROCESS_KEYCODE 0x06
-#define CCB_OPEN_CONSOLE 0x07
-#define CCB_CLOSE_CONSOLE 0x08
+#include <uapi/asm/console.h>
-#define CCB_OPEN 0x10
-#define CCB_CLOSE 0x11
-#define CCB_IOCTL 0x12
-#define CCB_READ 0x13
-#define CCB_WRITE 0x14
-
-#define CCB_SET_ENV 0x20
-#define CCB_RESET_ENV 0x21
-#define CCB_GET_ENV 0x22
-#define CCB_SAVE_ENV 0x23
-
-#define CCB_PSWITCH 0x30
-#define CCB_BIOS_EMUL 0x32
-
-/*
- * Environment variable numbers
- */
-#define ENV_AUTO_ACTION 0x01
-#define ENV_BOOT_DEV 0x02
-#define ENV_BOOTDEF_DEV 0x03
-#define ENV_BOOTED_DEV 0x04
-#define ENV_BOOT_FILE 0x05
-#define ENV_BOOTED_FILE 0x06
-#define ENV_BOOT_OSFLAGS 0x07
-#define ENV_BOOTED_OSFLAGS 0x08
-#define ENV_BOOT_RESET 0x09
-#define ENV_DUMP_DEV 0x0A
-#define ENV_ENABLE_AUDIT 0x0B
-#define ENV_LICENSE 0x0C
-#define ENV_CHAR_SET 0x0D
-#define ENV_LANGUAGE 0x0E
-#define ENV_TTY_DEV 0x0F
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern long callback_puts(long unit, const char *s, long length);
extern long callback_getc(long unit);
@@ -70,6 +26,4 @@ struct hwrpb_struct;
extern int callback_init_done;
extern void * callback_init(void *);
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __AXP_CONSOLE_H */
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index e477bcd5b94..71c20956b90 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,128 +1,8 @@
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
-#ifdef __KERNEL__
#include <asm/special_insns.h>
-#endif
-
-/*
- * Alpha floating-point control register defines:
- */
-#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */
-#define FPCR_DNZ (1UL<<48) /* denorms to zero */
-#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
-#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
-#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
-#define FPCR_INV (1UL<<52) /* invalid operation */
-#define FPCR_DZE (1UL<<53) /* division by zero */
-#define FPCR_OVF (1UL<<54) /* overflow */
-#define FPCR_UNF (1UL<<55) /* underflow */
-#define FPCR_INE (1UL<<56) /* inexact */
-#define FPCR_IOV (1UL<<57) /* integer overflow */
-#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */
-#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */
-#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */
-#define FPCR_SUM (1UL<<63) /* summary bit */
-
-#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
-#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
-#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
-#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
-#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
-#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
-
-#define FPCR_MASK 0xffff800000000000L
-
-/*
- * IEEE trap enables are implemented in software. These per-thread
- * bits are stored in the "ieee_state" field of "struct thread_info".
- * Thus, the bits are defined so as not to conflict with the
- * floating-point enable bit (which is architected). On top of that,
- * we want to make these bits compatible with OSF/1 so
- * ieee_set_fp_control() etc. can be implemented easily and
- * compatibly. The corresponding definitions are in
- * /usr/include/machine/fpu.h under OSF/1.
- */
-#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
-#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */
-#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
-#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
-#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */
-#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
-#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
- IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
- IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
-
-/* Denorm and Underflow flushing */
-#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
-#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
-
-#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
-
-/* status bits coming from fpcr: */
-#define IEEE_STATUS_INV (1UL<<17)
-#define IEEE_STATUS_DZE (1UL<<18)
-#define IEEE_STATUS_OVF (1UL<<19)
-#define IEEE_STATUS_UNF (1UL<<20)
-#define IEEE_STATUS_INE (1UL<<21)
-#define IEEE_STATUS_DNO (1UL<<22)
-
-#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
- IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
- IEEE_STATUS_INE | IEEE_STATUS_DNO)
-
-#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
- IEEE_STATUS_MASK | IEEE_MAP_MASK)
-
-#define IEEE_CURRENT_RM_SHIFT 32
-#define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT)
-
-#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
-
-#define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */
-
-/*
- * Convert the software IEEE trap enable and status bits into the
- * hardware fpcr format.
- *
- * Digital Unix engineers receive my thanks for not defining the
- * software bits identical to the hardware bits. The chip designers
- * receive my thanks for making all the not-implemented fpcr bits
- * RAZ forcing us to use system calls to read/write this value.
- */
-
-static inline unsigned long
-ieee_swcr_to_fpcr(unsigned long sw)
-{
- unsigned long fp;
- fp = (sw & IEEE_STATUS_MASK) << 35;
- fp |= (sw & IEEE_MAP_DMZ) << 36;
- fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
- fp |= (~sw & (IEEE_TRAP_ENABLE_INV
- | IEEE_TRAP_ENABLE_DZE
- | IEEE_TRAP_ENABLE_OVF)) << 48;
- fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
- fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
- fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
- return fp;
-}
-
-static inline unsigned long
-ieee_fpcr_to_swcr(unsigned long fp)
-{
- unsigned long sw;
- sw = (fp >> 35) & IEEE_STATUS_MASK;
- sw |= (fp >> 36) & IEEE_MAP_DMZ;
- sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
- | IEEE_TRAP_ENABLE_DZE
- | IEEE_TRAP_ENABLE_OVF);
- sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
- sw |= (fp >> 47) & IEEE_MAP_UMZ;
- sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
- return sw;
-}
-
-#ifdef __KERNEL__
+#include <uapi/asm/fpu.h>
/* The following two functions don't need trapb/excb instructions
around the mf_fpcr/mt_fpcr instructions because (a) the kernel
@@ -192,6 +72,4 @@ extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
-#endif /* __KERNEL__ */
-
#endif /* __ASM_ALPHA_FPU_H */
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 445dc42e033..c5b5d6bac9e 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -66,7 +66,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
<< PAGE_SHIFT))
-/* XXX: FIXME -- wli */
+/* XXX: FIXME -- nyc */
#define kern_addr_valid(kaddr) (0)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h
index 6699ee58342..6fcd2b5b08f 100644
--- a/arch/alpha/include/asm/pal.h
+++ b/arch/alpha/include/asm/pal.h
@@ -1,54 +1,8 @@
#ifndef __ALPHA_PAL_H
#define __ALPHA_PAL_H
-/*
- * Common PAL-code
- */
-#define PAL_halt 0
-#define PAL_cflush 1
-#define PAL_draina 2
-#define PAL_bpt 128
-#define PAL_bugchk 129
-#define PAL_chmk 131
-#define PAL_callsys 131
-#define PAL_imb 134
-#define PAL_rduniq 158
-#define PAL_wruniq 159
-#define PAL_gentrap 170
-#define PAL_nphalt 190
-
-/*
- * VMS specific PAL-code
- */
-#define PAL_swppal 10
-#define PAL_mfpr_vptb 41
+#include <uapi/asm/pal.h>
-/*
- * OSF specific PAL-code
- */
-#define PAL_cserve 9
-#define PAL_wripir 13
-#define PAL_rdmces 16
-#define PAL_wrmces 17
-#define PAL_wrfen 43
-#define PAL_wrvptptr 45
-#define PAL_jtopal 46
-#define PAL_swpctx 48
-#define PAL_wrval 49
-#define PAL_rdval 50
-#define PAL_tbi 51
-#define PAL_wrent 52
-#define PAL_swpipl 53
-#define PAL_rdps 54
-#define PAL_wrkgp 55
-#define PAL_wrusp 56
-#define PAL_wrperfmon 57
-#define PAL_rdusp 58
-#define PAL_whami 60
-#define PAL_retsys 61
-#define PAL_rti 63
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern void halt(void) __attribute__((noreturn));
@@ -158,6 +112,4 @@ __CALL_PAL_W1(wrvptptr, unsigned long);
#define tbia() __tbi(-2, /* no second argument */)
#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __ALPHA_PAL_H */
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index e691ecfedb2..bf46af51941 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -1,27 +1,9 @@
#ifndef _ASM_ALPHA_PARAM_H
#define _ASM_ALPHA_PARAM_H
-/* ??? Gross. I don't want to parameterize this, and supposedly the
- hardware ignores reprogramming. We also need userland buy-in to the
- change in HZ, since this is visible in the wait4 resources etc. */
+#include <uapi/asm/param.h>
-#ifdef __KERNEL__
#define HZ CONFIG_HZ
#define USER_HZ HZ
-#else
-#define HZ 1024
-#endif
-
-#define EXEC_PAGESIZE 8192
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#ifdef __KERNEL__
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
-#endif
-
#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index b4c5b2fbb64..21128505ddb 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -1,77 +1,14 @@
#ifndef _ASMAXP_PTRACE_H
#define _ASMAXP_PTRACE_H
+#include <uapi/asm/ptrace.h>
-/*
- * This struct defines the way the registers are stored on the
- * kernel stack during a system call or other kernel entry
- *
- * NOTE! I want to minimize the overhead of system calls, so this
- * struct has as little information as possible. I does not have
- *
- * - floating point regs: the kernel doesn't change those
- * - r9-15: saved by the C compiler
- *
- * This makes "fork()" and "exec()" a bit more complex, but should
- * give us low system call latency.
- */
-
-struct pt_regs {
- unsigned long r0;
- unsigned long r1;
- unsigned long r2;
- unsigned long r3;
- unsigned long r4;
- unsigned long r5;
- unsigned long r6;
- unsigned long r7;
- unsigned long r8;
- unsigned long r19;
- unsigned long r20;
- unsigned long r21;
- unsigned long r22;
- unsigned long r23;
- unsigned long r24;
- unsigned long r25;
- unsigned long r26;
- unsigned long r27;
- unsigned long r28;
- unsigned long hae;
-/* JRP - These are the values provided to a0-a2 by PALcode */
- unsigned long trap_a0;
- unsigned long trap_a1;
- unsigned long trap_a2;
-/* These are saved by PAL-code: */
- unsigned long ps;
- unsigned long pc;
- unsigned long gp;
- unsigned long r16;
- unsigned long r17;
- unsigned long r18;
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long r13;
- unsigned long r14;
- unsigned long r15;
- unsigned long r26;
- unsigned long fp[32]; /* fp[31] is fpcr */
-};
-
-#ifdef __KERNEL__
#define arch_has_single_step() (1)
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
+#define current_user_stack_pointer() rdusp()
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
@@ -83,5 +20,3 @@ struct switch_stack {
#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
#endif
-
-#endif
diff --git a/arch/alpha/include/asm/signal.h b/arch/alpha/include/asm/signal.h
index 45552862cc1..8a1ac28cd56 100644
--- a/arch/alpha/include/asm/signal.h
+++ b/arch/alpha/include/asm/signal.h
@@ -1,12 +1,8 @@
#ifndef _ASMAXP_SIGNAL_H
#define _ASMAXP_SIGNAL_H
-#include <linux/types.h>
+#include <uapi/asm/signal.h>
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
/* Digital Unix defines 64 signals. Most things should be clean enough
to redefine this at will, if care is taken to make libc match. */
@@ -20,100 +16,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-
-/*
- * Linux/AXP has different signal numbers that Linux/i386: I'm trying
- * to make it OSF/1 binary compatible, at least for normal binaries.
- */
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGEMT 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGBUS 10
-#define SIGSEGV 11
-#define SIGSYS 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGURG 16
-#define SIGSTOP 17
-#define SIGTSTP 18
-#define SIGCONT 19
-#define SIGCHLD 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGIO 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGINFO 29
-#define SIGUSR1 30
-#define SIGUSR2 31
-
-#define SIGPOLL SIGIO
-#define SIGPWR SIGINFO
-#define SIGIOT SIGABRT
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-
-#define SA_ONSTACK 0x00000001
-#define SA_RESTART 0x00000002
-#define SA_NOCLDSTOP 0x00000004
-#define SA_NODEFER 0x00000008
-#define SA_RESETHAND 0x00000010
-#define SA_NOCLDWAIT 0x00000020
-#define SA_SIGINFO 0x00000040
-
-#define SA_ONESHOT SA_RESETHAND
-#define SA_NOMASK SA_NODEFER
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 4096
-#define SIGSTKSZ 16384
-
-#define SIG_BLOCK 1 /* for blocking signals */
-#define SIG_UNBLOCK 2 /* for unblocking signals */
-#define SIG_SETMASK 3 /* for setting the signal mask */
-
-#include <asm-generic/signal-defs.h>
-
-#ifdef __KERNEL__
struct osf_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -130,40 +32,5 @@ struct k_sigaction {
struct sigaction sa;
__sigrestore_t ka_restorer;
};
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- int sa_flags;
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-/* sigstack(2) is deprecated, and will be withdrawn in a future version
- of the X/Open CAE Specification. Use sigaltstack instead. It is only
- implemented here for OSF/1 compatibility. */
-
-struct sigstack {
- void __user *ss_sp;
- int ss_onstack;
-};
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#endif
-
-#endif
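Aside on the SA_* flag descriptions that the hunks above move out of asm/signal.h: they are the ordinary sigaction(2) flags, and the minimal user-space sketch below shows how they are consumed. Illustration only; it uses the portable <signal.h> names, not the alpha-specific numeric values the header defines.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Illustration only (not part of the patch): exercises SA_SIGINFO and
 * SA_RESTART, two of the flags documented in the comment this patch moves
 * to the alpha uapi signal header.
 */
static void on_sigint(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;
	write(STDOUT_FILENO, "caught SIGINT\n", 14);	/* async-signal-safe */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigint;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* 3-argument handler; restart syscalls */
	sigemptyset(&sa.sa_mask);		/* block nothing extra while handling */

	if (sigaction(SIGINT, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}
	pause();				/* returns once a handler has run */
	return 0;
}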
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 0087d053b77..8d806d80ed2 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -1,87 +1,10 @@
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
-#include <asm/sockios.h>
+#include <uapi/asm/socket.h>
-/* For setsockopt(2) */
-/*
- * Note: we only bother about making the SOL_SOCKET options
- * same as OSF/1, as that's all that "normal" programs are
- * likely to set. We don't necessarily want to be binary
- * compatible with _everything_.
- */
-#define SOL_SOCKET 0xffff
-
-#define SO_DEBUG 0x0001
-#define SO_REUSEADDR 0x0004
-#define SO_KEEPALIVE 0x0008
-#define SO_DONTROUTE 0x0010
-#define SO_BROADCAST 0x0020
-#define SO_LINGER 0x0080
-#define SO_OOBINLINE 0x0100
-/* To add :#define SO_REUSEPORT 0x0200 */
-
-#define SO_TYPE 0x1008
-#define SO_ERROR 0x1007
-#define SO_SNDBUF 0x1001
-#define SO_RCVBUF 0x1002
-#define SO_SNDBUFFORCE 0x100a
-#define SO_RCVBUFFORCE 0x100b
-#define SO_RCVLOWAT 0x1010
-#define SO_SNDLOWAT 0x1011
-#define SO_RCVTIMEO 0x1012
-#define SO_SNDTIMEO 0x1013
-#define SO_ACCEPTCONN 0x1014
-#define SO_PROTOCOL 0x1028
-#define SO_DOMAIN 0x1029
-
-/* linux-specific, might as well be the same as on i386 */
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_BSDCOMPAT 14
-
-#define SO_PASSCRED 17
-#define SO_PEERCRED 18
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_PEERSEC 30
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 19
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 20
-#define SO_SECURITY_ENCRYPTION_NETWORK 21
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#ifdef __KERNEL__
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
#define SOCK_NONBLOCK 0x40000000
-#endif /* __KERNEL__ */
-
#endif /* _ASM_SOCKET_H */
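Aside on the comment kept in asm/socket.h above: it explains why alpha gives SOCK_NONBLOCK its own value (0x40000000) instead of reusing O_NONBLOCK, which clashes with the socket-type bits. From user space the flag is simply OR-ed into the socket type; a minimal sketch, not part of the patch:

#define _GNU_SOURCE		/* SOCK_NONBLOCK on older glibc */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/*
 * Illustration only (not part of the patch): creates a non-blocking socket
 * via the SOCK_NONBLOCK type flag (supported since Linux 2.6.27).
 */
int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/*
	 * I/O on fd no longer blocks: read()/accept() fail with EAGAIN or
	 * EWOULDBLOCK, and connect() returns -1 with EINPROGRESS.
	 */
	close(fd);
	return 0;
}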
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index fa13716a11c..7fde0f88da8 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -1,72 +1,8 @@
#ifndef _ALPHA_TERMIOS_H
#define _ALPHA_TERMIOS_H
-#include <asm/ioctls.h>
-#include <asm/termbits.h>
+#include <uapi/asm/termios.h>
-struct sgttyb {
- char sg_ispeed;
- char sg_ospeed;
- char sg_erase;
- char sg_kill;
- short sg_flags;
-};
-
-struct tchars {
- char t_intrc;
- char t_quitc;
- char t_startc;
- char t_stopc;
- char t_eofc;
- char t_brkc;
-};
-
-struct ltchars {
- char t_suspc;
- char t_dsuspc;
- char t_rprntc;
- char t_flushc;
- char t_werasc;
- char t_lnextc;
-};
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/*
- * c_cc characters in the termio structure. Oh, how I love being
- * backwardly compatible. Notice that character 4 and 5 are
- * interpreted differently depending on whether ICANON is set in
- * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
- * as _VMIN and V_TIME. This is for compatibility with OSF/1 (which
- * is compatible with sysV)...
- */
-#define _VINTR 0
-#define _VQUIT 1
-#define _VERASE 2
-#define _VKILL 3
-#define _VEOF 4
-#define _VMIN 4
-#define _VEOL 5
-#define _VTIME 5
-#define _VEOL2 6
-#define _VSWTC 7
-
-#ifdef __KERNEL__
/* eof=^D eol=\0 eol2=\0 erase=del
werase=^W kill=^U reprint=^R sxtc=\0
intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP>
@@ -141,6 +77,4 @@ struct termio {
#define kernel_termios_to_user_termios(u, k) \
copy_to_user(u, k, sizeof(struct termios))
-#endif /* __KERNEL__ */
-
#endif /* _ALPHA_TERMIOS_H */
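Aside on the c_cc comment that the hunks above move to the uapi termios header: it notes that, on alpha, c_cc slots 4 and 5 are read as VEOF/VEOL in canonical mode but as VMIN/VTIME otherwise. The usual "raw read" setup below relies on exactly that VMIN/VTIME interpretation; it is a user-space sketch using the standard <termios.h> API, not part of the patch.

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

/*
 * Illustration only (not part of the patch).  With ICANON cleared, the
 * VMIN/VTIME slots control read() behaviour instead of acting as VEOF/VEOL.
 * A real program would save and restore the original settings.
 */
int main(void)
{
	struct termios tio;
	char c;

	if (tcgetattr(STDIN_FILENO, &tio) < 0) {
		perror("tcgetattr");
		return 1;
	}
	tio.c_lflag &= ~(ICANON | ECHO);	/* leave canonical mode */
	tio.c_cc[VMIN] = 1;			/* now "minimum bytes", not end-of-file */
	tio.c_cc[VTIME] = 0;			/* no inter-byte timeout */
	if (tcsetattr(STDIN_FILENO, TCSANOW, &tio) < 0) {
		perror("tcsetattr");
		return 1;
	}
	if (read(STDIN_FILENO, &c, 1) == 1)	/* blocks until one byte arrives */
		printf("got 0x%02x\n", (unsigned char)c);
	return 0;
}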
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 0a0579076f4..f61e1a56c37 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,18 +1,7 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#ifdef __KERNEL__
#include <asm-generic/int-ll64.h>
-#else
-#include <asm-generic/int-l64.h>
-#endif
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index eb3a4664ced..b3396ee039b 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -1,474 +1,8 @@
#ifndef _ALPHA_UNISTD_H
#define _ALPHA_UNISTD_H
-#define __NR_osf_syscall 0 /* not implemented */
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_osf_old_open 5 /* not implemented */
-#define __NR_close 6
-#define __NR_osf_wait4 7
-#define __NR_osf_old_creat 8 /* not implemented */
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_osf_execve 11 /* not implemented */
-#define __NR_chdir 12
-#define __NR_fchdir 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_chown 16
-#define __NR_brk 17
-#define __NR_osf_getfsstat 18 /* not implemented */
-#define __NR_lseek 19
-#define __NR_getxpid 20
-#define __NR_osf_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getxuid 24
-#define __NR_exec_with_loader 25 /* not implemented */
-#define __NR_ptrace 26
-#define __NR_osf_nrecvmsg 27 /* not implemented */
-#define __NR_osf_nsendmsg 28 /* not implemented */
-#define __NR_osf_nrecvfrom 29 /* not implemented */
-#define __NR_osf_naccept 30 /* not implemented */
-#define __NR_osf_ngetpeername 31 /* not implemented */
-#define __NR_osf_ngetsockname 32 /* not implemented */
-#define __NR_access 33
-#define __NR_osf_chflags 34 /* not implemented */
-#define __NR_osf_fchflags 35 /* not implemented */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_osf_old_stat 38 /* not implemented */
-#define __NR_setpgid 39
-#define __NR_osf_old_lstat 40 /* not implemented */
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_osf_set_program_attributes 43
-#define __NR_osf_profil 44 /* not implemented */
-#define __NR_open 45
-#define __NR_osf_old_sigaction 46 /* not implemented */
-#define __NR_getxgid 47
-#define __NR_osf_sigprocmask 48
-#define __NR_osf_getlogin 49 /* not implemented */
-#define __NR_osf_setlogin 50 /* not implemented */
-#define __NR_acct 51
-#define __NR_sigpending 52
+#include <uapi/asm/unistd.h>
-#define __NR_ioctl 54
-#define __NR_osf_reboot 55 /* not implemented */
-#define __NR_osf_revoke 56 /* not implemented */
-#define __NR_symlink 57
-#define __NR_readlink 58
-#define __NR_execve 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_osf_old_fstat 62 /* not implemented */
-#define __NR_getpgrp 63
-#define __NR_getpagesize 64
-#define __NR_osf_mremap 65 /* not implemented */
-#define __NR_vfork 66
-#define __NR_stat 67
-#define __NR_lstat 68
-#define __NR_osf_sbrk 69 /* not implemented */
-#define __NR_osf_sstk 70 /* not implemented */
-#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */
-#define __NR_osf_old_vadvise 72 /* not implemented */
-#define __NR_munmap 73
-#define __NR_mprotect 74
-#define __NR_madvise 75
-#define __NR_vhangup 76
-#define __NR_osf_kmodcall 77 /* not implemented */
-#define __NR_osf_mincore 78 /* not implemented */
-#define __NR_getgroups 79
-#define __NR_setgroups 80
-#define __NR_osf_old_getpgrp 81 /* not implemented */
-#define __NR_setpgrp 82 /* BSD alias for setpgid */
-#define __NR_osf_setitimer 83
-#define __NR_osf_old_wait 84 /* not implemented */
-#define __NR_osf_table 85 /* not implemented */
-#define __NR_osf_getitimer 86
-#define __NR_gethostname 87
-#define __NR_sethostname 88
-#define __NR_getdtablesize 89
-#define __NR_dup2 90
-#define __NR_fstat 91
-#define __NR_fcntl 92
-#define __NR_osf_select 93
-#define __NR_poll 94
-#define __NR_fsync 95
-#define __NR_setpriority 96
-#define __NR_socket 97
-#define __NR_connect 98
-#define __NR_accept 99
-#define __NR_getpriority 100
-#define __NR_send 101
-#define __NR_recv 102
-#define __NR_sigreturn 103
-#define __NR_bind 104
-#define __NR_setsockopt 105
-#define __NR_listen 106
-#define __NR_osf_plock 107 /* not implemented */
-#define __NR_osf_old_sigvec 108 /* not implemented */
-#define __NR_osf_old_sigblock 109 /* not implemented */
-#define __NR_osf_old_sigsetmask 110 /* not implemented */
-#define __NR_sigsuspend 111
-#define __NR_osf_sigstack 112
-#define __NR_recvmsg 113
-#define __NR_sendmsg 114
-#define __NR_osf_old_vtrace 115 /* not implemented */
-#define __NR_osf_gettimeofday 116
-#define __NR_osf_getrusage 117
-#define __NR_getsockopt 118
-
-#define __NR_readv 120
-#define __NR_writev 121
-#define __NR_osf_settimeofday 122
-#define __NR_fchown 123
-#define __NR_fchmod 124
-#define __NR_recvfrom 125
-#define __NR_setreuid 126
-#define __NR_setregid 127
-#define __NR_rename 128
-#define __NR_truncate 129
-#define __NR_ftruncate 130
-#define __NR_flock 131
-#define __NR_setgid 132
-#define __NR_sendto 133
-#define __NR_shutdown 134
-#define __NR_socketpair 135
-#define __NR_mkdir 136
-#define __NR_rmdir 137
-#define __NR_osf_utimes 138
-#define __NR_osf_old_sigreturn 139 /* not implemented */
-#define __NR_osf_adjtime 140 /* not implemented */
-#define __NR_getpeername 141
-#define __NR_osf_gethostid 142 /* not implemented */
-#define __NR_osf_sethostid 143 /* not implemented */
-#define __NR_getrlimit 144
-#define __NR_setrlimit 145
-#define __NR_osf_old_killpg 146 /* not implemented */
-#define __NR_setsid 147
-#define __NR_quotactl 148
-#define __NR_osf_oldquota 149 /* not implemented */
-#define __NR_getsockname 150
-
-#define __NR_osf_pid_block 153 /* not implemented */
-#define __NR_osf_pid_unblock 154 /* not implemented */
-
-#define __NR_sigaction 156
-#define __NR_osf_sigwaitprim 157 /* not implemented */
-#define __NR_osf_nfssvc 158 /* not implemented */
-#define __NR_osf_getdirentries 159
-#define __NR_osf_statfs 160
-#define __NR_osf_fstatfs 161
-
-#define __NR_osf_asynch_daemon 163 /* not implemented */
-#define __NR_osf_getfh 164 /* not implemented */
-#define __NR_osf_getdomainname 165
-#define __NR_setdomainname 166
-
-#define __NR_osf_exportfs 169 /* not implemented */
-
-#define __NR_osf_alt_plock 181 /* not implemented */
-
-#define __NR_osf_getmnt 184 /* not implemented */
-
-#define __NR_osf_alt_sigpending 187 /* not implemented */
-#define __NR_osf_alt_setsid 188 /* not implemented */
-
-#define __NR_osf_swapon 199
-#define __NR_msgctl 200
-#define __NR_msgget 201
-#define __NR_msgrcv 202
-#define __NR_msgsnd 203
-#define __NR_semctl 204
-#define __NR_semget 205
-#define __NR_semop 206
-#define __NR_osf_utsname 207
-#define __NR_lchown 208
-#define __NR_osf_shmat 209
-#define __NR_shmctl 210
-#define __NR_shmdt 211
-#define __NR_shmget 212
-#define __NR_osf_mvalid 213 /* not implemented */
-#define __NR_osf_getaddressconf 214 /* not implemented */
-#define __NR_osf_msleep 215 /* not implemented */
-#define __NR_osf_mwakeup 216 /* not implemented */
-#define __NR_msync 217
-#define __NR_osf_signal 218 /* not implemented */
-#define __NR_osf_utc_gettime 219 /* not implemented */
-#define __NR_osf_utc_adjtime 220 /* not implemented */
-
-#define __NR_osf_security 222 /* not implemented */
-#define __NR_osf_kloadcall 223 /* not implemented */
-
-#define __NR_osf_stat 224
-#define __NR_osf_lstat 225
-#define __NR_osf_fstat 226
-#define __NR_osf_statfs64 227
-#define __NR_osf_fstatfs64 228
-
-#define __NR_getpgid 233
-#define __NR_getsid 234
-#define __NR_sigaltstack 235
-#define __NR_osf_waitid 236 /* not implemented */
-#define __NR_osf_priocntlset 237 /* not implemented */
-#define __NR_osf_sigsendset 238 /* not implemented */
-#define __NR_osf_set_speculative 239 /* not implemented */
-#define __NR_osf_msfs_syscall 240 /* not implemented */
-#define __NR_osf_sysinfo 241
-#define __NR_osf_uadmin 242 /* not implemented */
-#define __NR_osf_fuser 243 /* not implemented */
-#define __NR_osf_proplist_syscall 244
-#define __NR_osf_ntp_adjtime 245 /* not implemented */
-#define __NR_osf_ntp_gettime 246 /* not implemented */
-#define __NR_osf_pathconf 247 /* not implemented */
-#define __NR_osf_fpathconf 248 /* not implemented */
-
-#define __NR_osf_uswitch 250 /* not implemented */
-#define __NR_osf_usleep_thread 251
-#define __NR_osf_audcntl 252 /* not implemented */
-#define __NR_osf_audgen 253 /* not implemented */
-#define __NR_sysfs 254
-#define __NR_osf_subsys_info 255 /* not implemented */
-#define __NR_osf_getsysinfo 256
-#define __NR_osf_setsysinfo 257
-#define __NR_osf_afs_syscall 258 /* not implemented */
-#define __NR_osf_swapctl 259 /* not implemented */
-#define __NR_osf_memcntl 260 /* not implemented */
-#define __NR_osf_fdatasync 261 /* not implemented */
-
-/*
- * Ignore legacy syscalls that we don't use.
- */
-#define __IGNORE_alarm
-#define __IGNORE_creat
-#define __IGNORE_getegid
-#define __IGNORE_geteuid
-#define __IGNORE_getgid
-#define __IGNORE_getpid
-#define __IGNORE_getppid
-#define __IGNORE_getuid
-#define __IGNORE_pause
-#define __IGNORE_time
-#define __IGNORE_utime
-#define __IGNORE_umount2
-
-/*
- * Linux-specific system calls begin at 300
- */
-#define __NR_bdflush 300
-#define __NR_sethae 301
-#define __NR_mount 302
-#define __NR_old_adjtimex 303
-#define __NR_swapoff 304
-#define __NR_getdents 305
-#define __NR_create_module 306
-#define __NR_init_module 307
-#define __NR_delete_module 308
-#define __NR_get_kernel_syms 309
-#define __NR_syslog 310
-#define __NR_reboot 311
-#define __NR_clone 312
-#define __NR_uselib 313
-#define __NR_mlock 314
-#define __NR_munlock 315
-#define __NR_mlockall 316
-#define __NR_munlockall 317
-#define __NR_sysinfo 318
-#define __NR__sysctl 319
-/* 320 was sys_idle. */
-#define __NR_oldumount 321
-#define __NR_swapon 322
-#define __NR_times 323
-#define __NR_personality 324
-#define __NR_setfsuid 325
-#define __NR_setfsgid 326
-#define __NR_ustat 327
-#define __NR_statfs 328
-#define __NR_fstatfs 329
-#define __NR_sched_setparam 330
-#define __NR_sched_getparam 331
-#define __NR_sched_setscheduler 332
-#define __NR_sched_getscheduler 333
-#define __NR_sched_yield 334
-#define __NR_sched_get_priority_max 335
-#define __NR_sched_get_priority_min 336
-#define __NR_sched_rr_get_interval 337
-#define __NR_afs_syscall 338
-#define __NR_uname 339
-#define __NR_nanosleep 340
-#define __NR_mremap 341
-#define __NR_nfsservctl 342
-#define __NR_setresuid 343
-#define __NR_getresuid 344
-#define __NR_pciconfig_read 345
-#define __NR_pciconfig_write 346
-#define __NR_query_module 347
-#define __NR_prctl 348
-#define __NR_pread64 349
-#define __NR_pwrite64 350
-#define __NR_rt_sigreturn 351
-#define __NR_rt_sigaction 352
-#define __NR_rt_sigprocmask 353
-#define __NR_rt_sigpending 354
-#define __NR_rt_sigtimedwait 355
-#define __NR_rt_sigqueueinfo 356
-#define __NR_rt_sigsuspend 357
-#define __NR_select 358
-#define __NR_gettimeofday 359
-#define __NR_settimeofday 360
-#define __NR_getitimer 361
-#define __NR_setitimer 362
-#define __NR_utimes 363
-#define __NR_getrusage 364
-#define __NR_wait4 365
-#define __NR_adjtimex 366
-#define __NR_getcwd 367
-#define __NR_capget 368
-#define __NR_capset 369
-#define __NR_sendfile 370
-#define __NR_setresgid 371
-#define __NR_getresgid 372
-#define __NR_dipc 373
-#define __NR_pivot_root 374
-#define __NR_mincore 375
-#define __NR_pciconfig_iobase 376
-#define __NR_getdents64 377
-#define __NR_gettid 378
-#define __NR_readahead 379
-/* 380 is unused */
-#define __NR_tkill 381
-#define __NR_setxattr 382
-#define __NR_lsetxattr 383
-#define __NR_fsetxattr 384
-#define __NR_getxattr 385
-#define __NR_lgetxattr 386
-#define __NR_fgetxattr 387
-#define __NR_listxattr 388
-#define __NR_llistxattr 389
-#define __NR_flistxattr 390
-#define __NR_removexattr 391
-#define __NR_lremovexattr 392
-#define __NR_fremovexattr 393
-#define __NR_futex 394
-#define __NR_sched_setaffinity 395
-#define __NR_sched_getaffinity 396
-#define __NR_tuxcall 397
-#define __NR_io_setup 398
-#define __NR_io_destroy 399
-#define __NR_io_getevents 400
-#define __NR_io_submit 401
-#define __NR_io_cancel 402
-#define __NR_exit_group 405
-#define __NR_lookup_dcookie 406
-#define __NR_epoll_create 407
-#define __NR_epoll_ctl 408
-#define __NR_epoll_wait 409
-/* Feb 2007: These three sys_epoll defines shouldn't be here but culling
- * them would break userspace apps ... we'll kill them off in 2010 :) */
-#define __NR_sys_epoll_create __NR_epoll_create
-#define __NR_sys_epoll_ctl __NR_epoll_ctl
-#define __NR_sys_epoll_wait __NR_epoll_wait
-#define __NR_remap_file_pages 410
-#define __NR_set_tid_address 411
-#define __NR_restart_syscall 412
-#define __NR_fadvise64 413
-#define __NR_timer_create 414
-#define __NR_timer_settime 415
-#define __NR_timer_gettime 416
-#define __NR_timer_getoverrun 417
-#define __NR_timer_delete 418
-#define __NR_clock_settime 419
-#define __NR_clock_gettime 420
-#define __NR_clock_getres 421
-#define __NR_clock_nanosleep 422
-#define __NR_semtimedop 423
-#define __NR_tgkill 424
-#define __NR_stat64 425
-#define __NR_lstat64 426
-#define __NR_fstat64 427
-#define __NR_vserver 428
-#define __NR_mbind 429
-#define __NR_get_mempolicy 430
-#define __NR_set_mempolicy 431
-#define __NR_mq_open 432
-#define __NR_mq_unlink 433
-#define __NR_mq_timedsend 434
-#define __NR_mq_timedreceive 435
-#define __NR_mq_notify 436
-#define __NR_mq_getsetattr 437
-#define __NR_waitid 438
-#define __NR_add_key 439
-#define __NR_request_key 440
-#define __NR_keyctl 441
-#define __NR_ioprio_set 442
-#define __NR_ioprio_get 443
-#define __NR_inotify_init 444
-#define __NR_inotify_add_watch 445
-#define __NR_inotify_rm_watch 446
-#define __NR_fdatasync 447
-#define __NR_kexec_load 448
-#define __NR_migrate_pages 449
-#define __NR_openat 450
-#define __NR_mkdirat 451
-#define __NR_mknodat 452
-#define __NR_fchownat 453
-#define __NR_futimesat 454
-#define __NR_fstatat64 455
-#define __NR_unlinkat 456
-#define __NR_renameat 457
-#define __NR_linkat 458
-#define __NR_symlinkat 459
-#define __NR_readlinkat 460
-#define __NR_fchmodat 461
-#define __NR_faccessat 462
-#define __NR_pselect6 463
-#define __NR_ppoll 464
-#define __NR_unshare 465
-#define __NR_set_robust_list 466
-#define __NR_get_robust_list 467
-#define __NR_splice 468
-#define __NR_sync_file_range 469
-#define __NR_tee 470
-#define __NR_vmsplice 471
-#define __NR_move_pages 472
-#define __NR_getcpu 473
-#define __NR_epoll_pwait 474
-#define __NR_utimensat 475
-#define __NR_signalfd 476
-#define __NR_timerfd 477
-#define __NR_eventfd 478
-#define __NR_recvmmsg 479
-#define __NR_fallocate 480
-#define __NR_timerfd_create 481
-#define __NR_timerfd_settime 482
-#define __NR_timerfd_gettime 483
-#define __NR_signalfd4 484
-#define __NR_eventfd2 485
-#define __NR_epoll_create1 486
-#define __NR_dup3 487
-#define __NR_pipe2 488
-#define __NR_inotify_init1 489
-#define __NR_preadv 490
-#define __NR_pwritev 491
-#define __NR_rt_tgsigqueueinfo 492
-#define __NR_perf_event_open 493
-#define __NR_fanotify_init 494
-#define __NR_fanotify_mark 495
-#define __NR_prlimit64 496
-#define __NR_name_to_handle_at 497
-#define __NR_open_by_handle_at 498
-#define __NR_clock_adjtime 499
-#define __NR_syncfs 500
-#define __NR_setns 501
-#define __NR_accept4 502
-#define __NR_sendmmsg 503
-#define __NR_process_vm_readv 504
-#define __NR_process_vm_writev 505
-
-#ifdef __KERNEL__
#define NR_SYSCALLS 506
@@ -481,7 +15,6 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
@@ -498,5 +31,4 @@
#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
-#endif /* __KERNEL__ */
#endif /* _ALPHA_UNISTD_H */
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index baebb3da1d4..d96f2ef5b63 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -1,3 +1,43 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += a.out.h
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += compiler.h
+header-y += console.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += fpu.h
+header-y += gentrap.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += kvm_para.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += pal.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += reg.h
+header-y += regdef.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += sysinfo.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/alpha/include/uapi/asm/a.out.h b/arch/alpha/include/uapi/asm/a.out.h
new file mode 100644
index 00000000000..547707246f6
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/a.out.h
@@ -0,0 +1,91 @@
+#ifndef _UAPI__ALPHA_A_OUT_H__
+#define _UAPI__ALPHA_A_OUT_H__
+
+#include <linux/types.h>
+
+/*
+ * OSF/1 ECOFF header structs. ECOFF files consist of:
+ * - a file header (struct filehdr),
+ * - an a.out header (struct aouthdr),
+ * - one or more section headers (struct scnhdr).
+ * The filhdr's "f_nscns" field contains the
+ * number of section headers.
+ */
+
+struct filehdr
+{
+ /* OSF/1 "file" header */
+ __u16 f_magic, f_nscns;
+ __u32 f_timdat;
+ __u64 f_symptr;
+ __u32 f_nsyms;
+ __u16 f_opthdr, f_flags;
+};
+
+struct aouthdr
+{
+ __u64 info; /* after that it looks quite normal.. */
+ __u64 tsize;
+ __u64 dsize;
+ __u64 bsize;
+ __u64 entry;
+ __u64 text_start; /* with a few additions that actually make sense */
+ __u64 data_start;
+ __u64 bss_start;
+ __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */
+ __u64 gpvalue;
+};
+
+struct scnhdr
+{
+ char s_name[8];
+ __u64 s_paddr;
+ __u64 s_vaddr;
+ __u64 s_size;
+ __u64 s_scnptr;
+ __u64 s_relptr;
+ __u64 s_lnnoptr;
+ __u16 s_nreloc;
+ __u16 s_nlnno;
+ __u32 s_flags;
+};
+
+struct exec
+{
+ /* OSF/1 "file" header */
+ struct filehdr fh;
+ struct aouthdr ah;
+};
+
+/*
+ * Define's so that the kernel exec code can access the a.out header
+ * fields...
+ */
+#define a_info ah.info
+#define a_text ah.tsize
+#define a_data ah.dsize
+#define a_bss ah.bsize
+#define a_entry ah.entry
+#define a_textstart ah.text_start
+#define a_datastart ah.data_start
+#define a_bssstart ah.bss_start
+#define a_gprmask ah.gprmask
+#define a_fprmask ah.fprmask
+#define a_gpvalue ah.gpvalue
+
+#define N_TXTADDR(x) ((x).a_textstart)
+#define N_DATADDR(x) ((x).a_datastart)
+#define N_BSSADDR(x) ((x).a_bssstart)
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+
+#define AOUTHSZ sizeof(struct aouthdr)
+#define SCNHSZ sizeof(struct scnhdr)
+#define SCNROUND 16
+
+#define N_TXTOFF(x) \
+ ((long) N_MAGIC(x) == ZMAGIC ? 0 : \
+ (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))
+
+#endif /* _UAPI__ALPHA_A_OUT_H__ */
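Aside on the ECOFF layout documented at the top of the new uapi a.out.h: the structures and a_* accessor macros can be exercised with a trivial reader. This is an illustration only, not part of the patch, and it assumes an alpha toolchain where <asm/a.out.h> resolves to the header added above.

#include <asm/a.out.h>		/* assumed: the alpha uapi header added above */
#include <stdio.h>

/*
 * Illustration only (not part of the patch): dumps a few fields of an OSF/1
 * ECOFF executable using struct exec and the a_* accessor macros defined in
 * the header above.
 */
int main(int argc, char **argv)
{
	struct exec ex;		/* struct filehdr followed by struct aouthdr */
	FILE *f;

	if (argc != 2 || (f = fopen(argv[1], "rb")) == NULL) {
		fprintf(stderr, "usage: %s <ecoff-binary>\n", argv[0]);
		return 1;
	}
	if (fread(&ex, sizeof(ex), 1, f) != 1) {
		perror("fread");
		fclose(f);
		return 1;
	}
	printf("sections:   %u\n", (unsigned)ex.fh.f_nscns);
	printf("entry:      %#lx\n", (unsigned long)ex.a_entry);	/* ah.entry */
	printf("text start: %#lx (%lu bytes)\n",
	       (unsigned long)N_TXTADDR(ex), (unsigned long)ex.a_text);
	fclose(f);
	return 0;
}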
diff --git a/arch/alpha/include/asm/auxvec.h b/arch/alpha/include/uapi/asm/auxvec.h
index a3a579dfdb4..a3a579dfdb4 100644
--- a/arch/alpha/include/asm/auxvec.h
+++ b/arch/alpha/include/uapi/asm/auxvec.h
diff --git a/arch/alpha/include/asm/bitsperlong.h b/arch/alpha/include/uapi/asm/bitsperlong.h
index ad57f786820..ad57f786820 100644
--- a/arch/alpha/include/asm/bitsperlong.h
+++ b/arch/alpha/include/uapi/asm/bitsperlong.h
diff --git a/arch/alpha/include/asm/byteorder.h b/arch/alpha/include/uapi/asm/byteorder.h
index 73683093202..73683093202 100644
--- a/arch/alpha/include/asm/byteorder.h
+++ b/arch/alpha/include/uapi/asm/byteorder.h
diff --git a/arch/alpha/include/uapi/asm/compiler.h b/arch/alpha/include/uapi/asm/compiler.h
new file mode 100644
index 00000000000..32cc7833f0c
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/compiler.h
@@ -0,0 +1,117 @@
+#ifndef _UAPI__ALPHA_COMPILER_H
+#define _UAPI__ALPHA_COMPILER_H
+
+/*
+ * Herein are macros we use when describing various patterns we want to present to GCC.
+ * In all cases we can get better schedules out of the compiler if we hide
+ * as little as possible inside inline assembly. However, we want to be
+ * able to know what we'll get out before giving up inline assembly. Thus
+ * these tests and macros.
+ */
+
+#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift)
+# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift)
+# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift)
+# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift)
+# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift)
+# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift)
+# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b)
+#else
+# define __kernel_insbl(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_inswl(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_insql(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_inslh(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_extbl(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_extwl(val, shift) \
+ ({ unsigned long __kir; \
+ __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
+ __kir; })
+# define __kernel_cmpbge(a, b) \
+ ({ unsigned long __kir; \
+ __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
+ __kir; })
+#endif
+
+#ifdef __alpha_cix__
+# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
+# define __kernel_cttz(x) __builtin_ctzl(x)
+# define __kernel_ctlz(x) __builtin_clzl(x)
+# define __kernel_ctpop(x) __builtin_popcountl(x)
+# else
+# define __kernel_cttz(x) \
+ ({ unsigned long __kir; \
+ __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctlz(x) \
+ ({ unsigned long __kir; \
+ __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctpop(x) \
+ ({ unsigned long __kir; \
+ __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# endif
+#else
+# define __kernel_cttz(x) \
+ ({ unsigned long __kir; \
+ __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctlz(x) \
+ ({ unsigned long __kir; \
+ __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+# define __kernel_ctpop(x) \
+ ({ unsigned long __kir; \
+ __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
+ __kir; })
+#endif
+
+
+/*
+ * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX
+ * extension is enabled. Previous versions did not define anything
+ * we could test during compilation -- too bad, so sad.
+ */
+
+#if defined(__alpha_bwx__)
+#define __kernel_ldbu(mem) (mem)
+#define __kernel_ldwu(mem) (mem)
+#define __kernel_stb(val,mem) ((mem) = (val))
+#define __kernel_stw(val,mem) ((mem) = (val))
+#else
+#define __kernel_ldbu(mem) \
+ ({ unsigned char __kir; \
+ __asm__(".arch ev56; \
+ ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
+ __kir; })
+#define __kernel_ldwu(mem) \
+ ({ unsigned short __kir; \
+ __asm__(".arch ev56; \
+ ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
+ __kir; })
+#define __kernel_stb(val,mem) \
+ __asm__(".arch ev56; \
+ stb %1,%0" : "=m"(mem) : "r"(val))
+#define __kernel_stw(val,mem) \
+ __asm__(".arch ev56; \
+ stw %1,%0" : "=m"(mem) : "r"(val))
+#endif
+
+
+#endif /* _UAPI__ALPHA_COMPILER_H */
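Reviewer note, not part of the patch: the macros above pick GCC's Alpha builtins when available and fall back to inline assembly. As a plain-C illustration of what one of them computes (the real macro uses __builtin_alpha_extbl or the EXTBL instruction):

#include <stdio.h>

/* Portable equivalent of __kernel_extbl(val, shift): EXTBL returns the byte
 * at byte offset (shift & 7) of val, zero-extended into bits 7:0. */
static unsigned long extbl_portable(unsigned long val, unsigned long shift)
{
	return (val >> ((shift & 7) * 8)) & 0xff;
}

int main(void)
{
	unsigned long v = 0x1122334455667788UL;

	printf("%#lx\n", extbl_portable(v, 2));	/* prints 0x66 */
	return 0;
}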
diff --git a/arch/alpha/include/uapi/asm/console.h b/arch/alpha/include/uapi/asm/console.h
new file mode 100644
index 00000000000..fd08a191f36
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/console.h
@@ -0,0 +1,50 @@
+#ifndef _UAPI__AXP_CONSOLE_H
+#define _UAPI__AXP_CONSOLE_H
+
+/*
+ * Console callback routine numbers
+ */
+#define CCB_GETC 0x01
+#define CCB_PUTS 0x02
+#define CCB_RESET_TERM 0x03
+#define CCB_SET_TERM_INT 0x04
+#define CCB_SET_TERM_CTL 0x05
+#define CCB_PROCESS_KEYCODE 0x06
+#define CCB_OPEN_CONSOLE 0x07
+#define CCB_CLOSE_CONSOLE 0x08
+
+#define CCB_OPEN 0x10
+#define CCB_CLOSE 0x11
+#define CCB_IOCTL 0x12
+#define CCB_READ 0x13
+#define CCB_WRITE 0x14
+
+#define CCB_SET_ENV 0x20
+#define CCB_RESET_ENV 0x21
+#define CCB_GET_ENV 0x22
+#define CCB_SAVE_ENV 0x23
+
+#define CCB_PSWITCH 0x30
+#define CCB_BIOS_EMUL 0x32
+
+/*
+ * Environment variable numbers
+ */
+#define ENV_AUTO_ACTION 0x01
+#define ENV_BOOT_DEV 0x02
+#define ENV_BOOTDEF_DEV 0x03
+#define ENV_BOOTED_DEV 0x04
+#define ENV_BOOT_FILE 0x05
+#define ENV_BOOTED_FILE 0x06
+#define ENV_BOOT_OSFLAGS 0x07
+#define ENV_BOOTED_OSFLAGS 0x08
+#define ENV_BOOT_RESET 0x09
+#define ENV_DUMP_DEV 0x0A
+#define ENV_ENABLE_AUDIT 0x0B
+#define ENV_LICENSE 0x0C
+#define ENV_CHAR_SET 0x0D
+#define ENV_LANGUAGE 0x0E
+#define ENV_TTY_DEV 0x0F
+
+
+#endif /* _UAPI__AXP_CONSOLE_H */
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/uapi/asm/errno.h
index e5f29ca2818..e5f29ca2818 100644
--- a/arch/alpha/include/asm/errno.h
+++ b/arch/alpha/include/uapi/asm/errno.h
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/uapi/asm/fcntl.h
index 6d9e805f18a..6d9e805f18a 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/uapi/asm/fcntl.h
diff --git a/arch/alpha/include/uapi/asm/fpu.h b/arch/alpha/include/uapi/asm/fpu.h
new file mode 100644
index 00000000000..21a053ca223
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/fpu.h
@@ -0,0 +1,123 @@
+#ifndef _UAPI__ASM_ALPHA_FPU_H
+#define _UAPI__ASM_ALPHA_FPU_H
+
+
+/*
+ * Alpha floating-point control register defines:
+ */
+#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */
+#define FPCR_DNZ (1UL<<48) /* denorms to zero */
+#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
+#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
+#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
+#define FPCR_INV (1UL<<52) /* invalid operation */
+#define FPCR_DZE (1UL<<53) /* division by zero */
+#define FPCR_OVF (1UL<<54) /* overflow */
+#define FPCR_UNF (1UL<<55) /* underflow */
+#define FPCR_INE (1UL<<56) /* inexact */
+#define FPCR_IOV (1UL<<57) /* integer overflow */
+#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */
+#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */
+#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */
+#define FPCR_SUM (1UL<<63) /* summary bit */
+
+#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
+#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
+#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
+#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
+#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
+#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
+
+#define FPCR_MASK 0xffff800000000000L
+
+/*
+ * IEEE trap enables are implemented in software. These per-thread
+ * bits are stored in the "ieee_state" field of "struct thread_info".
+ * Thus, the bits are defined so as not to conflict with the
+ * floating-point enable bit (which is architected). On top of that,
+ * we want to make these bits compatible with OSF/1 so
+ * ieee_set_fp_control() etc. can be implemented easily and
+ * compatibly. The corresponding definitions are in
+ * /usr/include/machine/fpu.h under OSF/1.
+ */
+#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
+#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */
+#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
+#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
+#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */
+#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
+#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
+ IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
+ IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define IEEE_STATUS_INV (1UL<<17)
+#define IEEE_STATUS_DZE (1UL<<18)
+#define IEEE_STATUS_OVF (1UL<<19)
+#define IEEE_STATUS_UNF (1UL<<20)
+#define IEEE_STATUS_INE (1UL<<21)
+#define IEEE_STATUS_DNO (1UL<<22)
+
+#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
+ IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
+ IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
+ IEEE_STATUS_MASK | IEEE_MAP_MASK)
+
+#define IEEE_CURRENT_RM_SHIFT 32
+#define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT)
+
+#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
+
+#define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */
+
+/*
+ * Convert the software IEEE trap enable and status bits into the
+ * hardware fpcr format.
+ *
+ * Digital Unix engineers receive my thanks for not defining the
+ * software bits identically to the hardware bits. The chip designers
+ * receive my thanks for making all the not-implemented fpcr bits
+ * RAZ, forcing us to use system calls to read/write this value.
+ */
+
+static inline unsigned long
+ieee_swcr_to_fpcr(unsigned long sw)
+{
+ unsigned long fp;
+ fp = (sw & IEEE_STATUS_MASK) << 35;
+ fp |= (sw & IEEE_MAP_DMZ) << 36;
+ fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
+ fp |= (~sw & (IEEE_TRAP_ENABLE_INV
+ | IEEE_TRAP_ENABLE_DZE
+ | IEEE_TRAP_ENABLE_OVF)) << 48;
+ fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+ fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
+ fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
+ return fp;
+}
+
+static inline unsigned long
+ieee_fpcr_to_swcr(unsigned long fp)
+{
+ unsigned long sw;
+ sw = (fp >> 35) & IEEE_STATUS_MASK;
+ sw |= (fp >> 36) & IEEE_MAP_DMZ;
+ sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
+ | IEEE_TRAP_ENABLE_DZE
+ | IEEE_TRAP_ENABLE_OVF);
+ sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
+ sw |= (fp >> 47) & IEEE_MAP_UMZ;
+ sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
+ return sw;
+}
+
+
+#endif /* _UAPI__ASM_ALPHA_FPU_H */
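Reviewer note, not part of the patch: a small user-space sketch exercising the two helpers above. It assumes an Alpha toolchain where this uapi header is installed as <asm/fpu.h>; the round trip through ieee_swcr_to_fpcr()/ieee_fpcr_to_swcr() should hand back the trap-enable bits unchanged.

#include <stdio.h>
#include <asm/fpu.h>

int main(void)
{
	unsigned long sw = IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE;
	unsigned long fp = ieee_swcr_to_fpcr(sw);

	/* Expect "back" to equal "sw": the conversion is lossless for
	 * the software trap-enable bits. */
	printf("sw=%#lx fpcr=%#lx back=%#lx\n",
	       sw, fp, ieee_fpcr_to_swcr(fp));
	return 0;
}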
diff --git a/arch/alpha/include/asm/gentrap.h b/arch/alpha/include/uapi/asm/gentrap.h
index ae50cc3192c..ae50cc3192c 100644
--- a/arch/alpha/include/asm/gentrap.h
+++ b/arch/alpha/include/uapi/asm/gentrap.h
diff --git a/arch/alpha/include/asm/ioctl.h b/arch/alpha/include/uapi/asm/ioctl.h
index fc63727f417..fc63727f417 100644
--- a/arch/alpha/include/asm/ioctl.h
+++ b/arch/alpha/include/uapi/asm/ioctl.h
diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index 92c557be49f..92c557be49f 100644
--- a/arch/alpha/include/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
diff --git a/arch/alpha/include/asm/ipcbuf.h b/arch/alpha/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/alpha/include/asm/ipcbuf.h
+++ b/arch/alpha/include/uapi/asm/ipcbuf.h
diff --git a/arch/alpha/include/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
index 14fab8f0b95..14fab8f0b95 100644
--- a/arch/alpha/include/asm/kvm_para.h
+++ b/arch/alpha/include/uapi/asm/kvm_para.h
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index 0086b472bc2..0086b472bc2 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
diff --git a/arch/alpha/include/asm/msgbuf.h b/arch/alpha/include/uapi/asm/msgbuf.h
index 98496501a2b..98496501a2b 100644
--- a/arch/alpha/include/asm/msgbuf.h
+++ b/arch/alpha/include/uapi/asm/msgbuf.h
diff --git a/arch/alpha/include/uapi/asm/pal.h b/arch/alpha/include/uapi/asm/pal.h
new file mode 100644
index 00000000000..3c0ce08e5f5
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/pal.h
@@ -0,0 +1,52 @@
+#ifndef _UAPI__ALPHA_PAL_H
+#define _UAPI__ALPHA_PAL_H
+
+/*
+ * Common PAL-code
+ */
+#define PAL_halt 0
+#define PAL_cflush 1
+#define PAL_draina 2
+#define PAL_bpt 128
+#define PAL_bugchk 129
+#define PAL_chmk 131
+#define PAL_callsys 131
+#define PAL_imb 134
+#define PAL_rduniq 158
+#define PAL_wruniq 159
+#define PAL_gentrap 170
+#define PAL_nphalt 190
+
+/*
+ * VMS specific PAL-code
+ */
+#define PAL_swppal 10
+#define PAL_mfpr_vptb 41
+
+/*
+ * OSF specific PAL-code
+ */
+#define PAL_cserve 9
+#define PAL_wripir 13
+#define PAL_rdmces 16
+#define PAL_wrmces 17
+#define PAL_wrfen 43
+#define PAL_wrvptptr 45
+#define PAL_jtopal 46
+#define PAL_swpctx 48
+#define PAL_wrval 49
+#define PAL_rdval 50
+#define PAL_tbi 51
+#define PAL_wrent 52
+#define PAL_swpipl 53
+#define PAL_rdps 54
+#define PAL_wrkgp 55
+#define PAL_wrusp 56
+#define PAL_wrperfmon 57
+#define PAL_rdusp 58
+#define PAL_whami 60
+#define PAL_retsys 61
+#define PAL_rti 63
+
+
+#endif /* _UAPI__ALPHA_PAL_H */
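Reviewer note, not part of the patch: PAL services are reached with the CALL_PAL instruction, with the call number taken from the table above. A hedged sketch of the classic instruction-memory-barrier wrapper; it only assembles on Alpha, and only the unprivileged calls such as PAL_imb, PAL_rduniq or PAL_gentrap may be issued from user mode.

#include <asm/pal.h>

static inline void pal_imb(void)
{
	/* PAL_imb flushes the instruction stream after code has been
	 * modified; privileged calls (PAL_swpctx, PAL_rti, ...) would
	 * trap if issued from user mode. */
	__asm__ __volatile__("call_pal %0" : : "i"(PAL_imb) : "memory");
}

int main(void)
{
	pal_imb();
	return 0;
}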
diff --git a/arch/alpha/include/uapi/asm/param.h b/arch/alpha/include/uapi/asm/param.h
new file mode 100644
index 00000000000..29daed819eb
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/param.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_ASM_ALPHA_PARAM_H
+#define _UAPI_ASM_ALPHA_PARAM_H
+
+/* ??? Gross. I don't want to parameterize this, and supposedly the
+ hardware ignores reprogramming. We also need userland buy-in to the
+ change in HZ, since this is visible in the wait4 resources etc. */
+
+#ifndef __KERNEL__
+#define HZ 1024
+#endif
+
+#define EXEC_PAGESIZE 8192
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+
+#endif /* _UAPI_ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/asm/poll.h b/arch/alpha/include/uapi/asm/poll.h
index c98509d3149..c98509d3149 100644
--- a/arch/alpha/include/asm/poll.h
+++ b/arch/alpha/include/uapi/asm/poll.h
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/uapi/asm/posix_types.h
index 5a8a48320ef..5a8a48320ef 100644
--- a/arch/alpha/include/asm/posix_types.h
+++ b/arch/alpha/include/uapi/asm/posix_types.h
diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..5ce83fa9a05
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/ptrace.h
@@ -0,0 +1,70 @@
+#ifndef _UAPI_ASMAXP_PTRACE_H
+#define _UAPI_ASMAXP_PTRACE_H
+
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry
+ *
+ * NOTE! I want to minimize the overhead of system calls, so this
+ * struct has as little information as possible. It does not have
+ *
+ * - floating point regs: the kernel doesn't change those
+ * - r9-15: saved by the C compiler
+ *
+ * This makes "fork()" and "exec()" a bit more complex, but should
+ * give us low system call latency.
+ */
+
+struct pt_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27;
+ unsigned long r28;
+ unsigned long hae;
+/* JRP - These are the values provided to a0-a2 by PALcode */
+ unsigned long trap_a0;
+ unsigned long trap_a1;
+ unsigned long trap_a2;
+/* These are saved by PAL-code: */
+ unsigned long ps;
+ unsigned long pc;
+ unsigned long gp;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r26;
+ unsigned long fp[32]; /* fp[31] is fpcr */
+};
+
+
+#endif /* _UAPI_ASMAXP_PTRACE_H */
diff --git a/arch/alpha/include/asm/reg.h b/arch/alpha/include/uapi/asm/reg.h
index 86ff916fb06..86ff916fb06 100644
--- a/arch/alpha/include/asm/reg.h
+++ b/arch/alpha/include/uapi/asm/reg.h
diff --git a/arch/alpha/include/asm/regdef.h b/arch/alpha/include/uapi/asm/regdef.h
index 142df9c4f8b..142df9c4f8b 100644
--- a/arch/alpha/include/asm/regdef.h
+++ b/arch/alpha/include/uapi/asm/regdef.h
diff --git a/arch/alpha/include/asm/resource.h b/arch/alpha/include/uapi/asm/resource.h
index c10874ff597..c10874ff597 100644
--- a/arch/alpha/include/asm/resource.h
+++ b/arch/alpha/include/uapi/asm/resource.h
diff --git a/arch/alpha/include/asm/sembuf.h b/arch/alpha/include/uapi/asm/sembuf.h
index 7b38b153478..7b38b153478 100644
--- a/arch/alpha/include/asm/sembuf.h
+++ b/arch/alpha/include/uapi/asm/sembuf.h
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/uapi/asm/setup.h
index b50014b3090..b50014b3090 100644
--- a/arch/alpha/include/asm/setup.h
+++ b/arch/alpha/include/uapi/asm/setup.h
diff --git a/arch/alpha/include/asm/shmbuf.h b/arch/alpha/include/uapi/asm/shmbuf.h
index 37ee84f0508..37ee84f0508 100644
--- a/arch/alpha/include/asm/shmbuf.h
+++ b/arch/alpha/include/uapi/asm/shmbuf.h
diff --git a/arch/alpha/include/asm/sigcontext.h b/arch/alpha/include/uapi/asm/sigcontext.h
index 323cdb02619..323cdb02619 100644
--- a/arch/alpha/include/asm/sigcontext.h
+++ b/arch/alpha/include/uapi/asm/sigcontext.h
diff --git a/arch/alpha/include/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h
index 9822362a842..9822362a842 100644
--- a/arch/alpha/include/asm/siginfo.h
+++ b/arch/alpha/include/uapi/asm/siginfo.h
diff --git a/arch/alpha/include/uapi/asm/signal.h b/arch/alpha/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..dd4ca4bcbb4
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/signal.h
@@ -0,0 +1,129 @@
+#ifndef _UAPI_ASMAXP_SIGNAL_H
+#define _UAPI_ASMAXP_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Linux/AXP has different signal numbers than Linux/i386: I'm trying
+ * to make it OSF/1 binary compatible, at least for normal binaries.
+ */
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGEMT 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGBUS 10
+#define SIGSEGV 11
+#define SIGSYS 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGURG 16
+#define SIGSTOP 17
+#define SIGTSTP 18
+#define SIGCONT 19
+#define SIGCHLD 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGIO 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGINFO 29
+#define SIGUSR1 30
+#define SIGUSR2 31
+
+#define SIGPOLL SIGIO
+#define SIGPWR SIGINFO
+#define SIGIOT SIGABRT
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+
+#define SA_ONSTACK 0x00000001
+#define SA_RESTART 0x00000002
+#define SA_NOCLDSTOP 0x00000004
+#define SA_NODEFER 0x00000008
+#define SA_RESETHAND 0x00000010
+#define SA_NOCLDWAIT 0x00000020
+#define SA_SIGINFO 0x00000040
+
+#define SA_ONESHOT SA_RESETHAND
+#define SA_NOMASK SA_NODEFER
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ 16384
+
+#define SIG_BLOCK 1 /* for blocking signals */
+#define SIG_UNBLOCK 2 /* for unblocking signals */
+#define SIG_SETMASK 3 /* for setting the signal mask */
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ int sa_flags;
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+/* sigstack(2) is deprecated, and will be withdrawn in a future version
+ of the X/Open CAE Specification. Use sigaltstack instead. It is only
+ implemented here for OSF/1 compatibility. */
+
+struct sigstack {
+ void __user *ss_sp;
+ int ss_onstack;
+};
+
+
+#endif /* _UAPI_ASMAXP_SIGNAL_H */
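Reviewer note, not part of the patch: the SA_* block above documents the handler-installation semantics; a minimal sketch using the portable sigaction(2) interface (glibc's struct sigaction, not the raw kernel layout above):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_sigusr1(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;
	write(STDOUT_FILENO, "SIGUSR1\n", 8);	/* async-signal-safe */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigusr1;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* 3-arg handler; restart syscalls */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) != 0)
		return 1;

	raise(SIGUSR1);
	return 0;
}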
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
new file mode 100644
index 00000000000..097c1577735
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -0,0 +1,81 @@
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+/*
+ * Note: we only bother about making the SOL_SOCKET options
+ * same as OSF/1, as that's all that "normal" programs are
+ * likely to set. We don't necessarily want to be binary
+ * compatible with _everything_.
+ */
+#define SOL_SOCKET 0xffff
+
+#define SO_DEBUG 0x0001
+#define SO_REUSEADDR 0x0004
+#define SO_KEEPALIVE 0x0008
+#define SO_DONTROUTE 0x0010
+#define SO_BROADCAST 0x0020
+#define SO_LINGER 0x0080
+#define SO_OOBINLINE 0x0100
+/* To add: #define SO_REUSEPORT 0x0200 */
+
+#define SO_TYPE 0x1008
+#define SO_ERROR 0x1007
+#define SO_SNDBUF 0x1001
+#define SO_RCVBUF 0x1002
+#define SO_SNDBUFFORCE 0x100a
+#define SO_RCVBUFFORCE 0x100b
+#define SO_RCVLOWAT 0x1010
+#define SO_SNDLOWAT 0x1011
+#define SO_RCVTIMEO 0x1012
+#define SO_SNDTIMEO 0x1013
+#define SO_ACCEPTCONN 0x1014
+#define SO_PROTOCOL 0x1028
+#define SO_DOMAIN 0x1029
+
+/* linux-specific, might as well be the same as on i386 */
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_BSDCOMPAT 14
+
+#define SO_PASSCRED 17
+#define SO_PEERCRED 18
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+#define SO_GET_FILTER SO_ATTACH_FILTER
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_PEERSEC 30
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 19
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 20
+#define SO_SECURITY_ENCRYPTION_NETWORK 21
+
+#define SO_MARK 36
+
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS 43
+
+
+#endif /* _UAPI_ASM_SOCKET_H */
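Reviewer note, not part of the patch: the numeric values above track OSF/1 and differ from other architectures, which is why user code always goes through the symbolic names and setsockopt(2). A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* SOL_SOCKET/SO_REUSEADDR expand to the per-arch values above. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0)
		perror("setsockopt");
	close(fd);
	return 0;
}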
diff --git a/arch/alpha/include/asm/sockios.h b/arch/alpha/include/uapi/asm/sockios.h
index 7932c7ab4a4..7932c7ab4a4 100644
--- a/arch/alpha/include/asm/sockios.h
+++ b/arch/alpha/include/uapi/asm/sockios.h
diff --git a/arch/alpha/include/asm/stat.h b/arch/alpha/include/uapi/asm/stat.h
index 07ad3e6b3f3..07ad3e6b3f3 100644
--- a/arch/alpha/include/asm/stat.h
+++ b/arch/alpha/include/uapi/asm/stat.h
diff --git a/arch/alpha/include/asm/statfs.h b/arch/alpha/include/uapi/asm/statfs.h
index ccd2e186bfd..ccd2e186bfd 100644
--- a/arch/alpha/include/asm/statfs.h
+++ b/arch/alpha/include/uapi/asm/statfs.h
diff --git a/arch/alpha/include/asm/swab.h b/arch/alpha/include/uapi/asm/swab.h
index 4d682b16c7c..4d682b16c7c 100644
--- a/arch/alpha/include/asm/swab.h
+++ b/arch/alpha/include/uapi/asm/swab.h
diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/uapi/asm/sysinfo.h
index 0b80e79d75e..0b80e79d75e 100644
--- a/arch/alpha/include/asm/sysinfo.h
+++ b/arch/alpha/include/uapi/asm/sysinfo.h
diff --git a/arch/alpha/include/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index 879dd358992..879dd358992 100644
--- a/arch/alpha/include/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
diff --git a/arch/alpha/include/uapi/asm/termios.h b/arch/alpha/include/uapi/asm/termios.h
new file mode 100644
index 00000000000..580ed1e4854
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/termios.h
@@ -0,0 +1,70 @@
+#ifndef _UAPI_ALPHA_TERMIOS_H
+#define _UAPI_ALPHA_TERMIOS_H
+
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
+
+struct sgttyb {
+ char sg_ispeed;
+ char sg_ospeed;
+ char sg_erase;
+ char sg_kill;
+ short sg_flags;
+};
+
+struct tchars {
+ char t_intrc;
+ char t_quitc;
+ char t_startc;
+ char t_stopc;
+ char t_eofc;
+ char t_brkc;
+};
+
+struct ltchars {
+ char t_suspc;
+ char t_dsuspc;
+ char t_rprntc;
+ char t_flushc;
+ char t_werasc;
+ char t_lnextc;
+};
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/*
+ * c_cc characters in the termio structure. Oh, how I love being
+ * backwardly compatible. Notice that characters 4 and 5 are
+ * interpreted differently depending on whether ICANON is set in
+ * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
+ * as _VMIN and _VTIME. This is for compatibility with OSF/1 (which
+ * is compatible with sysV)...
+ */
+#define _VINTR 0
+#define _VQUIT 1
+#define _VERASE 2
+#define _VKILL 3
+#define _VEOF 4
+#define _VMIN 4
+#define _VEOL 5
+#define _VTIME 5
+#define _VEOL2 6
+#define _VSWTC 7
+
+
+#endif /* _UAPI_ALPHA_TERMIOS_H */
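Reviewer note, not part of the patch: the comment above explains that c_cc slots 4 and 5 double as VEOF/VEOL in canonical mode and VMIN/VTIME otherwise. A minimal sketch flipping a terminal into non-canonical mode through the portable termios API:

#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios t, orig;

	if (tcgetattr(STDIN_FILENO, &t) != 0)
		return 1;
	orig = t;
	t.c_lflag &= ~(ICANON | ECHO);	/* leave canonical mode */
	t.c_cc[VMIN] = 1;		/* block until at least one byte */
	t.c_cc[VTIME] = 0;		/* no inter-byte timeout */
	if (tcsetattr(STDIN_FILENO, TCSANOW, &t) != 0)
		return 1;

	/* ... read() now returns per-byte; restore the original modes. */
	tcsetattr(STDIN_FILENO, TCSANOW, &orig);
	return 0;
}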
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
new file mode 100644
index 00000000000..9fd3cd45977
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -0,0 +1,16 @@
+#ifndef _UAPI_ALPHA_TYPES_H
+#define _UAPI_ALPHA_TYPES_H
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ */
+
+#ifndef __KERNEL__
+#include <asm-generic/int-l64.h>
+#endif
+
+#endif /* _UAPI_ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..801d28bcea5
--- /dev/null
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -0,0 +1,471 @@
+#ifndef _UAPI_ALPHA_UNISTD_H
+#define _UAPI_ALPHA_UNISTD_H
+
+#define __NR_osf_syscall 0 /* not implemented */
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_osf_old_open 5 /* not implemented */
+#define __NR_close 6
+#define __NR_osf_wait4 7
+#define __NR_osf_old_creat 8 /* not implemented */
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_osf_execve 11 /* not implemented */
+#define __NR_chdir 12
+#define __NR_fchdir 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_brk 17
+#define __NR_osf_getfsstat 18 /* not implemented */
+#define __NR_lseek 19
+#define __NR_getxpid 20
+#define __NR_osf_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getxuid 24
+#define __NR_exec_with_loader 25 /* not implemented */
+#define __NR_ptrace 26
+#define __NR_osf_nrecvmsg 27 /* not implemented */
+#define __NR_osf_nsendmsg 28 /* not implemented */
+#define __NR_osf_nrecvfrom 29 /* not implemented */
+#define __NR_osf_naccept 30 /* not implemented */
+#define __NR_osf_ngetpeername 31 /* not implemented */
+#define __NR_osf_ngetsockname 32 /* not implemented */
+#define __NR_access 33
+#define __NR_osf_chflags 34 /* not implemented */
+#define __NR_osf_fchflags 35 /* not implemented */
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_osf_old_stat 38 /* not implemented */
+#define __NR_setpgid 39
+#define __NR_osf_old_lstat 40 /* not implemented */
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_osf_set_program_attributes 43
+#define __NR_osf_profil 44 /* not implemented */
+#define __NR_open 45
+#define __NR_osf_old_sigaction 46 /* not implemented */
+#define __NR_getxgid 47
+#define __NR_osf_sigprocmask 48
+#define __NR_osf_getlogin 49 /* not implemented */
+#define __NR_osf_setlogin 50 /* not implemented */
+#define __NR_acct 51
+#define __NR_sigpending 52
+
+#define __NR_ioctl 54
+#define __NR_osf_reboot 55 /* not implemented */
+#define __NR_osf_revoke 56 /* not implemented */
+#define __NR_symlink 57
+#define __NR_readlink 58
+#define __NR_execve 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_osf_old_fstat 62 /* not implemented */
+#define __NR_getpgrp 63
+#define __NR_getpagesize 64
+#define __NR_osf_mremap 65 /* not implemented */
+#define __NR_vfork 66
+#define __NR_stat 67
+#define __NR_lstat 68
+#define __NR_osf_sbrk 69 /* not implemented */
+#define __NR_osf_sstk 70 /* not implemented */
+#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */
+#define __NR_osf_old_vadvise 72 /* not implemented */
+#define __NR_munmap 73
+#define __NR_mprotect 74
+#define __NR_madvise 75
+#define __NR_vhangup 76
+#define __NR_osf_kmodcall 77 /* not implemented */
+#define __NR_osf_mincore 78 /* not implemented */
+#define __NR_getgroups 79
+#define __NR_setgroups 80
+#define __NR_osf_old_getpgrp 81 /* not implemented */
+#define __NR_setpgrp 82 /* BSD alias for setpgid */
+#define __NR_osf_setitimer 83
+#define __NR_osf_old_wait 84 /* not implemented */
+#define __NR_osf_table 85 /* not implemented */
+#define __NR_osf_getitimer 86
+#define __NR_gethostname 87
+#define __NR_sethostname 88
+#define __NR_getdtablesize 89
+#define __NR_dup2 90
+#define __NR_fstat 91
+#define __NR_fcntl 92
+#define __NR_osf_select 93
+#define __NR_poll 94
+#define __NR_fsync 95
+#define __NR_setpriority 96
+#define __NR_socket 97
+#define __NR_connect 98
+#define __NR_accept 99
+#define __NR_getpriority 100
+#define __NR_send 101
+#define __NR_recv 102
+#define __NR_sigreturn 103
+#define __NR_bind 104
+#define __NR_setsockopt 105
+#define __NR_listen 106
+#define __NR_osf_plock 107 /* not implemented */
+#define __NR_osf_old_sigvec 108 /* not implemented */
+#define __NR_osf_old_sigblock 109 /* not implemented */
+#define __NR_osf_old_sigsetmask 110 /* not implemented */
+#define __NR_sigsuspend 111
+#define __NR_osf_sigstack 112
+#define __NR_recvmsg 113
+#define __NR_sendmsg 114
+#define __NR_osf_old_vtrace 115 /* not implemented */
+#define __NR_osf_gettimeofday 116
+#define __NR_osf_getrusage 117
+#define __NR_getsockopt 118
+
+#define __NR_readv 120
+#define __NR_writev 121
+#define __NR_osf_settimeofday 122
+#define __NR_fchown 123
+#define __NR_fchmod 124
+#define __NR_recvfrom 125
+#define __NR_setreuid 126
+#define __NR_setregid 127
+#define __NR_rename 128
+#define __NR_truncate 129
+#define __NR_ftruncate 130
+#define __NR_flock 131
+#define __NR_setgid 132
+#define __NR_sendto 133
+#define __NR_shutdown 134
+#define __NR_socketpair 135
+#define __NR_mkdir 136
+#define __NR_rmdir 137
+#define __NR_osf_utimes 138
+#define __NR_osf_old_sigreturn 139 /* not implemented */
+#define __NR_osf_adjtime 140 /* not implemented */
+#define __NR_getpeername 141
+#define __NR_osf_gethostid 142 /* not implemented */
+#define __NR_osf_sethostid 143 /* not implemented */
+#define __NR_getrlimit 144
+#define __NR_setrlimit 145
+#define __NR_osf_old_killpg 146 /* not implemented */
+#define __NR_setsid 147
+#define __NR_quotactl 148
+#define __NR_osf_oldquota 149 /* not implemented */
+#define __NR_getsockname 150
+
+#define __NR_osf_pid_block 153 /* not implemented */
+#define __NR_osf_pid_unblock 154 /* not implemented */
+
+#define __NR_sigaction 156
+#define __NR_osf_sigwaitprim 157 /* not implemented */
+#define __NR_osf_nfssvc 158 /* not implemented */
+#define __NR_osf_getdirentries 159
+#define __NR_osf_statfs 160
+#define __NR_osf_fstatfs 161
+
+#define __NR_osf_asynch_daemon 163 /* not implemented */
+#define __NR_osf_getfh 164 /* not implemented */
+#define __NR_osf_getdomainname 165
+#define __NR_setdomainname 166
+
+#define __NR_osf_exportfs 169 /* not implemented */
+
+#define __NR_osf_alt_plock 181 /* not implemented */
+
+#define __NR_osf_getmnt 184 /* not implemented */
+
+#define __NR_osf_alt_sigpending 187 /* not implemented */
+#define __NR_osf_alt_setsid 188 /* not implemented */
+
+#define __NR_osf_swapon 199
+#define __NR_msgctl 200
+#define __NR_msgget 201
+#define __NR_msgrcv 202
+#define __NR_msgsnd 203
+#define __NR_semctl 204
+#define __NR_semget 205
+#define __NR_semop 206
+#define __NR_osf_utsname 207
+#define __NR_lchown 208
+#define __NR_osf_shmat 209
+#define __NR_shmctl 210
+#define __NR_shmdt 211
+#define __NR_shmget 212
+#define __NR_osf_mvalid 213 /* not implemented */
+#define __NR_osf_getaddressconf 214 /* not implemented */
+#define __NR_osf_msleep 215 /* not implemented */
+#define __NR_osf_mwakeup 216 /* not implemented */
+#define __NR_msync 217
+#define __NR_osf_signal 218 /* not implemented */
+#define __NR_osf_utc_gettime 219 /* not implemented */
+#define __NR_osf_utc_adjtime 220 /* not implemented */
+
+#define __NR_osf_security 222 /* not implemented */
+#define __NR_osf_kloadcall 223 /* not implemented */
+
+#define __NR_osf_stat 224
+#define __NR_osf_lstat 225
+#define __NR_osf_fstat 226
+#define __NR_osf_statfs64 227
+#define __NR_osf_fstatfs64 228
+
+#define __NR_getpgid 233
+#define __NR_getsid 234
+#define __NR_sigaltstack 235
+#define __NR_osf_waitid 236 /* not implemented */
+#define __NR_osf_priocntlset 237 /* not implemented */
+#define __NR_osf_sigsendset 238 /* not implemented */
+#define __NR_osf_set_speculative 239 /* not implemented */
+#define __NR_osf_msfs_syscall 240 /* not implemented */
+#define __NR_osf_sysinfo 241
+#define __NR_osf_uadmin 242 /* not implemented */
+#define __NR_osf_fuser 243 /* not implemented */
+#define __NR_osf_proplist_syscall 244
+#define __NR_osf_ntp_adjtime 245 /* not implemented */
+#define __NR_osf_ntp_gettime 246 /* not implemented */
+#define __NR_osf_pathconf 247 /* not implemented */
+#define __NR_osf_fpathconf 248 /* not implemented */
+
+#define __NR_osf_uswitch 250 /* not implemented */
+#define __NR_osf_usleep_thread 251
+#define __NR_osf_audcntl 252 /* not implemented */
+#define __NR_osf_audgen 253 /* not implemented */
+#define __NR_sysfs 254
+#define __NR_osf_subsys_info 255 /* not implemented */
+#define __NR_osf_getsysinfo 256
+#define __NR_osf_setsysinfo 257
+#define __NR_osf_afs_syscall 258 /* not implemented */
+#define __NR_osf_swapctl 259 /* not implemented */
+#define __NR_osf_memcntl 260 /* not implemented */
+#define __NR_osf_fdatasync 261 /* not implemented */
+
+/*
+ * Ignore legacy syscalls that we don't use.
+ */
+#define __IGNORE_alarm
+#define __IGNORE_creat
+#define __IGNORE_getegid
+#define __IGNORE_geteuid
+#define __IGNORE_getgid
+#define __IGNORE_getpid
+#define __IGNORE_getppid
+#define __IGNORE_getuid
+#define __IGNORE_pause
+#define __IGNORE_time
+#define __IGNORE_utime
+#define __IGNORE_umount2
+
+/*
+ * Linux-specific system calls begin at 300
+ */
+#define __NR_bdflush 300
+#define __NR_sethae 301
+#define __NR_mount 302
+#define __NR_old_adjtimex 303
+#define __NR_swapoff 304
+#define __NR_getdents 305
+#define __NR_create_module 306
+#define __NR_init_module 307
+#define __NR_delete_module 308
+#define __NR_get_kernel_syms 309
+#define __NR_syslog 310
+#define __NR_reboot 311
+#define __NR_clone 312
+#define __NR_uselib 313
+#define __NR_mlock 314
+#define __NR_munlock 315
+#define __NR_mlockall 316
+#define __NR_munlockall 317
+#define __NR_sysinfo 318
+#define __NR__sysctl 319
+/* 320 was sys_idle. */
+#define __NR_oldumount 321
+#define __NR_swapon 322
+#define __NR_times 323
+#define __NR_personality 324
+#define __NR_setfsuid 325
+#define __NR_setfsgid 326
+#define __NR_ustat 327
+#define __NR_statfs 328
+#define __NR_fstatfs 329
+#define __NR_sched_setparam 330
+#define __NR_sched_getparam 331
+#define __NR_sched_setscheduler 332
+#define __NR_sched_getscheduler 333
+#define __NR_sched_yield 334
+#define __NR_sched_get_priority_max 335
+#define __NR_sched_get_priority_min 336
+#define __NR_sched_rr_get_interval 337
+#define __NR_afs_syscall 338
+#define __NR_uname 339
+#define __NR_nanosleep 340
+#define __NR_mremap 341
+#define __NR_nfsservctl 342
+#define __NR_setresuid 343
+#define __NR_getresuid 344
+#define __NR_pciconfig_read 345
+#define __NR_pciconfig_write 346
+#define __NR_query_module 347
+#define __NR_prctl 348
+#define __NR_pread64 349
+#define __NR_pwrite64 350
+#define __NR_rt_sigreturn 351
+#define __NR_rt_sigaction 352
+#define __NR_rt_sigprocmask 353
+#define __NR_rt_sigpending 354
+#define __NR_rt_sigtimedwait 355
+#define __NR_rt_sigqueueinfo 356
+#define __NR_rt_sigsuspend 357
+#define __NR_select 358
+#define __NR_gettimeofday 359
+#define __NR_settimeofday 360
+#define __NR_getitimer 361
+#define __NR_setitimer 362
+#define __NR_utimes 363
+#define __NR_getrusage 364
+#define __NR_wait4 365
+#define __NR_adjtimex 366
+#define __NR_getcwd 367
+#define __NR_capget 368
+#define __NR_capset 369
+#define __NR_sendfile 370
+#define __NR_setresgid 371
+#define __NR_getresgid 372
+#define __NR_dipc 373
+#define __NR_pivot_root 374
+#define __NR_mincore 375
+#define __NR_pciconfig_iobase 376
+#define __NR_getdents64 377
+#define __NR_gettid 378
+#define __NR_readahead 379
+/* 380 is unused */
+#define __NR_tkill 381
+#define __NR_setxattr 382
+#define __NR_lsetxattr 383
+#define __NR_fsetxattr 384
+#define __NR_getxattr 385
+#define __NR_lgetxattr 386
+#define __NR_fgetxattr 387
+#define __NR_listxattr 388
+#define __NR_llistxattr 389
+#define __NR_flistxattr 390
+#define __NR_removexattr 391
+#define __NR_lremovexattr 392
+#define __NR_fremovexattr 393
+#define __NR_futex 394
+#define __NR_sched_setaffinity 395
+#define __NR_sched_getaffinity 396
+#define __NR_tuxcall 397
+#define __NR_io_setup 398
+#define __NR_io_destroy 399
+#define __NR_io_getevents 400
+#define __NR_io_submit 401
+#define __NR_io_cancel 402
+#define __NR_exit_group 405
+#define __NR_lookup_dcookie 406
+#define __NR_epoll_create 407
+#define __NR_epoll_ctl 408
+#define __NR_epoll_wait 409
+/* Feb 2007: These three sys_epoll defines shouldn't be here but culling
+ * them would break userspace apps ... we'll kill them off in 2010 :) */
+#define __NR_sys_epoll_create __NR_epoll_create
+#define __NR_sys_epoll_ctl __NR_epoll_ctl
+#define __NR_sys_epoll_wait __NR_epoll_wait
+#define __NR_remap_file_pages 410
+#define __NR_set_tid_address 411
+#define __NR_restart_syscall 412
+#define __NR_fadvise64 413
+#define __NR_timer_create 414
+#define __NR_timer_settime 415
+#define __NR_timer_gettime 416
+#define __NR_timer_getoverrun 417
+#define __NR_timer_delete 418
+#define __NR_clock_settime 419
+#define __NR_clock_gettime 420
+#define __NR_clock_getres 421
+#define __NR_clock_nanosleep 422
+#define __NR_semtimedop 423
+#define __NR_tgkill 424
+#define __NR_stat64 425
+#define __NR_lstat64 426
+#define __NR_fstat64 427
+#define __NR_vserver 428
+#define __NR_mbind 429
+#define __NR_get_mempolicy 430
+#define __NR_set_mempolicy 431
+#define __NR_mq_open 432
+#define __NR_mq_unlink 433
+#define __NR_mq_timedsend 434
+#define __NR_mq_timedreceive 435
+#define __NR_mq_notify 436
+#define __NR_mq_getsetattr 437
+#define __NR_waitid 438
+#define __NR_add_key 439
+#define __NR_request_key 440
+#define __NR_keyctl 441
+#define __NR_ioprio_set 442
+#define __NR_ioprio_get 443
+#define __NR_inotify_init 444
+#define __NR_inotify_add_watch 445
+#define __NR_inotify_rm_watch 446
+#define __NR_fdatasync 447
+#define __NR_kexec_load 448
+#define __NR_migrate_pages 449
+#define __NR_openat 450
+#define __NR_mkdirat 451
+#define __NR_mknodat 452
+#define __NR_fchownat 453
+#define __NR_futimesat 454
+#define __NR_fstatat64 455
+#define __NR_unlinkat 456
+#define __NR_renameat 457
+#define __NR_linkat 458
+#define __NR_symlinkat 459
+#define __NR_readlinkat 460
+#define __NR_fchmodat 461
+#define __NR_faccessat 462
+#define __NR_pselect6 463
+#define __NR_ppoll 464
+#define __NR_unshare 465
+#define __NR_set_robust_list 466
+#define __NR_get_robust_list 467
+#define __NR_splice 468
+#define __NR_sync_file_range 469
+#define __NR_tee 470
+#define __NR_vmsplice 471
+#define __NR_move_pages 472
+#define __NR_getcpu 473
+#define __NR_epoll_pwait 474
+#define __NR_utimensat 475
+#define __NR_signalfd 476
+#define __NR_timerfd 477
+#define __NR_eventfd 478
+#define __NR_recvmmsg 479
+#define __NR_fallocate 480
+#define __NR_timerfd_create 481
+#define __NR_timerfd_settime 482
+#define __NR_timerfd_gettime 483
+#define __NR_signalfd4 484
+#define __NR_eventfd2 485
+#define __NR_epoll_create1 486
+#define __NR_dup3 487
+#define __NR_pipe2 488
+#define __NR_inotify_init1 489
+#define __NR_preadv 490
+#define __NR_pwritev 491
+#define __NR_rt_tgsigqueueinfo 492
+#define __NR_perf_event_open 493
+#define __NR_fanotify_init 494
+#define __NR_fanotify_mark 495
+#define __NR_prlimit64 496
+#define __NR_name_to_handle_at 497
+#define __NR_open_by_handle_at 498
+#define __NR_clock_adjtime 499
+#define __NR_syncfs 500
+#define __NR_setns 501
+#define __NR_accept4 502
+#define __NR_sendmmsg 503
+#define __NR_process_vm_readv 504
+#define __NR_process_vm_writev 505
+
+#endif /* _UAPI_ALPHA_UNISTD_H */
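Reviewer note, not part of the patch: the __NR_* values above are consumed through syscall(2); a minimal sketch, meaningful only when built against the Alpha headers:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long tid = syscall(__NR_gettid);	/* 378 on Alpha, per the table above */

	printf("gettid() -> %ld\n", tid);
	return 0;
}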
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 3f844d26d2c..a21d0ab3b19 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -354,8 +354,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
int dac_allowed;
- if (dir == PCI_DMA_NONE)
- BUG();
+ BUG_ON(dir == PCI_DMA_NONE);
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
@@ -378,8 +377,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
struct pci_iommu_arena *arena;
long dma_ofs, npages;
- if (dir == PCI_DMA_NONE)
- BUG();
+ BUG_ON(dir == PCI_DMA_NONE);
if (dma_addr >= __direct_map_base
&& dma_addr < __direct_map_base + __direct_map_size) {
@@ -662,8 +660,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t max_dma;
int dac_allowed;
- if (dir == PCI_DMA_NONE)
- BUG();
+ BUG_ON(dir == PCI_DMA_NONE);
dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
@@ -742,8 +739,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
dma_addr_t max_dma;
dma_addr_t fbeg, fend;
- if (dir == PCI_DMA_NONE)
- BUG();
+ BUG_ON(dir == PCI_DMA_NONE);
if (! alpha_mv.mv_pci_tbi)
return;
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 336393c9c11..02d02c047f1 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -122,12 +122,6 @@ SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
return sigsuspend(&blocked);
}
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
-{
- return do_sigaltstack(uss, uoss, rdusp());
-}
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -418,9 +412,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, oldsp);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs,
set->sig[0], oldsp);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 08330d9e6a9..f95ba14ae3d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -12,8 +12,6 @@ config ARM
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select GENERIC_PCI_IOMAP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
@@ -533,9 +531,12 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
select ARCH_REQUIRE_GPIOLIB
+ select COMMON_CLK_DOVE
select CPU_V7
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
+ select PINCTRL
+ select PINCTRL_DOVE
select PLAT_ORION_LEGACY
select USB_ARCH_HAS_EHCI
help
@@ -548,6 +549,8 @@ config ARCH_KIRKWOOD
select GENERIC_CLOCKEVENTS
select PCI
select PCI_QUIRKS
+ select PINCTRL
+ select PINCTRL_KIRKWOOD
select PLAT_ORION_LEGACY
help
Support for the following Marvell Kirkwood series SoCs:
@@ -646,6 +649,7 @@ config ARCH_TEGRA
select HAVE_CLK
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
+ select SPARSE_IRQ
select USE_OF
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -887,6 +891,7 @@ config ARCH_U8500
select GENERIC_CLOCKEVENTS
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
+ select SPARSE_IRQ
help
Support for ST-Ericsson's Ux500 architecture
@@ -901,6 +906,7 @@ config ARCH_NOMADIK
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
select PINCTRL_STN8815
+ select SPARSE_IRQ
help
Support for the Nomadik platform by ST-Ericsson
@@ -944,7 +950,7 @@ config ARCH_OMAP
help
Support for TI's OMAP platform (OMAP1/2/3/4).
-config ARCH_VT8500
+config ARCH_VT8500_SINGLE
bool "VIA/WonderMedia 85xx"
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
@@ -954,21 +960,12 @@ config ARCH_VT8500
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
select HAVE_CLK
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
select USE_OF
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
-config ARCH_ZYNQ
- bool "Xilinx Zynq ARM Cortex A9 Platform"
- select ARM_AMBA
- select ARM_GIC
- select CPU_V7
- select GENERIC_CLOCKEVENTS
- select ICST
- select MIGHT_HAVE_CACHE_L2X0
- select USE_OF
- help
- Support for Xilinx Zynq ARM Cortex A9 Platform
endchoice
menu "Multiple platform selection"
@@ -1069,7 +1066,6 @@ source "arch/arm/mach-mxs/Kconfig"
source "arch/arm/mach-netx/Kconfig"
source "arch/arm/mach-nomadik/Kconfig"
-source "arch/arm/plat-nomadik/Kconfig"
source "arch/arm/plat-omap/Kconfig"
@@ -1132,8 +1128,12 @@ source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/plat-versatile/Kconfig"
+source "arch/arm/mach-vt8500/Kconfig"
+
source "arch/arm/mach-w90x900/Kconfig"
+source "arch/arm/mach-zynq/Kconfig"
+
# Definitions to make life easier
config ARCH_ACORN
bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 04a3f0d1d05..661030d6bc6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -132,6 +132,23 @@ choice
their output to UART1 serial port on DaVinci TNETV107X
devices.
+ config DEBUG_ZYNQ_UART0
+ bool "Kernel low-level debugging on Xilinx Zynq using UART0"
+ depends on ARCH_ZYNQ
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART0 on the Zynq platform.
+
+ config DEBUG_ZYNQ_UART1
+ bool "Kernel low-level debugging on Xilinx Zynq using UART1"
+ depends on ARCH_ZYNQ
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART1 on the Zynq platform.
+
+ If you have a ZC702 board and want early boot messages to
+ appear on the USB serial adaptor, select this option.
+
config DEBUG_DC21285_PORT
bool "Kernel low-level debugging messages via footbridge serial port"
depends on FOOTBRIDGE
@@ -209,20 +226,12 @@ choice
Say Y here if you want kernel low-level debugging support
on i.MX50 or i.MX53.
- config DEBUG_IMX6Q_UART2
- bool "i.MX6Q Debug UART2"
+ config DEBUG_IMX6Q_UART
+ bool "i.MX6Q Debug UART"
depends on SOC_IMX6Q
help
Say Y here if you want kernel low-level debugging support
- on i.MX6Q UART2. This is correct for e.g. the SabreLite
- board.
-
- config DEBUG_IMX6Q_UART4
- bool "i.MX6Q Debug UART4"
- depends on SOC_IMX6Q
- help
- Say Y here if you want kernel low-level debugging support
- on i.MX6Q UART4.
+ on i.MX6Q.
config DEBUG_MMP_UART2
bool "Kernel low-level debugging message via MMP UART2"
@@ -370,6 +379,13 @@ choice
Say Y here if you want kernel low-level debugging support
on Allwinner A1X based platforms on the UART1.
+ config DEBUG_TEGRA_UART
+ depends on ARCH_TEGRA
+ bool "Use Tegra UART for low-level debug"
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Tegra based platforms.
+
config DEBUG_VEXPRESS_UART0_DETECT
bool "Autodetect UART0 on Versatile Express Cortex-A core tiles"
depends on ARCH_VEXPRESS && CPU_CP15_MMU
@@ -434,6 +450,45 @@ choice
endchoice
+config DEBUG_IMX6Q_UART_PORT
+ int "i.MX6Q Debug UART Port (1-5)" if DEBUG_IMX6Q_UART
+ range 1 5
+ default 1
+ depends on SOC_IMX6Q
+ help
+ Choose UART port on which kernel low-level debug messages
+ should be output.
+
+choice
+ prompt "Low-level debug console UART"
+ depends on DEBUG_LL && DEBUG_TEGRA_UART
+
+ config TEGRA_DEBUG_UART_AUTO_ODMDATA
+ bool "Via ODMDATA"
+ help
+ Automatically determines which UART to use for low-level debug based
+ on the ODMDATA value. This value is part of the BCT, and is written
+ to the boot memory device using nvflash or another flashing tool.
+ When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
+ 0/1/2/3/4 are UART A/B/C/D/E.
+
+ config TEGRA_DEBUG_UARTA
+ bool "UART A"
+
+ config TEGRA_DEBUG_UARTB
+ bool "UART B"
+
+ config TEGRA_DEBUG_UARTC
+ bool "UART C"
+
+ config TEGRA_DEBUG_UARTD
+ bool "UART D"
+
+ config TEGRA_DEBUG_UARTE
+ bool "UART E"
+
+endchoice
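Reviewer note, not part of the patch: the TEGRA_DEBUG_UART_AUTO_ODMDATA help text above describes the encoding (bits 19:18 == 3 selects the scheme, bits 17:15 pick UART A..E). A hedged sketch of just that bit arithmetic; the kernel's actual decoder lives elsewhere in the Tegra support code.

#include <stdio.h>

/* Decode the UART selector from an ODMDATA word as described above;
 * returns the value of bits 17:15 (0..4 map to UART A..E), or -1 if
 * bits 19:18 do not request UART selection. */
static int odmdata_to_uart(unsigned int odmdata)
{
	if (((odmdata >> 18) & 0x3) != 3)
		return -1;
	return (odmdata >> 15) & 0x7;
}

int main(void)
{
	unsigned int sample = (3u << 18) | (3u << 15);	/* hypothetical value */
	int u = odmdata_to_uart(sample);

	if (u >= 0)
		printf("UART %c\n", 'A' + u);
	return 0;
}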
+
config DEBUG_LL_INCLUDE
string
default "debug/icedcc.S" if DEBUG_ICEDCC
@@ -443,8 +498,7 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX31_IMX35_UART || \
DEBUG_IMX51_UART || \
DEBUG_IMX50_IMX53_UART ||\
- DEBUG_IMX6Q_UART2 || \
- DEBUG_IMX6Q_UART4
+ DEBUG_IMX6Q_UART
default "debug/highbank.S" if DEBUG_HIGHBANK_UART
default "debug/mvebu.S" if DEBUG_MVEBU_UART
default "debug/picoxcell.S" if DEBUG_PICOXCELL_UART
@@ -452,6 +506,8 @@ config DEBUG_LL_INCLUDE
default "debug/sunxi.S" if DEBUG_SUNXI_UART0 || DEBUG_SUNXI_UART1
default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \
DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1
+ default "debug/tegra.S" if DEBUG_TEGRA_UART
+ default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
default "mach/debug-macro.S"
config EARLY_PRINTK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9c60f474a55..30c443c406f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -202,7 +202,6 @@ machine-$(CONFIG_ARCH_SUNXI) += sunxi
plat-$(CONFIG_ARCH_OMAP) += omap
plat-$(CONFIG_ARCH_S3C64XX) += samsung
plat-$(CONFIG_PLAT_IOP) += iop
-plat-$(CONFIG_PLAT_NOMADIK) += nomadik
plat-$(CONFIG_PLAT_ORION) += orion
plat-$(CONFIG_PLAT_PXA) += pxa
plat-$(CONFIG_PLAT_S3C24XX) += s3c24xx samsung
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 49ca86e37b8..fe4d9c3ad76 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -44,7 +44,7 @@
#else
-#include <mach/debug-macro.S>
+#include CONFIG_DEBUG_LL_INCLUDE
.macro writeb, ch, rb
senduart \ch, \rb
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f3f2f80cdf3..e44da40d984 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -34,13 +34,17 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb
+dtb-$(CONFIG_ARCH_DAVINCI) += da850-enbw-cmc.dtb \
+ da850-evm.dtb
dtb-$(CONFIG_ARCH_DOVE) += dove-cm-a510.dtb \
dove-cubox.dtb \
dove-dove-db.dtb
dtb-$(CONFIG_ARCH_EXYNOS) += exynos4210-origen.dtb \
exynos4210-smdkv310.dtb \
exynos4210-trats.dtb \
+ exynos4412-smdk4412.dtb \
exynos5250-smdk5250.dtb \
+ exynos5250-snow.dtb \
exynos5440-ssdk5440.dtb
dtb-$(CONFIG_ARCH_HIGHBANK) += highbank.dtb \
ecx-2000.dtb
@@ -72,23 +76,29 @@ dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-dns320.dtb \
dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
msm8960-cdp.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
- armada-xp-db.dtb
+ armada-370-mirabox.dtb \
+ armada-xp-db.dtb \
+ armada-xp-openblocks-ax3-4.dtb
dtb-$(CONFIG_ARCH_MXC) += imx51-babbage.dtb \
imx53-ard.dtb \
imx53-evk.dtb \
imx53-qsb.dtb \
imx53-smd.dtb \
imx6q-arm2.dtb \
+ imx6q-sabreauto.dtb \
imx6q-sabrelite.dtb \
imx6q-sabresd.dtb
dtb-$(CONFIG_ARCH_MXS) += imx23-evk.dtb \
imx23-olinuxino.dtb \
imx23-stmp378x_devb.dtb \
+ imx28-apf28.dtb \
+ imx28-apf28dev.dtb \
imx28-apx4devkit.dtb \
imx28-cfa10036.dtb \
imx28-cfa10049.dtb \
imx28-evk.dtb \
imx28-m28evk.dtb \
+ imx28-sps1.dtb \
imx28-tx28.dtb
dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
omap3-beagle.dtb \
@@ -96,6 +106,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
omap3-evm.dtb \
omap3-tobi.dtb \
omap4-panda.dtb \
+ omap4-panda-a4.dtb \
omap4-panda-es.dtb \
omap4-var-som.dtb \
omap4-sdp.dtb \
@@ -105,7 +116,10 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
am335x-bone.dtb
dtb-$(CONFIG_ARCH_ORION5X) += orion5x-lacie-ethernet-disk-mini-v2.dtb
dtb-$(CONFIG_ARCH_PRIMA2) += prima2-evb.dtb
-dtb-$(CONFIG_ARCH_U8500) += snowball.dtb
+dtb-$(CONFIG_ARCH_U8500) += snowball.dtb \
+ hrefprev60.dtb \
+ hrefv60plus.dtb \
+ ccu9540.dtb
dtb-$(CONFIG_ARCH_SHMOBILE) += emev2-kzm9d.dtb \
r8a7740-armadillo800eva.dtb \
sh73a0-kzm9g.dtb \
@@ -114,10 +128,11 @@ dtb-$(CONFIG_ARCH_SPEAR13XX) += spear1310-evb.dtb \
spear1340-evb.dtb
dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
spear310-evb.dtb \
- spear320-evb.dtb
+ spear320-evb.dtb \
+ spear320-hmi.dtb
dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-cubieboard.dtb \
- sun5i-olinuxino.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+ sun5i-a13-olinuxino.dtb
dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
tegra20-medcom-wide.dtb \
tegra20-paz00.dtb \
@@ -137,6 +152,7 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
wm8505-ref.dtb \
wm8650-mid.dtb
+dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
targets += dtbs
endif
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index fffd5c2a304..00044026ef1 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -34,9 +34,30 @@
clock-frequency = <200000000>;
status = "okay";
};
- timer@d0020300 {
- clock-frequency = <600000000>;
+ sata@d00a0000 {
+ nr-ports = <2>;
status = "okay";
};
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
new file mode 100644
index 00000000000..3b407133659
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -0,0 +1,56 @@
+/*
+ * Device Tree file for Globalscale Mirabox
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-370.dtsi"
+
+/ {
+ model = "Globalscale Mirabox";
+ compatible = "globalscale,mirabox", "marvell,armada370", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>; /* 512 MB */
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ timer@d0020300 {
+ clock-frequency = <600000000>;
+ status = "okay";
+ };
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 16cc82cdaa8..cf6c48a09ea 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -20,7 +20,7 @@
/ {
model = "Marvell Armada 370 and XP SoC";
- compatible = "marvell,armada_370_xp";
+ compatible = "marvell,armada-370-xp";
cpus {
cpu@0 {
@@ -36,6 +36,12 @@
interrupt-controller;
};
+ coherency-fabric@d0020200 {
+ compatible = "marvell,coherency-fabric";
+ reg = <0xd0020200 0xb0>,
+ <0xd0021810 0x1c>;
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -62,12 +68,67 @@
compatible = "marvell,armada-370-xp-timer";
reg = <0xd0020300 0x30>;
interrupts = <37>, <38>, <39>, <40>;
+ clocks = <&coreclk 2>;
};
addr-decoding@d0020000 {
compatible = "marvell,armada-addr-decoding-controller";
reg = <0xd0020000 0x258>;
};
+
+ sata@d00a0000 {
+ compatible = "marvell,orion-sata";
+ reg = <0xd00a0000 0x2400>;
+ interrupts = <55>;
+ clocks = <&gateclk 15>, <&gateclk 30>;
+ clock-names = "0", "1";
+ status = "disabled";
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0xd0072004 0x4>;
+ };
+
+ ethernet@d0070000 {
+ compatible = "marvell,armada-370-neta";
+ reg = <0xd0070000 0x2500>;
+ interrupts = <8>;
+ clocks = <&gateclk 4>;
+ status = "disabled";
+ };
+
+ ethernet@d0074000 {
+ compatible = "marvell,armada-370-neta";
+ reg = <0xd0074000 0x2500>;
+ interrupts = <10>;
+ clocks = <&gateclk 3>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@d0011000 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0xd0011000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <31>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@d0011100 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0xd0011100 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <32>;
+ timeout-ms = <1000>;
+ clocks = <&coreclk 0>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 2069151afe0..636cf7d4009 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -20,6 +20,12 @@
/ {
model = "Marvell Armada 370 family SoC";
compatible = "marvell,armada370", "marvell,armada-370-xp";
+ L2: l2-cache {
+ compatible = "marvell,aurora-outer-cache";
+ reg = <0xd0008000 0x1000>;
+ cache-id-part = <0x100>;
+ wt-override;
+ };
aliases {
gpio0 = &gpio0;
@@ -75,5 +81,56 @@
#interrupts-cells = <2>;
interrupts = <91>;
};
+
+ coreclk: mvebu-sar@d0018230 {
+ compatible = "marvell,armada-370-core-clock";
+ reg = <0xd0018230 0x08>;
+ #clock-cells = <1>;
+ };
+
+ gateclk: clock-gating-control@d0018220 {
+ compatible = "marvell,armada-370-gating-clock";
+ reg = <0xd0018220 0x4>;
+ clocks = <&coreclk 0>;
+ #clock-cells = <1>;
+ };
+
+ xor@d0060800 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd0060800 0x100
+ 0xd0060A00 0x100>;
+ status = "okay";
+
+ xor00 {
+ interrupts = <51>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <52>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ xor@d0060900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd0060900 0x100
+ 0xd0060b00 0x100>;
+ status = "okay";
+
+ xor10 {
+ interrupts = <94>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor11 {
+ interrupts = <95>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index b1fc728515e..8e53b25b550 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -46,5 +46,49 @@
clock-frequency = <250000000>;
status = "okay";
};
+
+ sata@d00a0000 {
+ nr-ports = <2>;
+ status = "okay";
+ };
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <25>;
+ };
+
+ phy3: ethernet-phy@3 {
+ reg = <27>;
+ };
+ };
+
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+ };
+ ethernet@d0030000 {
+ status = "okay";
+ phy = <&phy2>;
+ phy-mode = "sgmii";
+ };
+ ethernet@d0034000 {
+ status = "okay";
+ phy = <&phy3>;
+ phy-mode = "sgmii";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index ea355192be6..c45c7b4dc35 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -24,6 +24,18 @@
gpio1 = &gpio1;
};
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+ };
+ };
+
soc {
pinctrl {
compatible = "marvell,mv78230-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 2057863f3df..a2aee570737 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -25,6 +25,25 @@
gpio2 = &gpio2;
};
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <1>;
+ clocks = <&cpuclk 1>;
+ };
+ };
+
soc {
pinctrl {
compatible = "marvell,mv78260-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index ffac9837379..da03a129243 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -25,6 +25,40 @@
gpio2 = &gpio2;
};
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <1>;
+ clocks = <&cpuclk 1>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <2>;
+ clocks = <&cpuclk 2>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "marvell,sheeva-v7";
+ reg = <3>;
+ clocks = <&cpuclk 3>;
+ };
+ };
+
soc {
pinctrl {
compatible = "marvell,mv78460-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
new file mode 100644
index 00000000000..b42652fd3d8
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -0,0 +1,125 @@
+/*
+ * Device Tree file for OpenBlocks AX3-4 board
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-xp-mv78260.dtsi"
+
+/ {
+ model = "PlatHome OpenBlocks AX3-4 board";
+ compatible = "plathome,openblocks-ax3-4", "marvell,armadaxp-mv78260", "marvell,armadaxp", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0xC0000000>; /* 3 GB */
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012100 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ pinctrl {
+ led_pins: led-pins-0 {
+ marvell,pins = "mpp49", "mpp51", "mpp53";
+ marvell,function = "gpio";
+ };
+ };
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ red_led {
+ label = "red_led";
+ gpios = <&gpio1 17 1>;
+ default-state = "off";
+ };
+
+ yellow_led {
+ label = "yellow_led";
+ gpios = <&gpio1 19 1>;
+ default-state = "off";
+ };
+
+ green_led {
+ label = "green_led";
+ gpios = <&gpio1 21 1>;
+ default-state = "off";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ };
+
+ ethernet@d0070000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "sgmii";
+ };
+ ethernet@d0074000 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "sgmii";
+ };
+ ethernet@d0030000 {
+ status = "okay";
+ phy = <&phy2>;
+ phy-mode = "sgmii";
+ };
+ ethernet@d0034000 {
+ status = "okay";
+ phy = <&phy3>;
+ phy-mode = "sgmii";
+ };
+ i2c@d0011000 {
+ status = "okay";
+ clock-frequency = <400000>;
+ };
+ i2c@d0011100 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ s35390a: s35390a@30 {
+ compatible = "s35390a";
+ reg = <0x30>;
+ };
+ };
+ sata@d00a0000 {
+ nr-ports = <2>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 71d6b5d0daf..367aa3f9491 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -22,9 +22,22 @@
model = "Marvell Armada XP family SoC";
compatible = "marvell,armadaxp", "marvell,armada-370-xp";
+ L2: l2-cache {
+ compatible = "marvell,aurora-system-cache";
+ reg = <0xd0008000 0x1000>;
+ cache-id-part = <0x100>;
+ wt-override;
+ };
+
mpic: interrupt-controller@d0020000 {
reg = <0xd0020a00 0x1d0>,
- <0xd0021870 0x58>;
+ <0xd0021070 0x58>;
+ };
+
+ armada-370-xp-pmsu@d0022000 {
+ compatible = "marvell,armada-370-xp-pmsu";
+ reg = <0xd0022100 0x430>,
+ <0xd0020800 0x20>;
};
soc {
@@ -47,9 +60,85 @@
marvell,timer-25Mhz;
};
+ coreclk: mvebu-sar@d0018230 {
+ compatible = "marvell,armada-xp-core-clock";
+ reg = <0xd0018230 0x08>;
+ #clock-cells = <1>;
+ };
+
+ cpuclk: clock-complex@d0018700 {
+ #clock-cells = <1>;
+ compatible = "marvell,armada-xp-cpu-clock";
+ reg = <0xd0018700 0xA0>;
+ clocks = <&coreclk 1>;
+ };
+
+ gateclk: clock-gating-control@d0018220 {
+ compatible = "marvell,armada-xp-gating-clock";
+ reg = <0xd0018220 0x4>;
+ clocks = <&coreclk 0>;
+ #clock-cells = <1>;
+ };
+
system-controller@d0018200 {
compatible = "marvell,armada-370-xp-system-controller";
reg = <0xd0018200 0x500>;
};
+
+ ethernet@d0030000 {
+ compatible = "marvell,armada-370-neta";
+ reg = <0xd0030000 0x2500>;
+ interrupts = <12>;
+ clocks = <&gateclk 2>;
+ status = "disabled";
+ };
+
+ ethernet@d0034000 {
+ compatible = "marvell,armada-370-neta";
+ reg = <0xd0034000 0x2500>;
+ interrupts = <14>;
+ clocks = <&gateclk 1>;
+ status = "disabled";
+ };
+
+ xor@d0060900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd0060900 0x100
+ 0xd0060b00 0x100>;
+ clocks = <&gateclk 22>;
+ status = "okay";
+
+ xor10 {
+ interrupts = <51>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor11 {
+ interrupts = <52>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ xor@d00f0900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xd00F0900 0x100
+ 0xd00F0B00 0x100>;
+ clocks = <&gateclk 28>;
+ status = "okay";
+
+ xor00 {
+ interrupts = <94>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <95>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index b1d3fab60e0..68bccf41a2c 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -29,6 +29,7 @@
tcb0 = &tcb0;
tcb1 = &tcb1;
i2c0 = &i2c0;
+ ssc0 = &ssc0;
};
cpus {
cpu@0 {
@@ -445,6 +446,13 @@
status = "disabled";
};
+ ssc0: ssc@fffbc000 {
+ compatible = "atmel,at91rm9200-ssc";
+ reg = <0xfffbc000 0x4000>;
+ interrupts = <14 4 5>;
+ status = "disabled";
+ };
+
adc0: adc@fffe0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffe0000 0x100>;
@@ -479,6 +487,12 @@
trigger-external;
};
};
+
+ watchdog@fffffd40 {
+ compatible = "atmel,at91sam9260-wdt";
+ reg = <0xfffffd40 0x10>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 66106eecf1e..32ec62cf538 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -25,6 +25,8 @@
gpio4 = &pioE;
tcb0 = &tcb0;
i2c0 = &i2c0;
+ ssc0 = &ssc0;
+ ssc1 = &ssc1;
};
cpus {
cpu@0 {
@@ -362,6 +364,20 @@
status = "disabled";
};
+ ssc0: ssc@fff98000 {
+ compatible = "atmel,at91rm9200-ssc";
+ reg = <0xfff98000 0x4000>;
+ interrupts = <16 4 5>;
+ status = "disabled";
+ };
+
+ ssc1: ssc@fff9c000 {
+ compatible = "atmel,at91rm9200-ssc";
+ reg = <0xfff9c000 0x4000>;
+ interrupts = <17 4 5>;
+ status = "disabled";
+ };
+
macb0: ethernet@fffbc000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
@@ -404,6 +420,12 @@
#size-cells = <0>;
status = "disabled";
};
+
+ watchdog@fffffd40 {
+ compatible = "atmel,at91sam9260-wdt";
+ reg = <0xfffffd40 0x10>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 32a500a0e48..da15e83e7f1 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -30,6 +30,16 @@
ahb {
apb {
+ pinctrl@fffff400 {
+ board {
+ pinctrl_pck0_as_mck: pck0_as_mck {
+ atmel,pins =
+ <2 1 0x2 0x0>; /* PC1 periph B */
+ };
+
+ };
+ };
+
dbgu: serial@fffff200 {
status = "okay";
};
@@ -81,6 +91,11 @@
};
};
};
+
+ ssc0: ssc@fffbc000 {
+ status = "okay";
+ pinctrl-0 = <&pinctrl_ssc0_tx>;
+ };
};
nand0: nand@40000000 {
@@ -144,7 +159,7 @@
reg = <0x50>;
};
- wm8731@1b {
+ wm8731: wm8731@1b {
compatible = "wm8731";
reg = <0x1b>;
};
@@ -169,4 +184,19 @@
gpio-key,wakeup;
};
};
+
+ sound {
+ compatible = "atmel,at91sam9g20ek-wm8731-audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_mck>;
+
+ atmel,model = "wm8731 @ AT91SAMG20EK";
+
+ atmel,audio-routing =
+ "Ext Spk", "LHPOUT",
+ "Int Mic", "MICIN";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8731>;
+ };
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 0741caeeced..231858ffd85 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -31,6 +31,8 @@
tcb1 = &tcb1;
i2c0 = &i2c0;
i2c1 = &i2c1;
+ ssc0 = &ssc0;
+ ssc1 = &ssc1;
};
cpus {
cpu@0 {
@@ -419,6 +421,20 @@
status = "disabled";
};
+ ssc0: ssc@fff9c000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xfff9c000 0x4000>;
+ interrupts = <16 4 5>;
+ status = "disabled";
+ };
+
+ ssc1: ssc@fffa0000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xfffa0000 0x4000>;
+ interrupts = <17 4 5>;
+ status = "disabled";
+ };
+
adc0: adc@fffb0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffb0000 0x100>;
@@ -473,6 +489,12 @@
#size-cells = <0>;
status = "disabled";
};
+
+ watchdog@fffffd40 {
+ compatible = "atmel,at91sam9260-wdt";
+ reg = <0xfffffd40 0x10>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 7ee49e8daf9..40ac3a4eb1a 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -30,6 +30,7 @@
i2c0 = &i2c0;
i2c1 = &i2c1;
i2c2 = &i2c2;
+ ssc0 = &ssc0;
};
cpus {
cpu@0 {
@@ -87,6 +88,13 @@
interrupts = <1 4 7>;
};
+ ssc0: ssc@f0010000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xf0010000 0x4000>;
+ interrupts = <28 4 5>;
+ status = "disabled";
+ };
+
tcb0: timer@f8008000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf8008000 0x100>;
diff --git a/arch/arm/boot/dts/ccu9540.dts b/arch/arm/boot/dts/ccu9540.dts
new file mode 100644
index 00000000000..04305463f00
--- /dev/null
+++ b/arch/arm/boot/dts/ccu9540.dts
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "dbx5x0.dtsi"
+
+/ {
+ model = "ST-Ericsson CCU9540 platform with Device Tree";
+ compatible = "st-ericsson,ccu9540", "st-ericsson,u9540";
+
+ memory {
+ reg = <0x00000000 0x20000000>;
+ };
+
+ soc-u9500 {
+ uart@80120000 {
+ status = "okay";
+ };
+
+ uart@80121000 {
+ status = "okay";
+ };
+
+ uart@80007000 {
+ status = "okay";
+ };
+
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ mmc-cap-sd-highspeed;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ cd-gpios = <&gpio7 6 0x4>; // 230
+ cd-inverted;
+
+ status = "okay";
+ };
+
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+
+ status = "okay";
+ };
+
+ // On-board eMMC
+ sdi4_per2@80114000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux2_reg>;
+
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
new file mode 100644
index 00000000000..fddd1741743
--- /dev/null
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -0,0 +1,184 @@
+/*
+ * Common device tree include for all Exynos 5250 boards based on Daisy.
+ *
+ * Copyright (c) 2012 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/ {
+ aliases {
+ };
+
+ memory {
+ reg = <0x40000000 0x80000000>;
+ };
+
+ chosen {
+ };
+
+ i2c@12C60000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <378000>;
+ gpios = <&gpb3 0 2 3 0>,
+ <&gpb3 1 2 3 0>;
+ };
+
+ i2c@12C70000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <378000>;
+ gpios = <&gpb3 2 2 3 0>,
+ <&gpb3 3 2 3 0>;
+ };
+
+ i2c@12C80000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+
+ /*
+ * Disabled pullups since external part has its own pullups and
+ * double-pulling gets us out of spec in some cases.
+ */
+ gpios = <&gpa0 6 3 0 0>,
+ <&gpa0 7 3 0 0>;
+
+ hdmiddc@50 {
+ compatible = "samsung,exynos5-hdmiddc";
+ reg = <0x50>;
+ };
+ };
+
+ i2c@12C90000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ gpios = <&gpa1 2 3 3 0>,
+ <&gpa1 3 3 3 0>;
+ };
+
+ i2c@12CA0000 {
+ status = "disabled";
+ };
+
+ i2c@12CB0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ gpios = <&gpa2 2 3 3 0>,
+ <&gpa2 3 3 3 0>;
+ };
+
+ i2c@12CC0000 {
+ status = "disabled";
+ };
+
+ i2c@12CD0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ gpios = <&gpb2 2 3 3 0>,
+ <&gpb2 3 3 3 0>;
+ };
+
+ i2c@12CE0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <378000>;
+
+ hdmiphy@38 {
+ compatible = "samsung,exynos5-hdmiphy";
+ reg = <0x38>;
+ };
+ };
+
+ dwmmc0@12200000 {
+ num-slots = <1>;
+ supports-highspeed;
+ broken-cd;
+ fifo-depth = <0x80>;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3 3>;
+ samsung,dw-mshc-ddr-timing = <1 2 3>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <8>;
+ gpios = <&gpc0 0 2 0 3>, <&gpc0 1 2 0 3>,
+ <&gpc1 0 2 3 3>, <&gpc1 1 2 3 3>,
+ <&gpc1 2 2 3 3>, <&gpc1 3 2 3 3>,
+ <&gpc0 3 2 3 3>, <&gpc0 4 2 3 3>,
+ <&gpc0 5 2 3 3>, <&gpc0 6 2 3 3>;
+ };
+ };
+
+ dwmmc1@12210000 {
+ status = "disabled";
+ };
+
+ dwmmc2@12220000 {
+ num-slots = <1>;
+ supports-highspeed;
+ fifo-depth = <0x80>;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3 3>;
+ samsung,dw-mshc-ddr-timing = <1 2 3>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ samsung,cd-pinmux-gpio = <&gpc3 2 2 3 3>;
+ wp-gpios = <&gpc2 1 0 0 3>;
+ gpios = <&gpc3 0 2 0 3>, <&gpc3 1 2 0 3>,
+ <&gpc3 3 2 3 3>, <&gpc3 4 2 3 3>,
+ <&gpc3 5 2 3 3>, <&gpc3 6 2 3 3>;
+ };
+ };
+
+ dwmmc3@12230000 {
+ num-slots = <1>;
+ supports-highspeed;
+ broken-cd;
+ fifo-depth = <0x80>;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3 3>;
+ samsung,dw-mshc-ddr-timing = <1 2 3>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ /* See board-specific dts files for GPIOs */
+ };
+ };
+
+ spi_0: spi@12d20000 {
+ status = "disabled";
+ };
+
+ spi_1: spi@12d30000 {
+ gpios = <&gpa2 4 2 3 0>,
+ <&gpa2 6 2 3 0>,
+ <&gpa2 7 2 3 0>;
+ samsung,spi-src-clk = <0>;
+ num-cs = <1>;
+ };
+
+ spi_2: spi@12d40000 {
+ status = "disabled";
+ };
+
+ hdmi {
+ hpd-gpio = <&gpx3 7 0xf 1 3>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ label = "Power";
+ gpios = <&gpx1 3 0 0x10000 0>;
+ linux,code = <116>; /* KEY_POWER */
+ gpio-key,wakeup;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 731086b2fca..2efd9c891bc 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -217,123 +217,103 @@
// DB8500_REGULATOR_VAPE
db8500_vape_reg: db8500_vape {
regulator-compatible = "db8500_vape";
- regulator-name = "db8500-vape";
regulator-always-on;
};
// DB8500_REGULATOR_VARM
db8500_varm_reg: db8500_varm {
regulator-compatible = "db8500_varm";
- regulator-name = "db8500-varm";
};
// DB8500_REGULATOR_VMODEM
db8500_vmodem_reg: db8500_vmodem {
regulator-compatible = "db8500_vmodem";
- regulator-name = "db8500-vmodem";
};
// DB8500_REGULATOR_VPLL
db8500_vpll_reg: db8500_vpll {
regulator-compatible = "db8500_vpll";
- regulator-name = "db8500-vpll";
};
// DB8500_REGULATOR_VSMPS1
db8500_vsmps1_reg: db8500_vsmps1 {
regulator-compatible = "db8500_vsmps1";
- regulator-name = "db8500-vsmps1";
};
// DB8500_REGULATOR_VSMPS2
db8500_vsmps2_reg: db8500_vsmps2 {
regulator-compatible = "db8500_vsmps2";
- regulator-name = "db8500-vsmps2";
};
// DB8500_REGULATOR_VSMPS3
db8500_vsmps3_reg: db8500_vsmps3 {
regulator-compatible = "db8500_vsmps3";
- regulator-name = "db8500-vsmps3";
};
// DB8500_REGULATOR_VRF1
db8500_vrf1_reg: db8500_vrf1 {
regulator-compatible = "db8500_vrf1";
- regulator-name = "db8500-vrf1";
};
// DB8500_REGULATOR_SWITCH_SVAMMDSP
db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
regulator-compatible = "db8500_sva_mmdsp";
- regulator-name = "db8500-sva-mmdsp";
};
// DB8500_REGULATOR_SWITCH_SVAMMDSPRET
db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
regulator-compatible = "db8500_sva_mmdsp_ret";
- regulator-name = "db8500-sva-mmdsp-ret";
};
// DB8500_REGULATOR_SWITCH_SVAPIPE
db8500_sva_pipe_reg: db8500_sva_pipe {
regulator-compatible = "db8500_sva_pipe";
- regulator-name = "db8500_sva_pipe";
};
// DB8500_REGULATOR_SWITCH_SIAMMDSP
db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
regulator-compatible = "db8500_sia_mmdsp";
- regulator-name = "db8500_sia_mmdsp";
};
// DB8500_REGULATOR_SWITCH_SIAMMDSPRET
db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
- regulator-name = "db8500-sia-mmdsp-ret";
};
// DB8500_REGULATOR_SWITCH_SIAPIPE
db8500_sia_pipe_reg: db8500_sia_pipe {
regulator-compatible = "db8500_sia_pipe";
- regulator-name = "db8500-sia-pipe";
};
// DB8500_REGULATOR_SWITCH_SGA
db8500_sga_reg: db8500_sga {
regulator-compatible = "db8500_sga";
- regulator-name = "db8500-sga";
vin-supply = <&db8500_vape_reg>;
};
// DB8500_REGULATOR_SWITCH_B2R2_MCDE
db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
regulator-compatible = "db8500_b2r2_mcde";
- regulator-name = "db8500-b2r2-mcde";
vin-supply = <&db8500_vape_reg>;
};
// DB8500_REGULATOR_SWITCH_ESRAM12
db8500_esram12_reg: db8500_esram12 {
regulator-compatible = "db8500_esram12";
- regulator-name = "db8500-esram12";
};
// DB8500_REGULATOR_SWITCH_ESRAM12RET
db8500_esram12_ret_reg: db8500_esram12_ret {
regulator-compatible = "db8500_esram12_ret";
- regulator-name = "db8500-esram12-ret";
};
// DB8500_REGULATOR_SWITCH_ESRAM34
db8500_esram34_reg: db8500_esram34 {
regulator-compatible = "db8500_esram34";
- regulator-name = "db8500-esram34";
};
// DB8500_REGULATOR_SWITCH_ESRAM34RET
db8500_esram34_ret_reg: db8500_esram34_ret {
regulator-compatible = "db8500_esram34_ret";
- regulator-name = "db8500-esram34-ret";
};
};
@@ -360,7 +340,33 @@
vddadc-supply = <&ab8500_ldo_tvout_reg>;
};
- ab8500-usb {
+ ab8500_battery: ab8500_battery {
+ stericsson,battery-type = "LIPO";
+ thermistor-on-batctrl;
+ };
+
+ ab8500_fg {
+ compatible = "stericsson,ab8500-fg";
+ battery = <&ab8500_battery>;
+ };
+
+ ab8500_btemp {
+ compatible = "stericsson,ab8500-btemp";
+ battery = <&ab8500_battery>;
+ };
+
+ ab8500_charger {
+ compatible = "stericsson,ab8500-charger";
+ battery = <&ab8500_battery>;
+ vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ };
+
+ ab8500_chargalg {
+ compatible = "stericsson,ab8500-chargalg";
+ battery = <&ab8500_battery>;
+ };
+
+ ab8500_usb {
compatible = "stericsson,ab8500-usb";
interrupts = < 90 0x4
96 0x4
@@ -412,7 +418,6 @@
// supplies to the display/camera
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-compatible = "ab8500_ldo_aux1";
- regulator-name = "V-DISPLAY";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2900000>;
regulator-boot-on;
@@ -423,7 +428,6 @@
// supplies to the on-board eMMC
ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
regulator-compatible = "ab8500_ldo_aux2";
- regulator-name = "V-eMMC1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <3300000>;
};
@@ -431,7 +435,6 @@
// supply for VAUX3; SDcard slots
ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
regulator-compatible = "ab8500_ldo_aux3";
- regulator-name = "V-MMC-SD";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <3300000>;
};
@@ -439,49 +442,41 @@
// supply for v-intcore12; VINTCORE12 LDO
ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
regulator-compatible = "ab8500_ldo_initcore";
- regulator-name = "V-INTCORE";
};
// supply for tvout; gpadc; TVOUT LDO
ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
regulator-compatible = "ab8500_ldo_tvout";
- regulator-name = "V-TVOUT";
};
// supply for ab8500-usb; USB LDO
ab8500_ldo_usb_reg: ab8500_ldo_usb {
regulator-compatible = "ab8500_ldo_usb";
- regulator-name = "dummy";
};
// supply for ab8500-vaudio; VAUDIO LDO
ab8500_ldo_audio_reg: ab8500_ldo_audio {
regulator-compatible = "ab8500_ldo_audio";
- regulator-name = "V-AUD";
};
// supply for v-anamic1 VAMic1-LDO
ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
regulator-compatible = "ab8500_ldo_anamic1";
- regulator-name = "V-AMIC1";
};
// supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
regulator-compatible = "ab8500_ldo_amamic2";
- regulator-name = "V-AMIC2";
};
// supply for v-dmic; VDMIC LDO
ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
regulator-compatible = "ab8500_ldo_dmic";
- regulator-name = "V-DMIC";
};
// supply for U8500 CSI/DSI; VANA LDO
ab8500_ldo_ana_reg: ab8500_ldo_ana {
regulator-compatible = "ab8500_ldo_ana";
- regulator-name = "V-CSI/DSI";
};
};
};
@@ -585,42 +580,42 @@
status = "disabled";
};
- sdi@80126000 {
+ sdi0_per1@80126000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80126000 0x1000>;
interrupts = <0 60 0x4>;
status = "disabled";
};
- sdi@80118000 {
+ sdi1_per2@80118000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80118000 0x1000>;
interrupts = <0 50 0x4>;
status = "disabled";
};
- sdi@80005000 {
+ sdi2_per3@80005000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80005000 0x1000>;
interrupts = <0 41 0x4>;
status = "disabled";
};
- sdi@80119000 {
+ sdi3_per2@80119000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80119000 0x1000>;
interrupts = <0 59 0x4>;
status = "disabled";
};
- sdi@80114000 {
+ sdi4_per2@80114000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80114000 0x1000>;
interrupts = <0 99 0x4>;
status = "disabled";
};
- sdi@80008000 {
+ sdi5_per3@80008000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80008000 0x1000>;
interrupts = <0 100 0x4>;
@@ -674,5 +669,18 @@
status = "disabled";
};
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2600000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index 0adbd5a3809..fed7d3f9f43 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -40,3 +40,13 @@
reg = <0>;
};
};
+
+&pinctrl {
+ pinctrl-0 = <&pmx_gpio_18>;
+ pinctrl-names = "default";
+
+ pmx_gpio_18: pmx-gpio-18 {
+ marvell,pins = "mpp18";
+ marvell,function = "gpio";
+ };
+};
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 5a00022383e..f3f7e9d8adc 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -4,6 +4,12 @@
compatible = "marvell,dove";
model = "Marvell Armada 88AP510 SoC";
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ };
+
soc@f1000000 {
compatible = "simple-bus";
#address-cells = <1>;
@@ -31,6 +37,19 @@
reg = <0x20204 0x04>, <0x20214 0x04>;
};
+ core_clk: core-clocks@d0214 {
+ compatible = "marvell,dove-core-clock";
+ reg = <0xd0214 0x4>;
+ #clock-cells = <1>;
+ };
+
+ gate_clk: clock-gating-control@d0038 {
+ compatible = "marvell,dove-gating-clock";
+ reg = <0xd0038 0x4>;
+ clocks = <&core_clk 0>;
+ #clock-cells = <1>;
+ };
+
uart0: serial@12000 {
compatible = "ns16550a";
reg = <0x12000 0x100>;
@@ -72,7 +91,8 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0xd0400 0x20>;
- ngpio = <32>;
+ ngpios = <32>;
+ interrupt-controller;
interrupts = <12>, <13>, <14>, <60>;
};
@@ -81,7 +101,8 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0xd0420 0x20>;
- ngpio = <32>;
+ ngpios = <32>;
+ interrupt-controller;
interrupts = <61>;
};
@@ -90,7 +111,12 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0xe8400 0x0c>;
- ngpio = <8>;
+ ngpios = <8>;
+ };
+
+ pinctrl: pinctrl@d0200 {
+ compatible = "marvell,dove-pinctrl";
+ reg = <0xd0200 0x10>;
};
spi0: spi@10600 {
@@ -100,6 +126,7 @@
cell-index = <0>;
interrupts = <6>;
reg = <0x10600 0x28>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -110,6 +137,7 @@
cell-index = <1>;
interrupts = <5>;
reg = <0x14600 0x28>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -121,6 +149,7 @@
interrupts = <11>;
clock-frequency = <400000>;
timeout-ms = <1000>;
+ clocks = <&core_clk 0>;
status = "disabled";
};
@@ -128,6 +157,7 @@
compatible = "marvell,dove-sdhci";
reg = <0x92000 0x100>;
interrupts = <35>, <37>;
+ clocks = <&gate_clk 8>;
status = "disabled";
};
@@ -135,6 +165,7 @@
compatible = "marvell,dove-sdhci";
reg = <0x90000 0x100>;
interrupts = <36>, <38>;
+ clocks = <&gate_clk 9>;
status = "disabled";
};
@@ -142,6 +173,7 @@
compatible = "marvell,orion-sata";
reg = <0xa0000 0x2400>;
interrupts = <62>;
+ clocks = <&gate_clk 3>;
nr-ports = <1>;
status = "disabled";
};
@@ -152,7 +184,50 @@
<0xc8000000 0x800>;
reg-names = "regs", "sram";
interrupts = <31>;
+ clocks = <&gate_clk 15>;
+ status = "okay";
+ };
+
+ xor0: dma-engine@60800 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60800 0x100
+ 0x60a00 0x100>;
+ clocks = <&gate_clk 23>;
+ status = "okay";
+
+ channel0 {
+ interrupts = <39>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+
+ channel1 {
+ interrupts = <40>;
+ dmacap,memset;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ };
+
+ xor1: dma-engine@60900 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60900 0x100
+ 0x60b00 0x100>;
+ clocks = <&gate_clk 24>;
status = "okay";
+
+ channel0 {
+ interrupts = <42>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+
+ channel1 {
+ interrupts = <43>;
+ dmacap,memset;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/evk-pro3.dts b/arch/arm/boot/dts/evk-pro3.dts
index b7354e6506d..96e50f56943 100644
--- a/arch/arm/boot/dts/evk-pro3.dts
+++ b/arch/arm/boot/dts/evk-pro3.dts
@@ -22,10 +22,22 @@
status = "okay";
};
+ usart0: serial@fffb0000 {
+ status = "okay";
+ };
+
+ usart2: serial@fffb8000 {
+ status = "okay";
+ };
+
usb1: gadget@fffa4000 {
atmel,vbus-gpio = <&pioC 5 0>;
status = "okay";
};
+
+ watchdog@fffffd40 {
+ status = "okay";
+ };
};
usb0: ohci@00500000 {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 96d4462730f..e1347fceb5b 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -28,6 +28,44 @@
spi0 = &spi_0;
spi1 = &spi_1;
spi2 = &spi_2;
+ i2c0 = &i2c_0;
+ i2c1 = &i2c_1;
+ i2c2 = &i2c_2;
+ i2c3 = &i2c_3;
+ i2c4 = &i2c_4;
+ i2c5 = &i2c_5;
+ i2c6 = &i2c_6;
+ i2c7 = &i2c_7;
+ };
+
+ pd_mfc: mfc-power-domain@10023C40 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023C40 0x20>;
+ };
+
+ pd_g3d: g3d-power-domain@10023C60 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023C60 0x20>;
+ };
+
+ pd_lcd0: lcd0-power-domain@10023C80 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023C80 0x20>;
+ };
+
+ pd_tv: tv-power-domain@10023C20 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023C20 0x20>;
+ };
+
+ pd_cam: cam-power-domain@10023C00 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023C00 0x20>;
+ };
+
+ pd_gps: gps-power-domain@10023CE0 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023CE0 0x20>;
};
gic:interrupt-controller@10490000 {
@@ -121,7 +159,7 @@
status = "disabled";
};
- i2c@13860000 {
+ i2c_0: i2c@13860000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -130,7 +168,7 @@
status = "disabled";
};
- i2c@13870000 {
+ i2c_1: i2c@13870000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -139,7 +177,7 @@
status = "disabled";
};
- i2c@13880000 {
+ i2c_2: i2c@13880000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -148,7 +186,7 @@
status = "disabled";
};
- i2c@13890000 {
+ i2c_3: i2c@13890000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -157,7 +195,7 @@
status = "disabled";
};
- i2c@138A0000 {
+ i2c_4: i2c@138A0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -166,7 +204,7 @@
status = "disabled";
};
- i2c@138B0000 {
+ i2c_5: i2c@138B0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -175,7 +213,7 @@
status = "disabled";
};
- i2c@138C0000 {
+ i2c_6: i2c@138C0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
@@ -184,7 +222,7 @@
status = "disabled";
};
- i2c@138D0000 {
+ i2c_7: i2c@138D0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "samsung,s3c2440-i2c";
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index 3e68f52e845..f2710018e84 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -22,38 +22,54 @@
compatible = "insignal,origen", "samsung,exynos4210";
memory {
- reg = <0x40000000 0x40000000>;
+ reg = <0x40000000 0x10000000
+ 0x50000000 0x10000000
+ 0x60000000 0x10000000
+ 0x70000000 0x10000000>;
};
chosen {
bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
};
+ mmc_reg: voltage-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "VMEM_VDD_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpx1 1 0>;
+ enable-active-high;
+ };
+
sdhci@12530000 {
- samsung,sdhci-bus-width = <4>;
- linux,mmc_cap_4_bit_data;
- samsung,sdhci-cd-internal;
- gpio-cd = <&gpk2 2 2 3 3>;
- gpios = <&gpk2 0 2 0 3>,
- <&gpk2 1 2 0 3>,
- <&gpk2 3 2 3 3>,
- <&gpk2 4 2 3 3>,
- <&gpk2 5 2 3 3>,
- <&gpk2 6 2 3 3>;
+ bus-width = <4>;
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4 &sd2_cd>;
+ pinctrl-names = "default";
+ vmmc-supply = <&mmc_reg>;
status = "okay";
};
sdhci@12510000 {
- samsung,sdhci-bus-width = <4>;
- linux,mmc_cap_4_bit_data;
- samsung,sdhci-cd-internal;
- gpio-cd = <&gpk0 2 2 3 3>;
- gpios = <&gpk0 0 2 0 3>,
- <&gpk0 1 2 0 3>,
- <&gpk0 3 2 3 3>,
- <&gpk0 4 2 3 3>,
- <&gpk0 5 2 3 3>,
- <&gpk0 6 2 3 3>;
+ bus-width = <4>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>;
+ pinctrl-names = "default";
+ vmmc-supply = <&mmc_reg>;
+ status = "okay";
+ };
+
+ serial@13800000 {
+ status = "okay";
+ };
+
+ serial@13810000 {
+ status = "okay";
+ };
+
+ serial@13820000 {
+ status = "okay";
+ };
+
+ serial@13830000 {
status = "okay";
};
@@ -64,35 +80,35 @@
up {
label = "Up";
- gpios = <&gpx2 0 0 0x10000 2>;
+ gpios = <&gpx2 0 1>;
linux,code = <103>;
gpio-key,wakeup;
};
down {
label = "Down";
- gpios = <&gpx2 1 0 0x10000 2>;
+ gpios = <&gpx2 1 1>;
linux,code = <108>;
gpio-key,wakeup;
};
back {
label = "Back";
- gpios = <&gpx1 7 0 0x10000 2>;
+ gpios = <&gpx1 7 1>;
linux,code = <158>;
gpio-key,wakeup;
};
home {
label = "Home";
- gpios = <&gpx1 6 0 0x10000 2>;
+ gpios = <&gpx1 6 1>;
linux,code = <102>;
gpio-key,wakeup;
};
menu {
label = "Menu";
- gpios = <&gpx1 5 0 0x10000 2>;
+ gpios = <&gpx1 5 1>;
linux,code = <139>;
gpio-key,wakeup;
};
@@ -101,7 +117,7 @@
leds {
compatible = "gpio-leds";
status {
- gpios = <&gpx1 3 0 0x10000 2>;
+ gpios = <&gpx1 3 1>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
index 6a4a1a04221..55a2efb763d 100644
--- a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
@@ -482,196 +482,196 @@
samsung,pins = "gpk0-0";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd0_cmd: sd0-cmd {
samsung,pins = "gpk0-1";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd0_cd: sd0-cd {
samsung,pins = "gpk0-2";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd0_bus1: sd0-bus-width1 {
samsung,pins = "gpk0-3";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd0_bus4: sd0-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd0_bus8: sd0-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_clk: sd4-clk {
samsung,pins = "gpk0-0";
samsung,pin-function = <3>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_cmd: sd4-cmd {
samsung,pins = "gpk0-1";
samsung,pin-function = <3>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_cd: sd4-cd {
samsung,pins = "gpk0-2";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_bus1: sd4-bus-width1 {
samsung,pins = "gpk0-3";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_bus4: sd4-bus-width4 {
samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd4_bus8: sd4-bus-width8 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
samsung,pin-function = <3>;
samsung,pin-pud = <4>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd1_clk: sd1-clk {
samsung,pins = "gpk1-0";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd1_cmd: sd1-cmd {
samsung,pins = "gpk1-1";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd1_cd: sd1-cd {
samsung,pins = "gpk1-2";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd1_bus1: sd1-bus-width1 {
samsung,pins = "gpk1-3";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd1_bus4: sd1-bus-width4 {
samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_clk: sd2-clk {
samsung,pins = "gpk2-0";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_cmd: sd2-cmd {
samsung,pins = "gpk2-1";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_cd: sd2-cd {
samsung,pins = "gpk2-2";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_bus1: sd2-bus-width1 {
samsung,pins = "gpk2-3";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_bus4: sd2-bus-width4 {
samsung,pins = "gpk2-3", "gpk2-4", "gpk2-5", "gpk2-6";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd2_bus8: sd2-bus-width8 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
samsung,pin-function = <3>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd3_clk: sd3-clk {
samsung,pins = "gpk3-0";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd3_cmd: sd3-cmd {
samsung,pins = "gpk3-1";
samsung,pin-function = <2>;
samsung,pin-pud = <0>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd3_cd: sd3-cd {
samsung,pins = "gpk3-2";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd3_bus1: sd3-bus-width1 {
samsung,pins = "gpk3-3";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
sd3_bus4: sd3-bus-width4 {
samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
samsung,pin-function = <2>;
samsung,pin-pud = <3>;
- samsung,pin-drv = <0>;
+ samsung,pin-drv = <3>;
};
eint0: ext-int0 {
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 63610c3ba3a..9b23a8255e3 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -43,6 +43,22 @@
status = "okay";
};
+ serial@13800000 {
+ status = "okay";
+ };
+
+ serial@13810000 {
+ status = "okay";
+ };
+
+ serial@13820000 {
+ status = "okay";
+ };
+
+ serial@13830000 {
+ status = "okay";
+ };
+
keypad@100A0000 {
samsung,keypad-num-rows = <2>;
samsung,keypad-num-columns = <8>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index a21511c1407..c346b64dff5 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -35,24 +35,15 @@
regulator-name = "VMEM_VDD_2.8V";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
- gpio = <&gpk0 2 1 0 0>;
+ gpio = <&gpk0 2 0>;
enable-active-high;
};
sdhci_emmc: sdhci@12510000 {
bus-width = <8>;
non-removable;
- broken-voltage;
- gpios = <&gpk0 0 2 0 3>,
- <&gpk0 1 2 0 3>,
- <&gpk0 3 2 2 3>,
- <&gpk0 4 2 2 3>,
- <&gpk0 5 2 2 3>,
- <&gpk0 6 2 2 3>,
- <&gpk1 3 3 3 3>,
- <&gpk1 4 3 3 3>,
- <&gpk1 5 3 3 3>,
- <&gpk1 6 3 3 3>;
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus8>;
+ pinctrl-names = "default";
vmmc-supply = <&vemmc_reg>;
status = "okay";
};
@@ -73,12 +64,74 @@
status = "okay";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ vol-down-key {
+ gpios = <&gpx2 1 1>;
+ linux,code = <114>;
+ label = "volume down";
+ debounce-interval = <10>;
+ };
+
+ vol-up-key {
+ gpios = <&gpx2 0 1>;
+ linux,code = <115>;
+ label = "volume up";
+ debounce-interval = <10>;
+ };
+
+ power-key {
+ gpios = <&gpx2 7 1>;
+ linux,code = <116>;
+ label = "power";
+ debounce-interval = <10>;
+ gpio-key,wakeup;
+ };
+
+ ok-key {
+ gpios = <&gpx3 5 1>;
+ linux,code = <352>;
+ label = "ok";
+ debounce-interval = <10>;
+ };
+ };
+
+ tsp_reg: voltage-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "TSP_FIXED_VOLTAGES";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpl0 3 0>;
+ enable-active-high;
+ };
+
+ i2c@13890000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-slave-addr = <0x10>;
+ samsung,i2c-max-bus-freq = <400000>;
+ pinctrl-0 = <&i2c3_bus>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ mms114-touchscreen@48 {
+ compatible = "melfas,mms114";
+ reg = <0x48>;
+ interrupt-parent = <&gpx0>;
+ interrupts = <4 2>;
+ x-size = <720>;
+ y-size = <1280>;
+ avdd-supply = <&tsp_reg>;
+ vdd-supply = <&tsp_reg>;
+ };
+ };
+
i2c@138B0000 {
samsung,i2c-sda-delay = <100>;
samsung,i2c-slave-addr = <0x10>;
samsung,i2c-max-bus-freq = <100000>;
- gpios = <&gpb 6 3 3 0>,
- <&gpb 7 3 3 0>;
+ pinctrl-0 = <&i2c5_bus>;
+ pinctrl-names = "default";
status = "okay";
max8997_pmic@66 {
@@ -93,9 +146,9 @@
max8997,pmic-ignore-gpiodvs-side-effect;
max8997,pmic-buck125-default-dvs-idx = <0>;
- max8997,pmic-buck125-dvs-gpios = <&gpx0 5 1 0 0>,
- <&gpx0 6 1 0 0>,
- <&gpl0 0 1 0 0>;
+ max8997,pmic-buck125-dvs-gpios = <&gpx0 5 0>,
+ <&gpx0 6 0>,
+ <&gpl0 0 0>;
max8997,pmic-buck1-dvs-voltage = <1350000>, <1300000>,
<1250000>, <1200000>,
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index d877dbe7ac0..e31bfc4a6f0 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -31,6 +31,11 @@
pinctrl2 = &pinctrl_2;
};
+ pd_lcd1: lcd1-power-domain@10023CA0 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023CA0 0x20>;
+ };
+
gic:interrupt-controller@10490000 {
cpu-offset = <0x8000>;
};
@@ -64,4 +69,11 @@
compatible = "samsung,pinctrl-exynos4210";
reg = <0x03860000 0x1000>;
};
+
+ tmu@100C0000 {
+ compatible = "samsung,exynos4210-tmu";
+ interrupt-parent = <&combiner>;
+ reg = <0x100C0000 0x100>;
+ interrupts = <2 4>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi
new file mode 100644
index 00000000000..c6ae2005961
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4212.dtsi
@@ -0,0 +1,28 @@
+/*
+ * Samsung's Exynos4212 SoC device tree source
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung's Exynos4212 SoC device nodes are listed in this file. Exynos4212
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * Exynos4212 SoC. As device tree coverage for Exynos4212 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/include/ "exynos4x12.dtsi"
+
+/ {
+ compatible = "samsung,exynos4212";
+
+ gic:interrupt-controller@10490000 {
+ cpu-offset = <0x8000>;
+ };
+};
diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
new file mode 100644
index 00000000000..f05bf575cc4
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
@@ -0,0 +1,45 @@
+/*
+ * Samsung's Exynos4412 based SMDK board device tree source
+ *
+ * Copyright (c) 2012-2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Device tree source file for Samsung's SMDK4412 board which is based on
+ * Samsung's Exynos4412 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+/include/ "exynos4412.dtsi"
+
+/ {
+ model = "Samsung SMDK evaluation board based on Exynos4412";
+ compatible = "samsung,smdk4412", "samsung,exynos4412";
+
+ memory {
+ reg = <0x40000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
+ };
+
+ serial@13800000 {
+ status = "okay";
+ };
+
+ serial@13810000 {
+ status = "okay";
+ };
+
+ serial@13820000 {
+ status = "okay";
+ };
+
+ serial@13830000 {
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
new file mode 100644
index 00000000000..d7dfe312772
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -0,0 +1,28 @@
+/*
+ * Samsung's Exynos4412 SoC device tree source
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung's Exynos4412 SoC device nodes are listed in this file. Exynos4412
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * Exynos4412 SoC. As device tree coverage for Exynos4412 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/include/ "exynos4x12.dtsi"
+
+/ {
+ compatible = "samsung,exynos4412";
+
+ gic:interrupt-controller@10490000 {
+ cpu-offset = <0x4000>;
+ };
+};
diff --git a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
new file mode 100644
index 00000000000..8e6115adcd9
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
@@ -0,0 +1,965 @@
+/*
+ * Samsung's Exynos4x12 SoCs pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung's Exynos4x12 SoCs pin-mux and pin-config options are listed as device
+ * tree nodes in this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/ {
+ pinctrl@11400000 {
+ gpa0: gpa0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpa1: gpa1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpb: gpb {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc0: gpc0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc1: gpc1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd0: gpd0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd1: gpd1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf0: gpf0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf1: gpf1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf2: gpf2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpf3: gpf3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpj0: gpj0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpj1: gpj1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ uart0_data: uart0-data {
+ samsung,pins = "gpa0-0", "gpa0-1";
+ samsung,pin-function = <0x2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart0_fctl: uart0-fctl {
+ samsung,pins = "gpa0-2", "gpa0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_data: uart1-data {
+ samsung,pins = "gpa0-4", "gpa0-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_fctl: uart1-fctl {
+ samsung,pins = "gpa0-6", "gpa0-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c2_bus: i2c2-bus {
+ samsung,pins = "gpa0-6", "gpa0-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart2_data: uart2-data {
+ samsung,pins = "gpa1-0", "gpa1-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart2_fctl: uart2-fctl {
+ samsung,pins = "gpa1-2", "gpa1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart_audio_a: uart-audio-a {
+ samsung,pins = "gpa1-0", "gpa1-1";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c3_bus: i2c3-bus {
+ samsung,pins = "gpa1-2", "gpa1-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart3_data: uart3-data {
+ samsung,pins = "gpa1-4", "gpa1-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart_audio_b: uart-audio-b {
+ samsung,pins = "gpa1-4", "gpa1-5";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi0_bus: spi0-bus {
+ samsung,pins = "gpb-0", "gpb-2", "gpb-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c4_bus: i2c4-bus {
+ samsung,pins = "gpb-0", "gpb-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi1_bus: spi1-bus {
+ samsung,pins = "gpb-4", "gpb-6", "gpb-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c5_bus: i2c5-bus {
+ samsung,pins = "gpb-2", "gpb-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2s1_bus: i2s1-bus {
+ samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
+ "gpc0-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pcm1_bus: pcm1-bus {
+ samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
+ "gpc0-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ ac97_bus: ac97-bus {
+ samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
+ "gpc0-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2s2_bus: i2s2-bus {
+ samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
+ "gpc1-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pcm2_bus: pcm2-bus {
+ samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
+ "gpc1-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ spdif_bus: spdif-bus {
+ samsung,pins = "gpc1-0", "gpc1-1";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c6_bus: i2c6-bus {
+ samsung,pins = "gpc1-3", "gpc1-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi2_bus: spi2-bus {
+ samsung,pins = "gpc1-1", "gpc1-3", "gpc1-4";
+ samsung,pin-function = <5>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm0_out: pwm0-out {
+ samsung,pins = "gpd0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm1_out: pwm1-out {
+ samsung,pins = "gpd0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_ctrl: lcd-ctrl {
+ samsung,pins = "gpd0-0", "gpd0-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c7_bus: i2c7-bus {
+ samsung,pins = "gpd0-2", "gpd0-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm2_out: pwm2-out {
+ samsung,pins = "gpd0-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm3_out: pwm3-out {
+ samsung,pins = "gpd0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c0_bus: i2c0-bus {
+ samsung,pins = "gpd1-0", "gpd1-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ mipi0_clk: mipi0-clk {
+ samsung,pins = "gpd1-0", "gpd1-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c1_bus: i2c1-bus {
+ samsung,pins = "gpd1-2", "gpd1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ mipi1_clk: mipi1-clk {
+ samsung,pins = "gpd1-2", "gpd1-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_clk: lcd-clk {
+ samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_data16: lcd-data-width16 {
+ samsung,pins = "gpf0-7", "gpf1-0", "gpf1-1", "gpf1-2",
+ "gpf1-3", "gpf1-6", "gpf1-7", "gpf2-0",
+ "gpf2-1", "gpf2-2", "gpf2-3", "gpf2-7",
+ "gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_data18: lcd-data-width18 {
+ samsung,pins = "gpf0-6", "gpf0-7", "gpf1-0", "gpf1-1",
+ "gpf1-2", "gpf1-3", "gpf1-6", "gpf1-7",
+ "gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
+ "gpf2-6", "gpf2-7", "gpf3-0", "gpf3-1",
+ "gpf3-2", "gpf3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_data24: lcd-data-width24 {
+ samsung,pins = "gpf0-4", "gpf0-5", "gpf0-6", "gpf0-7",
+ "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3",
+ "gpf1-4", "gpf1-5", "gpf1-6", "gpf1-7",
+ "gpf2-0", "gpf2-1", "gpf2-2", "gpf2-3",
+ "gpf2-4", "gpf2-5", "gpf2-6", "gpf2-7",
+ "gpf3-0", "gpf3-1", "gpf3-2", "gpf3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ lcd_ldi: lcd-ldi {
+ samsung,pins = "gpf3-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ cam_port_a: cam-port-a {
+ samsung,pins = "gpj0-0", "gpj0-1", "gpj0-2", "gpj0-3",
+ "gpj0-4", "gpj0-5", "gpj0-6", "gpj0-7",
+ "gpj1-0", "gpj1-1", "gpj1-2", "gpj1-3",
+ "gpj1-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+ };
+
+ pinctrl@11000000 {
+ gpk0: gpk0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpk1: gpk1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpk2: gpk2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpk3: gpk3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpl0: gpl0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpl1: gpl1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpl2: gpl2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm0: gpm0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm1: gpm1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm2: gpm2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm3: gpm3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpm4: gpm4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpy0: gpy0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy1: gpy1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy2: gpy2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy3: gpy3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy4: gpy4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy5: gpy5 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpy6: gpy6 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpx0: gpx0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+ <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>;
+ #interrupt-cells = <2>;
+ };
+
+ gpx1: gpx1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+ <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+ #interrupt-cells = <2>;
+ };
+
+ gpx2: gpx2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpx3: gpx3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sd0_clk: sd0-clk {
+ samsung,pins = "gpk0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_cmd: sd0-cmd {
+ samsung,pins = "gpk0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_cd: sd0-cd {
+ samsung,pins = "gpk0-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus1: sd0-bus-width1 {
+ samsung,pins = "gpk0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus4: sd0-bus-width4 {
+ samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus8: sd0-bus-width8 {
+ samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_clk: sd4-clk {
+ samsung,pins = "gpk0-0";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_cmd: sd4-cmd {
+ samsung,pins = "gpk0-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_cd: sd4-cd {
+ samsung,pins = "gpk0-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_bus1: sd4-bus-width1 {
+ samsung,pins = "gpk0-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_bus4: sd4-bus-width4 {
+ samsung,pins = "gpk0-3", "gpk0-4", "gpk0-5", "gpk0-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd4_bus8: sd4-bus-width8 {
+ samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_clk: sd1-clk {
+ samsung,pins = "gpk1-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ samsung,pins = "gpk1-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_cd: sd1-cd {
+ samsung,pins = "gpk1-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_bus1: sd1-bus-width1 {
+ samsung,pins = "gpk1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_bus4: sd1-bus-width4 {
+ samsung,pins = "gpk1-3", "gpk1-4", "gpk1-5", "gpk1-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_clk: sd2-clk {
+ samsung,pins = "gpk2-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cmd: sd2-cmd {
+ samsung,pins = "gpk2-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cd: sd2-cd {
+ samsung,pins = "gpk2-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus1: sd2-bus-width1 {
+ samsung,pins = "gpk2-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus4: sd2-bus-width4 {
+ samsung,pins = "gpk2-3", "gpk2-4", "gpk2-5", "gpk2-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus8: sd2-bus-width8 {
+ samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd3_clk: sd3-clk {
+ samsung,pins = "gpk3-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd3_cmd: sd3-cmd {
+ samsung,pins = "gpk3-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd3_cd: sd3-cd {
+ samsung,pins = "gpk3-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd3_bus1: sd3-bus-width1 {
+ samsung,pins = "gpk3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd3_bus4: sd3-bus-width4 {
+ samsung,pins = "gpk3-3", "gpk3-4", "gpk3-5", "gpk3-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ keypad_col0: keypad-col0 {
+ samsung,pins = "gpl2-0";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col1: keypad-col1 {
+ samsung,pins = "gpl2-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col2: keypad-col2 {
+ samsung,pins = "gpl2-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col3: keypad-col3 {
+ samsung,pins = "gpl2-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col4: keypad-col4 {
+ samsung,pins = "gpl2-4";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col5: keypad-col5 {
+ samsung,pins = "gpl2-5";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col6: keypad-col6 {
+ samsung,pins = "gpl2-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ keypad_col7: keypad-col7 {
+ samsung,pins = "gpl2-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ cam_port_b: cam-port-b {
+ samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
+ "gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
+ "gpm1-0", "gpm1-1", "gpm2-0", "gpm2-1",
+ "gpm2-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ eint0: ext-int0 {
+ samsung,pins = "gpx0-0";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ eint8: ext-int8 {
+ samsung,pins = "gpx1-0";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ eint15: ext-int15 {
+ samsung,pins = "gpx1-7";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ eint16: ext-int16 {
+ samsung,pins = "gpx2-0";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ eint31: ext-int31 {
+ samsung,pins = "gpx3-7";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+ };
+
+ pinctrl@03860000 {
+ gpz: gpz {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2s0_bus: i2s0-bus {
+ samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
+ "gpz-4", "gpz-5", "gpz-6";
+ samsung,pin-function = <0x2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pcm0_bus: pcm0-bus {
+ samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
+ "gpz-4";
+ samsung,pin-function = <0x3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+ };
+
+ pinctrl@106E0000 {
+ gpv0: gpv0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpv1: gpv1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpv2: gpv2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpv3: gpv3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpv4: gpv4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ c2c_bus: c2c-bus {
+ samsung,pins = "gpv0-0", "gpv0-1", "gpv0-2", "gpv0-3",
+ "gpv0-4", "gpv0-5", "gpv0-6", "gpv0-7",
+ "gpv1-0", "gpv1-1", "gpv1-2", "gpv1-3",
+ "gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7",
+ "gpv2-0", "gpv2-1", "gpv2-2", "gpv2-3",
+ "gpv2-4", "gpv2-5", "gpv2-6", "gpv2-7",
+ "gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3",
+ "gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7",
+ "gpv4-0", "gpv4-1";
+ samsung,pin-function = <0x2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+ };
+};
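The pin groups in this pinctrl dtsi are only definitions; nothing is applied until a client device node selects a group through the generic pinctrl client properties. A minimal sketch of how a board or SoC dts might route the i2c0_bus group defined above to an I2C controller (the controller node and its address are assumptions for illustration, not taken from this patch):

	i2c@13860000 {				/* assumed I2C0 controller node */
		pinctrl-names = "default";
		pinctrl-0 = <&i2c0_bus>;	/* pin group defined above */
		status = "okay";
	};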
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
new file mode 100644
index 00000000000..179a62e46c9
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -0,0 +1,69 @@
+/*
+ * Samsung's Exynos4x12 SoCs device tree source
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung's Exynos4x12 SoCs device nodes are listed in this file. Exynos4x12
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * Exynos4x12 SoC. As device tree coverage for Exynos4x12 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/include/ "exynos4.dtsi"
+/include/ "exynos4x12-pinctrl.dtsi"
+
+/ {
+ aliases {
+ pinctrl0 = &pinctrl_0;
+ pinctrl1 = &pinctrl_1;
+ pinctrl2 = &pinctrl_2;
+ pinctrl3 = &pinctrl_3;
+ };
+
+ combiner:interrupt-controller@10440000 {
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+ <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>;
+ };
+
+ pinctrl_0: pinctrl@11400000 {
+ compatible = "samsung,pinctrl-exynos4x12";
+ reg = <0x11400000 0x1000>;
+ interrupts = <0 47 0>;
+ };
+
+ pinctrl_1: pinctrl@11000000 {
+ compatible = "samsung,pinctrl-exynos4x12";
+ reg = <0x11000000 0x1000>;
+ interrupts = <0 46 0>;
+
+ wakup_eint: wakeup-interrupt-controller {
+ compatible = "samsung,exynos4210-wakeup-eint";
+ interrupt-parent = <&gic>;
+ interrupts = <0 32 0>;
+ };
+ };
+
+ pinctrl_2: pinctrl@03860000 {
+ compatible = "samsung,pinctrl-exynos4x12";
+ reg = <0x03860000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <10 0>;
+ };
+
+ pinctrl_3: pinctrl@106E0000 {
+ compatible = "samsung,pinctrl-exynos4x12";
+ reg = <0x106E0000 0x1000>;
+ interrupts = <0 72 0>;
+ };
+};
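As the header comment notes, board dts files are expected to include exynos4x12.dtsi and fill in the board-specific values. A minimal sketch of such a board file, with the board name, compatible string, memory range and enabled UART all made up for illustration:

	/dts-v1/;
	/include/ "exynos4x12.dtsi"

	/ {
		model = "Hypothetical Exynos4x12 board";
		compatible = "vendor,example-board", "samsung,exynos4412";

		memory {
			reg = <0x40000000 0x40000000>;	/* assumed 1 GiB of DRAM */
		};

		serial@13800000 {	/* UART0 address assumed from exynos4.dtsi */
			status = "okay";
		};
	};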
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index a352df403b7..942d5761ca9 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -17,10 +17,6 @@
compatible = "samsung,smdk5250", "samsung,exynos5250";
aliases {
- mshc0 = &dwmmc_0;
- mshc1 = &dwmmc_1;
- mshc2 = &dwmmc_2;
- mshc3 = &dwmmc_3;
};
memory {
@@ -55,8 +51,31 @@
};
};
+ i2c@121D0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <40000>;
+ samsung,i2c-slave-addr = <0x38>;
+
+ sata-phy {
+ compatible = "samsung,sata-phy";
+ reg = <0x38>;
+ };
+ };
+
+ sata@122F0000 {
+ samsung,sata-freq = <66>;
+ };
+
i2c@12C80000 {
- status = "disabled";
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+ gpios = <&gpa0 6 3 3 0>,
+ <&gpa0 7 3 3 0>;
+
+ hdmiddc@50 {
+ compatible = "samsung,exynos5-hdmiddc";
+ reg = <0x50>;
+ };
};
i2c@12C90000 {
@@ -79,7 +98,17 @@
status = "disabled";
};
- dwmmc_0: dwmmc0@12200000 {
+ i2c@12CE0000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <66000>;
+
+ hdmiphy@38 {
+ compatible = "samsung,exynos5-hdmiphy";
+ reg = <0x38>;
+ };
+ };
+
+ dwmmc0@12200000 {
num-slots = <1>;
supports-highspeed;
broken-cd;
@@ -100,11 +129,11 @@
};
};
- dwmmc_1: dwmmc1@12210000 {
+ dwmmc1@12210000 {
status = "disabled";
};
- dwmmc_2: dwmmc2@12220000 {
+ dwmmc2@12220000 {
num-slots = <1>;
supports-highspeed;
fifo-depth = <0x80>;
@@ -125,7 +154,7 @@
};
};
- dwmmc_3: dwmmc3@12230000 {
+ dwmmc3@12230000 {
status = "disabled";
};
@@ -166,4 +195,13 @@
spi_2: spi@12d40000 {
status = "disabled";
};
+
+ hdmi {
+ hpd-gpio = <&gpx3 7 0xf 1 3>;
+ };
+
+ codec@11000000 {
+ samsung,mfc-r = <0x43000000 0x800000>;
+ samsung,mfc-l = <0x51000000 0x800000>;
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
new file mode 100644
index 00000000000..17dd951c1cd
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -0,0 +1,43 @@
+/*
+ * Google Snow board device tree source
+ *
+ * Copyright (c) 2012 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+/include/ "exynos5250.dtsi"
+/include/ "cros5250-common.dtsi"
+
+/ {
+ model = "Google Snow";
+ compatible = "google,snow", "samsung,exynos5250";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ lid-switch {
+ label = "Lid";
+ gpios = <&gpx3 5 0 0x10000 0>;
+ linux,input-type = <5>; /* EV_SW */
+ linux,code = <0>; /* SW_LID */
+ debounce-interval = <1>;
+ gpio-key,wakeup;
+ };
+ };
+
+ /*
+ * On Snow we've got SIP WiFi and so can keep drive strengths low to
+ * reduce EMI.
+ */
+ dwmmc3@12230000 {
+ slot@0 {
+ gpios = <&gpc4 0 2 0 0>, <&gpc4 1 2 3 0>,
+ <&gpc4 3 2 3 0>, <&gpc4 4 2 3 0>,
+ <&gpc4 5 2 3 0>, <&gpc4 6 2 3 0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index dddfd6e444d..2e3b6efaf1a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -31,6 +31,19 @@
gsc1 = &gsc_1;
gsc2 = &gsc_2;
gsc3 = &gsc_3;
+ mshc0 = &dwmmc_0;
+ mshc1 = &dwmmc_1;
+ mshc2 = &dwmmc_2;
+ mshc3 = &dwmmc_3;
+ i2c0 = &i2c_0;
+ i2c1 = &i2c_1;
+ i2c2 = &i2c_2;
+ i2c3 = &i2c_3;
+ i2c4 = &i2c_4;
+ i2c5 = &i2c_5;
+ i2c6 = &i2c_6;
+ i2c7 = &i2c_7;
+ i2c8 = &i2c_8;
};
gic:interrupt-controller@10481000 {
@@ -62,12 +75,24 @@
interrupts = <0 42 0>;
};
+ codec@11000000 {
+ compatible = "samsung,mfc-v6";
+ reg = <0x11000000 0x10000>;
+ interrupts = <0 96 0>;
+ };
+
rtc {
compatible = "samsung,s3c6410-rtc";
reg = <0x101E0000 0x100>;
interrupts = <0 43 0>, <0 44 0>;
};
+ tmu@10060000 {
+ compatible = "samsung,exynos5250-tmu";
+ reg = <0x10060000 0x100>;
+ interrupts = <0 65 0>;
+ };
+
serial@12C00000 {
compatible = "samsung,exynos4210-uart";
reg = <0x12C00000 0x100>;
@@ -92,7 +117,18 @@
interrupts = <0 54 0>;
};
- i2c@12C60000 {
+ sata@122F0000 {
+ compatible = "samsung,exynos5-sata-ahci";
+ reg = <0x122F0000 0x1ff>;
+ interrupts = <0 115 0>;
+ };
+
+ sata-phy@12170000 {
+ compatible = "samsung,exynos5-sata-phy";
+ reg = <0x12170000 0x1ff>;
+ };
+
+ i2c_0: i2c@12C60000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C60000 0x100>;
interrupts = <0 56 0>;
@@ -100,7 +136,7 @@
#size-cells = <0>;
};
- i2c@12C70000 {
+ i2c_1: i2c@12C70000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C70000 0x100>;
interrupts = <0 57 0>;
@@ -108,7 +144,7 @@
#size-cells = <0>;
};
- i2c@12C80000 {
+ i2c_2: i2c@12C80000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C80000 0x100>;
interrupts = <0 58 0>;
@@ -116,7 +152,7 @@
#size-cells = <0>;
};
- i2c@12C90000 {
+ i2c_3: i2c@12C90000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C90000 0x100>;
interrupts = <0 59 0>;
@@ -124,7 +160,7 @@
#size-cells = <0>;
};
- i2c@12CA0000 {
+ i2c_4: i2c@12CA0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CA0000 0x100>;
interrupts = <0 60 0>;
@@ -132,7 +168,7 @@
#size-cells = <0>;
};
- i2c@12CB0000 {
+ i2c_5: i2c@12CB0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CB0000 0x100>;
interrupts = <0 61 0>;
@@ -140,7 +176,7 @@
#size-cells = <0>;
};
- i2c@12CC0000 {
+ i2c_6: i2c@12CC0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CC0000 0x100>;
interrupts = <0 62 0>;
@@ -148,7 +184,7 @@
#size-cells = <0>;
};
- i2c@12CD0000 {
+ i2c_7: i2c@12CD0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CD0000 0x100>;
interrupts = <0 63 0>;
@@ -156,6 +192,21 @@
#size-cells = <0>;
};
+ i2c_8: i2c@12CE0000 {
+ compatible = "samsung,s3c2440-hdmiphy-i2c";
+ reg = <0x12CE0000 0x1000>;
+ interrupts = <0 64 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@121D0000 {
+ compatible = "samsung,exynos5-sata-phy-i2c";
+ reg = <0x121D0000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
spi_0: spi@12d20000 {
compatible = "samsung,exynos4210-spi";
reg = <0x12d20000 0x100>;
@@ -186,7 +237,7 @@
#size-cells = <0>;
};
- dwmmc0@12200000 {
+ dwmmc_0: dwmmc0@12200000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12200000 0x1000>;
interrupts = <0 75 0>;
@@ -194,7 +245,7 @@
#size-cells = <0>;
};
- dwmmc1@12210000 {
+ dwmmc_1: dwmmc1@12210000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12210000 0x1000>;
interrupts = <0 76 0>;
@@ -202,7 +253,7 @@
#size-cells = <0>;
};
- dwmmc2@12220000 {
+ dwmmc_2: dwmmc2@12220000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12220000 0x1000>;
interrupts = <0 77 0>;
@@ -210,7 +261,7 @@
#size-cells = <0>;
};
- dwmmc3@12230000 {
+ dwmmc_3: dwmmc3@12230000 {
compatible = "samsung,exynos5250-dw-mshc";
reg = <0x12230000 0x1000>;
interrupts = <0 78 0>;
@@ -520,4 +571,16 @@
reg = <0x13e30000 0x1000>;
interrupts = <0 88 0>;
};
+
+ hdmi {
+ compatible = "samsung,exynos5-hdmi";
+ reg = <0x14530000 0x100000>;
+ interrupts = <0 95 0>;
+ };
+
+ mixer {
+ compatible = "samsung,exynos5-mixer";
+ reg = <0x14450000 0x10000>;
+ interrupts = <0 94 0>;
+ };
};
diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi
new file mode 100644
index 00000000000..592fb9dc35b
--- /dev/null
+++ b/arch/arm/boot/dts/href.dtsi
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "dbx5x0.dtsi"
+
+/ {
+ memory {
+ reg = <0x00000000 0x20000000>;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ linux,code = <11>;
+ label = "SFH7741 Proximity Sensor";
+ };
+ };
+
+ soc-u9500 {
+ uart@80120000 {
+ status = "okay";
+ };
+
+ uart@80121000 {
+ status = "okay";
+ };
+
+ uart@80007000 {
+ status = "okay";
+ };
+
+ i2c@80004000 {
+ tc3589x@42 {
+ compatible = "tc3589x";
+ reg = <0x42>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <25 0x1>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ tc3589x_gpio: tc3589x_gpio {
+ compatible = "tc3589x-gpio";
+ interrupts = <0 0x1>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+ };
+
+ i2c@80128000 {
+ lp5521@0x33 {
+ compatible = "lp5521";
+ reg = <0x33>;
+ };
+
+ lp5521@0x34 {
+ compatible = "lp5521";
+ reg = <0x34>;
+ };
+
+ bh1780@0x29 {
+ compatible = "rohm,bh1780gli";
+ reg = <0x29>;
+ };
+ };
+
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ mmc-cap-sd-highspeed;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ cd-gpios = <&tc3589x_gpio 3 0x4>;
+
+ status = "okay";
+ };
+
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+
+ status = "okay";
+ };
+
+ // PoP:ed eMMC
+ sdi2_per3@80005000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+
+ status = "okay";
+ };
+
+ // On-board eMMC
+ sdi4_per2@80114000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux2_reg>;
+
+ status = "okay";
+ };
+
+ sound {
+ compatible = "stericsson,snd-soc-mop500";
+
+ stericsson,cpu-dai = <&msp1 &msp3>;
+ stericsson,audio-codec = <&codec>;
+ };
+
+ msp1: msp@80124000 {
+ status = "okay";
+ };
+
+ msp3: msp@80125000 {
+ status = "okay";
+ };
+
+ prcmu@80157000 {
+ db8500-prcmu-regulators {
+ db8500_vape_reg: db8500_vape {
+ regulator-name = "db8500-vape";
+ };
+
+ db8500_varm_reg: db8500_varm {
+ regulator-name = "db8500-varm";
+ };
+
+ db8500_vmodem_reg: db8500_vmodem {
+ regulator-name = "db8500-vmodem";
+ };
+
+ db8500_vpll_reg: db8500_vpll {
+ regulator-name = "db8500-vpll";
+ };
+
+ db8500_vsmps1_reg: db8500_vsmps1 {
+ regulator-name = "db8500-vsmps1";
+ };
+
+ db8500_vsmps2_reg: db8500_vsmps2 {
+ regulator-name = "db8500-vsmps2";
+ };
+
+ db8500_vsmps3_reg: db8500_vsmps3 {
+ regulator-name = "db8500-vsmps3";
+ };
+
+ db8500_vrf1_reg: db8500_vrf1 {
+ regulator-name = "db8500-vrf1";
+ };
+
+ db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+ regulator-name = "db8500-sva-mmdsp";
+ };
+
+ db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+ regulator-name = "db8500-sva-mmdsp-ret";
+ };
+
+ db8500_sva_pipe_reg: db8500_sva_pipe {
+ regulator-name = "db8500_sva_pipe";
+ };
+
+ db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+ regulator-name = "db8500_sia_mmdsp";
+ };
+
+ db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+ regulator-name = "db8500-sia-mmdsp-ret";
+ };
+
+ db8500_sia_pipe_reg: db8500_sia_pipe {
+ regulator-name = "db8500-sia-pipe";
+ };
+
+ db8500_sga_reg: db8500_sga {
+ regulator-name = "db8500-sga";
+ };
+
+ db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+ regulator-name = "db8500-b2r2-mcde";
+ };
+
+ db8500_esram12_reg: db8500_esram12 {
+ regulator-name = "db8500-esram12";
+ };
+
+ db8500_esram12_ret_reg: db8500_esram12_ret {
+ regulator-name = "db8500-esram12-ret";
+ };
+
+ db8500_esram34_reg: db8500_esram34 {
+ regulator-name = "db8500-esram34";
+ };
+
+ db8500_esram34_ret_reg: db8500_esram34_ret {
+ regulator-name = "db8500-esram34-ret";
+ };
+ };
+
+ ab8500@5 {
+ ab8500-regulators {
+ ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+ regulator-name = "V-DISPLAY";
+ };
+
+ ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+ regulator-name = "V-eMMC1";
+ };
+
+ ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+ regulator-name = "V-MMC-SD";
+ };
+
+ ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+ regulator-name = "V-INTCORE";
+ };
+
+ ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+ regulator-name = "V-TVOUT";
+ };
+
+ ab8500_ldo_usb_reg: ab8500_ldo_usb {
+ regulator-name = "dummy";
+ };
+
+ ab8500_ldo_audio_reg: ab8500_ldo_audio {
+ regulator-name = "V-AUD";
+ };
+
+ ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+ regulator-name = "V-AMIC1";
+ };
+
+ ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+ regulator-name = "V-AMIC2";
+ };
+
+ ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+ regulator-name = "V-DMIC";
+ };
+
+ ab8500_ldo_ana_reg: ab8500_ldo_ana {
+ regulator-name = "V-CSI/DSI";
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/hrefprev60.dts b/arch/arm/boot/dts/hrefprev60.dts
new file mode 100644
index 00000000000..eec29c4a86d
--- /dev/null
+++ b/arch/arm/boot/dts/hrefprev60.dts
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "dbx5x0.dtsi"
+/include/ "href.dtsi"
+/include/ "stuib.dtsi"
+
+/ {
+ model = "ST-Ericsson HREF (pre-v60) platform with Device Tree";
+ compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+ gpio_keys {
+ button@1 {
+ gpios = <&tc3589x_gpio 7 0x4>;
+ };
+ };
+
+ soc-u9500 {
+ i2c@80004000 {
+ tps61052@33 {
+ compatible = "tps61052";
+ reg = <0x33>;
+ };
+ };
+
+ i2c@80110000 {
+ bu21013_tp@0x5c {
+ reset-gpio = <&tc3589x_gpio 13 0x4>;
+ };
+ };
+
+ vmmci: regulator-gpio {
+ gpios = <&tc3589x_gpio 18 0x4>;
+ gpio-enable = <&tc3589x_gpio 17 0x4>;
+
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts
index 2131d77dc9c..55f4191a626 100644
--- a/arch/arm/boot/dts/hrefv60plus.dts
+++ b/arch/arm/boot/dts/hrefv60plus.dts
@@ -11,85 +11,200 @@
/dts-v1/;
/include/ "dbx5x0.dtsi"
+/include/ "href.dtsi"
+/include/ "stuib.dtsi"
/ {
- model = "ST-Ericsson HREF platform with Device Tree";
- compatible = "st-ericsson,hrefv60+";
+ model = "ST-Ericsson HREF (v60+) platform with Device Tree";
+ compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
- memory {
- reg = <0x00000000 0x20000000>;
+ gpio_keys {
+ button@1 {
+ gpios = <&gpio6 25 0x4>;
+ };
};
soc-u9500 {
- uart@80120000 {
+ i2c@80110000 {
+ bu21013_tp@0x5c {
+ reset-gpio = <&gpio4 15 0x4>;
+ };
+ };
+
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ mmc-cap-sd-highspeed;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ cd-gpios = <&tc3589x_gpio 3 0x4>;
+
status = "okay";
};
- uart@80121000 {
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+
status = "okay";
};
- uart@80007000 {
+ // PoP:ed eMMC
+ sdi2_per3@80005000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+
status = "okay";
};
- i2c@80004000 {
- tc3589x@42 {
- compatible = "tc3589x";
- reg = <0x42>;
- interrupt-parent = <&gpio6>;
- interrupts = <25 0x1>;
+ // On-board eMMC
+ sdi4_per2@80114000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux2_reg>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ status = "okay";
+ };
- tc3589x_gpio: tc3589x_gpio {
- compatible = "tc3589x-gpio";
- interrupts = <0 0x1>;
+ prcmu@80157000 {
+ db8500-prcmu-regulators {
+ db8500_vape_reg: db8500_vape {
+ regulator-name = "db8500-vape";
+ };
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
+ db8500_varm_reg: db8500_varm {
+ regulator-name = "db8500-varm";
};
- };
- tps61052@33 {
- compatible = "tps61052";
- reg = <0x33>;
- };
- };
+ db8500_vmodem_reg: db8500_vmodem {
+ regulator-name = "db8500-vmodem";
+ };
- i2c@80128000 {
- lp5521@0x33 {
- compatible = "lp5521";
- reg = <0x33>;
- };
+ db8500_vpll_reg: db8500_vpll {
+ regulator-name = "db8500-vpll";
+ };
- lp5521@0x34 {
- compatible = "lp5521";
- reg = <0x34>;
- };
+ db8500_vsmps1_reg: db8500_vsmps1 {
+ regulator-name = "db8500-vsmps1";
+ };
+
+ db8500_vsmps2_reg: db8500_vsmps2 {
+ regulator-name = "db8500-vsmps2";
+ };
+
+ db8500_vsmps3_reg: db8500_vsmps3 {
+ regulator-name = "db8500-vsmps3";
+ };
+
+ db8500_vrf1_reg: db8500_vrf1 {
+ regulator-name = "db8500-vrf1";
+ };
+
+ db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+ regulator-name = "db8500-sva-mmdsp";
+ };
+
+ db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+ regulator-name = "db8500-sva-mmdsp-ret";
+ };
+
+ db8500_sva_pipe_reg: db8500_sva_pipe {
+ regulator-name = "db8500_sva_pipe";
+ };
- bh1780@0x29 {
- compatible = "rohm,bh1780gli";
- reg = <0x33>;
+ db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+ regulator-name = "db8500_sia_mmdsp";
+ };
+
+ db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+ regulator-name = "db8500-sia-mmdsp-ret";
+ };
+
+ db8500_sia_pipe_reg: db8500_sia_pipe {
+ regulator-name = "db8500-sia-pipe";
+ };
+
+ db8500_sga_reg: db8500_sga {
+ regulator-name = "db8500-sga";
+ };
+
+ db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+ regulator-name = "db8500-b2r2-mcde";
+ };
+
+ db8500_esram12_reg: db8500_esram12 {
+ regulator-name = "db8500-esram12";
+ };
+
+ db8500_esram12_ret_reg: db8500_esram12_ret {
+ regulator-name = "db8500-esram12-ret";
+ };
+
+ db8500_esram34_reg: db8500_esram34 {
+ regulator-name = "db8500-esram34";
+ };
+
+ db8500_esram34_ret_reg: db8500_esram34_ret {
+ regulator-name = "db8500-esram34-ret";
+ };
};
- };
- sound {
- compatible = "stericsson,snd-soc-mop500";
+ ab8500@5 {
+ ab8500-regulators {
+ ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+ regulator-name = "V-DISPLAY";
+ };
- stericsson,cpu-dai = <&msp1 &msp3>;
- stericsson,audio-codec = <&codec>;
- };
+ ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+ regulator-name = "V-eMMC1";
+ };
- msp1: msp@80124000 {
- status = "okay";
- };
+ ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+ regulator-name = "V-MMC-SD";
+ };
- msp3: msp@80125000 {
- status = "okay";
+ ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+ regulator-name = "V-INTCORE";
+ };
+
+ ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+ regulator-name = "V-TVOUT";
+ };
+
+ ab8500_ldo_usb_reg: ab8500_ldo_usb {
+ regulator-name = "dummy";
+ };
+
+ ab8500_ldo_audio_reg: ab8500_ldo_audio {
+ regulator-name = "V-AUD";
+ };
+
+ ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+ regulator-name = "V-AMIC1";
+ };
+
+ ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+ regulator-name = "V-AMIC2";
+ };
+
+ ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+ regulator-name = "V-DMIC";
+ };
+
+ ab8500_ldo_ana_reg: ab8500_ldo_ana {
+ regulator-name = "V-CSI/DSI";
+ };
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 384d8b66f33..7c43b8e70b9 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -40,6 +40,15 @@
reg = <0>;
fsl,pinmux-ids = <
0x2013 /* MX23_PAD_SSP1_DETECT__GPIO_2_1 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ led_pin_gpio0_17: led_gpio0_17@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
0x0113 /* MX23_PAD_GPMI_ALE__GPIO_0_17 */
>;
fsl,drive-strength = <0>;
@@ -47,6 +56,15 @@
fsl,pull-up = <0>;
};
};
+
+ ssp1: ssp@80034000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx23-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+ };
};
apbx@80040000 {
@@ -91,11 +109,12 @@
leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_gpio0_17>;
user {
label = "green";
- gpios = <&gpio2 1 0>;
- linux,default-trigger = "default-on";
+ gpios = <&gpio2 1 1>;
};
};
};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 6d31aa38346..65415c598a5 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -279,6 +279,19 @@
fsl,voltage = <1>;
fsl,pull-up = <0>;
};
+
+ spi2_pins_a: spi2@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0182 /* MX23_PAD_GPMI_WRN__SSP2_SCK */
+ 0x0142 /* MX23_PAD_GPMI_RDY1__SSP2_CMD */
+ 0x0002 /* MX23_PAD_GPMI_D00__SSP2_DATA0 */
+ 0x0032 /* MX23_PAD_GPMI_D03__SSP2_DATA3 */
+ >;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
};
digctl@8001c000 {
diff --git a/arch/arm/boot/dts/imx25-karo-tx25.dts b/arch/arm/boot/dts/imx25-karo-tx25.dts
new file mode 100644
index 00000000000..d81f8a0b979
--- /dev/null
+++ b/arch/arm/boot/dts/imx25-karo-tx25.dts
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx25.dtsi"
+
+/ {
+ model = "Ka-Ro TX25";
+ compatible = "karo,imx25-tx25", "fsl,imx25";
+
+ memory {
+ reg = <0x80000000 0x02000000 0x90000000 0x02000000>;
+ };
+
+ soc {
+ aips@43f00000 {
+ uart1: serial@43f90000 {
+ status = "okay";
+ };
+ };
+
+ spba@50000000 {
+ fec: ethernet@50038000 {
+ status = "okay";
+ phy-mode = "rmii";
+ };
+ };
+
+ emi@80000000 {
+ nand@bb000000 {
+ nand-on-flash-bbt;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
new file mode 100644
index 00000000000..e1b13ebc96d
--- /dev/null
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ usb0 = &usbotg;
+ usb1 = &usbhost1;
+ };
+
+ asic: asic-interrupt-controller@68000000 {
+ compatible = "fsl,imx25-asic", "fsl,avic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x68000000 0x8000000>;
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc {
+ compatible = "fsl,imx-osc", "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&asic>;
+ ranges;
+
+ aips@43f00000 { /* AIPS1 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x43f00000 0x100000>;
+ ranges;
+
+ i2c1: i2c@43f80000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
+ reg = <0x43f80000 0x4000>;
+ clocks = <&clks 48>;
+ clock-names = "";
+ interrupts = <3>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@43f84000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
+ reg = <0x43f84000 0x4000>;
+ clocks = <&clks 48>;
+ clock-names = "";
+ interrupts = <10>;
+ status = "disabled";
+ };
+
+ can1: can@43f88000 {
+ compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ reg = <0x43f88000 0x4000>;
+ interrupts = <43>;
+ clocks = <&clks 75>, <&clks 75>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ can2: can@43f8c000 {
+ compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ reg = <0x43f8c000 0x4000>;
+ interrupts = <44>;
+ clocks = <&clks 76>, <&clks 76>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart1: serial@43f90000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x43f90000 0x4000>;
+ interrupts = <45>;
+ clocks = <&clks 120>, <&clks 57>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart2: serial@43f94000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x43f94000 0x4000>;
+ interrupts = <32>;
+ clocks = <&clks 121>, <&clks 57>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ i2c2: i2c@43f98000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-i2c", "fsl,imx21-i2c";
+ reg = <0x43f98000 0x4000>;
+ clocks = <&clks 48>;
+ clock-names = "";
+ interrupts = <4>;
+ status = "disabled";
+ };
+
+ owire@43f9c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x43f9c000 0x4000>;
+ clocks = <&clks 51>;
+ clock-names = "";
+ interrupts = <2>;
+ status = "disabled";
+ };
+
+ spi1: cspi@43fa4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
+ reg = <0x43fa4000 0x4000>;
+ clocks = <&clks 62>;
+ clock-names = "ipg";
+ interrupts = <14>;
+ status = "disabled";
+ };
+
+ kpp@43fa8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x43fa8000 0x4000>;
+ clocks = <&clks 102>;
+ clock-names = "";
+ interrupts = <24>;
+ status = "disabled";
+ };
+
+ iomuxc@43fac000 {
+ compatible = "fsl,imx25-iomuxc";
+ reg = <0x43fac000 0x4000>;
+ };
+
+ audmux@43fb0000 {
+ compatible = "fsl,imx25-audmux", "fsl,imx31-audmux";
+ reg = <0x43fb0000 0x4000>;
+ status = "disabled";
+ };
+ };
+
+ spba@50000000 {
+ compatible = "fsl,spba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x50000000 0x40000>;
+ ranges;
+
+ spi3: cspi@50004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
+ reg = <0x50004000 0x4000>;
+ interrupts = <0>;
+ clocks = <&clks 80>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ uart4: serial@50008000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x50008000 0x4000>;
+ interrupts = <5>;
+ clocks = <&clks 123>, <&clks 57>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart3: serial@5000c000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x5000c000 0x4000>;
+ interrupts = <18>;
+ clocks = <&clks 122>, <&clks 57>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ spi2: cspi@50010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
+ reg = <0x50010000 0x4000>;
+ clocks = <&clks 79>;
+ clock-names = "ipg";
+ interrupts = <13>;
+ status = "disabled";
+ };
+
+ ssi2: ssi@50014000 {
+ compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
+ reg = <0x50014000 0x4000>;
+ interrupts = <11>;
+ status = "disabled";
+ };
+
+ esai@50018000 {
+ reg = <0x50018000 0x4000>;
+ interrupts = <7>;
+ };
+
+ uart5: serial@5002c000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x5002c000 0x4000>;
+ interrupts = <40>;
+ clocks = <&clks 124>, <&clks 57>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ tsc: tsc@50030000 {
+ compatible = "fsl,imx25-adc", "fsl,imx21-tsc";
+ reg = <0x50030000 0x4000>;
+ interrupts = <46>;
+ clocks = <&clks 119>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ ssi1: ssi@50034000 {
+ compatible = "fsl,imx25-ssi", "fsl,imx21-ssi";
+ reg = <0x50034000 0x4000>;
+ interrupts = <12>;
+ status = "disabled";
+ };
+
+ fec: ethernet@50038000 {
+ compatible = "fsl,imx25-fec";
+ reg = <0x50038000 0x4000>;
+ interrupts = <57>;
+ clocks = <&clks 88>, <&clks 65>;
+ clock-names = "ipg", "ahb";
+ status = "disabled";
+ };
+ };
+
+ aips@53f00000 { /* AIPS2 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x53f00000 0x100000>;
+ ranges;
+
+ clks: ccm@53f80000 {
+ compatible = "fsl,imx25-ccm";
+ reg = <0x53f80000 0x4000>;
+ interrupts = <31>;
+ #clock-cells = <1>;
+ };
+
+ gpt4: timer@53f84000 {
+ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
+ reg = <0x53f84000 0x4000>;
+ clocks = <&clks 9>, <&clks 45>;
+ clock-names = "ipg", "per";
+ interrupts = <1>;
+ };
+
+ gpt3: timer@53f88000 {
+ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
+ reg = <0x53f88000 0x4000>;
+ clocks = <&clks 9>, <&clks 47>;
+ clock-names = "ipg", "per";
+ interrupts = <29>;
+ };
+
+ gpt2: timer@53f8c000 {
+ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
+ reg = <0x53f8c000 0x4000>;
+ clocks = <&clks 9>, <&clks 47>;
+ clock-names = "ipg", "per";
+ interrupts = <53>;
+ };
+
+ gpt1: timer@53f90000 {
+ compatible = "fsl,imx25-gpt", "fsl,imx31-gpt";
+ reg = <0x53f90000 0x4000>;
+ clocks = <&clks 9>, <&clks 47>;
+ clock-names = "ipg", "per";
+ interrupts = <54>;
+ };
+
+ epit1: timer@53f94000 {
+ compatible = "fsl,imx25-epit";
+ reg = <0x53f94000 0x4000>;
+ interrupts = <28>;
+ };
+
+ epit2: timer@53f98000 {
+ compatible = "fsl,imx25-epit";
+ reg = <0x53f98000 0x4000>;
+ interrupts = <27>;
+ };
+
+ gpio4: gpio@53f9c000 {
+ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
+ reg = <0x53f9c000 0x4000>;
+ interrupts = <23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pwm2: pwm@53fa0000 {
+ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ #pwm-cells = <2>;
+ reg = <0x53fa0000 0x4000>;
+ clocks = <&clks 106>, <&clks 36>;
+ clock-names = "ipg", "per";
+ interrupts = <36>;
+ };
+
+ gpio3: gpio@53fa4000 {
+ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
+ reg = <0x53fa4000 0x4000>;
+ interrupts = <16>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pwm3: pwm@53fa8000 {
+ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ #pwm-cells = <2>;
+ reg = <0x53fa8000 0x4000>;
+ clocks = <&clks 107>, <&clks 36>;
+ clock-names = "ipg", "per";
+ interrupts = <41>;
+ };
+
+ esdhc1: esdhc@53fb4000 {
+ compatible = "fsl,imx25-esdhc";
+ reg = <0x53fb4000 0x4000>;
+ interrupts = <9>;
+ clocks = <&clks 86>, <&clks 63>, <&clks 45>;
+ clock-names = "ipg", "ahb", "per";
+ status = "disabled";
+ };
+
+ esdhc2: esdhc@53fb8000 {
+ compatible = "fsl,imx25-esdhc";
+ reg = <0x53fb8000 0x4000>;
+ interrupts = <8>;
+ clocks = <&clks 87>, <&clks 64>, <&clks 46>;
+ clock-names = "ipg", "ahb", "per";
+ status = "disabled";
+ };
+
+ lcdc@53fbc000 {
+ reg = <0x53fbc000 0x4000>;
+ interrupts = <39>;
+ clocks = <&clks 103>, <&clks 66>, <&clks 49>;
+ clock-names = "ipg", "ahb", "per";
+ status = "disabled";
+ };
+
+ slcdc@53fc0000 {
+ reg = <0x53fc0000 0x4000>;
+ interrupts = <38>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@53fc8000 {
+ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ reg = <0x53fc8000 0x4000>;
+ clocks = <&clks 108>, <&clks 36>;
+ clock-names = "ipg", "per";
+ interrupts = <42>;
+ };
+
+ gpio1: gpio@53fcc000 {
+ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
+ reg = <0x53fcc000 0x4000>;
+ interrupts = <52>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@53fd0000 {
+ compatible = "fsl,imx25-gpio", "fsl,imx35-gpio";
+ reg = <0x53fd0000 0x4000>;
+ interrupts = <51>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sdma@53fd4000 {
+ compatible = "fsl,imx25-sdma", "fsl,imx35-sdma";
+ reg = <0x53fd4000 0x4000>;
+ clocks = <&clks 112>, <&clks 68>;
+ clock-names = "ipg", "ahb";
+ interrupts = <34>;
+ };
+
+ wdog@53fdc000 {
+ compatible = "fsl,imx25-wdt", "fsl,imx21-wdt";
+ reg = <0x53fdc000 0x4000>;
+ clocks = <&clks 126>;
+ clock-names = "";
+ interrupts = <55>;
+ };
+
+ pwm1: pwm@53fe0000 {
+ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ #pwm-cells = <2>;
+ reg = <0x53fe0000 0x4000>;
+ clocks = <&clks 105>, <&clks 36>;
+ clock-names = "ipg", "per";
+ interrupts = <26>;
+ };
+
+ usbphy1: usbphy@1 {
+ compatible = "nop-usbphy";
+ status = "disabled";
+ };
+
+ usbphy2: usbphy@2 {
+ compatible = "nop-usbphy";
+ status = "disabled";
+ };
+
+ usbotg: usb@53ff4000 {
+ compatible = "fsl,imx25-usb", "fsl,imx27-usb";
+ reg = <0x53ff4000 0x0200>;
+ interrupts = <37>;
+ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,usbmisc = <&usbmisc 0>;
+ status = "disabled";
+ };
+
+ usbhost1: usb@53ff4400 {
+ compatible = "fsl,imx25-usb", "fsl,imx27-usb";
+ reg = <0x53ff4400 0x0200>;
+ interrupts = <35>;
+ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,usbmisc = <&usbmisc 1>;
+ status = "disabled";
+ };
+
+ usbmisc: usbmisc@53ff4600 {
+ #index-cells = <1>;
+ compatible = "fsl,imx25-usbmisc";
+ clocks = <&clks 9>, <&clks 70>, <&clks 8>;
+ clock-names = "ipg", "ahb", "per";
+ reg = <0x53ff4600 0x00f>;
+ status = "disabled";
+ };
+
+ dryice@53ffc000 {
+ compatible = "fsl,imx25-dryice", "fsl,imx25-rtc";
+ reg = <0x53ffc000 0x4000>;
+ clocks = <&clks 81>;
+ clock-names = "ipg";
+ interrupts = <25>;
+ };
+ };
+
+ emi@80000000 {
+ compatible = "fsl,emi-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x3b002000>;
+ ranges;
+
+ nand@bb000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "fsl,imx25-nand";
+ reg = <0xbb000000 0x2000>;
+ clocks = <&clks 50>;
+ clock-names = "";
+ interrupts = <33>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
index b01c0d745fc..fa04c7b18bc 100644
--- a/arch/arm/boot/dts/imx27-3ds.dts
+++ b/arch/arm/boot/dts/imx27-3ds.dts
@@ -21,17 +21,17 @@
};
soc {
- aipi@10000000 { /* aipi */
-
+ aipi@10000000 { /* aipi1 */
uart1: serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
};
+ };
- fec@1002b000 {
+ aipi@10020000 { /* aipi2 */
+ ethernet@1002b000 {
status = "okay";
};
};
};
-
};
diff --git a/arch/arm/boot/dts/imx27-apf27.dts b/arch/arm/boot/dts/imx27-apf27.dts
new file mode 100644
index 00000000000..c0327c054de
--- /dev/null
+++ b/arch/arm/boot/dts/imx27-apf27.dts
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Philippe Reynes <tremyfr@yahoo.fr>
+ * Copyright 2012 Armadeus Systems <support@armadeus.com>
+ *
+ * Based on code which is: Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx27.dtsi"
+
+/ {
+ model = "Armadeus Systems APF27 module";
+ compatible = "armadeus,imx27-apf27", "fsl,imx27";
+
+ memory {
+ reg = <0xa0000000 0x04000000>;
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc26m {
+ compatible = "fsl,imx-osc26m", "fixed-clock";
+ clock-frequency = <0>;
+ };
+ };
+
+ soc {
+ aipi@10000000 {
+ serial@1000a000 {
+ status = "okay";
+ };
+
+ ethernet@1002b000 {
+ status = "okay";
+ };
+ };
+
+ nand@d8000000 {
+ status = "okay";
+ nand-bus-width = <16>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ label = "env";
+ reg = <0x100000 0x80000>;
+ };
+
+ partition@180000 {
+ label = "env2";
+ reg = <0x180000 0x80000>;
+ };
+
+ partition@200000 {
+ label = "firmware";
+ reg = <0x200000 0x80000>;
+ };
+
+ partition@280000 {
+ label = "dtb";
+ reg = <0x280000 0x80000>;
+ };
+
+ partition@300000 {
+ label = "kernel";
+ reg = <0x300000 0x500000>;
+ };
+
+ partition@800000 {
+ label = "rootfs";
+ reg = <0x800000 0xf800000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore.dts b/arch/arm/boot/dts/imx27-phytec-phycore.dts
index af50469e34b..53b0ec0c228 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore.dts
@@ -21,8 +21,7 @@
};
soc {
- aipi@10000000 { /* aipi */
-
+ aipi@10000000 { /* aipi1 */
serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
@@ -38,10 +37,6 @@
status = "okay";
};
- ethernet@1002b000 {
- status = "okay";
- };
-
i2c@1001d000 {
clock-frequency = <400000>;
status = "okay";
@@ -60,6 +55,12 @@
};
};
};
+
+ aipi@10020000 { /* aipi2 */
+ ethernet@1002b000 {
+ status = "okay";
+ };
+ };
};
nor_flash@c0000000 {
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 67d672792b0..5a82cb5707a 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -55,10 +55,10 @@
compatible = "fsl,aipi-bus", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x10000000 0x10000000>;
+ reg = <0x10000000 0x20000>;
ranges;
- wdog@10002000 {
+ wdog: wdog@10002000 {
compatible = "fsl,imx27-wdt", "fsl,imx21-wdt";
reg = <0x10002000 0x4000>;
interrupts = <27>;
@@ -211,6 +211,15 @@
status = "disabled";
};
+ };
+
+ aipi@10020000 { /* AIPI2 */
+ compatible = "fsl,aipi-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x10020000 0x20000>;
+ ranges;
+
fec: ethernet@1002b000 {
compatible = "fsl,imx27-fec";
reg = <0x1002b000 0x4000>;
@@ -218,7 +227,8 @@
status = "disabled";
};
};
- nand@d8000000 {
+
+ nfc: nand@d8000000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx28-apf28.dts b/arch/arm/boot/dts/imx28-apf28.dts
new file mode 100644
index 00000000000..7eb075876c4
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-apf28.dts
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Armadeus Systems - <support@armadeus.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Armadeus Systems APF28 module";
+ compatible = "armadeus,imx28-apf28", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ gpmi-nand@8000c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0 0x300000>;
+ };
+
+ partition@300000 {
+ label = "env";
+ reg = <0x300000 0x80000>;
+ };
+
+ partition@380000 {
+ label = "env2";
+ reg = <0x380000 0x80000>;
+ };
+
+ partition@400000 {
+ label = "dtb";
+ reg = <0x400000 0x80000>;
+ };
+
+ partition@480000 {
+ label = "splash";
+ reg = <0x480000 0x80000>;
+ };
+
+ partition@500000 {
+ label = "kernel";
+ reg = <0x500000 0x800000>;
+ };
+
+ partition@d00000 {
+ label = "rootfs";
+ reg = <0xd00000 0xf300000>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ phy-reset-gpios = <&gpio4 13 0>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-apf28dev.dts b/arch/arm/boot/dts/imx28-apf28dev.dts
new file mode 100644
index 00000000000..6d8865bfb4b
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-apf28dev.dts
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Armadeus Systems - <support@armadeus.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* APF28Dev is a docking board for the APF28 SOM */
+/include/ "imx28-apf28.dts"
+
+/ {
+ model = "Armadeus Systems APF28Dev docking/development board";
+ compatible = "armadeus,imx28-apf28dev", "armadeus,imx28-apf28", "fsl,imx28";
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_apf28dev>;
+
+ hog_pins_apf28dev: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1103 /* MX28_PAD_LCD_D16__GPIO_1_16 */
+ 0x1113 /* MX28_PAD_LCD_D17__GPIO_1_17 */
+ 0x1123 /* MX28_PAD_LCD_D18__GPIO_1_18 */
+ 0x1133 /* MX28_PAD_LCD_D19__GPIO_1_19 */
+ 0x1143 /* MX28_PAD_LCD_D20__GPIO_1_20 */
+ 0x1153 /* MX28_PAD_LCD_D21__GPIO_1_21 */
+ 0x1163 /* MX28_PAD_LCD_D22__GPIO_1_22 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_pins_apf28dev: lcdif-apf28dev@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
+ 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
+ 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
+ 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_16bit_pins_a
+ &lcdif_pins_apf28dev>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+ };
+
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3_pins_a &pwm4_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+
+ usbphy1: usbphy@8007e000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ vbus-supply = <&reg_usb0_vbus>;
+ status = "okay";
+ };
+
+ usb1: usb@80090000 {
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ phy-reset-gpios = <&gpio0 23 0>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_usb0_vbus: usb0_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 23 1>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user {
+ label = "Heartbeat";
+ gpios = <&gpio0 21 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+
+ pwms = <&pwm 3 191000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-cfa10036.dts b/arch/arm/boot/dts/imx28-cfa10036.dts
index c03a577beca..1594694532b 100644
--- a/arch/arm/boot/dts/imx28-cfa10036.dts
+++ b/arch/arm/boot/dts/imx28-cfa10036.dts
@@ -22,6 +22,31 @@
apb@80000000 {
apbh@80000000 {
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_cfa10036>;
+
+ hog_pins_cfa10036: hog-10036@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2073 /* MX28_PAD_SSP0_D7__GPIO_2_7 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ led_pins_cfa10036: leds-10036@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3043 /* MX28_PAD_AUART1_RX__GPIO_3_4 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+
ssp0: ssp@80010000 {
compatible = "fsl,imx28-mmc";
pinctrl-names = "default";
@@ -33,16 +58,37 @@
};
apbx@80040000 {
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm4_pins_a>;
+ status = "okay";
+ };
+
duart: serial@80074000 {
pinctrl-names = "default";
pinctrl-0 = <&duart_pins_b>;
status = "okay";
};
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_b>;
+ status = "okay";
+
+ ssd1307: oled@3c {
+ compatible = "solomon,ssd1307fb-i2c";
+ reg = <0x3c>;
+ pwms = <&pwm 4 3000>;
+ reset-gpios = <&gpio2 7 0>;
+ };
+ };
};
};
leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_cfa10036>;
power {
gpios = <&gpio3 4 1>;
diff --git a/arch/arm/boot/dts/imx28-cfa10049.dts b/arch/arm/boot/dts/imx28-cfa10049.dts
index 05c892e931e..bdc80a4453d 100644
--- a/arch/arm/boot/dts/imx28-cfa10049.dts
+++ b/arch/arm/boot/dts/imx28-cfa10049.dts
@@ -22,6 +22,22 @@
apb@80000000 {
apbh@80000000 {
pinctrl@80018000 {
+ pinctrl-names = "default", "default";
+ pinctrl-1 = <&hog_pins_cfa10049>;
+
+ hog_pins_cfa10049: hog-10049@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0073 /* MX28_PAD_GPMI_D7__GPIO_0_7 */
+ 0x1163 /* MX28_PAD_LCD_D22__GPIO_1_22 */
+ 0x1173 /* MX28_PAD_LCD_D23__GPIO_1_23 */
+ 0x2153 /* MX28_PAD_SSP2_D5__GPIO_2_21 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
spi3_pins_cfa10049: spi3-cfa10049@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -29,6 +45,7 @@
0x01c1 /* MX28_PAD_GPMI_RESETN__SSP3_CMD */
0x0111 /* MX28_PAD_GPMI_CE1N__SSP3_D3 */
0x01a2 /* MX28_PAD_GPMI_ALE__SSP3_D4 */
+ 0x01b2 /* MX28_PAD_GPMI_CLE__SSP3_D5 */
>;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
@@ -60,6 +77,11 @@
spi-max-frequency = <100000>;
};
+ dac0: dh2228@2 {
+ compatible = "rohm,dh2228fv";
+ reg = <2>;
+ spi-max-frequency = <100000>;
+ };
};
};
@@ -70,6 +92,30 @@
status = "okay";
};
+ i2cmux {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mux-gpios = <&gpio1 22 0 &gpio1 23 0>;
+ i2c-parent = <&i2c1>;
+
+ i2c@0 {
+ reg = <0>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ };
+
+ i2c@2 {
+ reg = <2>;
+ };
+
+ i2c@3 {
+ reg = <3>;
+ };
+ };
+
usbphy1: usbphy@8007e000 {
status = "okay";
};
@@ -96,4 +142,15 @@
gpio = <&gpio0 7 1>;
};
};
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ phy-reset-gpios = <&gpio2 21 0>;
+ phy-reset-duration = <100>;
+ status = "okay";
+ };
+ };
};
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index a0ad71ca3a4..2da316e0440 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -76,7 +76,6 @@
0x20c3 /* MX28_PAD_SSP1_SCK__GPIO_2_12 */
0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
- 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
0x3083 /* MX28_PAD_AUART2_RX__GPIO_3_8 */
0x3093 /* MX28_PAD_AUART2_TX__GPIO_3_9 */
>;
@@ -85,6 +84,16 @@
fsl,pull-up = <0>;
};
+ led_pin_gpio3_5: led_gpio3_5@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
gpmi_pins_evk: gpmi-nand-evk@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -288,6 +297,8 @@
leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_gpio3_5>;
user {
label = "Heartbeat";
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
new file mode 100644
index 00000000000..e6cde8aa7ff
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "SchulerControl GmbH, SC SPS 1";
+ compatible = "schulercontrol,imx28-sps1", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0003 /* MX28_PAD_GPMI_D00__GPIO_0_0 */
+ 0x0033 /* MX28_PAD_GPMI_D03__GPIO_0_3 */
+ 0x0063 /* MX28_PAD_GPMI_D06__GPIO_0_6 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ };
+
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ ssp2: ssp@80014000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "everspin,mr25h256", "mr25h256";
+ spi-max-frequency = <40000000>;
+ reg = <0>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ rtc: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ eeprom: eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ pagesize = <32>;
+ };
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ vbus-supply = <&reg_usb0_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usbphy0_pins_b>;
+ status = "okay";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_usb0_vbus: usb0_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 9 0>;
+ };
+ };
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "gpio-leds";
+ status = "okay";
+
+ led@1 {
+ label = "sps1-1:yellow:user";
+ gpios = <&gpio0 6 0>;
+ linux,default-trigger = "heartbeat";
+ reg = <0>;
+ };
+
+ led@2 {
+ label = "sps1-2:red:user";
+ gpios = <&gpio0 3 0>;
+ linux,default-trigger = "heartbeat";
+ reg = <1>;
+ };
+
+ led@3 {
+ label = "sps1-3:red:user";
+ gpios = <&gpio0 0 0>;
+ linux,default-trigger = "heartbeat";
+ reg = <2>;
+ };
+
+ };
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index b4587b27ae4..13b7053d799 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -492,6 +492,16 @@
fsl,pull-up = <0>;
};
+ pwm3_pins_a: pwm3@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x31c0 /* MX28_PAD_PWM3__PWM_3 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
pwm4_pins_a: pwm4@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -535,6 +545,31 @@
fsl,pull-up = <0>;
};
+ lcdif_16bit_pins_a: lcdif-16bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
+ 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
+ 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
+ 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
+ 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
+ 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
+ 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
+ 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
+ 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
+ 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
+ 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
+ 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
+ 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
+ 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
+ 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
+ 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
can0_pins_a: can0@0 {
reg = <0>;
fsl,pinmux-ids = <
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 2781e47cff0..1f5d45eff45 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -83,7 +83,7 @@
reg = <0x70000000 0x40000>;
ranges;
- esdhc@70004000 { /* ESDHC1 */
+ esdhc1: esdhc@70004000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70004000 0x4000>;
interrupts = <1>;
@@ -92,12 +92,13 @@
status = "disabled";
};
- esdhc@70008000 { /* ESDHC2 */
+ esdhc2: esdhc@70008000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70008000 0x4000>;
interrupts = <2>;
clocks = <&clks 45>, <&clks 0>, <&clks 72>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
@@ -110,7 +111,7 @@
status = "disabled";
};
- ecspi@70010000 { /* ECSPI1 */
+ ecspi1: ecspi@70010000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx51-ecspi";
@@ -131,47 +132,49 @@
status = "disabled";
};
- esdhc@70020000 { /* ESDHC3 */
+ esdhc3: esdhc@70020000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70020000 0x4000>;
interrupts = <3>;
clocks = <&clks 46>, <&clks 0>, <&clks 73>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- esdhc@70024000 { /* ESDHC4 */
+ esdhc4: esdhc@70024000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70024000 0x4000>;
interrupts = <4>;
clocks = <&clks 47>, <&clks 0>, <&clks 74>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
};
- usb@73f80000 {
+ usbotg: usb@73f80000 {
compatible = "fsl,imx51-usb", "fsl,imx27-usb";
reg = <0x73f80000 0x0200>;
interrupts = <18>;
status = "disabled";
};
- usb@73f80200 {
+ usbh1: usb@73f80200 {
compatible = "fsl,imx51-usb", "fsl,imx27-usb";
reg = <0x73f80200 0x0200>;
interrupts = <14>;
status = "disabled";
};
- usb@73f80400 {
+ usbh2: usb@73f80400 {
compatible = "fsl,imx51-usb", "fsl,imx27-usb";
reg = <0x73f80400 0x0200>;
interrupts = <16>;
status = "disabled";
};
- usb@73f80600 {
+ usbh3: usb@73f80600 {
compatible = "fsl,imx51-usb", "fsl,imx27-usb";
reg = <0x73f80600 0x0200>;
interrupts = <17>;
@@ -218,14 +221,14 @@
#interrupt-cells = <2>;
};
- wdog@73f98000 { /* WDOG1 */
+ wdog1: wdog@73f98000 {
compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
reg = <0x73f98000 0x4000>;
interrupts = <58>;
clocks = <&clks 0>;
};
- wdog@73f9c000 { /* WDOG2 */
+ wdog2: wdog@73f9c000 {
compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
reg = <0x73f9c000 0x4000>;
interrupts = <59>;
@@ -233,7 +236,7 @@
status = "disabled";
};
- iomuxc@73fa8000 {
+ iomuxc: iomuxc@73fa8000 {
compatible = "fsl,imx51-iomuxc";
reg = <0x73fa8000 0x4000>;
@@ -460,7 +463,7 @@
reg = <0x80000000 0x10000000>;
ranges;
- ecspi@83fac000 { /* ECSPI2 */
+ ecspi2: ecspi@83fac000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx51-ecspi";
@@ -471,7 +474,7 @@
status = "disabled";
};
- sdma@83fb0000 {
+ sdma: sdma@83fb0000 {
compatible = "fsl,imx51-sdma", "fsl,imx35-sdma";
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
@@ -480,7 +483,7 @@
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
};
- cspi@83fc0000 {
+ cspi: cspi@83fc0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
@@ -491,7 +494,7 @@
status = "disabled";
};
- i2c@83fc4000 { /* I2C2 */
+ i2c2: i2c@83fc4000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx51-i2c", "fsl,imx21-i2c";
@@ -501,7 +504,7 @@
status = "disabled";
};
- i2c@83fc8000 { /* I2C1 */
+ i2c1: i2c@83fc8000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx51-i2c", "fsl,imx21-i2c";
@@ -521,13 +524,13 @@
status = "disabled";
};
- audmux@83fd0000 {
+ audmux: audmux@83fd0000 {
compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
reg = <0x83fd0000 0x4000>;
status = "disabled";
};
- nand@83fdb000 {
+ nfc: nand@83fdb000 {
compatible = "fsl,imx51-nand";
reg = <0x83fdb000 0x1000 0xcfff0000 0x10000>;
interrupts = <8>;
@@ -545,7 +548,7 @@
status = "disabled";
};
- ethernet@83fec000 {
+ fec: ethernet@83fec000 {
compatible = "fsl,imx51-fec", "fsl,imx27-fec";
reg = <0x83fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index 08948af86d1..b0075537195 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -60,10 +60,17 @@
697 0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
701 0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
868 0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+ 1149 0x80000000 /* MX53_PAD_GPIO_16__GPIO7_11 */
+ >;
+ };
+
+ led_pin_gpio7_7: led_gpio7_7@0 {
+ fsl,pins = <
873 0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
>;
};
};
+
};
uart1: serial@53fbc000 {
@@ -100,76 +107,93 @@
pmic: dialog@48 {
compatible = "dlg,da9053-aa", "dlg,da9052";
reg = <0x48>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <11 0x8>; /* active-low IRQ on GPIO7_11 */
regulators {
- buck0 {
+ buck1_reg: buck1 {
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <2075000>;
+ regulator-always-on;
};
- buck1 {
+ buck2_reg: buck2 {
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <2075000>;
+ regulator-always-on;
};
- buck2 {
+ buck3_reg: buck3 {
regulator-min-microvolt = <925000>;
regulator-max-microvolt = <2500000>;
+ regulator-always-on;
};
- buck3 {
+ buck4_reg: buck4 {
regulator-min-microvolt = <925000>;
regulator-max-microvolt = <2500000>;
+ regulator-always-on;
};
- ldo4 {
+ ldo1_reg: ldo1 {
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
};
- ldo5 {
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo3_reg: ldo3 {
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
- ldo6 {
+ ldo4_reg: ldo4 {
regulator-min-microvolt = <1725000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
- ldo7 {
+ ldo5_reg: ldo5 {
regulator-min-microvolt = <1725000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
- ldo8 {
+ ldo6_reg: ldo6 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
+ regulator-always-on;
};
- ldo9 {
+ ldo7_reg: ldo7 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
+ regulator-always-on;
};
- ldo10 {
+ ldo8_reg: ldo8 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
+ regulator-always-on;
};
- ldo11 {
+ ldo9_reg: ldo9 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <3600000>;
+ regulator-always-on;
};
- ldo12 {
+ ldo10_reg: ldo10 {
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3650000>;
- };
-
- ldo13 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
+ regulator-always-on;
};
};
};
@@ -216,6 +240,8 @@
leds {
compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_gpio7_7>;
user {
label = "Heartbeat";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index da9a047ce4c..552aed4ff98 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -88,21 +88,23 @@
reg = <0x50000000 0x40000>;
ranges;
- esdhc@50004000 { /* ESDHC1 */
+ esdhc1: esdhc@50004000 {
compatible = "fsl,imx53-esdhc";
reg = <0x50004000 0x4000>;
interrupts = <1>;
clocks = <&clks 44>, <&clks 0>, <&clks 71>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- esdhc@50008000 { /* ESDHC2 */
+ esdhc2: esdhc@50008000 {
compatible = "fsl,imx53-esdhc";
reg = <0x50008000 0x4000>;
interrupts = <2>;
clocks = <&clks 45>, <&clks 0>, <&clks 72>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
@@ -115,7 +117,7 @@
status = "disabled";
};
- ecspi@50010000 { /* ECSPI1 */
+ ecspi1: ecspi@50010000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-ecspi", "fsl,imx51-ecspi";
@@ -136,47 +138,49 @@
status = "disabled";
};
- esdhc@50020000 { /* ESDHC3 */
+ esdhc3: esdhc@50020000 {
compatible = "fsl,imx53-esdhc";
reg = <0x50020000 0x4000>;
interrupts = <3>;
clocks = <&clks 46>, <&clks 0>, <&clks 73>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- esdhc@50024000 { /* ESDHC4 */
+ esdhc4: esdhc@50024000 {
compatible = "fsl,imx53-esdhc";
reg = <0x50024000 0x4000>;
interrupts = <4>;
clocks = <&clks 47>, <&clks 0>, <&clks 74>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
};
- usb@53f80000 {
+ usbotg: usb@53f80000 {
compatible = "fsl,imx53-usb", "fsl,imx27-usb";
reg = <0x53f80000 0x0200>;
interrupts = <18>;
status = "disabled";
};
- usb@53f80200 {
+ usbh1: usb@53f80200 {
compatible = "fsl,imx53-usb", "fsl,imx27-usb";
reg = <0x53f80200 0x0200>;
interrupts = <14>;
status = "disabled";
};
- usb@53f80400 {
+ usbh2: usb@53f80400 {
compatible = "fsl,imx53-usb", "fsl,imx27-usb";
reg = <0x53f80400 0x0200>;
interrupts = <16>;
status = "disabled";
};
- usb@53f80600 {
+ usbh3: usb@53f80600 {
compatible = "fsl,imx53-usb", "fsl,imx27-usb";
reg = <0x53f80600 0x0200>;
interrupts = <17>;
@@ -223,14 +227,14 @@
#interrupt-cells = <2>;
};
- wdog@53f98000 { /* WDOG1 */
+ wdog1: wdog@53f98000 {
compatible = "fsl,imx53-wdt", "fsl,imx21-wdt";
reg = <0x53f98000 0x4000>;
interrupts = <58>;
clocks = <&clks 0>;
};
- wdog@53f9c000 { /* WDOG2 */
+ wdog2: wdog@53f9c000 {
compatible = "fsl,imx53-wdt", "fsl,imx21-wdt";
reg = <0x53f9c000 0x4000>;
interrupts = <59>;
@@ -238,7 +242,7 @@
status = "disabled";
};
- iomuxc@53fa8000 {
+ iomuxc: iomuxc@53fa8000 {
compatible = "fsl,imx53-iomuxc";
reg = <0x53fa8000 0x4000>;
@@ -338,6 +342,24 @@
};
};
+ can1 {
+ pinctrl_can1_1: can1grp-1 {
+ fsl,pins = <
+ 847 0x80000000 /* MX53_PAD_PATA_INTRQ__CAN1_TXCAN */
+ 853 0x80000000 /* MX53_PAD_PATA_DIOR__CAN1_RXCAN */
+ >;
+ };
+ };
+
+ can2 {
+ pinctrl_can2_1: can2grp-1 {
+ fsl,pins = <
+ 67 0x80000000 /* MX53_PAD_KEY_COL4__CAN2_TXCAN */
+ 74 0x80000000 /* MX53_PAD_KEY_ROW4__CAN2_RXCAN */
+ >;
+ };
+ };
+
i2c1 {
pinctrl_i2c1_1: i2c1grp-1 {
fsl,pins = <
@@ -356,6 +378,15 @@
};
};
+ i2c3 {
+ pinctrl_i2c3_1: i2c3grp-1 {
+ fsl,pins = <
+ 1102 0xc0000000 /* MX53_PAD_GPIO_6__I2C3_SDA */
+ 1130 0xc0000000 /* MX53_PAD_GPIO_5__I2C3_SCL */
+ >;
+ };
+ };
+
uart1 {
pinctrl_uart1_1: uart1grp-1 {
fsl,pins = <
@@ -391,6 +422,25 @@
>;
};
};
+
+ uart4 {
+ pinctrl_uart4_1: uart4grp-1 {
+ fsl,pins = <
+ 11 0x1c5 /* MX53_PAD_KEY_COL0__UART4_TXD_MUX */
+ 18 0x1c5 /* MX53_PAD_KEY_ROW0__UART4_RXD_MUX */
+ >;
+ };
+ };
+
+ uart5 {
+ pinctrl_uart5_1: uart5grp-1 {
+ fsl,pins = <
+ 24 0x1c5 /* MX53_PAD_KEY_COL1__UART5_TXD_MUX */
+ 31 0x1c5 /* MX53_PAD_KEY_ROW1__UART5_RXD_MUX */
+ >;
+ };
+ };
+
};
pwm1: pwm@53fb4000 {
@@ -484,7 +534,7 @@
#interrupt-cells = <2>;
};
- i2c@53fec000 { /* I2C3 */
+ i2c3: i2c@53fec000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-i2c", "fsl,imx21-i2c";
@@ -520,7 +570,7 @@
status = "disabled";
};
- ecspi@63fac000 { /* ECSPI2 */
+ ecspi2: ecspi@63fac000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-ecspi", "fsl,imx51-ecspi";
@@ -531,7 +581,7 @@
status = "disabled";
};
- sdma@63fb0000 {
+ sdma: sdma@63fb0000 {
compatible = "fsl,imx53-sdma", "fsl,imx35-sdma";
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
@@ -540,7 +590,7 @@
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
};
- cspi@63fc0000 {
+ cspi: cspi@63fc0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
@@ -551,7 +601,7 @@
status = "disabled";
};
- i2c@63fc4000 { /* I2C2 */
+ i2c2: i2c@63fc4000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-i2c", "fsl,imx21-i2c";
@@ -561,7 +611,7 @@
status = "disabled";
};
- i2c@63fc8000 { /* I2C1 */
+ i2c1: i2c@63fc8000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-i2c", "fsl,imx21-i2c";
@@ -581,13 +631,13 @@
status = "disabled";
};
- audmux@63fd0000 {
+ audmux: audmux@63fd0000 {
compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
reg = <0x63fd0000 0x4000>;
status = "disabled";
};
- nand@63fdb000 {
+ nfc: nand@63fdb000 {
compatible = "fsl,imx53-nand";
reg = <0x63fdb000 0x1000 0xf7ff0000 0x10000>;
interrupts = <8>;
@@ -605,7 +655,7 @@
status = "disabled";
};
- ethernet@63fec000 {
+ fec: ethernet@63fec000 {
compatible = "fsl,imx53-fec", "fsl,imx25-fec";
reg = <0x63fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx6q-sabreauto.dts b/arch/arm/boot/dts/imx6q-sabreauto.dts
new file mode 100644
index 00000000000..826e4ad1477
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabreauto.dts
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+ model = "Freescale i.MX6 Quad SABRE Automotive Board";
+ compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x80000000>;
+ };
+
+ soc {
+ aips-bus@02000000 { /* AIPS1 */
+ iomuxc@020e0000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ hog {
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ 1376 0x80000000 /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
+ 13 0x80000000 /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
+ >;
+ };
+ };
+ };
+ };
+
+ aips-bus@02100000 { /* AIPS2 */
+ uart4: serial@021f0000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4_1>;
+ status = "okay";
+ };
+
+ ethernet@02188000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet_2>;
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
+ usdhc@02198000 { /* uSDHC3 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1>;
+ cd-gpios = <&gpio6 15 0>;
+ wp-gpios = <&gpio1 13 0>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
index e596c28c214..a42402562b7 100644
--- a/arch/arm/boot/dts/imx6q-sabresd.dts
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -38,6 +38,8 @@
hog {
pinctrl_hog: hoggrp {
fsl,pins = <
+ 1004 0x80000000 /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
+ 1012 0x80000000 /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
1402 0x80000000 /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
1410 0x80000000 /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
1418 0x80000000 /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
@@ -73,4 +75,20 @@
};
};
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&gpio1 4 0>;
+ linux,code = <115>; /* KEY_VOLUMEUP */
+ };
+
+ volume-down {
+ label = "Volume Down";
+ gpios = <&gpio1 5 0>;
+ linux,code = <114>; /* KEY_VOLUMEDOWN */
+ };
+ };
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index cce1d874c7a..d6265ca9711 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -36,6 +36,14 @@
compatible = "arm,cortex-a9";
reg = <0>;
next-level-cache = <&L2>;
+ operating-points = <
+ /* kHz uV */
+ 792000 1100000
+ 396000 950000
+ 198000 850000
+ >;
+ clock-latency = <61036>; /* two CLK32 periods */
+ cpu0-supply = <&reg_cpu>;
};
cpu@1 {
@@ -100,7 +108,7 @@
clocks = <&clks 106>;
};
- gpmi-nand@00112000 {
+ nfc: gpmi-nand@00112000 {
compatible = "fsl,imx6q-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
@@ -144,12 +152,12 @@
reg = <0x02000000 0x40000>;
ranges;
- spdif@02004000 {
+ spdif: spdif@02004000 {
reg = <0x02004000 0x4000>;
interrupts = <0 52 0x04>;
};
- ecspi@02008000 { /* eCSPI1 */
+ ecspi1: ecspi@02008000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
@@ -160,7 +168,7 @@
status = "disabled";
};
- ecspi@0200c000 { /* eCSPI2 */
+ ecspi2: ecspi@0200c000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
@@ -171,7 +179,7 @@
status = "disabled";
};
- ecspi@02010000 { /* eCSPI3 */
+ ecspi3: ecspi@02010000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
@@ -182,7 +190,7 @@
status = "disabled";
};
- ecspi@02014000 { /* eCSPI4 */
+ ecspi4: ecspi@02014000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
@@ -193,7 +201,7 @@
status = "disabled";
};
- ecspi@02018000 { /* eCSPI5 */
+ ecspi5: ecspi@02018000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
@@ -213,7 +221,7 @@
status = "disabled";
};
- esai@02024000 {
+ esai: esai@02024000 {
reg = <0x02024000 0x4000>;
interrupts = <0 51 0x04>;
};
@@ -248,7 +256,7 @@
status = "disabled";
};
- asrc@02034000 {
+ asrc: asrc@02034000 {
reg = <0x02034000 0x4000>;
interrupts = <0 50 0x04>;
};
@@ -258,7 +266,7 @@
};
};
- vpu@02040000 {
+ vpu: vpu@02040000 {
reg = <0x02040000 0x3c000>;
interrupts = <0 3 0x04 0 12 0x04>;
};
@@ -267,7 +275,7 @@
reg = <0x0207c000 0x4000>;
};
- pwm@02080000 { /* PWM1 */
+ pwm1: pwm@02080000 {
#pwm-cells = <2>;
compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
reg = <0x02080000 0x4000>;
@@ -276,7 +284,7 @@
clock-names = "ipg", "per";
};
- pwm@02084000 { /* PWM2 */
+ pwm2: pwm@02084000 {
#pwm-cells = <2>;
compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
reg = <0x02084000 0x4000>;
@@ -285,7 +293,7 @@
clock-names = "ipg", "per";
};
- pwm@02088000 { /* PWM3 */
+ pwm3: pwm@02088000 {
#pwm-cells = <2>;
compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
reg = <0x02088000 0x4000>;
@@ -294,7 +302,7 @@
clock-names = "ipg", "per";
};
- pwm@0208c000 { /* PWM4 */
+ pwm4: pwm@0208c000 {
#pwm-cells = <2>;
compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
reg = <0x0208c000 0x4000>;
@@ -303,17 +311,17 @@
clock-names = "ipg", "per";
};
- flexcan@02090000 { /* CAN1 */
+ can1: flexcan@02090000 {
reg = <0x02090000 0x4000>;
interrupts = <0 110 0x04>;
};
- flexcan@02094000 { /* CAN2 */
+ can2: flexcan@02094000 {
reg = <0x02094000 0x4000>;
interrupts = <0 111 0x04>;
};
- gpt@02098000 {
+ gpt: gpt@02098000 {
compatible = "fsl,imx6q-gpt";
reg = <0x02098000 0x4000>;
interrupts = <0 55 0x04>;
@@ -389,19 +397,19 @@
#interrupt-cells = <2>;
};
- kpp@020b8000 {
+ kpp: kpp@020b8000 {
reg = <0x020b8000 0x4000>;
interrupts = <0 82 0x04>;
};
- wdog@020bc000 { /* WDOG1 */
+ wdog1: wdog@020bc000 {
compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
reg = <0x020bc000 0x4000>;
interrupts = <0 80 0x04>;
clocks = <&clks 0>;
};
- wdog@020c0000 { /* WDOG2 */
+ wdog2: wdog@020c0000 {
compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
reg = <0x020c0000 0x4000>;
interrupts = <0 81 0x04>;
@@ -463,7 +471,7 @@
anatop-max-voltage = <2750000>;
};
- regulator-vddcore@140 {
+ reg_cpu: regulator-vddcore@140 {
compatible = "fsl,anatop-regulator";
regulator-name = "cpu";
regulator-min-microvolt = <725000>;
@@ -521,27 +529,35 @@
};
snvs@020cc000 {
- reg = <0x020cc000 0x4000>;
- interrupts = <0 19 0x04 0 20 0x04>;
+ compatible = "fsl,sec-v4.0-mon", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x020cc000 0x4000>;
+
+ snvs-rtc-lp@34 {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ reg = <0x34 0x58>;
+ interrupts = <0 19 0x04 0 20 0x04>;
+ };
};
- epit@020d0000 { /* EPIT1 */
+ epit1: epit@020d0000 { /* EPIT1 */
reg = <0x020d0000 0x4000>;
interrupts = <0 56 0x04>;
};
- epit@020d4000 { /* EPIT2 */
+ epit2: epit@020d4000 { /* EPIT2 */
reg = <0x020d4000 0x4000>;
interrupts = <0 57 0x04>;
};
- src@020d8000 {
+ src: src@020d8000 {
compatible = "fsl,imx6q-src";
reg = <0x020d8000 0x4000>;
interrupts = <0 91 0x04 0 96 0x04>;
};
- gpc@020dc000 {
+ gpc: gpc@020dc000 {
compatible = "fsl,imx6q-gpc";
reg = <0x020dc000 0x4000>;
interrupts = <0 89 0x04 0 90 0x04>;
@@ -552,7 +568,7 @@
reg = <0x020e0000 0x38>;
};
- iomuxc@020e0000 {
+ iomuxc: iomuxc@020e0000 {
compatible = "fsl,imx6q-iomuxc";
reg = <0x020e0000 0x4000>;
@@ -765,17 +781,17 @@
};
};
- dcic@020e4000 { /* DCIC1 */
+ dcic1: dcic@020e4000 {
reg = <0x020e4000 0x4000>;
interrupts = <0 124 0x04>;
};
- dcic@020e8000 { /* DCIC2 */
+ dcic2: dcic@020e8000 {
reg = <0x020e8000 0x4000>;
interrupts = <0 125 0x04>;
};
- sdma@020ec000 {
+ sdma: sdma@020ec000 {
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <0 2 0x04>;
@@ -801,7 +817,7 @@
reg = <0x0217c000 0x4000>;
};
- usb@02184000 { /* USB OTG */
+ usbotg: usb@02184000 {
compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
reg = <0x02184000 0x200>;
interrupts = <0 43 0x04>;
@@ -811,7 +827,7 @@
status = "disabled";
};
- usb@02184200 { /* USB1 */
+ usbh1: usb@02184200 {
compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
reg = <0x02184200 0x200>;
interrupts = <0 40 0x04>;
@@ -821,7 +837,7 @@
status = "disabled";
};
- usb@02184400 { /* USB2 */
+ usbh2: usb@02184400 {
compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
reg = <0x02184400 0x200>;
interrupts = <0 41 0x04>;
@@ -830,7 +846,7 @@
status = "disabled";
};
- usb@02184600 { /* USB3 */
+ usbh3: usb@02184600 {
compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
reg = <0x02184600 0x200>;
interrupts = <0 42 0x04>;
@@ -839,14 +855,14 @@
status = "disabled";
};
- usbmisc: usbmisc@02184800 {
+ usbmisc: usbmisc@02184800 {
#index-cells = <1>;
compatible = "fsl,imx6q-usbmisc";
reg = <0x02184800 0x200>;
clocks = <&clks 162>;
};
- ethernet@02188000 {
+ fec: ethernet@02188000 {
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupts = <0 118 0x04 0 119 0x04>;
@@ -860,43 +876,47 @@
interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
};
- usdhc@02190000 { /* uSDHC1 */
+ usdhc1: usdhc@02190000 {
compatible = "fsl,imx6q-usdhc";
reg = <0x02190000 0x4000>;
interrupts = <0 22 0x04>;
clocks = <&clks 163>, <&clks 163>, <&clks 163>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- usdhc@02194000 { /* uSDHC2 */
+ usdhc2: usdhc@02194000 {
compatible = "fsl,imx6q-usdhc";
reg = <0x02194000 0x4000>;
interrupts = <0 23 0x04>;
clocks = <&clks 164>, <&clks 164>, <&clks 164>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- usdhc@02198000 { /* uSDHC3 */
+ usdhc3: usdhc@02198000 {
compatible = "fsl,imx6q-usdhc";
reg = <0x02198000 0x4000>;
interrupts = <0 24 0x04>;
clocks = <&clks 165>, <&clks 165>, <&clks 165>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- usdhc@0219c000 { /* uSDHC4 */
+ usdhc4: usdhc@0219c000 {
compatible = "fsl,imx6q-usdhc";
reg = <0x0219c000 0x4000>;
interrupts = <0 25 0x04>;
clocks = <&clks 166>, <&clks 166>, <&clks 166>;
clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
status = "disabled";
};
- i2c@021a0000 { /* I2C1 */
+ i2c1: i2c@021a0000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
@@ -906,7 +926,7 @@
status = "disabled";
};
- i2c@021a4000 { /* I2C2 */
+ i2c2: i2c@021a4000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
@@ -916,7 +936,7 @@
status = "disabled";
};
- i2c@021a8000 { /* I2C3 */
+ i2c3: i2c@021a8000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
@@ -930,12 +950,12 @@
reg = <0x021ac000 0x4000>;
};
- mmdc@021b0000 { /* MMDC0 */
+ mmdc0: mmdc@021b0000 { /* MMDC0 */
compatible = "fsl,imx6q-mmdc";
reg = <0x021b0000 0x4000>;
};
- mmdc@021b4000 { /* MMDC1 */
+ mmdc1: mmdc@021b4000 { /* MMDC1 */
reg = <0x021b4000 0x4000>;
};
@@ -963,7 +983,7 @@
interrupts = <0 109 0x04>;
};
- audmux@021d8000 {
+ audmux: audmux@021d8000 {
compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
reg = <0x021d8000 0x4000>;
status = "disabled";
diff --git a/arch/arm/boot/dts/kirkwood-6281.dtsi b/arch/arm/boot/dts/kirkwood-6281.dtsi
new file mode 100644
index 00000000000..d6c9d65cbae
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-6281.dtsi
@@ -0,0 +1,44 @@
+/ {
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+ compatible = "marvell,88f6281-pinctrl";
+ reg = <0x10000 0x20>;
+
+ pmx_nand: pmx-nand {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3",
+ "mpp4", "mpp5", "mpp18",
+ "mpp19";
+ marvell,function = "nand";
+ };
+ pmx_sata0: pmx-sata0 {
+ marvell,pins = "mpp5", "mpp21", "mpp23";
+ marvell,function = "sata0";
+ };
+ pmx_sata1: pmx-sata1 {
+ marvell,pins = "mpp4", "mpp20", "mpp22";
+ marvell,function = "sata1";
+ };
+ pmx_spi: pmx-spi {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3";
+ marvell,function = "spi";
+ };
+ pmx_twsi0: pmx-twsi0 {
+ marvell,pins = "mpp8", "mpp9";
+ marvell,function = "twsi0";
+ };
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp10", "mpp11";
+ marvell,function = "uart0";
+ };
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp13", "mpp14";
+ marvell,function = "uart1";
+ };
+ pmx_sdio: pmx-sdio {
+ marvell,pins = "mpp12", "mpp13", "mpp14",
+ "mpp15", "mpp16", "mpp17";
+ marvell,function = "sdio";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-6282.dtsi b/arch/arm/boot/dts/kirkwood-6282.dtsi
new file mode 100644
index 00000000000..9ae2004d567
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-6282.dtsi
@@ -0,0 +1,45 @@
+/ {
+ ocp@f1000000 {
+
+ pinctrl: pinctrl@10000 {
+ compatible = "marvell,88f6282-pinctrl";
+ reg = <0x10000 0x20>;
+
+ pmx_sata0: pmx-sata0 {
+ marvell,pins = "mpp5", "mpp21", "mpp23";
+ marvell,function = "sata0";
+ };
+ pmx_sata1: pmx-sata1 {
+ marvell,pins = "mpp4", "mpp20", "mpp22";
+ marvell,function = "sata1";
+ };
+ pmx_spi: pmx-spi {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3";
+ marvell,function = "spi";
+ };
+ pmx_twsi0: pmx-twsi0 {
+ marvell,pins = "mpp8", "mpp9";
+ marvell,function = "twsi0";
+ };
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp10", "mpp11";
+ marvell,function = "uart0";
+ };
+
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp13", "mpp14";
+ marvell,function = "uart1";
+ };
+ };
+
+ i2c@11100 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11100 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <32>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-98dx4122.dtsi b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi
new file mode 100644
index 00000000000..3271e4c8ea0
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi
@@ -0,0 +1,31 @@
+/ {
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+ compatible = "marvell,98dx4122-pinctrl";
+ reg = <0x10000 0x20>;
+
+ pmx_nand: pmx-nand {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3",
+ "mpp4", "mpp5", "mpp18",
+ "mpp19";
+ marvell,function = "nand";
+ };
+ pmx_spi: pmx-spi {
+ marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3";
+ marvell,function = "spi";
+ };
+ pmx_twsi0: pmx-twsi0 {
+ marvell,pins = "mpp8", "mpp9";
+ marvell,function = "twsi0";
+ };
+ pmx_uart0: pmx-uart0 {
+ marvell,pins = "mpp10", "mpp11";
+ marvell,function = "uart0";
+ };
+ pmx_uart1: pmx-uart1 {
+ marvell,pins = "mpp13", "mpp14";
+ marvell,function = "uart1";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index 9b32d027282..6875ac00c17 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -1,4 +1,5 @@
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "D-Link DNS NASes (kirkwood-based)";
@@ -35,7 +36,116 @@
6000 2>;
};
+ gpio_poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio1 4 0>;
+ };
+
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_nand &pmx_uart1
+ &pmx_sata0 &pmx_sata1
+ &pmx_led_power
+ &pmx_led_red_right_hdd
+ &pmx_led_red_left_hdd
+ &pmx_led_red_usb_325
+ &pmx_button_power
+ &pmx_led_red_usb_320
+ &pmx_power_off &pmx_power_back_on
+ &pmx_power_sata0 &pmx_power_sata1
+ &pmx_present_sata0 &pmx_present_sata1
+ &pmx_led_white_usb &pmx_fan_tacho
+ &pmx_fan_high_speed &pmx_fan_low_speed
+ &pmx_button_unmount &pmx_button_reset
+ &pmx_temp_alarm >;
+ pinctrl-names = "default";
+
+ pmx_sata0: pmx-sata0 {
+ marvell,pins = "mpp20";
+ marvell,function = "sata1";
+ };
+ pmx_sata1: pmx-sata1 {
+ marvell,pins = "mpp21";
+ marvell,function = "sata0";
+ };
+ pmx_led_power: pmx-led-power {
+ marvell,pins = "mpp26";
+ marvell,function = "gpio";
+ };
+ pmx_led_red_right_hdd: pmx-led-red-right-hdd {
+ marvell,pins = "mpp27";
+ marvell,function = "gpio";
+ };
+ pmx_led_red_left_hdd: pmx-led-red-left-hdd {
+ marvell,pins = "mpp28";
+ marvell,function = "gpio";
+ };
+ pmx_led_red_usb_325: pmx-led-red-usb-325 {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+ pmx_button_power: pmx-button-power {
+ marvell,pins = "mpp34";
+ marvell,function = "gpio";
+ };
+ pmx_led_red_usb_320: pmx-led-red-usb-320 {
+ marvell,pins = "mpp35";
+ marvell,function = "gpio";
+ };
+ pmx_power_off: pmx-power-off {
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+ pmx_power_back_on: pmx-power-back-on {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+ pmx_power_sata0: pmx-power-sata0 {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+ pmx_power_sata1: pmx-power-sata1 {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+ pmx_present_sata0: pmx-present-sata0 {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ pmx_present_sata1: pmx-present-sata1 {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+ pmx_led_white_usb: pmx-led-white-usb {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_fan_tacho: pmx-fan-tacho {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ pmx_fan_high_speed: pmx-fan-high-speed {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+ pmx_fan_low_speed: pmx-fan-low-speed {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_button_unmount: pmx-button-unmount {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ pmx_button_reset: pmx-button-reset {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ pmx_temp_alarm: pmx-temp-alarm {
+ marvell,pins = "mpp49";
+ marvell,function = "gpio";
+ };
+ };
sata@80000 {
status = "okay";
nr-ports = <2>;
@@ -43,6 +153,7 @@
nand@3000000 {
status = "okay";
+ chip-delay = <35>;
partition@0 {
label = "u-boot";
@@ -76,4 +187,33 @@
};
};
};
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sata0_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "SATA0 Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio1 7 0>;
+ };
+ sata1_power: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "SATA1 Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio1 8 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 08a582414b8..2e3dd34e21a 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "Seagate FreeAgent Dockstar";
@@ -16,6 +17,25 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_usb_power_enable
+ &pmx_led_green &pmx_led_orange >;
+ pinctrl-names = "default";
+
+ pmx_usb_power_enable: pmx-usb-power-enable {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+ pmx_led_green: pmx-led-green {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_led_orange: pmx-led-orange {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
@@ -54,4 +74,21 @@
gpios = <&gpio1 15 1>;
};
};
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 29 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index 26e281fbf6b..f2d386c95b0 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "Globalscale Technologies Dreamplug";
@@ -16,6 +17,26 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_spi
+ &pmx_led_bluetooth &pmx_led_wifi
+ &pmx_led_wifi_ap >;
+ pinctrl-names = "default";
+
+ pmx_led_bluetooth: pmx-led-bluetooth {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ pmx_led_wifi: pmx-led-wifi {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ pmx_led_wifi_ap: pmx-led-wifi-ap {
+ marvell,pins = "mpp49";
+ marvell,function = "gpio";
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index 7c8238fbb6f..1b133e0c566 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "Seagate GoFlex Net";
@@ -16,6 +17,61 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_usb_power_enable &pmx_led_orange
+ &pmx_led_left_cap_0 &pmx_led_left_cap_1
+ &pmx_led_left_cap_2 &pmx_led_left_cap_3
+ &pmx_led_right_cap_0 &pmx_led_right_cap_1
+ &pmx_led_right_cap_2 &pmx_led_right_cap_3
+ >;
+ pinctrl-names = "default";
+
+ pmx_usb_power_enable: pmx-usb-power-enable {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+ pmx_led_right_cap_0: pmx-led_right_cap_0 {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+ pmx_led_right_cap_1: pmx-led_right_cap_1 {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+ pmx_led_right_cap_2: pmx-led_right_cap_2 {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+ pmx_led_right_cap_3: pmx-led_right_cap_3 {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ pmx_led_left_cap_0: pmx-led_left_cap_0 {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+ pmx_led_left_cap_1: pmx-led_left_cap_1 {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_led_left_cap_2: pmx-led_left_cap_2 {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ pmx_led_left_cap_3: pmx-led_left_cap_3 {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+ pmx_led_green: pmx-led_green {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_led_orange: pmx-led_orange {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
@@ -96,4 +152,21 @@
gpios = <&gpio1 9 0>;
};
};
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 29 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index 66794ed75ff..71902da33d6 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "RaidSonic ICY BOX IB-NAS62x0 (Rev B)";
@@ -16,6 +17,39 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_nand
+ &pmx_led_os_red &pmx_power_off
+ &pmx_led_os_green &pmx_led_usb_transfer
+ &pmx_button_reset &pmx_button_usb_copy >;
+ pinctrl-names = "default";
+
+ pmx_led_os_red: pmx-led-os-red {
+ marvell,pins = "mpp22";
+ marvell,function = "gpio";
+ };
+ pmx_power_off: pmx-power-off {
+ marvell,pins = "mpp24";
+ marvell,function = "gpio";
+ };
+ pmx_led_os_green: pmx-led-os-green {
+ marvell,pins = "mpp25";
+ marvell,function = "gpio";
+ };
+ pmx_led_usb_transfer: pmx-led-usb-transfer {
+ marvell,pins = "mpp27";
+ marvell,function = "gpio";
+ };
+ pmx_button_reset: pmx-button-reset {
+ marvell,pins = "mpp28";
+ marvell,function = "gpio";
+ };
+ pmx_button_usb_copy: pmx-button-usb-copy {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
status = "okay";
@@ -79,4 +113,10 @@
gpios = <&gpio0 27 0>;
};
};
+ gpio_poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio0 24 0>;
+ };
+
+
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index d97cd9d4753..504f16be8b5 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "Iomega Iconnect";
@@ -18,6 +19,56 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_gpio_12 &pmx_gpio_35
+ &pmx_gpio_41 &pmx_gpio_42
+ &pmx_gpio_43 &pmx_gpio_44
+ &pmx_gpio_45 &pmx_gpio_46
+ &pmx_gpio_47 &pmx_gpio_48 >;
+ pinctrl-names = "default";
+
+ pmx_gpio_12: pmx-gpio-12 {
+ marvell,pins = "mpp12";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_35: pmx-gpio-35 {
+ marvell,pins = "mpp35";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_41: pmx-gpio-41 {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_42: pmx-gpio-42 {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_43: pmx-gpio-43 {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_44: pmx-gpio-44 {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_45: pmx-gpio-45 {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_46: pmx-gpio-46 {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_47: pmx-gpio-47 {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ pmx_gpio_48: pmx-gpio-48 {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+ };
i2c@11000 {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 865aeec40a2..6cae4599c4b 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "Iomega StorCenter ix2-200";
@@ -16,6 +17,94 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_button_reset &pmx_button_power
+ &pmx_led_backup &pmx_led_power
+ &pmx_button_otb &pmx_led_rebuild
+ &pmx_led_health
+ &pmx_led_sata_brt_ctrl_1
+ &pmx_led_sata_brt_ctrl_2
+ &pmx_led_backup_brt_ctrl_1
+ &pmx_led_backup_brt_ctrl_2
+ &pmx_led_power_brt_ctrl_1
+ &pmx_led_power_brt_ctrl_2
+ &pmx_led_health_brt_ctrl_1
+ &pmx_led_health_brt_ctrl_2
+ &pmx_led_rebuild_brt_ctrl_1
+ &pmx_led_rebuild_brt_ctrl_2 >;
+ pinctrl-names = "default";
+
+ pmx_button_reset: pmx-button-reset {
+ marvell,pins = "mpp12";
+ marvell,function = "gpio";
+ };
+ pmx_button_power: pmx-button-power {
+ marvell,pins = "mpp14";
+ marvell,function = "gpio";
+ };
+ pmx_led_backup: pmx-led-backup {
+ marvell,pins = "mpp15";
+ marvell,function = "gpio";
+ };
+ pmx_led_power: pmx-led-power {
+ marvell,pins = "mpp16";
+ marvell,function = "gpio";
+ };
+ pmx_button_otb: pmx-button-otb {
+ marvell,pins = "mpp35";
+ marvell,function = "gpio";
+ };
+ pmx_led_rebuild: pmx-led-rebuild {
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+ pmx_led_health: pmx-led_health {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+ pmx_led_sata_brt_ctrl_1: pmx-led-sata-brt-ctrl-1 {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+ pmx_led_sata_brt_ctrl_2: pmx-led-sata-brt-ctrl-2 {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+ pmx_led_backup_brt_ctrl_1: pmx-led-backup-brt-ctrl-1 {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+ pmx_led_backup_brt_ctrl_2: pmx-led-backup-brt-ctrl-2 {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ pmx_led_power_brt_ctrl_1: pmx-led-power-brt-ctrl-1 {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+ pmx_led_power_brt_ctrl_2: pmx-led-power-brt-ctrl-2 {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_led_health_brt_ctrl_1: pmx-led-health-brt-ctrl-1 {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ pmx_led_health_brt_ctrl_2: pmx-led-health-brt-ctrl-2 {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+ pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+
+ };
i2c@11000 {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 75bdb93fed2..8db3123ac80 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-98dx4122.dtsi"
/ {
model = "Keymile Kirkwood Reference Design";
@@ -16,6 +17,22 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_nand &pmx_i2c_gpio_sda
+ &pmx_i2c_gpio_scl >;
+ pinctrl-names = "default";
+
+ pmx_i2c_gpio_sda: pmx-gpio-sda {
+ marvell,pins = "mpp8";
+ marvell,function = "gpio";
+ };
+ pmx_i2c_gpio_scl: pmx-gpio-scl {
+ marvell,pins = "mpp9";
+ marvell,function = "gpio";
+ };
+ };
+
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
index 798e60eeedf..37d45c4f88f 100644
--- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi
+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
@@ -1,4 +1,5 @@
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
chosen {
@@ -6,6 +7,71 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_power_hdd &pmx_usb_vbus
+ &pmx_fan_low &pmx_fan_high
+ &pmx_led_function_red &pmx_led_alarm
+ &pmx_led_info &pmx_led_power
+ &pmx_fan_lock &pmx_button_function
+ &pmx_power_switch &pmx_power_auto_switch
+ &pmx_led_function_blue >;
+ pinctrl-names = "default";
+
+ pmx_power_hdd: pmx-power-hdd {
+ marvell,pins = "mpp10";
+ marvell,function = "gpo";
+ };
+ pmx_usb_vbus: pmx-usb-vbus {
+ marvell,pins = "mpp11";
+ marvell,function = "gpio";
+ };
+ pmx_fan_high: pmx-fan-high {
+ marvell,pins = "mpp18";
+ marvell,function = "gpo";
+ };
+ pmx_fan_low: pmx-fan-low {
+ marvell,pins = "mpp19";
+ marvell,function = "gpo";
+ };
+ pmx_led_function_blue: pmx-led-function-blue {
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+ pmx_led_alarm: pmx-led-alarm {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+ pmx_led_info: pmx-led-info {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+ pmx_led_power: pmx-led-power {
+ marvell,pins = "mpp39";
+ marvell,function = "gpio";
+ };
+ pmx_fan_lock: pmx-fan-lock {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+ pmx_button_function: pmx-button-function {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+ pmx_power_switch: pmx-power-switch {
+ marvell,pins = "mpp42";
+ marvell,function = "gpio";
+ };
+ pmx_power_auto_switch: pmx-power-auto-switch {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_led_function_red: pmx-led-function_red {
+ marvell,pins = "mpp48";
+ marvell,function = "gpio";
+ };
+
+ };
sata@80000 {
status = "okay";
nr-ports = <1>;
@@ -105,4 +171,33 @@
5000 0>;
alarm-gpios = <&gpio1 8 0>;
};
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_power: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "USB Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 11 0>;
+ };
+ hdd_power: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "HDD Power";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&gpio0 10 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index ac3c080bed2..262c6540376 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
model = "MPL CEC4";
@@ -16,6 +17,64 @@
};
ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_nand &pmx_uart0
+ &pmx_led_health &pmx_sdio
+ &pmx_sata0 &pmx_sata1
+ &pmx_led_user1o
+ &pmx_led_user1g &pmx_led_user0o
+ &pmx_led_user0g &pmx_led_misc
+ &pmx_sdio_cd
+ >;
+ pinctrl-names = "default";
+
+ pmx_led_health: pmx-led-health {
+ marvell,pins = "mpp7";
+ marvell,function = "gpo";
+ };
+
+ pmx_sata1: pmx-sata1 {
+ marvell,pins = "mpp34";
+ marvell,function = "sata1";
+ };
+
+ pmx_sata0: pmx-sata0 {
+ marvell,pins = "mpp35";
+ marvell,function = "sata0";
+ };
+
+ pmx_led_user1o: pmx-led-user1o {
+ marvell,pins = "mpp40";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_user1g: pmx-led-user1g {
+ marvell,pins = "mpp41";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_user0o: pmx-led-user0o {
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_user0g: pmx-led-user0g {
+ marvell,pins = "mpp45";
+ marvell,function = "gpio";
+ };
+
+ pmx_led_misc: pmx-led-misc {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+
+ pmx_sdio_cd: pmx-sdio-cd {
+ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+ };
+
i2c@11000 {
status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index 9a2606c8b78..49d3d74d4d3 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6282.dtsi"
/ {
model = "Plat'Home OpenBlocksA6";
@@ -29,11 +30,69 @@
nand@3000000 {
chip-delay = <25>;
status = "okay";
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x90000>;
+ };
+
+ partition@90000 {
+ label = "env";
+ reg = <0x90000 0x44000>;
+ };
+
+ partition@d4000 {
+ label = "test";
+ reg = <0xd4000 0x24000>;
+ };
+
+ partition@f4000 {
+ label = "conf";
+ reg = <0xf4000 0x400000>;
+ };
+
+ partition@4f4000 {
+ label = "linux";
+ reg = <0x4f4000 0x1d20000>;
+ };
+
+ partition@2214000 {
+ label = "user";
+ reg = <0x2214000 0x1dec000>;
+ };
};
sata@80000 {
nr-ports = <1>;
status = "okay";
};
+
+ i2c@11100 {
+ status = "okay";
+
+ s35390a: s35390a@30 {
+ compatible = "s35390a";
+ reg = <0x30>;
+ };
+ };
};
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-red {
+ label = "obsa6:red:stat";
+ gpios = <&gpio1 9 1>;
+ };
+
+ led-green {
+ label = "obsa6:green:stat";
+ gpios = <&gpio1 10 1>;
+ };
+
+ led-yellow {
+ label = "obsa6:yellow:stat";
+ gpios = <&gpio1 11 1>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
index ccbf3275780..8295c833887 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6281.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
@@ -1,8 +1,39 @@
/dts-v1/;
/include/ "kirkwood-ts219.dtsi"
+/include/ "kirkwood-6281.dtsi"
/ {
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_uart0 &pmx_uart1 &pmx_spi
+ &pmx_twsi0 &pmx_sata0 &pmx_sata1
+ &pmx_ram_size &pmx_reset_button
+ &pmx_USB_copy_button &pmx_board_id>;
+ pinctrl-names = "default";
+
+ pmx_ram_size: pmx-ram-size {
+ /* RAM: 0: 256 MB, 1: 512 MB */
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+ pmx_USB_copy_button: pmx-USB-copy-button {
+ marvell,pins = "mpp15";
+ marvell,function = "gpio";
+ };
+ pmx_reset_button: pmx-reset-button {
+ marvell,pins = "mpp16";
+ marvell,function = "gpio";
+ };
+ pmx_board_id: pmx-board-id {
+ /* 0: TS-11x, 1: TS-21x */
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
index fbe9932161a..df3f95dfba3 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6282.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
@@ -1,8 +1,39 @@
/dts-v1/;
/include/ "kirkwood-ts219.dtsi"
+/include/ "kirkwood-6282.dtsi"
/ {
+ ocp@f1000000 {
+ pinctrl: pinctrl@10000 {
+
+ pinctrl-0 = < &pmx_uart0 &pmx_uart1 &pmx_spi
+ &pmx_twsi0 &pmx_sata0 &pmx_sata1
+ &pmx_ram_size &pmx_reset_button
+ &pmx_USB_copy_button &pmx_board_id>;
+ pinctrl-names = "default";
+
+ pmx_ram_size: pmx-ram-size {
+ /* RAM: 0: 256 MB, 1: 512 MB */
+ marvell,pins = "mpp36";
+ marvell,function = "gpio";
+ };
+ pmx_reset_button: pmx-reset-button {
+ marvell,pins = "mpp37";
+ marvell,function = "gpio";
+ };
+ pmx_USB_copy_button: pmx-USB-copy-button {
+ marvell,pins = "mpp43";
+ marvell,function = "gpio";
+ };
+ pmx_board_id: pmx-board-id {
+ /* 0: TS-11x, 1: TS-21x */
+ marvell,pins = "mpp44";
+ marvell,function = "gpio";
+ };
+ };
+ };
+
gpio_keys {
compatible = "gpio-keys";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 4e5b8154a5b..7735cee4a9c 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -4,6 +4,10 @@
compatible = "marvell,kirkwood";
interrupt-parent = <&intc>;
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ };
intc: interrupt-controller {
compatible = "marvell,orion-intc", "marvell,intc";
interrupt-controller;
@@ -19,12 +23,19 @@
#address-cells = <1>;
#size-cells = <1>;
+ core_clk: core-clocks@10030 {
+ compatible = "marvell,kirkwood-core-clock";
+ reg = <0x10030 0x4>;
+ #clock-cells = <1>;
+ };
+
gpio0: gpio@10100 {
compatible = "marvell,orion-gpio";
#gpio-cells = <2>;
gpio-controller;
reg = <0x10100 0x40>;
- ngpio = <32>;
+ ngpios = <32>;
+ interrupt-controller;
interrupts = <35>, <36>, <37>, <38>;
};
@@ -33,7 +44,8 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0x10140 0x40>;
- ngpio = <18>;
+ ngpios = <18>;
+ interrupt-controller;
interrupts = <39>, <40>, <41>;
};
@@ -42,6 +54,7 @@
reg = <0x12000 0x100>;
reg-shift = <2>;
interrupts = <33>;
+ clocks = <&gate_clk 7>;
/* set clock-frequency in board dts */
status = "disabled";
};
@@ -51,6 +64,7 @@
reg = <0x12100 0x100>;
reg-shift = <2>;
interrupts = <34>;
+ clocks = <&gate_clk 7>;
/* set clock-frequency in board dts */
status = "disabled";
};
@@ -68,12 +82,68 @@
cell-index = <0>;
interrupts = <23>;
reg = <0x10600 0x28>;
+ clocks = <&gate_clk 7>;
status = "disabled";
};
+ gate_clk: clock-gating-control@2011c {
+ compatible = "marvell,kirkwood-gating-clock";
+ reg = <0x2011c 0x4>;
+ clocks = <&core_clk 0>;
+ #clock-cells = <1>;
+ };
+
wdt@20300 {
compatible = "marvell,orion-wdt";
reg = <0x20300 0x28>;
+ clocks = <&gate_clk 7>;
+ status = "okay";
+ };
+
+ xor@60800 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60800 0x100
+ 0x60A00 0x100>;
+ status = "okay";
+ clocks = <&gate_clk 8>;
+
+ xor00 {
+ interrupts = <5>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <6>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ xor@60900 {
+ compatible = "marvell,orion-xor";
+ reg = <0x60900 0x100
+ 0xd0B00 0x100>;
+ status = "okay";
+ clocks = <&gate_clk 16>;
+
+ xor00 {
+ interrupts = <7>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <8>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
+ };
+
+ ehci@50000 {
+ compatible = "marvell,orion-ehci";
+ reg = <0x50000 0x1000>;
+ interrupts = <19>;
status = "okay";
};
@@ -81,6 +151,8 @@
compatible = "marvell,orion-sata";
reg = <0x80000 0x5000>;
interrupts = <21>;
+ clocks = <&gate_clk 14>, <&gate_clk 15>;
+ clock-names = "0", "1";
status = "disabled";
};
@@ -94,6 +166,7 @@
reg = <0x3000000 0x400>;
chip-delay = <25>;
/* set partition map and/or chip-delay in board dts */
+ clocks = <&gate_clk 7>;
status = "disabled";
};
@@ -104,6 +177,7 @@
#size-cells = <0>;
interrupts = <29>;
clock-frequency = <100000>;
+ clocks = <&gate_clk 7>;
status = "disabled";
};
@@ -113,6 +187,7 @@
<0xf5000000 0x800>;
reg-names = "regs", "sram";
interrupts = <22>;
+ clocks = <&gate_clk 17>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index e5ffe960dbf..1582f484a86 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -182,6 +182,13 @@
pnx,timeout = <0x64>;
};
+ mpwm: mpwm@400E8000 {
+ compatible = "nxp,lpc3220-motor-pwm";
+ reg = <0x400E8000 0x78>;
+ status = "disabled";
+ #pwm-cells = <2>;
+ };
+
i2cusb: i2c@31020300 {
compatible = "nxp,pnx-i2c";
reg = <0x31020300 0x100>;
diff --git a/arch/arm/boot/dts/omap2420-h4.dts b/arch/arm/boot/dts/omap2420-h4.dts
index 77b84e17c47..9b0d07746cb 100644
--- a/arch/arm/boot/dts/omap2420-h4.dts
+++ b/arch/arm/boot/dts/omap2420-h4.dts
@@ -15,6 +15,6 @@
memory {
device_type = "memory";
- reg = <0x80000000 0x84000000>; /* 64 MB */
+ reg = <0x80000000 0x4000000>; /* 64 MB */
};
};
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index c6f85f0bc53..27f31a5fa49 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -14,7 +14,7 @@
/ {
model = "Calao Systems Snowball platform with device tree";
- compatible = "calaosystems,snowball-a9500";
+ compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
memory {
reg = <0x00000000 0x20000000>;
@@ -147,10 +147,10 @@
};
// External Micro SD slot
- sdi@80126000 {
+ sdi0_per1@80126000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <50000000>;
- bus-width = <8>;
+ bus-width = <4>;
mmc-cap-mmc-highspeed;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
@@ -161,7 +161,7 @@
};
// On-board eMMC
- sdi@80114000 {
+ sdi4_per2@80114000 {
arm,primecell-periphid = <0x10480180>;
max-frequency = <50000000>;
bus-width = <8>;
@@ -214,5 +214,137 @@
cpufreq-cooling {
status = "okay";
};
+
+ prcmu@80157000 {
+ db8500-prcmu-regulators {
+ db8500_vape_reg: db8500_vape {
+ regulator-name = "db8500-vape";
+ };
+
+ db8500_varm_reg: db8500_varm {
+ regulator-name = "db8500-varm";
+ };
+
+ db8500_vmodem_reg: db8500_vmodem {
+ regulator-name = "db8500-vmodem";
+ };
+
+ db8500_vpll_reg: db8500_vpll {
+ regulator-name = "db8500-vpll";
+ };
+
+ db8500_vsmps1_reg: db8500_vsmps1 {
+ regulator-name = "db8500-vsmps1";
+ };
+
+ db8500_vsmps2_reg: db8500_vsmps2 {
+ regulator-name = "db8500-vsmps2";
+ };
+
+ db8500_vsmps3_reg: db8500_vsmps3 {
+ regulator-name = "db8500-vsmps3";
+ };
+
+ db8500_vrf1_reg: db8500_vrf1 {
+ regulator-name = "db8500-vrf1";
+ };
+
+ db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+ regulator-name = "db8500-sva-mmdsp";
+ };
+
+ db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+ regulator-name = "db8500-sva-mmdsp-ret";
+ };
+
+ db8500_sva_pipe_reg: db8500_sva_pipe {
+ regulator-name = "db8500_sva_pipe";
+ };
+
+ db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+ regulator-name = "db8500_sia_mmdsp";
+ };
+
+ db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
+ regulator-name = "db8500-sia-mmdsp-ret";
+ };
+
+ db8500_sia_pipe_reg: db8500_sia_pipe {
+ regulator-name = "db8500-sia-pipe";
+ };
+
+ db8500_sga_reg: db8500_sga {
+ regulator-name = "db8500-sga";
+ };
+
+ db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+ regulator-name = "db8500-b2r2-mcde";
+ };
+
+ db8500_esram12_reg: db8500_esram12 {
+ regulator-name = "db8500-esram12";
+ };
+
+ db8500_esram12_ret_reg: db8500_esram12_ret {
+ regulator-name = "db8500-esram12-ret";
+ };
+
+ db8500_esram34_reg: db8500_esram34 {
+ regulator-name = "db8500-esram34";
+ };
+
+ db8500_esram34_ret_reg: db8500_esram34_ret {
+ regulator-name = "db8500-esram34-ret";
+ };
+ };
+
+ ab8500@5 {
+ ab8500-regulators {
+ ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+ regulator-name = "V-DISPLAY";
+ };
+
+ ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+ regulator-name = "V-eMMC1";
+ };
+
+ ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+ regulator-name = "V-MMC-SD";
+ };
+
+ ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+ regulator-name = "V-INTCORE";
+ };
+
+ ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+ regulator-name = "V-TVOUT";
+ };
+
+ ab8500_ldo_usb_reg: ab8500_ldo_usb {
+ regulator-name = "dummy";
+ };
+
+ ab8500_ldo_audio_reg: ab8500_ldo_audio {
+ regulator-name = "V-AUD";
+ };
+
+ ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+ regulator-name = "V-AMIC1";
+ };
+
+ ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+ regulator-name = "V-AMIC2";
+ };
+
+ ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+ regulator-name = "V-DMIC";
+ };
+
+ ab8500_ldo_ana_reg: ab8500_ldo_ana {
+ regulator-name = "V-CSI/DSI";
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 0772f5739f5..19aec421bb2 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -143,5 +143,15 @@
reg-shift = <2>;
reg-io-width = <4>;
};
+
+ rstmgr@ffd05000 {
+ compatible = "altr,rst-mgr";
+ reg = <0xffd05000 0x1000>;
+ };
+
+ sysmgr@ffd08000 {
+ compatible = "altr,sys-mgr";
+ reg = <0xffd08000 0x4000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index 2e4c5727468..b56a801e42a 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -30,10 +30,14 @@
pinctrl-0 = <&state_default>;
state_default: pinmux {
- i2c0-pmx {
+ i2c0 {
st,pins = "i2c0_grp";
st,function = "i2c0";
};
+ i2s0 {
+ st,pins = "i2s0_grp";
+ st,function = "i2s0";
+ };
i2s1 {
st,pins = "i2s1_grp";
st,function = "i2s1";
@@ -42,6 +46,10 @@
st,pins = "arm_gpio_grp";
st,function = "arm_gpio";
};
+ clcd {
+ st,pins = "clcd_grp" , "clcd_high_res";
+ st,function = "clcd";
+ };
eth {
st,pins = "gmii_grp";
st,function = "gmii";
@@ -74,11 +82,6 @@
st,pins = "i2c_1_2_grp";
st,function = "i2c_1_2";
};
- pci {
- st,pins = "pcie0_grp","pcie1_grp",
- "pcie2_grp";
- st,function = "pci";
- };
smii {
st,pins = "smii_0_1_2_grp";
st,function = "smii_0_1_2";
@@ -88,6 +91,14 @@
"nand_16bit_grp";
st,function = "nand";
};
+ sata {
+ st,pins = "sata0_grp";
+ st,function = "sata";
+ };
+ pcie {
+ st,pins = "pcie1_grp", "pcie2_grp";
+ st,function = "pci_express";
+ };
};
};
@@ -109,9 +120,49 @@
fsmc: flash@b0000000 {
status = "okay";
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x80000>;
+ };
+ partition@80000 {
+ label = "u-boot";
+ reg = <0x80000 0x140000>;
+ };
+ partition@1C0000 {
+ label = "environment";
+ reg = <0x1C0000 0x40000>;
+ };
+ partition@200000 {
+ label = "dtb";
+ reg = <0x200000 0x40000>;
+ };
+ partition@240000 {
+ label = "linux";
+ reg = <0x240000 0xC00000>;
+ };
+ partition@E40000 {
+ label = "rootfs";
+ reg = <0xE40000 0x0>;
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ label = "wakeup";
+ linux,code = <0x100>;
+ gpios = <&gpio0 7 0x4>;
+ debounce-interval = <20>;
+ gpio-key,wakeup = <1>;
+ };
};
gmac0: eth@e2000000 {
+ phy-mode = "gmii";
status = "okay";
};
@@ -135,23 +186,27 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
+ };
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
};
- partition@50000 {
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
- spi0: spi@e0100000 {
- status = "okay";
- };
-
ehci@e4800000 {
status = "okay";
};
@@ -189,10 +244,6 @@
status = "okay";
};
- i2c1: i2c@5cd00000 {
- status = "okay";
- };
-
kbd@e0300000 {
linux,keymap = < 0x00000001
0x00010002
@@ -277,6 +328,7 @@
0x08080052 >;
autorepeat;
st,mode = <0>;
+ suspended_rate = <2000000>;
status = "okay";
};
@@ -286,6 +338,81 @@
serial@e0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ num-cs = <3>;
+ cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
+
+ stmpe610@0 {
+ compatible = "st,stmpe610";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpha;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x7>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ interrupts = <6 0x4>;
+ interrupt-parent = <&gpio1>;
+ irq-trigger = <0x2>;
+
+ stmpe_touchscreen {
+ compatible = "st,stmpe-ts";
+ ts,sample-time = <4>;
+ ts,mod-12b = <1>;
+ ts,ref-sel = <0>;
+ ts,adc-freq = <1>;
+ ts,ave-ctrl = <1>;
+ ts,touch-det-delay = <2>;
+ ts,settling = <2>;
+ ts,fraction-z = <7>;
+ ts,i-drive = <1>;
+ };
+ };
+
+ m25p80@1 {
+ compatible = "st,m25p80";
+ reg = <1>;
+ spi-max-frequency = <12000000>;
+ spi-cpol;
+ spi-cpha;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0x2>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x11>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ };
+
+ spidev@2 {
+ compatible = "spidev";
+ reg = <2>;
+ spi-max-frequency = <25000000>;
+ spi-cpha;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0x2>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x11>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ };
};
wdt@ec800620 {
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 7cd25eb4f8e..1513c1927cc 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -17,6 +17,18 @@
compatible = "st,spear1310";
ahb {
+ spics: spics@e0700000{
+ compatible = "st,spear-spics-gpio";
+ reg = <0xe0700000 0x1000>;
+ st-spics,peripcfg-reg = <0x3b0>;
+ st-spics,sw-enable-bit = <12>;
+ st-spics,cs-value-bit = <11>;
+ st-spics,cs-enable-mask = <3>;
+ st-spics,cs-enable-shift = <8>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
ahci@b1000000 {
compatible = "snps,spear-ahci";
reg = <0xb1000000 0x10000>;
@@ -43,6 +55,7 @@
reg = <0x5c400000 0x8000>;
interrupts = <0 95 0x4>;
interrupt-names = "macirq";
+ phy-mode = "mii";
status = "disabled";
};
@@ -51,6 +64,7 @@
reg = <0x5c500000 0x8000>;
interrupts = <0 96 0x4>;
interrupt-names = "macirq";
+ phy-mode = "mii";
status = "disabled";
};
@@ -59,6 +73,7 @@
reg = <0x5c600000 0x8000>;
interrupts = <0 97 0x4>;
interrupt-names = "macirq";
+ phy-mode = "rmii";
status = "disabled";
};
@@ -67,6 +82,7 @@
reg = <0x5c700000 0x8000>;
interrupts = <0 98 0x4>;
interrupt-names = "macirq";
+ phy-mode = "rgmii";
status = "disabled";
};
@@ -76,13 +92,6 @@
#gpio-range-cells = <2>;
};
- spi1: spi@5d400000 {
- compatible = "arm,pl022", "arm,primecell";
- reg = <0x5d400000 0x1000>;
- interrupts = <0 99 0x4>;
- status = "disabled";
- };
-
apb {
i2c1: i2c@5cd00000 {
#address-cells = <1>;
@@ -147,6 +156,15 @@
status = "disabled";
};
+ spi1: spi@5d400000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x5d400000 0x1000>;
+ interrupts = <0 99 0x4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
serial@5c800000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x5c800000 0x1000>;
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index 045f7123ffa..d6c30ae0a8d 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -38,20 +38,15 @@
st,pins = "fsmc_8bit_grp";
st,function = "fsmc";
};
- kbd {
- st,pins = "keyboard_row_col_grp",
- "keyboard_col5_grp";
- st,function = "keyboard";
- };
uart0 {
- st,pins = "uart0_grp", "uart0_enh_grp";
+ st,pins = "uart0_grp";
st,function = "uart0";
};
- i2c0-pmx {
+ i2c0 {
st,pins = "i2c0_grp";
st,function = "i2c0";
};
- i2c1-pmx {
+ i2c1 {
st,pins = "i2c1_grp";
st,function = "i2c1";
};
@@ -64,14 +59,9 @@
st,function = "spdif_out";
};
ssp0 {
- st,pins = "ssp0_grp", "ssp0_cs1_grp",
- "ssp0_cs3_grp";
+ st,pins = "ssp0_grp", "ssp0_cs1_grp", "ssp0_cs2_grp", "ssp0_cs3_grp";
st,function = "ssp0";
};
- pwm {
- st,pins = "pwm2_grp", "pwm3_grp";
- st,function = "pwm";
- };
smi-pmx {
st,pins = "smi_grp";
st,function = "smi";
@@ -84,6 +74,18 @@
st,pins = "gmii_grp", "rgmii_grp";
st,function = "gmac";
};
+ cam0 {
+ st,pins = "cam0_grp";
+ st,function = "cam0";
+ };
+ cam1 {
+ st,pins = "cam1_grp";
+ st,function = "cam1";
+ };
+ cam2 {
+ st,pins = "cam2_grp";
+ st,function = "cam2";
+ };
cam3 {
st,pins = "cam3_grp";
st,function = "cam3";
@@ -108,9 +110,18 @@
st,pins = "sata_grp";
st,function = "sata";
};
+ pcie {
+ st,pins = "pcie_grp";
+ st,function = "pcie";
+ };
+
};
};
+ ahci@b1000000 {
+ status = "okay";
+ };
+
dma@ea800000 {
status = "okay";
};
@@ -121,9 +132,35 @@
fsmc: flash@b0000000 {
status = "okay";
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x200000>;
+ };
+ partition@200000 {
+ label = "u-boot";
+ reg = <0x200000 0x200000>;
+ };
+ partition@400000 {
+ label = "environment";
+ reg = <0x400000 0x100000>;
+ };
+ partition@500000 {
+ label = "dtb";
+ reg = <0x500000 0x100000>;
+ };
+ partition@600000 {
+ label = "linux";
+ reg = <0x600000 0xC00000>;
+ };
+ partition@1200000 {
+ label = "rootfs";
+ reg = <0x1200000 0x0>;
+ };
};
gmac0: eth@e2000000 {
+ phy-mode = "rgmii";
status = "okay";
};
@@ -147,31 +184,62 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
+ };
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
};
- partition@50000 {
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
- spi0: spi@e0100000 {
+ ehci@e4800000 {
status = "okay";
};
- ehci@e4800000 {
- status = "okay";
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ label = "wakeup";
+ linux,code = <0x100>;
+ gpios = <&gpio1 1 0x4>;
+ debounce-interval = <20>;
+ gpio-key,wakeup = <1>;
+ };
};
ehci@e5800000 {
status = "okay";
};
+ i2s0: i2s-play@b2400000 {
+ status = "okay";
+ };
+
+ i2s1: i2s-rec@b2000000 {
+ status = "okay";
+ };
+
+ incodec: dir-hifi {
+ compatible = "dummy,dir-hifi";
+ status = "okay";
+ };
+
ohci@e4000000 {
status = "okay";
};
@@ -180,11 +248,43 @@
status = "okay";
};
+ outcodec: dit-hifi {
+ compatible = "dummy,dit-hifi";
+ status = "okay";
+ };
+
+ sound {
+ compatible = "spear,spear-evb";
+ audio-controllers = <&spdif0 &spdif1 &i2s0 &i2s1>;
+ audio-codecs = <&incodec &outcodec &sta529 &sta529>;
+ codec_dai_name = "dir-hifi", "dit-hifi", "sta529-audio", "sta529-audio";
+ stream_name = "spdif-cap", "spdif-play", "i2s-play", "i2s-cap";
+ dai_name = "spdifin-pcm", "spdifout-pcm", "i2s0-pcm", "i2s1-pcm";
+ nr_controllers = <4>;
+ status = "okay";
+ };
+
+ spdif0: spdif-in@d0100000 {
+ status = "okay";
+ };
+
+ spdif1: spdif-out@d0000000 {
+ status = "okay";
+ };
+
apb {
adc@e0080000 {
status = "okay";
};
+ i2s-play@b2400000 {
+ status = "okay";
+ };
+
+ i2s-rec@b2000000 {
+ status = "okay";
+ };
+
gpio0: gpio@e0600000 {
status = "okay";
};
@@ -199,10 +299,36 @@
i2c0: i2c@e0280000 {
status = "okay";
+
+ sta529: sta529@1a {
+ compatible = "st,sta529";
+ reg = <0x1a>;
+ };
};
i2c1: i2c@b4000000 {
status = "okay";
+
+ eeprom0@56 {
+ compatible = "st,eeprom";
+ reg = <0x56>;
+ };
+
+ stmpe801@41 {
+ compatible = "st,stmpe801";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x41>;
+ interrupts = <4 0x4>;
+ interrupt-parent = <&gpio0>;
+ irq-trigger = <0x2>;
+
+ stmpegpio: stmpe_gpio {
+ compatible = "st,stmpe-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
};
kbd@e0300000 {
@@ -289,6 +415,7 @@
0x08080052 >;
autorepeat;
st,mode = <0>;
+ suspended_rate = <2000000>;
status = "okay";
};
@@ -298,10 +425,92 @@
serial@e0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b4100000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ num-cs = <3>;
+ cs-gpios = <&gpiopinctrl 80 0>, <&gpiopinctrl 24 0>,
+ <&gpiopinctrl 85 0>;
+
+ m25p80@0 {
+ compatible = "m25p80";
+ reg = <0>;
+ spi-max-frequency = <12000000>;
+ spi-cpol;
+ spi-cpha;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0x2>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x11>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ };
+
+ stmpe610@1 {
+ compatible = "st,stmpe610";
+ spi-max-frequency = <1000000>;
+ spi-cpha;
+ reg = <1>;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x7>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ interrupts = <100 0>;
+ interrupt-parent = <&gpiopinctrl>;
+ irq-trigger = <0x2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ stmpe_touchscreen {
+ compatible = "st,stmpe-ts";
+ ts,sample-time = <4>;
+ ts,mod-12b = <1>;
+ ts,ref-sel = <0>;
+ ts,adc-freq = <1>;
+ ts,ave-ctrl = <1>;
+ ts,touch-det-delay = <2>;
+ ts,settling = <2>;
+ ts,fraction-z = <7>;
+ ts,i-drive = <1>;
+ };
+ };
+
+ spidev@2 {
+ compatible = "spidev";
+ reg = <2>;
+ spi-max-frequency = <25000000>;
+ spi-cpha;
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable;
+ pl022,com-mode = <0x2>;
+ pl022,rx-level-trig = <0>;
+ pl022,tx-level-trig = <0>;
+ pl022,ctrl-len = <0x11>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+ };
+ };
+
+ timer@ec800600 {
+ status = "okay";
};
wdt@ec800620 {
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 6c09eb0a1b2..34da11aa679 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -17,6 +17,20 @@
compatible = "st,spear1340";
ahb {
+
+ spics: spics@e0700000{
+ compatible = "st,spear-spics-gpio";
+ reg = <0xe0700000 0x1000>;
+ st-spics,peripcfg-reg = <0x42c>;
+ st-spics,sw-enable-bit = <21>;
+ st-spics,cs-value-bit = <20>;
+ st-spics,cs-enable-mask = <3>;
+ st-spics,cs-enable-shift = <18>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ status = "disabled";
+ };
+
ahci@b1000000 {
compatible = "snps,spear-ahci";
reg = <0xb1000000 0x10000>;
@@ -24,15 +38,61 @@
status = "disabled";
};
+ i2s-play@b2400000 {
+ compatible = "snps,designware-i2s";
+ reg = <0xb2400000 0x10000>;
+ interrupt-names = "play_irq";
+ interrupts = <0 98 0x4
+ 0 99 0x4>;
+ play;
+ channel = <8>;
+ status = "disabled";
+ };
+
+ i2s-rec@b2000000 {
+ compatible = "snps,designware-i2s";
+ reg = <0xb2000000 0x10000>;
+ interrupt-names = "record_irq";
+ interrupts = <0 100 0x4
+ 0 101 0x4>;
+ record;
+ channel = <8>;
+ status = "disabled";
+ };
+
pinmux: pinmux@e0700000 {
compatible = "st,spear1340-pinmux";
reg = <0xe0700000 0x1000>;
#gpio-range-cells = <2>;
};
+ pwm: pwm@e0180000 {
+ compatible ="st,spear13xx-pwm";
+ reg = <0xe0180000 0x1000>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ spdif-in@d0100000 {
+ compatible = "st,spdif-in";
+ reg = < 0xd0100000 0x20000
+ 0xd0110000 0x10000 >;
+ interrupts = <0 84 0x4>;
+ status = "disabled";
+ };
+
+ spdif-out@d0000000 {
+ compatible = "st,spdif-out";
+ reg = <0xd0000000 0x20000>;
+ interrupts = <0 85 0x4>;
+ status = "disabled";
+ };
+
spi1: spi@5d400000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x5d400000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 99 0x4>;
status = "disabled";
};
@@ -44,6 +104,7 @@
compatible = "snps,designware-i2c";
reg = <0xb4000000 0x1000>;
interrupts = <0 104 0x4>;
+ write-16bit;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index f7b84aced65..b4ca60f4eb4 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -64,12 +64,26 @@
bootargs = "console=ttyAMA0,115200";
};
+ cpufreq {
+ compatible = "st,cpufreq-spear";
+ cpufreq_tbl = < 166000
+ 200000
+ 250000
+ 300000
+ 400000
+ 500000
+ 600000 >;
+ status = "disabled";
+ };
+
ahb {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
ranges = <0x50000000 0x50000000 0x10000000
0xb0000000 0xb0000000 0x10000000
+ 0xd0000000 0xd0000000 0x02000000
+ 0xd8000000 0xd8000000 0x01000000
0xe0000000 0xe0000000 0x10000000>;
sdhci@b3000000 {
@@ -81,7 +95,7 @@
cf@b2800000 {
compatible = "arasan,cf-spear1340";
- reg = <0xb2800000 0x100>;
+ reg = <0xb2800000 0x1000>;
interrupts = <0 29 0x4>;
status = "disabled";
};
@@ -104,15 +118,16 @@
compatible = "st,spear600-fsmc-nand";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0xb0000000 0x1000 /* FSMC Register */
- 0xb0800000 0x0010>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
+ reg = <0xb0000000 0x1000 /* FSMC Register*/
+ 0xb0800000 0x0010 /* NAND Base DATA */
+ 0xb0820000 0x0010 /* NAND Base ADDR */
+ 0xb0810000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
interrupts = <0 20 0x4
0 21 0x4
0 22 0x4
0 23 0x4>;
- st,ale-off = <0x20000>;
- st,cle-off = <0x10000>;
+ st,mode = <2>;
status = "disabled";
};
@@ -125,6 +140,13 @@
status = "disabled";
};
+ pcm {
+ compatible = "st,pcm-audio";
+ #address-cells = <0>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
smi: flash@ea000000 {
compatible = "st,spear600-smi";
#address-cells = <1>;
@@ -134,17 +156,11 @@
status = "disabled";
};
- spi0: spi@e0100000 {
- compatible = "arm,pl022", "arm,primecell";
- reg = <0xe0100000 0x1000>;
- interrupts = <0 31 0x4>;
- status = "disabled";
- };
-
ehci@e4800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe4800000 0x1000>;
interrupts = <0 64 0x4>;
+ usbh0_id = <0>;
status = "disabled";
};
@@ -152,6 +168,7 @@
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe5800000 0x1000>;
interrupts = <0 66 0x4>;
+ usbh1_id = <1>;
status = "disabled";
};
@@ -159,6 +176,7 @@
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe4000000 0x1000>;
interrupts = <0 65 0x4>;
+ usbh0_id = <0>;
status = "disabled";
};
@@ -166,6 +184,7 @@
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe5000000 0x1000>;
interrupts = <0 67 0x4>;
+ usbh1_id = <1>;
status = "disabled";
};
@@ -175,6 +194,8 @@
compatible = "simple-bus";
ranges = <0x50000000 0x50000000 0x10000000
0xb0000000 0xb0000000 0x10000000
+ 0xd0000000 0xd0000000 0x02000000
+ 0xd8000000 0xd8000000 0x01000000
0xe0000000 0xe0000000 0x10000000>;
gpio0: gpio@e0600000 {
@@ -215,8 +236,35 @@
status = "disabled";
};
+ i2s@e0180000 {
+ compatible = "st,designware-i2s";
+ reg = <0xe0180000 0x1000>;
+ interrupt-names = "play_irq", "record_irq";
+ interrupts = <0 10 0x4
+ 0 11 0x4 >;
+ status = "disabled";
+ };
+
+ i2s@e0200000 {
+ compatible = "st,designware-i2s";
+ reg = <0xe0200000 0x1000>;
+ interrupt-names = "play_irq", "record_irq";
+ interrupts = <0 26 0x4
+ 0 53 0x4>;
+ status = "disabled";
+ };
+
+ spi0: spi@e0100000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0xe0100000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 31 0x4>;
+ status = "disabled";
+ };
+
rtc@e0580000 {
- compatible = "st,spear-rtc";
+ compatible = "st,spear600-rtc";
reg = <0xe0580000 0x1000>;
interrupts = <0 36 0x4>;
status = "disabled";
@@ -232,7 +280,7 @@
adc@e0080000 {
compatible = "st,spear600-adc";
reg = <0xe0080000 0x1000>;
- interrupts = <0 44 0x4>;
+ interrupts = <0 12 0x4>;
status = "disabled";
};
@@ -245,7 +293,8 @@
timer@ec800600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xec800600 0x20>;
- interrupts = <1 13 0x301>;
+ interrupts = <1 13 0x4>;
+ status = "disabled";
};
wdt@ec800620 {
@@ -257,6 +306,7 @@
thermal@e07008c4 {
compatible = "st,thermal-spear1340";
reg = <0xe07008c4 0x4>;
+ thermal_flags = <0x7000>;
};
};
};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 1e7c7a8e212..5de1431653e 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -100,15 +100,23 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
};
- partition@50000 {
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
@@ -235,6 +243,8 @@
serial@d0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
wdt@fc880000 {
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index ed3627c116c..f79b3dfaabe 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -27,7 +27,7 @@
};
clcd@60000000 {
- compatible = "arm,clcd-pl110", "arm,primecell";
+ compatible = "arm,pl110", "arm,primecell";
reg = <0x60000000 0x1000>;
interrupts = <30>;
status = "disabled";
@@ -38,10 +38,10 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0x94000000 0x1000 /* FSMC Register */
- 0x80000000 0x0010>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
- st,ale-off = <0x20000>;
- st,cle-off = <0x10000>;
+ 0x80000000 0x0010 /* NAND Base DATA */
+ 0x80020000 0x0010 /* NAND Base ADDR */
+ 0x80010000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
status = "disabled";
};
@@ -52,6 +52,14 @@
status = "disabled";
};
+ shirq: interrupt-controller@0x50000000 {
+ compatible = "st,spear300-shirq";
+ reg = <0x50000000 0x1000>;
+ interrupts = <28>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
apb {
#address-cells = <1>;
#size-cells = <1>;
@@ -64,12 +72,16 @@
compatible = "arm,pl061", "arm,primecell";
gpio-controller;
reg = <0xa9000000 0x1000>;
+ interrupts = <8>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
kbd@a0000000 {
compatible = "st,spear300-kbd";
reg = <0xa0000000 0x1000>;
+ interrupts = <7>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index b00544e0cd5..b09632963d1 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -114,15 +114,23 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
};
- partition@50000 {
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
@@ -158,26 +166,38 @@
serial@d0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b2000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b2080000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b2100000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b2180000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@b2200000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
wdt@fc880000 {
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index 930303e48df..ab45b8c8198 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -33,13 +33,21 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0x44000000 0x1000 /* FSMC Register */
- 0x40000000 0x0010>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
- st,ale-off = <0x10000>;
- st,cle-off = <0x20000>;
+ 0x40000000 0x0010 /* NAND Base DATA */
+ 0x40020000 0x0010 /* NAND Base ADDR */
+ 0x40010000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
status = "disabled";
};
+ shirq: interrupt-controller@0xb4000000 {
+ compatible = "st,spear310-shirq";
+ reg = <0xb4000000 0x1000>;
+ interrupts = <28 29 30 1>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
apb {
#address-cells = <1>;
#size-cells = <1>;
@@ -50,30 +58,40 @@
serial@b2000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xb2000000 0x1000>;
+ interrupts = <8>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@b2080000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xb2080000 0x1000>;
+ interrupts = <9>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@b2100000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xb2100000 0x1000>;
+ interrupts = <10>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@b2180000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xb2180000 0x1000>;
+ interrupts = <11>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@b2200000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xb2200000 0x1000>;
+ interrupts = <12>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index ad4bfc68ee0..fdedbb51410 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -76,20 +76,12 @@
st,function = "mii2";
};
pwm0_1 {
- st,pins = "pwm0_1_pin_14_15_grp";
+ st,pins = "pwm0_1_pin_37_38_grp";
st,function = "pwm0_1";
};
- pwm2 {
- st,pins = "pwm2_pin_13_grp";
- st,function = "pwm2";
- };
};
};
- clcd@90000000 {
- status = "okay";
- };
-
dma@fc400000 {
status = "okay";
};
@@ -103,6 +95,7 @@
};
sdhci@70000000 {
+ power-gpio = <&gpiopinctrl 61 1>;
status = "okay";
};
@@ -122,15 +115,23 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
+ };
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
};
- partition@50000 {
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
@@ -182,14 +183,20 @@
serial@d0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@a3000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@a4000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
wdt@fc880000 {
diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
new file mode 100644
index 00000000000..3075d2d3a8b
--- /dev/null
+++ b/arch/arm/boot/dts/spear320-hmi.dts
@@ -0,0 +1,305 @@
+/*
+ * DTS file for SPEAr320 Evaluation Board
+ *
+ * Copyright 2012 Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear320.dtsi"
+
+/ {
+ model = "ST SPEAr320 HMI Board";
+ compatible = "st,spear320-hmi", "st,spear320";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ ahb {
+ pinmux@b3000000 {
+ st,pinmux-mode = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ i2c0 {
+ st,pins = "i2c0_grp";
+ st,function = "i2c0";
+ };
+ ssp0 {
+ st,pins = "ssp0_grp";
+ st,function = "ssp0";
+ };
+ uart0 {
+ st,pins = "uart0_grp";
+ st,function = "uart0";
+ };
+ clcd {
+ st,pins = "clcd_grp";
+ st,function = "clcd";
+ };
+ fsmc {
+ st,pins = "fsmc_8bit_grp";
+ st,function = "fsmc";
+ };
+ sdhci {
+ st,pins = "sdhci_cd_12_grp";
+ st,function = "sdhci";
+ };
+ i2s {
+ st,pins = "i2s_grp";
+ st,function = "i2s";
+ };
+ uart1 {
+ st,pins = "uart1_grp";
+ st,function = "uart1";
+ };
+ uart2 {
+ st,pins = "uart2_grp";
+ st,function = "uart2";
+ };
+ can0 {
+ st,pins = "can0_grp";
+ st,function = "can0";
+ };
+ can1 {
+ st,pins = "can1_grp";
+ st,function = "can1";
+ };
+ mii0_1 {
+ st,pins = "rmii0_1_grp";
+ st,function = "mii0_1";
+ };
+ pwm0_1 {
+ st,pins = "pwm0_1_pin_37_38_grp";
+ st,function = "pwm0_1";
+ };
+ pwm2 {
+ st,pins = "pwm2_pin_34_grp";
+ st,function = "pwm2";
+ };
+ };
+ };
+
+ clcd@90000000 {
+ status = "okay";
+ };
+
+ dma@fc400000 {
+ status = "okay";
+ };
+
+ ehci@e1800000 {
+ status = "okay";
+ };
+
+ fsmc: flash@4c000000 {
+ status = "okay";
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x80000>;
+ };
+ partition@80000 {
+ label = "u-boot";
+ reg = <0x80000 0x140000>;
+ };
+ partition@1C0000 {
+ label = "environment";
+ reg = <0x1C0000 0x40000>;
+ };
+ partition@200000 {
+ label = "dtb";
+ reg = <0x200000 0x40000>;
+ };
+ partition@240000 {
+ label = "linux";
+ reg = <0x240000 0xC00000>;
+ };
+ partition@E40000 {
+ label = "rootfs";
+ reg = <0xE40000 0x0>;
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@1 {
+ label = "user button 1";
+ linux,code = <0x100>;
+ gpios = <&stmpegpio 3 0x4>;
+ debounce-interval = <20>;
+ gpio-key,wakeup = <1>;
+ };
+
+ button@2 {
+ label = "user button 2";
+ linux,code = <0x200>;
+ gpios = <&stmpegpio 2 0x4>;
+ debounce-interval = <20>;
+ gpio-key,wakeup = <1>;
+ };
+ };
+
+ ohci@e1900000 {
+ status = "okay";
+ };
+
+ ohci@e2100000 {
+ status = "okay";
+ };
+
+ pwm: pwm@a8000000 {
+ status = "okay";
+ };
+
+ sdhci@70000000 {
+ power-gpio = <&gpiopinctrl 50 1>;
+ power_always_enb;
+ status = "okay";
+ };
+
+ smi: flash@fc000000 {
+ status = "okay";
+ clock-rate=<50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x50000>;
+ };
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
+ label = "linux";
+ reg = <0x80000 0x310000>;
+ };
+ partition@390000 {
+ label = "rootfs";
+ reg = <0x390000 0x0>;
+ };
+ };
+ };
+
+ spi0: spi@d0100000 {
+ status = "okay";
+ };
+
+ spi1: spi@a5000000 {
+ status = "okay";
+ };
+
+ spi2: spi@a6000000 {
+ status = "okay";
+ };
+
+ usbd@e1100000 {
+ status = "okay";
+ };
+
+ apb {
+ gpio0: gpio@fc980000 {
+ status = "okay";
+ };
+
+ gpio@b3000000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@d0180000 {
+ status = "okay";
+
+ stmpe811@41 {
+ compatible = "st,stmpe811";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x41>;
+ irq-over-gpio;
+ irq-gpios = <&gpiopinctrl 29 0x4>;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+
+ stmpegpio: stmpe-gpio {
+ compatible = "stmpe,gpio";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio,norequest-mask = <0xF3>;
+ };
+
+ stmpe610-ts {
+ compatible = "stmpe,ts";
+ reg = <0>;
+ ts,sample-time = <4>;
+ ts,mod-12b = <1>;
+ ts,ref-sel = <0>;
+ ts,adc-freq = <1>;
+ ts,ave-ctrl = <1>;
+ ts,touch-det-delay = <3>;
+ ts,settling = <4>;
+ ts,fraction-z = <7>;
+ ts,i-drive = <1>;
+ };
+ };
+ };
+
+ i2c1: i2c@a7000000 {
+ status = "okay";
+ };
+
+ rtc@fc900000 {
+ status = "okay";
+ };
+
+ serial@d0000000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ serial@a3000000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ serial@a4000000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ wdt@fc880000 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index 67d7ada7127..caa5520b1fd 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -28,9 +28,10 @@
};
clcd@90000000 {
- compatible = "arm,clcd-pl110", "arm,primecell";
+ compatible = "arm,pl110", "arm,primecell";
reg = <0x90000000 0x1000>;
- interrupts = <33>;
+ interrupts = <8>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
@@ -39,37 +40,61 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0x4c000000 0x1000 /* FSMC Register */
- 0x50000000 0x0010>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
- st,ale-off = <0x20000>;
- st,cle-off = <0x10000>;
+ 0x50000000 0x0010 /* NAND Base DATA */
+ 0x50020000 0x0010 /* NAND Base ADDR */
+ 0x50010000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
status = "disabled";
};
sdhci@70000000 {
compatible = "st,sdhci-spear";
reg = <0x70000000 0x100>;
- interrupts = <29>;
+ interrupts = <10>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
+ shirq: interrupt-controller@0xb3000000 {
+ compatible = "st,spear320-shirq";
+ reg = <0xb3000000 0x1000>;
+ interrupts = <30 28 29 1>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
spi1: spi@a5000000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0xa5000000 0x1000>;
+ interrupts = <15>;
+ interrupt-parent = <&shirq>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
spi2: spi@a6000000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0xa6000000 0x1000>;
+ interrupts = <16>;
+ interrupt-parent = <&shirq>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
+ pwm: pwm@a8000000 {
+ compatible ="st,spear-pwm";
+ reg = <0xa8000000 0x1000>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
apb {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
- ranges = <0xa0000000 0xa0000000 0x10000000
+ ranges = <0xa0000000 0xa0000000 0x20000000
0xd0000000 0xd0000000 0x30000000>;
i2c1: i2c@a7000000 {
@@ -77,18 +102,24 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xa7000000 0x1000>;
+ interrupts = <21>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@a3000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xa3000000 0x1000>;
+ interrupts = <13>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
serial@a4000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xa4000000 0x1000>;
+ interrupts = <14>;
+ interrupt-parent = <&shirq>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 3a8bb573692..c2a852d43c4 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -53,6 +53,7 @@
reg = <0xe0800000 0x8000>;
interrupts = <23 22>;
interrupt-names = "macirq", "eth_wake_irq";
+ phy-mode = "mii";
status = "disabled";
};
@@ -69,6 +70,8 @@
compatible = "arm,pl022", "arm,primecell";
reg = <0xd0100000 0x1000>;
interrupts = <20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -120,7 +123,7 @@
};
rtc@fc900000 {
- compatible = "st,spear-rtc";
+ compatible = "st,spear600-rtc";
reg = <0xfc900000 0x1000>;
interrupts = <10>;
status = "disabled";
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index 1119c22c9a8..d865a891776 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -24,15 +24,35 @@
};
ahb {
+ clcd@fc200000 {
+ status = "okay";
+ };
+
dma@fc400000 {
status = "okay";
};
+ ehci@e1800000 {
+ status = "okay";
+ };
+
+ ehci@e2000000 {
+ status = "okay";
+ };
+
gmac: ethernet@e0800000 {
phy-mode = "gmii";
status = "okay";
};
+ ohci@e1900000 {
+ status = "okay";
+ };
+
+ ohci@e2100000 {
+ status = "okay";
+ };
+
smi: flash@fc000000 {
status = "okay";
clock-rate=<50000000>;
@@ -49,15 +69,23 @@
};
partition@10000 {
label = "u-boot";
- reg = <0x10000 0x40000>;
+ reg = <0x10000 0x50000>;
};
- partition@50000 {
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
label = "linux";
- reg = <0x50000 0x2c0000>;
+ reg = <0x80000 0x310000>;
};
- partition@310000 {
+ partition@390000 {
label = "rootfs";
- reg = <0x310000 0x4f0000>;
+ reg = <0x390000 0x0>;
};
};
};
@@ -65,10 +93,18 @@
apb {
serial@d0000000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
};
serial@d0080000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ };
+
+ rtc@fc900000 {
+ status = "okay";
};
i2c@d0200000 {
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index a3c36e47d7e..19f99dc4115 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -45,6 +45,14 @@
#interrupt-cells = <1>;
};
+ clcd@fc200000 {
+ compatible = "arm,pl110", "arm,primecell";
+ reg = <0xfc200000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ status = "disabled";
+ };
+
dma@fc400000 {
compatible = "arm,pl080", "arm,primecell";
reg = <0xfc400000 0x1000>;
@@ -59,6 +67,7 @@
interrupt-parent = <&vic1>;
interrupts = <24 23>;
interrupt-names = "macirq", "eth_wake_irq";
+ phy-mode = "gmii";
status = "disabled";
};
@@ -67,10 +76,10 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0xd1800000 0x1000 /* FSMC Register */
- 0xd2000000 0x4000>; /* NAND Base */
- reg-names = "fsmc_regs", "nand_data";
- st,ale-off = <0x20000>;
- st,cle-off = <0x10000>;
+ 0xd2000000 0x0010 /* NAND Base DATA */
+ 0xd2020000 0x0010 /* NAND Base ADDR */
+ 0xd2010000 0x0010>; /* NAND Base CMD */
+ reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
status = "disabled";
};
@@ -178,6 +187,13 @@
status = "disabled";
};
+ rtc@fc900000 {
+ compatible = "st,spear600-rtc";
+ reg = <0xfc900000 0x1000>;
+ interrupts = <10>;
+ status = "disabled";
+ };
+
timer@f0000000 {
compatible = "st,spear-timer";
reg = <0xf0000000 0x400>;
diff --git a/arch/arm/boot/dts/stuib.dtsi b/arch/arm/boot/dts/stuib.dtsi
new file mode 100644
index 00000000000..39446a247e7
--- /dev/null
+++ b/arch/arm/boot/dts/stuib.dtsi
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/ {
+ soc-u9500 {
+ i2c@80004000 {
+ stmpe1601: stmpe1601@40 {
+ compatible = "st,stmpe1601";
+ reg = <0x40>;
+ interrupts = <26 0x1>;
+ interrupt-parent = <&gpio6>;
+ interrupt-controller;
+
+ wakeup-source;
+ st,autosleep-timeout = <1024>;
+
+ stmpe_keypad {
+ compatible = "st,stmpe-keypad";
+
+ debounce-interval = <64>;
+ st,scan-count = <8>;
+ st,no-autorepeat;
+
+ linux,keymap = <0x205006b
+ 0x4010074
+ 0x3050072
+ 0x1030004
+ 0x502006a
+ 0x500000a
+ 0x5008b
+ 0x706001c
+ 0x405000b
+ 0x6070003
+ 0x3040067
+ 0x303006c
+ 0x60400e7
+ 0x602009e
+ 0x4020073
+ 0x5050002
+ 0x4030069
+ 0x3020008>;
+ };
+ };
+ };
+
+ i2c@80110000 {
+ bu21013_tp@0x5c {
+ compatible = "rhom,bu21013_tp";
+ reg = <0x5c>;
+ touch-gpio = <&gpio2 20 0x4>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+
+ rhom,touch-max-x = <384>;
+ rhom,touch-max-y = <704>;
+ rhom,flip-y;
+ };
+
+ bu21013_tp@0x5d {
+ compatible = "rhom,bu21013_tp";
+ reg = <0x5d>;
+ touch-gpio = <&gpio2 20 0x4>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+
+ rhom,touch-max-x = <384>;
+ rhom,touch-max-y = <704>;
+ rhom,flip-y;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sun4i-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index f4ca126ad99..5cab8254043 100644
--- a/arch/arm/boot/dts/sun4i-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -11,11 +11,11 @@
*/
/dts-v1/;
-/include/ "sun4i.dtsi"
+/include/ "sun4i-a10.dtsi"
/ {
model = "Cubietech Cubieboard";
- compatible = "cubietech,cubieboard", "allwinner,sun4i";
+ compatible = "cubietech,a10-cubieboard", "allwinner,sun4i-a10";
aliases {
serial0 = &uart0;
diff --git a/arch/arm/boot/dts/sun4i.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e61fdd47bd0..e61fdd47bd0 100644
--- a/arch/arm/boot/dts/sun4i.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
diff --git a/arch/arm/boot/dts/sun5i-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index d6ff889a5d8..498a091a4ea 100644
--- a/arch/arm/boot/dts/sun5i-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -12,11 +12,11 @@
*/
/dts-v1/;
-/include/ "sun5i.dtsi"
+/include/ "sun5i-a13.dtsi"
/ {
model = "Olimex A13-Olinuxino";
- compatible = "olimex,a13-olinuxino", "allwinner,sun5i";
+ compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 59a2d265a98..59a2d265a98 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index 74b8a47adf9..43eb72af894 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -10,6 +10,18 @@
reg = <0x00000000 0x40000000>;
};
+ host1x {
+ hdmi {
+ status = "okay";
+
+ vdd-supply = <&hdmi_vdd_reg>;
+ pll-supply = <&hdmi_pll_reg>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio 111 0>; /* PN7 */
+ };
+ };
+
pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -262,9 +274,9 @@
};
};
- i2c@7000c400 {
+ hdmi_ddc: i2c@7000c400 {
status = "okay";
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
};
i2c@7000c500 {
@@ -369,13 +381,13 @@
regulator-max-microvolt = <1800000>;
};
- ldo7 {
+ hdmi_vdd_reg: ldo7 {
regulator-name = "vdd_ldo7,avdd_hdmi";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
- ldo8 {
+ hdmi_pll_reg: ldo8 {
regulator-name = "vdd_ldo8,avdd_hdmi_pll";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -396,6 +408,11 @@
};
};
};
+
+ temperature-sensor@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
};
pmc {
diff --git a/arch/arm/boot/dts/tegra20-plutux.dts b/arch/arm/boot/dts/tegra20-plutux.dts
index 331a3ef24d5..289480026fb 100644
--- a/arch/arm/boot/dts/tegra20-plutux.dts
+++ b/arch/arm/boot/dts/tegra20-plutux.dts
@@ -6,6 +6,12 @@
model = "Avionic Design Plutux board";
compatible = "ad,plutux", "ad,tamonten", "nvidia,tegra20";
+ host1x {
+ hdmi {
+ status = "okay";
+ };
+ };
+
i2c@7000c000 {
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index e58a0e60f71..420459825b4 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -561,6 +561,12 @@
status = "okay";
};
+ sdhci@c8000000 {
+ status = "okay";
+ power-gpios = <&gpio 86 0>; /* gpio PK6 */
+ bus-width = <4>;
+ };
+
sdhci@c8000400 {
status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 5b3d8b157b3..a239ccdfaa5 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -8,6 +8,16 @@
reg = <0x00000000 0x20000000>;
};
+ host1x {
+ hdmi {
+ vdd-supply = <&hdmi_vdd_reg>;
+ pll-supply = <&hdmi_pll_reg>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio 111 0>; /* PN7 */
+ };
+ };
+
pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -62,10 +72,6 @@
nvidia,pins = "dap4";
nvidia,function = "dap4";
};
- ddc {
- nvidia,pins = "ddc";
- nvidia,function = "i2c2";
- };
dta {
nvidia,pins = "dta", "dtd";
nvidia,function = "sdio2";
@@ -91,7 +97,7 @@
nvidia,function = "pcie";
};
hdint {
- nvidia,pins = "hdint", "pta";
+ nvidia,pins = "hdint";
nvidia,function = "hdmi";
};
i2cp {
@@ -230,6 +236,39 @@
nvidia,pull = <1>;
};
};
+
+ state_i2cmux_ddc: pinmux_i2cmux_ddc {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "i2c2";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
+ };
+
+ state_i2cmux_pta: pinmux_i2cmux_pta {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "i2c2";
+ };
+ };
+
+ state_i2cmux_idle: pinmux_i2cmux_idle {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
+ };
};
i2s@70002800 {
@@ -246,6 +285,36 @@
status = "okay";
};
+ i2c@7000c400 {
+ clock-frequency = <100000>;
+ status = "okay";
+ };
+
+ i2cmux {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-parent = <&{/i2c@7000c400}>;
+
+ pinctrl-names = "ddc", "pta", "idle";
+ pinctrl-0 = <&state_i2cmux_ddc>;
+ pinctrl-1 = <&state_i2cmux_pta>;
+ pinctrl-2 = <&state_i2cmux_idle>;
+
+ hdmi_ddc: i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
i2c@7000d000 {
clock-frequency = <400000>;
status = "okay";
@@ -348,13 +417,13 @@
regulator-max-microvolt = <2850000>;
};
- ldo7 {
+ hdmi_vdd_reg: ldo7 {
regulator-name = "vdd_ldo7,avdd_hdmi";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
- ldo8 {
+ hdmi_pll_reg: ldo8 {
regulator-name = "vdd_ldo8,avdd_hdmi_pll";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -381,6 +450,11 @@
};
};
};
+
+ temperature-sensor@4c {
+ compatible = "onnn,nct1008";
+ reg = <0x4c>;
+ };
};
pmc {
diff --git a/arch/arm/boot/dts/tegra20-tec.dts b/arch/arm/boot/dts/tegra20-tec.dts
index 9aff31b0fe4..402b21004be 100644
--- a/arch/arm/boot/dts/tegra20-tec.dts
+++ b/arch/arm/boot/dts/tegra20-tec.dts
@@ -6,10 +6,13 @@
model = "Avionic Design Tamonten Evaluation Carrier";
compatible = "ad,tec", "ad,tamonten", "nvidia,tegra20";
- i2c@7000c000 {
- clock-frequency = <400000>;
- status = "okay";
+ host1x {
+ hdmi {
+ status = "okay";
+ };
+ };
+ i2c@7000c000 {
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 27fb8a67ea4..b70b4cb754c 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -10,6 +10,18 @@
reg = <0x00000000 0x40000000>;
};
+ host1x {
+ hdmi {
+ status = "okay";
+
+ vdd-supply = <&hdmi_vdd_reg>;
+ pll-supply = <&hdmi_pll_reg>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio 111 0>; /* PN7 */
+ };
+ };
+
pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -249,14 +261,24 @@
clock-frequency = <216000000>;
};
- i2c@7000c000 {
+ dvi_ddc: i2c@7000c000 {
status = "okay";
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
};
- i2c@7000c400 {
+ spi@7000c380 {
status = "okay";
- clock-frequency = <400000>;
+ spi-max-frequency = <48000000>;
+ spi-flash@0 {
+ compatible = "winbond,w25q80bl";
+ reg = <0>;
+ spi-max-frequency = <48000000>;
+ };
+ };
+
+ hdmi_ddc: i2c@7000c400 {
+ status = "okay";
+ clock-frequency = <100000>;
};
i2c@7000c500 {
@@ -300,6 +322,30 @@
bus-width = <4>;
};
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_vdd_reg: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "avdd_hdmi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ hdmi_pll_reg: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "avdd_hdmi_pll";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+ };
+
sound {
compatible = "nvidia,tegra-audio-trimslice";
nvidia,i2s-controller = <&tegra_i2s1>;
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index 86854f1abd5..adc47547eaa 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -64,11 +64,6 @@
nvidia,pins = "dap4";
nvidia,function = "dap4";
};
- ddc {
- nvidia,pins = "ddc", "owc", "spdi", "spdo",
- "uac";
- nvidia,function = "rsvd2";
- };
dta {
nvidia,pins = "dta", "dtb", "dtc", "dtd", "dte";
nvidia,function = "vi";
@@ -98,7 +93,7 @@
nvidia,function = "pcie";
};
hdint {
- nvidia,pins = "hdint", "pta";
+ nvidia,pins = "hdint";
nvidia,function = "hdmi";
};
i2cp {
@@ -129,6 +124,10 @@
"lspi", "lvp1", "lvs";
nvidia,function = "displaya";
};
+ owc {
+ nvidia,pins = "owc", "spdi", "spdo", "uac";
+ nvidia,function = "rsvd2";
+ };
pmc {
nvidia,pins = "pmc";
nvidia,function = "pwr_on";
@@ -237,6 +236,49 @@
"ld23_22";
nvidia,pull = <1>;
};
+ drive_sdio1 {
+ nvidia,pins = "drive_sdio1";
+ nvidia,high-speed-mode = <0>;
+ nvidia,schmitt = <1>;
+ nvidia,low-power-mode = <3>;
+ nvidia,pull-down-strength = <31>;
+ nvidia,pull-up-strength = <31>;
+ nvidia,slew-rate-rising = <3>;
+ nvidia,slew-rate-falling = <3>;
+ };
+ };
+
+ state_i2cmux_ddc: pinmux_i2cmux_ddc {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "i2c2";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
+ };
+
+ state_i2cmux_pta: pinmux_i2cmux_pta {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "i2c2";
+ };
+ };
+
+ state_i2cmux_idle: pinmux_i2cmux_idle {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
};
};
@@ -281,6 +323,31 @@
clock-frequency = <400000>;
};
+ i2cmux {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-parent = <&{/i2c@7000c400}>;
+
+ pinctrl-names = "ddc", "pta", "idle";
+ pinctrl-0 = <&state_i2cmux_ddc>;
+ pinctrl-1 = <&state_i2cmux_pta>;
+ pinctrl-2 = <&state_i2cmux_idle>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
i2c@7000c500 {
status = "okay";
clock-frequency = <400000>;
@@ -406,6 +473,11 @@
};
};
};
+
+ temperature-sensor@4c {
+ compatible = "onnn,nct1008";
+ reg = <0x4c>;
+ };
};
pmc {
@@ -425,6 +497,12 @@
status = "okay";
};
+ sdhci@c8000000 {
+ status = "okay";
+ power-gpios = <&gpio 86 0>; /* gpio PK6 */
+ bus-width = <4>;
+ };
+
sdhci@c8000400 {
status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 94a71c91beb..20d576ecd55 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -10,6 +10,18 @@
reg = <0x00000000 0x20000000>;
};
+ host1x {
+ hdmi {
+ status = "okay";
+
+ vdd-supply = <&hdmi_vdd_reg>;
+ pll-supply = <&hdmi_pll_reg>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio 111 0>; /* PN7 */
+ };
+ };
+
pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -246,6 +258,11 @@
clock-frequency = <216000000>;
};
+ hdmi_ddc: i2c@7000c400 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
i2c@7000d000 {
status = "okay";
clock-frequency = <100000>;
@@ -356,7 +373,7 @@
regulator-always-on;
};
- ldo6 {
+ hdmi_pll_reg: ldo6 {
regulator-name = "nvvdd_ldo6,avdd_hdmi_pll";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -388,7 +405,7 @@
regulator-always-on;
};
- ldo11 {
+ hdmi_vdd_reg: ldo11 {
regulator-name = "nvvdd_ldo11,vddio_pex_clk,vcom_33,avdd_hdmi";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index f40cfbaa7c7..b8effa1cbda 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,6 +4,99 @@
compatible = "nvidia,tegra20";
interrupt-parent = <&intc>;
+ host1x {
+ compatible = "nvidia,tegra20-host1x", "simple-bus";
+ reg = <0x50000000 0x00024000>;
+ interrupts = <0 65 0x04 /* mpcore syncpt */
+ 0 67 0x04>; /* mpcore general */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x54000000 0x54000000 0x04000000>;
+
+ mpe {
+ compatible = "nvidia,tegra20-mpe";
+ reg = <0x54040000 0x00040000>;
+ interrupts = <0 68 0x04>;
+ };
+
+ vi {
+ compatible = "nvidia,tegra20-vi";
+ reg = <0x54080000 0x00040000>;
+ interrupts = <0 69 0x04>;
+ };
+
+ epp {
+ compatible = "nvidia,tegra20-epp";
+ reg = <0x540c0000 0x00040000>;
+ interrupts = <0 70 0x04>;
+ };
+
+ isp {
+ compatible = "nvidia,tegra20-isp";
+ reg = <0x54100000 0x00040000>;
+ interrupts = <0 71 0x04>;
+ };
+
+ gr2d {
+ compatible = "nvidia,tegra20-gr2d";
+ reg = <0x54140000 0x00040000>;
+ interrupts = <0 72 0x04>;
+ };
+
+ gr3d {
+ compatible = "nvidia,tegra20-gr3d";
+ reg = <0x54180000 0x00040000>;
+ };
+
+ dc@54200000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54200000 0x00040000>;
+ interrupts = <0 73 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ dc@54240000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54240000 0x00040000>;
+ interrupts = <0 74 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ hdmi {
+ compatible = "nvidia,tegra20-hdmi";
+ reg = <0x54280000 0x00040000>;
+ interrupts = <0 75 0x04>;
+ status = "disabled";
+ };
+
+ tvo {
+ compatible = "nvidia,tegra20-tvo";
+ reg = <0x542c0000 0x00040000>;
+ interrupts = <0 76 0x04>;
+ status = "disabled";
+ };
+
+ dsi {
+ compatible = "nvidia,tegra20-dsi";
+ reg = <0x54300000 0x00040000>;
+ status = "disabled";
+ };
+ };
+
+ timer@50040600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x50040600 0x20>;
+ interrupts = <1 13 0x304>;
+ };
+
cache-controller@50043000 {
compatible = "arm,pl310-cache";
reg = <0x50043000 0x1000>;
@@ -21,6 +114,15 @@
#interrupt-cells = <3>;
};
+ timer@60005000 {
+ compatible = "nvidia,tegra20-timer";
+ reg = <0x60005000 0x60>;
+ interrupts = <0 0 0x04
+ 0 1 0x04
+ 0 41 0x04
+ 0 42 0x04>;
+ };
+
apbdma: dma {
compatible = "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1200>;
@@ -138,6 +240,12 @@
#pwm-cells = <2>;
};
+ rtc {
+ compatible = "nvidia,tegra20-rtc";
+ reg = <0x7000e000 0x100>;
+ interrupts = <0 2 0x04>;
+ };
+
i2c@7000c000 {
compatible = "nvidia,tegra20-i2c";
reg = <0x7000c000 0x100>;
@@ -147,6 +255,16 @@
status = "disabled";
};
+ spi@7000c380 {
+ compatible = "nvidia,tegra20-sflash";
+ reg = <0x7000c380 0x80>;
+ interrupts = <0 39 0x04>;
+ nvidia,dma-request-selector = <&apbdma 11>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
i2c@7000c400 {
compatible = "nvidia,tegra20-i2c";
reg = <0x7000c400 0x100>;
@@ -174,6 +292,46 @@
status = "disabled";
};
+ spi@7000d400 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000d400 0x200>;
+ interrupts = <0 59 0x04>;
+ nvidia,dma-request-selector = <&apbdma 15>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000d600 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000d600 0x200>;
+ interrupts = <0 82 0x04>;
+ nvidia,dma-request-selector = <&apbdma 16>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000d800 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000d800 0x200>;
+ interrupts = <0 83 0x04>;
+ nvidia,dma-request-selector = <&apbdma 17>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000da00 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000da00 0x200>;
+ interrupts = <0 93 0x04>;
+ nvidia,dma-request-selector = <&apbdma 18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pmc {
compatible = "nvidia,tegra20-pmc";
reg = <0x7000e400 0x400>;
diff --git a/arch/arm/boot/dts/tegra30-cardhu-a02.dts b/arch/arm/boot/dts/tegra30-cardhu-a02.dts
index dd4222f00ec..adc88aa50eb 100644
--- a/arch/arm/boot/dts/tegra30-cardhu-a02.dts
+++ b/arch/arm/boot/dts/tegra30-cardhu-a02.dts
@@ -83,5 +83,11 @@
gpio = <&gpio 83 0>; /* GPIO PK3 */
};
};
+
+ sdhci@78000400 {
+ status = "okay";
+ power-gpios = <&gpio 28 0>; /* gpio PD4 */
+ bus-width = <4>;
+ };
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu-a04.dts b/arch/arm/boot/dts/tegra30-cardhu-a04.dts
index 0828f097ca8..08163e145d5 100644
--- a/arch/arm/boot/dts/tegra30-cardhu-a04.dts
+++ b/arch/arm/boot/dts/tegra30-cardhu-a04.dts
@@ -95,4 +95,10 @@
gpio = <&gpio 232 0>; /* GPIO PDD0 */
};
};
+
+ sdhci@78000400 {
+ status = "okay";
+ power-gpios = <&gpio 27 0>; /* gpio PD3 */
+ bus-width = <4>;
+ };
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index b1271a89432..bdb2a660f37 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -52,6 +52,22 @@
nvidia,pull = <2>;
nvidia,tristate = <0>;
};
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <0>;
+ nvidia,tristate = <0>;
+ };
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
+ "sdmmc3_dat1_pb6",
+ "sdmmc3_dat2_pb5",
+ "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <2>;
+ nvidia,tristate = <0>;
+ };
sdmmc4_clk_pcc4 {
nvidia,pins = "sdmmc4_clk_pcc4",
"sdmmc4_rst_n_pcc3";
@@ -81,6 +97,15 @@
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
+ sdio3 {
+ nvidia,pins = "drive_sdio3";
+ nvidia,high-speed-mode = <0>;
+ nvidia,schmitt = <0>;
+ nvidia,pull-down-strength = <46>;
+ nvidia,pull-up-strength = <42>;
+ nvidia,slew-rate-rising = <1>;
+ nvidia,slew-rate-falling = <1>;
+ };
};
};
@@ -250,6 +275,16 @@
};
};
+ spi@7000da00 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+ spi-flash@1 {
+ compatible = "winbond,w25q32";
+ reg = <1>;
+ spi-max-frequency = <20000000>;
+ };
+ };
+
ahub {
i2s@70080400 {
status = "okay";
@@ -384,6 +419,8 @@
regulator-name = "vdd_com";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
enable-active-high;
gpio = <&gpio 24 0>; /* gpio PD0 */
vin-supply = <&sys_3v3_reg>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index fed8dca1692..529fdb82dfd 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,6 +4,99 @@
compatible = "nvidia,tegra30";
interrupt-parent = <&intc>;
+ host1x {
+ compatible = "nvidia,tegra30-host1x", "simple-bus";
+ reg = <0x50000000 0x00024000>;
+ interrupts = <0 65 0x04 /* mpcore syncpt */
+ 0 67 0x04>; /* mpcore general */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x54000000 0x54000000 0x04000000>;
+
+ mpe {
+ compatible = "nvidia,tegra30-mpe";
+ reg = <0x54040000 0x00040000>;
+ interrupts = <0 68 0x04>;
+ };
+
+ vi {
+ compatible = "nvidia,tegra30-vi";
+ reg = <0x54080000 0x00040000>;
+ interrupts = <0 69 0x04>;
+ };
+
+ epp {
+ compatible = "nvidia,tegra30-epp";
+ reg = <0x540c0000 0x00040000>;
+ interrupts = <0 70 0x04>;
+ };
+
+ isp {
+ compatible = "nvidia,tegra30-isp";
+ reg = <0x54100000 0x00040000>;
+ interrupts = <0 71 0x04>;
+ };
+
+ gr2d {
+ compatible = "nvidia,tegra30-gr2d";
+ reg = <0x54140000 0x00040000>;
+ interrupts = <0 72 0x04>;
+ };
+
+ gr3d {
+ compatible = "nvidia,tegra30-gr3d";
+ reg = <0x54180000 0x00040000>;
+ };
+
+ dc@54200000 {
+ compatible = "nvidia,tegra30-dc";
+ reg = <0x54200000 0x00040000>;
+ interrupts = <0 73 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ dc@54240000 {
+ compatible = "nvidia,tegra30-dc";
+ reg = <0x54240000 0x00040000>;
+ interrupts = <0 74 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ hdmi {
+ compatible = "nvidia,tegra30-hdmi";
+ reg = <0x54280000 0x00040000>;
+ interrupts = <0 75 0x04>;
+ status = "disabled";
+ };
+
+ tvo {
+ compatible = "nvidia,tegra30-tvo";
+ reg = <0x542c0000 0x00040000>;
+ interrupts = <0 76 0x04>;
+ status = "disabled";
+ };
+
+ dsi {
+ compatible = "nvidia,tegra30-dsi";
+ reg = <0x54300000 0x00040000>;
+ status = "disabled";
+ };
+ };
+
+ timer@50040600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x50040600 0x20>;
+ interrupts = <1 13 0xf04>;
+ };
+
cache-controller@50043000 {
compatible = "arm,pl310-cache";
reg = <0x50043000 0x1000>;
@@ -21,6 +114,17 @@
#interrupt-cells = <3>;
};
+ timer@60005000 {
+ compatible = "nvidia,tegra30-timer", "nvidia,tegra20-timer";
+ reg = <0x60005000 0x400>;
+ interrupts = <0 0 0x04
+ 0 1 0x04
+ 0 41 0x04
+ 0 42 0x04
+ 0 121 0x04
+ 0 122 0x04>;
+ };
+
apbdma: dma {
compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1400>;
@@ -132,6 +236,12 @@
#pwm-cells = <2>;
};
+ rtc {
+ compatible = "nvidia,tegra30-rtc", "nvidia,tegra20-rtc";
+ reg = <0x7000e000 0x100>;
+ interrupts = <0 2 0x04>;
+ };
+
i2c@7000c000 {
compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
reg = <0x7000c000 0x100>;
@@ -177,6 +287,66 @@
status = "disabled";
};
+ spi@7000d400 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000d400 0x200>;
+ interrupts = <0 59 0x04>;
+ nvidia,dma-request-selector = <&apbdma 15>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000d600 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000d600 0x200>;
+ interrupts = <0 82 0x04>;
+ nvidia,dma-request-selector = <&apbdma 16>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000d800 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000d800 0x200>;
+ interrupts = <0 83 0x04>;
+ nvidia,dma-request-selector = <&apbdma 17>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000da00 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000da00 0x200>;
+ interrupts = <0 93 0x04>;
+ nvidia,dma-request-selector = <&apbdma 18>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000dc00 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000dc00 0x200>;
+ interrupts = <0 94 0x04>;
+ nvidia,dma-request-selector = <&apbdma 27>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi@7000de00 {
+ compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
+ reg = <0x7000de00 0x200>;
+ interrupts = <0 79 0x04>;
+ nvidia,dma-request-selector = <&apbdma 28>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pmc {
compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
reg = <0x7000e400 0x400>;
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index 63411b03693..ed0bc954683 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -19,6 +19,10 @@
interrupts = <11>;
};
+ watchdog {
+ compatible = "ti,twl4030-wdt";
+ };
+
vdac: regulator-vdac {
compatible = "ti,twl4030-vdac";
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/u9540.dts b/arch/arm/boot/dts/u9540.dts
new file mode 100644
index 00000000000..95892ec6c34
--- /dev/null
+++ b/arch/arm/boot/dts/u9540.dts
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 ST-Ericsson AB
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "dbx5x0.dtsi"
+
+/ {
+ model = "ST-Ericsson U9540 platform with Device Tree";
+ compatible = "st-ericsson,u9540";
+
+ memory {
+ reg = <0x00000000 0x20000000>;
+ };
+
+ soc-u9500 {
+ uart@80120000 {
+ status = "okay";
+ };
+
+ uart@80121000 {
+ status = "okay";
+ };
+
+ uart@80007000 {
+ status = "okay";
+ };
+
+ // External Micro SD slot
+ sdi0_per1@80126000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
+ mmc-cap-sd-highspeed;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ cd-gpios = <&gpio7 6 0x4>; // 230
+ cd-inverted;
+
+ status = "okay";
+ };
+
+
+ // WLAN SDIO channel
+ sdi1_per2@80118000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+
+ status = "okay";
+ };
+
+ // On-board eMMC
+ sdi4_per2@80114000 {
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
+ vmmc-supply = <&ab8500_ldo_aux2_reg>;
+
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
new file mode 100644
index 00000000000..401c1262d4e
--- /dev/null
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 Xilinx
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "xlnx,zynq-7000";
+
+ amba {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+ ranges;
+
+ intc: interrupt-controller@f8f01000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ interrupt-controller;
+ reg = <0xF8F01000 0x1000>,
+ <0xF8F00100 0x100>;
+ };
+
+ L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ reg = <0xF8F02000 0x1000>;
+ arm,data-latency = <2 3 2>;
+ arm,tag-latency = <2 3 2>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ uart0: uart@e0000000 {
+ compatible = "xlnx,xuartps";
+ reg = <0xE0000000 0x1000>;
+ interrupts = <0 27 4>;
+ clock = <50000000>;
+ };
+
+ uart1: uart@e0001000 {
+ compatible = "xlnx,xuartps";
+ reg = <0xE0001000 0x1000>;
+ interrupts = <0 50 4>;
+ clock = <50000000>;
+ };
+
+ slcr: slcr@f8000000 {
+ compatible = "xlnx,zynq-slcr";
+ reg = <0xF8000000 0x1000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ps_clk: ps_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ /* clock-frequency set in board-specific file */
+ clock-output-names = "ps_clk";
+ };
+ armpll: armpll {
+ #clock-cells = <0>;
+ compatible = "xlnx,zynq-pll";
+ clocks = <&ps_clk>;
+ reg = <0x100 0x110>;
+ clock-output-names = "armpll";
+ };
+ ddrpll: ddrpll {
+ #clock-cells = <0>;
+ compatible = "xlnx,zynq-pll";
+ clocks = <&ps_clk>;
+ reg = <0x104 0x114>;
+ clock-output-names = "ddrpll";
+ };
+ iopll: iopll {
+ #clock-cells = <0>;
+ compatible = "xlnx,zynq-pll";
+ clocks = <&ps_clk>;
+ reg = <0x108 0x118>;
+ clock-output-names = "iopll";
+ };
+ uart_clk: uart_clk {
+ #clock-cells = <1>;
+ compatible = "xlnx,zynq-periph-clock";
+ clocks = <&iopll &armpll &ddrpll>;
+ reg = <0x154>;
+ clock-output-names = "uart0_ref_clk",
+ "uart1_ref_clk";
+ };
+ cpu_clk: cpu_clk {
+ #clock-cells = <1>;
+ compatible = "xlnx,zynq-cpu-clock";
+ clocks = <&iopll &armpll &ddrpll>;
+ reg = <0x120 0x1C4>;
+ clock-output-names = "cpu_6x4x",
+ "cpu_3x2x",
+ "cpu_2x",
+ "cpu_1x";
+ };
+ };
+ };
+
+ ttc0: ttc0@f8001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "xlnx,ttc";
+ reg = <0xF8001000 0x1000>;
+ clocks = <&cpu_clk 3>;
+ clock-names = "cpu_1x";
+ clock-ranges;
+
+ ttc0_0: ttc0.0 {
+ status = "disabled";
+ reg = <0>;
+ interrupts = <0 10 4>;
+ };
+ ttc0_1: ttc0.1 {
+ status = "disabled";
+ reg = <1>;
+ interrupts = <0 11 4>;
+ };
+ ttc0_2: ttc0.2 {
+ status = "disabled";
+ reg = <2>;
+ interrupts = <0 12 4>;
+ };
+ };
+
+ ttc1: ttc1@f8002000 {
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "xlnx,ttc";
+ reg = <0xF8002000 0x1000>;
+ clocks = <&cpu_clk 3>;
+ clock-names = "cpu_1x";
+ clock-ranges;
+
+ ttc1_0: ttc1.0 {
+ status = "disabled";
+ reg = <0>;
+ interrupts = <0 37 4>;
+ };
+ ttc1_1: ttc1.1 {
+ status = "disabled";
+ reg = <1>;
+ interrupts = <0 38 4>;
+ };
+ ttc1_2: ttc1.2 {
+ status = "disabled";
+ reg = <2>;
+ interrupts = <0 39 4>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/zynq-ep107.dts b/arch/arm/boot/dts/zynq-ep107.dts
deleted file mode 100644
index 574bc044f57..00000000000
--- a/arch/arm/boot/dts/zynq-ep107.dts
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-/ {
- model = "Xilinx Zynq EP107";
- compatible = "xlnx,zynq-ep107";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&intc>;
-
- memory {
- device_type = "memory";
- reg = <0x0 0x10000000>;
- };
-
- chosen {
- bootargs = "console=ttyPS0,9600 root=/dev/ram rw initrd=0x800000,8M earlyprintk";
- linux,stdout-path = &uart0;
- };
-
- amba {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- intc: interrupt-controller@f8f01000 {
- compatible = "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- #address-cells = <1>;
- interrupt-controller;
- reg = <0xF8F01000 0x1000>,
- <0xF8F00100 0x100>;
- };
-
- L2: cache-controller {
- compatible = "arm,pl310-cache";
- reg = <0xF8F02000 0x1000>;
- arm,data-latency = <2 3 2>;
- arm,tag-latency = <2 3 2>;
- cache-unified;
- cache-level = <2>;
- };
-
- uart0: uart@e0000000 {
- compatible = "xlnx,xuartps";
- reg = <0xE0000000 0x1000>;
- interrupts = <0 27 4>;
- clock = <50000000>;
- };
- };
-};
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
new file mode 100644
index 00000000000..c772942a399
--- /dev/null
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011 Xilinx
+ * Copyright (C) 2012 National Instruments Corp.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/dts-v1/;
+/include/ "zynq-7000.dtsi"
+
+/ {
+ model = "Zynq ZC702 Development Board";
+ compatible = "xlnx,zynq-zc702", "xlnx,zynq-7000";
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyPS1,115200 earlyprintk";
+ };
+
+};
+
+&ps_clk {
+ clock-frequency = <33333330>;
+};
+
+&ttc0_0 {
+ status = "ok";
+ compatible = "xlnx,ttc-counter-clocksource";
+};
+
+&ttc0_1 {
+ status = "ok";
+ compatible = "xlnx,ttc-counter-clockevent";
+};
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index 5b8215f424c..728a43c446f 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -47,6 +47,8 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_FARADAY is not set
@@ -59,9 +61,8 @@ CONFIG_SMSC911X=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
+CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
@@ -78,9 +79,16 @@ CONFIG_GPIO_SYSFS=y
CONFIG_THERMAL=y
CONFIG_RCAR_THERMAL=y
CONFIG_SSB=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_RCAR_PHY=y
CONFIG_MMC=y
CONFIG_MMC_SDHI=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_IOMMU_SUPPORT is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 159f75fc437..2eeff1e64b6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -6,6 +6,7 @@ CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_XP=y
CONFIG_ARCH_HIGHBANK=y
CONFIG_ARCH_SOCFPGA=y
+CONFIG_ARCH_SUNXI=y
# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
CONFIG_ARM_ERRATA_754322=y
CONFIG_SMP=y
@@ -17,8 +18,10 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_NET=y
+CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_HIGHBANK=y
+CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_NET_CALXEDA_XGMAC=y
CONFIG_SMSC911X=y
diff --git a/arch/arm/configs/mvebu_defconfig b/arch/arm/configs/mvebu_defconfig
index 3458752c4bb..a702fb345c0 100644
--- a/arch/arm/configs/mvebu_defconfig
+++ b/arch/arm/configs/mvebu_defconfig
@@ -12,6 +12,9 @@ CONFIG_ARCH_MVEBU=y
CONFIG_MACH_ARMADA_370=y
CONFIG_MACH_ARMADA_XP=y
# CONFIG_CACHE_L2X0 is not set
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_LOCAL_TIMERS is not set
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
# CONFIG_COMPACTION is not set
@@ -19,13 +22,27 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_VFP=y
+CONFIG_NET=y
+CONFIG_INET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_MV=y
+CONFIG_NETDEVICES=y
+CONFIG_MVNETA=y
+CONFIG_MARVELL_PHY=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_I2C=y
+CONFIG_I2C_MV64XXX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_S35390A=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 048aaca6081..7bf535104e2 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -61,6 +61,8 @@ CONFIG_MTD_NAND_GPMI_NAND=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_ENC28J60=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
@@ -158,6 +160,10 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
CONFIG_PRINTK_TIME=y
CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 240b25eea56..86cfd2959c4 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -57,7 +57,7 @@ CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND_NOMADIK=y
+CONFIG_MTD_NAND_FSMC=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_GENERIC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a1dc5c071e7..82ce8d738fa 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -65,6 +65,8 @@ CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
@@ -132,9 +134,11 @@ CONFIG_POWER_SUPPLY=y
CONFIG_WATCHDOG=y
CONFIG_OMAP_WATCHDOG=y
CONFIG_TWL4030_WATCHDOG=y
+CONFIG_MFD_TPS65217=y
CONFIG_REGULATOR_TWL4030=y
CONFIG_REGULATOR_TPS65023=y
CONFIG_REGULATOR_TPS6507X=y
+CONFIG_REGULATOR_TPS65217=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
@@ -170,6 +174,7 @@ CONFIG_SND_DEBUG=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
CONFIG_USB=y
CONFIG_USB_DEBUG=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 0ac1293dba1..4e1ce211d43 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -18,9 +18,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_SOCFPGA=y
CONFIG_MACH_SOCFPGA_CYCLONE5=y
CONFIG_ARM_THUMBEE=y
+# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
# CONFIG_CACHE_L2X0 is not set
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_VMSPLIT_2G=y
+CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 250625d5223..231dca60473 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -78,6 +78,7 @@ CONFIG_AB8500_CORE=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
# CONFIG_HID_SUPPORT is not set
CONFIG_USB_GADGET=y
CONFIG_AB8500_USB=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8ea02ac3ec1..5b579b95150 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -91,6 +91,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
+ debug_dma_mapping_error(dev, dma_addr);
return dma_addr == DMA_ERROR_CODE;
}
@@ -111,6 +112,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
extern int dma_supported(struct device *dev, u64 mask);
+extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7cd13cc6262..21a2700d295 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -41,7 +41,6 @@
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
#endif
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 5000397134b..1151188bcd8 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -49,6 +49,7 @@ DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);
DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
/* Maximum number of virtual CPUs in multi-processor guests. */
#define MAX_VIRT_CPUS 1
diff --git a/arch/arm/include/debug/imx.S b/arch/arm/include/debug/imx.S
index 0b65d792f66..0c4e17d4d35 100644
--- a/arch/arm/include/debug/imx.S
+++ b/arch/arm/include/debug/imx.S
@@ -10,6 +10,20 @@
* published by the Free Software Foundation.
*
*/
+#define IMX6Q_UART1_BASE_ADDR 0x02020000
+#define IMX6Q_UART2_BASE_ADDR 0x021e8000
+#define IMX6Q_UART3_BASE_ADDR 0x021ec000
+#define IMX6Q_UART4_BASE_ADDR 0x021f0000
+#define IMX6Q_UART5_BASE_ADDR 0x021f4000
+
+/*
+ * IMX6Q_UART_BASE_ADDR is put in the middle to force the expansion
+ * of IMX6Q_UART##n##_BASE_ADDR.
+ */
+#define IMX6Q_UART_BASE_ADDR(n) IMX6Q_UART##n##_BASE_ADDR
+#define IMX6Q_UART_BASE(n) IMX6Q_UART_BASE_ADDR(n)
+#define IMX6Q_DEBUG_UART_BASE IMX6Q_UART_BASE(CONFIG_DEBUG_IMX6Q_UART_PORT)
+
#ifdef CONFIG_DEBUG_IMX1_UART
#define UART_PADDR 0x00206000
#elif defined (CONFIG_DEBUG_IMX25_UART)
@@ -22,10 +36,8 @@
#define UART_PADDR 0x73fbc000
#elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
#define UART_PADDR 0x53fbc000
-#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
-#define UART_PADDR 0x021e8000
-#elif defined (CONFIG_DEBUG_IMX6Q_UART4)
-#define UART_PADDR 0x021f0000
+#elif defined (CONFIG_DEBUG_IMX6Q_UART)
+#define UART_PADDR IMX6Q_DEBUG_UART_BASE
#endif
/*
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
new file mode 100644
index 00000000000..883d7c22fd9
--- /dev/null
+++ b/arch/arm/include/debug/tegra.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2010,2011 Google, Inc.
+ * Copyright (C) 2011-2012 NVIDIA CORPORATION. All Rights Reserved.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Erik Gilling <konkers@google.com>
+ * Doug Anderson <dianders@chromium.org>
+ * Stephen Warren <swarren@nvidia.com>
+ *
+ * Portions based on mach-omap2's debug-macro.S
+ * Copyright (C) 1994-1999 Russell King
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/serial_reg.h>
+
+#define UART_SHIFT 2
+
+/* Physical addresses */
+#define TEGRA_CLK_RESET_BASE 0x60006000
+#define TEGRA_APB_MISC_BASE 0x70000000
+#define TEGRA_UARTA_BASE 0x70006000
+#define TEGRA_UARTB_BASE 0x70006040
+#define TEGRA_UARTC_BASE 0x70006200
+#define TEGRA_UARTD_BASE 0x70006300
+#define TEGRA_UARTE_BASE 0x70006400
+#define TEGRA_PMC_BASE 0x7000e400
+
+#define TEGRA_CLK_RST_DEVICES_L (TEGRA_CLK_RESET_BASE + 0x04)
+#define TEGRA_CLK_RST_DEVICES_H (TEGRA_CLK_RESET_BASE + 0x08)
+#define TEGRA_CLK_RST_DEVICES_U (TEGRA_CLK_RESET_BASE + 0x0c)
+#define TEGRA_CLK_OUT_ENB_L (TEGRA_CLK_RESET_BASE + 0x10)
+#define TEGRA_CLK_OUT_ENB_H (TEGRA_CLK_RESET_BASE + 0x14)
+#define TEGRA_CLK_OUT_ENB_U (TEGRA_CLK_RESET_BASE + 0x18)
+#define TEGRA_PMC_SCRATCH20 (TEGRA_PMC_BASE + 0xa0)
+#define TEGRA_APB_MISC_GP_HIDREV (TEGRA_APB_MISC_BASE + 0x804)
+
+/*
+ * Must be 1MB-aligned since a 1MB mapping is used early on.
+ * Must not overlap with regions in mach-tegra/io.c:tegra_io_desc[].
+ */
+#define UART_VIRTUAL_BASE 0xfe100000
+
+#define checkuart(rp, rv, lhu, bit, uart) \
+ /* Load address of CLK_RST register */ \
+ movw rp, #TEGRA_CLK_RST_DEVICES_##lhu & 0xffff ; \
+ movt rp, #TEGRA_CLK_RST_DEVICES_##lhu >> 16 ; \
+ /* Load value from CLK_RST register */ \
+ ldr rp, [rp, #0] ; \
+ /* Test UART's reset bit */ \
+ tst rp, #(1 << bit) ; \
+ /* If set, can't use UART; jump to save no UART */ \
+ bne 90f ; \
+ /* Load address of CLK_OUT_ENB register */ \
+ movw rp, #TEGRA_CLK_OUT_ENB_##lhu & 0xffff ; \
+ movt rp, #TEGRA_CLK_OUT_ENB_##lhu >> 16 ; \
+ /* Load value from CLK_OUT_ENB register */ \
+ ldr rp, [rp, #0] ; \
+ /* Test UART's clock enable bit */ \
+ tst rp, #(1 << bit) ; \
+ /* If clear, can't use UART; jump to save no UART */ \
+ beq 90f ; \
+ /* Passed all tests, load address of UART registers */ \
+ movw rp, #TEGRA_UART##uart##_BASE & 0xffff ; \
+ movt rp, #TEGRA_UART##uart##_BASE >> 16 ; \
+ /* Jump to save UART address */ \
+ b 91f
+
+ .macro addruart, rp, rv, tmp
+ adr \rp, 99f @ actual addr of 99f
+ ldr \rv, [\rp] @ linked addr is stored there
+ sub \rv, \rv, \rp @ offset between the two
+ ldr \rp, [\rp, #4] @ linked tegra_uart_config
+ sub \tmp, \rp, \rv @ actual tegra_uart_config
+ ldr \rp, [\tmp] @ Load tegra_uart_config
+ cmp \rp, #1 @ needs initialization?
+ bne 100f @ no; go load the addresses
+ mov \rv, #0 @ yes; record init is done
+ str \rv, [\tmp]
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
+ /* Check ODMDATA */
+10: movw \rp, #TEGRA_PMC_SCRATCH20 & 0xffff
+ movt \rp, #TEGRA_PMC_SCRATCH20 >> 16
+ ldr \rp, [\rp, #0] @ Load PMC_SCRATCH20
+ ubfx \rv, \rp, #18, #2 @ 19:18 are console type
+ cmp \rv, #2 @ 2 and 3 mean DCC, UART
+ beq 11f @ some boards swap the meaning
+ cmp \rv, #3 @ so accept either
+ bne 90f
+11: ubfx \rv, \rp, #15, #3 @ 17:15 are UART ID
+ cmp \rv, #0 @ UART 0?
+ beq 20f
+ cmp \rv, #1 @ UART 1?
+ beq 21f
+ cmp \rv, #2 @ UART 2?
+ beq 22f
+ cmp \rv, #3 @ UART 3?
+ beq 23f
+ cmp \rv, #4 @ UART 4?
+ beq 24f
+ b 90f @ invalid
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTA) || \
+ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ /* Check UART A validity */
+20: checkuart(\rp, \rv, L, 6, A)
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTB) || \
+ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ /* Check UART B validity */
+21: checkuart(\rp, \rv, L, 7, B)
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTC) || \
+ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ /* Check UART C validity */
+22: checkuart(\rp, \rv, H, 23, C)
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTD) || \
+ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ /* Check UART D validity */
+23: checkuart(\rp, \rv, U, 1, D)
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTE) || \
+ defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ /* Check UART E validity */
+24:
+ checkuart(\rp, \rv, U, 2, E)
+#endif
+
+ /* No valid UART found */
+90: mov \rp, #0
+ /* fall through */
+
+ /* Record whichever UART we chose */
+91: str \rp, [\tmp, #4] @ Store in tegra_uart_phys
+ cmp \rp, #0 @ Valid UART address?
+ bne 92f @ Yes, go process it
+ str \rp, [\tmp, #8] @ Store 0 in tegra_uart_virt
+ b 100f @ Done
+92: and \rv, \rp, #0xffffff @ offset within 1MB section
+ add \rv, \rv, #UART_VIRTUAL_BASE
+ str \rv, [\tmp, #8] @ Store in tegra_uart_virt
+ movw \rv, #TEGRA_APB_MISC_GP_HIDREV & 0xffff
+ movt \rv, #TEGRA_APB_MISC_GP_HIDREV >> 16
+ ldr \rv, [\rv, #0] @ Load HIDREV
+ ubfx \rv, \rv, #8, #8 @ 15:8 are SoC version
+ cmp \rv, #0x20 @ Tegra20?
+ moveq \rv, #0x75 @ Tegra20 divisor
+ movne \rv, #0xdd @ Tegra30 divisor
+ str \rv, [\tmp, #12] @ Save divisor to scratch
+ /* uart[UART_LCR] = UART_LCR_WLEN8 | UART_LCR_DLAB; */
+ mov \rv, #UART_LCR_WLEN8 | UART_LCR_DLAB
+ str \rv, [\rp, #UART_LCR << UART_SHIFT]
+ /* uart[UART_DLL] = div & 0xff; */
+ ldr \rv, [\tmp, #12]
+ and \rv, \rv, #0xff
+ str \rv, [\rp, #UART_DLL << UART_SHIFT]
+ /* uart[UART_DLM] = div >> 8; */
+ ldr \rv, [\tmp, #12]
+ lsr \rv, \rv, #8
+ str \rv, [\rp, #UART_DLM << UART_SHIFT]
+ /* uart[UART_LCR] = UART_LCR_WLEN8; */
+ mov \rv, #UART_LCR_WLEN8
+ str \rv, [\rp, #UART_LCR << UART_SHIFT]
+ b 100f
+
+ .align
+99: .word .
+ .word tegra_uart_config
+ .ltorg
+
+ /* Load previously selected UART address */
+100: ldr \rp, [\tmp, #4] @ Load tegra_uart_phys
+ ldr \rv, [\tmp, #8] @ Load tegra_uart_virt
+ .endm
+
+/*
+ * Code below is swiped from <asm/hardware/debug-8250.S>, but add an extra
+ * check to make sure that the UART address is actually valid.
+ */
+
+ .macro senduart, rd, rx
+ cmp \rx, #0
+ strneb \rd, [\rx, #UART_TX << UART_SHIFT]
+1001:
+ .endm
+
+ .macro busyuart, rd, rx
+ cmp \rx, #0
+ beq 1002f
+1001: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1001b
+1002:
+ .endm
+
+ .macro waituart, rd, rx
+#ifdef FLOW_CONTROL
+ cmp \rx, #0
+ beq 1002f
+1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
+ tst \rd, #UART_MSR_CTS
+ beq 1001b
+1002:
+#endif
+ .endm
diff --git a/arch/arm/mach-zynq/include/mach/debug-macro.S b/arch/arm/include/debug/zynq.S
index 3ab0be1f619..f9aa9740a73 100644
--- a/arch/arm/mach-zynq/include/mach/debug-macro.S
+++ b/arch/arm/include/debug/zynq.S
@@ -1,5 +1,4 @@
-/* arch/arm/mach-zynq/include/mach/debug-macro.S
- *
+/*
* Debugging macro include header
*
* Copyright (C) 2011 Xilinx
@@ -13,9 +12,25 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#define UART_CR_OFFSET 0x00 /* Control Register [8:0] */
+#define UART_SR_OFFSET 0x2C /* Channel Status [11:0] */
+#define UART_FIFO_OFFSET 0x30 /* FIFO [15:0] or [7:0] */
+
+#define UART_SR_TXFULL 0x00000010 /* TX FIFO full */
+#define UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */
+
+#define UART0_PHYS 0xE0000000
+#define UART1_PHYS 0xE0001000
+#define UART_SIZE SZ_4K
+#define UART_VIRT 0xF0001000
+
+#if IS_ENABLED(CONFIG_DEBUG_ZYNQ_UART1)
+# define LL_UART_PADDR UART1_PHYS
+#else
+# define LL_UART_PADDR UART0_PHYS
+#endif
-#include <mach/zynq_soc.h>
-#include <mach/uart.h>
+#define LL_UART_VADDR UART_VIRT
.macro addruart, rp, rv, tmp
ldr \rp, =LL_UART_PADDR @ physical
diff --git a/arch/arm/include/uapi/asm/signal.h b/arch/arm/include/uapi/asm/signal.h
index 921c57fdc52..33073bdcf09 100644
--- a/arch/arm/include/uapi/asm/signal.h
+++ b/arch/arm/include/uapi/asm/signal.h
@@ -87,13 +87,6 @@ typedef unsigned long sigset_t;
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ac03bdb4ae4..4da7cde70b5 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -405,6 +405,7 @@
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
/* 378 for kcmp */
+#define __NR_finit_module (__NR_SYSCALL_BASE+379)
/*
* This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5935b6a02e6..a4fda4e7a37 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -388,6 +388,7 @@
CALL(sys_process_vm_readv)
CALL(sys_process_vm_writev)
CALL(sys_ni_syscall) /* reserved for sys_kcmp */
+ CALL(sys_finit_module)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index 1862d8f2fd4..0cd63d080c7 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -1598,7 +1598,7 @@ static int __init run_all_tests(void)
{
int ret = 0;
- pr_info("Begining kprobe tests...\n");
+ pr_info("Beginning kprobe tests...\n");
#ifndef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9a89bf4aefe..3f6cbb2e3ed 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -733,7 +733,7 @@ void __init setup_arch(char **cmdline_p)
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
- mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
+ mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
machine_desc = mdesc;
machine_name = mdesc->name;
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index df745188f5d..ab1017bd166 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -109,10 +109,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
+ down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
+ up_read(&current->mm->mmap_sem);
info.si_signo = SIGSEGV;
info.si_errno = 0;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b9f38e388b4..11c1785bf63 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -140,6 +140,8 @@ SECTIONS
}
#endif
+ NOTES
+
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
@@ -295,8 +297,6 @@ SECTIONS
}
#endif
- NOTES
-
BSS_SECTION(0, 0, 0)
_end = .;
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 8ce068240c6..7aeb473ee53 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -184,9 +184,12 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.2", &ssc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffd0000.ssc", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffd4000.ssc", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffd8000.ssc", &ssc2_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91rm9200.0", &twi_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 2a1f8e67683..3ebc9792560 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -752,7 +752,7 @@ static struct resource ssc0_resources[] = {
};
static struct platform_device at91rm9200_ssc0_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
@@ -794,7 +794,7 @@ static struct resource ssc1_resources[] = {
};
static struct platform_device at91rm9200_ssc1_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
@@ -836,7 +836,7 @@ static struct resource ssc2_resources[] = {
};
static struct platform_device at91rm9200_ssc2_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 2,
.dev = {
.dma_mask = &ssc2_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index c65e7b8d7a8..b67cd537411 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -210,7 +210,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffbc000.ssc", &ssc_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260.0", &twi_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20.0", &twi_clk),
/* more usart lookup table for DT entries */
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 1f6fac21b2c..eda8d1679d4 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -742,7 +742,7 @@ static struct resource ssc_resources[] = {
};
static struct platform_device at91sam9260_ssc_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 9d3e9b8b992..2998a08afc2 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -174,9 +174,12 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.2", &ssc2_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffbc000.ssc", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffc0000.ssc", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffc4000.ssc", &ssc2_clk),
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &hck0),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261.0", &twi_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.0", &twi_clk),
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 6ce6d27e244..92e0f861084 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -706,7 +706,7 @@ static struct resource ssc0_resources[] = {
};
static struct platform_device at91sam9261_ssc0_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
@@ -748,7 +748,7 @@ static struct resource ssc1_resources[] = {
};
static struct platform_device at91sam9261_ssc1_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
@@ -790,7 +790,7 @@ static struct resource ssc2_resources[] = {
};
static struct platform_device at91sam9261_ssc2_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 2,
.dev = {
.dma_mask = &ssc2_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 82deb4d748b..b9fc60d1b33 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -186,8 +186,10 @@ static struct clk *periph_clocks[] __initdata = {
static struct clk_lookup periph_clocks_lookups[] = {
/* One additional fake clock for macb_hclk */
CLKDEV_CON_ID("hclk", &macb_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fff98000.ssc", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fff9c000.ssc", &ssc1_clk),
CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index fb98163b9b3..ed666f5cb01 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -1199,7 +1199,7 @@ static struct resource ssc0_resources[] = {
};
static struct platform_device at91sam9263_ssc0_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
@@ -1241,7 +1241,7 @@ static struct resource ssc1_resources[] = {
};
static struct platform_device at91sam9263_ssc1_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 45d753d473f..d3addee43d8 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -239,8 +239,10 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tcb0_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.0", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.1", &twi1_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91sam9g45_ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91sam9g45_ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fff9c000.ssc", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffa0000.ssc", &ssc1_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel-trng", &trng_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel_sha", &aestdessha_clk),
CLKDEV_CON_DEV_ID(NULL, "atmel_tdes", &aestdessha_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index e35964201a1..827c9f2a70f 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1459,7 +1459,7 @@ static struct resource ssc0_resources[] = {
};
static struct platform_device at91sam9g45_ssc0_device = {
- .name = "ssc",
+ .name = "at91sam9g45_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
@@ -1501,7 +1501,7 @@ static struct resource ssc1_resources[] = {
};
static struct platform_device at91sam9g45_ssc1_device = {
- .name = "ssc",
+ .name = "at91sam9g45_ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 44e3a633fda..eb98704db2d 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -184,8 +184,10 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
- CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.1", &ssc1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffc0000.ssc", &ssc0_clk),
+ CLKDEV_CON_DEV_ID("pclk", "fffc4000.ssc", &ssc1_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20.0", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20.1", &twi1_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 160384d93db..ddf223ff35c 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -832,7 +832,7 @@ static struct resource ssc0_resources[] = {
};
static struct platform_device at91sam9rl_ssc0_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 0,
.dev = {
.dma_mask = &ssc0_dmamask,
@@ -874,7 +874,7 @@ static struct resource ssc1_resources[] = {
};
static struct platform_device at91sam9rl_ssc1_device = {
- .name = "ssc",
+ .name = "at91rm9200_ssc",
.id = 1,
.dev = {
.dma_mask = &ssc1_dmamask,
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index dfb2c0c13fb..44a9a62dcc1 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -233,6 +233,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("mci_clk", "f000c000.mmc", &mmc1_clk),
CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma0_clk),
CLKDEV_CON_DEV_ID("dma_clk", "ffffee00.dma-controller", &dma1_clk),
+ CLKDEV_CON_DEV_ID("pclk", "f0010000.ssc", &ssc_clk),
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk),
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 7b512380236..1b7dd9f688d 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -353,6 +353,16 @@ static struct i2c_board_info __initdata ek_i2c_devices[] = {
},
};
+static struct platform_device sam9g20ek_audio_device = {
+ .name = "at91sam9g20ek-audio",
+ .id = -1,
+};
+
+static void __init ek_add_device_audio(void)
+{
+ platform_device_register(&sam9g20ek_audio_device);
+}
+
static void __init ek_board_init(void)
{
@@ -394,6 +404,7 @@ static void __init ek_board_init(void)
at91_set_B_periph(AT91_PIN_PC1, 0);
/* SSC (for WM8731) */
at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX);
+ ek_add_device_audio();
}
MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
diff --git a/arch/arm/mach-davinci/Makefile.boot b/arch/arm/mach-davinci/Makefile.boot
index 5c5a95a9d7d..04a6c4e67b1 100644
--- a/arch/arm/mach-davinci/Makefile.boot
+++ b/arch/arm/mach-davinci/Makefile.boot
@@ -11,5 +11,3 @@ else
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
endif
-
-dtb-$(CONFIG_MACH_DA8XX_DT) += da850-enbw-cmc.dtb da850-evm.dtb
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index d4f4dbfc0e5..0299915575a 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -41,6 +41,7 @@
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#include <mach/mux.h>
+#include <mach/sram.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -763,16 +764,19 @@ static u8 da850_iis_serializer_direction[] = {
};
static struct snd_platform_data da850_evm_snd_data = {
- .tx_dma_offset = 0x2000,
- .rx_dma_offset = 0x2000,
- .op_mode = DAVINCI_MCASP_IIS_MODE,
- .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
- .tdm_slots = 2,
- .serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_0,
- .version = MCASP_VERSION_2,
- .txnumevt = 1,
- .rxnumevt = 1,
+ .tx_dma_offset = 0x2000,
+ .rx_dma_offset = 0x2000,
+ .op_mode = DAVINCI_MCASP_IIS_MODE,
+ .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
+ .tdm_slots = 2,
+ .serial_dir = da850_iis_serializer_direction,
+ .asp_chan_q = EVENTQ_0,
+ .ram_chan_q = EVENTQ_1,
+ .version = MCASP_VERSION_2,
+ .txnumevt = 1,
+ .rxnumevt = 1,
+ .sram_size_playback = SZ_8K,
+ .sram_size_capture = SZ_8K,
};
static const short da850_evm_mcasp_pins[] __initconst = {
@@ -1510,6 +1514,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: mcasp mux setup failed: %d\n",
ret);
+ da850_evm_snd_data.sram_pool = sram_get_gen_pool();
da8xx_register_mcasp(0, &da850_evm_snd_data);
ret = davinci_cfg_reg_list(da850_lcdcntl_pins);
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 510648e0394..678a54a64da 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -408,7 +408,7 @@ static struct clk_lookup da830_clks[] = {
CLK(NULL, "pwm2", &pwm2_clk),
CLK("eqep.0", NULL, &eqep0_clk),
CLK("eqep.1", NULL, &eqep1_clk),
- CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
+ CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 68c5fe01857..6b9154e9f90 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -403,7 +403,7 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "rmii", &rmii_clk),
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
- CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
+ CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK(NULL, "aemif", &aemif_clk),
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 46c9a0c09ae..2d5502d84a2 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -589,29 +589,9 @@ int __init da8xx_register_uio_pruss(void)
return platform_device_register(&da8xx_uio_pruss_dev);
}
-static const struct display_panel disp_panel = {
- QVGA,
- 16,
- 16,
- COLOR_ACTIVE,
-};
-
static struct lcd_ctrl_config lcd_cfg = {
- &disp_panel,
- .ac_bias = 255,
- .ac_bias_intrpt = 0,
- .dma_burst_sz = 16,
+ .panel_shade = COLOR_ACTIVE,
.bpp = 16,
- .fdd = 255,
- .tft_alt_mode = 0,
- .stn_565_mode = 0,
- .mono_8bit_mode = 0,
- .invert_line_clock = 1,
- .invert_frm_clock = 1,
- .sync_edge = 0,
- .sync_ctrl = 1,
- .raster_order = 0,
- .fifo_th = 6,
};
struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = {
@@ -745,7 +725,7 @@ static struct resource da8xx_rtc_resources[] = {
};
static struct platform_device da8xx_rtc_device = {
- .name = "omap_rtc",
+ .name = "da830-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(da8xx_rtc_resources),
.resource = da8xx_rtc_resources,
@@ -754,17 +734,6 @@ static struct platform_device da8xx_rtc_device = {
int da8xx_register_rtc(void)
{
int ret;
- void __iomem *base;
-
- base = ioremap(DA8XX_RTC_BASE, SZ_4K);
- if (WARN_ON(!base))
- return -ENOMEM;
-
- /* Unlock the rtc's registers */
- __raw_writel(0x83e70b13, base + 0x6c);
- __raw_writel(0x95a4f1e0, base + 0x70);
-
- iounmap(base);
ret = platform_device_register(&da8xx_rtc_device);
if (!ret)
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index 00946e23c1e..c90250e3bef 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -53,6 +53,7 @@ static struct dev_pm_domain davinci_pm_domain = {
static struct pm_clk_notifier_block platform_bus_notifier = {
.pm_domain = &davinci_pm_domain,
+ .con_ids = { "fck", NULL, },
};
static int __init davinci_pm_runtime_init(void)
diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig
index 00154e74ce6..603c5fd99e8 100644
--- a/arch/arm/mach-dove/Kconfig
+++ b/arch/arm/mach-dove/Kconfig
@@ -17,6 +17,8 @@ config MACH_CM_A510
config MACH_DOVE_DT
bool "Marvell Dove Flattened Device Tree"
+ select MVEBU_CLK_CORE
+ select MVEBU_CLK_GATING
select USE_OF
help
Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f723fe13d0f..89f4f993cd0 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <linux/of.h>
@@ -32,6 +33,7 @@
#include <linux/irq.h>
#include <plat/time.h>
#include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/platform_data/dma-mv_xor.h>
#include <plat/irq.h>
#include <plat/common.h>
#include <plat/addr-map.h>
@@ -123,8 +125,8 @@ static void __init dove_clk_init(void)
orion_clkdev_add(NULL, "mv_crypto", crypto);
orion_clkdev_add(NULL, "dove-ac97", ac97);
orion_clkdev_add(NULL, "dove-pdma", pdma);
- orion_clkdev_add(NULL, "mv_xor_shared.0", xor0);
- orion_clkdev_add(NULL, "mv_xor_shared.1", xor1);
+ orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
+ orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
}
/*****************************************************************************
@@ -376,19 +378,44 @@ void dove_restart(char mode, const char *cmd)
#if defined(CONFIG_MACH_DOVE_DT)
/*
- * Auxdata required until real OF clock provider
+ * There are still devices that don't even know about DT;
+ * get clock gates here and add a clock lookup.
*/
-struct of_dev_auxdata dove_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL),
- OF_DEV_AUXDATA("marvell,orion-spi", 0xf1014600, "orion_spi.1", NULL),
- OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL),
- OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0",
- NULL),
- OF_DEV_AUXDATA("marvell,orion-sata", 0xf10a0000, "sata_mv.0", NULL),
- OF_DEV_AUXDATA("marvell,dove-sdhci", 0xf1092000, "sdhci-dove.0", NULL),
- OF_DEV_AUXDATA("marvell,dove-sdhci", 0xf1090000, "sdhci-dove.1", NULL),
- {},
-};
+static void __init dove_legacy_clk_init(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, NULL,
+ "marvell,dove-gating-clock");
+ struct of_phandle_args clkspec;
+
+ clkspec.np = np;
+ clkspec.args_count = 1;
+
+ clkspec.args[0] = CLOCK_GATING_BIT_USB0;
+ orion_clkdev_add(NULL, "orion-ehci.0",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_USB1;
+ orion_clkdev_add(NULL, "orion-ehci.1",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_GBE;
+ orion_clkdev_add(NULL, "mv643xx_eth_port.0",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
+ orion_clkdev_add("0", "pcie",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
+ orion_clkdev_add("1", "pcie",
+ of_clk_get_from_provider(&clkspec));
+}
+
+static void __init dove_of_clk_init(void)
+{
+ mvebu_clocks_init();
+ dove_legacy_clk_init();
+}
static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
@@ -405,20 +432,17 @@ static void __init dove_dt_init(void)
dove_setup_cpu_mbus();
/* Setup root of clk tree */
- dove_clk_init();
+ dove_of_clk_init();
/* Internal devices not ported to DT yet */
dove_rtc_init();
- dove_xor0_init();
- dove_xor1_init();
dove_ge00_init(&dove_dt_ge00_data);
dove_ehci0_init();
dove_ehci1_init();
dove_pcie_init(1, 1);
- of_platform_populate(NULL, of_default_bus_match_table,
- dove_auxdata_lookup, NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const dove_dt_board_compat[] = {
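The dove_legacy_clk_init() hunk above bridges devices that are not yet described in the device tree to DT-provided clock gates: each gate is resolved from the "marvell,dove-gating-clock" provider with a one-cell specifier and attached to the legacy device name via a clock lookup. A minimal sketch of the same pattern using the generic clkdev helper instead of orion_clkdev_add(); the gate index and device name below are illustrative placeholders, not values from this patch.

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>

static void __init example_legacy_clk_bridge(void)
{
	struct device_node *np;
	struct of_phandle_args clkspec;
	struct clk *clk;

	/* Locate the gating-clock provider declared in the device tree. */
	np = of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
	if (!np)
		return;

	/* One-cell specifier selecting a single gate (index is hypothetical). */
	clkspec.np = np;
	clkspec.args_count = 1;
	clkspec.args[0] = 3;

	/* Resolve the clock and register a lookup for a legacy (non-DT) device. */
	clk = of_clk_get_from_provider(&clkspec);
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, NULL, "foo-bar.0");
}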
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index 16026c2b1c8..d64274fc576 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,13 +47,9 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
static inline void putc(int c)
{
- int i;
-
- for (i = 0; i < 1000; i++) {
- /* Transmit fifo not full? */
- if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
- break;
- }
+ /* Transmit fifo not full? */
+ while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
+ ;
__raw_writeb(c, PHYS_UART_DATA);
}
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 070c7b6d3d8..91d5b6f1d5a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -63,6 +63,7 @@ config SOC_EXYNOS5250
depends on ARCH_EXYNOS5
select S5P_PM if PM
select S5P_SLEEP if PM
+ select S5P_DEV_MFC
select SAMSUNG_DMADEV
help
Enable EXYNOS5250 SoC support
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 66135eedf49..b189881657e 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
obj-$(CONFIG_EXYNOS_DEV_DMA) += dma.o
obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
-obj-$(CONFIG_EXYNOS_DEV_DRM) += dev-drm.o
obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index fa8a13405c9..bbcb3dea0d4 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -529,7 +529,7 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = "dwmmc",
+ .name = "biu",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 9),
@@ -576,6 +576,10 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 15),
}, {
+ .name = "tmu_apbif",
+ .enable = exynos4_clk_ip_perir_ctrl,
+ .ctrlbit = (1 << 17),
+ }, {
.name = "keypad",
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 16),
@@ -1130,7 +1134,7 @@ static struct clksrc_clk exynos4_clksrcs[] = {
.reg_div = { .reg = EXYNOS4_CLKDIV_MFC, .shift = 0, .size = 4 },
}, {
.clk = {
- .name = "sclk_dwmmc",
+ .name = "ciu",
.parent = &exynos4_clk_dout_mmc4.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 16),
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 4478757b930..e9d7b80bae4 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -80,6 +80,8 @@ static struct sleep_save exynos5_clock_save[] = {
SAVE_ITEM(EXYNOS5_VPLL_CON0),
SAVE_ITEM(EXYNOS5_VPLL_CON1),
SAVE_ITEM(EXYNOS5_VPLL_CON2),
+ SAVE_ITEM(EXYNOS5_PWR_CTRL1),
+ SAVE_ITEM(EXYNOS5_PWR_CTRL2),
};
#endif
@@ -196,6 +198,11 @@ static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
}
+static int exynos5_clk_hdmiphy_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
+}
+
/* Core list of CMU_CPU side */
static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -616,6 +623,11 @@ static struct clk exynos5_init_clocks_off[] = {
.enable = exynos5_clk_ip_peric_ctrl,
.ctrlbit = (1 << 24),
}, {
+ .name = "tmu_apbif",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peris_ctrl,
+ .ctrlbit = (1 << 21),
+ }, {
.name = "rtc",
.parent = &exynos5_clk_aclk_66.clk,
.enable = exynos5_clk_ip_peris_ctrl,
@@ -651,33 +663,48 @@ static struct clk exynos5_init_clocks_off[] = {
.ctrlbit = (1 << 15),
}, {
.name = "sata",
- .devname = "ahci",
+ .devname = "exynos5-sata",
+ .parent = &exynos5_clk_aclk_200.clk,
.enable = exynos5_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 6),
}, {
- .name = "sata_phy",
+ .name = "sata-phy",
+ .devname = "exynos5-sata-phy",
+ .parent = &exynos5_clk_aclk_200.clk,
.enable = exynos5_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 24),
}, {
- .name = "sata_phy_i2c",
+ .name = "i2c",
+ .devname = "exynos5-sata-phy-i2c",
+ .parent = &exynos5_clk_aclk_200.clk,
.enable = exynos5_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 25),
}, {
.name = "mfc",
- .devname = "s5p-mfc",
+ .devname = "s5p-mfc-v6",
.enable = exynos5_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "hdmi",
- .devname = "exynos4-hdmi",
+ .devname = "exynos5-hdmi",
.enable = exynos5_clk_ip_disp1_ctrl,
.ctrlbit = (1 << 6),
}, {
+ .name = "hdmiphy",
+ .devname = "exynos5-hdmi",
+ .enable = exynos5_clk_hdmiphy_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
.name = "mixer",
- .devname = "s5p-mixer",
+ .devname = "exynos5-mixer",
.enable = exynos5_clk_ip_disp1_ctrl,
.ctrlbit = (1 << 5),
}, {
+ .name = "dp",
+ .devname = "exynos-dp",
+ .enable = exynos5_clk_ip_disp1_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
.name = "jpeg",
.enable = exynos5_clk_ip_gen_ctrl,
.ctrlbit = (1 << 2),
@@ -1226,6 +1253,16 @@ static struct clksrc_clk exynos5_clksrcs[] = {
.reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 24, .size = 3 },
}, {
.clk = {
+ .name = "sclk_sata",
+ .devname = "exynos5-sata",
+ .enable = exynos5_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &exynos5_clkset_aclk,
+ .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 24, .size = 1 },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS0, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
.name = "sclk_gscl_wrap",
.devname = "s5p-mipi-csis.0",
.enable = exynos5_clksrc_mask_gscl_ctrl,
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index e05f6cca2c9..d6d0dc65108 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -679,7 +679,10 @@ void __init exynos5_init_irq(void)
* These parameters should be NULL and 0 because EXYNOS4
* uses GIC instead of VIC.
*/
- s5p_init_irq(NULL, 0);
+ if (!of_machine_is_compatible("samsung,exynos5440"))
+ s5p_init_irq(NULL, 0);
+
+ gic_arch_extn.irq_set_wake = s3c_irq_wake;
}
struct bus_type exynos_subsys = {
@@ -1020,11 +1023,14 @@ static int __init exynos_init_irq_eint(void)
* platforms switch over to using the pinctrl driver, the wakeup
* interrupt support code here can be completely removed.
*/
+ static const struct of_device_id exynos_pinctrl_ids[] = {
+ { .compatible = "samsung,pinctrl-exynos4210", },
+ { .compatible = "samsung,pinctrl-exynos4x12", },
+ };
struct device_node *pctrl_np, *wkup_np;
- const char *pctrl_compat = "samsung,pinctrl-exynos4210";
const char *wkup_compat = "samsung,exynos4210-wakeup-eint";
- for_each_compatible_node(pctrl_np, NULL, pctrl_compat) {
+ for_each_matching_node(pctrl_np, exynos_pinctrl_ids) {
if (of_device_is_available(pctrl_np)) {
wkup_np = of_find_compatible_node(pctrl_np, NULL,
wkup_compat);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index dac146df79a..04744f9c120 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -25,7 +25,7 @@ void exynos_init_late(void);
#ifdef CONFIG_PM_GENERIC_DOMAINS
int exynos_pm_late_initcall(void);
#else
-static int exynos_pm_late_initcall(void) { return 0; }
+static inline int exynos_pm_late_initcall(void) { return 0; }
#endif
#ifdef CONFIG_ARCH_EXYNOS4
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index cff0595d0d3..05092415277 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -21,6 +21,7 @@
#include <asm/suspend.h>
#include <asm/unified.h>
#include <asm/cpuidle.h>
+#include <mach/regs-clock.h>
#include <mach/regs-pmu.h>
#include <mach/pmu.h>
@@ -116,7 +117,8 @@ static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
cpu_suspend(0, idle_finisher);
#ifdef CONFIG_SMP
- scu_enable(S5P_VA_SCU);
+ if (!soc_is_exynos5250())
+ scu_enable(S5P_VA_SCU);
#endif
cpu_pm_exit();
@@ -156,12 +158,47 @@ static int exynos4_enter_lowpower(struct cpuidle_device *dev,
return exynos4_enter_core0_aftr(dev, drv, new_index);
}
+static void __init exynos5_core_down_clk(void)
+{
+ unsigned int tmp;
+
+ /*
+ * Enable arm clock down (in idle) and set arm divider
+ * ratios in WFI/WFE state.
+ */
+ tmp = PWR_CTRL1_CORE2_DOWN_RATIO | \
+ PWR_CTRL1_CORE1_DOWN_RATIO | \
+ PWR_CTRL1_DIV2_DOWN_EN | \
+ PWR_CTRL1_DIV1_DOWN_EN | \
+ PWR_CTRL1_USE_CORE1_WFE | \
+ PWR_CTRL1_USE_CORE0_WFE | \
+ PWR_CTRL1_USE_CORE1_WFI | \
+ PWR_CTRL1_USE_CORE0_WFI;
+ __raw_writel(tmp, EXYNOS5_PWR_CTRL1);
+
+ /*
+ * Enable arm clock up (on exiting idle). Set arm divider
+ * ratios when not in idle along with the standby duration
+ * ratios.
+ */
+ tmp = PWR_CTRL2_DIV2_UP_EN | \
+ PWR_CTRL2_DIV1_UP_EN | \
+ PWR_CTRL2_DUR_STANDBY2_VAL | \
+ PWR_CTRL2_DUR_STANDBY1_VAL | \
+ PWR_CTRL2_CORE2_UP_RATIO | \
+ PWR_CTRL2_CORE1_UP_RATIO;
+ __raw_writel(tmp, EXYNOS5_PWR_CTRL2);
+}
+
static int __init exynos4_init_cpuidle(void)
{
int i, max_cpuidle_state, cpu_id;
struct cpuidle_device *device;
struct cpuidle_driver *drv = &exynos4_idle_driver;
+ if (soc_is_exynos5250())
+ exynos5_core_down_clk();
+
/* Setup cpuidle driver */
drv->state_count = (sizeof(exynos4_cpuidle_set) /
sizeof(struct cpuidle_state));
diff --git a/arch/arm/mach-exynos/dev-audio.c b/arch/arm/mach-exynos/dev-audio.c
index a1cb42c3959..9d1a60951d7 100644
--- a/arch/arm/mach-exynos/dev-audio.c
+++ b/arch/arm/mach-exynos/dev-audio.c
@@ -23,11 +23,6 @@
#include <mach/irqs.h>
#include <mach/regs-audss.h>
-static const char *rclksrc[] = {
- [0] = "busclk",
- [1] = "i2sclk",
-};
-
static int exynos4_cfg_i2s(struct platform_device *pdev)
{
/* configure GPIO for i2s port */
@@ -55,7 +50,6 @@ static struct s3c_audio_pdata i2sv5_pdata = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
| QUIRK_NEED_RSTCLR,
- .src_clk = rclksrc,
.idma_addr = EXYNOS4_AUDSS_INT_MEM,
},
},
@@ -78,17 +72,11 @@ struct platform_device exynos4_device_i2s0 = {
},
};
-static const char *rclksrc_v3[] = {
- [0] = "sclk_i2s",
- [1] = "no_such_clock",
-};
-
static struct s3c_audio_pdata i2sv3_pdata = {
.cfg_gpio = exynos4_cfg_i2s,
.type = {
.i2s = {
.quirks = QUIRK_NO_MUXPSR,
- .src_clk = rclksrc_v3,
},
},
};
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
deleted file mode 100644
index 17c9c6ecc2e..00000000000
--- a/arch/arm/mach-exynos/dev-drm.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * linux/arch/arm/mach-exynos/dev-drm.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - core DRM device
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-
-#include <plat/devs.h>
-
-static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
-
-struct platform_device exynos_device_drm = {
- .name = "exynos-drm",
- .dev = {
- .dma_mask = &exynos_drm_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index f4d7dd20cda..c3f825b2794 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -20,10 +20,11 @@
#include <asm/smp_plat.h>
#include <mach/regs-pmu.h>
+#include <plat/cpu.h>
#include "common.h"
-static inline void cpu_enter_lowpower(void)
+static inline void cpu_enter_lowpower_a9(void)
{
unsigned int v;
@@ -45,6 +46,35 @@ static inline void cpu_enter_lowpower(void)
: "cc");
}
+static inline void cpu_enter_lowpower_a15(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+
+ flush_cache_louis();
+
+ asm volatile(
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+}
+
static inline void cpu_leave_lowpower(void)
{
unsigned int v;
@@ -103,11 +133,20 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
void __ref exynos_cpu_die(unsigned int cpu)
{
int spurious = 0;
+ int primary_part = 0;
/*
- * we're ready for shutdown now, so do it
+ * we're ready for shutdown now, so do it.
+ * Exynos4 is A9 based while Exynos5 is A15; check the CPU part
+ * number by reading the Main ID register and then perform the
+ * appropriate sequence for entering low power.
*/
- cpu_enter_lowpower();
+ asm("mrc p15, 0, %0, c0, c0, 0" : "=r"(primary_part) : : "cc");
+ if ((primary_part & 0xfff0) == 0xc0f0)
+ cpu_enter_lowpower_a15();
+ else
+ cpu_enter_lowpower_a9();
+
platform_do_lowpower(cpu, &spurious);
/*
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index e0f0ae3e0cf..1f4dc35cd4b 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -136,6 +136,9 @@
#define EXYNOS4_IRQ_TSI IRQ_SPI(115)
#define EXYNOS4_IRQ_SATA IRQ_SPI(116)
+#define EXYNOS4_IRQ_TMU_TRIG0 COMBINER_IRQ(2, 4)
+#define EXYNOS4_IRQ_TMU_TRIG1 COMBINER_IRQ(3, 4)
+
#define EXYNOS4_IRQ_SYSMMU_MDMA0_0 COMBINER_IRQ(4, 0)
#define EXYNOS4_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(4, 1)
#define EXYNOS4_IRQ_SYSMMU_FIMC0_0 COMBINER_IRQ(4, 2)
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 61b74e12d12..1df6abbf53b 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -89,6 +89,8 @@
#define EXYNOS4_PA_TWD 0x10500600
#define EXYNOS4_PA_L2CC 0x10502000
+#define EXYNOS4_PA_TMU 0x100C0000
+
#define EXYNOS4_PA_MDMA0 0x10810000
#define EXYNOS4_PA_MDMA1 0x12850000
#define EXYNOS4_PA_S_MDMA1 0x12840000
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index 8c9b38c9c50..d36ad76ad6a 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -267,6 +267,9 @@
#define EXYNOS5_CLKDIV_STATCPU0 EXYNOS_CLKREG(0x00600)
#define EXYNOS5_CLKDIV_STATCPU1 EXYNOS_CLKREG(0x00604)
+#define EXYNOS5_PWR_CTRL1 EXYNOS_CLKREG(0x01020)
+#define EXYNOS5_PWR_CTRL2 EXYNOS_CLKREG(0x01024)
+
#define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100)
#define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204)
@@ -344,6 +347,22 @@
#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29)
+#define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN (1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN (1 << 8)
+#define PWR_CTRL1_USE_CORE1_WFE (1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE (1 << 4)
+#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
+
+#define PWR_CTRL2_DIV2_UP_EN (1 << 25)
+#define PWR_CTRL2_DIV1_UP_EN (1 << 24)
+#define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16)
+#define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8)
+#define PWR_CTRL2_CORE2_UP_RATIO (1 << 4)
+#define PWR_CTRL2_CORE1_UP_RATIO (1 << 0)
+
/* Compatibility defines and inclusion */
#include <mach/regs-pmu.h>
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 84428e72cf5..3f30aa1ae35 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -15,6 +15,7 @@
#include <mach/map.h>
#define S5P_PMUREG(x) (S5P_VA_PMU + (x))
+#define S5P_SYSREG(x) (S3C_VA_SYS + (x))
#define S5P_CENTRAL_SEQ_CONFIGURATION S5P_PMUREG(0x0200)
@@ -231,6 +232,8 @@
/* For EXYNOS5 */
+#define EXYNOS5_SYS_I2C_CFG S5P_SYSREG(0x0234)
+
#define EXYNOS5_AUTO_WDTRESET_DISABLE S5P_PMUREG(0x0408)
#define EXYNOS5_MASK_WDTRESET_REQUEST S5P_PMUREG(0x040C)
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index 3f37a5e8a1f..b938f9fc1dd 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -147,7 +147,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
&s3c_device_hsmmc3,
&s3c_device_rtc,
&s3c_device_wdt,
- &samsung_asoc_dma,
&armlex4210_smsc911x,
&exynos4_device_ahci,
};
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 6df99c06419..92757ff817a 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -78,6 +78,8 @@ static const struct of_dev_auxdata exynos4_auxdata_lookup[] __initconst = {
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_MDMA1, "dma-pl330.2", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-tmu", EXYNOS4_PA_TMU,
+ "exynos-tmu", NULL),
{},
};
@@ -95,6 +97,8 @@ static void __init exynos4_dt_machine_init(void)
static char const *exynos4_dt_compat[] __initdata = {
"samsung,exynos4210",
+ "samsung,exynos4212",
+ "samsung,exynos4412",
NULL
};
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index f1326be80b9..e99d3d8f2bc 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -12,13 +12,17 @@
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
+#include <linux/memblock.h>
+#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <mach/map.h>
+#include <mach/regs-pmu.h>
#include <plat/cpu.h>
#include <plat/regs-serial.h>
+#include <plat/mfc.h>
#include "common.h"
@@ -48,6 +52,20 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"s3c2440-i2c.0", NULL),
OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
"s3c2440-i2c.1", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(2),
+ "s3c2440-i2c.2", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(3),
+ "s3c2440-i2c.3", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(4),
+ "s3c2440-i2c.4", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(5),
+ "s3c2440-i2c.5", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(6),
+ "s3c2440-i2c.6", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(7),
+ "s3c2440-i2c.7", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-hdmiphy-i2c", EXYNOS5_PA_IIC(8),
+ "s3c2440-hdmiphy-i2c", NULL),
OF_DEV_AUXDATA("samsung,exynos5250-dw-mshc", EXYNOS5_PA_DWMCI0,
"dw_mmc.0", NULL),
OF_DEV_AUXDATA("samsung,exynos5250-dw-mshc", EXYNOS5_PA_DWMCI1,
@@ -62,6 +80,12 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"exynos4210-spi.1", NULL),
OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS5_PA_SPI2,
"exynos4210-spi.2", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5-sata-ahci", 0x122F0000,
+ "exynos5-sata", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5-sata-phy", 0x12170000,
+ "exynos5-sata-phy", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5-sata-phy-i2c", 0x121D0000,
+ "exynos5-sata-phy-i2c", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -73,6 +97,13 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"exynos-gsc.2", NULL),
OF_DEV_AUXDATA("samsung,exynos5-gsc", EXYNOS5_PA_GSC3,
"exynos-gsc.3", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5-hdmi", 0x14530000,
+ "exynos5-hdmi", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5-mixer", 0x14450000,
+ "exynos5-mixer", NULL),
+ OF_DEV_AUXDATA("samsung,mfc-v6", 0x11000000, "s5p-mfc-v6", NULL),
+ OF_DEV_AUXDATA("samsung,exynos5250-tmu", 0x10060000,
+ "exynos-tmu", NULL),
{},
};
@@ -94,6 +125,28 @@ static void __init exynos5_dt_map_io(void)
static void __init exynos5_dt_machine_init(void)
{
+ struct device_node *i2c_np;
+ const char *i2c_compat = "samsung,s3c2440-i2c";
+ unsigned int tmp;
+
+ /*
+ * Exynos5's legacy i2c controller and the new high-speed i2c
+ * controller have muxed interrupt sources. By default the
+ * interrupts for the 4-channel HS-I2C controller are enabled.
+ * If nodes for the first four channels of the legacy i2c
+ * controller are available, re-configure the interrupts via
+ * the system register.
+ */
+ for_each_compatible_node(i2c_np, NULL, i2c_compat) {
+ if (of_device_is_available(i2c_np)) {
+ if (of_alias_get_id(i2c_np, "i2c") < 4) {
+ tmp = readl(EXYNOS5_SYS_I2C_CFG);
+ writel(tmp & ~(0x1 << of_alias_get_id(i2c_np, "i2c")),
+ EXYNOS5_SYS_I2C_CFG);
+ }
+ }
+ }
+
if (of_machine_is_compatible("samsung,exynos5250"))
of_platform_populate(NULL, of_default_bus_match_table,
exynos5250_auxdata_lookup, NULL);
@@ -108,6 +161,19 @@ static char const *exynos5_dt_compat[] __initdata = {
NULL
};
+static void __init exynos5_reserve(void)
+{
+#ifdef CONFIG_S5P_DEV_MFC
+ struct s5p_mfc_dt_meminfo mfc_mem;
+
+ /* Reserve memory for MFC only if it's available */
+ mfc_mem.compatible = "samsung,mfc-v6";
+ if (of_scan_flat_dt(s5p_fdt_find_mfc_mem, &mfc_mem))
+ s5p_mfc_reserve_mem(mfc_mem.roff, mfc_mem.rsize, mfc_mem.loff,
+ mfc_mem.lsize);
+#endif
+}
+
DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.init_irq = exynos5_init_irq,
@@ -119,4 +185,5 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
.timer = &exynos4_timer,
.dt_compat = exynos5_dt_compat,
.restart = exynos5_restart,
+ .reserve = exynos5_reserve,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 69359a0c8a1..27d4ed8b116 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1326,9 +1326,6 @@ static struct platform_device *nuri_devices[] __initdata = {
&cam_vdda_fixed_rdev,
&cam_8m_12v_fixed_rdev,
&exynos4_bus_devfreq,
-#ifdef CONFIG_DRM_EXYNOS
- &exynos_device_drm,
-#endif
};
static void __init nuri_map_io(void)
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index c606080b5df..5e34b9c1619 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -621,7 +621,7 @@ static struct pwm_lookup origen_pwm_lookup[] = {
PWM_LOOKUP("s3c24xx-pwm.0", 0, "pwm-backlight.0", NULL),
};
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
.panel = {
.timing = {
@@ -711,9 +711,6 @@ static struct platform_device *origen_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&s5p_device_mixer,
-#ifdef CONFIG_DRM_EXYNOS
- &exynos_device_drm,
-#endif
&exynos4_device_ohci,
&origen_device_gpiokeys,
&origen_lcd_hv070wsa,
@@ -796,7 +793,7 @@ static void __init origen_machine_init(void)
s5p_i2c_hdmiphy_set_platdata(NULL);
s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
exynos4_fimd0_gpio_setup_24bpp();
#else
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index ddb92631252..ae6da40c2aa 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -246,7 +246,7 @@ static struct samsung_keypad_platdata smdk4x12_keypad_data __initdata = {
.cols = 8,
};
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
.panel = {
.timing = {
@@ -317,9 +317,6 @@ static struct platform_device *smdk4x12_devices[] __initdata = {
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
-#ifdef CONFIG_DRM_EXYNOS
- &exynos_device_drm,
-#endif
&samsung_device_keypad,
};
@@ -363,7 +360,7 @@ static void __init smdk4x12_machine_init(void)
s3c_hsotg_set_platdata(&smdk4x12_hsotg_pdata);
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
exynos4_fimd0_gpio_setup_24bpp();
#else
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 8dd6a1e8030..35548e3c097 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -159,7 +159,7 @@ static struct platform_device smdkv310_lcd_lte480wv = {
.dev.platform_data = &smdkv310_lcd_lte480wv_data,
};
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
.panel = {
.timing = {
@@ -300,9 +300,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_fimc_md,
&s5p_device_g2d,
&s5p_device_jpeg,
-#ifdef CONFIG_DRM_EXYNOS
- &exynos_device_drm,
-#endif
&exynos4_device_ac97,
&exynos4_device_i2s0,
&exynos4_device_ohci,
@@ -311,7 +308,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_spdif,
- &samsung_asoc_dma,
&samsung_asoc_idma,
&s5p_device_fimd0,
&smdkv310_device_audio,
@@ -406,7 +402,7 @@ static void __init smdkv310_machine_init(void)
samsung_bl_set(&smdkv310_bl_gpio_info, &smdkv310_bl_data);
pwm_add_table(smdkv310_pwm_lookup, ARRAY_SIZE(smdkv310_pwm_lookup));
-#ifdef CONFIG_DRM_EXYNOS
+#ifdef CONFIG_DRM_EXYNOS_FIMD
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
exynos4_fimd0_gpio_setup_24bpp();
#else
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 2d6bc83d5c9..9e3340f1895 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -1080,9 +1080,6 @@ static struct platform_device *universal_devices[] __initdata = {
&s5p_device_onenand,
&s5p_device_fimd0,
&s5p_device_jpeg,
-#ifdef CONFIG_DRM_EXYNOS
- &exynos_device_drm,
-#endif
&s3c_device_usb_hsotg,
&s5p_device_mfc,
&s5p_device_mfc_l,
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 4ca8ff14a5b..c5c840e947b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -198,7 +198,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
int i;
- if (!soc_is_exynos5250())
+ if (!(soc_is_exynos5250() || soc_is_exynos5440()))
scu_enable(scu_base_addr());
/*
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index c06c992943a..b9b539cac81 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -62,6 +62,10 @@ static struct sleep_save exynos4_vpll_save[] = {
SAVE_ITEM(EXYNOS4_VPLL_CON1),
};
+static struct sleep_save exynos5_sys_save[] = {
+ SAVE_ITEM(EXYNOS5_SYS_I2C_CFG),
+};
+
static struct sleep_save exynos_core_save[] = {
/* SROM side */
SAVE_ITEM(S5P_SROM_BW),
@@ -81,6 +85,9 @@ static int exynos_cpu_suspend(unsigned long arg)
outer_flush_all();
#endif
+ if (soc_is_exynos5250())
+ flush_cache_all();
+
/* issue the standby signal into the pm unit. */
cpu_do_idle();
@@ -98,6 +105,7 @@ static void exynos_pm_prepare(void)
s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
} else {
+ s3c_pm_do_save(exynos5_sys_save, ARRAY_SIZE(exynos5_sys_save));
/* Disable USE_RETENTION of JPEG_MEM_OPTION */
tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
@@ -301,6 +309,10 @@ static void exynos_pm_resume(void)
__raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
__raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
+ if (soc_is_exynos5250())
+ s3c_pm_do_restore(exynos5_sys_save,
+ ARRAY_SIZE(exynos5_sys_save));
+
s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
if (!soc_is_exynos5250()) {
@@ -312,6 +324,10 @@ static void exynos_pm_resume(void)
}
early_wakeup:
+
+ /* Clear SLEEP mode set in INFORM1 */
+ __raw_writel(0x0, S5P_INFORM1);
+
return;
}
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index c0bc83a7663..9f1351de52f 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -19,6 +19,8 @@
#include <linux/pm_domain.h>
#include <linux/delay.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
#include <mach/regs-pmu.h>
#include <plat/devs.h>
@@ -83,12 +85,88 @@ static struct exynos_pm_domain PD = { \
}
#ifdef CONFIG_OF
+static void exynos_add_device_to_domain(struct exynos_pm_domain *pd,
+ struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "adding to power domain %s\n", pd->pd.name);
+
+ while (1) {
+ ret = pm_genpd_add_device(&pd->pd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+
+ pm_genpd_dev_need_restore(dev, true);
+}
+
+static void exynos_remove_device_from_domain(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+ int ret;
+
+ dev_dbg(dev, "removing from power domain %s\n", genpd->name);
+
+ while (1) {
+ ret = pm_genpd_remove_device(genpd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+}
+
+static void exynos_read_domain_from_dt(struct device *dev)
+{
+ struct platform_device *pd_pdev;
+ struct exynos_pm_domain *pd;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, "samsung,power-domain", 0);
+ if (!node)
+ return;
+ pd_pdev = of_find_device_by_node(node);
+ if (!pd_pdev)
+ return;
+ pd = platform_get_drvdata(pd_pdev);
+ exynos_add_device_to_domain(pd, dev);
+}
+
+static int exynos_pm_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct device *dev = data;
+
+ switch (event) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ if (dev->of_node)
+ exynos_read_domain_from_dt(dev);
+
+ break;
+
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ exynos_remove_device_from_domain(dev);
+
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block platform_nb = {
+ .notifier_call = exynos_pm_notifier_call,
+};
+
static __init int exynos_pm_dt_parse_domains(void)
{
+ struct platform_device *pdev;
struct device_node *np;
for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
struct exynos_pm_domain *pd;
+ int on;
+
+ pdev = of_find_device_by_node(np);
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
@@ -97,15 +175,22 @@ static __init int exynos_pm_dt_parse_domains(void)
return -ENOMEM;
}
- if (of_get_property(np, "samsung,exynos4210-pd-off", NULL))
- pd->is_off = true;
- pd->name = np->name;
+ pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+ pd->name = pd->pd.name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->pd.of_node = np;
- pm_genpd_init(&pd->pd, NULL, false);
+
+ platform_set_drvdata(pdev, pd);
+
+ on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+ pm_genpd_init(&pd->pd, NULL, !on);
}
+
+ bus_register_notifier(&platform_bus_type, &platform_nb);
+
return 0;
}
#else
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 4e24b8c77eb..1ad0d76de8c 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -272,6 +272,13 @@ config MACH_EUKREA_MBIMXSD25_BASEBOARD
endchoice
+config MACH_IMX25_DT
+ bool "Support i.MX25 platforms from device tree"
+ select SOC_IMX25
+ help
+ Include support for Freescale i.MX25 based platforms
+ using the device tree for discovery
+
comment "MX27 platforms:"
config MACH_MX27ADS
@@ -831,7 +838,14 @@ config SOC_IMX53
config SOC_IMX6Q
bool "i.MX6 Quad support"
+ select ARCH_HAS_CPUFREQ
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
+ select ARM_ERRATA_743622
+ select ARM_ERRATA_751472
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select ARM_ERRATA_775420
select ARM_GIC
select COMMON_CLK
select CPU_V7
@@ -843,6 +857,10 @@ config SOC_IMX6Q
select MFD_SYSCON
select PINCTRL
select PINCTRL_IMX6Q
+ select PL310_ERRATA_588369 if CACHE_PL310
+ select PL310_ERRATA_727915 if CACHE_PL310
+ select PL310_ERRATA_769419 if CACHE_PL310
+ select PM_OPP if PM
help
This enables support for Freescale i.MX6 Quad processor.
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index fe47b71469c..0634b3152c2 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
+obj-$(CONFIG_MACH_IMX25_DT) += imx25-dt.o
# i.MX27 based machines
obj-$(CONFIG_MACH_MX27ADS) += mach-mx27ads.o
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index bc885801cd6..b197aa73dc4 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -23,6 +23,9 @@
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "clk.h"
#include "common.h"
@@ -55,6 +58,8 @@
#define ccm(x) (CRM_BASE + (x))
+static struct clk_onecell_data clk_data;
+
static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
static const char *per_sel_clks[] = { "ahb", "upll", };
@@ -64,24 +69,30 @@ enum mx25_clks {
per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
- csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
- lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
- csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
- usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
- cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
- kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
- ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
- uart4_ipg, uart5_ipg, wdt_ipg, clk_max
+ csi_ipg_per, epit_ipg_per, esai_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per,
+ gpt_ipg_per, i2c_ipg_per, lcdc_ipg_per, nfc_ipg_per, owire_ipg_per,
+ pwm_ipg_per, sim1_ipg_per, sim2_ipg_per, ssi1_ipg_per, ssi2_ipg_per,
+ uart_ipg_per, ata_ahb, reserved1, csi_ahb, emi_ahb, esai_ahb, esdhc1_ahb,
+ esdhc2_ahb, fec_ahb, lcdc_ahb, rtic_ahb, sdma_ahb, slcdc_ahb, usbotg_ahb,
+ reserved2, reserved3, reserved4, reserved5, can1_ipg, can2_ipg, csi_ipg,
+ cspi1_ipg, cspi2_ipg, cspi3_ipg, dryice_ipg, ect_ipg, epit1_ipg, epit2_ipg,
+ reserved6, esdhc1_ipg, esdhc2_ipg, fec_ipg, reserved7, reserved8, reserved9,
+ gpt1_ipg, gpt2_ipg, gpt3_ipg, gpt4_ipg, reserved10, reserved11, reserved12,
+ iim_ipg, reserved13, reserved14, kpp_ipg, lcdc_ipg, reserved15, pwm1_ipg,
+ pwm2_ipg, pwm3_ipg, pwm4_ipg, rngb_ipg, reserved16, scc_ipg, sdma_ipg,
+ sim1_ipg, sim2_ipg, slcdc_ipg, spba_ipg, ssi1_ipg, ssi2_ipg, tsc_ipg,
+ uart1_ipg, uart2_ipg, uart3_ipg, uart4_ipg, uart5_ipg, reserved17,
+ wdt_ipg, clk_max
};
static struct clk *clk[clk_max];
-int __init mx25_clocks_init(void)
+static int __init __mx25_clocks_init(unsigned long osc_rate)
{
int i;
clk[dummy] = imx_clk_fixed("dummy", 0);
- clk[osc] = imx_clk_fixed("osc", 24000000);
+ clk[osc] = imx_clk_fixed("osc", osc_rate);
clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
@@ -123,22 +134,36 @@ int __init mx25_clocks_init(void)
clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
+ clk[epit_ipg_per] = imx_clk_gate("epit_ipg_per", "per1", ccm(CCM_CGCR0), 1);
+ clk[esai_ipg_per] = imx_clk_gate("esai_ipg_per", "per2", ccm(CCM_CGCR0), 2);
clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0), 3);
clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per7", ccm(CCM_CGCR0), 7);
clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "per8", ccm(CCM_CGCR0), 8);
+ clk[owire_ipg_per] = imx_clk_gate("owire_ipg_per", "per9", ccm(CCM_CGCR0), 9);
+ clk[pwm_ipg_per] = imx_clk_gate("pwm_ipg_per", "per10", ccm(CCM_CGCR0), 10);
+ clk[sim1_ipg_per] = imx_clk_gate("sim1_ipg_per", "per11", ccm(CCM_CGCR0), 11);
+ clk[sim2_ipg_per] = imx_clk_gate("sim2_ipg_per", "per12", ccm(CCM_CGCR0), 12);
clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
+ clk[ata_ahb] = imx_clk_gate("ata_ahb", "ahb", ccm(CCM_CGCR0), 16);
+ /* CCM_CGCR0(17): reserved */
clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
+ clk[emi_ahb] = imx_clk_gate("emi_ahb", "ahb", ccm(CCM_CGCR0), 19);
+ clk[esai_ahb] = imx_clk_gate("esai_ahb", "ahb", ccm(CCM_CGCR0), 20);
clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
+ clk[rtic_ahb] = imx_clk_gate("rtic_ahb", "ahb", ccm(CCM_CGCR0), 25);
clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
+ clk[slcdc_ahb] = imx_clk_gate("slcdc_ahb", "ahb", ccm(CCM_CGCR0), 27);
clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
+ /* CCM_CGCR0(29-31): reserved */
+ /* CCM_CGCR1(0): reserved in datasheet, used as audmux in FSL kernel */
clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1), 2);
clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1), 3);
clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1), 4);
@@ -146,17 +171,41 @@ int __init mx25_clocks_init(void)
clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1), 6);
clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1), 7);
clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1), 8);
+ clk[ect_ipg] = imx_clk_gate("ect_ipg", "ipg", ccm(CCM_CGCR1), 9);
+ clk[epit1_ipg] = imx_clk_gate("epit1_ipg", "ipg", ccm(CCM_CGCR1), 10);
+ clk[epit2_ipg] = imx_clk_gate("epit2_ipg", "ipg", ccm(CCM_CGCR1), 11);
+ /* CCM_CGCR1(12): reserved in datasheet, used as esai in FSL kernel */
clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
+ /* CCM_CGCR1(16): reserved in datasheet, used as gpio1 in FSL kernel */
+ /* CCM_CGCR1(17): reserved in datasheet, used as gpio2 in FSL kernel */
+ /* CCM_CGCR1(18): reserved in datasheet, used as gpio3 in FSL kernel */
+ clk[gpt1_ipg] = imx_clk_gate("gpt1_ipg", "ipg", ccm(CCM_CGCR1), 19);
+ clk[gpt2_ipg] = imx_clk_gate("gpt2_ipg", "ipg", ccm(CCM_CGCR1), 20);
+ clk[gpt3_ipg] = imx_clk_gate("gpt3_ipg", "ipg", ccm(CCM_CGCR1), 21);
+ clk[gpt4_ipg] = imx_clk_gate("gpt4_ipg", "ipg", ccm(CCM_CGCR1), 22);
+ /* CCM_CGCR1(23): reserved in datasheet, used as i2c1 in FSL kernel */
+ /* CCM_CGCR1(24): reserved in datasheet, used as i2c2 in FSL kernel */
+ /* CCM_CGCR1(25): reserved in datasheet, used as i2c3 in FSL kernel */
clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
+ /* CCM_CGCR1(27): reserved in datasheet, used as iomuxc in FSL kernel */
+ /* CCM_CGCR1(28): reserved in datasheet, used as kpp in FSL kernel */
clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
+ /* CCM_CGCR1(30): reserved in datasheet, used as owire in FSL kernel */
clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2), 0);
clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2), 1);
clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2), 2);
+ clk[rngb_ipg] = imx_clk_gate("rngb_ipg", "ipg", ccm(CCM_CGCR2), 3);
+ /* CCM_CGCR2(4): reserved in datasheet, used as rtic in FSL kernel */
+ clk[scc_ipg] = imx_clk_gate("scc_ipg", "ipg", ccm(CCM_CGCR2), 5);
clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2), 6);
+ clk[sim1_ipg] = imx_clk_gate("sim1_ipg", "ipg", ccm(CCM_CGCR2), 7);
+ clk[sim2_ipg] = imx_clk_gate("sim2_ipg", "ipg", ccm(CCM_CGCR2), 8);
+ clk[slcdc_ipg] = imx_clk_gate("slcdc_ipg", "ipg", ccm(CCM_CGCR2), 9);
+ clk[spba_ipg] = imx_clk_gate("spba_ipg", "ipg", ccm(CCM_CGCR2), 10);
clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
@@ -165,6 +214,7 @@ int __init mx25_clocks_init(void)
clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
+ /* CCM_CGCR2(19): reserved in datasheet, but used as wdt in FSL kernel */
clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
for (i = 0; i < ARRAY_SIZE(clk); i++)
@@ -172,6 +222,18 @@ int __init mx25_clocks_init(void)
pr_err("i.MX25 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
+ clk_prepare_enable(clk[emi_ahb]);
+
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+
+ return 0;
+}
+
+int __init mx25_clocks_init(void)
+{
+ __mx25_clocks_init(24000000);
+
/* i.mx25 has the i.mx21 type uart */
clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
@@ -183,8 +245,6 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
- clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
@@ -242,5 +302,40 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[iim_ipg], "iim", NULL);
mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
+
+ return 0;
+}
+
+int __init mx25_clocks_init_dt(void)
+{
+ struct device_node *np;
+ void __iomem *base;
+ int irq;
+ unsigned long osc_rate = 24000000;
+
+ /* retrieve the frequency of fixed clocks from device tree */
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ u32 rate;
+ if (of_property_read_u32(np, "clock-frequency", &rate))
+ continue;
+
+ if (of_device_is_compatible(np, "fsl,imx-osc"))
+ osc_rate = rate;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx25-ccm");
+ clk_data.clks = clk;
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ __mx25_clocks_init(osc_rate);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx25-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ irq = irq_of_parse_and_map(np, 0);
+
+ mxc_timer_init(base, irq);
+
return 0;
}
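With the one-cell provider registered above through of_clk_add_provider()/of_clk_src_onecell_get(), a device node can reference any entry of the clk[] array from its "clocks" property. A minimal consumer-side sketch follows; the node path and the use of a single "clocks" entry are assumptions for illustration, not taken from this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of.h>

static int example_enable_consumer_clk(void)
{
	struct device_node *np;
	struct clk *clk;

	/* Hypothetical consumer node path, for demonstration only. */
	np = of_find_node_by_path("/soc/uart@43f90000");
	if (!np)
		return -ENODEV;

	/*
	 * Index 0 picks the first entry of this node's "clocks" property;
	 * the specifier cell in that property selects an entry of the
	 * provider's clk[] array through of_clk_src_onecell_get().
	 */
	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}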
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index e8c0473c756..579023f59dc 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -319,6 +319,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
unsigned long rate_ckih1, unsigned long rate_ckih2)
{
int i;
+ u32 val;
struct device_node *np;
clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
@@ -390,6 +391,21 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
imx_print_silicon_rev("i.MX51", mx51_revision());
clk_disable_unprepare(clk[iim_gate]);
+ /*
+ * Reference Manual says: Functionality of CCDR[18] and CLPCR[23] is no
+ * longer supported. Set to one for better power saving.
+ *
+ * The effect of not setting these bits is that MIPI clocks can't be
+ * enabled without the IPU clock being enabled as well.
+ */
+ val = readl(MXC_CCM_CCDR);
+ val |= 1 << 18;
+ writel(val, MXC_CCM_CCDR);
+
+ val = readl(MXC_CCM_CLPCR);
+ val |= 1 << 23;
+ writel(val, MXC_CCM_CLPCR);
+
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 448476958e7..7f2c10c7413 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -424,6 +424,7 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
+ clk_register_clkdev(clk[arm], NULL, "cpu0");
/*
* The gpmi needs 100MHz frequency in the EDO/Sync mode,
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index ef8db6b3484..7191ab4434e 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -66,6 +66,7 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
+extern int mx25_clocks_init_dt(void);
extern int mx27_clocks_init_dt(void);
extern int mx31_clocks_init_dt(void);
extern int mx51_clocks_init_dt(void);
diff --git a/arch/arm/plat-mxc/devices/platform-mx2-emma.c b/arch/arm/mach-imx/devices/platform-mx2-emma.c
index 508404ddd4e..11bd01d402f 100644
--- a/arch/arm/plat-mxc/devices/platform-mx2-emma.c
+++ b/arch/arm/mach-imx/devices/platform-mx2-emma.c
@@ -6,8 +6,8 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
-#include <mach/hardware.h>
-#include <mach/devices-common.h>
+#include "../hardware.h"
+#include "devices-common.h"
#define imx_mx2_emmaprp_data_entry_single(soc) \
{ \
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
new file mode 100644
index 00000000000..e17dfbc4219
--- /dev/null
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include "common.h"
+#include "mx25.h"
+
+static void __init imx25_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static void __init imx25_timer_init(void)
+{
+ mx25_clocks_init_dt();
+}
+
+static struct sys_timer imx25_timer = {
+ .init = imx25_timer_init,
+};
+
+static const char * const imx25_dt_board_compat[] __initconst = {
+ "fsl,imx25",
+ NULL
+};
+
+DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
+ .map_io = mx25_map_io,
+ .init_early = imx25_init_early,
+ .init_irq = mx25_init_irq,
+ .handle_irq = imx25_handle_irq,
+ .timer = &imx25_timer,
+ .init_machine = imx25_dt_init,
+ .dt_compat = imx25_dt_board_compat,
+ .restart = mxc_restart,
+MACHINE_END
diff --git a/arch/arm/mach-imx/lluart.c b/arch/arm/mach-imx/lluart.c
index 5f1510363ee..2fdc9bf2fb5 100644
--- a/arch/arm/mach-imx/lluart.c
+++ b/arch/arm/mach-imx/lluart.c
@@ -17,17 +17,25 @@
#include "hardware.h"
+#define IMX6Q_UART1_BASE_ADDR 0x02020000
+#define IMX6Q_UART2_BASE_ADDR 0x021e8000
+#define IMX6Q_UART3_BASE_ADDR 0x021ec000
+#define IMX6Q_UART4_BASE_ADDR 0x021f0000
+#define IMX6Q_UART5_BASE_ADDR 0x021f4000
+
+/*
+ * IMX6Q_UART_BASE_ADDR is put in the middle to force the expansion
+ * of IMX6Q_UART##n##_BASE_ADDR.
+ */
+#define IMX6Q_UART_BASE_ADDR(n) IMX6Q_UART##n##_BASE_ADDR
+#define IMX6Q_UART_BASE(n) IMX6Q_UART_BASE_ADDR(n)
+#define IMX6Q_DEBUG_UART_BASE IMX6Q_UART_BASE(CONFIG_DEBUG_IMX6Q_UART_PORT)
+
static struct map_desc imx_lluart_desc = {
-#ifdef CONFIG_DEBUG_IMX6Q_UART2
- .virtual = MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
- .pfn = __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
- .length = MX6Q_UART2_SIZE,
- .type = MT_DEVICE,
-#endif
-#ifdef CONFIG_DEBUG_IMX6Q_UART4
- .virtual = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
- .pfn = __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
- .length = MX6Q_UART4_SIZE,
+#ifdef CONFIG_DEBUG_IMX6Q_UART
+ .virtual = IMX_IO_P2V(IMX6Q_DEBUG_UART_BASE),
+ .pfn = __phys_to_pfn(IMX6Q_DEBUG_UART_BASE),
+ .length = 0x4000,
.type = MT_DEVICE,
#endif
};
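The IMX6Q_UART_BASE_ADDR()/IMX6Q_UART_BASE() pair above is the usual two-level token-pasting idiom: a macro argument adjacent to ## is not expanded before pasting, so an extra indirection layer is needed for CONFIG_DEBUG_IMX6Q_UART_PORT to become a digit first. A small standalone sketch of the idiom, with made-up names and a userspace printf purely for demonstration.

#include <stdio.h>

#define UART1_BASE 0x02020000
#define UART2_BASE 0x021e8000

#define PORT 2

#define BASE_PASTE(n)    UART##n##_BASE	/* pastes the literal token 'n' */
#define BASE_INDIRECT(n) BASE_PASTE(n)	/* expands 'n' (PORT -> 2) first */

int main(void)
{
	/* Using BASE_PASTE(PORT) directly would yield the undefined token UARTPORT_BASE. */
	printf("debug UART base: %#lx\n", (unsigned long)BASE_INDIRECT(PORT));
	return 0;
}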
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index cce33e433bd..4eb1b3ac794 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -38,6 +38,40 @@
#include "cpuidle.h"
#include "hardware.h"
+#define IMX6Q_ANALOG_DIGPROG 0x260
+
+static int imx6q_revision(void)
+{
+ struct device_node *np;
+ void __iomem *base;
+ static u32 rev;
+
+ if (!rev) {
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+ if (!np)
+ return IMX_CHIP_REVISION_UNKNOWN;
+ base = of_iomap(np, 0);
+ if (!base) {
+ of_node_put(np);
+ return IMX_CHIP_REVISION_UNKNOWN;
+ }
+ rev = readl_relaxed(base + IMX6Q_ANALOG_DIGPROG);
+ iounmap(base);
+ of_node_put(np);
+ }
+
+ switch (rev & 0xff) {
+ case 0:
+ return IMX_CHIP_REVISION_1_0;
+ case 1:
+ return IMX_CHIP_REVISION_1_1;
+ case 2:
+ return IMX_CHIP_REVISION_1_2;
+ default:
+ return IMX_CHIP_REVISION_UNKNOWN;
+ }
+}
+
void imx6q_restart(char mode, const char *cmd)
{
struct device_node *np;
@@ -204,6 +238,7 @@ static void __init imx6q_timer_init(void)
{
mx6q_clocks_init();
twd_local_timer_of_register();
+ imx_print_silicon_rev("i.MX6Q", imx6q_revision());
}
static struct sys_timer imx6q_timer = {
diff --git a/arch/arm/mach-imx/mx6q.h b/arch/arm/mach-imx/mx6q.h
index f7e7dbac8f4..19d3f54db5a 100644
--- a/arch/arm/mach-imx/mx6q.h
+++ b/arch/arm/mach-imx/mx6q.h
@@ -27,9 +27,5 @@
#define MX6Q_CCM_SIZE 0x4000
#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
#define MX6Q_ANATOP_SIZE 0x1000
-#define MX6Q_UART2_BASE_ADDR 0x021e8000
-#define MX6Q_UART2_SIZE 0x4000
-#define MX6Q_UART4_BASE_ADDR 0x021f0000
-#define MX6Q_UART4_SIZE 0x4000
#endif /* __MACH_MX6Q_H__ */
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index d018ad4bcc3..f91cdff5a3e 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -46,6 +46,13 @@ config MACH_GURUPLUG
config ARCH_KIRKWOOD_DT
bool "Marvell Kirkwood Flattened Device Tree"
+ select POWER_SUPPLY
+ select POWER_RESET
+ select POWER_RESET_GPIO
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ select MVEBU_CLK_CORE
+ select MVEBU_CLK_GATING
select USE_OF
help
Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-kirkwood/board-dnskw.c b/arch/arm/mach-kirkwood/board-dnskw.c
index 43d16d6714b..a1aa87f0918 100644
--- a/arch/arm/mach-kirkwood/board-dnskw.c
+++ b/arch/arm/mach-kirkwood/board-dnskw.c
@@ -17,51 +17,11 @@
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data dnskw_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static unsigned int dnskw_mpp_config[] __initdata = {
- MPP13_UART1_TXD, /* Custom ... */
- MPP14_UART1_RXD, /* ... Controller (DNS-320 only) */
- MPP20_SATA1_ACTn, /* LED: White Right HDD */
- MPP21_SATA0_ACTn, /* LED: White Left HDD */
- MPP24_GPIO,
- MPP25_GPIO,
- MPP26_GPIO, /* LED: Power */
- MPP27_GPIO, /* LED: Red Right HDD */
- MPP28_GPIO, /* LED: Red Left HDD */
- MPP29_GPIO, /* LED: Red USB (DNS-325 only) */
- MPP30_GPIO,
- MPP31_GPIO,
- MPP32_GPIO,
- MPP33_GPO,
- MPP34_GPIO, /* Button: Front power */
- MPP35_GPIO, /* LED: Red USB (DNS-320 only) */
- MPP36_GPIO, /* Power: Turn off board */
- MPP37_GPIO, /* Power: Turn back on after power failure */
- MPP38_GPIO,
- MPP39_GPIO, /* Power: SATA0 */
- MPP40_GPIO, /* Power: SATA1 */
- MPP41_GPIO, /* SATA0 present */
- MPP42_GPIO, /* SATA1 present */
- MPP43_GPIO, /* LED: White USB */
- MPP44_GPIO, /* Fan: Tachometer Pin */
- MPP45_GPIO, /* Fan: high speed */
- MPP46_GPIO, /* Fan: low speed */
- MPP47_GPIO, /* Button: Back unmount */
- MPP48_GPIO, /* Button: Back reset */
- MPP49_GPIO, /* Temp Alarm (DNS-325) Pin of U5 (DNS-320) */
- 0
-};
-
-static void dnskw_power_off(void)
-{
- gpio_set_value(36, 1);
-}
-
/* Register any GPIO for output and set the value */
static void __init dnskw_gpio_register(unsigned gpio, char *name, int def)
{
@@ -76,22 +36,8 @@ static void __init dnskw_gpio_register(unsigned gpio, char *name, int def)
void __init dnskw_init(void)
{
- kirkwood_mpp_conf(dnskw_mpp_config);
-
- kirkwood_ehci_init();
kirkwood_ge00_init(&dnskw_ge00_data);
- /* Register power-off GPIO. */
- if (gpio_request(36, "dnskw:power:off") == 0
- && gpio_direction_output(36, 0) == 0)
- pm_power_off = dnskw_power_off;
- else
- pr_err("dnskw: failed to configure power-off GPIO\n");
-
- /* Ensure power is supplied to both HDDs */
- dnskw_gpio_register(39, "dnskw:power:sata0", 1);
- dnskw_gpio_register(40, "dnskw:power:sata1", 1);
-
/* Set NAS to turn back on after a power failure */
dnskw_gpio_register(37, "dnskw:power:recover", 1);
}
diff --git a/arch/arm/mach-kirkwood/board-dockstar.c b/arch/arm/mach-kirkwood/board-dockstar.c
index 6912882b0aa..d7196db3398 100644
--- a/arch/arm/mach-kirkwood/board-dockstar.c
+++ b/arch/arm/mach-kirkwood/board-dockstar.c
@@ -17,32 +17,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data dockstar_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
-static unsigned int dockstar_mpp_config[] __initdata = {
- MPP29_GPIO, /* USB Power Enable */
- MPP46_GPIO, /* LED green */
- MPP47_GPIO, /* LED orange */
- 0
-};
-
void __init dockstar_dt_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(dockstar_mpp_config);
-
- if (gpio_request(29, "USB Power Enable") != 0 ||
- gpio_direction_output(29, 1) != 0)
- pr_err("can't setup GPIO 29 (USB Power Enable)\n");
- kirkwood_ehci_init();
-
kirkwood_ge00_init(&dockstar_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 8a8ebe09e51..08248e24ffc 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -17,7 +17,6 @@
#include <linux/gpio.h>
#include <linux/platform_data/mmc-mvsdio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data dreamplug_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
@@ -31,25 +30,11 @@ static struct mvsdio_platform_data dreamplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
};
-static unsigned int dreamplug_mpp_config[] __initdata = {
- MPP0_SPI_SCn,
- MPP1_SPI_MOSI,
- MPP2_SPI_SCK,
- MPP3_SPI_MISO,
- MPP47_GPIO, /* Bluetooth LED */
- MPP48_GPIO, /* Wifi LED */
- MPP49_GPIO, /* Wifi AP LED */
- 0
-};
-
void __init dreamplug_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(dreamplug_mpp_config);
-
- kirkwood_ehci_init();
kirkwood_ge00_init(&dreamplug_ge00_data);
kirkwood_ge01_init(&dreamplug_ge01_data);
kirkwood_sdio_init(&dreamplug_mvsdio_data);
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 375f7d88551..ff4150a2ad0 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -14,11 +14,15 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
#include <linux/kexec.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/bridge-regs.h>
+#include <linux/platform_data/usb-ehci-orion.h>
#include <plat/irq.h>
+#include <plat/common.h>
#include "common.h"
static struct of_device_id kirkwood_dt_match_table[] __initdata = {
@@ -26,18 +30,50 @@ static struct of_device_id kirkwood_dt_match_table[] __initdata = {
{ }
};
-static struct of_dev_auxdata kirkwood_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL),
- OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0",
- NULL),
- OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011100, "mv64xxx_i2c.1",
- NULL),
- OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL),
- OF_DEV_AUXDATA("marvell,orion-sata", 0xf1080000, "sata_mv.0", NULL),
- OF_DEV_AUXDATA("marvell,orion-nand", 0xf4000000, "orion_nand", NULL),
- OF_DEV_AUXDATA("marvell,orion-crypto", 0xf1030000, "mv_crypto", NULL),
- {},
-};
+/*
+ * There are still devices that don't know about DT yet. Get clock
+ * gates here and add a clock lookup alias, so that old platform
+ * devices still work.
+ */
+
+static void __init kirkwood_legacy_clk_init(void)
+{
+
+ struct device_node *np = of_find_compatible_node(
+ NULL, NULL, "marvell,kirkwood-gating-clock");
+
+ struct of_phandle_args clkspec;
+
+ clkspec.np = np;
+ clkspec.args_count = 1;
+
+ clkspec.args[0] = CGC_BIT_GE0;
+ orion_clkdev_add(NULL, "mv643xx_eth_port.0",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CGC_BIT_PEX0;
+ orion_clkdev_add("0", "pcie",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CGC_BIT_USB0;
+ orion_clkdev_add(NULL, "orion-ehci.0",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CGC_BIT_PEX1;
+ orion_clkdev_add("1", "pcie",
+ of_clk_get_from_provider(&clkspec));
+
+ clkspec.args[0] = CGC_BIT_GE1;
+ orion_clkdev_add(NULL, "mv643xx_eth_port.1",
+ of_clk_get_from_provider(&clkspec));
+
+}
+
+static void __init kirkwood_of_clk_init(void)
+{
+ mvebu_clocks_init();
+ kirkwood_legacy_clk_init();
+}
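The aliases registered above let drivers that still probe by platform-device name find their gate clocks once the gating-clock provider comes from DT. A minimal user-space model of that name-to-clock lookup is sketched below; the table entries and gate-bit numbers are placeholders for illustration, not the real CGC_BIT_* assignments.

#include <stdio.h>
#include <string.h>

struct clk {
        int gate_bit;                   /* placeholder for a gated clock handle */
};

struct clk_lookup {
        const char *con_id;             /* e.g. "0" for the pcie clock, or NULL */
        const char *dev_id;             /* legacy platform device name */
        struct clk clk;
};

/* Gate-bit numbers below are placeholders, not the real CGC_BIT_* values. */
static const struct clk_lookup table[] = {
        { NULL, "mv643xx_eth_port.0", { 0 } },
        { "0",  "pcie",               { 1 } },
        { NULL, "orion-ehci.0",       { 2 } },
};

/* clk_get()-style lookup: match dev_id first, then con_id (NULL matches NULL). */
static const struct clk *model_clk_get(const char *dev_id, const char *con_id)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                const struct clk_lookup *cl = &table[i];

                if (strcmp(cl->dev_id, dev_id))
                        continue;
                if ((cl->con_id == NULL) != (con_id == NULL))
                        continue;
                if (con_id && strcmp(cl->con_id, con_id))
                        continue;
                return &cl->clk;
        }
        return NULL;
}

int main(void)
{
        const struct clk *c = model_clk_get("orion-ehci.0", NULL);

        printf("orion-ehci.0 -> gate bit %d\n", c ? c->gate_bit : -1);
        return 0;
}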
static void __init kirkwood_dt_init(void)
{
@@ -56,11 +92,7 @@ static void __init kirkwood_dt_init(void)
kirkwood_l2_init();
/* Setup root of clk tree */
- kirkwood_clk_init();
-
- /* internal devices that every board has */
- kirkwood_xor0_init();
- kirkwood_xor1_init();
+ kirkwood_of_clk_init();
#ifdef CONFIG_KEXEC
kexec_reinit = kirkwood_enable_pcie;
@@ -115,8 +147,7 @@ static void __init kirkwood_dt_init(void)
if (of_machine_is_compatible("zyxel,nsa310"))
nsa310_init();
- of_platform_populate(NULL, kirkwood_dt_match_table,
- kirkwood_auxdata_lookup, NULL);
+ of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
}
static const char * const kirkwood_dt_board_compat[] = {
diff --git a/arch/arm/mach-kirkwood/board-goflexnet.c b/arch/arm/mach-kirkwood/board-goflexnet.c
index 5dcd0d62aa4..9db979aec82 100644
--- a/arch/arm/mach-kirkwood/board-goflexnet.c
+++ b/arch/arm/mach-kirkwood/board-goflexnet.c
@@ -19,40 +19,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data goflexnet_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
-static unsigned int goflexnet_mpp_config[] __initdata = {
- MPP29_GPIO, /* USB Power Enable */
- MPP47_GPIO, /* LED Orange */
- MPP46_GPIO, /* LED Green */
- MPP45_GPIO, /* LED Left Capacity 3 */
- MPP44_GPIO, /* LED Left Capacity 2 */
- MPP43_GPIO, /* LED Left Capacity 1 */
- MPP42_GPIO, /* LED Left Capacity 0 */
- MPP41_GPIO, /* LED Right Capacity 3 */
- MPP40_GPIO, /* LED Right Capacity 2 */
- MPP39_GPIO, /* LED Right Capacity 1 */
- MPP38_GPIO, /* LED Right Capacity 0 */
- 0
-};
-
void __init goflexnet_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(goflexnet_mpp_config);
-
- if (gpio_request(29, "USB Power Enable") != 0 ||
- gpio_direction_output(29, 1) != 0)
- pr_err("can't setup GPIO 29 (USB Power Enable)\n");
- kirkwood_ehci_init();
-
kirkwood_ge00_init(&goflexnet_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-ib62x0.c b/arch/arm/mach-kirkwood/board-ib62x0.c
index 6d3a5642114..9f6f496380d 100644
--- a/arch/arm/mach-kirkwood/board-ib62x0.c
+++ b/arch/arm/mach-kirkwood/board-ib62x0.c
@@ -14,52 +14,17 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
#include <linux/input.h>
#include "common.h"
-#include "mpp.h"
-
-#define IB62X0_GPIO_POWER_OFF 24
static struct mv643xx_eth_platform_data ib62x0_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static unsigned int ib62x0_mpp_config[] __initdata = {
- MPP0_NF_IO2,
- MPP1_NF_IO3,
- MPP2_NF_IO4,
- MPP3_NF_IO5,
- MPP4_NF_IO6,
- MPP5_NF_IO7,
- MPP18_NF_IO0,
- MPP19_NF_IO1,
- MPP22_GPIO, /* OS LED red */
- MPP24_GPIO, /* Power off device */
- MPP25_GPIO, /* OS LED green */
- MPP27_GPIO, /* USB transfer LED */
- MPP28_GPIO, /* Reset button */
- MPP29_GPIO, /* USB Copy button */
- 0
-};
-
-static void ib62x0_power_off(void)
-{
- gpio_set_value(IB62X0_GPIO_POWER_OFF, 1);
-}
-
void __init ib62x0_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(ib62x0_mpp_config);
-
- kirkwood_ehci_init();
kirkwood_ge00_init(&ib62x0_ge00_data);
- if (gpio_request(IB62X0_GPIO_POWER_OFF, "ib62x0:power:off") == 0 &&
- gpio_direction_output(IB62X0_GPIO_POWER_OFF, 0) == 0)
- pm_power_off = ib62x0_power_off;
- else
- pr_err("board-ib62x0: failed to configure power-off GPIO\n");
}
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c
index 24f5aa7f698..c8ebde4919e 100644
--- a/arch/arm/mach-kirkwood/board-iconnect.c
+++ b/arch/arm/mach-kirkwood/board-iconnect.c
@@ -13,31 +13,13 @@
#include <linux/of.h>
#include <linux/mv643xx_eth.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data iconnect_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(11),
};
-static unsigned int iconnect_mpp_config[] __initdata = {
- MPP12_GPIO,
- MPP35_GPIO,
- MPP41_GPIO,
- MPP42_GPIO,
- MPP43_GPIO,
- MPP44_GPIO,
- MPP45_GPIO,
- MPP46_GPIO,
- MPP47_GPIO,
- MPP48_GPIO,
- 0
-};
-
void __init iconnect_init(void)
{
- kirkwood_mpp_conf(iconnect_mpp_config);
-
- kirkwood_ehci_init();
kirkwood_ge00_init(&iconnect_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index e4ed62c28f5..f655b2637b0 100644
--- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -13,7 +13,6 @@
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_NONE,
@@ -21,35 +20,10 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.duplex = DUPLEX_FULL,
};
-static unsigned int iomega_ix2_200_mpp_config[] __initdata = {
- MPP12_GPIO, /* Reset Button */
- MPP14_GPIO, /* Power Button */
- MPP15_GPIO, /* Backup LED (blue) */
- MPP16_GPIO, /* Power LED (white) */
- MPP35_GPIO, /* OTB Button */
- MPP36_GPIO, /* Rebuild LED (white) */
- MPP37_GPIO, /* Health LED (red) */
- MPP38_GPIO, /* SATA LED brightness control 1 */
- MPP39_GPIO, /* SATA LED brightness control 2 */
- MPP40_GPIO, /* Backup LED brightness control 1 */
- MPP41_GPIO, /* Backup LED brightness control 2 */
- MPP42_GPIO, /* Power LED brightness control 1 */
- MPP43_GPIO, /* Power LED brightness control 2 */
- MPP44_GPIO, /* Health LED brightness control 1 */
- MPP45_GPIO, /* Health LED brightness control 2 */
- MPP46_GPIO, /* Rebuild LED brightness control 1 */
- MPP47_GPIO, /* Rebuild LED brightness control 2 */
- 0
-};
-
void __init iomega_ix2_200_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(iomega_ix2_200_mpp_config);
-
- kirkwood_ehci_init();
-
kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-km_kirkwood.c b/arch/arm/mach-kirkwood/board-km_kirkwood.c
index f7d32834b75..44e4605ba0b 100644
--- a/arch/arm/mach-kirkwood/board-km_kirkwood.c
+++ b/arch/arm/mach-kirkwood/board-km_kirkwood.c
@@ -18,27 +18,15 @@
#include <linux/clk.h>
#include <linux/clk-private.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data km_kirkwood_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
-static unsigned int km_kirkwood_mpp_config[] __initdata = {
- MPP8_GPIO, /* I2C SDA */
- MPP9_GPIO, /* I2C SCL */
- 0
-};
-
void __init km_kirkwood_init(void)
{
struct clk *sata_clk;
/*
- * Basic setup. Needs to be called early.
- */
- kirkwood_mpp_conf(km_kirkwood_mpp_config);
-
- /*
* Our variant of kirkwood (integrated in the Bobcat) hangs on accessing
* SATA bits (14-15) of the Clock Gating Control Register. Since these
* devices are also not present in this variant, their clocks get
@@ -52,6 +40,5 @@ void __init km_kirkwood_init(void)
if (!IS_ERR(sata_clk))
sata_clk->flags |= CLK_IGNORE_UNUSED;
- kirkwood_ehci_init();
kirkwood_ge00_init(&km_kirkwood_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-lsxl.c b/arch/arm/mach-kirkwood/board-lsxl.c
index 7e18cad9b79..4ec8b7ae784 100644
--- a/arch/arm/mach-kirkwood/board-lsxl.c
+++ b/arch/arm/mach-kirkwood/board-lsxl.c
@@ -15,9 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
#include "common.h"
-#include "mpp.h"
static struct mv643xx_eth_platform_data lsxl_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
@@ -27,23 +25,6 @@ static struct mv643xx_eth_platform_data lsxl_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static unsigned int lsxl_mpp_config[] __initdata = {
- MPP10_GPO, /* HDD Power Enable */
- MPP11_GPIO, /* USB Vbus Enable */
- MPP18_GPO, /* FAN High Enable# */
- MPP19_GPO, /* FAN Low Enable# */
- MPP36_GPIO, /* Function Blue LED */
- MPP37_GPIO, /* Alarm LED */
- MPP38_GPIO, /* Info LED */
- MPP39_GPIO, /* Power LED */
- MPP40_GPIO, /* Fan Lock */
- MPP41_GPIO, /* Function Button */
- MPP42_GPIO, /* Power Switch */
- MPP43_GPIO, /* Power Auto Switch */
- MPP48_GPIO, /* Function Red LED */
- 0
-};
-
/*
* On the LS-XHL/LS-CHLv2, the shutdown process is following:
* - Userland monitors key events until the power switch goes to off position
@@ -57,21 +38,12 @@ static void lsxl_power_off(void)
kirkwood_restart('h', NULL);
}
-#define LSXL_GPIO_HDD_POWER 10
-#define LSXL_GPIO_USB_POWER 11
-
void __init lsxl_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(lsxl_mpp_config);
-
- /* usb and sata power on */
- gpio_set_value(LSXL_GPIO_USB_POWER, 1);
- gpio_set_value(LSXL_GPIO_HDD_POWER, 1);
- kirkwood_ehci_init();
kirkwood_ge00_init(&lsxl_ge00_data);
kirkwood_ge01_init(&lsxl_ge01_data);
diff --git a/arch/arm/mach-kirkwood/board-mplcec4.c b/arch/arm/mach-kirkwood/board-mplcec4.c
index e78a227468e..56bfe5a1605 100644
--- a/arch/arm/mach-kirkwood/board-mplcec4.c
+++ b/arch/arm/mach-kirkwood/board-mplcec4.c
@@ -24,52 +24,16 @@ static struct mv643xx_eth_platform_data mplcec4_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(2),
};
-static unsigned int mplcec4_mpp_config[] __initdata = {
- MPP0_NF_IO2,
- MPP1_NF_IO3,
- MPP2_NF_IO4,
- MPP3_NF_IO5,
- MPP4_NF_IO6,
- MPP5_NF_IO7,
- MPP6_SYSRST_OUTn,
- MPP7_GPO, /* Status LED Green High Active */
- MPP10_UART0_TXD,
- MPP11_UART0_RXD,
- MPP12_SD_CLK,
- MPP13_SD_CMD, /* Alt UART1_TXD */
- MPP14_SD_D0, /* Alt UART1_RXD */
- MPP15_SD_D1,
- MPP16_SD_D2,
- MPP17_SD_D3,
- MPP18_NF_IO0,
- MPP19_NF_IO1,
- MPP28_GPIO, /* Input SYS_POR_DET (active High) */
- MPP29_GPIO, /* Input SYS_RTC_INT (active High) */
- MPP34_SATA1_ACTn,
- MPP35_SATA0_ACTn,
- MPP40_GPIO, /* LED User1 orange */
- MPP41_GPIO, /* LED User1 green */
- MPP44_GPIO, /* LED User0 orange */
- MPP45_GPIO, /* LED User0 green */
- MPP46_GPIO, /* Status LED Yellow High Active */
- MPP47_GPIO, /* SD_CD# (in/IRQ)*/
- 0
-};
-
-
static struct mvsdio_platform_data mplcec4_mvsdio_data = {
.gpio_card_detect = 47, /* MPP47 used as SD card detect */
};
-
void __init mplcec4_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_mpp_conf(mplcec4_mpp_config);
- kirkwood_ehci_init();
kirkwood_ge00_init(&mplcec4_ge00_data);
kirkwood_ge01_init(&mplcec4_ge01_data);
kirkwood_sdio_init(&mplcec4_mvsdio_data);
diff --git a/arch/arm/mach-kirkwood/board-ns2.c b/arch/arm/mach-kirkwood/board-ns2.c
index 78596c4f76d..8821720ab5a 100644
--- a/arch/arm/mach-kirkwood/board-ns2.c
+++ b/arch/arm/mach-kirkwood/board-ns2.c
@@ -73,7 +73,6 @@ void __init ns2_init(void)
*/
kirkwood_mpp_conf(ns2_mpp_config);
- kirkwood_ehci_init();
if (of_machine_is_compatible("lacie,netspace_lite_v2") ||
of_machine_is_compatible("lacie,netspace_mini_v2"))
ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
diff --git a/arch/arm/mach-kirkwood/board-nsa310.c b/arch/arm/mach-kirkwood/board-nsa310.c
index 027ce83f3fe..f58d2e1a404 100644
--- a/arch/arm/mach-kirkwood/board-nsa310.c
+++ b/arch/arm/mach-kirkwood/board-nsa310.c
@@ -85,10 +85,6 @@ void __init nsa310_init(void)
nsa310_gpio_init();
- /* this can be removed once the mainline kirkwood.dtsi gets
- * the ehci configuration by default */
- kirkwood_ehci_init();
-
kirkwood_pcie_id(&dev, &rev);
i2c_register_board_info(0, ARRAY_AND_SIZE(nsa310_i2c_info));
diff --git a/arch/arm/mach-kirkwood/board-openblocks_a6.c b/arch/arm/mach-kirkwood/board-openblocks_a6.c
index e807e8cfdd4..815fc6451d5 100644
--- a/arch/arm/mach-kirkwood/board-openblocks_a6.c
+++ b/arch/arm/mach-kirkwood/board-openblocks_a6.c
@@ -55,8 +55,8 @@ static unsigned int openblocks_a6_mpp_config[] __initdata = {
MPP38_GPIO, /* INIT */
MPP39_GPIO, /* USB OC */
MPP41_GPIO, /* LED: Red */
- MPP42_GPIO, /* LED: Yellow */
- MPP43_GPIO, /* LED: Green */
+ MPP42_GPIO, /* LED: Green */
+ MPP43_GPIO, /* LED: Yellow */
0,
};
@@ -66,6 +66,5 @@ void __init openblocks_a6_init(void)
* Basic setup. Needs to be called early.
*/
kirkwood_mpp_conf(openblocks_a6_mpp_config);
- kirkwood_ehci_init();
kirkwood_ge00_init(&openblocks_ge00_data);
}
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c
index f3bfedae3a2..acb0187c7ee 100644
--- a/arch/arm/mach-kirkwood/board-ts219.c
+++ b/arch/arm/mach-kirkwood/board-ts219.c
@@ -23,47 +23,21 @@
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include "common.h"
-#include "mpp.h"
#include "tsx1x-common.h"
static struct mv643xx_eth_platform_data qnap_ts219_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static unsigned int qnap_ts219_mpp_config[] __initdata = {
- MPP0_SPI_SCn,
- MPP1_SPI_MOSI,
- MPP2_SPI_SCK,
- MPP3_SPI_MISO,
- MPP4_SATA1_ACTn,
- MPP5_SATA0_ACTn,
- MPP8_TW0_SDA,
- MPP9_TW0_SCK,
- MPP10_UART0_TXD,
- MPP11_UART0_RXD,
- MPP13_UART1_TXD, /* PIC controller */
- MPP14_UART1_RXD, /* PIC controller */
- MPP15_GPIO, /* USB Copy button (on devices with 88F6281) */
- MPP16_GPIO, /* Reset button (on devices with 88F6281) */
- MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */
- MPP37_GPIO, /* Reset button (on devices with 88F6282) */
- MPP43_GPIO, /* USB Copy button (on devices with 88F6282) */
- MPP44_GPIO, /* Board ID: 0: TS-11x, 1: TS-21x */
- 0
-};
-
void __init qnap_dt_ts219_init(void)
{
u32 dev, rev;
- kirkwood_mpp_conf(qnap_ts219_mpp_config);
-
kirkwood_pcie_id(&dev, &rev);
if (dev == MV88F6282_DEV_ID)
qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
kirkwood_ge00_init(&qnap_ts219_ge00_data);
- kirkwood_ehci_init();
pm_power_off = qnap_tsx1x_power_off;
}
diff --git a/arch/arm/mach-kirkwood/board-usi_topkick.c b/arch/arm/mach-kirkwood/board-usi_topkick.c
index e2ec9d891fe..15e69fcde9f 100644
--- a/arch/arm/mach-kirkwood/board-usi_topkick.c
+++ b/arch/arm/mach-kirkwood/board-usi_topkick.c
@@ -76,7 +76,6 @@ void __init usi_topkick_init(void)
/* SATA0 power enable */
gpio_set_value(TOPKICK_SATA0_PWR_ENABLE, 1);
- kirkwood_ehci_init();
kirkwood_ge00_init(&topkick_ge00_data);
kirkwood_sdio_init(&topkick_mvsdio_data);
}
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 5303be62b31..bac21a554c9 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -260,8 +260,8 @@ void __init kirkwood_clk_init(void)
orion_clkdev_add(NULL, "orion_nand", runit);
orion_clkdev_add(NULL, "mvsdio", sdio);
orion_clkdev_add(NULL, "mv_crypto", crypto);
- orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
- orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+ orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
+ orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
orion_clkdev_add("0", "pcie", pex0);
orion_clkdev_add("1", "pcie", pex1);
orion_clkdev_add(NULL, "kirkwood-i2s", audio);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 416d46ef7eb..440b13ef1fe 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -9,6 +9,10 @@ config ARCH_MVEBU
select PINCTRL
select PLAT_ORION
select SPARSE_IRQ
+ select CLKDEV_LOOKUP
+ select MVEBU_CLK_CORE
+ select MVEBU_CLK_CPU
+ select MVEBU_CLK_GATING
if ARCH_MVEBU
@@ -17,7 +21,9 @@ menu "Marvell SOC with device tree"
config MACH_ARMADA_370_XP
bool
select ARMADA_370_XP_TIMER
- select CPU_V7
+ select HAVE_SMP
+ select CACHE_L2X0
+ select CPU_PJ4B
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 57f996b6aa0..5dcb369b58a 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -2,4 +2,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
-I$(srctree)/arch/arm/plat-orion/include
obj-y += system-controller.o
-obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o
+obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-mvebu/addr-map.c b/arch/arm/mach-mvebu/addr-map.c
index fe454a4430b..ab9b3bd4fef 100644
--- a/arch/arm/mach-mvebu/addr-map.c
+++ b/arch/arm/mach-mvebu/addr-map.c
@@ -78,7 +78,7 @@ armada_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
if (win < 8)
offset = (win << 4);
else
- offset = ARMADA_WINDOW_8_PLUS_OFFSET + (win << 3);
+ offset = ARMADA_WINDOW_8_PLUS_OFFSET + ((win - 8) << 3);
return cfg->bridge_virt_base + offset;
}
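The one-line change above fixes an off-by-eight in the per-window register offset: windows 8 and up are spaced 8 bytes apart starting at ARMADA_WINDOW_8_PLUS_OFFSET, so the window index has to be rebased to zero first. A quick user-space sketch of both formulas makes the difference visible (the 0x90 base value is assumed here purely for illustration):

#include <stdio.h>

#define ARMADA_WINDOW_8_PLUS_OFFSET 0x90        /* assumed here for illustration only */

static unsigned int offset_before(int win)
{
        return win < 8 ? (unsigned int)(win << 4)
                       : ARMADA_WINDOW_8_PLUS_OFFSET + (unsigned int)(win << 3);
}

static unsigned int offset_after(int win)
{
        return win < 8 ? (unsigned int)(win << 4)
                       : ARMADA_WINDOW_8_PLUS_OFFSET + (unsigned int)((win - 8) << 3);
}

int main(void)
{
        /* Window 8 should start right at the "8 plus" base; the old code skipped ahead. */
        for (int win = 6; win < 12; win++)
                printf("win %2d: before 0x%02x  after 0x%02x\n",
                       win, offset_before(win), offset_after(win));
        return 0;
}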
@@ -108,6 +108,9 @@ static int __init armada_setup_cpu_mbus(void)
addr_map_cfg.bridge_virt_base = mbus_unit_addr_decoding_base;
+ if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
+ addr_map_cfg.hw_io_coherency = 1;
+
/*
* Disable, clear and configure windows.
*/
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 49d791548ad..7434b5e3619 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -17,11 +17,14 @@
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/time-armada-370-xp.h>
+#include <linux/clk/mvebu.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include "armada-370-xp.h"
#include "common.h"
+#include "coherency.h"
static struct map_desc armada_370_xp_io_desc[] __initdata = {
{
@@ -37,27 +40,45 @@ void __init armada_370_xp_map_io(void)
iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc));
}
+void __init armada_370_xp_timer_and_clk_init(void)
+{
+ mvebu_clocks_init();
+ armada_370_xp_timer_init();
+}
+
+void __init armada_370_xp_init_early(void)
+{
+ /*
+ * Some Armada 370/XP devices allocate their coherent buffers
+ * from atomic context. Increase the size of the atomic coherent pool
+ * to make sure such allocations won't fail.
+ */
+ init_dma_coherent_pool_size(SZ_1M);
+}
+
struct sys_timer armada_370_xp_timer = {
- .init = armada_370_xp_timer_init,
+ .init = armada_370_xp_timer_and_clk_init,
};
static void __init armada_370_xp_dt_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ coherency_init();
}
-static const char * const armada_370_xp_dt_board_dt_compat[] = {
- "marvell,a370-db",
- "marvell,axp-db",
+static const char * const armada_370_xp_dt_compat[] = {
+ "marvell,armada-370-xp",
NULL,
};
-DT_MACHINE_START(ARMADA_XP_DT, "Marvell Aramada 370/XP (Device Tree)")
+DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 370/XP (Device Tree)")
+ .smp = smp_ops(armada_xp_smp_ops),
.init_machine = armada_370_xp_dt_init,
.map_io = armada_370_xp_map_io,
+ .init_early = armada_370_xp_init_early,
.init_irq = armada_370_xp_init_irq,
.handle_irq = armada_370_xp_handle_irq,
.timer = &armada_370_xp_timer,
.restart = mvebu_restart,
- .dt_compat = armada_370_xp_dt_board_dt_compat,
+ .dt_compat = armada_370_xp_dt_compat,
MACHINE_END
diff --git a/arch/arm/mach-mvebu/armada-370-xp.h b/arch/arm/mach-mvebu/armada-370-xp.h
index aac9bebc6b0..c6a7d74fddf 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.h
+++ b/arch/arm/mach-mvebu/armada-370-xp.h
@@ -19,4 +19,11 @@
#define ARMADA_370_XP_REGS_VIRT_BASE IOMEM(0xfeb00000)
#define ARMADA_370_XP_REGS_SIZE SZ_1M
+#ifdef CONFIG_SMP
+#include <linux/cpumask.h>
+
+void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq);
+void armada_xp_mpic_smp_cpu_init(void);
+#endif
+
#endif /* __MACH_ARMADA_370_XP_H */
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
new file mode 100644
index 00000000000..8278960066c
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -0,0 +1,155 @@
+/*
+ * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370 and Armada XP SOCs have a coherency fabric which is
+ * responsible for ensuring hardware coherency between all CPUs and between
+ * CPUs and I/O masters. This file initializes the coherency fabric and
+ * supplies basic routines for configuring and controlling hardware coherency
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <asm/smp_plat.h>
+#include "armada-370-xp.h"
+
+/*
+ * Some functions in this file are called very early during SMP
+ * initialization. At that time the device tree framework is not yet
+ * ready, and it is not possible to get the register address to
+ * ioremap it. That's why the pointer below is given with an initial
+ * value matching its virtual mapping
+ */
+static void __iomem *coherency_base = ARMADA_370_XP_REGS_VIRT_BASE + 0x20200;
+static void __iomem *coherency_cpu_base;
+
+/* Coherency fabric registers */
+#define COHERENCY_FABRIC_CFG_OFFSET 0x4
+
+#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
+
+static struct of_device_id of_coherency_table[] = {
+ {.compatible = "marvell,coherency-fabric"},
+ { /* end of list */ },
+};
+
+#ifdef CONFIG_SMP
+int coherency_get_cpu_count(void)
+{
+ int reg, cnt;
+
+ reg = readl(coherency_base + COHERENCY_FABRIC_CFG_OFFSET);
+ cnt = (reg & 0xF) + 1;
+
+ return cnt;
+}
+#endif
+
+/* Function defined in coherency_ll.S */
+int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
+
+int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
+{
+ if (!coherency_base) {
+ pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
+ pr_warn("Coherency fabric is not initialized\n");
+ return 1;
+ }
+
+ return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
+}
+
+static inline void mvebu_hwcc_sync_io_barrier(void)
+{
+ writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
+ while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
+}
+
+static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (dir != DMA_TO_DEVICE)
+ mvebu_hwcc_sync_io_barrier();
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+
+static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (dir != DMA_TO_DEVICE)
+ mvebu_hwcc_sync_io_barrier();
+}
+
+static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ if (dir != DMA_TO_DEVICE)
+ mvebu_hwcc_sync_io_barrier();
+}
+
+static struct dma_map_ops mvebu_hwcc_dma_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = mvebu_hwcc_dma_map_page,
+ .unmap_page = mvebu_hwcc_dma_unmap_page,
+ .get_sgtable = arm_dma_get_sgtable,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_single_for_cpu = mvebu_hwcc_dma_sync,
+ .sync_single_for_device = mvebu_hwcc_dma_sync,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = arm_dma_set_mask,
+};
+
+static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+ set_dma_ops(dev, &mvebu_hwcc_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mvebu_hwcc_platform_nb = {
+ .notifier_call = mvebu_hwcc_platform_notifier,
+};
+
+int __init coherency_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, of_coherency_table);
+ if (np) {
+ pr_info("Initializing Coherency fabric\n");
+ coherency_base = of_iomap(np, 0);
+ coherency_cpu_base = of_iomap(np, 1);
+ set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
+ bus_register_notifier(&platform_bus_type,
+ &mvebu_hwcc_platform_nb);
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
new file mode 100644
index 00000000000..2f428137f6f
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-mvebu/include/mach/coherency.h
+ *
+ *
+ * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_370_XP_COHERENCY_H
+#define __MACH_370_XP_COHERENCY_H
+
+#ifdef CONFIG_SMP
+int coherency_get_cpu_count(void);
+#endif
+
+int set_cpu_coherent(int cpu_id, int smp_group_id);
+int coherency_init(void);
+
+#endif /* __MACH_370_XP_COHERENCY_H */
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
new file mode 100644
index 00000000000..53e8391192c
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -0,0 +1,49 @@
+/*
+ * Coherency fabric: low level functions
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This file implements the assembly function to add a CPU to the
+ * coherency fabric. This function is called by each of the secondary
+ * CPUs during their early boot in an SMP kernel, which is why this
+ * function has to be callable from assembly. It can also be called by a
+ * primary CPU from C code during its boot.
+ */
+
+#include <linux/linkage.h>
+#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
+#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
+
+ .text
+/*
+ * r0: Coherency fabric base register address
+ * r1: HW CPU id
+ */
+ENTRY(ll_set_cpu_coherent)
+ /* Create bit by cpu index */
+ mov r3, #(1 << 24)
+ lsl r1, r3, r1
+
+ /* Add CPU to SMP group - Atomic */
+ add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
+ ldr r2, [r3]
+ orr r2, r2, r1
+ str r2, [r3]
+
+ /* Enable coherency on CPU - Atomic */
+ add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
+ ldr r2, [r3]
+ orr r2, r2, r1
+ str r2, [r3]
+
+ dsb
+
+ mov r0, #0
+ mov pc, lr
+ENDPROC(ll_set_cpu_coherent)
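A rough C model of the same bit manipulation may help when reading the assembly: the per-CPU bit starts at bit 24, and the same bit is OR-ed into both the control and the configuration register. Plain variables stand in for the two fabric registers here, so this is only a sketch of the logic, not the real MMIO read-modify-write.

#include <stdio.h>

static unsigned int fabric_ctl;         /* stands in for the SMP-group control register */
static unsigned int fabric_cfg;         /* stands in for the coherency config register */

/* Same logic as ll_set_cpu_coherent(): bit (24 + hw_cpu_id) in both registers. */
static int model_set_cpu_coherent(unsigned int hw_cpu_id)
{
        unsigned int bit = 1u << (24 + hw_cpu_id);      /* mov r3, #(1 << 24); lsl r1, r3, r1 */

        fabric_ctl |= bit;              /* add the CPU to the SMP group */
        fabric_cfg |= bit;              /* enable coherency for that CPU */
        return 0;                       /* mirrors "mov r0, #0" */
}

int main(void)
{
        for (unsigned int cpu = 0; cpu < 4; cpu++)
                model_set_cpu_coherent(cpu);

        /* With four CPUs both registers end up with bits 24..27 set: 0x0f000000. */
        printf("CTL=0x%08x CFG=0x%08x\n", fabric_ctl, fabric_cfg);
        return 0;
}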
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
index 02f89eaa25f..aa27bc2ffb6 100644
--- a/arch/arm/mach-mvebu/common.h
+++ b/arch/arm/mach-mvebu/common.h
@@ -20,4 +20,9 @@ void mvebu_restart(char mode, const char *cmd);
void armada_370_xp_init_irq(void);
void armada_370_xp_handle_irq(struct pt_regs *regs);
+void armada_xp_cpu_die(unsigned int cpu);
+int armada_370_xp_coherency_init(void);
+int armada_370_xp_pmsu_init(void);
+void armada_xp_secondary_startup(void);
+extern struct smp_operations armada_xp_smp_ops;
#endif
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
new file mode 100644
index 00000000000..a06e0ede8c0
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -0,0 +1,49 @@
+/*
+ * SMP support: Entry point for secondary CPUs
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This file implements the assembly entry point for secondary CPUs in
+ * an SMP kernel. The only thing we need to do is to add the CPU to
+ * the coherency fabric by writing to 2 registers. Currently the base
+ * register addresses are hard coded due to the early initialisation
+ * problems.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/*
+ * At this stage the secondary CPUs don't yet have access to the MMU, so
+ * we have to provide physical addresses.
+ */
+#define ARMADA_XP_CFB_BASE 0xD0020200
+
+ __CPUINIT
+
+/*
+ * Armada XP specific entry point for secondary CPUs.
+ * We add the CPU to the coherency fabric and then jump to secondary
+ * startup
+ */
+ENTRY(armada_xp_secondary_startup)
+
+ /* Read CPU id */
+ mrc p15, 0, r1, c0, c0, 5
+ and r1, r1, #0xF
+
+ /* Add CPU to coherency fabric */
+ ldr r0, =ARMADA_XP_CFB_BASE
+
+ bl ll_set_cpu_coherent
+ b secondary_startup
+
+ENDPROC(armada_xp_secondary_startup)
diff --git a/arch/arm/mach-mvebu/hotplug.c b/arch/arm/mach-mvebu/hotplug.c
new file mode 100644
index 00000000000..b228b6a80c8
--- /dev/null
+++ b/arch/arm/mach-mvebu/hotplug.c
@@ -0,0 +1,30 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Armada XP
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <asm/proc-fns.h>
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref armada_xp_cpu_die(unsigned int cpu)
+{
+ cpu_do_idle();
+
+ /* We should never return from idle */
+ panic("mvebu: cpu %d unexpectedly exit from shutdown\n", cpu);
+}
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 5f5f9394b6b..8e3fb082c3c 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -24,6 +24,8 @@
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/hardware/cache-l2x0.h>
/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
@@ -35,6 +37,12 @@
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
+
+#define ACTIVE_DOORBELLS (8)
+
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
@@ -51,11 +59,22 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
+#ifdef CONFIG_SMP
+static int armada_xp_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ return 0;
+}
+#endif
+
static struct irq_chip armada_370_xp_irq_chip = {
.name = "armada_370_xp_irq",
.irq_mask = armada_370_xp_irq_mask,
.irq_mask_ack = armada_370_xp_irq_mask,
.irq_unmask = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = armada_xp_set_affinity,
+#endif
};
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
@@ -72,6 +91,41 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
return 0;
}
+#ifdef CONFIG_SMP
+void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* submit the software-triggered interrupt (IPI) */
+ writel((map << 8) | irq, main_int_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+void armada_xp_mpic_smp_cpu_init(void)
+{
+ /* Clear pending IPIs */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Enable first 8 IPIs */
+ writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+ /* Unmask IPI interrupt */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+#endif /* CONFIG_SMP */
+
static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.map = armada_370_xp_mpic_irq_map,
.xlate = irq_domain_xlate_onecell,
@@ -91,13 +145,18 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
armada_370_xp_mpic_domain =
- irq_domain_add_linear(node, (control >> 2) & 0x3ff,
- &armada_370_xp_mpic_irq_ops, NULL);
+ irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+ &armada_370_xp_mpic_irq_ops, NULL);
if (!armada_370_xp_mpic_domain)
panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
irq_set_default_host(armada_370_xp_mpic_domain);
+
+#ifdef CONFIG_SMP
+ armada_xp_mpic_smp_cpu_init();
+#endif
+
return 0;
}
@@ -111,14 +170,36 @@ asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
ARMADA_370_XP_CPU_INTACK_OFFS);
irqnr = irqstat & 0x3FF;
- if (irqnr < 1023) {
- irqnr =
- irq_find_mapping(armada_370_xp_mpic_domain, irqnr);
+ if (irqnr > 1022)
+ break;
+
+ if (irqnr >= 8) {
+ irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
+ irqnr);
handle_IRQ(irqnr, regs);
continue;
}
+#ifdef CONFIG_SMP
+ /* IPI Handling */
+ if (irqnr == 0) {
+ u32 ipimask, ipinr;
+
+ ipimask = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+ & 0xFF;
+
+ writel(0x0, per_cpu_int_base +
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+ /* Handle all pending doorbells */
+ for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
+ if (ipimask & (0x1 << ipinr))
+ handle_IPI(ipinr, regs);
+ }
+ continue;
+ }
+#endif
- break;
} while (1);
}
@@ -130,4 +211,7 @@ static const struct of_device_id mpic_of_match[] __initconst = {
void __init armada_370_xp_init_irq(void)
{
of_irq_init(mpic_of_match);
+#ifdef CONFIG_CACHE_L2X0
+ l2x0_of_init(0, ~0UL);
+#endif
}
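The IPI path added above packs the physical CPU map and the doorbell number into a single write to the software-trigger register, and the receive side masks the low eight bits of the per-CPU cause register. The bit layout alone can be sketched stand-alone as follows (no MMIO, made-up values):

#include <stdio.h>

#define ACTIVE_DOORBELLS 8

/* Encode an IPI like armada_mpic_send_doorbell(): CPU map in bits 8+, doorbell number below. */
static unsigned long encode_sw_trigger(unsigned long cpu_map, unsigned int ipi)
{
        return (cpu_map << 8) | ipi;
}

/* Decode the per-CPU doorbell cause value the way the IRQ handler does. */
static void handle_doorbells(unsigned int cause)
{
        unsigned int ipimask = cause & 0xFF;

        for (unsigned int ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++)
                if (ipimask & (1u << ipinr))
                        printf("  would call handle_IPI(%u)\n", ipinr);
}

int main(void)
{
        /* Send doorbell 2 to physical CPUs 0 and 1. */
        printf("SW_TRIG write: 0x%lx\n", encode_sw_trigger(0x3, 2));

        /* Pretend doorbells 0 and 2 are pending on this CPU. */
        handle_doorbells(0x05);
        return 0;
}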
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
new file mode 100644
index 00000000000..fe16aaf7c19
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -0,0 +1,122 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Armada XP
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada XP SoC has 4 ARMv7 PJ4B CPUs running in full HW coherency.
+ * This file implements the routines for preparing the SMP infrastructure
+ * and waking up the secondary CPUs.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include "common.h"
+#include "armada-370-xp.h"
+#include "pmsu.h"
+#include "coherency.h"
+
+void __init set_secondary_cpus_clock(void)
+{
+ int thiscpu;
+ unsigned long rate;
+ struct clk *cpu_clk = NULL;
+ struct device_node *np = NULL;
+
+ thiscpu = smp_processor_id();
+ for_each_node_by_type(np, "cpu") {
+ int err;
+ int cpu;
+
+ err = of_property_read_u32(np, "reg", &cpu);
+ if (WARN_ON(err))
+ return;
+
+ if (cpu == thiscpu) {
+ cpu_clk = of_clk_get(np, 0);
+ break;
+ }
+ }
+ if (WARN_ON(IS_ERR(cpu_clk)))
+ return;
+ clk_prepare_enable(cpu_clk);
+ rate = clk_get_rate(cpu_clk);
+
+ /* set all the other CPU clocks to the same rate as the boot CPU */
+ for_each_node_by_type(np, "cpu") {
+ int err;
+ int cpu;
+
+ err = of_property_read_u32(np, "reg", &cpu);
+ if (WARN_ON(err))
+ return;
+
+ if (cpu != thiscpu) {
+ cpu_clk = of_clk_get(np, 0);
+ clk_set_rate(cpu_clk, rate);
+ }
+ }
+}
+
+static void __cpuinit armada_xp_secondary_init(unsigned int cpu)
+{
+ armada_xp_mpic_smp_cpu_init();
+}
+
+static int __cpuinit armada_xp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ pr_info("Booting CPU %d\n", cpu);
+
+ armada_xp_boot_cpu(cpu, armada_xp_secondary_startup);
+
+ return 0;
+}
+
+static void __init armada_xp_smp_init_cpus(void)
+{
+ unsigned int i, ncores;
+ ncores = coherency_get_cpu_count();
+
+ /* Limit possible CPUs to defconfig */
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %d CPUs physically present. Only %d configured.",
+ ncores, nr_cpu_ids);
+ pr_warn("Clipping CPU count to %d\n", nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+
+ set_smp_cross_call(armada_mpic_send_doorbell);
+}
+
+void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
+{
+ set_secondary_cpus_clock();
+ flush_cache_all();
+ set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
+}
+
+struct smp_operations armada_xp_smp_ops __initdata = {
+ .smp_init_cpus = armada_xp_smp_init_cpus,
+ .smp_prepare_cpus = armada_xp_smp_prepare_cpus,
+ .smp_secondary_init = armada_xp_secondary_init,
+ .smp_boot_secondary = armada_xp_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = armada_xp_cpu_die,
+#endif
+};
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
new file mode 100644
index 00000000000..3cc4bef6401
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -0,0 +1,75 @@
+/*
+ * Power Management Service Unit(PMSU) support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370 and Armada XP SOCs have a power management service
+ * unit which is responsible for powering down and waking up CPUs and
+ * other SOC units
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/smp_plat.h>
+
+static void __iomem *pmsu_mp_base;
+static void __iomem *pmsu_reset_base;
+
+#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x24)
+#define PMSU_RESET_CTL_OFFSET(cpu) (cpu * 0x8)
+
+static struct of_device_id of_pmsu_table[] = {
+ {.compatible = "marvell,armada-370-xp-pmsu"},
+ { /* end of list */ },
+};
+
+#ifdef CONFIG_SMP
+int armada_xp_boot_cpu(unsigned int cpu_id, void *boot_addr)
+{
+ int reg, hw_cpu;
+
+ if (!pmsu_mp_base || !pmsu_reset_base) {
+ pr_warn("Can't boot CPU. PMSU is uninitialized\n");
+ return 1;
+ }
+
+ hw_cpu = cpu_logical_map(cpu_id);
+
+ writel(virt_to_phys(boot_addr), pmsu_mp_base +
+ PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
+
+ /* Release the CPU from reset by clearing the reset bit */
+ reg = readl(pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
+ reg &= (~0x1);
+ writel(reg, pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
+
+ return 0;
+}
+#endif
+
+int __init armada_370_xp_pmsu_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, of_pmsu_table);
+ if (np) {
+ pr_info("Initializing Power Management Service Unit\n");
+ pmsu_mp_base = of_iomap(np, 0);
+ pmsu_reset_base = of_iomap(np, 1);
+ }
+
+ return 0;
+}
+
+early_initcall(armada_370_xp_pmsu_init);
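Each CPU gets its own boot-address redirect slot (0x100 apart, at offset 0x24) and its own reset-control word (8 bytes apart); clearing bit 0 of the latter releases the CPU from reset. The sketch below only evaluates that arithmetic, with plain integers in place of the ioremapped PMSU registers:

#include <stdio.h>

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)     (((cpu) * 0x100) + 0x24)
#define PMSU_RESET_CTL_OFFSET(cpu)              ((cpu) * 0x8)

int main(void)
{
        for (unsigned int cpu = 0; cpu < 4; cpu++)
                printf("cpu %u: boot-addr redirect at +0x%03x, reset ctl at +0x%02x\n",
                       cpu, PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu),
                       PMSU_RESET_CTL_OFFSET(cpu));

        unsigned int reset_ctl = 0x1;   /* bit 0 set: the CPU is held in reset */
        reset_ctl &= ~0x1;              /* clearing bit 0 releases it, as in armada_xp_boot_cpu() */
        printf("reset ctl after release: 0x%x\n", reset_ctl);
        return 0;
}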
diff --git a/arch/arm/mach-mvebu/pmsu.h b/arch/arm/mach-mvebu/pmsu.h
new file mode 100644
index 00000000000..07a737c6b95
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.h
@@ -0,0 +1,16 @@
+/*
+ * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MVEBU_PMSU_H
+#define __MACH_MVEBU_PMSU_H
+
+int armada_xp_boot_cpu(unsigned int cpu_id, void *phys_addr);
+
+#endif /* __MACH_370_XP_PMSU_H */
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 4748ec551a6..c66129b5dd1 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -100,6 +100,25 @@ static struct fb_videomode apx4devkit_video_modes[] = {
},
};
+static struct fb_videomode apf28dev_video_modes[] = {
+ {
+ .name = "LW700",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 30303, /* picosecond */
+ .left_margin = 96,
+ .right_margin = 96, /* at least 3 & 1 */
+ .upper_margin = 0x14,
+ .lower_margin = 0x15,
+ .hsync_len = 64,
+ .vsync_len = 4,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
+ FB_SYNC_DATA_ENABLE_HIGH_ACT |
+ FB_SYNC_DOTCLK_FAILING_ACT,
+ },
+};
+
static struct mxsfb_platform_data mxsfb_pdata __initdata;
/*
@@ -160,6 +179,7 @@ static struct sys_timer imx28_timer = {
enum mac_oui {
OUI_FSL,
OUI_DENX,
+ OUI_CRYSTALFONTZ,
};
static void __init update_fec_mac_prop(enum mac_oui oui)
@@ -175,8 +195,12 @@ static void __init update_fec_mac_prop(enum mac_oui oui)
np = of_find_compatible_node(from, NULL, "fsl,imx28-fec");
if (!np)
return;
+
from = np;
+ if (of_get_property(np, "local-mac-address", NULL))
+ continue;
+
newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL);
if (!newmac)
return;
@@ -205,13 +229,18 @@ static void __init update_fec_mac_prop(enum mac_oui oui)
macaddr[1] = 0xe5;
macaddr[2] = 0x4e;
break;
+ case OUI_CRYSTALFONTZ:
+ macaddr[0] = 0x58;
+ macaddr[1] = 0xb9;
+ macaddr[2] = 0xe1;
+ break;
}
val = ocotp[i];
macaddr[3] = (val >> 16) & 0xff;
macaddr[4] = (val >> 8) & 0xff;
macaddr[5] = (val >> 0) & 0xff;
- prom_update_property(np, newmac);
+ of_update_property(np, newmac);
}
}
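update_fec_mac_prop() builds the address from a fixed vendor OUI in the first three bytes and the low 24 bits of an OCOTP fuse word in the last three. That construction can be modelled in isolation as below; the fuse value is made up for the example.

#include <stdio.h>

/* Build a MAC the way update_fec_mac_prop() does: vendor OUI first, then the
 * low 24 bits of an OCOTP fuse word. */
static void compose_mac(unsigned char mac[6], const unsigned char oui[3],
                        unsigned long ocotp_val)
{
        mac[0] = oui[0];
        mac[1] = oui[1];
        mac[2] = oui[2];
        mac[3] = (ocotp_val >> 16) & 0xff;
        mac[4] = (ocotp_val >> 8) & 0xff;
        mac[5] = ocotp_val & 0xff;
}

int main(void)
{
        const unsigned char crystalfontz_oui[3] = { 0x58, 0xb9, 0xe1 };
        unsigned char mac[6];

        compose_mac(mac, crystalfontz_oui, 0x00123456);         /* made-up fuse value */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}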
@@ -261,6 +290,11 @@ static void __init m28evk_init(void)
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
}
+static void __init sc_sps1_init(void)
+{
+ enable_clk_enet_out();
+}
+
static int apx4devkit_phy_fixup(struct phy_device *phy)
{
phy->dev_flags |= MICREL_PHY_50MHZ_CLK;
@@ -355,6 +389,22 @@ static void __init tx28_post_init(void)
pinctrl_put(pctl);
}
+static void __init cfa10049_init(void)
+{
+ enable_clk_enet_out();
+ update_fec_mac_prop(OUI_CRYSTALFONTZ);
+}
+
+static void __init apf28_init(void)
+{
+ enable_clk_enet_out();
+
+ mxsfb_pdata.mode_list = apf28dev_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
+ mxsfb_pdata.default_bpp = 16;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+}
+
static void __init mxs_machine_init(void)
{
if (of_machine_is_compatible("fsl,imx28-evk"))
@@ -365,6 +415,12 @@ static void __init mxs_machine_init(void)
m28evk_init();
else if (of_machine_is_compatible("bluegiga,apx4devkit"))
apx4devkit_init();
+ else if (of_machine_is_compatible("crystalfontz,cfa10049"))
+ cfa10049_init();
+ else if (of_machine_is_compatible("armadeus,imx28-apf28"))
+ apf28_init();
+ else if (of_machine_is_compatible("schulercontrol,imx28-sps1"))
+ sc_sps1_init();
of_platform_populate(NULL, of_default_bus_match_table,
mxs_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index 7c379261339..856f4c79606 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -29,6 +29,7 @@
#include <linux/of_irq.h>
#include <asm/mach/time.h>
+#include <asm/sched_clock.h>
#include <mach/mxs.h>
#include <mach/common.h>
@@ -233,15 +234,22 @@ static struct clocksource clocksource_mxs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static u32 notrace mxs_read_sched_clock_v2(void)
+{
+ return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
+}
+
static int __init mxs_clocksource_init(struct clk *timer_clk)
{
unsigned int c = clk_get_rate(timer_clk);
if (timrot_is_v1())
clocksource_register_hz(&clocksource_mxs, c);
- else
+ else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
+ setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+ }
return 0;
}
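Both the clocksource (via clocksource_mmio_readl_down) and the new sched_clock callback invert the raw TIMROT v2 value because the hardware counter counts down. A toy model of that inversion, with an ordinary variable standing in for the register:

#include <stdio.h>

/* The TIMROT v2 running-count register decrements; inverting it yields a
 * value that increases with time, which is what sched_clock needs. */
static unsigned int fake_counter = 0xFFFFFFF0;  /* stands in for the MMIO register */

static unsigned int read_sched_clock_model(void)
{
        return ~fake_counter;                   /* mirrors mxs_read_sched_clock_v2() */
}

int main(void)
{
        for (int tick = 0; tick < 4; tick++) {
                printf("raw 0x%08x -> monotonic 0x%08x\n",
                       fake_counter, read_sched_clock_model());
                fake_counter -= 5;              /* the hardware counts down as time passes */
        }
        return 0;
}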
diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c
index e4cfb7e5361..f1c972d87ba 100644
--- a/arch/arm/mach-netx/xc.c
+++ b/arch/arm/mach-netx/xc.c
@@ -136,7 +136,7 @@ int xc_request_firmware(struct xc *x)
if (head->magic != 0x4e657458) {
if (head->magic == 0x5874654e) {
dev_err(x->dev,
- "firmware magic is 'XteN'. Endianess problems?\n");
+ "firmware magic is 'XteN'. Endianness problems?\n");
ret = -ENODEV;
goto exit_release_firmware;
}
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index c744946ef02..706dc5727bb 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -4,7 +4,7 @@ menu "Nomadik boards"
config MACH_NOMADIK_8815NHK
bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
- select HAS_MTU
+ select CLKSRC_NOMADIK_MTU
select NOMADIK_8815
endmenu
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 22ef8a1abe0..98167a4319f 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -19,24 +19,22 @@
#include <linux/gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/fsmc.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/pinctrl-nomadik.h>
+#include <linux/platform_data/clocksource-nomadik-mtu.h>
+#include <linux/platform_data/mtd-nomadik-nand.h>
#include <asm/hardware/vic.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <asm/mach/time.h>
-
-#include <plat/mtu.h>
-
-#include <linux/platform_data/mtd-nomadik-nand.h>
-#include <mach/fsmc.h>
+#include <mach/irqs.h>
#include "cpu-8815.h"
@@ -44,39 +42,34 @@
#define SRC_CR_INIT_MASK 0x00007fff
#define SRC_CR_INIT_VAL 0x2aaa8000
+#define ALE_OFF 0x1000000
+#define CLE_OFF 0x800000
+
/* These addresses span 16MB, so use three individual pages */
static struct resource nhk8815_nand_resources[] = {
{
+ .name = "nand_data",
+ .start = 0x40000000,
+ .end = 0x40000000 + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
.name = "nand_addr",
- .start = NAND_IO_ADDR,
- .end = NAND_IO_ADDR + 0xfff,
+ .start = 0x40000000 + ALE_OFF,
+ .end = 0x40000000 + ALE_OFF + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "nand_cmd",
- .start = NAND_IO_CMD,
- .end = NAND_IO_CMD + 0xfff,
+ .start = 0x40000000 + CLE_OFF,
+ .end = 0x40000000 + CLE_OFF + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
- .name = "nand_data",
- .start = NAND_IO_DATA,
- .end = NAND_IO_DATA + 0xfff,
+ .name = "fsmc_regs",
+ .start = NOMADIK_FSMC_BASE,
+ .end = NOMADIK_FSMC_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
- }
+ },
};
-static int nhk8815_nand_init(void)
-{
- /* FSMC setup for nand chip select (8-bit nand in 8815NHK) */
- writel(0x0000000E, FSMC_PCR(0));
- writel(0x000D0A00, FSMC_PMEM(0));
- writel(0x00100A00, FSMC_PATT(0));
-
- /* enable access to the chip select area */
- writel(readl(FSMC_PCR(0)) | 0x04, FSMC_PCR(0));
-
- return 0;
-}
-
/*
* These partitions are the same as those used in the 2.6.20 release
* shipped by the vendor; the first two partitions are mandated
@@ -110,20 +103,28 @@ static struct mtd_partition nhk8815_partitions[] = {
}
};
-static struct nomadik_nand_platform_data nhk8815_nand_data = {
- .parts = nhk8815_partitions,
- .nparts = ARRAY_SIZE(nhk8815_partitions),
- .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING,
- .init = nhk8815_nand_init,
+static struct fsmc_nand_timings nhk8815_nand_timings = {
+ .thiz = 0,
+ .thold = 0x10,
+ .twait = 0x0A,
+ .tset = 0,
+};
+
+static struct fsmc_nand_platform_data nhk8815_nand_platform_data = {
+ .nand_timings = &nhk8815_nand_timings,
+ .partitions = nhk8815_partitions,
+ .nr_partitions = ARRAY_SIZE(nhk8815_partitions),
+ .width = FSMC_NAND_BW8,
};
static struct platform_device nhk8815_nand_device = {
- .name = "nomadik_nand",
- .dev = {
- .platform_data = &nhk8815_nand_data,
+ .name = "fsmc-nand",
+ .id = -1,
+ .resource = nhk8815_nand_resources,
+ .num_resources = ARRAY_SIZE(nhk8815_nand_resources),
+ .dev = {
+ .platform_data = &nhk8815_nand_platform_data,
},
- .resource = nhk8815_nand_resources,
- .num_resources = ARRAY_SIZE(nhk8815_nand_resources),
};
/* These are the partitions for the OneNand device, different from above */
@@ -178,6 +179,10 @@ static struct platform_device nhk8815_onenand_device = {
.num_resources = ARRAY_SIZE(nhk8815_onenand_resource),
};
+/* bus control reg. and bus timing reg. for CS0..CS3 */
+#define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3))
+#define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04)
+
static void __init nhk8815_onenand_init(void)
{
#ifdef CONFIG_MTD_ONENAND
@@ -260,7 +265,7 @@ static void __init nomadik_timer_init(void)
src_cr |= SRC_CR_INIT_VAL;
writel(src_cr, io_p2v(NOMADIK_SRC_BASE));
- nmdk_timer_init(io_p2v(NOMADIK_MTU0_BASE));
+ nmdk_timer_init(io_p2v(NOMADIK_MTU0_BASE), IRQ_MTU0);
}
static struct sys_timer nomadik_timer = {
diff --git a/arch/arm/mach-nomadik/include/mach/fsmc.h b/arch/arm/mach-nomadik/include/mach/fsmc.h
deleted file mode 100644
index 8c2c0518368..00000000000
--- a/arch/arm/mach-nomadik/include/mach/fsmc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-
-/* Definitions for the Nomadik FSMC "Flexible Static Memory controller" */
-
-#ifndef __ASM_ARCH_FSMC_H
-#define __ASM_ARCH_FSMC_H
-
-#include <mach/hardware.h>
-/*
- * Register list
- */
-
-/* bus control reg. and bus timing reg. for CS0..CS3 */
-#define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3))
-#define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04)
-
-/* PC-card and NAND:
- * PCR = control register
- * PMEM = memory timing
- * PATT = attribute timing
- * PIO = I/O timing
- * PECCR = ECC result
- */
-#define FSMC_PCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x00)
-#define FSMC_PMEM(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x08)
-#define FSMC_PATT(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x0c)
-#define FSMC_PIO(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x10)
-#define FSMC_PECCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x14)
-
-#endif /* __ASM_ARCH_FSMC_H */
diff --git a/arch/arm/mach-nomadik/include/mach/irqs.h b/arch/arm/mach-nomadik/include/mach/irqs.h
index a118e615f86..b549d057154 100644
--- a/arch/arm/mach-nomadik/include/mach/irqs.h
+++ b/arch/arm/mach-nomadik/include/mach/irqs.h
@@ -72,7 +72,7 @@
#define NOMADIK_NR_GPIO 128 /* last 4 not wired to pins */
#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + NOMADIK_GPIO_OFFSET)
#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - NOMADIK_GPIO_OFFSET)
-#define NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
+#define NOMADIK_NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
/* Following two are used by entry_macro.S, to access our dual-vic */
#define VIC_REG_IRQSR0 0
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index f0e69cbc5ba..222d58c0ae7 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
- serial.o devices.o dma.o
+ serial.o devices.o dma.o fb.o
obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o timer.o
ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 4953cf7a512..2274bd677ef 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -39,7 +39,7 @@
#include <asm/mach/map.h>
#include <mach/mux.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/tc.h>
#include <mach/irda.h>
#include <linux/platform_data/keypad-omap.h>
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 563ba167bb1..1051935f0aa 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -43,7 +43,7 @@
#include <mach/mux.h>
#include <mach/tc.h>
#include <linux/platform_data/keypad-omap.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/flash.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 3e8ead67e45..24d2f2df11a 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -112,17 +112,6 @@ static void __init mipid_dev_init(void)
omapfb_set_lcd_config(&nokia770_lcd_config);
}
-static void __init ads7846_dev_init(void)
-{
- if (gpio_request(ADS7846_PENDOWN_GPIO, "ADS7846 pendown") < 0)
- printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
-}
-
-static int ads7846_get_pendown_state(void)
-{
- return !gpio_get_value(ADS7846_PENDOWN_GPIO);
-}
-
static struct ads7846_platform_data nokia770_ads7846_platform_data __initdata = {
.x_max = 0x0fff,
.y_max = 0x0fff,
@@ -131,7 +120,7 @@ static struct ads7846_platform_data nokia770_ads7846_platform_data __initdata =
.debounce_max = 10,
.debounce_tol = 3,
.debounce_rep = 1,
- .get_pendown_state = ads7846_get_pendown_state,
+ .gpio_pendown = ADS7846_PENDOWN_GPIO,
};
static struct spi_board_info nokia770_spi_board_info[] __initdata = {
@@ -241,7 +230,6 @@ static void __init omap_nokia770_init(void)
omap_serial_init();
omap_register_i2c_bus(1, 100, NULL, 0);
hwa742_dev_init();
- ads7846_dev_init();
mipid_dev_init();
omap1_usb_init(&nokia770_usb_config);
nokia770_mmc_init();
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 584b6fab894..c33dceb4660 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -37,7 +37,7 @@
#include <mach/flash.h>
#include <mach/mux.h>
#include <mach/tc.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/irda.h>
#include <linux/platform_data/keypad-omap.h>
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index fbc986bfe69..2948b0ee4be 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -36,7 +36,7 @@
#include <mach/flash.h>
#include <mach/mux.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/tc.h>
#include <mach/irda.h>
#include <linux/platform_data/keypad-omap.h>
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 60d917a9376..7a05895c0be 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -38,7 +38,7 @@
#include <mach/flash.h>
#include <mach/mux.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/tc.h>
#include <mach/irda.h>
#include <linux/platform_data/keypad-omap.h>
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 1ebc7e08d6e..20ed52ae171 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -36,7 +36,7 @@
#include <mach/flash.h>
#include <mach/mux.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/irda.h>
#include <mach/tc.h>
#include <mach/board-sx1.h>
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 978aed85d32..e190611e4b4 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -25,7 +25,7 @@
#include <linux/device.h>
#include <linux/io.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/tc.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-omap1/fb.c b/arch/arm/mach-omap1/fb.c
new file mode 100644
index 00000000000..c770d45c722
--- /dev/null
+++ b/arch/arm/mach-omap1/fb.c
@@ -0,0 +1,80 @@
+/*
+ * File: arch/arm/plat-omap/fb.c
+ *
+ * Framebuffer device registration for TI OMAP platforms
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
+#include <linux/io.h>
+#include <linux/omapfb.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach/map.h>
+
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+
+static bool omapfb_lcd_configured;
+static struct omapfb_platform_data omapfb_config;
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+
+static struct platform_device omap_fb_device = {
+ .name = "omapfb",
+ .id = -1,
+ .dev = {
+ .dma_mask = &omap_fb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &omapfb_config,
+ },
+ .num_resources = 0,
+};
+
+void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
+{
+ omapfb_config.lcd = *config;
+ omapfb_lcd_configured = true;
+}
+
+static int __init omap_init_fb(void)
+{
+ /*
+ * If the board file has not set the lcd config with
+ * omapfb_set_lcd_config(), don't bother registering the omapfb device
+ */
+ if (!omapfb_lcd_configured)
+ return 0;
+
+ return platform_device_register(&omap_fb_device);
+}
+
+arch_initcall(omap_init_fb);
+
+#else
+
+void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
+{
+}
+
+#endif
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 5a3b80617a1..499b8accb83 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -18,7 +18,7 @@
#include <mach/mux.h>
#include <mach/tc.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "iomap.h"
#include "common.h"
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
index 7ed8c1857d5..77924be37d4 100644
--- a/arch/arm/mach-omap1/lcd_dma.c
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -27,7 +27,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/hardware.h>
#include <mach/lcdc.h>
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index c6d8fdf92e9..b0d4723c9a9 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/mux.h>
#include "soc.h"
#include <linux/platform_data/asoc-ti-mcbsp.h>
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 66d663a6ef3..7a7690ab6cb 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -52,7 +52,7 @@
#include <mach/tc.h>
#include <mach/mux.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <plat/dmtimer.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 2265e582688..41b581fd021 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -26,6 +26,8 @@ config SOC_HAS_OMAP2_SDRC
config SOC_HAS_REALTIME_COUNTER
bool "Real time free running counter"
+ depends on SOC_OMAP5
+ default y
config ARCH_OMAP2
bool "TI OMAP2"
@@ -34,6 +36,7 @@ config ARCH_OMAP2
select CPU_V6
select MULTI_IRQ_HANDLER
select SOC_HAS_OMAP2_SDRC
+ select COMMON_CLK
config ARCH_OMAP3
bool "TI OMAP3"
@@ -47,6 +50,7 @@ config ARCH_OMAP3
select PM_OPP if PM
select PM_RUNTIME if CPU_IDLE
select SOC_HAS_OMAP2_SDRC
+ select COMMON_CLK
select USB_ARCH_HAS_EHCI if USB_SUPPORT
config ARCH_OMAP4
@@ -68,6 +72,7 @@ config ARCH_OMAP4
select PM_OPP if PM
select PM_RUNTIME if CPU_IDLE
select USB_ARCH_HAS_EHCI if USB_SUPPORT
+ select COMMON_CLK
config SOC_OMAP5
bool "TI OMAP5"
@@ -76,7 +81,7 @@ config SOC_OMAP5
select ARM_GIC
select CPU_V7
select HAVE_SMP
- select SOC_HAS_REALTIME_COUNTER
+ select COMMON_CLK
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -111,6 +116,7 @@ config SOC_AM33XX
select ARM_CPU_SUSPEND if PM
select CPU_V7
select MULTI_IRQ_HANDLER
+ select COMMON_CLK
config OMAP_PACKAGE_ZAF
bool
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index b455ffc12eb..947cafe65ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
+obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
omap_device.o sram.o
@@ -160,17 +160,17 @@ obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_apll.o clkt2xxx_osc.o
obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o
-obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o
-obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o
+obj-$(CONFIG_SOC_OMAP2420) += cclock2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += clock2430.o cclock2430_data.o
obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o
obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o clock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o cclock3xxx_data.o
obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) cclock44xx_data.o
obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
obj-$(CONFIG_SOC_AM33XX) += $(clock-common) dpll3xxx.o
-obj-$(CONFIG_SOC_AM33XX) += clock33xx_data.o
+obj-$(CONFIG_SOC_AM33XX) += cclock33xx_data.o
obj-$(CONFIG_SOC_OMAP5) += $(clock-common)
obj-$(CONFIG_SOC_OMAP5) += dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 6601754f951..bb73afc9ac1 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -31,7 +31,7 @@
#include <asm/mach/map.h>
#include "common.h"
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <video/omapdss.h>
#include <video/omap-panel-tfp410.h>
@@ -157,6 +157,7 @@ static struct omap_dss_device sdp3430_lcd_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = -1,
+ .i2c_bus_num = -1,
};
static struct omap_dss_device sdp3430_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 4be58fd071f..f81a303b87f 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -208,6 +208,7 @@ static struct omap_dss_device am3517_evm_tv_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = -1,
+ .i2c_bus_num = -1,
};
static struct omap_dss_device am3517_evm_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c8e37dc0089..b3102c2f4a3 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -241,6 +241,7 @@ static struct omap_dss_device cm_t35_lcd_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = CM_T35_DVI_EN_GPIO,
+ .i2c_bus_num = -1,
};
static struct omap_dss_device cm_t35_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 7667eb74952..12865af25d3 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -141,6 +141,7 @@ static struct omap_dss_device devkit8000_lcd_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = -1,
+ .i2c_bus_num = 1,
};
static struct omap_dss_device devkit8000_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index b626dbe6f7b..3be1311f9e3 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -27,14 +27,12 @@
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#include <linux/mfd/menelaus.h>
+#include <linux/omap-dma.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat-omap/dma-omap.h>
-#include <plat/debug-devices.h>
-
#include <video/omapdss.h>
#include <video/omap-panel-generic-dpi.h>
@@ -42,11 +40,9 @@
#include "mux.h"
#include "control.h"
#include "gpmc.h"
+#include "gpmc-smc91x.h"
#define H4_FLASH_CS 0
-#define H4_SMC91X_CS 1
-
-#define H4_ETHR_GPIO_IRQ 92
#if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE)
static const uint32_t board_matrix_keys[] = {
@@ -250,71 +246,31 @@ static u32 is_gpmc_muxed(void)
return 0;
}
-static inline void __init h4_init_debug(void)
-{
- int eth_cs;
- unsigned long cs_mem_base;
- unsigned int muxed, rate;
- struct clk *gpmc_fck;
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
- eth_cs = H4_SMC91X_CS;
-
- gpmc_fck = clk_get(NULL, "gpmc_fck"); /* Always on ENABLE_ON_INIT */
- if (IS_ERR(gpmc_fck)) {
- WARN_ON(1);
- return;
- }
-
- clk_prepare_enable(gpmc_fck);
- rate = clk_get_rate(gpmc_fck);
- clk_disable_unprepare(gpmc_fck);
- clk_put(gpmc_fck);
+static struct omap_smc91x_platform_data board_smc91x_data = {
+ .cs = 1,
+ .gpio_irq = 92,
+ .flags = GPMC_TIMINGS_SMC91C96 | IORESOURCE_IRQ_LOWLEVEL,
+};
+static void __init board_smc91x_init(void)
+{
if (is_gpmc_muxed())
- muxed = 0x200;
- else
- muxed = 0;
-
- /* Make sure CS1 timings are correct */
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG1,
- 0x00011000 | muxed);
-
- if (rate >= 160000000) {
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f01);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080803);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1c0b1c0a);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x041f1F1F);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000004C4);
- } else if (rate >= 130000000) {
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f00);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080802);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1C091C09);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x041f1F1F);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000004C4);
- } else {/* rate = 100000000 */
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f00);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080802);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1C091C09);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x031A1F1F);
- gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000003C2);
- }
+ board_smc91x_data.flags |= GPMC_MUX_ADD_DATA;
- if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
- printk(KERN_ERR "Failed to request GPMC mem for smc91x\n");
- goto out;
- }
-
- udelay(100);
+ omap_mux_init_gpio(board_smc91x_data.gpio_irq, OMAP_PIN_INPUT);
+ gpmc_smc91x_init(&board_smc91x_data);
+}
- omap_mux_init_gpio(92, 0);
- if (debug_card_init(cs_mem_base, H4_ETHR_GPIO_IRQ) < 0)
- gpmc_cs_free(eth_cs);
+#else
-out:
- clk_disable_unprepare(gpmc_fck);
- clk_put(gpmc_fck);
+static inline void board_smc91x_init(void)
+{
}
+#endif
+
static void __init h4_init_flash(void)
{
unsigned long base;
@@ -371,6 +327,7 @@ static void __init omap_h4_init(void)
omap_serial_init();
omap_sdrc_init(NULL, NULL);
h4_init_flash();
+ board_smc91x_init();
omap_display_init(&h4_dss_data);
}
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index a4e167c55c1..0abb30fe399 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -16,10 +16,12 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/stddef.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/usb/musb.h>
+#include <linux/platform_data/i2c-cbus-gpio.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <linux/mfd/menelaus.h>
@@ -40,6 +42,45 @@
#define TUSB6010_GPIO_ENABLE 0
#define TUSB6010_DMACHAN 0x3f
+#if defined(CONFIG_I2C_CBUS_GPIO) || defined(CONFIG_I2C_CBUS_GPIO_MODULE)
+static struct i2c_cbus_platform_data n8x0_cbus_data = {
+ .clk_gpio = 66,
+ .dat_gpio = 65,
+ .sel_gpio = 64,
+};
+
+static struct platform_device n8x0_cbus_device = {
+ .name = "i2c-cbus-gpio",
+ .id = 3,
+ .dev = {
+ .platform_data = &n8x0_cbus_data,
+ },
+};
+
+static struct i2c_board_info n8x0_i2c_board_info_3[] __initdata = {
+ {
+ I2C_BOARD_INFO("retu-mfd", 0x01),
+ },
+};
+
+static void __init n8x0_cbus_init(void)
+{
+ const int retu_irq_gpio = 108;
+
+ if (gpio_request_one(retu_irq_gpio, GPIOF_IN, "Retu IRQ"))
+ return;
+ irq_set_irq_type(gpio_to_irq(retu_irq_gpio), IRQ_TYPE_EDGE_RISING);
+ n8x0_i2c_board_info_3[0].irq = gpio_to_irq(retu_irq_gpio);
+ i2c_register_board_info(3, n8x0_i2c_board_info_3,
+ ARRAY_SIZE(n8x0_i2c_board_info_3));
+ platform_device_register(&n8x0_cbus_device);
+}
+#else /* CONFIG_I2C_CBUS_GPIO */
+static void __init n8x0_cbus_init(void)
+{
+}
+#endif /* CONFIG_I2C_CBUS_GPIO */
+
#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
@@ -678,6 +719,7 @@ static void __init n8x0_init_machine(void)
gpmc_onenand_init(board_onenand_data);
n8x0_mmc_init();
n8x0_usb_init();
+ n8x0_cbus_init();
}
MACHINE_START(NOKIA_N800, "Nokia N800")
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 54647d6286b..3985f35aee0 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -240,6 +240,7 @@ static struct omap_dss_device omap3_evm_tv_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
+ .i2c_bus_num = -1,
};
static struct omap_dss_device omap3_evm_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index d8638b3b4f9..53a6cbcf974 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -118,6 +118,7 @@ static struct omap_dss_device omap3_stalker_tv_device = {
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = DSS_ENABLE_GPIO,
+ .i2c_bus_num = -1,
};
static struct omap_dss_device omap3_stalker_dvi_device = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 07005fe40a2..cf07e289b4e 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -31,7 +31,7 @@
#include <asm/system_info.h>
#include "common.h"
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "gpmc-smc91x.h"
#include "board-rx51.h"
@@ -256,6 +256,11 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
},
};
+static struct platform_device rx51_battery_device = {
+ .name = "rx51-battery",
+ .id = -1,
+};
+
static void rx51_charger_set_power(bool on)
{
gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
@@ -277,6 +282,7 @@ static void __init rx51_charger_init(void)
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
+ platform_device_register(&rx51_battery_device);
platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index c22e111bcd0..46f4fc98276 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <asm/mach-types.h>
#include <video/omapdss.h>
-#include <plat/vram.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include "board-rx51.h"
@@ -87,17 +86,4 @@ static int __init rx51_video_init(void)
}
subsys_initcall(rx51_video_init);
-
-void __init rx51_video_mem_init(void)
-{
- /*
- * GFX 864x480x32bpp
- * VID1/2 1280x720x32bpp double buffered
- */
- omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
- 2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
-}
-
-#else
-void __init rx51_video_mem_init(void) { }
#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index b67fe11d0d9..d0374ea2dfb 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -24,7 +24,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "common.h"
#include "mux.h"
@@ -34,8 +34,6 @@
#define RX51_GPIO_SLEEP_IND 162
-extern void rx51_video_mem_init(void);
-
static struct gpio_led gpio_leds[] = {
{
.name = "sleep_ind",
@@ -112,7 +110,6 @@ static void __init rx51_init(void)
static void __init rx51_reserve(void)
{
- rx51_video_mem_init();
omap_reserve();
}
diff --git a/arch/arm/mach-omap2/cclock2420_data.c b/arch/arm/mach-omap2/cclock2420_data.c
new file mode 100644
index 00000000000..7e5febe456d
--- /dev/null
+++ b/arch/arm/mach-omap2/cclock2420_data.c
@@ -0,0 +1,1950 @@
+/*
+ * OMAP2420 clock data
+ *
+ * Copyright (C) 2005-2012 Texas Instruments, Inc.
+ * Copyright (C) 2004-2011 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ * Updated to COMMON clk format by Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-private.h>
+#include <linux/list.h>
+
+#include "soc.h"
+#include "iomap.h"
+#include "clock.h"
+#include "clock2xxx.h"
+#include "opp2xxx.h"
+#include "cm2xxx.h"
+#include "prm2xxx.h"
+#include "prm-regbits-24xx.h"
+#include "cm-regbits-24xx.h"
+#include "sdrc.h"
+#include "control.h"
+
+#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
+
+/*
+ * 2420 clock tree.
+ *
+ * NOTE:In many cases here we are assigning a 'default' parent. In
+ * many cases the parent is selectable. The set parent calls will
+ * also switch sources.
+ *
+ * Several sources are given initial rates which may be wrong, this will
+ * be fixed up in the init func.
+ *
+ * Things are broadly separated below by clock domains. It is
+ * noteworthy that most peripherals have dependencies on multiple clock
+ * domains. Many get their interface clocks from the L4 domain, but get
+ * functional clocks from fixed sources or other core domain derived
+ * clocks.
+ */
+
+DEFINE_CLK_FIXED_RATE(alt_ck, CLK_IS_ROOT, 54000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(func_32k_ck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
+
+static struct clk osc_ck;
+
+static const struct clk_ops osc_ck_ops = {
+ .recalc_rate = &omap2_osc_clk_recalc,
+};
+
+static struct clk_hw_omap osc_ck_hw = {
+ .hw = {
+ .clk = &osc_ck,
+ },
+};
+
+static struct clk osc_ck = {
+ .name = "osc_ck",
+ .ops = &osc_ck_ops,
+ .hw = &osc_ck_hw.hw,
+ .flags = CLK_IS_ROOT,
+};
+
+DEFINE_CLK_FIXED_RATE(secure_32k_ck, CLK_IS_ROOT, 32768, 0x0);
+
+static struct clk sys_ck;
+
+static const char *sys_ck_parent_names[] = {
+ "osc_ck",
+};
+
+static const struct clk_ops sys_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .recalc_rate = &omap2xxx_sys_clk_recalc,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(sys_ck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(sys_ck, sys_ck_parent_names, sys_ck_ops);
+
+static struct dpll_data dpll_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .mult_mask = OMAP24XX_DPLL_MULT_MASK,
+ .div1_mask = OMAP24XX_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP24XX_EN_DPLL_MASK,
+ .max_multiplier = 1023,
+ .min_divider = 1,
+ .max_divider = 16,
+};
+
+static struct clk dpll_ck;
+
+static const char *dpll_ck_parent_names[] = {
+ "sys_ck",
+};
+
+static const struct clk_ops dpll_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap2_dpllcore_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap2_reprogram_dpllcore,
+};
+
+static struct clk_hw_omap dpll_ck_hw = {
+ .hw = {
+ .clk = &dpll_ck,
+ },
+ .ops = &clkhwops_omap2xxx_dpll,
+ .dpll_data = &dpll_dd,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll_ck, dpll_ck_parent_names, dpll_ck_ops);
+
+static struct clk core_ck;
+
+static const char *core_ck_parent_names[] = {
+ "dpll_ck",
+};
+
+static const struct clk_ops core_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_ck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
+
+DEFINE_CLK_DIVIDER(core_l3_ck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_L3_SHIFT, OMAP24XX_CLKSEL_L3_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(l4_ck, "core_l3_ck", &core_l3_ck, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_L4_SHIFT, OMAP24XX_CLKSEL_L4_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk aes_ick;
+
+static const char *aes_ick_parent_names[] = {
+ "l4_ck",
+};
+
+static const struct clk_ops aes_ick_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk_hw_omap aes_ick_hw = {
+ .hw = {
+ .clk = &aes_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_AES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(aes_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk apll54_ck;
+
+static const struct clk_ops apll54_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clk_apll54_enable,
+ .disable = &omap2_clk_apll54_disable,
+ .recalc_rate = &omap2_clk_apll54_recalc,
+};
+
+static struct clk_hw_omap apll54_ck_hw = {
+ .hw = {
+ .clk = &apll54_ck,
+ },
+ .ops = &clkhwops_apll54,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(apll54_ck, dpll_ck_parent_names, apll54_ck_ops);
+
+static struct clk apll96_ck;
+
+static const struct clk_ops apll96_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clk_apll96_enable,
+ .disable = &omap2_clk_apll96_disable,
+ .recalc_rate = &omap2_clk_apll96_recalc,
+};
+
+static struct clk_hw_omap apll96_ck_hw = {
+ .hw = {
+ .clk = &apll96_ck,
+ },
+ .ops = &clkhwops_apll96,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(apll96_ck, dpll_ck_parent_names, apll96_ck_ops);
+
+static struct clk func_96m_ck;
+
+static const char *func_96m_ck_parent_names[] = {
+ "apll96_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(func_96m_ck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(func_96m_ck, func_96m_ck_parent_names, core_ck_ops);
+
+static struct clk cam_fck;
+
+static const char *cam_fck_parent_names[] = {
+ "func_96m_ck",
+};
+
+static struct clk_hw_omap cam_fck_hw = {
+ .hw = {
+ .clk = &cam_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_CAM_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk cam_ick;
+
+static struct clk_hw_omap cam_ick_hw = {
+ .hw = {
+ .clk = &cam_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_CAM_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk des_ick;
+
+static struct clk_hw_omap des_ick_hw = {
+ .hw = {
+ .clk = &des_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_DES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(des_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate dsp_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_242X },
+ { .div = 8, .val = 8, .flags = RATE_IN_242X },
+ { .div = 12, .val = 12, .flags = RATE_IN_242X },
+ { .div = 0 }
+};
+
+static const struct clksel dsp_fck_clksel[] = {
+ { .parent = &core_ck, .rates = dsp_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *dsp_fck_parent_names[] = {
+ "core_ck",
+};
+
+static const struct clk_ops dsp_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dsp_fck, "dsp_clkdm", dsp_fck_clksel,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_DSP_MASK,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
+ OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait,
+ dsp_fck_parent_names, dsp_fck_ops);
+
+static const struct clksel dsp_ick_clksel[] = {
+ { .parent = &dsp_fck, .rates = dsp_ick_rates },
+ { .parent = NULL },
+};
+
+static const char *dsp_ick_parent_names[] = {
+ "dsp_fck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dsp_ick, "dsp_clkdm", dsp_ick_clksel,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_DSP_IF_MASK,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_ICLKEN),
+ OMAP2420_EN_DSP_IPI_SHIFT, &clkhwops_iclk_wait,
+ dsp_ick_parent_names, dsp_fck_ops);
+
+static const struct clksel_rate dss1_fck_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate dss1_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 5, .val = 5, .flags = RATE_IN_24XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_24XX },
+ { .div = 8, .val = 8, .flags = RATE_IN_24XX },
+ { .div = 9, .val = 9, .flags = RATE_IN_24XX },
+ { .div = 12, .val = 12, .flags = RATE_IN_24XX },
+ { .div = 16, .val = 16, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel dss1_fck_clksel[] = {
+ { .parent = &sys_ck, .rates = dss1_fck_sys_rates },
+ { .parent = &core_ck, .rates = dss1_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *dss1_fck_parent_names[] = {
+ "sys_ck", "core_ck",
+};
+
+static struct clk dss1_fck;
+
+static const struct clk_ops dss1_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dss1_fck, "dss_clkdm", dss1_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_DSS1_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_DSS1_SHIFT, NULL,
+ dss1_fck_parent_names, dss1_fck_ops);
+
+static const struct clksel_rate dss2_fck_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate dss2_fck_48m_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate func_48m_apll96_rates[] = {
+ { .div = 2, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate func_48m_alt_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel func_48m_clksel[] = {
+ { .parent = &apll96_ck, .rates = func_48m_apll96_rates },
+ { .parent = &alt_ck, .rates = func_48m_alt_rates },
+ { .parent = NULL },
+};
+
+static const char *func_48m_ck_parent_names[] = {
+ "apll96_ck", "alt_ck",
+};
+
+static struct clk func_48m_ck;
+
+static const struct clk_ops func_48m_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+static struct clk_hw_omap func_48m_ck_hw = {
+ .hw = {
+ .clk = &func_48m_ck,
+ },
+ .clksel = func_48m_clksel,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(func_48m_ck, func_48m_ck_parent_names, func_48m_ck_ops);
+
+static const struct clksel dss2_fck_clksel[] = {
+ { .parent = &sys_ck, .rates = dss2_fck_sys_rates },
+ { .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
+ { .parent = NULL },
+};
+
+static const char *dss2_fck_parent_names[] = {
+ "sys_ck", "func_48m_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dss2_fck, "dss_clkdm", dss2_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_DSS2_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_DSS2_SHIFT, NULL,
+ dss2_fck_parent_names, dss1_fck_ops);
+
+static const char *func_54m_ck_parent_names[] = {
+ "apll54_ck", "alt_ck",
+};
+
+DEFINE_CLK_MUX(func_54m_ck, func_54m_ck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ OMAP24XX_54M_SOURCE_SHIFT, OMAP24XX_54M_SOURCE_WIDTH,
+ 0x0, NULL);
+
+static struct clk dss_54m_fck;
+
+static const char *dss_54m_fck_parent_names[] = {
+ "func_54m_ck",
+};
+
+static struct clk_hw_omap dss_54m_fck_hw = {
+ .hw = {
+ .clk = &dss_54m_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_54m_fck, dss_54m_fck_parent_names, aes_ick_ops);
+
+static struct clk dss_ick;
+
+static struct clk_hw_omap dss_ick_hw = {
+ .hw = {
+ .clk = &dss_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk eac_fck;
+
+static struct clk_hw_omap eac_fck_hw = {
+ .hw = {
+ .clk = &eac_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP2420_EN_EAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(eac_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk eac_ick;
+
+static struct clk_hw_omap eac_ick_hw = {
+ .hw = {
+ .clk = &eac_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_EAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(eac_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk emul_ck;
+
+static struct clk_hw_omap emul_ck_hw = {
+ .hw = {
+ .clk = &emul_ck,
+ },
+ .enable_reg = OMAP2420_PRCM_CLKEMUL_CTRL,
+ .enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(emul_ck, dss_54m_fck_parent_names, aes_ick_ops);
+
+DEFINE_CLK_FIXED_FACTOR(func_12m_ck, "func_48m_ck", &func_48m_ck, 0x0, 1, 4);
+
+static struct clk fac_fck;
+
+static const char *fac_fck_parent_names[] = {
+ "func_12m_ck",
+};
+
+static struct clk_hw_omap fac_fck_hw = {
+ .hw = {
+ .clk = &fac_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fac_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk fac_ick;
+
+static struct clk_hw_omap fac_ick_hw = {
+ .hw = {
+ .clk = &fac_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fac_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel gfx_fck_clksel[] = {
+ { .parent = &core_l3_ck, .rates = gfx_l3_rates },
+ { .parent = NULL },
+};
+
+static const char *gfx_2d_fck_parent_names[] = {
+ "core_l3_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gfx_2d_fck, "gfx_clkdm", gfx_fck_clksel,
+ OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ OMAP_CLKSEL_GFX_MASK,
+ OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ OMAP24XX_EN_2D_SHIFT, &clkhwops_wait,
+ gfx_2d_fck_parent_names, dsp_fck_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gfx_3d_fck, "gfx_clkdm", gfx_fck_clksel,
+ OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ OMAP_CLKSEL_GFX_MASK,
+ OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ OMAP24XX_EN_3D_SHIFT, &clkhwops_wait,
+ gfx_2d_fck_parent_names, dsp_fck_ops);
+
+static struct clk gfx_ick;
+
+static const char *gfx_ick_parent_names[] = {
+ "core_l3_ck",
+};
+
+static struct clk_hw_omap gfx_ick_hw = {
+ .hw = {
+ .clk = &gfx_ick,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP_EN_GFX_SHIFT,
+ .clkdm_name = "gfx_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gfx_ick, gfx_ick_parent_names, aes_ick_ops);
+
+static struct clk gpios_fck;
+
+static const char *gpios_fck_parent_names[] = {
+ "func_32k_ck",
+};
+
+static struct clk_hw_omap gpios_fck_hw = {
+ .hw = {
+ .clk = &gpios_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpios_fck, gpios_fck_parent_names, aes_ick_ops);
+
+static struct clk wu_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(wu_l4_ick, "wkup_clkdm");
+DEFINE_STRUCT_CLK(wu_l4_ick, dpll_ck_parent_names, core_ck_ops);
+
+static struct clk gpios_ick;
+
+static const char *gpios_ick_parent_names[] = {
+ "wu_l4_ick",
+};
+
+static struct clk_hw_omap gpios_ick_hw = {
+ .hw = {
+ .clk = &gpios_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpios_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk gpmc_fck;
+
+static struct clk_hw_omap gpmc_fck_hw = {
+ .hw = {
+ .clk = &gpmc_fck,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpmc_fck, gfx_ick_parent_names, core_ck_ops);
+
+static const struct clksel_rate gpt_alt_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel omap24xx_gpt_clksel[] = {
+ { .parent = &func_32k_ck, .rates = gpt_32k_rates },
+ { .parent = &sys_ck, .rates = gpt_sys_rates },
+ { .parent = &alt_ck, .rates = gpt_alt_rates },
+ { .parent = NULL },
+};
+
+static const char *gpt10_fck_parent_names[] = {
+ "func_32k_ck", "sys_ck", "alt_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT10_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT10_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt10_ick;
+
+static struct clk_hw_omap gpt10_ick_hw = {
+ .hw = {
+ .clk = &gpt10_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt10_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT11_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT11_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt11_ick;
+
+static struct clk_hw_omap gpt11_ick_hw = {
+ .hw = {
+ .clk = &gpt11_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt11_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt12_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT12_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT12_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt12_ick;
+
+static struct clk_hw_omap gpt12_ick_hw = {
+ .hw = {
+ .clk = &gpt12_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt12_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clk_ops gpt1_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_GPT1_MASK,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ OMAP24XX_EN_GPT1_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, gpt1_fck_ops);
+
+static struct clk gpt1_ick;
+
+static struct clk_hw_omap gpt1_ick_hw = {
+ .hw = {
+ .clk = &gpt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt1_ick, gpios_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT2_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT2_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt2_ick;
+
+static struct clk_hw_omap gpt2_ick_hw = {
+ .hw = {
+ .clk = &gpt2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt2_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT3_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT3_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt3_ick;
+
+static struct clk_hw_omap gpt3_ick_hw = {
+ .hw = {
+ .clk = &gpt3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt3_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT4_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT4_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt4_ick;
+
+static struct clk_hw_omap gpt4_ick_hw = {
+ .hw = {
+ .clk = &gpt4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt4_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT5_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT5_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt5_ick;
+
+static struct clk_hw_omap gpt5_ick_hw = {
+ .hw = {
+ .clk = &gpt5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt5_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT6_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT6_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt6_ick;
+
+static struct clk_hw_omap gpt6_ick_hw = {
+ .hw = {
+ .clk = &gpt6_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt6_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT7_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT7_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt7_ick;
+
+static struct clk_hw_omap gpt7_ick_hw = {
+ .hw = {
+ .clk = &gpt7_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt7_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT8_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT8_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt8_ick;
+
+static struct clk_hw_omap gpt8_ick_hw = {
+ .hw = {
+ .clk = &gpt8_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt8_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT9_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT9_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt9_ick;
+
+static struct clk_hw_omap gpt9_ick_hw = {
+ .hw = {
+ .clk = &gpt9_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt9_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk hdq_fck;
+
+static struct clk_hw_omap hdq_fck_hw = {
+ .hw = {
+ .clk = &hdq_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk hdq_ick;
+
+static struct clk_hw_omap hdq_ick_hw = {
+ .hw = {
+ .clk = &hdq_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk i2c1_fck;
+
+static struct clk_hw_omap i2c1_fck_hw = {
+ .hw = {
+ .clk = &i2c1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP2420_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c1_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk i2c1_ick;
+
+static struct clk_hw_omap i2c1_ick_hw = {
+ .hw = {
+ .clk = &i2c1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk i2c2_fck;
+
+static struct clk_hw_omap i2c2_fck_hw = {
+ .hw = {
+ .clk = &i2c2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP2420_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c2_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk i2c2_ick;
+
+static struct clk_hw_omap i2c2_ick_hw = {
+ .hw = {
+ .clk = &i2c2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c2_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(iva1_ifck, "iva1_clkdm", dsp_fck_clksel,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
+ OMAP2420_CLKSEL_IVA_MASK,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
+ OMAP2420_EN_IVA_COP_SHIFT, &clkhwops_wait,
+ dsp_fck_parent_names, dsp_fck_ops);
+
+static struct clk iva1_mpu_int_ifck;
+
+static const char *iva1_mpu_int_ifck_parent_names[] = {
+ "iva1_ifck",
+};
+
+static const struct clk_ops iva1_mpu_int_ifck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap_fixed_divisor_recalc,
+};
+
+static struct clk_hw_omap iva1_mpu_int_ifck_hw = {
+ .hw = {
+ .clk = &iva1_mpu_int_ifck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP2420_EN_IVA_MPU_SHIFT,
+ .clkdm_name = "iva1_clkdm",
+ .fixed_div = 2,
+};
+
+DEFINE_STRUCT_CLK(iva1_mpu_int_ifck, iva1_mpu_int_ifck_parent_names,
+ iva1_mpu_int_ifck_ops);
+
+static struct clk mailboxes_ick;
+
+static struct clk_hw_omap mailboxes_ick_hw = {
+ .hw = {
+ .clk = &mailboxes_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mailboxes_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_fck_clksel[] = {
+ { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp1_fck_parent_names[] = {
+ "func_96m_ck", "mcbsp_clks",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP1_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_MCBSP1_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp1_ick;
+
+static struct clk_hw_omap mcbsp1_ick_hw = {
+ .hw = {
+ .clk = &mcbsp1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp1_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP2_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_MCBSP2_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp2_ick;
+
+static struct clk_hw_omap mcbsp2_ick_hw = {
+ .hw = {
+ .clk = &mcbsp2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mcspi1_fck;
+
+static const char *mcspi1_fck_parent_names[] = {
+ "func_48m_ck",
+};
+
+static struct clk_hw_omap mcspi1_fck_hw = {
+ .hw = {
+ .clk = &mcspi1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk mcspi1_ick;
+
+static struct clk_hw_omap mcspi1_ick_hw = {
+ .hw = {
+ .clk = &mcspi1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mcspi2_fck;
+
+static struct clk_hw_omap mcspi2_fck_hw = {
+ .hw = {
+ .clk = &mcspi2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk mcspi2_ick;
+
+static struct clk_hw_omap mcspi2_ick_hw = {
+ .hw = {
+ .clk = &mcspi2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mmc_fck;
+
+static struct clk_hw_omap mmc_fck_hw = {
+ .hw = {
+ .clk = &mmc_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP2420_EN_MMC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmc_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk mmc_ick;
+
+static struct clk_hw_omap mmc_ick_hw = {
+ .hw = {
+ .clk = &mmc_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_MMC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmc_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_DIVIDER(mpu_ck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_MPU_SHIFT, OMAP24XX_CLKSEL_MPU_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk mpu_wdt_fck;
+
+static struct clk_hw_omap mpu_wdt_fck_hw = {
+ .hw = {
+ .clk = &mpu_wdt_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mpu_wdt_fck, gpios_fck_parent_names, aes_ick_ops);
+
+static struct clk mpu_wdt_ick;
+
+static struct clk_hw_omap mpu_wdt_ick_hw = {
+ .hw = {
+ .clk = &mpu_wdt_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mpu_wdt_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk mspro_fck;
+
+static struct clk_hw_omap mspro_fck_hw = {
+ .hw = {
+ .clk = &mspro_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk mspro_ick;
+
+static struct clk_hw_omap mspro_ick_hw = {
+ .hw = {
+ .clk = &mspro_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk omapctrl_ick;
+
+static struct clk_hw_omap omapctrl_ick_hw = {
+ .hw = {
+ .clk = &omapctrl_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(omapctrl_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk pka_ick;
+
+static struct clk_hw_omap pka_ick_hw = {
+ .hw = {
+ .clk = &pka_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_PKA_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(pka_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk rng_ick;
+
+static struct clk_hw_omap rng_ick_hw = {
+ .hw = {
+ .clk = &rng_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_RNG_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(rng_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk sdma_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(sdma_fck, "core_l3_clkdm");
+DEFINE_STRUCT_CLK(sdma_fck, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sdma_ick;
+
+static struct clk_hw_omap sdma_ick_hw = {
+ .hw = {
+ .clk = &sdma_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sdma_ick, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sdrc_ick;
+
+static struct clk_hw_omap sdrc_ick_hw = {
+ .hw = {
+ .clk = &sdrc_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP24XX_AUTO_SDRC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sdrc_ick, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sha_ick;
+
+static struct clk_hw_omap sha_ick_hw = {
+ .hw = {
+ .clk = &sha_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_SHA_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sha_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk ssi_l4_ick;
+
+static struct clk_hw_omap ssi_l4_ick_hw = {
+ .hw = {
+ .clk = &ssi_l4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP24XX_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ssi_l4_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_242X },
+ { .div = 8, .val = 8, .flags = RATE_IN_242X },
+ { .div = 0 }
+};
+
+static const struct clksel ssi_ssr_sst_fck_clksel[] = {
+ { .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *ssi_ssr_sst_fck_parent_names[] = {
+ "core_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_sst_fck, "core_l3_clkdm",
+ ssi_ssr_sst_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_SSI_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ OMAP24XX_EN_SSI_SHIFT, &clkhwops_wait,
+ ssi_ssr_sst_fck_parent_names, dsp_fck_ops);
+
+static struct clk sync_32k_ick;
+
+static struct clk_hw_omap sync_32k_ick_hw = {
+ .hw = {
+ .clk = &sync_32k_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sync_32k_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate common_clkout_src_core_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_96m_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_54m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel common_clkout_src_clksel[] = {
+ { .parent = &core_ck, .rates = common_clkout_src_core_rates },
+ { .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
+ { .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
+ { .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
+ { .parent = NULL },
+};
+
+static const char *sys_clkout_src_parent_names[] = {
+ "core_ck", "sys_ck", "func_96m_ck", "func_54m_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(sys_clkout_src, "wkup_clkdm", common_clkout_src_clksel,
+ OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_SOURCE_MASK,
+ OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_EN_SHIFT,
+ NULL, sys_clkout_src_parent_names, gpt1_fck_ops);
+
+DEFINE_CLK_DIVIDER(sys_clkout, "sys_clkout_src", &sys_clkout_src, 0x0,
+ OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_DIV_SHIFT,
+ OMAP24XX_CLKOUT_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(sys_clkout2_src, "wkup_clkdm",
+ common_clkout_src_clksel, OMAP2420_PRCM_CLKOUT_CTRL,
+ OMAP2420_CLKOUT2_SOURCE_MASK,
+ OMAP2420_PRCM_CLKOUT_CTRL, OMAP2420_CLKOUT2_EN_SHIFT,
+ NULL, sys_clkout_src_parent_names, gpt1_fck_ops);
+
+DEFINE_CLK_DIVIDER(sys_clkout2, "sys_clkout2_src", &sys_clkout2_src, 0x0,
+ OMAP2420_PRCM_CLKOUT_CTRL, OMAP2420_CLKOUT2_DIV_SHIFT,
+ OMAP2420_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+static struct clk uart1_fck;
+
+static struct clk_hw_omap uart1_fck_hw = {
+ .hw = {
+ .clk = &uart1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart1_ick;
+
+static struct clk_hw_omap uart1_ick_hw = {
+ .hw = {
+ .clk = &uart1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk uart2_fck;
+
+static struct clk_hw_omap uart2_fck_hw = {
+ .hw = {
+ .clk = &uart2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart2_ick;
+
+static struct clk_hw_omap uart2_ick_hw = {
+ .hw = {
+ .clk = &uart2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk uart3_fck;
+
+static struct clk_hw_omap uart3_fck_hw = {
+ .hw = {
+ .clk = &uart3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP24XX_EN_UART3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart3_ick;
+
+static struct clk_hw_omap uart3_ick_hw = {
+ .hw = {
+ .clk = &uart3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP24XX_EN_UART3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk usb_fck;
+
+static struct clk_hw_omap usb_fck_hw = {
+ .hw = {
+ .clk = &usb_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP24XX_EN_USB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usb_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel usb_l4_ick_clksel[] = {
+ { .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
+ { .parent = NULL },
+};
+
+static const char *usb_l4_ick_parent_names[] = {
+ "core_l3_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_ick_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_USB_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ OMAP24XX_EN_USB_SHIFT, &clkhwops_iclk_wait,
+ usb_l4_ick_parent_names, dsp_fck_ops);
+
+static struct clk virt_prcm_set;
+
+static const char *virt_prcm_set_parent_names[] = {
+ "mpu_ck",
+};
+
+static const struct clk_ops virt_prcm_set_ops = {
+ .recalc_rate = &omap2_table_mpu_recalc,
+ .set_rate = &omap2_select_table_rate,
+ .round_rate = &omap2_round_to_table_rate,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(virt_prcm_set, NULL);
+DEFINE_STRUCT_CLK(virt_prcm_set, virt_prcm_set_parent_names, virt_prcm_set_ops);
+
+static const struct clksel_rate vlynq_fck_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_242X },
+ { .div = 0 }
+};
+
+static const struct clksel_rate vlynq_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_242X },
+ { .div = 2, .val = 2, .flags = RATE_IN_242X },
+ { .div = 3, .val = 3, .flags = RATE_IN_242X },
+ { .div = 4, .val = 4, .flags = RATE_IN_242X },
+ { .div = 6, .val = 6, .flags = RATE_IN_242X },
+ { .div = 8, .val = 8, .flags = RATE_IN_242X },
+ { .div = 9, .val = 9, .flags = RATE_IN_242X },
+ { .div = 12, .val = 12, .flags = RATE_IN_242X },
+ { .div = 16, .val = 16, .flags = RATE_IN_242X },
+ { .div = 18, .val = 18, .flags = RATE_IN_242X },
+ { .div = 0 }
+};
+
+static const struct clksel vlynq_fck_clksel[] = {
+ { .parent = &func_96m_ck, .rates = vlynq_fck_96m_rates },
+ { .parent = &core_ck, .rates = vlynq_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *vlynq_fck_parent_names[] = {
+ "func_96m_ck", "core_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(vlynq_fck, "core_l3_clkdm", vlynq_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP2420_CLKSEL_VLYNQ_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP2420_EN_VLYNQ_SHIFT, &clkhwops_wait,
+ vlynq_fck_parent_names, dss1_fck_ops);
+
+static struct clk vlynq_ick;
+
+static struct clk_hw_omap vlynq_ick_hw = {
+ .hw = {
+ .clk = &vlynq_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(vlynq_ick, gfx_ick_parent_names, aes_ick_ops);
+
+static struct clk wdt1_ick;
+
+static struct clk_hw_omap wdt1_ick_hw = {
+ .hw = {
+ .clk = &wdt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_WDT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk wdt1_osc_ck;
+
+static const struct clk_ops wdt1_osc_ck_ops = {};
+
+DEFINE_STRUCT_CLK_HW_OMAP(wdt1_osc_ck, NULL);
+DEFINE_STRUCT_CLK(wdt1_osc_ck, sys_ck_parent_names, wdt1_osc_ck_ops);
+
+static struct clk wdt3_fck;
+
+static struct clk_hw_omap wdt3_fck_hw = {
+ .hw = {
+ .clk = &wdt3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP2420_EN_WDT3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt3_fck, gpios_fck_parent_names, aes_ick_ops);
+
+static struct clk wdt3_ick;
+
+static struct clk_hw_omap wdt3_ick_hw = {
+ .hw = {
+ .clk = &wdt3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_WDT3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt3_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk wdt4_fck;
+
+static struct clk_hw_omap wdt4_fck_hw = {
+ .hw = {
+ .clk = &wdt4_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt4_fck, gpios_fck_parent_names, aes_ick_ops);
+
+static struct clk wdt4_ick;
+
+static struct clk_hw_omap wdt4_ick_hw = {
+ .hw = {
+ .clk = &wdt4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt4_ick, aes_ick_parent_names, aes_ick_ops);
+
+/*
+ * clkdev integration
+ */
+
+static struct omap_clk omap2420_clks[] = {
+ /* external root sources */
+ CLK(NULL, "func_32k_ck", &func_32k_ck, CK_242X),
+ CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_242X),
+ CLK(NULL, "osc_ck", &osc_ck, CK_242X),
+ CLK(NULL, "sys_ck", &sys_ck, CK_242X),
+ CLK(NULL, "alt_ck", &alt_ck, CK_242X),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_242X),
+ /* internal analog sources */
+ CLK(NULL, "dpll_ck", &dpll_ck, CK_242X),
+ CLK(NULL, "apll96_ck", &apll96_ck, CK_242X),
+ CLK(NULL, "apll54_ck", &apll54_ck, CK_242X),
+ /* internal prcm root sources */
+ CLK(NULL, "func_54m_ck", &func_54m_ck, CK_242X),
+ CLK(NULL, "core_ck", &core_ck, CK_242X),
+ CLK(NULL, "func_96m_ck", &func_96m_ck, CK_242X),
+ CLK(NULL, "func_48m_ck", &func_48m_ck, CK_242X),
+ CLK(NULL, "func_12m_ck", &func_12m_ck, CK_242X),
+ CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_242X),
+ CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_242X),
+ CLK(NULL, "sys_clkout", &sys_clkout, CK_242X),
+ CLK(NULL, "sys_clkout2_src", &sys_clkout2_src, CK_242X),
+ CLK(NULL, "sys_clkout2", &sys_clkout2, CK_242X),
+ CLK(NULL, "emul_ck", &emul_ck, CK_242X),
+ /* mpu domain clocks */
+ CLK(NULL, "mpu_ck", &mpu_ck, CK_242X),
+ /* dsp domain clocks */
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_242X),
+ CLK(NULL, "dsp_ick", &dsp_ick, CK_242X),
+ CLK(NULL, "iva1_ifck", &iva1_ifck, CK_242X),
+ CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck, CK_242X),
+ /* GFX domain clocks */
+ CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_242X),
+ CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_242X),
+ CLK(NULL, "gfx_ick", &gfx_ick, CK_242X),
+ /* DSS domain clocks */
+ CLK("omapdss_dss", "ick", &dss_ick, CK_242X),
+ CLK(NULL, "dss_ick", &dss_ick, CK_242X),
+ CLK(NULL, "dss1_fck", &dss1_fck, CK_242X),
+ CLK(NULL, "dss2_fck", &dss2_fck, CK_242X),
+ CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_242X),
+ /* L3 domain clocks */
+ CLK(NULL, "core_l3_ck", &core_l3_ck, CK_242X),
+ CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_242X),
+ CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_242X),
+ /* L4 domain clocks */
+ CLK(NULL, "l4_ck", &l4_ck, CK_242X),
+ CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_242X),
+ CLK(NULL, "wu_l4_ick", &wu_l4_ick, CK_242X),
+ /* virtual meta-group clock */
+ CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_242X),
+ /* general l4 interface ck, multi-parent functional clk */
+ CLK(NULL, "gpt1_ick", &gpt1_ick, CK_242X),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_242X),
+ CLK(NULL, "gpt2_ick", &gpt2_ick, CK_242X),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_242X),
+ CLK(NULL, "gpt3_ick", &gpt3_ick, CK_242X),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_242X),
+ CLK(NULL, "gpt4_ick", &gpt4_ick, CK_242X),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_242X),
+ CLK(NULL, "gpt5_ick", &gpt5_ick, CK_242X),
+ CLK(NULL, "gpt5_fck", &gpt5_fck, CK_242X),
+ CLK(NULL, "gpt6_ick", &gpt6_ick, CK_242X),
+ CLK(NULL, "gpt6_fck", &gpt6_fck, CK_242X),
+ CLK(NULL, "gpt7_ick", &gpt7_ick, CK_242X),
+ CLK(NULL, "gpt7_fck", &gpt7_fck, CK_242X),
+ CLK(NULL, "gpt8_ick", &gpt8_ick, CK_242X),
+ CLK(NULL, "gpt8_fck", &gpt8_fck, CK_242X),
+ CLK(NULL, "gpt9_ick", &gpt9_ick, CK_242X),
+ CLK(NULL, "gpt9_fck", &gpt9_fck, CK_242X),
+ CLK(NULL, "gpt10_ick", &gpt10_ick, CK_242X),
+ CLK(NULL, "gpt10_fck", &gpt10_fck, CK_242X),
+ CLK(NULL, "gpt11_ick", &gpt11_ick, CK_242X),
+ CLK(NULL, "gpt11_fck", &gpt11_fck, CK_242X),
+ CLK(NULL, "gpt12_ick", &gpt12_ick, CK_242X),
+ CLK(NULL, "gpt12_fck", &gpt12_fck, CK_242X),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_242X),
+ CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_242X),
+ CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_242X),
+ CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_242X),
+ CLK(NULL, "mcbsp2_ick", &mcbsp2_ick, CK_242X),
+ CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_242X),
+ CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_242X),
+ CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_242X),
+ CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_242X),
+ CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_242X),
+ CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_242X),
+ CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_242X),
+ CLK(NULL, "uart1_ick", &uart1_ick, CK_242X),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_242X),
+ CLK(NULL, "uart2_ick", &uart2_ick, CK_242X),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_242X),
+ CLK(NULL, "uart3_ick", &uart3_ick, CK_242X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_242X),
+ CLK(NULL, "gpios_ick", &gpios_ick, CK_242X),
+ CLK(NULL, "gpios_fck", &gpios_fck, CK_242X),
+ CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_242X),
+ CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick, CK_242X),
+ CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck, CK_242X),
+ CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_242X),
+ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_242X),
+ CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_242X),
+ CLK("omap24xxcam", "fck", &cam_fck, CK_242X),
+ CLK(NULL, "cam_fck", &cam_fck, CK_242X),
+ CLK("omap24xxcam", "ick", &cam_ick, CK_242X),
+ CLK(NULL, "cam_ick", &cam_ick, CK_242X),
+ CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_242X),
+ CLK(NULL, "wdt4_ick", &wdt4_ick, CK_242X),
+ CLK(NULL, "wdt4_fck", &wdt4_fck, CK_242X),
+ CLK(NULL, "wdt3_ick", &wdt3_ick, CK_242X),
+ CLK(NULL, "wdt3_fck", &wdt3_fck, CK_242X),
+ CLK(NULL, "mspro_ick", &mspro_ick, CK_242X),
+ CLK(NULL, "mspro_fck", &mspro_fck, CK_242X),
+ CLK("mmci-omap.0", "ick", &mmc_ick, CK_242X),
+ CLK(NULL, "mmc_ick", &mmc_ick, CK_242X),
+ CLK("mmci-omap.0", "fck", &mmc_fck, CK_242X),
+ CLK(NULL, "mmc_fck", &mmc_fck, CK_242X),
+ CLK(NULL, "fac_ick", &fac_ick, CK_242X),
+ CLK(NULL, "fac_fck", &fac_fck, CK_242X),
+ CLK(NULL, "eac_ick", &eac_ick, CK_242X),
+ CLK(NULL, "eac_fck", &eac_fck, CK_242X),
+ CLK("omap_hdq.0", "ick", &hdq_ick, CK_242X),
+ CLK(NULL, "hdq_ick", &hdq_ick, CK_242X),
+ CLK("omap_hdq.0", "fck", &hdq_fck, CK_242X),
+ CLK(NULL, "hdq_fck", &hdq_fck, CK_242X),
+ CLK("omap_i2c.1", "ick", &i2c1_ick, CK_242X),
+ CLK(NULL, "i2c1_ick", &i2c1_ick, CK_242X),
+ CLK(NULL, "i2c1_fck", &i2c1_fck, CK_242X),
+ CLK("omap_i2c.2", "ick", &i2c2_ick, CK_242X),
+ CLK(NULL, "i2c2_ick", &i2c2_ick, CK_242X),
+ CLK(NULL, "i2c2_fck", &i2c2_fck, CK_242X),
+ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_242X),
+ CLK(NULL, "sdma_fck", &sdma_fck, CK_242X),
+ CLK(NULL, "sdma_ick", &sdma_ick, CK_242X),
+ CLK(NULL, "sdrc_ick", &sdrc_ick, CK_242X),
+ CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
+ CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
+ CLK(NULL, "des_ick", &des_ick, CK_242X),
+ CLK("omap-sham", "ick", &sha_ick, CK_242X),
+ CLK(NULL, "sha_ick", &sha_ick, CK_242X),
+ CLK("omap_rng", "ick", &rng_ick, CK_242X),
+ CLK(NULL, "rng_ick", &rng_ick, CK_242X),
+ CLK("omap-aes", "ick", &aes_ick, CK_242X),
+ CLK(NULL, "aes_ick", &aes_ick, CK_242X),
+ CLK(NULL, "pka_ick", &pka_ick, CK_242X),
+ CLK(NULL, "usb_fck", &usb_fck, CK_242X),
+ CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
+ CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_242X),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_242X),
+ CLK(NULL, "timer_ext_ck", &alt_ck, CK_242X),
+ CLK(NULL, "cpufreq_ck", &virt_prcm_set, CK_242X),
+};
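+
+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * how a driver might look up the clkdev aliases registered in the
+ * omap2420_clks[] table above.  The dev_id "omap_hdq.0" and the con_ids
+ * "ick"/"fck" come from the CLK() entries; the function itself is
+ * hypothetical consumer-side code using the standard clk API from
+ * <linux/clk.h> and <linux/err.h>.
+ */
+#if 0	/* example only, never compiled */
+static int example_hdq_clk_usage(struct device *dev)	/* dev name: "omap_hdq.0" */
+{
+	struct clk *ick, *fck;
+
+	ick = clk_get(dev, "ick");	/* matches CLK("omap_hdq.0", "ick", &hdq_ick, CK_242X) */
+	if (IS_ERR(ick))
+		return PTR_ERR(ick);
+
+	fck = clk_get(dev, "fck");	/* matches CLK("omap_hdq.0", "fck", &hdq_fck, CK_242X) */
+	if (IS_ERR(fck)) {
+		clk_put(ick);
+		return PTR_ERR(fck);
+	}
+
+	clk_prepare_enable(ick);	/* interface clock */
+	clk_prepare_enable(fck);	/* functional clock */
+
+	return 0;
+}
+#endif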
+
+static const char *enable_init_clks[] = {
+ "apll96_ck",
+ "apll54_ck",
+ "sync_32k_ick",
+ "omapctrl_ick",
+ "gpmc_fck",
+ "sdrc_ick",
+};
+
+/*
+ * init code
+ */
+
+int __init omap2420_clk_init(void)
+{
+ struct omap_clk *c;
+
+ prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
+ cpu_mask = RATE_IN_242X;
+ rate_table = omap2420_rate_table;
+
+ omap2xxx_clkt_dpllcore_init(&dpll_ck_hw.hw);
+
+ omap2xxx_clkt_vps_check_bootloader_rates();
+
+ for (c = omap2420_clks; c < omap2420_clks + ARRAY_SIZE(omap2420_clks);
+ c++) {
+ clkdev_add(&c->lk);
+ if (!__clk_init(NULL, c->lk.clk))
+ omap2_init_clk_hw_omap_clocks(c->lk.clk);
+ }
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(&sys_ck) / 1000000),
+ (clk_get_rate(&sys_ck) / 100000) % 10,
+ (clk_get_rate(&dpll_ck) / 1000000),
+ (clk_get_rate(&mpu_ck) / 1000000));
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/cclock2430_data.c b/arch/arm/mach-omap2/cclock2430_data.c
new file mode 100644
index 00000000000..eda079b96c6
--- /dev/null
+++ b/arch/arm/mach-omap2/cclock2430_data.c
@@ -0,0 +1,2065 @@
+/*
+ * OMAP2430 clock data
+ *
+ * Copyright (C) 2005-2009, 2012 Texas Instruments, Inc.
+ * Copyright (C) 2004-2011 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-private.h>
+#include <linux/list.h>
+
+#include "soc.h"
+#include "iomap.h"
+#include "clock.h"
+#include "clock2xxx.h"
+#include "opp2xxx.h"
+#include "cm2xxx.h"
+#include "prm2xxx.h"
+#include "prm-regbits-24xx.h"
+#include "cm-regbits-24xx.h"
+#include "sdrc.h"
+#include "control.h"
+
+#define OMAP_CM_REGADDR OMAP2430_CM_REGADDR
+
+/*
+ * 2430 clock tree.
+ *
+ * NOTE: In many cases we assign a 'default' parent here even though the
+ * parent is actually selectable; the set-parent calls will also switch
+ * sources.
+ *
+ * Several sources are given initial rates which may be wrong; these are
+ * fixed up in the init function.
+ *
+ * Things are broadly separated below by clock domains. It is
+ * noteworthy that most peripherals have dependencies on multiple clock
+ * domains. Many get their interface clocks from the L4 domain, but get
+ * functional clocks from fixed sources or other core domain derived
+ * clocks.
+ */
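+
+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * since many of the clocks below are registered with only a 'default'
+ * parent, a user of this data would switch sources at run time through
+ * the common clock framework.  The clock names are taken from this file;
+ * the function itself is hypothetical and relies on <linux/clk.h> and
+ * <linux/err.h>.
+ */
+#if 0	/* example only, never compiled */
+static int example_switch_dss1_source(void)
+{
+	struct clk *dss1, *core;
+
+	dss1 = clk_get(NULL, "dss1_fck");
+	if (IS_ERR(dss1))
+		return PTR_ERR(dss1);
+
+	core = clk_get(NULL, "core_ck");
+	if (IS_ERR(core)) {
+		clk_put(dss1);
+		return PTR_ERR(core);
+	}
+
+	/* reparent dss1_fck to core_ck: the set-parent call also switches
+	 * the source bits, via dss1_fck_clksel/omap2_clksel_set_parent */
+	clk_set_parent(dss1, core);
+
+	clk_put(core);
+	clk_put(dss1);
+
+	return 0;
+}
+#endif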
+
+DEFINE_CLK_FIXED_RATE(alt_ck, CLK_IS_ROOT, 54000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(func_32k_ck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
+
+static struct clk osc_ck;
+
+static const struct clk_ops osc_ck_ops = {
+ .enable = &omap2_enable_osc_ck,
+ .disable = omap2_disable_osc_ck,
+ .recalc_rate = &omap2_osc_clk_recalc,
+};
+
+static struct clk_hw_omap osc_ck_hw = {
+ .hw = {
+ .clk = &osc_ck,
+ },
+};
+
+static struct clk osc_ck = {
+ .name = "osc_ck",
+ .ops = &osc_ck_ops,
+ .hw = &osc_ck_hw.hw,
+ .flags = CLK_IS_ROOT,
+};
+
+DEFINE_CLK_FIXED_RATE(secure_32k_ck, CLK_IS_ROOT, 32768, 0x0);
+
+static struct clk sys_ck;
+
+static const char *sys_ck_parent_names[] = {
+ "osc_ck",
+};
+
+static const struct clk_ops sys_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .recalc_rate = &omap2xxx_sys_clk_recalc,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(sys_ck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(sys_ck, sys_ck_parent_names, sys_ck_ops);
+
+static struct dpll_data dpll_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .mult_mask = OMAP24XX_DPLL_MULT_MASK,
+ .div1_mask = OMAP24XX_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP24XX_EN_DPLL_MASK,
+ .max_multiplier = 1023,
+ .min_divider = 1,
+ .max_divider = 16,
+};
+
+static struct clk dpll_ck;
+
+static const char *dpll_ck_parent_names[] = {
+ "sys_ck",
+};
+
+static const struct clk_ops dpll_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap2_dpllcore_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap2_reprogram_dpllcore,
+};
+
+static struct clk_hw_omap dpll_ck_hw = {
+ .hw = {
+ .clk = &dpll_ck,
+ },
+ .ops = &clkhwops_omap2xxx_dpll,
+ .dpll_data = &dpll_dd,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll_ck, dpll_ck_parent_names, dpll_ck_ops);
+
+static struct clk core_ck;
+
+static const char *core_ck_parent_names[] = {
+ "dpll_ck",
+};
+
+static const struct clk_ops core_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_ck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
+
+DEFINE_CLK_DIVIDER(core_l3_ck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_L3_SHIFT, OMAP24XX_CLKSEL_L3_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(l4_ck, "core_l3_ck", &core_l3_ck, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_L4_SHIFT, OMAP24XX_CLKSEL_L4_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk aes_ick;
+
+static const char *aes_ick_parent_names[] = {
+ "l4_ck",
+};
+
+static const struct clk_ops aes_ick_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk_hw_omap aes_ick_hw = {
+ .hw = {
+ .clk = &aes_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_AES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(aes_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk apll54_ck;
+
+static const struct clk_ops apll54_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clk_apll54_enable,
+ .disable = &omap2_clk_apll54_disable,
+ .recalc_rate = &omap2_clk_apll54_recalc,
+};
+
+static struct clk_hw_omap apll54_ck_hw = {
+ .hw = {
+ .clk = &apll54_ck,
+ },
+ .ops = &clkhwops_apll54,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(apll54_ck, dpll_ck_parent_names, apll54_ck_ops);
+
+static struct clk apll96_ck;
+
+static const struct clk_ops apll96_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clk_apll96_enable,
+ .disable = &omap2_clk_apll96_disable,
+ .recalc_rate = &omap2_clk_apll96_recalc,
+};
+
+static struct clk_hw_omap apll96_ck_hw = {
+ .hw = {
+ .clk = &apll96_ck,
+ },
+ .ops = &clkhwops_apll96,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(apll96_ck, dpll_ck_parent_names, apll96_ck_ops);
+
+static const char *func_96m_ck_parent_names[] = {
+ "apll96_ck", "alt_ck",
+};
+
+DEFINE_CLK_MUX(func_96m_ck, func_96m_ck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP2430_96M_SOURCE_SHIFT,
+ OMAP2430_96M_SOURCE_WIDTH, 0x0, NULL);
+
+static struct clk cam_fck;
+
+static const char *cam_fck_parent_names[] = {
+ "func_96m_ck",
+};
+
+static struct clk_hw_omap cam_fck_hw = {
+ .hw = {
+ .clk = &cam_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_CAM_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk cam_ick;
+
+static struct clk_hw_omap cam_ick_hw = {
+ .hw = {
+ .clk = &cam_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_CAM_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk des_ick;
+
+static struct clk_hw_omap des_ick_hw = {
+ .hw = {
+ .clk = &des_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_DES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(des_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate dsp_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel dsp_fck_clksel[] = {
+ { .parent = &core_ck, .rates = dsp_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *dsp_fck_parent_names[] = {
+ "core_ck",
+};
+
+static struct clk dsp_fck;
+
+static const struct clk_ops dsp_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dsp_fck, "dsp_clkdm", dsp_fck_clksel,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_DSP_MASK,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
+ OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait,
+ dsp_fck_parent_names, dsp_fck_ops);
+
+static const struct clksel_rate dss1_fck_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate dss1_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 5, .val = 5, .flags = RATE_IN_24XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_24XX },
+ { .div = 8, .val = 8, .flags = RATE_IN_24XX },
+ { .div = 9, .val = 9, .flags = RATE_IN_24XX },
+ { .div = 12, .val = 12, .flags = RATE_IN_24XX },
+ { .div = 16, .val = 16, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel dss1_fck_clksel[] = {
+ { .parent = &sys_ck, .rates = dss1_fck_sys_rates },
+ { .parent = &core_ck, .rates = dss1_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *dss1_fck_parent_names[] = {
+ "sys_ck", "core_ck",
+};
+
+static const struct clk_ops dss1_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dss1_fck, "dss_clkdm", dss1_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_DSS1_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_DSS1_SHIFT, NULL,
+ dss1_fck_parent_names, dss1_fck_ops);
+
+static const struct clksel_rate dss2_fck_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate dss2_fck_48m_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate func_48m_apll96_rates[] = {
+ { .div = 2, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate func_48m_alt_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel func_48m_clksel[] = {
+ { .parent = &apll96_ck, .rates = func_48m_apll96_rates },
+ { .parent = &alt_ck, .rates = func_48m_alt_rates },
+ { .parent = NULL },
+};
+
+static const char *func_48m_ck_parent_names[] = {
+ "apll96_ck", "alt_ck",
+};
+
+static struct clk func_48m_ck;
+
+static const struct clk_ops func_48m_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+static struct clk_hw_omap func_48m_ck_hw = {
+ .hw = {
+ .clk = &func_48m_ck,
+ },
+ .clksel = func_48m_clksel,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(func_48m_ck, func_48m_ck_parent_names, func_48m_ck_ops);
+
+static const struct clksel dss2_fck_clksel[] = {
+ { .parent = &sys_ck, .rates = dss2_fck_sys_rates },
+ { .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
+ { .parent = NULL },
+};
+
+static const char *dss2_fck_parent_names[] = {
+ "sys_ck", "func_48m_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(dss2_fck, "dss_clkdm", dss2_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_DSS2_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_DSS2_SHIFT, NULL,
+ dss2_fck_parent_names, dss1_fck_ops);
+
+static const char *func_54m_ck_parent_names[] = {
+ "apll54_ck", "alt_ck",
+};
+
+DEFINE_CLK_MUX(func_54m_ck, func_54m_ck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ OMAP24XX_54M_SOURCE_SHIFT, OMAP24XX_54M_SOURCE_WIDTH, 0x0, NULL);
+
+static struct clk dss_54m_fck;
+
+static const char *dss_54m_fck_parent_names[] = {
+ "func_54m_ck",
+};
+
+static struct clk_hw_omap dss_54m_fck_hw = {
+ .hw = {
+ .clk = &dss_54m_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_54m_fck, dss_54m_fck_parent_names, aes_ick_ops);
+
+static struct clk dss_ick;
+
+static struct clk_hw_omap dss_ick_hw = {
+ .hw = {
+ .clk = &dss_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk emul_ck;
+
+static struct clk_hw_omap emul_ck_hw = {
+ .hw = {
+ .clk = &emul_ck,
+ },
+ .enable_reg = OMAP2430_PRCM_CLKEMUL_CTRL,
+ .enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(emul_ck, dss_54m_fck_parent_names, aes_ick_ops);
+
+DEFINE_CLK_FIXED_FACTOR(func_12m_ck, "func_48m_ck", &func_48m_ck, 0x0, 1, 4);
+
+static struct clk fac_fck;
+
+static const char *fac_fck_parent_names[] = {
+ "func_12m_ck",
+};
+
+static struct clk_hw_omap fac_fck_hw = {
+ .hw = {
+ .clk = &fac_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fac_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk fac_ick;
+
+static struct clk_hw_omap fac_ick_hw = {
+ .hw = {
+ .clk = &fac_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fac_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel gfx_fck_clksel[] = {
+ { .parent = &core_l3_ck, .rates = gfx_l3_rates },
+ { .parent = NULL },
+};
+
+static const char *gfx_2d_fck_parent_names[] = {
+ "core_l3_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gfx_2d_fck, "gfx_clkdm", gfx_fck_clksel,
+ OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ OMAP_CLKSEL_GFX_MASK,
+ OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ OMAP24XX_EN_2D_SHIFT, &clkhwops_wait,
+ gfx_2d_fck_parent_names, dsp_fck_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gfx_3d_fck, "gfx_clkdm", gfx_fck_clksel,
+ OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ OMAP_CLKSEL_GFX_MASK,
+ OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ OMAP24XX_EN_3D_SHIFT, &clkhwops_wait,
+ gfx_2d_fck_parent_names, dsp_fck_ops);
+
+static struct clk gfx_ick;
+
+static const char *gfx_ick_parent_names[] = {
+ "core_l3_ck",
+};
+
+static struct clk_hw_omap gfx_ick_hw = {
+ .hw = {
+ .clk = &gfx_ick,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP_EN_GFX_SHIFT,
+ .clkdm_name = "gfx_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gfx_ick, gfx_ick_parent_names, aes_ick_ops);
+
+static struct clk gpio5_fck;
+
+static const char *gpio5_fck_parent_names[] = {
+ "func_32k_ck",
+};
+
+static struct clk_hw_omap gpio5_fck_hw = {
+ .hw = {
+ .clk = &gpio5_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_GPIO5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio5_fck, gpio5_fck_parent_names, aes_ick_ops);
+
+static struct clk gpio5_ick;
+
+static struct clk_hw_omap gpio5_ick_hw = {
+ .hw = {
+ .clk = &gpio5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_GPIO5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio5_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk gpios_fck;
+
+static struct clk_hw_omap gpios_fck_hw = {
+ .hw = {
+ .clk = &gpios_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpios_fck, gpio5_fck_parent_names, aes_ick_ops);
+
+static struct clk wu_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(wu_l4_ick, "wkup_clkdm");
+DEFINE_STRUCT_CLK(wu_l4_ick, dpll_ck_parent_names, core_ck_ops);
+
+static struct clk gpios_ick;
+
+static const char *gpios_ick_parent_names[] = {
+ "wu_l4_ick",
+};
+
+static struct clk_hw_omap gpios_ick_hw = {
+ .hw = {
+ .clk = &gpios_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpios_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk gpmc_fck;
+
+static struct clk_hw_omap gpmc_fck_hw = {
+ .hw = {
+ .clk = &gpmc_fck,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpmc_fck, gfx_ick_parent_names, core_ck_ops);
+
+static const struct clksel_rate gpt_alt_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel omap24xx_gpt_clksel[] = {
+ { .parent = &func_32k_ck, .rates = gpt_32k_rates },
+ { .parent = &sys_ck, .rates = gpt_sys_rates },
+ { .parent = &alt_ck, .rates = gpt_alt_rates },
+ { .parent = NULL },
+};
+
+static const char *gpt10_fck_parent_names[] = {
+ "func_32k_ck", "sys_ck", "alt_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT10_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT10_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt10_ick;
+
+static struct clk_hw_omap gpt10_ick_hw = {
+ .hw = {
+ .clk = &gpt10_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt10_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT11_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT11_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt11_ick;
+
+static struct clk_hw_omap gpt11_ick_hw = {
+ .hw = {
+ .clk = &gpt11_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt11_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt12_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT12_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT12_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt12_ick;
+
+static struct clk_hw_omap gpt12_ick_hw = {
+ .hw = {
+ .clk = &gpt12_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt12_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clk_ops gpt1_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_GPT1_MASK,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ OMAP24XX_EN_GPT1_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, gpt1_fck_ops);
+
+static struct clk gpt1_ick;
+
+static struct clk_hw_omap gpt1_ick_hw = {
+ .hw = {
+ .clk = &gpt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt1_ick, gpios_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT2_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT2_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt2_ick;
+
+static struct clk_hw_omap gpt2_ick_hw = {
+ .hw = {
+ .clk = &gpt2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt2_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT3_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT3_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt3_ick;
+
+static struct clk_hw_omap gpt3_ick_hw = {
+ .hw = {
+ .clk = &gpt3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt3_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT4_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT4_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt4_ick;
+
+static struct clk_hw_omap gpt4_ick_hw = {
+ .hw = {
+ .clk = &gpt4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt4_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT5_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT5_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt5_ick;
+
+static struct clk_hw_omap gpt5_ick_hw = {
+ .hw = {
+ .clk = &gpt5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt5_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT6_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT6_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt6_ick;
+
+static struct clk_hw_omap gpt6_ick_hw = {
+ .hw = {
+ .clk = &gpt6_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt6_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT7_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT7_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt7_ick;
+
+static struct clk_hw_omap gpt7_ick_hw = {
+ .hw = {
+ .clk = &gpt7_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt7_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk gpt8_fck;
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT8_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT8_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt8_ick;
+
+static struct clk_hw_omap gpt8_ick_hw = {
+ .hw = {
+ .clk = &gpt8_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt8_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
+ OMAP24XX_CLKSEL_GPT9_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_GPT9_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, dss1_fck_ops);
+
+static struct clk gpt9_ick;
+
+static struct clk_hw_omap gpt9_ick_hw = {
+ .hw = {
+ .clk = &gpt9_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt9_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk hdq_fck;
+
+static struct clk_hw_omap hdq_fck_hw = {
+ .hw = {
+ .clk = &hdq_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_fck, fac_fck_parent_names, aes_ick_ops);
+
+static struct clk hdq_ick;
+
+static struct clk_hw_omap hdq_ick_hw = {
+ .hw = {
+ .clk = &hdq_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk i2c1_ick;
+
+static struct clk_hw_omap i2c1_ick_hw = {
+ .hw = {
+ .clk = &i2c1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk i2c2_ick;
+
+static struct clk_hw_omap i2c2_ick_hw = {
+ .hw = {
+ .clk = &i2c2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP2420_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk i2chs1_fck;
+
+static struct clk_hw_omap i2chs1_fck_hw = {
+ .hw = {
+ .clk = &i2chs1_fck,
+ },
+ .ops = &clkhwops_omap2430_i2chs_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_I2CHS1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2chs1_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk i2chs2_fck;
+
+static struct clk_hw_omap i2chs2_fck_hw = {
+ .hw = {
+ .clk = &i2chs2_fck,
+ },
+ .ops = &clkhwops_omap2430_i2chs_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_I2CHS2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2chs2_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk icr_ick;
+
+static struct clk_hw_omap icr_ick_hw = {
+ .hw = {
+ .clk = &icr_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP2430_EN_ICR_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(icr_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static const struct clksel dsp_ick_clksel[] = {
+ { .parent = &dsp_fck, .rates = dsp_ick_rates },
+ { .parent = NULL },
+};
+
+static const char *iva2_1_ick_parent_names[] = {
+ "dsp_fck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(iva2_1_ick, "dsp_clkdm", dsp_ick_clksel,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_DSP_IF_MASK,
+ OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
+ OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait,
+ iva2_1_ick_parent_names, dsp_fck_ops);
+
+static struct clk mailboxes_ick;
+
+static struct clk_hw_omap mailboxes_ick_hw = {
+ .hw = {
+ .clk = &mailboxes_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mailboxes_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_fck_clksel[] = {
+ { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp1_fck_parent_names[] = {
+ "func_96m_ck", "mcbsp_clks",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP1_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_MCBSP1_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp1_ick;
+
+static struct clk_hw_omap mcbsp1_ick_hw = {
+ .hw = {
+ .clk = &mcbsp1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp1_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP2_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP24XX_EN_MCBSP2_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp2_ick;
+
+static struct clk_hw_omap mcbsp2_ick_hw = {
+ .hw = {
+ .clk = &mcbsp2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp2_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP3_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ OMAP2430_EN_MCBSP3_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp3_ick;
+
+static struct clk_hw_omap mcbsp3_ick_hw = {
+ .hw = {
+ .clk = &mcbsp3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MCBSP3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp3_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP4_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ OMAP2430_EN_MCBSP4_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp4_ick;
+
+static struct clk_hw_omap mcbsp4_ick_hw = {
+ .hw = {
+ .clk = &mcbsp4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MCBSP4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp4_ick, aes_ick_parent_names, aes_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_fck_clksel,
+ OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP5_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ OMAP2430_EN_MCBSP5_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, dss1_fck_ops);
+
+static struct clk mcbsp5_ick;
+
+static struct clk_hw_omap mcbsp5_ick_hw = {
+ .hw = {
+ .clk = &mcbsp5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MCBSP5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp5_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mcspi1_fck;
+
+static const char *mcspi1_fck_parent_names[] = {
+ "func_48m_ck",
+};
+
+static struct clk_hw_omap mcspi1_fck_hw = {
+ .hw = {
+ .clk = &mcspi1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk mcspi1_ick;
+
+static struct clk_hw_omap mcspi1_ick_hw = {
+ .hw = {
+ .clk = &mcspi1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mcspi2_fck;
+
+static struct clk_hw_omap mcspi2_fck_hw = {
+ .hw = {
+ .clk = &mcspi2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk mcspi2_ick;
+
+static struct clk_hw_omap mcspi2_ick_hw = {
+ .hw = {
+ .clk = &mcspi2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mcspi3_fck;
+
+static struct clk_hw_omap mcspi3_fck_hw = {
+ .hw = {
+ .clk = &mcspi3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_MCSPI3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi3_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk mcspi3_ick;
+
+static struct clk_hw_omap mcspi3_ick_hw = {
+ .hw = {
+ .clk = &mcspi3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MCSPI3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi3_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate mdm_ick_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_243X },
+ { .div = 4, .val = 4, .flags = RATE_IN_243X },
+ { .div = 6, .val = 6, .flags = RATE_IN_243X },
+ { .div = 9, .val = 9, .flags = RATE_IN_243X },
+ { .div = 0 }
+};
+
+static const struct clksel mdm_ick_clksel[] = {
+ { .parent = &core_ck, .rates = mdm_ick_core_rates },
+ { .parent = NULL },
+};
+
+static const char *mdm_ick_parent_names[] = {
+ "core_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(mdm_ick, "mdm_clkdm", mdm_ick_clksel,
+ OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_CLKSEL),
+ OMAP2430_CLKSEL_MDM_MASK,
+ OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_ICLKEN),
+ OMAP2430_CM_ICLKEN_MDM_EN_MDM_SHIFT,
+ &clkhwops_iclk_wait, mdm_ick_parent_names,
+ dsp_fck_ops);
+
+static struct clk mdm_intc_ick;
+
+static struct clk_hw_omap mdm_intc_ick_hw = {
+ .hw = {
+ .clk = &mdm_intc_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MDM_INTC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mdm_intc_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mdm_osc_ck;
+
+static struct clk_hw_omap mdm_osc_ck_hw = {
+ .hw = {
+ .clk = &mdm_osc_ck,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_FCLKEN),
+ .enable_bit = OMAP2430_EN_OSC_SHIFT,
+ .clkdm_name = "mdm_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mdm_osc_ck, sys_ck_parent_names, aes_ick_ops);
+
+static struct clk mmchs1_fck;
+
+static struct clk_hw_omap mmchs1_fck_hw = {
+ .hw = {
+ .clk = &mmchs1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHS1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs1_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk mmchs1_ick;
+
+static struct clk_hw_omap mmchs1_ick_hw = {
+ .hw = {
+ .clk = &mmchs1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHS1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mmchs2_fck;
+
+static struct clk_hw_omap mmchs2_fck_hw = {
+ .hw = {
+ .clk = &mmchs2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHS2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs2_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk mmchs2_ick;
+
+static struct clk_hw_omap mmchs2_ick_hw = {
+ .hw = {
+ .clk = &mmchs2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHS2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk mmchsdb1_fck;
+
+static struct clk_hw_omap mmchsdb1_fck_hw = {
+ .hw = {
+ .clk = &mmchsdb1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHSDB1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchsdb1_fck, gpio5_fck_parent_names, aes_ick_ops);
+
+static struct clk mmchsdb2_fck;
+
+static struct clk_hw_omap mmchsdb2_fck_hw = {
+ .hw = {
+ .clk = &mmchsdb2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP2430_EN_MMCHSDB2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchsdb2_fck, gpio5_fck_parent_names, aes_ick_ops);
+
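+/*
+ * CLK_DIVIDER_ONE_BASED: the CLKSEL_MPU bitfield holds the divisor
+ * itself (register value N divides core_ck by N, 0 is invalid), so
+ * no +1 offset is applied by the framework.
+ */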
+DEFINE_CLK_DIVIDER(mpu_ck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
+ OMAP24XX_CLKSEL_MPU_SHIFT, OMAP24XX_CLKSEL_MPU_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk mpu_wdt_fck;
+
+static struct clk_hw_omap mpu_wdt_fck_hw = {
+ .hw = {
+ .clk = &mpu_wdt_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mpu_wdt_fck, gpio5_fck_parent_names, aes_ick_ops);
+
+static struct clk mpu_wdt_ick;
+
+static struct clk_hw_omap mpu_wdt_ick_hw = {
+ .hw = {
+ .clk = &mpu_wdt_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mpu_wdt_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk mspro_fck;
+
+static struct clk_hw_omap mspro_fck_hw = {
+ .hw = {
+ .clk = &mspro_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_fck, cam_fck_parent_names, aes_ick_ops);
+
+static struct clk mspro_ick;
+
+static struct clk_hw_omap mspro_ick_hw = {
+ .hw = {
+ .clk = &mspro_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk omapctrl_ick;
+
+static struct clk_hw_omap omapctrl_ick_hw = {
+ .hw = {
+ .clk = &omapctrl_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(omapctrl_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk pka_ick;
+
+static struct clk_hw_omap pka_ick_hw = {
+ .hw = {
+ .clk = &pka_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_PKA_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(pka_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk rng_ick;
+
+static struct clk_hw_omap rng_ick_hw = {
+ .hw = {
+ .clk = &rng_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_RNG_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(rng_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk sdma_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(sdma_fck, "core_l3_clkdm");
+DEFINE_STRUCT_CLK(sdma_fck, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sdma_ick;
+
+static struct clk_hw_omap sdma_ick_hw = {
+ .hw = {
+ .clk = &sdma_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sdma_ick, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sdrc_ick;
+
+static struct clk_hw_omap sdrc_ick_hw = {
+ .hw = {
+ .clk = &sdrc_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP2430_EN_SDRC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sdrc_ick, gfx_ick_parent_names, core_ck_ops);
+
+static struct clk sha_ick;
+
+static struct clk_hw_omap sha_ick_hw = {
+ .hw = {
+ .clk = &sha_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
+ .enable_bit = OMAP24XX_EN_SHA_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sha_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk ssi_l4_ick;
+
+static struct clk_hw_omap ssi_l4_ick_hw = {
+ .hw = {
+ .clk = &ssi_l4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP24XX_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ssi_l4_ick, aes_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 5, .val = 5, .flags = RATE_IN_243X },
+ { .div = 0 }
+};
+
+static const struct clksel ssi_ssr_sst_fck_clksel[] = {
+ { .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
+ { .parent = NULL },
+};
+
+static const char *ssi_ssr_sst_fck_parent_names[] = {
+ "core_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_sst_fck, "core_l3_clkdm",
+ ssi_ssr_sst_fck_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_SSI_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ OMAP24XX_EN_SSI_SHIFT, &clkhwops_wait,
+ ssi_ssr_sst_fck_parent_names, dsp_fck_ops);
+
+static struct clk sync_32k_ick;
+
+static struct clk_hw_omap sync_32k_ick_hw = {
+ .hw = {
+ .clk = &sync_32k_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sync_32k_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static const struct clksel_rate common_clkout_src_core_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_96m_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_clkout_src_54m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel common_clkout_src_clksel[] = {
+ { .parent = &core_ck, .rates = common_clkout_src_core_rates },
+ { .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
+ { .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
+ { .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
+ { .parent = NULL },
+};
+
+static const char *sys_clkout_src_parent_names[] = {
+ "core_ck", "sys_ck", "func_96m_ck", "func_54m_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(sys_clkout_src, "wkup_clkdm", common_clkout_src_clksel,
+ OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_SOURCE_MASK,
+ OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_EN_SHIFT,
+ NULL, sys_clkout_src_parent_names, gpt1_fck_ops);
+
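+/*
+ * CLK_DIVIDER_POWER_OF_TWO: a CLKOUT_DIV field value of n selects a
+ * divide-by-2^n of sys_clkout_src.
+ */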
+DEFINE_CLK_DIVIDER(sys_clkout, "sys_clkout_src", &sys_clkout_src, 0x0,
+ OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_DIV_SHIFT,
+ OMAP24XX_CLKOUT_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+static struct clk uart1_fck;
+
+static struct clk_hw_omap uart1_fck_hw = {
+ .hw = {
+ .clk = &uart1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart1_ick;
+
+static struct clk_hw_omap uart1_ick_hw = {
+ .hw = {
+ .clk = &uart1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk uart2_fck;
+
+static struct clk_hw_omap uart2_fck_hw = {
+ .hw = {
+ .clk = &uart2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart2_ick;
+
+static struct clk_hw_omap uart2_ick_hw = {
+ .hw = {
+ .clk = &uart2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk uart3_fck;
+
+static struct clk_hw_omap uart3_fck_hw = {
+ .hw = {
+ .clk = &uart3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP24XX_EN_UART3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static struct clk uart3_ick;
+
+static struct clk_hw_omap uart3_ick_hw = {
+ .hw = {
+ .clk = &uart3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP24XX_EN_UART3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_ick, aes_ick_parent_names, aes_ick_ops);
+
+static struct clk usb_fck;
+
+static struct clk_hw_omap usb_fck_hw = {
+ .hw = {
+ .clk = &usb_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
+ .enable_bit = OMAP24XX_EN_USB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usb_fck, mcspi1_fck_parent_names, aes_ick_ops);
+
+static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
+ { .div = 0 }
+};
+
+static const struct clksel usb_l4_ick_clksel[] = {
+ { .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
+ { .parent = NULL },
+};
+
+static const char *usb_l4_ick_parent_names[] = {
+ "core_l3_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_ick_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
+ OMAP24XX_CLKSEL_USB_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ OMAP24XX_EN_USB_SHIFT, &clkhwops_iclk_wait,
+ usb_l4_ick_parent_names, dsp_fck_ops);
+
+static struct clk usbhs_ick;
+
+static struct clk_hw_omap usbhs_ick_hw = {
+ .hw = {
+ .clk = &usbhs_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP2430_EN_USBHS_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbhs_ick, gfx_ick_parent_names, aes_ick_ops);
+
+static struct clk virt_prcm_set;
+
+static const char *virt_prcm_set_parent_names[] = {
+ "mpu_ck",
+};
+
+static const struct clk_ops virt_prcm_set_ops = {
+ .recalc_rate = &omap2_table_mpu_recalc,
+ .set_rate = &omap2_select_table_rate,
+ .round_rate = &omap2_round_to_table_rate,
+};
+
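+/*
+ * virt_prcm_set is a virtual clock: its table-based ops pick a whole
+ * PRCM rate configuration from omap2430_rate_table instead of
+ * programming a single divider; it is exposed below as "cpufreq_ck".
+ */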
+DEFINE_STRUCT_CLK_HW_OMAP(virt_prcm_set, NULL);
+DEFINE_STRUCT_CLK(virt_prcm_set, virt_prcm_set_parent_names, virt_prcm_set_ops);
+
+static struct clk wdt1_ick;
+
+static struct clk_hw_omap wdt1_ick_hw = {
+ .hw = {
+ .clk = &wdt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP24XX_EN_WDT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops);
+
+static struct clk wdt1_osc_ck;
+
+static const struct clk_ops wdt1_osc_ck_ops = {};
+
+DEFINE_STRUCT_CLK_HW_OMAP(wdt1_osc_ck, NULL);
+DEFINE_STRUCT_CLK(wdt1_osc_ck, sys_ck_parent_names, wdt1_osc_ck_ops);
+
+static struct clk wdt4_fck;
+
+static struct clk_hw_omap wdt4_fck_hw = {
+ .hw = {
+ .clk = &wdt4_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt4_fck, gpio5_fck_parent_names, aes_ick_ops);
+
+static struct clk wdt4_ick;
+
+static struct clk_hw_omap wdt4_ick_hw = {
+ .hw = {
+ .clk = &wdt4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt4_ick, aes_ick_parent_names, aes_ick_ops);
+
+/*
+ * clkdev integration
+ */
+
+static struct omap_clk omap2430_clks[] = {
+ /* external root sources */
+ CLK(NULL, "func_32k_ck", &func_32k_ck, CK_243X),
+ CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_243X),
+ CLK(NULL, "osc_ck", &osc_ck, CK_243X),
+ CLK("twl", "fck", &osc_ck, CK_243X),
+ CLK(NULL, "sys_ck", &sys_ck, CK_243X),
+ CLK(NULL, "alt_ck", &alt_ck, CK_243X),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_243X),
+ /* internal analog sources */
+ CLK(NULL, "dpll_ck", &dpll_ck, CK_243X),
+ CLK(NULL, "apll96_ck", &apll96_ck, CK_243X),
+ CLK(NULL, "apll54_ck", &apll54_ck, CK_243X),
+ /* internal prcm root sources */
+ CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X),
+ CLK(NULL, "core_ck", &core_ck, CK_243X),
+ CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X),
+ CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X),
+ CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X),
+ CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_243X),
+ CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_243X),
+ CLK(NULL, "sys_clkout", &sys_clkout, CK_243X),
+ CLK(NULL, "emul_ck", &emul_ck, CK_243X),
+ /* mpu domain clocks */
+ CLK(NULL, "mpu_ck", &mpu_ck, CK_243X),
+ /* dsp domain clocks */
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_243X),
+ CLK(NULL, "iva2_1_ick", &iva2_1_ick, CK_243X),
+ /* GFX domain clocks */
+ CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_243X),
+ CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_243X),
+ CLK(NULL, "gfx_ick", &gfx_ick, CK_243X),
+ /* Modem domain clocks */
+ CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
+ CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
+ /* DSS domain clocks */
+ CLK("omapdss_dss", "ick", &dss_ick, CK_243X),
+ CLK(NULL, "dss_ick", &dss_ick, CK_243X),
+ CLK(NULL, "dss1_fck", &dss1_fck, CK_243X),
+ CLK(NULL, "dss2_fck", &dss2_fck, CK_243X),
+ CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X),
+ /* L3 domain clocks */
+ CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X),
+ CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X),
+ CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_243X),
+ /* L4 domain clocks */
+ CLK(NULL, "l4_ck", &l4_ck, CK_243X),
+ CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_243X),
+ CLK(NULL, "wu_l4_ick", &wu_l4_ick, CK_243X),
+ /* virtual meta-group clock */
+ CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_243X),
+ /* general l4 interface ck, multi-parent functional clk */
+ CLK(NULL, "gpt1_ick", &gpt1_ick, CK_243X),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_243X),
+ CLK(NULL, "gpt2_ick", &gpt2_ick, CK_243X),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_243X),
+ CLK(NULL, "gpt3_ick", &gpt3_ick, CK_243X),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_243X),
+ CLK(NULL, "gpt4_ick", &gpt4_ick, CK_243X),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_243X),
+ CLK(NULL, "gpt5_ick", &gpt5_ick, CK_243X),
+ CLK(NULL, "gpt5_fck", &gpt5_fck, CK_243X),
+ CLK(NULL, "gpt6_ick", &gpt6_ick, CK_243X),
+ CLK(NULL, "gpt6_fck", &gpt6_fck, CK_243X),
+ CLK(NULL, "gpt7_ick", &gpt7_ick, CK_243X),
+ CLK(NULL, "gpt7_fck", &gpt7_fck, CK_243X),
+ CLK(NULL, "gpt8_ick", &gpt8_ick, CK_243X),
+ CLK(NULL, "gpt8_fck", &gpt8_fck, CK_243X),
+ CLK(NULL, "gpt9_ick", &gpt9_ick, CK_243X),
+ CLK(NULL, "gpt9_fck", &gpt9_fck, CK_243X),
+ CLK(NULL, "gpt10_ick", &gpt10_ick, CK_243X),
+ CLK(NULL, "gpt10_fck", &gpt10_fck, CK_243X),
+ CLK(NULL, "gpt11_ick", &gpt11_ick, CK_243X),
+ CLK(NULL, "gpt11_fck", &gpt11_fck, CK_243X),
+ CLK(NULL, "gpt12_ick", &gpt12_ick, CK_243X),
+ CLK(NULL, "gpt12_fck", &gpt12_fck, CK_243X),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_243X),
+ CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_243X),
+ CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_243X),
+ CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_243X),
+ CLK(NULL, "mcbsp2_ick", &mcbsp2_ick, CK_243X),
+ CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_243X),
+ CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_243X),
+ CLK(NULL, "mcbsp3_ick", &mcbsp3_ick, CK_243X),
+ CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_243X),
+ CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_243X),
+ CLK(NULL, "mcbsp4_ick", &mcbsp4_ick, CK_243X),
+ CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_243X),
+ CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_243X),
+ CLK(NULL, "mcbsp5_ick", &mcbsp5_ick, CK_243X),
+ CLK(NULL, "mcbsp5_fck", &mcbsp5_fck, CK_243X),
+ CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_243X),
+ CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_243X),
+ CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_243X),
+ CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_243X),
+ CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_243X),
+ CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_243X),
+ CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_243X),
+ CLK(NULL, "mcspi3_ick", &mcspi3_ick, CK_243X),
+ CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_243X),
+ CLK(NULL, "uart1_ick", &uart1_ick, CK_243X),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_243X),
+ CLK(NULL, "uart2_ick", &uart2_ick, CK_243X),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_243X),
+ CLK(NULL, "uart3_ick", &uart3_ick, CK_243X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_243X),
+ CLK(NULL, "gpios_ick", &gpios_ick, CK_243X),
+ CLK(NULL, "gpios_fck", &gpios_fck, CK_243X),
+ CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_243X),
+ CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick, CK_243X),
+ CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck, CK_243X),
+ CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_243X),
+ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_243X),
+ CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_243X),
+ CLK(NULL, "icr_ick", &icr_ick, CK_243X),
+ CLK("omap24xxcam", "fck", &cam_fck, CK_243X),
+ CLK(NULL, "cam_fck", &cam_fck, CK_243X),
+ CLK("omap24xxcam", "ick", &cam_ick, CK_243X),
+ CLK(NULL, "cam_ick", &cam_ick, CK_243X),
+ CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_243X),
+ CLK(NULL, "wdt4_ick", &wdt4_ick, CK_243X),
+ CLK(NULL, "wdt4_fck", &wdt4_fck, CK_243X),
+ CLK(NULL, "mspro_ick", &mspro_ick, CK_243X),
+ CLK(NULL, "mspro_fck", &mspro_fck, CK_243X),
+ CLK(NULL, "fac_ick", &fac_ick, CK_243X),
+ CLK(NULL, "fac_fck", &fac_fck, CK_243X),
+ CLK("omap_hdq.0", "ick", &hdq_ick, CK_243X),
+ CLK(NULL, "hdq_ick", &hdq_ick, CK_243X),
+ CLK("omap_hdq.1", "fck", &hdq_fck, CK_243X),
+ CLK(NULL, "hdq_fck", &hdq_fck, CK_243X),
+ CLK("omap_i2c.1", "ick", &i2c1_ick, CK_243X),
+ CLK(NULL, "i2c1_ick", &i2c1_ick, CK_243X),
+ CLK(NULL, "i2chs1_fck", &i2chs1_fck, CK_243X),
+ CLK("omap_i2c.2", "ick", &i2c2_ick, CK_243X),
+ CLK(NULL, "i2c2_ick", &i2c2_ick, CK_243X),
+ CLK(NULL, "i2chs2_fck", &i2chs2_fck, CK_243X),
+ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_243X),
+ CLK(NULL, "sdma_fck", &sdma_fck, CK_243X),
+ CLK(NULL, "sdma_ick", &sdma_ick, CK_243X),
+ CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
+ CLK(NULL, "des_ick", &des_ick, CK_243X),
+ CLK("omap-sham", "ick", &sha_ick, CK_243X),
+ CLK("omap_rng", "ick", &rng_ick, CK_243X),
+ CLK(NULL, "rng_ick", &rng_ick, CK_243X),
+ CLK("omap-aes", "ick", &aes_ick, CK_243X),
+ CLK(NULL, "pka_ick", &pka_ick, CK_243X),
+ CLK(NULL, "usb_fck", &usb_fck, CK_243X),
+ CLK("musb-omap2430", "ick", &usbhs_ick, CK_243X),
+ CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X),
+ CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_243X),
+ CLK(NULL, "mmchs1_ick", &mmchs1_ick, CK_243X),
+ CLK(NULL, "mmchs1_fck", &mmchs1_fck, CK_243X),
+ CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_243X),
+ CLK(NULL, "mmchs2_ick", &mmchs2_ick, CK_243X),
+ CLK(NULL, "mmchs2_fck", &mmchs2_fck, CK_243X),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_243X),
+ CLK(NULL, "gpio5_fck", &gpio5_fck, CK_243X),
+ CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
+ CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
+ CLK(NULL, "mmchsdb1_fck", &mmchsdb1_fck, CK_243X),
+ CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
+ CLK(NULL, "mmchsdb2_fck", &mmchsdb2_fck, CK_243X),
+ CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_243X),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_243X),
+ CLK(NULL, "timer_ext_ck", &alt_ck, CK_243X),
+ CLK(NULL, "cpufreq_ck", &virt_prcm_set, CK_243X),
+};
+
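+/* clocks that omap2430_clk_init() enables and leaves running */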
+static const char *enable_init_clks[] = {
+ "apll96_ck",
+ "apll54_ck",
+ "sync_32k_ick",
+ "omapctrl_ick",
+ "gpmc_fck",
+ "sdrc_ick",
+};
+
+/*
+ * init code
+ */
+
+int __init omap2430_clk_init(void)
+{
+ struct omap_clk *c;
+
+ prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL;
+ cpu_mask = RATE_IN_243X;
+ rate_table = omap2430_rate_table;
+
+ omap2xxx_clkt_dpllcore_init(&dpll_ck_hw.hw);
+
+ omap2xxx_clkt_vps_check_bootloader_rates();
+
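+ /*
+ * Register each clkdev entry; when __clk_init() succeeds (returns 0),
+ * follow up with the OMAP-specific clk_hw_omap initialisation.
+ */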
+ for (c = omap2430_clks; c < omap2430_clks + ARRAY_SIZE(omap2430_clks);
+ c++) {
+ clkdev_add(&c->lk);
+ if (!__clk_init(NULL, c->lk.clk))
+ omap2_init_clk_hw_omap_clocks(c->lk.clk);
+ }
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
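+ /*
+ * Print the crystal rate to one decimal place: the integer MHz part
+ * is rate / 1000000 and the tenths digit is (rate / 100000) % 10.
+ */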
+ pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(&sys_ck) / 1000000),
+ (clk_get_rate(&sys_ck) / 100000) % 10,
+ (clk_get_rate(&dpll_ck) / 1000000),
+ (clk_get_rate(&mpu_ck) / 1000000));
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
new file mode 100644
index 00000000000..ea64ad60675
--- /dev/null
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -0,0 +1,961 @@
+/*
+ * AM33XX Clock data
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+
+#include "am33xx.h"
+#include "soc.h"
+#include "iomap.h"
+#include "clock.h"
+#include "control.h"
+#include "cm.h"
+#include "cm33xx.h"
+#include "cm-regbits-33xx.h"
+#include "prm.h"
+
+/* Modulemode control */
+#define AM33XX_MODULEMODE_HWCTRL_SHIFT 0
+#define AM33XX_MODULEMODE_SWCTRL_SHIFT 1
+
+/*LIST_HEAD(clocks);*/
+
+/* Root clocks */
+
+/* RTC 32k */
+DEFINE_CLK_FIXED_RATE(clk_32768_ck, CLK_IS_ROOT, 32768, 0x0);
+
+/* On-Chip 32KHz RC OSC */
+DEFINE_CLK_FIXED_RATE(clk_rc32k_ck, CLK_IS_ROOT, 32000, 0x0);
+
+/* Crystal input clks */
+DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_24000000_ck, CLK_IS_ROOT, 24000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_25000000_ck, CLK_IS_ROOT, 25000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
+
+/* Oscillator clock */
+/* 19.2, 24, 25 or 26 MHz */
+static const char *sys_clkin_ck_parents[] = {
+ "virt_19200000_ck", "virt_24000000_ck", "virt_25000000_ck",
+ "virt_26000000_ck",
+};
+
+/*
+ * sys_clkin: input to the DPLLs and also used as the functional clock
+ * for adc_tsc, smartreflex0-1, timer1-7, mcasp0-1, dcan0-1 and cefuse.
+ */
+DEFINE_CLK_MUX(sys_clkin_ck, sys_clkin_ck_parents, NULL, 0x0,
+ AM33XX_CTRL_REGADDR(AM33XX_CONTROL_STATUS),
+ AM33XX_CONTROL_STATUS_SYSBOOT1_SHIFT,
+ AM33XX_CONTROL_STATUS_SYSBOOT1_WIDTH,
+ 0, NULL);
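+
+/*
+ * The mux field lives in CONTROL_STATUS and reflects the SYSBOOT
+ * configuration pins latched at power-on, so in practice the selected
+ * crystal is determined by the board straps.
+ */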
+
+/* External clock - 12 MHz */
+DEFINE_CLK_FIXED_RATE(tclkin_ck, CLK_IS_ROOT, 12000000, 0x0);
+
+/* Module clocks and DPLL outputs */
+
+/* DPLL_CORE */
+static struct dpll_data dpll_core_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_CORE,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_CORE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_CORE,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+/* CLKDCOLDO output */
+static const char *dpll_core_ck_parents[] = {
+ "sys_clkin_ck",
+};
+
+static struct clk dpll_core_ck;
+
+static const struct clk_ops dpll_core_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static struct clk_hw_omap dpll_core_ck_hw = {
+ .hw = {
+ .clk = &dpll_core_ck,
+ },
+ .dpll_data = &dpll_core_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_core_ck, dpll_core_ck_parents, dpll_core_ck_ops);
+
+static const char *dpll_core_x2_ck_parents[] = {
+ "dpll_core_ck",
+};
+
+static struct clk dpll_core_x2_ck;
+
+static const struct clk_ops dpll_x2_ck_ops = {
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+static struct clk_hw_omap dpll_core_x2_ck_hw = {
+ .hw = {
+ .clk = &dpll_core_x2_ck,
+ },
+ .flags = CLOCK_CLKOUTX2,
+};
+
+DEFINE_STRUCT_CLK(dpll_core_x2_ck, dpll_core_x2_ck_parents, dpll_x2_ck_ops);
+
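+/*
+ * The x2 node reports twice the CORE DPLL rate (the CLKOUTX2 output);
+ * the M4/M5/M6 HSDIVIDER outputs below are all derived from it.
+ */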
+DEFINE_CLK_DIVIDER(dpll_core_m4_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
+ 0x0, AM33XX_CM_DIV_M4_DPLL_CORE,
+ AM33XX_HSDIVIDER_CLKOUT1_DIV_SHIFT,
+ AM33XX_HSDIVIDER_CLKOUT1_DIV_WIDTH, CLK_DIVIDER_ONE_BASED,
+ NULL);
+
+DEFINE_CLK_DIVIDER(dpll_core_m5_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
+ 0x0, AM33XX_CM_DIV_M5_DPLL_CORE,
+ AM33XX_HSDIVIDER_CLKOUT2_DIV_SHIFT,
+ AM33XX_HSDIVIDER_CLKOUT2_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(dpll_core_m6_ck, "dpll_core_x2_ck", &dpll_core_x2_ck,
+ 0x0, AM33XX_CM_DIV_M6_DPLL_CORE,
+ AM33XX_HSDIVIDER_CLKOUT3_DIV_SHIFT,
+ AM33XX_HSDIVIDER_CLKOUT3_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+
+/* DPLL_MPU */
+static struct dpll_data dpll_mpu_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_MPU,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_MPU,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_MPU,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_mpu_ck;
+
+static const struct clk_ops dpll_mpu_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static struct clk_hw_omap dpll_mpu_ck_hw = {
+ .hw = {
+ .clk = &dpll_mpu_ck,
+ },
+ .dpll_data = &dpll_mpu_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_core_ck_parents, dpll_mpu_ck_ops);
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+DEFINE_CLK_DIVIDER(dpll_mpu_m2_ck, "dpll_mpu_ck", &dpll_mpu_ck,
+ 0x0, AM33XX_CM_DIV_M2_DPLL_MPU, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
+ AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+
+/* DPLL_DDR */
+static struct dpll_data dpll_ddr_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DDR,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_DDR,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DDR,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_ddr_ck;
+
+static const struct clk_ops dpll_ddr_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+static struct clk_hw_omap dpll_ddr_ck_hw = {
+ .hw = {
+ .clk = &dpll_ddr_ck,
+ },
+ .dpll_data = &dpll_ddr_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_ddr_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+DEFINE_CLK_DIVIDER(dpll_ddr_m2_ck, "dpll_ddr_ck", &dpll_ddr_ck,
+ 0x0, AM33XX_CM_DIV_M2_DPLL_DDR,
+ AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+/* emif_fck functional clock */
+DEFINE_CLK_FIXED_FACTOR(dpll_ddr_m2_div2_ck, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck,
+ 0x0, 1, 2);
+
+/* DPLL_DISP */
+static struct dpll_data dpll_disp_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DISP,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_DISP,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DISP,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_disp_ck;
+
+static struct clk_hw_omap dpll_disp_ck_hw = {
+ .hw = {
+ .clk = &dpll_disp_ck,
+ },
+ .dpll_data = &dpll_disp_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_disp_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck, 0x0,
+ AM33XX_CM_DIV_M2_DPLL_DISP, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
+ AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+
+/* DPLL_PER */
+static struct dpll_data dpll_per_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_PERIPH,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_PER,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_PER,
+ .mult_mask = AM33XX_DPLL_MULT_PERIPH_MASK,
+ .div1_mask = AM33XX_DPLL_PER_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .flags = DPLL_J_TYPE,
+};
+
+/* CLKDCOLDO */
+static struct clk dpll_per_ck;
+
+static struct clk_hw_omap dpll_per_ck_hw = {
+ .hw = {
+ .clk = &dpll_per_ck,
+ },
+ .dpll_data = &dpll_per_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_per_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
+
+/* CLKOUT: fdpll/M2 */
+DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
+ AM33XX_CM_DIV_M2_DPLL_PER, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
+ AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED,
+ NULL);
+
+DEFINE_CLK_FIXED_FACTOR(dpll_per_m2_div4_wkupdm_ck, "dpll_per_m2_ck",
+ &dpll_per_m2_ck, 0x0, 1, 4);
+
+DEFINE_CLK_FIXED_FACTOR(dpll_per_m2_div4_ck, "dpll_per_m2_ck",
+ &dpll_per_m2_ck, 0x0, 1, 4);
+
+DEFINE_CLK_FIXED_FACTOR(dpll_core_m4_div2_ck, "dpll_core_m4_ck",
+ &dpll_core_m4_ck, 0x0, 1, 2);
+
+DEFINE_CLK_FIXED_FACTOR(l4_rtc_gclk, "dpll_core_m4_ck", &dpll_core_m4_ck, 0x0,
+ 1, 2);
+
+DEFINE_CLK_FIXED_FACTOR(clk_24mhz, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1,
+ 8);
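+
+/*
+ * Assuming the usual 192 MHz dpll_per_m2_ck output, this /8 fixed
+ * factor yields the 24 MHz clock that clkdiv32k_ck divides down below.
+ */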
+
+/*
+ * The clock nodes below describe clock domains derived from the
+ * core clock.
+ */
+static const struct clk_ops clk_ops_null = {
+};
+
+static const char *l3_gclk_parents[] = {
+ "dpll_core_m4_ck"
+};
+
+static struct clk l3_gclk;
+DEFINE_STRUCT_CLK_HW_OMAP(l3_gclk, NULL);
+DEFINE_STRUCT_CLK(l3_gclk, l3_gclk_parents, clk_ops_null);
+
+static struct clk l4hs_gclk;
+DEFINE_STRUCT_CLK_HW_OMAP(l4hs_gclk, NULL);
+DEFINE_STRUCT_CLK(l4hs_gclk, l3_gclk_parents, clk_ops_null);
+
+static const char *l3s_gclk_parents[] = {
+ "dpll_core_m4_div2_ck"
+};
+
+static struct clk l3s_gclk;
+DEFINE_STRUCT_CLK_HW_OMAP(l3s_gclk, NULL);
+DEFINE_STRUCT_CLK(l3s_gclk, l3s_gclk_parents, clk_ops_null);
+
+static struct clk l4fw_gclk;
+DEFINE_STRUCT_CLK_HW_OMAP(l4fw_gclk, NULL);
+DEFINE_STRUCT_CLK(l4fw_gclk, l3s_gclk_parents, clk_ops_null);
+
+static struct clk l4ls_gclk;
+DEFINE_STRUCT_CLK_HW_OMAP(l4ls_gclk, NULL);
+DEFINE_STRUCT_CLK(l4ls_gclk, l3s_gclk_parents, clk_ops_null);
+
+static struct clk sysclk_div_ck;
+DEFINE_STRUCT_CLK_HW_OMAP(sysclk_div_ck, NULL);
+DEFINE_STRUCT_CLK(sysclk_div_ck, l3_gclk_parents, clk_ops_null);
+
+/*
+ * In order to match the clock domain with the hwmod clockdomain entry,
+ * separate clock nodes are required for the modules that get their
+ * functional clock directly from sys_clkin.
+ */
+static struct clk adc_tsc_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(adc_tsc_fck, NULL);
+DEFINE_STRUCT_CLK(adc_tsc_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk dcan0_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(dcan0_fck, NULL);
+DEFINE_STRUCT_CLK(dcan0_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk dcan1_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(dcan1_fck, NULL);
+DEFINE_STRUCT_CLK(dcan1_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk mcasp0_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(mcasp0_fck, NULL);
+DEFINE_STRUCT_CLK(mcasp0_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk mcasp1_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(mcasp1_fck, NULL);
+DEFINE_STRUCT_CLK(mcasp1_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk smartreflex0_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(smartreflex0_fck, NULL);
+DEFINE_STRUCT_CLK(smartreflex0_fck, dpll_core_ck_parents, clk_ops_null);
+
+static struct clk smartreflex1_fck;
+DEFINE_STRUCT_CLK_HW_OMAP(smartreflex1_fck, NULL);
+DEFINE_STRUCT_CLK(smartreflex1_fck, dpll_core_ck_parents, clk_ops_null);
+
+/*
+ * Module clock nodes
+ *
+ * The following clock leaf nodes are added for the moment because:
+ *
+ * - hwmod data is not present for these modules: either hwmod control
+ * is not required or it has not been populated yet.
+ * - Driver code has not yet been migrated to hwmod/runtime PM.
+ * - The module is outside kernel control (listed here so it can be
+ * disabled by default).
+ *
+ * - debugss
+ * - mmu (gfx domain)
+ * - cefuse
+ * - usbotg_fck (an additional clock, not really a modulemode)
+ * - ieee5000
+ */
+DEFINE_CLK_GATE(debugss_ick, "dpll_core_m4_ck", &dpll_core_m4_ck, 0x0,
+ AM33XX_CM_WKUP_DEBUGSS_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(mmu_fck, "dpll_core_m4_ck", &dpll_core_m4_ck, 0x0,
+ AM33XX_CM_GFX_MMUDATA_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
+ AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+/*
+ * clkdiv32k is generated by a fixed division of clk_24mhz by 732.4219.
+ */
+DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
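+
+/*
+ * 24000000 / 732.4219 = 32768 Hz; the integer factor 732 used above
+ * gives ~32787 Hz in practice.
+ */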
+
+DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
+ AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+/* "usbotg_fck" is an additional clock and not really a modulemode */
+DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
+ AM33XX_CM_CLKDCOLDO_DPLL_PER, AM33XX_ST_DPLL_CLKDCOLDO_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(ieee5000_fck, "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck,
+ 0x0, AM33XX_CM_PER_IEEE5000_CLKCTRL,
+ AM33XX_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+/* Timers */
+static const struct clksel timer1_clkmux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
+ { .parent = &tclkin_ck, .rates = div_1_2_rates },
+ { .parent = &clk_rc32k_ck, .rates = div_1_3_rates },
+ { .parent = &clk_32768_ck, .rates = div_1_4_rates },
+ { .parent = NULL },
+};
+
+static const char *timer1_ck_parents[] = {
+ "sys_clkin_ck", "clkdiv32k_ick", "tclkin_ck", "clk_rc32k_ck",
+ "clk_32768_ck",
+};
+
+static struct clk timer1_fck;
+
+static const struct clk_ops timer1_fck_ops = {
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap timer1_fck_hw = {
+ .hw = {
+ .clk = &timer1_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer1_clkmux_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER1MS_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_2_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer1_fck, timer1_ck_parents, timer1_fck_ops);
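+
+/*
+ * Timer1 has a five-input mux (including the 32 kHz sources); timers
+ * 2-7 below share a simpler three-input mux, hence the separate
+ * clksel tables.
+ */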
+
+static const struct clksel timer2_to_7_clk_sel[] = {
+ { .parent = &tclkin_ck, .rates = div_1_0_rates },
+ { .parent = &sys_clkin_ck, .rates = div_1_1_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *timer2_to_7_ck_parents[] = {
+ "tclkin_ck", "sys_clkin_ck", "clkdiv32k_ick",
+};
+
+static struct clk timer2_fck;
+
+static struct clk_hw_omap timer2_fck_hw = {
+ .hw = {
+ .clk = &timer2_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER2_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer2_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+static struct clk timer3_fck;
+
+static struct clk_hw_omap timer3_fck_hw = {
+ .hw = {
+ .clk = &timer3_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER3_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer3_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+static struct clk timer4_fck;
+
+static struct clk_hw_omap timer4_fck_hw = {
+ .hw = {
+ .clk = &timer4_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER4_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer4_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+static struct clk timer5_fck;
+
+static struct clk_hw_omap timer5_fck_hw = {
+ .hw = {
+ .clk = &timer5_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER5_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer5_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+static struct clk timer6_fck;
+
+static struct clk_hw_omap timer6_fck_hw = {
+ .hw = {
+ .clk = &timer6_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER6_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer6_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+static struct clk timer7_fck;
+
+static struct clk_hw_omap timer7_fck_hw = {
+ .hw = {
+ .clk = &timer7_fck,
+ },
+ .clkdm_name = "l4ls_clkdm",
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER7_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(timer7_fck, timer2_to_7_ck_parents, timer1_fck_ops);
+
+DEFINE_CLK_FIXED_FACTOR(cpsw_125mhz_gclk,
+ "dpll_core_m5_ck",
+ &dpll_core_m5_ck,
+ 0x0,
+ 1, 2);
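+
+/*
+ * With the CORE DPLL M5 output at its usual 250 MHz, this fixed /2
+ * gives the 125 MHz CPSW reference the clock name implies (an
+ * assumption about the default DPLL configuration, not enforced here).
+ */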
+
+static const struct clk_ops cpsw_fck_ops = {
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+static const struct clksel cpsw_cpts_rft_clkmux_sel[] = {
+ { .parent = &dpll_core_m5_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m4_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static const char *cpsw_cpts_rft_ck_parents[] = {
+ "dpll_core_m5_ck", "dpll_core_m4_ck",
+};
+
+static struct clk cpsw_cpts_rft_clk;
+
+static struct clk_hw_omap cpsw_cpts_rft_clk_hw = {
+ .hw = {
+ .clk = &cpsw_cpts_rft_clk,
+ },
+ .clkdm_name = "cpsw_125mhz_clkdm",
+ .clksel = cpsw_cpts_rft_clkmux_sel,
+ .clksel_reg = AM33XX_CM_CPTS_RFT_CLKSEL,
+ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
+};
+
+DEFINE_STRUCT_CLK(cpsw_cpts_rft_clk, cpsw_cpts_rft_ck_parents, cpsw_fck_ops);
+
+
+/* gpio */
+static const char *gpio0_ck_parents[] = {
+ "clk_rc32k_ck", "clk_32768_ck", "clkdiv32k_ick",
+};
+
+static const struct clksel gpio0_dbclk_mux_sel[] = {
+ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
+ { .parent = &clk_32768_ck, .rates = div_1_1_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const struct clk_ops gpio_fck_ops = {
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static struct clk gpio0_dbclk_mux_ck;
+
+static struct clk_hw_omap gpio0_dbclk_mux_ck_hw = {
+ .hw = {
+ .clk = &gpio0_dbclk_mux_ck,
+ },
+ .clkdm_name = "l4_wkup_clkdm",
+ .clksel = gpio0_dbclk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_GPIO0_DBCLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(gpio0_dbclk_mux_ck, gpio0_ck_parents, gpio_fck_ops);
+
+DEFINE_CLK_GATE(gpio0_dbclk, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, 0x0,
+ AM33XX_CM_WKUP_GPIO0_CLKCTRL,
+ AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio1_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
+ AM33XX_CM_PER_GPIO1_CLKCTRL,
+ AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio2_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
+ AM33XX_CM_PER_GPIO2_CLKCTRL,
+ AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio3_dbclk, "clkdiv32k_ick", &clkdiv32k_ick, 0x0,
+ AM33XX_CM_PER_GPIO3_CLKCTRL,
+ AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT, 0x0, NULL);
+
+
+static const char *pruss_ck_parents[] = {
+ "l3_gclk", "dpll_disp_m2_ck",
+};
+
+static const struct clksel pruss_ocp_clk_mux_sel[] = {
+ { .parent = &l3_gclk, .rates = div_1_0_rates },
+ { .parent = &dpll_disp_m2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk pruss_ocp_gclk;
+
+static struct clk_hw_omap pruss_ocp_gclk_hw = {
+ .hw = {
+ .clk = &pruss_ocp_gclk,
+ },
+ .clkdm_name = "pruss_ocp_clkdm",
+ .clksel = pruss_ocp_clk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_PRUSS_OCP_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
+};
+
+DEFINE_STRUCT_CLK(pruss_ocp_gclk, pruss_ck_parents, gpio_fck_ops);
+
+static const char *lcd_ck_parents[] = {
+ "dpll_disp_m2_ck", "dpll_core_m5_ck", "dpll_per_m2_ck",
+};
+
+static const struct clksel lcd_clk_mux_sel[] = {
+ { .parent = &dpll_disp_m2_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m5_ck, .rates = div_1_1_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk lcd_gclk;
+
+static struct clk_hw_omap lcd_gclk_hw = {
+ .hw = {
+ .clk = &lcd_gclk,
+ },
+ .clkdm_name = "lcdc_clkdm",
+ .clksel = lcd_clk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_LCDC_PIXEL_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(lcd_gclk, lcd_ck_parents, gpio_fck_ops);
+
+DEFINE_CLK_FIXED_FACTOR(mmc_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1, 2);
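+
+/*
+ * Half of the (typically 192 MHz) dpll_per_m2_ck output: the usual
+ * 96 MHz MMC reference clock.
+ */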
+
+static const char *gfx_ck_parents[] = {
+ "dpll_core_m4_ck", "dpll_per_m2_ck",
+};
+
+static const struct clksel gfx_clksel_sel[] = {
+ { .parent = &dpll_core_m4_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk gfx_fclk_clksel_ck;
+
+static struct clk_hw_omap gfx_fclk_clksel_ck_hw = {
+ .hw = {
+ .clk = &gfx_fclk_clksel_ck,
+ },
+ .clksel = gfx_clksel_sel,
+ .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
+ .clksel_mask = AM33XX_CLKSEL_GFX_FCLK_MASK,
+};
+
+DEFINE_STRUCT_CLK(gfx_fclk_clksel_ck, gfx_ck_parents, gpio_fck_ops);
+
+static const struct clk_div_table div_1_0_2_1_rates[] = {
+ { .div = 1, .val = 0, },
+ { .div = 2, .val = 1, },
+ { .div = 0 },
+};
+
+DEFINE_CLK_DIVIDER_TABLE(gfx_fck_div_ck, "gfx_fclk_clksel_ck",
+ &gfx_fclk_clksel_ck, 0x0, AM33XX_CLKSEL_GFX_FCLK,
+ AM33XX_CLKSEL_0_0_SHIFT, AM33XX_CLKSEL_0_0_WIDTH,
+ 0x0, div_1_0_2_1_rates, NULL);
+
+static const char *sysclkout_ck_parents[] = {
+ "clk_32768_ck", "l3_gclk", "dpll_ddr_m2_ck", "dpll_per_m2_ck",
+ "lcd_gclk",
+};
+
+static const struct clksel sysclkout_pre_sel[] = {
+ { .parent = &clk_32768_ck, .rates = div_1_0_rates },
+ { .parent = &l3_gclk, .rates = div_1_1_rates },
+ { .parent = &dpll_ddr_m2_ck, .rates = div_1_2_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_3_rates },
+ { .parent = &lcd_gclk, .rates = div_1_4_rates },
+ { .parent = NULL },
+};
+
+static struct clk sysclkout_pre_ck;
+
+static struct clk_hw_omap sysclkout_pre_ck_hw = {
+ .hw = {
+ .clk = &sysclkout_pre_ck,
+ },
+ .clksel = sysclkout_pre_sel,
+ .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
+ .clksel_mask = AM33XX_CLKOUT2SOURCE_MASK,
+};
+
+DEFINE_STRUCT_CLK(sysclkout_pre_ck, sysclkout_ck_parents, gpio_fck_ops);
+
+/* Divider table for clkout2: divide-by-1 to divide-by-8, default 1/1 */
+static const struct clk_div_table div8_rates[] = {
+ { .div = 1, .val = 0, },
+ { .div = 2, .val = 1, },
+ { .div = 3, .val = 2, },
+ { .div = 4, .val = 3, },
+ { .div = 5, .val = 4, },
+ { .div = 6, .val = 5, },
+ { .div = 7, .val = 6, },
+ { .div = 8, .val = 7, },
+ { .div = 0 },
+};
+
+DEFINE_CLK_DIVIDER_TABLE(clkout2_div_ck, "sysclkout_pre_ck", &sysclkout_pre_ck,
+ 0x0, AM33XX_CM_CLKOUT_CTRL, AM33XX_CLKOUT2DIV_SHIFT,
+ AM33XX_CLKOUT2DIV_WIDTH, 0x0, div8_rates, NULL);
+
+DEFINE_CLK_GATE(clkout2_ck, "clkout2_div_ck", &clkout2_div_ck, 0x0,
+ AM33XX_CM_CLKOUT_CTRL, AM33XX_CLKOUT2EN_SHIFT, 0x0, NULL);
+
+static const char *wdt_ck_parents[] = {
+ "clk_rc32k_ck", "clkdiv32k_ick",
+};
+
+static const struct clksel wdt_clkmux_sel[] = {
+ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk wdt1_fck;
+
+static struct clk_hw_omap wdt1_fck_hw = {
+ .hw = {
+ .clk = &wdt1_fck,
+ },
+ .clkdm_name = "l4_wkup_clkdm",
+ .clksel = wdt_clkmux_sel,
+ .clksel_reg = AM33XX_CLKSEL_WDT1_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+};
+
+DEFINE_STRUCT_CLK(wdt1_fck, wdt_ck_parents, gpio_fck_ops);
+
+/*
+ * clkdev
+ */
+static struct omap_clk am33xx_clks[] = {
+ CLK(NULL, "clk_32768_ck", &clk_32768_ck, CK_AM33XX),
+ CLK(NULL, "clk_rc32k_ck", &clk_rc32k_ck, CK_AM33XX),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_AM33XX),
+ CLK(NULL, "virt_24000000_ck", &virt_24000000_ck, CK_AM33XX),
+ CLK(NULL, "virt_25000000_ck", &virt_25000000_ck, CK_AM33XX),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_AM33XX),
+ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_AM33XX),
+ CLK(NULL, "tclkin_ck", &tclkin_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m4_ck", &dpll_core_m4_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m5_ck", &dpll_core_m5_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m6_ck", &dpll_core_m6_ck, CK_AM33XX),
+ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_AM33XX),
+ CLK("cpu0", NULL, &dpll_mpu_ck, CK_AM33XX),
+ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_ck", &dpll_ddr_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_m2_div2_ck", &dpll_ddr_m2_div2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_disp_ck", &dpll_disp_ck, CK_AM33XX),
+ CLK(NULL, "dpll_disp_m2_ck", &dpll_disp_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", &dpll_per_m2_div4_wkupdm_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_div4_ck", &dpll_per_m2_div4_ck, CK_AM33XX),
+ CLK(NULL, "adc_tsc_fck", &adc_tsc_fck, CK_AM33XX),
+ CLK(NULL, "cefuse_fck", &cefuse_fck, CK_AM33XX),
+ CLK(NULL, "clkdiv32k_ck", &clkdiv32k_ck, CK_AM33XX),
+ CLK(NULL, "clkdiv32k_ick", &clkdiv32k_ick, CK_AM33XX),
+ CLK(NULL, "dcan0_fck", &dcan0_fck, CK_AM33XX),
+ CLK("481cc000.d_can", NULL, &dcan0_fck, CK_AM33XX),
+ CLK(NULL, "dcan1_fck", &dcan1_fck, CK_AM33XX),
+ CLK("481d0000.d_can", NULL, &dcan1_fck, CK_AM33XX),
+ CLK(NULL, "debugss_ick", &debugss_ick, CK_AM33XX),
+ CLK(NULL, "pruss_ocp_gclk", &pruss_ocp_gclk, CK_AM33XX),
+ CLK(NULL, "mcasp0_fck", &mcasp0_fck, CK_AM33XX),
+ CLK(NULL, "mcasp1_fck", &mcasp1_fck, CK_AM33XX),
+ CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
+ CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
+ CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
+ CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
+ CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
+ CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
+ CLK(NULL, "l4_rtc_gclk", &l4_rtc_gclk, CK_AM33XX),
+ CLK(NULL, "l3_gclk", &l3_gclk, CK_AM33XX),
+ CLK(NULL, "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck, CK_AM33XX),
+ CLK(NULL, "l4hs_gclk", &l4hs_gclk, CK_AM33XX),
+ CLK(NULL, "l3s_gclk", &l3s_gclk, CK_AM33XX),
+ CLK(NULL, "l4fw_gclk", &l4fw_gclk, CK_AM33XX),
+ CLK(NULL, "l4ls_gclk", &l4ls_gclk, CK_AM33XX),
+ CLK(NULL, "clk_24mhz", &clk_24mhz, CK_AM33XX),
+ CLK(NULL, "sysclk_div_ck", &sysclk_div_ck, CK_AM33XX),
+ CLK(NULL, "cpsw_125mhz_gclk", &cpsw_125mhz_gclk, CK_AM33XX),
+ CLK(NULL, "cpsw_cpts_rft_clk", &cpsw_cpts_rft_clk, CK_AM33XX),
+ CLK(NULL, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, CK_AM33XX),
+ CLK(NULL, "gpio0_dbclk", &gpio0_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_AM33XX),
+ CLK(NULL, "lcd_gclk", &lcd_gclk, CK_AM33XX),
+ CLK(NULL, "mmc_clk", &mmc_clk, CK_AM33XX),
+ CLK(NULL, "gfx_fclk_clksel_ck", &gfx_fclk_clksel_ck, CK_AM33XX),
+ CLK(NULL, "gfx_fck_div_ck", &gfx_fck_div_ck, CK_AM33XX),
+ CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
+ CLK(NULL, "clkout2_div_ck", &clkout2_div_ck, CK_AM33XX),
+ CLK(NULL, "timer_32k_ck", &clkdiv32k_ick, CK_AM33XX),
+ CLK(NULL, "timer_sys_ck", &sys_clkin_ck, CK_AM33XX),
+};
+
+
+static const char *enable_init_clks[] = {
+ "dpll_ddr_m2_ck",
+ "dpll_mpu_m2_ck",
+ "l3_gclk",
+ "l4hs_gclk",
+ "l4fw_gclk",
+ "l4ls_gclk",
+};
+
+int __init am33xx_clk_init(void)
+{
+ struct omap_clk *c;
+ u32 cpu_clkflg;
+
+ if (soc_is_am33xx()) {
+ cpu_mask = RATE_IN_AM33XX;
+ cpu_clkflg = CK_AM33XX;
+ }
+
+ for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++) {
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ if (!__clk_init(NULL, c->lk.clk))
+ omap2_init_clk_hw_omap_clocks(c->lk.clk);
+ }
+ }
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ /*
+ * TRM ERRATA: the default parent of timers 3 & 6 (TCLKIN) may not
+ * always be physically present; in that case hwmod enabling of the
+ * clock fails with the default parent, yet the timer probe assumes
+ * the clock is already enabled, which leads to a crash when the
+ * timer 3 & 6 registers are accessed during probe. Fix this by
+ * reparenting both timers to the master oscillator clock.
+ */
+
+ clk_set_parent(&timer3_fck, &sys_clkin_ck);
+ clk_set_parent(&timer6_fck, &sys_clkin_ck);
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
new file mode 100644
index 00000000000..bdf39481fbd
--- /dev/null
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -0,0 +1,3595 @@
+/*
+ * OMAP3 clock data
+ *
+ * Copyright (C) 2007-2012 Texas Instruments, Inc.
+ * Copyright (C) 2007-2011 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
+ * With many device clock fixes by Kevin Hilman and Jouni Högander
+ * DPLL bypass clock support added by Roman Tereshonkov
+ *
+ */
+
+/*
+ * Virtual clocks are introduced as convenience tools. They are
+ * sources for other clocks and are not supposed to be requested
+ * by drivers directly.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-private.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+#include "soc.h"
+#include "iomap.h"
+#include "clock.h"
+#include "clock3xxx.h"
+#include "clock34xx.h"
+#include "clock36xx.h"
+#include "clock3517.h"
+#include "cm3xxx.h"
+#include "cm-regbits-34xx.h"
+#include "prm3xxx.h"
+#include "prm-regbits-34xx.h"
+#include "control.h"
+
+/*
+ * clocks
+ */
+
+#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR
+
+/* Maximum DPLL multiplier, divider values for OMAP3 */
+#define OMAP3_MAX_DPLL_MULT 2047
+#define OMAP3630_MAX_JTYPE_DPLL_MULT 4095
+#define OMAP3_MAX_DPLL_DIV 128
+
+DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
+
+DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
+
+DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
+
+static const char *osc_sys_ck_parent_names[] = {
+ "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
+ "virt_38_4m_ck", "virt_16_8m_ck",
+};
+
+DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
+ OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
+ OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
+ OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
+ OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
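+
+/*
+ * osc_sys_ck selects one of the six board crystals via SYS_CLKIN_SEL;
+ * sys_ck then applies the SYSCLKDIV divider (1 or 2) on top of it.
+ */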
+
+static struct dpll_data dpll3_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP3430_EN_CORE_DPLL_MASK,
+ .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
+ .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
+ .idlest_mask = OMAP3430_ST_CORE_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+};
+
+static struct clk dpll3_ck;
+
+static const char *dpll3_ck_parent_names[] = {
+ "sys_ck",
+};
+
+static const struct clk_ops dpll3_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static struct clk_hw_omap dpll3_ck_hw = {
+ .hw = {
+ .clk = &dpll3_ck,
+ },
+ .ops = &clkhwops_omap3_dpll,
+ .dpll_data = &dpll3_dd,
+ .clkdm_name = "dpll3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);
+
+DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
+ OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk core_ck;
+
+static const char *core_ck_parent_names[] = {
+ "dpll3_m2_ck",
+};
+
+static const struct clk_ops core_ck_ops = {};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
+DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
+
+DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk security_l4_ick2;
+
+static const char *security_l4_ick2_parent_names[] = {
+ "l4_ick",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
+DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);
+
+static struct clk aes1_ick;
+
+static const char *aes1_ick_parent_names[] = {
+ "security_l4_ick2",
+};
+
+static const struct clk_ops aes1_ick_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk_hw_omap aes1_ick_hw = {
+ .hw = {
+ .clk = &aes1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_AES1_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);
+
+static struct clk core_l4_ick;
+
+static const struct clk_ops core_l4_ick_ops = {
+ .init = &omap2_init_clk_clkdm,
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
+
+static struct clk aes2_ick;
+
+static const char *aes2_ick_parent_names[] = {
+ "core_l4_ick",
+};
+
+static const struct clk_ops aes2_ick_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk_hw_omap aes2_ick_hw = {
+ .hw = {
+ .clk = &aes2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_AES2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk dpll1_fck;
+
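+/* DPLL1: MPU DPLL; sys_ck is the reference clock, dpll1_fck the bypass clock */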
+static struct dpll_data dpll1_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK,
+ .clk_bypass = &dpll1_fck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
+ .enable_mask = OMAP3430_EN_MPU_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
+ .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
+ .idlest_mask = OMAP3430_ST_MPU_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+};
+
+static struct clk dpll1_ck;
+
+static const struct clk_ops dpll1_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static struct clk_hw_omap dpll1_ck_hw = {
+ .hw = {
+ .clk = &dpll1_ck,
+ },
+ .ops = &clkhwops_omap3_dpll,
+ .dpll_data = &dpll1_dd,
+ .clkdm_name = "dpll1_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);
+
+DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);
+
+DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
+ OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
+ OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
+ OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk mpu_ck;
+
+static const char *mpu_ck_parent_names[] = {
+ "dpll1_x2m2_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
+DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);
+
+DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
+ OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
+ OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
+ 0x0, NULL);
+
+static struct clk cam_ick;
+
+static struct clk_hw_omap cam_ick_hw = {
+ .hw = {
+ .clk = &cam_ick,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_CAM_SHIFT,
+ .clkdm_name = "cam_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);
+
+/* DPLL4 */
+/* Supplies 96 MHz, 54 MHz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
+/* Type: DPLL */
+static struct dpll_data dpll4_dd;
+
+static struct dpll_data dpll4_dd_34xx __initdata = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
+ .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
+ .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
+ .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+};
+
+static struct dpll_data dpll4_dd_3630 __initdata = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
+ .mult_mask = OMAP3630_PERIPH_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
+ .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
+ .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
+ .dco_mask = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
+ .sddiv_mask = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
+ .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .flags = DPLL_J_TYPE
+};
+
+static struct clk dpll4_ck;
+
+static const struct clk_ops dpll4_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll4_set_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static struct clk_hw_omap dpll4_ck_hw = {
+ .hw = {
+ .clk = &dpll4_ck,
+ },
+ .dpll_data = &dpll4_dd,
+ .ops = &clkhwops_omap3_dpll,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);
+
+DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll4_m5x2_ck;
+
+static const char *dpll4_m5x2_ck_parent_names[] = {
+ "dpll4_m5_ck",
+};
+
+static const struct clk_ops dpll4_m5x2_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
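+/* 36xx variant of the DPLL4 Mx2 output ops: the enable hook restores the HSDIVIDER field after power-down */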
+static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
+ .disable = &omap2_dflt_clk_disable,
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+static struct clk_hw_omap dpll4_m5x2_ck_hw = {
+ .hw = {
+ .clk = &dpll4_m5x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll4_m5x2_ck_3630 = {
+ .name = "dpll4_m5x2_ck",
+ .hw = &dpll4_m5x2_ck_hw.hw,
+ .parent_names = dpll4_m5x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+static struct clk cam_mclk;
+
+static const char *cam_mclk_parent_names[] = {
+ "dpll4_m5x2_ck",
+};
+
+static struct clk_hw_omap cam_mclk_hw = {
+ .hw = {
+ .clk = &cam_mclk,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_CAM_SHIFT,
+ .clkdm_name = "cam_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cam_mclk, cam_mclk_parent_names, aes2_ick_ops);
+
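+/* sys_clkout2 source selection: core_ck, sys_ck, cm_96m_fck or omap_54m_fck */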
+static const struct clksel_rate clkout2_src_core_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate clkout2_src_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate clkout2_src_96m_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
+ OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll4_m2x2_ck;
+
+static const char *dpll4_m2x2_ck_parent_names[] = {
+ "dpll4_m2_ck",
+};
+
+static struct clk_hw_omap dpll4_m2x2_ck_hw = {
+ .hw = {
+ .clk = &dpll4_m2x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_96M_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll4_m2x2_ck_3630 = {
+ .name = "dpll4_m2x2_ck",
+ .hw = &dpll4_m2x2_ck_hw.hw,
+ .parent_names = dpll4_m2x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+static struct clk omap_96m_alwon_fck;
+
+static const char *omap_96m_alwon_fck_parent_names[] = {
+ "dpll4_m2x2_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
+DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
+ core_ck_ops);
+
+static struct clk cm_96m_fck;
+
+static const char *cm_96m_fck_parent_names[] = {
+ "omap_96m_alwon_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
+DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);
+
+static const struct clksel_rate clkout2_src_54m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+DEFINE_CLK_DIVIDER(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll4_m3x2_ck;
+
+static const char *dpll4_m3x2_ck_parent_names[] = {
+ "dpll4_m3_ck",
+};
+
+static struct clk_hw_omap dpll4_m3x2_ck_hw = {
+ .hw = {
+ .clk = &dpll4_m3x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_TV_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll4_m3x2_ck_3630 = {
+ .name = "dpll4_m3x2_ck",
+ .hw = &dpll4_m3x2_ck_hw.hw,
+ .parent_names = dpll4_m3x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+static const char *omap_54m_fck_parent_names[] = {
+ "dpll4_m3x2_ck", "sys_altclk",
+};
+
+DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
+ OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);
+
+static const struct clksel clkout2_src_clksel[] = {
+ { .parent = &core_ck, .rates = clkout2_src_core_rates },
+ { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
+ { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
+ { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
+ { .parent = NULL },
+};
+
+static const char *clkout2_src_ck_parent_names[] = {
+ "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
+};
+
+static const struct clk_ops clkout2_src_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
+ clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
+ OMAP3430_CLKOUT2SOURCE_MASK,
+ OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
+ NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
+
+static const struct clksel_rate omap_48m_cm96m_rates[] = {
+ { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate omap_48m_alt_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel omap_48m_clksel[] = {
+ { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
+ { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
+ { .parent = NULL },
+};
+
+static const char *omap_48m_fck_parent_names[] = {
+ "cm_96m_fck", "sys_altclk",
+};
+
+static struct clk omap_48m_fck;
+
+static const struct clk_ops omap_48m_fck_ops = {
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+static struct clk_hw_omap omap_48m_fck_hw = {
+ .hw = {
+ .clk = &omap_48m_fck,
+ },
+ .clksel = omap_48m_clksel,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_SOURCE_48M_MASK,
+};
+
+DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);
+
+DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);
+
+static struct clk core_12m_fck;
+
+static const char *core_12m_fck_parent_names[] = {
+ "omap_12m_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);
+
+static struct clk core_48m_fck;
+
+static const char *core_48m_fck_parent_names[] = {
+ "omap_48m_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
+
+static const char *omap_96m_fck_parent_names[] = {
+ "cm_96m_fck", "sys_ck",
+};
+
+DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);
+
+static struct clk core_96m_fck;
+
+static const char *core_96m_fck_parent_names[] = {
+ "omap_96m_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);
+
+static struct clk core_l3_ick;
+
+static const char *core_l3_ick_parent_names[] = {
+ "l3_ick",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
+DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);
+
+DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);
+
+static struct clk corex2_fck;
+
+static const char *corex2_fck_parent_names[] = {
+ "dpll3_m2x2_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
+DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
+
+static struct clk cpefuse_fck;
+
+static struct clk_hw_omap cpefuse_fck_hw = {
+ .hw = {
+ .clk = &cpefuse_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(cpefuse_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk csi2_96m_fck;
+
+static const char *csi2_96m_fck_parent_names[] = {
+ "core_96m_fck",
+};
+
+static struct clk_hw_omap csi2_96m_fck_hw = {
+ .hw = {
+ .clk = &csi2_96m_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_CSI2_SHIFT,
+ .clkdm_name = "cam_clkdm",
+};
+
+DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk d2d_26m_fck;
+
+static struct clk_hw_omap d2d_26m_fck_hw = {
+ .hw = {
+ .clk = &d2d_26m_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES1_EN_D2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+};
+
+DEFINE_STRUCT_CLK(d2d_26m_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk des1_ick;
+
+static struct clk_hw_omap des1_ick_hw = {
+ .hw = {
+ .clk = &des1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_DES1_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);
+
+static struct clk des2_ick;
+
+static struct clk_hw_omap des2_ick_hw = {
+ .hw = {
+ .clk = &des2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_DES2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll2_fck;
+
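+/* DPLL2: IVA2 DPLL; sys_ck is the reference clock, dpll2_fck the bypass clock */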
+static struct dpll_data dpll2_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK,
+ .clk_bypass = &dpll2_fck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
+ .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK,
+ .modes = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
+ (1 << DPLL_LOW_POWER_BYPASS)),
+ .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
+ .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
+ .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+};
+
+static struct clk dpll2_ck;
+
+static struct clk_hw_omap dpll2_ck_hw = {
+ .hw = {
+ .clk = &dpll2_ck,
+ },
+ .ops = &clkhwops_omap3_dpll,
+ .dpll_data = &dpll2_dd,
+ .clkdm_name = "dpll2_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);
+
+DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
+ OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
+ OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll3_m3x2_ck;
+
+static const char *dpll3_m3x2_ck_parent_names[] = {
+ "dpll3_m3_ck",
+};
+
+static struct clk_hw_omap dpll3_m3x2_ck_hw = {
+ .hw = {
+ .clk = &dpll3_m3x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll3_m3x2_ck_3630 = {
+ .name = "dpll3_m3x2_ck",
+ .hw = &dpll3_m3x2_ck_hw.hw,
+ .parent_names = dpll3_m3x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);
+
+DEFINE_CLK_DIVIDER(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll4_m4x2_ck;
+
+static const char *dpll4_m4x2_ck_parent_names[] = {
+ "dpll4_m4_ck",
+};
+
+static struct clk_hw_omap dpll4_m4x2_ck_hw = {
+ .hw = {
+ .clk = &dpll4_m4x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll4_m4x2_ck_3630 = {
+ .name = "dpll4_m4x2_ck",
+ .hw = &dpll4_m4x2_ck_hw.hw,
+ .parent_names = dpll4_m4x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk dpll4_m6x2_ck;
+
+static const char *dpll4_m6x2_ck_parent_names[] = {
+ "dpll4_m6_ck",
+};
+
+static struct clk_hw_omap dpll4_m6x2_ck_hw = {
+ .hw = {
+ .clk = &dpll4_m6x2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);
+
+static struct clk dpll4_m6x2_ck_3630 = {
+ .name = "dpll4_m6x2_ck",
+ .hw = &dpll4_m6x2_ck_hw.hw,
+ .parent_names = dpll4_m6x2_ck_parent_names,
+ .num_parents = ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
+ .ops = &dpll4_m5x2_ck_3630_ops,
+};
+
+DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);
+
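+/* DPLL5 (3430 ES2+): PERIPH2 DPLL supplying the 120 MHz clock via dpll5_m2_ck */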
+static struct dpll_data dpll5_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
+ .mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
+ .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
+ .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
+ .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+};
+
+static struct clk dpll5_ck;
+
+static struct clk_hw_omap dpll5_ck_hw = {
+ .hw = {
+ .clk = &dpll5_ck,
+ },
+ .ops = &clkhwops_omap3_dpll,
+ .dpll_data = &dpll5_dd,
+ .clkdm_name = "dpll5_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);
+
+DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
+ OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
+ OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
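+/* DSS clocks: the ES1 and ES2+ variants differ only in their hardware ops (ES2+ waits for the module to become accessible) */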
+static struct clk dss1_alwon_fck_3430es1;
+
+static const char *dss1_alwon_fck_3430es1_parent_names[] = {
+ "dpll4_m4x2_ck",
+};
+
+static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
+ .hw = {
+ .clk = &dss1_alwon_fck_3430es1,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss1_alwon_fck_3430es1, dss1_alwon_fck_3430es1_parent_names,
+ aes2_ick_ops);
+
+static struct clk dss1_alwon_fck_3430es2;
+
+static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
+ .hw = {
+ .clk = &dss1_alwon_fck_3430es2,
+ },
+ .ops = &clkhwops_omap3430es2_dss_usbhost_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss1_alwon_fck_3430es2, dss1_alwon_fck_3430es1_parent_names,
+ aes2_ick_ops);
+
+static struct clk dss2_alwon_fck;
+
+static struct clk_hw_omap dss2_alwon_fck_hw = {
+ .hw = {
+ .clk = &dss2_alwon_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS2_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss2_alwon_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk dss_96m_fck;
+
+static struct clk_hw_omap dss_96m_fck_hw = {
+ .hw = {
+ .clk = &dss_96m_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk dss_ick_3430es1;
+
+static struct clk_hw_omap dss_ick_3430es1_hw = {
+ .hw = {
+ .clk = &dss_ick_3430es1,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
+
+static struct clk dss_ick_3430es2;
+
+static struct clk_hw_omap dss_ick_3430es2_hw = {
+ .hw = {
+ .clk = &dss_ick_3430es2,
+ },
+ .ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
+
+static struct clk dss_tv_fck;
+
+static const char *dss_tv_fck_parent_names[] = {
+ "omap_54m_fck",
+};
+
+static struct clk_hw_omap dss_tv_fck_hw = {
+ .hw = {
+ .clk = &dss_tv_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+};
+
+DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
+
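+/* AM35xx IPSS clocks (EMAC, HECC, USB OTG): gated through the IPSS clock control register in the system control module */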
+static struct clk emac_fck;
+
+static const char *emac_fck_parent_names[] = {
+ "rmii_ck",
+};
+
+static struct clk_hw_omap emac_fck_hw = {
+ .hw = {
+ .clk = &emac_fck,
+ },
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_CPGMAC_FCLK_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
+
+static struct clk ipss_ick;
+
+static const char *ipss_ick_parent_names[] = {
+ "core_l3_ick",
+};
+
+static struct clk_hw_omap ipss_ick_hw = {
+ .hw = {
+ .clk = &ipss_ick,
+ },
+ .ops = &clkhwops_am35xx_ipss_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = AM35XX_EN_IPSS_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
+
+static struct clk emac_ick;
+
+static const char *emac_ick_parent_names[] = {
+ "ipss_ick",
+};
+
+static struct clk_hw_omap emac_ick_hw = {
+ .hw = {
+ .clk = &emac_ick,
+ },
+ .ops = &clkhwops_am35xx_ipss_module_wait,
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
+
+static struct clk emu_core_alwon_ck;
+
+static const char *emu_core_alwon_ck_parent_names[] = {
+ "dpll3_m3x2_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
+DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
+ core_l4_ick_ops);
+
+static struct clk emu_mpu_alwon_ck;
+
+static const char *emu_mpu_alwon_ck_parent_names[] = {
+ "mpu_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
+DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);
+
+static struct clk emu_per_alwon_ck;
+
+static const char *emu_per_alwon_ck_parent_names[] = {
+ "dpll4_m6x2_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
+DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
+ core_l4_ick_ops);
+
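+/* EMU trace source clock: selects between sys_ck and the core/per/mpu always-on clocks */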
+static const char *emu_src_ck_parent_names[] = {
+ "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
+};
+
+static const struct clksel_rate emu_src_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_per_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_mpu_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 0 },
+};
+
+static const struct clksel emu_src_clksel[] = {
+ { .parent = &sys_ck, .rates = emu_src_sys_rates },
+ { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
+ { .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates },
+ { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates },
+ { .parent = NULL },
+};
+
+static const struct clk_ops emu_src_ck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+static struct clk emu_src_ck;
+
+static struct clk_hw_omap emu_src_ck_hw = {
+ .hw = {
+ .clk = &emu_src_ck,
+ },
+ .clksel = emu_src_clksel,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_MUX_CTRL_MASK,
+ .clkdm_name = "emu_clkdm",
+};
+
+DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk fac_ick;
+
+static struct clk_hw_omap fac_ick_hw = {
+ .hw = {
+ .clk = &fac_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430ES1_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk fshostusb_fck;
+
+static const char *fshostusb_fck_parent_names[] = {
+ "core_48m_fck",
+};
+
+static struct clk_hw_omap fshostusb_fck_hw = {
+ .hw = {
+ .clk = &fshostusb_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk gfx_l3_ck;
+
+static struct clk_hw_omap gfx_l3_ck_hw = {
+ .hw = {
+ .clk = &gfx_l3_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP_EN_GFX_SHIFT,
+ .clkdm_name = "gfx_3430es1_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);
+
+DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
+ OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk gfx_cg1_ck;
+
+static const char *gfx_cg1_ck_parent_names[] = {
+ "gfx_l3_fck",
+};
+
+static struct clk_hw_omap gfx_cg1_ck_hw = {
+ .hw = {
+ .clk = &gfx_cg1_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES1_EN_2D_SHIFT,
+ .clkdm_name = "gfx_3430es1_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
+
+static struct clk gfx_cg2_ck;
+
+static struct clk_hw_omap gfx_cg2_ck_hw = {
+ .hw = {
+ .clk = &gfx_cg2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES1_EN_3D_SHIFT,
+ .clkdm_name = "gfx_3430es1_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
+
+static struct clk gfx_l3_ick;
+
+static const char *gfx_l3_ick_parent_names[] = {
+ "gfx_l3_ck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
+DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);
+
+static struct clk wkup_32k_fck;
+
+static const char *wkup_32k_fck_parent_names[] = {
+ "omap_32k_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);
+
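+/* GPIO clocks: each bank has a 32 kHz debounce clock (dbck) and an interface clock (ick) */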
+static struct clk gpio1_dbck;
+
+static const char *gpio1_dbck_parent_names[] = {
+ "wkup_32k_fck",
+};
+
+static struct clk_hw_omap gpio1_dbck_hw = {
+ .hw = {
+ .clk = &gpio1_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
+
+static struct clk wkup_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
+DEFINE_STRUCT_CLK(wkup_l4_ick, dpll3_ck_parent_names, core_l4_ick_ops);
+
+static struct clk gpio1_ick;
+
+static const char *gpio1_ick_parent_names[] = {
+ "wkup_l4_ick",
+};
+
+static struct clk_hw_omap gpio1_ick_hw = {
+ .hw = {
+ .clk = &gpio1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+static struct clk per_32k_alwon_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
+DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
+ core_l4_ick_ops);
+
+static struct clk gpio2_dbck;
+
+static const char *gpio2_dbck_parent_names[] = {
+ "per_32k_alwon_fck",
+};
+
+static struct clk_hw_omap gpio2_dbck_hw = {
+ .hw = {
+ .clk = &gpio2_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk per_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
+DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
+
+static struct clk gpio2_ick;
+
+static const char *gpio2_ick_parent_names[] = {
+ "per_l4_ick",
+};
+
+static struct clk_hw_omap gpio2_ick_hw = {
+ .hw = {
+ .clk = &gpio2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpio3_dbck;
+
+static struct clk_hw_omap gpio3_dbck_hw = {
+ .hw = {
+ .clk = &gpio3_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk gpio3_ick;
+
+static struct clk_hw_omap gpio3_ick_hw = {
+ .hw = {
+ .clk = &gpio3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpio4_dbck;
+
+static struct clk_hw_omap gpio4_dbck_hw = {
+ .hw = {
+ .clk = &gpio4_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk gpio4_ick;
+
+static struct clk_hw_omap gpio4_ick_hw = {
+ .hw = {
+ .clk = &gpio4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpio5_dbck;
+
+static struct clk_hw_omap gpio5_dbck_hw = {
+ .hw = {
+ .clk = &gpio5_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk gpio5_ick;
+
+static struct clk_hw_omap gpio5_ick_hw = {
+ .hw = {
+ .clk = &gpio5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpio6_dbck;
+
+static struct clk_hw_omap gpio6_dbck_hw = {
+ .hw = {
+ .clk = &gpio6_dbck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk gpio6_ick;
+
+static struct clk_hw_omap gpio6_ick_hw = {
+ .hw = {
+ .clk = &gpio6_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpmc_fck;
+
+static struct clk_hw_omap gpmc_fck_hw = {
+ .hw = {
+ .clk = &gpmc_fck,
+ },
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);
+
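+/* GPTimer functional clocks: each timer muxes between omap_32k_fck and sys_ck */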
+static const struct clksel omap343x_gpt_clksel[] = {
+ { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
+ { .parent = &sys_ck, .rates = gpt_sys_rates },
+ { .parent = NULL },
+};
+
+static const char *gpt10_fck_parent_names[] = {
+ "omap_32k_fck", "sys_ck",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT10_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt10_ick;
+
+static struct clk_hw_omap gpt10_ick_hw = {
+ .hw = {
+ .clk = &gpt10_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_GPT10_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT11_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt11_ick;
+
+static struct clk_hw_omap gpt11_ick_hw = {
+ .hw = {
+ .clk = &gpt11_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_GPT11_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk gpt12_fck;
+
+static const char *gpt12_fck_parent_names[] = {
+ "secure_32k_fck",
+};
+
+DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
+
+static struct clk gpt12_ick;
+
+static struct clk_hw_omap gpt12_ick_hw = {
+ .hw = {
+ .clk = &gpt12_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT12_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT1_MASK,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt1_ick;
+
+static struct clk_hw_omap gpt1_ick_hw = {
+ .hw = {
+ .clk = &gpt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT2_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt2_ick;
+
+static struct clk_hw_omap gpt2_ick_hw = {
+ .hw = {
+ .clk = &gpt2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT2_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT3_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt3_ick;
+
+static struct clk_hw_omap gpt3_ick_hw = {
+ .hw = {
+ .clk = &gpt3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT4_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt4_ick;
+
+static struct clk_hw_omap gpt4_ick_hw = {
+ .hw = {
+ .clk = &gpt4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT5_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt5_ick;
+
+static struct clk_hw_omap gpt5_ick_hw = {
+ .hw = {
+ .clk = &gpt5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT5_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT6_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt6_ick;
+
+static struct clk_hw_omap gpt6_ick_hw = {
+ .hw = {
+ .clk = &gpt6_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT6_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT7_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt7_ick;
+
+static struct clk_hw_omap gpt7_ick_hw = {
+ .hw = {
+ .clk = &gpt7_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT7_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT8_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt8_ick;
+
+static struct clk_hw_omap gpt8_ick_hw = {
+ .hw = {
+ .clk = &gpt8_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT8_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_GPT9_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
+ gpt10_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk gpt9_ick;
+
+static struct clk_hw_omap gpt9_ick_hw = {
+ .hw = {
+ .clk = &gpt9_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT9_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk hdq_fck;
+
+static const char *hdq_fck_parent_names[] = {
+ "core_12m_fck",
+};
+
+static struct clk_hw_omap hdq_fck_hw = {
+ .hw = {
+ .clk = &hdq_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
+
+static struct clk hdq_ick;
+
+static struct clk_hw_omap hdq_ick_hw = {
+ .hw = {
+ .clk = &hdq_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk hecc_ck;
+
+static struct clk_hw_omap hecc_ck_hw = {
+ .hw = {
+ .clk = &hecc_ck,
+ },
+ .ops = &clkhwops_am35xx_ipss_module_wait,
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_HECC_VBUSP_CLK_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hecc_ck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk hsotgusb_fck_am35xx;
+
+static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
+ .hw = {
+ .clk = &hsotgusb_fck_am35xx,
+ },
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_USBOTG_FCLK_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk hsotgusb_ick_3430es1;
+
+static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
+ .hw = {
+ .clk = &hsotgusb_ick_3430es1,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);
+
+static struct clk hsotgusb_ick_3430es2;
+
+static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
+ .hw = {
+ .clk = &hsotgusb_ick_3430es2,
+ },
+ .ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);
+
+static struct clk hsotgusb_ick_am35xx;
+
+static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
+ .hw = {
+ .clk = &hsotgusb_ick_am35xx,
+ },
+ .ops = &clkhwops_am35xx_ipss_module_wait,
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);
+
+static struct clk i2c1_fck;
+
+static struct clk_hw_omap i2c1_fck_hw = {
+ .hw = {
+ .clk = &i2c1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk i2c1_ick;
+
+static struct clk_hw_omap i2c1_ick_hw = {
+ .hw = {
+ .clk = &i2c1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk i2c2_fck;
+
+static struct clk_hw_omap i2c2_fck_hw = {
+ .hw = {
+ .clk = &i2c2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk i2c2_ick;
+
+static struct clk_hw_omap i2c2_ick_hw = {
+ .hw = {
+ .clk = &i2c2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk i2c3_fck;
+
+static struct clk_hw_omap i2c3_fck_hw = {
+ .hw = {
+ .clk = &i2c3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk i2c3_ick;
+
+static struct clk_hw_omap i2c3_ick_hw = {
+ .hw = {
+ .clk = &i2c3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk icr_ick;
+
+static struct clk_hw_omap icr_ick_hw = {
+ .hw = {
+ .clk = &icr_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_ICR_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk iva2_ck;
+
+static const char *iva2_ck_parent_names[] = {
+ "dpll2_m2_ck",
+};
+
+static struct clk_hw_omap iva2_ck_hw = {
+ .hw = {
+ .clk = &iva2_ck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+ .clkdm_name = "iva2_clkdm",
+};
+
+DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);
+
+static struct clk mad2d_ick;
+
+static struct clk_hw_omap mad2d_ick_hw = {
+ .hw = {
+ .clk = &mad2d_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP3430_EN_MAD2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
+
+static struct clk mailboxes_ick;
+
+static struct clk_hw_omap mailboxes_ick_hw = {
+ .hw = {
+ .clk = &mailboxes_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);
+
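+/* McBSP functional clocks: McBSP1/5 select core_96m_fck or mcbsp_clks, McBSP2/3/4 select per_96m_fck or mcbsp_clks */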
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_15_clksel[] = {
+ { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp1_fck_parent_names[] = {
+ "core_96m_fck", "mcbsp_clks",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
+ OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP1_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk mcbsp1_ick;
+
+static struct clk_hw_omap mcbsp1_ick_hw = {
+ .hw = {
+ .clk = &mcbsp1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk per_96m_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
+DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);
+
+static const struct clksel mcbsp_234_clksel[] = {
+ { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp2_fck_parent_names[] = {
+ "per_96m_fck", "mcbsp_clks",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
+ OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ OMAP2_MCBSP2_CLKS_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
+ mcbsp2_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk mcbsp2_ick;
+
+static struct clk_hw_omap mcbsp2_ick_hw = {
+ .hw = {
+ .clk = &mcbsp2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
+ OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP3_CLKS_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
+ mcbsp2_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk mcbsp3_ick;
+
+static struct clk_hw_omap mcbsp3_ick_hw = {
+ .hw = {
+ .clk = &mcbsp3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
+ OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP4_CLKS_MASK,
+ OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
+ mcbsp2_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk mcbsp4_ick;
+
+static struct clk_hw_omap mcbsp4_ick_hw = {
+ .hw = {
+ .clk = &mcbsp4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
+ OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ OMAP2_MCBSP5_CLKS_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
+ mcbsp1_fck_parent_names, clkout2_src_ck_ops);
+
+static struct clk mcbsp5_ick;
+
+static struct clk_hw_omap mcbsp5_ick_hw = {
+ .hw = {
+ .clk = &mcbsp5_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mcspi1_fck;
+
+static struct clk_hw_omap mcspi1_fck_hw = {
+ .hw = {
+ .clk = &mcspi1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk mcspi1_ick;
+
+static struct clk_hw_omap mcspi1_ick_hw = {
+ .hw = {
+ .clk = &mcspi1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mcspi2_fck;
+
+static struct clk_hw_omap mcspi2_fck_hw = {
+ .hw = {
+ .clk = &mcspi2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk mcspi2_ick;
+
+static struct clk_hw_omap mcspi2_ick_hw = {
+ .hw = {
+ .clk = &mcspi2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mcspi3_fck;
+
+static struct clk_hw_omap mcspi3_fck_hw = {
+ .hw = {
+ .clk = &mcspi3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk mcspi3_ick;
+
+static struct clk_hw_omap mcspi3_ick_hw = {
+ .hw = {
+ .clk = &mcspi3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mcspi4_fck;
+
+static struct clk_hw_omap mcspi4_fck_hw = {
+ .hw = {
+ .clk = &mcspi4_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk mcspi4_ick;
+
+static struct clk_hw_omap mcspi4_ick_hw = {
+ .hw = {
+ .clk = &mcspi4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mmchs1_fck;
+
+static struct clk_hw_omap mmchs1_fck_hw = {
+ .hw = {
+ .clk = &mmchs1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MMC1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk mmchs1_ick;
+
+static struct clk_hw_omap mmchs1_ick_hw = {
+ .hw = {
+ .clk = &mmchs1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MMC1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mmchs2_fck;
+
+static struct clk_hw_omap mmchs2_fck_hw = {
+ .hw = {
+ .clk = &mmchs2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MMC2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk mmchs2_ick;
+
+static struct clk_hw_omap mmchs2_ick_hw = {
+ .hw = {
+ .clk = &mmchs2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MMC2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk mmchs3_fck;
+
+static struct clk_hw_omap mmchs3_fck_hw = {
+ .hw = {
+ .clk = &mmchs3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk mmchs3_ick;
+
+static struct clk_hw_omap mmchs3_ick_hw = {
+ .hw = {
+ .clk = &mmchs3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk modem_fck;
+
+static struct clk_hw_omap modem_fck_hw = {
+ .hw = {
+ .clk = &modem_fck,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MODEM_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+};
+
+DEFINE_STRUCT_CLK(modem_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk mspro_fck;
+
+static struct clk_hw_omap mspro_fck_hw = {
+ .hw = {
+ .clk = &mspro_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
+
+static struct clk mspro_ick;
+
+static struct clk_hw_omap mspro_ick_hw = {
+ .hw = {
+ .clk = &mspro_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk omap_192m_alwon_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
+DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
+ core_ck_ops);
+
+static struct clk omap_32ksync_ick;
+
+static struct clk_hw_omap omap_32ksync_ick_hw = {
+ .hw = {
+ .clk = &omap_32ksync_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_32KSYNC_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_36XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_36XX },
+ { .div = 0 }
+};
+
+static const struct clksel omap_96m_alwon_fck_clksel[] = {
+ { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
+ { .parent = NULL }
+};
+
+static struct clk omap_96m_alwon_fck_3630;
+
+static const char *omap_96m_alwon_fck_3630_parent_names[] = {
+ "omap_192m_alwon_fck",
+};
+
+static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
+ .set_rate = &omap2_clksel_set_rate,
+ .recalc_rate = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
+ .hw = {
+ .clk = &omap_96m_alwon_fck_3630,
+ },
+ .clksel = omap_96m_alwon_fck_clksel,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3630_CLKSEL_96M_MASK,
+};
+
+static struct clk omap_96m_alwon_fck_3630 = {
+ .name = "omap_96m_alwon_fck",
+ .hw = &omap_96m_alwon_fck_3630_hw.hw,
+ .parent_names = omap_96m_alwon_fck_3630_parent_names,
+ .num_parents = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
+ .ops = &omap_96m_alwon_fck_3630_ops,
+};
+
+static struct clk omapctrl_ick;
+
+static struct clk_hw_omap omapctrl_ick_hw = {
+ .hw = {
+ .clk = &omapctrl_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk per_48m_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
+DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
+
+static struct clk security_l3_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
+DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);
+
+static struct clk pka_ick;
+
+static const char *pka_ick_parent_names[] = {
+ "security_l3_ick",
+};
+
+static struct clk_hw_omap pka_ick_hw = {
+ .hw = {
+ .clk = &pka_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_PKA_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);
+
+DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
+ CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk rng_ick;
+
+static struct clk_hw_omap rng_ick_hw = {
+ .hw = {
+ .clk = &rng_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_RNG_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);
+
+static struct clk sad2d_ick;
+
+static struct clk_hw_omap sad2d_ick_hw = {
+ .hw = {
+ .clk = &sad2d_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SAD2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
+
+static struct clk sdrc_ick;
+
+static struct clk_hw_omap sdrc_ick_hw = {
+ .hw = {
+ .clk = &sdrc_ick,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SDRC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);
+
+static const struct clksel_rate sgx_core_rates[] = {
+ { .div = 2, .val = 5, .flags = RATE_IN_36XX },
+ { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate sgx_96m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate sgx_192m_rates[] = {
+ { .div = 1, .val = 4, .flags = RATE_IN_36XX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate sgx_corex2_rates[] = {
+ { .div = 3, .val = 6, .flags = RATE_IN_36XX },
+ { .div = 5, .val = 7, .flags = RATE_IN_36XX },
+ { .div = 0 }
+};
+
+static const struct clksel sgx_clksel[] = {
+ { .parent = &core_ck, .rates = sgx_core_rates },
+ { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
+ { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
+ { .parent = &corex2_fck, .rates = sgx_corex2_rates },
+ { .parent = NULL },
+};
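+
+/*
+ * In the clksel tables above, .div is the divider applied to the listed
+ * parent and .val is the raw value programmed into the corresponding
+ * CM_CLKSEL field; .flags restricts an entry to the SoC revisions that
+ * support that encoding (e.g. the core_ck/2 and 192 MHz options are
+ * 36xx-only).
+ */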
+
+static const char *sgx_fck_parent_names[] = {
+ "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
+};
+
+static struct clk sgx_fck;
+
+static const struct clk_ops sgx_fck_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
+ OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
+ OMAP3430ES2_CLKSEL_SGX_MASK,
+ OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
+ OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
+ &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
+
+static struct clk sgx_ick;
+
+static struct clk_hw_omap sgx_ick_hw = {
+ .hw = {
+ .clk = &sgx_ick,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
+ .clkdm_name = "sgx_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);
+
+static struct clk sha11_ick;
+
+static struct clk_hw_omap sha11_ick_hw = {
+ .hw = {
+ .clk = &sha11_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_SHA11_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);
+
+static struct clk sha12_ick;
+
+static struct clk_hw_omap sha12_ick_hw = {
+ .hw = {
+ .clk = &sha12_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SHA12_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk sr1_fck;
+
+static struct clk_hw_omap sr1_fck_hw = {
+ .hw = {
+ .clk = &sr1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_SR1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sr1_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk sr2_fck;
+
+static struct clk_hw_omap sr2_fck_hw = {
+ .hw = {
+ .clk = &sr2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_SR2_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(sr2_fck, dpll3_ck_parent_names, aes2_ick_ops);
+
+static struct clk sr_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
+
+static struct clk ssi_l4_ick;
+
+DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
+DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
+
+static struct clk ssi_ick_3430es1;
+
+static const char *ssi_ick_3430es1_parent_names[] = {
+ "ssi_l4_ick",
+};
+
+static struct clk_hw_omap ssi_ick_3430es1_hw = {
+ .hw = {
+ .clk = &ssi_ick_3430es1,
+ },
+ .ops = &clkhwops_iclk,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);
+
+static struct clk ssi_ick_3430es2;
+
+static struct clk_hw_omap ssi_ick_3430es2_hw = {
+ .hw = {
+ .clk = &ssi_ick_3430es2,
+ },
+ .ops = &clkhwops_omap3430es2_iclk_ssi_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);
+
+static const struct clksel_rate ssi_ssr_corex2_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel ssi_ssr_clksel[] = {
+ { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
+ { .parent = NULL },
+};
+
+static const char *ssi_ssr_fck_3430es1_parent_names[] = {
+ "corex2_fck",
+};
+
+static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
+ ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_SSI_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_SSI_SHIFT,
+ NULL, ssi_ssr_fck_3430es1_parent_names,
+ ssi_ssr_fck_3430es1_ops);
+
+DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
+ ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430_CLKSEL_SSI_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ OMAP3430_EN_SSI_SHIFT,
+ NULL, ssi_ssr_fck_3430es1_parent_names,
+ ssi_ssr_fck_3430es1_ops);
+
+DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
+ &ssi_ssr_fck_3430es1, 0x0, 1, 2);
+
+DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
+ &ssi_ssr_fck_3430es2, 0x0, 1, 2);
+
+static struct clk sys_clkout1;
+
+static const char *sys_clkout1_parent_names[] = {
+ "osc_sys_ck",
+};
+
+static struct clk_hw_omap sys_clkout1_hw = {
+ .hw = {
+ .clk = &sys_clkout1,
+ },
+ .enable_reg = OMAP3430_PRM_CLKOUT_CTRL,
+ .enable_bit = OMAP3430_CLKOUT_EN_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);
+
+DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
+ OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
+ OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
+ OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ OMAP3430_CLKSEL_TRACECLK_SHIFT,
+ OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+
+static struct clk ts_fck;
+
+static struct clk_hw_omap ts_fck_hw = {
+ .hw = {
+ .clk = &ts_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_TS_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart1_fck;
+
+static struct clk_hw_omap uart1_fck_hw = {
+ .hw = {
+ .clk = &uart1_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart1_ick;
+
+static struct clk_hw_omap uart1_ick_hw = {
+ .hw = {
+ .clk = &uart1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk uart2_fck;
+
+static struct clk_hw_omap uart2_fck_hw = {
+ .hw = {
+ .clk = &uart2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart2_ick;
+
+static struct clk_hw_omap uart2_ick_hw = {
+ .hw = {
+ .clk = &uart2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static struct clk uart3_fck;
+
+static const char *uart3_fck_parent_names[] = {
+ "per_48m_fck",
+};
+
+static struct clk_hw_omap uart3_fck_hw = {
+ .hw = {
+ .clk = &uart3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_UART3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart3_ick;
+
+static struct clk_hw_omap uart3_ick_hw = {
+ .hw = {
+ .clk = &uart3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_UART3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk uart4_fck;
+
+static struct clk_hw_omap uart4_fck_hw = {
+ .hw = {
+ .clk = &uart4_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3630_EN_UART4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart4_fck_am35xx;
+
+static struct clk_hw_omap uart4_fck_am35xx_hw = {
+ .hw = {
+ .clk = &uart4_fck_am35xx,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = AM35XX_EN_UART4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);
+
+static struct clk uart4_ick;
+
+static struct clk_hw_omap uart4_ick_hw = {
+ .hw = {
+ .clk = &uart4_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3630_EN_UART4_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+static struct clk uart4_ick_am35xx;
+
+static struct clk_hw_omap uart4_ick_am35xx_hw = {
+ .hw = {
+ .clk = &uart4_ick_am35xx,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = AM35XX_EN_UART4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);
+
+static const struct clksel_rate div2_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel usb_l4_clksel[] = {
+ { .parent = &l4_ick, .rates = div2_rates },
+ { .parent = NULL },
+};
+
+static const char *usb_l4_ick_parent_names[] = {
+ "l4_ick",
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
+ OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
+ OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
+ &clkhwops_iclk_wait, usb_l4_ick_parent_names,
+ ssi_ssr_fck_3430es1_ops);
+
+static struct clk usbhost_120m_fck;
+
+static const char *usbhost_120m_fck_parent_names[] = {
+ "dpll5_m2_ck",
+};
+
+static struct clk_hw_omap usbhost_120m_fck_hw = {
+ .hw = {
+ .clk = &usbhost_120m_fck,
+ },
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
+ aes2_ick_ops);
+
+static struct clk usbhost_48m_fck;
+
+static struct clk_hw_omap usbhost_48m_fck_hw = {
+ .hw = {
+ .clk = &usbhost_48m_fck,
+ },
+ .ops = &clkhwops_omap3430es2_dss_usbhost_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);
+
+static struct clk usbhost_ick;
+
+static struct clk_hw_omap usbhost_ick_hw = {
+ .hw = {
+ .clk = &usbhost_ick,
+ },
+ .ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);
+
+static struct clk usbtll_fck;
+
+static struct clk_hw_omap usbtll_fck_hw = {
+ .hw = {
+ .clk = &usbtll_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);
+
+static struct clk usbtll_ick;
+
+static struct clk_hw_omap usbtll_ick_hw = {
+ .hw = {
+ .clk = &usbtll_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);
+
+static const struct clksel_rate usim_96m_rates[] = {
+ { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
+ { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel_rate usim_120m_rates[] = {
+ { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+ { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
+ { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
+ { .div = 0 }
+};
+
+static const struct clksel usim_clksel[] = {
+ { .parent = &omap_96m_fck, .rates = usim_96m_rates },
+ { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
+ { .parent = &sys_ck, .rates = div2_rates },
+ { .parent = NULL },
+};
+
+static const char *usim_fck_parent_names[] = {
+ "omap_96m_fck", "dpll5_m2_ck", "sys_ck",
+};
+
+static struct clk usim_fck;
+
+static const struct clk_ops usim_fck_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ OMAP3430ES2_CLKSEL_USIMOCP_MASK,
+ OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
+ usim_fck_parent_names, usim_fck_ops);
+
+static struct clk usim_ick;
+
+static struct clk_hw_omap usim_ick_hw = {
+ .hw = {
+ .clk = &usim_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+static struct clk vpfe_fck;
+
+static const char *vpfe_fck_parent_names[] = {
+ "pclk_ck",
+};
+
+static struct clk_hw_omap vpfe_fck_hw = {
+ .hw = {
+ .clk = &vpfe_fck,
+ },
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_VPFE_FCLK_SHIFT,
+};
+
+DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);
+
+static struct clk vpfe_ick;
+
+static struct clk_hw_omap vpfe_ick_hw = {
+ .hw = {
+ .clk = &vpfe_ick,
+ },
+ .ops = &clkhwops_am35xx_ipss_module_wait,
+ .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
+ .enable_bit = AM35XX_VPFE_VBUSP_CLK_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+};
+
+DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);
+
+static struct clk wdt1_fck;
+
+DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
+DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);
+
+static struct clk wdt1_ick;
+
+static struct clk_hw_omap wdt1_ick_hw = {
+ .hw = {
+ .clk = &wdt1_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+static struct clk wdt2_fck;
+
+static struct clk_hw_omap wdt2_fck_hw = {
+ .hw = {
+ .clk = &wdt2_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_WDT2_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);
+
+static struct clk wdt2_ick;
+
+static struct clk_hw_omap wdt2_ick_hw = {
+ .hw = {
+ .clk = &wdt2_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT2_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);
+
+static struct clk wdt3_fck;
+
+static struct clk_hw_omap wdt3_fck_hw = {
+ .hw = {
+ .clk = &wdt3_fck,
+ },
+ .ops = &clkhwops_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_WDT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);
+
+static struct clk wdt3_ick;
+
+static struct clk_hw_omap wdt3_ick_hw = {
+ .hw = {
+ .clk = &wdt3_ick,
+ },
+ .ops = &clkhwops_iclk_wait,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+};
+
+DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
+
+/*
+ * clkdev
+ */
+static struct omap_clk omap3xxx_clks[] = {
+ CLK(NULL, "apb_pclk", &dummy_apb_pclk, CK_3XXX),
+ CLK(NULL, "omap_32k_fck", &omap_32k_fck, CK_3XXX),
+ CLK(NULL, "virt_12m_ck", &virt_12m_ck, CK_3XXX),
+ CLK(NULL, "virt_13m_ck", &virt_13m_ck, CK_3XXX),
+ CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_3XXX),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_3XXX),
+ CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck, CK_3XXX),
+ CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_3XXX),
+ CLK("twl", "fck", &osc_sys_ck, CK_3XXX),
+ CLK(NULL, "sys_ck", &sys_ck, CK_3XXX),
+ CLK(NULL, "sys_altclk", &sys_altclk, CK_3XXX),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_3XXX),
+ CLK(NULL, "sys_clkout1", &sys_clkout1, CK_3XXX),
+ CLK(NULL, "dpll1_ck", &dpll1_ck, CK_3XXX),
+ CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck, CK_3XXX),
+ CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck, CK_3XXX),
+ CLK(NULL, "dpll2_ck", &dpll2_ck, CK_34XX | CK_36XX),
+ CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck, CK_34XX | CK_36XX),
+ CLK(NULL, "dpll3_ck", &dpll3_ck, CK_3XXX),
+ CLK(NULL, "core_ck", &core_ck, CK_3XXX),
+ CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck, CK_3XXX),
+ CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck, CK_3XXX),
+ CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck, CK_3XXX),
+ CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck, CK_3XXX),
+ CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck, CK_3XXX),
+ CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck, CK_3XXX),
+ CLK(NULL, "dpll4_ck", &dpll4_ck, CK_3XXX),
+ CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck, CK_3XXX),
+ CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck, CK_36XX),
+ CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck, CK_3XXX),
+ CLK(NULL, "omap_96m_fck", &omap_96m_fck, CK_3XXX),
+ CLK(NULL, "cm_96m_fck", &cm_96m_fck, CK_3XXX),
+ CLK(NULL, "omap_54m_fck", &omap_54m_fck, CK_3XXX),
+ CLK(NULL, "omap_48m_fck", &omap_48m_fck, CK_3XXX),
+ CLK(NULL, "omap_12m_fck", &omap_12m_fck, CK_3XXX),
+ CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck, CK_3XXX),
+ CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck, CK_3XXX),
+ CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck, CK_3XXX),
+ CLK(NULL, "dpll5_ck", &dpll5_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "clkout2_src_ck", &clkout2_src_ck, CK_3XXX),
+ CLK(NULL, "sys_clkout2", &sys_clkout2, CK_3XXX),
+ CLK(NULL, "corex2_fck", &corex2_fck, CK_3XXX),
+ CLK(NULL, "dpll1_fck", &dpll1_fck, CK_3XXX),
+ CLK(NULL, "mpu_ck", &mpu_ck, CK_3XXX),
+ CLK(NULL, "arm_fck", &arm_fck, CK_3XXX),
+ CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_3XXX),
+ CLK(NULL, "dpll2_fck", &dpll2_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "iva2_ck", &iva2_ck, CK_34XX | CK_36XX),
+ CLK(NULL, "l3_ick", &l3_ick, CK_3XXX),
+ CLK(NULL, "l4_ick", &l4_ick, CK_3XXX),
+ CLK(NULL, "rm_ick", &rm_ick, CK_3XXX),
+ CLK(NULL, "gfx_l3_ck", &gfx_l3_ck, CK_3430ES1),
+ CLK(NULL, "gfx_l3_fck", &gfx_l3_fck, CK_3430ES1),
+ CLK(NULL, "gfx_l3_ick", &gfx_l3_ick, CK_3430ES1),
+ CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck, CK_3430ES1),
+ CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck, CK_3430ES1),
+ CLK(NULL, "sgx_fck", &sgx_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "sgx_ick", &sgx_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "d2d_26m_fck", &d2d_26m_fck, CK_3430ES1),
+ CLK(NULL, "modem_fck", &modem_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "sad2d_ick", &sad2d_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "mad2d_ick", &mad2d_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "gpt10_fck", &gpt10_fck, CK_3XXX),
+ CLK(NULL, "gpt11_fck", &gpt11_fck, CK_3XXX),
+ CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_tll", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
+ CLK(NULL, "mmchs3_fck", &mmchs3_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "mmchs2_fck", &mmchs2_fck, CK_3XXX),
+ CLK(NULL, "mspro_fck", &mspro_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "mmchs1_fck", &mmchs1_fck, CK_3XXX),
+ CLK(NULL, "i2c3_fck", &i2c3_fck, CK_3XXX),
+ CLK(NULL, "i2c2_fck", &i2c2_fck, CK_3XXX),
+ CLK(NULL, "i2c1_fck", &i2c1_fck, CK_3XXX),
+ CLK(NULL, "mcbsp5_fck", &mcbsp5_fck, CK_3XXX),
+ CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_3XXX),
+ CLK(NULL, "core_48m_fck", &core_48m_fck, CK_3XXX),
+ CLK(NULL, "mcspi4_fck", &mcspi4_fck, CK_3XXX),
+ CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_3XXX),
+ CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_3XXX),
+ CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_3XXX),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_3XXX),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_3XXX),
+ CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
+ CLK(NULL, "core_12m_fck", &core_12m_fck, CK_3XXX),
+ CLK("omap_hdq.0", "fck", &hdq_fck, CK_3XXX),
+ CLK(NULL, "hdq_fck", &hdq_fck, CK_3XXX),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "core_l3_ick", &core_l3_ick, CK_3XXX),
+ CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
+ CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1, CK_3430ES1),
+ CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "sdrc_ick", &sdrc_ick, CK_3XXX),
+ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_3XXX),
+ CLK(NULL, "security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
+ CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_tll", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "mmchs3_ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
+ CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
+ CLK("omap-sham", "ick", &sha12_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "des2_ick", &des2_ick, CK_34XX | CK_36XX),
+ CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_3XXX),
+ CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_3XXX),
+ CLK(NULL, "mmchs2_ick", &mmchs2_ick, CK_3XXX),
+ CLK(NULL, "mmchs1_ick", &mmchs1_ick, CK_3XXX),
+ CLK(NULL, "mspro_ick", &mspro_ick, CK_34XX | CK_36XX),
+ CLK("omap_hdq.0", "ick", &hdq_ick, CK_3XXX),
+ CLK(NULL, "hdq_ick", &hdq_ick, CK_3XXX),
+ CLK("omap2_mcspi.4", "ick", &mcspi4_ick, CK_3XXX),
+ CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_3XXX),
+ CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_3XXX),
+ CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_3XXX),
+ CLK(NULL, "mcspi4_ick", &mcspi4_ick, CK_3XXX),
+ CLK(NULL, "mcspi3_ick", &mcspi3_ick, CK_3XXX),
+ CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_3XXX),
+ CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_3XXX),
+ CLK("omap_i2c.3", "ick", &i2c3_ick, CK_3XXX),
+ CLK("omap_i2c.2", "ick", &i2c2_ick, CK_3XXX),
+ CLK("omap_i2c.1", "ick", &i2c1_ick, CK_3XXX),
+ CLK(NULL, "i2c3_ick", &i2c3_ick, CK_3XXX),
+ CLK(NULL, "i2c2_ick", &i2c2_ick, CK_3XXX),
+ CLK(NULL, "i2c1_ick", &i2c1_ick, CK_3XXX),
+ CLK(NULL, "uart2_ick", &uart2_ick, CK_3XXX),
+ CLK(NULL, "uart1_ick", &uart1_ick, CK_3XXX),
+ CLK(NULL, "gpt11_ick", &gpt11_ick, CK_3XXX),
+ CLK(NULL, "gpt10_ick", &gpt10_ick, CK_3XXX),
+ CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_3XXX),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_3XXX),
+ CLK(NULL, "mcbsp5_ick", &mcbsp5_ick, CK_3XXX),
+ CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_3XXX),
+ CLK(NULL, "fac_ick", &fac_ick, CK_3430ES1),
+ CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_3XXX),
+ CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
+ CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_34XX | CK_36XX),
+ CLK(NULL, "aes1_ick", &aes1_ick, CK_34XX | CK_36XX),
+ CLK("omap_rng", "ick", &rng_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "sha11_ick", &sha11_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "des1_ick", &des1_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_3XXX),
+ CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_3XXX),
+ CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_3XXX),
+ CLK("omapdss_dss", "ick", &dss_ick_3430es1, CK_3430ES1),
+ CLK(NULL, "dss_ick", &dss_ick_3430es1, CK_3430ES1),
+ CLK("omapdss_dss", "ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "dss_ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "cam_mclk", &cam_mclk, CK_34XX | CK_36XX),
+ CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK(NULL, "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
+ CLK(NULL, "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
+ CLK(NULL, "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_tll", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_tll", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "init_60m_fclk", &dummy_ck, CK_3XXX),
+ CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
+ CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
+ CLK(NULL, "gpio1_dbck", &gpio1_dbck, CK_3XXX),
+ CLK(NULL, "wdt2_fck", &wdt2_fck, CK_3XXX),
+ CLK(NULL, "wkup_l4_ick", &wkup_l4_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "usim_ick", &usim_ick, CK_3430ES2PLUS | CK_36XX),
+ CLK("omap_wdt", "ick", &wdt2_ick, CK_3XXX),
+ CLK(NULL, "wdt2_ick", &wdt2_ick, CK_3XXX),
+ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_3XXX),
+ CLK(NULL, "gpio1_ick", &gpio1_ick, CK_3XXX),
+ CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_3XXX),
+ CLK(NULL, "gpt12_ick", &gpt12_ick, CK_3XXX),
+ CLK(NULL, "gpt1_ick", &gpt1_ick, CK_3XXX),
+ CLK(NULL, "per_96m_fck", &per_96m_fck, CK_3XXX),
+ CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
+ CLK(NULL, "uart4_fck", &uart4_fck_am35xx, CK_AM35XX),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
+ CLK(NULL, "gpt5_fck", &gpt5_fck, CK_3XXX),
+ CLK(NULL, "gpt6_fck", &gpt6_fck, CK_3XXX),
+ CLK(NULL, "gpt7_fck", &gpt7_fck, CK_3XXX),
+ CLK(NULL, "gpt8_fck", &gpt8_fck, CK_3XXX),
+ CLK(NULL, "gpt9_fck", &gpt9_fck, CK_3XXX),
+ CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck, CK_3XXX),
+ CLK(NULL, "gpio6_dbck", &gpio6_dbck, CK_3XXX),
+ CLK(NULL, "gpio5_dbck", &gpio5_dbck, CK_3XXX),
+ CLK(NULL, "gpio4_dbck", &gpio4_dbck, CK_3XXX),
+ CLK(NULL, "gpio3_dbck", &gpio3_dbck, CK_3XXX),
+ CLK(NULL, "gpio2_dbck", &gpio2_dbck, CK_3XXX),
+ CLK(NULL, "wdt3_fck", &wdt3_fck, CK_3XXX),
+ CLK(NULL, "per_l4_ick", &per_l4_ick, CK_3XXX),
+ CLK(NULL, "gpio6_ick", &gpio6_ick, CK_3XXX),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_3XXX),
+ CLK(NULL, "gpio4_ick", &gpio4_ick, CK_3XXX),
+ CLK(NULL, "gpio3_ick", &gpio3_ick, CK_3XXX),
+ CLK(NULL, "gpio2_ick", &gpio2_ick, CK_3XXX),
+ CLK(NULL, "wdt3_ick", &wdt3_ick, CK_3XXX),
+ CLK(NULL, "uart3_ick", &uart3_ick, CK_3XXX),
+ CLK(NULL, "uart4_ick", &uart4_ick, CK_36XX),
+ CLK(NULL, "gpt9_ick", &gpt9_ick, CK_3XXX),
+ CLK(NULL, "gpt8_ick", &gpt8_ick, CK_3XXX),
+ CLK(NULL, "gpt7_ick", &gpt7_ick, CK_3XXX),
+ CLK(NULL, "gpt6_ick", &gpt6_ick, CK_3XXX),
+ CLK(NULL, "gpt5_ick", &gpt5_ick, CK_3XXX),
+ CLK(NULL, "gpt4_ick", &gpt4_ick, CK_3XXX),
+ CLK(NULL, "gpt3_ick", &gpt3_ick, CK_3XXX),
+ CLK(NULL, "gpt2_ick", &gpt2_ick, CK_3XXX),
+ CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_3XXX),
+ CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_3XXX),
+ CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_3XXX),
+ CLK(NULL, "mcbsp4_ick", &mcbsp2_ick, CK_3XXX),
+ CLK(NULL, "mcbsp3_ick", &mcbsp3_ick, CK_3XXX),
+ CLK(NULL, "mcbsp2_ick", &mcbsp4_ick, CK_3XXX),
+ CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_3XXX),
+ CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_3XXX),
+ CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_3XXX),
+ CLK("etb", "emu_src_ck", &emu_src_ck, CK_3XXX),
+ CLK(NULL, "emu_src_ck", &emu_src_ck, CK_3XXX),
+ CLK(NULL, "pclk_fck", &pclk_fck, CK_3XXX),
+ CLK(NULL, "pclkx2_fck", &pclkx2_fck, CK_3XXX),
+ CLK(NULL, "atclk_fck", &atclk_fck, CK_3XXX),
+ CLK(NULL, "traceclk_src_fck", &traceclk_src_fck, CK_3XXX),
+ CLK(NULL, "traceclk_fck", &traceclk_fck, CK_3XXX),
+ CLK(NULL, "sr1_fck", &sr1_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "sr2_fck", &sr2_fck, CK_34XX | CK_36XX),
+ CLK(NULL, "sr_l4_ick", &sr_l4_ick, CK_34XX | CK_36XX),
+ CLK(NULL, "secure_32k_fck", &secure_32k_fck, CK_3XXX),
+ CLK(NULL, "gpt12_fck", &gpt12_fck, CK_3XXX),
+ CLK(NULL, "wdt1_fck", &wdt1_fck, CK_3XXX),
+ CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
+ CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
+ CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX),
+ CLK(NULL, "emac_ick", &emac_ick, CK_AM35XX),
+ CLK(NULL, "emac_fck", &emac_fck, CK_AM35XX),
+ CLK("davinci_emac.0", NULL, &emac_ick, CK_AM35XX),
+ CLK("davinci_mdio.0", NULL, &emac_fck, CK_AM35XX),
+ CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
+ CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
+ CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx, CK_AM35XX),
+ CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx, CK_AM35XX),
+ CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
+ CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
+ CLK(NULL, "timer_32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_3XXX),
+ CLK(NULL, "cpufreq_ck", &dpll1_ck, CK_3XXX),
+};
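+
+/*
+ * The table above only registers clkdev aliases; drivers obtain and
+ * enable the clocks through the generic clk API.  For example, the
+ * "omap_hsmmc.0"/"ick" entry resolves to mmchs1_ick:
+ *
+ *	struct clk *ick = clk_get(dev, "ick");
+ *	if (!IS_ERR(ick))
+ *		clk_prepare_enable(ick);
+ */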
+
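+/*
+ * Clocks enabled unconditionally from omap3xxx_clk_init(): the SDRAM
+ * controller, GPMC and the system control module are in use from very
+ * early boot and have no driver that would otherwise keep their clocks
+ * running.
+ */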
+static const char *enable_init_clks[] = {
+ "sdrc_ick",
+ "gpmc_fck",
+ "omapctrl_ick",
+};
+
+int __init omap3xxx_clk_init(void)
+{
+ struct omap_clk *c;
+ u32 cpu_clkflg = 0;
+
+ /*
+ * 3505 must be tested before 3517, since the 3517 check returns
+ * true both for AM3517 chips and for other members of the AM3517
+ * family, which includes the 3505. Unfortunately there's no obvious
+ * family test for 3517/3505 :-(
+ */
+ if (soc_is_am35xx()) {
+ cpu_mask = RATE_IN_34XX;
+ cpu_clkflg = CK_AM35XX;
+ } else if (cpu_is_omap3630()) {
+ cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
+ cpu_clkflg = CK_36XX;
+ } else if (cpu_is_ti816x()) {
+ cpu_mask = RATE_IN_TI816X;
+ cpu_clkflg = CK_TI816X;
+ } else if (soc_is_am33xx()) {
+ cpu_mask = RATE_IN_AM33XX;
+ } else if (cpu_is_ti814x()) {
+ cpu_mask = RATE_IN_TI814X;
+ } else if (cpu_is_omap34xx()) {
+ if (omap_rev() == OMAP3430_REV_ES1_0) {
+ cpu_mask = RATE_IN_3430ES1;
+ cpu_clkflg = CK_3430ES1;
+ } else {
+ /*
+ * Assume that anything that we haven't matched yet
+ * has 3430ES2-type clocks.
+ */
+ cpu_mask = RATE_IN_3430ES2PLUS;
+ cpu_clkflg = CK_3430ES2PLUS;
+ }
+ } else {
+ WARN(1, "clock: could not identify OMAP3 variant\n");
+ }
+
+ if (omap3_has_192mhz_clk())
+ omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
+
+ if (cpu_is_omap3630()) {
+ dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
+ dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
+ dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
+ dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
+ dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
+ dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
+ }
+
+ /*
+ * XXX This type of dynamic rewriting of the clock tree is
+ * deprecated and should be revised soon.
+ */
+ if (cpu_is_omap3630())
+ dpll4_dd = dpll4_dd_3630;
+ else
+ dpll4_dd = dpll4_dd_34xx;
+
+ for (c = omap3xxx_clks; c < omap3xxx_clks + ARRAY_SIZE(omap3xxx_clks);
+ c++)
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ if (!__clk_init(NULL, c->lk.clk))
+ omap2_init_clk_hw_omap_clocks(c->lk.clk);
+ }
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(&osc_sys_ck) / 1000000),
+ (clk_get_rate(&osc_sys_ck) / 100000) % 10,
+ (clk_get_rate(&core_ck) / 1000000),
+ (clk_get_rate(&arm_fck) / 1000000));
+
+ /*
+ * Lock DPLL5 -- here only until other device init code can
+ * handle this
+ */
+ if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0))
+ omap3_clk_lock_dpll5();
+
+ /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
+ sdrc_ick_p = clk_get(NULL, "sdrc_ick");
+ arm_fck_p = clk_get(NULL, "arm_fck");
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
new file mode 100644
index 00000000000..5789a5e2556
--- /dev/null
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -0,0 +1,2039 @@
+/*
+ * OMAP4 Clock data
+ *
+ * Copyright (C) 2009-2012 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ * Mike Turquette (mturquette@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX Some of the ES1 clocks have been removed/changed; once support
+ * is added for discriminating clocks by ES level, these should be added back
+ * in.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-private.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+
+#include "soc.h"
+#include "iomap.h"
+#include "clock.h"
+#include "clock44xx.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cm-regbits-44xx.h"
+#include "prm44xx.h"
+#include "prm-regbits-44xx.h"
+#include "control.h"
+#include "scrm44xx.h"
+
+/* OMAP4 modulemode control */
+#define OMAP4430_MODULEMODE_HWCTRL_SHIFT 0
+#define OMAP4430_MODULEMODE_SWCTRL_SHIFT 1
+
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ 98304000
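+/* i.e. 196608000 Hz / 2; dpll_abe_x2_ck then runs at twice the lock rate. */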
+
+/* Root clocks */
+
+DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(pad_clks_src_ck, CLK_IS_ROOT, 12000000, 0x0);
+
+DEFINE_CLK_GATE(pad_clks_ck, "pad_clks_src_ck", &pad_clks_src_ck, 0x0,
+ OMAP4430_CM_CLKSEL_ABE, OMAP4430_PAD_CLKS_GATE_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_FIXED_RATE(pad_slimbus_core_clks_ck, CLK_IS_ROOT, 12000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(secure_32k_clk_src_ck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(slimbus_src_clk, CLK_IS_ROOT, 12000000, 0x0);
+
+DEFINE_CLK_GATE(slimbus_clk, "slimbus_src_clk", &slimbus_src_clk, 0x0,
+ OMAP4430_CM_CLKSEL_ABE, OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_FIXED_RATE(sys_32k_ck, CLK_IS_ROOT, 32768, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_12000000_ck, CLK_IS_ROOT, 12000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_13000000_ck, CLK_IS_ROOT, 13000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_16800000_ck, CLK_IS_ROOT, 16800000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_27000000_ck, CLK_IS_ROOT, 27000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(virt_38400000_ck, CLK_IS_ROOT, 38400000, 0x0);
+
+static const char *sys_clkin_ck_parents[] = {
+ "virt_12000000_ck", "virt_13000000_ck", "virt_16800000_ck",
+ "virt_19200000_ck", "virt_26000000_ck", "virt_27000000_ck",
+ "virt_38400000_ck",
+};
+
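+/*
+ * The SYS_CLKSEL field is one-based (CLK_MUX_INDEX_ONE below): register
+ * value 1 selects virt_12000000_ck and value 7 selects virt_38400000_ck.
+ */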
+DEFINE_CLK_MUX(sys_clkin_ck, sys_clkin_ck_parents, NULL, 0x0,
+ OMAP4430_CM_SYS_CLKSEL, OMAP4430_SYS_CLKSEL_SHIFT,
+ OMAP4430_SYS_CLKSEL_WIDTH, CLK_MUX_INDEX_ONE, NULL);
+
+DEFINE_CLK_FIXED_RATE(tie_low_clock_ck, CLK_IS_ROOT, 0, 0x0);
+
+DEFINE_CLK_FIXED_RATE(utmi_phy_clkout_ck, CLK_IS_ROOT, 60000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(xclk60mhsp1_ck, CLK_IS_ROOT, 60000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(xclk60mhsp2_ck, CLK_IS_ROOT, 60000000, 0x0);
+
+DEFINE_CLK_FIXED_RATE(xclk60motg_ck, CLK_IS_ROOT, 60000000, 0x0);
+
+/* Module clocks and DPLL outputs */
+
+static const char *abe_dpll_bypass_clk_mux_ck_parents[] = {
+ "sys_clkin_ck", "sys_32k_ck",
+};
+
+DEFINE_CLK_MUX(abe_dpll_bypass_clk_mux_ck, abe_dpll_bypass_clk_mux_ck_parents,
+ NULL, 0x0, OMAP4430_CM_L4_WKUP_CLKSEL, OMAP4430_CLKSEL_SHIFT,
+ OMAP4430_CLKSEL_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_MUX(abe_dpll_refclk_mux_ck, abe_dpll_bypass_clk_mux_ck_parents, NULL,
+ 0x0, OMAP4430_CM_ABE_PLL_REF_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
+ OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
+
+/* DPLL_ABE */
+static struct dpll_data dpll_abe_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_ABE,
+ .clk_bypass = &abe_dpll_bypass_clk_mux_ck,
+ .clk_ref = &abe_dpll_refclk_mux_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_ABE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_ABE,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_ABE,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .m4xen_mask = OMAP4430_DPLL_REGM4XEN_MASK,
+ .lpmode_mask = OMAP4430_DPLL_LPMODE_EN_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+static const char *dpll_abe_ck_parents[] = {
+ "abe_dpll_refclk_mux_ck",
+};
+
+static struct clk dpll_abe_ck;
+
+static const struct clk_ops dpll_abe_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap4_dpll_regm4xen_recalc,
+ .round_rate = &omap4_dpll_regm4xen_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static struct clk_hw_omap dpll_abe_ck_hw = {
+ .hw = {
+ .clk = &dpll_abe_ck,
+ },
+ .dpll_data = &dpll_abe_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_abe_ck, dpll_abe_ck_parents, dpll_abe_ck_ops);
+
+static const char *dpll_abe_x2_ck_parents[] = {
+ "dpll_abe_ck",
+};
+
+static struct clk dpll_abe_x2_ck;
+
+static const struct clk_ops dpll_abe_x2_ck_ops = {
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+static struct clk_hw_omap dpll_abe_x2_ck_hw = {
+ .hw = {
+ .clk = &dpll_abe_x2_ck,
+ },
+ .flags = CLOCK_CLKOUTX2,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
+ .ops = &clkhwops_omap4_dpllmx,
+};
+
+DEFINE_STRUCT_CLK(dpll_abe_x2_ck, dpll_abe_x2_ck_parents, dpll_abe_x2_ck_ops);
+
+static const struct clk_ops omap_hsdivider_ops = {
+ .set_rate = &omap2_clksel_set_rate,
+ .recalc_rate = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_abe_m2x2_ck, "dpll_abe_x2_ck", &dpll_abe_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M2_DPLL_ABE,
+ OMAP4430_DPLL_CLKOUT_DIV_MASK);
+
+DEFINE_CLK_FIXED_FACTOR(abe_24m_fclk, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck,
+ 0x0, 1, 8);
+
+DEFINE_CLK_DIVIDER(abe_clk, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, 0x0,
+ OMAP4430_CM_CLKSEL_ABE, OMAP4430_CLKSEL_OPP_SHIFT,
+ OMAP4430_CLKSEL_OPP_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_DIVIDER(aess_fclk, "abe_clk", &abe_clk, 0x0,
+ OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ OMAP4430_CLKSEL_AESS_FCLK_SHIFT,
+ OMAP4430_CLKSEL_AESS_FCLK_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_abe_m3x2_ck, "dpll_abe_x2_ck", &dpll_abe_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M3_DPLL_ABE,
+ OMAP4430_DPLL_CLKOUTHIF_DIV_MASK);
+
+static const char *core_hsd_byp_clk_mux_ck_parents[] = {
+ "sys_clkin_ck", "dpll_abe_m3x2_ck",
+};
+
+DEFINE_CLK_MUX(core_hsd_byp_clk_mux_ck, core_hsd_byp_clk_mux_ck_parents, NULL,
+ 0x0, OMAP4430_CM_CLKSEL_DPLL_CORE,
+ OMAP4430_DPLL_BYP_CLKSEL_SHIFT, OMAP4430_DPLL_BYP_CLKSEL_WIDTH,
+ 0x0, NULL);
+
+/* DPLL_CORE */
+static struct dpll_data dpll_core_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
+ .clk_bypass = &core_hsd_byp_clk_mux_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_CORE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_CORE,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_CORE,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+static const char *dpll_core_ck_parents[] = {
+ "sys_clkin_ck", "core_hsd_byp_clk_mux_ck"
+};
+
+static struct clk dpll_core_ck;
+
+static const struct clk_ops dpll_core_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static struct clk_hw_omap dpll_core_ck_hw = {
+ .hw = {
+ .clk = &dpll_core_ck,
+ },
+ .dpll_data = &dpll_core_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_core_ck, dpll_core_ck_parents, dpll_core_ck_ops);
+
+static const char *dpll_core_x2_ck_parents[] = {
+ "dpll_core_ck",
+};
+
+static struct clk dpll_core_x2_ck;
+
+static struct clk_hw_omap dpll_core_x2_ck_hw = {
+ .hw = {
+ .clk = &dpll_core_x2_ck,
+ },
+};
+
+DEFINE_STRUCT_CLK(dpll_core_x2_ck, dpll_core_x2_ck_parents, dpll_abe_x2_ck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m6x2_ck, "dpll_core_x2_ck",
+ &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M6_DPLL_CORE,
+ OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m2_ck, "dpll_core_ck", &dpll_core_ck, 0x0,
+ OMAP4430_CM_DIV_M2_DPLL_CORE,
+ OMAP4430_DPLL_CLKOUT_DIV_MASK);
+
+DEFINE_CLK_FIXED_FACTOR(ddrphy_ck, "dpll_core_m2_ck", &dpll_core_m2_ck, 0x0, 1,
+ 2);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m5x2_ck, "dpll_core_x2_ck",
+ &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M5_DPLL_CORE,
+ OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
+
+DEFINE_CLK_DIVIDER(div_core_ck, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, 0x0,
+ OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_CORE_SHIFT,
+ OMAP4430_CLKSEL_CORE_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
+ 0x0, OMAP4430_CM_BYPCLK_DPLL_IVA, OMAP4430_CLKSEL_0_1_SHIFT,
+ OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_DIVIDER(div_mpu_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
+ 0x0, OMAP4430_CM_BYPCLK_DPLL_MPU, OMAP4430_CLKSEL_0_1_SHIFT,
+ OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m4x2_ck, "dpll_core_x2_ck",
+ &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M4_DPLL_CORE,
+ OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
+
+DEFINE_CLK_FIXED_FACTOR(dll_clk_div_ck, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck,
+ 0x0, 1, 2);
+
+DEFINE_CLK_DIVIDER(dpll_abe_m2_ck, "dpll_abe_ck", &dpll_abe_ck, 0x0,
+ OMAP4430_CM_DIV_M2_DPLL_ABE, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
+ OMAP4430_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+
+static const struct clk_ops dmic_fck_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+ .set_parent = &omap2_clksel_set_parent,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static const char *dpll_core_m3x2_ck_parents[] = {
+ "dpll_core_x2_ck",
+};
+
+static const struct clksel dpll_core_m3x2_div[] = {
+ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+/* XXX Missing round_rate, set_rate in ops */
+DEFINE_CLK_OMAP_MUX_GATE(dpll_core_m3x2_ck, NULL, dpll_core_m3x2_div,
+ OMAP4430_CM_DIV_M3_DPLL_CORE,
+ OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+ OMAP4430_CM_DIV_M3_DPLL_CORE,
+ OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL,
+ dpll_core_m3x2_ck_parents, dmic_fck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_core_m7x2_ck, "dpll_core_x2_ck",
+ &dpll_core_x2_ck, 0x0, OMAP4430_CM_DIV_M7_DPLL_CORE,
+ OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK);
+
+static const char *iva_hsd_byp_clk_mux_ck_parents[] = {
+ "sys_clkin_ck", "div_iva_hs_clk",
+};
+
+DEFINE_CLK_MUX(iva_hsd_byp_clk_mux_ck, iva_hsd_byp_clk_mux_ck_parents, NULL,
+ 0x0, OMAP4430_CM_CLKSEL_DPLL_IVA, OMAP4430_DPLL_BYP_CLKSEL_SHIFT,
+ OMAP4430_DPLL_BYP_CLKSEL_WIDTH, 0x0, NULL);
+
+/* DPLL_IVA */
+static struct dpll_data dpll_iva_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_IVA,
+ .clk_bypass = &iva_hsd_byp_clk_mux_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_IVA,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_IVA,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_IVA,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+static const char *dpll_iva_ck_parents[] = {
+ "sys_clkin_ck", "iva_hsd_byp_clk_mux_ck"
+};
+
+static struct clk dpll_iva_ck;
+
+static const struct clk_ops dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static struct clk_hw_omap dpll_iva_ck_hw = {
+ .hw = {
+ .clk = &dpll_iva_ck,
+ },
+ .dpll_data = &dpll_iva_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_iva_ck_parents, dpll_ck_ops);
+
+static const char *dpll_iva_x2_ck_parents[] = {
+ "dpll_iva_ck",
+};
+
+static struct clk dpll_iva_x2_ck;
+
+static struct clk_hw_omap dpll_iva_x2_ck_hw = {
+ .hw = {
+ .clk = &dpll_iva_x2_ck,
+ },
+};
+
+DEFINE_STRUCT_CLK(dpll_iva_x2_ck, dpll_iva_x2_ck_parents, dpll_abe_x2_ck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_iva_m4x2_ck, "dpll_iva_x2_ck", &dpll_iva_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M4_DPLL_IVA,
+ OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_iva_m5x2_ck, "dpll_iva_x2_ck", &dpll_iva_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M5_DPLL_IVA,
+ OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
+
+/* DPLL_MPU */
+static struct dpll_data dpll_mpu_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_MPU,
+ .clk_bypass = &div_mpu_hs_clk,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_MPU,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_MPU,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_MPU,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+static const char *dpll_mpu_ck_parents[] = {
+ "sys_clkin_ck", "div_mpu_hs_clk"
+};
+
+static struct clk dpll_mpu_ck;
+
+static struct clk_hw_omap dpll_mpu_ck_hw = {
+ .hw = {
+ .clk = &dpll_mpu_ck,
+ },
+ .dpll_data = &dpll_mpu_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_mpu_ck_parents, dpll_ck_ops);
+
+DEFINE_CLK_FIXED_FACTOR(mpu_periphclk, "dpll_mpu_ck", &dpll_mpu_ck, 0x0, 1, 2);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_mpu_m2_ck, "dpll_mpu_ck", &dpll_mpu_ck, 0x0,
+ OMAP4430_CM_DIV_M2_DPLL_MPU,
+ OMAP4430_DPLL_CLKOUT_DIV_MASK);
+
+DEFINE_CLK_FIXED_FACTOR(per_hs_clk_div_ck, "dpll_abe_m3x2_ck",
+ &dpll_abe_m3x2_ck, 0x0, 1, 2);
+
+static const char *per_hsd_byp_clk_mux_ck_parents[] = {
+ "sys_clkin_ck", "per_hs_clk_div_ck",
+};
+
+DEFINE_CLK_MUX(per_hsd_byp_clk_mux_ck, per_hsd_byp_clk_mux_ck_parents, NULL,
+ 0x0, OMAP4430_CM_CLKSEL_DPLL_PER, OMAP4430_DPLL_BYP_CLKSEL_SHIFT,
+ OMAP4430_DPLL_BYP_CLKSEL_WIDTH, 0x0, NULL);
+
+/* DPLL_PER */
+static struct dpll_data dpll_per_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
+ .clk_bypass = &per_hsd_byp_clk_mux_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_PER,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_PER,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_PER,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+};
+
+static const char *dpll_per_ck_parents[] = {
+ "sys_clkin_ck", "per_hsd_byp_clk_mux_ck"
+};
+
+static struct clk dpll_per_ck;
+
+static struct clk_hw_omap dpll_per_ck_hw = {
+ .hw = {
+ .clk = &dpll_per_ck,
+ },
+ .dpll_data = &dpll_per_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_per_ck, dpll_per_ck_parents, dpll_ck_ops);
+
+DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
+ OMAP4430_CM_DIV_M2_DPLL_PER, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
+ OMAP4430_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+
+static const char *dpll_per_x2_ck_parents[] = {
+ "dpll_per_ck",
+};
+
+static struct clk dpll_per_x2_ck;
+
+static struct clk_hw_omap dpll_per_x2_ck_hw = {
+ .hw = {
+ .clk = &dpll_per_x2_ck,
+ },
+ .flags = CLOCK_CLKOUTX2,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
+ .ops = &clkhwops_omap4_dpllmx,
+};
+
+DEFINE_STRUCT_CLK(dpll_per_x2_ck, dpll_per_x2_ck_parents, dpll_abe_x2_ck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m2x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M2_DPLL_PER,
+ OMAP4430_DPLL_CLKOUT_DIV_MASK);
+
+static const char *dpll_per_m3x2_ck_parents[] = {
+ "dpll_per_x2_ck",
+};
+
+static const struct clksel dpll_per_m3x2_div[] = {
+ { .parent = &dpll_per_x2_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+/*
+ * XXX As with dpll_core_m3x2_ck above: no round_rate/set_rate in
+ * dmic_fck_ops, so this M3 output's divider cannot be changed yet.
+ */
+DEFINE_CLK_OMAP_MUX_GATE(dpll_per_m3x2_ck, NULL, dpll_per_m3x2_div,
+ OMAP4430_CM_DIV_M3_DPLL_PER,
+ OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+ OMAP4430_CM_DIV_M3_DPLL_PER,
+ OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT, NULL,
+ dpll_per_m3x2_ck_parents, dmic_fck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m4x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M4_DPLL_PER,
+ OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m5x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M5_DPLL_PER,
+ OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m6x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M6_DPLL_PER,
+ OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_per_m7x2_ck, "dpll_per_x2_ck", &dpll_per_x2_ck,
+ 0x0, OMAP4430_CM_DIV_M7_DPLL_PER,
+ OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK);
+
+DEFINE_CLK_FIXED_FACTOR(usb_hs_clk_div_ck, "dpll_abe_m3x2_ck",
+ &dpll_abe_m3x2_ck, 0x0, 1, 3);
+
+/* DPLL_USB */
+static struct dpll_data dpll_usb_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_USB,
+ .clk_bypass = &usb_hs_clk_div_ck,
+ .flags = DPLL_J_TYPE,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_USB,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_USB,
+ .mult_mask = OMAP4430_DPLL_MULT_USB_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_0_7_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .sddiv_mask = OMAP4430_DPLL_SD_DIV_MASK,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 1,
+};
+
+static const char *dpll_usb_ck_parents[] = {
+ "sys_clkin_ck", "usb_hs_clk_div_ck"
+};
+
+static struct clk dpll_usb_ck;
+
+static struct clk_hw_omap dpll_usb_ck_hw = {
+ .hw = {
+ .clk = &dpll_usb_ck,
+ },
+ .dpll_data = &dpll_usb_dd,
+ .ops = &clkhwops_omap3_dpll,
+};
+
+DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops);
+
+static const char *dpll_usb_clkdcoldo_ck_parents[] = {
+ "dpll_usb_ck",
+};
+
+static struct clk dpll_usb_clkdcoldo_ck;
+
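+/*
+ * CLKDCOLDO output of the USB DPLL: no clk_ops callbacks are provided
+ * here; the only software control is the output auto-gating handled via
+ * clkhwops_omap4_dpllmx in the clk_hw_omap below.
+ */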
+static const struct clk_ops dpll_usb_clkdcoldo_ck_ops = {
+};
+
+static struct clk_hw_omap dpll_usb_clkdcoldo_ck_hw = {
+ .hw = {
+ .clk = &dpll_usb_clkdcoldo_ck,
+ },
+ .clksel_reg = OMAP4430_CM_CLKDCOLDO_DPLL_USB,
+ .ops = &clkhwops_omap4_dpllmx,
+};
+
+DEFINE_STRUCT_CLK(dpll_usb_clkdcoldo_ck, dpll_usb_clkdcoldo_ck_parents,
+ dpll_usb_clkdcoldo_ck_ops);
+
+DEFINE_CLK_OMAP_HSDIVIDER(dpll_usb_m2_ck, "dpll_usb_ck", &dpll_usb_ck, 0x0,
+ OMAP4430_CM_DIV_M2_DPLL_USB,
+ OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK);
+
+static const char *ducati_clk_mux_ck_parents[] = {
+ "div_core_ck", "dpll_per_m6x2_ck",
+};
+
+DEFINE_CLK_MUX(ducati_clk_mux_ck, ducati_clk_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT, OMAP4430_CLKSEL_0_0_SHIFT,
+ OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_FIXED_FACTOR(func_12m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
+ 0x0, 1, 16);
+
+DEFINE_CLK_FIXED_FACTOR(func_24m_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0,
+ 1, 4);
+
+DEFINE_CLK_FIXED_FACTOR(func_24mc_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
+ 0x0, 1, 8);
+
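+/*
+ * The CM_SCALE_FCLK based dividers below use explicit div/val tables
+ * because their encodings (e.g. 0 -> /4, 1 -> /8 here) do not match the
+ * generic one-based or power-of-two divider flags.
+ */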
+static const struct clk_div_table func_48m_fclk_rates[] = {
+ { .div = 4, .val = 0 },
+ { .div = 8, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(func_48m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
+ 0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
+ OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_48m_fclk_rates,
+ NULL);
+
+DEFINE_CLK_FIXED_FACTOR(func_48mc_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
+ 0x0, 1, 4);
+
+static const struct clk_div_table func_64m_fclk_rates[] = {
+ { .div = 2, .val = 0 },
+ { .div = 4, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(func_64m_fclk, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck,
+ 0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
+ OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_64m_fclk_rates,
+ NULL);
+
+static const struct clk_div_table func_96m_fclk_rates[] = {
+ { .div = 2, .val = 0 },
+ { .div = 4, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(func_96m_fclk, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck,
+ 0x0, OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
+ OMAP4430_SCALE_FCLK_WIDTH, 0x0, func_96m_fclk_rates,
+ NULL);
+
+static const struct clk_div_table init_60m_fclk_rates[] = {
+ { .div = 1, .val = 0 },
+ { .div = 8, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(init_60m_fclk, "dpll_usb_m2_ck", &dpll_usb_m2_ck,
+ 0x0, OMAP4430_CM_CLKSEL_USB_60MHZ,
+ OMAP4430_CLKSEL_0_0_SHIFT, OMAP4430_CLKSEL_0_0_WIDTH,
+ 0x0, init_60m_fclk_rates, NULL);
+
+DEFINE_CLK_DIVIDER(l3_div_ck, "div_core_ck", &div_core_ck, 0x0,
+ OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_L3_SHIFT,
+ OMAP4430_CLKSEL_L3_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(l4_div_ck, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_L4_SHIFT,
+ OMAP4430_CLKSEL_L4_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_FIXED_FACTOR(lp_clk_div_ck, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck,
+ 0x0, 1, 16);
+
+static const char *l4_wkup_clk_mux_ck_parents[] = {
+ "sys_clkin_ck", "lp_clk_div_ck",
+};
+
+DEFINE_CLK_MUX(l4_wkup_clk_mux_ck, l4_wkup_clk_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM_L4_WKUP_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
+ OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
+
+static const struct clk_div_table ocp_abe_iclk_rates[] = {
+ { .div = 2, .val = 0 },
+ { .div = 1, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(ocp_abe_iclk, "aess_fclk", &aess_fclk, 0x0,
+ OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ OMAP4430_CLKSEL_AESS_FCLK_SHIFT,
+ OMAP4430_CLKSEL_AESS_FCLK_WIDTH,
+ 0x0, ocp_abe_iclk_rates, NULL);
+
+DEFINE_CLK_FIXED_FACTOR(per_abe_24m_fclk, "dpll_abe_m2_ck", &dpll_abe_m2_ck,
+ 0x0, 1, 4);
+
+DEFINE_CLK_DIVIDER(per_abe_nc_fclk, "dpll_abe_m2_ck", &dpll_abe_m2_ck, 0x0,
+ OMAP4430_CM_SCALE_FCLK, OMAP4430_SCALE_FCLK_SHIFT,
+ OMAP4430_SCALE_FCLK_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(syc_clk_div_ck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
+ OMAP4430_CM_ABE_DSS_SYS_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
+ OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
+
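+/*
+ * Single-parent debug clock mux; it reuses the empty
+ * dpll_usb_clkdcoldo_ck_ops since no callbacks are required.
+ */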
+static const char *dbgclk_mux_ck_parents[] = {
+ "sys_clkin_ck"
+};
+
+static struct clk dbgclk_mux_ck;
+DEFINE_STRUCT_CLK_HW_OMAP(dbgclk_mux_ck, NULL);
+DEFINE_STRUCT_CLK(dbgclk_mux_ck, dbgclk_mux_ck_parents,
+ dpll_usb_clkdcoldo_ck_ops);
+
+/* Leaf clocks controlled by modules */
+
+DEFINE_CLK_GATE(aes1_fck, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_AES1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(aes2_fck, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_AES2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(aess_fck, "aess_fclk", &aess_fclk, 0x0,
+ OMAP4430_CM1_ABE_AESS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(bandgap_fclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT, 0x0, NULL);
+
+static const struct clk_div_table div_ts_ck_rates[] = {
+ { .div = 8, .val = 0 },
+ { .div = 16, .val = 1 },
+ { .div = 32, .val = 2 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(div_ts_ck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
+ 0x0, OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ OMAP4430_CLKSEL_24_25_SHIFT,
+ OMAP4430_CLKSEL_24_25_WIDTH, 0x0, div_ts_ck_rates,
+ NULL);
+
+DEFINE_CLK_GATE(bandgap_ts_fclk, "div_ts_ck", &div_ts_ck, 0x0,
+ OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
+ OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(des3des_fck, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_DES3DES_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+static const char *dmic_sync_mux_ck_parents[] = {
+ "abe_24m_fclk", "syc_clk_div_ck", "func_24m_clk",
+};
+
+DEFINE_CLK_MUX(dmic_sync_mux_ck, dmic_sync_mux_ck_parents, NULL,
+ 0x0, OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel func_dmic_abe_gfclk_sel[] = {
+ { .parent = &dmic_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *dmic_fck_parents[] = {
+ "dmic_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
+};
+
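+/*
+ * "Merged" clocks: the module's functional-clock source mux and its
+ * MODULEMODE gate are combined into a single clk node by
+ * DEFINE_CLK_OMAP_MUX_GATE(), so one clock both selects the source and
+ * enables the module.
+ */
+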
+/* Merged func_dmic_abe_gfclk into dmic */
+static struct clk dmic_fck;
+
+DEFINE_CLK_OMAP_MUX_GATE(dmic_fck, "abe_clkdm", func_dmic_abe_gfclk_sel,
+ OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_MASK,
+ OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ dmic_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_GATE(dsp_fck, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, 0x0,
+ OMAP4430_CM_TESLA_TESLA_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(dss_sys_clk, "syc_clk_div_ck", &syc_clk_div_ck, 0x0,
+ OMAP4430_CM_DSS_DSS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(dss_tv_clk, "extalt_clkin_ck", &extalt_clkin_ck, 0x0,
+ OMAP4430_CM_DSS_DSS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_TV_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(dss_dss_clk, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, 0x0,
+ OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(dss_48mhz_clk, "func_48mc_fclk", &func_48mc_fclk, 0x0,
+ OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(dss_fck, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(efuse_ctrl_cust_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
+ OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(emif1_fck, "ddrphy_ck", &ddrphy_ck, 0x0,
+ OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(emif2_fck, "ddrphy_ck", &ddrphy_ck, 0x0,
+ OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(fdif_fck, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, 0x0,
+ OMAP4430_CM_CAM_FDIF_CLKCTRL, OMAP4430_CLKSEL_FCLK_SHIFT,
+ OMAP4430_CLKSEL_FCLK_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
+
+DEFINE_CLK_GATE(fpka_fck, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio1_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ OMAP4430_OPTFCLKEN_DBCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio1_ick, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, 0x0,
+ OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio2_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO2_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio2_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio3_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ OMAP4430_OPTFCLKEN_DBCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio3_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio4_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO4_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio4_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio5_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO5_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio5_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio6_dbclk, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO6_CLKCTRL, OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(gpio6_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(gpmc_ick, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_L3_2_GPMC_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ 0x0, NULL);
+
+static const struct clksel sgx_clk_mux_sel[] = {
+ { .parent = &dpll_core_m7x2_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_per_m7x2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static const char *gpu_fck_parents[] = {
+ "dpll_core_m7x2_ck", "dpll_per_m7x2_ck",
+};
+
+/* Merged sgx_clk_mux into gpu */
+DEFINE_CLK_OMAP_MUX_GATE(gpu_fck, "l3_gfx_clkdm", sgx_clk_mux_sel,
+ OMAP4430_CM_GFX_GFX_CLKCTRL,
+ OMAP4430_CLKSEL_SGX_FCLK_MASK,
+ OMAP4430_CM_GFX_GFX_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ gpu_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_GATE(hdq1w_fck, "func_12m_fclk", &func_12m_fclk, 0x0,
+ OMAP4430_CM_L4PER_HDQ1W_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(hsi_fck, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, 0x0,
+ OMAP4430_CM_L3INIT_HSI_CLKCTRL, OMAP4430_CLKSEL_24_25_SHIFT,
+ OMAP4430_CLKSEL_24_25_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
+ NULL);
+
+DEFINE_CLK_GATE(i2c1_fck, "func_96m_fclk", &func_96m_fclk, 0x0,
+ OMAP4430_CM_L4PER_I2C1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(i2c2_fck, "func_96m_fclk", &func_96m_fclk, 0x0,
+ OMAP4430_CM_L4PER_I2C2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(i2c3_fck, "func_96m_fclk", &func_96m_fclk, 0x0,
+ OMAP4430_CM_L4PER_I2C3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(i2c4_fck, "func_96m_fclk", &func_96m_fclk, 0x0,
+ OMAP4430_CM_L4PER_I2C4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(ipu_fck, "ducati_clk_mux_ck", &ducati_clk_mux_ck, 0x0,
+ OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(iss_ctrlclk, "func_96m_fclk", &func_96m_fclk, 0x0,
+ OMAP4430_CM_CAM_ISS_CLKCTRL, OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(iss_fck, "ducati_clk_mux_ck", &ducati_clk_mux_ck, 0x0,
+ OMAP4430_CM_CAM_ISS_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(iva_fck, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, 0x0,
+ OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(kbd_fck, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
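+/*
+ * l3_instr_ick, l3_main_3_ick and ocp_wp_noc_ick are plain
+ * hardware-controlled interface clocks with no mux or divider, so they
+ * are built by hand around the minimal l3_instr_ick_ops and share one
+ * parent-name array.
+ */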
+static struct clk l3_instr_ick;
+
+static const char *l3_instr_ick_parent_names[] = {
+ "l3_div_ck",
+};
+
+static const struct clk_ops l3_instr_ick_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .init = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap l3_instr_ick_hw = {
+ .hw = {
+ .clk = &l3_instr_ick,
+ },
+ .enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ .clkdm_name = "l3_instr_clkdm",
+};
+
+DEFINE_STRUCT_CLK(l3_instr_ick, l3_instr_ick_parent_names, l3_instr_ick_ops);
+
+static struct clk l3_main_3_ick;
+static struct clk_hw_omap l3_main_3_ick_hw = {
+ .hw = {
+ .clk = &l3_main_3_ick,
+ },
+ .enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ .clkdm_name = "l3_instr_clkdm",
+};
+
+DEFINE_STRUCT_CLK(l3_main_3_ick, l3_instr_ick_parent_names, l3_instr_ick_ops);
+
+DEFINE_CLK_MUX(mcasp_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel func_mcasp_abe_gfclk_sel[] = {
+ { .parent = &mcasp_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *mcasp_fck_parents[] = {
+ "mcasp_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
+};
+
+/* Merged func_mcasp_abe_gfclk into mcasp */
+DEFINE_CLK_OMAP_MUX_GATE(mcasp_fck, "abe_clkdm", func_mcasp_abe_gfclk_sel,
+ OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_MASK,
+ OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mcasp_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_MUX(mcbsp1_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel func_mcbsp1_gfclk_sel[] = {
+ { .parent = &mcbsp1_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp1_fck_parents[] = {
+ "mcbsp1_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
+};
+
+/* Merged func_mcbsp1_gfclk into mcbsp1 */
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "abe_clkdm", func_mcbsp1_gfclk_sel,
+ OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_MASK,
+ OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mcbsp1_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_MUX(mcbsp2_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel func_mcbsp2_gfclk_sel[] = {
+ { .parent = &mcbsp2_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp2_fck_parents[] = {
+ "mcbsp2_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
+};
+
+/* Merged func_mcbsp2_gfclk into mcbsp2 */
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "abe_clkdm", func_mcbsp2_gfclk_sel,
+ OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_MASK,
+ OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mcbsp2_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_MUX(mcbsp3_sync_mux_ck, dmic_sync_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel func_mcbsp3_gfclk_sel[] = {
+ { .parent = &mcbsp3_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp3_fck_parents[] = {
+ "mcbsp3_sync_mux_ck", "pad_clks_ck", "slimbus_clk",
+};
+
+/* Merged func_mcbsp3_gfclk into mcbsp3 */
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "abe_clkdm", func_mcbsp3_gfclk_sel,
+ OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_MASK,
+ OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mcbsp3_fck_parents, dmic_fck_ops);
+
+static const char *mcbsp4_sync_mux_ck_parents[] = {
+ "func_96m_fclk", "per_abe_nc_fclk",
+};
+
+DEFINE_CLK_MUX(mcbsp4_sync_mux_ck, mcbsp4_sync_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT,
+ OMAP4430_CLKSEL_INTERNAL_SOURCE_WIDTH, 0x0, NULL);
+
+static const struct clksel per_mcbsp4_gfclk_sel[] = {
+ { .parent = &mcbsp4_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static const char *mcbsp4_fck_parents[] = {
+ "mcbsp4_sync_mux_ck", "pad_clks_ck",
+};
+
+/* Merged per_mcbsp4_gfclk into mcbsp4 */
+DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "l4_per_clkdm", per_mcbsp4_gfclk_sel,
+ OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ OMAP4430_CLKSEL_SOURCE_24_24_MASK,
+ OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mcbsp4_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_GATE(mcpdm_fck, "pad_clks_ck", &pad_clks_ck, 0x0,
+ OMAP4430_CM1_ABE_PDM_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(mcspi1_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(mcspi2_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(mcspi3_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(mcspi4_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+static const struct clksel hsmmc1_fclk_sel[] = {
+ { .parent = &func_64m_fclk, .rates = div_1_0_rates },
+ { .parent = &func_96m_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static const char *mmc1_fck_parents[] = {
+ "func_64m_fclk", "func_96m_fclk",
+};
+
+/* Merged hsmmc1_fclk into mmc1 */
+DEFINE_CLK_OMAP_MUX_GATE(mmc1_fck, "l3_init_clkdm", hsmmc1_fclk_sel,
+ OMAP4430_CM_L3INIT_MMC1_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mmc1_fck_parents, dmic_fck_ops);
+
+/* Merged hsmmc2_fclk into mmc2 */
+DEFINE_CLK_OMAP_MUX_GATE(mmc2_fck, "l3_init_clkdm", hsmmc1_fclk_sel,
+ OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ mmc1_fck_parents, dmic_fck_ops);
+
+DEFINE_CLK_GATE(mmc3_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(mmc4_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(mmc5_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(ocp2scp_usb_phy_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+static struct clk ocp_wp_noc_ick;
+
+static struct clk_hw_omap ocp_wp_noc_ick_hw = {
+ .hw = {
+ .clk = &ocp_wp_noc_ick,
+ },
+ .enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ .clkdm_name = "l3_instr_clkdm",
+};
+
+DEFINE_STRUCT_CLK(ocp_wp_noc_ick, l3_instr_ick_parent_names, l3_instr_ick_ops);
+
+DEFINE_CLK_GATE(rng_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_RNG_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(sl2if_ick, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, 0x0,
+ OMAP4430_CM_IVAHD_SL2_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus1_fclk_1, "func_24m_clk", &func_24m_clk, 0x0,
+ OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_FCLK1_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus1_fclk_0, "abe_24m_fclk", &abe_24m_fclk, 0x0,
+ OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_FCLK0_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus1_fclk_2, "pad_clks_ck", &pad_clks_ck, 0x0,
+ OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_FCLK2_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus1_slimbus_clk, "slimbus_clk", &slimbus_clk, 0x0,
+ OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus1_fck, "ocp_abe_iclk", &ocp_abe_iclk, 0x0,
+ OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus2_fclk_1, "per_abe_24m_fclk", &per_abe_24m_fclk, 0x0,
+ OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus2_fclk_0, "func_24mc_fclk", &func_24mc_fclk, 0x0,
+ OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus2_slimbus_clk, "pad_slimbus_core_clks_ck",
+ &pad_slimbus_core_clks_ck, 0x0,
+ OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(slimbus2_fck, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(smartreflex_core_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
+ 0x0, OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(smartreflex_iva_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
+ 0x0, OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(smartreflex_mpu_fck, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck,
+ 0x0, OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+static const struct clksel dmt1_clk_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
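+/*
+ * The wakeup/L4-PER timer functional clocks below mux between
+ * sys_clkin_ck and sys_32k_ck and share dmt1_clk_mux_sel; the ABE timers
+ * (timer5-8) use timer5_sync_mux_sel (syc_clk_div_ck / sys_32k_ck)
+ * instead.
+ */
+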
+/* Merged dmt1_clk_mux into timer1 */
+DEFINE_CLK_OMAP_MUX_GATE(timer1_fck, "l4_wkup_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_WKUP_TIMER1_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm10_mux into timer10 */
+DEFINE_CLK_OMAP_MUX_GATE(timer10_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm11_mux into timer11 */
+DEFINE_CLK_OMAP_MUX_GATE(timer11_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm2_mux into timer2 */
+DEFINE_CLK_OMAP_MUX_GATE(timer2_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm3_mux into timer3 */
+DEFINE_CLK_OMAP_MUX_GATE(timer3_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm4_mux into timer4 */
+DEFINE_CLK_OMAP_MUX_GATE(timer4_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+static const struct clksel timer5_sync_mux_sel[] = {
+ { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static const char *timer5_fck_parents[] = {
+ "syc_clk_div_ck", "sys_32k_ck",
+};
+
+/* Merged timer5_sync_mux into timer5 */
+DEFINE_CLK_OMAP_MUX_GATE(timer5_fck, "abe_clkdm", timer5_sync_mux_sel,
+ OMAP4430_CM1_ABE_TIMER5_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ timer5_fck_parents, dmic_fck_ops);
+
+/* Merged timer6_sync_mux into timer6 */
+DEFINE_CLK_OMAP_MUX_GATE(timer6_fck, "abe_clkdm", timer5_sync_mux_sel,
+ OMAP4430_CM1_ABE_TIMER6_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ timer5_fck_parents, dmic_fck_ops);
+
+/* Merged timer7_sync_mux into timer7 */
+DEFINE_CLK_OMAP_MUX_GATE(timer7_fck, "abe_clkdm", timer5_sync_mux_sel,
+ OMAP4430_CM1_ABE_TIMER7_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ timer5_fck_parents, dmic_fck_ops);
+
+/* Merged timer8_sync_mux into timer8 */
+DEFINE_CLK_OMAP_MUX_GATE(timer8_fck, "abe_clkdm", timer5_sync_mux_sel,
+ OMAP4430_CM1_ABE_TIMER8_CLKCTRL, OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ timer5_fck_parents, dmic_fck_ops);
+
+/* Merged cm2_dm9_mux into timer9 */
+DEFINE_CLK_OMAP_MUX_GATE(timer9_fck, "l4_per_clkdm", dmt1_clk_mux_sel,
+ OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ OMAP4430_CLKSEL_MASK,
+ OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, NULL,
+ abe_dpll_bypass_clk_mux_ck_parents, dmic_fck_ops);
+
+DEFINE_CLK_GATE(uart1_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_UART1_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(uart2_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_UART2_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(uart3_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_UART3_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(uart4_fck, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L4PER_UART4_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+static struct clk usb_host_fs_fck;
+
+static const char *usb_host_fs_fck_parent_names[] = {
+ "func_48mc_fclk",
+};
+
+static const struct clk_ops usb_host_fs_fck_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk_hw_omap usb_host_fs_fck_hw = {
+ .hw = {
+ .clk = &usb_host_fs_fck,
+ },
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ .clkdm_name = "l3_init_clkdm",
+};
+
+DEFINE_STRUCT_CLK(usb_host_fs_fck, usb_host_fs_fck_parent_names,
+ usb_host_fs_fck_ops);
+
+static const char *utmi_p1_gfclk_parents[] = {
+ "init_60m_fclk", "xclk60mhsp1_ck",
+};
+
+DEFINE_CLK_MUX(utmi_p1_gfclk, utmi_p1_gfclk_parents, NULL, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_CLKSEL_UTMI_P1_SHIFT, OMAP4430_CLKSEL_UTMI_P1_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_utmi_p1_clk, "utmi_p1_gfclk", &utmi_p1_gfclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT, 0x0, NULL);
+
+static const char *utmi_p2_gfclk_parents[] = {
+ "init_60m_fclk", "xclk60mhsp2_ck",
+};
+
+DEFINE_CLK_MUX(utmi_p2_gfclk, utmi_p2_gfclk_parents, NULL, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_CLKSEL_UTMI_P2_SHIFT, OMAP4430_CLKSEL_UTMI_P2_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_utmi_p2_clk, "utmi_p2_gfclk", &utmi_p2_gfclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_utmi_p3_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_hsic480m_p1_clk, "dpll_usb_m2_ck",
+ &dpll_usb_m2_ck, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_hsic60m_p1_clk, "init_60m_fclk",
+ &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_hsic60m_p2_clk, "init_60m_fclk",
+ &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_hsic480m_p2_clk, "dpll_usb_m2_ck",
+ &dpll_usb_m2_ck, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_func48mclk, "func_48mc_fclk", &func_48mc_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_host_hs_fck, "init_60m_fclk", &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
+
+static const char *otg_60m_gfclk_parents[] = {
+ "utmi_phy_clkout_ck", "xclk60motg_ck",
+};
+
+DEFINE_CLK_MUX(otg_60m_gfclk, otg_60m_gfclk_parents, NULL, 0x0,
+ OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL, OMAP4430_CLKSEL_60M_SHIFT,
+ OMAP4430_CLKSEL_60M_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_otg_hs_xclk, "otg_60m_gfclk", &otg_60m_gfclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ OMAP4430_OPTFCLKEN_XCLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_otg_hs_ick, "l3_div_ck", &l3_div_ck, 0x0,
+ OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_phy_cm_clk32k, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_ALWON_USBPHY_CLKCTRL,
+ OMAP4430_OPTFCLKEN_CLK32K_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_tll_hs_usb_ch2_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_tll_hs_usb_ch0_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_tll_hs_usb_ch1_clk, "init_60m_fclk", &init_60m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT, 0x0, NULL);
+
+DEFINE_CLK_GATE(usb_tll_hs_ick, "l4_div_ck", &l4_div_ck, 0x0,
+ OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ OMAP4430_MODULEMODE_HWCTRL_SHIFT, 0x0, NULL);
+
+static const struct clk_div_table usim_ck_rates[] = {
+ { .div = 14, .val = 0 },
+ { .div = 18, .val = 1 },
+ { .div = 0 },
+};
+DEFINE_CLK_DIVIDER_TABLE(usim_ck, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, 0x0,
+ OMAP4430_CM_WKUP_USIM_CLKCTRL,
+ OMAP4430_CLKSEL_DIV_SHIFT, OMAP4430_CLKSEL_DIV_WIDTH,
+ 0x0, usim_ck_rates, NULL);
+
+DEFINE_CLK_GATE(usim_fclk, "usim_ck", &usim_ck, 0x0,
+ OMAP4430_CM_WKUP_USIM_CLKCTRL, OMAP4430_OPTFCLKEN_FCLK_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(usim_fck, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_WKUP_USIM_CLKCTRL, OMAP4430_MODULEMODE_HWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(wd_timer2_fck, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM_WKUP_WDT2_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+DEFINE_CLK_GATE(wd_timer3_fck, "sys_32k_ck", &sys_32k_ck, 0x0,
+ OMAP4430_CM1_ABE_WDT3_CLKCTRL, OMAP4430_MODULEMODE_SWCTRL_SHIFT,
+ 0x0, NULL);
+
+/* Remaining optional clocks */
+static const char *pmd_stm_clock_mux_ck_parents[] = {
+ "sys_clkin_ck", "dpll_core_m6x2_ck", "tie_low_clock_ck",
+};
+
+DEFINE_CLK_MUX(pmd_stm_clock_mux_ck, pmd_stm_clock_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM_EMU_DEBUGSS_CLKCTRL, OMAP4430_PMD_STM_MUX_CTRL_SHIFT,
+ OMAP4430_PMD_STM_MUX_CTRL_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_MUX(pmd_trace_clk_mux_ck, pmd_stm_clock_mux_ck_parents, NULL, 0x0,
+ OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT,
+ OMAP4430_PMD_TRACE_MUX_CTRL_WIDTH, 0x0, NULL);
+
+DEFINE_CLK_DIVIDER(stm_clk_div_ck, "pmd_stm_clock_mux_ck",
+ &pmd_stm_clock_mux_ck, 0x0, OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ OMAP4430_CLKSEL_PMD_STM_CLK_SHIFT,
+ OMAP4430_CLKSEL_PMD_STM_CLK_WIDTH, CLK_DIVIDER_POWER_OF_TWO,
+ NULL);
+
+static const char *trace_clk_div_ck_parents[] = {
+ "pmd_trace_clk_mux_ck",
+};
+
+static const struct clksel trace_clk_div_div[] = {
+ { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk trace_clk_div_ck;
+
+static const struct clk_ops trace_clk_div_ck_ops = {
+ .recalc_rate = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clkops_enable_clkdm,
+ .disable = &omap2_clkops_disable_clkdm,
+};
+
+static struct clk_hw_omap trace_clk_div_ck_hw = {
+ .hw = {
+ .clk = &trace_clk_div_ck,
+ },
+ .clkdm_name = "emu_sys_clkdm",
+ .clksel = trace_clk_div_div,
+ .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
+};
+
+DEFINE_STRUCT_CLK(trace_clk_div_ck, trace_clk_div_ck_parents,
+ trace_clk_div_ck_ops);
+
+/* SCRM aux clk nodes */
+
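+/*
+ * Each SCRM auxiliary clock pairs a gated source mux (auxclkN_src_ck)
+ * with a divider (auxclkN_ck); the auxclkreqN_ck muxes then associate a
+ * CLKREQ line with one of the auxclk outputs through the MAPPING field.
+ */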
+static const struct clksel auxclk_src_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m3x2_ck, .rates = div_1_1_rates },
+ { .parent = &dpll_per_m3x2_ck, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static const char *auxclk_src_ck_parents[] = {
+ "sys_clkin_ck", "dpll_core_m3x2_ck", "dpll_per_m3x2_ck",
+};
+
+static const struct clk_ops auxclk_src_ck_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .recalc_rate = &omap2_clksel_recalc,
+ .get_parent = &omap2_clksel_find_parent_index,
+};
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk0_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK0, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK0, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk0_ck, "auxclk0_src_ck", &auxclk0_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK0, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk1_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK1, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK1, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk1_ck, "auxclk1_src_ck", &auxclk1_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK1, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk2_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK2, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK2, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk2_ck, "auxclk2_src_ck", &auxclk2_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK2, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk3_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK3, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK3, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk3_ck, "auxclk3_src_ck", &auxclk3_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK3, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk4_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK4, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK4, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk4_ck, "auxclk4_src_ck", &auxclk4_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK4, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_OMAP_MUX_GATE(auxclk5_src_ck, NULL, auxclk_src_sel,
+ OMAP4_SCRM_AUXCLK5, OMAP4_SRCSELECT_MASK,
+ OMAP4_SCRM_AUXCLK5, OMAP4_ENABLE_SHIFT, NULL,
+ auxclk_src_ck_parents, auxclk_src_ck_ops);
+
+DEFINE_CLK_DIVIDER(auxclk5_ck, "auxclk5_src_ck", &auxclk5_src_ck, 0x0,
+ OMAP4_SCRM_AUXCLK5, OMAP4_CLKDIV_SHIFT, OMAP4_CLKDIV_WIDTH,
+ 0x0, NULL);
+
+static const char *auxclkreq_ck_parents[] = {
+ "auxclk0_ck", "auxclk1_ck", "auxclk2_ck", "auxclk3_ck", "auxclk4_ck",
+ "auxclk5_ck",
+};
+
+DEFINE_CLK_MUX(auxclkreq0_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ0, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_MUX(auxclkreq1_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ1, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_MUX(auxclkreq2_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ2, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_MUX(auxclkreq3_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ3, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_MUX(auxclkreq4_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ4, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+DEFINE_CLK_MUX(auxclkreq5_ck, auxclkreq_ck_parents, NULL, 0x0,
+ OMAP4_SCRM_AUXCLKREQ5, OMAP4_MAPPING_SHIFT, OMAP4_MAPPING_WIDTH,
+ 0x0, NULL);
+
+/*
+ * clkdev
+ */
+
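+/*
+ * Each CLK(dev_id, con_id, clk, flags) entry registers a clkdev lookup so
+ * that drivers can find these clocks; for example (illustrative only):
+ *
+ *	clk = clk_get(NULL, "mcspi1_fck");
+ *	if (!IS_ERR(clk))
+ *		clk_prepare_enable(clk);
+ *
+ * CK_446X marks clocks that only exist on OMAP4460-class devices (note
+ * the OMAP4460_* register bits used by div_ts_ck/bandgap_ts_fclk), while
+ * CK_443X entries are common to the OMAP4 family.
+ */
+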
+static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "extalt_clkin_ck", &extalt_clkin_ck, CK_443X),
+ CLK(NULL, "pad_clks_src_ck", &pad_clks_src_ck, CK_443X),
+ CLK(NULL, "pad_clks_ck", &pad_clks_ck, CK_443X),
+ CLK(NULL, "pad_slimbus_core_clks_ck", &pad_slimbus_core_clks_ck, CK_443X),
+ CLK(NULL, "secure_32k_clk_src_ck", &secure_32k_clk_src_ck, CK_443X),
+ CLK(NULL, "slimbus_src_clk", &slimbus_src_clk, CK_443X),
+ CLK(NULL, "slimbus_clk", &slimbus_clk, CK_443X),
+ CLK(NULL, "sys_32k_ck", &sys_32k_ck, CK_443X),
+ CLK(NULL, "virt_12000000_ck", &virt_12000000_ck, CK_443X),
+ CLK(NULL, "virt_13000000_ck", &virt_13000000_ck, CK_443X),
+ CLK(NULL, "virt_16800000_ck", &virt_16800000_ck, CK_443X),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_443X),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_443X),
+ CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_443X),
+ CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_443X),
+ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_443X),
+ CLK(NULL, "tie_low_clock_ck", &tie_low_clock_ck, CK_443X),
+ CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_443X),
+ CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_443X),
+ CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_443X),
+ CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_443X),
+ CLK(NULL, "abe_dpll_bypass_clk_mux_ck", &abe_dpll_bypass_clk_mux_ck, CK_443X),
+ CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_443X),
+ CLK(NULL, "dpll_abe_x2_ck", &dpll_abe_x2_ck, CK_443X),
+ CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_443X),
+ CLK(NULL, "abe_24m_fclk", &abe_24m_fclk, CK_443X),
+ CLK(NULL, "abe_clk", &abe_clk, CK_443X),
+ CLK(NULL, "aess_fclk", &aess_fclk, CK_443X),
+ CLK(NULL, "dpll_abe_m3x2_ck", &dpll_abe_m3x2_ck, CK_443X),
+ CLK(NULL, "core_hsd_byp_clk_mux_ck", &core_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_443X),
+ CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_443X),
+ CLK(NULL, "dpll_core_m6x2_ck", &dpll_core_m6x2_ck, CK_443X),
+ CLK(NULL, "dbgclk_mux_ck", &dbgclk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_core_m2_ck", &dpll_core_m2_ck, CK_443X),
+ CLK(NULL, "ddrphy_ck", &ddrphy_ck, CK_443X),
+ CLK(NULL, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, CK_443X),
+ CLK(NULL, "div_core_ck", &div_core_ck, CK_443X),
+ CLK(NULL, "div_iva_hs_clk", &div_iva_hs_clk, CK_443X),
+ CLK(NULL, "div_mpu_hs_clk", &div_mpu_hs_clk, CK_443X),
+ CLK(NULL, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck, CK_443X),
+ CLK(NULL, "dll_clk_div_ck", &dll_clk_div_ck, CK_443X),
+ CLK(NULL, "dpll_abe_m2_ck", &dpll_abe_m2_ck, CK_443X),
+ CLK(NULL, "dpll_core_m3x2_ck", &dpll_core_m3x2_ck, CK_443X),
+ CLK(NULL, "dpll_core_m7x2_ck", &dpll_core_m7x2_ck, CK_443X),
+ CLK(NULL, "iva_hsd_byp_clk_mux_ck", &iva_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_iva_ck", &dpll_iva_ck, CK_443X),
+ CLK(NULL, "dpll_iva_x2_ck", &dpll_iva_x2_ck, CK_443X),
+ CLK(NULL, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, CK_443X),
+ CLK(NULL, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, CK_443X),
+ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_443X),
+ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_443X),
+ CLK(NULL, "per_hs_clk_div_ck", &per_hs_clk_div_ck, CK_443X),
+ CLK(NULL, "per_hsd_byp_clk_mux_ck", &per_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_443X),
+ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_443X),
+ CLK(NULL, "dpll_per_x2_ck", &dpll_per_x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m3x2_ck", &dpll_per_m3x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m6x2_ck", &dpll_per_m6x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m7x2_ck", &dpll_per_m7x2_ck, CK_443X),
+ CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_443X),
+ CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_443X),
+ CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_443X),
+ CLK(NULL, "dpll_usb_m2_ck", &dpll_usb_m2_ck, CK_443X),
+ CLK(NULL, "ducati_clk_mux_ck", &ducati_clk_mux_ck, CK_443X),
+ CLK(NULL, "func_12m_fclk", &func_12m_fclk, CK_443X),
+ CLK(NULL, "func_24m_clk", &func_24m_clk, CK_443X),
+ CLK(NULL, "func_24mc_fclk", &func_24mc_fclk, CK_443X),
+ CLK(NULL, "func_48m_fclk", &func_48m_fclk, CK_443X),
+ CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_443X),
+ CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_443X),
+ CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_443X),
+ CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_443X),
+ CLK(NULL, "l3_div_ck", &l3_div_ck, CK_443X),
+ CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
+ CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
+ CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
+ CLK("smp_twd", NULL, &mpu_periphclk, CK_443X),
+ CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
+ CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
+ CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
+ CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_443X),
+ CLK(NULL, "aes1_fck", &aes1_fck, CK_443X),
+ CLK(NULL, "aes2_fck", &aes2_fck, CK_443X),
+ CLK(NULL, "aess_fck", &aess_fck, CK_443X),
+ CLK(NULL, "bandgap_fclk", &bandgap_fclk, CK_443X),
+ CLK(NULL, "div_ts_ck", &div_ts_ck, CK_446X),
+ CLK(NULL, "bandgap_ts_fclk", &bandgap_ts_fclk, CK_446X),
+ CLK(NULL, "des3des_fck", &des3des_fck, CK_443X),
+ CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
+ CLK(NULL, "dmic_fck", &dmic_fck, CK_443X),
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
+ CLK(NULL, "dss_sys_clk", &dss_sys_clk, CK_443X),
+ CLK(NULL, "dss_tv_clk", &dss_tv_clk, CK_443X),
+ CLK(NULL, "dss_dss_clk", &dss_dss_clk, CK_443X),
+ CLK(NULL, "dss_48mhz_clk", &dss_48mhz_clk, CK_443X),
+ CLK(NULL, "dss_fck", &dss_fck, CK_443X),
+ CLK("omapdss_dss", "ick", &dss_fck, CK_443X),
+ CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
+ CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
+ CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
+ CLK(NULL, "fdif_fck", &fdif_fck, CK_443X),
+ CLK(NULL, "fpka_fck", &fpka_fck, CK_443X),
+ CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_443X),
+ CLK(NULL, "gpio1_ick", &gpio1_ick, CK_443X),
+ CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_443X),
+ CLK(NULL, "gpio2_ick", &gpio2_ick, CK_443X),
+ CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_443X),
+ CLK(NULL, "gpio3_ick", &gpio3_ick, CK_443X),
+ CLK(NULL, "gpio4_dbclk", &gpio4_dbclk, CK_443X),
+ CLK(NULL, "gpio4_ick", &gpio4_ick, CK_443X),
+ CLK(NULL, "gpio5_dbclk", &gpio5_dbclk, CK_443X),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_443X),
+ CLK(NULL, "gpio6_dbclk", &gpio6_dbclk, CK_443X),
+ CLK(NULL, "gpio6_ick", &gpio6_ick, CK_443X),
+ CLK(NULL, "gpmc_ick", &gpmc_ick, CK_443X),
+ CLK(NULL, "gpu_fck", &gpu_fck, CK_443X),
+ CLK(NULL, "hdq1w_fck", &hdq1w_fck, CK_443X),
+ CLK(NULL, "hsi_fck", &hsi_fck, CK_443X),
+ CLK(NULL, "i2c1_fck", &i2c1_fck, CK_443X),
+ CLK(NULL, "i2c2_fck", &i2c2_fck, CK_443X),
+ CLK(NULL, "i2c3_fck", &i2c3_fck, CK_443X),
+ CLK(NULL, "i2c4_fck", &i2c4_fck, CK_443X),
+ CLK(NULL, "ipu_fck", &ipu_fck, CK_443X),
+ CLK(NULL, "iss_ctrlclk", &iss_ctrlclk, CK_443X),
+ CLK(NULL, "iss_fck", &iss_fck, CK_443X),
+ CLK(NULL, "iva_fck", &iva_fck, CK_443X),
+ CLK(NULL, "kbd_fck", &kbd_fck, CK_443X),
+ CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_443X),
+ CLK(NULL, "l3_main_3_ick", &l3_main_3_ick, CK_443X),
+ CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcasp_fck", &mcasp_fck, CK_443X),
+ CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_443X),
+ CLK(NULL, "mcbsp2_sync_mux_ck", &mcbsp2_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_443X),
+ CLK(NULL, "mcbsp3_sync_mux_ck", &mcbsp3_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_443X),
+ CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_443X),
+ CLK(NULL, "mcpdm_fck", &mcpdm_fck, CK_443X),
+ CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_443X),
+ CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_443X),
+ CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_443X),
+ CLK(NULL, "mcspi4_fck", &mcspi4_fck, CK_443X),
+ CLK(NULL, "mmc1_fck", &mmc1_fck, CK_443X),
+ CLK(NULL, "mmc2_fck", &mmc2_fck, CK_443X),
+ CLK(NULL, "mmc3_fck", &mmc3_fck, CK_443X),
+ CLK(NULL, "mmc4_fck", &mmc4_fck, CK_443X),
+ CLK(NULL, "mmc5_fck", &mmc5_fck, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_443X),
+ CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_443X),
+ CLK(NULL, "rng_ick", &rng_ick, CK_443X),
+ CLK("omap_rng", "ick", &rng_ick, CK_443X),
+ CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
+ CLK(NULL, "sl2if_ick", &sl2if_ick, CK_443X),
+ CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
+ CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
+ CLK(NULL, "slimbus1_fclk_2", &slimbus1_fclk_2, CK_443X),
+ CLK(NULL, "slimbus1_slimbus_clk", &slimbus1_slimbus_clk, CK_443X),
+ CLK(NULL, "slimbus1_fck", &slimbus1_fck, CK_443X),
+ CLK(NULL, "slimbus2_fclk_1", &slimbus2_fclk_1, CK_443X),
+ CLK(NULL, "slimbus2_fclk_0", &slimbus2_fclk_0, CK_443X),
+ CLK(NULL, "slimbus2_slimbus_clk", &slimbus2_slimbus_clk, CK_443X),
+ CLK(NULL, "slimbus2_fck", &slimbus2_fck, CK_443X),
+ CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_443X),
+ CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_443X),
+ CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_443X),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_443X),
+ CLK(NULL, "timer10_fck", &timer10_fck, CK_443X),
+ CLK(NULL, "timer11_fck", &timer11_fck, CK_443X),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_443X),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_443X),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_443X),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_443X),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_443X),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_443X),
+ CLK(NULL, "timer8_fck", &timer8_fck, CK_443X),
+ CLK(NULL, "timer9_fck", &timer9_fck, CK_443X),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_443X),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
+ CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
+ CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
+ CLK("usbhs_omap", "fs_fck", &usb_host_fs_fck, CK_443X),
+ CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
+ CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p2_clk", &usb_host_hs_utmi_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_utmi_p3_clk", &usb_host_hs_utmi_p3_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic480m_p1_clk", &usb_host_hs_hsic480m_p1_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic60m_p1_clk", &usb_host_hs_hsic60m_p1_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
+ CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
+ CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
+ CLK("usbhs_omap", "hs_fck", &usb_host_hs_fck, CK_443X),
+ CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
+ CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
+ CLK(NULL, "usb_otg_hs_ick", &usb_otg_hs_ick, CK_443X),
+ CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
+ CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
+ CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
+ CLK("usbhs_omap", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
+ CLK("usbhs_tll", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
+ CLK(NULL, "usim_ck", &usim_ck, CK_443X),
+ CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
+ CLK(NULL, "usim_fck", &usim_fck, CK_443X),
+ CLK(NULL, "wd_timer2_fck", &wd_timer2_fck, CK_443X),
+ CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_443X),
+ CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_443X),
+ CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_443X),
+ CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
+ CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
+ CLK(NULL, "auxclk0_src_ck", &auxclk0_src_ck, CK_443X),
+ CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_443X),
+ CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_443X),
+ CLK(NULL, "auxclk1_src_ck", &auxclk1_src_ck, CK_443X),
+ CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_443X),
+ CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_443X),
+ CLK(NULL, "auxclk2_src_ck", &auxclk2_src_ck, CK_443X),
+ CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_443X),
+ CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_443X),
+ CLK(NULL, "auxclk3_src_ck", &auxclk3_src_ck, CK_443X),
+ CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_443X),
+ CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_443X),
+ CLK(NULL, "auxclk4_src_ck", &auxclk4_src_ck, CK_443X),
+ CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_443X),
+ CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_443X),
+ CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck, CK_443X),
+ CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_443X),
+ CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_443X),
+ CLK("omap-gpmc", "fck", &dummy_ck, CK_443X),
+ CLK("omap_i2c.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap_i2c.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap_i2c.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap_i2c.4", "ick", &dummy_ck, CK_443X),
+ CLK(NULL, "mailboxes_ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap_hsmmc.4", "ick", &dummy_ck, CK_443X),
+ CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
+ CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
+ CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
+ CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_443X),
+ CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_443X),
+ CLK("usbhs_tll", "usbtll_fck", &dummy_ck, CK_443X),
+ CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
+ CLK(NULL, "timer_32k_ck", &sys_32k_ck, CK_443X),
+ /* TODO: Remove "omap_timer.X" aliases once DT migration is complete */
+ CLK("omap_timer.1", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.2", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.3", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.4", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.9", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.10", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.11", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.5", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.6", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.7", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.8", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4a318000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("48032000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("48034000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("48036000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("4803e000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("48086000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("48088000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("40138000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013a000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013c000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013e000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK(NULL, "cpufreq_ck", &dpll_mpu_ck, CK_443X),
+};
+
+static const char *enable_init_clks[] = {
+ "emif1_fck",
+ "emif2_fck",
+ "gpmc_ick",
+ "l3_instr_ick",
+ "l3_main_3_ick",
+ "ocp_wp_noc_ick",
+};
+
+int __init omap4xxx_clk_init(void)
+{
+ u32 cpu_clkflg;
+ struct omap_clk *c;
+ int rc;
+
+ if (cpu_is_omap443x()) {
+ cpu_mask = RATE_IN_4430;
+ cpu_clkflg = CK_443X;
+ } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
+ cpu_mask = RATE_IN_4460 | RATE_IN_4430;
+ cpu_clkflg = CK_446X | CK_443X;
+
+ if (cpu_is_omap447x())
+ pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
+ } else {
+ return 0;
+ }
+
+ for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
+ c++) {
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ if (!__clk_init(NULL, c->lk.clk))
+ omap2_init_clk_hw_omap_clocks(c->lk.clk);
+ }
+ }
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ /*
+ * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
+ * state when the ABE clock domain is turned on. Work around this by
+ * locking the ABE DPLL on boot.
+ */
+ if (cpu_is_omap446x()) {
+ rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
+ if (!rc)
+ rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ }
+
+ return 0;
+}
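
For reference, the clkdev aliases registered by the table above are consumed through the standard clk API; nothing in this patch changes that consumer side. A minimal, illustrative sketch (not part of the patch: the probing device and error handling are assumptions, only the "ick" connection ID comes from the CLK("omap_hsmmc.0", "ick", ...) entry above):

#include <linux/clk.h>
#include <linux/err.h>

/* Illustration only: resolve and enable the interface-clock alias that
 * the table above exposes to the first OMAP HSMMC controller. */
static int example_enable_hsmmc_iclk(struct device *dev)
{
	struct clk *ick;
	int ret;

	ick = clk_get(dev, "ick");	/* dev name matches "omap_hsmmc.0" */
	if (IS_ERR(ick))
		return PTR_ERR(ick);

	ret = clk_prepare_enable(ick);	/* CCF: prepare + enable in one call */
	if (ret) {
		clk_put(ick);
		return ret;
	}

	/* ... device is clocked and usable here ... */

	clk_disable_unprepare(ick);
	clk_put(ick);
	return 0;
}
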
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
index 8c5b13e7ee6..25b1feed480 100644
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_apll.c
@@ -38,62 +38,88 @@
/* Private functions */
-static int _apll96_enable(struct clk *clk)
+/**
+ * omap2xxx_clk_apll_locked - is the APLL locked?
+ * @hw: struct clk_hw * of the APLL to check
+ *
+ * If the APLL IP block referred to by @hw indicates that it's locked,
+ * return true; otherwise, return false.
+ */
+static bool omap2xxx_clk_apll_locked(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 r, apll_mask;
+
+ apll_mask = EN_APLL_LOCKED << clk->enable_bit;
+
+ r = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+
+ return ((r & apll_mask) == apll_mask) ? true : false;
+}
+
+int omap2_clk_apll96_enable(struct clk_hw *hw)
{
return omap2xxx_cm_apll96_enable();
}
-static int _apll54_enable(struct clk *clk)
+int omap2_clk_apll54_enable(struct clk_hw *hw)
{
return omap2xxx_cm_apll54_enable();
}
-static void _apll96_allow_idle(struct clk *clk)
+static void _apll96_allow_idle(struct clk_hw_omap *clk)
{
omap2xxx_cm_set_apll96_auto_low_power_stop();
}
-static void _apll96_deny_idle(struct clk *clk)
+static void _apll96_deny_idle(struct clk_hw_omap *clk)
{
omap2xxx_cm_set_apll96_disable_autoidle();
}
-static void _apll54_allow_idle(struct clk *clk)
+static void _apll54_allow_idle(struct clk_hw_omap *clk)
{
omap2xxx_cm_set_apll54_auto_low_power_stop();
}
-static void _apll54_deny_idle(struct clk *clk)
+static void _apll54_deny_idle(struct clk_hw_omap *clk)
{
omap2xxx_cm_set_apll54_disable_autoidle();
}
-static void _apll96_disable(struct clk *clk)
+void omap2_clk_apll96_disable(struct clk_hw *hw)
{
omap2xxx_cm_apll96_disable();
}
-static void _apll54_disable(struct clk *clk)
+void omap2_clk_apll54_disable(struct clk_hw *hw)
{
omap2xxx_cm_apll54_disable();
}
-/* Public data */
+unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return (omap2xxx_clk_apll_locked(hw)) ? 54000000 : 0;
+}
-const struct clkops clkops_apll96 = {
- .enable = _apll96_enable,
- .disable = _apll96_disable,
- .allow_idle = _apll96_allow_idle,
- .deny_idle = _apll96_deny_idle,
-};
+unsigned long omap2_clk_apll96_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return (omap2xxx_clk_apll_locked(hw)) ? 96000000 : 0;
+}
-const struct clkops clkops_apll54 = {
- .enable = _apll54_enable,
- .disable = _apll54_disable,
+/* Public data */
+const struct clk_hw_omap_ops clkhwops_apll54 = {
.allow_idle = _apll54_allow_idle,
.deny_idle = _apll54_deny_idle,
};
+const struct clk_hw_omap_ops clkhwops_apll96 = {
+ .allow_idle = _apll96_allow_idle,
+ .deny_idle = _apll96_deny_idle,
+};
+
/* Public functions */
u32 omap2xxx_get_apll_clkin(void)
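
With the APLL enable/disable/recalc helpers above now taking struct clk_hw pointers, they slot directly into common-clock-framework clk_ops, while the clk_hw_omap_ops structures only carry the OMAP-specific allow_idle/deny_idle hooks. A sketch of that wiring, purely for illustration -- the real assignments live in the OMAP2xxx clock data files, which this hunk does not touch:

#include <linux/clk-provider.h>

/* Sketch only; not the actual OMAP2xxx clock data. */
static const struct clk_ops apll96_ck_example_ops = {
	.enable		= omap2_clk_apll96_enable,
	.disable	= omap2_clk_apll96_disable,
	.recalc_rate	= omap2_clk_apll96_recalc,
};
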
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpll.c b/arch/arm/mach-omap2/clkt2xxx_dpll.c
index 399534c7843..82572e277b9 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpll.c
@@ -29,7 +29,7 @@
* REVISIT: DPLL can optionally enter low-power bypass by writing 0x1
* instead. Add some mechanism to optionally enter this mode.
*/
-static void _allow_idle(struct clk *clk)
+static void _allow_idle(struct clk_hw_omap *clk)
{
if (!clk || !clk->dpll_data)
return;
@@ -43,7 +43,7 @@ static void _allow_idle(struct clk *clk)
*
* Disable DPLL automatic idle control. No return value.
*/
-static void _deny_idle(struct clk *clk)
+static void _deny_idle(struct clk_hw_omap *clk)
{
if (!clk || !clk->dpll_data)
return;
@@ -53,9 +53,7 @@ static void _deny_idle(struct clk *clk)
/* Public data */
-
-const struct clkops clkops_omap2xxx_dpll_ops = {
+const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll = {
.allow_idle = _allow_idle,
.deny_idle = _deny_idle,
};
-
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
index 825e44cdf1c..d8620105c42 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
@@ -40,7 +40,7 @@
* (currently defined as "dpll_ck" in the OMAP2xxx clock tree). Set
* during dpll_ck init and used later by omap2xxx_clk_get_core_rate().
*/
-static struct clk *dpll_core_ck;
+static struct clk_hw_omap *dpll_core_ck;
/**
* omap2xxx_clk_get_core_rate - return the CORE_CLK rate
@@ -104,13 +104,16 @@ static long omap2_dpllcore_round_rate(unsigned long target_rate)
}
-unsigned long omap2_dpllcore_recalc(struct clk *clk)
+unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
{
return omap2xxx_clk_get_core_rate();
}
-int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate)
+int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 cur_rate, low, mult, div, valid_rate, done_rate;
u32 bypass = 0;
struct prcm_config tmpset;
@@ -188,8 +191,8 @@ int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate)
* statically defined, this code may need to change to increment some
* kind of use count on dpll_ck.
*/
-void omap2xxx_clkt_dpllcore_init(struct clk *clk)
+void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw)
{
WARN(dpll_core_ck, "dpll_core_ck already set - should never happen");
- dpll_core_ck = clk;
+ dpll_core_ck = to_clk_hw_omap(hw);
}
diff --git a/arch/arm/mach-omap2/clkt2xxx_osc.c b/arch/arm/mach-omap2/clkt2xxx_osc.c
index e1777371bb5..19f54d43349 100644
--- a/arch/arm/mach-omap2/clkt2xxx_osc.c
+++ b/arch/arm/mach-omap2/clkt2xxx_osc.c
@@ -35,7 +35,7 @@
* clk_enable/clk_disable()-based usecounting for osc_ck should be
* replaced with autoidle-based usecounting.
*/
-static int omap2_enable_osc_ck(struct clk *clk)
+int omap2_enable_osc_ck(struct clk_hw *clk)
{
u32 pcc;
@@ -53,7 +53,7 @@ static int omap2_enable_osc_ck(struct clk *clk)
* clk_enable/clk_disable()-based usecounting for osc_ck should be
* replaced with autoidle-based usecounting.
*/
-static void omap2_disable_osc_ck(struct clk *clk)
+void omap2_disable_osc_ck(struct clk_hw *clk)
{
u32 pcc;
@@ -62,13 +62,8 @@ static void omap2_disable_osc_ck(struct clk *clk)
__raw_writel(pcc | OMAP_AUTOEXTCLKMODE_MASK, prcm_clksrc_ctrl);
}
-const struct clkops clkops_oscck = {
- .enable = omap2_enable_osc_ck,
- .disable = omap2_disable_osc_ck,
-};
-
-unsigned long omap2_osc_clk_recalc(struct clk *clk)
+unsigned long omap2_osc_clk_recalc(struct clk_hw *clk,
+ unsigned long parent_rate)
{
return omap2xxx_get_apll_clkin() * omap2xxx_get_sysclkdiv();
}
-
diff --git a/arch/arm/mach-omap2/clkt2xxx_sys.c b/arch/arm/mach-omap2/clkt2xxx_sys.c
index 46683b3c246..f467d072cd0 100644
--- a/arch/arm/mach-omap2/clkt2xxx_sys.c
+++ b/arch/arm/mach-omap2/clkt2xxx_sys.c
@@ -40,9 +40,8 @@ u32 omap2xxx_get_sysclkdiv(void)
return div;
}
-unsigned long omap2xxx_sys_clk_recalc(struct clk *clk)
+unsigned long omap2xxx_sys_clk_recalc(struct clk_hw *clk,
+ unsigned long parent_rate)
{
- return clk->parent->rate / omap2xxx_get_sysclkdiv();
+ return parent_rate / omap2xxx_get_sysclkdiv();
}
-
-
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index 1c2041fbd71..ae2b35e76dc 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -58,7 +58,8 @@ static unsigned long sys_ck_rate;
*
* Set virt_prcm_set's rate to the mpu_speed field of the current PRCM set.
*/
-unsigned long omap2_table_mpu_recalc(struct clk *clk)
+unsigned long omap2_table_mpu_recalc(struct clk_hw *clk,
+ unsigned long parent_rate)
{
return curr_prcm_set->mpu_speed;
}
@@ -70,7 +71,8 @@ unsigned long omap2_table_mpu_recalc(struct clk *clk)
* Some might argue L3-DDR, others ARM, others IVA. This code is simple and
* just uses the ARM rates.
*/
-long omap2_round_to_table_rate(struct clk *clk, unsigned long rate)
+long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
const struct prcm_config *ptr;
long highest_rate;
@@ -93,7 +95,8 @@ long omap2_round_to_table_rate(struct clk *clk, unsigned long rate)
}
/* Sets basic clocks based on the specified rate */
-int omap2_select_table_rate(struct clk *clk, unsigned long rate)
+int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
u32 cur_rate, done_rate, bypass = 0, tmp;
const struct prcm_config *prcm;
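
The three table-based functions above now match the clk_ops prototypes (recalc_rate/round_rate/set_rate). How virt_prcm_set would plug into the common clock framework, sketched for illustration only -- the actual hookup is in the OMAP2xxx clock data, outside this diff:

#include <linux/clk-provider.h>

/* Illustration only. */
static const struct clk_ops virt_prcm_set_example_ops = {
	.recalc_rate	= omap2_table_mpu_recalc,
	.round_rate	= omap2_round_to_table_rate,
	.set_rate	= omap2_select_table_rate,
};
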
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
index 6cf298e262f..eb69acf2101 100644
--- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
+++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
@@ -44,8 +44,10 @@
* Program the DPLL M2 divider with the rounded target rate. Returns
* -EINVAL upon error, or 0 upon success.
*/
-int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
+int omap3_core_dpll_m2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 new_div = 0;
u32 unlock_dll = 0;
u32 c;
@@ -63,7 +65,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
return -EINVAL;
sdrcrate = __clk_get_rate(sdrc_ick_p);
- clkrate = __clk_get_rate(clk);
+ clkrate = __clk_get_rate(hw->clk);
if (rate > clkrate)
sdrcrate <<= ((rate / clkrate) >> 1);
else
@@ -112,8 +114,6 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
0, 0, 0, 0);
- clk->rate = rate;
-
return 0;
}
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
index 53646facda4..0ec9f6fdf04 100644
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ b/arch/arm/mach-omap2/clkt_clksel.c
@@ -41,7 +41,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/bug.h>
@@ -58,11 +58,14 @@
* the element associated with the supplied parent clock address.
* Returns a pointer to the struct clksel on success or NULL on error.
*/
-static const struct clksel *_get_clksel_by_parent(struct clk *clk,
+static const struct clksel *_get_clksel_by_parent(struct clk_hw_omap *clk,
struct clk *src_clk)
{
const struct clksel *clks;
+ if (!src_clk)
+ return NULL;
+
for (clks = clk->clksel; clks->parent; clks++)
if (clks->parent == src_clk)
break; /* Found the requested parent */
@@ -70,7 +73,7 @@ static const struct clksel *_get_clksel_by_parent(struct clk *clk,
if (!clks->parent) {
/* This indicates a data problem */
WARN(1, "clock: %s: could not find parent clock %s in clksel array\n",
- __clk_get_name(clk), __clk_get_name(src_clk));
+ __clk_get_name(clk->hw.clk), __clk_get_name(src_clk));
return NULL;
}
@@ -78,64 +81,6 @@ static const struct clksel *_get_clksel_by_parent(struct clk *clk,
}
/**
- * _get_div_and_fieldval() - find the new clksel divisor and field value to use
- * @src_clk: planned new parent struct clk *
- * @clk: struct clk * that is being reparented
- * @field_val: pointer to a u32 to contain the register data for the divisor
- *
- * Given an intended new parent struct clk * @src_clk, and the struct
- * clk * @clk to the clock that is being reparented, find the
- * appropriate rate divisor for the new clock (returned as the return
- * value), and the corresponding register bitfield data to program to
- * reach that divisor (returned in the u32 pointed to by @field_val).
- * Returns 0 on error, or returns the newly-selected divisor upon
- * success (in this latter case, the corresponding register bitfield
- * value is passed back in the variable pointed to by @field_val)
- */
-static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
- u32 *field_val)
-{
- const struct clksel *clks;
- const struct clksel_rate *clkr, *max_clkr = NULL;
- u8 max_div = 0;
-
- clks = _get_clksel_by_parent(clk, src_clk);
- if (!clks)
- return 0;
-
- /*
- * Find the highest divisor (e.g., the one resulting in the
- * lowest rate) to use as the default. This should avoid
- * clock rates that are too high for the device. XXX A better
- * solution here would be to try to determine if there is a
- * divisor matching the original clock rate before the parent
- * switch, and if it cannot be found, to fall back to the
- * highest divisor.
- */
- for (clkr = clks->rates; clkr->div; clkr++) {
- if (!(clkr->flags & cpu_mask))
- continue;
-
- if (clkr->div > max_div) {
- max_div = clkr->div;
- max_clkr = clkr;
- }
- }
-
- if (max_div == 0) {
- /* This indicates an error in the clksel data */
- WARN(1, "clock: %s: could not find divisor for parent %s\n",
- __clk_get_name(clk),
- __clk_get_name(__clk_get_parent(src_clk)));
- return 0;
- }
-
- *field_val = max_clkr->val;
-
- return max_div;
-}
-
-/**
* _write_clksel_reg() - program a clock's clksel register in hardware
* @clk: struct clk * to program
* @v: clksel bitfield value to program (with LSB at bit 0)
@@ -148,7 +93,7 @@ static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
* take into account any time the hardware might take to switch the
* clock source.
*/
-static void _write_clksel_reg(struct clk *clk, u32 field_val)
+static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val)
{
u32 v;
@@ -171,13 +116,14 @@ static void _write_clksel_reg(struct clk *clk, u32 field_val)
* before calling. Returns 0 on error or returns the actual integer divisor
* upon success.
*/
-static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
+static u32 _clksel_to_divisor(struct clk_hw_omap *clk, u32 field_val)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
struct clk *parent;
- parent = __clk_get_parent(clk);
+ parent = __clk_get_parent(clk->hw.clk);
+
clks = _get_clksel_by_parent(clk, parent);
if (!clks)
return 0;
@@ -193,7 +139,8 @@ static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
if (!clkr->div) {
/* This indicates a data error */
WARN(1, "clock: %s: could not find fieldval %d for parent %s\n",
- __clk_get_name(clk), field_val, __clk_get_name(parent));
+ __clk_get_name(clk->hw.clk), field_val,
+ __clk_get_name(parent));
return 0;
}
@@ -210,7 +157,7 @@ static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
* register field value _before_ left-shifting (i.e., LSB is at bit
* 0); or returns 0xFFFFFFFF (~0) upon error.
*/
-static u32 _divisor_to_clksel(struct clk *clk, u32 div)
+static u32 _divisor_to_clksel(struct clk_hw_omap *clk, u32 div)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
@@ -219,7 +166,7 @@ static u32 _divisor_to_clksel(struct clk *clk, u32 div)
/* should never happen */
WARN_ON(div == 0);
- parent = __clk_get_parent(clk);
+ parent = __clk_get_parent(clk->hw.clk);
clks = _get_clksel_by_parent(clk, parent);
if (!clks)
return ~0;
@@ -234,7 +181,8 @@ static u32 _divisor_to_clksel(struct clk *clk, u32 div)
if (!clkr->div) {
pr_err("clock: %s: could not find divisor %d for parent %s\n",
- __clk_get_name(clk), div, __clk_get_name(parent));
+ __clk_get_name(clk->hw.clk), div,
+ __clk_get_name(parent));
return ~0;
}
@@ -249,7 +197,7 @@ static u32 _divisor_to_clksel(struct clk *clk, u32 div)
* into the hardware, convert it into the actual divisor value, and
* return it; or return 0 on error.
*/
-static u32 _read_divisor(struct clk *clk)
+static u32 _read_divisor(struct clk_hw_omap *clk)
{
u32 v;
@@ -277,7 +225,8 @@ static u32 _read_divisor(struct clk *clk)
*
* Returns the rounded clock rate or returns 0xffffffff on error.
*/
-u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
+u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk,
+ unsigned long target_rate,
u32 *new_div)
{
unsigned long test_rate;
@@ -288,9 +237,9 @@ u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
unsigned long parent_rate;
const char *clk_name;
- parent = __clk_get_parent(clk);
+ parent = __clk_get_parent(clk->hw.clk);
+ clk_name = __clk_get_name(clk->hw.clk);
parent_rate = __clk_get_rate(parent);
- clk_name = __clk_get_name(clk);
if (!clk->clksel || !clk->clksel_mask)
return ~0;
@@ -341,27 +290,35 @@ u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
*/
/**
- * omap2_init_clksel_parent() - set a clksel clk's parent field from the hdwr
- * @clk: OMAP clock struct ptr to use
+ * omap2_clksel_find_parent_index() - return the array index of the current
+ * hardware parent of @hw
+ * @hw: struct clk_hw * to find the current hardware parent of
*
- * Given a pointer @clk to a source-selectable struct clk, read the
- * hardware register and determine what its parent is currently set
- * to. Update @clk's .parent field with the appropriate clk ptr. No
- * return value.
+ * Given a struct clk_hw pointer @hw to the 'hw' member of a struct
+ * clk_hw_omap record representing a source-selectable hardware clock,
+ * read the hardware register and determine what its parent is
+ * currently set to. Intended to be called only by the common clock
+ * framework struct clk_ops.get_parent function pointer. Return
+ * the array index of this parent clock upon success -- there is no
+ * way to return an error, so if we encounter an error, just WARN()
+ * and pretend that we know what we're doing.
*/
-void omap2_init_clksel_parent(struct clk *clk)
+u8 omap2_clksel_find_parent_index(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
const struct clksel *clks;
const struct clksel_rate *clkr;
u32 r, found = 0;
struct clk *parent;
const char *clk_name;
+ int ret = 0, f = 0;
- if (!clk->clksel || !clk->clksel_mask)
- return;
+ parent = __clk_get_parent(hw->clk);
+ clk_name = __clk_get_name(hw->clk);
- parent = __clk_get_parent(clk);
- clk_name = __clk_get_name(clk);
+ /* XXX should be able to return an error */
+ WARN((!clk->clksel || !clk->clksel_mask),
+ "clock: %s: attempt to call on a non-clksel clock", clk_name);
r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
r >>= __ffs(clk->clksel_mask);
@@ -372,27 +329,21 @@ void omap2_init_clksel_parent(struct clk *clk)
continue;
if (clkr->val == r) {
- if (parent != clks->parent) {
- pr_debug("clock: %s: inited parent to %s (was %s)\n",
- clk_name,
- __clk_get_name(clks->parent),
- ((parent) ?
- __clk_get_name(parent) :
- "NULL"));
- clk_reparent(clk, clks->parent);
- }
found = 1;
+ ret = f;
}
}
+ f++;
}
/* This indicates a data error */
WARN(!found, "clock: %s: init parent: could not find regval %0x\n",
clk_name, r);
- return;
+ return ret;
}
+
/**
* omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
* @clk: struct clk *
@@ -402,21 +353,23 @@ void omap2_init_clksel_parent(struct clk *clk)
* function. Returns the clock's current rate, based on its parent's rate
* and its current divisor setting in the hardware.
*/
-unsigned long omap2_clksel_recalc(struct clk *clk)
+unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
unsigned long rate;
u32 div = 0;
- struct clk *parent;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- div = _read_divisor(clk);
- if (div == 0)
- return __clk_get_rate(clk);
+ if (!parent_rate)
+ return 0;
- parent = __clk_get_parent(clk);
- rate = __clk_get_rate(parent) / div;
+ div = _read_divisor(clk);
+ if (!div)
+ rate = parent_rate;
+ else
+ rate = parent_rate / div;
- pr_debug("clock: %s: recalc'd rate is %ld (div %d)\n",
- __clk_get_name(clk), rate, div);
+ pr_debug("%s: recalc'd %s's rate to %lu (div %d)\n", __func__,
+ __clk_get_name(hw->clk), rate, div);
return rate;
}
@@ -432,8 +385,10 @@ unsigned long omap2_clksel_recalc(struct clk *clk)
*
* Returns the rounded clock rate or returns 0xffffffff on error.
*/
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
+long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 new_div;
return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
@@ -454,8 +409,10 @@ long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
* is changed, they will all be affected without any notification.
* Returns -EINVAL upon error, or 0 upon success.
*/
-int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
+int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 field_val, validrate, new_div = 0;
if (!clk->clksel || !clk->clksel_mask)
@@ -471,10 +428,8 @@ int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
_write_clksel_reg(clk, field_val);
- clk->rate = __clk_get_rate(__clk_get_parent(clk)) / new_div;
-
- pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(clk),
- __clk_get_rate(clk));
+ pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(hw->clk),
+ __clk_get_rate(hw->clk));
return 0;
}
@@ -499,32 +454,13 @@ int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
* affected without any notification. Returns -EINVAL upon error, or
* 0 upon success.
*/
-int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent)
+int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val)
{
- u32 field_val = 0;
- u32 parent_div;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
if (!clk->clksel || !clk->clksel_mask)
return -EINVAL;
- parent_div = _get_div_and_fieldval(new_parent, clk, &field_val);
- if (!parent_div)
- return -EINVAL;
-
_write_clksel_reg(clk, field_val);
-
- clk_reparent(clk, new_parent);
-
- /* CLKSEL clocks follow their parents' rates, divided by a divisor */
- clk->rate = __clk_get_rate(new_parent);
-
- if (parent_div > 0)
- __clk_get_rate(clk) /= parent_div;
-
- pr_debug("clock: %s: set parent to %s (new rate %ld)\n",
- __clk_get_name(clk),
- __clk_get_name(__clk_get_parent(clk)),
- __clk_get_rate(clk));
-
return 0;
}
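
Taken together, the clksel conversions above give the common clock framework everything it needs for a mux-plus-divider clock: get_parent/set_parent now speak in parent-array indices and raw field values, and the rate operations take the parent rate from the framework. A hedged sketch of the resulting clk_ops (the concrete instances are defined in the OMAP clock data files, not in this file):

#include <linux/clk-provider.h>

/* Sketch only; field values mirror the prototypes above. */
static const struct clk_ops clksel_example_ops = {
	.recalc_rate	= omap2_clksel_recalc,
	.round_rate	= omap2_clksel_round_rate,
	.set_rate	= omap2_clksel_set_rate,
	.get_parent	= omap2_clksel_find_parent_index,
	.set_parent	= omap2_clksel_set_parent,
};
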
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 8463cc35624..924c230f894 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
#include <asm/div64.h>
@@ -76,7 +76,7 @@
* (assuming that it is counting N upwards), or -2 if the enclosing loop
* should skip to the next iteration (again assuming N is increasing).
*/
-static int _dpll_test_fint(struct clk *clk, u8 n)
+static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
{
struct dpll_data *dd;
long fint, fint_min, fint_max;
@@ -85,7 +85,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
- fint = __clk_get_rate(__clk_get_parent(clk)) / n;
+ fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n;
if (cpu_is_omap24xx()) {
/* Should not be called for OMAP2, so warn if it is called */
@@ -186,15 +186,15 @@ static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
}
/* Public functions */
-
-void omap2_init_dpll_parent(struct clk *clk)
+u8 omap2_init_dpll_parent(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 v;
struct dpll_data *dd;
dd = clk->dpll_data;
if (!dd)
- return;
+ return -EINVAL;
v = __raw_readl(dd->control_reg);
v &= dd->enable_mask;
@@ -204,18 +204,18 @@ void omap2_init_dpll_parent(struct clk *clk)
if (cpu_is_omap24xx()) {
if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
v == OMAP2XXX_EN_DPLL_FRBYPASS)
- clk_reparent(clk, dd->clk_bypass);
+ return 1;
} else if (cpu_is_omap34xx()) {
if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
v == OMAP3XXX_EN_DPLL_FRBYPASS)
- clk_reparent(clk, dd->clk_bypass);
+ return 1;
} else if (soc_is_am33xx() || cpu_is_omap44xx()) {
if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
v == OMAP4XXX_EN_DPLL_FRBYPASS ||
v == OMAP4XXX_EN_DPLL_MNBYPASS)
- clk_reparent(clk, dd->clk_bypass);
+ return 1;
}
- return;
+ return 0;
}
/**
@@ -232,7 +232,7 @@ void omap2_init_dpll_parent(struct clk *clk)
* locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
* if the clock @clk is not a DPLL.
*/
-u32 omap2_get_dpll_rate(struct clk *clk)
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
{
long long dpll_clk;
u32 dpll_mult, dpll_div, v;
@@ -288,8 +288,10 @@ u32 omap2_get_dpll_rate(struct clk *clk)
* (expensive) function again. Returns ~0 if the target rate cannot
* be rounded, or the rounded rate upon success.
*/
-long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int m, n, r, scaled_max_m;
unsigned long scaled_rt_rp;
unsigned long new_rate = 0;
@@ -303,7 +305,7 @@ long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
dd = clk->dpll_data;
ref_rate = __clk_get_rate(dd->clk_ref);
- clk_name = __clk_get_name(clk);
+ clk_name = __clk_get_name(hw->clk);
pr_debug("clock: %s: starting DPLL round_rate, target rate %ld\n",
clk_name, target_rate);
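
Similarly, omap2_init_dpll_parent() now reports the bypass/reference selection as a parent index instead of reparenting the clk itself, so it can serve as a get_parent callback, and omap2_dpll_round_rate() follows the round_rate prototype. A sketch only -- a real DPLL clock would also need enable/disable and set_rate handlers from the DPLL code, which are not part of this hunk:

#include <linux/clk-provider.h>

/* Illustration only; deliberately incomplete. */
static const struct clk_ops dpll_example_ops = {
	.get_parent	= omap2_init_dpll_parent,
	.round_rate	= omap2_dpll_round_rate,
};
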
diff --git a/arch/arm/mach-omap2/clkt_iclk.c b/arch/arm/mach-omap2/clkt_iclk.c
index fe774a09dd0..f10eb03ce3e 100644
--- a/arch/arm/mach-omap2/clkt_iclk.c
+++ b/arch/arm/mach-omap2/clkt_iclk.c
@@ -11,7 +11,7 @@
#undef DEBUG
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
@@ -23,7 +23,7 @@
/* Private functions */
/* XXX */
-void omap2_clkt_iclk_allow_idle(struct clk *clk)
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
{
u32 v, r;
@@ -35,7 +35,7 @@ void omap2_clkt_iclk_allow_idle(struct clk *clk)
}
/* XXX */
-void omap2_clkt_iclk_deny_idle(struct clk *clk)
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
{
u32 v, r;
@@ -48,33 +48,17 @@ void omap2_clkt_iclk_deny_idle(struct clk *clk)
/* Public data */
-const struct clkops clkops_omap2_iclk_dflt_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_companion = omap2_clk_dflt_find_companion,
- .find_idlest = omap2_clk_dflt_find_idlest,
+const struct clk_hw_omap_ops clkhwops_iclk = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
};
-const struct clkops clkops_omap2_iclk_dflt = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_iclk_wait = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
};
-const struct clkops clkops_omap2_iclk_idle_only = {
- .allow_idle = omap2_clkt_iclk_allow_idle,
- .deny_idle = omap2_clkt_iclk_deny_idle,
-};
-const struct clkops clkops_omap2_mdmclk_dflt_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_companion = omap2_clk_dflt_find_companion,
- .find_idlest = omap2_clk_dflt_find_idlest,
- .allow_idle = omap2_clkt_iclk_allow_idle,
- .deny_idle = omap2_clkt_iclk_deny_idle,
-};
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index e381d991092..e4ec3a69ee2 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -20,7 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -55,9 +55,28 @@ u16 cpu_mask;
*/
static bool clkdm_control = true;
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-static DEFINE_SPINLOCK(clockfw_lock);
+static LIST_HEAD(clk_hw_omap_clocks);
+
+/*
+ * Used for clocks that have the same value as the parent clock,
+ * divided by some factor
+ */
+unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *oclk;
+
+ if (!hw) {
+ pr_warn("%s: hw is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ oclk = to_clk_hw_omap(hw);
+
+ WARN_ON(!oclk->fixed_div);
+
+ return parent_rate / oclk->fixed_div;
+}
/*
* OMAP2+ specific clock functions
@@ -109,7 +128,7 @@ static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
* belong in the clock code and will be moved in the medium term to
* module-dependent code. No return value.
*/
-static void _omap2_module_wait_ready(struct clk *clk)
+static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
{
void __iomem *companion_reg, *idlest_reg;
u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
@@ -124,12 +143,11 @@ static void _omap2_module_wait_ready(struct clk *clk)
}
clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
-
r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
if (r) {
/* IDLEST register not in the CM module */
_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
- clk->name);
+ __clk_get_name(clk->hw.clk));
} else {
cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
};
@@ -145,15 +163,16 @@ static void _omap2_module_wait_ready(struct clk *clk)
* clockdomain pointer, and save it into the struct clk. Intended to be
* called during clk_register(). No return value.
*/
-void omap2_init_clk_clkdm(struct clk *clk)
+void omap2_init_clk_clkdm(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct clockdomain *clkdm;
const char *clk_name;
if (!clk->clkdm_name)
return;
- clk_name = __clk_get_name(clk);
+ clk_name = __clk_get_name(hw->clk);
clkdm = clkdm_lookup(clk->clkdm_name);
if (clkdm) {
@@ -200,8 +219,8 @@ void __init omap2_clk_disable_clkdm_control(void)
* associate this type of code with per-module data structures to
* avoid this issue, and remove the casts. No return value.
*/
-void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
- u8 *other_bit)
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg, u8 *other_bit)
{
u32 r;
@@ -229,8 +248,8 @@ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
* register address ID (e.g., that CM_FCLKEN2 corresponds to
* CM_IDLEST2). This is not true for all modules. No return value.
*/
-void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
- u8 *idlest_bit, u8 *idlest_val)
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
{
u32 r;
@@ -252,16 +271,44 @@ void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
}
-int omap2_dflt_clk_enable(struct clk *clk)
+/**
+ * omap2_dflt_clk_enable - enable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to enable
+ *
+ * Enable the clock @hw in the hardware. We first call into the OMAP
+ * clockdomain code to "enable" the corresponding clockdomain if this
+ * is the first enabled user of the clockdomain. Then program the
+ * hardware to enable the clock. Then wait for the IP block that uses
+ * this clock to leave idle (if applicable). Returns the error value
+ * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
+ * if @hw has a null clock enable_reg, or zero upon success.
+ */
+int omap2_dflt_clk_enable(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk;
u32 v;
+ int ret = 0;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (clkdm_control && clk->clkdm) {
+ ret = clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+ WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, __clk_get_name(hw->clk),
+ clk->clkdm->name, ret);
+ return ret;
+ }
+ }
if (unlikely(clk->enable_reg == NULL)) {
- pr_err("clock.c: Enable for %s without enable code\n",
- clk->name);
- return 0; /* REVISIT: -EINVAL */
+ pr_err("%s: %s missing enable_reg\n", __func__,
+ __clk_get_name(hw->clk));
+ ret = -EINVAL;
+ goto err;
}
+ /* FIXME should not have INVERT_ENABLE bit here */
v = __raw_readl(clk->enable_reg);
if (clk->flags & INVERT_ENABLE)
v &= ~(1 << clk->enable_bit);
@@ -270,22 +317,39 @@ int omap2_dflt_clk_enable(struct clk *clk)
__raw_writel(v, clk->enable_reg);
v = __raw_readl(clk->enable_reg); /* OCP barrier */
- if (clk->ops->find_idlest)
+ if (clk->ops && clk->ops->find_idlest)
_omap2_module_wait_ready(clk);
return 0;
+
+err:
+ if (clkdm_control && clk->clkdm)
+ clkdm_clk_disable(clk->clkdm, hw->clk);
+ return ret;
}
-void omap2_dflt_clk_disable(struct clk *clk)
+/**
+ * omap2_dflt_clk_disable - disable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to disable
+ *
+ * Disable the clock @hw in the hardware, and call into the OMAP
+ * clockdomain code to "disable" the corresponding clockdomain if all
+ * clocks/hwmods in that clockdomain are now disabled. No return
+ * value.
+ */
+void omap2_dflt_clk_disable(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk;
u32 v;
+ clk = to_clk_hw_omap(hw);
if (!clk->enable_reg) {
/*
- * 'Independent' here refers to a clock which is not
+ * 'independent' here refers to a clock which is not
* controlled by its parent.
*/
- pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
+ pr_err("%s: independent clock %s has no enable_reg\n",
+ __func__, __clk_get_name(hw->clk));
return;
}
@@ -296,191 +360,213 @@ void omap2_dflt_clk_disable(struct clk *clk)
v &= ~(1 << clk->enable_bit);
__raw_writel(v, clk->enable_reg);
/* No OCP barrier needed here since it is a disable operation */
-}
-
-const struct clkops clkops_omap2_dflt_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_companion = omap2_clk_dflt_find_companion,
- .find_idlest = omap2_clk_dflt_find_idlest,
-};
-const struct clkops clkops_omap2_dflt = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
-};
+ if (clkdm_control && clk->clkdm)
+ clkdm_clk_disable(clk->clkdm, hw->clk);
+}
/**
- * omap2_clk_disable - disable a clock, if the system is not using it
- * @clk: struct clk * to disable
+ * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being enabled
*
- * Decrements the usecount on struct clk @clk. If there are no users
- * left, call the clkops-specific clock disable function to disable it
- * in hardware. If the clock is part of a clockdomain (which they all
- * should be), request that the clockdomain be disabled. (It too has
- * a usecount, and so will not be disabled in the hardware until it no
- * longer has any users.) If the clock has a parent clock (most of
- * them do), then call ourselves, recursing on the parent clock. This
- * can cause an entire branch of the clock tree to be powered off by
- * simply disabling one clock. Intended to be called with the clockfw_lock
- * spinlock held. No return value.
+ * Increment the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 1, the clockdomain will be "enabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_enable() as
+ * their enable function pointer. Passes along the return value of
+ * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
+ * clockdomain, or 0 if clock framework-based clockdomain control is
+ * not implemented.
*/
-void omap2_clk_disable(struct clk *clk)
+int omap2_clkops_enable_clkdm(struct clk_hw *hw)
{
- if (clk->usecount == 0) {
- WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
- return;
- }
-
- pr_debug("clock: %s: decrementing usecount\n", clk->name);
+ struct clk_hw_omap *clk;
+ int ret = 0;
- clk->usecount--;
+ clk = to_clk_hw_omap(hw);
- if (clk->usecount > 0)
- return;
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
- pr_debug("clock: %s: disabling in hardware\n", clk->name);
+ if (unlikely(clk->enable_reg))
+ pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
+ __clk_get_name(hw->clk));
- if (clk->ops && clk->ops->disable) {
- trace_clock_disable(clk->name, 0, smp_processor_id());
- clk->ops->disable(clk);
+ if (!clkdm_control) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
}
- if (clkdm_control && clk->clkdm)
- clkdm_clk_disable(clk->clkdm, clk);
+ ret = clkdm_clk_enable(clk->clkdm, hw->clk);
+ WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);
- if (clk->parent)
- omap2_clk_disable(clk->parent);
+ return ret;
}
/**
- * omap2_clk_enable - request that the system enable a clock
- * @clk: struct clk * to enable
+ * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being disabled
*
- * Increments the usecount on struct clk @clk. If there were no users
- * previously, then recurse up the clock tree, enabling all of the
- * clock's parents and all of the parent clockdomains, and finally,
- * enabling @clk's clockdomain, and @clk itself. Intended to be
- * called with the clockfw_lock spinlock held. Returns 0 upon success
- * or a negative error code upon failure.
+ * Decrement the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 0, the clockdomain will be "disabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
+ * disable function pointer. No return value.
*/
-int omap2_clk_enable(struct clk *clk)
+void omap2_clkops_disable_clkdm(struct clk_hw *hw)
{
- int ret;
+ struct clk_hw_omap *clk;
- pr_debug("clock: %s: incrementing usecount\n", clk->name);
+ clk = to_clk_hw_omap(hw);
- clk->usecount++;
-
- if (clk->usecount > 1)
- return 0;
-
- pr_debug("clock: %s: enabling in hardware\n", clk->name);
-
- if (clk->parent) {
- ret = omap2_clk_enable(clk->parent);
- if (ret) {
- WARN(1, "clock: %s: could not enable parent %s: %d\n",
- clk->name, clk->parent->name, ret);
- goto oce_err1;
- }
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ __clk_get_name(hw->clk));
+ return;
}
- if (clkdm_control && clk->clkdm) {
- ret = clkdm_clk_enable(clk->clkdm, clk);
- if (ret) {
- WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
- clk->name, clk->clkdm->name, ret);
- goto oce_err2;
- }
- }
+ if (unlikely(clk->enable_reg))
+ pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
+ __clk_get_name(hw->clk));
- if (clk->ops && clk->ops->enable) {
- trace_clock_enable(clk->name, 1, smp_processor_id());
- ret = clk->ops->enable(clk);
- if (ret) {
- WARN(1, "clock: %s: could not enable: %d\n",
- clk->name, ret);
- goto oce_err3;
- }
+ if (!clkdm_control) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, __clk_get_name(hw->clk));
+ return;
}
- return 0;
-
-oce_err3:
- if (clkdm_control && clk->clkdm)
- clkdm_clk_disable(clk->clkdm, clk);
-oce_err2:
- if (clk->parent)
- omap2_clk_disable(clk->parent);
-oce_err1:
- clk->usecount--;
-
- return ret;
+ clkdm_clk_disable(clk->clkdm, hw->clk);
}
-/* Given a clock and a rate apply a clock specific rounding function */
-long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
+/**
+ * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
+ * @hw: struct clk_hw * to check
+ *
+ * Return 1 if the clock represented by @hw is enabled in the
+ * hardware, or 0 otherwise. Intended for use in the struct
+ * clk_ops.is_enabled function pointer.
+ */
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
{
- if (clk->round_rate)
- return clk->round_rate(clk, rate);
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
- return clk->rate;
+ v = __raw_readl(clk->enable_reg);
+
+ if (clk->flags & INVERT_ENABLE)
+ v ^= BIT(clk->enable_bit);
+
+ v &= BIT(clk->enable_bit);
+
+ return v ? 1 : 0;
}
-/* Set the clock rate for a clock source */
-int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
+static int __initdata mpurate;
+
+/*
+ * By default we use the rate set by the bootloader.
+ * You can override this with mpurate= cmdline option.
+ */
+static int __init omap_clk_setup(char *str)
{
- int ret = -EINVAL;
+ get_option(&str, &mpurate);
- pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
+ if (!mpurate)
+ return 1;
- /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
- if (clk->set_rate) {
- trace_clock_set_rate(clk->name, rate, smp_processor_id());
- ret = clk->set_rate(clk, rate);
- }
+ if (mpurate < 1000)
+ mpurate *= 1000000;
- return ret;
+ return 1;
}
+__setup("mpurate=", omap_clk_setup);
-int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
+/**
+ * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
+ * @clk: struct clk * to initialize
+ *
+ * Add an OMAP clock @clk to the internal list of OMAP clocks. Used
+ * temporarily for autoidle handling, until this support can be
+ * integrated into the common clock framework code in some way. No
+ * return value.
+ */
+void omap2_init_clk_hw_omap_clocks(struct clk *clk)
{
- if (!clk->clksel)
- return -EINVAL;
+ struct clk_hw_omap *c;
- if (clk->parent == new_parent)
- return 0;
+ if (__clk_get_flags(clk) & CLK_IS_BASIC)
+ return;
- return omap2_clksel_set_parent(clk, new_parent);
+ c = to_clk_hw_omap(__clk_get_hw(clk));
+ list_add(&c->node, &clk_hw_omap_clocks);
}
-/*
- * OMAP2+ clock reset and init functions
+/**
+ * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Enable clock autoidle on all OMAP clocks that have allow_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
*/
+int omap2_clk_enable_autoidle_all(void)
+{
+ struct clk_hw_omap *c;
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-void omap2_clk_disable_unused(struct clk *clk)
+ list_for_each_entry(c, &clk_hw_omap_clocks, node)
+ if (c->ops && c->ops->allow_idle)
+ c->ops->allow_idle(c);
+ return 0;
+}
+
+/**
+ * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Disable clock autoidle on all OMAP clocks that have deny_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
+ */
+int omap2_clk_disable_autoidle_all(void)
{
- u32 regval32, v;
+ struct clk_hw_omap *c;
- v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;
+ list_for_each_entry(c, &clk_hw_omap_clocks, node)
+ if (c->ops && c->ops->deny_idle)
+ c->ops->deny_idle(c);
+ return 0;
+}
- regval32 = __raw_readl(clk->enable_reg);
- if ((regval32 & (1 << clk->enable_bit)) == v)
- return;
+/**
+ * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
+ * @clk_names: ptr to an array of strings of clock names to enable
+ * @num_clocks: number of clock names in @clk_names
+ *
+ * Prepare and enable a list of clocks, named by @clk_names. No
+ * return value. XXX Deprecated; only needed until these clocks are
+ * properly claimed and enabled by the drivers or core code that uses
+ * them. XXX What code disables & calls clk_put on these clocks?
+ */
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+{
+ struct clk *init_clk;
+ int i;
- pr_debug("Disabling unused clock \"%s\"\n", clk->name);
- if (cpu_is_omap34xx()) {
- omap2_clk_enable(clk);
- omap2_clk_disable(clk);
- } else {
- clk->ops->disable(clk);
+ for (i = 0; i < num_clocks; i++) {
+ init_clk = clk_get(NULL, clk_names[i]);
+ clk_prepare_enable(init_clk);
}
- if (clk->clkdm != NULL)
- pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
-#endif
+
+const struct clk_hw_omap_ops clkhwops_wait = {
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
/**
* omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
@@ -512,14 +598,12 @@ int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
r = clk_set_rate(mpurate_ck, mpurate);
if (IS_ERR_VALUE(r)) {
WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
- mpurate_ck->name, mpurate, r);
+ mpurate_ck_name, mpurate, r);
clk_put(mpurate_ck);
return -EINVAL;
}
calibrate_delay();
- recalculate_root_clocks();
-
clk_put(mpurate_ck);
return 0;
@@ -563,513 +647,3 @@ void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
(clk_get_rate(core_ck) / 1000000),
(clk_get_rate(mpu_ck) / 1000000));
}
-
-/* Common data */
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
- int ret;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap2_clk_enable(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (clk == NULL || IS_ERR(clk))
- return;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->usecount == 0) {
- pr_err("Trying disable clock %s with 0 usecount\n",
- clk->name);
- WARN_ON(1);
- goto out;
- }
-
- omap2_clk_disable(clk);
-
-out:
- spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long flags;
- unsigned long ret;
-
- if (clk == NULL || IS_ERR(clk))
- return 0;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = clk->rate;
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/*
- * Optional clock functions defined in include/linux/clk.h
- */
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- long ret;
-
- if (clk == NULL || IS_ERR(clk))
- return 0;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap2_clk_round_rate(clk, rate);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (clk == NULL || IS_ERR(clk))
- return ret;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = omap2_clk_set_rate(clk, rate);
- if (ret == 0)
- propagate_rate(clk);
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
- return ret;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- if (clk->usecount == 0) {
- ret = omap2_clk_set_parent(clk, parent);
- if (ret == 0)
- propagate_rate(clk);
- } else {
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
-
-/*
- * OMAP specific clock functions shared between omap1 and omap2
- */
-
-int __initdata mpurate;
-
-/*
- * By default we use the rate set by the bootloader.
- * You can override this with mpurate= cmdline option.
- */
-static int __init omap_clk_setup(char *str)
-{
- get_option(&str, &mpurate);
-
- if (!mpurate)
- return 1;
-
- if (mpurate < 1000)
- mpurate *= 1000000;
-
- return 1;
-}
-__setup("mpurate=", omap_clk_setup);
-
-/* Used for clocks that always have same value as the parent clock */
-unsigned long followparent_recalc(struct clk *clk)
-{
- return clk->parent->rate;
-}
-
-/*
- * Used for clocks that have the same value as the parent clock,
- * divided by some factor
- */
-unsigned long omap_fixed_divisor_recalc(struct clk *clk)
-{
- WARN_ON(!clk->fixed_div);
-
- return clk->parent->rate / clk->fixed_div;
-}
-
-void clk_reparent(struct clk *child, struct clk *parent)
-{
- list_del_init(&child->sibling);
- if (parent)
- list_add(&child->sibling, &parent->children);
- child->parent = parent;
-
- /* now do the debugfs renaming to reattach the child
- to the proper parent */
-}
-
-/* Propagate rate to children */
-void propagate_rate(struct clk *tclk)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &tclk->children, sibling) {
- if (clkp->recalc)
- clkp->rate = clkp->recalc(clkp);
- propagate_rate(clkp);
- }
-}
-
-static LIST_HEAD(root_clks);
-
-/**
- * recalculate_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- * Called at init.
- */
-void recalculate_root_clocks(void)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &root_clks, sibling) {
- if (clkp->recalc)
- clkp->rate = clkp->recalc(clkp);
- propagate_rate(clkp);
- }
-}
-
-/**
- * clk_preinit - initialize any fields in the struct clk before clk init
- * @clk: struct clk * to initialize
- *
- * Initialize any struct clk fields needed before normal clk initialization
- * can run. No return value.
- */
-void clk_preinit(struct clk *clk)
-{
- INIT_LIST_HEAD(&clk->children);
-}
-
-int clk_register(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- /*
- * trap out already registered clocks
- */
- if (clk->node.next || clk->node.prev)
- return 0;
-
- mutex_lock(&clocks_mutex);
- if (clk->parent)
- list_add(&clk->sibling, &clk->parent->children);
- else
- list_add(&clk->sibling, &root_clks);
-
- list_add(&clk->node, &clocks);
- if (clk->init)
- clk->init(clk);
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
-
- mutex_lock(&clocks_mutex);
- list_del(&clk->sibling);
- list_del(&clk->node);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unregister);
-
-void clk_enable_init_clocks(void)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &clocks, node)
- if (clkp->flags & ENABLE_ON_INIT)
- clk_enable(clkp);
-}
-
-/**
- * omap_clk_get_by_name - locate OMAP struct clk by its name
- * @name: name of the struct clk to locate
- *
- * Locate an OMAP struct clk by its name. Assumes that struct clk
- * names are unique. Returns NULL if not found or a pointer to the
- * struct clk if found.
- */
-struct clk *omap_clk_get_by_name(const char *name)
-{
- struct clk *c;
- struct clk *ret = NULL;
-
- mutex_lock(&clocks_mutex);
-
- list_for_each_entry(c, &clocks, node) {
- if (!strcmp(c->name, name)) {
- ret = c;
- break;
- }
- }
-
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-
-int omap_clk_enable_autoidle_all(void)
-{
- struct clk *c;
- unsigned long flags;
-
- spin_lock_irqsave(&clockfw_lock, flags);
-
- list_for_each_entry(c, &clocks, node)
- if (c->ops->allow_idle)
- c->ops->allow_idle(c);
-
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-
-int omap_clk_disable_autoidle_all(void)
-{
- struct clk *c;
- unsigned long flags;
-
- spin_lock_irqsave(&clockfw_lock, flags);
-
- list_for_each_entry(c, &clocks, node)
- if (c->ops->deny_idle)
- c->ops->deny_idle(c);
-
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-
-/*
- * Low level helpers
- */
-static int clkll_enable_null(struct clk *clk)
-{
- return 0;
-}
-
-static void clkll_disable_null(struct clk *clk)
-{
-}
-
-const struct clkops clkops_null = {
- .enable = clkll_enable_null,
- .disable = clkll_disable_null,
-};
-
-/*
- * Dummy clock
- *
- * Used for clock aliases that are needed on some OMAPs, but not others
- */
-struct clk dummy_ck = {
- .name = "dummy",
- .ops = &clkops_null,
-};
-
-/*
- *
- */
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-/*
- * Disable any unused clocks left on by the bootloader
- */
-static int __init clk_disable_unused(void)
-{
- struct clk *ck;
- unsigned long flags;
-
- pr_info("clock: disabling unused clocks to save power\n");
-
- spin_lock_irqsave(&clockfw_lock, flags);
- list_for_each_entry(ck, &clocks, node) {
- if (ck->ops == &clkops_null)
- continue;
-
- if (ck->usecount > 0 || !ck->enable_reg)
- continue;
-
- omap2_clk_disable_unused(ck);
- }
- spin_unlock_irqrestore(&clockfw_lock, flags);
-
- return 0;
-}
-late_initcall(clk_disable_unused);
-late_initcall(omap_clk_enable_autoidle_all);
-#endif
-
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static struct dentry *clk_debugfs_root;
-
-static int clk_dbg_show_summary(struct seq_file *s, void *unused)
-{
- struct clk *c;
- struct clk *pa;
-
- mutex_lock(&clocks_mutex);
- seq_printf(s, "%-30s %-30s %-10s %s\n",
- "clock-name", "parent-name", "rate", "use-count");
-
- list_for_each_entry(c, &clocks, node) {
- pa = c->parent;
- seq_printf(s, "%-30s %-30s %-10lu %d\n",
- c->name, pa ? pa->name : "none", c->rate,
- c->usecount);
- }
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-
-static int clk_dbg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_dbg_show_summary, inode->i_private);
-}
-
-static const struct file_operations debug_clock_fops = {
- .open = clk_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int clk_debugfs_register_one(struct clk *c)
-{
- int err;
- struct dentry *d;
- struct clk *pa = c->parent;
-
- d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
- if (!d)
- return -ENOMEM;
- c->dent = d;
-
- d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- return 0;
-
-err_out:
- debugfs_remove_recursive(c->dent);
- return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
- int err;
- struct clk *pa = c->parent;
-
- if (pa && !pa->dent) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
-
- if (!c->dent) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clocks, node) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
-
- d = debugfs_create_file("summary", S_IRUGO,
- d, NULL, &debug_clock_fops);
- if (!d)
- return -ENOMEM;
-
- return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
-}
-late_initcall(clk_debugfs_init);
-
-#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
-
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index ff9789bc0fd..b40204837bd 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
struct omap_clk {
u16 cpu;
@@ -52,43 +53,84 @@ struct omap_clk {
#define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
#define CK_3XXX (CK_34XX | CK_AM35XX | CK_36XX)
-struct module;
-struct clk;
struct clockdomain;
-
-/* Temporary, needed during the common clock framework conversion */
-#define __clk_get_name(clk) (clk->name)
-#define __clk_get_parent(clk) (clk->parent)
-#define __clk_get_rate(clk) (clk->rate)
-
-/**
- * struct clkops - some clock function pointers
- * @enable: fn ptr that enables the current clock in hardware
- * @disable: fn ptr that disables the current clock in hardware
- * @find_idlest: function returning the IDLEST register for the clock's IP blk
- * @find_companion: function returning the "companion" clk reg for the clock
- * @allow_idle: fn ptr that enables autoidle for the current clock in hardware
- * @deny_idle: fn ptr that disables autoidle for the current clock in hardware
- *
- * A "companion" clk is an accompanying clock to the one being queried
- * that must be enabled for the IP module connected to the clock to
- * become accessible by the hardware. Neither @find_idlest nor
- * @find_companion should be needed; that information is IP
- * block-specific; the hwmod code has been created to handle this, but
- * until hwmod data is ready and drivers have been converted to use PM
- * runtime calls in place of clk_enable()/clk_disable(), @find_idlest and
- * @find_companion must, unfortunately, remain.
- */
-struct clkops {
- int (*enable)(struct clk *);
- void (*disable)(struct clk *);
- void (*find_idlest)(struct clk *, void __iomem **,
- u8 *, u8 *);
- void (*find_companion)(struct clk *, void __iomem **,
- u8 *);
- void (*allow_idle)(struct clk *);
- void (*deny_idle)(struct clk *);
-};
+#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+
+#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \
+ static struct clk _name = { \
+ .name = #_name, \
+ .hw = &_name##_hw.hw, \
+ .parent_names = _parent_array_name, \
+ .num_parents = ARRAY_SIZE(_parent_array_name), \
+ .ops = &_clkops_name, \
+ };
+
+#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \
+ static struct clk_hw_omap _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .clkdm_name = _clkdm_name, \
+ };
+
+#define DEFINE_CLK_OMAP_MUX(_name, _clkdm_name, _clksel, \
+ _clksel_reg, _clksel_mask, \
+ _parent_names, _ops) \
+ static struct clk _name; \
+ static struct clk_hw_omap _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .clksel = _clksel, \
+ .clksel_reg = _clksel_reg, \
+ .clksel_mask = _clksel_mask, \
+ .clkdm_name = _clkdm_name, \
+ }; \
+ DEFINE_STRUCT_CLK(_name, _parent_names, _ops);
+
+#define DEFINE_CLK_OMAP_MUX_GATE(_name, _clkdm_name, _clksel, \
+ _clksel_reg, _clksel_mask, \
+ _enable_reg, _enable_bit, \
+ _hwops, _parent_names, _ops) \
+ static struct clk _name; \
+ static struct clk_hw_omap _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .ops = _hwops, \
+ .enable_reg = _enable_reg, \
+ .enable_bit = _enable_bit, \
+ .clksel = _clksel, \
+ .clksel_reg = _clksel_reg, \
+ .clksel_mask = _clksel_mask, \
+ .clkdm_name = _clkdm_name, \
+ }; \
+ DEFINE_STRUCT_CLK(_name, _parent_names, _ops);
+
+#define DEFINE_CLK_OMAP_HSDIVIDER(_name, _parent_name, \
+ _parent_ptr, _flags, \
+ _clksel_reg, _clksel_mask) \
+ static const struct clksel _name##_div[] = { \
+ { \
+ .parent = _parent_ptr, \
+ .rates = div31_1to31_rates \
+ }, \
+ { .parent = NULL }, \
+ }; \
+ static struct clk _name; \
+ static const char *_name##_parent_names[] = { \
+ _parent_name, \
+ }; \
+ static struct clk_hw_omap _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .clksel = _name##_div, \
+ .clksel_reg = _clksel_reg, \
+ .clksel_mask = _clksel_mask, \
+ .ops = &clkhwops_omap4_dpllmx, \
+ }; \
+ DEFINE_STRUCT_CLK(_name, _name##_parent_names, omap_hsdivider_ops);
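The DEFINE_* helpers above generate the struct clk / struct clk_hw_omap boilerplate used during the common clock framework conversion. As a minimal usage sketch (the clock, parent, register and mask names below are hypothetical placeholders, not identifiers from this patch), DEFINE_CLK_OMAP_HSDIVIDER might be invoked like this:

	/* hypothetical 31-step HSDIVIDER output fed by hyp_parent_ck */
	DEFINE_CLK_OMAP_HSDIVIDER(hyp_div_ck, "hyp_parent_ck",
				  &hyp_parent_ck, 0x0,
				  HYP_CM_CLKSEL_REG, HYP_CLKSEL_MASK);

This expands to the clksel table, the parent-name array, the clk_hw_omap wrapper and the struct clk itself, so a clock data file needs only one statement per divider.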
/* struct clksel_rate.flags possibilities */
#define RATE_IN_242X (1 << 0)
@@ -153,6 +195,10 @@ struct clksel {
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
* @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
* @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ * omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ * omap4_dpll_lpmode_recalc()
* @max_multiplier: maximum valid non-bypass multiplier value (actual)
* @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
@@ -163,6 +209,8 @@ struct clksel {
* @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
* @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
* @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
@@ -191,6 +239,8 @@ struct dpll_data {
u32 enable_mask;
unsigned long last_rounded_rate;
u16 last_rounded_m;
+ u8 last_rounded_m4xen;
+ u8 last_rounded_lpmode;
u16 max_multiplier;
u8 last_rounded_n;
u8 min_divider;
@@ -203,6 +253,8 @@ struct dpll_data {
u32 idlest_mask;
u32 dco_mask;
u32 sddiv_mask;
+ u32 lpmode_mask;
+ u32 m4xen_mask;
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
@@ -229,22 +281,10 @@ struct dpll_data {
#define CLOCK_CLKOUTX2 (1 << 5)
/**
- * struct clk - OMAP struct clk
+ * struct clk_hw_omap - OMAP struct clk
* @node: list_head connecting this clock into the full clock list
- * @ops: struct clkops * for this clock
- * @name: the name of the clock in the hardware (used in hwmod data and debug)
- * @parent: pointer to this clock's parent struct clk
- * @children: list_head connecting to the child clks' @sibling list_heads
- * @sibling: list_head connecting this clk to its parent clk's @children
- * @rate: current clock rate
* @enable_reg: register to write to enable the clock (see @enable_bit)
- * @recalc: fn ptr that returns the clock's current rate
- * @set_rate: fn ptr that can change the clock's current rate
- * @round_rate: fn ptr that can round the clock's current rate
- * @init: fn ptr to do clock-specific initialization
* @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
- * @usecount: number of users that have requested this clock to be enabled
- * @fixed_div: when > 0, this clock's rate is its parent's rate / @fixed_div
* @flags: see "struct clk.flags possibilities" above
* @clksel_reg: for clksel clks, register va containing src/divisor select
* @clksel_mask: bitmask in @clksel_reg for the src/divisor selector
@@ -258,39 +298,17 @@ struct dpll_data {
* XXX @rate_offset, @src_offset should probably be removed and OMAP1
* clock code converted to use clksel.
*
- * XXX @usecount is poorly named. It should be "enable_count" or
- * something similar. "users" in the description refers to kernel
- * code (core code or drivers) that have called clk_enable() and not
- * yet called clk_disable(); the usecount of parent clocks is also
- * incremented by the clock code when clk_enable() is called on child
- * clocks and decremented by the clock code when clk_disable() is
- * called on child clocks.
- *
- * XXX @clkdm, @usecount, @children, @sibling should be marked for
- * internal use only.
- *
- * @children and @sibling are used to optimize parent-to-child clock
- * tree traversals. (child-to-parent traversals use @parent.)
- *
- * XXX The notion of the clock's current rate probably needs to be
- * separated from the clock's target rate.
*/
-struct clk {
+
+struct clk_hw_omap_ops;
+
+struct clk_hw_omap {
+ struct clk_hw hw;
struct list_head node;
- const struct clkops *ops;
- const char *name;
- struct clk *parent;
- struct list_head children;
- struct list_head sibling; /* node for children */
- unsigned long rate;
+ unsigned long fixed_rate;
+ u8 fixed_div;
void __iomem *enable_reg;
- unsigned long (*recalc)(struct clk *);
- int (*set_rate)(struct clk *, unsigned long);
- long (*round_rate)(struct clk *, unsigned long);
- void (*init)(struct clk *);
u8 enable_bit;
- s8 usecount;
- u8 fixed_div;
u8 flags;
void __iomem *clksel_reg;
u32 clksel_mask;
@@ -298,42 +316,22 @@ struct clk {
struct dpll_data *dpll_data;
const char *clkdm_name;
struct clockdomain *clkdm;
-#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
- struct dentry *dent; /* For visible tree hierarchy */
-#endif
+ const struct clk_hw_omap_ops *ops;
};
-struct clk_functions {
- int (*clk_enable)(struct clk *clk);
- void (*clk_disable)(struct clk *clk);
- long (*clk_round_rate)(struct clk *clk, unsigned long rate);
- int (*clk_set_rate)(struct clk *clk, unsigned long rate);
- int (*clk_set_parent)(struct clk *clk, struct clk *parent);
- void (*clk_allow_idle)(struct clk *clk);
- void (*clk_deny_idle)(struct clk *clk);
- void (*clk_disable_unused)(struct clk *clk);
+struct clk_hw_omap_ops {
+ void (*find_idlest)(struct clk_hw_omap *oclk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit, u8 *idlest_val);
+ void (*find_companion)(struct clk_hw_omap *oclk,
+ void __iomem **other_reg,
+ u8 *other_bit);
+ void (*allow_idle)(struct clk_hw_omap *oclk);
+ void (*deny_idle)(struct clk_hw_omap *oclk);
};
-extern int mpurate;
-
-extern int clk_init(struct clk_functions *custom_clocks);
-extern void clk_preinit(struct clk *clk);
-extern int clk_register(struct clk *clk);
-extern void clk_reparent(struct clk *child, struct clk *parent);
-extern void clk_unregister(struct clk *clk);
-extern void propagate_rate(struct clk *clk);
-extern void recalculate_root_clocks(void);
-extern unsigned long followparent_recalc(struct clk *clk);
-extern void clk_enable_init_clocks(void);
-unsigned long omap_fixed_divisor_recalc(struct clk *clk);
-extern struct clk *omap_clk_get_by_name(const char *name);
-extern int omap_clk_enable_autoidle_all(void);
-extern int omap_clk_disable_autoidle_all(void);
-
-extern const struct clkops clkops_null;
-
-extern struct clk dummy_ck;
-
+unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
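With the new prototype, the fixed-divisor recalc receives the parent rate as an argument instead of reading clk->parent->rate itself. A sketch of the converted helper, derived from the removed version earlier in this patch (the actual implementation may differ in detail):

	unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
						unsigned long parent_rate)
	{
		struct clk_hw_omap *oclk = to_clk_hw_omap(hw);

		WARN_ON(!oclk->fixed_div);

		return parent_rate / oclk->fixed_div;
	}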
/* CM_CLKSEL2_PLL.CORE_CLK_SRC bits (2XXX) */
#define CORE_CLK_SRC_32K 0x0
@@ -364,57 +362,62 @@ extern struct clk dummy_ck;
/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE 0x1
-int omap2_clk_enable(struct clk *clk);
-void omap2_clk_disable(struct clk *clk);
-long omap2_clk_round_rate(struct clk *clk, unsigned long rate);
-int omap2_clk_set_rate(struct clk *clk, unsigned long rate);
-int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent);
-long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate);
-unsigned long omap3_dpll_recalc(struct clk *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk *clk);
-void omap3_dpll_allow_idle(struct clk *clk);
-void omap3_dpll_deny_idle(struct clk *clk);
-u32 omap3_dpll_autoidle_read(struct clk *clk);
-int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate);
-int omap3_noncore_dpll_enable(struct clk *clk);
-void omap3_noncore_dpll_disable(struct clk *clk);
-int omap4_dpllmx_gatectrl_read(struct clk *clk);
-void omap4_dpllmx_allow_gatectrl(struct clk *clk);
-void omap4_dpllmx_deny_gatectrl(struct clk *clk);
-long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk);
-
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-void omap2_clk_disable_unused(struct clk *clk);
-#else
-#define omap2_clk_disable_unused NULL
-#endif
-
-void omap2_init_clk_clkdm(struct clk *clk);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate);
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
+void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
+void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk);
+void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk);
+void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk);
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate);
+
+void omap2_init_clk_clkdm(struct clk_hw *clk);
void __init omap2_clk_disable_clkdm_control(void);
/* clkt_clksel.c public functions */
-u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
+u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk,
+ unsigned long target_rate,
u32 *new_div);
-void omap2_init_clksel_parent(struct clk *clk);
-unsigned long omap2_clksel_recalc(struct clk *clk);
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate);
-int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
-int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent);
+u8 omap2_clksel_find_parent_index(struct clk_hw *hw);
+unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate);
+long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate);
+int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val);
/* clkt_iclk.c public functions */
-extern void omap2_clkt_iclk_allow_idle(struct clk *clk);
-extern void omap2_clkt_iclk_deny_idle(struct clk *clk);
+extern void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
+extern void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
-u32 omap2_get_dpll_rate(struct clk *clk);
-void omap2_init_dpll_parent(struct clk *clk);
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
-int omap2_dflt_clk_enable(struct clk *clk);
-void omap2_dflt_clk_disable(struct clk *clk);
-void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg,
u8 *other_bit);
-void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ void __iomem **idlest_reg,
u8 *idlest_bit, u8 *idlest_val);
+void omap2_init_clk_hw_omap_clocks(struct clk *clk);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_disable_autoidle_all(void);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name);
void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
const char *core_ck_name,
@@ -432,28 +435,38 @@ extern const struct clksel_rate gpt_32k_rates[];
extern const struct clksel_rate gpt_sys_rates[];
extern const struct clksel_rate gfx_l3_rates[];
extern const struct clksel_rate dsp_ick_rates[];
+extern struct clk dummy_ck;
-extern const struct clkops clkops_omap2_iclk_dflt_wait;
-extern const struct clkops clkops_omap2_iclk_dflt;
-extern const struct clkops clkops_omap2_iclk_idle_only;
-extern const struct clkops clkops_omap2_mdmclk_dflt_wait;
-extern const struct clkops clkops_omap2xxx_dpll_ops;
-extern const struct clkops clkops_omap3_noncore_dpll_ops;
-extern const struct clkops clkops_omap3_core_dpll_ops;
-extern const struct clkops clkops_omap4_dpllmx_ops;
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+extern const struct clk_hw_omap_ops clkhwops_apll54;
+extern const struct clk_hw_omap_ops clkhwops_apll96;
+extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
/* clksel_rate blocks shared between OMAP44xx and AM33xx */
extern const struct clksel_rate div_1_0_rates[];
+extern const struct clksel_rate div3_1to4_rates[];
extern const struct clksel_rate div_1_1_rates[];
extern const struct clksel_rate div_1_2_rates[];
extern const struct clksel_rate div_1_3_rates[];
extern const struct clksel_rate div_1_4_rates[];
extern const struct clksel_rate div31_1to31_rates[];
-/* clocks shared between various OMAP SoCs */
-extern struct clk virt_19200000_ck;
-extern struct clk virt_26000000_ck;
-
extern int am33xx_clk_init(void);
+extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+
#endif
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
deleted file mode 100644
index 608874b651e..00000000000
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ /dev/null
@@ -1,1972 +0,0 @@
-/*
- * OMAP2420 clock data
- *
- * Copyright (C) 2005-2009, 2012 Texas Instruments, Inc.
- * Copyright (C) 2004-2011 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/list.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock2xxx.h"
-#include "opp2xxx.h"
-#include "cm2xxx.h"
-#include "prm2xxx_3xxx.h"
-#include "prm-regbits-24xx.h"
-#include "cm-regbits-24xx.h"
-#include "sdrc.h"
-#include "control.h"
-
-#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
-
-/*
- * 2420 clock tree.
- *
- * NOTE:In many cases here we are assigning a 'default' parent. In
- * many cases the parent is selectable. The set parent calls will
- * also switch sources.
- *
- * Several sources are given initial rates which may be wrong; this will
- * be fixed up in the init func.
- *
- * Things are broadly separated below by clock domains. It is
- * noteworthy that most peripherals have dependencies on multiple clock
- * domains. Many get their interface clocks from the L4 domain, but get
- * functional clocks from fixed sources or other core domain derived
- * clocks.
- */
-
-/* Base external input clocks */
-static struct clk func_32k_ck = {
- .name = "func_32k_ck",
- .ops = &clkops_null,
- .rate = 32768,
- .clkdm_name = "wkup_clkdm",
-};
-
-static struct clk secure_32k_ck = {
- .name = "secure_32k_ck",
- .ops = &clkops_null,
- .rate = 32768,
- .clkdm_name = "wkup_clkdm",
-};
-
-/* Typical 12/13MHz in standalone mode, will be 26MHz in chassis mode */
-static struct clk osc_ck = { /* (*12, *13, 19.2, *26, 38.4)MHz */
- .name = "osc_ck",
- .ops = &clkops_oscck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_osc_clk_recalc,
-};
-
-/* Without modem likely 12MHz, with modem likely 13MHz */
-static struct clk sys_ck = { /* (*12, *13, 19.2, 26, 38.4)MHz */
- .name = "sys_ck", /* ~ ref_clk also */
- .ops = &clkops_null,
- .parent = &osc_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2xxx_sys_clk_recalc,
-};
-
-static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
- .name = "alt_ck",
- .ops = &clkops_null,
- .rate = 54000000,
- .clkdm_name = "wkup_clkdm",
-};
-
-/* Optional external clock input for McBSP CLKS */
-static struct clk mcbsp_clks = {
- .name = "mcbsp_clks",
- .ops = &clkops_null,
-};
-
-/*
- * Analog domain root source clocks
- */
-
-/* dpll_ck is broken out into special cases through clksel */
-/* REVISIT: Rate changes on dpll_ck trigger a full set change. ...
- * deal with this
- */
-
-static struct dpll_data dpll_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .mult_mask = OMAP24XX_DPLL_MULT_MASK,
- .div1_mask = OMAP24XX_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP24XX_EN_DPLL_MASK,
- .max_multiplier = 1023,
- .min_divider = 1,
- .max_divider = 16,
-};
-
-/*
- * XXX Cannot add round_rate here yet, as this is still a composite clock,
- * not just a DPLL
- */
-static struct clk dpll_ck = {
- .name = "dpll_ck",
- .ops = &clkops_omap2xxx_dpll_ops,
- .parent = &sys_ck, /* Can be func_32k also */
- .init = &omap2xxx_clkt_dpllcore_init,
- .dpll_data = &dpll_dd,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_dpllcore_recalc,
- .set_rate = &omap2_reprogram_dpllcore,
-};
-
-static struct clk apll96_ck = {
- .name = "apll96_ck",
- .ops = &clkops_apll96,
- .parent = &sys_ck,
- .rate = 96000000,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
-};
-
-static struct clk apll54_ck = {
- .name = "apll54_ck",
- .ops = &clkops_apll54,
- .parent = &sys_ck,
- .rate = 54000000,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
-};
-
-/*
- * PRCM digital base sources
- */
-
-/* func_54m_ck */
-
-static const struct clksel_rate func_54m_apll54_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate func_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel func_54m_clksel[] = {
- { .parent = &apll54_ck, .rates = func_54m_apll54_rates, },
- { .parent = &alt_ck, .rates = func_54m_alt_rates, },
- { .parent = NULL },
-};
-
-static struct clk func_54m_ck = {
- .name = "func_54m_ck",
- .ops = &clkops_null,
- .parent = &apll54_ck, /* can also be alt_clk */
- .clkdm_name = "wkup_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_54M_SOURCE_MASK,
- .clksel = func_54m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk core_ck = {
- .name = "core_ck",
- .ops = &clkops_null,
- .parent = &dpll_ck, /* can also be 32k */
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk func_96m_ck = {
- .name = "func_96m_ck",
- .ops = &clkops_null,
- .parent = &apll96_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* func_48m_ck */
-
-static const struct clksel_rate func_48m_apll96_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate func_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel func_48m_clksel[] = {
- { .parent = &apll96_ck, .rates = func_48m_apll96_rates },
- { .parent = &alt_ck, .rates = func_48m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk func_48m_ck = {
- .name = "func_48m_ck",
- .ops = &clkops_null,
- .parent = &apll96_ck, /* 96M or Alt */
- .clkdm_name = "wkup_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
- .clksel = func_48m_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk func_12m_ck = {
- .name = "func_12m_ck",
- .ops = &clkops_null,
- .parent = &func_48m_ck,
- .fixed_div = 4,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/* Secure timer, only available in secure mode */
-static struct clk wdt1_osc_ck = {
- .name = "ck_wdt1_osc",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &osc_ck,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The common_clkout* clksel_rate structs are common to
- * sys_clkout, sys_clkout_src, sys_clkout2, and sys_clkout2_src.
- * sys_clkout2_* are 2420-only, so the
- * clksel_rate flags fields are inaccurate for those clocks. This is
- * harmless since access to those clocks is gated by the struct clk
- * flags fields, which mark them as 2420-only.
- */
-static const struct clksel_rate common_clkout_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel common_clkout_src_clksel[] = {
- { .parent = &core_ck, .rates = common_clkout_src_core_rates },
- { .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
- { .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
- { .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
- { .parent = NULL }
-};
-
-static struct clk sys_clkout_src = {
- .name = "sys_clkout_src",
- .ops = &clkops_omap2_dflt,
- .parent = &func_54m_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .enable_bit = OMAP24XX_CLKOUT_EN_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP24XX_CLKOUT_SOURCE_MASK,
- .clksel = common_clkout_src_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static const struct clksel_rate common_clkout_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 2, .val = 1, .flags = RATE_IN_24XX },
- { .div = 4, .val = 2, .flags = RATE_IN_24XX },
- { .div = 8, .val = 3, .flags = RATE_IN_24XX },
- { .div = 16, .val = 4, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel sys_clkout_clksel[] = {
- { .parent = &sys_clkout_src, .rates = common_clkout_rates },
- { .parent = NULL }
-};
-
-static struct clk sys_clkout = {
- .name = "sys_clkout",
- .ops = &clkops_null,
- .parent = &sys_clkout_src,
- .clkdm_name = "wkup_clkdm",
- .clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP24XX_CLKOUT_DIV_MASK,
- .clksel = sys_clkout_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-/* In 2430, new in 2420 ES2 */
-static struct clk sys_clkout2_src = {
- .name = "sys_clkout2_src",
- .ops = &clkops_omap2_dflt,
- .parent = &func_54m_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .enable_bit = OMAP2420_CLKOUT2_EN_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP2420_CLKOUT2_SOURCE_MASK,
- .clksel = common_clkout_src_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static const struct clksel sys_clkout2_clksel[] = {
- { .parent = &sys_clkout2_src, .rates = common_clkout_rates },
- { .parent = NULL }
-};
-
-/* In 2430, new in 2420 ES2 */
-static struct clk sys_clkout2 = {
- .name = "sys_clkout2",
- .ops = &clkops_null,
- .parent = &sys_clkout2_src,
- .clkdm_name = "wkup_clkdm",
- .clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP2420_CLKOUT2_DIV_MASK,
- .clksel = sys_clkout2_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk emul_ck = {
- .name = "emul_ck",
- .ops = &clkops_omap2_dflt,
- .parent = &func_54m_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP2420_PRCM_CLKEMUL_CTRL,
- .enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
- .recalc = &followparent_recalc,
-
-};
-
-/*
- * MPU clock domain
- * Clocks:
- * MPU_FCLK, MPU_ICLK
- * INT_M_FCLK, INT_M_I_CLK
- *
- * - Individual clocks are hardware managed.
- * - Base divider comes from: CM_CLKSEL_MPU
- *
- */
-static const struct clksel_rate mpu_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_242X },
- { .div = 6, .val = 6, .flags = RATE_IN_242X },
- { .div = 8, .val = 8, .flags = RATE_IN_242X },
- { .div = 0 },
-};
-
-static const struct clksel mpu_clksel[] = {
- { .parent = &core_ck, .rates = mpu_core_rates },
- { .parent = NULL }
-};
-
-static struct clk mpu_ck = { /* Control cpu */
- .name = "mpu_ck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .clkdm_name = "mpu_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_MPU_MASK,
- .clksel = mpu_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * DSP (2420-UMA+IVA1) clock domain
- * Clocks:
- * 2420: UMA_FCLK, UMA_ICLK, IVA_MPU, IVA_COP
- *
- * Won't be too specific here. The core clock comes into this block,
- * where it is divided and then tee'ed. One branch goes directly to
- * xyz enable controls. The other branch gets further divided by 2
- * then possibly routed into a synchronizer and out of clocks abc.
- */
-static const struct clksel_rate dsp_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_242X },
- { .div = 8, .val = 8, .flags = RATE_IN_242X },
- { .div = 12, .val = 12, .flags = RATE_IN_242X },
- { .div = 0 },
-};
-
-static const struct clksel dsp_fck_clksel[] = {
- { .parent = &core_ck, .rates = dsp_fck_core_rates },
- { .parent = NULL }
-};
-
-static struct clk dsp_fck = {
- .name = "dsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "dsp_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_DSP_MASK,
- .clksel = dsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel dsp_ick_clksel[] = {
- { .parent = &dsp_fck, .rates = dsp_ick_rates },
- { .parent = NULL }
-};
-
-static struct clk dsp_ick = {
- .name = "dsp_ick", /* apparently ipi and isp */
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &dsp_fck,
- .clkdm_name = "dsp_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_ICLKEN),
- .enable_bit = OMAP2420_EN_DSP_IPI_SHIFT, /* for ipi */
- .clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_DSP_IF_MASK,
- .clksel = dsp_ick_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * The IVA1 is an ARM7 core on the 2420 that has nothing to do with
- * the C54x, but which is contained in the DSP powerdomain. Does not
- * exist on later OMAPs.
- */
-static struct clk iva1_ifck = {
- .name = "iva1_ifck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "iva1_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
- .enable_bit = OMAP2420_EN_IVA_COP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP2420_CLKSEL_IVA_MASK,
- .clksel = dsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* IVA1 mpu/int/i/f clocks are /2 of parent */
-static struct clk iva1_mpu_int_ifck = {
- .name = "iva1_mpu_int_ifck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &iva1_ifck,
- .clkdm_name = "iva1_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
- .enable_bit = OMAP2420_EN_IVA_MPU_SHIFT,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/*
- * L3 clock domain
- * L3 clocks are used for both interface and functional clocks to
- * multiple entities. Some of these clocks are completely managed
- * by hardware, and some others allow software control. The hardware-
- * managed ones are generally based directly on CLK_REQ signals and
- * various auto-idle settings. The functional spec sets many of these
- * as 'tie-high' for their enables.
- *
- * I-CLOCKS:
- * L3-Interconnect, SMS, GPMC, SDRC, OCM_RAM, OCM_ROM, SDMA
- * CAM, HS-USB.
- * F-CLOCK
- * SSI.
- *
- * GPMC memories and SDRC have timing and clock sensitive registers which
- * may very well need notification when the clock changes. Currently for low
- * operating points, these are taken care of in sleep.S.
- */
-static const struct clksel_rate core_l3_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_242X },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_24XX },
- { .div = 8, .val = 8, .flags = RATE_IN_242X },
- { .div = 12, .val = 12, .flags = RATE_IN_242X },
- { .div = 16, .val = 16, .flags = RATE_IN_242X },
- { .div = 0 }
-};
-
-static const struct clksel core_l3_clksel[] = {
- { .parent = &core_ck, .rates = core_l3_core_rates },
- { .parent = NULL }
-};
-
-static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
- .name = "core_l3_ck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .clkdm_name = "core_l3_clkdm",
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_L3_MASK,
- .clksel = core_l3_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* usb_l4_ick */
-static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel usb_l4_ick_clksel[] = {
- { .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
- { .parent = NULL },
-};
-
-/* It is unclear from TRM whether usb_l4_ick is really in L3 or L4 clkdm */
-static struct clk usb_l4_ick = { /* FS-USB interface clock */
- .name = "usb_l4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_USB_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_USB_MASK,
- .clksel = usb_l4_ick_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * L4 clock management domain
- *
- * This domain contains lots of interface clocks from the L4 interface, and
- * some functional clocks. Fixed APLL functional source clocks are managed in
- * this domain.
- */
-static const struct clksel_rate l4_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel l4_clksel[] = {
- { .parent = &core_l3_ck, .rates = l4_core_l3_rates },
- { .parent = NULL }
-};
-
-static struct clk l4_ck = { /* used both as an ick and fck */
- .name = "l4_ck",
- .ops = &clkops_null,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l4_clkdm",
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_L4_MASK,
- .clksel = l4_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * SSI is in L3 management domain, its direct parent is core not l3,
- * many core power domain entities are grouped into the L3 clock
- * domain.
- * SSI_SSR_FCLK, SSI_SST_FCLK, SSI_L4_ICLK
- *
- * ssr = core/1/2/3/4/5, sst = 1/2 ssr.
- */
-static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_242X },
- { .div = 8, .val = 8, .flags = RATE_IN_242X },
- { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_sst_fck_clksel[] = {
- { .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
- { .parent = NULL }
-};
-
-static struct clk ssi_ssr_sst_fck = {
- .name = "ssi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_sst_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * Presumably this is the same as SSI_ICLK.
- * The TRM contradicts itself on which clockdomain SSI_ICLK is in
- */
-static struct clk ssi_l4_ick = {
- .name = "ssi_l4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_SSI_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-
-/*
- * GFX clock domain
- * Clocks:
- * GFX_FCLK, GFX_ICLK
- * GFX_CG1(2d), GFX_CG2(3d)
- *
- * GFX_FCLK runs from L3, and is divided by (1,2,3,4)
- * The 2d and 3d clocks run at a hardware determined
- * divided value of fclk.
- *
- */
-
-/* This clksel struct is shared between gfx_3d_fck and gfx_2d_fck */
-static const struct clksel gfx_fck_clksel[] = {
- { .parent = &core_l3_ck, .rates = gfx_l3_rates },
- { .parent = NULL },
-};
-
-static struct clk gfx_3d_fck = {
- .name = "gfx_3d_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_3D_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_fck_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk gfx_2d_fck = {
- .name = "gfx_2d_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_2D_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* This interface clock does not have a CM_AUTOIDLE bit */
-static struct clk gfx_ick = {
- .name = "gfx_ick", /* From l3 */
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
- .enable_bit = OMAP_EN_GFX_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * DSS clock domain
- * CLOCKs:
- * DSS_L4_ICLK, DSS_L3_ICLK,
- * DSS_CLK1, DSS_CLK2, DSS_54MHz_CLK
- *
- * DSS is both initiator and target.
- */
-/* XXX Add RATE_NOT_VALIDATED */
-
-static const struct clksel_rate dss1_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate dss1_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 5, .val = 5, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_24XX },
- { .div = 8, .val = 8, .flags = RATE_IN_24XX },
- { .div = 9, .val = 9, .flags = RATE_IN_24XX },
- { .div = 12, .val = 12, .flags = RATE_IN_24XX },
- { .div = 16, .val = 16, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel dss1_fck_clksel[] = {
- { .parent = &sys_ck, .rates = dss1_fck_sys_rates },
- { .parent = &core_ck, .rates = dss1_fck_core_rates },
- { .parent = NULL },
-};
-
-static struct clk dss_ick = { /* Enables both L3,L4 ICLK's */
- .name = "dss_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ck, /* really both l3 and l4 */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss1_fck = {
- .name = "dss1_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &core_ck, /* Core or sys */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_DSS1_MASK,
- .clksel = dss1_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate dss2_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate dss2_fck_48m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel dss2_fck_clksel[] = {
- { .parent = &sys_ck, .rates = dss2_fck_sys_rates },
- { .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
- { .parent = NULL }
-};
-
-static struct clk dss2_fck = { /* Alt clk used in power management */
- .name = "dss2_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck, /* fixed at sys_ck or 48MHz */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_DSS2_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_DSS2_MASK,
- .clksel = dss2_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dss_54m_fck = { /* Alt clk used in power management */
- .name = "dss_54m_fck", /* 54m tv clk */
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_54m_ck,
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_TV_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wu_l4_ick = {
- .name = "wu_l4_ick",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * CORE power domain ICLK & FCLK defines.
- * Many of these can have more than one possible parent. Entries
- * here will likely have an L4 interface parent, and may have multiple
- * functional clock parents.
- */
-static const struct clksel_rate gpt_alt_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel omap24xx_gpt_clksel[] = {
- { .parent = &func_32k_ck, .rates = gpt_32k_rates },
- { .parent = &sys_ck, .rates = gpt_sys_rates },
- { .parent = &alt_ck, .rates = gpt_alt_rates },
- { .parent = NULL },
-};
-
-static struct clk gpt1_ick = {
- .name = "gpt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt1_fck = {
- .name = "gpt1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_GPT1_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk gpt2_ick = {
- .name = "gpt2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_fck = {
- .name = "gpt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT2_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt3_ick = {
- .name = "gpt3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt3_fck = {
- .name = "gpt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT3_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt4_ick = {
- .name = "gpt4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt4_fck = {
- .name = "gpt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT4_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt5_ick = {
- .name = "gpt5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt5_fck = {
- .name = "gpt5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT5_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt6_ick = {
- .name = "gpt6_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt6_fck = {
- .name = "gpt6_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT6_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt7_ick = {
- .name = "gpt7_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt7_fck = {
- .name = "gpt7_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT7_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt8_ick = {
- .name = "gpt8_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt8_fck = {
- .name = "gpt8_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT8_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt9_ick = {
- .name = "gpt9_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt9_fck = {
- .name = "gpt9_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT9_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt10_ick = {
- .name = "gpt10_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt10_fck = {
- .name = "gpt10_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT10_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt11_ick = {
- .name = "gpt11_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt11_fck = {
- .name = "gpt11_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT11_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt12_ick = {
- .name = "gpt12_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt12_fck = {
- .name = "gpt12_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &secure_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT12_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp1_ick = {
- .name = "mcbsp1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel mcbsp_fck_clksel[] = {
- { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp1_fck = {
- .name = "mcbsp1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp2_ick = {
- .name = "mcbsp2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp2_fck = {
- .name = "mcbsp2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcspi1_ick = {
- .name = "mcspi1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_fck = {
- .name = "mcspi1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_ick = {
- .name = "mcspi2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_fck = {
- .name = "mcspi2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_ick = {
- .name = "uart1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_UART1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_fck = {
- .name = "uart1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_UART1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_ick = {
- .name = "uart2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_UART2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_fck = {
- .name = "uart2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_UART2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_ick = {
- .name = "uart3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_UART3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_fck = {
- .name = "uart3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_UART3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpios_ick = {
- .name = "gpios_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpios_fck = {
- .name = "gpios_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mpu_wdt_ick = {
- .name = "mpu_wdt_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mpu_wdt_fck = {
- .name = "mpu_wdt_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sync_32k_ick = {
- .name = "sync_32k_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .flags = ENABLE_ON_INIT,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt1_ick = {
- .name = "wdt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_WDT1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk omapctrl_ick = {
- .name = "omapctrl_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .flags = ENABLE_ON_INIT,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk cam_ick = {
- .name = "cam_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_CAM_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * cam_fck controls both CAM_MCLK and CAM_FCLK. It should probably be
- * split into two separate clocks, since the parent clocks are different
- * and the clockdomains are also different.
- */
-static struct clk cam_fck = {
- .name = "cam_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_CAM_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mailboxes_ick = {
- .name = "mailboxes_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt4_ick = {
- .name = "wdt4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt4_fck = {
- .name = "wdt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_ick = {
- .name = "wdt3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_WDT3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_fck = {
- .name = "wdt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_WDT3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_ick = {
- .name = "mspro_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_fck = {
- .name = "mspro_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmc_ick = {
- .name = "mmc_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_MMC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmc_fck = {
- .name = "mmc_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_MMC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_ick = {
- .name = "fac_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_FAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_fck = {
- .name = "fac_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_FAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk eac_ick = {
- .name = "eac_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_EAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk eac_fck = {
- .name = "eac_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_EAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_ick = {
- .name = "hdq_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_fck = {
- .name = "hdq_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_ick = {
- .name = "i2c2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_I2C2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_fck = {
- .name = "i2c2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_I2C2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_ick = {
- .name = "i2c1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_I2C1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_fck = {
- .name = "i2c1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_I2C1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
- * accesses derived from this data.
- */
-static struct clk gpmc_fck = {
- .name = "gpmc_fck",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sdma_fck = {
- .name = "sdma_fck",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
- * accesses derived from this data.
- */
-static struct clk sdma_ick = {
- .name = "sdma_ick",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
- * accesses derived from this data.
- */
-static struct clk sdrc_ick = {
- .name = "sdrc_ick",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP24XX_AUTO_SDRC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk vlynq_ick = {
- .name = "vlynq_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate vlynq_fck_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_242X },
- { .div = 0 }
-};
-
-static const struct clksel_rate vlynq_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_242X },
- { .div = 2, .val = 2, .flags = RATE_IN_242X },
- { .div = 3, .val = 3, .flags = RATE_IN_242X },
- { .div = 4, .val = 4, .flags = RATE_IN_242X },
- { .div = 6, .val = 6, .flags = RATE_IN_242X },
- { .div = 8, .val = 8, .flags = RATE_IN_242X },
- { .div = 9, .val = 9, .flags = RATE_IN_242X },
- { .div = 12, .val = 12, .flags = RATE_IN_242X },
- { .div = 16, .val = 16, .flags = RATE_IN_242X },
- { .div = 18, .val = 18, .flags = RATE_IN_242X },
- { .div = 0 }
-};
-
-static const struct clksel vlynq_fck_clksel[] = {
- { .parent = &func_96m_ck, .rates = vlynq_fck_96m_rates },
- { .parent = &core_ck, .rates = vlynq_fck_core_rates },
- { .parent = NULL }
-};
-
-static struct clk vlynq_fck = {
- .name = "vlynq_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP2420_CLKSEL_VLYNQ_MASK,
- .clksel = vlynq_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk des_ick = {
- .name = "des_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_DES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha_ick = {
- .name = "sha_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_SHA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rng_ick = {
- .name = "rng_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_RNG_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes_ick = {
- .name = "aes_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_AES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pka_ick = {
- .name = "pka_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_PKA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_fck = {
- .name = "usb_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_USB_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * This clock is a composite clock which does entire set changes then
- * forces a rebalance. It keys on the MPU speed, but it really could
- * be any key speed part of a set in the rate table.
- *
- * to really change a set, you need memory table sets which get changed
- * in sram, pre-notifiers & post notifiers, changing the top set, without
- * having low level display recalc's won't work... this is why dpm notifiers
- * work, isr's off, walk a list of clocks already _off_ and not messing with
- * the bus.
- *
- * This clock should have no parent. It embodies the entire upper level
- * active set. A parent will mess up some of the init also.
- */
-static struct clk virt_prcm_set = {
- .name = "virt_prcm_set",
- .ops = &clkops_null,
- .parent = &mpu_ck, /* Indexed by mpu speed, no parent */
- .recalc = &omap2_table_mpu_recalc, /* sets are keyed on mpu rate */
- .set_rate = &omap2_select_table_rate,
- .round_rate = &omap2_round_to_table_rate,
-};
-
-
-/*
- * clkdev integration
- */
-
-static struct omap_clk omap2420_clks[] = {
- /* external root sources */
- CLK(NULL, "func_32k_ck", &func_32k_ck, CK_242X),
- CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_242X),
- CLK(NULL, "osc_ck", &osc_ck, CK_242X),
- CLK(NULL, "sys_ck", &sys_ck, CK_242X),
- CLK(NULL, "alt_ck", &alt_ck, CK_242X),
- CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_242X),
- /* internal analog sources */
- CLK(NULL, "dpll_ck", &dpll_ck, CK_242X),
- CLK(NULL, "apll96_ck", &apll96_ck, CK_242X),
- CLK(NULL, "apll54_ck", &apll54_ck, CK_242X),
- /* internal prcm root sources */
- CLK(NULL, "func_54m_ck", &func_54m_ck, CK_242X),
- CLK(NULL, "core_ck", &core_ck, CK_242X),
- CLK(NULL, "func_96m_ck", &func_96m_ck, CK_242X),
- CLK(NULL, "func_48m_ck", &func_48m_ck, CK_242X),
- CLK(NULL, "func_12m_ck", &func_12m_ck, CK_242X),
- CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_242X),
- CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_242X),
- CLK(NULL, "sys_clkout", &sys_clkout, CK_242X),
- CLK(NULL, "sys_clkout2_src", &sys_clkout2_src, CK_242X),
- CLK(NULL, "sys_clkout2", &sys_clkout2, CK_242X),
- CLK(NULL, "emul_ck", &emul_ck, CK_242X),
- /* mpu domain clocks */
- CLK(NULL, "mpu_ck", &mpu_ck, CK_242X),
- /* dsp domain clocks */
- CLK(NULL, "dsp_fck", &dsp_fck, CK_242X),
- CLK(NULL, "dsp_ick", &dsp_ick, CK_242X),
- CLK(NULL, "iva1_ifck", &iva1_ifck, CK_242X),
- CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck, CK_242X),
- /* GFX domain clocks */
- CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_242X),
- CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_242X),
- CLK(NULL, "gfx_ick", &gfx_ick, CK_242X),
- /* DSS domain clocks */
- CLK("omapdss_dss", "ick", &dss_ick, CK_242X),
- CLK(NULL, "dss_ick", &dss_ick, CK_242X),
- CLK(NULL, "dss1_fck", &dss1_fck, CK_242X),
- CLK(NULL, "dss2_fck", &dss2_fck, CK_242X),
- CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_242X),
- /* L3 domain clocks */
- CLK(NULL, "core_l3_ck", &core_l3_ck, CK_242X),
- CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_242X),
- CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_242X),
- /* L4 domain clocks */
- CLK(NULL, "l4_ck", &l4_ck, CK_242X),
- CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_242X),
- CLK(NULL, "wu_l4_ick", &wu_l4_ick, CK_242X),
- /* virtual meta-group clock */
- CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_242X),
- /* general l4 interface ck, multi-parent functional clk */
- CLK(NULL, "gpt1_ick", &gpt1_ick, CK_242X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_242X),
- CLK(NULL, "gpt2_ick", &gpt2_ick, CK_242X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_242X),
- CLK(NULL, "gpt3_ick", &gpt3_ick, CK_242X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_242X),
- CLK(NULL, "gpt4_ick", &gpt4_ick, CK_242X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_242X),
- CLK(NULL, "gpt5_ick", &gpt5_ick, CK_242X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_242X),
- CLK(NULL, "gpt6_ick", &gpt6_ick, CK_242X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_242X),
- CLK(NULL, "gpt7_ick", &gpt7_ick, CK_242X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_242X),
- CLK(NULL, "gpt8_ick", &gpt8_ick, CK_242X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_242X),
- CLK(NULL, "gpt9_ick", &gpt9_ick, CK_242X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_242X),
- CLK(NULL, "gpt10_ick", &gpt10_ick, CK_242X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_242X),
- CLK(NULL, "gpt11_ick", &gpt11_ick, CK_242X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_242X),
- CLK(NULL, "gpt12_ick", &gpt12_ick, CK_242X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_242X),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_242X),
- CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_242X),
- CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_242X),
- CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_242X),
- CLK(NULL, "mcbsp2_ick", &mcbsp2_ick, CK_242X),
- CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_242X),
- CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_242X),
- CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_242X),
- CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_242X),
- CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_242X),
- CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_242X),
- CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_242X),
- CLK(NULL, "uart1_ick", &uart1_ick, CK_242X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_242X),
- CLK(NULL, "uart2_ick", &uart2_ick, CK_242X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_242X),
- CLK(NULL, "uart3_ick", &uart3_ick, CK_242X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_242X),
- CLK(NULL, "gpios_ick", &gpios_ick, CK_242X),
- CLK(NULL, "gpios_fck", &gpios_fck, CK_242X),
- CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_242X),
- CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick, CK_242X),
- CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck, CK_242X),
- CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_242X),
- CLK(NULL, "wdt1_ick", &wdt1_ick, CK_242X),
- CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_242X),
- CLK("omap24xxcam", "fck", &cam_fck, CK_242X),
- CLK(NULL, "cam_fck", &cam_fck, CK_242X),
- CLK("omap24xxcam", "ick", &cam_ick, CK_242X),
- CLK(NULL, "cam_ick", &cam_ick, CK_242X),
- CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_242X),
- CLK(NULL, "wdt4_ick", &wdt4_ick, CK_242X),
- CLK(NULL, "wdt4_fck", &wdt4_fck, CK_242X),
- CLK(NULL, "wdt3_ick", &wdt3_ick, CK_242X),
- CLK(NULL, "wdt3_fck", &wdt3_fck, CK_242X),
- CLK(NULL, "mspro_ick", &mspro_ick, CK_242X),
- CLK(NULL, "mspro_fck", &mspro_fck, CK_242X),
- CLK("mmci-omap.0", "ick", &mmc_ick, CK_242X),
- CLK(NULL, "mmc_ick", &mmc_ick, CK_242X),
- CLK("mmci-omap.0", "fck", &mmc_fck, CK_242X),
- CLK(NULL, "mmc_fck", &mmc_fck, CK_242X),
- CLK(NULL, "fac_ick", &fac_ick, CK_242X),
- CLK(NULL, "fac_fck", &fac_fck, CK_242X),
- CLK(NULL, "eac_ick", &eac_ick, CK_242X),
- CLK(NULL, "eac_fck", &eac_fck, CK_242X),
- CLK("omap_hdq.0", "ick", &hdq_ick, CK_242X),
- CLK(NULL, "hdq_ick", &hdq_ick, CK_242X),
- CLK("omap_hdq.0", "fck", &hdq_fck, CK_242X),
- CLK(NULL, "hdq_fck", &hdq_fck, CK_242X),
- CLK("omap_i2c.1", "ick", &i2c1_ick, CK_242X),
- CLK(NULL, "i2c1_ick", &i2c1_ick, CK_242X),
- CLK(NULL, "i2c1_fck", &i2c1_fck, CK_242X),
- CLK("omap_i2c.2", "ick", &i2c2_ick, CK_242X),
- CLK(NULL, "i2c2_ick", &i2c2_ick, CK_242X),
- CLK(NULL, "i2c2_fck", &i2c2_fck, CK_242X),
- CLK(NULL, "gpmc_fck", &gpmc_fck, CK_242X),
- CLK(NULL, "sdma_fck", &sdma_fck, CK_242X),
- CLK(NULL, "sdma_ick", &sdma_ick, CK_242X),
- CLK(NULL, "sdrc_ick", &sdrc_ick, CK_242X),
- CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
- CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
- CLK(NULL, "des_ick", &des_ick, CK_242X),
- CLK("omap-sham", "ick", &sha_ick, CK_242X),
- CLK(NULL, "sha_ick", &sha_ick, CK_242X),
- CLK("omap_rng", "ick", &rng_ick, CK_242X),
- CLK(NULL, "rng_ick", &rng_ick, CK_242X),
- CLK("omap-aes", "ick", &aes_ick, CK_242X),
- CLK(NULL, "aes_ick", &aes_ick, CK_242X),
- CLK(NULL, "pka_ick", &pka_ick, CK_242X),
- CLK(NULL, "usb_fck", &usb_fck, CK_242X),
- CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
- CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_242X),
- CLK(NULL, "timer_sys_ck", &sys_ck, CK_242X),
- CLK(NULL, "timer_ext_ck", &alt_ck, CK_242X),
- CLK(NULL, "cpufreq_ck", &virt_prcm_set, CK_242X),
-};
-
-/*
- * init code
- */
-
-int __init omap2420_clk_init(void)
-{
- struct omap_clk *c;
-
- prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
- cpu_mask = RATE_IN_242X;
- rate_table = omap2420_rate_table;
-
- for (c = omap2420_clks; c < omap2420_clks + ARRAY_SIZE(omap2420_clks);
- c++)
- clk_preinit(c->lk.clk);
-
- osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
- propagate_rate(&osc_ck);
- sys_ck.rate = omap2xxx_sys_clk_recalc(&sys_ck);
- propagate_rate(&sys_ck);
-
- for (c = omap2420_clks; c < omap2420_clks + ARRAY_SIZE(omap2420_clks);
- c++) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- omap2xxx_clkt_vps_late_init();
-
- /* Disable autoidle on all clocks; let the PM code enable it later */
- omap_clk_disable_autoidle_all();
-
- /* XXX Can this be done from the virt_prcm_set clk init function? */
- omap2xxx_clkt_vps_check_bootloader_rates();
-
- recalculate_root_clocks();
-
- pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
- (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
- (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- return 0;
-}
-
diff --git a/arch/arm/mach-omap2/clock2430.c b/arch/arm/mach-omap2/clock2430.c
index e37df538bcd..cef0c8d1de5 100644
--- a/arch/arm/mach-omap2/clock2430.c
+++ b/arch/arm/mach-omap2/clock2430.c
@@ -40,7 +40,7 @@
* passes back the correct CM_IDLEST register address for I2CHS
* modules. No return value.
*/
-static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -51,9 +51,7 @@ static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
}

/* 2430 I2CHS has non-standard IDLEST register */
-const struct clkops clkops_omap2430_i2chs_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = {
.find_idlest = omap2430_clk_i2chs_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+ .find_companion = omap2_clk_dflt_find_companion,
};
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
deleted file mode 100644
index b179b6ef432..00000000000
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ /dev/null
@@ -1,2071 +0,0 @@
-/*
- * OMAP2430 clock data
- *
- * Copyright (C) 2005-2009, 2012 Texas Instruments, Inc.
- * Copyright (C) 2004-2011 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/list.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock2xxx.h"
-#include "opp2xxx.h"
-#include "cm2xxx.h"
-#include "prm2xxx_3xxx.h"
-#include "prm-regbits-24xx.h"
-#include "cm-regbits-24xx.h"
-#include "sdrc.h"
-#include "control.h"
-
-#define OMAP_CM_REGADDR OMAP2430_CM_REGADDR
-
-/*
- * 2430 clock tree.
- *
- * NOTE:In many cases here we are assigning a 'default' parent. In
- * many cases the parent is selectable. The set parent calls will
- * also switch sources.
- *
- * Several sources are given initial rates which may be wrong, this will
- * be fixed up in the init func.
- *
- * Things are broadly separated below by clock domains. It is
- * noteworthy that most peripherals have dependencies on multiple clock
- * domains. Many get their interface clocks from the L4 domain, but get
- * functional clocks from fixed sources or other core domain derived
- * clocks.
- */
-
-/* Base external input clocks */
-static struct clk func_32k_ck = {
- .name = "func_32k_ck",
- .ops = &clkops_null,
- .rate = 32768,
- .clkdm_name = "wkup_clkdm",
-};
-
-static struct clk secure_32k_ck = {
- .name = "secure_32k_ck",
- .ops = &clkops_null,
- .rate = 32768,
- .clkdm_name = "wkup_clkdm",
-};
-
-/* Typical 12/13MHz in standalone mode, will be 26Mhz in chassis mode */
-static struct clk osc_ck = { /* (*12, *13, 19.2, *26, 38.4)MHz */
- .name = "osc_ck",
- .ops = &clkops_oscck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_osc_clk_recalc,
-};
-
-/* Without modem likely 12MHz, with modem likely 13MHz */
-static struct clk sys_ck = { /* (*12, *13, 19.2, 26, 38.4)MHz */
- .name = "sys_ck", /* ~ ref_clk also */
- .ops = &clkops_null,
- .parent = &osc_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2xxx_sys_clk_recalc,
-};
-
-static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
- .name = "alt_ck",
- .ops = &clkops_null,
- .rate = 54000000,
- .clkdm_name = "wkup_clkdm",
-};
-
-/* Optional external clock input for McBSP CLKS */
-static struct clk mcbsp_clks = {
- .name = "mcbsp_clks",
- .ops = &clkops_null,
-};
-
-/*
- * Analog domain root source clocks
- */
-
-/* dpll_ck, is broken out in to special cases through clksel */
-/* REVISIT: Rate changes on dpll_ck trigger a full set change. ...
- * deal with this
- */
-
-static struct dpll_data dpll_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .mult_mask = OMAP24XX_DPLL_MULT_MASK,
- .div1_mask = OMAP24XX_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP24XX_EN_DPLL_MASK,
- .max_multiplier = 1023,
- .min_divider = 1,
- .max_divider = 16,
-};
-
-/*
- * XXX Cannot add round_rate here yet, as this is still a composite clock,
- * not just a DPLL
- */
-static struct clk dpll_ck = {
- .name = "dpll_ck",
- .ops = &clkops_omap2xxx_dpll_ops,
- .parent = &sys_ck, /* Can be func_32k also */
- .init = &omap2xxx_clkt_dpllcore_init,
- .dpll_data = &dpll_dd,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_dpllcore_recalc,
- .set_rate = &omap2_reprogram_dpllcore,
-};
-
-static struct clk apll96_ck = {
- .name = "apll96_ck",
- .ops = &clkops_apll96,
- .parent = &sys_ck,
- .rate = 96000000,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
-};
-
-static struct clk apll54_ck = {
- .name = "apll54_ck",
- .ops = &clkops_apll54,
- .parent = &sys_ck,
- .rate = 54000000,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
-};
-
-/*
- * PRCM digital base sources
- */
-
-/* func_54m_ck */
-
-static const struct clksel_rate func_54m_apll54_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate func_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel func_54m_clksel[] = {
- { .parent = &apll54_ck, .rates = func_54m_apll54_rates, },
- { .parent = &alt_ck, .rates = func_54m_alt_rates, },
- { .parent = NULL },
-};
-
-static struct clk func_54m_ck = {
- .name = "func_54m_ck",
- .ops = &clkops_null,
- .parent = &apll54_ck, /* can also be alt_clk */
- .clkdm_name = "wkup_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_54M_SOURCE_MASK,
- .clksel = func_54m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk core_ck = {
- .name = "core_ck",
- .ops = &clkops_null,
- .parent = &dpll_ck, /* can also be 32k */
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* func_96m_ck */
-static const struct clksel_rate func_96m_apll96_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate func_96m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_243X },
- { .div = 0 },
-};
-
-static const struct clksel func_96m_clksel[] = {
- { .parent = &apll96_ck, .rates = func_96m_apll96_rates },
- { .parent = &alt_ck, .rates = func_96m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk func_96m_ck = {
- .name = "func_96m_ck",
- .ops = &clkops_null,
- .parent = &apll96_ck,
- .clkdm_name = "wkup_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP2430_96M_SOURCE_MASK,
- .clksel = func_96m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* func_48m_ck */
-
-static const struct clksel_rate func_48m_apll96_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate func_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel func_48m_clksel[] = {
- { .parent = &apll96_ck, .rates = func_48m_apll96_rates },
- { .parent = &alt_ck, .rates = func_48m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk func_48m_ck = {
- .name = "func_48m_ck",
- .ops = &clkops_null,
- .parent = &apll96_ck, /* 96M or Alt */
- .clkdm_name = "wkup_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
- .clksel = func_48m_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk func_12m_ck = {
- .name = "func_12m_ck",
- .ops = &clkops_null,
- .parent = &func_48m_ck,
- .fixed_div = 4,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/* Secure timer, only available in secure mode */
-static struct clk wdt1_osc_ck = {
- .name = "ck_wdt1_osc",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &osc_ck,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The common_clkout* clksel_rate structs are common to
- * sys_clkout, sys_clkout_src, sys_clkout2, and sys_clkout2_src.
- * sys_clkout2_* are 2420-only, so the
- * clksel_rate flags fields are inaccurate for those clocks. This is
- * harmless since access to those clocks are gated by the struct clk
- * flags fields, which mark them as 2420-only.
- */
-static const struct clksel_rate common_clkout_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_clkout_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel common_clkout_src_clksel[] = {
- { .parent = &core_ck, .rates = common_clkout_src_core_rates },
- { .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
- { .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
- { .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
- { .parent = NULL }
-};
-
-static struct clk sys_clkout_src = {
- .name = "sys_clkout_src",
- .ops = &clkops_omap2_dflt,
- .parent = &func_54m_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP2430_PRCM_CLKOUT_CTRL,
- .enable_bit = OMAP24XX_CLKOUT_EN_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP2430_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP24XX_CLKOUT_SOURCE_MASK,
- .clksel = common_clkout_src_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static const struct clksel_rate common_clkout_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 2, .val = 1, .flags = RATE_IN_24XX },
- { .div = 4, .val = 2, .flags = RATE_IN_24XX },
- { .div = 8, .val = 3, .flags = RATE_IN_24XX },
- { .div = 16, .val = 4, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel sys_clkout_clksel[] = {
- { .parent = &sys_clkout_src, .rates = common_clkout_rates },
- { .parent = NULL }
-};
-
-static struct clk sys_clkout = {
- .name = "sys_clkout",
- .ops = &clkops_null,
- .parent = &sys_clkout_src,
- .clkdm_name = "wkup_clkdm",
- .clksel_reg = OMAP2430_PRCM_CLKOUT_CTRL,
- .clksel_mask = OMAP24XX_CLKOUT_DIV_MASK,
- .clksel = sys_clkout_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk emul_ck = {
- .name = "emul_ck",
- .ops = &clkops_omap2_dflt,
- .parent = &func_54m_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP2430_PRCM_CLKEMUL_CTRL,
- .enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
- .recalc = &followparent_recalc,
-
-};
-
-/*
- * MPU clock domain
- * Clocks:
- * MPU_FCLK, MPU_ICLK
- * INT_M_FCLK, INT_M_I_CLK
- *
- * - Individual clocks are hardware managed.
- * - Base divider comes from: CM_CLKSEL_MPU
- *
- */
-static const struct clksel_rate mpu_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel mpu_clksel[] = {
- { .parent = &core_ck, .rates = mpu_core_rates },
- { .parent = NULL }
-};
-
-static struct clk mpu_ck = { /* Control cpu */
- .name = "mpu_ck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .clkdm_name = "mpu_clkdm",
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_MPU_MASK,
- .clksel = mpu_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * DSP (2430-IVA2.1) clock domain
- * Clocks:
- * 2430: IVA2.1_FCLK (really just DSP_FCLK), IVA2.1_ICLK
- *
- * Won't be too specific here. The core clock comes into this block
- * it is divided then tee'ed. One branch goes directly to xyz enable
- * controls. The other branch gets further divided by 2 then possibly
- * routed into a synchronizer and out of clocks abc.
- */
-static const struct clksel_rate dsp_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 0 },
-};
-
-static const struct clksel dsp_fck_clksel[] = {
- { .parent = &core_ck, .rates = dsp_fck_core_rates },
- { .parent = NULL }
-};
-
-static struct clk dsp_fck = {
- .name = "dsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "dsp_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_DSP_MASK,
- .clksel = dsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel dsp_ick_clksel[] = {
- { .parent = &dsp_fck, .rates = dsp_ick_rates },
- { .parent = NULL }
-};
-
-/* 2430 only - EN_DSP controls both dsp fclk and iclk on 2430 */
-static struct clk iva2_1_ick = {
- .name = "iva2_1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dsp_fck,
- .clkdm_name = "dsp_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP24XX_CLKSEL_DSP_IF_MASK,
- .clksel = dsp_ick_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * L3 clock domain
- * L3 clocks are used for both interface and functional clocks to
- * multiple entities. Some of these clocks are completely managed
- * by hardware, and some others allow software control. Hardware
- * managed ones general are based on directly CLK_REQ signals and
- * various auto idle settings. The functional spec sets many of these
- * as 'tie-high' for their enables.
- *
- * I-CLOCKS:
- * L3-Interconnect, SMS, GPMC, SDRC, OCM_RAM, OCM_ROM, SDMA
- * CAM, HS-USB.
- * F-CLOCK
- * SSI.
- *
- * GPMC memories and SDRC have timing and clock sensitive registers which
- * may very well need notification when the clock changes. Currently for low
- * operating points, these are taken care of in sleep.S.
- */
-static const struct clksel_rate core_l3_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel core_l3_clksel[] = {
- { .parent = &core_ck, .rates = core_l3_core_rates },
- { .parent = NULL }
-};
-
-static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
- .name = "core_l3_ck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .clkdm_name = "core_l3_clkdm",
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_L3_MASK,
- .clksel = core_l3_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* usb_l4_ick */
-static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel usb_l4_ick_clksel[] = {
- { .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
- { .parent = NULL },
-};
-
-/* It is unclear from TRM whether usb_l4_ick is really in L3 or L4 clkdm */
-static struct clk usb_l4_ick = { /* FS-USB interface clock */
- .name = "usb_l4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_USB_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_USB_MASK,
- .clksel = usb_l4_ick_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * L4 clock management domain
- *
- * This domain contains lots of interface clocks from the L4 interface, some
- * functional clocks. Fixed APLL functional source clocks are managed in
- * this domain.
- */
-static const struct clksel_rate l4_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel l4_clksel[] = {
- { .parent = &core_l3_ck, .rates = l4_core_l3_rates },
- { .parent = NULL }
-};
-
-static struct clk l4_ck = { /* used both as an ick and fck */
- .name = "l4_ck",
- .ops = &clkops_null,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l4_clkdm",
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_L4_MASK,
- .clksel = l4_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * SSI is in L3 management domain, its direct parent is core not l3,
- * many core power domain entities are grouped into the L3 clock
- * domain.
- * SSI_SSR_FCLK, SSI_SST_FCLK, SSI_L4_ICLK
- *
- * ssr = core/1/2/3/4/5, sst = 1/2 ssr.
- */
-static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 5, .val = 5, .flags = RATE_IN_243X },
- { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_sst_fck_clksel[] = {
- { .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
- { .parent = NULL }
-};
-
-static struct clk ssi_ssr_sst_fck = {
- .name = "ssi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_sst_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * Presumably this is the same as SSI_ICLK.
- * TRM contradicts itself on what clockdomain SSI_ICLK is in
- */
-static struct clk ssi_l4_ick = {
- .name = "ssi_l4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_SSI_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-
-/*
- * GFX clock domain
- * Clocks:
- * GFX_FCLK, GFX_ICLK
- * GFX_CG1(2d), GFX_CG2(3d)
- *
- * GFX_FCLK runs from L3, and is divided by (1,2,3,4)
- * The 2d and 3d clocks run at a hardware determined
- * divided value of fclk.
- *
- */
-
-/* This clksel struct is shared between gfx_3d_fck and gfx_2d_fck */
-static const struct clksel gfx_fck_clksel[] = {
- { .parent = &core_l3_ck, .rates = gfx_l3_rates },
- { .parent = NULL },
-};
-
-static struct clk gfx_3d_fck = {
- .name = "gfx_3d_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_3D_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_fck_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk gfx_2d_fck = {
- .name = "gfx_2d_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_2D_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* This interface clock does not have a CM_AUTOIDLE bit */
-static struct clk gfx_ick = {
- .name = "gfx_ick", /* From l3 */
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "gfx_clkdm",
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
- .enable_bit = OMAP_EN_GFX_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * Modem clock domain (2430)
- * CLOCKS:
- * MDM_OSC_CLK
- * MDM_ICLK
- * These clocks are usable in chassis mode only.
- */
-static const struct clksel_rate mdm_ick_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_243X },
- { .div = 4, .val = 4, .flags = RATE_IN_243X },
- { .div = 6, .val = 6, .flags = RATE_IN_243X },
- { .div = 9, .val = 9, .flags = RATE_IN_243X },
- { .div = 0 }
-};
-
-static const struct clksel mdm_ick_clksel[] = {
- { .parent = &core_ck, .rates = mdm_ick_core_rates },
- { .parent = NULL }
-};
-
-static struct clk mdm_ick = { /* used both as a ick and fck */
- .name = "mdm_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_ck,
- .clkdm_name = "mdm_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_ICLKEN),
- .enable_bit = OMAP2430_CM_ICLKEN_MDM_EN_MDM_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_CLKSEL),
- .clksel_mask = OMAP2430_CLKSEL_MDM_MASK,
- .clksel = mdm_ick_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mdm_osc_ck = {
- .name = "mdm_osc_ck",
- .ops = &clkops_omap2_mdmclk_dflt_wait,
- .parent = &osc_ck,
- .clkdm_name = "mdm_clkdm",
- .enable_reg = OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_FCLKEN),
- .enable_bit = OMAP2430_EN_OSC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * DSS clock domain
- * CLOCKs:
- * DSS_L4_ICLK, DSS_L3_ICLK,
- * DSS_CLK1, DSS_CLK2, DSS_54MHz_CLK
- *
- * DSS is both initiator and target.
- */
-/* XXX Add RATE_NOT_VALIDATED */
-
-static const struct clksel_rate dss1_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate dss1_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX },
- { .div = 3, .val = 3, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX },
- { .div = 5, .val = 5, .flags = RATE_IN_24XX },
- { .div = 6, .val = 6, .flags = RATE_IN_24XX },
- { .div = 8, .val = 8, .flags = RATE_IN_24XX },
- { .div = 9, .val = 9, .flags = RATE_IN_24XX },
- { .div = 12, .val = 12, .flags = RATE_IN_24XX },
- { .div = 16, .val = 16, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel dss1_fck_clksel[] = {
- { .parent = &sys_ck, .rates = dss1_fck_sys_rates },
- { .parent = &core_ck, .rates = dss1_fck_core_rates },
- { .parent = NULL },
-};
-
-static struct clk dss_ick = { /* Enables both L3,L4 ICLK's */
- .name = "dss_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ck, /* really both l3 and l4 */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss1_fck = {
- .name = "dss1_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &core_ck, /* Core or sys */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_DSS1_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_DSS1_MASK,
- .clksel = dss1_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate dss2_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate dss2_fck_48m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel dss2_fck_clksel[] = {
- { .parent = &sys_ck, .rates = dss2_fck_sys_rates },
- { .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
- { .parent = NULL }
-};
-
-static struct clk dss2_fck = { /* Alt clk used in power management */
- .name = "dss2_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck, /* fixed at sys_ck or 48MHz */
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_DSS2_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_DSS2_MASK,
- .clksel = dss2_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dss_54m_fck = { /* Alt clk used in power management */
- .name = "dss_54m_fck", /* 54m tv clk */
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_54m_ck,
- .clkdm_name = "dss_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_TV_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wu_l4_ick = {
- .name = "wu_l4_ick",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * CORE power domain ICLK & FCLK defines.
- * Many of the these can have more than one possible parent. Entries
- * here will likely have an L4 interface parent, and may have multiple
- * functional clock parents.
- */
-static const struct clksel_rate gpt_alt_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel omap24xx_gpt_clksel[] = {
- { .parent = &func_32k_ck, .rates = gpt_32k_rates },
- { .parent = &sys_ck, .rates = gpt_sys_rates },
- { .parent = &alt_ck, .rates = gpt_alt_rates },
- { .parent = NULL },
-};
-
-static struct clk gpt1_ick = {
- .name = "gpt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt1_fck = {
- .name = "gpt1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_GPT1_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_CLKSEL_GPT1_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-static struct clk gpt2_ick = {
- .name = "gpt2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_fck = {
- .name = "gpt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT2_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT2_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt3_ick = {
- .name = "gpt3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt3_fck = {
- .name = "gpt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT3_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT3_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt4_ick = {
- .name = "gpt4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt4_fck = {
- .name = "gpt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT4_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT4_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt5_ick = {
- .name = "gpt5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt5_fck = {
- .name = "gpt5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT5_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT5_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt6_ick = {
- .name = "gpt6_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt6_fck = {
- .name = "gpt6_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT6_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT6_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt7_ick = {
- .name = "gpt7_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt7_fck = {
- .name = "gpt7_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT7_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT7_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt8_ick = {
- .name = "gpt8_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt8_fck = {
- .name = "gpt8_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT8_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT8_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt9_ick = {
- .name = "gpt9_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt9_fck = {
- .name = "gpt9_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT9_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT9_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt10_ick = {
- .name = "gpt10_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt10_fck = {
- .name = "gpt10_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT10_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT10_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt11_ick = {
- .name = "gpt11_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt11_fck = {
- .name = "gpt11_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT11_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT11_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt12_ick = {
- .name = "gpt12_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt12_fck = {
- .name = "gpt12_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &secure_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_GPT12_SHIFT,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
- .clksel_mask = OMAP24XX_CLKSEL_GPT12_MASK,
- .clksel = omap24xx_gpt_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp1_ick = {
- .name = "mcbsp1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 0 }
-};
-
-static const struct clksel mcbsp_fck_clksel[] = {
- { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp1_fck = {
- .name = "mcbsp1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
- .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp2_ick = {
- .name = "mcbsp2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp2_fck = {
- .name = "mcbsp2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
- .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp3_ick = {
- .name = "mcbsp3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp3_fck = {
- .name = "mcbsp3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP3_SHIFT,
- .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP3_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp4_ick = {
- .name = "mcbsp4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp4_fck = {
- .name = "mcbsp4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP4_SHIFT,
- .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP4_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp5_ick = {
- .name = "mcbsp5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP5_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp5_fck = {
- .name = "mcbsp5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .init = &omap2_init_clksel_parent,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MCBSP5_SHIFT,
- .clksel_reg = OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP5_CLKS_MASK,
- .clksel = mcbsp_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcspi1_ick = {
- .name = "mcspi1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_fck = {
- .name = "mcspi1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_ick = {
- .name = "mcspi2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_fck = {
- .name = "mcspi2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_ick = {
- .name = "mcspi3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MCSPI3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_fck = {
- .name = "mcspi3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MCSPI3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_ick = {
- .name = "uart1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_UART1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_fck = {
- .name = "uart1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_UART1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_ick = {
- .name = "uart2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_UART2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_fck = {
- .name = "uart2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_UART2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_ick = {
- .name = "uart3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP24XX_EN_UART3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_fck = {
- .name = "uart3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_UART3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpios_ick = {
- .name = "gpios_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpios_fck = {
- .name = "gpios_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mpu_wdt_ick = {
- .name = "mpu_wdt_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mpu_wdt_fck = {
- .name = "mpu_wdt_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sync_32k_ick = {
- .name = "sync_32k_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .flags = ENABLE_ON_INIT,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt1_ick = {
- .name = "wdt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_WDT1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk omapctrl_ick = {
- .name = "omapctrl_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .flags = ENABLE_ON_INIT,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk icr_ick = {
- .name = "icr_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wu_l4_ick,
- .clkdm_name = "wkup_clkdm",
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP2430_EN_ICR_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk cam_ick = {
- .name = "cam_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_CAM_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * cam_fck controls both CAM_MCLK and CAM_FCLK. It should probably be
- * split into two separate clocks, since the parent clocks are different
- * and the clockdomains are also different.
- */
-static struct clk cam_fck = {
- .name = "cam_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_CAM_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mailboxes_ick = {
- .name = "mailboxes_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt4_ick = {
- .name = "wdt4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt4_fck = {
- .name = "wdt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_WDT4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_ick = {
- .name = "mspro_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_fck = {
- .name = "mspro_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_ick = {
- .name = "fac_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_FAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_fck = {
- .name = "fac_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_FAC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_ick = {
- .name = "hdq_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_fck = {
- .name = "hdq_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_12m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP24XX_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * XXX This is marked as a 2420-only define, but it claims to be present
- * on 2430 also. Double-check.
- */
-static struct clk i2c2_ick = {
- .name = "i2c2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_I2C2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2chs2_fck = {
- .name = "i2chs2_fck",
- .ops = &clkops_omap2430_i2chs_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_I2CHS2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * XXX This is marked as a 2420-only define, but it claims to be present
- * on 2430 also. Double-check.
- */
-static struct clk i2c1_ick = {
- .name = "i2c1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP2420_EN_I2C1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2chs1_fck = {
- .name = "i2chs1_fck",
- .ops = &clkops_omap2430_i2chs_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_I2CHS1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
- * accesses derived from this data.
- */
-static struct clk gpmc_fck = {
- .name = "gpmc_fck",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sdma_fck = {
- .name = "sdma_fck",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
- * accesses derived from this data.
- */
-static struct clk sdma_ick = {
- .name = "sdma_ick",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sdrc_ick = {
- .name = "sdrc_ick",
- .ops = &clkops_omap2_iclk_idle_only,
- .parent = &core_l3_ck,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP2430_EN_SDRC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk des_ick = {
- .name = "des_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_DES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha_ick = {
- .name = "sha_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_SHA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rng_ick = {
- .name = "rng_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_RNG_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes_ick = {
- .name = "aes_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_AES_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pka_ick = {
- .name = "pka_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
- .enable_bit = OMAP24XX_EN_PKA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_fck = {
- .name = "usb_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_48m_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP24XX_EN_USB_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbhs_ick = {
- .name = "usbhs_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l3_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_USBHS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_ick = {
- .name = "mmchs1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MMCHS1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_fck = {
- .name = "mmchs1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MMCHS1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_ick = {
- .name = "mmchs2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MMCHS2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_fck = {
- .name = "mmchs2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_96m_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MMCHS2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_ick = {
- .name = "gpio5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_GPIO5_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_fck = {
- .name = "gpio5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_GPIO5_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mdm_intc_ick = {
- .name = "mdm_intc_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP2430_EN_MDM_INTC_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchsdb1_fck = {
- .name = "mmchsdb1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MMCHSDB1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchsdb2_fck = {
- .name = "mmchsdb2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &func_32k_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
- .enable_bit = OMAP2430_EN_MMCHSDB2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * This clock is a composite clock which performs an entire set change and
- * then forces a rebalance.  It is keyed on the MPU speed, but it could
- * really be keyed on any rate that is part of a set in the rate table.
- *
- * To really change a set, you need memory table sets that get switched
- * from SRAM, pre- and post-change notifiers, and a change of the top-level
- * set; without low-level display recalcs this won't work.  This is why
- * DPM-style notifiers work: interrupts off, walk a list of clocks that are
- * already _off_, and don't touch the bus.
- *
- * This clock should have no parent. It embodies the entire upper level
- * active set. A parent will mess up some of the init also.
- */
-static struct clk virt_prcm_set = {
- .name = "virt_prcm_set",
- .ops = &clkops_null,
- .parent = &mpu_ck, /* Indexed by mpu speed, no parent */
- .recalc = &omap2_table_mpu_recalc, /* sets are keyed on mpu rate */
- .set_rate = &omap2_select_table_rate,
- .round_rate = &omap2_round_to_table_rate,
-};
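A minimal consumer-side sketch of the virtual clock described above (illustration only, not part of this patch), assuming the standard Linux clk API and the "cpufreq_ck" clkdev alias registered for virt_prcm_set in the table below; the function name is made up:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Sketch: switch the whole PRCM rate set via the virtual clock. */
static int example_switch_prcm_set(unsigned long target_hz)
{
        struct clk *set_clk;
        long rounded;
        int ret;

        set_clk = clk_get(NULL, "cpufreq_ck");          /* -> virt_prcm_set */
        if (IS_ERR(set_clk))
                return PTR_ERR(set_clk);

        /* omap2_round_to_table_rate() snaps to the closest table entry */
        rounded = clk_round_rate(set_clk, target_hz);
        if (rounded <= 0) {
                clk_put(set_clk);
                return -EINVAL;
        }

        /* omap2_select_table_rate() reprograms the whole active set */
        ret = clk_set_rate(set_clk, rounded);

        clk_put(set_clk);
        return ret;
}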
-
-
-/*
- * clkdev integration
- */
-
-static struct omap_clk omap2430_clks[] = {
- /* external root sources */
- CLK(NULL, "func_32k_ck", &func_32k_ck, CK_243X),
- CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_243X),
- CLK(NULL, "osc_ck", &osc_ck, CK_243X),
- CLK("twl", "fck", &osc_ck, CK_243X),
- CLK(NULL, "sys_ck", &sys_ck, CK_243X),
- CLK(NULL, "alt_ck", &alt_ck, CK_243X),
- CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_243X),
- /* internal analog sources */
- CLK(NULL, "dpll_ck", &dpll_ck, CK_243X),
- CLK(NULL, "apll96_ck", &apll96_ck, CK_243X),
- CLK(NULL, "apll54_ck", &apll54_ck, CK_243X),
- /* internal prcm root sources */
- CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X),
- CLK(NULL, "core_ck", &core_ck, CK_243X),
- CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X),
- CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X),
- CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X),
- CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_243X),
- CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_243X),
- CLK(NULL, "sys_clkout", &sys_clkout, CK_243X),
- CLK(NULL, "emul_ck", &emul_ck, CK_243X),
- /* mpu domain clocks */
- CLK(NULL, "mpu_ck", &mpu_ck, CK_243X),
- /* dsp domain clocks */
- CLK(NULL, "dsp_fck", &dsp_fck, CK_243X),
- CLK(NULL, "iva2_1_ick", &iva2_1_ick, CK_243X),
- /* GFX domain clocks */
- CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_243X),
- CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_243X),
- CLK(NULL, "gfx_ick", &gfx_ick, CK_243X),
- /* Modem domain clocks */
- CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
- CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
- /* DSS domain clocks */
- CLK("omapdss_dss", "ick", &dss_ick, CK_243X),
- CLK(NULL, "dss_ick", &dss_ick, CK_243X),
- CLK(NULL, "dss1_fck", &dss1_fck, CK_243X),
- CLK(NULL, "dss2_fck", &dss2_fck, CK_243X),
- CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X),
- /* L3 domain clocks */
- CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X),
- CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X),
- CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_243X),
- /* L4 domain clocks */
- CLK(NULL, "l4_ck", &l4_ck, CK_243X),
- CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_243X),
- CLK(NULL, "wu_l4_ick", &wu_l4_ick, CK_243X),
- /* virtual meta-group clock */
- CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_243X),
- /* general l4 interface ck, multi-parent functional clk */
- CLK(NULL, "gpt1_ick", &gpt1_ick, CK_243X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_243X),
- CLK(NULL, "gpt2_ick", &gpt2_ick, CK_243X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_243X),
- CLK(NULL, "gpt3_ick", &gpt3_ick, CK_243X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_243X),
- CLK(NULL, "gpt4_ick", &gpt4_ick, CK_243X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_243X),
- CLK(NULL, "gpt5_ick", &gpt5_ick, CK_243X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_243X),
- CLK(NULL, "gpt6_ick", &gpt6_ick, CK_243X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_243X),
- CLK(NULL, "gpt7_ick", &gpt7_ick, CK_243X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_243X),
- CLK(NULL, "gpt8_ick", &gpt8_ick, CK_243X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_243X),
- CLK(NULL, "gpt9_ick", &gpt9_ick, CK_243X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_243X),
- CLK(NULL, "gpt10_ick", &gpt10_ick, CK_243X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_243X),
- CLK(NULL, "gpt11_ick", &gpt11_ick, CK_243X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_243X),
- CLK(NULL, "gpt12_ick", &gpt12_ick, CK_243X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_243X),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_243X),
- CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_243X),
- CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_243X),
- CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_243X),
- CLK(NULL, "mcbsp2_ick", &mcbsp2_ick, CK_243X),
- CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_243X),
- CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_243X),
- CLK(NULL, "mcbsp3_ick", &mcbsp3_ick, CK_243X),
- CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_243X),
- CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_243X),
- CLK(NULL, "mcbsp4_ick", &mcbsp4_ick, CK_243X),
- CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_243X),
- CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_243X),
- CLK(NULL, "mcbsp5_ick", &mcbsp5_ick, CK_243X),
- CLK(NULL, "mcbsp5_fck", &mcbsp5_fck, CK_243X),
- CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_243X),
- CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_243X),
- CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_243X),
- CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_243X),
- CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_243X),
- CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_243X),
- CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_243X),
- CLK(NULL, "mcspi3_ick", &mcspi3_ick, CK_243X),
- CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_243X),
- CLK(NULL, "uart1_ick", &uart1_ick, CK_243X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_243X),
- CLK(NULL, "uart2_ick", &uart2_ick, CK_243X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_243X),
- CLK(NULL, "uart3_ick", &uart3_ick, CK_243X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_243X),
- CLK(NULL, "gpios_ick", &gpios_ick, CK_243X),
- CLK(NULL, "gpios_fck", &gpios_fck, CK_243X),
- CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_243X),
- CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick, CK_243X),
- CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck, CK_243X),
- CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_243X),
- CLK(NULL, "wdt1_ick", &wdt1_ick, CK_243X),
- CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_243X),
- CLK(NULL, "icr_ick", &icr_ick, CK_243X),
- CLK("omap24xxcam", "fck", &cam_fck, CK_243X),
- CLK(NULL, "cam_fck", &cam_fck, CK_243X),
- CLK("omap24xxcam", "ick", &cam_ick, CK_243X),
- CLK(NULL, "cam_ick", &cam_ick, CK_243X),
- CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_243X),
- CLK(NULL, "wdt4_ick", &wdt4_ick, CK_243X),
- CLK(NULL, "wdt4_fck", &wdt4_fck, CK_243X),
- CLK(NULL, "mspro_ick", &mspro_ick, CK_243X),
- CLK(NULL, "mspro_fck", &mspro_fck, CK_243X),
- CLK(NULL, "fac_ick", &fac_ick, CK_243X),
- CLK(NULL, "fac_fck", &fac_fck, CK_243X),
- CLK("omap_hdq.0", "ick", &hdq_ick, CK_243X),
- CLK(NULL, "hdq_ick", &hdq_ick, CK_243X),
- CLK("omap_hdq.1", "fck", &hdq_fck, CK_243X),
- CLK(NULL, "hdq_fck", &hdq_fck, CK_243X),
- CLK("omap_i2c.1", "ick", &i2c1_ick, CK_243X),
- CLK(NULL, "i2c1_ick", &i2c1_ick, CK_243X),
- CLK(NULL, "i2chs1_fck", &i2chs1_fck, CK_243X),
- CLK("omap_i2c.2", "ick", &i2c2_ick, CK_243X),
- CLK(NULL, "i2c2_ick", &i2c2_ick, CK_243X),
- CLK(NULL, "i2chs2_fck", &i2chs2_fck, CK_243X),
- CLK(NULL, "gpmc_fck", &gpmc_fck, CK_243X),
- CLK(NULL, "sdma_fck", &sdma_fck, CK_243X),
- CLK(NULL, "sdma_ick", &sdma_ick, CK_243X),
- CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
- CLK(NULL, "des_ick", &des_ick, CK_243X),
- CLK("omap-sham", "ick", &sha_ick, CK_243X),
- CLK("omap_rng", "ick", &rng_ick, CK_243X),
- CLK(NULL, "rng_ick", &rng_ick, CK_243X),
- CLK("omap-aes", "ick", &aes_ick, CK_243X),
- CLK(NULL, "pka_ick", &pka_ick, CK_243X),
- CLK(NULL, "usb_fck", &usb_fck, CK_243X),
- CLK("musb-omap2430", "ick", &usbhs_ick, CK_243X),
- CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X),
- CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_243X),
- CLK(NULL, "mmchs1_ick", &mmchs1_ick, CK_243X),
- CLK(NULL, "mmchs1_fck", &mmchs1_fck, CK_243X),
- CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_243X),
- CLK(NULL, "mmchs2_ick", &mmchs2_ick, CK_243X),
- CLK(NULL, "mmchs2_fck", &mmchs2_fck, CK_243X),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_243X),
- CLK(NULL, "gpio5_fck", &gpio5_fck, CK_243X),
- CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
- CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
- CLK(NULL, "mmchsdb1_fck", &mmchsdb1_fck, CK_243X),
- CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
- CLK(NULL, "mmchsdb2_fck", &mmchsdb2_fck, CK_243X),
- CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_243X),
- CLK(NULL, "timer_sys_ck", &sys_ck, CK_243X),
- CLK(NULL, "timer_ext_ck", &alt_ck, CK_243X),
- CLK(NULL, "cpufreq_ck", &virt_prcm_set, CK_243X),
-};
-
-/*
- * init code
- */
-
-int __init omap2430_clk_init(void)
-{
- struct omap_clk *c;
-
- prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL;
- cpu_mask = RATE_IN_243X;
- rate_table = omap2430_rate_table;
-
- for (c = omap2430_clks; c < omap2430_clks + ARRAY_SIZE(omap2430_clks);
- c++)
- clk_preinit(c->lk.clk);
-
- osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
- propagate_rate(&osc_ck);
- sys_ck.rate = omap2xxx_sys_clk_recalc(&sys_ck);
- propagate_rate(&sys_ck);
-
- for (c = omap2430_clks; c < omap2430_clks + ARRAY_SIZE(omap2430_clks);
- c++) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- omap2xxx_clkt_vps_late_init();
-
- /* Disable autoidle on all clocks; let the PM code enable it later */
- omap_clk_disable_autoidle_all();
-
- /* XXX Can this be done from the virt_prcm_set clk init function? */
- omap2xxx_clkt_vps_check_bootloader_rates();
-
- recalculate_root_clocks();
-
- pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
- (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
- (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- return 0;
-}
-
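The clkdev table and omap2430_clk_init() above register each clock under a (dev_id, con_id) pair. A minimal driver-side sketch of how those lookups resolve (illustration only, not part of this patch), assuming dev is the omap_hsmmc.0 platform device; the function name is made up and error unwinding is omitted for brevity:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_hsmmc_clocks(struct device *dev)
{
        struct clk *ick, *fck;
        int ret;

        /* matches CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_243X) above */
        ick = clk_get(dev, "ick");
        if (IS_ERR(ick))
                return PTR_ERR(ick);

        /* matches CLK(NULL, "mmchs1_fck", &mmchs1_fck, CK_243X) above */
        fck = clk_get(NULL, "mmchs1_fck");
        if (IS_ERR(fck))
                return PTR_ERR(fck);

        ret = clk_prepare_enable(ick);
        if (!ret)
                ret = clk_prepare_enable(fck);
        return ret;
}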
diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c
index 5f7faeb4c19..1ff64690862 100644
--- a/arch/arm/mach-omap2/clock2xxx.c
+++ b/arch/arm/mach-omap2/clock2xxx.c
@@ -28,6 +28,7 @@
#include "cm.h"
#include "cm-regbits-24xx.h"
+struct clk_hw *dclk_hw;
/*
* Omap24xx specific clock functions
*/
diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h
index ce809c913b6..539dc08afbb 100644
--- a/arch/arm/mach-omap2/clock2xxx.h
+++ b/arch/arm/mach-omap2/clock2xxx.h
@@ -8,18 +8,32 @@
#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK2XXX_H
#define __ARCH_ARM_MACH_OMAP2_CLOCK2XXX_H
-unsigned long omap2_table_mpu_recalc(struct clk *clk);
-int omap2_select_table_rate(struct clk *clk, unsigned long rate);
-long omap2_round_to_table_rate(struct clk *clk, unsigned long rate);
-unsigned long omap2xxx_sys_clk_recalc(struct clk *clk);
-unsigned long omap2_osc_clk_recalc(struct clk *clk);
-unsigned long omap2_dpllcore_recalc(struct clk *clk);
-int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate);
+#include <linux/clk-provider.h>
+#include "clock.h"
+
+unsigned long omap2_table_mpu_recalc(struct clk_hw *clk,
+ unsigned long parent_rate);
+int omap2_select_table_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+long omap2_round_to_table_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+unsigned long omap2xxx_sys_clk_recalc(struct clk_hw *clk,
+ unsigned long parent_rate);
+unsigned long omap2_osc_clk_recalc(struct clk_hw *clk,
+ unsigned long parent_rate);
+unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
+unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+unsigned long omap2_clk_apll96_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
unsigned long omap2xxx_clk_get_core_rate(void);
u32 omap2xxx_get_apll_clkin(void);
u32 omap2xxx_get_sysclkdiv(void);
void omap2xxx_clk_prepare_for_reboot(void);
-void omap2xxx_clkt_dpllcore_init(struct clk *clk);
void omap2xxx_clkt_vps_check_bootloader_rates(void);
void omap2xxx_clkt_vps_late_init(void);
@@ -37,9 +51,12 @@ int omap2430_clk_init(void);
extern void __iomem *prcm_clksrc_ctrl;
-extern const struct clkops clkops_omap2430_i2chs_wait;
-extern const struct clkops clkops_oscck;
-extern const struct clkops clkops_apll96;
-extern const struct clkops clkops_apll54;
+extern struct clk_hw *dclk_hw;
+int omap2_enable_osc_ck(struct clk_hw *hw);
+void omap2_disable_osc_ck(struct clk_hw *hw);
+int omap2_clk_apll96_enable(struct clk_hw *hw);
+int omap2_clk_apll54_enable(struct clk_hw *hw);
+void omap2_clk_apll96_disable(struct clk_hw *hw);
+void omap2_clk_apll54_disable(struct clk_hw *hw);
#endif
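The prototype changes above convert these callbacks from the legacy struct clk signatures to common clock framework signatures that take a struct clk_hw and a parent_rate. A minimal sketch of how such callbacks could be wired into a struct clk_ops (illustration only, not part of this patch); only the callback names come from the header above, the ops/clk_hw names are made up:

#include <linux/clk-provider.h>
#include "clock2xxx.h"          /* prototypes shown in the hunk above */

static const struct clk_ops example_virt_prcm_set_ops = {
        .recalc_rate    = omap2_table_mpu_recalc,
        .set_rate       = omap2_select_table_rate,
        .round_rate     = omap2_round_to_table_rate,
};

static struct clk_hw example_virt_prcm_set_hw = {
        .init = &(struct clk_init_data){
                .name           = "virt_prcm_set",
                .ops            = &example_virt_prcm_set_ops,
                .parent_names   = (const char *[]){ "mpu_ck" },
                .num_parents    = 1,
        },
};

Registering it with clk_register(NULL, &example_virt_prcm_set_hw) would then route rate requests to the table-based callbacks.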
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
deleted file mode 100644
index 17e3de51bcb..00000000000
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ /dev/null
@@ -1,1109 +0,0 @@
-/*
- * AM33XX Clock data
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- * Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "control.h"
-#include "clock.h"
-#include "cm.h"
-#include "cm33xx.h"
-#include "cm-regbits-33xx.h"
-#include "prm.h"
-
-/* Maximum DPLL multiplier, divider values for AM33XX */
-#define AM33XX_MAX_DPLL_MULT 2047
-#define AM33XX_MAX_DPLL_DIV 128
-
-/* Modulemode control */
-#define AM33XX_MODULEMODE_HWCTRL 0
-#define AM33XX_MODULEMODE_SWCTRL 1
-
-/* TRM ERRATA: The default parent (TCLKIN) of timers 3 & 6 may not always
- * be physically present; in that case hwmod enabling of the clock fails
- * with the default parent, and the timer probe thinks the clock is
- * already enabled, which leads to a crash when the timer 3 & 6 registers
- * are accessed in probe.  Fix this by setting the parent of both these
- * timers to the master oscillator clock.
- */
-static inline void am33xx_init_timer_parent(struct clk *clk)
-{
- omap2_clksel_set_parent(clk, clk->parent);
-}
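The errata comment above forces the parent of timers 3 & 6 to the master oscillator at clock-init time. For comparison, a minimal sketch of the same reparenting done through the generic clk API (illustration only, not part of this patch); the clock names match the .name fields in this file, but whether clkdev aliases exist for them is an assumption, and the function name is made up:

#include <linux/clk.h>
#include <linux/err.h>

static int example_fix_timer3_parent(void)
{
        struct clk *timer_fck, *osc;
        int ret;

        timer_fck = clk_get(NULL, "timer3_fck");
        if (IS_ERR(timer_fck))
                return PTR_ERR(timer_fck);

        osc = clk_get(NULL, "sys_clkin_ck");
        if (IS_ERR(osc)) {
                clk_put(timer_fck);
                return PTR_ERR(osc);
        }

        /* move timer3 off the possibly-absent TCLKIN pin */
        ret = clk_set_parent(timer_fck, osc);

        clk_put(osc);
        clk_put(timer_fck);
        return ret;
}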
-
-/* Root clocks */
-
-/* RTC 32k */
-static struct clk clk_32768_ck = {
- .name = "clk_32768_ck",
- .clkdm_name = "l4_rtc_clkdm",
- .rate = 32768,
- .ops = &clkops_null,
-};
-
-/* On-Chip 32KHz RC OSC */
-static struct clk clk_rc32k_ck = {
- .name = "clk_rc32k_ck",
- .rate = 32000,
- .ops = &clkops_null,
-};
-
-/* Crystal input clks */
-static struct clk virt_24000000_ck = {
- .name = "virt_24000000_ck",
- .rate = 24000000,
- .ops = &clkops_null,
-};
-
-static struct clk virt_25000000_ck = {
- .name = "virt_25000000_ck",
- .rate = 25000000,
- .ops = &clkops_null,
-};
-
-/* Oscillator clock */
-/* 19.2, 24, 25 or 26 MHz */
-static const struct clksel sys_clkin_sel[] = {
- { .parent = &virt_19200000_ck, .rates = div_1_0_rates },
- { .parent = &virt_24000000_ck, .rates = div_1_1_rates },
- { .parent = &virt_25000000_ck, .rates = div_1_2_rates },
- { .parent = &virt_26000000_ck, .rates = div_1_3_rates },
- { .parent = NULL },
-};
-
-/* External clock - 12 MHz */
-static struct clk tclkin_ck = {
- .name = "tclkin_ck",
- .rate = 12000000,
- .ops = &clkops_null,
-};
-
-/*
- * sys_clkin: input to the DPLLs and also used as the functional clock for
- * adc_tsc, smartreflex0-1, timer1-7, mcasp0-1, dcan0-1 and cefuse.
- */
-static struct clk sys_clkin_ck = {
- .name = "sys_clkin_ck",
- .parent = &virt_24000000_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = AM33XX_CTRL_REGADDR(AM33XX_CONTROL_STATUS),
- .clksel_mask = AM33XX_CONTROL_STATUS_SYSBOOT1_MASK,
- .clksel = sys_clkin_sel,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL_CORE */
-static struct dpll_data dpll_core_dd = {
- .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_CORE,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = AM33XX_CM_CLKMODE_DPLL_CORE,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .idlest_reg = AM33XX_CM_IDLEST_DPLL_CORE,
- .mult_mask = AM33XX_DPLL_MULT_MASK,
- .div1_mask = AM33XX_DPLL_DIV_MASK,
- .enable_mask = AM33XX_DPLL_EN_MASK,
- .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
- .max_multiplier = AM33XX_MAX_DPLL_MULT,
- .max_divider = AM33XX_MAX_DPLL_DIV,
- .min_divider = 1,
-};
-
-/* CLKDCOLDO output */
-static struct clk dpll_core_ck = {
- .name = "dpll_core_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_core_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_core_dpll_ops,
- .recalc = &omap3_dpll_recalc,
-};
-
-static struct clk dpll_core_x2_ck = {
- .name = "dpll_core_x2_ck",
- .parent = &dpll_core_ck,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_null,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-
-static const struct clksel dpll_core_m4_div[] = {
- { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_core_m4_ck = {
- .name = "dpll_core_m4_ck",
- .parent = &dpll_core_x2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = dpll_core_m4_div,
- .clksel_reg = AM33XX_CM_DIV_M4_DPLL_CORE,
- .clksel_mask = AM33XX_HSDIVIDER_CLKOUT1_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel dpll_core_m5_div[] = {
- { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_core_m5_ck = {
- .name = "dpll_core_m5_ck",
- .parent = &dpll_core_x2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = dpll_core_m5_div,
- .clksel_reg = AM33XX_CM_DIV_M5_DPLL_CORE,
- .clksel_mask = AM33XX_HSDIVIDER_CLKOUT2_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel dpll_core_m6_div[] = {
- { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_core_m6_ck = {
- .name = "dpll_core_m6_ck",
- .parent = &dpll_core_x2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = dpll_core_m6_div,
- .clksel_reg = AM33XX_CM_DIV_M6_DPLL_CORE,
- .clksel_mask = AM33XX_HSDIVIDER_CLKOUT3_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* DPLL_MPU */
-static struct dpll_data dpll_mpu_dd = {
- .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_MPU,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = AM33XX_CM_CLKMODE_DPLL_MPU,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .idlest_reg = AM33XX_CM_IDLEST_DPLL_MPU,
- .mult_mask = AM33XX_DPLL_MULT_MASK,
- .div1_mask = AM33XX_DPLL_DIV_MASK,
- .enable_mask = AM33XX_DPLL_EN_MASK,
- .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
- .max_multiplier = AM33XX_MAX_DPLL_MULT,
- .max_divider = AM33XX_MAX_DPLL_DIV,
- .min_divider = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_mpu_ck = {
- .name = "dpll_mpu_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_mpu_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-static const struct clksel dpll_mpu_m2_div[] = {
- { .parent = &dpll_mpu_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_mpu_m2_ck = {
- .name = "dpll_mpu_m2_ck",
- .clkdm_name = "mpu_clkdm",
- .parent = &dpll_mpu_ck,
- .clksel = dpll_mpu_m2_div,
- .clksel_reg = AM33XX_CM_DIV_M2_DPLL_MPU,
- .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* DPLL_DDR */
-static struct dpll_data dpll_ddr_dd = {
- .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DDR,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = AM33XX_CM_CLKMODE_DPLL_DDR,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .idlest_reg = AM33XX_CM_IDLEST_DPLL_DDR,
- .mult_mask = AM33XX_DPLL_MULT_MASK,
- .div1_mask = AM33XX_DPLL_DIV_MASK,
- .enable_mask = AM33XX_DPLL_EN_MASK,
- .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
- .max_multiplier = AM33XX_MAX_DPLL_MULT,
- .max_divider = AM33XX_MAX_DPLL_DIV,
- .min_divider = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_ddr_ck = {
- .name = "dpll_ddr_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_ddr_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_null,
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-static const struct clksel dpll_ddr_m2_div[] = {
- { .parent = &dpll_ddr_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_ddr_m2_ck = {
- .name = "dpll_ddr_m2_ck",
- .parent = &dpll_ddr_ck,
- .clksel = dpll_ddr_m2_div,
- .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DDR,
- .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* emif_fck functional clock */
-static struct clk dpll_ddr_m2_div2_ck = {
- .name = "dpll_ddr_m2_div2_ck",
- .clkdm_name = "l3_clkdm",
- .parent = &dpll_ddr_m2_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/* DPLL_DISP */
-static struct dpll_data dpll_disp_dd = {
- .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DISP,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = AM33XX_CM_CLKMODE_DPLL_DISP,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .idlest_reg = AM33XX_CM_IDLEST_DPLL_DISP,
- .mult_mask = AM33XX_DPLL_MULT_MASK,
- .div1_mask = AM33XX_DPLL_DIV_MASK,
- .enable_mask = AM33XX_DPLL_EN_MASK,
- .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
- .max_multiplier = AM33XX_MAX_DPLL_MULT,
- .max_divider = AM33XX_MAX_DPLL_DIV,
- .min_divider = 1,
-};
-
-/* CLKOUT: fdpll/M2 */
-static struct clk dpll_disp_ck = {
- .name = "dpll_disp_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_disp_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_null,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-/*
- * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
- * and ALT_CLK1/2)
- */
-static const struct clksel dpll_disp_m2_div[] = {
- { .parent = &dpll_disp_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_disp_m2_ck = {
- .name = "dpll_disp_m2_ck",
- .parent = &dpll_disp_ck,
- .clksel = dpll_disp_m2_div,
- .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DISP,
- .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* DPLL_PER */
-static struct dpll_data dpll_per_dd = {
- .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_PERIPH,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = AM33XX_CM_CLKMODE_DPLL_PER,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .idlest_reg = AM33XX_CM_IDLEST_DPLL_PER,
- .mult_mask = AM33XX_DPLL_MULT_PERIPH_MASK,
- .div1_mask = AM33XX_DPLL_PER_DIV_MASK,
- .enable_mask = AM33XX_DPLL_EN_MASK,
- .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
- .max_multiplier = AM33XX_MAX_DPLL_MULT,
- .max_divider = AM33XX_MAX_DPLL_DIV,
- .min_divider = 1,
- .flags = DPLL_J_TYPE,
-};
-
-/* CLKDCOLDO */
-static struct clk dpll_per_ck = {
- .name = "dpll_per_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_per_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_null,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-/* CLKOUT: fdpll/M2 */
-static const struct clksel dpll_per_m2_div[] = {
- { .parent = &dpll_per_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_per_m2_ck = {
- .name = "dpll_per_m2_ck",
- .parent = &dpll_per_ck,
- .clksel = dpll_per_m2_div,
- .clksel_reg = AM33XX_CM_DIV_M2_DPLL_PER,
- .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_m2_div4_wkupdm_ck = {
- .name = "dpll_per_m2_div4_wkupdm_ck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &dpll_per_m2_ck,
- .fixed_div = 4,
- .ops = &clkops_null,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk dpll_per_m2_div4_ck = {
- .name = "dpll_per_m2_div4_ck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &dpll_per_m2_ck,
- .fixed_div = 4,
- .ops = &clkops_null,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk l3_gclk = {
- .name = "l3_gclk",
- .clkdm_name = "l3_clkdm",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dpll_core_m4_div2_ck = {
- .name = "dpll_core_m4_div2_ck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk l4_rtc_gclk = {
- .name = "l4_rtc_gclk",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk clk_24mhz = {
- .name = "clk_24mhz",
- .parent = &dpll_per_m2_ck,
- .fixed_div = 8,
- .ops = &clkops_null,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/*
- * The clock nodes below describe the per-clockdomain clocks derived
- * from the core clock.
- */
-static struct clk l4hs_gclk = {
- .name = "l4hs_gclk",
- .clkdm_name = "l4hs_clkdm",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l3s_gclk = {
- .name = "l3s_gclk",
- .clkdm_name = "l3s_clkdm",
- .parent = &dpll_core_m4_div2_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l4fw_gclk = {
- .name = "l4fw_gclk",
- .clkdm_name = "l4fw_clkdm",
- .parent = &dpll_core_m4_div2_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l4ls_gclk = {
- .name = "l4ls_gclk",
- .clkdm_name = "l4ls_clkdm",
- .parent = &dpll_core_m4_div2_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sysclk_div_ck = {
- .name = "sysclk_div_ck",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-/*
- * In order to match the clock domain with hwmod clockdomain entry,
- * separate clock nodes are required for the modules which get their
- * functional clock directly from sys_clkin.
- */
-static struct clk adc_tsc_fck = {
- .name = "adc_tsc_fck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dcan0_fck = {
- .name = "dcan0_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dcan1_fck = {
- .name = "dcan1_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcasp0_fck = {
- .name = "mcasp0_fck",
- .clkdm_name = "l3s_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcasp1_fck = {
- .name = "mcasp1_fck",
- .clkdm_name = "l3s_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk smartreflex0_fck = {
- .name = "smartreflex0_fck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk smartreflex1_fck = {
- .name = "smartreflex1_fck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-/*
- * Modules clock nodes
- *
- * The following clock leaf nodes are added for the moment because:
- *
- * - hwmod data is not present for these modules; either hwmod
- *   control is not required or it is not populated.
- * - Driver code is not yet migrated to use hwmod/runtime pm
- * - Modules outside kernel access (to disable them by default)
- *
- * - debugss
- * - mmu (gfx domain)
- * - cefuse
- * - usbotg_fck (it is an additional clock, not really a modulemode)
- * - ieee5000
- */
-static struct clk debugss_ick = {
- .name = "debugss_ick",
- .clkdm_name = "l3_aon_clkdm",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_omap2_dflt,
- .enable_reg = AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
- .enable_bit = AM33XX_MODULEMODE_SWCTRL,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmu_fck = {
- .name = "mmu_fck",
- .clkdm_name = "gfx_l3_clkdm",
- .parent = &dpll_core_m4_ck,
- .ops = &clkops_omap2_dflt,
- .enable_reg = AM33XX_CM_GFX_MMUDATA_CLKCTRL,
- .enable_bit = AM33XX_MODULEMODE_SWCTRL,
- .recalc = &followparent_recalc,
-};
-
-static struct clk cefuse_fck = {
- .name = "cefuse_fck",
- .clkdm_name = "l4_cefuse_clkdm",
- .parent = &sys_clkin_ck,
- .enable_reg = AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL,
- .enable_bit = AM33XX_MODULEMODE_SWCTRL,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-/*
- * clkdiv32k is generated from clk_24mhz by a fixed division of 732.4219
- * (24 MHz / 732.4219 ~= 32768 Hz).
- */
-static struct clk clkdiv32k_ick = {
- .name = "clkdiv32k_ick",
- .clkdm_name = "clk_24mhz_clkdm",
- .rate = 32768,
- .parent = &clk_24mhz,
- .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
- .enable_bit = AM33XX_MODULEMODE_SWCTRL,
- .ops = &clkops_omap2_dflt,
-};
-
-static struct clk usbotg_fck = {
- .name = "usbotg_fck",
- .clkdm_name = "l3s_clkdm",
- .parent = &dpll_per_ck,
- .enable_reg = AM33XX_CM_CLKDCOLDO_DPLL_PER,
- .enable_bit = AM33XX_ST_DPLL_CLKDCOLDO_SHIFT,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ieee5000_fck = {
- .name = "ieee5000_fck",
- .clkdm_name = "l3s_clkdm",
- .parent = &dpll_core_m4_div2_ck,
- .enable_reg = AM33XX_CM_PER_IEEE5000_CLKCTRL,
- .enable_bit = AM33XX_MODULEMODE_SWCTRL,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-/* Timers */
-static const struct clksel timer1_clkmux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
- { .parent = &tclkin_ck, .rates = div_1_2_rates },
- { .parent = &clk_rc32k_ck, .rates = div_1_3_rates },
- { .parent = &clk_32768_ck, .rates = div_1_4_rates },
- { .parent = NULL },
-};
-
-static struct clk timer1_fck = {
- .name = "timer1_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = timer1_clkmux_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER1MS_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_2_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel timer2_to_7_clk_sel[] = {
- { .parent = &tclkin_ck, .rates = div_1_0_rates },
- { .parent = &sys_clkin_ck, .rates = div_1_1_rates },
- { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static struct clk timer2_fck = {
- .name = "timer2_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER2_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk timer3_fck = {
- .name = "timer3_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &am33xx_init_timer_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER3_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk timer4_fck = {
- .name = "timer4_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER4_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk timer5_fck = {
- .name = "timer5_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER5_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk timer6_fck = {
- .name = "timer6_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &am33xx_init_timer_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER6_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk timer7_fck = {
- .name = "timer7_fck",
- .clkdm_name = "l4ls_clkdm",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = timer2_to_7_clk_sel,
- .clksel_reg = AM33XX_CLKSEL_TIMER7_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk cpsw_125mhz_gclk = {
- .name = "cpsw_125mhz_gclk",
- .clkdm_name = "cpsw_125mhz_clkdm",
- .parent = &dpll_core_m5_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel cpsw_cpts_rft_clkmux_sel[] = {
- { .parent = &dpll_core_m5_ck, .rates = div_1_0_rates },
- { .parent = &dpll_core_m4_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk cpsw_cpts_rft_clk = {
- .name = "cpsw_cpts_rft_clk",
- .clkdm_name = "cpsw_125mhz_clkdm",
- .parent = &dpll_core_m5_ck,
- .clksel = cpsw_cpts_rft_clkmux_sel,
- .clksel_reg = AM33XX_CM_CPTS_RFT_CLKSEL,
- .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-/* gpio */
-static const struct clksel gpio0_dbclk_mux_sel[] = {
- { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
- { .parent = &clk_32768_ck, .rates = div_1_1_rates },
- { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static struct clk gpio0_dbclk_mux_ck = {
- .name = "gpio0_dbclk_mux_ck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &clk_rc32k_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = gpio0_dbclk_mux_sel,
- .clksel_reg = AM33XX_CLKSEL_GPIO0_DBCLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpio0_dbclk = {
- .name = "gpio0_dbclk",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &gpio0_dbclk_mux_ck,
- .enable_reg = AM33XX_CM_WKUP_GPIO0_CLKCTRL,
- .enable_bit = AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_dbclk = {
- .name = "gpio1_dbclk",
- .clkdm_name = "l4ls_clkdm",
- .parent = &clkdiv32k_ick,
- .enable_reg = AM33XX_CM_PER_GPIO1_CLKCTRL,
- .enable_bit = AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_dbclk = {
- .name = "gpio2_dbclk",
- .clkdm_name = "l4ls_clkdm",
- .parent = &clkdiv32k_ick,
- .enable_reg = AM33XX_CM_PER_GPIO2_CLKCTRL,
- .enable_bit = AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_dbclk = {
- .name = "gpio3_dbclk",
- .clkdm_name = "l4ls_clkdm",
- .parent = &clkdiv32k_ick,
- .enable_reg = AM33XX_CM_PER_GPIO3_CLKCTRL,
- .enable_bit = AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT,
- .ops = &clkops_omap2_dflt,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel pruss_ocp_clk_mux_sel[] = {
- { .parent = &l3_gclk, .rates = div_1_0_rates },
- { .parent = &dpll_disp_m2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk pruss_ocp_gclk = {
- .name = "pruss_ocp_gclk",
- .clkdm_name = "pruss_ocp_clkdm",
- .parent = &l3_gclk,
- .init = &omap2_init_clksel_parent,
- .clksel = pruss_ocp_clk_mux_sel,
- .clksel_reg = AM33XX_CLKSEL_PRUSS_OCP_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel lcd_clk_mux_sel[] = {
- { .parent = &dpll_disp_m2_ck, .rates = div_1_0_rates },
- { .parent = &dpll_core_m5_ck, .rates = div_1_1_rates },
- { .parent = &dpll_per_m2_ck, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static struct clk lcd_gclk = {
- .name = "lcd_gclk",
- .clkdm_name = "lcdc_clkdm",
- .parent = &dpll_disp_m2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = lcd_clk_mux_sel,
- .clksel_reg = AM33XX_CLKSEL_LCDC_PIXEL_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmc_clk = {
- .name = "mmc_clk",
- .clkdm_name = "l4ls_clkdm",
- .parent = &dpll_per_m2_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk mmc2_fck = {
- .name = "mmc2_fck",
- .clkdm_name = "l3s_clkdm",
- .parent = &mmc_clk,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel gfx_clksel_sel[] = {
- { .parent = &dpll_core_m4_ck, .rates = div_1_0_rates },
- { .parent = &dpll_per_m2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk gfx_fclk_clksel_ck = {
- .name = "gfx_fclk_clksel_ck",
- .parent = &dpll_core_m4_ck,
- .clksel = gfx_clksel_sel,
- .ops = &clkops_null,
- .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
- .clksel_mask = AM33XX_CLKSEL_GFX_FCLK_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate div_1_0_2_1_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
- { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
- { .div = 0 },
-};
-
-static const struct clksel gfx_div_sel[] = {
- { .parent = &gfx_fclk_clksel_ck, .rates = div_1_0_2_1_rates },
- { .parent = NULL },
-};
-
-static struct clk gfx_fck_div_ck = {
- .name = "gfx_fck_div_ck",
- .clkdm_name = "gfx_l3_clkdm",
- .parent = &gfx_fclk_clksel_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = gfx_div_sel,
- .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
- .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
- .ops = &clkops_null,
-};
-
-static const struct clksel sysclkout_pre_sel[] = {
- { .parent = &clk_32768_ck, .rates = div_1_0_rates },
- { .parent = &l3_gclk, .rates = div_1_1_rates },
- { .parent = &dpll_ddr_m2_ck, .rates = div_1_2_rates },
- { .parent = &dpll_per_m2_ck, .rates = div_1_3_rates },
- { .parent = &lcd_gclk, .rates = div_1_4_rates },
- { .parent = NULL },
-};
-
-static struct clk sysclkout_pre_ck = {
- .name = "sysclkout_pre_ck",
- .parent = &clk_32768_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = sysclkout_pre_sel,
- .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
- .clksel_mask = AM33XX_CLKOUT2SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* Divide-by-1..8 clock rates; the default divider is 1/1 */
-static const struct clksel_rate div8_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
- { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
- { .div = 3, .val = 2, .flags = RATE_IN_AM33XX },
- { .div = 4, .val = 3, .flags = RATE_IN_AM33XX },
- { .div = 5, .val = 4, .flags = RATE_IN_AM33XX },
- { .div = 6, .val = 5, .flags = RATE_IN_AM33XX },
- { .div = 7, .val = 6, .flags = RATE_IN_AM33XX },
- { .div = 8, .val = 7, .flags = RATE_IN_AM33XX },
- { .div = 0 },
-};
-
-static const struct clksel clkout2_div[] = {
- { .parent = &sysclkout_pre_ck, .rates = div8_rates },
- { .parent = NULL },
-};
-
-static struct clk clkout2_ck = {
- .name = "clkout2_ck",
- .parent = &sysclkout_pre_ck,
- .ops = &clkops_omap2_dflt,
- .clksel = clkout2_div,
- .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
- .clksel_mask = AM33XX_CLKOUT2DIV_MASK,
- .enable_reg = AM33XX_CM_CLKOUT_CTRL,
- .enable_bit = AM33XX_CLKOUT2EN_SHIFT,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
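As the div8_rates table above shows, the CLKOUT2DIV field simply encodes the divider as (val + 1). A one-line helper capturing that mapping, for illustration only (the name is made up; the real rate computation is done by omap2_clksel_recalc):

/* clkout2_ck rate from the sysclkout_pre_ck rate and the CLKOUT2DIV field.
 * Illustrative only: the table above maps val 0..7 to dividers 1..8. */
static unsigned long clkout2_rate(unsigned long pre_rate, unsigned int clkout2div)
{
	return pre_rate / (clkout2div + 1);
}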
-
-static const struct clksel wdt_clkmux_sel[] = {
- { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
- { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk wdt1_fck = {
- .name = "wdt1_fck",
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &clk_rc32k_ck,
- .init = &omap2_init_clksel_parent,
- .clksel = wdt_clkmux_sel,
- .clksel_reg = AM33XX_CLKSEL_WDT1_CLK,
- .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * clkdev
- */
-static struct omap_clk am33xx_clks[] = {
- CLK(NULL, "clk_32768_ck", &clk_32768_ck, CK_AM33XX),
- CLK(NULL, "clk_rc32k_ck", &clk_rc32k_ck, CK_AM33XX),
- CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_AM33XX),
- CLK(NULL, "virt_24000000_ck", &virt_24000000_ck, CK_AM33XX),
- CLK(NULL, "virt_25000000_ck", &virt_25000000_ck, CK_AM33XX),
- CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_AM33XX),
- CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_AM33XX),
- CLK(NULL, "tclkin_ck", &tclkin_ck, CK_AM33XX),
- CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_AM33XX),
- CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_AM33XX),
- CLK(NULL, "dpll_core_m4_ck", &dpll_core_m4_ck, CK_AM33XX),
- CLK(NULL, "dpll_core_m5_ck", &dpll_core_m5_ck, CK_AM33XX),
- CLK(NULL, "dpll_core_m6_ck", &dpll_core_m6_ck, CK_AM33XX),
- CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_AM33XX),
- CLK("cpu0", NULL, &dpll_mpu_ck, CK_AM33XX),
- CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_AM33XX),
- CLK(NULL, "dpll_ddr_ck", &dpll_ddr_ck, CK_AM33XX),
- CLK(NULL, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck, CK_AM33XX),
- CLK(NULL, "dpll_ddr_m2_div2_ck", &dpll_ddr_m2_div2_ck, CK_AM33XX),
- CLK(NULL, "dpll_disp_ck", &dpll_disp_ck, CK_AM33XX),
- CLK(NULL, "dpll_disp_m2_ck", &dpll_disp_m2_ck, CK_AM33XX),
- CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_AM33XX),
- CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_AM33XX),
- CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", &dpll_per_m2_div4_wkupdm_ck, CK_AM33XX),
- CLK(NULL, "dpll_per_m2_div4_ck", &dpll_per_m2_div4_ck, CK_AM33XX),
- CLK(NULL, "adc_tsc_fck", &adc_tsc_fck, CK_AM33XX),
- CLK(NULL, "cefuse_fck", &cefuse_fck, CK_AM33XX),
- CLK(NULL, "clkdiv32k_ick", &clkdiv32k_ick, CK_AM33XX),
- CLK(NULL, "dcan0_fck", &dcan0_fck, CK_AM33XX),
- CLK("481cc000.d_can", NULL, &dcan0_fck, CK_AM33XX),
- CLK(NULL, "dcan1_fck", &dcan1_fck, CK_AM33XX),
- CLK("481d0000.d_can", NULL, &dcan1_fck, CK_AM33XX),
- CLK(NULL, "debugss_ick", &debugss_ick, CK_AM33XX),
- CLK(NULL, "pruss_ocp_gclk", &pruss_ocp_gclk, CK_AM33XX),
- CLK("davinci-mcasp.0", NULL, &mcasp0_fck, CK_AM33XX),
- CLK("davinci-mcasp.1", NULL, &mcasp1_fck, CK_AM33XX),
- CLK(NULL, "mcasp0_fck", &mcasp0_fck, CK_AM33XX),
- CLK(NULL, "mcasp1_fck", &mcasp1_fck, CK_AM33XX),
- CLK("NULL", "mmc2_fck", &mmc2_fck, CK_AM33XX),
- CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
- CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
- CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
- CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
- CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
- CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
- CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
- CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
- CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
- CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
- CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
- CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
- CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
- CLK(NULL, "l4_rtc_gclk", &l4_rtc_gclk, CK_AM33XX),
- CLK(NULL, "l3_gclk", &l3_gclk, CK_AM33XX),
- CLK(NULL, "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck, CK_AM33XX),
- CLK(NULL, "l4hs_gclk", &l4hs_gclk, CK_AM33XX),
- CLK(NULL, "l3s_gclk", &l3s_gclk, CK_AM33XX),
- CLK(NULL, "l4fw_gclk", &l4fw_gclk, CK_AM33XX),
- CLK(NULL, "l4ls_gclk", &l4ls_gclk, CK_AM33XX),
- CLK(NULL, "clk_24mhz", &clk_24mhz, CK_AM33XX),
- CLK(NULL, "sysclk_div_ck", &sysclk_div_ck, CK_AM33XX),
- CLK(NULL, "cpsw_125mhz_gclk", &cpsw_125mhz_gclk, CK_AM33XX),
- CLK(NULL, "cpsw_cpts_rft_clk", &cpsw_cpts_rft_clk, CK_AM33XX),
- CLK(NULL, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, CK_AM33XX),
- CLK(NULL, "gpio0_dbclk", &gpio0_dbclk, CK_AM33XX),
- CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_AM33XX),
- CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_AM33XX),
- CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_AM33XX),
- CLK(NULL, "lcd_gclk", &lcd_gclk, CK_AM33XX),
- CLK(NULL, "mmc_clk", &mmc_clk, CK_AM33XX),
- CLK(NULL, "gfx_fclk_clksel_ck", &gfx_fclk_clksel_ck, CK_AM33XX),
- CLK(NULL, "gfx_fck_div_ck", &gfx_fck_div_ck, CK_AM33XX),
- CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
- CLK(NULL, "clkout2_ck", &clkout2_ck, CK_AM33XX),
- CLK(NULL, "timer_32k_ck", &clkdiv32k_ick, CK_AM33XX),
- CLK(NULL, "timer_sys_ck", &sys_clkin_ck, CK_AM33XX),
-};
-
-int __init am33xx_clk_init(void)
-{
- struct omap_clk *c;
- u32 cpu_clkflg;
-
- if (soc_is_am33xx()) {
- cpu_mask = RATE_IN_AM33XX;
- cpu_clkflg = CK_AM33XX;
- }
-
- for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++)
- clk_preinit(c->lk.clk);
-
- for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++) {
- if (c->cpu & cpu_clkflg) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
- }
-
- recalculate_root_clocks();
-
- /*
- * Only enable those clocks we will need; let the drivers
- * enable other clocks as necessary.
- */
- clk_enable_init_clocks();
-
- return 0;
-}
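For context, the am33xx_clks table registered above is what a device driver hits when it calls the standard clk API. A consumer-side sketch, assuming dev is the 481cc000.d_can platform device named in the table (the function name is made up and error handling is kept minimal for illustration):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative consumer: look up and enable the clock registered as
 * CLK("481cc000.d_can", NULL, &dcan0_fck, CK_AM33XX) above. */
static int example_enable_dcan_fck(struct device *dev)
{
	struct clk *fck;
	int ret;

	fck = clk_get(dev, NULL);	/* matched via clkdev by device name */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	ret = clk_prepare_enable(fck);
	if (ret) {
		clk_put(fck);
		return ret;
	}

	dev_info(dev, "dcan0_fck running at %lu Hz\n", clk_get_rate(fck));
	return 0;
}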
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index e41819ba748..4596468e50a 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -37,7 +37,7 @@
* from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
-static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
+static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -49,21 +49,16 @@ static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
-
-const struct clkops clkops_omap3430es2_ssi_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = {
.find_idlest = omap3430es2_clk_ssi_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+ .find_companion = omap2_clk_dflt_find_companion,
};
-const struct clkops clkops_omap3430es2_iclk_ssi_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap3430es2_clk_ssi_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_ssi_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
};
/**
@@ -80,7 +75,7 @@ const struct clkops clkops_omap3430es2_iclk_ssi_wait = {
* default find_idlest code assumes that they are at the same
* position.) No return value.
*/
-static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
+static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -94,20 +89,16 @@ static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
-const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
.find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+ .find_companion = omap2_clk_dflt_find_companion,
};
-const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
};
/**
@@ -121,7 +112,7 @@ const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait = {
* shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
* @idlest_reg and @idlest_bit. No return value.
*/
-static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
+static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -134,18 +125,14 @@ static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
-const struct clkops clkops_omap3430es2_hsotgusb_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
.find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+ .find_companion = omap2_clk_dflt_find_companion,
};
-const struct clkops clkops_omap3430es2_iclk_hsotgusb_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = {
.find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
- .allow_idle = omap2_clkt_iclk_allow_idle,
- .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_companion = omap2_clk_dflt_find_companion,
};
diff --git a/arch/arm/mach-omap2/clock3517.c b/arch/arm/mach-omap2/clock3517.c
index 622ea050261..4d79ae2c024 100644
--- a/arch/arm/mach-omap2/clock3517.c
+++ b/arch/arm/mach-omap2/clock3517.c
@@ -47,7 +47,7 @@
 * in the enable register itself at a bit offset of 4 from the enable
 * bit. A value of 1 indicates that the clock is enabled.
*/
-static void am35xx_clk_find_idlest(struct clk *clk,
+static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -71,8 +71,9 @@ static void am35xx_clk_find_idlest(struct clk *clk,
* associate this type of code with per-module data structures to
* avoid this issue, and remove the casts. No return value.
*/
-static void am35xx_clk_find_companion(struct clk *clk, void __iomem **other_reg,
- u8 *other_bit)
+static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
+ void __iomem **other_reg,
+ u8 *other_bit)
{
*other_reg = (__force void __iomem *)(clk->enable_reg);
if (clk->enable_bit & AM35XX_IPSS_ICK_MASK)
@@ -80,10 +81,7 @@ static void am35xx_clk_find_companion(struct clk *clk, void __iomem **other_reg,
else
*other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
}
-
-const struct clkops clkops_am35xx_ipss_module_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
.find_idlest = am35xx_clk_find_idlest,
.find_companion = am35xx_clk_find_companion,
};
@@ -99,7 +97,7 @@ const struct clkops clkops_am35xx_ipss_module_wait = {
* CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg
* and @idlest_bit. No return value.
*/
-static void am35xx_clk_ipss_find_idlest(struct clk *clk,
+static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
void __iomem **idlest_reg,
u8 *idlest_bit,
u8 *idlest_val)
@@ -112,13 +110,9 @@ static void am35xx_clk_ipss_find_idlest(struct clk *clk,
*idlest_val = OMAP34XX_CM_IDLEST_VAL;
}
-const struct clkops clkops_am35xx_ipss_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = am35xx_clk_ipss_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = am35xx_clk_ipss_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
};
-
-
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c
index 0e1e9e4e2fa..8f3bf4e5090 100644
--- a/arch/arm/mach-omap2/clock36xx.c
+++ b/arch/arm/mach-omap2/clock36xx.c
@@ -37,34 +37,32 @@
* (Any other value different from the Read value) to the
* corresponding CM_CLKSEL register will refresh the dividers.
*/
-static int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk *clk)
+int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
+ struct clk_hw_omap *parent;
+ struct clk_hw *parent_hw;
u32 dummy_v, orig_v, clksel_shift;
int ret;
/* Clear PWRDN bit of HSDIVIDER */
ret = omap2_dflt_clk_enable(clk);
+ parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
+ parent = to_clk_hw_omap(parent_hw);
+
/* Restore the dividers */
if (!ret) {
- clksel_shift = __ffs(clk->parent->clksel_mask);
- orig_v = __raw_readl(clk->parent->clksel_reg);
+ clksel_shift = __ffs(parent->clksel_mask);
+ orig_v = __raw_readl(parent->clksel_reg);
dummy_v = orig_v;
/* Write any other value different from the Read value */
dummy_v ^= (1 << clksel_shift);
- __raw_writel(dummy_v, clk->parent->clksel_reg);
+ __raw_writel(dummy_v, parent->clksel_reg);
/* Write the original divider */
- __raw_writel(orig_v, clk->parent->clksel_reg);
+ __raw_writel(orig_v, parent->clksel_reg);
}
return ret;
}
-
-const struct clkops clkops_omap36xx_pwrdn_with_hsdiv_wait_restore = {
- .enable = omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
- .disable = omap2_dflt_clk_disable,
- .find_companion = omap2_clk_dflt_find_companion,
- .find_idlest = omap2_clk_dflt_find_idlest,
-};
diff --git a/arch/arm/mach-omap2/clock36xx.h b/arch/arm/mach-omap2/clock36xx.h
index a7dee5bc636..945bb7f083e 100644
--- a/arch/arm/mach-omap2/clock36xx.h
+++ b/arch/arm/mach-omap2/clock36xx.h
@@ -8,6 +8,6 @@
#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H
#define __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H
-extern const struct clkops clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
+extern int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *hw);
#endif
diff --git a/arch/arm/mach-omap2/clock3xxx.c b/arch/arm/mach-omap2/clock3xxx.c
index 3e8aca2b1b6..4eacab8f117 100644
--- a/arch/arm/mach-omap2/clock3xxx.c
+++ b/arch/arm/mach-omap2/clock3xxx.c
@@ -38,8 +38,8 @@
/* needed by omap3_core_dpll_m2_set_rate() */
struct clk *sdrc_ick_p, *arm_fck_p;
-
-int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
+int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
/*
* According to the 12-5 CDP code from TI, "Limitation 2.5"
@@ -51,7 +51,7 @@ int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
return -EINVAL;
}
- return omap3_noncore_dpll_set_rate(clk, rate);
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}
void __init omap3_clk_lock_dpll5(void)
diff --git a/arch/arm/mach-omap2/clock3xxx.h b/arch/arm/mach-omap2/clock3xxx.h
index 8bbeeaf399e..8cd4b0a882a 100644
--- a/arch/arm/mach-omap2/clock3xxx.h
+++ b/arch/arm/mach-omap2/clock3xxx.h
@@ -9,8 +9,10 @@
#define __ARCH_ARM_MACH_OMAP2_CLOCK3XXX_H
int omap3xxx_clk_init(void);
-int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate);
-int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_core_dpll_m2_set_rate(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
void omap3_clk_lock_dpll5(void);
extern struct clk *sdrc_ick_p;
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
deleted file mode 100644
index 6cca1995395..00000000000
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ /dev/null
@@ -1,3613 +0,0 @@
-/*
- * OMAP3 clock data
- *
- * Copyright (C) 2007-2010, 2012 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Written by Paul Walmsley
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "clock34xx.h"
-#include "clock36xx.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-#include "prm2xxx_3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-
-/*
- * clocks
- */
-
-#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT 2047
-#define OMAP3630_MAX_JTYPE_DPLL_MULT 4095
-#define OMAP3_MAX_DPLL_DIV 128
-
-/*
- * DPLL1 supplies clock to the MPU.
- * DPLL2 supplies clock to the IVA2.
- * DPLL3 supplies CORE domain clocks.
- * DPLL4 supplies peripheral clocks.
- * DPLL5 supplies other peripheral clocks (USBHOST, USIM).
- */
-
-/* Forward declarations for DPLL bypass clocks */
-static struct clk dpll1_fck;
-static struct clk dpll2_fck;
-
-/* PRM CLOCKS */
-
-/* According to timer32k.c, this is a 32768Hz clock, not a 32000Hz clock. */
-static struct clk omap_32k_fck = {
- .name = "omap_32k_fck",
- .ops = &clkops_null,
- .rate = 32768,
-};
-
-static struct clk secure_32k_fck = {
- .name = "secure_32k_fck",
- .ops = &clkops_null,
- .rate = 32768,
-};
-
-/* Virtual source clocks for osc_sys_ck */
-static struct clk virt_12m_ck = {
- .name = "virt_12m_ck",
- .ops = &clkops_null,
- .rate = 12000000,
-};
-
-static struct clk virt_13m_ck = {
- .name = "virt_13m_ck",
- .ops = &clkops_null,
- .rate = 13000000,
-};
-
-static struct clk virt_16_8m_ck = {
- .name = "virt_16_8m_ck",
- .ops = &clkops_null,
- .rate = 16800000,
-};
-
-static struct clk virt_38_4m_ck = {
- .name = "virt_38_4m_ck",
- .ops = &clkops_null,
- .rate = 38400000,
-};
-
-static const struct clksel_rate osc_sys_12m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_13m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_16_8m_rates[] = {
- { .div = 1, .val = 5, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_19_2m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_26m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_38_4m_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel osc_sys_clksel[] = {
- { .parent = &virt_12m_ck, .rates = osc_sys_12m_rates },
- { .parent = &virt_13m_ck, .rates = osc_sys_13m_rates },
- { .parent = &virt_16_8m_ck, .rates = osc_sys_16_8m_rates },
- { .parent = &virt_19200000_ck, .rates = osc_sys_19_2m_rates },
- { .parent = &virt_26000000_ck, .rates = osc_sys_26m_rates },
- { .parent = &virt_38_4m_ck, .rates = osc_sys_38_4m_rates },
- { .parent = NULL },
-};
-
-/* Oscillator clock */
-/* 12, 13, 16.8, 19.2, 26, or 38.4 MHz */
-static struct clk osc_sys_ck = {
- .name = "osc_sys_ck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_PRM_CLKSEL,
- .clksel_mask = OMAP3430_SYS_CLKIN_SEL_MASK,
- .clksel = osc_sys_clksel,
- /* REVISIT: deal with autoextclkmode? */
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate div2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel sys_clksel[] = {
- { .parent = &osc_sys_ck, .rates = div2_rates },
- { .parent = NULL }
-};
-
-/* Latency: this clock is only enabled after PRM_CLKSETUP.SETUP_TIME */
-/* Feeds DPLLs - divided first by PRM_CLKSRC_CTRL.SYSCLKDIV? */
-static struct clk sys_ck = {
- .name = "sys_ck",
- .ops = &clkops_null,
- .parent = &osc_sys_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_PRM_CLKSRC_CTRL,
- .clksel_mask = OMAP_SYSCLKDIV_MASK,
- .clksel = sys_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk sys_altclk = {
- .name = "sys_altclk",
- .ops = &clkops_null,
-};
-
-/* Optional external clock input for some McBSPs */
-static struct clk mcbsp_clks = {
- .name = "mcbsp_clks",
- .ops = &clkops_null,
-};
-
-/* PRM EXTERNAL CLOCK OUTPUT */
-
-static struct clk sys_clkout1 = {
- .name = "sys_clkout1",
- .ops = &clkops_omap2_dflt,
- .parent = &osc_sys_ck,
- .enable_reg = OMAP3430_PRM_CLKOUT_CTRL,
- .enable_bit = OMAP3430_CLKOUT_EN_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DPLLS */
-
-/* CM CLOCKS */
-
-static const struct clksel_rate div16_dpll_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 5, .val = 5, .flags = RATE_IN_3XXX },
- { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
- { .div = 7, .val = 7, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
- { .div = 9, .val = 9, .flags = RATE_IN_3XXX },
- { .div = 10, .val = 10, .flags = RATE_IN_3XXX },
- { .div = 11, .val = 11, .flags = RATE_IN_3XXX },
- { .div = 12, .val = 12, .flags = RATE_IN_3XXX },
- { .div = 13, .val = 13, .flags = RATE_IN_3XXX },
- { .div = 14, .val = 14, .flags = RATE_IN_3XXX },
- { .div = 15, .val = 15, .flags = RATE_IN_3XXX },
- { .div = 16, .val = 16, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate dpll4_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 5, .val = 5, .flags = RATE_IN_3XXX },
- { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
- { .div = 7, .val = 7, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
- { .div = 9, .val = 9, .flags = RATE_IN_3XXX },
- { .div = 10, .val = 10, .flags = RATE_IN_3XXX },
- { .div = 11, .val = 11, .flags = RATE_IN_3XXX },
- { .div = 12, .val = 12, .flags = RATE_IN_3XXX },
- { .div = 13, .val = 13, .flags = RATE_IN_3XXX },
- { .div = 14, .val = 14, .flags = RATE_IN_3XXX },
- { .div = 15, .val = 15, .flags = RATE_IN_3XXX },
- { .div = 16, .val = 16, .flags = RATE_IN_3XXX },
- { .div = 17, .val = 17, .flags = RATE_IN_36XX },
- { .div = 18, .val = 18, .flags = RATE_IN_36XX },
- { .div = 19, .val = 19, .flags = RATE_IN_36XX },
- { .div = 20, .val = 20, .flags = RATE_IN_36XX },
- { .div = 21, .val = 21, .flags = RATE_IN_36XX },
- { .div = 22, .val = 22, .flags = RATE_IN_36XX },
- { .div = 23, .val = 23, .flags = RATE_IN_36XX },
- { .div = 24, .val = 24, .flags = RATE_IN_36XX },
- { .div = 25, .val = 25, .flags = RATE_IN_36XX },
- { .div = 26, .val = 26, .flags = RATE_IN_36XX },
- { .div = 27, .val = 27, .flags = RATE_IN_36XX },
- { .div = 28, .val = 28, .flags = RATE_IN_36XX },
- { .div = 29, .val = 29, .flags = RATE_IN_36XX },
- { .div = 30, .val = 30, .flags = RATE_IN_36XX },
- { .div = 31, .val = 31, .flags = RATE_IN_36XX },
- { .div = 32, .val = 32, .flags = RATE_IN_36XX },
- { .div = 0 }
-};
-
-/* DPLL1 */
-/* MPU clock source */
-/* Type: DPLL */
-static struct dpll_data dpll1_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK,
- .clk_bypass = &dpll1_fck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
- .enable_mask = OMAP3430_EN_MPU_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
- .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
- .idlest_mask = OMAP3430_ST_MPU_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll1_ck = {
- .name = "dpll1_ck",
- .ops = &clkops_omap3_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll1_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed.
- */
-static struct clk dpll1_x2_ck = {
- .name = "dpll1_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll1_ck,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
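The comment above describes the CLKOUTX2 behaviour; reduced to a formula, the x2 output follows twice the DPLL output while the DPLL is locked and just the parent/bypass rate otherwise. A minimal sketch of that rule (illustrative; the real helper is omap3_clkoutx2_recalc, and the lock check here is collapsed into a flag):

/* CLKOUTX2 rule from the comment above: x2 only while the DPLL is locked.
 * Illustrative only. */
static unsigned long clkoutx2_rate(unsigned long parent_rate, int dpll_locked)
{
	return dpll_locked ? parent_rate * 2 : parent_rate;
}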
-
-/* On DPLL1, unlike other DPLLs, the divider is downstream from CLKOUTX2 */
-static const struct clksel div16_dpll1_x2m2_clksel[] = {
- { .parent = &dpll1_x2_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/*
- * Does not exist in the TRM - needed to separate the M2 divider from
- * bypass selection in mpu_ck
- */
-static struct clk dpll1_x2m2_ck = {
- .name = "dpll1_x2m2_ck",
- .ops = &clkops_null,
- .parent = &dpll1_x2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
- .clksel_mask = OMAP3430_MPU_DPLL_CLKOUT_DIV_MASK,
- .clksel = div16_dpll1_x2m2_clksel,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL2 */
-/* IVA2 clock source */
-/* Type: DPLL */
-
-static struct dpll_data dpll2_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK,
- .clk_bypass = &dpll2_fck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
- .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
- (1 << DPLL_LOW_POWER_BYPASS),
- .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
- .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
- .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll2_ck = {
- .name = "dpll2_ck",
- .ops = &clkops_omap3_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll2_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll2_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-static const struct clksel div16_dpll2_m2x2_clksel[] = {
- { .parent = &dpll2_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/*
- * The TRM is conflicted on whether IVA2 clock comes from DPLL2 CLKOUT
- * or CLKOUTX2. CLKOUT seems most plausible.
- */
-static struct clk dpll2_m2_ck = {
- .name = "dpll2_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD,
- OMAP3430_CM_CLKSEL2_PLL),
- .clksel_mask = OMAP3430_IVA2_DPLL_CLKOUT_DIV_MASK,
- .clksel = div16_dpll2_m2x2_clksel,
- .clkdm_name = "dpll2_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * DPLL3
- * Source clock for all interfaces and for some device fclks
- * REVISIT: Also supports fast relock bypass - not included below
- */
-static struct dpll_data dpll3_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP3430_EN_CORE_DPLL_MASK,
- .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
- .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
- .idlest_mask = OMAP3430_ST_CORE_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll3_ck = {
- .name = "dpll3_ck",
- .ops = &clkops_omap3_core_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll3_dd,
- .round_rate = &omap2_dpll_round_rate,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed
- */
-static struct clk dpll3_x2_ck = {
- .name = "dpll3_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel_rate div31_dpll3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 4, .val = 4, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 5, .val = 5, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 6, .val = 6, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 7, .val = 7, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 8, .val = 8, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 9, .val = 9, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 10, .val = 10, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 11, .val = 11, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 12, .val = 12, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 13, .val = 13, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 14, .val = 14, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 15, .val = 15, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 16, .val = 16, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 17, .val = 17, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 18, .val = 18, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 19, .val = 19, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 20, .val = 20, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 21, .val = 21, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 22, .val = 22, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 23, .val = 23, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 24, .val = 24, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 25, .val = 25, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 26, .val = 26, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 27, .val = 27, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 28, .val = 28, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 29, .val = 29, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 30, .val = 30, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 31, .val = 31, .flags = RATE_IN_3430ES2PLUS_36XX },
- { .div = 0 },
-};
-
-static const struct clksel div31_dpll3m2_clksel[] = {
- { .parent = &dpll3_ck, .rates = div31_dpll3_rates },
- { .parent = NULL }
-};
-
-/* DPLL3 output M2 - primary control point for CORE speed */
-static struct clk dpll3_m2_ck = {
- .name = "dpll3_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CORE_DPLL_CLKOUT_DIV_MASK,
- .clksel = div31_dpll3m2_clksel,
- .clkdm_name = "dpll3_clkdm",
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap3_core_dpll_m2_set_rate,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk core_ck = {
- .name = "core_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dpll3_m2x2_ck = {
- .name = "dpll3_m2x2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m2_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static const struct clksel div16_dpll3_clksel[] = {
- { .parent = &dpll3_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/* This virtual clock is the source for dpll3_m3x2_ck */
-static struct clk dpll3_m3_ck = {
- .name = "dpll3_m3_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_DIV_DPLL3_MASK,
- .clksel = div16_dpll3_clksel,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll3_m3x2_ck = {
- .name = "dpll3_m3x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll3_m3_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static struct clk emu_core_alwon_ck = {
- .name = "emu_core_alwon_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m3x2_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* DPLL4 */
-/* Supplies 96 MHz, 54 MHz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd;
-
-static struct dpll_data dpll4_dd_34xx __initdata = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
- .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
- .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
- .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct dpll_data dpll4_dd_3630 __initdata = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
- .mult_mask = OMAP3630_PERIPH_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
- .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
- .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
- .dco_mask = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
- .sddiv_mask = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
- .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .flags = DPLL_J_TYPE
-};
-
-static struct clk dpll4_ck = {
- .name = "dpll4_ck",
- .ops = &clkops_omap3_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll4_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_dpll4_set_rate,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed --
- * XXX does this serve any downstream clocks?
- */
-static struct clk dpll4_x2_ck = {
- .name = "dpll4_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll4_clksel[] = {
- { .parent = &dpll4_ck, .rates = dpll4_rates },
- { .parent = NULL }
-};
-
-/* This virtual clock is the source for dpll4_m2x2_ck */
-static struct clk dpll4_m2_ck = {
- .name = "dpll4_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
- .clksel_mask = OMAP3630_DIV_96M_MASK,
- .clksel = dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m2x2_ck = {
- .name = "dpll4_m2x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_96M_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/*
- * DPLL4 generates DPLL4_M2X2_CLK which is then routed into the PRM as
- * PRM_96M_ALWON_(F)CLK. Two clocks then emerge from the PRM:
- * 96M_ALWON_FCLK (called "omap_96m_alwon_fck" below) and
- * CM_96K_(F)CLK.
- */
-
-/* Adding 192MHz Clock node needed by SGX */
-static struct clk omap_192m_alwon_fck = {
- .name = "omap_192m_alwon_fck",
- .ops = &clkops_null,
- .parent = &dpll4_m2x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_36XX },
- { .div = 2, .val = 2, .flags = RATE_IN_36XX },
- { .div = 0 }
-};
-
-static const struct clksel omap_96m_alwon_fck_clksel[] = {
- { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
- { .parent = NULL }
-};
-
-static const struct clksel_rate omap_96m_dpll_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_96m_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static struct clk omap_96m_alwon_fck = {
- .name = "omap_96m_alwon_fck",
- .ops = &clkops_null,
- .parent = &dpll4_m2x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk omap_96m_alwon_fck_3630 = {
- .name = "omap_96m_alwon_fck",
- .parent = &omap_192m_alwon_fck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_96M_MASK,
- .clksel = omap_96m_alwon_fck_clksel
-};
-
-static struct clk cm_96m_fck = {
- .name = "cm_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_alwon_fck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel omap_96m_fck_clksel[] = {
- { .parent = &cm_96m_fck, .rates = omap_96m_dpll_rates },
- { .parent = &sys_ck, .rates = omap_96m_sys_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_96m_fck = {
- .name = "omap_96m_fck",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_96M_MASK,
- .clksel = omap_96m_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m3x2_ck */
-static struct clk dpll4_m3_ck = {
- .name = "dpll4_m3_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_TV_MASK,
- .clksel = dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m3x2_ck = {
- .name = "dpll4_m3x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m3_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_TV_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel_rate omap_54m_d4m3x2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel omap_54m_clksel[] = {
- { .parent = &dpll4_m3x2_ck, .rates = omap_54m_d4m3x2_rates },
- { .parent = &sys_altclk, .rates = omap_54m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_54m_fck = {
- .name = "omap_54m_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_54M_MASK,
- .clksel = omap_54m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
- { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
- { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_48m_fck = {
- .name = "omap_48m_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_48M_MASK,
- .clksel = omap_48m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk omap_12m_fck = {
- .name = "omap_12m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .fixed_div = 4,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m4x2_ck */
-static struct clk dpll4_m4_ck = {
- .name = "dpll4_m4_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_DSS1_MASK,
- .clksel = dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m4x2_ck = {
- .name = "dpll4_m4x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m4_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m5x2_ck */
-static struct clk dpll4_m5_ck = {
- .name = "dpll4_m5_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_CAM_MASK,
- .clksel = dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m5x2_ck = {
- .name = "dpll4_m5x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m5_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m6x2_ck */
-static struct clk dpll4_m6_ck = {
- .name = "dpll4_m6_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3630_DIV_DPLL4_MASK,
- .clksel = dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m6x2_ck = {
- .name = "dpll4_m6x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m6_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static struct clk emu_per_alwon_ck = {
- .name = "emu_per_alwon_ck",
- .ops = &clkops_null,
- .parent = &dpll4_m6x2_ck,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* DPLL5 */
-/* Supplies 120MHz clock, USIM source clock */
-/* Type: DPLL */
-/* 3430ES2 only */
-static struct dpll_data dpll5_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
- .mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
- .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
- .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
- .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
- .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll5_ck = {
- .name = "dpll5_ck",
- .ops = &clkops_omap3_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll5_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll5_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-static const struct clksel div16_dpll5_clksel[] = {
- { .parent = &dpll5_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-static struct clk dpll5_m2_ck = {
- .name = "dpll5_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll5_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
- .clksel_mask = OMAP3430ES2_DIV_120M_MASK,
- .clksel = div16_dpll5_clksel,
- .clkdm_name = "dpll5_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* CM EXTERNAL CLOCK OUTPUTS */
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel clkout2_src_clksel[] = {
- { .parent = &core_ck, .rates = clkout2_src_core_rates },
- { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
- { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
- { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
- { .parent = NULL }
-};
-
-static struct clk clkout2_src_ck = {
- .name = "clkout2_src_ck",
- .ops = &clkops_omap2_dflt,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP3430_CM_CLKOUT_CTRL,
- .enable_bit = OMAP3430_CLKOUT2_EN_SHIFT,
- .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
- .clksel_mask = OMAP3430_CLKOUT2SOURCE_MASK,
- .clksel = clkout2_src_clksel,
- .clkdm_name = "core_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate sys_clkout2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 16, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel sys_clkout2_clksel[] = {
- { .parent = &clkout2_src_ck, .rates = sys_clkout2_rates },
- { .parent = NULL },
-};
-
-static struct clk sys_clkout2 = {
- .name = "sys_clkout2",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
- .clksel_mask = OMAP3430_CLKOUT2_DIV_MASK,
- .clksel = sys_clkout2_clksel,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate
-};
-
-/* CM OUTPUT CLOCKS */
-
-static struct clk corex2_fck = {
- .name = "corex2_fck",
- .ops = &clkops_null,
- .parent = &dpll3_m2x2_ck,
- .recalc = &followparent_recalc,
-};
-
-/* DPLL power domain clock controls */
-
-static const struct clksel_rate div4_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel div4_core_clksel[] = {
- { .parent = &core_ck, .rates = div4_rates },
- { .parent = NULL }
-};
-
-/*
- * REVISIT: Are these in the DPLL power domain or the CM power domain?
- * The docs may be inconsistent on this point.
- */
-static struct clk dpll1_fck = {
- .name = "dpll1_fck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .clksel_mask = OMAP3430_MPU_CLK_SRC_MASK,
- .clksel = div4_core_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mpu_ck = {
- .name = "mpu_ck",
- .ops = &clkops_null,
- .parent = &dpll1_x2m2_ck,
- .clkdm_name = "mpu_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* arm_fck is divided by two when DPLL1 is locked; otherwise it passes mpu_ck through unchanged */
-static const struct clksel_rate arm_fck_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel arm_fck_clksel[] = {
- { .parent = &mpu_ck, .rates = arm_fck_rates },
- { .parent = NULL }
-};
-
-static struct clk arm_fck = {
- .name = "arm_fck",
- .ops = &clkops_null,
- .parent = &mpu_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
- .clksel_mask = OMAP3430_ST_MPU_CLK_MASK,
- .clksel = arm_fck_clksel,
- .clkdm_name = "mpu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
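
The arm_fck divider above is not hard-coded: the ST_MPU_CLK field is looked up in arm_fck_rates to pick a divider of 1 or 2. A rough, self-contained sketch of that lookup (plain C with simplified stand-in types, not code from this patch, and an assumed 600 MHz parent rate in the usage example):

/*
 * Illustrative sketch only: map a register field value to a divider
 * through a clksel_rate-style table, as the arm_fck entry above does.
 */
#include <stdio.h>

struct rate_entry { unsigned int div; unsigned int val; };

static const struct rate_entry arm_fck_rates_sketch[] = {
	{ .div = 1, .val = 0 },		/* DPLL1 bypassed: mpu_ck passed through */
	{ .div = 2, .val = 1 },		/* DPLL1 locked: mpu_ck / 2 */
	{ .div = 0 },			/* table terminator */
};

/* Return parent_rate divided by whatever divider 'field_val' selects. */
static unsigned long recalc_rate_sketch(unsigned long parent_rate,
					unsigned int field_val)
{
	const struct rate_entry *r;

	for (r = arm_fck_rates_sketch; r->div; r++)
		if (r->val == field_val)
			return parent_rate / r->div;
	return parent_rate;		/* unknown value: treat as passthrough */
}

int main(void)
{
	/* 600 MHz is only an assumed example rate for mpu_ck. */
	printf("%lu\n", recalc_rate_sketch(600000000UL, 1));	/* 300000000 */
	return 0;
}
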
-
-/* XXX What about neon_clkdm? */
-
-/*
- * REVISIT: This clock is never specifically defined in the 3430 TRM,
- * although it is referenced, so this is a guess
- */
-static struct clk emu_mpu_alwon_ck = {
- .name = "emu_mpu_alwon_ck",
- .ops = &clkops_null,
- .parent = &mpu_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dpll2_fck = {
- .name = "dpll2_fck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .clksel_mask = OMAP3430_IVA2_CLK_SRC_MASK,
- .clksel = div4_core_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk iva2_ck = {
- .name = "iva2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll2_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
- .clkdm_name = "iva2_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* Common interface clocks */
-
-static const struct clksel div2_core_clksel[] = {
- { .parent = &core_ck, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk l3_ick = {
- .name = "l3_ick",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_L3_MASK,
- .clksel = div2_core_clksel,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel div2_l3_clksel[] = {
- { .parent = &l3_ick, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk l4_ick = {
- .name = "l4_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_L4_MASK,
- .clksel = div2_l3_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel div2_l4_clksel[] = {
- { .parent = &l4_ick, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk rm_ick = {
- .name = "rm_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_RM_MASK,
- .clksel = div2_l4_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* GFX power domain */
-
-/* GFX clocks are in 3430ES1 only. 3430ES2 and later use the SGX instead */
-
-static const struct clksel gfx_l3_clksel[] = {
- { .parent = &l3_ick, .rates = gfx_l3_rates },
- { .parent = NULL }
-};
-
-/*
- * Virtual parent clock for gfx_l3_ick and gfx_l3_fck
- * This interface clock does not have a CM_AUTOIDLE bit
- */
-static struct clk gfx_l3_ck = {
- .name = "gfx_l3_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
- .enable_bit = OMAP_EN_GFX_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_l3_fck = {
- .name = "gfx_l3_fck",
- .ops = &clkops_null,
- .parent = &gfx_l3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_l3_clksel,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gfx_l3_ick = {
- .name = "gfx_l3_ick",
- .ops = &clkops_null,
- .parent = &gfx_l3_ck,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_cg1_ck = {
- .name = "gfx_cg1_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &gfx_l3_fck, /* REVISIT: correct? */
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES1_EN_2D_SHIFT,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_cg2_ck = {
- .name = "gfx_cg2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &gfx_l3_fck, /* REVISIT: correct? */
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES1_EN_3D_SHIFT,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SGX power domain - 3430ES2 only */
-
-static const struct clksel_rate sgx_core_rates[] = {
- { .div = 2, .val = 5, .flags = RATE_IN_36XX },
- { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel_rate sgx_192m_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_36XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate sgx_corex2_rates[] = {
- { .div = 3, .val = 6, .flags = RATE_IN_36XX },
- { .div = 5, .val = 7, .flags = RATE_IN_36XX },
- { .div = 0 },
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel sgx_clksel[] = {
- { .parent = &core_ck, .rates = sgx_core_rates },
- { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
- { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
- { .parent = &corex2_fck, .rates = sgx_corex2_rates },
- { .parent = NULL }
-};
-
-static struct clk sgx_fck = {
- .name = "sgx_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES2_CLKSEL_SGX_MASK,
- .clksel = sgx_clksel,
- .clkdm_name = "sgx_clkdm",
- .recalc = &omap2_clksel_recalc,
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate
-};
-
-/* This interface clock does not have a CM_AUTOIDLE bit */
-static struct clk sgx_ick = {
- .name = "sgx_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
- .clkdm_name = "sgx_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* CORE power domain */
-
-static struct clk d2d_26m_fck = {
- .name = "d2d_26m_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES1_EN_D2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk modem_fck = {
- .name = "modem_fck",
- .ops = &clkops_omap2_mdmclk_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MODEM_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sad2d_ick = {
- .name = "sad2d_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SAD2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mad2d_ick = {
- .name = "mad2d_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP3430_EN_MAD2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel omap343x_gpt_clksel[] = {
- { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
- { .parent = &sys_ck, .rates = gpt_sys_rates },
- { .parent = NULL}
-};
-
-static struct clk gpt10_fck = {
- .name = "gpt10_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_GPT10_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT10_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt11_fck = {
- .name = "gpt11_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_GPT11_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT11_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk cpefuse_fck = {
- .name = "cpefuse_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ts_fck = {
- .name = "ts_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_32k_fck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_TS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbtll_fck = {
- .name = "usbtll_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll5_m2_ck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE 96M FCLK-derived clocks */
-
-static struct clk core_96m_fck = {
- .name = "core_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs3_fck = {
- .name = "mmchs3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_fck = {
- .name = "mmchs2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MMC2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_fck = {
- .name = "mspro_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_fck = {
- .name = "mmchs1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MMC1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c3_fck = {
- .name = "i2c3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_fck = {
- .name = "i2c2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_fck = {
- .name = "i2c1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * MCBSP 1 & 5 get their 96MHz clock from core_96m_fck;
- * MCBSP 2, 3, 4 get their 96MHz clock from per_96m_fck.
- */
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
- { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp5_fck = {
- .name = "mcbsp5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP5_CLKS_MASK,
- .clksel = mcbsp_15_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp1_fck = {
- .name = "mcbsp1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
- .clksel = mcbsp_15_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
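
The two mcbsp_15_clksel alternatives above are chosen by the CLKS field in the Control Module register named in clksel_reg. A rough, self-contained sketch of that parent lookup (plain C with simplified stand-in types and names, not code from this patch), in the spirit of omap2_init_clksel_parent() walking mcbsp_15_clksel:

/*
 * Illustrative sketch only: find which parent a clksel field value
 * selects by scanning each parent's rate table for a matching .val.
 */
#include <stdio.h>
#include <stddef.h>

struct rate_entry { unsigned int div; unsigned int val; };

struct sel_entry {
	const char *parent;			/* NULL terminates the table */
	const struct rate_entry *rates;
};

static const struct rate_entry r_96m[]   = { { 1, 0 }, { 0 } };
static const struct rate_entry r_mcbsp[] = { { 1, 1 }, { 0 } };

static const struct sel_entry mcbsp_15_sel[] = {
	{ "core_96m_fck", r_96m },
	{ "mcbsp_clks",   r_mcbsp },
	{ NULL }
};

/* Return the parent whose rate table contains 'field_val', or NULL. */
static const char *sel_parent(const struct sel_entry *sel, unsigned int field_val)
{
	for (; sel->parent; sel++) {
		const struct rate_entry *r;

		for (r = sel->rates; r->div; r++)
			if (r->val == field_val)
				return sel->parent;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", sel_parent(mcbsp_15_sel, 0));	/* core_96m_fck */
	printf("%s\n", sel_parent(mcbsp_15_sel, 1));	/* mcbsp_clks   */
	return 0;
}
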
-
-/* CORE_48M_FCK-derived clocks */
-
-static struct clk core_48m_fck = {
- .name = "core_48m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi4_fck = {
- .name = "mcspi4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
- .recalc = &followparent_recalc,
- .clkdm_name = "core_l4_clkdm",
-};
-
-static struct clk mcspi3_fck = {
- .name = "mcspi3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
- .recalc = &followparent_recalc,
- .clkdm_name = "core_l4_clkdm",
-};
-
-static struct clk mcspi2_fck = {
- .name = "mcspi2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
- .clkdm_name = "core_l4_clkdm",
-};
-
-static struct clk mcspi1_fck = {
- .name = "mcspi1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
- .clkdm_name = "core_l4_clkdm",
-};
-
-static struct clk uart2_fck = {
- .name = "uart2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_UART2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_fck = {
- .name = "uart1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_UART1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk fshostusb_fck = {
- .name = "fshostusb_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE_12M_FCK based clocks */
-
-static struct clk core_12m_fck = {
- .name = "core_12m_fck",
- .ops = &clkops_null,
- .parent = &omap_12m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_fck = {
- .name = "hdq_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_12m_fck,
- .clkdm_name = "core_l4_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DPLL3-derived clock */
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
- { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
- { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
- { .parent = NULL }
-};
-
-static struct clk ssi_ssr_fck_3430es1 = {
- .name = "ssi_ssr_fck",
- .ops = &clkops_omap2_dflt,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk ssi_ssr_fck_3430es2 = {
- .name = "ssi_ssr_fck",
- .ops = &clkops_omap3430es2_ssi_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk ssi_sst_fck_3430es1 = {
- .name = "ssi_sst_fck",
- .ops = &clkops_null,
- .parent = &ssi_ssr_fck_3430es1,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk ssi_sst_fck_3430es2 = {
- .name = "ssi_sst_fck",
- .ops = &clkops_null,
- .parent = &ssi_ssr_fck_3430es2,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-
-/* CORE_L3_ICK based clocks */
-
-/*
- * XXX must add clk_enable/clk_disable for these if standard code won't
- * handle it
- */
-static struct clk core_l3_ick = {
- .name = "core_l3_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_ick_3430es1 = {
- .name = "hsotgusb_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_ick_3430es2 = {
- .name = "hsotgusb_ick",
- .ops = &clkops_omap3430es2_iclk_hsotgusb_wait,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* This interface clock does not have a CM_AUTOIDLE bit */
-static struct clk sdrc_ick = {
- .name = "sdrc_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SDRC_SHIFT,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpmc_fck = {
- .name = "gpmc_fck",
- .ops = &clkops_null,
- .parent = &core_l3_ick,
- .flags = ENABLE_ON_INIT, /* XXX why enabled at init? */
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SECURITY_L3_ICK based clocks */
-
-static struct clk security_l3_ick = {
- .name = "security_l3_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pka_ick = {
- .name = "pka_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &security_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_PKA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE_L4_ICK based clocks */
-
-static struct clk core_l4_ick = {
- .name = "core_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbtll_ick = {
- .name = "usbtll_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs3_ick = {
- .name = "mmchs3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* Intersystem Communication Registers - chassis mode only */
-static struct clk icr_ick = {
- .name = "icr_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_ICR_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes2_ick = {
- .name = "aes2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_AES2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha12_ick = {
- .name = "sha12_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SHA12_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk des2_ick = {
- .name = "des2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_DES2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_ick = {
- .name = "mmchs2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MMC2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_ick = {
- .name = "mmchs1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MMC1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_ick = {
- .name = "mspro_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_ick = {
- .name = "hdq_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HDQ_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi4_ick = {
- .name = "mcspi4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_ick = {
- .name = "mcspi3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_ick = {
- .name = "mcspi2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_ick = {
- .name = "mcspi1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c3_ick = {
- .name = "i2c3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_ick = {
- .name = "i2c2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_ick = {
- .name = "i2c1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_ick = {
- .name = "uart2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_UART2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_ick = {
- .name = "uart1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_UART1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt11_ick = {
- .name = "gpt11_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_GPT11_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt10_ick = {
- .name = "gpt10_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_GPT10_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp5_ick = {
- .name = "mcbsp5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp1_ick = {
- .name = "mcbsp1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_ick = {
- .name = "fac_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES1_EN_FAC_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mailboxes_ick = {
- .name = "mailboxes_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk omapctrl_ick = {
- .name = "omapctrl_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SSI_L4_ICK based clocks */
-
-static struct clk ssi_l4_ick = {
- .name = "ssi_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk ssi_ick_3430es1 = {
- .name = "ssi_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &ssi_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk ssi_ick_3430es2 = {
- .name = "ssi_ick",
- .ops = &clkops_omap3430es2_iclk_ssi_wait,
- .parent = &ssi_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* REVISIT: Technically the TRM claims that this is CORE_CLK based,
- * but l4_ick makes more sense to me */
-
-static const struct clksel usb_l4_clksel[] = {
- { .parent = &l4_ick, .rates = div2_rates },
- { .parent = NULL },
-};
-
-static struct clk usb_l4_ick = {
- .name = "usb_l4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &l4_ick,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
- .clksel = usb_l4_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* SECURITY_L4_ICK2 based clocks */
-
-static struct clk security_l4_ick2 = {
- .name = "security_l4_ick2",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes1_ick = {
- .name = "aes1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_AES1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rng_ick = {
- .name = "rng_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_RNG_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha11_ick = {
- .name = "sha11_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_SHA11_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk des1_ick = {
- .name = "des1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_DES1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DSS */
-static struct clk dss1_alwon_fck_3430es1 = {
- .name = "dss1_alwon_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll4_m4x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS1_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss1_alwon_fck_3430es2 = {
- .name = "dss1_alwon_fck",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &dpll4_m4x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS1_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_tv_fck = {
- .name = "dss_tv_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_54m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_TV_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_96m_fck = {
- .name = "dss_96m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_TV_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss2_alwon_fck = {
- .name = "dss2_alwon_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS2_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_ick_3430es1 = {
- /* Handles both L3 and L4 clocks */
- .name = "dss_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_ick_3430es2 = {
- /* Handles both L3 and L4 clocks */
- .name = "dss_ick",
- .ops = &clkops_omap3430es2_iclk_dss_usbhost_wait,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* CAM */
-
-static struct clk cam_mclk = {
- .name = "cam_mclk",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll4_m5x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_CAM_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk cam_ick = {
- /* Handles both L3 and L4 clocks */
- .name = "cam_ick",
- .ops = &clkops_omap2_iclk_dflt,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_CAM_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk csi2_96m_fck = {
- .name = "csi2_96m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_CSI2_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* USBHOST - 3430ES2 only */
-
-static struct clk usbhost_120m_fck = {
- .name = "usbhost_120m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll5_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbhost_48m_fck = {
- .name = "usbhost_48m_fck",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &omap_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbhost_ick = {
- /* Handles both L3 and L4 clocks */
- .name = "usbhost_ick",
- .ops = &clkops_omap3430es2_iclk_dss_usbhost_wait,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* WKUP */
-
-static const struct clksel_rate usim_96m_rates[] = {
- { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
- { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
- { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
- { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
- { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
- { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel usim_clksel[] = {
- { .parent = &omap_96m_fck, .rates = usim_96m_rates },
- { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
- { .parent = &sys_ck, .rates = div2_rates },
- { .parent = NULL },
-};
-
-/* 3430ES2 only */
-static struct clk usim_fck = {
- .name = "usim_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES2_CLKSEL_USIMOCP_MASK,
- .clksel = usim_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* XXX should gpt1's clksel have wkup_32k_fck as the 32k opt? */
-static struct clk gpt1_fck = {
- .name = "gpt1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT1_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT1_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk wkup_32k_fck = {
- .name = "wkup_32k_fck",
- .ops = &clkops_null,
- .parent = &omap_32k_fck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_dbck = {
- .name = "gpio1_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &wkup_32k_fck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt2_fck = {
- .name = "wdt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_32k_fck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_WDT2_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wkup_l4_ick = {
- .name = "wkup_l4_ick",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* 3430ES2 only */
-/* Never specifically named in the TRM, so we have to infer a likely name */
-static struct clk usim_ick = {
- .name = "usim_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt2_ick = {
- .name = "wdt2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT2_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt1_ick = {
- .name = "wdt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_ick = {
- .name = "gpio1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk omap_32ksync_ick = {
- .name = "omap_32ksync_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_32KSYNC_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* XXX This clock no longer exists in 3430 TRM rev F */
-static struct clk gpt12_ick = {
- .name = "gpt12_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT12_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt1_ick = {
- .name = "gpt1_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-
-/* PER clock domain */
-
-static struct clk per_96m_fck = {
- .name = "per_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_alwon_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk per_48m_fck = {
- .name = "per_48m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_fck = {
- .name = "uart3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_UART3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart4_fck = {
- .name = "uart4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3630_EN_UART4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart4_fck_am35xx = {
- .name = "uart4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = AM35XX_EN_UART4_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_fck = {
- .name = "gpt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT2_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT2_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt3_fck = {
- .name = "gpt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT3_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT3_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt4_fck = {
- .name = "gpt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT4_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT4_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt5_fck = {
- .name = "gpt5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT5_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT5_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt6_fck = {
- .name = "gpt6_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT6_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT6_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt7_fck = {
- .name = "gpt7_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT7_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT7_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt8_fck = {
- .name = "gpt8_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT8_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT8_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt9_fck = {
- .name = "gpt9_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT9_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT9_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk per_32k_alwon_fck = {
- .name = "per_32k_alwon_fck",
- .ops = &clkops_null,
- .parent = &omap_32k_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_dbck = {
- .name = "gpio6_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_dbck = {
- .name = "gpio5_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_dbck = {
- .name = "gpio4_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_dbck = {
- .name = "gpio3_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_dbck = {
- .name = "gpio2_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_fck = {
- .name = "wdt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_WDT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk per_l4_ick = {
- .name = "per_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_ick = {
- .name = "gpio6_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_ick = {
- .name = "gpio5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_ick = {
- .name = "gpio4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_ick = {
- .name = "gpio3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_ick = {
- .name = "gpio2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_ick = {
- .name = "wdt3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_ick = {
- .name = "uart3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_UART3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart4_ick = {
- .name = "uart4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3630_EN_UART4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt9_ick = {
- .name = "gpt9_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT9_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt8_ick = {
- .name = "gpt8_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT8_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt7_ick = {
- .name = "gpt7_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT7_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt6_ick = {
- .name = "gpt6_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt5_ick = {
- .name = "gpt5_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt4_ick = {
- .name = "gpt4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt3_ick = {
- .name = "gpt3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_ick = {
- .name = "gpt2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp2_ick = {
- .name = "mcbsp2_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp3_ick = {
- .name = "mcbsp3_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp4_ick = {
- .name = "mcbsp4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel mcbsp_234_clksel[] = {
- { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp2_fck = {
- .name = "mcbsp2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp3_fck = {
- .name = "mcbsp3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP3_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp4_fck = {
- .name = "mcbsp4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP4_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* EMU clocks */
-
-/* More information: ARM Cortex-A8 Technical Reference Manual, sect 10.1 */
-
-static const struct clksel_rate emu_src_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
- { .parent = &sys_ck, .rates = emu_src_sys_rates },
- { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
- { .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates },
- { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates },
- { .parent = NULL },
-};
-
-/*
- * Like the clkout_src clocks, emu_src_clk is a virtual clock, existing only
- * to switch the source of some of the EMU clocks.
- * XXX Are there CLKEN bits for these EMU clks?
- */
-static struct clk emu_src_ck = {
- .name = "emu_src_ck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_MUX_CTRL_MASK,
- .clksel = emu_src_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
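
Since emu_src_ck only switches sources (every entry in its rate tables has div = 1), the EMU rates are set one level down, where pclk_fck, pclkx2_fck, atclk_fck and traceclk_fck divide it. A rough, self-contained sketch of that two-step chain (plain C; the 200 MHz source rate and the /2 setting are purely assumed for the example, not taken from this patch):

/*
 * Illustrative sketch only: emu_src_ck is a rate-neutral mux; downstream
 * EMU clocks such as pclk_fck then apply their own divider.
 */
#include <stdio.h>

/* Mux step: emu_src_ck just passes the selected source rate through. */
static unsigned long emu_src_rate_sketch(unsigned long selected_parent_rate)
{
	return selected_parent_rate;	/* div = 1 in every emu_src rate table */
}

/* Divider step: e.g. pclk_fck with CLKSEL_PCLK selecting /2, /3, /4 or /6. */
static unsigned long pclk_rate_sketch(unsigned long emu_src_rate, unsigned int div)
{
	return emu_src_rate / div;
}

int main(void)
{
	unsigned long src = emu_src_rate_sketch(200000000UL);	/* assumed source rate */

	printf("%lu\n", pclk_rate_sketch(src, 2));	/* 100000000 */
	return 0;
}
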
-
-static const struct clksel_rate pclk_emu_rates[] = {
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel pclk_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = pclk_emu_rates },
- { .parent = NULL },
-};
-
-static struct clk pclk_fck = {
- .name = "pclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_PCLK_MASK,
- .clksel = pclk_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate pclkx2_emu_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel pclkx2_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = pclkx2_emu_rates },
- { .parent = NULL },
-};
-
-static struct clk pclkx2_fck = {
- .name = "pclkx2_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_PCLKX2_MASK,
- .clksel = pclkx2_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel atclk_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = div2_rates },
- { .parent = NULL },
-};
-
-static struct clk atclk_fck = {
- .name = "atclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_ATCLK_MASK,
- .clksel = atclk_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk traceclk_src_fck = {
- .name = "traceclk_src_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_TRACE_MUX_CTRL_MASK,
- .clksel = emu_src_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate traceclk_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
- { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
- { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
- { .div = 0 },
-};
-
-static const struct clksel traceclk_clksel[] = {
- { .parent = &traceclk_src_fck, .rates = traceclk_rates },
- { .parent = NULL },
-};
-
-static struct clk traceclk_fck = {
- .name = "traceclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_TRACECLK_MASK,
- .clksel = traceclk_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* SR clocks */
-
-/* SmartReflex fclk (VDD1) */
-static struct clk sr1_fck = {
- .name = "sr1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_SR1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SmartReflex fclk (VDD2) */
-static struct clk sr2_fck = {
- .name = "sr2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_SR2_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sr_l4_ick = {
- .name = "sr_l4_ick",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SECURE_32K_FCK clocks */
-
-static struct clk gpt12_fck = {
- .name = "gpt12_fck",
- .ops = &clkops_null,
- .parent = &secure_32k_fck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt1_fck = {
- .name = "wdt1_fck",
- .ops = &clkops_null,
- .parent = &secure_32k_fck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* Clocks for AM35XX */
-static struct clk ipss_ick = {
- .name = "ipss_ick",
- .ops = &clkops_am35xx_ipss_wait,
- .parent = &core_l3_ick,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = AM35XX_EN_IPSS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk emac_ick = {
- .name = "emac_ick",
- .ops = &clkops_am35xx_ipss_module_wait,
- .parent = &ipss_ick,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rmii_ck = {
- .name = "rmii_ck",
- .ops = &clkops_null,
- .rate = 50000000,
-};
-
-static struct clk emac_fck = {
- .name = "emac_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &rmii_ck,
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_CPGMAC_FCLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_ick_am35xx = {
- .name = "hsotgusb_ick",
- .ops = &clkops_am35xx_ipss_module_wait,
- .parent = &ipss_ick,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_fck_am35xx = {
- .name = "hsotgusb_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_USBOTG_FCLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk hecc_ck = {
- .name = "hecc_ck",
- .ops = &clkops_am35xx_ipss_module_wait,
- .parent = &sys_ck,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_HECC_VBUSP_CLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk vpfe_ick = {
- .name = "vpfe_ick",
- .ops = &clkops_am35xx_ipss_module_wait,
- .parent = &ipss_ick,
- .clkdm_name = "core_l3_clkdm",
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_VPFE_VBUSP_CLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pclk_ck = {
- .name = "pclk_ck",
- .ops = &clkops_null,
- .rate = 27000000,
-};
-
-static struct clk vpfe_fck = {
- .name = "vpfe_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &pclk_ck,
- .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
- .enable_bit = AM35XX_VPFE_FCLK_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/*
- * The UART1/2 functional clock acts as the functional clock for
- * UART4; no separate fclk control is available. XXX Well now we have a
- * uart4_fck that is apparently used as the UART4 functional clock,
- * but it also seems that uart1_fck or uart2_fck are still needed, at
- * least for UART4 softresets to complete. This really needs
- * clarification.
- */
-static struct clk uart4_ick_am35xx = {
- .name = "uart4_ick",
- .ops = &clkops_omap2_iclk_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = AM35XX_EN_UART4_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
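/*
 * Editorial sketch, not part of the original source: per the comment above,
 * a hypothetical AM35xx driver fragment would keep uart1_fck (or uart2_fck)
 * running while it resets UART4.  The function and variable names below are
 * illustrative only; error handling and clk_put() are elided for brevity.
 */
#include <linux/clk.h>
#include <linux/err.h>

static void toy_uart4_softreset_prepare(struct device *dev)
{
	struct clk *uart4_fck = clk_get(dev, "uart4_fck");
	struct clk *uart1_fck = clk_get(NULL, "uart1_fck");

	if (IS_ERR(uart4_fck) || IS_ERR(uart1_fck))
		return;

	clk_prepare_enable(uart1_fck);	/* apparently needed for the softreset */
	clk_prepare_enable(uart4_fck);
	/* ... issue the UART4 softreset, then disable/put the clocks ... */
}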
-
-static struct clk dummy_apb_pclk = {
- .name = "apb_pclk",
- .ops = &clkops_null,
-};
-
-/*
- * clkdev
- */
-
-static struct omap_clk omap3xxx_clks[] = {
- CLK(NULL, "apb_pclk", &dummy_apb_pclk, CK_3XXX),
- CLK(NULL, "omap_32k_fck", &omap_32k_fck, CK_3XXX),
- CLK(NULL, "virt_12m_ck", &virt_12m_ck, CK_3XXX),
- CLK(NULL, "virt_13m_ck", &virt_13m_ck, CK_3XXX),
- CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_3XXX),
- CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_3XXX),
- CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck, CK_3XXX),
- CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_3XXX),
- CLK("twl", "fck", &osc_sys_ck, CK_3XXX),
- CLK(NULL, "sys_ck", &sys_ck, CK_3XXX),
- CLK(NULL, "sys_altclk", &sys_altclk, CK_3XXX),
- CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_3XXX),
- CLK(NULL, "sys_clkout1", &sys_clkout1, CK_3XXX),
- CLK(NULL, "dpll1_ck", &dpll1_ck, CK_3XXX),
- CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck, CK_3XXX),
- CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck, CK_3XXX),
- CLK(NULL, "dpll2_ck", &dpll2_ck, CK_34XX | CK_36XX),
- CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck, CK_34XX | CK_36XX),
- CLK(NULL, "dpll3_ck", &dpll3_ck, CK_3XXX),
- CLK(NULL, "core_ck", &core_ck, CK_3XXX),
- CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck, CK_3XXX),
- CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck, CK_3XXX),
- CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck, CK_3XXX),
- CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck, CK_3XXX),
- CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck, CK_3XXX),
- CLK(NULL, "emu_core_alwon_ck", &emu_core_alwon_ck, CK_3XXX),
- CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck, CK_3XXX),
- CLK(NULL, "dpll4_ck", &dpll4_ck, CK_3XXX),
- CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck, CK_3XXX),
- CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck, CK_36XX),
- CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck, CK_3XXX),
- CLK(NULL, "omap_96m_alwon_fck_3630", &omap_96m_alwon_fck_3630, CK_36XX),
- CLK(NULL, "omap_96m_fck", &omap_96m_fck, CK_3XXX),
- CLK(NULL, "cm_96m_fck", &cm_96m_fck, CK_3XXX),
- CLK(NULL, "omap_54m_fck", &omap_54m_fck, CK_3XXX),
- CLK(NULL, "omap_48m_fck", &omap_48m_fck, CK_3XXX),
- CLK(NULL, "omap_12m_fck", &omap_12m_fck, CK_3XXX),
- CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck, CK_3XXX),
- CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck, CK_3XXX),
- CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck, CK_3XXX),
- CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck, CK_3XXX),
- CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck, CK_3XXX),
- CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck, CK_3XXX),
- CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck, CK_3XXX),
- CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck, CK_3XXX),
- CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck, CK_3XXX),
- CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck, CK_3XXX),
- CLK(NULL, "emu_per_alwon_ck", &emu_per_alwon_ck, CK_3XXX),
- CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck, CK_3XXX),
- CLK(NULL, "dpll5_ck", &dpll5_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "clkout2_src_ck", &clkout2_src_ck, CK_3XXX),
- CLK(NULL, "sys_clkout2", &sys_clkout2, CK_3XXX),
- CLK(NULL, "corex2_fck", &corex2_fck, CK_3XXX),
- CLK(NULL, "dpll1_fck", &dpll1_fck, CK_3XXX),
- CLK(NULL, "mpu_ck", &mpu_ck, CK_3XXX),
- CLK(NULL, "arm_fck", &arm_fck, CK_3XXX),
- CLK(NULL, "emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_3XXX),
- CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_3XXX),
- CLK(NULL, "dpll2_fck", &dpll2_fck, CK_34XX | CK_36XX),
- CLK(NULL, "iva2_ck", &iva2_ck, CK_34XX | CK_36XX),
- CLK(NULL, "l3_ick", &l3_ick, CK_3XXX),
- CLK(NULL, "l4_ick", &l4_ick, CK_3XXX),
- CLK(NULL, "rm_ick", &rm_ick, CK_3XXX),
- CLK(NULL, "gfx_l3_ck", &gfx_l3_ck, CK_3430ES1),
- CLK(NULL, "gfx_l3_fck", &gfx_l3_fck, CK_3430ES1),
- CLK(NULL, "gfx_l3_ick", &gfx_l3_ick, CK_3430ES1),
- CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck, CK_3430ES1),
- CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck, CK_3430ES1),
- CLK(NULL, "sgx_fck", &sgx_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "sgx_ick", &sgx_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "d2d_26m_fck", &d2d_26m_fck, CK_3430ES1),
- CLK(NULL, "modem_fck", &modem_fck, CK_34XX | CK_36XX),
- CLK(NULL, "sad2d_ick", &sad2d_ick, CK_34XX | CK_36XX),
- CLK(NULL, "mad2d_ick", &mad2d_ick, CK_34XX | CK_36XX),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_3XXX),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_3XXX),
- CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs_tll", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
- CLK(NULL, "mmchs3_fck", &mmchs3_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "mmchs2_fck", &mmchs2_fck, CK_3XXX),
- CLK(NULL, "mspro_fck", &mspro_fck, CK_34XX | CK_36XX),
- CLK(NULL, "mmchs1_fck", &mmchs1_fck, CK_3XXX),
- CLK(NULL, "i2c3_fck", &i2c3_fck, CK_3XXX),
- CLK(NULL, "i2c2_fck", &i2c2_fck, CK_3XXX),
- CLK(NULL, "i2c1_fck", &i2c1_fck, CK_3XXX),
- CLK(NULL, "mcbsp5_fck", &mcbsp5_fck, CK_3XXX),
- CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_3XXX),
- CLK(NULL, "core_48m_fck", &core_48m_fck, CK_3XXX),
- CLK(NULL, "mcspi4_fck", &mcspi4_fck, CK_3XXX),
- CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_3XXX),
- CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_3XXX),
- CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_3XXX),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_3XXX),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_3XXX),
- CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
- CLK(NULL, "core_12m_fck", &core_12m_fck, CK_3XXX),
- CLK("omap_hdq.0", "fck", &hdq_fck, CK_3XXX),
- CLK(NULL, "hdq_fck", &hdq_fck, CK_3XXX),
- CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "core_l3_ick", &core_l3_ick, CK_3XXX),
- CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
- CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1, CK_3430ES1),
- CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "sdrc_ick", &sdrc_ick, CK_3XXX),
- CLK(NULL, "gpmc_fck", &gpmc_fck, CK_3XXX),
- CLK(NULL, "security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX),
- CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
- CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
- CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs_omap", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs_tll", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "mmchs3_ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
- CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
- CLK("omap-sham", "ick", &sha12_ick, CK_34XX | CK_36XX),
- CLK(NULL, "des2_ick", &des2_ick, CK_34XX | CK_36XX),
- CLK("omap_hsmmc.1", "ick", &mmchs2_ick, CK_3XXX),
- CLK("omap_hsmmc.0", "ick", &mmchs1_ick, CK_3XXX),
- CLK(NULL, "mmchs2_ick", &mmchs2_ick, CK_3XXX),
- CLK(NULL, "mmchs1_ick", &mmchs1_ick, CK_3XXX),
- CLK(NULL, "mspro_ick", &mspro_ick, CK_34XX | CK_36XX),
- CLK("omap_hdq.0", "ick", &hdq_ick, CK_3XXX),
- CLK(NULL, "hdq_ick", &hdq_ick, CK_3XXX),
- CLK("omap2_mcspi.4", "ick", &mcspi4_ick, CK_3XXX),
- CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_3XXX),
- CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_3XXX),
- CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_3XXX),
- CLK(NULL, "mcspi4_ick", &mcspi4_ick, CK_3XXX),
- CLK(NULL, "mcspi3_ick", &mcspi3_ick, CK_3XXX),
- CLK(NULL, "mcspi2_ick", &mcspi2_ick, CK_3XXX),
- CLK(NULL, "mcspi1_ick", &mcspi1_ick, CK_3XXX),
- CLK("omap_i2c.3", "ick", &i2c3_ick, CK_3XXX),
- CLK("omap_i2c.2", "ick", &i2c2_ick, CK_3XXX),
- CLK("omap_i2c.1", "ick", &i2c1_ick, CK_3XXX),
- CLK(NULL, "i2c3_ick", &i2c3_ick, CK_3XXX),
- CLK(NULL, "i2c2_ick", &i2c2_ick, CK_3XXX),
- CLK(NULL, "i2c1_ick", &i2c1_ick, CK_3XXX),
- CLK(NULL, "uart2_ick", &uart2_ick, CK_3XXX),
- CLK(NULL, "uart1_ick", &uart1_ick, CK_3XXX),
- CLK(NULL, "gpt11_ick", &gpt11_ick, CK_3XXX),
- CLK(NULL, "gpt10_ick", &gpt10_ick, CK_3XXX),
- CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_3XXX),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_3XXX),
- CLK(NULL, "mcbsp5_ick", &mcbsp5_ick, CK_3XXX),
- CLK(NULL, "mcbsp1_ick", &mcbsp1_ick, CK_3XXX),
- CLK(NULL, "fac_ick", &fac_ick, CK_3430ES1),
- CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_34XX | CK_36XX),
- CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_3XXX),
- CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_34XX | CK_36XX),
- CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
- CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_34XX | CK_36XX),
- CLK(NULL, "aes1_ick", &aes1_ick, CK_34XX | CK_36XX),
- CLK("omap_rng", "ick", &rng_ick, CK_34XX | CK_36XX),
- CLK(NULL, "sha11_ick", &sha11_ick, CK_34XX | CK_36XX),
- CLK(NULL, "des1_ick", &des1_ick, CK_34XX | CK_36XX),
- CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
- CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_3XXX),
- CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_3XXX),
- CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_3XXX),
- CLK("omapdss_dss", "ick", &dss_ick_3430es1, CK_3430ES1),
- CLK(NULL, "dss_ick", &dss_ick_3430es1, CK_3430ES1),
- CLK("omapdss_dss", "ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "dss_ick", &dss_ick_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "cam_mclk", &cam_mclk, CK_34XX | CK_36XX),
- CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
- CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
- CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs_omap", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
- CLK(NULL, "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
- CLK(NULL, "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
- CLK(NULL, "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
- CLK(NULL, "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
- CLK(NULL, "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs_omap", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs_omap", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs_tll", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs_tll", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
- CLK(NULL, "init_60m_fclk", &dummy_ck, CK_3XXX),
- CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
- CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
- CLK(NULL, "gpio1_dbck", &gpio1_dbck, CK_3XXX),
- CLK(NULL, "wdt2_fck", &wdt2_fck, CK_3XXX),
- CLK(NULL, "wkup_l4_ick", &wkup_l4_ick, CK_34XX | CK_36XX),
- CLK(NULL, "usim_ick", &usim_ick, CK_3430ES2PLUS | CK_36XX),
- CLK("omap_wdt", "ick", &wdt2_ick, CK_3XXX),
- CLK(NULL, "wdt2_ick", &wdt2_ick, CK_3XXX),
- CLK(NULL, "wdt1_ick", &wdt1_ick, CK_3XXX),
- CLK(NULL, "gpio1_ick", &gpio1_ick, CK_3XXX),
- CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_3XXX),
- CLK(NULL, "gpt12_ick", &gpt12_ick, CK_3XXX),
- CLK(NULL, "gpt1_ick", &gpt1_ick, CK_3XXX),
- CLK(NULL, "per_96m_fck", &per_96m_fck, CK_3XXX),
- CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
- CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
- CLK(NULL, "uart4_fck", &uart4_fck_am35xx, CK_AM35XX),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_3XXX),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_3XXX),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_3XXX),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_3XXX),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_3XXX),
- CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck, CK_3XXX),
- CLK(NULL, "gpio6_dbck", &gpio6_dbck, CK_3XXX),
- CLK(NULL, "gpio5_dbck", &gpio5_dbck, CK_3XXX),
- CLK(NULL, "gpio4_dbck", &gpio4_dbck, CK_3XXX),
- CLK(NULL, "gpio3_dbck", &gpio3_dbck, CK_3XXX),
- CLK(NULL, "gpio2_dbck", &gpio2_dbck, CK_3XXX),
- CLK(NULL, "wdt3_fck", &wdt3_fck, CK_3XXX),
- CLK(NULL, "per_l4_ick", &per_l4_ick, CK_3XXX),
- CLK(NULL, "gpio6_ick", &gpio6_ick, CK_3XXX),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_3XXX),
- CLK(NULL, "gpio4_ick", &gpio4_ick, CK_3XXX),
- CLK(NULL, "gpio3_ick", &gpio3_ick, CK_3XXX),
- CLK(NULL, "gpio2_ick", &gpio2_ick, CK_3XXX),
- CLK(NULL, "wdt3_ick", &wdt3_ick, CK_3XXX),
- CLK(NULL, "uart3_ick", &uart3_ick, CK_3XXX),
- CLK(NULL, "uart4_ick", &uart4_ick, CK_36XX),
- CLK(NULL, "gpt9_ick", &gpt9_ick, CK_3XXX),
- CLK(NULL, "gpt8_ick", &gpt8_ick, CK_3XXX),
- CLK(NULL, "gpt7_ick", &gpt7_ick, CK_3XXX),
- CLK(NULL, "gpt6_ick", &gpt6_ick, CK_3XXX),
- CLK(NULL, "gpt5_ick", &gpt5_ick, CK_3XXX),
- CLK(NULL, "gpt4_ick", &gpt4_ick, CK_3XXX),
- CLK(NULL, "gpt3_ick", &gpt3_ick, CK_3XXX),
- CLK(NULL, "gpt2_ick", &gpt2_ick, CK_3XXX),
- CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_3XXX),
- CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_3XXX),
- CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_3XXX),
- CLK(NULL, "mcbsp4_ick", &mcbsp2_ick, CK_3XXX),
- CLK(NULL, "mcbsp3_ick", &mcbsp3_ick, CK_3XXX),
- CLK(NULL, "mcbsp2_ick", &mcbsp4_ick, CK_3XXX),
- CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_3XXX),
- CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_3XXX),
- CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_3XXX),
- CLK(NULL, "emu_src_ck", &emu_src_ck, CK_3XXX),
- CLK("etb", "emu_src_ck", &emu_src_ck, CK_3XXX),
- CLK(NULL, "pclk_fck", &pclk_fck, CK_3XXX),
- CLK(NULL, "pclkx2_fck", &pclkx2_fck, CK_3XXX),
- CLK(NULL, "atclk_fck", &atclk_fck, CK_3XXX),
- CLK(NULL, "traceclk_src_fck", &traceclk_src_fck, CK_3XXX),
- CLK(NULL, "traceclk_fck", &traceclk_fck, CK_3XXX),
- CLK(NULL, "sr1_fck", &sr1_fck, CK_34XX | CK_36XX),
- CLK(NULL, "sr2_fck", &sr2_fck, CK_34XX | CK_36XX),
- CLK(NULL, "sr_l4_ick", &sr_l4_ick, CK_34XX | CK_36XX),
- CLK(NULL, "secure_32k_fck", &secure_32k_fck, CK_3XXX),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_3XXX),
- CLK(NULL, "wdt1_fck", &wdt1_fck, CK_3XXX),
- CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
- CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
- CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX),
- CLK(NULL, "emac_ick", &emac_ick, CK_AM35XX),
- CLK(NULL, "emac_fck", &emac_fck, CK_AM35XX),
- CLK("davinci_emac.0", NULL, &emac_ick, CK_AM35XX),
- CLK("davinci_mdio.0", NULL, &emac_fck, CK_AM35XX),
- CLK(NULL, "vpfe_ick", &emac_ick, CK_AM35XX),
- CLK(NULL, "vpfe_fck", &emac_fck, CK_AM35XX),
- CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
- CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
- CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx, CK_AM35XX),
- CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx, CK_AM35XX),
- CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
- CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
- CLK(NULL, "timer_32k_ck", &omap_32k_fck, CK_3XXX),
- CLK(NULL, "timer_sys_ck", &sys_ck, CK_3XXX),
- CLK(NULL, "cpufreq_ck", &dpll1_ck, CK_3XXX),
-};
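/*
 * Editorial sketch, not part of the original source: each CLK() row above
 * registers a (dev_id, con_id) -> clock mapping, and clk_get()-style lookups
 * prefer the most specific entry.  The table and function below are a
 * hypothetical, simplified model of that idea only; the real clkdev
 * matching rules are more involved.
 */
#include <stddef.h>
#include <string.h>

struct toy_lookup { const char *dev_id; const char *con_id; const char *clk; };

static const struct toy_lookup toy_lookups[] = {
	{ "omap_hsmmc.0", "ick",        "mmchs1_ick" },
	{ NULL,           "mmchs1_ick", "mmchs1_ick" },
};

/* Prefer an exact dev_id match; otherwise fall back to a NULL-dev_id entry. */
static const char *toy_clk_get(const char *dev, const char *con)
{
	const char *fallback = NULL;
	size_t i;

	for (i = 0; i < sizeof(toy_lookups) / sizeof(toy_lookups[0]); i++) {
		if (con && toy_lookups[i].con_id &&
		    strcmp(con, toy_lookups[i].con_id))
			continue;
		if (dev && toy_lookups[i].dev_id &&
		    !strcmp(dev, toy_lookups[i].dev_id))
			return toy_lookups[i].clk;
		if (!toy_lookups[i].dev_id)
			fallback = toy_lookups[i].clk;
	}
	return fallback;
}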
-
-
-int __init omap3xxx_clk_init(void)
-{
- struct omap_clk *c;
- u32 cpu_clkflg = 0;
-
- if (soc_is_am35xx()) {
- cpu_mask = RATE_IN_34XX;
- cpu_clkflg = CK_AM35XX;
- } else if (cpu_is_omap3630()) {
- cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
- cpu_clkflg = CK_36XX;
- } else if (cpu_is_ti816x()) {
- cpu_mask = RATE_IN_TI816X;
- cpu_clkflg = CK_TI816X;
- } else if (soc_is_am33xx()) {
- cpu_mask = RATE_IN_AM33XX;
- } else if (cpu_is_ti814x()) {
- cpu_mask = RATE_IN_TI814X;
- } else if (cpu_is_omap34xx()) {
- if (omap_rev() == OMAP3430_REV_ES1_0) {
- cpu_mask = RATE_IN_3430ES1;
- cpu_clkflg = CK_3430ES1;
- } else {
- /*
- * Assume that anything that we haven't matched yet
- * has 3430ES2-type clocks.
- */
- cpu_mask = RATE_IN_3430ES2PLUS;
- cpu_clkflg = CK_3430ES2PLUS;
- }
- } else {
- WARN(1, "clock: could not identify OMAP3 variant\n");
- }
-
- if (omap3_has_192mhz_clk())
- omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
- if (cpu_is_omap3630()) {
- /*
- * XXX This type of dynamic rewriting of the clock tree is
- * deprecated and should be revised soon.
- *
- * For 3630: override clkops_omap2_dflt_wait for the
- * clocks affected by the PWRDN reset limitation
- */
- dpll3_m3x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- dpll4_m2x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- dpll4_m3x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- dpll4_m4x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- dpll4_m5x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- dpll4_m6x2_ck.ops =
- &clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- }
-
- /*
- * XXX This type of dynamic rewriting of the clock tree is
- * deprecated and should be revised soon.
- */
- if (cpu_is_omap3630())
- dpll4_dd = dpll4_dd_3630;
- else
- dpll4_dd = dpll4_dd_34xx;
-
- for (c = omap3xxx_clks; c < omap3xxx_clks + ARRAY_SIZE(omap3xxx_clks);
- c++)
- clk_preinit(c->lk.clk);
-
- for (c = omap3xxx_clks; c < omap3xxx_clks + ARRAY_SIZE(omap3xxx_clks);
- c++)
- if (c->cpu & cpu_clkflg) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- /* Disable autoidle on all clocks; let the PM code enable it later */
- omap_clk_disable_autoidle_all();
-
- recalculate_root_clocks();
-
- pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
- (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
- (core_ck.rate / 1000000), (arm_fck.rate / 1000000));
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- /*
- * Lock DPLL5 -- here only until other device init code can
- * handle this
- */
- if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0))
- omap3_clk_lock_dpll5();
-
- /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
- sdrc_ick_p = clk_get(NULL, "sdrc_ick");
- arm_fck_p = clk_get(NULL, "arm_fck");
-
- return 0;
-}
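/*
 * Editorial sketch, not part of the original source: omap3xxx_clk_init()
 * above registers only the table rows whose CK_* flags intersect the
 * runtime cpu_clkflg, which is how two rows may share one name (e.g. the
 * ES1 and ES2+ ssi_ssr_fck variants) while only one is ever registered.
 * The flags, rows and filter below are hypothetical stand-ins.
 */
enum { TOY_CK_3430ES1 = 1 << 0, TOY_CK_3430ES2PLUS = 1 << 1, TOY_CK_36XX = 1 << 2 };

struct toy_clk_row { const char *name; unsigned int flags; };

static const struct toy_clk_row toy_rows[] = {
	{ "ssi_ssr_fck", TOY_CK_3430ES1 },
	{ "ssi_ssr_fck", TOY_CK_3430ES2PLUS | TOY_CK_36XX },
};

/* Count how many rows would be registered for a given SoC flag set. */
static int toy_count_registered(unsigned int cpu_clkflg)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(toy_rows) / sizeof(toy_rows[0]); i++)
		if (toy_rows[i].flags & cpu_clkflg)
			n++;
	return n;	/* 1 for either a 3430ES1-only or a 36xx flag set */
}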
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
deleted file mode 100644
index 2a450c9b9a7..00000000000
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ /dev/null
@@ -1,3398 +0,0 @@
-/*
- * OMAP4 Clock data
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
- *
- * Paul Walmsley (paul@pwsan.com)
- * Rajendra Nayak (rnayak@ti.com)
- * Benoit Cousson (b-cousson@ti.com)
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * XXX Some of the ES1 clocks have been removed/changed; once support
- * is added for discriminating clocks by ES level, these should be added back
- * in.
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock44xx.h"
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
-#include "cm-regbits-44xx.h"
-#include "prm44xx.h"
-#include "prm-regbits-44xx.h"
-#include "control.h"
-#include "scrm44xx.h"
-
-/* OMAP4 modulemode control */
-#define OMAP4430_MODULEMODE_HWCTRL 0
-#define OMAP4430_MODULEMODE_SWCTRL 1
-
-/* Root clocks */
-
-static struct clk extalt_clkin_ck = {
- .name = "extalt_clkin_ck",
- .rate = 59000000,
- .ops = &clkops_null,
-};
-
-static struct clk pad_clks_ck = {
- .name = "pad_clks_ck",
- .rate = 12000000,
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CLKSEL_ABE,
- .enable_bit = OMAP4430_PAD_CLKS_GATE_SHIFT,
-};
-
-static struct clk pad_slimbus_core_clks_ck = {
- .name = "pad_slimbus_core_clks_ck",
- .rate = 12000000,
- .ops = &clkops_null,
-};
-
-static struct clk secure_32k_clk_src_ck = {
- .name = "secure_32k_clk_src_ck",
- .rate = 32768,
- .ops = &clkops_null,
-};
-
-static struct clk slimbus_clk = {
- .name = "slimbus_clk",
- .rate = 12000000,
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CLKSEL_ABE,
- .enable_bit = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
-};
-
-static struct clk sys_32k_ck = {
- .name = "sys_32k_ck",
- .clkdm_name = "prm_clkdm",
- .rate = 32768,
- .ops = &clkops_null,
-};
-
-static struct clk virt_12000000_ck = {
- .name = "virt_12000000_ck",
- .ops = &clkops_null,
- .rate = 12000000,
-};
-
-static struct clk virt_13000000_ck = {
- .name = "virt_13000000_ck",
- .ops = &clkops_null,
- .rate = 13000000,
-};
-
-static struct clk virt_16800000_ck = {
- .name = "virt_16800000_ck",
- .ops = &clkops_null,
- .rate = 16800000,
-};
-
-static struct clk virt_27000000_ck = {
- .name = "virt_27000000_ck",
- .ops = &clkops_null,
- .rate = 27000000,
-};
-
-static struct clk virt_38400000_ck = {
- .name = "virt_38400000_ck",
- .ops = &clkops_null,
- .rate = 38400000,
-};
-
-static const struct clksel_rate div_1_5_rates[] = {
- { .div = 1, .val = 5, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_6_rates[] = {
- { .div = 1, .val = 6, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_7_rates[] = {
- { .div = 1, .val = 7, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel sys_clkin_sel[] = {
- { .parent = &virt_12000000_ck, .rates = div_1_1_rates },
- { .parent = &virt_13000000_ck, .rates = div_1_2_rates },
- { .parent = &virt_16800000_ck, .rates = div_1_3_rates },
- { .parent = &virt_19200000_ck, .rates = div_1_4_rates },
- { .parent = &virt_26000000_ck, .rates = div_1_5_rates },
- { .parent = &virt_27000000_ck, .rates = div_1_6_rates },
- { .parent = &virt_38400000_ck, .rates = div_1_7_rates },
- { .parent = NULL },
-};
-
-static struct clk sys_clkin_ck = {
- .name = "sys_clkin_ck",
- .rate = 38400000,
- .clksel = sys_clkin_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_SYS_CLKSEL,
- .clksel_mask = OMAP4430_SYS_CLKSEL_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk tie_low_clock_ck = {
- .name = "tie_low_clock_ck",
- .rate = 0,
- .ops = &clkops_null,
-};
-
-static struct clk utmi_phy_clkout_ck = {
- .name = "utmi_phy_clkout_ck",
- .rate = 60000000,
- .ops = &clkops_null,
-};
-
-static struct clk xclk60mhsp1_ck = {
- .name = "xclk60mhsp1_ck",
- .rate = 60000000,
- .ops = &clkops_null,
-};
-
-static struct clk xclk60mhsp2_ck = {
- .name = "xclk60mhsp2_ck",
- .rate = 60000000,
- .ops = &clkops_null,
-};
-
-static struct clk xclk60motg_ck = {
- .name = "xclk60motg_ck",
- .rate = 60000000,
- .ops = &clkops_null,
-};
-
-/* Module clocks and DPLL outputs */
-
-static const struct clksel abe_dpll_bypass_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &sys_32k_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk abe_dpll_bypass_clk_mux_ck = {
- .name = "abe_dpll_bypass_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk abe_dpll_refclk_mux_ck = {
- .name = "abe_dpll_refclk_mux_ck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_ABE_PLL_REF_CLKSEL,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL_ABE */
-static struct dpll_data dpll_abe_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_ABE,
- .clk_bypass = &abe_dpll_bypass_clk_mux_ck,
- .clk_ref = &abe_dpll_refclk_mux_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_ABE,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_ABE,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_ABE,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = 2047,
- .max_divider = 128,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_abe_ck = {
- .name = "dpll_abe_ck",
- .parent = &abe_dpll_refclk_mux_ck,
- .dpll_data = &dpll_abe_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap4_dpll_regm4xen_recalc,
- .round_rate = &omap4_dpll_regm4xen_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
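/*
 * Editorial sketch, not part of the original source: the dpll_data masks
 * above (mult/div, max_multiplier, max_divider) feed the usual OMAP DPLL
 * relation, roughly Fout = Fref * M / (N + 1); the Mx outputs then divide
 * that rate (with the "x2" outputs doubling it), and DPLL_ABE may apply an
 * extra x4 REGM4XEN factor.  The helper below is only an illustration of
 * that arithmetic, with hypothetical names.
 */
static unsigned long toy_dpll_rate(unsigned long fref, unsigned int m,
				   unsigned int n, unsigned int mx_div)
{
	unsigned long long rate = (unsigned long long)fref * m;

	rate /= (n + 1);			/* locked DPLL output */
	return (unsigned long)(rate / mx_div);	/* after the Mx post-divider */
}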
-
-static struct clk dpll_abe_x2_ck = {
- .name = "dpll_abe_x2_ck",
- .parent = &dpll_abe_ck,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_abe_m2x2_div[] = {
- { .parent = &dpll_abe_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_abe_m2x2_ck = {
- .name = "dpll_abe_m2x2_ck",
- .parent = &dpll_abe_x2_ck,
- .clksel = dpll_abe_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk abe_24m_fclk = {
- .name = "abe_24m_fclk",
- .parent = &dpll_abe_m2x2_ck,
- .ops = &clkops_null,
- .fixed_div = 8,
- .recalc = &omap_fixed_divisor_recalc,
-};
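/*
 * Editorial sketch, not part of the original source: fixed-divider clocks
 * such as abe_24m_fclk above recalculate simply as parent_rate / fixed_div;
 * the helper below restates that arithmetic with hypothetical names
 * (e.g. a 196608000 Hz parent with fixed_div = 8 yields 24576000 Hz).
 */
static unsigned long toy_fixed_div_rate(unsigned long parent_rate,
					unsigned int fixed_div)
{
	return parent_rate / fixed_div;
}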
-
-static const struct clksel_rate div3_1to4_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 4, .val = 2, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel abe_clk_div[] = {
- { .parent = &dpll_abe_m2x2_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-static struct clk abe_clk = {
- .name = "abe_clk",
- .parent = &dpll_abe_m2x2_ck,
- .clksel = abe_clk_div,
- .clksel_reg = OMAP4430_CM_CLKSEL_ABE,
- .clksel_mask = OMAP4430_CLKSEL_OPP_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel_rate div2_1to2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel aess_fclk_div[] = {
- { .parent = &abe_clk, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk aess_fclk = {
- .name = "aess_fclk",
- .parent = &abe_clk,
- .clksel = aess_fclk_div,
- .clksel_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_AESS_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_abe_m3x2_ck = {
- .name = "dpll_abe_m3x2_ck",
- .parent = &dpll_abe_x2_ck,
- .clksel = dpll_abe_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_ABE,
- .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel core_hsd_byp_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &dpll_abe_m3x2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk core_hsd_byp_clk_mux_ck = {
- .name = "core_hsd_byp_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .clksel = core_hsd_byp_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
- .clksel_mask = OMAP4430_DPLL_BYP_CLKSEL_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL_CORE */
-static struct dpll_data dpll_core_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
- .clk_bypass = &core_hsd_byp_clk_mux_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_CORE,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_CORE,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_CORE,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = 2047,
- .max_divider = 128,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_core_ck = {
- .name = "dpll_core_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_core_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_core_dpll_ops,
- .recalc = &omap3_dpll_recalc,
-};
-
-static struct clk dpll_core_x2_ck = {
- .name = "dpll_core_x2_ck",
- .parent = &dpll_core_ck,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_null,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_core_m6x2_div[] = {
- { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_core_m6x2_ck = {
- .name = "dpll_core_m6x2_ck",
- .parent = &dpll_core_x2_ck,
- .clksel = dpll_core_m6x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M6_DPLL_CORE,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel dbgclk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk dbgclk_mux_ck = {
- .name = "dbgclk_mux_ck",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel dpll_core_m2_div[] = {
- { .parent = &dpll_core_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_core_m2_ck = {
- .name = "dpll_core_m2_ck",
- .parent = &dpll_core_ck,
- .clksel = dpll_core_m2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_CORE,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk ddrphy_ck = {
- .name = "ddrphy_ck",
- .parent = &dpll_core_m2_ck,
- .ops = &clkops_null,
- .clkdm_name = "l3_emif_clkdm",
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk dpll_core_m5x2_ck = {
- .name = "dpll_core_m5x2_ck",
- .parent = &dpll_core_x2_ck,
- .clksel = dpll_core_m6x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_CORE,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel div_core_div[] = {
- { .parent = &dpll_core_m5x2_ck, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk div_core_ck = {
- .name = "div_core_ck",
- .parent = &dpll_core_m5x2_ck,
- .clksel = div_core_div,
- .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
- .clksel_mask = OMAP4430_CLKSEL_CORE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel_rate div4_1to8_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 4, .val = 2, .flags = RATE_IN_4430 },
- { .div = 8, .val = 3, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel div_iva_hs_clk_div[] = {
- { .parent = &dpll_core_m5x2_ck, .rates = div4_1to8_rates },
- { .parent = NULL },
-};
-
-static struct clk div_iva_hs_clk = {
- .name = "div_iva_hs_clk",
- .parent = &dpll_core_m5x2_ck,
- .clksel = div_iva_hs_clk_div,
- .clksel_reg = OMAP4430_CM_BYPCLK_DPLL_IVA,
- .clksel_mask = OMAP4430_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk div_mpu_hs_clk = {
- .name = "div_mpu_hs_clk",
- .parent = &dpll_core_m5x2_ck,
- .clksel = div_iva_hs_clk_div,
- .clksel_reg = OMAP4430_CM_BYPCLK_DPLL_MPU,
- .clksel_mask = OMAP4430_CLKSEL_0_1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_core_m4x2_ck = {
- .name = "dpll_core_m4x2_ck",
- .parent = &dpll_core_x2_ck,
- .clksel = dpll_core_m6x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_CORE,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dll_clk_div_ck = {
- .name = "dll_clk_div_ck",
- .parent = &dpll_core_m4x2_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel dpll_abe_m2_div[] = {
- { .parent = &dpll_abe_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_abe_m2_ck = {
- .name = "dpll_abe_m2_ck",
- .parent = &dpll_abe_ck,
- .clksel = dpll_abe_m2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_core_m3x2_ck = {
- .name = "dpll_core_m3x2_ck",
- .parent = &dpll_core_x2_ck,
- .clksel = dpll_core_m6x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
- .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
- .enable_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
- .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
-};
-
-static struct clk dpll_core_m7x2_ck = {
- .name = "dpll_core_m7x2_ck",
- .parent = &dpll_core_x2_ck,
- .clksel = dpll_core_m6x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M7_DPLL_CORE,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel iva_hsd_byp_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &div_iva_hs_clk, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk iva_hsd_byp_clk_mux_ck = {
- .name = "iva_hsd_byp_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .clksel = iva_hsd_byp_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_CLKSEL_DPLL_IVA,
- .clksel_mask = OMAP4430_DPLL_BYP_CLKSEL_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL_IVA */
-static struct dpll_data dpll_iva_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_IVA,
- .clk_bypass = &iva_hsd_byp_clk_mux_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_IVA,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_IVA,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_IVA,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = 2047,
- .max_divider = 128,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_iva_ck = {
- .name = "dpll_iva_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_iva_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-static struct clk dpll_iva_x2_ck = {
- .name = "dpll_iva_x2_ck",
- .parent = &dpll_iva_ck,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_null,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_iva_m4x2_div[] = {
- { .parent = &dpll_iva_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_iva_m4x2_ck = {
- .name = "dpll_iva_m4x2_ck",
- .parent = &dpll_iva_x2_ck,
- .clksel = dpll_iva_m4x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_IVA,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_iva_m5x2_ck = {
- .name = "dpll_iva_m5x2_ck",
- .parent = &dpll_iva_x2_ck,
- .clksel = dpll_iva_m4x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_IVA,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* DPLL_MPU */
-static struct dpll_data dpll_mpu_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_MPU,
- .clk_bypass = &div_mpu_hs_clk,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_MPU,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_MPU,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_MPU,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = 2047,
- .max_divider = 128,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_mpu_ck = {
- .name = "dpll_mpu_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_mpu_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-static const struct clksel dpll_mpu_m2_div[] = {
- { .parent = &dpll_mpu_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_mpu_m2_ck = {
- .name = "dpll_mpu_m2_ck",
- .parent = &dpll_mpu_ck,
- .clkdm_name = "cm_clkdm",
- .clksel = dpll_mpu_m2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk per_hs_clk_div_ck = {
- .name = "per_hs_clk_div_ck",
- .parent = &dpll_abe_m3x2_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel per_hsd_byp_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &per_hs_clk_div_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk per_hsd_byp_clk_mux_ck = {
- .name = "per_hsd_byp_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .clksel = per_hsd_byp_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
- .clksel_mask = OMAP4430_DPLL_BYP_CLKSEL_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL_PER */
-static struct dpll_data dpll_per_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
- .clk_bypass = &per_hsd_byp_clk_mux_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_PER,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_PER,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_PER,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = 2047,
- .max_divider = 128,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_per_ck = {
- .name = "dpll_per_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_per_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-static const struct clksel dpll_per_m2_div[] = {
- { .parent = &dpll_per_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_per_m2_ck = {
- .name = "dpll_per_m2_ck",
- .parent = &dpll_per_ck,
- .clksel = dpll_per_m2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_x2_ck = {
- .name = "dpll_per_x2_ck",
- .parent = &dpll_per_ck,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_per_m2x2_div[] = {
- { .parent = &dpll_per_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_per_m2x2_ck = {
- .name = "dpll_per_m2x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_m3x2_ck = {
- .name = "dpll_per_m3x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
- .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
- .enable_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
- .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
-};
-
-static struct clk dpll_per_m4x2_ck = {
- .name = "dpll_per_m4x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_PER,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_m5x2_ck = {
- .name = "dpll_per_m5x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_PER,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_m6x2_ck = {
- .name = "dpll_per_m6x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M6_DPLL_PER,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk dpll_per_m7x2_ck = {
- .name = "dpll_per_m7x2_ck",
- .parent = &dpll_per_x2_ck,
- .clksel = dpll_per_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M7_DPLL_PER,
- .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk usb_hs_clk_div_ck = {
- .name = "usb_hs_clk_div_ck",
- .parent = &dpll_abe_m3x2_ck,
- .ops = &clkops_null,
- .fixed_div = 3,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-/* DPLL_USB */
-static struct dpll_data dpll_usb_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_USB,
- .clk_bypass = &usb_hs_clk_div_ck,
- .flags = DPLL_J_TYPE,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_USB,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_USB,
- .mult_mask = OMAP4430_DPLL_MULT_USB_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_0_7_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .sddiv_mask = OMAP4430_DPLL_SD_DIV_MASK,
- .max_multiplier = 4095,
- .max_divider = 256,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_usb_ck = {
- .name = "dpll_usb_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_usb_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "l3_init_clkdm",
-};
-
-static struct clk dpll_usb_clkdcoldo_ck = {
- .name = "dpll_usb_clkdcoldo_ck",
- .parent = &dpll_usb_ck,
- .clksel_reg = OMAP4430_CM_CLKDCOLDO_DPLL_USB,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel dpll_usb_m2_div[] = {
- { .parent = &dpll_usb_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_usb_m2_ck = {
- .name = "dpll_usb_m2_ck",
- .parent = &dpll_usb_ck,
- .clksel = dpll_usb_m2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_USB,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel ducati_clk_mux_sel[] = {
- { .parent = &div_core_ck, .rates = div_1_0_rates },
- { .parent = &dpll_per_m6x2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk ducati_clk_mux_ck = {
- .name = "ducati_clk_mux_ck",
- .parent = &div_core_ck,
- .clksel = ducati_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk func_12m_fclk = {
- .name = "func_12m_fclk",
- .parent = &dpll_per_m2x2_ck,
- .ops = &clkops_null,
- .fixed_div = 16,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk func_24m_clk = {
- .name = "func_24m_clk",
- .parent = &dpll_per_m2_ck,
- .ops = &clkops_null,
- .fixed_div = 4,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk func_24mc_fclk = {
- .name = "func_24mc_fclk",
- .parent = &dpll_per_m2x2_ck,
- .ops = &clkops_null,
- .fixed_div = 8,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel_rate div2_4to8_rates[] = {
- { .div = 4, .val = 0, .flags = RATE_IN_4430 },
- { .div = 8, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel func_48m_fclk_div[] = {
- { .parent = &dpll_per_m2x2_ck, .rates = div2_4to8_rates },
- { .parent = NULL },
-};
-
-static struct clk func_48m_fclk = {
- .name = "func_48m_fclk",
- .parent = &dpll_per_m2x2_ck,
- .clksel = func_48m_fclk_div,
- .clksel_reg = OMAP4430_CM_SCALE_FCLK,
- .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk func_48mc_fclk = {
- .name = "func_48mc_fclk",
- .parent = &dpll_per_m2x2_ck,
- .ops = &clkops_null,
- .fixed_div = 4,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel_rate div2_2to4_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_4430 },
- { .div = 4, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel func_64m_fclk_div[] = {
- { .parent = &dpll_per_m4x2_ck, .rates = div2_2to4_rates },
- { .parent = NULL },
-};
-
-static struct clk func_64m_fclk = {
- .name = "func_64m_fclk",
- .parent = &dpll_per_m4x2_ck,
- .clksel = func_64m_fclk_div,
- .clksel_reg = OMAP4430_CM_SCALE_FCLK,
- .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel func_96m_fclk_div[] = {
- { .parent = &dpll_per_m2x2_ck, .rates = div2_2to4_rates },
- { .parent = NULL },
-};
-
-static struct clk func_96m_fclk = {
- .name = "func_96m_fclk",
- .parent = &dpll_per_m2x2_ck,
- .clksel = func_96m_fclk_div,
- .clksel_reg = OMAP4430_CM_SCALE_FCLK,
- .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel_rate div2_1to8_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 8, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel init_60m_fclk_div[] = {
- { .parent = &dpll_usb_m2_ck, .rates = div2_1to8_rates },
- { .parent = NULL },
-};
-
-static struct clk init_60m_fclk = {
- .name = "init_60m_fclk",
- .parent = &dpll_usb_m2_ck,
- .clksel = init_60m_fclk_div,
- .clksel_reg = OMAP4430_CM_CLKSEL_USB_60MHZ,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel l3_div_div[] = {
- { .parent = &div_core_ck, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk l3_div_ck = {
- .name = "l3_div_ck",
- .parent = &div_core_ck,
- .clkdm_name = "cm_clkdm",
- .clksel = l3_div_div,
- .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
- .clksel_mask = OMAP4430_CLKSEL_L3_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel l4_div_div[] = {
- { .parent = &l3_div_ck, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk l4_div_ck = {
- .name = "l4_div_ck",
- .parent = &l3_div_ck,
- .clksel = l4_div_div,
- .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
- .clksel_mask = OMAP4430_CLKSEL_L4_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk lp_clk_div_ck = {
- .name = "lp_clk_div_ck",
- .parent = &dpll_abe_m2x2_ck,
- .ops = &clkops_null,
- .fixed_div = 16,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel l4_wkup_clk_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &lp_clk_div_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk l4_wkup_clk_mux_ck = {
- .name = "l4_wkup_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .clksel = l4_wkup_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4_WKUP_CLKSEL,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate div2_2to1_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_4430 },
- { .div = 2, .val = 0, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel ocp_abe_iclk_div[] = {
- { .parent = &aess_fclk, .rates = div2_2to1_rates },
- { .parent = NULL },
-};
-
-static struct clk mpu_periphclk = {
- .name = "mpu_periphclk",
- .parent = &dpll_mpu_ck,
- .ops = &clkops_null,
- .fixed_div = 2,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static struct clk ocp_abe_iclk = {
- .name = "ocp_abe_iclk",
- .parent = &aess_fclk,
- .clksel = ocp_abe_iclk_div,
- .clksel_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_AESS_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk per_abe_24m_fclk = {
- .name = "per_abe_24m_fclk",
- .parent = &dpll_abe_m2_ck,
- .ops = &clkops_null,
- .fixed_div = 4,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
-static const struct clksel per_abe_nc_fclk_div[] = {
- { .parent = &dpll_abe_m2_ck, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk per_abe_nc_fclk = {
- .name = "per_abe_nc_fclk",
- .parent = &dpll_abe_m2_ck,
- .clksel = per_abe_nc_fclk_div,
- .clksel_reg = OMAP4430_CM_SCALE_FCLK,
- .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel pmd_stm_clock_mux_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
- { .parent = &tie_low_clock_ck, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static struct clk pmd_stm_clock_mux_ck = {
- .name = "pmd_stm_clock_mux_ck",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pmd_trace_clk_mux_ck = {
- .name = "pmd_trace_clk_mux_ck",
- .parent = &sys_clkin_ck,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel syc_clk_div_div[] = {
- { .parent = &sys_clkin_ck, .rates = div2_1to2_rates },
- { .parent = NULL },
-};
-
-static struct clk syc_clk_div_ck = {
- .name = "syc_clk_div_ck",
- .parent = &sys_clkin_ck,
- .clksel = syc_clk_div_div,
- .clksel_reg = OMAP4430_CM_ABE_DSS_SYS_CLKSEL,
- .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* Leaf clocks controlled by modules */
-
-static struct clk aes1_fck = {
- .name = "aes1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_AES1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes2_fck = {
- .name = "aes2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_AES2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aess_fck = {
- .name = "aess_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &aess_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk bandgap_fclk = {
- .name = "bandgap_fclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk des3des_fck = {
- .name = "des3des_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_DES3DES_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel dmic_sync_mux_sel[] = {
- { .parent = &abe_24m_fclk, .rates = div_1_0_rates },
- { .parent = &syc_clk_div_ck, .rates = div_1_1_rates },
- { .parent = &func_24m_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static struct clk dmic_sync_mux_ck = {
- .name = "dmic_sync_mux_ck",
- .parent = &abe_24m_fclk,
- .clksel = dmic_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel func_dmic_abe_gfclk_sel[] = {
- { .parent = &dmic_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = &slimbus_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-/* Merged func_dmic_abe_gfclk into dmic */
-static struct clk dmic_fck = {
- .name = "dmic_fck",
- .parent = &dmic_sync_mux_ck,
- .clksel = func_dmic_abe_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
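dmic_fck above is the first of several "Merged ... into ..." nodes: one struct clk carries both the functional-clock source mux (.clksel/.clksel_mask, resolved at init time via omap2_init_clksel_parent) and the module's enable control (.enable_reg/.enable_bit). A hedged sketch of the parent lookup such an init step performs, with illustrative types in place of the kernel's:

/* Hedged sketch: pick the mux parent that matches the CLKSEL_SOURCE field. */
struct sketch_parent_sel {
	const char *parent;	/* "dmic_sync_mux_ck", "pad_clks_ck", "slimbus_clk", ... */
	unsigned int val;	/* field value that selects this parent */
};

static const char *sketch_init_clksel_parent(unsigned int field_val,
					     const struct sketch_parent_sel *sel,
					     unsigned int nr_sel)
{
	unsigned int i;

	for (i = 0; i < nr_sel; i++)
		if (sel[i].val == field_val)
			return sel[i].parent;

	return sel[0].parent;	/* fall back to the default (first) parent */
}
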
-static struct clk dsp_fck = {
- .name = "dsp_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "tesla_clkdm",
- .parent = &dpll_iva_m4x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_sys_clk = {
- .name = "dss_sys_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT,
- .clkdm_name = "l3_dss_clkdm",
- .parent = &syc_clk_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_tv_clk = {
- .name = "dss_tv_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_TV_CLK_SHIFT,
- .clkdm_name = "l3_dss_clkdm",
- .parent = &extalt_clkin_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_dss_clk = {
- .name = "dss_dss_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
- .clkdm_name = "l3_dss_clkdm",
- .parent = &dpll_per_m5x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate div3_8to32_rates[] = {
- { .div = 8, .val = 0, .flags = RATE_IN_4460 },
- { .div = 16, .val = 1, .flags = RATE_IN_4460 },
- { .div = 32, .val = 2, .flags = RATE_IN_4460 },
- { .div = 0 },
-};
-
-static const struct clksel div_ts_div[] = {
- { .parent = &l4_wkup_clk_mux_ck, .rates = div3_8to32_rates },
- { .parent = NULL },
-};
-
-static struct clk div_ts_ck = {
- .name = "div_ts_ck",
- .parent = &l4_wkup_clk_mux_ck,
- .clksel = div_ts_div,
- .clksel_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk bandgap_ts_fclk = {
- .name = "bandgap_ts_fclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
- .enable_bit = OMAP4460_OPTFCLKEN_TS_FCLK_SHIFT,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &div_ts_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_48mhz_clk = {
- .name = "dss_48mhz_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT,
- .clkdm_name = "l3_dss_clkdm",
- .parent = &func_48mc_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_fck = {
- .name = "dss_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_dss_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk efuse_ctrl_cust_fck = {
- .name = "efuse_ctrl_cust_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_cefuse_clkdm",
- .parent = &sys_clkin_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk emif1_fck = {
- .name = "emif1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_emif_clkdm",
- .parent = &ddrphy_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk emif2_fck = {
- .name = "emif2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_emif_clkdm",
- .parent = &ddrphy_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel fdif_fclk_div[] = {
- { .parent = &dpll_per_m4x2_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-/* Merged fdif_fclk into fdif */
-static struct clk fdif_fck = {
- .name = "fdif_fck",
- .parent = &dpll_per_m4x2_ck,
- .clksel = fdif_fclk_div,
- .clksel_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_FCLK_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
- .enable_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "iss_clkdm",
-};
-
-static struct clk fpka_fck = {
- .name = "fpka_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_dbclk = {
- .name = "gpio1_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_ick = {
- .name = "gpio1_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &l4_wkup_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_dbclk = {
- .name = "gpio2_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_ick = {
- .name = "gpio2_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_dbclk = {
- .name = "gpio3_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_ick = {
- .name = "gpio3_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_dbclk = {
- .name = "gpio4_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_ick = {
- .name = "gpio4_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_dbclk = {
- .name = "gpio5_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_ick = {
- .name = "gpio5_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_dbclk = {
- .name = "gpio6_dbclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_DBCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_ick = {
- .name = "gpio6_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpmc_ick = {
- .name = "gpmc_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3_2_GPMC_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_2_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel sgx_clk_mux_sel[] = {
- { .parent = &dpll_core_m7x2_ck, .rates = div_1_0_rates },
- { .parent = &dpll_per_m7x2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-/* Merged sgx_clk_mux into gpu */
-static struct clk gpu_fck = {
- .name = "gpu_fck",
- .parent = &dpll_core_m7x2_ck,
- .clksel = sgx_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SGX_FCLK_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_gfx_clkdm",
-};
-
-static struct clk hdq1w_fck = {
- .name = "hdq1w_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_HDQ1W_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_12m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel hsi_fclk_div[] = {
- { .parent = &dpll_per_m2x2_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-/* Merged hsi_fclk into hsi */
-static struct clk hsi_fck = {
- .name = "hsi_fck",
- .parent = &dpll_per_m2x2_ck,
- .clksel = hsi_fclk_div,
- .clksel_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
- .enable_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_init_clkdm",
-};
-
-static struct clk i2c1_fck = {
- .name = "i2c1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_I2C1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_96m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_fck = {
- .name = "i2c2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_I2C2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_96m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c3_fck = {
- .name = "i2c3_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_I2C3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_96m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c4_fck = {
- .name = "i2c4_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_I2C4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_96m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ipu_fck = {
- .name = "ipu_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "ducati_clkdm",
- .parent = &ducati_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk iss_ctrlclk = {
- .name = "iss_ctrlclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT,
- .clkdm_name = "iss_clkdm",
- .parent = &func_96m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk iss_fck = {
- .name = "iss_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "iss_clkdm",
- .parent = &ducati_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk iva_fck = {
- .name = "iva_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "ivahd_clkdm",
- .parent = &dpll_iva_m5x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk kbd_fck = {
- .name = "kbd_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l3_instr_ick = {
- .name = "l3_instr_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_instr_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l3_main_3_ick = {
- .name = "l3_main_3_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_instr_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcasp_sync_mux_ck = {
- .name = "mcasp_sync_mux_ck",
- .parent = &abe_24m_fclk,
- .clksel = dmic_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel func_mcasp_abe_gfclk_sel[] = {
- { .parent = &mcasp_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = &slimbus_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-/* Merged func_mcasp_abe_gfclk into mcasp */
-static struct clk mcasp_fck = {
- .name = "mcasp_fck",
- .parent = &mcasp_sync_mux_ck,
- .clksel = func_mcasp_abe_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-static struct clk mcbsp1_sync_mux_ck = {
- .name = "mcbsp1_sync_mux_ck",
- .parent = &abe_24m_fclk,
- .clksel = dmic_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel func_mcbsp1_gfclk_sel[] = {
- { .parent = &mcbsp1_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = &slimbus_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-/* Merged func_mcbsp1_gfclk into mcbsp1 */
-static struct clk mcbsp1_fck = {
- .name = "mcbsp1_fck",
- .parent = &mcbsp1_sync_mux_ck,
- .clksel = func_mcbsp1_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-static struct clk mcbsp2_sync_mux_ck = {
- .name = "mcbsp2_sync_mux_ck",
- .parent = &abe_24m_fclk,
- .clksel = dmic_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel func_mcbsp2_gfclk_sel[] = {
- { .parent = &mcbsp2_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = &slimbus_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-/* Merged func_mcbsp2_gfclk into mcbsp2 */
-static struct clk mcbsp2_fck = {
- .name = "mcbsp2_fck",
- .parent = &mcbsp2_sync_mux_ck,
- .clksel = func_mcbsp2_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-static struct clk mcbsp3_sync_mux_ck = {
- .name = "mcbsp3_sync_mux_ck",
- .parent = &abe_24m_fclk,
- .clksel = dmic_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel func_mcbsp3_gfclk_sel[] = {
- { .parent = &mcbsp3_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = &slimbus_clk, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-/* Merged func_mcbsp3_gfclk into mcbsp3 */
-static struct clk mcbsp3_fck = {
- .name = "mcbsp3_fck",
- .parent = &mcbsp3_sync_mux_ck,
- .clksel = func_mcbsp3_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-static const struct clksel mcbsp4_sync_mux_sel[] = {
- { .parent = &func_96m_fclk, .rates = div_1_0_rates },
- { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk mcbsp4_sync_mux_ck = {
- .name = "mcbsp4_sync_mux_ck",
- .parent = &func_96m_fclk,
- .clksel = mcbsp4_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel per_mcbsp4_gfclk_sel[] = {
- { .parent = &mcbsp4_sync_mux_ck, .rates = div_1_0_rates },
- { .parent = &pad_clks_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-/* Merged per_mcbsp4_gfclk into mcbsp4 */
-static struct clk mcbsp4_fck = {
- .name = "mcbsp4_fck",
- .parent = &mcbsp4_sync_mux_ck,
- .clksel = per_mcbsp4_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_SOURCE_24_24_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-static struct clk mcpdm_fck = {
- .name = "mcpdm_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &pad_clks_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_fck = {
- .name = "mcspi1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_fck = {
- .name = "mcspi2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_fck = {
- .name = "mcspi3_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi4_fck = {
- .name = "mcspi4_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel hsmmc1_fclk_sel[] = {
- { .parent = &func_64m_fclk, .rates = div_1_0_rates },
- { .parent = &func_96m_fclk, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-/* Merged hsmmc1_fclk into mmc1 */
-static struct clk mmc1_fck = {
- .name = "mmc1_fck",
- .parent = &func_64m_fclk,
- .clksel = hsmmc1_fclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_init_clkdm",
-};
-
-/* Merged hsmmc2_fclk into mmc2 */
-static struct clk mmc2_fck = {
- .name = "mmc2_fck",
- .parent = &func_64m_fclk,
- .clksel = hsmmc1_fclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_init_clkdm",
-};
-
-static struct clk mmc3_fck = {
- .name = "mmc3_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmc4_fck = {
- .name = "mmc4_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmc5_fck = {
- .name = "mmc5_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ocp2scp_usb_phy_phy_48m = {
- .name = "ocp2scp_usb_phy_phy_48m",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_PHY_48M_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ocp2scp_usb_phy_ick = {
- .name = "ocp2scp_usb_phy_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_init_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ocp_wp_noc_ick = {
- .name = "ocp_wp_noc_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "l3_instr_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rng_ick = {
- .name = "rng_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_RNG_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha2md5_fck = {
- .name = "sha2md5_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_secure_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sl2if_ick = {
- .name = "sl2if_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "ivahd_clkdm",
- .parent = &dpll_iva_m5x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus1_fclk_1 = {
- .name = "slimbus1_fclk_1",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_FCLK1_SHIFT,
- .clkdm_name = "abe_clkdm",
- .parent = &func_24m_clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus1_fclk_0 = {
- .name = "slimbus1_fclk_0",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_FCLK0_SHIFT,
- .clkdm_name = "abe_clkdm",
- .parent = &abe_24m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus1_fclk_2 = {
- .name = "slimbus1_fclk_2",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_FCLK2_SHIFT,
- .clkdm_name = "abe_clkdm",
- .parent = &pad_clks_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus1_slimbus_clk = {
- .name = "slimbus1_slimbus_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT,
- .clkdm_name = "abe_clkdm",
- .parent = &slimbus_clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus1_fck = {
- .name = "slimbus1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &ocp_abe_iclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus2_fclk_1 = {
- .name = "slimbus2_fclk_1",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &per_abe_24m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus2_fclk_0 = {
- .name = "slimbus2_fclk_0",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_24mc_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus2_slimbus_clk = {
- .name = "slimbus2_slimbus_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT,
- .clkdm_name = "l4_per_clkdm",
- .parent = &pad_slimbus_core_clks_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk slimbus2_fck = {
- .name = "slimbus2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk smartreflex_core_fck = {
- .name = "smartreflex_core_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_ao_clkdm",
- .parent = &l4_wkup_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk smartreflex_iva_fck = {
- .name = "smartreflex_iva_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_ao_clkdm",
- .parent = &l4_wkup_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk smartreflex_mpu_fck = {
- .name = "smartreflex_mpu_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_ao_clkdm",
- .parent = &l4_wkup_clk_mux_ck,
- .recalc = &followparent_recalc,
-};
-
-/* Merged dmt1_clk_mux into timer1 */
-static struct clk timer1_fck = {
- .name = "timer1_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
-};
-
-/* Merged cm2_dm10_mux into timer10 */
-static struct clk timer10_fck = {
- .name = "timer10_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/* Merged cm2_dm11_mux into timer11 */
-static struct clk timer11_fck = {
- .name = "timer11_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/* Merged cm2_dm2_mux into timer2 */
-static struct clk timer2_fck = {
- .name = "timer2_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/* Merged cm2_dm3_mux into timer3 */
-static struct clk timer3_fck = {
- .name = "timer3_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-/* Merged cm2_dm4_mux into timer4 */
-static struct clk timer4_fck = {
- .name = "timer4_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-static const struct clksel timer5_sync_mux_sel[] = {
- { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
- { .parent = &sys_32k_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-/* Merged timer5_sync_mux into timer5 */
-static struct clk timer5_fck = {
- .name = "timer5_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/* Merged timer6_sync_mux into timer6 */
-static struct clk timer6_fck = {
- .name = "timer6_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/* Merged timer7_sync_mux into timer7 */
-static struct clk timer7_fck = {
- .name = "timer7_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/* Merged timer8_sync_mux into timer8 */
-static struct clk timer8_fck = {
- .name = "timer8_fck",
- .parent = &syc_clk_div_ck,
- .clksel = timer5_sync_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
-};
-
-/* Merged cm2_dm9_mux into timer9 */
-static struct clk timer9_fck = {
- .name = "timer9_fck",
- .parent = &sys_clkin_ck,
- .clksel = abe_dpll_bypass_clk_mux_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_MASK,
- .ops = &clkops_omap2_dflt,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
-};
-
-static struct clk uart1_fck = {
- .name = "uart1_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_UART1_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_fck = {
- .name = "uart2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_UART2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_fck = {
- .name = "uart3_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_UART3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart4_fck = {
- .name = "uart4_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L4PER_UART4_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_per_clkdm",
- .parent = &func_48m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_fs_fck = {
- .name = "usb_host_fs_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_init_clkdm",
- .parent = &func_48mc_fclk,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel utmi_p1_gfclk_sel[] = {
- { .parent = &init_60m_fclk, .rates = div_1_0_rates },
- { .parent = &xclk60mhsp1_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk utmi_p1_gfclk = {
- .name = "utmi_p1_gfclk",
- .parent = &init_60m_fclk,
- .clksel = utmi_p1_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_UTMI_P1_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk usb_host_hs_utmi_p1_clk = {
- .name = "usb_host_hs_utmi_p1_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &utmi_p1_gfclk,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel utmi_p2_gfclk_sel[] = {
- { .parent = &init_60m_fclk, .rates = div_1_0_rates },
- { .parent = &xclk60mhsp2_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk utmi_p2_gfclk = {
- .name = "utmi_p2_gfclk",
- .parent = &init_60m_fclk,
- .clksel = utmi_p2_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_UTMI_P2_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk usb_host_hs_utmi_p2_clk = {
- .name = "usb_host_hs_utmi_p2_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &utmi_p2_gfclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_utmi_p3_clk = {
- .name = "usb_host_hs_utmi_p3_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic480m_p1_clk = {
- .name = "usb_host_hs_hsic480m_p1_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &dpll_usb_m2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic60m_p1_clk = {
- .name = "usb_host_hs_hsic60m_p1_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic60m_p2_clk = {
- .name = "usb_host_hs_hsic60m_p2_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic480m_p2_clk = {
- .name = "usb_host_hs_hsic480m_p2_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &dpll_usb_m2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_func48mclk = {
- .name = "usb_host_hs_func48mclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &func_48mc_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_host_hs_fck = {
- .name = "usb_host_hs_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel otg_60m_gfclk_sel[] = {
- { .parent = &utmi_phy_clkout_ck, .rates = div_1_0_rates },
- { .parent = &xclk60motg_ck, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk otg_60m_gfclk = {
- .name = "otg_60m_gfclk",
- .parent = &utmi_phy_clkout_ck,
- .clksel = otg_60m_gfclk_sel,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_60M_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk usb_otg_hs_xclk = {
- .name = "usb_otg_hs_xclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_XCLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &otg_60m_gfclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_otg_hs_ick = {
- .name = "usb_otg_hs_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_init_clkdm",
- .parent = &l3_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_phy_cm_clk32k = {
- .name = "usb_phy_cm_clk32k",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_ALWON_USBPHY_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_CLK32K_SHIFT,
- .clkdm_name = "l4_ao_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_tll_hs_usb_ch2_clk = {
- .name = "usb_tll_hs_usb_ch2_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_tll_hs_usb_ch0_clk = {
- .name = "usb_tll_hs_usb_ch0_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_tll_hs_usb_ch1_clk = {
- .name = "usb_tll_hs_usb_ch1_clk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT,
- .clkdm_name = "l3_init_clkdm",
- .parent = &init_60m_fclk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usb_tll_hs_ick = {
- .name = "usb_tll_hs_ick",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_init_clkdm",
- .parent = &l4_div_ck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate div2_14to18_rates[] = {
- { .div = 14, .val = 0, .flags = RATE_IN_4430 },
- { .div = 18, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel usim_fclk_div[] = {
- { .parent = &dpll_per_m4x2_ck, .rates = div2_14to18_rates },
- { .parent = NULL },
-};
-
-static struct clk usim_ck = {
- .name = "usim_ck",
- .parent = &dpll_per_m4x2_ck,
- .clksel = usim_fclk_div,
- .clksel_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_DIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk usim_fclk = {
- .name = "usim_fclk",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
- .enable_bit = OMAP4430_OPTFCLKEN_FCLK_SHIFT,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &usim_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usim_fck = {
- .name = "usim_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wd_timer2_fck = {
- .name = "wd_timer2_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "l4_wkup_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk wd_timer3_fck = {
- .name = "wd_timer3_fck",
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
- .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
- .clkdm_name = "abe_clkdm",
- .parent = &sys_32k_ck,
- .recalc = &followparent_recalc,
-};
-
-/* Remaining optional clocks */
-static const struct clksel stm_clk_div_div[] = {
- { .parent = &pmd_stm_clock_mux_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-static struct clk stm_clk_div_ck = {
- .name = "stm_clk_div_ck",
- .parent = &pmd_stm_clock_mux_ck,
- .clksel = stm_clk_div_div,
- .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_PMD_STM_CLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel trace_clk_div_div[] = {
- { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
- { .parent = NULL },
-};
-
-static struct clk trace_clk_div_ck = {
- .name = "trace_clk_div_ck",
- .parent = &pmd_trace_clk_mux_ck,
- .clkdm_name = "emu_sys_clkdm",
- .clksel = trace_clk_div_div,
- .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
- .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-/* SCRM aux clk nodes */
-
-static const struct clksel auxclk_src_sel[] = {
- { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
- { .parent = &dpll_core_m3x2_ck, .rates = div_1_1_rates },
- { .parent = &dpll_per_m3x2_ck, .rates = div_1_2_rates },
- { .parent = NULL },
-};
-
-static const struct clksel_rate div16_1to16_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 2, .val = 1, .flags = RATE_IN_4430 },
- { .div = 3, .val = 2, .flags = RATE_IN_4430 },
- { .div = 4, .val = 3, .flags = RATE_IN_4430 },
- { .div = 5, .val = 4, .flags = RATE_IN_4430 },
- { .div = 6, .val = 5, .flags = RATE_IN_4430 },
- { .div = 7, .val = 6, .flags = RATE_IN_4430 },
- { .div = 8, .val = 7, .flags = RATE_IN_4430 },
- { .div = 9, .val = 8, .flags = RATE_IN_4430 },
- { .div = 10, .val = 9, .flags = RATE_IN_4430 },
- { .div = 11, .val = 10, .flags = RATE_IN_4430 },
- { .div = 12, .val = 11, .flags = RATE_IN_4430 },
- { .div = 13, .val = 12, .flags = RATE_IN_4430 },
- { .div = 14, .val = 13, .flags = RATE_IN_4430 },
- { .div = 15, .val = 14, .flags = RATE_IN_4430 },
- { .div = 16, .val = 15, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
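As the div16_1to16_rates table above shows, the SCRM CLKDIV field for the aux clocks encodes the divisor minus one, so the output rate is simply parent / (field + 1). A one-line sketch of that decode:

/* SCRM aux clock divider decode: CLKDIV field 0..15 maps to /1../16. */
static inline unsigned int sketch_auxclk_divisor(unsigned int clkdiv_field)
{
	return clkdiv_field + 1;
}
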
-static struct clk auxclk0_src_ck = {
- .name = "auxclk0_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK0,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK0,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk0_sel[] = {
- { .parent = &auxclk0_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk0_ck = {
- .name = "auxclk0_ck",
- .parent = &auxclk0_src_ck,
- .clksel = auxclk0_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK0,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk auxclk1_src_ck = {
- .name = "auxclk1_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK1,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK1,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk1_sel[] = {
- { .parent = &auxclk1_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk1_ck = {
- .name = "auxclk1_ck",
- .parent = &auxclk1_src_ck,
- .clksel = auxclk1_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK1,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk auxclk2_src_ck = {
- .name = "auxclk2_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK2,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK2,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk2_sel[] = {
- { .parent = &auxclk2_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk2_ck = {
- .name = "auxclk2_ck",
- .parent = &auxclk2_src_ck,
- .clksel = auxclk2_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK2,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk auxclk3_src_ck = {
- .name = "auxclk3_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK3,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK3,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk3_sel[] = {
- { .parent = &auxclk3_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk3_ck = {
- .name = "auxclk3_ck",
- .parent = &auxclk3_src_ck,
- .clksel = auxclk3_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK3,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk auxclk4_src_ck = {
- .name = "auxclk4_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK4,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK4,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk4_sel[] = {
- { .parent = &auxclk4_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk4_ck = {
- .name = "auxclk4_ck",
- .parent = &auxclk4_src_ck,
- .clksel = auxclk4_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK4,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static struct clk auxclk5_src_ck = {
- .name = "auxclk5_src_ck",
- .parent = &sys_clkin_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_omap2_dflt,
- .clksel = auxclk_src_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK5,
- .clksel_mask = OMAP4_SRCSELECT_MASK,
- .recalc = &omap2_clksel_recalc,
- .enable_reg = OMAP4_SCRM_AUXCLK5,
- .enable_bit = OMAP4_ENABLE_SHIFT,
-};
-
-static const struct clksel auxclk5_sel[] = {
- { .parent = &auxclk5_src_ck, .rates = div16_1to16_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclk5_ck = {
- .name = "auxclk5_ck",
- .parent = &auxclk5_src_ck,
- .clksel = auxclk5_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLK5,
- .clksel_mask = OMAP4_CLKDIV_MASK,
- .ops = &clkops_null,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
-static const struct clksel auxclkreq_sel[] = {
- { .parent = &auxclk0_ck, .rates = div_1_0_rates },
- { .parent = &auxclk1_ck, .rates = div_1_1_rates },
- { .parent = &auxclk2_ck, .rates = div_1_2_rates },
- { .parent = &auxclk3_ck, .rates = div_1_3_rates },
- { .parent = &auxclk4_ck, .rates = div_1_4_rates },
- { .parent = &auxclk5_ck, .rates = div_1_5_rates },
- { .parent = NULL },
-};
-
-static struct clk auxclkreq0_ck = {
- .name = "auxclkreq0_ck",
- .parent = &auxclk0_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ0,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk auxclkreq1_ck = {
- .name = "auxclkreq1_ck",
- .parent = &auxclk1_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ1,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk auxclkreq2_ck = {
- .name = "auxclkreq2_ck",
- .parent = &auxclk2_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ2,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk auxclkreq3_ck = {
- .name = "auxclkreq3_ck",
- .parent = &auxclk3_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ3,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk auxclkreq4_ck = {
- .name = "auxclkreq4_ck",
- .parent = &auxclk4_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ4,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk auxclkreq5_ck = {
- .name = "auxclkreq5_ck",
- .parent = &auxclk5_ck,
- .init = &omap2_init_clksel_parent,
- .ops = &clkops_null,
- .clksel = auxclkreq_sel,
- .clksel_reg = OMAP4_SCRM_AUXCLKREQ5,
- .clksel_mask = OMAP4_MAPPING_MASK,
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * clkdev
- */
-
-static struct omap_clk omap44xx_clks[] = {
- CLK(NULL, "extalt_clkin_ck", &extalt_clkin_ck, CK_443X),
- CLK(NULL, "pad_clks_ck", &pad_clks_ck, CK_443X),
- CLK(NULL, "pad_slimbus_core_clks_ck", &pad_slimbus_core_clks_ck, CK_443X),
- CLK(NULL, "secure_32k_clk_src_ck", &secure_32k_clk_src_ck, CK_443X),
- CLK(NULL, "slimbus_clk", &slimbus_clk, CK_443X),
- CLK(NULL, "sys_32k_ck", &sys_32k_ck, CK_443X),
- CLK(NULL, "virt_12000000_ck", &virt_12000000_ck, CK_443X),
- CLK(NULL, "virt_13000000_ck", &virt_13000000_ck, CK_443X),
- CLK(NULL, "virt_16800000_ck", &virt_16800000_ck, CK_443X),
- CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_443X),
- CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_443X),
- CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_443X),
- CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_443X),
- CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_443X),
- CLK(NULL, "tie_low_clock_ck", &tie_low_clock_ck, CK_443X),
- CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_443X),
- CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_443X),
- CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_443X),
- CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_443X),
- CLK(NULL, "abe_dpll_bypass_clk_mux_ck", &abe_dpll_bypass_clk_mux_ck, CK_443X),
- CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_443X),
- CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_443X),
- CLK(NULL, "dpll_abe_x2_ck", &dpll_abe_x2_ck, CK_443X),
- CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_443X),
- CLK(NULL, "abe_24m_fclk", &abe_24m_fclk, CK_443X),
- CLK(NULL, "abe_clk", &abe_clk, CK_443X),
- CLK(NULL, "aess_fclk", &aess_fclk, CK_443X),
- CLK(NULL, "dpll_abe_m3x2_ck", &dpll_abe_m3x2_ck, CK_443X),
- CLK(NULL, "core_hsd_byp_clk_mux_ck", &core_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_443X),
- CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_443X),
- CLK(NULL, "dpll_core_m6x2_ck", &dpll_core_m6x2_ck, CK_443X),
- CLK(NULL, "dbgclk_mux_ck", &dbgclk_mux_ck, CK_443X),
- CLK(NULL, "dpll_core_m2_ck", &dpll_core_m2_ck, CK_443X),
- CLK(NULL, "ddrphy_ck", &ddrphy_ck, CK_443X),
- CLK(NULL, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, CK_443X),
- CLK(NULL, "div_core_ck", &div_core_ck, CK_443X),
- CLK(NULL, "div_iva_hs_clk", &div_iva_hs_clk, CK_443X),
- CLK(NULL, "div_mpu_hs_clk", &div_mpu_hs_clk, CK_443X),
- CLK(NULL, "dpll_core_m4x2_ck", &dpll_core_m4x2_ck, CK_443X),
- CLK(NULL, "dll_clk_div_ck", &dll_clk_div_ck, CK_443X),
- CLK(NULL, "dpll_abe_m2_ck", &dpll_abe_m2_ck, CK_443X),
- CLK(NULL, "dpll_core_m3x2_ck", &dpll_core_m3x2_ck, CK_443X),
- CLK(NULL, "dpll_core_m7x2_ck", &dpll_core_m7x2_ck, CK_443X),
- CLK(NULL, "iva_hsd_byp_clk_mux_ck", &iva_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_iva_ck", &dpll_iva_ck, CK_443X),
- CLK(NULL, "dpll_iva_x2_ck", &dpll_iva_x2_ck, CK_443X),
- CLK(NULL, "dpll_iva_m4x2_ck", &dpll_iva_m4x2_ck, CK_443X),
- CLK(NULL, "dpll_iva_m5x2_ck", &dpll_iva_m5x2_ck, CK_443X),
- CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_443X),
- CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_443X),
- CLK(NULL, "per_hs_clk_div_ck", &per_hs_clk_div_ck, CK_443X),
- CLK(NULL, "per_hsd_byp_clk_mux_ck", &per_hsd_byp_clk_mux_ck, CK_443X),
- CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_443X),
- CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_443X),
- CLK(NULL, "dpll_per_x2_ck", &dpll_per_x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m3x2_ck", &dpll_per_m3x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m4x2_ck", &dpll_per_m4x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m6x2_ck", &dpll_per_m6x2_ck, CK_443X),
- CLK(NULL, "dpll_per_m7x2_ck", &dpll_per_m7x2_ck, CK_443X),
- CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_443X),
- CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_443X),
- CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_443X),
- CLK(NULL, "dpll_usb_m2_ck", &dpll_usb_m2_ck, CK_443X),
- CLK(NULL, "ducati_clk_mux_ck", &ducati_clk_mux_ck, CK_443X),
- CLK(NULL, "func_12m_fclk", &func_12m_fclk, CK_443X),
- CLK(NULL, "func_24m_clk", &func_24m_clk, CK_443X),
- CLK(NULL, "func_24mc_fclk", &func_24mc_fclk, CK_443X),
- CLK(NULL, "func_48m_fclk", &func_48m_fclk, CK_443X),
- CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_443X),
- CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_443X),
- CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_443X),
- CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_443X),
- CLK(NULL, "l3_div_ck", &l3_div_ck, CK_443X),
- CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
- CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
- CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
- CLK("smp_twd", NULL, &mpu_periphclk, CK_443X),
- CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
- CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
- CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
- CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_443X),
- CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_443X),
- CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_443X),
- CLK(NULL, "aes1_fck", &aes1_fck, CK_443X),
- CLK(NULL, "aes2_fck", &aes2_fck, CK_443X),
- CLK(NULL, "aess_fck", &aess_fck, CK_443X),
- CLK(NULL, "bandgap_fclk", &bandgap_fclk, CK_443X),
- CLK(NULL, "bandgap_ts_fclk", &bandgap_ts_fclk, CK_446X),
- CLK(NULL, "des3des_fck", &des3des_fck, CK_443X),
- CLK(NULL, "div_ts_ck", &div_ts_ck, CK_446X),
- CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
- CLK(NULL, "dmic_fck", &dmic_fck, CK_443X),
- CLK(NULL, "dsp_fck", &dsp_fck, CK_443X),
- CLK(NULL, "dss_sys_clk", &dss_sys_clk, CK_443X),
- CLK(NULL, "dss_tv_clk", &dss_tv_clk, CK_443X),
- CLK(NULL, "dss_48mhz_clk", &dss_48mhz_clk, CK_443X),
- CLK(NULL, "dss_dss_clk", &dss_dss_clk, CK_443X),
- CLK(NULL, "dss_fck", &dss_fck, CK_443X),
- CLK("omapdss_dss", "ick", &dss_fck, CK_443X),
- CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X),
- CLK(NULL, "emif1_fck", &emif1_fck, CK_443X),
- CLK(NULL, "emif2_fck", &emif2_fck, CK_443X),
- CLK(NULL, "fdif_fck", &fdif_fck, CK_443X),
- CLK(NULL, "fpka_fck", &fpka_fck, CK_443X),
- CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_443X),
- CLK(NULL, "gpio1_ick", &gpio1_ick, CK_443X),
- CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_443X),
- CLK(NULL, "gpio2_ick", &gpio2_ick, CK_443X),
- CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_443X),
- CLK(NULL, "gpio3_ick", &gpio3_ick, CK_443X),
- CLK(NULL, "gpio4_dbclk", &gpio4_dbclk, CK_443X),
- CLK(NULL, "gpio4_ick", &gpio4_ick, CK_443X),
- CLK(NULL, "gpio5_dbclk", &gpio5_dbclk, CK_443X),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_443X),
- CLK(NULL, "gpio6_dbclk", &gpio6_dbclk, CK_443X),
- CLK(NULL, "gpio6_ick", &gpio6_ick, CK_443X),
- CLK(NULL, "gpmc_ick", &gpmc_ick, CK_443X),
- CLK(NULL, "gpu_fck", &gpu_fck, CK_443X),
- CLK(NULL, "hdq1w_fck", &hdq1w_fck, CK_443X),
- CLK(NULL, "hsi_fck", &hsi_fck, CK_443X),
- CLK(NULL, "i2c1_fck", &i2c1_fck, CK_443X),
- CLK(NULL, "i2c2_fck", &i2c2_fck, CK_443X),
- CLK(NULL, "i2c3_fck", &i2c3_fck, CK_443X),
- CLK(NULL, "i2c4_fck", &i2c4_fck, CK_443X),
- CLK(NULL, "ipu_fck", &ipu_fck, CK_443X),
- CLK(NULL, "iss_ctrlclk", &iss_ctrlclk, CK_443X),
- CLK(NULL, "iss_fck", &iss_fck, CK_443X),
- CLK(NULL, "iva_fck", &iva_fck, CK_443X),
- CLK(NULL, "kbd_fck", &kbd_fck, CK_443X),
- CLK(NULL, "l3_instr_ick", &l3_instr_ick, CK_443X),
- CLK(NULL, "l3_main_3_ick", &l3_main_3_ick, CK_443X),
- CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_443X),
- CLK(NULL, "mcasp_fck", &mcasp_fck, CK_443X),
- CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_443X),
- CLK(NULL, "mcbsp1_fck", &mcbsp1_fck, CK_443X),
- CLK(NULL, "mcbsp2_sync_mux_ck", &mcbsp2_sync_mux_ck, CK_443X),
- CLK(NULL, "mcbsp2_fck", &mcbsp2_fck, CK_443X),
- CLK(NULL, "mcbsp3_sync_mux_ck", &mcbsp3_sync_mux_ck, CK_443X),
- CLK(NULL, "mcbsp3_fck", &mcbsp3_fck, CK_443X),
- CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_443X),
- CLK(NULL, "mcbsp4_fck", &mcbsp4_fck, CK_443X),
- CLK(NULL, "mcpdm_fck", &mcpdm_fck, CK_443X),
- CLK(NULL, "mcspi1_fck", &mcspi1_fck, CK_443X),
- CLK(NULL, "mcspi2_fck", &mcspi2_fck, CK_443X),
- CLK(NULL, "mcspi3_fck", &mcspi3_fck, CK_443X),
- CLK(NULL, "mcspi4_fck", &mcspi4_fck, CK_443X),
- CLK(NULL, "mmc1_fck", &mmc1_fck, CK_443X),
- CLK(NULL, "mmc2_fck", &mmc2_fck, CK_443X),
- CLK(NULL, "mmc3_fck", &mmc3_fck, CK_443X),
- CLK(NULL, "mmc4_fck", &mmc4_fck, CK_443X),
- CLK(NULL, "mmc5_fck", &mmc5_fck, CK_443X),
- CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
- CLK(NULL, "ocp2scp_usb_phy_ick", &ocp2scp_usb_phy_ick, CK_443X),
- CLK(NULL, "ocp_wp_noc_ick", &ocp_wp_noc_ick, CK_443X),
- CLK(NULL, "rng_ick", &rng_ick, CK_443X),
- CLK("omap_rng", "ick", &rng_ick, CK_443X),
- CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
- CLK(NULL, "sl2if_ick", &sl2if_ick, CK_443X),
- CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
- CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
- CLK(NULL, "slimbus1_fclk_2", &slimbus1_fclk_2, CK_443X),
- CLK(NULL, "slimbus1_slimbus_clk", &slimbus1_slimbus_clk, CK_443X),
- CLK(NULL, "slimbus1_fck", &slimbus1_fck, CK_443X),
- CLK(NULL, "slimbus2_fclk_1", &slimbus2_fclk_1, CK_443X),
- CLK(NULL, "slimbus2_fclk_0", &slimbus2_fclk_0, CK_443X),
- CLK(NULL, "slimbus2_slimbus_clk", &slimbus2_slimbus_clk, CK_443X),
- CLK(NULL, "slimbus2_fck", &slimbus2_fck, CK_443X),
- CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_443X),
- CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_443X),
- CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_443X),
- CLK(NULL, "timer1_fck", &timer1_fck, CK_443X),
- CLK(NULL, "timer10_fck", &timer10_fck, CK_443X),
- CLK(NULL, "timer11_fck", &timer11_fck, CK_443X),
- CLK(NULL, "timer2_fck", &timer2_fck, CK_443X),
- CLK(NULL, "timer3_fck", &timer3_fck, CK_443X),
- CLK(NULL, "timer4_fck", &timer4_fck, CK_443X),
- CLK(NULL, "timer5_fck", &timer5_fck, CK_443X),
- CLK(NULL, "timer6_fck", &timer6_fck, CK_443X),
- CLK(NULL, "timer7_fck", &timer7_fck, CK_443X),
- CLK(NULL, "timer8_fck", &timer8_fck, CK_443X),
- CLK(NULL, "timer9_fck", &timer9_fck, CK_443X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_443X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
- CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
- CLK("usbhs_omap", "fs_fck", &usb_host_fs_fck, CK_443X),
- CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
- CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
- CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p2_clk", &usb_host_hs_utmi_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_utmi_p3_clk", &usb_host_hs_utmi_p3_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic480m_p1_clk", &usb_host_hs_hsic480m_p1_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic60m_p1_clk", &usb_host_hs_hsic60m_p1_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
- CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
- CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
- CLK("usbhs_omap", "hs_fck", &usb_host_hs_fck, CK_443X),
- CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
- CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
- CLK(NULL, "usb_otg_hs_ick", &usb_otg_hs_ick, CK_443X),
- CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
- CLK(NULL, "usb_phy_cm_clk32k", &usb_phy_cm_clk32k, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
- CLK("usbhs_omap", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
- CLK("usbhs_tll", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
- CLK(NULL, "usim_ck", &usim_ck, CK_443X),
- CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
- CLK(NULL, "usim_fck", &usim_fck, CK_443X),
- CLK(NULL, "wd_timer2_fck", &wd_timer2_fck, CK_443X),
- CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_443X),
- CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
- CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
- CLK(NULL, "auxclk0_src_ck", &auxclk0_src_ck, CK_443X),
- CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_443X),
- CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_443X),
- CLK(NULL, "auxclk1_src_ck", &auxclk1_src_ck, CK_443X),
- CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_443X),
- CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_443X),
- CLK(NULL, "auxclk2_src_ck", &auxclk2_src_ck, CK_443X),
- CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_443X),
- CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_443X),
- CLK(NULL, "auxclk3_src_ck", &auxclk3_src_ck, CK_443X),
- CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_443X),
- CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_443X),
- CLK(NULL, "auxclk4_src_ck", &auxclk4_src_ck, CK_443X),
- CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_443X),
- CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_443X),
- CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck, CK_443X),
- CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_443X),
- CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_443X),
- CLK("omap-gpmc", "fck", &dummy_ck, CK_443X),
- CLK("omap_i2c.1", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.2", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.3", "ick", &dummy_ck, CK_443X),
- CLK("omap_i2c.4", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "mailboxes_ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.3", "ick", &dummy_ck, CK_443X),
- CLK("omap_hsmmc.4", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_443X),
- CLK("omap-mcbsp.4", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.1", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
- CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
- CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
- CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_443X),
- CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_443X),
- CLK("usbhs_tll", "usbtll_fck", &dummy_ck, CK_443X),
- CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "timer_32k_ck", &sys_32k_ck, CK_443X),
- /* TODO: Remove "omap_timer.X" aliases once DT migration is complete */
- CLK("omap_timer.1", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.2", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.3", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.4", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.9", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.10", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.11", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.5", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.6", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.7", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.8", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4a318000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("48032000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("48034000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("48036000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("4803e000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("48086000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("48088000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("49038000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903a000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903c000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903e000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK(NULL, "cpufreq_ck", &dpll_mpu_ck, CK_443X),
-};
-
-int __init omap4xxx_clk_init(void)
-{
- struct omap_clk *c;
- u32 cpu_clkflg;
-
- if (cpu_is_omap443x()) {
- cpu_mask = RATE_IN_4430;
- cpu_clkflg = CK_443X;
- } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
- cpu_mask = RATE_IN_4460 | RATE_IN_4430;
- cpu_clkflg = CK_446X | CK_443X;
-
- if (cpu_is_omap447x())
- pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
- } else {
- return 0;
- }
-
- /*
- * Must stay commented until all OMAP SoC drivers are
- * converted to runtime PM, or drivers may start crashing
- *
- * omap2_clk_disable_clkdm_control();
- */
-
- for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
- c++)
- clk_preinit(c->lk.clk);
-
- for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
- c++)
- if (c->cpu & cpu_clkflg) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- /* Disable autoidle on all clocks; let the PM code enable it later */
- omap_clk_disable_autoidle_all();
-
- recalculate_root_clocks();
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- return 0;
-}
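A quick aside on how the clkdev table above is consumed (a minimal sketch, not part of the patch): every CLK(dev_id, con_id, clk, flags) entry becomes a clkdev lookup, so a driver fetches its clock with clk_get() against those two strings. The device below is assumed to be the "omap_hsmmc.0" platform device named in the table; error handling is trimmed for brevity.

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Matches CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X) above. */
	static int example_get_mmc_iclk(struct device *dev)
	{
		struct clk *ick = clk_get(dev, "ick");

		if (IS_ERR(ick))
			return PTR_ERR(ick);

		/* For the dummy interface clock this is a successful no-op. */
		return clk_prepare_enable(ick);
	}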
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index b9f3ba68148..ef4d21bfb96 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -16,6 +16,7 @@
* OMAP3xxx clock definition files.
*/
+#include <linux/clk-private.h>
#include "clock.h"
/* clksel_rate data common to 24xx/343x */
@@ -52,6 +53,13 @@ const struct clksel_rate div_1_0_rates[] = {
{ .div = 0 },
};
+const struct clksel_rate div3_1to4_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
const struct clksel_rate div_1_1_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
{ .div = 0 },
@@ -109,14 +117,10 @@ const struct clksel_rate div31_1to31_rates[] = {
/* Clocks shared between various OMAP SoCs */
-struct clk virt_19200000_ck = {
- .name = "virt_19200000_ck",
- .ops = &clkops_null,
- .rate = 19200000,
-};
+static struct clk_ops dummy_ck_ops = {};
-struct clk virt_26000000_ck = {
- .name = "virt_26000000_ck",
- .ops = &clkops_null,
- .rate = 26000000,
+struct clk dummy_ck = {
+ .name = "dummy_clk",
+ .ops = &dummy_ck_ops,
+ .flags = CLK_IS_BASIC,
};
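One detail worth noting in the replacement above (a sketch, assuming to_clk_hw_omap() is a container_of()-style cast): the CLK_IS_BASIC flag marks dummy_ck as a plain struct clk, so OMAP code that walks the clock tree, such as the clkoutx2 recalc change later in this diff, can skip it instead of casting it to struct clk_hw_omap.

	#include <linux/clk-provider.h>

	/* Hypothetical predicate mirroring that check. */
	static bool example_is_omap_hw_clk(struct clk *clk)
	{
		return !(__clk_get_flags(clk) & CLK_IS_BASIC);
	}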
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 64e50465a4b..7faf82d4e85 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/limits.h>
#include <linux/err.h>
+#include <linux/clk-provider.h>
#include <linux/io.h>
@@ -947,35 +948,6 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
return 0;
}
-static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
-{
- unsigned long flags;
-
- if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
- return -EINVAL;
-
- spin_lock_irqsave(&clkdm->lock, flags);
-
- if (atomic_read(&clkdm->usecount) == 0) {
- spin_unlock_irqrestore(&clkdm->lock, flags);
- WARN_ON(1); /* underflow */
- return -ERANGE;
- }
-
- if (atomic_dec_return(&clkdm->usecount) > 0) {
- spin_unlock_irqrestore(&clkdm->lock, flags);
- return 0;
- }
-
- arch_clkdm->clkdm_clk_disable(clkdm);
- pwrdm_state_switch(clkdm->pwrdm.ptr);
- spin_unlock_irqrestore(&clkdm->lock, flags);
-
- pr_debug("clockdomain: %s: disabled\n", clkdm->name);
-
- return 0;
-}
-
/**
* clkdm_clk_enable - add an enabled downstream clock to this clkdm
* @clkdm: struct clockdomain *
@@ -1018,15 +990,38 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
*/
int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
{
- /*
- * XXX Rewrite this code to maintain a list of enabled
- * downstream clocks for debugging purposes?
- */
+ unsigned long flags;
- if (!clk)
+ if (!clkdm || !clk || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
return -EINVAL;
- return _clkdm_clk_hwmod_disable(clkdm);
+ spin_lock_irqsave(&clkdm->lock, flags);
+
+ /* corner case: disabling unused clocks */
+ if ((__clk_get_enable_count(clk) == 0) &&
+ (atomic_read(&clkdm->usecount) == 0))
+ goto ccd_exit;
+
+ if (atomic_read(&clkdm->usecount) == 0) {
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ WARN_ON(1); /* underflow */
+ return -ERANGE;
+ }
+
+ if (atomic_dec_return(&clkdm->usecount) > 0) {
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return 0;
+ }
+
+ arch_clkdm->clkdm_clk_disable(clkdm);
+ pwrdm_state_switch(clkdm->pwrdm.ptr);
+
+ pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+ccd_exit:
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ return 0;
}
/**
@@ -1077,6 +1072,8 @@ int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh)
*/
int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
{
+ unsigned long flags;
+
/* The clkdm attribute does not exist yet prior OMAP4 */
if (cpu_is_omap24xx() || cpu_is_omap34xx())
return 0;
@@ -1086,9 +1083,28 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
* downstream hwmods for debugging purposes?
*/
- if (!oh)
+ if (!clkdm || !oh || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
return -EINVAL;
- return _clkdm_clk_hwmod_disable(clkdm);
+ spin_lock_irqsave(&clkdm->lock, flags);
+
+ if (atomic_read(&clkdm->usecount) == 0) {
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ WARN_ON(1); /* underflow */
+ return -ERANGE;
+ }
+
+ if (atomic_dec_return(&clkdm->usecount) > 0) {
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+ return 0;
+ }
+
+ arch_clkdm->clkdm_clk_disable(clkdm);
+ pwrdm_state_switch(clkdm->pwrdm.ptr);
+ spin_unlock_irqrestore(&clkdm->lock, flags);
+
+ pr_debug("clockdomain: %s: disabled\n", clkdm->name);
+
+ return 0;
}
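A note on the "corner case" branch added to clkdm_clk_disable() above (the call path described here is an assumption, not shown in this patch): a late cleanup pass that gates never-enabled clocks can reach clkdm_clk_disable() while both the clock's enable count and the clockdomain usecount are still zero, and the early goto keeps that path from tripping the usecount-underflow WARN_ON(). A hypothetical loop showing how the zero/zero case arises:

	#include <linux/clk-provider.h>

	static void example_disable_unused(struct clockdomain *clkdm,
					   struct clk **clks, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (__clk_get_enable_count(clks[i]) == 0)
				/* clkdm->usecount may also be 0 here */
				clkdm_clk_disable(clkdm, clks[i]);
	}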
diff --git a/arch/arm/mach-omap2/cm-regbits-24xx.h b/arch/arm/mach-omap2/cm-regbits-24xx.h
index 11eaf16880c..669ef51b17a 100644
--- a/arch/arm/mach-omap2/cm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-24xx.h
@@ -59,6 +59,7 @@
/* CM_CLKSEL_MPU */
#define OMAP24XX_CLKSEL_MPU_SHIFT 0
#define OMAP24XX_CLKSEL_MPU_MASK (0x1f << 0)
+#define OMAP24XX_CLKSEL_MPU_WIDTH 5
/* CM_CLKSTCTRL_MPU */
#define OMAP24XX_AUTOSTATE_MPU_SHIFT 0
@@ -237,8 +238,10 @@
#define OMAP24XX_CLKSEL_DSS1_MASK (0x1f << 8)
#define OMAP24XX_CLKSEL_L4_SHIFT 5
#define OMAP24XX_CLKSEL_L4_MASK (0x3 << 5)
+#define OMAP24XX_CLKSEL_L4_WIDTH 2
#define OMAP24XX_CLKSEL_L3_SHIFT 0
#define OMAP24XX_CLKSEL_L3_MASK (0x1f << 0)
+#define OMAP24XX_CLKSEL_L3_WIDTH 5
/* CM_CLKSEL2_CORE */
#define OMAP24XX_CLKSEL_GPT12_SHIFT 22
@@ -363,8 +366,10 @@
#define OMAP24XX_DPLL_DIV_MASK (0xf << 8)
#define OMAP24XX_54M_SOURCE_SHIFT 5
#define OMAP24XX_54M_SOURCE_MASK (1 << 5)
+#define OMAP24XX_54M_SOURCE_WIDTH 1
#define OMAP2430_96M_SOURCE_SHIFT 4
#define OMAP2430_96M_SOURCE_MASK (1 << 4)
+#define OMAP2430_96M_SOURCE_WIDTH 1
#define OMAP24XX_48M_SOURCE_SHIFT 3
#define OMAP24XX_48M_SOURCE_MASK (1 << 3)
#define OMAP2430_ALTCLK_SOURCE_SHIFT 0
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 59598ffd878..adf78d32580 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -81,6 +81,7 @@
/* CM_CLKSEL1_PLL_IVA2 */
#define OMAP3430_IVA2_CLK_SRC_SHIFT 19
#define OMAP3430_IVA2_CLK_SRC_MASK (0x7 << 19)
+#define OMAP3430_IVA2_CLK_SRC_WIDTH 3
#define OMAP3430_IVA2_DPLL_MULT_SHIFT 8
#define OMAP3430_IVA2_DPLL_MULT_MASK (0x7ff << 8)
#define OMAP3430_IVA2_DPLL_DIV_SHIFT 0
@@ -89,6 +90,7 @@
/* CM_CLKSEL2_PLL_IVA2 */
#define OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT 0
#define OMAP3430_IVA2_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
+#define OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH 5
/* CM_CLKSTCTRL_IVA2 */
#define OMAP3430_CLKTRCTRL_IVA2_SHIFT 0
@@ -118,6 +120,7 @@
/* CM_IDLEST_PLL_MPU */
#define OMAP3430_ST_MPU_CLK_SHIFT 0
#define OMAP3430_ST_MPU_CLK_MASK (1 << 0)
+#define OMAP3430_ST_MPU_CLK_WIDTH 1
/* CM_AUTOIDLE_PLL_MPU */
#define OMAP3430_AUTO_MPU_DPLL_SHIFT 0
@@ -126,6 +129,7 @@
/* CM_CLKSEL1_PLL_MPU */
#define OMAP3430_MPU_CLK_SRC_SHIFT 19
#define OMAP3430_MPU_CLK_SRC_MASK (0x7 << 19)
+#define OMAP3430_MPU_CLK_SRC_WIDTH 3
#define OMAP3430_MPU_DPLL_MULT_SHIFT 8
#define OMAP3430_MPU_DPLL_MULT_MASK (0x7ff << 8)
#define OMAP3430_MPU_DPLL_DIV_SHIFT 0
@@ -134,6 +138,7 @@
/* CM_CLKSEL2_PLL_MPU */
#define OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT 0
#define OMAP3430_MPU_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
+#define OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH 5
/* CM_CLKSTCTRL_MPU */
#define OMAP3430_CLKTRCTRL_MPU_SHIFT 0
@@ -345,10 +350,13 @@
#define OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK (0x3 << 4)
#define OMAP3430_CLKSEL_L4_SHIFT 2
#define OMAP3430_CLKSEL_L4_MASK (0x3 << 2)
+#define OMAP3430_CLKSEL_L4_WIDTH 2
#define OMAP3430_CLKSEL_L3_SHIFT 0
#define OMAP3430_CLKSEL_L3_MASK (0x3 << 0)
+#define OMAP3430_CLKSEL_L3_WIDTH 2
#define OMAP3630_CLKSEL_96M_SHIFT 12
#define OMAP3630_CLKSEL_96M_MASK (0x3 << 12)
+#define OMAP3630_CLKSEL_96M_WIDTH 2
/* CM_CLKSTCTRL_CORE */
#define OMAP3430ES1_CLKTRCTRL_D2D_SHIFT 4
@@ -452,6 +460,7 @@
#define OMAP3430ES2_CLKSEL_USIMOCP_MASK (0xf << 3)
#define OMAP3430_CLKSEL_RM_SHIFT 1
#define OMAP3430_CLKSEL_RM_MASK (0x3 << 1)
+#define OMAP3430_CLKSEL_RM_WIDTH 2
#define OMAP3430_CLKSEL_GPT1_SHIFT 0
#define OMAP3430_CLKSEL_GPT1_MASK (1 << 0)
@@ -520,14 +529,17 @@
/* Note that OMAP3430_CORE_DPLL_CLKOUT_DIV_MASK was (0x3 << 27) on 3430ES1 */
#define OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT 27
#define OMAP3430_CORE_DPLL_CLKOUT_DIV_MASK (0x1f << 27)
+#define OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH 5
#define OMAP3430_CORE_DPLL_MULT_SHIFT 16
#define OMAP3430_CORE_DPLL_MULT_MASK (0x7ff << 16)
#define OMAP3430_CORE_DPLL_DIV_SHIFT 8
#define OMAP3430_CORE_DPLL_DIV_MASK (0x7f << 8)
#define OMAP3430_SOURCE_96M_SHIFT 6
#define OMAP3430_SOURCE_96M_MASK (1 << 6)
+#define OMAP3430_SOURCE_96M_WIDTH 1
#define OMAP3430_SOURCE_54M_SHIFT 5
#define OMAP3430_SOURCE_54M_MASK (1 << 5)
+#define OMAP3430_SOURCE_54M_WIDTH 1
#define OMAP3430_SOURCE_48M_SHIFT 3
#define OMAP3430_SOURCE_48M_MASK (1 << 3)
@@ -545,7 +557,9 @@
/* CM_CLKSEL3_PLL */
#define OMAP3430_DIV_96M_SHIFT 0
#define OMAP3430_DIV_96M_MASK (0x1f << 0)
+#define OMAP3430_DIV_96M_WIDTH 5
#define OMAP3630_DIV_96M_MASK (0x3f << 0)
+#define OMAP3630_DIV_96M_WIDTH 6
/* CM_CLKSEL4_PLL */
#define OMAP3430ES2_PERIPH2_DPLL_MULT_SHIFT 8
@@ -556,12 +570,14 @@
/* CM_CLKSEL5_PLL */
#define OMAP3430ES2_DIV_120M_SHIFT 0
#define OMAP3430ES2_DIV_120M_MASK (0x1f << 0)
+#define OMAP3430ES2_DIV_120M_WIDTH 5
/* CM_CLKOUT_CTRL */
#define OMAP3430_CLKOUT2_EN_SHIFT 7
#define OMAP3430_CLKOUT2_EN_MASK (1 << 7)
#define OMAP3430_CLKOUT2_DIV_SHIFT 3
#define OMAP3430_CLKOUT2_DIV_MASK (0x7 << 3)
+#define OMAP3430_CLKOUT2_DIV_WIDTH 3
#define OMAP3430_CLKOUT2SOURCE_SHIFT 0
#define OMAP3430_CLKOUT2SOURCE_MASK (0x3 << 0)
@@ -592,10 +608,14 @@
/* CM_CLKSEL_DSS */
#define OMAP3430_CLKSEL_TV_SHIFT 8
#define OMAP3430_CLKSEL_TV_MASK (0x1f << 8)
+#define OMAP3430_CLKSEL_TV_WIDTH 5
#define OMAP3630_CLKSEL_TV_MASK (0x3f << 8)
+#define OMAP3630_CLKSEL_TV_WIDTH 6
#define OMAP3430_CLKSEL_DSS1_SHIFT 0
#define OMAP3430_CLKSEL_DSS1_MASK (0x1f << 0)
+#define OMAP3430_CLKSEL_DSS1_WIDTH 5
#define OMAP3630_CLKSEL_DSS1_MASK (0x3f << 0)
+#define OMAP3630_CLKSEL_DSS1_WIDTH 6
/* CM_SLEEPDEP_DSS specific bits */
@@ -623,7 +643,9 @@
/* CM_CLKSEL_CAM */
#define OMAP3430_CLKSEL_CAM_SHIFT 0
#define OMAP3430_CLKSEL_CAM_MASK (0x1f << 0)
+#define OMAP3430_CLKSEL_CAM_WIDTH 5
#define OMAP3630_CLKSEL_CAM_MASK (0x3f << 0)
+#define OMAP3630_CLKSEL_CAM_WIDTH 6
/* CM_SLEEPDEP_CAM specific bits */
@@ -721,21 +743,30 @@
/* CM_CLKSEL1_EMU */
#define OMAP3430_DIV_DPLL4_SHIFT 24
#define OMAP3430_DIV_DPLL4_MASK (0x1f << 24)
+#define OMAP3430_DIV_DPLL4_WIDTH 5
#define OMAP3630_DIV_DPLL4_MASK (0x3f << 24)
+#define OMAP3630_DIV_DPLL4_WIDTH 6
#define OMAP3430_DIV_DPLL3_SHIFT 16
#define OMAP3430_DIV_DPLL3_MASK (0x1f << 16)
+#define OMAP3430_DIV_DPLL3_WIDTH 5
#define OMAP3430_CLKSEL_TRACECLK_SHIFT 11
#define OMAP3430_CLKSEL_TRACECLK_MASK (0x7 << 11)
+#define OMAP3430_CLKSEL_TRACECLK_WIDTH 3
#define OMAP3430_CLKSEL_PCLK_SHIFT 8
#define OMAP3430_CLKSEL_PCLK_MASK (0x7 << 8)
+#define OMAP3430_CLKSEL_PCLK_WIDTH 3
#define OMAP3430_CLKSEL_PCLKX2_SHIFT 6
#define OMAP3430_CLKSEL_PCLKX2_MASK (0x3 << 6)
+#define OMAP3430_CLKSEL_PCLKX2_WIDTH 2
#define OMAP3430_CLKSEL_ATCLK_SHIFT 4
#define OMAP3430_CLKSEL_ATCLK_MASK (0x3 << 4)
+#define OMAP3430_CLKSEL_ATCLK_WIDTH 2
#define OMAP3430_TRACE_MUX_CTRL_SHIFT 2
#define OMAP3430_TRACE_MUX_CTRL_MASK (0x3 << 2)
+#define OMAP3430_TRACE_MUX_CTRL_WIDTH 2
#define OMAP3430_MUX_CTRL_SHIFT 0
#define OMAP3430_MUX_CTRL_MASK (0x3 << 0)
+#define OMAP3430_MUX_CTRL_WIDTH 2
/* CM_CLKSTCTRL_EMU */
#define OMAP3430_CLKTRCTRL_EMU_SHIFT 0
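The *_WIDTH companions being added alongside the existing *_MASK defines are presumably needed because the common clock framework helpers (clk_register_divider(), clk_register_mux(), and friends) take a shift plus a field width rather than a mask. A minimal sketch of the relationship, assuming <linux/bitops.h> semantics:

	#include <linux/bitops.h>

	/* For OMAP3430_CLKSEL_L4_MASK = 0x3 << 2 with shift 2,
	 * fls(0x3) = 2, matching the new OMAP3430_CLKSEL_L4_WIDTH. */
	static inline u8 regfield_width(u32 mask, u8 shift)
	{
		return fls(mask >> shift);
	}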
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.h b/arch/arm/mach-omap2/cm2xxx_3xxx.h
index 98e6b3c9cd9..bfbd16fe915 100644
--- a/arch/arm/mach-omap2/cm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.h
@@ -108,6 +108,7 @@ extern void omap2xxx_cm_apll96_disable(void);
/* CM_CLKSEL_GFX */
#define OMAP_CLKSEL_GFX_SHIFT 0
#define OMAP_CLKSEL_GFX_MASK (0x7 << 0)
+#define OMAP_CLKSEL_GFX_WIDTH 3
/* CM_ICLKEN_GFX */
#define OMAP_EN_GFX_SHIFT 0
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 5c2fd4863b2..2dabb9ecb98 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -16,8 +16,6 @@
#include <linux/init.h>
#include <linux/platform_data/dsp-omap.h>
-#include <plat/vram.h>
-
#include "common.h"
#include "omap-secure.h"
@@ -32,7 +30,6 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
- omap_vram_reserve_sdram_memblock();
omap_dsp_reserve_sdram_memblock();
omap_secure_ram_reserve_memblock();
omap_barrier_reserve_memblock();
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index 4ca8747b3cc..e6c328128a0 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -201,6 +201,7 @@
#define OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO 0x249
#define OMAP44XX_CONTROL_FUSE_CORE_OPP50 0x254
#define OMAP44XX_CONTROL_FUSE_CORE_OPP100 0x257
+#define OMAP44XX_CONTROL_FUSE_CORE_OPP100OV 0x25A
/* AM35XX only CONTROL_GENERAL register offsets */
#define AM35XX_CONTROL_MSUSPENDMUX_6 (OMAP2_CONTROL_GENERAL + 0x0038)
@@ -233,7 +234,7 @@
#define OMAP343X_PADCONF_ETK_D14 OMAP343X_PADCONF_ETK(16)
#define OMAP343X_PADCONF_ETK_D15 OMAP343X_PADCONF_ETK(17)
-/* 34xx GENERAL_WKUP regist offsets */
+/* 34xx GENERAL_WKUP register offsets */
#define OMAP343X_CONTROL_WKUP_DEBOBSMUX(i) (OMAP343X_CONTROL_GENERAL_WKUP + \
0x008 + (i))
#define OMAP343X_CONTROL_WKUP_DEBOBS0 (OMAP343X_CONTROL_GENERAL_WKUP + 0x008)
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index bca7a888570..22590dbe8f1 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -40,6 +40,8 @@ struct omap3_idle_statedata {
u32 core_state;
};
+static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+
static struct omap3_idle_statedata omap3_idle_data[] = {
{
.mpu_state = PWRDM_POWER_ON,
@@ -71,7 +73,7 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
},
};
-static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+/* Private functions */
static int __omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -260,11 +262,11 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
return ret;
}
-DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
+static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
-struct cpuidle_driver omap3_idle_driver = {
- .name = "omap3_idle",
- .owner = THIS_MODULE,
+static struct cpuidle_driver omap3_idle_driver = {
+ .name = "omap3_idle",
+ .owner = THIS_MODULE,
.states = {
{
.enter = omap3_enter_idle_bm,
@@ -327,6 +329,8 @@ struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};
+/* Public functions */
+
/**
* omap3_idle_init - Init routine for OMAP3 idle
*
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 288bee6cbb7..d639aef0ded 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -54,6 +54,8 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];
+/* Private functions */
+
/**
* omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
* @dev: cpuidle device
@@ -161,9 +163,19 @@ fail:
return index;
}
-DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+/*
+ * For each cpu, set up the broadcast timer because the local timers
+ * stop for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+ int cpu = smp_processor_id();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
+static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
-struct cpuidle_driver omap4_idle_driver = {
+static struct cpuidle_driver omap4_idle_driver = {
.name = "omap4_idle",
.owner = THIS_MODULE,
.en_core_tk_irqen = 1,
@@ -178,7 +190,7 @@ struct cpuidle_driver omap4_idle_driver = {
.desc = "MPUSS ON"
},
{
- /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440,
.target_residency = 960,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
@@ -200,15 +212,7 @@ struct cpuidle_driver omap4_idle_driver = {
.safe_state_index = 0,
};
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
- int cpu = smp_processor_id();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
+/* Public functions */
/**
* omap4_idle_init - Init routine for OMAP4 idle
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 3cff7dc514d..5e304d0719a 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -24,7 +24,7 @@
#include <asm/mach-types.h>
#include <asm/mach/map.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "iomap.h"
#include "omap_hwmod.h"
@@ -203,6 +203,16 @@ static struct resource omap3isp_resources[] = {
.flags = IORESOURCE_MEM,
},
{
+ .start = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE,
+ .end = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE + 3,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL,
+ .end = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL + 3,
+ .flags = IORESOURCE_MEM,
+ },
+ {
.start = 24 + OMAP_INTC_START,
.flags = IORESOURCE_IRQ,
}
@@ -216,7 +226,7 @@ static struct platform_device omap3isp_device = {
};
static struct omap_iommu_arch_data omap3_isp_iommu = {
- .name = "isp",
+ .name = "mmu_isp",
};
int omap3_init_camera(struct isp_platform_data *pdata)
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 38ba58c9762..cc75aaf6e76 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -102,17 +102,20 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
-static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
+static void __init omap4_tpd12s015_mux_pads(void)
{
- u32 reg;
- u16 control_i2c_1;
-
omap_mux_init_signal("hdmi_cec",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("hdmi_ddc_scl",
OMAP_PIN_INPUT_PULLUP);
omap_mux_init_signal("hdmi_ddc_sda",
OMAP_PIN_INPUT_PULLUP);
+}
+
+static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
+{
+ u32 reg;
+ u16 control_i2c_1;
/*
* CONTROL_I2C_1: HDMI_DDC_SDA_PULLUPRESX (bit 28) and
@@ -163,8 +166,10 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
int __init omap_hdmi_init(enum omap_hdmi_flags flags)
{
- if (cpu_is_omap44xx())
+ if (cpu_is_omap44xx()) {
omap4_hdmi_mux_pads(flags);
+ omap4_tpd12s015_mux_pads();
+ }
return 0;
}
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e5aba58da5d..612b9824987 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/device.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "soc.h"
#include "omap_hwmod.h"
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index eacf51f2bc2..0a02aab5df6 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -29,6 +29,7 @@
#include <linux/clkdev.h>
#include "soc.h"
+#include "clockdomain.h"
#include "clock.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
@@ -42,7 +43,7 @@
/* Private functions */
/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
-static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
+static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
const struct dpll_data *dd;
u32 v;
@@ -56,7 +57,7 @@ static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
}
/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
-static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
+static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
const struct dpll_data *dd;
int i = 0;
@@ -64,7 +65,7 @@ static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
const char *clk_name;
dd = clk->dpll_data;
- clk_name = __clk_get_name(clk);
+ clk_name = __clk_get_name(clk->hw.clk);
state <<= __ffs(dd->idlest_mask);
@@ -88,7 +89,7 @@ static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
}
/* From 3430 TRM ES2 4.7.6.2 */
-static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
+static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
unsigned long fint;
u16 f = 0;
@@ -133,14 +134,14 @@ static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
* locked successfully, return 0; if the DPLL did not lock in the time
* allotted, or DPLL3 was passed in, return -EINVAL.
*/
-static int _omap3_noncore_dpll_lock(struct clk *clk)
+static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
const struct dpll_data *dd;
u8 ai;
u8 state = 1;
int r = 0;
- pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk));
+ pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));
dd = clk->dpll_data;
state <<= __ffs(dd->idlest_mask);
@@ -178,7 +179,7 @@ done:
* DPLL3 was passed in, or the DPLL does not support low-power bypass,
* return -EINVAL.
*/
-static int _omap3_noncore_dpll_bypass(struct clk *clk)
+static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
int r;
u8 ai;
@@ -187,7 +188,7 @@ static int _omap3_noncore_dpll_bypass(struct clk *clk)
return -EINVAL;
pr_debug("clock: configuring DPLL %s for low-power bypass\n",
- __clk_get_name(clk));
+ __clk_get_name(clk->hw.clk));
ai = omap3_dpll_autoidle_read(clk);
@@ -210,14 +211,14 @@ static int _omap3_noncore_dpll_bypass(struct clk *clk)
* code. If DPLL3 was passed in, or the DPLL does not support
* low-power stop, return -EINVAL; otherwise, return 0.
*/
-static int _omap3_noncore_dpll_stop(struct clk *clk)
+static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
u8 ai;
if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
return -EINVAL;
- pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk));
+ pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));
ai = omap3_dpll_autoidle_read(clk);
@@ -241,11 +242,11 @@ static int _omap3_noncore_dpll_stop(struct clk *clk)
* XXX This code is not needed for 3430/AM35xx; can it be optimized
* out in non-multi-OMAP builds for those chips?
*/
-static void _lookup_dco(struct clk *clk, u8 *dco, u16 m, u8 n)
+static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
unsigned long fint, clkinp; /* watch out for overflow */
- clkinp = __clk_get_rate(__clk_get_parent(clk));
+ clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
fint = (clkinp / n) * m;
if (fint < 1000000000)
@@ -266,12 +267,12 @@ static void _lookup_dco(struct clk *clk, u8 *dco, u16 m, u8 n)
* XXX This code is not needed for 3430/AM35xx; can it be optimized
* out in non-multi-OMAP builds for those chips?
*/
-static void _lookup_sddiv(struct clk *clk, u8 *sd_div, u16 m, u8 n)
+static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
unsigned long clkinp, sd; /* watch out for overflow */
int mod1, mod2;
- clkinp = __clk_get_rate(__clk_get_parent(clk));
+ clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
/*
* target sigma-delta to near 250MHz
@@ -290,15 +291,13 @@ static void _lookup_sddiv(struct clk *clk, u8 *sd_div, u16 m, u8 n)
/*
* _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
- * @clk: struct clk * of DPLL to set
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- * @freqsel: FREQSEL value to set
+ * @clk: struct clk * of DPLL to set
+ * @freqsel: FREQSEL value to set
*
- * Program the DPLL with the supplied M, N values, and wait for the DPLL to
- * lock.. Returns -EINVAL upon error, or 0 upon success.
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
*/
-static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
struct dpll_data *dd = clk->dpll_data;
u8 dco, sd_div;
@@ -321,23 +320,45 @@ static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
/* Set DPLL multiplier, divider */
v = __raw_readl(dd->mult_div1_reg);
v &= ~(dd->mult_mask | dd->div1_mask);
- v |= m << __ffs(dd->mult_mask);
- v |= (n - 1) << __ffs(dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
/* Configure dco and sd_div for dplls that have these fields */
if (dd->dco_mask) {
- _lookup_dco(clk, &dco, m, n);
+ _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
v &= ~(dd->dco_mask);
v |= dco << __ffs(dd->dco_mask);
}
if (dd->sddiv_mask) {
- _lookup_sddiv(clk, &sd_div, m, n);
+ _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+ dd->last_rounded_n);
v &= ~(dd->sddiv_mask);
v |= sd_div << __ffs(dd->sddiv_mask);
}
__raw_writel(v, dd->mult_div1_reg);
+ /* Set 4X multiplier and low-power mode */
+ if (dd->m4xen_mask || dd->lpmode_mask) {
+ v = __raw_readl(dd->control_reg);
+
+ if (dd->m4xen_mask) {
+ if (dd->last_rounded_m4xen)
+ v |= dd->m4xen_mask;
+ else
+ v &= ~dd->m4xen_mask;
+ }
+
+ if (dd->lpmode_mask) {
+ if (dd->last_rounded_lpmode)
+ v |= dd->lpmode_mask;
+ else
+ v &= ~dd->lpmode_mask;
+ }
+
+ __raw_writel(v, dd->control_reg);
+ }
+
/* We let the clock framework set the other output dividers later */
/* REVISIT: Set ramp-up delay? */
@@ -355,8 +376,10 @@ static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
*
* Recalculate and propagate the DPLL rate.
*/
-unsigned long omap3_dpll_recalc(struct clk *clk)
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
return omap2_get_dpll_rate(clk);
}
@@ -376,8 +399,9 @@ unsigned long omap3_dpll_recalc(struct clk *clk)
* support low-power stop, or if the DPLL took too long to enter
* bypass or lock, return -EINVAL; otherwise, return 0.
*/
-int omap3_noncore_dpll_enable(struct clk *clk)
+int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int r;
struct dpll_data *dd;
struct clk *parent;
@@ -386,22 +410,26 @@ int omap3_noncore_dpll_enable(struct clk *clk)
if (!dd)
return -EINVAL;
- parent = __clk_get_parent(clk);
+ if (clk->clkdm) {
+ r = clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (r) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, __clk_get_name(hw->clk),
+ clk->clkdm->name, r);
+ return r;
+ }
+ }
- if (__clk_get_rate(clk) == __clk_get_rate(dd->clk_bypass)) {
+ parent = __clk_get_parent(hw->clk);
+
+ if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
WARN_ON(parent != dd->clk_bypass);
r = _omap3_noncore_dpll_bypass(clk);
} else {
WARN_ON(parent != dd->clk_ref);
r = _omap3_noncore_dpll_lock(clk);
}
- /*
- *FIXME: this is dubious - if clk->rate has changed, what about
- * propagating?
- */
- if (!r)
- clk->rate = (clk->recalc) ? clk->recalc(clk) :
- omap2_get_dpll_rate(clk);
return r;
}
@@ -413,9 +441,13 @@ int omap3_noncore_dpll_enable(struct clk *clk)
* Instructs a non-CORE DPLL to enter low-power stop. This function is
* intended for use in struct clkops. No return value.
*/
-void omap3_noncore_dpll_disable(struct clk *clk)
+void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
_omap3_noncore_dpll_stop(clk);
+ if (clk->clkdm)
+ clkdm_clk_disable(clk->clkdm, hw->clk);
}
@@ -432,80 +464,70 @@ void omap3_noncore_dpll_disable(struct clk *clk)
* target rate if it hasn't been done already, then program and lock
* the DPLL. Returns -EINVAL upon error, or 0 upon success.
*/
-int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct clk *new_parent = NULL;
- unsigned long hw_rate, bypass_rate;
u16 freqsel = 0;
struct dpll_data *dd;
int ret;
- if (!clk || !rate)
+ if (!hw || !rate)
return -EINVAL;
dd = clk->dpll_data;
if (!dd)
return -EINVAL;
- hw_rate = (clk->recalc) ? clk->recalc(clk) : omap2_get_dpll_rate(clk);
- if (rate == hw_rate)
- return 0;
+ __clk_prepare(dd->clk_bypass);
+ clk_enable(dd->clk_bypass);
+ __clk_prepare(dd->clk_ref);
+ clk_enable(dd->clk_ref);
- /*
- * Ensure both the bypass and ref clocks are enabled prior to
- * doing anything; we need the bypass clock running to reprogram
- * the DPLL.
- */
- omap2_clk_enable(dd->clk_bypass);
- omap2_clk_enable(dd->clk_ref);
-
- bypass_rate = __clk_get_rate(dd->clk_bypass);
- if (bypass_rate == rate &&
- (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);
+ if (__clk_get_rate(dd->clk_bypass) == rate &&
+ (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ pr_debug("%s: %s: set rate: entering bypass.\n",
+ __func__, __clk_get_name(hw->clk));
ret = _omap3_noncore_dpll_bypass(clk);
if (!ret)
new_parent = dd->clk_bypass;
} else {
if (dd->last_rounded_rate != rate)
- rate = clk->round_rate(clk, rate);
+ rate = __clk_round_rate(hw->clk, rate);
if (dd->last_rounded_rate == 0)
return -EINVAL;
/* No freqsel on OMAP4 and OMAP3630 */
- if (!soc_is_am33xx() && !cpu_is_omap44xx() && !cpu_is_omap3630()) {
+ if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
freqsel = _omap3_dpll_compute_freqsel(clk,
dd->last_rounded_n);
- if (!freqsel)
- WARN_ON(1);
+ WARN_ON(!freqsel);
}
- pr_debug("clock: %s: set rate: locking rate to %lu.\n",
- __clk_get_name(clk), rate);
+ pr_debug("%s: %s: set rate: locking rate to %lu.\n",
+ __func__, __clk_get_name(hw->clk), rate);
- ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
- dd->last_rounded_n, freqsel);
+ ret = omap3_noncore_dpll_program(clk, freqsel);
if (!ret)
new_parent = dd->clk_ref;
}
- if (!ret) {
- /*
- * Switch the parent clock in the hierarchy, and make sure
- * that the new parent's usecount is correct. Note: we
- * enable the new parent before disabling the old to avoid
- * any unnecessary hardware disable->enable transitions.
- */
- if (clk->usecount) {
- omap2_clk_enable(new_parent);
- omap2_clk_disable(clk->parent);
- }
- clk_reparent(clk, new_parent);
- clk->rate = rate;
- }
- omap2_clk_disable(dd->clk_ref);
- omap2_clk_disable(dd->clk_bypass);
+ /*
+ * FIXME - this is all wrong. common code handles reparenting and
+ * migrating prepare/enable counts. dplls should be a multiplexer
+ * clock and this should be a set_parent operation so that all of that
+ * stuff is inherited for free
+ */
+
+ if (!ret)
+ __clk_reparent(hw->clk, new_parent);
+
+ clk_disable(dd->clk_ref);
+ __clk_unprepare(dd->clk_ref);
+ clk_disable(dd->clk_bypass);
+ __clk_unprepare(dd->clk_bypass);
return 0;
}
@@ -520,7 +542,7 @@ int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
* -EINVAL if passed a null pointer or if the struct clk does not
* appear to refer to a DPLL.
*/
-u32 omap3_dpll_autoidle_read(struct clk *clk)
+u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
const struct dpll_data *dd;
u32 v;
@@ -549,7 +571,7 @@ u32 omap3_dpll_autoidle_read(struct clk *clk)
* OMAP3430. The DPLL will enter low-power stop when its downstream
* clocks are gated. No return value.
*/
-void omap3_dpll_allow_idle(struct clk *clk)
+void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
const struct dpll_data *dd;
u32 v;
@@ -559,11 +581,8 @@ void omap3_dpll_allow_idle(struct clk *clk)
dd = clk->dpll_data;
- if (!dd->autoidle_reg) {
- pr_debug("clock: DPLL %s: autoidle not supported\n",
- __clk_get_name(clk));
+ if (!dd->autoidle_reg)
return;
- }
/*
* REVISIT: CORE DPLL can optionally enter low-power bypass
@@ -583,7 +602,7 @@ void omap3_dpll_allow_idle(struct clk *clk)
*
* Disable DPLL automatic idle control. No return value.
*/
-void omap3_dpll_deny_idle(struct clk *clk)
+void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
const struct dpll_data *dd;
u32 v;
@@ -593,11 +612,8 @@ void omap3_dpll_deny_idle(struct clk *clk)
dd = clk->dpll_data;
- if (!dd->autoidle_reg) {
- pr_debug("clock: DPLL %s: autoidle not supported\n",
- __clk_get_name(clk));
+ if (!dd->autoidle_reg)
return;
- }
v = __raw_readl(dd->autoidle_reg);
v &= ~dd->autoidle_mask;
@@ -615,18 +631,25 @@ void omap3_dpll_deny_idle(struct clk *clk)
* Using parent clock DPLL data, look up DPLL state. If locked, set our
* rate to the dpll_clk * 2; otherwise, just use dpll_clk.
*/
-unsigned long omap3_clkoutx2_recalc(struct clk *clk)
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
{
const struct dpll_data *dd;
unsigned long rate;
u32 v;
- struct clk *pclk;
- unsigned long parent_rate;
+ struct clk_hw_omap *pclk = NULL;
+ struct clk *parent;
/* Walk up the parents of clk, looking for a DPLL */
- pclk = __clk_get_parent(clk);
- while (pclk && !pclk->dpll_data)
- pclk = __clk_get_parent(pclk);
+ do {
+ do {
+ parent = __clk_get_parent(hw->clk);
+ hw = __clk_get_hw(parent);
+ } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
+ if (!hw)
+ break;
+ pclk = to_clk_hw_omap(hw);
+ } while (pclk && !pclk->dpll_data);
/* clk does not have a DPLL as a parent? error in the clock data */
if (!pclk) {
@@ -638,7 +661,6 @@ unsigned long omap3_clkoutx2_recalc(struct clk *clk)
WARN_ON(!dd->enable_mask);
- parent_rate = __clk_get_rate(__clk_get_parent(clk));
v = __raw_readl(dd->control_reg) & dd->enable_mask;
v >>= __ffs(dd->enable_mask);
if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
@@ -649,15 +671,7 @@ unsigned long omap3_clkoutx2_recalc(struct clk *clk)
}
/* OMAP3/4 non-CORE DPLL clkops */
-
-const struct clkops clkops_omap3_noncore_dpll_ops = {
- .enable = omap3_noncore_dpll_enable,
- .disable = omap3_noncore_dpll_disable,
- .allow_idle = omap3_dpll_allow_idle,
- .deny_idle = omap3_dpll_deny_idle,
-};
-
-const struct clkops clkops_omap3_core_dpll_ops = {
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
.allow_idle = omap3_dpll_allow_idle,
.deny_idle = omap3_dpll_deny_idle,
};
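To show where the converted callbacks above would plug in, here is a hypothetical clk_ops table in the include/linux/clk-provider.h style; the real tables live in the OMAP clock data files and are not part of this hunk, so only the four callbacks visible above are referenced.

	static const struct clk_ops example_omap3_dpll_ops = {
		.enable		= omap3_noncore_dpll_enable,
		.disable	= omap3_noncore_dpll_disable,
		.recalc_rate	= omap3_dpll_recalc,
		.set_rate	= omap3_noncore_dpll_set_rate,
	};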
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index 5854da168a9..d28b0f72671 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -20,8 +20,17 @@
#include "clock44xx.h"
#include "cm-regbits-44xx.h"
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can be supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX 1000000
+#define OMAP4_DPLL_LP_FOUT_MAX 100000000
+
/* Supported only on OMAP4 */
-int omap4_dpllmx_gatectrl_read(struct clk *clk)
+int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk)
{
u32 v;
u32 mask;
@@ -40,7 +49,7 @@ int omap4_dpllmx_gatectrl_read(struct clk *clk)
return v;
}
-void omap4_dpllmx_allow_gatectrl(struct clk *clk)
+void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
{
u32 v;
u32 mask;
@@ -58,7 +67,7 @@ void omap4_dpllmx_allow_gatectrl(struct clk *clk)
__raw_writel(v, clk->clksel_reg);
}
-void omap4_dpllmx_deny_gatectrl(struct clk *clk)
+void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
{
u32 v;
u32 mask;
@@ -76,12 +85,37 @@ void omap4_dpllmx_deny_gatectrl(struct clk *clk)
__raw_writel(v, clk->clksel_reg);
}
-const struct clkops clkops_omap4_dpllmx_ops = {
+const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
.allow_idle = omap4_dpllmx_allow_gatectrl,
- .deny_idle = omap4_dpllmx_deny_gatectrl,
+ .deny_idle = omap4_dpllmx_deny_gatectrl,
};
/**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates if low-power mode can be enabled based upon the last
+ * multiplier and divider values calculated. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+ long fint, fout;
+
+ fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fout = fint * dd->last_rounded_m;
+
+ if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+ dd->last_rounded_lpmode = 1;
+ else
+ dd->last_rounded_lpmode = 0;
+}
+
+/**
* omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
* @clk: struct clk * of the DPLL to compute the rate for
*
@@ -90,8 +124,10 @@ const struct clkops clkops_omap4_dpllmx_ops = {
* OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers)
* upon success, or 0 upon error.
*/
-unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk)
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 v;
unsigned long rate;
struct dpll_data *dd;
@@ -123,9 +159,11 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk *clk)
* M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
* ~0 if an error occurred in omap2_dpll_round_rate().
*/
-long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate)
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate)
{
- u32 v;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct dpll_data *dd;
long r;
@@ -134,18 +172,31 @@ long omap4_dpll_regm4xen_round_rate(struct clk *clk, unsigned long target_rate)
dd = clk->dpll_data;
- /* regm4xen adds a multiplier of 4 to DPLL calculations */
- v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;
-
- if (v)
- target_rate = target_rate / OMAP4430_REGM4XEN_MULT;
-
- r = omap2_dpll_round_rate(clk, target_rate);
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First, try to compute the DPLL configuration for the
+ * target rate without using the 4X multiplier.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate, NULL);
+ if (r != ~0)
+ goto out;
+
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+ NULL);
if (r == ~0)
return r;
- if (v)
- clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+
+out:
+ omap4_dpll_lpmode_recalc(dd);
- return clk->dpll_data->last_rounded_rate;
+ return dd->last_rounded_rate;
}
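
The low-power decision added above reduces to two frequency comparisons on the internally computed fint and fout. A minimal standalone sketch of that check follows; the limits are passed in as parameters since only the FOUT limit is visible in this hunk, and nothing here is part of the patch itself.

/* Illustrative sketch only: mirrors the check in omap4_dpll_lpmode_recalc(). */
static int dpll_lp_allowed(unsigned long ref_rate, unsigned int last_n,
			   unsigned int last_m, unsigned long fint_max,
			   unsigned long fout_max)
{
	unsigned long fint = ref_rate / (last_n + 1);	/* DPLL internal clock */
	unsigned long fout = fint * last_m;		/* output before M-dividers */

	return (fint < fint_max) && (fout < fout_max);
}
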
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
index fce5aa3fff4..4c7566c7e24 100644
--- a/arch/arm/mach-omap2/drm.c
+++ b/arch/arm/mach-omap2/drm.c
@@ -27,7 +27,6 @@
#include "omap_device.h"
#include "omap_hwmod.h"
-#include <plat/cpu.h>
#if defined(CONFIG_DRM_OMAP) || (CONFIG_DRM_OMAP_MODULE)
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 679a0478644..4be5cfc81ab 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -31,8 +31,7 @@
#include <video/omap-panel-nokia-dsi.h>
#include <video/omap-panel-picodlp.h>
-#include <plat/cpu.h>
-
+#include "soc.h"
#include "dss-common.h"
#include "mux.h"
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/mach-omap2/fb.c
index 3a77b30f53d..d9bd965f6d0 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/mach-omap2/fb.c
@@ -1,6 +1,4 @@
/*
- * File: arch/arm/plat-omap/fb.c
- *
* Framebuffer device registration for TI OMAP platforms
*
* Copyright (C) 2006 Nokia Corporation
@@ -29,10 +27,11 @@
#include <linux/memblock.h>
#include <linux/io.h>
#include <linux/omapfb.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/map.h>
-#include <plat/cpu.h>
+#include "soc.h"
#ifdef CONFIG_OMAP2_VRFB
@@ -93,45 +92,7 @@ static int __init omap_init_vrfb(void)
arch_initcall(omap_init_vrfb);
#endif
-#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
-
-static bool omapfb_lcd_configured;
-static struct omapfb_platform_data omapfb_config;
-
-static u64 omap_fb_dma_mask = ~(u32)0;
-
-static struct platform_device omap_fb_device = {
- .name = "omapfb",
- .id = -1,
- .dev = {
- .dma_mask = &omap_fb_dma_mask,
- .coherent_dma_mask = ~(u32)0,
- .platform_data = &omapfb_config,
- },
- .num_resources = 0,
-};
-
-void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
-{
- omapfb_config.lcd = *config;
- omapfb_lcd_configured = true;
-}
-
-static int __init omap_init_fb(void)
-{
- /*
- * If the board file has not set the lcd config with
- * omapfb_set_lcd_config(), don't bother registering the omapfb device
- */
- if (!omapfb_lcd_configured)
- return 0;
-
- return platform_device_register(&omap_fb_device);
-}
-
-arch_initcall(omap_init_fb);
-
-#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
static u64 omap_fb_dma_mask = ~(u32)0;
static struct omapfb_platform_data omapfb_config;
@@ -141,7 +102,7 @@ static struct platform_device omap_fb_device = {
.id = -1,
.dev = {
.dma_mask = &omap_fb_dma_mask,
- .coherent_dma_mask = ~(u32)0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &omapfb_config,
},
.num_resources = 0,
@@ -154,10 +115,4 @@ static int __init omap_init_fb(void)
arch_initcall(omap_init_fb);
-#else
-
-void __init omapfb_set_lcd_config(const struct omap_lcd_config *config)
-{
-}
-
#endif
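
The coherent_dma_mask change above replaces the open-coded ~(u32)0 with the standard helper from <linux/dma-mapping.h>, which is defined roughly as follows and yields the same 32-bit mask:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
/* DMA_BIT_MASK(32) == 0xffffffff, i.e. the same value as ~(u32)0 */
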
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 8607735b3ab..db969a5c499 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -52,27 +52,27 @@ static int omap2_nand_gpmc_retime(
memset(&t, 0, sizeof(t));
t.sync_clk = gpmc_t->sync_clk;
- t.cs_on = gpmc_round_ns_to_ticks(gpmc_t->cs_on);
- t.adv_on = gpmc_round_ns_to_ticks(gpmc_t->adv_on);
+ t.cs_on = gpmc_t->cs_on;
+ t.adv_on = gpmc_t->adv_on;
/* Read */
- t.adv_rd_off = gpmc_round_ns_to_ticks(gpmc_t->adv_rd_off);
+ t.adv_rd_off = gpmc_t->adv_rd_off;
t.oe_on = t.adv_on;
- t.access = gpmc_round_ns_to_ticks(gpmc_t->access);
- t.oe_off = gpmc_round_ns_to_ticks(gpmc_t->oe_off);
- t.cs_rd_off = gpmc_round_ns_to_ticks(gpmc_t->cs_rd_off);
- t.rd_cycle = gpmc_round_ns_to_ticks(gpmc_t->rd_cycle);
+ t.access = gpmc_t->access;
+ t.oe_off = gpmc_t->oe_off;
+ t.cs_rd_off = gpmc_t->cs_rd_off;
+ t.rd_cycle = gpmc_t->rd_cycle;
/* Write */
- t.adv_wr_off = gpmc_round_ns_to_ticks(gpmc_t->adv_wr_off);
+ t.adv_wr_off = gpmc_t->adv_wr_off;
t.we_on = t.oe_on;
if (cpu_is_omap34xx()) {
- t.wr_data_mux_bus = gpmc_round_ns_to_ticks(gpmc_t->wr_data_mux_bus);
- t.wr_access = gpmc_round_ns_to_ticks(gpmc_t->wr_access);
+ t.wr_data_mux_bus = gpmc_t->wr_data_mux_bus;
+ t.wr_access = gpmc_t->wr_access;
}
- t.we_off = gpmc_round_ns_to_ticks(gpmc_t->we_off);
- t.cs_wr_off = gpmc_round_ns_to_ticks(gpmc_t->cs_wr_off);
- t.wr_cycle = gpmc_round_ns_to_ticks(gpmc_t->wr_cycle);
+ t.we_off = gpmc_t->we_off;
+ t.cs_wr_off = gpmc_t->cs_wr_off;
+ t.wr_cycle = gpmc_t->wr_cycle;
/* Configure GPMC */
if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index d102183ed9a..94a349e4dc9 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -33,7 +33,6 @@
static unsigned onenand_flags;
static unsigned latency;
-static int fclk_offset;
static struct omap_onenand_platform_data *gpmc_onenand_data;
@@ -50,6 +49,7 @@ static struct platform_device gpmc_onenand_device = {
static struct gpmc_timings omap2_onenand_calc_async_timings(void)
{
+ struct gpmc_device_timings dev_t;
struct gpmc_timings t;
const int t_cer = 15;
@@ -59,35 +59,24 @@ static struct gpmc_timings omap2_onenand_calc_async_timings(void)
const int t_aa = 76;
const int t_oe = 20;
const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_ds = 30;
const int t_wpl = 40;
const int t_wph = 30;
- memset(&t, 0, sizeof(t));
- t.sync_clk = 0;
- t.cs_on = 0;
- t.adv_on = 0;
-
- /* Read */
- t.adv_rd_off = gpmc_round_ns_to_ticks(max_t(int, t_avdp, t_cer));
- t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(t_aavdh);
- t.access = t.adv_on + gpmc_round_ns_to_ticks(t_aa);
- t.access = max_t(int, t.access, t.cs_on + gpmc_round_ns_to_ticks(t_ce));
- t.access = max_t(int, t.access, t.oe_on + gpmc_round_ns_to_ticks(t_oe));
- t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
- t.cs_rd_off = t.oe_off;
- t.rd_cycle = t.cs_rd_off + gpmc_round_ns_to_ticks(t_cez);
-
- /* Write */
- t.adv_wr_off = t.adv_rd_off;
- t.we_on = t.oe_on;
- if (cpu_is_omap34xx()) {
- t.wr_data_mux_bus = t.we_on;
- t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds);
- }
- t.we_off = t.we_on + gpmc_round_ns_to_ticks(t_wpl);
- t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph);
- t.wr_cycle = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez);
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ dev_t.mux = true;
+ dev_t.t_avdp_r = max_t(int, t_avdp, t_cer) * 1000;
+ dev_t.t_avdp_w = dev_t.t_avdp_r;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ dev_t.t_aa = t_aa * 1000;
+ dev_t.t_ce = t_ce * 1000;
+ dev_t.t_oe = t_oe * 1000;
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+
+ gpmc_calc_timings(&t, &dev_t);
return t;
}
@@ -173,18 +162,15 @@ static struct gpmc_timings
omap2_onenand_calc_sync_timings(struct omap_onenand_platform_data *cfg,
int freq)
{
+ struct gpmc_device_timings dev_t;
struct gpmc_timings t;
const int t_cer = 15;
const int t_avdp = 12;
const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_ds = 30;
const int t_wpl = 40;
const int t_wph = 30;
int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
- u32 reg;
- int div, fclk_offset_ns, gpmc_clk_ns;
- int ticks_cez;
- int cs = cfg->cs;
+ int div, gpmc_clk_ns;
if (cfg->flags & ONENAND_SYNC_READ)
onenand_flags = ONENAND_FLAG_SYNCREAD;
@@ -251,77 +237,35 @@ omap2_onenand_calc_sync_timings(struct omap_onenand_platform_data *cfg,
latency = 4;
/* Set synchronous read timings */
- memset(&t, 0, sizeof(t));
-
- if (div == 1) {
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
- reg |= (1 << 7);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg);
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3);
- reg |= (1 << 7);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg);
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4);
- reg |= (1 << 7);
- reg |= (1 << 23);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg);
- } else {
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
- reg &= ~(1 << 7);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg);
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3);
- reg &= ~(1 << 7);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg);
- reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4);
- reg &= ~(1 << 7);
- reg &= ~(1 << 23);
- gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg);
- }
+ memset(&dev_t, 0, sizeof(dev_t));
- t.sync_clk = min_gpmc_clk_period;
- t.cs_on = 0;
- t.adv_on = 0;
- fclk_offset_ns = gpmc_round_ns_to_ticks(max_t(int, t_ces, t_avds));
- fclk_offset = gpmc_ns_to_ticks(fclk_offset_ns);
- t.page_burst_access = gpmc_clk_ns;
-
- /* Read */
- t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh));
- t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach));
- /* Force at least 1 clk between AVD High to OE Low */
- if (t.oe_on <= t.adv_rd_off)
- t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(1);
- t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div);
- t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
- t.cs_rd_off = t.oe_off;
- ticks_cez = ((gpmc_ns_to_ticks(t_cez) + div - 1) / div) * div;
- t.rd_cycle = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div +
- ticks_cez);
-
- /* Write */
+ dev_t.mux = true;
+ dev_t.sync_read = true;
if (onenand_flags & ONENAND_FLAG_SYNCWRITE) {
- t.adv_wr_off = t.adv_rd_off;
- t.we_on = 0;
- t.we_off = t.cs_rd_off;
- t.cs_wr_off = t.cs_rd_off;
- t.wr_cycle = t.rd_cycle;
- if (cpu_is_omap34xx()) {
- t.wr_data_mux_bus = gpmc_ticks_to_ns(fclk_offset +
- gpmc_ps_to_ticks(min_gpmc_clk_period +
- t_rdyo * 1000));
- t.wr_access = t.access;
- }
+ dev_t.sync_write = true;
} else {
- t.adv_wr_off = gpmc_round_ns_to_ticks(max_t(int,
- t_avdp, t_cer));
- t.we_on = t.adv_wr_off + gpmc_round_ns_to_ticks(t_aavdh);
- t.we_off = t.we_on + gpmc_round_ns_to_ticks(t_wpl);
- t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph);
- t.wr_cycle = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez);
- if (cpu_is_omap34xx()) {
- t.wr_data_mux_bus = t.we_on;
- t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds);
- }
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
}
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(&t, &dev_t);
return t;
}
@@ -338,7 +282,6 @@ static int gpmc_set_sync_mode(int cs, struct gpmc_timings *t)
(sync_read ? GPMC_CONFIG1_READTYPE_SYNC : 0) |
(sync_write ? GPMC_CONFIG1_WRITEMULTIPLE_SUPP : 0) |
(sync_write ? GPMC_CONFIG1_WRITETYPE_SYNC : 0) |
- GPMC_CONFIG1_CLKACTIVATIONTIME(fclk_offset) |
GPMC_CONFIG1_PAGE_LEN(2) |
(cpu_is_omap34xx() ? 0 :
(GPMC_CONFIG1_WAIT_READ_MON |
diff --git a/arch/arm/mach-omap2/gpmc-smc91x.c b/arch/arm/mach-omap2/gpmc-smc91x.c
index 6eed907d594..11d0b756f09 100644
--- a/arch/arm/mach-omap2/gpmc-smc91x.c
+++ b/arch/arm/mach-omap2/gpmc-smc91x.c
@@ -58,6 +58,7 @@ static struct platform_device gpmc_smc91x_device = {
static int smc91c96_gpmc_retime(void)
{
struct gpmc_timings t;
+ struct gpmc_device_timings dev_t;
const int t3 = 10; /* Figure 12.2 read and 12.4 write */
const int t4_r = 20; /* Figure 12.2 read */
const int t4_w = 5; /* Figure 12.4 write */
@@ -68,32 +69,6 @@ static int smc91c96_gpmc_retime(void)
const int t20 = 185; /* Figure 12.2 read and 12.4 write */
u32 l;
- memset(&t, 0, sizeof(t));
-
- /* Read timings */
- t.cs_on = 0;
- t.adv_on = t.cs_on;
- t.oe_on = t.adv_on + t3;
- t.access = t.oe_on + t5;
- t.oe_off = t.access;
- t.adv_rd_off = t.oe_off + max(t4_r, t6);
- t.cs_rd_off = t.oe_off;
- t.rd_cycle = t20 - t.oe_on;
-
- /* Write timings */
- t.we_on = t.adv_on + t3;
-
- if (cpu_is_omap34xx() && (gpmc_cfg->flags & GPMC_MUX_ADD_DATA)) {
- t.wr_data_mux_bus = t.we_on;
- t.we_off = t.wr_data_mux_bus + t7;
- } else
- t.we_off = t.we_on + t7;
- if (cpu_is_omap34xx())
- t.wr_access = t.we_off;
- t.adv_wr_off = t.we_off + max(t4_w, t8);
- t.cs_wr_off = t.we_off + t4_w;
- t.wr_cycle = t20 - t.we_on;
-
l = GPMC_CONFIG1_DEVICESIZE_16;
if (gpmc_cfg->flags & GPMC_MUX_ADD_DATA)
l |= GPMC_CONFIG1_MUXADDDATA;
@@ -115,6 +90,22 @@ static int smc91c96_gpmc_retime(void)
if (gpmc_cfg->flags & GPMC_MUX_ADD_DATA)
return 0;
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ dev_t.t_oeasu = t3 * 1000;
+ dev_t.t_oe = t5 * 1000;
+ dev_t.t_cez_r = t4_r * 1000;
+ dev_t.t_oez = t6 * 1000;
+ dev_t.t_rd_cycle = (t20 - t3) * 1000;
+
+ dev_t.t_weasu = t3 * 1000;
+ dev_t.t_wpl = t7 * 1000;
+ dev_t.t_wph = t8 * 1000;
+ dev_t.t_cez_w = t4_w * 1000;
+ dev_t.t_wr_cycle = (t20 - t3) * 1000;
+
+ gpmc_calc_timings(&t, &dev_t);
+
return gpmc_cs_set_timings(gpmc_cfg->cs, &t);
}
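
The GPMC client files above now describe raw device timings in picoseconds and let the new gpmc_calc_timings() helper derive the register-level timings. A rough usage sketch of that pattern follows; the chip-select number and all timing values are invented for illustration, not taken from any datasheet.

/* Illustrative sketch only: convert datasheet nanoseconds to picoseconds,
 * let gpmc_calc_timings() compute the GPMC timings, then apply them.
 */
static int example_gpmc_retime(int cs)
{
	struct gpmc_device_timings dev_t;
	struct gpmc_timings t;

	memset(&dev_t, 0, sizeof(dev_t));
	dev_t.mux = true;		/* address/data multiplexed device */
	dev_t.t_oeasu = 10 * 1000;	/* 10 ns address setup to OE, in ps */
	dev_t.t_oe = 45 * 1000;		/* 45 ns access time from OE */
	dev_t.t_wpl = 40 * 1000;	/* 40 ns write pulse width */
	dev_t.t_wph = 30 * 1000;	/* 30 ns write deassertion time */
	dev_t.t_rd_cycle = 120 * 1000;
	dev_t.t_wr_cycle = 120 * 1000;

	gpmc_calc_timings(&t, &dev_t);	/* result is in ns, see gpmc.c below */

	return gpmc_cs_set_timings(cs, &t);
}
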
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index bf6117c32f4..65468f6d7f0 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -74,6 +74,13 @@
#define GPMC_ECC_CTRL_ECCREG8 0x008
#define GPMC_ECC_CTRL_ECCREG9 0x009
+#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
+#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
+#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
+#define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
+#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
+#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
+
#define GPMC_CS0_OFFSET 0x60
#define GPMC_CS_SIZE 0x30
#define GPMC_BCH_SIZE 0x10
@@ -223,6 +230,51 @@ unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
return ticks * gpmc_get_fclk_period() / 1000;
}
+static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
+{
+ return ticks * gpmc_get_fclk_period();
+}
+
+static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
+{
+ unsigned long ticks = gpmc_ps_to_ticks(time_ps);
+
+ return ticks * gpmc_get_fclk_period();
+}
+
+static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
+{
+ u32 l;
+
+ l = gpmc_cs_read_reg(cs, reg);
+ if (value)
+ l |= mask;
+ else
+ l &= ~mask;
+ gpmc_cs_write_reg(cs, reg, l);
+}
+
+static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
+{
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
+ GPMC_CONFIG1_TIME_PARA_GRAN,
+ p->time_para_granularity);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
+ GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
+ GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
+ p->cycle2cyclesamecsen);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
+ p->cycle2cyclediffcsen);
+}
+
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
int time, const char *name)
@@ -316,6 +368,12 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
+ GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
+ GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
+
+ GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
+ GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);
+
if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
if (gpmc_capability & GPMC_HAS_WR_ACCESS)
@@ -335,6 +393,8 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
}
+ gpmc_cs_bool_timings(cs, &t->bool_timings);
+
return 0;
}
@@ -748,6 +808,319 @@ static int __devinit gpmc_mem_init(void)
return 0;
}
+static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
+{
+ u32 temp;
+ int div;
+
+ div = gpmc_calc_divider(sync_clk);
+ temp = gpmc_ps_to_ticks(time_ps);
+ temp = (temp + div - 1) / div;
+ return gpmc_ticks_to_ps(temp * div);
+}
+
+/* XXX: can the cycles be avoided? */
+static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ /* XXX: mux check required ? */
+ if (mux) {
+ /* XXX: t_avdp should not be required for sync; it was only added
+ * for tusb. This indirectly necessitates having t_avdp_r and
+ * t_avdp_w instead of a single t_avdp.
+ */
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu; /* XXX: remove this ? */
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
+ }
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+ /* XXX: any scope for improvement by combining oe_on and
+ * clk_activation? Need to check whether
+ * access = clk_activation + round to sync clk.
+ */
+ temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
+ temp += gpmc_t->clk_activation;
+ if (dev_t->cyc_oe)
+ temp = max_t(u32, temp, gpmc_t->oe_on +
+ gpmc_ticks_to_ps(dev_t->cyc_oe));
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
+ temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
+ gpmc_t->access;
+ /* XXX: barter t_ce_rdyz with t_cez_r ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = max_t(u32, dev_t->t_weasu,
+ gpmc_t->clk_activation + dev_t->t_rdyo);
+ /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus?
+ * In that case, remember to handle we_on properly.
+ */
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* wr_access */
+ /* XXX: is a gpmc_capability check required? Even if not, it does no harm */
+ gpmc_t->wr_access = gpmc_t->access;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ temp = max_t(u32, temp,
+ gpmc_t->wr_access + gpmc_ticks_to_ps(1));
+ temp = max_t(u32, temp,
+ gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
+ temp += gpmc_t->wr_access;
+ /* XXX: barter t_ce_rdyz with t_cez_w ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu;
+ if (mux)
+ temp = max_t(u32, temp,
+ gpmc_t->adv_rd_off + dev_t->t_aavdh);
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+ temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
+ gpmc_t->oe_on + dev_t->t_oe);
+ temp = max_t(u32, temp,
+ gpmc_t->cs_on + dev_t->t_ce);
+ temp = max_t(u32, temp,
+ gpmc_t->adv_on + dev_t->t_aa);
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_rd_cycle,
+ gpmc_t->cs_rd_off + dev_t->t_cez_r);
+ temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = dev_t->t_weasu;
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = max_t(u32, dev_t->t_wr_cycle,
+ gpmc_t->cs_wr_off + dev_t->t_cez_w);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ u32 temp;
+
+ gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
+ gpmc_get_fclk_period();
+
+ gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
+ dev_t->t_bacc,
+ gpmc_t->sync_clk);
+
+ temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
+ gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
+
+ if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
+ return 0;
+
+ if (dev_t->ce_xdelay)
+ gpmc_t->bool_timings.cs_extra_delay = true;
+ if (dev_t->avd_xdelay)
+ gpmc_t->bool_timings.adv_extra_delay = true;
+ if (dev_t->oe_xdelay)
+ gpmc_t->bool_timings.oe_extra_delay = true;
+ if (dev_t->we_xdelay)
+ gpmc_t->bool_timings.we_extra_delay = true;
+
+ return 0;
+}
+
+static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ u32 temp;
+
+ /* cs_on */
+ gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
+
+ /* adv_on */
+ temp = dev_t->t_avdasu;
+ if (dev_t->t_ce_avd)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_on + dev_t->t_ce_avd);
+ gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
+
+ if (dev_t->sync_write || dev_t->sync_read)
+ gpmc_calc_sync_common_timings(gpmc_t, dev_t);
+
+ return 0;
+}
+
+/* TODO: remove this function once all peripherals are confirmed to
+ * work with generic timing. Simultaneously gpmc_cs_set_timings()
+ * has to be modified to handle timings in ps instead of ns.
+ */
+static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
+{
+ t->cs_on /= 1000;
+ t->cs_rd_off /= 1000;
+ t->cs_wr_off /= 1000;
+ t->adv_on /= 1000;
+ t->adv_rd_off /= 1000;
+ t->adv_wr_off /= 1000;
+ t->we_on /= 1000;
+ t->we_off /= 1000;
+ t->oe_on /= 1000;
+ t->oe_off /= 1000;
+ t->page_burst_access /= 1000;
+ t->access /= 1000;
+ t->rd_cycle /= 1000;
+ t->wr_cycle /= 1000;
+ t->bus_turnaround /= 1000;
+ t->cycle2cycle_delay /= 1000;
+ t->wait_monitoring /= 1000;
+ t->clk_activation /= 1000;
+ t->wr_access /= 1000;
+ t->wr_data_mux_bus /= 1000;
+}
+
+int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ memset(gpmc_t, 0, sizeof(*gpmc_t));
+
+ gpmc_calc_common_timings(gpmc_t, dev_t);
+
+ if (dev_t->sync_read)
+ gpmc_calc_sync_read_timings(gpmc_t, dev_t);
+ else
+ gpmc_calc_async_read_timings(gpmc_t, dev_t);
+
+ if (dev_t->sync_write)
+ gpmc_calc_sync_write_timings(gpmc_t, dev_t);
+ else
+ gpmc_calc_async_write_timings(gpmc_t, dev_t);
+
+ /* TODO: remove, see function definition */
+ gpmc_convert_ps_to_ns(gpmc_t);
+
+ return 0;
+}
+
static __devinit int gpmc_probe(struct platform_device *pdev)
{
int rc;
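
The ps-based helpers added above always round device times up to whole gpmc_fck ticks (and, in the *_to_sync_clk variant, up to whole GPMC_CLK periods). A small worked sketch of that rounding, assuming the underlying gpmc_ps_to_ticks() rounds up; the fclk period used here is a made-up example value.

/* Illustrative sketch only: round a picosecond time up to whole fclk ticks.
 * The 6024 ps period (roughly a 166 MHz fclk) is an assumed example.
 */
static unsigned int sketch_round_ps_to_ticks(unsigned int time_ps)
{
	const unsigned int fclk_ps = 6024;
	unsigned int ticks = (time_ps + fclk_ps - 1) / fclk_ps;

	return ticks * fclk_ps;		/* e.g. 15000 ps -> 3 ticks -> 18072 ps */
}
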
diff --git a/arch/arm/mach-omap2/gpmc.h b/arch/arm/mach-omap2/gpmc.h
index 79f4dfc2adb..fe0a844d500 100644
--- a/arch/arm/mach-omap2/gpmc.h
+++ b/arch/arm/mach-omap2/gpmc.h
@@ -74,6 +74,17 @@
#define GPMC_IRQ_COUNT_EVENT 0x02
+/* bool type time settings */
+struct gpmc_bool_timings {
+ bool cycle2cyclediffcsen;
+ bool cycle2cyclesamecsen;
+ bool we_extra_delay;
+ bool oe_extra_delay;
+ bool adv_extra_delay;
+ bool cs_extra_delay;
+ bool time_para_granularity;
+};
+
/*
* Note that all values in this struct are in nanoseconds except sync_clk
* (which is in picoseconds), while the register values are in gpmc_fck cycles.
@@ -83,34 +94,104 @@ struct gpmc_timings {
u32 sync_clk;
/* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
- u16 cs_on; /* Assertion time */
- u16 cs_rd_off; /* Read deassertion time */
- u16 cs_wr_off; /* Write deassertion time */
+ u32 cs_on; /* Assertion time */
+ u32 cs_rd_off; /* Read deassertion time */
+ u32 cs_wr_off; /* Write deassertion time */
/* ADV signal timings corresponding to GPMC_CONFIG3 */
- u16 adv_on; /* Assertion time */
- u16 adv_rd_off; /* Read deassertion time */
- u16 adv_wr_off; /* Write deassertion time */
+ u32 adv_on; /* Assertion time */
+ u32 adv_rd_off; /* Read deassertion time */
+ u32 adv_wr_off; /* Write deassertion time */
/* WE signals timings corresponding to GPMC_CONFIG4 */
- u16 we_on; /* WE assertion time */
- u16 we_off; /* WE deassertion time */
+ u32 we_on; /* WE assertion time */
+ u32 we_off; /* WE deassertion time */
/* OE signals timings corresponding to GPMC_CONFIG4 */
- u16 oe_on; /* OE assertion time */
- u16 oe_off; /* OE deassertion time */
+ u32 oe_on; /* OE assertion time */
+ u32 oe_off; /* OE deassertion time */
/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
- u16 page_burst_access; /* Multiple access word delay */
- u16 access; /* Start-cycle to first data valid delay */
- u16 rd_cycle; /* Total read cycle time */
- u16 wr_cycle; /* Total write cycle time */
+ u32 page_burst_access; /* Multiple access word delay */
+ u32 access; /* Start-cycle to first data valid delay */
+ u32 rd_cycle; /* Total read cycle time */
+ u32 wr_cycle; /* Total write cycle time */
+
+ u32 bus_turnaround;
+ u32 cycle2cycle_delay;
+
+ u32 wait_monitoring;
+ u32 clk_activation;
/* The following are only on OMAP3430 */
- u16 wr_access; /* WRACCESSTIME */
- u16 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+ u32 wr_access; /* WRACCESSTIME */
+ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+
+ struct gpmc_bool_timings bool_timings;
+};
+
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+ u32 t_ceasu; /* address setup to CS valid */
+ u32 t_avdasu; /* address setup to ADV valid */
+ /* XXX: try to combine t_avdp_r & t_avdp_w. The issue is
+ * that tusb uses these timings even for sync, whereas
+ * ideally adv_rd/(wr)_off should have considered t_avdh
+ * instead. This indirectly necessitates r/w variations
+ * of t_avdp, as one interface may be sync and the other
+ * async.
+ */
+ u32 t_avdp_r; /* ADV low time (what about t_cer?) */
+ u32 t_avdp_w;
+ u32 t_aavdh; /* address hold time */
+ u32 t_oeasu; /* address setup to OE valid */
+ u32 t_aa; /* access time from ADV assertion */
+ u32 t_iaa; /* initial access time */
+ u32 t_oe; /* access time from OE assertion */
+ u32 t_ce; /* access time from CS assertion */
+ u32 t_rd_cycle; /* read cycle time */
+ u32 t_cez_r; /* read CS deassertion to high Z */
+ u32 t_cez_w; /* write CS deassertion to high Z */
+ u32 t_oez; /* OE deassertion to high Z */
+ u32 t_weasu; /* address setup to WE valid */
+ u32 t_wpl; /* write assertion time */
+ u32 t_wph; /* write deassertion time */
+ u32 t_wr_cycle; /* write cycle time */
+
+ u32 clk;
+ u32 t_bacc; /* burst access valid clock to output delay */
+ u32 t_ces; /* CS setup time to clk */
+ u32 t_avds; /* ADV setup time to clk */
+ u32 t_avdh; /* ADV hold time from clk */
+ u32 t_ach; /* address hold time from clk */
+ u32 t_rdyo; /* clk to ready valid */
+
+ u32 t_ce_rdyz; /* XXX: needs a description, or use t_cez instead */
+ u32 t_ce_avd; /* CS on to ADV on delay */
+
+ /* XXX: check the possibility of combining
+ * cyc_aavdh_oe & cyc_aavdh_we
+ */
+ u8 cyc_aavdh_oe;/* read address hold time in cycles */
+ u8 cyc_aavdh_we;/* write address hold time in cycles */
+ u8 cyc_oe; /* access time from OE assertion in cycles */
+ u8 cyc_wpl; /* write deassertion time in cycles */
+ u32 cyc_iaa; /* initial access time in cycles */
+
+ bool mux; /* address & data muxed */
+ bool sync_write;/* synchronous write */
+ bool sync_read; /* synchronous read */
+
+ /* extra delays */
+ bool ce_xdelay;
+ bool avd_xdelay;
+ bool oe_xdelay;
+ bool we_xdelay;
};
+extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t);
+
extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
extern int gpmc_get_client_irq(unsigned irq_config);
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index fbb9b152cd5..b9074dde3b9 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -22,6 +22,7 @@
#include "soc.h"
#include "omap_hwmod.h"
#include "omap_device.h"
+#include "omap-pm.h"
#include "prm.h"
#include "common.h"
@@ -120,6 +121,16 @@ static int __init omap_i2c_nr_ports(void)
return ports;
}
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+ omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
static const char name[] = "omap_i2c";
int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
@@ -157,6 +168,15 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
pdata->flags = dev_attr->flags;
+ /*
+ * When waiting for completion of an i2c transfer, we need to
+ * set a wake-up latency constraint for the MPU. This is to
+ * ensure a quick enough wakeup from idle when the transfer
+ * completes.
+ * Only omap3 has support for constraints.
+ */
+ if (cpu_is_omap34xx())
+ pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
pdev = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
NULL, 0, 0);
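
The compat wrapper above is handed to the I2C bus driver through platform data; roughly, the driver is then expected to bracket a transfer with calls like the sketch below. The surrounding driver code is hypothetical, the latency units are assumed to be microseconds, and -1 is assumed to lift the constraint.

/* Illustrative sketch only: driver-side use of the set_mpu_wkup_lat hook. */
static void example_i2c_xfer(struct device *dev,
			     struct omap_i2c_bus_platform_data *pdata)
{
	if (pdata->set_mpu_wkup_lat)
		pdata->set_mpu_wkup_lat(dev, 1000);	/* assumed: max 1000 us wakeup */

	/* ... perform the transfer and wait for completion ... */

	if (pdata->set_mpu_wkup_lat)
		pdata->set_mpu_wkup_lat(dev, -1);	/* assumed: remove the constraint */
}
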
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 9df757644cc..2c3fdd65387 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -25,7 +25,7 @@
#include <asm/tlb.h>
#include <asm/mach/map.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "omap_hwmod.h"
#include "soc.h"
@@ -50,6 +50,9 @@
#include "prcm_mpu44xx.h"
#include "prminst44xx.h"
#include "cminst44xx.h"
+#include "prm2xxx.h"
+#include "prm3xxx.h"
+#include "prm44xx.h"
/*
* The machine specific code may provide the extra mapping besides the
@@ -387,6 +390,7 @@ void __init omap2420_init_early(void)
omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP2420_PRM_BASE));
omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP2420_CM_BASE), NULL);
omap2xxx_check_revision();
+ omap2xxx_prm_init();
omap2xxx_cm_init();
omap2xxx_voltagedomains_init();
omap242x_powerdomains_init();
@@ -401,6 +405,7 @@ void __init omap2420_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap2_pm_init();
+ omap2_clk_enable_autoidle_all();
}
#endif
@@ -415,6 +420,7 @@ void __init omap2430_init_early(void)
omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP2430_PRM_BASE));
omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP2430_CM_BASE), NULL);
omap2xxx_check_revision();
+ omap2xxx_prm_init();
omap2xxx_cm_init();
omap2xxx_voltagedomains_init();
omap243x_powerdomains_init();
@@ -429,6 +435,7 @@ void __init omap2430_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap2_pm_init();
+ omap2_clk_enable_autoidle_all();
}
#endif
@@ -448,6 +455,7 @@ void __init omap3_init_early(void)
omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE), NULL);
omap3xxx_check_revision();
omap3xxx_check_features();
+ omap3xxx_prm_init();
omap3xxx_cm_init();
omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
@@ -500,6 +508,7 @@ void __init omap3_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
void __init omap3430_init_late(void)
@@ -507,6 +516,7 @@ void __init omap3430_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
void __init omap35xx_init_late(void)
@@ -514,6 +524,7 @@ void __init omap35xx_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
void __init omap3630_init_late(void)
@@ -521,6 +532,7 @@ void __init omap3630_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
void __init am35xx_init_late(void)
@@ -528,6 +540,7 @@ void __init am35xx_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
void __init ti81xx_init_late(void)
@@ -535,6 +548,7 @@ void __init ti81xx_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap3_pm_init();
+ omap2_clk_enable_autoidle_all();
}
#endif
@@ -573,6 +587,7 @@ void __init omap4430_init_early(void)
omap_cm_base_init();
omap4xxx_check_revision();
omap4xxx_check_features();
+ omap44xx_prm_init();
omap44xx_voltagedomains_init();
omap44xx_powerdomains_init();
omap44xx_clockdomains_init();
@@ -586,6 +601,7 @@ void __init omap4430_init_late(void)
omap_mux_late_init();
omap2_common_pm_late_init();
omap4_pm_init();
+ omap2_clk_enable_autoidle_all();
}
#endif
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index bf496510eb5..df49f2a4946 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -21,7 +21,7 @@
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <linux/pm_runtime.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "omap_device.h"
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 26126343d6a..6a217c98db5 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -135,10 +135,7 @@ static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
old_mode = omap_mux_read(partition, gpio_mux->reg_offset);
mux_mode = val & ~(OMAP_MUX_NR_MODES - 1);
- if (partition->flags & OMAP_MUX_GPIO_IN_MODE3)
- mux_mode |= OMAP_MUX_MODE3;
- else
- mux_mode |= OMAP_MUX_MODE4;
+ mux_mode |= partition->gpio;
pr_debug("%s: Setting signal %s.gpio%i 0x%04x -> 0x%04x\n", __func__,
gpio_mux->muxnames[0], gpio, old_mode, mux_mode);
omap_mux_write(partition, mux_mode, gpio_mux->reg_offset);
@@ -800,7 +797,7 @@ int __init omap_mux_late_init(void)
struct omap_mux *m = &e->mux;
u16 mode = omap_mux_read(partition, m->reg_offset);
- if (OMAP_MODE_GPIO(mode))
+ if (OMAP_MODE_GPIO(partition, mode))
continue;
#ifndef CONFIG_DEBUG_FS
@@ -1065,7 +1062,7 @@ static void __init omap_mux_init_list(struct omap_mux_partition *partition,
}
#else
/* Skip pins that are not muxed as GPIO by bootloader */
- if (!OMAP_MODE_GPIO(omap_mux_read(partition,
+ if (!OMAP_MODE_GPIO(partition, omap_mux_read(partition,
superset->reg_offset))) {
superset++;
continue;
@@ -1132,6 +1129,7 @@ int __init omap_mux_init(const char *name, u32 flags,
partition->name = name;
partition->flags = flags;
+ partition->gpio = flags & OMAP_MUX_MODE7;
partition->size = mux_size;
partition->phys = mux_pbase;
partition->base = ioremap(mux_pbase, mux_size);
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 76f9b3c2f58..fdb22f14021 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -58,7 +58,8 @@
#define OMAP_PIN_OFF_INPUT_PULLDOWN (OMAP_OFF_EN | OMAP_OFF_PULL_EN)
#define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN
-#define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+#define OMAP_MODE_GPIO(partition, x) (((x) & OMAP_MUX_MODE7) == \
+ partition->gpio)
#define OMAP_MODE_UART(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
/* Flags for omapX_mux_init */
@@ -79,13 +80,20 @@
/*
* omap_mux_init flags definition:
*
+ * OMAP_MUX_GPIO_IN_MODEx, bits 0-2: gpio muxing mode, same as the pad
+ * control register, which takes values from 0 to 7.
* OMAP_MUX_REG_8BIT: Ensure that access to padconf is done in 8 bits.
* The default value is 16 bits.
- * OMAP_MUX_GPIO_IN_MODE3: The GPIO is selected in mode3.
- * The default is mode4.
*/
-#define OMAP_MUX_REG_8BIT (1 << 0)
-#define OMAP_MUX_GPIO_IN_MODE3 (1 << 1)
+#define OMAP_MUX_GPIO_IN_MODE0 OMAP_MUX_MODE0
+#define OMAP_MUX_GPIO_IN_MODE1 OMAP_MUX_MODE1
+#define OMAP_MUX_GPIO_IN_MODE2 OMAP_MUX_MODE2
+#define OMAP_MUX_GPIO_IN_MODE3 OMAP_MUX_MODE3
+#define OMAP_MUX_GPIO_IN_MODE4 OMAP_MUX_MODE4
+#define OMAP_MUX_GPIO_IN_MODE5 OMAP_MUX_MODE5
+#define OMAP_MUX_GPIO_IN_MODE6 OMAP_MUX_MODE6
+#define OMAP_MUX_GPIO_IN_MODE7 OMAP_MUX_MODE7
+#define OMAP_MUX_REG_8BIT (1 << 3)
/**
* struct omap_board_data - board specific device data
@@ -105,6 +113,7 @@ struct omap_board_data {
* struct mux_partition - contain partition related information
* @name: name of the current partition
* @flags: flags specific to this partition
+ * @gpio: gpio mux mode
* @phys: physical address
* @size: partition size
* @base: virtual address after ioremap
@@ -114,6 +123,7 @@ struct omap_board_data {
struct omap_mux_partition {
const char *name;
u32 flags;
+ u32 gpio;
u32 phys;
u32 size;
void __iomem *base;
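
With the partition now carrying its own GPIO mux mode, detecting a GPIO pad is just a comparison of the pad's mode bits against partition->gpio, as a minimal sketch of what OMAP_MODE_GPIO(partition, x) expands to; nothing below is part of the patch.

/* Illustrative sketch only: a pad is a GPIO pad when its mode bits (0-2)
 * match the mode recorded for the partition at omap_mux_init() time,
 * e.g. OMAP_MUX_MODE4 when OMAP_MUX_GPIO_IN_MODE4 was passed in.
 */
static bool sketch_pad_is_gpio(const struct omap_mux_partition *partition,
			       u16 padconf_val)
{
	return (padconf_val & OMAP_MUX_MODE7) == partition->gpio;
}
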
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index c47140bbbec..c53609f4629 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -2053,7 +2053,7 @@ int __init omap3_mux_init(struct omap_board_mux *board_subset, int flags)
return -EINVAL;
}
- return omap_mux_init("core", 0,
+ return omap_mux_init("core", OMAP_MUX_GPIO_IN_MODE4,
OMAP3_CONTROL_PADCONF_MUX_PBASE,
OMAP3_CONTROL_PADCONF_MUX_SIZE,
omap3_muxmodes, package_subset, board_subset,
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index a6a4ff8744b..6da4f7ae9d7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -12,153 +12,60 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
#include <linux/platform_data/iommu-omap.h>
+#include "omap_hwmod.h"
+#include "omap_device.h"
-#include "soc.h"
-#include "common.h"
-
-struct iommu_device {
- resource_size_t base;
- int irq;
- struct iommu_platform_data pdata;
- struct resource res[2];
-};
-static struct iommu_device *devices;
-static int num_iommu_devices;
-
-#ifdef CONFIG_ARCH_OMAP3
-static struct iommu_device omap3_devices[] = {
- {
- .base = 0x480bd400,
- .irq = 24 + OMAP_INTC_START,
- .pdata = {
- .name = "isp",
- .nr_tlb_entries = 8,
- .clk_name = "cam_ick",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
- },
-#if defined(CONFIG_OMAP_IOMMU_IVA2)
- {
- .base = 0x5d000000,
- .irq = 28 + OMAP_INTC_START,
- .pdata = {
- .name = "iva2",
- .nr_tlb_entries = 32,
- .clk_name = "iva2_ck",
- .da_start = 0x11000000,
- .da_end = 0xFFFFF000,
- },
- },
-#endif
-};
-#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices)
-static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES];
-#else
-#define omap3_devices NULL
-#define NR_OMAP3_IOMMU_DEVICES 0
-#define omap3_iommu_pdev NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-static struct iommu_device omap4_devices[] = {
- {
- .base = OMAP4_MMU1_BASE,
- .irq = 100 + OMAP44XX_IRQ_GIC_START,
- .pdata = {
- .name = "ducati",
- .nr_tlb_entries = 32,
- .clk_name = "ipu_fck",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
- },
- {
- .base = OMAP4_MMU2_BASE,
- .irq = 28 + OMAP44XX_IRQ_GIC_START,
- .pdata = {
- .name = "tesla",
- .nr_tlb_entries = 32,
- .clk_name = "dsp_fck",
- .da_start = 0x0,
- .da_end = 0xFFFFF000,
- },
- },
-};
-#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
-static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
-#else
-#define omap4_devices NULL
-#define NR_OMAP4_IOMMU_DEVICES 0
-#define omap4_iommu_pdev NULL
-#endif
-
-static struct platform_device **omap_iommu_pdev;
-
-static int __init omap_iommu_init(void)
+static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)
{
- int i, err;
- struct resource res[] = {
- { .flags = IORESOURCE_MEM },
- { .flags = IORESOURCE_IRQ },
- };
+ struct platform_device *pdev;
+ struct iommu_platform_data *pdata;
+ struct omap_mmu_dev_attr *a = (struct omap_mmu_dev_attr *)oh->dev_attr;
+ static int i;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->name = oh->name;
+ pdata->nr_tlb_entries = a->nr_tlb_entries;
+ pdata->da_start = a->da_start;
+ pdata->da_end = a->da_end;
+
+ if (oh->rst_lines_cnt == 1) {
+ pdata->reset_name = oh->rst_lines->name;
+ pdata->assert_reset = omap_device_assert_hardreset;
+ pdata->deassert_reset = omap_device_deassert_hardreset;
+ }
- if (cpu_is_omap34xx()) {
- devices = omap3_devices;
- omap_iommu_pdev = omap3_iommu_pdev;
- num_iommu_devices = NR_OMAP3_IOMMU_DEVICES;
- } else if (cpu_is_omap44xx()) {
- devices = omap4_devices;
- omap_iommu_pdev = omap4_iommu_pdev;
- num_iommu_devices = NR_OMAP4_IOMMU_DEVICES;
- } else
- return -ENODEV;
+ pdev = omap_device_build("omap-iommu", i, oh, pdata, sizeof(*pdata),
+ NULL, 0, 0);
- for (i = 0; i < num_iommu_devices; i++) {
- struct platform_device *pdev;
- const struct iommu_device *d = &devices[i];
+ kfree(pdata);
- pdev = platform_device_alloc("omap-iommu", i);
- if (!pdev) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (IS_ERR(pdev)) {
+ pr_err("%s: device build err: %ld\n", __func__, PTR_ERR(pdev));
+ return PTR_ERR(pdev);
+ }
- res[0].start = d->base;
- res[0].end = d->base + MMU_REG_SIZE - 1;
- res[1].start = res[1].end = d->irq;
+ i++;
- err = platform_device_add_resources(pdev, res,
- ARRAY_SIZE(res));
- if (err)
- goto err_out;
- err = platform_device_add_data(pdev, &d->pdata,
- sizeof(d->pdata));
- if (err)
- goto err_out;
- err = platform_device_add(pdev);
- if (err)
- goto err_out;
- omap_iommu_pdev[i] = pdev;
- }
return 0;
+}
-err_out:
- while (i--)
- platform_device_put(omap_iommu_pdev[i]);
- return err;
+static int __init omap_iommu_init(void)
+{
+ return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
- int i;
-
- for (i = 0; i < num_iommu_devices; i++)
- platform_device_unregister(omap_iommu_pdev[i]);
+ /* Do nothing */
}
module_exit(omap_iommu_exit);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 0ef934fec36..e065daa537c 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -441,19 +441,21 @@ int omap_device_get_context_loss_count(struct platform_device *pdev)
/**
* omap_device_count_resources - count number of struct resource entries needed
* @od: struct omap_device *
+ * @flags: Type of resources to include when counting (IRQ/DMA/MEM)
*
* Count the number of struct resource entries needed for this
* omap_device @od. Used by omap_device_build_ss() to determine how
* much memory to allocate before calling
* omap_device_fill_resources(). Returns the count.
*/
-static int omap_device_count_resources(struct omap_device *od)
+static int omap_device_count_resources(struct omap_device *od,
+ unsigned long flags)
{
int c = 0;
int i;
for (i = 0; i < od->hwmods_cnt; i++)
- c += omap_hwmod_count_resources(od->hwmods[i]);
+ c += omap_hwmod_count_resources(od->hwmods[i], flags);
pr_debug("omap_device: %s: counted %d total resources across %d hwmods\n",
od->pdev->name, c, od->hwmods_cnt);
@@ -557,52 +559,73 @@ struct omap_device *omap_device_alloc(struct platform_device *pdev,
od->hwmods = hwmods;
od->pdev = pdev;
- res_count = omap_device_count_resources(od);
/*
+ * Non-DT Boot:
+ * Here, pdev->num_resources = 0, and we should get all the
+ * resources from hwmod.
+ *
* DT Boot:
* OF framework will construct the resource structure (currently
* does for MEM & IRQ resource) and we should respect/use these
* resources, killing hwmod dependency.
* If pdev->num_resources > 0, we assume that MEM & IRQ resources
* have been allocated by OF layer already (through DTB).
- *
- * Non-DT Boot:
- * Here, pdev->num_resources = 0, and we should get all the
- * resources from hwmod.
+ * In preparation for the future, we examine the OF-provided resources
+ * to see if DMA resources have already been provided. In that case
+ * there is no need to update the resources for the device; we use the
+ * OF-provided ones.
*
* TODO: Once DMA resource is available from OF layer, we should
* kill filling any resources from hwmod.
*/
- if (res_count > pdev->num_resources) {
- /* Allocate resources memory to account for new resources */
- res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL);
- if (!res)
- goto oda_exit3;
-
- /*
- * If pdev->num_resources > 0, then assume that,
- * MEM and IRQ resources will only come from DT and only
- * fill DMA resource from hwmod layer.
- */
- if (pdev->num_resources && pdev->resource) {
- dev_dbg(&pdev->dev, "%s(): resources already allocated %d\n",
- __func__, res_count);
- memcpy(res, pdev->resource,
- sizeof(struct resource) * pdev->num_resources);
- _od_fill_dma_resources(od, &res[pdev->num_resources]);
- } else {
- dev_dbg(&pdev->dev, "%s(): using resources from hwmod %d\n",
- __func__, res_count);
- omap_device_fill_resources(od, res);
+ if (!pdev->num_resources) {
+ /* Count all resources for the device */
+ res_count = omap_device_count_resources(od, IORESOURCE_IRQ |
+ IORESOURCE_DMA |
+ IORESOURCE_MEM);
+ } else {
+ /* Take a look if we already have DMA resource via DT */
+ for (i = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+
+ /* We have it, no need to touch the resources */
+ if (r->flags == IORESOURCE_DMA)
+ goto have_everything;
}
+ /* Count only DMA resources for the device */
+ res_count = omap_device_count_resources(od, IORESOURCE_DMA);
+ /* The device has no DMA resource, no need for update */
+ if (!res_count)
+ goto have_everything;
- ret = platform_device_add_resources(pdev, res, res_count);
- kfree(res);
+ res_count += pdev->num_resources;
+ }
- if (ret)
- goto oda_exit3;
+ /* Allocate resources memory to account for new resources */
+ res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL);
+ if (!res)
+ goto oda_exit3;
+
+ if (!pdev->num_resources) {
+ dev_dbg(&pdev->dev, "%s: using %d resources from hwmod\n",
+ __func__, res_count);
+ omap_device_fill_resources(od, res);
+ } else {
+ dev_dbg(&pdev->dev,
+ "%s: appending %d DMA resources from hwmod\n",
+ __func__, res_count - pdev->num_resources);
+ memcpy(res, pdev->resource,
+ sizeof(struct resource) * pdev->num_resources);
+ _od_fill_dma_resources(od, &res[pdev->num_resources]);
}
+ ret = platform_device_add_resources(pdev, res, res_count);
+ kfree(res);
+
+ if (ret)
+ goto oda_exit3;
+
+have_everything:
if (!pm_lats) {
pm_lats = omap_default_latency;
pm_lats_cnt = ARRAY_SIZE(omap_default_latency);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b3b00f43dd7..4653efb87a2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -130,7 +130,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/list.h>
@@ -187,6 +187,8 @@ struct omap_hwmod_soc_ops {
int (*is_hardreset_asserted)(struct omap_hwmod *oh,
struct omap_hwmod_rst_info *ohri);
int (*init_clkdm)(struct omap_hwmod *oh);
+ void (*update_context_lost)(struct omap_hwmod *oh);
+ int (*get_context_lost)(struct omap_hwmod *oh);
};
/* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
@@ -646,6 +648,19 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
return 0;
}
+static struct clockdomain *_get_clkdm(struct omap_hwmod *oh)
+{
+ struct clk_hw_omap *clk;
+
+ if (oh->clkdm) {
+ return oh->clkdm;
+ } else if (oh->_clk) {
+ clk = to_clk_hw_omap(__clk_get_hw(oh->_clk));
+ return clk->clkdm;
+ }
+ return NULL;
+}
+
/**
* _add_initiator_dep: prevent @oh from smart-idling while @init_oh is active
* @oh: struct omap_hwmod *
@@ -661,13 +676,18 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
*/
static int _add_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
{
- if (!oh->_clk)
+ struct clockdomain *clkdm, *init_clkdm;
+
+ clkdm = _get_clkdm(oh);
+ init_clkdm = _get_clkdm(init_oh);
+
+ if (!clkdm || !init_clkdm)
return -EINVAL;
- if (oh->_clk->clkdm && oh->_clk->clkdm->flags & CLKDM_NO_AUTODEPS)
+ if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS)
return 0;
- return clkdm_add_sleepdep(oh->_clk->clkdm, init_oh->_clk->clkdm);
+ return clkdm_add_sleepdep(clkdm, init_clkdm);
}
/**
@@ -685,13 +705,18 @@ static int _add_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
*/
static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
{
- if (!oh->_clk)
+ struct clockdomain *clkdm, *init_clkdm;
+
+ clkdm = _get_clkdm(oh);
+ init_clkdm = _get_clkdm(init_oh);
+
+ if (!clkdm || !init_clkdm)
return -EINVAL;
- if (oh->_clk->clkdm && oh->_clk->clkdm->flags & CLKDM_NO_AUTODEPS)
+ if (clkdm && clkdm->flags & CLKDM_NO_AUTODEPS)
return 0;
- return clkdm_del_sleepdep(oh->_clk->clkdm, init_oh->_clk->clkdm);
+ return clkdm_del_sleepdep(clkdm, init_clkdm);
}
/**
@@ -725,7 +750,7 @@ static int _init_main_clk(struct omap_hwmod *oh)
*/
clk_prepare(oh->_clk);
- if (!oh->_clk->clkdm)
+ if (!_get_clkdm(oh))
pr_debug("omap_hwmod: %s: missing clockdomain for %s.\n",
oh->name, oh->main_clk);
@@ -1308,6 +1333,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
u8 idlemode, sf;
u32 v;
bool clkdm_act;
+ struct clockdomain *clkdm;
if (!oh->class->sysc)
return;
@@ -1327,11 +1353,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
v = oh->_sysc_cache;
sf = oh->class->sysc->sysc_flags;
+ clkdm = _get_clkdm(oh);
if (sf & SYSC_HAS_SIDLEMODE) {
- clkdm_act = ((oh->clkdm &&
- oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
- (oh->_clk && oh->_clk->clkdm &&
- oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+ clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
if (clkdm_act && !(oh->class->sysc->idlemodes &
(SIDLE_SMART | SIDLE_SMART_WKUP)))
idlemode = HWMOD_IDLEMODE_FORCE;
@@ -1533,11 +1557,12 @@ static int _init_clocks(struct omap_hwmod *oh, void *data)
pr_debug("omap_hwmod: %s: looking up clocks\n", oh->name);
+ if (soc_ops.init_clkdm)
+ ret |= soc_ops.init_clkdm(oh);
+
ret |= _init_main_clk(oh);
ret |= _init_interface_clks(oh);
ret |= _init_opt_clks(oh);
- if (soc_ops.init_clkdm)
- ret |= soc_ops.init_clkdm(oh);
if (!ret)
oh->_state = _HWMOD_STATE_CLKS_INITED;
@@ -1992,6 +2017,42 @@ static void _reconfigure_io_chain(void)
}
/**
+ * _omap4_update_context_lost - increment hwmod context loss counter if
+ * hwmod context was lost, and clear hardware context loss reg
+ * @oh: hwmod to check for context loss
+ *
+ * If the PRCM indicates that the hwmod @oh lost context, increment
+ * our in-memory context loss counter, and clear the RM_*_CONTEXT
+ * bits. No return value.
+ */
+static void _omap4_update_context_lost(struct omap_hwmod *oh)
+{
+ if (oh->prcm.omap4.flags & HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT)
+ return;
+
+ if (!prm_was_any_context_lost_old(oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.context_offs))
+ return;
+
+ oh->prcm.omap4.context_lost_counter++;
+ prm_clear_context_loss_flags_old(oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.context_offs);
+}
+
+/**
+ * _omap4_get_context_lost - get context loss counter for a hwmod
+ * @oh: hwmod to get context loss counter for
+ *
+ * Returns the in-memory context loss counter for a hwmod.
+ */
+static int _omap4_get_context_lost(struct omap_hwmod *oh)
+{
+ return oh->prcm.omap4.context_lost_counter;
+}
+
+/**
* _enable - enable an omap_hwmod
* @oh: struct omap_hwmod *
*
@@ -2074,6 +2135,9 @@ static int _enable(struct omap_hwmod *oh)
if (soc_ops.enable_module)
soc_ops.enable_module(oh);
+ if (soc_ops.update_context_lost)
+ soc_ops.update_context_lost(oh);
+
r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) :
-EINVAL;
if (!r) {
@@ -3398,7 +3462,7 @@ int omap_hwmod_reset(struct omap_hwmod *oh)
/**
* omap_hwmod_count_resources - count number of struct resources needed by hwmod
* @oh: struct omap_hwmod *
- * @res: pointer to the first element of an array of struct resource to fill
+ * @flags: Type of resources to include when counting (IRQ/DMA/MEM)
*
* Count the number of struct resource array elements necessary to
* contain omap_hwmod @oh resources. Intended to be called by code
@@ -3411,20 +3475,25 @@ int omap_hwmod_reset(struct omap_hwmod *oh)
* resource IDs.
*
*/
-int omap_hwmod_count_resources(struct omap_hwmod *oh)
+int omap_hwmod_count_resources(struct omap_hwmod *oh, unsigned long flags)
{
- struct omap_hwmod_ocp_if *os;
- struct list_head *p;
- int ret;
- int i = 0;
+ int ret = 0;
- ret = _count_mpu_irqs(oh) + _count_sdma_reqs(oh);
+ if (flags & IORESOURCE_IRQ)
+ ret += _count_mpu_irqs(oh);
- p = oh->slave_ports.next;
+ if (flags & IORESOURCE_DMA)
+ ret += _count_sdma_reqs(oh);
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
- ret += _count_ocp_if_addr_spaces(os);
+ if (flags & IORESOURCE_MEM) {
+ int i = 0;
+ struct omap_hwmod_ocp_if *os;
+ struct list_head *p = oh->slave_ports.next;
+
+ while (i < oh->slaves_cnt) {
+ os = _fetch_next_ocp_if(&p, &i);
+ ret += _count_ocp_if_addr_spaces(os);
+ }
}
return ret;
@@ -3591,10 +3660,15 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
{
struct clk *c;
struct omap_hwmod_ocp_if *oi;
+ struct clockdomain *clkdm;
+ struct clk_hw_omap *clk;
if (!oh)
return NULL;
+ if (oh->clkdm)
+ return oh->clkdm->pwrdm.ptr;
+
if (oh->_clk) {
c = oh->_clk;
} else {
@@ -3604,11 +3678,12 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
c = oi->_clk;
}
- if (!c->clkdm)
+ clk = to_clk_hw_omap(__clk_get_hw(c));
+ clkdm = clk->clkdm;
+ if (!clkdm)
return NULL;
- return c->clkdm->pwrdm.ptr;
-
+ return clkdm->pwrdm.ptr;
}
/**
@@ -3913,17 +3988,21 @@ ohsps_unlock:
* omap_hwmod_get_context_loss_count - get lost context count
* @oh: struct omap_hwmod *
*
- * Query the powerdomain of of @oh to get the context loss
- * count for this device.
+ * Returns the context loss count associated with @oh
+ * upon success, or zero if no context loss data is available.
*
- * Returns the context loss count of the powerdomain assocated with @oh
- * upon success, or zero if no powerdomain exists for @oh.
+ * On OMAP4, this queries the per-hwmod context loss register,
+ * assuming one exists. If not, or on OMAP2/3, this queries the
+ * enclosing powerdomain context loss count.
*/
int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
{
struct powerdomain *pwrdm;
int ret = 0;
+ if (soc_ops.get_context_lost)
+ return soc_ops.get_context_lost(oh);
+
pwrdm = omap_hwmod_get_pwrdm(oh);
if (pwrdm)
ret = pwrdm_get_context_loss_count(pwrdm);
@@ -4038,6 +4117,8 @@ void __init omap_hwmod_init(void)
soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
soc_ops.init_clkdm = _init_clkdm;
+ soc_ops.update_context_lost = _omap4_update_context_lost;
+ soc_ops.get_context_lost = _omap4_get_context_lost;
} else if (soc_is_am33xx()) {
soc_ops.enable_module = _am33xx_enable_module;
soc_ops.disable_module = _am33xx_disable_module;
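
For context, a driver-side consumer of the (now per-module on OMAP4) context-loss counter typically just compares counts across a power transition. In the sketch below, the structure and function names are hypothetical apart from omap_device_get_context_loss_count().

/* Illustrative sketch only: restore IP registers when the count changed. */
struct example_drvdata {
	struct platform_device *pdev;
	int ctx_loss_cnt;	/* snapshot taken while the module was active */
};

static void example_runtime_resume(struct example_drvdata *ddata)
{
	int cnt = omap_device_get_context_loss_count(ddata->pdev);

	if (cnt != ddata->ctx_loss_cnt) {
		/* context was lost while idle/off: reprogram the module */
		ddata->ctx_loss_cnt = cnt;
	}
}
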
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 87a3c5b7aa7..3ae852a522f 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -2,7 +2,7 @@
* omap_hwmod macros, structures
*
* Copyright (C) 2009-2011 Nokia Corporation
- * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2011-2012 Texas Instruments, Inc.
* Paul Walmsley
*
* Created in collaboration with (alphabetical order): Benoît Cousson,
@@ -394,12 +394,15 @@ struct omap_hwmod_omap2_prcm {
/**
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
- * @clkctrl_reg: PRCM address of the clock control register
- * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
+ * @clkctrl_offs: offset of the PRCM clock control register
+ * @rstctrl_offs: offset of the XXX_RSTCTRL register located in the PRM
+ * @context_offs: offset of the RM_*_CONTEXT register
* @lostcontext_mask: bitmask for selecting bits from RM_*_CONTEXT register
* @rstst_reg: (AM33XX only) address of the XXX_RSTST register in the PRM
* @submodule_wkdep_bit: bit shift of the WKDEP range
* @flags: PRCM register capabilities for this IP block
+ * @modulemode: allowable modulemodes
+ * @context_lost_counter: count of module-level context losses
*
* If @lostcontext_mask is not defined, context loss check code uses
* whole register without masking. @lostcontext_mask should only be
@@ -415,6 +418,7 @@ struct omap_hwmod_omap4_prcm {
u8 submodule_wkdep_bit;
u8 modulemode;
u8 flags;
+ int context_lost_counter;
};
@@ -633,7 +637,7 @@ void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs);
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs);
int omap_hwmod_softreset(struct omap_hwmod *oh);
-int omap_hwmod_count_resources(struct omap_hwmod *oh);
+int omap_hwmod_count_resources(struct omap_hwmod *oh, unsigned long flags);
int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res);
int omap_hwmod_fill_dma_resources(struct omap_hwmod *oh, struct resource *res);
int omap_hwmod_get_resource_byname(struct omap_hwmod *oh, unsigned int type,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index e8efe3d1da6..b5efe58c0be 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -15,8 +15,8 @@
#include <linux/i2c-omap.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
+#include <plat/dmtimer.h>
#include "omap_hwmod.h"
#include "l3_2xxx.h"
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 32d17e3fd72..d2d3840557c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -16,8 +16,8 @@
#include <linux/i2c-omap.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
+#include <plat/dmtimer.h>
#include "omap_hwmod.h"
#include "mmc.h"
@@ -77,8 +77,7 @@ static struct omap_hwmod_class i2c_class = {
static struct omap_i2c_dev_attr i2c_dev_attr = {
.fifo_depth = 8, /* bytes */
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_BUS_SHIFT_2 |
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 |
OMAP_I2C_FLAG_FORCE_19200_INT_CLK,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index 40d6c93d985..534974e08ad 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -10,7 +10,8 @@
* published by the Free Software Foundation.
*/
-#include <plat-omap/dma-omap.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
#include "omap_hwmod.h"
#include "hdq1w.h"
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 0db8f450bad..e596117004d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -10,7 +10,7 @@
*/
#include <linux/platform_data/gpio-omap.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <plat/dmtimer.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 32820d89f5b..081c71edddf 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -1118,8 +1118,7 @@ static struct omap_hwmod_class i2c_class = {
};
static struct omap_i2c_dev_attr i2c_dev_attr = {
- .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
};
/* i2c1 */
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 7f73f2132ac..8bb2628df34 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -19,7 +19,7 @@
#include <linux/power/smartreflex.h>
#include <linux/platform_data/gpio-omap.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "l3_3xxx.h"
#include "l4_3xxx.h"
#include <linux/platform_data/asoc-ti-mcbsp.h>
@@ -794,9 +794,7 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
/* I2C1 */
static struct omap_i2c_dev_attr i2c1_dev_attr = {
.fifo_depth = 8, /* bytes */
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod omap3xxx_i2c1_hwmod = {
@@ -821,9 +819,7 @@ static struct omap_hwmod omap3xxx_i2c1_hwmod = {
/* I2C2 */
static struct omap_i2c_dev_attr i2c2_dev_attr = {
.fifo_depth = 8, /* bytes */
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod omap3xxx_i2c2_hwmod = {
@@ -848,9 +844,7 @@ static struct omap_hwmod omap3xxx_i2c2_hwmod = {
/* I2C3 */
static struct omap_i2c_dev_attr i2c3_dev_attr = {
.fifo_depth = 64, /* bytes */
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 26f8e9f1819..129d5081ed1 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -21,11 +21,11 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/power/smartreflex.h>
+#include <linux/platform_data/omap_ocp2scp.h>
#include <linux/i2c-omap.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
-#include <linux/platform_data/omap_ocp2scp.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <linux/platform_data/iommu-omap.h>
@@ -652,7 +652,7 @@ static struct omap_hwmod omap44xx_dsp_hwmod = {
.mpu_irqs = omap44xx_dsp_irqs,
.rst_lines = omap44xx_dsp_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets),
- .main_clk = "dsp_fck",
+ .main_clk = "dpll_iva_m4x2_ck",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET,
@@ -1528,8 +1528,7 @@ static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
};
static struct omap_i2c_dev_attr i2c_dev_attr = {
- .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE,
};
/* i2c1 */
@@ -1679,7 +1678,7 @@ static struct omap_hwmod omap44xx_ipu_hwmod = {
.mpu_irqs = omap44xx_ipu_irqs,
.rst_lines = omap44xx_ipu_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets),
- .main_clk = "ipu_fck",
+ .main_clk = "ducati_clk_mux_ck",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_opp_data.h b/arch/arm/mach-omap2/omap_opp_data.h
index 7e437bf6024..336fdfcf88b 100644
--- a/arch/arm/mach-omap2/omap_opp_data.h
+++ b/arch/arm/mach-omap2/omap_opp_data.h
@@ -89,8 +89,11 @@ extern struct omap_volt_data omap34xx_vddcore_volt_data[];
extern struct omap_volt_data omap36xx_vddmpu_volt_data[];
extern struct omap_volt_data omap36xx_vddcore_volt_data[];
-extern struct omap_volt_data omap44xx_vdd_mpu_volt_data[];
-extern struct omap_volt_data omap44xx_vdd_iva_volt_data[];
-extern struct omap_volt_data omap44xx_vdd_core_volt_data[];
+extern struct omap_volt_data omap443x_vdd_mpu_volt_data[];
+extern struct omap_volt_data omap443x_vdd_iva_volt_data[];
+extern struct omap_volt_data omap443x_vdd_core_volt_data[];
+extern struct omap_volt_data omap446x_vdd_mpu_volt_data[];
+extern struct omap_volt_data omap446x_vdd_iva_volt_data[];
+extern struct omap_volt_data omap446x_vdd_core_volt_data[];
#endif /* __ARCH_ARM_MACH_OMAP2_OMAP_OPP_DATA_H */
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 4d76a3ca5bf..e237602e10e 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -33,6 +33,38 @@
#include "control.h"
#include "usb.h"
+#define CONTROL_DEV_CONF 0x300
+#define PHY_PD 0x1
+
+/**
+ * omap4430_phy_power_down: disable MUSB PHY during early init
+ *
+ * The OMAP4 MUSB PHY module is enabled by default on reset, but leaving
+ * it enabled prevents core retention, so it must be disabled by SW. The
+ * USB driver will re-enable it later, if and when it is needed.
+ */
+static int __init omap4430_phy_power_down(void)
+{
+ void __iomem *ctrl_base;
+
+ if (!cpu_is_omap44xx())
+ return 0;
+
+ ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
+ if (!ctrl_base) {
+ pr_err("control module ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ /* Power down the phy */
+ __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+
+ iounmap(ctrl_base);
+
+ return 0;
+}
+early_initcall(omap4430_phy_power_down);
+
void am35x_musb_reset(void)
{
u32 regval;
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 2bf35dc091b..615e5b1fb02 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -31,16 +31,6 @@
#define OMAP3_VP_VSTEPMAX_VSTEPMAX 0x04
#define OMAP3_VP_VLIMITTO_TIMEOUT_US 200
-#define OMAP3430_VP1_VLIMITTO_VDDMIN 0x14
-#define OMAP3430_VP1_VLIMITTO_VDDMAX 0x42
-#define OMAP3430_VP2_VLIMITTO_VDDMIN 0x18
-#define OMAP3430_VP2_VLIMITTO_VDDMAX 0x2c
-
-#define OMAP3630_VP1_VLIMITTO_VDDMIN 0x18
-#define OMAP3630_VP1_VLIMITTO_VDDMAX 0x3c
-#define OMAP3630_VP2_VLIMITTO_VDDMIN 0x18
-#define OMAP3630_VP2_VLIMITTO_VDDMAX 0x30
-
#define OMAP4_SRI2C_SLAVE_ADDR 0x12
#define OMAP4_VDD_MPU_SR_VOLT_REG 0x55
#define OMAP4_VDD_MPU_SR_CMD_REG 0x56
@@ -54,13 +44,6 @@
#define OMAP4_VP_VSTEPMAX_VSTEPMAX 0x04
#define OMAP4_VP_VLIMITTO_TIMEOUT_US 200
-#define OMAP4_VP_MPU_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_MPU_VLIMITTO_VDDMAX 0x39
-#define OMAP4_VP_IVA_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_IVA_VLIMITTO_VDDMAX 0x2D
-#define OMAP4_VP_CORE_VLIMITTO_VDDMIN 0xA
-#define OMAP4_VP_CORE_VLIMITTO_VDDMAX 0x28
-
static bool is_offset_valid;
static u8 smps_offset;
/*
@@ -159,16 +142,11 @@ static u8 twl6030_uv_to_vsel(unsigned long uv)
static struct omap_voltdm_pmic omap3_mpu_pmic = {
.slew_rate = 4000,
.step_size = 12500,
- .on_volt = 1200000,
- .onlp_volt = 1000000,
- .ret_volt = 975000,
- .off_volt = 600000,
- .volt_setup_time = 0xfff,
.vp_erroroffset = OMAP3_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP3_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP3_VP_VSTEPMAX_VSTEPMAX,
- .vp_vddmin = OMAP3430_VP1_VLIMITTO_VDDMIN,
- .vp_vddmax = OMAP3430_VP1_VLIMITTO_VDDMAX,
+ .vddmin = 600000,
+ .vddmax = 1450000,
.vp_timeout_us = OMAP3_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP3_SRI2C_SLAVE_ADDR,
.volt_reg_addr = OMAP3_VDD_MPU_SR_CONTROL_REG,
@@ -180,16 +158,11 @@ static struct omap_voltdm_pmic omap3_mpu_pmic = {
static struct omap_voltdm_pmic omap3_core_pmic = {
.slew_rate = 4000,
.step_size = 12500,
- .on_volt = 1200000,
- .onlp_volt = 1000000,
- .ret_volt = 975000,
- .off_volt = 600000,
- .volt_setup_time = 0xfff,
.vp_erroroffset = OMAP3_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP3_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP3_VP_VSTEPMAX_VSTEPMAX,
- .vp_vddmin = OMAP3430_VP2_VLIMITTO_VDDMIN,
- .vp_vddmax = OMAP3430_VP2_VLIMITTO_VDDMAX,
+ .vddmin = 600000,
+ .vddmax = 1450000,
.vp_timeout_us = OMAP3_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP3_SRI2C_SLAVE_ADDR,
.volt_reg_addr = OMAP3_VDD_CORE_SR_CONTROL_REG,
@@ -201,21 +174,17 @@ static struct omap_voltdm_pmic omap3_core_pmic = {
static struct omap_voltdm_pmic omap4_mpu_pmic = {
.slew_rate = 4000,
.step_size = 12660,
- .on_volt = 1375000,
- .onlp_volt = 1375000,
- .ret_volt = 830000,
- .off_volt = 0,
- .volt_setup_time = 0,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
- .vp_vddmin = OMAP4_VP_MPU_VLIMITTO_VDDMIN,
- .vp_vddmax = OMAP4_VP_MPU_VLIMITTO_VDDMAX,
+ .vddmin = 0,
+ .vddmax = 2100000,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
.volt_reg_addr = OMAP4_VDD_MPU_SR_VOLT_REG,
.cmd_reg_addr = OMAP4_VDD_MPU_SR_CMD_REG,
.i2c_high_speed = true,
+ .i2c_pad_load = 3,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
@@ -223,21 +192,17 @@ static struct omap_voltdm_pmic omap4_mpu_pmic = {
static struct omap_voltdm_pmic omap4_iva_pmic = {
.slew_rate = 4000,
.step_size = 12660,
- .on_volt = 1188000,
- .onlp_volt = 1188000,
- .ret_volt = 830000,
- .off_volt = 0,
- .volt_setup_time = 0,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
- .vp_vddmin = OMAP4_VP_IVA_VLIMITTO_VDDMIN,
- .vp_vddmax = OMAP4_VP_IVA_VLIMITTO_VDDMAX,
+ .vddmin = 0,
+ .vddmax = 2100000,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
.volt_reg_addr = OMAP4_VDD_IVA_SR_VOLT_REG,
.cmd_reg_addr = OMAP4_VDD_IVA_SR_CMD_REG,
.i2c_high_speed = true,
+ .i2c_pad_load = 3,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
@@ -245,20 +210,17 @@ static struct omap_voltdm_pmic omap4_iva_pmic = {
static struct omap_voltdm_pmic omap4_core_pmic = {
.slew_rate = 4000,
.step_size = 12660,
- .on_volt = 1200000,
- .onlp_volt = 1200000,
- .ret_volt = 830000,
- .off_volt = 0,
- .volt_setup_time = 0,
.vp_erroroffset = OMAP4_VP_CONFIG_ERROROFFSET,
.vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
.vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
- .vp_vddmin = OMAP4_VP_CORE_VLIMITTO_VDDMIN,
- .vp_vddmax = OMAP4_VP_CORE_VLIMITTO_VDDMAX,
+ .vddmin = 0,
+ .vddmax = 2100000,
.vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
.i2c_slave_addr = OMAP4_SRI2C_SLAVE_ADDR,
.volt_reg_addr = OMAP4_VDD_CORE_SR_VOLT_REG,
.cmd_reg_addr = OMAP4_VDD_CORE_SR_CMD_REG,
+ .i2c_high_speed = true,
+ .i2c_pad_load = 3,
.vsel_to_uv = twl6030_vsel_to_uv,
.uv_to_vsel = twl6030_uv_to_vsel,
};
@@ -289,13 +251,6 @@ int __init omap3_twl_init(void)
if (!cpu_is_omap34xx())
return -ENODEV;
- if (cpu_is_omap3630()) {
- omap3_mpu_pmic.vp_vddmin = OMAP3630_VP1_VLIMITTO_VDDMIN;
- omap3_mpu_pmic.vp_vddmax = OMAP3630_VP1_VLIMITTO_VDDMAX;
- omap3_core_pmic.vp_vddmin = OMAP3630_VP2_VLIMITTO_VDDMIN;
- omap3_core_pmic.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX;
- }
-
/*
* The smartreflex bit on twl4030 specifies if the setting of voltage
* is done over the I2C_SR path. Since this setting is independent of
@@ -337,8 +292,8 @@ int __init omap3_twl_set_sr_bit(bool enable)
if (twl_sr_enable_autoinit)
pr_warning("%s: unexpected multiple calls\n", __func__);
- ret = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &temp,
- TWL4030_DCDC_GLOBAL_CFG);
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &temp,
+ TWL4030_DCDC_GLOBAL_CFG);
if (ret)
goto err;
@@ -347,8 +302,8 @@ int __init omap3_twl_set_sr_bit(bool enable)
else
temp &= ~SMARTREFLEX_ENABLE;
- ret = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, temp,
- TWL4030_DCDC_GLOBAL_CFG);
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, temp,
+ TWL4030_DCDC_GLOBAL_CFG);
if (!ret) {
twl_sr_enable_autoinit = true;
return 0;
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index a9fd6d5fe79..d470b728e72 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -1,7 +1,7 @@
/*
* OMAP4 OPP table definitions.
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010-2012 Texas Instruments Incorporated - http://www.ti.com/
* Nishanth Menon
* Kevin Hilman
* Thara Gopinath
@@ -35,7 +35,7 @@
#define OMAP4430_VDD_MPU_OPPTURBO_UV 1313000
#define OMAP4430_VDD_MPU_OPPNITRO_UV 1375000
-struct omap_volt_data omap44xx_vdd_mpu_volt_data[] = {
+struct omap_volt_data omap443x_vdd_mpu_volt_data[] = {
VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c),
VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16),
VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23),
@@ -47,7 +47,7 @@ struct omap_volt_data omap44xx_vdd_mpu_volt_data[] = {
#define OMAP4430_VDD_IVA_OPP100_UV 1188000
#define OMAP4430_VDD_IVA_OPPTURBO_UV 1300000
-struct omap_volt_data omap44xx_vdd_iva_volt_data[] = {
+struct omap_volt_data omap443x_vdd_iva_volt_data[] = {
VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c),
VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16),
VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23),
@@ -57,14 +57,14 @@ struct omap_volt_data omap44xx_vdd_iva_volt_data[] = {
#define OMAP4430_VDD_CORE_OPP50_UV 1025000
#define OMAP4430_VDD_CORE_OPP100_UV 1200000
-struct omap_volt_data omap44xx_vdd_core_volt_data[] = {
+struct omap_volt_data omap443x_vdd_core_volt_data[] = {
VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP50_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c),
VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16),
VOLT_DATA_DEFINE(0, 0, 0, 0),
};
-static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
+static struct omap_opp_def __initdata omap443x_opp_def_list[] = {
/* MPU OPP1 - OPP50 */
OPP_INITIALIZER("mpu", true, 300000000, OMAP4430_VDD_MPU_OPP50_UV),
/* MPU OPP2 - OPP100 */
@@ -86,6 +86,82 @@ static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
/* TODO: add DSP, aess, fdif, gpu */
};
+#define OMAP4460_VDD_MPU_OPP50_UV 1025000
+#define OMAP4460_VDD_MPU_OPP100_UV 1200000
+#define OMAP4460_VDD_MPU_OPPTURBO_UV 1313000
+#define OMAP4460_VDD_MPU_OPPNITRO_UV 1375000
+
+struct omap_volt_data omap446x_vdd_mpu_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_MPU_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27),
+ VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+#define OMAP4460_VDD_IVA_OPP50_UV 1025000
+#define OMAP4460_VDD_IVA_OPP100_UV 1200000
+#define OMAP4460_VDD_IVA_OPPTURBO_UV 1313000
+#define OMAP4460_VDD_IVA_OPPNITRO_UV 1375000
+
+struct omap_volt_data omap446x_vdd_iva_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_IVA_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPNITRO, 0xfa, 0x23),
+ VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+#define OMAP4460_VDD_CORE_OPP50_UV 1025000
+#define OMAP4460_VDD_CORE_OPP100_UV 1200000
+#define OMAP4460_VDD_CORE_OPP100_OV_UV 1250000
+
+struct omap_volt_data omap446x_vdd_core_volt_data[] = {
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP50_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16),
+ VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_OV_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100OV, 0xf9, 0x16),
+ VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct omap_opp_def __initdata omap446x_opp_def_list[] = {
+ /* MPU OPP1 - OPP50 */
+ OPP_INITIALIZER("mpu", true, 350000000, OMAP4460_VDD_MPU_OPP50_UV),
+ /* MPU OPP2 - OPP100 */
+ OPP_INITIALIZER("mpu", true, 700000000, OMAP4460_VDD_MPU_OPP100_UV),
+ /* MPU OPP3 - OPP-Turbo */
+ OPP_INITIALIZER("mpu", true, 920000000, OMAP4460_VDD_MPU_OPPTURBO_UV),
+ /*
+	 * MPU OPP4 - OPP-Nitro. Disabled because the reference schematics
+	 * recommend the TPS623631; confirm and enable this OPP in the board
+	 * file. XXX: Maybe these should be enabled based on MPU capability,
+	 * with exception board files disabling them...
+ */
+ OPP_INITIALIZER("mpu", false, 1200000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
+ /* MPU OPP4 - OPP-Nitro SpeedBin */
+ OPP_INITIALIZER("mpu", false, 1500000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
+ /* L3 OPP1 - OPP50 */
+ OPP_INITIALIZER("l3_main_1", true, 100000000, OMAP4460_VDD_CORE_OPP50_UV),
+ /* L3 OPP2 - OPP100 */
+ OPP_INITIALIZER("l3_main_1", true, 200000000, OMAP4460_VDD_CORE_OPP100_UV),
+ /* IVA OPP1 - OPP50 */
+ OPP_INITIALIZER("iva", true, 133000000, OMAP4460_VDD_IVA_OPP50_UV),
+ /* IVA OPP2 - OPP100 */
+ OPP_INITIALIZER("iva", true, 266100000, OMAP4460_VDD_IVA_OPP100_UV),
+ /*
+	 * IVA OPP3 - OPP-Turbo. Disabled because the reference schematics
+	 * recommend the Phoenix VCORE2, which can supply only 600 mA; OPPs
+	 * above this frequency, even though the OMAP is capable of them,
+	 * should be enabled by a board file that knows the board's power
+	 * capability.
+ */
+ OPP_INITIALIZER("iva", false, 332000000, OMAP4460_VDD_IVA_OPPTURBO_UV),
+ /* IVA OPP4 - OPP-Nitro */
+ OPP_INITIALIZER("iva", false, 430000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
+ /* IVA OPP5 - OPP-Nitro SpeedBin*/
+ OPP_INITIALIZER("iva", false, 500000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
+
+ /* TODO: add DSP, aess, fdif, gpu */
+};
+
/**
* omap4_opp_init() - initialize omap4 opp table
*/
@@ -93,12 +169,12 @@ int __init omap4_opp_init(void)
{
int r = -ENODEV;
- if (!cpu_is_omap443x())
- return r;
-
- r = omap_init_opp_table(omap44xx_opp_def_list,
- ARRAY_SIZE(omap44xx_opp_def_list));
-
+ if (cpu_is_omap443x())
+ r = omap_init_opp_table(omap443x_opp_def_list,
+ ARRAY_SIZE(omap443x_opp_def_list));
+ else if (cpu_is_omap446x())
+ r = omap_init_opp_table(omap446x_opp_def_list,
+ ARRAY_SIZE(omap446x_opp_def_list));
return r;
}
device_initcall(omap4_opp_init);
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 331478f9b86..f4b3143a8b1 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -40,6 +40,38 @@ static struct omap_device_pm_latency *pm_lats;
*/
int (*omap_pm_suspend)(void);
+#ifdef CONFIG_PM
+/**
+ * struct omap2_oscillator - Describe the board main oscillator latencies
+ * @startup_time: oscillator startup latency
+ * @shutdown_time: oscillator shutdown latency
+ */
+struct omap2_oscillator {
+ u32 startup_time;
+ u32 shutdown_time;
+};
+
+static struct omap2_oscillator oscillator = {
+ .startup_time = ULONG_MAX,
+ .shutdown_time = ULONG_MAX,
+};
+
+void omap_pm_setup_oscillator(u32 tstart, u32 tshut)
+{
+ oscillator.startup_time = tstart;
+ oscillator.shutdown_time = tshut;
+}
+
+void omap_pm_get_oscillator(u32 *tstart, u32 *tshut)
+{
+ if (!tstart || !tshut)
+ return;
+
+ *tstart = oscillator.startup_time;
+ *tshut = oscillator.shutdown_time;
+}
+#endif
+
static int __init _init_omap_device(char *name)
{
struct omap_hwmod *oh;
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index fc3c96d5e01..c22503b17ab 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -138,4 +138,14 @@ static inline int omap4_twl_init(void)
}
#endif
+#ifdef CONFIG_PM
+extern void omap_pm_setup_oscillator(u32 tstart, u32 tshut);
+extern void omap_pm_get_oscillator(u32 *tstart, u32 *tshut);
+extern void omap_pm_setup_sr_i2c_pcb_length(u32 mm);
+#else
+static inline void omap_pm_setup_oscillator(u32 tstart, u32 tshut) { }
+static inline void omap_pm_get_oscillator(u32 *tstart, u32 *tshut) { *tstart = *tshut = 0; }
+static inline void omap_pm_setup_sr_i2c_pcb_length(u32 mm) { }
+#endif
+
#endif
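
Usage sketch (hypothetical board and PM code, not part of this patch): a board publishes its oscillator latencies once, and the voltage-controller timing code later in this series (omap3_set_off_timings(), omap4_set_timings()) reads them back:

/* Board init: main crystal takes ~10 ms to start, ~1 us to stop (example values) */
static void __init my_board_pm_init(void)
{
	omap_pm_setup_oscillator(10000, 1);
}

/* PM code: fetch the latencies when computing sleep entry/exit timings */
static void my_sleep_timing_setup(void)
{
	u32 tstart, tshut;

	omap_pm_get_oscillator(&tstart, &tshut);
	/* convert tstart/tshut to 32 kHz cycles or SCRM fields as needed */
}
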
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 13e1f430398..c333fa6dffa 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -25,7 +25,7 @@
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
@@ -38,7 +38,7 @@
#include <asm/mach-types.h>
#include <asm/system_misc.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "soc.h"
#include "common.h"
@@ -203,7 +203,7 @@ static int omap2_can_sleep(void)
{
if (omap2_fclks_active())
return 0;
- if (osc_ck->usecount > 1)
+ if (__clk_is_enabled(osc_ck))
return 0;
if (omap_dma_running())
return 0;
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 77032006142..7be3622cfc8 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -28,6 +28,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/omap-dma.h>
#include <linux/platform_data/gpio-omap.h>
#include <trace/events/power.h>
@@ -38,8 +39,6 @@
#include "clockdomain.h"
#include "powerdomain.h"
-#include <plat-omap/dma-omap.h>
-
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 7da75aed151..aa6fd98f606 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -101,13 +101,6 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
if (!strncmp(pwrdm->name, "cpu", 3))
return 0;
- /*
- * FIXME: Remove this check when core retention is supported
- * Only MPUSS power domain is added in the list.
- */
- if (strcmp(pwrdm->name, "mpu_pwrdm"))
- return 0;
-
pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
if (!pwrst)
return -ENOMEM;
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c
index 250d909e38b..eb78ae7a346 100644
--- a/arch/arm/mach-omap2/pmu.c
+++ b/arch/arm/mach-omap2/pmu.c
@@ -11,8 +11,6 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/pm_runtime.h>
-
#include <asm/pmu.h>
#include "soc.h"
diff --git a/arch/arm/mach-omap2/prm-regbits-24xx.h b/arch/arm/mach-omap2/prm-regbits-24xx.h
index 638da6dd41c..91aa5106d63 100644
--- a/arch/arm/mach-omap2/prm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-24xx.h
@@ -107,12 +107,14 @@
#define OMAP2420_CLKOUT2_EN_MASK (1 << 15)
#define OMAP2420_CLKOUT2_DIV_SHIFT 11
#define OMAP2420_CLKOUT2_DIV_MASK (0x7 << 11)
+#define OMAP2420_CLKOUT2_DIV_WIDTH 3
#define OMAP2420_CLKOUT2_SOURCE_SHIFT 8
#define OMAP2420_CLKOUT2_SOURCE_MASK (0x3 << 8)
#define OMAP24XX_CLKOUT_EN_SHIFT 7
#define OMAP24XX_CLKOUT_EN_MASK (1 << 7)
#define OMAP24XX_CLKOUT_DIV_SHIFT 3
#define OMAP24XX_CLKOUT_DIV_MASK (0x7 << 3)
+#define OMAP24XX_CLKOUT_DIV_WIDTH 3
#define OMAP24XX_CLKOUT_SOURCE_SHIFT 0
#define OMAP24XX_CLKOUT_SOURCE_MASK (0x3 << 0)
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 838b594d4e1..b0a2142eeb9 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -384,6 +384,7 @@
/* PRM_CLKSEL */
#define OMAP3430_SYS_CLKIN_SEL_SHIFT 0
#define OMAP3430_SYS_CLKIN_SEL_MASK (0x7 << 0)
+#define OMAP3430_SYS_CLKIN_SEL_WIDTH 3
/* PRM_CLKOUT_CTRL */
#define OMAP3430_CLKOUT_EN_MASK (1 << 7)
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index a1a266ce90d..ac25ae6667c 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -114,16 +114,25 @@ struct prm_reset_src_map {
/**
* struct prm_ll_data - fn ptrs to per-SoC PRM function implementations
- * @read_reset_sources: ptr to the Soc PRM-specific get_reset_source impl
+ * @read_reset_sources: ptr to the SoC PRM-specific get_reset_source impl
+ * @was_any_context_lost_old: ptr to the SoC PRM context loss test fn
+ * @clear_context_loss_flags_old: ptr to the SoC PRM context loss flag clear fn
+ *
+ * XXX @was_any_context_lost_old and @clear_context_loss_flags_old are
+ * deprecated.
*/
struct prm_ll_data {
u32 (*read_reset_sources)(void);
+ bool (*was_any_context_lost_old)(u8 part, s16 inst, u16 idx);
+ void (*clear_context_loss_flags_old)(u8 part, s16 inst, u16 idx);
};
extern int prm_register(struct prm_ll_data *pld);
extern int prm_unregister(struct prm_ll_data *pld);
extern u32 prm_read_reset_sources(void);
+extern bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx);
+extern void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx);
#endif
diff --git a/arch/arm/mach-omap2/prm2xxx.c b/arch/arm/mach-omap2/prm2xxx.c
index bf24fc47603..cc0e71430af 100644
--- a/arch/arm/mach-omap2/prm2xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx.c
@@ -18,9 +18,8 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include "soc.h"
#include "common.h"
-#include <plat/cpu.h>
-
#include "vp.h"
#include "powerdomain.h"
#include "clockdomain.h"
@@ -118,14 +117,13 @@ static struct prm_ll_data omap2xxx_prm_ll_data = {
.read_reset_sources = &omap2xxx_prm_read_reset_sources,
};
-static int __init omap2xxx_prm_init(void)
+int __init omap2xxx_prm_init(void)
{
if (!cpu_is_omap24xx())
return 0;
return prm_register(&omap2xxx_prm_ll_data);
}
-subsys_initcall(omap2xxx_prm_init);
static void __exit omap2xxx_prm_exit(void)
{
diff --git a/arch/arm/mach-omap2/prm2xxx.h b/arch/arm/mach-omap2/prm2xxx.h
index fe8a14f190a..3194dd87e0e 100644
--- a/arch/arm/mach-omap2/prm2xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx.h
@@ -126,8 +126,7 @@ extern int omap2xxx_clkdm_wakeup(struct clockdomain *clkdm);
extern void omap2xxx_prm_dpll_reset(void);
-extern int __init prm2xxx_init(void);
-extern int __exit prm2xxx_exit(void);
+extern int __init omap2xxx_prm_init(void);
#endif
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index 78532d6fecd..9624b40836d 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -152,6 +152,7 @@ extern int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm);
/* Named PRCM_CLKSRC_CTRL on the 24XX */
#define OMAP_SYSCLKDIV_SHIFT 6
#define OMAP_SYSCLKDIV_MASK (0x3 << 6)
+#define OMAP_SYSCLKDIV_WIDTH 2
#define OMAP_AUTOEXTCLKMODE_SHIFT 3
#define OMAP_AUTOEXTCLKMODE_MASK (0x3 << 3)
#define OMAP_SYSCLKSEL_SHIFT 0
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index b86116cf0db..39822aabcff 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -18,9 +18,8 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include "soc.h"
#include "common.h"
-#include <plat/cpu.h>
-
#include "vp.h"
#include "powerdomain.h"
#include "prm3xxx.h"
@@ -383,27 +382,30 @@ static struct prm_ll_data omap3xxx_prm_ll_data = {
.read_reset_sources = &omap3xxx_prm_read_reset_sources,
};
-static int __init omap3xxx_prm_init(void)
+int __init omap3xxx_prm_init(void)
+{
+ if (!cpu_is_omap34xx())
+ return 0;
+
+ return prm_register(&omap3xxx_prm_ll_data);
+}
+
+static int __init omap3xxx_prm_late_init(void)
{
int ret;
if (!cpu_is_omap34xx())
return 0;
- ret = prm_register(&omap3xxx_prm_ll_data);
- if (ret)
- return ret;
-
omap3xxx_prm_enable_io_wakeup();
ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
if (!ret)
irq_set_status_flags(omap_prcm_event_to_irq("io"),
IRQ_NOAUTOEN);
-
return ret;
}
-subsys_initcall(omap3xxx_prm_init);
+subsys_initcall(omap3xxx_prm_late_init);
static void __exit omap3xxx_prm_exit(void)
{
diff --git a/arch/arm/mach-omap2/prm3xxx.h b/arch/arm/mach-omap2/prm3xxx.h
index 10cd41a8129..277f71794e6 100644
--- a/arch/arm/mach-omap2/prm3xxx.h
+++ b/arch/arm/mach-omap2/prm3xxx.h
@@ -154,6 +154,7 @@ extern void omap3xxx_prm_restore_irqen(u32 *saved_mask);
extern void omap3xxx_prm_dpll3_reset(void);
+extern int __init omap3xxx_prm_init(void);
extern u32 omap3xxx_prm_get_reset_sources(void);
#endif /* __ASSEMBLER */
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 6d3467af205..7498bc77fe8 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -346,6 +346,37 @@ static u32 omap44xx_prm_read_reset_sources(void)
return r;
}
+/**
+ * omap44xx_prm_was_any_context_lost_old - was module hardware context lost?
+ * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
+ * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
+ * @idx: CONTEXT register offset
+ *
+ * Return 1 if any bits were set in the *_CONTEXT_* register
+ * identified by (@part, @inst, @idx), which means that some context
+ * was lost for that module; otherwise, return 0.
+ */
+static bool omap44xx_prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
+{
+ return (omap4_prminst_read_inst_reg(part, inst, idx)) ? 1 : 0;
+}
+
+/**
+ * omap44xx_prm_clear_context_lost_flags_old - clear context loss flags
+ * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
+ * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
+ * @idx: CONTEXT register offset
+ *
+ * Clear hardware context loss bits for the module identified by
+ * (@part, @inst, @idx). No return value. XXX Writes to reserved bits;
+ * is there a way to avoid this?
+ */
+static void omap44xx_prm_clear_context_loss_flags_old(u8 part, s16 inst,
+ u16 idx)
+{
+ omap4_prminst_write_inst_reg(0xffffffff, part, inst, idx);
+}
+
/* Powerdomain low-level functions */
static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
@@ -613,24 +644,28 @@ struct pwrdm_ops omap4_pwrdm_operations = {
*/
static struct prm_ll_data omap44xx_prm_ll_data = {
.read_reset_sources = &omap44xx_prm_read_reset_sources,
+ .was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old,
+ .clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old,
};
-static int __init omap44xx_prm_init(void)
+int __init omap44xx_prm_init(void)
{
- int ret;
-
if (!cpu_is_omap44xx())
return 0;
- ret = prm_register(&omap44xx_prm_ll_data);
- if (ret)
- return ret;
+ return prm_register(&omap44xx_prm_ll_data);
+}
+
+static int __init omap44xx_prm_late_init(void)
+{
+ if (!cpu_is_omap44xx())
+ return 0;
omap44xx_prm_enable_io_wakeup();
return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
}
-subsys_initcall(omap44xx_prm_init);
+subsys_initcall(omap44xx_prm_late_init);
static void __exit omap44xx_prm_exit(void)
{
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index c8e1accdc90..22b0979206c 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -771,6 +771,7 @@ extern void omap44xx_prm_ocp_barrier(void);
extern void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask);
extern void omap44xx_prm_restore_irqen(u32 *saved_mask);
+extern int __init omap44xx_prm_init(void);
extern u32 omap44xx_prm_get_reset_sources(void);
# endif
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index f596e1e91ff..228b850e632 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -365,6 +365,51 @@ u32 prm_read_reset_sources(void)
}
/**
+ * prm_was_any_context_lost_old - was device context lost? (old API)
+ * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
+ * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
+ * @idx: CONTEXT register offset
+ *
+ * Return 1 if any bits were set in the *_CONTEXT_* register
+ * identified by (@part, @inst, @idx), which means that some context
+ * was lost for that module; otherwise, return 0. XXX Deprecated;
+ * callers need to use a less-SoC-dependent way to identify hardware
+ * IP blocks.
+ */
+bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
+{
+ bool ret = true;
+
+ if (prm_ll_data->was_any_context_lost_old)
+ ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
+ else
+ WARN_ONCE(1, "prm: %s: no mapping function defined\n",
+ __func__);
+
+ return ret;
+}
+
+/**
+ * prm_clear_context_lost_flags_old - clear context loss flags (old API)
+ * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
+ * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
+ * @idx: CONTEXT register offset
+ *
+ * Clear hardware context loss bits for the module identified by
+ * (@part, @inst, @idx). No return value. XXX Deprecated; callers
+ * need to use a less-SoC-dependent way to identify hardware IP
+ * blocks.
+ */
+void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
+{
+ if (prm_ll_data->clear_context_loss_flags_old)
+ prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
+ else
+ WARN_ONCE(1, "prm: %s: no mapping function defined\n",
+ __func__);
+}
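
These two hooks are the plumbing behind the hwmod soc_ops.update_context_lost / get_context_lost callbacks registered for OMAP4 earlier in this series. A sketch of how an update path could be built on them, assuming the powerdomain's prcm_partition/prcm_offs fields and the context_offs / context_lost_counter fields added to struct omap_hwmod_omap4_prcm:

/* Sketch only -- not the hunk's actual implementation */
static void example_update_context_lost(struct omap_hwmod *oh)
{
	u8 part = oh->clkdm->pwrdm.ptr->prcm_partition;
	s16 inst = oh->clkdm->pwrdm.ptr->prcm_offs;
	u16 offs = oh->prcm.omap4.context_offs;

	if (!prm_was_any_context_lost_old(part, inst, offs))
		return;

	oh->prcm.omap4.context_lost_counter++;
	prm_clear_context_loss_flags_old(part, inst, offs);
}
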
+
+/**
* prm_register - register per-SoC low-level data with the PRM
* @pld: low-level per-SoC OMAP PRM data & function pointers to register
*
diff --git a/arch/arm/mach-omap2/scrm44xx.h b/arch/arm/mach-omap2/scrm44xx.h
index 701bf2d3294..e897ac89a3f 100644
--- a/arch/arm/mach-omap2/scrm44xx.h
+++ b/arch/arm/mach-omap2/scrm44xx.h
@@ -127,12 +127,14 @@
/* AUXCLKREQ0 */
#define OMAP4_MAPPING_SHIFT 2
#define OMAP4_MAPPING_MASK (0x7 << 2)
+#define OMAP4_MAPPING_WIDTH 3
#define OMAP4_ACCURACY_SHIFT 1
#define OMAP4_ACCURACY_MASK (1 << 1)
/* AUXCLK0 */
#define OMAP4_CLKDIV_SHIFT 16
#define OMAP4_CLKDIV_MASK (0xf << 16)
+#define OMAP4_CLKDIV_WIDTH 4
#define OMAP4_DISABLECLK_SHIFT 9
#define OMAP4_DISABLECLK_MASK (1 << 9)
#define OMAP4_ENABLE_SHIFT 8
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index aa30a3c2088..04fdbc4c499 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -26,9 +26,8 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/console.h>
-
-#include <plat/omap-serial.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_data/serial-omap.h>
#include "common.h"
#include "omap_hwmod.h"
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index b0e77a40704..b9753fe2723 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -121,6 +121,19 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
sr_data->senn_mod = 0x1;
sr_data->senp_mod = 0x1;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ sr_data->err_weight = OMAP3430_SR_ERRWEIGHT;
+ sr_data->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
+ sr_data->accum_data = OMAP3430_SR_ACCUMDATA;
+ if (!(strcmp(sr_data->name, "smartreflex_mpu"))) {
+ sr_data->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
+ sr_data->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
+ } else {
+ sr_data->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
+ sr_data->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
+ }
+ }
+
sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
if (!sr_data->voltdm) {
pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 7016637b531..691aa674665 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -175,7 +175,7 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
continue;
}
- prom_add_property(np, &device_disabled);
+ of_add_property(np, &device_disabled);
return np;
}
@@ -190,7 +190,7 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
* kernel registering these devices remove them dynamically from the device
* tree on boot.
*/
-void __init omap_dmtimer_init(void)
+static void __init omap_dmtimer_init(void)
{
struct device_node *np;
@@ -210,7 +210,7 @@ void __init omap_dmtimer_init(void)
*
* Get the timer errata flags that are specific to the OMAP device being used.
*/
-u32 __init omap_dm_timer_get_errata(void)
+static u32 __init omap_dm_timer_get_errata(void)
{
if (cpu_is_omap24xx())
return 0;
@@ -392,7 +392,7 @@ static struct of_device_id omap_counter_match[] __initdata = {
};
/* Setup free-running counter for clocksource */
-static int __init omap2_sync32k_clocksource_init(void)
+static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
{
int ret;
struct device_node *np = NULL;
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
index d1dbe125b34..2e44e8a2288 100644
--- a/arch/arm/mach-omap2/usb-host.c
+++ b/arch/arm/mach-omap2/usb-host.c
@@ -508,6 +508,10 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
if (cpu_is_omap34xx()) {
setup_ehci_io_mux(pdata->port_mode);
setup_ohci_io_mux(pdata->port_mode);
+
+ if (omap_rev() <= OMAP3430_REV_ES2_1)
+ usbhs_data.single_ulpi_bypass = true;
+
} else if (cpu_is_omap44xx()) {
setup_4430ehci_io_mux(pdata->port_mode);
setup_4430ohci_io_mux(pdata->port_mode);
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index a8795ff19e6..c5a3c6f9504 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -27,180 +27,88 @@ static u8 async_cs, sync_cs;
static unsigned refclk_psec;
-/* t2_ps, when quantized to fclk units, must happen no earlier than
- * the clock after after t1_NS.
- *
- * Return a possibly updated value of t2_ps, converted to nsec.
- */
-static unsigned
-next_clk(unsigned t1_NS, unsigned t2_ps, unsigned fclk_ps)
-{
- unsigned t1_ps = t1_NS * 1000;
- unsigned t1_f, t2_f;
-
- if ((t1_ps + fclk_ps) < t2_ps)
- return t2_ps / 1000;
-
- t1_f = (t1_ps + fclk_ps - 1) / fclk_ps;
- t2_f = (t2_ps + fclk_ps - 1) / fclk_ps;
-
- if (t1_f >= t2_f)
- t2_f = t1_f + 1;
-
- return (t2_f * fclk_ps) / 1000;
-}
-
/* NOTE: timings are from tusb 6010 datasheet Rev 1.8, 12-Sept 2006 */
-static int tusb_set_async_mode(unsigned sysclk_ps, unsigned fclk_ps)
+static int tusb_set_async_mode(unsigned sysclk_ps)
{
+ struct gpmc_device_timings dev_t;
struct gpmc_timings t;
unsigned t_acsnh_advnh = sysclk_ps + 3000;
- unsigned tmp;
-
- memset(&t, 0, sizeof(t));
-
- /* CS_ON = t_acsnh_acsnl */
- t.cs_on = 8;
- /* ADV_ON = t_acsnh_advnh - t_advn */
- t.adv_on = next_clk(t.cs_on, t_acsnh_advnh - 7000, fclk_ps);
-
- /*
- * READ ... from omap2420 TRM fig 12-13
- */
-
- /* ADV_RD_OFF = t_acsnh_advnh */
- t.adv_rd_off = next_clk(t.adv_on, t_acsnh_advnh, fclk_ps);
-
- /* OE_ON = t_acsnh_advnh + t_advn_oen (then wait for nRDY) */
- t.oe_on = next_clk(t.adv_on, t_acsnh_advnh + 1000, fclk_ps);
-
- /* ACCESS = counters continue only after nRDY */
- tmp = t.oe_on * 1000 + 300;
- t.access = next_clk(t.oe_on, tmp, fclk_ps);
-
- /* OE_OFF = after data gets sampled */
- tmp = t.access * 1000;
- t.oe_off = next_clk(t.access, tmp, fclk_ps);
-
- t.cs_rd_off = t.oe_off;
-
- tmp = t.cs_rd_off * 1000 + 7000 /* t_acsn_rdy_z */;
- t.rd_cycle = next_clk(t.cs_rd_off, tmp, fclk_ps);
-
- /*
- * WRITE ... from omap2420 TRM fig 12-15
- */
-
- /* ADV_WR_OFF = t_acsnh_advnh */
- t.adv_wr_off = t.adv_rd_off;
- /* WE_ON = t_acsnh_advnh + t_advn_wen (then wait for nRDY) */
- t.we_on = next_clk(t.adv_wr_off, t_acsnh_advnh + 1000, fclk_ps);
+ memset(&dev_t, 0, sizeof(dev_t));
- /* WE_OFF = after data gets sampled */
- tmp = t.we_on * 1000 + 300;
- t.we_off = next_clk(t.we_on, tmp, fclk_ps);
+ dev_t.mux = true;
- t.cs_wr_off = t.we_off;
+ dev_t.t_ceasu = 8 * 1000;
+ dev_t.t_avdasu = t_acsnh_advnh - 7000;
+ dev_t.t_ce_avd = 1000;
+ dev_t.t_avdp_r = t_acsnh_advnh;
+ dev_t.t_oeasu = t_acsnh_advnh + 1000;
+ dev_t.t_oe = 300;
+ dev_t.t_cez_r = 7000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.t_avdp_w = t_acsnh_advnh;
+ dev_t.t_weasu = t_acsnh_advnh + 1000;
+ dev_t.t_wpl = 300;
+ dev_t.cyc_aavdh_we = 1;
- tmp = t.cs_wr_off * 1000 + 7000 /* t_acsn_rdy_z */;
- t.wr_cycle = next_clk(t.cs_wr_off, tmp, fclk_ps);
+ gpmc_calc_timings(&t, &dev_t);
return gpmc_cs_set_timings(async_cs, &t);
}
-static int tusb_set_sync_mode(unsigned sysclk_ps, unsigned fclk_ps)
+static int tusb_set_sync_mode(unsigned sysclk_ps)
{
+ struct gpmc_device_timings dev_t;
struct gpmc_timings t;
unsigned t_scsnh_advnh = sysclk_ps + 3000;
- unsigned tmp;
-
- memset(&t, 0, sizeof(t));
- t.cs_on = 8;
-
- /* ADV_ON = t_acsnh_advnh - t_advn */
- t.adv_on = next_clk(t.cs_on, t_scsnh_advnh - 7000, fclk_ps);
-
- /* GPMC_CLK rate = fclk rate / div */
- t.sync_clk = 11100 /* 11.1 nsec */;
- tmp = (t.sync_clk + fclk_ps - 1) / fclk_ps;
- if (tmp > 4)
- return -ERANGE;
- if (tmp == 0)
- tmp = 1;
- t.page_burst_access = (fclk_ps * tmp) / 1000;
-
- /*
- * READ ... based on omap2420 TRM fig 12-19, 12-20
- */
-
- /* ADV_RD_OFF = t_scsnh_advnh */
- t.adv_rd_off = next_clk(t.adv_on, t_scsnh_advnh, fclk_ps);
-
- /* OE_ON = t_scsnh_advnh + t_advn_oen * fclk_ps (then wait for nRDY) */
- tmp = (t.adv_rd_off * 1000) + (3 * fclk_ps);
- t.oe_on = next_clk(t.adv_on, tmp, fclk_ps);
-
- /* ACCESS = number of clock cycles after t_adv_eon */
- tmp = (t.oe_on * 1000) + (5 * fclk_ps);
- t.access = next_clk(t.oe_on, tmp, fclk_ps);
- /* OE_OFF = after data gets sampled */
- tmp = (t.access * 1000) + (1 * fclk_ps);
- t.oe_off = next_clk(t.access, tmp, fclk_ps);
-
- t.cs_rd_off = t.oe_off;
-
- tmp = t.cs_rd_off * 1000 + 7000 /* t_scsn_rdy_z */;
- t.rd_cycle = next_clk(t.cs_rd_off, tmp, fclk_ps);
-
- /*
- * WRITE ... based on omap2420 TRM fig 12-21
- */
-
- /* ADV_WR_OFF = t_scsnh_advnh */
- t.adv_wr_off = t.adv_rd_off;
-
- /* WE_ON = t_scsnh_advnh + t_advn_wen * fclk_ps (then wait for nRDY) */
- tmp = (t.adv_wr_off * 1000) + (3 * fclk_ps);
- t.we_on = next_clk(t.adv_wr_off, tmp, fclk_ps);
-
- /* WE_OFF = number of clock cycles after t_adv_wen */
- tmp = (t.we_on * 1000) + (6 * fclk_ps);
- t.we_off = next_clk(t.we_on, tmp, fclk_ps);
-
- t.cs_wr_off = t.we_off;
-
- tmp = t.cs_wr_off * 1000 + 7000 /* t_scsn_rdy_z */;
- t.wr_cycle = next_clk(t.cs_wr_off, tmp, fclk_ps);
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ dev_t.mux = true;
+ dev_t.sync_read = true;
+ dev_t.sync_write = true;
+
+ dev_t.clk = 11100;
+ dev_t.t_bacc = 1000;
+ dev_t.t_ces = 1000;
+ dev_t.t_ceasu = 8 * 1000;
+ dev_t.t_avdasu = t_scsnh_advnh - 7000;
+ dev_t.t_ce_avd = 1000;
+ dev_t.t_avdp_r = t_scsnh_advnh;
+ dev_t.cyc_aavdh_oe = 3;
+ dev_t.cyc_oe = 5;
+ dev_t.t_ce_rdyz = 7000;
+ dev_t.t_avdp_w = t_scsnh_advnh;
+ dev_t.cyc_aavdh_we = 3;
+ dev_t.cyc_wpl = 6;
+ dev_t.t_ce_rdyz = 7000;
+
+ gpmc_calc_timings(&t, &dev_t);
return gpmc_cs_set_timings(sync_cs, &t);
}
-extern unsigned long gpmc_get_fclk_period(void);
-
/* tusb driver calls this when it changes the chip's clocking */
int tusb6010_platform_retime(unsigned is_refclk)
{
static const char error[] =
KERN_ERR "tusb6010 %s retime error %d\n";
- unsigned fclk_ps = gpmc_get_fclk_period();
unsigned sysclk_ps;
int status;
- if (!refclk_psec || fclk_ps == 0)
+ if (!refclk_psec)
return -ENODEV;
sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;
- status = tusb_set_async_mode(sysclk_ps, fclk_ps);
+ status = tusb_set_async_mode(sysclk_ps);
if (status < 0) {
printk(error, "async", status);
goto done;
}
- status = tusb_set_sync_mode(sysclk_ps, fclk_ps);
+ status = tusb_set_sync_mode(sysclk_ps);
if (status < 0)
printk(error, "sync", status);
done:
@@ -284,7 +192,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
| GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITEMULTIPLE_SUPP
| GPMC_CONFIG1_WRITETYPE_SYNC
- | GPMC_CONFIG1_CLKACTIVATIONTIME(1)
| GPMC_CONFIG1_PAGE_LEN(2)
| GPMC_CONFIG1_WAIT_READ_MON
| GPMC_CONFIG1_WAIT_WRITE_MON
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 75878c37959..49ac7977e03 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -11,13 +11,20 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bug.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+
+#include "iomap.h"
#include "soc.h"
#include "voltage.h"
#include "vc.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
+#include "pm.h"
+#include "scrm44xx.h"
+#include "control.h"
/**
* struct omap_vc_channel_cfg - describe the cfg_channel bitfield
@@ -63,6 +70,9 @@ static struct omap_vc_channel_cfg vc_mutant_channel_cfg = {
};
static struct omap_vc_channel_cfg *vc_cfg_bits;
+
+/* Default I2C trace length on pcb, 6.3cm. Used for capacitance calculations. */
+static u32 sr_i2c_pcb_length = 63;
#define CFG_CHANNEL_MASK 0x1f
/**
@@ -135,6 +145,8 @@ int omap_vc_pre_scale(struct voltagedomain *voltdm,
vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift);
voltdm->write(vc_cmdval, vc->cmdval_reg);
+ voltdm->vc_param->on = target_volt;
+
omap_vp_update_errorgain(voltdm, target_volt);
return 0;
@@ -202,46 +214,389 @@ int omap_vc_bypass_scale(struct voltagedomain *voltdm,
return 0;
}
-static void __init omap3_vfsm_init(struct voltagedomain *voltdm)
+/* Convert microsecond value to number of 32kHz clock cycles */
+static inline u32 omap_usec_to_32k(u32 usec)
+{
+ return DIV_ROUND_UP_ULL(32768ULL * (u64)usec, 1000000ULL);
+}
+
+/* Set oscillator setup time for omap3 */
+static void omap3_set_clksetup(u32 usec, struct voltagedomain *voltdm)
+{
+ voltdm->write(omap_usec_to_32k(usec), OMAP3_PRM_CLKSETUP_OFFSET);
+}
+
+/**
+ * omap3_set_i2c_timings - sets i2c sleep timings for a channel
+ * @voltdm: channel to configure
+ * @off_mode: select whether retention or off-mode values are used
+ *
+ * Calculates and sets up the voltage controller to use I2C-based
+ * voltage scaling for sleep modes. This can be used for either off
+ * mode or retention. Off mode additionally has the option of using the
+ * sys_off_mode pad, which uses a global signal to program the whole
+ * power IC into off-mode.
+ */
+static void omap3_set_i2c_timings(struct voltagedomain *voltdm, bool off_mode)
{
+ unsigned long voltsetup1;
+ u32 tgt_volt;
+
+ /*
+	 * The oscillator is shut down only if we are using the sys_off_mode
+	 * pad, so set a minimal setup time here
+ */
+ omap3_set_clksetup(1, voltdm);
+
+ if (off_mode)
+ tgt_volt = voltdm->vc_param->off;
+ else
+ tgt_volt = voltdm->vc_param->ret;
+
+ voltsetup1 = (voltdm->vc_param->on - tgt_volt) /
+ voltdm->pmic->slew_rate;
+
+ voltsetup1 = voltsetup1 * voltdm->sys_clk.rate / 8 / 1000000 + 1;
+
+ voltdm->rmw(voltdm->vfsm->voltsetup_mask,
+ voltsetup1 << __ffs(voltdm->vfsm->voltsetup_mask),
+ voltdm->vfsm->voltsetup_reg);
+
/*
- * Voltage Manager FSM parameters init
- * XXX This data should be passed in from the board file
+	 * The PMIC does not control voltage scaling during retention,
+	 * so set voltsetup2 to 0
*/
- voltdm->write(OMAP3_CLKSETUP, OMAP3_PRM_CLKSETUP_OFFSET);
- voltdm->write(OMAP3_VOLTOFFSET, OMAP3_PRM_VOLTOFFSET_OFFSET);
- voltdm->write(OMAP3_VOLTSETUP2, OMAP3_PRM_VOLTSETUP2_OFFSET);
+ voltdm->write(0, OMAP3_PRM_VOLTSETUP2_OFFSET);
}
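
Worked example of the voltsetup1 arithmetic above, with assumed values (1.2 V on-voltage, 0.975 V retention voltage, 4 mV/us PMIC slew rate, 26 MHz sys_clk):

/* Sketch mirroring the voltsetup1 calculation in omap3_set_i2c_timings() */
static unsigned long example_voltsetup1(void)
{
	unsigned long on = 1200000, ret = 975000;	/* uV, assumed */
	unsigned long slew = 4000;			/* uV/us, assumed */
	unsigned long sysclk = 26000000;		/* Hz, assumed */
	unsigned long ramp_us = (on - ret) / slew;	/* 56 us */

	return ramp_us * sysclk / 8 / 1000000 + 1;	/* 183 sys_clk/8 cycles */
}
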
-static void __init omap3_vc_init_channel(struct voltagedomain *voltdm)
+/**
+ * omap3_set_off_timings - sets off-mode timings for a channel
+ * @voltdm: channel to configure
+ *
+ * Calculates and sets up off-mode timings for a channel. Off-mode
+ * can use either I2C based voltage scaling, or alternatively
+ * sys_off_mode pad can be used to send a global command to power IC.
+ * This function first checks which mode is being used, and calls
+ * omap3_set_i2c_timings() if the system is using I2C control mode.
+ * sys_off_mode has the additional benefit that voltages can be
+ * scaled to zero volt level with TWL4030 / TWL5030, I2C can only
+ * scale to 600mV.
+ */
+static void omap3_set_off_timings(struct voltagedomain *voltdm)
{
- static bool is_initialized;
+ unsigned long clksetup;
+ unsigned long voltsetup2;
+ unsigned long voltsetup2_old;
+ u32 val;
+ u32 tstart, tshut;
- if (is_initialized)
+ /* check if sys_off_mode is used to control off-mode voltages */
+ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
+ if (!(val & OMAP3430_SEL_OFF_MASK)) {
+ /* No, omap is controlling them over I2C */
+ omap3_set_i2c_timings(voltdm, true);
return;
+ }
+
+ omap_pm_get_oscillator(&tstart, &tshut);
+ omap3_set_clksetup(tstart, voltdm);
+
+ clksetup = voltdm->read(OMAP3_PRM_CLKSETUP_OFFSET);
+
+ /* voltsetup 2 in us */
+ voltsetup2 = voltdm->vc_param->on / voltdm->pmic->slew_rate;
+
+ /* convert to 32k clk cycles */
+ voltsetup2 = DIV_ROUND_UP(voltsetup2 * 32768, 1000000);
+
+ voltsetup2_old = voltdm->read(OMAP3_PRM_VOLTSETUP2_OFFSET);
+
+ /*
+	 * Update voltsetup2 if it is higher than the current value (needed
+	 * because we have multiple channels with different ramp times);
+	 * also always update voltoffset to the value recommended by the TRM
+ */
+ if (voltsetup2 > voltsetup2_old) {
+ voltdm->write(voltsetup2, OMAP3_PRM_VOLTSETUP2_OFFSET);
+ voltdm->write(clksetup - voltsetup2,
+ OMAP3_PRM_VOLTOFFSET_OFFSET);
+ } else
+ voltdm->write(clksetup - voltsetup2_old,
+ OMAP3_PRM_VOLTOFFSET_OFFSET);
+
+ /*
+ * omap is not controlling voltage scaling during off-mode,
+ * thus set voltsetup1 to 0
+ */
+ voltdm->rmw(voltdm->vfsm->voltsetup_mask, 0,
+ voltdm->vfsm->voltsetup_reg);
+
+ /* voltoffset must be clksetup minus voltsetup2 according to TRM */
+ voltdm->write(clksetup - voltsetup2, OMAP3_PRM_VOLTOFFSET_OFFSET);
+}
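
For comparison, the voltsetup2 path above converts a full 0 V to on-voltage ramp into 32 kHz clock cycles. A sketch with the same assumed PMIC values:

/* Sketch of the voltsetup2 conversion in omap3_set_off_timings() */
static unsigned long example_voltsetup2(void)
{
	unsigned long on = 1200000;	/* uV, assumed on-voltage */
	unsigned long slew = 4000;	/* uV/us, assumed slew rate */
	unsigned long us = on / slew;	/* 300 us */

	/* DIV_ROUND_UP(300 * 32768, 1000000) = 10 cycles of the 32 kHz clock */
	return (us * 32768 + 999999) / 1000000;
}
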
+
+static void __init omap3_vc_init_channel(struct voltagedomain *voltdm)
+{
+ omap3_set_off_timings(voltdm);
+}
+
+/**
+ * omap4_calc_volt_ramp - calculates voltage ramping delays on omap4
+ * @voltdm: channel to calculate values for
+ * @voltage_diff: voltage difference in microvolts
+ *
+ * Calculates voltage ramp prescaler + counter values for a voltage
+ * difference on omap4. Returns a field value suitable for writing to
+ * a channel's VOLTSETUP register, in the following format:
+ * bits [8:9] prescaler, bits [0:5] counter. See the OMAP4 TRM for reference.
+ */
+static u32 omap4_calc_volt_ramp(struct voltagedomain *voltdm, u32 voltage_diff)
+{
+ u32 prescaler;
+ u32 cycles;
+ u32 time;
+
+ time = voltage_diff / voltdm->pmic->slew_rate;
+
+ cycles = voltdm->sys_clk.rate / 1000 * time / 1000;
+
+ cycles /= 64;
+ prescaler = 0;
+
+ /* shift to next prescaler until no overflow */
+
+ /* scale for div 256 = 64 * 4 */
+ if (cycles > 63) {
+ cycles /= 4;
+ prescaler++;
+ }
+
+ /* scale for div 512 = 256 * 2 */
+ if (cycles > 63) {
+ cycles /= 2;
+ prescaler++;
+ }
+
+ /* scale for div 2048 = 512 * 4 */
+ if (cycles > 63) {
+ cycles /= 4;
+ prescaler++;
+ }
+
+ /* check for overflow => invalid ramp time */
+ if (cycles > 63) {
+ pr_warn("%s: invalid setuptime for vdd_%s\n", __func__,
+ voltdm->name);
+ return 0;
+ }
+
+ cycles++;
- omap3_vfsm_init(voltdm);
+ return (prescaler << OMAP4430_RAMP_UP_PRESCAL_SHIFT) |
+ (cycles << OMAP4430_RAMP_UP_COUNT_SHIFT);
+}
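
Worked example of the prescaler/counter derivation above, with assumed values (350 mV ramp, 4 mV/us slew rate, 38.4 MHz sys_clk):

/* Sketch mirroring omap4_calc_volt_ramp() for one set of example inputs */
static u32 example_omap4_ramp_field(void)
{
	u32 diff = 350000;			/* uV, assumed on - off */
	u32 slew = 4000;			/* uV/us, assumed */
	u32 sysclk = 38400000;			/* Hz, assumed */
	u32 time = diff / slew;			/* 87 us */
	u32 cycles = sysclk / 1000 * time / 1000 / 64;	/* 52 */

	/* 52 fits in the 6-bit counter, so the prescaler stays at divide-by-64 */
	return (0 << OMAP4430_RAMP_UP_PRESCAL_SHIFT) |
	       ((cycles + 1) << OMAP4430_RAMP_UP_COUNT_SHIFT);
}
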
+
+/**
+ * omap4_usec_to_val_scrm - convert microsecond value to SCRM module bitfield
+ * @usec: microseconds
+ * @shift: number of bits to shift left
+ * @mask: bitfield mask
+ *
+ * Converts a microsecond value to an OMAP4 SCRM bitfield. The bitfield
+ * is shifted to the requested position and checked against the mask
+ * value; if larger, it is forced to the max value of the field (i.e. the
+ * mask itself).
+ * Returns the SCRM bitfield value.
+ */
+static u32 omap4_usec_to_val_scrm(u32 usec, int shift, u32 mask)
+{
+ u32 val;
+
+ val = omap_usec_to_32k(usec) << shift;
- is_initialized = true;
+ /* Check for overflow, if yes, force to max value */
+ if (val > mask)
+ val = mask;
+
+ return val;
}
+/**
+ * omap4_set_timings - set voltage ramp timings for a channel
+ * @voltdm: channel to configure
+ * @off_mode: whether off-mode values are used
+ *
+ * Calculates and sets the voltage ramp up / down values for a channel.
+ */
+static void omap4_set_timings(struct voltagedomain *voltdm, bool off_mode)
+{
+ u32 val;
+ u32 ramp;
+ int offset;
+ u32 tstart, tshut;
+
+ if (off_mode) {
+ ramp = omap4_calc_volt_ramp(voltdm,
+ voltdm->vc_param->on - voltdm->vc_param->off);
+ offset = voltdm->vfsm->voltsetup_off_reg;
+ } else {
+ ramp = omap4_calc_volt_ramp(voltdm,
+ voltdm->vc_param->on - voltdm->vc_param->ret);
+ offset = voltdm->vfsm->voltsetup_reg;
+ }
+
+ if (!ramp)
+ return;
+
+ val = voltdm->read(offset);
+
+ val |= ramp << OMAP4430_RAMP_DOWN_COUNT_SHIFT;
+
+ val |= ramp << OMAP4430_RAMP_UP_COUNT_SHIFT;
+
+ voltdm->write(val, offset);
+
+ omap_pm_get_oscillator(&tstart, &tshut);
+
+ val = omap4_usec_to_val_scrm(tstart, OMAP4_SETUPTIME_SHIFT,
+ OMAP4_SETUPTIME_MASK);
+ val |= omap4_usec_to_val_scrm(tshut, OMAP4_DOWNTIME_SHIFT,
+ OMAP4_DOWNTIME_MASK);
+
+ __raw_writel(val, OMAP4_SCRM_CLKSETUPTIME);
+}
/* OMAP4 specific voltage init functions */
static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
{
- static bool is_initialized;
- u32 vc_val;
+ omap4_set_timings(voltdm, true);
+ omap4_set_timings(voltdm, false);
+}
+
+struct i2c_init_data {
+ u8 loadbits;
+ u8 load;
+ u8 hsscll_38_4;
+ u8 hsscll_26;
+ u8 hsscll_19_2;
+ u8 hsscll_16_8;
+ u8 hsscll_12;
+};
- if (is_initialized)
+static const __initdata struct i2c_init_data omap4_i2c_timing_data[] = {
+ {
+ .load = 50,
+ .loadbits = 0x3,
+ .hsscll_38_4 = 13,
+ .hsscll_26 = 11,
+ .hsscll_19_2 = 9,
+ .hsscll_16_8 = 9,
+ .hsscll_12 = 8,
+ },
+ {
+ .load = 25,
+ .loadbits = 0x2,
+ .hsscll_38_4 = 13,
+ .hsscll_26 = 11,
+ .hsscll_19_2 = 9,
+ .hsscll_16_8 = 9,
+ .hsscll_12 = 8,
+ },
+ {
+ .load = 12,
+ .loadbits = 0x1,
+ .hsscll_38_4 = 11,
+ .hsscll_26 = 10,
+ .hsscll_19_2 = 9,
+ .hsscll_16_8 = 9,
+ .hsscll_12 = 8,
+ },
+ {
+ .load = 0,
+ .loadbits = 0x0,
+ .hsscll_38_4 = 12,
+ .hsscll_26 = 10,
+ .hsscll_19_2 = 9,
+ .hsscll_16_8 = 8,
+ .hsscll_12 = 8,
+ },
+};
+
+/**
+ * omap4_vc_i2c_timing_init - sets up board I2C timing parameters
+ * @voltdm: voltagedomain pointer to get data from
+ *
+ * Uses PMIC and board-supplied settings to calculate the total I2C
+ * channel capacitance and sets the timing parameters based on this.
+ * Pre-calculated values are provided in data tables, as it is not
+ * straightforward to calculate them at runtime.
+ */
+static void __init omap4_vc_i2c_timing_init(struct voltagedomain *voltdm)
+{
+ u32 capacitance;
+ u32 val;
+ u16 hsscll;
+ const struct i2c_init_data *i2c_data;
+
+ if (!voltdm->pmic->i2c_high_speed) {
+ pr_warn("%s: only high speed supported!\n", __func__);
return;
+ }
+
+ /* PCB trace capacitance, 0.125pF / mm => mm / 8 */
+ capacitance = DIV_ROUND_UP(sr_i2c_pcb_length, 8);
+
+ /* OMAP pad capacitance */
+ capacitance += 4;
+
+ /* PMIC pad capacitance */
+ capacitance += voltdm->pmic->i2c_pad_load;
+
+ /* Search for capacitance match in the table */
+ i2c_data = omap4_i2c_timing_data;
+
+ while (i2c_data->load > capacitance)
+ i2c_data++;
+
+ /* Select proper values based on sysclk frequency */
+ switch (voltdm->sys_clk.rate) {
+ case 38400000:
+ hsscll = i2c_data->hsscll_38_4;
+ break;
+ case 26000000:
+ hsscll = i2c_data->hsscll_26;
+ break;
+ case 19200000:
+ hsscll = i2c_data->hsscll_19_2;
+ break;
+ case 16800000:
+ hsscll = i2c_data->hsscll_16_8;
+ break;
+ case 12000000:
+ hsscll = i2c_data->hsscll_12;
+ break;
+ default:
+ pr_warn("%s: unsupported sysclk rate: %d!\n", __func__,
+ voltdm->sys_clk.rate);
+ return;
+ }
- /* XXX These are magic numbers and do not belong! */
- vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT);
- voltdm->write(vc_val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
+ /* Loadbits define pull setup for the I2C channels */
+ val = i2c_data->loadbits << 25 | i2c_data->loadbits << 29;
- is_initialized = true;
+ /* Write to SYSCTRL_PADCONF_WKUP_CTRL_I2C_2 to set up the I2C pull */
+ __raw_writel(val, OMAP2_L4_IO_ADDRESS(OMAP4_CTRL_MODULE_PAD_WKUP +
+ OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_I2C_2));
+
+ /* HSSCLH can always be zero */
+ val = hsscll << OMAP4430_HSSCLL_SHIFT;
+ val |= (0x28 << OMAP4430_SCLL_SHIFT | 0x2c << OMAP4430_SCLH_SHIFT);
+
+ /* Write setup times to I2C config register */
+ voltdm->write(val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
}
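
A compact sketch of the capacitance sum and table walk above; the 10 pF PMIC pad load is an assumed figure, and the table is a trimmed copy of omap4_i2c_timing_data with only the 38.4 MHz column:

	#include <stdio.h>

	struct row { unsigned int load, loadbits, hsscll_38_4; };

	static const struct row table[] = {
		{ 50, 0x3, 13 }, { 25, 0x2, 13 }, { 12, 0x1, 11 }, { 0, 0x0, 12 },
	};

	int main(void)
	{
		/* 63 mm trace (default) -> ceil(63 / 8) = 8 pF, + 4 pF OMAP pad,
		 * + 10 pF PMIC pad load (assumed) = 22 pF total. */
		unsigned int capacitance = (63 + 7) / 8 + 4 + 10;
		const struct row *r = table;

		while (r->load > capacitance)	/* same walk as above */
			r++;

		/* prints "loadbits=0x1 hsscll=11" for the 12 pF row */
		printf("loadbits=0x%x hsscll=%u\n", r->loadbits, r->hsscll_38_4);
		return 0;
	}
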
+
+
/**
* omap_vc_i2c_init - initialize I2C interface to PMIC
* @voltdm: voltage domain containing VC data
@@ -281,9 +636,51 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
mcode << __ffs(vc->common->i2c_mcode_mask),
vc->common->i2c_cfg_reg);
+ if (cpu_is_omap44xx())
+ omap4_vc_i2c_timing_init(voltdm);
+
initialized = true;
}
+/**
+ * omap_vc_calc_vsel - calculate vsel value for a channel
+ * @voltdm: channel to calculate value for
+ * @uvolt: microvolt value to convert to vsel
+ *
+ * Converts a microvolt value to a vsel value for the PMIC in use.
+ * This checks whether the microvolt value is out of bounds and
+ * adjusts the value accordingly. If an unsupported value is
+ * detected, a warning is issued.
+ */
+static u8 omap_vc_calc_vsel(struct voltagedomain *voltdm, u32 uvolt)
+{
+ if (voltdm->pmic->vddmin > uvolt)
+ uvolt = voltdm->pmic->vddmin;
+ if (voltdm->pmic->vddmax < uvolt) {
+ WARN(1, "%s: voltage not supported by pmic: %u vs max %u\n",
+ __func__, uvolt, voltdm->pmic->vddmax);
+ /* Let's try the maximum value anyway */
+ uvolt = voltdm->pmic->vddmax;
+ }
+
+ return voltdm->pmic->uv_to_vsel(uvolt);
+}
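
A minimal sketch of the clamping behaviour, using a made-up linear uv_to_vsel() (12.5 mV per step from 600 mV) and made-up PMIC limits; the real conversion is PMIC specific:

	#include <stdio.h>

	static unsigned char uv_to_vsel(unsigned long uv)
	{
		return (uv - 600000) / 12500;	/* hypothetical 12.5 mV/step PMIC */
	}

	int main(void)
	{
		unsigned long vddmin = 830000, vddmax = 1410000;	/* assumed */
		unsigned long uvolt  = 1500000;	/* request above the PMIC maximum */

		if (uvolt < vddmin)
			uvolt = vddmin;
		if (uvolt > vddmax)		/* would trigger the WARN() above */
			uvolt = vddmax;

		printf("vsel=0x%02x\n", uv_to_vsel(uvolt));	/* 0x40 for 1.41 V */
		return 0;
	}
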
+
+#ifdef CONFIG_PM
+/**
+ * omap_pm_setup_sr_i2c_pcb_length - set length of SR I2C traces on PCB
+ * @mm: length of the PCB trace in millimetres
+ *
+ * Sets the PCB trace length for the I2C channel. The default is 63 mm.
+ * This is needed to properly calculate the capacitance of the PCB
+ * trace and to set the SR I2C channel timing parameters.
+ */
+void __init omap_pm_setup_sr_i2c_pcb_length(u32 mm)
+{
+ sr_i2c_pcb_length = mm;
+}
+#endif
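
A board file that knows its SR I2C trace is shorter than the 63 mm default could override it from its machine init path, for example as below; the 40 mm figure and the board hook name are purely illustrative:

	static void __init myboard_init(void)
	{
		/* SR/VC I2C trace measured at ~40 mm on this (hypothetical) board */
		omap_pm_setup_sr_i2c_pcb_length(40);

		/* ... rest of the board init ... */
	}
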
+
void __init omap_vc_init_channel(struct voltagedomain *voltdm)
{
struct omap_vc_channel *vc = voltdm->vc;
@@ -311,7 +708,6 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr;
vc->volt_reg_addr = voltdm->pmic->volt_reg_addr;
vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr;
- vc->setup_time = voltdm->pmic->volt_setup_time;
/* Configure the i2c slave address for this VC */
voltdm->rmw(vc->smps_sa_mask,
@@ -331,14 +727,18 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
voltdm->rmw(vc->smps_cmdra_mask,
vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask),
vc->smps_cmdra_reg);
- vc->cfg_channel |= vc_cfg_bits->rac | vc_cfg_bits->racen;
+ vc->cfg_channel |= vc_cfg_bits->rac;
}
+ if (vc->cmd_reg_addr == vc->volt_reg_addr)
+ vc->cfg_channel |= vc_cfg_bits->racen;
+
/* Set up the on, inactive, retention and off voltage */
- on_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->on_volt);
- onlp_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->onlp_volt);
- ret_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->ret_volt);
- off_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->off_volt);
+ on_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->on);
+ onlp_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->onlp);
+ ret_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->ret);
+ off_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->off);
+
val = ((on_vsel << vc->common->cmd_on_shift) |
(onlp_vsel << vc->common->cmd_onlp_shift) |
(ret_vsel << vc->common->cmd_ret_shift) |
@@ -349,11 +749,6 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm)
/* Channel configuration */
omap_vc_config_channel(voltdm);
- /* Configure the setup times */
- voltdm->rmw(voltdm->vfsm->voltsetup_mask,
- vc->setup_time << __ffs(voltdm->vfsm->voltsetup_mask),
- voltdm->vfsm->voltsetup_reg);
-
omap_vc_i2c_init(voltdm);
if (cpu_is_omap34xx())
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index 478bf6b432c..91c8d75bf2e 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -86,7 +86,6 @@ struct omap_vc_channel {
u16 i2c_slave_addr;
u16 volt_reg_addr;
u16 cmd_reg_addr;
- u16 setup_time;
u8 cfg_channel;
bool i2c_high_speed;
@@ -111,6 +110,13 @@ extern struct omap_vc_channel omap4_vc_mpu;
extern struct omap_vc_channel omap4_vc_iva;
extern struct omap_vc_channel omap4_vc_core;
+extern struct omap_vc_param omap3_mpu_vc_data;
+extern struct omap_vc_param omap3_core_vc_data;
+
+extern struct omap_vc_param omap4_mpu_vc_data;
+extern struct omap_vc_param omap4_iva_vc_data;
+extern struct omap_vc_param omap4_core_vc_data;
+
void omap_vc_init_channel(struct voltagedomain *voltdm);
int omap_vc_pre_scale(struct voltagedomain *voltdm,
unsigned long target_volt,
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index 5d8eaf31569..75bc4aa22b3 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -71,3 +71,25 @@ struct omap_vc_channel omap3_vc_core = {
.smps_cmdra_mask = OMAP3430_CMDRA1_MASK,
.cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT,
};
+
+/*
+ * Voltage levels for different operating modes: on, sleep, retention and off
+ */
+#define OMAP3_ON_VOLTAGE_UV 1200000
+#define OMAP3_ONLP_VOLTAGE_UV 1000000
+#define OMAP3_RET_VOLTAGE_UV 975000
+#define OMAP3_OFF_VOLTAGE_UV 600000
+
+struct omap_vc_param omap3_mpu_vc_data = {
+ .on = OMAP3_ON_VOLTAGE_UV,
+ .onlp = OMAP3_ONLP_VOLTAGE_UV,
+ .ret = OMAP3_RET_VOLTAGE_UV,
+ .off = OMAP3_OFF_VOLTAGE_UV,
+};
+
+struct omap_vc_param omap3_core_vc_data = {
+ .on = OMAP3_ON_VOLTAGE_UV,
+ .onlp = OMAP3_ONLP_VOLTAGE_UV,
+ .ret = OMAP3_RET_VOLTAGE_UV,
+ .off = OMAP3_OFF_VOLTAGE_UV,
+};
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index d70b930f273..085e5d6a04f 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -87,3 +87,31 @@ struct omap_vc_channel omap4_vc_core = {
.cfg_channel_sa_shift = OMAP4430_SA_VDD_CORE_L_SHIFT,
};
+/*
+ * Voltage levels for different operating modes: on, sleep, retention and off
+ */
+#define OMAP4_ON_VOLTAGE_UV 1375000
+#define OMAP4_ONLP_VOLTAGE_UV 1375000
+#define OMAP4_RET_VOLTAGE_UV 837500
+#define OMAP4_OFF_VOLTAGE_UV 0
+
+struct omap_vc_param omap4_mpu_vc_data = {
+ .on = OMAP4_ON_VOLTAGE_UV,
+ .onlp = OMAP4_ONLP_VOLTAGE_UV,
+ .ret = OMAP4_RET_VOLTAGE_UV,
+ .off = OMAP4_OFF_VOLTAGE_UV,
+};
+
+struct omap_vc_param omap4_iva_vc_data = {
+ .on = OMAP4_ON_VOLTAGE_UV,
+ .onlp = OMAP4_ONLP_VOLTAGE_UV,
+ .ret = OMAP4_RET_VOLTAGE_UV,
+ .off = OMAP4_OFF_VOLTAGE_UV,
+};
+
+struct omap_vc_param omap4_core_vc_data = {
+ .on = OMAP4_ON_VOLTAGE_UV,
+ .onlp = OMAP4_ONLP_VOLTAGE_UV,
+ .ret = OMAP4_RET_VOLTAGE_UV,
+ .off = OMAP4_OFF_VOLTAGE_UV,
+};
diff --git a/arch/arm/mach-omap2/voltage.h b/arch/arm/mach-omap2/voltage.h
index 7283b7ed7de..a0ce4f10ff1 100644
--- a/arch/arm/mach-omap2/voltage.h
+++ b/arch/arm/mach-omap2/voltage.h
@@ -40,12 +40,14 @@ struct powerdomain;
* data
* @voltsetup_mask: SETUP_TIME* bitmask in the PRM_VOLTSETUP* register
* @voltsetup_reg: register offset of PRM_VOLTSETUP from PRM base
+ * @voltsetup_off_reg: register offset of PRM_VOLTSETUP_OFF from PRM base
*
* XXX What about VOLTOFFSET/VOLTCTRL?
*/
struct omap_vfsm_instance {
u32 voltsetup_mask;
u8 voltsetup_reg;
+ u8 voltsetup_off_reg;
};
/**
@@ -74,6 +76,8 @@ struct voltagedomain {
const struct omap_vfsm_instance *vfsm;
struct omap_vp_instance *vp;
struct omap_voltdm_pmic *pmic;
+ struct omap_vp_param *vp_param;
+ struct omap_vc_param *vc_param;
/* VC/VP register access functions: SoC specific */
u32 (*read) (u8 offset);
@@ -92,6 +96,24 @@ struct voltagedomain {
struct omap_volt_data *volt_data;
};
+/* Min and max voltages from OMAP perspective */
+#define OMAP3430_VP1_VLIMITTO_VDDMIN 850000
+#define OMAP3430_VP1_VLIMITTO_VDDMAX 1425000
+#define OMAP3430_VP2_VLIMITTO_VDDMIN 900000
+#define OMAP3430_VP2_VLIMITTO_VDDMAX 1150000
+
+#define OMAP3630_VP1_VLIMITTO_VDDMIN 900000
+#define OMAP3630_VP1_VLIMITTO_VDDMAX 1350000
+#define OMAP3630_VP2_VLIMITTO_VDDMIN 900000
+#define OMAP3630_VP2_VLIMITTO_VDDMAX 1200000
+
+#define OMAP4_VP_MPU_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_MPU_VLIMITTO_VDDMAX 1410000
+#define OMAP4_VP_IVA_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_IVA_VLIMITTO_VDDMAX 1260000
+#define OMAP4_VP_CORE_VLIMITTO_VDDMIN 830000
+#define OMAP4_VP_CORE_VLIMITTO_VDDMAX 1200000
+
/**
* struct omap_voltdm_pmic - PMIC specific data required by voltage driver.
* @slew_rate: PMIC slew rate (in uv/us)
@@ -107,26 +129,34 @@ struct voltagedomain {
struct omap_voltdm_pmic {
int slew_rate;
int step_size;
- u32 on_volt;
- u32 onlp_volt;
- u32 ret_volt;
- u32 off_volt;
- u16 volt_setup_time;
u16 i2c_slave_addr;
u16 volt_reg_addr;
u16 cmd_reg_addr;
u8 vp_erroroffset;
u8 vp_vstepmin;
u8 vp_vstepmax;
- u8 vp_vddmin;
- u8 vp_vddmax;
+ u32 vddmin;
+ u32 vddmax;
u8 vp_timeout_us;
bool i2c_high_speed;
+ u32 i2c_pad_load;
u8 i2c_mcode;
unsigned long (*vsel_to_uv) (const u8 vsel);
u8 (*uv_to_vsel) (unsigned long uV);
};
+struct omap_vp_param {
+ u32 vddmax;
+ u32 vddmin;
+};
+
+struct omap_vc_param {
+ u32 on;
+ u32 onlp;
+ u32 ret;
+ u32 off;
+};
+
void omap_voltage_get_volttable(struct voltagedomain *voltdm,
struct omap_volt_data **volt_data);
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 63afbfed3cb..261bb7cb4e6 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -117,6 +117,11 @@ void __init omap3xxx_voltagedomains_init(void)
}
#endif
+ omap3_voltdm_mpu.vp_param = &omap3_mpu_vp_data;
+ omap3_voltdm_core.vp_param = &omap3_core_vp_data;
+ omap3_voltdm_mpu.vc_param = &omap3_mpu_vc_data;
+ omap3_voltdm_core.vc_param = &omap3_core_vc_data;
+
if (soc_is_am35xx())
voltdms = voltagedomains_am35xx;
else
diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
index c3115f6853d..48b22a0a0c8 100644
--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include "common.h"
-
+#include "soc.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
@@ -34,14 +34,17 @@
static const struct omap_vfsm_instance omap4_vdd_mpu_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET,
+ .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET,
};
static const struct omap_vfsm_instance omap4_vdd_iva_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET,
+ .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET,
};
static const struct omap_vfsm_instance omap4_vdd_core_vfsm = {
.voltsetup_reg = OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET,
+ .voltsetup_off_reg = OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET,
};
static struct voltagedomain omap4_voltdm_mpu = {
@@ -101,11 +104,25 @@ void __init omap44xx_voltagedomains_init(void)
* for the currently-running IC
*/
#ifdef CONFIG_PM_OPP
- omap4_voltdm_mpu.volt_data = omap44xx_vdd_mpu_volt_data;
- omap4_voltdm_iva.volt_data = omap44xx_vdd_iva_volt_data;
- omap4_voltdm_core.volt_data = omap44xx_vdd_core_volt_data;
+ if (cpu_is_omap443x()) {
+ omap4_voltdm_mpu.volt_data = omap443x_vdd_mpu_volt_data;
+ omap4_voltdm_iva.volt_data = omap443x_vdd_iva_volt_data;
+ omap4_voltdm_core.volt_data = omap443x_vdd_core_volt_data;
+ } else if (cpu_is_omap446x()) {
+ omap4_voltdm_mpu.volt_data = omap446x_vdd_mpu_volt_data;
+ omap4_voltdm_iva.volt_data = omap446x_vdd_iva_volt_data;
+ omap4_voltdm_core.volt_data = omap446x_vdd_core_volt_data;
+ }
#endif
+ omap4_voltdm_mpu.vp_param = &omap4_mpu_vp_data;
+ omap4_voltdm_iva.vp_param = &omap4_iva_vp_data;
+ omap4_voltdm_core.vp_param = &omap4_core_vp_data;
+
+ omap4_voltdm_mpu.vc_param = &omap4_mpu_vc_data;
+ omap4_voltdm_iva.vc_param = &omap4_iva_vc_data;
+ omap4_voltdm_core.vc_param = &omap4_core_vc_data;
+
for (i = 0; voltdm = voltagedomains_omap4[i], voltdm; i++)
voltdm->sys_clk.name = sys_clk_name;
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
index 85241b828c0..a3c30655aa3 100644
--- a/arch/arm/mach-omap2/vp.c
+++ b/arch/arm/mach-omap2/vp.c
@@ -58,8 +58,10 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
sys_clk_rate = voltdm->sys_clk.rate / 1000;
timeout = (sys_clk_rate * voltdm->pmic->vp_timeout_us) / 1000;
- vddmin = voltdm->pmic->vp_vddmin;
- vddmax = voltdm->pmic->vp_vddmax;
+ vddmin = max(voltdm->vp_param->vddmin, voltdm->pmic->vddmin);
+ vddmax = min(voltdm->vp_param->vddmax, voltdm->pmic->vddmax);
+ vddmin = voltdm->pmic->uv_to_vsel(vddmin);
+ vddmax = voltdm->pmic->uv_to_vsel(vddmax);
waittime = DIV_ROUND_UP(voltdm->pmic->step_size * sys_clk_rate,
1000 * voltdm->pmic->slew_rate);
@@ -138,7 +140,7 @@ int omap_vp_forceupdate_scale(struct voltagedomain *voltdm,
udelay(1);
}
if (timeout >= VP_TRANXDONE_TIMEOUT) {
- pr_warn("%s: vdd_%s TRANXDONE timeout exceeded. Voltage change aborted",
+ pr_warn("%s: vdd_%s TRANXDONE timeout exceeded. Voltage change aborted\n",
__func__, voltdm->name);
return -ETIMEDOUT;
}
@@ -197,7 +199,7 @@ void omap_vp_enable(struct voltagedomain *voltdm)
u32 vpconfig, volt;
if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
+ pr_warn("%s: VDD specified does not exist!\n", __func__);
return;
}
@@ -214,8 +216,8 @@ void omap_vp_enable(struct voltagedomain *voltdm)
volt = voltdm_get_voltage(voltdm);
if (!volt) {
- pr_warning("%s: unable to find current voltage for %s\n",
- __func__, voltdm->name);
+ pr_warn("%s: unable to find current voltage for %s\n",
+ __func__, voltdm->name);
return;
}
@@ -242,7 +244,7 @@ void omap_vp_disable(struct voltagedomain *voltdm)
int timeout;
if (!voltdm || IS_ERR(voltdm)) {
- pr_warning("%s: VDD specified does not exist!\n", __func__);
+ pr_warn("%s: VDD specified does not exist!\n", __func__);
return;
}
@@ -272,8 +274,7 @@ void omap_vp_disable(struct voltagedomain *voltdm)
VP_IDLE_TIMEOUT, timeout);
if (timeout >= VP_IDLE_TIMEOUT)
- pr_warning("%s: vdd_%s idle timedout\n",
- __func__, voltdm->name);
+ pr_warn("%s: vdd_%s idle timedout\n", __func__, voltdm->name);
vp->enabled = false;
diff --git a/arch/arm/mach-omap2/vp.h b/arch/arm/mach-omap2/vp.h
index 7c155d248aa..0fdf7080e4a 100644
--- a/arch/arm/mach-omap2/vp.h
+++ b/arch/arm/mach-omap2/vp.h
@@ -117,6 +117,13 @@ extern struct omap_vp_instance omap4_vp_mpu;
extern struct omap_vp_instance omap4_vp_iva;
extern struct omap_vp_instance omap4_vp_core;
+extern struct omap_vp_param omap3_mpu_vp_data;
+extern struct omap_vp_param omap3_core_vp_data;
+
+extern struct omap_vp_param omap4_mpu_vp_data;
+extern struct omap_vp_param omap4_iva_vp_data;
+extern struct omap_vp_param omap4_core_vp_data;
+
void omap_vp_init(struct voltagedomain *voltdm);
void omap_vp_enable(struct voltagedomain *voltdm);
void omap_vp_disable(struct voltagedomain *voltdm);
diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
index bd89f80089f..1914e026245 100644
--- a/arch/arm/mach-omap2/vp3xxx_data.c
+++ b/arch/arm/mach-omap2/vp3xxx_data.c
@@ -77,3 +77,13 @@ struct omap_vp_instance omap3_vp_core = {
.vstatus = OMAP3_PRM_VP2_STATUS_OFFSET,
.voltage = OMAP3_PRM_VP2_VOLTAGE_OFFSET,
};
+
+struct omap_vp_param omap3_mpu_vp_data = {
+ .vddmin = OMAP3430_VP1_VLIMITTO_VDDMIN,
+ .vddmax = OMAP3430_VP1_VLIMITTO_VDDMAX,
+};
+
+struct omap_vp_param omap3_core_vp_data = {
+ .vddmin = OMAP3430_VP2_VLIMITTO_VDDMIN,
+ .vddmax = OMAP3430_VP2_VLIMITTO_VDDMAX,
+};
diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
index 8c031d16879..e62f6b018be 100644
--- a/arch/arm/mach-omap2/vp44xx_data.c
+++ b/arch/arm/mach-omap2/vp44xx_data.c
@@ -87,3 +87,18 @@ struct omap_vp_instance omap4_vp_core = {
.vstatus = OMAP4_PRM_VP_CORE_STATUS_OFFSET,
.voltage = OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET,
};
+
+struct omap_vp_param omap4_mpu_vp_data = {
+ .vddmin = OMAP4_VP_MPU_VLIMITTO_VDDMIN,
+ .vddmax = OMAP4_VP_MPU_VLIMITTO_VDDMAX,
+};
+
+struct omap_vp_param omap4_iva_vp_data = {
+ .vddmin = OMAP4_VP_IVA_VLIMITTO_VDDMIN,
+ .vddmax = OMAP4_VP_IVA_VLIMITTO_VDDMAX,
+};
+
+struct omap_vp_param omap4_core_vp_data = {
+ .vddmin = OMAP4_VP_CORE_VLIMITTO_VDDMIN,
+ .vddmax = OMAP4_VP_CORE_VLIMITTO_VDDMAX,
+};
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 113c57a0356..fb7f1d1627d 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -26,6 +26,7 @@
#include <linux/i2c/pxa-i2c.h>
#include <linux/pwm_backlight.h>
+#include <media/mt9v022.h>
#include <media/soc_camera.h>
#include <linux/platform_data/camera-pxa.h>
@@ -468,6 +469,10 @@ static struct i2c_board_info __initdata pcm990_i2c_devices[] = {
},
};
+static struct mt9v022_platform_data mt9v022_pdata = {
+ .y_skip_top = 1,
+};
+
static struct i2c_board_info pcm990_camera_i2c[] = {
{
I2C_BOARD_INFO("mt9v022", 0x48),
@@ -480,6 +485,7 @@ static struct soc_camera_link iclink[] = {
{
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[0],
+ .priv = &mt9v022_pdata,
.i2c_adapter_id = 0,
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
diff --git a/arch/arm/mach-realview/include/mach/board-eb.h b/arch/arm/mach-realview/include/mach/board-eb.h
index 124bce6b4d7..a301e61a555 100644
--- a/arch/arm/mach-realview/include/mach/board-eb.h
+++ b/arch/arm/mach-realview/include/mach/board-eb.h
@@ -47,7 +47,7 @@
#define REALVIEW_EB_USB_BASE 0x4F000000 /* USB */
#ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB
-#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x1F000000
+#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x10100000
#define REALVIEW_EB11MP_L220_BASE 0x10102000 /* L220 registers */
#define REALVIEW_EB11MP_SYS_PLD_CTRL1 0xD8 /* Register offset for MPCore sysctl */
#else
diff --git a/arch/arm/mach-s3c24xx/include/mach/bast-map.h b/arch/arm/mach-s3c24xx/include/mach/bast-map.h
index 6e7dc9d0cf0..eecea2a50f8 100644
--- a/arch/arm/mach-s3c24xx/include/mach/bast-map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/bast-map.h
@@ -74,7 +74,7 @@
/* 0xE0000000 contains the IO space that is split by speed and
- * wether the access is for 8 or 16bit IO... this ensures that
+ * whether the access is for 8 or 16bit IO... this ensures that
* the correct access is made
*
* 0x10000000 of space, partitioned as so:
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index ee99fd56c04..6b72d5a4b37 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -88,7 +88,7 @@ enum s3c2410_dma_state {
*
* This represents the state of the DMA engine, wrt to the loaded / running
* transfers. Since we don't have any way of knowing exactly the state of
- * the DMA transfers, we need to know the state to make decisions on wether
+ * the DMA transfers, we need to know the state to make decisions on whether
* we can
*
* S3C2410_DMA_NONE
diff --git a/arch/arm/mach-s3c24xx/include/mach/vr1000-map.h b/arch/arm/mach-s3c24xx/include/mach/vr1000-map.h
index 99612fcc4eb..28376e56dd3 100644
--- a/arch/arm/mach-s3c24xx/include/mach/vr1000-map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/vr1000-map.h
@@ -51,7 +51,7 @@
#define VR1000_VA_PC104_IRQMASK VR1000_IOADDR(0x00600000)
/* 0xE0000000 contains the IO space that is split by speed and
- * wether the access is for 8 or 16bit IO... this ensures that
+ * whether the access is for 8 or 16bit IO... this ensures that
* the correct access is made
*
* 0x10000000 of space, partitioned as so:
diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c
index 4a963467b7e..973b87ca87f 100644
--- a/arch/arm/mach-s3c24xx/mach-gta02.c
+++ b/arch/arm/mach-s3c24xx/mach-gta02.c
@@ -521,7 +521,6 @@ static struct platform_device *gta02_devices[] __initdata = {
&gta02_nor_flash,
&s3c24xx_pwm_device,
&s3c_device_iis,
- &samsung_asoc_dma,
&s3c_device_i2c0,
&gta02_dfbmcs320_device,
&gta02_buttons_device,
diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
index 63aaf076f61..b23dd1b106e 100644
--- a/arch/arm/mach-s3c24xx/mach-h1940.c
+++ b/arch/arm/mach-s3c24xx/mach-h1940.c
@@ -632,7 +632,6 @@ static struct platform_device *h1940_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
- &samsung_asoc_dma,
&s3c_device_usbgadget,
&h1940_device_leds,
&h1940_device_bluetooth,
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index 393c0f1ac11..a31d5b83e5f 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -519,7 +519,6 @@ static struct platform_device *mini2440_devices[] __initdata = {
&s3c_device_iis,
&uda1340_codec,
&mini2440_audio,
- &samsung_asoc_dma,
};
static void __init mini2440_map_io(void)
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 379fde521d3..0606f2faaa5 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -712,7 +712,6 @@ static struct platform_device *rx1950_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
- &samsung_asoc_dma,
&s3c_device_usbgadget,
&s3c_device_rtc,
&s3c_device_nand,
diff --git a/arch/arm/mach-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
index 60627e63a25..724755f0b0f 100644
--- a/arch/arm/mach-s3c24xx/pm.c
+++ b/arch/arm/mach-s3c24xx/pm.c
@@ -121,7 +121,7 @@ void s3c_pm_configure_extint(void)
int pin;
/* for each of the external interrupts (EINT0..EINT15) we
- * need to check wether it is an external interrupt source,
+ * need to check whether it is an external interrupt source,
* and then configure it as an input if it is not
*/
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 1a6f8577744..803711e283b 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -149,25 +149,6 @@ static struct clk init_clocks_off[] = {
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
}, {
- .name = "iis",
- .devname = "samsung-i2s.0",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_IIS0,
- }, {
- .name = "iis",
- .devname = "samsung-i2s.1",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C_CLKCON_PCLK_IIS1,
- }, {
-#ifdef CONFIG_CPU_S3C6410
- .name = "iis",
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C6410_CLKCON_PCLK_IIS2,
- }, {
-#endif
.name = "keypad",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
@@ -337,6 +318,32 @@ static struct clk clk_48m_spi1 = {
.ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
};
+static struct clk clk_i2s0 = {
+ .name = "iis",
+ .devname = "samsung-i2s.0",
+ .parent = &clk_p,
+ .enable = s3c64xx_pclk_ctrl,
+ .ctrlbit = S3C_CLKCON_PCLK_IIS0,
+};
+
+static struct clk clk_i2s1 = {
+ .name = "iis",
+ .devname = "samsung-i2s.1",
+ .parent = &clk_p,
+ .enable = s3c64xx_pclk_ctrl,
+ .ctrlbit = S3C_CLKCON_PCLK_IIS1,
+};
+
+#ifdef CONFIG_CPU_S3C6410
+static struct clk clk_i2s2 = {
+ .name = "iis",
+ .devname = "samsung-i2s.2",
+ .parent = &clk_p,
+ .enable = s3c64xx_pclk_ctrl,
+ .ctrlbit = S3C6410_CLKCON_PCLK_IIS2,
+};
+#endif
+
static struct clk init_clocks[] = {
{
.name = "lcd",
@@ -660,6 +667,7 @@ static struct clksrc_sources clkset_audio1 = {
.nr_sources = ARRAY_SIZE(clkset_audio1_list),
};
+#ifdef CONFIG_CPU_S3C6410
static struct clk *clkset_audio2_list[] = {
[0] = &clk_mout_epll.clk,
[1] = &clk_dout_mpll,
@@ -672,6 +680,7 @@ static struct clksrc_sources clkset_audio2 = {
.sources = clkset_audio2_list,
.nr_sources = ARRAY_SIZE(clkset_audio2_list),
};
+#endif
static struct clksrc_clk clksrcs[] = {
{
@@ -685,36 +694,6 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_uhost,
}, {
.clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.0",
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 },
- .sources = &clkset_audio0,
- }, {
- .clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.1",
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 },
- .sources = &clkset_audio1,
- }, {
- .clk = {
- .name = "audio-bus",
- .devname = "samsung-i2s.2",
- .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 },
- .sources = &clkset_audio2,
- }, {
- .clk = {
.name = "irda-bus",
.ctrlbit = S3C_CLKCON_SCLK_IRDA,
.enable = s3c64xx_sclk_ctrl,
@@ -805,6 +784,43 @@ static struct clksrc_clk clk_sclk_spi1 = {
.sources = &clkset_spi_mmc,
};
+static struct clksrc_clk clk_audio_bus0 = {
+ .clk = {
+ .name = "audio-bus",
+ .devname = "samsung-i2s.0",
+ .ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 },
+ .sources = &clkset_audio0,
+};
+
+static struct clksrc_clk clk_audio_bus1 = {
+ .clk = {
+ .name = "audio-bus",
+ .devname = "samsung-i2s.1",
+ .ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 },
+ .sources = &clkset_audio1,
+};
+
+#ifdef CONFIG_CPU_S3C6410
+static struct clksrc_clk clk_audio_bus2 = {
+ .clk = {
+ .name = "audio-bus",
+ .devname = "samsung-i2s.2",
+ .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 },
+ .sources = &clkset_audio2,
+};
+#endif
/* Clock initialisation code */
static struct clksrc_clk *init_parents[] = {
@@ -820,6 +836,8 @@ static struct clksrc_clk *clksrc_cdev[] = {
&clk_sclk_mmc2,
&clk_sclk_spi0,
&clk_sclk_spi1,
+ &clk_audio_bus0,
+ &clk_audio_bus1,
};
static struct clk *clk_cdev[] = {
@@ -828,6 +846,8 @@ static struct clk *clk_cdev[] = {
&clk_hsmmc2,
&clk_48m_spi0,
&clk_48m_spi1,
+ &clk_i2s0,
+ &clk_i2s1,
};
static struct clk_lookup s3c64xx_clk_lookup[] = {
@@ -844,6 +864,14 @@ static struct clk_lookup s3c64xx_clk_lookup[] = {
CLKDEV_INIT("s3c6410-spi.0", "spi_busclk2", &clk_48m_spi0),
CLKDEV_INIT("s3c6410-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
CLKDEV_INIT("s3c6410-spi.1", "spi_busclk2", &clk_48m_spi1),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_audio_bus0.clk),
+ CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1),
+ CLKDEV_INIT("samsung-i2s.1", "i2s_opclk1", &clk_audio_bus1.clk),
+#ifdef CONFIG_CPU_S3C6410
+ CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2),
+ CLKDEV_INIT("samsung-i2s.2", "i2s_opclk1", &clk_audio_bus2.clk),
+#endif
};
#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 35f3e07eacc..e367e87bbc2 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -23,11 +23,6 @@
#include <linux/platform_data/asoc-s3c.h>
#include <plat/gpio-cfg.h>
-static const char *rclksrc[] = {
- [0] = "iis",
- [1] = "audio-bus",
-};
-
static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
{
unsigned int base;
@@ -64,11 +59,6 @@ static struct resource s3c64xx_iis0_resource[] = {
static struct s3c_audio_pdata i2sv3_pdata = {
.cfg_gpio = s3c64xx_i2s_cfg_gpio,
- .type = {
- .i2s = {
- .src_clk = rclksrc,
- },
- },
};
struct platform_device s3c64xx_device_iis0 = {
@@ -110,7 +100,6 @@ static struct s3c_audio_pdata i2sv4_pdata = {
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN,
- .src_clk = rclksrc,
},
},
};
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 701f421de1a..cdde249166b 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -379,7 +379,6 @@ static struct platform_device *crag6410_devices[] __initdata = {
&s3c_device_timer[0],
&s3c64xx_device_iis0,
&s3c64xx_device_iis1,
- &samsung_asoc_dma,
&samsung_device_keypad,
&crag6410_gpio_keydev,
&crag6410_dm9k_device,
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index da1a771a29e..574a9eef588 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -275,7 +275,6 @@ static struct platform_device *smdk6410_devices[] __initdata = {
&s3c_device_fb,
&s3c_device_ohci,
&s3c_device_usb_hsotg,
- &samsung_asoc_dma,
&s3c64xx_device_iisv4,
&samsung_device_keypad,
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index 000445596ec..5112371079d 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -243,12 +243,6 @@ static struct clk init_clocks_off[] = {
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 25),
}, {
- .name = "iis",
- .devname = "samsung-i2s.0",
- .parent = &clk_pclk_low.clk,
- .enable = s5p64x0_pclk_ctrl,
- .ctrlbit = (1 << 26),
- }, {
.name = "dsim",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
@@ -405,15 +399,6 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_group1,
.reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 8, .size = 2 },
.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_audio2",
- .ctrlbit = (1 << 11),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_audio,
- .reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 0, .size = 3 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 24, .size = 4 },
},
};
@@ -464,6 +449,26 @@ static struct clksrc_clk clk_sclk_uclk = {
.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
};
+static struct clk clk_i2s0 = {
+ .name = "iis",
+ .devname = "samsung-i2s.0",
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p64x0_pclk_ctrl,
+ .ctrlbit = (1 << 26),
+};
+
+static struct clksrc_clk clk_audio_bus2 = {
+ .clk = {
+ .name = "sclk_audio2",
+ .devname = "samsung-i2s.0",
+ .ctrlbit = (1 << 11),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_audio,
+ .reg_src = { .reg = S5P64X0_CLK_SRC1, .shift = 0, .size = 3 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 24, .size = 4 },
+};
+
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
@@ -506,13 +511,18 @@ static struct clk dummy_apb_pclk = {
.id = -1,
};
+static struct clk *clk_cdev[] = {
+ &clk_i2s0,
+};
+
static struct clksrc_clk *clksrc_cdev[] = {
&clk_sclk_uclk,
&clk_sclk_spi0,
&clk_sclk_spi1,
&clk_sclk_mmc0,
&clk_sclk_mmc1,
- &clk_sclk_mmc2
+ &clk_sclk_mmc2,
+ &clk_audio_bus2,
};
static struct clk_lookup s5p6440_clk_lookup[] = {
@@ -524,6 +534,8 @@ static struct clk_lookup s5p6440_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_audio_bus2.clk),
};
void __init_or_cpufreq s5p6440_setup_clocks(void)
@@ -596,12 +608,17 @@ static struct clk *clks[] __initdata = {
void __init s5p6440_register_clocks(void)
{
int ptr;
+ unsigned int cnt;
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
s3c_register_clksrc(sysclks[ptr], 1);
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++)
+ s3c_disable_clocks(clk_cdev[cnt], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index f3e0ef3d27c..154dea702d7 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -247,24 +247,6 @@ static struct clk init_clocks_off[] = {
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 22),
}, {
- .name = "iis",
- .devname = "samsung-i2s.0",
- .parent = &clk_pclk_low.clk,
- .enable = s5p64x0_pclk_ctrl,
- .ctrlbit = (1 << 26),
- }, {
- .name = "iis",
- .devname = "samsung-i2s.1",
- .parent = &clk_pclk_low.clk,
- .enable = s5p64x0_pclk_ctrl,
- .ctrlbit = (1 << 15),
- }, {
- .name = "iis",
- .devname = "samsung-i2s.2",
- .parent = &clk_pclk_low.clk,
- .enable = s5p64x0_pclk_ctrl,
- .ctrlbit = (1 << 16),
- }, {
.name = "i2c",
.devname = "s3c2440-i2c.1",
.parent = &clk_pclk_low.clk,
@@ -402,6 +384,7 @@ static struct clksrc_sources clkset_sclk_audio0 = {
static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "audio-bus",
+ .devname = "samsung-i2s.0",
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 8),
.parent = &clk_dout_epll.clk,
@@ -549,6 +532,36 @@ static struct clksrc_clk clk_sclk_spi1 = {
.reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
};
+static struct clk clk_i2s0 = {
+ .name = "iis",
+ .devname = "samsung-i2s.0",
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p64x0_pclk_ctrl,
+ .ctrlbit = (1 << 26),
+};
+
+static struct clk clk_i2s1 = {
+ .name = "iis",
+ .devname = "samsung-i2s.1",
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p64x0_pclk_ctrl,
+ .ctrlbit = (1 << 15),
+};
+
+static struct clk clk_i2s2 = {
+ .name = "iis",
+ .devname = "samsung-i2s.2",
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p64x0_pclk_ctrl,
+ .ctrlbit = (1 << 16),
+};
+
+static struct clk *clk_cdev[] = {
+ &clk_i2s0,
+ &clk_i2s1,
+ &clk_i2s2,
+};
+
static struct clksrc_clk *clksrc_cdev[] = {
&clk_sclk_uclk,
&clk_sclk_spi0,
@@ -556,6 +569,7 @@ static struct clksrc_clk *clksrc_cdev[] = {
&clk_sclk_mmc0,
&clk_sclk_mmc1,
&clk_sclk_mmc2,
+ &clk_sclk_audio0,
};
static struct clk_lookup s5p6450_clk_lookup[] = {
@@ -567,6 +581,10 @@ static struct clk_lookup s5p6450_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk1", &clk_sclk_audio0.clk),
+ CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1),
+ CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2),
};
/* Clock initialization code */
@@ -584,7 +602,6 @@ static struct clksrc_clk *sysclks[] = {
&clk_pclk,
&clk_hclk_low,
&clk_pclk_low,
- &clk_sclk_audio0,
};
static struct clk dummy_apb_pclk = {
@@ -661,10 +678,16 @@ void __init_or_cpufreq s5p6450_setup_clocks(void)
void __init s5p6450_register_clocks(void)
{
int ptr;
+ unsigned int cnt;
for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
s3c_register_clksrc(sysclks[ptr], 1);
+
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++)
+ s3c_disable_clocks(clk_cdev[cnt], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index a0d6edfd23a..723d4773c32 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -19,11 +19,6 @@
#include <mach/dma.h>
#include <mach/irqs.h>
-static const char *rclksrc[] = {
- [0] = "iis",
- [1] = "sclk_audio2",
-};
-
static int s5p6440_cfg_i2s(struct platform_device *pdev)
{
switch (pdev->id) {
@@ -45,7 +40,6 @@ static struct s3c_audio_pdata s5p6440_i2s_pdata = {
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN,
- .src_clk = rclksrc,
},
},
};
@@ -93,7 +87,6 @@ static struct s3c_audio_pdata s5p6450_i2s0_pdata = {
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN,
- .src_clk = rclksrc,
},
},
};
@@ -110,11 +103,6 @@ struct platform_device s5p6450_device_iis0 = {
static struct s3c_audio_pdata s5p6450_i2s_pdata = {
.cfg_gpio = s5p6450_cfg_i2s,
- .type = {
- .i2s = {
- .src_clk = rclksrc,
- },
- },
};
static struct resource s5p6450_i2s1_resource[] = {
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 96ea1fe0ec9..1af823558c6 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -165,7 +165,6 @@ static struct platform_device *smdk6440_devices[] __initdata = {
&s3c_device_i2c1,
&s3c_device_ts,
&s3c_device_wdt,
- &samsung_asoc_dma,
&s5p6440_device_iis,
&s3c_device_fb,
&smdk6440_lcd_lte480wv,
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index 12748b6eaa7..62526ccf6b7 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -183,7 +183,6 @@ static struct platform_device *smdk6450_devices[] __initdata = {
&s3c_device_i2c1,
&s3c_device_ts,
&s3c_device_wdt,
- &samsung_asoc_dma,
&s5p6450_device_iis0,
&s3c_device_fb,
&smdk6450_lcd_lte480wv,
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 926219791f0..a206dc35eff 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -606,24 +606,6 @@ static struct clk init_clocks_off[] = {
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 13),
}, {
- .name = "iis",
- .devname = "samsung-i2s.0",
- .parent = &clk_div_pclkd1.clk,
- .enable = s5pc100_d1_5_ctrl,
- .ctrlbit = (1 << 0),
- }, {
- .name = "iis",
- .devname = "samsung-i2s.1",
- .parent = &clk_div_pclkd1.clk,
- .enable = s5pc100_d1_5_ctrl,
- .ctrlbit = (1 << 1),
- }, {
- .name = "iis",
- .devname = "samsung-i2s.2",
- .parent = &clk_div_pclkd1.clk,
- .enable = s5pc100_d1_5_ctrl,
- .ctrlbit = (1 << 2),
- }, {
.name = "ac97",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
@@ -724,6 +706,30 @@ static struct clk clk_48m_spi2 = {
.ctrlbit = (1 << 9),
};
+static struct clk clk_i2s0 = {
+ .name = "iis",
+ .devname = "samsung-i2s.0",
+ .parent = &clk_div_pclkd1.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 0),
+};
+
+static struct clk clk_i2s1 = {
+ .name = "iis",
+ .devname = "samsung-i2s.1",
+ .parent = &clk_div_pclkd1.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 1),
+};
+
+static struct clk clk_i2s2 = {
+ .name = "iis",
+ .devname = "samsung-i2s.2",
+ .parent = &clk_div_pclkd1.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 2),
+};
+
static struct clk clk_vclk54m = {
.name = "vclk_54m",
.rate = 54000000,
@@ -1154,6 +1160,9 @@ static struct clk *clk_cdev[] = {
&clk_48m_spi0,
&clk_48m_spi1,
&clk_48m_spi2,
+ &clk_i2s0,
+ &clk_i2s1,
+ &clk_i2s2,
};
static struct clksrc_clk *clksrc_cdev[] = {
@@ -1321,6 +1330,9 @@ static struct clk_lookup s5pc100_clk_lookup[] = {
CLKDEV_INIT("s5pc100-spi.1", "spi_busclk2", &clk_sclk_spi1.clk),
CLKDEV_INIT("s5pc100-spi.2", "spi_busclk1", &clk_48m_spi2),
CLKDEV_INIT("s5pc100-spi.2", "spi_busclk2", &clk_sclk_spi2.clk),
+ CLKDEV_INIT("samsung-i2s.0", "i2s_opclk0", &clk_i2s0),
+ CLKDEV_INIT("samsung-i2s.1", "i2s_opclk0", &clk_i2s1),
+ CLKDEV_INIT("samsung-i2s.2", "i2s_opclk0", &clk_i2s2),
};
void __init s5pc100_register_clocks(void)
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index 1cc252cef26..46f488b0939 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -39,18 +39,12 @@ static int s5pc100_cfg_i2s(struct platform_device *pdev)
return 0;
}
-static const char *rclksrc_v5[] = {
- [0] = "iis",
- [1] = "i2sclkd2",
-};
-
static struct s3c_audio_pdata i2sv5_pdata = {
.cfg_gpio = s5pc100_cfg_i2s,
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
| QUIRK_NEED_RSTCLR,
- .src_clk = rclksrc_v5,
},
},
};
@@ -72,18 +66,8 @@ struct platform_device s5pc100_device_iis0 = {
},
};
-static const char *rclksrc_v3[] = {
- [0] = "iis",
- [1] = "sclk_audio",
-};
-
static struct s3c_audio_pdata i2sv3_pdata = {
.cfg_gpio = s5pc100_cfg_i2s,
- .type = {
- .i2s = {
- .src_clk = rclksrc_v3,
- },
- },
};
static struct resource s5pc100_iis1_resource[] = {
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index dba7384a87b..9abe95e806a 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -197,7 +197,6 @@ static struct platform_device *smdkc100_devices[] __initdata = {
&s3c_device_ts,
&s3c_device_wdt,
&smdkc100_lcd_powerdev,
- &samsung_asoc_dma,
&s5pc100_device_iis0,
&samsung_device_keypad,
&s5pc100_device_ac97,
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 0a5480bbcbd..addfb165c13 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -20,11 +20,6 @@
#include <mach/irqs.h>
#include <mach/regs-audss.h>
-static const char *rclksrc[] = {
- [0] = "busclk",
- [1] = "i2sclk",
-};
-
static int s5pv210_cfg_i2s(struct platform_device *pdev)
{
/* configure GPIO for i2s port */
@@ -52,7 +47,6 @@ static struct s3c_audio_pdata i2sv5_pdata = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
| QUIRK_NEED_RSTCLR,
- .src_clk = rclksrc,
.idma_addr = S5PV210_AUDSS_INT_MEM,
},
},
@@ -75,18 +69,8 @@ struct platform_device s5pv210_device_iis0 = {
},
};
-static const char *rclksrc_v3[] = {
- [0] = "iis",
- [1] = "audio-bus",
-};
-
static struct s3c_audio_pdata i2sv3_pdata = {
.cfg_gpio = s5pv210_cfg_i2s,
- .type = {
- .i2s = {
- .src_clk = rclksrc_v3,
- },
- },
};
static struct resource s5pv210_iis1_resource[] = {
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index d9c99fcc1aa..f1f3bd37ecd 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -85,7 +85,6 @@ static struct s3c_ide_platdata smdkc110_ide_pdata __initdata = {
};
static struct platform_device *smdkc110_devices[] __initdata = {
- &samsung_asoc_dma,
&s5pv210_device_iis0,
&s5pv210_device_ac97,
&s5pv210_device_spdif,
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 4cdb5bb7bbc..6bc8404bf67 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -234,7 +234,6 @@ static struct platform_device *smdkv210_devices[] __initdata = {
&s5pv210_device_ac97,
&s5pv210_device_iis0,
&s5pv210_device_spdif,
- &samsung_asoc_dma,
&samsung_asoc_idma,
&samsung_device_keypad,
&smdkv210_dm9000,
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 4eddca14ae0..9255546e7bf 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -29,6 +29,8 @@ config ARCH_R8A7779
select ARM_GIC
select CPU_V7
select SH_CLK_CPG
+ select USB_ARCH_HAS_EHCI
+ select USB_ARCH_HAS_OHCI
config ARCH_EMEV2
bool "Emma Mobile EV2"
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 25eb88a923e..032d10817e7 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -213,95 +213,6 @@ static struct platform_device irda_device = {
.num_resources = ARRAY_SIZE(irda_resources),
};
-static unsigned char lcd_backlight_seq[3][2] = {
- { 0x04, 0x07 },
- { 0x23, 0x80 },
- { 0x03, 0x01 },
-};
-
-static void lcd_backlight_on(void)
-{
- struct i2c_adapter *a;
- struct i2c_msg msg;
- int k;
-
- a = i2c_get_adapter(1);
- for (k = 0; a && k < 3; k++) {
- msg.addr = 0x6d;
- msg.buf = &lcd_backlight_seq[k][0];
- msg.len = 2;
- msg.flags = 0;
- if (i2c_transfer(a, &msg, 1) != 1)
- break;
- }
-}
-
-static void lcd_backlight_reset(void)
-{
- gpio_set_value(GPIO_PORT235, 0);
- mdelay(24);
- gpio_set_value(GPIO_PORT235, 1);
-}
-
-/* LCDC0 */
-static const struct fb_videomode lcdc0_modes[] = {
- {
- .name = "R63302(QHD)",
- .xres = 544,
- .yres = 961,
- .left_margin = 72,
- .right_margin = 600,
- .hsync_len = 16,
- .upper_margin = 8,
- .lower_margin = 8,
- .vsync_len = 2,
- .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
- },
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
- .clock_source = LCDC_CLK_PERIPHERAL,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .interface_type = RGB24,
- .clock_divider = 1,
- .flags = LCDC_FLAGS_DWPOL,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .lcd_modes = lcdc0_modes,
- .num_modes = ARRAY_SIZE(lcdc0_modes),
- .panel_cfg = {
- .width = 44,
- .height = 79,
- .display_on = lcd_backlight_on,
- .display_off = lcd_backlight_reset,
- },
- }
-};
-
-static struct resource lcdc0_resources[] = {
- [0] = {
- .name = "LCDC0",
- .start = 0xfe940000, /* P4-only space */
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = intcs_evt2irq(0x580),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device lcdc0_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(lcdc0_resources),
- .resource = lcdc0_resources,
- .id = 0,
- .dev = {
- .platform_data = &lcdc0_info,
- .coherent_dma_mask = ~0,
- },
-};
-
/* MIPI-DSI */
static struct resource mipidsi0_resources[] = {
[0] = {
@@ -358,7 +269,7 @@ sh_mipi_set_dot_clock_pck_err:
static struct sh_mipi_dsi_info mipidsi0_info = {
.data_format = MIPI_RGB888,
- .lcd_chan = &lcdc0_info.ch[0],
+ .channel = LCDC_CHAN_MAINLCD,
.lane = 2,
.vsynw_offset = 20,
.clksrc = 1,
@@ -378,6 +289,109 @@ static struct platform_device mipidsi0_device = {
},
};
+static unsigned char lcd_backlight_seq[3][2] = {
+ { 0x04, 0x07 },
+ { 0x23, 0x80 },
+ { 0x03, 0x01 },
+};
+
+static int lcd_backlight_set_brightness(int brightness)
+{
+ struct i2c_adapter *adap;
+ struct i2c_msg msg;
+ unsigned int i;
+ int ret;
+
+ if (brightness == 0) {
+ /* Reset the chip */
+ gpio_set_value(GPIO_PORT235, 0);
+ mdelay(24);
+ gpio_set_value(GPIO_PORT235, 1);
+ return 0;
+ }
+
+ adap = i2c_get_adapter(1);
+ if (adap == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(lcd_backlight_seq); i++) {
+ msg.addr = 0x6d;
+ msg.buf = &lcd_backlight_seq[i][0];
+ msg.len = 2;
+ msg.flags = 0;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ if (ret < 0)
+ break;
+ }
+
+ i2c_put_adapter(adap);
+ return ret < 0 ? ret : 0;
+}
+
+/* LCDC0 */
+static const struct fb_videomode lcdc0_modes[] = {
+ {
+ .name = "R63302(QHD)",
+ .xres = 544,
+ .yres = 961,
+ .left_margin = 72,
+ .right_margin = 600,
+ .hsync_len = 16,
+ .upper_margin = 8,
+ .lower_margin = 8,
+ .vsync_len = 2,
+ .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+ },
+};
+
+static struct sh_mobile_lcdc_info lcdc0_info = {
+ .clock_source = LCDC_CLK_PERIPHERAL,
+ .ch[0] = {
+ .chan = LCDC_CHAN_MAINLCD,
+ .interface_type = RGB24,
+ .clock_divider = 1,
+ .flags = LCDC_FLAGS_DWPOL,
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .lcd_modes = lcdc0_modes,
+ .num_modes = ARRAY_SIZE(lcdc0_modes),
+ .panel_cfg = {
+ .width = 44,
+ .height = 79,
+ },
+ .bl_info = {
+ .name = "sh_mobile_lcdc_bl",
+ .max_brightness = 1,
+ .set_brightness = lcd_backlight_set_brightness,
+ },
+ .tx_dev = &mipidsi0_device,
+ }
+};
+
+static struct resource lcdc0_resources[] = {
+ [0] = {
+ .name = "LCDC0",
+ .start = 0xfe940000, /* P4-only space */
+ .end = 0xfe943fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x580),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device lcdc0_device = {
+ .name = "sh_mobile_lcdc_fb",
+ .num_resources = ARRAY_SIZE(lcdc0_resources),
+ .resource = lcdc0_resources,
+ .id = 0,
+ .dev = {
+ .platform_data = &lcdc0_info,
+ .coherent_dma_mask = ~0,
+ },
+};
+
/* Fixed 2.8V regulators to be used by SDHI0 */
static struct regulator_consumer_supply fixed2v8_power_consumers[] =
{
@@ -531,8 +545,8 @@ static struct platform_device *ag5evm_devices[] __initdata = {
&fsi_device,
&mmc_device,
&irda_device,
- &lcdc0_device,
&mipidsi0_device,
+ &lcdc0_device,
&sdhi0_device,
&sdhi1_device,
};
@@ -621,7 +635,7 @@ static void __init ag5evm_init(void)
/* LCD backlight controller */
gpio_request(GPIO_PORT235, NULL); /* RESET */
gpio_direction_output(GPIO_PORT235, 0);
- lcd_backlight_reset();
+ lcd_backlight_set_brightness(0);
/* enable SDHI0 on CN15 [SD I/F] */
gpio_request(GPIO_FN_SDHIWP0, NULL);
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index cefdd030361..99ef190d090 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -552,11 +552,9 @@ static struct resource mipidsi0_resources[] = {
},
};
-static struct sh_mobile_lcdc_info lcdc_info;
-
static struct sh_mipi_dsi_info mipidsi0_info = {
.data_format = MIPI_RGB888,
- .lcd_chan = &lcdc_info.ch[0],
+ .channel = LCDC_CHAN_MAINLCD,
.lane = 2,
.vsynw_offset = 17,
.phyctrl = 0x6 << 8,
@@ -658,133 +656,16 @@ static struct platform_device lcdc_device = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
-static int __fsi_set_rate(struct clk *clk, long rate, int enable)
-{
- int ret = 0;
-
- if (rate <= 0)
- return ret;
-
- if (enable) {
- ret = clk_set_rate(clk, rate);
- if (0 == ret)
- ret = clk_enable(clk);
- } else {
- clk_disable(clk);
- }
-
- return ret;
-}
-
-static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
-{
- return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
-}
-
-static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
-{
- struct clk *fsia_ick;
- struct clk *fsiack;
- int ret = -EIO;
-
- fsia_ick = clk_get(dev, "icka");
- if (IS_ERR(fsia_ick))
- return PTR_ERR(fsia_ick);
-
- /*
- * FSIACK is connected to AK4642,
- * and use external clock pin from it.
- * it is parent of fsia_ick now.
- */
- fsiack = clk_get_parent(fsia_ick);
- if (!fsiack)
- goto fsia_ick_out;
-
- /*
- * we get 1/1 divided clock by setting same rate to fsiack and fsia_ick
- *
- ** FIXME **
- * Because the freq_table of external clk (fsiack) are all 0,
- * the return value of clk_round_rate became 0.
- * So, it use __fsi_set_rate here.
- */
- ret = __fsi_set_rate(fsiack, rate, enable);
- if (ret < 0)
- goto fsiack_out;
-
- ret = __fsi_set_round_rate(fsia_ick, rate, enable);
- if ((ret < 0) && enable)
- __fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
-
-fsiack_out:
- clk_put(fsiack);
-
-fsia_ick_out:
- clk_put(fsia_ick);
-
- return 0;
-}
-
-static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
-{
- struct clk *fsib_clk;
- struct clk *fdiv_clk = clk_get(NULL, "fsidivb");
- long fsib_rate = 0;
- long fdiv_rate = 0;
- int ackmd_bpfmd;
- int ret;
-
- switch (rate) {
- case 44100:
- fsib_rate = rate * 256;
- ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
- break;
- case 48000:
- fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
- fdiv_rate = rate * 256;
- ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
- break;
- default:
- pr_err("unsupported rate in FSI2 port B\n");
- return -EINVAL;
- }
-
- /* FSI B setting */
- fsib_clk = clk_get(dev, "ickb");
- if (IS_ERR(fsib_clk))
- return -EIO;
-
- ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
- if (ret < 0)
- goto fsi_set_rate_end;
-
- /* FSI DIV setting */
- ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
- if (ret < 0) {
- /* disable FSI B */
- if (enable)
- __fsi_set_round_rate(fsib_clk, fsib_rate, 0);
- goto fsi_set_rate_end;
- }
-
- ret = ackmd_bpfmd;
-
-fsi_set_rate_end:
- clk_put(fsib_clk);
- return ret;
-}
-
static struct sh_fsi_platform_info fsi_info = {
.port_a = {
.flags = SH_FSI_BRS_INV,
- .set_rate = fsi_ak4642_set_rate,
},
.port_b = {
.flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
+ SH_FSI_CLK_CPG |
SH_FSI_FMT_SPDIF,
- .set_rate = fsi_hdmi_set_rate,
},
};
@@ -1144,25 +1025,6 @@ out:
clk_put(hdmi_ick);
}
-static void __init fsi_init_pm_clock(void)
-{
- struct clk *fsia_ick;
- int ret;
-
- fsia_ick = clk_get(&fsi_device.dev, "icka");
- if (IS_ERR(fsia_ick)) {
- ret = PTR_ERR(fsia_ick);
- pr_err("Cannot get FSI ICK: %d\n", ret);
- return;
- }
-
- ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
- if (ret < 0)
- pr_err("Cannot set FSI-A parent: %d\n", ret);
-
- clk_put(fsia_ick);
-}
-
/* TouchScreen */
#ifdef CONFIG_AP4EVB_QHD
# define GPIO_TSC_IRQ GPIO_FN_IRQ28_123
@@ -1476,7 +1338,6 @@ static void __init ap4evb_init(void)
ARRAY_SIZE(domain_devices));
hdmi_init_pm_clock();
- fsi_init_pm_clock();
sh7372_pm_init();
pm_clk_add(&fsi_device.dev, "spu2");
pm_clk_add(&lcdc1_device.dev, "hdmi");
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 499e6e37666..5353adf6b82 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -768,32 +768,6 @@ static struct platform_device ceu0_device = {
};
/* FSI */
-static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
-{
- struct clk *fsib;
- int ret;
-
- /* it support 48KHz only */
- if (48000 != rate)
- return -EINVAL;
-
- fsib = clk_get(dev, "ickb");
- if (IS_ERR(fsib))
- return -EINVAL;
-
- if (enable) {
- ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
- clk_enable(fsib);
- } else {
- ret = 0;
- clk_disable(fsib);
- }
-
- clk_put(fsib);
-
- return ret;
-}
-
static struct sh_fsi_platform_info fsi_info = {
/* FSI-WM8978 */
.port_a = {
@@ -802,8 +776,8 @@ static struct sh_fsi_platform_info fsi_info = {
/* FSI-HDMI */
.port_b = {
.flags = SH_FSI_FMT_SPDIF |
- SH_FSI_ENABLE_STREAM_MODE,
- .set_rate = fsi_hdmi_set_rate,
+ SH_FSI_ENABLE_STREAM_MODE |
+ SH_FSI_CLK_CPG,
.tx_id = SHDMA_SLAVE_FSIB_TX,
}
};
@@ -938,13 +912,11 @@ static void __init eva_clock_init(void)
struct clk *xtal1 = clk_get(NULL, "extal1");
struct clk *usb24s = clk_get(NULL, "usb24s");
struct clk *fsibck = clk_get(NULL, "fsibck");
- struct clk *fsib = clk_get(&fsi_device.dev, "ickb");
if (IS_ERR(system) ||
IS_ERR(xtal1) ||
IS_ERR(usb24s) ||
- IS_ERR(fsibck) ||
- IS_ERR(fsib)) {
+ IS_ERR(fsibck)) {
pr_err("armadillo800eva board clock init failed\n");
goto clock_error;
}
@@ -956,9 +928,7 @@ static void __init eva_clock_init(void)
clk_set_parent(usb24s, system);
/* FSIBCK is 12.288MHz, and it is parent of FSI-B */
- clk_set_parent(fsib, fsibck);
clk_set_rate(fsibck, 12288000);
- clk_set_rate(fsib, 12288000);
clock_error:
if (!IS_ERR(system))
@@ -969,8 +939,6 @@ clock_error:
clk_put(usb24s);
if (!IS_ERR(fsibck))
clk_put(fsibck);
- if (!IS_ERR(fsib))
- clk_put(fsib);
}
/*
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index f274252e470..2fed62f6604 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -370,11 +370,6 @@ static int mackerel_set_brightness(int brightness)
return 0;
}
-static int mackerel_get_brightness(void)
-{
- return gpio_get_value(GPIO_PORT31);
-}
-
static const struct sh_mobile_meram_cfg lcd_meram_cfg = {
.icb[0] = {
.meram_size = 0x40,
@@ -403,7 +398,6 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.name = "sh_mobile_lcdc_bl",
.max_brightness = 1,
.set_brightness = mackerel_set_brightness,
- .get_brightness = mackerel_get_brightness,
},
.meram_cfg = &lcd_meram_cfg,
}
@@ -816,6 +810,8 @@ static struct platform_device usbhs1_device = {
.id = 1,
.dev = {
.platform_data = &usbhs1_private.info,
+ .dma_mask = &usbhs1_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usbhs1_resources),
.resource = usbhs1_resources,
@@ -860,76 +856,6 @@ static struct platform_device leds_device = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
-static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
-{
- int ret;
-
- if (rate <= 0)
- return 0;
-
- if (!enable) {
- clk_disable(clk);
- return 0;
- }
-
- ret = clk_set_rate(clk, clk_round_rate(clk, rate));
- if (ret < 0)
- return ret;
-
- return clk_enable(clk);
-}
-
-static int fsi_b_set_rate(struct device *dev, int rate, int enable)
-{
- struct clk *fsib_clk;
- struct clk *fdiv_clk = clk_get(NULL, "fsidivb");
- long fsib_rate = 0;
- long fdiv_rate = 0;
- int ackmd_bpfmd;
- int ret;
-
- /* clock start */
- switch (rate) {
- case 44100:
- fsib_rate = rate * 256;
- ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
- break;
- case 48000:
- fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
- fdiv_rate = rate * 256;
- ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
- break;
- default:
- pr_err("unsupported rate in FSI2 port B\n");
- return -EINVAL;
- }
-
- /* FSI B setting */
- fsib_clk = clk_get(dev, "ickb");
- if (IS_ERR(fsib_clk))
- return -EIO;
-
- /* fsib */
- ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
- if (ret < 0)
- goto fsi_set_rate_end;
-
- /* FSI DIV */
- ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
- if (ret < 0) {
- /* disable FSI B */
- if (enable)
- __fsi_set_round_rate(fsib_clk, fsib_rate, 0);
- goto fsi_set_rate_end;
- }
-
- ret = ackmd_bpfmd;
-
-fsi_set_rate_end:
- clk_put(fsib_clk);
- return ret;
-}
-
static struct sh_fsi_platform_info fsi_info = {
.port_a = {
.flags = SH_FSI_BRS_INV,
@@ -940,8 +866,8 @@ static struct sh_fsi_platform_info fsi_info = {
.flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
+ SH_FSI_CLK_CPG |
SH_FSI_FMT_SPDIF,
- .set_rate = fsi_b_set_rate,
}
};
@@ -1018,7 +944,11 @@ static struct resource nand_flash_resources[] = {
.start = 0xe6a30000,
.end = 0xe6a3009b,
.flags = IORESOURCE_MEM,
- }
+ },
+ [1] = {
+ .start = evt2irq(0x0d80), /* flstei: status error irq */
+ .flags = IORESOURCE_IRQ,
+ },
};
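[Annotation] The extra resource gives the FLCTL NAND driver access to its status-error interrupt (flstei). A driver usually fetches such a numbered IORESOURCE_IRQ entry with platform_get_irq(); the sketch below shows that consumer side only, with hypothetical function and handler names (platform_get_irq() and devm_request_irq() are the real APIs).

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical stub handler, for illustration only. */
static irqreturn_t example_flctl_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Fetch the IRQ resource added above and install a handler for it. */
static int example_flctl_request_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_flctl_irq, 0,
				dev_name(&pdev->dev), pdev);
}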
static struct sh_flctl_platform_data nand_flash_data = {
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 69f7f464eff..449f9289567 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -34,6 +34,10 @@
#include <linux/spi/sh_hspi.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <linux/pm_runtime.h>
#include <mach/hardware.h>
#include <mach/r8a7779.h>
#include <mach/common.h>
@@ -144,13 +148,185 @@ static struct platform_device hspi_device = {
.num_resources = ARRAY_SIZE(hspi_resources),
};
+/* USB PHY */
+static struct resource usb_phy_resources[] = {
+ [0] = {
+ .start = 0xffe70000,
+ .end = 0xffe70900 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 0xfff70000,
+ .end = 0xfff70900 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device usb_phy_device = {
+ .name = "rcar_usb_phy",
+ .resource = usb_phy_resources,
+ .num_resources = ARRAY_SIZE(usb_phy_resources),
+};
+
static struct platform_device *marzen_devices[] __initdata = {
&eth_device,
&sdhi0_device,
&thermal_device,
&hspi_device,
+ &usb_phy_device,
+};
+
+/* USB */
+static struct usb_phy *phy;
+static int usb_power_on(struct platform_device *pdev)
+{
+ if (!phy)
+ return -EIO;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ usb_phy_init(phy);
+
+ return 0;
+}
+
+static void usb_power_off(struct platform_device *pdev)
+{
+ if (!phy)
+ return;
+
+ usb_phy_shutdown(phy);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static struct usb_ehci_pdata ehcix_pdata = {
+ .power_on = usb_power_on,
+ .power_off = usb_power_off,
+ .power_suspend = usb_power_off,
+};
+
+static struct resource ehci0_resources[] = {
+ [0] = {
+ .start = 0xffe70000,
+ .end = 0xffe70400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(44),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ehci0_device = {
+ .name = "ehci-platform",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ehci0_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &ehcix_pdata,
+ },
+ .num_resources = ARRAY_SIZE(ehci0_resources),
+ .resource = ehci0_resources,
};
+static struct resource ehci1_resources[] = {
+ [0] = {
+ .start = 0xfff70000,
+ .end = 0xfff70400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(45),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ehci1_device = {
+ .name = "ehci-platform",
+ .id = 1,
+ .dev = {
+ .dma_mask = &ehci1_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &ehcix_pdata,
+ },
+ .num_resources = ARRAY_SIZE(ehci1_resources),
+ .resource = ehci1_resources,
+};
+
+static struct usb_ohci_pdata ohcix_pdata = {
+ .power_on = usb_power_on,
+ .power_off = usb_power_off,
+ .power_suspend = usb_power_off,
+};
+
+static struct resource ohci0_resources[] = {
+ [0] = {
+ .start = 0xffe70400,
+ .end = 0xffe70800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(44),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ohci0_device = {
+ .name = "ohci-platform",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ohci0_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &ohcix_pdata,
+ },
+ .num_resources = ARRAY_SIZE(ohci0_resources),
+ .resource = ohci0_resources,
+};
+
+static struct resource ohci1_resources[] = {
+ [0] = {
+ .start = 0xfff70400,
+ .end = 0xfff70800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(45),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ohci1_device = {
+ .name = "ohci-platform",
+ .id = 1,
+ .dev = {
+ .dma_mask = &ohci1_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &ohcix_pdata,
+ },
+ .num_resources = ARRAY_SIZE(ohci1_resources),
+ .resource = ohci1_resources,
+};
+
+static struct platform_device *marzen_late_devices[] __initdata = {
+ &ehci0_device,
+ &ehci1_device,
+ &ohci0_device,
+ &ohci1_device,
+};
+
+void __init marzen_init_late(void)
+{
+ /* get usb phy */
+ phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+ shmobile_init_late();
+ platform_add_devices(marzen_late_devices,
+ ARRAY_SIZE(marzen_late_devices));
+}
+
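[Annotation] Both the usbhs1 hunk in board-mackerel.c above and the EHCI/OHCI devices added here rely on the same idiom: dev.dma_mask points at the device's own coherent_dma_mask. A statically declared platform device has no separate storage for the streaming DMA mask, so the single u64 serves both purposes. A minimal self-contained sketch of the pattern (the device name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static struct platform_device example_dma_capable_device = {
	.name	= "example-device",
	.id	= -1,
	.dev	= {
		/* reuse coherent_dma_mask as the streaming dma_mask storage */
		.dma_mask	   = &example_dma_capable_device.dev.coherent_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};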
static void __init marzen_init(void)
{
regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
@@ -188,6 +364,14 @@ static void __init marzen_init(void)
gpio_request(GPIO_FN_HSPI_TX0, NULL);
gpio_request(GPIO_FN_HSPI_RX0, NULL);
+ /* USB (CN21) */
+ gpio_request(GPIO_FN_USB_OVC0, NULL);
+ gpio_request(GPIO_FN_USB_OVC1, NULL);
+ gpio_request(GPIO_FN_USB_OVC2, NULL);
+
+ /* USB (CN22) */
+ gpio_request(GPIO_FN_USB_PENC2, NULL);
+
r8a7779_add_standard_devices();
platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
}
@@ -200,6 +384,6 @@ MACHINE_START(MARZEN, "marzen")
.init_irq = r8a7779_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = marzen_init,
- .init_late = shmobile_init_late,
+ .init_late = marzen_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 4d57e342537..3ca6757b129 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -295,10 +295,10 @@ struct clk sh7372_pllc2_clk = {
};
/* External input clock (pin name: FSIACK/FSIBCK ) */
-struct clk sh7372_fsiack_clk = {
+static struct clk fsiack_clk = {
};
-struct clk sh7372_fsibck_clk = {
+static struct clk fsibck_clk = {
};
static struct clk *main_clks[] = {
@@ -314,8 +314,8 @@ static struct clk *main_clks[] = {
&pllc1_clk,
&pllc1_div2_clk,
&sh7372_pllc2_clk,
- &sh7372_fsiack_clk,
- &sh7372_fsibck_clk,
+ &fsiack_clk,
+ &fsibck_clk,
};
static void div4_kick(struct clk *clk)
@@ -399,14 +399,14 @@ static struct clk *hdmi_parent[] = {
static struct clk *fsiackcr_parent[] = {
[0] = &pllc1_div2_clk,
[1] = &sh7372_pllc2_clk,
- [2] = &sh7372_fsiack_clk, /* external input for FSI A */
+ [2] = &fsiack_clk, /* external input for FSI A */
[3] = NULL, /* setting prohibited */
};
static struct clk *fsibckcr_parent[] = {
[0] = &pllc1_div2_clk,
[1] = &sh7372_pllc2_clk,
- [2] = &sh7372_fsibck_clk, /* external input for FSI B */
+ [2] = &fsibck_clk, /* external input for FSI B */
[3] = NULL, /* setting prohibited */
};
@@ -507,8 +507,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
CLKDEV_CON_ID("pllc2_clk", &sh7372_pllc2_clk),
- CLKDEV_CON_ID("fsidiva", &fsidivs[FSIDIV_A]),
- CLKDEV_CON_ID("fsidivb", &fsidivs[FSIDIV_B]),
+ CLKDEV_CON_ID("fsiack", &fsiack_clk),
+ CLKDEV_CON_ID("fsibck", &fsibck_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
@@ -606,8 +606,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
CLKDEV_ICK_ID("diva", "sh_fsi2", &fsidivs[FSIDIV_A]),
CLKDEV_ICK_ID("divb", "sh_fsi2", &fsidivs[FSIDIV_B]),
- CLKDEV_ICK_ID("xcka", "sh_fsi2", &sh7372_fsiack_clk),
- CLKDEV_ICK_ID("xckb", "sh_fsi2", &sh7372_fsibck_clk),
+ CLKDEV_ICK_ID("xcka", "sh_fsi2", &fsiack_clk),
+ CLKDEV_ICK_ID("xckb", "sh_fsi2", &fsibck_clk),
};
void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 26cd1016fad..b582facc1cf 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -477,8 +477,6 @@ extern struct clk sh7372_extal2_clk;
extern struct clk sh7372_dv_clki_clk;
extern struct clk sh7372_dv_clki_div2_clk;
extern struct clk sh7372_pllc2_clk;
-extern struct clk sh7372_fsiack_clk;
-extern struct clk sh7372_fsibck_clk;
extern void sh7372_intcs_suspend(void);
extern void sh7372_intcs_resume(void);
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 535426c306b..f6745628628 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -32,8 +32,24 @@
#define EMEV2_SCU_BASE 0x1e000000
+static DEFINE_SPINLOCK(scu_lock);
static void __iomem *scu_base;
+static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
+{
+ unsigned long tmp;
+
+ /* we assume this code is running on a different cpu
+ * than the one that is changing coherency setting */
+ spin_lock(&scu_lock);
+ tmp = readl(scu_base + 8);
+ tmp &= ~clr;
+ tmp |= set;
+ writel(tmp, scu_base + 8);
+ spin_unlock(&scu_lock);
+}
+
static unsigned int __init emev2_get_core_count(void)
{
if (!scu_base) {
@@ -79,7 +95,7 @@ static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *
cpu = cpu_logical_map(cpu);
/* enable cache coherency */
- scu_power_mode(scu_base, 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
/* Tell ROM loader about our vector (in headsmp.S) */
emev2_set_boot_vector(__pa(shmobile_secondary_vector));
@@ -90,10 +106,12 @@ static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *
static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu = cpu_logical_map(0);
+
scu_enable(scu_base);
/* enable cache coherency on CPU0 */
- scu_power_mode(scu_base, 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
}
static void __init emev2_smp_init_cpus(void)
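[Annotation] The open-coded modify_scu_cpu_psr() helpers added in this file and in the two shmobile SMP files below all target the SCU CPU Power Status register at SCU base + 0x08. That register carries one byte per CPU, and the low two bits of each byte select the power state (0 is normal/coherent, 3 is powered off, matching SCU_PM_NORMAL and SCU_PM_POWEROFF from <asm/smp_scu.h>); scu_power_mode() only updates the calling CPU's field, which is why the mask for an arbitrary CPU is computed by hand here. A minimal sketch of that mask arithmetic, with illustrative helper names not taken from this patch:

/*
 * Illustration only: each CPU owns an 8-bit field in the SCU CPU Power
 * Status register; 3 << (cpu * 8) selects that CPU's two mode bits.
 */
static inline unsigned long example_scu_psr_mask(unsigned int cpu)
{
	return 3UL << (cpu * 8);
}

static inline unsigned long example_enable_coherency(unsigned long psr,
						     unsigned int cpu)
{
	return psr & ~example_scu_psr_mask(cpu);	/* field = 0, normal mode */
}

static inline unsigned long example_power_off(unsigned long psr,
					      unsigned int cpu)
{
	return psr | example_scu_psr_mask(cpu);		/* field = 3, powered off */
}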
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 9def0f22bf2..2ce6af9a6a3 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -61,6 +61,9 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)0xf0000000;
}
+static DEFINE_SPINLOCK(scu_lock);
+static unsigned long tmp;
+
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
@@ -70,6 +73,20 @@ void __init r8a7779_register_twd(void)
}
#endif
+static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
+{
+ void __iomem *scu_base = scu_base_addr();
+
+ spin_lock(&scu_lock);
+ tmp = __raw_readl(scu_base + 8);
+ tmp &= ~clr;
+ tmp |= set;
+ spin_unlock(&scu_lock);
+
+ /* apply the new coherency setting after releasing the lock */

+ __raw_writel(tmp, scu_base + 8);
+}
+
static unsigned int __init r8a7779_get_core_count(void)
{
void __iomem *scu_base = scu_base_addr();
@@ -85,7 +102,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
cpu = cpu_logical_map(cpu);
/* disable cache coherency */
- scu_power_mode(scu_base_addr(), 3);
+ modify_scu_cpu_psr(3 << (cpu * 8), 0);
if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
ch = r8a7779_ch_cpu[cpu];
@@ -128,7 +145,7 @@ static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct
cpu = cpu_logical_map(cpu);
/* enable cache coherency */
- scu_power_mode(scu_base_addr(), 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
ch = r8a7779_ch_cpu[cpu];
@@ -141,13 +158,15 @@ static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct
static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu = cpu_logical_map(0);
+
scu_enable(scu_base_addr());
/* Map the reset vector (in headsmp.S) */
__raw_writel(__pa(shmobile_secondary_vector), AVECR);
/* enable cache coherency on CPU0 */
- scu_power_mode(scu_base_addr(), 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
r8a7779_pm_init();
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 96ddb97babb..624f00f70ab 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -41,6 +41,9 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)0xf0000000;
}
+static DEFINE_SPINLOCK(scu_lock);
+static unsigned long tmp;
+
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
void __init sh73a0_register_twd(void)
@@ -49,6 +52,20 @@ void __init sh73a0_register_twd(void)
}
#endif
+static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
+{
+ void __iomem *scu_base = scu_base_addr();
+
+ spin_lock(&scu_lock);
+ tmp = __raw_readl(scu_base + 8);
+ tmp &= ~clr;
+ tmp |= set;
+ spin_unlock(&scu_lock);
+
+ /* apply the new coherency setting after releasing the lock */
+ __raw_writel(tmp, scu_base + 8);
+}
+
static unsigned int __init sh73a0_get_core_count(void)
{
void __iomem *scu_base = scu_base_addr();
@@ -66,7 +83,7 @@ static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct
cpu = cpu_logical_map(cpu);
/* enable cache coherency */
- scu_power_mode(scu_base_addr(), 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
__raw_writel(1 << cpu, WUPCR); /* wake up */
@@ -78,6 +95,8 @@ static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct
static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu = cpu_logical_map(0);
+
scu_enable(scu_base_addr());
/* Map the reset vector (in headsmp.S) */
@@ -85,7 +104,7 @@ static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
__raw_writel(__pa(shmobile_secondary_vector), SBAR);
/* enable cache coherency on CPU0 */
- scu_power_mode(scu_base_addr(), 0);
+ modify_scu_cpu_psr(0, 3 << (cpu * 8));
}
static void __init sh73a0_smp_init_cpus(void)
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 803a3281feb..566e804d403 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -12,5 +12,6 @@ config ARCH_SOCFPGA
select GENERIC_CLOCKEVENTS
select GPIO_PL061 if GPIOLIB
select HAVE_ARM_SCU
+ select HAVE_SMP
select SPARSE_IRQ
select USE_OF
diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile
index 4fb93240971..6dd7a93a90f 100644
--- a/arch/arm/mach-socfpga/Makefile
+++ b/arch/arm/mach-socfpga/Makefile
@@ -3,3 +3,4 @@
#
obj-y := socfpga.o
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
new file mode 100644
index 00000000000..9941caa9493
--- /dev/null
+++ b/arch/arm/mach-socfpga/core.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Pavel Machek <pavel@denx.de>
+ * Copyright (C) 2012 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __MACH_CORE_H
+#define __MACH_CORE_H
+
+extern void secondary_startup(void);
+extern void __iomem *socfpga_scu_base_addr;
+
+extern void socfpga_init_clocks(void);
+extern void socfpga_sysmgr_init(void);
+
+extern struct smp_operations socfpga_smp_ops;
+extern char secondary_trampoline, secondary_trampoline_end;
+
+#define SOCFPGA_SCU_VIRT_BASE 0xfffec000
+
+#endif
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
new file mode 100644
index 00000000000..f09b1283ffc
--- /dev/null
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2003 ARM Limited
+ * Copyright (c) u-boot contributors
+ * Copyright (c) 2012 Pavel Machek <pavel@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __CPUINIT
+ .arch armv7-a
+
+#define CPU1_START_ADDR 0xffd08010
+
+ENTRY(secondary_trampoline)
+ movw r0, #:lower16:CPU1_START_ADDR
+ movt r0, #:upper16:CPU1_START_ADDR
+
+ ldr r1, [r0]
+ bx r1
+
+ENTRY(secondary_trampoline_end)
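[Annotation] The trampoline is deliberately tiny and position independent: boot_secondary() in platsmp.c below copies it to physical address 0, where CPU1 starts fetching once it is released from reset, and the stub then loads the word at CPU1_START_ADDR (0xffd08010, a System Manager register) and jumps to it. The sketch below shows, in isolation, the write that fills that mailbox; the function name is illustrative, and the real code is in socfpga_boot_secondary() further down this patch.

#include <linux/io.h>

/*
 * Illustration only: point the System Manager mailbox that the
 * trampoline reads (assumed base 0xffd08000, offset 0x10) at the
 * physical address of the secondary entry point.
 */
static void example_point_cpu1_at(void __iomem *sysmgr_base,
				  unsigned long entry_phys)
{
	__raw_writel(entry_phys, sysmgr_base + 0x10);
}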
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
new file mode 100644
index 00000000000..68dd1b69512
--- /dev/null
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ * Copyright 2012 Pavel Machek <pavel@denx.de>
+ * Based on platsmp.c, Copyright (C) 2002 ARM Ltd.
+ * Copyright (C) 2012 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
+
+#include "core.h"
+
+extern void __iomem *sys_manager_base_addr;
+extern void __iomem *rst_manager_base_addr;
+
+static void __cpuinit socfpga_secondary_init(unsigned int cpu)
+{
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_secondary_init(0);
+}
+
+static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+
+ memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+
+ __raw_writel(virt_to_phys(secondary_startup), (sys_manager_base_addr+0x10));
+
+ flush_cache_all();
+ smp_wmb();
+ outer_clean_range(0, trampoline_size);
+
+ /* This will release CPU #1 out of reset.*/
+ __raw_writel(0, rst_manager_base_addr + 0x10);
+
+ return 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init socfpga_smp_init_cpus(void)
+{
+ unsigned int i, ncores;
+
+ ncores = scu_get_core_count(socfpga_scu_base_addr);
+
+ /* sanity check */
+ if (ncores > num_possible_cpus()) {
+ pr_warn("socfpga: no. of cores (%d) greater than configured"
+ "maximum of %d - clipping\n", ncores, num_possible_cpus());
+ ncores = num_possible_cpus();
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+
+ set_smp_cross_call(gic_raise_softirq);
+}
+
+static void __init socfpga_smp_prepare_cpus(unsigned int max_cpus)
+{
+ scu_enable(socfpga_scu_base_addr);
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+static void socfpga_cpu_die(unsigned int cpu)
+{
+ cpu_do_idle();
+
+ /* We should have never returned from idle */
+ panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+}
+
+struct smp_operations socfpga_smp_ops __initdata = {
+ .smp_init_cpus = socfpga_smp_init_cpus,
+ .smp_prepare_cpus = socfpga_smp_prepare_cpus,
+ .smp_secondary_init = socfpga_secondary_init,
+ .smp_boot_secondary = socfpga_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = socfpga_cpu_die,
+#endif
+};
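[Annotation] These operations follow the usual Cortex-A9 shape: smp_init_cpus() asks the SCU how many cores the cluster has and marks them possible, while smp_prepare_cpus() enables the SCU before any secondary is started. A compact sketch of the core-count step, with the clamp applied before any CPU is marked possible; the function name and the explicit scu_base parameter are illustrative, and only scu_get_core_count(), num_possible_cpus() and set_cpu_possible() are real APIs.

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <asm/smp_scu.h>

/* Illustration only: discover the core count from the SCU, clamp it to
 * the configured maximum, then mark the surviving CPUs possible. */
static void __init example_smp_init_cpus(void __iomem *scu_base)
{
	unsigned int i, ncores = scu_get_core_count(scu_base);

	if (ncores > num_possible_cpus()) {
		pr_warn("example: %u cores reported, clipping to %u\n",
			ncores, num_possible_cpus());
		ncores = num_possible_cpus();
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}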
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index f01e1ebf539..6732924a5fe 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -15,23 +15,73 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dw_apb_timer.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
-extern void socfpga_init_clocks(void);
+#include "core.h"
+
+void __iomem *socfpga_scu_base_addr = ((void __iomem *)(SOCFPGA_SCU_VIRT_BASE));
+void __iomem *sys_manager_base_addr;
+void __iomem *rst_manager_base_addr;
+
+static struct map_desc scu_io_desc __initdata = {
+ .virtual = SOCFPGA_SCU_VIRT_BASE,
+ .pfn = 0, /* run-time */
+ .length = SZ_8K,
+ .type = MT_DEVICE,
+};
+
+static struct map_desc uart_io_desc __initdata = {
+ .virtual = 0xfec02000,
+ .pfn = __phys_to_pfn(0xffc02000),
+ .length = SZ_8K,
+ .type = MT_DEVICE,
+};
+
+static void __init socfpga_scu_map_io(void)
+{
+ unsigned long base;
+
+ /* Get SCU base */
+ asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (base));
+
+ scu_io_desc.pfn = __phys_to_pfn(base);
+ iotable_init(&scu_io_desc, 1);
+}
+
+static void __init socfpga_map_io(void)
+{
+ socfpga_scu_map_io();
+ iotable_init(&uart_io_desc, 1);
+ early_printk("Early printk initialized\n");
+}
const static struct of_device_id irq_match[] = {
{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
{}
};
+void __init socfpga_sysmgr_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr");
+ sys_manager_base_addr = of_iomap(np, 0);
+
+ np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
+ rst_manager_base_addr = of_iomap(np, 0);
+}
+
static void __init gic_init_irq(void)
{
of_irq_init(irq_match);
+ socfpga_sysmgr_init();
}
static void socfpga_cyclone5_restart(char mode, const char *cmd)
@@ -53,6 +103,8 @@ static const char *altera_dt_match[] = {
};
DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA")
+ .smp = smp_ops(socfpga_smp_ops),
+ .map_io = socfpga_map_io,
.init_irq = gic_init_irq,
.handle_irq = gic_handle_irq,
.timer = &dw_apb_timer,
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
index 07d90acc92c..7cfa6818865 100644
--- a/arch/arm/mach-spear13xx/include/mach/spear.h
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -47,14 +47,6 @@
#define DMAC1_BASE UL(0xEB000000)
#define MCIF_CF_BASE UL(0xB2800000)
-/* Devices present in SPEAr1310 */
-#ifdef CONFIG_MACH_SPEAR1310
-#define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000)
-#define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000)
-#define SPEAR1310_RAS_BASE UL(0xD8400000)
-#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
-#endif /* CONFIG_MACH_SPEAR1310 */
-
/* Debug uart for linux, will be used for debug and uncompress messages */
#define SPEAR_DBG_UART_BASE UART_BASE
#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
index 9fbbfc5650a..02f4724bb0d 100644
--- a/arch/arm/mach-spear13xx/spear1310.c
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -15,6 +15,7 @@
#include <linux/amba/pl022.h>
#include <linux/of_platform.h>
+#include <linux/pata_arasan_cf_data.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -27,16 +28,25 @@
#define SPEAR1310_SATA1_BASE UL(0xB1800000)
#define SPEAR1310_SATA2_BASE UL(0xB4000000)
+#define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000)
+#define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000)
+#define SPEAR1310_RAS_BASE UL(0xD8400000)
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
+
+static struct arasan_cf_pdata cf_pdata = {
+ .cf_if_clk = CF_IF_CLK_166M,
+ .quirk = CF_BROKEN_UDMA,
+ .dma_priv = &cf_dma_priv,
+};
+
/* ssp device registration */
static struct pl022_ssp_controller ssp1_plat_data = {
- .bus_id = 0,
.enable_dma = 0,
- .num_chipselect = 3,
};
/* Add SPEAr1310 auxdata to pass platform data */
static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+ OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_pdata),
OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
index 5633d698f1e..c4af775a845 100644
--- a/arch/arm/mach-spear13xx/spear13xx.c
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -57,12 +57,10 @@ static struct dw_dma_slave ssp_dma_param[] = {
};
struct pl022_ssp_controller pl022_plat_data = {
- .bus_id = 0,
.enable_dma = 1,
.dma_filter = dw_dma_filter,
.dma_rx_param = &ssp_dma_param[1],
.dma_tx_param = &ssp_dma_param[0],
- .num_chipselect = 3,
};
/* CF device registration */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 803de76f5f3..f95e5b2b668 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -14,14 +14,6 @@
#ifndef __MACH_IRQS_H
#define __MACH_IRQS_H
-/* FIXME: probe all these from DT */
-#define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM 1
-#define SPEAR3XX_IRQ_GEN_RAS_1 28
-#define SPEAR3XX_IRQ_GEN_RAS_2 29
-#define SPEAR3XX_IRQ_GEN_RAS_3 30
-#define SPEAR3XX_IRQ_VIC_END 32
-#define SPEAR3XX_VIRQ_START SPEAR3XX_IRQ_VIC_END
-
-#define NR_IRQS 160
+#define NR_IRQS 256
#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 6ec30054996..a69cbfdb07e 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -17,102 +17,9 @@
#include <linux/of_platform.h>
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
-#include <plat/shirq.h>
#include <mach/generic.h>
#include <mach/spear.h>
-/* Base address of various IPs */
-#define SPEAR300_TELECOM_BASE UL(0x50000000)
-
-/* Interrupt registers offsets and masks */
-#define SPEAR300_INT_ENB_MASK_REG 0x54
-#define SPEAR300_INT_STS_MASK_REG 0x58
-#define SPEAR300_IT_PERS_S_IRQ_MASK (1 << 0)
-#define SPEAR300_IT_CHANGE_S_IRQ_MASK (1 << 1)
-#define SPEAR300_I2S_IRQ_MASK (1 << 2)
-#define SPEAR300_TDM_IRQ_MASK (1 << 3)
-#define SPEAR300_CAMERA_L_IRQ_MASK (1 << 4)
-#define SPEAR300_CAMERA_F_IRQ_MASK (1 << 5)
-#define SPEAR300_CAMERA_V_IRQ_MASK (1 << 6)
-#define SPEAR300_KEYBOARD_IRQ_MASK (1 << 7)
-#define SPEAR300_GPIO1_IRQ_MASK (1 << 8)
-
-#define SPEAR300_SHIRQ_RAS1_MASK 0x1FF
-
-#define SPEAR300_SOC_CONFIG_BASE UL(0x99000000)
-
-
-/* SPEAr300 Virtual irq definitions */
-/* IRQs sharing IRQ_GEN_RAS_1 */
-#define SPEAR300_VIRQ_IT_PERS_S (SPEAR3XX_VIRQ_START + 0)
-#define SPEAR300_VIRQ_IT_CHANGE_S (SPEAR3XX_VIRQ_START + 1)
-#define SPEAR300_VIRQ_I2S (SPEAR3XX_VIRQ_START + 2)
-#define SPEAR300_VIRQ_TDM (SPEAR3XX_VIRQ_START + 3)
-#define SPEAR300_VIRQ_CAMERA_L (SPEAR3XX_VIRQ_START + 4)
-#define SPEAR300_VIRQ_CAMERA_F (SPEAR3XX_VIRQ_START + 5)
-#define SPEAR300_VIRQ_CAMERA_V (SPEAR3XX_VIRQ_START + 6)
-#define SPEAR300_VIRQ_KEYBOARD (SPEAR3XX_VIRQ_START + 7)
-#define SPEAR300_VIRQ_GPIO1 (SPEAR3XX_VIRQ_START + 8)
-
-/* IRQs sharing IRQ_GEN_RAS_3 */
-#define SPEAR300_IRQ_CLCD SPEAR3XX_IRQ_GEN_RAS_3
-
-/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
-#define SPEAR300_IRQ_SDHCI SPEAR3XX_IRQ_INTRCOMM_RAS_ARM
-
-/* spear3xx shared irq */
-static struct shirq_dev_config shirq_ras1_config[] = {
- {
- .virq = SPEAR300_VIRQ_IT_PERS_S,
- .enb_mask = SPEAR300_IT_PERS_S_IRQ_MASK,
- .status_mask = SPEAR300_IT_PERS_S_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_IT_CHANGE_S,
- .enb_mask = SPEAR300_IT_CHANGE_S_IRQ_MASK,
- .status_mask = SPEAR300_IT_CHANGE_S_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_I2S,
- .enb_mask = SPEAR300_I2S_IRQ_MASK,
- .status_mask = SPEAR300_I2S_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_TDM,
- .enb_mask = SPEAR300_TDM_IRQ_MASK,
- .status_mask = SPEAR300_TDM_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_CAMERA_L,
- .enb_mask = SPEAR300_CAMERA_L_IRQ_MASK,
- .status_mask = SPEAR300_CAMERA_L_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_CAMERA_F,
- .enb_mask = SPEAR300_CAMERA_F_IRQ_MASK,
- .status_mask = SPEAR300_CAMERA_F_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_CAMERA_V,
- .enb_mask = SPEAR300_CAMERA_V_IRQ_MASK,
- .status_mask = SPEAR300_CAMERA_V_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_KEYBOARD,
- .enb_mask = SPEAR300_KEYBOARD_IRQ_MASK,
- .status_mask = SPEAR300_KEYBOARD_IRQ_MASK,
- }, {
- .virq = SPEAR300_VIRQ_GPIO1,
- .enb_mask = SPEAR300_GPIO1_IRQ_MASK,
- .status_mask = SPEAR300_GPIO1_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras1 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_1,
- .dev_config = shirq_ras1_config,
- .dev_count = ARRAY_SIZE(shirq_ras1_config),
- .regs = {
- .enb_reg = SPEAR300_INT_ENB_MASK_REG,
- .status_reg = SPEAR300_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR300_SHIRQ_RAS1_MASK,
- .clear_reg = -1,
- },
-};
-
/* DMAC platform data's slave info */
struct pl08x_channel_data spear300_dma_info[] = {
{
@@ -285,21 +192,11 @@ static struct of_dev_auxdata spear300_auxdata_lookup[] __initdata = {
static void __init spear300_dt_init(void)
{
- int ret;
-
pl080_plat_data.slave_channels = spear300_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear300_dma_info);
of_platform_populate(NULL, of_default_bus_match_table,
spear300_auxdata_lookup, NULL);
-
- /* shared irq registration */
- shirq_ras1.regs.base = ioremap(SPEAR300_TELECOM_BASE, SZ_4K);
- if (shirq_ras1.regs.base) {
- ret = spear_shirq_register(&shirq_ras1);
- if (ret)
- pr_err("Error registering Shared IRQ\n");
- }
}
static const char * const spear300_dt_board_compat[] = {
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 1d0e435b904..b963ebb10b5 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -18,7 +18,6 @@
#include <linux/of_platform.h>
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
-#include <plat/shirq.h>
#include <mach/generic.h>
#include <mach/spear.h>
@@ -27,176 +26,6 @@
#define SPEAR310_UART3_BASE UL(0xB2100000)
#define SPEAR310_UART4_BASE UL(0xB2180000)
#define SPEAR310_UART5_BASE UL(0xB2200000)
-#define SPEAR310_SOC_CONFIG_BASE UL(0xB4000000)
-
-/* Interrupt registers offsets and masks */
-#define SPEAR310_INT_STS_MASK_REG 0x04
-#define SPEAR310_SMII0_IRQ_MASK (1 << 0)
-#define SPEAR310_SMII1_IRQ_MASK (1 << 1)
-#define SPEAR310_SMII2_IRQ_MASK (1 << 2)
-#define SPEAR310_SMII3_IRQ_MASK (1 << 3)
-#define SPEAR310_WAKEUP_SMII0_IRQ_MASK (1 << 4)
-#define SPEAR310_WAKEUP_SMII1_IRQ_MASK (1 << 5)
-#define SPEAR310_WAKEUP_SMII2_IRQ_MASK (1 << 6)
-#define SPEAR310_WAKEUP_SMII3_IRQ_MASK (1 << 7)
-#define SPEAR310_UART1_IRQ_MASK (1 << 8)
-#define SPEAR310_UART2_IRQ_MASK (1 << 9)
-#define SPEAR310_UART3_IRQ_MASK (1 << 10)
-#define SPEAR310_UART4_IRQ_MASK (1 << 11)
-#define SPEAR310_UART5_IRQ_MASK (1 << 12)
-#define SPEAR310_EMI_IRQ_MASK (1 << 13)
-#define SPEAR310_TDM_HDLC_IRQ_MASK (1 << 14)
-#define SPEAR310_RS485_0_IRQ_MASK (1 << 15)
-#define SPEAR310_RS485_1_IRQ_MASK (1 << 16)
-
-#define SPEAR310_SHIRQ_RAS1_MASK 0x000FF
-#define SPEAR310_SHIRQ_RAS2_MASK 0x01F00
-#define SPEAR310_SHIRQ_RAS3_MASK 0x02000
-#define SPEAR310_SHIRQ_INTRCOMM_RAS_MASK 0x1C000
-
-/* SPEAr310 Virtual irq definitions */
-/* IRQs sharing IRQ_GEN_RAS_1 */
-#define SPEAR310_VIRQ_SMII0 (SPEAR3XX_VIRQ_START + 0)
-#define SPEAR310_VIRQ_SMII1 (SPEAR3XX_VIRQ_START + 1)
-#define SPEAR310_VIRQ_SMII2 (SPEAR3XX_VIRQ_START + 2)
-#define SPEAR310_VIRQ_SMII3 (SPEAR3XX_VIRQ_START + 3)
-#define SPEAR310_VIRQ_WAKEUP_SMII0 (SPEAR3XX_VIRQ_START + 4)
-#define SPEAR310_VIRQ_WAKEUP_SMII1 (SPEAR3XX_VIRQ_START + 5)
-#define SPEAR310_VIRQ_WAKEUP_SMII2 (SPEAR3XX_VIRQ_START + 6)
-#define SPEAR310_VIRQ_WAKEUP_SMII3 (SPEAR3XX_VIRQ_START + 7)
-
-/* IRQs sharing IRQ_GEN_RAS_2 */
-#define SPEAR310_VIRQ_UART1 (SPEAR3XX_VIRQ_START + 8)
-#define SPEAR310_VIRQ_UART2 (SPEAR3XX_VIRQ_START + 9)
-#define SPEAR310_VIRQ_UART3 (SPEAR3XX_VIRQ_START + 10)
-#define SPEAR310_VIRQ_UART4 (SPEAR3XX_VIRQ_START + 11)
-#define SPEAR310_VIRQ_UART5 (SPEAR3XX_VIRQ_START + 12)
-
-/* IRQs sharing IRQ_GEN_RAS_3 */
-#define SPEAR310_VIRQ_EMI (SPEAR3XX_VIRQ_START + 13)
-#define SPEAR310_VIRQ_PLGPIO (SPEAR3XX_VIRQ_START + 14)
-
-/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
-#define SPEAR310_VIRQ_TDM_HDLC (SPEAR3XX_VIRQ_START + 15)
-#define SPEAR310_VIRQ_RS485_0 (SPEAR3XX_VIRQ_START + 16)
-#define SPEAR310_VIRQ_RS485_1 (SPEAR3XX_VIRQ_START + 17)
-
-
-/* spear3xx shared irq */
-static struct shirq_dev_config shirq_ras1_config[] = {
- {
- .virq = SPEAR310_VIRQ_SMII0,
- .status_mask = SPEAR310_SMII0_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_SMII1,
- .status_mask = SPEAR310_SMII1_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_SMII2,
- .status_mask = SPEAR310_SMII2_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_SMII3,
- .status_mask = SPEAR310_SMII3_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_WAKEUP_SMII0,
- .status_mask = SPEAR310_WAKEUP_SMII0_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_WAKEUP_SMII1,
- .status_mask = SPEAR310_WAKEUP_SMII1_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_WAKEUP_SMII2,
- .status_mask = SPEAR310_WAKEUP_SMII2_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_WAKEUP_SMII3,
- .status_mask = SPEAR310_WAKEUP_SMII3_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras1 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_1,
- .dev_config = shirq_ras1_config,
- .dev_count = ARRAY_SIZE(shirq_ras1_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR310_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR310_SHIRQ_RAS1_MASK,
- .clear_reg = -1,
- },
-};
-
-static struct shirq_dev_config shirq_ras2_config[] = {
- {
- .virq = SPEAR310_VIRQ_UART1,
- .status_mask = SPEAR310_UART1_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_UART2,
- .status_mask = SPEAR310_UART2_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_UART3,
- .status_mask = SPEAR310_UART3_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_UART4,
- .status_mask = SPEAR310_UART4_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_UART5,
- .status_mask = SPEAR310_UART5_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras2 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_2,
- .dev_config = shirq_ras2_config,
- .dev_count = ARRAY_SIZE(shirq_ras2_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR310_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR310_SHIRQ_RAS2_MASK,
- .clear_reg = -1,
- },
-};
-
-static struct shirq_dev_config shirq_ras3_config[] = {
- {
- .virq = SPEAR310_VIRQ_EMI,
- .status_mask = SPEAR310_EMI_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras3 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_3,
- .dev_config = shirq_ras3_config,
- .dev_count = ARRAY_SIZE(shirq_ras3_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR310_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR310_SHIRQ_RAS3_MASK,
- .clear_reg = -1,
- },
-};
-
-static struct shirq_dev_config shirq_intrcomm_ras_config[] = {
- {
- .virq = SPEAR310_VIRQ_TDM_HDLC,
- .status_mask = SPEAR310_TDM_HDLC_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_RS485_0,
- .status_mask = SPEAR310_RS485_0_IRQ_MASK,
- }, {
- .virq = SPEAR310_VIRQ_RS485_1,
- .status_mask = SPEAR310_RS485_1_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_intrcomm_ras = {
- .irq = SPEAR3XX_IRQ_INTRCOMM_RAS_ARM,
- .dev_config = shirq_intrcomm_ras_config,
- .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR310_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR310_SHIRQ_INTRCOMM_RAS_MASK,
- .clear_reg = -1,
- },
-};
/* DMAC platform data's slave info */
struct pl08x_channel_data spear310_dma_info[] = {
@@ -405,42 +234,11 @@ static struct of_dev_auxdata spear310_auxdata_lookup[] __initdata = {
static void __init spear310_dt_init(void)
{
- void __iomem *base;
- int ret;
-
pl080_plat_data.slave_channels = spear310_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear310_dma_info);
of_platform_populate(NULL, of_default_bus_match_table,
spear310_auxdata_lookup, NULL);
-
- /* shared irq registration */
- base = ioremap(SPEAR310_SOC_CONFIG_BASE, SZ_4K);
- if (base) {
- /* shirq 1 */
- shirq_ras1.regs.base = base;
- ret = spear_shirq_register(&shirq_ras1);
- if (ret)
- pr_err("Error registering Shared IRQ 1\n");
-
- /* shirq 2 */
- shirq_ras2.regs.base = base;
- ret = spear_shirq_register(&shirq_ras2);
- if (ret)
- pr_err("Error registering Shared IRQ 2\n");
-
- /* shirq 3 */
- shirq_ras3.regs.base = base;
- ret = spear_shirq_register(&shirq_ras3);
- if (ret)
- pr_err("Error registering Shared IRQ 3\n");
-
- /* shirq 4 */
- shirq_intrcomm_ras.regs.base = base;
- ret = spear_shirq_register(&shirq_intrcomm_ras);
- if (ret)
- pr_err("Error registering Shared IRQ 4\n");
- }
}
static const char * const spear310_dt_board_compat[] = {
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index fd823c62457..66e3a0c33e7 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -19,7 +19,6 @@
#include <linux/of_platform.h>
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
-#include <plat/shirq.h>
#include <mach/generic.h>
#include <mach/spear.h>
@@ -28,184 +27,6 @@
#define SPEAR320_SSP0_BASE UL(0xA5000000)
#define SPEAR320_SSP1_BASE UL(0xA6000000)
-/* Interrupt registers offsets and masks */
-#define SPEAR320_INT_STS_MASK_REG 0x04
-#define SPEAR320_INT_CLR_MASK_REG 0x04
-#define SPEAR320_INT_ENB_MASK_REG 0x08
-#define SPEAR320_GPIO_IRQ_MASK (1 << 0)
-#define SPEAR320_I2S_PLAY_IRQ_MASK (1 << 1)
-#define SPEAR320_I2S_REC_IRQ_MASK (1 << 2)
-#define SPEAR320_EMI_IRQ_MASK (1 << 7)
-#define SPEAR320_CLCD_IRQ_MASK (1 << 8)
-#define SPEAR320_SPP_IRQ_MASK (1 << 9)
-#define SPEAR320_SDHCI_IRQ_MASK (1 << 10)
-#define SPEAR320_CAN_U_IRQ_MASK (1 << 11)
-#define SPEAR320_CAN_L_IRQ_MASK (1 << 12)
-#define SPEAR320_UART1_IRQ_MASK (1 << 13)
-#define SPEAR320_UART2_IRQ_MASK (1 << 14)
-#define SPEAR320_SSP1_IRQ_MASK (1 << 15)
-#define SPEAR320_SSP2_IRQ_MASK (1 << 16)
-#define SPEAR320_SMII0_IRQ_MASK (1 << 17)
-#define SPEAR320_MII1_SMII1_IRQ_MASK (1 << 18)
-#define SPEAR320_WAKEUP_SMII0_IRQ_MASK (1 << 19)
-#define SPEAR320_WAKEUP_MII1_SMII1_IRQ_MASK (1 << 20)
-#define SPEAR320_I2C1_IRQ_MASK (1 << 21)
-
-#define SPEAR320_SHIRQ_RAS1_MASK 0x000380
-#define SPEAR320_SHIRQ_RAS3_MASK 0x000007
-#define SPEAR320_SHIRQ_INTRCOMM_RAS_MASK 0x3FF800
-
-/* SPEAr320 Virtual irq definitions */
-/* IRQs sharing IRQ_GEN_RAS_1 */
-#define SPEAR320_VIRQ_EMI (SPEAR3XX_VIRQ_START + 0)
-#define SPEAR320_VIRQ_CLCD (SPEAR3XX_VIRQ_START + 1)
-#define SPEAR320_VIRQ_SPP (SPEAR3XX_VIRQ_START + 2)
-
-/* IRQs sharing IRQ_GEN_RAS_2 */
-#define SPEAR320_IRQ_SDHCI SPEAR3XX_IRQ_GEN_RAS_2
-
-/* IRQs sharing IRQ_GEN_RAS_3 */
-#define SPEAR320_VIRQ_PLGPIO (SPEAR3XX_VIRQ_START + 3)
-#define SPEAR320_VIRQ_I2S_PLAY (SPEAR3XX_VIRQ_START + 4)
-#define SPEAR320_VIRQ_I2S_REC (SPEAR3XX_VIRQ_START + 5)
-
-/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
-#define SPEAR320_VIRQ_CANU (SPEAR3XX_VIRQ_START + 6)
-#define SPEAR320_VIRQ_CANL (SPEAR3XX_VIRQ_START + 7)
-#define SPEAR320_VIRQ_UART1 (SPEAR3XX_VIRQ_START + 8)
-#define SPEAR320_VIRQ_UART2 (SPEAR3XX_VIRQ_START + 9)
-#define SPEAR320_VIRQ_SSP1 (SPEAR3XX_VIRQ_START + 10)
-#define SPEAR320_VIRQ_SSP2 (SPEAR3XX_VIRQ_START + 11)
-#define SPEAR320_VIRQ_SMII0 (SPEAR3XX_VIRQ_START + 12)
-#define SPEAR320_VIRQ_MII1_SMII1 (SPEAR3XX_VIRQ_START + 13)
-#define SPEAR320_VIRQ_WAKEUP_SMII0 (SPEAR3XX_VIRQ_START + 14)
-#define SPEAR320_VIRQ_WAKEUP_MII1_SMII1 (SPEAR3XX_VIRQ_START + 15)
-#define SPEAR320_VIRQ_I2C1 (SPEAR3XX_VIRQ_START + 16)
-
-/* spear3xx shared irq */
-static struct shirq_dev_config shirq_ras1_config[] = {
- {
- .virq = SPEAR320_VIRQ_EMI,
- .status_mask = SPEAR320_EMI_IRQ_MASK,
- .clear_mask = SPEAR320_EMI_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_CLCD,
- .status_mask = SPEAR320_CLCD_IRQ_MASK,
- .clear_mask = SPEAR320_CLCD_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_SPP,
- .status_mask = SPEAR320_SPP_IRQ_MASK,
- .clear_mask = SPEAR320_SPP_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras1 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_1,
- .dev_config = shirq_ras1_config,
- .dev_count = ARRAY_SIZE(shirq_ras1_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR320_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR320_SHIRQ_RAS1_MASK,
- .clear_reg = SPEAR320_INT_CLR_MASK_REG,
- .reset_to_clear = 1,
- },
-};
-
-static struct shirq_dev_config shirq_ras3_config[] = {
- {
- .virq = SPEAR320_VIRQ_PLGPIO,
- .enb_mask = SPEAR320_GPIO_IRQ_MASK,
- .status_mask = SPEAR320_GPIO_IRQ_MASK,
- .clear_mask = SPEAR320_GPIO_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_I2S_PLAY,
- .enb_mask = SPEAR320_I2S_PLAY_IRQ_MASK,
- .status_mask = SPEAR320_I2S_PLAY_IRQ_MASK,
- .clear_mask = SPEAR320_I2S_PLAY_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_I2S_REC,
- .enb_mask = SPEAR320_I2S_REC_IRQ_MASK,
- .status_mask = SPEAR320_I2S_REC_IRQ_MASK,
- .clear_mask = SPEAR320_I2S_REC_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_ras3 = {
- .irq = SPEAR3XX_IRQ_GEN_RAS_3,
- .dev_config = shirq_ras3_config,
- .dev_count = ARRAY_SIZE(shirq_ras3_config),
- .regs = {
- .enb_reg = SPEAR320_INT_ENB_MASK_REG,
- .reset_to_enb = 1,
- .status_reg = SPEAR320_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR320_SHIRQ_RAS3_MASK,
- .clear_reg = SPEAR320_INT_CLR_MASK_REG,
- .reset_to_clear = 1,
- },
-};
-
-static struct shirq_dev_config shirq_intrcomm_ras_config[] = {
- {
- .virq = SPEAR320_VIRQ_CANU,
- .status_mask = SPEAR320_CAN_U_IRQ_MASK,
- .clear_mask = SPEAR320_CAN_U_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_CANL,
- .status_mask = SPEAR320_CAN_L_IRQ_MASK,
- .clear_mask = SPEAR320_CAN_L_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_UART1,
- .status_mask = SPEAR320_UART1_IRQ_MASK,
- .clear_mask = SPEAR320_UART1_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_UART2,
- .status_mask = SPEAR320_UART2_IRQ_MASK,
- .clear_mask = SPEAR320_UART2_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_SSP1,
- .status_mask = SPEAR320_SSP1_IRQ_MASK,
- .clear_mask = SPEAR320_SSP1_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_SSP2,
- .status_mask = SPEAR320_SSP2_IRQ_MASK,
- .clear_mask = SPEAR320_SSP2_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_SMII0,
- .status_mask = SPEAR320_SMII0_IRQ_MASK,
- .clear_mask = SPEAR320_SMII0_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_MII1_SMII1,
- .status_mask = SPEAR320_MII1_SMII1_IRQ_MASK,
- .clear_mask = SPEAR320_MII1_SMII1_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_WAKEUP_SMII0,
- .status_mask = SPEAR320_WAKEUP_SMII0_IRQ_MASK,
- .clear_mask = SPEAR320_WAKEUP_SMII0_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_WAKEUP_MII1_SMII1,
- .status_mask = SPEAR320_WAKEUP_MII1_SMII1_IRQ_MASK,
- .clear_mask = SPEAR320_WAKEUP_MII1_SMII1_IRQ_MASK,
- }, {
- .virq = SPEAR320_VIRQ_I2C1,
- .status_mask = SPEAR320_I2C1_IRQ_MASK,
- .clear_mask = SPEAR320_I2C1_IRQ_MASK,
- },
-};
-
-static struct spear_shirq shirq_intrcomm_ras = {
- .irq = SPEAR3XX_IRQ_INTRCOMM_RAS_ARM,
- .dev_config = shirq_intrcomm_ras_config,
- .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
- .regs = {
- .enb_reg = -1,
- .status_reg = SPEAR320_INT_STS_MASK_REG,
- .status_reg_mask = SPEAR320_SHIRQ_INTRCOMM_RAS_MASK,
- .clear_reg = SPEAR320_INT_CLR_MASK_REG,
- .reset_to_clear = 1,
- },
-};
-
/* DMAC platform data's slave info */
struct pl08x_channel_data spear320_dma_info[] = {
{
@@ -416,41 +237,17 @@ static struct of_dev_auxdata spear320_auxdata_lookup[] __initdata = {
static void __init spear320_dt_init(void)
{
- void __iomem *base;
- int ret;
-
pl080_plat_data.slave_channels = spear320_dma_info;
pl080_plat_data.num_slave_channels = ARRAY_SIZE(spear320_dma_info);
of_platform_populate(NULL, of_default_bus_match_table,
spear320_auxdata_lookup, NULL);
-
- /* shared irq registration */
- base = ioremap(SPEAR320_SOC_CONFIG_BASE, SZ_4K);
- if (base) {
- /* shirq 1 */
- shirq_ras1.regs.base = base;
- ret = spear_shirq_register(&shirq_ras1);
- if (ret)
- pr_err("Error registering Shared IRQ 1\n");
-
- /* shirq 3 */
- shirq_ras3.regs.base = base;
- ret = spear_shirq_register(&shirq_ras3);
- if (ret)
- pr_err("Error registering Shared IRQ 3\n");
-
- /* shirq 4 */
- shirq_intrcomm_ras.regs.base = base;
- ret = spear_shirq_register(&shirq_intrcomm_ras);
- if (ret)
- pr_err("Error registering Shared IRQ 4\n");
- }
}
static const char * const spear320_dt_board_compat[] = {
"st,spear320",
"st,spear320-evb",
+ "st,spear320-hmi",
NULL,
};
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 98144baf888..38fe95db31a 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -15,6 +15,7 @@
#include <linux/amba/pl022.h>
#include <linux/amba/pl08x.h>
+#include <linux/irqchip/spear-shirq.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <asm/hardware/pl080.h>
@@ -121,6 +122,9 @@ struct sys_timer spear3xx_timer = {
static const struct of_device_id vic_of_match[] __initconst = {
{ .compatible = "arm,pl190-vic", .data = vic_of_init, },
+ { .compatible = "st,spear300-shirq", .data = spear300_shirq_of_init, },
+ { .compatible = "st,spear310-shirq", .data = spear310_shirq_of_init, },
+ { .compatible = "st,spear320-shirq", .data = spear320_shirq_of_init, },
{ /* Sentinel */ }
};
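[Annotation] With the board-file tables gone, the SPEAr3xx shared interrupt controllers are discovered from the device tree just like the VIC: of_irq_init() walks the interrupt-controller nodes, matches each one against this table and calls the init function stored in .data with the node and its parent. A minimal sketch of that flow; the table and wrapper names are illustrative, the real table is vic_of_match above, and the shirq init functions come from <linux/irqchip/spear-shirq.h>.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqchip/spear-shirq.h>
#include <asm/hardware/vic.h>

static const struct of_device_id example_irq_match[] __initconst = {
	{ .compatible = "arm,pl190-vic",     .data = vic_of_init },
	{ .compatible = "st,spear300-shirq", .data = spear300_shirq_of_init },
	{ /* sentinel */ }
};

/* Illustration only: typically called from the machine's .init_irq hook. */
static void __init example_init_irq(void)
{
	of_irq_init(example_irq_match);
}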
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 9be910f7920..1dc8a92e5a5 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -80,8 +80,8 @@ static void __init sunxi_dt_init(void)
}
static const char * const sunxi_board_dt_compat[] = {
- "allwinner,sun4i",
- "allwinner,sun5i",
+ "allwinner,sun4i-a10",
+ "allwinner,sun5i-a13",
NULL,
};
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 9ff6f6ea361..b442f15fd01 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -55,58 +55,7 @@ config TEGRA_AHB
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
which controls AHB bus master arbitration and some
- perfomance parameters(priority, prefech size).
-
-choice
- prompt "Default low-level debug console UART"
- default TEGRA_DEBUG_UART_NONE
-
-config TEGRA_DEBUG_UART_NONE
- bool "None"
-
-config TEGRA_DEBUG_UARTA
- bool "UART-A"
-
-config TEGRA_DEBUG_UARTB
- bool "UART-B"
-
-config TEGRA_DEBUG_UARTC
- bool "UART-C"
-
-config TEGRA_DEBUG_UARTD
- bool "UART-D"
-
-config TEGRA_DEBUG_UARTE
- bool "UART-E"
-
-endchoice
-
-choice
- prompt "Automatic low-level debug console UART"
- default TEGRA_DEBUG_UART_AUTO_NONE
-
-config TEGRA_DEBUG_UART_AUTO_NONE
- bool "None"
-
-config TEGRA_DEBUG_UART_AUTO_ODMDATA
- bool "Via ODMDATA"
- help
- Automatically determines which UART to use for low-level debug based
- on the ODMDATA value. This value is part of the BCT, and is written
- to the boot memory device using nvflash, or other flashing tool.
- When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
- 0/1/2/3/4 are UART A/B/C/D/E.
-
-config TEGRA_DEBUG_UART_AUTO_SCRATCH
- bool "Via UART scratch register"
- help
- Automatically determines which UART to use for low-level debug based
- on the UART scratch register value. Some bootloaders put ASCII 'D'
- in this register when they initialize their own console UART output.
- Using this option allows the kernel to automatically pick the same
- UART.
-
-endchoice
+ performance parameters (priority, prefetch size).
config TEGRA_EMC_SCALING_ENABLE
bool "Enable scaling the memory frequency"
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 11a74db51e5..d54cfc54b9f 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -45,14 +45,15 @@
* kernel is loaded. The data is declared here rather than debug-macro.S so
* that multiple inclusions of debug-macro.S point at the same data.
*/
-#define TEGRA_DEBUG_UART_OFFSET (TEGRA_DEBUG_UART_BASE & 0xFFFF)
-u32 tegra_uart_config[3] = {
+u32 tegra_uart_config[4] = {
/* Debug UART initialization required */
1,
/* Debug UART physical address */
- (u32)(IO_APB_PHYS + TEGRA_DEBUG_UART_OFFSET),
+ 0,
/* Debug UART virtual address */
- (u32)(IO_APB_VIRT + TEGRA_DEBUG_UART_OFFSET),
+ 0,
+ /* Scratch space for debug macro */
+ 0,
};
#ifdef CONFIG_OF
@@ -103,7 +104,7 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
- { "pll_p", "clk_m", 408000000, true },
+ { "pll_p", "pll_ref", 408000000, true },
{ "pll_p_out1", "pll_p", 9600000, true },
{ "pll_p_out4", "pll_p", 102000000, true },
{ "sclk", "pll_p_out4", 102000000, true },
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
deleted file mode 100644
index 44ca7b1d8b8..00000000000
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * arch/arm/mach-tegra/include/mach/debug-macro.S
- *
- * Copyright (C) 2010,2011 Google, Inc.
- * Copyright (C) 2011-2012 NVIDIA CORPORATION. All Rights Reserved.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- * Erik Gilling <konkers@google.com>
- * Doug Anderson <dianders@chromium.org>
- * Stephen Warren <swarren@nvidia.com>
- *
- * Portions based on mach-omap2's debug-macro.S
- * Copyright (C) 1994-1999 Russell King
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/serial_reg.h>
-
-#include "../../iomap.h"
-#include "../../irammap.h"
-
- .macro addruart, rp, rv, tmp
- adr \rp, 99f @ actual addr of 99f
- ldr \rv, [\rp] @ linked addr is stored there
- sub \rv, \rv, \rp @ offset between the two
- ldr \rp, [\rp, #4] @ linked tegra_uart_config
- sub \tmp, \rp, \rv @ actual tegra_uart_config
- ldr \rp, [\tmp] @ Load tegra_uart_config
- cmp \rp, #1 @ needs intitialization?
- bne 100f @ no; go load the addresses
- mov \rv, #0 @ yes; record init is done
- str \rv, [\tmp]
- mov \rp, #TEGRA_IRAM_BASE @ See if cookie is in IRAM
- ldr \rv, [\rp, #TEGRA_IRAM_DEBUG_UART_OFFSET]
- movw \rp, #TEGRA_IRAM_DEBUG_UART_COOKIE & 0xffff
- movt \rp, #TEGRA_IRAM_DEBUG_UART_COOKIE >> 16
- cmp \rv, \rp @ Cookie present?
- bne 100f @ No, use default UART
- mov \rp, #TEGRA_IRAM_BASE @ Load UART address from IRAM
- ldr \rv, [\rp, #TEGRA_IRAM_DEBUG_UART_OFFSET + 4]
- str \rv, [\tmp, #4] @ Store in tegra_uart_phys
- sub \rv, \rv, #IO_APB_PHYS @ Calculate virt address
- add \rv, \rv, #IO_APB_VIRT
- str \rv, [\tmp, #8] @ Store in tegra_uart_virt
- b 100f
-
- .align
-99: .word .
- .word tegra_uart_config
- .ltorg
-
-100: ldr \rp, [\tmp, #4] @ Load tegra_uart_phys
- ldr \rv, [\tmp, #8] @ Load tegra_uart_virt
- .endm
-
-#define UART_SHIFT 2
-
-/*
- * Code below is swiped from <asm/hardware/debug-8250.S>, but add an extra
- * check to make sure that we aren't in the CONFIG_TEGRA_DEBUG_UART_NONE case.
- * We use the fact that all 5 valid UART addresses all have something in the
- * 2nd-to-lowest byte.
- */
-
- .macro senduart, rd, rx
- tst \rx, #0x0000ff00
- strneb \rd, [\rx, #UART_TX << UART_SHIFT]
-1001:
- .endm
-
- .macro busyuart, rd, rx
- tst \rx, #0x0000ff00
- beq 1002f
-1001: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1001b
-1002:
- .endm
-
- .macro waituart, rd, rx
-#ifdef FLOW_CONTROL
- tst \rx, #0x0000ff00
- beq 1002f
-1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
- tst \rd, #UART_MSR_CTS
- beq 1001b
-1002:
-#endif
- .endm
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
deleted file mode 100644
index aad1a2c1d71..00000000000
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * arch/arm/mach-tegra/include/mach/irqs.h
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- * Colin Cross <ccross@google.com>
- * Erik Gilling <konkers@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MACH_TEGRA_IRQS_H
-#define __MACH_TEGRA_IRQS_H
-
-#define INT_GIC_BASE 0
-
-#define IRQ_LOCALTIMER 29
-
-/* Primary Interrupt Controller */
-#define INT_PRI_BASE (INT_GIC_BASE + 32)
-#define INT_TMR1 (INT_PRI_BASE + 0)
-#define INT_TMR2 (INT_PRI_BASE + 1)
-#define INT_RTC (INT_PRI_BASE + 2)
-#define INT_I2S2 (INT_PRI_BASE + 3)
-#define INT_SHR_SEM_INBOX_IBF (INT_PRI_BASE + 4)
-#define INT_SHR_SEM_INBOX_IBE (INT_PRI_BASE + 5)
-#define INT_SHR_SEM_OUTBOX_IBF (INT_PRI_BASE + 6)
-#define INT_SHR_SEM_OUTBOX_IBE (INT_PRI_BASE + 7)
-#define INT_VDE_UCQ_ERROR (INT_PRI_BASE + 8)
-#define INT_VDE_SYNC_TOKEN (INT_PRI_BASE + 9)
-#define INT_VDE_BSE_V (INT_PRI_BASE + 10)
-#define INT_VDE_BSE_A (INT_PRI_BASE + 11)
-#define INT_VDE_SXE (INT_PRI_BASE + 12)
-#define INT_I2S1 (INT_PRI_BASE + 13)
-#define INT_SDMMC1 (INT_PRI_BASE + 14)
-#define INT_SDMMC2 (INT_PRI_BASE + 15)
-#define INT_XIO (INT_PRI_BASE + 16)
-#define INT_VDE (INT_PRI_BASE + 17)
-#define INT_AVP_UCQ (INT_PRI_BASE + 18)
-#define INT_SDMMC3 (INT_PRI_BASE + 19)
-#define INT_USB (INT_PRI_BASE + 20)
-#define INT_USB2 (INT_PRI_BASE + 21)
-#define INT_PRI_RES_22 (INT_PRI_BASE + 22)
-#define INT_EIDE (INT_PRI_BASE + 23)
-#define INT_NANDFLASH (INT_PRI_BASE + 24)
-#define INT_VCP (INT_PRI_BASE + 25)
-#define INT_APB_DMA (INT_PRI_BASE + 26)
-#define INT_AHB_DMA (INT_PRI_BASE + 27)
-#define INT_GNT_0 (INT_PRI_BASE + 28)
-#define INT_GNT_1 (INT_PRI_BASE + 29)
-#define INT_OWR (INT_PRI_BASE + 30)
-#define INT_SDMMC4 (INT_PRI_BASE + 31)
-
-/* Secondary Interrupt Controller */
-#define INT_SEC_BASE (INT_PRI_BASE + 32)
-#define INT_GPIO1 (INT_SEC_BASE + 0)
-#define INT_GPIO2 (INT_SEC_BASE + 1)
-#define INT_GPIO3 (INT_SEC_BASE + 2)
-#define INT_GPIO4 (INT_SEC_BASE + 3)
-#define INT_UARTA (INT_SEC_BASE + 4)
-#define INT_UARTB (INT_SEC_BASE + 5)
-#define INT_I2C (INT_SEC_BASE + 6)
-#define INT_SPI (INT_SEC_BASE + 7)
-#define INT_TWC (INT_SEC_BASE + 8)
-#define INT_TMR3 (INT_SEC_BASE + 9)
-#define INT_TMR4 (INT_SEC_BASE + 10)
-#define INT_FLOW_RSM0 (INT_SEC_BASE + 11)
-#define INT_FLOW_RSM1 (INT_SEC_BASE + 12)
-#define INT_SPDIF (INT_SEC_BASE + 13)
-#define INT_UARTC (INT_SEC_BASE + 14)
-#define INT_MIPI (INT_SEC_BASE + 15)
-#define INT_EVENTA (INT_SEC_BASE + 16)
-#define INT_EVENTB (INT_SEC_BASE + 17)
-#define INT_EVENTC (INT_SEC_BASE + 18)
-#define INT_EVENTD (INT_SEC_BASE + 19)
-#define INT_VFIR (INT_SEC_BASE + 20)
-#define INT_DVC (INT_SEC_BASE + 21)
-#define INT_SYS_STATS_MON (INT_SEC_BASE + 22)
-#define INT_GPIO5 (INT_SEC_BASE + 23)
-#define INT_CPU0_PMU_INTR (INT_SEC_BASE + 24)
-#define INT_CPU1_PMU_INTR (INT_SEC_BASE + 25)
-#define INT_SEC_RES_26 (INT_SEC_BASE + 26)
-#define INT_S_LINK1 (INT_SEC_BASE + 27)
-#define INT_APB_DMA_COP (INT_SEC_BASE + 28)
-#define INT_AHB_DMA_COP (INT_SEC_BASE + 29)
-#define INT_DMA_TX (INT_SEC_BASE + 30)
-#define INT_DMA_RX (INT_SEC_BASE + 31)
-
-/* Tertiary Interrupt Controller */
-#define INT_TRI_BASE (INT_SEC_BASE + 32)
-#define INT_HOST1X_COP_SYNCPT (INT_TRI_BASE + 0)
-#define INT_HOST1X_MPCORE_SYNCPT (INT_TRI_BASE + 1)
-#define INT_HOST1X_COP_GENERAL (INT_TRI_BASE + 2)
-#define INT_HOST1X_MPCORE_GENERAL (INT_TRI_BASE + 3)
-#define INT_MPE_GENERAL (INT_TRI_BASE + 4)
-#define INT_VI_GENERAL (INT_TRI_BASE + 5)
-#define INT_EPP_GENERAL (INT_TRI_BASE + 6)
-#define INT_ISP_GENERAL (INT_TRI_BASE + 7)
-#define INT_2D_GENERAL (INT_TRI_BASE + 8)
-#define INT_DISPLAY_GENERAL (INT_TRI_BASE + 9)
-#define INT_DISPLAY_B_GENERAL (INT_TRI_BASE + 10)
-#define INT_HDMI (INT_TRI_BASE + 11)
-#define INT_TVO_GENERAL (INT_TRI_BASE + 12)
-#define INT_MC_GENERAL (INT_TRI_BASE + 13)
-#define INT_EMC_GENERAL (INT_TRI_BASE + 14)
-#define INT_TRI_RES_15 (INT_TRI_BASE + 15)
-#define INT_TRI_RES_16 (INT_TRI_BASE + 16)
-#define INT_AC97 (INT_TRI_BASE + 17)
-#define INT_SPI_2 (INT_TRI_BASE + 18)
-#define INT_SPI_3 (INT_TRI_BASE + 19)
-#define INT_I2C2 (INT_TRI_BASE + 20)
-#define INT_KBC (INT_TRI_BASE + 21)
-#define INT_EXTERNAL_PMU (INT_TRI_BASE + 22)
-#define INT_GPIO6 (INT_TRI_BASE + 23)
-#define INT_TVDAC (INT_TRI_BASE + 24)
-#define INT_GPIO7 (INT_TRI_BASE + 25)
-#define INT_UARTD (INT_TRI_BASE + 26)
-#define INT_UARTE (INT_TRI_BASE + 27)
-#define INT_I2C3 (INT_TRI_BASE + 28)
-#define INT_SPI_4 (INT_TRI_BASE + 29)
-#define INT_TRI_RES_30 (INT_TRI_BASE + 30)
-#define INT_SW_RESERVED (INT_TRI_BASE + 31)
-
-/* Quaternary Interrupt Controller */
-#define INT_QUAD_BASE (INT_TRI_BASE + 32)
-#define INT_SNOR (INT_QUAD_BASE + 0)
-#define INT_USB3 (INT_QUAD_BASE + 1)
-#define INT_PCIE_INTR (INT_QUAD_BASE + 2)
-#define INT_PCIE_MSI (INT_QUAD_BASE + 3)
-#define INT_QUAD_RES_4 (INT_QUAD_BASE + 4)
-#define INT_QUAD_RES_5 (INT_QUAD_BASE + 5)
-#define INT_QUAD_RES_6 (INT_QUAD_BASE + 6)
-#define INT_QUAD_RES_7 (INT_QUAD_BASE + 7)
-#define INT_APB_DMA_CH0 (INT_QUAD_BASE + 8)
-#define INT_APB_DMA_CH1 (INT_QUAD_BASE + 9)
-#define INT_APB_DMA_CH2 (INT_QUAD_BASE + 10)
-#define INT_APB_DMA_CH3 (INT_QUAD_BASE + 11)
-#define INT_APB_DMA_CH4 (INT_QUAD_BASE + 12)
-#define INT_APB_DMA_CH5 (INT_QUAD_BASE + 13)
-#define INT_APB_DMA_CH6 (INT_QUAD_BASE + 14)
-#define INT_APB_DMA_CH7 (INT_QUAD_BASE + 15)
-#define INT_APB_DMA_CH8 (INT_QUAD_BASE + 16)
-#define INT_APB_DMA_CH9 (INT_QUAD_BASE + 17)
-#define INT_APB_DMA_CH10 (INT_QUAD_BASE + 18)
-#define INT_APB_DMA_CH11 (INT_QUAD_BASE + 19)
-#define INT_APB_DMA_CH12 (INT_QUAD_BASE + 20)
-#define INT_APB_DMA_CH13 (INT_QUAD_BASE + 21)
-#define INT_APB_DMA_CH14 (INT_QUAD_BASE + 22)
-#define INT_APB_DMA_CH15 (INT_QUAD_BASE + 23)
-#define INT_QUAD_RES_24 (INT_QUAD_BASE + 24)
-#define INT_QUAD_RES_25 (INT_QUAD_BASE + 25)
-#define INT_QUAD_RES_26 (INT_QUAD_BASE + 26)
-#define INT_QUAD_RES_27 (INT_QUAD_BASE + 27)
-#define INT_QUAD_RES_28 (INT_QUAD_BASE + 28)
-#define INT_QUAD_RES_29 (INT_QUAD_BASE + 29)
-#define INT_QUAD_RES_30 (INT_QUAD_BASE + 30)
-#define INT_QUAD_RES_31 (INT_QUAD_BASE + 31)
-
-/* Tegra30 has 5 banks of 32 IRQs */
-#define INT_MAIN_NR (32 * 5)
-#define INT_GPIO_BASE (INT_PRI_BASE + INT_MAIN_NR)
-
-/* Tegra30 has 8 banks of 32 GPIOs */
-#define INT_GPIO_NR (32 * 8)
-
-#define TEGRA_NR_IRQS (INT_GPIO_BASE + INT_GPIO_NR)
-
-#define INT_BOARD_BASE TEGRA_NR_IRQS
-#define NR_BOARD_IRQS 32
-
-#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS)
-
-#endif
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 27725750ca3..485003f9b63 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -29,7 +29,6 @@
#include <linux/serial_reg.h>
#include "../../iomap.h"
-#include "../../irammap.h"
#define BIT(x) (1 << (x))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -52,17 +51,6 @@ static inline void flush(void)
{
}
-static inline void save_uart_address(void)
-{
- u32 *buf = (u32 *)(TEGRA_IRAM_BASE + TEGRA_IRAM_DEBUG_UART_OFFSET);
-
- if (uart) {
- buf[0] = TEGRA_IRAM_DEBUG_UART_COOKIE;
- buf[1] = (u32)uart;
- } else
- buf[0] = 0;
-}
-
static const struct {
u32 base;
u32 reset_reg;
@@ -139,51 +127,19 @@ int auto_odmdata(void)
}
#endif
-#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
-int auto_scratch(void)
-{
- int i;
-
- /*
- * Look for the first UART that:
- * a) Is not in reset.
- * b) Is clocked.
- * c) Has a 'D' in the scratchpad register.
- *
- * Note that on Tegra30, the first two conditions are required, since
- * if not true, accesses to the UART scratch register will hang.
- * Tegra20 doesn't have this issue.
- *
- * The intent is that the bootloader will tell the kernel which UART
- * to use by setting up those conditions. If nothing found, we'll fall
- * back to what's specified in TEGRA_DEBUG_UART_BASE.
- */
- for (i = 0; i < ARRAY_SIZE(uarts); i++) {
- if (!uart_clocked(i))
- continue;
-
- uart = (volatile u8 *)uarts[i].base;
- if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
- continue;
-
- return i;
- }
-
- return -1;
-}
-#endif
-
/*
* Setup before decompression. This is where we do UART selection for
* earlyprintk and init the uart_base register.
*/
static inline void arch_decomp_setup(void)
{
- int uart_id, auto_uart_id;
+ int uart_id;
volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
u32 chip, div;
-#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ uart_id = auto_odmdata();
+#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
uart_id = 0;
#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
uart_id = 1;
@@ -193,19 +149,7 @@ static inline void arch_decomp_setup(void)
uart_id = 3;
#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
uart_id = 4;
-#else
- uart_id = -1;
-#endif
-
-#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
- auto_uart_id = auto_odmdata();
-#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
- auto_uart_id = auto_scratch();
-#else
- auto_uart_id = -1;
#endif
- if (auto_uart_id != -1)
- uart_id = auto_uart_id;
if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
!uart_clocked(uart_id))
@@ -213,7 +157,6 @@ static inline void arch_decomp_setup(void)
else
uart = (volatile u8 *)uarts[uart_id].base;
- save_uart_address();
if (uart == NULL)
return;
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index 7d09f301b3a..bb9c9c29d18 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -59,5 +59,6 @@ static struct map_desc tegra_io_desc[] __initdata = {
void __init tegra_map_common_io(void)
{
+ debug_ll_io_init();
iotable_init(tegra_io_desc, ARRAY_SIZE(tegra_io_desc));
}
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 53151030a07..db8be51cad8 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -261,20 +261,6 @@
#define TEGRA_SDMMC4_BASE 0xC8000600
#define TEGRA_SDMMC4_SIZE SZ_512
-#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
-# define TEGRA_DEBUG_UART_BASE 0
-#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
-# define TEGRA_DEBUG_UART_BASE TEGRA_UARTA_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
-# define TEGRA_DEBUG_UART_BASE TEGRA_UARTB_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
-# define TEGRA_DEBUG_UART_BASE TEGRA_UARTC_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
-# define TEGRA_DEBUG_UART_BASE TEGRA_UARTD_BASE
-#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
-# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE
-#endif
-
/* On TEGRA, many peripherals are very closely packed in
* two 256MB io windows (that actually only use about 64KB
* at the start of each).
diff --git a/arch/arm/mach-tegra/irammap.h b/arch/arm/mach-tegra/irammap.h
index 0cbe6326185..501952a8434 100644
--- a/arch/arm/mach-tegra/irammap.h
+++ b/arch/arm/mach-tegra/irammap.h
@@ -23,13 +23,4 @@
#define TEGRA_IRAM_RESET_HANDLER_OFFSET 0
#define TEGRA_IRAM_RESET_HANDLER_SIZE SZ_1K
-/*
- * These locations are written to by uncompress.h, and read by debug-macro.S.
- * The first word holds the cookie value if the data is valid. The second
- * word holds the UART physical address.
- */
-#define TEGRA_IRAM_DEBUG_UART_OFFSET SZ_1K
-#define TEGRA_IRAM_DEBUG_UART_SIZE 8
-#define TEGRA_IRAM_DEBUG_UART_COOKIE 0x55415254
-
#endif
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index f18fc3ab4e5..53d08587179 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -43,6 +43,9 @@
#include "board.h"
#include "iomap.h"
+/* Hack - need to parse this from DT */
+#define INT_PCIE_INTR 130
+
/* register definitions */
#define AFI_OFFSET 0x3800
#define PADS_OFFSET 0x3000
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index efc000e32e1..d7147779f8e 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -2045,9 +2045,7 @@ struct clk_ops tegra30_periph_clk_ops = {
static int tegra30_dsib_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct clk *d = clk_get_sys(NULL, "pll_d");
- /* The DSIB parent selection bit is in PLLD base
- register - can not do direct r-m-w, must be
- protected by PLLD lock */
+ /* The DSIB parent selection bit is in PLLD base register */
tegra_clk_cfg_ex(
d, TEGRA_CLK_PLLD_MIPI_MUX_SEL, index);
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 6ff50353651..e4863f3e9ee 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -26,16 +26,14 @@
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/mach/time.h>
#include <asm/smp_twd.h>
#include <asm/sched_clock.h>
-#include <mach/irqs.h>
-
#include "board.h"
-#include "clock.h"
-#include "iomap.h"
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
@@ -53,8 +51,8 @@
#define TIMER_PTV 0x0
#define TIMER_PCR 0x4
-static void __iomem *timer_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
-static void __iomem *rtc_base = IO_ADDRESS(TEGRA_RTC_BASE);
+static void __iomem *timer_reg_base;
+static void __iomem *rtc_base;
static struct timespec persistent_ts;
static u64 persistent_ms, last_persistent_ms;
@@ -158,40 +156,66 @@ static struct irqaction tegra_timer_irq = {
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
.handler = tegra_timer_interrupt,
.dev_id = &tegra_clockevent,
- .irq = INT_TMR3,
};
-#ifdef CONFIG_HAVE_ARM_TWD
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
- TEGRA_ARM_PERIF_BASE + 0x600,
- IRQ_LOCALTIMER);
+static const struct of_device_id timer_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-timer" },
+ {}
+};
-static void __init tegra_twd_init(void)
-{
- int err = twd_local_timer_register(&twd_local_timer);
- if (err)
- pr_err("twd_local_timer_register failed %d\n", err);
-}
-#else
-#define tegra_twd_init() do {} while(0)
-#endif
+static const struct of_device_id rtc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-rtc" },
+ {}
+};
static void __init tegra_init_timer(void)
{
+ struct device_node *np;
struct clk *clk;
unsigned long rate;
int ret;
+ np = of_find_matching_node(NULL, timer_match);
+ if (!np) {
+ pr_err("Failed to find timer DT node\n");
+ BUG();
+ }
+
+ timer_reg_base = of_iomap(np, 0);
+ if (!timer_reg_base) {
+ pr_err("Can't map timer registers");
+ BUG();
+ }
+
+ tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+ if (tegra_timer_irq.irq <= 0) {
+ pr_err("Failed to map timer IRQ\n");
+ BUG();
+ }
+
clk = clk_get_sys("timer", NULL);
if (IS_ERR(clk)) {
- pr_warn("Unable to get timer clock."
- " Assuming 12Mhz input clock.\n");
+ pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
rate = 12000000;
} else {
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
}
+ of_node_put(np);
+
+ np = of_find_matching_node(NULL, rtc_match);
+ if (!np) {
+ pr_err("Failed to find RTC DT node\n");
+ BUG();
+ }
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base) {
+ pr_err("Can't map RTC registers");
+ BUG();
+ }
+
/*
* rtc registers are used by read_persistent_clock, keep the rtc clock
* enabled
@@ -202,6 +226,8 @@ static void __init tegra_init_timer(void)
else
clk_prepare_enable(clk);
+ of_node_put(np);
+
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@@ -223,13 +249,13 @@ static void __init tegra_init_timer(void)
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
- printk(KERN_ERR "Failed to register clocksource\n");
+ pr_err("Failed to register clocksource\n");
BUG();
}
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
- printk(KERN_ERR "Failed to register timer IRQ: %d\n", ret);
+ pr_err("Failed to register timer IRQ: %d\n", ret);
BUG();
}
@@ -241,7 +267,9 @@ static void __init tegra_init_timer(void)
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_register_device(&tegra_clockevent);
- tegra_twd_init();
+#ifdef CONFIG_HAVE_ARM_TWD
+ twd_local_timer_of_register();
+#endif
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 12f3994c43d..4ce77cdc31c 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -27,7 +27,6 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/fsmc.h>
#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/clk-u300.h>
@@ -250,6 +249,18 @@ static struct resource rtc_resources[] = {
*/
static struct resource fsmc_resources[] = {
{
+ .name = "nand_addr",
+ .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE,
+ .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "nand_cmd",
+ .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE,
+ .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
.name = "nand_data",
.start = U300_NAND_CS0_PHYS_BASE,
.end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1,
@@ -1492,8 +1503,6 @@ static struct fsmc_nand_platform_data nand_platform_data = {
.nr_partitions = ARRAY_SIZE(u300_partitions),
.options = NAND_SKIP_BBTSCAN,
.width = FSMC_NAND_BW8,
- .ale_off = PLAT_NAND_ALE,
- .cle_off = PLAT_NAND_CLE,
};
static struct platform_device nand_device = {
@@ -1543,39 +1552,6 @@ static struct pinctrl_map __initdata u300_pinmux_map[] = {
pin_highz_conf),
};
-struct u300_mux_hog {
- struct device *dev;
- struct pinctrl *p;
-};
-
-static struct u300_mux_hog u300_mux_hogs[] = {
- {
- .dev = &uart0_device.dev,
- },
- {
- .dev = &mmcsd_device.dev,
- },
-};
-
-static int __init u300_pinctrl_fetch(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(u300_mux_hogs); i++) {
- struct pinctrl *p;
-
- p = pinctrl_get_select_default(u300_mux_hogs[i].dev);
- if (IS_ERR(p)) {
- pr_err("u300: could not get pinmux hog for dev %s\n",
- dev_name(u300_mux_hogs[i].dev));
- continue;
- }
- u300_mux_hogs[i].p = p;
- }
- return 0;
-}
-subsys_initcall(u300_pinctrl_fetch);
-
/*
* Notice that AMBA devices are initialized before platform devices.
*
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index e8c3f0d70ca..5dea90636d9 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -7,8 +7,8 @@ config UX500_SOC_COMMON
select ARM_ERRATA_764369 if SMP
select ARM_GIC
select CACHE_L2X0
+ select CLKSRC_NOMADIK_MTU
select COMMON_CLK
- select HAS_MTU
select PINCTRL
select PINCTRL_NOMADIK
select PL310_ERRATA_753970 if CACHE_PL310
diff --git a/arch/arm/mach-ux500/board-mop500-audio.c b/arch/arm/mach-ux500/board-mop500-audio.c
index 33631c9f121..7209db7cdc7 100644
--- a/arch/arm/mach-ux500/board-mop500-audio.c
+++ b/arch/arm/mach-ux500/board-mop500-audio.c
@@ -8,8 +8,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_data/pinctrl-nomadik.h>
-
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <mach/devices.h>
#include <mach/hardware.h>
@@ -149,15 +148,6 @@ static struct platform_device snd_soc_mop500 = {
},
};
-/* Platform device for Ux500-PCM */
-static struct platform_device ux500_pcm = {
- .name = "ux500-pcm",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- },
-};
-
struct msp_i2s_platform_data msp2_platform_data = {
.id = MSP_I2S_2,
.msp_i2s_dma_rx = &msp2_dma_rx,
@@ -185,10 +175,3 @@ void mop500_audio_init(struct device *parent)
db8500_add_msp_i2s(parent, 3, U8500_MSP3_BASE, IRQ_DB8500_MSP1,
&msp3_platform_data);
}
-
-/* Due for removal once the MSP driver has been fully DT:ed. */
-void mop500_of_audio_init(struct device *parent)
-{
- pr_info("%s: Register platform-device 'ux500-pcm'\n", __func__);
- platform_device_register(&ux500_pcm);
-}
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index c34d4efd0d5..0a3f30df1eb 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -33,8 +33,6 @@ BIAS(in_nopull, PIN_INPUT_NOPULL);
BIAS(in_nopull_slpm_nowkup, PIN_INPUT_NOPULL|PIN_SLPM_WAKEUP_DISABLE);
BIAS(in_pu, PIN_INPUT_PULLUP);
BIAS(in_pd, PIN_INPUT_PULLDOWN);
-BIAS(in_pd_slpm_in_pu, PIN_INPUT_PULLDOWN|PIN_SLPM_INPUT_PULLUP);
-BIAS(in_pu_slpm_out_lo, PIN_INPUT_PULLUP|PIN_SLPM_OUTPUT_LOW);
BIAS(out_hi, PIN_OUTPUT_HIGH);
BIAS(out_lo, PIN_OUTPUT_LOW);
BIAS(out_lo_slpm_nowkup, PIN_OUTPUT_LOW|PIN_SLPM_WAKEUP_DISABLE);
@@ -46,14 +44,34 @@ BIAS(gpio_in_pd_slpm_gpio_nopull, PIN_INPUT_PULLDOWN|PIN_GPIOMODE_ENABLED|PIN_SL
BIAS(gpio_out_hi, PIN_OUTPUT_HIGH|PIN_GPIOMODE_ENABLED);
BIAS(gpio_out_lo, PIN_OUTPUT_LOW|PIN_GPIOMODE_ENABLED);
/* Sleep modes */
-BIAS(slpm_in_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(slpm_in_nopull_wkup, PIN_SLEEPMODE_ENABLED|PIN_SLPM_DIR_INPUT|PIN_SLPM_PULL_NONE|PIN_SLPM_WAKEUP_ENABLE);
-BIAS(slpm_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(slpm_out_hi_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_HIGH|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(slpm_out_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(slpm_out_lo_wkup, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE);
-BIAS(slpm_out_lo_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(slpm_in_nopull_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_INPUT_NOPULL|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_nopull_wkup, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_DIR_INPUT|PIN_SLPM_PULL_NONE|PIN_SLPM_WAKEUP_ENABLE);
+BIAS(slpm_in_wkup_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_wkup_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_lo_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_DISABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_lo_wkup, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE);
+BIAS(slpm_out_lo_wkup_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_hi_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_HIGH|
+ PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_nopull_wkup_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_INPUT_NOPULL|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_pu_wkup_pdis_en, PIN_SLEEPMODE_ENABLED|PIN_SLPM_INPUT_PULLUP|
+ PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_ENABLED);
+BIAS(slpm_out_wkup_pdis, PIN_SLEEPMODE_ENABLED|
+ PIN_SLPM_DIR_OUTPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(out_lo_wkup_pdis, PIN_SLPM_OUTPUT_LOW|
+ PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(in_wkup_pdis_en, PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|
+ PIN_SLPM_PDIS_ENABLED);
+BIAS(in_wkup_pdis, PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|
+ PIN_SLPM_PDIS_DISABLED);
+BIAS(out_wkup_pdis, PIN_SLPM_DIR_OUTPUT|PIN_SLPM_WAKEUP_ENABLE|
+ PIN_SLPM_PDIS_DISABLED);
/* We use these to define hog settings that are always done on boot */
#define DB8500_MUX_HOG(group,func) \
@@ -69,13 +87,16 @@ BIAS(slpm_in_nopull_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_INPUT_NOPULL|PIN_S
PIN_MAP_MUX_GROUP_DEFAULT(dev, "pinctrl-db8500", group, func)
#define DB8500_PIN(pin,conf,dev) \
PIN_MAP_CONFIGS_PIN_DEFAULT(dev, "pinctrl-db8500", pin, conf)
-#define DB8500_PIN_SLEEP(pin, conf, dev) \
- PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_SLEEP, "pinctrl-db8500", \
+#define DB8500_PIN_IDLE(pin, conf, dev) \
+ PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_IDLE, "pinctrl-db8500", \
pin, conf)
-
-#define DB8500_PIN_SLEEP(pin,conf,dev) \
+#define DB8500_PIN_SLEEP(pin, conf, dev) \
PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_SLEEP, "pinctrl-db8500", \
pin, conf)
+#define DB8500_MUX_STATE(group, func, dev, state) \
+ PIN_MAP_MUX_GROUP(dev, state, "pinctrl-db8500", group, func)
+#define DB8500_PIN_STATE(pin, conf, dev, state) \
+ PIN_MAP_CONFIGS_PIN(dev, state, "pinctrl-db8500", pin, conf)
/* Pin control settings */
static struct pinctrl_map __initdata mop500_family_pinmap[] = {
@@ -112,7 +133,7 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
* UART0, we do not mux in u0 here.
* uart-0 pins gpio configuration should be kept intact to prevent
* a glitch in tx line when the tty dev is opened. Later these pins
- * are configured to uart mop500_pins_uart0
+ * are configured by the UART driver
*/
DB8500_PIN_HOG("GPIO0_AJ5", in_pu), /* CTS */
DB8500_PIN_HOG("GPIO1_AJ3", out_hi), /* RTS */
@@ -123,12 +144,13 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
* TODO: is this used on U8500 variants and Snowball really?
* The setting on GPIO31 conflicts with magnetometer use on hrefv60
*/
- DB8500_MUX_HOG("u2rxtx_c_1", "u2"),
- DB8500_MUX_HOG("u2ctsrts_c_1", "u2"),
- DB8500_PIN_HOG("GPIO29_W2", in_pu), /* RXD */
- DB8500_PIN_HOG("GPIO30_W3", out_hi), /* TXD */
- DB8500_PIN_HOG("GPIO31_V3", in_pu), /* CTS */
- DB8500_PIN_HOG("GPIO32_V2", out_hi), /* RTS */
+ /* Default state for UART2 */
+ DB8500_MUX("u2rxtx_c_1", "u2", "uart2"),
+ DB8500_PIN("GPIO29_W2", in_pu, "uart2"), /* RXD */
+ DB8500_PIN("GPIO30_W3", out_hi, "uart2"), /* TXD */
+ /* Sleep state for UART2 */
+ DB8500_PIN_SLEEP("GPIO29_W2", in_wkup_pdis, "uart2"),
+ DB8500_PIN_SLEEP("GPIO30_W3", out_wkup_pdis, "uart2"),
/*
* The following pin sets were known as "runtime pins" before being
* converted to the pinctrl model. Here we model them as "default"
@@ -140,11 +162,18 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO1_AJ3", out_hi, "uart0"), /* RTS */
DB8500_PIN("GPIO2_AH4", in_pu, "uart0"), /* RXD */
DB8500_PIN("GPIO3_AH3", out_hi, "uart0"), /* TXD */
- /* UART0 sleep state */
+ /* Sleep state for UART0 */
DB8500_PIN_SLEEP("GPIO0_AJ5", slpm_in_wkup_pdis, "uart0"),
DB8500_PIN_SLEEP("GPIO1_AJ3", slpm_out_hi_wkup_pdis, "uart0"),
DB8500_PIN_SLEEP("GPIO2_AH4", slpm_in_wkup_pdis, "uart0"),
DB8500_PIN_SLEEP("GPIO3_AH3", slpm_out_wkup_pdis, "uart0"),
+ /* Mux in UART1 after initialization */
+ DB8500_MUX("u1rxtx_a_1", "u1", "uart1"),
+ DB8500_PIN("GPIO4_AH6", in_pu, "uart1"), /* RXD */
+ DB8500_PIN("GPIO5_AG6", out_hi, "uart1"), /* TXD */
+ /* Sleep state for UART1 */
+ DB8500_PIN_SLEEP("GPIO4_AH6", slpm_in_wkup_pdis, "uart1"),
+ DB8500_PIN_SLEEP("GPIO5_AG6", slpm_out_wkup_pdis, "uart1"),
/* MSP1 for ALSA codec */
DB8500_MUX("msp1txrx_a_1", "msp1", "ux500-msp-i2s.1"),
DB8500_MUX("msp1_a_1", "msp1", "ux500-msp-i2s.1"),
@@ -161,7 +190,10 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_MUX("lcd_d8_d11_a_1", "lcd", "mcde-tvout"),
DB8500_MUX("lcdaclk_b_1", "lcda", "mcde-tvout"),
/* Mux in LCD VSI1 and pull it up for MCDE HDMI output */
- DB8500_MUX("lcdvsi1_a_1", "lcd", "av8100-hdmi"),
+ DB8500_MUX("lcdvsi1_a_1", "lcd", "0-0070"),
+ DB8500_PIN("GPIO69_E2", in_pu, "0-0070"),
+ /* LCD VSI1 sleep state */
+ DB8500_PIN_SLEEP("GPIO69_E2", slpm_in_wkup_pdis, "0-0070"),
/* Mux in i2c0 block, default state */
DB8500_MUX("i2c0_a_1", "i2c0", "nmk-i2c.0"),
/* i2c0 sleep state */
@@ -194,6 +226,18 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO26_Y2", in_pu, "sdi0"), /* DAT1 */
DB8500_PIN("GPIO27_AA2", in_pu, "sdi0"), /* DAT2 */
DB8500_PIN("GPIO28_AA1", in_pu, "sdi0"), /* DAT3 */
+ /* SDI0 sleep state */
+ DB8500_PIN_SLEEP("GPIO18_AC2", slpm_out_hi_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO19_AC1", slpm_out_hi_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO20_AB4", slpm_out_hi_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO22_AA3", slpm_in_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO23_AA4", slpm_out_lo_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO24_AB2", slpm_in_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO25_Y4", slpm_in_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO26_Y2", slpm_in_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO27_AA2", slpm_in_wkup_pdis, "sdi0"),
+ DB8500_PIN_SLEEP("GPIO28_AA1", slpm_in_wkup_pdis, "sdi0"),
+
/* Mux in SDI1 (here called MC1) used for SDIO for CW1200 WLAN */
DB8500_MUX("mc1_a_1", "mc1", "sdi1"),
DB8500_PIN("GPIO208_AH16", out_lo, "sdi1"), /* CLK */
@@ -203,6 +247,15 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO212_AF13", in_pu, "sdi1"), /* DAT1 */
DB8500_PIN("GPIO213_AG13", in_pu, "sdi1"), /* DAT2 */
DB8500_PIN("GPIO214_AH15", in_pu, "sdi1"), /* DAT3 */
+ /* SDI1 sleep state */
+ DB8500_PIN_SLEEP("GPIO208_AH16", slpm_out_lo_wkup_pdis, "sdi1"), /* CLK */
+ DB8500_PIN_SLEEP("GPIO209_AG15", slpm_in_wkup_pdis, "sdi1"), /* FBCLK */
+ DB8500_PIN_SLEEP("GPIO210_AJ15", slpm_in_wkup_pdis, "sdi1"), /* CMD */
+ DB8500_PIN_SLEEP("GPIO211_AG14", slpm_in_wkup_pdis, "sdi1"), /* DAT0 */
+ DB8500_PIN_SLEEP("GPIO212_AF13", slpm_in_wkup_pdis, "sdi1"), /* DAT1 */
+ DB8500_PIN_SLEEP("GPIO213_AG13", slpm_in_wkup_pdis, "sdi1"), /* DAT2 */
+ DB8500_PIN_SLEEP("GPIO214_AH15", slpm_in_wkup_pdis, "sdi1"), /* DAT3 */
+
 /* Mux in SDI2 (here called MC2) used for PoP eMMC */
DB8500_MUX("mc2_a_1", "mc2", "sdi2"),
DB8500_PIN("GPIO128_A5", out_lo, "sdi2"), /* CLK */
@@ -216,6 +269,19 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO136_C7", in_pu, "sdi2"), /* DAT5 */
DB8500_PIN("GPIO137_A7", in_pu, "sdi2"), /* DAT6 */
DB8500_PIN("GPIO138_C5", in_pu, "sdi2"), /* DAT7 */
+ /* SDI2 sleep state */
+ DB8500_PIN_SLEEP("GPIO128_A5", out_lo_wkup_pdis, "sdi2"), /* CLK */
+ DB8500_PIN_SLEEP("GPIO129_B4", in_wkup_pdis_en, "sdi2"), /* CMD */
+ DB8500_PIN_SLEEP("GPIO130_C8", in_wkup_pdis_en, "sdi2"), /* FBCLK */
+ DB8500_PIN_SLEEP("GPIO131_A12", in_wkup_pdis, "sdi2"), /* DAT0 */
+ DB8500_PIN_SLEEP("GPIO132_C10", in_wkup_pdis, "sdi2"), /* DAT1 */
+ DB8500_PIN_SLEEP("GPIO133_B10", in_wkup_pdis, "sdi2"), /* DAT2 */
+ DB8500_PIN_SLEEP("GPIO134_B9", in_wkup_pdis, "sdi2"), /* DAT3 */
+ DB8500_PIN_SLEEP("GPIO135_A9", in_wkup_pdis, "sdi2"), /* DAT4 */
+ DB8500_PIN_SLEEP("GPIO136_C7", in_wkup_pdis, "sdi2"), /* DAT5 */
+ DB8500_PIN_SLEEP("GPIO137_A7", in_wkup_pdis, "sdi2"), /* DAT6 */
+ DB8500_PIN_SLEEP("GPIO138_C5", in_wkup_pdis, "sdi2"), /* DAT7 */
+
 /* Mux in SDI4 (here called MC4) used for PCB-mounted eMMC */
DB8500_MUX("mc4_a_1", "mc4", "sdi4"),
DB8500_PIN("GPIO197_AH24", in_pu, "sdi4"), /* DAT3 */
@@ -229,6 +295,19 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO205_AG23", in_pu, "sdi4"), /* DAT6 */
DB8500_PIN("GPIO206_AG24", in_pu, "sdi4"), /* DAT5 */
DB8500_PIN("GPIO207_AJ23", in_pu, "sdi4"), /* DAT4 */
+ /* SDI4 sleep state */
+ DB8500_PIN_SLEEP("GPIO197_AH24", slpm_in_wkup_pdis, "sdi4"), /* DAT3 */
+ DB8500_PIN_SLEEP("GPIO198_AG25", slpm_in_wkup_pdis, "sdi4"), /* DAT2 */
+ DB8500_PIN_SLEEP("GPIO199_AH23", slpm_in_wkup_pdis, "sdi4"), /* DAT1 */
+ DB8500_PIN_SLEEP("GPIO200_AH26", slpm_in_wkup_pdis, "sdi4"), /* DAT0 */
+ DB8500_PIN_SLEEP("GPIO201_AF24", slpm_in_wkup_pdis, "sdi4"), /* CMD */
+ DB8500_PIN_SLEEP("GPIO202_AF25", slpm_in_wkup_pdis, "sdi4"), /* FBCLK */
+ DB8500_PIN_SLEEP("GPIO203_AE23", slpm_out_lo_wkup_pdis, "sdi4"), /* CLK */
+ DB8500_PIN_SLEEP("GPIO204_AF23", slpm_in_wkup_pdis, "sdi4"), /* DAT7 */
+ DB8500_PIN_SLEEP("GPIO205_AG23", slpm_in_wkup_pdis, "sdi4"), /* DAT6 */
+ DB8500_PIN_SLEEP("GPIO206_AG24", slpm_in_wkup_pdis, "sdi4"), /* DAT5 */
+ DB8500_PIN_SLEEP("GPIO207_AJ23", slpm_in_wkup_pdis, "sdi4"), /* DAT4 */
+
/* Mux in USB pins, drive STP high */
DB8500_MUX("usb_a_1", "usb", "musb-ux500.0"),
DB8500_PIN("GPIO257_AE29", out_hi, "musb-ux500.0"), /* STP */
@@ -238,10 +317,232 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
DB8500_PIN("GPIO218_AH11", in_pd, "spi2"), /* RXD */
DB8500_PIN("GPIO215_AH13", out_lo, "spi2"), /* TXD */
DB8500_PIN("GPIO217_AH12", out_lo, "spi2"), /* CLK */
+ /* SPI2 idle state */
+ DB8500_PIN_SLEEP("GPIO218_AH11", slpm_in_wkup_pdis, "spi2"), /* RXD */
+ DB8500_PIN_SLEEP("GPIO215_AH13", slpm_out_lo_wkup_pdis, "spi2"), /* TXD */
+ DB8500_PIN_SLEEP("GPIO217_AH12", slpm_wkup_pdis, "spi2"), /* CLK */
/* SPI2 sleep state */
+ DB8500_PIN_SLEEP("GPIO216_AG12", slpm_in_wkup_pdis, "spi2"), /* FRM */
DB8500_PIN_SLEEP("GPIO218_AH11", slpm_in_wkup_pdis, "spi2"), /* RXD */
DB8500_PIN_SLEEP("GPIO215_AH13", slpm_out_lo_wkup_pdis, "spi2"), /* TXD */
DB8500_PIN_SLEEP("GPIO217_AH12", slpm_wkup_pdis, "spi2"), /* CLK */
+
+ /* ske default state */
+ DB8500_MUX("kp_a_2", "kp", "nmk-ske-keypad"),
+ DB8500_PIN("GPIO153_B17", in_pd, "nmk-ske-keypad"), /* I7 */
+ DB8500_PIN("GPIO154_C16", in_pd, "nmk-ske-keypad"), /* I6 */
+ DB8500_PIN("GPIO155_C19", in_pd, "nmk-ske-keypad"), /* I5 */
+ DB8500_PIN("GPIO156_C17", in_pd, "nmk-ske-keypad"), /* I4 */
+ DB8500_PIN("GPIO161_D21", in_pd, "nmk-ske-keypad"), /* I3 */
+ DB8500_PIN("GPIO162_D20", in_pd, "nmk-ske-keypad"), /* I2 */
+ DB8500_PIN("GPIO163_C20", in_pd, "nmk-ske-keypad"), /* I1 */
+ DB8500_PIN("GPIO164_B21", in_pd, "nmk-ske-keypad"), /* I0 */
+ DB8500_PIN("GPIO157_A18", out_lo, "nmk-ske-keypad"), /* O7 */
+ DB8500_PIN("GPIO158_C18", out_lo, "nmk-ske-keypad"), /* O6 */
+ DB8500_PIN("GPIO159_B19", out_lo, "nmk-ske-keypad"), /* O5 */
+ DB8500_PIN("GPIO160_B20", out_lo, "nmk-ske-keypad"), /* O4 */
+ DB8500_PIN("GPIO165_C21", out_lo, "nmk-ske-keypad"), /* O3 */
+ DB8500_PIN("GPIO166_A22", out_lo, "nmk-ske-keypad"), /* O2 */
+ DB8500_PIN("GPIO167_B24", out_lo, "nmk-ske-keypad"), /* O1 */
+ DB8500_PIN("GPIO168_C22", out_lo, "nmk-ske-keypad"), /* O0 */
+ /* ske sleep state */
+ DB8500_PIN_SLEEP("GPIO153_B17", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I7 */
+ DB8500_PIN_SLEEP("GPIO154_C16", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I6 */
+ DB8500_PIN_SLEEP("GPIO155_C19", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I5 */
+ DB8500_PIN_SLEEP("GPIO156_C17", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I4 */
+ DB8500_PIN_SLEEP("GPIO161_D21", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I3 */
+ DB8500_PIN_SLEEP("GPIO162_D20", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I2 */
+ DB8500_PIN_SLEEP("GPIO163_C20", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I1 */
+ DB8500_PIN_SLEEP("GPIO164_B21", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I0 */
+ DB8500_PIN_SLEEP("GPIO157_A18", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O7 */
+ DB8500_PIN_SLEEP("GPIO158_C18", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O6 */
+ DB8500_PIN_SLEEP("GPIO159_B19", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O5 */
+ DB8500_PIN_SLEEP("GPIO160_B20", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O4 */
+ DB8500_PIN_SLEEP("GPIO165_C21", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O3 */
+ DB8500_PIN_SLEEP("GPIO166_A22", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O2 */
+ DB8500_PIN_SLEEP("GPIO167_B24", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O1 */
+ DB8500_PIN_SLEEP("GPIO168_C22", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O0 */
+
+ /* STM APE pin states */
+ DB8500_MUX_STATE("stmape_c_1", "stmape",
+ "stm", "ape_mipi34"),
+ DB8500_PIN_STATE("GPIO70_G5", in_nopull,
+ "stm", "ape_mipi34"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", in_nopull,
+ "stm", "ape_mipi34"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", in_nopull,
+ "stm", "ape_mipi34"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", in_nopull,
+ "stm", "ape_mipi34"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", in_nopull,
+ "stm", "ape_mipi34"), /* dat0 */
+
+ DB8500_PIN_STATE("GPIO70_G5", slpm_out_lo_pdis,
+ "stm", "ape_mipi34_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", slpm_out_lo_pdis,
+ "stm", "ape_mipi34_sleep"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", slpm_out_lo_pdis,
+ "stm", "ape_mipi34_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", slpm_out_lo_pdis,
+ "stm", "ape_mipi34_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", slpm_out_lo_pdis,
+ "stm", "ape_mipi34_sleep"), /* dat0 */
+
+ DB8500_MUX_STATE("stmape_oc1_1", "stmape",
+ "stm", "ape_microsd"),
+ DB8500_PIN_STATE("GPIO23_AA4", in_nopull,
+ "stm", "ape_microsd"), /* clk */
+ DB8500_PIN_STATE("GPIO25_Y4", in_nopull,
+ "stm", "ape_microsd"), /* dat0 */
+ DB8500_PIN_STATE("GPIO26_Y2", in_nopull,
+ "stm", "ape_microsd"), /* dat1 */
+ DB8500_PIN_STATE("GPIO27_AA2", in_nopull,
+ "stm", "ape_microsd"), /* dat2 */
+ DB8500_PIN_STATE("GPIO28_AA1", in_nopull,
+ "stm", "ape_microsd"), /* dat3 */
+
+ DB8500_PIN_STATE("GPIO23_AA4", slpm_out_lo_wkup_pdis,
+ "stm", "ape_microsd_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO25_Y4", slpm_in_wkup_pdis,
+ "stm", "ape_microsd_sleep"), /* dat0 */
+ DB8500_PIN_STATE("GPIO26_Y2", slpm_in_wkup_pdis,
+ "stm", "ape_microsd_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO27_AA2", slpm_in_wkup_pdis,
+ "stm", "ape_microsd_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO28_AA1", slpm_in_wkup_pdis,
+ "stm", "ape_microsd_sleep"), /* dat3 */
+
+ /* STM Modem pin states */
+ DB8500_MUX_STATE("stmmod_oc3_2", "stmmod",
+ "stm", "mod_mipi34"),
+ DB8500_MUX_STATE("uartmodrx_oc3_1", "uartmod",
+ "stm", "mod_mipi34"),
+ DB8500_MUX_STATE("uartmodtx_oc3_1", "uartmod",
+ "stm", "mod_mipi34"),
+ DB8500_PIN_STATE("GPIO70_G5", in_nopull,
+ "stm", "mod_mipi34"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", in_nopull,
+ "stm", "mod_mipi34"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", in_nopull,
+ "stm", "mod_mipi34"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", in_nopull,
+ "stm", "mod_mipi34"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", in_nopull,
+ "stm", "mod_mipi34"), /* dat0 */
+ DB8500_PIN_STATE("GPIO75_H2", in_pu,
+ "stm", "mod_mipi34"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", out_lo,
+ "stm", "mod_mipi34"), /* uartmod tx */
+
+ DB8500_PIN_STATE("GPIO70_G5", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_sleep"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_sleep"), /* dat0 */
+ DB8500_PIN_STATE("GPIO75_H2", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_sleep"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", slpm_out_lo_wkup_pdis,
+ "stm", "mod_mipi34_sleep"), /* uartmod tx */
+
+ DB8500_MUX_STATE("stmmod_b_1", "stmmod",
+ "stm", "mod_microsd"),
+ DB8500_MUX_STATE("uartmodrx_oc3_1", "uartmod",
+ "stm", "mod_microsd"),
+ DB8500_MUX_STATE("uartmodtx_oc3_1", "uartmod",
+ "stm", "mod_microsd"),
+ DB8500_PIN_STATE("GPIO23_AA4", in_nopull,
+ "stm", "mod_microsd"), /* clk */
+ DB8500_PIN_STATE("GPIO25_Y4", in_nopull,
+ "stm", "mod_microsd"), /* dat0 */
+ DB8500_PIN_STATE("GPIO26_Y2", in_nopull,
+ "stm", "mod_microsd"), /* dat1 */
+ DB8500_PIN_STATE("GPIO27_AA2", in_nopull,
+ "stm", "mod_microsd"), /* dat2 */
+ DB8500_PIN_STATE("GPIO28_AA1", in_nopull,
+ "stm", "mod_microsd"), /* dat3 */
+ DB8500_PIN_STATE("GPIO75_H2", in_pu,
+ "stm", "mod_microsd"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", out_lo,
+ "stm", "mod_microsd"), /* uartmod tx */
+
+ DB8500_PIN_STATE("GPIO23_AA4", slpm_out_lo_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO25_Y4", slpm_in_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* dat0 */
+ DB8500_PIN_STATE("GPIO26_Y2", slpm_in_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO27_AA2", slpm_in_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO28_AA1", slpm_in_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* dat3 */
+ DB8500_PIN_STATE("GPIO75_H2", slpm_in_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", slpm_out_lo_wkup_pdis,
+ "stm", "mod_microsd_sleep"), /* uartmod tx */
+
+ /* STM dual Modem/APE pin state */
+ DB8500_MUX_STATE("stmmod_oc3_2", "stmmod",
+ "stm", "mod_mipi34_ape_mipi60"),
+ DB8500_MUX_STATE("stmape_c_2", "stmape",
+ "stm", "mod_mipi34_ape_mipi60"),
+ DB8500_MUX_STATE("uartmodrx_oc3_1", "uartmod",
+ "stm", "mod_mipi34_ape_mipi60"),
+ DB8500_MUX_STATE("uartmodtx_oc3_1", "uartmod",
+ "stm", "mod_mipi34_ape_mipi60"),
+ DB8500_PIN_STATE("GPIO70_G5", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat0 */
+ DB8500_PIN_STATE("GPIO75_H2", in_pu,
+ "stm", "mod_mipi34_ape_mipi60"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", out_lo,
+ "stm", "mod_mipi34_ape_mipi60"), /* uartmod tx */
+ DB8500_PIN_STATE("GPIO155_C19", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* clk */
+ DB8500_PIN_STATE("GPIO156_C17", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat3 */
+ DB8500_PIN_STATE("GPIO157_A18", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat2 */
+ DB8500_PIN_STATE("GPIO158_C18", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat1 */
+ DB8500_PIN_STATE("GPIO159_B19", in_nopull,
+ "stm", "mod_mipi34_ape_mipi60"), /* dat0 */
+
+ DB8500_PIN_STATE("GPIO70_G5", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO71_G4", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat3 */
+ DB8500_PIN_STATE("GPIO72_H4", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO73_H3", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO74_J3", slpm_out_lo_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat0 */
+ DB8500_PIN_STATE("GPIO75_H2", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* uartmod rx */
+ DB8500_PIN_STATE("GPIO76_J2", slpm_out_lo_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* uartmod tx */
+ DB8500_PIN_STATE("GPIO155_C19", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* clk */
+ DB8500_PIN_STATE("GPIO156_C17", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat3 */
+ DB8500_PIN_STATE("GPIO157_A18", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat2 */
+ DB8500_PIN_STATE("GPIO158_C18", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat1 */
+ DB8500_PIN_STATE("GPIO159_B19", slpm_in_wkup_pdis,
+ "stm", "mod_mipi34_ape_mipi60_sleep"), /* dat0 */
};
/*
@@ -267,32 +568,48 @@ static struct pinctrl_map __initdata mop500_pinmap[] = {
DB8500_PIN_HOG("GPIO217_AH12", gpio_in_pu),
/* Mux in UART1 and set the pull-ups */
DB8500_MUX_HOG("u1rxtx_a_1", "u1"),
- DB8500_MUX_HOG("u1ctsrts_a_1", "u1"),
DB8500_PIN_HOG("GPIO4_AH6", in_pu), /* RXD */
DB8500_PIN_HOG("GPIO5_AG6", out_hi), /* TXD */
- DB8500_PIN_HOG("GPIO6_AF6", in_pu), /* CTS */
- DB8500_PIN_HOG("GPIO7_AG5", out_hi), /* RTS */
/*
* Runtime stuff: make it possible to mux in the SKE keypad
* and bias the pins
*/
- DB8500_MUX("kp_a_2", "kp", "ske"),
- DB8500_PIN("GPIO153_B17", in_pd_slpm_in_pu, "ske"), /* I7 */
- DB8500_PIN("GPIO154_C16", in_pd_slpm_in_pu, "ske"), /* I6 */
- DB8500_PIN("GPIO155_C19", in_pd_slpm_in_pu, "ske"), /* I5 */
- DB8500_PIN("GPIO156_C17", in_pd_slpm_in_pu, "ske"), /* I4 */
- DB8500_PIN("GPIO161_D21", in_pd_slpm_in_pu, "ske"), /* I3 */
- DB8500_PIN("GPIO162_D20", in_pd_slpm_in_pu, "ske"), /* I2 */
- DB8500_PIN("GPIO163_C20", in_pd_slpm_in_pu, "ske"), /* I1 */
- DB8500_PIN("GPIO164_B21", in_pd_slpm_in_pu, "ske"), /* I0 */
- DB8500_PIN("GPIO157_A18", in_pu_slpm_out_lo, "ske"), /* O7 */
- DB8500_PIN("GPIO158_C18", in_pu_slpm_out_lo, "ske"), /* O6 */
- DB8500_PIN("GPIO159_B19", in_pu_slpm_out_lo, "ske"), /* O5 */
- DB8500_PIN("GPIO160_B20", in_pu_slpm_out_lo, "ske"), /* O4 */
- DB8500_PIN("GPIO165_C21", in_pu_slpm_out_lo, "ske"), /* O3 */
- DB8500_PIN("GPIO166_A22", in_pu_slpm_out_lo, "ske"), /* O2 */
- DB8500_PIN("GPIO167_B24", in_pu_slpm_out_lo, "ske"), /* O1 */
- DB8500_PIN("GPIO168_C22", in_pu_slpm_out_lo, "ske"), /* O0 */
+ /* ske default state */
+ DB8500_MUX("kp_a_2", "kp", "nmk-ske-keypad"),
+ DB8500_PIN("GPIO153_B17", in_pu, "nmk-ske-keypad"), /* I7 */
+ DB8500_PIN("GPIO154_C16", in_pu, "nmk-ske-keypad"), /* I6 */
+ DB8500_PIN("GPIO155_C19", in_pu, "nmk-ske-keypad"), /* I5 */
+ DB8500_PIN("GPIO156_C17", in_pu, "nmk-ske-keypad"), /* I4 */
+ DB8500_PIN("GPIO161_D21", in_pu, "nmk-ske-keypad"), /* I3 */
+ DB8500_PIN("GPIO162_D20", in_pu, "nmk-ske-keypad"), /* I2 */
+ DB8500_PIN("GPIO163_C20", in_pu, "nmk-ske-keypad"), /* I1 */
+ DB8500_PIN("GPIO164_B21", in_pu, "nmk-ske-keypad"), /* I0 */
+ DB8500_PIN("GPIO157_A18", out_lo, "nmk-ske-keypad"), /* O7 */
+ DB8500_PIN("GPIO158_C18", out_lo, "nmk-ske-keypad"), /* O6 */
+ DB8500_PIN("GPIO159_B19", out_lo, "nmk-ske-keypad"), /* O5 */
+ DB8500_PIN("GPIO160_B20", out_lo, "nmk-ske-keypad"), /* O4 */
+ DB8500_PIN("GPIO165_C21", out_lo, "nmk-ske-keypad"), /* O3 */
+ DB8500_PIN("GPIO166_A22", out_lo, "nmk-ske-keypad"), /* O2 */
+ DB8500_PIN("GPIO167_B24", out_lo, "nmk-ske-keypad"), /* O1 */
+ DB8500_PIN("GPIO168_C22", out_lo, "nmk-ske-keypad"), /* O0 */
+ /* ske sleep state */
+ DB8500_PIN_SLEEP("GPIO153_B17", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I7 */
+ DB8500_PIN_SLEEP("GPIO154_C16", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I6 */
+ DB8500_PIN_SLEEP("GPIO155_C19", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I5 */
+ DB8500_PIN_SLEEP("GPIO156_C17", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I4 */
+ DB8500_PIN_SLEEP("GPIO161_D21", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I3 */
+ DB8500_PIN_SLEEP("GPIO162_D20", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I2 */
+ DB8500_PIN_SLEEP("GPIO163_C20", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I1 */
+ DB8500_PIN_SLEEP("GPIO164_B21", slpm_in_pu_wkup_pdis_en, "nmk-ske-keypad"), /* I0 */
+ DB8500_PIN_SLEEP("GPIO157_A18", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O7 */
+ DB8500_PIN_SLEEP("GPIO158_C18", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O6 */
+ DB8500_PIN_SLEEP("GPIO159_B19", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O5 */
+ DB8500_PIN_SLEEP("GPIO160_B20", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O4 */
+ DB8500_PIN_SLEEP("GPIO165_C21", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O3 */
+ DB8500_PIN_SLEEP("GPIO166_A22", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O2 */
+ DB8500_PIN_SLEEP("GPIO167_B24", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O1 */
+ DB8500_PIN_SLEEP("GPIO168_C22", slpm_out_lo_pdis, "nmk-ske-keypad"), /* O0 */
+
/* Mux in and drive the SDI0 DAT31DIR line high at runtime */
DB8500_MUX("mc0dat31dir_a_1", "mc0", "sdi0"),
DB8500_PIN("GPIO21_AB3", out_hi, "sdi0"),
@@ -395,28 +712,6 @@ static struct pinctrl_map __initdata hrefv60_pinmap[] = {
DB8500_PIN("GPIO217_AH12", gpio_in_pu_slpm_gpio_nopull, "gpio-keys.0"),
DB8500_PIN("GPIO145_C13", gpio_in_pd_slpm_gpio_nopull, "gpio-keys.0"),
DB8500_PIN("GPIO139_C9", gpio_in_pu_slpm_gpio_nopull, "gpio-keys.0"),
- /*
- * Make it possible to mux in the SKE keypad and bias the pins
- * FIXME: what's the point with this on HREFv60? KP/SKE is already
- * muxed in at another place! Enabling this will bork.
- */
- DB8500_MUX("kp_a_2", "kp", "ske"),
- DB8500_PIN("GPIO153_B17", in_pd_slpm_in_pu, "ske"), /* I7 */
- DB8500_PIN("GPIO154_C16", in_pd_slpm_in_pu, "ske"), /* I6 */
- DB8500_PIN("GPIO155_C19", in_pd_slpm_in_pu, "ske"), /* I5 */
- DB8500_PIN("GPIO156_C17", in_pd_slpm_in_pu, "ske"), /* I4 */
- DB8500_PIN("GPIO161_D21", in_pd_slpm_in_pu, "ske"), /* I3 */
- DB8500_PIN("GPIO162_D20", in_pd_slpm_in_pu, "ske"), /* I2 */
- DB8500_PIN("GPIO163_C20", in_pd_slpm_in_pu, "ske"), /* I1 */
- DB8500_PIN("GPIO164_B21", in_pd_slpm_in_pu, "ske"), /* I0 */
- DB8500_PIN("GPIO157_A18", in_pu_slpm_out_lo, "ske"), /* O7 */
- DB8500_PIN("GPIO158_C18", in_pu_slpm_out_lo, "ske"), /* O6 */
- DB8500_PIN("GPIO159_B19", in_pu_slpm_out_lo, "ske"), /* O5 */
- DB8500_PIN("GPIO160_B20", in_pu_slpm_out_lo, "ske"), /* O4 */
- DB8500_PIN("GPIO165_C21", in_pu_slpm_out_lo, "ske"), /* O3 */
- DB8500_PIN("GPIO166_A22", in_pu_slpm_out_lo, "ske"), /* O2 */
- DB8500_PIN("GPIO167_B24", in_pu_slpm_out_lo, "ske"), /* O1 */
- DB8500_PIN("GPIO168_C22", in_pu_slpm_out_lo, "ske"), /* O0 */
};
static struct pinctrl_map __initdata u9500_pinmap[] = {
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 9c8e4a9e83e..051b62c2710 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -11,9 +11,9 @@
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <asm/mach-types.h>
-#include <plat/ste_dma40.h>
#include <mach/devices.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c
index 8c979770d87..7e1f294f043 100644
--- a/arch/arm/mach-ux500/board-mop500-stuib.c
+++ b/arch/arm/mach-ux500/board-mop500-stuib.c
@@ -77,9 +77,6 @@ static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
* BU21013 ROHM touchscreen interface on the STUIBs
*/
-/* tracks number of bu21013 devices being enabled */
-static int bu21013_devices;
-
#define TOUCH_GPIO_PIN 84
#define TOUCH_XMAX 384
@@ -88,85 +85,8 @@ static int bu21013_devices;
#define PRCMU_CLOCK_OCR 0x1CC
#define TSC_EXT_CLOCK_9_6MHZ 0x840000
-/**
- * bu21013_gpio_board_init : configures the touch panel.
- * @reset_pin: reset pin number
- * This function can be used to configures
- * the voltage and reset the touch panel controller.
- */
-static int bu21013_gpio_board_init(int reset_pin)
-{
- int retval = 0;
-
- bu21013_devices++;
- if (bu21013_devices == 1) {
- retval = gpio_request(reset_pin, "touchp_reset");
- if (retval) {
- printk(KERN_ERR "Unable to request gpio reset_pin");
- return retval;
- }
- retval = gpio_direction_output(reset_pin, 1);
- if (retval < 0) {
- printk(KERN_ERR "%s: gpio direction failed\n",
- __func__);
- return retval;
- }
- }
-
- return retval;
-}
-
-/**
- * bu21013_gpio_board_exit : deconfigures the touch panel controller
- * @reset_pin: reset pin number
- * This function can be used to deconfigures the chip selection
- * for touch panel controller.
- */
-static int bu21013_gpio_board_exit(int reset_pin)
-{
- int retval = 0;
-
- if (bu21013_devices == 1) {
- retval = gpio_direction_output(reset_pin, 0);
- if (retval < 0) {
- printk(KERN_ERR "%s: gpio direction failed\n",
- __func__);
- return retval;
- }
- gpio_set_value(reset_pin, 0);
- }
- bu21013_devices--;
-
- return retval;
-}
-
-/**
- * bu21013_read_pin_val : get the interrupt pin value
- * This function can be used to get the interrupt pin value for touch panel
- * controller.
- */
-static int bu21013_read_pin_val(void)
-{
- return gpio_get_value(TOUCH_GPIO_PIN);
-}
-
static struct bu21013_platform_device tsc_plat_device = {
- .cs_en = bu21013_gpio_board_init,
- .cs_dis = bu21013_gpio_board_exit,
- .irq_read_val = bu21013_read_pin_val,
- .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
- .touch_x_max = TOUCH_XMAX,
- .touch_y_max = TOUCH_YMAX,
- .ext_clk = false,
- .x_flip = false,
- .y_flip = true,
-};
-
-static struct bu21013_platform_device tsc_plat2_device = {
- .cs_en = bu21013_gpio_board_init,
- .cs_dis = bu21013_gpio_board_exit,
- .irq_read_val = bu21013_read_pin_val,
- .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
+ .touch_pin = TOUCH_GPIO_PIN,
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
.ext_clk = false,
@@ -181,21 +101,16 @@ static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
},
{
I2C_BOARD_INFO("bu21013_tp", 0x5D),
- .platform_data = &tsc_plat2_device,
+ .platform_data = &tsc_plat_device,
},
-
};
void __init mop500_stuib_init(void)
{
- if (machine_is_hrefv60()) {
+ if (machine_is_hrefv60())
tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
- tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
- } else {
+ else
tsc_plat_device.cs_pin = GPIO_BU21013_CS;
- tsc_plat2_device.cs_pin = GPIO_BU21013_CS;
-
- }
mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
ARRAY_SIZE(mop500_i2c0_devices_stuib));
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index e6ad161449d..d453522edb0 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -33,18 +33,15 @@
#include <linux/smsc911x.h>
#include <linux/gpio_keys.h>
#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/leds.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/pinctrl-nomadik.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
-#include <plat/ste_dma40.h>
-
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
@@ -525,7 +522,7 @@ static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
};
#endif
-static struct pl022_ssp_controller ssp0_plat = {
+struct pl022_ssp_controller ssp0_plat = {
.bus_id = 0,
#ifdef CONFIG_STE_DMA40
.enable_dma = 1,
@@ -602,7 +599,7 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
};
#endif
-static struct amba_pl011_data uart0_plat = {
+struct amba_pl011_data uart0_plat = {
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &uart0_dma_cfg_rx,
@@ -610,7 +607,7 @@ static struct amba_pl011_data uart0_plat = {
#endif
};
-static struct amba_pl011_data uart1_plat = {
+struct amba_pl011_data uart1_plat = {
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &uart1_dma_cfg_rx,
@@ -618,7 +615,7 @@ static struct amba_pl011_data uart1_plat = {
#endif
};
-static struct amba_pl011_data uart2_plat = {
+struct amba_pl011_data uart2_plat = {
#ifdef CONFIG_STE_DMA40
.dma_filter = stedma40_filter,
.dma_rx_param = &uart2_dma_cfg_rx,
@@ -681,8 +678,6 @@ static void __init mop500_init_machine(void)
/* This board has full regulator constraints */
regulator_has_full_constraints();
-
- mop500_uib_init();
}
static void __init snowball_init_machine(void)
@@ -747,8 +742,6 @@ static void __init hrefv60_init_machine(void)
/* This board has full regulator constraints */
regulator_has_full_constraints();
-
- mop500_uib_init();
}
MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -794,135 +787,5 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = snowball_init_machine,
- .init_late = ux500_init_late,
+ .init_late = NULL,
MACHINE_END
-
-#ifdef CONFIG_MACH_UX500_DT
-
-struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
- /* Requires call-back bindings. */
- OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
- /* Requires DMA and call-back bindings. */
- OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
- OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", &uart1_plat),
- OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat),
- /* Requires DMA bindings. */
- OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat),
- OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
- OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data),
- OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data),
- OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
- /* Requires clock name bindings. */
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e000, "gpio.0", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e080, "gpio.1", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e000, "gpio.2", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e080, "gpio.3", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e100, "gpio.4", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e180, "gpio.5", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
- OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80004000, "nmk-i2c.0", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80122000, "nmk-i2c.1", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80128000, "nmk-i2c.2", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
- OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
- /* Requires device name bindings. */
- OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
- /* Requires clock name and DMA bindings. */
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
- "ux500-msp-i2s.0", &msp0_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
- "ux500-msp-i2s.1", &msp1_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80117000,
- "ux500-msp-i2s.2", &msp2_platform_data),
- OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
- "ux500-msp-i2s.3", &msp3_platform_data),
- {},
-};
-
-static const struct of_device_id u8500_local_bus_nodes[] = {
- /* only create devices below soc node */
- { .compatible = "stericsson,db8500", },
- { .compatible = "stericsson,db8500-prcmu", },
- { .compatible = "simple-bus"},
- { },
-};
-
-static void __init u8500_init_machine(void)
-{
- struct device *parent = NULL;
- int i2c0_devs;
- int i;
-
- /* Pinmaps must be in place before devices register */
- if (of_machine_is_compatible("st-ericsson,mop500"))
- mop500_pinmaps_init();
- else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
- snowball_pinmaps_init();
- else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
- hrefv60_pinmaps_init();
-
- parent = u8500_of_init_devices();
-
- for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
- mop500_platform_devs[i]->dev.parent = parent;
-
- /* automatically probe child nodes of db8500 device */
- of_platform_populate(NULL, u8500_local_bus_nodes, u8500_auxdata_lookup, parent);
-
- if (of_machine_is_compatible("st-ericsson,mop500")) {
- mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
-
- platform_add_devices(mop500_platform_devs,
- ARRAY_SIZE(mop500_platform_devs));
-
- mop500_sdi_init(parent);
- mop500_audio_init(parent);
- i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
- i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
- i2c_register_board_info(2, mop500_i2c2_devices,
- ARRAY_SIZE(mop500_i2c2_devices));
-
- mop500_uib_init();
-
- } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
- mop500_of_audio_init(parent);
- } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
- /*
- * The HREFv60 board removed a GPIO expander and routed
- * all these GPIO pins to the internal GPIO controller
- * instead.
- */
- mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
- platform_add_devices(mop500_platform_devs,
- ARRAY_SIZE(mop500_platform_devs));
-
- mop500_uib_init();
- }
-
- /* This board has full regulator constraints */
- regulator_has_full_constraints();
-}
-
-static const char * u8500_dt_board_compat[] = {
- "calaosystems,snowball-a9500",
- "st-ericsson,hrefv60+",
- "st-ericsson,u8500",
- "st-ericsson,mop500",
- NULL,
-};
-
-
-DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)")
- .smp = smp_ops(ux500_smp_ops),
- .map_io = u8500_map_io,
- .init_irq = ux500_init_irq,
- /* we re-use nomadik timer here */
- .timer = &ux500_timer,
- .handle_irq = gic_handle_irq,
- .init_machine = u8500_init_machine,
- .init_late = ux500_init_late,
- .dt_compat = u8500_dt_board_compat,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index aca39a68712..eaa605f5d90 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -89,6 +89,10 @@ extern struct msp_i2s_platform_data msp1_platform_data;
extern struct msp_i2s_platform_data msp2_platform_data;
extern struct msp_i2s_platform_data msp3_platform_data;
extern struct arm_pmu_platdata db8500_pmu_platdata;
+extern struct amba_pl011_data uart0_plat;
+extern struct amba_pl011_data uart1_plat;
+extern struct amba_pl011_data uart2_plat;
+extern struct pl022_ssp_controller ssp0_plat;
extern void mop500_sdi_init(struct device *parent);
extern void snowball_sdi_init(struct device *parent);
@@ -100,14 +104,8 @@ void __init mop500_pinmaps_init(void);
void __init snowball_pinmaps_init(void);
void __init hrefv60_pinmaps_init(void);
void mop500_audio_init(struct device *parent);
-/* Due for removal once the MSP driver has been fully DT:ed. */
-void mop500_of_audio_init(struct device *parent);
int __init mop500_uib_init(void);
void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
unsigned n);
-
-/* TODO: Once all pieces are DT:ed, remove completely. */
-struct device * __init u8500_of_init_devices(void);
-
#endif
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 5c5ad70e48b..db0bb75e2c7 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -17,19 +17,27 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/platform_data/usb-musb-ux500.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/machine.h>
#include <linux/platform_data/pinctrl-nomadik.h>
#include <linux/random.h>
#include <asm/pmu.h>
#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
+
#include <mach/hardware.h>
#include <mach/setup.h>
#include <mach/devices.h>
#include <mach/db8500-regs.h>
+#include <mach/irqs.h>
#include "devices-db8500.h"
#include "ste-dma40-db8500.h"
+#include "board-mop500.h"
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_uart_io_desc[] __initdata = {
@@ -227,12 +235,12 @@ struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
return parent;
}
+#ifdef CONFIG_MACH_UX500_DT
+
/* TODO: Once all pieces are DT:ed, remove completely. */
-struct device * __init u8500_of_init_devices(void)
+static struct device * __init u8500_of_init_devices(void)
{
- struct device *parent;
-
- parent = db8500_soc_device_init();
+ struct device *parent = db8500_soc_device_init();
db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
@@ -248,3 +256,95 @@ struct device * __init u8500_of_init_devices(void)
return parent;
}
+
+static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
+ /* Requires call-back bindings. */
+ OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
+ /* Requires DMA bindings. */
+ OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
+ OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", &uart1_plat),
+ OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat),
+ OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
+ /* Requires clock name bindings. */
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e000, "gpio.0", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e080, "gpio.1", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e000, "gpio.2", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e080, "gpio.3", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e100, "gpio.4", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8000e180, "gpio.5", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
+ OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80004000, "nmk-i2c.0", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80122000, "nmk-i2c.1", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80128000, "nmk-i2c.2", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
+ /* Requires device name bindings. */
+ OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
+ /* Requires clock name and DMA bindings. */
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
+ "ux500-msp-i2s.0", &msp0_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000,
+ "ux500-msp-i2s.1", &msp1_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80117000,
+ "ux500-msp-i2s.2", &msp2_platform_data),
+ OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
+ "ux500-msp-i2s.3", &msp3_platform_data),
+ {},
+};
+
+static const struct of_device_id u8500_local_bus_nodes[] = {
+ /* only create devices below soc node */
+ { .compatible = "stericsson,db8500", },
+ { .compatible = "stericsson,db8500-prcmu", },
+ { .compatible = "simple-bus"},
+ { },
+};
+
+static void __init u8500_init_machine(void)
+{
+ struct device *parent = NULL;
+
+ /* Pinmaps must be in place before devices register */
+ if (of_machine_is_compatible("st-ericsson,mop500"))
+ mop500_pinmaps_init();
+ else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
+ snowball_pinmaps_init();
+ else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+ hrefv60_pinmaps_init();
+ else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
+ /* TODO: Add pinmaps for ccu9540 board. */
+
+ /* TODO: Export SoC, USB, cpu-freq and DMA40 */
+ parent = u8500_of_init_devices();
+
+ /* automatically probe child nodes of db8500 device */
+ of_platform_populate(NULL, u8500_local_bus_nodes, u8500_auxdata_lookup, parent);
+}
+
+static const char * stericsson_dt_platform_compat[] = {
+ "st-ericsson,u8500",
+ "st-ericsson,u8540",
+ "st-ericsson,u9500",
+ "st-ericsson,u9540",
+ NULL,
+};
+
+DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)")
+ .smp = smp_ops(ux500_smp_ops),
+ .map_io = u8500_map_io,
+ .init_irq = ux500_init_irq,
+ /* we re-use nomadik timer here */
+ .timer = &ux500_timer,
+ .handle_irq = gic_handle_irq,
+ .init_machine = u8500_init_machine,
+ .init_late = NULL,
+ .dt_compat = stericsson_dt_platform_compat,
+MACHINE_END
+
+#endif
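
The auxdata table above bridges the gap while clock, DMA and device-name bindings are still missing: of_platform_populate() matches DT nodes below the listed bus nodes by compatible string and unit address and, when an entry hits, creates the platform device with its legacy name and platform_data. A minimal sketch of the pattern follows; the "acme,foo" compatible, device name and pdata are hypothetical and not part of this patch.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>

struct foo_pdata {
	int channel;			/* hypothetical board-specific setting */
};

static struct foo_pdata foo0_pdata = { .channel = 0 };

static struct of_dev_auxdata board_auxdata[] __initdata = {
	/* compatible, unit address, legacy device name, platform_data */
	OF_DEV_AUXDATA("acme,foo", 0x80001000, "foo.0", &foo0_pdata),
	{ /* sentinel */ },
};

static const struct of_device_id board_bus_ids[] __initconst = {
	{ .compatible = "simple-bus", },
	{ },
};

static void __init board_init_machine(void)
{
	/* create devices only for children of the matched bus nodes */
	of_platform_populate(NULL, board_bus_ids, board_auxdata, NULL);
}
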
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 1f3fbc2bb77..721e7b4275f 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -26,6 +26,8 @@
#include <mach/setup.h>
#include <mach/devices.h>
+#include "board-mop500.h"
+
void __iomem *_PRCMU_BASE;
/*
@@ -82,6 +84,7 @@ void __init ux500_init_irq(void)
void __init ux500_init_late(void)
{
+ mop500_uib_init();
}
static const char * __init ux500_get_machine(void)
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
index 692a77a1c15..16b5f71e697 100644
--- a/arch/arm/mach-ux500/devices-common.c
+++ b/arch/arm/mach-ux500/devices-common.c
@@ -14,6 +14,7 @@
#include <linux/platform_data/pinctrl-nomadik.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include "devices-common.h"
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 91754a8a0d4..318d4902089 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -12,11 +12,11 @@
#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
-
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <mach/hardware.h>
#include <mach/setup.h>
+#include <mach/irqs.h>
#include "ste-dma40-db8500.h"
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 3c8010f4fb3..a5e05f6e256 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -8,6 +8,8 @@
#ifndef __DEVICES_DB8500_H
#define __DEVICES_DB8500_H
+#include <linux/platform_data/usb-musb-ux500.h>
+#include <mach/irqs.h>
#include "devices-common.h"
struct ske_keypad_platform_data;
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index e8928548b6a..fc77b4274c8 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -46,6 +46,6 @@
#include <mach/irqs-board-mop500.h>
#endif
-#define NR_IRQS IRQ_BOARD_END
+#define UX500_NR_IRQS IRQ_BOARD_END
#endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-ux500/include/mach/msp.h b/arch/arm/mach-ux500/include/mach/msp.h
index 3cc7142eee0..9991aea3d57 100644
--- a/arch/arm/mach-ux500/include/mach/msp.h
+++ b/arch/arm/mach-ux500/include/mach/msp.h
@@ -8,7 +8,7 @@
#ifndef __MSP_H
#define __MSP_H
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
enum msp_i2s_id {
MSP_I2S_0 = 0,
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index 6f39731951b..875309acb02 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -9,11 +9,10 @@
#include <linux/clksrc-dbx500-prcmu.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <asm/smp_twd.h>
-#include <plat/mtu.h>
-
#include <mach/setup.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
@@ -96,7 +95,7 @@ dt_fail:
*
*/
- nmdk_timer_init(mtu_timer_base);
+ nmdk_timer_init(mtu_timer_base, IRQ_MTU0);
clksrc_dbx500_prcmu_init(prcmu_timer_base);
ux500_twd_init();
}
diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c
index 145482e7441..78ac65f62e8 100644
--- a/arch/arm/mach-ux500/usb.c
+++ b/arch/arm/mach-ux500/usb.c
@@ -7,10 +7,10 @@
#include <linux/platform_device.h>
#include <linux/usb/musb.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/usb-musb-ux500.h>
+#include <linux/platform_data/dma-ste-dma40.h>
-#include <plat/ste_dma40.h>
#include <mach/hardware.h>
-#include <linux/platform_data/usb-musb-ux500.h>
#define MUSB_DMA40_RX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
diff --git a/arch/arm/mach-vexpress/reset.c b/arch/arm/mach-vexpress/reset.c
new file mode 100644
index 00000000000..465923aa381
--- /dev/null
+++ b/arch/arm/mach-vexpress/reset.c
@@ -0,0 +1,141 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/vexpress.h>
+
+static void vexpress_reset_do(struct device *dev, const char *what)
+{
+ int err = -ENOENT;
+ struct vexpress_config_func *func =
+ vexpress_config_func_get_by_dev(dev);
+
+ if (func) {
+ unsigned long timeout;
+
+ err = vexpress_config_write(func, 0, 0);
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout))
+ cpu_relax();
+ }
+
+ dev_emerg(dev, "Unable to %s (%d)\n", what, err);
+}
+
+static struct device *vexpress_power_off_device;
+
+void vexpress_power_off(void)
+{
+ vexpress_reset_do(vexpress_power_off_device, "power off");
+}
+
+static struct device *vexpress_restart_device;
+
+void vexpress_restart(char str, const char *cmd)
+{
+ vexpress_reset_do(vexpress_restart_device, "restart");
+}
+
+static ssize_t vexpress_reset_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", vexpress_restart_device == dev);
+}
+
+static ssize_t vexpress_reset_active_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ long value;
+ int err = kstrtol(buf, 0, &value);
+
+ if (!err && value)
+ vexpress_restart_device = dev;
+
+ return err ? err : count;
+}
+
+DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
+ vexpress_reset_active_store);
+
+
+enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT };
+
+static struct of_device_id vexpress_reset_of_match[] = {
+ {
+ .compatible = "arm,vexpress-reset",
+ .data = (void *)FUNC_RESET,
+ }, {
+ .compatible = "arm,vexpress-shutdown",
+ .data = (void *)FUNC_SHUTDOWN
+ }, {
+ .compatible = "arm,vexpress-reboot",
+ .data = (void *)FUNC_REBOOT
+ },
+ {}
+};
+
+static int vexpress_reset_probe(struct platform_device *pdev)
+{
+ enum vexpress_reset_func func;
+ const struct of_device_id *match =
+ of_match_device(vexpress_reset_of_match, &pdev->dev);
+
+ if (match)
+ func = (enum vexpress_reset_func)match->data;
+ else
+ func = pdev->id_entry->driver_data;
+
+ switch (func) {
+ case FUNC_SHUTDOWN:
+ vexpress_power_off_device = &pdev->dev;
+ break;
+ case FUNC_RESET:
+ if (!vexpress_restart_device)
+ vexpress_restart_device = &pdev->dev;
+ device_create_file(&pdev->dev, &dev_attr_active);
+ break;
+ case FUNC_REBOOT:
+ vexpress_restart_device = &pdev->dev;
+ device_create_file(&pdev->dev, &dev_attr_active);
+ break;
+ };
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_reset_id_table[] = {
+ { .name = "vexpress-reset", .driver_data = FUNC_RESET, },
+ { .name = "vexpress-shutdown", .driver_data = FUNC_SHUTDOWN, },
+ { .name = "vexpress-reboot", .driver_data = FUNC_REBOOT, },
+ {}
+};
+
+static struct platform_driver vexpress_reset_driver = {
+ .probe = vexpress_reset_probe,
+ .driver = {
+ .name = "vexpress-reset",
+ .of_match_table = vexpress_reset_of_match,
+ },
+ .id_table = vexpress_reset_id_table,
+};
+
+static int __init vexpress_reset_init(void)
+{
+ return platform_driver_register(&vexpress_reset_driver);
+}
+device_initcall(vexpress_reset_init);
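
The handlers above only take effect once a machine points the kernel at them: pm_power_off is the generic power-off hook, and the machine descriptor's .restart field can take vexpress_restart() directly. A sketch of that wiring, under the assumption that linux/vexpress.h declares both handlers; the machine name and compatible string below are placeholders, not part of this patch.

#include <linux/pm.h>
#include <linux/vexpress.h>
#include <asm/mach/arch.h>

static void __init board_init_machine(void)
{
	/* route power-off requests through the vexpress config function */
	pm_power_off = vexpress_power_off;
}

static const char * board_dt_compat[] = {
	"arm,vexpress",
	NULL,
};

DT_MACHINE_START(VEXPRESS_DT_SKETCH, "ARM Versatile Express (sketch)")
	/* .map_io / .init_irq / .timer omitted in this sketch */
	.init_machine	= board_init_machine,
	.restart	= vexpress_restart,
	.dt_compat	= board_dt_compat,
MACHINE_END
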
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
new file mode 100644
index 00000000000..2ed0b7d95db
--- /dev/null
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -0,0 +1,12 @@
+config ARCH_VT8500
+ bool "VIA/WonderMedia 85xx" if ARCH_MULTI_V5
+ default ARCH_VT8500_SINGLE
+ select ARCH_HAS_CPUFREQ
+ select ARCH_REQUIRE_GPIOLIB
+ select CLKDEV_LOOKUP
+ select CPU_ARM926T
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_GPIO
+ select HAVE_CLK
+ help
+ Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
diff --git a/arch/arm/mach-vt8500/common.h b/arch/arm/mach-vt8500/common.h
index 2b2419646e9..6f2b843115d 100644
--- a/arch/arm/mach-vt8500/common.h
+++ b/arch/arm/mach-vt8500/common.h
@@ -25,4 +25,7 @@ int __init vt8500_irq_init(struct device_node *node,
/* defined in drivers/clk/clk-vt8500.c */
void __init vtwm_clk_init(void __iomem *pmc_base);
+/* defined in irq.c */
+asmlinkage void vt8500_handle_irq(struct pt_regs *regs);
+
#endif
diff --git a/arch/arm/mach-vt8500/include/mach/entry-macro.S b/arch/arm/mach-vt8500/include/mach/entry-macro.S
deleted file mode 100644
index 367d1b55fb9..00000000000
--- a/arch/arm/mach-vt8500/include/mach/entry-macro.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-vt8500/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for VIA VT8500
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
- .macro get_irqnr_preamble, base, tmp
- @ physical 0xd8140000 is virtual 0xf8140000
- mov \base, #0xf8000000
- orr \base, \base, #0x00140000
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base]
- cmp \irqnr, #63 @ may be false positive, check interrupt status
- bne 1001f
- ldr \irqstat, [\base, #0x84]
- ands \irqstat, #0x80000000
- moveq \irqnr, #0
-1001:
- .endm
-
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c
index f8f9ab9bc56..b9cf5ce9efb 100644
--- a/arch/arm/mach-vt8500/irq.c
+++ b/arch/arm/mach-vt8500/irq.c
@@ -36,7 +36,7 @@
#include <linux/of_address.h>
#include <asm/irq.h>
-
+#include <asm/exception.h>
#define VT8500_ICPC_IRQ 0x20
#define VT8500_ICPC_FIQ 0x24
@@ -66,30 +66,34 @@
#define VT8500_EDGE ( VT8500_TRIGGER_RISING \
| VT8500_TRIGGER_FALLING)
-static int irq_cnt;
+/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */
+#define VT8500_INTC_MAX 2
-struct vt8500_irq_priv {
- void __iomem *base;
+struct vt8500_irq_data {
+ void __iomem *base; /* IO Memory base address */
+ struct irq_domain *domain; /* Domain for this controller */
};
+/* Global variable for accessing io-mem addresses */
+static struct vt8500_irq_data intc[VT8500_INTC_MAX];
+static u32 active_cnt = 0;
+
static void vt8500_irq_mask(struct irq_data *d)
{
- struct vt8500_irq_priv *priv =
- (struct vt8500_irq_priv *)(d->domain->host_data);
+ struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
- u8 edge;
+ void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
+ u8 edge, dctr;
+ u32 status;
edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
if (edge) {
- void __iomem *stat_reg = base + VT8500_ICIS
- + (d->hwirq < 32 ? 0 : 4);
- unsigned status = readl(stat_reg);
+ status = readl(stat_reg);
status |= (1 << (d->hwirq & 0x1f));
writel(status, stat_reg);
} else {
- u8 dctr = readb(base + VT8500_ICDC + d->hwirq);
-
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
dctr &= ~VT8500_INT_ENABLE;
writeb(dctr, base + VT8500_ICDC + d->hwirq);
}
@@ -97,8 +101,7 @@ static void vt8500_irq_mask(struct irq_data *d)
static void vt8500_irq_unmask(struct irq_data *d)
{
- struct vt8500_irq_priv *priv =
- (struct vt8500_irq_priv *)(d->domain->host_data);
+ struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
u8 dctr;
@@ -109,8 +112,7 @@ static void vt8500_irq_unmask(struct irq_data *d)
static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct vt8500_irq_priv *priv =
- (struct vt8500_irq_priv *)(d->domain->host_data);
+ struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
u8 dctr;
@@ -148,17 +150,15 @@ static struct irq_chip vt8500_irq_chip = {
static void __init vt8500_init_irq_hw(void __iomem *base)
{
- unsigned int i;
+ u32 i;
/* Enable rotating priority for IRQ */
writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ);
writel(0x00, base + VT8500_ICPC_FIQ);
- for (i = 0; i < 64; i++) {
- /* Disable all interrupts and route them to IRQ */
- writeb(VT8500_INT_DISABLE | ICDC_IRQ,
- base + VT8500_ICDC + i);
- }
+ /* Disable all interrupts and route them to IRQ */
+ for (i = 0; i < 64; i++)
+ writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i);
}
static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
@@ -175,33 +175,67 @@ static struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+asmlinkage void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+{
+ u32 stat, i;
+ int irqnr, virq;
+ void __iomem *base;
+
+ /* Loop through each active controller */
+ for (i=0; i<active_cnt; i++) {
+ base = intc[i].base;
+ irqnr = readl_relaxed(base) & 0x3F;
+		/*
+		 * Highest Priority register default = 63, so check that this
+		 * is a real interrupt by checking the status register
+		 */
+ if (irqnr == 63) {
+ stat = readl_relaxed(base + VT8500_ICIS + 4);
+ if (!(stat & BIT(31)))
+ continue;
+ }
+
+ virq = irq_find_mapping(intc[i].domain, irqnr);
+ handle_IRQ(virq, regs);
+ }
+}
+
int __init vt8500_irq_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *vt8500_irq_domain;
- struct vt8500_irq_priv *priv;
int irq, i;
struct device_node *np = node;
- priv = kzalloc(sizeof(struct vt8500_irq_priv), GFP_KERNEL);
- priv->base = of_iomap(np, 0);
+ if (active_cnt == VT8500_INTC_MAX) {
+ pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
+ __func__);
+ goto out;
+ }
+
+ intc[active_cnt].base = of_iomap(np, 0);
+ intc[active_cnt].domain = irq_domain_add_linear(node, 64,
+ &vt8500_irq_domain_ops, &intc[active_cnt]);
- vt8500_irq_domain = irq_domain_add_legacy(node, 64, irq_cnt, 0,
- &vt8500_irq_domain_ops, priv);
- if (!vt8500_irq_domain)
- pr_err("%s: Unable to add wmt irq domain!\n", __func__);
+ if (!intc[active_cnt].base) {
+ pr_err("%s: Unable to map IO memory\n", __func__);
+ goto out;
+ }
+
+ if (!intc[active_cnt].domain) {
+ pr_err("%s: Unable to add irq domain!\n", __func__);
+ goto out;
+ }
- irq_set_default_host(vt8500_irq_domain);
+ vt8500_init_irq_hw(intc[active_cnt].base);
- vt8500_init_irq_hw(priv->base);
+ pr_info("vt8500-irq: Added interrupt controller\n");
- pr_info("Added IRQ Controller @ %x [virq_base = %d]\n",
- (u32)(priv->base), irq_cnt);
+ active_cnt++;
/* check if this is a slaved controller */
if (of_irq_count(np) != 0) {
/* check that we have the correct number of interrupts */
if (of_irq_count(np) != 8) {
- pr_err("%s: Incorrect IRQ map for slave controller\n",
+ pr_err("%s: Incorrect IRQ map for slaved controller\n",
__func__);
return -EINVAL;
}
@@ -213,9 +247,7 @@ int __init vt8500_irq_init(struct device_node *node, struct device_node *parent)
pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
}
-
- irq_cnt += 64;
-
+out:
return 0;
}
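
With the switch from irq_domain_add_legacy() to irq_domain_add_linear(), hwirq numbers are no longer pre-allocated Linux IRQ numbers: the handler above resolves them with irq_find_mapping(), and ordinary drivers resolve them from the device tree. A sketch of the consumer side, with hypothetical handler and device names.

#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the device interrupt here */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct device_node *np)
{
	/* xlate_onecell: the single DT cell is the hwirq in this domain */
	int virq = irq_of_parse_and_map(np, 0);

	if (!virq)
		return -EINVAL;

	return request_irq(virq, foo_isr, 0, "foo", NULL);
}
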
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c
index a5bd28692b0..3c66d48ea08 100644
--- a/arch/arm/mach-vt8500/vt8500.c
+++ b/arch/arm/mach-vt8500/vt8500.c
@@ -192,5 +192,6 @@ DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
.timer = &vt8500_timer,
.init_machine = vt8500_init,
.restart = vt8500_restart,
+ .handle_irq = vt8500_handle_irq,
MACHINE_END
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
new file mode 100644
index 00000000000..adb6c0ea0e5
--- /dev/null
+++ b/arch/arm/mach-zynq/Kconfig
@@ -0,0 +1,13 @@
+config ARCH_ZYNQ
+ bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
+ select ARM_AMBA
+ select ARM_GIC
+ select COMMON_CLK
+ select CPU_V7
+ select GENERIC_CLOCKEVENTS
+ select ICST
+ select MIGHT_HAVE_CACHE_L2X0
+ select USE_OF
+ select SPARSE_IRQ
+ help
+ Support for Xilinx Zynq ARM Cortex A9 Platform
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index ba8d14f78d4..e16d4bed0f7 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -19,18 +19,21 @@
#include <linux/cpumask.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/clk/zynq.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
-#include <mach/zynq_soc.h>
#include "common.h"
static struct of_device_id zynq_of_bus_ids[] __initdata = {
@@ -65,32 +68,36 @@ static void __init xilinx_irq_init(void)
of_irq_init(irq_match);
}
-/* The minimum devices needed to be mapped before the VM system is up and
- * running include the GIC, UART and Timer Counter.
- */
+#define SCU_PERIPH_PHYS 0xF8F00000
+#define SCU_PERIPH_SIZE SZ_8K
+#define SCU_PERIPH_VIRT (VMALLOC_END - SCU_PERIPH_SIZE)
+
+static struct map_desc scu_desc __initdata = {
+ .virtual = SCU_PERIPH_VIRT,
+ .pfn = __phys_to_pfn(SCU_PERIPH_PHYS),
+ .length = SCU_PERIPH_SIZE,
+ .type = MT_DEVICE,
+};
+
+static void __init xilinx_zynq_timer_init(void)
+{
+ struct device_node *np;
+ void __iomem *slcr;
-static struct map_desc io_desc[] __initdata = {
- {
- .virtual = TTC0_VIRT,
- .pfn = __phys_to_pfn(TTC0_PHYS),
- .length = TTC0_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = SCU_PERIPH_VIRT,
- .pfn = __phys_to_pfn(SCU_PERIPH_PHYS),
- .length = SCU_PERIPH_SIZE,
- .type = MT_DEVICE,
- },
-
-#ifdef CONFIG_DEBUG_LL
- {
- .virtual = UART0_VIRT,
- .pfn = __phys_to_pfn(UART0_PHYS),
- .length = UART0_SIZE,
- .type = MT_DEVICE,
- },
-#endif
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-slcr");
+ slcr = of_iomap(np, 0);
+ WARN_ON(!slcr);
+ xilinx_zynq_clocks_init(slcr);
+
+ xttcpss_timer_init();
+}
+
+/*
+ * Instantiate and initialize the system timer structure
+ */
+static struct sys_timer xttcpss_sys_timer = {
+ .init = xilinx_zynq_timer_init,
};
/**
@@ -98,11 +105,13 @@ static struct map_desc io_desc[] __initdata = {
*/
static void __init xilinx_map_io(void)
{
- iotable_init(io_desc, ARRAY_SIZE(io_desc));
+ debug_ll_io_init();
+ iotable_init(&scu_desc, 1);
}
static const char *xilinx_dt_match[] = {
- "xlnx,zynq-ep107",
+ "xlnx,zynq-zc702",
+ "xlnx,zynq-7000",
NULL
};
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index a009644a155..954b91c13c9 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -17,8 +17,6 @@
#ifndef __MACH_ZYNQ_COMMON_H__
#define __MACH_ZYNQ_COMMON_H__
-#include <asm/mach/time.h>
-
-extern struct sys_timer xttcpss_sys_timer;
+void __init xttcpss_timer_init(void);
#endif
diff --git a/arch/arm/mach-zynq/include/mach/hardware.h b/arch/arm/mach-zynq/include/mach/hardware.h
deleted file mode 100644
index d558d8a94be..00000000000
--- a/arch/arm/mach-zynq/include/mach/hardware.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/hardware.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_HARDWARE_H__
-#define __MACH_HARDWARE_H__
-
-#endif
diff --git a/arch/arm/mach-zynq/include/mach/irqs.h b/arch/arm/mach-zynq/include/mach/irqs.h
deleted file mode 100644
index 5fb04fd3bac..00000000000
--- a/arch/arm/mach-zynq/include/mach/irqs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/irqs.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_IRQS_H
-#define __MACH_IRQS_H
-
-#define ARCH_NR_GPIOS 118
-#define NR_IRQS (128 + ARCH_NR_GPIOS)
-
-#endif
diff --git a/arch/arm/mach-zynq/include/mach/timex.h b/arch/arm/mach-zynq/include/mach/timex.h
deleted file mode 100644
index 6c0245e42a5..00000000000
--- a/arch/arm/mach-zynq/include/mach/timex.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/timex.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_TIMEX_H__
-#define __MACH_TIMEX_H__
-
-/* the following is needed for the system to build but will be removed
- in the future, the value is not important but won't hurt
-*/
-#define CLOCK_TICK_RATE (100 * HZ)
-
-#endif
diff --git a/arch/arm/mach-zynq/include/mach/uart.h b/arch/arm/mach-zynq/include/mach/uart.h
deleted file mode 100644
index 5c47c97156f..00000000000
--- a/arch/arm/mach-zynq/include/mach/uart.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/uart.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_UART_H__
-#define __MACH_UART_H__
-
-#define UART_CR_OFFSET 0x00 /* Control Register [8:0] */
-#define UART_SR_OFFSET 0x2C /* Channel Status [11:0] */
-#define UART_FIFO_OFFSET 0x30 /* FIFO [15:0] or [7:0] */
-
-#define UART_SR_TXFULL 0x00000010 /* TX FIFO full */
-#define UART_SR_TXEMPTY 0x00000008 /* TX FIFO empty */
-
-#endif
diff --git a/arch/arm/mach-zynq/include/mach/uncompress.h b/arch/arm/mach-zynq/include/mach/uncompress.h
deleted file mode 100644
index af4e8447bfa..00000000000
--- a/arch/arm/mach-zynq/include/mach/uncompress.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/uncompress.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_UNCOMPRESS_H__
-#define __MACH_UNCOMPRESS_H__
-
-#include <linux/io.h>
-#include <asm/processor.h>
-#include <mach/zynq_soc.h>
-#include <mach/uart.h>
-
-void arch_decomp_setup(void)
-{
-}
-
-static inline void flush(void)
-{
- /*
- * Wait while the FIFO is not empty
- */
- while (!(__raw_readl(IOMEM(LL_UART_PADDR + UART_SR_OFFSET)) &
- UART_SR_TXEMPTY))
- cpu_relax();
-}
-
-#define arch_decomp_wdog()
-
-static void putc(char ch)
-{
- /*
- * Wait for room in the FIFO, then write the char into the FIFO
- */
- while (__raw_readl(IOMEM(LL_UART_PADDR + UART_SR_OFFSET)) &
- UART_SR_TXFULL)
- cpu_relax();
-
- __raw_writel(ch, IOMEM(LL_UART_PADDR + UART_FIFO_OFFSET));
-}
-
-#endif
diff --git a/arch/arm/mach-zynq/include/mach/zynq_soc.h b/arch/arm/mach-zynq/include/mach/zynq_soc.h
deleted file mode 100644
index 1b8bf0ecbcb..00000000000
--- a/arch/arm/mach-zynq/include/mach/zynq_soc.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/zynq_soc.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_XILINX_SOC_H__
-#define __MACH_XILINX_SOC_H__
-
-#include <asm/pgtable.h>
-
-#define PERIPHERAL_CLOCK_RATE 2500000
-
-/* Static peripheral mappings are mapped at the top of the vmalloc region. The
- * early uart mapping causes intermediate problems/failure at certain
- * addresses, including the very top of the vmalloc region. Map it at an
- * address that is known to work.
- */
-#define UART0_PHYS 0xE0000000
-#define UART0_SIZE SZ_4K
-#define UART0_VIRT 0xF0001000
-
-#define TTC0_PHYS 0xF8001000
-#define TTC0_SIZE SZ_4K
-#define TTC0_VIRT (VMALLOC_END - TTC0_SIZE)
-
-#define SCU_PERIPH_PHYS 0xF8F00000
-#define SCU_PERIPH_SIZE SZ_8K
-#define SCU_PERIPH_VIRT (TTC0_VIRT - SCU_PERIPH_SIZE)
-
-/* The following are intended for the devices that are mapped early */
-
-#define TTC0_BASE IOMEM(TTC0_VIRT)
-#define SCU_PERIPH_BASE IOMEM(SCU_PERIPH_VIRT)
-
-#define LL_UART_PADDR UART0_PHYS
-#define LL_UART_VADDR UART0_VIRT
-
-#endif
diff --git a/arch/arm/mach-zynq/timer.c b/arch/arm/mach-zynq/timer.c
index c2c96cc7d6e..de3df283da7 100644
--- a/arch/arm/mach-zynq/timer.c
+++ b/arch/arm/mach-zynq/timer.c
@@ -23,32 +23,14 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
-#include <asm/mach/time.h>
-#include <mach/zynq_soc.h>
#include "common.h"
-#define IRQ_TIMERCOUNTER0 42
-
-/*
- * This driver configures the 2 16-bit count-up timers as follows:
- *
- * T1: Timer 1, clocksource for generic timekeeping
- * T2: Timer 2, clockevent source for hrtimers
- * T3: Timer 3, <unused>
- *
- * The input frequency to the timer module for emulation is 2.5MHz which is
- * common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32,
- * the timers are clocked at 78.125KHz (12.8 us resolution).
- *
- * The input frequency to the timer module in silicon will be 200MHz. With the
- * pre-scaler of 32, the timers are clocked at 6.25MHz (160ns resolution).
- */
-#define XTTCPSS_CLOCKSOURCE 0 /* Timer 1 as a generic timekeeping */
-#define XTTCPSS_CLOCKEVENT 1 /* Timer 2 as a clock event */
-
-#define XTTCPSS_TIMER_BASE TTC0_BASE
-#define XTTCPCC_EVENT_TIMER_IRQ (IRQ_TIMERCOUNTER0 + 1)
/*
* Timer Register Offset Definitions of Timer 1, Increment base address by 4
* and use same offsets for Timer 2
@@ -65,9 +47,14 @@
#define XTTCPSS_CNT_CNTRL_DISABLE_MASK 0x1
-/* Setup the timers to use pre-scaling */
-
-#define TIMER_RATE (PERIPHERAL_CLOCK_RATE / 32)
+/* Setup the timers to use pre-scaling, using a fixed value for now that will
+ * work across most input frequencies, but it may need to be made more dynamic
+ */
+#define PRESCALE_EXPONENT 11 /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
+#define PRESCALE 2048 /* The exponent must match this */
+#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1)
+#define CLK_CNTRL_PRESCALE_EN 1
+#define CNT_CNTRL_RESET (1<<4)
/**
* struct xttcpss_timer - This definition defines local timer structure
@@ -75,11 +62,25 @@
* @base_addr: Base address of timer
**/
struct xttcpss_timer {
- void __iomem *base_addr;
+ void __iomem *base_addr;
};
-static struct xttcpss_timer timers[2];
-static struct clock_event_device xttcpss_clockevent;
+struct xttcpss_timer_clocksource {
+ struct xttcpss_timer xttc;
+ struct clocksource cs;
+};
+
+#define to_xttcpss_timer_clksrc(x) \
+ container_of(x, struct xttcpss_timer_clocksource, cs)
+
+struct xttcpss_timer_clockevent {
+ struct xttcpss_timer xttc;
+ struct clock_event_device ce;
+ struct clk *clk;
+};
+
+#define to_xttcpss_timer_clkevent(x) \
+ container_of(x, struct xttcpss_timer_clockevent, ce)
/**
* xttcpss_set_interval - Set the timer interval value
@@ -101,7 +102,7 @@ static void xttcpss_set_interval(struct xttcpss_timer *timer,
/* Reset the counter (0x10) so that it starts from 0, one-shot
mode makes this needed for timing to be right. */
- ctrl_reg |= 0x10;
+ ctrl_reg |= CNT_CNTRL_RESET;
ctrl_reg &= ~XTTCPSS_CNT_CNTRL_DISABLE_MASK;
__raw_writel(ctrl_reg, timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
}
@@ -116,90 +117,31 @@ static void xttcpss_set_interval(struct xttcpss_timer *timer,
**/
static irqreturn_t xttcpss_clock_event_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = &xttcpss_clockevent;
- struct xttcpss_timer *timer = dev_id;
+ struct xttcpss_timer_clockevent *xttce = dev_id;
+ struct xttcpss_timer *timer = &xttce->xttc;
/* Acknowledge the interrupt and call event handler */
__raw_writel(__raw_readl(timer->base_addr + XTTCPSS_ISR_OFFSET),
timer->base_addr + XTTCPSS_ISR_OFFSET);
- evt->event_handler(evt);
+ xttce->ce.event_handler(&xttce->ce);
return IRQ_HANDLED;
}
-static struct irqaction event_timer_irq = {
- .name = "xttcpss clockevent",
- .flags = IRQF_DISABLED | IRQF_TIMER,
- .handler = xttcpss_clock_event_interrupt,
-};
-
/**
- * xttcpss_timer_hardware_init - Initialize the timer hardware
- *
- * Initialize the hardware to start the clock source, get the clock
- * event timer ready to use, and hook up the interrupt.
- **/
-static void __init xttcpss_timer_hardware_init(void)
-{
- /* Setup the clock source counter to be an incrementing counter
- * with no interrupt and it rolls over at 0xFFFF. Pre-scale
- it by 32 also. Let it start running now.
- */
- timers[XTTCPSS_CLOCKSOURCE].base_addr = XTTCPSS_TIMER_BASE;
-
- __raw_writel(0x0, timers[XTTCPSS_CLOCKSOURCE].base_addr +
- XTTCPSS_IER_OFFSET);
- __raw_writel(0x9, timers[XTTCPSS_CLOCKSOURCE].base_addr +
- XTTCPSS_CLK_CNTRL_OFFSET);
- __raw_writel(0x10, timers[XTTCPSS_CLOCKSOURCE].base_addr +
- XTTCPSS_CNT_CNTRL_OFFSET);
-
- /* Setup the clock event timer to be an interval timer which
- * is prescaled by 32 using the interval interrupt. Leave it
- * disabled for now.
- */
-
- timers[XTTCPSS_CLOCKEVENT].base_addr = XTTCPSS_TIMER_BASE + 4;
-
- __raw_writel(0x23, timers[XTTCPSS_CLOCKEVENT].base_addr +
- XTTCPSS_CNT_CNTRL_OFFSET);
- __raw_writel(0x9, timers[XTTCPSS_CLOCKEVENT].base_addr +
- XTTCPSS_CLK_CNTRL_OFFSET);
- __raw_writel(0x1, timers[XTTCPSS_CLOCKEVENT].base_addr +
- XTTCPSS_IER_OFFSET);
-
- /* Setup IRQ the clock event timer */
- event_timer_irq.dev_id = &timers[XTTCPSS_CLOCKEVENT];
- setup_irq(XTTCPCC_EVENT_TIMER_IRQ, &event_timer_irq);
-}
-
-/**
- * __raw_readl_cycles - Reads the timer counter register
+ * __xttc_clocksource_read - Reads the timer counter register
*
* returns: Current timer counter register value
**/
-static cycle_t __raw_readl_cycles(struct clocksource *cs)
+static cycle_t __xttc_clocksource_read(struct clocksource *cs)
{
- struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKSOURCE];
+ struct xttcpss_timer *timer = &to_xttcpss_timer_clksrc(cs)->xttc;
return (cycle_t)__raw_readl(timer->base_addr +
XTTCPSS_COUNT_VAL_OFFSET);
}
-
-/*
- * Instantiate and initialize the clock source structure
- */
-static struct clocksource clocksource_xttcpss = {
- .name = "xttcpss_timer1",
- .rating = 200, /* Reasonable clock source */
- .read = __raw_readl_cycles,
- .mask = CLOCKSOURCE_MASK(16),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-
/**
* xttcpss_set_next_event - Sets the time interval for next event
*
@@ -211,7 +153,8 @@ static struct clocksource clocksource_xttcpss = {
static int xttcpss_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT];
+ struct xttcpss_timer_clockevent *xttce = to_xttcpss_timer_clkevent(evt);
+ struct xttcpss_timer *timer = &xttce->xttc;
xttcpss_set_interval(timer, cycles);
return 0;
@@ -226,12 +169,15 @@ static int xttcpss_set_next_event(unsigned long cycles,
static void xttcpss_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT];
+ struct xttcpss_timer_clockevent *xttce = to_xttcpss_timer_clkevent(evt);
+ struct xttcpss_timer *timer = &xttce->xttc;
u32 ctrl_reg;
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- xttcpss_set_interval(timer, TIMER_RATE / HZ);
+ xttcpss_set_interval(timer,
+ DIV_ROUND_CLOSEST(clk_get_rate(xttce->clk),
+ PRESCALE * HZ));
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
@@ -252,15 +198,106 @@ static void xttcpss_set_mode(enum clock_event_mode mode,
}
}
-/*
- * Instantiate and initialize the clock event structure
- */
-static struct clock_event_device xttcpss_clockevent = {
- .name = "xttcpss_timer2",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = xttcpss_set_next_event,
- .set_mode = xttcpss_set_mode,
- .rating = 200,
+static void __init zynq_ttc_setup_clocksource(struct device_node *np,
+ void __iomem *base)
+{
+ struct xttcpss_timer_clocksource *ttccs;
+ struct clk *clk;
+ int err;
+ u32 reg;
+
+ ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
+ if (WARN_ON(!ttccs))
+ return;
+
+ err = of_property_read_u32(np, "reg", &reg);
+ if (WARN_ON(err))
+ return;
+
+ clk = of_clk_get_by_name(np, "cpu_1x");
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ err = clk_prepare_enable(clk);
+ if (WARN_ON(err))
+ return;
+
+ ttccs->xttc.base_addr = base + reg * 4;
+
+ ttccs->cs.name = np->name;
+ ttccs->cs.rating = 200;
+ ttccs->cs.read = __xttc_clocksource_read;
+ ttccs->cs.mask = CLOCKSOURCE_MASK(16);
+ ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ __raw_writel(0x0, ttccs->xttc.base_addr + XTTCPSS_IER_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttccs->xttc.base_addr + XTTCPSS_CLK_CNTRL_OFFSET);
+ __raw_writel(CNT_CNTRL_RESET,
+ ttccs->xttc.base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
+
+ err = clocksource_register_hz(&ttccs->cs, clk_get_rate(clk) / PRESCALE);
+ if (WARN_ON(err))
+ return;
+}
+
+static void __init zynq_ttc_setup_clockevent(struct device_node *np,
+ void __iomem *base)
+{
+ struct xttcpss_timer_clockevent *ttcce;
+ int err, irq;
+ u32 reg;
+
+ ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
+ if (WARN_ON(!ttcce))
+ return;
+
+ err = of_property_read_u32(np, "reg", &reg);
+ if (WARN_ON(err))
+ return;
+
+ ttcce->xttc.base_addr = base + reg * 4;
+
+ ttcce->clk = of_clk_get_by_name(np, "cpu_1x");
+ if (WARN_ON(IS_ERR(ttcce->clk)))
+ return;
+
+ err = clk_prepare_enable(ttcce->clk);
+ if (WARN_ON(err))
+ return;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (WARN_ON(!irq))
+ return;
+
+ ttcce->ce.name = np->name;
+ ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ttcce->ce.set_next_event = xttcpss_set_next_event;
+ ttcce->ce.set_mode = xttcpss_set_mode;
+ ttcce->ce.rating = 200;
+ ttcce->ce.irq = irq;
+
+ __raw_writel(0x23, ttcce->xttc.base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
+ __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+ ttcce->xttc.base_addr + XTTCPSS_CLK_CNTRL_OFFSET);
+ __raw_writel(0x1, ttcce->xttc.base_addr + XTTCPSS_IER_OFFSET);
+
+ err = request_irq(irq, xttcpss_clock_event_interrupt, IRQF_TIMER,
+ np->name, ttcce);
+ if (WARN_ON(err))
+ return;
+
+ clockevents_config_and_register(&ttcce->ce,
+ clk_get_rate(ttcce->clk) / PRESCALE,
+ 1, 0xfffe);
+}
+
+static const __initconst struct of_device_id zynq_ttc_match[] = {
+ { .compatible = "xlnx,ttc-counter-clocksource",
+ .data = zynq_ttc_setup_clocksource, },
+ { .compatible = "xlnx,ttc-counter-clockevent",
+ .data = zynq_ttc_setup_clockevent, },
+ {}
};
/**
@@ -269,30 +306,27 @@ static struct clock_event_device xttcpss_clockevent = {
 * Initializes the timer hardware and registers the clock source and clock event
 * timers with the Linux kernel timer framework
**/
-static void __init xttcpss_timer_init(void)
+void __init xttcpss_timer_init(void)
{
- xttcpss_timer_hardware_init();
- clocksource_register_hz(&clocksource_xttcpss, TIMER_RATE);
-
- /* Calculate the parameters to allow the clockevent to operate using
- integer math
- */
- clockevents_calc_mult_shift(&xttcpss_clockevent, TIMER_RATE, 4);
-
- xttcpss_clockevent.max_delta_ns =
- clockevent_delta2ns(0xfffe, &xttcpss_clockevent);
- xttcpss_clockevent.min_delta_ns =
- clockevent_delta2ns(1, &xttcpss_clockevent);
-
- /* Indicate that clock event is on 1st CPU as SMP boot needs it */
-
- xttcpss_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&xttcpss_clockevent);
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "xlnx,ttc") {
+ struct device_node *np_chld;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ for_each_available_child_of_node(np, np_chld) {
+ int (*cb)(struct device_node *np, void __iomem *base);
+ const struct of_device_id *match;
+
+ match = of_match_node(zynq_ttc_match, np_chld);
+ if (match) {
+ cb = match->data;
+ cb(np_chld, base);
+ }
+ }
+ }
}
-
-/*
- * Instantiate and initialize the system timer structure
- */
-struct sys_timer xttcpss_sys_timer = {
- .init = xttcpss_timer_init,
-};
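
The rewrite drops the file-scope timers[] array and the single global clock_event_device in favour of per-instance wrappers; each callback recovers its instance with container_of() on the embedded member. The pattern in isolation, with hypothetical names.

#include <linux/clockchips.h>
#include <linux/kernel.h>

struct foo_timer {
	void __iomem *base;
	struct clock_event_device ce;	/* embedded, passed to callbacks */
};

static int foo_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	/* recover the per-instance wrapper from the embedded member */
	struct foo_timer *timer = container_of(evt, struct foo_timer, ce);

	/* program timer->base for 'cycles' ticks here */
	return 0;
}
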
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 94186b6c685..3fd629d5a51 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -352,6 +352,10 @@ config CPU_PJ4
select ARM_THUMBEE
select CPU_V7
+config CPU_PJ4B
+ bool
+ select CPU_V7
+
# ARMv6
config CPU_V6
bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index cd956647c21..7539ec27506 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -44,8 +44,10 @@ ENDPROC(v7_flush_icache_all)
ENTRY(v7_flush_dcache_louis)
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
- ands r3, r0, #0xe00000 @ extract LoUIS from clidr
- mov r3, r3, lsr #20 @ r3 = LoUIS * 2
+ ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
+ ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
+ ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
+ ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
moveq pc, lr @ return if level == 0
mov r10, #0 @ r10 (starting level) = 0
b flush_levels @ start flushing cache levels
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 58bc3e4d3bd..6b2fb87c869 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -124,8 +124,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -971,7 +969,7 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
@@ -1036,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1050,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
if (!pages)
return NULL;
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+ {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
while (count) {
int j, order = __fls(count);
@@ -1083,14 +1099,21 @@ error:
return NULL;
}
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
{
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i;
- for (i = 0; i < count; i++)
- if (pages[i])
- __free_pages(pages[i], 0);
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
if (array_size <= PAGE_SIZE)
kfree(pages);
else
@@ -1252,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
- pages = __iommu_alloc_buffer(dev, size, gfp);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
return NULL;
@@ -1273,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
@@ -1329,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
__iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
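
With the hunks above, the ARM IOMMU allocator honours DMA_ATTR_FORCE_CONTIGUOUS by taking the whole buffer from the contiguous (CMA) allocator instead of assembling it page by page. A sketch of how a caller behind an arm_iommu mapping could request that behaviour through the dma_attrs interface of this kernel generation; the function name is hypothetical.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *foo_alloc_contig(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	/* ends up in __iommu_alloc_buffer(), which now uses
	 * dma_alloc_from_contiguous() for this attribute */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}
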
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 42cc833aa02..350f6a74992 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -169,6 +169,63 @@ __v7_ca15mp_setup:
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
mcreq p15, 0, r0, c1, c0, 1
#endif
+
+__v7_pj4b_setup:
+#ifdef CONFIG_CPU_PJ4B
+
+/* Auxiliary Debug Modes Control 1 Register */
+#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
+#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
+#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
+#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
+
+/* Auxiliary Debug Modes Control 2 Register */
+#define PJ4B_FAST_LDR (1 << 23) /* Disable fast LDR */
+#define PJ4B_SNOOP_DATA (1 << 25) /* Do not interleave write and snoop data */
+#define PJ4B_CWF (1 << 27) /* Disable Critical Word First feature */
+#define PJ4B_OUTSDNG_NC (1 << 29) /* Disable outstanding non cacheable rqst */
+#define PJ4B_L1_REP_RR (1 << 30) /* L1 replacement - Strict round robin */
+#define PJ4B_AUX_DBG_CTRL2 (PJ4B_SNOOP_DATA | PJ4B_CWF |\
+ PJ4B_OUTSDNG_NC | PJ4B_L1_REP_RR)
+
+/* Auxiliary Functional Modes Control Register 0 */
+#define PJ4B_SMP_CFB (1 << 1) /* Set SMP mode. Join the coherency fabric */
+#define PJ4B_L1_PAR_CHK (1 << 2) /* Support L1 parity checking */
+#define PJ4B_BROADCAST_CACHE (1 << 8) /* Broadcast Cache and TLB maintenance */
+
+/* Auxiliary Debug Modes Control 0 Register */
+#define PJ4B_WFI_WFE (1 << 22) /* WFI/WFE - serve the DVM and back to idle */
+
+ /* Auxiliary Debug Modes Control 1 Register */
+ mrc p15, 1, r0, c15, c1, 1
+ orr r0, r0, #PJ4B_CLEAN_LINE
+ orr r0, r0, #PJ4B_BCK_OFF_STREX
+ orr r0, r0, #PJ4B_INTER_PARITY
+ bic r0, r0, #PJ4B_STATIC_BP
+ mcr p15, 1, r0, c15, c1, 1
+
+ /* Auxiliary Debug Modes Control 2 Register */
+ mrc p15, 1, r0, c15, c1, 2
+ bic r0, r0, #PJ4B_FAST_LDR
+ orr r0, r0, #PJ4B_AUX_DBG_CTRL2
+ mcr p15, 1, r0, c15, c1, 2
+
+ /* Auxiliary Functional Modes Control Register 0 */
+ mrc p15, 1, r0, c15, c2, 0
+#ifdef CONFIG_SMP
+ orr r0, r0, #PJ4B_SMP_CFB
+#endif
+ orr r0, r0, #PJ4B_L1_PAR_CHK
+ orr r0, r0, #PJ4B_BROADCAST_CACHE
+ mcr p15, 1, r0, c15, c2, 0
+
+ /* Auxiliary Debug Modes Control 0 Register */
+ mrc p15, 1, r0, c15, c1, 0
+ orr r0, r0, #PJ4B_WFI_WFE
+ mcr p15, 1, r0, c15, c1, 0
+
+#endif /* CONFIG_CPU_PJ4B */
+
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
@@ -342,6 +399,16 @@ __v7_ca9mp_proc_info:
.long 0xff0ffff0
__v7_proc __v7_ca9mp_setup
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
+ /*
+ * Marvell PJ4B processor.
+ */
+ .type __v7_pj4b_proc_info, #object
+__v7_pj4b_proc_info:
+ .long 0x562f5840
+ .long 0xfffffff0
+ __v7_proc __v7_pj4b_setup
+ .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
#endif /* CONFIG_ARM_LPAE */
/*
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
deleted file mode 100644
index 19f55cae5d7..00000000000
--- a/arch/arm/plat-nomadik/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-# We keep common IP's here for Nomadik and other similar
-# familiy of processors from ST-Ericsson. At the moment we have
-# just MTU, others to follow soon.
-
-config PLAT_NOMADIK
- bool
- depends on ARCH_NOMADIK || ARCH_U8500
- default y
- select CLKSRC_MMIO
- help
- Common platform code for Nomadik and other ST-Ericsson
- platforms.
-
-if PLAT_NOMADIK
-
-config HAS_MTU
- bool
- help
- Support for Multi Timer Unit. MTU provides access
- to multiple interrupt generating programmable
- 32-bit free running decrementing counters.
-
-config NOMADIK_MTU_SCHED_CLOCK
- bool
- depends on HAS_MTU
- help
- Use the Multi Timer Unit as the sched_clock.
-
-endif
diff --git a/arch/arm/plat-nomadik/Makefile b/arch/arm/plat-nomadik/Makefile
deleted file mode 100644
index 37c7cdd0f8f..00000000000
--- a/arch/arm/plat-nomadik/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# arch/arm/plat-nomadik/Makefile
-# Copyright 2009 ST-Ericsson
-# Licensed under GPLv2
-
-obj-$(CONFIG_HAS_MTU) += timer.o
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 8d885848600..a14a78a2f14 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := sram.o dma.o fb.o counter_32k.o
+obj-y := sram.o dma.o counter_32k.o
obj-m :=
obj-n :=
obj- :=
@@ -11,7 +11,6 @@ obj- :=
# omap_device support (OMAP2+ only at the moment)
obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
-obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
obj-y += $(i2c-omap-m) $(i2c-omap-y)
diff --git a/arch/arm/plat-omap/debug-devices.c b/arch/arm/plat-omap/debug-devices.c
deleted file mode 100644
index a609e216181..00000000000
--- a/arch/arm/plat-omap/debug-devices.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/debug-devices.c
- *
- * Copyright (C) 2005 Nokia Corporation
- * Modified from mach-omap2/board-h4.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/smc91x.h>
-
-#include <plat/debug-devices.h>
-
-/* Many OMAP development platforms reuse the same "debug board"; these
- * platforms include H2, H3, H4, and Perseus2.
- */
-
-static struct smc91x_platdata smc91x_info = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
- .leda = RPC_LED_100_10,
- .ledb = RPC_LED_TX_RX,
-};
-
-static struct resource smc91x_resources[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
- },
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = -1,
- .dev = {
- .platform_data = &smc91x_info,
- },
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
-};
-
-static struct resource led_resources[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device led_device = {
- .name = "omap_dbg_led",
- .id = -1,
- .num_resources = ARRAY_SIZE(led_resources),
- .resource = led_resources,
-};
-
-static struct platform_device *debug_devices[] __initdata = {
- &smc91x_device,
- &led_device,
- /* ps2 kbd + mouse ports */
- /* 4 extra uarts */
- /* 6 input dip switches */
- /* 8 output pins */
-};
-
-int __init debug_card_init(u32 addr, unsigned gpio)
-{
- int status;
-
- smc91x_resources[0].start = addr + 0x300;
- smc91x_resources[0].end = addr + 0x30f;
-
- smc91x_resources[1].start = gpio_to_irq(gpio);
- smc91x_resources[1].end = gpio_to_irq(gpio);
-
- status = gpio_request(gpio, "SMC91x irq");
- if (status < 0) {
- printk(KERN_ERR "GPIO%d unavailable for smc91x IRQ\n", gpio);
- return status;
- }
- gpio_direction_input(gpio);
-
- led_resources[0].start = addr;
- led_resources[0].end = addr + SZ_4K - 1;
-
- return platform_add_devices(debug_devices, ARRAY_SIZE(debug_devices));
-}
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c288b76f8e6..37a488aaa2b 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -36,7 +36,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
/*
* MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 89585c29355..d51b75bdcad 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -898,19 +898,8 @@ static struct platform_driver omap_dm_timer_driver = {
},
};
-static int __init omap_dm_timer_driver_init(void)
-{
- return platform_driver_register(&omap_dm_timer_driver);
-}
-
-static void __exit omap_dm_timer_driver_exit(void)
-{
- platform_driver_unregister(&omap_dm_timer_driver);
-}
-
early_platform_init("earlytimer", &omap_dm_timer_driver);
-module_init(omap_dm_timer_driver_init);
-module_exit(omap_dm_timer_driver_exit);
+module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
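
The dmtimer hunk above drops the hand-written init/exit registration functions in favour of module_platform_driver(), which expands to the same module_init()/module_exit() pair. A minimal sketch of the pattern, using a hypothetical driver name that is not taken from this patch:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* nothing to set up in this sketch */
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name	= "example",
			.owner	= THIS_MODULE,
		},
	};

	/* Registers the driver on module load and unregisters it on unload,
	 * replacing the open-coded module_init()/module_exit() wrappers. */
	module_platform_driver(example_driver);

	MODULE_LICENSE("GPL");

The dmtimer driver keeps its early_platform_init() call, since early registration is not covered by the helper.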
diff --git a/arch/arm/plat-omap/include/plat-omap/dma-omap.h b/arch/arm/plat-omap/include/plat-omap/dma-omap.h
deleted file mode 100644
index 6f506ba9e45..00000000000
--- a/arch/arm/plat-omap/include/plat-omap/dma-omap.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * OMAP DMA handling defines and function
- *
- * Copyright (C) 2003 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/platform_device.h>
-
-#define INT_DMA_LCD 25
-
-#define OMAP1_DMA_TOUT_IRQ (1 << 0)
-#define OMAP_DMA_DROP_IRQ (1 << 1)
-#define OMAP_DMA_HALF_IRQ (1 << 2)
-#define OMAP_DMA_FRAME_IRQ (1 << 3)
-#define OMAP_DMA_LAST_IRQ (1 << 4)
-#define OMAP_DMA_BLOCK_IRQ (1 << 5)
-#define OMAP1_DMA_SYNC_IRQ (1 << 6)
-#define OMAP2_DMA_PKT_IRQ (1 << 7)
-#define OMAP2_DMA_TRANS_ERR_IRQ (1 << 8)
-#define OMAP2_DMA_SECURE_ERR_IRQ (1 << 9)
-#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10)
-#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
-
-#define OMAP_DMA_CCR_EN (1 << 7)
-#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
-#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
-#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
-#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
-
-#define OMAP_DMA_DATA_TYPE_S8 0x00
-#define OMAP_DMA_DATA_TYPE_S16 0x01
-#define OMAP_DMA_DATA_TYPE_S32 0x02
-
-#define OMAP_DMA_SYNC_ELEMENT 0x00
-#define OMAP_DMA_SYNC_FRAME 0x01
-#define OMAP_DMA_SYNC_BLOCK 0x02
-#define OMAP_DMA_SYNC_PACKET 0x03
-
-#define OMAP_DMA_DST_SYNC_PREFETCH 0x02
-#define OMAP_DMA_SRC_SYNC 0x01
-#define OMAP_DMA_DST_SYNC 0x00
-
-#define OMAP_DMA_PORT_EMIFF 0x00
-#define OMAP_DMA_PORT_EMIFS 0x01
-#define OMAP_DMA_PORT_OCP_T1 0x02
-#define OMAP_DMA_PORT_TIPB 0x03
-#define OMAP_DMA_PORT_OCP_T2 0x04
-#define OMAP_DMA_PORT_MPUI 0x05
-
-#define OMAP_DMA_AMODE_CONSTANT 0x00
-#define OMAP_DMA_AMODE_POST_INC 0x01
-#define OMAP_DMA_AMODE_SINGLE_IDX 0x02
-#define OMAP_DMA_AMODE_DOUBLE_IDX 0x03
-
-#define DMA_DEFAULT_FIFO_DEPTH 0x10
-#define DMA_DEFAULT_ARB_RATE 0x01
-/* Pass THREAD_RESERVE ORed with THREAD_FIFO for tparams */
-#define DMA_THREAD_RESERVE_NORM (0x00 << 12) /* Def */
-#define DMA_THREAD_RESERVE_ONET (0x01 << 12)
-#define DMA_THREAD_RESERVE_TWOT (0x02 << 12)
-#define DMA_THREAD_RESERVE_THREET (0x03 << 12)
-#define DMA_THREAD_FIFO_NONE (0x00 << 14) /* Def */
-#define DMA_THREAD_FIFO_75 (0x01 << 14)
-#define DMA_THREAD_FIFO_25 (0x02 << 14)
-#define DMA_THREAD_FIFO_50 (0x03 << 14)
-
-/* DMA4_OCP_SYSCONFIG bits */
-#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12)
-#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8)
-#define DMA_SYSCONFIG_EMUFREE (1 << 5)
-#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3)
-#define DMA_SYSCONFIG_SOFTRESET (1 << 2)
-#define DMA_SYSCONFIG_AUTOIDLE (1 << 0)
-
-#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12)
-#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3)
-
-#define DMA_IDLEMODE_SMARTIDLE 0x2
-#define DMA_IDLEMODE_NO_IDLE 0x1
-#define DMA_IDLEMODE_FORCE_IDLE 0x0
-
-/* Chaining modes*/
-#ifndef CONFIG_ARCH_OMAP1
-#define OMAP_DMA_STATIC_CHAIN 0x1
-#define OMAP_DMA_DYNAMIC_CHAIN 0x2
-#define OMAP_DMA_CHAIN_ACTIVE 0x1
-#define OMAP_DMA_CHAIN_INACTIVE 0x0
-#endif
-
-#define DMA_CH_PRIO_HIGH 0x1
-#define DMA_CH_PRIO_LOW 0x0 /* Def */
-
-/* Errata handling */
-#define IS_DMA_ERRATA(id) (errata & (id))
-#define SET_DMA_ERRATA(id) (errata |= (id))
-
-#define DMA_ERRATA_IFRAME_BUFFERING BIT(0x0)
-#define DMA_ERRATA_PARALLEL_CHANNELS BIT(0x1)
-#define DMA_ERRATA_i378 BIT(0x2)
-#define DMA_ERRATA_i541 BIT(0x3)
-#define DMA_ERRATA_i88 BIT(0x4)
-#define DMA_ERRATA_3_3 BIT(0x5)
-#define DMA_ROMCODE_BUG BIT(0x6)
-
-/* Attributes for OMAP DMA Contrller */
-#define DMA_LINKED_LCH BIT(0x0)
-#define GLOBAL_PRIORITY BIT(0x1)
-#define RESERVE_CHANNEL BIT(0x2)
-#define IS_CSSA_32 BIT(0x3)
-#define IS_CDSA_32 BIT(0x4)
-#define IS_RW_PRIORITY BIT(0x5)
-#define ENABLE_1510_MODE BIT(0x6)
-#define SRC_PORT BIT(0x7)
-#define DST_PORT BIT(0x8)
-#define SRC_INDEX BIT(0x9)
-#define DST_INDEX BIT(0xa)
-#define IS_BURST_ONLY4 BIT(0xb)
-#define CLEAR_CSR_ON_READ BIT(0xc)
-#define IS_WORD_16 BIT(0xd)
-#define ENABLE_16XX_MODE BIT(0xe)
-#define HS_CHANNELS_RESERVED BIT(0xf)
-
-/* Defines for DMA Capabilities */
-#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
-#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
-#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
-
-enum omap_reg_offsets {
-
-GCR, GSCR, GRST1, HW_ID,
-PCH2_ID, PCH0_ID, PCH1_ID, PCHG_ID,
-PCHD_ID, CAPS_0, CAPS_1, CAPS_2,
-CAPS_3, CAPS_4, PCH2_SR, PCH0_SR,
-PCH1_SR, PCHD_SR, REVISION, IRQSTATUS_L0,
-IRQSTATUS_L1, IRQSTATUS_L2, IRQSTATUS_L3, IRQENABLE_L0,
-IRQENABLE_L1, IRQENABLE_L2, IRQENABLE_L3, SYSSTATUS,
-OCP_SYSCONFIG,
-
-/* omap1+ specific */
-CPC, CCR2, LCH_CTRL,
-
-/* Common registers for all omap's */
-CSDP, CCR, CICR, CSR,
-CEN, CFN, CSFI, CSEI,
-CSAC, CDAC, CDEI,
-CDFI, CLNK_CTRL,
-
-/* Channel specific registers */
-CSSA, CDSA, COLOR,
-CCEN, CCFN,
-
-/* omap3630 and omap4 specific */
-CDP, CNDP, CCDN,
-
-};
-
-enum omap_dma_burst_mode {
- OMAP_DMA_DATA_BURST_DIS = 0,
- OMAP_DMA_DATA_BURST_4,
- OMAP_DMA_DATA_BURST_8,
- OMAP_DMA_DATA_BURST_16,
-};
-
-enum end_type {
- OMAP_DMA_LITTLE_ENDIAN = 0,
- OMAP_DMA_BIG_ENDIAN
-};
-
-enum omap_dma_color_mode {
- OMAP_DMA_COLOR_DIS = 0,
- OMAP_DMA_CONSTANT_FILL,
- OMAP_DMA_TRANSPARENT_COPY
-};
-
-enum omap_dma_write_mode {
- OMAP_DMA_WRITE_NON_POSTED = 0,
- OMAP_DMA_WRITE_POSTED,
- OMAP_DMA_WRITE_LAST_NON_POSTED
-};
-
-enum omap_dma_channel_mode {
- OMAP_DMA_LCH_2D = 0,
- OMAP_DMA_LCH_G,
- OMAP_DMA_LCH_P,
- OMAP_DMA_LCH_PD
-};
-
-struct omap_dma_channel_params {
- int data_type; /* data type 8,16,32 */
- int elem_count; /* number of elements in a frame */
- int frame_count; /* number of frames in a element */
-
- int src_port; /* Only on OMAP1 REVISIT: Is this needed? */
- int src_amode; /* constant, post increment, indexed,
- double indexed */
- unsigned long src_start; /* source address : physical */
- int src_ei; /* source element index */
- int src_fi; /* source frame index */
-
- int dst_port; /* Only on OMAP1 REVISIT: Is this needed? */
- int dst_amode; /* constant, post increment, indexed,
- double indexed */
- unsigned long dst_start; /* source address : physical */
- int dst_ei; /* source element index */
- int dst_fi; /* source frame index */
-
- int trigger; /* trigger attached if the channel is
- synchronized */
- int sync_mode; /* sycn on element, frame , block or packet */
- int src_or_dst_synch; /* source synch(1) or destination synch(0) */
-
- int ie; /* interrupt enabled */
-
- unsigned char read_prio;/* read priority */
- unsigned char write_prio;/* write priority */
-
-#ifndef CONFIG_ARCH_OMAP1
- enum omap_dma_burst_mode burst_mode; /* Burst mode 4/8/16 words */
-#endif
-};
-
-struct omap_dma_lch {
- int next_lch;
- int dev_id;
- u16 saved_csr;
- u16 enabled_irqs;
- const char *dev_name;
- void (*callback)(int lch, u16 ch_status, void *data);
- void *data;
- long flags;
- /* required for Dynamic chaining */
- int prev_linked_ch;
- int next_linked_ch;
- int state;
- int chain_id;
- int status;
-};
-
-struct omap_dma_dev_attr {
- u32 dev_caps;
- u16 lch_count;
- u16 chan_count;
- struct omap_dma_lch *chan;
-};
-
-/* System DMA platform data structure */
-struct omap_system_dma_plat_info {
- struct omap_dma_dev_attr *dma_attr;
- u32 errata;
- void (*disable_irq_lch)(int lch);
- void (*show_dma_caps)(void);
- void (*clear_lch_regs)(int lch);
- void (*clear_dma)(int lch);
- void (*dma_write)(u32 val, int reg, int lch);
- u32 (*dma_read)(int reg, int lch);
-};
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
-#define dma_omap2plus() 1
-#else
-#define dma_omap2plus() 0
-#endif
-#define dma_omap1() (!dma_omap2plus())
-#define dma_omap15xx() ((dma_omap1() && (d->dev_caps & ENABLE_1510_MODE)))
-#define dma_omap16xx() ((dma_omap1() && (d->dev_caps & ENABLE_16XX_MODE)))
-
-extern void omap_set_dma_priority(int lch, int dst_port, int priority);
-extern int omap_request_dma(int dev_id, const char *dev_name,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch);
-extern void omap_enable_dma_irq(int ch, u16 irq_bits);
-extern void omap_disable_dma_irq(int ch, u16 irq_bits);
-extern void omap_free_dma(int ch);
-extern void omap_start_dma(int lch);
-extern void omap_stop_dma(int lch);
-extern void omap_set_dma_transfer_params(int lch, int data_type,
- int elem_count, int frame_count,
- int sync_mode,
- int dma_trigger, int src_or_dst_synch);
-extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
- u32 color);
-extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
-extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
-
-extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
- unsigned long src_start,
- int src_ei, int src_fi);
-extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
-extern void omap_set_dma_src_data_pack(int lch, int enable);
-extern void omap_set_dma_src_burst_mode(int lch,
- enum omap_dma_burst_mode burst_mode);
-
-extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
- unsigned long dest_start,
- int dst_ei, int dst_fi);
-extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
-extern void omap_set_dma_dest_data_pack(int lch, int enable);
-extern void omap_set_dma_dest_burst_mode(int lch,
- enum omap_dma_burst_mode burst_mode);
-
-extern void omap_set_dma_params(int lch,
- struct omap_dma_channel_params *params);
-
-extern void omap_dma_link_lch(int lch_head, int lch_queue);
-extern void omap_dma_unlink_lch(int lch_head, int lch_queue);
-
-extern int omap_set_dma_callback(int lch,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data);
-extern dma_addr_t omap_get_dma_src_pos(int lch);
-extern dma_addr_t omap_get_dma_dst_pos(int lch);
-extern void omap_clear_dma(int lch);
-extern int omap_get_dma_active_status(int lch);
-extern int omap_dma_running(void);
-extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
- int tparams);
-extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
- unsigned char write_prio);
-extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype);
-extern void omap_set_dma_src_endian_type(int lch, enum end_type etype);
-extern int omap_get_dma_index(int lch, int *ei, int *fi);
-
-void omap_dma_global_context_save(void);
-void omap_dma_global_context_restore(void);
-
-extern void omap_dma_disable_irq(int lch);
-
-/* Chaining APIs */
-#ifndef CONFIG_ARCH_OMAP1
-extern int omap_request_dma_chain(int dev_id, const char *dev_name,
- void (*callback) (int lch, u16 ch_status,
- void *data),
- int *chain_id, int no_of_chans,
- int chain_mode,
- struct omap_dma_channel_params params);
-extern int omap_free_dma_chain(int chain_id);
-extern int omap_dma_chain_a_transfer(int chain_id, int src_start,
- int dest_start, int elem_count,
- int frame_count, void *callbk_data);
-extern int omap_start_dma_chain_transfers(int chain_id);
-extern int omap_stop_dma_chain_transfers(int chain_id);
-extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi);
-extern int omap_get_dma_chain_dst_pos(int chain_id);
-extern int omap_get_dma_chain_src_pos(int chain_id);
-
-extern int omap_modify_dma_chain_params(int chain_id,
- struct omap_dma_channel_params params);
-extern int omap_dma_chain_status(int chain_id);
-#endif
-
-#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_FB_OMAP)
-#include <mach/lcd_dma.h>
-#else
-static inline int omap_lcd_dma_running(void)
-{
- return 0;
-}
-#endif
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index b4516aba67e..c9a66bf36c9 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -32,8 +32,4 @@
#include <mach/soc.h>
#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
-#include "../../mach-omap2/soc.h"
-#endif
-
#endif
diff --git a/arch/arm/plat-omap/include/plat/debug-devices.h b/arch/arm/plat-omap/include/plat/debug-devices.h
deleted file mode 100644
index 8fc4287222d..00000000000
--- a/arch/arm/plat-omap/include/plat/debug-devices.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* for TI reference platforms sharing the same debug card */
-extern int debug_card_init(u32 addr, unsigned gpio);
diff --git a/arch/arm/plat-omap/include/plat/vram.h b/arch/arm/plat-omap/include/plat/vram.h
deleted file mode 100644
index 4d65b7d06e6..00000000000
--- a/arch/arm/plat-omap/include/plat/vram.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * VRAM manager for OMAP
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __OMAP_VRAM_H__
-#define __OMAP_VRAM_H__
-
-#include <linux/types.h>
-
-extern int omap_vram_add_region(unsigned long paddr, size_t size);
-extern int omap_vram_free(unsigned long paddr, size_t size);
-extern int omap_vram_alloc(size_t size, unsigned long *paddr);
-extern int omap_vram_reserve(unsigned long paddr, size_t size);
-extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram,
- unsigned long *largest_free_block);
-
-#ifdef CONFIG_OMAP2_VRAM
-extern void omap_vram_set_sdram_vram(u32 size, u32 start);
-
-extern void omap_vram_reserve_sdram_memblock(void);
-#else
-static inline void omap_vram_set_sdram_vram(u32 size, u32 start) { }
-
-static inline void omap_vram_reserve_sdram_memblock(void) { }
-#endif
-
-#endif
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
index a7b8060c293..febe3862873 100644
--- a/arch/arm/plat-orion/addr-map.c
+++ b/arch/arm/plat-orion/addr-map.c
@@ -42,6 +42,8 @@ EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
#define WIN_REMAP_LO_OFF 0x0008
#define WIN_REMAP_HI_OFF 0x000c
+#define ATTR_HW_COHERENCY (0x1 << 4)
+
/*
* Default implementation
*/
@@ -163,6 +165,8 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
w = &orion_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0xf & ~(1 << i);
+ if (cfg->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & 0xffff0000;
w->size = (size | 0x0000ffff) + 1;
}
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index b8a688cad4c..2d4b6414609 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -606,26 +606,6 @@ void __init orion_wdt_init(void)
****************************************************************************/
static u64 orion_xor_dmamask = DMA_BIT_MASK(32);
-void __init orion_xor_init_channels(
- struct mv_xor_platform_data *orion_xor0_data,
- struct platform_device *orion_xor0_channel,
- struct mv_xor_platform_data *orion_xor1_data,
- struct platform_device *orion_xor1_channel)
-{
- /*
- * two engines can't do memset simultaneously, this limitation
- * satisfied by removing memset support from one of the engines.
- */
- dma_cap_set(DMA_MEMCPY, orion_xor0_data->cap_mask);
- dma_cap_set(DMA_XOR, orion_xor0_data->cap_mask);
- platform_device_register(orion_xor0_channel);
-
- dma_cap_set(DMA_MEMCPY, orion_xor1_data->cap_mask);
- dma_cap_set(DMA_MEMSET, orion_xor1_data->cap_mask);
- dma_cap_set(DMA_XOR, orion_xor1_data->cap_mask);
- platform_device_register(orion_xor1_channel);
-}
-
/*****************************************************************************
* XOR0
****************************************************************************/
@@ -636,61 +616,30 @@ static struct resource orion_xor0_shared_resources[] = {
}, {
.name = "xor 0 high",
.flags = IORESOURCE_MEM,
+ }, {
+ .name = "irq channel 0",
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "irq channel 1",
+ .flags = IORESOURCE_IRQ,
},
};
-static struct platform_device orion_xor0_shared = {
- .name = MV_XOR_SHARED_NAME,
- .id = 0,
- .num_resources = ARRAY_SIZE(orion_xor0_shared_resources),
- .resource = orion_xor0_shared_resources,
-};
+static struct mv_xor_channel_data orion_xor0_channels_data[2];
-static struct resource orion_xor00_resources[] = {
- [0] = {
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mv_xor_platform_data orion_xor00_data = {
- .shared = &orion_xor0_shared,
- .hw_id = 0,
- .pool_size = PAGE_SIZE,
+static struct mv_xor_platform_data orion_xor0_pdata = {
+ .channels = orion_xor0_channels_data,
};
-static struct platform_device orion_xor00_channel = {
+static struct platform_device orion_xor0_shared = {
.name = MV_XOR_NAME,
.id = 0,
- .num_resources = ARRAY_SIZE(orion_xor00_resources),
- .resource = orion_xor00_resources,
- .dev = {
- .dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .platform_data = &orion_xor00_data,
- },
-};
-
-static struct resource orion_xor01_resources[] = {
- [0] = {
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mv_xor_platform_data orion_xor01_data = {
- .shared = &orion_xor0_shared,
- .hw_id = 1,
- .pool_size = PAGE_SIZE,
-};
-
-static struct platform_device orion_xor01_channel = {
- .name = MV_XOR_NAME,
- .id = 1,
- .num_resources = ARRAY_SIZE(orion_xor01_resources),
- .resource = orion_xor01_resources,
- .dev = {
- .dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .platform_data = &orion_xor01_data,
+ .num_resources = ARRAY_SIZE(orion_xor0_shared_resources),
+ .resource = orion_xor0_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ .platform_data = &orion_xor0_pdata,
},
};
@@ -704,15 +653,23 @@ void __init orion_xor0_init(unsigned long mapbase_low,
orion_xor0_shared_resources[1].start = mapbase_high;
orion_xor0_shared_resources[1].end = mapbase_high + 0xff;
- orion_xor00_resources[0].start = irq_0;
- orion_xor00_resources[0].end = irq_0;
- orion_xor01_resources[0].start = irq_1;
- orion_xor01_resources[0].end = irq_1;
+ orion_xor0_shared_resources[2].start = irq_0;
+ orion_xor0_shared_resources[2].end = irq_0;
+ orion_xor0_shared_resources[3].start = irq_1;
+ orion_xor0_shared_resources[3].end = irq_1;
- platform_device_register(&orion_xor0_shared);
+ /*
+	 * The two engines can't do memset simultaneously; this limitation
+	 * is satisfied by removing memset support from one of the engines.
+ */
+ dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
+ dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
+
+ dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
+ dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
+ dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
- orion_xor_init_channels(&orion_xor00_data, &orion_xor00_channel,
- &orion_xor01_data, &orion_xor01_channel);
+ platform_device_register(&orion_xor0_shared);
}
/*****************************************************************************
@@ -725,61 +682,30 @@ static struct resource orion_xor1_shared_resources[] = {
}, {
.name = "xor 1 high",
.flags = IORESOURCE_MEM,
+ }, {
+ .name = "irq channel 0",
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "irq channel 1",
+ .flags = IORESOURCE_IRQ,
},
};
-static struct platform_device orion_xor1_shared = {
- .name = MV_XOR_SHARED_NAME,
- .id = 1,
- .num_resources = ARRAY_SIZE(orion_xor1_shared_resources),
- .resource = orion_xor1_shared_resources,
-};
-
-static struct resource orion_xor10_resources[] = {
- [0] = {
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mv_xor_platform_data orion_xor10_data = {
- .shared = &orion_xor1_shared,
- .hw_id = 0,
- .pool_size = PAGE_SIZE,
-};
-
-static struct platform_device orion_xor10_channel = {
- .name = MV_XOR_NAME,
- .id = 2,
- .num_resources = ARRAY_SIZE(orion_xor10_resources),
- .resource = orion_xor10_resources,
- .dev = {
- .dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .platform_data = &orion_xor10_data,
- },
-};
-
-static struct resource orion_xor11_resources[] = {
- [0] = {
- .flags = IORESOURCE_IRQ,
- },
-};
+static struct mv_xor_channel_data orion_xor1_channels_data[2];
-static struct mv_xor_platform_data orion_xor11_data = {
- .shared = &orion_xor1_shared,
- .hw_id = 1,
- .pool_size = PAGE_SIZE,
+static struct mv_xor_platform_data orion_xor1_pdata = {
+ .channels = orion_xor1_channels_data,
};
-static struct platform_device orion_xor11_channel = {
+static struct platform_device orion_xor1_shared = {
.name = MV_XOR_NAME,
- .id = 3,
- .num_resources = ARRAY_SIZE(orion_xor11_resources),
- .resource = orion_xor11_resources,
- .dev = {
- .dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .platform_data = &orion_xor11_data,
+ .id = 1,
+ .num_resources = ARRAY_SIZE(orion_xor1_shared_resources),
+ .resource = orion_xor1_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ .platform_data = &orion_xor1_pdata,
},
};
@@ -793,15 +719,23 @@ void __init orion_xor1_init(unsigned long mapbase_low,
orion_xor1_shared_resources[1].start = mapbase_high;
orion_xor1_shared_resources[1].end = mapbase_high + 0xff;
- orion_xor10_resources[0].start = irq_0;
- orion_xor10_resources[0].end = irq_0;
- orion_xor11_resources[0].start = irq_1;
- orion_xor11_resources[0].end = irq_1;
+ orion_xor1_shared_resources[2].start = irq_0;
+ orion_xor1_shared_resources[2].end = irq_0;
+ orion_xor1_shared_resources[3].start = irq_1;
+ orion_xor1_shared_resources[3].end = irq_1;
- platform_device_register(&orion_xor1_shared);
+ /*
+	 * The two engines can't do memset simultaneously; this limitation
+	 * is satisfied by removing memset support from one of the engines.
+ */
+ dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
+ dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
- orion_xor_init_channels(&orion_xor10_data, &orion_xor10_channel,
- &orion_xor11_data, &orion_xor11_channel);
+ dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
+ dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
+ dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
+
+ platform_device_register(&orion_xor1_shared);
}
/*****************************************************************************
diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
index ec63e4a627d..b76c06569fe 100644
--- a/arch/arm/plat-orion/include/plat/addr-map.h
+++ b/arch/arm/plat-orion/include/plat/addr-map.h
@@ -17,6 +17,7 @@ struct orion_addr_map_cfg {
const int num_wins; /* Total number of windows */
const int remappable_wins;
void __iomem *bridge_virt_base;
+ int hw_io_coherency;
/* If NULL, the default cpu_win_can_remap will be used, using
the value in remappable_wins */
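
The new hw_io_coherency field lets machine code ask for the ATTR_HW_COHERENCY attribute on its DRAM windows (see the addr-map.c hunk above). A minimal sketch of a platform opting in; the structure layout comes from this header, while the name and window counts are purely illustrative:

	#include <plat/addr-map.h>

	static struct orion_addr_map_cfg example_addr_map_cfg __initdata = {
		.num_wins	  = 20,
		.remappable_wins  = 8,
		.hw_io_coherency  = 1,	/* request ATTR_HW_COHERENCY on DRAM windows */
	};

The platform's setup code would then pass this config to orion_setup_cpu_mbus_target() as before.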
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index 6bbc3fe5f58..e06fc5fefa1 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -12,6 +12,7 @@
#include <linux/mv643xx_eth.h>
struct dsa_platform_data;
+struct mv_sata_platform_data;
void __init orion_uart0_init(void __iomem *membase,
resource_size_t mapbase,
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index 1867944415c..8db0b981ca6 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -41,7 +41,7 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
static int __init orion_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- int i = 0, irq_gpio;
+ int i = 0;
void __iomem *base;
do {
@@ -54,10 +54,6 @@ static int __init orion_add_irq_domain(struct device_node *np,
irq_domain_add_legacy(np, i * 32, 0, 0,
&irq_domain_simple_ops, NULL);
-
- irq_gpio = i * 32;
- orion_gpio_of_init(irq_gpio);
-
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 0abd1c46988..ba3e76c9550 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -325,7 +325,7 @@ static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
chan->state = S3C2410_DMA_RUNNING;
- /* check wether there is anything to load, and if not, see
+ /* check whether there is anything to load, and if not, see
* if we can find anything to load
*/
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 012bbd0b8d8..47c9fad43f0 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -389,6 +389,72 @@ int __init s3c24xx_register_baseclocks(unsigned long xtal)
static struct dentry *clk_debugfs_root;
+static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
+{
+ struct clk *child;
+ const char *state;
+ char buf[255] = { 0 };
+ int n = 0;
+
+ if (c->name)
+ n = snprintf(buf, sizeof(buf) - 1, "%s", c->name);
+
+ if (c->devname)
+ n += snprintf(buf + n, sizeof(buf) - 1 - n, ":%s", c->devname);
+
+ state = (c->usage > 0) ? "on" : "off";
+
+ seq_printf(s, "%*s%-*s %-6s %-3d %-10lu\n",
+ level * 3 + 1, "",
+ 50 - level * 3, buf,
+ state, c->usage, clk_get_rate(c));
+
+ list_for_each_entry(child, &clocks, list) {
+ if (child->parent != c)
+ continue;
+
+ clock_tree_show_one(s, child, level + 1);
+ }
+}
+
+static int clock_tree_show(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ unsigned long flags;
+
+ seq_printf(s, " clock state ref rate\n");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ spin_lock_irqsave(&clocks_lock, flags);
+
+ list_for_each_entry(c, &clocks, list)
+ if (c->parent == NULL)
+ clock_tree_show_one(s, c, 0);
+
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ return 0;
+}
+
+static int clock_tree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clock_tree_show, inode->i_private);
+}
+
+static const struct file_operations clock_tree_fops = {
+ .open = clock_tree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int clock_rate_show(void *data, u64 *val)
+{
+ struct clk *c = data;
+ *val = clk_get_rate(c);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_rate_show, NULL, "%llu\n");
+
static int clk_debugfs_register_one(struct clk *c)
{
int err;
@@ -411,7 +477,7 @@ static int clk_debugfs_register_one(struct clk *c)
goto err_out;
}
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+ d = debugfs_create_file("rate", S_IRUGO, c->dent, c, &clock_rate_fops);
if (!d) {
err = -ENOMEM;
goto err_out;
@@ -446,13 +512,18 @@ static int __init clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
- int err;
+ int err = -ENOMEM;
d = debugfs_create_dir("clock", NULL);
if (!d)
return -ENOMEM;
clk_debugfs_root = d;
+ d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
+ &clock_tree_fops);
+ if (!d)
+ goto err_out;
+
list_for_each_entry(c, &clocks, list) {
err = clk_debugfs_register(c);
if (err)
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index a17d7b3e372..51afedda9ab 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -146,15 +146,6 @@ struct platform_device s3c_device_camif = {
/* ASOC DMA */
-struct platform_device samsung_asoc_dma = {
- .name = "samsung-audio",
- .id = -1,
- .dev = {
- .dma_mask = &samsung_device_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
-
struct platform_device samsung_asoc_idma = {
.name = "samsung-idma",
.id = -1,
@@ -929,6 +920,7 @@ struct platform_device s5p_device_mfc_r = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
+
#endif /* CONFIG_S5P_DEV_MFC */
/* MIPI CSIS */
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index a9b8096b825..87d501ff332 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -132,9 +132,6 @@ extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
extern struct platform_device exynos4_device_spdif;
-extern struct platform_device exynos_device_drm;
-
-extern struct platform_device samsung_asoc_dma;
extern struct platform_device samsung_asoc_idma;
extern struct platform_device samsung_device_keypad;
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index 1fe6917f6a2..f7a3ea2c498 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -11,6 +11,9 @@
* published by the Free Software Foundation.
*/
+#ifndef __PLAT_SAMSUNG_GPIO_CORE_H
+#define __PLAT_SAMSUNG_GPIO_CORE_H
+
#define GPIOCON_OFF (0x00)
#define GPIODAT_OFF (0x04)
@@ -48,6 +51,7 @@ struct samsung_gpio_cfg;
* @config: special function and pull-resistor control information.
* @lock: Lock for exclusive access to this gpio bank.
* @pm_save: Save information for suspend/resume support.
+ * @bitmap_gpio_int: Bitmap of the GPIOs in this bank that are used as interrupts.
*
* This wrapper provides the necessary information for the Samsung
* specific gpios being registered with gpiolib.
@@ -71,6 +75,7 @@ struct samsung_gpio_chip {
#ifdef CONFIG_PM
u32 pm_save[4];
#endif
+ u32 bitmap_gpio_int;
};
static inline struct samsung_gpio_chip *to_samsung_gpio(struct gpio_chip *gpc)
@@ -122,3 +127,5 @@ extern struct samsung_gpio_pm samsung_gpio_pm_4bit;
/* locking wrappers to deal with multiple access to the same gpio bank */
#define samsung_gpio_lock(_oc, _fl) spin_lock_irqsave(&(_oc)->lock, _fl)
#define samsung_gpio_unlock(_oc, _fl) spin_unlock_irqrestore(&(_oc)->lock, _fl)
+
+#endif /* __PLAT_SAMSUNG_GPIO_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/mfc.h b/arch/arm/plat-samsung/include/plat/mfc.h
index ac13227272f..e6d7c42d68b 100644
--- a/arch/arm/plat-samsung/include/plat/mfc.h
+++ b/arch/arm/plat-samsung/include/plat/mfc.h
@@ -10,6 +10,14 @@
#ifndef __PLAT_SAMSUNG_MFC_H
#define __PLAT_SAMSUNG_MFC_H __FILE__
+struct s5p_mfc_dt_meminfo {
+ unsigned long loff;
+ unsigned long lsize;
+ unsigned long roff;
+ unsigned long rsize;
+ char *compatible;
+};
+
/**
* s5p_mfc_reserve_mem - function to early reserve memory for MFC driver
* @rbase: base address for MFC 'right' memory interface
@@ -24,4 +32,7 @@
void __init s5p_mfc_reserve_mem(phys_addr_t rbase, unsigned int rsize,
phys_addr_t lbase, unsigned int lsize);
+int __init s5p_fdt_find_mfc_mem(unsigned long node, const char *uname,
+ int depth, void *data);
+
#endif /* __PLAT_SAMSUNG_MFC_H */
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 61fc53740fb..887a0c95437 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -107,10 +107,12 @@ extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
#ifdef CONFIG_PM
+extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
extern int s3c24xx_irq_suspend(void);
extern void s3c24xx_irq_resume(void);
#else
+#define s3c_irq_wake NULL
#define s3c_irqext_wake NULL
#define s3c24xx_irq_suspend NULL
#define s3c24xx_irq_resume NULL
diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
index ad6089465e2..5ec104b5408 100644
--- a/arch/arm/plat-samsung/s5p-dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -14,6 +14,8 @@
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
#include <mach/map.h>
#include <plat/devs.h>
@@ -69,3 +71,35 @@ static int __init s5p_mfc_memory_init(void)
return 0;
}
device_initcall(s5p_mfc_memory_init);
+
+#ifdef CONFIG_OF
+int __init s5p_fdt_find_mfc_mem(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ __be32 *prop;
+ unsigned long len;
+ struct s5p_mfc_dt_meminfo *mfc_mem = data;
+
+ if (!data)
+ return 0;
+
+ if (!of_flat_dt_is_compatible(node, mfc_mem->compatible))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "samsung,mfc-l", &len);
+ if (!prop || (len != 2 * sizeof(unsigned long)))
+ return 0;
+
+ mfc_mem->loff = be32_to_cpu(prop[0]);
+ mfc_mem->lsize = be32_to_cpu(prop[1]);
+
+ prop = of_get_flat_dt_prop(node, "samsung,mfc-r", &len);
+ if (!prop || (len != 2 * sizeof(unsigned long)))
+ return 0;
+
+ mfc_mem->roff = be32_to_cpu(prop[0]);
+ mfc_mem->rsize = be32_to_cpu(prop[1]);
+
+ return 1;
+}
+#endif
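
s5p_fdt_find_mfc_mem() is written as an of_scan_flat_dt() callback (same node/uname/depth/data signature). A minimal usage sketch; the calling function and the compatible string are assumptions, not part of this hunk:

	#include <linux/of_fdt.h>
	#include <plat/mfc.h>

	static void __init example_reserve_mfc_from_dt(void)
	{
		struct s5p_mfc_dt_meminfo mfc_mem = {
			.compatible = "samsung,mfc-v6",	/* hypothetical compatible */
		};

		/* of_scan_flat_dt() walks the flattened device tree and stops
		 * when the callback returns non-zero, i.e. once both the left
		 * and right memory regions have been found. */
		if (of_scan_flat_dt(s5p_fdt_find_mfc_mem, &mfc_mem))
			s5p_mfc_reserve_mem(mfc_mem.roff, mfc_mem.rsize,
					    mfc_mem.loff, mfc_mem.lsize);
	}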
diff --git a/arch/arm/plat-samsung/s5p-irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
index 23557d30e44..bae56131a50 100644
--- a/arch/arm/plat-samsung/s5p-irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -185,7 +185,7 @@ int __init s5p_register_gpio_interrupt(int pin)
/* check if the group has been already registered */
if (my_chip->irq_base)
- return my_chip->irq_base + offset;
+ goto success;
/* register gpio group */
ret = s5p_gpioint_add(my_chip);
@@ -193,9 +193,13 @@ int __init s5p_register_gpio_interrupt(int pin)
my_chip->chip.to_irq = samsung_gpiolib_to_irq;
printk(KERN_INFO "Registered interrupt support for gpio group %d.\n",
group);
- return my_chip->irq_base + offset;
+ goto success;
}
return ret;
+success:
+ my_chip->bitmap_gpio_int |= BIT(offset);
+
+ return my_chip->irq_base + offset;
}
int __init s5p_register_gpioint_bank(int chain_irq, int start, int nr_groups)
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index 2607bd05c52..01e88532a5d 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -5,5 +5,5 @@
# Common support
obj-y := restart.o time.o
-obj-$(CONFIG_ARCH_SPEAR3XX) += pl080.o shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += pl080.o
obj-$(CONFIG_ARCH_SPEAR6XX) += pl080.o
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
deleted file mode 100644
index 853e891e118..00000000000
--- a/arch/arm/plat-spear/shirq.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * arch/arm/plat-spear/shirq.c
- *
- * SPEAr platform shared irq layer source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/spinlock.h>
-#include <plat/shirq.h>
-
-struct spear_shirq *shirq;
-static DEFINE_SPINLOCK(lock);
-
-static void shirq_irq_mask(struct irq_data *d)
-{
- struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
- u32 val, id = d->irq - shirq->dev_config[0].virq;
- unsigned long flags;
-
- if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
- return;
-
- spin_lock_irqsave(&lock, flags);
- val = readl(shirq->regs.base + shirq->regs.enb_reg);
- if (shirq->regs.reset_to_enb)
- val |= shirq->dev_config[id].enb_mask;
- else
- val &= ~(shirq->dev_config[id].enb_mask);
- writel(val, shirq->regs.base + shirq->regs.enb_reg);
- spin_unlock_irqrestore(&lock, flags);
-}
-
-static void shirq_irq_unmask(struct irq_data *d)
-{
- struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
- u32 val, id = d->irq - shirq->dev_config[0].virq;
- unsigned long flags;
-
- if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
- return;
-
- spin_lock_irqsave(&lock, flags);
- val = readl(shirq->regs.base + shirq->regs.enb_reg);
- if (shirq->regs.reset_to_enb)
- val &= ~(shirq->dev_config[id].enb_mask);
- else
- val |= shirq->dev_config[id].enb_mask;
- writel(val, shirq->regs.base + shirq->regs.enb_reg);
- spin_unlock_irqrestore(&lock, flags);
-}
-
-static struct irq_chip shirq_chip = {
- .name = "spear_shirq",
- .irq_ack = shirq_irq_mask,
- .irq_mask = shirq_irq_mask,
- .irq_unmask = shirq_irq_unmask,
-};
-
-static void shirq_handler(unsigned irq, struct irq_desc *desc)
-{
- u32 i, val, mask;
- struct spear_shirq *shirq = irq_get_handler_data(irq);
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
- shirq->regs.status_reg_mask)) {
- for (i = 0; (i < shirq->dev_count) && val; i++) {
- if (!(shirq->dev_config[i].status_mask & val))
- continue;
-
- generic_handle_irq(shirq->dev_config[i].virq);
-
- /* clear interrupt */
- val &= ~shirq->dev_config[i].status_mask;
- if ((shirq->regs.clear_reg == -1) ||
- shirq->dev_config[i].clear_mask == -1)
- continue;
- mask = readl(shirq->regs.base + shirq->regs.clear_reg);
- if (shirq->regs.reset_to_clear)
- mask &= ~shirq->dev_config[i].clear_mask;
- else
- mask |= shirq->dev_config[i].clear_mask;
- writel(mask, shirq->regs.base + shirq->regs.clear_reg);
- }
- }
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
-}
-
-int spear_shirq_register(struct spear_shirq *shirq)
-{
- int i;
-
- if (!shirq || !shirq->dev_config || !shirq->regs.base)
- return -EFAULT;
-
- if (!shirq->dev_count)
- return -EINVAL;
-
- irq_set_chained_handler(shirq->irq, shirq_handler);
- for (i = 0; i < shirq->dev_count; i++) {
- irq_set_chip_and_handler(shirq->dev_config[i].virq,
- &shirq_chip, handle_simple_irq);
- set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID);
- irq_set_chip_data(shirq->dev_config[i].virq, shirq);
- }
-
- irq_set_handler_data(shirq->irq, shirq);
- return 0;
-}
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f5760927544..7a32976fa2a 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -8,6 +8,8 @@
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <linux/interrupt.h>
@@ -17,6 +19,8 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/mm.h>
+
struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);
@@ -29,6 +33,10 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+/* These are unused until we support booting "pre-ballooned" */
+unsigned long xen_released_pages;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -38,15 +46,106 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
static __read_mostly int xen_events_irq = -1;
+/* map fgmfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+ unsigned int domid)
+{
+ int rc;
+ struct xen_add_to_physmap_range xatp = {
+ .domid = DOMID_SELF,
+ .foreign_domid = domid,
+ .size = 1,
+ .space = XENMAPSPACE_gmfn_foreign,
+ };
+ xen_ulong_t idx = fgmfn;
+ xen_pfn_t gpfn = lpfn;
+
+ set_xen_guest_handle(xatp.idxs, &idx);
+ set_xen_guest_handle(xatp.gpfns, &gpfn);
+
+ rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+ if (rc) {
+ pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
+ rc, lpfn, fgmfn);
+ return 1;
+ }
+ return 0;
+}
+
+struct remap_data {
+ xen_pfn_t fgmfn; /* foreign domain's gmfn */
+ pgprot_t prot;
+ domid_t domid;
+ struct vm_area_struct *vma;
+ int index;
+ struct page **pages;
+ struct xen_remap_mfn_info *info;
+};
+
+static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct remap_data *info = data;
+ struct page *page = info->pages[info->index++];
+ unsigned long pfn = page_to_pfn(page);
+ pte_t pte = pfn_pte(pfn, info->prot);
+
+ if (map_foreign_page(pfn, info->fgmfn, info->domid))
+ return -EFAULT;
+ set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+
+ return 0;
+}
+
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid)
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
{
- return -ENOSYS;
+ int err;
+ struct remap_data data;
+
+	/* TBD: batching; the current sole caller only maps one page at a time */
+ if (nr > 1)
+ return -EINVAL;
+
+ data.fgmfn = mfn;
+ data.prot = prot;
+ data.domid = domid;
+ data.vma = vma;
+ data.index = 0;
+ data.pages = pages;
+ err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+ remap_pte_fn, &data);
+ return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct xen_remove_from_physmap xrp;
+ unsigned long rc, pfn;
+
+ pfn = page_to_pfn(pages[i]);
+
+ xrp.domid = DOMID_SELF;
+ xrp.gpfn = pfn;
+ rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+ if (rc) {
+ pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
+ pfn, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+
/*
* see Documentation/devicetree/bindings/arm/xen.txt for the
* documentation of the Xen Device Tree format.
@@ -149,24 +248,6 @@ static int __init xen_init_events(void)
}
postcore_initcall(xen_init_events);
-/* XXX: only until balloon is properly working */
-int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
-{
- *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
- get_order(nr_pages));
- if (*pages == NULL)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);
-
-void free_xenballooned_pages(int nr_pages, struct page **pages)
-{
- kfree(*pages);
- *pages = NULL;
-}
-EXPORT_SYMBOL_GPL(free_xenballooned_pages);
-
/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
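
With the local alloc_xenballooned_pages() stub gone, callers are expected to get backing pages from the real balloon driver and hand them to the new remap/unmap pair. A simplified sketch of one such caller; the function name and error handling are assumptions, with the real consumer being something like the privcmd driver:

	#include <linux/mm.h>
	#include <xen/balloon.h>
	#include <xen/page.h>
	#include <xen/xen-ops.h>

	static int example_map_foreign_frame(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t mfn,
					     unsigned int domid)
	{
		struct page *page;
		int rc;

		/* Back the VMA with a ballooned page for the foreign frame. */
		rc = alloc_xenballooned_pages(1, &page, false);
		if (rc)
			return rc;

		rc = xen_remap_domain_mfn_range(vma, addr, mfn, 1,
						vma->vm_page_prot, domid, &page);
		if (rc)
			free_xenballooned_pages(1, &page);

		return rc;
	}

Teardown would go the other way: xen_unmap_domain_mfn_range() on the pages, then free_xenballooned_pages() to hand them back to the balloon.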
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f9ccff91591..9c829b00826 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -8,8 +8,6 @@ config ARM64
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_KERNEL_EXECVE
- select GENERIC_KERNEL_THREAD
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 37e610dc084..d9ec40217a2 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -209,10 +209,11 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
+#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+
static inline void __user *arch_compat_alloc_user_space(long len)
{
- struct pt_regs *regs = task_pt_regs(current);
- return (void __user *)regs->compat_sp - len;
+ return (void __user *)compat_user_stack_pointer() - len;
}
struct compat_ipc64_perm {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 538f4b44db5..99477689419 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,6 +50,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dev_addr);
return ops->mapping_error(dev, dev_addr);
}
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index d69aeea6da1..744087fb521 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -20,6 +20,7 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
@@ -27,6 +28,5 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#endif
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index f7b05edf8ce..26e9c4eeaba 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,21 +28,6 @@
#include <asm/cacheflush.h>
#include <asm/unistd32.h>
-asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
- set_fs(old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
-}
-
static inline void
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index e40c9bd7914..2ae6591b3a5 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -17,8 +17,6 @@ config AVR32
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
diff --git a/arch/avr32/include/asm/ptrace.h b/arch/avr32/include/asm/ptrace.h
index 8d3c412fc65..630e4f9bf5f 100644
--- a/arch/avr32/include/asm/ptrace.h
+++ b/arch/avr32/include/asm/ptrace.h
@@ -21,6 +21,7 @@
#define user_mode(regs) (((regs)->sr & MODE_MASK) == MODE_USER)
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
static __inline__ int valid_user_regs(struct pt_regs *regs)
{
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index f05a9804e8e..0bdf6371574 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -39,7 +39,6 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/avr32/include/uapi/asm/signal.h b/arch/avr32/include/uapi/asm/signal.h
index eb46f61adb7..1b77a93eff5 100644
--- a/arch/avr32/include/uapi/asm/signal.h
+++ b/arch/avr32/include/uapi/asm/signal.h
@@ -89,12 +89,6 @@ typedef unsigned long sigset_t;
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index ab9ff4075f4..b6f3ad5441c 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -45,8 +45,6 @@ config BLACKFIN
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config GENERIC_CSUM
def_bool y
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 27d70759474..127826f8a37 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
generic-y += auxvec.h
generic-y += bitsperlong.h
@@ -17,6 +16,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kvm_para.h
generic-y += local64.h
generic-y += local.h
generic-y += mman.h
@@ -44,7 +44,3 @@ generic-y += ucontext.h
generic-y += unaligned.h
generic-y += user.h
generic-y += xor.h
-
-header-y += bfin_sport.h
-header-y += cachectl.h
-header-y += fixed_code.h
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index f8907ea6b5b..50b9dfd4839 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -5,65 +5,12 @@
*
* Licensed under the GPL-2 or later.
*/
-
#ifndef __BFIN_SPORT_H__
#define __BFIN_SPORT_H__
-/* Sport mode: it can be set to TDM, i2s or others */
-#define NORM_MODE 0x0
-#define TDM_MODE 0x1
-#define I2S_MODE 0x2
-#define NDSO_MODE 0x3
-
-/* Data format, normal, a-law or u-law */
-#define NORM_FORMAT 0x0
-#define ALAW_FORMAT 0x2
-#define ULAW_FORMAT 0x3
-
-/* Function driver which use sport must initialize the structure */
-struct sport_config {
- /* TDM (multichannels), I2S or other mode */
- unsigned int mode:3;
- unsigned int polled; /* use poll instead of irq when set */
-
- /* if TDM mode is selected, channels must be set */
- int channels; /* Must be in 8 units */
- unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */
-
- /* I2S mode */
- unsigned int right_first:1; /* Right stereo channel first */
-
- /* In mormal mode, the following item need to be set */
- unsigned int lsb_first:1; /* order of transmit or receive data */
- unsigned int fsync:1; /* Frame sync required */
- unsigned int data_indep:1; /* data independent frame sync generated */
- unsigned int act_low:1; /* Active low TFS */
- unsigned int late_fsync:1; /* Late frame sync */
- unsigned int tckfe:1;
- unsigned int sec_en:1; /* Secondary side enabled */
-
- /* Choose clock source */
- unsigned int int_clk:1; /* Internal or external clock */
-
- /* If external clock is used, the following fields are ignored */
- int serial_clk;
- int fsync_clk;
-
- unsigned int data_format:2; /* Normal, u-law or a-law */
-
- int word_len; /* How length of the word in bits, 3-32 bits */
- int dma_enabled;
-};
-
-/* Userspace interface */
-#define SPORT_IOC_MAGIC 'P'
-#define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config)
-#define SPORT_IOC_GET_SYSTEMCLOCK _IOR('P', 0x02, unsigned long)
-#define SPORT_IOC_SET_BAUDRATE _IOW('P', 0x03, unsigned long)
-
-#ifdef __KERNEL__
#include <linux/types.h>
+#include <uapi/asm/bfin_sport.h>
/*
* All Blackfin system MMRs are padded to 32bits even if the register
@@ -122,76 +69,3 @@ struct bfin_snd_platform_data {
})
#endif
-
-/* SPORT_TCR1 Masks */
-#define TSPEN 0x0001 /* TX enable */
-#define ITCLK 0x0002 /* Internal TX Clock Select */
-#define TDTYPE 0x000C /* TX Data Formatting Select */
-#define DTYPE_NORM 0x0000 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* TX Bit Order */
-#define ITFS 0x0200 /* Internal TX Frame Sync Select */
-#define TFSR 0x0400 /* TX Frame Sync Required Select */
-#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
-#define LTFS 0x1000 /* Low TX Frame Sync Select */
-#define LATFS 0x2000 /* Late TX Frame Sync Select */
-#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
-
-/* SPORT_TCR2 Masks */
-#define SLEN 0x001F /* SPORT TX Word Length (2 - 31) */
-#define DP_SLEN(x) BFIN_DEPOSIT(SLEN, x)
-#define EX_SLEN(x) BFIN_EXTRACT(SLEN, x)
-#define TXSE 0x0100 /* TX Secondary Enable */
-#define TSFSE 0x0200 /* TX Stereo Frame Sync Enable */
-#define TRFST 0x0400 /* TX Right-First Data Order */
-
-/* SPORT_RCR1 Masks */
-#define RSPEN 0x0001 /* RX enable */
-#define IRCLK 0x0002 /* Internal RX Clock Select */
-#define RDTYPE 0x000C /* RX Data Formatting Select */
-/* DTYPE_* defined above */
-#define RLSBIT 0x0010 /* RX Bit Order */
-#define IRFS 0x0200 /* Internal RX Frame Sync Select */
-#define RFSR 0x0400 /* RX Frame Sync Required Select */
-#define LRFS 0x1000 /* Low RX Frame Sync Select */
-#define LARFS 0x2000 /* Late RX Frame Sync Select */
-#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
-
-/* SPORT_RCR2 Masks */
-/* SLEN defined above */
-#define RXSE 0x0100 /* RX Secondary Enable */
-#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /* Right-First Data Order */
-
-/* SPORT_STAT Masks */
-#define RXNE 0x0001 /* RX FIFO Not Empty Status */
-#define RUVF 0x0002 /* RX Underflow Status */
-#define ROVF 0x0004 /* RX Overflow Status */
-#define TXF 0x0008 /* TX FIFO Full Status */
-#define TUVF 0x0010 /* TX Underflow Status */
-#define TOVF 0x0020 /* TX Overflow Status */
-#define TXHRE 0x0040 /* TX Hold Register Empty */
-
-/* SPORT_MCMC1 Masks */
-#define SP_WOFF 0x03FF /* Multichannel Window Offset Field */
-#define DP_SP_WOFF(x) BFIN_DEPOSIT(SP_WOFF, x)
-#define EX_SP_WOFF(x) BFIN_EXTRACT(SP_WOFF, x)
-#define SP_WSIZE 0xF000 /* Multichannel Window Size Field */
-#define DP_SP_WSIZE(x) BFIN_DEPOSIT(SP_WSIZE, x)
-#define EX_SP_WSIZE(x) BFIN_EXTRACT(SP_WSIZE, x)
-
-/* SPORT_MCMC2 Masks */
-#define MCCRM 0x0003 /* Multichannel Clock Recovery Mode */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
-#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
-#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
-#define MFD 0xF000 /* Multichannel Frame Delay */
-#define DP_MFD(x) BFIN_DEPOSIT(MFD, x)
-#define EX_MFD(x) BFIN_EXTRACT(MFD, x)
-
-#endif
diff --git a/arch/blackfin/include/asm/bfin_twi.h b/arch/blackfin/include/asm/bfin_twi.h
index f4a07278743..90c3c006557 100644
--- a/arch/blackfin/include/asm/bfin_twi.h
+++ b/arch/blackfin/include/asm/bfin_twi.h
@@ -61,7 +61,7 @@ struct bfin_twi_iface {
int cur_msg;
u16 saved_clkdiv;
u16 saved_control;
- struct bfin_twi_regs *regs_base;
+ struct bfin_twi_regs __iomem *regs_base;
};
#define DEFINE_TWI_REG(reg_name, reg) \
diff --git a/arch/blackfin/include/asm/fixed_code.h b/arch/blackfin/include/asm/fixed_code.h
index 5395088b2d0..bc330f06207 100644
--- a/arch/blackfin/include/asm/fixed_code.h
+++ b/arch/blackfin/include/asm/fixed_code.h
@@ -6,11 +6,11 @@
*
* Licensed under the GPL-2 or later.
*/
-
#ifndef __BFIN_ASM_FIXED_CODE_H__
#define __BFIN_ASM_FIXED_CODE_H__
-#ifdef __KERNEL__
+#include <uapi/asm/fixed_code.h>
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/ptrace.h>
@@ -28,29 +28,3 @@ extern void safe_user_instruction(void);
extern void sigreturn_stub(void);
#endif
#endif
-
-#ifndef CONFIG_PHY_RAM_BASE_ADDRESS
-#define CONFIG_PHY_RAM_BASE_ADDRESS 0x0
-#endif
-
-#define FIXED_CODE_START (CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
-
-#define SIGRETURN_STUB (CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
-
-#define ATOMIC_SEQS_START (CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
-
-#define ATOMIC_XCHG32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
-#define ATOMIC_CAS32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x420)
-#define ATOMIC_ADD32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x430)
-#define ATOMIC_SUB32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x440)
-#define ATOMIC_IOR32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x450)
-#define ATOMIC_AND32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x460)
-#define ATOMIC_XOR32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x470)
-
-#define ATOMIC_SEQS_END (CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
-
-#define SAFE_USER_INSTRUCTION (CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
-
-#define FIXED_CODE_END (CONFIG_PHY_RAM_BASE_ADDRESS + 0x490)
-
-#endif
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h
index dcca3e6d6e8..b8663921d3c 100644
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -83,8 +83,6 @@ PTE_BIT_FUNC(mkyoung, |= _PAGE_ACCESSED);
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
extern char empty_zero_page[];
-extern unsigned int kobjsize(const void *objp);
-
#define swapper_pg_dir ((pgd_t *) 0)
/*
* No page table caches to initialise.
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index 10d8641180f..c00491594b4 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -3,102 +3,13 @@
*
* Licensed under the GPL-2 or later.
*/
-
#ifndef _BFIN_PTRACE_H
#define _BFIN_PTRACE_H
-/*
- * GCC defines register number like this:
- * -----------------------------
- * 0 - 7 are data registers R0-R7
- * 8 - 15 are address registers P0-P7
- * 16 - 31 dsp registers I/B/L0 -- I/B/L3 & M0--M3
- * 32 - 33 A registers A0 & A1
- * 34 - status register
- * -----------------------------
- *
- * We follows above, except:
- * 32-33 --- Low 32-bit of A0&1
- * 34-35 --- High 8-bit of A0&1
- */
+#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-struct task_struct;
-
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-struct pt_regs {
- long orig_pc;
- long ipend;
- long seqstat;
- long rete;
- long retn;
- long retx;
- long pc; /* PC == RETI */
- long rets;
- long reserved; /* Used as scratch during system calls */
- long astat;
- long lb1;
- long lb0;
- long lt1;
- long lt0;
- long lc1;
- long lc0;
- long a1w;
- long a1x;
- long a0w;
- long a0x;
- long b3;
- long b2;
- long b1;
- long b0;
- long l3;
- long l2;
- long l1;
- long l0;
- long m3;
- long m2;
- long m1;
- long m0;
- long i3;
- long i2;
- long i1;
- long i0;
- long usp;
- long fp;
- long p5;
- long p4;
- long p3;
- long p2;
- long p1;
- long p0;
- long r7;
- long r6;
- long r5;
- long r4;
- long r3;
- long r2;
- long r1;
- long r0;
- long orig_r0;
- long orig_p0;
- long syscfg;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13 /* ptrace signal */
-
-#define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */
-#define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */
-#define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */
-
-#define PS_S (0x0002)
-
-#ifdef __KERNEL__
-
/* user_mode returns true if only one bit is set in IPEND, other than the
master interrupt enable. */
#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
@@ -106,6 +17,7 @@ struct pt_regs {
#define arch_has_single_step() (1)
/* common code demands this function */
#define ptrace_disable(child) user_disable_single_step(child)
+#define current_user_stack_pointer() rdusp()
extern int is_user_addr_valid(struct task_struct *child,
unsigned long start, unsigned long len);
@@ -126,75 +38,5 @@ extern int is_user_addr_valid(struct task_struct *child,
#include <asm-generic/ptrace.h>
-#endif /* __KERNEL__ */
-
#endif /* __ASSEMBLY__ */
-
-/*
- * Offsets used by 'ptrace' system call interface.
- */
-
-#define PT_R0 204
-#define PT_R1 200
-#define PT_R2 196
-#define PT_R3 192
-#define PT_R4 188
-#define PT_R5 184
-#define PT_R6 180
-#define PT_R7 176
-#define PT_P0 172
-#define PT_P1 168
-#define PT_P2 164
-#define PT_P3 160
-#define PT_P4 156
-#define PT_P5 152
-#define PT_FP 148
-#define PT_USP 144
-#define PT_I0 140
-#define PT_I1 136
-#define PT_I2 132
-#define PT_I3 128
-#define PT_M0 124
-#define PT_M1 120
-#define PT_M2 116
-#define PT_M3 112
-#define PT_L0 108
-#define PT_L1 104
-#define PT_L2 100
-#define PT_L3 96
-#define PT_B0 92
-#define PT_B1 88
-#define PT_B2 84
-#define PT_B3 80
-#define PT_A0X 76
-#define PT_A0W 72
-#define PT_A1X 68
-#define PT_A1W 64
-#define PT_LC0 60
-#define PT_LC1 56
-#define PT_LT0 52
-#define PT_LT1 48
-#define PT_LB0 44
-#define PT_LB1 40
-#define PT_ASTAT 36
-#define PT_RESERVED 32
-#define PT_RETS 28
-#define PT_PC 24
-#define PT_RETX 20
-#define PT_RETN 16
-#define PT_RETE 12
-#define PT_SEQSTAT 8
-#define PT_IPEND 4
-
-#define PT_ORIG_R0 208
-#define PT_ORIG_P0 212
-#define PT_SYSCFG 216
-#define PT_TEXT_ADDR 220
-#define PT_TEXT_END_ADDR 224
-#define PT_DATA_ADDR 228
-#define PT_FDPIC_EXEC 232
-#define PT_FDPIC_INTERP 236
-
-#define PT_LAST_PSEUDO PT_FDPIC_INTERP
-
#endif /* _BFIN_PTRACE_H */
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 5cc11150282..461bb542e2e 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -34,23 +34,6 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))
-static inline int is_in_rom(unsigned long addr)
-{
- /*
- * What we are really trying to do is determine if addr is
- * in an allocated kernel memory region. If not then assume
- * we cannot free it or otherwise de-allocate it. Ideally
- * we could restrict this to really being in a ROM or flash,
- * but that would need to be done on a board by board basis,
- * not globally.
- */
- if ((addr < _ramstart) || (addr >= _ramend))
- return (1);
-
- /* Default case, not in ROM */
- return (0);
-}
-
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
@@ -89,7 +72,7 @@ struct exception_table_entry {
({ \
int _err = 0; \
typeof(*(p)) _x = (x); \
- typeof(*(p)) *_p = (p); \
+ typeof(*(p)) __user *_p = (p); \
if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
_err = -EFAULT; \
} \
@@ -108,8 +91,8 @@ struct exception_table_entry {
long _xl, _xh; \
_xl = ((long *)&_x)[0]; \
_xh = ((long *)&_x)[1]; \
- __put_user_asm(_xl, ((long *)_p)+0, ); \
- __put_user_asm(_xh, ((long *)_p)+1, ); \
+ __put_user_asm(_xl, ((long __user *)_p)+0, ); \
+ __put_user_asm(_xh, ((long __user *)_p)+1, ); \
} break; \
default: \
_err = __put_user_bad(); \
@@ -136,7 +119,7 @@ static inline int bad_user_access_length(void)
* aliasing issues.
*/
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __force *)(x))
#define __put_user_asm(x,p,bhw) \
__asm__ (#bhw"[%1] = %0;\n\t" \
@@ -216,12 +199,12 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
*/
static inline long __must_check
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
{
char *tmp;
if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
- strncpy(dst, src, count);
+ strncpy(dst, (const char __force *)src, count);
for (tmp = dst; *tmp && count > 0; tmp++, count--) ;
return (tmp - dst);
}
@@ -237,18 +220,18 @@ strncpy_from_user(char *dst, const char *src, long count)
* On exception, returns 0.
* If the string is too long, returns a value greater than n.
*/
-static inline long __must_check strnlen_user(const char *src, long n)
+static inline long __must_check strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strnlen(src, n) + 1;
+ return strnlen((const char __force *)src, n) + 1;
}
-static inline long __must_check strlen_user(const char *src)
+static inline long __must_check strlen_user(const char __user *src)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strlen(src) + 1;
+ return strlen((const char __force *)src) + 1;
}
/*
@@ -256,11 +239,11 @@ static inline long __must_check strlen_user(const char *src)
*/
static inline unsigned long __must_check
-__clear_user(void *to, unsigned long n)
+__clear_user(void __user *to, unsigned long n)
{
if (!access_ok(VERIFY_WRITE, to, n))
return n;
- memset(to, 0, n);
+ memset((void __force *)to, 0, n);
return 0;
}
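The uaccess.h hunks above are sparse annotation fixes: pointers into user memory are tagged __user, and __force is used where the code deliberately drops that tag, which is safe on no-MMU Blackfin because user and kernel share one address space. A minimal sketch of the pattern, assuming a kernel-internal helper that is not part of this patch:

/* Hypothetical helper, shown only to illustrate the __user/__force pattern. */
#include <linux/uaccess.h>
#include <linux/string.h>

static long copy_tag(char *dst, const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, n))
		return -EFAULT;
	/* No-MMU: the user pointer is directly dereferenceable, so the
	 * address-space marker is stripped explicitly with __force. */
	memcpy(dst, (const char __force *)src, n);
	return 0;
}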
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 460514a1a4e..e943cb13004 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -3,437 +3,11 @@
*
* Licensed under the GPL-2 or later.
*/
-
#ifndef __ASM_BFIN_UNISTD_H
#define __ASM_BFIN_UNISTD_H
-/*
- * This file contains the system call numbers.
- */
-#define __NR_restart_syscall 0
-#define __NR_exit 1
- /* 2 __NR_fork not supported on nommu */
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
- /* 7 __NR_waitpid obsolete */
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_chown 16
- /* 17 __NR_break obsolete */
- /* 18 __NR_oldstat obsolete */
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
- /* 22 __NR_umount obsolete */
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
- /* 28 __NR_oldfstat obsolete */
-#define __NR_pause 29
- /* 30 __NR_utime obsolete */
- /* 31 __NR_stty obsolete */
- /* 32 __NR_gtty obsolete */
-#define __NR_access 33
-#define __NR_nice 34
- /* 35 __NR_ftime obsolete */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
- /* 44 __NR_prof obsolete */
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
- /* 48 __NR_signal obsolete */
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
- /* 53 __NR_lock obsolete */
-#define __NR_ioctl 54
-#define __NR_fcntl 55
- /* 56 __NR_mpx obsolete */
-#define __NR_setpgid 57
- /* 58 __NR_ulimit obsolete */
- /* 59 __NR_oldolduname obsolete */
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
- /* 67 __NR_sigaction obsolete */
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
- /* 72 __NR_sigsuspend obsolete */
- /* 73 __NR_sigpending obsolete */
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
- /* 76 __NR_old_getrlimit obsolete */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
- /* 82 __NR_select obsolete */
-#define __NR_symlink 83
- /* 84 __NR_oldlstat obsolete */
-#define __NR_readlink 85
- /* 86 __NR_uselib obsolete */
- /* 87 __NR_swapon obsolete */
-#define __NR_reboot 88
- /* 89 __NR_readdir obsolete */
- /* 90 __NR_mmap obsolete */
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
- /* 98 __NR_profil obsolete */
-#define __NR_statfs 99
-#define __NR_fstatfs 100
- /* 101 __NR_ioperm */
- /* 102 __NR_socketcall obsolete */
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
- /* 109 __NR_olduname obsolete */
- /* 110 __NR_iopl obsolete */
-#define __NR_vhangup 111
- /* 112 __NR_idle obsolete */
- /* 113 __NR_vm86old */
-#define __NR_wait4 114
- /* 115 __NR_swapoff obsolete */
-#define __NR_sysinfo 116
- /* 117 __NR_ipc oboslete */
-#define __NR_fsync 118
- /* 119 __NR_sigreturn obsolete */
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
- /* 123 __NR_modify_ldt obsolete */
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
- /* 126 __NR_sigprocmask obsolete */
- /* 127 __NR_create_module obsolete */
-#define __NR_init_module 128
-#define __NR_delete_module 129
- /* 130 __NR_get_kernel_syms obsolete */
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
- /* 135 was sysfs */
-#define __NR_personality 136
- /* 137 __NR_afs_syscall */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
- /* 142 __NR__newselect obsolete */
-#define __NR_flock 143
- /* 144 __NR_msync obsolete */
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
- /* 150 __NR_mlock */
- /* 151 __NR_munlock */
- /* 152 __NR_mlockall */
- /* 153 __NR_munlockall */
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
- /* 166 __NR_vm86 */
- /* 167 __NR_query_module */
- /* 168 __NR_poll */
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread 180
-#define __NR_pwrite 181
-#define __NR_lchown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
- /* 188 __NR_getpmsg */
- /* 189 __NR_putpmsg */
-#define __NR_vfork 190
-#define __NR_getrlimit 191
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_chown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_lchown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
- /* 218 __NR_mincore */
- /* 219 __NR_madvise */
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
- /* 222 reserved for TUX */
- /* 223 reserved for TUX */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
- /* 243 __NR_set_thread_area */
- /* 244 __NR_get_thread_area */
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
- /* 250 __NR_alloc_hugepages */
- /* 251 __NR_free_hugepages */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_bfin_spinlock 254
-
-#define __NR_epoll_create 255
-#define __NR_epoll_ctl 256
-#define __NR_epoll_wait 257
- /* 258 __NR_remap_file_pages */
-#define __NR_set_tid_address 259
-#define __NR_timer_create 260
-#define __NR_timer_settime 261
-#define __NR_timer_gettime 262
-#define __NR_timer_getoverrun 263
-#define __NR_timer_delete 264
-#define __NR_clock_settime 265
-#define __NR_clock_gettime 266
-#define __NR_clock_getres 267
-#define __NR_clock_nanosleep 268
-#define __NR_statfs64 269
-#define __NR_fstatfs64 270
-#define __NR_tgkill 271
-#define __NR_utimes 272
-#define __NR_fadvise64_64 273
- /* 274 __NR_vserver */
- /* 275 __NR_mbind */
- /* 276 __NR_get_mempolicy */
- /* 277 __NR_set_mempolicy */
-#define __NR_mq_open 278
-#define __NR_mq_unlink 279
-#define __NR_mq_timedsend 280
-#define __NR_mq_timedreceive 281
-#define __NR_mq_notify 282
-#define __NR_mq_getsetattr 283
-#define __NR_kexec_load 284
-#define __NR_waitid 285
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
- /* 294 __NR_migrate_pages */
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-
-/* Blackfin private syscalls */
-#define __NR_sram_alloc 311
-#define __NR_sram_free 312
-#define __NR_dma_memcpy 313
-
-/* socket syscalls */
-#define __NR_accept 314
-#define __NR_bind 315
-#define __NR_connect 316
-#define __NR_getpeername 317
-#define __NR_getsockname 318
-#define __NR_getsockopt 319
-#define __NR_listen 320
-#define __NR_recv 321
-#define __NR_recvfrom 322
-#define __NR_recvmsg 323
-#define __NR_send 324
-#define __NR_sendmsg 325
-#define __NR_sendto 326
-#define __NR_setsockopt 327
-#define __NR_shutdown 328
-#define __NR_socket 329
-#define __NR_socketpair 330
-
-/* sysv ipc syscalls */
-#define __NR_semctl 331
-#define __NR_semget 332
-#define __NR_semop 333
-#define __NR_msgctl 334
-#define __NR_msgget 335
-#define __NR_msgrcv 336
-#define __NR_msgsnd 337
-#define __NR_shmat 338
-#define __NR_shmctl 339
-#define __NR_shmdt 340
-#define __NR_shmget 341
-#define __NR_splice 342
-#define __NR_sync_file_range 343
-#define __NR_tee 344
-#define __NR_vmsplice 345
+#include <uapi/asm/unistd.h>
-#define __NR_epoll_pwait 346
-#define __NR_utimensat 347
-#define __NR_signalfd 348
-#define __NR_timerfd_create 349
-#define __NR_eventfd 350
-#define __NR_pread64 351
-#define __NR_pwrite64 352
-#define __NR_fadvise64 353
-#define __NR_set_robust_list 354
-#define __NR_get_robust_list 355
-#define __NR_fallocate 356
-#define __NR_semtimedop 357
-#define __NR_timerfd_settime 358
-#define __NR_timerfd_gettime 359
-#define __NR_signalfd4 360
-#define __NR_eventfd2 361
-#define __NR_epoll_create1 362
-#define __NR_dup3 363
-#define __NR_pipe2 364
-#define __NR_inotify_init1 365
-#define __NR_preadv 366
-#define __NR_pwritev 367
-#define __NR_rt_tgsigqueueinfo 368
-#define __NR_perf_event_open 369
-#define __NR_recvmmsg 370
-#define __NR_fanotify_init 371
-#define __NR_fanotify_mark 372
-#define __NR_prlimit64 373
-#define __NR_cacheflush 374
-#define __NR_name_to_handle_at 375
-#define __NR_open_by_handle_at 376
-#define __NR_clock_adjtime 377
-#define __NR_syncfs 378
-#define __NR_setns 379
-#define __NR_sendmmsg 380
-#define __NR_process_vm_readv 381
-#define __NR_process_vm_writev 382
-
-#define __NR_syscall 383
-#define NR_syscalls __NR_syscall
-
-/* Old optional stuff no one actually uses */
-#define __IGNORE_sysfs
-#define __IGNORE_uselib
-
-/* Implement the newer interfaces */
-#define __IGNORE_mmap
-#define __IGNORE_poll
-#define __IGNORE_select
-#define __IGNORE_utime
-
-/* Not relevant on no-mmu */
-#define __IGNORE_swapon
-#define __IGNORE_swapoff
-#define __IGNORE_msync
-#define __IGNORE_mlock
-#define __IGNORE_munlock
-#define __IGNORE_mlockall
-#define __IGNORE_munlockall
-#define __IGNORE_mincore
-#define __IGNORE_madvise
-#define __IGNORE_remap_file_pages
-#define __IGNORE_mbind
-#define __IGNORE_get_mempolicy
-#define __IGNORE_set_mempolicy
-#define __IGNORE_migrate_pages
-#define __IGNORE_move_pages
-#define __IGNORE_getcpu
-
-#ifdef __KERNEL__
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -446,7 +20,6 @@
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_VFORK
/*
@@ -457,6 +30,4 @@
*/
#define cond_syscall(x) asm(".weak\t_" #x "\n\t.set\t_" #x ",_sys_ni_syscall");
-#endif /* __KERNEL__ */
-
#endif /* __ASM_BFIN_UNISTD_H */
diff --git a/arch/blackfin/include/mach-common/irq.h b/arch/blackfin/include/mach-common/irq.h
index cab14e911dc..af9fc8171eb 100644
--- a/arch/blackfin/include/mach-common/irq.h
+++ b/arch/blackfin/include/mach-common/irq.h
@@ -40,8 +40,6 @@
#define IRQ_HWERR 5 /* Hardware Error */
#define IRQ_CORETMR 6 /* Core timer */
-#define BFIN_IRQ(x) ((x) + 7)
-
#define IVG7 7
#define IVG8 8
#define IVG9 9
@@ -52,6 +50,9 @@
#define IVG14 14
#define IVG15 15
+#define BFIN_IRQ(x) ((x) + IVG7)
+#define BFIN_SYSIRQ(x) ((x) - IVG7)
+
#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
#endif
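BFIN_IRQ() and BFIN_SYSIRQ() are inverse mappings between a controller's system interrupt number and the Linux IRQ number; the worked values below follow directly from the definitions above and are only an illustration:

/* With IVG7 == 7:
 *   BFIN_IRQ(0)     == 7    first system interrupt -> Linux IRQ 7
 *   BFIN_IRQ(5)     == 12
 *   BFIN_SYSIRQ(12) == 5    and back again
 * so BFIN_SYSIRQ(BFIN_IRQ(x)) == x for every system interrupt x.
 */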
diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild
index baebb3da1d4..0bd28f77abc 100644
--- a/arch/blackfin/include/uapi/asm/Kbuild
+++ b/arch/blackfin/include/uapi/asm/Kbuild
@@ -1,3 +1,19 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += bfin_sport.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += fcntl.h
+header-y += fixed_code.h
+header-y += ioctls.h
+header-y += kvm_para.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += stat.h
+header-y += swab.h
+header-y += unistd.h
diff --git a/arch/blackfin/include/uapi/asm/bfin_sport.h b/arch/blackfin/include/uapi/asm/bfin_sport.h
new file mode 100644
index 00000000000..c086de87ee6
--- /dev/null
+++ b/arch/blackfin/include/uapi/asm/bfin_sport.h
@@ -0,0 +1,136 @@
+/*
+ * bfin_sport.h - interface to Blackfin SPORTs
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _UAPI__BFIN_SPORT_H__
+#define _UAPI__BFIN_SPORT_H__
+
+/* SPORT mode: it can be set to TDM, I2S or others */
+#define NORM_MODE 0x0
+#define TDM_MODE 0x1
+#define I2S_MODE 0x2
+#define NDSO_MODE 0x3
+
+/* Data format, normal, a-law or u-law */
+#define NORM_FORMAT 0x0
+#define ALAW_FORMAT 0x2
+#define ULAW_FORMAT 0x3
+
+/* Function drivers which use the SPORT must initialize this structure */
+struct sport_config {
+ /* TDM (multichannels), I2S or other mode */
+ unsigned int mode:3;
+ unsigned int polled; /* use poll instead of irq when set */
+
+ /* if TDM mode is selected, channels must be set */
+ int channels; /* Must be in units of 8 */
+ unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */
+
+ /* I2S mode */
+ unsigned int right_first:1; /* Right stereo channel first */
+
+ /* In normal mode, the following items need to be set */
+ unsigned int lsb_first:1; /* order of transmit or receive data */
+ unsigned int fsync:1; /* Frame sync required */
+ unsigned int data_indep:1; /* data independent frame sync generated */
+ unsigned int act_low:1; /* Active low TFS */
+ unsigned int late_fsync:1; /* Late frame sync */
+ unsigned int tckfe:1; /* TX clock falling edge select (TCKFE) */
+ unsigned int sec_en:1; /* Secondary side enabled */
+
+ /* Choose clock source */
+ unsigned int int_clk:1; /* Internal or external clock */
+
+ /* If external clock is used, the following fields are ignored */
+ int serial_clk;
+ int fsync_clk;
+
+ unsigned int data_format:2; /* Normal, u-law or a-law */
+
+ int word_len; /* Word length in bits, 3-32 */
+ int dma_enabled;
+};
+
+/* Userspace interface */
+#define SPORT_IOC_MAGIC 'P'
+#define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config)
+#define SPORT_IOC_GET_SYSTEMCLOCK _IOR('P', 0x02, unsigned long)
+#define SPORT_IOC_SET_BAUDRATE _IOW('P', 0x03, unsigned long)
+
+
+/* SPORT_TCR1 Masks */
+#define TSPEN 0x0001 /* TX enable */
+#define ITCLK 0x0002 /* Internal TX Clock Select */
+#define TDTYPE 0x000C /* TX Data Formatting Select */
+#define DTYPE_NORM 0x0000 /* Data Format Normal */
+#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
+#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
+#define TLSBIT 0x0010 /* TX Bit Order */
+#define ITFS 0x0200 /* Internal TX Frame Sync Select */
+#define TFSR 0x0400 /* TX Frame Sync Required Select */
+#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
+#define LTFS 0x1000 /* Low TX Frame Sync Select */
+#define LATFS 0x2000 /* Late TX Frame Sync Select */
+#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
+
+/* SPORT_TCR2 Masks */
+#define SLEN 0x001F /* SPORT TX Word Length (2 - 31) */
+#define DP_SLEN(x) BFIN_DEPOSIT(SLEN, x)
+#define EX_SLEN(x) BFIN_EXTRACT(SLEN, x)
+#define TXSE 0x0100 /* TX Secondary Enable */
+#define TSFSE 0x0200 /* TX Stereo Frame Sync Enable */
+#define TRFST 0x0400 /* TX Right-First Data Order */
+
+/* SPORT_RCR1 Masks */
+#define RSPEN 0x0001 /* RX enable */
+#define IRCLK 0x0002 /* Internal RX Clock Select */
+#define RDTYPE 0x000C /* RX Data Formatting Select */
+/* DTYPE_* defined above */
+#define RLSBIT 0x0010 /* RX Bit Order */
+#define IRFS 0x0200 /* Internal RX Frame Sync Select */
+#define RFSR 0x0400 /* RX Frame Sync Required Select */
+#define LRFS 0x1000 /* Low RX Frame Sync Select */
+#define LARFS 0x2000 /* Late RX Frame Sync Select */
+#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
+
+/* SPORT_RCR2 Masks */
+/* SLEN defined above */
+#define RXSE 0x0100 /* RX Secondary Enable */
+#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
+#define RRFST 0x0400 /* Right-First Data Order */
+
+/* SPORT_STAT Masks */
+#define RXNE 0x0001 /* RX FIFO Not Empty Status */
+#define RUVF 0x0002 /* RX Underflow Status */
+#define ROVF 0x0004 /* RX Overflow Status */
+#define TXF 0x0008 /* TX FIFO Full Status */
+#define TUVF 0x0010 /* TX Underflow Status */
+#define TOVF 0x0020 /* TX Overflow Status */
+#define TXHRE 0x0040 /* TX Hold Register Empty */
+
+/* SPORT_MCMC1 Masks */
+#define SP_WOFF 0x03FF /* Multichannel Window Offset Field */
+#define DP_SP_WOFF(x) BFIN_DEPOSIT(SP_WOFF, x)
+#define EX_SP_WOFF(x) BFIN_EXTRACT(SP_WOFF, x)
+#define SP_WSIZE 0xF000 /* Multichannel Window Size Field */
+#define DP_SP_WSIZE(x) BFIN_DEPOSIT(SP_WSIZE, x)
+#define EX_SP_WSIZE(x) BFIN_EXTRACT(SP_WSIZE, x)
+
+/* SPORT_MCMC2 Masks */
+#define MCCRM 0x0003 /* Multichannel Clock Recovery Mode */
+#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
+#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
+#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
+#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
+#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
+#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
+#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
+#define MFD 0xF000 /* Multichannel Frame Delay */
+#define DP_MFD(x) BFIN_DEPOSIT(MFD, x)
+#define EX_MFD(x) BFIN_EXTRACT(MFD, x)
+
+#endif /* _UAPI__BFIN_SPORT_H__ */
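Because bfin_sport.h is now exported to userspace (see the Kbuild list above), an application can configure a SPORT through the ioctls defined here. A hedged sketch; the device node name and all field values are assumptions, not taken from this patch:

/* Hypothetical userspace sketch using the exported bfin_sport.h ABI. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/bfin_sport.h>

int main(void)
{
	struct sport_config cfg;
	int fd = open("/dev/sport0", O_RDWR);	/* device name is an assumption */

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.mode = TDM_MODE;		/* multichannel (TDM) operation */
	cfg.channels = 8;		/* in units of 8, per the header */
	cfg.word_len = 16;		/* 16-bit words */
	cfg.int_clk = 1;		/* internally generated serial clock */
	cfg.serial_clk = 2048000;	/* example rate, assumption */
	cfg.fsync_clk = 8000;		/* example frame sync rate, assumption */

	if (ioctl(fd, SPORT_IOC_CONFIG, &cfg) < 0)
		perror("SPORT_IOC_CONFIG");

	close(fd);
	return 0;
}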
diff --git a/arch/blackfin/include/asm/byteorder.h b/arch/blackfin/include/uapi/asm/byteorder.h
index 9558416d578..9558416d578 100644
--- a/arch/blackfin/include/asm/byteorder.h
+++ b/arch/blackfin/include/uapi/asm/byteorder.h
diff --git a/arch/blackfin/include/asm/cachectl.h b/arch/blackfin/include/uapi/asm/cachectl.h
index 03255df6c1e..03255df6c1e 100644
--- a/arch/blackfin/include/asm/cachectl.h
+++ b/arch/blackfin/include/uapi/asm/cachectl.h
diff --git a/arch/blackfin/include/asm/fcntl.h b/arch/blackfin/include/uapi/asm/fcntl.h
index 251c911d59c..251c911d59c 100644
--- a/arch/blackfin/include/asm/fcntl.h
+++ b/arch/blackfin/include/uapi/asm/fcntl.h
diff --git a/arch/blackfin/include/uapi/asm/fixed_code.h b/arch/blackfin/include/uapi/asm/fixed_code.h
new file mode 100644
index 00000000000..3bef1dca379
--- /dev/null
+++ b/arch/blackfin/include/uapi/asm/fixed_code.h
@@ -0,0 +1,38 @@
+/*
+ * This file defines the fixed addresses where userspace programs
+ * can find atomic code sequences.
+ *
+ * Copyright 2007-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _UAPI__BFIN_ASM_FIXED_CODE_H__
+#define _UAPI__BFIN_ASM_FIXED_CODE_H__
+
+
+#ifndef CONFIG_PHY_RAM_BASE_ADDRESS
+#define CONFIG_PHY_RAM_BASE_ADDRESS 0x0
+#endif
+
+#define FIXED_CODE_START (CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
+
+#define SIGRETURN_STUB (CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
+
+#define ATOMIC_SEQS_START (CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
+
+#define ATOMIC_XCHG32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
+#define ATOMIC_CAS32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x420)
+#define ATOMIC_ADD32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x430)
+#define ATOMIC_SUB32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x440)
+#define ATOMIC_IOR32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x450)
+#define ATOMIC_AND32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x460)
+#define ATOMIC_XOR32 (CONFIG_PHY_RAM_BASE_ADDRESS + 0x470)
+
+#define ATOMIC_SEQS_END (CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
+
+#define SAFE_USER_INSTRUCTION (CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
+
+#define FIXED_CODE_END (CONFIG_PHY_RAM_BASE_ADDRESS + 0x490)
+
+#endif /* _UAPI__BFIN_ASM_FIXED_CODE_H__ */
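With the default CONFIG_PHY_RAM_BASE_ADDRESS of 0, the table above places the atomic sequences at 0x410-0x480 in 0x10-byte slots. A small illustrative program that only prints the exported layout, assuming the header is installed for userspace as <asm/fixed_code.h>:

/* Illustration only: dump the fixed-code address map defined above. */
#include <stdio.h>
#include <asm/fixed_code.h>

int main(void)
{
	printf("fixed code : 0x%05x - 0x%05x\n", FIXED_CODE_START, FIXED_CODE_END);
	printf("sigreturn  : 0x%05x\n", SIGRETURN_STUB);
	printf("atomics    : 0x%05x - 0x%05x\n", ATOMIC_SEQS_START, ATOMIC_SEQS_END);
	printf("safe insn  : 0x%05x\n", SAFE_USER_INSTRUCTION);
	return 0;
}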
diff --git a/arch/blackfin/include/asm/ioctls.h b/arch/blackfin/include/uapi/asm/ioctls.h
index eca8d75b0a8..eca8d75b0a8 100644
--- a/arch/blackfin/include/asm/ioctls.h
+++ b/arch/blackfin/include/uapi/asm/ioctls.h
diff --git a/arch/blackfin/include/asm/poll.h b/arch/blackfin/include/uapi/asm/poll.h
index 072d8966c5c..072d8966c5c 100644
--- a/arch/blackfin/include/asm/poll.h
+++ b/arch/blackfin/include/uapi/asm/poll.h
diff --git a/arch/blackfin/include/asm/posix_types.h b/arch/blackfin/include/uapi/asm/posix_types.h
index 1bd3436db6a..1bd3436db6a 100644
--- a/arch/blackfin/include/asm/posix_types.h
+++ b/arch/blackfin/include/uapi/asm/posix_types.h
diff --git a/arch/blackfin/include/uapi/asm/ptrace.h b/arch/blackfin/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..fd48bd0739d
--- /dev/null
+++ b/arch/blackfin/include/uapi/asm/ptrace.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _UAPI_BFIN_PTRACE_H
+#define _UAPI_BFIN_PTRACE_H
+
+/*
+ * GCC defines register number like this:
+ * -----------------------------
+ * 0 - 7 are data registers R0-R7
+ * 8 - 15 are address registers P0-P7
+ * 16 - 31 dsp registers I/B/L0 -- I/B/L3 & M0--M3
+ * 32 - 33 A registers A0 & A1
+ * 34 - status register
+ * -----------------------------
+ *
+ * We follow the above, except:
+ * 32-33 --- Low 32-bit of A0&1
+ * 34-35 --- High 8-bit of A0&1
+ */
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long orig_pc;
+ long ipend;
+ long seqstat;
+ long rete;
+ long retn;
+ long retx;
+ long pc; /* PC == RETI */
+ long rets;
+ long reserved; /* Used as scratch during system calls */
+ long astat;
+ long lb1;
+ long lb0;
+ long lt1;
+ long lt0;
+ long lc1;
+ long lc0;
+ long a1w;
+ long a1x;
+ long a0w;
+ long a0x;
+ long b3;
+ long b2;
+ long b1;
+ long b0;
+ long l3;
+ long l2;
+ long l1;
+ long l0;
+ long m3;
+ long m2;
+ long m1;
+ long m0;
+ long i3;
+ long i2;
+ long i1;
+ long i0;
+ long usp;
+ long fp;
+ long p5;
+ long p4;
+ long p3;
+ long p2;
+ long p1;
+ long p0;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long orig_r0;
+ long orig_p0;
+ long syscfg;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13 /* ptrace signal */
+
+#define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */
+#define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */
+#define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */
+
+#define PS_S (0x0002)
+
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+
+#define PT_R0 204
+#define PT_R1 200
+#define PT_R2 196
+#define PT_R3 192
+#define PT_R4 188
+#define PT_R5 184
+#define PT_R6 180
+#define PT_R7 176
+#define PT_P0 172
+#define PT_P1 168
+#define PT_P2 164
+#define PT_P3 160
+#define PT_P4 156
+#define PT_P5 152
+#define PT_FP 148
+#define PT_USP 144
+#define PT_I0 140
+#define PT_I1 136
+#define PT_I2 132
+#define PT_I3 128
+#define PT_M0 124
+#define PT_M1 120
+#define PT_M2 116
+#define PT_M3 112
+#define PT_L0 108
+#define PT_L1 104
+#define PT_L2 100
+#define PT_L3 96
+#define PT_B0 92
+#define PT_B1 88
+#define PT_B2 84
+#define PT_B3 80
+#define PT_A0X 76
+#define PT_A0W 72
+#define PT_A1X 68
+#define PT_A1W 64
+#define PT_LC0 60
+#define PT_LC1 56
+#define PT_LT0 52
+#define PT_LT1 48
+#define PT_LB0 44
+#define PT_LB1 40
+#define PT_ASTAT 36
+#define PT_RESERVED 32
+#define PT_RETS 28
+#define PT_PC 24
+#define PT_RETX 20
+#define PT_RETN 16
+#define PT_RETE 12
+#define PT_SEQSTAT 8
+#define PT_IPEND 4
+
+#define PT_ORIG_R0 208
+#define PT_ORIG_P0 212
+#define PT_SYSCFG 216
+#define PT_TEXT_ADDR 220
+#define PT_TEXT_END_ADDR 224
+#define PT_DATA_ADDR 228
+#define PT_FDPIC_EXEC 232
+#define PT_FDPIC_INTERP 236
+
+#define PT_LAST_PSEUDO PT_FDPIC_INTERP
+
+#endif /* _UAPI_BFIN_PTRACE_H */
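The PT_* values are byte offsets into the register area addressed by PTRACE_PEEKUSER, so a debugger can fetch individual registers by offset. A hedged userspace sketch (the traced child, error handling and any header interactions with <sys/ptrace.h> are glossed over):

/* Illustration only: read a stopped child's R0 and PC via the PT_* offsets. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static void dump_regs(pid_t child)
{
	long r0 = ptrace(PTRACE_PEEKUSER, child, (void *)PT_R0, 0);
	long pc = ptrace(PTRACE_PEEKUSER, child, (void *)PT_PC, 0);

	printf("r0=%#lx pc=%#lx\n", r0, pc);
}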
diff --git a/arch/blackfin/include/asm/sigcontext.h b/arch/blackfin/include/uapi/asm/sigcontext.h
index 906bdc1f5fd..906bdc1f5fd 100644
--- a/arch/blackfin/include/asm/sigcontext.h
+++ b/arch/blackfin/include/uapi/asm/sigcontext.h
diff --git a/arch/blackfin/include/asm/siginfo.h b/arch/blackfin/include/uapi/asm/siginfo.h
index 3e81306394e..3e81306394e 100644
--- a/arch/blackfin/include/asm/siginfo.h
+++ b/arch/blackfin/include/uapi/asm/siginfo.h
diff --git a/arch/blackfin/include/asm/signal.h b/arch/blackfin/include/uapi/asm/signal.h
index 77a3bf37b69..77a3bf37b69 100644
--- a/arch/blackfin/include/asm/signal.h
+++ b/arch/blackfin/include/uapi/asm/signal.h
diff --git a/arch/blackfin/include/asm/stat.h b/arch/blackfin/include/uapi/asm/stat.h
index 2e27665c4e9..2e27665c4e9 100644
--- a/arch/blackfin/include/asm/stat.h
+++ b/arch/blackfin/include/uapi/asm/stat.h
diff --git a/arch/blackfin/include/asm/swab.h b/arch/blackfin/include/uapi/asm/swab.h
index 89de6507ca2..89de6507ca2 100644
--- a/arch/blackfin/include/asm/swab.h
+++ b/arch/blackfin/include/uapi/asm/swab.h
diff --git a/arch/blackfin/include/uapi/asm/unistd.h b/arch/blackfin/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..a4511649a86
--- /dev/null
+++ b/arch/blackfin/include/uapi/asm/unistd.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _UAPI__ASM_BFIN_UNISTD_H
+#define _UAPI__ASM_BFIN_UNISTD_H
+/*
+ * This file contains the system call numbers.
+ */
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+ /* 2 __NR_fork not supported on nommu */
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+ /* 7 __NR_waitpid obsolete */
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+ /* 17 __NR_break obsolete */
+ /* 18 __NR_oldstat obsolete */
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+ /* 22 __NR_umount obsolete */
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+ /* 28 __NR_oldfstat obsolete */
+#define __NR_pause 29
+ /* 30 __NR_utime obsolete */
+ /* 31 __NR_stty obsolete */
+ /* 32 __NR_gtty obsolete */
+#define __NR_access 33
+#define __NR_nice 34
+ /* 35 __NR_ftime obsolete */
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+ /* 44 __NR_prof obsolete */
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+ /* 48 __NR_signal obsolete */
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+ /* 53 __NR_lock obsolete */
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+ /* 56 __NR_mpx obsolete */
+#define __NR_setpgid 57
+ /* 58 __NR_ulimit obsolete */
+ /* 59 __NR_oldolduname obsolete */
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+ /* 67 __NR_sigaction obsolete */
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+ /* 72 __NR_sigsuspend obsolete */
+ /* 73 __NR_sigpending obsolete */
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+ /* 76 __NR_old_getrlimit obsolete */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+ /* 82 __NR_select obsolete */
+#define __NR_symlink 83
+ /* 84 __NR_oldlstat obsolete */
+#define __NR_readlink 85
+ /* 86 __NR_uselib obsolete */
+ /* 87 __NR_swapon obsolete */
+#define __NR_reboot 88
+ /* 89 __NR_readdir obsolete */
+ /* 90 __NR_mmap obsolete */
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+ /* 98 __NR_profil obsolete */
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+ /* 101 __NR_ioperm */
+ /* 102 __NR_socketcall obsolete */
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+ /* 109 __NR_olduname obsolete */
+ /* 110 __NR_iopl obsolete */
+#define __NR_vhangup 111
+ /* 112 __NR_idle obsolete */
+ /* 113 __NR_vm86old */
+#define __NR_wait4 114
+ /* 115 __NR_swapoff obsolete */
+#define __NR_sysinfo 116
+ /* 117 __NR_ipc obsolete */
+#define __NR_fsync 118
+ /* 119 __NR_sigreturn obsolete */
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+ /* 123 __NR_modify_ldt obsolete */
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+ /* 126 __NR_sigprocmask obsolete */
+ /* 127 __NR_create_module obsolete */
+#define __NR_init_module 128
+#define __NR_delete_module 129
+ /* 130 __NR_get_kernel_syms obsolete */
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+ /* 135 was sysfs */
+#define __NR_personality 136
+ /* 137 __NR_afs_syscall */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+ /* 142 __NR__newselect obsolete */
+#define __NR_flock 143
+ /* 144 __NR_msync obsolete */
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+ /* 150 __NR_mlock */
+ /* 151 __NR_munlock */
+ /* 152 __NR_mlockall */
+ /* 153 __NR_munlockall */
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+ /* 166 __NR_vm86 */
+ /* 167 __NR_query_module */
+ /* 168 __NR_poll */
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread 180
+#define __NR_pwrite 181
+#define __NR_lchown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+ /* 188 __NR_getpmsg */
+ /* 189 __NR_putpmsg */
+#define __NR_vfork 190
+#define __NR_getrlimit 191
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_chown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_lchown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+ /* 218 __NR_mincore */
+ /* 219 __NR_madvise */
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+ /* 222 reserved for TUX */
+ /* 223 reserved for TUX */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+ /* 243 __NR_set_thread_area */
+ /* 244 __NR_get_thread_area */
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+ /* 250 __NR_alloc_hugepages */
+ /* 251 __NR_free_hugepages */
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_bfin_spinlock 254
+
+#define __NR_epoll_create 255
+#define __NR_epoll_ctl 256
+#define __NR_epoll_wait 257
+ /* 258 __NR_remap_file_pages */
+#define __NR_set_tid_address 259
+#define __NR_timer_create 260
+#define __NR_timer_settime 261
+#define __NR_timer_gettime 262
+#define __NR_timer_getoverrun 263
+#define __NR_timer_delete 264
+#define __NR_clock_settime 265
+#define __NR_clock_gettime 266
+#define __NR_clock_getres 267
+#define __NR_clock_nanosleep 268
+#define __NR_statfs64 269
+#define __NR_fstatfs64 270
+#define __NR_tgkill 271
+#define __NR_utimes 272
+#define __NR_fadvise64_64 273
+ /* 274 __NR_vserver */
+ /* 275 __NR_mbind */
+ /* 276 __NR_get_mempolicy */
+ /* 277 __NR_set_mempolicy */
+#define __NR_mq_open 278
+#define __NR_mq_unlink 279
+#define __NR_mq_timedsend 280
+#define __NR_mq_timedreceive 281
+#define __NR_mq_notify 282
+#define __NR_mq_getsetattr 283
+#define __NR_kexec_load 284
+#define __NR_waitid 285
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+ /* 294 __NR_migrate_pages */
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+
+/* Blackfin private syscalls */
+#define __NR_sram_alloc 311
+#define __NR_sram_free 312
+#define __NR_dma_memcpy 313
+
+/* socket syscalls */
+#define __NR_accept 314
+#define __NR_bind 315
+#define __NR_connect 316
+#define __NR_getpeername 317
+#define __NR_getsockname 318
+#define __NR_getsockopt 319
+#define __NR_listen 320
+#define __NR_recv 321
+#define __NR_recvfrom 322
+#define __NR_recvmsg 323
+#define __NR_send 324
+#define __NR_sendmsg 325
+#define __NR_sendto 326
+#define __NR_setsockopt 327
+#define __NR_shutdown 328
+#define __NR_socket 329
+#define __NR_socketpair 330
+
+/* sysv ipc syscalls */
+#define __NR_semctl 331
+#define __NR_semget 332
+#define __NR_semop 333
+#define __NR_msgctl 334
+#define __NR_msgget 335
+#define __NR_msgrcv 336
+#define __NR_msgsnd 337
+#define __NR_shmat 338
+#define __NR_shmctl 339
+#define __NR_shmdt 340
+#define __NR_shmget 341
+
+#define __NR_splice 342
+#define __NR_sync_file_range 343
+#define __NR_tee 344
+#define __NR_vmsplice 345
+
+#define __NR_epoll_pwait 346
+#define __NR_utimensat 347
+#define __NR_signalfd 348
+#define __NR_timerfd_create 349
+#define __NR_eventfd 350
+#define __NR_pread64 351
+#define __NR_pwrite64 352
+#define __NR_fadvise64 353
+#define __NR_set_robust_list 354
+#define __NR_get_robust_list 355
+#define __NR_fallocate 356
+#define __NR_semtimedop 357
+#define __NR_timerfd_settime 358
+#define __NR_timerfd_gettime 359
+#define __NR_signalfd4 360
+#define __NR_eventfd2 361
+#define __NR_epoll_create1 362
+#define __NR_dup3 363
+#define __NR_pipe2 364
+#define __NR_inotify_init1 365
+#define __NR_preadv 366
+#define __NR_pwritev 367
+#define __NR_rt_tgsigqueueinfo 368
+#define __NR_perf_event_open 369
+#define __NR_recvmmsg 370
+#define __NR_fanotify_init 371
+#define __NR_fanotify_mark 372
+#define __NR_prlimit64 373
+#define __NR_cacheflush 374
+#define __NR_name_to_handle_at 375
+#define __NR_open_by_handle_at 376
+#define __NR_clock_adjtime 377
+#define __NR_syncfs 378
+#define __NR_setns 379
+#define __NR_sendmmsg 380
+#define __NR_process_vm_readv 381
+#define __NR_process_vm_writev 382
+
+#define __NR_syscall 383
+#define NR_syscalls __NR_syscall
+
+/* Old optional stuff no one actually uses */
+#define __IGNORE_sysfs
+#define __IGNORE_uselib
+
+/* Implement the newer interfaces */
+#define __IGNORE_mmap
+#define __IGNORE_poll
+#define __IGNORE_select
+#define __IGNORE_utime
+
+/* Not relevant on no-mmu */
+#define __IGNORE_swapon
+#define __IGNORE_swapoff
+#define __IGNORE_msync
+#define __IGNORE_mlock
+#define __IGNORE_munlock
+#define __IGNORE_mlockall
+#define __IGNORE_munlockall
+#define __IGNORE_mincore
+#define __IGNORE_madvise
+#define __IGNORE_remap_file_pages
+#define __IGNORE_mbind
+#define __IGNORE_get_mempolicy
+#define __IGNORE_set_mempolicy
+#define __IGNORE_migrate_pages
+#define __IGNORE_move_pages
+#define __IGNORE_getcpu
+
+
+#endif /* _UAPI__ASM_BFIN_UNISTD_H */
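Once these numbers are exported, userspace without a libc wrapper can issue a call through syscall(2). A minimal hedged sketch using a standard number from the table; the Blackfin-private calls (__NR_sram_alloc and friends) follow the same pattern, but their argument layout is not described by this header:

/* Illustration only: raw syscall invocation by number. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long tid = syscall(__NR_gettid);	/* 224 in the table above */

	printf("tid=%ld\n", tid);
	return 0;
}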
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 9b80b152435..b882ce22c34 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -329,6 +329,9 @@ static void bfin_disable_hw_debug(struct pt_regs *regs)
}
#ifdef CONFIG_SMP
+extern void generic_exec_single(int cpu, struct call_single_data *data, int wait);
+static struct call_single_data kgdb_smp_ipi_data[NR_CPUS];
+
void kgdb_passive_cpu_callback(void *info)
{
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
@@ -336,12 +339,18 @@ void kgdb_passive_cpu_callback(void *info)
void kgdb_roundup_cpus(unsigned long flags)
{
- smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
+ unsigned int cpu;
+
+ for (cpu = cpumask_first(cpu_online_mask); cpu < nr_cpu_ids;
+ cpu = cpumask_next(cpu, cpu_online_mask)) {
+ kgdb_smp_ipi_data[cpu].func = kgdb_passive_cpu_callback;
+ generic_exec_single(cpu, &kgdb_smp_ipi_data[cpu], 0);
+ }
}
void kgdb_roundup_cpu(int cpu, unsigned long flags)
{
- smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
+ generic_exec_single(cpu, &kgdb_smp_ipi_data[cpu], 0);
}
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index 845e6bc8d63..46cb88231d6 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -165,5 +165,6 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#define ANOMALY_05000480 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index aa14110be4c..2f9cc33deec 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -285,5 +285,6 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000480 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 3a8f73a669f..0e754efc3cf 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -378,5 +378,6 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000480 (0)
#define ANOMALY_05000485 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index df921269639..2bc70c5b941 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -236,5 +236,6 @@
#define ANOMALY_05000467 (0)
#define ANOMALY_05000474 (0)
#define ANOMALY_05000485 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 318d922d11d..eaac26973f6 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -210,5 +210,6 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000480 (0)
#define ANOMALY_05000485 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 5b711d85b90..098fad63e03 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -296,5 +296,6 @@
#define ANOMALY_05000440 (0)
#define ANOMALY_05000475 (0)
#define ANOMALY_05000480 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 72476ff5033..038249c1d0d 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -348,5 +348,6 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000480 (0)
#define ANOMALY_05000485 (0)
+#define ANOMALY_16000030 (0)
#endif
diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig
index 101b33ee9bb..95a4f1b676c 100644
--- a/arch/blackfin/mach-bf609/Kconfig
+++ b/arch/blackfin/mach-bf609/Kconfig
@@ -56,7 +56,7 @@ config SEC_IRQ_PRIORITY_LEVELS
default 7
range 0 7
help
- Devide the total number of interrupt priority levels into sub-levels.
+ Divide the total number of interrupt priority levels into sub-levels.
There is 2 ^ (SEC_IRQ_PRIORITY_LEVELS + 1) different levels.
endmenu
diff --git a/arch/blackfin/mach-bf609/include/mach/irq.h b/arch/blackfin/mach-bf609/include/mach/irq.h
index 23e74cdeeee..fa0843d5d77 100644
--- a/arch/blackfin/mach-bf609/include/mach/irq.h
+++ b/arch/blackfin/mach-bf609/include/mach/irq.h
@@ -9,9 +9,6 @@
#include <mach-common/irq.h>
-#undef BFIN_IRQ
-#define BFIN_IRQ(x) ((x) + IVG15)
-
#define NR_PERI_INTS (5 * 32)
#define IRQ_SEC_ERR BFIN_IRQ(0) /* SEC Error */
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index dacafc163f7..ad505d9db4a 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -174,7 +174,6 @@ void bfin_hibernate_syscontrol(void)
bfin_write32(DPM0_RESTORE5, bfin_read32(DPM0_RESTORE5) | 4);
}
-#define IRQ_SID(irq) ((irq) - IVG15)
asmlinkage void enter_deepsleep(void);
__attribute__((l1_text))
@@ -311,7 +310,7 @@ static irqreturn_t test_isr(int irq, void *dev_id)
{
printk(KERN_DEBUG "gpio irq %d\n", irq);
if (irq == 231)
- bfin_sec_raise_irq(IRQ_SID(IRQ_SOFT1));
+ bfin_sec_raise_irq(BFIN_SYSIRQ(IRQ_SOFT1));
return IRQ_HANDLED;
}
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index f5685a496c5..978bb400be0 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -157,24 +157,7 @@ struct platform_driver bfin_dpmc_device_driver = {
.name = DRIVER_NAME,
}
};
-
-/**
- * bfin_dpmc_init - Init driver
- */
-static int __init bfin_dpmc_init(void)
-{
- return platform_driver_register(&bfin_dpmc_device_driver);
-}
-module_init(bfin_dpmc_init);
-
-/**
- * bfin_dpmc_exit - break down driver
- */
-static void __exit bfin_dpmc_exit(void)
-{
- platform_driver_unregister(&bfin_dpmc_device_driver);
-}
-module_exit(bfin_dpmc_exit);
+module_platform_driver(bfin_dpmc_device_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpu power management driver for Blackfin");
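module_platform_driver() generates the same registration boilerplate that the hunk above deletes; roughly, the macro expands to the sketch below (simplified, not copied from the in-tree definition):

/* Approximate expansion of module_platform_driver(bfin_dpmc_device_driver): */
static int __init bfin_dpmc_device_driver_init(void)
{
	return platform_driver_register(&bfin_dpmc_device_driver);
}
module_init(bfin_dpmc_device_driver_init);

static void __exit bfin_dpmc_device_driver_exit(void)
{
	platform_driver_unregister(&bfin_dpmc_device_driver);
}
module_exit(bfin_dpmc_device_driver_exit);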
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 902bebc434c..83ff311fd6e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -28,12 +28,6 @@
#include <asm/dpmc.h>
#include <asm/traps.h>
-#ifndef SEC_GCTL
-# define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
-#else
-# define SIC_SYSIRQ(irq) ((irq) - IVG15)
-#endif
-
/*
* NOTES:
* - we have separated the physical Hardware interrupt from the
@@ -141,13 +135,13 @@ static void bfin_core_unmask_irq(struct irq_data *d)
return;
}
+#ifndef SEC_GCTL
void bfin_internal_mask_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
-#ifndef SEC_GCTL
#ifdef SIC_IMASK0
- unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
- unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
+ unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
+ unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
~(1 << mask_bit));
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
@@ -156,9 +150,8 @@ void bfin_internal_mask_irq(unsigned int irq)
# endif
#else
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
- ~(1 << SIC_SYSIRQ(irq)));
+ ~(1 << BFIN_SYSIRQ(irq)));
#endif /* end of SIC_IMASK0 */
-#endif
hard_local_irq_restore(flags);
}
@@ -176,10 +169,9 @@ void bfin_internal_unmask_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
-#ifndef SEC_GCTL
#ifdef SIC_IMASK0
- unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
- unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
+ unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
+ unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
if (cpumask_test_cpu(0, affinity))
# endif
@@ -194,17 +186,103 @@ void bfin_internal_unmask_irq(unsigned int irq)
# endif
#else
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
- (1 << SIC_SYSIRQ(irq)));
+ (1 << BFIN_SYSIRQ(irq)));
+#endif
+ hard_local_irq_restore(flags);
+}
+
+#ifdef CONFIG_SMP
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+ bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
+}
+
+static int bfin_internal_set_affinity(struct irq_data *d,
+ const struct cpumask *mask, bool force)
+{
+ bfin_internal_mask_irq(d->irq);
+ bfin_internal_unmask_irq_affinity(d->irq, mask);
+
+ return 0;
+}
+#else
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+ bfin_internal_unmask_irq(d->irq);
+}
#endif
+
+#if defined(CONFIG_PM)
+int bfin_internal_set_wake(unsigned int irq, unsigned int state)
+{
+ u32 bank, bit, wakeup = 0;
+ unsigned long flags;
+ bank = BFIN_SYSIRQ(irq) / 32;
+ bit = BFIN_SYSIRQ(irq) % 32;
+
+ switch (irq) {
+#ifdef IRQ_RTC
+ case IRQ_RTC:
+ wakeup |= WAKE;
+ break;
+#endif
+#ifdef IRQ_CAN0_RX
+ case IRQ_CAN0_RX:
+ wakeup |= CANWE;
+ break;
#endif
+#ifdef IRQ_CAN1_RX
+ case IRQ_CAN1_RX:
+ wakeup |= CANWE;
+ break;
+#endif
+#ifdef IRQ_USB_INT0
+ case IRQ_USB_INT0:
+ wakeup |= USBWE;
+ break;
+#endif
+#ifdef CONFIG_BF54x
+ case IRQ_CNT:
+ wakeup |= ROTWE;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ flags = hard_local_irq_save();
+
+ if (state) {
+ bfin_sic_iwr[bank] |= (1 << bit);
+ vr_wakeup |= wakeup;
+
+ } else {
+ bfin_sic_iwr[bank] &= ~(1 << bit);
+ vr_wakeup &= ~wakeup;
+ }
+
hard_local_irq_restore(flags);
+
+ return 0;
}
-#ifdef SEC_GCTL
+static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
+{
+ return bfin_internal_set_wake(d->irq, state);
+}
+#else
+inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
+{
+ return 0;
+}
+# define bfin_internal_set_wake_chip NULL
+#endif
+
+#else /* SEC_GCTL */
static void bfin_sec_preflow_handler(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
- unsigned int sid = SIC_SYSIRQ(d->irq);
+ unsigned int sid = BFIN_SYSIRQ(d->irq);
bfin_write_SEC_SCI(0, SEC_CSID, sid);
@@ -214,7 +292,7 @@ static void bfin_sec_preflow_handler(struct irq_data *d)
static void bfin_sec_mask_ack_irq(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
- unsigned int sid = SIC_SYSIRQ(d->irq);
+ unsigned int sid = BFIN_SYSIRQ(d->irq);
bfin_write_SEC_SCI(0, SEC_CSID, sid);
@@ -224,7 +302,7 @@ static void bfin_sec_mask_ack_irq(struct irq_data *d)
static void bfin_sec_unmask_irq(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
- unsigned int sid = SIC_SYSIRQ(d->irq);
+ unsigned int sid = BFIN_SYSIRQ(d->irq);
bfin_write32(SEC_END, sid);
@@ -269,7 +347,7 @@ static void bfin_sec_enable_sci(unsigned int sid)
unsigned long flags = hard_local_irq_save();
uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
- if (sid == SIC_SYSIRQ(IRQ_WATCH0))
+ if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
reg_sctl |= SEC_SCTL_FAULT_EN;
else
reg_sctl |= SEC_SCTL_INT_EN;
@@ -292,7 +370,7 @@ static void bfin_sec_disable_sci(unsigned int sid)
static void bfin_sec_enable(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
- unsigned int sid = SIC_SYSIRQ(d->irq);
+ unsigned int sid = BFIN_SYSIRQ(d->irq);
bfin_sec_enable_sci(sid);
bfin_sec_enable_ssi(sid);
@@ -303,7 +381,7 @@ static void bfin_sec_enable(struct irq_data *d)
static void bfin_sec_disable(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
- unsigned int sid = SIC_SYSIRQ(d->irq);
+ unsigned int sid = BFIN_SYSIRQ(d->irq);
bfin_sec_disable_sci(sid);
bfin_sec_disable_ssi(sid);
@@ -328,9 +406,10 @@ static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_prior
hard_local_irq_restore(flags);
}
-void bfin_sec_raise_irq(unsigned int sid)
+void bfin_sec_raise_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
+ unsigned int sid = BFIN_SYSIRQ(irq);
bfin_write32(SEC_RAISE, sid);
@@ -341,8 +420,13 @@ static void init_software_driven_irq(void)
{
bfin_sec_set_ssi_coreid(34, 0);
bfin_sec_set_ssi_coreid(35, 1);
+
+ bfin_sec_enable_sci(35);
+ bfin_sec_enable_ssi(35);
bfin_sec_set_ssi_coreid(36, 0);
bfin_sec_set_ssi_coreid(37, 1);
+ bfin_sec_enable_sci(37);
+ bfin_sec_enable_ssi(37);
}
void bfin_sec_resume(void)
@@ -412,6 +496,8 @@ void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
}
raw_spin_unlock(&desc->lock);
+
+ handle_fasteoi_irq(irq, desc);
}
void handle_core_fault(unsigned int irq, struct irq_desc *desc)
@@ -431,105 +517,18 @@ void handle_core_fault(unsigned int irq, struct irq_desc *desc)
printk(KERN_NOTICE "Kernel Stack\n");
show_stack(current, NULL);
print_modules();
- panic("Kernel core hardware error");
+ panic("Core 0 hardware error");
break;
case IRQ_C0_NMI_L1_PARITY_ERR:
- panic("NMI occurs unexpectedly");
+ panic("Core 0 NMI L1 parity error");
break;
default:
- panic("Core 1 fault occurs unexpectedly");
+ panic("Core 1 fault %d occurs unexpectedly", irq);
}
raw_spin_unlock(&desc->lock);
}
-#endif
-
-#ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq_chip(struct irq_data *d)
-{
- bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
-}
-
-static int bfin_internal_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- bfin_internal_mask_irq(d->irq);
- bfin_internal_unmask_irq_affinity(d->irq, mask);
-
- return 0;
-}
-#else
-static void bfin_internal_unmask_irq_chip(struct irq_data *d)
-{
- bfin_internal_unmask_irq(d->irq);
-}
-#endif
-
-#if defined(CONFIG_PM) && !defined(SEC_GCTL)
-int bfin_internal_set_wake(unsigned int irq, unsigned int state)
-{
- u32 bank, bit, wakeup = 0;
- unsigned long flags;
- bank = SIC_SYSIRQ(irq) / 32;
- bit = SIC_SYSIRQ(irq) % 32;
-
- switch (irq) {
-#ifdef IRQ_RTC
- case IRQ_RTC:
- wakeup |= WAKE;
- break;
-#endif
-#ifdef IRQ_CAN0_RX
- case IRQ_CAN0_RX:
- wakeup |= CANWE;
- break;
-#endif
-#ifdef IRQ_CAN1_RX
- case IRQ_CAN1_RX:
- wakeup |= CANWE;
- break;
-#endif
-#ifdef IRQ_USB_INT0
- case IRQ_USB_INT0:
- wakeup |= USBWE;
- break;
-#endif
-#ifdef CONFIG_BF54x
- case IRQ_CNT:
- wakeup |= ROTWE;
- break;
-#endif
- default:
- break;
- }
-
- flags = hard_local_irq_save();
-
- if (state) {
- bfin_sic_iwr[bank] |= (1 << bit);
- vr_wakeup |= wakeup;
-
- } else {
- bfin_sic_iwr[bank] &= ~(1 << bit);
- vr_wakeup &= ~wakeup;
- }
-
- hard_local_irq_restore(flags);
-
- return 0;
-}
-
-static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
-{
- return bfin_internal_set_wake(d->irq, state);
-}
-#else
-inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
-{
- return 0;
-}
-# define bfin_internal_set_wake_chip NULL
-#endif
+#endif /* SEC_GCTL */
static struct irq_chip bfin_core_irqchip = {
.name = "CORE",
@@ -537,6 +536,7 @@ static struct irq_chip bfin_core_irqchip = {
.irq_unmask = bfin_core_unmask_irq,
};
+#ifndef SEC_GCTL
static struct irq_chip bfin_internal_irqchip = {
.name = "INTN",
.irq_mask = bfin_internal_mask_irq_chip,
@@ -548,8 +548,7 @@ static struct irq_chip bfin_internal_irqchip = {
#endif
.irq_set_wake = bfin_internal_set_wake_chip,
};
-
-#ifdef SEC_GCTL
+#else
static struct irq_chip bfin_sec_irqchip = {
.name = "SEC",
.irq_mask_ack = bfin_sec_mask_ack_irq,
@@ -1138,7 +1137,9 @@ static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
return -EINVAL;
}
+#ifndef SEC_GCTL
bfin_internal_set_wake(pint_irq, state);
+#endif
return 0;
}
@@ -1173,7 +1174,7 @@ static int sec_suspend(void)
u32 bank;
for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
- save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + SIC_SYSIRQ(IRQ_PINT0));
+ save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
return 0;
}
@@ -1187,7 +1188,7 @@ static void sec_resume(void)
bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
- bfin_write_SEC_SCTL(bank + SIC_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
+ bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
}
static struct syscore_ops sec_pm_syscore_ops = {
@@ -1538,33 +1539,26 @@ int __init init_arch_irq(void)
for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR) {
- irq_set_chip(irq, &bfin_core_irqchip);
-#ifdef CONFIG_TICKSOURCE_CORETMR
+ irq_set_chip_and_handler(irq, &bfin_core_irqchip,
+ handle_simple_irq);
+#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
if (irq == IRQ_CORETMR)
-# ifdef CONFIG_SMP
irq_set_handler(irq, handle_percpu_irq);
-# else
- irq_set_handler(irq, handle_simple_irq);
-# endif
#endif
- } else if (irq < BFIN_IRQ(0)) {
- irq_set_chip_and_handler(irq, &bfin_internal_irqchip,
- handle_simple_irq);
- } else if (irq == IRQ_SEC_ERR) {
- irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
- handle_sec_fault);
- } else if (irq < CORE_IRQS && irq >= IRQ_C0_DBL_FAULT) {
- irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
- handle_core_fault);
} else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
irq_set_chip(irq, &bfin_sec_irqchip);
irq_set_chained_handler(irq, bfin_demux_gpio_irq);
} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
- irq_set_chip(irq, &bfin_sec_irqchip);
- irq_set_handler(irq, handle_percpu_irq);
- } else {
irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
- handle_fasteoi_irq);
+ handle_percpu_irq);
+ } else {
+ irq_set_chip(irq, &bfin_sec_irqchip);
+ if (irq == IRQ_SEC_ERR)
+ irq_set_handler(irq, handle_sec_fault);
+ else if (irq >= IRQ_C0_DBL_FAULT && irq < CORE_IRQS)
+ irq_set_handler(irq, handle_core_fault);
+ else
+ irq_set_handler(irq, handle_fasteoi_irq);
__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
}
}
@@ -1593,8 +1587,8 @@ int __init init_arch_irq(void)
bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
- bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
- bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
+ bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
+ bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
udelay(100);
bfin_write_SEC_GCTL(SEC_GCTL_EN);
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 342e378da1e..1f3b3ef3e10 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -191,7 +191,7 @@ static irqreturn_t l2_ecc_err(int irq, void *dev_id)
{
int status;
- printk(KERN_ERR "L2 ecc error happend\n");
+ printk(KERN_ERR "L2 ecc error happened\n");
status = bfin_read32(L2CTL0_STAT);
if (status & 0x1)
printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n",
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 66eab3703c7..f6a3648f5ec 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -17,8 +17,6 @@ config C6X
select OF
select OF_EARLY_FLATTREE
select GENERIC_CLOCKEVENTS
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select MODULES_USE_ELF_RELA
config MMU
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index eae7b5963e8..4258b088aa9 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += mman.h
+generic-y += mmu.h
generic-y += mmu_context.h
generic-y += msgbuf.h
generic-y += param.h
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 03579fd99db..3c694065030 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -32,6 +32,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
+ debug_dma_mapping_error(dev, dma_addr);
return dma_addr == ~0;
}
diff --git a/arch/c6x/include/asm/mmu.h b/arch/c6x/include/asm/mmu.h
deleted file mode 100644
index 4467e770a1c..00000000000
--- a/arch/c6x/include/asm/mmu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_MMU_H
-#define _ASM_C6X_MMU_H
-
-typedef struct {
- unsigned long end_brk;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- unsigned long exec_fdpic_loadmap;
- unsigned long interp_fdpic_loadmap;
-#endif
-} mm_context_t;
-
-#endif /* _ASM_C6X_MMU_H */
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
index f3987a8703d..e7d09a614d1 100644
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ b/arch/c6x/include/uapi/asm/unistd.h
@@ -14,7 +14,6 @@
* more details.
*/
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
/* Use the standard ABI for syscalls. */
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0cac6a49f23..c59a01dd9c0 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -49,8 +49,6 @@ config CRIS
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
select GENERIC_CMOS_UPDATE
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS2
config HZ
diff --git a/arch/cris/include/arch-v10/arch/Kbuild b/arch/cris/include/arch-v10/arch/Kbuild
index 7a192e1290b..1f0fc7a66f5 100644
--- a/arch/cris/include/arch-v10/arch/Kbuild
+++ b/arch/cris/include/arch-v10/arch/Kbuild
@@ -1,4 +1 @@
-header-y += user.h
-header-y += svinto.h
-header-y += sv_addr_ag.h
-header-y += sv_addr.agh
+# CRISv10 arch
diff --git a/arch/cris/include/arch-v10/arch/irq.h b/arch/cris/include/arch-v10/arch/irq.h
index 7d345947b3e..ca2675ae08e 100644
--- a/arch/cris/include/arch-v10/arch/irq.h
+++ b/arch/cris/include/arch-v10/arch/irq.h
@@ -142,7 +142,7 @@ __asm__ ( \
* it here, we would not get the multiple_irq at all.
*
* The non-blocking here is based on the knowledge that the timer interrupt is
- * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not
+ * registered as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not
* be an sti() before the timer irq handler is run to acknowledge the interrupt.
*/
diff --git a/arch/cris/include/arch-v32/arch/Kbuild b/arch/cris/include/arch-v32/arch/Kbuild
index 35f2fc4f993..2fd65c7e15c 100644
--- a/arch/cris/include/arch-v32/arch/Kbuild
+++ b/arch/cris/include/arch-v32/arch/Kbuild
@@ -1,2 +1 @@
-header-y += user.h
-header-y += cryptocop.h
+# CRISv32 arch
diff --git a/arch/cris/include/arch-v32/arch/cryptocop.h b/arch/cris/include/arch-v32/arch/cryptocop.h
index e1cd83dfabb..716e434e926 100644
--- a/arch/cris/include/arch-v32/arch/cryptocop.h
+++ b/arch/cris/include/arch-v32/arch/cryptocop.h
@@ -2,124 +2,12 @@
* The device /dev/cryptocop is accessible using this driver using
* CRYPTOCOP_MAJOR (254) and minor number 0.
*/
-
#ifndef CRYPTOCOP_H
#define CRYPTOCOP_H
-#include <linux/uio.h>
-
-
-#define CRYPTOCOP_SESSION_ID_NONE (0)
-
-typedef unsigned long long int cryptocop_session_id;
-
-/* cryptocop ioctls */
-#define ETRAXCRYPTOCOP_IOCTYPE (250)
-
-#define CRYPTOCOP_IO_CREATE_SESSION _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 1, struct strcop_session_op)
-#define CRYPTOCOP_IO_CLOSE_SESSION _IOW(ETRAXCRYPTOCOP_IOCTYPE, 2, struct strcop_session_op)
-#define CRYPTOCOP_IO_PROCESS_OP _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 3, struct strcop_crypto_op)
-#define CRYPTOCOP_IO_MAXNR (3)
-
-typedef enum {
- cryptocop_cipher_des = 0,
- cryptocop_cipher_3des = 1,
- cryptocop_cipher_aes = 2,
- cryptocop_cipher_m2m = 3, /* mem2mem is essentially a NULL cipher with blocklength=1 */
- cryptocop_cipher_none
-} cryptocop_cipher_type;
-
-typedef enum {
- cryptocop_digest_sha1 = 0,
- cryptocop_digest_md5 = 1,
- cryptocop_digest_none
-} cryptocop_digest_type;
-
-typedef enum {
- cryptocop_csum_le = 0,
- cryptocop_csum_be = 1,
- cryptocop_csum_none
-} cryptocop_csum_type;
-
-typedef enum {
- cryptocop_cipher_mode_ecb = 0,
- cryptocop_cipher_mode_cbc,
- cryptocop_cipher_mode_none
-} cryptocop_cipher_mode;
-
-typedef enum {
- cryptocop_3des_eee = 0,
- cryptocop_3des_eed = 1,
- cryptocop_3des_ede = 2,
- cryptocop_3des_edd = 3,
- cryptocop_3des_dee = 4,
- cryptocop_3des_ded = 5,
- cryptocop_3des_dde = 6,
- cryptocop_3des_ddd = 7
-} cryptocop_3des_mode;
-
-/* Usermode accessible (ioctl) operations. */
-struct strcop_session_op{
- cryptocop_session_id ses_id;
-
- cryptocop_cipher_type cipher; /* AES, DES, 3DES, m2m, none */
-
- cryptocop_cipher_mode cmode; /* ECB, CBC, none */
- cryptocop_3des_mode des3_mode;
-
- cryptocop_digest_type digest; /* MD5, SHA1, none */
-
- cryptocop_csum_type csum; /* BE, LE, none */
-
- unsigned char *key;
- size_t keylen;
-};
-
-#define CRYPTOCOP_CSUM_LENGTH (2)
-#define CRYPTOCOP_MAX_DIGEST_LENGTH (20) /* SHA-1 20, MD5 16 */
-#define CRYPTOCOP_MAX_IV_LENGTH (16) /* (3)DES==8, AES == 16 */
-#define CRYPTOCOP_MAX_KEY_LENGTH (32)
-
-struct strcop_crypto_op{
- cryptocop_session_id ses_id;
-
- /* Indata. */
- unsigned char *indata;
- size_t inlen; /* Total indata length. */
-
- /* Cipher configuration. */
- unsigned char do_cipher:1;
- unsigned char decrypt:1; /* 1 == decrypt, 0 == encrypt */
- unsigned char cipher_explicit:1;
- size_t cipher_start;
- size_t cipher_len;
- /* cipher_iv is used if do_cipher and cipher_explicit and the cipher
- mode is CBC. The length is controlled by the type of cipher,
- e.g. DES/3DES 8 octets and AES 16 octets. */
- unsigned char cipher_iv[CRYPTOCOP_MAX_IV_LENGTH];
- /* Outdata. */
- unsigned char *cipher_outdata;
- size_t cipher_outlen;
-
- /* digest configuration. */
- unsigned char do_digest:1;
- size_t digest_start;
- size_t digest_len;
- /* Outdata. The actual length is determined by the type of the digest. */
- unsigned char digest[CRYPTOCOP_MAX_DIGEST_LENGTH];
-
- /* Checksum configuration. */
- unsigned char do_csum:1;
- size_t csum_start;
- size_t csum_len;
- /* Outdata. */
- unsigned char csum[CRYPTOCOP_CSUM_LENGTH];
-};
+#include <uapi/arch-v32/arch/cryptocop.h>
-
-#ifdef __KERNEL__
-
/********** The API to use from inside the kernel. ************/
#include <arch/hwregs/dma.h>
@@ -267,6 +155,4 @@ int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation);
int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation);
-#endif /* __KERNEL__ */
-
#endif /* CRYPTOCOP_H */
diff --git a/arch/cris/include/arch-v32/arch/irq.h b/arch/cris/include/arch-v32/arch/irq.h
index b31e9984f84..fe3cdd22bed 100644
--- a/arch/cris/include/arch-v32/arch/irq.h
+++ b/arch/cris/include/arch-v32/arch/irq.h
@@ -103,7 +103,7 @@ __asm__ ( \
* if we had BLOCK'edit here, we would not get the multiple_irq at all.
*
* The non-blocking here is based on the knowledge that the timer interrupt is
- * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not
+ * registered as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not
* be an sti() before the timer irq handler is run to acknowledge the interrupt.
*/
#define BUILD_TIMER_IRQ(nr, mask) \
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index f171a6600fb..f13275522f4 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -118,7 +118,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
ret = 1;
}
arch_spin_unlock(&rw->slock);
- return 1;
+ return ret;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 15a122c3767..f1e79edc9dd 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,12 +1,7 @@
-include include/asm-generic/Kbuild.asm
header-y += arch-v10/
header-y += arch-v32/
-header-y += ethernet.h
-header-y += etraxgpio.h
-header-y += rs485.h
-header-y += sync_serial.h
generic-y += clkdev.h
generic-y += exec.h
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index 32567bc2a42..ac12ae2b928 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
+static inline void outb(unsigned char data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 1, 1);
+}
+static inline void outw(unsigned short data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 2, 1);
+}
+static inline void outl(unsigned int data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 4, 1);
+}
+static inline void outsb(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 1, count);
+}
+static inline void outsw(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 2, count);
+}
+static inline void outsl(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 4, count);
+}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
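(Aside on the arch/cris asm/io.h hunk above, which replaces the if-based outb/outw/outl macros with static inline functions: the sketch below is a minimal userspace illustration of why a bare "if" inside a function-like macro is fragile. The names OUTB_MACRO, outb_inline and device_present are hypothetical stand-ins for illustration only, not part of the patch; device_present plays the role of the cris_iops check.)

#include <stdio.h>

static int device_present;	/* stand-in for the cris_iops != NULL test */

/* Macro in the style the patch removes: a bare "if" with no do { } while (0)
 * wrapper, so a following "else" binds to this hidden "if". */
#define OUTB_MACRO(data, port) \
	if (device_present) \
		printf("macro: wrote %#x to port %#x\n", \
		       (unsigned int)(data), (unsigned int)(port))

/* Function in the style the patch introduces: type-checked and else-safe. */
static inline void outb_inline(unsigned char data, unsigned int port)
{
	if (device_present)
		printf("inline: wrote %#x to port %#x\n", (unsigned int)data, port);
}

int main(void)
{
	int fast_path = 1;

	/* Dangling else: this "else" attaches to the macro's internal
	 * "if (device_present)", so with device_present == 0 the fallback
	 * message prints even though fast_path is true. */
	if (fast_path)
		OUTB_MACRO(0x41, 0x3f8);
	else
		printf("macro fallback taken (unexpected)\n");

	/* The inline version keeps the if/else structure the caller wrote:
	 * nothing prints here, since fast_path is true and the device
	 * stand-in is absent. */
	if (fast_path)
		outb_inline(0x41, 0x3f8);
	else
		printf("inline fallback taken\n");

	return 0;
}

(With device_present left at 0, only the "macro fallback taken" line prints; gcc -Wall also warns about the ambiguous else. Wrapping such macros in do { } while (0) would avoid the else capture, but converting them to static inline functions, as the patch does, additionally gives argument type checking.)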
diff --git a/arch/cris/include/asm/ptrace.h b/arch/cris/include/asm/ptrace.h
index 6618893bfe8..9e788d04a4e 100644
--- a/arch/cris/include/asm/ptrace.h
+++ b/arch/cris/include/asm/ptrace.h
@@ -1,16 +1,14 @@
#ifndef _CRIS_PTRACE_H
#define _CRIS_PTRACE_H
-#include <arch/ptrace.h>
+#include <uapi/asm/ptrace.h>
-#ifdef __KERNEL__
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define profile_pc(regs) instruction_pointer(regs)
-
-#endif /* __KERNEL__ */
+#define current_user_stack_pointer() rdusp()
#endif /* _CRIS_PTRACE_H */
diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
index 72dbbf59dfa..c0cb1fd4644 100644
--- a/arch/cris/include/asm/signal.h
+++ b/arch/cris/include/asm/signal.h
@@ -1,12 +1,8 @@
#ifndef _ASM_CRIS_SIGNAL_H
#define _ASM_CRIS_SIGNAL_H
-#include <linux/types.h>
+#include <uapi/asm/signal.h>
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
@@ -20,95 +16,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -126,32 +33,6 @@ struct sigaction {
struct k_sigaction {
struct sigaction sa;
};
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
-#endif /* __KERNEL__ */
#endif
diff --git a/arch/cris/include/asm/swab.h b/arch/cris/include/asm/swab.h
index 80668e88419..991b6ace1ba 100644
--- a/arch/cris/include/asm/swab.h
+++ b/arch/cris/include/asm/swab.h
@@ -1,8 +1,7 @@
#ifndef _CRIS_SWAB_H
#define _CRIS_SWAB_H
-#ifdef __KERNEL__
#include <arch/swab.h>
-#endif /* __KERNEL__ */
+#include <uapi/asm/swab.h>
#endif /* _CRIS_SWAB_H */
diff --git a/arch/cris/include/asm/termios.h b/arch/cris/include/asm/termios.h
index 1265109f4ce..1991cd9e408 100644
--- a/arch/cris/include/asm/termios.h
+++ b/arch/cris/include/asm/termios.h
@@ -1,47 +1,8 @@
#ifndef _CRIS_TERMIOS_H
#define _CRIS_TERMIOS_H
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-#include <asm/rs485.h>
-#include <linux/serial.h>
+#include <uapi/asm/termios.h>
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
@@ -87,6 +48,4 @@ struct termio {
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif /* __KERNEL__ */
-
#endif /* _CRIS_TERMIOS_H */
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
index adaf82780bb..a3cac7757c7 100644
--- a/arch/cris/include/asm/types.h
+++ b/arch/cris/include/asm/types.h
@@ -1,15 +1,12 @@
#ifndef _ETRAX_TYPES_H
#define _ETRAX_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
-#ifdef __KERNEL__
#define BITS_PER_LONG 32
-#endif /* __KERNEL__ */
-
#endif
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index f27b542e0eb..6d062bdf92d 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -1,347 +1,8 @@
#ifndef _ASM_CRIS_UNISTD_H_
#define _ASM_CRIS_UNISTD_H_
-/*
- * This file contains the system call numbers, and stub macros for libc.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
-/* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
-#define __NR_vserver 273
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_utimensat 320
-#define __NR_signalfd 321
-#define __NR_timerfd_create 322
-#define __NR_eventfd 323
-#define __NR_fallocate 324
-#define __NR_timerfd_settime 325
-#define __NR_timerfd_gettime 326
-#define __NR_signalfd4 327
-#define __NR_eventfd2 328
-#define __NR_epoll_create1 329
-#define __NR_dup3 330
-#define __NR_pipe2 331
-#define __NR_inotify_init1 332
-#define __NR_preadv 333
-#define __NR_pwritev 334
-#define __NR_setns 335
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
#define NR_syscalls 336
@@ -371,7 +32,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
@@ -384,5 +44,4 @@
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif /* __KERNEL__ */
#endif /* _ASM_CRIS_UNISTD_H_ */
diff --git a/arch/cris/include/uapi/arch-v10/arch/Kbuild b/arch/cris/include/uapi/arch-v10/arch/Kbuild
index aafaa5aa54d..9048c87a782 100644
--- a/arch/cris/include/uapi/arch-v10/arch/Kbuild
+++ b/arch/cris/include/uapi/arch-v10/arch/Kbuild
@@ -1 +1,5 @@
# UAPI Header export list
+header-y += sv_addr.agh
+header-y += sv_addr_ag.h
+header-y += svinto.h
+header-y += user.h
diff --git a/arch/cris/include/arch-v10/arch/sv_addr.agh b/arch/cris/include/uapi/arch-v10/arch/sv_addr.agh
index 6ac3a7bc976..6ac3a7bc976 100644
--- a/arch/cris/include/arch-v10/arch/sv_addr.agh
+++ b/arch/cris/include/uapi/arch-v10/arch/sv_addr.agh
diff --git a/arch/cris/include/arch-v10/arch/sv_addr_ag.h b/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h
index 5517f04153a..5517f04153a 100644
--- a/arch/cris/include/arch-v10/arch/sv_addr_ag.h
+++ b/arch/cris/include/uapi/arch-v10/arch/sv_addr_ag.h
diff --git a/arch/cris/include/arch-v10/arch/svinto.h b/arch/cris/include/uapi/arch-v10/arch/svinto.h
index da5c1527265..da5c1527265 100644
--- a/arch/cris/include/arch-v10/arch/svinto.h
+++ b/arch/cris/include/uapi/arch-v10/arch/svinto.h
diff --git a/arch/cris/include/arch-v10/arch/user.h b/arch/cris/include/uapi/arch-v10/arch/user.h
index 9303ea77c91..9303ea77c91 100644
--- a/arch/cris/include/arch-v10/arch/user.h
+++ b/arch/cris/include/uapi/arch-v10/arch/user.h
diff --git a/arch/cris/include/uapi/arch-v32/arch/Kbuild b/arch/cris/include/uapi/arch-v32/arch/Kbuild
index aafaa5aa54d..59efffd16b6 100644
--- a/arch/cris/include/uapi/arch-v32/arch/Kbuild
+++ b/arch/cris/include/uapi/arch-v32/arch/Kbuild
@@ -1 +1,3 @@
# UAPI Header export list
+header-y += cryptocop.h
+header-y += user.h
diff --git a/arch/cris/include/uapi/arch-v32/arch/cryptocop.h b/arch/cris/include/uapi/arch-v32/arch/cryptocop.h
new file mode 100644
index 00000000000..694fd13ce1c
--- /dev/null
+++ b/arch/cris/include/uapi/arch-v32/arch/cryptocop.h
@@ -0,0 +1,122 @@
+/*
+ * The device /dev/cryptocop is accessible using this driver using
+ * CRYPTOCOP_MAJOR (254) and minor number 0.
+ */
+
+#ifndef _UAPICRYPTOCOP_H
+#define _UAPICRYPTOCOP_H
+
+#include <linux/uio.h>
+
+
+#define CRYPTOCOP_SESSION_ID_NONE (0)
+
+typedef unsigned long long int cryptocop_session_id;
+
+/* cryptocop ioctls */
+#define ETRAXCRYPTOCOP_IOCTYPE (250)
+
+#define CRYPTOCOP_IO_CREATE_SESSION _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 1, struct strcop_session_op)
+#define CRYPTOCOP_IO_CLOSE_SESSION _IOW(ETRAXCRYPTOCOP_IOCTYPE, 2, struct strcop_session_op)
+#define CRYPTOCOP_IO_PROCESS_OP _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 3, struct strcop_crypto_op)
+#define CRYPTOCOP_IO_MAXNR (3)
+
+typedef enum {
+ cryptocop_cipher_des = 0,
+ cryptocop_cipher_3des = 1,
+ cryptocop_cipher_aes = 2,
+ cryptocop_cipher_m2m = 3, /* mem2mem is essentially a NULL cipher with blocklength=1 */
+ cryptocop_cipher_none
+} cryptocop_cipher_type;
+
+typedef enum {
+ cryptocop_digest_sha1 = 0,
+ cryptocop_digest_md5 = 1,
+ cryptocop_digest_none
+} cryptocop_digest_type;
+
+typedef enum {
+ cryptocop_csum_le = 0,
+ cryptocop_csum_be = 1,
+ cryptocop_csum_none
+} cryptocop_csum_type;
+
+typedef enum {
+ cryptocop_cipher_mode_ecb = 0,
+ cryptocop_cipher_mode_cbc,
+ cryptocop_cipher_mode_none
+} cryptocop_cipher_mode;
+
+typedef enum {
+ cryptocop_3des_eee = 0,
+ cryptocop_3des_eed = 1,
+ cryptocop_3des_ede = 2,
+ cryptocop_3des_edd = 3,
+ cryptocop_3des_dee = 4,
+ cryptocop_3des_ded = 5,
+ cryptocop_3des_dde = 6,
+ cryptocop_3des_ddd = 7
+} cryptocop_3des_mode;
+
+/* Usermode accessible (ioctl) operations. */
+struct strcop_session_op{
+ cryptocop_session_id ses_id;
+
+ cryptocop_cipher_type cipher; /* AES, DES, 3DES, m2m, none */
+
+ cryptocop_cipher_mode cmode; /* ECB, CBC, none */
+ cryptocop_3des_mode des3_mode;
+
+ cryptocop_digest_type digest; /* MD5, SHA1, none */
+
+ cryptocop_csum_type csum; /* BE, LE, none */
+
+ unsigned char *key;
+ size_t keylen;
+};
+
+#define CRYPTOCOP_CSUM_LENGTH (2)
+#define CRYPTOCOP_MAX_DIGEST_LENGTH (20) /* SHA-1 20, MD5 16 */
+#define CRYPTOCOP_MAX_IV_LENGTH (16) /* (3)DES==8, AES == 16 */
+#define CRYPTOCOP_MAX_KEY_LENGTH (32)
+
+struct strcop_crypto_op{
+ cryptocop_session_id ses_id;
+
+ /* Indata. */
+ unsigned char *indata;
+ size_t inlen; /* Total indata length. */
+
+ /* Cipher configuration. */
+ unsigned char do_cipher:1;
+ unsigned char decrypt:1; /* 1 == decrypt, 0 == encrypt */
+ unsigned char cipher_explicit:1;
+ size_t cipher_start;
+ size_t cipher_len;
+ /* cipher_iv is used if do_cipher and cipher_explicit and the cipher
+ mode is CBC. The length is controlled by the type of cipher,
+ e.g. DES/3DES 8 octets and AES 16 octets. */
+ unsigned char cipher_iv[CRYPTOCOP_MAX_IV_LENGTH];
+ /* Outdata. */
+ unsigned char *cipher_outdata;
+ size_t cipher_outlen;
+
+ /* digest configuration. */
+ unsigned char do_digest:1;
+ size_t digest_start;
+ size_t digest_len;
+ /* Outdata. The actual length is determined by the type of the digest. */
+ unsigned char digest[CRYPTOCOP_MAX_DIGEST_LENGTH];
+
+ /* Checksum configuration. */
+ unsigned char do_csum:1;
+ size_t csum_start;
+ size_t csum_len;
+ /* Outdata. */
+ unsigned char csum[CRYPTOCOP_CSUM_LENGTH];
+};
+
+
+
+
+#endif /* _UAPICRYPTOCOP_H */
diff --git a/arch/cris/include/arch-v32/arch/user.h b/arch/cris/include/uapi/arch-v32/arch/user.h
index 03fa1f3c3c0..03fa1f3c3c0 100644
--- a/arch/cris/include/arch-v32/arch/user.h
+++ b/arch/cris/include/uapi/arch-v32/arch/user.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index f50236ae9ca..7d47b366ad8 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -3,3 +3,37 @@ include include/uapi/asm-generic/Kbuild.asm
header-y += arch-v10/
header-y += arch-v32/
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += errno.h
+header-y += ethernet.h
+header-y += etraxgpio.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += rs485.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += sync_serial.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/cris/include/asm/auxvec.h b/arch/cris/include/uapi/asm/auxvec.h
index cb30b01bf19..cb30b01bf19 100644
--- a/arch/cris/include/asm/auxvec.h
+++ b/arch/cris/include/uapi/asm/auxvec.h
diff --git a/arch/cris/include/asm/bitsperlong.h b/arch/cris/include/uapi/asm/bitsperlong.h
index 6dc0bb0c13b..6dc0bb0c13b 100644
--- a/arch/cris/include/asm/bitsperlong.h
+++ b/arch/cris/include/uapi/asm/bitsperlong.h
diff --git a/arch/cris/include/asm/byteorder.h b/arch/cris/include/uapi/asm/byteorder.h
index bcd189798e2..bcd189798e2 100644
--- a/arch/cris/include/asm/byteorder.h
+++ b/arch/cris/include/uapi/asm/byteorder.h
diff --git a/arch/cris/include/asm/errno.h b/arch/cris/include/uapi/asm/errno.h
index 2bf5eb5fa77..2bf5eb5fa77 100644
--- a/arch/cris/include/asm/errno.h
+++ b/arch/cris/include/uapi/asm/errno.h
diff --git a/arch/cris/include/asm/ethernet.h b/arch/cris/include/uapi/asm/ethernet.h
index 4d58652c3a4..4d58652c3a4 100644
--- a/arch/cris/include/asm/ethernet.h
+++ b/arch/cris/include/uapi/asm/ethernet.h
diff --git a/arch/cris/include/asm/etraxgpio.h b/arch/cris/include/uapi/asm/etraxgpio.h
index 461c089db76..461c089db76 100644
--- a/arch/cris/include/asm/etraxgpio.h
+++ b/arch/cris/include/uapi/asm/etraxgpio.h
diff --git a/arch/cris/include/asm/fcntl.h b/arch/cris/include/uapi/asm/fcntl.h
index 46ab12db573..46ab12db573 100644
--- a/arch/cris/include/asm/fcntl.h
+++ b/arch/cris/include/uapi/asm/fcntl.h
diff --git a/arch/cris/include/asm/ioctl.h b/arch/cris/include/uapi/asm/ioctl.h
index b279fe06dfe..b279fe06dfe 100644
--- a/arch/cris/include/asm/ioctl.h
+++ b/arch/cris/include/uapi/asm/ioctl.h
diff --git a/arch/cris/include/asm/ioctls.h b/arch/cris/include/uapi/asm/ioctls.h
index 488fbb3f5e8..488fbb3f5e8 100644
--- a/arch/cris/include/asm/ioctls.h
+++ b/arch/cris/include/uapi/asm/ioctls.h
diff --git a/arch/cris/include/asm/ipcbuf.h b/arch/cris/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/cris/include/asm/ipcbuf.h
+++ b/arch/cris/include/uapi/asm/ipcbuf.h
diff --git a/arch/cris/include/asm/mman.h b/arch/cris/include/uapi/asm/mman.h
index 8eebf89f5ab..8eebf89f5ab 100644
--- a/arch/cris/include/asm/mman.h
+++ b/arch/cris/include/uapi/asm/mman.h
diff --git a/arch/cris/include/asm/msgbuf.h b/arch/cris/include/uapi/asm/msgbuf.h
index ada63df1d57..ada63df1d57 100644
--- a/arch/cris/include/asm/msgbuf.h
+++ b/arch/cris/include/uapi/asm/msgbuf.h
diff --git a/arch/cris/include/asm/param.h b/arch/cris/include/uapi/asm/param.h
index 484fcf8667c..484fcf8667c 100644
--- a/arch/cris/include/asm/param.h
+++ b/arch/cris/include/uapi/asm/param.h
diff --git a/arch/cris/include/asm/poll.h b/arch/cris/include/uapi/asm/poll.h
index c98509d3149..c98509d3149 100644
--- a/arch/cris/include/asm/poll.h
+++ b/arch/cris/include/uapi/asm/poll.h
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/uapi/asm/posix_types.h
index ce4e5179315..ce4e5179315 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/uapi/asm/posix_types.h
diff --git a/arch/cris/include/uapi/asm/ptrace.h b/arch/cris/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..c689c9bbbe5
--- /dev/null
+++ b/arch/cris/include/uapi/asm/ptrace.h
@@ -0,0 +1 @@
+#include <arch/ptrace.h>
diff --git a/arch/cris/include/asm/resource.h b/arch/cris/include/uapi/asm/resource.h
index b5d29448de4..b5d29448de4 100644
--- a/arch/cris/include/asm/resource.h
+++ b/arch/cris/include/uapi/asm/resource.h
diff --git a/arch/cris/include/asm/rs485.h b/arch/cris/include/uapi/asm/rs485.h
index ad40f9fbcb8..ad40f9fbcb8 100644
--- a/arch/cris/include/asm/rs485.h
+++ b/arch/cris/include/uapi/asm/rs485.h
diff --git a/arch/cris/include/asm/sembuf.h b/arch/cris/include/uapi/asm/sembuf.h
index 7fed9843796..7fed9843796 100644
--- a/arch/cris/include/asm/sembuf.h
+++ b/arch/cris/include/uapi/asm/sembuf.h
diff --git a/arch/cris/include/asm/setup.h b/arch/cris/include/uapi/asm/setup.h
index b90728652d1..b90728652d1 100644
--- a/arch/cris/include/asm/setup.h
+++ b/arch/cris/include/uapi/asm/setup.h
diff --git a/arch/cris/include/asm/shmbuf.h b/arch/cris/include/uapi/asm/shmbuf.h
index 3239e3f000e..3239e3f000e 100644
--- a/arch/cris/include/asm/shmbuf.h
+++ b/arch/cris/include/uapi/asm/shmbuf.h
diff --git a/arch/cris/include/asm/sigcontext.h b/arch/cris/include/uapi/asm/sigcontext.h
index a1d634e120d..a1d634e120d 100644
--- a/arch/cris/include/asm/sigcontext.h
+++ b/arch/cris/include/uapi/asm/sigcontext.h
diff --git a/arch/cris/include/asm/siginfo.h b/arch/cris/include/uapi/asm/siginfo.h
index c1cd6d16928..c1cd6d16928 100644
--- a/arch/cris/include/asm/siginfo.h
+++ b/arch/cris/include/uapi/asm/siginfo.h
diff --git a/arch/cris/include/uapi/asm/signal.h b/arch/cris/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..ce42fa7c32a
--- /dev/null
+++ b/arch/cris/include/uapi/asm/signal.h
@@ -0,0 +1,116 @@
+#ifndef _UAPI_ASM_CRIS_SIGNAL_H
+#define _UAPI_ASM_CRIS_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+
+#define SA_NOCLDSTOP 0x00000001u
+#define SA_NOCLDWAIT 0x00000002u
+#define SA_SIGINFO 0x00000004u
+#define SA_ONSTACK 0x08000000u
+#define SA_RESTART 0x10000000u
+#define SA_NODEFER 0x40000000u
+#define SA_RESETHAND 0x80000000u
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+#endif /* _UAPI_ASM_CRIS_SIGNAL_H */
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index b681b043f6c..b681b043f6c 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
diff --git a/arch/cris/include/asm/sockios.h b/arch/cris/include/uapi/asm/sockios.h
index cfe7bfecf59..cfe7bfecf59 100644
--- a/arch/cris/include/asm/sockios.h
+++ b/arch/cris/include/uapi/asm/sockios.h
diff --git a/arch/cris/include/asm/stat.h b/arch/cris/include/uapi/asm/stat.h
index 9e558cc3c43..9e558cc3c43 100644
--- a/arch/cris/include/asm/stat.h
+++ b/arch/cris/include/uapi/asm/stat.h
diff --git a/arch/cris/include/asm/statfs.h b/arch/cris/include/uapi/asm/statfs.h
index fdaf921844b..fdaf921844b 100644
--- a/arch/cris/include/asm/statfs.h
+++ b/arch/cris/include/uapi/asm/statfs.h
diff --git a/arch/cris/include/uapi/asm/swab.h b/arch/cris/include/uapi/asm/swab.h
new file mode 100644
index 00000000000..4adf1e9f0b0
--- /dev/null
+++ b/arch/cris/include/uapi/asm/swab.h
@@ -0,0 +1,3 @@
+/*
+ * CRIS byte swapping.
+ */
diff --git a/arch/cris/include/asm/sync_serial.h b/arch/cris/include/uapi/asm/sync_serial.h
index 7f827fea30e..7f827fea30e 100644
--- a/arch/cris/include/asm/sync_serial.h
+++ b/arch/cris/include/uapi/asm/sync_serial.h
diff --git a/arch/cris/include/asm/termbits.h b/arch/cris/include/uapi/asm/termbits.h
index 1c43bc874cc..1c43bc874cc 100644
--- a/arch/cris/include/asm/termbits.h
+++ b/arch/cris/include/uapi/asm/termbits.h
diff --git a/arch/cris/include/uapi/asm/termios.h b/arch/cris/include/uapi/asm/termios.h
new file mode 100644
index 00000000000..0a0386a5502
--- /dev/null
+++ b/arch/cris/include/uapi/asm/termios.h
@@ -0,0 +1,45 @@
+#ifndef _UAPI_CRIS_TERMIOS_H
+#define _UAPI_CRIS_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+#include <asm/rs485.h>
+#include <linux/serial.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+
+#endif /* _UAPI_CRIS_TERMIOS_H */
diff --git a/arch/cris/include/uapi/asm/types.h b/arch/cris/include/uapi/asm/types.h
new file mode 100644
index 00000000000..9ec9d4c5ac4
--- /dev/null
+++ b/arch/cris/include/uapi/asm/types.h
@@ -0,0 +1 @@
+#include <asm-generic/int-ll64.h>
diff --git a/arch/cris/include/uapi/asm/unistd.h b/arch/cris/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..48842896f6c
--- /dev/null
+++ b/arch/cris/include/uapi/asm/unistd.h
@@ -0,0 +1,344 @@
+#ifndef _UAPI_ASM_CRIS_UNISTD_H_
+#define _UAPI_ASM_CRIS_UNISTD_H_
+
+/*
+ * This file contains the system call numbers, and stub macros for libc.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+/* 223 is unused */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_set_thread_area 243
+#define __NR_get_thread_area 244
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+#define __NR_fadvise64 250
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_epoll_create 254
+#define __NR_epoll_ctl 255
+#define __NR_epoll_wait 256
+#define __NR_remap_file_pages 257
+#define __NR_set_tid_address 258
+#define __NR_timer_create 259
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 268
+#define __NR_fstatfs64 269
+#define __NR_tgkill 270
+#define __NR_utimes 271
+#define __NR_fadvise64_64 272
+#define __NR_vserver 273
+#define __NR_mbind 274
+#define __NR_get_mempolicy 275
+#define __NR_set_mempolicy 276
+#define __NR_mq_open 277
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_kexec_load 283
+#define __NR_waitid 284
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+#define __NR_set_robust_list 311
+#define __NR_get_robust_list 312
+#define __NR_splice 313
+#define __NR_sync_file_range 314
+#define __NR_tee 315
+#define __NR_vmsplice 316
+#define __NR_move_pages 317
+#define __NR_getcpu 318
+#define __NR_epoll_pwait 319
+#define __NR_utimensat 320
+#define __NR_signalfd 321
+#define __NR_timerfd_create 322
+#define __NR_eventfd 323
+#define __NR_fallocate 324
+#define __NR_timerfd_settime 325
+#define __NR_timerfd_gettime 326
+#define __NR_signalfd4 327
+#define __NR_eventfd2 328
+#define __NR_epoll_create1 329
+#define __NR_dup3 330
+#define __NR_pipe2 331
+#define __NR_inotify_init1 332
+#define __NR_preadv 333
+#define __NR_pwritev 334
+#define __NR_setns 335
+
+#endif /* _UAPI_ASM_CRIS_UNISTD_H_ */
diff --git a/arch/cris/kernel/asm-offsets.c b/arch/cris/kernel/asm-offsets.c
index dd7b8e98322..a5fd88d816a 100644
--- a/arch/cris/kernel/asm-offsets.c
+++ b/arch/cris/kernel/asm-offsets.c
@@ -1,3 +1,4 @@
+#include <linux/kbuild.h>
#include <linux/sched.h>
#include <asm/thread_info.h>
@@ -7,11 +8,6 @@
* and format the required data.
*/
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
#if !defined(CONFIG_ETRAX_ARCH_V10) && !defined(CONFIG_ETRAX_ARCH_V32)
#error One of ARCH v10 and ARCH v32 must be true!
#endif
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 37400f5869e..51123f985eb 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -32,8 +32,6 @@
#ifdef CONFIG_ETRAX_KMALLOCED_MODULES
void *module_alloc(unsigned long size)
{
- if (size == 0)
- return NULL;
return kmalloc(size, GFP_KERNEL);
}
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index df2eb4bd9fa..9d262645f66 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -12,8 +12,6 @@ config FRV
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CPU_DEVICES
select ARCH_WANT_IPC_PARSE_VERSION
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 1807d8ea8cb..d685da17f5f 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -29,7 +29,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index b8993c87d3d..3cb3392f799 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -804,9 +804,9 @@ void __init setup_arch(char **cmdline_p)
BUG_ON(memory_start == memory_end);
- init_mm.start_code = (unsigned long) &_stext;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
#if 0 /* DAVIDM - don't set brk just incase someone decides to use it */
init_mm.brk = (unsigned long) &_end;
#else
@@ -814,10 +814,8 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef DEBUG
- printk("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x BSS=0x%06x-0x%06x\n",
- (int) &_stext, (int) &_etext,
- (int) &_sdata, (int) &_edata,
- (int) &_sbss, (int) &_ebss);
+ printk("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+ _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
#endif
#ifdef CONFIG_VT
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index a19effcccb3..92e97b0894a 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -146,7 +146,7 @@ void __init mem_init(void)
#else
codek = (_etext - _stext) >> 10;
- datak = 0; //(_ebss - _sdata) >> 10;
+ datak = 0; //(__bss_stop - _sdata) >> 10;
#endif
tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 4fb63a36bd5..f6084bc524e 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -77,7 +77,7 @@ void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
* checks at dup_mmap(), exec(), and other mmlist addition points
* could be used. The locking scheme was chosen on the basis of
* manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
*/
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 04bef4d25b4..2d2efb653ee 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -3,13 +3,12 @@ config H8300
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_ATOMIC64
select HAVE_UID16
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config SYMBOL_PREFIX
string
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 4bc8ae73e08..995eb47e01b 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,6 +1,6 @@
-include include/asm-generic/Kbuild.asm
generic-y += clkdev.h
generic-y += exec.h
+generic-y += mmu.h
generic-y += module.h
generic-y += trace_clock.h
diff --git a/arch/h8300/include/asm/mmu.h b/arch/h8300/include/asm/mmu.h
deleted file mode 100644
index 31309969df7..00000000000
--- a/arch/h8300/include/asm/mmu.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __MMU_H
-#define __MMU_H
-
-/* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
-
-typedef struct {
- unsigned long end_brk;
-} mm_context_t;
-
-#endif
diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h
index 1c72fb8080f..c3909e7ff17 100644
--- a/arch/h8300/include/asm/param.h
+++ b/arch/h8300/include/asm/param.h
@@ -1,20 +1,9 @@
#ifndef _H8300_PARAM_H
#define _H8300_PARAM_H
-#ifdef __KERNEL__
+#include <uapi/asm/param.h>
+
#define HZ CONFIG_HZ
#define USER_HZ HZ
#define CLOCKS_PER_SEC (USER_HZ)
-#else
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
#endif /* _H8300_PARAM_H */
diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h
index 7468589a128..c1826b95c5c 100644
--- a/arch/h8300/include/asm/ptrace.h
+++ b/arch/h8300/include/asm/ptrace.h
@@ -1,46 +1,11 @@
#ifndef _H8300_PTRACE_H
#define _H8300_PTRACE_H
-#ifndef __ASSEMBLY__
-
-#define PT_ER1 0
-#define PT_ER2 1
-#define PT_ER3 2
-#define PT_ER4 3
-#define PT_ER5 4
-#define PT_ER6 5
-#define PT_ER0 6
-#define PT_ORIG_ER0 7
-#define PT_CCR 8
-#define PT_PC 9
-#define PT_USP 10
-#define PT_EXR 12
-
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
+#include <uapi/asm/ptrace.h>
-struct pt_regs {
- long retpc;
- long er4;
- long er5;
- long er6;
- long er3;
- long er2;
- long er1;
- long orig_er0;
- unsigned short ccr;
- long er0;
- long vector;
+#ifndef __ASSEMBLY__
#if defined(CONFIG_CPU_H8S)
- unsigned short exr;
#endif
- unsigned long pc;
-} __attribute__((aligned(2),packed));
-
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-
-#ifdef __KERNEL__
#ifndef PS_S
#define PS_S (0x10)
#endif
@@ -63,6 +28,6 @@ struct pt_regs {
#define current_pt_regs() ((struct pt_regs *) \
(THREAD_SIZE + (unsigned long)current_thread_info()) - 1)
#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0)
-#endif /* __KERNEL__ */
+#define current_user_stack_pointer() rdusp()
#endif /* __ASSEMBLY__ */
#endif /* _H8300_PTRACE_H */
diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
index c43c0a7d2c2..66c81c67e55 100644
--- a/arch/h8300/include/asm/signal.h
+++ b/arch/h8300/include/asm/signal.h
@@ -1,12 +1,8 @@
#ifndef _H8300_SIGNAL_H
#define _H8300_SIGNAL_H
-#include <linux/types.h>
+#include <uapi/asm/signal.h>
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
@@ -20,94 +16,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -125,35 +33,8 @@ struct sigaction {
struct k_sigaction {
struct sigaction sa;
};
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
-#endif /* __KERNEL__ */
-
#endif /* _H8300_SIGNAL_H */
diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h
index 70eea64b421..93a63df5624 100644
--- a/arch/h8300/include/asm/termios.h
+++ b/arch/h8300/include/asm/termios.h
@@ -1,27 +1,8 @@
#ifndef _H8300_TERMIOS_H
#define _H8300_TERMIOS_H
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
+#include <uapi/asm/termios.h>
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-#ifdef __KERNEL__
/* intr=^C quit=^| erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
@@ -29,27 +10,6 @@ struct termio {
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-#endif
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
/*
* Translate a "termio" structure into a "termios". Ugh.
@@ -87,6 +47,4 @@ struct termio {
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif /* __KERNEL__ */
-
#endif /* _H8300_TERMIOS_H */
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
index 07257d9487d..c012707f603 100644
--- a/arch/h8300/include/asm/types.h
+++ b/arch/h8300/include/asm/types.h
@@ -1,12 +1,9 @@
#ifndef _H8300_TYPES_H
#define _H8300_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
-#ifdef __KERNEL__
#define BITS_PER_LONG 32
-#endif /* __KERNEL__ */
-
#endif /* _H8300_TYPES_H */
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index c2c2f5c7d6b..aa38105959f 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -1,333 +1,8 @@
#ifndef _ASM_H8300_UNISTD_H_
#define _ASM_H8300_UNISTD_H_
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86old 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_vm86 166
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_madvise1 219
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
-/* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
-#define __NR_vserver 273
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_setns 320
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
#define NR_syscalls 321
@@ -356,7 +31,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
@@ -368,5 +42,4 @@
asm (".weak\t_" #name "\n" \
".set\t_" #name ",_sys_ni_syscall");
-#endif /* __KERNEL__ */
#endif /* _ASM_H8300_UNISTD_H_ */
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index baebb3da1d4..040178cdb3e 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,3 +1,34 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += kvm_para.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/h8300/include/asm/auxvec.h b/arch/h8300/include/uapi/asm/auxvec.h
index 1d36fe38b08..1d36fe38b08 100644
--- a/arch/h8300/include/asm/auxvec.h
+++ b/arch/h8300/include/uapi/asm/auxvec.h
diff --git a/arch/h8300/include/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
index 6dc0bb0c13b..6dc0bb0c13b 100644
--- a/arch/h8300/include/asm/bitsperlong.h
+++ b/arch/h8300/include/uapi/asm/bitsperlong.h
diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/uapi/asm/byteorder.h
index 13539da99ef..13539da99ef 100644
--- a/arch/h8300/include/asm/byteorder.h
+++ b/arch/h8300/include/uapi/asm/byteorder.h
diff --git a/arch/h8300/include/asm/errno.h b/arch/h8300/include/uapi/asm/errno.h
index 0c2f5641fdc..0c2f5641fdc 100644
--- a/arch/h8300/include/asm/errno.h
+++ b/arch/h8300/include/uapi/asm/errno.h
diff --git a/arch/h8300/include/asm/fcntl.h b/arch/h8300/include/uapi/asm/fcntl.h
index 1952cb2e3b0..1952cb2e3b0 100644
--- a/arch/h8300/include/asm/fcntl.h
+++ b/arch/h8300/include/uapi/asm/fcntl.h
diff --git a/arch/h8300/include/asm/ioctl.h b/arch/h8300/include/uapi/asm/ioctl.h
index b279fe06dfe..b279fe06dfe 100644
--- a/arch/h8300/include/asm/ioctl.h
+++ b/arch/h8300/include/uapi/asm/ioctl.h
diff --git a/arch/h8300/include/asm/ioctls.h b/arch/h8300/include/uapi/asm/ioctls.h
index 30eaed2facd..30eaed2facd 100644
--- a/arch/h8300/include/asm/ioctls.h
+++ b/arch/h8300/include/uapi/asm/ioctls.h
diff --git a/arch/h8300/include/asm/ipcbuf.h b/arch/h8300/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/h8300/include/asm/ipcbuf.h
+++ b/arch/h8300/include/uapi/asm/ipcbuf.h
diff --git a/arch/blackfin/include/asm/kvm_para.h b/arch/h8300/include/uapi/asm/kvm_para.h
index 14fab8f0b95..14fab8f0b95 100644
--- a/arch/blackfin/include/asm/kvm_para.h
+++ b/arch/h8300/include/uapi/asm/kvm_para.h
diff --git a/arch/h8300/include/asm/mman.h b/arch/h8300/include/uapi/asm/mman.h
index 8eebf89f5ab..8eebf89f5ab 100644
--- a/arch/h8300/include/asm/mman.h
+++ b/arch/h8300/include/uapi/asm/mman.h
diff --git a/arch/h8300/include/asm/msgbuf.h b/arch/h8300/include/uapi/asm/msgbuf.h
index 6b148cd09aa..6b148cd09aa 100644
--- a/arch/h8300/include/asm/msgbuf.h
+++ b/arch/h8300/include/uapi/asm/msgbuf.h
diff --git a/arch/h8300/include/uapi/asm/param.h b/arch/h8300/include/uapi/asm/param.h
new file mode 100644
index 00000000000..3dd18ae15f0
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/param.h
@@ -0,0 +1,16 @@
+#ifndef _UAPI_H8300_PARAM_H
+#define _UAPI_H8300_PARAM_H
+
+#ifndef __KERNEL__
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _UAPI_H8300_PARAM_H */
diff --git a/arch/h8300/include/asm/poll.h b/arch/h8300/include/uapi/asm/poll.h
index f61540c22d9..f61540c22d9 100644
--- a/arch/h8300/include/asm/poll.h
+++ b/arch/h8300/include/uapi/asm/poll.h
diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h
index 91e62ba4c7b..91e62ba4c7b 100644
--- a/arch/h8300/include/asm/posix_types.h
+++ b/arch/h8300/include/uapi/asm/posix_types.h
diff --git a/arch/h8300/include/uapi/asm/ptrace.h b/arch/h8300/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..ef39ec5977b
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/ptrace.h
@@ -0,0 +1,44 @@
+#ifndef _UAPI_H8300_PTRACE_H
+#define _UAPI_H8300_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+#define PT_ER1 0
+#define PT_ER2 1
+#define PT_ER3 2
+#define PT_ER4 3
+#define PT_ER5 4
+#define PT_ER6 5
+#define PT_ER0 6
+#define PT_ORIG_ER0 7
+#define PT_CCR 8
+#define PT_PC 9
+#define PT_USP 10
+#define PT_EXR 12
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long retpc;
+ long er4;
+ long er5;
+ long er6;
+ long er3;
+ long er2;
+ long er1;
+ long orig_er0;
+ unsigned short ccr;
+ long er0;
+ long vector;
+#if defined(CONFIG_CPU_H8S)
+ unsigned short exr;
+#endif
+ unsigned long pc;
+} __attribute__((aligned(2),packed));
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_H8300_PTRACE_H */
diff --git a/arch/h8300/include/asm/resource.h b/arch/h8300/include/uapi/asm/resource.h
index 46c5f439160..46c5f439160 100644
--- a/arch/h8300/include/asm/resource.h
+++ b/arch/h8300/include/uapi/asm/resource.h
diff --git a/arch/h8300/include/asm/sembuf.h b/arch/h8300/include/uapi/asm/sembuf.h
index e04a3ec0cb9..e04a3ec0cb9 100644
--- a/arch/h8300/include/asm/sembuf.h
+++ b/arch/h8300/include/uapi/asm/sembuf.h
diff --git a/arch/h8300/include/asm/setup.h b/arch/h8300/include/uapi/asm/setup.h
index e2c600e9673..e2c600e9673 100644
--- a/arch/h8300/include/asm/setup.h
+++ b/arch/h8300/include/uapi/asm/setup.h
diff --git a/arch/h8300/include/asm/shmbuf.h b/arch/h8300/include/uapi/asm/shmbuf.h
index 64e77993a7a..64e77993a7a 100644
--- a/arch/h8300/include/asm/shmbuf.h
+++ b/arch/h8300/include/uapi/asm/shmbuf.h
diff --git a/arch/h8300/include/asm/sigcontext.h b/arch/h8300/include/uapi/asm/sigcontext.h
index e4b81505f8f..e4b81505f8f 100644
--- a/arch/h8300/include/asm/sigcontext.h
+++ b/arch/h8300/include/uapi/asm/sigcontext.h
diff --git a/arch/h8300/include/asm/siginfo.h b/arch/h8300/include/uapi/asm/siginfo.h
index bc8fbea931a..bc8fbea931a 100644
--- a/arch/h8300/include/asm/siginfo.h
+++ b/arch/h8300/include/uapi/asm/siginfo.h
diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..af3a6c37fee
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/signal.h
@@ -0,0 +1,115 @@
+#ifndef _UAPI_H8300_SIGNAL_H
+#define _UAPI_H8300_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+#endif /* _UAPI_H8300_SIGNAL_H */
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
index 90a2e573c7e..90a2e573c7e 100644
--- a/arch/h8300/include/asm/socket.h
+++ b/arch/h8300/include/uapi/asm/socket.h
diff --git a/arch/h8300/include/asm/sockios.h b/arch/h8300/include/uapi/asm/sockios.h
index e9c7ec810c2..e9c7ec810c2 100644
--- a/arch/h8300/include/asm/sockios.h
+++ b/arch/h8300/include/uapi/asm/sockios.h
diff --git a/arch/h8300/include/asm/stat.h b/arch/h8300/include/uapi/asm/stat.h
index 62c3cc24dfe..62c3cc24dfe 100644
--- a/arch/h8300/include/asm/stat.h
+++ b/arch/h8300/include/uapi/asm/stat.h
diff --git a/arch/h8300/include/asm/statfs.h b/arch/h8300/include/uapi/asm/statfs.h
index b96efa712aa..b96efa712aa 100644
--- a/arch/h8300/include/asm/statfs.h
+++ b/arch/h8300/include/uapi/asm/statfs.h
diff --git a/arch/h8300/include/asm/swab.h b/arch/h8300/include/uapi/asm/swab.h
index 39abbf52807..39abbf52807 100644
--- a/arch/h8300/include/asm/swab.h
+++ b/arch/h8300/include/uapi/asm/swab.h
diff --git a/arch/h8300/include/asm/termbits.h b/arch/h8300/include/uapi/asm/termbits.h
index 3287a6244d7..3287a6244d7 100644
--- a/arch/h8300/include/asm/termbits.h
+++ b/arch/h8300/include/uapi/asm/termbits.h
diff --git a/arch/h8300/include/uapi/asm/termios.h b/arch/h8300/include/uapi/asm/termios.h
new file mode 100644
index 00000000000..5a67d7e3884
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/termios.h
@@ -0,0 +1,44 @@
+#ifndef _UAPI_H8300_TERMIOS_H
+#define _UAPI_H8300_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+
+#endif /* _UAPI_H8300_TERMIOS_H */
diff --git a/arch/h8300/include/uapi/asm/types.h b/arch/h8300/include/uapi/asm/types.h
new file mode 100644
index 00000000000..9ec9d4c5ac4
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/types.h
@@ -0,0 +1 @@
+#include <asm-generic/int-ll64.h>
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..8cb5d429f84
--- /dev/null
+++ b/arch/h8300/include/uapi/asm/unistd.h
@@ -0,0 +1,330 @@
+#ifndef _UAPI_ASM_H8300_UNISTD_H_
+#define _UAPI_ASM_H8300_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86old 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_vm86 166
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_madvise1 219
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+/* 223 is unused */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_set_thread_area 243
+#define __NR_get_thread_area 244
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+#define __NR_fadvise64 250
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_epoll_create 254
+#define __NR_epoll_ctl 255
+#define __NR_epoll_wait 256
+#define __NR_remap_file_pages 257
+#define __NR_set_tid_address 258
+#define __NR_timer_create 259
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 268
+#define __NR_fstatfs64 269
+#define __NR_tgkill 270
+#define __NR_utimes 271
+#define __NR_fadvise64_64 272
+#define __NR_vserver 273
+#define __NR_mbind 274
+#define __NR_get_mempolicy 275
+#define __NR_set_mempolicy 276
+#define __NR_mq_open 277
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_kexec_load 283
+#define __NR_waitid 284
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+#define __NR_set_robust_list 311
+#define __NR_get_robust_list 312
+#define __NR_splice 313
+#define __NR_sync_file_range 314
+#define __NR_tee 315
+#define __NR_vmsplice 316
+#define __NR_move_pages 317
+#define __NR_getcpu 318
+#define __NR_epoll_pwait 319
+#define __NR_setns 320
+
+#endif /* _UAPI_ASM_H8300_UNISTD_H_ */
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index e418803b6c8..0744f7d7b1f 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -31,8 +31,6 @@ config HEXAGON
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index 2af81533bd0..4a87cc47075 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -27,7 +27,6 @@
*/
#define sys_mmap2 sys_mmap_pgoff
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <asm-generic/unistd.h>
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 67060046812..3279646120e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -42,8 +42,6 @@ config IA64
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 4f5e8148440..cf3ab7e784b 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -58,6 +58,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ debug_dma_mapping_error(dev, daddr);
return ops->mapping_error(dev, daddr);
}
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index b0e973649cb..845143990a1 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -78,6 +78,11 @@ static inline long regs_return_value(struct pt_regs *regs)
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
+/*
+ * Why not default? Because user_stack_pointer() on ia64 gives register
+ * stack backing store instead...
+ */
+#define current_user_stack_pointer() (current_pt_regs()->r12)
/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 1574bca8613..8b3ff2f5b86 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -29,7 +29,6 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
index e531c424434..c0ea2855e96 100644
--- a/arch/ia64/include/uapi/asm/signal.h
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -79,12 +79,6 @@
#define SA_RESTORER 0x04000000
/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-/*
* The minimum stack size needs to be fairly large because we want to
* be sure that an app compiled for today's CPUs will continue to run
* on all future CPU models. The CPU model matters because the signal
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8b3a9c0e771..bd1c5155503 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1,5 +1,5 @@
/*
- * kvm_ia64.c: Basic KVM suppport On Itanium series processors
+ * kvm_ia64.c: Basic KVM support On Itanium series processors
*
*
* Copyright (C) 2007, Intel Corporation.
@@ -1330,6 +1330,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
@@ -1362,11 +1367,9 @@ static void kvm_release_vm_pages(struct kvm *kvm)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int j;
- unsigned long base_gfn;
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
- base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
if (memslot->rmap[j])
put_page((struct page *)memslot->rmap[j]);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 5183f43a2cf..f807721e19a 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -15,8 +15,6 @@ config M32R
select GENERIC_ATOMIC64
select ARCH_USES_GETTIMEOFFSET
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config SBUS
bool
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 4bc8ae73e08..bebdc36ebb0 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
generic-y += clkdev.h
generic-y += exec.h
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index c4432f1fb2c..fa58ccfff86 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_M32R_PTRACE_H
-#define _ASM_M32R_PTRACE_H
-
/*
* linux/include/asm-m32r/ptrace.h
*
@@ -11,111 +8,12 @@
* M32R version:
* Copyright (C) 2001-2002, 2004 Hirokazu Takata <takata at linux-m32r.org>
*/
+#ifndef _ASM_M32R_PTRACE_H
+#define _ASM_M32R_PTRACE_H
-/* 0 - 13 are integer registers (general purpose registers). */
-#define PT_R4 0
-#define PT_R5 1
-#define PT_R6 2
-#define PT_REGS 3
-#define PT_R0 4
-#define PT_R1 5
-#define PT_R2 6
-#define PT_R3 7
-#define PT_R7 8
-#define PT_R8 9
-#define PT_R9 10
-#define PT_R10 11
-#define PT_R11 12
-#define PT_R12 13
-#define PT_SYSCNR 14
-#define PT_R13 PT_FP
-#define PT_R14 PT_LR
-#define PT_R15 PT_SP
-
-/* processor status and miscellaneous context registers. */
-#define PT_ACC0H 15
-#define PT_ACC0L 16
-#define PT_ACC1H 17 /* ISA_DSP_LEVEL2 only */
-#define PT_ACC1L 18 /* ISA_DSP_LEVEL2 only */
-#define PT_PSW 19
-#define PT_BPC 20
-#define PT_BBPSW 21
-#define PT_BBPC 22
-#define PT_SPU 23
-#define PT_FP 24
-#define PT_LR 25
-#define PT_SPI 26
-#define PT_ORIGR0 27
-
-/* virtual pt_reg entry for gdb */
-#define PT_PC 30
-#define PT_CBR 31
-#define PT_EVB 32
-
-
-/* Control registers. */
-#define SPR_CR0 PT_PSW
-#define SPR_CR1 PT_CBR /* read only */
-#define SPR_CR2 PT_SPI
-#define SPR_CR3 PT_SPU
-#define SPR_CR4
-#define SPR_CR5 PT_EVB /* part of M32R/E, M32R/I core only */
-#define SPR_CR6 PT_BPC
-#define SPR_CR7
-#define SPR_CR8 PT_BBPSW
-#define SPR_CR9
-#define SPR_CR10
-#define SPR_CR11
-#define SPR_CR12
-#define SPR_CR13 PT_WR
-#define SPR_CR14 PT_BBPC
-#define SPR_CR15
-
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-struct pt_regs {
- /* Saved main processor registers. */
- unsigned long r4;
- unsigned long r5;
- unsigned long r6;
- struct pt_regs *pt_regs;
- unsigned long r0;
- unsigned long r1;
- unsigned long r2;
- unsigned long r3;
- unsigned long r7;
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- long syscall_nr;
-
- /* Saved main processor status and miscellaneous context registers. */
- unsigned long acc0h;
- unsigned long acc0l;
- unsigned long acc1h; /* ISA_DSP_LEVEL2 only */
- unsigned long acc1l; /* ISA_DSP_LEVEL2 only */
- unsigned long psw;
- unsigned long bpc; /* saved PC for TRAP syscalls */
- unsigned long bbpsw;
- unsigned long bbpc;
- unsigned long spu; /* saved user stack */
- unsigned long fp;
- unsigned long lr; /* saved PC for JL syscalls */
- unsigned long spi; /* saved kernel stack */
- unsigned long orig_r0;
-};
-
-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#ifdef __KERNEL__
#include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */
+#include <uapi/asm/ptrace.h>
#define arch_has_single_step() (1)
@@ -134,6 +32,7 @@ extern void init_debug_traps(struct task_struct *);
#define instruction_pointer(regs) ((regs)->bpc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->spu)
extern void withdraw_debug_trap(struct pt_regs *regs);
@@ -142,6 +41,4 @@ extern void withdraw_debug_trap(struct pt_regs *regs);
#define current_pt_regs() ((struct pt_regs *) \
((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
-#endif /* __KERNEL */
-
#endif /* _ASM_M32R_PTRACE_H */
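
The hunks above follow the standard UAPI disintegration pattern: every userspace-visible definition moves to arch/m32r/include/uapi/asm/ptrace.h (added later in this patch), and the kernel-side header keeps only what previously sat under #ifdef __KERNEL__. A condensed sketch of the resulting kernel header, pieced together from these hunks (not the verbatim file):

    #ifndef _ASM_M32R_PTRACE_H
    #define _ASM_M32R_PTRACE_H

    #include <asm/m32r.h>           /* M32R_PSW_BSM, M32R_PSW_BPM */
    #include <uapi/asm/ptrace.h>    /* register offsets, struct pt_regs, PTRACE_* */

    #define arch_has_single_step()          (1)

    extern void init_debug_traps(struct task_struct *);
    extern void withdraw_debug_trap(struct pt_regs *regs);

    #define instruction_pointer(regs)       ((regs)->bpc)
    #define profile_pc(regs)                instruction_pointer(regs)
    #define user_stack_pointer(regs)        ((regs)->spu)

    #define current_pt_regs() ((struct pt_regs *) \
            ((unsigned long)current_thread_info() + THREAD_SIZE) - 1)

    #endif /* _ASM_M32R_PTRACE_H */
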
diff --git a/arch/m32r/include/asm/setup.h b/arch/m32r/include/asm/setup.h
index c637ab99239..bbe59a9ce8c 100644
--- a/arch/m32r/include/asm/setup.h
+++ b/arch/m32r/include/asm/setup.h
@@ -1,13 +1,8 @@
#ifndef _ASM_M32R_SETUP_H
#define _ASM_M32R_SETUP_H
-/*
- * This is set up by the setup-routine at boot-time
- */
+#include <uapi/asm/setup.h>
-#define COMMAND_LINE_SIZE 512
-
-#ifdef __KERNEL__
#define PARAM ((unsigned char *)empty_zero_page)
@@ -33,6 +28,4 @@
extern unsigned long memory_start;
extern unsigned long memory_end;
-#endif /* __KERNEL__ */
-
#endif /* _ASM_M32R_SETUP_H */
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index e4d2e2ad5f1..a5ba4a217fb 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -1,14 +1,8 @@
#ifndef _ASM_M32R_SIGNAL_H
#define _ASM_M32R_SIGNAL_H
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/compiler.h>
+#include <uapi/asm/signal.h>
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
@@ -22,94 +16,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifdef __KERNEL__
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
@@ -120,35 +26,8 @@ struct sigaction {
struct k_sigaction {
struct sigaction sa;
};
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
-#endif /* __KERNEL__ */
-
#endif /* _ASM_M32R_SIGNAL_H */
diff --git a/arch/m32r/include/asm/termios.h b/arch/m32r/include/asm/termios.h
index 93ce79fd342..680898f0b3d 100644
--- a/arch/m32r/include/asm/termios.h
+++ b/arch/m32r/include/asm/termios.h
@@ -1,46 +1,8 @@
#ifndef _M32R_TERMIOS_H
#define _M32R_TERMIOS_H
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
#include <linux/module.h>
+#include <uapi/asm/termios.h>
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
@@ -86,6 +48,4 @@ struct termio {
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif /* __KERNEL__ */
-
#endif /* _M32R_TERMIOS_H */
diff --git a/arch/m32r/include/asm/types.h b/arch/m32r/include/asm/types.h
index bb2eeadecf9..04a44c6ee34 100644
--- a/arch/m32r/include/asm/types.h
+++ b/arch/m32r/include/asm/types.h
@@ -1,15 +1,12 @@
#ifndef _ASM_M32R_TYPES_H
#define _ASM_M32R_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
-#ifdef __KERNEL__
#define BITS_PER_LONG 32
-#endif /* __KERNEL__ */
-
#endif /* _ASM_M32R_TYPES_H */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index d9e7351af2a..79b063caec8 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -1,338 +1,8 @@
#ifndef _ASM_M32R_UNISTD_H
#define _ASM_M32R_UNISTD_H
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-/* 16 is unused */
-/* 17 is unused */
-/* 18 is unused */
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-/* 23 is unused */
-/* 24 is unused */
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-/* 28 is unused */
-#define __NR_pause 29
-#define __NR_utime 30
-/* 31 is unused */
-#define __NR_cachectl 32 /* old #define __NR_gtty 32*/
-#define __NR_access 33
-/* 34 is unused */
-/* 35 is unused */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-/* 44 is unused */
-#define __NR_brk 45
-/* 46 is unused */
-/* 47 is unused (getgid16) */
-/* 48 is unused */
-/* 49 is unused */
-/* 50 is unused */
-#define __NR_acct 51
-#define __NR_umount2 52
-/* 53 is unused */
-#define __NR_ioctl 54
-/* 55 is unused (fcntl) */
-/* 56 is unused */
-#define __NR_setpgid 57
-/* 58 is unused */
-/* 59 is unused */
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-/* 67 is unused */
-/* 68 is unused*/
-/* 69 is unused*/
-/* 70 is unused */
-/* 71 is unused */
-/* 72 is unused */
-/* 73 is unused */
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-/* 76 is unused (old getrlimit) */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-/* 80 is unused */
-/* 81 is unused */
-/* 82 is unused */
-#define __NR_symlink 83
-/* 84 is unused */
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-/* 89 is unused */
-/* 90 is unused */
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-/* 95 is unused */
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-/* 98 is unused */
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-/* 101 is unused */
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-/* 109 is unused */
-/* 110 is unused */
-#define __NR_vhangup 111
-/* 112 is unused */
-/* 113 is unused */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-/* 119 is unused */
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-/* 123 is unused */
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-/* 126 is unused */
-/* 127 is unused */
-#define __NR_init_module 128
-#define __NR_delete_module 129
-/* 130 is unused */
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-/* 137 is unused */
-/* 138 is unused */
-/* 139 is unused */
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-/* 164 is unused */
-/* 165 is unused */
-#define __NR_tas 166
-/* 167 is unused */
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-/* 170 is unused */
-/* 171 is unused */
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-/* 182 is unused */
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-/* 188 is unused */
-/* 189 is unused */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-#define __NR_getdents64 220
-#define __NR_fcntl64 221
-/* 222 is unused */
-/* 223 is unused */
-#define __NR_gettid 224
-#define __NR_readahead 225
-#define __NR_setxattr 226
-#define __NR_lsetxattr 227
-#define __NR_fsetxattr 228
-#define __NR_getxattr 229
-#define __NR_lgetxattr 230
-#define __NR_fgetxattr 231
-#define __NR_listxattr 232
-#define __NR_llistxattr 233
-#define __NR_flistxattr 234
-#define __NR_removexattr 235
-#define __NR_lremovexattr 236
-#define __NR_fremovexattr 237
-#define __NR_tkill 238
-#define __NR_sendfile64 239
-#define __NR_futex 240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area 243
-#define __NR_get_thread_area 244
-#define __NR_io_setup 245
-#define __NR_io_destroy 246
-#define __NR_io_getevents 247
-#define __NR_io_submit 248
-#define __NR_io_cancel 249
-#define __NR_fadvise64 250
-/* 251 is unused */
-#define __NR_exit_group 252
-#define __NR_lookup_dcookie 253
-#define __NR_epoll_create 254
-#define __NR_epoll_ctl 255
-#define __NR_epoll_wait 256
-#define __NR_remap_file_pages 257
-#define __NR_set_tid_address 258
-#define __NR_timer_create 259
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 268
-#define __NR_fstatfs64 269
-#define __NR_tgkill 270
-#define __NR_utimes 271
-#define __NR_fadvise64_64 272
-#define __NR_vserver 273
-#define __NR_mbind 274
-#define __NR_get_mempolicy 275
-#define __NR_set_mempolicy 276
-#define __NR_mq_open 277
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
-#define __NR_kexec_load 283
-#define __NR_waitid 284
-/* 285 is unused */
-#define __NR_add_key 286
-#define __NR_request_key 287
-#define __NR_keyctl 288
-#define __NR_ioprio_set 289
-#define __NR_ioprio_get 290
-#define __NR_inotify_init 291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch 293
-#define __NR_migrate_pages 294
-#define __NR_openat 295
-#define __NR_mkdirat 296
-#define __NR_mknodat 297
-#define __NR_fchownat 298
-#define __NR_futimesat 299
-#define __NR_fstatat64 300
-#define __NR_unlinkat 301
-#define __NR_renameat 302
-#define __NR_linkat 303
-#define __NR_symlinkat 304
-#define __NR_readlinkat 305
-#define __NR_fchmodat 306
-#define __NR_faccessat 307
-#define __NR_pselect6 308
-#define __NR_ppoll 309
-#define __NR_unshare 310
-#define __NR_set_robust_list 311
-#define __NR_get_robust_list 312
-#define __NR_splice 313
-#define __NR_sync_file_range 314
-#define __NR_tee 315
-#define __NR_vmsplice 316
-#define __NR_move_pages 317
-#define __NR_getcpu 318
-#define __NR_epoll_pwait 319
-#define __NR_utimensat 320
-#define __NR_signalfd 321
-/* #define __NR_timerfd 322 removed */
-#define __NR_eventfd 323
-#define __NR_fallocate 324
-#define __NR_setns 325
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
#define NR_syscalls 326
@@ -352,7 +22,6 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
@@ -391,5 +60,4 @@
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
#endif
-#endif /* __KERNEL__ */
#endif /* _ASM_M32R_UNISTD_H */
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index baebb3da1d4..43937a61d6c 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,3 +1,33 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/m32r/include/asm/auxvec.h b/arch/m32r/include/uapi/asm/auxvec.h
index f76dcc860fa..f76dcc860fa 100644
--- a/arch/m32r/include/asm/auxvec.h
+++ b/arch/m32r/include/uapi/asm/auxvec.h
diff --git a/arch/m32r/include/asm/bitsperlong.h b/arch/m32r/include/uapi/asm/bitsperlong.h
index 6dc0bb0c13b..6dc0bb0c13b 100644
--- a/arch/m32r/include/asm/bitsperlong.h
+++ b/arch/m32r/include/uapi/asm/bitsperlong.h
diff --git a/arch/m32r/include/asm/byteorder.h b/arch/m32r/include/uapi/asm/byteorder.h
index 21855d8b028..21855d8b028 100644
--- a/arch/m32r/include/asm/byteorder.h
+++ b/arch/m32r/include/uapi/asm/byteorder.h
diff --git a/arch/m32r/include/asm/errno.h b/arch/m32r/include/uapi/asm/errno.h
index 777149262aa..777149262aa 100644
--- a/arch/m32r/include/asm/errno.h
+++ b/arch/m32r/include/uapi/asm/errno.h
diff --git a/arch/m32r/include/asm/fcntl.h b/arch/m32r/include/uapi/asm/fcntl.h
index 46ab12db573..46ab12db573 100644
--- a/arch/m32r/include/asm/fcntl.h
+++ b/arch/m32r/include/uapi/asm/fcntl.h
diff --git a/arch/m32r/include/asm/ioctl.h b/arch/m32r/include/uapi/asm/ioctl.h
index b279fe06dfe..b279fe06dfe 100644
--- a/arch/m32r/include/asm/ioctl.h
+++ b/arch/m32r/include/uapi/asm/ioctl.h
diff --git a/arch/m32r/include/asm/ioctls.h b/arch/m32r/include/uapi/asm/ioctls.h
index 349bf87bfbd..349bf87bfbd 100644
--- a/arch/m32r/include/asm/ioctls.h
+++ b/arch/m32r/include/uapi/asm/ioctls.h
diff --git a/arch/m32r/include/asm/ipcbuf.h b/arch/m32r/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/m32r/include/asm/ipcbuf.h
+++ b/arch/m32r/include/uapi/asm/ipcbuf.h
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/uapi/asm/mman.h
index 8eebf89f5ab..8eebf89f5ab 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/uapi/asm/mman.h
diff --git a/arch/m32r/include/asm/msgbuf.h b/arch/m32r/include/uapi/asm/msgbuf.h
index 0d5a877b813..0d5a877b813 100644
--- a/arch/m32r/include/asm/msgbuf.h
+++ b/arch/m32r/include/uapi/asm/msgbuf.h
diff --git a/arch/m32r/include/asm/param.h b/arch/m32r/include/uapi/asm/param.h
index fa207bdf96e..fa207bdf96e 100644
--- a/arch/m32r/include/asm/param.h
+++ b/arch/m32r/include/uapi/asm/param.h
diff --git a/arch/m32r/include/asm/poll.h b/arch/m32r/include/uapi/asm/poll.h
index c98509d3149..c98509d3149 100644
--- a/arch/m32r/include/asm/poll.h
+++ b/arch/m32r/include/uapi/asm/poll.h
diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/uapi/asm/posix_types.h
index 236de26a409..236de26a409 100644
--- a/arch/m32r/include/asm/posix_types.h
+++ b/arch/m32r/include/uapi/asm/posix_types.h
diff --git a/arch/m32r/include/uapi/asm/ptrace.h b/arch/m32r/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..f6930a82251
--- /dev/null
+++ b/arch/m32r/include/uapi/asm/ptrace.h
@@ -0,0 +1,117 @@
+/*
+ * linux/include/asm-m32r/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * M32R version:
+ * Copyright (C) 2001-2002, 2004 Hirokazu Takata <takata at linux-m32r.org>
+ */
+#ifndef _UAPI_ASM_M32R_PTRACE_H
+#define _UAPI_ASM_M32R_PTRACE_H
+
+
+/* 0 - 13 are integer registers (general purpose registers). */
+#define PT_R4 0
+#define PT_R5 1
+#define PT_R6 2
+#define PT_REGS 3
+#define PT_R0 4
+#define PT_R1 5
+#define PT_R2 6
+#define PT_R3 7
+#define PT_R7 8
+#define PT_R8 9
+#define PT_R9 10
+#define PT_R10 11
+#define PT_R11 12
+#define PT_R12 13
+#define PT_SYSCNR 14
+#define PT_R13 PT_FP
+#define PT_R14 PT_LR
+#define PT_R15 PT_SP
+
+/* processor status and miscellaneous context registers. */
+#define PT_ACC0H 15
+#define PT_ACC0L 16
+#define PT_ACC1H 17 /* ISA_DSP_LEVEL2 only */
+#define PT_ACC1L 18 /* ISA_DSP_LEVEL2 only */
+#define PT_PSW 19
+#define PT_BPC 20
+#define PT_BBPSW 21
+#define PT_BBPC 22
+#define PT_SPU 23
+#define PT_FP 24
+#define PT_LR 25
+#define PT_SPI 26
+#define PT_ORIGR0 27
+
+/* virtual pt_reg entry for gdb */
+#define PT_PC 30
+#define PT_CBR 31
+#define PT_EVB 32
+
+
+/* Control registers. */
+#define SPR_CR0 PT_PSW
+#define SPR_CR1 PT_CBR /* read only */
+#define SPR_CR2 PT_SPI
+#define SPR_CR3 PT_SPU
+#define SPR_CR4
+#define SPR_CR5 PT_EVB /* part of M32R/E, M32R/I core only */
+#define SPR_CR6 PT_BPC
+#define SPR_CR7
+#define SPR_CR8 PT_BBPSW
+#define SPR_CR9
+#define SPR_CR10
+#define SPR_CR11
+#define SPR_CR12
+#define SPR_CR13 PT_WR
+#define SPR_CR14 PT_BBPC
+#define SPR_CR15
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+struct pt_regs {
+ /* Saved main processor registers. */
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ struct pt_regs *pt_regs;
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ long syscall_nr;
+
+ /* Saved main processor status and miscellaneous context registers. */
+ unsigned long acc0h;
+ unsigned long acc0l;
+ unsigned long acc1h; /* ISA_DSP_LEVEL2 only */
+ unsigned long acc1l; /* ISA_DSP_LEVEL2 only */
+ unsigned long psw;
+ unsigned long bpc; /* saved PC for TRAP syscalls */
+ unsigned long bbpsw;
+ unsigned long bbpc;
+ unsigned long spu; /* saved user stack */
+ unsigned long fp;
+ unsigned long lr; /* saved PC for JL syscalls */
+ unsigned long spi; /* saved kernel stack */
+ unsigned long orig_r0;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+
+#define PTRACE_OLDSETOPTIONS 21
+
+
+#endif /* _UAPI_ASM_M32R_PTRACE_H */
diff --git a/arch/m32r/include/asm/resource.h b/arch/m32r/include/uapi/asm/resource.h
index b1ce766e37a..b1ce766e37a 100644
--- a/arch/m32r/include/asm/resource.h
+++ b/arch/m32r/include/uapi/asm/resource.h
diff --git a/arch/m32r/include/asm/sembuf.h b/arch/m32r/include/uapi/asm/sembuf.h
index c9873d6890e..c9873d6890e 100644
--- a/arch/m32r/include/asm/sembuf.h
+++ b/arch/m32r/include/uapi/asm/sembuf.h
diff --git a/arch/m32r/include/uapi/asm/setup.h b/arch/m32r/include/uapi/asm/setup.h
new file mode 100644
index 00000000000..96961a42e5f
--- /dev/null
+++ b/arch/m32r/include/uapi/asm/setup.h
@@ -0,0 +1,11 @@
+#ifndef _UAPI_ASM_M32R_SETUP_H
+#define _UAPI_ASM_M32R_SETUP_H
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+
+#define COMMAND_LINE_SIZE 512
+
+
+#endif /* _UAPI_ASM_M32R_SETUP_H */
diff --git a/arch/m32r/include/asm/shmbuf.h b/arch/m32r/include/uapi/asm/shmbuf.h
index b0cdf0aa7d6..b0cdf0aa7d6 100644
--- a/arch/m32r/include/asm/shmbuf.h
+++ b/arch/m32r/include/uapi/asm/shmbuf.h
diff --git a/arch/m32r/include/asm/sigcontext.h b/arch/m32r/include/uapi/asm/sigcontext.h
index da4a9c36d09..da4a9c36d09 100644
--- a/arch/m32r/include/asm/sigcontext.h
+++ b/arch/m32r/include/uapi/asm/sigcontext.h
diff --git a/arch/m32r/include/asm/siginfo.h b/arch/m32r/include/uapi/asm/siginfo.h
index 7d9cd9ebfd0..7d9cd9ebfd0 100644
--- a/arch/m32r/include/asm/siginfo.h
+++ b/arch/m32r/include/uapi/asm/siginfo.h
diff --git a/arch/m32r/include/uapi/asm/signal.h b/arch/m32r/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..54acacb1f1f
--- /dev/null
+++ b/arch/m32r/include/uapi/asm/signal.h
@@ -0,0 +1,117 @@
+#ifndef _UAPI_ASM_M32R_SIGNAL_H
+#define _UAPI_ASM_M32R_SIGNAL_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/compiler.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001u
+#define SA_NOCLDWAIT 0x00000002u
+#define SA_SIGINFO 0x00000004u
+#define SA_ONSTACK 0x08000000u
+#define SA_RESTART 0x10000000u
+#define SA_NODEFER 0x40000000u
+#define SA_RESETHAND 0x80000000u
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+#endif /* _UAPI_ASM_M32R_SIGNAL_H */
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5e7088a2672..5e7088a2672 100644
--- a/arch/m32r/include/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
diff --git a/arch/m32r/include/asm/sockios.h b/arch/m32r/include/uapi/asm/sockios.h
index 6c1fb9b43bd..6c1fb9b43bd 100644
--- a/arch/m32r/include/asm/sockios.h
+++ b/arch/m32r/include/uapi/asm/sockios.h
diff --git a/arch/m32r/include/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h
index da4518f82d6..da4518f82d6 100644
--- a/arch/m32r/include/asm/stat.h
+++ b/arch/m32r/include/uapi/asm/stat.h
diff --git a/arch/m32r/include/asm/statfs.h b/arch/m32r/include/uapi/asm/statfs.h
index 6eb4c6007e6..6eb4c6007e6 100644
--- a/arch/m32r/include/asm/statfs.h
+++ b/arch/m32r/include/uapi/asm/statfs.h
diff --git a/arch/m32r/include/asm/swab.h b/arch/m32r/include/uapi/asm/swab.h
index 54dab001d6d..54dab001d6d 100644
--- a/arch/m32r/include/asm/swab.h
+++ b/arch/m32r/include/uapi/asm/swab.h
diff --git a/arch/m32r/include/asm/termbits.h b/arch/m32r/include/uapi/asm/termbits.h
index 957a3c68854..957a3c68854 100644
--- a/arch/m32r/include/asm/termbits.h
+++ b/arch/m32r/include/uapi/asm/termbits.h
diff --git a/arch/m68k/include/uapi/asm/termios.h b/arch/m32r/include/uapi/asm/termios.h
index ce2142c9ac1..07ad27b8f7d 100644
--- a/arch/m68k/include/uapi/asm/termios.h
+++ b/arch/m32r/include/uapi/asm/termios.h
@@ -1,5 +1,5 @@
-#ifndef _UAPI_M68K_TERMIOS_H
-#define _UAPI_M68K_TERMIOS_H
+#ifndef _UAPI_M32R_TERMIOS_H
+#define _UAPI_M32R_TERMIOS_H
#include <asm/termbits.h>
#include <asm/ioctls.h>
@@ -21,7 +21,6 @@ struct termio {
unsigned char c_cc[NCC]; /* control characters */
};
-
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
@@ -41,4 +40,4 @@ struct termio {
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#endif /* _UAPI_M68K_TERMIOS_H */
+#endif /* _UAPI_M32R_TERMIOS_H */
diff --git a/arch/m32r/include/uapi/asm/types.h b/arch/m32r/include/uapi/asm/types.h
new file mode 100644
index 00000000000..9ec9d4c5ac4
--- /dev/null
+++ b/arch/m32r/include/uapi/asm/types.h
@@ -0,0 +1 @@
+#include <asm-generic/int-ll64.h>
diff --git a/arch/m32r/include/uapi/asm/unistd.h b/arch/m32r/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..5a54f2ae3b5
--- /dev/null
+++ b/arch/m32r/include/uapi/asm/unistd.h
@@ -0,0 +1,335 @@
+#ifndef _UAPI_ASM_M32R_UNISTD_H
+#define _UAPI_ASM_M32R_UNISTD_H
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+/* 16 is unused */
+/* 17 is unused */
+/* 18 is unused */
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+/* 23 is unused */
+/* 24 is unused */
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+/* 28 is unused */
+#define __NR_pause 29
+#define __NR_utime 30
+/* 31 is unused */
+#define __NR_cachectl 32 /* old #define __NR_gtty 32*/
+#define __NR_access 33
+/* 34 is unused */
+/* 35 is unused */
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+/* 44 is unused */
+#define __NR_brk 45
+/* 46 is unused */
+/* 47 is unused (getgid16) */
+/* 48 is unused */
+/* 49 is unused */
+/* 50 is unused */
+#define __NR_acct 51
+#define __NR_umount2 52
+/* 53 is unused */
+#define __NR_ioctl 54
+/* 55 is unused (fcntl) */
+/* 56 is unused */
+#define __NR_setpgid 57
+/* 58 is unused */
+/* 59 is unused */
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+/* 67 is unused */
+/* 68 is unused*/
+/* 69 is unused*/
+/* 70 is unused */
+/* 71 is unused */
+/* 72 is unused */
+/* 73 is unused */
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+/* 76 is unused (old getrlimit) */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+/* 80 is unused */
+/* 81 is unused */
+/* 82 is unused */
+#define __NR_symlink 83
+/* 84 is unused */
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+/* 89 is unused */
+/* 90 is unused */
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+/* 95 is unused */
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+/* 98 is unused */
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+/* 101 is unused */
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+/* 109 is unused */
+/* 110 is unused */
+#define __NR_vhangup 111
+/* 112 is unused */
+/* 113 is unused */
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+/* 119 is unused */
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+/* 123 is unused */
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+/* 126 is unused */
+/* 127 is unused */
+#define __NR_init_module 128
+#define __NR_delete_module 129
+/* 130 is unused */
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+/* 137 is unused */
+/* 138 is unused */
+/* 139 is unused */
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+/* 164 is unused */
+/* 165 is unused */
+#define __NR_tas 166
+/* 167 is unused */
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+/* 170 is unused */
+/* 171 is unused */
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+/* 182 is unused */
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+/* 188 is unused */
+/* 189 is unused */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+/* 222 is unused */
+/* 223 is unused */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_set_thread_area 243
+#define __NR_get_thread_area 244
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+#define __NR_fadvise64 250
+/* 251 is unused */
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_epoll_create 254
+#define __NR_epoll_ctl 255
+#define __NR_epoll_wait 256
+#define __NR_remap_file_pages 257
+#define __NR_set_tid_address 258
+#define __NR_timer_create 259
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 268
+#define __NR_fstatfs64 269
+#define __NR_tgkill 270
+#define __NR_utimes 271
+#define __NR_fadvise64_64 272
+#define __NR_vserver 273
+#define __NR_mbind 274
+#define __NR_get_mempolicy 275
+#define __NR_set_mempolicy 276
+#define __NR_mq_open 277
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_kexec_load 283
+#define __NR_waitid 284
+/* 285 is unused */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+#define __NR_set_robust_list 311
+#define __NR_get_robust_list 312
+#define __NR_splice 313
+#define __NR_sync_file_range 314
+#define __NR_tee 315
+#define __NR_vmsplice 316
+#define __NR_move_pages 317
+#define __NR_getcpu 318
+#define __NR_epoll_pwait 319
+#define __NR_utimensat 320
+#define __NR_signalfd 321
+/* #define __NR_timerfd 322 removed */
+#define __NR_eventfd 323
+#define __NR_fallocate 324
+#define __NR_setns 325
+
+#endif /* _UAPI_ASM_M32R_UNISTD_H */
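
With the syscall numbers exported through uapi/ (and listed in the Kbuild export list earlier in this patch), userspace can pick them up from the headers installed by "make headers_install". A minimal, hypothetical userspace sketch; the program and the glibc syscall() wrapper are illustrative and not part of this patch:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>        /* syscall() */
    #include <asm/unistd.h>         /* installed copy of uapi/asm/unistd.h */

    int main(void)
    {
            /* __NR_gettid is 224 on m32r, per the table above. */
            long tid = syscall(__NR_gettid);

            printf("gettid() via raw syscall: %ld\n", tid);
            return 0;
    }
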
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 953a7ba5d05..6710084e072 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -15,8 +15,6 @@ config M68K
select FPU if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index ffc0601a2a1..93ef0346b20 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -28,8 +28,8 @@ config ZORRO
Linux use these.
config AMIGA_PCMCIA
- bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
- depends on AMIGA && EXPERIMENTAL
+ bool "Amiga 1200/600 PCMCIA support"
+ depends on AMIGA
help
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index c4eb79edece..b1cfff832fb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -35,7 +35,8 @@ endchoice
if M68KCLASSIC
config M68000
- bool
+ bool "MC68000"
+ depends on !MMU
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select CPU_HAS_NO_UNALIGNED
@@ -274,9 +275,8 @@ endif # COLDFIRE
comment "Processor Specific Options"
config M68KFPU_EMU
- bool "Math emulation support (EXPERIMENTAL)"
+ bool "Math emulation support"
depends on MMU
- depends on EXPERIMENTAL
help
At some point in the future, this will cause floating-point math
instructions to be emulated by the kernel on machines that lack a
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 87233acef18..fa12283d58f 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -41,7 +41,7 @@ config NO_KERNEL_MSG
config BDM_DISABLE
bool "Disable BDM signals"
- depends on (EXPERIMENTAL && COLDFIRE)
+ depends on COLDFIRE
help
Disable the ColdFire CPU's BDM signals.
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 04a3d9be90e..c4cdfe444c6 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -60,8 +60,8 @@ endmenu
menu "Character devices"
config ATARI_DSP56K
- tristate "Atari DSP56k support (EXPERIMENTAL)"
- depends on ATARI && EXPERIMENTAL
+ tristate "Atari DSP56k support"
+ depends on ATARI
help
If you want to be able to use the DSP56001 in Falcons, say Y. This
driver is still experimental, and if you don't know what it is, or
@@ -87,7 +87,7 @@ config HPDCA
config HPAPCI
tristate "HP APCI serial support"
- depends on HP300 && SERIAL_8250 && EXPERIMENTAL
+ depends on HP300 && SERIAL_8250
help
If you want to use the internal "APCI" serial ports on an HP400
machine, say Y here.
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 7636751f2f8..2f02acfb8ed 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -92,7 +92,7 @@ endif
head-y := arch/m68k/kernel/head.o
head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o
head-$(CONFIG_M68360) := arch/m68k/platform/68360/head.o
-head-$(CONFIG_M68000) := arch/m68k/platform/68328/head.o
+head-$(CONFIG_M68000) := arch/m68k/platform/68000/head.o
head-$(CONFIG_COLDFIRE) := arch/m68k/platform/coldfire/head.o
core-y += arch/m68k/kernel/ arch/m68k/mm/
@@ -114,9 +114,7 @@ core-$(CONFIG_M68040) += arch/m68k/fpsp040/
core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
core-$(CONFIG_M68360) += arch/m68k/platform/68360/
-core-$(CONFIG_M68000) += arch/m68k/platform/68328/
-core-$(CONFIG_M68EZ328) += arch/m68k/platform/68EZ328/
-core-$(CONFIG_M68VZ328) += arch/m68k/platform/68VZ328/
+core-$(CONFIG_M68000) += arch/m68k/platform/68000/
core-$(CONFIG_COLDFIRE) += arch/m68k/platform/coldfire/
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 7f1949c0e08..c7933e41f10 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += futex.h
+generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
@@ -21,8 +22,11 @@ generic-y += percpu.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
+generic-y += shmparam.h
generic-y += siginfo.h
+generic-y += spinlock.h
generic-y += statfs.h
+generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
diff --git a/arch/m68k/include/asm/hw_irq.h b/arch/m68k/include/asm/hw_irq.h
deleted file mode 100644
index eacef0951fb..00000000000
--- a/arch/m68k/include/asm/hw_irq.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_HW_IRQ_H
-#define __ASM_M68K_HW_IRQ_H
-
-/* Dummy include. */
-
-#endif
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
deleted file mode 100644
index fdf45e6807c..00000000000
--- a/arch/m68k/include/asm/m5249sim.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/****************************************************************************/
-
-/*
- * m5249sim.h -- ColdFire 5249 System Integration Module support.
- *
- * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- */
-
-/****************************************************************************/
-#ifndef m5249sim_h
-#define m5249sim_h
-/****************************************************************************/
-
-#define CPU_NAME "COLDFIRE(m5249)"
-#define CPU_INSTR_PER_JIFFY 3
-#define MCF_BUSCLK (MCF_CLK / 2)
-
-#include <asm/m52xxacr.h>
-
-/*
- * The 5249 has a second MBAR region, define its address.
- */
-#define MCF_MBAR2 0x80000000
-
-/*
- * Define the 5249 SIM register set addresses.
- */
-#define MCFSIM_RSR (MCF_MBAR + 0x00) /* Reset Status */
-#define MCFSIM_SYPCR (MCF_MBAR + 0x01) /* System Protection */
-#define MCFSIM_SWIVR (MCF_MBAR + 0x02) /* SW Watchdog intr */
-#define MCFSIM_SWSR (MCF_MBAR + 0x03) /* SW Watchdog srv */
-#define MCFSIM_PAR (MCF_MBAR + 0x04) /* Pin Assignment */
-#define MCFSIM_IRQPAR (MCF_MBAR + 0x06) /* Intr Assignment */
-#define MCFSIM_MPARK (MCF_MBAR + 0x0C) /* BUS Master Ctrl */
-#define MCFSIM_IPR (MCF_MBAR + 0x40) /* Interrupt Pending */
-#define MCFSIM_IMR (MCF_MBAR + 0x44) /* Interrupt Mask */
-#define MCFSIM_AVR (MCF_MBAR + 0x4b) /* Autovector Ctrl */
-#define MCFSIM_ICR0 (MCF_MBAR + 0x4c) /* Intr Ctrl reg 0 */
-#define MCFSIM_ICR1 (MCF_MBAR + 0x4d) /* Intr Ctrl reg 1 */
-#define MCFSIM_ICR2 (MCF_MBAR + 0x4e) /* Intr Ctrl reg 2 */
-#define MCFSIM_ICR3 (MCF_MBAR + 0x4f) /* Intr Ctrl reg 3 */
-#define MCFSIM_ICR4 (MCF_MBAR + 0x50) /* Intr Ctrl reg 4 */
-#define MCFSIM_ICR5 (MCF_MBAR + 0x51) /* Intr Ctrl reg 5 */
-#define MCFSIM_ICR6 (MCF_MBAR + 0x52) /* Intr Ctrl reg 6 */
-#define MCFSIM_ICR7 (MCF_MBAR + 0x53) /* Intr Ctrl reg 7 */
-#define MCFSIM_ICR8 (MCF_MBAR + 0x54) /* Intr Ctrl reg 8 */
-#define MCFSIM_ICR9 (MCF_MBAR + 0x55) /* Intr Ctrl reg 9 */
-#define MCFSIM_ICR10 (MCF_MBAR + 0x56) /* Intr Ctrl reg 10 */
-#define MCFSIM_ICR11 (MCF_MBAR + 0x57) /* Intr Ctrl reg 11 */
-
-#define MCFSIM_CSAR0 (MCF_MBAR + 0x80) /* CS 0 Address reg */
-#define MCFSIM_CSMR0 (MCF_MBAR + 0x84) /* CS 0 Mask reg */
-#define MCFSIM_CSCR0 (MCF_MBAR + 0x8a) /* CS 0 Control reg */
-#define MCFSIM_CSAR1 (MCF_MBAR + 0x8c) /* CS 1 Address reg */
-#define MCFSIM_CSMR1 (MCF_MBAR + 0x90) /* CS 1 Mask reg */
-#define MCFSIM_CSCR1 (MCF_MBAR + 0x96) /* CS 1 Control reg */
-#define MCFSIM_CSAR2 (MCF_MBAR + 0x98) /* CS 2 Address reg */
-#define MCFSIM_CSMR2 (MCF_MBAR + 0x9c) /* CS 2 Mask reg */
-#define MCFSIM_CSCR2 (MCF_MBAR + 0xa2) /* CS 2 Control reg */
-#define MCFSIM_CSAR3 (MCF_MBAR + 0xa4) /* CS 3 Address reg */
-#define MCFSIM_CSMR3 (MCF_MBAR + 0xa8) /* CS 3 Mask reg */
-#define MCFSIM_CSCR3 (MCF_MBAR + 0xae) /* CS 3 Control reg */
-
-#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */
-#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */
-#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */
-#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */
-#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */
-
-/*
- * Timer module.
- */
-#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */
-#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */
-
-/*
- * UART module.
- */
-#define MCFUART_BASE0 (MCF_MBAR + 0x1c0) /* Base address UART0 */
-#define MCFUART_BASE1 (MCF_MBAR + 0x200) /* Base address UART1 */
-
-/*
- * QSPI module.
- */
-#define MCFQSPI_BASE (MCF_MBAR + 0x300) /* Base address QSPI */
-#define MCFQSPI_SIZE 0x40 /* Register set size */
-
-#define MCFQSPI_CS0 29
-#define MCFQSPI_CS1 24
-#define MCFQSPI_CS2 21
-#define MCFQSPI_CS3 22
-
-/*
- * DMA unit base addresses.
- */
-#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */
-#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */
-#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */
-#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */
-
-/*
- * Some symbol defines for the above...
- */
-#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
-#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
-#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
-#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
-#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
-#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
-#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
-#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
-#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-#define MCFSIM_QSPIICR MCFSIM_ICR10 /* QSPI ICR */
-
-/*
- * Define system peripheral IRQ usage.
- */
-#define MCF_IRQ_QSPI 28 /* QSPI, Level 4 */
-#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
-#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
-
-#define MCF_IRQ_UART0 73 /* UART0 */
-#define MCF_IRQ_UART1 74 /* UART1 */
-
-/*
- * General purpose IO registers (in MBAR2).
- */
-#define MCFSIM2_GPIOREAD (MCF_MBAR2 + 0x000) /* GPIO read values */
-#define MCFSIM2_GPIOWRITE (MCF_MBAR2 + 0x004) /* GPIO write values */
-#define MCFSIM2_GPIOENABLE (MCF_MBAR2 + 0x008) /* GPIO enabled */
-#define MCFSIM2_GPIOFUNC (MCF_MBAR2 + 0x00C) /* GPIO function */
-#define MCFSIM2_GPIO1READ (MCF_MBAR2 + 0x0B0) /* GPIO1 read values */
-#define MCFSIM2_GPIO1WRITE (MCF_MBAR2 + 0x0B4) /* GPIO1 write values */
-#define MCFSIM2_GPIO1ENABLE (MCF_MBAR2 + 0x0B8) /* GPIO1 enabled */
-#define MCFSIM2_GPIO1FUNC (MCF_MBAR2 + 0x0BC) /* GPIO1 function */
-
-#define MCFSIM2_GPIOINTSTAT (MCF_MBAR2 + 0xc0) /* GPIO intr status */
-#define MCFSIM2_GPIOINTCLEAR (MCF_MBAR2 + 0xc0) /* GPIO intr clear */
-#define MCFSIM2_GPIOINTENABLE (MCF_MBAR2 + 0xc4) /* GPIO intr enable */
-
-#define MCFSIM2_INTLEVEL1 (MCF_MBAR2 + 0x140) /* Intr level reg 1 */
-#define MCFSIM2_INTLEVEL2 (MCF_MBAR2 + 0x144) /* Intr level reg 2 */
-#define MCFSIM2_INTLEVEL3 (MCF_MBAR2 + 0x148) /* Intr level reg 3 */
-#define MCFSIM2_INTLEVEL4 (MCF_MBAR2 + 0x14c) /* Intr level reg 4 */
-#define MCFSIM2_INTLEVEL5 (MCF_MBAR2 + 0x150) /* Intr level reg 5 */
-#define MCFSIM2_INTLEVEL6 (MCF_MBAR2 + 0x154) /* Intr level reg 6 */
-#define MCFSIM2_INTLEVEL7 (MCF_MBAR2 + 0x158) /* Intr level reg 7 */
-#define MCFSIM2_INTLEVEL8 (MCF_MBAR2 + 0x15c) /* Intr level reg 8 */
-
-#define MCFSIM2_DMAROUTE (MCF_MBAR2 + 0x188) /* DMA routing */
-
-#define MCFSIM2_IDECONFIG1 (MCF_MBAR2 + 0x18c) /* IDEconfig1 */
-#define MCFSIM2_IDECONFIG2 (MCF_MBAR2 + 0x190) /* IDEconfig2 */
-
-/*
- * Define the base interrupt for the second interrupt controller.
- * We set it to 128, out of the way of the base interrupts, and plenty
- * of room for its 64 interrupts.
- */
-#define MCFINTC2_VECBASE 128
-
-#define MCFINTC2_GPIOIRQ0 (MCFINTC2_VECBASE + 32)
-#define MCFINTC2_GPIOIRQ1 (MCFINTC2_VECBASE + 33)
-#define MCFINTC2_GPIOIRQ2 (MCFINTC2_VECBASE + 34)
-#define MCFINTC2_GPIOIRQ3 (MCFINTC2_VECBASE + 35)
-#define MCFINTC2_GPIOIRQ4 (MCFINTC2_VECBASE + 36)
-#define MCFINTC2_GPIOIRQ5 (MCFINTC2_VECBASE + 37)
-#define MCFINTC2_GPIOIRQ6 (MCFINTC2_VECBASE + 38)
-#define MCFINTC2_GPIOIRQ7 (MCFINTC2_VECBASE + 39)
-
-/*
- * Generic GPIO support
- */
-#define MCFGPIO_PIN_MAX 64
-#define MCFGPIO_IRQ_MAX -1
-#define MCFGPIO_IRQ_VECBASE -1
-
-/****************************************************************************/
-
-#ifdef __ASSEMBLER__
-
-/*
- * The M5249C3 board needs a little help getting all its SIM devices
- * initialized at kernel start time. dBUG doesn't set much up, so
- * we need to do it manually.
- */
-.macro m5249c3_setup
- /*
- * Set MBAR1 and MBAR2, just incase they are not set.
- */
- movel #0x10000001,%a0
- movec %a0,%MBAR /* map MBAR region */
- subql #1,%a0 /* get MBAR address in a0 */
-
- movel #0x80000001,%a1
- movec %a1,#3086 /* map MBAR2 region */
- subql #1,%a1 /* get MBAR2 address in a1 */
-
- /*
- * Move secondary interrupts to their base (128).
- */
- moveb #MCFINTC2_VECBASE,%d0
- moveb %d0,0x16b(%a1) /* interrupt base register */
-
- /*
- * Work around broken CSMR0/DRAM vector problem.
- */
- movel #0x001F0021,%d0 /* disable C/I bit */
- movel %d0,0x84(%a0) /* set CSMR0 */
-
- /*
- * Disable the PLL firstly. (Who knows what state it is
- * in here!).
- */
- movel 0x180(%a1),%d0 /* get current PLL value */
- andl #0xfffffffe,%d0 /* PLL bypass first */
- movel %d0,0x180(%a1) /* set PLL register */
- nop
-
-#if CONFIG_CLOCK_FREQ == 140000000
- /*
- * Set initial clock frequency. This assumes M5249C3 board
- * is fitted with 11.2896MHz crystal. It will program the
- * PLL for 140MHz. Lets go fast :-)
- */
- movel #0x125a40f0,%d0 /* set for 140MHz */
- movel %d0,0x180(%a1) /* set PLL register */
- orl #0x1,%d0
- movel %d0,0x180(%a1) /* set PLL register */
-#endif
-
- /*
- * Setup CS1 for ethernet controller.
- * (Setup as per M5249C3 doco).
- */
- movel #0xe0000000,%d0 /* CS1 mapped at 0xe0000000 */
- movel %d0,0x8c(%a0)
- movel #0x001f0021,%d0 /* CS1 size of 1Mb */
- movel %d0,0x90(%a0)
- movew #0x0080,%d0 /* CS1 = 16bit port, AA */
- movew %d0,0x96(%a0)
-
- /*
- * Setup CS2 for IDE interface.
- */
- movel #0x50000000,%d0 /* CS2 mapped at 0x50000000 */
- movel %d0,0x98(%a0)
- movel #0x001f0001,%d0 /* CS2 size of 1MB */
- movel %d0,0x9c(%a0)
- movew #0x0080,%d0 /* CS2 = 16bit, TA */
- movew %d0,0xa2(%a0)
-
- movel #0x00107000,%d0 /* IDEconfig1 */
- movel %d0,0x18c(%a1)
- movel #0x000c0400,%d0 /* IDEconfig2 */
- movel %d0,0x190(%a1)
-
- movel #0x00080000,%d0 /* GPIO19, IDE reset bit */
- orl %d0,0xc(%a1) /* function GPIO19 */
- orl %d0,0x8(%a1) /* enable GPIO19 as output */
- orl %d0,0x4(%a1) /* de-assert IDE reset */
-.endm
-
-#define PLATFORM_SETUP m5249c3_setup
-
-#endif /* __ASSEMBLER__ */
-
-/****************************************************************************/
-#endif /* m5249sim_h */
diff --git a/arch/m68k/include/asm/m525xsim.h b/arch/m68k/include/asm/m525xsim.h
index acab61cb91e..e33f5bb6aca 100644
--- a/arch/m68k/include/asm/m525xsim.h
+++ b/arch/m68k/include/asm/m525xsim.h
@@ -12,6 +12,11 @@
#define m525xsim_h
/****************************************************************************/
+/*
+ * This header supports ColdFire 5249, 5251 and 5253. There are a few
+ * little differences between them, but most of the peripheral support
+ * can be used by all of them.
+ */
#define CPU_NAME "COLDFIRE(m525x)"
#define CPU_INSTR_PER_JIFFY 3
#define MCF_BUSCLK (MCF_CLK / 2)
@@ -65,6 +70,8 @@
#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */
#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */
#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */
+#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */
+#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */
/*
* Secondary Interrupt Controller (in MBAR2)
@@ -101,11 +108,17 @@
#define MCFQSPI_BASE (MCF_MBAR + 0x300) /* Base address QSPI */
#define MCFQSPI_SIZE 0x40 /* Register set size */
-
+#ifdef CONFIG_M5249
+#define MCFQSPI_CS0 29
+#define MCFQSPI_CS1 24
+#define MCFQSPI_CS2 21
+#define MCFQSPI_CS3 22
+#else
#define MCFQSPI_CS0 15
#define MCFQSPI_CS1 16
#define MCFQSPI_CS2 24
#define MCFQSPI_CS3 28
+#endif
/*
* I2C module.
@@ -115,6 +128,7 @@
#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
#define MCFI2C_SIZE1 0x20 /* Register set size */
+
/*
* DMA unit base addresses.
*/
@@ -163,6 +177,7 @@
#define MCF_IRQ_GPIO4 (MCFINTC2_VECBASE + 36)
#define MCF_IRQ_GPIO5 (MCFINTC2_VECBASE + 37)
#define MCF_IRQ_GPIO6 (MCFINTC2_VECBASE + 38)
+#define MCF_IRQ_GPIO7 (MCFINTC2_VECBASE + 39)
#define MCF_IRQ_USBWUP (MCFINTC2_VECBASE + 40)
#define MCF_IRQ_I2C1 (MCFINTC2_VECBASE + 62)
@@ -183,12 +198,111 @@
#define MCFSIM2_GPIOINTCLEAR (MCF_MBAR2 + 0xc0) /* GPIO intr clear */
#define MCFSIM2_GPIOINTENABLE (MCF_MBAR2 + 0xc4) /* GPIO intr enable */
+#define MCFSIM2_DMAROUTE (MCF_MBAR2 + 0x188) /* DMA routing */
+#define MCFSIM2_IDECONFIG1 (MCF_MBAR2 + 0x18c) /* IDEconfig1 */
+#define MCFSIM2_IDECONFIG2 (MCF_MBAR2 + 0x190) /* IDEconfig2 */
+
/*
* Generic GPIO support
*/
#define MCFGPIO_PIN_MAX 64
+#ifdef CONFIG_M5249
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
+#else
#define MCFGPIO_IRQ_MAX 7
#define MCFGPIO_IRQ_VECBASE MCF_IRQ_GPIO0
+#endif
+
+/****************************************************************************/
+
+#ifdef __ASSEMBLER__
+#ifdef CONFIG_M5249C3
+/*
+ * The M5249C3 board needs a little help getting all its SIM devices
+ * initialized at kernel start time. dBUG doesn't set much up, so
+ * we need to do it manually.
+ */
+.macro m5249c3_setup
+ /*
+ * Set MBAR1 and MBAR2, just incase they are not set.
+ */
+ movel #0x10000001,%a0
+ movec %a0,%MBAR /* map MBAR region */
+ subql #1,%a0 /* get MBAR address in a0 */
+
+ movel #0x80000001,%a1
+ movec %a1,#3086 /* map MBAR2 region */
+ subql #1,%a1 /* get MBAR2 address in a1 */
+
+ /*
+ * Move secondary interrupts to their base (128).
+ */
+ moveb #MCFINTC2_VECBASE,%d0
+ moveb %d0,0x16b(%a1) /* interrupt base register */
+
+ /*
+ * Work around broken CSMR0/DRAM vector problem.
+ */
+ movel #0x001F0021,%d0 /* disable C/I bit */
+ movel %d0,0x84(%a0) /* set CSMR0 */
+
+ /*
+ * Disable the PLL firstly. (Who knows what state it is
+ * in here!).
+ */
+ movel 0x180(%a1),%d0 /* get current PLL value */
+ andl #0xfffffffe,%d0 /* PLL bypass first */
+ movel %d0,0x180(%a1) /* set PLL register */
+ nop
+
+#if CONFIG_CLOCK_FREQ == 140000000
+ /*
+	 * Set initial clock frequency. This assumes the M5249C3 board
+	 * is fitted with an 11.2896MHz crystal. It will program the
+	 * PLL for 140MHz. Let's go fast :-)
+ */
+ movel #0x125a40f0,%d0 /* set for 140MHz */
+ movel %d0,0x180(%a1) /* set PLL register */
+ orl #0x1,%d0
+ movel %d0,0x180(%a1) /* set PLL register */
+#endif
+
+ /*
+ * Setup CS1 for ethernet controller.
+ * (Setup as per M5249C3 doco).
+ */
+ movel #0xe0000000,%d0 /* CS1 mapped at 0xe0000000 */
+ movel %d0,0x8c(%a0)
+ movel #0x001f0021,%d0 /* CS1 size of 1Mb */
+ movel %d0,0x90(%a0)
+ movew #0x0080,%d0 /* CS1 = 16bit port, AA */
+ movew %d0,0x96(%a0)
+
+ /*
+ * Setup CS2 for IDE interface.
+ */
+ movel #0x50000000,%d0 /* CS2 mapped at 0x50000000 */
+ movel %d0,0x98(%a0)
+ movel #0x001f0001,%d0 /* CS2 size of 1MB */
+ movel %d0,0x9c(%a0)
+ movew #0x0080,%d0 /* CS2 = 16bit, TA */
+ movew %d0,0xa2(%a0)
+
+ movel #0x00107000,%d0 /* IDEconfig1 */
+ movel %d0,0x18c(%a1)
+ movel #0x000c0400,%d0 /* IDEconfig2 */
+ movel %d0,0x190(%a1)
+
+ movel #0x00080000,%d0 /* GPIO19, IDE reset bit */
+ orl %d0,0xc(%a1) /* function GPIO19 */
+ orl %d0,0x8(%a1) /* enable GPIO19 as output */
+ orl %d0,0x4(%a1) /* de-assert IDE reset */
+.endm
+
+#define PLATFORM_SETUP m5249c3_setup
+#endif /* CONFIG_M5249C3 */
+#endif /* __ASSEMBLER__ */
/****************************************************************************/
#endif /* m525xsim_h */
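
A minimal sketch of how board code might consume the per-CPU QSPI chip-select numbers selected above (hypothetical code, not part of this patch; it assumes the MCFQSPI_CSn values are usable directly as GPIO numbers):

	static const int qspi_cs_gpio[] = {
		MCFQSPI_CS0, MCFQSPI_CS1, MCFQSPI_CS2, MCFQSPI_CS3,
	};

	/* With CONFIG_M5249 this picks up 29/24/21/22, otherwise 15/16/24/28,
	 * so the QSPI set-up code itself needs no #ifdefs. */
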
diff --git a/arch/m68k/include/asm/mcfclk.h b/arch/m68k/include/asm/mcfclk.h
index b676a02bb39..ea4791e3a55 100644
--- a/arch/m68k/include/asm/mcfclk.h
+++ b/arch/m68k/include/asm/mcfclk.h
@@ -8,7 +8,6 @@
struct clk;
-#ifdef MCFPM_PPMCR0
struct clk_ops {
void (*enable)(struct clk *);
void (*disable)(struct clk *);
@@ -23,6 +22,8 @@ struct clk {
};
extern struct clk *mcf_clks[];
+
+#ifdef MCFPM_PPMCR0
extern struct clk_ops clk_ops0;
#ifdef MCFPM_PPMCR1
extern struct clk_ops clk_ops1;
@@ -38,6 +39,12 @@ static struct clk __clk_##clk_bank##_##clk_slot = { \
void __clk_init_enabled(struct clk *);
void __clk_init_disabled(struct clk *);
+#else
+#define DEFINE_CLK(clk_ref, clk_name, clk_rate) \
+ static struct clk clk_##clk_ref = { \
+ .name = clk_name, \
+ .rate = clk_rate, \
+ }
#endif /* MCFPM_PPMCR0 */
#endif /* mcfclk_h */
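
A minimal sketch of how a platform without the MCFPM_PPMCR0 power-management registers might use the new fallback DEFINE_CLK() (hypothetical clock names; real platform files may differ):

	#include <asm/mcfclk.h>

	/* Expands to: static struct clk clk_pll = { .name = "pll.0", .rate = MCF_CLK, }; */
	DEFINE_CLK(pll, "pll.0", MCF_CLK);
	DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);

	struct clk *mcf_clks[] = {
		&clk_pll,
		&clk_sys,
		NULL,
	};
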
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index 7a83e619e73..a04fd9b2714 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -24,10 +24,7 @@
#elif defined(CONFIG_M523x)
#include <asm/m523xsim.h>
#include <asm/mcfintc.h>
-#elif defined(CONFIG_M5249)
-#include <asm/m5249sim.h>
-#include <asm/mcfintc.h>
-#elif defined(CONFIG_M525x)
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
#include <asm/m525xsim.h>
#include <asm/mcfintc.h>
#elif defined(CONFIG_M527x)
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 90595721185..ef209169579 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -26,7 +26,7 @@ extern unsigned long memory_end;
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+#define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))
#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
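
Presumably the point of routing page_to_virt() through __va() is to give it a pointer type rather than a bare unsigned long; the merged mem_init() later in this patch relies on exactly that when it does (same code as shown below, repeated here only for illustration):

	char *addr = page_to_virt(page);	/* page is a struct page * */
	if (addr >= _text && addr < _etext)
		codepages++;
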
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index 0f717045bdd..a45cb6894ad 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -15,6 +15,7 @@
#define profile_pc(regs) instruction_pointer(regs)
#define current_pt_regs() \
(struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1
+#define current_user_stack_pointer() rdusp()
#define arch_has_single_step() (1)
diff --git a/arch/m68k/include/asm/shmparam.h b/arch/m68k/include/asm/shmparam.h
deleted file mode 100644
index 558892a2efb..00000000000
--- a/arch/m68k/include/asm/shmparam.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SHMPARAM_H
-#define _M68K_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _M68K_SHMPARAM_H */
diff --git a/arch/m68k/include/asm/spinlock.h b/arch/m68k/include/asm/spinlock.h
deleted file mode 100644
index 20f46e27b53..00000000000
--- a/arch/m68k/include/asm/spinlock.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M68K_SPINLOCK_H
-#define __M68K_SPINLOCK_H
-
-#error "m68k doesn't do SMP yet"
-
-#endif
diff --git a/arch/m68k/include/asm/termios.h b/arch/m68k/include/asm/termios.h
deleted file mode 100644
index ad8efb09866..00000000000
--- a/arch/m68k/include/asm/termios.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _M68K_TERMIOS_H
-#define _M68K_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/* intr=^C quit=^| erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- unsigned short tmp; \
- get_user(tmp, &(termio)->c_iflag); \
- (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
- get_user(tmp, &(termio)->c_oflag); \
- (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
- get_user(tmp, &(termio)->c_cflag); \
- (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
- get_user(tmp, &(termio)->c_lflag); \
- (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
- get_user((termios)->c_line, &(termio)->c_line); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* _M68K_TERMIOS_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a021d67cdd7..847994ce680 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -31,7 +31,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 972bce120e1..1fef45ada09 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,26 +1,27 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += msgbuf.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += termbits.h
+generic-y += termios.h
+
header-y += a.out.h
-header-y += auxvec.h
header-y += byteorder.h
header-y += cachectl.h
header-y += fcntl.h
header-y += ioctls.h
-header-y += msgbuf.h
header-y += param.h
header-y += poll.h
header-y += posix_types.h
header-y += ptrace.h
-header-y += sembuf.h
header-y += setup.h
-header-y += shmbuf.h
header-y += sigcontext.h
header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
header-y += stat.h
header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
header-y += unistd.h
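
Each header moved from header-y to generic-y here loses its arch-specific copy (the deletions that follow) and is instead satisfied by a generated wrapper that pulls in the asm-generic version — roughly like this (illustrative only; the exact generated file and path may differ):

	/* conceptually, the generated asm/termios.h wrapper: */
	#include <asm-generic/termios.h>
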
diff --git a/arch/m68k/include/uapi/asm/auxvec.h b/arch/m68k/include/uapi/asm/auxvec.h
deleted file mode 100644
index 844d6d52204..00000000000
--- a/arch/m68k/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMm68k_AUXVEC_H
-#define __ASMm68k_AUXVEC_H
-
-#endif
diff --git a/arch/m68k/include/uapi/asm/msgbuf.h b/arch/m68k/include/uapi/asm/msgbuf.h
deleted file mode 100644
index 243cb798de8..00000000000
--- a/arch/m68k/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _M68K_MSGBUF_H
-#define _M68K_MSGBUF_H
-
-/*
- * The msqid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- unsigned long __unused1;
- __kernel_time_t msg_rtime; /* last msgrcv time */
- unsigned long __unused2;
- __kernel_time_t msg_ctime; /* last change time */
- unsigned long __unused3;
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _M68K_MSGBUF_H */
diff --git a/arch/m68k/include/uapi/asm/sembuf.h b/arch/m68k/include/uapi/asm/sembuf.h
deleted file mode 100644
index 2308052a8c2..00000000000
--- a/arch/m68k/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _M68K_SEMBUF_H
-#define _M68K_SEMBUF_H
-
-/*
- * The semid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
- struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
- __kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _M68K_SEMBUF_H */
diff --git a/arch/m68k/include/uapi/asm/shmbuf.h b/arch/m68k/include/uapi/asm/shmbuf.h
deleted file mode 100644
index f8928d62f1b..00000000000
--- a/arch/m68k/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _M68K_SHMBUF_H
-#define _M68K_SHMBUF_H
-
-/*
- * The shmid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- unsigned long __unused1;
- __kernel_time_t shm_dtime; /* last detach time */
- unsigned long __unused2;
- __kernel_time_t shm_ctime; /* last change time */
- unsigned long __unused3;
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _M68K_SHMBUF_H */
diff --git a/arch/m68k/include/uapi/asm/signal.h b/arch/m68k/include/uapi/asm/signal.h
index 2b450f311bd..cba6f858bb4 100644
--- a/arch/m68k/include/uapi/asm/signal.h
+++ b/arch/m68k/include/uapi/asm/signal.h
@@ -80,12 +80,6 @@ typedef unsigned long sigset_t;
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/m68k/include/uapi/asm/socket.h b/arch/m68k/include/uapi/asm/socket.h
deleted file mode 100644
index 285da3b6ad9..00000000000
--- a/arch/m68k/include/uapi/asm/socket.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-#define SO_GET_FILTER SO_ATTACH_FILTER
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#define SO_PROTOCOL 38
-#define SO_DOMAIN 39
-
-#define SO_RXQ_OVFL 40
-
-#define SO_WIFI_STATUS 41
-#define SCM_WIFI_STATUS SO_WIFI_STATUS
-#define SO_PEEK_OFF 42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS 43
-
-#endif /* _ASM_SOCKET_H */
diff --git a/arch/m68k/include/uapi/asm/sockios.h b/arch/m68k/include/uapi/asm/sockios.h
deleted file mode 100644
index c04a23943cb..00000000000
--- a/arch/m68k/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_M68K_SOCKIOS__
-#define __ARCH_M68K_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* __ARCH_M68K_SOCKIOS__ */
diff --git a/arch/m68k/include/uapi/asm/termbits.h b/arch/m68k/include/uapi/asm/termbits.h
deleted file mode 100644
index aea1e37b765..00000000000
--- a/arch/m68k/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef __ARCH_M68K_TERMBITS_H__
-#define __ARCH_M68K_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC 0200000
-
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* __ARCH_M68K_TERMBITS_H__ */
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 388e5cc8959..cbc624af449 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -506,7 +506,7 @@ static inline void bus_error030 (struct frame *fp)
addr -= 2;
if (buserr_type & SUN3_BUSERR_INVALID) {
- if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
+ if (!mmu_emu_handle_fault(addr, 1, 0))
do_page_fault (&fp->ptregs, addr, 0);
} else {
#ifdef DEBUG
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index 10ca051d56b..c1e2dfb206f 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -10,7 +10,7 @@
void *memcpy(void *to, const void *from, size_t n)
{
void *xto = to;
- size_t temp, temp1;
+ size_t temp;
if (!n)
return xto;
@@ -47,6 +47,7 @@ void *memcpy(void *to, const void *from, size_t n)
for (; temp; temp--)
*lto++ = *lfrom++;
#else
+ size_t temp1;
asm volatile (
" movel %2,%3\n"
" andw #7,%3\n"
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index 3384a5244fb..0663067870f 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -50,7 +50,7 @@ fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
* sqrt(m*2^e) =
* sqrt(2*m) * 2^(p) , if e = 2*p + 1
*
- * So we use the last bit of the exponent to decide wether to
+ * So we use the last bit of the exponent to decide whether to
* use the m or 2*m.
*
* Since only the fractional part of the mantissa is stored and
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 27b5ce089a3..f0e05bce92f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -1,5 +1,225 @@
+/*
+ * linux/arch/m68k/mm/init.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#ifdef CONFIG_ATARI
+#include <asm/atari_stram.h>
+#endif
+#include <asm/sections.h>
+#include <asm/tlb.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+void *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
#ifdef CONFIG_MMU
-#include "init_mm.c"
+
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
+
+int m68k_virt_to_node_shift;
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void __init m68k_setup_node(int node)
+{
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+ struct mem_info *info = m68k_memory + node;
+ int i, end;
+
+ i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+ end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+ for (; i <= end; i++) {
+ if (pg_data_table[i])
+ printk("overlap at %u for chunk %u\n", i, node);
+ pg_data_table[i] = pg_data_map + node;
+ }
+#endif
+ pg_data_map[node].bdata = bootmem_node_data + node;
+ node_set_online(node);
+}
+
+extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+
+#else /* CONFIG_MMU */
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+ /*
+ * Make sure start_mem is page aligned, otherwise bootmem and
+ * page_alloc get different views of the world.
+ */
+ unsigned long end_mem = memory_end & PAGE_MASK;
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+
+ high_memory = (void *) end_mem;
+
+ empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ /*
+ * Set up SFC/DFC registers (user data space).
+ */
+ set_fs (USER_DS);
+
+ zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+ free_area_init(zones_size);
+}
+
+#endif /* CONFIG_MMU */
+
+void free_initmem(void)
+{
+#ifndef CONFIG_MMU_SUN3
+ unsigned long addr;
+
+ addr = (unsigned long) __init_begin;
+ for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+ pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
+ (addr - (unsigned long) __init_begin) >> 10,
+ (unsigned int) __init_begin, (unsigned int) __init_end);
+#endif /* CONFIG_MMU_SUN3 */
+}
+
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS &vectors[0]
#else
-#include "init_no.c"
+#define VECTORS _ramvec
+#endif
+
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+ pr_notice("Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
+ " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
+ MLK(VECTORS, VECTORS + 256),
+ MLM(KMAP_START, KMAP_END),
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_stext, _etext),
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(__bss_start, __bss_stop));
+}
+
+void __init mem_init(void)
+{
+ pg_data_t *pgdat;
+ int codepages = 0;
+ int datapages = 0;
+ int initpages = 0;
+ int i;
+
+ /* this will put all memory onto the freelists */
+ totalram_pages = num_physpages = 0;
+ for_each_online_pgdat(pgdat) {
+ num_physpages += pgdat->node_present_pages;
+
+ totalram_pages += free_all_bootmem_node(pgdat);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat->node_mem_map + i;
+ char *addr = page_to_virt(page);
+
+ if (!PageReserved(page))
+ continue;
+ if (addr >= _text &&
+ addr < _etext)
+ codepages++;
+ else if (addr >= __init_begin &&
+ addr < __init_end)
+ initpages++;
+ else
+ datapages++;
+ }
+ }
+
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ /* insert pointer tables allocated so far into the tablelist */
+ init_pointer_table((unsigned long)kernel_pg_dir);
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ if (pgd_present(kernel_pg_dir[i]))
+ init_pointer_table(__pgd_page(kernel_pg_dir[i]));
+ }
+
+ /* insert also pointer table that we used to unmap the zero page */
+ if (zero_pgtable)
+ init_pointer_table((unsigned long)zero_pgtable);
+#endif
+
+ pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ totalram_pages << (PAGE_SHIFT-10),
+ codepages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10));
+ print_memmap();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ int pages = 0;
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ totalram_pages++;
+ pages++;
+ }
+ pr_notice("Freeing initrd memory: %dk freed\n",
+ pages << (PAGE_SHIFT - 10));
+}
#endif
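
As a quick sanity check on the layout banner above: MLK()/MLM() just expand a (base, top) pair into three printf arguments — base, top, and the size shifted down to KiB or MiB. A standalone sketch with made-up addresses:

	#include <stdio.h>

	int main(void)
	{
		unsigned long b = 0x00001000, t = 0x00009000;

		/* same arithmetic as MLK(b, t): size = (t - b) >> 10 */
		printf("0x%08lx - 0x%08lx (%4ld KiB)\n", b, t, (t - b) >> 10);
		return 0;
	}
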
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
deleted file mode 100644
index 282f9de6896..00000000000
--- a/arch/m68k/mm/init_mm.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * linux/arch/m68k/mm/init.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- *
- * Contains common initialization routines, specific init code moved
- * to motorola.c and sun3mmu.c
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/traps.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#ifdef CONFIG_ATARI
-#include <asm/atari_stram.h>
-#endif
-#include <asm/sections.h>
-#include <asm/tlb.h>
-
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-int m68k_virt_to_node_shift;
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
-void __init m68k_setup_node(int node)
-{
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
- struct mem_info *info = m68k_memory + node;
- int i, end;
-
- i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
- end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
- for (; i <= end; i++) {
- if (pg_data_table[i])
- printk("overlap at %u for chunk %u\n", i, node);
- pg_data_table[i] = pg_data_map + node;
- }
-#endif
- pg_data_map[node].bdata = bootmem_node_data + node;
- node_set_online(node);
-}
-
-
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-
-void *empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
-
-extern void init_pointer_table(unsigned long ptable);
-
-/* References to section boundaries */
-
-extern pmd_t *zero_pgtable;
-
-#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-#define VECTORS &vectors[0]
-#else
-#define VECTORS _ramvec
-#endif
-
-void __init print_memmap(void)
-{
-#define UL(x) ((unsigned long) (x))
-#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
-#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
-
- pr_notice("Virtual kernel memory layout:\n"
- " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
- " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
- " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
- " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
- " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
- MLK(VECTORS, VECTORS + 256),
- MLM(KMAP_START, KMAP_END),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_stext, _etext),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
-}
-
-void __init mem_init(void)
-{
- pg_data_t *pgdat;
- int codepages = 0;
- int datapages = 0;
- int initpages = 0;
- int i;
-
- /* this will put all memory onto the freelists */
- totalram_pages = num_physpages = 0;
- for_each_online_pgdat(pgdat) {
- num_physpages += pgdat->node_present_pages;
-
- totalram_pages += free_all_bootmem_node(pgdat);
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- struct page *page = pgdat->node_mem_map + i;
- char *addr = page_to_virt(page);
-
- if (!PageReserved(page))
- continue;
- if (addr >= _text &&
- addr < _etext)
- codepages++;
- else if (addr >= __init_begin &&
- addr < __init_end)
- initpages++;
- else
- datapages++;
- }
- }
-
-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
- /* insert pointer tables allocated so far into the tablelist */
- init_pointer_table((unsigned long)kernel_pg_dir);
- for (i = 0; i < PTRS_PER_PGD; i++) {
- if (pgd_present(kernel_pg_dir[i]))
- init_pointer_table(__pgd_page(kernel_pg_dir[i]));
- }
-
- /* insert also pointer table that we used to unmap the zero page */
- if (zero_pgtable)
- init_pointer_table((unsigned long)zero_pgtable);
-#endif
-
- printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- totalram_pages << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10));
- print_memmap();
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk ("Freeing initrd memory: %dk freed\n", pages);
-}
-#endif
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
deleted file mode 100644
index 688e3664aea..00000000000
--- a/arch/m68k/mm/init_no.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * linux/arch/m68knommu/mm/init.c
- *
- * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
- *
- * Based on:
- *
- * linux/arch/m68k/mm/init.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- *
- * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
- * DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/sections.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-void *empty_zero_page;
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- * The parameters are pointers to where to stick the starting and ending
- * addresses of available kernel virtual memory.
- */
-void __init paging_init(void)
-{
- /*
- * Make sure start_mem is page aligned, otherwise bootmem and
- * page_alloc get different views of the world.
- */
- unsigned long end_mem = memory_end & PAGE_MASK;
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
-
- /*
- * Set up SFC/DFC registers (user data space).
- */
- set_fs (USER_DS);
-
- zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
- free_area_init(zones_size);
-}
-
-void __init mem_init(void)
-{
- int codek = 0, datak = 0, initk = 0;
- unsigned long tmp;
- unsigned long len = _ramend - _rambase;
- unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
- unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
-
- pr_debug("Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
-
- end_mem &= PAGE_MASK;
- high_memory = (void *) end_mem;
-
- start_mem = PAGE_ALIGN(start_mem);
- max_mapnr = num_physpages = (((unsigned long) high_memory) - PAGE_OFFSET) >> PAGE_SHIFT;
-
- /* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
-
- codek = (_etext - _stext) >> 10;
- datak = (__bss_stop - _sdata) >> 10;
- initk = (__init_begin - __init_end) >> 10;
-
- tmp = nr_free_pages() << PAGE_SHIFT;
- printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n",
- tmp >> 10,
- len >> 10,
- codek,
- datak
- );
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- pr_notice("Freeing initrd memory: %luk freed\n",
- pages * (PAGE_SIZE / 1024));
-}
-#endif
-
-void free_initmem(void)
-{
-#ifdef CONFIG_RAMKERNEL
- unsigned long addr;
- /*
- * The following code should be cool even if these sections
- * are not page aligned.
- */
- addr = PAGE_ALIGN((unsigned long) __init_begin);
- /* next to check that the page we free is not a partial page */
- for (; addr + PAGE_SIZE < ((unsigned long) __init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
- (addr - PAGE_ALIGN((unsigned long) __init_begin)) >> 10,
- (int)(PAGE_ALIGN((unsigned long) __init_begin)),
- (int)(addr - PAGE_SIZE));
-#endif
-}
-
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 875b800ef0d..f58fafe7e4c 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -29,10 +29,6 @@ atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
extern unsigned long num_pages;
-void free_initmem(void)
-{
-}
-
/*
* ColdFire paging_init derived from sun3.
*/
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 0dafa693515..251c5437787 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -304,17 +304,3 @@ void __init paging_init(void)
}
}
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)__init_begin;
- for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- virt_to_page(addr)->flags &= ~(1 << PG_reserved);
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
-}
-
-
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index e0804060501..269f81158a3 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -30,10 +30,6 @@ const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
extern unsigned long num_pages;
-void free_initmem(void)
-{
-}
-
/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
diff --git a/arch/m68k/platform/68000/Makefile b/arch/m68k/platform/68000/Makefile
new file mode 100644
index 00000000000..1eab70c7194
--- /dev/null
+++ b/arch/m68k/platform/68000/Makefile
@@ -0,0 +1,18 @@
+##################################################
+#
+# Makefile for 68000 core based cpus
+#
+# 2012.10.21, Luis Alves <ljalvs@gmail.com>
+# Merged all 68000 based cpu's config
+# files into a single directory.
+#
+
+# 68328, 68EZ328, 68VZ328
+
+obj-y += entry.o ints.o timers.o
+obj-$(CONFIG_M68328) += m68328.o
+obj-$(CONFIG_M68EZ328) += m68EZ328.o
+obj-$(CONFIG_M68VZ328) += m68VZ328.o
+obj-$(CONFIG_ROM) += romvec.o
+
+extra-y := head.o
diff --git a/arch/m68k/platform/68VZ328/bootlogo.h b/arch/m68k/platform/68000/bootlogo-vz.h
index b38e2b25514..b38e2b25514 100644
--- a/arch/m68k/platform/68VZ328/bootlogo.h
+++ b/arch/m68k/platform/68000/bootlogo-vz.h
diff --git a/arch/m68k/platform/68328/bootlogo.h b/arch/m68k/platform/68000/bootlogo.h
index b896c933faf..b896c933faf 100644
--- a/arch/m68k/platform/68328/bootlogo.h
+++ b/arch/m68k/platform/68000/bootlogo.h
diff --git a/arch/m68k/platform/68328/entry.S b/arch/m68k/platform/68000/entry.S
index 7f91c2fde50..7f91c2fde50 100644
--- a/arch/m68k/platform/68328/entry.S
+++ b/arch/m68k/platform/68000/entry.S
diff --git a/arch/m68k/platform/68000/head.S b/arch/m68k/platform/68000/head.S
new file mode 100644
index 00000000000..536ef9616da
--- /dev/null
+++ b/arch/m68k/platform/68000/head.S
@@ -0,0 +1,240 @@
+/*
+ * head.S - Common startup code for 68000 core based CPU's
+ *
+ * 2012.10.21, Luis Alves <ljalvs@gmail.com>, Single head.S file for all
+ * 68000 core based CPU's. Based on the sources from:
+ * Coldfire by Greg Ungerer <gerg@snapgear.com>
+ * 68328 by D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
+ * Kenneth Albanowski <kjahds@kjahds.com>,
+ * The Silver Hammer Group, Ltd.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+
+/*****************************************************************************
+ * UCSIMM and UCDIMM use CONFIG_MEMORY_RESERVE to reserve some RAM
+ *****************************************************************************/
+#ifdef CONFIG_MEMORY_RESERVE
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)-(CONFIG_MEMORY_RESERVE*0x100000)
+#else
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)
+#endif
+/*****************************************************************************/
+
+.global _start
+.global _rambase
+.global _ramvec
+.global _ramstart
+.global _ramend
+
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+.global bootlogo_bits
+#endif
+
+/* If DEBUG_HEAD_CODE is defined, the serial port in the 68x328 is initialized */
+/* #define DEBUG_HEAD_CODE */
+#undef DEBUG_HEAD_CODE
+
+.data
+
+/*****************************************************************************
+ * RAM setup pointers. Used by the kernel to determine RAM location and size.
+ *****************************************************************************/
+
+_rambase:
+ .long 0
+_ramvec:
+ .long 0
+_ramstart:
+ .long 0
+_ramend:
+ .long 0
+
+__HEAD
+
+/*****************************************************************************
+ * Entry point, where all begins!
+ *****************************************************************************/
+
+_start:
+
+/* The Pilot needs this specific signature at the start of ROM */
+#ifdef CONFIG_PILOT
+ .byte 0x4e, 0xfa, 0x00, 0x0a /* bra opcode (jmp 10 bytes) */
+ .byte 'b', 'o', 'o', 't'
+ .word 10000
+ nop
+ moveq #0, %d0
+ movew %d0, 0xfffff618 /* Watchdog off */
+ movel #0x00011f07, 0xfffff114 /* CS A1 Mask */
+#endif /* CONFIG_PILOT */
+
+ movew #0x2700, %sr /* disable all interrupts */
+
+/*****************************************************************************
+ * Setup PLL and wait for it to settle (in 68x328 cpu's).
+ * Also, if enabled, init serial port.
+ *****************************************************************************/
+#if defined(CONFIG_M68328) || \
+ defined(CONFIG_M68EZ328) || \
+ defined(CONFIG_M68VZ328)
+
+/* Serial port setup. Should only be needed if debugging this startup code. */
+#ifdef DEBUG_HEAD_CODE
+ movew #0x0800, 0xfffff906 /* Ignore CTS */
+ movew #0x010b, 0xfffff902 /* BAUD to 9600 */
+ movew #0xe100, 0xfffff900 /* enable */
+#endif /* DEBUG_HEAD */
+
+#ifdef CONFIG_PILOT
+ movew #0x2410, 0xfffff200 /* PLLCR */
+#else
+ movew #0x2400, 0xfffff200 /* PLLCR */
+#endif
+ movew #0x0123, 0xfffff202 /* PLLFSR */
+ moveq #0, %d0
+ movew #16384, %d0 /* PLL settle wait loop */
+_pll_settle:
+ subw #1, %d0
+ bne _pll_settle
+#endif /* CONFIG_M68x328 */
+
+
+/*****************************************************************************
+ * If running kernel from ROM some specific initialization has to be done.
+ * (Assuming that everything is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_ROMKERNEL
+
+/*****************************************************************************
+ * Init chip registers (uCsimm specific)
+ *****************************************************************************/
+#ifdef CONFIG_UCSIMM
+ moveb #0x00, 0xfffffb0b /* Watchdog off */
+ moveb #0x10, 0xfffff000 /* SCR */
+ moveb #0x00, 0xfffff40b /* enable chip select */
+ moveb #0x00, 0xfffff423 /* enable /DWE */
+ moveb #0x08, 0xfffffd0d /* disable hardmap */
+ moveb #0x07, 0xfffffd0e /* level 7 interrupt clear */
+ movew #0x8600, 0xfffff100 /* FLASH at 0x10c00000 */
+ movew #0x018b, 0xfffff110 /* 2Meg, enable, 0ws */
+ movew #0x8f00, 0xfffffc00 /* DRAM configuration */
+ movew #0x9667, 0xfffffc02 /* DRAM control */
+ movew #0x0000, 0xfffff106 /* DRAM at 0x00000000 */
+ movew #0x068f, 0xfffff116 /* 8Meg, enable, 0ws */
+ moveb #0x40, 0xfffff300 /* IVR */
+ movel #0x007FFFFF, %d0 /* IMR */
+ movel %d0, 0xfffff304
+ moveb 0xfffff42b, %d0
+ andb #0xe0, %d0
+ moveb %d0, 0xfffff42b
+#endif
+
+/*****************************************************************************
+ * Init LCD controller.
+ * (Assuming that LCD controller is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_INIT_LCD
+#ifdef CONFIG_PILOT
+ moveb #0, 0xfffffA27 /* LCKCON */
+ movel #_start, 0xfffffA00 /* LSSA */
+ moveb #0xa, 0xfffffA05 /* LVPW */
+ movew #0x9f, 0xFFFFFa08 /* LXMAX */
+ movew #0x9f, 0xFFFFFa0a /* LYMAX */
+ moveb #9, 0xfffffa29 /* LBAR */
+ moveb #0, 0xfffffa25 /* LPXCD */
+ moveb #0x04, 0xFFFFFa20 /* LPICF */
+ moveb #0x58, 0xfffffA27 /* LCKCON */
+ moveb #0x85, 0xfffff429 /* PFDATA */
+ moveb #0xd8, 0xfffffA27 /* LCKCON */
+ moveb #0xc5, 0xfffff429 /* PFDATA */
+ moveb #0xd5, 0xfffff429 /* PFDATA */
+ movel #bootlogo_bits, 0xFFFFFA00 /* LSSA */
+ moveb #10, 0xFFFFFA05 /* LVPW */
+ movew #160, 0xFFFFFA08 /* LXMAX */
+ movew #160, 0xFFFFFA0A /* LYMAX */
+#else /* CONFIG_PILOT */
+ movel #bootlogo_bits, 0xfffffA00 /* LSSA */
+ moveb #0x28, 0xfffffA05 /* LVPW */
+ movew #0x280, 0xFFFFFa08 /* LXMAX */
+ movew #0x1df, 0xFFFFFa0a /* LYMAX */
+ moveb #0, 0xfffffa29 /* LBAR */
+ moveb #0, 0xfffffa25 /* LPXCD */
+ moveb #0x08, 0xFFFFFa20 /* LPICF */
+ moveb #0x01, 0xFFFFFA21 /* -ve pol */
+ moveb #0x81, 0xfffffA27 /* LCKCON */
+ movew #0xff00, 0xfffff412 /* LCD pins */
+#endif /* CONFIG_PILOT */
+#endif /* CONFIG_INIT_LCD */
+
+/*****************************************************************************
+ * Kernel is running from FLASH/ROM (XIP)
+ * Copy init text & data to RAM
+ *****************************************************************************/
+ moveal #_etext, %a0
+ moveal #_sdata, %a1
+ moveal #__bss_start, %a2
+_copy_initmem:
+ movel %a0@+, %a1@+
+ cmpal %a1, %a2
+ bhi _copy_initmem
+#endif /* CONFIG_ROMKERNEL */
+
+/*****************************************************************************
+ * Setup basic memory information for kernel
+ *****************************************************************************/
+ movel #CONFIG_VECTORBASE,_ramvec /* set vector base location */
+ movel #CONFIG_RAMBASE,_rambase /* set the base of RAM */
+ movel #RAMEND, _ramend /* set end ram addr */
+ lea __bss_stop,%a1
+ movel %a1,_ramstart
+
+/*****************************************************************************
+ * If the kernel is in RAM, move romfs to right above bss and
+ * adjust _ramstart to where romfs ends.
+ *
+ * (Do this only if CONFIG_MTD_UCLINUX is true)
+ *****************************************************************************/
+
+#if defined(CONFIG_ROMFS_FS) && defined(CONFIG_RAMKERNEL) && \
+ defined(CONFIG_MTD_UCLINUX)
+ lea __bss_start, %a0 /* get start of bss */
+ lea __bss_stop, %a1 /* set up destination */
+ movel %a0, %a2 /* copy of bss start */
+
+ movel 8(%a0), %d0 /* get size of ROMFS */
+ addql #8, %d0 /* allow for rounding */
+ andl #0xfffffffc, %d0 /* whole words */
+
+ addl %d0, %a0 /* copy from end */
+ addl %d0, %a1 /* copy from end */
+ movel %a1, _ramstart /* set start of ram */
+_copy_romfs:
+ movel -(%a0), -(%a1) /* copy dword */
+ cmpl %a0, %a2 /* check if at end */
+ bne _copy_romfs
+#endif /* CONFIG_ROMFS_FS && CONFIG_RAMKERNEL && CONFIG_MTD_UCLINUX */
+
+/*****************************************************************************
+ * Clear bss region
+ *****************************************************************************/
+ lea __bss_start, %a0 /* get start of bss */
+ lea __bss_stop, %a1 /* get end of bss */
+_clear_bss:
+ movel #0, (%a0)+ /* clear each word */
+ cmpl %a0, %a1 /* check if at end */
+ bne _clear_bss
+
+/*****************************************************************************
+ * Load the current task pointer and stack.
+ *****************************************************************************/
+ lea init_thread_union,%a0
+ lea THREAD_SIZE(%a0),%sp
+ jsr start_kernel /* start Linux kernel */
+_exit:
+ jmp _exit /* should never get here */
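
For reference, the _copy_romfs sequence above amounts to the following C (an illustrative equivalent only — the real step must run in assembly before any C environment exists; __bss_start/__bss_stop are the linker symbols used above):

	extern char __bss_start[], __bss_stop[];

	unsigned long size, *src, *dst;

	size = *(unsigned long *)(__bss_start + 8);	/* romfs length field */
	size = (size + 8) & ~3UL;			/* allow for rounding, whole longwords */

	src = (unsigned long *)(__bss_start + size);	/* copy backwards from the end */
	dst = (unsigned long *)(__bss_stop + size);	/* _ramstart ends up here */

	while (src > (unsigned long *)__bss_start)
		*--dst = *--src;			/* move image to just above bss */
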
diff --git a/arch/m68k/platform/68328/ints.c b/arch/m68k/platform/68000/ints.c
index b3810febb3e..cda49b12d7b 100644
--- a/arch/m68k/platform/68328/ints.c
+++ b/arch/m68k/platform/68000/ints.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/m68knommu/platform/68328/ints.c
+ * ints.c - Generic interrupt controller support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
diff --git a/arch/m68k/platform/68328/config.c b/arch/m68k/platform/68000/m68328.c
index 8c20e891e98..a86eb66835a 100644
--- a/arch/m68k/platform/68328/config.c
+++ b/arch/m68k/platform/68000/m68328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68328/config.c
+ * m68328.c - 68328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/arch/m68k/platform/68EZ328/config.c b/arch/m68k/platform/68000/m68EZ328.c
index 4f158d551f0..a6eb72d7500 100644
--- a/arch/m68k/platform/68EZ328/config.c
+++ b/arch/m68k/platform/68000/m68EZ328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68EZ328/config.c
+ * m68EZ328.c - 68EZ328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/arch/m68k/platform/68VZ328/config.c b/arch/m68k/platform/68000/m68VZ328.c
index 2ed8dc305e4..eb6964fbec0 100644
--- a/arch/m68k/platform/68VZ328/config.c
+++ b/arch/m68k/platform/68000/m68VZ328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68VZ328/config.c
+ * m68VZ328.c - 68VZ328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
@@ -28,7 +28,7 @@
#include <asm/bootstd.h>
#ifdef CONFIG_INIT_LCD
-#include "bootlogo.h"
+#include "bootlogo-vz.h"
#endif
/***************************************************************************/
diff --git a/arch/m68k/platform/68328/romvec.S b/arch/m68k/platform/68000/romvec.S
index 31084466eae..15c70cd6453 100644
--- a/arch/m68k/platform/68328/romvec.S
+++ b/arch/m68k/platform/68000/romvec.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/m68knommu/platform/68328/romvec.S
+ * romvec.S - Vector table for 68000 cpus
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68000/timers.c
index f4dc9b29560..ec30acbfe6d 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68000/timers.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68328/timers.c
+ * timers.c - Generic hardware timer support.
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile
deleted file mode 100644
index ee61bf84d4a..00000000000
--- a/arch/m68k/platform/68328/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68328.
-#
-
-model-y := ram
-model-$(CONFIG_ROMKERNEL) := rom
-
-head-y = head-$(model-y).o
-head-$(CONFIG_PILOT) = head-pilot.o
-head-$(CONFIG_DRAGEN2) = head-de2.o
-
-obj-y += entry.o ints.o timers.o
-obj-$(CONFIG_M68328) += config.o
-obj-$(CONFIG_ROM) += romvec.o
-
-extra-y := head.o
-
-$(obj)/head.o: $(obj)/$(head-y)
- ln -sf $(head-y) $(obj)/head.o
-
-clean-files := $(obj)/head.o $(head-y)
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
deleted file mode 100644
index 537d3245b53..00000000000
--- a/arch/m68k/platform/68328/head-de2.S
+++ /dev/null
@@ -1,128 +0,0 @@
-
-#define MEM_END 0x00800000 /* Memory size 8Mb */
-
-#undef CRT_DEBUG
-
-.macro PUTC CHAR
-#ifdef CRT_DEBUG
- moveq #\CHAR, %d7
- jsr putc
-#endif
-.endm
-
- .global _start
- .global _rambase
- .global _ramvec
- .global _ramstart
- .global _ramend
-
- .data
-
-/*
- * Set up the usable of RAM stuff
- */
-_rambase:
- .long 0
-_ramvec:
- .long 0
-_ramstart:
- .long 0
-_ramend:
- .long 0
-
- .text
-
-_start:
-
-/*
- * Setup initial stack
- */
- /* disable all interrupts */
- movew #0x2700, %sr
- movel #-1, 0xfffff304
- movel #MEM_END-4, %sp
-
- PUTC '\r'
- PUTC '\n'
- PUTC 'A'
- PUTC 'B'
-
-/*
- * Determine end of RAM
- */
-
- movel #MEM_END, %a0
- movel %a0, _ramend
-
- PUTC 'C'
-
-/*
- * Move ROM filesystem above bss :-)
- */
-
- moveal #__bss_start, %a0 /* romfs at the start of bss */
- moveal #__bss_stop, %a1 /* Set up destination */
- movel %a0, %a2 /* Copy of bss start */
-
- movel 8(%a0), %d1 /* Get size of ROMFS */
- addql #8, %d1 /* Allow for rounding */
- andl #0xfffffffc, %d1 /* Whole words */
-
- addl %d1, %a0 /* Copy from end */
- addl %d1, %a1 /* Copy from end */
- movel %a1, _ramstart /* Set start of ram */
-
-1:
- movel -(%a0), %d0 /* Copy dword */
- movel %d0, -(%a1)
- cmpl %a0, %a2 /* Check if at end */
- bne 1b
-
- PUTC 'D'
-
-/*
- * Initialize BSS segment to 0
- */
-
- lea __bss_start, %a0
- lea __bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 == %a1 */
-2: cmpal %a0, %a1
- beq 1f
- clrl (%a0)+
- bra 2b
-1:
-
- PUTC 'E'
-
-/*
- * Load the current task pointer and stack
- */
-
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
- PUTC 'F'
- PUTC '\r'
- PUTC '\n'
-
-/*
- * Go
- */
-
- jmp start_kernel
-
-/*
- * Local functions
- */
-
-#ifdef CRT_DEBUG
-putc:
- moveb %d7, 0xfffff907
-1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq 1b
- rts
-#endif
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
deleted file mode 100644
index 45a9dad29e3..00000000000
--- a/arch/m68k/platform/68328/head-pilot.S
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * linux/arch/m68knommu/platform/68328/head-pilot.S
- * - A startup file for the MC68328
- *
- * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
- * Kenneth Albanowski <kjahds@kjahds.com>,
- * The Silver Hammer Group, Ltd.
- *
- * (c) 1995, Dionne & Associates
- * (c) 1995, DKG Display Tech.
- */
-
-#define ASSEMBLY
-
-#define IMMED #
-#define DBG_PUTC(x) moveb IMMED x, 0xfffff907
-
-
-.global _stext
-.global _start
-
-.global _rambase
-.global _ramvec
-.global _ramstart
-.global _ramend
-
-.global bootlogo_bits
-
-/*****************************************************************************/
-
-.data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-.text
-
-_start:
-_stext:
-
-
-#ifdef CONFIG_M68328
-
-#ifdef CONFIG_PILOT
- .byte 0x4e, 0xfa, 0x00, 0x0a /* Jmp +X bytes */
- .byte 'b', 'o', 'o', 't'
- .word 10000
-
- nop
-#endif
-
- moveq #0, %d0
- movew %d0, 0xfffff618 /* Watchdog off */
- movel #0x00011f07, 0xfffff114 /* CS A1 Mask */
-
- movew #0x0800, 0xfffff906 /* Ignore CTS */
- movew #0x010b, 0xfffff902 /* BAUD to 9600 */
-
- movew #0x2410, 0xfffff200 /* PLLCR */
- movew #0x123, 0xfffff202 /* PLLFSR */
-
-#ifdef CONFIG_PILOT
- moveb #0, 0xfffffA27 /* LCKCON */
- movel #_start, 0xfffffA00 /* LSSA */
- moveb #0xa, 0xfffffA05 /* LVPW */
- movew #0x9f, 0xFFFFFa08 /* LXMAX */
- movew #0x9f, 0xFFFFFa0a /* LYMAX */
- moveb #9, 0xfffffa29 /* LBAR */
- moveb #0, 0xfffffa25 /* LPXCD */
- moveb #0x04, 0xFFFFFa20 /* LPICF */
- moveb #0x58, 0xfffffA27 /* LCKCON */
- moveb #0x85, 0xfffff429 /* PFDATA */
- moveb #0xd8, 0xfffffA27 /* LCKCON */
- moveb #0xc5, 0xfffff429 /* PFDATA */
- moveb #0xd5, 0xfffff429 /* PFDATA */
-
- moveal #0x00100000, %a3
- moveal #0x100ffc00, %a4
-#endif /* CONFIG_PILOT */
-
-#endif /* CONFIG_M68328 */
-
- movew #0x2700, %sr
- lea %a4@(-4), %sp
-
- DBG_PUTC('\r')
- DBG_PUTC('\n')
- DBG_PUTC('A')
-
- moveq #0,%d0
- movew #16384, %d0 /* PLL settle wait loop */
-L0:
- subw #1, %d0
- bne L0
-
- DBG_PUTC('B')
-
- /* Copy command line from beginning of RAM (+16) to end of bss */
- movel #CONFIG_VECTORBASE, %d7
- addl #16, %d7
- moveal %d7, %a0
- moveal #__bss_stop, %a1
- lea %a1@(512), %a2
-
- DBG_PUTC('C')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-L2:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi L2
-
- /* Copy data+init segment from ROM to RAM */
- moveal #_etext, %a0
- moveal #_sdata, %a1
- moveal #__init_end, %a2
-
- DBG_PUTC('D')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-LD1:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi LD1
-
- DBG_PUTC('E')
-
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 == %a1 */
-L1:
- movel #0, %a0@+
- cmpal %a0, %a1
- bhi L1
-
- DBG_PUTC('F')
-
- /* Copy command line from end of bss to command line */
- moveal #__bss_stop, %a0
- moveal #command_line, %a1
- lea %a1@(512), %a2
-
- DBG_PUTC('G')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-L3:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi L3
-
- movel #_sdata, %d0
- movel %d0, _rambase
- movel #__bss_stop, %d0
- movel %d0, _ramstart
-
- movel %a4, %d0
- subl #4096, %d0 /* Reserve 4K of stack */
- moveq #79, %d7
- movel %d0, _ramend
-
- pea 0
- pea env
- pea %sp@(4)
- pea 0
-
- DBG_PUTC('H')
-
-#ifdef CONFIG_PILOT
- movel #bootlogo_bits, 0xFFFFFA00
- moveb #10, 0xFFFFFA05
- movew #160, 0xFFFFFA08
- movew #160, 0xFFFFFA0A
-#endif /* CONFIG_PILOT */
-
- DBG_PUTC('I')
-
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
- DBG_PUTC('J')
- DBG_PUTC('\r')
- DBG_PUTC('\n')
-
- jsr start_kernel
-_exit:
-
- jmp _exit
-
-
- .data
-env:
- .long 0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
deleted file mode 100644
index 5189ef92609..00000000000
--- a/arch/m68k/platform/68328/head-ram.S
+++ /dev/null
@@ -1,141 +0,0 @@
-
- .global __main
- .global __rom_start
-
- .global _rambase
- .global _ramstart
-
- .global splash_bits
- .global _start
- .global _stext
- .global _edata
-
-#define DEBUG
-#define ROM_OFFSET 0x10C00000
-#define STACK_GAURD 0x10
-
- .text
-
-_start:
-_stext:
- movew #0x2700, %sr /* Exceptions off! */
-
-#if 0
- /* Init chip registers. uCsimm specific */
- moveb #0x00, 0xfffffb0b /* Watchdog off */
- moveb #0x10, 0xfffff000 /* SCR */
-
- movew #0x2400, 0xfffff200 /* PLLCR */
- movew #0x0123, 0xfffff202 /* PLLFSR */
-
- moveb #0x00, 0xfffff40b /* enable chip select */
- moveb #0x00, 0xfffff423 /* enable /DWE */
- moveb #0x08, 0xfffffd0d /* disable hardmap */
- moveb #0x07, 0xfffffd0e /* level 7 interrupt clear */
-
- movew #0x8600, 0xfffff100 /* FLASH at 0x10c00000 */
- movew #0x018b, 0xfffff110 /* 2Meg, enable, 0ws */
-
- movew #0x8f00, 0xfffffc00 /* DRAM configuration */
- movew #0x9667, 0xfffffc02 /* DRAM control */
- movew #0x0000, 0xfffff106 /* DRAM at 0x00000000 */
- movew #0x068f, 0xfffff116 /* 8Meg, enable, 0ws */
-
- moveb #0x40, 0xfffff300 /* IVR */
- movel #0x007FFFFF, %d0 /* IMR */
- movel %d0, 0xfffff304
-
- moveb 0xfffff42b, %d0
- andb #0xe0, %d0
- moveb %d0, 0xfffff42b
-
- moveb #0x08, 0xfffff907 /* Ignore CTS */
- movew #0x010b, 0xfffff902 /* BAUD to 9600 */
- movew #0xe100, 0xfffff900 /* enable */
-#endif
-
- movew #16384, %d0 /* PLL settle wait loop */
-L0:
- subw #1, %d0
- bne L0
-#ifdef DEBUG
- moveq #70, %d7 /* 'F' */
- moveb %d7,0xfffff907 /* No absolute addresses */
-pclp1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp1
-#endif /* DEBUG */
-
-#ifdef DEBUG
- moveq #82, %d7 /* 'R' */
- moveb %d7,0xfffff907 /* No absolute addresses */
-pclp3:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp3
-#endif /* DEBUG */
- moveal #0x007ffff0, %ssp
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 >= %a1 */
-L1:
- movel #0, %a0@+
- cmpal %a0, %a1
- bhi L1
-
-#ifdef DEBUG
- moveq #67, %d7 /* 'C' */
- jsr putc
-#endif /* DEBUG */
-
- pea 0
- pea env
- pea %sp@(4)
- pea 0
-
-#ifdef DEBUG
- moveq #70, %d7 /* 'F' */
- jsr putc
-#endif /* DEBUG */
-
-lp:
- jsr start_kernel
- jmp lp
-_exit:
-
- jmp _exit
-
-__main:
- /* nothing */
- rts
-
-#ifdef DEBUG
-putc:
- moveb %d7,0xfffff907
-pclp:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp
- rts
-#endif /* DEBUG */
-
- .data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-env:
- .long 0
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
deleted file mode 100644
index 3dff98ba2e9..00000000000
--- a/arch/m68k/platform/68328/head-rom.S
+++ /dev/null
@@ -1,105 +0,0 @@
-
- .global _start
- .global _stext
-
- .global _rambase
- .global _ramvec
- .global _ramstart
- .global _ramend
-
-#ifdef CONFIG_INIT_LCD
- .global bootlogo_bits
-#endif
-
- .data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
-
- .text
-_start:
-_stext: movew #0x2700,%sr
-#ifdef CONFIG_INIT_LCD
- movel #bootlogo_bits, 0xfffffA00 /* LSSA */
- moveb #0x28, 0xfffffA05 /* LVPW */
- movew #0x280, 0xFFFFFa08 /* LXMAX */
- movew #0x1df, 0xFFFFFa0a /* LYMAX */
- moveb #0, 0xfffffa29 /* LBAR */
- moveb #0, 0xfffffa25 /* LPXCD */
- moveb #0x08, 0xFFFFFa20 /* LPICF */
- moveb #0x01, 0xFFFFFA21 /* -ve pol */
- moveb #0x81, 0xfffffA27 /* LCKCON */
- movew #0xff00, 0xfffff412 /* LCD pins */
-#endif
- moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
- movew #32767, %d0 /* PLL settle wait loop */
-1: subq #1, %d0
- bne 1b
-
- /* Copy data segment from ROM to RAM */
- moveal #_etext, %a0
- moveal #_sdata, %a1
- moveal #_edata, %a2
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-1: movel %a0@+, %a1@+
- cmpal %a1, %a2
- bhi 1b
-
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
- /* Copy 0 to %a0 until %a0 == %a1 */
-
-1:
- clrl %a0@+
- cmpal %a0, %a1
- bhi 1b
-
- movel #_sdata, %d0
- movel %d0, _rambase
- movel #__bss_stop, %d0
- movel %d0, _ramstart
- movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
- movel %d0, _ramend
- movel #CONFIG_VECTORBASE, %d0
- movel %d0, _ramvec
-
-/*
- * load the current task pointer and stack
- */
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
-1: jsr start_kernel
- bra 1b
-_exit:
-
- jmp _exit
-
-
-putc:
- moveb %d7,0xfffff907
-1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq 1b
- rts
-
- .data
-env:
- .long 0
- .text
-
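The deleted 68328 head files (head-pilot.S, head-ram.S, head-rom.S) all follow the same broad early boot sequence: mask interrupts, point the stack somewhere usable, copy the initialised data segment from ROM to RAM where booting from ROM, zero the BSS, record the RAM layout in _rambase/_ramstart/_ramend, and finally call start_kernel(). The following is an orientation-only C sketch of that sequence; the real code has to run as the assembly above, before any C environment exists, and the symbol names are simply the linker/platform symbols those files use.

    /*
     * Orientation-only C sketch of what the deleted head-*.S files do.  The
     * real code must run as assembly before any C runtime exists; symbol
     * names are the linker/platform symbols used above.
     */
    #include <linux/string.h>
    #include <linux/start_kernel.h>

    extern char _etext[], _sdata[], _edata[];
    extern char __bss_start[], __bss_stop[];
    extern unsigned long _rambase, _ramstart, _ramend;

    static void example_head(unsigned long ram_end)
    {
    	/* ROM boot: copy the initialised data segment out to RAM */
    	memcpy(_sdata, _etext, _edata - _sdata);

    	/* Zero the BSS */
    	memset(__bss_start, 0, __bss_stop - __bss_start);

    	/* Publish the RAM layout consumed by the platform setup code */
    	_rambase  = (unsigned long)_sdata;
    	_ramstart = (unsigned long)__bss_stop;
    	_ramend   = ram_end;

    	start_kernel();		/* does not return */
    }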
diff --git a/arch/m68k/platform/68EZ328/Makefile b/arch/m68k/platform/68EZ328/Makefile
deleted file mode 100644
index b44d799b111..00000000000
--- a/arch/m68k/platform/68EZ328/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68EZ328.
-#
-
-obj-y := config.o
diff --git a/arch/m68k/platform/68VZ328/Makefile b/arch/m68k/platform/68VZ328/Makefile
deleted file mode 100644
index 81667416468..00000000000
--- a/arch/m68k/platform/68VZ328/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68k/platform/68VZ328.
-#
-
-obj-y := config.o
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 9cd13b4ce42..fddfdccae63 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -19,37 +19,58 @@
#include <asm/mcfsim.h>
#include <asm/mcfclk.h>
-/***************************************************************************/
-#ifndef MCFPM_PPMCR0
-struct clk *clk_get(struct device *dev, const char *id)
+static DEFINE_SPINLOCK(clk_lock);
+
+#ifdef MCFPM_PPMCR0
+/*
+ * For more advanced ColdFire parts that have clocks that can be enabled
+ * we supply enable/disable functions. These must properly define their
+ * clocks in their platform specific code.
+ */
+void __clk_init_enabled(struct clk *clk)
{
- return NULL;
+ clk->enabled = 1;
+ clk->clk_ops->enable(clk);
}
-EXPORT_SYMBOL(clk_get);
-int clk_enable(struct clk *clk)
+void __clk_init_disabled(struct clk *clk)
{
- return 0;
+ clk->enabled = 0;
+ clk->clk_ops->disable(clk);
}
-EXPORT_SYMBOL(clk_enable);
-void clk_disable(struct clk *clk)
+static void __clk_enable0(struct clk *clk)
{
+ __raw_writeb(clk->slot, MCFPM_PPMCR0);
}
-EXPORT_SYMBOL(clk_disable);
-void clk_put(struct clk *clk)
+static void __clk_disable0(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMSR0);
+}
+
+struct clk_ops clk_ops0 = {
+ .enable = __clk_enable0,
+ .disable = __clk_disable0,
+};
+
+#ifdef MCFPM_PPMCR1
+static void __clk_enable1(struct clk *clk)
{
+ __raw_writeb(clk->slot, MCFPM_PPMCR1);
}
-EXPORT_SYMBOL(clk_put);
-unsigned long clk_get_rate(struct clk *clk)
+static void __clk_disable1(struct clk *clk)
{
- return MCF_CLK;
+ __raw_writeb(clk->slot, MCFPM_PPMSR1);
}
-EXPORT_SYMBOL(clk_get_rate);
-#else
-static DEFINE_SPINLOCK(clk_lock);
+
+struct clk_ops clk_ops1 = {
+ .enable = __clk_enable1,
+ .disable = __clk_disable1,
+};
+#endif /* MCFPM_PPMCR1 */
+#endif /* MCFPM_PPMCR0 */
struct clk *clk_get(struct device *dev, const char *id)
{
@@ -101,48 +122,3 @@ unsigned long clk_get_rate(struct clk *clk)
EXPORT_SYMBOL(clk_get_rate);
/***************************************************************************/
-
-void __clk_init_enabled(struct clk *clk)
-{
- clk->enabled = 1;
- clk->clk_ops->enable(clk);
-}
-
-void __clk_init_disabled(struct clk *clk)
-{
- clk->enabled = 0;
- clk->clk_ops->disable(clk);
-}
-
-static void __clk_enable0(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMCR0);
-}
-
-static void __clk_disable0(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMSR0);
-}
-
-struct clk_ops clk_ops0 = {
- .enable = __clk_enable0,
- .disable = __clk_disable0,
-};
-
-#ifdef MCFPM_PPMCR1
-static void __clk_enable1(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMCR1);
-}
-
-static void __clk_disable1(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMSR1);
-}
-
-struct clk_ops clk_ops1 = {
- .enable = __clk_enable1,
- .disable = __clk_disable1,
-};
-#endif /* MCFPM_PPMCR1 */
-#endif /* MCFPM_PPMCR0 */
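After this rewrite clk.c keeps a single copy of the generic clk API (clk_get(), clk_enable(), clk_disable(), clk_get_rate(), clk_put()) and only the PPMCR0/PPMSR0 gate helpers stay conditional on MCFPM_PPMCR0. Below is a hedged consumer-side sketch using only the generic <linux/clk.h> calls; the clock name "mcfuart.0" is taken from the DEFINE_CLK() tables added further down, and the error handling is deliberately minimal.

    /* Sketch: a platform driver consuming one of the clocks named below. */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int example_uart_clk_setup(struct device *dev)
    {
    	struct clk *clk;
    	int ret;

    	clk = clk_get(dev, "mcfuart.0");
    	if (IS_ERR_OR_NULL(clk))
    		return -ENODEV;

    	ret = clk_enable(clk);	/* gated through MCFPM_PPMCR0 where present */
    	if (ret) {
    		clk_put(clk);
    		return ret;
    	}

    	dev_info(dev, "uart clock at %lu Hz\n", clk_get_rate(clk));
    	return 0;
    }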
diff --git a/arch/m68k/platform/coldfire/intc-5249.c b/arch/m68k/platform/coldfire/intc-5249.c
index 0864b836699..b0d1641053e 100644
--- a/arch/m68k/platform/coldfire/intc-5249.c
+++ b/arch/m68k/platform/coldfire/intc-5249.c
@@ -21,7 +21,7 @@ static void intc2_irq_gpio_mask(struct irq_data *d)
{
u32 imr;
imr = readl(MCFSIM2_GPIOINTENABLE);
- imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+ imr &= ~(0x1 << (d->irq - MCF_IRQ_GPIO0));
writel(imr, MCFSIM2_GPIOINTENABLE);
}
@@ -29,13 +29,13 @@ static void intc2_irq_gpio_unmask(struct irq_data *d)
{
u32 imr;
imr = readl(MCFSIM2_GPIOINTENABLE);
- imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+ imr |= (0x1 << (d->irq - MCF_IRQ_GPIO0));
writel(imr, MCFSIM2_GPIOINTENABLE);
}
static void intc2_irq_gpio_ack(struct irq_data *d)
{
- writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCFSIM2_GPIOINTCLEAR);
+ writel(0x1 << (d->irq - MCF_IRQ_GPIO0), MCFSIM2_GPIOINTCLEAR);
}
static struct irq_chip intc2_irq_gpio_chip = {
@@ -50,7 +50,7 @@ static int __init mcf_intc2_init(void)
int irq;
/* GPIO interrupt sources */
- for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
+ for (irq = MCF_IRQ_GPIO0; (irq <= MCF_IRQ_GPIO7); irq++) {
irq_set_chip(irq, &intc2_irq_gpio_chip);
irq_set_handler(irq, handle_edge_irq);
}
diff --git a/arch/m68k/platform/coldfire/m5206.c b/arch/m68k/platform/coldfire/m5206.c
index 6bfbeebd231..0e55f449a88 100644
--- a/arch/m68k/platform/coldfire/m5206.c
+++ b/arch/m68k/platform/coldfire/m5206.c
@@ -16,6 +16,26 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
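Each ColdFire variant now exports a NULL-terminated mcf_clks[] table like the one above. DEFINE_CLK() presumably amounts to a struct clk initialiser keyed by name and fixed rate, with clk_get() reduced to a name match over the table; the standalone sketch below is an assumption drawn from that usage pattern (the authoritative macro lives in <asm/mcfclk.h>), and the literal rate merely stands in for MCF_BUSCLK.

    /* Standalone sketch -- assumed shape only, not the kernel definitions. */
    #include <stddef.h>
    #include <string.h>

    struct clk {
    	const char *name;	/* e.g. "mcfuart.0" */
    	unsigned long rate;	/* MCF_CLK or MCF_BUSCLK in the real tables */
    };

    #define DEFINE_CLK(ref, clkname, clkrate) \
    	static struct clk clk_##ref = { .name = clkname, .rate = clkrate }

    DEFINE_CLK(sys, "sys.0", 66666666UL);	/* literal stands in for MCF_BUSCLK */

    static struct clk *mcf_clks[] = { &clk_sys, NULL };

    /* clk_get() can then be little more than a walk over the platform table. */
    static struct clk *example_clk_lookup(const char *id)
    {
    	struct clk **p;

    	for (p = mcf_clks; *p; p++)
    		if (strcmp((*p)->name, id) == 0)
    			return *p;
    	return NULL;
    }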
diff --git a/arch/m68k/platform/coldfire/m523x.c b/arch/m68k/platform/coldfire/m523x.c
index ff37fe9553e..2b10e9f198c 100644
--- a/arch/m68k/platform/coldfire/m523x.c
+++ b/arch/m68k/platform/coldfire/m523x.c
@@ -19,6 +19,34 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ NULL
+};
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5249.c b/arch/m68k/platform/coldfire/m5249.c
index 23b19cb7ab5..c80b5e51d29 100644
--- a/arch/m68k/platform/coldfire/m5249.c
+++ b/arch/m68k/platform/coldfire/m5249.c
@@ -16,6 +16,26 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
@@ -28,8 +48,8 @@ static struct resource m5249_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = MCFINTC2_GPIOIRQ6,
- .end = MCFINTC2_GPIOIRQ6,
+ .start = MCF_IRQ_GPIO6,
+ .end = MCF_IRQ_GPIO6,
.flags = IORESOURCE_IRQ,
},
};
@@ -75,8 +95,8 @@ static void __init m5249_smc91x_init(void)
gpio = readl(MCFSIM2_GPIOINTENABLE);
writel(gpio | 0x40, MCFSIM2_GPIOINTENABLE);
- gpio = readl(MCFSIM2_INTLEVEL5);
- writel(gpio | 0x04000000, MCFSIM2_INTLEVEL5);
+ gpio = readl(MCFINTC2_INTPRI5);
+ writel(gpio | 0x04000000, MCFINTC2_INTPRI5);
}
#endif /* CONFIG_M5249C3 */
diff --git a/arch/m68k/platform/coldfire/m525x.c b/arch/m68k/platform/coldfire/m525x.c
index fce8f8a45bf..5b9f657b2df 100644
--- a/arch/m68k/platform/coldfire/m525x.c
+++ b/arch/m68k/platform/coldfire/m525x.c
@@ -16,6 +16,26 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5272.c b/arch/m68k/platform/coldfire/m5272.c
index 45b246d052e..a8c5856fe5e 100644
--- a/arch/m68k/platform/coldfire/m5272.c
+++ b/arch/m68k/platform/coldfire/m5272.c
@@ -19,6 +19,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
/***************************************************************************/
@@ -30,6 +31,31 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcftmr2, "mcftmr.2", MCF_BUSCLK);
+DEFINE_CLK(mcftmr3, "mcftmr.3", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcftmr2,
+ &clk_mcftmr3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_fec0,
+ NULL
+};
+
+/***************************************************************************/
+
static void __init m5272_uarts_init(void)
{
u32 v;
diff --git a/arch/m68k/platform/coldfire/m527x.c b/arch/m68k/platform/coldfire/m527x.c
index 1431ba03c60..6fbfe9096c3 100644
--- a/arch/m68k/platform/coldfire/m527x.c
+++ b/arch/m68k/platform/coldfire/m527x.c
@@ -20,6 +20,36 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ &clk_fec1,
+ NULL
+};
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c
index f9f7e6a13d0..83b7dad7a84 100644
--- a/arch/m68k/platform/coldfire/m528x.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -21,6 +21,34 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ NULL
+};
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5307.c b/arch/m68k/platform/coldfire/m5307.c
index a568d2870d1..88743536138 100644
--- a/arch/m68k/platform/coldfire/m5307.c
+++ b/arch/m68k/platform/coldfire/m5307.c
@@ -17,6 +17,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfclk.h>
/***************************************************************************/
@@ -28,6 +29,25 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
+
+/***************************************************************************/
+
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel) || \
diff --git a/arch/m68k/platform/coldfire/m5407.c b/arch/m68k/platform/coldfire/m5407.c
index bb6c746ae81..2fb3cdbfde3 100644
--- a/arch/m68k/platform/coldfire/m5407.c
+++ b/arch/m68k/platform/coldfire/m5407.c
@@ -16,6 +16,26 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m54xx.c b/arch/m68k/platform/coldfire/m54xx.c
index b587bf35175..952da53aa0b 100644
--- a/arch/m68k/platform/coldfire/m54xx.c
+++ b/arch/m68k/platform/coldfire/m54xx.c
@@ -14,19 +14,45 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/clk.h>
#include <linux/bootmem.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/m54xxsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
#include <asm/m54xxgpt.h>
+#include <asm/mcfclk.h>
#ifdef CONFIG_MMU
#include <asm/mmu_context.h>
#endif
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt0, "mcfslt.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt1, "mcfslt.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfslt0,
+ &clk_mcfslt1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_mcfuart3,
+ NULL
+};
+
+/***************************************************************************/
+
static void __init m54xx_uarts_init(void)
{
/* enable io pins */
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 78b60f53e90..6bbca30c918 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -66,6 +66,8 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
#ifdef CONFIG_SUN3
intersil_clear();
#endif
+ sun3_disable_irq(5);
+ sun3_enable_irq(5);
#ifdef CONFIG_SUN3
intersil_clear();
#endif
@@ -79,41 +81,18 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
static irqreturn_t sun3_vec255(int irq, void *dev_id)
{
-// intersil_clear();
return IRQ_HANDLED;
}
-static void sun3_irq_enable(struct irq_data *data)
-{
- sun3_enable_irq(data->irq);
-};
-
-static void sun3_irq_disable(struct irq_data *data)
-{
- sun3_disable_irq(data->irq);
-};
-
-static struct irq_chip sun3_irq_chip = {
- .name = "sun3",
- .irq_startup = m68k_irq_startup,
- .irq_shutdown = m68k_irq_shutdown,
- .irq_enable = sun3_irq_enable,
- .irq_disable = sun3_irq_disable,
- .irq_mask = sun3_irq_disable,
- .irq_unmask = sun3_irq_enable,
-};
-
void __init sun3_init_IRQ(void)
{
*sun3_intreg = 1;
- m68k_setup_irq_controller(&sun3_irq_chip, handle_level_irq, IRQ_AUTO_1,
- 7);
m68k_setup_user_interrupt(VEC_USER, 128);
- if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
+ if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "clock", NULL))
pr_err("Couldn't register %s interrupt\n", "int5");
- if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL))
+ if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "nmi", NULL))
pr_err("Couldn't register %s interrupt\n", "int7");
if (request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL))
pr_err("Couldn't register %s interrupt\n", "vec255");
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 4bcf89148f3..ba3b7c8c04b 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -26,8 +26,6 @@ config MICROBLAZE
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS
config SWAP
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index eb3a46c096f..d3c51a6a601 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,6 +1,4 @@
-include include/asm-generic/Kbuild.asm
-header-y += elf.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 01d228286cb..46460f1c49c 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -114,6 +114,8 @@ static inline void __dma_sync(unsigned long paddr,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
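debug_dma_mapping_error() only informs CONFIG_DMA_API_DEBUG that the driver did consult dma_mapping_error() for this handle; it does not change the result. A small sketch of the map-then-check pattern the hook is there to verify (function and buffer names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_map_for_device(struct device *dev, void *buf, size_t len,
    				  dma_addr_t *handle)
    {
    	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, *handle))	/* also satisfies DMA debug */
    		return -ENOMEM;
    	return 0;
    }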
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 640ddd4b6a9..65902444906 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -7,119 +7,24 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#ifndef _ASM_MICROBLAZE_ELF_H
#define _ASM_MICROBLAZE_ELF_H
-/*
- * Note there is no "official" ELF designation for Microblaze.
- * I've snaffled the value from the microblaze binutils source code
- * /binutils/microblaze/include/elf/microblaze.h
- */
-#define EM_MICROBLAZE 189
-#define EM_MICROBLAZE_OLD 0xbaab
-#define ELF_ARCH EM_MICROBLAZE
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_MICROBLAZE \
- || (x)->e_machine == EM_MICROBLAZE_OLD)
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS32
+#include <uapi/asm/elf.h>
#ifndef __uClinux__
-
-/*
- * ELF register definitions..
- */
-
-#include <asm/ptrace.h>
-#include <asm/byteorder.h>
-
#ifndef ELF_GREG_T
-#define ELF_GREG_T
-typedef unsigned long elf_greg_t;
#endif
-
#ifndef ELF_NGREG
-#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
#endif
-
#ifndef ELF_GREGSET_T
-#define ELF_GREGSET_T
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#endif
-
#ifndef ELF_FPREGSET_T
-#define ELF_FPREGSET_T
-
-/* TBD */
-#define ELF_NFPREG 33 /* includes fsr */
-typedef unsigned long elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/* typedef struct user_fpu_struct elf_fpregset_t; */
#endif
-
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
- */
-
-#define ELF_ET_DYN_BASE (0x08000000)
-
#ifdef __MICROBLAZEEL__
-#define ELF_DATA ELFDATA2LSB
#else
-#define ELF_DATA ELFDATA2MSB
#endif
-
-#define ELF_EXEC_PAGESIZE PAGE_SIZE
-
-
-#define ELF_CORE_COPY_REGS(_dest, _regs) \
- memcpy((char *) &_dest, (char *) _regs, \
- sizeof(struct pt_regs));
-
-/* This yields a mask that user programs can use to figure out what
- * instruction set this CPU supports. This could be done in user space,
- * but it's not easy, and we've already done it here.
- */
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
- * intent than poking at uname or /proc/cpuinfo.
-
- * For the moment, we have only optimizations for the Intel generations,
- * but that could change...
- */
-#define ELF_PLATFORM (NULL)
-
-/* Added _f parameter. Is this definition correct: TBD */
-#define ELF_PLAT_INIT(_r, _f) \
-do { \
- _r->r1 = _r->r1 = _r->r2 = _r->r3 = \
- _r->r4 = _r->r5 = _r->r6 = _r->r7 = \
- _r->r8 = _r->r9 = _r->r10 = _r->r11 = \
- _r->r12 = _r->r13 = _r->r14 = _r->r15 = \
- _r->r16 = _r->r17 = _r->r18 = _r->r19 = \
- _r->r20 = _r->r21 = _r->r22 = _r->r23 = \
- _r->r24 = _r->r25 = _r->r26 = _r->r27 = \
- _r->r28 = _r->r29 = _r->r30 = _r->r31 = \
- 0; \
-} while (0)
-
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
-#endif
-
#endif /* __uClinux__ */
-
#endif /* _ASM_MICROBLAZE_ELF_H */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index af0144b91b7..b4a4cb150aa 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -29,6 +29,8 @@ DECLARE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+
+extern asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall);
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_ENTRY_H */
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h
index 94e92c80585..5b18ec124e5 100644
--- a/arch/microblaze/include/asm/ptrace.h
+++ b/arch/microblaze/include/asm/ptrace.h
@@ -5,80 +5,23 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#ifndef _ASM_MICROBLAZE_PTRACE_H
#define _ASM_MICROBLAZE_PTRACE_H
-#ifndef __ASSEMBLY__
-
-typedef unsigned long microblaze_reg_t;
+#include <uapi/asm/ptrace.h>
-struct pt_regs {
- microblaze_reg_t r0;
- microblaze_reg_t r1;
- microblaze_reg_t r2;
- microblaze_reg_t r3;
- microblaze_reg_t r4;
- microblaze_reg_t r5;
- microblaze_reg_t r6;
- microblaze_reg_t r7;
- microblaze_reg_t r8;
- microblaze_reg_t r9;
- microblaze_reg_t r10;
- microblaze_reg_t r11;
- microblaze_reg_t r12;
- microblaze_reg_t r13;
- microblaze_reg_t r14;
- microblaze_reg_t r15;
- microblaze_reg_t r16;
- microblaze_reg_t r17;
- microblaze_reg_t r18;
- microblaze_reg_t r19;
- microblaze_reg_t r20;
- microblaze_reg_t r21;
- microblaze_reg_t r22;
- microblaze_reg_t r23;
- microblaze_reg_t r24;
- microblaze_reg_t r25;
- microblaze_reg_t r26;
- microblaze_reg_t r27;
- microblaze_reg_t r28;
- microblaze_reg_t r29;
- microblaze_reg_t r30;
- microblaze_reg_t r31;
- microblaze_reg_t pc;
- microblaze_reg_t msr;
- microblaze_reg_t ear;
- microblaze_reg_t esr;
- microblaze_reg_t fsr;
- int pt_mode;
-};
-
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#define kernel_mode(regs) ((regs)->pt_mode)
#define user_mode(regs) (!kernel_mode(regs))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->r1)
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->r3;
}
-#else /* __KERNEL__ */
-
-/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
-#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t))
-#define PT_PC (32 * sizeof(microblaze_reg_t))
-#define PT_MSR (33 * sizeof(microblaze_reg_t))
-#define PT_EAR (34 * sizeof(microblaze_reg_t))
-#define PT_ESR (35 * sizeof(microblaze_reg_t))
-#define PT_FSR (36 * sizeof(microblaze_reg_t))
-#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t))
-
-#endif /* __KERNEL */
-
#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_MICROBLAZE_PTRACE_H */
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 0061aa13a34..0e0b0a5ec75 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -7,15 +7,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#ifndef _ASM_MICROBLAZE_SETUP_H
#define _ASM_MICROBLAZE_SETUP_H
-#define COMMAND_LINE_SIZE 256
+#include <uapi/asm/setup.h>
# ifndef __ASSEMBLY__
-
-# ifdef __KERNEL__
extern unsigned int boot_cpuid; /* move to smp.h */
extern char cmd_line[COMMAND_LINE_SIZE];
@@ -53,6 +50,5 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end);
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-# endif/* __KERNEL__ */
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index ef25f7538d4..927540d3cb7 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -298,11 +298,10 @@ extern long __user_bad(void);
#define __put_user_check(x, ptr, size) \
({ \
- typeof(*(ptr)) __pu_val; \
+ typeof(*(ptr)) volatile __pu_val = x; \
typeof(*(ptr)) __user *__pu_addr = (ptr); \
int __pu_err = 0; \
\
- __pu_val = (x); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
switch (size) { \
case 1: \
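The __put_user_check() change evaluates x once, up front, into a local of the destination type, instead of after access_ok(); that keeps expressions with side effects and implicit conversions well behaved. A brief caller-side sketch, under the usual assumption that put_user() is built on __put_user_check(); example_next_value() is a hypothetical helper.

    #include <linux/uaccess.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    extern u32 example_next_value(void);	/* hypothetical, evaluated exactly once */

    static int example_report(u32 __user *slot)
    {
    	if (put_user(example_next_value(), slot))
    		return -EFAULT;
    	return 0;
    }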
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 94d978986b7..a5f06ac9711 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -6,398 +6,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#ifndef _ASM_MICROBLAZE_UNISTD_H
#define _ASM_MICROBLAZE_UNISTD_H
-#define __NR_restart_syscall 0 /* ok */
-#define __NR_exit 1 /* ok */
-#define __NR_fork 2 /* not for no MMU - weird */
-#define __NR_read 3 /* ok */
-#define __NR_write 4 /* ok */
-#define __NR_open 5 /* openat */
-#define __NR_close 6 /* ok */
-#define __NR_waitpid 7 /* waitid */
-#define __NR_creat 8 /* openat */
-#define __NR_link 9 /* linkat */
-#define __NR_unlink 10 /* unlinkat */
-#define __NR_execve 11 /* ok */
-#define __NR_chdir 12 /* ok */
-#define __NR_time 13 /* obsolete -> sys_gettimeofday */
-#define __NR_mknod 14 /* mknodat */
-#define __NR_chmod 15 /* fchmodat */
-#define __NR_lchown 16 /* ok */
-#define __NR_break 17 /* don't know */
-#define __NR_oldstat 18 /* remove */
-#define __NR_lseek 19 /* ok */
-#define __NR_getpid 20 /* ok */
-#define __NR_mount 21 /* ok */
-#define __NR_umount 22 /* ok */ /* use only umount2 */
-#define __NR_setuid 23 /* ok */
-#define __NR_getuid 24 /* ok */
-#define __NR_stime 25 /* obsolete -> sys_settimeofday */
-#define __NR_ptrace 26 /* ok */
-#define __NR_alarm 27 /* obsolete -> sys_setitimer */
-#define __NR_oldfstat 28 /* remove */
-#define __NR_pause 29 /* obsolete -> sys_rt_sigtimedwait */
-#define __NR_utime 30 /* obsolete -> sys_utimesat */
-#define __NR_stty 31 /* remove */
-#define __NR_gtty 32 /* remove */
-#define __NR_access 33 /* faccessat */
-/* can be implemented by sys_setpriority */
-#define __NR_nice 34
-#define __NR_ftime 35 /* remove */
-#define __NR_sync 36 /* ok */
-#define __NR_kill 37 /* ok */
-#define __NR_rename 38 /* renameat */
-#define __NR_mkdir 39 /* mkdirat */
-#define __NR_rmdir 40 /* unlinkat */
-#define __NR_dup 41 /* ok */
-#define __NR_pipe 42 /* ok */
-#define __NR_times 43 /* ok */
-#define __NR_prof 44 /* remove */
-#define __NR_brk 45 /* ok -mmu, nommu specific */
-#define __NR_setgid 46 /* ok */
-#define __NR_getgid 47 /* ok */
-#define __NR_signal 48 /* obsolete -> sys_rt_sigaction */
-#define __NR_geteuid 49 /* ok */
-#define __NR_getegid 50 /* ok */
-#define __NR_acct 51 /* add it and then I can disable it */
-#define __NR_umount2 52 /* remove */
-#define __NR_lock 53 /* remove */
-#define __NR_ioctl 54 /* ok */
-#define __NR_fcntl 55 /* ok -> 64bit version*/
-#define __NR_mpx 56 /* remove */
-#define __NR_setpgid 57 /* ok */
-#define __NR_ulimit 58 /* remove */
-#define __NR_oldolduname 59 /* remove */
-#define __NR_umask 60 /* ok */
-#define __NR_chroot 61 /* ok */
-#define __NR_ustat 62 /* obsolete -> statfs64 */
-#define __NR_dup2 63 /* ok */
-#define __NR_getppid 64 /* ok */
-#define __NR_getpgrp 65 /* obsolete -> sys_getpgid */
-#define __NR_setsid 66 /* ok */
-#define __NR_sigaction 67 /* obsolete -> rt_sigaction */
-#define __NR_sgetmask 68 /* obsolete -> sys_rt_sigprocmask */
-#define __NR_ssetmask 69 /* obsolete ->sys_rt_sigprocmask */
-#define __NR_setreuid 70 /* ok */
-#define __NR_setregid 71 /* ok */
-#define __NR_sigsuspend 72 /* obsolete -> rt_sigsuspend */
-#define __NR_sigpending 73 /* obsolete -> sys_rt_sigpending */
-#define __NR_sethostname 74 /* ok */
-#define __NR_setrlimit 75 /* ok */
-#define __NR_getrlimit 76 /* ok Back compatible 2G limited rlimit */
-#define __NR_getrusage 77 /* ok */
-#define __NR_gettimeofday 78 /* ok */
-#define __NR_settimeofday 79 /* ok */
-#define __NR_getgroups 80 /* ok */
-#define __NR_setgroups 81 /* ok */
-#define __NR_select 82 /* obsolete -> sys_pselect7 */
-#define __NR_symlink 83 /* symlinkat */
-#define __NR_oldlstat 84 /* remove */
-#define __NR_readlink 85 /* obsolete -> sys_readlinkat */
-#define __NR_uselib 86 /* remove */
-#define __NR_swapon 87 /* ok */
-#define __NR_reboot 88 /* ok */
-#define __NR_readdir 89 /* remove ? */
-#define __NR_mmap 90 /* obsolete -> sys_mmap2 */
-#define __NR_munmap 91 /* ok - mmu and nommu */
-#define __NR_truncate 92 /* ok or truncate64 */
-#define __NR_ftruncate 93 /* ok or ftruncate64 */
-#define __NR_fchmod 94 /* ok */
-#define __NR_fchown 95 /* ok */
-#define __NR_getpriority 96 /* ok */
-#define __NR_setpriority 97 /* ok */
-#define __NR_profil 98 /* remove */
-#define __NR_statfs 99 /* ok or statfs64 */
-#define __NR_fstatfs 100 /* ok or fstatfs64 */
-#define __NR_ioperm 101 /* remove */
-#define __NR_socketcall 102 /* remove */
-#define __NR_syslog 103 /* ok */
-#define __NR_setitimer 104 /* ok */
-#define __NR_getitimer 105 /* ok */
-#define __NR_stat 106 /* remove */
-#define __NR_lstat 107 /* remove */
-#define __NR_fstat 108 /* remove */
-#define __NR_olduname 109 /* remove */
-#define __NR_iopl 110 /* remove */
-#define __NR_vhangup 111 /* ok */
-#define __NR_idle 112 /* remove */
-#define __NR_vm86old 113 /* remove */
-#define __NR_wait4 114 /* obsolete -> waitid */
-#define __NR_swapoff 115 /* ok */
-#define __NR_sysinfo 116 /* ok */
-#define __NR_ipc 117 /* remove - direct call */
-#define __NR_fsync 118 /* ok */
-#define __NR_sigreturn 119 /* obsolete -> sys_rt_sigreturn */
-#define __NR_clone 120 /* ok */
-#define __NR_setdomainname 121 /* ok */
-#define __NR_uname 122 /* remove */
-#define __NR_modify_ldt 123 /* remove */
-#define __NR_adjtimex 124 /* ok */
-#define __NR_mprotect 125 /* remove */
-#define __NR_sigprocmask 126 /* obsolete -> sys_rt_sigprocmask */
-#define __NR_create_module 127 /* remove */
-#define __NR_init_module 128 /* ok */
-#define __NR_delete_module 129 /* ok */
-#define __NR_get_kernel_syms 130 /* remove */
-#define __NR_quotactl 131 /* ok */
-#define __NR_getpgid 132 /* ok */
-#define __NR_fchdir 133 /* ok */
-#define __NR_bdflush 134 /* remove */
-#define __NR_sysfs 135 /* needed for busybox */
-#define __NR_personality 136 /* ok */
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138 /* ok */
-#define __NR_setfsgid 139 /* ok */
-#define __NR__llseek 140 /* remove only lseek */
-#define __NR_getdents 141 /* ok or getdents64 */
-#define __NR__newselect 142 /* remove */
-#define __NR_flock 143 /* ok */
-#define __NR_msync 144 /* remove */
-#define __NR_readv 145 /* ok */
-#define __NR_writev 146 /* ok */
-#define __NR_getsid 147 /* ok */
-#define __NR_fdatasync 148 /* ok */
-#define __NR__sysctl 149 /* remove */
-#define __NR_mlock 150 /* ok - nommu or mmu */
-#define __NR_munlock 151 /* ok - nommu or mmu */
-#define __NR_mlockall 152 /* ok - nommu or mmu */
-#define __NR_munlockall 153 /* ok - nommu or mmu */
-#define __NR_sched_setparam 154 /* ok */
-#define __NR_sched_getparam 155 /* ok */
-#define __NR_sched_setscheduler 156 /* ok */
-#define __NR_sched_getscheduler 157 /* ok */
-#define __NR_sched_yield 158 /* ok */
-#define __NR_sched_get_priority_max 159 /* ok */
-#define __NR_sched_get_priority_min 160 /* ok */
-#define __NR_sched_rr_get_interval 161 /* ok */
-#define __NR_nanosleep 162 /* ok */
-#define __NR_mremap 163 /* ok - nommu or mmu */
-#define __NR_setresuid 164 /* ok */
-#define __NR_getresuid 165 /* ok */
-#define __NR_vm86 166 /* remove */
-#define __NR_query_module 167 /* ok */
-#define __NR_poll 168 /* obsolete -> sys_ppoll */
-#define __NR_nfsservctl 169 /* ok */
-#define __NR_setresgid 170 /* ok */
-#define __NR_getresgid 171 /* ok */
-#define __NR_prctl 172 /* ok */
-#define __NR_rt_sigreturn 173 /* ok */
-#define __NR_rt_sigaction 174 /* ok */
-#define __NR_rt_sigprocmask 175 /* ok */
-#define __NR_rt_sigpending 176 /* ok */
-#define __NR_rt_sigtimedwait 177 /* ok */
-#define __NR_rt_sigqueueinfo 178 /* ok */
-#define __NR_rt_sigsuspend 179 /* ok */
-#define __NR_pread64 180 /* ok */
-#define __NR_pwrite64 181 /* ok */
-#define __NR_chown 182 /* obsolete -> fchownat */
-#define __NR_getcwd 183 /* ok */
-#define __NR_capget 184 /* ok */
-#define __NR_capset 185 /* ok */
-#define __NR_sigaltstack 186 /* remove */
-#define __NR_sendfile 187 /* ok -> exist 64bit version*/
-#define __NR_getpmsg 188 /* remove */
-/* remove - some people actually want streams */
-#define __NR_putpmsg 189
-/* for noMMU - group with clone -> maybe remove */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* remove - SuS compliant getrlimit */
-#define __NR_mmap2 192 /* ok */
-#define __NR_truncate64 193 /* ok */
-#define __NR_ftruncate64 194 /* ok */
-#define __NR_stat64 195 /* remove _ARCH_WANT_STAT64 */
-#define __NR_lstat64 196 /* remove _ARCH_WANT_STAT64 */
-#define __NR_fstat64 197 /* remove _ARCH_WANT_STAT64 */
-#define __NR_lchown32 198 /* ok - without 32 */
-#define __NR_getuid32 199 /* ok - without 32 */
-#define __NR_getgid32 200 /* ok - without 32 */
-#define __NR_geteuid32 201 /* ok - without 32 */
-#define __NR_getegid32 202 /* ok - without 32 */
-#define __NR_setreuid32 203 /* ok - without 32 */
-#define __NR_setregid32 204 /* ok - without 32 */
-#define __NR_getgroups32 205 /* ok - without 32 */
-#define __NR_setgroups32 206 /* ok - without 32 */
-#define __NR_fchown32 207 /* ok - without 32 */
-#define __NR_setresuid32 208 /* ok - without 32 */
-#define __NR_getresuid32 209 /* ok - without 32 */
-#define __NR_setresgid32 210 /* ok - without 32 */
-#define __NR_getresgid32 211 /* ok - without 32 */
-#define __NR_chown32 212 /* ok - without 32 -obsolete -> fchownat */
-#define __NR_setuid32 213 /* ok - without 32 */
-#define __NR_setgid32 214 /* ok - without 32 */
-#define __NR_setfsuid32 215 /* ok - without 32 */
-#define __NR_setfsgid32 216 /* ok - without 32 */
-#define __NR_pivot_root 217 /* ok */
-#define __NR_mincore 218 /* ok */
-#define __NR_madvise 219 /* ok */
-#define __NR_getdents64 220 /* ok */
-#define __NR_fcntl64 221 /* ok */
-/* 223 is unused */
-#define __NR_gettid 224 /* ok */
-#define __NR_readahead 225 /* ok */
-#define __NR_setxattr 226 /* ok */
-#define __NR_lsetxattr 227 /* ok */
-#define __NR_fsetxattr 228 /* ok */
-#define __NR_getxattr 229 /* ok */
-#define __NR_lgetxattr 230 /* ok */
-#define __NR_fgetxattr 231 /* ok */
-#define __NR_listxattr 232 /* ok */
-#define __NR_llistxattr 233 /* ok */
-#define __NR_flistxattr 234 /* ok */
-#define __NR_removexattr 235 /* ok */
-#define __NR_lremovexattr 236 /* ok */
-#define __NR_fremovexattr 237 /* ok */
-#define __NR_tkill 238 /* ok */
-#define __NR_sendfile64 239 /* ok */
-#define __NR_futex 240 /* ok */
-#define __NR_sched_setaffinity 241 /* ok */
-#define __NR_sched_getaffinity 242 /* ok */
-#define __NR_set_thread_area 243 /* remove */
-#define __NR_get_thread_area 244 /* remove */
-#define __NR_io_setup 245 /* ok */
-#define __NR_io_destroy 246 /* ok */
-#define __NR_io_getevents 247 /* ok */
-#define __NR_io_submit 248 /* ok */
-#define __NR_io_cancel 249 /* ok */
-#define __NR_fadvise64 250 /* remove -> sys_fadvise64_64 */
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252 /* ok */
-#define __NR_lookup_dcookie 253 /* ok */
-#define __NR_epoll_create 254 /* ok */
-#define __NR_epoll_ctl 255 /* ok */
-#define __NR_epoll_wait 256 /* obsolete -> sys_epoll_pwait */
-#define __NR_remap_file_pages 257 /* only for mmu */
-#define __NR_set_tid_address 258 /* ok */
-#define __NR_timer_create 259 /* ok */
-#define __NR_timer_settime (__NR_timer_create+1) /* 260 */ /* ok */
-#define __NR_timer_gettime (__NR_timer_create+2) /* 261 */ /* ok */
-#define __NR_timer_getoverrun (__NR_timer_create+3) /* 262 */ /* ok */
-#define __NR_timer_delete (__NR_timer_create+4) /* 263 */ /* ok */
-#define __NR_clock_settime (__NR_timer_create+5) /* 264 */ /* ok */
-#define __NR_clock_gettime (__NR_timer_create+6) /* 265 */ /* ok */
-#define __NR_clock_getres (__NR_timer_create+7) /* 266 */ /* ok */
-#define __NR_clock_nanosleep (__NR_timer_create+8) /* 267 */ /* ok */
-#define __NR_statfs64 268 /* ok */
-#define __NR_fstatfs64 269 /* ok */
-#define __NR_tgkill 270 /* ok */
-#define __NR_utimes 271 /* obsolete -> sys_futimesat */
-#define __NR_fadvise64_64 272 /* ok */
-#define __NR_vserver 273 /* ok */
-#define __NR_mbind 274 /* only for mmu */
-#define __NR_get_mempolicy 275 /* only for mmu */
-#define __NR_set_mempolicy 276 /* only for mmu */
-#define __NR_mq_open 277 /* ok */
-#define __NR_mq_unlink (__NR_mq_open+1) /* 278 */ /* ok */
-#define __NR_mq_timedsend (__NR_mq_open+2) /* 279 */ /* ok */
-#define __NR_mq_timedreceive (__NR_mq_open+3) /* 280 */ /* ok */
-#define __NR_mq_notify (__NR_mq_open+4) /* 281 */ /* ok */
-#define __NR_mq_getsetattr (__NR_mq_open+5) /* 282 */ /* ok */
-#define __NR_kexec_load 283 /* ok */
-#define __NR_waitid 284 /* ok */
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286 /* ok */
-#define __NR_request_key 287 /* ok */
-#define __NR_keyctl 288 /* ok */
-#define __NR_ioprio_set 289 /* ok */
-#define __NR_ioprio_get 290 /* ok */
-#define __NR_inotify_init 291 /* ok */
-#define __NR_inotify_add_watch 292 /* ok */
-#define __NR_inotify_rm_watch 293 /* ok */
-#define __NR_migrate_pages 294 /* mmu */
-#define __NR_openat 295 /* ok */
-#define __NR_mkdirat 296 /* ok */
-#define __NR_mknodat 297 /* ok */
-#define __NR_fchownat 298 /* ok */
-#define __NR_futimesat 299 /* obsolete -> sys_utimesat */
-#define __NR_fstatat64 300 /* stat64 */
-#define __NR_unlinkat 301 /* ok */
-#define __NR_renameat 302 /* ok */
-#define __NR_linkat 303 /* ok */
-#define __NR_symlinkat 304 /* ok */
-#define __NR_readlinkat 305 /* ok */
-#define __NR_fchmodat 306 /* ok */
-#define __NR_faccessat 307 /* ok */
-#define __NR_pselect6 308 /* obsolete -> sys_pselect7 */
-#define __NR_ppoll 309 /* ok */
-#define __NR_unshare 310 /* ok */
-#define __NR_set_robust_list 311 /* ok */
-#define __NR_get_robust_list 312 /* ok */
-#define __NR_splice 313 /* ok */
-#define __NR_sync_file_range 314 /* ok */
-#define __NR_tee 315 /* ok */
-#define __NR_vmsplice 316 /* ok */
-#define __NR_move_pages 317 /* mmu */
-#define __NR_getcpu 318 /* ok */
-#define __NR_epoll_pwait 319 /* ok */
-#define __NR_utimensat 320 /* ok */
-#define __NR_signalfd 321 /* ok */
-#define __NR_timerfd_create 322 /* ok */
-#define __NR_eventfd 323 /* ok */
-#define __NR_fallocate 324 /* ok */
-#define __NR_semtimedop 325 /* ok - semaphore group */
-#define __NR_timerfd_settime 326 /* ok */
-#define __NR_timerfd_gettime 327 /* ok */
-/* sysv ipc syscalls */
-#define __NR_semctl 328 /* ok */
-#define __NR_semget 329 /* ok */
-#define __NR_semop 330 /* ok */
-#define __NR_msgctl 331 /* ok */
-#define __NR_msgget 332 /* ok */
-#define __NR_msgrcv 333 /* ok */
-#define __NR_msgsnd 334 /* ok */
-#define __NR_shmat 335 /* ok */
-#define __NR_shmctl 336 /* ok */
-#define __NR_shmdt 337 /* ok */
-#define __NR_shmget 338 /* ok */
-
-
-#define __NR_signalfd4 339 /* new */
-#define __NR_eventfd2 340 /* new */
-#define __NR_epoll_create1 341 /* new */
-#define __NR_dup3 342 /* new */
-#define __NR_pipe2 343 /* new */
-#define __NR_inotify_init1 344 /* new */
-#define __NR_socket 345 /* new */
-#define __NR_socketpair 346 /* new */
-#define __NR_bind 347 /* new */
-#define __NR_listen 348 /* new */
-#define __NR_accept 349 /* new */
-#define __NR_connect 350 /* new */
-#define __NR_getsockname 351 /* new */
-#define __NR_getpeername 352 /* new */
-#define __NR_sendto 353 /* new */
-#define __NR_send 354 /* new */
-#define __NR_recvfrom 355 /* new */
-#define __NR_recv 356 /* new */
-#define __NR_setsockopt 357 /* new */
-#define __NR_getsockopt 358 /* new */
-#define __NR_shutdown 359 /* new */
-#define __NR_sendmsg 360 /* new */
-#define __NR_recvmsg 361 /* new */
-#define __NR_accept4 362 /* new */
-#define __NR_preadv 363 /* new */
-#define __NR_pwritev 364 /* new */
-#define __NR_rt_tgsigqueueinfo 365 /* new */
-#define __NR_perf_event_open 366 /* new */
-#define __NR_recvmmsg 367 /* new */
-#define __NR_fanotify_init 368
-#define __NR_fanotify_mark 369
-#define __NR_prlimit64 370
-#define __NR_name_to_handle_at 371
-#define __NR_open_by_handle_at 372
-#define __NR_clock_adjtime 373
-#define __NR_syncfs 374
-#define __NR_setns 375
-#define __NR_sendmmsg 376
-#define __NR_process_vm_readv 377
-#define __NR_process_vm_writev 378
-
-#define __NR_syscalls 379
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/* #define __ARCH_WANT_OLD_READDIR */
@@ -422,7 +35,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
#ifdef CONFIG_MMU
@@ -438,5 +50,4 @@
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_UNISTD_H */
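The syscall numbers themselves now live in uapi/asm/unistd.h, which is the copy exported to userspace. As a reminder of what that export is for, a small userspace sketch that reaches one of them through the libc syscall(2) wrapper; nothing here is microblaze specific.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>	/* pulls in the exported __NR_* values */

    int main(void)
    {
    	long tid = syscall(__NR_gettid);

    	printf("gettid() -> %ld\n", tid);
    	return 0;
    }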
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index baebb3da1d4..6d7d7f4aaae 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,3 +1,35 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += elf.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += kvm_para.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/microblaze/include/asm/auxvec.h b/arch/microblaze/include/uapi/asm/auxvec.h
index 8b137891791..8b137891791 100644
--- a/arch/microblaze/include/asm/auxvec.h
+++ b/arch/microblaze/include/uapi/asm/auxvec.h
diff --git a/arch/microblaze/include/asm/bitsperlong.h b/arch/microblaze/include/uapi/asm/bitsperlong.h
index 6dc0bb0c13b..6dc0bb0c13b 100644
--- a/arch/microblaze/include/asm/bitsperlong.h
+++ b/arch/microblaze/include/uapi/asm/bitsperlong.h
diff --git a/arch/microblaze/include/asm/byteorder.h b/arch/microblaze/include/uapi/asm/byteorder.h
index 31902762a42..31902762a42 100644
--- a/arch/microblaze/include/asm/byteorder.h
+++ b/arch/microblaze/include/uapi/asm/byteorder.h
diff --git a/arch/microblaze/include/uapi/asm/elf.h b/arch/microblaze/include/uapi/asm/elf.h
new file mode 100644
index 00000000000..be1731d5e2f
--- /dev/null
+++ b/arch/microblaze/include/uapi/asm/elf.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_MICROBLAZE_ELF_H
+#define _UAPI_ASM_MICROBLAZE_ELF_H
+
+/*
+ * Note there is no "official" ELF designation for Microblaze.
+ * I've snaffled the value from the microblaze binutils source code
+ * /binutils/microblaze/include/elf/microblaze.h
+ */
+#define EM_MICROBLAZE 189
+#define EM_MICROBLAZE_OLD 0xbaab
+#define ELF_ARCH EM_MICROBLAZE
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_MICROBLAZE \
+ || (x)->e_machine == EM_MICROBLAZE_OLD)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#ifndef __uClinux__
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+#include <asm/byteorder.h>
+
+#ifndef ELF_GREG_T
+#define ELF_GREG_T
+typedef unsigned long elf_greg_t;
+#endif
+
+#ifndef ELF_NGREG
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+#endif
+
+#ifndef ELF_GREGSET_T
+#define ELF_GREGSET_T
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+#endif
+
+#ifndef ELF_FPREGSET_T
+#define ELF_FPREGSET_T
+
+/* TBD */
+#define ELF_NFPREG 33 /* includes fsr */
+typedef unsigned long elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* typedef struct user_fpu_struct elf_fpregset_t; */
+#endif
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (0x08000000)
+
+#ifdef __MICROBLAZEEL__
+#define ELF_DATA ELFDATA2LSB
+#else
+#define ELF_DATA ELFDATA2MSB
+#endif
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+
+#define ELF_CORE_COPY_REGS(_dest, _regs) \
+ memcpy((char *) &_dest, (char *) _regs, \
+ sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+
+ * For the moment, we have only optimizations for the Intel generations,
+ * but that could change...
+ */
+#define ELF_PLATFORM (NULL)
+
+/* Added _f parameter. Is this definition correct: TBD */
+#define ELF_PLAT_INIT(_r, _f) \
+do { \
+ _r->r0 = _r->r1 = _r->r2 = _r->r3 = \
+ _r->r4 = _r->r5 = _r->r6 = _r->r7 = \
+ _r->r8 = _r->r9 = _r->r10 = _r->r11 = \
+ _r->r12 = _r->r13 = _r->r14 = _r->r15 = \
+ _r->r16 = _r->r17 = _r->r18 = _r->r19 = \
+ _r->r20 = _r->r21 = _r->r22 = _r->r23 = \
+ _r->r24 = _r->r25 = _r->r26 = _r->r27 = \
+ _r->r28 = _r->r29 = _r->r30 = _r->r31 = \
+ 0; \
+} while (0)
+
+
+#endif /* __uClinux__ */
+
+#endif /* _UAPI_ASM_MICROBLAZE_ELF_H */
diff --git a/arch/microblaze/include/asm/errno.h b/arch/microblaze/include/uapi/asm/errno.h
index 4c82b503d92..4c82b503d92 100644
--- a/arch/microblaze/include/asm/errno.h
+++ b/arch/microblaze/include/uapi/asm/errno.h
diff --git a/arch/microblaze/include/asm/fcntl.h b/arch/microblaze/include/uapi/asm/fcntl.h
index 46ab12db573..46ab12db573 100644
--- a/arch/microblaze/include/asm/fcntl.h
+++ b/arch/microblaze/include/uapi/asm/fcntl.h
diff --git a/arch/microblaze/include/asm/ioctl.h b/arch/microblaze/include/uapi/asm/ioctl.h
index b279fe06dfe..b279fe06dfe 100644
--- a/arch/microblaze/include/asm/ioctl.h
+++ b/arch/microblaze/include/uapi/asm/ioctl.h
diff --git a/arch/microblaze/include/asm/ioctls.h b/arch/microblaze/include/uapi/asm/ioctls.h
index ec34c760665..ec34c760665 100644
--- a/arch/microblaze/include/asm/ioctls.h
+++ b/arch/microblaze/include/uapi/asm/ioctls.h
diff --git a/arch/microblaze/include/asm/ipcbuf.h b/arch/microblaze/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/microblaze/include/asm/ipcbuf.h
+++ b/arch/microblaze/include/uapi/asm/ipcbuf.h
diff --git a/arch/h8300/include/asm/kvm_para.h b/arch/microblaze/include/uapi/asm/kvm_para.h
index 14fab8f0b95..14fab8f0b95 100644
--- a/arch/h8300/include/asm/kvm_para.h
+++ b/arch/microblaze/include/uapi/asm/kvm_para.h
diff --git a/arch/microblaze/include/asm/mman.h b/arch/microblaze/include/uapi/asm/mman.h
index 8eebf89f5ab..8eebf89f5ab 100644
--- a/arch/microblaze/include/asm/mman.h
+++ b/arch/microblaze/include/uapi/asm/mman.h
diff --git a/arch/microblaze/include/asm/msgbuf.h b/arch/microblaze/include/uapi/asm/msgbuf.h
index 809134c644a..809134c644a 100644
--- a/arch/microblaze/include/asm/msgbuf.h
+++ b/arch/microblaze/include/uapi/asm/msgbuf.h
diff --git a/arch/microblaze/include/asm/param.h b/arch/microblaze/include/uapi/asm/param.h
index 965d4542797..965d4542797 100644
--- a/arch/microblaze/include/asm/param.h
+++ b/arch/microblaze/include/uapi/asm/param.h
diff --git a/arch/microblaze/include/asm/poll.h b/arch/microblaze/include/uapi/asm/poll.h
index c98509d3149..c98509d3149 100644
--- a/arch/microblaze/include/asm/poll.h
+++ b/arch/microblaze/include/uapi/asm/poll.h
diff --git a/arch/microblaze/include/asm/posix_types.h b/arch/microblaze/include/uapi/asm/posix_types.h
index 0e15039673e..0e15039673e 100644
--- a/arch/microblaze/include/asm/posix_types.h
+++ b/arch/microblaze/include/uapi/asm/posix_types.h
diff --git a/arch/microblaze/include/uapi/asm/ptrace.h b/arch/microblaze/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..d31238a5f94
--- /dev/null
+++ b/arch/microblaze/include/uapi/asm/ptrace.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_MICROBLAZE_PTRACE_H
+#define _UAPI_ASM_MICROBLAZE_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long microblaze_reg_t;
+
+struct pt_regs {
+ microblaze_reg_t r0;
+ microblaze_reg_t r1;
+ microblaze_reg_t r2;
+ microblaze_reg_t r3;
+ microblaze_reg_t r4;
+ microblaze_reg_t r5;
+ microblaze_reg_t r6;
+ microblaze_reg_t r7;
+ microblaze_reg_t r8;
+ microblaze_reg_t r9;
+ microblaze_reg_t r10;
+ microblaze_reg_t r11;
+ microblaze_reg_t r12;
+ microblaze_reg_t r13;
+ microblaze_reg_t r14;
+ microblaze_reg_t r15;
+ microblaze_reg_t r16;
+ microblaze_reg_t r17;
+ microblaze_reg_t r18;
+ microblaze_reg_t r19;
+ microblaze_reg_t r20;
+ microblaze_reg_t r21;
+ microblaze_reg_t r22;
+ microblaze_reg_t r23;
+ microblaze_reg_t r24;
+ microblaze_reg_t r25;
+ microblaze_reg_t r26;
+ microblaze_reg_t r27;
+ microblaze_reg_t r28;
+ microblaze_reg_t r29;
+ microblaze_reg_t r30;
+ microblaze_reg_t r31;
+ microblaze_reg_t pc;
+ microblaze_reg_t msr;
+ microblaze_reg_t ear;
+ microblaze_reg_t esr;
+ microblaze_reg_t fsr;
+ int pt_mode;
+};
+
+#ifndef __KERNEL__
+
+/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
+#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t))
+#define PT_PC (32 * sizeof(microblaze_reg_t))
+#define PT_MSR (33 * sizeof(microblaze_reg_t))
+#define PT_EAR (34 * sizeof(microblaze_reg_t))
+#define PT_ESR (35 * sizeof(microblaze_reg_t))
+#define PT_FSR (36 * sizeof(microblaze_reg_t))
+#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t))
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_MICROBLAZE_PTRACE_H */
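
The PT_GPR()/PT_PC/PT_MSR offsets above are the "user area" offsets a tracer such as gdbserver passes to the ptrace PEEKUSER/POKEUSER requests. The following is a hypothetical, minimal sketch of that usage (not part of the patch); the PT_* macros are mirrored locally so the example is self-contained when built off a MicroBlaze target, and error handling is omitted for brevity.

/* Sketch: read the stopped child's r1 (stack pointer) and PC via the
 * pt_regs offsets defined by the header above. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#define PT_GPR(n)  ((n) * sizeof(unsigned long))  /* mirrors asm/ptrace.h */
#define PT_PC      (32 * sizeof(unsigned long))

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {                          /* child: ask to be traced */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                _exit(0);
        }

        waitpid(pid, NULL, 0);                   /* child is now stopped */

        long sp = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_GPR(1), NULL);
        long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_PC, NULL);
        printf("child r1=%#lx pc=%#lx\n", sp, pc);

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        waitpid(pid, NULL, 0);                   /* reap the child */
        return 0;
}
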
diff --git a/arch/microblaze/include/asm/resource.h b/arch/microblaze/include/uapi/asm/resource.h
index 04bc4db8921..04bc4db8921 100644
--- a/arch/microblaze/include/asm/resource.h
+++ b/arch/microblaze/include/uapi/asm/resource.h
diff --git a/arch/microblaze/include/asm/sembuf.h b/arch/microblaze/include/uapi/asm/sembuf.h
index 7673b83cfef..7673b83cfef 100644
--- a/arch/microblaze/include/asm/sembuf.h
+++ b/arch/microblaze/include/uapi/asm/sembuf.h
diff --git a/arch/microblaze/include/uapi/asm/setup.h b/arch/microblaze/include/uapi/asm/setup.h
new file mode 100644
index 00000000000..76bc2acee6a
--- /dev/null
+++ b/arch/microblaze/include/uapi/asm/setup.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_MICROBLAZE_SETUP_H
+#define _UAPI_ASM_MICROBLAZE_SETUP_H
+
+#define COMMAND_LINE_SIZE 256
+
+# ifndef __ASSEMBLY__
+
+# endif /* __ASSEMBLY__ */
+#endif /* _UAPI_ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/include/asm/shmbuf.h b/arch/microblaze/include/uapi/asm/shmbuf.h
index 83c05fc2de3..83c05fc2de3 100644
--- a/arch/microblaze/include/asm/shmbuf.h
+++ b/arch/microblaze/include/uapi/asm/shmbuf.h
diff --git a/arch/microblaze/include/asm/sigcontext.h b/arch/microblaze/include/uapi/asm/sigcontext.h
index 55873c80c91..55873c80c91 100644
--- a/arch/microblaze/include/asm/sigcontext.h
+++ b/arch/microblaze/include/uapi/asm/sigcontext.h
diff --git a/arch/microblaze/include/asm/siginfo.h b/arch/microblaze/include/uapi/asm/siginfo.h
index 0815d29d82e..0815d29d82e 100644
--- a/arch/microblaze/include/asm/siginfo.h
+++ b/arch/microblaze/include/uapi/asm/siginfo.h
diff --git a/arch/microblaze/include/asm/signal.h b/arch/microblaze/include/uapi/asm/signal.h
index 7b1573ce19d..7b1573ce19d 100644
--- a/arch/microblaze/include/asm/signal.h
+++ b/arch/microblaze/include/uapi/asm/signal.h
diff --git a/arch/microblaze/include/asm/socket.h b/arch/microblaze/include/uapi/asm/socket.h
index 6b71384b9d8..6b71384b9d8 100644
--- a/arch/microblaze/include/asm/socket.h
+++ b/arch/microblaze/include/uapi/asm/socket.h
diff --git a/arch/microblaze/include/asm/sockios.h b/arch/microblaze/include/uapi/asm/sockios.h
index def6d4746ee..def6d4746ee 100644
--- a/arch/microblaze/include/asm/sockios.h
+++ b/arch/microblaze/include/uapi/asm/sockios.h
diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/uapi/asm/stat.h
index 3dc90fa92c7..3dc90fa92c7 100644
--- a/arch/microblaze/include/asm/stat.h
+++ b/arch/microblaze/include/uapi/asm/stat.h
diff --git a/arch/microblaze/include/asm/statfs.h b/arch/microblaze/include/uapi/asm/statfs.h
index 0b91fe198c2..0b91fe198c2 100644
--- a/arch/microblaze/include/asm/statfs.h
+++ b/arch/microblaze/include/uapi/asm/statfs.h
diff --git a/arch/microblaze/include/asm/swab.h b/arch/microblaze/include/uapi/asm/swab.h
index 7847e563ab6..7847e563ab6 100644
--- a/arch/microblaze/include/asm/swab.h
+++ b/arch/microblaze/include/uapi/asm/swab.h
diff --git a/arch/microblaze/include/asm/termbits.h b/arch/microblaze/include/uapi/asm/termbits.h
index 3935b106de7..3935b106de7 100644
--- a/arch/microblaze/include/asm/termbits.h
+++ b/arch/microblaze/include/uapi/asm/termbits.h
diff --git a/arch/microblaze/include/asm/termios.h b/arch/microblaze/include/uapi/asm/termios.h
index 280d78a9d96..280d78a9d96 100644
--- a/arch/microblaze/include/asm/termios.h
+++ b/arch/microblaze/include/uapi/asm/termios.h
diff --git a/arch/microblaze/include/asm/types.h b/arch/microblaze/include/uapi/asm/types.h
index b9e79bc580d..b9e79bc580d 100644
--- a/arch/microblaze/include/asm/types.h
+++ b/arch/microblaze/include/uapi/asm/types.h
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..ccb6920f3b3
--- /dev/null
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _UAPI_ASM_MICROBLAZE_UNISTD_H
+#define _UAPI_ASM_MICROBLAZE_UNISTD_H
+
+#define __NR_restart_syscall 0 /* ok */
+#define __NR_exit 1 /* ok */
+#define __NR_fork 2 /* not for no MMU - weird */
+#define __NR_read 3 /* ok */
+#define __NR_write 4 /* ok */
+#define __NR_open 5 /* openat */
+#define __NR_close 6 /* ok */
+#define __NR_waitpid 7 /* waitid */
+#define __NR_creat 8 /* openat */
+#define __NR_link 9 /* linkat */
+#define __NR_unlink 10 /* unlinkat */
+#define __NR_execve 11 /* ok */
+#define __NR_chdir 12 /* ok */
+#define __NR_time 13 /* obsolete -> sys_gettimeofday */
+#define __NR_mknod 14 /* mknodat */
+#define __NR_chmod 15 /* fchmodat */
+#define __NR_lchown 16 /* ok */
+#define __NR_break 17 /* don't know */
+#define __NR_oldstat 18 /* remove */
+#define __NR_lseek 19 /* ok */
+#define __NR_getpid 20 /* ok */
+#define __NR_mount 21 /* ok */
+#define __NR_umount 22 /* ok */ /* use only umount2 */
+#define __NR_setuid 23 /* ok */
+#define __NR_getuid 24 /* ok */
+#define __NR_stime 25 /* obsolete -> sys_settimeofday */
+#define __NR_ptrace 26 /* ok */
+#define __NR_alarm 27 /* obsolete -> sys_setitimer */
+#define __NR_oldfstat 28 /* remove */
+#define __NR_pause 29 /* obsolete -> sys_rt_sigtimedwait */
+#define __NR_utime 30 /* obsolete -> sys_utimesat */
+#define __NR_stty 31 /* remove */
+#define __NR_gtty 32 /* remove */
+#define __NR_access 33 /* faccessat */
+/* can be implemented by sys_setpriority */
+#define __NR_nice 34
+#define __NR_ftime 35 /* remove */
+#define __NR_sync 36 /* ok */
+#define __NR_kill 37 /* ok */
+#define __NR_rename 38 /* renameat */
+#define __NR_mkdir 39 /* mkdirat */
+#define __NR_rmdir 40 /* unlinkat */
+#define __NR_dup 41 /* ok */
+#define __NR_pipe 42 /* ok */
+#define __NR_times 43 /* ok */
+#define __NR_prof 44 /* remove */
+#define __NR_brk 45 /* ok -mmu, nommu specific */
+#define __NR_setgid 46 /* ok */
+#define __NR_getgid 47 /* ok */
+#define __NR_signal 48 /* obsolete -> sys_rt_sigaction */
+#define __NR_geteuid 49 /* ok */
+#define __NR_getegid 50 /* ok */
+#define __NR_acct 51 /* add it and then I can disable it */
+#define __NR_umount2 52 /* remove */
+#define __NR_lock 53 /* remove */
+#define __NR_ioctl 54 /* ok */
+#define __NR_fcntl 55 /* ok -> 64bit version*/
+#define __NR_mpx 56 /* remove */
+#define __NR_setpgid 57 /* ok */
+#define __NR_ulimit 58 /* remove */
+#define __NR_oldolduname 59 /* remove */
+#define __NR_umask 60 /* ok */
+#define __NR_chroot 61 /* ok */
+#define __NR_ustat 62 /* obsolete -> statfs64 */
+#define __NR_dup2 63 /* ok */
+#define __NR_getppid 64 /* ok */
+#define __NR_getpgrp 65 /* obsolete -> sys_getpgid */
+#define __NR_setsid 66 /* ok */
+#define __NR_sigaction 67 /* obsolete -> rt_sigaction */
+#define __NR_sgetmask 68 /* obsolete -> sys_rt_sigprocmask */
+#define __NR_ssetmask 69 /* obsolete ->sys_rt_sigprocmask */
+#define __NR_setreuid 70 /* ok */
+#define __NR_setregid 71 /* ok */
+#define __NR_sigsuspend 72 /* obsolete -> rt_sigsuspend */
+#define __NR_sigpending 73 /* obsolete -> sys_rt_sigpending */
+#define __NR_sethostname 74 /* ok */
+#define __NR_setrlimit 75 /* ok */
+#define __NR_getrlimit 76 /* ok Back compatible 2G limited rlimit */
+#define __NR_getrusage 77 /* ok */
+#define __NR_gettimeofday 78 /* ok */
+#define __NR_settimeofday 79 /* ok */
+#define __NR_getgroups 80 /* ok */
+#define __NR_setgroups 81 /* ok */
+#define __NR_select 82 /* obsolete -> sys_pselect7 */
+#define __NR_symlink 83 /* symlinkat */
+#define __NR_oldlstat 84 /* remove */
+#define __NR_readlink 85 /* obsolete -> sys_readlinkat */
+#define __NR_uselib 86 /* remove */
+#define __NR_swapon 87 /* ok */
+#define __NR_reboot 88 /* ok */
+#define __NR_readdir 89 /* remove ? */
+#define __NR_mmap 90 /* obsolete -> sys_mmap2 */
+#define __NR_munmap 91 /* ok - mmu and nommu */
+#define __NR_truncate 92 /* ok or truncate64 */
+#define __NR_ftruncate 93 /* ok or ftruncate64 */
+#define __NR_fchmod 94 /* ok */
+#define __NR_fchown 95 /* ok */
+#define __NR_getpriority 96 /* ok */
+#define __NR_setpriority 97 /* ok */
+#define __NR_profil 98 /* remove */
+#define __NR_statfs 99 /* ok or statfs64 */
+#define __NR_fstatfs 100 /* ok or fstatfs64 */
+#define __NR_ioperm 101 /* remove */
+#define __NR_socketcall 102 /* remove */
+#define __NR_syslog 103 /* ok */
+#define __NR_setitimer 104 /* ok */
+#define __NR_getitimer 105 /* ok */
+#define __NR_stat 106 /* remove */
+#define __NR_lstat 107 /* remove */
+#define __NR_fstat 108 /* remove */
+#define __NR_olduname 109 /* remove */
+#define __NR_iopl 110 /* remove */
+#define __NR_vhangup 111 /* ok */
+#define __NR_idle 112 /* remove */
+#define __NR_vm86old 113 /* remove */
+#define __NR_wait4 114 /* obsolete -> waitid */
+#define __NR_swapoff 115 /* ok */
+#define __NR_sysinfo 116 /* ok */
+#define __NR_ipc 117 /* remove - direct call */
+#define __NR_fsync 118 /* ok */
+#define __NR_sigreturn 119 /* obsolete -> sys_rt_sigreturn */
+#define __NR_clone 120 /* ok */
+#define __NR_setdomainname 121 /* ok */
+#define __NR_uname 122 /* remove */
+#define __NR_modify_ldt 123 /* remove */
+#define __NR_adjtimex 124 /* ok */
+#define __NR_mprotect 125 /* remove */
+#define __NR_sigprocmask 126 /* obsolete -> sys_rt_sigprocmask */
+#define __NR_create_module 127 /* remove */
+#define __NR_init_module 128 /* ok */
+#define __NR_delete_module 129 /* ok */
+#define __NR_get_kernel_syms 130 /* remove */
+#define __NR_quotactl 131 /* ok */
+#define __NR_getpgid 132 /* ok */
+#define __NR_fchdir 133 /* ok */
+#define __NR_bdflush 134 /* remove */
+#define __NR_sysfs 135 /* needed for busybox */
+#define __NR_personality 136 /* ok */
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138 /* ok */
+#define __NR_setfsgid 139 /* ok */
+#define __NR__llseek 140 /* remove only lseek */
+#define __NR_getdents 141 /* ok or getdents64 */
+#define __NR__newselect 142 /* remove */
+#define __NR_flock 143 /* ok */
+#define __NR_msync 144 /* remove */
+#define __NR_readv 145 /* ok */
+#define __NR_writev 146 /* ok */
+#define __NR_getsid 147 /* ok */
+#define __NR_fdatasync 148 /* ok */
+#define __NR__sysctl 149 /* remove */
+#define __NR_mlock 150 /* ok - nommu or mmu */
+#define __NR_munlock 151 /* ok - nommu or mmu */
+#define __NR_mlockall 152 /* ok - nommu or mmu */
+#define __NR_munlockall 153 /* ok - nommu or mmu */
+#define __NR_sched_setparam 154 /* ok */
+#define __NR_sched_getparam 155 /* ok */
+#define __NR_sched_setscheduler 156 /* ok */
+#define __NR_sched_getscheduler 157 /* ok */
+#define __NR_sched_yield 158 /* ok */
+#define __NR_sched_get_priority_max 159 /* ok */
+#define __NR_sched_get_priority_min 160 /* ok */
+#define __NR_sched_rr_get_interval 161 /* ok */
+#define __NR_nanosleep 162 /* ok */
+#define __NR_mremap 163 /* ok - nommu or mmu */
+#define __NR_setresuid 164 /* ok */
+#define __NR_getresuid 165 /* ok */
+#define __NR_vm86 166 /* remove */
+#define __NR_query_module 167 /* ok */
+#define __NR_poll 168 /* obsolete -> sys_ppoll */
+#define __NR_nfsservctl 169 /* ok */
+#define __NR_setresgid 170 /* ok */
+#define __NR_getresgid 171 /* ok */
+#define __NR_prctl 172 /* ok */
+#define __NR_rt_sigreturn 173 /* ok */
+#define __NR_rt_sigaction 174 /* ok */
+#define __NR_rt_sigprocmask 175 /* ok */
+#define __NR_rt_sigpending 176 /* ok */
+#define __NR_rt_sigtimedwait 177 /* ok */
+#define __NR_rt_sigqueueinfo 178 /* ok */
+#define __NR_rt_sigsuspend 179 /* ok */
+#define __NR_pread64 180 /* ok */
+#define __NR_pwrite64 181 /* ok */
+#define __NR_chown 182 /* obsolete -> fchownat */
+#define __NR_getcwd 183 /* ok */
+#define __NR_capget 184 /* ok */
+#define __NR_capset 185 /* ok */
+#define __NR_sigaltstack 186 /* remove */
+#define __NR_sendfile 187 /* ok -> exist 64bit version*/
+#define __NR_getpmsg 188 /* remove */
+/* remove - some people actually want streams */
+#define __NR_putpmsg 189
+/* for noMMU - group with clone -> maybe remove */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* remove - SuS compliant getrlimit */
+#define __NR_mmap2 192 /* ok */
+#define __NR_truncate64 193 /* ok */
+#define __NR_ftruncate64 194 /* ok */
+#define __NR_stat64 195 /* remove _ARCH_WANT_STAT64 */
+#define __NR_lstat64 196 /* remove _ARCH_WANT_STAT64 */
+#define __NR_fstat64 197 /* remove _ARCH_WANT_STAT64 */
+#define __NR_lchown32 198 /* ok - without 32 */
+#define __NR_getuid32 199 /* ok - without 32 */
+#define __NR_getgid32 200 /* ok - without 32 */
+#define __NR_geteuid32 201 /* ok - without 32 */
+#define __NR_getegid32 202 /* ok - without 32 */
+#define __NR_setreuid32 203 /* ok - without 32 */
+#define __NR_setregid32 204 /* ok - without 32 */
+#define __NR_getgroups32 205 /* ok - without 32 */
+#define __NR_setgroups32 206 /* ok - without 32 */
+#define __NR_fchown32 207 /* ok - without 32 */
+#define __NR_setresuid32 208 /* ok - without 32 */
+#define __NR_getresuid32 209 /* ok - without 32 */
+#define __NR_setresgid32 210 /* ok - without 32 */
+#define __NR_getresgid32 211 /* ok - without 32 */
+#define __NR_chown32 212 /* ok - without 32 -obsolete -> fchownat */
+#define __NR_setuid32 213 /* ok - without 32 */
+#define __NR_setgid32 214 /* ok - without 32 */
+#define __NR_setfsuid32 215 /* ok - without 32 */
+#define __NR_setfsgid32 216 /* ok - without 32 */
+#define __NR_pivot_root 217 /* ok */
+#define __NR_mincore 218 /* ok */
+#define __NR_madvise 219 /* ok */
+#define __NR_getdents64 220 /* ok */
+#define __NR_fcntl64 221 /* ok */
+/* 223 is unused */
+#define __NR_gettid 224 /* ok */
+#define __NR_readahead 225 /* ok */
+#define __NR_setxattr 226 /* ok */
+#define __NR_lsetxattr 227 /* ok */
+#define __NR_fsetxattr 228 /* ok */
+#define __NR_getxattr 229 /* ok */
+#define __NR_lgetxattr 230 /* ok */
+#define __NR_fgetxattr 231 /* ok */
+#define __NR_listxattr 232 /* ok */
+#define __NR_llistxattr 233 /* ok */
+#define __NR_flistxattr 234 /* ok */
+#define __NR_removexattr 235 /* ok */
+#define __NR_lremovexattr 236 /* ok */
+#define __NR_fremovexattr 237 /* ok */
+#define __NR_tkill 238 /* ok */
+#define __NR_sendfile64 239 /* ok */
+#define __NR_futex 240 /* ok */
+#define __NR_sched_setaffinity 241 /* ok */
+#define __NR_sched_getaffinity 242 /* ok */
+#define __NR_set_thread_area 243 /* remove */
+#define __NR_get_thread_area 244 /* remove */
+#define __NR_io_setup 245 /* ok */
+#define __NR_io_destroy 246 /* ok */
+#define __NR_io_getevents 247 /* ok */
+#define __NR_io_submit 248 /* ok */
+#define __NR_io_cancel 249 /* ok */
+#define __NR_fadvise64 250 /* remove -> sys_fadvise64_64 */
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_exit_group 252 /* ok */
+#define __NR_lookup_dcookie 253 /* ok */
+#define __NR_epoll_create 254 /* ok */
+#define __NR_epoll_ctl 255 /* ok */
+#define __NR_epoll_wait 256 /* obsolete -> sys_epoll_pwait */
+#define __NR_remap_file_pages 257 /* only for mmu */
+#define __NR_set_tid_address 258 /* ok */
+#define __NR_timer_create 259 /* ok */
+#define __NR_timer_settime (__NR_timer_create+1) /* 260 */ /* ok */
+#define __NR_timer_gettime (__NR_timer_create+2) /* 261 */ /* ok */
+#define __NR_timer_getoverrun (__NR_timer_create+3) /* 262 */ /* ok */
+#define __NR_timer_delete (__NR_timer_create+4) /* 263 */ /* ok */
+#define __NR_clock_settime (__NR_timer_create+5) /* 264 */ /* ok */
+#define __NR_clock_gettime (__NR_timer_create+6) /* 265 */ /* ok */
+#define __NR_clock_getres (__NR_timer_create+7) /* 266 */ /* ok */
+#define __NR_clock_nanosleep (__NR_timer_create+8) /* 267 */ /* ok */
+#define __NR_statfs64 268 /* ok */
+#define __NR_fstatfs64 269 /* ok */
+#define __NR_tgkill 270 /* ok */
+#define __NR_utimes 271 /* obsolete -> sys_futimesat */
+#define __NR_fadvise64_64 272 /* ok */
+#define __NR_vserver 273 /* ok */
+#define __NR_mbind 274 /* only for mmu */
+#define __NR_get_mempolicy 275 /* only for mmu */
+#define __NR_set_mempolicy 276 /* only for mmu */
+#define __NR_mq_open 277 /* ok */
+#define __NR_mq_unlink (__NR_mq_open+1) /* 278 */ /* ok */
+#define __NR_mq_timedsend (__NR_mq_open+2) /* 279 */ /* ok */
+#define __NR_mq_timedreceive (__NR_mq_open+3) /* 280 */ /* ok */
+#define __NR_mq_notify (__NR_mq_open+4) /* 281 */ /* ok */
+#define __NR_mq_getsetattr (__NR_mq_open+5) /* 282 */ /* ok */
+#define __NR_kexec_load 283 /* ok */
+#define __NR_waitid 284 /* ok */
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286 /* ok */
+#define __NR_request_key 287 /* ok */
+#define __NR_keyctl 288 /* ok */
+#define __NR_ioprio_set 289 /* ok */
+#define __NR_ioprio_get 290 /* ok */
+#define __NR_inotify_init 291 /* ok */
+#define __NR_inotify_add_watch 292 /* ok */
+#define __NR_inotify_rm_watch 293 /* ok */
+#define __NR_migrate_pages 294 /* mmu */
+#define __NR_openat 295 /* ok */
+#define __NR_mkdirat 296 /* ok */
+#define __NR_mknodat 297 /* ok */
+#define __NR_fchownat 298 /* ok */
+#define __NR_futimesat 299 /* obsolete -> sys_utimesat */
+#define __NR_fstatat64 300 /* stat64 */
+#define __NR_unlinkat 301 /* ok */
+#define __NR_renameat 302 /* ok */
+#define __NR_linkat 303 /* ok */
+#define __NR_symlinkat 304 /* ok */
+#define __NR_readlinkat 305 /* ok */
+#define __NR_fchmodat 306 /* ok */
+#define __NR_faccessat 307 /* ok */
+#define __NR_pselect6 308 /* obsolete -> sys_pselect7 */
+#define __NR_ppoll 309 /* ok */
+#define __NR_unshare 310 /* ok */
+#define __NR_set_robust_list 311 /* ok */
+#define __NR_get_robust_list 312 /* ok */
+#define __NR_splice 313 /* ok */
+#define __NR_sync_file_range 314 /* ok */
+#define __NR_tee 315 /* ok */
+#define __NR_vmsplice 316 /* ok */
+#define __NR_move_pages 317 /* mmu */
+#define __NR_getcpu 318 /* ok */
+#define __NR_epoll_pwait 319 /* ok */
+#define __NR_utimensat 320 /* ok */
+#define __NR_signalfd 321 /* ok */
+#define __NR_timerfd_create 322 /* ok */
+#define __NR_eventfd 323 /* ok */
+#define __NR_fallocate 324 /* ok */
+#define __NR_semtimedop 325 /* ok - semaphore group */
+#define __NR_timerfd_settime 326 /* ok */
+#define __NR_timerfd_gettime 327 /* ok */
+/* sysv ipc syscalls */
+#define __NR_semctl 328 /* ok */
+#define __NR_semget 329 /* ok */
+#define __NR_semop 330 /* ok */
+#define __NR_msgctl 331 /* ok */
+#define __NR_msgget 332 /* ok */
+#define __NR_msgrcv 333 /* ok */
+#define __NR_msgsnd 334 /* ok */
+#define __NR_shmat 335 /* ok */
+#define __NR_shmctl 336 /* ok */
+#define __NR_shmdt 337 /* ok */
+#define __NR_shmget 338 /* ok */
+
+
+#define __NR_signalfd4 339 /* new */
+#define __NR_eventfd2 340 /* new */
+#define __NR_epoll_create1 341 /* new */
+#define __NR_dup3 342 /* new */
+#define __NR_pipe2 343 /* new */
+#define __NR_inotify_init1 344 /* new */
+#define __NR_socket 345 /* new */
+#define __NR_socketpair 346 /* new */
+#define __NR_bind 347 /* new */
+#define __NR_listen 348 /* new */
+#define __NR_accept 349 /* new */
+#define __NR_connect 350 /* new */
+#define __NR_getsockname 351 /* new */
+#define __NR_getpeername 352 /* new */
+#define __NR_sendto 353 /* new */
+#define __NR_send 354 /* new */
+#define __NR_recvfrom 355 /* new */
+#define __NR_recv 356 /* new */
+#define __NR_setsockopt 357 /* new */
+#define __NR_getsockopt 358 /* new */
+#define __NR_shutdown 359 /* new */
+#define __NR_sendmsg 360 /* new */
+#define __NR_recvmsg 361 /* new */
+#define __NR_accept4 362 /* new */
+#define __NR_preadv 363 /* new */
+#define __NR_pwritev 364 /* new */
+#define __NR_rt_tgsigqueueinfo 365 /* new */
+#define __NR_perf_event_open 366 /* new */
+#define __NR_recvmmsg 367 /* new */
+#define __NR_fanotify_init 368
+#define __NR_fanotify_mark 369
+#define __NR_prlimit64 370
+#define __NR_name_to_handle_at 371
+#define __NR_open_by_handle_at 372
+#define __NR_clock_adjtime 373
+#define __NR_syncfs 374
+#define __NR_setns 375
+#define __NR_sendmmsg 376
+#define __NR_process_vm_readv 377
+#define __NR_process_vm_writev 378
+#define __NR_kcmp 379
+
+#define __NR_syscalls 380
+
+#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
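
These __NR_* constants are what the C library's syscall stubs are built from; when no dedicated wrapper exists, user space can still reach a call through syscall(2) and the raw number. A small generic-Linux sketch (nothing MicroBlaze-specific beyond the numbers coming from this header on that architecture):

/* Sketch: invoking a system call by its __NR_* number.  <sys/syscall.h>
 * pulls in the architecture's <asm/unistd.h>, so the same source builds
 * on MicroBlaze and elsewhere even though the numbers differ. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        long tid = syscall(__NR_gettid);  /* gettid long had no libc wrapper */

        printf("gettid() via raw syscall: %ld\n", tid);
        return 0;
}
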
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index cb0327f204a..70da83a4967 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -465,7 +465,6 @@ ENTRY(_switch_to)
ENTRY(ret_from_fork)
addk r5, r0, r3
- addk r6, r0, r1
brlid r15, schedule_tail
nop
swi r31, r1, PT_R31 /* save r31 in user context. */
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 6c54d4dcdec..7a1a8d4354f 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -44,7 +44,6 @@ static void intc_enable_or_unmask(struct irq_data *d)
unsigned long mask = 1 << d->hwirq;
pr_debug("enable_or_unmask: %ld\n", d->hwirq);
- out_be32(INTC_BASE + SIE, mask);
/* ack level irqs because they can't be acked during
* ack function since the handle_level_irq function
@@ -52,6 +51,8 @@ static void intc_enable_or_unmask(struct irq_data *d)
*/
if (irqd_is_level_type(d))
out_be32(INTC_BASE + IAR, mask);
+
+ out_be32(INTC_BASE + SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
@@ -98,7 +99,7 @@ unsigned int get_irq(void)
return irq;
}
-int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
u32 intr_mask = (u32)d->host_data;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 40823fd1db0..a5b74f729e5 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -162,7 +162,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
* excepting for VM and UMS
* don't touch UMS , CARRY and cache bits
* right now MSR is a copy of parent one */
- childregs->msr |= MSR_BIP;
childregs->msr &= ~MSR_EIP;
childregs->msr |= MSR_IE;
childregs->msr &= ~MSR_VM;
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 4a764ccb9f2..a744e3f1888 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -52,9 +52,9 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
}
#ifdef CONFIG_EARLY_PRINTK
-char *stdout;
+static char *stdout;
-int __init early_init_dt_scan_chosen_serial(unsigned long node,
+static int __init early_init_dt_scan_chosen_serial(unsigned long node,
const char *uname, int depth, void *data)
{
unsigned long l;
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 3903e3d11f5..ac3d0a0f481 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -354,7 +354,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, int in_syscall)
+asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
{
/*
* We want the common case to go fast, which
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index ff6431e5468..1cbace29b5e 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -379,3 +379,4 @@ ENTRY(sys_call_table)
.long sys_sendmmsg
.long sys_process_vm_readv
.long sys_process_vm_writev
+ .long sys_kcmp
diff --git a/arch/microblaze/lib/libgcc.h b/arch/microblaze/lib/libgcc.h
index 05909d58e2f..ab077ef7e14 100644
--- a/arch/microblaze/lib/libgcc.h
+++ b/arch/microblaze/lib/libgcc.h
@@ -22,4 +22,11 @@ typedef union {
long long ll;
} DWunion;
+extern long long __ashldi3(long long u, word_type b);
+extern long long __ashrdi3(long long u, word_type b);
+extern word_type __cmpdi2(long long a, long long b);
+extern long long __lshrdi3(long long u, word_type b);
+extern long long __muldi3(long long u, long long v);
+extern word_type __ucmpdi2(unsigned long long a, unsigned long long b);
+
#endif /* __ASM_LIBGCC_H */
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
index 0585bccb7fa..d3659244ab6 100644
--- a/arch/microblaze/lib/muldi3.c
+++ b/arch/microblaze/lib/muldi3.c
@@ -2,32 +2,28 @@
#include "libgcc.h"
-#define DWtype long long
-#define UWtype unsigned long
-#define UHWtype unsigned short
-
#define W_TYPE_SIZE 32
-#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
-#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
-#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
/* If we still don't have umul_ppmm, define it using plain C. */
#if !defined(umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
do { \
- UWtype __x0, __x1, __x2, __x3; \
- UHWtype __ul, __vl, __uh, __vh; \
+ unsigned long __x0, __x1, __x2, __x3; \
+ unsigned short __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart(u); \
__uh = __ll_highpart(u); \
__vl = __ll_lowpart(v); \
__vh = __ll_highpart(v); \
\
- __x0 = (UWtype) __ul * __vl; \
- __x1 = (UWtype) __ul * __vh; \
- __x2 = (UWtype) __uh * __vl; \
- __x3 = (UWtype) __uh * __vh; \
+ __x0 = (unsigned long) __ul * __vl; \
+ __x1 = (unsigned long) __ul * __vh; \
+ __x2 = (unsigned long) __uh * __vl; \
+ __x3 = (unsigned long) __uh * __vh; \
\
__x1 += __ll_highpart(__x0); /* this can't give carry */\
__x1 += __x2; /* but this indeed can */ \
@@ -47,14 +43,14 @@
})
#endif
-DWtype __muldi3(DWtype u, DWtype v)
+long long __muldi3(long long u, long long v)
{
const DWunion uu = {.ll = u};
const DWunion vv = {.ll = v};
DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
- w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
- + (UWtype) uu.s.high * (UWtype) vv.s.low);
+ w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high
+ + (unsigned long) uu.s.high * (unsigned long) vv.s.low);
return w.ll;
}
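
The umul_ppmm() used above is the classic schoolbook 32x32->64 multiply built from four 16x16 partial products. A stand-alone user-space sketch of the same decomposition (not kernel code), checked against the compiler's native 64-bit multiply:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 32x32->64 multiply from 16-bit halves, mirroring umul_ppmm(). */
static uint64_t mul32x32(uint32_t u, uint32_t v)
{
        uint32_t ul = u & 0xffff, uh = u >> 16;
        uint32_t vl = v & 0xffff, vh = v >> 16;

        uint32_t x0 = ul * vl;             /* low  * low  */
        uint32_t x1 = ul * vh;             /* cross terms */
        uint32_t x2 = uh * vl;
        uint32_t x3 = uh * vh;             /* high * high */

        x1 += x0 >> 16;                    /* this can't give carry */
        x1 += x2;                          /* but this indeed can   */
        if (x1 < x2)                       /* propagate the carry   */
                x3 += 1u << 16;

        uint32_t w1 = x3 + (x1 >> 16);
        uint32_t w0 = (x1 << 16) + (x0 & 0xffff);
        return ((uint64_t)w1 << 32) | w0;
}

int main(void)
{
        uint32_t u = 0xdeadbeef, v = 0x12345678;

        assert(mul32x32(u, v) == (uint64_t)u * v);
        printf("0x%08x * 0x%08x = 0x%016llx\n", (unsigned)u, (unsigned)v,
               (unsigned long long)mul32x32(u, v));
        return 0;
}
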
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4183e62f178..b7dc39c6c84 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -19,6 +19,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -40,8 +41,6 @@ config MIPS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA if 64BIT
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
menu "Machine selection"
@@ -55,8 +54,8 @@ choice
config MIPS_ALCHEMY
bool "Alchemy processor based machines"
select 64BIT_PHYS_ADDR
- select CEVT_R4K_LIB
- select CSRC_R4K_LIB
+ select CEVT_R4K
+ select CSRC_R4K
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
@@ -107,16 +106,16 @@ config ATH79
config BCM47XX
bool "Broadcom BCM47XX based boards"
+ select ARCH_WANT_OPTIONAL_GPIOLIB
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
+ select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
- select GENERIC_GPIO
select SYS_HAS_EARLY_PRINTK
- select CFE
help
Support for BCM47XX based boards
@@ -193,8 +192,8 @@ config MACH_DECSTATION
config MACH_JAZZ
bool "Jazz family of machines"
- select ARC
- select ARC32
+ select FW_ARC
+ select FW_ARC32
select ARCH_MAY_HAVE_PC_FDC
select CEVT_R4K
select CSRC_R4K
@@ -417,27 +416,6 @@ config PMC_MSP
of integrated peripherals, interfaces and DSPs in addition to
a variety of MIPS cores.
-config PMC_YOSEMITE
- bool "PMC-Sierra Yosemite eval board"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_COHERENT
- select HW_HAS_PCI
- select IRQ_CPU
- select IRQ_CPU_RM7K
- select IRQ_CPU_RM9K
- select SWAP_IO_SPACE
- select SYS_HAS_CPU_RM9000
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_BIG_ENDIAN
- select SYS_SUPPORTS_HIGHMEM
- select SYS_SUPPORTS_SMP
- help
- Yosemite is an evaluation board for the RM9000x2 processor
- manufactured by PMC-Sierra.
-
config POWERTV
bool "Cisco PowerTV"
select BOOT_ELF32
@@ -458,8 +436,8 @@ config POWERTV
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
- select ARC
- select ARC32
+ select FW_ARC
+ select FW_ARC32
select BOOT_ELF32
select CEVT_R4K
select CSRC_R4K
@@ -498,8 +476,8 @@ config SGI_IP22
config SGI_IP27
bool "SGI IP27 (Origin200/2000)"
- select ARC
- select ARC64
+ select FW_ARC
+ select FW_ARC64
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
select DMA_COHERENT
@@ -519,8 +497,8 @@ config SGI_IP27
config SGI_IP28
bool "SGI IP28 (Indigo2 R10k) (EXPERIMENTAL)"
depends on EXPERIMENTAL
- select ARC
- select ARC64
+ select FW_ARC
+ select FW_ARC64
select BOOT_ELF64
select CEVT_R4K
select CSRC_R4K
@@ -555,8 +533,8 @@ config SGI_IP28
config SGI_IP32
bool "SGI IP32 (O2)"
- select ARC
- select ARC32
+ select FW_ARC
+ select FW_ARC32
select BOOT_ELF32
select CEVT_R4K
select CSRC_R4K
@@ -674,8 +652,8 @@ config SIBYTE_BIGSUR
config SNI_RM
bool "SNI RM200/300/400"
- select ARC if CPU_LITTLE_ENDIAN
- select ARC32 if CPU_LITTLE_ENDIAN
+ select FW_ARC if CPU_LITTLE_ENDIAN
+ select FW_ARC32 if CPU_LITTLE_ENDIAN
select SNIPROM if CPU_BIG_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
@@ -776,6 +754,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
+ select EDAC_SUPPORT
select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
@@ -819,7 +798,7 @@ config NLM_XLR_BOARD
select CSRC_R4K
select IRQ_CPU
select ARCH_SUPPORTS_MSI
- select ZONE_DMA if 64BIT
+ select ZONE_DMA32 if 64BIT
select SYNC_R4K
select SYS_HAS_EARLY_PRINTK
select USB_ARCH_HAS_OHCI if USB_SUPPORT
@@ -847,7 +826,7 @@ config NLM_XLP_BOARD
select CEVT_R4K
select CSRC_R4K
select IRQ_CPU
- select ZONE_DMA if 64BIT
+ select ZONE_DMA32 if 64BIT
select SYNC_R4K
select SYS_HAS_EARLY_PRINTK
select USE_OF
@@ -908,7 +887,7 @@ config SCHED_OMIT_FRAME_POINTER
#
# Select some configuration options automatically based on user selections.
#
-config ARC
+config FW_ARC
bool
config ARCH_MAY_HAVE_PC_FDC
@@ -926,11 +905,7 @@ config CEVT_DS1287
config CEVT_GT641XX
bool
-config CEVT_R4K_LIB
- bool
-
config CEVT_R4K
- select CEVT_R4K_LIB
bool
config CEVT_SB1250
@@ -948,11 +923,7 @@ config CSRC_IOASIC
config CSRC_POWERTV
bool
-config CSRC_R4K_LIB
- bool
-
config CSRC_R4K
- select CSRC_R4K_LIB
bool
config CSRC_SB1250
@@ -963,7 +934,7 @@ config GPIO_TXX9
select ARCH_REQUIRE_GPIOLIB
bool
-config CFE
+config FW_CFE
bool
config ARCH_DMA_ADDR_T_64BIT
@@ -1079,15 +1050,15 @@ config SYS_SUPPORTS_HUGETLBFS
depends on CPU_SUPPORTS_HUGEPAGES && 64BIT
default y
+config MIPS_HUGE_TLB_SUPPORT
+ def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
+
config IRQ_CPU
bool
config IRQ_CPU_RM7K
bool
-config IRQ_CPU_RM9K
- bool
-
config IRQ_MSP_SLP
bool
@@ -1112,10 +1083,6 @@ config PCI_GT64XXX_PCI0
config NO_EXCEPT_FILL
bool
-config MIPS_RM9122
- bool
- select SERIAL_RM9000
-
config SOC_EMMA2RH
bool
select CEVT_R4K
@@ -1161,9 +1128,6 @@ config SOC_PNX8550
config SWAP_IO_SPACE
bool
-config SERIAL_RM9000
- bool
-
config SGI_HAS_INDYDOG
bool
@@ -1185,7 +1149,7 @@ config SGI_HAS_I8042
config DEFAULT_SGI_PARTITION
bool
-config ARC32
+config FW_ARC32
bool
config SNIPROM
@@ -1218,7 +1182,7 @@ config ARC_PROMLIB
depends on MACH_JAZZ || SNI_RM || SGI_IP22 || SGI_IP28 || SGI_IP32
default y
-config ARC64
+config FW_ARC64
bool
config BOOT_ELF64
@@ -1370,6 +1334,7 @@ config CPU_R4X00
depends on SYS_HAS_CPU_R4X00
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
help
MIPS Technologies R4000-series processors other than 4300, including
the R4000, R4400, R4600, and 4700.
@@ -1380,12 +1345,14 @@ config CPU_TX49XX
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
config CPU_R5000
bool "R5000"
depends on SYS_HAS_CPU_R5000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
help
MIPS Technologies R5000-series processors other than the Nevada.
@@ -1394,6 +1361,7 @@ config CPU_R5432
depends on SYS_HAS_CPU_R5432
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
config CPU_R5500
bool "R5500"
@@ -1419,6 +1387,7 @@ config CPU_NEVADA
depends on SYS_HAS_CPU_NEVADA
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HUGEPAGES
help
QED / PMC-Sierra RM52xx-series ("Nevada") processors.
@@ -1439,6 +1408,7 @@ config CPU_R10000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
help
MIPS Technologies R10000-series processors.
@@ -1449,15 +1419,7 @@ config CPU_RM7000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
-
-config CPU_RM9000
- bool "RM9000"
- depends on SYS_HAS_CPU_RM9000
- select CPU_HAS_PREFETCH
- select CPU_SUPPORTS_32BIT_KERNEL
- select CPU_SUPPORTS_64BIT_KERNEL
- select CPU_SUPPORTS_HIGHMEM
- select WEAK_ORDERING
+ select CPU_SUPPORTS_HUGEPAGES
config CPU_SB1
bool "SB1"
@@ -1465,6 +1427,7 @@ config CPU_SB1
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
select WEAK_ORDERING
config CPU_CAVIUM_OCTEON
@@ -1528,9 +1491,9 @@ config CPU_XLR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
- select CPU_SUPPORTS_HUGEPAGES
help
Netlogic Microsystems XLR/XLS processors.
@@ -1544,6 +1507,7 @@ config CPU_XLP
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
+ select CPU_MIPSR2
help
Netlogic Microsystems XLP processors.
endchoice
@@ -1591,6 +1555,7 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
config CPU_LOONGSON1
bool
@@ -1675,9 +1640,6 @@ config SYS_HAS_CPU_R10000
config SYS_HAS_CPU_RM7000
bool
-config SYS_HAS_CPU_RM9000
- bool
-
config SYS_HAS_CPU_SB1
bool
@@ -1757,7 +1719,7 @@ config CPU_SUPPORTS_UNCACHED_ACCELERATED
bool
config MIPS_PGD_C0_CONTEXT
bool
- default y if 64BIT && CPU_MIPSR2
+ default y if 64BIT && CPU_MIPSR2 && !CPU_XLP
#
# Set to y for ptrace access to watch registers.
@@ -2188,7 +2150,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON)
+ depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
default y
help
Enable hardware performance counter support for perf events. If
@@ -2366,6 +2328,29 @@ config KEXEC
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
+config CRASH_DUMP
+ bool "Kernel crash dumps"
+ help
+ Generate a crash dump after being started by kexec.
+ This should normally only be set in special crash dump kernels,
+ which are loaded by the main kernel (via kexec-tools) into
+ a specially reserved region and then executed after
+ a crash by kdump/kexec. The crash dump kernel must be compiled
+ to run at a memory address not used by the main kernel or
+ firmware, using PHYSICAL_START.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded"
+ default "0xffffffff84000000" if 64BIT
+ default "0x84000000" if 32BIT
+ depends on CRASH_DUMP
+ help
+ This gives the CKSEG0 or KSEG0 address where the kernel is loaded.
+ If you plan to use the kernel for capturing a crash dump, change
+ this value to the start of the reserved region (the "X" value as
+ specified in the "crashkernel=YM@XM" command line boot parameter
+ passed to the panicked kernel).
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -2572,6 +2557,8 @@ source "net/Kconfig"
source "drivers/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "fs/Kconfig"
source "arch/mips/Kconfig.debug"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 654b1ad39f0..f2dfd404550 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -145,8 +145,6 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
-Wa,--trap
cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
-Wa,--trap
-cflags-$(CONFIG_CPU_RM9000) += $(call cc-option,-march=rm9000,-march=r5000) \
- -Wa,--trap
cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
-Wa,--trap
cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap
@@ -173,9 +171,9 @@ endif
#
# Firmware support
#
-libs-$(CONFIG_ARC) += arch/mips/fw/arc/
-libs-$(CONFIG_CFE) += arch/mips/fw/cfe/
-libs-$(CONFIG_SNIPROM) += arch/mips/fw/sni/
+libs-$(CONFIG_FW_ARC) += arch/mips/fw/arc/
+libs-$(CONFIG_FW_CFE) += arch/mips/fw/cfe/
+libs-$(CONFIG_FW_SNIPROM) += arch/mips/fw/sni/
libs-y += arch/mips/fw/lib/
#
@@ -192,6 +190,10 @@ endif
#
include $(srctree)/arch/mips/Kbuild.platforms
+ifdef CONFIG_PHYSICAL_START
+load-y = $(CONFIG_PHYSICAL_START)
+endif
+
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 1bbc24b0868..7477fd2127a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -202,8 +202,11 @@ static struct resource physmap_flash_resource = {
.end = 0x107fffff,
};
+static const char *ar7_probe_types[] = { "ar7part", NULL };
+
static struct physmap_flash_data physmap_flash_data = {
.width = 2,
+ .part_probe_types = ar7_probe_types,
};
static struct platform_device physmap_flash = {
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index b311be45a72..d7af29f1fcf 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -9,6 +9,7 @@ config BCM47XX_SSB
select SSB_EMBEDDED
select SSB_B43_PCI_BRIDGE if PCI
select SSB_PCICORE_HOSTMODE if PCI
+ select SSB_DRIVER_GPIO
default y
help
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -23,6 +24,7 @@ config BCM47XX_BCMA
select BCMA_DRIVER_MIPS
select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
+ select BCMA_DRIVER_GPIO
default y
help
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 4389de182eb..1a3567f07e7 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/gpio.c b/arch/mips/bcm47xx/gpio.c
deleted file mode 100644
index 5ebdf62e96b..00000000000
--- a/arch/mips/bcm47xx/gpio.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
- */
-
-#include <linux/export.h>
-#include <linux/ssb/ssb.h>
-#include <linux/ssb/ssb_driver_chipcommon.h>
-#include <linux/ssb/ssb_driver_extif.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
-#include <asm/mach-bcm47xx/gpio.h>
-
-#if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES)
-static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES);
-#else
-static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES);
-#endif
-
-int gpio_request(unsigned gpio, const char *tag)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
- ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
- return -EINVAL;
-
- if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
- ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
- return -EINVAL;
-
- if (test_and_set_bit(gpio, gpio_in_use))
- return -EBUSY;
-
- return 0;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
- return -EINVAL;
-
- if (test_and_set_bit(gpio, gpio_in_use))
- return -EBUSY;
-
- return 0;
-#endif
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_request);
-
-void gpio_free(unsigned gpio)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
- ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
- return;
-
- if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
- ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
- return;
-
- clear_bit(gpio, gpio_in_use);
- return;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
- return;
-
- clear_bit(gpio, gpio_in_use);
- return;
-#endif
- }
-}
-EXPORT_SYMBOL(gpio_free);
-
-int gpio_to_irq(unsigned gpio)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco))
- return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2;
- else if (ssb_extif_available(&bcm47xx_bus.ssb.extif))
- return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2;
- else
- return -EINVAL;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2;
-#endif
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(gpio_to_irq);
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index f6e9063cc4c..8c155afb129 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -27,6 +28,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
+#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
@@ -127,6 +129,8 @@ static __init void prom_init_mem(void)
{
unsigned long mem;
unsigned long max;
+ unsigned long off;
+ struct cpuinfo_mips *c = &current_cpu_data;
/* Figure out memory size by finding aliases.
*
@@ -143,18 +147,26 @@ static __init void prom_init_mem(void)
* max contains the biggest possible address supported by the platform.
* If the method wants to try something above we assume 128MB ram.
*/
- max = ((unsigned long)(prom_init) | ((128 << 20) - 1));
+ off = (unsigned long)prom_init;
+ max = off | ((128 << 20) - 1);
for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) {
- if (((unsigned long)(prom_init) + mem) > max) {
+ if ((off + mem) > max) {
mem = (128 << 20);
printk(KERN_DEBUG "assume 128MB RAM\n");
break;
}
- if (*(unsigned long *)((unsigned long)(prom_init) + mem) ==
- *(unsigned long *)(prom_init))
+ if (!memcmp(prom_init, prom_init + mem, 32))
break;
}
+ /* Ignore the last page when the DDR size is 128M. Cached
+ * accesses to the last page cause the processor to prefetch
+ * from addresses above 128M, stepping out of the DDR address
+ * space.
+ */
+ if (c->cputype == CPU_74K && (mem == (128 << 20)))
+ mem -= 0x1000;
+
add_memory_region(0, mem, BOOT_MEM_RAM);
}
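
The probe above sizes RAM by looking for the address at which the kernel image starts to alias, i.e. where the 32 bytes at prom_init reappear mem bytes higher because the unused address lines wrap back onto the same physical memory. A user-space simulation of that idea (the names RAM_SIZE and phys() are invented for the illustration; real hardware does the wrapping itself):

#include <stdio.h>
#include <string.h>

#define RAM_SIZE   (8 << 20)      /* pretend the SoC has 8 MB of RAM */
#define MAX_PROBE  (128 << 20)

static unsigned char ram[RAM_SIZE];

/* Read through a "physical" address: anything past RAM_SIZE wraps,
 * i.e. aliases back into the same memory. */
static const unsigned char *phys(unsigned long addr)
{
        return &ram[addr % RAM_SIZE];
}

int main(void)
{
        unsigned long base = 0x1234, mem;

        memset(ram, 0xa5, sizeof(ram));
        memcpy(&ram[base], "bcm47xx alias probe signature...", 32);

        for (mem = 1 << 20; mem < MAX_PROBE; mem += 1 << 20)
                if (!memcmp(phys(base), phys(base + mem), 32))
                        break;          /* found the alias -> RAM size */

        printf("detected %lu MB of RAM\n", mem >> 20);
        return 0;
}
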
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 95bf4d7bac2..4d54b58dbd3 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -94,7 +94,7 @@ static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
- bcm47xx_fill_sprom(out, prefix);
+ bcm47xx_fill_sprom(out, prefix, false);
return 0;
} else {
printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n");
@@ -113,7 +113,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
bcm47xx_fill_ssb_boardinfo(&iv->boardinfo, NULL);
memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
- bcm47xx_fill_sprom(&iv->sprom, NULL);
+ bcm47xx_fill_sprom(&iv->sprom, NULL, false);
if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
@@ -165,16 +165,17 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
- bcm47xx_fill_sprom(out, prefix);
+ bcm47xx_fill_sprom(out, prefix, false);
return 0;
case BCMA_HOSTTYPE_SOC:
memset(out, 0, sizeof(struct ssb_sprom));
- bcm47xx_fill_sprom_ethernet(out, NULL);
core = bcma_find_core(bus, BCMA_CORE_80211);
if (core) {
snprintf(prefix, sizeof(prefix), "sb/%u/",
core->core_index);
- bcm47xx_fill_sprom(out, prefix);
+ bcm47xx_fill_sprom(out, prefix, true);
+ } else {
+ bcm47xx_fill_sprom(out, NULL, false);
}
return 0;
default:
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index d3a889745e2..289cc0a3863 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -42,25 +42,39 @@ static void create_key(const char *prefix, const char *postfix,
snprintf(buf, len, "%s", name);
}
+static int get_nvram_var(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len, bool fallback)
+{
+ char key[40];
+ int err;
+
+ create_key(prefix, postfix, name, key, sizeof(key));
+
+ err = nvram_getenv(key, buf, len);
+ if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) {
+ create_key(NULL, postfix, name, key, sizeof(key));
+ err = nvram_getenv(key, buf, len);
+ }
+ return err;
+}
+
#define NVRAM_READ_VAL(type) \
static void nvram_read_ ## type (const char *prefix, \
const char *postfix, const char *name, \
- type *val, type allset) \
+ type *val, type allset, bool fallback) \
{ \
char buf[100]; \
- char key[40]; \
int err; \
type var; \
\
- create_key(prefix, postfix, name, key, sizeof(key)); \
- \
- err = nvram_getenv(key, buf, sizeof(buf)); \
+ err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
+ fallback); \
if (err < 0) \
return; \
err = kstrto ## type (buf, 0, &var); \
if (err) { \
- pr_warn("can not parse nvram name %s with value %s" \
- " got %i", key, buf, err); \
+ pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
+ prefix, name, postfix, buf, err); \
return; \
} \
if (allset && var == allset) \
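
NVRAM_READ_VAL() stamps out one nvram_read_<type>() helper per integer type by pasting the type name into both the generated function's name and the kstrto<type>() parser it calls. A stand-alone sketch of that token-pasting pattern follows; the read_*/parse_* names are hypothetical and plain strtoul() stands in for the kernel's kstrto* helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint8_t  u8;
typedef uint16_t u16;

static int parse_u8(const char *s, u8 *out)   { *out = (u8)strtoul(s, NULL, 0);  return 0; }
static int parse_u16(const char *s, u16 *out) { *out = (u16)strtoul(s, NULL, 0); return 0; }

/* One macro generates a reader per type: ## glues "u8"/"u16" into both
 * the new function's name and the parser it delegates to. */
#define DEFINE_READER(type)                                     \
static void read_ ## type(const char *val, type *dst)           \
{                                                               \
        if (parse_ ## type(val, dst))                           \
                fprintf(stderr, "cannot parse %s\n", val);      \
}

DEFINE_READER(u8)       /* -> read_u8(),  calls parse_u8()  */
DEFINE_READER(u16)      /* -> read_u16(), calls parse_u16() */
#undef DEFINE_READER

int main(void)
{
        u8 a;
        u16 b;

        read_u8("0x2a", &a);
        read_u16("1234", &b);
        printf("a=%u b=%u\n", (unsigned)a, (unsigned)b);
        return 0;
}
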
@@ -76,22 +90,19 @@ NVRAM_READ_VAL(u32)
#undef NVRAM_READ_VAL
static void nvram_read_u32_2(const char *prefix, const char *name,
- u16 *val_lo, u16 *val_hi)
+ u16 *val_lo, u16 *val_hi, bool fallback)
{
char buf[100];
- char key[40];
int err;
u32 val;
- create_key(prefix, NULL, name, key, sizeof(key));
-
- err = nvram_getenv(key, buf, sizeof(buf));
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(buf, 0, &val);
if (err) {
- pr_warn("can not parse nvram name %s with value %s got %i",
- key, buf, err);
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
return;
}
*val_lo = (val & 0x0000FFFFU);
@@ -99,22 +110,20 @@ static void nvram_read_u32_2(const char *prefix, const char *name,
}
static void nvram_read_leddc(const char *prefix, const char *name,
- u8 *leddc_on_time, u8 *leddc_off_time)
+ u8 *leddc_on_time, u8 *leddc_off_time,
+ bool fallback)
{
char buf[100];
- char key[40];
int err;
u32 val;
- create_key(prefix, NULL, name, key, sizeof(key));
-
- err = nvram_getenv(key, buf, sizeof(buf));
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(buf, 0, &val);
if (err) {
- pr_warn("can not parse nvram name %s with value %s got %i",
- key, buf, err);
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
return;
}
@@ -126,355 +135,435 @@ static void nvram_read_leddc(const char *prefix, const char *name,
}
static void nvram_read_macaddr(const char *prefix, const char *name,
- u8 (*val)[6])
+ u8 (*val)[6], bool fallback)
{
char buf[100];
- char key[40];
int err;
- create_key(prefix, NULL, name, key, sizeof(key));
-
- err = nvram_getenv(key, buf, sizeof(buf));
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
+
nvram_parse_macaddr(buf, *val);
}
static void nvram_read_alpha2(const char *prefix, const char *name,
- char (*val)[2])
+ char (*val)[2], bool fallback)
{
char buf[10];
- char key[40];
int err;
- create_key(prefix, NULL, name, key, sizeof(key));
-
- err = nvram_getenv(key, buf, sizeof(buf));
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
if (buf[0] == '0')
return;
if (strlen(buf) > 2) {
- pr_warn("alpha2 is too long %s", buf);
+ pr_warn("alpha2 is too long %s\n", buf);
return;
}
memcpy(val, buf, sizeof(val));
}
static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
- const char *prefix)
+ const char *prefix, bool fallback)
{
- nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0);
- if (!sprom->board_rev)
- nvram_read_u16(NULL, NULL, "boardrev", &sprom->board_rev, 0);
- nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0);
- nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff);
- nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff);
- nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff);
- nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff);
- nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0);
- nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0);
- nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0);
- nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0);
- nvram_read_alpha2(prefix, "ccode", &sprom->alpha2);
+ nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
+ nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
+ nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
+ nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff, fallback);
+ nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0,
+ fallback);
+ nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0,
+ fallback);
+ nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0,
+ fallback);
+ nvram_read_alpha2(prefix, "ccode", &sprom->alpha2, fallback);
}
static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom,
- const char *prefix)
+ const char *prefix, bool fallback)
{
- nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0);
- nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0);
- nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0);
- nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0);
- nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0);
- nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0);
- nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0);
- nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0);
- nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0);
- nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0);
+ nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0, fallback);
+ nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0, fallback);
+ nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0, fallback);
+ nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0, fallback);
+ nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0, fallback);
}
-static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0);
- nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0);
+ nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0, fallback);
}
static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom,
- const char *prefix)
-{
- nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0);
- nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0);
- nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0);
- nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0);
- nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0);
- nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0);
- nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0);
- nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0);
- nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0);
-}
-
-static void bcm47xx_fill_sprom_r2(struct ssb_sprom *sprom, const char *prefix)
+ const char *prefix, bool fallback)
{
- nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
- &sprom->boardflags_hi);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
+ nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0, fallback);
+ nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0, fallback);
+ nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0,
+ fallback);
}
-static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0);
- nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0);
- nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0);
- nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0);
- nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0);
- nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0);
- nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0);
- nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0);
- nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0);
- nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0);
- nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0);
- nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0);
- nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0);
- nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0);
+ nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0, fallback);
+ nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0, fallback);
+ nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0, fallback);
+ nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0, fallback);
}
-static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
- &sprom->boardflags_hi);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
- nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
+ nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
- &sprom->leddc_off_time);
+ &sprom->leddc_off_time, fallback);
}
static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom,
- const char *prefix)
+ const char *prefix, bool fallback)
{
- nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
- &sprom->boardflags_hi);
- nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
- &sprom->boardflags2_hi);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
- nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
- nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0);
- nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0);
- nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf);
- nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf);
- nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff);
+ nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
+ nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0,
+ fallback);
+ nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf, fallback);
+ nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf, fallback);
+ nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff,
+ fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
- &sprom->leddc_off_time);
+ &sprom->leddc_off_time, fallback);
}
-static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0);
- nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0);
- nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0);
- nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0);
- nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0);
- nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0);
- nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0);
- nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0);
- nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0);
- nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0);
- nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0);
- nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0);
- nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0);
+ nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0, fallback);
+ nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0, fallback);
+ nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0, fallback);
+ nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0, fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0,
+ fallback);
}
-static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0);
- nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0);
- nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0);
- nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0);
- nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0);
- nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0);
- nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0);
- nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0);
- nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0);
- nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0);
- nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0);
- nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0);
- nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0);
- nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0);
- nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0);
- nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0);
+ nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0,
+ fallback);
}
-static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0);
+ nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "extpagain2g",
- &sprom->fem.ghz2.extpa_gain, 0);
+ &sprom->fem.ghz2.extpa_gain, 0, fallback);
nvram_read_u8(prefix, NULL, "pdetrange2g",
- &sprom->fem.ghz2.pdet_range, 0);
- nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0);
- nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0);
- nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0);
+ &sprom->fem.ghz2.pdet_range, 0, fallback);
+ nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "extpagain5g",
- &sprom->fem.ghz5.extpa_gain, 0);
+ &sprom->fem.ghz5.extpa_gain, 0, fallback);
nvram_read_u8(prefix, NULL, "pdetrange5g",
- &sprom->fem.ghz5.pdet_range, 0);
- nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0);
- nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0);
- nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0);
- nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0);
- nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0);
- nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0);
+ &sprom->fem.ghz5.pdet_range, 0, fallback);
+ nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "tempsense_slope",
- &sprom->tempsense_slope, 0);
- nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0);
+ &sprom->tempsense_slope, 0, fallback);
+ nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "tempsense_option",
- &sprom->tempsense_option, 0);
+ &sprom->tempsense_option, 0, fallback);
nvram_read_u8(prefix, NULL, "freqoffset_corr",
- &sprom->freqoffset_corr, 0);
- nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0);
- nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0);
- nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0);
- nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0);
+ &sprom->freqoffset_corr, 0, fallback);
+ nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0, fallback);
nvram_read_u8(prefix, NULL, "phycal_tempdelta",
- &sprom->phycal_tempdelta, 0);
- nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0);
+ &sprom->phycal_tempdelta, 0, fallback);
+ nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "temps_hysteresis",
- &sprom->temps_hysteresis, 0);
- nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0);
- nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0);
+ &sprom->temps_hysteresis, 0, fallback);
+ nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0,
+ fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga0",
- &sprom->rxgainerr2ga[0], 0);
+ &sprom->rxgainerr2ga[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga1",
- &sprom->rxgainerr2ga[1], 0);
+ &sprom->rxgainerr2ga[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga2",
- &sprom->rxgainerr2ga[2], 0);
+ &sprom->rxgainerr2ga[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla0",
- &sprom->rxgainerr5gla[0], 0);
+ &sprom->rxgainerr5gla[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla1",
- &sprom->rxgainerr5gla[1], 0);
+ &sprom->rxgainerr5gla[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla2",
- &sprom->rxgainerr5gla[2], 0);
+ &sprom->rxgainerr5gla[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma0",
- &sprom->rxgainerr5gma[0], 0);
+ &sprom->rxgainerr5gma[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma1",
- &sprom->rxgainerr5gma[1], 0);
+ &sprom->rxgainerr5gma[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma2",
- &sprom->rxgainerr5gma[2], 0);
+ &sprom->rxgainerr5gma[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha0",
- &sprom->rxgainerr5gha[0], 0);
+ &sprom->rxgainerr5gha[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha1",
- &sprom->rxgainerr5gha[1], 0);
+ &sprom->rxgainerr5gha[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha2",
- &sprom->rxgainerr5gha[2], 0);
+ &sprom->rxgainerr5gha[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua0",
- &sprom->rxgainerr5gua[0], 0);
+ &sprom->rxgainerr5gua[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua1",
- &sprom->rxgainerr5gua[1], 0);
+ &sprom->rxgainerr5gua[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua2",
- &sprom->rxgainerr5gua[2], 0);
- nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0);
- nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0);
- nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0);
+ &sprom->rxgainerr5gua[2], 0, fallback);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0,
+ fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla0",
- &sprom->noiselvl5gla[0], 0);
+ &sprom->noiselvl5gla[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla1",
- &sprom->noiselvl5gla[1], 0);
+ &sprom->noiselvl5gla[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla2",
- &sprom->noiselvl5gla[2], 0);
+ &sprom->noiselvl5gla[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma0",
- &sprom->noiselvl5gma[0], 0);
+ &sprom->noiselvl5gma[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma1",
- &sprom->noiselvl5gma[1], 0);
+ &sprom->noiselvl5gma[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma2",
- &sprom->noiselvl5gma[2], 0);
+ &sprom->noiselvl5gma[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha0",
- &sprom->noiselvl5gha[0], 0);
+ &sprom->noiselvl5gha[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha1",
- &sprom->noiselvl5gha[1], 0);
+ &sprom->noiselvl5gha[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha2",
- &sprom->noiselvl5gha[2], 0);
+ &sprom->noiselvl5gha[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua0",
- &sprom->noiselvl5gua[0], 0);
+ &sprom->noiselvl5gua[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua1",
- &sprom->noiselvl5gua[1], 0);
+ &sprom->noiselvl5gua[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua2",
- &sprom->noiselvl5gua[2], 0);
+ &sprom->noiselvl5gua[2], 0, fallback);
nvram_read_u8(prefix, NULL, "pcieingress_war",
- &sprom->pcieingress_war, 0);
+ &sprom->pcieingress_war, 0, fallback);
}
-static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0);
- nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0);
+ nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0,
+ fallback);
nvram_read_u32(prefix, NULL, "legofdmbw202gpo",
- &sprom->legofdmbw202gpo, 0);
+ &sprom->legofdmbw202gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo",
- &sprom->legofdmbw20ul2gpo, 0);
+ &sprom->legofdmbw20ul2gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205glpo",
- &sprom->legofdmbw205glpo, 0);
+ &sprom->legofdmbw205glpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo",
- &sprom->legofdmbw20ul5glpo, 0);
+ &sprom->legofdmbw20ul5glpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205gmpo",
- &sprom->legofdmbw205gmpo, 0);
+ &sprom->legofdmbw205gmpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo",
- &sprom->legofdmbw20ul5gmpo, 0);
+ &sprom->legofdmbw20ul5gmpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205ghpo",
- &sprom->legofdmbw205ghpo, 0);
+ &sprom->legofdmbw205ghpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo",
- &sprom->legofdmbw20ul5ghpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0);
+ &sprom->legofdmbw20ul5ghpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0,
+ fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo",
- &sprom->mcsbw20ul5glpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0);
+ &sprom->mcsbw20ul5glpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0,
+ fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo",
- &sprom->mcsbw20ul5gmpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0);
+ &sprom->mcsbw20ul5gmpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0,
+ fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0,
+ fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo",
- &sprom->mcsbw20ul5ghpo, 0);
- nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0);
- nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0);
+ &sprom->mcsbw20ul5ghpo, 0, fallback);
+ nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0, fallback);
nvram_read_u16(prefix, NULL, "legofdm40duppo",
- &sprom->legofdm40duppo, 0);
- nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0);
- nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0);
+ &sprom->legofdm40duppo, 0, fallback);
+ nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0, fallback);
+ nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0, fallback);
}
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
- const char *prefix)
+ const char *prefix, bool fallback)
{
char postfix[2];
int i;
@@ -483,46 +572,46 @@ static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u8(prefix, postfix, "maxp2ga",
- &pwr_info->maxpwr_2g, 0);
+ &pwr_info->maxpwr_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt2ga",
- &pwr_info->itssi_2g, 0);
+ &pwr_info->itssi_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt5ga",
- &pwr_info->itssi_5g, 0);
+ &pwr_info->itssi_5g, 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw0a",
- &pwr_info->pa_2g[0], 0);
+ &pwr_info->pa_2g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw1a",
- &pwr_info->pa_2g[1], 0);
+ &pwr_info->pa_2g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw2a",
- &pwr_info->pa_2g[2], 0);
+ &pwr_info->pa_2g[2], 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5ga",
- &pwr_info->maxpwr_5g, 0);
+ &pwr_info->maxpwr_5g, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gha",
- &pwr_info->maxpwr_5gh, 0);
+ &pwr_info->maxpwr_5gh, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gla",
- &pwr_info->maxpwr_5gl, 0);
+ &pwr_info->maxpwr_5gl, 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw0a",
- &pwr_info->pa_5g[0], 0);
+ &pwr_info->pa_5g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw1a",
- &pwr_info->pa_5g[1], 0);
+ &pwr_info->pa_5g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw2a",
- &pwr_info->pa_5g[2], 0);
+ &pwr_info->pa_5g[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw0a",
- &pwr_info->pa_5gl[0], 0);
+ &pwr_info->pa_5gl[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw1a",
- &pwr_info->pa_5gl[1], 0);
+ &pwr_info->pa_5gl[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw2a",
- &pwr_info->pa_5gl[2], 0);
+ &pwr_info->pa_5gl[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw0a",
- &pwr_info->pa_5gh[0], 0);
+ &pwr_info->pa_5gh[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw1a",
- &pwr_info->pa_5gh[1], 0);
+ &pwr_info->pa_5gh[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw2a",
- &pwr_info->pa_5gh[2], 0);
+ &pwr_info->pa_5gh[2], 0, fallback);
}
}
static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
- const char *prefix)
+ const char *prefix, bool fallback)
{
char postfix[2];
int i;
@@ -531,91 +620,112 @@ static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u16(prefix, postfix, "pa2gw3a",
- &pwr_info->pa_2g[3], 0);
+ &pwr_info->pa_2g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw3a",
- &pwr_info->pa_5g[3], 0);
+ &pwr_info->pa_5g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw3a",
- &pwr_info->pa_5gl[3], 0);
+ &pwr_info->pa_5gl[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw3a",
- &pwr_info->pa_5gh[3], 0);
+ &pwr_info->pa_5gh[3], 0, fallback);
}
}
-void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix)
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
{
- nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac);
- nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0);
- nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0);
-
- nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac);
- nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0);
- nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0);
+ nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac, fallback);
+ nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac, fallback);
+ nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac, fallback);
+ nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac, fallback);
+}
- nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac);
- nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac);
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
+ fallback);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0,
+ fallback);
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi, fallback);
+ nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+ &sprom->boardflags2_hi, fallback);
}
-void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
{
- bcm47xx_fill_sprom_ethernet(sprom, prefix);
+ bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+ bcm47xx_fill_board_data(sprom, prefix, fallback);
- nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0);
+ nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
switch (sprom->revision) {
case 1:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r1(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
break;
case 2:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r2389(sprom, prefix);
- bcm47xx_fill_sprom_r2(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
break;
case 3:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r2389(sprom, prefix);
- bcm47xx_fill_sprom_r389(sprom, prefix);
- bcm47xx_fill_sprom_r3(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r3(sprom, prefix, fallback);
break;
case 4:
case 5:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r4589(sprom, prefix);
- bcm47xx_fill_sprom_r458(sprom, prefix);
- bcm47xx_fill_sprom_r45(sprom, prefix);
- bcm47xx_fill_sprom_path_r4589(sprom, prefix);
- bcm47xx_fill_sprom_path_r45(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r45(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
break;
case 8:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r2389(sprom, prefix);
- bcm47xx_fill_sprom_r389(sprom, prefix);
- bcm47xx_fill_sprom_r4589(sprom, prefix);
- bcm47xx_fill_sprom_r458(sprom, prefix);
- bcm47xx_fill_sprom_r89(sprom, prefix);
- bcm47xx_fill_sprom_path_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
case 9:
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r2389(sprom, prefix);
- bcm47xx_fill_sprom_r389(sprom, prefix);
- bcm47xx_fill_sprom_r4589(sprom, prefix);
- bcm47xx_fill_sprom_r89(sprom, prefix);
- bcm47xx_fill_sprom_r9(sprom, prefix);
- bcm47xx_fill_sprom_path_r4589(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r9(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
default:
pr_warn("Unsupported SPROM revision %d detected. Will extract"
" v1\n", sprom->revision);
sprom->revision = 1;
- bcm47xx_fill_sprom_r1234589(sprom, prefix);
- bcm47xx_fill_sprom_r12389(sprom, prefix);
- bcm47xx_fill_sprom_r1(sprom, prefix);
+ bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
}
}
@@ -623,11 +733,12 @@ void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
const char *prefix)
{
- nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
+ nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0,
+ true);
if (!boardinfo->vendor)
boardinfo->vendor = SSB_BOARDVENDOR_BCM;
- nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
+ nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
}
#endif
@@ -635,10 +746,11 @@ void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
const char *prefix)
{
- nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
+ nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0,
+ true);
if (!boardinfo->vendor)
boardinfo->vendor = SSB_BOARDVENDOR_BCM;
- nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
+ nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
}
#endif
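
The hunks above thread a new "fallback" flag through every nvram_read_* call so SPROM values can be taken either from the prefixed NVRAM variables or, when allowed, from the unprefixed ones. The helpers themselves are defined elsewhere in sprom.c and are not part of this diff; the following is a minimal sketch of how such a reader might behave, where nvram_get_param() and the exact "allset" handling are assumptions made purely for illustration.

/*
 * Sketch only: not the real helper. nvram_get_param() is an assumed lookup
 * function; the real code may parse and bound-check differently.
 */
static void nvram_read_u16(const char *prefix, const char *postfix,
			   const char *name, u16 *val, u16 allset,
			   bool fallback)
{
	char key[48];
	const char *str;
	unsigned long res;

	/* Prefixed variable first, e.g. "pci/1/1/boardflags". */
	snprintf(key, sizeof(key), "%s%s%s", prefix ? prefix : "",
		 name, postfix ? postfix : "");
	str = nvram_get_param(key);		/* assumed lookup helper */
	if (!str && fallback)
		str = nvram_get_param(name);	/* retry without the prefix */
	if (!str || kstrtoul(str, 0, &res))
		return;				/* leave *val untouched */
	if (allset && res == allset)
		return;				/* treat all-ones as "not set" */
	*val = (u16)res;
}
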
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
index e80d585731a..9d111e8087e 100644
--- a/arch/mips/bcm47xx/wgt634u.c
+++ b/arch/mips/bcm47xx/wgt634u.c
@@ -11,6 +11,7 @@
#include <linux/leds.h>
#include <linux/mtd/physmap.h>
#include <linux/ssb/ssb.h>
+#include <linux/ssb/ssb_embedded.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/gpio.h>
@@ -116,7 +117,8 @@ static irqreturn_t gpio_interrupt(int irq, void *ignored)
/* Interrupts are level triggered, revert the interrupt polarity
to clear the interrupt. */
- gpio_polarity(WGT634U_GPIO_RESET, state);
+ ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << WGT634U_GPIO_RESET,
+ state ? 1 << WGT634U_GPIO_RESET : 0);
if (!state) {
printk(KERN_INFO "Reset button pressed");
@@ -150,7 +152,9 @@ static int __init wgt634u_init(void)
gpio_interrupt, IRQF_SHARED,
"WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) {
gpio_direction_input(WGT634U_GPIO_RESET);
- gpio_intmask(WGT634U_GPIO_RESET, 1);
+ ssb_gpio_intmask(&bcm47xx_bus.ssb,
+ 1 << WGT634U_GPIO_RESET,
+ 1 << WGT634U_GPIO_RESET);
ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco,
SSB_CHIPCO_IRQ_GPIO,
SSB_CHIPCO_IRQ_GPIO);
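
The wgt634u.c hunk above replaces the old pin-number based gpio_intmask()/gpio_polarity() calls with the ssb_embedded helpers, which take the ssb bus plus a bit mask and a value. A hedged sketch of that (bus, mask, value) convention follows; the pin argument and the helper function wrapping it are assumptions for illustration only.

/* Sketch of the mask convention used by the ssb GPIO helpers. */
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>

static void example_arm_gpio_irq(struct ssb_bus *bus, unsigned int pin)
{
	/* Unmask the GPIO interrupt for this single pin. */
	ssb_gpio_intmask(bus, 1 << pin, 1 << pin);
	/* Start active-high; the handler flips polarity to ack the level IRQ. */
	ssb_gpio_polarity(bus, 1 << pin, 0);
}
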
diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
index 9bbb30a9dc2..ac2807397c1 100644
--- a/arch/mips/bcm63xx/Makefile
+++ b/arch/mips/bcm63xx/Makefile
@@ -1,6 +1,7 @@
-obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
- dev-dsp.o dev-enet.o dev-flash.o dev-pcmcia.o dev-rng.o \
- dev-spi.o dev-uart.o dev-wdt.o dev-usb-usbd.o
+obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \
+ setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \
+ dev-pcmcia.o dev-rng.o dev-spi.o dev-uart.o dev-wdt.o \
+ dev-usb-usbd.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += boards/
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 1cd4d73f23c..73be9b34969 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -18,6 +18,7 @@
#include <bcm63xx_dev_uart.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
+#include <bcm63xx_nvram.h>
#include <bcm63xx_dev_pci.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_dev_dsp.h>
@@ -29,8 +30,6 @@
#define PFX "board_bcm963xx: "
-static struct bcm963xx_nvram nvram;
-static unsigned int mac_addr_used;
static struct board_info board;
/*
@@ -716,50 +715,14 @@ const char *board_get_name(void)
}
/*
- * register & return a new board mac address
- */
-static int board_get_mac_address(u8 *mac)
-{
- u8 *oui;
- int count;
-
- if (mac_addr_used >= nvram.mac_addr_count) {
- printk(KERN_ERR PFX "not enough mac address\n");
- return -ENODEV;
- }
-
- memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
- oui = mac + ETH_ALEN/2 - 1;
- count = mac_addr_used;
-
- while (count--) {
- u8 *p = mac + ETH_ALEN - 1;
-
- do {
- (*p)++;
- if (*p != 0)
- break;
- p--;
- } while (p != oui);
-
- if (p == oui) {
- printk(KERN_ERR PFX "unable to fetch mac address\n");
- return -ENODEV;
- }
- }
-
- mac_addr_used++;
- return 0;
-}
-
-/*
* early init callback, read nvram data from flash and checksum it
*/
void __init board_prom_init(void)
{
- unsigned int check_len, i;
- u8 *boot_addr, *cfe, *p;
+ unsigned int i;
+ u8 *boot_addr, *cfe;
char cfe_version[32];
+ char *board_name;
u32 val;
/* read base address of boot chip select (0)
@@ -782,27 +745,15 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
- /* extract nvram data */
- memcpy(&nvram, boot_addr + BCM963XX_NVRAM_OFFSET, sizeof(nvram));
-
- /* check checksum before using data */
- if (nvram.version <= 4)
- check_len = offsetof(struct bcm963xx_nvram, checksum_old);
- else
- check_len = sizeof(nvram);
- val = 0;
- p = (u8 *)&nvram;
- while (check_len--)
- val += *p;
- if (val) {
+ if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
printk(KERN_ERR PFX "invalid nvram checksum\n");
return;
}
+ board_name = bcm63xx_nvram_get_name();
/* find board by name */
for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) {
- if (strncmp(nvram.name, bcm963xx_boards[i]->name,
- sizeof(nvram.name)))
+ if (strncmp(board_name, bcm963xx_boards[i]->name, 16))
continue;
/* copy, board desc array is marked initdata */
memcpy(&board, bcm963xx_boards[i], sizeof(board));
@@ -812,7 +763,7 @@ void __init board_prom_init(void)
/* bail out if board is not found, will complain later */
if (!board.name[0]) {
char name[17];
- memcpy(name, nvram.name, 16);
+ memcpy(name, board_name, 16);
name[16] = 0;
printk(KERN_ERR PFX "unknown bcm963xx board: %s\n",
name);
@@ -890,11 +841,11 @@ int __init board_register_devices(void)
bcm63xx_pcmcia_register();
if (board.has_enet0 &&
- !board_get_mac_address(board.enet0.mac_addr))
+ !bcm63xx_nvram_get_mac_address(board.enet0.mac_addr))
bcm63xx_enet_register(0, &board.enet0);
if (board.has_enet1 &&
- !board_get_mac_address(board.enet1.mac_addr))
+ !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr))
bcm63xx_enet_register(1, &board.enet1);
if (board.has_usbd)
@@ -907,7 +858,7 @@ int __init board_register_devices(void)
* do this after registering enet devices
*/
#ifdef CONFIG_SSB_PCIHOST
- if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
+ if (!bcm63xx_nvram_get_mac_address(bcm63xx_sprom.il0mac)) {
memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
if (ssb_arch_register_fallback_sprom(
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index dff79ab6005..b9e948d5943 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -14,6 +14,7 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
+#include <bcm63xx_reset.h>
#include <bcm63xx_clk.h>
static DEFINE_MUTEX(clocks_mutex);
@@ -124,15 +125,10 @@ static void enetsw_set(struct clk *clk, int enable)
CKCTL_6368_SWPKT_USB_EN |
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
- u32 val;
-
 /* reset switch core after clock change */
- val = bcm_perf_readl(PERF_SOFTRESET_6368_REG);
- val &= ~SOFTRESET_6368_ENETSW_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
msleep(10);
- val |= SOFTRESET_6368_ENETSW_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
msleep(10);
}
}
@@ -222,15 +218,10 @@ static void xtm_set(struct clk *clk, int enable)
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
- u32 val;
-
 /* reset sar core after clock change */
- val = bcm_perf_readl(PERF_SOFTRESET_6368_REG);
- val &= ~SOFTRESET_6368_SAR_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
mdelay(1);
- val |= SOFTRESET_6368_SAR_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
mdelay(1);
}
}
@@ -253,6 +244,19 @@ static struct clk clk_ipsec = {
};
/*
+ * PCIe clock
+ */
+
+static void pcie_set(struct clk *clk, int enable)
+{
+ bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+}
+
+static struct clk clk_pcie = {
+ .set = pcie_set,
+};
+
+/*
* Internal peripheral clock
*/
static struct clk clk_periph = {
@@ -313,6 +317,8 @@ struct clk *clk_get(struct device *dev, const char *id)
return &clk_pcm;
if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
return &clk_ipsec;
+ if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+ return &clk_pcie;
return ERR_PTR(-ENOENT);
}
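
clk.c now also hands out a "pcie" clock on BCM6328 through the same ad-hoc clk_get() table. A consumer would obtain and gate it in the usual way; this is a minimal sketch, with the NULL device pointer assumed as elsewhere in this file.

/* Sketch: enabling the new BCM6328 PCIe clock from a driver. */
#include <linux/clk.h>
#include <linux/err.h>

static int example_pcie_clock_on(void)
{
	struct clk *clk = clk_get(NULL, "pcie");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* -ENOENT on non-6328 parts */
	clk_enable(clk);
	return 0;
}
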
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c
new file mode 100644
index 00000000000..62061168083
--- /dev/null
+++ b/arch/mips/bcm63xx/nvram.c
@@ -0,0 +1,107 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#define pr_fmt(fmt) "bcm63xx_nvram: " fmt
+
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+
+#include <bcm63xx_nvram.h>
+
+/*
+ * nvram structure
+ */
+struct bcm963xx_nvram {
+ u32 version;
+ u8 reserved1[256];
+ u8 name[16];
+ u32 main_tp_number;
+ u32 psi_size;
+ u32 mac_addr_count;
+ u8 mac_addr_base[ETH_ALEN];
+ u8 reserved2[2];
+ u32 checksum_old;
+ u8 reserved3[720];
+ u32 checksum_high;
+};
+
+static struct bcm963xx_nvram nvram;
+static int mac_addr_used;
+
+int __init bcm63xx_nvram_init(void *addr)
+{
+ unsigned int check_len;
+ u32 crc, expected_crc;
+
+ /* extract nvram data */
+ memcpy(&nvram, addr, sizeof(nvram));
+
+ /* check checksum before using data */
+ if (nvram.version <= 4) {
+ check_len = offsetof(struct bcm963xx_nvram, reserved3);
+ expected_crc = nvram.checksum_old;
+ nvram.checksum_old = 0;
+ } else {
+ check_len = sizeof(nvram);
+ expected_crc = nvram.checksum_high;
+ nvram.checksum_high = 0;
+ }
+
+ crc = crc32_le(~0, (u8 *)&nvram, check_len);
+
+ if (crc != expected_crc)
+ return -EINVAL;
+
+ return 0;
+}
+
+u8 *bcm63xx_nvram_get_name(void)
+{
+ return nvram.name;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_name);
+
+int bcm63xx_nvram_get_mac_address(u8 *mac)
+{
+ u8 *oui;
+ int count;
+
+ if (mac_addr_used >= nvram.mac_addr_count) {
+ pr_err("not enough mac addresses\n");
+ return -ENODEV;
+ }
+
+ memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
+ oui = mac + ETH_ALEN/2 - 1;
+ count = mac_addr_used;
+
+ while (count--) {
+ u8 *p = mac + ETH_ALEN - 1;
+
+ do {
+ (*p)++;
+ if (*p != 0)
+ break;
+ p--;
+ } while (p != oui);
+
+ if (p == oui) {
+ pr_err("unable to fetch mac address\n");
+ return -ENODEV;
+ }
+ }
+
+ mac_addr_used++;
+ return 0;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
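
bcm63xx_nvram_get_mac_address() above hands out consecutive MAC addresses by incrementing the NIC-specific bytes of mac_addr_base, and fails once mac_addr_count is exhausted or the increment would carry into the OUI. A hedged caller-side sketch (the wrapper function is hypothetical):

/* Sketch: consuming the MAC allocator from board glue code. */
#include <linux/errno.h>
#include <linux/if_ether.h>

static int example_alloc_two_macs(void)
{
	u8 mac0[ETH_ALEN], mac1[ETH_ALEN];

	if (bcm63xx_nvram_get_mac_address(mac0))
		return -ENODEV;		/* pool exhausted */
	if (bcm63xx_nvram_get_mac_address(mac1))
		return -ENODEV;		/* second call yields mac0 + 1 */

	/* mac0 and mac1 differ only in the low, non-OUI bytes. */
	return 0;
}
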
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
new file mode 100644
index 00000000000..68a31bb90cb
--- /dev/null
+++ b/arch/mips/bcm63xx/reset.c
@@ -0,0 +1,223 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_reset.h>
+
+#define __GEN_RESET_BITS_TABLE(__cpu) \
+ [BCM63XX_RESET_SPI] = BCM## __cpu ##_RESET_SPI, \
+ [BCM63XX_RESET_ENET] = BCM## __cpu ##_RESET_ENET, \
+ [BCM63XX_RESET_USBH] = BCM## __cpu ##_RESET_USBH, \
+ [BCM63XX_RESET_USBD] = BCM## __cpu ##_RESET_USBD, \
+ [BCM63XX_RESET_DSL] = BCM## __cpu ##_RESET_DSL, \
+ [BCM63XX_RESET_SAR] = BCM## __cpu ##_RESET_SAR, \
+ [BCM63XX_RESET_EPHY] = BCM## __cpu ##_RESET_EPHY, \
+ [BCM63XX_RESET_ENETSW] = BCM## __cpu ##_RESET_ENETSW, \
+ [BCM63XX_RESET_PCM] = BCM## __cpu ##_RESET_PCM, \
+ [BCM63XX_RESET_MPI] = BCM## __cpu ##_RESET_MPI, \
+ [BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \
+ [BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT,
+
+#define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK
+#define BCM6328_RESET_ENET 0
+#define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK
+#define BCM6328_RESET_USBD SOFTRESET_6328_USBS_MASK
+#define BCM6328_RESET_DSL 0
+#define BCM6328_RESET_SAR SOFTRESET_6328_SAR_MASK
+#define BCM6328_RESET_EPHY SOFTRESET_6328_EPHY_MASK
+#define BCM6328_RESET_ENETSW SOFTRESET_6328_ENETSW_MASK
+#define BCM6328_RESET_PCM SOFTRESET_6328_PCM_MASK
+#define BCM6328_RESET_MPI 0
+#define BCM6328_RESET_PCIE \
+ (SOFTRESET_6328_PCIE_MASK | \
+ SOFTRESET_6328_PCIE_CORE_MASK | \
+ SOFTRESET_6328_PCIE_HARD_MASK)
+#define BCM6328_RESET_PCIE_EXT SOFTRESET_6328_PCIE_EXT_MASK
+
+#define BCM6338_RESET_SPI SOFTRESET_6338_SPI_MASK
+#define BCM6338_RESET_ENET SOFTRESET_6338_ENET_MASK
+#define BCM6338_RESET_USBH SOFTRESET_6338_USBH_MASK
+#define BCM6338_RESET_USBD SOFTRESET_6338_USBS_MASK
+#define BCM6338_RESET_DSL SOFTRESET_6338_ADSL_MASK
+#define BCM6338_RESET_SAR SOFTRESET_6338_SAR_MASK
+#define BCM6338_RESET_EPHY 0
+#define BCM6338_RESET_ENETSW 0
+#define BCM6338_RESET_PCM 0
+#define BCM6338_RESET_MPI 0
+#define BCM6338_RESET_PCIE 0
+#define BCM6338_RESET_PCIE_EXT 0
+
+#define BCM6348_RESET_SPI SOFTRESET_6348_SPI_MASK
+#define BCM6348_RESET_ENET SOFTRESET_6348_ENET_MASK
+#define BCM6348_RESET_USBH SOFTRESET_6348_USBH_MASK
+#define BCM6348_RESET_USBD SOFTRESET_6348_USBS_MASK
+#define BCM6348_RESET_DSL SOFTRESET_6348_ADSL_MASK
+#define BCM6348_RESET_SAR SOFTRESET_6348_SAR_MASK
+#define BCM6348_RESET_EPHY 0
+#define BCM6348_RESET_ENETSW 0
+#define BCM6348_RESET_PCM 0
+#define BCM6348_RESET_MPI 0
+#define BCM6348_RESET_PCIE 0
+#define BCM6348_RESET_PCIE_EXT 0
+
+#define BCM6358_RESET_SPI SOFTRESET_6358_SPI_MASK
+#define BCM6358_RESET_ENET SOFTRESET_6358_ENET_MASK
+#define BCM6358_RESET_USBH SOFTRESET_6358_USBH_MASK
+#define BCM6358_RESET_USBD 0
+#define BCM6358_RESET_DSL SOFTRESET_6358_ADSL_MASK
+#define BCM6358_RESET_SAR SOFTRESET_6358_SAR_MASK
+#define BCM6358_RESET_EPHY SOFTRESET_6358_EPHY_MASK
+#define BCM6358_RESET_ENETSW 0
+#define BCM6358_RESET_PCM SOFTRESET_6358_PCM_MASK
+#define BCM6358_RESET_MPI SOFTRESET_6358_MPI_MASK
+#define BCM6358_RESET_PCIE 0
+#define BCM6358_RESET_PCIE_EXT 0
+
+#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
+#define BCM6368_RESET_ENET 0
+#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
+#define BCM6368_RESET_USBD SOFTRESET_6368_USBS_MASK
+#define BCM6368_RESET_DSL 0
+#define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK
+#define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK
+#define BCM6368_RESET_ENETSW 0
+#define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK
+#define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK
+#define BCM6368_RESET_PCIE 0
+#define BCM6368_RESET_PCIE_EXT 0
+
+#ifdef BCMCPU_RUNTIME_DETECT
+
+/*
+ * core reset bits
+ */
+static const u32 bcm6328_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6328)
+};
+
+static const u32 bcm6338_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6338)
+};
+
+static const u32 bcm6348_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6348)
+};
+
+static const u32 bcm6358_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6358)
+};
+
+static const u32 bcm6368_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6368)
+};
+
+const u32 *bcm63xx_reset_bits;
+static int reset_reg;
+
+static int __init bcm63xx_reset_bits_init(void)
+{
+ if (BCMCPU_IS_6328()) {
+ reset_reg = PERF_SOFTRESET_6328_REG;
+ bcm63xx_reset_bits = bcm6328_reset_bits;
+ } else if (BCMCPU_IS_6338()) {
+ reset_reg = PERF_SOFTRESET_REG;
+ bcm63xx_reset_bits = bcm6338_reset_bits;
+ } else if (BCMCPU_IS_6348()) {
+ reset_reg = PERF_SOFTRESET_REG;
+ bcm63xx_reset_bits = bcm6348_reset_bits;
+ } else if (BCMCPU_IS_6358()) {
+ reset_reg = PERF_SOFTRESET_6358_REG;
+ bcm63xx_reset_bits = bcm6358_reset_bits;
+ } else if (BCMCPU_IS_6368()) {
+ reset_reg = PERF_SOFTRESET_6368_REG;
+ bcm63xx_reset_bits = bcm6368_reset_bits;
+ }
+
+ return 0;
+}
+#else
+
+#ifdef CONFIG_BCM63XX_CPU_6328
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6328)
+};
+#define reset_reg PERF_SOFTRESET_6328_REG
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6338
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6338)
+};
+#define reset_reg PERF_SOFTRESET_REG
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6345
+static const u32 bcm63xx_reset_bits[] = { };
+#define reset_reg 0
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6348
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6348)
+};
+#define reset_reg PERF_SOFTRESET_REG
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6358
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6358)
+};
+#define reset_reg PERF_SOFTRESET_6358_REG
+#endif
+
+#ifdef CONFIG_BCM63XX_CPU_6368
+static const u32 bcm63xx_reset_bits[] = {
+ __GEN_RESET_BITS_TABLE(6368)
+};
+#define reset_reg PERF_SOFTRESET_6368_REG
+#endif
+
+static int __init bcm63xx_reset_bits_init(void) { return 0; }
+#endif
+
+static DEFINE_SPINLOCK(reset_mutex);
+
+static void __bcm63xx_core_set_reset(u32 mask, int enable)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (!mask)
+ return;
+
+ spin_lock_irqsave(&reset_mutex, flags);
+ val = bcm_perf_readl(reset_reg);
+
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ bcm_perf_writel(val, reset_reg);
+ spin_unlock_irqrestore(&reset_mutex, flags);
+}
+
+void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset)
+{
+ __bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset);
+}
+EXPORT_SYMBOL(bcm63xx_core_set_reset);
+
+postcore_initcall(bcm63xx_reset_bits_init);
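
The enable convention in __bcm63xx_core_set_reset() above is active-low: asserting a reset clears the mask bit in the SOFTRESET register and releasing it sets the bit again, which is why the clk.c hunks bracket the delay with set_reset(..., 1) then set_reset(..., 0). A minimal caller sketch, mirroring that pattern; the delay length is an assumption.

/* Sketch: pulsing a core reset around a clock change. */
#include <linux/delay.h>

static void example_reset_sar_core(void)
{
	bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);	/* assert: bit cleared */
	mdelay(1);
	bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);	/* release: bit set */
}
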
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index fdf5f19bfdb..6d5ddbc112c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -688,3 +688,8 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
return addr_allocated;
}
+
+struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void)
+{
+ return cvmx_bootmem_desc;
+}
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index e44a55bc7f0..237e5b1a72d 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -51,7 +51,8 @@ static int __init flash_init(void)
flash_map.name = "phys_mapped_flash";
flash_map.phys = region_cfg.s.base << 16;
flash_map.size = 0x1fc00000 - flash_map.phys;
- flash_map.bankwidth = 1;
+ /* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
+ flash_map.bankwidth = region_cfg.s.width + 1;
flash_map.virt = ioremap(flash_map.phys, flash_map.size);
pr_notice("Bootbus flash: Setting flash for %luMB flash at "
"0x%08llx\n", flash_map.size >> 20, flash_map.phys);
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 02b15eed4bc..46f5dbceeec 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1266,7 +1266,6 @@ static void __init octeon_irq_init_ciu(void)
octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
- octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
/* CIU_1 */
for (i = 0; i < 16; i++)
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index db478dbb9c7..0ba0eb96d9a 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -79,11 +79,6 @@
/*
* Only on the 64-bit kernel we can made use of 64-bit registers.
*/
-#ifdef CONFIG_64BIT
-#define USE_DOUBLE
-#endif
-
-#ifdef USE_DOUBLE
#define LOAD ld
#define LOADL ldl
@@ -119,26 +114,6 @@
#define t6 $14
#define t7 $15
-#else
-
-#define LOAD lw
-#define LOADL lwl
-#define LOADR lwr
-#define STOREL swl
-#define STORER swr
-#define STORE sw
-#define ADD addu
-#define SUB subu
-#define SRL srl
-#define SLL sll
-#define SRA sra
-#define SLLV sllv
-#define SRLV srlv
-#define NBYTES 4
-#define LOG_NBYTES 2
-
-#endif /* USE_DOUBLE */
-
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST LOADL
@@ -395,12 +370,10 @@ EXC( sb t0, N(dst), s_exc_p1)
COPY_BYTE(0)
COPY_BYTE(1)
-#ifdef USE_DOUBLE
COPY_BYTE(2)
COPY_BYTE(3)
COPY_BYTE(4)
COPY_BYTE(5)
-#endif
EXC( lb t0, NBYTES-2(src), l_exc)
SUB len, len, 1
jr ra
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 0938df10a71..3c1b625a585 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -24,108 +24,6 @@
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
-static struct octeon_cf_data octeon_cf_data;
-
-static int __init octeon_cf_device_init(void)
-{
- union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
- unsigned long base_ptr, region_base, region_size;
- struct platform_device *pd;
- struct resource cf_resources[3];
- unsigned int num_resources;
- int i;
- int ret = 0;
-
- /* Setup octeon-cf platform device if present. */
- base_ptr = 0;
- if (octeon_bootinfo->major_version == 1
- && octeon_bootinfo->minor_version >= 1) {
- if (octeon_bootinfo->compact_flash_common_base_addr)
- base_ptr =
- octeon_bootinfo->compact_flash_common_base_addr;
- } else {
- base_ptr = 0x1d000800;
- }
-
- if (!base_ptr)
- return ret;
-
- /* Find CS0 region. */
- for (i = 0; i < 8; i++) {
- mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
- region_base = mio_boot_reg_cfg.s.base << 16;
- region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
- if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
- && base_ptr < region_base + region_size)
- break;
- }
- if (i >= 7) {
- /* i and i + 1 are CS0 and CS1, both must be less than 8. */
- goto out;
- }
- octeon_cf_data.base_region = i;
- octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
- octeon_cf_data.base_region_bias = base_ptr - region_base;
- memset(cf_resources, 0, sizeof(cf_resources));
- num_resources = 0;
- cf_resources[num_resources].flags = IORESOURCE_MEM;
- cf_resources[num_resources].start = region_base;
- cf_resources[num_resources].end = region_base + region_size - 1;
- num_resources++;
-
-
- if (!(base_ptr & 0xfffful)) {
- /*
- * Boot loader signals availability of DMA (true_ide
- * mode) by setting low order bits of base_ptr to
- * zero.
- */
-
- /* Assume that CS1 immediately follows. */
- mio_boot_reg_cfg.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
- region_base = mio_boot_reg_cfg.s.base << 16;
- region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
- if (!mio_boot_reg_cfg.s.en)
- goto out;
-
- cf_resources[num_resources].flags = IORESOURCE_MEM;
- cf_resources[num_resources].start = region_base;
- cf_resources[num_resources].end = region_base + region_size - 1;
- num_resources++;
-
- octeon_cf_data.dma_engine = 0;
- cf_resources[num_resources].flags = IORESOURCE_IRQ;
- cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
- cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
- num_resources++;
- } else {
- octeon_cf_data.dma_engine = -1;
- }
-
- pd = platform_device_alloc("pata_octeon_cf", -1);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
- pd->dev.platform_data = &octeon_cf_data;
-
- ret = platform_device_add_resources(pd, cf_resources, num_resources);
- if (ret)
- goto fail;
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
-
- return ret;
-fail:
- platform_device_put(pd);
-out:
- return ret;
-}
-device_initcall(octeon_cf_device_init);
-
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 04dd8ff0e0d..d7e0a09f77c 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -4,9 +4,11 @@
* for more details.
*
* Copyright (C) 2004-2007 Cavium Networks
- * Copyright (C) 2008 Wind River Systems
+ * Copyright (C) 2008, 2009 Wind River Systems
+ * written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -23,6 +25,7 @@
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/reboot.h>
@@ -56,11 +59,208 @@ struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
+static unsigned long long RESERVE_LOW_MEM = 0ull;
+#ifdef CONFIG_KEXEC
+#ifdef CONFIG_SMP
+/*
+ * Wait until the relocation code is prepared and send the
+ * secondary CPUs to spin until the kernel is relocated.
+ */
+static void octeon_kexec_smp_down(void *ignored)
+{
+ int cpu = smp_processor_id();
+
+ local_irq_disable();
+ set_cpu_online(cpu, false);
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ asm volatile (
+ " sync \n"
+ " synci ($0) \n");
+
+ relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+#define OCTEON_DDR0_BASE (0x0ULL)
+#define OCTEON_DDR0_SIZE (0x010000000ULL)
+#define OCTEON_DDR1_BASE (0x410000000ULL)
+#define OCTEON_DDR1_SIZE (0x010000000ULL)
+#define OCTEON_DDR2_BASE (0x020000000ULL)
+#define OCTEON_DDR2_SIZE (0x3e0000000ULL)
+#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
+
+static struct kimage *kimage_ptr;
+
+static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
+{
+ int64_t addr;
+ struct cvmx_bootmem_desc *bootmem_desc;
+
+ bootmem_desc = cvmx_bootmem_get_desc();
+
+ if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
+ mem_size = OCTEON_MAX_PHY_MEM_SIZE;
+ pr_err("Error: requested memory too large,"
+ "truncating to maximum size\n");
+ }
+
+ bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
+ bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
+
+ addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
+ bootmem_desc->head_addr = 0;
+
+ if (mem_size <= OCTEON_DDR0_SIZE) {
+ __cvmx_bootmem_phy_free(addr,
+ mem_size - RESERVE_LOW_MEM -
+ low_reserved_bytes, 0);
+ return;
+ }
+
+ __cvmx_bootmem_phy_free(addr,
+ OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
+ low_reserved_bytes, 0);
+
+ mem_size -= OCTEON_DDR0_SIZE;
+
+ if (mem_size > OCTEON_DDR1_SIZE) {
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
+ __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
+ mem_size - OCTEON_DDR1_SIZE, 0);
+ } else
+ __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
+}
+
+static int octeon_kexec_prepare(struct kimage *image)
+{
+ int i;
+ char *bootloader = "kexec";
+
+ octeon_boot_desc_ptr->argc = 0;
+ for (i = 0; i < image->nr_segments; i++) {
+ if (!strncmp(bootloader, (char *)image->segment[i].buf,
+ strlen(bootloader))) {
+ /*
+ * convert command line string to array
+ * of parameters (as bootloader does).
+ */
+ int argc = 0, offt;
+ char *str = (char *)image->segment[i].buf;
+ char *ptr = strchr(str, ' ');
+ while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
+ *ptr = '\0';
+ if (ptr[1] != ' ') {
+ offt = (int)(ptr - str + 1);
+ octeon_boot_desc_ptr->argv[argc] =
+ image->segment[i].mem + offt;
+ argc++;
+ }
+ ptr = strchr(ptr + 1, ' ');
+ }
+ octeon_boot_desc_ptr->argc = argc;
+ break;
+ }
+ }
+
+ /*
+ * Information about segments will be needed during pre-boot memory
+ * initialization.
+ */
+ kimage_ptr = image;
+ return 0;
+}
+
+static void octeon_generic_shutdown(void)
+{
+ int cpu, i;
+ struct cvmx_bootmem_desc *bootmem_desc;
+ void *named_block_array_ptr;
+
+ bootmem_desc = cvmx_bootmem_get_desc();
+ named_block_array_ptr =
+ cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
+
+#ifdef CONFIG_SMP
+ /* disable watchdogs */
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+ if (kimage_ptr != kexec_crash_image) {
+ memset(named_block_array_ptr,
+ 0x0,
+ CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
+ sizeof(struct cvmx_bootmem_named_block_desc));
+ /*
+ * Mark all memory (except low 0x100000 bytes) as free.
+ * It is the same thing the bootloader does.
+ */
+ kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
+ 0x100000);
+ /*
+ * Allocate all segments to avoid their corruption during boot.
+ */
+ for (i = 0; i < kimage_ptr->nr_segments; i++)
+ cvmx_bootmem_alloc_address(
+ kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
+ kimage_ptr->segment[i].mem - PAGE_SIZE,
+ PAGE_SIZE);
+ } else {
+ /*
+ * Do not mark all memory as free. Free only named sections
+ * leaving the rest of memory unchanged.
+ */
+ struct cvmx_bootmem_named_block_desc *ptr =
+ (struct cvmx_bootmem_named_block_desc *)
+ named_block_array_ptr;
+
+ for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
+ if (ptr[i].size)
+ cvmx_bootmem_free_named(ptr[i].name);
+ }
+ kexec_args[2] = 1UL; /* running on octeon_main_processor */
+ kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#ifdef CONFIG_SMP
+ secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
+ secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
+#endif
+}
+
+static void octeon_shutdown(void)
+{
+ octeon_generic_shutdown();
+#ifdef CONFIG_SMP
+ smp_call_function(octeon_kexec_smp_down, NULL, 0);
+ smp_wmb();
+ while (num_online_cpus() > 1) {
+ cpu_relax();
+ mdelay(1);
+ }
+#endif
+}
+
+static void octeon_crash_shutdown(struct pt_regs *regs)
+{
+ octeon_generic_shutdown();
+ default_machine_crash_shutdown(regs);
+}
+
+#endif /* CONFIG_KEXEC */
+
#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif
+#ifdef CONFIG_KEXEC
+/* The crashkernel cmdline parameter is parsed _after_ memory setup,
+ * so we also parse it here (workaround for EHB5200). */
+static uint64_t crashk_size, crashk_base;
+#endif
+
static int octeon_uart;
extern asmlinkage void handle_int(void);
@@ -415,6 +615,8 @@ void octeon_user_io_init(void)
void __init prom_init(void)
{
struct cvmx_sysinfo *sysinfo;
+ const char *arg;
+ char *p;
int i;
int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
@@ -566,6 +768,15 @@ void __init prom_init(void)
if (octeon_is_simulation())
MAX_MEMORY = 64ull << 20;
+ arg = strstr(arcs_cmdline, "mem=");
+ if (arg) {
+ MAX_MEMORY = memparse(arg + 4, &p);
+ if (MAX_MEMORY == 0)
+ MAX_MEMORY = 32ull << 30;
+ if (*p == '@')
+ RESERVE_LOW_MEM = memparse(p + 1, &p);
+ }
+
arcs_cmdline[0] = 0;
argc = octeon_boot_desc_ptr->argc;
for (i = 0; i < argc; i++) {
@@ -573,16 +784,30 @@ void __init prom_init(void)
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if ((strncmp(arg, "MEM=", 4) == 0) ||
(strncmp(arg, "mem=", 4) == 0)) {
- sscanf(arg + 4, "%llu", &MAX_MEMORY);
- MAX_MEMORY <<= 20;
+ MAX_MEMORY = memparse(arg + 4, &p);
if (MAX_MEMORY == 0)
MAX_MEMORY = 32ull << 30;
+ if (*p == '@')
+ RESERVE_LOW_MEM = memparse(p + 1, &p);
} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
__cvmx_interrupt_ecc_report_single_bit_errors = 1;
pr_notice("Reporting of single bit ECC errors is "
"turned on\n");
#endif
+#ifdef CONFIG_KEXEC
+ } else if (strncmp(arg, "crashkernel=", 12) == 0) {
+ crashk_size = memparse(arg+12, &p);
+ if (*p == '@')
+ crashk_base = memparse(p+1, &p);
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ /*
+ * To do: switch parsing to new style, something like:
+ * parse_crashkernel(arg, sysinfo->system_dram_size,
+ * &crashk_size, &crashk_base);
+ */
+#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
sizeof(arcs_cmdline) - 1) {
strcat(arcs_cmdline, " ");
@@ -617,11 +842,18 @@ void __init prom_init(void)
_machine_restart = octeon_restart;
_machine_halt = octeon_halt;
+#ifdef CONFIG_KEXEC
+ _machine_kexec_shutdown = octeon_shutdown;
+ _machine_crash_shutdown = octeon_crash_shutdown;
+ _machine_kexec_prepare = octeon_kexec_prepare;
+#endif
+
octeon_user_io_init();
register_smp_ops(&octeon_smp_ops);
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
+#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
if (addr > *mem && addr < *mem + *size) {
@@ -636,14 +868,21 @@ static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
*size -= PAGE_SIZE;
}
}
+#endif /* CONFIG_CRASH_DUMP */
void __init plat_mem_setup(void)
{
uint64_t mem_alloc_size;
uint64_t total;
+ uint64_t crashk_end;
+#ifndef CONFIG_CRASH_DUMP
int64_t memory;
+ uint64_t kernel_start;
+ uint64_t kernel_size;
+#endif
total = 0;
+ crashk_end = 0;
/*
* The Mips memory init uses the first memory location for
@@ -656,6 +895,17 @@ void __init plat_mem_setup(void)
if (mem_alloc_size > MAX_MEMORY)
mem_alloc_size = MAX_MEMORY;
+/* The crash kernel ignores the bootmem list; it relies on the mem=X@Y option. */
+#ifdef CONFIG_CRASH_DUMP
+ add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
+ total += MAX_MEMORY;
+#else
+#ifdef CONFIG_KEXEC
+ if (crashk_size > 0) {
+ add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
+ crashk_end = crashk_base + crashk_size;
+ }
+#endif
/*
* When allocating memory, we want incrementing addresses from
* bootmem_alloc so the code in add_memory_region can merge
@@ -664,22 +914,15 @@ void __init plat_mem_setup(void)
cvmx_bootmem_lock();
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
&& (total < MAX_MEMORY)) {
-#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
__pa_symbol(&__init_end), -1,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
-#elif defined(CONFIG_HIGHMEM)
- memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
- 0x100000,
- CVMX_BOOTMEM_FLAG_NO_LOCKING);
-#else
- memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
- 0x100000,
- CVMX_BOOTMEM_FLAG_NO_LOCKING);
-#endif
if (memory >= 0) {
u64 size = mem_alloc_size;
+#ifdef CONFIG_KEXEC
+ uint64_t end;
+#endif
/*
* exclude a page at the beginning and end of
@@ -692,20 +935,67 @@ void __init plat_mem_setup(void)
memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
CVMX_PCIE_BAR1_PHYS_SIZE,
&memory, &size);
+#ifdef CONFIG_KEXEC
+ end = memory + mem_alloc_size;
/*
- * This function automatically merges address
- * regions next to each other if they are
- * received in incrementing order.
+ * This function automatically merges address regions
+ * next to each other if they are received in
+ * incrementing order
*/
- if (size)
- add_memory_region(memory, size, BOOT_MEM_RAM);
+ if (memory < crashk_base && end > crashk_end) {
+ /* region is fully in */
+ add_memory_region(memory,
+ crashk_base - memory,
+ BOOT_MEM_RAM);
+ total += crashk_base - memory;
+ add_memory_region(crashk_end,
+ end - crashk_end,
+ BOOT_MEM_RAM);
+ total += end - crashk_end;
+ continue;
+ }
+
+ if (memory >= crashk_base && end <= crashk_end)
+ /*
+ * Entire memory region is within the new
+ * kernel's memory, ignore it.
+ */
+ continue;
+
+ if (memory > crashk_base && memory < crashk_end &&
+ end > crashk_end) {
+ /*
+ * Overlap with the beginning of the region,
+ * reserve the beginning.
+ */
+ mem_alloc_size -= crashk_end - memory;
+ memory = crashk_end;
+ } else if (memory < crashk_base && end > crashk_base &&
+ end < crashk_end)
+ /*
+ * Overlap with the end of the region,
+ * chop off the end.
+ */
+ mem_alloc_size -= end - crashk_base;
+#endif
+ add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
total += mem_alloc_size;
+ /* Restore mem_alloc_size for the next iteration. */
+ mem_alloc_size = 4 << 20;
} else {
break;
}
}
cvmx_bootmem_unlock();
+ /* Add the memory region for the kernel. */
+ kernel_start = (unsigned long) _text;
+ kernel_size = ALIGN(_end - _text, 0x100000);
+
+ /* Adjust for physical offset. */
+ kernel_start &= ~0xffffffff80000000ULL;
+ add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
+#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_CAVIUM_RESERVE32
/*
@@ -821,3 +1111,51 @@ void __init device_tree_init(void)
}
unflatten_device_tree();
}
+
+static int __initdata disable_octeon_edac_p;
+
+static int __init disable_octeon_edac(char *str)
+{
+ disable_octeon_edac_p = 1;
+ return 0;
+}
+early_param("disable_octeon_edac", disable_octeon_edac);
+
+static char *edac_device_names[] = {
+ "octeon_l2c_edac",
+ "octeon_pc_edac",
+};
+
+static int __init edac_devinit(void)
+{
+ struct platform_device *dev;
+ int i, err = 0;
+ int num_lmc;
+ char *name;
+
+ if (disable_octeon_edac_p)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
+ name = edac_device_names[i];
+ dev = platform_device_register_simple(name, -1, NULL, 0);
+ if (IS_ERR(dev)) {
+ pr_err("Registation of %s failed!\n", name);
+ err = PTR_ERR(dev);
+ }
+ }
+
+ num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
+ (OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
+ for (i = 0; i < num_lmc; i++) {
+ dev = platform_device_register_simple("octeon_lmc_edac",
+ i, NULL, 0);
+ if (IS_ERR(dev)) {
+ pr_err("Registation of octeon_lmc_edac %d failed!\n", i);
+ err = PTR_ERR(dev);
+ }
+ }
+
+ return err;
+}
+device_initcall(edac_devinit);
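
The crashkernel handling added to plat_mem_setup() above carves the reserved [crashk_base, crashk_end) window out of every region returned by cvmx_bootmem_phy_alloc(). A minimal userspace sketch of that interval logic, under assumed sample addresses; the harness and helper names are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative crashkernel window: 32 MB reserved at 128 MB. */
static const uint64_t crashk_base = 0x08000000ULL;
static const uint64_t crashk_end  = 0x0a000000ULL;

static void add_region(uint64_t start, uint64_t size)
{
	printf("add_memory_region(0x%09llx, 0x%09llx)\n",
	       (unsigned long long)start, (unsigned long long)size);
}

/*
 * Mirrors the overlap cases in the patched plat_mem_setup(): a region
 * that fully contains the window is split in two, a region fully inside
 * the window is dropped, and partial overlaps are trimmed.
 */
static void add_region_avoiding_crashk(uint64_t memory, uint64_t size)
{
	uint64_t end = memory + size;

	if (memory < crashk_base && end > crashk_end) {
		add_region(memory, crashk_base - memory);
		add_region(crashk_end, end - crashk_end);
		return;
	}
	if (memory >= crashk_base && end <= crashk_end)
		return;				/* entirely reserved */
	if (memory > crashk_base && memory < crashk_end && end > crashk_end) {
		size -= crashk_end - memory;	/* chop off the head */
		memory = crashk_end;
	} else if (memory < crashk_base && end > crashk_base &&
		   end < crashk_end) {
		size -= end - crashk_base;	/* chop off the tail */
	}
	add_region(memory, size);
}

int main(void)
{
	add_region_avoiding_crashk(0x00000000ULL, 0x10000000ULL); /* spans window  */
	add_region_avoiding_crashk(0x09000000ULL, 0x00800000ULL); /* inside window */
	add_region_avoiding_crashk(0x09000000ULL, 0x02000000ULL); /* head overlap  */
	return 0;
}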
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
new file mode 100644
index 00000000000..ea87d43ba60
--- /dev/null
+++ b/arch/mips/configs/ath79_defconfig
@@ -0,0 +1,111 @@
+CONFIG_ATH79=y
+CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP81=y
+CONFIG_ATH79_MACH_DB120=y
+CONFIG_ATH79_MACH_PB44=y
+CONFIG_ATH79_MACH_UBNT_XM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_RD_LZMA=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PCI=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_M25P80=y
+# CONFIG_M25PXX_USE_FAST_READ is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_PACKET_ENGINE is not set
+CONFIG_ATH_COMMON=m
+CONFIG_ATH9K=m
+CONFIG_ATH9K_AHB=y
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO_POLLED=m
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_AR933X=y
+CONFIG_SERIAL_AR933X_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_ATH79=y
+CONFIG_SPI_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PCF857X=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_ATH79_WDT=y
+# CONFIG_VGA_ARB is not set
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_ITU_T=m
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index 75165dfa60c..014ba4bbba7 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -1,7 +1,11 @@
CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y
+CONFIG_CAVIUM_CN63XXP1=y
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_HZ_100=y
CONFIG_PREEMPT=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
@@ -11,16 +15,15 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
@@ -42,22 +45,68 @@ CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_OF_PARTS is not set
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_SLRAM=y
+CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_PATA_OCTEON_CF=y
+CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_BCM87XX_PHY=y
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -66,24 +115,39 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_OCTEON=y
+CONFIG_SPI=y
+CONFIG_SPI_OCTEON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_STAGING=y
+CONFIG_OCTEON_ETHERNET=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_NLS=y
+CONFIG_HUGETLBFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_EARLY_PRINTK is not set
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_CBC=y
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
deleted file mode 100644
index f72d305a3f0..00000000000
--- a/arch/mips/configs/yosemite_defconfig
+++ /dev/null
@@ -1,94 +0,0 @@
-CONFIG_PMC_YOSEMITE=y
-CONFIG_HIGHMEM=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=2
-CONFIG_HZ_1000=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_IPV6_PRIVACY=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETWORK_SECMARK=y
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
-CONFIG_SGI_IOC4=m
-CONFIG_RAID_ATTRS=m
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_QLA3XXX=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FUSE_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRC16=m
-CONFIG_CRC32=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/mips/fw/sni/Makefile b/arch/mips/fw/sni/Makefile
index d9740a3788e..3f01dd36e6b 100644
--- a/arch/mips/fw/sni/Makefile
+++ b/arch/mips/fw/sni/Makefile
@@ -2,4 +2,4 @@
# Makefile for the SNI prom monitor routines under Linux.
#
-lib-$(CONFIG_SNIPROM) += sniprom.o
+lib-$(CONFIG_FW_SNIPROM) += sniprom.o
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 52c4e914f95..90112adb194 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -243,9 +243,9 @@ enum cpu_type_enum {
*/
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
- CPU_R4700, CPU_R5000, CPU_R5000A, CPU_R5500, CPU_NEVADA, CPU_R5432,
- CPU_R10000, CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
- CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
+ CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
+ CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
+ CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_RM9000, CPU_TX49XX,
/*
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index be39a12901c..006b43e38a9 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -40,6 +40,8 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline int dma_mapping_error(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, mask);
return ops->mapping_error(dev, mask);
}
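
The added debug_dma_mapping_error() call feeds the DMA-debug bookkeeping for mappings whose error status the driver checks. For context, a generic usage sketch of the pattern being tracked (a hypothetical wrapper, not taken from this patch):

#include <linux/dma-mapping.h>

/* Illustrative only: map a buffer and verify the mapping before use. */
static int example_map_tx_buf(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;		/* mapping failed; nothing to unmap */
	return 0;
}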
diff --git a/arch/mips/include/asm/fw/arc/types.h b/arch/mips/include/asm/fw/arc/types.h
index b9adcd6f086..2b11f87d6fb 100644
--- a/arch/mips/include/asm/fw/arc/types.h
+++ b/arch/mips/include/asm/fw/arc/types.h
@@ -10,7 +10,7 @@
#define _ASM_ARC_TYPES_H
-#ifdef CONFIG_ARC32
+#ifdef CONFIG_FW_ARC32
typedef char CHAR;
typedef short SHORT;
@@ -33,9 +33,9 @@ typedef LONG _PUSHORT;
typedef LONG _PULONG;
typedef LONG _PVOID;
-#endif /* CONFIG_ARC32 */
+#endif /* CONFIG_FW_ARC32 */
-#ifdef CONFIG_ARC64
+#ifdef CONFIG_FW_ARC64
typedef char CHAR;
typedef short SHORT;
@@ -57,7 +57,7 @@ typedef USHORT *_PUSHORT;
typedef ULONG *_PULONG;
typedef VOID *_PVOID;
-#endif /* CONFIG_ARC64 */
+#endif /* CONFIG_FW_ARC64 */
typedef CHAR *PCHAR;
typedef SHORT *PSHORT;
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index b4c20e4f87c..f0324e92d08 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -161,31 +161,6 @@ ASMMACRO(back_to_back_c0_hazard,
)
#define instruction_hazard() do { } while (0)
-#elif defined(CONFIG_CPU_RM9000)
-
-/*
- * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4 cpu cycles and use
- * for data translations should not occur for 3 cpu cycles.
- */
-
-ASMMACRO(mtc0_tlbw_hazard,
- _ssnop; _ssnop; _ssnop; _ssnop
- )
-ASMMACRO(tlbw_use_hazard,
- _ssnop; _ssnop; _ssnop; _ssnop
- )
-ASMMACRO(tlb_probe_hazard,
- _ssnop; _ssnop; _ssnop; _ssnop
- )
-ASMMACRO(irq_enable_hazard,
- )
-ASMMACRO(irq_disable_hazard,
- )
-ASMMACRO(back_to_back_c0_hazard,
- )
-#define instruction_hazard() do { } while (0)
-
#elif defined(CONFIG_CPU_SB1)
/*
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index 4314892aaeb..ee25ebbf2a2 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -9,22 +9,43 @@
#ifndef _MIPS_KEXEC
# define _MIPS_KEXEC
+#include <asm/stacktrace.h>
+
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
-
-#define KEXEC_CONTROL_PAGE_SIZE 4096
+/* Reserve 3*4096 bytes for board-specific info */
+#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
+#define MAX_NOTE_BYTES 1024
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
- /* Dummy implementation for now */
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ prepare_frametrace(newregs);
}
+#ifdef CONFIG_KEXEC
+struct kimage;
+extern unsigned long kexec_args[4];
+extern int (*_machine_kexec_prepare)(struct kimage *);
+extern void (*_machine_kexec_shutdown)(void);
+extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+#ifdef CONFIG_SMP
+extern const unsigned char kexec_smp_wait[];
+extern unsigned long secondary_kexec_args[4];
+extern void (*relocated_kexec_smp_wait) (void *);
+extern atomic_t kexec_ready_to_reboot;
+#endif
+#endif
+
#endif /* !_MIPS_KEXEC */
diff --git a/arch/mips/include/asm/mach-ar7/war.h b/arch/mips/include/asm/mach-ar7/war.h
index f4862b56308..99071e50faa 100644
--- a/arch/mips/include/asm/mach-ar7/war.h
+++ b/arch/mips/include/asm/mach-ar7/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-ath79/war.h b/arch/mips/include/asm/mach-ath79/war.h
index 323d9f1d8c4..0bb30905fd5 100644
--- a/arch/mips/include/asm/mach-ath79/war.h
+++ b/arch/mips/include/asm/mach-ath79/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-au1x00/war.h b/arch/mips/include/asm/mach-au1x00/war.h
index dd57d03d68b..72e260d24e5 100644
--- a/arch/mips/include/asm/mach-au1x00/war.h
+++ b/arch/mips/include/asm/mach-au1x00/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
index 26fdaf40b93..cc7563ba1cb 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h
@@ -44,8 +44,8 @@ union bcm47xx_bus {
extern union bcm47xx_bus bcm47xx_bus;
extern enum bcm47xx_bus_type bcm47xx_bus_type;
-void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
-void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback);
#ifdef CONFIG_BCM47XX_SSB
void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
index 2ef17e8df40..90daefa24a4 100644
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h
@@ -1,155 +1,17 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
- */
+#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
+#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
-#ifndef __BCM47XX_GPIO_H
-#define __BCM47XX_GPIO_H
+#include <asm-generic/gpio.h>
-#include <linux/ssb/ssb_embedded.h>
-#include <linux/bcma/bcma.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
-#define BCM47XX_EXTIF_GPIO_LINES 5
-#define BCM47XX_CHIPCO_GPIO_LINES 16
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
-extern int gpio_request(unsigned gpio, const char *label);
-extern void gpio_free(unsigned gpio);
-extern int gpio_to_irq(unsigned gpio);
-
-static inline int gpio_get_value(unsigned gpio)
+static inline int irq_to_gpio(unsigned int irq)
{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- return ssb_gpio_in(&bcm47xx_bus.ssb, 1 << gpio);
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- return bcma_chipco_gpio_in(&bcm47xx_bus.bcma.bus.drv_cc,
- 1 << gpio);
-#endif
- }
return -EINVAL;
}
-#define gpio_get_value_cansleep gpio_get_value
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
- value ? 1 << gpio : 0);
- return;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
- value ? 1 << gpio : 0);
- return;
#endif
- }
-}
-
-#define gpio_set_value_cansleep gpio_set_value
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- return 0;
-}
-
-static inline int gpio_is_valid(unsigned gpio)
-{
- return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
-}
-
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 0);
- return 0;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
- 0);
- return 0;
-#endif
- }
- return -EINVAL;
-}
-
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- /* first set the gpio out value */
- ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
- value ? 1 << gpio : 0);
- /* then set the gpio mode */
- ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 1 << gpio);
- return 0;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- /* first set the gpio out value */
- bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
- value ? 1 << gpio : 0);
- /* then set the gpio mode */
- bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
- 1 << gpio);
- return 0;
-#endif
- }
- return -EINVAL;
-}
-
-static inline int gpio_intmask(unsigned gpio, int value)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- ssb_gpio_intmask(&bcm47xx_bus.ssb, 1 << gpio,
- value ? 1 << gpio : 0);
- return 0;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_gpio_intmask(&bcm47xx_bus.bcma.bus.drv_cc,
- 1 << gpio, value ? 1 << gpio : 0);
- return 0;
-#endif
- }
- return -EINVAL;
-}
-
-static inline int gpio_polarity(unsigned gpio, int value)
-{
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << gpio,
- value ? 1 << gpio : 0);
- return 0;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_chipco_gpio_polarity(&bcm47xx_bus.bcma.bus.drv_cc,
- 1 << gpio, value ? 1 << gpio : 0);
- return 0;
-#endif
- }
- return -EINVAL;
-}
-
-
-#endif /* __BCM47XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/war.h b/arch/mips/include/asm/mach-bcm47xx/war.h
index 87cd4651dda..a3d2f448b10 100644
--- a/arch/mips/include/asm/mach-bcm47xx/war.h
+++ b/arch/mips/include/asm/mach-bcm47xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
new file mode 100644
index 00000000000..62d6a3b4d3b
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -0,0 +1,35 @@
+#ifndef BCM63XX_NVRAM_H
+#define BCM63XX_NVRAM_H
+
+#include <linux/types.h>
+
+/**
+ * bcm63xx_nvram_init() - initializes nvram
+ * @nvram: address of the nvram data
+ *
+ * Initializes the local nvram copy from the target address and checks
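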
+ * its checksum.
+ *
+ * Returns 0 on success.
+ */
+int __init bcm63xx_nvram_init(void *nvram);
+
+/**
+ * bcm63xx_nvram_get_name() - returns the board name according to nvram
+ *
+ * Returns the board name field from nvram. Note that it might not be
+ * null terminated if it is exactly 16 bytes long.
+ */
+u8 *bcm63xx_nvram_get_name(void);
+
+/**
+ * bcm63xx_nvram_get_mac_address() - register & return a new mac address
+ * @mac: pointer to array for allocated mac
+ *
+ * Registers and returns a mac address from the allocated macs from nvram.
+ *
+ * Returns 0 on success.
+ */
+int bcm63xx_nvram_get_mac_address(u8 *mac);
+
+#endif /* BCM63XX_NVRAM_H */
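
A hedged sketch of how a caller might consume the three nvram accessors declared above; the probe helper and its error handling are illustrative, only the bcm63xx_nvram_*() calls come from the new header:

#include <linux/kernel.h>
#include <linux/init.h>
#include <bcm63xx_nvram.h>

/* Hypothetical board-setup helper, not part of the patch. */
static int __init example_nvram_setup(void *nvram_image)
{
	u8 mac[6];
	int ret;

	ret = bcm63xx_nvram_init(nvram_image);	/* copies and checksums */
	if (ret)
		return ret;

	/* The name field may be exactly 16 bytes with no terminator. */
	pr_info("board name: %.16s\n", bcm63xx_nvram_get_name());

	ret = bcm63xx_nvram_get_mac_address(mac);
	if (ret)
		return ret;
	pr_info("base mac: %pM\n", mac);

	return 0;
}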
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 12963d05da8..c3eeb90b480 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -53,13 +53,18 @@
CKCTL_6338_SAR_EN | \
CKCTL_6338_SPI_EN)
-#define CKCTL_6345_CPU_EN (1 << 0)
-#define CKCTL_6345_BUS_EN (1 << 1)
-#define CKCTL_6345_EBI_EN (1 << 2)
-#define CKCTL_6345_UART_EN (1 << 3)
-#define CKCTL_6345_ADSLPHY_EN (1 << 4)
-#define CKCTL_6345_ENET_EN (1 << 7)
-#define CKCTL_6345_USBH_EN (1 << 8)
+/* The BCM6345 clock bits are shifted left by 16 because of the test
+ * control register, which is 16 bits wide. That way we do not need any
+ * BCM6345-specific code for handling clocks, and writing 0 to the test
+ * control register is fine.
+ */
+#define CKCTL_6345_CPU_EN (1 << 16)
+#define CKCTL_6345_BUS_EN (1 << 17)
+#define CKCTL_6345_EBI_EN (1 << 18)
+#define CKCTL_6345_UART_EN (1 << 19)
+#define CKCTL_6345_ADSLPHY_EN (1 << 20)
+#define CKCTL_6345_ENET_EN (1 << 23)
+#define CKCTL_6345_USBH_EN (1 << 24)
#define CKCTL_6345_ALL_SAFE_EN (CKCTL_6345_ENET_EN | \
CKCTL_6345_USBH_EN | \
@@ -191,6 +196,7 @@
/* Soft Reset register */
#define PERF_SOFTRESET_REG 0x28
#define PERF_SOFTRESET_6328_REG 0x10
+#define PERF_SOFTRESET_6358_REG 0x34
#define PERF_SOFTRESET_6368_REG 0x10
#define SOFTRESET_6328_SPI_MASK (1 << 0)
@@ -244,6 +250,15 @@
SOFTRESET_6348_ACLC_MASK | \
SOFTRESET_6348_ADSLMIPSPLL_MASK)
+#define SOFTRESET_6358_SPI_MASK (1 << 0)
+#define SOFTRESET_6358_ENET_MASK (1 << 2)
+#define SOFTRESET_6358_MPI_MASK (1 << 3)
+#define SOFTRESET_6358_EPHY_MASK (1 << 6)
+#define SOFTRESET_6358_SAR_MASK (1 << 7)
+#define SOFTRESET_6358_USBH_MASK (1 << 12)
+#define SOFTRESET_6358_PCM_MASK (1 << 13)
+#define SOFTRESET_6358_ADSL_MASK (1 << 14)
+
#define SOFTRESET_6368_SPI_MASK (1 << 0)
#define SOFTRESET_6368_MPI_MASK (1 << 3)
#define SOFTRESET_6368_EPHY_MASK (1 << 6)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h
new file mode 100644
index 00000000000..3a6eb9c1adc
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_reset.h
@@ -0,0 +1,21 @@
+#ifndef __BCM63XX_RESET_H
+#define __BCM63XX_RESET_H
+
+enum bcm63xx_core_reset {
+ BCM63XX_RESET_SPI,
+ BCM63XX_RESET_ENET,
+ BCM63XX_RESET_USBH,
+ BCM63XX_RESET_USBD,
+ BCM63XX_RESET_SAR,
+ BCM63XX_RESET_DSL,
+ BCM63XX_RESET_EPHY,
+ BCM63XX_RESET_ENETSW,
+ BCM63XX_RESET_PCM,
+ BCM63XX_RESET_MPI,
+ BCM63XX_RESET_PCIE,
+ BCM63XX_RESET_PCIE_EXT,
+};
+
+void bcm63xx_core_set_reset(enum bcm63xx_core_reset, int reset);
+
+#endif
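
bcm63xx_core_set_reset() takes a core identifier from the enum above plus an assert/release flag, so a typical use is a short assert-delay-release pulse. An illustrative sketch; the wrapper and the hold time are assumptions:

#include <linux/delay.h>
#include <bcm63xx_reset.h>

/* Hypothetical helper: pulse the USB host core through reset. */
static void example_usbh_reset_pulse(void)
{
	bcm63xx_core_set_reset(BCM63XX_RESET_USBH, 1);	/* assert reset */
	mdelay(100);					/* assumed hold time */
	bcm63xx_core_set_reset(BCM63XX_RESET_USBH, 0);	/* release */
}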
diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
index b0dd4bb53f7..682bcf3b492 100644
--- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
+++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
@@ -15,23 +15,6 @@
#define BCM963XX_NVRAM_OFFSET 0x580
/*
- * nvram structure
- */
-struct bcm963xx_nvram {
- u32 version;
- u8 reserved1[256];
- u8 name[16];
- u32 main_tp_number;
- u32 psi_size;
- u32 mac_addr_count;
- u8 mac_addr_base[6];
- u8 reserved2[2];
- u32 checksum_old;
- u8 reserved3[720];
- u32 checksum_high;
-};
-
-/*
* board definition
*/
struct board_info {
diff --git a/arch/mips/include/asm/mach-bcm63xx/war.h b/arch/mips/include/asm/mach-bcm63xx/war.h
index 8e3f3fdf320..05ee8671bef 100644
--- a/arch/mips/include/asm/mach-bcm63xx/war.h
+++ b/arch/mips/include/asm/mach-bcm63xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index ff0d4909d84..502bb1815ae 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -42,7 +42,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER3,
OCTEON_IRQ_USB0,
OCTEON_IRQ_USB1,
- OCTEON_IRQ_BOOTDMA,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
index c4712d7cc81..eb72b35cf04 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/war.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -18,7 +18,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-cobalt/war.h b/arch/mips/include/asm/mach-cobalt/war.h
index 97884fd18ac..34ae4046541 100644
--- a/arch/mips/include/asm/mach-cobalt/war.h
+++ b/arch/mips/include/asm/mach-cobalt/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-dec/war.h b/arch/mips/include/asm/mach-dec/war.h
index ca5e2ef909a..d29996feb3e 100644
--- a/arch/mips/include/asm/mach-dec/war.h
+++ b/arch/mips/include/asm/mach-dec/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-emma2rh/war.h b/arch/mips/include/asm/mach-emma2rh/war.h
index b660a4c30e6..79ae82da3ec 100644
--- a/arch/mips/include/asm/mach-emma2rh/war.h
+++ b/arch/mips/include/asm/mach-emma2rh/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index 70d9a25132c..e014264b2be 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -34,12 +34,6 @@
#endif
#endif
-#ifdef CONFIG_IRQ_CPU_RM9K
-#ifndef RM9K_CPU_IRQ_BASE
-#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12)
-#endif
-#endif
-
#endif /* CONFIG_IRQ_CPU */
#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ip22/war.h b/arch/mips/include/asm/mach-ip22/war.h
index a44fa9656a8..fba640517f4 100644
--- a/arch/mips/include/asm/mach-ip22/war.h
+++ b/arch/mips/include/asm/mach-ip22/war.h
@@ -21,7 +21,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-ip27/war.h b/arch/mips/include/asm/mach-ip27/war.h
index e2ddcc9b1ff..4ee0e4bdf4f 100644
--- a/arch/mips/include/asm/mach-ip27/war.h
+++ b/arch/mips/include/asm/mach-ip27/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 1
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-ip28/war.h b/arch/mips/include/asm/mach-ip28/war.h
index a1baafab486..4821c7b7a38 100644
--- a/arch/mips/include/asm/mach-ip28/war.h
+++ b/arch/mips/include/asm/mach-ip28/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 1
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-ip32/war.h b/arch/mips/include/asm/mach-ip32/war.h
index d194056dcd7..7237a935a13 100644
--- a/arch/mips/include/asm/mach-ip32/war.h
+++ b/arch/mips/include/asm/mach-ip32/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-jazz/war.h b/arch/mips/include/asm/mach-jazz/war.h
index 6158ee861bf..5b18b9a3d0e 100644
--- a/arch/mips/include/asm/mach-jazz/war.h
+++ b/arch/mips/include/asm/mach-jazz/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-jz4740/war.h b/arch/mips/include/asm/mach-jz4740/war.h
index 3a5bc17e28f..9b511d32383 100644
--- a/arch/mips/include/asm/mach-jz4740/war.h
+++ b/arch/mips/include/asm/mach-jz4740/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h
index 01b08ef368d..b6c568c280e 100644
--- a/arch/mips/include/asm/mach-lantiq/war.h
+++ b/arch/mips/include/asm/mach-lantiq/war.h
@@ -16,7 +16,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index 6a2df709c57..133336b493b 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -82,6 +82,9 @@ extern __iomem void *ltq_cgu_membase;
#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
+/* allow booting xrx200 phys */
+int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr);
+
/* request a non-gpio and set the PIO config */
#define PMU_PPE BIT(13)
extern void ltq_pmu_enable(unsigned int module);
diff --git a/arch/mips/include/asm/mach-lasat/war.h b/arch/mips/include/asm/mach-lasat/war.h
index bb1e0325c9b..741ae724adc 100644
--- a/arch/mips/include/asm/mach-lasat/war.h
+++ b/arch/mips/include/asm/mach-lasat/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-loongson/war.h b/arch/mips/include/asm/mach-loongson/war.h
index 4b971c3ffd8..f2570df66bb 100644
--- a/arch/mips/include/asm/mach-loongson/war.h
+++ b/arch/mips/include/asm/mach-loongson/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
index 2f171617bad..718a1228a4f 100644
--- a/arch/mips/include/asm/mach-loongson1/platform.h
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -18,6 +18,7 @@ extern struct platform_device ls1x_eth0_device;
extern struct platform_device ls1x_ehci_device;
extern struct platform_device ls1x_rtc_device;
-void ls1x_serial_setup(void);
+extern void __init ls1x_clk_init(void);
+extern void __init ls1x_serial_setup(struct platform_device *pdev);
#endif /* __ASM_MACH_LOONGSON1_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
index 8efa7fb9f73..a81fa3d0dc9 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -20,14 +20,15 @@
/* Clock PLL Divisor Register Bits */
#define DIV_DC_EN (0x1 << 31)
-#define DIV_DC (0x1f << 26)
#define DIV_CPU_EN (0x1 << 25)
-#define DIV_CPU (0x1f << 20)
#define DIV_DDR_EN (0x1 << 19)
-#define DIV_DDR (0x1f << 14)
#define DIV_DC_SHIFT 26
#define DIV_CPU_SHIFT 20
#define DIV_DDR_SHIFT 14
+#define DIV_DC_WIDTH 5
+#define DIV_CPU_WIDTH 5
+#define DIV_DDR_WIDTH 5
+
#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
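
The per-divider mask macros are replaced by SHIFT/WIDTH pairs, which lets callers derive the mask generically. An illustrative extraction helper, assuming the header is on the mach include path; it is not part of the patch:

#include <regs-clk.h>	/* assumed mach-loongson1 include path */

/* Hypothetical helper: pull the CPU divider out of a PLL divisor register value. */
static inline unsigned int example_cpu_div(unsigned int pll_div_reg)
{
	return (pll_div_reg >> DIV_CPU_SHIFT) & ((1 << DIV_CPU_WIDTH) - 1);
}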
diff --git a/arch/mips/include/asm/mach-loongson1/war.h b/arch/mips/include/asm/mach-loongson1/war.h
index e3680a8fb34..8fb50d00813 100644
--- a/arch/mips/include/asm/mach-loongson1/war.h
+++ b/arch/mips/include/asm/mach-loongson1/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-malta/war.h b/arch/mips/include/asm/mach-malta/war.h
index 7c6931d5f45..d068fc411f4 100644
--- a/arch/mips/include/asm/mach-malta/war.h
+++ b/arch/mips/include/asm/mach-malta/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h
index b5902458e7c..868ed8a2ed5 100644
--- a/arch/mips/include/asm/mach-netlogic/irq.h
+++ b/arch/mips/include/asm/mach-netlogic/irq.h
@@ -8,7 +8,9 @@
#ifndef __ASM_NETLOGIC_IRQ_H
#define __ASM_NETLOGIC_IRQ_H
-#define NR_IRQS 64
+#include <asm/mach-netlogic/multi-node.h>
+#define NR_IRQS (64 * NLM_NR_NODES)
+
#define MIPS_CPU_IRQ_BASE 0
#endif /* __ASM_NETLOGIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h
new file mode 100644
index 00000000000..d62fc773f4d
--- /dev/null
+++ b/arch/mips/include/asm/mach-netlogic/multi-node.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NETLOGIC_MULTI_NODE_H_
+#define _NETLOGIC_MULTI_NODE_H_
+
+#ifndef CONFIG_NLM_MULTINODE
+#define NLM_NR_NODES 1
+#else
+#if defined(CONFIG_NLM_MULTINODE_2)
+#define NLM_NR_NODES 2
+#elif defined(CONFIG_NLM_MULTINODE_4)
+#define NLM_NR_NODES 4
+#else
+#define NLM_NR_NODES 1
+#endif
+#endif
+
+#define NLM_CORES_PER_NODE 8
+#define NLM_THREADS_PER_CORE 4
+#define NLM_CPUS_PER_NODE (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
+
+#endif
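
The node/core/thread constants above imply a simple linear CPU numbering. A small illustrative helper showing how the three values compose; the function itself is hypothetical, not part of the patch:

#include <asm/mach-netlogic/multi-node.h>

/* Hypothetical helper: linear hardware thread id for (node, core, thread). */
static inline int nlm_example_hwtid(int node, int core, int thread)
{
	return node * NLM_CPUS_PER_NODE +
	       core * NLM_THREADS_PER_CORE + thread;
}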
diff --git a/arch/mips/include/asm/mach-netlogic/war.h b/arch/mips/include/asm/mach-netlogic/war.h
index 22da8932735..2c7216840e1 100644
--- a/arch/mips/include/asm/mach-netlogic/war.h
+++ b/arch/mips/include/asm/mach-netlogic/war.h
@@ -18,7 +18,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h
index 82cd1e97bc2..edaa06d9d49 100644
--- a/arch/mips/include/asm/mach-pnx833x/war.h
+++ b/arch/mips/include/asm/mach-pnx833x/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-pnx8550/war.h b/arch/mips/include/asm/mach-pnx8550/war.h
index d0458dd082f..de8894c4668 100644
--- a/arch/mips/include/asm/mach-pnx8550/war.h
+++ b/arch/mips/include/asm/mach-pnx8550/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-powertv/war.h b/arch/mips/include/asm/mach-powertv/war.h
index 7ac05ecc512..c5651c8e58d 100644
--- a/arch/mips/include/asm/mach-powertv/war.h
+++ b/arch/mips/include/asm/mach-powertv/war.h
@@ -20,7 +20,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-rc32434/war.h b/arch/mips/include/asm/mach-rc32434/war.h
index 3ddf187e98a..1bfd489a370 100644
--- a/arch/mips/include/asm/mach-rc32434/war.h
+++ b/arch/mips/include/asm/mach-rc32434/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-rm/war.h b/arch/mips/include/asm/mach-rm/war.h
index 948d3129a11..a3dde98549b 100644
--- a/arch/mips/include/asm/mach-rm/war.h
+++ b/arch/mips/include/asm/mach-rm/war.h
@@ -21,7 +21,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-sead3/war.h b/arch/mips/include/asm/mach-sead3/war.h
index 7c6931d5f45..d068fc411f4 100644
--- a/arch/mips/include/asm/mach-sead3/war.h
+++ b/arch/mips/include/asm/mach-sead3/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 743385d7b5f..176f5b32dc6 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -33,7 +33,6 @@ extern int sb1250_m3_workaround_needed(void);
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-tx39xx/war.h b/arch/mips/include/asm/mach-tx39xx/war.h
index 43381461635..6a52e653477 100644
--- a/arch/mips/include/asm/mach-tx39xx/war.h
+++ b/arch/mips/include/asm/mach-tx39xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-tx49xx/war.h b/arch/mips/include/asm/mach-tx49xx/war.h
index 39b5d1177c5..a8e2c586a18 100644
--- a/arch/mips/include/asm/mach-tx49xx/war.h
+++ b/arch/mips/include/asm/mach-tx49xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 1
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-vr41xx/war.h b/arch/mips/include/asm/mach-vr41xx/war.h
index 56a38926412..ffe31e73600 100644
--- a/arch/mips/include/asm/mach-vr41xx/war.h
+++ b/arch/mips/include/asm/mach-vr41xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-wrppmc/war.h b/arch/mips/include/asm/mach-wrppmc/war.h
index ac48629bb1c..e86084c0bd6 100644
--- a/arch/mips/include/asm/mach-wrppmc/war.h
+++ b/arch/mips/include/asm/mach-wrppmc/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-yosemite/cpu-feature-overrides.h b/arch/mips/include/asm/mach-yosemite/cpu-feature-overrides.h
deleted file mode 100644
index 56bdd329860..00000000000
--- a/arch/mips/include/asm/mach-yosemite/cpu-feature-overrides.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H
-
-/*
- * Momentum Jaguar ATX always has the RM9000 processor.
- */
-#define cpu_has_watch 1
-#define cpu_has_mips16 0
-#define cpu_has_divec 0
-#define cpu_has_vce 0
-#define cpu_has_cache_cdex_p 0
-#define cpu_has_cache_cdex_s 0
-#define cpu_has_prefetch 1
-#define cpu_has_mcheck 0
-#define cpu_has_ejtag 0
-
-#define cpu_has_llsc 1
-#define cpu_has_vtag_icache 0
-#define cpu_has_dc_aliases 0
-#define cpu_has_ic_fills_f_dc 0
-#define cpu_has_dsp 0
-#define cpu_has_dsp2 0
-#define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
-#define cpu_icache_snoops_remote_store 0
-
-#define cpu_has_nofpuex 0
-#define cpu_has_64bits 1
-
-#define cpu_has_inclusive_pcaches 0
-
-#define cpu_dcache_line_size() 32
-#define cpu_icache_line_size() 32
-#define cpu_scache_line_size() 32
-
-#define cpu_has_mips32r1 0
-#define cpu_has_mips32r2 0
-#define cpu_has_mips64r1 0
-#define cpu_has_mips64r2 0
-
-#endif /* __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-yosemite/war.h b/arch/mips/include/asm/mach-yosemite/war.h
deleted file mode 100644
index e5c6d53efc8..00000000000
--- a/arch/mips/include/asm/mach-yosemite/war.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_YOSEMITE_WAR_H
-#define __ASM_MIPS_MACH_YOSEMITE_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 1
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_YOSEMITE_WAR_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index eb742895dcb..7e4e6f8fab3 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -240,7 +240,7 @@
#define PM_HUGE_MASK PM_64M
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PM_HUGE_MASK PM_256M
-#elif defined(CONFIG_HUGETLB_PAGE)
+#elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#error Bad page size configuration for hugetlbfs!
#endif
@@ -977,10 +977,6 @@ do { \
#define read_c0_framemask() __read_32bit_c0_register($21, 0)
#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
-/* RM9000 PerfControl performance counter control register */
-#define read_c0_perfcontrol() __read_32bit_c0_register($22, 0)
-#define write_c0_perfcontrol(val) __write_32bit_c0_register($22, 0, val)
-
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
@@ -1033,10 +1029,6 @@ do { \
#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
-/* RM9000 PerfCount performance counter register */
-#define read_c0_perfcount() __read_64bit_c0_register($25, 0)
-#define write_c0_perfcount(val) __write_64bit_c0_register($25, 0, val)
-
#define read_c0_ecc() __read_32bit_c0_register($26, 0)
#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 9b02cfba744..45cfa1ad86a 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -72,12 +72,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10
#define ASID_MASK 0xff0
-#elif defined(CONFIG_CPU_RM9000)
-
-#define ASID_INC 0x1
-#define ASID_MASK 0xfff
-
-/* SMTC/34K debug hack - but maybe we'll keep it */
#elif defined(CONFIG_MIPS_MT_SMTC)
#define ASID_INC 0x1
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 26137da1c71..44b705d0826 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -120,8 +120,6 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "R10000 "
#elif defined CONFIG_CPU_RM7000
#define MODULE_PROC_FAMILY "RM7000 "
-#elif defined CONFIG_CPU_RM9000
-#define MODULE_PROC_FAMILY "RM9000 "
#elif defined CONFIG_CPU_SB1
#define MODULE_PROC_FAMILY "SB1 "
#elif defined CONFIG_CPU_LOONGSON1
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index fdd2f44c7b5..42bfd5f1eee 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -45,15 +45,19 @@
#define BOOT_NMI_HANDLER 8
#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/mach-netlogic/multi-node.h>
+
struct irq_desc;
-extern struct plat_smp_ops nlm_smp_ops;
-extern char nlm_reset_entry[], nlm_reset_entry_end[];
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
-void nlm_smp_irq_init(void);
+void nlm_smp_irq_init(int hwcpuid);
void nlm_boot_secondary_cpus(void);
-int nlm_wakeup_secondary_cpus(u32 wakeup_mask);
+int nlm_wakeup_secondary_cpus(void);
void nlm_rmiboot_preboot(void);
+void nlm_percpu_init(int hwcpuid);
static inline void
nlm_set_nmi_handler(void *handler)
@@ -68,9 +72,42 @@ nlm_set_nmi_handler(void *handler)
* Misc.
*/
unsigned int nlm_get_cpu_frequency(void);
+void nlm_node_init(int node);
+extern struct plat_smp_ops nlm_smp_ops;
+extern char nlm_reset_entry[], nlm_reset_entry_end[];
+
+extern unsigned int nlm_threads_per_core;
+extern cpumask_t nlm_cpumask;
+
+struct nlm_soc_info {
+ unsigned long coremask; /* cores enabled on the soc */
+ unsigned long ebase;
+ uint64_t irqmask;
+ uint64_t sysbase; /* only for XLP */
+ uint64_t picbase;
+ spinlock_t piclock;
+};
+
+#define nlm_get_node(i) (&nlm_nodes[i])
+#ifdef CONFIG_CPU_XLR
+#define nlm_current_node() (&nlm_nodes[0])
+#else
+#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
+#endif
+
+struct irq_data;
+uint64_t nlm_pci_irqmask(int node);
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *));
+
+/*
+ * NR_IRQS is divided between the nodes; each node has a separate irq space
+ */
+static inline int nlm_irq_to_xirq(int node, int irq)
+{
+ return node * NR_IRQS / NLM_NR_NODES + irq;
+}
-extern unsigned long nlm_common_ebase;
-extern int nlm_threads_per_core;
-extern uint32_t nlm_cpumask, nlm_coremask;
+extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+extern int nlm_cpu_ready[];
#endif
#endif /* _NETLOGIC_COMMON_H_ */
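
The nlm_irq_to_xirq() helper added above carves the global IRQ space into equal per-node slices, as the comment in the hunk describes. A minimal standalone sketch of that mapping, using made-up NR_IRQS and NLM_NR_NODES values rather than the kernel's configured ones:

    /* Illustrative sketch of the per-node IRQ mapping; the constants below
     * are assumptions for the example, not the kernel's real configuration. */
    #include <stdio.h>

    #define NR_IRQS      256   /* assumed size of the global irq space */
    #define NLM_NR_NODES 4     /* assumed maximum number of nodes */

    static int nlm_irq_to_xirq(int node, int irq)
    {
            /* each node owns a contiguous NR_IRQS / NLM_NR_NODES slice */
            return node * NR_IRQS / NLM_NR_NODES + irq;
    }

    int main(void)
    {
            /* irq 5 on node 2 maps to 2 * 256 / 4 + 5 = 133 */
            printf("%d\n", nlm_irq_to_xirq(2, 5));
            return 0;
    }

With these assumed values each node gets a 64-entry slice, so node-local IRQ numbers never collide across nodes.
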
diff --git a/arch/mips/include/asm/netlogic/interrupt.h b/arch/mips/include/asm/netlogic/interrupt.h
index a85aadb6cfd..ed5993d9b7b 100644
--- a/arch/mips/include/asm/netlogic/interrupt.h
+++ b/arch/mips/include/asm/netlogic/interrupt.h
@@ -39,7 +39,7 @@
#define IRQ_IPI_SMP_FUNCTION 3
#define IRQ_IPI_SMP_RESCHEDULE 4
-#define IRQ_MSGRING 6
+#define IRQ_FMN 5
#define IRQ_TIMER 7
#endif
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8c53d0ba4bf..32ba6d95d47 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -73,4 +73,146 @@ static inline int hard_smp_processor_id(void)
return __read_32bit_c0_register($15, 1) & 0x3ff;
}
+static inline int nlm_nodeid(void)
+{
+ return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+}
+
+static inline unsigned int nlm_core_id(void)
+{
+ return (read_c0_ebase() & 0x1c) >> 2;
+}
+
+static inline unsigned int nlm_thread_id(void)
+{
+ return read_c0_ebase() & 0x3;
+}
+
+#define __read_64bit_c2_split(source, sel) \
+({ \
+ unsigned long long __val; \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%M0, " #source ", " #sel "\n\t" \
+ "dsll\t%L0, %M0, 32\n\t" \
+ "dsra\t%M0, %M0, 32\n\t" \
+ "dsra\t%L0, %L0, 32\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__val)); \
+ local_irq_restore(__flags); \
+ \
+ __val; \
+})
+
+#define __write_64bit_c2_split(source, sel, val) \
+do { \
+ unsigned long __flags; \
+ \
+ local_irq_save(__flags); \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dsll\t%L0, %L0, 32\n\t" \
+ "dsrl\t%L0, %L0, 32\n\t" \
+ "dsll\t%M0, %M0, 32\n\t" \
+ "or\t%L0, %L0, %M0\n\t" \
+ "dmtc2\t%L0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "r" (val)); \
+ local_irq_restore(__flags); \
+} while (0)
+
+#define __read_32bit_c2_register(source, sel) \
+({ uint32_t __res; \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __read_64bit_c2_register(source, sel) \
+({ unsigned long long __res; \
+ if (sizeof(unsigned long) == 4) \
+ __res = __read_64bit_c2_split(source, sel); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmfc2\t%0, " #source ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : "=r" (__res)); \
+ __res; \
+})
+
+#define __write_64bit_c2_register(register, sel, value) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_64bit_c2_split(register, sel, value); \
+ else if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips64\n\t" \
+ "dmtc2\t%z0, " #register ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+} while (0)
+
+#define __write_32bit_c2_register(reg, sel, value) \
+({ \
+ if (sel == 0) \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+ else \
+ __asm__ __volatile__( \
+ ".set\tmips32\n\t" \
+ "mtc2\t%z0, " #reg ", " #sel "\n\t" \
+ ".set\tmips0\n\t" \
+ : : "Jr" (value)); \
+})
+
#endif /*_ASM_NLM_MIPS_EXTS_H */
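
The nlm_nodeid(), nlm_core_id() and nlm_thread_id() helpers added above all decode coprocessor register fields: the node comes from CP0 register $15 select 1, while core and thread come from the low bits of EBase. A small user-space sketch of the same bit arithmetic, with a made-up EBase CPUNum value purely for illustration:

    /* Sketch of the EBase decoding used by nlm_core_id()/nlm_thread_id();
     * the sample value is an assumption for the example. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int ebase  = 0x1e;                /* assumed CPUNum bits */
            unsigned int core   = (ebase & 0x1c) >> 2; /* -> 7 */
            unsigned int thread = ebase & 0x3;         /* -> 2 */

            printf("core %u, thread %u\n", core, thread);
            return 0;
    }
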
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index ad8b80233a6..b2e53a5383a 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -273,36 +273,16 @@ nlm_pic_read_irt(uint64_t base, int irt_index)
return nlm_read_pic_reg(base, PIC_IRT(irt_index));
}
-static inline uint64_t
-nlm_pic_read_control(uint64_t base)
-{
- return nlm_read_pic_reg(base, PIC_CTRL);
-}
-
-static inline void
-nlm_pic_write_control(uint64_t base, uint64_t control)
-{
- nlm_write_pic_reg(base, PIC_CTRL, control);
-}
-
-static inline void
-nlm_pic_update_control(uint64_t base, uint64_t control)
-{
- uint64_t val;
-
- val = nlm_read_pic_reg(base, PIC_CTRL);
- nlm_write_pic_reg(base, PIC_CTRL, control | val);
-}
-
static inline void
nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu)
{
uint64_t val;
val = nlm_read_pic_reg(base, PIC_IRT(irt));
- val |= cpu & 0xf;
- if (cpu > 15)
- val |= 1 << 16;
+ /* clear cpuset and mask */
+ val &= ~((0x7ull << 16) | 0xffff);
+ /* set DB, cpuset and cpumask */
+ val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf));
nlm_write_pic_reg(base, PIC_IRT(irt), val);
}
@@ -369,7 +349,7 @@ nlm_pic_enable_irt(uint64_t base, int irt)
static inline void
nlm_pic_disable_irt(uint64_t base, int irt)
{
- uint32_t reg;
+ uint64_t reg;
reg = nlm_read_pic_reg(base, PIC_IRT(irt));
nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31));
@@ -379,15 +359,9 @@ static inline void
nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)
{
uint64_t ipi;
- int node, ncpu;
-
- node = hwt / 32;
- ncpu = hwt & 0x1f;
- ipi = ((uint64_t)nmi << 31) | (irq << 20) | (node << 17) |
- (1 << (ncpu & 0xf));
- if (ncpu > 15)
- ipi |= 0x10000; /* Setting bit 16 to select cpus 16-31 */
+ ipi = (nmi << 31) | (irq << 20);
+ ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */
nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
}
@@ -404,12 +378,10 @@ nlm_pic_ack(uint64_t base, int irt_num)
static inline void
nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
{
- nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, 0);
+ nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
}
-extern uint64_t nlm_pic_base;
int nlm_irq_to_irt(int irq);
-int nlm_irt_to_irq(int irt);
#endif /* __ASSEMBLY__ */
#endif /* _NLM_HAL_PIC_H */
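
The reworked nlm_set_irt_to_cpu() above switches the IRT destination to a grouped encoding: bits 15:0 carry a one-hot thread mask, bits 18:16 select which group of 16 hardware threads the mask applies to, and bit 19 is the DB bit set by the patch. A standalone sketch of that encoding for an arbitrary cpu number:

    /* Sketch of the IRT destination encoding from nlm_set_irt_to_cpu();
     * cpu 21 is an arbitrary example value. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int cpu = 21;
            uint64_t val = 0;

            val &= ~((0x7ull << 16) | 0xffff);  /* clear cpuset and mask */
            val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf));

            /* cpu 21 -> group 1, thread 5: prints 0x90020 */
            printf("0x%llx\n", (unsigned long long)val);
            return 0;
    }

nlm_pic_send_ipi() in the same hunk uses the same cpuset/mask layout for the IPI control register.
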
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index 21432f7d89b..258e8cc00e9 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -124,6 +124,5 @@
#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
-extern uint64_t nlm_sys_base;
#endif
#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
new file mode 100644
index 00000000000..68d5167c86b
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NLM_FMN_H_
+#define _NLM_FMN_H_
+
+#include <asm/netlogic/mips-extns.h> /* for COP2 access */
+
+/* Station IDs */
+#define FMN_STNID_CPU0 0x00
+#define FMN_STNID_CPU1 0x08
+#define FMN_STNID_CPU2 0x10
+#define FMN_STNID_CPU3 0x18
+#define FMN_STNID_CPU4 0x20
+#define FMN_STNID_CPU5 0x28
+#define FMN_STNID_CPU6 0x30
+#define FMN_STNID_CPU7 0x38
+
+#define FMN_STNID_XGS0_TX 64
+#define FMN_STNID_XMAC0_00_TX 64
+#define FMN_STNID_XMAC0_01_TX 65
+#define FMN_STNID_XMAC0_02_TX 66
+#define FMN_STNID_XMAC0_03_TX 67
+#define FMN_STNID_XMAC0_04_TX 68
+#define FMN_STNID_XMAC0_05_TX 69
+#define FMN_STNID_XMAC0_06_TX 70
+#define FMN_STNID_XMAC0_07_TX 71
+#define FMN_STNID_XMAC0_08_TX 72
+#define FMN_STNID_XMAC0_09_TX 73
+#define FMN_STNID_XMAC0_10_TX 74
+#define FMN_STNID_XMAC0_11_TX 75
+#define FMN_STNID_XMAC0_12_TX 76
+#define FMN_STNID_XMAC0_13_TX 77
+#define FMN_STNID_XMAC0_14_TX 78
+#define FMN_STNID_XMAC0_15_TX 79
+
+#define FMN_STNID_XGS1_TX 80
+#define FMN_STNID_XMAC1_00_TX 80
+#define FMN_STNID_XMAC1_01_TX 81
+#define FMN_STNID_XMAC1_02_TX 82
+#define FMN_STNID_XMAC1_03_TX 83
+#define FMN_STNID_XMAC1_04_TX 84
+#define FMN_STNID_XMAC1_05_TX 85
+#define FMN_STNID_XMAC1_06_TX 86
+#define FMN_STNID_XMAC1_07_TX 87
+#define FMN_STNID_XMAC1_08_TX 88
+#define FMN_STNID_XMAC1_09_TX 89
+#define FMN_STNID_XMAC1_10_TX 90
+#define FMN_STNID_XMAC1_11_TX 91
+#define FMN_STNID_XMAC1_12_TX 92
+#define FMN_STNID_XMAC1_13_TX 93
+#define FMN_STNID_XMAC1_14_TX 94
+#define FMN_STNID_XMAC1_15_TX 95
+
+#define FMN_STNID_GMAC 96
+#define FMN_STNID_GMACJFR_0 96
+#define FMN_STNID_GMACRFR_0 97
+#define FMN_STNID_GMACTX0 98
+#define FMN_STNID_GMACTX1 99
+#define FMN_STNID_GMACTX2 100
+#define FMN_STNID_GMACTX3 101
+#define FMN_STNID_GMACJFR_1 102
+#define FMN_STNID_GMACRFR_1 103
+
+#define FMN_STNID_DMA 104
+#define FMN_STNID_DMA_0 104
+#define FMN_STNID_DMA_1 105
+#define FMN_STNID_DMA_2 106
+#define FMN_STNID_DMA_3 107
+
+#define FMN_STNID_XGS0FR 112
+#define FMN_STNID_XMAC0JFR 112
+#define FMN_STNID_XMAC0RFR 113
+
+#define FMN_STNID_XGS1FR 114
+#define FMN_STNID_XMAC1JFR 114
+#define FMN_STNID_XMAC1RFR 115
+#define FMN_STNID_SEC 120
+#define FMN_STNID_SEC0 120
+#define FMN_STNID_SEC1 121
+#define FMN_STNID_SEC2 122
+#define FMN_STNID_SEC3 123
+#define FMN_STNID_PK0 124
+#define FMN_STNID_SEC_RSA 124
+#define FMN_STNID_SEC_RSVD0 125
+#define FMN_STNID_SEC_RSVD1 126
+#define FMN_STNID_SEC_RSVD2 127
+
+#define FMN_STNID_GMAC1 80
+#define FMN_STNID_GMAC1_FR_0 81
+#define FMN_STNID_GMAC1_TX0 82
+#define FMN_STNID_GMAC1_TX1 83
+#define FMN_STNID_GMAC1_TX2 84
+#define FMN_STNID_GMAC1_TX3 85
+#define FMN_STNID_GMAC1_FR_1 87
+#define FMN_STNID_GMAC0 96
+#define FMN_STNID_GMAC0_FR_0 97
+#define FMN_STNID_GMAC0_TX0 98
+#define FMN_STNID_GMAC0_TX1 99
+#define FMN_STNID_GMAC0_TX2 100
+#define FMN_STNID_GMAC0_TX3 101
+#define FMN_STNID_GMAC0_FR_1 103
+#define FMN_STNID_CMP_0 108
+#define FMN_STNID_CMP_1 109
+#define FMN_STNID_CMP_2 110
+#define FMN_STNID_CMP_3 111
+#define FMN_STNID_PCIE_0 116
+#define FMN_STNID_PCIE_1 117
+#define FMN_STNID_PCIE_2 118
+#define FMN_STNID_PCIE_3 119
+#define FMN_STNID_XLS_PK0 121
+
+#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
+#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
+#define nlm_read_c2_cc2(s) __read_32bit_c2_register($18, s)
+#define nlm_read_c2_cc3(s) __read_32bit_c2_register($19, s)
+#define nlm_read_c2_cc4(s) __read_32bit_c2_register($20, s)
+#define nlm_read_c2_cc5(s) __read_32bit_c2_register($21, s)
+#define nlm_read_c2_cc6(s) __read_32bit_c2_register($22, s)
+#define nlm_read_c2_cc7(s) __read_32bit_c2_register($23, s)
+#define nlm_read_c2_cc8(s) __read_32bit_c2_register($24, s)
+#define nlm_read_c2_cc9(s) __read_32bit_c2_register($25, s)
+#define nlm_read_c2_cc10(s) __read_32bit_c2_register($26, s)
+#define nlm_read_c2_cc11(s) __read_32bit_c2_register($27, s)
+#define nlm_read_c2_cc12(s) __read_32bit_c2_register($28, s)
+#define nlm_read_c2_cc13(s) __read_32bit_c2_register($29, s)
+#define nlm_read_c2_cc14(s) __read_32bit_c2_register($30, s)
+#define nlm_read_c2_cc15(s) __read_32bit_c2_register($31, s)
+
+#define nlm_write_c2_cc0(s, v) __write_32bit_c2_register($16, s, v)
+#define nlm_write_c2_cc1(s, v) __write_32bit_c2_register($17, s, v)
+#define nlm_write_c2_cc2(s, v) __write_32bit_c2_register($18, s, v)
+#define nlm_write_c2_cc3(s, v) __write_32bit_c2_register($19, s, v)
+#define nlm_write_c2_cc4(s, v) __write_32bit_c2_register($20, s, v)
+#define nlm_write_c2_cc5(s, v) __write_32bit_c2_register($21, s, v)
+#define nlm_write_c2_cc6(s, v) __write_32bit_c2_register($22, s, v)
+#define nlm_write_c2_cc7(s, v) __write_32bit_c2_register($23, s, v)
+#define nlm_write_c2_cc8(s, v) __write_32bit_c2_register($24, s, v)
+#define nlm_write_c2_cc9(s, v) __write_32bit_c2_register($25, s, v)
+#define nlm_write_c2_cc10(s, v) __write_32bit_c2_register($26, s, v)
+#define nlm_write_c2_cc11(s, v) __write_32bit_c2_register($27, s, v)
+#define nlm_write_c2_cc12(s, v) __write_32bit_c2_register($28, s, v)
+#define nlm_write_c2_cc13(s, v) __write_32bit_c2_register($29, s, v)
+#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
+#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
+
+#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
+#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
+#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
+#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
+#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
+
+#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
+#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
+#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
+#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
+
+#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
+#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
+#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
+#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
+
+#define FMN_STN_RX_QSIZE 256
+#define FMN_NSTATIONS 128
+#define FMN_CORE_NBUCKETS 8
+
+static inline void nlm_msgsnd(unsigned int stid)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $1, %0\n"
+ "c2 0x10001\n" /* msgsnd $1 */
+ ".set pop\n"
+ : : "r" (stid) : "$1"
+ );
+}
+
+static inline void nlm_msgld(unsigned int pri)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $1, %0\n"
+ "c2 0x10002\n" /* msgld $1 */
+ ".set pop\n"
+ : : "r" (pri) : "$1"
+ );
+}
+
+static inline void nlm_msgwait(unsigned int mask)
+{
+ __asm__ volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "move $8, %0\n"
+ "c2 0x10003\n" /* msgwait $1 */
+ ".set pop\n"
+ : : "r" (mask) : "$1"
+ );
+}
+
+/*
+ * Disable interrupts and enable COP2 access
+ */
+static inline uint32_t nlm_cop2_enable(void)
+{
+ uint32_t sr = read_c0_status();
+
+ write_c0_status((sr & ~ST0_IE) | ST0_CU2);
+ return sr;
+}
+
+static inline void nlm_cop2_restore(uint32_t sr)
+{
+ write_c0_status(sr);
+}
+
+static inline void nlm_fmn_setup_intr(int irq, unsigned int tmask)
+{
+ uint32_t config;
+
+ config = (1 << 24) /* interrupt water mark - 1 msg */
+ | (irq << 16) /* irq */
+ | (tmask << 8) /* thread mask */
+ | 0x2; /* enable watermark intr, disable empty intr */
+ nlm_write_c2_config(config);
+}
+
+struct nlm_fmn_msg {
+ uint64_t msg0;
+ uint64_t msg1;
+ uint64_t msg2;
+ uint64_t msg3;
+};
+
+static inline int nlm_fmn_send(unsigned int size, unsigned int code,
+ unsigned int stid, struct nlm_fmn_msg *msg)
+{
+ unsigned int dest;
+ uint32_t status;
+ int i;
+
+ /*
+ * Make sure that all the writes pending at the cpu are flushed.
+ * Any writes pending on CPU will not be seen by devices. L1/L2
+ * caches are coherent with IO, so no cache flush needed.
+ */
+ __asm __volatile("sync");
+
+ /* Load TX message buffers */
+ nlm_write_c2_tx_msg0(msg->msg0);
+ nlm_write_c2_tx_msg1(msg->msg1);
+ nlm_write_c2_tx_msg2(msg->msg2);
+ nlm_write_c2_tx_msg3(msg->msg3);
+ dest = ((size - 1) << 16) | (code << 8) | stid;
+
+ /*
+ * Retry a few times on credit fail, this should be a
+ * transient condition, unless there is a configuration
+ * failure, or the receiver is stuck.
+ */
+ for (i = 0; i < 8; i++) {
+ nlm_msgsnd(dest);
+ status = nlm_read_c2_status(0);
+ if ((status & 0x2) == 1)
+ pr_info("Send pending fail!\n");
+ if ((status & 0x4) == 0)
+ return 0;
+ }
+
+ /* If there is a credit failure, return error */
+ return status & 0x06;
+}
+
+static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid,
+ struct nlm_fmn_msg *msg)
+{
+ uint32_t status, tmp;
+
+ nlm_msgld(bucket);
+
+ /* wait for load pending to clear */
+ do {
+ status = nlm_read_c2_status(1);
+ } while ((status & 0x08) != 0);
+
+ /* receive error bits */
+ tmp = status & 0x30;
+ if (tmp != 0)
+ return tmp;
+
+ *size = ((status & 0xc0) >> 6) + 1;
+ *code = (status & 0xff00) >> 8;
+ *stid = (status & 0x7f0000) >> 16;
+ msg->msg0 = nlm_read_c2_rx_msg0();
+ msg->msg1 = nlm_read_c2_rx_msg1();
+ msg->msg2 = nlm_read_c2_rx_msg2();
+ msg->msg3 = nlm_read_c2_rx_msg3();
+
+ return 0;
+}
+
+struct xlr_fmn_info {
+ int num_buckets;
+ int start_stn_id;
+ int end_stn_id;
+ int credit_config[128];
+};
+
+struct xlr_board_fmn_config {
+ int bucket_size[128]; /* size of buckets for all stations */
+ struct xlr_fmn_info cpu[8];
+ struct xlr_fmn_info gmac[2];
+ struct xlr_fmn_info dma;
+ struct xlr_fmn_info cmp;
+ struct xlr_fmn_info sae;
+ struct xlr_fmn_info xgmac[2];
+};
+
+extern int nlm_register_fmn_handler(int start, int end,
+ void (*fn)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg);
+extern void xlr_percpu_fmn_init(void);
+extern void nlm_setup_fmn_irq(void);
+extern void xlr_board_info_setup(void);
+
+extern struct xlr_board_fmn_config xlr_board_fmn_config;
+#endif
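
The comments in the new fmn.h spell out the intended calling sequence: raise CU2 with interrupts off, load the four TX message registers, issue msgsnd and retry briefly on a credit failure. A hedged sketch of how a sender might use the interface; the station id, message code and payload below are placeholders, not values taken from a real board configuration:

    /* Usage sketch only; assumes it is built in kernel context where the
     * header added by this patch is available. */
    #include <asm/netlogic/xlr/fmn.h>

    static int example_fmn_send_one(uint64_t data)
    {
            struct nlm_fmn_msg msg = { .msg0 = data };
            uint32_t flags;
            int err;

            flags = nlm_cop2_enable();      /* irqs off, COP2 usable */
            /* one 64-bit word (size 1), code 0, to a placeholder station */
            err = nlm_fmn_send(1, 0, FMN_STNID_GMAC0_TX0, &msg);
            nlm_cop2_restore(flags);        /* restore the status register */

            return err ? -1 : 0;
    }

Receivers register a callback through nlm_register_fmn_handler(); nlm_fmn_receive() provides the matching bucket-read primitive, and it likewise has to run with COP2 access enabled.
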
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h
index 868013e62f3..9a691b1f91b 100644
--- a/arch/mips/include/asm/netlogic/xlr/pic.h
+++ b/arch/mips/include/asm/netlogic/xlr/pic.h
@@ -258,7 +258,5 @@ nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
nlm_write_reg(base, PIC_IRT_1(irt),
(1 << 30) | (1 << 6) | irq);
}
-
-extern uint64_t nlm_pic_base;
#endif
#endif /* _ASM_NLM_XLR_PIC_H */
diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h
index ff4a17b0bf7..c1667e0c272 100644
--- a/arch/mips/include/asm/netlogic/xlr/xlr.h
+++ b/arch/mips/include/asm/netlogic/xlr/xlr.h
@@ -51,10 +51,8 @@ static inline unsigned int nlm_chip_is_xls_b(void)
return ((prid & 0xf000) == 0x4000);
}
-/*
- * XLR chip types
- */
- /* The XLS product line has chip versions 0x[48c]? */
+/* XLR chip types */
+/* The XLS product line has chip versions 0x[48c]? */
static inline unsigned int nlm_chip_is_xls(void)
{
uint32_t prid = read_c0_prid();
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 877845b84b1..42db2be663f 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -370,4 +370,6 @@ void cvmx_bootmem_lock(void);
*/
void cvmx_bootmem_unlock(void);
+extern struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void);
+
#endif /* __CVMX_BOOTMEM_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
new file mode 100644
index 00000000000..36f51072114
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
@@ -0,0 +1,3457 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2012 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_LMCX_DEFS_H__
+#define __CVMX_LMCX_DEFS_H__
+
+#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull)
+static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000098ull) + (block_id) * 0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000038ull) + (block_id) * 0x60000000ull;
+}
+
+static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x0001180088000020ull) + (block_id) * 0x60000000ull;
+}
+
+#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull)
+static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x1000000ull;
+ }
+ return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + (block_id) * 0x60000000ull;
+}
+
+#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull))
+#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8)
+#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull))
+#define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull))
+#define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull))
+#define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull)
+#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
+#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull)
+#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull)
+
+union cvmx_lmcx_bist_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t start:1;
+#else
+ uint64_t start:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+ struct cvmx_lmcx_bist_ctl_s cn50xx;
+ struct cvmx_lmcx_bist_ctl_s cn52xx;
+ struct cvmx_lmcx_bist_ctl_s cn52xxp1;
+ struct cvmx_lmcx_bist_ctl_s cn56xx;
+ struct cvmx_lmcx_bist_ctl_s cn56xxp1;
+};
+
+union cvmx_lmcx_bist_result {
+ uint64_t u64;
+ struct cvmx_lmcx_bist_result_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t csrd2e:1;
+ uint64_t csre2d:1;
+ uint64_t mwf:1;
+ uint64_t mwd:3;
+ uint64_t mwc:1;
+ uint64_t mrf:1;
+ uint64_t mrd:3;
+#else
+ uint64_t mrd:3;
+ uint64_t mrf:1;
+ uint64_t mwc:1;
+ uint64_t mwd:3;
+ uint64_t mwf:1;
+ uint64_t csre2d:1;
+ uint64_t csrd2e:1;
+ uint64_t reserved_11_63:53;
+#endif
+ } s;
+ struct cvmx_lmcx_bist_result_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t mwf:1;
+ uint64_t mwd:3;
+ uint64_t mwc:1;
+ uint64_t mrf:1;
+ uint64_t mrd:3;
+#else
+ uint64_t mrd:3;
+ uint64_t mrf:1;
+ uint64_t mwc:1;
+ uint64_t mwd:3;
+ uint64_t mwf:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_bist_result_s cn52xx;
+ struct cvmx_lmcx_bist_result_s cn52xxp1;
+ struct cvmx_lmcx_bist_result_s cn56xx;
+ struct cvmx_lmcx_bist_result_s cn56xxp1;
+};
+
+union cvmx_lmcx_char_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_char_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t dr:1;
+ uint64_t skew_on:1;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+#else
+ uint64_t prbs:32;
+ uint64_t prog:8;
+ uint64_t sel:1;
+ uint64_t en:1;
+ uint64_t skew_on:1;
+ uint64_t dr:1;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+ struct cvmx_lmcx_char_ctl_s cn61xx;
+ struct cvmx_lmcx_char_ctl_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_42_63:22;
+ uint64_t en:1;
+ uint64_t sel:1;
+ uint64_t prog:8;
+ uint64_t prbs:32;
+#else
+ uint64_t prbs:32;
+ uint64_t prog:8;
+ uint64_t sel:1;
+ uint64_t en:1;
+ uint64_t reserved_42_63:22;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
+ struct cvmx_lmcx_char_ctl_s cn66xx;
+ struct cvmx_lmcx_char_ctl_s cn68xx;
+ struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
+ struct cvmx_lmcx_char_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_char_mask0 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask:64;
+#else
+ uint64_t mask:64;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask0_s cn61xx;
+ struct cvmx_lmcx_char_mask0_s cn63xx;
+ struct cvmx_lmcx_char_mask0_s cn63xxp1;
+ struct cvmx_lmcx_char_mask0_s cn66xx;
+ struct cvmx_lmcx_char_mask0_s cn68xx;
+ struct cvmx_lmcx_char_mask0_s cn68xxp1;
+ struct cvmx_lmcx_char_mask0_s cnf71xx;
+};
+
+union cvmx_lmcx_char_mask1 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t mask:8;
+#else
+ uint64_t mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask1_s cn61xx;
+ struct cvmx_lmcx_char_mask1_s cn63xx;
+ struct cvmx_lmcx_char_mask1_s cn63xxp1;
+ struct cvmx_lmcx_char_mask1_s cn66xx;
+ struct cvmx_lmcx_char_mask1_s cn68xx;
+ struct cvmx_lmcx_char_mask1_s cn68xxp1;
+ struct cvmx_lmcx_char_mask1_s cnf71xx;
+};
+
+union cvmx_lmcx_char_mask2 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t mask:64;
+#else
+ uint64_t mask:64;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask2_s cn61xx;
+ struct cvmx_lmcx_char_mask2_s cn63xx;
+ struct cvmx_lmcx_char_mask2_s cn63xxp1;
+ struct cvmx_lmcx_char_mask2_s cn66xx;
+ struct cvmx_lmcx_char_mask2_s cn68xx;
+ struct cvmx_lmcx_char_mask2_s cn68xxp1;
+ struct cvmx_lmcx_char_mask2_s cnf71xx;
+};
+
+union cvmx_lmcx_char_mask3 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t mask:8;
+#else
+ uint64_t mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask3_s cn61xx;
+ struct cvmx_lmcx_char_mask3_s cn63xx;
+ struct cvmx_lmcx_char_mask3_s cn63xxp1;
+ struct cvmx_lmcx_char_mask3_s cn66xx;
+ struct cvmx_lmcx_char_mask3_s cn68xx;
+ struct cvmx_lmcx_char_mask3_s cn68xxp1;
+ struct cvmx_lmcx_char_mask3_s cnf71xx;
+};
+
+union cvmx_lmcx_char_mask4 {
+ uint64_t u64;
+ struct cvmx_lmcx_char_mask4_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t reset_n_mask:1;
+ uint64_t a_mask:16;
+ uint64_t ba_mask:3;
+ uint64_t we_n_mask:1;
+ uint64_t cas_n_mask:1;
+ uint64_t ras_n_mask:1;
+ uint64_t odt1_mask:2;
+ uint64_t odt0_mask:2;
+ uint64_t cs1_n_mask:2;
+ uint64_t cs0_n_mask:2;
+ uint64_t cke_mask:2;
+#else
+ uint64_t cke_mask:2;
+ uint64_t cs0_n_mask:2;
+ uint64_t cs1_n_mask:2;
+ uint64_t odt0_mask:2;
+ uint64_t odt1_mask:2;
+ uint64_t ras_n_mask:1;
+ uint64_t cas_n_mask:1;
+ uint64_t we_n_mask:1;
+ uint64_t ba_mask:3;
+ uint64_t a_mask:16;
+ uint64_t reset_n_mask:1;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+ struct cvmx_lmcx_char_mask4_s cn61xx;
+ struct cvmx_lmcx_char_mask4_s cn63xx;
+ struct cvmx_lmcx_char_mask4_s cn63xxp1;
+ struct cvmx_lmcx_char_mask4_s cn66xx;
+ struct cvmx_lmcx_char_mask4_s cn68xx;
+ struct cvmx_lmcx_char_mask4_s cn68xxp1;
+ struct cvmx_lmcx_char_mask4_s cnf71xx;
+};
+
+union cvmx_lmcx_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t pctl_clk:4;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_comp_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_cmd:4;
+ uint64_t pctl_dat:4;
+#else
+ uint64_t pctl_dat:4;
+ uint64_t pctl_cmd:4;
+ uint64_t pctl_clk:4;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t nctl_cmd:4;
+ uint64_t nctl_clk:4;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn38xx;
+ struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2;
+ struct cvmx_lmcx_comp_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t reserved_5_11:7;
+ uint64_t pctl_dat:5;
+#else
+ uint64_t pctl_dat:5;
+ uint64_t reserved_5_11:7;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn52xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn56xx;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1;
+ struct cvmx_lmcx_comp_ctl_cn50xx cn58xx;
+ struct cvmx_lmcx_comp_ctl_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_dat:4;
+ uint64_t pctl_csr:4;
+ uint64_t reserved_4_11:8;
+ uint64_t pctl_dat:4;
+#else
+ uint64_t pctl_dat:4;
+ uint64_t reserved_4_11:8;
+ uint64_t pctl_csr:4;
+ uint64_t nctl_dat:4;
+ uint64_t reserved_20_27:8;
+ uint64_t nctl_csr:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn58xxp1;
+};
+
+union cvmx_lmcx_comp_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_comp_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_34_63:30;
+ uint64_t ddr__ptune:4;
+ uint64_t ddr__ntune:4;
+ uint64_t m180:1;
+ uint64_t byp:1;
+ uint64_t ptune:4;
+ uint64_t ntune:4;
+ uint64_t rodt_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t dqx_ctl:4;
+#else
+ uint64_t dqx_ctl:4;
+ uint64_t ck_ctl:4;
+ uint64_t cmd_ctl:4;
+ uint64_t rodt_ctl:4;
+ uint64_t ntune:4;
+ uint64_t ptune:4;
+ uint64_t byp:1;
+ uint64_t m180:1;
+ uint64_t ddr__ntune:4;
+ uint64_t ddr__ptune:4;
+ uint64_t reserved_34_63:30;
+#endif
+ } s;
+ struct cvmx_lmcx_comp_ctl2_s cn61xx;
+ struct cvmx_lmcx_comp_ctl2_s cn63xx;
+ struct cvmx_lmcx_comp_ctl2_s cn63xxp1;
+ struct cvmx_lmcx_comp_ctl2_s cn66xx;
+ struct cvmx_lmcx_comp_ctl2_s cn68xx;
+ struct cvmx_lmcx_comp_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_comp_ctl2_s cnf71xx;
+};
+
+union cvmx_lmcx_config {
+ uint64_t u64;
+ struct cvmx_lmcx_config_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_61_63:3;
+ uint64_t mode32b:1;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t scrz:1;
+ uint64_t mode32b:1;
+ uint64_t reserved_61_63:3;
+#endif
+ } s;
+ struct cvmx_lmcx_config_s cn61xx;
+ struct cvmx_lmcx_config_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_config_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_55_63:9;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t reserved_55_63:9;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_config_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_60_63:4;
+ uint64_t scrz:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t init_status:4;
+ uint64_t mirrmask:4;
+ uint64_t rankmask:4;
+ uint64_t rank_ena:1;
+ uint64_t sref_with_dll:1;
+ uint64_t early_dqx:1;
+ uint64_t sequence:3;
+ uint64_t ref_zqcs_int:19;
+ uint64_t reset:1;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reset:1;
+ uint64_t ref_zqcs_int:19;
+ uint64_t sequence:3;
+ uint64_t early_dqx:1;
+ uint64_t sref_with_dll:1;
+ uint64_t rank_ena:1;
+ uint64_t rankmask:4;
+ uint64_t mirrmask:4;
+ uint64_t init_status:4;
+ uint64_t early_unload_d0_r0:1;
+ uint64_t early_unload_d0_r1:1;
+ uint64_t early_unload_d1_r0:1;
+ uint64_t early_unload_d1_r1:1;
+ uint64_t scrz:1;
+ uint64_t reserved_60_63:4;
+#endif
+ } cn66xx;
+ struct cvmx_lmcx_config_cn63xx cn68xx;
+ struct cvmx_lmcx_config_cn63xx cn68xxp1;
+ struct cvmx_lmcx_config_s cnf71xx;
+};
+
+union cvmx_lmcx_control {
+ uint64_t u64;
+ struct cvmx_lmcx_control_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t crm_max:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_cnt:5;
+ uint64_t thrmax:4;
+ uint64_t persub:8;
+ uint64_t thrcnt:12;
+ uint64_t scramble_ena:1;
+#endif
+ } s;
+ struct cvmx_lmcx_control_s cn61xx;
+ struct cvmx_lmcx_control_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t reserved_24_63:40;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_control_cn63xx cn63xxp1;
+ struct cvmx_lmcx_control_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t scramble_ena:1;
+ uint64_t reserved_24_62:39;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t reserved_24_62:39;
+ uint64_t scramble_ena:1;
+#endif
+ } cn66xx;
+ struct cvmx_lmcx_control_cn68xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63_63:1;
+ uint64_t thrcnt:12;
+ uint64_t persub:8;
+ uint64_t thrmax:4;
+ uint64_t crm_cnt:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_max:5;
+ uint64_t rodt_bprch:1;
+ uint64_t wodt_bprch:1;
+ uint64_t bprch:2;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t nxm_write_en:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t inorder_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t throttle_rd:1;
+ uint64_t fprch2:2;
+ uint64_t pocas:1;
+ uint64_t ddr2t:1;
+ uint64_t bwcnt:1;
+ uint64_t rdimm_ena:1;
+#else
+ uint64_t rdimm_ena:1;
+ uint64_t bwcnt:1;
+ uint64_t ddr2t:1;
+ uint64_t pocas:1;
+ uint64_t fprch2:2;
+ uint64_t throttle_rd:1;
+ uint64_t throttle_wr:1;
+ uint64_t inorder_rd:1;
+ uint64_t inorder_wr:1;
+ uint64_t elev_prio_dis:1;
+ uint64_t nxm_write_en:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t auto_dclkdis:1;
+ uint64_t int_zqcs_dis:1;
+ uint64_t ext_zqcs_dis:1;
+ uint64_t bprch:2;
+ uint64_t wodt_bprch:1;
+ uint64_t rodt_bprch:1;
+ uint64_t crm_max:5;
+ uint64_t crm_thr:5;
+ uint64_t crm_cnt:5;
+ uint64_t thrmax:4;
+ uint64_t persub:8;
+ uint64_t thrcnt:12;
+ uint64_t reserved_63_63:1;
+#endif
+ } cn68xx;
+ struct cvmx_lmcx_control_cn68xx cn68xxp1;
+ struct cvmx_lmcx_control_cn66xx cnf71xx;
+};
+
+union cvmx_lmcx_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t reserved_10_11:2;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t reserved_10_11:2;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t pll_div2:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t pll_div2:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t pll_div2:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ctl_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t set_zero:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode128b:1;
+ uint64_t set_zero:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
+ struct cvmx_lmcx_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_17_17:1;
+ uint64_t pll_bypass:1;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t pll_bypass:1;
+ uint64_t reserved_17_17:1;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode32b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode32b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:4;
+ uint64_t ddr__pctl:4;
+ uint64_t slow_scf:1;
+ uint64_t xor_bank:1;
+ uint64_t max_write_batch:4;
+ uint64_t reserved_16_17:2;
+ uint64_t rdimm_ena:1;
+ uint64_t r2r_slot:1;
+ uint64_t inorder_mwf:1;
+ uint64_t inorder_mrf:1;
+ uint64_t dreset:1;
+ uint64_t mode128b:1;
+ uint64_t fprch2:1;
+ uint64_t bprch:1;
+ uint64_t sil_lat:2;
+ uint64_t tskw:2;
+ uint64_t qs_dic:2;
+ uint64_t dic:2;
+#else
+ uint64_t dic:2;
+ uint64_t qs_dic:2;
+ uint64_t tskw:2;
+ uint64_t sil_lat:2;
+ uint64_t bprch:1;
+ uint64_t fprch2:1;
+ uint64_t mode128b:1;
+ uint64_t dreset:1;
+ uint64_t inorder_mrf:1;
+ uint64_t inorder_mwf:1;
+ uint64_t r2r_slot:1;
+ uint64_t rdimm_ena:1;
+ uint64_t reserved_16_17:2;
+ uint64_t max_write_batch:4;
+ uint64_t xor_bank:1;
+ uint64_t slow_scf:1;
+ uint64_t ddr__pctl:4;
+ uint64_t ddr__nctl:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn58xx;
+ struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
+};
+
+union cvmx_lmcx_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_7:6;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t sequence:3;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } s;
+ struct cvmx_lmcx_ctl1_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ctl1_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_2_7:6;
+ uint64_t data_layout:2;
+#else
+ uint64_t data_layout:2;
+ uint64_t reserved_2_7:6;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_ctl1_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t ecc_adr:1;
+ uint64_t forcewrite:4;
+ uint64_t idlepower:3;
+ uint64_t sequence:3;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t sequence:3;
+ uint64_t idlepower:3;
+ uint64_t forcewrite:4;
+ uint64_t ecc_adr:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xx;
+ struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
+ struct cvmx_lmcx_ctl1_cn58xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sil_mode:1;
+ uint64_t dcc_enable:1;
+ uint64_t reserved_0_7:8;
+#else
+ uint64_t reserved_0_7:8;
+ uint64_t dcc_enable:1;
+ uint64_t sil_mode:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn58xx;
+ struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
+};
+
+union cvmx_lmcx_dclk_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dclkcnt:64;
+#else
+ uint64_t dclkcnt:64;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_s cn61xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xx;
+ struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cn66xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xx;
+ struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
+ struct cvmx_lmcx_dclk_cnt_s cnf71xx;
+};
+
+union cvmx_lmcx_dclk_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_hi:32;
+#else
+ uint64_t dclkcnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
+};
+
+union cvmx_lmcx_dclk_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t dclkcnt_lo:32;
+#else
+ uint64_t dclkcnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
+};
+
+union cvmx_lmcx_dclk_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dclk_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t off90_ena:1;
+ uint64_t dclk90_byp:1;
+ uint64_t dclk90_ld:1;
+ uint64_t dclk90_vlu:5;
+#else
+ uint64_t dclk90_vlu:5;
+ uint64_t dclk90_ld:1;
+ uint64_t dclk90_byp:1;
+ uint64_t off90_ena:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_lmcx_dclk_ctl_s cn56xx;
+ struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
+};
+
+union cvmx_lmcx_ddr2_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr2_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t rdqs:1;
+ uint64_t ddr2:1;
+#else
+ uint64_t ddr2:1;
+ uint64_t rdqs:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_vlu:5;
+ uint64_t qdll_ena:1;
+ uint64_t odt_ena:1;
+ uint64_t ddr2t:1;
+ uint64_t crip_mode:1;
+ uint64_t tfaw:5;
+ uint64_t ddr_eof:4;
+ uint64_t silo_hc:1;
+ uint64_t twr:3;
+ uint64_t bwcnt:1;
+ uint64_t pocas:1;
+ uint64_t addlat:3;
+ uint64_t burst8:1;
+ uint64_t bank8:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bank8:1;
+ uint64_t burst8:1;
+ uint64_t addlat:3;
+ uint64_t pocas:1;
+ uint64_t bwcnt:1;
+ uint64_t twr:3;
+ uint64_t silo_hc:1;
+ uint64_t ddr_eof:4;
+ uint64_t tfaw:5;
+ uint64_t crip_mode:1;
+ uint64_t ddr2t:1;
+ uint64_t odt_ena:1;
+ uint64_t qdll_ena:1;
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_byp:1;
+ uint64_t reserved_1_1:1;
+ uint64_t ddr2:1;
+#else
+ uint64_t ddr2:1;
+ uint64_t reserved_1_1:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_vlu:5;
+ uint64_t qdll_ena:1;
+ uint64_t odt_ena:1;
+ uint64_t ddr2t:1;
+ uint64_t crip_mode:1;
+ uint64_t tfaw:5;
+ uint64_t ddr_eof:4;
+ uint64_t silo_hc:1;
+ uint64_t twr:3;
+ uint64_t bwcnt:1;
+ uint64_t pocas:1;
+ uint64_t addlat:3;
+ uint64_t burst8:1;
+ uint64_t bank8:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
+ struct cvmx_lmcx_ddr2_ctl_s cn50xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xx;
+ struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
+};
+
+union cvmx_lmcx_ddr_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_ddr_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_27_63:37;
+ uint64_t jtg_test_mode:1;
+ uint64_t dfm_div_reset:1;
+ uint64_t dfm_ps_en:3;
+ uint64_t ddr_div_reset:1;
+ uint64_t ddr_ps_en:3;
+ uint64_t diffamp:4;
+ uint64_t cps:3;
+ uint64_t cpb:3;
+ uint64_t reset_n:1;
+ uint64_t clkf:7;
+#else
+ uint64_t clkf:7;
+ uint64_t reset_n:1;
+ uint64_t cpb:3;
+ uint64_t cps:3;
+ uint64_t diffamp:4;
+ uint64_t ddr_ps_en:3;
+ uint64_t ddr_div_reset:1;
+ uint64_t dfm_ps_en:3;
+ uint64_t dfm_div_reset:1;
+ uint64_t jtg_test_mode:1;
+ uint64_t reserved_27_63:37;
+#endif
+ } s;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn61xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn63xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn66xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn68xx;
+ struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1;
+ struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_delay_cfg {
+ uint64_t u64;
+ struct cvmx_lmcx_delay_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t dq:5;
+ uint64_t cmd:5;
+ uint64_t clk:5;
+#else
+ uint64_t clk:5;
+ uint64_t cmd:5;
+ uint64_t dq:5;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+ struct cvmx_lmcx_delay_cfg_s cn30xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t dq:4;
+ uint64_t reserved_9_9:1;
+ uint64_t cmd:4;
+ uint64_t reserved_4_4:1;
+ uint64_t clk:4;
+#else
+ uint64_t clk:4;
+ uint64_t reserved_4_4:1;
+ uint64_t cmd:4;
+ uint64_t reserved_9_9:1;
+ uint64_t dq:4;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
+ struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
+};
+
+union cvmx_lmcx_dimmx_params {
+ uint64_t u64;
+ struct cvmx_lmcx_dimmx_params_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rc15:4;
+ uint64_t rc14:4;
+ uint64_t rc13:4;
+ uint64_t rc12:4;
+ uint64_t rc11:4;
+ uint64_t rc10:4;
+ uint64_t rc9:4;
+ uint64_t rc8:4;
+ uint64_t rc7:4;
+ uint64_t rc6:4;
+ uint64_t rc5:4;
+ uint64_t rc4:4;
+ uint64_t rc3:4;
+ uint64_t rc2:4;
+ uint64_t rc1:4;
+ uint64_t rc0:4;
+#else
+ uint64_t rc0:4;
+ uint64_t rc1:4;
+ uint64_t rc2:4;
+ uint64_t rc3:4;
+ uint64_t rc4:4;
+ uint64_t rc5:4;
+ uint64_t rc6:4;
+ uint64_t rc7:4;
+ uint64_t rc8:4;
+ uint64_t rc9:4;
+ uint64_t rc10:4;
+ uint64_t rc11:4;
+ uint64_t rc12:4;
+ uint64_t rc13:4;
+ uint64_t rc14:4;
+ uint64_t rc15:4;
+#endif
+ } s;
+ struct cvmx_lmcx_dimmx_params_s cn61xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xx;
+ struct cvmx_lmcx_dimmx_params_s cn63xxp1;
+ struct cvmx_lmcx_dimmx_params_s cn66xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xx;
+ struct cvmx_lmcx_dimmx_params_s cn68xxp1;
+ struct cvmx_lmcx_dimmx_params_s cnf71xx;
+};
+
+union cvmx_lmcx_dimm_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dimm_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t parity:1;
+ uint64_t tcws:13;
+ uint64_t dimm1_wmask:16;
+ uint64_t dimm0_wmask:16;
+#else
+ uint64_t dimm0_wmask:16;
+ uint64_t dimm1_wmask:16;
+ uint64_t tcws:13;
+ uint64_t parity:1;
+ uint64_t reserved_46_63:18;
+#endif
+ } s;
+ struct cvmx_lmcx_dimm_ctl_s cn61xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xx;
+ struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cn66xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xx;
+ struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
+ struct cvmx_lmcx_dimm_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_dll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t dreset:1;
+ uint64_t dll90_byp:1;
+ uint64_t dll90_ena:1;
+ uint64_t dll90_vlu:5;
+#else
+ uint64_t dll90_vlu:5;
+ uint64_t dll90_ena:1;
+ uint64_t dll90_byp:1;
+ uint64_t dreset:1;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl_s cn52xx;
+ struct cvmx_lmcx_dll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_dll_ctl_s cn56xx;
+ struct cvmx_lmcx_dll_ctl_s cn56xxp1;
+};
+
+union cvmx_lmcx_dll_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_16_63:48;
+ uint64_t intf_en:1;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+#else
+ uint64_t byp_setting:8;
+ uint64_t byp_sel:4;
+ uint64_t quad_dll_ena:1;
+ uint64_t dreset:1;
+ uint64_t dll_bringup:1;
+ uint64_t intf_en:1;
+ uint64_t reserved_16_63:48;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl2_s cn61xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t dll_bringup:1;
+ uint64_t dreset:1;
+ uint64_t quad_dll_ena:1;
+ uint64_t byp_sel:4;
+ uint64_t byp_setting:8;
+#else
+ uint64_t byp_setting:8;
+ uint64_t byp_sel:4;
+ uint64_t quad_dll_ena:1;
+ uint64_t dreset:1;
+ uint64_t dll_bringup:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl2_s cn68xx;
+ struct cvmx_lmcx_dll_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_dll_ctl2_s cnf71xx;
+};
+
+union cvmx_lmcx_dll_ctl3 {
+ uint64_t u64;
+ struct cvmx_lmcx_dll_ctl3_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_41_63:23;
+ uint64_t dclk90_fwd:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_byp_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+#else
+ uint64_t offset:6;
+ uint64_t byte_sel:4;
+ uint64_t mode_sel:2;
+ uint64_t load_offset:1;
+ uint64_t offset_ena:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t dll_mode:1;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll90_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t dclk90_byp_setting:8;
+ uint64_t dclk90_byp_sel:1;
+ uint64_t dclk90_recal_dis:1;
+ uint64_t ddr_90_dly_byp:1;
+ uint64_t dclk90_fwd:1;
+ uint64_t reserved_41_63:23;
+#endif
+ } s;
+ struct cvmx_lmcx_dll_ctl3_s cn61xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t dll_fast:1;
+ uint64_t dll90_setting:8;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll_mode:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t offset_ena:1;
+ uint64_t load_offset:1;
+ uint64_t mode_sel:2;
+ uint64_t byte_sel:4;
+ uint64_t offset:6;
+#else
+ uint64_t offset:6;
+ uint64_t byte_sel:4;
+ uint64_t mode_sel:2;
+ uint64_t load_offset:1;
+ uint64_t offset_ena:1;
+ uint64_t dll90_byte_sel:4;
+ uint64_t dll_mode:1;
+ uint64_t fine_tune_mode:1;
+ uint64_t dll90_setting:8;
+ uint64_t dll_fast:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn63xx;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
+ struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
+ struct cvmx_lmcx_dll_ctl3_s cn68xx;
+ struct cvmx_lmcx_dll_ctl3_s cn68xxp1;
+ struct cvmx_lmcx_dll_ctl3_s cnf71xx;
+};
+
+union cvmx_lmcx_dual_memcfg {
+ uint64_t u64;
+ struct cvmx_lmcx_dual_memcfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_20_63:44;
+ uint64_t bank8:1;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_15:8;
+ uint64_t row_lsb:3;
+ uint64_t bank8:1;
+ uint64_t reserved_20_63:44;
+#endif
+ } s;
+ struct cvmx_lmcx_dual_memcfg_s cn50xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xx;
+ struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn56xx;
+ struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
+ struct cvmx_lmcx_dual_memcfg_s cn58xx;
+ struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_19_63:45;
+ uint64_t row_lsb:3;
+ uint64_t reserved_8_15:8;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_15:8;
+ uint64_t row_lsb:3;
+ uint64_t reserved_19_63:45;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
+ struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
+};
+
+union cvmx_lmcx_ecc_synd {
+ uint64_t u64;
+ struct cvmx_lmcx_ecc_synd_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t mrdsyn3:8;
+ uint64_t mrdsyn2:8;
+ uint64_t mrdsyn1:8;
+ uint64_t mrdsyn0:8;
+#else
+ uint64_t mrdsyn0:8;
+ uint64_t mrdsyn1:8;
+ uint64_t mrdsyn2:8;
+ uint64_t mrdsyn3:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ecc_synd_s cn30xx;
+ struct cvmx_lmcx_ecc_synd_s cn31xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xx;
+ struct cvmx_lmcx_ecc_synd_s cn38xxp2;
+ struct cvmx_lmcx_ecc_synd_s cn50xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xx;
+ struct cvmx_lmcx_ecc_synd_s cn52xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn56xx;
+ struct cvmx_lmcx_ecc_synd_s cn56xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn58xx;
+ struct cvmx_lmcx_ecc_synd_s cn58xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn61xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xx;
+ struct cvmx_lmcx_ecc_synd_s cn63xxp1;
+ struct cvmx_lmcx_ecc_synd_s cn66xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xx;
+ struct cvmx_lmcx_ecc_synd_s cn68xxp1;
+ struct cvmx_lmcx_ecc_synd_s cnf71xx;
+};
+
+union cvmx_lmcx_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_lmcx_fadr_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:14;
+ uint64_t fcol:12;
+#else
+ uint64_t fcol:12;
+ uint64_t frow:14;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_fadr_cn30xx cn31xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xx;
+ struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
+ struct cvmx_lmcx_fadr_cn30xx cn50xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xx;
+ struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn56xx;
+ struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
+ struct cvmx_lmcx_fadr_cn30xx cn58xx;
+ struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
+ struct cvmx_lmcx_fadr_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+#else
+ uint64_t fcol:14;
+ uint64_t frow:16;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_36_63:28;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xx;
+ struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cn66xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xx;
+ struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
+ struct cvmx_lmcx_fadr_cn61xx cnf71xx;
+};
+
+union cvmx_lmcx_ifb_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ifbcnt:64;
+#else
+ uint64_t ifbcnt:64;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_s cn61xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xx;
+ struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cn66xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xx;
+ struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ifb_cnt_s cnf71xx;
+};
+
+union cvmx_lmcx_ifb_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_hi:32;
+#else
+ uint64_t ifbcnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
+};
+
+union cvmx_lmcx_ifb_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ifb_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ifbcnt_lo:32;
+#else
+ uint64_t ifbcnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
+};
+
+union cvmx_lmcx_int {
+ uint64_t u64;
+ struct cvmx_lmcx_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t nxm_wr_err:1;
+#else
+ uint64_t nxm_wr_err:1;
+ uint64_t sec_err:4;
+ uint64_t ded_err:4;
+ uint64_t reserved_9_63:55;
+#endif
+ } s;
+ struct cvmx_lmcx_int_s cn61xx;
+ struct cvmx_lmcx_int_s cn63xx;
+ struct cvmx_lmcx_int_s cn63xxp1;
+ struct cvmx_lmcx_int_s cn66xx;
+ struct cvmx_lmcx_int_s cn68xx;
+ struct cvmx_lmcx_int_s cn68xxp1;
+ struct cvmx_lmcx_int_s cnf71xx;
+};
+
+union cvmx_lmcx_int_en {
+ uint64_t u64;
+ struct cvmx_lmcx_int_en_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_nxm_wr_ena:1;
+#else
+ uint64_t intr_nxm_wr_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_ded_ena:1;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+ struct cvmx_lmcx_int_en_s cn61xx;
+ struct cvmx_lmcx_int_en_s cn63xx;
+ struct cvmx_lmcx_int_en_s cn63xxp1;
+ struct cvmx_lmcx_int_en_s cn66xx;
+ struct cvmx_lmcx_int_en_s cn68xx;
+ struct cvmx_lmcx_int_en_s cn68xxp1;
+ struct cvmx_lmcx_int_en_s cnf71xx;
+};
+
+union cvmx_lmcx_mem_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t reset:1;
+ uint64_t silo_qc:1;
+ uint64_t bunk_ena:1;
+ uint64_t ded_err:4;
+ uint64_t sec_err:4;
+ uint64_t intr_ded_ena:1;
+ uint64_t intr_sec_ena:1;
+ uint64_t tcl:4;
+ uint64_t ref_int:6;
+ uint64_t pbank_lsb:4;
+ uint64_t row_lsb:3;
+ uint64_t ecc_ena:1;
+ uint64_t init_start:1;
+#else
+ uint64_t init_start:1;
+ uint64_t ecc_ena:1;
+ uint64_t row_lsb:3;
+ uint64_t pbank_lsb:4;
+ uint64_t ref_int:6;
+ uint64_t tcl:4;
+ uint64_t intr_sec_ena:1;
+ uint64_t intr_ded_ena:1;
+ uint64_t sec_err:4;
+ uint64_t ded_err:4;
+ uint64_t bunk_ena:1;
+ uint64_t silo_qc:1;
+ uint64_t reset:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_mem_cfg0_s cn30xx;
+ struct cvmx_lmcx_mem_cfg0_s cn31xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xx;
+ struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
+ struct cvmx_lmcx_mem_cfg0_s cn50xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xx;
+ struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn56xx;
+ struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
+ struct cvmx_lmcx_mem_cfg0_s cn58xx;
+ struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
+};
+
+union cvmx_lmcx_mem_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_mem_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t comp_bypass:1;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+#else
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trp:4;
+ uint64_t trfc:5;
+ uint64_t tmrd:3;
+ uint64_t caslat:3;
+ uint64_t trrd:3;
+ uint64_t comp_bypass:1;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_mem_cfg1_s cn30xx;
+ struct cvmx_lmcx_mem_cfg1_s cn31xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_31_63:33;
+ uint64_t trrd:3;
+ uint64_t caslat:3;
+ uint64_t tmrd:3;
+ uint64_t trfc:5;
+ uint64_t trp:4;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+#else
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trp:4;
+ uint64_t trfc:5;
+ uint64_t tmrd:3;
+ uint64_t caslat:3;
+ uint64_t trrd:3;
+ uint64_t reserved_31_63:33;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
+ struct cvmx_lmcx_mem_cfg1_s cn50xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
+ struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
+};
+
+union cvmx_lmcx_modereg_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_25_63:39;
+ uint64_t ppd:1;
+ uint64_t wrp:3;
+ uint64_t dllr:1;
+ uint64_t tm:1;
+ uint64_t rbt:1;
+ uint64_t cl:4;
+ uint64_t bl:2;
+ uint64_t qoff:1;
+ uint64_t tdqs:1;
+ uint64_t wlev:1;
+ uint64_t al:2;
+ uint64_t dll:1;
+ uint64_t mpr:1;
+ uint64_t mprloc:2;
+ uint64_t cwl:3;
+#else
+ uint64_t cwl:3;
+ uint64_t mprloc:2;
+ uint64_t mpr:1;
+ uint64_t dll:1;
+ uint64_t al:2;
+ uint64_t wlev:1;
+ uint64_t tdqs:1;
+ uint64_t qoff:1;
+ uint64_t bl:2;
+ uint64_t cl:4;
+ uint64_t rbt:1;
+ uint64_t tm:1;
+ uint64_t dllr:1;
+ uint64_t wrp:3;
+ uint64_t ppd:1;
+ uint64_t reserved_25_63:39;
+#endif
+ } s;
+ struct cvmx_lmcx_modereg_params0_s cn61xx;
+ struct cvmx_lmcx_modereg_params0_s cn63xx;
+ struct cvmx_lmcx_modereg_params0_s cn63xxp1;
+ struct cvmx_lmcx_modereg_params0_s cn66xx;
+ struct cvmx_lmcx_modereg_params0_s cn68xx;
+ struct cvmx_lmcx_modereg_params0_s cn68xxp1;
+ struct cvmx_lmcx_modereg_params0_s cnf71xx;
+};
+
+union cvmx_lmcx_modereg_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_modereg_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t rtt_nom_11:3;
+ uint64_t dic_11:2;
+ uint64_t rtt_wr_11:2;
+ uint64_t srt_11:1;
+ uint64_t asr_11:1;
+ uint64_t pasr_11:3;
+ uint64_t rtt_nom_10:3;
+ uint64_t dic_10:2;
+ uint64_t rtt_wr_10:2;
+ uint64_t srt_10:1;
+ uint64_t asr_10:1;
+ uint64_t pasr_10:3;
+ uint64_t rtt_nom_01:3;
+ uint64_t dic_01:2;
+ uint64_t rtt_wr_01:2;
+ uint64_t srt_01:1;
+ uint64_t asr_01:1;
+ uint64_t pasr_01:3;
+ uint64_t rtt_nom_00:3;
+ uint64_t dic_00:2;
+ uint64_t rtt_wr_00:2;
+ uint64_t srt_00:1;
+ uint64_t asr_00:1;
+ uint64_t pasr_00:3;
+#else
+ uint64_t pasr_00:3;
+ uint64_t asr_00:1;
+ uint64_t srt_00:1;
+ uint64_t rtt_wr_00:2;
+ uint64_t dic_00:2;
+ uint64_t rtt_nom_00:3;
+ uint64_t pasr_01:3;
+ uint64_t asr_01:1;
+ uint64_t srt_01:1;
+ uint64_t rtt_wr_01:2;
+ uint64_t dic_01:2;
+ uint64_t rtt_nom_01:3;
+ uint64_t pasr_10:3;
+ uint64_t asr_10:1;
+ uint64_t srt_10:1;
+ uint64_t rtt_wr_10:2;
+ uint64_t dic_10:2;
+ uint64_t rtt_nom_10:3;
+ uint64_t pasr_11:3;
+ uint64_t asr_11:1;
+ uint64_t srt_11:1;
+ uint64_t rtt_wr_11:2;
+ uint64_t dic_11:2;
+ uint64_t rtt_nom_11:3;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_lmcx_modereg_params1_s cn61xx;
+ struct cvmx_lmcx_modereg_params1_s cn63xx;
+ struct cvmx_lmcx_modereg_params1_s cn63xxp1;
+ struct cvmx_lmcx_modereg_params1_s cn66xx;
+ struct cvmx_lmcx_modereg_params1_s cn68xx;
+ struct cvmx_lmcx_modereg_params1_s cn68xxp1;
+ struct cvmx_lmcx_modereg_params1_s cnf71xx;
+};
+
+union cvmx_lmcx_nxm {
+ uint64_t u64;
+ struct cvmx_lmcx_nxm_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t mem_msb_d3_r1:4;
+ uint64_t mem_msb_d3_r0:4;
+ uint64_t mem_msb_d2_r1:4;
+ uint64_t mem_msb_d2_r0:4;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t mem_msb_d0_r0:4;
+ uint64_t mem_msb_d0_r1:4;
+ uint64_t mem_msb_d1_r0:4;
+ uint64_t mem_msb_d1_r1:4;
+ uint64_t mem_msb_d2_r0:4;
+ uint64_t mem_msb_d2_r1:4;
+ uint64_t mem_msb_d3_r0:4;
+ uint64_t mem_msb_d3_r1:4;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+ struct cvmx_lmcx_nxm_cn52xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t cs_mask:8;
+#else
+ uint64_t cs_mask:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } cn52xx;
+ struct cvmx_lmcx_nxm_cn52xx cn56xx;
+ struct cvmx_lmcx_nxm_cn52xx cn58xx;
+ struct cvmx_lmcx_nxm_s cn61xx;
+ struct cvmx_lmcx_nxm_s cn63xx;
+ struct cvmx_lmcx_nxm_s cn63xxp1;
+ struct cvmx_lmcx_nxm_s cn66xx;
+ struct cvmx_lmcx_nxm_s cn68xx;
+ struct cvmx_lmcx_nxm_s cn68xxp1;
+ struct cvmx_lmcx_nxm_s cnf71xx;
+};
+
+union cvmx_lmcx_ops_cnt {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t opscnt:64;
+#else
+ uint64_t opscnt:64;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_s cn61xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xx;
+ struct cvmx_lmcx_ops_cnt_s cn63xxp1;
+ struct cvmx_lmcx_ops_cnt_s cn66xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xx;
+ struct cvmx_lmcx_ops_cnt_s cn68xxp1;
+ struct cvmx_lmcx_ops_cnt_s cnf71xx;
+};
+
+union cvmx_lmcx_ops_cnt_hi {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_hi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_hi:32;
+#else
+ uint64_t opscnt_hi:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
+};
+
+union cvmx_lmcx_ops_cnt_lo {
+ uint64_t u64;
+ struct cvmx_lmcx_ops_cnt_lo_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t opscnt_lo:32;
+#else
+ uint64_t opscnt_lo:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
+ struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
+ struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
+};
+
+union cvmx_lmcx_phy_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_phy_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_15_63:49;
+ uint64_t rx_always_on:1;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+#else
+ uint64_t ts_stagger:1;
+ uint64_t loopback_pos:1;
+ uint64_t loopback:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune1:1;
+ uint64_t lv_mode:1;
+ uint64_t rx_always_on:1;
+ uint64_t reserved_15_63:49;
+#endif
+ } s;
+ struct cvmx_lmcx_phy_ctl_s cn61xx;
+ struct cvmx_lmcx_phy_ctl_s cn63xx;
+ struct cvmx_lmcx_phy_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_14_63:50;
+ uint64_t lv_mode:1;
+ uint64_t ck_tune1:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t loopback:1;
+ uint64_t loopback_pos:1;
+ uint64_t ts_stagger:1;
+#else
+ uint64_t ts_stagger:1;
+ uint64_t loopback_pos:1;
+ uint64_t loopback:1;
+ uint64_t ck_dlyout0:4;
+ uint64_t ck_tune0:1;
+ uint64_t ck_dlyout1:4;
+ uint64_t ck_tune1:1;
+ uint64_t lv_mode:1;
+ uint64_t reserved_14_63:50;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_phy_ctl_s cn66xx;
+ struct cvmx_lmcx_phy_ctl_s cn68xx;
+ struct cvmx_lmcx_phy_ctl_s cn68xxp1;
+ struct cvmx_lmcx_phy_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_pll_bwctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_bwctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63:59;
+ uint64_t bwupd:1;
+ uint64_t bwctl:4;
+#else
+ uint64_t bwctl:4;
+ uint64_t bwupd:1;
+ uint64_t reserved_5_63:59;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_bwctl_s cn30xx;
+ struct cvmx_lmcx_pll_bwctl_s cn31xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xx;
+ struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
+};
+
+union cvmx_lmcx_pll_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_30_63:34;
+ uint64_t bypass:1;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t fasten_n:1;
+ uint64_t bypass:1;
+ uint64_t reserved_30_63:34;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_ctl_cn50xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t fasten_n:1;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t fasten_n:1;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn50xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xx;
+ struct cvmx_lmcx_pll_ctl_s cn52xxp1;
+ struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_28_63:36;
+ uint64_t div_reset:1;
+ uint64_t reset_n:1;
+ uint64_t clkf:12;
+ uint64_t clkr:6;
+ uint64_t reserved_6_7:2;
+ uint64_t en16:1;
+ uint64_t en12:1;
+ uint64_t en8:1;
+ uint64_t en6:1;
+ uint64_t en4:1;
+ uint64_t en2:1;
+#else
+ uint64_t en2:1;
+ uint64_t en4:1;
+ uint64_t en6:1;
+ uint64_t en8:1;
+ uint64_t en12:1;
+ uint64_t en16:1;
+ uint64_t reserved_6_7:2;
+ uint64_t clkr:6;
+ uint64_t clkf:12;
+ uint64_t reset_n:1;
+ uint64_t div_reset:1;
+ uint64_t reserved_28_63:36;
+#endif
+ } cn56xxp1;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
+ struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
+};
+
+union cvmx_lmcx_pll_status {
+ uint64_t u64;
+ struct cvmx_lmcx_pll_status_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ddr__nctl:5;
+ uint64_t ddr__pctl:5;
+ uint64_t reserved_2_21:20;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t reserved_2_21:20;
+ uint64_t ddr__pctl:5;
+ uint64_t ddr__nctl:5;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_pll_status_s cn50xx;
+ struct cvmx_lmcx_pll_status_s cn52xx;
+ struct cvmx_lmcx_pll_status_s cn52xxp1;
+ struct cvmx_lmcx_pll_status_s cn56xx;
+ struct cvmx_lmcx_pll_status_s cn56xxp1;
+ struct cvmx_lmcx_pll_status_s cn58xx;
+ struct cvmx_lmcx_pll_status_cn58xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+#else
+ uint64_t fbslip:1;
+ uint64_t rfslip:1;
+ uint64_t reserved_2_63:62;
+#endif
+ } cn58xxp1;
+};
+
+union cvmx_lmcx_read_level_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_44_63:20;
+ uint64_t rankmask:4;
+ uint64_t pattern:8;
+ uint64_t row:16;
+ uint64_t col:12;
+ uint64_t reserved_3_3:1;
+ uint64_t bnk:3;
+#else
+ uint64_t bnk:3;
+ uint64_t reserved_3_3:1;
+ uint64_t col:12;
+ uint64_t row:16;
+ uint64_t pattern:8;
+ uint64_t rankmask:4;
+ uint64_t reserved_44_63:20;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_ctl_s cn52xx;
+ struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
+ struct cvmx_lmcx_read_level_ctl_s cn56xx;
+ struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
+};
+
+union cvmx_lmcx_read_level_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t bitmask:16;
+ uint64_t reserved_4_15:12;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t reserved_4_15:12;
+ uint64_t bitmask:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_dbg_s cn52xx;
+ struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
+ struct cvmx_lmcx_read_level_dbg_s cn56xx;
+ struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
+};
+
+union cvmx_lmcx_read_level_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_read_level_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_38_63:26;
+ uint64_t status:2;
+ uint64_t byte8:4;
+ uint64_t byte7:4;
+ uint64_t byte6:4;
+ uint64_t byte5:4;
+ uint64_t byte4:4;
+ uint64_t byte3:4;
+ uint64_t byte2:4;
+ uint64_t byte1:4;
+ uint64_t byte0:4;
+#else
+ uint64_t byte0:4;
+ uint64_t byte1:4;
+ uint64_t byte2:4;
+ uint64_t byte3:4;
+ uint64_t byte4:4;
+ uint64_t byte5:4;
+ uint64_t byte6:4;
+ uint64_t byte7:4;
+ uint64_t byte8:4;
+ uint64_t status:2;
+ uint64_t reserved_38_63:26;
+#endif
+ } s;
+ struct cvmx_lmcx_read_level_rankx_s cn52xx;
+ struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
+ struct cvmx_lmcx_read_level_rankx_s cn56xx;
+ struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
+};
+
+union cvmx_lmcx_reset_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_reset_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t ddr3psv:1;
+ uint64_t ddr3psoft:1;
+ uint64_t ddr3pwarm:1;
+ uint64_t ddr3rst:1;
+#else
+ uint64_t ddr3rst:1;
+ uint64_t ddr3pwarm:1;
+ uint64_t ddr3psoft:1;
+ uint64_t ddr3psv:1;
+ uint64_t reserved_4_63:60;
+#endif
+ } s;
+ struct cvmx_lmcx_reset_ctl_s cn61xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xx;
+ struct cvmx_lmcx_reset_ctl_s cn63xxp1;
+ struct cvmx_lmcx_reset_ctl_s cn66xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xx;
+ struct cvmx_lmcx_reset_ctl_s cn68xxp1;
+ struct cvmx_lmcx_reset_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_rlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t delay_unload_3:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_0:1;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t offset:4;
+ uint64_t offset_en:1;
+ uint64_t or_dis:1;
+ uint64_t bitmask:8;
+ uint64_t delay_unload_0:1;
+ uint64_t delay_unload_1:1;
+ uint64_t delay_unload_2:1;
+ uint64_t delay_unload_3:1;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_ctl_s cn61xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn63xx;
+ struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63:55;
+ uint64_t offset_en:1;
+ uint64_t offset:4;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t offset:4;
+ uint64_t offset_en:1;
+ uint64_t reserved_9_63:55;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_rlevel_ctl_s cn66xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn68xx;
+ struct cvmx_lmcx_rlevel_ctl_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_rlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bitmask:64;
+#else
+ uint64_t bitmask:64;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
+};
+
+union cvmx_lmcx_rlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_rlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_56_63:8;
+ uint64_t status:2;
+ uint64_t byte8:6;
+ uint64_t byte7:6;
+ uint64_t byte6:6;
+ uint64_t byte5:6;
+ uint64_t byte4:6;
+ uint64_t byte3:6;
+ uint64_t byte2:6;
+ uint64_t byte1:6;
+ uint64_t byte0:6;
+#else
+ uint64_t byte0:6;
+ uint64_t byte1:6;
+ uint64_t byte2:6;
+ uint64_t byte3:6;
+ uint64_t byte4:6;
+ uint64_t byte5:6;
+ uint64_t byte6:6;
+ uint64_t byte7:6;
+ uint64_t byte8:6;
+ uint64_t status:2;
+ uint64_t reserved_56_63:8;
+#endif
+ } s;
+ struct cvmx_lmcx_rlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
+};
+
+union cvmx_lmcx_rodt_comp_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_comp_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t enable:1;
+ uint64_t reserved_12_15:4;
+ uint64_t nctl:4;
+ uint64_t reserved_5_7:3;
+ uint64_t pctl:5;
+#else
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:4;
+ uint64_t reserved_12_15:4;
+ uint64_t enable:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
+};
+
+union cvmx_lmcx_rodt_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t rodt_hi3:4;
+ uint64_t rodt_hi2:4;
+ uint64_t rodt_hi1:4;
+ uint64_t rodt_hi0:4;
+ uint64_t rodt_lo3:4;
+ uint64_t rodt_lo2:4;
+ uint64_t rodt_lo1:4;
+ uint64_t rodt_lo0:4;
+#else
+ uint64_t rodt_lo0:4;
+ uint64_t rodt_lo1:4;
+ uint64_t rodt_lo2:4;
+ uint64_t rodt_lo3:4;
+ uint64_t rodt_hi0:4;
+ uint64_t rodt_hi1:4;
+ uint64_t rodt_hi2:4;
+ uint64_t rodt_hi3:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_ctl_s cn30xx;
+ struct cvmx_lmcx_rodt_ctl_s cn31xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xx;
+ struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
+ struct cvmx_lmcx_rodt_ctl_s cn50xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xx;
+ struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn56xx;
+ struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
+ struct cvmx_lmcx_rodt_ctl_s cn58xx;
+ struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
+};
+
+union cvmx_lmcx_rodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_rodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t rodt_d3_r1:8;
+ uint64_t rodt_d3_r0:8;
+ uint64_t rodt_d2_r1:8;
+ uint64_t rodt_d2_r0:8;
+ uint64_t rodt_d1_r1:8;
+ uint64_t rodt_d1_r0:8;
+ uint64_t rodt_d0_r1:8;
+ uint64_t rodt_d0_r0:8;
+#else
+ uint64_t rodt_d0_r0:8;
+ uint64_t rodt_d0_r1:8;
+ uint64_t rodt_d1_r0:8;
+ uint64_t rodt_d1_r1:8;
+ uint64_t rodt_d2_r0:8;
+ uint64_t rodt_d2_r1:8;
+ uint64_t rodt_d3_r0:8;
+ uint64_t rodt_d3_r1:8;
+#endif
+ } s;
+ struct cvmx_lmcx_rodt_mask_s cn61xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xx;
+ struct cvmx_lmcx_rodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_rodt_mask_s cn66xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xx;
+ struct cvmx_lmcx_rodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_rodt_mask_s cnf71xx;
+};
+
+union cvmx_lmcx_scramble_cfg0 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } s;
+ struct cvmx_lmcx_scramble_cfg0_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg0_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
+};
+
+union cvmx_lmcx_scramble_cfg1 {
+ uint64_t u64;
+ struct cvmx_lmcx_scramble_cfg1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t key:64;
+#else
+ uint64_t key:64;
+#endif
+ } s;
+ struct cvmx_lmcx_scramble_cfg1_s cn61xx;
+ struct cvmx_lmcx_scramble_cfg1_s cn66xx;
+ struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
+};
+
+union cvmx_lmcx_scrambled_fadr {
+ uint64_t u64;
+ struct cvmx_lmcx_scrambled_fadr_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_36_63:28;
+ uint64_t fdimm:2;
+ uint64_t fbunk:1;
+ uint64_t fbank:3;
+ uint64_t frow:16;
+ uint64_t fcol:14;
+#else
+ uint64_t fcol:14;
+ uint64_t frow:16;
+ uint64_t fbank:3;
+ uint64_t fbunk:1;
+ uint64_t fdimm:2;
+ uint64_t reserved_36_63:28;
+#endif
+ } s;
+ struct cvmx_lmcx_scrambled_fadr_s cn61xx;
+ struct cvmx_lmcx_scrambled_fadr_s cn66xx;
+ struct cvmx_lmcx_scrambled_fadr_s cnf71xx;
+};
+
+union cvmx_lmcx_slot_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t r2r_init:6;
+#else
+ uint64_t r2r_init:6;
+ uint64_t r2w_init:6;
+ uint64_t w2r_init:6;
+ uint64_t w2w_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl0_s cn61xx;
+ struct cvmx_lmcx_slot_ctl0_s cn63xx;
+ struct cvmx_lmcx_slot_ctl0_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl0_s cn66xx;
+ struct cvmx_lmcx_slot_ctl0_s cn68xx;
+ struct cvmx_lmcx_slot_ctl0_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl0_s cnf71xx;
+};
+
+union cvmx_lmcx_slot_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t r2r_xrank_init:6;
+#else
+ uint64_t r2r_xrank_init:6;
+ uint64_t r2w_xrank_init:6;
+ uint64_t w2r_xrank_init:6;
+ uint64_t w2w_xrank_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl1_s cn61xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xx;
+ struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cn66xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xx;
+ struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl1_s cnf71xx;
+};
+
+union cvmx_lmcx_slot_ctl2 {
+ uint64_t u64;
+ struct cvmx_lmcx_slot_ctl2_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63:40;
+ uint64_t w2w_xdimm_init:6;
+ uint64_t w2r_xdimm_init:6;
+ uint64_t r2w_xdimm_init:6;
+ uint64_t r2r_xdimm_init:6;
+#else
+ uint64_t r2r_xdimm_init:6;
+ uint64_t r2w_xdimm_init:6;
+ uint64_t w2r_xdimm_init:6;
+ uint64_t w2w_xdimm_init:6;
+ uint64_t reserved_24_63:40;
+#endif
+ } s;
+ struct cvmx_lmcx_slot_ctl2_s cn61xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xx;
+ struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cn66xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xx;
+ struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
+ struct cvmx_lmcx_slot_ctl2_s cnf71xx;
+};
+
+union cvmx_lmcx_timing_params0 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t trp_ext:1;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t tckeon:10;
+#else
+ uint64_t tckeon:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t trp_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params0_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t trp_ext:1;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t reserved_0_9:10;
+#else
+ uint64_t reserved_0_9:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t trp_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } cn61xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
+ struct cvmx_lmcx_timing_params0_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t tcksre:4;
+ uint64_t trp:4;
+ uint64_t tzqinit:4;
+ uint64_t tdllk:4;
+ uint64_t tmod:4;
+ uint64_t tmrd:4;
+ uint64_t txpr:4;
+ uint64_t tcke:4;
+ uint64_t tzqcs:4;
+ uint64_t tckeon:10;
+#else
+ uint64_t tckeon:10;
+ uint64_t tzqcs:4;
+ uint64_t tcke:4;
+ uint64_t txpr:4;
+ uint64_t tmrd:4;
+ uint64_t tmod:4;
+ uint64_t tdllk:4;
+ uint64_t tzqinit:4;
+ uint64_t trp:4;
+ uint64_t tcksre:4;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
+ struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
+ struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
+};
+
+union cvmx_lmcx_timing_params1 {
+ uint64_t u64;
+ struct cvmx_lmcx_timing_params1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t tras_ext:1;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+#else
+ uint64_t tmprr:4;
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trfc:5;
+ uint64_t trrd:3;
+ uint64_t txp:3;
+ uint64_t twlmrd:4;
+ uint64_t twldqsen:4;
+ uint64_t tfaw:5;
+ uint64_t txpdll:5;
+ uint64_t tras_ext:1;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_lmcx_timing_params1_s cn61xx;
+ struct cvmx_lmcx_timing_params1_s cn63xx;
+ struct cvmx_lmcx_timing_params1_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63:18;
+ uint64_t txpdll:5;
+ uint64_t tfaw:5;
+ uint64_t twldqsen:4;
+ uint64_t twlmrd:4;
+ uint64_t txp:3;
+ uint64_t trrd:3;
+ uint64_t trfc:5;
+ uint64_t twtr:4;
+ uint64_t trcd:4;
+ uint64_t tras:5;
+ uint64_t tmprr:4;
+#else
+ uint64_t tmprr:4;
+ uint64_t tras:5;
+ uint64_t trcd:4;
+ uint64_t twtr:4;
+ uint64_t trfc:5;
+ uint64_t trrd:3;
+ uint64_t txp:3;
+ uint64_t twlmrd:4;
+ uint64_t twldqsen:4;
+ uint64_t tfaw:5;
+ uint64_t txpdll:5;
+ uint64_t reserved_46_63:18;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_timing_params1_s cn66xx;
+ struct cvmx_lmcx_timing_params1_s cn68xx;
+ struct cvmx_lmcx_timing_params1_s cn68xxp1;
+ struct cvmx_lmcx_timing_params1_s cnf71xx;
+};
+
+union cvmx_lmcx_tro_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_33_63:31;
+ uint64_t rclk_cnt:32;
+ uint64_t treset:1;
+#else
+ uint64_t treset:1;
+ uint64_t rclk_cnt:32;
+ uint64_t reserved_33_63:31;
+#endif
+ } s;
+ struct cvmx_lmcx_tro_ctl_s cn61xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xx;
+ struct cvmx_lmcx_tro_ctl_s cn63xxp1;
+ struct cvmx_lmcx_tro_ctl_s cn66xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xx;
+ struct cvmx_lmcx_tro_ctl_s cn68xxp1;
+ struct cvmx_lmcx_tro_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_tro_stat {
+ uint64_t u64;
+ struct cvmx_lmcx_tro_stat_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t ring_cnt:32;
+#else
+ uint64_t ring_cnt:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_tro_stat_s cn61xx;
+ struct cvmx_lmcx_tro_stat_s cn63xx;
+ struct cvmx_lmcx_tro_stat_s cn63xxp1;
+ struct cvmx_lmcx_tro_stat_s cn66xx;
+ struct cvmx_lmcx_tro_stat_s cn68xx;
+ struct cvmx_lmcx_tro_stat_s cn68xxp1;
+ struct cvmx_lmcx_tro_stat_s cnf71xx;
+};
+
+union cvmx_lmcx_wlevel_ctl {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t rtt_nom:3;
+ uint64_t bitmask:8;
+ uint64_t or_dis:1;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+#else
+ uint64_t lanemask:9;
+ uint64_t sset:1;
+ uint64_t or_dis:1;
+ uint64_t bitmask:8;
+ uint64_t rtt_nom:3;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_ctl_s cn61xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn63xx;
+ struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t sset:1;
+ uint64_t lanemask:9;
+#else
+ uint64_t lanemask:9;
+ uint64_t sset:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } cn63xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cn66xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xx;
+ struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
+};
+
+union cvmx_lmcx_wlevel_dbg {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t bitmask:8;
+ uint64_t byte:4;
+#else
+ uint64_t byte:4;
+ uint64_t bitmask:8;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_dbg_s cn61xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cn66xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xx;
+ struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
+};
+
+union cvmx_lmcx_wlevel_rankx {
+ uint64_t u64;
+ struct cvmx_lmcx_wlevel_rankx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t status:2;
+ uint64_t byte8:5;
+ uint64_t byte7:5;
+ uint64_t byte6:5;
+ uint64_t byte5:5;
+ uint64_t byte4:5;
+ uint64_t byte3:5;
+ uint64_t byte2:5;
+ uint64_t byte1:5;
+ uint64_t byte0:5;
+#else
+ uint64_t byte0:5;
+ uint64_t byte1:5;
+ uint64_t byte2:5;
+ uint64_t byte3:5;
+ uint64_t byte4:5;
+ uint64_t byte5:5;
+ uint64_t byte6:5;
+ uint64_t byte7:5;
+ uint64_t byte8:5;
+ uint64_t status:2;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_lmcx_wlevel_rankx_s cn61xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cn66xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xx;
+ struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
+ struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
+};
+
+union cvmx_lmcx_wodt_ctl0 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl0_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_0_63:64;
+#else
+ uint64_t reserved_0_63:64;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+#else
+ uint64_t wodt_d0_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn30xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_hi3:4;
+ uint64_t wodt_hi2:4;
+ uint64_t wodt_hi1:4;
+ uint64_t wodt_hi0:4;
+ uint64_t wodt_lo3:4;
+ uint64_t wodt_lo2:4;
+ uint64_t wodt_lo1:4;
+ uint64_t wodt_lo0:4;
+#else
+ uint64_t wodt_lo0:4;
+ uint64_t wodt_lo1:4;
+ uint64_t wodt_lo2:4;
+ uint64_t wodt_lo3:4;
+ uint64_t wodt_hi0:4;
+ uint64_t wodt_hi1:4;
+ uint64_t wodt_hi2:4;
+ uint64_t wodt_hi3:4;
+ uint64_t reserved_32_63:32;
+#endif
+ } cn38xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
+ struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
+ struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
+};
+
+union cvmx_lmcx_wodt_ctl1 {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_ctl1_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+#else
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d3_r1:8;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_ctl1_s cn30xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn31xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xx;
+ struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
+};
+
+union cvmx_lmcx_wodt_mask {
+ uint64_t u64;
+ struct cvmx_lmcx_wodt_mask_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t wodt_d3_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d0_r0:8;
+#else
+ uint64_t wodt_d0_r0:8;
+ uint64_t wodt_d0_r1:8;
+ uint64_t wodt_d1_r0:8;
+ uint64_t wodt_d1_r1:8;
+ uint64_t wodt_d2_r0:8;
+ uint64_t wodt_d2_r1:8;
+ uint64_t wodt_d3_r0:8;
+ uint64_t wodt_d3_r1:8;
+#endif
+ } s;
+ struct cvmx_lmcx_wodt_mask_s cn61xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xx;
+ struct cvmx_lmcx_wodt_mask_s cn63xxp1;
+ struct cvmx_lmcx_wodt_mask_s cn66xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xx;
+ struct cvmx_lmcx_wodt_mask_s cn68xxp1;
+ struct cvmx_lmcx_wodt_mask_s cnf71xx;
+};
+
+#endif
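
For orientation, a minimal sketch of how these LMC register unions are typically consumed (not part of the patch; it assumes the usual cvmx_read_csr() accessor and the CVMX_LMCX_WLEVEL_RANKX() address macro from the matching cvmx headers, and the argument order shown here is an assumption):

    #include <linux/printk.h>
    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-lmcx-defs.h>

    static void report_write_leveling(int lmc, int rank)
    {
            union cvmx_lmcx_wlevel_rankx wl;

            /* Read the raw 64-bit CSR, then decode it through the bitfield view. */
            wl.u64 = cvmx_read_csr(CVMX_LMCX_WLEVEL_RANKX(rank, lmc));
            pr_info("LMC%d rank %d: status=%u byte0=%u\n",
                    lmc, rank, wl.s.status, wl.s.byte0);
    }
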
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 14dd11f4492..349bb2ba840 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -218,6 +218,12 @@
#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+/* These are used to cover entire families of OCTEON processors */
+#define OCTEON_FAM_1 (OCTEON_CN3XXX)
+#define OCTEON_FAM_PLUS (OCTEON_CN5XXX)
+#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
+#define OCTEON_FAM_2 (OCTEON_CN6XXX)
+
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
*
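
The new OCTEON_FAM_* values let code match a whole processor family with one test. A brief sketch of the intended use, assuming the existing OCTEON_IS_MODEL() helper from this header (the wrapper function name is hypothetical):

    #include <asm/octeon/octeon-model.h>

    static bool is_octeon2_family(void)
    {
            /* OCTEON_FAM_2 covers the cn6xxx-family (OCTEON II) models. */
            return OCTEON_IS_MODEL(OCTEON_FAM_2);
    }
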
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 790939dd824..254e9954ed7 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -209,13 +209,6 @@ union octeon_cvmemctl {
} s;
};
-struct octeon_cf_data {
- unsigned long base_region_bias;
- unsigned int base_region; /* The chip select region used by CF */
- int is16bit; /* 0 - 8bit, !0 - 16bit */
- int dma_engine; /* -1 for no DMA */
-};
-
extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
extern int octeon_get_boot_debug_flag(void);
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index da9bd7d270d..31ab10f02ba 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,19 +31,19 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#else /* !CONFIG_HUGETLB_PAGE */
+#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE ({BUILD_BUG(); 0; })
#define HPAGE_MASK ({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index f5b521d5a67..c63191055e6 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -175,7 +175,7 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* pmd_huge(pmd) but inline */
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return 0;
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index da4ba49adcf..f6a0439a408 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -34,38 +34,72 @@
*/
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define _PAGE_PRESENT (1<<6) /* implemented in software */
-#define _PAGE_READ (1<<7) /* implemented in software */
-#define _PAGE_WRITE (1<<8) /* implemented in software */
-#define _PAGE_ACCESSED (1<<9) /* implemented in software */
-#define _PAGE_MODIFIED (1<<10) /* implemented in software */
-#define _PAGE_FILE (1<<10) /* set:pagecache unset:swap */
-
-#define _PAGE_R4KBUG (1<<0) /* workaround for r4k bug */
-#define _PAGE_GLOBAL (1<<0)
-#define _PAGE_VALID (1<<1)
-#define _PAGE_SILENT_READ (1<<1) /* synonym */
-#define _PAGE_DIRTY (1<<2) /* The MIPS dirty bit */
-#define _PAGE_SILENT_WRITE (1<<2)
-#define _CACHE_SHIFT 3
-#define _CACHE_MASK (7<<3)
+/*
+ * The following bits are directly used by the TLB hardware
+ */
+#define _PAGE_R4KBUG (1 << 0) /* workaround for r4k bug */
+#define _PAGE_GLOBAL (1 << 0)
+#define _PAGE_VALID_SHIFT 1
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_SILENT_READ (1 << 1) /* synonym */
+#define _PAGE_DIRTY_SHIFT 2
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
+#define _PAGE_SILENT_WRITE (1 << 2)
+#define _CACHE_SHIFT 3
+#define _CACHE_MASK (7 << 3)
+
+/*
+ * The following bits are implemented in software
+ *
+ * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
+#define _PAGE_PRESENT_SHIFT 6
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT 7
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT 8
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT 9
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT 10
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+
+#define _PAGE_FILE (1 << 10)
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-#define _PAGE_PRESENT (1<<0) /* implemented in software */
-#define _PAGE_READ (1<<1) /* implemented in software */
-#define _PAGE_WRITE (1<<2) /* implemented in software */
-#define _PAGE_ACCESSED (1<<3) /* implemented in software */
-#define _PAGE_MODIFIED (1<<4) /* implemented in software */
-#define _PAGE_FILE (1<<4) /* set:pagecache unset:swap */
-
-#define _PAGE_GLOBAL (1<<8)
-#define _PAGE_VALID (1<<9)
-#define _PAGE_SILENT_READ (1<<9) /* synonym */
-#define _PAGE_DIRTY (1<<10) /* The MIPS dirty bit */
-#define _PAGE_SILENT_WRITE (1<<10)
-#define _CACHE_UNCACHED (1<<11)
-#define _CACHE_MASK (1<<11)
+/*
+ * The following are implemented by software
+ *
+ * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
+#define _PAGE_PRESENT_SHIFT 0
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT 1
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT 2
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT 3
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT 4
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_FILE_SHIFT 4
+#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
+
+/*
+ * And these are the hardware TLB bits
+ */
+#define _PAGE_GLOBAL_SHIFT 8
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT 9
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */
+#define _PAGE_DIRTY_SHIFT 10
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_UNCACHED_SHIFT 11
+#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT)
#else /* 'Normal' r4K case */
/*
@@ -76,25 +110,25 @@
* which is more than we need right now.
*/
-/* implemented in software */
+/*
+ * The following bits are implemented in software
+ *
+ * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
+ * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
#define _PAGE_PRESENT_SHIFT (0)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-/* implemented in software, should be unused if cpu_has_rixi. */
#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
-/* implemented in software */
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-/* implemented in software */
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-/* implemented in software */
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-/* set:pagecache unset:swap */
#define _PAGE_FILE (_PAGE_MODIFIED)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
@@ -103,8 +137,17 @@
#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#endif
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+/* huge tlb page */
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
+#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+#else
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
+#endif
+
/* Page cannot be executed */
-#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT)
+#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
/* Page cannot be read */
@@ -192,20 +235,6 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
-#elif defined(CONFIG_CPU_RM9000)
-
-#define _CACHE_WT (0<<_CACHE_SHIFT)
-#define _CACHE_WTWA (1<<_CACHE_SHIFT)
-#define _CACHE_UC_B (2<<_CACHE_SHIFT)
-#define _CACHE_WB (3<<_CACHE_SHIFT)
-#define _CACHE_CWBEA (4<<_CACHE_SHIFT)
-#define _CACHE_CWB (5<<_CACHE_SHIFT)
-#define _CACHE_UCNB (6<<_CACHE_SHIFT)
-#define _CACHE_FPC (7<<_CACHE_SHIFT)
-
-#define _CACHE_UNCACHED _CACHE_UC_B
-#define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB
-
#else
#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index c02158be836..ec50d52cfb7 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
+#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
@@ -76,16 +77,7 @@ extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-
-#define is_zero_pfn is_zero_pfn
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-}
-
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+#define __HAVE_COLOR_ZERO_PAGE
extern void paging_init(void);
@@ -94,7 +86,12 @@ extern void paging_init(void);
* and a page entry and page directory to the page they refer to.
*/
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
-#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+
+#define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_page(pmd) __pmd_page(pmd)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
#define pmd_page_vaddr(pmd) pmd_val(pmd)
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -107,7 +104,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
ptep->pte_high = pte.pte_high;
smp_wmb();
ptep->pte_low = pte.pte_low;
- //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
if (pte.pte_low & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
@@ -375,6 +371,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
__update_cache(vma, address, pte);
}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t pte = *(pte_t *)pmdp;
+
+ __update_tlb(vma, address, pte);
+}
+
#define kern_addr_valid(addr) (1)
#ifdef CONFIG_64BIT_PHYS_ADDR
@@ -394,6 +398,157 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+extern int has_transparent_hugepage(void);
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_HUGE);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_HUGE;
+
+ return pmd;
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_SPLITTING);
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_SPLITTING;
+
+ return pmd;
+}
+
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+/* Extern to avoid header file madness */
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_WRITE);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _PAGE_MODIFIED)
+ pmd_val(pmd) |= _PAGE_SILENT_WRITE;
+
+ return pmd;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_MODIFIED);
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+ return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_MODIFIED;
+ if (pmd_val(pmd) & _PAGE_WRITE)
+ pmd_val(pmd) |= _PAGE_SILENT_WRITE;
+
+ return pmd;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+
+ return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+ if (cpu_has_rixi) {
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ } else {
+ if (pmd_val(pmd) & _PAGE_READ)
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ }
+
+ return pmd;
+}
+
+/* Extern to avoid header file madness */
+extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return pmd_val(pmd) >> _PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ if (pmd_trans_huge(pmd))
+ return pfn_to_page(pmd_pfn(pmd));
+
+ return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pmd;
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
+
+ return pmd;
+}
+
+/*
+ * The generic pmdp_get_and_clear() uses a version of pmd_clear() with a
+ * different prototype.
+ */
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old = *pmdp;
+
+ pmd_clear(pmdp);
+
+ return old;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
#include <asm-generic/pgtable.h>
/*
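
The helpers added above are what the generic transparent-hugepage code composes when it installs a huge mapping. A minimal sketch of that composition (assuming the caller already holds the page, vma, aligned address and pmd pointer, as the generic code does):

    #include <linux/mm.h>

    static void install_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unsigned long haddr, pmd_t *pmdp, struct page *page)
    {
            pmd_t entry;

            /* Build a writable, dirty huge entry and hook it into the page table. */
            entry = mk_pmd(page, vma->vm_page_prot);
            entry = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(entry)));
            set_pmd_at(mm, haddr, pmdp, entry);
            update_mmu_cache_pmd(vma, haddr, pmdp);
    }
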
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h b/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
index 9e2ee429c52..c74eb1657f5 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
+++ b/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index d28c41e0887..bd98b503f04 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -226,8 +226,6 @@ struct thread_struct {
unsigned long cp0_badvaddr; /* Last user fault */
unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long error_code;
- unsigned long irix_trampoline; /* Wheee... */
- unsigned long irix_oldctx;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
@@ -297,8 +295,6 @@ struct thread_struct {
.cp0_badvaddr = 0, \
.cp0_baduaddr = 0, \
.error_code = 0, \
- .irix_trampoline = 0, \
- .irix_oldctx = 0, \
/* \
* Cavium Octeon specifics (null if not Octeon) \
*/ \
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index cec5e125f7e..a3186f2bb8a 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -49,6 +49,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(r) ((r)->regs[29])
extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 14934295143..3dce7c788b3 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -366,7 +366,7 @@ struct linux_smonblock {
* Macros for calling a 32-bit ARC implementation from 64-bit code
*/
-#if defined(CONFIG_64BIT) && defined(CONFIG_ARC32)
+#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
#define __arc_clobbers \
"$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
@@ -475,10 +475,10 @@ struct linux_smonblock {
__res; \
})
-#endif /* defined(CONFIG_64BIT) && defined(CONFIG_ARC32) */
+#endif /* defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32) */
-#if (defined(CONFIG_32BIT) && defined(CONFIG_ARC32)) || \
- (defined(CONFIG_64BIT) && defined(CONFIG_ARC64))
+#if (defined(CONFIG_32BIT) && defined(CONFIG_FW_ARC32)) || \
+ (defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC64))
#define ARC_CALL0(dest) \
({ long __res; \
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index d4fb4d852a6..f33b5fd6972 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
+/* Used by kexec crashdump to save all cpu's state */
+#define SMP_DUMP 0x8
extern volatile cpumask_t cpu_callin_map;
@@ -91,4 +93,8 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
+#if defined(CONFIG_KEXEC)
+extern void (*dump_ipi_function_ptr)(void *);
+void dump_send_ipi(void (*dump_ipi_callback)(void *));
+#endif
#endif /* __ASM_SMP_H */
diff --git a/arch/mips/include/asm/smvp.h b/arch/mips/include/asm/smvp.h
deleted file mode 100644
index 0d0e80a39e8..00000000000
--- a/arch/mips/include/asm/smvp.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_SMVP_H
-#define _ASM_SMVP_H
-
-/*
- * Definitions for SMVP multitasking on MIPS MT cores
- */
-struct task_struct;
-
-extern void smvp_smp_setup(void);
-extern void smvp_smp_finish(void);
-extern void smvp_boot_secondary(int cpu, struct task_struct *t);
-extern void smvp_init_secondary(void);
-extern void smvp_smp_finish(void);
-extern void smvp_cpus_done(void);
-extern void smvp_prepare_cpus(unsigned int max_cpus);
-
-/* This is platform specific */
-extern void smvp_send_ipi(int cpu, unsigned int action);
-#endif /* _ASM_SMVP_H */
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 4461198361c..65900dab3ad 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,7 @@
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB)
# define SECTION_SIZE_BITS 29
#else
# define SECTION_SIZE_BITS 28
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index bc14447e69b..761f2e92119 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -50,10 +50,8 @@ extern int (*perf_irq)(void);
/*
* Initialize the calling CPU's compare interrupt as clockevent device
*/
-#ifdef CONFIG_CEVT_R4K_LIB
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
-#endif
static inline int mips_clockevent_init(void)
{
@@ -71,7 +69,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
-#ifdef CONFIG_CSRC_R4K_LIB
+#ifdef CONFIG_CSRC_R4K
extern int init_r4k_clocksource(void);
#endif
diff --git a/arch/mips/include/asm/titan_dep.h b/arch/mips/include/asm/titan_dep.h
deleted file mode 100644
index fee1908c65d..00000000000
--- a/arch/mips/include/asm/titan_dep.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * Board specific definititions for the PMC-Sierra Yosemite
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __TITAN_DEP_H__
-#define __TITAN_DEP_H__
-
-#include <asm/addrspace.h> /* for KSEG1ADDR() */
-#include <asm/byteorder.h> /* for cpu_to_le32() */
-
-#define TITAN_READ(ofs) \
- (*(volatile u32 *)(ocd_base+(ofs)))
-#define TITAN_READ_16(ofs) \
- (*(volatile u16 *)(ocd_base+(ofs)))
-#define TITAN_READ_8(ofs) \
- (*(volatile u8 *)(ocd_base+(ofs)))
-
-#define TITAN_WRITE(ofs, data) \
- do { *(volatile u32 *)(ocd_base+(ofs)) = (data); } while (0)
-#define TITAN_WRITE_16(ofs, data) \
- do { *(volatile u16 *)(ocd_base+(ofs)) = (data); } while (0)
-#define TITAN_WRITE_8(ofs, data) \
- do { *(volatile u8 *)(ocd_base+(ofs)) = (data); } while (0)
-
-/*
- * PCI specific defines
- */
-#define TITAN_PCI_0_CONFIG_ADDRESS 0x780
-#define TITAN_PCI_0_CONFIG_DATA 0x784
-
-/*
- * HT specific defines
- */
-#define RM9000x2_HTLINK_REG 0xbb000644
-#define RM9000x2_BASE_ADDR 0xbb000000
-
-#define OCD_BASE 0xfb000000UL
-#define OCD_SIZE 0x3000UL
-
-extern unsigned long ocd_base;
-
-/*
- * OCD Registers
- */
-#define RM9000x2_OCD_LKB5 0x0128 /* Ethernet */
-#define RM9000x2_OCD_LKM5 0x012c
-
-#define RM9000x2_OCD_LKB7 0x0138 /* HT Region 0 */
-#define RM9000x2_OCD_LKM7 0x013c
-#define RM9000x2_OCD_LKB8 0x0140 /* HT Region 1 */
-#define RM9000x2_OCD_LKM8 0x0144
-
-#define RM9000x2_OCD_LKB9 0x0148 /* Local Bus */
-#define RM9000x2_OCD_LKM9 0x014c
-#define RM9000x2_OCD_LKB10 0x0150
-#define RM9000x2_OCD_LKM10 0x0154
-#define RM9000x2_OCD_LKB11 0x0158
-#define RM9000x2_OCD_LKM11 0x015c
-#define RM9000x2_OCD_LKB12 0x0160
-#define RM9000x2_OCD_LKM12 0x0164
-
-#define RM9000x2_OCD_LKB13 0x0168 /* Scratch RAM */
-#define RM9000x2_OCD_LKM13 0x016c
-
-#define RM9000x2_OCD_LPD0 0x0200 /* Local Bus */
-#define RM9000x2_OCD_LPD1 0x0210
-#define RM9000x2_OCD_LPD2 0x0220
-#define RM9000x2_OCD_LPD3 0x0230
-
-#define RM9000x2_OCD_HTDVID 0x0600 /* HT Device Header */
-#define RM9000x2_OCD_HTSC 0x0604
-#define RM9000x2_OCD_HTCCR 0x0608
-#define RM9000x2_OCD_HTBHL 0x060c
-#define RM9000x2_OCD_HTBAR0 0x0610
-#define RM9000x2_OCD_HTBAR1 0x0614
-#define RM9000x2_OCD_HTBAR2 0x0618
-#define RM9000x2_OCD_HTBAR3 0x061c
-#define RM9000x2_OCD_HTBAR4 0x0620
-#define RM9000x2_OCD_HTBAR5 0x0624
-#define RM9000x2_OCD_HTCBCPT 0x0628
-#define RM9000x2_OCD_HTSDVID 0x062c
-#define RM9000x2_OCD_HTXRA 0x0630
-#define RM9000x2_OCD_HTCAP1 0x0634
-#define RM9000x2_OCD_HTIL 0x063c
-
-#define RM9000x2_OCD_HTLCC 0x0640 /* HT Capability Block */
-#define RM9000x2_OCD_HTLINK 0x0644
-#define RM9000x2_OCD_HTFQREV 0x0648
-
-#define RM9000x2_OCD_HTERCTL 0x0668 /* HT Controller */
-#define RM9000x2_OCD_HTRXDB 0x066c
-#define RM9000x2_OCD_HTIMPED 0x0670
-#define RM9000x2_OCD_HTSWIMP 0x0674
-#define RM9000x2_OCD_HTCAL 0x0678
-
-#define RM9000x2_OCD_HTBAA30 0x0680
-#define RM9000x2_OCD_HTBAA54 0x0684
-#define RM9000x2_OCD_HTMASK0 0x0688
-#define RM9000x2_OCD_HTMASK1 0x068c
-#define RM9000x2_OCD_HTMASK2 0x0690
-#define RM9000x2_OCD_HTMASK3 0x0694
-#define RM9000x2_OCD_HTMASK4 0x0698
-#define RM9000x2_OCD_HTMASK5 0x069c
-
-#define RM9000x2_OCD_HTIFCTL 0x06a0
-#define RM9000x2_OCD_HTPLL 0x06a4
-
-#define RM9000x2_OCD_HTSRI 0x06b0
-#define RM9000x2_OCD_HTRXNUM 0x06b4
-#define RM9000x2_OCD_HTTXNUM 0x06b8
-
-#define RM9000x2_OCD_HTTXCNT 0x06c8
-
-#define RM9000x2_OCD_HTERROR 0x06d8
-#define RM9000x2_OCD_HTRCRCE 0x06dc
-#define RM9000x2_OCD_HTEOI 0x06e0
-
-#define RM9000x2_OCD_CRCR 0x06f0
-
-#define RM9000x2_OCD_HTCFGA 0x06f8
-#define RM9000x2_OCD_HTCFGD 0x06fc
-
-#define RM9000x2_OCD_INTMSG 0x0a00
-
-#define RM9000x2_OCD_INTPIN0 0x0a40
-#define RM9000x2_OCD_INTPIN1 0x0a44
-#define RM9000x2_OCD_INTPIN2 0x0a48
-#define RM9000x2_OCD_INTPIN3 0x0a4c
-#define RM9000x2_OCD_INTPIN4 0x0a50
-#define RM9000x2_OCD_INTPIN5 0x0a54
-#define RM9000x2_OCD_INTPIN6 0x0a58
-#define RM9000x2_OCD_INTPIN7 0x0a5c
-#define RM9000x2_OCD_SEM 0x0a60
-#define RM9000x2_OCD_SEMSET 0x0a64
-#define RM9000x2_OCD_SEMCLR 0x0a68
-
-#define RM9000x2_OCD_TKT 0x0a70
-#define RM9000x2_OCD_TKTINC 0x0a74
-
-#define RM9000x2_OCD_NMICONFIG 0x0ac0 /* Interrupts */
-#define RM9000x2_OCD_INTP0PRI 0x1a80
-#define RM9000x2_OCD_INTP1PRI 0x1a80
-#define RM9000x2_OCD_INTP0STATUS0 0x1b00
-#define RM9000x2_OCD_INTP0MASK0 0x1b04
-#define RM9000x2_OCD_INTP0SET0 0x1b08
-#define RM9000x2_OCD_INTP0CLEAR0 0x1b0c
-#define RM9000x2_OCD_INTP0STATUS1 0x1b10
-#define RM9000x2_OCD_INTP0MASK1 0x1b14
-#define RM9000x2_OCD_INTP0SET1 0x1b18
-#define RM9000x2_OCD_INTP0CLEAR1 0x1b1c
-#define RM9000x2_OCD_INTP0STATUS2 0x1b20
-#define RM9000x2_OCD_INTP0MASK2 0x1b24
-#define RM9000x2_OCD_INTP0SET2 0x1b28
-#define RM9000x2_OCD_INTP0CLEAR2 0x1b2c
-#define RM9000x2_OCD_INTP0STATUS3 0x1b30
-#define RM9000x2_OCD_INTP0MASK3 0x1b34
-#define RM9000x2_OCD_INTP0SET3 0x1b38
-#define RM9000x2_OCD_INTP0CLEAR3 0x1b3c
-#define RM9000x2_OCD_INTP0STATUS4 0x1b40
-#define RM9000x2_OCD_INTP0MASK4 0x1b44
-#define RM9000x2_OCD_INTP0SET4 0x1b48
-#define RM9000x2_OCD_INTP0CLEAR4 0x1b4c
-#define RM9000x2_OCD_INTP0STATUS5 0x1b50
-#define RM9000x2_OCD_INTP0MASK5 0x1b54
-#define RM9000x2_OCD_INTP0SET5 0x1b58
-#define RM9000x2_OCD_INTP0CLEAR5 0x1b5c
-#define RM9000x2_OCD_INTP0STATUS6 0x1b60
-#define RM9000x2_OCD_INTP0MASK6 0x1b64
-#define RM9000x2_OCD_INTP0SET6 0x1b68
-#define RM9000x2_OCD_INTP0CLEAR6 0x1b6c
-#define RM9000x2_OCD_INTP0STATUS7 0x1b70
-#define RM9000x2_OCD_INTP0MASK7 0x1b74
-#define RM9000x2_OCD_INTP0SET7 0x1b78
-#define RM9000x2_OCD_INTP0CLEAR7 0x1b7c
-#define RM9000x2_OCD_INTP1STATUS0 0x2b00
-#define RM9000x2_OCD_INTP1MASK0 0x2b04
-#define RM9000x2_OCD_INTP1SET0 0x2b08
-#define RM9000x2_OCD_INTP1CLEAR0 0x2b0c
-#define RM9000x2_OCD_INTP1STATUS1 0x2b10
-#define RM9000x2_OCD_INTP1MASK1 0x2b14
-#define RM9000x2_OCD_INTP1SET1 0x2b18
-#define RM9000x2_OCD_INTP1CLEAR1 0x2b1c
-#define RM9000x2_OCD_INTP1STATUS2 0x2b20
-#define RM9000x2_OCD_INTP1MASK2 0x2b24
-#define RM9000x2_OCD_INTP1SET2 0x2b28
-#define RM9000x2_OCD_INTP1CLEAR2 0x2b2c
-#define RM9000x2_OCD_INTP1STATUS3 0x2b30
-#define RM9000x2_OCD_INTP1MASK3 0x2b34
-#define RM9000x2_OCD_INTP1SET3 0x2b38
-#define RM9000x2_OCD_INTP1CLEAR3 0x2b3c
-#define RM9000x2_OCD_INTP1STATUS4 0x2b40
-#define RM9000x2_OCD_INTP1MASK4 0x2b44
-#define RM9000x2_OCD_INTP1SET4 0x2b48
-#define RM9000x2_OCD_INTP1CLEAR4 0x2b4c
-#define RM9000x2_OCD_INTP1STATUS5 0x2b50
-#define RM9000x2_OCD_INTP1MASK5 0x2b54
-#define RM9000x2_OCD_INTP1SET5 0x2b58
-#define RM9000x2_OCD_INTP1CLEAR5 0x2b5c
-#define RM9000x2_OCD_INTP1STATUS6 0x2b60
-#define RM9000x2_OCD_INTP1MASK6 0x2b64
-#define RM9000x2_OCD_INTP1SET6 0x2b68
-#define RM9000x2_OCD_INTP1CLEAR6 0x2b6c
-#define RM9000x2_OCD_INTP1STATUS7 0x2b70
-#define RM9000x2_OCD_INTP1MASK7 0x2b74
-#define RM9000x2_OCD_INTP1SET7 0x2b78
-#define RM9000x2_OCD_INTP1CLEAR7 0x2b7c
-
-#define OCD_READ(reg) (*(volatile unsigned int *)(ocd_base + (reg)))
-#define OCD_WRITE(reg, val) \
- do { *(volatile unsigned int *)(ocd_base + (reg)) = (val); } while (0)
-
-/*
- * Hypertransport specific macros
- */
-#define RM9K_WRITE(ofs, data) *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs) = data
-#define RM9K_WRITE_8(ofs, data) *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs) = data
-#define RM9K_WRITE_16(ofs, data) *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs) = data
-
-#define RM9K_READ(ofs, val) *(val) = *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs)
-#define RM9K_READ_8(ofs, val) *(val) = *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs)
-#define RM9K_READ_16(ofs, val) *(val) = *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs)
-
-#endif
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index b306e2081ca..9e47cc11aa2 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -20,7 +20,6 @@
#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
index fa133c1bc1f..65e344532de 100644
--- a/arch/mips/include/asm/war.h
+++ b/arch/mips/include/asm/war.h
@@ -209,14 +209,6 @@
#endif
/*
- * On the RM9000 there is a problem which makes the CreateDirtyExclusive
- * eache operation unusable on SMP systems.
- */
-#ifndef RM9000_CDEX_SMP_WAR
-#error Check setting of RM9000_CDEX_SMP_WAR for your platform
-#endif
-
-/*
* The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra
* opposes it being called that) where invalid instructions in the same
* I-cache line worth of instructions being fetched may case spurious
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index 3f1237c6c80..770732cb8d0 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -86,12 +86,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SA_RESTORER 0x04000000 /* Only for o32 */
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 8b28bc4e14e..007c33d7371 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
-obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -25,7 +25,7 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
-obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o
+obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
@@ -58,7 +58,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
-obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
@@ -80,7 +79,8 @@ obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c4bce4882a..9690998d4ef 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -125,10 +125,6 @@ void output_thread_defines(void)
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
- OFFSET(THREAD_TRAMP, task_struct, \
- thread.irix_trampoline);
- OFFSET(THREAD_OLDCTX, task_struct, \
- thread.irix_oldctx);
BLANK();
}
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 00000000000..0f53c39324b
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,71 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+/* This keeps track of which CPU is crashing. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *ignore)
+{
+ struct pt_regs *regs;
+ int cpu = smp_processor_id();
+
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ local_irq_disable();
+ if (!cpu_isset(cpu, cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpu_set(cpu, cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+ relocated_kexec_smp_wait(NULL);
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ unsigned int msecs;
+
+ unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic CPU */
+
+ dump_send_ipi(crash_shutdown_secondary);
+ smp_wmb();
+
+ /*
+ * The crash CPU sends an IPI and waits for the other CPUs to
+ * respond, waiting up to 10 seconds for them.
+ */
+ pr_emerg("Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpu_set(crashing_cpu, cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 00000000000..35bed0d2342
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,75 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+
+static int __init parse_savemaxmem(char *p)
+{
+ if (p)
+ saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
+
+ return 1;
+}
+__setup("savemaxmem=", parse_savemaxmem);
+
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable, so the data
+ * is first copied to a pre-allocated kernel page and then copied to user
+ * space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr);
+ } else {
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Kdump buffer page not allocated\n");
+
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
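
For context, copy_oldmem_page() is the per-page primitive behind /proc/vmcore. A simplified sketch of how a caller might walk a physical range with it (an assumption modelled on the generic vmcore logic, using min_t() from linux/kernel.h):

    static ssize_t read_oldmem_span(char *buf, size_t count, u64 paddr, int userbuf)
    {
            size_t done = 0;

            while (count) {
                    unsigned long pfn = paddr >> PAGE_SHIFT;
                    unsigned long offset = paddr & (PAGE_SIZE - 1);
                    size_t chunk = min_t(size_t, count, PAGE_SIZE - offset);
                    ssize_t ret;

                    /* Copy one page (or the tail of a page) from the old kernel. */
                    ret = copy_oldmem_page(pfn, buf + done, chunk, offset, userbuf);
                    if (ret < 0)
                            return ret;
                    done += chunk;
                    paddr += chunk;
                    count -= chunk;
            }
            return done;
    }
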
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
deleted file mode 100644
index 1282b9ae81c..00000000000
--- a/arch/mips/kernel/irq-rm9000.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Handler for RM9000 extended interrupts. These are a non-standard
- * feature so we handle them separately from standard interrupts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-static inline void unmask_rm9k_irq(struct irq_data *d)
-{
- set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void mask_rm9k_irq(struct irq_data *d)
-{
- clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void rm9k_cpu_irq_enable(struct irq_data *d)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_rm9k_irq(d);
- local_irq_restore(flags);
-}
-
-/*
- * Performance counter interrupts are global on all processors.
- */
-static void local_rm9k_perfcounter_irq_startup(void *args)
-{
- rm9k_cpu_irq_enable(args);
-}
-
-static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
-
- return 0;
-}
-
-static void local_rm9k_perfcounter_irq_shutdown(void *args)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm9k_irq(args);
- local_irq_restore(flags);
-}
-
-static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
-}
-
-static struct irq_chip rm9k_irq_controller = {
- .name = "RM9000",
- .irq_ack = mask_rm9k_irq,
- .irq_mask = mask_rm9k_irq,
- .irq_mask_ack = mask_rm9k_irq,
- .irq_unmask = unmask_rm9k_irq,
- .irq_eoi = unmask_rm9k_irq
-};
-
-static struct irq_chip rm9k_perfcounter_irq = {
- .name = "RM9000",
- .irq_startup = rm9k_perfcounter_irq_startup,
- .irq_shutdown = rm9k_perfcounter_irq_shutdown,
- .irq_ack = mask_rm9k_irq,
- .irq_mask = mask_rm9k_irq,
- .irq_mask_ack = mask_rm9k_irq,
- .irq_unmask = unmask_rm9k_irq,
-};
-
-unsigned int rm9000_perfcount_irq;
-
-EXPORT_SYMBOL(rm9000_perfcount_irq);
-
-void __init rm9k_cpu_irq_init(void)
-{
- int base = RM9K_CPU_IRQ_BASE;
- int i;
-
- clear_c0_intcontrol(0x0000f000); /* Mask all */
-
- for (i = base; i < base + 4; i++)
- irq_set_chip_and_handler(i, &rm9k_irq_controller,
- handle_level_irq);
-
- rm9000_perfcount_irq = base + 1;
- irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
- handle_percpu_irq);
-}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 85beb9b0b2d..992e18474da 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -5,7 +5,7 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
+#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
int
machine_kexec_prepare(struct kimage *kimage)
{
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
return 0;
}
@@ -33,14 +43,20 @@ machine_kexec_cleanup(struct kimage *kimage)
void
machine_shutdown(void)
{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
}
-typedef void (*noretfun_t)(void) __attribute__((noreturn));
+typedef void (*noretfun_t)(void) __noreturn;
void
machine_kexec(struct kimage *image)
@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
- kexec_start_address = image->start;
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
* The generic kexec code builds a page list with physical
* addresses. they are directly accessible through KSEG0 (or
* CKSEG0 or XPHYS if on 64bit system), hence the
- * pys_to_virt() call.
+ * phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
@@ -81,5 +99,12 @@ machine_kexec(struct kimage *image)
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
__flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary CPUs may now jump to the kexec_wait loop */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
((noretfun_t) reboot_code_buffer)();
}
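
The function pointers introduced above (_machine_kexec_prepare, _machine_kexec_shutdown, _machine_crash_shutdown) give platforms a hook point without patching this file. A hedged sketch of hypothetical board code wiring one in (the board names are illustrative, not from the patch):

    #include <linux/kexec.h>

    /* Hypothetical board hook: not part of this patch. */
    static int my_board_kexec_prepare(struct kimage *image)
    {
            /* e.g. stash a board-specific command line into kexec_args here */
            return 0;
    }

    static void __init my_board_kexec_setup(void)
    {
            _machine_kexec_prepare = my_board_kexec_prepare;
            /* leave _machine_crash_shutdown NULL to keep the default path */
    }
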
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 33f63bab478..fd814e08c94 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -50,8 +50,8 @@ static bool check_same_owner(struct task_struct *p)
rcu_read_lock();
pcred = __task_cred(p);
- match = (cred->euid == pcred->euid ||
- cred->euid == pcred->uid);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2d9304c2b54..df1e3e455f9 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/export.h>
#include <asm/checksum.h>
-#include <asm/pgtable.h>
+#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a9b995dcf69..b14c14d90fc 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -840,6 +840,16 @@ static const struct mips_perf_event bmips5000_event_map
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
@@ -1092,6 +1102,100 @@ static const struct mips_perf_event octeon_cache_map
},
};
+static const struct mips_perf_event xlp_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+ [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+ [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted use the same event for
+ * read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { 0x25, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
const struct mips_perf_event *pev)
@@ -1444,6 +1548,20 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
return &raw_event;
}
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+
+ /* Only 1-63 are defined */
+ if ((raw_id < 0x01) || (raw_id > 0x3f))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = raw_id;
+
+ return &raw_event;
+}
+
static int __init
init_hw_perf_events(void)
{
@@ -1522,6 +1640,12 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &bmips5000_event_map;
mipspmu.cache_event_map = &bmips5000_cache_map;
break;
+ case CPU_XLP:
+ mipspmu.name = "xlp";
+ mipspmu.general_event_map = &xlp_event_map;
+ mipspmu.cache_event_map = &xlp_cache_map;
+ mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+ break;
default:
pr_cont("Either hardware does not support performance "
"counters, or not yet implemented.\n");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 38097652d62..a11c6f9fdd5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
}
}
#ifdef CONFIG_HOTPLUG_CPU
- if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
- (system_state == SYSTEM_RUNNING ||
- system_state == SYSTEM_BOOTING))
+ if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
play_dead();
#endif
rcu_idle_exit();
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 87481f916a6..e4142c5f7c2 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -15,6 +15,11 @@
#include <asm/addrspace.h>
LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+ PTR_L a3, arg3
+
PTR_L s0, kexec_indirection_page
PTR_L s1, kexec_start_address
@@ -26,7 +31,6 @@ process_entry:
and s3, s2, 0x1
beq s3, zero, 1f
and s4, s2, ~0x1 /* store destination addr in s4 */
- move a0, s4
b process_entry
1:
@@ -60,10 +64,111 @@ copy_word:
b process_entry
done:
+#ifdef CONFIG_SMP
+ /* The kexec_flag reset is the signal to other CPUs that the kernel
+ has been moved to its location. Note - we need the relocated address
+ of kexec_flag. */
+
+ bal 1f
+ 1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+ LONG_S zero,(t0)
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ /* We need to flush the I-cache before jumping to the new kernel.
+ * Unfortunately, this code is CPU-specific.
+ */
+ .set push
+ .set noreorder
+ syncw
+ syncw
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
/* jump to kexec_start_address */
j s1
END(relocate_new_kernel)
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until the code is relocated and
+ * then start at the entry point.
+ */
+LEAF(kexec_smp_wait)
+ PTR_L a0, s_arg0
+ PTR_L a1, s_arg1
+ PTR_L a2, s_arg2
+ PTR_L a3, s_arg3
+ PTR_L s1, kexec_start_address
+
+ /* The non-relocated addresses work for the args and kexec_start_address
+ * (the old kernel is not overwritten). But we need the relocated address
+ * of kexec_flag.
+ */
+
+ bal 1f
+1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+
+1: LONG_L s0, (t0)
+ bne s0, zero,1b
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set push
+ .set noreorder
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
+ j s1
+ END(kexec_smp_wait)
+#endif
+
+#ifdef __mips64
+ /* all PTRs must be aligned to 8 bytes in 64-bit mode */
+ .align 3
+#endif
+
+/* All parameters to the new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are used to prepare the register values.
+ */
+
+kexec_args:
+ EXPORT(kexec_args)
+arg0: PTR 0x0
+arg1: PTR 0x0
+arg2: PTR 0x0
+arg3: PTR 0x0
+ .size kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+secondary_kexec_args:
+ EXPORT(secondary_kexec_args)
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
+ .size secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+ LONG 0x1
+
+#endif
+
kexec_start_address:
EXPORT(kexec_start_address)
PTR 0x0
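
The assembly above implements a simple flag handshake for SMP kexec: the primary CPU clears kexec_flag once the new kernel image is in place, and every secondary spins in kexec_smp_wait until the flag reads zero before jumping to kexec_start_address with its own argument set. A minimal standalone C model of that handshake follows; it is illustrative only (the real code runs on physical CPUs out of relocated memory), and the function names below simply mirror the roles of the assembly labels.

    #include <stdio.h>

    static volatile long kexec_flag = 1;    /* set while the copy is in progress */

    static void primary_cpu(void)
    {
            /* ...copy the new kernel segments into place... */
            kexec_flag = 0;                 /* release the secondary CPUs */
            /* flush caches, then jump to kexec_start_address with kexec_args[0..3] */
    }

    static void secondary_cpu(void)
    {
            while (kexec_flag)              /* spin until the primary clears the flag */
                    ;
            /* jump to kexec_start_address with secondary_kexec_args[0..3] */
    }

    int main(void)
    {
            primary_cpu();
            secondary_cpu();                /* sequential here; concurrent on real hardware */
            printf("handshake complete\n");
            return 0;
    }
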
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 62971914376..ad3de9668da 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -17,12 +17,6 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
-/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
-
-/* This duplicates the definition from <asm/signal.h> */
-#define SIGILL 4 /* Illegal instruction (ANSI). */
-
#ifndef CONFIG_MIPS32_O32
/* No O32, so define handle_sys here */
#define handle_sysn32 handle_sys
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 290dc6a1d7a..8c41187801c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
+#include <linux/kexec.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -536,12 +537,64 @@ static void __init arch_mem_init(char **cmdline_p)
}
bootmem_init();
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start != crashk_res.end)
+ reserve_bootmem(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1,
+ BOOTMEM_DEFAULT);
+#endif
device_tree_init();
sparse_init();
plat_swiotlb_setup();
paging_init();
}
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+ unsigned long long total;
+
+ total = max_pfn - min_low_pfn;
+ return total << PAGE_SHIFT;
+}
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = get_total_mem();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)((crashk_res.end -
+ crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
static void __init resource_init(void)
{
int i;
@@ -557,6 +610,8 @@ static void __init resource_init(void)
/*
* Request address space for all standard RAM.
*/
+ mips_parse_crashkernel();
+
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
unsigned long start, end;
@@ -593,6 +648,7 @@ static void __init resource_init(void)
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
+ request_crashkernel(res);
}
}
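
The reservation added above is driven by the standard crashkernel=<size>[@<offset>] command-line syntax handled by parse_crashkernel(). The sketch below shows the resulting resource range and the message printed by request_crashkernel() for an example value; the 64M@128M figures are purely illustrative, nothing in this patch mandates them.

    #include <stdio.h>

    int main(void)
    {
            /* example only: crashkernel=64M@128M */
            unsigned long long crash_size = 64ULL << 20;
            unsigned long long crash_base = 128ULL << 20;
            unsigned long long start = crash_base;
            unsigned long long end   = crash_base + crash_size - 1;

            printf("Reserving %lluMB of memory at %lluMB for crashkernel\n",
                   (end - start + 1) >> 20, start >> 20);
            return 0;
    }
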
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0e1a5b8ae81..b6aa7703501 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -568,17 +568,20 @@ static void do_signal(struct pt_regs *regs)
}
if (regs->regs[0]) {
- if (regs->regs[2] == ERESTARTNOHAND ||
- regs->regs[2] == ERESTARTSYS ||
- regs->regs[2] == ERESTARTNOINTR) {
+ switch (regs->regs[2]) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
- }
- if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
+ break;
+
+ case ERESTART_RESTARTBLOCK:
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
+ break;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9005bf9fb85..2e6374a589e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -386,3 +386,20 @@ void flush_tlb_one(unsigned long vaddr)
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ dump_ipi_function_ptr = dump_ipi_callback;
+ smp_mb();
+ for_each_online_cpu(i)
+ if (i != cpu)
+ mp_ops->send_ipi_single(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
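
dump_send_ipi() gives crash/kdump code a way to interrupt every other online CPU with an SMP_DUMP IPI and have it run a supplied callback. A hedged sketch of a caller follows; only the dump_send_ipi() signature comes from this patch, the callback name and body are hypothetical.

    #include <linux/smp.h>
    /* plus whatever header ends up declaring dump_send_ipi() */

    /* hypothetical crash-shutdown helper */
    static void my_crash_save_cpu(void *arg)
    {
            /* save this CPU's register state for the dump (illustrative) */
    }

    static void my_machine_crash_shutdown(void)
    {
            dump_send_ipi(my_crash_save_cpu);       /* IPI all other online CPUs */
    }
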
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9be3df1fa8a..cf7ac5483f5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -13,6 +13,7 @@
*/
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -409,6 +410,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
panic("Fatal exception");
}
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
do_exit(sig);
}
@@ -1021,6 +1025,24 @@ asmlinkage void do_cpu(struct pt_regs *regs)
return;
+ case 3:
+ /*
+ * Old (MIPS I and MIPS II) processors will set this code
+ * for COP1X opcode instructions that replaced the original
+ * COP3 space. We don't limit COP1 space instructions in
+ * the emulator according to the CPU ISA, so we want to
+ * treat COP1X instructions consistently regardless of which
+ * code the CPU chose. Therefore we redirect this trap to
+ * the FP emulator too.
+ *
+ * Then some newer FPU-less processors use this code
+ * erroneously too, so they are covered by this choice
+ * as well.
+ */
+ if (raw_cpu_has_fpu)
+ break;
+ /* Fall through. */
+
case 1:
if (used_math()) /* Using the FPU again. */
own_fpu(1);
@@ -1044,9 +1066,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
return;
-
- case 3:
- break;
}
force_sig(SIGILL, current);
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index d84f361f1e4..c0021912131 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -36,4 +36,8 @@ config PCI_LANTIQ
bool "PCI Support"
depends on SOC_XWAY && PCI
+config XRX200_PHY_FW
+ bool "XRX200 PHY firmware loader"
+ depends on SOC_XWAY
+
endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 6cfd6117fbf..9f9e875967a 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -87,9 +87,6 @@ void __init device_tree_init(void)
reserve_bootmem(base, size, BOOTMEM_DEFAULT);
unflatten_device_tree();
-
- /* free the space reserved for the dt blob */
- free_bootmem(base, size);
}
void __init prom_init(void)
@@ -119,7 +116,7 @@ int __init plat_of_setup(void)
sizeof(of_ids[0].compatible));
strncpy(of_ids[1].compatible, "simple-bus",
sizeof(of_ids[1].compatible));
- return of_platform_bus_probe(NULL, of_ids, NULL);
+ return of_platform_populate(NULL, of_ids, NULL, NULL);
}
arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index 70a58c747bd..7a13660d630 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1 +1,3 @@
obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o
+
+obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 55d2c4fa471..6453962ac89 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -25,6 +25,7 @@
#include <lantiq_soc.h>
#include <xway_dma.h>
+#define LTQ_DMA_ID 0x08
#define LTQ_DMA_CTRL 0x10
#define LTQ_DMA_CPOLL 0x14
#define LTQ_DMA_CS 0x18
@@ -48,7 +49,7 @@
#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
#define DMA_2W_BURST BIT(1) /* 2 word burst length */
#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */
-#define DMA_ETOP_ENDIANESS (0xf << 8) /* endianess swap etop channels */
+#define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */
#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */
#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
@@ -191,10 +192,10 @@ ltq_dma_init_port(int p)
switch (p) {
case DMA_PORT_ETOP:
/*
- * Tell the DMA engine to swap the endianess of data frames and
+ * Tell the DMA engine to swap the endianness of data frames and
* drop packets if the channel arbitration fails.
*/
- ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN,
+ ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
LTQ_DMA_PCTRL);
break;
@@ -214,6 +215,7 @@ ltq_dma_init(struct platform_device *pdev)
{
struct clk *clk;
struct resource *res;
+ unsigned id;
int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -243,7 +245,12 @@ ltq_dma_init(struct platform_device *pdev)
ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
}
- dev_info(&pdev->dev, "init done\n");
+
+ id = ltq_dma_r32(LTQ_DMA_ID);
+ dev_info(&pdev->dev,
+ "Init done - hw rev: %X, ports: %d, channels: %d\n",
+ id & 0x1f, (id >> 16) & 0xf, id >> 20);
+
return 0;
}
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 22c55f73aa9..544dbb7fb42 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -28,17 +28,24 @@
#define RCU_RST_REQ 0x0010
/* reset status register */
#define RCU_RST_STAT 0x0014
+/* vr9 gphy registers */
+#define RCU_GFS_ADD0_XRX200 0x0020
+#define RCU_GFS_ADD1_XRX200 0x0068
/* reboot bit */
+#define RCU_RD_GPHY0_XRX200 BIT(31)
#define RCU_RD_SRST BIT(30)
+#define RCU_RD_GPHY1_XRX200 BIT(29)
+
/* reset cause */
#define RCU_STAT_SHIFT 26
/* boot selection */
-#define RCU_BOOT_SEL_SHIFT 26
-#define RCU_BOOT_SEL_MASK 0x7
+#define RCU_BOOT_SEL(x) ((x >> 18) & 0x7)
+#define RCU_BOOT_SEL_XRX200(x) (((x >> 17) & 0xf) | ((x >> 8) & 0x10))
/* remapped base addr of the reset control unit */
static void __iomem *ltq_rcu_membase;
+static struct device_node *ltq_rcu_np;
/* This function is used by the watchdog driver */
int ltq_reset_cause(void)
@@ -52,7 +59,41 @@ EXPORT_SYMBOL_GPL(ltq_reset_cause);
unsigned char ltq_boot_select(void)
{
u32 val = ltq_rcu_r32(RCU_RST_STAT);
- return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
+
+ if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200"))
+ return RCU_BOOT_SEL_XRX200(val);
+
+ return RCU_BOOT_SEL(val);
+}
+
+/* reset / boot a gphy */
+static struct ltq_xrx200_gphy_reset {
+ u32 rd;
+ u32 addr;
+} xrx200_gphy[] = {
+ {RCU_RD_GPHY0_XRX200, RCU_GFS_ADD0_XRX200},
+ {RCU_RD_GPHY1_XRX200, RCU_GFS_ADD1_XRX200},
+};
+
+/* reset and boot a gphy. these phys only exist on xrx200 SoC */
+int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr)
+{
+ if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) {
+ dev_err(dev, "this SoC has no GPHY\n");
+ return -EINVAL;
+ }
+ if (id > 1) {
+ dev_err(dev, "%u is an invalid gphy id\n", id);
+ return -EINVAL;
+ }
+ dev_info(dev, "booting GPHY%u firmware at %X\n", id, dev_addr);
+
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | xrx200_gphy[id].rd,
+ RCU_RST_REQ);
+ ltq_rcu_w32(dev_addr, xrx200_gphy[id].addr);
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~xrx200_gphy[id].rd,
+ RCU_RST_REQ);
+ return 0;
}
/* reset a io domain for u micro seconds */
@@ -85,14 +126,17 @@ static void ltq_machine_power_off(void)
static int __init mips_reboot_setup(void)
{
struct resource res;
- struct device_node *np =
- of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
+
+ ltq_rcu_np = of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
+ if (!ltq_rcu_np)
+ ltq_rcu_np = of_find_compatible_node(NULL, NULL,
+ "lantiq,rcu-xrx200");
/* check if all the reset register range is available */
- if (!np)
+ if (!ltq_rcu_np)
panic("Failed to load reset resources from devicetree");
- if (of_address_to_resource(np, 0, &res))
+ if (of_address_to_resource(ltq_rcu_np, 0, &res))
panic("Failed to get rcu memory range");
if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 2917b56b6b2..3925e6609ac 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -370,6 +370,10 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+ clkdev_add_pmu("1e108000.eth", NULL, 0,
+ PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_PPE_TOP);
} else if (of_machine_is_compatible("lantiq,ar9")) {
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
ltq_ar9_fpi_hz());
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
new file mode 100644
index 00000000000..fe808bf5366
--- /dev/null
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -0,0 +1,97 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+static dma_addr_t xway_gphy_load(struct platform_device *pdev)
+{
+ const struct firmware *fw;
+ dma_addr_t dev_addr = 0;
+ const char *fw_name;
+ void *fw_addr;
+ size_t size;
+
+ if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) {
+ dev_err(&pdev->dev, "failed to load firmware filename\n");
+ return 0;
+ }
+
+ dev_info(&pdev->dev, "requesting %s\n", fw_name);
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name);
+ return 0;
+ }
+
+ /*
+ * GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address
+ */
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ dev_err(&pdev->dev, "failed to alloc firmware memory\n");
+ }
+
+ release_firmware(fw);
+ return dev_addr;
+}
+
+static int __devinit xway_phy_fw_probe(struct platform_device *pdev)
+{
+ dma_addr_t fw_addr;
+ struct property *pp;
+ unsigned char *phyids;
+ int i, ret = 0;
+
+ fw_addr = xway_gphy_load(pdev);
+ if (!fw_addr)
+ return -EINVAL;
+ pp = of_find_property(pdev->dev.of_node, "phys", NULL);
+ if (!pp)
+ return -ENOENT;
+ phyids = pp->value;
+ for (i = 0; i < pp->length && !ret; i++)
+ ret = xrx200_gphy_boot(&pdev->dev, phyids[i], fw_addr);
+ if (!ret)
+ mdelay(100);
+ return ret;
+}
+
+static const struct of_device_id xway_phy_match[] = {
+ { .compatible = "lantiq,phy-xrx200" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_phy_match);
+
+static struct platform_driver xway_phy_driver = {
+ .probe = xway_phy_fw_probe,
+ .driver = {
+ .name = "phy-xrx200",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_phy_match,
+ },
+};
+
+module_platform_driver(xway_phy_driver);
+
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig
index a9a14d6e81a..fbf75f63579 100644
--- a/arch/mips/loongson1/Kconfig
+++ b/arch/mips/loongson1/Kconfig
@@ -15,7 +15,7 @@ config LOONGSON1_LS1B
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_HAS_EARLY_PRINTK
- select HAVE_CLK
+ select COMMON_CLK
endchoice
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
index 1bbbbec1208..07133defa14 100644
--- a/arch/mips/loongson1/common/clock.c
+++ b/arch/mips/loongson1/common/clock.c
@@ -7,175 +7,22 @@
* option) any later version.
*/
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <asm/clock.h>
#include <asm/time.h>
-
-#include <loongson1.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-struct clk *clk_get(struct device *dev, const char *name)
-{
- struct clk *c;
- struct clk *ret = NULL;
-
- mutex_lock(&clocks_mutex);
- list_for_each_entry(c, &clocks, node) {
- if (!strcmp(c->name, name)) {
- ret = c;
- break;
- }
- }
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_get);
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
-static void pll_clk_init(struct clk *clk)
-{
- u32 pll;
-
- pll = __raw_readl(LS1X_CLK_PLL_FREQ);
- clk->rate = (12 + (pll & 0x3f)) * 33 / 2
- + ((pll >> 8) & 0x3ff) * 33 / 1024 / 2;
- clk->rate *= 1000000;
-}
-
-static void cpu_clk_init(struct clk *clk)
-{
- u32 pll, ctrl;
-
- pll = clk_get_rate(clk->parent);
- ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_CPU;
- clk->rate = pll / (ctrl >> DIV_CPU_SHIFT);
-}
-
-static void ddr_clk_init(struct clk *clk)
-{
- u32 pll, ctrl;
-
- pll = clk_get_rate(clk->parent);
- ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DDR;
- clk->rate = pll / (ctrl >> DIV_DDR_SHIFT);
-}
-
-static void dc_clk_init(struct clk *clk)
-{
- u32 pll, ctrl;
-
- pll = clk_get_rate(clk->parent);
- ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DC;
- clk->rate = pll / (ctrl >> DIV_DC_SHIFT);
-}
-
-static struct clk_ops pll_clk_ops = {
- .init = pll_clk_init,
-};
-
-static struct clk_ops cpu_clk_ops = {
- .init = cpu_clk_init,
-};
-
-static struct clk_ops ddr_clk_ops = {
- .init = ddr_clk_init,
-};
-
-static struct clk_ops dc_clk_ops = {
- .init = dc_clk_init,
-};
-
-static struct clk pll_clk = {
- .name = "pll",
- .ops = &pll_clk_ops,
-};
-
-static struct clk cpu_clk = {
- .name = "cpu",
- .parent = &pll_clk,
- .ops = &cpu_clk_ops,
-};
-
-static struct clk ddr_clk = {
- .name = "ddr",
- .parent = &pll_clk,
- .ops = &ddr_clk_ops,
-};
-
-static struct clk dc_clk = {
- .name = "dc",
- .parent = &pll_clk,
- .ops = &dc_clk_ops,
-};
-
-int clk_register(struct clk *clk)
-{
- mutex_lock(&clocks_mutex);
- list_add(&clk->node, &clocks);
- if (clk->ops->init)
- clk->ops->init(clk);
- mutex_unlock(&clocks_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(clk_register);
-
-static struct clk *ls1x_clks[] = {
- &pll_clk,
- &cpu_clk,
- &ddr_clk,
- &dc_clk,
-};
-
-int __init ls1x_clock_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ls1x_clks); i++)
- clk_register(ls1x_clks[i]);
-
- return 0;
-}
+#include <platform.h>
void __init plat_time_init(void)
{
struct clk *clk;
/* Initialize LS1X clocks */
- ls1x_clock_init();
+ ls1x_clk_init();
/* setup mips r4k timer */
clk = clk_get(NULL, "cpu");
if (IS_ERR(clk))
- panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
+ panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk) / 2;
}
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
index 0412ad61e29..69dad4cfaaf 100644
--- a/arch/mips/loongson1/common/platform.c
+++ b/arch/mips/loongson1/common/platform.c
@@ -43,16 +43,17 @@ struct platform_device ls1x_uart_device = {
},
};
-void __init ls1x_serial_setup(void)
+void __init ls1x_serial_setup(struct platform_device *pdev)
{
struct clk *clk;
struct plat_serial8250_port *p;
- clk = clk_get(NULL, "dc");
+ clk = clk_get(NULL, pdev->name);
if (IS_ERR(clk))
- panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
+ panic("unable to get %s clock, err=%ld",
+ pdev->name, PTR_ERR(clk));
- for (p = ls1x_serial8250_port; p->flags != 0; ++p)
+ for (p = pdev->dev.platform_data; p->flags != 0; ++p)
p->uartclk = clk_get_rate(clk);
}
@@ -71,7 +72,6 @@ static struct resource ls1x_eth0_resources[] = {
};
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
- .bus_id = 0,
.phy_mask = 0,
};
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
index 295b1be893e..1fbd5264f66 100644
--- a/arch/mips/loongson1/ls1b/board.c
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -9,9 +9,6 @@
#include <platform.h>
-#include <linux/serial_8250.h>
-#include <loongson1.h>
-
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_device,
&ls1x_eth0_device,
@@ -23,7 +20,7 @@ static int __init ls1b_platform_init(void)
{
int err;
- ls1x_serial_setup();
+ ls1x_serial_setup(&ls1x_uart_device);
err = platform_add_devices(ls1b_platform_devices,
ARRAY_SIZE(ls1b_platform_devices));
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index a03bf00a1a9..47c77e7ffbf 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -171,16 +171,17 @@ static int isBranchInstr(mips_instruction * i)
* In the Linux kernel, we support selection of FPR format on the
* basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
- * 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS
- * as a proxy for the FR bit so that a 64-bit FPU is emulated. In any
- * case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
- * even FPRs are used (Status.FR = 0).
+ * 64-bit CPUs, so instead we look at TIF_32BIT_REGS.
+ * FPU emu is slow and bulky and optimizing this function offers fairly
+ * sizeable benefits so we try to be clever and make this function return
+ * a constant whenever possible, that is on 64-bit kernels without O32
+ * compatibility enabled and on 32-bit kernels.
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
- if (cpu_has_fpu)
- return xcp->cp0_status & ST0_FR;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32)
+ return 1;
+#elif defined(CONFIG_64BIT) && defined(CONFIG_MIPS32_O32)
return !test_thread_flag(TIF_32BIT_REGS);
#else
return 0;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 44e69e7a451..6ec04daf423 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2005-2007 Cavium Networks
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -28,6 +29,7 @@
#include <asm/octeon/octeon.h>
unsigned long long cache_err_dcache[NR_CPUS];
+EXPORT_SYMBOL_GPL(cache_err_dcache);
/**
* Octeon automatically flushes the dcache on tlb changes, so
@@ -284,39 +286,59 @@ void __cpuinit octeon_cache_init(void)
board_cache_error_setup = octeon_cache_error_setup;
}
-/**
+/*
* Handle a cache error exception
*/
+static RAW_NOTIFIER_HEAD(co_cache_error_chain);
-static void cache_parity_error_octeon(int non_recoverable)
+int register_co_cache_error_notifier(struct notifier_block *nb)
{
- unsigned long coreid = cvmx_get_core_num();
- uint64_t icache_err = read_octeon_c0_icacheerr();
-
- pr_err("Cache error exception:\n");
- pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
- if (icache_err & 1) {
- pr_err("CacheErr (Icache) == %llx\n",
- (unsigned long long)icache_err);
- write_octeon_c0_icacheerr(0);
- }
- if (cache_err_dcache[coreid] & 1) {
- pr_err("CacheErr (Dcache) == %llx\n",
- (unsigned long long)cache_err_dcache[coreid]);
- cache_err_dcache[coreid] = 0;
- }
+ return raw_notifier_chain_register(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
+
+int unregister_co_cache_error_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
- if (non_recoverable)
- panic("Can't handle cache error: nested exception");
+static void co_cache_error_call_notifiers(unsigned long val)
+{
+ int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
+ if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
+ u64 dcache_err;
+ unsigned long coreid = cvmx_get_core_num();
+ u64 icache_err = read_octeon_c0_icacheerr();
+
+ if (val) {
+ dcache_err = cache_err_dcache[coreid];
+ cache_err_dcache[coreid] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ pr_err("Core%lu: Cache error exception:\n", coreid);
+ pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+ if (icache_err & 1) {
+ pr_err("CacheErr (Icache) == %llx\n",
+ (unsigned long long)icache_err);
+ write_octeon_c0_icacheerr(0);
+ }
+ if (dcache_err & 1) {
+ pr_err("CacheErr (Dcache) == %llx\n",
+ (unsigned long long)dcache_err);
+ }
+ }
}
-/**
+/*
* Called when the the exception is recoverable
*/
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
- cache_parity_error_octeon(0);
+ co_cache_error_call_notifiers(0);
}
/**
@@ -325,5 +347,6 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
- cache_parity_error_octeon(1);
+ co_cache_error_call_notifiers(1);
+ panic("Can't handle cache error: nested exception");
}
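
The rework above replaces the hard-coded cache-error dump with a raw notifier chain, so other code (an EDAC driver, for instance) can hook Octeon cache-error events. A minimal sketch of a consumer, assuming only what is visible in this hunk: the register/unregister helpers and the val argument (non-zero on the non-recoverable path). Everything else, including the header that declares the helpers, is illustrative.

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/notifier.h>
    /* plus the header that declares (un)register_co_cache_error_notifier() */

    static int my_cache_err_event(struct notifier_block *nb,
                                  unsigned long val, void *data)
    {
            pr_err("octeon cache error, %srecoverable\n", val ? "non-" : "");
            return NOTIFY_OK;       /* NOTIFY_OK suppresses the default register dump */
    }

    static struct notifier_block my_cache_err_nb = {
            .notifier_call = my_cache_err_event,
    };

    static int __init my_cache_err_init(void)
    {
            return register_co_cache_error_notifier(&my_cache_err_nb);
    }

    static void __exit my_cache_err_exit(void)
    {
            unregister_co_cache_error_notifier(&my_cache_err_nb);
    }

    module_init(my_cache_err_init);
    module_exit(my_cache_err_exit);
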
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4c32ede464b..0f7d788e881 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -632,9 +632,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (size >= scache_size)
r4k_blast_scache();
else {
- unsigned long lsize = cpu_scache_line_size();
- unsigned long almask = ~(lsize - 1);
-
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -643,9 +640,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
- cache_op(Hit_Writeback_Inv_SD, addr & almask);
- cache_op(Hit_Writeback_Inv_SD,
- (addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
__sync();
@@ -655,12 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long lsize = cpu_dcache_line_size();
- unsigned long almask = ~(lsize - 1);
-
R4600_HIT_CACHEOP_WAR_IMPL;
- cache_op(Hit_Writeback_Inv_D, addr & almask);
- cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
blast_inv_dcache_range(addr, addr + size);
}
@@ -947,7 +936,6 @@ static void __cpuinit probe_pcache(void)
case CPU_RM7000:
rm7k_erratum31();
- case CPU_RM9000:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 4;
@@ -958,9 +946,7 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 4;
c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
-#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
c->options |= MIPS_CPU_CACHE_CDEX_P;
-#endif
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1245,7 +1231,6 @@ static void __cpuinit setup_scache(void)
return;
case CPU_RM7000:
- case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
rm7k_sc_init();
#endif
@@ -1348,10 +1333,10 @@ static int __init cca_setup(char *str)
{
get_option(&str, &cca);
- return 1;
+ return 0;
}
-__setup("cca=", cca_setup);
+early_param("cca", cca_setup);
static void __cpuinit coherency_setup(void)
{
@@ -1401,10 +1386,10 @@ static int __init setcoherentio(char *str)
{
coherentio = 1;
- return 1;
+ return 0;
}
-__setup("coherentio", setcoherentio);
+early_param("coherentio", setcoherentio);
#endif
static void __cpuinit r4k_cache_error_setup(void)
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index aff57057a94..da815d29523 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,3 +1,4 @@
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
@@ -67,7 +68,7 @@ EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
+ int type __maybe_unused;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 98f530e1821..8e666c55f4d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -140,15 +140,6 @@ static void __cpuinit set_prefetch_parameters(void)
pref_bias_copy_load = 256;
break;
- case CPU_RM9000:
- /*
- * As a workaround for erratum G105 which make the
- * PrepareForStore hint unusable we fall back to
- * StoreRetained on the RM9000. Once it is known which
- * versions of the RM9000 we'll be able to condition-
- * alize this.
- */
-
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 25407794edb..ee331bbd8f8 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -11,6 +11,7 @@
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
void pgd_init(unsigned long page)
{
@@ -61,6 +62,36 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
}
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ if (!pmd_trans_splitting(*pmdp)) {
+ pmd_t pmd = pmd_mksplitting(*pmdp);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ }
+}
+
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+
void __init pagetable_init(void)
{
unsigned long vaddr;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 88e79ad6f81..2a7c9725b2a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -295,7 +295,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
pudp = pud_offset(pgdp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
if (pmd_huge(*pmdp)) {
unsigned long lo;
@@ -367,6 +367,26 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
EXIT_CRITICAL(flags);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+int __init has_transparent_hugepage(void)
+{
+ unsigned int mask;
+ unsigned long flags;
+
+ ENTER_CRITICAL(flags);
+ write_c0_pagemask(PM_HUGE_MASK);
+ back_to_back_c0_hazard();
+ mask = read_c0_pagemask();
+ write_c0_pagemask(PM_DEFAULT_MASK);
+
+ EXIT_CRITICAL(flags);
+
+ return mask == PM_HUGE_MASK;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2833dcb67b5..05613355627 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -158,7 +158,7 @@ enum label_id {
label_smp_pgtable_change,
label_r3000_write_probe_fail,
label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
label_tlb_huge_update,
#endif
};
@@ -177,13 +177,15 @@ UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif
static int __cpuinitdata hazard_instance;
-static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+static void __cpuinit uasm_bgezl_hazard(u32 **p,
+ struct uasm_reloc **r,
+ int instance)
{
switch (instance) {
case 0 ... 7:
@@ -194,7 +196,9 @@ static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
}
}
-static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
+ u32 **p,
+ int instance)
{
switch (instance) {
case 0 ... 7:
@@ -206,19 +210,59 @@ static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
}
/*
- * For debug purposes.
+ * pgtable bits are assigned dynamically depending on processor features
+ * and statically based on the kernel configuration. This spits out the
+ * actual values the kernel is using. Required to make sense of the
+ * disassembled TLB exception handlers.
*/
-static inline void dump_handler(const u32 *handler, int count)
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...) \
+ pr_debug("#define " fmt, ##__VA_ARGS__)
+
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+
+ pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+ pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+ pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+ pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
+ pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+#endif
+ if (cpu_has_rixi) {
+#ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
+#ifdef _PAGE_NO_READ_SHIFT
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+#endif
+ }
+ pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+ pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+ pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+ pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_debug("\n");
+}
+
+static inline void dump_handler(const char *symbol, const u32 *handler, int count)
{
int i;
+ pr_debug("LEAF(%s)\n", symbol);
+
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (i = 0; i < count; i++)
- pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
+
+ pr_debug("\t.set\tpop\n");
- pr_debug("\t.set pop\n");
+ pr_debug("\tEND(%s)\n", symbol);
}
/* The only general purpose registers allowed in TLB handlers. */
@@ -401,7 +445,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
memcpy((void *)ebase, tlb_handler, 0x80);
- dump_handler((u32 *)ebase, 32);
+ dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -443,7 +487,6 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
case CPU_R4600:
case CPU_R4700:
case CPU_R5000:
- case CPU_R5000A:
case CPU_NEVADA:
uasm_i_nop(p);
uasm_i_tlbp(p);
@@ -517,7 +560,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
break;
case CPU_R5000:
- case CPU_R5000A:
case CPU_NEVADA:
uasm_i_nop(p); /* QED specifies 2 nops hazard */
uasm_i_nop(p); /* QED specifies 2 nops hazard */
@@ -565,24 +607,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_RM9000:
- /*
- * When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4
- * cpu cycles and use for data translations should not occur
- * for 3 cpu cycles.
- */
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- tlbw(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- break;
-
case CPU_VR4111:
case CPU_VR4121:
case CPU_VR4122:
@@ -629,7 +653,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
}
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static __cpuinit void build_restore_pagemask(u32 **p,
struct uasm_reloc **r,
@@ -755,7 +779,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifdef CONFIG_64BIT
/*
@@ -1200,7 +1224,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
/* Adjust the context during the load latency. */
build_adjust_context(p, tmp);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
@@ -1209,7 +1233,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/
if (use_lwx_insns())
uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* build_update_entries */
@@ -1312,7 +1336,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif
@@ -1322,7 +1346,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
@@ -1367,7 +1391,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
} else {
-#if defined(CONFIG_HUGETLB_PAGE)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
const enum label_id ls = label_tlb_huge_update;
#else
const enum label_id ls = label_vmalloc;
@@ -1436,7 +1460,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
memcpy((void *)ebase, final_handler, 0x100);
- dump_handler((u32 *)ebase, 64);
+ dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
/*
@@ -1493,7 +1517,8 @@ static void __cpuinit build_r4000_setup_pgd(void)
pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
(unsigned int)(p - tlbmiss_handler_setup_pgd));
- dump_handler(tlbmiss_handler_setup_pgd,
+ dump_handler("tlbmiss_handler",
+ tlbmiss_handler_setup_pgd,
ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
@@ -1763,7 +1788,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
@@ -1793,7 +1818,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
@@ -1823,7 +1848,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -1842,7 +1867,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* For huge tlb entries, pmd doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
@@ -1958,7 +1983,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
build_make_valid(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when build_r4000_tlbchange_handler_head
* spots a huge page.
@@ -2030,7 +2055,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
@@ -2051,7 +2076,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
@@ -2077,7 +2102,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
@@ -2099,7 +2124,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
@@ -2125,7 +2150,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
@@ -2137,6 +2162,8 @@ void __cpuinit build_tlb_refill_handler(void)
*/
static int run_once = 0;
+ output_pgtable_bits_defines();
+
#ifdef CONFIG_64BIT
check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 8059eb76f8e..3c05bf9e280 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -9,6 +9,34 @@ config DT_XLP_EVP
This DTB will be used if the firmware does not pass in a DTB
pointer to the kernel. The corresponding DTS file is at
arch/mips/netlogic/dts/xlp_evp.dts
+
+config NLM_MULTINODE
+ bool "Support for multi-chip boards"
+ depends on NLM_XLP_BOARD
+ default n
+ help
+ Add support for boards with 2 or 4 XLPs connected over ICI.
+
+if NLM_MULTINODE
+choice
+ prompt "Number of XLPs on the board"
+ default NLM_MULTINODE_2
+ help
+ In the multi-node case, specify the number of SoCs on the board.
+
+config NLM_MULTINODE_2
+ bool "Dual-XLP board"
+ help
+ Support boards with up to two XLPs connected over ICI.
+
+config NLM_MULTINODE_4
+ bool "Quad-XLP board"
+ help
+ Support boards with up to four XLPs connected over ICI.
+
+endchoice
+
+endif
endif
config NLM_COMMON
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce09..00dcc7a2bc5 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -36,7 +36,6 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/irq.h>
@@ -59,68 +58,70 @@
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
#else
#error "Unknown CPU"
#endif
-/*
- * These are the routines that handle all the low level interrupt stuff.
- * Actions handled here are: initialization of the interrupt map, requesting of
- * interrupt lines by handlers, dispatching if interrupts to handlers, probing
- * for interrupt lines
- */
-/* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+ (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK 0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+ (1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+ void (*extra_ack)(struct irq_data *);
+ struct nlm_soc_info *node;
+ int picirq;
+ int irt;
+ int flags;
+};
static void xlp_pic_enable(struct irq_data *d)
{
unsigned long flags;
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_enable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_disable(struct irq_data *d)
{
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
unsigned long flags;
- int irt;
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_disable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_mask_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+ uint64_t mask = 1ull << pd->picirq;
write_c0_eirr(mask); /* ack by writing EIRR */
}
static void xlp_pic_unmask(struct irq_data *d)
{
- void *hd = irq_data_get_irq_handler_data(d);
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
+ if (!pd)
return;
- if (hd) {
- void (*extra_ack)(void *) = hd;
- extra_ack(d);
- }
+ if (pd->extra_ack)
+ pd->extra_ack(d);
+
/* Ack is a single write, no need to lock */
- nlm_pic_ack(nlm_pic_base, irt);
+ nlm_pic_ack(pd->node->picbase, pd->irt);
}
static struct irq_chip xlp_pic = {
@@ -174,64 +175,108 @@ struct irq_chip nlm_cpu_intr = {
.irq_eoi = cpuintr_ack,
};
-void __init init_nlm_common_irqs(void)
+static void __init nlm_init_percpu_irqs(void)
{
- int i, irq, irt;
+ int i;
for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
-
- for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
- irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
-
#ifdef CONFIG_SMP
irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
nlm_smp_function_ipi_handler);
irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
nlm_smp_resched_ipi_handler);
- nlm_irq_mask |=
- ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
#endif
+}
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+ BUG_ON(pic_data == NULL);
+ pic_data->irt = irt;
+ pic_data->picirq = picirq;
+ pic_data->node = nlm_get_node(node);
+ irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+ irq_set_handler_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = irq_get_handler_data(xirq);
+ pic_data->extra_ack = xack;
+}
- for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) {
- irt = nlm_irq_to_irt(irq);
+static void nlm_init_node_irqs(int node)
+{
+ int i, irt;
+ uint64_t irqmask;
+ struct nlm_soc_info *nodep;
+
+ pr_info("Init IRQ for node %d\n", node);
+ nodep = nlm_get_node(node);
+ irqmask = PERCPU_IRQ_MASK;
+ for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+ irt = nlm_irq_to_irt(i);
if (irt == -1)
continue;
- nlm_irq_mask |= (1ULL << irq);
- nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
+ nlm_setup_pic_irq(node, i, i, irt);
+ /* set interrupts to first cpu in node */
+ nlm_pic_init_irt(nodep->picbase, irt, i,
+ node * NLM_CPUS_PER_NODE);
+ irqmask |= (1ull << i);
}
-
- nlm_irq_mask |= (1ULL << IRQ_TIMER);
+ nodep->irqmask = irqmask;
}
void __init arch_init_irq(void)
{
/* Initialize the irq descriptors */
- init_nlm_common_irqs();
-
- write_c0_eimr(nlm_irq_mask);
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
}
-void __cpuinit nlm_smp_irq_init(void)
+void nlm_smp_irq_init(int hwcpuid)
{
- /* set interrupt mask for non-zero cpus */
- write_c0_eimr(nlm_irq_mask);
+ int node, cpu;
+
+ node = hwcpuid / NLM_CPUS_PER_NODE;
+ cpu = hwcpuid % NLM_CPUS_PER_NODE;
+
+ if (cpu == 0 && node != 0)
+ nlm_init_node_irqs(node);
+ write_c0_eimr(nlm_current_node()->irqmask);
}
asmlinkage void plat_irq_dispatch(void)
{
uint64_t eirr;
- int i;
+ int i, node;
+ node = nlm_nodeid();
eirr = read_c0_eirr() & read_c0_eimr();
- if (eirr & (1 << IRQ_TIMER)) {
- do_IRQ(IRQ_TIMER);
- return;
- }
i = __ilog2_u64(eirr);
if (i == -1)
return;
- do_IRQ(i);
+ /* per-CPU IRQs don't need translation */
+ if (eirr & PERCPU_IRQ_MASK) {
+ do_IRQ(i);
+ return;
+ }
+
+ /* top level irq handling */
+ do_IRQ(nlm_irq_to_xirq(node, i));
}
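
nlm_setup_pic_irq() and nlm_set_pic_extra_ack() are the new per-node hooks that platform code uses to wire a PIC interrupt and attach an optional extra ack. A rough sketch of a caller is shown below; only the two function signatures are taken from this patch, and the IRQ/IRT numbers are placeholders rather than real assignments.

    #include <linux/irq.h>
    /* plus the netlogic header that declares the two helpers */

    static void my_dev_extra_ack(struct irq_data *d)
    {
            /* device-specific ack, e.g. clearing a latched status bit (illustrative) */
    }

    static void __init my_node0_wire_irq(void)
    {
            int node = 0, picirq = 40, irt = 20;    /* placeholder numbers */

            nlm_setup_pic_irq(node, picirq, picirq, irt);
            nlm_set_pic_extra_ack(node, picirq, my_dev_extra_ack);
    }
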
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index fab316de57e..a080d9ee3cd 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
+ uint64_t picbase;
+
+ cpu = cpu_logical_map(logical_cpu);
+ node = cpu / NLM_CPUS_PER_NODE;
+ picbase = nlm_get_node(node)->picbase;
if (action & SMP_CALL_FUNCTION)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
if (action & SMP_RESCHEDULE_YOURSELF)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
void nlm_early_init_secondary(int cpu)
{
change_c0_config(CONF_CM_CMASK, 0x3);
- write_c0_ebase((uint32_t)nlm_common_ebase);
#ifdef CONFIG_CPU_XLP
- if (hard_smp_processor_id() % 4 == 0)
+ /* mmu init, once per core */
+ if (cpu % NLM_THREADS_PER_CORE == 0)
xlp_mmu_init();
#endif
+ write_c0_ebase(nlm_current_node()->ebase);
}
/*
@@ -108,8 +114,12 @@ void nlm_early_init_secondary(int cpu)
*/
static void __cpuinit nlm_init_secondary(void)
{
- current_cpu_data.core = hard_smp_processor_id() / 4;
- nlm_smp_irq_init();
+ int hwtid;
+
+ hwtid = hard_smp_processor_id();
+ current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+ nlm_percpu_init(hwtid);
+ nlm_smp_irq_init(hwtid);
}
void nlm_prepare_cpus(unsigned int max_cpus)
@@ -120,9 +130,6 @@ void nlm_prepare_cpus(unsigned int max_cpus)
void nlm_smp_finish(void)
{
-#ifdef notyet
- nlm_common_msgring_cpu_init();
-#endif
local_irq_enable();
}
@@ -142,27 +149,27 @@ cpumask_t phys_cpu_present_map;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
- unsigned long gp = (unsigned long)task_thread_info(idle);
- unsigned long sp = (unsigned long)__KSTK_TOS(idle);
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
- nlm_next_sp = sp;
- nlm_next_gp = gp;
+ cpu = cpu_logical_map(logical_cpu);
+ node = cpu / NLM_CPUS_PER_NODE;
+ nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ nlm_next_gp = (unsigned long)task_thread_info(idle);
- /* barrier */
+ /* barrier for sp/gp store above */
__sync();
- nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+ nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
}
void __init nlm_smp_setup(void)
{
unsigned int boot_cpu;
- int num_cpus, i;
+ int num_cpus, i, ncore;
boot_cpu = hard_smp_processor_id();
- cpus_clear(phys_cpu_present_map);
+ cpumask_clear(&phys_cpu_present_map);
- cpu_set(boot_cpu, phys_cpu_present_map);
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
set_cpu_possible(0, true);
@@ -174,7 +181,7 @@ void __init nlm_smp_setup(void)
* it is only set for ASPs (see smpboot.S)
*/
if (nlm_cpu_ready[i]) {
- cpu_set(i, phys_cpu_present_map);
+ cpumask_set_cpu(i, &phys_cpu_present_map);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
set_cpu_possible(num_cpus, true);
@@ -182,20 +189,28 @@ void __init nlm_smp_setup(void)
}
}
+ /* count the cores we have woken up */
+ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+ ncore += hweight32(nlm_get_node(i)->coremask);
+
pr_info("Phys CPU present map: %lx, possible map %lx\n",
- (unsigned long)phys_cpu_present_map.bits[0],
+ (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
- pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+ pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+ nlm_threads_per_core, num_cpus);
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
-static int nlm_parse_cpumask(u32 cpu_mask)
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
uint32_t core0_thr_mask, core_thr_mask;
- int threadmode, i;
+ int threadmode, i, j;
- core0_thr_mask = cpu_mask & 0xf;
+ core0_thr_mask = 0;
+ for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+ if (cpumask_test_cpu(i, wakeup_mask))
+ core0_thr_mask |= (1 << i);
switch (core0_thr_mask) {
case 1:
nlm_threads_per_core = 1;
@@ -214,25 +229,23 @@ static int nlm_parse_cpumask(u32 cpu_mask)
}
/* Verify other cores CPU masks */
- nlm_coremask = 1;
- nlm_cpumask = core0_thr_mask;
- for (i = 1; i < 8; i++) {
- core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
- if (core_thr_mask) {
- if (core_thr_mask != core0_thr_mask)
+ for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+ core_thr_mask = 0;
+ for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+ if (cpumask_test_cpu(i + j, wakeup_mask))
+ core_thr_mask |= (1 << j);
+ if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
goto unsupp;
- nlm_coremask |= 1 << i;
- nlm_cpumask |= core0_thr_mask << (4 * i);
- }
}
return threadmode;
unsupp:
- panic("Unsupported CPU mask %x\n", cpu_mask);
+ panic("Unsupported CPU mask %lx\n",
+ (unsigned long)cpumask_bits(wakeup_mask)[0]);
return 0;
}
-int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
+int __cpuinit nlm_wakeup_secondary_cpus(void)
{
unsigned long reset_vec;
char *reset_data;
@@ -244,7 +257,7 @@ int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
(nlm_reset_entry_end - nlm_reset_entry));
/* verify the mask and setup core config variables */
- threadmode = nlm_parse_cpumask(wakeup_mask);
+ threadmode = nlm_parse_cpumask(&nlm_cpumask);
/* Setup CPU init parameters */
reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index a13355cc97e..a0b74874beb 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -61,7 +61,7 @@
li t0, LSU_DEFEATURE
mfcr t1, t0
- lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
or t1, t1, t2
#ifdef XLP_AX_WORKAROUND
li t2, ~0xe /* S1RCM */
@@ -186,7 +186,7 @@ EXPORT(nlm_boot_siblings)
* jump to the secondary wait function.
*/
mfc0 v0, CP0_EBASE, 1
- andi v0, 0x7f /* v0 <- node/core */
+ andi v0, 0x3ff /* v0 <- node/core */
/* Init MMU in the first thread after changing THREAD_MODE
* register (Ax Errata?)
@@ -263,6 +263,8 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
PTR_L gp, 0(t1)
/* a0 has the processor id */
+ mfc0 a0, CP0_EBASE, 1
+ andi a0, 0x3ff /* a0 <- node/core */
PTR_LA t0, nlm_early_init_secondary
jalr t0
nop
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 6c65ac70191..529e74742d9 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -40,23 +40,23 @@
#include <asm/mipsregs.h>
#include <asm/time.h>
+#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/sys.h>
-/* These addresses are computed by the nlm_hal_init() */
-uint64_t nlm_io_base;
-uint64_t nlm_sys_base;
-uint64_t nlm_pic_base;
-
/* Main initialization */
-void nlm_hal_init(void)
+void nlm_node_init(int node)
{
- nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
- nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */
- nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_get_node(node);
+ nodep->sysbase = nlm_get_sys_regbase(node);
+ nodep->picbase = nlm_get_pic_regbase(node);
+ nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ spin_lock_init(&nodep->piclock);
}
int nlm_irq_to_irt(int irq)
@@ -100,52 +100,15 @@ int nlm_irq_to_irt(int irq)
}
}
-int nlm_irt_to_irq(int irt)
-{
- switch (irt) {
- case PIC_IRT_UART_0_INDEX:
- return PIC_UART_0_IRQ;
- case PIC_IRT_UART_1_INDEX:
- return PIC_UART_1_IRQ;
- case PIC_IRT_PCIE_LINK_0_INDEX:
- return PIC_PCIE_LINK_0_IRQ;
- case PIC_IRT_PCIE_LINK_1_INDEX:
- return PIC_PCIE_LINK_1_IRQ;
- case PIC_IRT_PCIE_LINK_2_INDEX:
- return PIC_PCIE_LINK_2_IRQ;
- case PIC_IRT_PCIE_LINK_3_INDEX:
- return PIC_PCIE_LINK_3_IRQ;
- case PIC_IRT_EHCI_0_INDEX:
- return PIC_EHCI_0_IRQ;
- case PIC_IRT_EHCI_1_INDEX:
- return PIC_EHCI_1_IRQ;
- case PIC_IRT_OHCI_0_INDEX:
- return PIC_OHCI_0_IRQ;
- case PIC_IRT_OHCI_1_INDEX:
- return PIC_OHCI_1_IRQ;
- case PIC_IRT_OHCI_2_INDEX:
- return PIC_OHCI_2_IRQ;
- case PIC_IRT_OHCI_3_INDEX:
- return PIC_OHCI_3_IRQ;
- case PIC_IRT_MMC_INDEX:
- return PIC_MMC_IRQ;
- case PIC_IRT_I2C_0_INDEX:
- return PIC_I2C_0_IRQ;
- case PIC_IRT_I2C_1_INDEX:
- return PIC_I2C_1_IRQ;
- default:
- return -1;
- }
-}
-
-unsigned int nlm_get_core_frequency(int core)
+unsigned int nlm_get_core_frequency(int node, int core)
{
unsigned int pll_divf, pll_divr, dfs_div, ext_div;
unsigned int rstval, dfsval, denom;
- uint64_t num;
+ uint64_t num, sysbase;
- rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG);
- dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE);
+ sysbase = nlm_get_node(node)->sysbase;
+ rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+ dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
pll_divf = ((rstval >> 10) & 0x7f) + 1;
pll_divr = ((rstval >> 8) & 0x3) + 1;
ext_div = ((rstval >> 30) & 0x3) + 1;
@@ -159,5 +122,5 @@ unsigned int nlm_get_core_frequency(int core)
unsigned int nlm_get_cpu_frequency(void)
{
- return nlm_get_core_frequency(0);
+ return nlm_get_core_frequency(0, 0);
}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index d8997098def..4894d62043a 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -52,26 +52,40 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-unsigned long nlm_common_ebase = 0x0;
-
-/* default to uniprocessor */
-uint32_t nlm_coremask = 1, nlm_cpumask = 1;
-int nlm_threads_per_core = 1;
+uint64_t nlm_io_base;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+unsigned int nlm_threads_per_core;
extern u32 __dtb_start[];
static void nlm_linux_exit(void)
{
- nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1);
+ uint64_t sysbase = nlm_get_node(0)->sysbase;
+
+ nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
for ( ; ; )
cpu_wait();
}
void __init plat_mem_setup(void)
{
+ void *fdtp;
+
panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
+
+ /*
+ * If no FDT pointer is passed in, use the built-in FDT.
+ * device_tree_init() does not handle CKSEG0 pointers in
+ * 64-bit, so convert pointer.
+ */
+ fdtp = (void *)(long)fw_arg0;
+ if (!fdtp)
+ fdtp = __dtb_start;
+ fdtp = phys_to_virt(__pa(fdtp));
+ early_init_devtree(fdtp);
}
const char *get_system_type(void)
@@ -94,27 +108,19 @@ void xlp_mmu_init(void)
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
-void __init prom_init(void)
+void nlm_percpu_init(int hwcpuid)
{
- void *fdtp;
+}
+void __init prom_init(void)
+{
+ nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
xlp_mmu_init();
- nlm_hal_init();
-
- /*
- * If no FDT pointer is passed in, use the built-in FDT.
- * device_tree_init() does not handle CKSEG0 pointers in
- * 64-bit, so convert pointer.
- */
- fdtp = (void *)(long)fw_arg0;
- if (!fdtp)
- fdtp = __dtb_start;
- fdtp = phys_to_virt(__pa(fdtp));
- early_init_devtree(fdtp);
+ nlm_node_init(0);
- nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
- nlm_wakeup_secondary_cpus(0xffffffff);
+ cpumask_setall(&nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
/* update TLB size after waking up threads */
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 44d923ff384..cb9010642ac 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -51,45 +51,72 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-static void xlp_enable_secondary_cores(void)
+static int xlp_wakeup_core(uint64_t sysbase, int core)
{
- uint32_t core, value, coremask, syscoremask;
+ uint32_t coremask, value;
int count;
- /* read cores in reset from SYS block */
- syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
+ coremask = (1 << core);
- /* update user specified */
- nlm_coremask = nlm_coremask & (syscoremask | 1);
+ /* Enable CPU clock */
+ value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
- for (core = 1; core < 8; core++) {
- coremask = 1 << core;
- if ((nlm_coremask & coremask) == 0)
- continue;
+ /* Remove CPU Reset */
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
- /* Enable CPU clock */
- value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL);
- value &= ~coremask;
- nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value);
+ /* Poll for CPU to mark itself coherent */
+ count = 100000;
+ do {
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
+ } while ((value & coremask) != 0 && --count > 0);
- /* Remove CPU Reset */
- value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
- value &= ~coremask;
- nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value);
+ return count != 0;
+}
+
+static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
+{
+ struct nlm_soc_info *nodep;
+ uint64_t syspcibase;
+ uint32_t syscoremask;
+ int core, n, cpu;
+
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ syspcibase = nlm_get_sys_pcibase(n);
+ if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+ break;
+
+ /* read cores in reset from SYS and account for boot cpu */
+ nlm_node_init(n);
+ nodep = nlm_get_node(n);
+ syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
+ if (n == 0)
+ syscoremask |= 1;
+
+ for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+ /* see if the core exists */
+ if ((syscoremask & (1 << core)) == 0)
+ continue;
- /* Poll for CPU to mark itself coherent */
- count = 100000;
- do {
- value = nlm_read_sys_reg(nlm_sys_base,
- SYS_CPU_NONCOHERENT_MODE);
- } while ((value & coremask) != 0 && count-- > 0);
+ /* see if at least the first thread is enabled */
+ cpu = (n * NLM_CORES_PER_NODE + core)
+ * NLM_THREADS_PER_CORE;
+ if (!cpumask_test_cpu(cpu, wakeup_mask))
+ continue;
- if (count == 0)
- pr_err("Failed to enable core %d\n", core);
+ /* wake up the core */
+ if (xlp_wakeup_core(nodep->sysbase, core))
+ nodep->coremask |= 1u << core;
+ else
+ pr_err("Failed to enable core %d\n", core);
+ }
}
}
-void xlp_wakeup_secondary_cpus(void)
+void xlp_wakeup_secondary_cpus()
{
/*
* In case of u-boot, the secondaries are in reset
@@ -98,5 +125,5 @@ void xlp_wakeup_secondary_cpus(void)
xlp_boot_core0_siblings();
/* now get other cores out of reset */
- xlp_enable_secondary_cores();
+ xlp_enable_secondary_cores(&nlm_cpumask);
}
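
xlp_wakeup_core() above uses a bounded polling loop: read SYS_CPU_NONCOHERENT_MODE until the core's bit clears or the retry budget runs out, and the switch to pre-decrement makes the timeout test (count != 0) reliable. A small standalone sketch of the same pattern (not part of the patch; the hardware register is simulated with a plain variable):

#include <stdio.h>
#include <stdint.h>

static uint32_t noncoherent_reg = 0x04;	/* pretend core 2 is still waking up */

static uint32_t read_status(void)
{
	static int reads;

	/* simulate the core going coherent after a few polls */
	if (++reads > 3)
		noncoherent_reg = 0;
	return noncoherent_reg;
}

static int wakeup_core(int core)
{
	uint32_t coremask = 1u << core;
	int count = 100000;
	uint32_t value;

	do {
		value = read_status();
	} while ((value & coremask) != 0 && --count > 0);

	return count != 0;	/* nonzero: the core came up in time */
}

int main(void)
{
	printf("core 2 %s\n", wakeup_core(2) ? "is up" : "failed to wake");
	return 0;
}
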
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index c287dea8757..05902bc6f08 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -1,2 +1,2 @@
-obj-y += setup.o platform.o platform-flash.o
-obj-$(CONFIG_SMP) += wakeup.o
+obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
+obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
new file mode 100644
index 00000000000..bed2cffa100
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cpu-info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+struct xlr_board_fmn_config xlr_board_fmn_config;
+
+static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
+{
+ int bkt;
+
+ pr_info("Bucket size :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
+ pr_info("\n");
+
+ pr_info("Credits distribution :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ fmn_info->credit_config[(bkt * 8) + 0],
+ fmn_info->credit_config[(bkt * 8) + 1],
+ fmn_info->credit_config[(bkt * 8) + 2],
+ fmn_info->credit_config[(bkt * 8) + 3],
+ fmn_info->credit_config[(bkt * 8) + 4],
+ fmn_info->credit_config[(bkt * 8) + 5],
+ fmn_info->credit_config[(bkt * 8) + 6],
+ fmn_info->credit_config[(bkt * 8) + 7]);
+ pr_info("\n");
+}
+
+static void check_credit_distribution(void)
+{
+ struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
+ int bkt, n, total_credits, ncores;
+
+ ncores = hweight32(nlm_current_node()->coremask);
+ for (bkt = 0; bkt < 128; bkt++) {
+ total_credits = 0;
+ for (n = 0; n < ncores; n++)
+ total_credits += cfg->cpu[n].credit_config[bkt];
+ total_credits += cfg->gmac[0].credit_config[bkt];
+ total_credits += cfg->gmac[1].credit_config[bkt];
+ total_credits += cfg->dma.credit_config[bkt];
+ total_credits += cfg->cmp.credit_config[bkt];
+ total_credits += cfg->sae.credit_config[bkt];
+ total_credits += cfg->xgmac[0].credit_config[bkt];
+ total_credits += cfg->xgmac[1].credit_config[bkt];
+ if (total_credits > cfg->bucket_size[bkt])
+ pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
+ bkt, total_credits, cfg->bucket_size[bkt]);
+ }
+ pr_info("Credit distribution complete.\n");
+}
+
+/**
+ * Configure bucket size and credits for a device. 'size' is the size of
+ * the buckets for the device. This size is distributed among all the CPUs
+ * so that all of them can send messages to the device.
+ *
+ * The device is also given 'cpu_credits' to send messages to the CPUs.
+ *
+ * @dev_info: FMN information structure for each device
+ * @start_stn_id: Starting station id of dev_info
+ * @end_stn_id: End station id of dev_info
+ * @num_buckets: Total number of buckets for dev_info
+ * @cpu_credits: Credits allowed to the CPUs for each device described by dev_info
+ * @size: Size of each bucket in the device station
+ */
+static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
+ int end_stn_id, int num_buckets, int cpu_credits, int size)
+{
+ int i, j, num_core, n, credits_per_cpu;
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ dev_info->num_buckets = num_buckets;
+ dev_info->start_stn_id = start_stn_id;
+ dev_info->end_stn_id = end_stn_id;
+
+ n = num_core;
+ if (num_core == 3)
+ n = 4;
+
+ for (i = start_stn_id; i <= end_stn_id; i++) {
+ xlr_board_fmn_config.bucket_size[i] = size;
+
+ /* Dividing device credits equally to cpus */
+ credits_per_cpu = size / n;
+ for (j = 0; j < num_core; j++)
+ cpu[j].credit_config[i] = credits_per_cpu;
+
+ /* credits left to distribute */
+ credits_per_cpu = size - (credits_per_cpu * num_core);
+
+ /* distribute the remaining credits (if any) among cores */
+ for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
+ cpu[j].credit_config[i] += 4;
+ credits_per_cpu -= 4;
+ }
+ }
+
+ /* Give the device 'cpu_credits' credits to each CPU bucket */
+ for (i = 0; i < num_core; i++) {
+ for (j = 0; j < FMN_CORE_NBUCKETS; j++)
+ dev_info->credit_config[(i * 8) + j] = cpu_credits;
+ }
+}
+
+/*
+ * Each core has 256 slots and 8 buckets,
+ * Configure the 8 buckets each with 32 slots
+ */
+static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
+{
+ int i, j;
+
+ for (i = 0; i < num_core; i++) {
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
+
+ for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
+ xlr_board_fmn_config.bucket_size[j] = 32;
+ }
+}
+
+/**
+ * Set up the FMN details for each device according to the devices available
+ * in each variant of the XLR/XLS processor.
+ */
+void xlr_board_info_setup(void)
+{
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+ struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
+ struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
+ struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
+ struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
+ struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
+ int processor_id, num_core;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ processor_id = read_c0_prid() & 0xff00;
+
+ setup_cpu_fmninfo(cpu, num_core);
+ switch (processor_id) {
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS108:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS208:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS412B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
+ FMN_STNID_XMAC0_15_TX, 8, 0, 32);
+ setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
+ FMN_STNID_XMAC1_15_TX, 8, 0, 32);
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 24, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+ default:
+ pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
+ pr_err("Error: Cannot initialize FMN credits.\n");
+ }
+
+ check_credit_distribution();
+
+#if 0 /* debug */
+ print_credit_config(&cpu[0]);
+ print_credit_config(&gmac[0]);
+#endif
+}
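
check_credit_distribution() above enforces one invariant: for every destination bucket, the credits handed out to all senders must not exceed the bucket size. A compact standalone sketch of that check (not part of the patch; the sender names and sizes are made-up examples):

#include <stdio.h>

#define NSENDERS 3
#define NBUCKETS 4

int main(void)
{
	int bucket_size[NBUCKETS] = { 32, 32, 64, 64 };
	int credits[NSENDERS][NBUCKETS] = {
		{ 8, 8, 16, 16 },	/* cpu, for example */
		{ 8, 8, 16, 16 },	/* gmac, for example */
		{ 8, 8, 40, 16 },	/* dma: bucket 2 is oversubscribed */
	};
	int bkt, s, total;

	for (bkt = 0; bkt < NBUCKETS; bkt++) {
		total = 0;
		for (s = 0; s < NSENDERS; s++)
			total += credits[s][bkt];
		if (total > bucket_size[bkt])
			printf("ERROR: bucket %d: credits (%d) > size (%d)\n",
			       bkt, total, bucket_size[bkt]);
	}
	return 0;
}
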
diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c
new file mode 100644
index 00000000000..4d74f03de50
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/common.h>
+
+#define COP2_CC_INIT_CPU_DEST(dest, conf) \
+do { \
+ nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
+ nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
+ nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
+ nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
+ nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
+ nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
+ nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
+ nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
+} while (0)
+
+struct fmn_message_handler {
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
+ void *arg;
+} msg_handlers[128];
+
+/*
+ * FMN interrupt handler. We configure the FMN so that any messages in
+ * any of the CPU buckets will trigger an interrupt on the CPU.
+ * The message can be from any device on the FMN (like NAE/SAE/DMA).
+ * The source station id is used to figure out which of the registered
+ * handlers has to be called.
+ */
+static irqreturn_t fmn_message_handler(int irq, void *data)
+{
+ struct fmn_message_handler *hndlr;
+ int bucket, rv;
+ int size = 0, code = 0, src_stnid = 0;
+ struct nlm_fmn_msg msg;
+ uint32_t mflags, bkt_status;
+
+ mflags = nlm_cop2_enable();
+ /* Disable message ring interrupt */
+ nlm_fmn_setup_intr(irq, 0);
+ while (1) {
+ /* 8 buckets per core; bits [24:31] carry one bit per bucket,
+ * and a bit is zero if the bucket is not empty */
+ bkt_status = (nlm_read_c2_status() >> 24) & 0xff;
+ if (bkt_status == 0xff)
+ break;
+ for (bucket = 0; bucket < 8; bucket++) {
+ /* Continue on empty bucket */
+ if (bkt_status & (1 << bucket))
+ continue;
+ rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
+ &msg);
+ if (rv != 0)
+ continue;
+
+ hndlr = &msg_handlers[src_stnid];
+ if (hndlr->action == NULL)
+ pr_warn("No msgring handler for stnid %d\n",
+ src_stnid);
+ else {
+ nlm_cop2_restore(mflags);
+ hndlr->action(bucket, src_stnid, size, code,
+ &msg, hndlr->arg);
+ mflags = nlm_cop2_enable();
+ }
+ }
+ };
+ /* Enable message ring intr, to any thread in core */
+ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(mflags);
+ return IRQ_HANDLED;
+}
+
+struct irqaction fmn_irqaction = {
+ .handler = fmn_message_handler,
+ .flags = IRQF_PERCPU,
+ .name = "fmn",
+};
+
+void xlr_percpu_fmn_init(void)
+{
+ struct xlr_fmn_info *cpu_fmn_info;
+ int *bucket_sizes;
+ uint32_t flags;
+ int id;
+
+ BUG_ON(nlm_thread_id() != 0);
+ id = nlm_core_id();
+
+ bucket_sizes = xlr_board_fmn_config.bucket_size;
+ cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
+ flags = nlm_cop2_enable();
+
+ /* Setup bucket sizes for the core. */
+ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
+ nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
+ nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
+ nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
+ nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
+ nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
+ nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
+ nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
+
+ /*
+ * For sending FMN messages, we need credits on the destination
+ * bucket. Program the credits this core has on the 128 possible
+ * destination buckets.
+ * We cannot use a loop here, because the first argument has
+ * to be a constant integer value.
+ */
+ COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
+
+ /* enable FMN interrupts on this CPU */
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(flags);
+}
+
+
+/*
+ * Register a FMN message handler for a range of source station ids
+ * @start_stnid: first source station id
+ * @end_stnid: last source station id
+ * @action: handler function pointer
+ * @arg: argument passed back to the handler
+ */
+int nlm_register_fmn_handler(int start_stnid, int end_stnid,
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg)
+{
+ int sstnid;
+
+ for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
+ msg_handlers[sstnid].arg = arg;
+ smp_wmb();
+ msg_handlers[sstnid].action = action;
+ }
+ pr_debug("Registered FMN msg handler for stnid %d-%d\n",
+ start_stnid, end_stnid);
+ return 0;
+}
+
+void nlm_setup_fmn_irq(void)
+{
+ uint32_t flags;
+
+ /* setup irq only once */
+ setup_irq(IRQ_FMN, &fmn_irqaction);
+
+ flags = nlm_cop2_enable();
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_restore(flags);
+}
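
fmn_message_handler() above drains the CPU buckets by reading an 8-bit status field where a set bit means the bucket is empty, so only the zero bits are serviced. An isolated sketch of that scan (not part of the patch; the status value is a made-up example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bkt_status = 0xf5;	/* bits 1 and 3 clear: those buckets hold messages */
	int bucket;

	if (bkt_status == 0xff) {
		printf("all buckets empty\n");
		return 0;
	}
	for (bucket = 0; bucket < 8; bucket++) {
		if (bkt_status & (1 << bucket))
			continue;	/* empty bucket, skip it */
		printf("bucket %d has a pending message\n", bucket);
	}
	return 0;
}
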
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 81b1d311834..4e7f49d3d5a 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -49,16 +49,15 @@
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/fmn.h>
uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
-uint64_t nlm_pic_base;
struct psb_info nlm_prom_info;
-unsigned long nlm_common_ebase = 0x0;
-
/* default to uniprocessor */
-uint32_t nlm_coremask = 1, nlm_cpumask = 1;
-int nlm_threads_per_core = 1;
+unsigned int nlm_threads_per_core = 1;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
static void __init nlm_early_serial_setup(void)
{
@@ -113,6 +112,12 @@ void __init prom_free_prom_memory(void)
/* Nothing yet */
}
+void nlm_percpu_init(int hwcpuid)
+{
+ if (hwcpuid % 4 == 0)
+ xlr_percpu_fmn_init();
+}
+
static void __init build_arcs_cmdline(int *argv)
{
int i, remain, len;
@@ -176,9 +181,19 @@ static void prom_add_memory(void)
}
}
+static void nlm_init_node(void)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_current_node();
+ nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ spin_lock_init(&nodep->piclock);
+}
+
void __init prom_init(void)
{
- int *argv, *envp; /* passed as 32 bit ptrs */
+ int i, *argv, *envp; /* passed as 32 bit ptrs */
struct psb_info *prom_infop;
/* truncate to 32 bit and sign extend all args */
@@ -187,15 +202,19 @@ void __init prom_init(void)
prom_infop = (struct psb_info *)(long)(int)fw_arg3;
nlm_prom_info = *prom_infop;
- nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nlm_init_node();
nlm_early_serial_setup();
build_arcs_cmdline(argv);
- nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
prom_add_memory();
#ifdef CONFIG_SMP
- nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map);
+ for (i = 0; i < 32; i++)
+ if (nlm_prom_info.online_cpu_map & (1 << i))
+ cpumask_set_cpu(i, &nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
register_smp_ops(&nlm_smp_ops);
#endif
+ xlr_board_info_setup();
+ xlr_percpu_fmn_init();
}
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index db5d987d488..3ebf7411d67 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -33,6 +33,7 @@
*/
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/threads.h>
#include <asm/asm.h>
@@ -50,18 +51,34 @@
int __cpuinit xlr_wakeup_secondary_cpus(void)
{
- unsigned int i, boot_cpu;
+ struct nlm_soc_info *nodep;
+ unsigned int i, j, boot_cpu;
/*
* In case of RMI boot, hit with NMI to get the cores
* from bootloader to linux code.
*/
+ nodep = nlm_get_node(0);
boot_cpu = hard_smp_processor_id();
nlm_set_nmi_handler(nlm_rmiboot_preboot);
for (i = 0; i < NR_CPUS; i++) {
- if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0)
+ if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
continue;
- nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */
+ nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
+ }
+
+ /* Fill up the coremask early */
+ nodep->coremask = 1;
+ for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+ for (j = 1000000; j > 0; j--) {
+ if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE])
+ break;
+ udelay(10);
+ }
+ if (j != 0)
+ nodep->coremask |= (1u << i);
+ else
+ pr_err("Failed to wakeup core %d\n", i);
}
return 0;
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 1208c280f77..9c0a6782c09 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -12,5 +12,5 @@ oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_RM9000) += op_model_rm9000.o
+oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index f80480a5a03..e32db1ff02c 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -16,7 +16,6 @@
#include "op_impl.h"
extern struct op_mips_model op_model_mipsxx_ops __weak;
-extern struct op_mips_model op_model_rm9000_ops __weak;
extern struct op_mips_model op_model_loongson2_ops __weak;
static struct op_mips_model *model;
@@ -91,12 +90,10 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_XLR:
lmodel = &op_model_mipsxx_ops;
break;
- case CPU_RM9000:
- lmodel = &op_model_rm9000_ops;
- break;
case CPU_LOONGSON2:
lmodel = &op_model_loongson2_ops;
break;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 28ea1a4cc57..78625463040 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -31,8 +31,22 @@
#define M_COUNTER_OVERFLOW (1UL << 31)
+/* Netlogic XLR specific, count events in all threads in a core */
+#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
+
static int (*save_perf_irq)(void);
+/*
+ * XLR has only one set of counters per core. Designate the
+ * first hardware thread in the core for setup and init.
+ * Skip CPUs with non-zero hardware thread id (4 hwt per core)
+ */
+#ifdef CONFIG_CPU_XLR
+#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
+#else
+#define oprofile_skip_cpu(c) 0
+#endif
+
#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;
#define WHAT (M_TC_EN_VPE | \
@@ -152,6 +166,8 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
+ if (current_cpu_type() == CPU_XLR)
+ reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}
}
@@ -162,6 +178,9 @@ static void mipsxx_cpu_setup(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
switch (counters) {
case 4:
w_c0_perfctrl3(0);
@@ -183,6 +202,9 @@ static void mipsxx_cpu_start(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
switch (counters) {
case 4:
w_c0_perfctrl3(WHAT | reg.control[3]);
@@ -200,6 +222,9 @@ static void mipsxx_cpu_stop(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
+ if (oprofile_skip_cpu(smp_processor_id()))
+ return;
+
switch (counters) {
case 4:
w_c0_perfctrl3(0);
@@ -372,6 +397,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/loongson1";
break;
+ case CPU_XLR:
+ op_model_mipsxx_ops.cpu_type = "mips/xlr";
+ break;
+
default:
printk(KERN_ERR "Profiling unsupported for this CPU\n");
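
The oprofile_skip_cpu() test added above lets only the first hardware thread of each XLR core touch the shared per-core counters: with 4 threads per core, any CPU whose hardware thread id (the low two bits) is non-zero is skipped. A tiny sketch of the effect (not part of the patch; the logical id is used directly instead of cpu_logical_map()):

#include <stdio.h>

#define skip_cpu(id)	(((id) & 0x3) != 0)

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: %s\n", cpu,
		       skip_cpu(cpu) ? "skip" : "program counters");
	return 0;
}
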
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
deleted file mode 100644
index 3aa81384966..00000000000
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- */
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-
-#include "op_impl.h"
-
-#define RM9K_COUNTER1_EVENT(event) ((event) << 0)
-#define RM9K_COUNTER1_SUPERVISOR (1ULL << 7)
-#define RM9K_COUNTER1_KERNEL (1ULL << 8)
-#define RM9K_COUNTER1_USER (1ULL << 9)
-#define RM9K_COUNTER1_ENABLE (1ULL << 10)
-#define RM9K_COUNTER1_OVERFLOW (1ULL << 15)
-
-#define RM9K_COUNTER2_EVENT(event) ((event) << 16)
-#define RM9K_COUNTER2_SUPERVISOR (1ULL << 23)
-#define RM9K_COUNTER2_KERNEL (1ULL << 24)
-#define RM9K_COUNTER2_USER (1ULL << 25)
-#define RM9K_COUNTER2_ENABLE (1ULL << 26)
-#define RM9K_COUNTER2_OVERFLOW (1ULL << 31)
-
-extern unsigned int rm9000_perfcount_irq;
-
-static struct rm9k_register_config {
- unsigned int control;
- unsigned int reset_counter1;
- unsigned int reset_counter2;
-} reg;
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void rm9000_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int control = 0;
-
- /* Compute the performance counter control word. */
- /* For now count kernel and user mode */
- if (ctr[0].enabled)
- control |= RM9K_COUNTER1_EVENT(ctr[0].event) |
- RM9K_COUNTER1_KERNEL |
- RM9K_COUNTER1_USER |
- RM9K_COUNTER1_ENABLE;
- if (ctr[1].enabled)
- control |= RM9K_COUNTER2_EVENT(ctr[1].event) |
- RM9K_COUNTER2_KERNEL |
- RM9K_COUNTER2_USER |
- RM9K_COUNTER2_ENABLE;
- reg.control = control;
-
- reg.reset_counter1 = 0x80000000 - ctr[0].count;
- reg.reset_counter2 = 0x80000000 - ctr[1].count;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void rm9000_cpu_setup(void *args)
-{
- uint64_t perfcount;
-
- perfcount = ((uint64_t) reg.reset_counter2 << 32) | reg.reset_counter1;
- write_c0_perfcount(perfcount);
-}
-
-static void rm9000_cpu_start(void *args)
-{
- /* Start all counters on current CPU */
- write_c0_perfcontrol(reg.control);
-}
-
-static void rm9000_cpu_stop(void *args)
-{
- /* Stop all counters on current CPU */
- write_c0_perfcontrol(0);
-}
-
-static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
-{
- unsigned int control = read_c0_perfcontrol();
- struct pt_regs *regs = get_irq_regs();
- uint32_t counter1, counter2;
- uint64_t counters;
-
- /*
- * RM9000 combines two 32-bit performance counters into a single
- * 64-bit coprocessor zero register. To avoid a race updating the
- * registers we need to stop the counters while we're messing with
- * them ...
- */
- write_c0_perfcontrol(0);
-
- counters = read_c0_perfcount();
- counter1 = counters;
- counter2 = counters >> 32;
-
- if (control & RM9K_COUNTER1_OVERFLOW) {
- oprofile_add_sample(regs, 0);
- counter1 = reg.reset_counter1;
- }
- if (control & RM9K_COUNTER2_OVERFLOW) {
- oprofile_add_sample(regs, 1);
- counter2 = reg.reset_counter2;
- }
-
- counters = ((uint64_t)counter2 << 32) | counter1;
- write_c0_perfcount(counters);
- write_c0_perfcontrol(reg.control);
-
- return IRQ_HANDLED;
-}
-
-static int __init rm9000_init(void)
-{
- return request_irq(rm9000_perfcount_irq, rm9000_perfcount_handler,
- 0, "Perfcounter", NULL);
-}
-
-static void rm9000_exit(void)
-{
- free_irq(rm9000_perfcount_irq, NULL);
-}
-
-struct op_mips_model op_model_rm9000_ops = {
- .reg_setup = rm9000_reg_setup,
- .cpu_setup = rm9000_cpu_setup,
- .init = rm9000_init,
- .exit = rm9000_exit,
- .cpu_start = rm9000_cpu_start,
- .cpu_stop = rm9000_cpu_stop,
- .cpu_type = "mips/rm9000",
- .num_counters = 2
-};
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index e13a71cbc3c..ce995d3d944 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -34,8 +34,6 @@ obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o
obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o
-obj-$(CONFIG_PMC_YOSEMITE) += fixup-yosemite.o ops-titan.o ops-titan-ht.o \
- pci-yosemite.o
obj-$(CONFIG_SGI_IP27) += ops-bridge.o pci-ip27.o
obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o
obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
diff --git a/arch/mips/pci/fixup-yosemite.c b/arch/mips/pci/fixup-yosemite.c
deleted file mode 100644
index fdafb13a793..00000000000
--- a/arch/mips/pci/fixup-yosemite.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (pin == 0)
- return -1;
-
- return 3; /* Everything goes to one irq bit */
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index b46b3e21177..438319465cb 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -56,7 +56,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
@@ -76,7 +76,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
oh_my_gawd:
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
@@ -85,7 +85,7 @@ oh_my_gawd:
}
/*
- * IOC3 is fucked fucked beyond believe ... Don't try to access
+ * IOC3 is fucking fucked beyond belief ... Don't try to access
* anything but 32-bit words ...
*/
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
@@ -118,7 +118,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
@@ -139,7 +139,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
oh_my_gawd:
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
@@ -148,7 +148,7 @@ oh_my_gawd:
}
/*
- * IOC3 is fucked fucked beyond believe ... Don't try to access
+ * IOC3 is fucking fucked beyond belief ... Don't try to access
* anything but 32-bit words ...
*/
bridge->b_pci_cfg = (busno << 16) | (slot << 11);
@@ -189,7 +189,7 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
@@ -213,14 +213,14 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
oh_my_gawd:
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to touch the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
return PCIBIOS_SUCCESSFUL;
/*
- * IOC3 is fucked fucked beyond believe ... Don't try to access
+ * IOC3 is fucking fucked beyond belief ... Don't try to access
* anything but 32-bit words ...
*/
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
@@ -257,7 +257,7 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to look at it for real ...
*/
if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
@@ -281,14 +281,14 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
oh_my_gawd:
/*
- * IOC3 is fucked fucked beyond believe ... Don't even give the
+ * IOC3 is fucking fucked beyond belief ... Don't even give the
* generic PCI code a chance to touch the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
return PCIBIOS_SUCCESSFUL;
/*
- * IOC3 is fucked fucked beyond believe ... Don't try to access
+ * IOC3 is fucking fucked beyond belief ... Don't try to access
* anything but 32-bit words ...
*/
addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
diff --git a/arch/mips/pci/ops-titan-ht.c b/arch/mips/pci/ops-titan-ht.c
deleted file mode 100644
index 57d54adc9e2..00000000000
--- a/arch/mips/pci/ops-titan-ht.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#include <asm/titan_dep.h>
-
-static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn,
- int offset, u32 *val)
-{
- volatile uint32_t address;
- int busno;
-
- busno = bus->number;
-
- address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000;
- if (busno != 0)
- address |= 1;
-
- /*
- * RM9000 HT Errata: Issue back to back HT config
- * transcations. Issue a BIU sync before and
- * after the HT cycle
- */
-
- *(volatile int32_t *) 0xfb0000f0 |= 0x2;
-
- udelay(30);
-
- *(volatile int32_t *) 0xfb0006f8 = address;
- *(val) = *(volatile int32_t *) 0xfb0006fc;
-
- udelay(30);
-
- * (volatile int32_t *) 0xfb0000f0 |= 0x2;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn,
- int offset, int size, u32 *val)
-{
- uint32_t dword;
-
- titan_ht_config_read_dword(bus, devfn, offset, &dword);
-
- dword >>= ((offset & 3) << 3);
- dword &= (0xffffffffU >> ((4 - size) << 8));
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static inline int titan_ht_config_write_dword(struct pci_bus *bus,
- unsigned int devfn, int offset, u32 val)
-{
- volatile uint32_t address;
- int busno;
-
- busno = bus->number;
-
- address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000;
- if (busno != 0)
- address |= 1;
-
- *(volatile int32_t *) 0xfb0000f0 |= 0x2;
-
- udelay(30);
-
- *(volatile int32_t *) 0xfb0006f8 = address;
- *(volatile int32_t *) 0xfb0006fc = val;
-
- udelay(30);
-
- *(volatile int32_t *) 0xfb0000f0 |= 0x2;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn,
- int offset, int size, u32 val)
-{
- uint32_t val1, val2, mask;
-
- titan_ht_config_read_dword(bus, devfn, offset, &val2);
-
- val1 = val << ((offset & 3) << 3);
- mask = ~(0xffffffffU >> ((4 - size) << 8));
- val2 &= ~(mask << ((offset & 3) << 8));
-
- titan_ht_config_write_dword(bus, devfn, offset, val1 | val2);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops titan_ht_pci_ops = {
- .read = titan_ht_config_read,
- .write = titan_ht_config_write,
-};
diff --git a/arch/mips/pci/ops-titan.c b/arch/mips/pci/ops-titan.c
deleted file mode 100644
index ebf8fc40e9b..00000000000
--- a/arch/mips/pci/ops-titan.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-
-#include <asm/pci.h>
-#include <asm/io.h>
-#include <asm/rm9k-ocd.h>
-
-/*
- * PCI specific defines
- */
-#define TITAN_PCI_0_CONFIG_ADDRESS 0x780
-#define TITAN_PCI_0_CONFIG_DATA 0x784
-
-/*
- * Titan PCI Config Read Byte
- */
-static int titan_read_config(struct pci_bus *bus, unsigned int devfn, int reg,
- int size, u32 * val)
-{
- uint32_t address, tmp;
- int dev, busno, func;
-
- busno = bus->number;
- dev = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
-
- address = (busno << 16) | (dev << 11) | (func << 8) |
- (reg & 0xfc) | 0x80000000;
-
-
- /* start the configuration cycle */
- ocd_writel(address, TITAN_PCI_0_CONFIG_ADDRESS);
- tmp = ocd_readl(TITAN_PCI_0_CONFIG_DATA) >> ((reg & 3) << 3);
-
- switch (size) {
- case 1:
- tmp &= 0xff;
- case 2:
- tmp &= 0xffff;
- }
- *val = tmp;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int titan_write_config(struct pci_bus *bus, unsigned int devfn, int reg,
- int size, u32 val)
-{
- uint32_t address;
- int dev, busno, func;
-
- busno = bus->number;
- dev = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
-
- address = (busno << 16) | (dev << 11) | (func << 8) |
- (reg & 0xfc) | 0x80000000;
-
- /* start the configuration cycle */
- ocd_writel(address, TITAN_PCI_0_CONFIG_ADDRESS);
-
- /* write the data */
- switch (size) {
- case 1:
- ocd_writeb(val, TITAN_PCI_0_CONFIG_DATA + (~reg & 0x3));
- break;
-
- case 2:
- ocd_writew(val, TITAN_PCI_0_CONFIG_DATA + (~reg & 0x2));
- break;
-
- case 4:
- ocd_writel(val, TITAN_PCI_0_CONFIG_DATA);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * Titan PCI structure
- */
-struct pci_ops titan_pci_ops = {
- titan_read_config,
- titan_write_config,
-};
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 8a48139d219..ca179b6ff39 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -11,8 +11,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <asm/bootinfo.h>
+#include <bcm63xx_reset.h>
+
#include "pci-bcm63xx.h"
/*
@@ -119,41 +122,36 @@ static void __init bcm63xx_reset_pcie(void)
{
u32 val;
- /* enable clock */
- val = bcm_perf_readl(PERF_CKCTL_REG);
- val |= CKCTL_6328_PCIE_EN;
- bcm_perf_writel(val, PERF_CKCTL_REG);
-
/* enable SERDES */
val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
/* reset the PCIe core */
- val = bcm_perf_readl(PERF_SOFTRESET_6328_REG);
-
- val &= ~SOFTRESET_6328_PCIE_MASK;
- val &= ~SOFTRESET_6328_PCIE_CORE_MASK;
- val &= ~SOFTRESET_6328_PCIE_HARD_MASK;
- val &= ~SOFTRESET_6328_PCIE_EXT_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1);
mdelay(10);
- val |= SOFTRESET_6328_PCIE_MASK;
- val |= SOFTRESET_6328_PCIE_CORE_MASK;
- val |= SOFTRESET_6328_PCIE_HARD_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0);
mdelay(10);
- val |= SOFTRESET_6328_PCIE_EXT_MASK;
- bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0);
mdelay(200);
}
+static struct clk *pcie_clk;
+
static int __init bcm63xx_register_pcie(void)
{
u32 val;
+ /* enable clock */
+ pcie_clk = clk_get(NULL, "pcie");
+ if (IS_ERR_OR_NULL(pcie_clk))
+ return -ENODEV;
+
+ clk_prepare_enable(pcie_clk);
+
bcm63xx_reset_pcie();
/* configure the PCIe bridge */
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 4b0c347d7a8..5b5ed76c6f4 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/swiotlb.h>
#include <asm/time.h>
@@ -704,6 +705,10 @@ static int __init octeon_pci_setup(void)
*/
cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+ if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
+ -1, NULL, 0)))
+ pr_err("Registation of co_pci_edac failed!\n");
+
octeon_pci_dma_init();
return 0;
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 18af021d289..0c18ccc7962 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -47,6 +47,7 @@
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
#include <asm/netlogic/xlr/msidef.h>
#include <asm/netlogic/xlr/iomap.h>
@@ -174,22 +175,9 @@ static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
return p ? bus->self : NULL;
}
-static int get_irq_vector(const struct pci_dev *dev)
+static int nlm_pci_link_to_irq(int link)
{
- struct pci_dev *lnk;
-
- if (!nlm_chip_is_xls())
- return PIC_PCIX_IRQ; /* for XLR just one IRQ */
-
- /*
- * For XLS PCIe, there is an IRQ per Link, find out which
- * link the device is on to assign interrupts
- */
- lnk = xls_get_pcie_link(dev);
- if (lnk == NULL)
- return 0;
-
- switch (PCI_SLOT(lnk->devfn)) {
+ switch (link) {
case 0:
return PIC_PCIE_LINK0_IRQ;
case 1:
@@ -205,10 +193,26 @@ static int get_irq_vector(const struct pci_dev *dev)
else
return PIC_PCIE_LINK3_IRQ;
}
- WARN(1, "Unexpected devfn %d\n", lnk->devfn);
+ WARN(1, "Unexpected link %d\n", link);
return 0;
}
+static int get_irq_vector(const struct pci_dev *dev)
+{
+ struct pci_dev *lnk;
+ int link;
+
+ if (!nlm_chip_is_xls())
+ return PIC_PCIX_IRQ; /* for XLR just one IRQ */
+
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
+ return 0;
+
+ link = PCI_SLOT(lnk->devfn);
+ return nlm_pci_link_to_irq(link);
+}
+
#ifdef CONFIG_PCI_MSI
void destroy_irq(unsigned int irq)
{
@@ -332,6 +336,9 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
static int __init pcibios_init(void)
{
+ void (*extra_ack)(struct irq_data *);
+ int link, irq;
+
/* PSB assigns PCI resources */
pci_set_flags(PCI_PROBE_ONLY);
pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
@@ -350,27 +357,19 @@ static int __init pcibios_init(void)
* For PCI interrupts, we need to ack the PCI controller too, overload
* irq handler data to do this
*/
- if (nlm_chip_is_xls()) {
- if (nlm_chip_is_xls_b()) {
- irq_set_handler_data(PIC_PCIE_LINK0_IRQ,
- xls_pcie_ack_b);
- irq_set_handler_data(PIC_PCIE_LINK1_IRQ,
- xls_pcie_ack_b);
- irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ,
- xls_pcie_ack_b);
- irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ,
- xls_pcie_ack_b);
- } else {
- irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack);
- irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack);
- irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack);
- irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack);
- }
- } else {
+ if (!nlm_chip_is_xls()) {
/* XLR PCI controller ACK */
- irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
+ nlm_set_pic_extra_ack(0, PIC_PCIX_IRQ, xlr_pci_ack);
+ } else {
+ if (nlm_chip_is_xls_b())
+ extra_ack = xls_pcie_ack_b;
+ else
+ extra_ack = xls_pcie_ack;
+ for (link = 0; link < 4; link++) {
+ irq = nlm_pci_link_to_irq(link);
+ nlm_set_pic_extra_ack(0, irq, extra_ack);
+ }
}
-
return 0;
}
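
The pci-xlr.c change factors the link-to-IRQ switch into nlm_pci_link_to_irq() so that pcibios_init() can install the same extra-ack handler for all four links in a loop. A standalone sketch of that mapping (not part of the patch; the IRQ numbers are placeholders for the PIC_PCIE_LINK*_IRQ constants):

#include <stdio.h>

static int link_to_irq(int link)
{
	switch (link) {
	case 0: return 26;	/* placeholder for PIC_PCIE_LINK0_IRQ */
	case 1: return 27;
	case 2: return 28;
	case 3: return 29;
	default:
		fprintf(stderr, "unexpected link %d\n", link);
		return 0;
	}
}

int main(void)
{
	int link;

	for (link = 0; link < 4; link++)
		printf("link %d -> irq %d\n", link, link_to_irq(link));
	return 0;
}
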
diff --git a/arch/mips/pci/pci-yosemite.c b/arch/mips/pci/pci-yosemite.c
deleted file mode 100644
index cf5e1a25cb7..00000000000
--- a/arch/mips/pci/pci-yosemite.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/titan_dep.h>
-
-extern struct pci_ops titan_pci_ops;
-
-static struct resource py_mem_resource = {
- .start = 0xe0000000UL,
- .end = 0xe3ffffffUL,
- .name = "Titan PCI MEM",
- .flags = IORESOURCE_MEM
-};
-
-/*
- * PMON really reserves 16MB of I/O port space but that's stupid, nothing
- * needs that much since allocations are limited to 256 bytes per device
- * anyway. So we just claim 64kB here.
- */
-#define TITAN_IO_SIZE 0x0000ffffUL
-#define TITAN_IO_BASE 0xe8000000UL
-
-static struct resource py_io_resource = {
- .start = 0x00001000UL,
- .end = TITAN_IO_SIZE - 1,
- .name = "Titan IO MEM",
- .flags = IORESOURCE_IO,
-};
-
-static struct pci_controller py_controller = {
- .pci_ops = &titan_pci_ops,
- .mem_resource = &py_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &py_io_resource,
- .io_offset = 0x00000000UL
-};
-
-static char ioremap_failed[] __initdata = "Could not ioremap I/O port range";
-
-static int __init pmc_yosemite_setup(void)
-{
- unsigned long io_v_base;
-
- io_v_base = (unsigned long) ioremap(TITAN_IO_BASE, TITAN_IO_SIZE);
- if (!io_v_base)
- panic(ioremap_failed);
-
- set_io_port_base(io_v_base);
- py_controller.io_map_base = io_v_base;
- TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
-
- ioport_resource.end = TITAN_IO_SIZE - 1;
-
- register_pci_controller(&py_controller);
-
- return 0;
-}
-
-arch_initcall(pmc_yosemite_setup);
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index bbd76082fa8..3482b8c8640 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -34,10 +34,6 @@ config PMC_MSP7120_FPGA
endchoice
-config HYPERTRANSPORT
- bool "Hypertransport Support for PMC-Sierra Yosemite"
- depends on PMC_YOSEMITE
-
config MSP_HAS_USB
boolean
depends on PMC_MSP
diff --git a/arch/mips/pmc-sierra/Platform b/arch/mips/pmc-sierra/Platform
index f092f2524c5..387fda6c28c 100644
--- a/arch/mips/pmc-sierra/Platform
+++ b/arch/mips/pmc-sierra/Platform
@@ -5,10 +5,3 @@ platform-$(CONFIG_PMC_MSP) += pmc-sierra/msp71xx/
cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/pmc-sierra/msp71xx \
-mno-branch-likely
load-$(CONFIG_PMC_MSP) += 0xffffffff80100000
-
-#
-# PMC-Sierra Yosemite
-#
-platform-$(CONFIG_PMC_YOSEMITE) += pmc-sierra/yosemite/
-cflags-$(CONFIG_PMC_YOSEMITE) += -I$(srctree)/arch/mips/include/asm/mach-yosemite
-load-$(CONFIG_PMC_YOSEMITE) += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
deleted file mode 100644
index 5af95ec3319..00000000000
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the PMC-Sierra Titan
-#
-
-obj-y += irq.o prom.o py-console.o setup.o
-
-obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
deleted file mode 100644
index d6f8bdff8cb..00000000000
--- a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2003 PMC-Sierra Inc.
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * Description:
- *
- * This code reads the ATMEL 24CXX EEPROM. The PMC-Sierra Yosemite board uses the ATMEL
- * 24C32/24C64 which uses two byte addressing as compared to 24C16. Note that this program
- * uses a serial port such as /dev/ttyS0 to communicate with the EEPROM. Hence, you are
- * expected to have connectivity from the EEPROM to the serial port. This program does
- * __not__ communicate using the I2C protocol.
- */
-
-#include "atmel_read_eeprom.h"
-
-static void delay(int delay)
-{
- while (delay--);
-}
-
-static void send_bit(unsigned char bit)
-{
- scl_lo;
- delay(TXX);
- if (bit)
- sda_hi;
- else
- sda_lo;
-
- delay(TXX);
- scl_hi;
- delay(TXX);
-}
-
-static void send_ack(void)
-{
- send_bit(0);
-}
-
-static void send_byte(unsigned char byte)
-{
- int i = 0;
-
- for (i = 7; i >= 0; i--)
- send_bit((byte >> i) & 0x01);
-}
-
-static void send_start(void)
-{
- sda_hi;
- delay(TXX);
- scl_hi;
- delay(TXX);
- sda_lo;
- delay(TXX);
-}
-
-static void send_stop(void)
-{
- sda_lo;
- delay(TXX);
- scl_hi;
- delay(TXX);
- sda_hi;
- delay(TXX);
-}
-
-static void do_idle(void)
-{
- sda_hi;
- scl_hi;
- vcc_off;
-}
-
-static int recv_bit(void)
-{
- int status;
-
- scl_lo;
- delay(TXX);
- sda_hi;
- delay(TXX);
- scl_hi;
- delay(TXX);
-
- return 1;
-}
-
-static unsigned char recv_byte(void) {
- int i;
- unsigned char byte=0;
-
- for (i=7;i>=0;i--)
- byte |= (recv_bit() << i);
-
- return byte;
-}
-
-static int recv_ack(void)
-{
- unsigned int ack;
-
- ack = (unsigned int)recv_bit();
- scl_lo;
-
- if (ack) {
- do_idle();
- printk(KERN_ERR "Error reading the Atmel 24C32/24C64 EEPROM\n");
- return -1;
- }
-
- return ack;
-}
-
-/*
- * This function does the actual read of the EEPROM. It needs the buffer into which the
- * read data is copied, the size of the EEPROM being read and the buffer size
- */
-int read_eeprom(char *buffer, int eeprom_size, int size)
-{
- int i = 0, err;
-
- send_start();
- send_byte(W_HEADER);
- recv_ack();
-
- /* EEPROMs with a size of more than 2K need two byte addressing */
- if (eeprom_size > 2048) {
- send_byte(0x00);
- recv_ack();
- }
-
- send_start();
- send_byte(R_HEADER);
- err = recv_ack();
- if (err == -1)
- return err;
-
- for (i = 0; i < size; i++) {
- *buffer++ = recv_byte();
- send_ack();
- }
-
- /* Note: we should check whether the buffer contains correct information */
-
- send_stop();
-}
diff --git a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.h b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.h
deleted file mode 100644
index d6c7ec469fa..00000000000
--- a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
- *
- * Copyright (C) 2003 PMC-Sierra Inc.
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * Header file for atmel_read_eeprom.c
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <asm/pci.h>
-#include <asm/io.h>
-#include <linux/init.h>
-#include <asm/termios.h>
-#include <asm/ioctls.h>
-#include <linux/ioctl.h>
-#include <linux/fcntl.h>
-
-#define DEFAULT_PORT "/dev/ttyS0" /* Port to open */
-#define TXX 0 /* Dummy loop for spinning */
-
-#define BLOCK_SEL 0x00
-#define SLAVE_ADDR 0xa0
-#define READ_BIT 0x01
-#define WRITE_BIT 0x00
-#define R_HEADER SLAVE_ADDR + BLOCK_SEL + READ_BIT
-#define W_HEADER SLAVE_ADDR + BLOCK_SEL + WRITE_BIT
-
-/*
- * Clock, Voltages and Data
- */
-#define vcc_off (ioctl(fd, TIOCSBRK, 0))
-#define vcc_on (ioctl(fd, TIOCCBRK, 0))
-#define sda_hi (ioctl(fd, TIOCMBIS, &dtr))
-#define sda_lo (ioctl(fd, TIOCMBIC, &dtr))
-#define scl_lo (ioctl(fd, TIOCMBIC, &rts))
-#define scl_hi (ioctl(fd, TIOCMBIS, &rts))
-
-const char rts = TIOCM_RTS;
-const char dtr = TIOCM_DTR;
-int fd;
diff --git a/arch/mips/pmc-sierra/yosemite/ht-irq.c b/arch/mips/pmc-sierra/yosemite/ht-irq.c
deleted file mode 100644
index 62ead6601c6..00000000000
--- a/arch/mips/pmc-sierra/yosemite/ht-irq.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/pci.h>
-
-/*
- * HT Bus fixup for the Titan
- * XXX IRQ values need to change based on the board layout
- */
-void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus)
-{
- /*
- * PLX and SPKT related changes go here
- */
-}
diff --git a/arch/mips/pmc-sierra/yosemite/ht.c b/arch/mips/pmc-sierra/yosemite/ht.c
deleted file mode 100644
index 14dc9c8fff0..00000000000
--- a/arch/mips/pmc-sierra/yosemite/ht.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Copyright 2003 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <asm/pci.h>
-#include <asm/io.h>
-
-#include <linux/init.h>
-#include <asm/titan_dep.h>
-
-#ifdef CONFIG_HYPERTRANSPORT
-
-
-/*
- * This function check if the Hypertransport Link Initialization completed. If
- * it did, then proceed further with scanning bus #2
- */
-static __inline__ int check_titan_htlink(void)
-{
- u32 val;
-
- val = *(volatile uint32_t *)(RM9000x2_HTLINK_REG);
- if (val & 0x00000020)
- /* HT Link Initialization completed */
- return 1;
- else
- return 0;
-}
-
-static int titan_ht_config_read_dword(struct pci_dev *device,
- int offset, u32* val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- RM9K_WRITE(address_reg, address);
- RM9K_READ(data_reg, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int titan_ht_config_read_word(struct pci_dev *device,
- int offset, u16* val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- if ((offset & 0x3) == 0)
- offset = 0x2;
- else
- offset = 0x0;
-
- RM9K_WRITE(address_reg, address);
- RM9K_READ_16(data_reg + offset, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-u32 longswap(unsigned long l)
-{
- unsigned char b1, b2, b3, b4;
-
- b1 = l&255;
- b2 = (l>>8)&255;
- b3 = (l>>16)&255;
- b4 = (l>>24)&255;
-
- return ((b1<<24) + (b2<<16) + (b3<<8) + b4);
-}
-
-
-static int titan_ht_config_read_byte(struct pci_dev *device,
- int offset, u8* val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
- int offset1;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- RM9K_WRITE(address_reg, address);
-
- if ((offset & 0x3) == 0) {
- offset1 = 0x3;
- }
- if ((offset & 0x3) == 1) {
- offset1 = 0x2;
- }
- if ((offset & 0x3) == 2) {
- offset1 = 0x1;
- }
- if ((offset & 0x3) == 3) {
- offset1 = 0x0;
- }
- RM9K_READ_8(data_reg + offset1, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int titan_ht_config_write_dword(struct pci_dev *device,
- int offset, u8 val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- RM9K_WRITE(address_reg, address);
- RM9K_WRITE(data_reg, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int titan_ht_config_write_word(struct pci_dev *device,
- int offset, u8 val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- if ((offset & 0x3) == 0)
- offset = 0x2;
- else
- offset = 0x0;
-
- RM9K_WRITE(address_reg, address);
- RM9K_WRITE_16(data_reg + offset, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int titan_ht_config_write_byte(struct pci_dev *device,
- int offset, u8 val)
-{
- int dev, bus, func;
- uint32_t address_reg, data_reg;
- uint32_t address;
- int offset1;
-
- bus = device->bus->number;
- dev = PCI_SLOT(device->devfn);
- func = PCI_FUNC(device->devfn);
-
- /* XXX Need to change the Bus # */
- if (bus > 2)
- address = (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xfc) |
- 0x80000000 | 0x1;
- else
- address = (dev << 11) | (func << 8) | (offset & 0xfc) | 0x80000000;
-
- address_reg = RM9000x2_OCD_HTCFGA;
- data_reg = RM9000x2_OCD_HTCFGD;
-
- RM9K_WRITE(address_reg, address);
-
- if ((offset & 0x3) == 0) {
- offset1 = 0x3;
- }
- if ((offset & 0x3) == 1) {
- offset1 = 0x2;
- }
- if ((offset & 0x3) == 2) {
- offset1 = 0x1;
- }
- if ((offset & 0x3) == 3) {
- offset1 = 0x0;
- }
-
- RM9K_WRITE_8(data_reg + offset1, val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static void titan_pcibios_set_master(struct pci_dev *dev)
-{
- u16 cmd;
- int bus = dev->bus->number;
-
- if (check_titan_htlink())
- titan_ht_config_read_word(dev, PCI_COMMAND, &cmd);
-
- cmd |= PCI_COMMAND_MASTER;
-
- if (check_titan_htlink())
- titan_ht_config_write_word(dev, PCI_COMMAND, cmd);
-}
-
-
-int pcibios_enable_resources(struct pci_dev *dev)
-{
- u16 cmd, old_cmd;
- u8 tmp1;
- int idx;
- struct resource *r;
- int bus = dev->bus->number;
-
- if (check_titan_htlink())
- titan_ht_config_read_word(dev, PCI_COMMAND, &cmd);
-
- old_cmd = cmd;
- for (idx = 0; idx < 6; idx++) {
- r = &dev->resource[idx];
- if (!r->start && r->end) {
- printk(KERN_ERR
- "PCI: Device %s not available because of "
- "resource collisions\n", pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (cmd != old_cmd) {
- if (check_titan_htlink())
- titan_ht_config_write_word(dev, PCI_COMMAND, cmd);
- }
-
- if (check_titan_htlink())
- titan_ht_config_read_byte(dev, PCI_CACHE_LINE_SIZE, &tmp1);
-
- if (tmp1 != 8) {
- printk(KERN_WARNING "PCI setting cache line size to 8 from "
- "%d\n", tmp1);
- }
-
- if (check_titan_htlink())
- titan_ht_config_write_byte(dev, PCI_CACHE_LINE_SIZE, 8);
-
- if (check_titan_htlink())
- titan_ht_config_read_byte(dev, PCI_LATENCY_TIMER, &tmp1);
-
- if (tmp1 < 32 || tmp1 == 0xff) {
- printk(KERN_WARNING "PCI setting latency timer to 32 from %d\n",
- tmp1);
- }
-
- if (check_titan_htlink())
- titan_ht_config_write_byte(dev, PCI_LATENCY_TIMER, 32);
-
- return 0;
-}
-
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- return pcibios_enable_resources(dev);
-}
-
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pci_dev *dev = data;
- resource_size_t start = res->start;
-
- if (res->flags & IORESOURCE_IO) {
- /* We need to avoid collisions with `mirrored' VGA ports
- and other strange ISA hardware, so we always want the
- addresses kilobyte aligned. */
- if (size > 0x100) {
- printk(KERN_ERR "PCI: I/O Region %s/%d too large"
- " (%ld bytes)\n", pci_name(dev),
- dev->resource - res, size);
- }
-
- start = (start + 1024 - 1) & ~(1024 - 1);
- }
-
- return start;
-}
-
-struct pci_ops titan_pci_ops = {
- titan_ht_config_read_byte,
- titan_ht_config_read_word,
- titan_ht_config_read_dword,
- titan_ht_config_write_byte,
- titan_ht_config_write_word,
- titan_ht_config_write_dword
-};
-
-void __init pcibios_fixup_bus(struct pci_bus *c)
-{
- titan_ht_pcibios_fixup_bus(c);
-}
-
-void __init pcibios_init(void)
-{
-
- /* Reset PCI I/O and PCI MEM values */
- /* XXX Need to add the proper values here */
- ioport_resource.start = 0xe0000000;
- ioport_resource.end = 0xe0000000 + 0x20000000 - 1;
- iomem_resource.start = 0xc0000000;
- iomem_resource.end = 0xc0000000 + 0x20000000 - 1;
-
- /* XXX Need to add bus values */
- pci_scan_bus(2, &titan_pci_ops, NULL);
- pci_scan_bus(3, &titan_pci_ops, NULL);
-}
-
-unsigned __init int pcibios_assign_all_busses(void)
-{
- /* We want to use the PCI bus detection done by PMON */
- return 0;
-}
-
-#endif /* CONFIG_HYPERTRANSPORT */
diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c
deleted file mode 100644
index 6590812daa5..00000000000
--- a/arch/mips/pmc-sierra/yosemite/irq.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (C) 2003 PMC-Sierra Inc.
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Second level Interrupt handlers for the PMC-Sierra Titan/Yosemite board
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/timex.h>
-#include <linux/random.h>
-#include <linux/bitops.h>
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/titan_dep.h>
-
-/* Hypertransport specific */
-#define IRQ_ACK_BITS 0x00000000 /* Ack bits */
-
-#define HYPERTRANSPORT_INTA 0x78 /* INTA# */
-#define HYPERTRANSPORT_INTB 0x79 /* INTB# */
-#define HYPERTRANSPORT_INTC 0x7a /* INTC# */
-#define HYPERTRANSPORT_INTD 0x7b /* INTD# */
-
-extern void titan_mailbox_irq(void);
-
-#ifdef CONFIG_HYPERTRANSPORT
-/*
- * Handle hypertransport & SMP interrupts. The interrupt lines are scarce.
- * For interprocessor interrupts, the best thing to do is to use the INTMSG
- * register. We use the same external interrupt line, i.e. INTB3 and monitor
- * another status bit
- */
-static void ll_ht_smp_irq_handler(int irq)
-{
- u32 status = OCD_READ(RM9000x2_OCD_INTP0STATUS4);
-
- /* Ack all the bits that correspond to the interrupt sources */
- if (status != 0)
- OCD_WRITE(RM9000x2_OCD_INTP0STATUS4, IRQ_ACK_BITS);
-
- status = OCD_READ(RM9000x2_OCD_INTP1STATUS4);
- if (status != 0)
- OCD_WRITE(RM9000x2_OCD_INTP1STATUS4, IRQ_ACK_BITS);
-
-#ifdef CONFIG_HT_LEVEL_TRIGGER
- /*
- * Level Trigger Mode only. Send the HT EOI message back to the source.
- */
- switch (status) {
- case 0x1000000:
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA);
- break;
- case 0x2000000:
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB);
- break;
- case 0x4000000:
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC);
- break;
- case 0x8000000:
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD);
- break;
- case 0x0000001:
- /* PLX */
- OCD_WRITE(RM9000x2_OCD_HTEOI, 0x20);
- OCD_WRITE(IRQ_CLEAR_REG, IRQ_ACK_BITS);
- break;
- case 0xf000000:
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTA);
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTB);
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTC);
- OCD_WRITE(RM9000x2_OCD_HTEOI, HYPERTRANSPORT_INTD);
- break;
- }
-#endif /* CONFIG_HT_LEVEL_TRIGGER */
-
- do_IRQ(irq);
-}
-#endif
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int cause = read_c0_cause();
- unsigned int status = read_c0_status();
- unsigned int pending = cause & status;
-
- if (pending & STATUSF_IP7) {
- do_IRQ(7);
- } else if (pending & STATUSF_IP2) {
-#ifdef CONFIG_HYPERTRANSPORT
- ll_ht_smp_irq_handler(2);
-#else
- do_IRQ(2);
-#endif
- } else if (pending & STATUSF_IP3) {
- do_IRQ(3);
- } else if (pending & STATUSF_IP4) {
- do_IRQ(4);
- } else if (pending & STATUSF_IP5) {
-#ifdef CONFIG_SMP
- titan_mailbox_irq();
-#else
- do_IRQ(5);
-#endif
- } else if (pending & STATUSF_IP6) {
- do_IRQ(4);
- }
-}
-
-/*
- * Initialize the next level interrupt handler
- */
-void __init arch_init_irq(void)
-{
- clear_c0_status(ST0_IM);
-
- mips_cpu_irq_init();
- rm7k_cpu_irq_init();
- rm9k_cpu_irq_init();
-}
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
deleted file mode 100644
index 6a2754c4f10..00000000000
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Copyright (C) 2003, 2004 PMC-Sierra Inc.
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- * Copyright (C) 2004 Ralf Baechle
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/pm.h>
-#include <linux/smp.h>
-
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <asm/smp-ops.h>
-#include <asm/bootinfo.h>
-#include <asm/pmon.h>
-
-#ifdef CONFIG_SMP
-extern void prom_grab_secondary(void);
-#else
-#define prom_grab_secondary() do { } while (0)
-#endif
-
-#include "setup.h"
-
-struct callvectors *debug_vectors;
-
-extern unsigned long yosemite_base;
-extern unsigned long cpu_clock_freq;
-
-const char *get_system_type(void)
-{
- return "PMC-Sierra Yosemite";
-}
-
-static void prom_cpu0_exit(void *arg)
-{
- void *nvram = (void *) YOSEMITE_RTC_BASE;
-
- /* Ask the NVRAM/RTC/watchdog chip to assert reset in 1/16 second */
- writeb(0x84, nvram + 0xff7);
-
- /* wait for the watchdog to go off */
- mdelay(100 + (1000 / 16));
-
- /* if the watchdog fails for some reason, let people know */
- printk(KERN_NOTICE "Watchdog reset failed\n");
-}
-
-/*
- * Reset the NVRAM over the local bus
- */
-static void prom_exit(void)
-{
-#ifdef CONFIG_SMP
- if (smp_processor_id())
- /* CPU 1 */
- smp_call_function(prom_cpu0_exit, NULL, 1);
-#endif
- prom_cpu0_exit(NULL);
-}
-
-/*
- * Halt the system
- */
-static void prom_halt(void)
-{
- printk(KERN_NOTICE "\n** You can safely turn off the power\n");
- while (1)
- __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
-}
-
-extern struct plat_smp_ops yos_smp_ops;
-
-/*
- * Init routine which accepts the variables from PMON
- */
-void __init prom_init(void)
-{
- int argc = fw_arg0;
- char **arg = (char **) fw_arg1;
- char **env = (char **) fw_arg2;
- struct callvectors *cv = (struct callvectors *) fw_arg3;
- int i = 0;
-
- /* Callbacks for halt, restart */
- _machine_restart = (void (*)(char *)) prom_exit;
- _machine_halt = prom_halt;
- pm_power_off = prom_halt;
-
- debug_vectors = cv;
- arcs_cmdline[0] = '\0';
-
- /* Get the boot parameters */
- for (i = 1; i < argc; i++) {
- if (strlen(arcs_cmdline) + strlen(arg[i]) + 1 >=
- sizeof(arcs_cmdline))
- break;
-
- strcat(arcs_cmdline, arg[i]);
- strcat(arcs_cmdline, " ");
- }
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- if ((strstr(arcs_cmdline, "console=ttyS")) == NULL)
- strcat(arcs_cmdline, "console=ttyS0,115200");
-#endif
-
- while (*env) {
- if (strncmp("ocd_base", *env, strlen("ocd_base")) == 0)
- yosemite_base =
- simple_strtol(*env + strlen("ocd_base="), NULL,
- 16);
-
- if (strncmp("cpuclock", *env, strlen("cpuclock")) == 0)
- cpu_clock_freq =
- simple_strtol(*env + strlen("cpuclock="), NULL,
- 10);
-
- env++;
- }
-
- prom_grab_secondary();
-
- register_smp_ops(&yos_smp_ops);
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
-{
-}
diff --git a/arch/mips/pmc-sierra/yosemite/py-console.c b/arch/mips/pmc-sierra/yosemite/py-console.c
deleted file mode 100644
index b7f1d9c4a8a..00000000000
--- a/arch/mips/pmc-sierra/yosemite/py-console.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001, 2002, 2004 Ralf Baechle
- */
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/termios.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
-
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <asm/serial.h>
-#include <asm/io.h>
-
-/* SUPERIO uart register map */
-struct yo_uartregs {
- union {
- volatile u8 rbr; /* read only, DLAB == 0 */
- volatile u8 thr; /* write only, DLAB == 0 */
- volatile u8 dll; /* DLAB == 1 */
- } u1;
- union {
- volatile u8 ier; /* DLAB == 0 */
- volatile u8 dlm; /* DLAB == 1 */
- } u2;
- union {
- volatile u8 iir; /* read only */
- volatile u8 fcr; /* write only */
- } u3;
- volatile u8 iu_lcr;
- volatile u8 iu_mcr;
- volatile u8 iu_lsr;
- volatile u8 iu_msr;
- volatile u8 iu_scr;
-} yo_uregs_t;
-
-#define iu_rbr u1.rbr
-#define iu_thr u1.thr
-#define iu_dll u1.dll
-#define iu_ier u2.ier
-#define iu_dlm u2.dlm
-#define iu_iir u3.iir
-#define iu_fcr u3.fcr
-
-#define ssnop() __asm__ __volatile__("sll $0, $0, 1\n");
-#define ssnop_4() do { ssnop(); ssnop(); ssnop(); ssnop(); } while (0)
-
-#define IO_BASE_64 0x9000000000000000ULL
-
-static unsigned char readb_outer_space(unsigned long long phys)
-{
- unsigned long long vaddr = IO_BASE_64 | phys;
- unsigned char res;
- unsigned int sr;
-
- sr = read_c0_status();
- write_c0_status((sr | ST0_KX) & ~ ST0_IE);
- ssnop_4();
-
- __asm__ __volatile__ (
- " .set mips3 \n"
- " ld %0, %1 \n"
- " lbu %0, (%0) \n"
- " .set mips0 \n"
- : "=r" (res)
- : "m" (vaddr));
-
- write_c0_status(sr);
- ssnop_4();
-
- return res;
-}
-
-static void writeb_outer_space(unsigned long long phys, unsigned char c)
-{
- unsigned long long vaddr = IO_BASE_64 | phys;
- unsigned long tmp;
- unsigned int sr;
-
- sr = read_c0_status();
- write_c0_status((sr | ST0_KX) & ~ ST0_IE);
- ssnop_4();
-
- __asm__ __volatile__ (
- " .set mips3 \n"
- " ld %0, %1 \n"
- " sb %2, (%0) \n"
- " .set mips0 \n"
- : "=&r" (tmp)
- : "m" (vaddr), "r" (c));
-
- write_c0_status(sr);
- ssnop_4();
-}
-
-void prom_putchar(char c)
-{
- unsigned long lsr = 0xfd000008ULL + offsetof(struct yo_uartregs, iu_lsr);
- unsigned long thr = 0xfd000008ULL + offsetof(struct yo_uartregs, iu_thr);
-
- while ((readb_outer_space(lsr) & 0x20) == 0);
- writeb_outer_space(thr, c);
-}
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
deleted file mode 100644
index b6472fc88a9..00000000000
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (C) 2003 PMC-Sierra Inc.
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- *
- * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/bcd.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/swap.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/termios.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_8250.h>
-
-#include <asm/time.h>
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <asm/serial.h>
-#include <asm/titan_dep.h>
-#include <asm/m48t37.h>
-
-#include "setup.h"
-
-unsigned char titan_ge_mac_addr_base[6] = {
- // 0x00, 0x03, 0xcc, 0x1d, 0x22, 0x00
- 0x00, 0xe0, 0x04, 0x00, 0x00, 0x21
-};
-
-unsigned long cpu_clock_freq;
-unsigned long yosemite_base;
-
-static struct m48t37_rtc *m48t37_base;
-
-void __init bus_error_init(void)
-{
- /* Do nothing */
-}
-
-
-void read_persistent_clock(struct timespec *ts)
-{
- unsigned int year, month, day, hour, min, sec;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- /* Stop the update to the time */
- m48t37_base->control = 0x40;
-
- year = bcd2bin(m48t37_base->year);
- year += bcd2bin(m48t37_base->century) * 100;
-
- month = bcd2bin(m48t37_base->month);
- day = bcd2bin(m48t37_base->date);
- hour = bcd2bin(m48t37_base->hour);
- min = bcd2bin(m48t37_base->min);
- sec = bcd2bin(m48t37_base->sec);
-
- /* Start the update to the time again */
- m48t37_base->control = 0x00;
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- ts->tv_sec = mktime(year, month, day, hour, min, sec);
- ts->tv_nsec = 0;
-}
-
-int rtc_mips_set_time(unsigned long tim)
-{
- struct rtc_time tm;
- unsigned long flags;
-
- /*
- * Convert to a more useful format -- note months count from 0
- * and years from 1900
- */
- rtc_time_to_tm(tim, &tm);
- tm.tm_year += 1900;
- tm.tm_mon += 1;
-
- spin_lock_irqsave(&rtc_lock, flags);
- /* enable writing */
- m48t37_base->control = 0x80;
-
- /* year */
- m48t37_base->year = bin2bcd(tm.tm_year % 100);
- m48t37_base->century = bin2bcd(tm.tm_year / 100);
-
- /* month */
- m48t37_base->month = bin2bcd(tm.tm_mon);
-
- /* day */
- m48t37_base->date = bin2bcd(tm.tm_mday);
-
- /* hour/min/sec */
- m48t37_base->hour = bin2bcd(tm.tm_hour);
- m48t37_base->min = bin2bcd(tm.tm_min);
- m48t37_base->sec = bin2bcd(tm.tm_sec);
-
- /* day of week -- not really used, but let's keep it up-to-date */
- m48t37_base->day = bin2bcd(tm.tm_wday + 1);
-
- /* disable writing */
- m48t37_base->control = 0x00;
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
-}
-
-void __init plat_time_init(void)
-{
- mips_hpt_frequency = cpu_clock_freq / 2;
-mips_hpt_frequency = 33000000 * 3 * 5;
-}
-
-unsigned long ocd_base;
-
-EXPORT_SYMBOL(ocd_base);
-
-/*
- * Common setup before any secondaries are started
- */
-
-#define TITAN_UART_CLK 3686400
-#define TITAN_SERIAL_BASE_BAUD (TITAN_UART_CLK / 16)
-#define TITAN_SERIAL_IRQ 4
-#define TITAN_SERIAL_BASE 0xfd000008UL
-
-static void __init py_map_ocd(void)
-{
- ocd_base = (unsigned long) ioremap(OCD_BASE, OCD_SIZE);
- if (!ocd_base)
- panic("Mapping OCD failed - game over. Your score is 0.");
-
- /* Kludge for PMON bug ... */
- OCD_WRITE(0x0710, 0x0ffff029);
-}
-
-static void __init py_uart_setup(void)
-{
-#ifdef CONFIG_SERIAL_8250
- struct uart_port up;
-
- /*
- * Register to interrupt zero because we share the interrupt with
- * the serial driver which we don't properly support yet.
- */
- memset(&up, 0, sizeof(up));
- up.membase = (unsigned char *) ioremap(TITAN_SERIAL_BASE, 8);
- up.irq = TITAN_SERIAL_IRQ;
- up.uartclk = TITAN_UART_CLK;
- up.regshift = 0;
- up.iotype = UPIO_MEM;
- up.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
- up.line = 0;
-
- if (early_serial_setup(&up))
- printk(KERN_ERR "Early serial init of port 0 failed\n");
-#endif /* CONFIG_SERIAL_8250 */
-}
-
-static void __init py_rtc_setup(void)
-{
- m48t37_base = ioremap(YOSEMITE_RTC_BASE, YOSEMITE_RTC_SIZE);
- if (!m48t37_base)
- printk(KERN_ERR "Mapping the RTC failed\n");
-}
-
-/* Not only time init but that's what the hook it's called through is named */
-static void __init py_late_time_init(void)
-{
- py_map_ocd();
- py_uart_setup();
- py_rtc_setup();
-}
-
-void __init plat_mem_setup(void)
-{
- late_time_init = py_late_time_init;
-
- /* Add memory regions */
- add_memory_region(0x00000000, 0x10000000, BOOT_MEM_RAM);
-
-#if 0 /* XXX Crash ... */
- OCD_WRITE(RM9000x2_OCD_HTSC,
- OCD_READ(RM9000x2_OCD_HTSC) | HYPERTRANSPORT_ENABLE);
-
- /* Set the BAR. Shifted mode */
- OCD_WRITE(RM9000x2_OCD_HTBAR0, HYPERTRANSPORT_BAR0_ADDR);
- OCD_WRITE(RM9000x2_OCD_HTMASK0, HYPERTRANSPORT_SIZE0);
-#endif
-}
diff --git a/arch/mips/pmc-sierra/yosemite/setup.h b/arch/mips/pmc-sierra/yosemite/setup.h
deleted file mode 100644
index 1a01abfc7d3..00000000000
--- a/arch/mips/pmc-sierra/yosemite/setup.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2003, 04 PMC-Sierra
- * Author: Manish Lachwani (lachwani@pmc-sierra.com)
- * Copyright 2004 Ralf Baechle <ralf@linux-mips.org>
- *
- * Board specific definitions for the PMC-Sierra Yosemite
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef __SETUP_H__
-#define __SETUP_H__
-
-/* M48T37 RTC + NVRAM */
-#define YOSEMITE_RTC_BASE 0xfc800000
-#define YOSEMITE_RTC_SIZE 0x00800000
-
-#define HYPERTRANSPORT_BAR0_ADDR 0x00000006
-#define HYPERTRANSPORT_SIZE0 0x0fffffff
-#define HYPERTRANSPORT_BAR0_ATTR 0x00002000
-
-#define HYPERTRANSPORT_ENABLE 0x6
-
-/*
- * EEPROM Size
- */
-#define TITAN_ATMEL_24C32_SIZE 32768
-#define TITAN_ATMEL_24C64_SIZE 65536
-
-#endif /* __SETUP_H__ */
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
deleted file mode 100644
index 5edab2bc6fc..00000000000
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ /dev/null
@@ -1,185 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-
-#include <asm/pmon.h>
-#include <asm/titan_dep.h>
-#include <asm/time.h>
-
-#define LAUNCHSTACK_SIZE 256
-
-static __cpuinitdata arch_spinlock_t launch_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
-static unsigned long secondary_sp __cpuinitdata;
-static unsigned long secondary_gp __cpuinitdata;
-
-static unsigned char launchstack[LAUNCHSTACK_SIZE] __initdata
- __attribute__((aligned(2 * sizeof(long))));
-
-static void __init prom_smp_bootstrap(void)
-{
- local_irq_disable();
-
- while (arch_spin_is_locked(&launch_lock));
-
- __asm__ __volatile__(
- " move $sp, %0 \n"
- " move $gp, %1 \n"
- " j smp_bootstrap \n"
- :
- : "r" (secondary_sp), "r" (secondary_gp));
-}
-
-/*
- * PMON is a fragile beast. It'll blow up once the mappings it's littering
- * right into the middle of KSEG3 are blown away so we have to grab the slave
- * core early and keep it in a waiting loop.
- */
-void __init prom_grab_secondary(void)
-{
- arch_spin_lock(&launch_lock);
-
- pmon_cpustart(1, &prom_smp_bootstrap,
- launchstack + LAUNCHSTACK_SIZE, 0);
-}
-
-void titan_mailbox_irq(void)
-{
- int cpu = smp_processor_id();
- unsigned long status;
-
- switch (cpu) {
- case 0:
- status = OCD_READ(RM9000x2_OCD_INTP0STATUS3);
- OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, status);
-
- if (status & 0x2)
- smp_call_function_interrupt();
- if (status & 0x4)
- scheduler_ipi();
- break;
-
- case 1:
- status = OCD_READ(RM9000x2_OCD_INTP1STATUS3);
- OCD_WRITE(RM9000x2_OCD_INTP1CLEAR3, status);
-
- if (status & 0x2)
- smp_call_function_interrupt();
- if (status & 0x4)
- scheduler_ipi();
- break;
- }
-}
-
-/*
- * Send inter-processor interrupt
- */
-static void yos_send_ipi_single(int cpu, unsigned int action)
-{
- /*
- * Generate an INTMSG so that it can be sent over to the
- * destination CPU. The INTMSG will put the STATUS bits
- * based on the action desired. An alternative strategy
- * is to write to the Interrupt Set register, read the
- * Interrupt Status register and clear the Interrupt
- * Clear register. The latter is preferred.
- */
- switch (action) {
- case SMP_RESCHEDULE_YOURSELF:
- if (cpu == 1)
- OCD_WRITE(RM9000x2_OCD_INTP1SET3, 4);
- else
- OCD_WRITE(RM9000x2_OCD_INTP0SET3, 4);
- break;
-
- case SMP_CALL_FUNCTION:
- if (cpu == 1)
- OCD_WRITE(RM9000x2_OCD_INTP1SET3, 2);
- else
- OCD_WRITE(RM9000x2_OCD_INTP0SET3, 2);
- break;
- }
-}
-
-static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
- unsigned int i;
-
- for_each_cpu(i, mask)
- yos_send_ipi_single(i, action);
-}
-
-/*
- * After we've done initial boot, this function is called to allow the
- * board code to clean up state, if needed
- */
-static void __cpuinit yos_init_secondary(void)
-{
-}
-
-static void __cpuinit yos_smp_finish(void)
-{
- set_c0_status(ST0_CO | ST0_IM | ST0_IE);
-}
-
-/* Hook for after all CPUs are online */
-static void yos_cpus_done(void)
-{
-}
-
-/*
- * Firmware CPU startup hook
- * Complicated by PMON's weird interface which tries to mimic the UNIX fork.
- * It launches the next available CPU and copies some information on the
- * stack so the first thing we do is throw away that stuff and load useful
- * values into the registers ...
- */
-static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
-{
- unsigned long gp = (unsigned long) task_thread_info(idle);
- unsigned long sp = __KSTK_TOS(idle);
-
- secondary_sp = sp;
- secondary_gp = gp;
-
- arch_spin_unlock(&launch_lock);
-}
-
-/*
- * Detect available CPUs, populate cpu_possible_mask before smp_init
- *
- * We don't want to start the secondary CPU yet nor do we have a nice probing
- * feature in PMON so we just assume presence of the secondary core.
- */
-static void __init yos_smp_setup(void)
-{
- int i;
-
- init_cpu_possible(cpu_none_mask);
-
- for (i = 0; i < 2; i++) {
- set_cpu_possible(i, true);
- __cpu_number_map[i] = i;
- __cpu_logical_map[i] = i;
- }
-}
-
-static void __init yos_prepare_cpus(unsigned int max_cpus)
-{
- /*
- * Be paranoid. Enable the IPI only if we're really about to go SMP.
- */
- if (num_possible_cpus())
- set_c0_status(STATUSF_IP5);
-}
-
-struct plat_smp_ops yos_smp_ops = {
- .send_ipi_single = yos_send_ipi_single,
- .send_ipi_mask = yos_send_ipi_mask,
- .init_secondary = yos_init_secondary,
- .smp_finish = yos_smp_finish,
- .cpus_done = yos_cpus_done,
- .boot_secondary = yos_boot_secondary,
- .smp_setup = yos_smp_setup,
- .prepare_cpus = yos_prepare_cpus,
-};
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index 1cf5abbef71..c6979353980 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -69,40 +69,6 @@ char *prom_getenv(char *envname)
return result;
}
-/* TODO: Verify on linux-mips mailing list that the following two */
-/* functions are correct */
-/* TODO: Copy NMI and EJTAG exception vectors to memory from the */
-/* BootROM exception vectors. Flush their cache entries. test it. */
-
-static void __init mips_nmi_setup(void)
-{
- void *base;
-#if defined(CONFIG_CPU_MIPS32_R1)
- base = cpu_has_veic ?
- (void *)(CAC_BASE + 0xa80) :
- (void *)(CAC_BASE + 0x380);
-#elif defined(CONFIG_CPU_MIPS32_R2)
- base = (void *)0xbfc00000;
-#else
-#error NMI exception handler address not defined
-#endif
-}
-
-static void __init mips_ejtag_setup(void)
-{
- void *base;
-
-#if defined(CONFIG_CPU_MIPS32_R1)
- base = cpu_has_veic ?
- (void *)(CAC_BASE + 0xa00) :
- (void *)(CAC_BASE + 0x300);
-#elif defined(CONFIG_CPU_MIPS32_R2)
- base = (void *)0xbfc00480;
-#else
-#error EJTAG exception handler address not defined
-#endif
-}
-
void __init prom_init(void)
{
int prom_argc;
@@ -113,9 +79,6 @@ void __init prom_init(void)
_prom_envp = (int *) fw_arg2;
_prom_memsize = (unsigned long) fw_arg3;
- board_nmi_handler_setup = mips_nmi_setup;
- board_ejtag_handler_setup = mips_ejtag_setup;
-
if (prom_argc == 1) {
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
strlcat(arcs_cmdline, prom_argv, COMMAND_LINE_SIZE);
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index d7c26d00cfe..a757ded437c 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -72,12 +72,11 @@ void __init prom_setup_cmdline(void)
static char cmd_line[COMMAND_LINE_SIZE] __initdata;
char *cp, *board;
int prom_argc;
- char **prom_argv, **prom_envp;
+ char **prom_argv;
int i;
prom_argc = fw_arg0;
prom_argv = (char **) fw_arg1;
- prom_envp = (char **) fw_arg2;
cp = cmd_line;
/* Note: it is common that parameters start
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index da44ccb2082..4a6057b35b9 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -73,12 +73,10 @@ static char __init *decode_eisa_sig(unsigned long addr)
static irqreturn_t ip22_eisa_intr(int irq, void *dev_id)
{
- u8 eisa_irq;
- u8 dma1, dma2;
+ u8 eisa_irq = inb(EIU_INTRPT_ACK);
- eisa_irq = inb(EIU_INTRPT_ACK);
- dma1 = inb(EISA_DMA1_STATUS);
- dma2 = inb(EISA_DMA2_STATUS);
+ inb(EISA_DMA1_STATUS);
+ inb(EISA_DMA2_STATUS);
if (eisa_irq < EISA_MAX_IRQ) {
do_IRQ(eisa_irq);
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index 3cd937e0e9a..01cc1a749c7 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -74,7 +74,7 @@ config SIBYTE_SB1xxx_SOC
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
- select CFE
+ select FW_CFE
select SYS_HAS_EARLY_PRINTK
choice
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 413f17f8e89..d6c7bd4b5ab 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -15,12 +15,12 @@
#include <linux/fb.h>
#include <linux/screen_info.h>
-#ifdef CONFIG_ARC
+#ifdef CONFIG_FW_ARC
#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>
#endif
-#ifdef CONFIG_SNIPROM
+#ifdef CONFIG_FW_SNIPROM
#include <asm/mipsprom.h>
#endif
@@ -37,7 +37,7 @@ extern void sni_machine_power_off(void);
static void __init sni_display_setup(void)
{
-#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_ARC)
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
struct screen_info *si = &screen_info;
DISPLAY_STATUS *di;
@@ -56,7 +56,7 @@ static void __init sni_display_setup(void)
static void __init sni_console_setup(void)
{
-#ifndef CONFIG_ARC
+#ifndef CONFIG_FW_ARC
char *ctype;
char *cdev;
char *baud;
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 4efd9185f29..b14ee53581a 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -341,7 +341,7 @@ static void __devinit quirk_slc90e66_ide(struct pci_dev *dev)
static void __devinit tc35815_fixup(struct pci_dev *dev)
{
- /* This device may have PM registers but not they are not suported. */
+ /* This device may have PM registers but they are not supported. */
if (dev->pm_cap) {
dev_info(&dev->dev, "PM disabled\n");
dev->pm_cap = 0;
diff --git a/arch/mips/wrppmc/pci.c b/arch/mips/wrppmc/pci.c
index d06192faeb7..8b8a0e1a40c 100644
--- a/arch/mips/wrppmc/pci.c
+++ b/arch/mips/wrppmc/pci.c
@@ -38,10 +38,8 @@ static struct pci_controller hose_0 = {
static int __init gt64120_pci_init(void)
{
- u32 tmp;
-
- tmp = GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */
- tmp = GT_READ(GT_PCI0_BARE_OFS);
+ (void) GT_READ(GT_PCI0_CMD_OFS); /* Huh??? -- Ralf */
+ (void) GT_READ(GT_PCI0_BARE_OFS);
/* reset the whole PCI I/O space range */
ioport_resource.start = GT_PCI_IO_BASE;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 72471744a91..aa03f2e1338 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,8 +8,6 @@ config MN10300
select HAVE_ARCH_KGDB
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
select GENERIC_CLOCKEVENTS
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select MODULES_USE_ELF_RELA
config AM33_2
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index cabf8ba73b2..e6d2ed4ba68 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -43,7 +43,6 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/mn10300/include/uapi/asm/signal.h b/arch/mn10300/include/uapi/asm/signal.h
index 08dcd6a8561..f423a08d7ee 100644
--- a/arch/mn10300/include/uapi/asm/signal.h
+++ b/arch/mn10300/include/uapi/asm/signal.h
@@ -92,12 +92,6 @@ typedef unsigned long sigset_t;
#define SA_RESTORER 0x04000000
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 4ebf117c328..bd9ada693f9 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -95,7 +95,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
* checks at dup_mmap(), exec(), and other mmlist addition points
* could be used. The locking scheme was chosen on the basis of
* manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
*/
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e7f1a2993f7..0ac66f67521 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -22,8 +22,6 @@ config OPENRISC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config MMU
def_bool y
@@ -146,7 +144,7 @@ config DEBUG_STACKOVERFLOW
help
Make extra checks for space available on stack in some
critical functions. This will cause kernel to run a bit slower,
- but will catch most of kernel stack overruns and exit gracefuly.
+ but will catch most of kernel stack overruns and exit gracefully.
Say Y if you are unsure.
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 8971026e1c6..f20d01d9aaf 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -32,6 +32,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
+generic-y += kvm_para.h
generic-y += local.h
generic-y += mman.h
generic-y += module.h
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index 07f5299d6c2..7c691399da3 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -30,6 +30,7 @@
#define PIO_MASK 0
#include <asm-generic/io.h>
+#include <asm/pgtable.h>
extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
pgprot_t prot);
diff --git a/arch/openrisc/include/uapi/asm/kvm_para.h b/arch/openrisc/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b95..00000000000
--- a/arch/openrisc/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index 5082b806632..ce40b71df00 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -20,7 +20,6 @@
#define sys_mmap2 sys_mmap_pgoff
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 1a242a0d758..ddb73685586 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -34,15 +34,11 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/thread_info.h>
+#include <linux/kbuild.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
int main(void)
{
/* offsets into the task_struct */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e688a2be30f..b77feffbade 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -22,8 +22,6 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS
help
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 1efef41659c..3043194547c 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -163,7 +163,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index b1ddaa24337..a2fa297196b 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -71,12 +71,6 @@
#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 5e34ccf39a4..2a625fb063e 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -214,8 +214,6 @@ static inline int reassemble_22(int as22)
void *module_alloc(unsigned long size)
{
- if (size == 0)
- return NULL;
/* using RWX means less protection for modules, but it's
* easier than trying to map the text, data, init_text and
* init_data correctly */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 951a517a1a0..17903f1f356 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -141,10 +141,8 @@ config PPC
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
- select GENERIC_KERNEL_THREAD
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS
config EARLY_PRINTK
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 159e94f4b22..b639852116f 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -181,7 +181,7 @@ $(BOOT_TARGETS2): vmlinux
bootwrapper_install:
$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
define archhelp
diff --git a/arch/powerpc/boot/dts/a3m071.dts b/arch/powerpc/boot/dts/a3m071.dts
new file mode 100644
index 00000000000..877a28cb77e
--- /dev/null
+++ b/arch/powerpc/boot/dts/a3m071.dts
@@ -0,0 +1,144 @@
+/*
+ * a3m071 board Device Tree Source
+ *
+ * Copyright 2012 Stefan Roese <sr@denx.de>
+ *
+ * Copyright (C) 2011 DENX Software Engineering GmbH
+ * Heiko Schocher <hs@denx.de>
+ *
+ * Copyright (C) 2007 Semihalf
+ * Marian Balakowicz <m8@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "mpc5200b.dtsi"
+
+/ {
+ model = "anonymous,a3m071";
+ compatible = "anonymous,a3m071";
+
+ soc5200@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc5200b-immr";
+ ranges = <0 0xf0000000 0x0000c000>;
+ reg = <0xf0000000 0x00000100>;
+ bus-frequency = <0>; /* From boot loader */
+ system-frequency = <0>; /* From boot loader */
+
+ timer@600 {
+ fsl,has-wdt;
+ };
+
+ spi@f00 {
+ status = "disabled";
+ };
+
+ usb: usb@1000 {
+ status = "disabled";
+ };
+
+ psc@2000 {
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2000 0x100>;
+ interrupts = <2 1 0>;
+ };
+
+ psc@2200 {
+ status = "disabled";
+ };
+
+ psc@2400 {
+ status = "disabled";
+ };
+
+ psc@2600 {
+ status = "disabled";
+ };
+
+ psc@2800 {
+ status = "disabled";
+ };
+
+ psc@2c00 { // PSC6
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2c00 0x100>;
+ interrupts = <2 4 0>;
+ };
+
+ ethernet@3000 {
+ phy-handle = <&phy0>;
+ };
+
+ mdio@3000 {
+ phy0: ethernet-phy@3 {
+ reg = <0x03>;
+ };
+ };
+
+ ata@3a00 {
+ status = "disabled";
+ };
+
+ i2c@3d00 {
+ status = "disabled";
+ };
+
+ i2c@3d40 {
+ status = "disabled";
+ };
+ };
+
+ localbus {
+ compatible = "fsl,mpc5200b-lpb","simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0xfc000000 0x02000000
+ 3 0 0xe9000000 0x00080000
+ 5 0 0xe8000000 0x00010000>;
+
+ flash@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0 0x0 0x02000000>;
+ compatible = "cfi-flash";
+ bank-width = <2>;
+ partition@0x0 {
+ label = "u-boot";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+ partition@0x00040000 {
+ label = "env";
+ reg = <0x00040000 0x00020000>;
+ };
+ partition@0x00060000 {
+ label = "dtb";
+ reg = <0x00060000 0x00020000>;
+ };
+ partition@0x00080000 {
+ label = "kernel";
+ reg = <0x00080000 0x00500000>;
+ };
+ partition@0x00580000 {
+ label = "root";
+ reg = <0x00580000 0x00A80000>;
+ };
+ };
+
+ fpga@3,0 {
+ compatible = "anonymous,a3m071-fpga";
+ reg = <3 0x0 0x00080000
+ 5 0x0 0x00010000>;
+ interrupts = <0 0 3>; /* level low */
+ };
+ };
+
+ pci@f0000d00 {
+ status = "disabled";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 64b6abea846..5d7205b7bb0 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -354,4 +354,5 @@
/include/ "qoriq-sata2-0.dtsi"
/include/ "qoriq-sata2-1.dtsi"
/include/ "qoriq-sec4.2-0.dtsi"
+/include/ "qoriq-raid1.0-0.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
index 0a198b0a77e..8df47fc45ab 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
@@ -73,6 +73,12 @@
rtic_c = &rtic_c;
rtic_d = &rtic_d;
sec_mon = &sec_mon;
+
+ raideng = &raideng;
+ raideng_jr0 = &raideng_jr0;
+ raideng_jr1 = &raideng_jr1;
+ raideng_jr2 = &raideng_jr2;
+ raideng_jr3 = &raideng_jr3;
};
cpus {
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi
new file mode 100644
index 00000000000..8d2e8aa6cf8
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-raid1.0-0.dtsi
@@ -0,0 +1,85 @@
+/*
+ * QorIQ RAID 1.0 device tree stub [ controller @ offset 0x320000 ]
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+raideng: raideng@320000 {
+ compatible = "fsl,raideng-v1.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x320000 0x10000>;
+ ranges = <0 0x320000 0x10000>;
+
+ raideng_jq0@1000 {
+ compatible = "fsl,raideng-v1.0-job-queue";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1000 0x1000>;
+ ranges = <0x0 0x1000 0x1000>;
+
+ raideng_jr0: jr@0 {
+ compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
+ reg = <0x0 0x400>;
+ interrupts = <139 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ raideng_jr1: jr@400 {
+ compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring";
+ reg = <0x400 0x400>;
+ interrupts = <140 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+ };
+
+ raideng_jq1@2000 {
+ compatible = "fsl,raideng-v1.0-job-queue";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x2000 0x1000>;
+ ranges = <0x0 0x2000 0x1000>;
+
+ raideng_jr2: jr@0 {
+ compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
+ reg = <0x0 0x400>;
+ interrupts = <141 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ raideng_jr3: jr@400 {
+ compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring";
+ reg = <0x400 0x400>;
+ interrupts = <142 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+ };
+};
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1f710a32ffa..5b8e1e50827 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -2,7 +2,7 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_SMP=y
-CONFIG_NR_CPUS=1024
+CONFIG_NR_CPUS=2048
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 2d62b484b3f..650757c300d 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,5 +1,4 @@
-
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index dc2cf9c6d9e..ef918a2328b 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
/* Macro for generating the ***_bits() functions */
@@ -83,22 +81,22 @@ DEFINE_BITOP(change_bits, xor, "", "")
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
- set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
- clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
- clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}
static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
- change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+ change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
@@ -136,26 +134,26 @@ DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
static __inline__ int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_set_bits_lock(BITOP_MASK(nr),
- addr + BITOP_WORD(nr)) != 0;
+ return test_and_set_bits_lock(BIT_MASK(nr),
+ addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
static __inline__ int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+ return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
@@ -280,61 +278,8 @@ unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/find.h>
/* Little-endian versions */
+#include <asm-generic/bitops/le.h>
-static __inline__ int test_bit_le(unsigned long nr,
- __const__ void *addr)
-{
- __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
- return (tmp[nr >> 3] >> (nr & 7)) & 1;
-}
-
-static inline void set_bit_le(int nr, void *addr)
-{
- set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void clear_bit_le(int nr, void *addr)
-{
- clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
- return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
- return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
- return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
- return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-
-unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
/* Bitmap functions for the ext2 filesystem */
#include <asm-generic/bitops/ext2-atomic-setbit.h>
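
The conversion above swaps the private BITOP_MASK/BITOP_WORD macros for the kernel-wide helpers, and the hand-rolled little-endian accessors for asm-generic/bitops/le.h. For reference, the generic forms are equivalent to what is being deleted; a minimal sketch, assuming they live in include/linux/bitops.h at this point:

    /* generic equivalents of the deleted BITOP_MASK()/BITOP_WORD() */
    #define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
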
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 21a0687b8c4..76f81bd64f1 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -401,6 +401,14 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
+#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -421,8 +429,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
- CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
- CPU_FTR_VSX)
+ CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | CPU_FTRS_CELL | \
+ CPU_FTRS_PA6T | CPU_FTR_VSX)
#endif
#else
enum {
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 154c067761b..607e4eeeb69 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Freescale Semicondutor, Inc.
+ * Copyright 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 78160874809..e27e9ad6818 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -172,6 +172,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
if (dma_ops->mapping_error)
return dma_ops->mapping_error(dev, dma_addr);
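
The added debug_dma_mapping_error() call records, for CONFIG_DMA_API_DEBUG, that the driver did check its mapping result. A hedged driver-side sketch of the pattern this instruments (the function name and the dev/buf/len parameters are placeholders):

    /* map a buffer and check the result so DMA debugging sees the check */
    static int map_one_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;	/* nothing was mapped, nothing to unmap */
            /* ... hand 'handle' to the device, dma_unmap_single() when done ... */
            return 0;
    }
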
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index bf2c06c3387..d3d634274d2 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -50,64 +50,13 @@
#ifndef _EPAPR_HCALLS_H
#define _EPAPR_HCALLS_H
+#include <uapi/asm/epapr_hcalls.h>
+
+#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
-#define EV_BYTE_CHANNEL_SEND 1
-#define EV_BYTE_CHANNEL_RECEIVE 2
-#define EV_BYTE_CHANNEL_POLL 3
-#define EV_INT_SET_CONFIG 4
-#define EV_INT_GET_CONFIG 5
-#define EV_INT_SET_MASK 6
-#define EV_INT_GET_MASK 7
-#define EV_INT_IACK 9
-#define EV_INT_EOI 10
-#define EV_INT_SEND_IPI 11
-#define EV_INT_SET_TASK_PRIORITY 12
-#define EV_INT_GET_TASK_PRIORITY 13
-#define EV_DOORBELL_SEND 14
-#define EV_MSGSND 15
-#define EV_IDLE 16
-
-/* vendor ID: epapr */
-#define EV_LOCAL_VENDOR_ID 0 /* for private use */
-#define EV_EPAPR_VENDOR_ID 1
-#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
-#define EV_IBM_VENDOR_ID 3 /* IBM */
-#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
-#define EV_ENEA_VENDOR_ID 5 /* Enea */
-#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
-#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
-#define EV_KVM_VENDOR_ID 42 /* KVM */
-
-/* The max number of bytes that a byte channel can send or receive per call */
-#define EV_BYTE_CHANNEL_MAX_BYTES 16
-
-
-#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
-#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
-
-/* epapr error codes */
-#define EV_EPERM 1 /* Operation not permitted */
-#define EV_ENOENT 2 /* Entry Not Found */
-#define EV_EIO 3 /* I/O error occured */
-#define EV_EAGAIN 4 /* The operation had insufficient
- * resources to complete and should be
- * retried
- */
-#define EV_ENOMEM 5 /* There was insufficient memory to
- * complete the operation */
-#define EV_EFAULT 6 /* Bad guest address */
-#define EV_ENODEV 7 /* No such device */
-#define EV_EINVAL 8 /* An argument supplied to the hcall
- was out of range or invalid */
-#define EV_INTERNAL 9 /* An internal error occured */
-#define EV_CONFIG 10 /* A configuration error was detected */
-#define EV_INVALID_STATE 11 /* The object is in an invalid state */
-#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
-#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
-
/*
* Hypercall register clobber list
*
@@ -193,7 +142,7 @@ static inline unsigned int ev_int_set_config(unsigned int interrupt,
r5 = priority;
r6 = destination;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
: : EV_HCALL_CLOBBERS4
);
@@ -222,7 +171,7 @@ static inline unsigned int ev_int_get_config(unsigned int interrupt,
r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
r3 = interrupt;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
: : EV_HCALL_CLOBBERS4
);
@@ -252,7 +201,7 @@ static inline unsigned int ev_int_set_mask(unsigned int interrupt,
r3 = interrupt;
r4 = mask;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -277,7 +226,7 @@ static inline unsigned int ev_int_get_mask(unsigned int interrupt,
r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
r3 = interrupt;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -305,7 +254,7 @@ static inline unsigned int ev_int_eoi(unsigned int interrupt)
r11 = EV_HCALL_TOKEN(EV_INT_EOI);
r3 = interrupt;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -344,7 +293,7 @@ static inline unsigned int ev_byte_channel_send(unsigned int handle,
r7 = be32_to_cpu(p[2]);
r8 = be32_to_cpu(p[3]);
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3),
"+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
: : EV_HCALL_CLOBBERS6
@@ -383,7 +332,7 @@ static inline unsigned int ev_byte_channel_receive(unsigned int handle,
r3 = handle;
r4 = *count;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4),
"=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
: : EV_HCALL_CLOBBERS6
@@ -421,7 +370,7 @@ static inline unsigned int ev_byte_channel_poll(unsigned int handle,
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
r3 = handle;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
: : EV_HCALL_CLOBBERS3
);
@@ -454,7 +403,7 @@ static inline unsigned int ev_int_iack(unsigned int handle,
r11 = EV_HCALL_TOKEN(EV_INT_IACK);
r3 = handle;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -478,7 +427,7 @@ static inline unsigned int ev_doorbell_send(unsigned int handle)
r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
r3 = handle;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -498,12 +447,12 @@ static inline unsigned int ev_idle(void)
r11 = EV_HCALL_TOKEN(EV_IDLE);
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "=r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
-
-#endif
+#endif /* !__ASSEMBLY__ */
+#endif /* _EPAPR_HCALLS_H */
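
Replacing the raw "sc 1" with a branch to epapr_hypercall_start lets a single kernel image run both with and without a hypervisor: the stub can stay a harmless return on bare metal and be patched at boot with whatever instruction sequence the hypervisor advertises. A rough sketch of that boot-time patching, with names assumed from the companion epapr_paravirt code rather than taken from this diff:

    extern u32 epapr_hypercall_start[];	/* stub provided by accompanying asm */

    static int __init patch_epapr_stub(struct device_node *hyper_node)
    {
            const u32 *insts;
            int i, len;

            /* /hypervisor carries the hcall opcode sequence per ePAPR */
            insts = of_get_property(hyper_node, "hcall-instructions", &len);
            if (!insts)
                    return -ENODEV;
            for (i = 0; i < len / 4; i++)
                    patch_instruction(epapr_hypercall_start + i, insts[i]);
            return 0;
    }
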
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index a43c1473915..ad708dda3ba 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -48,6 +48,35 @@
#define EX_LR 72
#define EX_CFAR 80
+#ifdef CONFIG_RELOCATABLE
+#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ ld r12,PACAKBASE(r13); /* get high part of &label */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label); \
+ mtlr r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ li r10,MSR_RI; \
+ mtmsrd r10,1; /* Set RI (EE=0) */ \
+ blr;
+#else
+/* If not relocatable, we can jump directly -- and save messing with LR */
+#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ li r10,MSR_RI; \
+ mtmsrd r10,1; /* Set RI (EE=0) */ \
+ b label;
+#endif
+
+/*
+ * As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
+ * so no need to rfid. Save lr in case we're CONFIG_RELOCATABLE, in which
+ * case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
+ */
+#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
+
/*
* We're short on space and time in the exception prolog, so we can't
* use the normal SET_REG_IMMEDIATE macro. Normally we just need the
@@ -55,12 +84,29 @@
* word.
*/
#define LOAD_HANDLER(reg, label) \
- addi reg,reg,(label)-_stext; /* virt addr of handler ... */
+ /* Handlers must be within 64K of kbase, which must be 64k aligned */ \
+ ori reg,reg,(label)-_stext; /* virt addr of handler ... */
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
+#if defined(CONFIG_RELOCATABLE)
+/*
+ * If we support interrupts with relocation on AND we're a relocatable
+ * kernel, we need to use LR to get to the 2nd level handler. So, save/restore
+ * it when required.
+ */
+#define SAVE_LR(reg, area) mflr reg ; std reg,area+EX_LR(r13)
+#define GET_LR(reg, area) ld reg,area+EX_LR(r13)
+#define RESTORE_LR(reg, area) ld reg,area+EX_LR(r13) ; mtlr reg
+#else
+/* ...else LR is unused and in register. */
+#define SAVE_LR(reg, area)
+#define GET_LR(reg, area) mflr reg
+#define RESTORE_LR(reg, area)
+#endif
+
#define __EXCEPTION_PROLOG_1(area, extra, vec) \
GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
@@ -69,6 +115,7 @@
mfspr r10,SPRN_CFAR; \
std r10,area+EX_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ SAVE_LR(r10, area); \
mfcr r9; \
extra(vec); \
std r11,area+EX_R11(r13); \
@@ -169,6 +216,7 @@ do_kvm_##n: \
sth r1,PACA_TRAP_SAVE(r13); \
std r3,area+EX_R3(r13); \
addi r3,r13,area; /* r3 -> where regs are saved*/ \
+ RESTORE_LR(r1, area); \
b bad_stack; \
3: std r9,_CCR(r1); /* save CR in stackframe */ \
std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -194,8 +242,8 @@ do_kvm_##n: \
ld r10,area+EX_CFAR(r13); \
std r10,ORIG_GPR3(r1); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_LR(r9,area); /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
- mflr r9; /* save LR in stackframe */ \
std r9,_LINK(r1); \
mfctr r10; /* save CTR in stackframe */ \
std r10,_CTR(r1); \
@@ -232,6 +280,26 @@ label##_hv: \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
+#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ HMT_MEDIUM; \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, KVMTEST_PR, vec)
+
+#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ HMT_MEDIUM; \
+ /* No guest interrupts come through here */ \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, KVMTEST, vec)
+
/* This associate vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
@@ -257,6 +325,9 @@ label##_hv: \
KVMTEST(vec); \
_SOFTEN_TEST(EXC_STD, vec)
+#define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec)
+#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
+
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
HMT_MEDIUM; \
SET_SCRATCH0(r13); /* save r13 */ \
@@ -279,6 +350,28 @@ label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
+#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
+#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)
+
+#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_NOTEST_PR)
+
+#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_NOTEST_HV)
+
/*
* Our exception common code can be passed various "additions"
* to specify the behaviour of interrupts, whether to kick the
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index ad0b751b0d7..973cc3be011 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -49,6 +49,7 @@
#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
+#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#ifndef __ASSEMBLY__
@@ -62,7 +63,8 @@ enum {
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
- FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
+ FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
+ FW_FEATURE_SET_MODE,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0,
diff --git a/arch/powerpc/include/asm/fsl_gtm.h b/arch/powerpc/include/asm/fsl_gtm.h
index 8e8c9b5032d..3b05808f9ca 100644
--- a/arch/powerpc/include/asm/fsl_gtm.h
+++ b/arch/powerpc/include/asm/fsl_gtm.h
@@ -1,7 +1,7 @@
/*
* Freescale General-purpose Timers Module
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright 2006 Freescale Semiconductor, Inc.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) MontaVista Software, Inc. 2008.
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index dd5ba2c2277..77ced0b3d81 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -71,7 +71,9 @@ struct ccsr_guts {
u8 res0c4[0x224 - 0xc4];
__be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
__be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
- u8 res22c[0x800 - 0x22c];
+ u8 res22c[0x604 - 0x22c];
+ __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
+ u8 res608[0x800 - 0x608];
__be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
u8 res804[0x900 - 0x804];
__be32 ircr; /* 0x.0900 - Infrared Control Register */
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
index 922d9b5fe3d..3abb58394da 100644
--- a/arch/powerpc/include/asm/fsl_hcalls.h
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -96,7 +96,7 @@ static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
r3 = vcpu_mask;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -151,7 +151,7 @@ static inline unsigned int fh_partition_get_dtprop(int handle,
r9 = (uint32_t)propvalue_addr;
r10 = *propvalue_len;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11),
"+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
"+r" (r8), "+r" (r9), "+r" (r10)
@@ -205,7 +205,7 @@ static inline unsigned int fh_partition_set_dtprop(int handle,
r9 = (uint32_t)propvalue_addr;
r10 = propvalue_len;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11),
"+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
"+r" (r8), "+r" (r9), "+r" (r10)
@@ -229,7 +229,7 @@ static inline unsigned int fh_partition_restart(unsigned int partition)
r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
r3 = partition;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -262,7 +262,7 @@ static inline unsigned int fh_partition_get_status(unsigned int partition,
r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
r3 = partition;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -295,7 +295,7 @@ static inline unsigned int fh_partition_start(unsigned int partition,
r4 = entry_point;
r5 = load;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
: : EV_HCALL_CLOBBERS3
);
@@ -317,7 +317,7 @@ static inline unsigned int fh_partition_stop(unsigned int partition)
r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
r3 = partition;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -376,7 +376,7 @@ static inline unsigned int fh_partition_memcpy(unsigned int source,
#endif
r7 = count;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11),
"+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
: : EV_HCALL_CLOBBERS5
@@ -399,7 +399,7 @@ static inline unsigned int fh_dma_enable(unsigned int liodn)
r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
r3 = liodn;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -421,7 +421,7 @@ static inline unsigned int fh_dma_disable(unsigned int liodn)
r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
r3 = liodn;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -447,7 +447,7 @@ static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
r3 = interrupt;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -469,7 +469,7 @@ static inline unsigned int fh_system_reset(void)
r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "=r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -506,7 +506,7 @@ static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
r6 = addr_lo;
r7 = peek;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
"+r" (r7)
: : EV_HCALL_CLOBBERS5
@@ -542,7 +542,7 @@ static inline unsigned int fh_get_core_state(unsigned int handle,
r3 = handle;
r4 = vcpu;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -572,7 +572,7 @@ static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
r3 = handle;
r4 = vcpu;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -597,7 +597,7 @@ static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
r3 = handle;
r4 = vcpu;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
@@ -618,7 +618,7 @@ static inline unsigned int fh_claim_device(unsigned int handle)
r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
r3 = handle;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
@@ -645,7 +645,7 @@ static inline unsigned int fh_partition_stop_dma(unsigned int handle)
r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
r3 = handle;
- __asm__ __volatile__ ("sc 1"
+ asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 7a867065db7..0975e5c0bb1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -267,7 +267,8 @@
#define H_RANDOM 0x300
#define H_COP 0x304
#define H_GET_MPP_X 0x314
-#define MAX_HCALL_OPCODE H_GET_MPP_X
+#define H_SET_MODE 0x31C
+#define MAX_HCALL_OPCODE H_SET_MODE
#ifndef __ASSEMBLY__
@@ -355,6 +356,26 @@ struct hvcall_mpp_x_data {
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
+static inline unsigned int get_longbusy_msecs(int longbusy_rc)
+{
+ switch (longbusy_rc) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
+
#ifdef CONFIG_PPC_PSERIES
extern int CMO_PrPSP;
extern int CMO_SecPSP;
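
get_longbusy_msecs() maps the H_LONG_BUSY_* hint codes onto sleep times for retry loops. A hedged usage sketch; H_IS_LONG_BUSY() and plpar_hcall_norets() are assumed from the existing pseries code, and H_SOME_CALL stands in for a real hcall number:

    static long do_hcall_with_retry(unsigned long arg)
    {
            long rc;

            do {
                    rc = plpar_hcall_norets(H_SOME_CALL, arg);	/* placeholder hcall */
                    if (H_IS_LONG_BUSY(rc))
                            msleep(get_longbusy_msecs(rc));	/* sleep for the hinted time */
            } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

            return rc;
    }
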
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 61e8490786b..bedbff89142 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -3,7 +3,7 @@
* The Internal Memory Map for devices with QE on them. This
* is the superset of all QE devices (8360, etc.).
- * Copyright (C) 2006. Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 76fdcfef088..aabcdba8f6b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -118,6 +118,7 @@
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+#define RESUME_FLAG_ARCH1 (1<<2)
#define RESUME_GUEST 0
#define RESUME_GUEST_NV RESUME_FLAG_NV
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 7aefdb3e1ce..5a56e1c5f85 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,6 +81,8 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
+ u64 purr_offset;
+ u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
@@ -157,10 +159,14 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
-extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel,
+ pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
+ struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 0dd1d86d3e3..38bec1dc992 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -50,6 +50,15 @@ extern int kvm_hpt_order; /* order of preallocated HPTs */
#define HPTE_V_HVLOCK 0x40UL
#define HPTE_V_ABSENT 0x20UL
+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED (1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
+
static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
unsigned long tmp, old;
@@ -60,7 +69,7 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
" ori %0,%0,%4\n"
" stdcx. %0,0,%2\n"
" beq+ 2f\n"
- " li %1,%3\n"
+ " mr %1,%3\n"
"2: isync"
: "=&r" (tmp), "=&r" (old)
: "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
@@ -237,4 +246,26 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
+/*
+ * This works for 4k, 64k and 16M pages on POWER7,
+ * and 4k and 16M pages on PPC970.
+ */
+static inline unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+ unsigned long senc = 0;
+
+ if (psize > 0x1000) {
+ senc = SLB_VSID_L;
+ if (psize == 0x10000)
+ senc |= SLB_VSID_LP_01;
+ }
+ return senc;
+}
+
+static inline int is_vrma_hpte(unsigned long hpte_v)
+{
+ return (hpte_v & ~0xffffffUL) ==
+ (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
+}
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
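
For reference, the three encodings slb_pgsize_encoding() can produce follow directly from the code added above; the sketch below is illustrative only:

    static void show_slb_encodings(void)
    {
            unsigned long senc_4k  = slb_pgsize_encoding(0x1000);	 /* == 0 */
            unsigned long senc_64k = slb_pgsize_encoding(0x10000);	 /* == SLB_VSID_L | SLB_VSID_LP_01 */
            unsigned long senc_16m = slb_pgsize_encoding(0x1000000); /* == SLB_VSID_L */

            (void)senc_4k; (void)senc_64k; (void)senc_16m;
    }
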
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index 30a600fa1b6..3a79f532571 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -17,6 +17,7 @@
* there are no exceptions for which we fall through directly to
* the normal host handler.
*
+ * 32-bit host
* Expected inputs (normal exceptions):
* SCRATCH0 = saved r10
* r10 = thread struct
@@ -33,14 +34,38 @@
* *(r8 + GPR9) = saved r9
* *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
* *(r8 + GPR11) = saved r11
+ *
+ * 64-bit host
+ * Expected inputs (GEN/GDBELL/DBG/MC exception types):
+ * r10 = saved CR
+ * r13 = PACA_POINTER
+ * *(r13 + PACA_EX##type + EX_R10) = saved r10
+ * *(r13 + PACA_EX##type + EX_R11) = saved r11
+ * SPRN_SPRG_##type##_SCRATCH = saved r13
+ *
+ * Expected inputs (CRIT exception type):
+ * r10 = saved CR
+ * r13 = PACA_POINTER
+ * *(r13 + PACA_EX##type + EX_R10) = saved r10
+ * *(r13 + PACA_EX##type + EX_R11) = saved r11
+ * *(r13 + PACA_EX##type + EX_R13) = saved r13
+ *
+ * Expected inputs (TLB exception type):
+ * r10 = saved CR
+ * r13 = PACA_POINTER
+ * *(r13 + PACA_EX##type + EX_TLB_R10) = saved r10
+ * *(r13 + PACA_EX##type + EX_TLB_R11) = saved r11
+ * SPRN_SPRG_GEN_SCRATCH = saved r13
+ *
+ * Only the bolted version of TLB miss exception handlers is supported now.
*/
.macro DO_KVM intno srr1
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
- bf 3, kvmppc_resume_\intno\()_\srr1
+ bf 3, 1975f
b kvmppc_handler_\intno\()_\srr1
-kvmppc_resume_\intno\()_\srr1:
+1975:
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
.endm
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28e8f5e5c63..ca9bf459db6 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,7 +46,7 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -204,7 +204,7 @@ struct revmap_entry {
};
/*
- * We use the top bit of each memslot->rmap entry as a lock bit,
+ * We use the top bit of each memslot->arch.rmap entry as a lock bit,
* and bit 32 as a present flag. The bottom 32 bits are the
* index in the guest HPT of a HPTE that points to the page.
*/
@@ -215,14 +215,17 @@ struct revmap_entry {
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
-/* Low-order bits in kvm->arch.slot_phys[][] */
+/* Low-order bits in memslot->arch.slot_phys[] */
#define KVMPPC_PAGE_ORDER_MASK 0x1f
#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80
struct kvm_arch_memory_slot {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long *rmap;
+ unsigned long *slot_phys;
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
};
struct kvm_arch {
@@ -243,12 +246,12 @@ struct kvm_arch {
int using_mmu_notifiers;
u32 hpt_order;
atomic_t vcpus_running;
+ u32 online_vcores;
unsigned long hpt_npte;
unsigned long hpt_mask;
+ atomic_t hpte_mod_interest;
spinlock_t slot_phys_lock;
- unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
- int slot_npages[KVM_MEM_SLOTS_NUM];
- unsigned short last_vcpu[NR_CPUS];
+ cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
@@ -273,6 +276,7 @@ struct kvmppc_vcore {
int nap_count;
int napping_threads;
u16 pcpu;
+ u16 last_cpu;
u8 vcore_state;
u8 in_guest;
struct list_head runnable_threads;
@@ -288,9 +292,10 @@ struct kvmppc_vcore {
/* Values for vcore_state */
#define VCORE_INACTIVE 0
-#define VCORE_RUNNING 1
-#define VCORE_EXITING 2
-#define VCORE_SLEEPING 3
+#define VCORE_SLEEPING 1
+#define VCORE_STARTING 2
+#define VCORE_RUNNING 3
+#define VCORE_EXITING 4
/*
* Struct used to manage memory for a virtual processor area
@@ -346,6 +351,27 @@ struct kvmppc_slb {
bool class : 1;
};
+# ifdef CONFIG_PPC_FSL_BOOK3E
+#define KVMPPC_BOOKE_IAC_NUM 2
+#define KVMPPC_BOOKE_DAC_NUM 2
+# else
+#define KVMPPC_BOOKE_IAC_NUM 4
+#define KVMPPC_BOOKE_DAC_NUM 2
+# endif
+#define KVMPPC_BOOKE_MAX_IAC 4
+#define KVMPPC_BOOKE_MAX_DAC 2
+
+struct kvmppc_booke_debug_reg {
+ u32 dbcr0;
+ u32 dbcr1;
+ u32 dbcr2;
+#ifdef CONFIG_KVM_E500MC
+ u32 dbcr4;
+#endif
+ u64 iac[KVMPPC_BOOKE_MAX_IAC];
+ u64 dac[KVMPPC_BOOKE_MAX_DAC];
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -380,13 +406,18 @@ struct kvm_vcpu_arch {
u32 host_mas4;
u32 host_mas6;
u32 shadow_epcr;
- u32 epcr;
u32 shadow_msrp;
u32 eplc;
u32 epsc;
u32 oldpir;
#endif
+#if defined(CONFIG_BOOKE)
+#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
+ u32 epcr;
+#endif
+#endif
+
#ifdef CONFIG_PPC_BOOK3S
/* For Gekko paired singles */
u32 qpr[32];
@@ -440,8 +471,6 @@ struct kvm_vcpu_arch {
u32 ccr0;
u32 ccr1;
- u32 dbcr0;
- u32 dbcr1;
u32 dbsr;
u64 mmcr[3];
@@ -471,9 +500,12 @@ struct kvm_vcpu_arch {
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+ spinlock_t wdt_lock;
+ struct timer_list wdt_timer;
u32 tlbcfg[4];
u32 mmucfg;
u32 epr;
+ struct kvmppc_booke_debug_reg dbg_reg;
#endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
@@ -486,6 +518,7 @@ struct kvm_vcpu_arch {
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
+ u8 watchdog_enabled;
u8 sane;
u8 cpu_type;
u8 hcall_needed;
@@ -497,7 +530,6 @@ struct kvm_vcpu_arch {
u64 dec_jiffies;
u64 dec_expires;
unsigned long pending_exceptions;
- u16 last_cpu;
u8 ceded;
u8 prodded;
u32 last_inst;
@@ -534,13 +566,17 @@ struct kvm_vcpu_arch {
unsigned long dtl_index;
u64 stolen_logged;
struct kvmppc_vpa slb_shadow;
+
+ spinlock_t tbacct_lock;
+ u64 busy_stolen;
+ u64 busy_preempt;
#endif
};
/* Values for vcpu->arch.state */
-#define KVMPPC_VCPU_STOPPED 0
-#define KVMPPC_VCPU_BUSY_IN_HOST 1
-#define KVMPPC_VCPU_RUNNABLE 2
+#define KVMPPC_VCPU_NOTREADY 0
+#define KVMPPC_VCPU_RUNNABLE 1
+#define KVMPPC_VCPU_BUSY_IN_HOST 2
/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 9365860fb7f..2b119654b4c 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -21,7 +21,6 @@
#include <uapi/asm/kvm_para.h>
-
#ifdef CONFIG_KVM_GUEST
#include <linux/of.h>
@@ -55,7 +54,7 @@ static unsigned long kvm_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr)
{
- return HC_EV_UNIMPLEMENTED;
+ return EV_UNIMPLEMENTED;
}
#endif
@@ -66,7 +65,7 @@ static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
unsigned long out[8];
unsigned long r;
- r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
*r2 = out[0];
return r;
@@ -77,7 +76,7 @@ static inline long kvm_hypercall0(unsigned int nr)
unsigned long in[8];
unsigned long out[8];
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
@@ -86,7 +85,7 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
unsigned long out[8];
in[0] = p1;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
@@ -97,7 +96,7 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
in[0] = p1;
in[1] = p2;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
@@ -109,7 +108,7 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
in[0] = p1;
in[1] = p2;
in[2] = p3;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
@@ -123,7 +122,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
in[1] = p2;
in[2] = p3;
in[3] = p4;
- return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
+ return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
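
The switch from "nr | HC_VENDOR_KVM" to KVM_HCALL_TOKEN(nr) follows the ePAPR vendor-ID scheme that the deleted epapr_hcalls.h block spelled out (_EV_HCALL_TOKEN(id, num) == (id << 16) | num, vendor 42 for KVM). The token macro presumably reduces to something along these lines; the exact location of the definition is an assumption:

    /* hedged reconstruction, assumed to live in uapi/asm/epapr_hcalls.h
     * next to EV_KVM_VENDOR_ID (42) */
    #define KVM_HCALL_TOKEN(hcall_num)	_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, hcall_num)
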
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e006f0bdea9..572aa753061 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
+#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
@@ -68,6 +69,8 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
+extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
/* Core-specific hooks */
@@ -104,6 +107,7 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
+extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
@@ -111,6 +115,7 @@ extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
ulong *val);
+extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
@@ -139,16 +144,28 @@ extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
extern void kvm_release_hpt(struct kvmppc_linear_info *li);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
+extern void kvmppc_core_flush_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);
+extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
+
+extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+
/*
* Cuts out inst bits with ordering according to spec.
* That means the leftmost bit is zero. All given bits are included.
@@ -182,6 +199,41 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
return r;
}
+union kvmppc_one_reg {
+ u32 wval;
+ u64 dval;
+ vector128 vval;
+ u64 vsxval[2];
+ struct {
+ u64 addr;
+ u64 length;
+ } vpaval;
+};
+
+#define one_reg_size(id) \
+ (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+#define get_reg_val(id, reg) ({ \
+ union kvmppc_one_reg __u; \
+ switch (one_reg_size(id)) { \
+ case 4: __u.wval = (reg); break; \
+ case 8: __u.dval = (reg); break; \
+ default: BUG(); \
+ } \
+ __u; \
+})
+
+
+#define set_reg_val(id, val) ({ \
+ u64 __v; \
+ switch (one_reg_size(id)) { \
+ case 4: __v = (val).wval; break; \
+ case 8: __v = (val).dval; break; \
+ default: BUG(); \
+ } \
+ __v; \
+})
+
void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
@@ -190,6 +242,8 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
@@ -230,5 +284,36 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
}
+/* Please call after prepare_to_enter. This function puts the lazy ee state
+ back to normal mode, without actually enabling interrupts. */
+static inline void kvmppc_lazy_ee_enable(void)
+{
+#ifdef CONFIG_PPC64
+ /* Only need to enable IRQs by hard enabling them after this */
+ local_paca->irq_happened = 0;
+ local_paca->soft_enabled = 1;
+#endif
+}
+
+static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
+{
+ ulong ea;
+ ulong msr_64bit = 0;
+
+ ea = kvmppc_get_gpr(vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(vcpu, ra);
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+ msr_64bit = MSR_CM;
+#elif defined(CONFIG_PPC_BOOK3S_64)
+ msr_64bit = MSR_SF;
+#endif
+
+ if (!(vcpu->arch.shared->msr & msr_64bit))
+ ea = (uint32_t)ea;
+
+ return ea;
+}
#endif /* __POWERPC_KVM_PPC_H__ */
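
A hedged sketch of how the ONE_REG helpers above are meant to be used when servicing KVM_GET_ONE_REG; the helper name is illustrative and 'regval' stands for whichever vcpu field backs the register:

    /* copy one register value out to user space, sized per the register id */
    static int copy_one_reg_to_user(struct kvm_one_reg *reg, u64 regval)
    {
            union kvmppc_one_reg val = get_reg_val(reg->id, regval);

            if (copy_to_user((void __user *)(unsigned long)reg->addr,
                             &val, one_reg_size(reg->id)))
                    return -EFAULT;
            return 0;
    }
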
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c4231973edd..19d9d96eb8d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -166,9 +166,6 @@ struct machdep_calls {
unsigned long size,
pgprot_t vma_prot);
- /* Idle loop for this platform, leave empty for default idle loop */
- void (*idle_loop)(void);
-
/*
* Function for waiting for work with reduced power in idle loop;
* called with interrupts disabled.
@@ -320,28 +317,28 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
ppc_md.log_error(buf, err_type, fatal);
}
-#define __define_machine_initcall(mach,level,fn,id) \
+#define __define_machine_initcall(mach, fn, id) \
static int __init __machine_initcall_##mach##_##fn(void) { \
if (machine_is(mach)) return fn(); \
return 0; \
} \
- __define_initcall(level,__machine_initcall_##mach##_##fn,id);
-
-#define machine_core_initcall(mach,fn) __define_machine_initcall(mach,"1",fn,1)
-#define machine_core_initcall_sync(mach,fn) __define_machine_initcall(mach,"1s",fn,1s)
-#define machine_postcore_initcall(mach,fn) __define_machine_initcall(mach,"2",fn,2)
-#define machine_postcore_initcall_sync(mach,fn) __define_machine_initcall(mach,"2s",fn,2s)
-#define machine_arch_initcall(mach,fn) __define_machine_initcall(mach,"3",fn,3)
-#define machine_arch_initcall_sync(mach,fn) __define_machine_initcall(mach,"3s",fn,3s)
-#define machine_subsys_initcall(mach,fn) __define_machine_initcall(mach,"4",fn,4)
-#define machine_subsys_initcall_sync(mach,fn) __define_machine_initcall(mach,"4s",fn,4s)
-#define machine_fs_initcall(mach,fn) __define_machine_initcall(mach,"5",fn,5)
-#define machine_fs_initcall_sync(mach,fn) __define_machine_initcall(mach,"5s",fn,5s)
-#define machine_rootfs_initcall(mach,fn) __define_machine_initcall(mach,"rootfs",fn,rootfs)
-#define machine_device_initcall(mach,fn) __define_machine_initcall(mach,"6",fn,6)
-#define machine_device_initcall_sync(mach,fn) __define_machine_initcall(mach,"6s",fn,6s)
-#define machine_late_initcall(mach,fn) __define_machine_initcall(mach,"7",fn,7)
-#define machine_late_initcall_sync(mach,fn) __define_machine_initcall(mach,"7s",fn,7s)
+ __define_initcall(__machine_initcall_##mach##_##fn, id);
+
+#define machine_core_initcall(mach, fn) __define_machine_initcall(mach, fn, 1)
+#define machine_core_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 1s)
+#define machine_postcore_initcall(mach, fn) __define_machine_initcall(mach, fn, 2)
+#define machine_postcore_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 2s)
+#define machine_arch_initcall(mach, fn) __define_machine_initcall(mach, fn, 3)
+#define machine_arch_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 3s)
+#define machine_subsys_initcall(mach, fn) __define_machine_initcall(mach, fn, 4)
+#define machine_subsys_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 4s)
+#define machine_fs_initcall(mach, fn) __define_machine_initcall(mach, fn, 5)
+#define machine_fs_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 5s)
+#define machine_rootfs_initcall(mach, fn) __define_machine_initcall(mach, fn, rootfs)
+#define machine_device_initcall(mach, fn) __define_machine_initcall(mach, fn, 6)
+#define machine_device_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 6s)
+#define machine_late_initcall(mach, fn) __define_machine_initcall(mach, fn, 7)
+#define machine_late_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 7s)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MACHDEP_H */
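
The machine_*_initcall() rework only drops the now-unused level string; usage is unchanged. A hedged example, where 'myboard' stands for a platform declared elsewhere with define_machine(myboard) and the function body is illustrative:

    static int __init myboard_late_setup(void)
    {
            /* runs at device_initcall time, but only when machine_is(myboard) */
            return 0;
    }
    machine_device_initcall(myboard, myboard_late_setup);
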
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index eeabcdbc30f..99d43e0c1e4 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -59,7 +59,7 @@
#define MAS1_TSIZE_SHIFT 7
#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
-#define MAS2_EPN 0xFFFFF000
+#define MAS2_EPN (~0xFFFUL)
#define MAS2_X0 0x00000040
#define MAS2_X1 0x00000020
#define MAS2_W 0x00000010
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 9673f73eb8d..2fdb47a19ef 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -121,6 +121,16 @@ extern char initial_stab[];
#define PP_RXRX 3 /* Supervisor read, User read */
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
+#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
+#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
+#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT 12
+
+#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
+
#ifndef __ASSEMBLY__
struct hash_pte {
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 5e38eedea21..691fd8aca93 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -101,6 +101,7 @@
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 639dc96077a..d697b08994c 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -34,7 +34,7 @@ struct op_system_config {
unsigned long mmcra;
#ifdef CONFIG_OPROFILE_CELL
/* Register for oprofile user tool to check cell kernel profiling
- * suport.
+ * support.
*/
unsigned long cell_support;
#endif
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
deleted file mode 100644
index c07edfe98b9..00000000000
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _PPC64_PSERIES_RECONFIG_H
-#define _PPC64_PSERIES_RECONFIG_H
-#ifdef __KERNEL__
-
-#include <linux/notifier.h>
-
-/*
- * Use this API if your code needs to know about OF device nodes being
- * added or removed on pSeries systems.
- */
-
-#define PSERIES_RECONFIG_ADD 0x0001
-#define PSERIES_RECONFIG_REMOVE 0x0002
-#define PSERIES_DRCONF_MEM_ADD 0x0003
-#define PSERIES_DRCONF_MEM_REMOVE 0x0004
-#define PSERIES_UPDATE_PROPERTY 0x0005
-
-/**
- * pSeries_reconfig_notify - Notifier value structure for OFDT property updates
- *
- * @node: Device tree node which owns the property being updated
- * @property: Updated property
- */
-struct pSeries_reconfig_prop_update {
- struct device_node *node;
- struct property *property;
-};
-
-#ifdef CONFIG_PPC_PSERIES
-extern int pSeries_reconfig_notifier_register(struct notifier_block *);
-extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
-extern int pSeries_reconfig_notify(unsigned long action, void *p);
-/* Not the best place to put this, will be fixed when we move some
- * of the rtas suspend-me stuff to pseries */
-extern void pSeries_coalesce_init(void);
-#else /* !CONFIG_PPC_PSERIES */
-static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
-static inline void pSeries_coalesce_init(void) { }
-#endif /* CONFIG_PPC_PSERIES */
-
-
-#endif /* __KERNEL__ */
-#endif /* _PPC64_PSERIES_RECONFIG_H */
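
With asm/pSeries_reconfig.h gone, callers are expected to move to the generic OF reconfiguration notifiers. A hedged migration sketch; the of_reconfig_* names and OF_RECONFIG_* actions are assumed from include/linux/of.h of the same kernel series, not taken from this diff:

    static int my_of_reconfig(struct notifier_block *nb, unsigned long action,
                              void *data)
    {
            switch (action) {
            case OF_RECONFIG_ATTACH_NODE:	/* roughly the old PSERIES_RECONFIG_ADD */
            case OF_RECONFIG_DETACH_NODE:	/* roughly the old PSERIES_RECONFIG_REMOVE */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_of_nb = {
            .notifier_call = my_of_reconfig,
    };

    /* registration, e.g. from an initcall:
     *	of_reconfig_notifier_register(&my_of_nb);
     */
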
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 42b1f43b943..51fb00a20d7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2009 Freescale Semicondutor, Inc.
+ * Copyright 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -86,6 +86,7 @@
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
+#define PPC_INST_ICBT 0x7c00002c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@@ -201,6 +202,7 @@
#define __PPC_MB(s) (((s) & 0x1f) << 6)
#define __PPC_ME(s) (((s) & 0x1f) << 1)
#define __PPC_BI(s) (((s) & 0x1f) << 16)
+#define __PPC_CT(t) (((t) & 0x0f) << 21)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -263,6 +265,8 @@
__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
__PPC_RT(t) | __PPC_RB(b))
+#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
+ __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
/* PASemi instructions */
#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \
__PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index b5c91901e38..99c92d5363e 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -58,6 +58,22 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
extern void of_instantiate_rtc(void);
+/* The of_drconf_cell struct defines the layout of the LMB array
+ * specified in the device tree property
+ * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
+ */
+struct of_drconf_cell {
+ u64 base_addr;
+ u32 drc_index;
+ u32 reserved;
+ u32 aa_index;
+ u32 flags;
+};
+
+#define DRCONF_MEM_ASSIGNED 0x00000008
+#define DRCONF_MEM_AI_INVALID 0x00000040
+#define DRCONF_MEM_RESERVED 0x00000080
+
/* These includes are put at the bottom because they may contain things
* that are overridden by this file. Ideally they shouldn't be included
* by this file, but there are a bunch of .c files that currently depend
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index eedf427c912..3e13e23e4fd 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -23,7 +23,7 @@
/* Note the full page bits must be in the same location as for normal
* 4k pages as the same assembly will be used to insert 64K pages
- * wether the kernel has CONFIG_PPC_64K_PAGES or not
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not
*/
#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 229571a4939..32b9bfa0c9b 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index f706164b0bd..25784cc959a 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d24c1416396..3d5c9dc8917 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -249,6 +249,8 @@
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
+#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
+#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
@@ -518,6 +520,7 @@
#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGILL 0x00080000 /* Illegal instruction */
#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
@@ -1029,6 +1032,7 @@
#define PVR_970MP 0x0044
#define PVR_970GX 0x0045
#define PVR_POWER7p 0x004A
+#define PVR_POWER8 0x004B
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2d916c4982c..e07e6af5e1f 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -539,6 +539,13 @@
#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
#define TCR_ARE 0x00400000 /* Auto Reload Enable */
+#ifdef CONFIG_E500
+#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \
+ (((tcr) & 0x1E0000) >> 15))
+#else
+#define TCR_GET_WP(tcr) (((tcr) & 0xC0000000) >> 30)
+#endif
+
/* Bit definitions for the TSR. */
#define TSR_ENW 0x80000000 /* Enable Next Watchdog */
#define TSR_WIS 0x40000000 /* WDT Interrupt Status */
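TCR_GET_WP() folds the watchdog period select back into a single value: on e500 the 2-bit field in the top bits is combined with the 4-bit extension field at 0x1E0000, elsewhere only the 2-bit field exists. A standalone sketch of the e500 arithmetic, with a made-up TCR value:

/*
 * User-space sketch mirroring the e500 variant of TCR_GET_WP above.
 * The sample TCR value is invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define TCR_GET_WP_E500(tcr) ((((tcr) & 0xC0000000u) >> 30) | \
			      (((tcr) & 0x001E0000u) >> 15))

int main(void)
{
	uint32_t tcr = 0x400A0000u;	/* top field = 1, extension = 5 */

	/* (1) | (5 << 2) = 21 */
	printf("watchdog period select = %u\n", TCR_GET_WP_E500(tcr));
	return 0;
}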
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 557cff845de..aef00c67590 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -353,8 +353,13 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
return 1;
return 0;
}
+
+/* Not the best place to put pSeries_coalesce_init, will be fixed when we
+ * move some of the rtas suspend-me stuff to pseries */
+extern void pSeries_coalesce_init(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
+static inline void pSeries_coalesce_init(void) { }
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
new file mode 100644
index 00000000000..d3ca85529b8
--- /dev/null
+++ b/arch/powerpc/include/asm/setup.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_SETUP_H
+#define _ASM_POWERPC_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+
+extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern int init_bootmem_done; /* set once bootmem is available */
+extern unsigned long long memory_limit;
+extern unsigned long klimit;
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_SETUP_H */
+
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e807e9d8e3f..5a4e437c238 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,14 @@ void generic_mach_cpu_die(void);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
+
+extern void inhibit_secondary_onlining(void);
+extern void uninhibit_secondary_onlining(void);
+
+#else /* HOTPLUG_CPU */
+static inline void inhibit_secondary_onlining(void) {}
+static inline void uninhibit_secondary_onlining(void) {}
+
#endif
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index ae20ce1af4c..6e909f3e6a4 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -132,7 +132,7 @@
*
* At this point, the OF driver seems to have a limitation on transfer
* sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
- * wether this is just an OF limit due to some temporary buffer size
+ * whether this is just an OF limit due to some temporary buffer size
* or if this is an SMU imposed limit. This driver has the same limitation
* for now as I use a 0x10 bytes temporary buffer as well
*
@@ -236,7 +236,7 @@
* 3 (optional): enable nmi? [0x00 or 0x01]
*
* Returns:
- * If parameter 2 is 0x00 and parameter 3 is not specified, returns wether
+ * If parameter 2 is 0x00 and parameter 3 is not specified, returns whether
* NMI is enabled. Otherwise unknown.
*/
#define SMU_CMD_MISC_df_NMI_OPTION 0x04
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 84083876985..97909d3b1d7 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -164,7 +164,7 @@ COMPAT_SYS_SPU(sched_getscheduler)
SYSCALL_SPU(sched_yield)
COMPAT_SYS_SPU(sched_get_priority_max)
COMPAT_SYS_SPU(sched_get_priority_min)
-COMPAT_SYS_SPU(sched_rr_get_interval)
+SYSX_SPU(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval_wrapper,sys_sched_rr_get_interval)
COMPAT_SYS_SPU(nanosleep)
SYSCALL_SPU(mremap)
SYSCALL_SPU(setresuid)
@@ -356,3 +356,4 @@ COMPAT_SYS_SPU(sendmmsg)
SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
+SYSCALL(finit_module)
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
index 46b09ba6bea..6927ac26516 100644
--- a/arch/powerpc/include/asm/ucc.h
+++ b/arch/powerpc/include/asm/ucc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
index 4644c840e2f..72ea9bab07d 100644
--- a/arch/powerpc/include/asm/ucc_fast.h
+++ b/arch/powerpc/include/asm/ucc_fast.h
@@ -1,7 +1,7 @@
/*
* Internal header file for UCC FAST unit routines.
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index cf131ffdb8d..c44131e68e1 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index b3038817b8d..5a7510e9d09 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -21,7 +21,6 @@ extern int (*udbg_getc_poll)(void);
extern void udbg_puts(const char *s);
extern int udbg_write(const char *s, int n);
-extern int udbg_read(char *buf, int buflen);
extern void register_early_udbg_console(void);
extern void udbg_printf(const char *fmt, ...)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 76fe846ec40..1d4864a40e3 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 353
+#define __NR_syscalls 354
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
@@ -54,8 +54,8 @@
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#endif
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index a33c3c03bb2..f7bca637074 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ header-y += bootx.h
header-y += byteorder.h
header-y += cputable.h
header-y += elf.h
+header-y += epapr_hcalls.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
diff --git a/arch/powerpc/include/uapi/asm/epapr_hcalls.h b/arch/powerpc/include/uapi/asm/epapr_hcalls.h
new file mode 100644
index 00000000000..7f9c74b4670
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/epapr_hcalls.h
@@ -0,0 +1,98 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
+#define _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
+
+#define EV_BYTE_CHANNEL_SEND 1
+#define EV_BYTE_CHANNEL_RECEIVE 2
+#define EV_BYTE_CHANNEL_POLL 3
+#define EV_INT_SET_CONFIG 4
+#define EV_INT_GET_CONFIG 5
+#define EV_INT_SET_MASK 6
+#define EV_INT_GET_MASK 7
+#define EV_INT_IACK 9
+#define EV_INT_EOI 10
+#define EV_INT_SEND_IPI 11
+#define EV_INT_SET_TASK_PRIORITY 12
+#define EV_INT_GET_TASK_PRIORITY 13
+#define EV_DOORBELL_SEND 14
+#define EV_MSGSND 15
+#define EV_IDLE 16
+
+/* vendor ID: epapr */
+#define EV_LOCAL_VENDOR_ID 0 /* for private use */
+#define EV_EPAPR_VENDOR_ID 1
+#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
+#define EV_IBM_VENDOR_ID 3 /* IBM */
+#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
+#define EV_ENEA_VENDOR_ID 5 /* Enea */
+#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
+#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
+#define EV_KVM_VENDOR_ID 42 /* KVM */
+
+/* The max number of bytes that a byte channel can send or receive per call */
+#define EV_BYTE_CHANNEL_MAX_BYTES 16
+
+
+#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
+#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
+
+/* epapr return codes */
+#define EV_SUCCESS 0
+#define EV_EPERM 1 /* Operation not permitted */
+#define EV_ENOENT 2 /* Entry Not Found */
+#define EV_EIO 3 /* I/O error occurred */
+#define EV_EAGAIN 4 /* The operation had insufficient
+ * resources to complete and should be
+ * retried
+ */
+#define EV_ENOMEM 5 /* There was insufficient memory to
+ * complete the operation */
+#define EV_EFAULT 6 /* Bad guest address */
+#define EV_ENODEV 7 /* No such device */
+#define EV_EINVAL 8 /* An argument supplied to the hcall
+ was out of range or invalid */
+#define EV_INTERNAL 9 /* An internal error occurred */
+#define EV_CONFIG 10 /* A configuration error was detected */
+#define EV_INVALID_STATE 11 /* The object is in an invalid state */
+#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
+#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
+
+#endif /* _UAPI_ASM_POWERPC_EPAPR_HCALLS_H */
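The token layout defined above is vendor ID in the upper 16 bits and hypercall number in the lower 16, so the idle hypercall token is (1 << 16) | 16 = 0x10010, and KVM tokens reproduce the old HC_VENDOR_KVM base of 42 << 16 that the kvm_para.h hunk below removes. A tiny standalone check, with values copied from the new header, illustrative only:

/*
 * Standalone check of the ePAPR hypercall token layout: vendor ID in
 * the upper 16 bits, hypercall number in the lower 16.
 */
#include <stdio.h>

#define _EV_HCALL_TOKEN(id, num)	(((id) << 16) | (num))
#define EV_EPAPR_VENDOR_ID		1
#define EV_KVM_VENDOR_ID		42
#define EV_IDLE				16

int main(void)
{
	/* (1 << 16) | 16 == 0x10010 */
	printf("EV_IDLE token:  0x%x\n",
	       _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, EV_IDLE));
	/* 42 << 16 == 0x2a0000, the old HC_VENDOR_KVM base */
	printf("KVM token base: 0x%x\n",
	       _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, 0));
	return 0;
}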
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 1bea4d8ea6f..2fba8a66fb1 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -221,6 +221,12 @@ struct kvm_sregs {
__u32 dbsr; /* KVM_SREGS_E_UPDATE_DBSR */
__u32 dbcr[3];
+ /*
+ * iac/dac registers are 64bit wide, while this API
+ * interface provides only lower 32 bits on 64 bit
+ * processors. ONE_REG interface is added for 64bit
+ * iac/dac registers.
+ */
__u32 iac[4];
__u32 dac[2];
__u32 dvc[2];
@@ -325,6 +331,86 @@ struct kvm_book3e_206_tlb_params {
__u32 reserved[8];
};
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+ __u64 flags;
+ __u64 start_index;
+ __u64 reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1)
+#define KVM_GET_HTAB_WRITE ((__u64)0x2)
+
+/*
+ * Data read on the file descriptor is formatted as a series of
+ * records, each consisting of a header followed by a series of
+ * `n_valid' HPTEs (16 bytes each), which are all valid. Following
+ * those valid HPTEs there are `n_invalid' invalid HPTEs, which
+ * are not represented explicitly in the stream. The same format
+ * is used for writing.
+ */
+struct kvm_get_htab_header {
+ __u32 index;
+ __u16 n_valid;
+ __u16 n_invalid;
+};
+
#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
+#define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_PPC_IAC3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
+#define KVM_REG_PPC_IAC4 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
+#define KVM_REG_PPC_DAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
+#define KVM_REG_PPC_DAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_PPC_DABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_PPC_DSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
+#define KVM_REG_PPC_PURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
+#define KVM_REG_PPC_SPURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
+#define KVM_REG_PPC_DAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
+#define KVM_REG_PPC_DSISR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
+#define KVM_REG_PPC_AMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
+#define KVM_REG_PPC_UAMOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
+
+#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
+#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
+#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+
+#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
+#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
+#define KVM_REG_PPC_PMC3 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
+#define KVM_REG_PPC_PMC4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
+#define KVM_REG_PPC_PMC5 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
+#define KVM_REG_PPC_PMC6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
+#define KVM_REG_PPC_PMC7 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
+#define KVM_REG_PPC_PMC8 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
+
+/* 32 floating-point registers */
+#define KVM_REG_PPC_FPR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
+#define KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n))
+#define KVM_REG_PPC_FPR31 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
+
+/* 32 VMX/Altivec vector registers */
+#define KVM_REG_PPC_VR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
+#define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n))
+#define KVM_REG_PPC_VR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
+
+/* 32 double-width FP registers for VSX */
+/* High-order halves overlap with FP regs */
+#define KVM_REG_PPC_VSR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
+#define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n))
+#define KVM_REG_PPC_VSR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
+
+/* FP and vector status/control registers */
+#define KVM_REG_PPC_FPSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+#define KVM_REG_PPC_VSCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
+
+/* Virtual processor areas */
+/* For SLB & DTL, address in high (first) half, length in low half */
+#define KVM_REG_PPC_VPA_ADDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
+#define KVM_REG_PPC_VPA_SLB (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
+#define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
+
+#define KVM_REG_PPC_EPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
#endif /* __LINUX_KVM_POWERPC_H */
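The record format described in the comment above lends itself to a simple streaming reader. A rough user-space sketch, assuming the descriptor came from the KVM_PPC_GET_HTAB_FD ioctl and glossing over error handling and the exact end-of-stream convention:

/*
 * Each record is a kvm_get_htab_header followed by n_valid 16-byte
 * HPTEs; n_invalid further entries are implied but not present in the
 * data. Illustrative consumer only.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct kvm_get_htab_header {
	uint32_t index;
	uint16_t n_valid;
	uint16_t n_invalid;
};

static void dump_htab_stream(int fd)
{
	struct kvm_get_htab_header hdr;
	unsigned char hpte[16];
	int i;

	while (read(fd, &hdr, sizeof(hdr)) == (ssize_t)sizeof(hdr)) {
		printf("index %u: %u valid, %u invalid\n",
		       hdr.index, hdr.n_valid, hdr.n_invalid);
		for (i = 0; i < hdr.n_valid; i++) {
			if (read(fd, hpte, sizeof(hpte)) != (ssize_t)sizeof(hpte))
				return;
			/* hpte[] holds the HPTE for slot hdr.index + i */
		}
		/* slots from hdr.index + hdr.n_valid onward, hdr.n_invalid
		 * of them, are invalid and carried only as a count */
	}
}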
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index 5e04383a1db..ed0e0254b47 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -75,9 +75,10 @@ struct kvm_vcpu_arch_shared {
};
#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
-#define HC_VENDOR_KVM (42 << 16)
-#define HC_EV_SUCCESS 0
-#define HC_EV_UNIMPLEMENTED 12
+
+#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
+
+#include <uapi/asm/epapr_hcalls.h>
#define KVM_FEATURE_MAGIC_PAGE 1
diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h
index 8b9a306260b..552df83f1a4 100644
--- a/arch/powerpc/include/uapi/asm/setup.h
+++ b/arch/powerpc/include/uapi/asm/setup.h
@@ -1,32 +1 @@
-#ifndef _ASM_POWERPC_SETUP_H
-#define _ASM_POWERPC_SETUP_H
-
#include <asm-generic/setup.h>
-
-#ifndef __ASSEMBLY__
-extern void ppc_printk_progress(char *s, unsigned short hex);
-
-extern unsigned int rtas_data;
-extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern int init_bootmem_done; /* set once bootmem is available */
-extern unsigned long long memory_limit;
-extern unsigned long klimit;
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-
-extern void via_cuda_init(void);
-extern void read_rtc_time(void);
-extern void pmac_find_display(void);
-
-struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
-
-/* Used in very early kernel initialization. */
-extern unsigned long reloc_offset(void);
-extern unsigned long add_reloc_offset(unsigned long);
-extern void reloc_got2(unsigned long);
-
-#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
index 48fa8d3f2f9..e079fb39d5b 100644
--- a/arch/powerpc/include/uapi/asm/signal.h
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -85,12 +85,6 @@ typedef struct {
#define SA_RESTORER 0x04000000U
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 380b5d37a90..8c478c6c6b1 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -375,6 +375,7 @@
#define __NR_setns 350
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
+#define __NR_finit_module 353
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
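With finit_module wired up as syscall 353 above, a minimal user-space invocation can go through syscall(2) directly. Illustrative only; the module path below is a placeholder and the caller needs module-loading privileges:

/*
 * Minimal finit_module invocation via syscall(2). Placeholder path,
 * no real argument parsing.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_finit_module
#define __NR_finit_module 353	/* powerpc number added above */
#endif

int main(void)
{
	int fd = open("/path/to/module.ko", O_RDONLY);	/* placeholder path */

	if (fd < 0)
		return 1;
	if (syscall(__NR_finit_module, fd, "", 0) != 0)
		perror("finit_module");
	close(fd);
	return 0;
}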
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index cde12f8a4eb..8f619342f14 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7523539cfe9..4e23ba2f3ca 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -441,8 +441,7 @@ int main(void)
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
- DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
- DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -470,7 +469,6 @@ int main(void)
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
- DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power.S
index 76797c5105d..57cf14065ae 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -27,6 +27,7 @@ _GLOBAL(__setup_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mfspr r3,SPRN_LPCR
bl __init_LPCR
bl __init_TLB
mtlr r11
@@ -39,6 +40,35 @@ _GLOBAL(__restore_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mfspr r3,SPRN_LPCR
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+_GLOBAL(__setup_cpu_power8)
+ mflr r11
+ bl __init_hvmode_206
+ mtlr r11
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ mfspr r3,SPRN_LPCR
+ oris r3, r3, LPCR_AIL_3@h
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+_GLOBAL(__restore_cpu_power8)
+ mflr r11
+ mfmsr r3
+ rldicl. r0,r3,4,63
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ mfspr r3,SPRN_LPCR
+ oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_TLB
mtlr r11
@@ -57,6 +87,7 @@ __init_hvmode_206:
__init_LPCR:
/* Setup a sane LPCR:
+ * Called with initial LPCR in R3
*
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
@@ -67,7 +98,6 @@ __init_LPCR:
*
* Other bits untouched for now
*/
- mfspr r3,SPRN_LPCR
li r5,1
rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 0514c21f138..75a3d71b895 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -68,6 +68,8 @@ extern void __restore_cpu_pa6t(void);
extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
+extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
+extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
@@ -94,6 +96,10 @@ extern void __restore_cpu_e5500(void);
PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE | \
+ PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -429,6 +435,21 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_power7,
.platform = "power7",
},
+ { /* 2.07-compliant processor, i.e. Power8 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000004,
+ .cpu_name = "POWER8 (architected)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_cpu_type = "ppc64/ibm-compat-v1",
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .platform = "power8",
+ },
{ /* Power7 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003f0000,
@@ -463,6 +484,23 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_power7,
.platform = "power7+",
},
+ { /* Power8 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004b0000,
+ .cpu_name = "POWER8 (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .platform = "power8",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e9a906c2723..b310a057362 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -373,6 +373,8 @@ _GLOBAL(ret_from_fork)
_GLOBAL(ret_from_kernel_thread)
bl .schedule_tail
REST_NVGPRS(r1)
+ li r3,0
+ std r3,0(r1)
ld r14, 0(r14)
mtlr r14
mr r3,r15
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 697b390ebfd..62c0dc23782 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -8,13 +8,41 @@
*/
#include <linux/threads.h>
+#include <asm/epapr_hcalls.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
+/* epapr_ev_idle() was derived from e500_idle() */
+_GLOBAL(epapr_ev_idle)
+ CURRENT_THREAD_INFO(r3, r1)
+ PPC_LL r4, TI_LOCAL_FLAGS(r3) /* set napping bit */
+ ori r4, r4,_TLF_NAPPING /* so when we take an exception */
+ PPC_STL r4, TI_LOCAL_FLAGS(r3) /* it will return to our caller */
+
+ wrteei 1
+
+idle_loop:
+ LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
+
+.global epapr_ev_idle_start
+epapr_ev_idle_start:
+ li r3, -1
+ nop
+ nop
+ nop
+
+ /*
+ * Guard against spurious wakeups from a hypervisor --
+ * only an interrupt will cause us to return to LR due to
+ * _TLF_NAPPING.
+ */
+ b idle_loop
+
/* Hypercall entry point. Will be patched with device tree instructions. */
.global epapr_hypercall_start
epapr_hypercall_start:
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 028aeae370b..f3eab8594d9 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -21,6 +21,10 @@
#include <asm/epapr_hcalls.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/machdep.h>
+
+extern void epapr_ev_idle(void);
+extern u32 epapr_ev_idle_start[];
bool epapr_paravirt_enabled;
@@ -41,8 +45,13 @@ static int __init epapr_paravirt_init(void)
if (len % 4 || len > (4 * 4))
return -ENODEV;
- for (i = 0; i < (len / 4); i++)
+ for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
+ patch_instruction(epapr_ev_idle_start + i, insts[i]);
+ }
+
+ if (of_get_property(hyper_node, "has-idle", NULL))
+ ppc_md.power_save = epapr_ev_idle;
epapr_paravirt_enabled = true;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 10b658ad65e..4665e82fa37 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -19,12 +19,76 @@
/*
* We layout physical memory as follows:
* 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support common interrupt prologs
- * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x0100 - 0x17ff : pSeries Interrupt prologs
+ * 0x1800 - 0x4000 : interrupt support common interrupt prologs
+ * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
+ * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
* 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - : Early init and support code
+ * 0x8000 - 0x8fff : Initial (CPU0) segment table
+ * 0x9000 - : Early init and support code
*/
+ /* Syscall routine is used twice, in reloc-off and reloc-on paths */
+#define SYSCALL_PSERIES_1 \
+BEGIN_FTR_SECTION \
+ cmpdi r0,0x1ebe ; \
+ beq- 1f ; \
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
+ mr r9,r13 ; \
+ GET_PACA(r13) ; \
+ mfspr r11,SPRN_SRR0 ; \
+0:
+
+#define SYSCALL_PSERIES_2_RFID \
+ mfspr r12,SPRN_SRR1 ; \
+ ld r10,PACAKBASE(r13) ; \
+ LOAD_HANDLER(r10, system_call_entry) ; \
+ mtspr SPRN_SRR0,r10 ; \
+ ld r10,PACAKMSR(r13) ; \
+ mtspr SPRN_SRR1,r10 ; \
+ rfid ; \
+ b . ; /* prevent speculative execution */
+
+#define SYSCALL_PSERIES_3 \
+ /* Fast LE/BE switch system call */ \
+1: mfspr r12,SPRN_SRR1 ; \
+ xori r12,r12,MSR_LE ; \
+ mtspr SPRN_SRR1,r12 ; \
+ rfid ; /* return to userspace */ \
+ b . ; \
+2: mfspr r12,SPRN_SRR1 ; \
+ andi. r12,r12,MSR_PR ; \
+ bne 0b ; \
+ mtspr SPRN_SRR0,r3 ; \
+ mtspr SPRN_SRR1,r4 ; \
+ mtspr SPRN_SDR1,r5 ; \
+ rfid ; \
+ b . ; /* prevent speculative execution */
+
+#if defined(CONFIG_RELOCATABLE)
+ /*
+ * We can't branch directly; in the direct case we use LR
+ * and system_call_entry restores LR. (We thus need to move
+ * LR to r10 in the RFID case too.)
+ */
+#define SYSCALL_PSERIES_2_DIRECT \
+ mflr r10 ; \
+ ld r12,PACAKBASE(r13) ; \
+ LOAD_HANDLER(r12, system_call_entry_direct) ; \
+ mtlr r12 ; \
+ mfspr r12,SPRN_SRR1 ; \
+ /* Re-use of r13... No spare regs to do this */ \
+ li r13,MSR_RI ; \
+ mtmsrd r13,1 ; \
+ GET_PACA(r13) ; /* get r13 back */ \
+ blr ;
+#else
+ /* We can branch directly */
+#define SYSCALL_PSERIES_2_DIRECT \
+ mfspr r12,SPRN_SRR1 ; \
+ li r10,MSR_RI ; \
+ mtmsrd r10,1 ; /* Set RI (EE=0) */ \
+ b system_call_entry_direct ;
+#endif
/*
* This is the start of the interrupt handlers for pSeries
@@ -207,31 +271,11 @@ system_call_pSeries:
KVMTEST(0xc00)
GET_SCRATCH0(r13)
#endif
-BEGIN_FTR_SECTION
- cmpdi r0,0x1ebe
- beq- 1f
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
- mr r9,r13
- GET_PACA(r13)
- mfspr r11,SPRN_SRR0
- mfspr r12,SPRN_SRR1
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, system_call_entry)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b . /* prevent speculative execution */
-
+ SYSCALL_PSERIES_1
+ SYSCALL_PSERIES_2_RFID
+ SYSCALL_PSERIES_3
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
-/* Fast LE/BE switch system call */
-1: mfspr r12,SPRN_SRR1
- xori r12,r12,MSR_LE
- mtspr SPRN_SRR1,r12
- rfid /* return to userspace */
- b .
-
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
@@ -276,7 +320,7 @@ vsx_unavailable_pSeries_1:
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
. = 0x1500
- .global denorm_Hypervisor
+ .global denorm_exception_hv
denorm_exception_hv:
HMT_MEDIUM
mtspr SPRN_SPRG_HSCRATCH0,r13
@@ -311,12 +355,14 @@ denorm_exception_hv:
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+#else
+ . = 0x1800
#endif /* CONFIG_CBE_RAS */
- . = 0x3000
/*** Out of line interrupts support ***/
+ .align 7
/* moved from 0x200 */
machine_check_pSeries:
.globl machine_check_fwnmi
@@ -575,16 +621,12 @@ slb_miss_user_pseries:
b . /* prevent spec. execution */
#endif /* __DISABLED__ */
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
/*
* Code from here down to __end_handlers is invoked from the
* exception prologs above. Because the prologs assemble the
* addresses of these handlers using the LOAD_HANDLER macro,
- * which uses an addi instruction, these handlers must be in
- * the first 32k of the kernel image.
+ * which uses an ori instruction, these handlers must be in
+ * the first 64k of the kernel image.
*/
/*** Common interrupt handlers ***/
@@ -613,8 +655,8 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
- STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
@@ -629,7 +671,158 @@ machine_check_common:
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
+ /*
+ * Relocation-on interrupts: A subset of the interrupts can be delivered
+ * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
+ * it. Addresses are the same as the original interrupt addresses, but
+ * offset by 0xc000000000004000.
+ * It's impossible to receive interrupts below 0x300 via this mechanism.
+ * KVM: None of these traps are from the guest; anything that escalated
+ * to HV=1 from HV=0 is delivered via real mode handlers.
+ */
+
+ /*
+ * This uses the standard macro, since the original 0x300 vector
+ * only has extra guff for STAB-based processors -- which never
+ * come here.
+ */
+ STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
+ . = 0x4380
+ .globl data_access_slb_relon_pSeries
+data_access_slb_relon_pSeries:
+ HMT_MEDIUM
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
+ mfspr r12,SPRN_SRR1
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to .slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
+ . = 0x4480
+ .globl instruction_access_slb_relon_pSeries
+instruction_access_slb_relon_pSeries:
+ HMT_MEDIUM
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ mfspr r12,SPRN_SRR1
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ . = 0x4500
+ .globl hardware_interrupt_relon_pSeries;
+ .globl hardware_interrupt_relon_hv;
+hardware_interrupt_relon_pSeries:
+hardware_interrupt_relon_hv:
+ BEGIN_FTR_SECTION
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
+ FTR_SECTION_ELSE
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
+ STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
+ STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
+ STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
+ MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
+ STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
+ STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
+
+ . = 0x4c00
+ .globl system_call_relon_pSeries
+system_call_relon_pSeries:
+ HMT_MEDIUM
+ SYSCALL_PSERIES_1
+ SYSCALL_PSERIES_2_DIRECT
+ SYSCALL_PSERIES_3
+
+ STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
+
+ . = 0x4e00
+ b h_data_storage_relon_hv
+
+ . = 0x4e20
+ b h_instr_storage_relon_hv
+
+ . = 0x4e40
+ b emulation_assist_relon_hv
+
+ . = 0x4e50
+ b hmi_exception_relon_hv
+
+ . = 0x4e60
+ b hmi_exception_relon_hv
+
+ /* For when we support the doorbell interrupt:
+ STD_RELON_EXCEPTION_HYPERVISOR(0x4e80, 0xe80, doorbell_hyper)
+ */
+
+performance_monitor_relon_pSeries_1:
+ . = 0x4f00
+ b performance_monitor_relon_pSeries
+
+altivec_unavailable_relon_pSeries_1:
+ . = 0x4f20
+ b altivec_unavailable_relon_pSeries
+
+vsx_unavailable_relon_pSeries_1:
+ . = 0x4f40
+ b vsx_unavailable_relon_pSeries
+
+#ifdef CONFIG_CBE_RAS
+ STD_RELON_EXCEPTION_HV(0x5200, 0x1202, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
+ STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
+#ifdef CONFIG_PPC_DENORMALISATION
+ . = 0x5500
+ b denorm_exception_hv
+#endif
+#ifdef CONFIG_CBE_RAS
+ STD_RELON_EXCEPTION_HV(0x5600, 0x1602, cbe_maintenance)
+#else
+#ifdef CONFIG_HVC_SCOM
+ STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
+#endif /* CONFIG_HVC_SCOM */
+#endif /* CONFIG_CBE_RAS */
+ STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+ STD_RELON_EXCEPTION_HV(0x5800, 0x1802, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
+
+ /* Other future vectors */
.align 7
+ .globl __end_interrupts
+__end_interrupts:
+
+ .align 7
+system_call_entry_direct:
+#if defined(CONFIG_RELOCATABLE)
+ /* The first level prologue may have used LR to get here, saving
+ * orig in r10. To save hacking/ifdeffing common code, restore here.
+ */
+ mtlr r10
+#endif
system_call_entry:
b system_call_common
@@ -714,21 +907,21 @@ data_access_common:
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
li r5,0x300
- b .do_hash_page /* Try to handle as hpte fault */
+ b .do_hash_page /* Try to handle as hpte fault */
.align 7
- .globl h_data_storage_common
+ .globl h_data_storage_common
h_data_storage_common:
- mfspr r10,SPRN_HDAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_HDSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
- bl .save_nvgprs
+ mfspr r10,SPRN_HDAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_HDSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+ bl .save_nvgprs
DISABLE_INTS
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unknown_exception
- b .ret_from_except
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unknown_exception
+ b .ret_from_except
.align 7
.globl instruction_access_common
@@ -741,7 +934,7 @@ instruction_access_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
- STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
/*
* Here is the common SLB miss user that is used when going to virtual
@@ -1152,6 +1345,21 @@ _GLOBAL(do_stab_bolted)
rfid
b . /* prevent speculative execution */
+
+ /* Equivalents to the above handlers for relocation-on interrupt vectors */
+ STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
+ STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
+ STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
+ STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
+
+ STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* Data area reserved for FWNMI option.
@@ -1164,7 +1372,7 @@ fwnmi_data_area:
/* pseries and powernv need to keep the whole page from
* 0x7000 to 0x8000 free for use by the firmware
*/
- . = 0x8000
+ . = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
/* Space for CPU0's segment table */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 58bddee8e1e..116f0868695 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -422,7 +422,7 @@ _STATIC(__after_prom_start)
tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
#endif
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
/*
* Check if the kernel has to be running as relocatable kernel based on the
* variable __run_at_load, if it is set the kernel is treated as relocatable
@@ -432,7 +432,8 @@ _STATIC(__after_prom_start)
cmplwi cr0,r7,1
bne 3f
- li r5,__end_interrupts - _stext /* just copy interrupts */
+ /* just copy interrupts */
+ LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
b 5f
3:
#endif
@@ -703,6 +704,7 @@ _INIT_STATIC(start_here_multiplatform)
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
/* Setup OPAL entry */
+ LOAD_REG_ADDR(r11, opal)
std r28,0(r11);
std r29,8(r11);
#endif
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 2099d9a879e..ea78761aa16 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -55,9 +55,6 @@ __setup("powersave=off", powersave_off);
*/
void cpu_idle(void)
{
- if (ppc_md.idle_loop)
- ppc_md.idle_loop(); /* doesn't return */
-
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
tick_nohz_idle_enter();
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 8226c6cb348..c862fd716fe 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -656,7 +656,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
struct iommu_pool *p;
/* number of bytes needed for the bitmap */
- sz = (tbl->it_size + 7) >> 3;
+ sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
if (!page)
@@ -708,7 +708,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
- unsigned long bitmap_sz, i;
+ unsigned long bitmap_sz;
unsigned int order;
if (!tbl || !tbl->it_map) {
@@ -718,17 +718,11 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
}
/* verify that table contains no entries */
- /* it_size is in entries, and we're examining 64 at a time */
- for (i = 0; i < (tbl->it_size/64); i++) {
- if (tbl->it_map[i] != 0) {
- printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
- __func__, node_name);
- break;
- }
- }
+ if (!bitmap_empty(tbl->it_map, tbl->it_size))
+ pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
/* calculate bitmap size in bytes */
- bitmap_sz = (tbl->it_size + 7) / 8;
+ bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
/* free bitmap */
order = get_order(bitmap_sz);
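The sizing change above matters because the bitmap helpers walk whole unsigned longs: the old expression rounded the TCE bitmap up to bytes, the new one rounds up to long-sized words. A quick user-space comparison with a hypothetical table size:

/*
 * Compare the two sizing expressions used for the TCE bitmap.
 * Numbers in the comment assume a 64-bit build.
 */
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long it_size = 100;	/* hypothetical table size in entries */
	unsigned long old_sz = (it_size + 7) >> 3;
	unsigned long new_sz = BITS_TO_LONGS(it_size) * sizeof(unsigned long);

	/* 100 entries: 13 bytes vs. 16 bytes on a 64-bit build */
	printf("old: %lu bytes, new: %lu bytes\n", old_sz, new_sz);
	return 0;
}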
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 867db1de894..a61b133c4f9 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -419,7 +419,7 @@ static void kvm_map_magic_page(void *data)
in[0] = KVM_MAGIC_PAGE;
in[1] = KVM_MAGIC_PAGE;
- kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
+ kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
*features = out[0];
}
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index bedd12e1cfb..0733b05eb85 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -387,7 +387,7 @@ void __init find_legacy_serial_ports(void)
of_node_put(parent);
continue;
}
- /* Check for known pciclass, and also check wether we have
+ /* Check for known pciclass, and also check whether we have
* a device with child nodes for ports or not
*/
if (of_device_is_compatible(np, "pciclass,0700") ||
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index fa9f6c72f55..e1ec57e87b3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -218,23 +218,23 @@ static void __init export_crashk_values(struct device_node *node)
* be sure what's in them, so remove them. */
prop = of_find_property(node, "linux,crashkernel-base", NULL);
if (prop)
- prom_remove_property(node, prop);
+ of_remove_property(node, prop);
prop = of_find_property(node, "linux,crashkernel-size", NULL);
if (prop)
- prom_remove_property(node, prop);
+ of_remove_property(node, prop);
if (crashk_res.start != 0) {
- prom_add_property(node, &crashk_base_prop);
+ of_add_property(node, &crashk_base_prop);
crashk_size = resource_size(&crashk_res);
- prom_add_property(node, &crashk_size_prop);
+ of_add_property(node, &crashk_size_prop);
}
/*
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
- prom_update_property(node, &memory_limit_prop);
+ of_update_property(node, &memory_limit_prop);
}
static int __init kexec_setup(void)
@@ -249,11 +249,11 @@ static int __init kexec_setup(void)
/* remove any stale properties so ours can be found */
prop = of_find_property(node, kernel_end_prop.name, NULL);
if (prop)
- prom_remove_property(node, prop);
+ of_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
kernel_end = __pa(_end);
- prom_add_property(node, &kernel_end_prop);
+ of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index d7f609086a9..7206701b1ff 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -389,14 +389,14 @@ static int __init export_htab_values(void)
/* remove any stale propertys so ours can be found */
prop = of_find_property(node, htab_base_prop.name, NULL);
if (prop)
- prom_remove_property(node, prop);
+ of_remove_property(node, prop);
prop = of_find_property(node, htab_size_prop.name, NULL);
if (prop)
- prom_remove_property(node, prop);
+ of_remove_property(node, prop);
htab_base = __pa(htab_address);
- prom_add_property(node, &htab_base_prop);
- prom_add_property(node, &htab_size_prop);
+ of_add_property(node, &htab_base_prop);
+ of_add_property(node, &htab_size_prop);
of_node_put(node);
return 0;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2049f2d00ff..9db8ec07ec9 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -82,7 +82,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
return -ENXIO;
/* Claim resources. This might need some rework as well depending
- * wether we are doing probe-only or not, like assigning unassigned
+ * whether we are doing probe-only or not, like assigning unassigned
* resources etc...
*/
pcibios_claim_one_bus(phb->bus);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 4b06ec5a502..64f526a321f 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -208,7 +208,7 @@ pci_create_OF_bus_map(void)
of_prop->name = "pci-OF-bus-map";
of_prop->length = 256;
of_prop->value = &of_prop[1];
- prom_add_property(dn, of_prop);
+ of_add_property(dn, of_prop);
of_node_put(dn);
}
}
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 19e4288d848..78b8766fd79 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -43,6 +43,7 @@
#include <asm/dcr.h>
#include <asm/ftrace.h>
#include <asm/switch_to.h>
+#include <asm/epapr_hcalls.h>
#ifdef CONFIG_PPC32
extern void transfer_to_handler(void);
@@ -191,3 +192,7 @@ EXPORT_SYMBOL(__arch_hweight64);
#ifdef CONFIG_PPC_BOOK3S_64
EXPORT_SYMBOL_GPL(mmu_psize_defs);
#endif
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+EXPORT_SYMBOL(epapr_hypercall_start);
+#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 37725e86651..8b6f7a99cce 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -32,6 +32,7 @@
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -49,11 +50,11 @@
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
+#include <asm/debug.h>
#include <mm/mmu_decl.h>
@@ -802,7 +803,7 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
int err;
switch (action) {
- case PSERIES_RECONFIG_ADD:
+ case OF_RECONFIG_ATTACH_NODE:
err = of_finish_dynamic_node(node);
if (err < 0)
printk(KERN_ERR "finish_node returned %d\n", err);
@@ -821,7 +822,7 @@ static struct notifier_block prom_reconfig_nb = {
static int __init prom_reconfig_setup(void)
{
- return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
+ return of_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cb6c123722a..779f34049a5 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -671,6 +671,7 @@ static void __init early_cmdline_parse(void)
#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
/* Option vector 2: Open Firmware options supported */
#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
@@ -707,6 +708,7 @@ static void __init early_cmdline_parse(void)
#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
@@ -719,6 +721,8 @@ static unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
+ W(0xffff0000), W(0x004b0000), /* POWER8 */
+ W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
@@ -728,7 +732,7 @@ static unsigned char ibm_architecture_vec[] = {
3 - 2, /* length */
0, /* don't ignore, don't halt */
OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
- OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06,
+ OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
/* option vector 2: Open Firmware options supported */
34 - 2, /* length */
@@ -755,7 +759,7 @@ static unsigned char ibm_architecture_vec[] = {
OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
/* option vector 5: PAPR/OF options */
- 18 - 2, /* length */
+ 19 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
@@ -769,13 +773,14 @@ static unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 101
+#define IBM_ARCH_VEC_NRCORES_OFFSET 117
W(NR_CPUS), /* number of cores supported */
0,
0,
0,
0,
OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
+ OV5_SUB_PROCESSORS,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 79d8e56470d..c4970004d44 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -952,6 +952,10 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
arch_bp_generic_fields(data &
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
+
+ /* Enable breakpoint */
+ attr.disabled = false;
+
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret) {
ptrace_put_breakpoints(task);
@@ -1037,7 +1041,7 @@ void ptrace_disable(struct task_struct *child)
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-static long set_intruction_bp(struct task_struct *child,
+static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
@@ -1338,6 +1342,12 @@ static int set_dac_range(struct task_struct *child,
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int len = 0;
+ struct thread_struct *thread = &(child->thread);
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr;
#endif
@@ -1365,7 +1375,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
return -EINVAL;
- return set_intruction_bp(child, bp_info);
+ return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
@@ -1381,13 +1391,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
- bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
- if (child->thread.dabr)
- return -ENOSPC;
-
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
@@ -1397,6 +1403,50 @@ static long ppc_set_hwdebug(struct task_struct *child,
dabr |= DABR_DATA_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dabr |= DABR_DATA_WRITE;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ if (ptrace_get_breakpoints(child) < 0)
+ return -ESRCH;
+
+ /*
+ * Check if the request is for 'range' breakpoints. We can
+ * support it if range < 8 bytes.
+ */
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
+ len = bp_info->addr2 - bp_info->addr;
+ } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ ptrace_put_breakpoints(child);
+ return -EINVAL;
+ }
+ bp = thread->ptrace_bps[0];
+ if (bp) {
+ ptrace_put_breakpoints(child);
+ return -ENOSPC;
+ }
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
+ attr.bp_len = len;
+ arch_bp_generic_fields(dabr & (DABR_DATA_WRITE | DABR_DATA_READ),
+ &attr.bp_type);
+
+ thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
+ ptrace_triggered, NULL, child);
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[0] = NULL;
+ ptrace_put_breakpoints(child);
+ return PTR_ERR(bp);
+ }
+
+ ptrace_put_breakpoints(child);
+ return 1;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
+ return -EINVAL;
+
+ if (child->thread.dabr)
+ return -ENOSPC;
child->thread.dabr = dabr;
child->thread.dabrx = DABRX_ALL;
@@ -1405,8 +1455,13 @@ static long ppc_set_hwdebug(struct task_struct *child,
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
}
-static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
+static long ppc_del_hwdebug(struct task_struct *child, long data)
{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret = 0;
+ struct thread_struct *thread = &(child->thread);
+ struct perf_event *bp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
int rc;
@@ -1426,10 +1481,25 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
#else
if (data != 1)
return -EINVAL;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ if (ptrace_get_breakpoints(child) < 0)
+ return -ESRCH;
+
+ bp = thread->ptrace_bps[0];
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[0] = NULL;
+ } else
+ ret = -ENOENT;
+ ptrace_put_breakpoints(child);
+ return ret;
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
if (child->thread.dabr == 0)
return -ENOENT;
child->thread.dabr = 0;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
return 0;
#endif
@@ -1536,7 +1606,11 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.data_bp_alignment = 4;
#endif
dbginfo.sizeof_condition = 0;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
+#else
dbginfo.features = 0;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
if (!access_ok(VERIFY_WRITE, datavp,
@@ -1563,7 +1637,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
case PPC_PTRACE_DELHWDEBUG: {
- ret = ppc_del_hwdebug(child, addr, data);
+ ret = ppc_del_hwdebug(child, data);
break;
}
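
Editor's note: with this change, PPC_PTRACE_SETHWDEBUG on DABR-only (book3s) hardware is routed through the perf hw-breakpoint layer and gains inclusive-range watchpoints, advertised to userspace via PPC_DEBUG_FEATURE_DATA_BP_RANGE. The following is a hedged userspace sketch, for a powerpc host, of how a debugger might probe the feature and set a short ranged write watchpoint; the constants and structures come from <asm/ptrace.h>, and error handling is abbreviated.

#include <string.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* ppc_debug_info, ppc_hw_breakpoint */

/* Returns the debug-register slot handle (>= 1) on success, negative on error. */
static long set_range_watchpoint(pid_t pid, unsigned long start, unsigned long end)
{
	struct ppc_debug_info dbginfo;
	struct ppc_hw_breakpoint bp;

	if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &dbginfo) < 0)
		return -1;
	if (!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
		return -1;	/* kernel/hardware can only do exact matches */

	memset(&bp, 0, sizeof(bp));
	bp.version        = PPC_DEBUG_CURRENT_VERSION;
	bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode      = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr  = start;
	bp.addr2 = end;		/* only short (sub-8-byte) ranges are accepted here */

	return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
}

/* Later, ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot) removes it again. */
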
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index fcec38241f7..1fd6e7b2f39 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,6 @@
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
-#include <asm/pSeries_reconfig.h>
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 20b0120db0c..8329190312c 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -650,10 +650,8 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
int token;
dp->data = kzalloc(buf_size, GFP_KERNEL);
- if (dp->data == NULL) {
- remove_flash_pde(dp);
+ if (dp->data == NULL)
return -ENOMEM;
- }
/*
* This code assumes that the status int is the first member of the
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index efb6a41b313..6da881b35da 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -601,6 +601,11 @@ void __init setup_arch(char **cmdline_p)
kvm_linear_init();
+ /* Interrupt code needs to be 64K-aligned */
+ if ((unsigned long)_stext & 0xffff)
+ panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+ (unsigned long)_stext);
+
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d183f8719a5..1ca045d4432 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -83,7 +83,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* the context). This is very important because we must ensure we
* don't lose the VRSAVE content that may have been set prior to
* the process doing its first vector operation
- * Userland shall check AT_HWCAP to know wether it can rely on the
+ * Userland shall check AT_HWCAP to know whether it can rely on the
* v_regs pointer or not
*/
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 2b952b5386f..e5b133ebd8a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,6 +427,45 @@ int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
+
+static atomic_t secondary_inhibit_count;
+
+/*
+ * Don't allow secondary CPU threads to come online
+ */
+void inhibit_secondary_onlining(void)
+{
+ /*
+ * This makes secondary_inhibit_count stable during cpu
+ * online/offline operations.
+ */
+ get_online_cpus();
+
+ atomic_inc(&secondary_inhibit_count);
+ put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);
+
+/*
+ * Allow secondary CPU threads to come online again
+ */
+void uninhibit_secondary_onlining(void)
+{
+ get_online_cpus();
+ atomic_dec(&secondary_inhibit_count);
+ put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
+
+static int secondaries_inhibited(void)
+{
+ return atomic_read(&secondary_inhibit_count);
+}
+
+#else /* HOTPLUG_CPU */
+
+#define secondaries_inhibited() 0
+
#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
@@ -445,6 +484,13 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
+ /*
+ * Don't allow secondary threads to come online if inhibited
+ */
+ if (threads_per_core > 1 && secondaries_inhibited() &&
+ cpu % threads_per_core != 0)
+ return -EBUSY;
+
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
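
Editor's note: inhibit_secondary_onlining() only bumps a counter under get_online_cpus(), and __cpu_up() above refuses to online non-primary threads while that counter is non-zero. In this series the intended caller is HV KVM, which needs the sibling threads of a core offline and napping while a guest runs. A rough sketch of that usage pattern follows; the my_hv_* names are placeholders, not the real book3s_hv.c functions.

/* Sketch: keep sibling threads offline for the lifetime of an HV guest. */
static int my_hv_init_vm(void)
{
	/* __cpu_up() now returns -EBUSY for cpu % threads_per_core != 0 */
	inhibit_secondary_onlining();
	return 0;
}

static void my_hv_destroy_vm(void)
{
	/* the inhibit count nests, so per-VM pairs of calls are fine */
	uninhibit_secondary_onlining();
}
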
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 9c2ed90ece8..8a93778ed9f 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -175,19 +175,10 @@ asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 a
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
-asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs ();
-
- /* The __user pointer cast is valid because of the set_fs() */
- set_fs (KERNEL_DS);
- ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t);
- set_fs (old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
+asmlinkage long compat_sys_sched_rr_get_interval_wrapper(u32 pid,
+ struct compat_timespec __user *interval)
+{
+ return compat_sys_sched_rr_get_interval((int)pid, interval);
}
/* Note: it is necessary to treat mode as an unsigned int,
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index c39c1ca77f4..f9748498fe5 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -122,29 +122,6 @@ int udbg_write(const char *s, int n)
return n - remain;
}
-int udbg_read(char *buf, int buflen)
-{
- char *p = buf;
- int i, c;
-
- if (!udbg_getc)
- return 0;
-
- for (i = 0; i < buflen; ++i) {
- do {
- c = udbg_getc();
- if (c == -1 && i == 0)
- return -1;
-
- } while (c == 0x11 || c == 0x13);
- if (c == 0 || c == -1)
- break;
- *p++ = c;
- }
-
- return i;
-}
-
#define UDBG_BUFSIZE 256
void udbg_printf(const char *fmt, ...)
{
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 50e7dbc7356..3d7fd21c65f 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -83,6 +83,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu_44x->shadow_refs[i].gtlb_index = -1;
vcpu->arch.cpu_type = KVM_CPU_440;
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
return 0;
}
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index c8c61578fdf..35ec0a8547d 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -27,12 +27,70 @@
#include "booke.h"
#include "44x_tlb.h"
+#define XOP_MFDCRX 259
#define XOP_MFDCR 323
+#define XOP_MTDCRX 387
#define XOP_MTDCR 451
#define XOP_TLBSX 914
#define XOP_ICCCI 966
#define XOP_TLBWE 978
+static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
+{
+ /* emulate some access in kernel */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
+ return EMULATE_DONE;
+ default:
+ vcpu->run->dcr.dcrn = dcrn;
+ vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
+ vcpu->run->dcr.is_write = 1;
+ vcpu->arch.dcr_is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ return EMULATE_DO_DCR;
+ }
+}
+
+static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
+{
+ /* The guest may access CPR0 registers to determine the timebase
+ * frequency, and it must know the real host frequency because it
+ * can directly access the timebase registers.
+ *
+ * It would be possible to emulate those accesses in userspace,
+ * but userspace can really only figure out the end frequency.
+ * We could decompose that into the factors that compute it, but
+ * that's tricky math, and it's easier to just report the real
+ * CPR0 values.
+ */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
+ break;
+ case DCRN_CPR0_CONFIG_DATA:
+ local_irq_disable();
+ mtdcr(DCRN_CPR0_CONFIG_ADDR,
+ vcpu->arch.cpr0_cfgaddr);
+ kvmppc_set_gpr(vcpu, rt,
+ mfdcr(DCRN_CPR0_CONFIG_DATA));
+ local_irq_enable();
+ break;
+ default:
+ vcpu->run->dcr.dcrn = dcrn;
+ vcpu->run->dcr.data = 0;
+ vcpu->run->dcr.is_write = 0;
+ vcpu->arch.dcr_is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ return EMULATE_DO_DCR;
+ }
+
+ return EMULATE_DONE;
+}
+
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -50,55 +108,21 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case XOP_MFDCR:
- /* The guest may access CPR0 registers to determine the timebase
- * frequency, and it must know the real host frequency because it
- * can directly access the timebase registers.
- *
- * It would be possible to emulate those accesses in userspace,
- * but userspace can really only figure out the end frequency.
- * We could decompose that into the factors that compute it, but
- * that's tricky math, and it's easier to just report the real
- * CPR0 values.
- */
- switch (dcrn) {
- case DCRN_CPR0_CONFIG_ADDR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
- break;
- case DCRN_CPR0_CONFIG_DATA:
- local_irq_disable();
- mtdcr(DCRN_CPR0_CONFIG_ADDR,
- vcpu->arch.cpr0_cfgaddr);
- kvmppc_set_gpr(vcpu, rt,
- mfdcr(DCRN_CPR0_CONFIG_DATA));
- local_irq_enable();
- break;
- default:
- run->dcr.dcrn = dcrn;
- run->dcr.data = 0;
- run->dcr.is_write = 0;
- vcpu->arch.io_gpr = rt;
- vcpu->arch.dcr_needed = 1;
- kvmppc_account_exit(vcpu, DCR_EXITS);
- emulated = EMULATE_DO_DCR;
- }
+ emulated = emulate_mfdcr(vcpu, rt, dcrn);
+ break;
+ case XOP_MFDCRX:
+ emulated = emulate_mfdcr(vcpu, rt,
+ kvmppc_get_gpr(vcpu, ra));
break;
case XOP_MTDCR:
- /* emulate some access in kernel */
- switch (dcrn) {
- case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
- break;
- default:
- run->dcr.dcrn = dcrn;
- run->dcr.data = kvmppc_get_gpr(vcpu, rs);
- run->dcr.is_write = 1;
- vcpu->arch.dcr_needed = 1;
- kvmppc_account_exit(vcpu, DCR_EXITS);
- emulated = EMULATE_DO_DCR;
- }
+ emulated = emulate_mtdcr(vcpu, rs, dcrn);
+ break;
+ case XOP_MTDCRX:
+ emulated = emulate_mtdcr(vcpu, rs,
+ kvmppc_get_gpr(vcpu, ra));
break;
case XOP_TLBWE:
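
Editor's note: the refactor above splits mfdcr/mtdcr handling into helpers so the new indexed forms (mfdcrx/mtdcrx, which take the DCR number from a GPR rather than from the instruction's split DCRN field) can reuse them. Accesses the kernel does not emulate itself return EMULATE_DO_DCR, which surfaces to userspace as a KVM_EXIT_DCR exit. Below is a hedged sketch of how a userspace VMM might complete such an exit; model_dcr_read/write are hypothetical device-model hooks.

#include <linux/kvm.h>

extern void model_dcr_write(unsigned int dcrn, unsigned int val);	/* hypothetical */
extern unsigned int model_dcr_read(unsigned int dcrn);			/* hypothetical */

static void handle_dcr_exit(struct kvm_run *run)
{
	if (run->dcr.is_write)
		model_dcr_write(run->dcr.dcrn, run->dcr.data);
	else
		run->dcr.data = model_dcr_read(run->dcr.dcrn);
	/* The next KVM_RUN ioctl completes the access: for reads the kernel
	 * copies run->dcr.data into the guest's target GPR (vcpu->arch.io_gpr). */
}
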
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f4dacb9c57f..4730c953f43 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_EVENTFD
config KVM_BOOK3S_HANDLER
bool
@@ -36,6 +37,7 @@ config KVM_BOOK3S_64_HANDLER
config KVM_BOOK3S_PR
bool
select KVM_MMIO
+ select MMU_NOTIFIER
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
@@ -123,6 +125,7 @@ config KVM_E500V2
depends on EXPERIMENTAL && E500 && !PPC_E500MC
select KVM
select KVM_MMIO
+ select MMU_NOTIFIER
---help---
Support running unmodified E500 guest kernels in virtual machines on
E500v2 host processors.
@@ -138,6 +141,7 @@ config KVM_E500MC
select KVM
select KVM_MMIO
select KVM_BOOKE_HV
+ select MMU_NOTIFIER
---help---
Support running unmodified E500MC/E5500 (32-bit) guest kernels in
virtual machines on E500MC/E5500 host processors.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index c2a08636e6d..1e473d46322 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -6,7 +6,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
+ eventfd.o)
CFLAGS_44x_tlb.o := -I.
CFLAGS_e500_tlb.o := -I.
@@ -72,10 +73,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_64_vio_hv.o \
+ book3s_hv_ras.o \
book3s_hv_builtin.o
kvm-book3s_64-module-objs := \
../../../virt/kvm/kvm_main.o \
+ ../../../virt/kvm/eventfd.o \
powerpc.o \
emulate.o \
book3s.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3f2a8360c85..a4b64528524 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -411,6 +411,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
@@ -476,6 +485,122 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -ENOTSUPP;
}
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r;
+ union kvmppc_one_reg val;
+ int size;
+ long int i;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
+
+ r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+
+ if (r == -EINVAL) {
+ r = 0;
+ switch (reg->id) {
+ case KVM_REG_PPC_DAR:
+ val = get_reg_val(reg->id, vcpu->arch.shared->dar);
+ break;
+ case KVM_REG_PPC_DSISR:
+ val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
+ break;
+ case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+ i = reg->id - KVM_REG_PPC_FPR0;
+ val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+ break;
+ case KVM_REG_PPC_FPSCR:
+ val = get_reg_val(reg->id, vcpu->arch.fpscr);
+ break;
+#ifdef CONFIG_ALTIVEC
+ case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+ break;
+#endif /* CONFIG_ALTIVEC */
+ default:
+ r = -EINVAL;
+ break;
+ }
+ }
+ if (r)
+ return r;
+
+ if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
+ r = -EFAULT;
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r;
+ union kvmppc_one_reg val;
+ int size;
+ long int i;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
+
+ if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
+ return -EFAULT;
+
+ r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+
+ if (r == -EINVAL) {
+ r = 0;
+ switch (reg->id) {
+ case KVM_REG_PPC_DAR:
+ vcpu->arch.shared->dar = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_DSISR:
+ vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+ i = reg->id - KVM_REG_PPC_FPR0;
+ vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_FPSCR:
+ vcpu->arch.fpscr = set_reg_val(reg->id, val);
+ break;
+#ifdef CONFIG_ALTIVEC
+ case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+ break;
+#endif /* CONFIG_ALTIVEC */
+ default:
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ return r;
+}
+
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
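
Editor's note: kvm_vcpu_ioctl_get/set_one_reg first offer the register to the subarch via kvmppc_get/set_one_reg and only then fall back to the generic registers handled here; the transfer size is encoded in the register ID, so 64-bit FPRs and 128-bit Altivec VRs share the same copy path. A hedged userspace sketch, for a powerpc host, of reading two such registers through KVM_GET_ONE_REG:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read guest FPR3 and VR0 from a vcpu fd; error handling omitted for brevity. */
static void dump_some_regs(int vcpu_fd)
{
	struct kvm_one_reg reg;
	uint64_t fpr3;
	unsigned char vr0[16];		/* VRs are 128-bit registers */

	reg.id   = KVM_REG_PPC_FPR0 + 3;	/* FPR IDs are consecutive */
	reg.addr = (uintptr_t)&fpr3;
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);

	reg.id   = KVM_REG_PPC_VR0;		/* returns -ENXIO without Altivec */
	reg.addr = (uintptr_t)vr0;
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
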
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index b0f625a3334..00e619bf608 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -155,7 +155,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
- if (is_error_pfn(hpaddr)) {
+ if (is_error_noslot_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
r = -EINVAL;
@@ -254,6 +254,7 @@ next_pteg:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
return r;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 4d72f9ebc55..ead58e31729 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -93,7 +93,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
- if (is_error_pfn(hpaddr)) {
+ if (is_error_noslot_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
r = -EINVAL;
goto out;
@@ -171,6 +171,7 @@ map_again:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
}
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
return r;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d95d11322a1..8cc18abd6dd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -24,6 +24,9 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
+#include <linux/srcu.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -40,6 +43,11 @@
/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER 18
+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh,
+ unsigned long ptel, unsigned long *pte_idx_ret);
+static void kvmppc_rmap_reset(struct kvm *kvm);
+
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
unsigned long hpt;
@@ -137,10 +145,11 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
/* Set the entire HPT to 0, i.e. invalid HPTEs */
memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
/*
- * Set the whole last_vcpu array to an invalid vcpu number.
- * This ensures that each vcpu will flush its TLB on next entry.
+ * Reset all the reverse-mapping chains for all memslots
*/
- memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
+ kvmppc_rmap_reset(kvm);
+ /* Ensure that each vcpu will flush its TLB on next entry. */
+ cpumask_setall(&kvm->arch.need_tlb_flush);
*htab_orderp = order;
err = 0;
} else {
@@ -184,6 +193,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
unsigned long addr, hash;
unsigned long psize;
unsigned long hp0, hp1;
+ unsigned long idx_ret;
long ret;
struct kvm *kvm = vcpu->kvm;
@@ -215,7 +225,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
hash = (hash << 3) + 7;
hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
hp_r = hp1 | addr;
- ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
+ ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
+ &idx_ret);
if (ret != H_SUCCESS) {
pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
addr, ret);
@@ -260,7 +271,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
/*
* This is called to get a reference to a guest page if there isn't
- * one already in the kvm->arch.slot_phys[][] arrays.
+ * one already in the memslot->arch.slot_phys[] array.
*/
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
struct kvm_memory_slot *memslot,
@@ -275,7 +286,7 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
struct vm_area_struct *vma;
unsigned long pfn, i, npages;
- physp = kvm->arch.slot_phys[memslot->id];
+ physp = memslot->arch.slot_phys;
if (!physp)
return -EINVAL;
if (physp[gfn - memslot->base_gfn])
@@ -353,15 +364,10 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
return err;
}
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh,
+ unsigned long ptel, unsigned long *pte_idx_ret)
{
- struct kvm *kvm = vcpu->kvm;
unsigned long psize, gpa, gfn;
struct kvm_memory_slot *memslot;
long ret;
@@ -389,8 +395,8 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
do_insert:
/* Protect linux PTE lookup from page table destruction */
rcu_read_lock_sched(); /* this disables preemption too */
- vcpu->arch.pgdir = current->mm->pgd;
- ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
+ ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
+ current->mm->pgd, false, pte_idx_ret);
rcu_read_unlock_sched();
if (ret == H_TOO_HARD) {
/* this can't happen */
@@ -401,6 +407,19 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
}
+/*
+ * We come here on a H_ENTER call from the guest when we are not
+ * using mmu notifiers and we don't have the requested page pinned
+ * already.
+ */
+long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh,
+ unsigned long ptel)
+{
+ return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
+ pteh, ptel, &vcpu->arch.gpr[4]);
+}
+
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
@@ -570,7 +589,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
unsigned long *hptep, hpte[3], r;
unsigned long mmu_seq, psize, pte_size;
- unsigned long gfn, hva, pfn;
+ unsigned long gpa, gfn, hva, pfn;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
struct revmap_entry *rev;
@@ -608,15 +627,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Translate the logical address and get the page */
psize = hpte_page_size(hpte[0], r);
- gfn = hpte_rpn(r, psize);
+ gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
+ gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
/* No memslot means it's an emulated MMIO region */
- if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
- unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
- }
if (!kvm->arch.using_mmu_notifiers)
return -EFAULT; /* should never get here */
@@ -710,7 +728,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
- if (mmu_notifier_retry(vcpu, mmu_seq)) {
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
@@ -756,6 +774,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
goto out_put;
}
+static void kvmppc_rmap_reset(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ slots = kvm->memslots;
+ kvm_for_each_memslot(memslot, slots) {
+ /*
+ * This assumes it is acceptable to lose reference and
+ * change bits across a reset.
+ */
+ memset(memslot->arch.rmap, 0,
+ memslot->npages * sizeof(*memslot->arch.rmap));
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
static int kvm_handle_hva_range(struct kvm *kvm,
unsigned long start,
unsigned long end,
@@ -850,7 +887,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
psize = hpte_page_size(hptep[0], ptel);
if ((hptep[0] & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
- hptep[0] |= HPTE_V_ABSENT;
+ if (kvm->arch.using_mmu_notifiers)
+ hptep[0] |= HPTE_V_ABSENT;
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
@@ -877,6 +915,28 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
return 0;
}
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ unsigned long *rmapp;
+ unsigned long gfn;
+ unsigned long n;
+
+ rmapp = memslot->arch.rmap;
+ gfn = memslot->base_gfn;
+ for (n = memslot->npages; n; --n) {
+ /*
+ * Testing the present bit without locking is OK because
+ * the memslot has been marked invalid already, and hence
+ * no new HPTEs referencing this page can be created,
+ * thus the present bit can't go from 0 to 1.
+ */
+ if (*rmapp & KVMPPC_RMAP_PRESENT)
+ kvm_unmap_rmapp(kvm, rmapp, gfn);
+ ++rmapp;
+ ++gfn;
+ }
+}
+
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long gfn)
{
@@ -1030,16 +1090,16 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
return ret;
}
-long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long *map)
{
unsigned long i;
- unsigned long *rmapp, *map;
+ unsigned long *rmapp;
preempt_disable();
rmapp = memslot->arch.rmap;
- map = memslot->dirty_bitmap;
for (i = 0; i < memslot->npages; ++i) {
- if (kvm_test_clear_dirty(kvm, rmapp))
+ if (kvm_test_clear_dirty(kvm, rmapp) && map)
__set_bit_le(i, map);
++rmapp;
}
@@ -1057,20 +1117,22 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
unsigned long hva, psize, offset;
unsigned long pa;
unsigned long *physp;
+ int srcu_idx;
+ srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
- return NULL;
+ goto err;
if (!kvm->arch.using_mmu_notifiers) {
- physp = kvm->arch.slot_phys[memslot->id];
+ physp = memslot->arch.slot_phys;
if (!physp)
- return NULL;
+ goto err;
physp += gfn - memslot->base_gfn;
pa = *physp;
if (!pa) {
if (kvmppc_get_guest_page(kvm, gfn, memslot,
PAGE_SIZE) < 0)
- return NULL;
+ goto err;
pa = *physp;
}
page = pfn_to_page(pa >> PAGE_SHIFT);
@@ -1079,9 +1141,11 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages);
if (npages < 1)
- return NULL;
+ goto err;
page = pages[0];
}
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
psize = PAGE_SIZE;
if (PageHuge(page)) {
page = compound_head(page);
@@ -1091,6 +1155,10 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
if (nb_ret)
*nb_ret = psize - offset;
return page_address(page) + offset;
+
+ err:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return NULL;
}
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
@@ -1100,6 +1168,348 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
put_page(page);
}
+/*
+ * Functions for reading and writing the hash table via reads and
+ * writes on a file descriptor.
+ *
+ * Reads return the guest view of the hash table, which has to be
+ * pieced together from the real hash table and the guest_rpte
+ * values in the revmap array.
+ *
+ * On writes, each HPTE written is considered in turn, and if it
+ * is valid, it is written to the HPT as if an H_ENTER with the
+ * exact flag set was done. When the invalid count is non-zero
+ * in the header written to the stream, the kernel will make
+ * sure that that many HPTEs are invalid, and invalidate them
+ * if not.
+ */
+
+struct kvm_htab_ctx {
+ unsigned long index;
+ unsigned long flags;
+ struct kvm *kvm;
+ int first_pass;
+};
+
+#define HPTE_SIZE (2 * sizeof(unsigned long))
+
+static long record_hpte(unsigned long flags, unsigned long *hptp,
+ unsigned long *hpte, struct revmap_entry *revp,
+ int want_valid, int first_pass)
+{
+ unsigned long v, r;
+ int ok = 1;
+ int valid, dirty;
+
+ /* Unmodified entries are uninteresting except on the first pass */
+ dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+ if (!first_pass && !dirty)
+ return 0;
+
+ valid = 0;
+ if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+ valid = 1;
+ if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
+ !(hptp[0] & HPTE_V_BOLTED))
+ valid = 0;
+ }
+ if (valid != want_valid)
+ return 0;
+
+ v = r = 0;
+ if (valid || dirty) {
+ /* lock the HPTE so it's stable and read it */
+ preempt_disable();
+ while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
+ cpu_relax();
+ v = hptp[0];
+ if (v & HPTE_V_ABSENT) {
+ v &= ~HPTE_V_ABSENT;
+ v |= HPTE_V_VALID;
+ }
+ /* re-evaluate valid and dirty from synchronized HPTE value */
+ valid = !!(v & HPTE_V_VALID);
+ if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
+ valid = 0;
+ r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C));
+ dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+ /* only clear modified if this is the right sort of entry */
+ if (valid == want_valid && dirty) {
+ r &= ~HPTE_GR_MODIFIED;
+ revp->guest_rpte = r;
+ }
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+ hptp[0] &= ~HPTE_V_HVLOCK;
+ preempt_enable();
+ if (!(valid == want_valid && (first_pass || dirty)))
+ ok = 0;
+ }
+ hpte[0] = v;
+ hpte[1] = r;
+ return ok;
+}
+
+static ssize_t kvm_htab_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kvm_htab_ctx *ctx = file->private_data;
+ struct kvm *kvm = ctx->kvm;
+ struct kvm_get_htab_header hdr;
+ unsigned long *hptp;
+ struct revmap_entry *revp;
+ unsigned long i, nb, nw;
+ unsigned long __user *lbuf;
+ struct kvm_get_htab_header __user *hptr;
+ unsigned long flags;
+ int first_pass;
+ unsigned long hpte[2];
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ first_pass = ctx->first_pass;
+ flags = ctx->flags;
+
+ i = ctx->index;
+ hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ revp = kvm->arch.revmap + i;
+ lbuf = (unsigned long __user *)buf;
+
+ nb = 0;
+ while (nb + sizeof(hdr) + HPTE_SIZE < count) {
+ /* Initialize header */
+ hptr = (struct kvm_get_htab_header __user *)buf;
+ hdr.n_valid = 0;
+ hdr.n_invalid = 0;
+ nw = nb;
+ nb += sizeof(hdr);
+ lbuf = (unsigned long __user *)(buf + sizeof(hdr));
+
+ /* Skip uninteresting entries, i.e. clean on not-first pass */
+ if (!first_pass) {
+ while (i < kvm->arch.hpt_npte &&
+ !(revp->guest_rpte & HPTE_GR_MODIFIED)) {
+ ++i;
+ hptp += 2;
+ ++revp;
+ }
+ }
+ hdr.index = i;
+
+ /* Grab a series of valid entries */
+ while (i < kvm->arch.hpt_npte &&
+ hdr.n_valid < 0xffff &&
+ nb + HPTE_SIZE < count &&
+ record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
+ /* valid entry, write it out */
+ ++hdr.n_valid;
+ if (__put_user(hpte[0], lbuf) ||
+ __put_user(hpte[1], lbuf + 1))
+ return -EFAULT;
+ nb += HPTE_SIZE;
+ lbuf += 2;
+ ++i;
+ hptp += 2;
+ ++revp;
+ }
+ /* Now skip invalid entries while we can */
+ while (i < kvm->arch.hpt_npte &&
+ hdr.n_invalid < 0xffff &&
+ record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
+ /* found an invalid entry */
+ ++hdr.n_invalid;
+ ++i;
+ hptp += 2;
+ ++revp;
+ }
+
+ if (hdr.n_valid || hdr.n_invalid) {
+ /* write back the header */
+ if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
+ return -EFAULT;
+ nw = nb;
+ buf = (char __user *)lbuf;
+ } else {
+ nb = nw;
+ }
+
+ /* Check if we've wrapped around the hash table */
+ if (i >= kvm->arch.hpt_npte) {
+ i = 0;
+ ctx->first_pass = 0;
+ break;
+ }
+ }
+
+ ctx->index = i;
+
+ return nb;
+}
+
+static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kvm_htab_ctx *ctx = file->private_data;
+ struct kvm *kvm = ctx->kvm;
+ struct kvm_get_htab_header hdr;
+ unsigned long i, j;
+ unsigned long v, r;
+ unsigned long __user *lbuf;
+ unsigned long *hptp;
+ unsigned long tmp[2];
+ ssize_t nb;
+ long int err, ret;
+ int rma_setup;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ /* lock out vcpus from running while we're doing this */
+ mutex_lock(&kvm->lock);
+ rma_setup = kvm->arch.rma_setup_done;
+ if (rma_setup) {
+ kvm->arch.rma_setup_done = 0; /* temporarily */
+ /* order rma_setup_done vs. vcpus_running */
+ smp_mb();
+ if (atomic_read(&kvm->arch.vcpus_running)) {
+ kvm->arch.rma_setup_done = 1;
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
+ }
+
+ err = 0;
+ for (nb = 0; nb + sizeof(hdr) <= count; ) {
+ err = -EFAULT;
+ if (__copy_from_user(&hdr, buf, sizeof(hdr)))
+ break;
+
+ err = 0;
+ if (nb + hdr.n_valid * HPTE_SIZE > count)
+ break;
+
+ nb += sizeof(hdr);
+ buf += sizeof(hdr);
+
+ err = -EINVAL;
+ i = hdr.index;
+ if (i >= kvm->arch.hpt_npte ||
+ i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+ break;
+
+ hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ lbuf = (unsigned long __user *)buf;
+ for (j = 0; j < hdr.n_valid; ++j) {
+ err = -EFAULT;
+ if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
+ goto out;
+ err = -EINVAL;
+ if (!(v & HPTE_V_VALID))
+ goto out;
+ lbuf += 2;
+ nb += HPTE_SIZE;
+
+ if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+ kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+ err = -EIO;
+ ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
+ tmp);
+ if (ret != H_SUCCESS) {
+ pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
+ "r=%lx\n", ret, i, v, r);
+ goto out;
+ }
+ if (!rma_setup && is_vrma_hpte(v)) {
+ unsigned long psize = hpte_page_size(v, r);
+ unsigned long senc = slb_pgsize_encoding(psize);
+ unsigned long lpcr;
+
+ kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
+ lpcr |= senc << (LPCR_VRMASD_SH - 4);
+ kvm->arch.lpcr = lpcr;
+ rma_setup = 1;
+ }
+ ++i;
+ hptp += 2;
+ }
+
+ for (j = 0; j < hdr.n_invalid; ++j) {
+ if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+ kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+ ++i;
+ hptp += 2;
+ }
+ err = 0;
+ }
+
+ out:
+ /* Order HPTE updates vs. rma_setup_done */
+ smp_wmb();
+ kvm->arch.rma_setup_done = rma_setup;
+ mutex_unlock(&kvm->lock);
+
+ if (err)
+ return err;
+ return nb;
+}
+
+static int kvm_htab_release(struct inode *inode, struct file *filp)
+{
+ struct kvm_htab_ctx *ctx = filp->private_data;
+
+ filp->private_data = NULL;
+ if (!(ctx->flags & KVM_GET_HTAB_WRITE))
+ atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
+ kvm_put_kvm(ctx->kvm);
+ kfree(ctx);
+ return 0;
+}
+
+static struct file_operations kvm_htab_fops = {
+ .read = kvm_htab_read,
+ .write = kvm_htab_write,
+ .llseek = default_llseek,
+ .release = kvm_htab_release,
+};
+
+int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
+{
+ int ret;
+ struct kvm_htab_ctx *ctx;
+ int rwflag;
+
+ /* reject flags we don't recognize */
+ if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
+ return -EINVAL;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ kvm_get_kvm(kvm);
+ ctx->kvm = kvm;
+ ctx->index = ghf->start_index;
+ ctx->flags = ghf->flags;
+ ctx->first_pass = 1;
+
+ rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
+ ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
+ if (ret < 0) {
+ kvm_put_kvm(kvm);
+ return ret;
+ }
+
+ if (rwflag == O_RDONLY) {
+ mutex_lock(&kvm->slots_lock);
+ atomic_inc(&kvm->arch.hpte_mod_interest);
+ /* make sure kvmppc_do_h_enter etc. see the increment */
+ synchronize_srcu_expedited(&kvm->srcu);
+ mutex_unlock(&kvm->slots_lock);
+ }
+
+ return ret;
+}
+
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
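
Editor's note: the file-descriptor interface added above is what lets userspace snapshot and restore the guest hashed page table for migration. KVM_PPC_GET_HTAB_FD returns an fd; each read yields a stream of kvm_get_htab_header records, each followed by n_valid (v, r) HPTE pairs and implying n_invalid empty slots, and the first pass returns everything while later passes return only entries dirtied since the previous read. A hedged userspace sketch of the read side follows; buffer sizing and error handling are simplified, and send_to_destination is a hypothetical migration sink.

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

extern void send_to_destination(const void *buf, size_t len);	/* hypothetical */

/* One migration pass over the guest HPT.  vm_fd is the KVM VM fd. */
static int save_htab_pass(int vm_fd)
{
	struct kvm_get_htab_fd ghf = {
		.flags       = 0,	/* or KVM_GET_HTAB_BOLTED_ONLY */
		.start_index = 0,
	};
	char buf[16384];
	ssize_t n;
	int fd;

	fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
	if (fd < 0)
		return -1;

	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + n) {
			struct kvm_get_htab_header *hdr = (void *)p;
			size_t rec = sizeof(*hdr) +
				     hdr->n_valid * 2 * sizeof(uint64_t);

			/* hdr->index: first HPT slot this record covers,
			 * hdr->n_valid: HPTE pairs that follow,
			 * hdr->n_invalid: slots to treat as empty on restore */
			send_to_destination(hdr, rec);
			p += rec;
		}
	}
	close(fd);
	return 0;
}
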
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index b9a989dc76c..d31a716f7f2 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -22,6 +22,7 @@
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
+#include <asm/time.h>
#define OP_19_XOP_RFID 18
#define OP_19_XOP_RFI 50
@@ -395,6 +396,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
(mfmsr() & MSR_HV))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
break;
+ case SPRN_PURR:
+ to_book3s(vcpu)->purr_offset = spr_val - get_tb();
+ break;
+ case SPRN_SPURR:
+ to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
+ break;
case SPRN_GQR0:
case SPRN_GQR1:
case SPRN_GQR2:
@@ -412,6 +419,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_CTRLF:
case SPRN_CTRLT:
case SPRN_L2CR:
+ case SPRN_DSCR:
case SPRN_MMCR0_GEKKO:
case SPRN_MMCR1_GEKKO:
case SPRN_PMC1_GEKKO:
@@ -483,9 +491,15 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
*spr_val = to_book3s(vcpu)->hid[5];
break;
case SPRN_CFAR:
- case SPRN_PURR:
+ case SPRN_DSCR:
*spr_val = 0;
break;
+ case SPRN_PURR:
+ *spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
+ break;
+ case SPRN_SPURR:
+ *spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
+ break;
case SPRN_GQR0:
case SPRN_GQR1:
case SPRN_GQR2:
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index a150817d6d4..7057a02f090 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -28,8 +28,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#endif
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
-#endif
#endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 721d4603a23..71d0c90b62b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -30,6 +30,7 @@
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
+#include <linux/srcu.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -46,6 +47,7 @@
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
+#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -55,25 +57,77 @@
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
+/* Used to indicate that a guest page fault needs to be handled */
+#define RESUME_PAGE_FAULT (RESUME_GUEST | RESUME_FLAG_ARCH1)
+
+/* Used as a "null" value for timebase values */
+#define TB_NIL (~(u64)0)
+
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+/*
+ * We use the vcpu_load/put functions to measure stolen time.
+ * Stolen time is counted as time when either the vcpu is able to
+ * run as part of a virtual core, but the task running the vcore
+ * is preempted or sleeping, or when the vcpu needs something done
+ * in the kernel by the task running the vcpu, but that task is
+ * preempted or sleeping. Those two things have to be counted
+ * separately, since one of the vcpu tasks will take on the job
+ * of running the core, and the other vcpu tasks in the vcore will
+ * sleep waiting for it to do that, but that sleep shouldn't count
+ * as stolen time.
+ *
+ * Hence we accumulate stolen time when the vcpu can run as part of
+ * a vcore using vc->stolen_tb, and the stolen time when the vcpu
+ * needs its task to do other things in the kernel (for example,
+ * service a page fault) in busy_stolen. We don't accumulate
+ * stolen time for a vcore when it is inactive, or for a vcpu
+ * when it is in state RUNNING or NOTREADY. NOTREADY is a bit of
+ * a misnomer; it means that the vcpu task is not executing in
+ * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
+ * the kernel. We don't have any way of dividing up that time
+ * between time that the vcpu is genuinely stopped, time that
+ * the task is actively working on behalf of the vcpu, and time
+ * that the task is preempted, so we don't count any of it as
+ * stolen.
+ *
+ * Updates to busy_stolen are protected by arch.tbacct_lock;
+ * updates to vc->stolen_tb are protected by the arch.tbacct_lock
+ * of the vcpu that has taken responsibility for running the vcore
+ * (i.e. vc->runner). The stolen times are measured in units of
+ * timebase ticks. (Note that the != TB_NIL checks below are
+ * purely defensive; they should never fail.)
+ */
+
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
- local_paca->kvm_hstate.kvm_vcpu = vcpu;
- local_paca->kvm_hstate.kvm_vcore = vc;
- if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ spin_lock(&vcpu->arch.tbacct_lock);
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
+ vc->preempt_tb != TB_NIL) {
vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->preempt_tb = TB_NIL;
+ }
+ if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
+ vcpu->arch.busy_preempt != TB_NIL) {
+ vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
+ vcpu->arch.busy_preempt = TB_NIL;
+ }
+ spin_unlock(&vcpu->arch.tbacct_lock);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ spin_lock(&vcpu->arch.tbacct_lock);
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
vc->preempt_tb = mftb();
+ if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
+ vcpu->arch.busy_preempt = mftb();
+ spin_unlock(&vcpu->arch.tbacct_lock);
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
@@ -142,6 +196,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
vpa->yield_count = 1;
}
+static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
+ unsigned long addr, unsigned long len)
+{
+ /* check address is cacheline aligned */
+ if (addr & (L1_CACHE_BYTES - 1))
+ return -EINVAL;
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (v->next_gpa != addr || v->len != len) {
+ v->next_gpa = addr;
+ v->len = addr ? len : 0;
+ v->update_pending = 1;
+ }
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ return 0;
+}
+
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
u32 dummy;
@@ -317,10 +387,16 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
+ if (!(vcpu->arch.vpa.update_pending ||
+ vcpu->arch.slb_shadow.update_pending ||
+ vcpu->arch.dtl.update_pending))
+ return;
+
spin_lock(&vcpu->arch.vpa_update_lock);
if (vcpu->arch.vpa.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
- init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+ if (vcpu->arch.vpa.pinned_addr)
+ init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
}
if (vcpu->arch.dtl.update_pending) {
kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
@@ -332,24 +408,61 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->arch.vpa_update_lock);
}
+/*
+ * Return the accumulated stolen time for the vcore up until `now'.
+ * The caller should hold the vcore lock.
+ */
+static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
+{
+ u64 p;
+
+ /*
+ * If we are the task running the vcore, then since we hold
+ * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
+ * can't be updated, so we don't need the tbacct_lock.
+ * If the vcore is inactive, it can't become active (since we
+ * hold the vcore lock), so the vcpu load/put functions won't
+ * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
+ */
+ if (vc->vcore_state != VCORE_INACTIVE &&
+ vc->runner->arch.run_task != current) {
+ spin_lock(&vc->runner->arch.tbacct_lock);
+ p = vc->stolen_tb;
+ if (vc->preempt_tb != TB_NIL)
+ p += now - vc->preempt_tb;
+ spin_unlock(&vc->runner->arch.tbacct_lock);
+ } else {
+ p = vc->stolen_tb;
+ }
+ return p;
+}
+
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
struct kvmppc_vcore *vc)
{
struct dtl_entry *dt;
struct lppaca *vpa;
- unsigned long old_stolen;
+ unsigned long stolen;
+ unsigned long core_stolen;
+ u64 now;
dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
- old_stolen = vcpu->arch.stolen_logged;
- vcpu->arch.stolen_logged = vc->stolen_tb;
+ now = mftb();
+ core_stolen = vcore_stolen_time(vc, now);
+ stolen = core_stolen - vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = core_stolen;
+ spin_lock(&vcpu->arch.tbacct_lock);
+ stolen += vcpu->arch.busy_stolen;
+ vcpu->arch.busy_stolen = 0;
+ spin_unlock(&vcpu->arch.tbacct_lock);
if (!dt || !vpa)
return;
memset(dt, 0, sizeof(struct dtl_entry));
dt->dispatch_reason = 7;
dt->processor_id = vc->pcpu + vcpu->arch.ptid;
- dt->timebase = mftb();
- dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
+ dt->timebase = now;
+ dt->enqueue_to_dispatch_time = stolen;
dt->srr0 = kvmppc_get_pc(vcpu);
dt->srr1 = vcpu->arch.shregs.msr;
++dt;
@@ -366,13 +479,16 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
struct kvm_vcpu *tvcpu;
+ int idx;
switch (req) {
case H_ENTER:
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
case H_CEDE:
break;
@@ -429,6 +545,17 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_PERFMON:
r = RESUME_GUEST;
break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ /*
+ * Deliver a machine check interrupt to the guest.
+ * We have to do this, even if the host has handled the
+ * machine check, because machine checks use SRR0/1 and
+ * the interrupt might have trashed guest state in them.
+ */
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_MACHINE_CHECK);
+ r = RESUME_GUEST;
+ break;
case BOOK3S_INTERRUPT_PROGRAM:
{
ulong flags;
@@ -470,12 +597,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* have been handled already.
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- r = kvmppc_book3s_hv_page_fault(run, vcpu,
- vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ r = RESUME_PAGE_FAULT;
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
- r = kvmppc_book3s_hv_page_fault(run, vcpu,
- kvmppc_get_pc(vcpu), 0);
+ vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+ vcpu->arch.fault_dsisr = 0;
+ r = RESUME_PAGE_FAULT;
break;
/*
* This occurs if the guest executes an illegal instruction.
@@ -535,36 +662,174 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
- int r = -EINVAL;
+ int r = 0;
+ long int i;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_HIOR:
- r = put_user(0, (u64 __user *)reg->addr);
+ *val = get_reg_val(id, 0);
+ break;
+ case KVM_REG_PPC_DABR:
+ *val = get_reg_val(id, vcpu->arch.dabr);
+ break;
+ case KVM_REG_PPC_DSCR:
+ *val = get_reg_val(id, vcpu->arch.dscr);
+ break;
+ case KVM_REG_PPC_PURR:
+ *val = get_reg_val(id, vcpu->arch.purr);
+ break;
+ case KVM_REG_PPC_SPURR:
+ *val = get_reg_val(id, vcpu->arch.spurr);
+ break;
+ case KVM_REG_PPC_AMR:
+ *val = get_reg_val(id, vcpu->arch.amr);
+ break;
+ case KVM_REG_PPC_UAMOR:
+ *val = get_reg_val(id, vcpu->arch.uamor);
+ break;
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+ i = id - KVM_REG_PPC_MMCR0;
+ *val = get_reg_val(id, vcpu->arch.mmcr[i]);
+ break;
+ case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+ i = id - KVM_REG_PPC_PMC1;
+ *val = get_reg_val(id, vcpu->arch.pmc[i]);
+ break;
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ /* VSX => FP reg i is stored in arch.vsr[2*i] */
+ long int i = id - KVM_REG_PPC_FPR0;
+ *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
+ } else {
+ /* let generic code handle it */
+ r = -EINVAL;
+ }
+ break;
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ long int i = id - KVM_REG_PPC_VSR0;
+ val->vsxval[0] = vcpu->arch.vsr[2 * i];
+ val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
+ } else {
+ r = -ENXIO;
+ }
+ break;
+#endif /* CONFIG_VSX */
+ case KVM_REG_PPC_VPA_ADDR:
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ break;
+ case KVM_REG_PPC_VPA_SLB:
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
+ val->vpaval.length = vcpu->arch.slb_shadow.len;
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ break;
+ case KVM_REG_PPC_VPA_DTL:
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ val->vpaval.addr = vcpu->arch.dtl.next_gpa;
+ val->vpaval.length = vcpu->arch.dtl.len;
+ spin_unlock(&vcpu->arch.vpa_update_lock);
break;
default:
+ r = -EINVAL;
break;
}
return r;
}
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
- int r = -EINVAL;
+ int r = 0;
+ long int i;
+ unsigned long addr, len;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_HIOR:
- {
- u64 hior;
/* Only allow this to be set to zero */
- r = get_user(hior, (u64 __user *)reg->addr);
- if (!r && (hior != 0))
+ if (set_reg_val(id, *val))
r = -EINVAL;
break;
- }
+ case KVM_REG_PPC_DABR:
+ vcpu->arch.dabr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DSCR:
+ vcpu->arch.dscr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PURR:
+ vcpu->arch.purr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_SPURR:
+ vcpu->arch.spurr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_AMR:
+ vcpu->arch.amr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_UAMOR:
+ vcpu->arch.uamor = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+ i = id - KVM_REG_PPC_MMCR0;
+ vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
+ i = id - KVM_REG_PPC_PMC1;
+ vcpu->arch.pmc[i] = set_reg_val(id, *val);
+ break;
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ /* VSX => FP reg i is stored in arch.vsr[2*i] */
+ long int i = id - KVM_REG_PPC_FPR0;
+ vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
+ } else {
+ /* let generic code handle it */
+ r = -EINVAL;
+ }
+ break;
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ long int i = id - KVM_REG_PPC_VSR0;
+ vcpu->arch.vsr[2 * i] = val->vsxval[0];
+ vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
+ } else {
+ r = -ENXIO;
+ }
+ break;
+#endif /* CONFIG_VSX */
+ case KVM_REG_PPC_VPA_ADDR:
+ addr = set_reg_val(id, *val);
+ r = -EINVAL;
+ if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
+ vcpu->arch.dtl.next_gpa))
+ break;
+ r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
+ break;
+ case KVM_REG_PPC_VPA_SLB:
+ addr = val->vpaval.addr;
+ len = val->vpaval.length;
+ r = -EINVAL;
+ if (addr && !vcpu->arch.vpa.next_gpa)
+ break;
+ r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
+ break;
+ case KVM_REG_PPC_VPA_DTL:
+ addr = val->vpaval.addr;
+ len = val->vpaval.length;
+ r = -EINVAL;
+ if (addr && (len < sizeof(struct dtl_entry) ||
+ !vcpu->arch.vpa.next_gpa))
+ break;
+ len -= len % sizeof(struct dtl_entry);
+ r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
+ break;
default:
+ r = -EINVAL;
break;
}
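
Editor's note: the VPA-related ONE_REG IDs above replace an ad-hoc interface for migrating the PAPR shared areas. KVM_REG_PPC_VPA_ADDR carries just the VPA guest address, while VPA_SLB and VPA_DTL are 128-bit values carrying an (address, length) pair, and the checks above enforce the ordering rule that the VPA must be registered before, and unregistered after, the SLB shadow buffer and dispatch trace log. A hedged sketch of restoring them in that order on the destination of a migration; error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void set_reg(int vcpu_fd, uint64_t id, void *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

static void restore_vpas(int vcpu_fd, uint64_t vpa_gpa,
			 uint64_t slb_gpa, uint64_t slb_len,
			 uint64_t dtl_gpa, uint64_t dtl_len)
{
	uint64_t pair[2];

	/* VPA address first: the kernel refuses SLB/DTL with no VPA set. */
	set_reg(vcpu_fd, KVM_REG_PPC_VPA_ADDR, &vpa_gpa);

	pair[0] = slb_gpa; pair[1] = slb_len;
	set_reg(vcpu_fd, KVM_REG_PPC_VPA_SLB, pair);

	pair[0] = dtl_gpa; pair[1] = dtl_len;	/* must cover whole dtl entries */
	set_reg(vcpu_fd, KVM_REG_PPC_VPA_DTL, pair);
}
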
@@ -599,20 +864,18 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
goto free_vcpu;
vcpu->arch.shared = &vcpu->arch.shregs;
- vcpu->arch.last_cpu = -1;
vcpu->arch.mmcr[0] = MMCR0_FC;
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
vcpu->arch.pvr = mfspr(SPRN_PVR);
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
spin_lock_init(&vcpu->arch.vpa_update_lock);
+ spin_lock_init(&vcpu->arch.tbacct_lock);
+ vcpu->arch.busy_preempt = TB_NIL;
kvmppc_mmu_book3s_hv_init(vcpu);
- /*
- * We consider the vcpu stopped until we see the first run ioctl for it.
- */
- vcpu->arch.state = KVMPPC_VCPU_STOPPED;
+ vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
init_waitqueue_head(&vcpu->arch.cpu_run);
@@ -624,9 +887,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
init_waitqueue_head(&vcore->wq);
- vcore->preempt_tb = mftb();
+ vcore->preempt_tb = TB_NIL;
}
kvm->arch.vcores[core] = vcore;
+ kvm->arch.online_vcores++;
}
mutex_unlock(&kvm->lock);
@@ -637,7 +901,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
++vcore->num_threads;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
- vcpu->arch.stolen_logged = vcore->stolen_tb;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
@@ -697,17 +960,18 @@ extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *v;
+ u64 now;
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return;
+ spin_lock(&vcpu->arch.tbacct_lock);
+ now = mftb();
+ vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
+ vcpu->arch.stolen_logged;
+ vcpu->arch.busy_preempt = now;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ spin_unlock(&vcpu->arch.tbacct_lock);
--vc->n_runnable;
- ++vc->n_busy;
- /* decrement the physical thread id of each following vcpu */
- v = vcpu;
- list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
- --v->arch.ptid;
list_del(&vcpu->arch.run_list);
}
@@ -720,6 +984,7 @@ static int kvmppc_grab_hwthread(int cpu)
/* Ensure the thread won't go into the kernel if it wakes */
tpaca->kvm_hstate.hwthread_req = 1;
+ tpaca->kvm_hstate.kvm_vcpu = NULL;
/*
* If the thread is already executing in the kernel (e.g. handling
@@ -769,7 +1034,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
- kvmppc_grab_hwthread(cpu);
xics_wake_cpu(cpu);
++vc->n_woken;
}
@@ -795,7 +1059,8 @@ static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
/*
* Check that we are on thread 0 and that any other threads in
- * this core are off-line.
+ * this core are off-line. Then grab the threads so they can't
+ * enter the kernel.
*/
static int on_primary_thread(void)
{
@@ -807,6 +1072,17 @@ static int on_primary_thread(void)
while (++thr < threads_per_core)
if (cpu_online(cpu + thr))
return 0;
+
+ /* Grab all hw threads so they can't go into the kernel */
+ for (thr = 1; thr < threads_per_core; ++thr) {
+ if (kvmppc_grab_hwthread(cpu + thr)) {
+ /* Couldn't grab one; let the others go */
+ do {
+ kvmppc_release_hwthread(cpu + thr);
+ } while (--thr > 0);
+ return 0;
+ }
+ }
return 1;
}
@@ -814,21 +1090,24 @@ static int on_primary_thread(void)
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
*/
-static int kvmppc_run_core(struct kvmppc_vcore *vc)
+static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
long ret;
u64 now;
int ptid, i, need_vpa_update;
+ int srcu_idx;
+ struct kvm_vcpu *vcpus_to_update[threads_per_core];
/* don't start if any threads have a signal pending */
need_vpa_update = 0;
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
if (signal_pending(vcpu->arch.run_task))
- return 0;
- need_vpa_update |= vcpu->arch.vpa.update_pending |
- vcpu->arch.slb_shadow.update_pending |
- vcpu->arch.dtl.update_pending;
+ return;
+ if (vcpu->arch.vpa.update_pending ||
+ vcpu->arch.slb_shadow.update_pending ||
+ vcpu->arch.dtl.update_pending)
+ vcpus_to_update[need_vpa_update++] = vcpu;
}
/*
@@ -838,7 +1117,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
vc->n_woken = 0;
vc->nap_count = 0;
vc->entry_exit_count = 0;
- vc->vcore_state = VCORE_RUNNING;
+ vc->vcore_state = VCORE_STARTING;
vc->in_guest = 0;
vc->napping_threads = 0;
@@ -848,24 +1127,12 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
*/
if (need_vpa_update) {
spin_unlock(&vc->lock);
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
- kvmppc_update_vpas(vcpu);
+ for (i = 0; i < need_vpa_update; ++i)
+ kvmppc_update_vpas(vcpus_to_update[i]);
spin_lock(&vc->lock);
}
/*
- * Make sure we are running on thread 0, and that
- * secondary threads are offline.
- * XXX we should also block attempts to bring any
- * secondary threads online.
- */
- if (threads_per_core > 1 && !on_primary_thread()) {
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
- vcpu->arch.ret = -EBUSY;
- goto out;
- }
-
- /*
* Assign physical thread IDs, first to non-ceded vcpus
* and then to ceded ones.
*/
@@ -879,28 +1146,36 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
}
}
if (!vcpu0)
- return 0; /* nothing to run */
+ goto out; /* nothing to run; should never happen */
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
if (vcpu->arch.ceded)
vcpu->arch.ptid = ptid++;
- vc->stolen_tb += mftb() - vc->preempt_tb;
+ /*
+ * Make sure we are running on thread 0, and that
+ * secondary threads are offline.
+ */
+ if (threads_per_core > 1 && !on_primary_thread()) {
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ vcpu->arch.ret = -EBUSY;
+ goto out;
+ }
+
vc->pcpu = smp_processor_id();
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
}
- /* Grab any remaining hw threads so they can't go into the kernel */
- for (i = ptid; i < threads_per_core; ++i)
- kvmppc_grab_hwthread(vc->pcpu + i);
+ vc->vcore_state = VCORE_RUNNING;
preempt_disable();
spin_unlock(&vc->lock);
kvm_guest_enter();
+
+ srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+
__kvmppc_vcore_entry(NULL, vcpu0);
- for (i = 0; i < threads_per_core; ++i)
- kvmppc_release_hwthread(vc->pcpu + i);
spin_lock(&vc->lock);
/* disable sending of IPIs on virtual external irqs */
@@ -909,10 +1184,14 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
/* wait for secondary threads to finish writing their state to memory */
if (vc->nap_count < vc->n_woken)
kvmppc_wait_for_nap(vc);
+ for (i = 0; i < threads_per_core; ++i)
+ kvmppc_release_hwthread(vc->pcpu + i);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
vc->vcore_state = VCORE_EXITING;
spin_unlock(&vc->lock);
+ srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
kvm_guest_exit();
@@ -920,6 +1199,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
preempt_enable();
kvm_resched(vcpu);
+ spin_lock(&vc->lock);
now = get_tb();
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
/* cancel pending dec exception if dec is positive */
@@ -943,10 +1223,8 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
}
}
- spin_lock(&vc->lock);
out:
vc->vcore_state = VCORE_INACTIVE;
- vc->preempt_tb = mftb();
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
if (vcpu->arch.ret != RESUME_GUEST) {
@@ -954,8 +1232,6 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
wake_up(&vcpu->arch.cpu_run);
}
}
-
- return 1;
}
/*
@@ -979,20 +1255,11 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
DEFINE_WAIT(wait);
- struct kvm_vcpu *v;
- int all_idle = 1;
prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
vc->vcore_state = VCORE_SLEEPING;
spin_unlock(&vc->lock);
- list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
- if (!v->arch.ceded || v->arch.pending_exceptions) {
- all_idle = 0;
- break;
- }
- }
- if (all_idle)
- schedule();
+ schedule();
finish_wait(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
@@ -1001,13 +1268,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int n_ceded;
- int prev_state;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn;
kvm_run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
+ kvmppc_update_vpas(vcpu);
/*
* Synchronize with other threads in this virtual core
@@ -1017,8 +1284,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
vcpu->arch.kvm_run = kvm_run;
- prev_state = vcpu->arch.state;
+ vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ vcpu->arch.busy_preempt = TB_NIL;
list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
++vc->n_runnable;
@@ -1027,33 +1295,26 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* If the vcore is already running, we may be able to start
* this thread straight away and have it join in.
*/
- if (prev_state == KVMPPC_VCPU_STOPPED) {
+ if (!signal_pending(current)) {
if (vc->vcore_state == VCORE_RUNNING &&
VCORE_EXIT_COUNT(vc) == 0) {
vcpu->arch.ptid = vc->n_runnable - 1;
+ kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
+ } else if (vc->vcore_state == VCORE_SLEEPING) {
+ wake_up(&vc->wq);
}
- } else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
- --vc->n_busy;
+ }
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
- if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
+ if (vc->vcore_state != VCORE_INACTIVE) {
spin_unlock(&vc->lock);
kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
spin_lock(&vc->lock);
continue;
}
- vc->runner = vcpu;
- n_ceded = 0;
- list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
- n_ceded += v->arch.ceded;
- if (n_ceded == vc->n_runnable)
- kvmppc_vcore_blocked(vc);
- else
- kvmppc_run_core(vc);
-
list_for_each_entry_safe(v, vn, &vc->runnable_threads,
arch.run_list) {
kvmppc_core_prepare_to_enter(v);
@@ -1065,22 +1326,40 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
}
+ if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
+ break;
+ vc->runner = vcpu;
+ n_ceded = 0;
+ list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
+ if (!v->arch.pending_exceptions)
+ n_ceded += v->arch.ceded;
+ if (n_ceded == vc->n_runnable)
+ kvmppc_vcore_blocked(vc);
+ else
+ kvmppc_run_core(vc);
vc->runner = NULL;
}
- if (signal_pending(current)) {
- if (vc->vcore_state == VCORE_RUNNING ||
- vc->vcore_state == VCORE_EXITING) {
- spin_unlock(&vc->lock);
- kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
- spin_lock(&vc->lock);
- }
- if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
- kvmppc_remove_runnable(vc, vcpu);
- vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- vcpu->arch.ret = -EINTR;
- }
+ while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ (vc->vcore_state == VCORE_RUNNING ||
+ vc->vcore_state == VCORE_EXITING)) {
+ spin_unlock(&vc->lock);
+ kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
+ spin_lock(&vc->lock);
+ }
+
+ if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+ kvmppc_remove_runnable(vc, vcpu);
+ vcpu->stat.signal_exits++;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ }
+
+ if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
+ /* Wake up some vcpu to run the core */
+ v = list_first_entry(&vc->runnable_threads,
+ struct kvm_vcpu, arch.run_list);
+ wake_up(&v->arch.cpu_run);
}
spin_unlock(&vc->lock);
@@ -1090,6 +1369,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
+ int srcu_idx;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1120,6 +1400,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
flush_vsx_to_thread(current);
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
r = kvmppc_run_vcpu(run, vcpu);
@@ -1128,10 +1409,16 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
!(vcpu->arch.shregs.msr & MSR_PR)) {
r = kvmppc_pseries_do_hcall(vcpu);
kvmppc_core_prepare_to_enter(vcpu);
+ } else if (r == RESUME_PAGE_FAULT) {
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
}
} while (r == RESUME_GUEST);
out:
+ vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r;
}
@@ -1273,7 +1560,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
- r = kvmppc_hv_get_dirty_log(kvm, memslot);
+ r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
if (r)
goto out;
@@ -1287,67 +1574,88 @@ out:
return r;
}
-static unsigned long slb_pgsize_encoding(unsigned long psize)
+static void unpin_slot(struct kvm_memory_slot *memslot)
{
- unsigned long senc = 0;
+ unsigned long *physp;
+ unsigned long j, npages, pfn;
+ struct page *page;
- if (psize > 0x1000) {
- senc = SLB_VSID_L;
- if (psize == 0x10000)
- senc |= SLB_VSID_LP_01;
+ physp = memslot->arch.slot_phys;
+ npages = memslot->npages;
+ if (!physp)
+ return;
+ for (j = 0; j < npages; j++) {
+ if (!(physp[j] & KVMPPC_GOT_PAGE))
+ continue;
+ pfn = physp[j] >> PAGE_SHIFT;
+ page = pfn_to_page(pfn);
+ SetPageDirty(page);
+ put_page(page);
+ }
+}
+
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+ if (!dont || free->arch.rmap != dont->arch.rmap) {
+ vfree(free->arch.rmap);
+ free->arch.rmap = NULL;
+ }
+ if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
+ unpin_slot(free);
+ vfree(free->arch.slot_phys);
+ free->arch.slot_phys = NULL;
}
- return senc;
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+ if (!slot->arch.rmap)
+ return -ENOMEM;
+ slot->arch.slot_phys = NULL;
+
+ return 0;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem)
{
- unsigned long npages;
unsigned long *phys;
- /* Allocate a slot_phys array */
- phys = kvm->arch.slot_phys[mem->slot];
- if (!kvm->arch.using_mmu_notifiers && !phys) {
- npages = mem->memory_size >> PAGE_SHIFT;
- phys = vzalloc(npages * sizeof(unsigned long));
+ /* Allocate a slot_phys array if needed */
+ phys = memslot->arch.slot_phys;
+ if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
+ phys = vzalloc(memslot->npages * sizeof(unsigned long));
if (!phys)
return -ENOMEM;
- kvm->arch.slot_phys[mem->slot] = phys;
- kvm->arch.slot_npages[mem->slot] = npages;
+ memslot->arch.slot_phys = phys;
}
return 0;
}
-static void unpin_slot(struct kvm *kvm, int slot_id)
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old)
{
- unsigned long *physp;
- unsigned long j, npages, pfn;
- struct page *page;
+ unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
- physp = kvm->arch.slot_phys[slot_id];
- npages = kvm->arch.slot_npages[slot_id];
- if (physp) {
- spin_lock(&kvm->arch.slot_phys_lock);
- for (j = 0; j < npages; j++) {
- if (!(physp[j] & KVMPPC_GOT_PAGE))
- continue;
- pfn = physp[j] >> PAGE_SHIFT;
- page = pfn_to_page(pfn);
- SetPageDirty(page);
- put_page(page);
- }
- kvm->arch.slot_phys[slot_id] = NULL;
- spin_unlock(&kvm->arch.slot_phys_lock);
- vfree(physp);
+ if (npages && old.npages) {
+ /*
+ * If modifying a memslot, reset all the rmap dirty bits.
+ * If this is a new memslot, we don't need to do anything
+ * since the rmap array starts out as all zeroes,
+ * i.e. no pages are dirty.
+ */
+ memslot = id_to_memslot(kvm->memslots, mem->slot);
+ kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
}
}
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
-{
-}
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
@@ -1362,6 +1670,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
unsigned long rmls;
unsigned long *physp;
unsigned long i, npages;
+ int srcu_idx;
mutex_lock(&kvm->lock);
if (kvm->arch.rma_setup_done)
@@ -1377,12 +1686,13 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
}
/* Look up the memslot for guest physical address 0 */
+ srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, 0);
/* We must have some memory at 0 by now */
err = -EINVAL;
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
- goto out;
+ goto out_srcu;
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
@@ -1406,14 +1716,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
err = -EPERM;
if (cpu_has_feature(CPU_FTR_ARCH_201)) {
pr_err("KVM: CPU requires an RMO\n");
- goto out;
+ goto out_srcu;
}
/* We can handle 4k, 64k or 16M pages in the VRMA */
err = -EINVAL;
if (!(psize == 0x1000 || psize == 0x10000 ||
psize == 0x1000000))
- goto out;
+ goto out_srcu;
/* Update VRMASD field in the LPCR */
senc = slb_pgsize_encoding(psize);
@@ -1436,7 +1746,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
err = -EINVAL;
if (rmls < 0) {
pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
- goto out;
+ goto out_srcu;
}
atomic_inc(&ri->use_count);
kvm->arch.rma = ri;
@@ -1465,17 +1775,24 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Initialize phys addrs of pages in RMO */
npages = ri->npages;
porder = __ilog2(npages);
- physp = kvm->arch.slot_phys[memslot->id];
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i)
- physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
- spin_unlock(&kvm->arch.slot_phys_lock);
+ physp = memslot->arch.slot_phys;
+ if (physp) {
+ if (npages > memslot->npages)
+ npages = memslot->npages;
+ spin_lock(&kvm->arch.slot_phys_lock);
+ for (i = 0; i < npages; ++i)
+ physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
+ porder;
+ spin_unlock(&kvm->arch.slot_phys_lock);
+ }
}
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
kvm->arch.rma_setup_done = 1;
err = 0;
+ out_srcu:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
out:
mutex_unlock(&kvm->lock);
return err;
@@ -1496,6 +1813,13 @@ int kvmppc_core_init_vm(struct kvm *kvm)
return -ENOMEM;
kvm->arch.lpid = lpid;
+ /*
+ * Since we don't flush the TLB when tearing down a VM,
+ * and this lpid might have previously been used,
+ * make sure we flush on each core before running the new VM.
+ */
+ cpumask_setall(&kvm->arch.need_tlb_flush);
+
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
kvm->arch.rma = NULL;
@@ -1523,16 +1847,19 @@ int kvmppc_core_init_vm(struct kvm *kvm)
kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
spin_lock_init(&kvm->arch.slot_phys_lock);
+
+ /*
+ * Don't allow secondary CPU threads to come online
+ * while any KVM VMs exist.
+ */
+ inhibit_secondary_onlining();
+
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
- unsigned long i;
-
- if (!kvm->arch.using_mmu_notifiers)
- for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- unpin_slot(kvm, i);
+ uninhibit_secondary_onlining();
if (kvm->arch.rma) {
kvm_release_rma(kvm->arch.rma);
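Several hunks above take kvm->srcu around guest entry and the new RESUME_PAGE_FAULT path before touching memslots. Reduced to a sketch (the helper name is invented; only the locking pattern is taken from the diff):

static unsigned long gfn_to_hva_under_srcu(struct kvm *kvm, gfn_t gfn)
{
	unsigned long hva;
	int srcu_idx;

	/* The SRCU read side keeps the memslot array stable while we
	 * translate the guest physical address. */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	hva = gfn_to_hva(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return hva;
}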
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fb4eac290fe..ec0a9e5de10 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -157,8 +157,8 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
for (i = 0; i < count; ++i) {
linear = alloc_bootmem_align(size, size);
- pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
- size >> 20);
+ pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
+ size >> 20);
linear_info[i].base_virt = linear;
linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
linear_info[i].npages = npages;
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
new file mode 100644
index 00000000000..35f3cf0269b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -0,0 +1,144 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/kernel.h>
+#include <asm/opal.h>
+
+/* SRR1 bits for machine check on POWER7 */
+#define SRR1_MC_LDSTERR (1ul << (63-42))
+#define SRR1_MC_IFETCH_SH (63-45)
+#define SRR1_MC_IFETCH_MASK 0x7
+#define SRR1_MC_IFETCH_SLBPAR 2 /* SLB parity error */
+#define SRR1_MC_IFETCH_SLBMULTI 3 /* SLB multi-hit */
+#define SRR1_MC_IFETCH_SLBPARMULTI 4 /* SLB parity + multi-hit */
+#define SRR1_MC_IFETCH_TLBMULTI 5 /* I-TLB multi-hit */
+
+/* DSISR bits for machine check on POWER7 */
+#define DSISR_MC_DERAT_MULTI 0x800 /* D-ERAT multi-hit */
+#define DSISR_MC_TLB_MULTI 0x400 /* D-TLB multi-hit */
+#define DSISR_MC_SLB_PARITY 0x100 /* SLB parity error */
+#define DSISR_MC_SLB_MULTI 0x080 /* SLB multi-hit */
+#define DSISR_MC_SLB_PARMULTI 0x040 /* SLB parity + multi-hit */
+
+/* POWER7 SLB flush and reload */
+static void reload_slb(struct kvm_vcpu *vcpu)
+{
+ struct slb_shadow *slb;
+ unsigned long i, n;
+
+ /* First clear out SLB */
+ asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+
+ /* Do they have an SLB shadow buffer registered? */
+ slb = vcpu->arch.slb_shadow.pinned_addr;
+ if (!slb)
+ return;
+
+ /* Sanity check */
+ n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
+ if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
+ return;
+
+ /* Load up the SLB from that */
+ for (i = 0; i < n; ++i) {
+ unsigned long rb = slb->save_area[i].esid;
+ unsigned long rs = slb->save_area[i].vsid;
+
+ rb = (rb & ~0xFFFul) | i; /* insert entry number */
+ asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
+ }
+}
+
+/* POWER7 TLB flush */
+static void flush_tlb_power7(struct kvm_vcpu *vcpu)
+{
+ unsigned long i, rb;
+
+ rb = TLBIEL_INVAL_SET_LPID;
+ for (i = 0; i < POWER7_TLB_SETS; ++i) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+}
+
+/*
+ * On POWER7, see if we can handle a machine check that occurred inside
+ * the guest in real mode, without switching to the host partition.
+ *
+ * Returns: 0 => exit guest, 1 => deliver machine check to guest
+ */
+static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+{
+ unsigned long srr1 = vcpu->arch.shregs.msr;
+ struct opal_machine_check_event *opal_evt;
+ long handled = 1;
+
+ if (srr1 & SRR1_MC_LDSTERR) {
+ /* error on load/store */
+ unsigned long dsisr = vcpu->arch.shregs.dsisr;
+
+ if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
+ DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
+ /* flush and reload SLB; flushes D-ERAT too */
+ reload_slb(vcpu);
+ dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
+ DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
+ }
+ if (dsisr & DSISR_MC_TLB_MULTI) {
+ flush_tlb_power7(vcpu);
+ dsisr &= ~DSISR_MC_TLB_MULTI;
+ }
+ /* Any other errors we don't understand? */
+ if (dsisr & 0xffffffffUL)
+ handled = 0;
+ }
+
+ switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
+ case 0:
+ break;
+ case SRR1_MC_IFETCH_SLBPAR:
+ case SRR1_MC_IFETCH_SLBMULTI:
+ case SRR1_MC_IFETCH_SLBPARMULTI:
+ reload_slb(vcpu);
+ break;
+ case SRR1_MC_IFETCH_TLBMULTI:
+ flush_tlb_power7(vcpu);
+ break;
+ default:
+ handled = 0;
+ }
+
+ /*
+ * See if OPAL has already handled the condition.
+ * We assume that if the condition is recovered then OPAL
+ * will have generated an error log event that we will pick
+ * up and log later.
+ */
+ opal_evt = local_paca->opal_mc_evt;
+ if (opal_evt->version == OpalMCE_V1 &&
+ (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
+ opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
+ handled = 1;
+
+ if (handled)
+ opal_evt->in_use = 0;
+
+ return handled;
+}
+
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return kvmppc_realmode_mc_power7(vcpu);
+
+ return 0;
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fb0e821622d..19c93bae1ae 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -35,6 +35,37 @@ static void *real_vmalloc_addr(void *x)
return __va(addr);
}
+/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
+static int global_invalidates(struct kvm *kvm, unsigned long flags)
+{
+ int global;
+
+ /*
+ * If there is only one vcore, and it's currently running,
+ * we can use tlbiel as long as we mark all other physical
+ * cores as potentially having stale TLB entries for this lpid.
+ * If we're not using MMU notifiers, we never take pages away
+ * from the guest, so we can use tlbiel if requested.
+ * Otherwise, don't use tlbiel.
+ */
+ if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
+ global = 0;
+ else if (kvm->arch.using_mmu_notifiers)
+ global = 1;
+ else
+ global = !(flags & H_LOCAL);
+
+ if (!global) {
+ /* any other core might now have stale TLB entries... */
+ smp_wmb();
+ cpumask_setall(&kvm->arch.need_tlb_flush);
+ cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
+ &kvm->arch.need_tlb_flush);
+ }
+
+ return global;
+}
+
/*
* Add this HPTE into the chain for the real page.
* Must be called with the chain locked; it unlocks the chain.
@@ -59,13 +90,24 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
head->back = pte_index;
} else {
rev->forw = rev->back = pte_index;
- i = pte_index;
+ *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
+ pte_index | KVMPPC_RMAP_PRESENT;
}
- smp_wmb();
- *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
+ unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+ struct revmap_entry *rev)
+{
+ if (atomic_read(&kvm->arch.hpte_mod_interest))
+ rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
struct revmap_entry *rev,
@@ -81,7 +123,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
ptel = rev->guest_rpte |= rcbits;
gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
- if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ if (!memslot)
return;
rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
@@ -103,14 +145,14 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
unlock_rmap(rmap);
}
-static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
+static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
int writing, unsigned long *pte_sizep)
{
pte_t *ptep;
unsigned long ps = *pte_sizep;
unsigned int shift;
- ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
if (!ptep)
return __pte(0);
if (shift)
@@ -130,15 +172,15 @@ static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
hpte[0] = hpte_v;
}
-long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel,
+ pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
- struct kvm *kvm = vcpu->kvm;
unsigned long i, pa, gpa, gfn, psize;
unsigned long slot_fn, hva;
unsigned long *hpte;
struct revmap_entry *rev;
- unsigned long g_ptel = ptel;
+ unsigned long g_ptel;
struct kvm_memory_slot *memslot;
unsigned long *physp, pte_size;
unsigned long is_io;
@@ -147,13 +189,14 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned int writing;
unsigned long mmu_seq;
unsigned long rcbits;
- bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
psize = hpte_page_size(pteh, ptel);
if (!psize)
return H_PARAMETER;
writing = hpte_is_writable(ptel);
pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+ ptel &= ~HPTE_GR_RESERVED;
+ g_ptel = ptel;
/* used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
@@ -183,7 +226,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
rmap = &memslot->arch.rmap[slot_fn];
if (!kvm->arch.using_mmu_notifiers) {
- physp = kvm->arch.slot_phys[memslot->id];
+ physp = memslot->arch.slot_phys;
if (!physp)
return H_PARAMETER;
physp += slot_fn;
@@ -201,7 +244,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
/* Look up the Linux PTE for the backing page */
pte_size = psize;
- pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
+ pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
if (pte_present(pte)) {
if (writing && !pte_write(pte))
/* make the actual HPTE be read-only */
@@ -210,6 +253,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
pa = pte_pfn(pte) << PAGE_SHIFT;
}
}
+
if (pte_size < psize)
return H_PARAMETER;
if (pa && pte_size > psize)
@@ -287,8 +331,10 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
rev = &kvm->arch.revmap[pte_index];
if (realmode)
rev = real_vmalloc_addr(rev);
- if (rev)
+ if (rev) {
rev->guest_rpte = g_ptel;
+ note_hpte_modification(kvm, rev);
+ }
/* Link HPTE into reverse-map chain */
if (pteh & HPTE_V_VALID) {
@@ -297,7 +343,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
if (kvm->arch.using_mmu_notifiers &&
- mmu_notifier_retry(vcpu, mmu_seq)) {
+ mmu_notifier_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -318,10 +364,17 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
hpte[0] = pteh;
asm volatile("ptesync" : : : "memory");
- vcpu->arch.gpr[4] = pte_index;
+ *pte_idx_ret = pte_index;
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_enter);
+EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
+
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
+ vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
+}
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -343,11 +396,10 @@ static inline int try_lock_tlbie(unsigned int *lock)
return old == 0;
}
-long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va)
+long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long *hpret)
{
- struct kvm *kvm = vcpu->kvm;
unsigned long *hpte;
unsigned long v, r, rb;
struct revmap_entry *rev;
@@ -369,7 +421,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
if (v & HPTE_V_VALID) {
hpte[0] &= ~HPTE_V_VALID;
rb = compute_tlbie_rb(v, hpte[1], pte_index);
- if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
+ if (global_invalidates(kvm, flags)) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
asm volatile("ptesync" : : : "memory");
@@ -385,13 +437,22 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
/* Read PTE low word after tlbie to get final R/C values */
remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
}
- r = rev->guest_rpte;
+ r = rev->guest_rpte & ~HPTE_GR_RESERVED;
+ note_hpte_modification(kvm, rev);
unlock_hpte(hpte, 0);
- vcpu->arch.gpr[4] = v;
- vcpu->arch.gpr[5] = r;
+ hpret[0] = v;
+ hpret[1] = r;
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
+
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn)
+{
+ return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
+ &vcpu->arch.gpr[4]);
+}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -459,6 +520,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
args[j] = ((0x80 | flags) << 56) + pte_index;
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ note_hpte_modification(kvm, rev);
if (!(hp[0] & HPTE_V_VALID)) {
/* insert R and C bits from PTE */
@@ -534,8 +596,6 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_NOT_FOUND;
}
- if (atomic_read(&kvm->online_vcpus) == 1)
- flags |= H_LOCAL;
v = hpte[0];
bits = (flags << 55) & HPTE_R_PP0;
bits |= (flags << 48) & HPTE_R_KEY_HI;
@@ -548,6 +608,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
if (rev) {
r = (rev->guest_rpte & ~mask) | bits;
rev->guest_rpte = r;
+ note_hpte_modification(kvm, rev);
}
r = (hpte[1] & ~mask) | bits;
@@ -555,7 +616,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
if (v & HPTE_V_VALID) {
rb = compute_tlbie_rb(v, r, pte_index);
hpte[0] = v & ~HPTE_V_VALID;
- if (!(flags & H_LOCAL)) {
+ if (global_invalidates(kvm, flags)) {
while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
asm volatile("ptesync" : : : "memory");
@@ -568,6 +629,28 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
asm volatile("tlbiel %0" : : "r" (rb));
asm volatile("ptesync" : : : "memory");
}
+ /*
+ * If the host has this page as readonly but the guest
+ * wants to make it read/write, reduce the permissions.
+ * Checking the host permissions involves finding the
+ * memslot and then the Linux PTE for the page.
+ */
+ if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
+ unsigned long psize, gfn, hva;
+ struct kvm_memory_slot *memslot;
+ pgd_t *pgdir = vcpu->arch.pgdir;
+ pte_t pte;
+
+ psize = hpte_page_size(v, r);
+ gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+ memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+ if (memslot) {
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+ pte = lookup_linux_pte(pgdir, hva, 1, &psize);
+ if (pte_present(pte) && !pte_write(pte))
+ r = hpte_make_readonly(r);
+ }
+ }
}
hpte[1] = r;
eieio();
@@ -599,8 +682,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
}
- if (v & HPTE_V_VALID)
+ if (v & HPTE_V_VALID) {
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
+ r &= ~HPTE_GR_RESERVED;
+ }
vcpu->arch.gpr[4 + i * 2] = v;
vcpu->arch.gpr[5 + i * 2] = r;
}
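The need_tlb_flush mask that global_invalidates() fills in above is drained at guest entry by the assembly added to book3s_hv_rmhandlers.S further down. In rough C (a sketch only; it borrows TLBIEL_INVAL_SET_LPID, TLBIEL_INVAL_SET_SHIFT and POWER7_TLB_SETS from the new book3s_hv_ras.c, and the function name is invented):

static void flush_tlb_if_marked_stale(struct kvm *kvm, int pcpu)
{
	unsigned long rb;
	int i;

	/* Was this physical core flagged as possibly holding stale
	 * translations for this LPID?  Clear the flag and flush if so. */
	if (!cpumask_test_and_clear_cpu(pcpu, &kvm->arch.need_tlb_flush))
		return;

	rb = TLBIEL_INVAL_SET_LPID;
	for (i = 0; i < POWER7_TLB_SETS; ++i) {	/* one tlbiel per set */
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
	asm volatile("ptesync" : : : "memory");
}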
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 74a24bbb963..10b6c358dd7 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/mmu-hash64.h>
/*****************************************************************************
* *
@@ -134,8 +135,11 @@ kvm_start_guest:
27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+ /* reload vcpu pointer after clearing the IPI */
+ ld r4,HSTATE_KVM_VCPU(r13)
+ cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
- beq cr1,kvm_no_guest
+ beq kvm_no_guest
/* were we napping due to cede? */
lbz r0,HSTATE_NAPPING(r13)
@@ -310,7 +314,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_SDR1,r6 /* switch to partition page table */
mtspr SPRN_LPID,r7
isync
+
+ /* See if we need to flush the TLB */
+ lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
+ clrldi r7,r6,64-6 /* extract bit number (6 bits) */
+ srdi r6,r6,6 /* doubleword number */
+ sldi r6,r6,3 /* address offset */
+ add r6,r6,r9
+ addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
li r0,1
+ sld r0,r0,r7
+ ld r7,0(r6)
+ and. r7,r7,r0
+ beq 22f
+23: ldarx r7,0,r6 /* if set, clear the bit */
+ andc r7,r7,r0
+ stdcx. r7,0,r6
+ bne 23b
+ li r6,128 /* and flush the TLB */
+ mtctr r6
+ li r7,0x800 /* IS field = 0b10 */
+ ptesync
+28: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 28b
+ ptesync
+
+22: li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
b 10f
@@ -333,36 +363,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mr r9,r4
blt hdec_soon
- /*
- * Invalidate the TLB if we could possibly have stale TLB
- * entries for this partition on this core due to the use
- * of tlbiel.
- * XXX maybe only need this on primary thread?
- */
- ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
- lwz r5,VCPU_VCPUID(r4)
- lhz r6,PACAPACAINDEX(r13)
- rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
- lhz r8,VCPU_LAST_CPU(r4)
- sldi r7,r6,1 /* see if this is the same vcpu */
- add r7,r7,r9 /* as last ran on this pcpu */
- lhz r0,KVM_LAST_VCPU(r7)
- cmpw r6,r8 /* on the same cpu core as last time? */
- bne 3f
- cmpw r0,r5 /* same vcpu as this core last ran? */
- beq 1f
-3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
- sth r5,KVM_LAST_VCPU(r7)
- li r6,128
- mtctr r6
- li r7,0x800 /* IS field = 0b10 */
- ptesync
-2: tlbiel r7
- addi r7,r7,0x1000
- bdnz 2b
- ptesync
-1:
-
/* Save purr/spurr */
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
@@ -679,8 +679,7 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-nohpte_cont:
-hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */
mfspr r5,SPRN_DEC
mftb r6
@@ -701,6 +700,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
+ /* See if it is a machine check */
+ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq machine_check_realmode
+mc_cont:
+
/* Save guest CTRL register, set runlatch to 1 */
6: mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
@@ -1113,38 +1117,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
* For external and machine check interrupts, we need
* to call the Linux handler to process the interrupt.
- * We do that by jumping to the interrupt vector address
- * which we have in r12. The [h]rfid at the end of the
+ * We do that by jumping to absolute address 0x500 for
+ * external interrupts, or the machine_check_fwnmi label
+ * for machine checks (since firmware might have patched
+ * the vector area at 0x200). The [h]rfid at the end of the
* handler will return to the book3s_hv_interrupts.S code.
* For other interrupts we do the rfid to get back
- * to the book3s_interrupts.S code here.
+ * to the book3s_hv_interrupts.S code here.
*/
ld r8, HSTATE_VMHANDLER(r13)
ld r7, HSTATE_HOST_MSR(r13)
+ cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
beq 11f
- cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* RFI into the highmem handler, or branch to interrupt handler */
-12: mfmsr r6
- mtctr r12
+ mfmsr r6
li r0, MSR_RI
andc r6, r6, r0
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- beqctr
+ beqa 0x500 /* external interrupt (PPC970) */
+ beq cr1, 13f /* machine check */
RFI
-11:
-BEGIN_FTR_SECTION
- b 12b
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- mtspr SPRN_HSRR0, r8
+ /* On POWER7, we have external interrupts set to use HSRR0/1 */
+11: mtspr SPRN_HSRR0, r8
mtspr SPRN_HSRR1, r7
ba 0x500
+13: b machine_check_fwnmi
+
/*
* Check whether an HDSI is an HPTE not found fault or something else.
* If it is an HPTE not found fault that is due to the guest accessing
@@ -1177,7 +1184,7 @@ kvmppc_hdsi:
cmpdi r3, 0 /* retry the instruction */
beq 6f
cmpdi r3, -1 /* handle in kernel mode */
- beq nohpte_cont
+ beq guest_exit_cont
cmpdi r3, -2 /* MMIO emulation; need instr word */
beq 2f
@@ -1191,6 +1198,7 @@ kvmppc_hdsi:
li r10, BOOK3S_INTERRUPT_DATA_STORAGE
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
rotldi r11, r11, 63
+fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
lwz r8, VCPU_XER(r9)
mtctr r7
@@ -1223,7 +1231,7 @@ kvmppc_hdsi:
/* Unset guest mode. */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- b nohpte_cont
+ b guest_exit_cont
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
@@ -1249,9 +1257,9 @@ kvmppc_hisi:
ld r11, VCPU_MSR(r9)
li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
cmpdi r3, 0 /* retry the instruction */
- beq 6f
+ beq fast_interrupt_c_return
cmpdi r3, -1 /* handle in kernel mode */
- beq nohpte_cont
+ beq guest_exit_cont
/* Synthesize an ISI for the guest */
mr r11, r3
@@ -1260,12 +1268,7 @@ kvmppc_hisi:
li r10, BOOK3S_INTERRUPT_INST_STORAGE
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
rotldi r11, r11, 63
-6: ld r7, VCPU_CTR(r9)
- lwz r8, VCPU_XER(r9)
- mtctr r7
- mtxer r8
- mr r4, r9
- b fast_guest_return
+ b fast_interrupt_c_return
3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
ld r5, KVM_VRMA_SLB_V(r6)
@@ -1281,14 +1284,14 @@ kvmppc_hisi:
hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
- bne hcall_real_cont
+ bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
- bge hcall_real_cont
+ bge guest_exit_cont
LOAD_REG_ADDR(r4, hcall_real_table)
lwzx r3,r3,r4
cmpwi r3,0
- beq hcall_real_cont
+ beq guest_exit_cont
add r3,r3,r4
mtctr r3
mr r3,r9 /* get vcpu pointer */
@@ -1309,7 +1312,7 @@ hcall_real_fallback:
li r12,BOOK3S_INTERRUPT_SYSCALL
ld r9, HSTATE_KVM_VCPU(r13)
- b hcall_real_cont
+ b guest_exit_cont
.globl hcall_real_table
hcall_real_table:
@@ -1568,6 +1571,21 @@ kvm_cede_exit:
li r3,H_TOO_HARD
blr
+ /* Try to handle a machine check in real mode */
+machine_check_realmode:
+ mr r3, r9 /* get vcpu pointer */
+ bl .kvmppc_realmode_machine_check
+ nop
+ cmpdi r3, 0 /* continue exiting from guest? */
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq mc_cont
+ /* If not, deliver a machine check. SRR0/1 are already set */
+ li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
+ li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
+ rotldi r11, r11, 63
+ b fast_interrupt_c_return
+
secondary_too_late:
ld r5,HSTATE_KVM_VCORE(r13)
HMT_LOW
@@ -1587,6 +1605,10 @@ secondary_too_late:
.endr
secondary_nap:
+ /* Clear our vcpu pointer so we don't come back in early */
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+ lwsync
/* Clear any pending IPI - assume we're a secondary thread */
ld r5, HSTATE_XICS_PHYS(r13)
li r7, XICS_XIRR
@@ -1612,8 +1634,6 @@ secondary_nap:
kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
- li r0, 0
- std r0, HSTATE_KVM_VCPU(r13)
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 41cb0017e75..2c86b0d6371 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
hlist_del_init_rcu(&pte->list_vpte);
hlist_del_init_rcu(&pte->list_vpte_long);
- if (pte->pte.may_write)
- kvm_release_pfn_dirty(pte->pfn);
- else
- kvm_release_pfn_clean(pte->pfn);
-
spin_unlock(&vcpu3s->mmu_lock);
vcpu3s->hpte_cache_count--;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 05c28f59f77..28d38adeca7 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -52,8 +52,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
-#define __hard_irq_disable local_irq_disable
-#define __hard_irq_enable local_irq_enable
#endif
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -66,7 +64,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
svcpu_put(svcpu);
#endif
-
+ vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
@@ -83,17 +81,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
svcpu_put(svcpu);
#endif
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
+ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+ vcpu->cpu = -1;
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+ int r = 1; /* Indicate we want to get back into the guest */
+
+ /* We misuse TLB_FLUSH to indicate that we want to clear
+ all shadow cache entries */
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return r;
+}
+
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_unmap_hva(hva);
+
+ /*
+ * Flush all shadow tlb entries everywhere. This is slow, but
+ * we are 100% sure that we catch the page to be unmapped
+ */
+ kvm_flush_remote_tlbs(kvm);
+
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ /* kvm_unmap_hva flushes everything anyway */
+ kvm_unmap_hva(kvm, start);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
}
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ /* The page will get remapped properly on its next fault */
+ kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
ulong smsr = vcpu->arch.shared->msr;
/* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
/* Process MSR values */
smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
/* External providers the guest reserved */
@@ -379,10 +431,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
static inline int get_fpr_index(int i)
{
-#ifdef CONFIG_VSX
- i *= 2;
-#endif
- return i;
+ return i * TS_FPRWIDTH;
}
/* Give up external provider (FPU, Altivec, VSX) */
@@ -396,41 +445,49 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
u64 *thread_fpr = (u64*)t->fpr;
int i;
- if (!(vcpu->arch.guest_owned_ext & msr))
+ /*
+ * VSX instructions can access FP and vector registers, so if
+ * we are giving up VSX, make sure we give up FP and VMX as well.
+ */
+ if (msr & MSR_VSX)
+ msr |= MSR_FP | MSR_VEC;
+
+ msr &= vcpu->arch.guest_owned_ext;
+ if (!msr)
return;
#ifdef DEBUG_EXT
printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif
- switch (msr) {
- case MSR_FP:
+ if (msr & MSR_FP) {
+ /*
+ * Note that on CPUs with VSX, giveup_fpu stores
+ * both the traditional FP registers and the added VSX
+ * registers into thread.fpr[].
+ */
giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
vcpu->arch.fpscr = t->fpscr.val;
- break;
- case MSR_VEC:
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX))
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+ vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+ }
+
#ifdef CONFIG_ALTIVEC
+ if (msr & MSR_VEC) {
giveup_altivec(current);
memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
vcpu->arch.vscr = t->vscr;
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- __giveup_vsx(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
- break;
- default:
- BUG();
}
+#endif
- vcpu->arch.guest_owned_ext &= ~msr;
- current->thread.regs->msr &= ~msr;
+ vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
kvmppc_recalc_shadow_msr(vcpu);
}
@@ -490,47 +547,56 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
return RESUME_GUEST;
}
- /* We already own the ext */
- if (vcpu->arch.guest_owned_ext & msr) {
- return RESUME_GUEST;
+ if (msr == MSR_VSX) {
+ /* No VSX? Give an illegal instruction interrupt */
+#ifdef CONFIG_VSX
+ if (!cpu_has_feature(CPU_FTR_VSX))
+#endif
+ {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ return RESUME_GUEST;
+ }
+
+ /*
+ * We have to load up all the FP and VMX registers before
+ * we can let the guest use VSX instructions.
+ */
+ msr = MSR_FP | MSR_VEC | MSR_VSX;
}
+ /* See if we already own all the ext(s) needed */
+ msr &= ~vcpu->arch.guest_owned_ext;
+ if (!msr)
+ return RESUME_GUEST;
+
#ifdef DEBUG_EXT
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif
current->thread.regs->msr |= msr;
- switch (msr) {
- case MSR_FP:
+ if (msr & MSR_FP) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
+#ifdef CONFIG_VSX
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+ thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+#endif
t->fpscr.val = vcpu->arch.fpscr;
t->fpexc_mode = 0;
kvmppc_load_up_fpu();
- break;
- case MSR_VEC:
+ }
+
+ if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
t->vscr = vcpu->arch.vscr;
t->vrsave = -1;
kvmppc_load_up_altivec();
#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
- kvmppc_load_up_vsx();
-#endif
- break;
- default:
- BUG();
}
vcpu->arch.guest_owned_ext |= msr;
-
kvmppc_recalc_shadow_msr(vcpu);
return RESUME_GUEST;
@@ -540,18 +606,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
int r = RESUME_HOST;
+ int s;
vcpu->stat.sum_exits++;
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
- /* We get here with MSR.EE=0, so enable it to be a nice citizen */
- __hard_irq_enable();
+ /* We get here with MSR.EE=1 */
+
+ trace_kvm_exit(exit_nr, vcpu);
+ kvm_guest_exit();
- trace_kvm_book3s_exit(exit_nr, vcpu);
- preempt_enable();
- kvm_resched(vcpu);
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
{
@@ -802,7 +868,6 @@ program_interrupt:
}
}
- preempt_disable();
if (!(r & RESUME_HOST)) {
/* To avoid clobbering exit_reason, only check for signals if
* we aren't already exiting to userspace for some other
@@ -814,20 +879,13 @@ program_interrupt:
* and if we really did time things so badly, then we just exit
* again due to a host external interrupt.
*/
- __hard_irq_disable();
- if (signal_pending(current)) {
- __hard_irq_enable();
-#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM: Going back to host\n");
-#endif
- vcpu->stat.signal_exits++;
- run->exit_reason = KVM_EXIT_INTR;
- r = -EINTR;
+ local_irq_disable();
+ s = kvmppc_prepare_to_enter(vcpu);
+ if (s <= 0) {
+ local_irq_enable();
+ r = s;
} else {
- /* In case an interrupt came in that was triggered
- * from userspace (like DEC), we need to check what
- * to inject now! */
- kvmppc_core_prepare_to_enter(vcpu);
+ kvmppc_lazy_ee_enable();
}
}
@@ -899,34 +957,59 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
- int r = -EINVAL;
+ int r = 0;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_HIOR:
- r = copy_to_user((u64 __user *)(long)reg->addr,
- &to_book3s(vcpu)->hior, sizeof(u64));
+ *val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
+ long int i = id - KVM_REG_PPC_VSR0;
+
+ if (!cpu_has_feature(CPU_FTR_VSX)) {
+ r = -ENXIO;
+ break;
+ }
+ val->vsxval[0] = vcpu->arch.fpr[i];
+ val->vsxval[1] = vcpu->arch.vsr[i];
+ break;
+ }
+#endif /* CONFIG_VSX */
default:
+ r = -EINVAL;
break;
}
return r;
}
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
- int r = -EINVAL;
+ int r = 0;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_HIOR:
- r = copy_from_user(&to_book3s(vcpu)->hior,
- (u64 __user *)(long)reg->addr, sizeof(u64));
- if (!r)
- to_book3s(vcpu)->hior_explicit = true;
+ to_book3s(vcpu)->hior = set_reg_val(id, *val);
+ to_book3s(vcpu)->hior_explicit = true;
+ break;
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
+ long int i = id - KVM_REG_PPC_VSR0;
+
+ if (!cpu_has_feature(CPU_FTR_VSX)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.fpr[i] = val->vsxval[0];
+ vcpu->arch.vsr[i] = val->vsxval[1];
break;
+ }
+#endif /* CONFIG_VSX */
default:
+ r = -EINVAL;
break;
}
@@ -1020,8 +1103,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
ulong ext_msr;
- preempt_disable();
-
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -1029,21 +1110,16 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
goto out;
}
- kvmppc_core_prepare_to_enter(vcpu);
-
/*
* Interrupts could be timers for the guest which we have to inject
* again, so let's postpone them until we're in the guest and if we
* really did time things so badly, then we just exit again due to
* a host external interrupt.
*/
- __hard_irq_disable();
-
- /* No need to go into the guest when all we do is going out */
- if (signal_pending(current)) {
- __hard_irq_enable();
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ret = -EINTR;
+ local_irq_disable();
+ ret = kvmppc_prepare_to_enter(vcpu);
+ if (ret <= 0) {
+ local_irq_enable();
goto out;
}
@@ -1070,7 +1146,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Save VSX state in stack */
used_vsr = current->thread.used_vsr;
if (used_vsr && (current->thread.regs->msr & MSR_VSX))
- __giveup_vsx(current);
+ __giveup_vsx(current);
#endif
/* Remember the MSR with disabled extensions */
@@ -1080,20 +1156,19 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
- kvm_guest_enter();
+ kvmppc_lazy_ee_enable();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
- kvm_guest_exit();
-
- current->thread.regs->msr = ext_msr;
+ /* No need for kvm_guest_exit. It's done in handle_exit.
+ We also get here with interrupts enabled. */
/* Make sure we save the guest FPU/Altivec/VSX state */
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
+ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+
+ current->thread.regs->msr = ext_msr;
- /* Restore FPU state from stack */
+ /* Restore FPU/VSX state from stack */
memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
current->thread.fpscr.val = fpscr;
current->thread.fpexc_mode = fpexc_mode;
@@ -1113,7 +1188,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
out:
- preempt_enable();
+ vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
}
@@ -1181,14 +1256,31 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
}
#endif /* CONFIG_PPC64 */
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ return 0;
+}
+
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem)
{
return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
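For reference, the new VSR cases in kvmppc_get_one_reg()/kvmppc_set_one_reg() above are reached from userspace through the generic ONE_REG ioctls. A minimal sketch (assumes a vcpu fd obtained the usual way and the KVM_REG_PPC_VSR0 id that accompanies this interface; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one 128-bit VSX register: val[0] is the FP half, val[1] the
 * second doubleword, matching the handler above. */
static int get_vsr(int vcpu_fd, unsigned int n, uint64_t val[2])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VSR0 + n,	/* ids are consecutive */
		.addr = (uintptr_t)val,
	};

	/* Returns -1 with errno ENXIO when the host CPU has no VSX. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}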
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9ecf6e35cd8..8f7633e3afb 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -170,20 +170,21 @@ kvmppc_handler_skip_ins:
* Call kvmppc_handler_trampoline_enter in real mode
*
* On entry, r4 contains the guest shadow MSR
+ * MSR.EE has to be 0 when calling this function
*/
_GLOBAL(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)
- li r9, MSR_RI
- ori r9, r9, MSR_EE
- andc r9, r5, r9 /* Clear EE and RI in MSR value */
li r6, MSR_IR | MSR_DR
- ori r6, r6, MSR_EE
- andc r6, r5, r6 /* Clear EE, DR and IR in MSR value */
- MTMSR_EERI(r9) /* Clear EE and RI in MSR */
- mtsrr0 r7 /* before we set srr0/1 */
+ andc r6, r5, r6 /* Clear DR and IR in MSR value */
+ /*
+ * Set EE in HOST_MSR so that it's enabled when we get into our
+ * C exit handler function
+ */
+ ori r5, r5, MSR_EE
+ mtsrr0 r7
mtsrr1 r6
RFI
@@ -233,8 +234,5 @@ define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
-#ifdef CONFIG_VSX
-define_load_up(vsx)
-#endif
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d25a097c852..69f11401578 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,9 +36,11 @@
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
+#include <asm/time.h>
#include "timing.h"
#include "booke.h"
+#include "trace.h"
unsigned long kvmppc_booke_handlers;
@@ -62,6 +64,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "doorbell", VCPU_STAT(dbell_exits) },
{ "guest doorbell", VCPU_STAT(gdbell_exits) },
+ { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ NULL }
};
@@ -120,6 +123,16 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
}
#endif
+static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
+ /* We always treat the FP bit as enabled from the host
+ perspective, so only need to adjust the shadow MSR */
+ vcpu->arch.shadow_msr &= ~MSR_FP;
+ vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
+#endif
+}
+
/*
* Helper function for "full" MSR writes. No need to call this if only
* EE/CE/ME/DE/RI are changing.
@@ -136,11 +149,13 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvmppc_mmu_msr_notify(vcpu, old_msr);
kvmppc_vcpu_sync_spe(vcpu);
+ kvmppc_vcpu_sync_fpu(vcpu);
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
unsigned int priority)
{
+ trace_kvm_booke_queue_irqprio(vcpu, priority);
set_bit(priority, &vcpu->arch.pending_exceptions);
}
@@ -206,6 +221,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
+static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
+}
+
+static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
+{
+ clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
+}
+
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
@@ -287,6 +312,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
bool crit;
bool keep_irq = false;
enum int_class int_class;
+ ulong new_msr = vcpu->arch.shared->msr;
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -325,6 +351,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
msr_mask = MSR_CE | MSR_ME | MSR_DE;
int_class = INT_CLASS_NONCRIT;
break;
+ case BOOKE_IRQPRIO_WATCHDOG:
case BOOKE_IRQPRIO_CRITICAL:
case BOOKE_IRQPRIO_DBELL_CRIT:
allowed = vcpu->arch.shared->msr & MSR_CE;
@@ -381,7 +408,13 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
set_guest_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
set_guest_dear(vcpu, vcpu->arch.queued_dear);
- kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
+
+ new_msr &= msr_mask;
+#if defined(CONFIG_64BIT)
+ if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+ new_msr |= MSR_CM;
+#endif
+ kvmppc_set_msr(vcpu, new_msr);
if (!keep_irq)
clear_bit(priority, &vcpu->arch.pending_exceptions);
@@ -404,12 +437,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
return allowed;
}
+/*
+ * Return the number of jiffies until the next timeout. If the timeout is
+ * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
+ * because larger values can break the timer APIs.
+ */
+static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
+{
+ u64 tb, wdt_tb, wdt_ticks = 0;
+ u64 nr_jiffies = 0;
+ u32 period = TCR_GET_WP(vcpu->arch.tcr);
+
+ wdt_tb = 1ULL << (63 - period);
+ tb = get_tb();
+ /*
+ * The watchdog timeout will happen when the TB bit corresponding
+ * to the watchdog period toggles from 0 to 1.
+ */
+ if (tb & wdt_tb)
+ wdt_ticks = wdt_tb;
+
+ wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
+
+ /* Convert timebase ticks to jiffies */
+ nr_jiffies = wdt_ticks;
+
+ if (do_div(nr_jiffies, tb_ticks_per_jiffy))
+ nr_jiffies++;
+
+ return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
+}
+
+static void arm_next_watchdog(struct kvm_vcpu *vcpu)
+{
+ unsigned long nr_jiffies;
+ unsigned long flags;
+
+ /*
+ * If TSR_ENW and TSR_WIS are not set then no need to exit to
+ * userspace, so clear the KVM_REQ_WATCHDOG request.
+ */
+ if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
+ clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+
+ spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
+ nr_jiffies = watchdog_next_timeout(vcpu);
+ /*
+ * If the number of jiffies until the watchdog timeout is >= NEXT_TIMER_MAX_DELTA,
+ * then do not run the watchdog timer, as this can break the timer APIs.
+ */
+ if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
+ mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
+ else
+ del_timer(&vcpu->arch.wdt_timer);
+ spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
+}
+
+void kvmppc_watchdog_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+ u32 tsr, new_tsr;
+ int final;
+
+ do {
+ new_tsr = tsr = vcpu->arch.tsr;
+ final = 0;
+
+ /* Time out event */
+ if (tsr & TSR_ENW) {
+ if (tsr & TSR_WIS)
+ final = 1;
+ else
+ new_tsr = tsr | TSR_WIS;
+ } else {
+ new_tsr = tsr | TSR_ENW;
+ }
+ } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
+
+ if (new_tsr & TSR_WIS) {
+ smp_wmb();
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+
+ /*
+ * If this is the final watchdog expiry and some action is required,
+ * then exit to userspace.
+ */
+ if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
+ vcpu->arch.watchdog_enabled) {
+ smp_wmb();
+ kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+
+ /*
+ * Stop running the watchdog timer after final expiration to
+ * prevent the host from being flooded with timers if the
+ * guest sets a short period.
+ * The timer will be re-armed the next time TSR/TCR is updated.
+ */
+ if (!final)
+ arm_next_watchdog(vcpu);
+}
+
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
kvmppc_core_queue_dec(vcpu);
else
kvmppc_core_dequeue_dec(vcpu);
+
+ if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
+ kvmppc_core_queue_watchdog(vcpu);
+ else
+ kvmppc_core_dequeue_watchdog(vcpu);
}
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
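For reference, the power-of-two period arithmetic used by watchdog_next_timeout() above can be checked with a small standalone sketch. This is illustrative only and not part of the patch; the function name and the use of <stdint.h> types are assumptions.

#include <stdint.h>

/*
 * Ticks until timebase bit (63 - period) next toggles from 0 to 1,
 * mirroring the wdt_tb/wdt_ticks computation in watchdog_next_timeout().
 */
static uint64_t ticks_until_watchdog(uint64_t tb, uint32_t period)
{
	uint64_t wdt_tb = 1ULL << (63 - period);
	uint64_t ticks = 0;

	if (tb & wdt_tb)	/* bit is currently 1: it must clear and fill again */
		ticks = wdt_tb;

	/* distance until the low bits roll over into the watchdog bit */
	ticks += wdt_tb - (tb & (wdt_tb - 1));

	return ticks;
}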
@@ -417,13 +559,6 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned int priority;
- if (vcpu->requests) {
- if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
- smp_mb();
- update_timer_ints(vcpu);
- }
- }
-
priority = __ffs(*pending);
while (priority < BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -459,37 +594,20 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
return r;
}
-/*
- * Common checks before entering the guest world. Call with interrupts
- * disabled.
- *
- * returns !0 if a signal is pending and check_signal is true
- */
-static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
- int r = 0;
+ int r = 1; /* Indicate we want to get back into the guest */
- WARN_ON_ONCE(!irqs_disabled());
- while (true) {
- if (need_resched()) {
- local_irq_enable();
- cond_resched();
- local_irq_disable();
- continue;
- }
-
- if (signal_pending(current)) {
- r = 1;
- break;
- }
-
- if (kvmppc_core_prepare_to_enter(vcpu)) {
- /* interrupts got enabled in between, so we
- are back at square 1 */
- continue;
- }
+ if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
+ update_timer_ints(vcpu);
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+ kvmppc_core_flush_tlb(vcpu);
+#endif
- break;
+ if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
+ r = 0;
}
return r;
@@ -497,7 +615,7 @@ static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- int ret;
+ int ret, s;
#ifdef CONFIG_PPC_FPU
unsigned int fpscr;
int fpexc_mode;
@@ -510,11 +628,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
local_irq_disable();
- if (kvmppc_prepare_to_enter(vcpu)) {
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ret = -EINTR;
+ s = kvmppc_prepare_to_enter(vcpu);
+ if (s <= 0) {
+ local_irq_enable();
+ ret = s;
goto out;
}
+ kvmppc_lazy_ee_enable();
kvm_guest_enter();
@@ -542,6 +662,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ /* No need for kvm_guest_exit. It's done in handle_exit.
+ We also get here with interrupts enabled. */
+
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
@@ -557,10 +680,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.fpexc_mode = fpexc_mode;
#endif
- kvm_guest_exit();
-
out:
- local_irq_enable();
+ vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
}
@@ -668,6 +789,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
int r = RESUME_HOST;
+ int s;
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -677,6 +799,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
local_irq_enable();
+ trace_kvm_exit(exit_nr, vcpu);
+ kvm_guest_exit();
+
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
@@ -971,10 +1096,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (!(r & RESUME_HOST)) {
local_irq_disable();
- if (kvmppc_prepare_to_enter(vcpu)) {
- run->exit_reason = KVM_EXIT_INTR;
- r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
- kvmppc_account_exit(vcpu, SIGNAL_EXITS);
+ s = kvmppc_prepare_to_enter(vcpu);
+ if (s <= 0) {
+ local_irq_enable();
+ r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+ } else {
+ kvmppc_lazy_ee_enable();
}
}
@@ -1011,6 +1138,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return r;
}
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ /* setup watchdog timer once */
+ spin_lock_init(&vcpu->arch.wdt_lock);
+ setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
+ (unsigned long)vcpu);
+
+ return 0;
+}
+
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ del_timer_sync(&vcpu->arch.wdt_timer);
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
@@ -1106,7 +1248,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
}
if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
+ u32 old_tsr = vcpu->arch.tsr;
+
vcpu->arch.tsr = sregs->u.e.tsr;
+
+ if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+ arm_next_watchdog(vcpu);
+
update_timer_ints(vcpu);
}
@@ -1221,12 +1369,70 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
- return -EINVAL;
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_IAC1:
+ case KVM_REG_PPC_IAC2:
+ case KVM_REG_PPC_IAC3:
+ case KVM_REG_PPC_IAC4: {
+ int iac = reg->id - KVM_REG_PPC_IAC1;
+ r = copy_to_user((u64 __user *)(long)reg->addr,
+ &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
+ break;
+ }
+ case KVM_REG_PPC_DAC1:
+ case KVM_REG_PPC_DAC2: {
+ int dac = reg->id - KVM_REG_PPC_DAC1;
+ r = copy_to_user((u64 __user *)(long)reg->addr,
+ &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
+ break;
+ }
+#if defined(CONFIG_64BIT)
+ case KVM_REG_PPC_EPCR:
+ r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
+ break;
+#endif
+ default:
+ break;
+ }
+ return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
- return -EINVAL;
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_IAC1:
+ case KVM_REG_PPC_IAC2:
+ case KVM_REG_PPC_IAC3:
+ case KVM_REG_PPC_IAC4: {
+ int iac = reg->id - KVM_REG_PPC_IAC1;
+ r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
+ (u64 __user *)(long)reg->addr, sizeof(u64));
+ break;
+ }
+ case KVM_REG_PPC_DAC1:
+ case KVM_REG_PPC_DAC2: {
+ int dac = reg->id - KVM_REG_PPC_DAC1;
+ r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
+ (u64 __user *)(long)reg->addr, sizeof(u64));
+ break;
+ }
+#if defined(CONFIG_64BIT)
+ case KVM_REG_PPC_EPCR: {
+ u32 new_epcr;
+ r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
+ if (r == 0)
+ kvmppc_set_epcr(vcpu, new_epcr);
+ break;
+ }
+#endif
+ default:
+ break;
+ }
+ return r;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
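The IAC/DAC/EPCR registers added here are exposed through the generic ONE_REG interface, so userspace accesses them with struct kvm_one_reg on the vcpu file descriptor. A minimal sketch of the set side, assuming an already-created vcpu_fd and that the KVM_REG_PPC_IAC1 id introduced by this series is exported via linux/kvm.h; error handling is elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_iac1(int vcpu_fd, uint64_t iac_val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_IAC1,
		.addr = (uintptr_t)&iac_val,	/* kernel copies the value from this buffer */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}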
@@ -1253,20 +1459,50 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
+void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ return 0;
+}
+
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem)
{
return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old)
+{
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+}
+
+void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
+#if defined(CONFIG_64BIT)
+ vcpu->arch.epcr = new_epcr;
+#ifdef CONFIG_KVM_BOOKE_HV
+ vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
+ if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+ vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
+#endif
+#endif
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
vcpu->arch.tcr = new_tcr;
+ arm_next_watchdog(vcpu);
update_timer_ints(vcpu);
}
@@ -1281,6 +1517,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
clear_bits(tsr_bits, &vcpu->arch.tsr);
+
+ /*
+ * We may have stopped the watchdog due to
+ * being stuck on final expiration.
+ */
+ if (tsr_bits & (TSR_ENW | TSR_WIS))
+ arm_next_watchdog(vcpu);
+
update_timer_ints(vcpu);
}
@@ -1298,12 +1542,14 @@ void kvmppc_decrementer_func(unsigned long data)
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ vcpu->cpu = smp_processor_id();
current->thread.kvm_vcpu = vcpu;
}
void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
current->thread.kvm_vcpu = NULL;
+ vcpu->cpu = -1;
}
int __init kvmppc_booke_init(void)
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index ba61974c1e2..e9b88e433f6 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -69,6 +69,7 @@ extern unsigned long kvmppc_booke_handlers;
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
+void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 12834bb608a..4685b8cf224 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -133,10 +133,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
vcpu->arch.csrr1 = spr_val;
break;
case SPRN_DBCR0:
- vcpu->arch.dbcr0 = spr_val;
+ vcpu->arch.dbg_reg.dbcr0 = spr_val;
break;
case SPRN_DBCR1:
- vcpu->arch.dbcr1 = spr_val;
+ vcpu->arch.dbg_reg.dbcr1 = spr_val;
break;
case SPRN_DBSR:
vcpu->arch.dbsr &= ~spr_val;
@@ -145,6 +145,14 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
kvmppc_clr_tsr_bits(vcpu, spr_val);
break;
case SPRN_TCR:
+ /*
+ * WRC is a 2-bit field that is supposed to preserve its
+ * value once written to non-zero.
+ */
+ if (vcpu->arch.tcr & TCR_WRC_MASK) {
+ spr_val &= ~TCR_WRC_MASK;
+ spr_val |= vcpu->arch.tcr & TCR_WRC_MASK;
+ }
kvmppc_set_tcr(vcpu, spr_val);
break;
@@ -229,7 +237,17 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_IVOR15:
vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
break;
-
+ case SPRN_MCSR:
+ vcpu->arch.mcsr &= ~spr_val;
+ break;
+#if defined(CONFIG_64BIT)
+ case SPRN_EPCR:
+ kvmppc_set_epcr(vcpu, spr_val);
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
+#endif
+ break;
+#endif
default:
emulated = EMULATE_FAIL;
}
@@ -258,10 +276,10 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
*spr_val = vcpu->arch.csrr1;
break;
case SPRN_DBCR0:
- *spr_val = vcpu->arch.dbcr0;
+ *spr_val = vcpu->arch.dbg_reg.dbcr0;
break;
case SPRN_DBCR1:
- *spr_val = vcpu->arch.dbcr1;
+ *spr_val = vcpu->arch.dbg_reg.dbcr1;
break;
case SPRN_DBSR:
*spr_val = vcpu->arch.dbsr;
@@ -321,6 +339,14 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_IVOR15:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
+ case SPRN_MCSR:
+ *spr_val = vcpu->arch.mcsr;
+ break;
+#if defined(CONFIG_64BIT)
+ case SPRN_EPCR:
+ *spr_val = vcpu->arch.epcr;
+ break;
+#endif
default:
emulated = EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 099fe8272b5..e8ed7d659c5 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -16,6 +16,7 @@
*
* Author: Varun Sethi <varun.sethi@freescale.com>
* Author: Scott Wood <scotwood@freescale.com>
+ * Author: Mihai Caraman <mihai.caraman@freescale.com>
*
* This file is derived from arch/powerpc/kvm/booke_interrupts.S
*/
@@ -30,31 +31,33 @@
#include <asm/bitsperlong.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_64BIT
+#include <asm/exception-64e.h>
+#else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
-
-#define GET_VCPU(vcpu, thread) \
- PPC_LL vcpu, THREAD_KVM_VCPU(thread)
+#endif
#define LONGBYTES (BITS_PER_LONG / 8)
#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
/* The host stack layout: */
-#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */
-#define HOST_CALLEE_LR (1 * LONGBYTES)
-#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */
+#define HOST_R1 0 /* Implied by stwu. */
+#define HOST_CALLEE_LR PPC_LR_STKOFF
+#define HOST_RUN (HOST_CALLEE_LR + LONGBYTES)
/*
* r2 is special: it holds 'current', and it is made nonvolatile in the
* kernel with the -ffixed-r2 gcc option.
*/
-#define HOST_R2 (3 * LONGBYTES)
-#define HOST_CR (4 * LONGBYTES)
-#define HOST_NV_GPRS (5 * LONGBYTES)
+#define HOST_R2 (HOST_RUN + LONGBYTES)
+#define HOST_CR (HOST_R2 + LONGBYTES)
+#define HOST_NV_GPRS (HOST_CR + LONGBYTES)
#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
-#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
+/* LR in caller stack frame. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + PPC_LR_STKOFF)
#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR 0x00000002 /* save faulting DEAR */
@@ -201,12 +204,128 @@
b kvmppc_resume_host
.endm
+#ifdef CONFIG_64BIT
+/* Exception types */
+#define EX_GEN 1
+#define EX_GDBELL 2
+#define EX_DBG 3
+#define EX_MC 4
+#define EX_CRIT 5
+#define EX_TLB 6
+
+/*
+ * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+ */
+.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
+ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
+ mr r11, r4
+ /*
+ * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
+ */
+ PPC_LL r4, PACACURRENT(r13)
+ PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
+ stw r10, VCPU_CR(r4)
+ PPC_STL r11, VCPU_GPR(R4)(r4)
+ PPC_STL r5, VCPU_GPR(R5)(r4)
+ .if \type == EX_CRIT
+ PPC_LL r5, (\paca_ex + EX_R13)(r13)
+ .else
+ mfspr r5, \scratch
+ .endif
+ PPC_STL r6, VCPU_GPR(R6)(r4)
+ PPC_STL r8, VCPU_GPR(R8)(r4)
+ PPC_STL r9, VCPU_GPR(R9)(r4)
+ PPC_STL r5, VCPU_GPR(R13)(r4)
+ PPC_LL r6, (\paca_ex + \ex_r10)(r13)
+ PPC_LL r8, (\paca_ex + \ex_r11)(r13)
+ PPC_STL r3, VCPU_GPR(R3)(r4)
+ PPC_STL r7, VCPU_GPR(R7)(r4)
+ PPC_STL r12, VCPU_GPR(R12)(r4)
+ PPC_STL r6, VCPU_GPR(R10)(r4)
+ PPC_STL r8, VCPU_GPR(R11)(r4)
+ mfctr r5
+ PPC_STL r5, VCPU_CTR(r4)
+ mfspr r5, \srr0
+ mfspr r6, \srr1
+ kvm_handler_common \intno, \srr0, \flags
+.endm
+
+#define EX_PARAMS(type) \
+ EX_##type, \
+ SPRN_SPRG_##type##_SCRATCH, \
+ PACA_EX##type, \
+ EX_R10, \
+ EX_R11
+
+#define EX_PARAMS_TLB \
+ EX_TLB, \
+ SPRN_SPRG_GEN_SCRATCH, \
+ PACA_EXTLB, \
+ EX_TLB_R10, \
+ EX_TLB_R11
+
+kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
+ SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
+ SPRN_MCSRR0, SPRN_MCSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1,NEED_ESR
+kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT),\
+ SPRN_CSRR0, SPRN_CSRR1, 0
+/*
+ * Only bolted TLB miss exception handlers are supported for now
+ */
+kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
+ SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, NEED_EMU
+kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
+ SPRN_GSRR0, SPRN_GSRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
+ SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
+ SPRN_DSRR0, SPRN_DSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
+ SPRN_CSRR0, SPRN_CSRR1, 0
+#else
/*
* For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
*/
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
- GET_VCPU(r11, r10)
+ PPC_LL r11, THREAD_KVM_VCPU(r10)
PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, SPRN_SPRG_RSCRATCH0
PPC_STL r4, VCPU_GPR(R4)(r11)
@@ -233,7 +352,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
mfspr r10, SPRN_SPRG_THREAD
- GET_VCPU(r11, r10)
+ PPC_LL r11, THREAD_KVM_VCPU(r10)
PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, \scratch
PPC_STL r4, VCPU_GPR(R4)(r11)
@@ -295,7 +414,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
-
+#endif
/* Registers:
* SPRG_SCRATCH0: guest r10
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index aa8b81428bf..c70d37ed770 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -27,8 +27,7 @@
#define E500_TLB_NUM 2
#define E500_TLB_VALID 1
-#define E500_TLB_DIRTY 2
-#define E500_TLB_BITMAP 4
+#define E500_TLB_BITMAP 2
struct tlbe_ref {
pfn_t pfn;
@@ -130,9 +129,9 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
-int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
-int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
-int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
@@ -155,7 +154,7 @@ get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
- return tlbe->mas2 & 0xfffff000;
+ return tlbe->mas2 & MAS2_EPN;
}
static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index e04b0ef55ce..e78f353a836 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,6 +89,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int ra = get_ra(inst);
int rb = get_rb(inst);
int rt = get_rt(inst);
+ gva_t ea;
switch (get_op(inst)) {
case 31:
@@ -113,15 +114,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBSX:
- emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
+ ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+ emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
break;
- case XOP_TLBILX:
- emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
+ case XOP_TLBILX: {
+ int type = rt & 0x3;
+ ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+ emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
break;
+ }
case XOP_TLBIVAX:
- emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
+ ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+ emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;
default:
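The tlbsx/tlbilx/tlbivax emulation now computes the effective address once via kvmppc_get_ea_indexed() instead of passing raw register numbers down; the removed open-coded lines spelled the same indexed-form EA out as (ra ? GPR[ra] : 0) + GPR[rb]. A minimal sketch of that calculation, using a hypothetical gpr[] array purely for illustration:

static unsigned long ea_indexed(const unsigned long *gpr, int ra, int rb)
{
	/* rA == 0 means a literal zero base, not GPR0, per the indexed addressing form */
	return (ra ? gpr[ra] : 0) + gpr[rb];
}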
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index ff38b664195..cf3f1801237 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -304,17 +304,13 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
ref->flags = E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
- ref->flags |= E500_TLB_DIRTY;
+ kvm_set_pfn_dirty(pfn);
}
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
- if (ref->flags & E500_TLB_DIRTY)
- kvm_release_pfn_dirty(ref->pfn);
- else
- kvm_release_pfn_clean(ref->pfn);
-
+ trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
}
@@ -357,6 +353,13 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
clear_tlb_privs(vcpu_e500);
}
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ clear_tlb_refs(vcpu_e500);
+ clear_tlb1_bitmap(vcpu_e500);
+}
+
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as)
{
@@ -412,7 +415,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
struct tlbe_ref *ref)
{
struct kvm_memory_slot *slot;
- unsigned long pfn, hva;
+ unsigned long pfn = 0; /* silence GCC warning */
+ unsigned long hva;
int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
@@ -521,7 +525,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (likely(!pfnmap)) {
unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(slot, gfn);
- if (is_error_pfn(pfn)) {
+ if (is_error_noslot_pfn(pfn)) {
printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
(long)gfn);
return;
@@ -541,6 +545,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);
+
+ /* Drop refcount on page, so that mmu notifiers can clear it */
+ kvm_release_pfn_clean(pfn);
}
/* XXX only map the one-one case, for now use TLB0 */
@@ -682,14 +689,11 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
return EMULATE_DONE;
}
-int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int ia;
int esel, tlbsel;
- gva_t ea;
-
- ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
ia = (ea >> 2) & 0x1;
@@ -716,7 +720,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
}
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
- int pid, int rt)
+ int pid, int type)
{
struct kvm_book3e_206_tlb_entry *tlbe;
int tid, esel;
@@ -725,7 +729,7 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
tlbe = get_entry(vcpu_e500, tlbsel, esel);
tid = get_tlb_tid(tlbe);
- if (rt == 0 || tid == pid) {
+ if (type == 0 || tid == pid) {
inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
}
@@ -733,14 +737,9 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
}
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
- int ra, int rb)
+ gva_t ea)
{
int tlbsel, esel;
- gva_t ea;
-
- ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
@@ -752,16 +751,16 @@ static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
}
}
-int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int pid = get_cur_spid(vcpu);
- if (rt == 0 || rt == 1) {
- tlbilx_all(vcpu_e500, 0, pid, rt);
- tlbilx_all(vcpu_e500, 1, pid, rt);
- } else if (rt == 3) {
- tlbilx_one(vcpu_e500, pid, ra, rb);
+ if (type == 0 || type == 1) {
+ tlbilx_all(vcpu_e500, 0, pid, type);
+ tlbilx_all(vcpu_e500, 1, pid, type);
+ } else if (type == 3) {
+ tlbilx_one(vcpu_e500, pid, ea);
}
return EMULATE_DONE;
@@ -786,16 +785,13 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
-int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int as = !!get_cur_sas(vcpu);
unsigned int pid = get_cur_spid(vcpu);
int esel, tlbsel;
struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
- gva_t ea;
-
- ea = kvmppc_get_gpr(vcpu, rb);
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
@@ -875,6 +871,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas1 = vcpu->arch.shared->mas1;
gtlbe->mas2 = vcpu->arch.shared->mas2;
+ if (!(vcpu->arch.shared->msr & MSR_CM))
+ gtlbe->mas2 &= 0xffffffffUL;
gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
@@ -1039,8 +1037,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
sesel = 0; /* unused */
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
- &priv->ref, eaddr, &stlbe);
+ /* Only triggers after clear_tlb_refs */
+ if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
+ kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+ else
+ kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
+ &priv->ref, eaddr, &stlbe);
break;
case 1: {
@@ -1060,6 +1062,49 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_unmap_hva(hva);
+
+ /*
+ * Flush all shadow TLB entries everywhere. This is slow, but it
+ * guarantees that we catch the page that is about to be unmapped.
+ */
+ kvm_flush_remote_tlbs(kvm);
+
+ return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ /* kvm_unmap_hva flushes everything anyways */
+ kvm_unmap_hva(kvm, start);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ /* XXX could be more clever ;) */
+ return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+ /* The page will get remapped properly on its next fault */
+ kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int i;
@@ -1081,6 +1126,8 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
}
vcpu_e500->num_shared_tlb_pages = 0;
+
+ kfree(vcpu_e500->shared_tlb_pages);
vcpu_e500->shared_tlb_pages = NULL;
} else {
kfree(vcpu_e500->gtlb_arch);
@@ -1178,21 +1225,27 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
}
virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
- if (!virt)
+ if (!virt) {
+ ret = -ENOMEM;
goto err_put_page;
+ }
privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
GFP_KERNEL);
privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
GFP_KERNEL);
- if (!privs[0] || !privs[1])
- goto err_put_page;
+ if (!privs[0] || !privs[1]) {
+ ret = -ENOMEM;
+ goto err_privs;
+ }
g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
GFP_KERNEL);
- if (!g2h_bitmap)
- goto err_put_page;
+ if (!g2h_bitmap) {
+ ret = -ENOMEM;
+ goto err_privs;
+ }
free_gtlb(vcpu_e500);
@@ -1232,10 +1285,11 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
-err_put_page:
+err_privs:
kfree(privs[0]);
kfree(privs[1]);
+err_put_page:
for (i = 0; i < num_pages; i++)
put_page(pages[i]);
@@ -1332,7 +1386,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->gtlb_priv[1])
goto err;
- vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+ vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
vcpu_e500->gtlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->g2h_tlb1_map)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index ee04abaefe2..b0855e5d890 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -131,6 +131,125 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
return vcpu->arch.dec - jd;
}
+static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ enum emulation_result emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
+
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.shared->srr0 = spr_val;
+ break;
+ case SPRN_SRR1:
+ vcpu->arch.shared->srr1 = spr_val;
+ break;
+
+ /* XXX We need to context-switch the timebase for
+ * watchdog and FIT. */
+ case SPRN_TBWL: break;
+ case SPRN_TBWU: break;
+
+ case SPRN_MSSSR0: break;
+
+ case SPRN_DEC:
+ vcpu->arch.dec = spr_val;
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.shared->sprg0 = spr_val;
+ break;
+ case SPRN_SPRG1:
+ vcpu->arch.shared->sprg1 = spr_val;
+ break;
+ case SPRN_SPRG2:
+ vcpu->arch.shared->sprg2 = spr_val;
+ break;
+ case SPRN_SPRG3:
+ vcpu->arch.shared->sprg3 = spr_val;
+ break;
+
+ default:
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+ spr_val);
+ if (emulated == EMULATE_FAIL)
+ printk(KERN_INFO "mtspr: unknown spr "
+ "0x%x\n", sprn);
+ break;
+ }
+
+ kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+
+ return emulated;
+}
+
+static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ enum emulation_result emulated = EMULATE_DONE;
+ ulong spr_val = 0;
+
+ switch (sprn) {
+ case SPRN_SRR0:
+ spr_val = vcpu->arch.shared->srr0;
+ break;
+ case SPRN_SRR1:
+ spr_val = vcpu->arch.shared->srr1;
+ break;
+ case SPRN_PVR:
+ spr_val = vcpu->arch.pvr;
+ break;
+ case SPRN_PIR:
+ spr_val = vcpu->vcpu_id;
+ break;
+ case SPRN_MSSSR0:
+ spr_val = 0;
+ break;
+
+ /* Note: mftb and TBRL/TBWL are user-accessible, so
+ * the guest can always access the real TB anyways.
+ * In fact, we probably will never see these traps. */
+ case SPRN_TBWL:
+ spr_val = get_tb() >> 32;
+ break;
+ case SPRN_TBWU:
+ spr_val = get_tb();
+ break;
+
+ case SPRN_SPRG0:
+ spr_val = vcpu->arch.shared->sprg0;
+ break;
+ case SPRN_SPRG1:
+ spr_val = vcpu->arch.shared->sprg1;
+ break;
+ case SPRN_SPRG2:
+ spr_val = vcpu->arch.shared->sprg2;
+ break;
+ case SPRN_SPRG3:
+ spr_val = vcpu->arch.shared->sprg3;
+ break;
+ /* Note: SPRG4-7 are user-readable, so we don't get
+ * a trap. */
+
+ case SPRN_DEC:
+ spr_val = kvmppc_get_dec(vcpu, get_tb());
+ break;
+ default:
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+ &spr_val);
+ if (unlikely(emulated == EMULATE_FAIL)) {
+ printk(KERN_INFO "mfspr: unknown spr "
+ "0x%x\n", sprn);
+ }
+ break;
+ }
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, rt, spr_val);
+ kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
+
+ return emulated;
+}
+
/* XXX to do:
* lhax
* lhaux
@@ -156,7 +275,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
- ulong spr_val = 0;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -236,62 +354,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MFSPR:
- switch (sprn) {
- case SPRN_SRR0:
- spr_val = vcpu->arch.shared->srr0;
- break;
- case SPRN_SRR1:
- spr_val = vcpu->arch.shared->srr1;
- break;
- case SPRN_PVR:
- spr_val = vcpu->arch.pvr;
- break;
- case SPRN_PIR:
- spr_val = vcpu->vcpu_id;
- break;
- case SPRN_MSSSR0:
- spr_val = 0;
- break;
-
- /* Note: mftb and TBRL/TBWL are user-accessible, so
- * the guest can always access the real TB anyways.
- * In fact, we probably will never see these traps. */
- case SPRN_TBWL:
- spr_val = get_tb() >> 32;
- break;
- case SPRN_TBWU:
- spr_val = get_tb();
- break;
-
- case SPRN_SPRG0:
- spr_val = vcpu->arch.shared->sprg0;
- break;
- case SPRN_SPRG1:
- spr_val = vcpu->arch.shared->sprg1;
- break;
- case SPRN_SPRG2:
- spr_val = vcpu->arch.shared->sprg2;
- break;
- case SPRN_SPRG3:
- spr_val = vcpu->arch.shared->sprg3;
- break;
- /* Note: SPRG4-7 are user-readable, so we don't get
- * a trap. */
-
- case SPRN_DEC:
- spr_val = kvmppc_get_dec(vcpu, get_tb());
- break;
- default:
- emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
- &spr_val);
- if (unlikely(emulated == EMULATE_FAIL)) {
- printk(KERN_INFO "mfspr: unknown spr "
- "0x%x\n", sprn);
- }
- break;
- }
- kvmppc_set_gpr(vcpu, rt, spr_val);
- kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
+ emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
break;
case OP_31_XOP_STHX:
@@ -308,49 +371,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MTSPR:
- spr_val = kvmppc_get_gpr(vcpu, rs);
- switch (sprn) {
- case SPRN_SRR0:
- vcpu->arch.shared->srr0 = spr_val;
- break;
- case SPRN_SRR1:
- vcpu->arch.shared->srr1 = spr_val;
- break;
-
- /* XXX We need to context-switch the timebase for
- * watchdog and FIT. */
- case SPRN_TBWL: break;
- case SPRN_TBWU: break;
-
- case SPRN_MSSSR0: break;
-
- case SPRN_DEC:
- vcpu->arch.dec = spr_val;
- kvmppc_emulate_dec(vcpu);
- break;
-
- case SPRN_SPRG0:
- vcpu->arch.shared->sprg0 = spr_val;
- break;
- case SPRN_SPRG1:
- vcpu->arch.shared->sprg1 = spr_val;
- break;
- case SPRN_SPRG2:
- vcpu->arch.shared->sprg2 = spr_val;
- break;
- case SPRN_SPRG3:
- vcpu->arch.shared->sprg3 = spr_val;
- break;
-
- default:
- emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
- spr_val);
- if (emulated == EMULATE_FAIL)
- printk(KERN_INFO "mtspr: unknown spr "
- "0x%x\n", sprn);
- break;
- }
- kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+ emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
break;
case OP_31_XOP_DCBI:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4d213b8b0fb..70739a08956 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
+#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"
@@ -38,8 +39,7 @@
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- return !(v->arch.shared->msr & MSR_WE) ||
- !!(v->arch.pending_exceptions) ||
+ return !!(v->arch.pending_exceptions) ||
v->requests;
}
@@ -48,6 +48,85 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+/*
+ * Common checks before entering the guest world. Call with interrupts
+ * disabled.
+ *
+ * returns:
+ *
+ * == 1 if we're ready to go into guest state
+ * <= 0 if we need to go back to the host with that return value
+ */
+int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+ int r = 1;
+
+ WARN_ON_ONCE(!irqs_disabled());
+ while (true) {
+ if (need_resched()) {
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
+ continue;
+ }
+
+ if (signal_pending(current)) {
+ kvmppc_account_exit(vcpu, SIGNAL_EXITS);
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ break;
+ }
+
+ vcpu->mode = IN_GUEST_MODE;
+
+ /*
+ * Reading vcpu->requests must happen after setting vcpu->mode,
+ * so we don't miss a request because the requester sees
+ * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
+ * before next entering the guest (and thus doesn't IPI).
+ */
+ smp_mb();
+
+ if (vcpu->requests) {
+ /* Make sure requests are processed with preemption enabled */
+ local_irq_enable();
+ trace_kvm_check_requests(vcpu);
+ r = kvmppc_core_check_requests(vcpu);
+ local_irq_disable();
+ if (r > 0)
+ continue;
+ break;
+ }
+
+ if (kvmppc_core_prepare_to_enter(vcpu)) {
+ /* interrupts got enabled in between, so we
+ are back at square 1 */
+ continue;
+ }
+
+#ifdef CONFIG_PPC64
+ /* lazy EE magic */
+ hard_irq_disable();
+ if (lazy_irq_pending()) {
+ /* Got an interrupt in between, try again */
+ local_irq_enable();
+ local_irq_disable();
+ kvm_guest_exit();
+ continue;
+ }
+
+ trace_hardirqs_on();
+#endif
+
+ kvm_guest_enter();
+ break;
+ }
+
+ return r;
+}
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
int nr = kvmppc_get_gpr(vcpu, 11);
@@ -67,18 +146,18 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
switch (nr) {
- case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
+ case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
{
vcpu->arch.magic_page_pa = param1;
vcpu->arch.magic_page_ea = param2;
r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
- r = HC_EV_SUCCESS;
+ r = EV_SUCCESS;
break;
}
- case HC_VENDOR_KVM | KVM_HC_FEATURES:
- r = HC_EV_SUCCESS;
+ case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
+ r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
/* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
@@ -86,8 +165,13 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
/* Second return value is in r4 */
break;
+ case EV_HCALL_TOKEN(EV_IDLE):
+ r = EV_SUCCESS;
+ kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ break;
default:
- r = HC_EV_UNIMPLEMENTED;
+ r = EV_UNIMPLEMENTED;
break;
}
@@ -220,6 +304,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
#ifdef CONFIG_BOOKE
case KVM_CAP_PPC_BOOKE_SREGS:
+ case KVM_CAP_PPC_BOOKE_WATCHDOG:
#else
case KVM_CAP_PPC_SEGSTATE:
case KVM_CAP_PPC_HIOR:
@@ -229,6 +314,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_ONE_REG:
+ case KVM_CAP_IOEVENTFD:
r = 1;
break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
@@ -260,10 +346,22 @@ int kvm_dev_ioctl_check_extension(long ext)
if (cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
+#endif
case KVM_CAP_SYNC_MMU:
+#ifdef CONFIG_KVM_BOOK3S_64_HV
r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ r = 1;
+#else
+ r = 0;
+ break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_PPC_HTAB_FD:
+ r = 1;
break;
#endif
+ break;
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we
@@ -302,19 +400,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
- if (!dont || free->arch.rmap != dont->arch.rmap) {
- vfree(free->arch.rmap);
- free->arch.rmap = NULL;
- }
+ kvmppc_core_free_memslot(free, dont);
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
- slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
- if (!slot->arch.rmap)
- return -ENOMEM;
-
- return 0;
+ return kvmppc_core_create_memslot(slot, npages);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -323,7 +414,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- return kvmppc_core_prepare_memory_region(kvm, mem);
+ return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -331,7 +422,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- kvmppc_core_commit_memory_region(kvm, mem);
+ kvmppc_core_commit_memory_region(kvm, mem, old);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -341,6 +432,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
+ kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
@@ -354,6 +446,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
return vcpu;
}
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
/* Make sure we're not using the vcpu anymore */
@@ -390,6 +487,8 @@ enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
+ int ret;
+
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
@@ -398,13 +497,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
#endif
-
- return 0;
+ ret = kvmppc_subarch_vcpu_init(vcpu);
+ return ret;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_destroy(vcpu);
+ kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -420,7 +520,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
kvmppc_core_vcpu_load(vcpu, cpu);
- vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -429,7 +528,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#ifdef CONFIG_BOOKE
vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
- vcpu->cpu = -1;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -527,6 +625,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_is_write = 0;
vcpu->arch.mmio_sign_extend = 0;
+ if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ bytes, &run->mmio.data)) {
+ kvmppc_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
}
@@ -536,8 +641,8 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int r;
- r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
vcpu->arch.mmio_sign_extend = 1;
+ r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
return r;
}
@@ -575,6 +680,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
+ if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ bytes, &run->mmio.data)) {
+ kvmppc_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
}
@@ -649,6 +761,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = 0;
vcpu->arch.papr_enabled = true;
break;
+#ifdef CONFIG_BOOKE
+ case KVM_CAP_PPC_BOOKE_WATCHDOG:
+ r = 0;
+ vcpu->arch.watchdog_enabled = true;
+ break;
+#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB: {
struct kvm_config_tlb cfg;
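The watchdog support is opt-in: userspace must enable KVM_CAP_PPC_BOOKE_WATCHDOG on the vcpu before a final expiry is reported back. A minimal sketch of that call, assuming an open vcpu_fd and that the capability constant from this series is exported:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_guest_watchdog(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_BOOKE_WATCHDOG,
	};

	/* After this, a final watchdog expiry exits to userspace with
	 * run->exit_reason == KVM_EXIT_WATCHDOG. */
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}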
@@ -751,9 +869,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
+ u32 inst_nop = 0x60000000;
+#ifdef CONFIG_KVM_BOOKE_HV
+ u32 inst_sc1 = 0x44000022;
+ pvinfo->hcall[0] = inst_sc1;
+ pvinfo->hcall[1] = inst_nop;
+ pvinfo->hcall[2] = inst_nop;
+ pvinfo->hcall[3] = inst_nop;
+#else
u32 inst_lis = 0x3c000000;
u32 inst_ori = 0x60000000;
- u32 inst_nop = 0x60000000;
u32 inst_sc = 0x44000002;
u32 inst_imm_mask = 0xffff;
@@ -770,6 +895,9 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
pvinfo->hcall[2] = inst_sc;
pvinfo->hcall[3] = inst_nop;
+#endif
+
+ pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
return 0;
}
@@ -832,6 +960,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
break;
}
+
+ case KVM_PPC_GET_HTAB_FD: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_get_htab_fd ghf;
+
+ r = -EFAULT;
+ if (copy_from_user(&ghf, argp, sizeof(ghf)))
+ break;
+ r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+ break;
+ }
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
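The new KVM_PPC_GET_HTAB_FD ioctl operates on the VM file descriptor and returns a separate fd for streaming the guest hashed page table. A minimal sketch, assuming vm_fd is the VM descriptor and that the struct kvm_get_htab_fd layout from this series is exported; a zeroed argument is used here purely for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int open_htab_fd(int vm_fd)
{
	struct kvm_get_htab_fd ghf;

	memset(&ghf, 0, sizeof(ghf));	/* zeroed; real users fill in the fields defined by this series */
	/* On success the return value is a new file descriptor for the HPT stream. */
	return ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
}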
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index ddb6a2149d4..e326489a542 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -31,6 +31,126 @@ TRACE_EVENT(kvm_ppc_instr,
__entry->inst, __entry->pc, __entry->emulate)
);
+#ifdef CONFIG_PPC_BOOK3S
+#define kvm_trace_symbol_exit \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+#else
+#define kvm_trace_symbol_exit \
+ {0, "CRITICAL"}, \
+ {1, "MACHINE_CHECK"}, \
+ {2, "DATA_STORAGE"}, \
+ {3, "INST_STORAGE"}, \
+ {4, "EXTERNAL"}, \
+ {5, "ALIGNMENT"}, \
+ {6, "PROGRAM"}, \
+ {7, "FP_UNAVAIL"}, \
+ {8, "SYSCALL"}, \
+ {9, "AP_UNAVAIL"}, \
+ {10, "DECREMENTER"}, \
+ {11, "FIT"}, \
+ {12, "WATCHDOG"}, \
+ {13, "DTLB_MISS"}, \
+ {14, "ITLB_MISS"}, \
+ {15, "DEBUG"}, \
+ {32, "SPE_UNAVAIL"}, \
+ {33, "SPE_FP_DATA"}, \
+ {34, "SPE_FP_ROUND"}, \
+ {35, "PERFORMANCE_MONITOR"}, \
+ {36, "DOORBELL"}, \
+ {37, "DOORBELL_CRITICAL"}, \
+ {38, "GUEST_DBELL"}, \
+ {39, "GUEST_DBELL_CRIT"}, \
+ {40, "HV_SYSCALL"}, \
+ {41, "HV_PRIV"}
+#endif
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_nr, vcpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, exit_nr )
+ __field( unsigned long, pc )
+ __field( unsigned long, msr )
+ __field( unsigned long, dar )
+#ifdef CONFIG_KVM_BOOK3S_PR
+ __field( unsigned long, srr1 )
+#endif
+ __field( unsigned long, last_inst )
+ ),
+
+ TP_fast_assign(
+#ifdef CONFIG_KVM_BOOK3S_PR
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+#endif
+ __entry->exit_nr = exit_nr;
+ __entry->pc = kvmppc_get_pc(vcpu);
+ __entry->dar = kvmppc_get_fault_dar(vcpu);
+ __entry->msr = vcpu->arch.shared->msr;
+#ifdef CONFIG_KVM_BOOK3S_PR
+ svcpu = svcpu_get(vcpu);
+ __entry->srr1 = svcpu->shadow_srr1;
+ svcpu_put(svcpu);
+#endif
+ __entry->last_inst = vcpu->arch.last_inst;
+ ),
+
+ TP_printk("exit=%s"
+ " | pc=0x%lx"
+ " | msr=0x%lx"
+ " | dar=0x%lx"
+#ifdef CONFIG_KVM_BOOK3S_PR
+ " | srr1=0x%lx"
+#endif
+ " | last_inst=0x%lx"
+ ,
+ __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
+ __entry->pc,
+ __entry->msr,
+ __entry->dar,
+#ifdef CONFIG_KVM_BOOK3S_PR
+ __entry->srr1,
+#endif
+ __entry->last_inst
+ )
+);
+
+TRACE_EVENT(kvm_unmap_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("unmap hva 0x%lx\n", __entry->hva)
+);
+
TRACE_EVENT(kvm_stlb_inval,
TP_PROTO(unsigned int stlb_index),
TP_ARGS(stlb_index),
@@ -98,41 +218,31 @@ TRACE_EVENT(kvm_gtlb_write,
__entry->word1, __entry->word2)
);
-
-/*************************************************************************
- * Book3S trace points *
- *************************************************************************/
-
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-TRACE_EVENT(kvm_book3s_exit,
- TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
- TP_ARGS(exit_nr, vcpu),
+TRACE_EVENT(kvm_check_requests,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
TP_STRUCT__entry(
- __field( unsigned int, exit_nr )
- __field( unsigned long, pc )
- __field( unsigned long, msr )
- __field( unsigned long, dar )
- __field( unsigned long, srr1 )
+ __field( __u32, cpu_nr )
+ __field( __u32, requests )
),
TP_fast_assign(
- struct kvmppc_book3s_shadow_vcpu *svcpu;
- __entry->exit_nr = exit_nr;
- __entry->pc = kvmppc_get_pc(vcpu);
- __entry->dar = kvmppc_get_fault_dar(vcpu);
- __entry->msr = vcpu->arch.shared->msr;
- svcpu = svcpu_get(vcpu);
- __entry->srr1 = svcpu->shadow_srr1;
- svcpu_put(svcpu);
+ __entry->cpu_nr = vcpu->vcpu_id;
+ __entry->requests = vcpu->requests;
),
- TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",
- __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar,
- __entry->srr1)
+ TP_printk("vcpu=%x requests=%x",
+ __entry->cpu_nr, __entry->requests)
);
+
+/*************************************************************************
+ * Book3S trace points *
+ *************************************************************************/
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
TP_ARGS(r, vcpu),
@@ -395,6 +505,44 @@ TRACE_EVENT(kvm_booke206_gtlb_write,
__entry->mas2, __entry->mas7_3)
);
+TRACE_EVENT(kvm_booke206_ref_release,
+ TP_PROTO(__u64 pfn, __u32 flags),
+ TP_ARGS(pfn, flags),
+
+ TP_STRUCT__entry(
+ __field( __u64, pfn )
+ __field( __u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("pfn=%llx flags=%x",
+ __entry->pfn, __entry->flags)
+);
+
+TRACE_EVENT(kvm_booke_queue_irqprio,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
+ TP_ARGS(vcpu, priority),
+
+ TP_STRUCT__entry(
+ __field( __u32, cpu_nr )
+ __field( __u32, priority )
+ __field( unsigned long, pending )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_nr = vcpu->vcpu_id;
+ __entry->priority = priority;
+ __entry->pending = vcpu->arch.pending_exceptions;
+ ),
+
+ TP_printk("vcpu=%x prio=%x pending=%lx",
+ __entry->cpu_nr, __entry->priority, __entry->pending)
+);
+
#endif
#endif /* _TRACE_KVM_H */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 0a6b28336eb..3a8489a354e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -113,19 +113,6 @@ static int store_updates_sp(struct pt_regs *regs)
#define MM_FAULT_CONTINUE -1
#define MM_FAULT_ERR(sig) (sig)
-static int out_of_memory(struct pt_regs *regs)
-{
- /*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
- up_read(&current->mm->mmap_sem);
- if (!user_mode(regs))
- return MM_FAULT_ERR(SIGKILL);
- pagefault_out_of_memory();
- return MM_FAULT_RETURN;
-}
-
static int do_sigbus(struct pt_regs *regs, unsigned long address)
{
siginfo_t info;
@@ -169,8 +156,18 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
return MM_FAULT_CONTINUE;
/* Out of memory */
- if (fault & VM_FAULT_OOM)
- return out_of_memory(regs);
+ if (fault & VM_FAULT_OOM) {
+ up_read(&current->mm->mmap_sem);
+
+ /*
+ * We ran out of memory, or some other thing happened to us that
+ * made us unable to handle the page fault gracefully.
+ */
+ if (!user_mode(regs))
+ return MM_FAULT_ERR(SIGKILL);
+ pagefault_out_of_memory();
+ return MM_FAULT_RETURN;
+ }
/* Bus error. x86 handles HWPOISON here, we'll add this if/when
* we support the feature in HW
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 59213cfaeca..bba87ca2b4d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -399,18 +399,6 @@ static unsigned long read_n_cells(int n, const unsigned int **buf)
return result;
}
-struct of_drconf_cell {
- u64 base_addr;
- u32 drc_index;
- u32 reserved;
- u32 aa_index;
- u32 flags;
-};
-
-#define DRCONF_MEM_ASSIGNED 0x00000008
-#define DRCONF_MEM_AI_INVALID 0x00000040
-#define DRCONF_MEM_RESERVED 0x00000080
-
/*
* Read the next memblock list entry from the ibm,dynamic-memory property
* and return the information in the provided of_drconf_cell structure.
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 5829d2a950d..cf9dada734b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -722,7 +722,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
}
/*
- * is_hugepage_only_range() is used by generic code to verify wether
+ * is_hugepage_only_range() is used by generic code to verify whether
* a normal mmap mapping (non hugetlbfs) is valid on a given area.
*
* until the generic code provides a more generic hook and/or starts
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index fab919fd138..626ad081639 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -191,12 +191,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
#ifdef CONFIG_PPC_47x
/*
- * 47x variant of icbt
- */
-# define ICBT(CT,RA,RB) \
- .long 0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11)
-
-/*
* _tlbivax_bcast is only on 47x. We don't bother doing a runtime
* check though, it will blow up soon enough if we mistakenly try
* to use it on a 440.
@@ -208,8 +202,7 @@ _GLOBAL(_tlbivax_bcast)
wrteei 0
mtspr SPRN_MMUCR,r5
isync
-/* tlbivax 0,r3 - use .long to avoid binutils deps */
- .long 0x7c000624 | (r3 << 11)
+ PPC_TLBIVAX(0, R3)
isync
eieio
tlbsync
@@ -227,11 +220,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
bl 2f
2: mflr r6
li r7,32
- ICBT(0,r6,r7) /* touch next cache line */
+ PPC_ICBT(0,R6,R7) /* touch next cache line */
add r6,r6,r7
- ICBT(0,r6,r7) /* touch next cache line */
+ PPC_ICBT(0,R6,R7) /* touch next cache line */
add r6,r6,r7
- ICBT(0,r6,r7) /* touch next cache line */
+ PPC_ICBT(0,R6,R7) /* touch next cache line */
sync
nop
nop
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 441af08edf4..2ee01e38d5e 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -54,8 +54,10 @@
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
- * [ ><><><><><><>
- * NC P6P5P4P3P2P1
+ * < >< ><><><><><><>
+ * L2 NC P6P5P4P3P2P1
+ *
+ * L2 - 16-18 - Required L2SEL value (select field)
*
* NC - number of counters
* 15: NC error 0x8000
@@ -72,7 +74,7 @@
static int power7_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp)
{
- int pmc, sh;
+ int pmc, sh, unit;
unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -90,6 +92,15 @@ static int power7_get_constraint(u64 event, unsigned long *maskp,
mask |= 0x8000;
value |= 0x1000;
}
+
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit == 6) {
+ /* L2SEL must be identical across events */
+ int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK;
+ mask |= 0x7 << 16;
+ value |= l2sel << 16;
+ }
+
*maskp = mask;
*valp = value;
return 0;
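The hunk above folds the required L2SEL field into each event's constraint mask/value pair, so the core event scheduler only groups events that agree on L2SEL. As a hedged illustration (not the core's actual constraint algorithm, which also packs counter-count fields into the same word), two events carrying equality constraints are compatible when every bit constrained by both holds the same value:

#include <stdbool.h>

/*
 * Illustrative only: with L2SEL folded into (mask, value), two events can be
 * scheduled together when the bits they both constrain hold the same value.
 */
static bool constraints_compatible(unsigned long mask_a, unsigned long val_a,
				   unsigned long mask_b, unsigned long val_b)
{
	unsigned long common = mask_a & mask_b;

	return (val_a & common) == (val_b & common);
}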
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index b62508b113d..c16999802ec 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -2,7 +2,6 @@ config PPC_MPC512x
bool "512x-based boards"
depends on 6xx
select FSL_SOC
- select FB_FSL_DIU
select IPIC
select PPC_CLOCK
select PPC_PCI_CHOICE
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index dcef6ade48e..0a134e0469e 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -42,7 +42,10 @@ static void __init mpc5121_ads_setup_arch(void)
for_each_compatible_node(np, "pci", "fsl,mpc5121-pci")
mpc83xx_add_bridge(np);
#endif
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
mpc512x_setup_diu();
+#endif
}
static void __init mpc5121_ads_init_IRQ(void)
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index 1ab6d11d0b1..c32b399eb95 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -16,6 +16,13 @@ extern void __init mpc512x_init(void);
extern int __init mpc5121_clk_init(void);
void __init mpc512x_declare_of_platform_devices(void);
extern void mpc512x_restart(char *cmd);
-extern void mpc512x_init_diu(void);
-extern void mpc512x_setup_diu(void);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+void mpc512x_init_diu(void);
+void mpc512x_setup_diu(void);
+#else
+#define mpc512x_init_diu NULL
+#define mpc512x_setup_diu NULL
+#endif
+
#endif /* __MPC512X_H__ */
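A side note on the guard used above: IS_ENABLED() from <linux/kconfig.h> covers the built-in and module cases in a single test. A sketch of the equivalent form (the patch itself keeps the explicit pair of defines):

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FB_FSL_DIU)	/* true for both =y and =m */
void mpc512x_init_diu(void);
void mpc512x_setup_diu(void);
#else
#define mpc512x_init_diu NULL
#define mpc512x_setup_diu NULL
#endif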
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 1650e090ef3..35f14fda108 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -58,6 +58,8 @@ void mpc512x_restart(char *cmd)
;
}
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
struct fsl_diu_shared_fb {
u8 gamma[0x300]; /* 32-bit aligned! */
struct diu_ad ad0; /* 32-bit aligned! */
@@ -66,25 +68,6 @@ struct fsl_diu_shared_fb {
bool in_use;
};
-u32 mpc512x_get_pixel_format(enum fsl_diu_monitor_port port,
- unsigned int bits_per_pixel)
-{
- switch (bits_per_pixel) {
- case 32:
- return 0x88883316;
- case 24:
- return 0x88082219;
- case 16:
- return 0x65053118;
- }
- return 0x00000400;
-}
-
-void mpc512x_set_gamma_table(enum fsl_diu_monitor_port port,
- char *gamma_table_base)
-{
-}
-
void mpc512x_set_monitor_port(enum fsl_diu_monitor_port port)
{
}
@@ -320,14 +303,14 @@ void __init mpc512x_setup_diu(void)
}
}
- diu_ops.get_pixel_format = mpc512x_get_pixel_format;
- diu_ops.set_gamma_table = mpc512x_set_gamma_table;
diu_ops.set_monitor_port = mpc512x_set_monitor_port;
diu_ops.set_pixel_clock = mpc512x_set_pixel_clock;
diu_ops.valid_monitor_port = mpc512x_valid_monitor_port;
diu_ops.release_bootmem = mpc512x_release_bootmem;
}
+#endif
+
void __init mpc512x_init_IRQ(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 448d862bcf3..1843bc93201 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -4,7 +4,7 @@
* Written by: Grant Likely <grant.likely@secretlab.ca>
*
* Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Description:
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 9cf36020cf0..792a301a0bf 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
/* list of the supported boards */
static const char *board[] __initdata = {
+ "anonymous,a3m071",
"anonymous,a4m072",
"anon,charon",
"ifm,o2d",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 028470b9588..a51cb07bd66 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -526,7 +526,7 @@ EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
-/* wdt_is_active stores wether or not the /dev/watchdog device is opened */
+/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
static unsigned long wdt_is_active;
/* wdt-capable gpt */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 2351f9e0fb6..16150fa430f 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -578,18 +578,4 @@ static struct platform_driver mpc52xx_lpbfifo_driver = {
.probe = mpc52xx_lpbfifo_probe,
.remove = __devexit_p(mpc52xx_lpbfifo_remove),
};
-
-/***********************************************************************
- * Module init/exit
- */
-static int __init mpc52xx_lpbfifo_init(void)
-{
- return platform_driver_register(&mpc52xx_lpbfifo_driver);
-}
-module_init(mpc52xx_lpbfifo_init);
-
-static void __exit mpc52xx_lpbfifo_exit(void)
-{
- platform_driver_unregister(&mpc52xx_lpbfifo_driver);
-}
-module_exit(mpc52xx_lpbfifo_exit);
+module_platform_driver(mpc52xx_lpbfifo_driver);
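module_platform_driver() generates exactly the module_init()/module_exit() pair deleted above. A minimal sketch with a hypothetical "foo" driver (names are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;			/* claim the device */
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
	.probe  = foo_probe,
	.remove = foo_remove,
};

/* Expands to init/exit functions calling platform_driver_{register,unregister}(). */
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");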
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 328d221fd1c..74861a7fb80 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -16,7 +16,6 @@
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/slab.h>
#include <asm/io.h>
@@ -149,7 +148,7 @@ int __init pq2ads_pci_init_irq(void)
priv->regs = of_iomap(np, 0);
if (!priv->regs) {
printk(KERN_ERR "Cannot map PCI PIC registers.\n");
- goto out_free_bootmem;
+ goto out_free_kmalloc;
}
/* mask all PCI interrupts */
@@ -171,9 +170,8 @@ int __init pq2ads_pci_init_irq(void)
out_unmap_regs:
iounmap(priv->regs);
-out_free_bootmem:
- free_bootmem((unsigned long)priv,
- sizeof(struct pq2ads_pci_pic));
+out_free_kmalloc:
+ kfree(priv);
of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index d440435e055..8d762203eef 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Description:
* MPC832xE MDS board specific routines.
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 1b1f6c8a1a1..1a26d2f8340 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <LeoLi@freescale.com>
* Yin Olivia <Hong-hua.Yin@freescale.com>
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index f8769d713d6..b63b42d11d6 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -1,7 +1,7 @@
/*
* MPC8360E-RDK board file.
*
- * Copyright (c) 2006 Freescale Semicondutor, Inc.
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index eca1f0960ff..9813c81e8e5 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -1,7 +1,7 @@
/*
* arch/powerpc/platforms/83xx/mpc837x_rdb.c
*
- * Copyright (C) 2007 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* MPC837x RDB board specific routines
*
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 8498f732347..bd12588fa25 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010, 2012 Freescale Semicondutor, Inc.
+ * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Author: Andy Fleming <afleming@freescale.com>
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 848a3e98e1c..7328b8d7412 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -249,7 +249,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
goto exit;
}
- iprop = of_get_property(law_node, "fsl,num-laws", 0);
+ iprop = of_get_property(law_node, "fsl,num-laws", NULL);
if (!iprop) {
pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
goto exit;
@@ -539,7 +539,7 @@ static void __init p1022_ds_setup_arch(void)
};
/*
- * prom_update_property() is called before
+ * of_update_property() is called before
* kmalloc() is available, so the 'new' object
* should be allocated in the global area.
* The easiest way is to do that is to
@@ -548,7 +548,7 @@ static void __init p1022_ds_setup_arch(void)
*/
pr_info("p1022ds: disabling %s node",
np2->full_name);
- prom_update_property(np2, &nor_status);
+ of_update_property(np2, &nor_status);
of_node_put(np2);
}
@@ -564,7 +564,7 @@ static void __init p1022_ds_setup_arch(void)
pr_info("p1022ds: disabling %s node",
np2->full_name);
- prom_update_property(np2, &nand_status);
+ of_update_property(np2, &nand_status);
of_node_put(np2);
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6fcfa12e5c5..148c2f2d978 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -128,6 +128,19 @@ static void __cpuinit smp_85xx_mach_cpu_die(void)
}
#endif
+static inline void flush_spin_table(void *spin_table)
+{
+ flush_dcache_range((ulong)spin_table,
+ (ulong)spin_table + sizeof(struct epapr_spin_table));
+}
+
+static inline u32 read_spin_table_addr_l(void *spin_table)
+{
+ flush_dcache_range((ulong)spin_table,
+ (ulong)spin_table + sizeof(struct epapr_spin_table));
+ return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
+}
+
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
unsigned long flags;
@@ -161,8 +174,8 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
/* Map the spin table */
if (ioremappable)
- spin_table = ioremap(*cpu_rel_addr,
- sizeof(struct epapr_spin_table));
+ spin_table = ioremap_prot(*cpu_rel_addr,
+ sizeof(struct epapr_spin_table), _PAGE_COHERENT);
else
spin_table = phys_to_virt(*cpu_rel_addr);
@@ -173,7 +186,16 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
generic_set_cpu_up(nr);
if (system_state == SYSTEM_RUNNING) {
+ /*
+ * To stay compatible with old boot programs that use a
+ * cache-inhibited spin table, we need to flush the cache
+ * before accessing the spin table to invalidate any stale data.
+ * We also need to flush the cache after writing to the spin
+ * table to push the data out.
+ */
+ flush_spin_table(spin_table);
out_be32(&spin_table->addr_l, 0);
+ flush_spin_table(spin_table);
/*
* We don't set the BPTR register here since it already points
@@ -181,9 +203,14 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
*/
mpic_reset_core(hw_cpu);
- /* wait until core is ready... */
- if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
- 10000, 100)) {
+ /*
+ * wait until core is ready...
+ * We need to invalidate the stale data, in case the boot
+ * loader uses a cache-inhibited spin table.
+ */
+ if (!spin_event_timeout(
+ read_spin_table_addr_l(spin_table) == 1,
+ 10000, 100)) {
pr_err("%s: timeout waiting for core %d to reset\n",
__func__, hw_cpu);
ret = -ENOENT;
@@ -194,12 +221,10 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
__secondary_hold_acknowledge = -1;
}
#endif
+ flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
out_be32(&spin_table->addr_l, __pa(__early_start));
-
- if (!ioremappable)
- flush_dcache_range((ulong)spin_table,
- (ulong)spin_table + sizeof(struct epapr_spin_table));
+ flush_spin_table(spin_table);
/* Wait a bit for the CPU to ack. */
if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
@@ -213,13 +238,11 @@ out:
#else
smp_generic_kick_cpu(nr);
+ flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h),
__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
-
- if (!ioremappable)
- flush_dcache_range((ulong)spin_table,
- (ulong)spin_table + sizeof(struct epapr_spin_table));
+ flush_spin_table(spin_table);
#endif
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index a817398a56d..04d9d317f74 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -353,5 +353,7 @@ define_machine(mpc86xx_hpcd) {
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
};
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e7a896acd98..48a920d5148 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -90,6 +90,7 @@ config MPIC
config PPC_EPAPR_HV_PIC
bool
default n
+ select EPAPR_PARAVIRT
config MPIC_WEIRD
bool
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index dca21366674..e56bb651da1 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -728,7 +728,7 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
nid, np->full_name);
/* XXX todo: If we can have multiple windows on the same IOMMU, which
- * isn't the case today, we probably want here to check wether the
+ * isn't the case today, we probably want here to check whether the
* iommu for that node is already setup.
* However, there might be issue with getting the size right so let's
* ignore that for now. We might want to completely get rid of the
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index d8b7cc8a66c..8e299447127 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -148,7 +148,7 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type)
/* Configure the source. One gross hack that was there before and
* that I've kept around is the priority to the BE which I set to
- * be the same as the interrupt source number. I don't know wether
+ * be the same as the interrupt source number. I don't know whether
* that's supposed to make any kind of sense however, we'll have to
* decide that, but for now, I'm not changing the behaviour.
*/
@@ -220,7 +220,7 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
/* For hooking up the cascace we have a problem. Our device-tree is
* crap and we don't know on which BE iic interrupt we are hooked on at
* least not the "standard" way. We can reconstitute it based on two
- * informations though: which BE node we are connected to and wether
+ * informations though: which BE node we are connected to and whether
* we are connected to IOIF0 or IOIF1. Right now, we really only care
* about the IBM cell blade and we know that its firmware gives us an
* interrupt-map property which is pretty strange.
@@ -232,7 +232,7 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
int imaplen, intsize, unit;
struct device_node *iic;
- /* First, we check wether we have a real "interrupts" in the device
+ /* First, we check whether we have a real "interrupts" in the device
* tree in case the device-tree is ever fixed
*/
struct of_irq oirq;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 965d381abd7..25db92a8e1c 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1094,7 +1094,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
- current->nsproxy->pid_ns->last_pid);
+ task_active_pid_ns(current)->last_pid);
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 5b7d8ffbf89..baee994fe81 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -66,7 +66,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
struct dentry *dentry;
int ret;
- dentry = user_path_create(AT_FDCWD, pathname, &path, 1);
+ dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
ret = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
ret = spufs_create(&path, dentry, flags, mode, neighbor);
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 64171198535..311b804353b 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -55,6 +55,7 @@ static unsigned int low_freq;
static unsigned int hi_freq;
static unsigned int cur_freq;
static unsigned int sleep_freq;
+static unsigned long transition_latency;
/*
* Different models uses different mechanisms to switch the frequency
@@ -403,7 +404,7 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency = transition_latency;
policy->cur = cur_freq;
cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
@@ -658,12 +659,14 @@ static int __init pmac_cpufreq_setup(void)
if (!value)
goto out;
cur_freq = (*value) / 1000;
+ transition_latency = CPUFREQ_ETERNAL;
/* Check for 7447A based MacRISC3 */
if (of_machine_is_compatible("MacRISC3") &&
of_get_property(cpunode, "dynamic-power-step", NULL) &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
+ transition_latency = 8000000;
/* Check for other MacRISC3 machines */
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5") ||
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index c4e630576ff..31036b56670 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -529,7 +529,7 @@ static int __init pmac_pic_probe_mpic(void)
void __init pmac_pic_init(void)
{
/* We configure the OF parsing based on our oldworld vs. newworld
- * platform type and wether we were booted by BootX.
+ * platform type and whether we were booted by BootX.
*/
#ifdef CONFIG_PPC32
if (!pmac_newworld)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 471aa3ccd9f..53d052e95cf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -34,24 +34,12 @@
#include "powernv.h"
#include "pci.h"
-static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
- struct va_format *vaf)
-{
- char pfix[32];
-
- if (pe->pdev)
- strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
- else
- sprintf(pfix, "%04x:%02x ",
- pci_domain_nr(pe->pbus), pe->pbus->number);
- return printk("pci %s%s: [PE# %.3d] %pV", level, pfix, pe->pe_number, vaf);
-}
-
#define define_pe_printk_level(func, kern_level) \
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
+ char pfix[32]; \
int r; \
\
va_start(args, fmt); \
@@ -59,7 +47,16 @@ static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
vaf.fmt = fmt; \
vaf.va = &args; \
\
- r = __pe_printk(kern_level, pe, &vaf); \
+ if (pe->pdev) \
+ strlcpy(pfix, dev_name(&pe->pdev->dev), \
+ sizeof(pfix)); \
+ else \
+ sprintf(pfix, "%04x:%02x ", \
+ pci_domain_nr(pe->pbus), \
+ pe->pbus->number); \
+ r = printk(kern_level "pci %s: [PE# %.3d] %pV", \
+ pfix, pe->pe_number, &vaf); \
+ \
va_end(args); \
\
return r; \
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 56d26bc4fd4..09787139834 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -280,13 +280,13 @@ static void os_area_set_property(struct device_node *node,
if (tmp) {
pr_debug("%s:%d found %s\n", __func__, __LINE__, prop->name);
- prom_remove_property(node, tmp);
+ of_remove_property(node, tmp);
}
- result = prom_add_property(node, prop);
+ result = of_add_property(node, prop);
if (result)
- pr_debug("%s:%d prom_set_property failed\n", __func__,
+ pr_debug("%s:%d of_set_property failed\n", __func__,
__LINE__);
}
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 0f1b706506e..a1a7b9a67ff 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -13,17 +13,16 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/notifier.h>
-#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include "offline_states.h"
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
-#include <asm/pSeries_reconfig.h>
struct cc_workarea {
u32 drc_index;
@@ -255,9 +254,6 @@ static struct device_node *derive_parent(const char *path)
int dlpar_attach_node(struct device_node *dn)
{
-#ifdef CONFIG_PROC_DEVICETREE
- struct proc_dir_entry *ent;
-#endif
int rc;
of_node_set_flag(dn, OF_DYNAMIC);
@@ -266,44 +262,26 @@ int dlpar_attach_node(struct device_node *dn)
if (!dn->parent)
return -ENOMEM;
- rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn);
+ rc = of_attach_node(dn);
if (rc) {
printk(KERN_ERR "Failed to add device node %s\n",
dn->full_name);
return rc;
}
- of_attach_node(dn);
-
-#ifdef CONFIG_PROC_DEVICETREE
- ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
- if (ent)
- proc_device_tree_add_node(dn, ent);
-#endif
-
of_node_put(dn->parent);
return 0;
}
int dlpar_detach_node(struct device_node *dn)
{
-#ifdef CONFIG_PROC_DEVICETREE
- struct device_node *parent = dn->parent;
- struct property *prop = dn->properties;
-
- while (prop) {
- remove_proc_entry(prop->name, dn->pde);
- prop = prop->next;
- }
+ int rc;
- if (dn->pde)
- remove_proc_entry(dn->pde->name, parent->pde);
-#endif
+ rc = of_detach_node(dn);
+ if (rc)
+ return rc;
- pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn);
- of_detach_node(dn);
of_node_put(dn); /* Must decrement the refcount */
-
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0b0eff0cce3..7b56118f531 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -56,6 +56,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
+ {FW_FEATURE_SET_MODE, "hcall-set-mode"},
};
/* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 64c97d8ac0c..a38956269fb 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -23,12 +23,12 @@
#include <linux/delay.h>
#include <linux/sched.h> /* for idle_task_exit */
#include <linux/cpu.h>
+#include <linux/of.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/xics.h>
#include "plpar_wrappers.h"
#include "offline_states.h"
@@ -333,10 +333,10 @@ static int pseries_smp_notifier(struct notifier_block *nb,
int err = 0;
switch (action) {
- case PSERIES_RECONFIG_ADD:
+ case OF_RECONFIG_ATTACH_NODE:
err = pseries_add_processor(node);
break;
- case PSERIES_RECONFIG_REMOVE:
+ case OF_RECONFIG_DETACH_NODE:
pseries_remove_processor(node);
break;
}
@@ -399,7 +399,7 @@ static int __init pseries_cpu_hotplug_init(void)
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
- pSeries_reconfig_notifier_register(&pseries_smp_nb);
+ of_reconfig_notifier_register(&pseries_smp_nb);
cpu_maps_update_begin();
if (cede_offline_enabled && parse_cede_parameters() == 0) {
default_offline_state = CPU_STATE_INACTIVE;
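Several hunks in this series move from the pSeries-private reconfig chain to the generic OF reconfig notifier. A hedged sketch of the pattern the hotplug code now follows (handler and names are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/printk.h>

static int example_of_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device_node *np = data;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		pr_info("attached %s\n", np->full_name);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pr_info("detaching %s\n", np->full_name);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_of_nb = {
	.notifier_call = example_of_notify,
};

static int __init example_init(void)
{
	return of_reconfig_notifier_register(&example_of_nb);
}
device_initcall(example_init);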
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index ecdb0a6b317..2372c609fa2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -16,7 +16,6 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/sparsemem.h>
static unsigned long get_memblock_size(void)
@@ -187,42 +186,69 @@ static int pseries_add_memory(struct device_node *np)
return (ret < 0) ? -EINVAL : 0;
}
-static int pseries_drconf_memory(unsigned long *base, unsigned int action)
+static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
{
+ struct of_drconf_cell *new_drmem, *old_drmem;
unsigned long memblock_size;
- int rc;
+ u32 entries;
+ u32 *p;
+ int i, rc = -EINVAL;
memblock_size = get_memblock_size();
if (!memblock_size)
return -EINVAL;
- if (action == PSERIES_DRCONF_MEM_ADD) {
- rc = memblock_add(*base, memblock_size);
- rc = (rc < 0) ? -EINVAL : 0;
- } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
- rc = pseries_remove_memblock(*base, memblock_size);
- } else {
- rc = -EINVAL;
+ p = (u32 *)of_get_property(pr->dn, "ibm,dynamic-memory", NULL);
+ if (!p)
+ return -EINVAL;
+
+ /* The first int of the property is the number of lmb's described
+ * by the property. This is followed by an array of of_drconf_cell
+ * entries. Get the number of entries and skip to the array of
+ * of_drconf_cell's.
+ */
+ entries = *p++;
+ old_drmem = (struct of_drconf_cell *)p;
+
+ p = (u32 *)pr->prop->value;
+ p++;
+ new_drmem = (struct of_drconf_cell *)p;
+
+ for (i = 0; i < entries; i++) {
+ if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) &&
+ (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) {
+ rc = pseries_remove_memblock(old_drmem[i].base_addr,
+ memblock_size);
+ break;
+ } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) &&
+ (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) {
+ rc = memblock_add(old_drmem[i].base_addr,
+ memblock_size);
+ rc = (rc < 0) ? -EINVAL : 0;
+ break;
+ }
}
return rc;
}
static int pseries_memory_notifier(struct notifier_block *nb,
- unsigned long action, void *node)
+ unsigned long action, void *node)
{
+ struct of_prop_reconfig *pr;
int err = 0;
switch (action) {
- case PSERIES_RECONFIG_ADD:
+ case OF_RECONFIG_ATTACH_NODE:
err = pseries_add_memory(node);
break;
- case PSERIES_RECONFIG_REMOVE:
+ case OF_RECONFIG_DETACH_NODE:
err = pseries_remove_memory(node);
break;
- case PSERIES_DRCONF_MEM_ADD:
- case PSERIES_DRCONF_MEM_REMOVE:
- err = pseries_drconf_memory(node, action);
+ case OF_RECONFIG_UPDATE_PROPERTY:
+ pr = (struct of_prop_reconfig *)node;
+ if (!strcmp(pr->prop->name, "ibm,dynamic-memory"))
+ err = pseries_update_drconf_memory(pr);
break;
}
return notifier_from_errno(err);
@@ -235,7 +261,7 @@ static struct notifier_block pseries_mem_nb = {
static int __init pseries_memory_hotplug_init(void)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
- pSeries_reconfig_notifier_register(&pseries_mem_nb);
+ of_reconfig_notifier_register(&pseries_mem_nb);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6153eea27ce..e2685badb5d 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -36,13 +36,13 @@
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
@@ -760,7 +760,7 @@ static void remove_ddw(struct device_node *np)
__remove_ddw(np, ddw_avail, liobn);
delprop:
- ret = prom_remove_property(np, win64);
+ ret = of_remove_property(np, win64);
if (ret)
pr_warning("%s: failed to remove direct window property: %d\n",
np->full_name, ret);
@@ -1070,7 +1070,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_free_window;
}
- ret = prom_add_property(pdn, win64);
+ ret = of_add_property(pdn, win64);
if (ret) {
dev_err(&dev->dev, "unable to add dma window property for %s: %d",
pdn->full_name, ret);
@@ -1294,7 +1294,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
struct direct_window *window;
switch (action) {
- case PSERIES_RECONFIG_REMOVE:
+ case OF_RECONFIG_DETACH_NODE:
if (pci && pci->iommu_table)
iommu_free_table(pci->iommu_table, np->full_name);
@@ -1357,7 +1357,7 @@ void iommu_init_early_pSeries(void)
}
- pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
+ of_reconfig_notifier_register(&iommu_reconfig_nb);
register_memory_notifier(&iommu_mem_nb);
set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index dd30b12edfe..6573808cc5f 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -116,7 +116,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
}
if (!more) {
- prom_update_property(dn, new_prop);
+ of_update_property(dn, new_prop);
new_prop = NULL;
}
@@ -172,7 +172,7 @@ static int update_dt_node(u32 phandle)
case 0x80000000:
prop = of_find_property(dn, prop_name, NULL);
- prom_remove_property(dn, prop);
+ of_remove_property(dn, prop);
prop = NULL;
break;
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 13e8cc43adf..e6cc34a6705 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -273,4 +273,35 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
lbuf[1]);
}
+/* Set various resource mode parameters */
+static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
+ unsigned long value1, unsigned long value2)
+{
+ return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
+}
+
+/*
+ * Enable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_reloc_on_exceptions(void)
+{
+ /* mflags = 3: Exceptions at 0xC000000000004000 */
+ return plpar_set_mode(3, 3, 0, 0);
+}
+
+/*
+ * Disable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long disable_reloc_on_exceptions(void) {
+ return plpar_set_mode(0, 3, 0, 0);
+}
+
#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 2f4668136b2..d6491bd481d 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -16,55 +16,13 @@
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/mmu.h>
-
-
-/*
- * Routines for "runtime" addition and removal of device tree nodes.
- */
-#ifdef CONFIG_PROC_DEVICETREE
-/*
- * Add a node to /proc/device-tree.
- */
-static void add_node_proc_entries(struct device_node *np)
-{
- struct proc_dir_entry *ent;
-
- ent = proc_mkdir(strrchr(np->full_name, '/') + 1, np->parent->pde);
- if (ent)
- proc_device_tree_add_node(np, ent);
-}
-
-static void remove_node_proc_entries(struct device_node *np)
-{
- struct property *pp = np->properties;
- struct device_node *parent = np->parent;
-
- while (pp) {
- remove_proc_entry(pp->name, np->pde);
- pp = pp->next;
- }
- if (np->pde)
- remove_proc_entry(np->pde->name, parent->pde);
-}
-#else /* !CONFIG_PROC_DEVICETREE */
-static void add_node_proc_entries(struct device_node *np)
-{
- return;
-}
-
-static void remove_node_proc_entries(struct device_node *np)
-{
- return;
-}
-#endif /* CONFIG_PROC_DEVICETREE */
-
/**
* derive_parent - basically like dirname(1)
* @path: the full_name of a node to be added to the tree
@@ -97,28 +55,6 @@ static struct device_node *derive_parent(const char *path)
return parent;
}
-static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
-
-int pSeries_reconfig_notifier_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
-}
-EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register);
-
-void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
-}
-EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister);
-
-int pSeries_reconfig_notify(unsigned long action, void *p)
-{
- int err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
- action, p);
-
- return notifier_to_errno(err);
-}
-
static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
{
struct device_node *np;
@@ -142,16 +78,12 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
goto out_err;
}
- err = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, np);
+ err = of_attach_node(np);
if (err) {
printk(KERN_ERR "Failed to add device node %s\n", path);
goto out_err;
}
- of_attach_node(np);
-
- add_node_proc_entries(np);
-
of_node_put(np->parent);
return 0;
@@ -179,11 +111,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
return -EBUSY;
}
- remove_node_proc_entries(np);
-
- pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, np);
of_detach_node(np);
-
of_node_put(parent);
of_node_put(np); /* Must decrement the refcount */
return 0;
@@ -397,7 +325,7 @@ static int do_add_property(char *buf, size_t bufsize)
if (!prop)
return -ENOMEM;
- prom_add_property(np, prop);
+ of_add_property(np, prop);
return 0;
}
@@ -421,16 +349,15 @@ static int do_remove_property(char *buf, size_t bufsize)
prop = of_find_property(np, buf, NULL);
- return prom_remove_property(np, prop);
+ return of_remove_property(np, prop);
}
static int do_update_property(char *buf, size_t bufsize)
{
struct device_node *np;
- struct pSeries_reconfig_prop_update upd_value;
unsigned char *value;
char *name, *end, *next_prop;
- int rc, length;
+ int length;
struct property *newprop;
buf = parse_node(buf, bufsize, &np);
end = buf + bufsize;
@@ -452,41 +379,7 @@ static int do_update_property(char *buf, size_t bufsize)
if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
slb_set_size(*(int *)value);
- upd_value.node = np;
- upd_value.property = newprop;
- pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value);
-
- rc = prom_update_property(np, newprop);
- if (rc)
- return rc;
-
- /* For memory under the ibm,dynamic-reconfiguration-memory node
- * of the device tree, adding and removing memory is just an update
- * to the ibm,dynamic-memory property instead of adding/removing a
- * memory node in the device tree. For these cases we still need to
- * involve the notifier chain.
- */
- if (!strcmp(name, "ibm,dynamic-memory")) {
- int action;
-
- next_prop = parse_next_property(next_prop, end, &name,
- &length, &value);
- if (!next_prop)
- return -EINVAL;
-
- if (!strcmp(name, "add"))
- action = PSERIES_DRCONF_MEM_ADD;
- else
- action = PSERIES_DRCONF_MEM_REMOVE;
-
- rc = pSeries_reconfig_notify(action, value);
- if (rc) {
- prom_update_property(np, newprop);
- return rc;
- }
- }
-
- return 0;
+ return of_update_property(np, newprop);
}
/**
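do_update_property() now funnels through of_update_property(), which swaps the property in place and raises OF_RECONFIG_UPDATE_PROPERTY for listeners such as the memory hotplug notifier above. A hedged sketch of the call, mirroring the "status" trick used in p1022_ds.c earlier in this diff (helper name is illustrative):

#include <linux/of.h>

/* Illustrative: mark a node disabled by replacing its "status" property. */
static int set_status_disabled(struct device_node *np)
{
	static struct property status = {
		.name   = "status",
		.value  = "disabled",
		.length = sizeof("disabled"),
	};

	return of_update_property(np, &status);
}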
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e3cb7ae6165..ca55882465d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -40,6 +40,8 @@
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/cpuidle.h>
+#include <linux/of.h>
+#include <linux/kexec.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -63,7 +65,6 @@
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
-#include <asm/pSeries_reconfig.h>
#include "plpar_wrappers.h"
#include "pseries.h"
@@ -258,7 +259,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
int err = NOTIFY_OK;
switch (action) {
- case PSERIES_RECONFIG_ADD:
+ case OF_RECONFIG_ATTACH_NODE:
pci = np->parent->data;
if (pci) {
update_dn_pci_info(np, pci->phb);
@@ -367,6 +368,65 @@ static void pSeries_idle(void)
}
}
+/*
+ * Enable relocation on exceptions. This has partition-wide scope and
+ * may take a while to complete. If it takes longer than one second we
+ * just give up rather than waste any more time on it; if that ever turns
+ * out to be a problem in practice we can move this into a kernel thread
+ * to finish off the process later in boot.
+ */
+static int __init pSeries_enable_reloc_on_exc(void)
+{
+ long rc;
+ unsigned int delay, total_delay = 0;
+
+ while (1) {
+ rc = enable_reloc_on_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+
+ delay = get_longbusy_msecs(rc);
+ total_delay += delay;
+ if (total_delay > 1000) {
+ pr_warn("Warning: Giving up waiting to enable "
+ "relocation on exceptions (%u msec)!\n",
+ total_delay);
+ return rc;
+ }
+
+ mdelay(delay);
+ }
+}
+
+#ifdef CONFIG_KEXEC
+static long pSeries_disable_reloc_on_exc(void)
+{
+ long rc;
+
+ while (1) {
+ rc = disable_reloc_on_exceptions();
+ if (!H_IS_LONG_BUSY(rc))
+ return rc;
+ mdelay(get_longbusy_msecs(rc));
+ }
+}
+
+static void pSeries_machine_kexec(struct kimage *image)
+{
+ long rc;
+
+ if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
+ (image->type != KEXEC_TYPE_CRASH)) {
+ rc = pSeries_disable_reloc_on_exc();
+ if (rc != H_SUCCESS)
+ pr_warning("Warning: Failed to disable relocation on "
+ "exceptions: %ld\n", rc);
+ }
+
+ default_machine_kexec(image);
+}
+#endif
+
static void __init pSeries_setup_arch(void)
{
panic_timeout = 10;
@@ -389,7 +449,7 @@ static void __init pSeries_setup_arch(void)
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
find_and_init_phbs();
- pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
+ of_reconfig_notifier_register(&pci_dn_reconfig_nb);
pSeries_nvram_init();
@@ -402,6 +462,14 @@ static void __init pSeries_setup_arch(void)
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
else
ppc_md.enable_pmcs = power4_enable_pmcs;
+
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ long rc;
+ if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) {
+ pr_warn("Unable to enable relocation on exceptions: "
+ "%ld\n", rc);
+ }
+ }
}
static int __init pSeries_init_panel(void)
@@ -659,4 +727,7 @@ define_machine(pseries) {
.progress = rtas_progress,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = pSeries_machine_kexec,
+#endif
};
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 71706bc34a0..9fc0a494190 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -38,7 +38,6 @@
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 02cf1e7e77f..0eb871cc343 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -1,7 +1,7 @@
/*
* Freescale General-purpose Timers Module
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) MontaVista Software, Inc. 2008.
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 51ffafae561..63c5f04ea58 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -236,7 +236,6 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 intr_index;
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data;
- unsigned int ret;
cascade_data = irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
@@ -268,7 +267,9 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
case FSL_PIC_IP_IPIC:
msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
break;
- case FSL_PIC_IP_VMPIC:
+#ifdef CONFIG_EPAPR_PARAVIRT
+ case FSL_PIC_IP_VMPIC: {
+ unsigned int ret;
ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
if (ret) {
pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
@@ -277,6 +278,8 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
}
break;
}
+#endif
+ }
while (msir_value) {
intr_index = ffs(msir_value) - 1;
@@ -508,10 +511,12 @@ static const struct of_device_id fsl_of_msi_ids[] = {
.compatible = "fsl,ipic-msi",
.data = &ipic_msi_feature,
},
+#ifdef CONFIG_EPAPR_PARAVIRT
{
.compatible = "fsl,vmpic-msi",
.data = &vmpic_msi_feature,
},
+#endif
{}
};
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 01b62a62c63..5ba325bff3a 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -89,7 +89,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
+static int setup_one_atmu(struct ccsr_pci __iomem *pci,
unsigned int index, const struct resource *res,
resource_size_t offset)
{
@@ -126,7 +126,7 @@ static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
}
/* atmu setup for fsl pci/pcie controller */
-static void __init setup_pci_atmu(struct pci_controller *hose,
+static void setup_pci_atmu(struct pci_controller *hose,
struct resource *rsrc)
{
struct ccsr_pci __iomem *pci;
@@ -902,9 +902,42 @@ static int __devinit fsl_pci_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int fsl_pci_resume(struct device *dev)
+{
+ struct pci_controller *hose;
+ struct resource pci_rsrc;
+
+ hose = pci_find_hose_for_OF_device(dev->of_node);
+ if (!hose)
+ return -ENODEV;
+
+ if (of_address_to_resource(dev->of_node, 0, &pci_rsrc)) {
+ dev_err(dev, "Get pci register base failed.");
+ return -ENODEV;
+ }
+
+ setup_pci_atmu(hose, &pci_rsrc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pci_pm_ops = {
+ .resume = fsl_pci_resume,
+};
+
+#define PCI_PM_OPS (&pci_pm_ops)
+
+#else
+
+#define PCI_PM_OPS NULL
+
+#endif
+
static struct platform_driver fsl_pci_driver = {
.driver = {
.name = "fsl-pci",
+ .pm = PCI_PM_OPS,
.of_match_table = pci_ids,
},
.probe = fsl_pci_probe,
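The resume hook above re-programs the ATMU windows after a suspend, with the dev_pm_ops table compiled away entirely when CONFIG_PM is off. As a hedged side note, SIMPLE_DEV_PM_OPS() expresses a resume-only table without the hand-rolled #ifdef, compiling the hooks out under !CONFIG_PM_SLEEP instead; a sketch with a hypothetical driver (the patch keeps the explicit form):

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical resume hook, standing in for fsl_pci_resume() above. */
static int foo_resume(struct device *dev)
{
	return 0;
}

/* Resume-only PM ops; the callback is dropped when CONFIG_PM_SLEEP is off. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,
	},
};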
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index c449dbd1c93..97118dc3d28 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -253,6 +253,7 @@ struct platform_diu_data_ops diu_ops;
EXPORT_SYMBOL(diu_ops);
#endif
+#ifdef CONFIG_EPAPR_PARAVIRT
/*
* Restart the current partition
*
@@ -278,3 +279,4 @@ void fsl_hv_halt(void)
pr_info("hv exit\n");
fh_partition_stop(-1);
}
+#endif
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 8f0465422b1..5aaf86c0389 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -214,18 +214,7 @@ static struct platform_driver pmi_of_platform_driver = {
.of_match_table = pmi_match,
},
};
-
-static int __init pmi_module_init(void)
-{
- return platform_driver_register(&pmi_of_platform_driver);
-}
-module_init(pmi_module_init);
-
-static void __exit pmi_module_exit(void)
-{
- platform_driver_unregister(&pmi_of_platform_driver);
-}
-module_exit(pmi_module_exit);
+module_platform_driver(pmi_of_platform_driver);
int pmi_send_message(pmi_message_t msg)
{
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b0436752972..238a07b97f2 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 2fba6ef2f95..b2b87c30e26 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -1,7 +1,7 @@
/*
* arch/powerpc/sysdev/qe_lib/qe_ic.c
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <leoli@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index c327872ed35..efef7ab9b75 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -3,7 +3,7 @@
*
* QUICC ENGINE Interrupt Controller Header
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <leoli@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index fd1a6c3b172..a88807b3dd5 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -3,7 +3,7 @@
*
* QE Parallel I/O ports configuration routines
*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <LeoLi@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 04677505f20..134b07d2943 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -3,7 +3,7 @@
*
* QE UCC API Set - UCC specific routines implementations.
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index fba02440d12..cceb2e36673 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 524c0ead941..1c062f48f1a 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c
index 9162828f5da..27f23bd15eb 100644
--- a/arch/powerpc/sysdev/qe_lib/usb.c
+++ b/arch/powerpc/sysdev/qe_lib/usb.c
@@ -1,7 +1,7 @@
/*
* QE USB routines
*
- * Copyright (c) Freescale Semicondutor, Inc. 2006.
+ * Copyright 2006 Freescale Semiconductor, Inc.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) MontaVista Software, Inc. 2008.
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index c168c54e3c4..b49fdbd1580 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -6,7 +6,7 @@ GCOV_PROFILE := n
ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
-obj-y += xmon.o start.o nonstdio.o
+obj-y += xmon.o nonstdio.o
ifdef CONFIG_XMON_DISASSEMBLY
obj-y += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
index bfac84fbe78..bce3dcfe505 100644
--- a/arch/powerpc/xmon/nonstdio.c
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -7,9 +7,23 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
+#include <asm/udbg.h>
#include <asm/time.h>
#include "nonstdio.h"
+
+static int xmon_write(const void *ptr, int nb)
+{
+ return udbg_write(ptr, nb);
+}
+
+static int xmon_readchar(void)
+{
+ if (udbg_getc)
+ return udbg_getc();
+ return -1;
+}
+
int xmon_putchar(int c)
{
char ch = c;
@@ -23,34 +37,7 @@ static char line[256];
static char *lineptr;
static int lineleft;
-int xmon_expect(const char *str, unsigned long timeout)
-{
- int c;
- unsigned long t0;
-
- /* assume 25MHz default timebase if tb_ticks_per_sec not set yet */
- timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000;
- t0 = get_tbl();
- do {
- lineptr = line;
- for (;;) {
- c = xmon_read_poll();
- if (c == -1) {
- if (get_tbl() - t0 > timeout)
- return 0;
- continue;
- }
- if (c == '\n')
- break;
- if (c != '\r' && lineptr < &line[sizeof(line) - 1])
- *lineptr++ = c;
- }
- *lineptr = 0;
- } while (strstr(line, str) == NULL);
- return 1;
-}
-
-int xmon_getchar(void)
+static int xmon_getchar(void)
{
int c;
@@ -124,13 +111,19 @@ char *xmon_gets(char *str, int nb)
void xmon_printf(const char *format, ...)
{
va_list args;
- int n;
static char xmon_outbuf[1024];
+ int rc, n;
va_start(args, format);
n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args);
va_end(args);
- xmon_write(xmon_outbuf, n);
+
+ rc = xmon_write(xmon_outbuf, n);
+
+ if (n && rc == 0) {
+ /* No udbg hooks, fallback to printk() - dangerous */
+ printk(xmon_outbuf);
+ }
}
void xmon_puts(const char *str)
diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 23dd95f4599..18a51ded4ff 100644
--- a/arch/powerpc/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
@@ -4,12 +4,6 @@
#define putchar xmon_putchar
extern int xmon_putchar(int c);
-extern int xmon_getchar(void);
extern void xmon_puts(const char *);
extern char *xmon_gets(char *, int);
extern void xmon_printf(const char *, ...);
-extern void xmon_map_scc(void);
-extern int xmon_expect(const char *str, unsigned long timeout);
-extern int xmon_write(const void *ptr, int nb);
-extern int xmon_readchar(void);
-extern int xmon_read_poll(void);
diff --git a/arch/powerpc/xmon/start.c b/arch/powerpc/xmon/start.c
deleted file mode 100644
index 8864de2af38..00000000000
--- a/arch/powerpc/xmon/start.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 1996 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-#include "nonstdio.h"
-
-void xmon_map_scc(void)
-{
-}
-
-int xmon_write(const void *ptr, int nb)
-{
- return udbg_write(ptr, nb);
-}
-
-int xmon_readchar(void)
-{
- if (udbg_getc)
- return udbg_getc();
- return -1;
-}
-
-int xmon_read_poll(void)
-{
- if (udbg_getc_poll)
- return udbg_getc_poll();
- return -1;
-}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 3a56a639a92..1f8d2f10a43 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -52,9 +52,6 @@
#include "nonstdio.h"
#include "dis-asm.h"
-#define scanhex xmon_scanhex
-#define skipbl xmon_skipbl
-
#ifdef CONFIG_SMP
static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
static unsigned long xmon_taken = 1;
@@ -169,12 +166,8 @@ extern void xmon_leave(void);
#ifdef CONFIG_PPC64
#define REG "%.16lx"
-#define REGS_PER_LINE 4
-#define LAST_VOLATILE 13
#else
#define REG "%.8lx"
-#define REGS_PER_LINE 8
-#define LAST_VOLATILE 12
#endif
#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
@@ -1288,27 +1281,19 @@ static void get_function_bounds(unsigned long pc, unsigned long *startp,
catch_memory_errors = 0;
}
-static int xmon_depth_to_print = 64;
-
#define LRSAVE_OFFSET (STACK_FRAME_LR_SAVE * sizeof(unsigned long))
#define MARKER_OFFSET (STACK_FRAME_MARKER * sizeof(unsigned long))
-#ifdef __powerpc64__
-#define REGS_OFFSET 0x70
-#else
-#define REGS_OFFSET 16
-#endif
-
static void xmon_show_stack(unsigned long sp, unsigned long lr,
unsigned long pc)
{
+ int max_to_print = 64;
unsigned long ip;
unsigned long newsp;
unsigned long marker;
- int count = 0;
struct pt_regs regs;
- do {
+ while (max_to_print--) {
if (sp < PAGE_OFFSET) {
if (sp != 0)
printf("SP (%lx) is in userspace\n", sp);
@@ -1362,10 +1347,10 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
an exception frame. */
if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long))
&& marker == STACK_FRAME_REGS_MARKER) {
- if (mread(sp + REGS_OFFSET, &regs, sizeof(regs))
+ if (mread(sp + STACK_FRAME_OVERHEAD, &regs, sizeof(regs))
!= sizeof(regs)) {
printf("Couldn't read registers at %lx\n",
- sp + REGS_OFFSET);
+ sp + STACK_FRAME_OVERHEAD);
break;
}
printf("--- Exception: %lx %s at ", regs.trap,
@@ -1379,7 +1364,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
break;
sp = newsp;
- } while (count++ < xmon_depth_to_print);
+ }
}
static void backtrace(struct pt_regs *excp)
@@ -2943,7 +2928,6 @@ static void xmon_init(int enable)
__debugger_dabr_match = NULL;
__debugger_fault_handler = NULL;
}
- xmon_map_scc();
}
#ifdef CONFIG_MAGIC_SYSRQ
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index cc45d25487b..647c3eccc3d 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
+obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3cbb8757704..b5ea38c2564 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,12 +34,6 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
-config NO_IOMEM
- def_bool y
-
-config NO_DMA
- def_bool y
-
config ARCH_DMA_ADDR_T_64BIT
def_bool 64BIT
@@ -58,6 +52,12 @@ config KEXEC
config AUDIT_ARCH
def_bool y
+config NO_IOPORT
+ def_bool y
+
+config PCI_QUIRKS
+ def_bool n
+
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
@@ -137,8 +137,6 @@ config S390
select GENERIC_CLOCKEVENTS
select KTIME_SCALAR if 32BIT
select HAVE_ARCH_SECCOMP_FILTER
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS2
@@ -171,6 +169,10 @@ config HAVE_MARCH_Z196_FEATURES
def_bool n
select HAVE_MARCH_Z10_FEATURES
+config HAVE_MARCH_ZEC12_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z196_FEATURES
+
choice
prompt "Processor type"
default MARCH_G5
@@ -222,6 +224,13 @@ config MARCH_Z196
(2818 and 2817 series). The kernel will be slightly faster but will
not work on older machines.
+config MARCH_ZEC12
+ bool "IBM zEC12"
+ select HAVE_MARCH_ZEC12_FEATURES if 64BIT
+ help
+ Select this to enable optimizations for IBM zEC12 (2827 series). The
+ kernel will be slightly faster but will not work on older machines.
+
endchoice
config 64BIT
@@ -426,6 +435,53 @@ config QDIO
If unsure, say Y.
+menuconfig PCI
+ bool "PCI support"
+ default n
+ depends on 64BIT
+ select ARCH_SUPPORTS_MSI
+ select PCI_MSI
+ help
+ Enable PCI support.
+
+if PCI
+
+config PCI_NR_FUNCTIONS
+ int "Maximum number of PCI functions (1-4096)"
+ range 1 4096
+ default "64"
+ help
+ This allows you to specify the maximum number of PCI functions which
+ this kernel will support.
+
+source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+source "drivers/pci/hotplug/Kconfig"
+
+endif # PCI
+
+config PCI_DOMAINS
+ def_bool PCI
+
+config HAS_IOMEM
+ def_bool PCI
+
+config IOMMU_HELPER
+ def_bool PCI
+
+config HAS_DMA
+ def_bool PCI
+ select HAVE_DMA_API_DEBUG
+
+config NEED_SG_DMA_LENGTH
+ def_bool PCI
+
+config HAVE_DMA_ATTRS
+ def_bool PCI
+
+config NEED_DMA_MAP_STATE
+ def_bool PCI
+
config CHSC_SCH
def_tristate m
prompt "Support for CHSC subchannels"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 49e76e8b477..4b8e08b56f4 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -41,6 +41,7 @@ cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
+cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index da3c1a7dcd8..b4dbade8ca2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
param = xts_ctx->pcc.key + offset;
ret = crypt_s390_pcc(func, param);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ return -EIO;
memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
param = xts_ctx->key + offset;
@@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
- BUG_ON(ret < 0 || ret != n);
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- BUG_ON(ret < 0 || ret != n);
+ if (ret < 0 || ret != n)
+ return -EIO;
if (n > AES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
@@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
AES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+ if (ret < 0 || ret != AES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b49fb96f420..bcca01c9989 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, key, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, iv, out, in, n);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
}
ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- BUG_ON((ret < 0) || (ret != n));
+ if (ret < 0 || ret != n)
+ return -EIO;
if (n > DES_BLOCK_SIZE)
memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
@@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
DES_BLOCK_SIZE, ctrblk);
- BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+ if (ret < 0 || ret != DES_BLOCK_SIZE)
+ return -EIO;
memcpy(out, buf, nbytes);
crypto_inc(ctrblk, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 1ebd3a15cca..d43485d142e 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
- BUG_ON(ret != n);
+ if (ret != n)
+ return -EIO;
src += n;
srclen -= n;
}
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
memset(pos, 0, dctx->bytes);
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
- BUG_ON(ret != GHASH_BLOCK_SIZE);
+ if (ret != GHASH_BLOCK_SIZE)
+ return -EIO;
}
dctx->bytes = 0;
+ return 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ int ret;
- ghash_flush(ctx, dctx);
- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
- return 0;
+ ret = ghash_flush(ctx, dctx);
+ if (!ret)
+ memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+ return ret;
}
static struct shash_alg ghash_alg = {
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index bd37d09b9d3..8620b0ec9c4 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
- BUG_ON(ret != bsize);
+ if (ret != bsize)
+ return -EIO;
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
- BUG_ON(ret != (len & ~(bsize - 1)));
+ if (ret != (len & ~(bsize - 1)))
+ return -EIO;
data += ret;
len -= ret;
}
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
- BUG_ON(ret != end);
+ if (ret != end)
+ return -EIO;
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6f573890fb2..15422933c60 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -640,6 +640,87 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
}
#define find_first_bit find_first_bit
+/*
+ * Big endian variant which starts bit counting from the left using
+ * the flogr (find leftmost one) instruction.
+ */
+static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
+{
+ register unsigned long bit asm("2") = val;
+ register unsigned long out asm("3");
+
+ asm volatile (
+ " .insn rre,0xb9830000,%[bit],%[bit]\n"
+ : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+ return nr + bit;
+}
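+
+/*
+ * Illustrative results, assuming the usual flogr semantics (bit position
+ * of the leftmost one bit, or 64 for a zero operand):
+ *   __flo_word(0, 0x8000000000000000UL) -> 0
+ *   __flo_word(0, 1UL)                  -> 63
+ *   __flo_word(0, 0UL)                  -> 64
+ */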
+
+/*
+ * 64 bit special left bitops format:
+ * order in memory:
+ * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
+ * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
+ * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
+ * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
+ * after that follows the next long with bit numbers
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
+ * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
+ * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
+ * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
+ * The reason for this bit ordering is the fact that
+ * the hardware sets bits in a bitmap starting at bit 0
+ * and we don't want to scan the bitmap from the 'wrong
+ * end'.
+ */
+static inline unsigned long find_first_bit_left(const unsigned long *addr,
+ unsigned long size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(addr, size);
+ bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
+}
+
+static inline int find_next_bit_left(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (__BITOPS_WORDSIZE - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / __BITOPS_WORDSIZE;
+ if (bit) {
+ set = __flo_word(0, *p & (~0UL << bit));
+ if (set >= size)
+ return size + offset;
+ if (set < __BITOPS_WORDSIZE)
+ return set + offset;
+ offset += __BITOPS_WORDSIZE;
+ size -= __BITOPS_WORDSIZE;
+ p++;
+ }
+ return offset + find_first_bit_left(p, size);
+}
+
+#define for_each_set_bit_left(bit, addr, size) \
+ for ((bit) = find_first_bit_left((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_left_cont(bit, addr, size) \
+ for ((bit) = find_next_bit_left((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit_left((addr), (size), (bit) + 1))
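+
+/*
+ * Illustrative usage sketch (handle_summary_bit() is a hypothetical
+ * handler, not part of this patch):
+ *
+ *	unsigned long bit;
+ *
+ *	for_each_set_bit_left(bit, bitmap, nbits)
+ *		handle_summary_bit(bit);
+ */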
+
/**
* find_next_zero_bit - find the first zero bit in a memory region
* @addr: The address to base the search on
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 1cb4bb3f32d..e6061617a50 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -12,6 +12,7 @@
#include <linux/mod_devicetable.h>
#include <asm/fcx.h>
#include <asm/irq.h>
+#include <asm/schid.h>
/* structs from asm/cio.h */
struct irb;
@@ -223,8 +224,7 @@ extern int ccw_device_force_console(void);
int ccw_device_siosl(struct ccw_device *);
-// FIXME: these have to go
-extern int _ccw_device_get_subchannel_number(struct ccw_device *);
+extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 01a905eb11e..23723ce5ca7 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -59,6 +59,9 @@ extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
+extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
+extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
new file mode 100644
index 00000000000..6c3aecc245f
--- /dev/null
+++ b/arch/s390/include/asm/clp.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_S390_CLP_H
+#define _ASM_S390_CLP_H
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE (PAGE_SIZE * 2)
+
+struct clp_req_hdr {
+ u16 len;
+ u16 cmd;
+} __packed;
+
+struct clp_rsp_hdr {
+ u16 len;
+ u16 rsp;
+} __packed;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request completed successfully */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+#endif
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 18cd6b59265..f8c6df6cd1f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -7,6 +7,9 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
+#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
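+/*
+ * The two helpers above mask pointer-typed compat syscall arguments to
+ * their low 31 bits (31-bit user addresses); non-pointer arguments are
+ * passed through unchanged.
+ */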
+
#define PSW32_MASK_PER 0x40000000UL
#define PSW32_MASK_DAT 0x04000000UL
#define PSW32_MASK_IO 0x02000000UL
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..8a32f7dfd3a
--- /dev/null
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_S390_DMA_MAPPING_H
+#define _ASM_S390_DMA_MAPPING_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
+#include <linux/io.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
+
+extern struct dma_map_ops s390_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &s390_dma_ops;
+}
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->dma_supported == NULL)
+ return 1;
+ return dma_ops->dma_supported(dev, mask);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+ return (dma_addr == 0UL);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *ret;
+
+ ret = ops->alloc(dev, size, dma_handle, flag, NULL);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+ return ret;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+#endif /* _ASM_S390_DMA_MAPPING_H */
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 6fb6de4f15b..de015d85e3e 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -1,14 +1,13 @@
-/*
- * S390 version
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
+#ifndef _ASM_S390_DMA_H
+#define _ASM_S390_DMA_H
-#include <asm/io.h> /* need byte IO */
+#include <asm/io.h>
+/*
+ * MAX_DMA_ADDRESS is ambiguous because on s390 it's completely unrelated
+ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
+ * by the 31 bit heritage.
+ */
#define MAX_DMA_ADDRESS 0x80000000
-#define free_dma(x) do { } while (0)
-
-#endif /* _ASM_DMA_H */
+#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
new file mode 100644
index 00000000000..7e3d2586c1f
--- /dev/null
+++ b/arch/s390/include/asm/hw_irq.h
@@ -0,0 +1,22 @@
+#ifndef _HW_IRQ_H
+#define _HW_IRQ_H
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
+{
+ return __irq_get_msi_desc(irq);
+}
+
+/* Must be called with msi map lock held */
+static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
+{
+ if (!msi)
+ return -EINVAL;
+
+ msi->irq = irq;
+ return 0;
+}
+
+#endif
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 559e921a6bb..16c3eb164f4 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -9,9 +9,9 @@
#ifndef _S390_IO_H
#define _S390_IO_H
+#include <linux/kernel.h>
#include <asm/page.h>
-
-#define IO_SPACE_LIMIT 0xffffffff
+#include <asm/pci_io.h>
/*
* Change virtual addresses to physical addresses and vv.
@@ -24,10 +24,11 @@ static inline unsigned long virt_to_phys(volatile void * address)
" lra %0,0(%1)\n"
" jz 0f\n"
" la %0,0\n"
- "0:"
+ "0:"
: "=a" (real_address) : "a" (address) : "cc");
- return real_address;
+ return real_address;
}
+#define virt_to_phys virt_to_phys
static inline void * phys_to_virt(unsigned long address)
{
@@ -42,4 +43,50 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
*/
#define xlate_dev_kmem_ptr(p) p
+#define IO_SPACE_LIMIT 0
+
+#ifdef CONFIG_PCI
+
+#define ioremap_nocache(addr, size) ioremap(addr, size)
+#define ioremap_wc ioremap_nocache
+
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+ return (void __iomem *) offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+}
+
+/*
+ * s390 needs a private implementation of pci_iomap since ioremap with its
+ * offset parameter isn't sufficient. That's because BAR spaces are not
+ * disjoint on s390, so we need the bar parameter of pci_iomap to find
+ * the corresponding device and create the mapping cookie.
+ */
+#define pci_iomap pci_iomap
+#define pci_iounmap pci_iounmap
+
+#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
+#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
+#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
+
+#define __raw_readb zpci_read_u8
+#define __raw_readw zpci_read_u16
+#define __raw_readl zpci_read_u32
+#define __raw_readq zpci_read_u64
+#define __raw_writeb zpci_write_u8
+#define __raw_writew zpci_write_u16
+#define __raw_writel zpci_write_u32
+#define __raw_writeq zpci_write_u64
+
+#endif /* CONFIG_PCI */
+
+#include <asm-generic/io.h>
+
#endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 6703dd986fd..e6972f85d2b 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -33,6 +33,8 @@ enum interruption_class {
IOINT_APB,
IOINT_ADM,
IOINT_CSC,
+ IOINT_PCI,
+ IOINT_MSI,
NMI_NMI,
NR_IRQS,
};
@@ -51,4 +53,14 @@ void service_subclass_irq_unregister(void);
void measurement_alert_subclass_register(void);
void measurement_alert_subclass_unregister(void);
+#ifdef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
+# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
+ disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq) disable_irq(irq)
+# define enable_irq_lockdep(irq) enable_irq(irq)
+# define enable_irq_lockdep_irqrestore(irq, flags) \
+ enable_irq(irq)
+#endif
+
#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 5ae606456b0..68d7d68300f 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -18,6 +18,7 @@
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
+#define PCI_ISC 2 /* PCI I/O subchannels */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 39faa4ac966..a86ad408407 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,6 +30,8 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
+void storage_key_init_range(unsigned long start, unsigned long end);
+
static unsigned long pfmf(unsigned long function, unsigned long address)
{
asm volatile(
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 42a145c9ddd..b1fa93c606a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -1,10 +1,197 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
-/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
- * includes it even if CONFIG_PCI is not set.
- */
+/* must be set before including asm-generic/pci.h */
#define PCI_DMA_BUS_IS_PHYS (0)
+/* must be set before including pci_clp.h */
+#define PCI_BAR_COUNT 6
-#endif /* __ASM_S390_PCI_H */
+#include <asm-generic/pci.h>
+#include <asm-generic/pci-dma-compat.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_debug.h>
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+#define pcibios_assign_all_busses() (0)
+
+void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
+void pci_iounmap(struct pci_dev *, void __iomem *);
+int pci_domain_nr(struct pci_bus *);
+int pci_proc_domain(struct pci_bus *);
+
+/* MSI arch hooks */
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+
+#define ZPCI_BUS_NR 0 /* default bus number */
+#define ZPCI_DEVFN 0 /* default device number */
+
+/* PCI Function Controls */
+#define ZPCI_FC_FN_ENABLED 0x80
+#define ZPCI_FC_ERROR 0x40
+#define ZPCI_FC_BLOCKED 0x20
+#define ZPCI_FC_DMA_ENABLED 0x10
+
+struct zpci_fmb {
+ u32 format : 8;
+ u32 dma_valid : 1;
+ u32 : 23;
+ u32 samples;
+ u64 last_update;
+ /* hardware counters */
+ u64 ld_ops;
+ u64 st_ops;
+ u64 stb_ops;
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+ /* software counters */
+ atomic64_t allocated_pages;
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
+} __packed __aligned(16);
+
+struct msi_map {
+ unsigned long irq;
+ struct msi_desc *msi;
+ struct hlist_node msi_chain;
+};
+
+#define ZPCI_NR_MSI_VECS 64
+#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
+
+enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+ ZPCI_FN_STATE_STANDBY,
+ ZPCI_FN_STATE_CONFIGURED,
+ ZPCI_FN_STATE_ONLINE,
+ NR_ZPCI_FN_STATES,
+};
+
+struct zpci_bar_struct {
+ u32 val; /* bar start & 3 flag bits */
+ u8 size; /* order 2 exponent */
+ u16 map_idx; /* index into bar mapping array */
+};
+
+/* Private data per function */
+struct zpci_dev {
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+
+ enum zpci_state state;
+ u32 fid; /* function ID, used by sclp */
+ u32 fh; /* function handle, used by insn's */
+ u16 pchid; /* physical channel ID */
+ u8 pfgid; /* function group ID */
+ u16 domain;
+
+ /* IRQ stuff */
+ u64 msi_addr; /* MSI address */
+ struct zdev_irq_map *irq_map;
+ struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
+ unsigned int aisb; /* number of the summary bit */
+
+ /* DMA stuff */
+ unsigned long *dma_table;
+ spinlock_t dma_table_lock;
+ int tlb_refresh;
+
+ spinlock_t iommu_bitmap_lock;
+ unsigned long *iommu_bitmap;
+ unsigned long iommu_size;
+ unsigned long iommu_pages;
+ unsigned int next_bit;
+
+ struct zpci_bar_struct bars[PCI_BAR_COUNT];
+
+ u64 start_dma; /* Start of available DMA addresses */
+ u64 end_dma; /* End of available DMA addresses */
+ u64 dma_mask; /* DMA address space mask */
+
+ /* Function measurement block */
+ struct zpci_fmb *fmb;
+ u16 fmb_update; /* update interval */
+
+ enum pci_bus_speed max_bus_speed;
+
+ struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
+ struct dentry *debugfs_debug;
+};
+
+struct pci_hp_callback_ops {
+ int (*create_slot) (struct zpci_dev *zdev);
+ void (*remove_slot) (struct zpci_dev *zdev);
+};
+
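+/*
+ * The most significant bit of the function handle is used as the
+ * indicator that the function is currently enabled.
+ */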
+static inline bool zdev_enabled(struct zpci_dev *zdev)
+{
+ return (zdev->fh & (1UL << 31)) ? true : false;
+}
+
+/* -----------------------------------------------------------------------------
+ Prototypes
+----------------------------------------------------------------------------- */
+/* Base stuff */
+struct zpci_dev *zpci_alloc_device(void);
+int zpci_create_device(struct zpci_dev *);
+int zpci_enable_device(struct zpci_dev *);
+void zpci_stop_device(struct zpci_dev *);
+void zpci_free_device(struct zpci_dev *);
+int zpci_scan_device(struct zpci_dev *);
+int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+int zpci_unregister_ioat(struct zpci_dev *, u8);
+
+/* CLP */
+int clp_find_pci_devices(void);
+int clp_add_pci_device(u32, u32, int);
+int clp_enable_fh(struct zpci_dev *, u8);
+int clp_disable_fh(struct zpci_dev *);
+
+/* MSI */
+struct msi_desc *__irq_get_msi_desc(unsigned int);
+int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
+int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
+void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
+int zpci_msihash_init(void);
+void zpci_msihash_exit(void);
+
+/* Error handling and recovery */
+void zpci_event_error(void *);
+void zpci_event_availability(void *);
+
+/* Helpers */
+struct zpci_dev *get_zdev(struct pci_dev *);
+struct zpci_dev *get_zdev_by_fid(u32);
+bool zpci_fid_present(u32);
+
+/* sysfs */
+int zpci_sysfs_add_device(struct device *);
+void zpci_sysfs_remove_device(struct device *);
+
+/* DMA */
+int zpci_dma_init(void);
+void zpci_dma_exit(void);
+
+/* Hotplug */
+extern struct mutex zpci_list_lock;
+extern struct list_head zpci_list;
+extern struct pci_hp_callback_ops hotplug_ops;
+extern unsigned int pci_probe;
+
+/* FMB */
+int zpci_fmb_enable_device(struct zpci_dev *);
+int zpci_fmb_disable_device(struct zpci_dev *);
+
+/* Debug */
+int zpci_debug_init(void);
+void zpci_debug_exit(void);
+void zpci_debug_init_device(struct zpci_dev *);
+void zpci_debug_exit_device(struct zpci_dev *);
+void zpci_debug_info(struct zpci_dev *, struct seq_file *);
+
+#endif
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
new file mode 100644
index 00000000000..d31d739f868
--- /dev/null
+++ b/arch/s390/include/asm/pci_clp.h
@@ -0,0 +1,182 @@
+#ifndef _ASM_S390_PCI_CLP_H
+#define _ASM_S390_PCI_CLP_H
+
+#include <asm/clp.h>
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+struct clp_fh_list_entry {
+ u16 device_id;
+ u16 vendor_id;
+ u32 config_state : 1;
+ u32 : 31;
+ u32 fid; /* PCI function id */
+ u32 fh; /* PCI function handle */
+} __packed;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(struct clp_fh_list_entry))
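+
+/*
+ * Worked example (assuming 4 KB pages): CLP_BLK_SIZE is 8 KB, the two
+ * headers take 2 * 32 bytes and each clp_fh_list_entry is 16 bytes, so
+ * CLP_FH_LIST_NR_ENTRIES evaluates to (8192 - 64) / 16 = 508.
+ */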
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+
+/* List PCI functions request */
+struct clp_req_list_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u64 reserved2;
+} __packed;
+
+/* List PCI functions response */
+struct clp_rsp_list_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u64 resume_token;
+ u32 reserved2;
+ u16 max_fn;
+ u8 reserved3;
+ u8 entry_size;
+ struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} __packed;
+
+/* Query PCI function request */
+struct clp_req_query_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function response */
+struct clp_rsp_query_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u16 vfn; /* virtual fn number */
+ u16 : 7;
+ u16 util_str_avail : 1; /* utility string available? */
+ u16 pfgid : 8; /* pci function group id */
+ u32 fid; /* pci function id */
+ u8 bar_size[PCI_BAR_COUNT];
+ u16 pchid;
+ u32 bar[PCI_BAR_COUNT];
+ u64 reserved2;
+ u64 sdma; /* start dma as */
+ u64 edma; /* end dma as */
+ u64 reserved3[6];
+ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} __packed;
+
+/* Query PCI function group request */
+struct clp_req_query_pci_grp {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 : 24;
+ u32 pfgid : 8; /* function group id */
+ u32 reserved2;
+ u64 reserved3;
+} __packed;
+
+/* Query PCI function group response */
+struct clp_rsp_query_pci_grp {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u16 : 4;
+ u16 noi : 12; /* number of interrupts */
+ u8 version;
+ u8 : 6;
+ u8 frame : 1;
+ u8 refresh : 1; /* TLB refresh mode */
+ u16 reserved2;
+ u16 mui;
+ u64 reserved3;
+ u64 dasm; /* dma address space mask */
+ u64 msia; /* MSI address */
+ u64 reserved4;
+ u64 reserved5;
+} __packed;
+
+/* Set PCI function request */
+struct clp_req_set_pci {
+ struct clp_req_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u16 reserved2;
+ u8 oc; /* operation controls */
+ u8 ndas; /* number of dma spaces */
+ u64 reserved3;
+} __packed;
+
+/* Set PCI function response */
+struct clp_rsp_set_pci {
+ struct clp_rsp_hdr hdr;
+ u32 fmt : 4; /* cmd request block format */
+ u32 : 28;
+ u64 reserved1;
+ u32 fh; /* function handle */
+ u32 reserved3;
+ u64 reserved4;
+} __packed;
+
+/* Combined request/response block structures used by clp insn */
+struct clp_req_rsp_list_pci {
+ struct clp_req_list_pci request;
+ struct clp_rsp_list_pci response;
+} __packed;
+
+struct clp_req_rsp_set_pci {
+ struct clp_req_set_pci request;
+ struct clp_rsp_set_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci {
+ struct clp_req_query_pci request;
+ struct clp_rsp_query_pci response;
+} __packed;
+
+struct clp_req_rsp_query_pci_grp {
+ struct clp_req_query_pci_grp request;
+ struct clp_rsp_query_pci_grp response;
+} __packed;
+
+#endif
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
new file mode 100644
index 00000000000..6bbec4265b6
--- /dev/null
+++ b/arch/s390/include/asm/pci_debug.h
@@ -0,0 +1,36 @@
+#ifndef _S390_ASM_PCI_DEBUG_H
+#define _S390_ASM_PCI_DEBUG_H
+
+#include <asm/debug.h>
+
+extern debug_info_t *pci_debug_msg_id;
+extern debug_info_t *pci_debug_err_id;
+
+#ifdef CONFIG_PCI_DEBUG
+#define zpci_dbg(fmt, args...) \
+ do { \
+ if (pci_debug_msg_id->level >= 2) \
+ debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
+ } while (0)
+
+#else /* !CONFIG_PCI_DEBUG */
+#define zpci_dbg(fmt, args...) do { } while (0)
+#endif
+
+#define zpci_err(text...) \
+ do { \
+ char debug_buffer[16]; \
+ snprintf(debug_buffer, 16, text); \
+ debug_text_event(pci_debug_err_id, 0, debug_buffer); \
+ } while (0)
+
+static inline void zpci_err_hex(void *addr, int len)
+{
+ while (len > 0) {
+ debug_event(pci_debug_err_id, 0, (void *) addr, len);
+ len -= pci_debug_err_id->buf_size;
+ addr += pci_debug_err_id->buf_size;
+ }
+}
+
+#endif
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
new file mode 100644
index 00000000000..30b4c179c38
--- /dev/null
+++ b/arch/s390/include/asm/pci_dma.h
@@ -0,0 +1,196 @@
+#ifndef _ASM_S390_PCI_DMA_H
+#define _ASM_S390_PCI_DMA_H
+
+/* I/O Translation Anchor (IOTA) */
+enum zpci_ioat_dtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800UL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffUL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffUL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffUL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffUL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
+{
+ *entry &= ZPCI_PTE_FLAG_MASK;
+ *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, void *sto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, void *pto)
+{
+ *entry &= ZPCI_STE_FLAG_MASK;
+ *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_table_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry |= ZPCI_TABLE_INVALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+ WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+ *entry &= ~ZPCI_PTE_VALID_MASK;
+ *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_PROT_MASK;
+ *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+ return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline int entry_isprotected(unsigned long entry)
+{
+ return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
+ : NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
+ : NULL;
+}
+
+/* Prototypes */
+int zpci_dma_init_device(struct zpci_dev *);
+void zpci_dma_exit_device(struct zpci_dev *);
+
+#endif
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
new file mode 100644
index 00000000000..1486a98d5da
--- /dev/null
+++ b/arch/s390/include/asm/pci_insn.h
@@ -0,0 +1,280 @@
+#ifndef _ASM_S390_PCI_INSN_H
+#define _ASM_S390_PCI_INSN_H
+
+#include <linux/delay.h>
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Load/Store address space identifiers */
+#define ZPCI_PCIAS_MEMIO_0 0
+#define ZPCI_PCIAS_MEMIO_1 1
+#define ZPCI_PCIAS_MEMIO_2 2
+#define ZPCI_PCIAS_MEMIO_3 3
+#define ZPCI_PCIAS_MEMIO_4 4
+#define ZPCI_PCIAS_MEMIO_5 5
+#define ZPCI_PCIAS_CFGSPC 15
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+struct zpci_fib {
+ u32 fmt : 8; /* format */
+ u32 : 24;
+ u32 reserved1;
+ u8 fc; /* function controls */
+ u8 reserved2;
+ u16 reserved3;
+ u32 reserved4;
+ u64 pba; /* PCI base address */
+ u64 pal; /* PCI address limit */
+ u64 iota; /* I/O Translation Anchor */
+ u32 : 1;
+ u32 isc : 3; /* Interrupt subclass */
+ u32 noi : 12; /* Number of interrupts */
+ u32 : 2;
+ u32 aibvo : 6; /* Adapter interrupt bit vector offset */
+ u32 sum : 1; /* Adapter int summary bit enabled */
+ u32 : 1;
+ u32 aisbo : 6; /* Adapter int summary bit offset */
+ u32 reserved5;
+ u64 aibv; /* Adapter int bit vector address */
+ u64 aisb; /* Adapter int summary bit address */
+ u64 fmb_addr; /* Function measurement block address and key */
+ u64 reserved6;
+ u64 reserved7;
+} __packed;
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Store PCI function controls */
+static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
+{
+ u64 fn = (u64) handle << 32 | space << 16;
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
+ : : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+/* Set Interruption Controls */
+static inline void sic_instr(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ u64 __data;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc) {
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ /* TODO: on IO errors set data to 0xff...
+ * here or in users of pcilg (le conversion)?
+ */
+ }
+ return (cc) ? -EIO : 0;
+}
+
+/* PCI Store */
+static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int pcistg_instr(u64 data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc) ? -EIO : 0;
+}
+
+/* PCI Store Block */
+static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
+{
+ u8 cc, status;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc) ? -EIO : 0;
+}
+
+#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
new file mode 100644
index 00000000000..5fd81f31d6c
--- /dev/null
+++ b/arch/s390/include/asm/pci_io.h
@@ -0,0 +1,194 @@
+#ifndef _ASM_S390_PCI_IO_H
+#define _ASM_S390_PCI_IO_H
+
+#ifdef CONFIG_PCI
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/pci_insn.h>
+
+/* I/O Map */
+#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
+#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
+#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
+#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
+
+struct zpci_iomap_entry {
+ u32 fh;
+ u8 bar;
+};
+
+extern struct zpci_iomap_entry *zpci_iomap_start;
+
+#define ZPCI_IDX(addr) \
+ (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+#define ZPCI_OFFSET(addr) \
+ ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
+
+#define ZPCI_CREATE_REQ(handle, space, len) \
+ ((u64) handle << 32 | space << 16 | len)
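+
+/*
+ * Illustrative decoding of an iomap cookie address (values are made up):
+ * for addr = 0x8002000000001000, ZPCI_IDX(addr) is 2 and ZPCI_OFFSET(addr)
+ * is 0x1000, i.e. entry 2 of zpci_iomap_start with a 0x1000 byte offset
+ * into the selected BAR.
+ */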
+
+#define zpci_read(LENGTH, RETTYPE) \
+static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data; \
+ int rc; \
+ \
+ rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ if (rc) \
+ data = -1ULL; \
+ return (RETTYPE) data; \
+}
+
+#define zpci_write(LENGTH, VALTYPE) \
+static inline void zpci_write_##VALTYPE(VALTYPE val, \
+ const volatile void __iomem *addr) \
+{ \
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
+ u64 data = (VALTYPE) val; \
+ \
+ pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+}
+
+zpci_read(8, u64)
+zpci_read(4, u32)
+zpci_read(2, u16)
+zpci_read(1, u8)
+zpci_write(8, u64)
+zpci_write(4, u32)
+zpci_write(2, u16)
+zpci_write(1, u8)
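+
+/*
+ * The macro expansions above provide zpci_read_u8/u16/u32/u64 and
+ * zpci_write_u8/u16/u32/u64, which back the __raw_read* and __raw_write*
+ * definitions in <asm/io.h>.
+ */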
+
+static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+{
+ u64 val;
+
+ switch (len) {
+ case 1:
+ val = (u64) *((u8 *) data);
+ break;
+ case 2:
+ val = (u64) *((u16 *) data);
+ break;
+ case 4:
+ val = (u64) *((u32 *) data);
+ break;
+ case 8:
+ val = (u64) *((u64 *) data);
+ break;
+ default:
+ val = 0; /* let FW report error */
+ break;
+ }
+ return pcistg_instr(val, req, offset);
+}
+
+static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+{
+ u64 data;
+ u8 cc;
+
+ cc = pcilg_instr(&data, req, offset);
+ switch (len) {
+ case 1:
+ *((u8 *) dst) = (u8) data;
+ break;
+ case 2:
+ *((u16 *) dst) = (u16) data;
+ break;
+ case 4:
+ *((u32 *) dst) = (u32) data;
+ break;
+ case 8:
+ *((u64 *) dst) = (u64) data;
+ break;
+ }
+ return cc;
+}
+
+static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
+{
+ return pcistb_instr(data, req, offset);
+}
+
+static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+{
+ int count = len > max ? max : len, size = 1;
+
+ while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
+ dst = dst >> 1;
+ src = src >> 1;
+ size = size << 1;
+ }
+ return size;
+}
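+
+/*
+ * Example (illustrative): with src and dst both 4-byte aligned, len = 10
+ * and max = 8, the loop doubles size while both addresses stay even, so
+ * zpci_get_max_write_size() returns 4.
+ */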
+
+static inline int zpci_memcpy_fromio(void *dst,
+ const volatile void __iomem *src,
+ unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
+ u64 req, offset = ZPCI_OFFSET(src);
+ int size, rc = 0;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+ rc = zpci_read_single(req, dst, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ dst += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memcpy_toio(volatile void __iomem *dst,
+ const void *src, unsigned long n)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req, offset = ZPCI_OFFSET(dst);
+ int size, rc = 0;
+
+ if (!src)
+ return -EINVAL;
+
+ while (n > 0) {
+ size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+ req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
+
+ if (size > 8) /* main path */
+ rc = zpci_write_block(req, src, offset);
+ else
+ rc = zpci_write_single(req, src, offset, size);
+ if (rc)
+ break;
+ offset += size;
+ src += size;
+ n -= size;
+ }
+ return rc;
+}
+
+static inline int zpci_memset_io(volatile void __iomem *dst,
+ unsigned char val, size_t count)
+{
+ u8 *src = kmalloc(count, GFP_KERNEL);
+ int rc;
+
+ if (src == NULL)
+ return -ENOMEM;
+ memset(src, val, count);
+
+ rc = zpci_memcpy_toio(dst, src, count);
+ kfree(src);
+ return rc;
+}
+
+#endif /* CONFIG_PCI */
+
+#endif /* _ASM_S390_PCI_IO_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2d3b7cb2600..c928dc1938f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -35,7 +35,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
-extern void fault_init(void);
/*
* The S390 doesn't have any external MMU info: the kernel page
@@ -55,16 +54,7 @@ extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + \
(((unsigned long)(vaddr)) &zero_page_mask))))
-
-#define is_zero_pfn is_zero_pfn
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-}
-
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+#define __HAVE_COLOR_ZERO_PAGE
#endif /* !__ASSEMBLY__ */
@@ -345,6 +335,8 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
+
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
@@ -444,6 +436,7 @@ static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* CONFIG_64BIT */
@@ -489,6 +482,13 @@ static inline int pud_none(pud_t pud)
return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}
+static inline int pud_large(pud_t pud)
+{
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
+ return 0;
+ return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
+}
+
static inline int pud_bad(pud_t pud)
{
/*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e62a555557e..833788693f0 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -55,5 +55,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
bool sclp_has_linemode(void);
bool sclp_has_vt220(void);
+int sclp_pci_configure(u32 fid);
+int sclp_pci_deconfigure(u32 fid);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9935cbd6a46..05425b18c0a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -8,32 +8,34 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
-extern unsigned char cpu_socket_id[NR_CPUS];
-#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+struct cpu_topology_s390 {
+ unsigned short core_id;
+ unsigned short socket_id;
+ unsigned short book_id;
+ cpumask_t core_mask;
+ cpumask_t book_mask;
+};
+
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
-extern unsigned char cpu_core_id[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+#define mc_capable() 1
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_core_map[cpu];
+ return &cpu_topology[cpu].core_mask;
}
-#define topology_core_id(cpu) (cpu_core_id[cpu])
-#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define mc_capable() (1)
-
-extern unsigned char cpu_book_id[NR_CPUS];
-extern cpumask_t cpu_book_map[NR_CPUS];
-
static inline const struct cpumask *cpu_book_mask(int cpu)
{
- return &cpu_book_map[cpu];
+ return &cpu_topology[cpu].book_mask;
}
-#define topology_book_id(cpu) (cpu_book_id[cpu])
-#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
-
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 086bb8eaf6a..63653087251 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -53,7 +53,6 @@
# define __ARCH_WANT_COMPAT_SYS_TIME
# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
# endif
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
new file mode 100644
index 00000000000..d375526c261
--- /dev/null
+++ b/arch/s390/include/asm/vga.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_VGA_H
+#define _ASM_S390_VGA_H
+
+/* Avoid compile errors due to missing asm/vga.h */
+
+#endif /* _ASM_S390_VGA_H */
diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h
index 8c6a49e392e..2f43cfbf5f1 100644
--- a/arch/s390/include/uapi/asm/signal.h
+++ b/arch/s390/include/uapi/asm/signal.h
@@ -90,12 +90,6 @@ typedef unsigned long sigset_t;
#define SA_RESTORER 0x04000000
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4da52fe3174..2ac311ef5c9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
+ sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f00286bd2ef..a7f9abd98cf 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -83,22 +83,29 @@ enum {
U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */
U4_20, /* 4 bit unsigned value starting at 20 */
+ U4_24, /* 4 bit unsigned value starting at 24 */
+ U4_28, /* 4 bit unsigned value starting at 28 */
U4_32, /* 4 bit unsigned value starting at 32 */
+ U4_36, /* 4 bit unsigned value starting at 36 */
U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
U8_32, /* 8 bit unsigned value starting at 32 */
I8_8, /* 8 bit signed value starting at 8 */
I8_32, /* 8 bit signed value starting at 32 */
+ J12_12, /* PC relative offset at 12 */
I16_16, /* 16 bit signed value starting at 16 */
I16_32, /* 32 bit signed value starting at 16 */
U16_16, /* 16 bit unsigned value starting at 16 */
U16_32, /* 32 bit unsigned value starting at 16 */
J16_16, /* PC relative jump offset at 16 */
+ J16_32, /* PC relative offset at 16 */
+ I24_24, /* 24 bit signed value starting at 24 */
J32_16, /* PC relative long offset at 16 */
I32_16, /* 32 bit signed value starting at 16 */
U32_16, /* 32 bit unsigned value starting at 16 */
M_16, /* 4 bit optional mask starting at 16 */
+ M_20, /* 4 bit optional mask starting at 20 */
RO_28, /* optional GPR starting at position 28 */
};
@@ -109,6 +116,8 @@ enum {
enum {
INSTR_INVALID,
INSTR_E,
+ INSTR_IE_UU,
+ INSTR_MII_UPI,
INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
@@ -118,13 +127,15 @@ enum {
INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
INSTR_RRE_RR, INSTR_RRE_RR_OPT,
INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
- INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR,
- INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
- INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
+ INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR,
+ INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR,
+ INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF,
+ INSTR_RRF_UUFR, INSTR_RRF_UURF,
+ INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
INSTR_RSI_RRP,
- INSTR_RSL_R0RD,
+ INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
INSTR_RSY_RDRM,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
@@ -136,6 +147,7 @@ enum {
INSTR_SIL_RDI, INSTR_SIL_RDU,
INSTR_SIY_IRD, INSTR_SIY_URD,
INSTR_SI_URD,
+ INSTR_SMI_U0RDP,
INSTR_SSE_RDRD,
INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
@@ -191,31 +203,42 @@ static const struct operand operands[] =
[U4_12] = { 4, 12, 0 },
[U4_16] = { 4, 16, 0 },
[U4_20] = { 4, 20, 0 },
+ [U4_24] = { 4, 24, 0 },
+ [U4_28] = { 4, 28, 0 },
[U4_32] = { 4, 32, 0 },
+ [U4_36] = { 4, 36, 0 },
[U8_8] = { 8, 8, 0 },
[U8_16] = { 8, 16, 0 },
[U8_24] = { 8, 24, 0 },
[U8_32] = { 8, 32, 0 },
+ [J12_12] = { 12, 12, OPERAND_PCREL },
[I16_16] = { 16, 16, OPERAND_SIGNED },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL },
+ [J16_32] = { 16, 32, OPERAND_PCREL },
[I16_32] = { 16, 32, OPERAND_SIGNED },
+ [I24_24] = { 24, 24, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED },
[U32_16] = { 32, 16, 0 },
[M_16] = { 4, 16, 0 },
+ [M_20] = { 4, 20, 0 },
[RO_28] = { 4, 28, OPERAND_GPR }
};
static const unsigned char formats[][7] = {
[INSTR_E] = { 0xff, 0,0,0,0,0,0 },
+ [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
+ [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
+ [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
[INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
+ [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
[INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
[INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
[INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
[INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
- [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
+ [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 },
[INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
[INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
[INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
@@ -245,14 +268,18 @@ static const unsigned char formats[][7] = {
[INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
[INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
[INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
+ [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
[INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
[INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
[INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
+ [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
[INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
[INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
[INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
[INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
+ [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
+ [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
[INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
[INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
[INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
@@ -264,12 +291,13 @@ static const unsigned char formats[][7] = {
[INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
[INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
+ [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
[INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
+ [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
[INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
[INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -289,9 +317,10 @@ static const unsigned char formats[][7] = {
[INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
[INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
[INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
+ [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
[INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
- [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
- [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 },
+ [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 },
+ [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 },
[INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
[INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
[INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -304,46 +333,157 @@ static const unsigned char formats[][7] = {
enum {
LONG_INSN_ALGHSIK,
+ LONG_INSN_ALHHHR,
+ LONG_INSN_ALHHLR,
LONG_INSN_ALHSIK,
+ LONG_INSN_ALSIHN,
+ LONG_INSN_CDFBRA,
+ LONG_INSN_CDGBRA,
+ LONG_INSN_CDGTRA,
+ LONG_INSN_CDLFBR,
+ LONG_INSN_CDLFTR,
+ LONG_INSN_CDLGBR,
+ LONG_INSN_CDLGTR,
+ LONG_INSN_CEFBRA,
+ LONG_INSN_CEGBRA,
+ LONG_INSN_CELFBR,
+ LONG_INSN_CELGBR,
+ LONG_INSN_CFDBRA,
+ LONG_INSN_CFEBRA,
+ LONG_INSN_CFXBRA,
+ LONG_INSN_CGDBRA,
+ LONG_INSN_CGDTRA,
+ LONG_INSN_CGEBRA,
+ LONG_INSN_CGXBRA,
+ LONG_INSN_CGXTRA,
+ LONG_INSN_CLFDBR,
+ LONG_INSN_CLFDTR,
+ LONG_INSN_CLFEBR,
LONG_INSN_CLFHSI,
+ LONG_INSN_CLFXBR,
+ LONG_INSN_CLFXTR,
+ LONG_INSN_CLGDBR,
+ LONG_INSN_CLGDTR,
+ LONG_INSN_CLGEBR,
LONG_INSN_CLGFRL,
LONG_INSN_CLGHRL,
LONG_INSN_CLGHSI,
+ LONG_INSN_CLGXBR,
+ LONG_INSN_CLGXTR,
LONG_INSN_CLHHSI,
+ LONG_INSN_CXFBRA,
+ LONG_INSN_CXGBRA,
+ LONG_INSN_CXGTRA,
+ LONG_INSN_CXLFBR,
+ LONG_INSN_CXLFTR,
+ LONG_INSN_CXLGBR,
+ LONG_INSN_CXLGTR,
+ LONG_INSN_FIDBRA,
+ LONG_INSN_FIEBRA,
+ LONG_INSN_FIXBRA,
+ LONG_INSN_LDXBRA,
+ LONG_INSN_LEDBRA,
+ LONG_INSN_LEXBRA,
+ LONG_INSN_LLGFAT,
LONG_INSN_LLGFRL,
LONG_INSN_LLGHRL,
+ LONG_INSN_LLGTAT,
LONG_INSN_POPCNT,
+ LONG_INSN_RIEMIT,
+ LONG_INSN_RINEXT,
+ LONG_INSN_RISBGN,
LONG_INSN_RISBHG,
LONG_INSN_RISBLG,
- LONG_INSN_RINEXT,
- LONG_INSN_RIEMIT,
+ LONG_INSN_SLHHHR,
+ LONG_INSN_SLHHLR,
LONG_INSN_TABORT,
LONG_INSN_TBEGIN,
LONG_INSN_TBEGINC,
+ LONG_INSN_PCISTG,
+ LONG_INSN_MPCIFC,
+ LONG_INSN_STPCIFC,
+ LONG_INSN_PCISTB,
};
static char *long_insn_name[] = {
[LONG_INSN_ALGHSIK] = "alghsik",
+ [LONG_INSN_ALHHHR] = "alhhhr",
+ [LONG_INSN_ALHHLR] = "alhhlr",
[LONG_INSN_ALHSIK] = "alhsik",
+ [LONG_INSN_ALSIHN] = "alsihn",
+ [LONG_INSN_CDFBRA] = "cdfbra",
+ [LONG_INSN_CDGBRA] = "cdgbra",
+ [LONG_INSN_CDGTRA] = "cdgtra",
+ [LONG_INSN_CDLFBR] = "cdlfbr",
+ [LONG_INSN_CDLFTR] = "cdlftr",
+ [LONG_INSN_CDLGBR] = "cdlgbr",
+ [LONG_INSN_CDLGTR] = "cdlgtr",
+ [LONG_INSN_CEFBRA] = "cefbra",
+ [LONG_INSN_CEGBRA] = "cegbra",
+ [LONG_INSN_CELFBR] = "celfbr",
+ [LONG_INSN_CELGBR] = "celgbr",
+ [LONG_INSN_CFDBRA] = "cfdbra",
+ [LONG_INSN_CFEBRA] = "cfebra",
+ [LONG_INSN_CFXBRA] = "cfxbra",
+ [LONG_INSN_CGDBRA] = "cgdbra",
+ [LONG_INSN_CGDTRA] = "cgdtra",
+ [LONG_INSN_CGEBRA] = "cgebra",
+ [LONG_INSN_CGXBRA] = "cgxbra",
+ [LONG_INSN_CGXTRA] = "cgxtra",
+ [LONG_INSN_CLFDBR] = "clfdbr",
+ [LONG_INSN_CLFDTR] = "clfdtr",
+ [LONG_INSN_CLFEBR] = "clfebr",
[LONG_INSN_CLFHSI] = "clfhsi",
+ [LONG_INSN_CLFXBR] = "clfxbr",
+ [LONG_INSN_CLFXTR] = "clfxtr",
+ [LONG_INSN_CLGDBR] = "clgdbr",
+ [LONG_INSN_CLGDTR] = "clgdtr",
+ [LONG_INSN_CLGEBR] = "clgebr",
[LONG_INSN_CLGFRL] = "clgfrl",
[LONG_INSN_CLGHRL] = "clghrl",
[LONG_INSN_CLGHSI] = "clghsi",
+ [LONG_INSN_CLGXBR] = "clgxbr",
+ [LONG_INSN_CLGXTR] = "clgxtr",
[LONG_INSN_CLHHSI] = "clhhsi",
+ [LONG_INSN_CXFBRA] = "cxfbra",
+ [LONG_INSN_CXGBRA] = "cxgbra",
+ [LONG_INSN_CXGTRA] = "cxgtra",
+ [LONG_INSN_CXLFBR] = "cxlfbr",
+ [LONG_INSN_CXLFTR] = "cxlftr",
+ [LONG_INSN_CXLGBR] = "cxlgbr",
+ [LONG_INSN_CXLGTR] = "cxlgtr",
+ [LONG_INSN_FIDBRA] = "fidbra",
+ [LONG_INSN_FIEBRA] = "fiebra",
+ [LONG_INSN_FIXBRA] = "fixbra",
+ [LONG_INSN_LDXBRA] = "ldxbra",
+ [LONG_INSN_LEDBRA] = "ledbra",
+ [LONG_INSN_LEXBRA] = "lexbra",
+ [LONG_INSN_LLGFAT] = "llgfat",
[LONG_INSN_LLGFRL] = "llgfrl",
[LONG_INSN_LLGHRL] = "llghrl",
+ [LONG_INSN_LLGTAT] = "llgtat",
[LONG_INSN_POPCNT] = "popcnt",
+ [LONG_INSN_RIEMIT] = "riemit",
+ [LONG_INSN_RINEXT] = "rinext",
+ [LONG_INSN_RISBGN] = "risbgn",
[LONG_INSN_RISBHG] = "risbhg",
[LONG_INSN_RISBLG] = "risblg",
- [LONG_INSN_RINEXT] = "rinext",
- [LONG_INSN_RIEMIT] = "riemit",
+ [LONG_INSN_SLHHHR] = "slhhhr",
+ [LONG_INSN_SLHHLR] = "slhhlr",
[LONG_INSN_TABORT] = "tabort",
[LONG_INSN_TBEGIN] = "tbegin",
[LONG_INSN_TBEGINC] = "tbeginc",
+ [LONG_INSN_PCISTG] = "pcistg",
+ [LONG_INSN_MPCIFC] = "mpcifc",
+ [LONG_INSN_STPCIFC] = "stpcifc",
+ [LONG_INSN_PCISTB] = "pcistb",
};
static struct insn opcode[] = {
#ifdef CONFIG_64BIT
+ { "bprp", 0xc5, INSTR_MII_UPI },
+ { "bpp", 0xc7, INSTR_SMI_U0RDP },
+ { "trtr", 0xd0, INSTR_SS_L0RDRD },
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
#endif
{ "spm", 0x04, INSTR_RR_R0 },
@@ -378,7 +518,6 @@ static struct insn opcode[] = {
{ "lcdr", 0x23, INSTR_RR_FF },
{ "hdr", 0x24, INSTR_RR_FF },
{ "ldxr", 0x25, INSTR_RR_FF },
- { "lrdr", 0x25, INSTR_RR_FF },
{ "mxr", 0x26, INSTR_RR_FF },
{ "mxdr", 0x27, INSTR_RR_FF },
{ "ldr", 0x28, INSTR_RR_FF },
@@ -395,7 +534,6 @@ static struct insn opcode[] = {
{ "lcer", 0x33, INSTR_RR_FF },
{ "her", 0x34, INSTR_RR_FF },
{ "ledr", 0x35, INSTR_RR_FF },
- { "lrer", 0x35, INSTR_RR_FF },
{ "axr", 0x36, INSTR_RR_FF },
{ "sxr", 0x37, INSTR_RR_FF },
{ "ler", 0x38, INSTR_RR_FF },
@@ -403,7 +541,6 @@ static struct insn opcode[] = {
{ "aer", 0x3a, INSTR_RR_FF },
{ "ser", 0x3b, INSTR_RR_FF },
{ "mder", 0x3c, INSTR_RR_FF },
- { "mer", 0x3c, INSTR_RR_FF },
{ "der", 0x3d, INSTR_RR_FF },
{ "aur", 0x3e, INSTR_RR_FF },
{ "sur", 0x3f, INSTR_RR_FF },
@@ -454,7 +591,6 @@ static struct insn opcode[] = {
{ "ae", 0x7a, INSTR_RX_FRRD },
{ "se", 0x7b, INSTR_RX_FRRD },
{ "mde", 0x7c, INSTR_RX_FRRD },
- { "me", 0x7c, INSTR_RX_FRRD },
{ "de", 0x7d, INSTR_RX_FRRD },
{ "au", 0x7e, INSTR_RX_FRRD },
{ "su", 0x7f, INSTR_RX_FRRD },
@@ -534,9 +670,9 @@ static struct insn opcode[] = {
static struct insn opcode_01[] = {
#ifdef CONFIG_64BIT
- { "sam64", 0x0e, INSTR_E },
- { "pfpo", 0x0a, INSTR_E },
{ "ptff", 0x04, INSTR_E },
+ { "pfpo", 0x0a, INSTR_E },
+ { "sam64", 0x0e, INSTR_E },
#endif
{ "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E },
@@ -605,19 +741,28 @@ static struct insn opcode_aa[] = {
static struct insn opcode_b2[] = {
#ifdef CONFIG_64BIT
- { "sske", 0x2b, INSTR_RRF_M0RR },
{ "stckf", 0x7c, INSTR_S_RD },
- { "cu21", 0xa6, INSTR_RRF_M0RR },
- { "cuutf", 0xa6, INSTR_RRF_M0RR },
- { "cu12", 0xa7, INSTR_RRF_M0RR },
- { "cutfu", 0xa7, INSTR_RRF_M0RR },
+ { "lpp", 0x80, INSTR_S_RD },
+ { "lcctl", 0x84, INSTR_S_RD },
+ { "lpctl", 0x85, INSTR_S_RD },
+ { "qsi", 0x86, INSTR_S_RD },
+ { "lsctl", 0x87, INSTR_S_RD },
+ { "qctri", 0x8e, INSTR_S_RD },
{ "stfle", 0xb0, INSTR_S_RD },
{ "lpswe", 0xb2, INSTR_S_RD },
+ { "srnmb", 0xb8, INSTR_S_RD },
{ "srnmt", 0xb9, INSTR_S_RD },
{ "lfas", 0xbd, INSTR_S_RD },
- { "etndg", 0xec, INSTR_RRE_R0 },
+ { "scctr", 0xe0, INSTR_RRE_RR },
+ { "spctr", 0xe1, INSTR_RRE_RR },
+ { "ecctr", 0xe4, INSTR_RRE_RR },
+ { "epctr", 0xe5, INSTR_RRE_RR },
+ { "ppa", 0xe8, INSTR_RRF_U0RR },
+ { "etnd", 0xec, INSTR_RRE_R0 },
+ { "ecpga", 0xed, INSTR_RRE_RR },
+ { "tend", 0xf8, INSTR_S_00 },
+ { "niai", 0xfa, INSTR_IE_UU },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
- { "tend", 0xf8, INSTR_S_RD },
#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
@@ -635,8 +780,8 @@ static struct insn opcode_b2[] = {
{ "sie", 0x14, INSTR_S_RD },
{ "pc", 0x18, INSTR_S_RD },
{ "sac", 0x19, INSTR_S_RD },
- { "servc", 0x20, INSTR_RRE_RR },
{ "cfc", 0x1a, INSTR_S_RD },
+ { "servc", 0x20, INSTR_RRE_RR },
{ "ipte", 0x21, INSTR_RRE_RR },
{ "ipm", 0x22, INSTR_RRE_R0 },
{ "ivsk", 0x23, INSTR_RRE_RR },
@@ -647,9 +792,9 @@ static struct insn opcode_b2[] = {
{ "pt", 0x28, INSTR_RRE_RR },
{ "iske", 0x29, INSTR_RRE_RR },
{ "rrbe", 0x2a, INSTR_RRE_RR },
- { "sske", 0x2b, INSTR_RRE_RR },
+ { "sske", 0x2b, INSTR_RRF_M0RR },
{ "tb", 0x2c, INSTR_RRE_0R },
- { "dxr", 0x2d, INSTR_RRE_F0 },
+ { "dxr", 0x2d, INSTR_RRE_FF },
{ "pgin", 0x2e, INSTR_RRE_RR },
{ "pgout", 0x2f, INSTR_RRE_RR },
{ "csch", 0x30, INSTR_S_00 },
@@ -667,8 +812,8 @@ static struct insn opcode_b2[] = {
{ "schm", 0x3c, INSTR_S_00 },
{ "bakr", 0x40, INSTR_RRE_RR },
{ "cksm", 0x41, INSTR_RRE_RR },
- { "sqdr", 0x44, INSTR_RRE_F0 },
- { "sqer", 0x45, INSTR_RRE_F0 },
+ { "sqdr", 0x44, INSTR_RRE_FF },
+ { "sqer", 0x45, INSTR_RRE_FF },
{ "stura", 0x46, INSTR_RRE_RR },
{ "msta", 0x47, INSTR_RRE_R0 },
{ "palb", 0x48, INSTR_RRE_00 },
@@ -694,14 +839,14 @@ static struct insn opcode_b2[] = {
{ "rp", 0x77, INSTR_S_RD },
{ "stcke", 0x78, INSTR_S_RD },
{ "sacf", 0x79, INSTR_S_RD },
- { "spp", 0x80, INSTR_S_RD },
{ "stsi", 0x7d, INSTR_S_RD },
+ { "spp", 0x80, INSTR_S_RD },
{ "srnm", 0x99, INSTR_S_RD },
{ "stfpc", 0x9c, INSTR_S_RD },
{ "lfpc", 0x9d, INSTR_S_RD },
{ "tre", 0xa5, INSTR_RRE_RR },
- { "cuutf", 0xa6, INSTR_RRE_RR },
- { "cutfu", 0xa7, INSTR_RRE_RR },
+ { "cuutf", 0xa6, INSTR_RRF_M0RR },
+ { "cutfu", 0xa7, INSTR_RRF_M0RR },
{ "stfl", 0xb1, INSTR_S_RD },
{ "trap4", 0xff, INSTR_S_RD },
{ "", 0, INSTR_INVALID }
@@ -715,72 +860,87 @@ static struct insn opcode_b3[] = {
{ "myr", 0x3b, INSTR_RRF_F0FF },
{ "mayhr", 0x3c, INSTR_RRF_F0FF },
{ "myhr", 0x3d, INSTR_RRF_F0FF },
- { "cegbr", 0xa4, INSTR_RRE_RR },
- { "cdgbr", 0xa5, INSTR_RRE_RR },
- { "cxgbr", 0xa6, INSTR_RRE_RR },
- { "cgebr", 0xa8, INSTR_RRF_U0RF },
- { "cgdbr", 0xa9, INSTR_RRF_U0RF },
- { "cgxbr", 0xaa, INSTR_RRF_U0RF },
- { "cfer", 0xb8, INSTR_RRF_U0RF },
- { "cfdr", 0xb9, INSTR_RRF_U0RF },
- { "cfxr", 0xba, INSTR_RRF_U0RF },
- { "cegr", 0xc4, INSTR_RRE_RR },
- { "cdgr", 0xc5, INSTR_RRE_RR },
- { "cxgr", 0xc6, INSTR_RRE_RR },
- { "cger", 0xc8, INSTR_RRF_U0RF },
- { "cgdr", 0xc9, INSTR_RRF_U0RF },
- { "cgxr", 0xca, INSTR_RRF_U0RF },
{ "lpdfr", 0x70, INSTR_RRE_FF },
{ "lndfr", 0x71, INSTR_RRE_FF },
{ "cpsdr", 0x72, INSTR_RRF_F0FF2 },
{ "lcdfr", 0x73, INSTR_RRE_FF },
+ { "sfasr", 0x85, INSTR_RRE_R0 },
+ { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
{ "ldgr", 0xc1, INSTR_RRE_FR },
+ { "cegr", 0xc4, INSTR_RRE_FR },
+ { "cdgr", 0xc5, INSTR_RRE_FR },
+ { "cxgr", 0xc6, INSTR_RRE_FR },
+ { "cger", 0xc8, INSTR_RRF_U0RF },
+ { "cgdr", 0xc9, INSTR_RRF_U0RF },
+ { "cgxr", 0xca, INSTR_RRF_U0RF },
{ "lgdr", 0xcd, INSTR_RRE_RF },
- { "adtr", 0xd2, INSTR_RRR_F0FF },
- { "axtr", 0xda, INSTR_RRR_F0FF },
- { "cdtr", 0xe4, INSTR_RRE_FF },
- { "cxtr", 0xec, INSTR_RRE_FF },
+ { "mdtra", 0xd0, INSTR_RRF_FUFF2 },
+ { "ddtra", 0xd1, INSTR_RRF_FUFF2 },
+ { "adtra", 0xd2, INSTR_RRF_FUFF2 },
+ { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
+ { "ldetr", 0xd4, INSTR_RRF_0UFF },
+ { "ledtr", 0xd5, INSTR_RRF_UUFF },
+ { "ltdtr", 0xd6, INSTR_RRE_FF },
+ { "fidtr", 0xd7, INSTR_RRF_UUFF },
+ { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
+ { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
+ { "axtra", 0xda, INSTR_RRF_FUFF2 },
+ { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
+ { "lxdtr", 0xdc, INSTR_RRF_0UFF },
+ { "ldxtr", 0xdd, INSTR_RRF_UUFF },
+ { "ltxtr", 0xde, INSTR_RRE_FF },
+ { "fixtr", 0xdf, INSTR_RRF_UUFF },
{ "kdtr", 0xe0, INSTR_RRE_FF },
- { "kxtr", 0xe8, INSTR_RRE_FF },
- { "cedtr", 0xf4, INSTR_RRE_FF },
- { "cextr", 0xfc, INSTR_RRE_FF },
- { "cdgtr", 0xf1, INSTR_RRE_FR },
- { "cxgtr", 0xf9, INSTR_RRE_FR },
- { "cdstr", 0xf3, INSTR_RRE_FR },
- { "cxstr", 0xfb, INSTR_RRE_FR },
- { "cdutr", 0xf2, INSTR_RRE_FR },
- { "cxutr", 0xfa, INSTR_RRE_FR },
- { "cgdtr", 0xe1, INSTR_RRF_U0RF },
- { "cgxtr", 0xe9, INSTR_RRF_U0RF },
- { "csdtr", 0xe3, INSTR_RRE_RF },
- { "csxtr", 0xeb, INSTR_RRE_RF },
+ { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
{ "cudtr", 0xe2, INSTR_RRE_RF },
- { "cuxtr", 0xea, INSTR_RRE_RF },
- { "ddtr", 0xd1, INSTR_RRR_F0FF },
- { "dxtr", 0xd9, INSTR_RRR_F0FF },
+ { "csdtr", 0xe3, INSTR_RRE_RF },
+ { "cdtr", 0xe4, INSTR_RRE_FF },
{ "eedtr", 0xe5, INSTR_RRE_RF },
- { "eextr", 0xed, INSTR_RRE_RF },
{ "esdtr", 0xe7, INSTR_RRE_RF },
+ { "kxtr", 0xe8, INSTR_RRE_FF },
+ { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR },
+ { "cuxtr", 0xea, INSTR_RRE_RF },
+ { "csxtr", 0xeb, INSTR_RRE_RF },
+ { "cxtr", 0xec, INSTR_RRE_FF },
+ { "eextr", 0xed, INSTR_RRE_RF },
{ "esxtr", 0xef, INSTR_RRE_RF },
- { "iedtr", 0xf6, INSTR_RRF_F0FR },
- { "iextr", 0xfe, INSTR_RRF_F0FR },
- { "ltdtr", 0xd6, INSTR_RRE_FF },
- { "ltxtr", 0xde, INSTR_RRE_FF },
- { "fidtr", 0xd7, INSTR_RRF_UUFF },
- { "fixtr", 0xdf, INSTR_RRF_UUFF },
- { "ldetr", 0xd4, INSTR_RRF_0UFF },
- { "lxdtr", 0xdc, INSTR_RRF_0UFF },
- { "ledtr", 0xd5, INSTR_RRF_UUFF },
- { "ldxtr", 0xdd, INSTR_RRF_UUFF },
- { "mdtr", 0xd0, INSTR_RRR_F0FF },
- { "mxtr", 0xd8, INSTR_RRR_F0FF },
+ { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
+ { "cdutr", 0xf2, INSTR_RRE_FR },
+ { "cdstr", 0xf3, INSTR_RRE_FR },
+ { "cedtr", 0xf4, INSTR_RRE_FF },
{ "qadtr", 0xf5, INSTR_RRF_FUFF },
- { "qaxtr", 0xfd, INSTR_RRF_FUFF },
+ { "iedtr", 0xf6, INSTR_RRF_F0FR },
{ "rrdtr", 0xf7, INSTR_RRF_FFRU },
+ { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
+ { "cxutr", 0xfa, INSTR_RRE_FR },
+ { "cxstr", 0xfb, INSTR_RRE_FR },
+ { "cextr", 0xfc, INSTR_RRE_FF },
+ { "qaxtr", 0xfd, INSTR_RRF_FUFF },
+ { "iextr", 0xfe, INSTR_RRF_F0FR },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
- { "sfasr", 0x85, INSTR_RRE_R0 },
- { "sdtr", 0xd3, INSTR_RRR_F0FF },
- { "sxtr", 0xdb, INSTR_RRR_F0FF },
#endif
{ "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF },
@@ -827,10 +987,10 @@ static struct insn opcode_b3[] = {
{ "lnxbr", 0x41, INSTR_RRE_FF },
{ "ltxbr", 0x42, INSTR_RRE_FF },
{ "lcxbr", 0x43, INSTR_RRE_FF },
- { "ledbr", 0x44, INSTR_RRE_FF },
- { "ldxbr", 0x45, INSTR_RRE_FF },
- { "lexbr", 0x46, INSTR_RRE_FF },
- { "fixbr", 0x47, INSTR_RRF_U0FF },
+ { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF },
+ { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF },
{ "kxbr", 0x48, INSTR_RRE_FF },
{ "cxbr", 0x49, INSTR_RRE_FF },
{ "axbr", 0x4a, INSTR_RRE_FF },
@@ -840,24 +1000,24 @@ static struct insn opcode_b3[] = {
{ "tbedr", 0x50, INSTR_RRF_U0FF },
{ "tbdr", 0x51, INSTR_RRF_U0FF },
{ "diebr", 0x53, INSTR_RRF_FUFF },
- { "fiebr", 0x57, INSTR_RRF_U0FF },
- { "thder", 0x58, INSTR_RRE_RR },
- { "thdr", 0x59, INSTR_RRE_RR },
+ { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF },
+ { "thder", 0x58, INSTR_RRE_FF },
+ { "thdr", 0x59, INSTR_RRE_FF },
{ "didbr", 0x5b, INSTR_RRF_FUFF },
- { "fidbr", 0x5f, INSTR_RRF_U0FF },
+ { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF },
{ "lpxr", 0x60, INSTR_RRE_FF },
{ "lnxr", 0x61, INSTR_RRE_FF },
{ "ltxr", 0x62, INSTR_RRE_FF },
{ "lcxr", 0x63, INSTR_RRE_FF },
- { "lxr", 0x65, INSTR_RRE_RR },
+ { "lxr", 0x65, INSTR_RRE_FF },
{ "lexr", 0x66, INSTR_RRE_FF },
- { "fixr", 0x67, INSTR_RRF_U0FF },
+ { "fixr", 0x67, INSTR_RRE_FF },
{ "cxr", 0x69, INSTR_RRE_FF },
- { "lzer", 0x74, INSTR_RRE_R0 },
- { "lzdr", 0x75, INSTR_RRE_R0 },
- { "lzxr", 0x76, INSTR_RRE_R0 },
- { "fier", 0x77, INSTR_RRF_U0FF },
- { "fidr", 0x7f, INSTR_RRF_U0FF },
+ { "lzer", 0x74, INSTR_RRE_F0 },
+ { "lzdr", 0x75, INSTR_RRE_F0 },
+ { "lzxr", 0x76, INSTR_RRE_F0 },
+ { "fier", 0x77, INSTR_RRE_FF },
+ { "fidr", 0x7f, INSTR_RRE_FF },
{ "sfpc", 0x84, INSTR_RRE_RR_OPT },
{ "efpc", 0x8c, INSTR_RRE_RR_OPT },
{ "cefbr", 0x94, INSTR_RRE_RF },
@@ -866,9 +1026,12 @@ static struct insn opcode_b3[] = {
{ "cfebr", 0x98, INSTR_RRF_U0RF },
{ "cfdbr", 0x99, INSTR_RRF_U0RF },
{ "cfxbr", 0x9a, INSTR_RRF_U0RF },
- { "cefr", 0xb4, INSTR_RRE_RF },
- { "cdfr", 0xb5, INSTR_RRE_RF },
- { "cxfr", 0xb6, INSTR_RRE_RF },
+ { "cefr", 0xb4, INSTR_RRE_FR },
+ { "cdfr", 0xb5, INSTR_RRE_FR },
+ { "cxfr", 0xb6, INSTR_RRE_FR },
+ { "cfer", 0xb8, INSTR_RRF_U0RF },
+ { "cfdr", 0xb9, INSTR_RRF_U0RF },
+ { "cfxr", 0xba, INSTR_RRF_U0RF },
{ "", 0, INSTR_INVALID }
};
@@ -910,7 +1073,23 @@ static struct insn opcode_b9[] = {
{ "lhr", 0x27, INSTR_RRE_RR },
{ "cgfr", 0x30, INSTR_RRE_RR },
{ "clgfr", 0x31, INSTR_RRE_RR },
+ { "cfdtr", 0x41, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
{ "bctgr", 0x46, INSTR_RRE_RR },
+ { "cfxtr", 0x49, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
+ { "cdftr", 0x51, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
+ { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
+ { "cxftr", 0x59, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
+ { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
+ { "cgrt", 0x60, INSTR_RRF_U0RR },
+ { "clgrt", 0x61, INSTR_RRF_U0RR },
+ { "crt", 0x72, INSTR_RRF_U0RR },
+ { "clrt", 0x73, INSTR_RRF_U0RR },
{ "ngr", 0x80, INSTR_RRE_RR },
{ "ogr", 0x81, INSTR_RRE_RR },
{ "xgr", 0x82, INSTR_RRE_RR },
@@ -923,32 +1102,34 @@ static struct insn opcode_b9[] = {
{ "slbgr", 0x89, INSTR_RRE_RR },
{ "cspg", 0x8a, INSTR_RRE_RR },
{ "idte", 0x8e, INSTR_RRF_R0RR },
+ { "crdte", 0x8f, INSTR_RRF_RMRR },
{ "llcr", 0x94, INSTR_RRE_RR },
{ "llhr", 0x95, INSTR_RRE_RR },
{ "esea", 0x9d, INSTR_RRE_R0 },
+ { "ptf", 0xa2, INSTR_RRE_R0 },
{ "lptea", 0xaa, INSTR_RRF_RURR },
+ { "rrbm", 0xae, INSTR_RRE_RR },
+ { "pfmf", 0xaf, INSTR_RRE_RR },
{ "cu14", 0xb0, INSTR_RRF_M0RR },
{ "cu24", 0xb1, INSTR_RRF_M0RR },
- { "cu41", 0xb2, INSTR_RRF_M0RR },
- { "cu42", 0xb3, INSTR_RRF_M0RR },
- { "crt", 0x72, INSTR_RRF_U0RR },
- { "cgrt", 0x60, INSTR_RRF_U0RR },
- { "clrt", 0x73, INSTR_RRF_U0RR },
- { "clgrt", 0x61, INSTR_RRF_U0RR },
- { "ptf", 0xa2, INSTR_RRE_R0 },
- { "pfmf", 0xaf, INSTR_RRE_RR },
- { "trte", 0xbf, INSTR_RRF_M0RR },
+ { "cu41", 0xb2, INSTR_RRE_RR },
+ { "cu42", 0xb3, INSTR_RRE_RR },
{ "trtre", 0xbd, INSTR_RRF_M0RR },
+ { "srstu", 0xbe, INSTR_RRE_RR },
+ { "trte", 0xbf, INSTR_RRF_M0RR },
{ "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
{ "shhhr", 0xc9, INSTR_RRF_R0RR2 },
- { "alhhh", 0xca, INSTR_RRF_R0RR2 },
- { "alhhl", 0xca, INSTR_RRF_R0RR2 },
- { "slhhh", 0xcb, INSTR_RRF_R0RR2 },
- { "chhr ", 0xcd, INSTR_RRE_RR },
+ { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 },
+ { "chhr", 0xcd, INSTR_RRE_RR },
{ "clhhr", 0xcf, INSTR_RRE_RR },
+ { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
+ { "pcilg", 0xd2, INSTR_RRE_RR },
+ { "rpcit", 0xd3, INSTR_RRE_RR },
{ "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
{ "shhlr", 0xd9, INSTR_RRF_R0RR2 },
- { "slhhl", 0xdb, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 },
+ { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
{ "chlr", 0xdd, INSTR_RRE_RR },
{ "clhlr", 0xdf, INSTR_RRE_RR },
{ { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
@@ -976,13 +1157,9 @@ static struct insn opcode_b9[] = {
{ "kimd", 0x3e, INSTR_RRE_RR },
{ "klmd", 0x3f, INSTR_RRE_RR },
{ "epsw", 0x8d, INSTR_RRE_RR },
- { "trtt", 0x90, INSTR_RRE_RR },
{ "trtt", 0x90, INSTR_RRF_M0RR },
- { "trto", 0x91, INSTR_RRE_RR },
{ "trto", 0x91, INSTR_RRF_M0RR },
- { "trot", 0x92, INSTR_RRE_RR },
{ "trot", 0x92, INSTR_RRF_M0RR },
- { "troo", 0x93, INSTR_RRE_RR },
{ "troo", 0x93, INSTR_RRF_M0RR },
{ "mlr", 0x96, INSTR_RRE_RR },
{ "dlr", 0x97, INSTR_RRE_RR },
@@ -1013,6 +1190,8 @@ static struct insn opcode_c0[] = {
static struct insn opcode_c2[] = {
#ifdef CONFIG_64BIT
+ { "msgfi", 0x00, INSTR_RIL_RI },
+ { "msfi", 0x01, INSTR_RIL_RI },
{ "slgfi", 0x04, INSTR_RIL_RU },
{ "slfi", 0x05, INSTR_RIL_RU },
{ "agfi", 0x08, INSTR_RIL_RI },
@@ -1023,43 +1202,41 @@ static struct insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU },
- { "msfi", 0x01, INSTR_RIL_RI },
- { "msgfi", 0x00, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c4[] = {
#ifdef CONFIG_64BIT
- { "lrl", 0x0d, INSTR_RIL_RP },
+ { "llhrl", 0x02, INSTR_RIL_RP },
+ { "lghrl", 0x04, INSTR_RIL_RP },
+ { "lhrl", 0x05, INSTR_RIL_RP },
+ { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
+ { "sthrl", 0x07, INSTR_RIL_RP },
{ "lgrl", 0x08, INSTR_RIL_RP },
+ { "stgrl", 0x0b, INSTR_RIL_RP },
{ "lgfrl", 0x0c, INSTR_RIL_RP },
- { "lhrl", 0x05, INSTR_RIL_RP },
- { "lghrl", 0x04, INSTR_RIL_RP },
+ { "lrl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
- { "llhrl", 0x02, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
- { "stgrl", 0x0b, INSTR_RIL_RP },
- { "sthrl", 0x07, INSTR_RIL_RP },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_c6[] = {
#ifdef CONFIG_64BIT
- { "crl", 0x0d, INSTR_RIL_RP },
- { "cgrl", 0x08, INSTR_RIL_RP },
- { "cgfrl", 0x0c, INSTR_RIL_RP },
- { "chrl", 0x05, INSTR_RIL_RP },
+ { "exrl", 0x00, INSTR_RIL_RP },
+ { "pfdrl", 0x02, INSTR_RIL_UP },
{ "cghrl", 0x04, INSTR_RIL_RP },
- { "clrl", 0x0f, INSTR_RIL_RP },
+ { "chrl", 0x05, INSTR_RIL_RP },
+ { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
+ { "clhrl", 0x07, INSTR_RIL_RP },
+ { "cgrl", 0x08, INSTR_RIL_RP },
{ "clgrl", 0x0a, INSTR_RIL_RP },
+ { "cgfrl", 0x0c, INSTR_RIL_RP },
+ { "crl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
- { "clhrl", 0x07, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
- { "pfdrl", 0x02, INSTR_RIL_UP },
- { "exrl", 0x00, INSTR_RIL_RP },
+ { "clrl", 0x0f, INSTR_RIL_RP },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1070,7 +1247,7 @@ static struct insn opcode_c8[] = {
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
- { "lpdg ", 0x05, INSTR_SSF_RRDRD2 },
+ { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1080,9 +1257,9 @@ static struct insn opcode_cc[] = {
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
{ "alsih", 0x0a, INSTR_RIL_RI },
- { "alsih", 0x0b, INSTR_RIL_RI },
+ { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
{ "cih", 0x0d, INSTR_RIL_RI },
- { "clih ", 0x0f, INSTR_RIL_RI },
+ { "clih", 0x0f, INSTR_RIL_RI },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1116,11 +1293,15 @@ static struct insn opcode_e3[] = {
{ "cg", 0x20, INSTR_RXY_RRRD },
{ "clg", 0x21, INSTR_RXY_RRRD },
{ "stg", 0x24, INSTR_RXY_RRRD },
+ { "ntstg", 0x25, INSTR_RXY_RRRD },
{ "cvdy", 0x26, INSTR_RXY_RRRD },
{ "cvdg", 0x2e, INSTR_RXY_RRRD },
{ "strvg", 0x2f, INSTR_RXY_RRRD },
{ "cgf", 0x30, INSTR_RXY_RRRD },
{ "clgf", 0x31, INSTR_RXY_RRRD },
+ { "ltgf", 0x32, INSTR_RXY_RRRD },
+ { "cgh", 0x34, INSTR_RXY_RRRD },
+ { "pfd", 0x36, INSTR_RXY_URRD },
{ "strvh", 0x3f, INSTR_RXY_RRRD },
{ "bctg", 0x46, INSTR_RXY_RRRD },
{ "sty", 0x50, INSTR_RXY_RRRD },
@@ -1133,21 +1314,25 @@ static struct insn opcode_e3[] = {
{ "cy", 0x59, INSTR_RXY_RRRD },
{ "ay", 0x5a, INSTR_RXY_RRRD },
{ "sy", 0x5b, INSTR_RXY_RRRD },
+ { "mfy", 0x5c, INSTR_RXY_RRRD },
{ "aly", 0x5e, INSTR_RXY_RRRD },
{ "sly", 0x5f, INSTR_RXY_RRRD },
{ "sthy", 0x70, INSTR_RXY_RRRD },
{ "lay", 0x71, INSTR_RXY_RRRD },
{ "stcy", 0x72, INSTR_RXY_RRRD },
{ "icy", 0x73, INSTR_RXY_RRRD },
+ { "laey", 0x75, INSTR_RXY_RRRD },
{ "lb", 0x76, INSTR_RXY_RRRD },
{ "lgb", 0x77, INSTR_RXY_RRRD },
{ "lhy", 0x78, INSTR_RXY_RRRD },
{ "chy", 0x79, INSTR_RXY_RRRD },
{ "ahy", 0x7a, INSTR_RXY_RRRD },
{ "shy", 0x7b, INSTR_RXY_RRRD },
+ { "mhy", 0x7c, INSTR_RXY_RRRD },
{ "ng", 0x80, INSTR_RXY_RRRD },
{ "og", 0x81, INSTR_RXY_RRRD },
{ "xg", 0x82, INSTR_RXY_RRRD },
+ { "lgat", 0x85, INSTR_RXY_RRRD },
{ "mlg", 0x86, INSTR_RXY_RRRD },
{ "dlg", 0x87, INSTR_RXY_RRRD },
{ "alcg", 0x88, INSTR_RXY_RRRD },
@@ -1158,23 +1343,22 @@ static struct insn opcode_e3[] = {
{ "llgh", 0x91, INSTR_RXY_RRRD },
{ "llc", 0x94, INSTR_RXY_RRRD },
{ "llh", 0x95, INSTR_RXY_RRRD },
- { "cgh", 0x34, INSTR_RXY_RRRD },
- { "laey", 0x75, INSTR_RXY_RRRD },
- { "ltgf", 0x32, INSTR_RXY_RRRD },
- { "mfy", 0x5c, INSTR_RXY_RRRD },
- { "mhy", 0x7c, INSTR_RXY_RRRD },
- { "pfd", 0x36, INSTR_RXY_URRD },
+ { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD },
+ { "lat", 0x9f, INSTR_RXY_RRRD },
{ "lbh", 0xc0, INSTR_RXY_RRRD },
{ "llch", 0xc2, INSTR_RXY_RRRD },
{ "stch", 0xc3, INSTR_RXY_RRRD },
{ "lhh", 0xc4, INSTR_RXY_RRRD },
{ "llhh", 0xc6, INSTR_RXY_RRRD },
{ "sthh", 0xc7, INSTR_RXY_RRRD },
+ { "lfhat", 0xc8, INSTR_RXY_RRRD },
{ "lfh", 0xca, INSTR_RXY_RRRD },
{ "stfh", 0xcb, INSTR_RXY_RRRD },
{ "chf", 0xcd, INSTR_RXY_RRRD },
{ "clhf", 0xcf, INSTR_RXY_RRRD },
- { "ntstg", 0x25, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
+ { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1189,15 +1373,15 @@ static struct insn opcode_e3[] = {
static struct insn opcode_e5[] = {
#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
+ { "mvhhi", 0x44, INSTR_SIL_RDI },
+ { "mvghi", 0x48, INSTR_SIL_RDI },
+ { "mvhi", 0x4c, INSTR_SIL_RDI },
{ "chhsi", 0x54, INSTR_SIL_RDI },
- { "chsi", 0x5c, INSTR_SIL_RDI },
- { "cghsi", 0x58, INSTR_SIL_RDI },
{ { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
- { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
+ { "cghsi", 0x58, INSTR_SIL_RDI },
{ { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
- { "mvhhi", 0x44, INSTR_SIL_RDI },
- { "mvhi", 0x4c, INSTR_SIL_RDI },
- { "mvghi", 0x48, INSTR_SIL_RDI },
+ { "chsi", 0x5c, INSTR_SIL_RDI },
+ { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
#endif
@@ -1220,9 +1404,11 @@ static struct insn opcode_eb[] = {
{ "rllg", 0x1c, INSTR_RSY_RRRD },
{ "clmh", 0x20, INSTR_RSY_RURD },
{ "clmy", 0x21, INSTR_RSY_RURD },
+ { "clt", 0x23, INSTR_RSY_RURD },
{ "stmg", 0x24, INSTR_RSY_RRRD },
{ "stctg", 0x25, INSTR_RSY_CCRD },
{ "stmh", 0x26, INSTR_RSY_RRRD },
+ { "clgt", 0x2b, INSTR_RSY_RURD },
{ "stcmh", 0x2c, INSTR_RSY_RURD },
{ "stcmy", 0x2d, INSTR_RSY_RURD },
{ "lctlg", 0x2f, INSTR_RSY_CCRD },
@@ -1231,16 +1417,17 @@ static struct insn opcode_eb[] = {
{ "cdsg", 0x3e, INSTR_RSY_RRRD },
{ "bxhg", 0x44, INSTR_RSY_RRRD },
{ "bxleg", 0x45, INSTR_RSY_RRRD },
+ { "ecag", 0x4c, INSTR_RSY_RRRD },
{ "tmy", 0x51, INSTR_SIY_URD },
{ "mviy", 0x52, INSTR_SIY_URD },
{ "niy", 0x54, INSTR_SIY_URD },
{ "cliy", 0x55, INSTR_SIY_URD },
{ "oiy", 0x56, INSTR_SIY_URD },
{ "xiy", 0x57, INSTR_SIY_URD },
- { "lric", 0x60, INSTR_RSY_RDRM },
- { "stric", 0x61, INSTR_RSY_RDRM },
- { "mric", 0x62, INSTR_RSY_RDRM },
- { "icmh", 0x80, INSTR_RSE_RURD },
+ { "asi", 0x6a, INSTR_SIY_IRD },
+ { "alsi", 0x6e, INSTR_SIY_IRD },
+ { "agsi", 0x7a, INSTR_SIY_IRD },
+ { "algsi", 0x7e, INSTR_SIY_IRD },
{ "icmh", 0x80, INSTR_RSY_RURD },
{ "icmy", 0x81, INSTR_RSY_RURD },
{ "clclu", 0x8f, INSTR_RSY_RRRD },
@@ -1249,11 +1436,8 @@ static struct insn opcode_eb[] = {
{ "lmy", 0x98, INSTR_RSY_RRRD },
{ "lamy", 0x9a, INSTR_RSY_AARD },
{ "stamy", 0x9b, INSTR_RSY_AARD },
- { "asi", 0x6a, INSTR_SIY_IRD },
- { "agsi", 0x7a, INSTR_SIY_IRD },
- { "alsi", 0x6e, INSTR_SIY_IRD },
- { "algsi", 0x7e, INSTR_SIY_IRD },
- { "ecag", 0x4c, INSTR_RSY_RRRD },
+ { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD },
+ { "sic", 0xd1, INSTR_RSY_RRRD },
{ "srak", 0xdc, INSTR_RSY_RRRD },
{ "slak", 0xdd, INSTR_RSY_RRRD },
{ "srlk", 0xde, INSTR_RSY_RRRD },
@@ -1272,6 +1456,9 @@ static struct insn opcode_eb[] = {
{ "lax", 0xf7, INSTR_RSY_RRRD },
{ "laa", 0xf8, INSTR_RSY_RRRD },
{ "laal", 0xfa, INSTR_RSY_RRRD },
+ { "lric", 0x60, INSTR_RSY_RDRM },
+ { "stric", 0x61, INSTR_RSY_RDRM },
+ { "mric", 0x62, INSTR_RSY_RDRM },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1283,36 +1470,37 @@ static struct insn opcode_ec[] = {
#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
- { "crb", 0xf6, INSTR_RRS_RRRDU },
- { "cgrb", 0xe4, INSTR_RRS_RRRDU },
- { "crj", 0x76, INSTR_RIE_RRPU },
+ { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
+ { "rnsbg", 0x54, INSTR_RIE_RRUUU },
+ { "risbg", 0x55, INSTR_RIE_RRUUU },
+ { "rosbg", 0x56, INSTR_RIE_RRUUU },
+ { "rxsbg", 0x57, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
+ { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
{ "cgrj", 0x64, INSTR_RIE_RRPU },
- { "cib", 0xfe, INSTR_RIS_RURDI },
- { "cgib", 0xfc, INSTR_RIS_RURDI },
- { "cij", 0x7e, INSTR_RIE_RUPI },
- { "cgij", 0x7c, INSTR_RIE_RUPI },
- { "cit", 0x72, INSTR_RIE_R0IU },
+ { "clgrj", 0x65, INSTR_RIE_RRPU },
{ "cgit", 0x70, INSTR_RIE_R0IU },
- { "clrb", 0xf7, INSTR_RRS_RRRDU },
- { "clgrb", 0xe5, INSTR_RRS_RRRDU },
+ { "clgit", 0x71, INSTR_RIE_R0UU },
+ { "cit", 0x72, INSTR_RIE_R0IU },
+ { "clfit", 0x73, INSTR_RIE_R0UU },
+ { "crj", 0x76, INSTR_RIE_RRPU },
{ "clrj", 0x77, INSTR_RIE_RRPU },
- { "clgrj", 0x65, INSTR_RIE_RRPU },
- { "clib", 0xff, INSTR_RIS_RURDU },
- { "clgib", 0xfd, INSTR_RIS_RURDU },
- { "clij", 0x7f, INSTR_RIE_RUPU },
+ { "cgij", 0x7c, INSTR_RIE_RUPI },
{ "clgij", 0x7d, INSTR_RIE_RUPU },
- { "clfit", 0x73, INSTR_RIE_R0UU },
- { "clgit", 0x71, INSTR_RIE_R0UU },
- { "rnsbg", 0x54, INSTR_RIE_RRUUU },
- { "rxsbg", 0x57, INSTR_RIE_RRUUU },
- { "rosbg", 0x56, INSTR_RIE_RRUUU },
- { "risbg", 0x55, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
+ { "cij", 0x7e, INSTR_RIE_RUPI },
+ { "clij", 0x7f, INSTR_RIE_RUPU },
{ "ahik", 0xd8, INSTR_RIE_RRI0 },
{ "aghik", 0xd9, INSTR_RIE_RRI0 },
{ { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
{ { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
+ { "cgrb", 0xe4, INSTR_RRS_RRRDU },
+ { "clgrb", 0xe5, INSTR_RRS_RRRDU },
+ { "crb", 0xf6, INSTR_RRS_RRRDU },
+ { "clrb", 0xf7, INSTR_RRS_RRRDU },
+ { "cgib", 0xfc, INSTR_RIS_RURDI },
+ { "clgib", 0xfd, INSTR_RIS_RURDU },
+ { "cib", 0xfe, INSTR_RIS_RURDI },
+ { "clib", 0xff, INSTR_RIS_RURDU },
#endif
{ "", 0, INSTR_INVALID }
};
@@ -1325,20 +1513,24 @@ static struct insn opcode_ed[] = {
{ "my", 0x3b, INSTR_RXF_FRRDF },
{ "mayh", 0x3c, INSTR_RXF_FRRDF },
{ "myh", 0x3d, INSTR_RXF_FRRDF },
- { "ley", 0x64, INSTR_RXY_FRRD },
- { "ldy", 0x65, INSTR_RXY_FRRD },
- { "stey", 0x66, INSTR_RXY_FRRD },
- { "stdy", 0x67, INSTR_RXY_FRRD },
{ "sldt", 0x40, INSTR_RXF_FRRDF },
- { "slxt", 0x48, INSTR_RXF_FRRDF },
{ "srdt", 0x41, INSTR_RXF_FRRDF },
+ { "slxt", 0x48, INSTR_RXF_FRRDF },
{ "srxt", 0x49, INSTR_RXF_FRRDF },
{ "tdcet", 0x50, INSTR_RXE_FRRD },
- { "tdcdt", 0x54, INSTR_RXE_FRRD },
- { "tdcxt", 0x58, INSTR_RXE_FRRD },
{ "tdget", 0x51, INSTR_RXE_FRRD },
+ { "tdcdt", 0x54, INSTR_RXE_FRRD },
{ "tdgdt", 0x55, INSTR_RXE_FRRD },
+ { "tdcxt", 0x58, INSTR_RXE_FRRD },
{ "tdgxt", 0x59, INSTR_RXE_FRRD },
+ { "ley", 0x64, INSTR_RXY_FRRD },
+ { "ldy", 0x65, INSTR_RXY_FRRD },
+ { "stey", 0x66, INSTR_RXY_FRRD },
+ { "stdy", 0x67, INSTR_RXY_FRRD },
+ { "czdt", 0xa8, INSTR_RSL_LRDFU },
+ { "czxt", 0xa9, INSTR_RSL_LRDFU },
+ { "cdzt", 0xaa, INSTR_RSL_LRDFU },
+ { "cxzt", 0xab, INSTR_RSL_LRDFU },
#endif
{ "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD },
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index aa8f2ba6289..55022852326 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
+ tm __TI_flags+3(%r12),_TIF_PER_TRAP
+ jo sysc_singlestep
tm __TI_flags+3(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
- tm __TI_flags+3(%r12),_TIF_PER_TRAP
- jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@@ -259,7 +259,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
- ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
@@ -286,7 +285,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
- ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
+ ni __TI_flags+3(%r12),255-_TIF_PER_TRAP
lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_per_trap)
la %r14,BASED(sysc_return)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index d8251b98f17..2711936fe70 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -6,7 +6,6 @@
#include <asm/ptrace.h>
#include <asm/cputime.h>
-extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
void system_call(void);
@@ -25,6 +24,26 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_asce_exception(struct pt_regs *regs);
+void addressing_exception(struct pt_regs *regs);
+void data_exception(struct pt_regs *regs);
+void default_trap_handler(struct pt_regs *regs);
+void divide_exception(struct pt_regs *regs);
+void execute_exception(struct pt_regs *regs);
+void hfp_divide_exception(struct pt_regs *regs);
+void hfp_overflow_exception(struct pt_regs *regs);
+void hfp_significance_exception(struct pt_regs *regs);
+void hfp_sqrt_exception(struct pt_regs *regs);
+void hfp_underflow_exception(struct pt_regs *regs);
+void illegal_op(struct pt_regs *regs);
+void operand_exception(struct pt_regs *regs);
+void overflow_exception(struct pt_regs *regs);
+void privileged_op(struct pt_regs *regs);
+void space_switch_exception(struct pt_regs *regs);
+void special_op_exception(struct pt_regs *regs);
+void specification_exception(struct pt_regs *regs);
+void transaction_exception(struct pt_regs *regs);
+void translation_exception(struct pt_regs *regs);
+
void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 499e95e90f3..6d34e0c97a3 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#endif
.endm
- .macro HANDLE_SIE_INTERCEPT scratch
+ .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
tmhh %r8,0x0001 # interrupting from user ?
jnz .+42
lgr \scratch,%r9
slg \scratch,BASED(.Lsie_loop)
clg \scratch,BASED(.Lsie_length)
+ .if \pgmcheck
+ # Some program interrupts are suppressing (e.g. protection).
+ # We must also check the instruction after SIE in that case.
+ # do_protection_exception will rewind to rewind_pad
+ jh .+22
+ .else
jhe .+22
+ .endif
lg %r9,BASED(.Lsie_loop)
SPP BASED(.Lhost_id) # set host id
#endif
@@ -262,12 +269,12 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
+ tm __TI_flags+7(%r12),_TIF_PER_TRAP
+ jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
- tm __TI_flags+7(%r12),_TIF_PER_TRAP
- jo sysc_singlestep
j sysc_return # beware of critical section cleanup
#
@@ -288,7 +295,6 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
- ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL
@@ -313,7 +319,7 @@ sysc_notify_resume:
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
- ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
+ ni __TI_flags+7(%r12),255-_TIF_PER_TRAP
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
jg do_per_trap
@@ -375,7 +381,7 @@ ENTRY(pgm_check_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_PGM_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,1
tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
tmhh %r8,0x4000 # PER bit set in old PSW ?
@@ -413,9 +419,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
- sll %r10,3
+ sll %r10,2
je sysc_return
- lg %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r1,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j sysc_return
@@ -451,7 +457,7 @@ ENTRY(io_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_IO_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user?
jz io_skip
@@ -597,7 +603,7 @@ ENTRY(ext_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_EXT_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
tmhh %r8,0x0001 # interrupting from user ?
jz ext_skip
@@ -645,7 +651,7 @@ ENTRY(mcck_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,system_call
lmg %r8,%r9,__LC_MCK_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14
+ HANDLE_SIE_INTERCEPT %r14,0
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
@@ -944,6 +950,13 @@ ENTRY(sie64a)
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions in the sie_loop should not cause program interrupts. So
+# let's use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+rewind_pad:
+ nop 0
sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
@@ -983,6 +996,7 @@ sie_fault:
.Lhost_id:
.quad 0
+ EX_TABLE(rewind_pad,sie_fault)
EX_TABLE(sie_loop,sie_fault)
#endif
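The EX_TABLE entries added above pair rewind_pad (and sie_loop) with sie_fault: when a suppressing program check rewinds the PSW by the SIE instruction length, the rewound address still lands on a registered instruction and execution is redirected to the fixup. A much-simplified user-space sketch of that address-to-fixup lookup; the real exception table stores its entries differently and is searched by the fault handler, and the addresses below are invented:

#include <stdio.h>

struct extable_demo {
        unsigned long insn;     /* address that may fault */
        unsigned long fixup;    /* where to continue instead */
};

static const struct extable_demo extable[] = {
        { 0x1000, 0x2000 },     /* rewind_pad -> sie_fault */
        { 0x1004, 0x2000 },     /* sie_loop   -> sie_fault */
};

static unsigned long search_fixup(unsigned long addr)
{
        unsigned int i;

        for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
                if (extable[i].insn == addr)
                        return extable[i].fixup;
        return 0;       /* no fixup: a real fault would be fatal here */
}

int main(void)
{
        printf("fixup for %#lx is %#lx\n", 0x1000UL, search_fixup(0x1000));
        return 0;
}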
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 984726cbce1..fd8db63dfc9 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -393,30 +393,35 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
stck __LC_LAST_UPDATE_CLOCK
- spt 5f-.LPG0(%r13)
- mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
+ spt 6f-.LPG0(%r13)
+ mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
- la %r0,0
+ la %r0,1
.insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended
-0: l %r0,__LC_STFL_FAC_LIST
- n %r0,2f+8-.LPG0(%r13)
- cl %r0,2f+8-.LPG0(%r13)
- jne 1f
- l %r0,__LC_STFL_FAC_LIST+4
- n %r0,2f+12-.LPG0(%r13)
- cl %r0,2f+12-.LPG0(%r13)
- je 3f
-1: l %r15,.Lstack-.LPG0(%r13)
+ # verify if all required facilities are supported by the machine
+0: la %r1,__LC_STFL_FAC_LIST
+ la %r2,3f+8-.LPG0(%r13)
+ l %r3,0(%r2)
+1: l %r0,0(%r1)
+ n %r0,4(%r2)
+ cl %r0,4(%r2)
+ jne 2f
+ la %r1,4(%r1)
+ la %r2,4(%r2)
+ ahi %r3,-1
+ jnz 1b
+ j 4f
+2: l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-96
la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13)
basr %r14,%r3
- lpsw 2f-.LPG0(%r13) # machine type not good enough, crash
+ lpsw 3f-.LPG0(%r13) # machine type not good enough, crash
.Lals_string:
.asciz "The Linux kernel requires more recent processor hardware"
.Lsclp_print:
@@ -424,33 +429,42 @@ ENTRY(startup_kdump)
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.align 16
-2: .long 0x000a0000,0x8badcccc
+3: .long 0x000a0000,0x8badcccc
+
+# List of facilities that are required. If not all facilities are present
+# the kernel will crash. Format is number of facility words with bits set,
+# followed by the facility words.
+
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_Z196)
- .long 0xc100efe3, 0xf46c0000
+#if defined(CONFIG_MARCH_ZEC12)
+ .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+#elif defined(CONFIG_MARCH_Z196)
+ .long 2, 0xc100efe3, 0xf46c0000
#elif defined(CONFIG_MARCH_Z10)
- .long 0xc100efe3, 0xf0680000
+ .long 2, 0xc100efe3, 0xf0680000
#elif defined(CONFIG_MARCH_Z9_109)
- .long 0xc100efc3, 0x00000000
+ .long 1, 0xc100efc3
#elif defined(CONFIG_MARCH_Z990)
- .long 0xc0002000, 0x00000000
+ .long 1, 0xc0002000
#elif defined(CONFIG_MARCH_Z900)
- .long 0xc0000000, 0x00000000
+ .long 1, 0xc0000000
#endif
#else
-#if defined(CONFIG_MARCH_Z196)
- .long 0x8100c880, 0x00000000
+#if defined(CONFIG_MARCH_ZEC12)
+ .long 1, 0x8100c880
+#elif defined(CONFIG_MARCH_Z196)
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z10)
- .long 0x8100c880, 0x00000000
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z9_109)
- .long 0x8100c880, 0x00000000
+ .long 1, 0x8100c880
#elif defined(CONFIG_MARCH_Z990)
- .long 0x80002000, 0x00000000
+ .long 1, 0x80002000
#elif defined(CONFIG_MARCH_Z900)
- .long 0x80000000, 0x00000000
+ .long 1, 0x80000000
#endif
#endif
-3:
+4:
#endif
#ifdef CONFIG_64BIT
@@ -459,14 +473,14 @@ ENTRY(startup_kdump)
jg startup_continue
#else
/* Continue with 31bit startup code in head31.S */
- l %r13,4f-.LPG0(%r13)
+ l %r13,5f-.LPG0(%r13)
b 0(%r13)
.align 8
-4: .long startup_continue
+5: .long startup_continue
#endif
.align 8
-5: .long 0x7fffffff,0xffffffff
+6: .long 0x7fffffff,0xffffffff
#include "head_kdump.S"
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 6cdc55b26d6..bf24293970c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -58,6 +58,8 @@ static const struct irq_class intrclass_names[] = {
[IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+ [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
+ [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
};
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
new file mode 100644
index 00000000000..14bdecb6192
--- /dev/null
+++ b/arch/s390/kernel/pgm_check.S
@@ -0,0 +1,152 @@
+/*
+ * Program check table.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_32BIT
+#define PGM_CHECK_64BIT(handler) .long default_trap_handler
+#else
+#define PGM_CHECK_64BIT(handler) .long handler
+#endif
+
+#define PGM_CHECK(handler) .long handler
+#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
+
+/*
+ * The program check table contains exactly 128 (0x00-0x7f) entries. Each
+ * line defines the 31 and/or 64 bit function to be called corresponding
+ * to the program check interruption code.
+ */
+.section .rodata, "a"
+ENTRY(pgm_check_table)
+PGM_CHECK_DEFAULT /* 00 */
+PGM_CHECK(illegal_op) /* 01 */
+PGM_CHECK(privileged_op) /* 02 */
+PGM_CHECK(execute_exception) /* 03 */
+PGM_CHECK(do_protection_exception) /* 04 */
+PGM_CHECK(addressing_exception) /* 05 */
+PGM_CHECK(specification_exception) /* 06 */
+PGM_CHECK(data_exception) /* 07 */
+PGM_CHECK(overflow_exception) /* 08 */
+PGM_CHECK(divide_exception) /* 09 */
+PGM_CHECK(overflow_exception) /* 0a */
+PGM_CHECK(divide_exception) /* 0b */
+PGM_CHECK(hfp_overflow_exception) /* 0c */
+PGM_CHECK(hfp_underflow_exception) /* 0d */
+PGM_CHECK(hfp_significance_exception) /* 0e */
+PGM_CHECK(hfp_divide_exception) /* 0f */
+PGM_CHECK(do_dat_exception) /* 10 */
+PGM_CHECK(do_dat_exception) /* 11 */
+PGM_CHECK(translation_exception) /* 12 */
+PGM_CHECK(special_op_exception) /* 13 */
+PGM_CHECK_DEFAULT /* 14 */
+PGM_CHECK(operand_exception) /* 15 */
+PGM_CHECK_DEFAULT /* 16 */
+PGM_CHECK_DEFAULT /* 17 */
+PGM_CHECK_64BIT(transaction_exception) /* 18 */
+PGM_CHECK_DEFAULT /* 19 */
+PGM_CHECK_DEFAULT /* 1a */
+PGM_CHECK_DEFAULT /* 1b */
+PGM_CHECK(space_switch_exception) /* 1c */
+PGM_CHECK(hfp_sqrt_exception) /* 1d */
+PGM_CHECK_DEFAULT /* 1e */
+PGM_CHECK_DEFAULT /* 1f */
+PGM_CHECK_DEFAULT /* 20 */
+PGM_CHECK_DEFAULT /* 21 */
+PGM_CHECK_DEFAULT /* 22 */
+PGM_CHECK_DEFAULT /* 23 */
+PGM_CHECK_DEFAULT /* 24 */
+PGM_CHECK_DEFAULT /* 25 */
+PGM_CHECK_DEFAULT /* 26 */
+PGM_CHECK_DEFAULT /* 27 */
+PGM_CHECK_DEFAULT /* 28 */
+PGM_CHECK_DEFAULT /* 29 */
+PGM_CHECK_DEFAULT /* 2a */
+PGM_CHECK_DEFAULT /* 2b */
+PGM_CHECK_DEFAULT /* 2c */
+PGM_CHECK_DEFAULT /* 2d */
+PGM_CHECK_DEFAULT /* 2e */
+PGM_CHECK_DEFAULT /* 2f */
+PGM_CHECK_DEFAULT /* 30 */
+PGM_CHECK_DEFAULT /* 31 */
+PGM_CHECK_DEFAULT /* 32 */
+PGM_CHECK_DEFAULT /* 33 */
+PGM_CHECK_DEFAULT /* 34 */
+PGM_CHECK_DEFAULT /* 35 */
+PGM_CHECK_DEFAULT /* 36 */
+PGM_CHECK_DEFAULT /* 37 */
+PGM_CHECK_64BIT(do_asce_exception) /* 38 */
+PGM_CHECK_64BIT(do_dat_exception) /* 39 */
+PGM_CHECK_64BIT(do_dat_exception) /* 3a */
+PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+PGM_CHECK_DEFAULT /* 3c */
+PGM_CHECK_DEFAULT /* 3d */
+PGM_CHECK_DEFAULT /* 3e */
+PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK_DEFAULT /* 40 */
+PGM_CHECK_DEFAULT /* 41 */
+PGM_CHECK_DEFAULT /* 42 */
+PGM_CHECK_DEFAULT /* 43 */
+PGM_CHECK_DEFAULT /* 44 */
+PGM_CHECK_DEFAULT /* 45 */
+PGM_CHECK_DEFAULT /* 46 */
+PGM_CHECK_DEFAULT /* 47 */
+PGM_CHECK_DEFAULT /* 48 */
+PGM_CHECK_DEFAULT /* 49 */
+PGM_CHECK_DEFAULT /* 4a */
+PGM_CHECK_DEFAULT /* 4b */
+PGM_CHECK_DEFAULT /* 4c */
+PGM_CHECK_DEFAULT /* 4d */
+PGM_CHECK_DEFAULT /* 4e */
+PGM_CHECK_DEFAULT /* 4f */
+PGM_CHECK_DEFAULT /* 50 */
+PGM_CHECK_DEFAULT /* 51 */
+PGM_CHECK_DEFAULT /* 52 */
+PGM_CHECK_DEFAULT /* 53 */
+PGM_CHECK_DEFAULT /* 54 */
+PGM_CHECK_DEFAULT /* 55 */
+PGM_CHECK_DEFAULT /* 56 */
+PGM_CHECK_DEFAULT /* 57 */
+PGM_CHECK_DEFAULT /* 58 */
+PGM_CHECK_DEFAULT /* 59 */
+PGM_CHECK_DEFAULT /* 5a */
+PGM_CHECK_DEFAULT /* 5b */
+PGM_CHECK_DEFAULT /* 5c */
+PGM_CHECK_DEFAULT /* 5d */
+PGM_CHECK_DEFAULT /* 5e */
+PGM_CHECK_DEFAULT /* 5f */
+PGM_CHECK_DEFAULT /* 60 */
+PGM_CHECK_DEFAULT /* 61 */
+PGM_CHECK_DEFAULT /* 62 */
+PGM_CHECK_DEFAULT /* 63 */
+PGM_CHECK_DEFAULT /* 64 */
+PGM_CHECK_DEFAULT /* 65 */
+PGM_CHECK_DEFAULT /* 66 */
+PGM_CHECK_DEFAULT /* 67 */
+PGM_CHECK_DEFAULT /* 68 */
+PGM_CHECK_DEFAULT /* 69 */
+PGM_CHECK_DEFAULT /* 6a */
+PGM_CHECK_DEFAULT /* 6b */
+PGM_CHECK_DEFAULT /* 6c */
+PGM_CHECK_DEFAULT /* 6d */
+PGM_CHECK_DEFAULT /* 6e */
+PGM_CHECK_DEFAULT /* 6f */
+PGM_CHECK_DEFAULT /* 70 */
+PGM_CHECK_DEFAULT /* 71 */
+PGM_CHECK_DEFAULT /* 72 */
+PGM_CHECK_DEFAULT /* 73 */
+PGM_CHECK_DEFAULT /* 74 */
+PGM_CHECK_DEFAULT /* 75 */
+PGM_CHECK_DEFAULT /* 76 */
+PGM_CHECK_DEFAULT /* 77 */
+PGM_CHECK_DEFAULT /* 78 */
+PGM_CHECK_DEFAULT /* 79 */
+PGM_CHECK_DEFAULT /* 7a */
+PGM_CHECK_DEFAULT /* 7b */
+PGM_CHECK_DEFAULT /* 7c */
+PGM_CHECK_DEFAULT /* 7d */
+PGM_CHECK_DEFAULT /* 7e */
+PGM_CHECK_DEFAULT /* 7f */
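pgm_check_handler in entry64.S (earlier in this patch) masks the program interruption code with 0x7f and indexes this table with it; since every entry is now a 4-byte .long, the index is scaled with sll by 2 and loaded with lgf. A user-space sketch of that dispatch; the pt_regs stand-in and the two handlers are simplified placeholders, not the kernel's:

#include <stdio.h>

struct pt_regs_demo {
        unsigned int int_code;
};

typedef void (*pgm_handler_t)(struct pt_regs_demo *);

static void default_trap_handler(struct pt_regs_demo *regs)
{
        printf("unhandled program check 0x%02x\n", regs->int_code & 0x7f);
}

static void illegal_op(struct pt_regs_demo *regs)
{
        printf("illegal operation, code 0x%02x\n", regs->int_code & 0x7f);
}

/* [0 ... 127] range initializers are a GCC extension */
static pgm_handler_t pgm_check_table[128] = {
        [0 ... 127] = default_trap_handler,
        [0x01]      = illegal_op,
};

int main(void)
{
        struct pt_regs_demo regs = { .int_code = 0x01 };

        /* mask to seven bits and dispatch, mirroring "nill %r10,0x007f" */
        pgm_check_table[regs.int_code & 0x7f](&regs);
        return 0;
}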
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b1f2be9aaaa..2568590973a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -777,40 +777,6 @@ static void __init reserve_crashkernel(void)
#endif
}
-static void __init init_storage_keys(unsigned long start, unsigned long end)
-{
- unsigned long boundary, function, size;
-
- while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
- if (MACHINE_HAS_EDAT1) {
- /* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
- size = 1UL << 20;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
- page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
- start += PAGE_SIZE;
- }
-}
-
static void __init setup_memory(void)
{
unsigned long bootmap_size;
@@ -889,7 +855,7 @@ static void __init setup_memory(void)
memblock_add_node(PFN_PHYS(start_chunk),
PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
- init_storage_keys(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
+ storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
}
psw_set_key(PAGE_DEFAULT_KEY);
@@ -1040,6 +1006,9 @@ static void __init setup_hwcaps(void)
case 0x2818:
strcpy(elf_platform, "z196");
break;
+ case 0x2827:
+ strcpy(elf_platform, "zEC12");
+ break;
}
}
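The open-coded init_storage_keys() removed above (replaced by storage_key_init_range()) stepped through memory one large frame at a time, computing the next frame boundary as (start + size) & ~(size - 1) before handing whole frames to pfmf. A tiny sketch of just that alignment step, with example values:

#include <stdio.h>

/* first size-aligned address strictly above addr; size is a power of two */
static unsigned long next_boundary(unsigned long addr, unsigned long size)
{
        return (addr + size) & ~(size - 1);
}

int main(void)
{
        unsigned long start = 0x12345678UL;
        unsigned long frame = 1UL << 20;        /* 1MB frame, EDAT1 case */

        printf("next 1MB boundary after %#lx is %#lx\n",
               start, next_boundary(start, frame));
        return 0;
}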
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d1259d87507..c3ff70a7b24 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -461,6 +461,8 @@ void do_signal(struct pt_regs *regs)
/* Restart system call with magic TIF bit. */
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
+ if (test_thread_flag(TIF_SINGLE_STEP))
+ set_thread_flag(TIF_PER_TRAP);
break;
}
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index dd55f7c2010..f1aba87cceb 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -29,48 +29,38 @@ struct mask_info {
cpumask_t mask;
};
-static int topology_enabled = 1;
+static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static void set_topology_timer(void);
-static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the core linked list */
-static DEFINE_SPINLOCK(topology_lock);
-static struct mask_info core_info;
-cpumask_t cpu_core_map[NR_CPUS];
-unsigned char cpu_core_id[NR_CPUS];
-unsigned char cpu_socket_id[NR_CPUS];
+static int topology_enabled = 1;
+static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the socket and book linked lists */
+static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info socket_info;
static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
+
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
- cpumask_clear(&mask);
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_copy(&mask, cpumask_of(cpu));
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
return mask;
+ for (; info; info = info->next) {
+ if (cpumask_test_cpu(cpu, &info->mask))
+ return info->mask;
}
- while (info) {
- if (cpumask_test_cpu(cpu, &info->mask)) {
- mask = info->mask;
- break;
- }
- info = info->next;
- }
- if (cpumask_empty(&mask))
- cpumask_copy(&mask, cpumask_of(cpu));
return mask;
}
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
- struct mask_info *core,
- int one_core_per_cpu)
+ struct mask_info *socket,
+ int one_socket_per_cpu)
{
unsigned int cpu;
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
lcpu = smp_find_processor_id(rcpu);
- if (lcpu >= 0) {
- cpumask_set_cpu(lcpu, &book->mask);
- cpu_book_id[lcpu] = book->id;
- cpumask_set_cpu(lcpu, &core->mask);
- cpu_core_id[lcpu] = rcpu;
- if (one_core_per_cpu) {
- cpu_socket_id[lcpu] = rcpu;
- core = core->next;
- } else {
- cpu_socket_id[lcpu] = core->id;
- }
- smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+ if (lcpu < 0)
+ continue;
+ cpumask_set_cpu(lcpu, &book->mask);
+ cpu_topology[lcpu].book_id = book->id;
+ cpumask_set_cpu(lcpu, &socket->mask);
+ cpu_topology[lcpu].core_id = rcpu;
+ if (one_socket_per_cpu) {
+ cpu_topology[lcpu].socket_id = rcpu;
+ socket = socket->next;
+ } else {
+ cpu_topology[lcpu].socket_id = socket->id;
}
+ smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
- return core;
+ return socket;
}
static void clear_masks(void)
{
struct mask_info *info;
- info = &core_info;
+ info = &socket_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
- struct mask_info *core = &core_info;
+ struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 1:
- core = core->next;
- core->id = tle->container.id;
+ socket = socket->next;
+ socket->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, core, 0);
+ add_cpus_to_mask(&tle->cpu, book, socket, 0);
break;
default:
clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
}
}
-static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
- struct mask_info *core = &core_info;
+ struct mask_info *socket = &socket_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
book->id = tle->container.id;
break;
case 0:
- core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+ socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
break;
default:
clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
}
}
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct cpuid cpu_id;
- get_cpu_id(&cpu_id);
spin_lock_irq(&topology_lock);
+ get_cpu_id(&cpu_id);
clear_masks();
switch (cpu_id.machine) {
case 0x2097:
case 0x2098:
- __tl_to_cores_z10(info);
+ __tl_to_masks_z10(info);
break;
default:
- __tl_to_cores_generic(info);
+ __tl_to_masks_generic(info);
}
spin_unlock_irq(&topology_lock);
}
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
return rc;
}
-static void update_cpu_core_map(void)
+static void update_cpu_masks(void)
{
unsigned long flags;
int cpu;
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
- cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
- cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+ cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
+ cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+ if (!MACHINE_HAS_TOPOLOGY) {
+ cpu_topology[cpu].core_id = cpu;
+ cpu_topology[cpu].socket_id = cpu;
+ cpu_topology[cpu].book_id = cpu;
+ }
}
spin_unlock_irqrestore(&topology_lock, flags);
}
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
- update_cpu_core_map();
+ update_cpu_masks();
topology_update_polarization_simple();
return 0;
}
store_topology(info);
- tl_to_cores(info);
- update_cpu_core_map();
+ tl_to_masks(info);
+ update_cpu_masks();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
- alloc_masks(info, &core_info, 1);
+ alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
}
@@ -454,7 +449,7 @@ static int __init topology_init(void)
}
set_topology_timer();
out:
- update_cpu_core_map();
+ update_cpu_masks();
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
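
The topology.c hunk above folds the old parallel per-CPU arrays (cpu_core_id[], cpu_socket_id[], cpu_book_id[], cpu_core_map[], cpu_book_map[]) into a single cpu_topology[] array of structs, with update_cpu_masks() falling back to "every id equals the CPU number" when no topology facility is present. A minimal stand-alone C sketch of that consolidation pattern; the NR_CPUS value, struct name and main() harness are illustrative, not the kernel definitions:

    #include <stdio.h>

    #define NR_CPUS 4   /* illustrative; the kernel takes this from its config */

    /* One struct per CPU instead of several parallel id/mask arrays. */
    struct cpu_topology_demo {
        unsigned int core_id;
        unsigned int socket_id;
        unsigned int book_id;
    };

    static struct cpu_topology_demo cpu_topology[NR_CPUS];

    int main(void)
    {
        /* Fallback mirroring update_cpu_masks() when !MACHINE_HAS_TOPOLOGY:
         * every id simply equals the CPU number. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            cpu_topology[cpu].core_id = cpu;
            cpu_topology[cpu].socket_id = cpu;
            cpu_topology[cpu].book_id = cpu;
        }
        printf("cpu 2: core %u socket %u book %u\n",
               cpu_topology[2].core_id,
               cpu_topology[2].socket_id,
               cpu_topology[2].book_id);
        return 0;
    }
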
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3d2b0fa37db..70ecfc5fe8f 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -41,8 +41,6 @@
#include <asm/ipl.h>
#include "entry.h"
-void (*pgm_check_table[128])(struct pt_regs *regs);
-
int show_unhandled_signals = 1;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -350,7 +348,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
}
-static void default_trap_handler(struct pt_regs *regs)
+void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV);
@@ -360,9 +358,9 @@ static void default_trap_handler(struct pt_regs *regs)
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
-static void name(struct pt_regs *regs) \
-{ \
- do_trap(regs, signr, sicode, str); \
+void name(struct pt_regs *regs) \
+{ \
+ do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@@ -417,7 +415,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-static void __kprobes illegal_op(struct pt_regs *regs)
+void __kprobes illegal_op(struct pt_regs *regs)
{
siginfo_t info;
__u8 opcode[6];
@@ -536,7 +534,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
-static void data_exception(struct pt_regs *regs)
+void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
int signal = 0;
@@ -611,7 +609,7 @@ static void data_exception(struct pt_regs *regs)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
-static void space_switch_exception(struct pt_regs *regs)
+void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
if (user_mode(regs))
@@ -629,43 +627,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
panic("Corrupt kernel stack, can't continue.");
}
-/* init is done in lowcore.S and head.S */
-
void __init trap_init(void)
{
- int i;
-
- for (i = 0; i < 128; i++)
- pgm_check_table[i] = &default_trap_handler;
- pgm_check_table[1] = &illegal_op;
- pgm_check_table[2] = &privileged_op;
- pgm_check_table[3] = &execute_exception;
- pgm_check_table[4] = &do_protection_exception;
- pgm_check_table[5] = &addressing_exception;
- pgm_check_table[6] = &specification_exception;
- pgm_check_table[7] = &data_exception;
- pgm_check_table[8] = &overflow_exception;
- pgm_check_table[9] = &divide_exception;
- pgm_check_table[0x0A] = &overflow_exception;
- pgm_check_table[0x0B] = &divide_exception;
- pgm_check_table[0x0C] = &hfp_overflow_exception;
- pgm_check_table[0x0D] = &hfp_underflow_exception;
- pgm_check_table[0x0E] = &hfp_significance_exception;
- pgm_check_table[0x0F] = &hfp_divide_exception;
- pgm_check_table[0x10] = &do_dat_exception;
- pgm_check_table[0x11] = &do_dat_exception;
- pgm_check_table[0x12] = &translation_exception;
- pgm_check_table[0x13] = &special_op_exception;
-#ifdef CONFIG_64BIT
- pgm_check_table[0x18] = &transaction_exception;
- pgm_check_table[0x38] = &do_asce_exception;
- pgm_check_table[0x39] = &do_dat_exception;
- pgm_check_table[0x3A] = &do_dat_exception;
- pgm_check_table[0x3B] = &do_dat_exception;
-#endif /* CONFIG_64BIT */
- pgm_check_table[0x15] = &operand_exception;
- pgm_check_table[0x1C] = &space_switch_exception;
- pgm_check_table[0x1D] = &hfp_sqrt_exception;
- /* Enable machine checks early. */
local_mcck_enable();
}
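
trap_init() above no longer fills pgm_check_table[] at run time; the handlers lose their static qualifier, presumably so they can be referenced from a table built outside this file, consistent with the PGM_CHECK_DEFAULT entries at the start of this diff. A hedged stand-alone sketch of the static dispatch-table pattern; table size, index meanings and demo names are illustrative only:

    #include <stdio.h>

    struct regs;                              /* stand-in for struct pt_regs */
    typedef void (*pgm_handler_t)(struct regs *);

    static void default_handler(struct regs *regs)    { (void)regs; puts("default"); }
    static void illegal_op_handler(struct regs *regs) { (void)regs; puts("illegal op"); }

    /* Built at compile time instead of being filled in an init function;
     * only index 1 (illegal op) mirrors the removed runtime assignment. */
    static pgm_handler_t pgm_check_demo[4] = {
        default_handler,      /* 0 */
        illegal_op_handler,   /* 1 */
        default_handler,      /* 2 */
        default_handler,      /* 3 */
    };

    int main(void)
    {
        pgm_check_demo[1](NULL);
        return 0;
    }
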
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index ff1e2f8ef94..c30615e605a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -629,10 +629,27 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
break;
case KVM_S390_SIGP_STOP:
case KVM_S390_RESTART:
+ VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
+ inti->type = s390int->type;
+ break;
case KVM_S390_INT_EXTERNAL_CALL:
+ if (s390int->parm & 0xffff0000) {
+ kfree(inti);
+ return -EINVAL;
+ }
+ VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+ s390int->parm);
+ inti->type = s390int->type;
+ inti->extcall.code = s390int->parm;
+ break;
case KVM_S390_INT_EMERGENCY:
- VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
+ if (s390int->parm & 0xffff0000) {
+ kfree(inti);
+ return -EINVAL;
+ }
+ VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
inti->type = s390int->type;
+ inti->emerg.code = s390int->parm;
break;
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d91a9556800..c9011bfaabb 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -355,6 +355,11 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
@@ -993,7 +998,7 @@ static int __init kvm_s390_init(void)
}
memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
facilities[0] &= 0xff00fff3f47c0000ULL;
- facilities[1] &= 0x201c000000000000ULL;
+ facilities[1] &= 0x001c000000000000ULL;
return 0;
}
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 1bea6d1f55a..640bea12303 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,9 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o gup.o extable.o
-obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
-obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y += page-states.o gup.o extable.o pageattr.o
+
+obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cbc6668acb8..04e4892247d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -150,6 +150,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
+ unsigned int prot;
pud_t *pud;
int i;
@@ -157,7 +158,11 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
st->current_address = addr;
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
- walk_pmd_level(m, st, pud, addr);
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) & _PAGE_RO;
+ note_page(m, st, prot, 2);
+ } else
+ walk_pmd_level(m, st, pud, addr);
else
note_page(m, st, _PAGE_INVALID, 2);
addr += PUD_SIZE;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 04ad4001a28..42601d6e166 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,15 +49,19 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
-#define VM_FAULT_SIGNAL 0x080000
+#define VM_FAULT_SIGNAL 0x080000
-static unsigned long store_indication;
+static unsigned long store_indication __read_mostly;
-void fault_init(void)
+#ifdef CONFIG_64BIT
+static int __init fault_init(void)
{
- if (test_facility(2) && test_facility(75))
+ if (test_facility(75))
store_indication = 0xc00;
+ return 0;
}
+early_initcall(fault_init);
+#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -273,10 +277,16 @@ static inline int do_exception(struct pt_regs *regs, int access)
unsigned int flags;
int fault;
+ tsk = current;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
+
if (notify_page_fault(regs))
return 0;
- tsk = current;
mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
@@ -372,11 +382,6 @@ retry:
goto retry;
}
}
- /*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -423,6 +428,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
struct vm_area_struct *vma;
unsigned long trans_exc_code;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(current, TIF_PER_TRAP);
+
trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81e596c65de..ae672f41c46 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -125,7 +125,6 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
- fault_init();
}
void __init mem_init(void)
@@ -159,34 +158,6 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
-
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- __ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_TYPE_EMPTY;
- continue;
- }
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
- }
-}
-#endif
-
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 00be01c4b4f..29ccee3651f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -2,11 +2,46 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
+#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/hugetlb.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
+#include <asm/page.h>
+
+void storage_key_init_range(unsigned long start, unsigned long end)
+{
+ unsigned long boundary, function, size;
+
+ while (start < end) {
+ if (MACHINE_HAS_EDAT2) {
+ /* set storage keys for a 2GB frame */
+ function = 0x22000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 31;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ if (MACHINE_HAS_EDAT1) {
+ /* set storage keys for a 1MB frame */
+ function = 0x21000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 20;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ start += PAGE_SIZE;
+ }
+}
static pte_t *walk_page_table(unsigned long addr)
{
@@ -19,7 +54,7 @@ static pte_t *walk_page_table(unsigned long addr)
if (pgd_none(*pgdp))
return NULL;
pudp = pud_offset(pgdp, addr);
- if (pud_none(*pudp))
+ if (pud_none(*pudp) || pud_large(*pudp))
return NULL;
pmdp = pmd_offset(pudp, addr);
if (pmd_none(*pmdp) || pmd_large(*pmdp))
@@ -70,3 +105,46 @@ int set_memory_x(unsigned long addr, int numpages)
{
return 0;
}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long address;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_TYPE_EMPTY;
+ continue;
+ }
+ *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+ }
+}
+
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
+ return cc == 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
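
storage_key_init_range() above walks [start, end) and prefers a 2GB frame (EDAT2) or a 1MB frame (EDAT1) whenever a whole frame fits, using boundary = (start + size) & ~(size - 1) to find the next size-aligned address. A minimal stand-alone sketch of just that boundary arithmetic; the frame sizes are taken from the diff, everything else is illustrative:

    #include <stdio.h>

    /* Smallest 'size'-aligned address strictly above start (size must be a power
     * of two) -- the test 'boundary <= end' then tells whether a full frame fits. */
    static unsigned long long next_boundary(unsigned long long start,
                                            unsigned long long size)
    {
        return (start + size) & ~(size - 1);
    }

    int main(void)
    {
        unsigned long long mb  = 1ULL << 20;  /* EDAT1: 1MB frame */
        unsigned long long gb2 = 1ULL << 31;  /* EDAT2: 2GB frame */

        printf("0x123456   -> next 1MB boundary 0x%llx\n",
               next_boundary(0x123456ULL, mb));
        printf("0x80000000 -> next 2GB boundary 0x%llx\n",
               next_boundary(0x80000000ULL, gb2));
        return 0;
    }
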
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c8188a18af0..ae44d2a3431 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -881,22 +881,6 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
-bool kernel_page_present(struct page *page)
-{
- unsigned long addr;
- int cc;
-
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
-}
-#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 387c7c60b5b..6ed1426d27c 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -89,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
int ret = -ENOMEM;
while (address < end) {
+ pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -96,18 +97,24 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
goto out;
pgd_populate(&init_mm, pg_dir, pu_dir);
}
-
pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+ !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+ pte_val(pte) |= _REGION3_ENTRY_LARGE;
+ pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
+ pud_val(*pu_dir) = pte_val(pte);
+ address += PUD_SIZE;
+ continue;
+ }
+#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
}
-
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
-
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
@@ -160,6 +167,11 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
address += PUD_SIZE;
continue;
}
+ if (pud_large(*pu_dir)) {
+ pud_clear(pu_dir);
+ address += PUD_SIZE;
+ continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
address += PMD_SIZE;
@@ -193,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
start_addr = (unsigned long) start;
end_addr = (unsigned long) (start + nr);
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+ for (address = start_addr; address < end_addr;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -212,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+ /* Use 1MB frames for vmemmap if available. We always
+ * use large frames even if they are only partially
+ * used.
+ * Otherwise we would also end up with page tables, since
+ * vmemmap_populate gets called for each section
+ * separately. */
+ if (MACHINE_HAS_EDAT1) {
+ void *new_page;
+
+ new_page = vmemmap_alloc_block(PMD_SIZE, node);
+ if (!new_page)
+ goto out;
+ pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = pte_val(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -228,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
+ address += PAGE_SIZE;
}
memset(start, 0, nr * sizeof(struct page));
ret = 0;
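
vmem_add_mem() above only installs a large (EDAT1 1MB / EDAT2 2GB) mapping when the current address sits on a large-frame boundary and a whole frame still fits below end, i.e. the `!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)` test. A small stand-alone sketch of that check; the frame sizes mirror the diff, the function and constant names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_PMD_SIZE (1ULL << 20)   /* 1MB segment, EDAT1 */
    #define DEMO_PUD_SIZE (1ULL << 31)   /* 2GB region, EDAT2 */

    /* True when addr is non-zero, frame-aligned and a full frame fits in [addr, end). */
    static bool can_map_large(unsigned long long addr, unsigned long long end,
                              unsigned long long frame)
    {
        return addr && !(addr & (frame - 1)) && addr + frame <= end;
    }

    int main(void)
    {
        printf("%d\n", can_map_large(0x80000000ULL, 0x100000000ULL, DEMO_PUD_SIZE)); /* 1 */
        printf("%d\n", can_map_large(0x80000000ULL, 0x90000000ULL,  DEMO_PUD_SIZE)); /* 0 */
        printf("%d\n", can_map_large(0x00100000ULL, 0x00400000ULL,  DEMO_PMD_SIZE)); /* 1 */
        return 0;
    }
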
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9b355b406af..bb284419b0f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -341,6 +341,27 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
+ case BPF_S_ALU_MOD_X: /* A %= X */
+ jit->seen |= SEEN_XREG | SEEN_RET0;
+ /* ltr %r12,%r12 */
+ EMIT2(0x12cc);
+ /* jz <ret0> */
+ EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dr %r4,%r12 */
+ EMIT2(0x1d4c);
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
+ case BPF_S_ALU_MOD_K: /* A %= K */
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* d %r4,<d(K)>(%r13) */
+ EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* lr %r5,%r4 */
+ EMIT2(0x1854);
+ break;
case BPF_S_ALU_AND_X: /* A &= X */
jit->seen |= SEEN_XREG;
/* nr %r5,%r12 */
@@ -368,10 +389,17 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x5650d000, EMIT_CONST(K));
break;
case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+ case BPF_S_ALU_XOR_X:
jit->seen |= SEEN_XREG;
/* xr %r5,%r12 */
EMIT2(0x175c);
break;
+ case BPF_S_ALU_XOR_K: /* A ^= K */
+ if (!K)
+ break;
+ /* x %r5,<d(K)>(%r13) */
+ EMIT4_DISP(0x5750d000, EMIT_CONST(K));
+ break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
jit->seen |= SEEN_XREG;
/* sll %r5,0(%r12) */
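
The new JIT cases above add A %= X and A %= K to the s390 BPF compiler; for the register variant the generated code tests X first ('ltr %r12,%r12' + 'jz <ret0>') and branches to the JIT's ret0 path when it is zero. A rough C model of the semantics those instruction sequences implement; the helper is purely illustrative, not part of the JIT:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of BPF_S_ALU_MOD_X: a zero divisor takes the return-0 path
     * instead of dividing. */
    static uint32_t alu_mod_x(uint32_t A, uint32_t X, int *ret0)
    {
        if (X == 0) {
            *ret0 = 1;
            return 0;
        }
        return A % X;
    }

    int main(void)
    {
        int ret0 = 0;
        uint32_t a;

        a = alu_mod_x(17, 5, &ret0);
        printf("17 %% 5 -> A=%u ret0=%d\n", a, ret0);

        a = alu_mod_x(17, 0, &ret0);
        printf("17 %% 0 -> A=%u ret0=%d\n", a, ret0);
        return 0;
    }
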
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
new file mode 100644
index 00000000000..f0f426a113c
--- /dev/null
+++ b/arch/s390/pci/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the s390 PCI subsystem.
+#
+
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
+ pci_sysfs.o pci_event.o pci_debug.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
new file mode 100644
index 00000000000..8fa416b8775
--- /dev/null
+++ b/arch/s390/pci/pci.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * The System z PCI code is a rewrite from a prototype by
+ * the following people (Kudoz!):
+ * Alexander Schmidt
+ * Christoph Raisch
+ * Hannes Hering
+ * Hoang-Nam Nguyen
+ * Jan-Bernd Themann
+ * Stefan Roscher
+ * Thomas Klein
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+#include <asm/facility.h>
+#include <asm/pci_insn.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#define DEBUG /* enable pr_debug */
+
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+
+#define ZPCI_NR_DMA_SPACES 1
+#define ZPCI_MSI_VEC_BITS 6
+#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
+
+/* list of all detected zpci devices */
+LIST_HEAD(zpci_list);
+EXPORT_SYMBOL_GPL(zpci_list);
+DEFINE_MUTEX(zpci_list_lock);
+EXPORT_SYMBOL_GPL(zpci_list_lock);
+
+struct pci_hp_callback_ops hotplug_ops;
+EXPORT_SYMBOL_GPL(hotplug_ops);
+
+static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+static DEFINE_SPINLOCK(zpci_domain_lock);
+
+struct callback {
+ irq_handler_t handler;
+ void *data;
+};
+
+struct zdev_irq_map {
+ unsigned long aibv; /* AI bit vector */
+ int msi_vecs; /* consecutive MSI-vectors used */
+ int __unused;
+ struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
+ spinlock_t lock; /* protect callbacks against de-reg */
+};
+
+struct intr_bucket {
+ /* amap of adapters, one bit per dev, corresponds to one irq nr */
+ unsigned long *alloc;
+ /* AI summary bit, global page for all devices */
+ unsigned long *aisb;
+ /* pointer to aibv and callback data in zdev */
+ struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
+ /* protects the whole bucket struct */
+ spinlock_t lock;
+};
+
+static struct intr_bucket *bucket;
+
+/* Adapter local summary indicator */
+static u8 *zpci_irq_si;
+
+static atomic_t irq_retries = ATOMIC_INIT(0);
+
+/* I/O Map */
+static DEFINE_SPINLOCK(zpci_iomap_lock);
+static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+struct zpci_iomap_entry *zpci_iomap_start;
+EXPORT_SYMBOL_GPL(zpci_iomap_start);
+
+/* highest irq summary bit */
+static int __read_mostly aisb_max;
+
+static struct kmem_cache *zdev_irq_cache;
+static struct kmem_cache *zdev_fmb_cache;
+
+debug_info_t *pci_debug_msg_id;
+debug_info_t *pci_debug_err_id;
+
+static inline int irq_to_msi_nr(unsigned int irq)
+{
+ return irq & ZPCI_MSI_MASK;
+}
+
+static inline int irq_to_dev_nr(unsigned int irq)
+{
+ return irq >> ZPCI_MSI_VEC_BITS;
+}
+
+static inline struct zdev_irq_map *get_imap(unsigned int irq)
+{
+ return bucket->imap[irq_to_dev_nr(irq)];
+}
+
+struct zpci_dev *get_zdev(struct pci_dev *pdev)
+{
+ return (struct zpci_dev *) pdev->sysdata;
+}
+
+struct zpci_dev *get_zdev_by_fid(u32 fid)
+{
+ struct zpci_dev *tmp, *zdev = NULL;
+
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(tmp, &zpci_list, entry) {
+ if (tmp->fid == fid) {
+ zdev = tmp;
+ break;
+ }
+ }
+ mutex_unlock(&zpci_list_lock);
+ return zdev;
+}
+
+bool zpci_fid_present(u32 fid)
+{
+ return (get_zdev_by_fid(fid) != NULL) ? true : false;
+}
+
+static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
+{
+ return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
+}
+
+int pci_domain_nr(struct pci_bus *bus)
+{
+ return ((struct zpci_dev *) bus->sysdata)->domain;
+}
+EXPORT_SYMBOL_GPL(pci_domain_nr);
+
+int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+EXPORT_SYMBOL_GPL(pci_proc_domain);
+
+/* Store PCI function information block */
+static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
+{
+ struct zpci_fib *fib;
+ u8 status, cc;
+
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ do {
+ cc = __stpcifc(zdev->fh, 0, fib, &status);
+ if (cc == 2) {
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ memset(fib, 0, PAGE_SIZE);
+ }
+ } while (cc == 2);
+
+ if (cc)
+ pr_err_once("%s: cc: %u status: %u\n",
+ __func__, cc, status);
+
+ /* Return PCI function controls */
+ *fc = fib->fc;
+
+ free_page((unsigned long) fib);
+ return (cc) ? -EIO : 0;
+}
+
+/* Modify PCI: Register adapter interruptions */
+static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
+ u64 aibv)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
+ struct zpci_fib *fib;
+ int rc;
+
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ fib->isc = PCI_ISC;
+ fib->noi = zdev->irq_map->msi_vecs;
+ fib->sum = 1; /* enable summary notifications */
+ fib->aibv = aibv;
+ fib->aibvo = 0; /* every function has its own page */
+ fib->aisb = (u64) bucket->aisb + aisb / 8;
+ fib->aisbo = aisb & ZPCI_MSI_MASK;
+
+ rc = mpcifc_instr(req, fib);
+ pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
+
+ free_page((unsigned long) fib);
+ return rc;
+}
+
+struct mod_pci_args {
+ u64 base;
+ u64 limit;
+ u64 iota;
+ u64 fmb_addr;
+};
+
+static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
+ struct zpci_fib *fib;
+ int rc;
+
+ /* The FIB must be available even if it's not used */
+ fib = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!fib)
+ return -ENOMEM;
+
+ fib->pba = args->base;
+ fib->pal = args->limit;
+ fib->iota = args->iota;
+ fib->fmb_addr = args->fmb_addr;
+
+ rc = mpcifc_instr(req, fib);
+ free_page((unsigned long) fib);
+ return rc;
+}
+
+/* Modify PCI: Register I/O address translation parameters */
+int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
+ u64 base, u64 limit, u64 iota)
+{
+ struct mod_pci_args args = { base, limit, iota, 0 };
+
+ WARN_ON_ONCE(iota & 0x3fff);
+ args.iota |= ZPCI_IOTA_RTTO_FLAG;
+ return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
+}
+
+/* Modify PCI: Unregister I/O address translation parameters */
+int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
+{
+ struct mod_pci_args args = { 0, 0, 0, 0 };
+
+ return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
+}
+
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_unregister_airq(struct zpci_dev *zdev)
+{
+ struct mod_pci_args args = { 0, 0, 0, 0 };
+
+ return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
+}
+
+/* Modify PCI: Set PCI function measurement parameters */
+int zpci_fmb_enable_device(struct zpci_dev *zdev)
+{
+ struct mod_pci_args args = { 0, 0, 0, 0 };
+
+ if (zdev->fmb)
+ return -EINVAL;
+
+ zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+ if (!zdev->fmb)
+ return -ENOMEM;
+ memset(zdev->fmb, 0, sizeof(*zdev->fmb));
+ WARN_ON((u64) zdev->fmb & 0xf);
+
+ args.fmb_addr = virt_to_phys(zdev->fmb);
+ return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+}
+
+/* Modify PCI: Disable PCI function measurement */
+int zpci_fmb_disable_device(struct zpci_dev *zdev)
+{
+ struct mod_pci_args args = { 0, 0, 0, 0 };
+ int rc;
+
+ if (!zdev->fmb)
+ return -EINVAL;
+
+ /* Function measurement is disabled if fmb address is zero */
+ rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+
+ kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+ zdev->fmb = NULL;
+ return rc;
+}
+
+#define ZPCI_PCIAS_CFGSPC 15
+
+static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data;
+ int rc;
+
+ rc = pcilg_instr(&data, req, offset);
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
+ if (!rc)
+ *val = (u32) data;
+ else
+ *val = 0xffffffff;
+ return rc;
+}
+
+static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data = val;
+ int rc;
+
+ data = cpu_to_le64(data);
+ data = data >> ((8 - len) * 8);
+ rc = pcistg_instr(data, req, offset);
+ return rc;
+}
+
+void synchronize_irq(unsigned int irq)
+{
+ /*
+ * Not needed: the handler is protected by a lock, and IRQs that occur
+ * after the handler has been deleted are just NOPs.
+ */
+}
+EXPORT_SYMBOL_GPL(synchronize_irq);
+
+void enable_irq(unsigned int irq)
+{
+ struct msi_desc *msi = irq_get_msi_desc(irq);
+
+ zpci_msi_set_mask_bits(msi, 1, 0);
+}
+EXPORT_SYMBOL_GPL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+ struct msi_desc *msi = irq_get_msi_desc(irq);
+
+ zpci_msi_set_mask_bits(msi, 1, 1);
+}
+EXPORT_SYMBOL_GPL(disable_irq);
+
+void disable_irq_nosync(unsigned int irq)
+{
+ disable_irq(irq);
+}
+EXPORT_SYMBOL_GPL(disable_irq_nosync);
+
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_on);
+
+int probe_irq_off(unsigned long val)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long val)
+{
+ return val;
+}
+EXPORT_SYMBOL_GPL(probe_irq_mask);
+
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+}
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size,
+ resource_size_t align)
+{
+ return 0;
+}
+
+/* combine single writes by using store-block insn */
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+{
+ zpci_memcpy_toio(to, from, count);
+}
+
+/* Create a virtual mapping cookie for a PCI BAR */
+void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ u64 addr;
+ int idx;
+
+ if ((bar & 7) != bar)
+ return NULL;
+
+ idx = zdev->bars[bar].map_idx;
+ spin_lock(&zpci_iomap_lock);
+ zpci_iomap_start[idx].fh = zdev->fh;
+ zpci_iomap_start[idx].bar = bar;
+ spin_unlock(&zpci_iomap_lock);
+
+ addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
+ return (void __iomem *) addr;
+}
+EXPORT_SYMBOL_GPL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ unsigned int idx;
+
+ idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
+ spin_lock(&zpci_iomap_lock);
+ zpci_iomap_start[idx].fh = 0;
+ zpci_iomap_start[idx].bar = 0;
+ spin_unlock(&zpci_iomap_lock);
+}
+EXPORT_SYMBOL_GPL(pci_iounmap);
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus);
+
+ if (!zdev || devfn != ZPCI_DEVFN)
+ return 0;
+ return zpci_cfg_load(zdev, where, val, size);
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus);
+
+ if (!zdev || devfn != ZPCI_DEVFN)
+ return 0;
+ return zpci_cfg_store(zdev, where, val, size);
+}
+
+static struct pci_ops pci_root_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+/* store the last handled bit to implement fair scheduling of devices */
+static DEFINE_PER_CPU(unsigned long, next_sbit);
+
+static void zpci_irq_handler(void *dont, void *need)
+{
+ unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
+ int rescan = 0, max = aisb_max;
+ struct zdev_irq_map *imap;
+
+ kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+ sbit = start;
+
+scan:
+ /* find summary_bit */
+ for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
+ clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
+ last = sbit;
+
+ /* find vector bit */
+ imap = bucket->imap[sbit];
+ for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
+ kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+ clear_bit(63 - mbit, &imap->aibv);
+
+ spin_lock(&imap->lock);
+ if (imap->cb[mbit].handler)
+ imap->cb[mbit].handler(mbit,
+ imap->cb[mbit].data);
+ spin_unlock(&imap->lock);
+ }
+ }
+
+ if (rescan)
+ goto out;
+
+ /* scan the skipped bits */
+ if (start > 0) {
+ sbit = 0;
+ max = start;
+ start = 0;
+ goto scan;
+ }
+
+ /* enable interrupts again */
+ sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+
+ /* check again to not lose initiative */
+ rmb();
+ max = aisb_max;
+ sbit = find_first_bit_left(bucket->aisb, max);
+ if (sbit != max) {
+ atomic_inc(&irq_retries);
+ rescan++;
+ goto scan;
+ }
+out:
+ /* store next device bit to scan */
+ __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
+}
+
+/* msi_vecs - number of requested interrupts; 0 places the function into the error state */
+static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ unsigned int aisb, msi_nr;
+ struct msi_desc *msi;
+ int rc;
+
+ /* store the number of used MSI vectors */
+ zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
+
+ spin_lock(&bucket->lock);
+ aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
+ /* alloc map exhausted? */
+ if (aisb == PAGE_SIZE) {
+ spin_unlock(&bucket->lock);
+ return -EIO;
+ }
+ set_bit(aisb, bucket->alloc);
+ spin_unlock(&bucket->lock);
+
+ zdev->aisb = aisb;
+ if (aisb + 1 > aisb_max)
+ aisb_max = aisb + 1;
+
+ /* wire up IRQ shortcut pointer */
+ bucket->imap[zdev->aisb] = zdev->irq_map;
+ pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
+
+ /* TODO: irq number 0 won't be found if we return fewer MSI vectors than requested.
+ * Ignore it for now and fix it in common code.
+ */
+ msi_nr = aisb << ZPCI_MSI_VEC_BITS;
+
+ list_for_each_entry(msi, &pdev->msi_list, list) {
+ rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
+ aisb << ZPCI_MSI_VEC_BITS);
+ if (rc)
+ return rc;
+ msi_nr++;
+ }
+
+ rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
+ if (rc) {
+ clear_bit(aisb, bucket->alloc);
+ dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
+ return rc;
+ }
+ return (zdev->irq_map->msi_vecs == msi_vecs) ?
+ 0 : zdev->irq_map->msi_vecs;
+}
+
+static void zpci_teardown_msi(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ struct msi_desc *msi;
+ int aisb, rc;
+
+ rc = zpci_unregister_airq(zdev);
+ if (rc) {
+ dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+ return;
+ }
+
+ msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
+ aisb = irq_to_dev_nr(msi->irq);
+
+ list_for_each_entry(msi, &pdev->msi_list, list)
+ zpci_teardown_msi_irq(zdev, msi);
+
+ clear_bit(aisb, bucket->alloc);
+ if (aisb + 1 == aisb_max)
+ aisb_max--;
+}
+
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
+ if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
+ return -EINVAL;
+ return zpci_setup_msi(pdev, nvec);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ pr_info("%s: on pdev: %p\n", __func__, pdev);
+ zpci_teardown_msi(pdev);
+}
+
+static void zpci_map_resources(struct zpci_dev *zdev)
+{
+ struct pci_dev *pdev = zdev->pdev;
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
+ pdev->resource[i].end = pdev->resource[i].start + len - 1;
+ pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
+ i, pdev->resource[i].start, pdev->resource[i].end);
+ }
+};
+
+static void zpci_unmap_resources(struct pci_dev *pdev)
+{
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pci_iounmap(pdev, (void *) pdev->resource[i].start);
+ }
+};
+
+struct zpci_dev *zpci_alloc_device(void)
+{
+ struct zpci_dev *zdev;
+
+ /* Alloc memory for our private pci device data */
+ zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Alloc aibv & callback space */
+ zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
+ if (!zdev->irq_map)
+ goto error;
+ WARN_ON((u64) zdev->irq_map & 0xff);
+ return zdev;
+
+error:
+ kfree(zdev);
+ return ERR_PTR(-ENOMEM);
+}
+
+void zpci_free_device(struct zpci_dev *zdev)
+{
+ kmem_cache_free(zdev_irq_cache, zdev->irq_map);
+ kfree(zdev);
+}
+
+/* Called on removal of pci_dev, leaves zpci and bus device */
+static void zpci_remove_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zpci_dma_exit_device(zdev);
+ zpci_fmb_disable_device(zdev);
+ zpci_sysfs_remove_device(&pdev->dev);
+ zpci_unmap_resources(pdev);
+ list_del(&zdev->entry); /* can be called from init */
+ zdev->pdev = NULL;
+}
+
+static void zpci_scan_devices(void)
+{
+ struct zpci_dev *zdev;
+
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry)
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
+ zpci_scan_device(zdev);
+ mutex_unlock(&zpci_list_lock);
+}
+
+/*
+ * Too late for any s390-specific setup: interrupts must already be set up,
+ * which in turn requires DMA setup, and the PCI scan will access the
+ * config space, which only works if the function handle is enabled.
+ */
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+ struct resource *res;
+ u16 cmd;
+ int i;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ res = &pdev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ return -EINVAL;
+
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+void pcibios_disable_device(struct pci_dev *pdev)
+{
+ zpci_remove_device(pdev);
+ pdev->sysdata = NULL;
+}
+
+int pcibios_add_platform_entries(struct pci_dev *pdev)
+{
+ return zpci_sysfs_add_device(&pdev->dev);
+}
+
+int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
+{
+ int msi_nr = irq_to_msi_nr(irq);
+ struct zdev_irq_map *imap;
+ struct msi_desc *msi;
+
+ msi = irq_get_msi_desc(irq);
+ if (!msi)
+ return -EIO;
+
+ imap = get_imap(irq);
+ spin_lock_init(&imap->lock);
+
+ pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
+ imap->cb[msi_nr].handler = handler;
+ imap->cb[msi_nr].data = data;
+
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ zpci_msi_set_mask_bits(msi, 1, 0);
+ return 0;
+}
+
+void zpci_free_irq(unsigned int irq)
+{
+ struct zdev_irq_map *imap = get_imap(irq);
+ int msi_nr = irq_to_msi_nr(irq);
+ unsigned long flags;
+
+ pr_debug("%s: for irq: %d\n", __func__, irq);
+
+ spin_lock_irqsave(&imap->lock, flags);
+ imap->cb[msi_nr].handler = NULL;
+ imap->cb[msi_nr].data = NULL;
+ spin_unlock_irqrestore(&imap->lock, flags);
+}
+
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
+ __func__, irq, handler, irqflags, devname);
+
+ return zpci_request_irq(irq, handler, dev_id);
+}
+EXPORT_SYMBOL_GPL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ zpci_free_irq(irq);
+}
+EXPORT_SYMBOL_GPL(free_irq);
+
+static int __init zpci_irq_init(void)
+{
+ int cpu, rc;
+
+ bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
+ if (!bucket)
+ return -ENOMEM;
+
+ bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!bucket->aisb) {
+ rc = -ENOMEM;
+ goto out_aisb;
+ }
+
+ bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!bucket->alloc) {
+ rc = -ENOMEM;
+ goto out_alloc;
+ }
+
+ isc_register(PCI_ISC);
+ zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
+ if (IS_ERR(zpci_irq_si)) {
+ rc = PTR_ERR(zpci_irq_si);
+ zpci_irq_si = NULL;
+ goto out_ai;
+ }
+
+ for_each_online_cpu(cpu)
+ per_cpu(next_sbit, cpu) = 0;
+
+ spin_lock_init(&bucket->lock);
+ /* set summary to 1 to be called every time for the ISC */
+ *zpci_irq_si = 1;
+ sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ return 0;
+
+out_ai:
+ isc_unregister(PCI_ISC);
+ free_page((unsigned long) bucket->alloc);
+out_alloc:
+ free_page((unsigned long) bucket->aisb);
+out_aisb:
+ kfree(bucket);
+ return rc;
+}
+
+static void zpci_irq_exit(void)
+{
+ free_page((unsigned long) bucket->alloc);
+ free_page((unsigned long) bucket->aisb);
+ s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
+ isc_unregister(PCI_ISC);
+ kfree(bucket);
+}
+
+void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
+{
+ if (!zdev)
+ return;
+
+ seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
+ seq_printf(m, "aibv[0]:%016lx aibv[1]:%016lx aisb:%016lx\n",
+ get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
+}
+
+static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
+ unsigned long flags, int domain)
+{
+ struct resource *r;
+ char *name;
+ int rc;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return ERR_PTR(-ENOMEM);
+ r->start = start;
+ r->end = r->start + size - 1;
+ r->flags = flags;
+ r->parent = &iomem_resource;
+ name = kmalloc(18, GFP_KERNEL);
+ if (!name) {
+ kfree(r);
+ return ERR_PTR(-ENOMEM);
+ }
+ sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
+ r->name = name;
+
+ rc = request_resource(&iomem_resource, r);
+ if (rc)
+ pr_debug("request resource %pR failed\n", r);
+ return r;
+}
+
+static int zpci_alloc_iomap(struct zpci_dev *zdev)
+{
+ int entry;
+
+ spin_lock(&zpci_iomap_lock);
+ entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+ if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
+ spin_unlock(&zpci_iomap_lock);
+ return -ENOSPC;
+ }
+ set_bit(entry, zpci_iomap);
+ spin_unlock(&zpci_iomap_lock);
+ return entry;
+}
+
+static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
+{
+ spin_lock(&zpci_iomap_lock);
+ memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
+ clear_bit(entry, zpci_iomap);
+ spin_unlock(&zpci_iomap_lock);
+}
+
+static int zpci_create_device_bus(struct zpci_dev *zdev)
+{
+ struct resource *res;
+ LIST_HEAD(resources);
+ int i;
+
+ /* allocate mapping entry for each used bar */
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ unsigned long addr, size, flags;
+ int entry;
+
+ if (!zdev->bars[i].size)
+ continue;
+ entry = zpci_alloc_iomap(zdev);
+ if (entry < 0)
+ return entry;
+ zdev->bars[i].map_idx = entry;
+
+ /* only MMIO is supported */
+ flags = IORESOURCE_MEM;
+ if (zdev->bars[i].val & 8)
+ flags |= IORESOURCE_PREFETCH;
+ if (zdev->bars[i].val & 4)
+ flags |= IORESOURCE_MEM_64;
+
+ addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
+
+ size = 1UL << zdev->bars[i].size;
+
+ res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
+ if (IS_ERR(res)) {
+ zpci_free_iomap(zdev, entry);
+ return PTR_ERR(res);
+ }
+ pci_add_resource(&resources, res);
+ }
+
+ zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
+ if (!zdev->bus)
+ return -EIO;
+
+ zdev->bus->max_bus_speed = zdev->max_bus_speed;
+ return 0;
+}
+
+static int zpci_alloc_domain(struct zpci_dev *zdev)
+{
+ spin_lock(&zpci_domain_lock);
+ zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
+ if (zdev->domain == ZPCI_NR_DEVICES) {
+ spin_unlock(&zpci_domain_lock);
+ return -ENOSPC;
+ }
+ set_bit(zdev->domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return 0;
+}
+
+static void zpci_free_domain(struct zpci_dev *zdev)
+{
+ spin_lock(&zpci_domain_lock);
+ clear_bit(zdev->domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+}
+
+int zpci_enable_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+ if (rc)
+ goto out;
+ pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
+
+ rc = zpci_dma_init_device(zdev);
+ if (rc)
+ goto out_dma;
+ return 0;
+
+out_dma:
+ clp_disable_fh(zdev);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(zpci_enable_device);
+
+int zpci_create_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ rc = zpci_alloc_domain(zdev);
+ if (rc)
+ goto out;
+
+ rc = zpci_create_device_bus(zdev);
+ if (rc)
+ goto out_bus;
+
+ mutex_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ if (hotplug_ops.create_slot)
+ hotplug_ops.create_slot(zdev);
+ mutex_unlock(&zpci_list_lock);
+
+ if (zdev->state == ZPCI_FN_STATE_STANDBY)
+ return 0;
+
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_start;
+ return 0;
+
+out_start:
+ mutex_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ if (hotplug_ops.remove_slot)
+ hotplug_ops.remove_slot(zdev);
+ mutex_unlock(&zpci_list_lock);
+out_bus:
+ zpci_free_domain(zdev);
+out:
+ return rc;
+}
+
+void zpci_stop_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ /*
+ * Note: SCLP disables fh via set-pci-fn so don't
+ * do that here.
+ */
+}
+EXPORT_SYMBOL_GPL(zpci_stop_device);
+
+int zpci_scan_device(struct zpci_dev *zdev)
+{
+ zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
+ if (!zdev->pdev) {
+ pr_err("pci_scan_single_device failed for fid: 0x%x\n",
+ zdev->fid);
+ goto out;
+ }
+
+ zpci_debug_init_device(zdev);
+ zpci_fmb_enable_device(zdev);
+ zpci_map_resources(zdev);
+ pci_bus_add_devices(zdev->bus);
+
+ /* now that pdev was added to the bus mark it as used */
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ return 0;
+
+out:
+ zpci_dma_exit_device(zdev);
+ clp_disable_fh(zdev);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(zpci_scan_device);
+
+static inline int barsize(u8 size)
+{
+ return (size) ? (1 << size) >> 10 : 0;
+}
+
+static int zpci_mem_init(void)
+{
+ zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
+ L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
+ if (!zdev_irq_cache)
+ goto error_zdev;
+
+ zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+ 16, 0, NULL);
+ if (!zdev_fmb_cache)
+ goto error_fmb;
+
+ /* TODO: use realloc */
+ zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
+ GFP_KERNEL);
+ if (!zpci_iomap_start)
+ goto error_iomap;
+ return 0;
+
+error_iomap:
+ kmem_cache_destroy(zdev_fmb_cache);
+error_fmb:
+ kmem_cache_destroy(zdev_irq_cache);
+error_zdev:
+ return -ENOMEM;
+}
+
+static void zpci_mem_exit(void)
+{
+ kfree(zpci_iomap_start);
+ kmem_cache_destroy(zdev_irq_cache);
+ kmem_cache_destroy(zdev_fmb_cache);
+}
+
+unsigned int pci_probe = 1;
+EXPORT_SYMBOL_GPL(pci_probe);
+
+char * __init pcibios_setup(char *str)
+{
+ if (!strcmp(str, "off")) {
+ pci_probe = 0;
+ return NULL;
+ }
+ return str;
+}
+
+static int __init pci_base_init(void)
+{
+ int rc;
+
+ if (!pci_probe)
+ return 0;
+
+ if (!test_facility(2) || !test_facility(69)
+ || !test_facility(71) || !test_facility(72))
+ return 0;
+
+ pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
+ test_facility(69), test_facility(70),
+ test_facility(71));
+
+ rc = zpci_debug_init();
+ if (rc)
+ return rc;
+
+ rc = zpci_mem_init();
+ if (rc)
+ goto out_mem;
+
+ rc = zpci_msihash_init();
+ if (rc)
+ goto out_hash;
+
+ rc = zpci_irq_init();
+ if (rc)
+ goto out_irq;
+
+ rc = zpci_dma_init();
+ if (rc)
+ goto out_dma;
+
+ rc = clp_find_pci_devices();
+ if (rc)
+ goto out_find;
+
+ zpci_scan_devices();
+ return 0;
+
+out_find:
+ zpci_dma_exit();
+out_dma:
+ zpci_irq_exit();
+out_irq:
+ zpci_msihash_exit();
+out_hash:
+ zpci_mem_exit();
+out_mem:
+ zpci_debug_exit();
+ return rc;
+}
+subsys_initcall(pci_base_init);
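
pci.c above encodes the adapter-interrupt summary bit (device number) and the MSI vector in one irq value: msi_nr = aisb << ZPCI_MSI_VEC_BITS, irq_to_dev_nr() shifts right by ZPCI_MSI_VEC_BITS (6) and irq_to_msi_nr() masks with ZPCI_MSI_MASK. A tiny stand-alone sketch of that packing; the mask value below is an assumption consistent with the code's usage, not the header definition:

    #include <assert.h>
    #include <stdio.h>

    #define DEMO_MSI_VEC_BITS 6                              /* ZPCI_MSI_VEC_BITS in the diff */
    #define DEMO_MSI_MASK ((1U << DEMO_MSI_VEC_BITS) - 1)    /* assumed ZPCI_MSI_MASK layout */

    static unsigned int pack_irq(unsigned int dev_nr, unsigned int msi_nr)
    {
        return (dev_nr << DEMO_MSI_VEC_BITS) | msi_nr;
    }

    int main(void)
    {
        unsigned int irq = pack_irq(3, 5);

        assert((irq >> DEMO_MSI_VEC_BITS) == 3);   /* irq_to_dev_nr() */
        assert((irq & DEMO_MSI_MASK) == 5);        /* irq_to_msi_nr() */
        printf("irq=%u dev=%u msi=%u\n",
               irq, irq >> DEMO_MSI_VEC_BITS, irq & DEMO_MSI_MASK);
        return 0;
    }
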
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
new file mode 100644
index 00000000000..2c847143cbd
--- /dev/null
+++ b/arch/s390/pci/pci_clp.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/pci_clp.h>
+
+/*
+ * Call Logical Processor
+ * Retry logic is handled by the caller.
+ */
+static inline u8 clp_instr(void *req)
+{
+ u64 ilpm;
+ u8 cc;
+
+ asm volatile (
+ " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [ilpm] "=d" (ilpm)
+ : [req] "a" (req)
+ : "cc", "memory");
+ return cc;
+}
+
+static void *clp_alloc_block(void)
+{
+ struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
+ return (page) ? page_address(page) : NULL;
+}
+
+static void clp_free_block(void *ptr)
+{
+ free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
+}
+
+static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci_grp *response)
+{
+ zdev->tlb_refresh = response->refresh;
+ zdev->dma_mask = response->dasm;
+ zdev->msi_addr = response->msia;
+ zdev->fmb_update = response->mui;
+
+ pr_debug("Supported number of MSI vectors: %u\n", response->noi);
+ switch (response->version) {
+ case 1:
+ zdev->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
+{
+ struct clp_req_rsp_query_pci_grp *rrb;
+ int rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.pfgid = pfgid;
+
+ rc = clp_instr(rrb);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+ clp_store_query_pci_fngrp(zdev, &rrb->response);
+ else {
+ pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+static int clp_store_query_pci_fn(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci *response)
+{
+ int i;
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ zdev->bars[i].val = le32_to_cpu(response->bar[i]);
+ zdev->bars[i].size = response->bar_size[i];
+ }
+ zdev->start_dma = response->sdma;
+ zdev->end_dma = response->edma;
+ zdev->pchid = response->pchid;
+ zdev->pfgid = response->pfgid;
+ return 0;
+}
+
+static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
+{
+ struct clp_req_rsp_query_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = fh;
+
+ rc = clp_instr(rrb);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ rc = clp_store_query_pci_fn(zdev, &rrb->response);
+ if (rc)
+ goto out;
+ if (rrb->response.pfgid)
+ rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
+ } else {
+ pr_err("Query PCI failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+out:
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_add_pci_device(u32 fid, u32 fh, int configured)
+{
+ struct zpci_dev *zdev;
+ int rc;
+
+ zdev = zpci_alloc_device();
+ if (IS_ERR(zdev))
+ return PTR_ERR(zdev);
+
+ zdev->fh = fh;
+ zdev->fid = fid;
+
+ /* Query function properties and update zdev */
+ rc = clp_query_pci_fn(zdev, fh);
+ if (rc)
+ goto error;
+
+ if (configured)
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ else
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ rc = zpci_create_device(zdev);
+ if (rc)
+ goto error;
+ return 0;
+
+error:
+ zpci_free_device(zdev);
+ return rc;
+}
+
+/*
+ * Enable/Disable a given PCI function defined by its function handle.
+ */
+static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
+{
+ struct clp_req_rsp_set_pci *rrb;
+ int rc, retries = 1000;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_SET_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = *fh;
+ rrb->request.oc = command;
+ rrb->request.ndas = nr_dma_as;
+
+ rc = clp_instr(rrb);
+ if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
+ retries--;
+ if (retries < 0)
+ break;
+ msleep(1);
+ }
+ } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
+
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+ *fh = rrb->response.fh;
+ else {
+ pr_err("Set PCI FN failed with response: %x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+{
+ u32 fh = zdev->fh;
+ int rc;
+
+ rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+ if (!rc)
+ /* Success -> store enabled handle in zdev */
+ zdev->fh = fh;
+ return rc;
+}
+
+int clp_disable_fh(struct zpci_dev *zdev)
+{
+ u32 fh = zdev->fh;
+ int rc;
+
+ if (!zdev_enabled(zdev))
+ return 0;
+
+ dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
+ rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
+ if (!rc)
+ /* Success -> store disabled handle in zdev */
+ zdev->fh = fh;
+ else
+ dev_err(&zdev->pdev->dev,
+ "Failed to disable fn handle: 0x%x\n", fh);
+ return rc;
+}
+
+static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
+{
+ int present, rc;
+
+ if (!entry->vendor_id)
+ return;
+
+ /* TODO: be a little bit more scalable */
+ present = zpci_fid_present(entry->fid);
+
+ if (present)
+ pr_debug("%s: device %x already present\n", __func__, entry->fid);
+
+ /* skip already used functions */
+ if (present && entry->config_state)
+ return;
+
+ /* aev 306: function moved to stand-by state */
+ if (present && !entry->config_state) {
+ /*
+ * The handle is already disabled, so no iota/irq freeing via the
+ * firmware interfaces is possible anymore. The remaining resources
+ * (DMA memory, debug, sysfs) have to be freed manually.
+ */
+ zpci_stop_device(get_zdev_by_fid(entry->fid));
+ return;
+ }
+
+ rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+ if (rc)
+ pr_err("Failed to add fid: 0x%x\n", entry->fid);
+}
+
+int clp_find_pci_devices(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ u64 resume_token = 0;
+ int entries, i, rc;
+
+ rrb = clp_alloc_block();
+ if (!rrb)
+ return -ENOMEM;
+
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_LIST_PCI;
+ /* store as many entries as possible */
+ rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+ rrb->request.resume_token = resume_token;
+
+ /* Get PCI function handle list */
+ rc = clp_instr(rrb);
+ if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+ pr_err("List PCI failed with response: 0x%x cc: %d\n",
+ rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ goto out;
+ }
+
+ WARN_ON_ONCE(rrb->response.entry_size !=
+ sizeof(struct clp_fh_list_entry));
+
+ entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+ rrb->response.entry_size;
+ pr_info("Detected number of PCI functions: %u\n", entries);
+
+ /* Store the returned resume token as input for the next call */
+ resume_token = rrb->response.resume_token;
+
+ for (i = 0; i < entries; i++)
+ clp_check_pcifn_entry(&rrb->response.fh_list[i]);
+ } while (resume_token);
+
+ pr_debug("Maximum number of supported PCI functions: %u\n",
+ rrb->response.max_fn);
+out:
+ clp_free_block(rrb);
+ return rc;
+}
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
new file mode 100644
index 00000000000..a303c95346c
--- /dev/null
+++ b/arch/s390/pci/pci_debug.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <asm/debug.h>
+
+#include <asm/pci_dma.h>
+
+static struct dentry *debugfs_root;
+
+static char *pci_perf_names[] = {
+ /* hardware counters */
+ "Load operations",
+ "Store operations",
+ "Store block operations",
+ "Refresh operations",
+ "DMA read bytes",
+ "DMA write bytes",
+ /* software counters */
+ "Allocated pages",
+ "Mapped pages",
+ "Unmapped pages",
+};
+
+static int pci_perf_show(struct seq_file *m, void *v)
+{
+ struct zpci_dev *zdev = m->private;
+ u64 *stat;
+ int i;
+
+ if (!zdev)
+ return 0;
+ if (!zdev->fmb)
+ return seq_printf(m, "FMB statistics disabled\n");
+
+ /* header */
+ seq_printf(m, "FMB @ %p\n", zdev->fmb);
+ seq_printf(m, "Update interval: %u ms\n", zdev->fmb_update);
+ seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
+ seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
+
+ /* hardware counters */
+ stat = (u64 *) &zdev->fmb->ld_ops;
+ for (i = 0; i < 4; i++)
+ seq_printf(m, "%26s:\t%llu\n",
+ pci_perf_names[i], *(stat + i));
+ if (zdev->fmb->dma_valid)
+ for (i = 4; i < 6; i++)
+ seq_printf(m, "%26s:\t%llu\n",
+ pci_perf_names[i], *(stat + i));
+ /* software counters */
+ for (i = 6; i < ARRAY_SIZE(pci_perf_names); i++)
+ seq_printf(m, "%26s:\t%llu\n",
+ pci_perf_names[i],
+ atomic64_read((atomic64_t *) (stat + i)));
+
+ return 0;
+}
+
+static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct zpci_dev *zdev = ((struct seq_file *) file->private_data)->private;
+ unsigned long val;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ rc = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (rc)
+ return rc;
+
+ switch (val) {
+ case 0:
+ rc = zpci_fmb_disable_device(zdev);
+ if (rc)
+ return rc;
+ break;
+ case 1:
+ rc = zpci_fmb_enable_device(zdev);
+ if (rc)
+ return rc;
+ break;
+ }
+ return count;
+}
+
+static int pci_perf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, pci_perf_show,
+ filp->f_path.dentry->d_inode->i_private);
+}
+
+static const struct file_operations debugfs_pci_perf_fops = {
+ .open = pci_perf_seq_open,
+ .read = seq_read,
+ .write = pci_perf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pci_debug_show(struct seq_file *m, void *v)
+{
+ struct zpci_dev *zdev = m->private;
+
+ zpci_debug_info(zdev, m);
+ return 0;
+}
+
+static int pci_debug_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, pci_debug_show,
+ filp->f_path.dentry->d_inode->i_private);
+}
+
+static const struct file_operations debugfs_pci_debug_fops = {
+ .open = pci_debug_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void zpci_debug_init_device(struct zpci_dev *zdev)
+{
+ zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev),
+ debugfs_root);
+ if (IS_ERR(zdev->debugfs_dev))
+ zdev->debugfs_dev = NULL;
+
+ zdev->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ zdev->debugfs_dev, zdev,
+ &debugfs_pci_perf_fops);
+ if (IS_ERR(zdev->debugfs_perf))
+ zdev->debugfs_perf = NULL;
+
+ zdev->debugfs_debug = debugfs_create_file("debug",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ zdev->debugfs_dev, zdev,
+ &debugfs_pci_debug_fops);
+ if (IS_ERR(zdev->debugfs_debug))
+ zdev->debugfs_debug = NULL;
+}
+
+void zpci_debug_exit_device(struct zpci_dev *zdev)
+{
+ debugfs_remove(zdev->debugfs_perf);
+ debugfs_remove(zdev->debugfs_debug);
+ debugfs_remove(zdev->debugfs_dev);
+}
+
+int __init zpci_debug_init(void)
+{
+ /* event trace buffer */
+ pci_debug_msg_id = debug_register("pci_msg", 16, 1, 16 * sizeof(long));
+ if (!pci_debug_msg_id)
+ return -EINVAL;
+ debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(pci_debug_msg_id, 3);
+ zpci_dbg("Debug view initialized\n");
+
+ /* error log */
+ pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
+ if (!pci_debug_err_id)
+ return -EINVAL;
+ debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
+ debug_set_level(pci_debug_err_id, 6);
+ zpci_err("Debug view initialized\n");
+
+ debugfs_root = debugfs_create_dir("pci", NULL);
+ return 0;
+}
+
+void zpci_debug_exit(void)
+{
+ if (pci_debug_msg_id)
+ debug_unregister(pci_debug_msg_id);
+ if (pci_debug_err_id)
+ debug_unregister(pci_debug_err_id);
+
+ debugfs_remove(debugfs_root);
+}
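
The file created above can be driven from user space: writing 0 or 1 to "statistics" disables or enables FMB sampling (handled by pci_perf_seq_write()), and reading it dumps the counters. A small sketch, assuming debugfs is mounted at /sys/kernel/debug and using a placeholder device directory name:

#include <stdio.h>

int main(void)
{
        /* mount point and device name are assumptions, not from the patch */
        const char *path = "/sys/kernel/debug/pci/0000:00:00.0/statistics";
        char line[128];
        FILE *f;

        f = fopen(path, "w");
        if (f) {
                fputs("1\n", f);        /* enable FMB sampling */
                fclose(f);
        }

        f = fopen(path, "r");
        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* print the counter dump */
        fclose(f);
        return 0;
}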
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
new file mode 100644
index 00000000000..6138468b420
--- /dev/null
+++ b/arch/s390/pci/pci_dma.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <asm/pci_dma.h>
+
+static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
+
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static unsigned long *dma_alloc_cpu_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+ *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+ return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+ kmem_cache_free(dma_region_table_cache, table);
+}
+
+static unsigned long *dma_alloc_page_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+ *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+ return table;
+}
+
+static void dma_free_page_table(void *table)
+{
+ kmem_cache_free(dma_page_table_cache, table);
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
+{
+ unsigned long *sto;
+
+ if (reg_entry_isvalid(*entry))
+ sto = get_rt_sto(*entry);
+ else {
+ sto = dma_alloc_cpu_table();
+ if (!sto)
+ return NULL;
+
+ set_rt_sto(entry, sto);
+ validate_rt_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *entry)
+{
+ unsigned long *pto;
+
+ if (reg_entry_isvalid(*entry))
+ pto = get_st_pto(*entry);
+ else {
+ pto = dma_alloc_page_table();
+ if (!pto)
+ return NULL;
+ set_st_pto(entry, pto);
+ validate_st_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return pto;
+}
+
+static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+{
+ unsigned long *sto, *pto;
+ unsigned int rtx, sx, px;
+
+ rtx = calc_rtx(dma_addr);
+ sto = dma_get_seg_table_origin(&rto[rtx]);
+ if (!sto)
+ return NULL;
+
+ sx = calc_sx(dma_addr);
+ pto = dma_get_page_table_origin(&sto[sx]);
+ if (!pto)
+ return NULL;
+
+ px = calc_px(dma_addr);
+ return &pto[px];
+}
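
dma_walk_cpu_trans() splits a DMA address into a region-table index (calc_rtx), a segment-table index (calc_sx) and a page-table index (calc_px); those helpers live in asm/pci_dma.h and are not visible in this patch. A stand-alone sketch of the three-level split with hypothetical shift and table-size values, for illustration only:

#include <stdio.h>

/* Hypothetical layout: 4 KiB pages, 256-entry page tables,
 * 2048-entry segment tables. Illustrative values only. */
#define PAGE_SHIFT      12
#define PT_BITS         8
#define ST_BITS         11

static unsigned int calc_px(unsigned long addr)
{
        return (addr >> PAGE_SHIFT) & ((1UL << PT_BITS) - 1);
}

static unsigned int calc_sx(unsigned long addr)
{
        return (addr >> (PAGE_SHIFT + PT_BITS)) & ((1UL << ST_BITS) - 1);
}

static unsigned int calc_rtx(unsigned long addr)
{
        /* top level; masking omitted in this sketch */
        return addr >> (PAGE_SHIFT + PT_BITS + ST_BITS);
}

int main(void)
{
        unsigned long dma_addr = 0x12345678000UL;

        printf("rtx=%u sx=%u px=%u\n",
               calc_rtx(dma_addr), calc_sx(dma_addr), calc_px(dma_addr));
        return 0;
}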
+
+static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
+ dma_addr_t dma_addr, int flags)
+{
+ unsigned long *entry;
+
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (flags & ZPCI_PTE_INVALID) {
+ invalidate_pt_entry(entry);
+ return;
+ } else {
+ set_pt_pfaa(entry, page_addr);
+ validate_pt_entry(entry);
+ }
+
+ if (flags & ZPCI_TABLE_PROTECTED)
+ entry_set_protected(entry);
+ else
+ entry_clr_protected(entry);
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+ dma_addr_t start_dma_addr = dma_addr;
+ unsigned long irq_flags;
+ int i, rc = 0;
+
+ if (!nr_pages)
+ return -EINVAL;
+
+ spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
+ if (!zdev->dma_table) {
+ dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+ goto no_refresh;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
+ page_addr += PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ }
+
+ /*
+ * rpcit is not required to establish new translations when previously
+ * invalid translation-table entries are validated; it is required,
+ * however, when altering previously valid entries.
+ */
+ if (!zdev->tlb_refresh &&
+ ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ /*
+ * TODO: also check that the old entries are indeed INVALID, and
+ * not just for one page but for the whole range. For now we
+ * WARN_ON in that case, but with lazy unmap this needs to be
+ * redone!
+ */
+ goto no_refresh;
+ rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
+
+no_refresh:
+ spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
+ return rc;
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+ unsigned long *sto = get_rt_sto(entry);
+ int sx;
+
+ for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+ if (reg_entry_isvalid(sto[sx]))
+ dma_free_page_table(get_st_pto(sto[sx]));
+
+ dma_free_cpu_table(sto);
+}
+
+static void dma_cleanup_tables(struct zpci_dev *zdev)
+{
+ unsigned long *table;
+ int rtx;
+
+ if (!zdev || !zdev->dma_table)
+ return;
+
+ table = zdev->dma_table;
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(table[rtx]))
+ dma_free_seg_table(table[rtx]);
+
+ dma_free_cpu_table(table);
+ zdev->dma_table = NULL;
+}
+
+static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
+ int size)
+{
+ unsigned long boundary_size = 0x1000000;
+
+ return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
+ start, size, 0, boundary_size, 0);
+}
+
+static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
+{
+ unsigned long offset, flags;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
+ if (offset == -1)
+ offset = __dma_alloc_iommu(zdev, 0, size);
+
+ if (offset != -1) {
+ zdev->next_bit = offset + size;
+ if (zdev->next_bit >= zdev->iommu_pages)
+ zdev->next_bit = 0;
+ }
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+ return offset;
+}
+
+static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ if (!zdev->iommu_bitmap)
+ goto out;
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ if (offset >= zdev->next_bit)
+ zdev->next_bit = offset + size;
+out:
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+}
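
dma_alloc_iommu() uses a next-fit strategy: it searches from next_bit first, wraps around to the start once, and remembers where the last allocation ended. A self-contained sketch of the same strategy over a plain byte-per-page bitmap (a stand-in for iommu_area_alloc(), not the real allocator):

#include <stdio.h>
#include <string.h>

#define NR_PAGES 64

static unsigned char bitmap[NR_PAGES];  /* one byte per page for simplicity */
static unsigned long next_bit;

/* find 'size' consecutive free pages starting at 'start'; -1 if none */
static long find_area(unsigned long start, int size)
{
        unsigned long i;
        int run = 0;

        for (i = start; i < NR_PAGES; i++) {
                run = bitmap[i] ? 0 : run + 1;
                if (run == size) {
                        memset(&bitmap[i - size + 1], 1, size);
                        return i - size + 1;
                }
        }
        return -1;
}

static long alloc_iommu(int size)
{
        long offset = find_area(next_bit, size);

        if (offset == -1)       /* wrap around once, like the code above */
                offset = find_area(0, size);
        if (offset != -1) {
                next_bit = offset + size;
                if (next_bit >= NR_PAGES)
                        next_bit = 0;
        }
        return offset;
}

int main(void)
{
        printf("first:  %ld\n", alloc_iommu(8));
        printf("second: %ld\n", alloc_iommu(8));
        return 0;
}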
+
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_set_mask);
+
+static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+ unsigned long nr_pages, iommu_page_index;
+ unsigned long pa = page_to_phys(page) + offset;
+ int flags = ZPCI_PTE_VALID;
+ dma_addr_t dma_addr;
+
+ WARN_ON_ONCE(offset > PAGE_SIZE);
+
+ /* This rounds up the number of pages based on size and offset */
+ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
+ iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
+ if (iommu_page_index == -1)
+ goto out_err;
+
+ /* Use rounded up size */
+ size = nr_pages * PAGE_SIZE;
+
+ dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
+ if (dma_addr + size > zdev->end_dma) {
+ dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
+ dma_addr, size, zdev->end_dma);
+ goto out_free;
+ }
+
+ if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
+ atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
+ return dma_addr + offset;
+ }
+
+out_free:
+ dma_free_iommu(zdev, iommu_page_index, nr_pages);
+out_err:
+ dev_err(dev, "Failed to map addr: %lx\n", pa);
+ return DMA_ERROR_CODE;
+}
+
+static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+ unsigned long iommu_page_index;
+ int npages;
+
+ npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
+ dma_addr = dma_addr & PAGE_MASK;
+ if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+ ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
+ dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+
+ atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
+ iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
+ dma_free_iommu(zdev, iommu_page_index, npages);
+}
+
+static void *s390_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+ struct page *page;
+ unsigned long pa;
+ dma_addr_t map;
+
+ size = PAGE_ALIGN(size);
+ page = alloc_pages(flag, get_order(size));
+ if (!page)
+ return NULL;
+
+ atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
+ pa = page_to_phys(page);
+ memset((void *) pa, 0, size);
+
+ map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
+ size, DMA_BIDIRECTIONAL, NULL);
+ if (dma_mapping_error(dev, map)) {
+ free_pages(pa, get_order(size));
+ return NULL;
+ }
+
+ if (dma_handle)
+ *dma_handle = map;
+ return (void *) pa;
+}
+
+static void s390_dma_free(struct device *dev, size_t size,
+ void *pa, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
+ DMA_BIDIRECTIONAL, NULL);
+ free_pages((unsigned long) pa, get_order(size));
+}
+
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ int mapped_elements = 0;
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nr_elements, i) {
+ struct page *page = sg_page(s);
+ s->dma_address = s390_dma_map_pages(dev, page, s->offset,
+ s->length, dir, NULL);
+ if (!dma_mapping_error(dev, s->dma_address)) {
+ s->dma_length = s->length;
+ mapped_elements++;
+ } else
+ goto unmap;
+ }
+out:
+ return mapped_elements;
+
+unmap:
+ for_each_sg(sg, s, mapped_elements, i) {
+ if (s->dma_address)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, NULL);
+ s->dma_address = 0;
+ s->dma_length = 0;
+ }
+ mapped_elements = 0;
+ goto out;
+}
+
+static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nr_elements, i) {
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+ s->dma_address = 0;
+ s->dma_length = 0;
+ }
+}
+
+int zpci_dma_init_device(struct zpci_dev *zdev)
+{
+ unsigned int bitmap_order;
+ int rc;
+
+ spin_lock_init(&zdev->iommu_bitmap_lock);
+ spin_lock_init(&zdev->dma_table_lock);
+
+ zdev->dma_table = dma_alloc_cpu_table();
+ if (!zdev->dma_table) {
+ rc = -ENOMEM;
+ goto out_clean;
+ }
+
+ zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+ zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+ bitmap_order = get_order(zdev->iommu_pages / 8);
+ pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
+ zdev->iommu_size, zdev->iommu_pages, bitmap_order);
+
+ zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ bitmap_order);
+ if (!zdev->iommu_bitmap) {
+ rc = -ENOMEM;
+ goto out_reg;
+ }
+
+ rc = zpci_register_ioat(zdev,
+ 0,
+ zdev->start_dma + PAGE_OFFSET,
+ zdev->start_dma + zdev->iommu_size - 1,
+ (u64) zdev->dma_table);
+ if (rc)
+ goto out_reg;
+ return 0;
+
+out_reg:
+ dma_free_cpu_table(zdev->dma_table);
+out_clean:
+ return rc;
+}
+
+void zpci_dma_exit_device(struct zpci_dev *zdev)
+{
+ zpci_unregister_ioat(zdev, 0);
+ dma_cleanup_tables(zdev);
+ free_pages((unsigned long) zdev->iommu_bitmap,
+ get_order(zdev->iommu_pages / 8));
+ zdev->iommu_bitmap = NULL;
+ zdev->next_bit = 0;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+ dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+ ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
+ 0, NULL);
+ if (!dma_region_table_cache)
+ return -ENOMEM;
+
+ dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+ ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
+ 0, NULL);
+ if (!dma_page_table_cache) {
+ kmem_cache_destroy(dma_region_table_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int __init zpci_dma_init(void)
+{
+ return dma_alloc_cpu_table_caches();
+}
+
+void zpci_dma_exit(void)
+{
+ kmem_cache_destroy(dma_page_table_cache);
+ kmem_cache_destroy(dma_region_table_cache);
+}
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_debug_do_init);
+
+struct dma_map_ops s390_dma_ops = {
+ .alloc = s390_dma_alloc,
+ .free = s390_dma_free,
+ .map_sg = s390_dma_map_sg,
+ .unmap_sg = s390_dma_unmap_sg,
+ .map_page = s390_dma_map_pages,
+ .unmap_page = s390_dma_unmap_pages,
+ /* if we support direct DMA this must be conditional */
+ .is_phys = 0,
+ /* dma_supported is unconditionally true without a callback */
+};
+EXPORT_SYMBOL_GPL(s390_dma_ops);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
new file mode 100644
index 00000000000..ec62e3a0dc0
--- /dev/null
+++ b/arch/s390/pci/pci_event.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/* Content Code Description for PCI Function Error */
+struct zpci_ccdf_err {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 ett : 4; /* expected table type */
+ u32 mvn : 12; /* MSI vector number */
+ u32 dmaas : 8; /* DMA address space */
+ u32 : 6;
+ u32 q : 1; /* event qualifier */
+ u32 rw : 1; /* read/write */
+ u64 faddr; /* failing address */
+ u32 reserved3;
+ u16 reserved4;
+ u16 pec; /* PCI event code */
+} __packed;
+
+/* Content Code Description for PCI Function Availability */
+struct zpci_ccdf_avail {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 reserved5;
+ u16 reserved6;
+ u16 pec; /* PCI event code */
+} __packed;
+
+static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ zpci_err("SEI error CCD:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
+ dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
+}
+
+static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+ pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
+ (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
+ (zdev) ? dev_name(&zdev->pdev->dev) : "?",
+ ccdf->fh, ccdf->fid, ccdf->pec);
+ print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
+ 16, 1, ccdf, sizeof(*ccdf), false);
+
+ switch (ccdf->pec) {
+ case 0x0301:
+ zpci_enable_device(zdev);
+ break;
+ case 0x0302:
+ clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ break;
+ case 0x0306:
+ clp_find_pci_devices();
+ break;
+ default:
+ break;
+ }
+}
+
+void zpci_event_error(void *data)
+{
+ struct zpci_ccdf_err *ccdf = data;
+ struct zpci_dev *zdev;
+
+ zpci_event_log_err(ccdf);
+ zdev = get_zdev_by_fid(ccdf->fid);
+ if (!zdev) {
+ pr_err("Error event for unknown fid: %x", ccdf->fid);
+ return;
+ }
+}
+
+void zpci_event_availability(void *data)
+{
+ zpci_event_log_avail(data);
+}
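
For reference, the availability handler above reacts to three PCI event codes (PEC). The sketch below only tabulates the actions taken by its switch statement; the descriptions are paraphrased from this patch and imply nothing further about the architected event semantics:

#include <stdio.h>

/* Actions taken by zpci_event_log_avail() per PCI event code. */
static const struct {
        unsigned short pec;
        const char *action;
} pec_actions[] = {
        { 0x0301, "enable the function (zpci_enable_device)" },
        { 0x0302, "add the function in standby state (clp_add_pci_device)" },
        { 0x0306, "rescan the function list (clp_find_pci_devices)" },
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(pec_actions) / sizeof(pec_actions[0]); i++)
                printf("0x%04x: %s\n", pec_actions[i].pec, pec_actions[i].action);
        return 0;
}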
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
new file mode 100644
index 00000000000..90fd3482b9e
--- /dev/null
+++ b/arch/s390/pci/pci_msi.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <asm/hw_irq.h>
+
+/* mapping of irq numbers to msi_desc */
+static struct hlist_head *msi_hash;
+static unsigned int msihash_shift = 6;
+#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+
+static DEFINE_SPINLOCK(msi_map_lock);
+
+struct msi_desc *__irq_get_msi_desc(unsigned int irq)
+{
+ struct hlist_node *entry;
+ struct msi_map *map;
+
+ hlist_for_each_entry_rcu(map, entry,
+ &msi_hash[msi_hashfn(irq)], msi_chain)
+ if (map->irq == irq)
+ return map->msi;
+ return NULL;
+}
+
+int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
+{
+ if (msi->msi_attrib.is_msix) {
+ int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+ msi->masked = readl(msi->mask_base + offset);
+ writel(flag, msi->mask_base + offset);
+ } else {
+ if (msi->msi_attrib.maskbit) {
+ int pos;
+ u32 mask_bits;
+
+ pos = (long) msi->mask_base;
+ pci_read_config_dword(msi->dev, pos, &mask_bits);
+ mask_bits &= ~(mask);
+ mask_bits |= flag & mask;
+ pci_write_config_dword(msi->dev, pos, mask_bits);
+ } else {
+ return 0;
+ }
+ }
+
+ msi->msi_attrib.maskbit = !!flag;
+ return 1;
+}
+
+int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
+ unsigned int nr, int offset)
+{
+ struct msi_map *map;
+ struct msi_msg msg;
+ int rc;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->irq = nr;
+ map->msi = msi;
+ zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+
+ pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
+ __func__, nr, msi_hashfn(nr));
+ hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
+
+ spin_lock(&msi_map_lock);
+ rc = irq_set_msi_desc(nr, msi);
+ if (rc) {
+ spin_unlock(&msi_map_lock);
+ hlist_del_rcu(&map->msi_chain);
+ kfree(map);
+ zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
+ return rc;
+ }
+ spin_unlock(&msi_map_lock);
+
+ msg.data = nr - offset;
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ msg.address_hi = zdev->msi_addr >> 32;
+ write_msi_msg(nr, &msg);
+ return 0;
+}
+
+void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
+{
+ int irq = msi->irq & ZPCI_MSI_MASK;
+ struct msi_map *map;
+
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ zpci_msi_set_mask_bits(msi, 1, 1);
+
+ spin_lock(&msi_map_lock);
+ map = zdev->msi_map[irq];
+ hlist_del_rcu(&map->msi_chain);
+ kfree(map);
+ zdev->msi_map[irq] = NULL;
+ spin_unlock(&msi_map_lock);
+}
+
+/*
+ * The MSI hash table has 256 entries, which is sufficient for 4..20
+ * devices (a typical device allocates 10 + number-of-CPUs MSIs).
+ * The hash table size could be made adjustable later.
+ */
+int __init zpci_msihash_init(void)
+{
+ unsigned int i;
+
+ msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ if (!msi_hash)
+ return -ENOMEM;
+
+ for (i = 0; i < (1U << msihash_shift); i++)
+ INIT_HLIST_HEAD(&msi_hash[i]);
+ return 0;
+}
+
+void __init zpci_msihash_exit(void)
+{
+ kfree(msi_hash);
+}
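
The irq-to-msi_desc mapping above is a fixed-size hash of hlist buckets keyed by hash_long(irq, msihash_shift). A stand-alone userspace sketch of the same bucket-per-hash idea, with a trivial mask hash standing in for hash_long() (illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define HASH_SHIFT 6
#define HASH_BUCKETS (1U << HASH_SHIFT)

struct map {
        unsigned int irq;
        void *desc;
        struct map *next;       /* singly linked bucket chain */
};

static struct map *buckets[HASH_BUCKETS];

static unsigned int hashfn(unsigned int irq)
{
        return irq & (HASH_BUCKETS - 1);        /* stand-in for hash_long() */
}

static void map_add(unsigned int irq, void *desc)
{
        struct map *m = malloc(sizeof(*m));

        if (!m)
                return;
        m->irq = irq;
        m->desc = desc;
        m->next = buckets[hashfn(irq)];
        buckets[hashfn(irq)] = m;
}

static void *map_lookup(unsigned int irq)
{
        struct map *m;

        for (m = buckets[hashfn(irq)]; m; m = m->next)
                if (m->irq == irq)
                        return m->desc;
        return NULL;
}

int main(void)
{
        int dummy;

        map_add(100, &dummy);
        printf("irq 100 %s\n", map_lookup(100) ? "found" : "missing");
        printf("irq 101 %s\n", map_lookup(101) ? "found" : "missing");
        return 0;
}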
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
new file mode 100644
index 00000000000..a42cce69d0a
--- /dev/null
+++ b/arch/s390/pci/pci_sysfs.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+
+static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%08x\n", zdev->fid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
+
+static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%08x\n", zdev->fh);
+ return strlen(buf);
+}
+static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
+
+static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%04x\n", zdev->pchid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
+
+static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+
+ sprintf(buf, "0x%02x\n", zdev->pfgid);
+ return strlen(buf);
+}
+static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
+
+static struct device_attribute *zpci_dev_attrs[] = {
+ &dev_attr_function_id,
+ &dev_attr_function_handle,
+ &dev_attr_pchid,
+ &dev_attr_pfgid,
+ NULL,
+};
+
+int zpci_sysfs_add_device(struct device *dev)
+{
+ int i, rc = 0;
+
+ for (i = 0; zpci_dev_attrs[i]; i++) {
+ rc = device_create_file(dev, zpci_dev_attrs[i]);
+ if (rc)
+ goto error;
+ }
+ return 0;
+
+error:
+ while (--i >= 0)
+ device_remove_file(dev, zpci_dev_attrs[i]);
+ return rc;
+}
+
+void zpci_sysfs_remove_device(struct device *dev)
+{
+ int i;
+
+ for (i = 0; zpci_dev_attrs[i]; i++)
+ device_remove_file(dev, zpci_dev_attrs[i]);
+}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 45893390c7d..3b1482e7afa 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -13,8 +13,6 @@ config SCORE
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select CLONE_BACKWARDS
choice
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 16e41fe1a41..cebaff8069a 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
header-y +=
diff --git a/arch/score/include/asm/kvm_para.h b/arch/score/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b95..00000000000
--- a/arch/score/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h
index e89dc9b1ef4..abc279d96b7 100644
--- a/arch/score/include/asm/ptrace.h
+++ b/arch/score/include/asm/ptrace.h
@@ -1,78 +1,8 @@
#ifndef _ASM_SCORE_PTRACE_H
#define _ASM_SCORE_PTRACE_H
-#define PTRACE_GETREGS 12
-#define PTRACE_SETREGS 13
+#include <uapi/asm/ptrace.h>
-#define PC 32
-#define CONDITION 33
-#define ECR 34
-#define EMA 35
-#define CEH 36
-#define CEL 37
-#define COUNTER 38
-#define LDCR 39
-#define STCR 40
-#define PSR 41
-
-#define SINGLESTEP16_INSN 0x7006
-#define SINGLESTEP32_INSN 0x840C8000
-#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */
-#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */
-
-/* Define instruction mask */
-#define INSN32_MASK 0x80008000
-
-#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */
-#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */
-
-#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */
-#define B32M 0xFC008000
-#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */
-#define BL32M B32
-#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */
-#define BR32M 0xFFE0807E
-#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */
-#define BRL32M BR32M
-
-#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32)
-
-#define J16 0x3000 /* 0_011_....... */
-#define J16M 0xF000
-#define B16 0x4000 /* 0_100_....... */
-#define B16M 0xF000
-#define BR16 0x0004 /* 0_000.......0100 */
-#define BR16M 0xF00F
-#define B16_SET (J16 | B16 | BR16)
-
-
-/*
- * This struct defines the way the registers are stored on the stack during a
- * system call/exception. As usual the registers k0/k1 aren't being saved.
- */
-struct pt_regs {
- unsigned long pad0[6]; /* stack arguments */
- unsigned long orig_r4;
- unsigned long orig_r7;
- long is_syscall;
-
- unsigned long regs[32];
-
- unsigned long cel;
- unsigned long ceh;
-
- unsigned long sr0; /* cnt */
- unsigned long sr1; /* lcr */
- unsigned long sr2; /* scr */
-
- unsigned long cp0_epc;
- unsigned long cp0_ema;
- unsigned long cp0_psr;
- unsigned long cp0_ecr;
- unsigned long cp0_condition;
-};
-
-#ifdef __KERNEL__
struct task_struct;
@@ -83,6 +13,7 @@ struct task_struct;
#define instruction_pointer(regs) ((unsigned long)(regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(r) ((unsigned long)(r)->regs[0])
extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
@@ -91,6 +22,4 @@ extern int read_tsk_short(struct task_struct *, unsigned long,
#define arch_has_single_step() (1)
-#endif /* __KERNEL__ */
-
#endif /* _ASM_SCORE_PTRACE_H */
diff --git a/arch/score/include/asm/setup.h b/arch/score/include/asm/setup.h
index 3cb944dc68d..1f3aa7262fa 100644
--- a/arch/score/include/asm/setup.h
+++ b/arch/score/include/asm/setup.h
@@ -1,11 +1,8 @@
#ifndef _ASM_SCORE_SETUP_H
#define _ASM_SCORE_SETUP_H
-#define COMMAND_LINE_SIZE 256
-#define MEMORY_START 0
-#define MEMORY_SIZE 0x2000000
+#include <uapi/asm/setup.h>
-#ifdef __KERNEL__
extern void pagetable_init(void);
extern void pgd_init(unsigned long page);
@@ -36,6 +33,4 @@ extern void debug_exception_vector(void);
extern void general_exception_vector(void);
extern void interrupt_exception_vector(void);
-#endif /* __KERNEL__ */
-
#endif /* _ASM_SCORE_SETUP_H */
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index baebb3da1d4..040178cdb3e 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -1,3 +1,34 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += byteorder.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += kvm_para.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += unistd.h
diff --git a/arch/score/include/asm/auxvec.h b/arch/score/include/uapi/asm/auxvec.h
index f69151565ae..f69151565ae 100644
--- a/arch/score/include/asm/auxvec.h
+++ b/arch/score/include/uapi/asm/auxvec.h
diff --git a/arch/score/include/asm/bitsperlong.h b/arch/score/include/uapi/asm/bitsperlong.h
index 86ff337aa45..86ff337aa45 100644
--- a/arch/score/include/asm/bitsperlong.h
+++ b/arch/score/include/uapi/asm/bitsperlong.h
diff --git a/arch/score/include/asm/byteorder.h b/arch/score/include/uapi/asm/byteorder.h
index 88cbebc7921..88cbebc7921 100644
--- a/arch/score/include/asm/byteorder.h
+++ b/arch/score/include/uapi/asm/byteorder.h
diff --git a/arch/score/include/asm/errno.h b/arch/score/include/uapi/asm/errno.h
index 29ff39d5ab4..29ff39d5ab4 100644
--- a/arch/score/include/asm/errno.h
+++ b/arch/score/include/uapi/asm/errno.h
diff --git a/arch/score/include/asm/fcntl.h b/arch/score/include/uapi/asm/fcntl.h
index 03968a3103a..03968a3103a 100644
--- a/arch/score/include/asm/fcntl.h
+++ b/arch/score/include/uapi/asm/fcntl.h
diff --git a/arch/score/include/asm/ioctl.h b/arch/score/include/uapi/asm/ioctl.h
index a351d2194bf..a351d2194bf 100644
--- a/arch/score/include/asm/ioctl.h
+++ b/arch/score/include/uapi/asm/ioctl.h
diff --git a/arch/score/include/asm/ioctls.h b/arch/score/include/uapi/asm/ioctls.h
index ed01d2b9aea..ed01d2b9aea 100644
--- a/arch/score/include/asm/ioctls.h
+++ b/arch/score/include/uapi/asm/ioctls.h
diff --git a/arch/score/include/asm/ipcbuf.h b/arch/score/include/uapi/asm/ipcbuf.h
index e082ceff181..e082ceff181 100644
--- a/arch/score/include/asm/ipcbuf.h
+++ b/arch/score/include/uapi/asm/ipcbuf.h
diff --git a/arch/microblaze/include/asm/kvm_para.h b/arch/score/include/uapi/asm/kvm_para.h
index 14fab8f0b95..14fab8f0b95 100644
--- a/arch/microblaze/include/asm/kvm_para.h
+++ b/arch/score/include/uapi/asm/kvm_para.h
diff --git a/arch/score/include/asm/mman.h b/arch/score/include/uapi/asm/mman.h
index 84d85ddfed8..84d85ddfed8 100644
--- a/arch/score/include/asm/mman.h
+++ b/arch/score/include/uapi/asm/mman.h
diff --git a/arch/score/include/asm/msgbuf.h b/arch/score/include/uapi/asm/msgbuf.h
index 7506721e29f..7506721e29f 100644
--- a/arch/score/include/asm/msgbuf.h
+++ b/arch/score/include/uapi/asm/msgbuf.h
diff --git a/arch/score/include/asm/param.h b/arch/score/include/uapi/asm/param.h
index 916b8690b6a..916b8690b6a 100644
--- a/arch/score/include/asm/param.h
+++ b/arch/score/include/uapi/asm/param.h
diff --git a/arch/score/include/asm/poll.h b/arch/score/include/uapi/asm/poll.h
index 18532db0286..18532db0286 100644
--- a/arch/score/include/asm/poll.h
+++ b/arch/score/include/uapi/asm/poll.h
diff --git a/arch/score/include/asm/posix_types.h b/arch/score/include/uapi/asm/posix_types.h
index b88acf80048..b88acf80048 100644
--- a/arch/score/include/asm/posix_types.h
+++ b/arch/score/include/uapi/asm/posix_types.h
diff --git a/arch/score/include/uapi/asm/ptrace.h b/arch/score/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..f59771a3f12
--- /dev/null
+++ b/arch/score/include/uapi/asm/ptrace.h
@@ -0,0 +1,76 @@
+#ifndef _UAPI_ASM_SCORE_PTRACE_H
+#define _UAPI_ASM_SCORE_PTRACE_H
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+
+#define PC 32
+#define CONDITION 33
+#define ECR 34
+#define EMA 35
+#define CEH 36
+#define CEL 37
+#define COUNTER 38
+#define LDCR 39
+#define STCR 40
+#define PSR 41
+
+#define SINGLESTEP16_INSN 0x7006
+#define SINGLESTEP32_INSN 0x840C8000
+#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */
+#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */
+
+/* Define instruction mask */
+#define INSN32_MASK 0x80008000
+
+#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */
+#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */
+
+#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */
+#define B32M 0xFC008000
+#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */
+#define BL32M B32
+#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */
+#define BR32M 0xFFE0807E
+#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */
+#define BRL32M BR32M
+
+#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32)
+
+#define J16 0x3000 /* 0_011_....... */
+#define J16M 0xF000
+#define B16 0x4000 /* 0_100_....... */
+#define B16M 0xF000
+#define BR16 0x0004 /* 0_000.......0100 */
+#define BR16M 0xF00F
+#define B16_SET (J16 | B16 | BR16)
+
+
+/*
+ * This struct defines the way the registers are stored on the stack during a
+ * system call/exception. As usual the registers k0/k1 aren't being saved.
+ */
+struct pt_regs {
+ unsigned long pad0[6]; /* stack arguments */
+ unsigned long orig_r4;
+ unsigned long orig_r7;
+ long is_syscall;
+
+ unsigned long regs[32];
+
+ unsigned long cel;
+ unsigned long ceh;
+
+ unsigned long sr0; /* cnt */
+ unsigned long sr1; /* lcr */
+ unsigned long sr2; /* scr */
+
+ unsigned long cp0_epc;
+ unsigned long cp0_ema;
+ unsigned long cp0_psr;
+ unsigned long cp0_ecr;
+ unsigned long cp0_condition;
+};
+
+
+#endif /* _UAPI_ASM_SCORE_PTRACE_H */
diff --git a/arch/score/include/asm/resource.h b/arch/score/include/uapi/asm/resource.h
index 9ce22bc7b47..9ce22bc7b47 100644
--- a/arch/score/include/asm/resource.h
+++ b/arch/score/include/uapi/asm/resource.h
diff --git a/arch/score/include/asm/sembuf.h b/arch/score/include/uapi/asm/sembuf.h
index dae5e835ce9..dae5e835ce9 100644
--- a/arch/score/include/asm/sembuf.h
+++ b/arch/score/include/uapi/asm/sembuf.h
diff --git a/arch/score/include/uapi/asm/setup.h b/arch/score/include/uapi/asm/setup.h
new file mode 100644
index 00000000000..ab9dbdb59bb
--- /dev/null
+++ b/arch/score/include/uapi/asm/setup.h
@@ -0,0 +1,9 @@
+#ifndef _UAPI_ASM_SCORE_SETUP_H
+#define _UAPI_ASM_SCORE_SETUP_H
+
+#define COMMAND_LINE_SIZE 256
+#define MEMORY_START 0
+#define MEMORY_SIZE 0x2000000
+
+
+#endif /* _UAPI_ASM_SCORE_SETUP_H */
diff --git a/arch/score/include/asm/shmbuf.h b/arch/score/include/uapi/asm/shmbuf.h
index c85b2429ba2..c85b2429ba2 100644
--- a/arch/score/include/asm/shmbuf.h
+++ b/arch/score/include/uapi/asm/shmbuf.h
diff --git a/arch/score/include/asm/sigcontext.h b/arch/score/include/uapi/asm/sigcontext.h
index 5ffda39ddb9..5ffda39ddb9 100644
--- a/arch/score/include/asm/sigcontext.h
+++ b/arch/score/include/uapi/asm/sigcontext.h
diff --git a/arch/score/include/asm/siginfo.h b/arch/score/include/uapi/asm/siginfo.h
index 87ca35607a2..87ca35607a2 100644
--- a/arch/score/include/asm/siginfo.h
+++ b/arch/score/include/uapi/asm/siginfo.h
diff --git a/arch/score/include/asm/signal.h b/arch/score/include/uapi/asm/signal.h
index 2605bc06b64..2605bc06b64 100644
--- a/arch/score/include/asm/signal.h
+++ b/arch/score/include/uapi/asm/signal.h
diff --git a/arch/score/include/asm/socket.h b/arch/score/include/uapi/asm/socket.h
index 612a70e385b..612a70e385b 100644
--- a/arch/score/include/asm/socket.h
+++ b/arch/score/include/uapi/asm/socket.h
diff --git a/arch/score/include/asm/sockios.h b/arch/score/include/uapi/asm/sockios.h
index ba825648018..ba825648018 100644
--- a/arch/score/include/asm/sockios.h
+++ b/arch/score/include/uapi/asm/sockios.h
diff --git a/arch/score/include/asm/stat.h b/arch/score/include/uapi/asm/stat.h
index 5037055500a..5037055500a 100644
--- a/arch/score/include/asm/stat.h
+++ b/arch/score/include/uapi/asm/stat.h
diff --git a/arch/score/include/asm/statfs.h b/arch/score/include/uapi/asm/statfs.h
index 36e41004e99..36e41004e99 100644
--- a/arch/score/include/asm/statfs.h
+++ b/arch/score/include/uapi/asm/statfs.h
diff --git a/arch/score/include/asm/swab.h b/arch/score/include/uapi/asm/swab.h
index fadc3cc6d8a..fadc3cc6d8a 100644
--- a/arch/score/include/asm/swab.h
+++ b/arch/score/include/uapi/asm/swab.h
diff --git a/arch/score/include/asm/termbits.h b/arch/score/include/uapi/asm/termbits.h
index 9a95c141243..9a95c141243 100644
--- a/arch/score/include/asm/termbits.h
+++ b/arch/score/include/uapi/asm/termbits.h
diff --git a/arch/score/include/asm/termios.h b/arch/score/include/uapi/asm/termios.h
index 40984e811ad..40984e811ad 100644
--- a/arch/score/include/asm/termios.h
+++ b/arch/score/include/uapi/asm/termios.h
diff --git a/arch/score/include/asm/types.h b/arch/score/include/uapi/asm/types.h
index 2140032778e..2140032778e 100644
--- a/arch/score/include/asm/types.h
+++ b/arch/score/include/uapi/asm/types.h
diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/uapi/asm/unistd.h
index 56001c93095..9cb4260a5f3 100644
--- a/arch/score/include/asm/unistd.h
+++ b/arch/score/include/uapi/asm/unistd.h
@@ -4,7 +4,6 @@
#define __ARCH_WANT_SYSCALL_NO_FLAGS
#define __ARCH_WANT_SYSCALL_OFF_T
#define __ARCH_WANT_SYSCALL_DEPRECATED
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8451317eed5..babc2b826c5 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -40,8 +40,6 @@ config SUPERH
select GENERIC_STRNLEN_USER
select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 6cba0a7068b..d71a0bcf814 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -1,5 +1,5 @@
/*
- * Data Technology Inc. ESPT-GIGA board suport
+ * Data Technology Inc. ESPT-GIGA board support
*
* Copyright (C) 2008, 2009 Renesas Solutions Corp.
* Copyright (C) 2008, 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 9e963c1d144..5620e33c18a 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -179,11 +179,6 @@ static int ap320_wvga_set_brightness(int brightness)
return 0;
}
-static int ap320_wvga_get_brightness(void)
-{
- return gpio_get_value(GPIO_PTS3);
-}
-
static void ap320_wvga_power_on(void)
{
msleep(100);
@@ -232,7 +227,6 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.name = "sh_mobile_lcdc_bl",
.max_brightness = 1,
.set_brightness = ap320_wvga_set_brightness,
- .get_brightness = ap320_wvga_get_brightness,
},
}
};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 64559e8af14..3fede4556c9 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -329,11 +329,6 @@ static int ecovec24_set_brightness(int brightness)
return 0;
}
-static int ecovec24_get_brightness(void)
-{
- return gpio_get_value(GPIO_PTR1);
-}
-
static struct sh_mobile_lcdc_info lcdc_info = {
.ch[0] = {
.interface_type = RGB18,
@@ -347,7 +342,6 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.name = "sh_mobile_lcdc_bl",
.max_brightness = 1,
.set_brightness = ecovec24_set_brightness,
- .get_brightness = ecovec24_get_brightness,
},
}
};
diff --git a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
index c148b36ecb6..c6205033262 100644
--- a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
+++ b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
@@ -283,7 +283,7 @@ void kfr2r09_lcd_start(void *sohandle, struct sh_mobile_lcdc_sys_bus_ops *so)
#define MAIN_MLED4 0x40
#define MAIN_MSW 0x80
-static int kfr2r09_lcd_backlight(int on)
+int kfr2r09_lcd_set_brightness(int brightness)
{
struct i2c_adapter *a;
struct i2c_msg msg;
@@ -295,7 +295,7 @@ static int kfr2r09_lcd_backlight(int on)
return -ENODEV;
buf[0] = 0x00;
- if (on)
+ if (brightness)
buf[1] = CTRL_CPSW | CTRL_C10 | CTRL_CKSW;
else
buf[1] = 0;
@@ -309,7 +309,7 @@ static int kfr2r09_lcd_backlight(int on)
return -ENODEV;
buf[0] = 0x01;
- if (on)
+ if (brightness)
buf[1] = MAIN_MSW | MAIN_MLED4 | 0x0c;
else
buf[1] = 0;
@@ -324,13 +324,3 @@ static int kfr2r09_lcd_backlight(int on)
return 0;
}
-
-void kfr2r09_lcd_on(void)
-{
- kfr2r09_lcd_backlight(1);
-}
-
-void kfr2r09_lcd_off(void)
-{
- kfr2r09_lcd_backlight(0);
-}
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index f2a4304fbe2..ab502f12ef5 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -158,8 +158,11 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
.height = 58,
.setup_sys = kfr2r09_lcd_setup,
.start_transfer = kfr2r09_lcd_start,
- .display_on = kfr2r09_lcd_on,
- .display_off = kfr2r09_lcd_off,
+ },
+ .bl_info = {
+ .name = "sh_mobile_lcdc_bl",
+ .max_brightness = 1,
+ .set_brightness = kfr2r09_lcd_set_brightness,
},
.sys_bus_cfg = {
.ldmt2r = 0x07010904,
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 8bd965e00a1..b437f2c780b 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -46,6 +46,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 43d3f26b2ea..012004ed333 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -28,7 +28,6 @@
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_SYS_RT_SIGACTION
-# define __ARCH_WANT_SYS_EXECVE
# define __ARCH_WANT_SYS_FORK
# define __ARCH_WANT_SYS_VFORK
# define __ARCH_WANT_SYS_CLONE
diff --git a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
index ba3d93d333f..c20c9e5f5ea 100644
--- a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
+++ b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
@@ -4,15 +4,13 @@
#include <video/sh_mobile_lcdc.h>
#if defined(CONFIG_FB_SH_MOBILE_LCDC) || defined(CONFIG_FB_SH_MOBILE_LCDC_MODULE)
-void kfr2r09_lcd_on(void);
-void kfr2r09_lcd_off(void);
+int kfr2r09_lcd_set_brightness(int brightness);
int kfr2r09_lcd_setup(void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
void kfr2r09_lcd_start(void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
#else
-static void kfr2r09_lcd_on(void) {}
-static void kfr2r09_lcd_off(void) {}
+static int kfr2r09_lcd_set_brightness(int brightness) { return 0; }
static int kfr2r09_lcd_setup(void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops)
{
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index cb8f9920f4d..0f7c852f355 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -111,6 +111,7 @@ config VSYSCALL
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL
+ select ARCH_WANT_NUMA_VARIABLE_LOCALITY
default n
help
Some SH systems have many various memories scattered around
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index cbbdcad8fcb..1f49c28affa 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -301,17 +301,6 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
-static void out_of_memory(void)
-{
- /*
- * We ran out of memory, call the OOM killer, and return the userspace
- * (which will retry the fault, or kill us if we got oom-killed):
- */
- up_read(&current->mm->mmap_sem);
-
- pagefault_out_of_memory();
-}
-
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
@@ -353,8 +342,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
no_context(regs, error_code, address);
return 1;
}
+ up_read(&current->mm->mmap_sem);
- out_of_memory();
+ /*
+ * We ran out of memory, call the OOM killer, and return the
+ * userspace (which will retry the fault, or kill us if we got
+ * oom-killed):
+ */
+ pagefault_out_of_memory();
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0c7d365fa40..9f2edb5c555 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -41,8 +41,6 @@ config SPARC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
index 23f6cbb910d..1cda8aa7cb8 100644
--- a/arch/sparc/crypto/aes_asm.S
+++ b/arch/sparc/crypto/aes_asm.S
@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
-10: ldx [%o1 + 0x00], %g3
+10: ldd [%o0 + 0xd0], %f56
+ ldd [%o0 + 0xd8], %f58
+ ldd [%o0 + 0xe0], %f60
+ ldd [%o0 + 0xe8], %f62
+ ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
ldx [%o0 - 0x10], %g1
subcc %o3, 0x10, %o3
+ ldx [%o0 - 0x08], %g2
be 10f
- ldx [%o0 - 0x08], %g2
- sub %o0, 0xf0, %o0
+ sub %o0, 0xf0, %o0
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
-10: ldx [%o1 + 0x00], %g3
+10: ldd [%o0 + 0x18], %f56
+ ldd [%o0 + 0x10], %f58
+ ldd [%o0 + 0x08], %f60
+ ldd [%o0 + 0x00], %f62
+ ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
- ldd [%o0 + 0xd0], %f56
+10: ldd [%o0 + 0xd0], %f56
ldd [%o0 + 0xd8], %f58
ldd [%o0 + 0xe0], %f60
ldd [%o0 + 0xe8], %f62
-10: xor %g1, %g3, %o5
+ xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 3965d1d36df..503e6d96ad4 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
while ((nbytes = walk.nbytes)) {
@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
while ((nbytes = walk.nbytes)) {
@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
@@ -329,6 +333,22 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
return err;
}
+static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
+ keystream, AES_BLOCK_SIZE);
+ crypto_xor((u8 *) keystream, src, nbytes);
+ memcpy(dst, keystream, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
static int ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
@@ -338,10 +358,11 @@ static int ctr_crypt(struct blkcipher_desc *desc,
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_encrypt_keys(&ctx->key[0]);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
unsigned int block_len = nbytes & AES_BLOCK_MASK;
if (likely(block_len)) {
@@ -353,6 +374,10 @@ static int ctr_crypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
+ if (walk.nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
fprs_write(0);
return err;
}
@@ -418,7 +443,7 @@ static struct crypto_alg algs[] = { {
.cra_driver_name = "ctr-aes-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
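
ctr_crypt_final() encrypts the counter block once and XORs only the remaining tail bytes into the output, which is why cra_blocksize for the CTR algorithm drops to 1 above. A self-contained sketch of that partial-block step with a dummy keystream routine standing in for the sparc64 AES opcodes (illustrative only, NOT real AES):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* stand-in for encrypting the counter block with the cipher */
static void keystream_block(const unsigned char *ctr, unsigned char *out)
{
        int i;

        for (i = 0; i < BLOCK_SIZE; i++)
                out[i] = ctr[i] ^ 0xA5; /* dummy cipher, NOT AES */
}

/* big-endian counter increment, like crypto_inc() */
static void ctr_inc(unsigned char *ctr)
{
        int i;

        for (i = BLOCK_SIZE - 1; i >= 0 && ++ctr[i] == 0; i--)
                ;
}

static void ctr_final(unsigned char *ctr, const unsigned char *src,
                      unsigned char *dst, unsigned int nbytes)
{
        unsigned char ks[BLOCK_SIZE];
        unsigned int i;

        keystream_block(ctr, ks);
        for (i = 0; i < nbytes; i++)    /* XOR only the tail bytes */
                dst[i] = src[i] ^ ks[i];
        ctr_inc(ctr);
}

int main(void)
{
        unsigned char ctr[BLOCK_SIZE] = { 0 };
        unsigned char in[5] = "tail";
        unsigned char out[5];

        ctr_final(ctr, in, out, sizeof(in));
        printf("encrypted %zu tail bytes\n", sizeof(in));
        return 0;
}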
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 62c89af3fd3..888f6260b4e 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
key = &ctx->encrypt_key[0];
@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
key = &ctx->encrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
key = &ctx->decrypt_key[0];
camellia_sparc64_load_keys(key, ctx->key_len);
diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
index 30b6e90b28b..b5c8fc269b5 100644
--- a/arch/sparc/crypto/des_asm.S
+++ b/arch/sparc/crypto/des_asm.S
@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt)
1: ldd [%o1 + 0x00], %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
+ add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 41524cebcc4..3065bc61f9d 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
while ((nbytes = walk.nbytes)) {
@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
while ((nbytes = walk.nbytes)) {
@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
if (encrypt)
K = &ctx->encrypt_expkey[0];
@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
K = &ctx->encrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
K = &ctx->decrypt_expkey[0];
des3_ede_sparc64_load_keys(K);
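Every blkcipher entry point in these sparc64 glue files now clears CRYPTO_TFM_REQ_MAY_SLEEP right after the walk is initialised: the loops run with the FPU enabled for the whole walk (note the fprs_write() bracketing above), so blkcipher_walk_done() must not be allowed to sleep in between. The shared pattern, condensed into one hedged sketch (16-byte blocks assumed, the actual processing elided):

	static int ecb_crypt_pattern(struct blkcipher_desc *desc,
				     struct scatterlist *dst,
				     struct scatterlist *src,
				     unsigned int nbytes)
	{
		struct blkcipher_walk walk;
		int err;

		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);
		/* FPU state stays live across the walk: never sleep here. */
		desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

		while ((nbytes = walk.nbytes)) {
			unsigned int done = nbytes & ~15U;

			/* ... run the hardware opcodes over `done` bytes ... */
			err = blkcipher_walk_done(desc, &walk, nbytes - done);
		}
		return err;
	}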
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 8493fd3c7ba..05fe53f5346 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -59,6 +59,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
+ debug_dma_mapping_error(dev, dma_addr);
return (dma_addr == DMA_ERROR_CODE);
}
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 8c5eed6d267..9661e9bc7bb 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- ptep_set_wrprotect(mm, addr, ptep);
+ pte_t old_pte = *ptep;
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ int changed = !pte_same(*ptep, pte);
+ if (changed) {
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ flush_tlb_page(vma, addr);
+ }
+ return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 95515f1e7ce..7870be0f5ad 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -617,6 +617,12 @@ static inline unsigned long pte_present(pte_t pte)
return val;
}
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(pte_t a)
+{
+ return pte_val(a) & _PAGE_VALID;
+}
+
static inline unsigned long pte_special(pte_t pte)
{
return pte_val(pte) & _PAGE_SPECIAL;
@@ -802,7 +808,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
- if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
+ if (likely(mm != &init_mm) && pte_accessible(orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index c3e5d8b6417..87ce24c5eb9 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -45,8 +45,8 @@
#define __ARCH_WANT_COMPAT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#endif
-#define __ARCH_WANT_SYS_EXECVE
/*
* "Conditional" syscalls
diff --git a/arch/sparc/include/uapi/asm/signal.h b/arch/sparc/include/uapi/asm/signal.h
index 1a041892538..c4ffd6c9710 100644
--- a/arch/sparc/include/uapi/asm/signal.h
+++ b/arch/sparc/include/uapi/asm/signal.h
@@ -147,12 +147,6 @@ struct sigstack {
#define SIG_UNBLOCK 0x02 /* for unblocking signals */
#define SIG_SETMASK 0x04 /* for setting the signal mask */
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 4096
#define SIGSTKSZ 16384
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f1ddc0d2367..4435488ebe2 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -43,10 +43,6 @@ void *module_alloc(unsigned long size)
{
void *ret;
- /* We handle the zero case fine, unlike vmalloc */
- if (size == 0)
- return NULL;
-
ret = module_map(size);
if (ret)
memset(ret, 0, size);
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 03c7e929ec3..4a4cdc633f6 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -211,20 +211,6 @@ asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
return sys_sysfs(option, arg1, arg2);
}
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
- set_fs (old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
-}
-
asmlinkage long compat_sys_rt_sigprocmask(int how,
compat_sigset_t __user *set,
compat_sigset_t __user *oset,
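The sparc copy of compat_sys_sched_rr_get_interval() is removed here, and the tile copy further down, in favour of a generic compat implementation that each architecture now opts into via the new __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL define seen in the unistd.h hunks. The removed body above already shows the whole idea; as a sketch, the shared wrapper does the same set_fs() round trip (modelled on the deleted code, not quoted from the generic source):

	asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
			struct compat_timespec __user *interval)
	{
		struct timespec t;
		int ret;
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		ret = sys_sched_rr_get_interval(pid,
				(struct timespec __user *)&t);
		set_fs(old_fs);
		if (put_compat_timespec(&t, interval))
			return -EFAULT;
		return ret;
	}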
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index ea7f61e8bc9..875d008828b 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -21,8 +21,6 @@ config TILE
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index ca61fb4296b..88f3c227afd 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -296,8 +296,6 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
-long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
/* Assembly trampoline to avoid clobbering r0. */
long _compat_sys_rt_sigreturn(void);
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 4b6247d1a31..f2ff191376b 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -72,6 +72,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
+ debug_dma_mapping_error(dev, dma_addr);
return get_dma_ops(dev)->mapping_error(dev, dma_addr);
}
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index b73e1039c91..ff8a9340882 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -170,4 +170,6 @@ do { \
#endif /* CONFIG_COMPAT */
+#define CORE_DUMP_USE_REGSET
+
#endif /* _ASM_TILE_ELF_H */
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 1a4fd9ab0ee..2e83fc1b946 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -24,8 +24,7 @@ typedef unsigned long pt_reg_t;
#include <uapi/asm/ptrace.h>
#define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE)
-#define PT_TRACE_MIGRATE 0x00080000
-#define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE)
+#define PT_TRACE_MIGRATE PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)
/* Flag bits in pt_regs.flags */
#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
@@ -36,6 +35,7 @@ typedef unsigned long pt_reg_t;
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index b51c6ee3cd6..6ac21034f69 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -14,8 +14,8 @@
/* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#endif
#define __ARCH_WANT_SYS_NEWFSTATAT
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h
index c717d0fec72..7757e1985fb 100644
--- a/arch/tile/include/uapi/asm/ptrace.h
+++ b/arch/tile/include/uapi/asm/ptrace.h
@@ -81,8 +81,14 @@ struct pt_regs {
#define PTRACE_SETFPREGS 15
/* Support TILE-specific ptrace options, with events starting at 16. */
-#define PTRACE_O_TRACEMIGRATE 0x00010000
#define PTRACE_EVENT_MIGRATE 16
+#define PTRACE_O_TRACEMIGRATE (1 << PTRACE_EVENT_MIGRATE)
+/*
+ * Flag bits in pt_regs.flags that are part of the ptrace API.
+ * We start our numbering higher up to avoid confusion with the
+ * non-ABI kernel-internal values that use the low 16 bits.
+ */
+#define PT_FLAGS_COMPAT 0x10000 /* process is an -m32 compat process */
#endif /* _UAPI_ASM_TILE_PTRACE_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 9cd7cb6041c..7f72401b4f4 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -76,24 +76,6 @@ long compat_sys_fallocate(int fd, int mode,
((loff_t)len_hi << 32) | len_lo);
}
-
-
-long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid,
- (struct timespec __force __user *)&t);
- set_fs(old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
-}
-
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 243ffebe38d..4918d91bc3a 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -42,8 +42,6 @@ void *module_alloc(unsigned long size)
int i = 0;
int npages;
- if (size == 0)
- return NULL;
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 759822687e8..aac1cd58696 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
u16 new_values;
/* Scan for the smallest maximum payload size. */
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
u32 devcap;
int max_payload;
@@ -260,7 +260,7 @@ static void __devinit fixup_read_and_payload_sizes(void)
/* Now, set the max_payload_size for all devices to that value. */
new_values = (max_read_size << 12) | (smallest_max_payload << 5);
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
+ for_each_pci_dev(dev)
pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
new_values);
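The two open-coded pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev) loops are replaced with the equivalent for_each_pci_dev() helper; behaviour is unchanged, since the macro (in <linux/pci.h>, quoted from memory and therefore only indicative) is defined in terms of the same call:

	#define for_each_pci_dev(d) \
		while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)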
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 2ba6d052f85..94810d4a633 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1047,8 +1047,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
/* Called for each device after PCI setup is done. */
-static void __init
-pcibios_fixup_final(struct pci_dev *pdev)
+static void pcibios_fixup_final(struct pci_dev *pdev)
{
set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e92e40527d6..9835312d5a9 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -19,7 +19,10 @@
#include <linux/kprobes.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
#include <asm/traps.h>
+#include <arch/chip.h>
void user_enable_single_step(struct task_struct *child)
{
@@ -45,6 +48,100 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
+/*
+ * Get registers from task and ready the result for userspace.
+ * Note that we localize the API issues to getregs() and putregs() at
+ * some cost in performance, e.g. we need a full pt_regs copy for
+ * PEEKUSR, and two copies for POKEUSR. But in general we expect
+ * GETREGS/PUTREGS to be the API of choice anyway.
+ */
+static char *getregs(struct task_struct *child, struct pt_regs *uregs)
+{
+ *uregs = *task_pt_regs(child);
+
+ /* Set up flags ABI bits. */
+ uregs->flags = 0;
+#ifdef CONFIG_COMPAT
+ if (task_thread_info(child)->status & TS_COMPAT)
+ uregs->flags |= PT_FLAGS_COMPAT;
+#endif
+
+ return (char *)uregs;
+}
+
+/* Put registers back to task. */
+static void putregs(struct task_struct *child, struct pt_regs *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+
+ /* Don't allow overwriting the kernel-internal flags word. */
+ uregs->flags = regs->flags;
+
+ /* Only allow setting the ICS bit in the ex1 word. */
+ uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
+
+ *regs = *uregs;
+}
+
+enum tile_regset {
+ REGSET_GPR,
+};
+
+static int tile_gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs regs;
+
+ getregs(target, &regs);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
+ sizeof(regs));
+}
+
+static int tile_gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs regs;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ putregs(target, &regs);
+
+ return 0;
+}
+
+static const struct user_regset tile_user_regset[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .get = tile_gpr_get,
+ .set = tile_gpr_set,
+ },
+};
+
+static const struct user_regset_view tile_user_regset_view = {
+ .name = CHIP_ARCH_NAME,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = tile_user_regset,
+ .n = ARRAY_SIZE(tile_user_regset),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &tile_user_regset_view;
+}
+
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
@@ -53,14 +150,13 @@ long arch_ptrace(struct task_struct *child, long request,
long ret = -EIO;
char *childreg;
struct pt_regs copyregs;
- int ex1_offset;
switch (request) {
case PTRACE_PEEKUSR: /* Read register from pt_regs. */
if (addr >= PTREGS_SIZE)
break;
- childreg = (char *)task_pt_regs(child) + addr;
+ childreg = getregs(child, &copyregs) + addr;
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
@@ -79,17 +175,7 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_POKEUSR: /* Write register in pt_regs. */
if (addr >= PTREGS_SIZE)
break;
- childreg = (char *)task_pt_regs(child) + addr;
-
- /* Guard against overwrites of the privilege level. */
- ex1_offset = PTREGS_OFFSET_EX1;
-#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
- if (is_compat_task()) /* point at low word */
- ex1_offset += sizeof(compat_long_t);
-#endif
- if (addr == ex1_offset)
- data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
-
+ childreg = getregs(child, &copyregs) + addr;
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
@@ -102,24 +188,20 @@ long arch_ptrace(struct task_struct *child, long request,
break;
*(long *)childreg = data;
}
+ putregs(child, &copyregs);
ret = 0;
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
- if (copy_to_user(datap, task_pt_regs(child),
- sizeof(struct pt_regs)) == 0) {
- ret = 0;
- }
+ ret = copy_regset_to_user(child, &tile_user_regset_view,
+ REGSET_GPR, 0,
+ sizeof(struct pt_regs), datap);
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
- if (copy_from_user(&copyregs, datap,
- sizeof(struct pt_regs)) == 0) {
- copyregs.ex1 =
- PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
- *task_pt_regs(child) = copyregs;
- ret = 0;
- }
+ ret = copy_regset_from_user(child, &tile_user_regset_view,
+ REGSET_GPR, 0,
+ sizeof(struct pt_regs), datap);
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
@@ -128,12 +210,16 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_SETOPTIONS:
/* Support TILE-specific ptrace options. */
- child->ptrace &= ~PT_TRACE_MASK_TILE;
+ BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
tmp = data & PTRACE_O_MASK_TILE;
data &= ~PTRACE_O_MASK_TILE;
ret = ptrace_request(child, request, addr, data);
- if (tmp & PTRACE_O_TRACEMIGRATE)
- child->ptrace |= PT_TRACE_MIGRATE;
+ if (ret == 0) {
+ unsigned int flags = child->ptrace;
+ flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
+ flags |= (tmp << PT_OPT_FLAG_SHIFT);
+ child->ptrace = flags;
+ }
break;
default:
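The PTRACE_SETOPTIONS rework above drops the private PT_TRACE_MIGRATE bookkeeping and stores the arch-specific option bit in the same shifted form the core ptrace code uses, which is why the uapi header now defines PTRACE_O_TRACEMIGRATE as (1 << PTRACE_EVENT_MIGRATE). A short worked check of the equivalence (PT_OPT_FLAG_SHIFT and PT_EVENT_FLAG() come from <linux/ptrace.h>):

	/*
	 * PTRACE_O_TRACEMIGRATE                      == 1 << PTRACE_EVENT_MIGRATE
	 * PTRACE_O_TRACEMIGRATE << PT_OPT_FLAG_SHIFT
	 *     == 1 << (PT_OPT_FLAG_SHIFT + PTRACE_EVENT_MIGRATE)
	 *     == PT_EVENT_FLAG(PTRACE_EVENT_MIGRATE)
	 *     == PT_TRACE_MIGRATE   (as redefined in asm/ptrace.h above)
	 *
	 * so existing tests of (child->ptrace & PT_TRACE_MIGRATE) keep working
	 * after the shifted store in arch_ptrace().
	 */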
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 49e3b49e552..4bd82ac0210 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -123,7 +123,7 @@ void mconsole_log(struct mc_request *req)
void mconsole_proc(struct mc_request *req)
{
- struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt;
+ struct vfsmount *mnt = task_active_pid_ns(current)->proc_mnt;
char *buf;
int len;
struct file *file;
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index db18eb6124e..48ccf718e29 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -132,8 +132,3 @@ long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
siginitset(&blocked, mask);
return sigsuspend(&blocked);
}
-
-long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
-{
- return do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs));
-}
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index c4fbb21e802..60651df5f95 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -16,8 +16,6 @@ config UNICORE32
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
help
UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h
index 726749dab52..9df53d991c7 100644
--- a/arch/unicore32/include/asm/ptrace.h
+++ b/arch/unicore32/include/asm/ptrace.h
@@ -54,6 +54,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
}
#define instruction_pointer(regs) ((regs)->UCreg_pc)
+#define user_stack_pointer(regs) ((regs)->UCreg_sp)
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h
index 00cf5e286fc..d4cc4559d84 100644
--- a/arch/unicore32/include/uapi/asm/unistd.h
+++ b/arch/unicore32/include/uapi/asm/unistd.h
@@ -12,5 +12,4 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 8fbe8577f5e..16bd1495b93 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -27,9 +27,6 @@ void *module_alloc(unsigned long size)
struct vm_struct *area;
size = PAGE_ALIGN(size);
- if (!size)
- return NULL;
-
area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
if (!area)
return NULL;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9195fd80e11..79795af5981 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,8 @@ config X86
def_bool y
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
@@ -108,11 +110,10 @@ config X86
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select MODULES_USE_ELF_REL if X86_32
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
+ select GENERIC_SIGALTSTACK
config INSTRUCTION_DECODER
def_bool y
@@ -370,6 +371,7 @@ config X86_NUMACHIP
depends on NUMA
depends on SMP
depends on X86_X2APIC
+ depends on PCI_MMCONFIG
---help---
Adds support for Numascale NumaChip large-SMP systems. Needed to
enable more than ~168 cores.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 05afcca66de..e71fc4279aa 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -123,9 +123,10 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI
# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
+avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 851fe936d24..e3cf9f682be 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -2,7 +2,6 @@ bootsect
bzImage
cpustr.h
mkcpustr
-offsets.h
voffset.h
zoffset.h
setup
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e87b0cac14b..b1942e22276 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -8,6 +8,7 @@
* ----------------------------------------------------------------------- */
#include <linux/efi.h>
+#include <linux/pci.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include <asm/desc.h>
@@ -245,6 +246,121 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
+static efi_status_t setup_efi_pci(struct boot_params *params)
+{
+ efi_pci_io_protocol *pci;
+ efi_status_t status;
+ void **pci_handle;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long nr_pci, size = 0;
+ int i;
+ struct setup_data *data;
+
+ data = (struct setup_data *)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)data->next;
+
+ status = efi_call_phys5(sys_table->boottime->locate_handle,
+ EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &size, pci_handle);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_call_phys3(sys_table->boottime->allocate_pool,
+ EFI_LOADER_DATA, size, &pci_handle);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_phys5(sys_table->boottime->locate_handle,
+ EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &size, pci_handle);
+ }
+
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ nr_pci = size / sizeof(void *);
+ for (i = 0; i < nr_pci; i++) {
+ void *h = pci_handle[i];
+ uint64_t attributes;
+ struct pci_setup_rom *rom;
+
+ status = efi_call_phys3(sys_table->boottime->handle_protocol,
+ h, &pci_proto, &pci);
+
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (!pci)
+ continue;
+
+ status = efi_call_phys4(pci->attributes, pci,
+ EfiPciIoAttributeOperationGet, 0,
+ &attributes);
+
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
+ continue;
+
+ if (!pci->romimage || !pci->romsize)
+ continue;
+
+ size = pci->romsize + sizeof(*rom);
+
+ status = efi_call_phys3(sys_table->boottime->allocate_pool,
+ EFI_LOADER_DATA, size, &rom);
+
+ if (status != EFI_SUCCESS)
+ continue;
+
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = pci->romsize;
+
+ status = efi_call_phys5(pci->pci.read, pci,
+ EfiPciIoWidthUint16, PCI_VENDOR_ID,
+ 1, &(rom->vendor));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_call_phys5(pci->pci.read, pci,
+ EfiPciIoWidthUint16, PCI_DEVICE_ID,
+ 1, &(rom->devid));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ status = efi_call_phys5(pci->get_location, pci,
+ &(rom->segment), &(rom->bus),
+ &(rom->device), &(rom->function));
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, pci->romimage, pci->romsize);
+
+ if (data)
+ data->next = (uint64_t)rom;
+ else
+ params->hdr.setup_data = (uint64_t)rom;
+
+ data = (struct setup_data *)rom;
+
+ continue;
+ free_struct:
+ efi_call_phys1(sys_table->boottime->free_pool, rom);
+ }
+
+free_handle:
+ efi_call_phys1(sys_table->boottime->free_pool, pci_handle);
+ return status;
+}
+
/*
* See if we have Graphics Output Protocol
*/
@@ -1028,6 +1144,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
+ setup_efi_pci(boot_params);
+
status = efi_call_phys3(sys_table->boottime->allocate_pool,
EFI_LOADER_DATA, sizeof(*gdt),
(void **)&gdt);
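setup_efi_pci() above chains one struct pci_setup_rom per device onto the boot_params setup_data list with type SETUP_PCI, preserving option ROM images that are only reachable through the firmware's PCI I/O protocol before ExitBootServices. A hedged sketch of how a later consumer walks that list (simplified: the real kernel maps each setup_data entry rather than casting the physical address directly):

	static void walk_setup_pci(struct boot_params *params)
	{
		u64 pa = params->hdr.setup_data;

		while (pa) {
			struct setup_data *data =
				(struct setup_data *)(unsigned long)pa;

			if (data->type == SETUP_PCI) {
				struct pci_setup_rom *rom =
					(struct pci_setup_rom *)data;
				/* rom->segment/bus/device/function name the
				 * device; rom->romdata holds rom->pcilen
				 * bytes of ROM image. */
			}
			pa = data->next;
		}
	}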
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 5bacb4a226a..e0ca7c9ac38 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
+obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o
obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o
obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
@@ -34,6 +35,8 @@ serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
+camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
+ camellia_aesni_avx_glue.o
cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o
cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
@@ -47,3 +50,5 @@ serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+crc32c-intel-y := crc32c-intel_glue.o
+crc32c-intel-$(CONFIG_CRYPTO_CRC32C_X86_64) += crc32c-pcl-intel-asm_64.o
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
new file mode 100644
index 00000000000..2306d2e4816
--- /dev/null
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -0,0 +1,1102 @@
+/*
+ * x86_64/AVX/AES-NI assembler implementation of Camellia
+ *
+ * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+/*
+ * Version licensed under 2-clause BSD License is available at:
+ * http://koti.mbnet.fi/axh/crypto/camellia-BSD-1.2.0-aesni1.tar.xz
+ */
+
+#define CAMELLIA_TABLE_BYTE_LEN 272
+
+/* struct camellia_ctx: */
+#define key_table 0
+#define key_length CAMELLIA_TABLE_BYTE_LEN
+
+/* register macros */
+#define CTX %rdi
+
+/**********************************************************************
+ 16-way camellia
+ **********************************************************************/
+#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
+ vpand x, mask4bit, tmp0; \
+ vpandn x, mask4bit, x; \
+ vpsrld $4, x, x; \
+ \
+ vpshufb tmp0, lo_t, tmp0; \
+ vpshufb x, hi_t, x; \
+ vpxor tmp0, x, x;
+
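filter_8bit() performs an arbitrary byte-wise table lookup using two 16-entry vpshufb tables, one indexed by the low nibble and one by the shifted-down high nibble, XORed together. Per byte lane it is equivalent to this scalar form (a sketch; lo_t/hi_t correspond to the macro arguments):

	static inline u8 filter_8bit_scalar(u8 x, const u8 lo_t[16],
					    const u8 hi_t[16])
	{
		return lo_t[x & 0x0f] ^ hi_t[x >> 4];
	}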
+/*
+ * IN:
+ * x0..x7: byte-sliced AB state
+ * mem_cd: register pointer storing CD state
+ * key: index for key material
+ * OUT:
+ * x0..x7: new byte-sliced CD state
+ */
+#define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
+ t7, mem_cd, key) \
+ /* \
+ * S-function with AES subbytes \
+ */ \
+ vmovdqa .Linv_shift_row, t4; \
+ vbroadcastss .L0f0f0f0f, t7; \
+ vmovdqa .Lpre_tf_lo_s1, t0; \
+ vmovdqa .Lpre_tf_hi_s1, t1; \
+ \
+ /* AES inverse shift rows */ \
+ vpshufb t4, x0, x0; \
+ vpshufb t4, x7, x7; \
+ vpshufb t4, x1, x1; \
+ vpshufb t4, x4, x4; \
+ vpshufb t4, x2, x2; \
+ vpshufb t4, x5, x5; \
+ vpshufb t4, x3, x3; \
+ vpshufb t4, x6, x6; \
+ \
+ /* prefilter sboxes 1, 2 and 3 */ \
+ vmovdqa .Lpre_tf_lo_s4, t2; \
+ vmovdqa .Lpre_tf_hi_s4, t3; \
+ filter_8bit(x0, t0, t1, t7, t6); \
+ filter_8bit(x7, t0, t1, t7, t6); \
+ filter_8bit(x1, t0, t1, t7, t6); \
+ filter_8bit(x4, t0, t1, t7, t6); \
+ filter_8bit(x2, t0, t1, t7, t6); \
+ filter_8bit(x5, t0, t1, t7, t6); \
+ \
+ /* prefilter sbox 4 */ \
+ vpxor t4, t4, t4; \
+ filter_8bit(x3, t2, t3, t7, t6); \
+ filter_8bit(x6, t2, t3, t7, t6); \
+ \
+ /* AES subbytes + AES shift rows */ \
+ vmovdqa .Lpost_tf_lo_s1, t0; \
+ vmovdqa .Lpost_tf_hi_s1, t1; \
+ vaesenclast t4, x0, x0; \
+ vaesenclast t4, x7, x7; \
+ vaesenclast t4, x1, x1; \
+ vaesenclast t4, x4, x4; \
+ vaesenclast t4, x2, x2; \
+ vaesenclast t4, x5, x5; \
+ vaesenclast t4, x3, x3; \
+ vaesenclast t4, x6, x6; \
+ \
+ /* postfilter sboxes 1 and 4 */ \
+ vmovdqa .Lpost_tf_lo_s3, t2; \
+ vmovdqa .Lpost_tf_hi_s3, t3; \
+ filter_8bit(x0, t0, t1, t7, t6); \
+ filter_8bit(x7, t0, t1, t7, t6); \
+ filter_8bit(x3, t0, t1, t7, t6); \
+ filter_8bit(x6, t0, t1, t7, t6); \
+ \
+ /* postfilter sbox 3 */ \
+ vmovdqa .Lpost_tf_lo_s2, t4; \
+ vmovdqa .Lpost_tf_hi_s2, t5; \
+ filter_8bit(x2, t2, t3, t7, t6); \
+ filter_8bit(x5, t2, t3, t7, t6); \
+ \
+ vpxor t6, t6, t6; \
+ vmovq key, t0; \
+ \
+ /* postfilter sbox 2 */ \
+ filter_8bit(x1, t4, t5, t7, t2); \
+ filter_8bit(x4, t4, t5, t7, t2); \
+ \
+ vpsrldq $5, t0, t5; \
+ vpsrldq $1, t0, t1; \
+ vpsrldq $2, t0, t2; \
+ vpsrldq $3, t0, t3; \
+ vpsrldq $4, t0, t4; \
+ vpshufb t6, t0, t0; \
+ vpshufb t6, t1, t1; \
+ vpshufb t6, t2, t2; \
+ vpshufb t6, t3, t3; \
+ vpshufb t6, t4, t4; \
+ vpsrldq $2, t5, t7; \
+ vpshufb t6, t7, t7; \
+ \
+ /* \
+ * P-function \
+ */ \
+ vpxor x5, x0, x0; \
+ vpxor x6, x1, x1; \
+ vpxor x7, x2, x2; \
+ vpxor x4, x3, x3; \
+ \
+ vpxor x2, x4, x4; \
+ vpxor x3, x5, x5; \
+ vpxor x0, x6, x6; \
+ vpxor x1, x7, x7; \
+ \
+ vpxor x7, x0, x0; \
+ vpxor x4, x1, x1; \
+ vpxor x5, x2, x2; \
+ vpxor x6, x3, x3; \
+ \
+ vpxor x3, x4, x4; \
+ vpxor x0, x5, x5; \
+ vpxor x1, x6, x6; \
+ vpxor x2, x7, x7; /* note: high and low parts swapped */ \
+ \
+ /* \
+ * Add key material and result to CD (x becomes new CD) \
+ */ \
+ \
+ vpxor t3, x4, x4; \
+ vpxor 0 * 16(mem_cd), x4, x4; \
+ \
+ vpxor t2, x5, x5; \
+ vpxor 1 * 16(mem_cd), x5, x5; \
+ \
+ vpsrldq $1, t5, t3; \
+ vpshufb t6, t5, t5; \
+ vpshufb t6, t3, t6; \
+ \
+ vpxor t1, x6, x6; \
+ vpxor 2 * 16(mem_cd), x6, x6; \
+ \
+ vpxor t0, x7, x7; \
+ vpxor 3 * 16(mem_cd), x7, x7; \
+ \
+ vpxor t7, x0, x0; \
+ vpxor 4 * 16(mem_cd), x0, x0; \
+ \
+ vpxor t6, x1, x1; \
+ vpxor 5 * 16(mem_cd), x1, x1; \
+ \
+ vpxor t5, x2, x2; \
+ vpxor 6 * 16(mem_cd), x2, x2; \
+ \
+ vpxor t4, x3, x3; \
+ vpxor 7 * 16(mem_cd), x3, x3;
+
+/*
+ * Size optimization: with roundsm16 inlined, the binary would be over 5 times
+ * larger and only about 0.5% faster (on Sandy Bridge).
+ */
+.align 8
+roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+ %rcx, (%r9));
+ ret;
+
+.align 8
+roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
+ %rax, (%r9));
+ ret;
+
+/*
+ * IN/OUT:
+ * x0..x7: byte-sliced AB state preloaded
+ * mem_ab: byte-sliced AB state in memory
+ * mem_cd: byte-sliced CD state in memory
+ */
+#define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
+ leaq (key_table + (i) * 8)(CTX), %r9; \
+ call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
+ \
+ vmovdqu x4, 0 * 16(mem_cd); \
+ vmovdqu x5, 1 * 16(mem_cd); \
+ vmovdqu x6, 2 * 16(mem_cd); \
+ vmovdqu x7, 3 * 16(mem_cd); \
+ vmovdqu x0, 4 * 16(mem_cd); \
+ vmovdqu x1, 5 * 16(mem_cd); \
+ vmovdqu x2, 6 * 16(mem_cd); \
+ vmovdqu x3, 7 * 16(mem_cd); \
+ \
+ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
+ call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
+ \
+ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
+
+#define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */
+
+#define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \
+ /* Store new AB state */ \
+ vmovdqu x0, 0 * 16(mem_ab); \
+ vmovdqu x1, 1 * 16(mem_ab); \
+ vmovdqu x2, 2 * 16(mem_ab); \
+ vmovdqu x3, 3 * 16(mem_ab); \
+ vmovdqu x4, 4 * 16(mem_ab); \
+ vmovdqu x5, 5 * 16(mem_ab); \
+ vmovdqu x6, 6 * 16(mem_ab); \
+ vmovdqu x7, 7 * 16(mem_ab);
+
+#define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i) \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store);
+
+#define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i) \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \
+ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store);
+
+/*
+ * IN:
+ * v0..3: byte-sliced 32-bit integers
+ * OUT:
+ * v0..3: (IN <<< 1)
+ */
+#define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \
+ vpcmpgtb v0, zero, t0; \
+ vpaddb v0, v0, v0; \
+ vpabsb t0, t0; \
+ \
+ vpcmpgtb v1, zero, t1; \
+ vpaddb v1, v1, v1; \
+ vpabsb t1, t1; \
+ \
+ vpcmpgtb v2, zero, t2; \
+ vpaddb v2, v2, v2; \
+ vpabsb t2, t2; \
+ \
+ vpor t0, v1, v1; \
+ \
+ vpcmpgtb v3, zero, t0; \
+ vpaddb v3, v3, v3; \
+ vpabsb t0, t0; \
+ \
+ vpor t1, v2, v2; \
+ vpor t2, v3, v3; \
+ vpor t0, v0, v0;
+
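rol32_1_16() rotates each byte-sliced 32-bit word left by one bit: every slice is doubled with vpaddb, the lost sign bit is recovered via vpcmpgtb/vpabsb, and that carry is ORed into the next slice's low bit, with v3's carry wrapping back into v0. On an ordinary, non-sliced u32 the operation is simply:

	/* What "(IN <<< 1)" means on a plain 32-bit word. */
	static inline u32 rol32_by_1(u32 x)
	{
		return (x << 1) | (x >> 31);
	}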
+/*
+ * IN:
+ * r: byte-sliced AB state in memory
+ * l: byte-sliced CD state in memory
+ * OUT:
+ * x0..x7: new byte-sliced CD state
+ */
+#define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \
+ tt1, tt2, tt3, kll, klr, krl, krr) \
+ /* \
+ * t0 = kll; \
+ * t0 &= ll; \
+ * lr ^= rol32(t0, 1); \
+ */ \
+ vpxor tt0, tt0, tt0; \
+ vmovd kll, t0; \
+ vpshufb tt0, t0, t3; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t2; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t1; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t0; \
+ \
+ vpand l0, t0, t0; \
+ vpand l1, t1, t1; \
+ vpand l2, t2, t2; \
+ vpand l3, t3, t3; \
+ \
+ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
+ \
+ vpxor l4, t0, l4; \
+ vmovdqu l4, 4 * 16(l); \
+ vpxor l5, t1, l5; \
+ vmovdqu l5, 5 * 16(l); \
+ vpxor l6, t2, l6; \
+ vmovdqu l6, 6 * 16(l); \
+ vpxor l7, t3, l7; \
+ vmovdqu l7, 7 * 16(l); \
+ \
+ /* \
+ * t2 = krr; \
+ * t2 |= rr; \
+ * rl ^= t2; \
+ */ \
+ \
+ vmovd krr, t0; \
+ vpshufb tt0, t0, t3; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t2; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t1; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t0; \
+ \
+ vpor 4 * 16(r), t0, t0; \
+ vpor 5 * 16(r), t1, t1; \
+ vpor 6 * 16(r), t2, t2; \
+ vpor 7 * 16(r), t3, t3; \
+ \
+ vpxor 0 * 16(r), t0, t0; \
+ vpxor 1 * 16(r), t1, t1; \
+ vpxor 2 * 16(r), t2, t2; \
+ vpxor 3 * 16(r), t3, t3; \
+ vmovdqu t0, 0 * 16(r); \
+ vmovdqu t1, 1 * 16(r); \
+ vmovdqu t2, 2 * 16(r); \
+ vmovdqu t3, 3 * 16(r); \
+ \
+ /* \
+ * t2 = krl; \
+ * t2 &= rl; \
+ * rr ^= rol32(t2, 1); \
+ */ \
+ vmovd krl, t0; \
+ vpshufb tt0, t0, t3; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t2; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t1; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t0; \
+ \
+ vpand 0 * 16(r), t0, t0; \
+ vpand 1 * 16(r), t1, t1; \
+ vpand 2 * 16(r), t2, t2; \
+ vpand 3 * 16(r), t3, t3; \
+ \
+ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
+ \
+ vpxor 4 * 16(r), t0, t0; \
+ vpxor 5 * 16(r), t1, t1; \
+ vpxor 6 * 16(r), t2, t2; \
+ vpxor 7 * 16(r), t3, t3; \
+ vmovdqu t0, 4 * 16(r); \
+ vmovdqu t1, 5 * 16(r); \
+ vmovdqu t2, 6 * 16(r); \
+ vmovdqu t3, 7 * 16(r); \
+ \
+ /* \
+ * t0 = klr; \
+ * t0 |= lr; \
+ * ll ^= t0; \
+ */ \
+ \
+ vmovd klr, t0; \
+ vpshufb tt0, t0, t3; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t2; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t1; \
+ vpsrldq $1, t0, t0; \
+ vpshufb tt0, t0, t0; \
+ \
+ vpor l4, t0, t0; \
+ vpor l5, t1, t1; \
+ vpor l6, t2, t2; \
+ vpor l7, t3, t3; \
+ \
+ vpxor l0, t0, l0; \
+ vmovdqu l0, 0 * 16(l); \
+ vpxor l1, t1, l1; \
+ vmovdqu l1, 1 * 16(l); \
+ vpxor l2, t2, l2; \
+ vmovdqu l2, 2 * 16(l); \
+ vpxor l3, t3, l3; \
+ vmovdqu l3, 3 * 16(l);
+
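fls16() applies Camellia's FL function to the left half of all 16 blocks and FL^-1 to the right half, following the four commented steps inside the macro. Condensed to a per-block scalar sketch (names mirror the macro's comments; rol32_by_1() as sketched above):

	static void camellia_fl_pair(u32 *ll, u32 *lr, u32 *rl, u32 *rr,
				     u32 kll, u32 klr, u32 krl, u32 krr)
	{
		u32 t;

		t = kll & *ll;			/* t0 = kll; t0 &= ll;        */
		*lr ^= rol32_by_1(t);		/* lr ^= rol32(t0, 1);        */

		*rl ^= krr | *rr;		/* t2 = krr; t2 |= rr;        */

		t = krl & *rl;			/* t2 = krl; t2 &= rl;        */
		*rr ^= rol32_by_1(t);		/* rr ^= rol32(t2, 1);        */

		*ll ^= klr | *lr;		/* t0 = klr; t0 |= lr;        */
	}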
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+ vpunpckhdq x1, x0, t2; \
+ vpunpckldq x1, x0, x0; \
+ \
+ vpunpckldq x3, x2, t1; \
+ vpunpckhdq x3, x2, x2; \
+ \
+ vpunpckhqdq t1, x0, x1; \
+ vpunpcklqdq t1, x0, x0; \
+ \
+ vpunpckhqdq x2, t2, x3; \
+ vpunpcklqdq x2, t2, x2;
+
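transpose_4x4() is the usual punpck-based transpose of a 4x4 matrix of 32-bit words spread across four xmm registers; rows become columns. The scalar equivalent, for reference:

	static void transpose_4x4_scalar(u32 m[4][4])
	{
		int i, j;

		for (i = 0; i < 4; i++)
			for (j = i + 1; j < 4; j++) {
				u32 t = m[i][j];

				m[i][j] = m[j][i];
				m[j][i] = t;
			}
	}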
+#define byteslice_16x16b(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, \
+ b3, c3, d3, st0, st1) \
+ vmovdqu d2, st0; \
+ vmovdqu d3, st1; \
+ transpose_4x4(a0, a1, a2, a3, d2, d3); \
+ transpose_4x4(b0, b1, b2, b3, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu a0, st0; \
+ vmovdqu a1, st1; \
+ transpose_4x4(c0, c1, c2, c3, a0, a1); \
+ transpose_4x4(d0, d1, d2, d3, a0, a1); \
+ \
+ vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu st1, a1; \
+ vpshufb a0, a2, a2; \
+ vpshufb a0, a3, a3; \
+ vpshufb a0, b0, b0; \
+ vpshufb a0, b1, b1; \
+ vpshufb a0, b2, b2; \
+ vpshufb a0, b3, b3; \
+ vpshufb a0, a1, a1; \
+ vpshufb a0, c0, c0; \
+ vpshufb a0, c1, c1; \
+ vpshufb a0, c2, c2; \
+ vpshufb a0, c3, c3; \
+ vpshufb a0, d0, d0; \
+ vpshufb a0, d1, d1; \
+ vpshufb a0, d2, d2; \
+ vpshufb a0, d3, d3; \
+ vmovdqu d3, st1; \
+ vmovdqu st0, d3; \
+ vpshufb a0, d3, a0; \
+ vmovdqu d2, st0; \
+ \
+ transpose_4x4(a0, b0, c0, d0, d2, d3); \
+ transpose_4x4(a1, b1, c1, d1, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu b0, st0; \
+ vmovdqu b1, st1; \
+ transpose_4x4(a2, b2, c2, d2, b0, b1); \
+ transpose_4x4(a3, b3, c3, d3, b0, b1); \
+ vmovdqu st0, b0; \
+ vmovdqu st1, b1; \
+ /* does not adjust output bytes inside vectors */
+
+/* load blocks to registers and apply pre-whitening */
+#define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, rio, key) \
+ vmovq key, x0; \
+ vpshufb .Lpack_bswap, x0, x0; \
+ \
+ vpxor 0 * 16(rio), x0, y7; \
+ vpxor 1 * 16(rio), x0, y6; \
+ vpxor 2 * 16(rio), x0, y5; \
+ vpxor 3 * 16(rio), x0, y4; \
+ vpxor 4 * 16(rio), x0, y3; \
+ vpxor 5 * 16(rio), x0, y2; \
+ vpxor 6 * 16(rio), x0, y1; \
+ vpxor 7 * 16(rio), x0, y0; \
+ vpxor 8 * 16(rio), x0, x7; \
+ vpxor 9 * 16(rio), x0, x6; \
+ vpxor 10 * 16(rio), x0, x5; \
+ vpxor 11 * 16(rio), x0, x4; \
+ vpxor 12 * 16(rio), x0, x3; \
+ vpxor 13 * 16(rio), x0, x2; \
+ vpxor 14 * 16(rio), x0, x1; \
+ vpxor 15 * 16(rio), x0, x0;
+
+/* byteslice pre-whitened blocks and store to temporary memory */
+#define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd) \
+ byteslice_16x16b(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
+ y5, y6, y7, (mem_ab), (mem_cd)); \
+ \
+ vmovdqu x0, 0 * 16(mem_ab); \
+ vmovdqu x1, 1 * 16(mem_ab); \
+ vmovdqu x2, 2 * 16(mem_ab); \
+ vmovdqu x3, 3 * 16(mem_ab); \
+ vmovdqu x4, 4 * 16(mem_ab); \
+ vmovdqu x5, 5 * 16(mem_ab); \
+ vmovdqu x6, 6 * 16(mem_ab); \
+ vmovdqu x7, 7 * 16(mem_ab); \
+ vmovdqu y0, 0 * 16(mem_cd); \
+ vmovdqu y1, 1 * 16(mem_cd); \
+ vmovdqu y2, 2 * 16(mem_cd); \
+ vmovdqu y3, 3 * 16(mem_cd); \
+ vmovdqu y4, 4 * 16(mem_cd); \
+ vmovdqu y5, 5 * 16(mem_cd); \
+ vmovdqu y6, 6 * 16(mem_cd); \
+ vmovdqu y7, 7 * 16(mem_cd);
+
+/* de-byteslice, apply post-whitening and store blocks */
+#define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
+ y5, y6, y7, key, stack_tmp0, stack_tmp1) \
+ byteslice_16x16b(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, y3, \
+ y7, x3, x7, stack_tmp0, stack_tmp1); \
+ \
+ vmovdqu x0, stack_tmp0; \
+ \
+ vmovq key, x0; \
+ vpshufb .Lpack_bswap, x0, x0; \
+ \
+ vpxor x0, y7, y7; \
+ vpxor x0, y6, y6; \
+ vpxor x0, y5, y5; \
+ vpxor x0, y4, y4; \
+ vpxor x0, y3, y3; \
+ vpxor x0, y2, y2; \
+ vpxor x0, y1, y1; \
+ vpxor x0, y0, y0; \
+ vpxor x0, x7, x7; \
+ vpxor x0, x6, x6; \
+ vpxor x0, x5, x5; \
+ vpxor x0, x4, x4; \
+ vpxor x0, x3, x3; \
+ vpxor x0, x2, x2; \
+ vpxor x0, x1, x1; \
+ vpxor stack_tmp0, x0, x0;
+
+#define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, rio) \
+ vmovdqu x0, 0 * 16(rio); \
+ vmovdqu x1, 1 * 16(rio); \
+ vmovdqu x2, 2 * 16(rio); \
+ vmovdqu x3, 3 * 16(rio); \
+ vmovdqu x4, 4 * 16(rio); \
+ vmovdqu x5, 5 * 16(rio); \
+ vmovdqu x6, 6 * 16(rio); \
+ vmovdqu x7, 7 * 16(rio); \
+ vmovdqu y0, 8 * 16(rio); \
+ vmovdqu y1, 9 * 16(rio); \
+ vmovdqu y2, 10 * 16(rio); \
+ vmovdqu y3, 11 * 16(rio); \
+ vmovdqu y4, 12 * 16(rio); \
+ vmovdqu y5, 13 * 16(rio); \
+ vmovdqu y6, 14 * 16(rio); \
+ vmovdqu y7, 15 * 16(rio);
+
+.data
+.align 16
+
+#define SHUFB_BYTES(idx) \
+ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)
+
+.Lshufb_16x16b:
+ .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3);
+
+.Lpack_bswap:
+ .long 0x00010203
+ .long 0x04050607
+ .long 0x80808080
+ .long 0x80808080
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/*
+ * pre-SubByte transform
+ *
+ * pre-lookup for sbox1, sbox2, sbox3:
+ * swap_bitendianness(
+ * isom_map_camellia_to_aes(
+ * camellia_f(
+ * swap_bitendianness(in)
+ * )
+ * )
+ * )
+ *
+ * (note: '⊕ 0xc5' inside camellia_f())
+ */
+.Lpre_tf_lo_s1:
+ .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86
+ .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88
+.Lpre_tf_hi_s1:
+ .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a
+ .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23
+
+/*
+ * pre-SubByte transform
+ *
+ * pre-lookup for sbox4:
+ * swap_bitendianness(
+ * isom_map_camellia_to_aes(
+ * camellia_f(
+ * swap_bitendianness(in <<< 1)
+ * )
+ * )
+ * )
+ *
+ * (note: '⊕ 0xc5' inside camellia_f())
+ */
+.Lpre_tf_lo_s4:
+ .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25
+ .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74
+.Lpre_tf_hi_s4:
+ .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72
+ .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf
+
+/*
+ * post-SubByte transform
+ *
+ * post-lookup for sbox1, sbox4:
+ * swap_bitendianness(
+ * camellia_h(
+ * isom_map_aes_to_camellia(
+ * swap_bitendianness(
+ * aes_inverse_affine_transform(in)
+ * )
+ * )
+ * )
+ * )
+ *
+ * (note: '⊕ 0x6e' inside camellia_h())
+ */
+.Lpost_tf_lo_s1:
+ .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31
+ .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1
+.Lpost_tf_hi_s1:
+ .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8
+ .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c
+
+/*
+ * post-SubByte transform
+ *
+ * post-lookup for sbox2:
+ * swap_bitendianness(
+ * camellia_h(
+ * isom_map_aes_to_camellia(
+ * swap_bitendianness(
+ * aes_inverse_affine_transform(in)
+ * )
+ * )
+ * )
+ * ) <<< 1
+ *
+ * (note: '⊕ 0x6e' inside camellia_h())
+ */
+.Lpost_tf_lo_s2:
+ .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62
+ .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3
+.Lpost_tf_hi_s2:
+ .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51
+ .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18
+
+/*
+ * post-SubByte transform
+ *
+ * post-lookup for sbox3:
+ * swap_bitendianness(
+ * camellia_h(
+ * isom_map_aes_to_camellia(
+ * swap_bitendianness(
+ * aes_inverse_affine_transform(in)
+ * )
+ * )
+ * )
+ * ) >>> 1
+ *
+ * (note: '⊕ 0x6e' inside camellia_h())
+ */
+.Lpost_tf_lo_s3:
+ .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98
+ .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8
+.Lpost_tf_hi_s3:
+ .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54
+ .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06
+
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+ .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+ .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* 4-bit mask */
+.align 4
+.L0f0f0f0f:
+ .long 0x0f0f0f0f
+
+.text
+
+.align 8
+.type __camellia_enc_blk16,@function;
+
+__camellia_enc_blk16:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rax: temporary storage, 256 bytes
+ * %xmm0..%xmm15: 16 plaintext blocks
+ * output:
+ * %xmm0..%xmm15: 16 encrypted blocks, order swapped:
+ * 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
+ */
+
+ leaq 8 * 16(%rax), %rcx;
+
+ inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx);
+
+ enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 0);
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (8) * 8) + 0)(CTX),
+ ((key_table + (8) * 8) + 4)(CTX),
+ ((key_table + (8) * 8) + 8)(CTX),
+ ((key_table + (8) * 8) + 12)(CTX));
+
+ enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 8);
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (16) * 8) + 0)(CTX),
+ ((key_table + (16) * 8) + 4)(CTX),
+ ((key_table + (16) * 8) + 8)(CTX),
+ ((key_table + (16) * 8) + 12)(CTX));
+
+ enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 16);
+
+ movl $24, %r8d;
+ cmpl $16, key_length(CTX);
+ jne .Lenc_max32;
+
+.Lenc_done:
+ /* load CD for output */
+ vmovdqu 0 * 16(%rcx), %xmm8;
+ vmovdqu 1 * 16(%rcx), %xmm9;
+ vmovdqu 2 * 16(%rcx), %xmm10;
+ vmovdqu 3 * 16(%rcx), %xmm11;
+ vmovdqu 4 * 16(%rcx), %xmm12;
+ vmovdqu 5 * 16(%rcx), %xmm13;
+ vmovdqu 6 * 16(%rcx), %xmm14;
+ vmovdqu 7 * 16(%rcx), %xmm15;
+
+ outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
+
+ ret;
+
+.align 8
+.Lenc_max32:
+ movl $32, %r8d;
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (24) * 8) + 0)(CTX),
+ ((key_table + (24) * 8) + 4)(CTX),
+ ((key_table + (24) * 8) + 8)(CTX),
+ ((key_table + (24) * 8) + 12)(CTX));
+
+ enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 24);
+
+ jmp .Lenc_done;
+
+.align 8
+.type __camellia_dec_blk16,@function;
+
+__camellia_dec_blk16:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rax: temporary storage, 256 bytes
+ * %r8d: 24 for 16 byte key, 32 for larger
+ * %xmm0..%xmm15: 16 encrypted blocks
+ * output:
+ * %xmm0..%xmm15: 16 plaintext blocks, order swapped:
+ * 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
+ */
+
+ leaq 8 * 16(%rax), %rcx;
+
+ inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx);
+
+ cmpl $32, %r8d;
+ je .Ldec_max32;
+
+.Ldec_max24:
+ dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 16);
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (16) * 8) + 8)(CTX),
+ ((key_table + (16) * 8) + 12)(CTX),
+ ((key_table + (16) * 8) + 0)(CTX),
+ ((key_table + (16) * 8) + 4)(CTX));
+
+ dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 8);
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (8) * 8) + 8)(CTX),
+ ((key_table + (8) * 8) + 12)(CTX),
+ ((key_table + (8) * 8) + 0)(CTX),
+ ((key_table + (8) * 8) + 4)(CTX));
+
+ dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 0);
+
+ /* load CD for output */
+ vmovdqu 0 * 16(%rcx), %xmm8;
+ vmovdqu 1 * 16(%rcx), %xmm9;
+ vmovdqu 2 * 16(%rcx), %xmm10;
+ vmovdqu 3 * 16(%rcx), %xmm11;
+ vmovdqu 4 * 16(%rcx), %xmm12;
+ vmovdqu 5 * 16(%rcx), %xmm13;
+ vmovdqu 6 * 16(%rcx), %xmm14;
+ vmovdqu 7 * 16(%rcx), %xmm15;
+
+ outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
+
+ ret;
+
+.align 8
+.Ldec_max32:
+ dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %rcx, 24);
+
+ fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15,
+ ((key_table + (24) * 8) + 8)(CTX),
+ ((key_table + (24) * 8) + 12)(CTX),
+ ((key_table + (24) * 8) + 0)(CTX),
+ ((key_table + (24) * 8) + 4)(CTX));
+
+ jmp .Ldec_max24;
+
+.align 8
+.global camellia_ecb_enc_16way
+.type camellia_ecb_enc_16way,@function;
+
+camellia_ecb_enc_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+ * %rdx: src (16 blocks)
+ */
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx, (key_table)(CTX));
+
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+ call __camellia_enc_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ ret;
+
+.align 8
+.global camellia_ecb_dec_16way
+.type camellia_ecb_dec_16way,@function;
+
+camellia_ecb_dec_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+ * %rdx: src (16 blocks)
+ */
+
+ cmpl $16, key_length(CTX);
+ movl $32, %r8d;
+ movl $24, %eax;
+ cmovel %eax, %r8d; /* max */
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx, (key_table)(CTX, %r8, 8));
+
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+ call __camellia_dec_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ ret;
+
+.align 8
+.global camellia_cbc_dec_16way
+.type camellia_cbc_dec_16way,@function;
+
+camellia_cbc_dec_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+ * %rdx: src (16 blocks)
+ */
+
+ cmpl $16, key_length(CTX);
+ movl $32, %r8d;
+ movl $24, %eax;
+ cmovel %eax, %r8d; /* max */
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx, (key_table)(CTX, %r8, 8));
+
+ /*
+ * dst might still be in-use (in case dst == src), so use stack for
+ * temporary storage.
+ */
+ subq $(16 * 16), %rsp;
+ movq %rsp, %rax;
+
+ call __camellia_dec_blk16;
+
+ addq $(16 * 16), %rsp;
+
+ vpxor (0 * 16)(%rdx), %xmm6, %xmm6;
+ vpxor (1 * 16)(%rdx), %xmm5, %xmm5;
+ vpxor (2 * 16)(%rdx), %xmm4, %xmm4;
+ vpxor (3 * 16)(%rdx), %xmm3, %xmm3;
+ vpxor (4 * 16)(%rdx), %xmm2, %xmm2;
+ vpxor (5 * 16)(%rdx), %xmm1, %xmm1;
+ vpxor (6 * 16)(%rdx), %xmm0, %xmm0;
+ vpxor (7 * 16)(%rdx), %xmm15, %xmm15;
+ vpxor (8 * 16)(%rdx), %xmm14, %xmm14;
+ vpxor (9 * 16)(%rdx), %xmm13, %xmm13;
+ vpxor (10 * 16)(%rdx), %xmm12, %xmm12;
+ vpxor (11 * 16)(%rdx), %xmm11, %xmm11;
+ vpxor (12 * 16)(%rdx), %xmm10, %xmm10;
+ vpxor (13 * 16)(%rdx), %xmm9, %xmm9;
+ vpxor (14 * 16)(%rdx), %xmm8, %xmm8;
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ ret;
+
+#define inc_le128(x, minus_one, tmp) \
+ vpcmpeqq minus_one, x, tmp; \
+ vpsubq minus_one, x, x; \
+ vpslldq $8, tmp, tmp; \
+ vpsubq tmp, x, x;
+
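inc_le128() increments a 128-bit little-endian counter in place: subtracting the {-1, 0} constant adds one to the low qword, while the vpcmpeqq/vpslldq pair detects a low qword that was all-ones and turns it into a carry subtracted (as -1) from the high qword. Scalar equivalent:

	/* 128-bit little-endian increment, as two 64-bit halves. */
	static void inc_le128_scalar(u64 *lo, u64 *hi)
	{
		if (++(*lo) == 0)	/* low half wrapped: carry up */
			(*hi)++;
	}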
+.align 8
+.global camellia_ctr_16way
+.type camellia_ctr_16way,@function;
+
+camellia_ctr_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+ * %rdx: src (16 blocks)
+ * %rcx: iv (little endian, 128bit)
+ */
+
+ subq $(16 * 16), %rsp;
+ movq %rsp, %rax;
+
+ vmovdqa .Lbswap128_mask, %xmm14;
+
+ /* load IV and byteswap */
+ vmovdqu (%rcx), %xmm0;
+ vpshufb %xmm14, %xmm0, %xmm15;
+ vmovdqu %xmm15, 15 * 16(%rax);
+
+ vpcmpeqd %xmm15, %xmm15, %xmm15;
+ vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */
+
+ /* construct IVs */
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm13;
+ vmovdqu %xmm13, 14 * 16(%rax);
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm13;
+ vmovdqu %xmm13, 13 * 16(%rax);
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm12;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm11;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm10;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm9;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm8;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm7;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm6;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm5;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm4;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm3;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm2;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vpshufb %xmm14, %xmm0, %xmm1;
+ inc_le128(%xmm0, %xmm15, %xmm13);
+ vmovdqa %xmm0, %xmm13;
+ vpshufb %xmm14, %xmm0, %xmm0;
+ inc_le128(%xmm13, %xmm15, %xmm14);
+ vmovdqu %xmm13, (%rcx);
+
+ /* inpack16_pre: */
+ vmovq (key_table)(CTX), %xmm15;
+ vpshufb .Lpack_bswap, %xmm15, %xmm15;
+ vpxor %xmm0, %xmm15, %xmm0;
+ vpxor %xmm1, %xmm15, %xmm1;
+ vpxor %xmm2, %xmm15, %xmm2;
+ vpxor %xmm3, %xmm15, %xmm3;
+ vpxor %xmm4, %xmm15, %xmm4;
+ vpxor %xmm5, %xmm15, %xmm5;
+ vpxor %xmm6, %xmm15, %xmm6;
+ vpxor %xmm7, %xmm15, %xmm7;
+ vpxor %xmm8, %xmm15, %xmm8;
+ vpxor %xmm9, %xmm15, %xmm9;
+ vpxor %xmm10, %xmm15, %xmm10;
+ vpxor %xmm11, %xmm15, %xmm11;
+ vpxor %xmm12, %xmm15, %xmm12;
+ vpxor 13 * 16(%rax), %xmm15, %xmm13;
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+ call __camellia_enc_blk16;
+
+ addq $(16 * 16), %rsp;
+
+ vpxor 0 * 16(%rdx), %xmm7, %xmm7;
+ vpxor 1 * 16(%rdx), %xmm6, %xmm6;
+ vpxor 2 * 16(%rdx), %xmm5, %xmm5;
+ vpxor 3 * 16(%rdx), %xmm4, %xmm4;
+ vpxor 4 * 16(%rdx), %xmm3, %xmm3;
+ vpxor 5 * 16(%rdx), %xmm2, %xmm2;
+ vpxor 6 * 16(%rdx), %xmm1, %xmm1;
+ vpxor 7 * 16(%rdx), %xmm0, %xmm0;
+ vpxor 8 * 16(%rdx), %xmm15, %xmm15;
+ vpxor 9 * 16(%rdx), %xmm14, %xmm14;
+ vpxor 10 * 16(%rdx), %xmm13, %xmm13;
+ vpxor 11 * 16(%rdx), %xmm12, %xmm12;
+ vpxor 12 * 16(%rdx), %xmm11, %xmm11;
+ vpxor 13 * 16(%rdx), %xmm10, %xmm10;
+ vpxor 14 * 16(%rdx), %xmm9, %xmm9;
+ vpxor 15 * 16(%rdx), %xmm8, %xmm8;
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ ret;
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
new file mode 100644
index 00000000000..96cbb6068fc
--- /dev/null
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -0,0 +1,558 @@
+/*
+ * Glue Code for x86_64/AVX/AES-NI assembler optimized version of Camellia
+ *
+ * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/lrw.h>
+#include <crypto/xts.h>
+#include <asm/xcr.h>
+#include <asm/xsave.h>
+#include <asm/crypto/camellia.h>
+#include <asm/crypto/ablk_helper.h>
+#include <asm/crypto/glue_helper.h>
+
+#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
+
+/* 16-way AES-NI parallel cipher functions */
+asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+static const struct common_glue_ctx camellia_enc = {
+ .num_funcs = 3,
+ .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+
+ .funcs = { {
+ .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+ }, {
+ .num_blocks = 2,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+ } }
+};
+
+static const struct common_glue_ctx camellia_ctr = {
+ .num_funcs = 3,
+ .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+
+ .funcs = { {
+ .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+ }, {
+ .num_blocks = 2,
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+ } }
+};
+
+static const struct common_glue_ctx camellia_dec = {
+ .num_funcs = 3,
+ .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+
+ .funcs = { {
+ .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+ }, {
+ .num_blocks = 2,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+ } }
+};
+
+static const struct common_glue_ctx camellia_dec_cbc = {
+ .num_funcs = 3,
+ .fpu_blocks_limit = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+
+ .funcs = { {
+ .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+ }, {
+ .num_blocks = 2,
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+ }, {
+ .num_blocks = 1,
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+ } }
+};
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
+ dst, src, nbytes);
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
+ nbytes);
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
+}
+
+static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+{
+ return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
+ CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
+ nbytes);
+}
+
+static inline void camellia_fpu_end(bool fpu_enabled)
+{
+ glue_fpu_end(fpu_enabled);
+}
+
+static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
+ &tfm->crt_flags);
+}
+
+struct crypt_priv {
+ struct camellia_ctx *ctx;
+ bool fpu_enabled;
+};
+
+static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+ const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+ struct crypt_priv *ctx = priv;
+ int i;
+
+ ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
+
+ if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
+ camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
+ camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+}
+
+static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+ const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
+ struct crypt_priv *ctx = priv;
+ int i;
+
+ ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
+
+ if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
+ camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
+ }
+
+ while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
+ camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
+ srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
+ nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
+ }
+
+ for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+ camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+}
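
One structural note on these two callbacks: lrw_crypt() and xts_crypt() hand them at
most tbuflen bytes at a time, and the tbuf supplied by the lrw/xts wrappers below
holds CAMELLIA_AESNI_PARALLEL_BLOCKS blocks, so nbytes never exceeds 16 blocks here
and the 16-way branch can fire at most once per invocation (an if, not a while). A
full 16-block chunk is handled by the single AVX/AES-NI call; a short tail, e.g. 7
blocks at the end of a request, falls through to the 2-way and single-block loops. A
standalone sketch of the split (hypothetical helper, not kernel code):

    /* e.g. 16 blocks -> one 16-way call; 7 blocks -> three 2-way + one 1-way */
    static void split_model(unsigned int blocks,
                            unsigned int *n16, unsigned int *n2, unsigned int *n1)
    {
            *n16 = *n2 = *n1 = 0;

            if (blocks >= 16) {        /* at most once per callback, see above */
                    blocks -= 16;
                    (*n16)++;
            }
            while (blocks >= 2) {      /* 2-way assembler path */
                    blocks -= 2;
                    (*n2)++;
            }
            *n1 = blocks;              /* leftover single-block calls */
    }
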
+
+static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
+ struct crypt_priv crypt_ctx = {
+ .ctx = &ctx->camellia_ctx,
+ .fpu_enabled = false,
+ };
+ struct lrw_crypt_req req = {
+ .tbuf = buf,
+ .tbuflen = sizeof(buf),
+
+ .table_ctx = &ctx->lrw_table,
+ .crypt_ctx = &crypt_ctx,
+ .crypt_fn = encrypt_callback,
+ };
+ int ret;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ret = lrw_crypt(desc, dst, src, nbytes, &req);
+ camellia_fpu_end(crypt_ctx.fpu_enabled);
+
+ return ret;
+}
+
+static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
+ struct crypt_priv crypt_ctx = {
+ .ctx = &ctx->camellia_ctx,
+ .fpu_enabled = false,
+ };
+ struct lrw_crypt_req req = {
+ .tbuf = buf,
+ .tbuflen = sizeof(buf),
+
+ .table_ctx = &ctx->lrw_table,
+ .crypt_ctx = &crypt_ctx,
+ .crypt_fn = decrypt_callback,
+ };
+ int ret;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ret = lrw_crypt(desc, dst, src, nbytes, &req);
+ camellia_fpu_end(crypt_ctx.fpu_enabled);
+
+ return ret;
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
+ struct crypt_priv crypt_ctx = {
+ .ctx = &ctx->crypt_ctx,
+ .fpu_enabled = false,
+ };
+ struct xts_crypt_req req = {
+ .tbuf = buf,
+ .tbuflen = sizeof(buf),
+
+ .tweak_ctx = &ctx->tweak_ctx,
+ .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
+ .crypt_ctx = &crypt_ctx,
+ .crypt_fn = encrypt_callback,
+ };
+ int ret;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ret = xts_crypt(desc, dst, src, nbytes, &req);
+ camellia_fpu_end(crypt_ctx.fpu_enabled);
+
+ return ret;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
+ struct crypt_priv crypt_ctx = {
+ .ctx = &ctx->crypt_ctx,
+ .fpu_enabled = false,
+ };
+ struct xts_crypt_req req = {
+ .tbuf = buf,
+ .tbuflen = sizeof(buf),
+
+ .tweak_ctx = &ctx->tweak_ctx,
+ .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
+ .crypt_ctx = &crypt_ctx,
+ .crypt_fn = decrypt_callback,
+ };
+ int ret;
+
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ ret = xts_crypt(desc, dst, src, nbytes, &req);
+ camellia_fpu_end(crypt_ctx.fpu_enabled);
+
+ return ret;
+}
+
+static struct crypto_alg cmll_algs[10] = { {
+ .cra_name = "__ecb-camellia-aesni",
+ .cra_driver_name = "__driver-ecb-camellia-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct camellia_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ },
+ },
+}, {
+ .cra_name = "__cbc-camellia-aesni",
+ .cra_driver_name = "__driver-cbc-camellia-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct camellia_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ },
+ },
+}, {
+ .cra_name = "__ctr-camellia-aesni",
+ .cra_driver_name = "__driver-ctr-camellia-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct camellia_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ },
+ },
+}, {
+ .cra_name = "__lrw-camellia-aesni",
+ .cra_driver_name = "__driver-lrw-camellia-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_exit = lrw_camellia_exit_tfm,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE +
+ CAMELLIA_BLOCK_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE +
+ CAMELLIA_BLOCK_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = lrw_camellia_setkey,
+ .encrypt = lrw_encrypt,
+ .decrypt = lrw_decrypt,
+ },
+ },
+}, {
+ .cra_name = "__xts-camellia-aesni",
+ .cra_driver_name = "__driver-xts-camellia-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct camellia_xts_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = xts_camellia_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
+ },
+ },
+}, {
+ .cra_name = "ecb(camellia)",
+ .cra_driver_name = "ecb-camellia-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+}, {
+ .cra_name = "cbc(camellia)",
+ .cra_driver_name = "cbc-camellia-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = __ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+}, {
+ .cra_name = "ctr(camellia)",
+ .cra_driver_name = "ctr-camellia-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_encrypt,
+ .geniv = "chainiv",
+ },
+ },
+}, {
+ .cra_name = "lrw(camellia)",
+ .cra_driver_name = "lrw-camellia-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE +
+ CAMELLIA_BLOCK_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE +
+ CAMELLIA_BLOCK_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+}, {
+ .cra_name = "xts(camellia)",
+ .cra_driver_name = "xts-camellia-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ },
+ },
+} };
+
+static int __init camellia_aesni_init(void)
+{
+ u64 xcr0;
+
+ if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+ pr_info("AVX or AES-NI instructions are not detected.\n");
+ return -ENODEV;
+ }
+
+ xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+ if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
+ pr_info("AVX detected but unusable.\n");
+ return -ENODEV;
+ }
+
+ return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+}
+
+static void __exit camellia_aesni_fini(void)
+{
+ crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+}
+
+module_init(camellia_aesni_init);
+module_exit(camellia_aesni_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
+MODULE_ALIAS("camellia");
+MODULE_ALIAS("camellia-asm");
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index 42ffd2bbab5..5cb86ccd4ac 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -32,53 +32,24 @@
#include <crypto/algapi.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
+#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
-#define CAMELLIA_MIN_KEY_SIZE 16
-#define CAMELLIA_MAX_KEY_SIZE 32
-#define CAMELLIA_BLOCK_SIZE 16
-#define CAMELLIA_TABLE_BYTE_LEN 272
-
-struct camellia_ctx {
- u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
- u32 key_length;
-};
-
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
+EXPORT_SYMBOL_GPL(__camellia_enc_blk);
asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
+EXPORT_SYMBOL_GPL(camellia_dec_blk);
/* 2-way parallel cipher functions */
asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
+EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
const u8 *src);
-
-static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk(ctx, dst, src, false);
-}
-
-static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk(ctx, dst, src, true);
-}
-
-static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk_2way(ctx, dst, src, false);
-}
-
-static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __camellia_enc_blk_2way(ctx, dst, src, true);
-}
+EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
@@ -1275,9 +1246,8 @@ static void camellia_setup192(const unsigned char *key, u64 *subkey)
camellia_setup256(kk, subkey);
}
-static int __camellia_setkey(struct camellia_ctx *cctx,
- const unsigned char *key,
- unsigned int key_len, u32 *flags)
+int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
+ unsigned int key_len, u32 *flags)
{
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -1300,6 +1270,7 @@ static int __camellia_setkey(struct camellia_ctx *cctx,
return 0;
}
+EXPORT_SYMBOL_GPL(__camellia_setkey);
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
@@ -1308,7 +1279,7 @@ static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
&tfm->crt_flags);
}
-static void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
+void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
{
u128 iv = *src;
@@ -1316,22 +1287,23 @@ static void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
u128_xor(&dst[1], &dst[1], &iv);
}
+EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
-static void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
+void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblk;
if (dst != src)
*dst = *src;
- u128_to_be128(&ctrblk, iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblk, iv);
+ le128_inc(iv);
camellia_enc_blk_xor(ctx, (u8 *)dst, (u8 *)&ctrblk);
}
+EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
-static void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
+void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblks[2];
@@ -1340,13 +1312,14 @@ static void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
dst[1] = src[1];
}
- u128_to_be128(&ctrblks[0], iv);
- u128_inc(iv);
- u128_to_be128(&ctrblks[1], iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblks[0], iv);
+ le128_inc(iv);
+ le128_to_be128(&ctrblks[1], iv);
+ le128_inc(iv);
camellia_enc_blk_xor_2way(ctx, (u8 *)dst, (u8 *)ctrblks);
}
+EXPORT_SYMBOL_GPL(camellia_crypt_ctr_2way);
static const struct common_glue_ctx camellia_enc = {
.num_funcs = 2,
@@ -1464,13 +1437,8 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
camellia_dec_blk(ctx, srcdst, srcdst);
}
-struct camellia_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct camellia_ctx camellia_ctx;
-};
-
-static int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
{
struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
@@ -1484,6 +1452,7 @@ static int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
return lrw_init_table(&ctx->lrw_table,
key + keylen - CAMELLIA_BLOCK_SIZE);
}
+EXPORT_SYMBOL_GPL(lrw_camellia_setkey);
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
@@ -1519,20 +1488,16 @@ static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
return lrw_crypt(desc, dst, src, nbytes, &req);
}
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
+void lrw_camellia_exit_tfm(struct crypto_tfm *tfm)
{
struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
lrw_free_table(&ctx->lrw_table);
}
+EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm);
-struct camellia_xts_ctx {
- struct camellia_ctx tweak_ctx;
- struct camellia_ctx crypt_ctx;
-};
-
-static int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
{
struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
@@ -1555,6 +1520,7 @@ static int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
flags);
}
+EXPORT_SYMBOL_GPL(xts_camellia_setkey);
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
@@ -1679,7 +1645,7 @@ static struct crypto_alg camellia_algs[6] = { {
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_exit = lrw_exit_tfm,
+ .cra_exit = lrw_camellia_exit_tfm,
.cra_u = {
.blkcipher = {
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index a41a3aaba22..15b00ac7cbd 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -25,10 +25,10 @@
.file "cast5-avx-x86_64-asm_64.S"
-.extern cast5_s1
-.extern cast5_s2
-.extern cast5_s3
-.extern cast5_s4
+.extern cast_s1
+.extern cast_s2
+.extern cast_s3
+.extern cast_s4
/* structure of crypto context */
#define km 0
@@ -36,10 +36,10 @@
#define rr ((16*4)+16)
/* s-boxes */
-#define s1 cast5_s1
-#define s2 cast5_s2
-#define s3 cast5_s3
-#define s4 cast5_s4
+#define s1 cast_s1
+#define s2 cast_s2
+#define s3 cast_s3
+#define s4 cast_s4
/**********************************************************************
16-way AVX cast5
@@ -180,31 +180,17 @@
vpunpcklqdq t1, t0, x0; \
vpunpckhqdq t1, t0, x1;
-#define inpack_blocks(in, x0, x1, t0, t1, rmask) \
- vmovdqu (0*4*4)(in), x0; \
- vmovdqu (1*4*4)(in), x1; \
+#define inpack_blocks(x0, x1, t0, t1, rmask) \
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
\
transpose_2x4(x0, x1, t0, t1)
-#define outunpack_blocks(out, x0, x1, t0, t1, rmask) \
+#define outunpack_blocks(x0, x1, t0, t1, rmask) \
transpose_2x4(x0, x1, t0, t1) \
\
vpshufb rmask, x0, x0; \
- vpshufb rmask, x1, x1; \
- vmovdqu x0, (0*4*4)(out); \
- vmovdqu x1, (1*4*4)(out);
-
-#define outunpack_xor_blocks(out, x0, x1, t0, t1, rmask) \
- transpose_2x4(x0, x1, t0, t1) \
- \
- vpshufb rmask, x0, x0; \
- vpshufb rmask, x1, x1; \
- vpxor (0*4*4)(out), x0, x0; \
- vmovdqu x0, (0*4*4)(out); \
- vpxor (1*4*4)(out), x1, x1; \
- vmovdqu x1, (1*4*4)(out);
+ vpshufb rmask, x1, x1;
.data
@@ -213,6 +199,8 @@
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.Lbswap_iv_mask:
+ .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
.byte 16, 16, 16, 16
.L32_mask:
@@ -223,35 +211,42 @@
.text
.align 16
-.global __cast5_enc_blk_16way
-.type __cast5_enc_blk_16way,@function;
+.type __cast5_enc_blk16,@function;
-__cast5_enc_blk_16way:
+__cast5_enc_blk16:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: bool, if true: xor output
+ * RL1: blocks 1 and 2
+ * RR1: blocks 3 and 4
+ * RL2: blocks 5 and 6
+ * RR2: blocks 7 and 8
+ * RL3: blocks 9 and 10
+ * RR3: blocks 11 and 12
+ * RL4: blocks 13 and 14
+ * RR4: blocks 15 and 16
+ * output:
+ * RL1: encrypted blocks 1 and 2
+ * RR1: encrypted blocks 3 and 4
+ * RL2: encrypted blocks 5 and 6
+ * RR2: encrypted blocks 7 and 8
+ * RL3: encrypted blocks 9 and 10
+ * RR3: encrypted blocks 11 and 12
+ * RL4: encrypted blocks 13 and 14
+ * RR4: encrypted blocks 15 and 16
*/
pushq %rbp;
pushq %rbx;
- pushq %rcx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
enc_preload_rkr();
- leaq 1*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
- inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
- leaq 2*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
- leaq 3*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);
-
- movq %rsi, %r11;
+ inpack_blocks(RL1, RR1, RTMP, RX, RKM);
+ inpack_blocks(RL2, RR2, RTMP, RX, RKM);
+ inpack_blocks(RL3, RR3, RTMP, RX, RKM);
+ inpack_blocks(RL4, RR4, RTMP, RX, RKM);
round(RL, RR, 0, 1);
round(RR, RL, 1, 2);
@@ -276,44 +271,41 @@ __cast5_enc_blk_16way:
round(RR, RL, 15, 1);
__skip_enc:
- popq %rcx;
popq %rbx;
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
- leaq 1*(2*4*4)(%r11), %rax;
- testb %cl, %cl;
- jnz __enc_xor16;
-
- outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
- outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
- leaq 2*(2*4*4)(%r11), %rax;
- outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
- leaq 3*(2*4*4)(%r11), %rax;
- outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
-
- ret;
-
-__enc_xor16:
- outunpack_xor_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
- outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
- leaq 2*(2*4*4)(%r11), %rax;
- outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
- leaq 3*(2*4*4)(%r11), %rax;
- outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
+ outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
+ outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
+ outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+ outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
.align 16
-.global cast5_dec_blk_16way
-.type cast5_dec_blk_16way,@function;
+.type __cast5_dec_blk16,@function;
-cast5_dec_blk_16way:
+__cast5_dec_blk16:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
+ * RL1: encrypted blocks 1 and 2
+ * RR1: encrypted blocks 3 and 4
+ * RL2: encrypted blocks 5 and 6
+ * RR2: encrypted blocks 7 and 8
+ * RL3: encrypted blocks 9 and 10
+ * RR3: encrypted blocks 11 and 12
+ * RL4: encrypted blocks 13 and 14
+ * RR4: encrypted blocks 15 and 16
+ * output:
+ * RL1: decrypted blocks 1 and 2
+ * RR1: decrypted blocks 3 and 4
+ * RL2: decrypted blocks 5 and 6
+ * RR2: decrypted blocks 7 and 8
+ * RL3: decrypted blocks 9 and 10
+ * RR3: decrypted blocks 11 and 12
+ * RL4: decrypted blocks 13 and 14
+ * RR4: decrypted blocks 15 and 16
*/
pushq %rbp;
@@ -324,15 +316,10 @@ cast5_dec_blk_16way:
vmovd .L32_mask, R32;
dec_preload_rkr();
- leaq 1*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RL1, RR1, RTMP, RX, RKM);
- inpack_blocks(%rax, RL2, RR2, RTMP, RX, RKM);
- leaq 2*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rax, RL3, RR3, RTMP, RX, RKM);
- leaq 3*(2*4*4)(%rdx), %rax;
- inpack_blocks(%rax, RL4, RR4, RTMP, RX, RKM);
-
- movq %rsi, %r11;
+ inpack_blocks(RL1, RR1, RTMP, RX, RKM);
+ inpack_blocks(RL2, RR2, RTMP, RX, RKM);
+ inpack_blocks(RL3, RR3, RTMP, RX, RKM);
+ inpack_blocks(RL4, RR4, RTMP, RX, RKM);
movzbl rr(CTX), %eax;
testl %eax, %eax;
@@ -361,16 +348,211 @@ __dec_tail:
popq %rbx;
popq %rbp;
- leaq 1*(2*4*4)(%r11), %rax;
- outunpack_blocks(%r11, RR1, RL1, RTMP, RX, RKM);
- outunpack_blocks(%rax, RR2, RL2, RTMP, RX, RKM);
- leaq 2*(2*4*4)(%r11), %rax;
- outunpack_blocks(%rax, RR3, RL3, RTMP, RX, RKM);
- leaq 3*(2*4*4)(%r11), %rax;
- outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
+ outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
+ outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
+ outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+ outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
ret;
__skip_dec:
vpsrldq $4, RKR, RKR;
jmp __dec_tail;
+
+.align 16
+.global cast5_ecb_enc_16way
+.type cast5_ecb_enc_16way,@function;
+
+cast5_ecb_enc_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ vmovdqu (0*4*4)(%rdx), RL1;
+ vmovdqu (1*4*4)(%rdx), RR1;
+ vmovdqu (2*4*4)(%rdx), RL2;
+ vmovdqu (3*4*4)(%rdx), RR2;
+ vmovdqu (4*4*4)(%rdx), RL3;
+ vmovdqu (5*4*4)(%rdx), RR3;
+ vmovdqu (6*4*4)(%rdx), RL4;
+ vmovdqu (7*4*4)(%rdx), RR4;
+
+ call __cast5_enc_blk16;
+
+ vmovdqu RR1, (0*4*4)(%r11);
+ vmovdqu RL1, (1*4*4)(%r11);
+ vmovdqu RR2, (2*4*4)(%r11);
+ vmovdqu RL2, (3*4*4)(%r11);
+ vmovdqu RR3, (4*4*4)(%r11);
+ vmovdqu RL3, (5*4*4)(%r11);
+ vmovdqu RR4, (6*4*4)(%r11);
+ vmovdqu RL4, (7*4*4)(%r11);
+
+ ret;
+
+.align 16
+.global cast5_ecb_dec_16way
+.type cast5_ecb_dec_16way,@function;
+
+cast5_ecb_dec_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ vmovdqu (0*4*4)(%rdx), RL1;
+ vmovdqu (1*4*4)(%rdx), RR1;
+ vmovdqu (2*4*4)(%rdx), RL2;
+ vmovdqu (3*4*4)(%rdx), RR2;
+ vmovdqu (4*4*4)(%rdx), RL3;
+ vmovdqu (5*4*4)(%rdx), RR3;
+ vmovdqu (6*4*4)(%rdx), RL4;
+ vmovdqu (7*4*4)(%rdx), RR4;
+
+ call __cast5_dec_blk16;
+
+ vmovdqu RR1, (0*4*4)(%r11);
+ vmovdqu RL1, (1*4*4)(%r11);
+ vmovdqu RR2, (2*4*4)(%r11);
+ vmovdqu RL2, (3*4*4)(%r11);
+ vmovdqu RR3, (4*4*4)(%r11);
+ vmovdqu RL3, (5*4*4)(%r11);
+ vmovdqu RR4, (6*4*4)(%r11);
+ vmovdqu RL4, (7*4*4)(%r11);
+
+ ret;
+
+.align 16
+.global cast5_cbc_dec_16way
+.type cast5_cbc_dec_16way,@function;
+
+cast5_cbc_dec_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ vmovdqu (0*16)(%rdx), RL1;
+ vmovdqu (1*16)(%rdx), RR1;
+ vmovdqu (2*16)(%rdx), RL2;
+ vmovdqu (3*16)(%rdx), RR2;
+ vmovdqu (4*16)(%rdx), RL3;
+ vmovdqu (5*16)(%rdx), RR3;
+ vmovdqu (6*16)(%rdx), RL4;
+ vmovdqu (7*16)(%rdx), RR4;
+
+ call __cast5_dec_blk16;
+
+ /* xor with src */
+ vmovq (%r12), RX;
+ vpshufd $0x4f, RX, RX;
+ vpxor RX, RR1, RR1;
+ vpxor 0*16+8(%r12), RL1, RL1;
+ vpxor 1*16+8(%r12), RR2, RR2;
+ vpxor 2*16+8(%r12), RL2, RL2;
+ vpxor 3*16+8(%r12), RR3, RR3;
+ vpxor 4*16+8(%r12), RL3, RL3;
+ vpxor 5*16+8(%r12), RR4, RR4;
+ vpxor 6*16+8(%r12), RL4, RL4;
+
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+ vmovdqu RR2, (2*16)(%r11);
+ vmovdqu RL2, (3*16)(%r11);
+ vmovdqu RR3, (4*16)(%r11);
+ vmovdqu RL3, (5*16)(%r11);
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+ popq %r12;
+
+ ret;
+
+.align 16
+.global cast5_ctr_16way
+.type cast5_ctr_16way,@function;
+
+cast5_ctr_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: iv (big endian, 64bit)
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ vpcmpeqd RTMP, RTMP, RTMP;
+ vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
+
+ vpcmpeqd RKR, RKR, RKR;
+ vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
+ vmovdqa .Lbswap_iv_mask, R1ST;
+ vmovdqa .Lbswap128_mask, RKM;
+
+ /* load IV and byteswap */
+ vmovq (%rcx), RX;
+ vpshufb R1ST, RX, RX;
+
+ /* construct IVs */
+ vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
+ vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
+ vpsubq RKR, RX, RX;
+ vpshufb RKM, RX, RR4; /* be: IV14, IV15 */
+
+ /* store last IV */
+ vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
+ vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
+ vmovq RX, (%rcx);
+
+ call __cast5_enc_blk16;
+
+ /* dst = src ^ iv */
+ vpxor (0*16)(%r12), RR1, RR1;
+ vpxor (1*16)(%r12), RL1, RL1;
+ vpxor (2*16)(%r12), RR2, RR2;
+ vpxor (3*16)(%r12), RL2, RL2;
+ vpxor (4*16)(%r12), RR3, RR3;
+ vpxor (5*16)(%r12), RL3, RL3;
+ vpxor (6*16)(%r12), RR4, RR4;
+ vpxor (7*16)(%r12), RL4, RL4;
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+ vmovdqu RR2, (2*16)(%r11);
+ vmovdqu RL2, (3*16)(%r11);
+ vmovdqu RR3, (4*16)(%r11);
+ vmovdqu RL3, (5*16)(%r11);
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+ popq %r12;
+
+ ret;
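
cast5_ctr_16way above never leaves the XMM domain while building its counters: the
64-bit big-endian IV is byte-swapped and replicated into both lanes, the first
vpsubq against {-1, 0} yields IV+1/IV+0, every further vpsubq against {-2, -2}
advances both lanes by two, each pair is byte-swapped back to big endian before
encryption, and the value written back to (%rcx) is IV+16. A C model of the values
produced, assuming a little-endian host (function names are illustrative only):

    #include <stdint.h>

    /* Stand-in for the vpshufb byte swaps (GCC/Clang builtin). */
    static uint64_t bswap64_model(uint64_t v)
    {
            return __builtin_bswap64(v);
    }

    /* Produce the 16 big-endian counter blocks and return the updated IV. */
    static uint64_t cast5_ctr_blocks_model(uint64_t iv_be, uint64_t blocks_be[16])
    {
            uint64_t ctr = bswap64_model(iv_be);      /* counter as a native integer */
            int i;

            for (i = 0; i < 16; i++)
                    blocks_be[i] = bswap64_model(ctr + i);

            return bswap64_model(ctr + 16);           /* new IV written back to (%rcx) */
    }
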
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index e0ea14f9547..c6631813dc1 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -37,29 +37,14 @@
#define CAST5_PARALLEL_BLOCKS 16
-asmlinkage void __cast5_enc_blk_16way(struct cast5_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void cast5_dec_blk_16way(struct cast5_ctx *ctx, u8 *dst,
+asmlinkage void cast5_ecb_enc_16way(struct cast5_ctx *ctx, u8 *dst,
const u8 *src);
-
-static inline void cast5_enc_blk_xway(struct cast5_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __cast5_enc_blk_16way(ctx, dst, src, false);
-}
-
-static inline void cast5_enc_blk_xway_xor(struct cast5_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __cast5_enc_blk_16way(ctx, dst, src, true);
-}
-
-static inline void cast5_dec_blk_xway(struct cast5_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- cast5_dec_blk_16way(ctx, dst, src);
-}
-
+asmlinkage void cast5_ecb_dec_16way(struct cast5_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
+ __be64 *iv);
static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
@@ -79,8 +64,11 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
+ void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
int err;
+ fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
+
err = blkcipher_walk_virt(desc, walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -93,10 +81,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
do {
- if (enc)
- cast5_enc_blk_xway(ctx, wdst, wsrc);
- else
- cast5_dec_blk_xway(ctx, wdst, wsrc);
+ fn(ctx, wdst, wsrc);
wsrc += bsize * CAST5_PARALLEL_BLOCKS;
wdst += bsize * CAST5_PARALLEL_BLOCKS;
@@ -107,12 +92,11 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
goto done;
}
+ fn = (enc) ? __cast5_encrypt : __cast5_decrypt;
+
/* Handle leftovers */
do {
- if (enc)
- __cast5_encrypt(ctx, wdst, wsrc);
- else
- __cast5_decrypt(ctx, wdst, wsrc);
+ fn(ctx, wdst, wsrc);
wsrc += bsize;
wdst += bsize;
@@ -194,9 +178,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 ivs[CAST5_PARALLEL_BLOCKS - 1];
u64 last_iv;
- int i;
/* Start of the last block. */
src += nbytes / bsize - 1;
@@ -211,13 +193,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
src -= CAST5_PARALLEL_BLOCKS - 1;
dst -= CAST5_PARALLEL_BLOCKS - 1;
- for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
- ivs[i] = src[i];
-
- cast5_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (i = 0; i < CAST5_PARALLEL_BLOCKS - 1; i++)
- *(dst + (i + 1)) ^= *(ivs + i);
+ cast5_cbc_dec_16way(ctx, (u8 *)dst, (u8 *)src);
nbytes -= bsize;
if (nbytes < bsize)
@@ -298,23 +274,12 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
- __be64 ctrblocks[CAST5_PARALLEL_BLOCKS];
- int i;
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
do {
- /* create ctrblks for parallel encrypt */
- for (i = 0; i < CAST5_PARALLEL_BLOCKS; i++) {
- if (dst != src)
- dst[i] = src[i];
-
- ctrblocks[i] = cpu_to_be64(ctrblk++);
- }
-
- cast5_enc_blk_xway_xor(ctx, (u8 *)dst,
- (u8 *)ctrblocks);
+ cast5_ctr_16way(ctx, (u8 *)dst, (u8 *)src,
+ (__be64 *)walk->iv);
src += CAST5_PARALLEL_BLOCKS;
dst += CAST5_PARALLEL_BLOCKS;
@@ -327,13 +292,16 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
/* Handle leftovers */
do {
+ u64 ctrblk;
+
if (dst != src)
*dst = *src;
- ctrblocks[0] = cpu_to_be64(ctrblk++);
+ ctrblk = *(u64 *)walk->iv;
+ be64_add_cpu((__be64 *)walk->iv, 1);
- __cast5_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
- *dst ^= ctrblocks[0];
+ __cast5_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
+ *dst ^= ctrblk;
src += 1;
dst += 1;
@@ -341,7 +309,6 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
} while (nbytes >= bsize);
done:
- *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
return nbytes;
}
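
The __cbc_decrypt hunk above drops the temporary ivs[] array because
cast5_cbc_dec_16way now does the chaining XOR itself: CBC decryption has no
inter-block dependency beyond P[i] = D(C[i]) ^ C[i-1], so all sixteen blocks can be
decrypted in parallel and only the final XOR looks at the neighbouring ciphertext;
the first block of each batch is still chained by the C code, against src[-1] or the
walk IV. A compact model of the pattern; decrypt_block() is a hypothetical stand-in,
not a kernel symbol, and dst/src are assumed distinct here (the assembler supports
in-place operation by re-reading the ciphertext from memory after decrypting in
registers):

    #include <stdint.h>

    extern uint64_t decrypt_block(uint64_t c);       /* hypothetical 1-block ECB decrypt */

    static void cbc_dec_batch_model(uint64_t *dst, const uint64_t *src, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    dst[i] = decrypt_block(src[i]);  /* independent, parallelizable */

            for (i = n - 1; i >= 1; i--)
                    dst[i] ^= src[i - 1];            /* XOR with previous ciphertext */

            /* dst[0] is chained by the caller (previous block or IV) */
    }
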
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 218d283772f..2569d0da841 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -23,22 +23,24 @@
*
*/
+#include "glue_helper-asm-avx.S"
+
.file "cast6-avx-x86_64-asm_64.S"
-.extern cast6_s1
-.extern cast6_s2
-.extern cast6_s3
-.extern cast6_s4
+.extern cast_s1
+.extern cast_s2
+.extern cast_s3
+.extern cast_s4
/* structure of crypto context */
#define km 0
#define kr (12*4*4)
/* s-boxes */
-#define s1 cast6_s1
-#define s2 cast6_s2
-#define s3 cast6_s3
-#define s4 cast6_s4
+#define s1 cast_s1
+#define s2 cast_s2
+#define s3 cast_s3
+#define s4 cast_s4
/**********************************************************************
8-way AVX cast6
@@ -205,11 +207,7 @@
vpunpcklqdq x3, t2, x2; \
vpunpckhqdq x3, t2, x3;
-#define inpack_blocks(in, x0, x1, x2, x3, t0, t1, t2, rmask) \
- vmovdqu (0*4*4)(in), x0; \
- vmovdqu (1*4*4)(in), x1; \
- vmovdqu (2*4*4)(in), x2; \
- vmovdqu (3*4*4)(in), x3; \
+#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpshufb rmask, x2, x2; \
@@ -217,39 +215,21 @@
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
-#define outunpack_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
+#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
vpshufb rmask, x0, x0; \
vpshufb rmask, x1, x1; \
vpshufb rmask, x2, x2; \
- vpshufb rmask, x3, x3; \
- vmovdqu x0, (0*4*4)(out); \
- vmovdqu x1, (1*4*4)(out); \
- vmovdqu x2, (2*4*4)(out); \
- vmovdqu x3, (3*4*4)(out);
-
-#define outunpack_xor_blocks(out, x0, x1, x2, x3, t0, t1, t2, rmask) \
- transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
- \
- vpshufb rmask, x0, x0; \
- vpshufb rmask, x1, x1; \
- vpshufb rmask, x2, x2; \
- vpshufb rmask, x3, x3; \
- vpxor (0*4*4)(out), x0, x0; \
- vmovdqu x0, (0*4*4)(out); \
- vpxor (1*4*4)(out), x1, x1; \
- vmovdqu x1, (1*4*4)(out); \
- vpxor (2*4*4)(out), x2, x2; \
- vmovdqu x2, (2*4*4)(out); \
- vpxor (3*4*4)(out), x3, x3; \
- vmovdqu x3, (3*4*4)(out);
+ vpshufb rmask, x3, x3;
.data
.align 16
.Lbswap_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+.Lbswap128_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
@@ -269,31 +249,26 @@
.text
-.align 16
-.global __cast6_enc_blk_8way
-.type __cast6_enc_blk_8way,@function;
+.align 8
+.type __cast6_enc_blk8,@function;
-__cast6_enc_blk_8way:
+__cast6_enc_blk8:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: bool, if true: xor output
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
+ * output:
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
*/
pushq %rbp;
pushq %rbx;
- pushq %rcx;
vmovdqa .Lbswap_mask, RKM;
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
- leaq (4*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-
- movq %rsi, %r11;
+ inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
preload_rkr(0, dummy, none);
Q(0);
@@ -311,36 +286,25 @@ __cast6_enc_blk_8way:
QBAR(10);
QBAR(11);
- popq %rcx;
popq %rbx;
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
- leaq (4*4*4)(%r11), %rax;
-
- testb %cl, %cl;
- jnz __enc_xor8;
-
- outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-
- ret;
-__enc_xor8:
- outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
ret;
-.align 16
-.global cast6_dec_blk_8way
-.type cast6_dec_blk_8way,@function;
+.align 8
+.type __cast6_dec_blk8,@function;
-cast6_dec_blk_8way:
+__cast6_dec_blk8:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
+ * output:
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
*/
pushq %rbp;
@@ -350,11 +314,8 @@ cast6_dec_blk_8way:
vmovd .Lfirst_mask, R1ST;
vmovd .L32_mask, R32;
- leaq (4*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- inpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-
- movq %rsi, %r11;
+ inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
Q(11);
@@ -376,8 +337,103 @@ cast6_dec_blk_8way:
popq %rbp;
vmovdqa .Lbswap_mask, RKM;
- leaq (4*4*4)(%r11), %rax;
- outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
+
+ ret;
+
+.align 8
+.global cast6_ecb_enc_8way
+.type cast6_ecb_enc_8way,@function;
+
+cast6_ecb_enc_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __cast6_enc_blk8;
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ ret;
+
+.align 8
+.global cast6_ecb_dec_8way
+.type cast6_ecb_dec_8way,@function;
+
+cast6_ecb_dec_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __cast6_dec_blk8;
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ ret;
+
+.align 8
+.global cast6_cbc_dec_8way
+.type cast6_cbc_dec_8way,@function;
+
+cast6_cbc_dec_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __cast6_dec_blk8;
+
+ store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ popq %r12;
+
+ ret;
+
+.align 8
+.global cast6_ctr_8way
+.type cast6_ctr_8way,@function;
+
+cast6_ctr_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: iv (little endian, 128bit)
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX, RKR, RKM);
+
+ call __cast6_enc_blk8;
+
+ store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ popq %r12;
ret;
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 15e5f85a501..92f7ca24790 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -40,79 +40,34 @@
#define CAST6_PARALLEL_BLOCKS 8
-asmlinkage void __cast6_enc_blk_8way(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void cast6_dec_blk_8way(struct cast6_ctx *ctx, u8 *dst,
+asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
const u8 *src);
-static inline void cast6_enc_blk_xway(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __cast6_enc_blk_8way(ctx, dst, src, false);
-}
-
-static inline void cast6_enc_blk_xway_xor(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __cast6_enc_blk_8way(ctx, dst, src, true);
-}
-
-static inline void cast6_dec_blk_xway(struct cast6_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- cast6_dec_blk_8way(ctx, dst, src);
-}
-
-
-static void cast6_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
-{
- u128 ivs[CAST6_PARALLEL_BLOCKS - 1];
- unsigned int j;
-
- for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
- ivs[j] = src[j];
-
- cast6_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (j = 0; j < CAST6_PARALLEL_BLOCKS - 1; j++)
- u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
-}
+asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
+ le128 *iv);
-static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
+static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblk;
- u128_to_be128(&ctrblk, iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblk, iv);
+ le128_inc(iv);
__cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
u128_xor(dst, src, (u128 *)&ctrblk);
}
-static void cast6_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
-{
- be128 ctrblks[CAST6_PARALLEL_BLOCKS];
- unsigned int i;
-
- for (i = 0; i < CAST6_PARALLEL_BLOCKS; i++) {
- if (dst != src)
- dst[i] = src[i];
-
- u128_to_be128(&ctrblks[i], iv);
- u128_inc(iv);
- }
-
- cast6_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
-}
-
static const struct common_glue_ctx cast6_enc = {
.num_funcs = 2,
.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_enc_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
@@ -125,7 +80,7 @@ static const struct common_glue_ctx cast6_ctr = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr_xway) }
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) }
}, {
.num_blocks = 1,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
@@ -138,7 +93,7 @@ static const struct common_glue_ctx cast6_dec = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_dec_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
@@ -151,7 +106,7 @@ static const struct common_glue_ctx cast6_dec_cbc = {
.funcs = { {
.num_blocks = CAST6_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_decrypt_cbc_xway) }
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) }
}, {
.num_blocks = 1,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
@@ -215,7 +170,7 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_enc_blk_xway(ctx->ctx, srcdst, srcdst);
+ cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
return;
}
@@ -232,7 +187,7 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_dec_blk_xway(ctx->ctx, srcdst, srcdst);
+ cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
return;
}
diff --git a/arch/x86/crypto/crc32c-intel.c b/arch/x86/crypto/crc32c-intel_glue.c
index 493f959261f..6812ad98355 100644
--- a/arch/x86/crypto/crc32c-intel.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -32,6 +32,8 @@
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
@@ -44,6 +46,31 @@
#define REX_PRE
#endif
+#ifdef CONFIG_X86_64
+/*
+ * use carryless multiply version of crc32c when buffer
+ * size is >= 512 (when eager fpu is enabled) or
+ * >= 1024 (when eager fpu is disabled) to account
+ * for fpu state save/restore overhead.
+ */
+#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
+#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+
+asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
+ unsigned int crc_init);
+static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
+#if defined(X86_FEATURE_EAGER_FPU)
+#define set_pcl_breakeven_point() \
+do { \
+ if (!use_eager_fpu()) \
+ crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
+} while (0)
+#else
+#define set_pcl_breakeven_point() \
+ (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
+#endif
+#endif /* CONFIG_X86_64 */
+
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
{
while (length--) {
@@ -154,6 +181,52 @@ static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
return 0;
}
+#ifdef CONFIG_X86_64
+static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ u32 *crcp = shash_desc_ctx(desc);
+
+ /*
+ * use faster PCL version if datasize is large enough to
+ * overcome kernel fpu state save/restore overhead
+ */
+ if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ kernel_fpu_begin();
+ *crcp = crc_pcl(data, len, *crcp);
+ kernel_fpu_end();
+ } else
+ *crcp = crc32c_intel_le_hw(*crcp, data, len);
+ return 0;
+}
+
+static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ kernel_fpu_begin();
+ *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
+ kernel_fpu_end();
+ } else
+ *(__le32 *)out =
+ ~cpu_to_le32(crc32c_intel_le_hw(*crcp, data, len));
+ return 0;
+}
+
+static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32c_pcl_intel_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32c_pcl_intel_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __crc32c_pcl_intel_finup(crypto_shash_ctx(desc->tfm), data, len,
+ out);
+}
+#endif /* CONFIG_X86_64 */
+
static struct shash_alg alg = {
.setkey = crc32c_intel_setkey,
.init = crc32c_intel_init,
@@ -184,6 +257,14 @@ static int __init crc32c_intel_mod_init(void)
{
if (!x86_match_cpu(crc32c_cpu_id))
return -ENODEV;
+#ifdef CONFIG_X86_64
+ if (cpu_has_pclmulqdq) {
+ alg.update = crc32c_pcl_intel_update;
+ alg.finup = crc32c_pcl_intel_finup;
+ alg.digest = crc32c_pcl_intel_digest;
+ set_pcl_breakeven_point();
+ }
+#endif
return crypto_register_shash(&alg);
}
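
The registration hook above only swaps in the PCLMULQDQ routines when the CPU
advertises the instruction, and even then crc32c_pcl_intel_update() keeps using the
plain crc32-instruction path for short buffers: kernel_fpu_begin()/kernel_fpu_end()
have to preserve the FPU/SIMD state, which only pays off past the breakeven sizes
defined earlier (512 bytes with eager FPU, 1024 without). A standalone sketch of
that decision; the helper names are placeholders, and only the thresholds are taken
from the patch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Placeholders for crc_pcl() and the crc32-instruction fallback. */
    extern uint32_t crc_pcl_model(const uint8_t *data, int len, uint32_t crc);
    extern uint32_t crc32c_hw_model(uint32_t crc, const uint8_t *data, size_t len);

    static uint32_t crc32c_update_model(uint32_t crc, const uint8_t *data, size_t len,
                                        bool eager_fpu, bool fpu_usable)
    {
            size_t breakeven = eager_fpu ? 512 : 1024;   /* breakeven sizes from the patch */

            if (len >= breakeven && fpu_usable)
                    return crc_pcl_model(data, len, crc);  /* PCLMULQDQ path, needs FPU context */

            return crc32c_hw_model(crc, data, len);        /* crc32 instruction, no FPU needed */
    }
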
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
new file mode 100644
index 00000000000..93c6d39237a
--- /dev/null
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -0,0 +1,460 @@
+/*
+ * Implement fast CRC32C with PCLMULQDQ instructions. (x86_64)
+ *
+ * The white paper on CRC32C calculations with PCLMULQDQ instruction can be
+ * downloaded from:
+ * http://download.intel.com/design/intarch/papers/323405.pdf
+ *
+ * Copyright (C) 2012 Intel Corporation.
+ *
+ * Authors:
+ * Wajdi Feghali <wajdi.k.feghali@intel.com>
+ * James Guilford <james.guilford@intel.com>
+ * David Cote <david.m.cote@intel.com>
+ * Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+
+.macro LABEL prefix n
+\prefix\n\():
+.endm
+
+.macro JMPTBL_ENTRY i
+.word crc_\i - crc_array
+.endm
+
+.macro JNC_LESS_THAN j
+ jnc less_than_\j
+.endm
+
+# Define threshold where buffers are considered "small" and routed to more
+# efficient "by-1" code. This "by-1" code only handles up to 255 bytes, so
+# SMALL_SIZE can be no larger than 255.
+
+#define SMALL_SIZE 200
+
+.if (SMALL_SIZE > 255)
+.error "SMALL_ SIZE must be < 256"
+.endif
+
+# unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
+
+.global crc_pcl
+crc_pcl:
+#define bufp %rdi
+#define bufp_dw %edi
+#define bufp_w %di
+#define bufp_b %dil
+#define bufptmp %rcx
+#define block_0 %rcx
+#define block_1 %rdx
+#define block_2 %r11
+#define len %rsi
+#define len_dw %esi
+#define len_w %si
+#define len_b %sil
+#define crc_init_arg %rdx
+#define tmp %rbx
+#define crc_init %r8
+#define crc_init_dw %r8d
+#define crc1 %r9
+#define crc2 %r10
+
+ pushq %rbx
+ pushq %rdi
+ pushq %rsi
+
+ ## Move crc_init for Linux to a different register
+ mov crc_init_arg, crc_init
+
+ ################################################################
+ ## 1) ALIGN:
+ ################################################################
+
+ mov bufp, bufptmp # rdi = *buf
+ neg bufp
+ and $7, bufp # calculate the unalignment amount of
+ # the address
+ je proc_block # Skip if aligned
+
+ ## If len is less than 8 and we're unaligned, we need to jump
+ ## to special code to avoid reading beyond the end of the buffer
+ cmp $8, len
+ jae do_align
+ # less_than_8 expects length in upper 3 bits of len_dw
+ # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
+ shl $32-3+1, len_dw
+ jmp less_than_8_post_shl1
+
+do_align:
+ #### Calculate CRC of unaligned bytes of the buffer (if any)
+ movq (bufptmp), tmp # load a quadword from the buffer
+ add bufp, bufptmp # align buffer pointer for quadword
+ # processing
+ sub bufp, len # update buffer length
+align_loop:
+ crc32b %bl, crc_init_dw # compute crc32 of 1-byte
+ shr $8, tmp # get next byte
+ dec bufp
+ jne align_loop
+
+proc_block:
+
+ ################################################################
+ ## 2) PROCESS BLOCKS:
+ ################################################################
+
+ ## compute num of bytes to be processed
+ movq len, tmp # save num bytes in tmp
+
+ cmpq $128*24, len
+ jae full_block
+
+continue_block:
+ cmpq $SMALL_SIZE, len
+ jb small
+
+ ## len < 128*24
+ movq $2731, %rax # 2731 = ceil(2^16 / 24)
+ mul len_dw
+ shrq $16, %rax
+
+ ## eax contains floor(bytes / 24) = num 24-byte chunks to do
+
+ ## process rax 24-byte chunks (128 >= rax >= 0)
+
+ ## compute end address of each block
+ ## block 0 (base addr + RAX * 8)
+ ## block 1 (base addr + RAX * 16)
+ ## block 2 (base addr + RAX * 24)
+ lea (bufptmp, %rax, 8), block_0
+ lea (block_0, %rax, 8), block_1
+ lea (block_1, %rax, 8), block_2
+
+ xor crc1, crc1
+ xor crc2, crc2
+
+ ## branch into array
+ lea jump_table(%rip), bufp
+ movzxw (bufp, %rax, 2), len
+ offset=crc_array-jump_table
+ lea offset(bufp, len, 1), bufp
+ jmp *bufp
+
+ ################################################################
+ ## 2a) PROCESS FULL BLOCKS:
+ ################################################################
+full_block:
+ movq $128,%rax
+ lea 128*8*2(block_0), block_1
+ lea 128*8*3(block_0), block_2
+ add $128*8*1, block_0
+
+ xor crc1,crc1
+ xor crc2,crc2
+
+ # Fall through into top of crc array (crc_128)
+
+ ################################################################
+ ## 3) CRC Array:
+ ################################################################
+
+crc_array:
+ i=128
+.rept 128-1
+.altmacro
+LABEL crc_ %i
+.noaltmacro
+ crc32q -i*8(block_0), crc_init
+ crc32q -i*8(block_1), crc1
+ crc32q -i*8(block_2), crc2
+ i=(i-1)
+.endr
+
+.altmacro
+LABEL crc_ %i
+.noaltmacro
+ crc32q -i*8(block_0), crc_init
+ crc32q -i*8(block_1), crc1
+# SKIP crc32 -i*8(block_2), crc2 ; Don't do this one yet
+
+ mov block_2, block_0
+
+ ################################################################
+ ## 4) Combine three results:
+ ################################################################
+
+ lea (K_table-16)(%rip), bufp # first entry is for idx 1
+ shlq $3, %rax # rax *= 8
+ subq %rax, tmp # tmp -= rax*8
+ shlq $1, %rax
+ subq %rax, tmp # tmp -= rax*16
+ # (total tmp -= rax*24)
+ addq %rax, bufp
+
+ movdqa (bufp), %xmm0 # 2 consts: K1:K2
+
+ movq crc_init, %xmm1 # CRC for block 1
+ pclmulqdq $0x00,%xmm0,%xmm1 # Multiply by K2
+
+ movq crc1, %xmm2 # CRC for block 2
+ pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1
+
+ pxor %xmm2,%xmm1
+ movq %xmm1, %rax
+ xor -i*8(block_2), %rax
+ mov crc2, crc_init
+ crc32 %rax, crc_init
+
+################################################################
+## 5) Check for end:
+################################################################
+
+LABEL crc_ 0
+ mov tmp, len
+ cmp $128*24, tmp
+ jae full_block
+ cmp $24, tmp
+ jae continue_block
+
+less_than_24:
+ shl $32-4, len_dw # less_than_16 expects length
+ # in upper 4 bits of len_dw
+ jnc less_than_16
+ crc32q (bufptmp), crc_init
+ crc32q 8(bufptmp), crc_init
+ jz do_return
+ add $16, bufptmp
+ # len is less than 8 if we got here
+ # less_than_8 expects length in upper 3 bits of len_dw
+ # less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
+ shl $2, len_dw
+ jmp less_than_8_post_shl1
+
+ #######################################################################
+ ## 6) LESS THAN 256 bytes REMAIN AT THIS POINT (length fits in 8 bits of len)
+ #######################################################################
+small:
+ shl $32-8, len_dw # Prepare len_dw for less_than_256
+ j=256
+.rept 5 # j = {256, 128, 64, 32, 16}
+.altmacro
+LABEL less_than_ %j # less_than_j: Length should be in
+ # upper lg(j) bits of len_dw
+ j=(j/2)
+ shl $1, len_dw # Get next MSB
+ JNC_LESS_THAN %j
+.noaltmacro
+ i=0
+.rept (j/8)
+ crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data
+ i=i+8
+.endr
+ jz do_return # Return if remaining length is zero
+ add $j, bufptmp # Advance buf
+.endr
+
+less_than_8: # Length should be stored in
+ # upper 3 bits of len_dw
+ shl $1, len_dw
+less_than_8_post_shl1:
+ jnc less_than_4
+ crc32l (bufptmp), crc_init_dw # CRC of 4 bytes
+ jz do_return # return if remaining data is zero
+ add $4, bufptmp
+less_than_4: # Length should be stored in
+ # upper 2 bits of len_dw
+ shl $1, len_dw
+ jnc less_than_2
+ crc32w (bufptmp), crc_init_dw # CRC of 2 bytes
+ jz do_return # return if remaining data is zero
+ add $2, bufptmp
+less_than_2: # Length should be stored in the MSB
+ # of len_dw
+ shl $1, len_dw
+ jnc less_than_1
+ crc32b (bufptmp), crc_init_dw # CRC of 1 byte
+less_than_1: # Length should be zero
+do_return:
+ movq crc_init, %rax
+ popq %rsi
+ popq %rdi
+ popq %rbx
+ ret
+
+ ################################################################
+ ## Jump table
+ ## Table is 129 entries x 2 bytes each
+ ################################################################
+.align 4
+jump_table:
+ i=0
+.rept 129
+.altmacro
+JMPTBL_ENTRY %i
+.noaltmacro
+ i=i+1
+.endr
+ ################################################################
+ ## PCLMULQDQ tables
+ ## Table is 128 entries x 2 quad words each
+ ################################################################
+.data
+.align 64
+K_table:
+ .quad 0x14cd00bd6,0x105ec76f0
+ .quad 0x0ba4fc28e,0x14cd00bd6
+ .quad 0x1d82c63da,0x0f20c0dfe
+ .quad 0x09e4addf8,0x0ba4fc28e
+ .quad 0x039d3b296,0x1384aa63a
+ .quad 0x102f9b8a2,0x1d82c63da
+ .quad 0x14237f5e6,0x01c291d04
+ .quad 0x00d3b6092,0x09e4addf8
+ .quad 0x0c96cfdc0,0x0740eef02
+ .quad 0x18266e456,0x039d3b296
+ .quad 0x0daece73e,0x0083a6eec
+ .quad 0x0ab7aff2a,0x102f9b8a2
+ .quad 0x1248ea574,0x1c1733996
+ .quad 0x083348832,0x14237f5e6
+ .quad 0x12c743124,0x02ad91c30
+ .quad 0x0b9e02b86,0x00d3b6092
+ .quad 0x018b33a4e,0x06992cea2
+ .quad 0x1b331e26a,0x0c96cfdc0
+ .quad 0x17d35ba46,0x07e908048
+ .quad 0x1bf2e8b8a,0x18266e456
+ .quad 0x1a3e0968a,0x11ed1f9d8
+ .quad 0x0ce7f39f4,0x0daece73e
+ .quad 0x061d82e56,0x0f1d0f55e
+ .quad 0x0d270f1a2,0x0ab7aff2a
+ .quad 0x1c3f5f66c,0x0a87ab8a8
+ .quad 0x12ed0daac,0x1248ea574
+ .quad 0x065863b64,0x08462d800
+ .quad 0x11eef4f8e,0x083348832
+ .quad 0x1ee54f54c,0x071d111a8
+ .quad 0x0b3e32c28,0x12c743124
+ .quad 0x0064f7f26,0x0ffd852c6
+ .quad 0x0dd7e3b0c,0x0b9e02b86
+ .quad 0x0f285651c,0x0dcb17aa4
+ .quad 0x010746f3c,0x018b33a4e
+ .quad 0x1c24afea4,0x0f37c5aee
+ .quad 0x0271d9844,0x1b331e26a
+ .quad 0x08e766a0c,0x06051d5a2
+ .quad 0x093a5f730,0x17d35ba46
+ .quad 0x06cb08e5c,0x11d5ca20e
+ .quad 0x06b749fb2,0x1bf2e8b8a
+ .quad 0x1167f94f2,0x021f3d99c
+ .quad 0x0cec3662e,0x1a3e0968a
+ .quad 0x19329634a,0x08f158014
+ .quad 0x0e6fc4e6a,0x0ce7f39f4
+ .quad 0x08227bb8a,0x1a5e82106
+ .quad 0x0b0cd4768,0x061d82e56
+ .quad 0x13c2b89c4,0x188815ab2
+ .quad 0x0d7a4825c,0x0d270f1a2
+ .quad 0x10f5ff2ba,0x105405f3e
+ .quad 0x00167d312,0x1c3f5f66c
+ .quad 0x0f6076544,0x0e9adf796
+ .quad 0x026f6a60a,0x12ed0daac
+ .quad 0x1a2adb74e,0x096638b34
+ .quad 0x19d34af3a,0x065863b64
+ .quad 0x049c3cc9c,0x1e50585a0
+ .quad 0x068bce87a,0x11eef4f8e
+ .quad 0x1524fa6c6,0x19f1c69dc
+ .quad 0x16cba8aca,0x1ee54f54c
+ .quad 0x042d98888,0x12913343e
+ .quad 0x1329d9f7e,0x0b3e32c28
+ .quad 0x1b1c69528,0x088f25a3a
+ .quad 0x02178513a,0x0064f7f26
+ .quad 0x0e0ac139e,0x04e36f0b0
+ .quad 0x0170076fa,0x0dd7e3b0c
+ .quad 0x141a1a2e2,0x0bd6f81f8
+ .quad 0x16ad828b4,0x0f285651c
+ .quad 0x041d17b64,0x19425cbba
+ .quad 0x1fae1cc66,0x010746f3c
+ .quad 0x1a75b4b00,0x18db37e8a
+ .quad 0x0f872e54c,0x1c24afea4
+ .quad 0x01e41e9fc,0x04c144932
+ .quad 0x086d8e4d2,0x0271d9844
+ .quad 0x160f7af7a,0x052148f02
+ .quad 0x05bb8f1bc,0x08e766a0c
+ .quad 0x0a90fd27a,0x0a3c6f37a
+ .quad 0x0b3af077a,0x093a5f730
+ .quad 0x04984d782,0x1d22c238e
+ .quad 0x0ca6ef3ac,0x06cb08e5c
+ .quad 0x0234e0b26,0x063ded06a
+ .quad 0x1d88abd4a,0x06b749fb2
+ .quad 0x04597456a,0x04d56973c
+ .quad 0x0e9e28eb4,0x1167f94f2
+ .quad 0x07b3ff57a,0x19385bf2e
+ .quad 0x0c9c8b782,0x0cec3662e
+ .quad 0x13a9cba9e,0x0e417f38a
+ .quad 0x093e106a4,0x19329634a
+ .quad 0x167001a9c,0x14e727980
+ .quad 0x1ddffc5d4,0x0e6fc4e6a
+ .quad 0x00df04680,0x0d104b8fc
+ .quad 0x02342001e,0x08227bb8a
+ .quad 0x00a2a8d7e,0x05b397730
+ .quad 0x168763fa6,0x0b0cd4768
+ .quad 0x1ed5a407a,0x0e78eb416
+ .quad 0x0d2c3ed1a,0x13c2b89c4
+ .quad 0x0995a5724,0x1641378f0
+ .quad 0x19b1afbc4,0x0d7a4825c
+ .quad 0x109ffedc0,0x08d96551c
+ .quad 0x0f2271e60,0x10f5ff2ba
+ .quad 0x00b0bf8ca,0x00bf80dd2
+ .quad 0x123888b7a,0x00167d312
+ .quad 0x1e888f7dc,0x18dcddd1c
+ .quad 0x002ee03b2,0x0f6076544
+ .quad 0x183e8d8fe,0x06a45d2b2
+ .quad 0x133d7a042,0x026f6a60a
+ .quad 0x116b0f50c,0x1dd3e10e8
+ .quad 0x05fabe670,0x1a2adb74e
+ .quad 0x130004488,0x0de87806c
+ .quad 0x000bcf5f6,0x19d34af3a
+ .quad 0x18f0c7078,0x014338754
+ .quad 0x017f27698,0x049c3cc9c
+ .quad 0x058ca5f00,0x15e3e77ee
+ .quad 0x1af900c24,0x068bce87a
+ .quad 0x0b5cfca28,0x0dd07448e
+ .quad 0x0ded288f8,0x1524fa6c6
+ .quad 0x059f229bc,0x1d8048348
+ .quad 0x06d390dec,0x16cba8aca
+ .quad 0x037170390,0x0a3e3e02c
+ .quad 0x06353c1cc,0x042d98888
+ .quad 0x0c4584f5c,0x0d73c7bea
+ .quad 0x1f16a3418,0x1329d9f7e
+ .quad 0x0531377e2,0x185137662
+ .quad 0x1d8d9ca7c,0x1b1c69528
+ .quad 0x0b25b29f2,0x18a08b5bc
+ .quad 0x19fb2a8b0,0x02178513a
+ .quad 0x1a08fe6ac,0x1da758ae0
+ .quad 0x045cddf4e,0x0e0ac139e
+ .quad 0x1a91647f2,0x169cf9eb0
+ .quad 0x1a0f717c4,0x0170076fa
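
The continue_block path above sizes its jump into the crc array with a reciprocal multiply rather than a divide: 2731 = ceil(2^16 / 24), and for every length that path can see (len < 128*24) the value (len * 2731) >> 16 equals len / 24 exactly, so %rax ends up holding the number of 24-byte chunks. A small standalone C check of that identity (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 2731 = ceil(2^16 / 24); the rounding error is small enough that
	 * the shifted product matches exact division for every length the
	 * continue_block path handles (len <= 128*24 = 3072). */
	for (uint32_t len = 0; len <= 128 * 24; len++)
		assert(((len * 2731) >> 16) == len / 24);

	printf("(len * 2731) >> 16 == len / 24 for all len <= %u\n", 128 * 24);
	return 0;
}
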
diff --git a/arch/x86/crypto/glue_helper-asm-avx.S b/arch/x86/crypto/glue_helper-asm-avx.S
new file mode 100644
index 00000000000..f7b6ea2ddfd
--- /dev/null
+++ b/arch/x86/crypto/glue_helper-asm-avx.S
@@ -0,0 +1,91 @@
+/*
+ * Shared glue code for 128bit block ciphers, AVX assembler macros
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
+ vmovdqu (0*16)(src), x0; \
+ vmovdqu (1*16)(src), x1; \
+ vmovdqu (2*16)(src), x2; \
+ vmovdqu (3*16)(src), x3; \
+ vmovdqu (4*16)(src), x4; \
+ vmovdqu (5*16)(src), x5; \
+ vmovdqu (6*16)(src), x6; \
+ vmovdqu (7*16)(src), x7;
+
+#define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
+ vmovdqu x0, (0*16)(dst); \
+ vmovdqu x1, (1*16)(dst); \
+ vmovdqu x2, (2*16)(dst); \
+ vmovdqu x3, (3*16)(dst); \
+ vmovdqu x4, (4*16)(dst); \
+ vmovdqu x5, (5*16)(dst); \
+ vmovdqu x6, (6*16)(dst); \
+ vmovdqu x7, (7*16)(dst);
+
+#define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
+ vpxor (0*16)(src), x1, x1; \
+ vpxor (1*16)(src), x2, x2; \
+ vpxor (2*16)(src), x3, x3; \
+ vpxor (3*16)(src), x4, x4; \
+ vpxor (4*16)(src), x5, x5; \
+ vpxor (5*16)(src), x6, x6; \
+ vpxor (6*16)(src), x7, x7; \
+ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
+
+#define inc_le128(x, minus_one, tmp) \
+ vpcmpeqq minus_one, x, tmp; \
+ vpsubq minus_one, x, x; \
+ vpslldq $8, tmp, tmp; \
+ vpsubq tmp, x, x;
+
+#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
+ vpcmpeqd t0, t0, t0; \
+ vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \
+ vmovdqa bswap, t1; \
+ \
+ /* load IV and byteswap */ \
+ vmovdqu (iv), x7; \
+ vpshufb t1, x7, x0; \
+ \
+ /* construct IVs */ \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x1; \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x2; \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x3; \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x4; \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x5; \
+ inc_le128(x7, t0, t2); \
+ vpshufb t1, x7, x6; \
+ inc_le128(x7, t0, t2); \
+ vmovdqa x7, t2; \
+ vpshufb t1, x7, x7; \
+ inc_le128(t2, t0, t1); \
+ vmovdqu t2, (iv);
+
+#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
+ vpxor (0*16)(src), x0, x0; \
+ vpxor (1*16)(src), x1, x1; \
+ vpxor (2*16)(src), x2, x2; \
+ vpxor (3*16)(src), x3, x3; \
+ vpxor (4*16)(src), x4, x4; \
+ vpxor (5*16)(src), x5, x5; \
+ vpxor (6*16)(src), x6, x6; \
+ vpxor (7*16)(src), x7, x7; \
+ store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
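
The inc_le128 macro above bumps a 128-bit little-endian counter without leaving the SIMD registers: vpcmpeqq flags a low quadword that is about to wrap, vpsubq of the minus-one lane adds 1 to the low half, and subtracting the vpslldq-shifted compare mask carries into the high half. A scalar C model of that carry handling (illustrative only, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* 128-bit counter held as two little-endian 64-bit halves. */
struct le128_model { uint64_t lo, hi; };

static void inc_le128_model(struct le128_model *x)
{
	uint64_t carry = (x->lo == UINT64_MAX);	/* vpcmpeqq against the -1 lane */
	x->lo += 1;				/* vpsubq minus_one, x, x       */
	x->hi += carry;				/* vpsubq of the shifted mask   */
}

int main(void)
{
	struct le128_model c = { .lo = UINT64_MAX, .hi = 5 };

	inc_le128_model(&c);
	printf("lo=%llu hi=%llu\n",		/* prints lo=0 hi=6 */
	       (unsigned long long)c.lo, (unsigned long long)c.hi);
	return 0;
}
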
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 30b3927bd73..22ce4f683e5 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -221,16 +221,16 @@ static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
u8 *src = (u8 *)walk->src.virt.addr;
u8 *dst = (u8 *)walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
- u128 ctrblk;
+ le128 ctrblk;
u128 tmp;
- be128_to_u128(&ctrblk, (be128 *)walk->iv);
+ be128_to_le128(&ctrblk, (be128 *)walk->iv);
memcpy(&tmp, src, nbytes);
fn_ctr(ctx, &tmp, &tmp, &ctrblk);
memcpy(dst, &tmp, nbytes);
- u128_to_be128((be128 *)walk->iv, &ctrblk);
+ le128_to_be128((be128 *)walk->iv, &ctrblk);
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_final_128bit);
@@ -243,11 +243,11 @@ static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
- u128 ctrblk;
+ le128 ctrblk;
unsigned int num_blocks, func_bytes;
unsigned int i;
- be128_to_u128(&ctrblk, (be128 *)walk->iv);
+ be128_to_le128(&ctrblk, (be128 *)walk->iv);
/* Process multi-block batch */
for (i = 0; i < gctx->num_funcs; i++) {
@@ -269,7 +269,7 @@ static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
}
done:
- u128_to_be128((be128 *)walk->iv, &ctrblk);
+ le128_to_be128((be128 *)walk->iv, &ctrblk);
return nbytes;
}
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 504106bf04a..02b0e9fe997 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -24,7 +24,16 @@
*
*/
+#include "glue_helper-asm-avx.S"
+
.file "serpent-avx-x86_64-asm_64.S"
+
+.data
+.align 16
+
+.Lbswap128_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
.text
#define CTX %rdi
@@ -550,51 +559,27 @@
vpunpcklqdq x3, t2, x2; \
vpunpckhqdq x3, t2, x3;
-#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
- vmovdqu (0*4*4)(in), x0; \
- vmovdqu (1*4*4)(in), x1; \
- vmovdqu (2*4*4)(in), x2; \
- vmovdqu (3*4*4)(in), x3; \
- \
+#define read_blocks(x0, x1, x2, x3, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
-#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
- transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
- \
- vmovdqu x0, (0*4*4)(out); \
- vmovdqu x1, (1*4*4)(out); \
- vmovdqu x2, (2*4*4)(out); \
- vmovdqu x3, (3*4*4)(out);
-
-#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
- transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
- \
- vpxor (0*4*4)(out), x0, x0; \
- vmovdqu x0, (0*4*4)(out); \
- vpxor (1*4*4)(out), x1, x1; \
- vmovdqu x1, (1*4*4)(out); \
- vpxor (2*4*4)(out), x2, x2; \
- vmovdqu x2, (2*4*4)(out); \
- vpxor (3*4*4)(out), x3, x3; \
- vmovdqu x3, (3*4*4)(out);
+#define write_blocks(x0, x1, x2, x3, t0, t1, t2) \
+ transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
.align 8
-.global __serpent_enc_blk_8way_avx
-.type __serpent_enc_blk_8way_avx,@function;
+.type __serpent_enc_blk8_avx,@function;
-__serpent_enc_blk_8way_avx:
+__serpent_enc_blk8_avx:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: bool, if true: xor output
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
+ * output:
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
*/
vpcmpeqd RNOT, RNOT, RNOT;
- leaq (4*4*4)(%rdx), %rax;
- read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+ read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
K2(RA, RB, RC, RD, RE, 0);
S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
@@ -630,38 +615,26 @@ __serpent_enc_blk_8way_avx:
S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31);
S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32);
- leaq (4*4*4)(%rsi), %rax;
-
- testb %cl, %cl;
- jnz __enc_xor8;
-
- write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-
- ret;
-
-__enc_xor8:
- xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+ write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
ret;
.align 8
-.global serpent_dec_blk_8way_avx
-.type serpent_dec_blk_8way_avx,@function;
+.type __serpent_dec_blk8_avx,@function;
-serpent_dec_blk_8way_avx:
+__serpent_dec_blk8_avx:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
+ * output:
+ * RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: decrypted blocks
*/
vpcmpeqd RNOT, RNOT, RNOT;
- leaq (4*4*4)(%rdx), %rax;
- read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+ read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
K2(RA, RB, RC, RD, RE, 32);
SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
@@ -697,8 +670,85 @@ serpent_dec_blk_8way_avx:
SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1);
S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0);
- leaq (4*4*4)(%rsi), %rax;
- write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
- write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+ write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+ write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
+ ret;
+
+.align 8
+.global serpent_ecb_enc_8way_avx
+.type serpent_ecb_enc_8way_avx,@function;
+
+serpent_ecb_enc_8way_avx:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __serpent_enc_blk8_avx;
+
+ store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ ret;
+
+.align 8
+.global serpent_ecb_dec_8way_avx
+.type serpent_ecb_dec_8way_avx,@function;
+
+serpent_ecb_dec_8way_avx:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __serpent_dec_blk8_avx;
+
+ store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
+ ret;
+
+.align 8
+.global serpent_cbc_dec_8way_avx
+.type serpent_cbc_dec_8way_avx,@function;
+
+serpent_cbc_dec_8way_avx:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __serpent_dec_blk8_avx;
+
+ store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
+ ret;
+
+.align 8
+.global serpent_ctr_8way_avx
+.type serpent_ctr_8way_avx,@function;
+
+serpent_ctr_8way_avx:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: iv (little endian, 128bit)
+ */
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RK0, RK1, RK2);
+
+ call __serpent_enc_blk8_avx;
+
+ store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
ret;
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 3f543a04cf1..52abaaf28e7 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -42,55 +42,24 @@
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
-static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
-{
- u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
- unsigned int j;
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- ivs[j] = src[j];
-
- serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
- u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
-}
-
-static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
+static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblk;
- u128_to_be128(&ctrblk, iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblk, iv);
+ le128_inc(iv);
__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
u128_xor(dst, src, (u128 *)&ctrblk);
}
-static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
-{
- be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
- unsigned int i;
-
- for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
- if (dst != src)
- dst[i] = src[i];
-
- u128_to_be128(&ctrblks[i], iv);
- u128_inc(iv);
- }
-
- serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
-}
-
static const struct common_glue_ctx serpent_enc = {
.num_funcs = 2,
.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
@@ -103,7 +72,7 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
}, {
.num_blocks = 1,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
@@ -116,7 +85,7 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
@@ -129,7 +98,7 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
}, {
.num_blocks = 1,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
@@ -193,7 +162,7 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
+ serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
return;
}
@@ -210,7 +179,7 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
+ serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
return;
}
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 9107a9908c4..97a356ece24 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -59,19 +59,19 @@ static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}
-static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
+static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblk;
- u128_to_be128(&ctrblk, iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblk, iv);
+ le128_inc(iv);
__serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
u128_xor(dst, src, (u128 *)&ctrblk);
}
static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
+ le128 *iv)
{
be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
unsigned int i;
@@ -80,8 +80,8 @@ static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
if (dst != src)
dst[i] = src[i];
- u128_to_be128(&ctrblks[i], iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblks[i], iv);
+ le128_inc(iv);
}
serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 1585abb13dd..ebac16bfa83 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -23,7 +23,16 @@
*
*/
+#include "glue_helper-asm-avx.S"
+
.file "twofish-avx-x86_64-asm_64.S"
+
+.data
+.align 16
+
+.Lbswap128_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
.text
/* structure of crypto context */
@@ -217,69 +226,45 @@
vpunpcklqdq x3, t2, x2; \
vpunpckhqdq x3, t2, x3;
-#define inpack_blocks(in, x0, x1, x2, x3, wkey, t0, t1, t2) \
- vpxor (0*4*4)(in), wkey, x0; \
- vpxor (1*4*4)(in), wkey, x1; \
- vpxor (2*4*4)(in), wkey, x2; \
- vpxor (3*4*4)(in), wkey, x3; \
+#define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
+ vpxor x0, wkey, x0; \
+ vpxor x1, wkey, x1; \
+ vpxor x2, wkey, x2; \
+ vpxor x3, wkey, x3; \
\
transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
-#define outunpack_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
- transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
- \
- vpxor x0, wkey, x0; \
- vmovdqu x0, (0*4*4)(out); \
- vpxor x1, wkey, x1; \
- vmovdqu x1, (1*4*4)(out); \
- vpxor x2, wkey, x2; \
- vmovdqu x2, (2*4*4)(out); \
- vpxor x3, wkey, x3; \
- vmovdqu x3, (3*4*4)(out);
-
-#define outunpack_xor_blocks(out, x0, x1, x2, x3, wkey, t0, t1, t2) \
+#define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
\
- vpxor x0, wkey, x0; \
- vpxor (0*4*4)(out), x0, x0; \
- vmovdqu x0, (0*4*4)(out); \
- vpxor x1, wkey, x1; \
- vpxor (1*4*4)(out), x1, x1; \
- vmovdqu x1, (1*4*4)(out); \
- vpxor x2, wkey, x2; \
- vpxor (2*4*4)(out), x2, x2; \
- vmovdqu x2, (2*4*4)(out); \
- vpxor x3, wkey, x3; \
- vpxor (3*4*4)(out), x3, x3; \
- vmovdqu x3, (3*4*4)(out);
+ vpxor x0, wkey, x0; \
+ vpxor x1, wkey, x1; \
+ vpxor x2, wkey, x2; \
+ vpxor x3, wkey, x3;
.align 8
-.global __twofish_enc_blk_8way
-.type __twofish_enc_blk_8way,@function;
+.type __twofish_enc_blk8,@function;
-__twofish_enc_blk_8way:
+__twofish_enc_blk8:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
- * %rcx: bool, if true: xor output
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
+ * output:
+ * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
*/
+ vmovdqu w(CTX), RK1;
+
pushq %rbp;
pushq %rbx;
pushq %rcx;
- vmovdqu w(CTX), RK1;
-
- leaq (4*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
+ inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
preload_rgi(RA1);
rotate_1l(RD1);
- inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
+ inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
rotate_1l(RD2);
- movq %rsi, %r11;
-
encrypt_cycle(0);
encrypt_cycle(1);
encrypt_cycle(2);
@@ -295,47 +280,33 @@ __twofish_enc_blk_8way:
popq %rbx;
popq %rbp;
- leaq (4*4*4)(%r11), %rax;
-
- testb %cl, %cl;
- jnz __enc_xor8;
-
- outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
- outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
-
- ret;
-
-__enc_xor8:
- outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
- outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
ret;
.align 8
-.global twofish_dec_blk_8way
-.type twofish_dec_blk_8way,@function;
+.type __twofish_dec_blk8,@function;
-twofish_dec_blk_8way:
+__twofish_dec_blk8:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst
- * %rdx: src
+ * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
+ * output:
+ * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
*/
+ vmovdqu (w+4*4)(CTX), RK1;
+
pushq %rbp;
pushq %rbx;
- vmovdqu (w+4*4)(CTX), RK1;
-
- leaq (4*4*4)(%rdx), %rax;
- inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
+ inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
preload_rgi(RC1);
rotate_1l(RA1);
- inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
+ inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
rotate_1l(RA2);
- movq %rsi, %r11;
-
decrypt_cycle(7);
decrypt_cycle(6);
decrypt_cycle(5);
@@ -350,8 +321,103 @@ twofish_dec_blk_8way:
popq %rbx;
popq %rbp;
- leaq (4*4*4)(%r11), %rax;
- outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
- outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
+
+ ret;
+
+.align 8
+.global twofish_ecb_enc_8way
+.type twofish_ecb_enc_8way,@function;
+
+twofish_ecb_enc_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __twofish_enc_blk8;
+
+ store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ ret;
+
+.align 8
+.global twofish_ecb_dec_8way
+.type twofish_ecb_dec_8way,@function;
+
+twofish_ecb_dec_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ movq %rsi, %r11;
+
+ load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ call __twofish_dec_blk8;
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ ret;
+
+.align 8
+.global twofish_cbc_dec_8way
+.type twofish_cbc_dec_8way,@function;
+
+twofish_cbc_dec_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ call __twofish_dec_blk8;
+
+ store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ popq %r12;
+
+ ret;
+
+.align 8
+.global twofish_ctr_8way
+.type twofish_ctr_8way,@function;
+
+twofish_ctr_8way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: iv (little endian, 128bit)
+ */
+
+ pushq %r12;
+
+ movq %rsi, %r11;
+ movq %rdx, %r12;
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX0, RX1, RY0);
+
+ call __twofish_enc_blk8;
+
+ store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ popq %r12;
ret;
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index e7708b5442e..94ac91d26e4 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -45,66 +45,23 @@
#define TWOFISH_PARALLEL_BLOCKS 8
-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __twofish_enc_blk_3way(ctx, dst, src, false);
-}
-
/* 8-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_8way(struct twofish_ctx *ctx, u8 *dst,
+asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
-static inline void twofish_enc_blk_xway(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __twofish_enc_blk_8way(ctx, dst, src, false);
-}
-
-static inline void twofish_enc_blk_xway_xor(struct twofish_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __twofish_enc_blk_8way(ctx, dst, src, true);
-}
+asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
-static inline void twofish_dec_blk_xway(struct twofish_ctx *ctx, u8 *dst,
+static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
- twofish_dec_blk_8way(ctx, dst, src);
-}
-
-static void twofish_dec_blk_cbc_xway(void *ctx, u128 *dst, const u128 *src)
-{
- u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1];
- unsigned int j;
-
- for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
- ivs[j] = src[j];
-
- twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
-
- for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
- u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
+ __twofish_enc_blk_3way(ctx, dst, src, false);
}
-static void twofish_enc_blk_ctr_xway(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
-{
- be128 ctrblks[TWOFISH_PARALLEL_BLOCKS];
- unsigned int i;
-
- for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) {
- if (dst != src)
- dst[i] = src[i];
-
- u128_to_be128(&ctrblks[i], iv);
- u128_inc(iv);
- }
-
- twofish_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
-}
static const struct common_glue_ctx twofish_enc = {
.num_funcs = 3,
@@ -112,7 +69,7 @@ static const struct common_glue_ctx twofish_enc = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
@@ -128,7 +85,7 @@ static const struct common_glue_ctx twofish_ctr = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_xway) }
+ .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
@@ -144,7 +101,7 @@ static const struct common_glue_ctx twofish_dec = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_xway) }
+ .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
@@ -160,7 +117,7 @@ static const struct common_glue_ctx twofish_dec_cbc = {
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_xway) }
+ .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) }
}, {
.num_blocks = 3,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
@@ -227,7 +184,7 @@ static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_enc_blk_xway(ctx->ctx, srcdst, srcdst);
+ twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
return;
}
@@ -249,7 +206,7 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_dec_blk_xway(ctx->ctx, srcdst, srcdst);
+ twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
return;
}
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index aa3eb358b7e..13e63b3e1df 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -62,15 +62,15 @@ void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
}
EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
-void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
+void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
be128 ctrblk;
if (dst != src)
*dst = *src;
- u128_to_be128(&ctrblk, iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblk, iv);
+ le128_inc(iv);
twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
u128_xor(dst, dst, (u128 *)&ctrblk);
@@ -78,7 +78,7 @@ void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, u128 *iv)
EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
- u128 *iv)
+ le128 *iv)
{
be128 ctrblks[3];
@@ -88,12 +88,12 @@ void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
dst[2] = src[2];
}
- u128_to_be128(&ctrblks[0], iv);
- u128_inc(iv);
- u128_to_be128(&ctrblks[1], iv);
- u128_inc(iv);
- u128_to_be128(&ctrblks[2], iv);
- u128_inc(iv);
+ le128_to_be128(&ctrblks[0], iv);
+ le128_inc(iv);
+ le128_to_be128(&ctrblks[1], iv);
+ le128_inc(iv);
+ le128_to_be128(&ctrblks[2], iv);
+ le128_inc(iv);
twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks);
}
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index efc6a958b71..a1daf4a6500 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -136,52 +136,6 @@ asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
return sigsuspend(&blocked);
}
-asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
- stack_ia32_t __user *uoss_ptr,
- struct pt_regs *regs)
-{
- stack_t uss, uoss;
- int ret, err = 0;
- mm_segment_t seg;
-
- if (uss_ptr) {
- u32 ptr;
-
- memset(&uss, 0, sizeof(stack_t));
- if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
- return -EFAULT;
-
- get_user_try {
- get_user_ex(ptr, &uss_ptr->ss_sp);
- get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
- get_user_ex(uss.ss_size, &uss_ptr->ss_size);
- } get_user_catch(err);
-
- if (err)
- return -EFAULT;
- uss.ss_sp = compat_ptr(ptr);
- }
- seg = get_fs();
- set_fs(KERNEL_DS);
- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
- (stack_t __force __user *) &uoss, regs->sp);
- set_fs(seg);
- if (ret >= 0 && uoss_ptr) {
- if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
- return -EFAULT;
-
- put_user_try {
- put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
- put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
- put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
- } put_user_catch(err);
-
- if (err)
- ret = -EFAULT;
- }
- return ret;
-}
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -292,7 +246,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe_ia32 __user *frame;
sigset_t set;
unsigned int ax;
- struct pt_regs tregs;
frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
@@ -306,8 +259,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
goto badframe;
- tregs = *regs;
- if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+ if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
return ax;
@@ -515,10 +467,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- put_user_ex(sas_ss_flags(regs->sp),
- &frame->uc.uc_stack.ss_flags);
- put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 32e6f05ddaa..102ff7cb3e4 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -464,7 +464,6 @@ GLOBAL(\label)
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
- PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
PTREGSCALL stub32_execve, compat_sys_execve, %rcx
PTREGSCALL stub32_fork, sys_fork, %rdi
PTREGSCALL stub32_vfork, sys_vfork, %rdi
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 79fd8a3418f..7f669853317 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -1,30 +1,4 @@
-include include/asm-generic/Kbuild.asm
-header-y += boot.h
-header-y += bootparam.h
-header-y += debugreg.h
-header-y += e820.h
-header-y += hw_breakpoint.h
-header-y += hyperv.h
-header-y += ist.h
-header-y += ldt.h
-header-y += mce.h
-header-y += msr-index.h
-header-y += msr.h
-header-y += mtrr.h
-header-y += perf_regs.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += posix_types_x32.h
-header-y += prctl.h
-header-y += processor-flags.h
-header-y += ptrace-abi.h
-header-y += sigcontext32.h
-header-y += svm.h
-header-y += ucontext.h
-header-y += vm86.h
-header-y += vmx.h
-header-y += vsyscall.h
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index b13fe63bdc5..4fa687a47a6 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -1,14 +1,9 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H
-/* Internal svga startup constants */
-#define NORMAL_VGA 0xffff /* 80x25 mode */
-#define EXTENDED_VGA 0xfffe /* 80x50 mode */
-#define ASK_VGA 0xfffd /* ask for it at bootup */
-
-#ifdef __KERNEL__
#include <asm/pgtable_types.h>
+#include <uapi/asm/boot.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -42,6 +37,4 @@
#define BOOT_STACK_SIZE 0x1000
#endif
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index 0bdbbb3b9ce..16a57f4ed64 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -8,6 +8,7 @@
#define VCLOCK_NONE 0 /* No vDSO clock available. */
#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
+#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
struct arch_clocksource_data {
int vclock_mode;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index da40b1e2228..2d9075e863a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -202,6 +202,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
new file mode 100644
index 00000000000..98038add801
--- /dev/null
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -0,0 +1,82 @@
+#ifndef ASM_X86_CAMELLIA_H
+#define ASM_X86_CAMELLIA_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+
+#define CAMELLIA_MIN_KEY_SIZE 16
+#define CAMELLIA_MAX_KEY_SIZE 32
+#define CAMELLIA_BLOCK_SIZE 16
+#define CAMELLIA_TABLE_BYTE_LEN 272
+#define CAMELLIA_PARALLEL_BLOCKS 2
+
+struct camellia_ctx {
+ u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
+ u32 key_length;
+};
+
+struct camellia_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct camellia_ctx camellia_ctx;
+};
+
+struct camellia_xts_ctx {
+ struct camellia_ctx tweak_ctx;
+ struct camellia_ctx crypt_ctx;
+};
+
+extern int __camellia_setkey(struct camellia_ctx *cctx,
+ const unsigned char *key,
+ unsigned int key_len, u32 *flags);
+
+extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+/* regular block cipher functions */
+asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 2-way parallel cipher functions */
+asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, true);
+}
+
+static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, true);
+}
+
+/* glue helpers */
+extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
+extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+#endif /* ASM_X86_CAMELLIA_H */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 3e408bddc96..e2d65b061d2 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -13,7 +13,7 @@
typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
- u128 *iv);
+ le128 *iv);
#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
@@ -71,23 +71,29 @@ static inline void glue_fpu_end(bool fpu_enabled)
kernel_fpu_end();
}
-static inline void u128_to_be128(be128 *dst, const u128 *src)
+static inline void le128_to_be128(be128 *dst, const le128 *src)
{
- dst->a = cpu_to_be64(src->a);
- dst->b = cpu_to_be64(src->b);
+ dst->a = cpu_to_be64(le64_to_cpu(src->a));
+ dst->b = cpu_to_be64(le64_to_cpu(src->b));
}
-static inline void be128_to_u128(u128 *dst, const be128 *src)
+static inline void be128_to_le128(le128 *dst, const be128 *src)
{
- dst->a = be64_to_cpu(src->a);
- dst->b = be64_to_cpu(src->b);
+ dst->a = cpu_to_le64(be64_to_cpu(src->a));
+ dst->b = cpu_to_le64(be64_to_cpu(src->b));
}
-static inline void u128_inc(u128 *i)
+static inline void le128_inc(le128 *i)
{
- i->b++;
- if (!i->b)
- i->a++;
+ u64 a = le64_to_cpu(i->a);
+ u64 b = le64_to_cpu(i->b);
+
+ b++;
+ if (!b)
+ a++;
+
+ i->a = cpu_to_le64(a);
+ i->b = cpu_to_le64(b);
}
extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
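
With the helpers above, the CTR glue keeps its counter in little-endian (CPU) order between blocks and only converts to big-endian at the point where a counter block is actually encrypted; serpent_crypt_ctr earlier in this patch is the single-block version of that pattern. A minimal sketch of the per-block sequence using only these helpers (encrypt_block() is a placeholder name, not from the patch):

/* One CTR block, mirroring serpent_crypt_ctr() shown earlier in this
 * diff; encrypt_block() stands in for the real single-block cipher
 * (e.g. __serpent_encrypt). */
static void ctr_one_block(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	be128 ctrblk;

	le128_to_be128(&ctrblk, iv);	/* counter kept little endian ...    */
	le128_inc(iv);			/* ... and bumped for the next block */

	encrypt_block(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
	u128_xor(dst, src, (u128 *)&ctrblk);
}
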
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index 432deedd294..0da1d3e2a55 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -6,27 +6,14 @@
#define SERPENT_PARALLEL_BLOCKS 8
-asmlinkage void __serpent_enc_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
const u8 *src);
-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __serpent_enc_blk_8way_avx(ctx, dst, src, false);
-}
-
-static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- __serpent_enc_blk_8way_avx(ctx, dst, src, true);
-}
-
-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- serpent_dec_blk_8way_avx(ctx, dst, src);
-}
+asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
index 9d2c514bd5f..878c51ceebb 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -31,9 +31,9 @@ asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
/* helpers from twofish_x86_64-3way module */
extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
- u128 *iv);
+ le128 *iv);
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
- u128 *iv);
+ le128 *iv);
extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 2d91580bf22..4b528a970bd 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -2,83 +2,8 @@
#define _ASM_X86_DEBUGREG_H
-/* Indicate the register numbers for a number of the specific
- debug registers. Registers 0-3 contain the addresses we wish to trap on */
-#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
-#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
-
-#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
-#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
-
-/* Define a few things for the status register. We can use this to determine
- which debugging register was responsible for the trap. The other bits
- are either reserved or not of interest to us. */
-
-/* Define reserved bits in DR6 which are always set to 1 */
-#define DR6_RESERVED (0xFFFF0FF0)
-
-#define DR_TRAP0 (0x1) /* db0 */
-#define DR_TRAP1 (0x2) /* db1 */
-#define DR_TRAP2 (0x4) /* db2 */
-#define DR_TRAP3 (0x8) /* db3 */
-#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
-
-#define DR_STEP (0x4000) /* single-step */
-#define DR_SWITCH (0x8000) /* task switch */
-
-/* Now define a bunch of things for manipulating the control register.
- The top two bytes of the control register consist of 4 fields of 4
- bits - each field corresponds to one of the four debug registers,
- and indicates what types of access we trap on, and how large the data
- field is that we are looking at */
-
-#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
-#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
-
-#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
-#define DR_RW_WRITE (0x1)
-#define DR_RW_READ (0x3)
-
-#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
-#define DR_LEN_2 (0x4)
-#define DR_LEN_4 (0xC)
-#define DR_LEN_8 (0x8)
-
-/* The low byte to the control register determine which registers are
- enabled. There are 4 fields of two bits. One bit is "local", meaning
- that the processor will reset the bit after a task switch and the other
- is global meaning that we have to explicitly reset the bit. With linux,
- you can use either one, since we explicitly zero the register when we enter
- kernel mode. */
-
-#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
-#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
-#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
-#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
-
-#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
-#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
-
-/* The second byte to the control register has a few special things.
- We can slow the instruction pipeline for instructions coming via the
- gdt or the ldt if we want to. I am not sure why this is an advantage */
-
-#ifdef __i386__
-#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
-#else
-#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
-#endif
-
-#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
-#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
-
-/*
- * HW breakpoint additions
- */
-#ifdef __KERNEL__
-
#include <linux/bug.h>
+#include <uapi/asm/debugreg.h>
DECLARE_PER_CPU(unsigned long, cpu_dr7);
@@ -190,6 +115,4 @@ static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index f7b4c7903e7..808dae63eee 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -47,6 +47,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 37782566af2..cccd07fa5e3 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,81 +1,14 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
-#define E820MAP 0x2d0 /* our map */
-#define E820MAX 128 /* number of entries in E820MAP */
-/*
- * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
- * constrained space in the zeropage. If we have more nodes than
- * that, and if we've booted off EFI firmware, then the EFI tables
- * passed us from the EFI firmware can list more nodes. Size our
- * internal memory map tables to have room for these additional
- * nodes, based on up to three entries per node for which the
- * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
- * plus E820MAX, allowing space for the possible duplicate E820
- * entries that might need room in the same arrays, prior to the
- * call to sanitize_e820_map() to remove duplicates. The allowance
- * of three memory map entries per node is "enough" entries for
- * the initial hardware platform motivating this mechanism to make
- * use of additional EFI map entries. Future platforms may want
- * to allow more than three entries per node or otherwise refine
- * this size.
- */
-
-/*
- * Odd: 'make headers_check' complains about numa.h if I try
- * to collapse the next two #ifdef lines to a single line:
- * #if defined(__KERNEL__) && defined(CONFIG_EFI)
- */
-#ifdef __KERNEL__
#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else /* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
-#else /* ! __KERNEL__ */
-#define E820_X_MAX E820MAX
-#endif
-
-#define E820NR 0x1e8 /* # entries in E820MAP */
-
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-#define E820_UNUSABLE 5
-
-/*
- * reserved RAM used by kernel itself
- * if CONFIG_INTEL_TXT is enabled, memory of this type will be
- * included in the S3 integrity calculation and so should not include
- * any memory that BIOS might alter over the S3 transition
- */
-#define E820_RESERVED_KERN 128
-
+#include <uapi/asm/e820.h>
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-struct e820entry {
- __u64 addr; /* start of memory segment */
- __u64 size; /* size of memory segment */
- __u32 type; /* type of memory segment */
-} __attribute__((packed));
-
-struct e820map {
- __u32 nr_map;
- struct e820entry map[E820_X_MAX];
-};
-
-#define ISA_START_ADDRESS 0xa0000
-#define ISA_END_ADDRESS 0x100000
-
-#define BIOS_BEGIN 0x000a0000
-#define BIOS_END 0x00100000
-
-#define BIOS_ROM_BASE 0xffe00000
-#define BIOS_ROM_END 0xffffffff
-
-#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;
@@ -137,13 +70,8 @@ static inline bool is_ISA_range(u64 s, u64 e)
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
#include <linux/ioport.h>
#define HIGH_MEMORY (1024*1024)
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_E820_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4da3c0c4c97..a09c2857106 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -19,6 +19,7 @@
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
+#include <asm/pvclock.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
@@ -81,6 +82,10 @@ enum fixed_addresses {
VVAR_PAGE,
VSYSCALL_HPET,
#endif
+#ifdef CONFIG_PARAVIRT_CLOCK
+ PVCLOCK_FIXMAP_BEGIN,
+ PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
+#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 824ca07860d..ef1c4d2d41e 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -1,7 +1,8 @@
#ifndef _I386_HW_BREAKPOINT_H
#define _I386_HW_BREAKPOINT_H
-#ifdef __KERNEL__
+#include <uapi/asm/hw_breakpoint.h>
+
#define __ARCH_HW_BREAKPOINT_H
/*
@@ -71,6 +72,4 @@ extern int arch_bp_generic_fields(int x86_len, int x86_type,
extern struct pmu perf_ops_bp;
-#endif /* __KERNEL__ */
#endif /* _I386_HW_BREAKPOINT_H */
-
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index e6232773ce4..4c6da2e4bb1 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -29,16 +29,10 @@ struct old_sigaction32 {
unsigned int sa_restorer; /* Another 32 bit pointer */
};
-typedef struct sigaltstack_ia32 {
- unsigned int ss_sp;
- int ss_flags;
- unsigned int ss_size;
-} stack_ia32_t;
-
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
- stack_ia32_t uc_stack;
+ compat_stack_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
@@ -46,7 +40,7 @@ struct ucontext_ia32 {
struct ucontext_x32 {
unsigned int uc_flags;
unsigned int uc_link;
- stack_ia32_t uc_stack;
+ compat_stack_t uc_stack;
unsigned int uc__pad0; /* needed for alignment */
struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
compat_sigset_t uc_sigmask; /* mask last for extensibility */
diff --git a/arch/x86/include/asm/ist.h b/arch/x86/include/asm/ist.h
index 7e5dff1de0e..c9803f1a203 100644
--- a/arch/x86/include/asm/ist.h
+++ b/arch/x86/include/asm/ist.h
@@ -1,6 +1,3 @@
-#ifndef _ASM_X86_IST_H
-#define _ASM_X86_IST_H
-
/*
* Include file for the interface to IST BIOS
* Copyright 2002 Andy Grover <andrew.grover@intel.com>
@@ -15,20 +12,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+#ifndef _ASM_X86_IST_H
+#define _ASM_X86_IST_H
+#include <uapi/asm/ist.h>
-#include <linux/types.h>
-
-struct ist_info {
- __u32 signature;
- __u32 command;
- __u32 event;
- __u32 perf_level;
-};
-
-#ifdef __KERNEL__
extern struct ist_info ist_info;
-#endif /* __KERNEL__ */
#endif /* _ASM_X86_IST_H */
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 317ff1703d0..6080d2694ba 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -163,6 +163,9 @@ struct kimage_arch {
};
#endif
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_KEXEC_H */
diff --git a/arch/x86/include/asm/kvm_guest.h b/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 00000000000..a92b1763c41
--- /dev/null
+++ b/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b2e11f45243..dc87b65e9c3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
+#include <linux/pvclock_gtod.h>
+#include <linux/clocksource.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -442,6 +444,7 @@ struct kvm_vcpu_arch {
s8 virtual_tsc_shift;
u32 virtual_tsc_mult;
u32 virtual_tsc_khz;
+ s64 ia32_tsc_adjust_msr;
atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -559,6 +562,12 @@ struct kvm_arch {
u64 cur_tsc_write;
u64 cur_tsc_offset;
u8 cur_tsc_generation;
+ int nr_vcpus_matched_tsc;
+
+ spinlock_t pvclock_gtod_sync_lock;
+ bool use_master_clock;
+ u64 master_kernel_ns;
+ cycle_t master_cycle_now;
struct kvm_xen_hvm_config xen_hvm_config;
@@ -612,6 +621,12 @@ struct kvm_vcpu_stat {
struct x86_instruction_info;
+struct msr_data {
+ bool host_initiated;
+ u32 index;
+ u64 data;
+};
+
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -634,7 +649,7 @@ struct kvm_x86_ops {
void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
- int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -697,10 +712,11 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+ u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
- u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
+ u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
@@ -785,7 +801,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
struct x86_emulate_ctxt;
@@ -812,7 +828,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
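
struct msr_data lets common code tell the vendor backend whether an MSR write originates from the host (the KVM_SET_MSRS ioctl) or from a guest WRMSR, which matters for MSRs such as the new IA32_TSC_ADJUST handling above. A sketch of how a write is now packaged for kvm_set_msr() (wrapper name illustrative):

    static int example_do_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
                                  bool host_initiated)
    {
            struct msr_data msr = {
                    .host_initiated = host_initiated,  /* ioctl vs. guest WRMSR */
                    .index          = index,
                    .data           = data,
            };

            return kvm_set_msr(vcpu, &msr);
    }
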
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index eb3e9d85e1f..5ed1f16187b 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,103 +1,8 @@
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H
-#include <linux/types.h>
-#include <asm/hyperv.h>
-
-/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
- * should be used to determine that a VM is running under KVM.
- */
-#define KVM_CPUID_SIGNATURE 0x40000000
-
-/* This CPUID returns a feature bitmap in eax. Before enabling a particular
- * paravirtualization, the appropriate feature bit should be checked.
- */
-#define KVM_CPUID_FEATURES 0x40000001
-#define KVM_FEATURE_CLOCKSOURCE 0
-#define KVM_FEATURE_NOP_IO_DELAY 1
-#define KVM_FEATURE_MMU_OP 2
-/* This indicates that the new set of kvmclock msrs
- * are available. The use of 0x11 and 0x12 is deprecated
- */
-#define KVM_FEATURE_CLOCKSOURCE2 3
-#define KVM_FEATURE_ASYNC_PF 4
-#define KVM_FEATURE_STEAL_TIME 5
-#define KVM_FEATURE_PV_EOI 6
-
-/* The last 8 bits are used to indicate how to interpret the flags field
- * in pvclock structure. If no bits are set, all flags are ignored.
- */
-#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
-
-#define MSR_KVM_WALL_CLOCK 0x11
-#define MSR_KVM_SYSTEM_TIME 0x12
-
-#define KVM_MSR_ENABLED 1
-/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
-#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
-#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
-#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
-#define MSR_KVM_STEAL_TIME 0x4b564d03
-#define MSR_KVM_PV_EOI_EN 0x4b564d04
-
-struct kvm_steal_time {
- __u64 steal;
- __u32 version;
- __u32 flags;
- __u32 pad[12];
-};
-
-#define KVM_STEAL_ALIGNMENT_BITS 5
-#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
-#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
-
-#define KVM_MAX_MMU_OP_BATCH 32
-
-#define KVM_ASYNC_PF_ENABLED (1 << 0)
-#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
-
-/* Operations for KVM_HC_MMU_OP */
-#define KVM_MMU_OP_WRITE_PTE 1
-#define KVM_MMU_OP_FLUSH_TLB 2
-#define KVM_MMU_OP_RELEASE_PT 3
-
-/* Payload for KVM_HC_MMU_OP */
-struct kvm_mmu_op_header {
- __u32 op;
- __u32 pad;
-};
-
-struct kvm_mmu_op_write_pte {
- struct kvm_mmu_op_header header;
- __u64 pte_phys;
- __u64 pte_val;
-};
-
-struct kvm_mmu_op_flush_tlb {
- struct kvm_mmu_op_header header;
-};
-
-struct kvm_mmu_op_release_pt {
- struct kvm_mmu_op_header header;
- __u64 pt_phys;
-};
-
-#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
-#define KVM_PV_REASON_PAGE_READY 2
-
-struct kvm_vcpu_pv_apf_data {
- __u32 reason;
- __u8 pad[60];
- __u32 enabled;
-};
-
-#define KVM_PV_EOI_BIT 0
-#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
-#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
-#define KVM_PV_EOI_DISABLED 0x0
-
-#ifdef __KERNEL__
#include <asm/processor.h>
+#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
@@ -228,6 +133,4 @@ static inline void kvm_disable_steal_time(void)
}
#endif
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 54d73b1f00a..ecdfee60ee4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -1,124 +1,25 @@
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H
-#include <linux/types.h>
-#include <asm/ioctls.h>
-
-/*
- * Machine Check support for x86
- */
-
-/* MCG_CAP register defines */
-#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
-#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
-#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
-#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
-#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
-#define MCG_EXT_CNT_SHIFT 16
-#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
-
-/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
-
-/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
-
-/* MCi_MISC register defines */
-#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
-#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
-#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
-#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
-#define MCI_MISC_ADDR_PHYS 2 /* physical address */
-#define MCI_MISC_ADDR_MEM 3 /* memory address */
-#define MCI_MISC_ADDR_GENERIC 7 /* generic */
-
-/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN (1ULL << 30)
-#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
-
-#define MCJ_CTX_MASK 3
-#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
-#define MCJ_CTX_RANDOM 0 /* inject context: random */
-#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
-#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
-#define MCJ_EXCEPTION 0x8 /* raise as exception */
-#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
-
-/* Fields are zero when not available */
-struct mce {
- __u64 status;
- __u64 misc;
- __u64 addr;
- __u64 mcgstatus;
- __u64 ip;
- __u64 tsc; /* cpu time stamp counter */
- __u64 time; /* wall time_t when error was detected */
- __u8 cpuvendor; /* cpu vendor as encoded in system.h */
- __u8 inject_flags; /* software inject flags */
- __u16 pad;
- __u32 cpuid; /* CPUID 1 EAX */
- __u8 cs; /* code segment */
- __u8 bank; /* machine check bank */
- __u8 cpu; /* cpu number; obsolete; use extcpu now */
- __u8 finished; /* entry is valid */
- __u32 extcpu; /* linux cpu number that detected the error */
- __u32 socketid; /* CPU socket ID */
- __u32 apicid; /* CPU initial apic ID */
- __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
-};
-
-/*
- * This structure contains all data related to the MCE log. Also
- * carries a signature to make it easier to find from external
- * debugging tools. Each entry is only valid when its finished flag
- * is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
- char signature[12]; /* "MACHINECHECK" */
- unsigned len; /* = MCE_LOG_LEN */
- unsigned next;
- unsigned flags;
- unsigned recordlen; /* length of struct mce */
- struct mce entry[MCE_LOG_LEN];
+#include <uapi/asm/mce.h>
+
+
+struct mca_config {
+ bool dont_log_ce;
+ bool cmci_disabled;
+ bool ignore_ce;
+ bool disabled;
+ bool ser;
+ bool bios_cmci_threshold;
+ u8 banks;
+ s8 bootlog;
+ int tolerant;
+ int monarch_timeout;
+ int panic_timeout;
+ u32 rip_msr;
};
-#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE "MACHINECHECK"
-
-#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
-#define MCE_GET_LOG_LEN _IOR('M', 2, int)
-#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
-
-/* Software defined banks */
-#define MCE_EXTENDED_BANK 128
-#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
-#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
-
-#ifdef __KERNEL__
+extern struct mca_config mca_cfg;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
@@ -126,7 +27,6 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/init.h>
#include <linux/atomic.h>
-extern int mce_disabled;
extern int mce_p5_enabled;
#ifdef CONFIG_X86_MCE
@@ -159,9 +59,6 @@ DECLARE_PER_CPU(struct device *, mce_device);
#define MAX_NR_BANKS 32
#ifdef CONFIG_X86_MCE_INTEL
-extern int mce_cmci_disabled;
-extern int mce_ignore_ce;
-extern int mce_bios_cmci_threshold;
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
@@ -247,5 +144,4 @@ struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
struct cper_sec_mem_err *mem_err);
-#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
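
The scattered MCE control globals removed above (mce_disabled, mce_cmci_disabled, mce_ignore_ce, mce_bios_cmci_threshold, ...) are folded into the single mca_cfg object, so call sites read fields instead of separate variables. A usage sketch (function name illustrative, condition simplified):

    static bool example_cmci_usable(void)
    {
            /* formerly: !mce_disabled && !mce_cmci_disabled && !mce_ignore_ce */
            return !mca_cfg.disabled && !mca_cfg.cmci_disabled &&
                   !mca_cfg.ignore_ce;
    }
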
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 813ed103f45..9264802e282 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -1,18 +1,10 @@
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
-#include <asm/msr-index.h>
+#include <uapi/asm/msr.h>
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
-#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
-
-#ifdef __KERNEL__
-
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -271,6 +263,5 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 7e3f17f92c6..e235582f993 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -23,97 +23,8 @@
#ifndef _ASM_X86_MTRR_H
#define _ASM_X86_MTRR_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <linux/errno.h>
+#include <uapi/asm/mtrr.h>
-#define MTRR_IOCTL_BASE 'M'
-
-/* Warning: this structure has a different order from i386
- on x86-64. The 32bit emulation code takes care of that.
- But you need to use this for 64bit, otherwise your X server
- will break. */
-
-#ifdef __i386__
-struct mtrr_sentry {
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
-struct mtrr_gentry {
- unsigned int regnum; /* Register number */
- unsigned long base; /* Base address */
- unsigned int size; /* Size of region */
- unsigned int type; /* Type of region */
-};
-
-#else /* __i386__ */
-
-struct mtrr_sentry {
- __u64 base; /* Base address */
- __u32 size; /* Size of region */
- __u32 type; /* Type of region */
-};
-
-struct mtrr_gentry {
- __u64 base; /* Base address */
- __u32 size; /* Size of region */
- __u32 regnum; /* Register number */
- __u32 type; /* Type of region */
- __u32 _pad; /* Unused */
-};
-
-#endif /* !__i386__ */
-
-struct mtrr_var_range {
- __u32 base_lo;
- __u32 base_hi;
- __u32 mask_lo;
- __u32 mask_hi;
-};
-
-/* In the Intel processor's MTRR interface, the MTRR type is always held in
- an 8 bit field: */
-typedef __u8 mtrr_type;
-
-#define MTRR_NUM_FIXED_RANGES 88
-#define MTRR_MAX_VAR_RANGES 256
-
-struct mtrr_state_type {
- struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
- mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
- unsigned char enabled;
- unsigned char have_fixed;
- mtrr_type def_type;
-};
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-/* These are the various ioctls */
-#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
-#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
-#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
-#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
-#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
-#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
-#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
-#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
-#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
-#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
-
-/* These are the region types */
-#define MTRR_TYPE_UNCACHABLE 0
-#define MTRR_TYPE_WRCOMB 1
-/*#define MTRR_TYPE_ 2*/
-/*#define MTRR_TYPE_ 3*/
-#define MTRR_TYPE_WRTHROUGH 4
-#define MTRR_TYPE_WRPROT 5
-#define MTRR_TYPE_WRBACK 6
-#define MTRR_NUM_TYPES 7
-
-#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
@@ -208,6 +119,4 @@ struct mtrr_gentry32 {
_IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_MTRR_H */
diff --git a/arch/x86/include/asm/numachip/numachip.h b/arch/x86/include/asm/numachip/numachip.h
new file mode 100644
index 00000000000..1c6f7f6212c
--- /dev/null
+++ b/arch/x86/include/asm/numachip/numachip.h
@@ -0,0 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-specific header file
+ *
+ * Copyright (C) 2012 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_H
+
+extern int __init pci_numachip_init(void);
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a0facf3908d..5edd1742cfd 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -528,7 +528,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
@@ -539,7 +538,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
native_pmd_val(pmd));
}
-#endif
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 6e41b934392..dba7805176b 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -171,4 +171,16 @@ cpumask_of_pcibus(const struct pci_bus *bus)
}
#endif
+struct pci_setup_rom {
+ struct setup_data data;
+ uint16_t vendor;
+ uint16_t devid;
+ uint64_t pcilen;
+ unsigned long segment;
+ unsigned long bus;
+ unsigned long device;
+ unsigned long function;
+ uint8_t romdata[0];
+};
+
#endif /* _ASM_X86_PCI_H */
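
struct pci_setup_rom describes an option ROM image handed over by the boot loader or EFI stub as a SETUP_PCI node in the boot_params setup_data chain (SETUP_PCI itself is added to bootparam.h further down in this patch). A sketch of how such a node could be located and matched, assuming the setup_data chain is reachable through the direct mapping (function name illustrative):

    static void example_find_setup_pci_rom(u64 pa_data, u16 vendor, u16 devid)
    {
            while (pa_data) {
                    struct setup_data *data = phys_to_virt(pa_data);

                    if (data->type == SETUP_PCI) {
                            struct pci_setup_rom *rom =
                                    (struct pci_setup_rom *)data;

                            if (rom->vendor == vendor && rom->devid == devid) {
                                    /* rom->romdata[] holds rom->pcilen bytes of
                                     * option ROM for bus/device/function */
                            }
                    }
                    pa_data = data->next;
            }
    }
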
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a1f780d45f7..5199db2923d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -404,7 +404,14 @@ static inline int pte_same(pte_t a, pte_t b)
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
+}
+
+#define pte_accessible pte_accessible
+static inline int pte_accessible(pte_t a)
+{
+ return pte_flags(a) & _PAGE_PRESENT;
}
static inline int pte_hidden(pte_t pte)
@@ -420,7 +427,8 @@ static inline int pmd_present(pmd_t pmd)
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
- return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
+ _PAGE_NUMA);
}
static inline int pmd_none(pmd_t pmd)
@@ -479,6 +487,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
static inline int pmd_bad(pmd_t pmd)
{
+#ifdef CONFIG_NUMA_BALANCING
+ /* pmd_numa check */
+ if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
+ return 0;
+#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
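
pte_present() now also accepts _PAGE_NUMA because an entry marked for a NUMA hinting fault has the hardware present bit cleared while still mapping a resident page; pte_accessible() is the stricter test for entries the hardware can actually walk (used for TLB flush decisions). The helpers that interpret the bit are expected to look roughly like this sketch, mirroring the generic NUMA-balancing definitions:

    static inline int example_pte_numa(pte_t pte)
    {
            return (pte_flags(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA;
    }

    static inline int example_pmd_numa(pmd_t pmd)
    {
            return (pmd_flags(pmd) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA;
    }
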
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index ec8a1fc9505..3c32db8c539 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -64,6 +64,26 @@
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+/*
+ * _PAGE_NUMA indicates that this page will trigger a numa hinting
+ * minor page fault to gather numa placement statistics (see
+ * pte_numa()). The bit picked (8) is within the range between
+ * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
+ * require changes to the swp entry format because that bit is always
+ * zero when the pte is not present.
+ *
+ * The bit picked must be one that reads as zero both when the pmd is
+ * present and when it is not present, so that we don't lose information
+ * when we set it while atomically clearing the present bit.
+ *
+ * Because we shared the same bit (8) with _PAGE_PROTNONE this can be
+ * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
+ * couldn't reach, like handle_mm_fault() (see access_error in
+ * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for
+ * handle_mm_fault() to be invoked).
+ */
+#define _PAGE_NUMA _PAGE_PROTNONE
+
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
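
Because _PAGE_NUMA reuses bit 8 (the _PAGE_PROTNONE bit), an entry is marked for a hinting fault by setting that bit in the same value from which _PAGE_PRESENT is cleared, which is why the comment requires the bit to read as zero in both the present and the not-present state. A sketch of such a transition, using the existing pmd_set_flags()/pmd_clear_flags() helpers (assumed available here; function name illustrative):

    static inline pmd_t example_pmd_mknuma(pmd_t pmd)
    {
            pmd = pmd_set_flags(pmd, _PAGE_NUMA);
            return pmd_clear_flags(pmd, _PAGE_PRESENT);
    }
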
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bad3665c25f..f565f6dd59d 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -1,15 +1,5 @@
-#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include <asm/posix_types_32.h>
# else
# include <asm/posix_types_64.h>
# endif
-#else
-# ifdef __i386__
-# include <asm/posix_types_32.h>
-# elif defined(__ILP32__)
-# include <asm/posix_types_x32.h>
-# else
-# include <asm/posix_types_64.h>
-# endif
-#endif
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 680cf09ed10..39fb618e221 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -1,106 +1,11 @@
#ifndef _ASM_X86_PROCESSOR_FLAGS_H
#define _ASM_X86_PROCESSOR_FLAGS_H
-/* Various flags defined: can be included from assembler. */
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+#include <uapi/asm/processor-flags.h>
-/*
- * Basic CPU control in CR0
- */
-#define X86_CR0_PE 0x00000001 /* Protection Enable */
-#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
-#define X86_CR0_EM 0x00000004 /* Emulation */
-#define X86_CR0_TS 0x00000008 /* Task Switched */
-#define X86_CR0_ET 0x00000010 /* Extension Type */
-#define X86_CR0_NE 0x00000020 /* Numeric Error */
-#define X86_CR0_WP 0x00010000 /* Write Protect */
-#define X86_CR0_AM 0x00040000 /* Alignment Mask */
-#define X86_CR0_NW 0x20000000 /* Not Write-through */
-#define X86_CR0_CD 0x40000000 /* Cache Disable */
-#define X86_CR0_PG 0x80000000 /* Paging */
-
-/*
- * Paging options in CR3
- */
-#define X86_CR3_PWT 0x00000008 /* Page Write Through */
-#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
-#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
-
-/*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
-#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x00000040 /* Machine check enable */
-#define X86_CR4_PGE 0x00000080 /* enable global pages */
-#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
-#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
-#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
-#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
-#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
-#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
-#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
-
-/*
- * x86-64 Task Priority Register, CR8
- */
-#define X86_CR8_TPR 0x0000000F /* task priority register */
-
-/*
- * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
- */
-
-/*
- * NSC/Cyrix CPU configuration register indexes
- */
-#define CX86_PCR0 0x20
-#define CX86_GCR 0xb8
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_PCR1 0xf0
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-#ifdef __KERNEL__
#ifdef CONFIG_VM86
#define X86_VM_MASK X86_EFLAGS_VM
#else
#define X86_VM_MASK 0 /* No VM86 support */
#endif
-#endif
-
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 54d80fddb73..942a08623a1 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -1,44 +1,12 @@
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
-#include <linux/compiler.h> /* For __user */
-#include <asm/ptrace-abi.h>
-#include <asm/processor-flags.h>
-
-#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
-#endif
+#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-
#ifdef __i386__
-/* this struct defines the way the registers are stored on the
- stack during a system call. */
-
-#ifndef __KERNEL__
-
-struct pt_regs {
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- int xds;
- int xes;
- int xfs;
- int xgs;
- long orig_eax;
- long eip;
- int xcs;
- long eflags;
- long esp;
- int xss;
-};
-
-#else /* __KERNEL__ */
struct pt_regs {
unsigned long bx;
@@ -60,42 +28,8 @@ struct pt_regs {
unsigned long ss;
};
-#endif /* __KERNEL__ */
-
#else /* __i386__ */
-#ifndef __KERNEL__
-
-struct pt_regs {
- unsigned long r15;
- unsigned long r14;
- unsigned long r13;
- unsigned long r12;
- unsigned long rbp;
- unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save up to here*/
- unsigned long r11;
- unsigned long r10;
- unsigned long r9;
- unsigned long r8;
- unsigned long rax;
- unsigned long rcx;
- unsigned long rdx;
- unsigned long rsi;
- unsigned long rdi;
- unsigned long orig_rax;
-/* end of arguments */
-/* cpu exception frame or undefined */
- unsigned long rip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long rsp;
- unsigned long ss;
-/* top of stack page */
-};
-
-#else /* __KERNEL__ */
-
struct pt_regs {
unsigned long r15;
unsigned long r14;
@@ -124,12 +58,8 @@ struct pt_regs {
/* top of stack page */
};
-#endif /* __KERNEL__ */
#endif /* !__i386__ */
-
-#ifdef __KERNEL__
-
#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
@@ -203,6 +133,13 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
+
+#define current_user_stack_pointer() this_cpu_read(old_rsp)
+/* ia32 vs. x32 difference */
+#define compat_user_stack_pointer() \
+ (test_thread_flag(TIF_IA32) \
+ ? current_pt_regs()->sp \
+ : this_cpu_read(old_rsp))
#endif
#ifdef CONFIG_X86_32
@@ -301,8 +238,5 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
-#endif /* __KERNEL__ */
-
#endif /* !__ASSEMBLY__ */
-
#endif /* _ASM_X86_PTRACE_H */
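
current_user_stack_pointer()/compat_user_stack_pointer() hide the ia32-vs-x32 split: an ia32 task's user stack pointer is parked in the per-cpu old_rsp slot by the syscall entry path, while an x32 task enters through the 64-bit path and keeps it in pt_regs. A minimal usage sketch (caller name illustrative):

    static unsigned long example_compat_usp(void)
    {
            /* e.g. when laying out a compat signal frame on the user stack */
            return compat_user_stack_pointer();
    }
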
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index c59cc97fe6c..109a9dd5d45 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
/* some helper functions for xen and kvm pv clock sources */
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
@@ -56,4 +57,50 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
return product;
}
+static __always_inline
+u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
+{
+ u64 delta = __native_read_tsc() - src->tsc_timestamp;
+ return pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+}
+
+static __always_inline
+unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+ cycle_t *cycles, u8 *flags)
+{
+ unsigned version;
+ cycle_t ret, offset;
+ u8 ret_flags;
+
+ version = src->version;
+ /* Note: emulated platforms which do not advertise SSE2 support
+ * result in kvmclock not using the necessary RDTSC barriers.
+ * Without barriers, it is possible that RDTSC instruction reads from
+ * the time stamp counter outside rdtsc_barrier protected section
+ * below, resulting in violation of monotonicity.
+ */
+ rdtsc_barrier();
+ offset = pvclock_get_nsec_offset(src);
+ ret = src->system_time + offset;
+ ret_flags = src->flags;
+ rdtsc_barrier();
+
+ *cycles = ret;
+ *flags = ret_flags;
+ return version;
+}
+
+struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
+ u32 migrate_count;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
+
+int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+ int size);
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
+
#endif /* _ASM_X86_PVCLOCK_H */
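
__pvclock_read_cycles() returns the version it sampled so callers can detect a concurrent update of the per-vcpu time page: the read must be retried while the version is odd (host update in progress) or has changed across the read. A sketch of the expected retry loop (function name illustrative):

    static cycle_t example_pvclock_read(struct pvclock_vcpu_time_info *src)
    {
            unsigned version;
            cycle_t ret;
            u8 flags;

            do {
                    version = __pvclock_read_cycles(src, &ret, &flags);
            } while ((src->version & 1) || version != src->version);

            return ret;
    }
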
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index d0f19f9fb84..b7bf3505e1e 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,7 +1,8 @@
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
-#ifdef __KERNEL__
+#include <uapi/asm/setup.h>
+
#define COMMAND_LINE_SIZE 2048
@@ -123,6 +124,4 @@ void __init x86_64_start_reservations(char *real_mode_data);
.size .brk.name,.-1b; \
.popsection
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_SETUP_H */
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 5ca71c065ee..9dfce4e0417 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -1,104 +1,9 @@
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-#define FP_XSTATE_MAGIC1 0x46505853U
-#define FP_XSTATE_MAGIC2 0x46505845U
-#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
-
-/*
- * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
- * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
- * are used to extended the fpstate pointer in the sigcontext, which now
- * includes the extended state information along with fpstate information.
- *
- * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
- * area and FP_XSTATE_MAGIC2 at the end of memory layout
- * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
- * extended state information in the memory layout pointed by the fpstate
- * pointer in sigcontext.
- */
-struct _fpx_sw_bytes {
- __u32 magic1; /* FP_XSTATE_MAGIC1 */
- __u32 extended_size; /* total size of the layout referred by
- * fpstate pointer in the sigcontext.
- */
- __u64 xstate_bv;
- /* feature bit mask (including fp/sse/extended
- * state) that is present in the memory
- * layout.
- */
- __u32 xstate_size; /* actual xsave state size, based on the
- * features saved in the layout.
- * 'extended_size' will be greater than
- * 'xstate_size'.
- */
- __u32 padding[7]; /* for future use. */
-};
+#include <uapi/asm/sigcontext.h>
#ifdef __i386__
-/*
- * As documented in the iBCS2 standard..
- *
- * The first part of "struct _fpstate" is just the normal i387
- * hardware setup, the extra "status" word is used to save the
- * coprocessor status word before entering the handler.
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * The FPU state data structure has had to grow to accommodate the
- * extended FPU state required by the Streaming SIMD Extensions.
- * There is no documented standard to accomplish this at the moment.
- */
-struct _fpreg {
- unsigned short significand[4];
- unsigned short exponent;
-};
-
-struct _fpxreg {
- unsigned short significand[4];
- unsigned short exponent;
- unsigned short padding[3];
-};
-
-struct _xmmreg {
- unsigned long element[4];
-};
-
-struct _fpstate {
- /* Regular FPU environment */
- unsigned long cw;
- unsigned long sw;
- unsigned long tag;
- unsigned long ipoff;
- unsigned long cssel;
- unsigned long dataoff;
- unsigned long datasel;
- struct _fpreg _st[8];
- unsigned short status;
- unsigned short magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
- unsigned long mxcsr;
- unsigned long reserved;
- struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
- struct _xmmreg _xmm[8];
- unsigned long padding1[44];
-
- union {
- unsigned long padding2[12];
- struct _fpx_sw_bytes sw_reserved; /* represents the extended
- * state info */
- };
-};
-
-#define X86_FXSR_MAGIC 0x0000
-
-#ifdef __KERNEL__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
@@ -131,62 +36,7 @@ struct sigcontext {
unsigned long oldmask;
unsigned long cr2;
};
-#else /* __KERNEL__ */
-/*
- * User-space might still rely on the old definition:
- */
-struct sigcontext {
- unsigned short gs, __gsh;
- unsigned short fs, __fsh;
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned long edi;
- unsigned long esi;
- unsigned long ebp;
- unsigned long esp;
- unsigned long ebx;
- unsigned long edx;
- unsigned long ecx;
- unsigned long eax;
- unsigned long trapno;
- unsigned long err;
- unsigned long eip;
- unsigned short cs, __csh;
- unsigned long eflags;
- unsigned long esp_at_signal;
- unsigned short ss, __ssh;
- struct _fpstate __user *fpstate;
- unsigned long oldmask;
- unsigned long cr2;
-};
-#endif /* !__KERNEL__ */
-
#else /* __i386__ */
-
-/* FXSAVE frame */
-/* Note: reserved1/2 may someday contain valuable data. Always save/restore
- them when you change signal frames. */
-struct _fpstate {
- __u16 cwd;
- __u16 swd;
- __u16 twd; /* Note this is not the same as the
- 32bit/x87/FSAVE twd */
- __u16 fop;
- __u64 rip;
- __u64 rdp;
- __u32 mxcsr;
- __u32 mxcsr_mask;
- __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
- __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
- __u32 reserved2[12];
- union {
- __u32 reserved3[12];
- struct _fpx_sw_bytes sw_reserved; /* represents the extended
- * state information */
- };
-};
-
-#ifdef __KERNEL__
struct sigcontext {
unsigned long r8;
unsigned long r9;
@@ -225,69 +75,5 @@ struct sigcontext {
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
-#else /* __KERNEL__ */
-/*
- * User-space might still rely on the old definition:
- */
-struct sigcontext {
- __u64 r8;
- __u64 r9;
- __u64 r10;
- __u64 r11;
- __u64 r12;
- __u64 r13;
- __u64 r14;
- __u64 r15;
- __u64 rdi;
- __u64 rsi;
- __u64 rbp;
- __u64 rbx;
- __u64 rdx;
- __u64 rax;
- __u64 rcx;
- __u64 rsp;
- __u64 rip;
- __u64 eflags; /* RFLAGS */
- __u16 cs;
- __u16 gs;
- __u16 fs;
- __u16 __pad0;
- __u64 err;
- __u64 trapno;
- __u64 oldmask;
- __u64 cr2;
- struct _fpstate __user *fpstate; /* zero when no FPU context */
-#ifdef __ILP32__
- __u32 __fpstate_pad;
-#endif
- __u64 reserved1[8];
-};
-#endif /* !__KERNEL__ */
-
#endif /* !__i386__ */
-
-struct _xsave_hdr {
- __u64 xstate_bv;
- __u64 reserved1[2];
- __u64 reserved2[5];
-};
-
-struct _ymmh_state {
- /* 16 * 16 bytes for each YMMH-reg */
- __u32 ymmh_space[64];
-};
-
-/*
- * Extended state pointed by the fpstate pointer in the sigcontext.
- * In addition to the fpstate, information encoded in the xstate_hdr
- * indicates the presence of other extended state information
- * supported by the processor and OS.
- */
-struct _xstate {
- struct _fpstate fpstate;
- struct _xsave_hdr xstate_hdr;
- struct _ymmh_state ymmh;
- /* new processor state extensions go here */
-};
-
#endif /* _ASM_X86_SIGCONTEXT_H */
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 0dba8b7a6ac..216bf364a7e 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -2,14 +2,6 @@
#define _ASM_X86_SIGNAL_H
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/compiler.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#ifdef __KERNEL__
#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
@@ -35,102 +27,11 @@ typedef struct {
typedef sigset_t compat_sigset_t;
#endif
-#else
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-#define NSIG 32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
-#define SA_RESTORER 0x04000000
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal-defs.h>
-
+#include <uapi/asm/signal.h>
#ifndef __ASSEMBLY__
-
-# ifdef __KERNEL__
extern void do_notify_resume(struct pt_regs *, void *, __u32);
-# endif /* __KERNEL__ */
-
#ifdef __i386__
-# ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -149,45 +50,8 @@ struct k_sigaction {
struct sigaction sa;
};
-# else /* __KERNEL__ */
-/* Here we must cater to libcs that poke about in kernel headers. */
-
-struct sigaction {
- union {
- __sighandler_t _sa_handler;
- void (*_sa_sigaction)(int, struct siginfo *, void *);
- } _u;
- sigset_t sa_mask;
- unsigned long sa_flags;
- void (*sa_restorer)(void);
-};
-
-#define sa_handler _u._sa_handler
-#define sa_sigaction _u._sa_sigaction
-
-# endif /* ! __KERNEL__ */
#else /* __i386__ */
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
#endif /* !__i386__ */
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-#ifdef __KERNEL__
#include <asm/sigcontext.h>
#ifdef __i386__
@@ -260,7 +124,5 @@ struct pt_regs;
#endif /* !__i386__ */
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index cdf5674dd23..6136d99f537 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -1,134 +1,8 @@
#ifndef __SVM_H
#define __SVM_H
-#define SVM_EXIT_READ_CR0 0x000
-#define SVM_EXIT_READ_CR3 0x003
-#define SVM_EXIT_READ_CR4 0x004
-#define SVM_EXIT_READ_CR8 0x008
-#define SVM_EXIT_WRITE_CR0 0x010
-#define SVM_EXIT_WRITE_CR3 0x013
-#define SVM_EXIT_WRITE_CR4 0x014
-#define SVM_EXIT_WRITE_CR8 0x018
-#define SVM_EXIT_READ_DR0 0x020
-#define SVM_EXIT_READ_DR1 0x021
-#define SVM_EXIT_READ_DR2 0x022
-#define SVM_EXIT_READ_DR3 0x023
-#define SVM_EXIT_READ_DR4 0x024
-#define SVM_EXIT_READ_DR5 0x025
-#define SVM_EXIT_READ_DR6 0x026
-#define SVM_EXIT_READ_DR7 0x027
-#define SVM_EXIT_WRITE_DR0 0x030
-#define SVM_EXIT_WRITE_DR1 0x031
-#define SVM_EXIT_WRITE_DR2 0x032
-#define SVM_EXIT_WRITE_DR3 0x033
-#define SVM_EXIT_WRITE_DR4 0x034
-#define SVM_EXIT_WRITE_DR5 0x035
-#define SVM_EXIT_WRITE_DR6 0x036
-#define SVM_EXIT_WRITE_DR7 0x037
-#define SVM_EXIT_EXCP_BASE 0x040
-#define SVM_EXIT_INTR 0x060
-#define SVM_EXIT_NMI 0x061
-#define SVM_EXIT_SMI 0x062
-#define SVM_EXIT_INIT 0x063
-#define SVM_EXIT_VINTR 0x064
-#define SVM_EXIT_CR0_SEL_WRITE 0x065
-#define SVM_EXIT_IDTR_READ 0x066
-#define SVM_EXIT_GDTR_READ 0x067
-#define SVM_EXIT_LDTR_READ 0x068
-#define SVM_EXIT_TR_READ 0x069
-#define SVM_EXIT_IDTR_WRITE 0x06a
-#define SVM_EXIT_GDTR_WRITE 0x06b
-#define SVM_EXIT_LDTR_WRITE 0x06c
-#define SVM_EXIT_TR_WRITE 0x06d
-#define SVM_EXIT_RDTSC 0x06e
-#define SVM_EXIT_RDPMC 0x06f
-#define SVM_EXIT_PUSHF 0x070
-#define SVM_EXIT_POPF 0x071
-#define SVM_EXIT_CPUID 0x072
-#define SVM_EXIT_RSM 0x073
-#define SVM_EXIT_IRET 0x074
-#define SVM_EXIT_SWINT 0x075
-#define SVM_EXIT_INVD 0x076
-#define SVM_EXIT_PAUSE 0x077
-#define SVM_EXIT_HLT 0x078
-#define SVM_EXIT_INVLPG 0x079
-#define SVM_EXIT_INVLPGA 0x07a
-#define SVM_EXIT_IOIO 0x07b
-#define SVM_EXIT_MSR 0x07c
-#define SVM_EXIT_TASK_SWITCH 0x07d
-#define SVM_EXIT_FERR_FREEZE 0x07e
-#define SVM_EXIT_SHUTDOWN 0x07f
-#define SVM_EXIT_VMRUN 0x080
-#define SVM_EXIT_VMMCALL 0x081
-#define SVM_EXIT_VMLOAD 0x082
-#define SVM_EXIT_VMSAVE 0x083
-#define SVM_EXIT_STGI 0x084
-#define SVM_EXIT_CLGI 0x085
-#define SVM_EXIT_SKINIT 0x086
-#define SVM_EXIT_RDTSCP 0x087
-#define SVM_EXIT_ICEBP 0x088
-#define SVM_EXIT_WBINVD 0x089
-#define SVM_EXIT_MONITOR 0x08a
-#define SVM_EXIT_MWAIT 0x08b
-#define SVM_EXIT_MWAIT_COND 0x08c
-#define SVM_EXIT_XSETBV 0x08d
-#define SVM_EXIT_NPF 0x400
-
-#define SVM_EXIT_ERR -1
-
-#define SVM_EXIT_REASONS \
- { SVM_EXIT_READ_CR0, "read_cr0" }, \
- { SVM_EXIT_READ_CR3, "read_cr3" }, \
- { SVM_EXIT_READ_CR4, "read_cr4" }, \
- { SVM_EXIT_READ_CR8, "read_cr8" }, \
- { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
- { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
- { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
- { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
- { SVM_EXIT_READ_DR0, "read_dr0" }, \
- { SVM_EXIT_READ_DR1, "read_dr1" }, \
- { SVM_EXIT_READ_DR2, "read_dr2" }, \
- { SVM_EXIT_READ_DR3, "read_dr3" }, \
- { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
- { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
- { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
- { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
- { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
- { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
- { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
- { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
- { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
- { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
- { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
- { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
- { SVM_EXIT_INTR, "interrupt" }, \
- { SVM_EXIT_NMI, "nmi" }, \
- { SVM_EXIT_SMI, "smi" }, \
- { SVM_EXIT_INIT, "init" }, \
- { SVM_EXIT_VINTR, "vintr" }, \
- { SVM_EXIT_CPUID, "cpuid" }, \
- { SVM_EXIT_INVD, "invd" }, \
- { SVM_EXIT_HLT, "hlt" }, \
- { SVM_EXIT_INVLPG, "invlpg" }, \
- { SVM_EXIT_INVLPGA, "invlpga" }, \
- { SVM_EXIT_IOIO, "io" }, \
- { SVM_EXIT_MSR, "msr" }, \
- { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
- { SVM_EXIT_SHUTDOWN, "shutdown" }, \
- { SVM_EXIT_VMRUN, "vmrun" }, \
- { SVM_EXIT_VMMCALL, "hypercall" }, \
- { SVM_EXIT_VMLOAD, "vmload" }, \
- { SVM_EXIT_VMSAVE, "vmsave" }, \
- { SVM_EXIT_STGI, "stgi" }, \
- { SVM_EXIT_CLGI, "clgi" }, \
- { SVM_EXIT_SKINIT, "skinit" }, \
- { SVM_EXIT_WBINVD, "wbinvd" }, \
- { SVM_EXIT_MONITOR, "monitor" }, \
- { SVM_EXIT_MWAIT, "mwait" }, \
- { SVM_EXIT_XSETBV, "xsetbv" }, \
- { SVM_EXIT_NPF, "npf" }
-
-#ifdef __KERNEL__
+#include <uapi/asm/svm.h>
+
enum {
INTERCEPT_INTR,
@@ -403,5 +277,3 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
#endif
-
-#endif
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index c76fae4d90b..31f61f96e0f 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -69,8 +69,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
/* ia32/ia32_signal.c */
asmlinkage long sys32_sigsuspend(int, int, old_sigset_t);
-asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *,
- stack_ia32_t __user *, struct pt_regs *);
asmlinkage long sys32_sigreturn(struct pt_regs *);
asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 2f8374718aa..58b7e3eac0a 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -25,9 +25,6 @@ asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
long sys_rt_sigreturn(struct pt_regs *);
-long sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-
/* kernel/tls.c */
asmlinkage int sys_set_thread_area(struct user_desc __user *);
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 0e7dea7d366..a0790e07ba6 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,10 +1,8 @@
#ifndef _ASM_X86_UNISTD_H
#define _ASM_X86_UNISTD_H 1
-/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT 0x40000000
+#include <uapi/asm/unistd.h>
-#ifdef __KERNEL__
# ifdef CONFIG_X86_X32_ABI
# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
@@ -50,7 +48,6 @@
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_SYS_WAITPID
-# define __ARCH_WANT_SYS_EXECVE
# define __ARCH_WANT_SYS_FORK
# define __ARCH_WANT_SYS_VFORK
# define __ARCH_WANT_SYS_CLONE
@@ -63,14 +60,4 @@
*/
# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#else
-# ifdef __i386__
-# include <asm/unistd_32.h>
-# elif defined(__ILP32__)
-# include <asm/unistd_x32.h>
-# else
-# include <asm/unistd_64.h>
-# endif
-#endif
-
#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index f9303602fbc..1d8de3f3fec 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,133 +1,9 @@
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H
-/*
- * I'm guessing at the VIF/VIP flag usage, but hope that this is how
- * the Pentium uses them. Linux will return from vm86 mode when both
- * VIF and VIP is set.
- *
- * On a Pentium, we could probably optimize the virtual flags directly
- * in the eflags register instead of doing it "by hand" in vflags...
- *
- * Linus
- */
-
-#include <asm/processor-flags.h>
-
-#define BIOSSEG 0x0f000
-
-#define CPU_086 0
-#define CPU_186 1
-#define CPU_286 2
-#define CPU_386 3
-#define CPU_486 4
-#define CPU_586 5
-
-/*
- * Return values for the 'vm86()' system call
- */
-#define VM86_TYPE(retval) ((retval) & 0xff)
-#define VM86_ARG(retval) ((retval) >> 8)
-
-#define VM86_SIGNAL 0 /* return due to signal */
-#define VM86_UNKNOWN 1 /* unhandled GP fault
- - IO-instruction or similar */
-#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
-#define VM86_STI 3 /* sti/popf/iret instruction enabled
- virtual interrupts */
-
-/*
- * Additional return values when invoking new vm86()
- */
-#define VM86_PICRETURN 4 /* return due to pending PIC request */
-#define VM86_TRAP 6 /* return due to DOS-debugger request */
-
-/*
- * function codes when invoking new vm86()
- */
-#define VM86_PLUS_INSTALL_CHECK 0
-#define VM86_ENTER 1
-#define VM86_ENTER_NO_BYPASS 2
-#define VM86_REQUEST_IRQ 3
-#define VM86_FREE_IRQ 4
-#define VM86_GET_IRQ_BITS 5
-#define VM86_GET_AND_RESET_IRQ 6
-
-/*
- * This is the stack-layout seen by the user space program when we have
- * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
- * is 'kernel_vm86_regs' (see below).
- */
-
-struct vm86_regs {
-/*
- * normal regs, with special meaning for the segment descriptors..
- */
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- long __null_ds;
- long __null_es;
- long __null_fs;
- long __null_gs;
- long orig_eax;
- long eip;
- unsigned short cs, __csh;
- long eflags;
- long esp;
- unsigned short ss, __ssh;
-/*
- * these are specific to v86 mode:
- */
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned short fs, __fsh;
- unsigned short gs, __gsh;
-};
-
-struct revectored_struct {
- unsigned long __map[8]; /* 256 bits */
-};
-
-struct vm86_struct {
- struct vm86_regs regs;
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
-};
-
-/*
- * flags masks
- */
-#define VM86_SCREEN_BITMAP 0x0001
-
-struct vm86plus_info_struct {
- unsigned long force_return_for_pic:1;
- unsigned long vm86dbg_active:1; /* for debugger */
- unsigned long vm86dbg_TFpendig:1; /* for debugger */
- unsigned long unused:28;
- unsigned long is_vm86pus:1; /* for vm86 internal use */
- unsigned char vm86dbg_intxxtab[32]; /* for debugger */
-};
-struct vm86plus_struct {
- struct vm86_regs regs;
- unsigned long flags;
- unsigned long screen_bitmap;
- unsigned long cpu_type;
- struct revectored_struct int_revectored;
- struct revectored_struct int21_revectored;
- struct vm86plus_info_struct vm86plus;
-};
-
-#ifdef __KERNEL__
#include <asm/ptrace.h>
+#include <uapi/asm/vm86.h>
/*
* This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
@@ -203,6 +79,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
#endif /* CONFIG_VM86 */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 36ec21c36d6..235b49fa554 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -1,6 +1,3 @@
-#ifndef VMX_H
-#define VMX_H
-
/*
* vmx.h: VMX Architecture related definitions
* Copyright (c) 2004, Intel Corporation.
@@ -24,90 +21,12 @@
* Yaniv Kamay <yaniv@qumranet.com>
*
*/
+#ifndef VMX_H
+#define VMX_H
-#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
-
-#define EXIT_REASON_EXCEPTION_NMI 0
-#define EXIT_REASON_EXTERNAL_INTERRUPT 1
-#define EXIT_REASON_TRIPLE_FAULT 2
-
-#define EXIT_REASON_PENDING_INTERRUPT 7
-#define EXIT_REASON_NMI_WINDOW 8
-#define EXIT_REASON_TASK_SWITCH 9
-#define EXIT_REASON_CPUID 10
-#define EXIT_REASON_HLT 12
-#define EXIT_REASON_INVD 13
-#define EXIT_REASON_INVLPG 14
-#define EXIT_REASON_RDPMC 15
-#define EXIT_REASON_RDTSC 16
-#define EXIT_REASON_VMCALL 18
-#define EXIT_REASON_VMCLEAR 19
-#define EXIT_REASON_VMLAUNCH 20
-#define EXIT_REASON_VMPTRLD 21
-#define EXIT_REASON_VMPTRST 22
-#define EXIT_REASON_VMREAD 23
-#define EXIT_REASON_VMRESUME 24
-#define EXIT_REASON_VMWRITE 25
-#define EXIT_REASON_VMOFF 26
-#define EXIT_REASON_VMON 27
-#define EXIT_REASON_CR_ACCESS 28
-#define EXIT_REASON_DR_ACCESS 29
-#define EXIT_REASON_IO_INSTRUCTION 30
-#define EXIT_REASON_MSR_READ 31
-#define EXIT_REASON_MSR_WRITE 32
-#define EXIT_REASON_INVALID_STATE 33
-#define EXIT_REASON_MWAIT_INSTRUCTION 36
-#define EXIT_REASON_MONITOR_INSTRUCTION 39
-#define EXIT_REASON_PAUSE_INSTRUCTION 40
-#define EXIT_REASON_MCE_DURING_VMENTRY 41
-#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
-#define EXIT_REASON_APIC_ACCESS 44
-#define EXIT_REASON_EPT_VIOLATION 48
-#define EXIT_REASON_EPT_MISCONFIG 49
-#define EXIT_REASON_WBINVD 54
-#define EXIT_REASON_XSETBV 55
-#define EXIT_REASON_INVPCID 58
-
-#define VMX_EXIT_REASONS \
- { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
- { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
- { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
- { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
- { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
- { EXIT_REASON_CPUID, "CPUID" }, \
- { EXIT_REASON_HLT, "HLT" }, \
- { EXIT_REASON_INVLPG, "INVLPG" }, \
- { EXIT_REASON_RDPMC, "RDPMC" }, \
- { EXIT_REASON_RDTSC, "RDTSC" }, \
- { EXIT_REASON_VMCALL, "VMCALL" }, \
- { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
- { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
- { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
- { EXIT_REASON_VMPTRST, "VMPTRST" }, \
- { EXIT_REASON_VMREAD, "VMREAD" }, \
- { EXIT_REASON_VMRESUME, "VMRESUME" }, \
- { EXIT_REASON_VMWRITE, "VMWRITE" }, \
- { EXIT_REASON_VMOFF, "VMOFF" }, \
- { EXIT_REASON_VMON, "VMON" }, \
- { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
- { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
- { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
- { EXIT_REASON_MSR_READ, "MSR_READ" }, \
- { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
- { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
- { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
- { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
- { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
- { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
- { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
- { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
- { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
- { EXIT_REASON_WBINVD, "WBINVD" }
-
-#ifdef __KERNEL__
#include <linux/types.h>
+#include <uapi/asm/vmx.h>
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
@@ -445,8 +364,7 @@ enum vmcs_field {
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
-#define VMX_EPT_AD_BIT (1ull << 21)
-#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
+#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
@@ -527,5 +445,3 @@ enum vm_instruction_error_number {
};
#endif
-
-#endif
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f75..2a46ca720af 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -1,20 +1,8 @@
#ifndef _ASM_X86_VSYSCALL_H
#define _ASM_X86_VSYSCALL_H
-enum vsyscall_num {
- __NR_vgettimeofday,
- __NR_vtime,
- __NR_vgetcpu,
-};
-
-#define VSYSCALL_START (-10UL << 20)
-#define VSYSCALL_SIZE 1024
-#define VSYSCALL_END (-2UL << 20)
-#define VSYSCALL_MAPPED_PAGES 1
-#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
-
-#ifdef __KERNEL__
#include <linux/seqlock.h>
+#include <uapi/asm/vsyscall.h>
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
@@ -33,6 +21,24 @@ extern void map_vsyscall(void);
*/
extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
-#endif /* __KERNEL__ */
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+ unsigned int p;
+
+ if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
+ /* Load per CPU data from RDTSCP */
+ native_read_tscp(&p);
+ } else {
+ /* Load per CPU data from GDT */
+ asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ }
+
+ return p;
+}
+#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_VSYSCALL_H */
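
__getcpu() returns the value the kernel packs for vgetcpu: the CPU number in the low 12 bits and the node number in the bits above, fetched either from TSC_AUX via RDTSCP or from a per-cpu GDT segment limit via LSL. A decoding sketch of the kind the vDSO's getcpu() would use (function name illustrative):

    static void example_decode_getcpu(unsigned *cpu, unsigned *node)
    {
            unsigned int p = __getcpu();

            if (cpu)
                    *cpu = p & VGETCPU_CPU_MASK;    /* low 12 bits: CPU number */
            if (node)
                    *node = p >> 12;                /* upper bits: NUMA node */
    }
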
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 54d52ff1304..fd9cb7695b5 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -63,6 +63,7 @@ DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);
DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
#endif
#ifndef HYPERVISOR_VIRT_START
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 83b6e9a0dce..09409c44f9a 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -4,3 +4,61 @@ include include/uapi/asm-generic/Kbuild.asm
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
+header-y += a.out.h
+header-y += auxvec.h
+header-y += bitsperlong.h
+header-y += boot.h
+header-y += bootparam.h
+header-y += byteorder.h
+header-y += debugreg.h
+header-y += e820.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += hw_breakpoint.h
+header-y += hyperv.h
+header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += ist.h
+header-y += kvm.h
+header-y += kvm_para.h
+header-y += ldt.h
+header-y += mce.h
+header-y += mman.h
+header-y += msgbuf.h
+header-y += msr-index.h
+header-y += msr.h
+header-y += mtrr.h
+header-y += param.h
+header-y += perf_regs.h
+header-y += poll.h
+header-y += posix_types.h
+header-y += posix_types_32.h
+header-y += posix_types_64.h
+header-y += posix_types_x32.h
+header-y += prctl.h
+header-y += processor-flags.h
+header-y += ptrace-abi.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += sigcontext.h
+header-y += sigcontext32.h
+header-y += siginfo.h
+header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
+header-y += statfs.h
+header-y += svm.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += ucontext.h
+header-y += unistd.h
+header-y += vm86.h
+header-y += vmx.h
+header-y += vsyscall.h
diff --git a/arch/x86/include/asm/a.out.h b/arch/x86/include/uapi/asm/a.out.h
index 4684f97a5bb..4684f97a5bb 100644
--- a/arch/x86/include/asm/a.out.h
+++ b/arch/x86/include/uapi/asm/a.out.h
diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/uapi/asm/auxvec.h
index 77203ac352d..77203ac352d 100644
--- a/arch/x86/include/asm/auxvec.h
+++ b/arch/x86/include/uapi/asm/auxvec.h
diff --git a/arch/x86/include/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h
index b0ae1c4dc79..b0ae1c4dc79 100644
--- a/arch/x86/include/asm/bitsperlong.h
+++ b/arch/x86/include/uapi/asm/bitsperlong.h
diff --git a/arch/x86/include/uapi/asm/boot.h b/arch/x86/include/uapi/asm/boot.h
new file mode 100644
index 00000000000..94292c4c812
--- /dev/null
+++ b/arch/x86/include/uapi/asm/boot.h
@@ -0,0 +1,10 @@
+#ifndef _UAPI_ASM_X86_BOOT_H
+#define _UAPI_ASM_X86_BOOT_H
+
+/* Internal svga startup constants */
+#define NORMAL_VGA 0xffff /* 80x25 mode */
+#define EXTENDED_VGA 0xfffe /* 80x50 mode */
+#define ASK_VGA 0xfffd /* ask for it at bootup */
+
+
+#endif /* _UAPI_ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 2ad874cb661..92862cd9020 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -13,6 +13,7 @@
#define SETUP_NONE 0
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
+#define SETUP_PCI 3
/* extensible setup data list node */
struct setup_data {
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h
index b13a7a88f3e..b13a7a88f3e 100644
--- a/arch/x86/include/asm/byteorder.h
+++ b/arch/x86/include/uapi/asm/byteorder.h
diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h
new file mode 100644
index 00000000000..3c0874dd986
--- /dev/null
+++ b/arch/x86/include/uapi/asm/debugreg.h
@@ -0,0 +1,80 @@
+#ifndef _UAPI_ASM_X86_DEBUGREG_H
+#define _UAPI_ASM_X86_DEBUGREG_H
+
+
+/* Indicate the register numbers for a number of the specific
+ debug registers. Registers 0-3 contain the addresses we wish to trap on */
+#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
+#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
+
+#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
+#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
+
+/* Define a few things for the status register. We can use this to determine
+ which debugging register was responsible for the trap. The other bits
+ are either reserved or not of interest to us. */
+
+/* Define reserved bits in DR6 which are always set to 1 */
+#define DR6_RESERVED (0xFFFF0FF0)
+
+#define DR_TRAP0 (0x1) /* db0 */
+#define DR_TRAP1 (0x2) /* db1 */
+#define DR_TRAP2 (0x4) /* db2 */
+#define DR_TRAP3 (0x8) /* db3 */
+#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
+
+#define DR_STEP (0x4000) /* single-step */
+#define DR_SWITCH (0x8000) /* task switch */
+
+/* Now define a bunch of things for manipulating the control register.
+ The top two bytes of the control register consist of 4 fields of 4
+ bits - each field corresponds to one of the four debug registers,
+ and indicates what types of access we trap on, and how large the data
+ field is that we are looking at */
+
+#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
+#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
+
+#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
+#define DR_RW_WRITE (0x1)
+#define DR_RW_READ (0x3)
+
+#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
+#define DR_LEN_2 (0x4)
+#define DR_LEN_4 (0xC)
+#define DR_LEN_8 (0x8)
+
+/* The low byte of the control register determines which registers are
+ enabled. There are 4 fields of two bits. One bit is "local", meaning
+ that the processor will reset the bit after a task switch and the other
+ is "global", meaning that we have to explicitly reset the bit. With Linux,
+ you can use either one, since we explicitly zero the register when we enter
+ kernel mode. */
+
+#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
+#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
+#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
+#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
+#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
+
+#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
+#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
+
+/* The second byte of the control register has a few special things.
+ We can slow the instruction pipeline for instructions coming via the
+ gdt or the ldt if we want to. I am not sure why this is an advantage */
+
+#ifdef __i386__
+#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
+#else
+#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
+#endif
+
+#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
+#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
+
+/*
+ * HW breakpoint additions
+ */
+
+#endif /* _UAPI_ASM_X86_DEBUGREG_H */
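
Taken together, the DR_* fields encode a DR7 control word: a four-bit nibble at bit 16 + 4*n selects the access type and length for debug register n, and the low byte holds the local/global enables. A rough sketch, not part of this patch, that arms DR0 as a 4-byte write watchpoint in a traced child through ptrace(); the watched variable is made up for illustration:

#include <stdio.h>
#include <stddef.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

static volatile int watch_me;                   /* illustrative variable to watch */

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);                 /* let the parent program DR0/DR7 */
                watch_me = 42;                  /* this write triggers the watchpoint */
                _exit(0);
        }

        waitpid(pid, NULL, 0);                  /* child stopped at SIGSTOP */

        /* DR7: nibble at bit 16 = DR_RW_WRITE | DR_LEN_4, low bit = DR_LOCAL_ENABLE */
        unsigned long dr7 = ((0x1UL | 0xCUL) << 16) | 0x1UL;

        /* fork() duplicated the address space, so &watch_me is valid in the child too */
        ptrace(PTRACE_POKEUSER, pid, offsetof(struct user, u_debugreg[0]), &watch_me);
        ptrace(PTRACE_POKEUSER, pid, offsetof(struct user, u_debugreg[7]), (void *)dr7);

        ptrace(PTRACE_CONT, pid, NULL, NULL);
        waitpid(pid, NULL, 0);                  /* stops with SIGTRAP on the write */

        long dr6 = ptrace(PTRACE_PEEKUSER, pid, offsetof(struct user, u_debugreg[6]), NULL);
        printf("DR6 = %#lx, DR_TRAP0 %s\n", dr6, (dr6 & 0x1) ? "set" : "clear");

        ptrace(PTRACE_CONT, pid, NULL, NULL);
        waitpid(pid, NULL, 0);
        return 0;
}
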
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
new file mode 100644
index 00000000000..bbae0247070
--- /dev/null
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -0,0 +1,75 @@
+#ifndef _UAPI_ASM_X86_E820_H
+#define _UAPI_ASM_X86_E820_H
+#define E820MAP 0x2d0 /* our map */
+#define E820MAX 128 /* number of entries in E820MAP */
+
+/*
+ * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
+ * constrained space in the zeropage. If we have more nodes than
+ * that, and if we've booted off EFI firmware, then the EFI tables
+ * passed us from the EFI firmware can list more nodes. Size our
+ * internal memory map tables to have room for these additional
+ * nodes, based on up to three entries per node for which the
+ * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
+ * plus E820MAX, allowing space for the possible duplicate E820
+ * entries that might need room in the same arrays, prior to the
+ * call to sanitize_e820_map() to remove duplicates. The allowance
+ * of three memory map entries per node is "enough" entries for
+ * the initial hardware platform motivating this mechanism to make
+ * use of additional EFI map entries. Future platforms may want
+ * to allow more than three entries per node or otherwise refine
+ * this size.
+ */
+
+/*
+ * Odd: 'make headers_check' complains about numa.h if I try
+ * to collapse the next two #ifdef lines to a single line:
+ * #if defined(__KERNEL__) && defined(CONFIG_EFI)
+ */
+#ifndef __KERNEL__
+#define E820_X_MAX E820MAX
+#endif
+
+#define E820NR 0x1e8 /* # entries in E820MAP */
+
+#define E820_RAM 1
+#define E820_RESERVED 2
+#define E820_ACPI 3
+#define E820_NVS 4
+#define E820_UNUSABLE 5
+
+
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
+#define E820_RESERVED_KERN 128
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+struct e820entry {
+ __u64 addr; /* start of memory segment */
+ __u64 size; /* size of memory segment */
+ __u32 type; /* type of memory segment */
+} __attribute__((packed));
+
+struct e820map {
+ __u32 nr_map;
+ struct e820entry map[E820_X_MAX];
+};
+
+#define ISA_START_ADDRESS 0xa0000
+#define ISA_END_ADDRESS 0x100000
+
+#define BIOS_BEGIN 0x000a0000
+#define BIOS_END 0x00100000
+
+#define BIOS_ROM_BASE 0xffe00000
+#define BIOS_ROM_END 0xffffffff
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* _UAPI_ASM_X86_E820_H */
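
Because struct e820entry is packed, each slot occupies exactly 20 bytes (8 + 8 + 4) rather than the 24 a naturally aligned layout would take, which is what the boot protocol expects. A small sketch that walks a hand-built map; it assumes the exported <asm/e820.h> from this series is installed:

#include <stdio.h>
#include <asm/e820.h>                   /* struct e820entry, E820_* region types */

static const char *type_name(__u32 type)
{
        switch (type) {
        case E820_RAM:          return "usable RAM";
        case E820_RESERVED:     return "reserved";
        case E820_ACPI:         return "ACPI data";
        case E820_NVS:          return "ACPI NVS";
        case E820_UNUSABLE:     return "unusable";
        default:                return "unknown";
        }
}

int main(void)
{
        /* a made-up two-entry map, only to exercise the structure layout */
        struct e820entry map[] = {
                { .addr = 0x0,     .size = 0x9fc00, .type = E820_RAM      },
                { .addr = 0xf0000, .size = 0x10000, .type = E820_RESERVED },
        };
        unsigned int i;

        printf("sizeof(struct e820entry) = %zu (packed)\n", sizeof(struct e820entry));
        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                printf("%016llx-%016llx: %s\n",
                       (unsigned long long)map[i].addr,
                       (unsigned long long)(map[i].addr + map[i].size - 1),
                       type_name(map[i].type));
        return 0;
}
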
diff --git a/arch/x86/include/asm/errno.h b/arch/x86/include/uapi/asm/errno.h
index 4c82b503d92..4c82b503d92 100644
--- a/arch/x86/include/asm/errno.h
+++ b/arch/x86/include/uapi/asm/errno.h
diff --git a/arch/x86/include/asm/fcntl.h b/arch/x86/include/uapi/asm/fcntl.h
index 46ab12db573..46ab12db573 100644
--- a/arch/x86/include/asm/fcntl.h
+++ b/arch/x86/include/uapi/asm/fcntl.h
diff --git a/arch/x86/include/uapi/asm/hw_breakpoint.h b/arch/x86/include/uapi/asm/hw_breakpoint.h
new file mode 100644
index 00000000000..79a9626b550
--- /dev/null
+++ b/arch/x86/include/uapi/asm/hw_breakpoint.h
@@ -0,0 +1 @@
+/* */
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index b80420bcd09..b80420bcd09 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
diff --git a/arch/x86/include/asm/ioctl.h b/arch/x86/include/uapi/asm/ioctl.h
index b279fe06dfe..b279fe06dfe 100644
--- a/arch/x86/include/asm/ioctl.h
+++ b/arch/x86/include/uapi/asm/ioctl.h
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/uapi/asm/ioctls.h
index ec34c760665..ec34c760665 100644
--- a/arch/x86/include/asm/ioctls.h
+++ b/arch/x86/include/uapi/asm/ioctls.h
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/uapi/asm/ipcbuf.h
index 84c7e51cb6d..84c7e51cb6d 100644
--- a/arch/x86/include/asm/ipcbuf.h
+++ b/arch/x86/include/uapi/asm/ipcbuf.h
diff --git a/arch/x86/include/uapi/asm/ist.h b/arch/x86/include/uapi/asm/ist.h
new file mode 100644
index 00000000000..bad9f5ea407
--- /dev/null
+++ b/arch/x86/include/uapi/asm/ist.h
@@ -0,0 +1,29 @@
+/*
+ * Include file for the interface to IST BIOS
+ * Copyright 2002 Andy Grover <andrew.grover@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _UAPI_ASM_X86_IST_H
+#define _UAPI_ASM_X86_IST_H
+
+
+
+#include <linux/types.h>
+
+struct ist_info {
+ __u32 signature;
+ __u32 command;
+ __u32 event;
+ __u32 perf_level;
+};
+
+#endif /* _UAPI_ASM_X86_IST_H */
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a65ec29e6ff..a65ec29e6ff 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
new file mode 100644
index 00000000000..06fdbd987e9
--- /dev/null
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -0,0 +1,100 @@
+#ifndef _UAPI_ASM_X86_KVM_PARA_H
+#define _UAPI_ASM_X86_KVM_PARA_H
+
+#include <linux/types.h>
+#include <asm/hyperv.h>
+
+/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
+ * should be used to determine that a VM is running under KVM.
+ */
+#define KVM_CPUID_SIGNATURE 0x40000000
+
+/* This CPUID returns a feature bitmap in eax. Before enabling a particular
+ * paravirtualization, the appropriate feature bit should be checked.
+ */
+#define KVM_CPUID_FEATURES 0x40000001
+#define KVM_FEATURE_CLOCKSOURCE 0
+#define KVM_FEATURE_NOP_IO_DELAY 1
+#define KVM_FEATURE_MMU_OP 2
+/* This indicates that the new set of kvmclock MSRs
+ * is available. The use of 0x11 and 0x12 is deprecated.
+ */
+#define KVM_FEATURE_CLOCKSOURCE2 3
+#define KVM_FEATURE_ASYNC_PF 4
+#define KVM_FEATURE_STEAL_TIME 5
+#define KVM_FEATURE_PV_EOI 6
+
+/* The last 8 bits are used to indicate how to interpret the flags field
+ * in pvclock structure. If no bits are set, all flags are ignored.
+ */
+#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MSR_ENABLED 1
+/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
+#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
+#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
+#define MSR_KVM_STEAL_TIME 0x4b564d03
+#define MSR_KVM_PV_EOI_EN 0x4b564d04
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
+#define KVM_STEAL_ALIGNMENT_BITS 5
+#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
+#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
+
+#define KVM_MAX_MMU_OP_BATCH 32
+
+#define KVM_ASYNC_PF_ENABLED (1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE 1
+#define KVM_MMU_OP_FLUSH_TLB 2
+#define KVM_MMU_OP_RELEASE_PT 3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+ __u32 op;
+ __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+ struct kvm_mmu_op_header header;
+ __u64 pte_phys;
+ __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+ struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+ struct kvm_mmu_op_header header;
+ __u64 pt_phys;
+};
+
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+ __u32 reason;
+ __u8 pad[60];
+ __u32 enabled;
+};
+
+#define KVM_PV_EOI_BIT 0
+#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
+#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
+#define KVM_PV_EOI_DISABLED 0x0
+
+
+#endif /* _UAPI_ASM_X86_KVM_PARA_H */
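
A guest can use KVM_CPUID_SIGNATURE and KVM_CPUID_FEATURES directly: leaf 0x40000000 returns 'KVMKVMKVM' in ebx/ecx/edx when running on KVM, and leaf 0x40000001 returns the feature bitmap whose bit numbers are the KVM_FEATURE_* values above. A minimal detection sketch for GCC/Clang on x86, only meaningful inside a guest:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>                      /* __cpuid() macro, GCC/Clang */

#define KVM_CPUID_SIGNATURE     0x40000000
#define KVM_CPUID_FEATURES      0x40000001

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char sig[13];

        /* CPUID.1:ECX bit 31 is the "running under a hypervisor" bit */
        __cpuid(1, eax, ebx, ecx, edx);
        if (!(ecx & (1u << 31))) {
                puts("no hypervisor detected");
                return 0;
        }

        __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        sig[12] = '\0';

        if (strcmp(sig, "KVMKVMKVM") != 0) {
                printf("hypervisor signature: %s\n", sig);
                return 0;
        }

        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        printf("KVM feature bitmap: %#x (KVM_FEATURE_STEAL_TIME: %s)\n",
               eax, (eax & (1u << 5)) ? "yes" : "no");  /* bit 5, see above */
        return 0;
}
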
diff --git a/arch/x86/include/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
index 46727eb37bf..46727eb37bf 100644
--- a/arch/x86/include/asm/ldt.h
+++ b/arch/x86/include/uapi/asm/ldt.h
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
new file mode 100644
index 00000000000..58c829871c3
--- /dev/null
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -0,0 +1,121 @@
+#ifndef _UAPI_ASM_X86_MCE_H
+#define _UAPI_ASM_X86_MCE_H
+
+#include <linux/types.h>
+#include <asm/ioctls.h>
+
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
+#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
+#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
+#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT 16
+#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
+#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
+#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
+#define MCI_MISC_ADDR_PHYS 2 /* physical address */
+#define MCI_MISC_ADDR_MEM 3 /* memory address */
+#define MCI_MISC_ADDR_GENERIC 7 /* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
+
+#define MCJ_CTX_MASK 3
+#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM 0 /* inject context: random */
+#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
+#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
+#define MCJ_EXCEPTION 0x8 /* raise as exception */
+#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
+
+/* Fields are zero when not available */
+struct mce {
+ __u64 status;
+ __u64 misc;
+ __u64 addr;
+ __u64 mcgstatus;
+ __u64 ip;
+ __u64 tsc; /* cpu time stamp counter */
+ __u64 time; /* wall time_t when error was detected */
+ __u8 cpuvendor; /* cpu vendor as encoded in system.h */
+ __u8 inject_flags; /* software inject flags */
+ __u16 pad;
+ __u32 cpuid; /* CPUID 1 EAX */
+ __u8 cs; /* code segment */
+ __u8 bank; /* machine check bank */
+ __u8 cpu; /* cpu number; obsolete; use extcpu now */
+ __u8 finished; /* entry is valid */
+ __u32 extcpu; /* linux cpu number that detected the error */
+ __u32 socketid; /* CPU socket ID */
+ __u32 apicid; /* CPU initial apic ID */
+ __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
+};
+
+/*
+ * This structure contains all data related to the MCE log. Also
+ * carries a signature to make it easier to find from external
+ * debugging tools. Each entry is only valid when its finished flag
+ * is set.
+ */
+
+#define MCE_LOG_LEN 32
+
+struct mce_log {
+ char signature[12]; /* "MACHINECHECK" */
+ unsigned len; /* = MCE_LOG_LEN */
+ unsigned next;
+ unsigned flags;
+ unsigned recordlen; /* length of struct mce */
+ struct mce entry[MCE_LOG_LEN];
+};
+
+#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+#define MCE_LOG_SIGNATURE "MACHINECHECK"
+
+#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
+#define MCE_GET_LOG_LEN _IOR('M', 2, int)
+#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK 128
+#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
+#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
+
+#endif /* _UAPI_ASM_X86_MCE_H */
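
These _IOR('M', ...) ioctls are served by the /dev/mcelog character device: a consumer first queries the record and buffer lengths, then read()s whole struct mce records and skips entries whose finished flag is clear. A rough sketch of such a reader; it needs root and assumes the exported <asm/mce.h>:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>                    /* struct mce, MCE_GET_* ioctls */

int main(void)
{
        int fd = open("/dev/mcelog", O_RDONLY);
        int recordlen = 0, loglen = 0;
        ssize_t n, off;
        char *buf;

        if (fd < 0) { perror("open /dev/mcelog"); return 1; }

        ioctl(fd, MCE_GET_RECORD_LEN, &recordlen);
        ioctl(fd, MCE_GET_LOG_LEN, &loglen);
        if (recordlen <= 0 || loglen <= 0)
                return 1;
        printf("record length %d, buffer holds %d records\n", recordlen, loglen);

        buf = malloc((size_t)recordlen * loglen);
        n = read(fd, buf, (size_t)recordlen * loglen);

        /* the kernel hands back whole records; stride by its record length */
        for (off = 0; off + recordlen <= n; off += recordlen) {
                struct mce *m = (struct mce *)(buf + off);

                if (!m->finished)
                        continue;       /* entry not yet valid, see the comment above */
                printf("cpu %u bank %u status %#llx addr %#llx\n",
                       m->extcpu, (unsigned)m->bank,
                       (unsigned long long)m->status, (unsigned long long)m->addr);
        }

        free(buf);
        close(fd);
        return 0;
}
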
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
index 513b05f15bb..513b05f15bb 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a..809134c644a 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index e400cdb2dd6..433a59fb1a7 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -35,11 +35,14 @@
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
+#define MSR_NHM_PLATFORM_INFO 0x000000ce
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
+#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
+#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
@@ -55,6 +58,8 @@
#define MSR_OFFCORE_RSP_0 0x000001a6
#define MSR_OFFCORE_RSP_1 0x000001a7
+#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
+#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
@@ -103,6 +108,38 @@
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403
+/* C-state Residency Counters */
+#define MSR_PKG_C3_RESIDENCY 0x000003f8
+#define MSR_PKG_C6_RESIDENCY 0x000003f9
+#define MSR_PKG_C7_RESIDENCY 0x000003fa
+#define MSR_CORE_C3_RESIDENCY 0x000003fc
+#define MSR_CORE_C6_RESIDENCY 0x000003fd
+#define MSR_CORE_C7_RESIDENCY 0x000003fe
+#define MSR_PKG_C2_RESIDENCY 0x0000060d
+
+/* Run Time Average Power Limiting (RAPL) Interface */
+
+#define MSR_RAPL_POWER_UNIT 0x00000606
+
+#define MSR_PKG_POWER_LIMIT 0x00000610
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_PERF_STATUS 0x00000613
+#define MSR_PKG_POWER_INFO 0x00000614
+
+#define MSR_DRAM_POWER_LIMIT 0x00000618
+#define MSR_DRAM_ENERGY_STATUS 0x00000619
+#define MSR_DRAM_PERF_STATUS 0x0000061b
+#define MSR_DRAM_POWER_INFO 0x0000061c
+
+#define MSR_PP0_POWER_LIMIT 0x00000638
+#define MSR_PP0_ENERGY_STATUS 0x00000639
+#define MSR_PP0_POLICY 0x0000063a
+#define MSR_PP0_PERF_STATUS 0x0000063b
+
+#define MSR_PP1_POWER_LIMIT 0x00000640
+#define MSR_PP1_ENERGY_STATUS 0x00000641
+#define MSR_PP1_POLICY 0x00000642
+
#define MSR_AMD64_MC0_MASK 0xc0010044
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
@@ -236,6 +273,7 @@
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_EBC_FREQUENCY_ID 0x0000002c
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define MSR_IA32_TSC_ADJUST 0x0000003b
#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
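
The new RAPL MSRs are readable from user space through the msr driver: pread() on /dev/cpu/N/msr at an offset equal to the MSR number returns its 64-bit value, and the energy counters tick in units of 1/2^ESU joules, where ESU is bits 12:8 of MSR_RAPL_POWER_UNIT. A hedged sketch; it requires root plus the msr module, assumes a Sandy Bridge or later package, and copies the constants from the hunk above:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_RAPL_POWER_UNIT     0x606
#define MSR_PKG_ENERGY_STATUS   0x611

static uint64_t rdmsr(int fd, off_t msr)
{
        uint64_t val = 0;

        if (pread(fd, &val, sizeof(val), msr) != sizeof(val))
                perror("pread");
        return val;
}

int main(void)
{
        int fd = open("/dev/cpu/0/msr", O_RDONLY);
        unsigned int esu;
        uint64_t e0, e1;

        if (fd < 0) { perror("open /dev/cpu/0/msr"); return 1; }

        esu = (rdmsr(fd, MSR_RAPL_POWER_UNIT) >> 8) & 0x1f;  /* energy status units */

        e0 = rdmsr(fd, MSR_PKG_ENERGY_STATUS) & 0xffffffff;  /* 32-bit counter */
        sleep(1);
        e1 = rdmsr(fd, MSR_PKG_ENERGY_STATUS) & 0xffffffff;

        printf("package power over ~1s: %.2f W\n",
               (double)(e1 - e0) / (double)(1u << esu));
        close(fd);
        return 0;
}
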
diff --git a/arch/x86/include/uapi/asm/msr.h b/arch/x86/include/uapi/asm/msr.h
new file mode 100644
index 00000000000..155e51048fa
--- /dev/null
+++ b/arch/x86/include/uapi/asm/msr.h
@@ -0,0 +1,15 @@
+#ifndef _UAPI_ASM_X86_MSR_H
+#define _UAPI_ASM_X86_MSR_H
+
+#include <asm/msr-index.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
+#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
+
+#endif /* __ASSEMBLY__ */
+#endif /* _UAPI_ASM_X86_MSR_H */
diff --git a/arch/x86/include/uapi/asm/mtrr.h b/arch/x86/include/uapi/asm/mtrr.h
new file mode 100644
index 00000000000..d0acb658c8f
--- /dev/null
+++ b/arch/x86/include/uapi/asm/mtrr.h
@@ -0,0 +1,117 @@
+/* Generic MTRR (Memory Type Range Register) ioctls.
+
+ Copyright (C) 1997-1999 Richard Gooch
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Richard Gooch may be reached by email at rgooch@atnf.csiro.au
+ The postal address is:
+ Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+*/
+#ifndef _UAPI_ASM_X86_MTRR_H
+#define _UAPI_ASM_X86_MTRR_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/errno.h>
+
+#define MTRR_IOCTL_BASE 'M'
+
+/* Warning: this structure has a different order from i386
+ on x86-64. The 32bit emulation code takes care of that.
+ But you need to use this for 64bit, otherwise your X server
+ will break. */
+
+#ifdef __i386__
+struct mtrr_sentry {
+ unsigned long base; /* Base address */
+ unsigned int size; /* Size of region */
+ unsigned int type; /* Type of region */
+};
+
+struct mtrr_gentry {
+ unsigned int regnum; /* Register number */
+ unsigned long base; /* Base address */
+ unsigned int size; /* Size of region */
+ unsigned int type; /* Type of region */
+};
+
+#else /* __i386__ */
+
+struct mtrr_sentry {
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
+ __u32 type; /* Type of region */
+};
+
+struct mtrr_gentry {
+ __u64 base; /* Base address */
+ __u32 size; /* Size of region */
+ __u32 regnum; /* Register number */
+ __u32 type; /* Type of region */
+ __u32 _pad; /* Unused */
+};
+
+#endif /* !__i386__ */
+
+struct mtrr_var_range {
+ __u32 base_lo;
+ __u32 base_hi;
+ __u32 mask_lo;
+ __u32 mask_hi;
+};
+
+/* In the Intel processor's MTRR interface, the MTRR type is always held in
+ an 8 bit field: */
+typedef __u8 mtrr_type;
+
+#define MTRR_NUM_FIXED_RANGES 88
+#define MTRR_MAX_VAR_RANGES 256
+
+struct mtrr_state_type {
+ struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
+ mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
+ unsigned char enabled;
+ unsigned char have_fixed;
+ mtrr_type def_type;
+};
+
+#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
+#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
+
+/* These are the various ioctls */
+#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
+#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
+#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
+#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
+#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
+#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
+#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
+#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
+#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
+#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
+
+/* These are the region types */
+#define MTRR_TYPE_UNCACHABLE 0
+#define MTRR_TYPE_WRCOMB 1
+/*#define MTRR_TYPE_ 2*/
+/*#define MTRR_TYPE_ 3*/
+#define MTRR_TYPE_WRTHROUGH 4
+#define MTRR_TYPE_WRPROT 5
+#define MTRR_TYPE_WRBACK 6
+#define MTRR_NUM_TYPES 7
+
+
+#endif /* _UAPI_ASM_X86_MTRR_H */
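
The same ioctls are reachable through /proc/mtrr: filling mtrr_gentry.regnum and issuing MTRRIOC_GET_ENTRY returns one register at a time, a zero size marks an unused slot, and EINVAL signals the end. A short sketch that lists the variable ranges, assuming the exported <asm/mtrr.h>:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>                   /* struct mtrr_gentry, MTRRIOC_*, MTRR_TYPE_* */

static const char *mtrr_type_name(unsigned int type)
{
        static const char *names[MTRR_NUM_TYPES] = {
                "uncachable", "write-combining", "?", "?",
                "write-through", "write-protect", "write-back"
        };
        return type < MTRR_NUM_TYPES ? names[type] : "unknown";
}

int main(void)
{
        int fd = open("/proc/mtrr", O_RDONLY);
        unsigned int i;

        if (fd < 0) { perror("open /proc/mtrr"); return 1; }

        for (i = 0; ; i++) {
                struct mtrr_gentry g = { .regnum = i };

                if (ioctl(fd, MTRRIOC_GET_ENTRY, &g) < 0)
                        break;          /* past the last variable register */
                if (g.size == 0)
                        continue;       /* register currently unused */
                printf("reg %02u: base=%#llx size=%#x type=%s\n",
                       i, (unsigned long long)g.base, (unsigned int)g.size,
                       mtrr_type_name(g.type));
        }
        close(fd);
        return 0;
}
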
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/uapi/asm/param.h
index 965d4542797..965d4542797 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/uapi/asm/param.h
diff --git a/arch/x86/include/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index 3f2207bfd17..3f2207bfd17 100644
--- a/arch/x86/include/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
diff --git a/arch/x86/include/asm/poll.h b/arch/x86/include/uapi/asm/poll.h
index c98509d3149..c98509d3149 100644
--- a/arch/x86/include/asm/poll.h
+++ b/arch/x86/include/uapi/asm/poll.h
diff --git a/arch/x86/include/uapi/asm/posix_types.h b/arch/x86/include/uapi/asm/posix_types.h
new file mode 100644
index 00000000000..85506b38362
--- /dev/null
+++ b/arch/x86/include/uapi/asm/posix_types.h
@@ -0,0 +1,9 @@
+#ifndef __KERNEL__
+# ifdef __i386__
+# include <asm/posix_types_32.h>
+# elif defined(__ILP32__)
+# include <asm/posix_types_x32.h>
+# else
+# include <asm/posix_types_64.h>
+# endif
+#endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/uapi/asm/posix_types_32.h
index 8e525059e7d..8e525059e7d 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/uapi/asm/posix_types_32.h
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/uapi/asm/posix_types_64.h
index cba0c1ead16..cba0c1ead16 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/uapi/asm/posix_types_64.h
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/uapi/asm/posix_types_x32.h
index 85f9bdafa93..85f9bdafa93 100644
--- a/arch/x86/include/asm/posix_types_x32.h
+++ b/arch/x86/include/uapi/asm/posix_types_x32.h
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index 3ac5032fae0..3ac5032fae0 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
new file mode 100644
index 00000000000..54991a74604
--- /dev/null
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -0,0 +1,99 @@
+#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H
+#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
+/* Various flags defined: can be included from assembler. */
+
+/*
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+/*
+ * Basic CPU control in CR0
+ */
+#define X86_CR0_PE 0x00000001 /* Protection Enable */
+#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
+#define X86_CR0_EM 0x00000004 /* Emulation */
+#define X86_CR0_TS 0x00000008 /* Task Switched */
+#define X86_CR0_ET 0x00000010 /* Extension Type */
+#define X86_CR0_NE 0x00000020 /* Numeric Error */
+#define X86_CR0_WP 0x00010000 /* Write Protect */
+#define X86_CR0_AM 0x00040000 /* Alignment Mask */
+#define X86_CR0_NW 0x20000000 /* Not Write-through */
+#define X86_CR0_CD 0x40000000 /* Cache Disable */
+#define X86_CR0_PG 0x80000000 /* Paging */
+
+/*
+ * Paging options in CR3
+ */
+#define X86_CR3_PWT 0x00000008 /* Page Write Through */
+#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
+#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
+
+/*
+ * Intel CPU features in CR4
+ */
+#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
+#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
+#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
+#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
+#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
+#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
+#define X86_CR4_MCE 0x00000040 /* Machine check enable */
+#define X86_CR4_PGE 0x00000080 /* enable global pages */
+#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
+#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
+#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
+#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
+#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
+#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
+#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
+#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
+#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
+
+/*
+ * x86-64 Task Priority Register, CR8
+ */
+#define X86_CR8_TPR 0x0000000F /* task priority register */
+
+/*
+ * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
+ */
+
+/*
+ * NSC/Cyrix CPU configuration register indexes
+ */
+#define CX86_PCR0 0x20
+#define CX86_GCR 0xb8
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
+#define CX86_CCR7 0xeb
+#define CX86_PCR1 0xf0
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
+
+
+#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */
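
The EFLAGS definitions are usable from user space as well; the ID bit being toggleable was the traditional test for CPUID support. A quick sketch that snapshots the flags with pushf and decodes a few of them, assuming x86-64 with GCC/Clang inline asm and copying the constants from the hunk above:

#include <stdio.h>

#define X86_EFLAGS_CF 0x00000001
#define X86_EFLAGS_ZF 0x00000040
#define X86_EFLAGS_IF 0x00000200
#define X86_EFLAGS_ID 0x00200000

int main(void)
{
        unsigned long flags;

        /* pushf/pop copies RFLAGS into a general-purpose register */
        __asm__ volatile("pushf\n\tpop %0" : "=r" (flags) : : "memory");

        printf("eflags = %#lx\n", flags);
        printf("CF=%d ZF=%d IF=%d ID=%d\n",
               !!(flags & X86_EFLAGS_CF), !!(flags & X86_EFLAGS_ZF),
               !!(flags & X86_EFLAGS_IF), !!(flags & X86_EFLAGS_ID));
        return 0;
}
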
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
index 7b0a55a8885..7b0a55a8885 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..ac4b9aa4d99
--- /dev/null
+++ b/arch/x86/include/uapi/asm/ptrace.h
@@ -0,0 +1,78 @@
+#ifndef _UAPI_ASM_X86_PTRACE_H
+#define _UAPI_ASM_X86_PTRACE_H
+
+#include <linux/compiler.h> /* For __user */
+#include <asm/ptrace-abi.h>
+#include <asm/processor-flags.h>
+
+
+#ifndef __ASSEMBLY__
+
+#ifdef __i386__
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+#ifndef __KERNEL__
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ int xfs;
+ int xgs;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+
+#endif /* __KERNEL__ */
+
+#else /* __i386__ */
+
+#ifndef __KERNEL__
+
+struct pt_regs {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long rbp;
+ unsigned long rbx;
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rdx;
+ unsigned long rsi;
+ unsigned long rdi;
+ unsigned long orig_rax;
+/* end of arguments */
+/* cpu exception frame or undefined */
+ unsigned long rip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long rsp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+#endif /* __KERNEL__ */
+#endif /* !__i386__ */
+
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_X86_PTRACE_H */
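
The user-space pt_regs above is the layout debuggers see; glibc exposes the same register set as struct user_regs_struct in <sys/user.h>, which PTRACE_GETREGS fills in. A hedged sketch that attaches to a pid given on the command line and prints its instruction and stack pointers; x86-64, and it needs permission to trace the target:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
        pid_t pid;
        struct user_regs_struct regs;   /* mirrors the 64-bit pt_regs above */

        if (argc != 2) { fprintf(stderr, "usage: %s <pid>\n", argv[0]); return 1; }
        pid = (pid_t)atoi(argv[1]);

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
                perror("PTRACE_ATTACH");
                return 1;
        }
        waitpid(pid, NULL, 0);          /* wait for the attach stop */

        ptrace(PTRACE_GETREGS, pid, NULL, &regs);
        printf("pid %d: rip=%#llx rsp=%#llx orig_rax=%lld\n",
               pid, regs.rip, regs.rsp, (long long)regs.orig_rax);

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
}
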
diff --git a/arch/x86/include/asm/resource.h b/arch/x86/include/uapi/asm/resource.h
index 04bc4db8921..04bc4db8921 100644
--- a/arch/x86/include/asm/resource.h
+++ b/arch/x86/include/uapi/asm/resource.h
diff --git a/arch/x86/include/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
index ee50c801f7b..ee50c801f7b 100644
--- a/arch/x86/include/asm/sembuf.h
+++ b/arch/x86/include/uapi/asm/sembuf.h
diff --git a/arch/x86/include/uapi/asm/setup.h b/arch/x86/include/uapi/asm/setup.h
new file mode 100644
index 00000000000..79a9626b550
--- /dev/null
+++ b/arch/x86/include/uapi/asm/setup.h
@@ -0,0 +1 @@
+/* */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de3..83c05fc2de3 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
new file mode 100644
index 00000000000..d8b9f9081e8
--- /dev/null
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -0,0 +1,221 @@
+#ifndef _UAPI_ASM_X86_SIGCONTEXT_H
+#define _UAPI_ASM_X86_SIGCONTEXT_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define FP_XSTATE_MAGIC1 0x46505853U
+#define FP_XSTATE_MAGIC2 0x46505845U
+#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
+
+/*
+ * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
+ * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
+ * are used to extended the fpstate pointer in the sigcontext, which now
+ * includes the extended state information along with fpstate information.
+ *
+ * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
+ * area and FP_XSTATE_MAGIC2 at the end of memory layout
+ * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
+ * extended state information in the memory layout pointed by the fpstate
+ * pointer in sigcontext.
+ */
+struct _fpx_sw_bytes {
+ __u32 magic1; /* FP_XSTATE_MAGIC1 */
+ __u32 extended_size; /* total size of the layout referred by
+ * fpstate pointer in the sigcontext.
+ */
+ __u64 xstate_bv;
+ /* feature bit mask (including fp/sse/extended
+ * state) that is present in the memory
+ * layout.
+ */
+ __u32 xstate_size; /* actual xsave state size, based on the
+ * features saved in the layout.
+ * 'extended_size' will be greater than
+ * 'xstate_size'.
+ */
+ __u32 padding[7]; /* for future use. */
+};
+
+#ifdef __i386__
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the normal i387
+ * hardware setup, the extra "status" word is used to save the
+ * coprocessor status word before entering the handler.
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * The FPU state data structure has had to grow to accommodate the
+ * extended FPU state required by the Streaming SIMD Extensions.
+ * There is no documented standard to accomplish this at the moment.
+ */
+struct _fpreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _fpxreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+ unsigned short padding[3];
+};
+
+struct _xmmreg {
+ unsigned long element[4];
+};
+
+struct _fpstate {
+ /* Regular FPU environment */
+ unsigned long cw;
+ unsigned long sw;
+ unsigned long tag;
+ unsigned long ipoff;
+ unsigned long cssel;
+ unsigned long dataoff;
+ unsigned long datasel;
+ struct _fpreg _st[8];
+ unsigned short status;
+ unsigned short magic; /* 0xffff = regular FPU data only */
+
+ /* FXSR FPU environment */
+ unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
+ unsigned long mxcsr;
+ unsigned long reserved;
+ struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
+ struct _xmmreg _xmm[8];
+ unsigned long padding1[44];
+
+ union {
+ unsigned long padding2[12];
+ struct _fpx_sw_bytes sw_reserved; /* represents the extended
+ * state info */
+ };
+};
+
+#define X86_FXSR_MAGIC 0x0000
+
+#ifndef __KERNEL__
+/*
+ * User-space might still rely on the old definition:
+ */
+struct sigcontext {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long esp;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned short cs, __csh;
+ unsigned long eflags;
+ unsigned long esp_at_signal;
+ unsigned short ss, __ssh;
+ struct _fpstate __user *fpstate;
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+#endif /* !__KERNEL__ */
+
+#else /* __i386__ */
+
+/* FXSAVE frame */
+/* Note: reserved1/2 may someday contain valuable data. Always save/restore
+ them when you change signal frames. */
+struct _fpstate {
+ __u16 cwd;
+ __u16 swd;
+ __u16 twd; /* Note this is not the same as the
+ 32bit/x87/FSAVE twd */
+ __u16 fop;
+ __u64 rip;
+ __u64 rdp;
+ __u32 mxcsr;
+ __u32 mxcsr_mask;
+ __u32 st_space[32]; /* 8*16 bytes for each FP-reg */
+ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
+ __u32 reserved2[12];
+ union {
+ __u32 reserved3[12];
+ struct _fpx_sw_bytes sw_reserved; /* represents the extended
+ * state information */
+ };
+};
+
+#ifndef __KERNEL__
+/*
+ * User-space might still rely on the old definition:
+ */
+struct sigcontext {
+ __u64 r8;
+ __u64 r9;
+ __u64 r10;
+ __u64 r11;
+ __u64 r12;
+ __u64 r13;
+ __u64 r14;
+ __u64 r15;
+ __u64 rdi;
+ __u64 rsi;
+ __u64 rbp;
+ __u64 rbx;
+ __u64 rdx;
+ __u64 rax;
+ __u64 rcx;
+ __u64 rsp;
+ __u64 rip;
+ __u64 eflags; /* RFLAGS */
+ __u16 cs;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
+ __u64 err;
+ __u64 trapno;
+ __u64 oldmask;
+ __u64 cr2;
+ struct _fpstate __user *fpstate; /* zero when no FPU context */
+#ifdef __ILP32__
+ __u32 __fpstate_pad;
+#endif
+ __u64 reserved1[8];
+};
+#endif /* !__KERNEL__ */
+
+#endif /* !__i386__ */
+
+struct _xsave_hdr {
+ __u64 xstate_bv;
+ __u64 reserved1[2];
+ __u64 reserved2[5];
+};
+
+struct _ymmh_state {
+ /* 16 * 16 bytes for each YMMH-reg */
+ __u32 ymmh_space[64];
+};
+
+/*
+ * Extended state pointed by the fpstate pointer in the sigcontext.
+ * In addition to the fpstate, information encoded in the xstate_hdr
+ * indicates the presence of other extended state information
+ * supported by the processor and OS.
+ */
+struct _xstate {
+ struct _fpstate fpstate;
+ struct _xsave_hdr xstate_hdr;
+ struct _ymmh_state ymmh;
+ /* new processor state extensions go here */
+};
+
+#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */
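
A signal handler can check for FP_XSTATE_MAGIC1 itself: the sw_reserved area starts at byte 464 of the fxsave image that uc_mcontext.fpregs points to (see the field offsets in struct _fpstate above). A rough, x86-64 and glibc specific sketch; the offset constant and the printf-in-handler shortcut are for illustration only:

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <ucontext.h>

#define FP_XSTATE_MAGIC1        0x46505853U
#define FXSAVE_SW_BYTES_OFFSET  464     /* start of sw_reserved in the fxsave image */

static void handler(int sig, siginfo_t *si, void *ctx)
{
        ucontext_t *uc = ctx;
        const unsigned char *fx = (const unsigned char *)uc->uc_mcontext.fpregs;
        unsigned int magic1;

        (void)si;
        if (!fx)
                return;                 /* no FPU state saved for this frame */
        memcpy(&magic1, fx + FXSAVE_SW_BYTES_OFFSET, sizeof(magic1));
        /* printf() is not async-signal-safe; acceptable only for this demo */
        printf("signal %d: fpstate %s extended state (magic1=%#x)\n",
               sig, magic1 == FP_XSTATE_MAGIC1 ? "carries" : "lacks", magic1);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}
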
diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h
index ad1478c4ae1..ad1478c4ae1 100644
--- a/arch/x86/include/asm/sigcontext32.h
+++ b/arch/x86/include/uapi/asm/sigcontext32.h
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/uapi/asm/siginfo.h
index 34c47b3341c..34c47b3341c 100644
--- a/arch/x86/include/asm/siginfo.h
+++ b/arch/x86/include/uapi/asm/siginfo.h
diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..aa7d6ae39e0
--- /dev/null
+++ b/arch/x86/include/uapi/asm/signal.h
@@ -0,0 +1,139 @@
+#ifndef _UAPI_ASM_X86_SIGNAL_H
+#define _UAPI_ASM_X86_SIGNAL_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/compiler.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001u
+#define SA_NOCLDWAIT 0x00000002u
+#define SA_SIGINFO 0x00000004u
+#define SA_ONSTACK 0x08000000u
+#define SA_RESTART 0x10000000u
+#define SA_NODEFER 0x40000000u
+#define SA_RESETHAND 0x80000000u
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __ASSEMBLY__
+
+
+#ifdef __i386__
+# ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+# endif /* ! __KERNEL__ */
+#else /* __i386__ */
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+#endif /* !__i386__ */
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_X86_SIGNAL_H */
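
User code consumes these constants through sigaction(2) and sigaltstack(2); glibc wraps the raw structures shown here. A small sketch installing a SIGSEGV handler that runs on an alternate stack, which is the usual reason for SA_ONSTACK:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>

static void on_segv(int sig, siginfo_t *si, void *ctx)
{
        (void)sig; (void)ctx;
        /* fprintf() is not async-signal-safe; good enough for a demo */
        fprintf(stderr, "SIGSEGV at address %p\n", si->si_addr);
        _Exit(1);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        /* alternate stack, so the handler still runs after a stack overflow */
        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        sigaltstack(&ss, NULL);

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = on_segv;
        sa.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESETHAND;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        raise(SIGSEGV);                 /* demonstrate the handler */
        return 0;
}
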
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/uapi/asm/socket.h
index 6b71384b9d8..6b71384b9d8 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/uapi/asm/socket.h
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/uapi/asm/sockios.h
index def6d4746ee..def6d4746ee 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/uapi/asm/sockios.h
diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/uapi/asm/stat.h
index 7b3ddc34858..7b3ddc34858 100644
--- a/arch/x86/include/asm/stat.h
+++ b/arch/x86/include/uapi/asm/stat.h
diff --git a/arch/x86/include/asm/statfs.h b/arch/x86/include/uapi/asm/statfs.h
index 2d0adbf99a8..2d0adbf99a8 100644
--- a/arch/x86/include/asm/statfs.h
+++ b/arch/x86/include/uapi/asm/statfs.h
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
new file mode 100644
index 00000000000..b5d7640abc5
--- /dev/null
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -0,0 +1,132 @@
+#ifndef _UAPI__SVM_H
+#define _UAPI__SVM_H
+
+#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR3 0x003
+#define SVM_EXIT_READ_CR4 0x004
+#define SVM_EXIT_READ_CR8 0x008
+#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR3 0x013
+#define SVM_EXIT_WRITE_CR4 0x014
+#define SVM_EXIT_WRITE_CR8 0x018
+#define SVM_EXIT_READ_DR0 0x020
+#define SVM_EXIT_READ_DR1 0x021
+#define SVM_EXIT_READ_DR2 0x022
+#define SVM_EXIT_READ_DR3 0x023
+#define SVM_EXIT_READ_DR4 0x024
+#define SVM_EXIT_READ_DR5 0x025
+#define SVM_EXIT_READ_DR6 0x026
+#define SVM_EXIT_READ_DR7 0x027
+#define SVM_EXIT_WRITE_DR0 0x030
+#define SVM_EXIT_WRITE_DR1 0x031
+#define SVM_EXIT_WRITE_DR2 0x032
+#define SVM_EXIT_WRITE_DR3 0x033
+#define SVM_EXIT_WRITE_DR4 0x034
+#define SVM_EXIT_WRITE_DR5 0x035
+#define SVM_EXIT_WRITE_DR6 0x036
+#define SVM_EXIT_WRITE_DR7 0x037
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_INTR 0x060
+#define SVM_EXIT_NMI 0x061
+#define SVM_EXIT_SMI 0x062
+#define SVM_EXIT_INIT 0x063
+#define SVM_EXIT_VINTR 0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ 0x066
+#define SVM_EXIT_GDTR_READ 0x067
+#define SVM_EXIT_LDTR_READ 0x068
+#define SVM_EXIT_TR_READ 0x069
+#define SVM_EXIT_IDTR_WRITE 0x06a
+#define SVM_EXIT_GDTR_WRITE 0x06b
+#define SVM_EXIT_LDTR_WRITE 0x06c
+#define SVM_EXIT_TR_WRITE 0x06d
+#define SVM_EXIT_RDTSC 0x06e
+#define SVM_EXIT_RDPMC 0x06f
+#define SVM_EXIT_PUSHF 0x070
+#define SVM_EXIT_POPF 0x071
+#define SVM_EXIT_CPUID 0x072
+#define SVM_EXIT_RSM 0x073
+#define SVM_EXIT_IRET 0x074
+#define SVM_EXIT_SWINT 0x075
+#define SVM_EXIT_INVD 0x076
+#define SVM_EXIT_PAUSE 0x077
+#define SVM_EXIT_HLT 0x078
+#define SVM_EXIT_INVLPG 0x079
+#define SVM_EXIT_INVLPGA 0x07a
+#define SVM_EXIT_IOIO 0x07b
+#define SVM_EXIT_MSR 0x07c
+#define SVM_EXIT_TASK_SWITCH 0x07d
+#define SVM_EXIT_FERR_FREEZE 0x07e
+#define SVM_EXIT_SHUTDOWN 0x07f
+#define SVM_EXIT_VMRUN 0x080
+#define SVM_EXIT_VMMCALL 0x081
+#define SVM_EXIT_VMLOAD 0x082
+#define SVM_EXIT_VMSAVE 0x083
+#define SVM_EXIT_STGI 0x084
+#define SVM_EXIT_CLGI 0x085
+#define SVM_EXIT_SKINIT 0x086
+#define SVM_EXIT_RDTSCP 0x087
+#define SVM_EXIT_ICEBP 0x088
+#define SVM_EXIT_WBINVD 0x089
+#define SVM_EXIT_MONITOR 0x08a
+#define SVM_EXIT_MWAIT 0x08b
+#define SVM_EXIT_MWAIT_COND 0x08c
+#define SVM_EXIT_XSETBV 0x08d
+#define SVM_EXIT_NPF 0x400
+
+#define SVM_EXIT_ERR -1
+
+#define SVM_EXIT_REASONS \
+ { SVM_EXIT_READ_CR0, "read_cr0" }, \
+ { SVM_EXIT_READ_CR3, "read_cr3" }, \
+ { SVM_EXIT_READ_CR4, "read_cr4" }, \
+ { SVM_EXIT_READ_CR8, "read_cr8" }, \
+ { SVM_EXIT_WRITE_CR0, "write_cr0" }, \
+ { SVM_EXIT_WRITE_CR3, "write_cr3" }, \
+ { SVM_EXIT_WRITE_CR4, "write_cr4" }, \
+ { SVM_EXIT_WRITE_CR8, "write_cr8" }, \
+ { SVM_EXIT_READ_DR0, "read_dr0" }, \
+ { SVM_EXIT_READ_DR1, "read_dr1" }, \
+ { SVM_EXIT_READ_DR2, "read_dr2" }, \
+ { SVM_EXIT_READ_DR3, "read_dr3" }, \
+ { SVM_EXIT_WRITE_DR0, "write_dr0" }, \
+ { SVM_EXIT_WRITE_DR1, "write_dr1" }, \
+ { SVM_EXIT_WRITE_DR2, "write_dr2" }, \
+ { SVM_EXIT_WRITE_DR3, "write_dr3" }, \
+ { SVM_EXIT_WRITE_DR5, "write_dr5" }, \
+ { SVM_EXIT_WRITE_DR7, "write_dr7" }, \
+ { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
+ { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
+ { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
+ { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
+ { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
+ { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
+ { SVM_EXIT_INTR, "interrupt" }, \
+ { SVM_EXIT_NMI, "nmi" }, \
+ { SVM_EXIT_SMI, "smi" }, \
+ { SVM_EXIT_INIT, "init" }, \
+ { SVM_EXIT_VINTR, "vintr" }, \
+ { SVM_EXIT_CPUID, "cpuid" }, \
+ { SVM_EXIT_INVD, "invd" }, \
+ { SVM_EXIT_HLT, "hlt" }, \
+ { SVM_EXIT_INVLPG, "invlpg" }, \
+ { SVM_EXIT_INVLPGA, "invlpga" }, \
+ { SVM_EXIT_IOIO, "io" }, \
+ { SVM_EXIT_MSR, "msr" }, \
+ { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
+ { SVM_EXIT_SHUTDOWN, "shutdown" }, \
+ { SVM_EXIT_VMRUN, "vmrun" }, \
+ { SVM_EXIT_VMMCALL, "hypercall" }, \
+ { SVM_EXIT_VMLOAD, "vmload" }, \
+ { SVM_EXIT_VMSAVE, "vmsave" }, \
+ { SVM_EXIT_STGI, "stgi" }, \
+ { SVM_EXIT_CLGI, "clgi" }, \
+ { SVM_EXIT_SKINIT, "skinit" }, \
+ { SVM_EXIT_WBINVD, "wbinvd" }, \
+ { SVM_EXIT_MONITOR, "monitor" }, \
+ { SVM_EXIT_MWAIT, "mwait" }, \
+ { SVM_EXIT_XSETBV, "xsetbv" }, \
+ { SVM_EXIT_NPF, "npf" }
+
+
+#endif /* _UAPI__SVM_H */
diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/uapi/asm/swab.h
index 7f235c7105c..7f235c7105c 100644
--- a/arch/x86/include/asm/swab.h
+++ b/arch/x86/include/uapi/asm/swab.h
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/uapi/asm/termbits.h
index 3935b106de7..3935b106de7 100644
--- a/arch/x86/include/asm/termbits.h
+++ b/arch/x86/include/uapi/asm/termbits.h
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/uapi/asm/termios.h
index 280d78a9d96..280d78a9d96 100644
--- a/arch/x86/include/asm/termios.h
+++ b/arch/x86/include/uapi/asm/termios.h
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/uapi/asm/types.h
index 8e8c23fef08..8e8c23fef08 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/uapi/asm/types.h
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/uapi/asm/ucontext.h
index b7c29c8017f..b7c29c8017f 100644
--- a/arch/x86/include/asm/ucontext.h
+++ b/arch/x86/include/uapi/asm/ucontext.h
diff --git a/arch/x86/include/uapi/asm/unistd.h b/arch/x86/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..a26df0d75cd
--- /dev/null
+++ b/arch/x86/include/uapi/asm/unistd.h
@@ -0,0 +1,17 @@
+#ifndef _UAPI_ASM_X86_UNISTD_H
+#define _UAPI_ASM_X86_UNISTD_H
+
+/* x32 syscall flag bit */
+#define __X32_SYSCALL_BIT 0x40000000
+
+#ifndef __KERNEL__
+# ifdef __i386__
+# include <asm/unistd_32.h>
+# elif defined(__ILP32__)
+# include <asm/unistd_x32.h>
+# else
+# include <asm/unistd_64.h>
+# endif
+#endif
+
+#endif /* _UAPI_ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/uapi/asm/vm86.h b/arch/x86/include/uapi/asm/vm86.h
new file mode 100644
index 00000000000..e0b243e9d85
--- /dev/null
+++ b/arch/x86/include/uapi/asm/vm86.h
@@ -0,0 +1,129 @@
+#ifndef _UAPI_ASM_X86_VM86_H
+#define _UAPI_ASM_X86_VM86_H
+
+/*
+ * I'm guessing at the VIF/VIP flag usage, but hope that this is how
+ * the Pentium uses them. Linux will return from vm86 mode when both
+ * VIF and VIP are set.
+ *
+ * On a Pentium, we could probably optimize the virtual flags directly
+ * in the eflags register instead of doing it "by hand" in vflags...
+ *
+ * Linus
+ */
+
+#include <asm/processor-flags.h>
+
+#define BIOSSEG 0x0f000
+
+#define CPU_086 0
+#define CPU_186 1
+#define CPU_286 2
+#define CPU_386 3
+#define CPU_486 4
+#define CPU_586 5
+
+/*
+ * Return values for the 'vm86()' system call
+ */
+#define VM86_TYPE(retval) ((retval) & 0xff)
+#define VM86_ARG(retval) ((retval) >> 8)
+
+#define VM86_SIGNAL 0 /* return due to signal */
+#define VM86_UNKNOWN 1 /* unhandled GP fault
+ - IO-instruction or similar */
+#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
+#define VM86_STI 3 /* sti/popf/iret instruction enabled
+ virtual interrupts */
+
+/*
+ * Additional return values when invoking new vm86()
+ */
+#define VM86_PICRETURN 4 /* return due to pending PIC request */
+#define VM86_TRAP 6 /* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define VM86_PLUS_INSTALL_CHECK 0
+#define VM86_ENTER 1
+#define VM86_ENTER_NO_BYPASS 2
+#define VM86_REQUEST_IRQ 3
+#define VM86_FREE_IRQ 4
+#define VM86_GET_IRQ_BITS 5
+#define VM86_GET_AND_RESET_IRQ 6
+
+/*
+ * This is the stack-layout seen by the user space program when we have
+ * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
+ * is 'kernel_vm86_regs' (see below).
+ */
+
+struct vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long __null_fs;
+ long __null_gs;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct revectored_struct {
+ unsigned long __map[8]; /* 256 bits */
+};
+
+struct vm86_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define VM86_SCREEN_BITMAP 0x0001
+
+struct vm86plus_info_struct {
+ unsigned long force_return_for_pic:1;
+ unsigned long vm86dbg_active:1; /* for debugger */
+ unsigned long vm86dbg_TFpendig:1; /* for debugger */
+ unsigned long unused:28;
+ unsigned long is_vm86pus:1; /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+struct vm86plus_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+};
+
+
+#endif /* _UAPI_ASM_X86_VM86_H */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
new file mode 100644
index 00000000000..979d03bce13
--- /dev/null
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -0,0 +1,109 @@
+/*
+ * vmx.h: VMX Architecture related definitions
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * A few random additions are:
+ * Copyright (C) 2006 Qumranet
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ */
+#ifndef _UAPIVMX_H
+#define _UAPIVMX_H
+
+
+#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
+
+#define EXIT_REASON_EXCEPTION_NMI 0
+#define EXIT_REASON_EXTERNAL_INTERRUPT 1
+#define EXIT_REASON_TRIPLE_FAULT 2
+
+#define EXIT_REASON_PENDING_INTERRUPT 7
+#define EXIT_REASON_NMI_WINDOW 8
+#define EXIT_REASON_TASK_SWITCH 9
+#define EXIT_REASON_CPUID 10
+#define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
+#define EXIT_REASON_INVLPG 14
+#define EXIT_REASON_RDPMC 15
+#define EXIT_REASON_RDTSC 16
+#define EXIT_REASON_VMCALL 18
+#define EXIT_REASON_VMCLEAR 19
+#define EXIT_REASON_VMLAUNCH 20
+#define EXIT_REASON_VMPTRLD 21
+#define EXIT_REASON_VMPTRST 22
+#define EXIT_REASON_VMREAD 23
+#define EXIT_REASON_VMRESUME 24
+#define EXIT_REASON_VMWRITE 25
+#define EXIT_REASON_VMOFF 26
+#define EXIT_REASON_VMON 27
+#define EXIT_REASON_CR_ACCESS 28
+#define EXIT_REASON_DR_ACCESS 29
+#define EXIT_REASON_IO_INSTRUCTION 30
+#define EXIT_REASON_MSR_READ 31
+#define EXIT_REASON_MSR_WRITE 32
+#define EXIT_REASON_INVALID_STATE 33
+#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
+#define EXIT_REASON_PAUSE_INSTRUCTION 40
+#define EXIT_REASON_MCE_DURING_VMENTRY 41
+#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_EPT_VIOLATION 48
+#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_INVPCID 58
+
+#define VMX_EXIT_REASONS \
+ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
+ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
+ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
+ { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
+ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
+ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
+ { EXIT_REASON_CPUID, "CPUID" }, \
+ { EXIT_REASON_HLT, "HLT" }, \
+ { EXIT_REASON_INVLPG, "INVLPG" }, \
+ { EXIT_REASON_RDPMC, "RDPMC" }, \
+ { EXIT_REASON_RDTSC, "RDTSC" }, \
+ { EXIT_REASON_VMCALL, "VMCALL" }, \
+ { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
+ { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
+ { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
+ { EXIT_REASON_VMPTRST, "VMPTRST" }, \
+ { EXIT_REASON_VMREAD, "VMREAD" }, \
+ { EXIT_REASON_VMRESUME, "VMRESUME" }, \
+ { EXIT_REASON_VMWRITE, "VMWRITE" }, \
+ { EXIT_REASON_VMOFF, "VMOFF" }, \
+ { EXIT_REASON_VMON, "VMON" }, \
+ { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
+ { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
+ { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
+ { EXIT_REASON_MSR_READ, "MSR_READ" }, \
+ { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
+ { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
+ { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
+ { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
+ { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
+ { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
+ { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
+ { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
+ { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
+ { EXIT_REASON_WBINVD, "WBINVD" }
+
+
+#endif /* _UAPIVMX_H */
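[Editor's aside, not part of the patch: the { value, "name" } pairs in VMX_EXIT_REASONS are shaped for symbolic pretty-printing of exit reasons (for example by the kernel's trace-event helpers). A hedged, standalone consumer of the same list could look like this; the include path is an assumption.]

	#include <stddef.h>
	#include "vmx.h"	/* assumed path to this UAPI header */

	struct exit_reason_name { unsigned int reason; const char *name; };

	static const struct exit_reason_name vmx_exit_names[] = {
		VMX_EXIT_REASONS
	};

	static const char *vmx_exit_reason_str(unsigned int reason)
	{
		size_t i;

		/* Drop the failed-VM-entry flag before the lookup. */
		reason &= ~VMX_EXIT_REASONS_FAILED_VMENTRY;
		for (i = 0; i < sizeof(vmx_exit_names) / sizeof(vmx_exit_names[0]); i++)
			if (vmx_exit_names[i].reason == reason)
				return vmx_exit_names[i].name;
		return "UNKNOWN";
	}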
diff --git a/arch/x86/include/uapi/asm/vsyscall.h b/arch/x86/include/uapi/asm/vsyscall.h
new file mode 100644
index 00000000000..85dc1b3825a
--- /dev/null
+++ b/arch/x86/include/uapi/asm/vsyscall.h
@@ -0,0 +1,17 @@
+#ifndef _UAPI_ASM_X86_VSYSCALL_H
+#define _UAPI_ASM_X86_VSYSCALL_H
+
+enum vsyscall_num {
+ __NR_vgettimeofday,
+ __NR_vtime,
+ __NR_vgetcpu,
+};
+
+#define VSYSCALL_START (-10UL << 20)
+#define VSYSCALL_SIZE 1024
+#define VSYSCALL_END (-2UL << 20)
+#define VSYSCALL_MAPPED_PAGES 1
+#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
+
+
+#endif /* _UAPI_ASM_X86_VSYSCALL_H */
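[Editor's aside, not part of the patch: a small standalone sketch of what the macros above evaluate to on x86-64. VSYSCALL_START is -10UL << 20, i.e. 0xffffffffff600000, and the three legacy entries are spaced VSYSCALL_SIZE (1024) bytes apart. The macros are restated locally so the sketch is self-contained.]

	#include <stdio.h>

	#define VSYSCALL_START (-10UL << 20)			/* mirrors the header */
	#define VSYSCALL_SIZE 1024
	#define VSYSCALL_ADDR(nr) (VSYSCALL_START + VSYSCALL_SIZE * (nr))

	int main(void)
	{
		/* Expected: 0xffffffffff600000, ...600400, ...600800 */
		printf("vgettimeofday: %#lx\n", VSYSCALL_ADDR(0));
		printf("vtime:         %#lx\n", VSYSCALL_ADDR(1));
		printf("vgetcpu:       %#lx\n", VSYSCALL_ADDR(2));
		return 0;
	}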
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e48cafcf92a..bacf4b0d91f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1706,3 +1706,9 @@ int __acpi_release_global_lock(unsigned int *lock)
} while (unlikely (val != old));
return old & 0x1;
}
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ e820_add_region(addr, size, E820_ACPI);
+ update_e820();
+}
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index a65829ac2b9..9c2aa89a11c 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -22,6 +22,7 @@
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <asm/numachip/numachip.h>
#include <asm/numachip/numachip_csr.h>
#include <asm/smp.h>
#include <asm/apic.h>
@@ -179,6 +180,7 @@ static int __init numachip_system_init(void)
return 0;
x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
+ x86_init.pci.arch_init = pci_numachip_init;
map_csrs();
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 6a05c1d327a..5b7d4fa5d3b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -24,8 +24,6 @@ struct mce_bank {
int mce_severity(struct mce *a, int tolerant, char **msg);
struct dentry *mce_get_debugfs_dir(void);
-extern int mce_ser;
-
extern struct mce_bank *mce_banks;
#ifdef CONFIG_X86_MCE_INTEL
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 13017626f9a..beb1f1689e5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -193,9 +193,9 @@ int mce_severity(struct mce *m, int tolerant, char **msg)
continue;
if ((m->mcgstatus & s->mcgmask) != s->mcgres)
continue;
- if (s->ser == SER_REQUIRED && !mce_ser)
+ if (s->ser == SER_REQUIRED && !mca_cfg.ser)
continue;
- if (s->ser == NO_SER && mce_ser)
+ if (s->ser == NO_SER && mca_cfg.ser)
continue;
if (s->context && ctx != s->context)
continue;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 46cbf868969..80dbda84f1c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -58,34 +58,26 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
-int mce_disabled __read_mostly;
-
#define SPINUNIT 100 /* 100ns */
atomic_t mce_entry;
DEFINE_PER_CPU(unsigned, mce_exception_count);
-/*
- * Tolerant levels:
- * 0: always panic on uncorrected errors, log corrected errors
- * 1: panic or SIGBUS on uncorrected errors, log corrected errors
- * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
- * 3: never panic or SIGBUS, log all errors (for testing only)
- */
-static int tolerant __read_mostly = 1;
-static int banks __read_mostly;
-static int rip_msr __read_mostly;
-static int mce_bootlog __read_mostly = -1;
-static int monarch_timeout __read_mostly = -1;
-static int mce_panic_timeout __read_mostly;
-static int mce_dont_log_ce __read_mostly;
-int mce_cmci_disabled __read_mostly;
-int mce_ignore_ce __read_mostly;
-int mce_ser __read_mostly;
-int mce_bios_cmci_threshold __read_mostly;
-
-struct mce_bank *mce_banks __read_mostly;
+struct mce_bank *mce_banks __read_mostly;
+
+struct mca_config mca_cfg __read_mostly = {
+ .bootlog = -1,
+ /*
+ * Tolerant levels:
+ * 0: always panic on uncorrected errors, log corrected errors
+ * 1: panic or SIGBUS on uncorrected errors, log corrected errors
+ * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
+ * 3: never panic or SIGBUS, log all errors (for testing only)
+ */
+ .tolerant = 1,
+ .monarch_timeout = -1
+};
/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
@@ -302,7 +294,7 @@ static void wait_for_panic(void)
while (timeout-- > 0)
udelay(1);
if (panic_timeout == 0)
- panic_timeout = mce_panic_timeout;
+ panic_timeout = mca_cfg.panic_timeout;
panic("Panicing machine check CPU died");
}
@@ -360,7 +352,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
pr_emerg(HW_ERR "Machine check: %s\n", exp);
if (!fake_panic) {
if (panic_timeout == 0)
- panic_timeout = mce_panic_timeout;
+ panic_timeout = mca_cfg.panic_timeout;
panic(msg);
} else
pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
@@ -372,7 +364,7 @@ static int msr_to_offset(u32 msr)
{
unsigned bank = __this_cpu_read(injectm.bank);
- if (msr == rip_msr)
+ if (msr == mca_cfg.rip_msr)
return offsetof(struct mce, ip);
if (msr == MSR_IA32_MCx_STATUS(bank))
return offsetof(struct mce, status);
@@ -451,8 +443,8 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
m->cs |= 3;
}
/* Use accurate RIP reporting if available. */
- if (rip_msr)
- m->ip = mce_rdmsrl(rip_msr);
+ if (mca_cfg.rip_msr)
+ m->ip = mce_rdmsrl(mca_cfg.rip_msr);
}
}
@@ -513,7 +505,7 @@ static int mce_ring_add(unsigned long pfn)
int mce_available(struct cpuinfo_x86 *c)
{
- if (mce_disabled)
+ if (mca_cfg.disabled)
return 0;
return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}
@@ -565,7 +557,7 @@ static void mce_read_aux(struct mce *m, int i)
/*
* Mask the reported address by the reported granularity.
*/
- if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
+ if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
u8 shift = MCI_MISC_ADDR_LSB(m->misc);
m->addr >>= shift;
m->addr <<= shift;
@@ -599,7 +591,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
mce_gather_info(&m, NULL);
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b))
continue;
@@ -620,7 +612,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
* TBD do the same check for MCI_STATUS_EN here?
*/
if (!(flags & MCP_UC) &&
- (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+ (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
continue;
mce_read_aux(&m, i);
@@ -631,7 +623,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
* Don't get the IP here because it's unlikely to
* have anything to do with the actual error location.
*/
- if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
+ if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
mce_log(&m);
/*
@@ -658,14 +650,14 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
{
int i, ret = 0;
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
if (m->status & MCI_STATUS_VAL) {
__set_bit(i, validp);
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
}
- if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
+ if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
ret = 1;
}
return ret;
@@ -696,11 +688,11 @@ static int mce_timed_out(u64 *t)
rmb();
if (atomic_read(&mce_paniced))
wait_for_panic();
- if (!monarch_timeout)
+ if (!mca_cfg.monarch_timeout)
goto out;
if ((s64)*t < SPINUNIT) {
/* CHECKME: Make panic default for 1 too? */
- if (tolerant < 1)
+ if (mca_cfg.tolerant < 1)
mce_panic("Timeout synchronizing machine check over CPUs",
NULL, NULL);
cpu_missing = 1;
@@ -750,7 +742,8 @@ static void mce_reign(void)
* Grade the severity of the errors of all the CPUs.
*/
for_each_possible_cpu(cpu) {
- int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
+ int severity = mce_severity(&per_cpu(mces_seen, cpu),
+ mca_cfg.tolerant,
&nmsg);
if (severity > global_worst) {
msg = nmsg;
@@ -764,7 +757,7 @@ static void mce_reign(void)
* This dumps all the mces in the log buffer and stops the
* other CPUs.
*/
- if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
+ if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
mce_panic("Fatal Machine check", m, msg);
/*
@@ -777,7 +770,7 @@ static void mce_reign(void)
* No machine check event found. Must be some external
* source or one CPU is hung. Panic.
*/
- if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
+ if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
mce_panic("Machine check from unknown source", NULL, NULL);
/*
@@ -801,7 +794,7 @@ static int mce_start(int *no_way_out)
{
int order;
int cpus = num_online_cpus();
- u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
+ u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
if (!timeout)
return -1;
@@ -865,7 +858,7 @@ static int mce_start(int *no_way_out)
static int mce_end(int order)
{
int ret = -1;
- u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
+ u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
if (!timeout)
goto reset;
@@ -946,7 +939,7 @@ static void mce_clear_state(unsigned long *toclear)
{
int i;
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
if (test_bit(i, toclear))
mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
}
@@ -1011,6 +1004,7 @@ static void mce_clear_info(struct mce_info *mi)
*/
void do_machine_check(struct pt_regs *regs, long error_code)
{
+ struct mca_config *cfg = &mca_cfg;
struct mce m, *final;
int i;
int worst = 0;
@@ -1022,7 +1016,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
int order;
/*
* If no_way_out gets set, there is no safe way to recover from this
- * MCE. If tolerant is cranked up, we'll try anyway.
+ * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
*/
int no_way_out = 0;
/*
@@ -1038,7 +1032,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
this_cpu_inc(mce_exception_count);
- if (!banks)
+ if (!cfg->banks)
goto out;
mce_gather_info(&m, regs);
@@ -1065,7 +1059,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* because the first one to see it will clear it.
*/
order = mce_start(&no_way_out);
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < cfg->banks; i++) {
__clear_bit(i, toclear);
if (!test_bit(i, valid_banks))
continue;
@@ -1084,7 +1078,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* Non uncorrected or non signaled errors are handled by
* machine_check_poll. Leave them alone, unless this panics.
*/
- if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+ if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
!no_way_out)
continue;
@@ -1093,7 +1087,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*/
add_taint(TAINT_MACHINE_CHECK);
- severity = mce_severity(&m, tolerant, NULL);
+ severity = mce_severity(&m, cfg->tolerant, NULL);
/*
* When machine check was for corrected handler don't touch,
@@ -1117,7 +1111,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* When the ring overflows we just ignore the AO error.
* RED-PEN add some logging mechanism when
* usable_address or mce_add_ring fails.
- * RED-PEN don't ignore overflow for tolerant == 0
+ * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
*/
if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
mce_ring_add(m.addr >> PAGE_SHIFT);
@@ -1149,7 +1143,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
* issues we try to recover, or limit damage to the current
* process.
*/
- if (tolerant < 3) {
+ if (cfg->tolerant < 3) {
if (no_way_out)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst == MCE_AR_SEVERITY) {
@@ -1377,11 +1371,13 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
int i;
+ u8 num_banks = mca_cfg.banks;
- mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
+ mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
if (!mce_banks)
return -ENOMEM;
- for (i = 0; i < banks; i++) {
+
+ for (i = 0; i < num_banks; i++) {
struct mce_bank *b = &mce_banks[i];
b->ctl = -1ULL;
@@ -1401,7 +1397,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
rdmsrl(MSR_IA32_MCG_CAP, cap);
b = cap & MCG_BANKCNT_MASK;
- if (!banks)
+ if (!mca_cfg.banks)
pr_info("CPU supports %d MCE banks\n", b);
if (b > MAX_NR_BANKS) {
@@ -1411,8 +1407,9 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
}
/* Don't support asymmetric configurations today */
- WARN_ON(banks != 0 && b != banks);
- banks = b;
+ WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+ mca_cfg.banks = b;
+
if (!mce_banks) {
int err = __mcheck_cpu_mce_banks_init();
@@ -1422,25 +1419,29 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
/* Use accurate RIP reporting if available. */
if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
- rip_msr = MSR_IA32_MCG_EIP;
+ mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
if (cap & MCG_SER_P)
- mce_ser = 1;
+ mca_cfg.ser = true;
return 0;
}
static void __mcheck_cpu_init_generic(void)
{
+ enum mcp_flags m_fl = 0;
mce_banks_t all_banks;
u64 cap;
int i;
+ if (!mca_cfg.bootlog)
+ m_fl = MCP_DONTLOG;
+
/*
* Log the machine checks left over from the previous reset.
*/
bitmap_fill(all_banks, MAX_NR_BANKS);
- machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
+ machine_check_poll(MCP_UC | m_fl, &all_banks);
set_in_cr4(X86_CR4_MCE);
@@ -1448,7 +1449,7 @@ static void __mcheck_cpu_init_generic(void)
if (cap & MCG_CTL_P)
wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
if (!b->init)
@@ -1489,6 +1490,8 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
+ struct mca_config *cfg = &mca_cfg;
+
if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
pr_info("unknown CPU type - not enabling MCE support\n");
return -EOPNOTSUPP;
@@ -1496,7 +1499,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
/* This should be disabled by the BIOS, but isn't always */
if (c->x86_vendor == X86_VENDOR_AMD) {
- if (c->x86 == 15 && banks > 4) {
+ if (c->x86 == 15 && cfg->banks > 4) {
/*
* disable GART TBL walk error reporting, which
* trips off incorrectly with the IOMMU & 3ware
@@ -1504,18 +1507,18 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
}
- if (c->x86 <= 17 && mce_bootlog < 0) {
+ if (c->x86 <= 17 && cfg->bootlog < 0) {
/*
* Lots of broken BIOS around that don't clear them
* by default and leave crap in there. Don't log:
*/
- mce_bootlog = 0;
+ cfg->bootlog = 0;
}
/*
* Various K7s with broken bank 0 around. Always disable
* by default.
*/
- if (c->x86 == 6 && banks > 0)
+ if (c->x86 == 6 && cfg->banks > 0)
mce_banks[0].ctl = 0;
/*
@@ -1566,7 +1569,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
* valid event later, merely don't write CTL0.
*/
- if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
+ if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
mce_banks[0].init = 0;
/*
@@ -1574,23 +1577,23 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
* synchronization with a one second timeout.
*/
if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
- monarch_timeout < 0)
- monarch_timeout = USEC_PER_SEC;
+ cfg->monarch_timeout < 0)
+ cfg->monarch_timeout = USEC_PER_SEC;
/*
* There are also broken BIOSes on some Pentium M and
* earlier systems:
*/
- if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
- mce_bootlog = 0;
+ if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
+ cfg->bootlog = 0;
if (c->x86 == 6 && c->x86_model == 45)
quirk_no_way_out = quirk_sandybridge_ifu;
}
- if (monarch_timeout < 0)
- monarch_timeout = 0;
- if (mce_bootlog != 0)
- mce_panic_timeout = 30;
+ if (cfg->monarch_timeout < 0)
+ cfg->monarch_timeout = 0;
+ if (cfg->bootlog != 0)
+ cfg->panic_timeout = 30;
return 0;
}
@@ -1635,7 +1638,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
__this_cpu_write(mce_next_interval, iv);
- if (mce_ignore_ce || !iv)
+ if (mca_cfg.ignore_ce || !iv)
return;
t->expires = round_jiffies(jiffies + iv);
@@ -1668,7 +1671,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
*/
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
- if (mce_disabled)
+ if (mca_cfg.disabled)
return;
if (__mcheck_cpu_ancient_init(c))
@@ -1678,7 +1681,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
return;
if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
- mce_disabled = 1;
+ mca_cfg.disabled = true;
return;
}
@@ -1951,6 +1954,8 @@ static struct miscdevice mce_chrdev_device = {
*/
static int __init mcheck_enable(char *str)
{
+ struct mca_config *cfg = &mca_cfg;
+
if (*str == 0) {
enable_p5_mce();
return 1;
@@ -1958,22 +1963,22 @@ static int __init mcheck_enable(char *str)
if (*str == '=')
str++;
if (!strcmp(str, "off"))
- mce_disabled = 1;
+ cfg->disabled = true;
else if (!strcmp(str, "no_cmci"))
- mce_cmci_disabled = 1;
+ cfg->cmci_disabled = true;
else if (!strcmp(str, "dont_log_ce"))
- mce_dont_log_ce = 1;
+ cfg->dont_log_ce = true;
else if (!strcmp(str, "ignore_ce"))
- mce_ignore_ce = 1;
+ cfg->ignore_ce = true;
else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
- mce_bootlog = (str[0] == 'b');
+ cfg->bootlog = (str[0] == 'b');
else if (!strcmp(str, "bios_cmci_threshold"))
- mce_bios_cmci_threshold = 1;
+ cfg->bios_cmci_threshold = true;
else if (isdigit(str[0])) {
- get_option(&str, &tolerant);
+ get_option(&str, &(cfg->tolerant));
if (*str == ',') {
++str;
- get_option(&str, &monarch_timeout);
+ get_option(&str, &(cfg->monarch_timeout));
}
} else {
pr_info("mce argument %s ignored. Please use /sys\n", str);
@@ -2002,7 +2007,7 @@ static int mce_disable_error_reporting(void)
{
int i;
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
if (b->init)
@@ -2142,15 +2147,15 @@ static ssize_t set_ignore_ce(struct device *s,
if (strict_strtoull(buf, 0, &new) < 0)
return -EINVAL;
- if (mce_ignore_ce ^ !!new) {
+ if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
mce_timer_delete_all();
on_each_cpu(mce_disable_cmci, NULL, 1);
- mce_ignore_ce = 1;
+ mca_cfg.ignore_ce = true;
} else {
/* enable ce features */
- mce_ignore_ce = 0;
+ mca_cfg.ignore_ce = false;
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
@@ -2166,14 +2171,14 @@ static ssize_t set_cmci_disabled(struct device *s,
if (strict_strtoull(buf, 0, &new) < 0)
return -EINVAL;
- if (mce_cmci_disabled ^ !!new) {
+ if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
on_each_cpu(mce_disable_cmci, NULL, 1);
- mce_cmci_disabled = 1;
+ mca_cfg.cmci_disabled = true;
} else {
/* enable cmci */
- mce_cmci_disabled = 0;
+ mca_cfg.cmci_disabled = false;
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
@@ -2190,9 +2195,9 @@ static ssize_t store_int_with_restart(struct device *s,
}
static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
-static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
-static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
-static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
+static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
+static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
+static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
static struct dev_ext_attribute dev_attr_check_interval = {
__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
@@ -2200,13 +2205,13 @@ static struct dev_ext_attribute dev_attr_check_interval = {
};
static struct dev_ext_attribute dev_attr_ignore_ce = {
- __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
- &mce_ignore_ce
+ __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
+ &mca_cfg.ignore_ce
};
static struct dev_ext_attribute dev_attr_cmci_disabled = {
- __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
- &mce_cmci_disabled
+ __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
+ &mca_cfg.cmci_disabled
};
static struct device_attribute *mce_device_attrs[] = {
@@ -2253,7 +2258,7 @@ static __cpuinit int mce_device_create(unsigned int cpu)
if (err)
goto error;
}
- for (j = 0; j < banks; j++) {
+ for (j = 0; j < mca_cfg.banks; j++) {
err = device_create_file(dev, &mce_banks[j].attr);
if (err)
goto error2;
@@ -2285,7 +2290,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
for (i = 0; mce_device_attrs[i]; i++)
device_remove_file(dev, mce_device_attrs[i]);
- for (i = 0; i < banks; i++)
+ for (i = 0; i < mca_cfg.banks; i++)
device_remove_file(dev, &mce_banks[i].attr);
device_unregister(dev);
@@ -2304,7 +2309,7 @@ static void __cpuinit mce_disable_cpu(void *h)
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
if (b->init)
@@ -2322,7 +2327,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
if (!(action & CPU_TASKS_FROZEN))
cmci_reenable();
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
if (b->init)
@@ -2375,7 +2380,7 @@ static __init void mce_init_banks(void)
{
int i;
- for (i = 0; i < banks; i++) {
+ for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
struct device_attribute *a = &b->attr;
@@ -2426,7 +2431,7 @@ device_initcall_sync(mcheck_init_device);
*/
static int __init mcheck_disable(char *str)
{
- mce_disabled = 1;
+ mca_cfg.disabled = true;
return 1;
}
__setup("nomce", mcheck_disable);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 4f9a3cbfc4a..402c454fbff 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -53,7 +53,7 @@ static int cmci_supported(int *banks)
{
u64 cap;
- if (mce_cmci_disabled || mce_ignore_ce)
+ if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
return 0;
/*
@@ -200,7 +200,7 @@ static void cmci_discover(int banks)
continue;
}
- if (!mce_bios_cmci_threshold) {
+ if (!mca_cfg.bios_cmci_threshold) {
val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
val |= CMCI_THRESHOLD;
} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
@@ -227,7 +227,7 @@ static void cmci_discover(int banks)
* set the thresholds properly or does not work with
* this boot option. Note down now and report later.
*/
- if (mce_bios_cmci_threshold && bios_zero_thresh &&
+ if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
bios_wrong_thresh = 1;
} else {
@@ -235,7 +235,7 @@ static void cmci_discover(int banks)
}
}
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
- if (mce_bios_cmci_threshold && bios_wrong_thresh) {
+ if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
pr_info_once(
"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
pr_info_once(
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index e4c1a418453..726bf963c22 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -606,7 +606,7 @@ void __init mtrr_bp_init(void)
/*
* This is an AMD specific MSR, but we assume(hope?) that
- * Intel will implement it to when they extend the address
+ * Intel will implement it too when they extend the address
* bus of the Xeon.
*/
if (cpuid_eax(0x80000000) >= 0x80000008) {
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index fbd89556229..3286a92e662 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -26,11 +26,6 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
#ifdef CONFIG_X86_32
static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
{
- /*
- * We use exception 16 if we have hardware math and we've either seen
- * it or the CPU claims it is internal
- */
- int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
seq_printf(m,
"fdiv_bug\t: %s\n"
"hlt_bug\t\t: %s\n"
@@ -45,7 +40,7 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
c->f00f_bug ? "yes" : "no",
c->coma_bug ? "yes" : "no",
c->hard_math ? "yes" : "no",
- fpu_exception ? "yes" : "no",
+ c->hard_math ? "yes" : "no",
c->cpuid_level,
c->wp_works_ok ? "yes" : "no");
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 13ad89971d4..74467feb4dc 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
@@ -30,6 +31,27 @@
int in_crash_kexec;
+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. The callback function pointer is assigned
+ * when the kvm_intel module is loaded.
+ *
+ * Protected by RCU.
+ */
+crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+ crash_vmclear_fn *do_vmclear_operation = NULL;
+
+ rcu_read_lock();
+ do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+ if (do_vmclear_operation)
+ do_vmclear_operation();
+ rcu_read_unlock();
+}
+
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -46,6 +68,11 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
#endif
crash_save_cpu(regs, cpu);
+ /*
+ * VMCLEAR VMCSs loaded on all cpus if needed.
+ */
+ cpu_crash_vmclear_loaded_vmcss();
+
/* Disable VMX or SVM if needed.
*
* We need to disable virtualization on all CPUs.
@@ -88,6 +115,11 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
kdump_nmi_shootdown_cpus();
+ /*
+ * VMCLEAR VMCSs loaded on this cpu if needed.
+ */
+ cpu_crash_vmclear_loaded_vmcss();
+
/* Booting kdump kernel with VMX or SVM enabled won't work,
* because (among other limitations) we can't disable paging
* with the virt flags.
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c763116c535..ff84d5469d7 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -739,7 +739,6 @@ ENTRY(ptregs_##name) ; \
ENDPROC(ptregs_##name)
PTREGSCALL1(iopl)
-PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 70641aff0c2..07a7a04529b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -864,7 +864,6 @@ END(stub_\func)
FORK_LIKE clone
FORK_LIKE fork
FORK_LIKE vfork
- PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
@@ -913,8 +912,6 @@ ENTRY(stub_rt_sigreturn)
END(stub_rt_sigreturn)
#ifdef CONFIG_X86_X32_ABI
- PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
-
ENTRY(stub_x32_rt_sigreturn)
CFI_STARTPROC
addq $8, %rsp
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 6e03b0d6913..7dc4e459c2b 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -42,39 +42,6 @@
* (these are usually mapped into the 0x30-0xff vector range)
*/
-#ifdef CONFIG_X86_32
-/*
- * Note that on a 486, we don't want to do a SIGFPE on an irq13
- * as the irq is unreliable, and exception 16 works correctly
- * (ie as explained in the intel literature). On a 386, you
- * can't use exception 16 due to bad IBM design, so we have to
- * rely on the less exact irq13.
- *
- * Careful.. Not only is IRQ13 unreliable, but it is also
- * leads to races. IBM designers who came up with it should
- * be shot.
- */
-
-static irqreturn_t math_error_irq(int cpl, void *dev_id)
-{
- outb(0, 0xF0);
- if (ignore_fpu_irq || !boot_cpu_data.hard_math)
- return IRQ_NONE;
- math_error(get_irq_regs(), 0, X86_TRAP_MF);
- return IRQ_HANDLED;
-}
-
-/*
- * New motherboards sometimes make IRQ 13 be a PCI interrupt,
- * so allow interrupt sharing.
- */
-static struct irqaction fpu_irq = {
- .handler = math_error_irq,
- .name = "fpu",
- .flags = IRQF_NO_THREAD,
-};
-#endif
-
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
@@ -242,13 +209,6 @@ void __init native_init_IRQ(void)
setup_irq(2, &irq2);
#ifdef CONFIG_X86_32
- /*
- * External FPU? Set up irq13 if so, for
- * original braindamaged IBM FERR coupling.
- */
- if (boot_cpu_data.hard_math && !cpu_has_fpu)
- setup_irq(FPU_IRQ, &fpu_irq);
-
irq_ctx_init(smp_processor_id());
#endif
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4180a874c76..08b973f6403 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -42,6 +42,7 @@
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
+#include <asm/kvm_guest.h>
static int kvmapf = 1;
@@ -62,6 +63,15 @@ static int parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
+static int kvmclock_vsyscall = 1;
+static int parse_no_kvmclock_vsyscall(char *arg)
+{
+ kvmclock_vsyscall = 0;
+ return 0;
+}
+
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -110,11 +120,6 @@ void kvm_async_pf_task_wait(u32 token)
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
struct kvm_task_sleep_node n, *e;
DEFINE_WAIT(wait);
- int cpu, idle;
-
- cpu = get_cpu();
- idle = idle_cpu(cpu);
- put_cpu();
spin_lock(&b->lock);
e = _find_apf_task(b, token);
@@ -128,7 +133,7 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token;
n.cpu = smp_processor_id();
- n.halted = idle || preempt_count() > 1;
+ n.halted = is_idle_task(current) || preempt_count() > 1;
init_waitqueue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
spin_unlock(&b->lock);
@@ -471,6 +476,9 @@ void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
+ if (kvmclock_vsyscall)
+ kvm_setup_vsyscall_timeinfo();
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index f1b42b3a186..220a360010f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -23,6 +23,7 @@
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/memblock.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
@@ -39,7 +40,7 @@ static int parse_no_kvmclock(char *arg)
early_param("no-kvmclock", parse_no_kvmclock);
/* The hypervisor will put information about time periodically here */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct pvclock_vcpu_time_info, hv_clock);
+static struct pvclock_vsyscall_time_info *hv_clock;
static struct pvclock_wall_clock wall_clock;
/*
@@ -52,15 +53,20 @@ static unsigned long kvm_get_wallclock(void)
struct pvclock_vcpu_time_info *vcpu_time;
struct timespec ts;
int low, high;
+ int cpu;
low = (int)__pa_symbol(&wall_clock);
high = ((u64)__pa_symbol(&wall_clock) >> 32);
native_write_msr(msr_kvm_wall_clock, low, high);
- vcpu_time = &get_cpu_var(hv_clock);
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ vcpu_time = &hv_clock[cpu].pvti;
pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
- put_cpu_var(hv_clock);
+
+ preempt_enable();
return ts.tv_sec;
}
@@ -74,9 +80,11 @@ static cycle_t kvm_clock_read(void)
{
struct pvclock_vcpu_time_info *src;
cycle_t ret;
+ int cpu;
preempt_disable_notrace();
- src = &__get_cpu_var(hv_clock);
+ cpu = smp_processor_id();
+ src = &hv_clock[cpu].pvti;
ret = pvclock_clocksource_read(src);
preempt_enable_notrace();
return ret;
@@ -99,8 +107,15 @@ static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
static unsigned long kvm_get_tsc_khz(void)
{
struct pvclock_vcpu_time_info *src;
- src = &per_cpu(hv_clock, 0);
- return pvclock_tsc_khz(src);
+ int cpu;
+ unsigned long tsc_khz;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ src = &hv_clock[cpu].pvti;
+ tsc_khz = pvclock_tsc_khz(src);
+ preempt_enable();
+ return tsc_khz;
}
static void kvm_get_preset_lpj(void)
@@ -119,10 +134,14 @@ bool kvm_check_and_clear_guest_paused(void)
{
bool ret = false;
struct pvclock_vcpu_time_info *src;
+ int cpu = smp_processor_id();
- src = &__get_cpu_var(hv_clock);
+ if (!hv_clock)
+ return ret;
+
+ src = &hv_clock[cpu].pvti;
if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
- __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+ src->flags &= ~PVCLOCK_GUEST_STOPPED;
ret = true;
}
@@ -141,9 +160,10 @@ int kvm_register_clock(char *txt)
{
int cpu = smp_processor_id();
int low, high, ret;
+ struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
- low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
- high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+ low = (int)__pa(src) | 1;
+ high = ((u64)__pa(src) >> 32);
ret = native_write_msr_safe(msr_kvm_system_time, low, high);
printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
cpu, high, low, txt);
@@ -197,6 +217,8 @@ static void kvm_shutdown(void)
void __init kvmclock_init(void)
{
+ unsigned long mem;
+
if (!kvm_para_available())
return;
@@ -209,8 +231,18 @@ void __init kvmclock_init(void)
printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
- if (kvm_register_clock("boot clock"))
+ mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
+ PAGE_SIZE);
+ if (!mem)
+ return;
+ hv_clock = __va(mem);
+
+ if (kvm_register_clock("boot clock")) {
+ hv_clock = NULL;
+ memblock_free(mem,
+ sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
return;
+ }
pv_time_ops.sched_clock = kvm_clock_read;
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
@@ -233,3 +265,37 @@ void __init kvmclock_init(void)
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
}
+
+int __init kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_X86_64
+ int cpu;
+ int ret;
+ u8 flags;
+ struct pvclock_vcpu_time_info *vcpu_time;
+ unsigned int size;
+
+ size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ vcpu_time = &hv_clock[cpu].pvti;
+ flags = pvclock_read_flags(vcpu_time);
+
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+ preempt_enable();
+ return 1;
+ }
+
+ if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+ preempt_enable();
+ return ret;
+ }
+
+ preempt_enable();
+
+ kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
+ return 0;
+}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 42eb3300dfc..85c39590c1a 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -17,23 +17,13 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <linux/bootmem.h>
+#include <asm/fixmap.h>
#include <asm/pvclock.h>
-/*
- * These are perodically updated
- * xen: magic shared_info page
- * kvm: gpa registered via msr
- * and then copied here.
- */
-struct pvclock_shadow_time {
- u64 tsc_timestamp; /* TSC at last update of time vals. */
- u64 system_timestamp; /* Time, in nanosecs, since boot. */
- u32 tsc_to_nsec_mul;
- int tsc_shift;
- u32 version;
- u8 flags;
-};
-
static u8 valid_flags __read_mostly = 0;
void pvclock_set_flags(u8 flags)
@@ -41,34 +31,6 @@ void pvclock_set_flags(u8 flags)
valid_flags = flags;
}
-static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
-{
- u64 delta = native_read_tsc() - shadow->tsc_timestamp;
- return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul,
- shadow->tsc_shift);
-}
-
-/*
- * Reads a consistent set of time-base values from hypervisor,
- * into a shadow data area.
- */
-static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
- struct pvclock_vcpu_time_info *src)
-{
- do {
- dst->version = src->version;
- rmb(); /* fetch version before data */
- dst->tsc_timestamp = src->tsc_timestamp;
- dst->system_timestamp = src->system_time;
- dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
- dst->tsc_shift = src->tsc_shift;
- dst->flags = src->flags;
- rmb(); /* test version after fetching data */
- } while ((src->version & 1) || (dst->version != src->version));
-
- return dst->version;
-}
-
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
{
u64 pv_tsc_khz = 1000000ULL << 32;
@@ -88,23 +50,32 @@ void pvclock_resume(void)
atomic64_set(&last_value, 0);
}
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
+{
+ unsigned version;
+ cycle_t ret;
+ u8 flags;
+
+ do {
+ version = __pvclock_read_cycles(src, &ret, &flags);
+ } while ((src->version & 1) || version != src->version);
+
+ return flags & valid_flags;
+}
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
- struct pvclock_shadow_time shadow;
unsigned version;
- cycle_t ret, offset;
+ cycle_t ret;
u64 last;
+ u8 flags;
do {
- version = pvclock_get_time_values(&shadow, src);
- barrier();
- offset = pvclock_get_nsec_offset(&shadow);
- ret = shadow.system_timestamp + offset;
- barrier();
- } while (version != src->version);
+ version = __pvclock_read_cycles(src, &ret, &flags);
+ } while ((src->version & 1) || version != src->version);
if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
- (shadow.flags & PVCLOCK_TSC_STABLE_BIT))
+ (flags & PVCLOCK_TSC_STABLE_BIT))
return ret;
/*
@@ -156,3 +127,71 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
+
+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
+
+static struct pvclock_vsyscall_time_info *
+pvclock_get_vsyscall_user_time_info(int cpu)
+{
+ if (!pvclock_vdso_info) {
+ BUG();
+ return NULL;
+ }
+
+ return &pvclock_vdso_info[cpu];
+}
+
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
+{
+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
+}
+
+#ifdef CONFIG_X86_64
+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
+ void *v)
+{
+ struct task_migration_notifier *mn = v;
+ struct pvclock_vsyscall_time_info *pvti;
+
+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
+
+ /* this is NULL when pvclock vsyscall is not initialized */
+ if (unlikely(pvti == NULL))
+ return NOTIFY_DONE;
+
+ pvti->migrate_count++;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvclock_migrate = {
+ .notifier_call = pvclock_task_migrate,
+};
+
+/*
+ * Initialize the generic pvclock vsyscall state. This allocates one
+ * or more pages for the per-vcpu pvclock information and sets up a
+ * fixmap mapping for them.
+ */
+
+int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+ int size)
+{
+ int idx;
+
+ WARN_ON(size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+
+ pvclock_vdso_info = i;
+
+ for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
+ __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
+ __pa_symbol(i) + (idx*PAGE_SIZE),
+ PAGE_KERNEL_VVAR);
+ }
+
+ register_task_migration_notifier(&pvclock_migrate);
+
+ return 0;
+}
+#endif
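[Editor's aside, not part of the patch: a hedged illustration of the version-retry protocol that the do/while loops around __pvclock_read_cycles() rely on. The hypervisor publishes an odd version while it updates the record and an even one when it is done, so readers retry until they observe a stable, even version. Memory barriers and the scaled TSC delta are omitted for brevity.]

	struct pvti_sketch {
		volatile unsigned int version;
		unsigned long long tsc_timestamp;
		unsigned long long system_time;
	};

	static unsigned long long pvti_read_stable(const struct pvti_sketch *p)
	{
		unsigned int v;
		unsigned long long t;

		do {
			v = p->version;			/* snapshot the version */
			t = p->system_time;		/* plus the TSC delta in the real code */
		} while ((v & 1) || v != p->version);	/* odd or changed: retry */

		return t;
	}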
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ca45696f30f..23ddd558fbd 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -143,11 +143,7 @@ int default_check_phys_apicid_present(int phys_apicid)
}
#endif
-#ifndef CONFIG_DEBUG_BOOT_PARAMS
-struct boot_params __initdata boot_params;
-#else
struct boot_params boot_params;
-#endif
/*
* Machine setup..
@@ -956,6 +952,10 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
+#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
+ acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
+#endif
+
reserve_crashkernel();
vsmp_init();
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index fbbb604313a..d6bf1f34a6e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -364,10 +364,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- put_user_ex(sas_ss_flags(regs->sp),
- &frame->uc.uc_stack.ss_flags);
- put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -414,7 +411,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
void __user *fp = NULL;
int err = 0;
- struct task_struct *me = current;
frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -433,10 +429,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- put_user_ex(sas_ss_flags(regs->sp),
- &frame->uc.uc_stack.ss_flags);
- put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
@@ -503,10 +496,7 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- put_user_ex(sas_ss_flags(regs->sp),
- &frame->uc.uc_stack.ss_flags);
- put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
if (ka->sa.sa_flags & SA_RESTORER) {
@@ -603,13 +593,6 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
}
#endif /* CONFIG_X86_32 */
-long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->sp);
-}
-
/*
* Do a signal return; undo the signal stack.
*/
@@ -659,7 +642,7 @@ long sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return ax;
@@ -865,7 +848,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe_x32 __user *frame;
sigset_t set;
unsigned long ax;
- struct pt_regs tregs;
frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
@@ -879,8 +861,7 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
goto badframe;
- tregs = *regs;
- if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
+ if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
return ax;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index eb8586693e0..ecffca11f4e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -69,9 +69,6 @@
asmlinkage int system_call(void);
-/* Do we ignore FPU interrupts ? */
-char ignore_fpu_irq;
-
/*
* The IDT has to be page-aligned to simplify the Pentium
* F0 0F bug workaround.
@@ -564,9 +561,6 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
-#ifdef CONFIG_X86_32
- ignore_fpu_irq = 1;
-#endif
exception_enter(regs);
math_error(regs, error_code, X86_TRAP_MF);
exception_exit(regs);
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5c9687b1bde..1dfe69cc78a 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -182,7 +182,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
if (pud_none_or_clear_bad(pud))
goto out;
pmd = pmd_offset(pud, 0xA0000);
- split_huge_page_pmd(mm, pmd);
+ split_huge_page_pmd_mm(mm, 0xA0000, pmd);
if (pmd_none_or_clear_bad(pmd))
goto out;
pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 3a3e8c9e280..9a907a67be8 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -145,19 +145,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
-#ifdef CONFIG_SECCOMP
-static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
-{
- if (!seccomp_mode(&tsk->seccomp))
- return 0;
- task_pt_regs(tsk)->orig_ax = syscall_nr;
- task_pt_regs(tsk)->ax = syscall_nr;
- return __secure_computing(syscall_nr);
-}
-#else
-#define vsyscall_seccomp(_tsk, _nr) 0
-#endif
-
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
@@ -190,10 +177,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
struct task_struct *tsk;
unsigned long caller;
- int vsyscall_nr;
+ int vsyscall_nr, syscall_nr, tmp;
int prev_sig_on_uaccess_error;
long ret;
- int skip;
/*
* No point in checking CS -- the only way to get here is a user mode
@@ -225,56 +211,84 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
}
tsk = current;
- /*
- * With a real vsyscall, page faults cause SIGSEGV. We want to
- * preserve that behavior to make writing exploits harder.
- */
- prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
- current_thread_info()->sig_on_uaccess_error = 1;
/*
+ * Check for access_ok violations and find the syscall nr.
+ *
* NULL is a valid user pointer (in the access_ok sense) on 32-bit and
* 64-bit, so we don't need to special-case it here. For all the
* vsyscalls, NULL means "don't write anything" not "write it at
* address 0".
*/
- ret = -EFAULT;
- skip = 0;
switch (vsyscall_nr) {
case 0:
- skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
- if (skip)
- break;
-
if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
- !write_ok_or_segv(regs->si, sizeof(struct timezone)))
- break;
+ !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_gettimeofday;
+ break;
+
+ case 1:
+ if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_time;
+ break;
+
+ case 2:
+ if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+ !write_ok_or_segv(regs->si, sizeof(unsigned))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_getcpu;
+ break;
+ }
+
+ /*
+ * Handle seccomp. regs->ip must be the original value.
+ * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+ *
+ * We could optimize the seccomp disabled case, but performance
+ * here doesn't matter.
+ */
+ regs->orig_ax = syscall_nr;
+ regs->ax = -ENOSYS;
+ tmp = secure_computing(syscall_nr);
+ if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
+ warn_bad_vsyscall(KERN_DEBUG, regs,
+ "seccomp tried to change syscall nr or ip");
+ do_exit(SIGSYS);
+ }
+ if (tmp)
+ goto do_ret; /* skip requested */
+ /*
+ * With a real vsyscall, page faults cause SIGSEGV. We want to
+ * preserve that behavior to make writing exploits harder.
+ */
+ prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+ current_thread_info()->sig_on_uaccess_error = 1;
+
+ ret = -EFAULT;
+ switch (vsyscall_nr) {
+ case 0:
ret = sys_gettimeofday(
(struct timeval __user *)regs->di,
(struct timezone __user *)regs->si);
break;
case 1:
- skip = vsyscall_seccomp(tsk, __NR_time);
- if (skip)
- break;
-
- if (!write_ok_or_segv(regs->di, sizeof(time_t)))
- break;
-
ret = sys_time((time_t __user *)regs->di);
break;
case 2:
- skip = vsyscall_seccomp(tsk, __NR_getcpu);
- if (skip)
- break;
-
- if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
- !write_ok_or_segv(regs->si, sizeof(unsigned)))
- break;
-
ret = sys_getcpu((unsigned __user *)regs->di,
(unsigned __user *)regs->si,
NULL);
@@ -283,12 +297,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
- if (skip) {
- if ((long)regs->ax <= 0L) /* seccomp errno emulation */
- goto do_ret;
- goto done; /* seccomp trace/trap */
- }
-
+check_fault:
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
@@ -311,7 +320,6 @@ do_ret:
/* Emulate a ret instruction. */
regs->ip = caller;
regs->sp += 8;
-done:
return true;
sigsegv:
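[Editor's aside, not part of the patch: the kind of legacy binary this emulation path services calls straight into the fixed vsyscall page. A hedged userspace sketch, using the address VSYSCALL_ADDR(__NR_vtime) resolves to; new code should use the vDSO instead.]

	#include <time.h>

	int main(void)
	{
		/* 0xffffffffff600400 is the legacy time() entry; the call traps
		 * and lands in emulate_vsyscall() above when the kernel runs
		 * with vsyscall emulation. */
		time_t (*vtime)(time_t *) = (time_t (*)(time_t *))0xffffffffff600400UL;
		return vtime(NULL) == (time_t)-1;
	}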
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ec79e773342..a20ecb5b6cb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -320,6 +320,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
if (index == 0) {
entry->ebx &= kvm_supported_word9_x86_features;
cpuid_mask(&entry->ebx, 9);
+ /* TSC_ADJUST is emulated */
+ entry->ebx |= F(TSC_ADJUST);
} else
entry->ebx = 0;
entry->eax = 0;
@@ -659,6 +661,7 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
} else
*eax = *ebx = *ecx = *edx = 0;
}
+EXPORT_SYMBOL_GPL(kvm_cpuid);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 58fc5148882..b7fd0798488 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -31,6 +31,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
+static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
+}
+
static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index bba39bfa1c4..a27e7637110 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -676,8 +676,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
addr.seg);
if (!usable)
goto bad;
- /* code segment or read-only data segment */
- if (((desc.type & 8) || !(desc.type & 2)) && write)
+ /* code segment in protected mode or read-only data segment */
+ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
+ || !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 43e9fadca5d..9392f527f10 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1011,7 +1011,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
local_irq_save(flags);
now = apic->lapic_timer.timer.base->get_time();
- guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
+ guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6f85fe0bf95..01d7c2ad05f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2382,12 +2382,20 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|| (!vcpu->arch.mmu.direct_map && write_fault
&& !is_write_protection(vcpu) && !user_fault)) {
+ /*
+ * There are two cases:
+ * - another vcpu creates a new sp in the window between
+ * mapping_level() and acquiring the mmu-lock.
+ * - the new sp is created by the vcpu itself (page-fault
+ * path) when the guest uses the target gfn as its own
+ * page table.
+ * Both cases can be handled by letting the guest retry the
+ * access: it will refault, and we can then establish the
+ * mapping with a small page.
+ */
if (level > PT_PAGE_TABLE_LEVEL &&
- has_wrprotected_page(vcpu->kvm, gfn, level)) {
- ret = 1;
- drop_spte(vcpu->kvm, sptep);
+ has_wrprotected_page(vcpu->kvm, gfn, level))
goto done;
- }
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2505,6 +2513,14 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
mmu_free_roots(vcpu);
}
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+ int bit7;
+
+ bit7 = (gpte >> 7) & 1;
+ return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
@@ -2517,6 +2533,26 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
return gfn_to_pfn_memslot_atomic(slot, gfn);
}
+static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *sp, u64 *spte,
+ u64 gpte)
+{
+ if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+ goto no_present;
+
+ if (!is_present_gpte(gpte))
+ goto no_present;
+
+ if (!(gpte & PT_ACCESSED_MASK))
+ goto no_present;
+
+ return false;
+
+no_present:
+ drop_spte(vcpu->kvm, spte);
+ return true;
+}
+
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *start, u64 *end)
@@ -2671,7 +2707,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* PT_PAGE_TABLE_LEVEL and there would be no adjustment done
* here.
*/
- if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompound(pfn_to_page(pfn)) &&
!has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
@@ -2699,18 +2735,13 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
}
}
-static bool mmu_invalid_pfn(pfn_t pfn)
-{
- return unlikely(is_invalid_pfn(pfn));
-}
-
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
pfn_t pfn, unsigned access, int *ret_val)
{
bool ret = true;
/* The pfn is invalid, report the error! */
- if (unlikely(is_invalid_pfn(pfn))) {
+ if (unlikely(is_error_pfn(pfn))) {
*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
goto exit;
}
@@ -2862,7 +2893,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
return r;
spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu, mmu_seq))
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
if (likely(!force_pt_level))
@@ -3331,7 +3362,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
return r;
spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu, mmu_seq))
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
if (likely(!force_pt_level))
@@ -3399,14 +3430,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
nonpaging_free(vcpu);
}
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
- int bit7;
-
- bit7 = (gpte >> 7) & 1;
- return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
{
unsigned mask;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 714e2c01a6f..891eb6d93b8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -305,51 +305,43 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
addr, access);
}
-static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp, u64 *spte,
- pt_element_t gpte)
+static bool
+FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
- if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
- goto no_present;
-
- if (!is_present_gpte(gpte))
- goto no_present;
-
- if (!(gpte & PT_ACCESSED_MASK))
- goto no_present;
-
- return false;
-
-no_present:
- drop_spte(vcpu->kvm, spte);
- return true;
-}
-
-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- u64 *spte, const void *pte)
-{
- pt_element_t gpte;
unsigned pte_access;
+ gfn_t gfn;
pfn_t pfn;
- gpte = *(const pt_element_t *)pte;
- if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
- return;
+ if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+ return false;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
+
+ gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access & gpte_access(vcpu, gpte);
protect_clean_gpte(&pte_access, gpte);
- pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
- if (mmu_invalid_pfn(pfn))
- return;
+ pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
+ no_dirty_log && (pte_access & ACC_WRITE_MASK));
+ if (is_error_pfn(pfn))
+ return false;
/*
- * we call mmu_set_spte() with host_writable = true because that
- * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+ * we call mmu_set_spte() with host_writable = true because
+ * pte_prefetch_gfn_to_pfn always gets a writable pfn.
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- NULL, PT_PAGE_TABLE_LEVEL,
- gpte_to_gfn(gpte), pfn, true, true);
+ NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+
+ return true;
+}
+
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ u64 *spte, const void *pte)
+{
+ pt_element_t gpte = *(const pt_element_t *)pte;
+
+ FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
@@ -395,53 +387,34 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
- pt_element_t gpte;
- unsigned pte_access;
- gfn_t gfn;
- pfn_t pfn;
-
if (spte == sptep)
continue;
if (is_shadow_present_pte(*spte))
continue;
- gpte = gptep[i];
-
- if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
- continue;
-
- pte_access = sp->role.access & gpte_access(vcpu, gpte);
- protect_clean_gpte(&pte_access, gpte);
- gfn = gpte_to_gfn(gpte);
- pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
- pte_access & ACC_WRITE_MASK);
- if (mmu_invalid_pfn(pfn))
+ if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
break;
-
- mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- NULL, PT_PAGE_TABLE_LEVEL, gfn,
- pfn, true, true);
}
}
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
+ * If the guest tries to write a write-protected page, we need to
+ * emulate this operation; return 1 to indicate this case.
*/
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int user_fault, int write_fault, int hlevel,
- int *emulate, pfn_t pfn, bool map_writable,
- bool prefault)
+ pfn_t pfn, bool map_writable, bool prefault)
{
- unsigned access = gw->pt_access;
struct kvm_mmu_page *sp = NULL;
- int top_level;
- unsigned direct_access;
struct kvm_shadow_walk_iterator it;
+ unsigned direct_access, access = gw->pt_access;
+ int top_level, emulate = 0;
if (!is_present_gpte(gw->ptes[gw->level - 1]))
- return NULL;
+ return 0;
direct_access = gw->pte_access;
@@ -505,17 +478,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
clear_sp_write_flooding_count(it.sptep);
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
- user_fault, write_fault, emulate, it.level,
+ user_fault, write_fault, &emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
- return it.sptep;
+ return emulate;
out_gpte_changed:
if (sp)
kvm_mmu_put_page(sp, it.sptep);
kvm_release_pfn_clean(pfn);
- return NULL;
+ return 0;
}
/*
@@ -538,8 +511,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker;
- u64 *sptep;
- int emulate = 0;
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
@@ -594,24 +565,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return r;
spin_lock(&vcpu->kvm->mmu_lock);
- if (mmu_notifier_retry(vcpu, mmu_seq))
+ if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
- sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- level, &emulate, pfn, map_writable, prefault);
- (void)sptep;
- pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
- sptep, *sptep, emulate);
-
+ r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+ level, pfn, map_writable, prefault);
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
- return emulate;
+ return r;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -757,7 +724,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sizeof(pt_element_t)))
return -EINVAL;
- if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+ if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d017df3899e..d29d3cd1c15 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -20,6 +20,7 @@
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
+#include "cpuid.h"
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -630,15 +631,12 @@ static int svm_hardware_enable(void *garbage)
return -EBUSY;
if (!has_svm()) {
- printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
- me);
+ pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
return -EINVAL;
}
sd = per_cpu(svm_data, me);
-
if (!sd) {
- printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
- me);
+ pr_err("%s: svm_data is NULL on %d\n", __func__, me);
return -EINVAL;
}
@@ -1012,6 +1010,13 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
svm->tsc_ratio = ratio;
}
+static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ return svm->vmcb->control.tsc_offset;
+}
+
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1189,6 +1194,8 @@ static void init_vmcb(struct vcpu_svm *svm)
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ u32 dummy;
+ u32 eax = 1;
init_vmcb(svm);
@@ -1197,8 +1204,9 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
}
- vcpu->arch.regs_avail = ~0;
- vcpu->arch.regs_dirty = ~0;
+
+ kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
return 0;
}
@@ -1254,11 +1262,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
- kvm_write_tsc(&svm->vcpu, 0);
-
- err = fx_init(&svm->vcpu);
- if (err)
- goto free_page4;
svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&svm->vcpu))
@@ -1268,8 +1271,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
return &svm->vcpu;
-free_page4:
- __free_page(hsave_page);
free_page3:
__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
@@ -3008,11 +3009,11 @@ static int cr8_write_interception(struct vcpu_svm *svm)
return 0;
}
-u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu)
+u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
return vmcb->control.tsc_offset +
- svm_scale_tsc(vcpu, native_read_tsc());
+ svm_scale_tsc(vcpu, host_tsc);
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
@@ -3131,13 +3132,15 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ u32 ecx = msr->index;
+ u64 data = msr->data;
switch (ecx) {
case MSR_IA32_TSC:
- kvm_write_tsc(vcpu, data);
+ kvm_write_tsc(vcpu, msr);
break;
case MSR_STAR:
svm->vmcb->save.star = data;
@@ -3192,20 +3195,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
default:
- return kvm_set_msr_common(vcpu, ecx, data);
+ return kvm_set_msr_common(vcpu, msr);
}
return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm)
{
+ struct msr_data msr;
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ msr.data = data;
+ msr.index = ecx;
+ msr.host_initiated = false;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
- if (svm_set_msr(&svm->vcpu, ecx, data)) {
+ if (svm_set_msr(&svm->vcpu, &msr)) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
} else {
@@ -4302,6 +4309,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
.set_tsc_khz = svm_set_tsc_khz,
+ .read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset = svm_adjust_tsc_offset,
.compute_tsc_offset = svm_compute_tsc_offset,
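
The svm.c changes above make wrmsr_interception() pack ECX and EDX:EAX into the new struct msr_data, with host_initiated = false, before calling svm_set_msr(). A hedged user-space sketch of that packing step follows; only the three field names are taken from the diff, everything else is simplified.

/*
 * Sketch (user space, simplified types) of how a WRMSR exit packs
 * EDX:EAX plus ECX into the msr_data structure introduced above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_data {
	bool host_initiated;
	uint32_t index;
	uint64_t data;
};

static struct msr_data pack_wrmsr(uint32_t ecx, uint32_t eax, uint32_t edx)
{
	struct msr_data msr = {
		.host_initiated = false,	/* guest-initiated write */
		.index = ecx,
		.data = ((uint64_t)edx << 32) | eax,
	};

	return msr;
}

int main(void)
{
	/* MSR index and value are purely illustrative */
	struct msr_data msr = pack_wrmsr(0x10, 0xdeadbeef, 0x00000001);

	printf("index %#x data %#llx host_initiated %d\n",
	       msr.index, (unsigned long long)msr.data, msr.host_initiated);
	return 0;
}
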
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index bca63f04dcc..fe5e00ed703 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -4,6 +4,7 @@
#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
+#include <asm/clocksource.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -754,6 +755,68 @@ TRACE_EVENT(
__entry->write ? "Write" : "Read",
__entry->gpa_match ? "GPA" : "GVA")
);
+
+#ifdef CONFIG_X86_64
+
+#define host_clocks \
+ {VCLOCK_NONE, "none"}, \
+ {VCLOCK_TSC, "tsc"}, \
+ {VCLOCK_HPET, "hpet"} \
+
+TRACE_EVENT(kvm_update_master_clock,
+ TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
+ TP_ARGS(use_master_clock, host_clock, offset_matched),
+
+ TP_STRUCT__entry(
+ __field( bool, use_master_clock )
+ __field( unsigned int, host_clock )
+ __field( bool, offset_matched )
+ ),
+
+ TP_fast_assign(
+ __entry->use_master_clock = use_master_clock;
+ __entry->host_clock = host_clock;
+ __entry->offset_matched = offset_matched;
+ ),
+
+ TP_printk("masterclock %d hostclock %s offsetmatched %u",
+ __entry->use_master_clock,
+ __print_symbolic(__entry->host_clock, host_clocks),
+ __entry->offset_matched)
+);
+
+TRACE_EVENT(kvm_track_tsc,
+ TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
+ unsigned int online_vcpus, bool use_master_clock,
+ unsigned int host_clock),
+ TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
+ host_clock),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vcpu_id )
+ __field( unsigned int, nr_vcpus_matched_tsc )
+ __field( unsigned int, online_vcpus )
+ __field( bool, use_master_clock )
+ __field( unsigned int, host_clock )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->nr_vcpus_matched_tsc = nr_matched;
+ __entry->online_vcpus = online_vcpus;
+ __entry->use_master_clock = use_master_clock;
+ __entry->host_clock = host_clock;
+ ),
+
+ TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
+ " hostclock %s",
+ __entry->vcpu_id, __entry->use_master_clock,
+ __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
+ __print_symbolic(__entry->host_clock, host_clocks))
+);
+
+#endif /* CONFIG_X86_64 */
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f85815945fc..9120ae1901e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -42,6 +42,7 @@
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>
+#include <asm/kexec.h>
#include "trace.h"
@@ -802,11 +803,6 @@ static inline bool cpu_has_vmx_ept_ad_bits(void)
return vmx_capability.ept & VMX_EPT_AD_BIT;
}
-static inline bool cpu_has_vmx_invept_individual_addr(void)
-{
- return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
-}
-
static inline bool cpu_has_vmx_invept_context(void)
{
return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
@@ -992,6 +988,46 @@ static void vmcs_load(struct vmcs *vmcs)
vmcs, phys_addr);
}
+#ifdef CONFIG_KEXEC
+/*
+ * This bitmap indicates whether the vmclear operation is
+ * enabled on each cpu. All bits are cleared (disabled) by
+ * default.
+ */
+static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
+
+static inline void crash_enable_local_vmclear(int cpu)
+{
+ cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static inline void crash_disable_local_vmclear(int cpu)
+{
+ cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static inline int crash_local_vmclear_enabled(int cpu)
+{
+ return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static void crash_vmclear_local_loaded_vmcss(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct loaded_vmcs *v;
+
+ if (!crash_local_vmclear_enabled(cpu))
+ return;
+
+ list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
+ loaded_vmcss_on_cpu_link)
+ vmcs_clear(v->vmcs);
+}
+#else
+static inline void crash_enable_local_vmclear(int cpu) { }
+static inline void crash_disable_local_vmclear(int cpu) { }
+#endif /* CONFIG_KEXEC */
+
static void __loaded_vmcs_clear(void *arg)
{
struct loaded_vmcs *loaded_vmcs = arg;
@@ -1001,15 +1037,28 @@ static void __loaded_vmcs_clear(void *arg)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
+ crash_disable_local_vmclear(cpu);
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+
+ /*
+ * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
+ * happens before setting loaded_vmcs->vcpu to -1, which is done in
+ * loaded_vmcs_init. Otherwise, another cpu can see vcpu = -1 first
+ * and then add the vmcs to the percpu list before it is deleted.
+ */
+ smp_wmb();
+
loaded_vmcs_init(loaded_vmcs);
+ crash_enable_local_vmclear(cpu);
}
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
- if (loaded_vmcs->cpu != -1)
- smp_call_function_single(
- loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
+ int cpu = loaded_vmcs->cpu;
+
+ if (cpu != -1)
+ smp_call_function_single(cpu,
+ __loaded_vmcs_clear, loaded_vmcs, 1);
}
static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
@@ -1051,17 +1100,6 @@ static inline void ept_sync_context(u64 eptp)
}
}
-static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
-{
- if (enable_ept) {
- if (cpu_has_vmx_invept_individual_addr())
- __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
- eptp, gpa);
- else
- ept_sync_context(eptp);
- }
-}
-
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
unsigned long value;
@@ -1535,8 +1573,18 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
+ crash_disable_local_vmclear(cpu);
+
+ /*
+ * The read of loaded_vmcs->cpu should happen before fetching
+ * loaded_vmcs->loaded_vmcss_on_cpu_link.
+ * See the comments in __loaded_vmcs_clear().
+ */
+ smp_rmb();
+
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
+ crash_enable_local_vmclear(cpu);
local_irq_enable();
/*
@@ -1839,11 +1887,10 @@ static u64 guest_read_tsc(void)
* Like guest_read_tsc, but always returns L1's notion of the timestamp
* counter, even if a nested guest (L2) is currently running.
*/
-u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu)
+u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- u64 host_tsc, tsc_offset;
+ u64 tsc_offset;
- rdtscll(host_tsc);
tsc_offset = is_guest_mode(vcpu) ?
to_vmx(vcpu)->nested.vmcs01_tsc_offset :
vmcs_read64(TSC_OFFSET);
@@ -1866,6 +1913,11 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
WARN(1, "user requested TSC rate below hardware speed\n");
}
+static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+ return vmcs_read64(TSC_OFFSET);
+}
+
/*
* writes 'offset' into guest's timestamp counter offset register
*/
@@ -2202,15 +2254,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
int ret = 0;
+ u32 msr_index = msr_info->index;
+ u64 data = msr_info->data;
switch (msr_index) {
case MSR_EFER:
- ret = kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_info);
break;
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
@@ -2236,7 +2290,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TSC:
- kvm_write_tsc(vcpu, data);
+ kvm_write_tsc(vcpu, msr_info);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2244,7 +2298,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vcpu->arch.pat = data;
break;
}
- ret = kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ break;
+ case MSR_IA32_TSC_ADJUST:
+ ret = kvm_set_msr_common(vcpu, msr_info);
break;
case MSR_TSC_AUX:
if (!vmx->rdtscp_enabled)
@@ -2267,7 +2324,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
}
break;
}
- ret = kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_info);
}
return ret;
@@ -2341,6 +2398,18 @@ static int hardware_enable(void *garbage)
return -EBUSY;
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+
+ /*
+ * Now we can enable the vmclear operation in kdump
+ * since the loaded_vmcss_on_cpu list on this cpu
+ * has been initialized.
+ *
+ * Though the cpu is not in VMX operation now, there
+ * is no problem enabling the vmclear operation here,
+ * since the loaded_vmcss_on_cpu list is empty.
+ */
+ crash_enable_local_vmclear(cpu);
+
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@@ -2697,6 +2766,7 @@ static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment
if (!(vmcs_readl(sf->base) == tmp.base && tmp.s)) {
tmp.base = vmcs_readl(sf->base);
tmp.selector = vmcs_read16(sf->selector);
+ tmp.dpl = tmp.selector & SELECTOR_RPL_MASK;
tmp.s = 1;
}
vmx_set_segment(vcpu, &tmp, seg);
@@ -3246,7 +3316,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
* unrestricted guest like Westmere to older host that don't have
* unrestricted guest like Nehelem.
*/
- if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
+ if (vmx->rmode.vm86_active) {
switch (seg) {
case VCPU_SREG_CS:
vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
@@ -3897,8 +3967,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
- kvm_write_tsc(&vmx->vcpu, 0);
-
return 0;
}
@@ -3908,8 +3976,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
u64 msr;
int ret;
- vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-
vmx->rmode.vm86_active = 0;
vmx->soft_vnmi_blocked = 0;
@@ -3921,10 +3987,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
msr |= MSR_IA32_APICBASE_BSP;
kvm_set_apic_base(&vmx->vcpu, msr);
- ret = fx_init(&vmx->vcpu);
- if (ret != 0)
- goto out;
-
vmx_segment_cache_clear(vmx);
seg_setup(VCPU_SREG_CS);
@@ -3965,7 +4027,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_rip_write(vcpu, 0xfff0);
else
kvm_rip_write(vcpu, 0);
- kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
vmcs_writel(GUEST_GDTR_BASE, 0);
vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
@@ -4015,7 +4076,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
/* HACK: Don't enable emulation on guest boot/reset */
vmx->emulation_required = 0;
-out:
return ret;
}
@@ -4287,16 +4347,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
- if ((vect_info & VECTORING_INFO_VALID_MASK) &&
- !is_page_fault(intr_info)) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
- vcpu->run->internal.ndata = 2;
- vcpu->run->internal.data[0] = vect_info;
- vcpu->run->internal.data[1] = intr_info;
- return 0;
- }
-
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
return 1; /* already handled by vmx_vcpu_run() */
@@ -4315,6 +4365,22 @@ static int handle_exception(struct kvm_vcpu *vcpu)
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+
+ /*
+ * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
+ * MMIO; it is better to report an internal error.
+ * See the comments in vmx_handle_exit.
+ */
+ if ((vect_info & VECTORING_INFO_VALID_MASK) &&
+ !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
+ vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.data[0] = vect_info;
+ vcpu->run->internal.data[1] = intr_info;
+ return 0;
+ }
+
if (is_page_fault(intr_info)) {
/* EPT won't cause page fault directly */
BUG_ON(enable_ept);
@@ -4626,11 +4692,15 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
+ struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
- if (vmx_set_msr(vcpu, ecx, data) != 0) {
+ msr.data = data;
+ msr.index = ecx;
+ msr.host_initiated = false;
+ if (vmx_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
@@ -4827,11 +4897,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- if (exit_qualification & (1 << 6)) {
- printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
- return -EINVAL;
- }
-
gla_validity = (exit_qualification >> 7) & 0x3;
if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
@@ -5979,13 +6044,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
return 0;
}
+ /*
+ * Note:
+ * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it is caused by a
+ * delivery event, since that indicates the guest is accessing MMIO.
+ * The vm-exit can be triggered again after returning to the guest,
+ * which would cause an infinite loop.
+ */
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
- exit_reason != EXIT_REASON_TASK_SWITCH))
- printk(KERN_WARNING "%s: unexpected, valid vectoring info "
- "(0x%x) and exit reason is 0x%x\n",
- __func__, vectoring_info, exit_reason);
+ exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+ vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.data[0] = vectoring_info;
+ vcpu->run->internal.data[1] = exit_reason;
+ return 0;
+ }
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
!(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
@@ -7309,6 +7385,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.set_tsc_khz = vmx_set_tsc_khz,
+ .read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset = vmx_adjust_tsc_offset,
.compute_tsc_offset = vmx_compute_tsc_offset,
@@ -7367,6 +7444,11 @@ static int __init vmx_init(void)
if (r)
goto out3;
+#ifdef CONFIG_KEXEC
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+ crash_vmclear_local_loaded_vmcss);
+#endif
+
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
@@ -7404,6 +7486,11 @@ static void __exit vmx_exit(void)
free_page((unsigned long)vmx_io_bitmap_b);
free_page((unsigned long)vmx_io_bitmap_a);
+#ifdef CONFIG_KEXEC
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL);
+ synchronize_rcu();
+#endif
+
kvm_exit();
}
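
The CONFIG_KEXEC hunks above gate crash-time VMCLEAR on a per-cpu bit that is cleared around every update of the loaded-VMCS list. Below is a toy, single-process sketch of that gating pattern; the list is replaced by a plain counter and all names are local to the example. The kernel version additionally relies on disabled interrupts and memory barriers around the window, which the sketch does not model.

/*
 * Toy sketch of the crash-time vmclear gating above: a per-cpu
 * "enabled" bit is cleared while the loaded-VMCS list is being
 * modified, so a crash handler that fires in that window skips
 * the inconsistent list instead of walking it.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long crash_vmclear_enabled;	/* one bit per cpu */
static int loaded_vmcs_count[4];		/* stand-in for the per-cpu list */

static void crash_enable_local_vmclear(int cpu)  { crash_vmclear_enabled |=  (1UL << cpu); }
static void crash_disable_local_vmclear(int cpu) { crash_vmclear_enabled &= ~(1UL << cpu); }
static bool crash_local_vmclear_enabled(int cpu) { return crash_vmclear_enabled & (1UL << cpu); }

static void crash_vmclear_local_loaded_vmcss(int cpu)
{
	if (!crash_local_vmclear_enabled(cpu))
		return;		/* list is being modified: skip it */
	printf("cpu%d: vmclear %d loaded VMCSs\n", cpu, loaded_vmcs_count[cpu]);
}

int main(void)
{
	int cpu = 1;

	crash_enable_local_vmclear(cpu);
	loaded_vmcs_count[cpu] = 2;

	crash_disable_local_vmclear(cpu);	/* start of list update */
	crash_vmclear_local_loaded_vmcss(cpu);	/* crash here: nothing done */
	loaded_vmcs_count[cpu] = 3;
	crash_enable_local_vmclear(cpu);	/* list consistent again */

	crash_vmclear_local_loaded_vmcss(cpu);	/* crash here: walks 3 entries */
	return 0;
}
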
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f7641756be..76f54461f7c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -46,6 +46,8 @@
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
@@ -158,7 +160,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
u64 __read_mostly host_xcr0;
-int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
+static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
+
+static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
@@ -633,7 +637,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
if (is_long_mode(vcpu)) {
- if (kvm_read_cr4(vcpu) & X86_CR4_PCIDE) {
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
@@ -827,6 +831,7 @@ static u32 msrs_to_save[] = {
static unsigned num_msrs_to_save;
static const u32 emulated_msrs[] = {
+ MSR_IA32_TSC_ADJUST,
MSR_IA32_TSCDEADLINE,
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
@@ -886,9 +891,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
- return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+ return kvm_x86_ops->set_msr(vcpu, msr);
}
/*
@@ -896,9 +901,63 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
*/
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
- return kvm_set_msr(vcpu, index, *data);
+ struct msr_data msr;
+
+ msr.data = *data;
+ msr.index = index;
+ msr.host_initiated = true;
+ return kvm_set_msr(vcpu, &msr);
}
+#ifdef CONFIG_X86_64
+struct pvclock_gtod_data {
+ seqcount_t seq;
+
+ struct { /* extract of a clocksource struct */
+ int vclock_mode;
+ cycle_t cycle_last;
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+ } clock;
+
+ /* open coded 'struct timespec' */
+ u64 monotonic_time_snsec;
+ time_t monotonic_time_sec;
+};
+
+static struct pvclock_gtod_data pvclock_gtod_data;
+
+static void update_pvclock_gtod(struct timekeeper *tk)
+{
+ struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
+
+ write_seqcount_begin(&vdata->seq);
+
+ /* copy pvclock gtod data */
+ vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
+ vdata->clock.cycle_last = tk->clock->cycle_last;
+ vdata->clock.mask = tk->clock->mask;
+ vdata->clock.mult = tk->mult;
+ vdata->clock.shift = tk->shift;
+
+ vdata->monotonic_time_sec = tk->xtime_sec
+ + tk->wall_to_monotonic.tv_sec;
+ vdata->monotonic_time_snsec = tk->xtime_nsec
+ + (tk->wall_to_monotonic.tv_nsec
+ << tk->shift);
+ while (vdata->monotonic_time_snsec >=
+ (((u64)NSEC_PER_SEC) << tk->shift)) {
+ vdata->monotonic_time_snsec -=
+ ((u64)NSEC_PER_SEC) << tk->shift;
+ vdata->monotonic_time_sec++;
+ }
+
+ write_seqcount_end(&vdata->seq);
+}
+#endif
+
+
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
int version;
@@ -995,6 +1054,10 @@ static inline u64 get_kernel_ns(void)
return timespec_to_ns(&ts);
}
+#ifdef CONFIG_X86_64
+static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
+#endif
+
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;
@@ -1046,12 +1109,47 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+ bool vcpus_matched;
+ bool do_request = false;
+ struct kvm_arch *ka = &vcpu->kvm->arch;
+ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+ vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
+ atomic_read(&vcpu->kvm->online_vcpus));
+
+ if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
+ if (!ka->use_master_clock)
+ do_request = 1;
+
+ if (!vcpus_matched && ka->use_master_clock)
+ do_request = 1;
+
+ if (do_request)
+ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+
+ trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
+ atomic_read(&vcpu->kvm->online_vcpus),
+ ka->use_master_clock, gtod->clock.vclock_mode);
+#endif
+}
+
+static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
+{
+ u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+ vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
+}
+
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
s64 usdiff;
+ bool matched;
+ u64 data = msr->data;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1094,6 +1192,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
+ matched = true;
} else {
/*
* We split periods of matched TSC writes into generations.
@@ -1108,6 +1207,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
kvm->arch.cur_tsc_nsec = ns;
kvm->arch.cur_tsc_write = data;
kvm->arch.cur_tsc_offset = offset;
+ matched = false;
pr_debug("kvm: new tsc generation %u, clock %llu\n",
kvm->arch.cur_tsc_generation, data);
}
@@ -1129,26 +1229,195 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+ if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+ update_ia32_tsc_adjust_msr(vcpu, offset);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+ spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+ if (matched)
+ kvm->arch.nr_vcpus_matched_tsc++;
+ else
+ kvm->arch.nr_vcpus_matched_tsc = 0;
+
+ kvm_track_tsc_matching(vcpu);
+ spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);
+#ifdef CONFIG_X86_64
+
+static cycle_t read_tsc(void)
+{
+ cycle_t ret;
+ u64 last;
+
+ /*
+ * Empirically, a fence (of type that depends on the CPU)
+ * before rdtsc is enough to ensure that rdtsc is ordered
+ * with respect to loads. The various CPU manuals are unclear
+ * as to whether rdtsc can be reordered with later loads,
+ * but no one has ever seen it happen.
+ */
+ rdtsc_barrier();
+ ret = (cycle_t)vget_cycles();
+
+ last = pvclock_gtod_data.clock.cycle_last;
+
+ if (likely(ret >= last))
+ return ret;
+
+ /*
+ * GCC likes to generate cmov here, but this branch is extremely
+ * predictable (it's just a function of time and the likely is
+ * very likely) and there's a data dependence, so force GCC
+ * to generate a branch instead. I don't barrier() because
+ * we don't actually need a barrier, and if this function
+ * ever gets inlined it will generate worse code.
+ */
+ asm volatile ("");
+ return last;
+}
+
+static inline u64 vgettsc(cycle_t *cycle_now)
+{
+ long v;
+ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+ *cycle_now = read_tsc();
+
+ v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
+ return v * gtod->clock.mult;
+}
+
+static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
+{
+ unsigned long seq;
+ u64 ns;
+ int mode;
+ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+ ts->tv_nsec = 0;
+ do {
+ seq = read_seqcount_begin(&gtod->seq);
+ mode = gtod->clock.vclock_mode;
+ ts->tv_sec = gtod->monotonic_time_sec;
+ ns = gtod->monotonic_time_snsec;
+ ns += vgettsc(cycle_now);
+ ns >>= gtod->clock.shift;
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+ timespec_add_ns(ts, ns);
+
+ return mode;
+}
+
+/* returns true if host is using tsc clocksource */
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+{
+ struct timespec ts;
+
+ /* checked again under seqlock below */
+ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+ return false;
+
+ if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
+ return false;
+
+ monotonic_to_bootbased(&ts);
+ *kernel_ns = timespec_to_ns(&ts);
+
+ return true;
+}
+#endif
+
+/*
+ *
+ * Assuming a stable TSC across physical CPUs, and a stable TSC
+ * across virtual CPUs, the following condition is possible.
+ * Each numbered line represents an event visible to both
+ * CPUs at the next numbered event.
+ *
+ * "timespecX" represents host monotonic time. "tscX" represents
+ * RDTSC value.
+ *
+ * VCPU0 on CPU0 | VCPU1 on CPU1
+ *
+ * 1. read timespec0,tsc0
+ * 2. | timespec1 = timespec0 + N
+ * | tsc1 = tsc0 + M
+ * 3. transition to guest | transition to guest
+ * 4. ret0 = timespec0 + (rdtsc - tsc0) |
+ * 5. | ret1 = timespec1 + (rdtsc - tsc1)
+ * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
+ *
+ * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
+ *
+ * - ret0 < ret1
+ * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
+ * ...
+ * - 0 < N - M => M < N
+ *
+ * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
+ * always the case (the difference between two distinct xtime instances
+ * might be smaller than the difference between corresponding TSC reads,
+ * when updating guest vcpus' pvclock areas).
+ *
+ * To avoid that problem, do not allow visibility of distinct
+ * system_timestamp/tsc_timestamp values simultaneously: use a master
+ * copy of host monotonic time values. Update that master copy
+ * in lockstep.
+ *
+ * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
+ *
+ */
+
+static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
+{
+#ifdef CONFIG_X86_64
+ struct kvm_arch *ka = &kvm->arch;
+ int vclock_mode;
+ bool host_tsc_clocksource, vcpus_matched;
+
+ vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
+ atomic_read(&kvm->online_vcpus));
+
+ /*
+ * If the host uses TSC clock, then passthrough TSC as stable
+ * to the guest.
+ */
+ host_tsc_clocksource = kvm_get_time_and_clockread(
+ &ka->master_kernel_ns,
+ &ka->master_cycle_now);
+
+ ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+
+ if (ka->use_master_clock)
+ atomic_set(&kvm_guest_has_master_clock, 1);
+
+ vclock_mode = pvclock_gtod_data.clock.vclock_mode;
+ trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
+ vcpus_matched);
+#endif
+}
+
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
- unsigned long flags;
+ unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
+ struct kvm_arch *ka = &v->kvm->arch;
void *shared_kaddr;
- unsigned long this_tsc_khz;
s64 kernel_ns, max_kernel_ns;
- u64 tsc_timestamp;
+ u64 tsc_timestamp, host_tsc;
+ struct pvclock_vcpu_time_info *guest_hv_clock;
u8 pvclock_flags;
+ bool use_master_clock;
+
+ kernel_ns = 0;
+ host_tsc = 0;
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
- kernel_ns = get_kernel_ns();
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
@@ -1157,6 +1426,24 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
}
/*
+ * If the host uses TSC clock, then passthrough TSC as stable
+ * to the guest.
+ */
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ use_master_clock = ka->use_master_clock;
+ if (use_master_clock) {
+ host_tsc = ka->master_cycle_now;
+ kernel_ns = ka->master_kernel_ns;
+ }
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+ if (!use_master_clock) {
+ host_tsc = native_read_tsc();
+ kernel_ns = get_kernel_ns();
+ }
+
+ tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+
+ /*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
@@ -1217,23 +1504,20 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hw_tsc_khz = this_tsc_khz;
}
- if (max_kernel_ns > kernel_ns)
- kernel_ns = max_kernel_ns;
-
+ /* with a master <monotonic time, tsc value> tuple,
+ * pvclock clock reads always increase at the (scaled) rate
+ * of guest TSC - no need to deal with sampling errors.
+ */
+ if (!use_master_clock) {
+ if (max_kernel_ns > kernel_ns)
+ kernel_ns = max_kernel_ns;
+ }
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
- pvclock_flags = 0;
- if (vcpu->pvclock_set_guest_stopped_request) {
- pvclock_flags |= PVCLOCK_GUEST_STOPPED;
- vcpu->pvclock_set_guest_stopped_request = false;
- }
-
- vcpu->hv_clock.flags = pvclock_flags;
-
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
@@ -1243,6 +1527,22 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
shared_kaddr = kmap_atomic(vcpu->time_page);
+ guest_hv_clock = shared_kaddr + vcpu->time_offset;
+
+ /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
+ pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+
+ if (vcpu->pvclock_set_guest_stopped_request) {
+ pvclock_flags |= PVCLOCK_GUEST_STOPPED;
+ vcpu->pvclock_set_guest_stopped_request = false;
+ }
+
+ /* If the host uses TSC clocksource, then it is stable */
+ if (use_master_clock)
+ pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
+
+ vcpu->hv_clock.flags = pvclock_flags;
+
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
@@ -1572,9 +1872,11 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
+ u32 msr = msr_info->index;
+ u64 data = msr_info->data;
switch (msr) {
case MSR_EFER:
@@ -1625,6 +1927,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
+ case MSR_IA32_TSC_ADJUST:
+ if (guest_cpuid_has_tsc_adjust(vcpu)) {
+ if (!msr_info->host_initiated) {
+ u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+ kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ }
+ vcpu->arch.ia32_tsc_adjust_msr = data;
+ }
+ break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
@@ -1984,6 +2295,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_TSCDEADLINE:
data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
+ case MSR_IA32_TSC_ADJUST:
+ data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+ break;
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
@@ -2342,7 +2656,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
- kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ /*
+ * On a host with synchronized TSC, there is no need to update
+ * kvmclock on vcpu->cpu migration
+ */
+ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
@@ -2691,15 +3010,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!vcpu->arch.apic)
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
- if (IS_ERR(u.lapic)) {
- r = PTR_ERR(u.lapic);
- goto out;
- }
+ if (IS_ERR(u.lapic))
+ return PTR_ERR(u.lapic);
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_INTERRUPT: {
@@ -2709,16 +3023,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_NMI: {
r = kvm_vcpu_ioctl_nmi(vcpu);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_SET_CPUID: {
@@ -2729,8 +3037,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
- if (r)
- goto out;
break;
}
case KVM_SET_CPUID2: {
@@ -2742,8 +3048,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
goto out;
r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
- if (r)
- goto out;
break;
}
case KVM_GET_CPUID2: {
@@ -2875,10 +3179,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
- if (IS_ERR(u.xsave)) {
- r = PTR_ERR(u.xsave);
- goto out;
- }
+ if (IS_ERR(u.xsave))
+ return PTR_ERR(u.xsave);
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
@@ -2900,10 +3202,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_SET_XCRS: {
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
- if (IS_ERR(u.xcrs)) {
- r = PTR_ERR(u.xcrs);
- goto out;
- }
+ if (IS_ERR(u.xcrs))
+ return PTR_ERR(u.xcrs);
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
@@ -2951,7 +3251,7 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
int ret;
if (addr > (unsigned int)(-3 * PAGE_SIZE))
- return -1;
+ return -EINVAL;
ret = kvm_x86_ops->set_tss_addr(kvm, addr);
return ret;
}
@@ -3212,8 +3512,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
- if (r < 0)
- goto out;
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
@@ -3222,14 +3520,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
- if (r < 0)
- goto out;
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
- if (r)
- goto out;
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
@@ -3320,8 +3614,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
get_irqchip_out:
kfree(chip);
- if (r)
- goto out;
break;
}
case KVM_SET_IRQCHIP: {
@@ -3343,8 +3635,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
set_irqchip_out:
kfree(chip);
- if (r)
- goto out;
break;
}
case KVM_GET_PIT: {
@@ -3371,9 +3661,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_GET_PIT2: {
@@ -3397,9 +3684,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_REINJECT_CONTROL: {
@@ -3408,9 +3692,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_XEN_HVM_CONFIG: {
@@ -4273,7 +4554,12 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 data)
{
- return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+ struct msr_data msr;
+
+ msr.data = data;
+ msr.index = msr_index;
+ msr.host_initiated = false;
+ return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -4495,7 +4781,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
- if (!is_error_pfn(pfn)) {
+ if (!is_error_noslot_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return true;
}
@@ -4881,6 +5167,50 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask);
}
+#ifdef CONFIG_X86_64
+static void pvclock_gtod_update_fn(struct work_struct *work)
+{
+ struct kvm *kvm;
+
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ raw_spin_lock(&kvm_lock);
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
+ atomic_set(&kvm_guest_has_master_clock, 0);
+ raw_spin_unlock(&kvm_lock);
+}
+
+static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
+
+/*
+ * Notification about pvclock gtod data update.
+ */
+static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
+ void *priv)
+{
+ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+ struct timekeeper *tk = priv;
+
+ update_pvclock_gtod(tk);
+
+ /* disable master clock if host does not trust, or does not
+ * use, TSC clocksource
+ */
+ if (gtod->clock.vclock_mode != VCLOCK_TSC &&
+ atomic_read(&kvm_guest_has_master_clock) != 0)
+ queue_work(system_long_wq, &pvclock_gtod_work);
+
+ return 0;
+}
+
+static struct notifier_block pvclock_gtod_notifier = {
+ .notifier_call = pvclock_gtod_notify,
+};
+#endif
+
int kvm_arch_init(void *opaque)
{
int r;
@@ -4922,6 +5252,10 @@ int kvm_arch_init(void *opaque)
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_lapic_init();
+#ifdef CONFIG_X86_64
+ pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
+#endif
+
return 0;
out:
@@ -4936,6 +5270,9 @@ void kvm_arch_exit(void)
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+#ifdef CONFIG_X86_64
+ pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
}
@@ -5059,7 +5396,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
-int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
+static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
char instruction[3];
@@ -5235,6 +5572,29 @@ static void process_nmi(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
+static void kvm_gen_update_masterclock(struct kvm *kvm)
+{
+#ifdef CONFIG_X86_64
+ int i;
+ struct kvm_vcpu *vcpu;
+ struct kvm_arch *ka = &kvm->arch;
+
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ kvm_make_mclock_inprogress_request(kvm);
+ /* no guest entries from this point */
+ pvclock_update_vm_gtod_copy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+
+ /* guest entries allowed */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
+
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+#endif
+}
+
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
@@ -5247,6 +5607,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
+ if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
+ kvm_gen_update_masterclock(vcpu->kvm);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
@@ -5362,7 +5724,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (hw_breakpoint_active())
hw_breakpoint_restore();
- vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
+ vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
+ native_read_tsc());
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
@@ -5419,7 +5782,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->arch.sipi_vector);
kvm_lapic_reset(vcpu);
- r = kvm_arch_vcpu_reset(vcpu);
+ r = kvm_vcpu_reset(vcpu);
if (r)
return r;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -6047,7 +6410,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
r = vcpu_load(vcpu);
if (r)
return r;
- r = kvm_arch_vcpu_reset(vcpu);
+ r = kvm_vcpu_reset(vcpu);
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
@@ -6055,6 +6418,23 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return r;
}
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ int r;
+ struct msr_data msr;
+
+ r = vcpu_load(vcpu);
+ if (r)
+ return r;
+ msr.data = 0x0;
+ msr.index = MSR_IA32_TSC;
+ msr.host_initiated = true;
+ kvm_write_tsc(vcpu, &msr);
+ vcpu_put(vcpu);
+
+ return r;
+}
+
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int r;
@@ -6069,7 +6449,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_x86_ops->vcpu_free(vcpu);
}
-int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
@@ -6092,6 +6472,10 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_pmu_reset(vcpu);
+ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+ vcpu->arch.regs_avail = ~0;
+ vcpu->arch.regs_dirty = ~0;
+
return kvm_x86_ops->vcpu_reset(vcpu);
}
@@ -6168,6 +6552,8 @@ int kvm_arch_hardware_enable(void *garbage)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = local_tsc;
+ set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
+ &vcpu->requests);
}
/*
@@ -6258,10 +6644,17 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
+ r = fx_init(vcpu);
+ if (r)
+ goto fail_free_wbinvd_dirty_mask;
+
+ vcpu->arch.ia32_tsc_adjust_msr = 0x0;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
+fail_free_wbinvd_dirty_mask:
+ free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
@@ -6305,6 +6698,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
mutex_init(&kvm->arch.apic_map_lock);
+ spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+
+ pvclock_update_vm_gtod_copy(kvm);
return 0;
}
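
Among the x86.c changes above is the MSR_IA32_TSC_ADJUST handling: a guest-initiated write moves the TSC offset by the delta against the previously stored ADJUST value, while a host-initiated write only records the value and leaves the offset untouched. The sketch below replays that logic with a simplified vcpu structure; the field names follow the diff, everything else is illustrative.

/*
 * User-space sketch of the MSR_IA32_TSC_ADJUST semantics added above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	int64_t tsc_offset;		/* stand-in for the hardware TSC offset */
	int64_t ia32_tsc_adjust_msr;	/* last known ADJUST value */
};

static void set_tsc_adjust(struct toy_vcpu *v, int64_t data, bool host_initiated)
{
	if (!host_initiated) {
		int64_t adj = data - v->ia32_tsc_adjust_msr;

		v->tsc_offset += adj;	/* mirrors adjust_tsc_offset(vcpu, adj, true) */
	}
	v->ia32_tsc_adjust_msr = data;
}

int main(void)
{
	struct toy_vcpu v = { .tsc_offset = 1000, .ia32_tsc_adjust_msr = 0 };

	set_tsc_adjust(&v, 500, false);		/* guest write: offset 1000 -> 1500 */
	printf("offset %lld adjust %lld\n",
	       (long long)v.tsc_offset, (long long)v.ia32_tsc_adjust_msr);

	set_tsc_adjust(&v, 200, true);		/* host restore: offset unchanged */
	printf("offset %lld adjust %lld\n",
	       (long long)v.tsc_offset, (long long)v.ia32_tsc_adjust_msr);
	return 0;
}
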
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2b5219c12ac..e224f7a671b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 642d8805bc1..df4176cdbb3 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1412,7 +1412,7 @@ __init void lguest_init(void)
/* We don't have features. We have puppies! Puppies! */
#ifdef CONFIG_X86_MCE
- mce_disabled = 1;
+ mca_cfg.disabled = true;
#endif
#ifdef CONFIG_ACPI
acpi_disabled = 1;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7a529cbab7a..027088f2f7d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -803,20 +803,6 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
-/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
-static void
-out_of_memory(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
-{
- /*
- * We ran out of memory, call the OOM killer, and return the userspace
- * (which will retry the fault, or kill us if we got oom-killed):
- */
- up_read(&current->mm->mmap_sem);
-
- pagefault_out_of_memory();
-}
-
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
@@ -879,7 +865,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
return 1;
}
- out_of_memory(regs, error_code, address);
+ up_read(&current->mm->mmap_sem);
+
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we got
+ * oom-killed):
+ */
+ pagefault_out_of_memory();
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3baff255ada..2ead3c8a4c8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -630,7 +630,9 @@ void __init paging_init(void)
* numa support is not compiled in, and later node_set_state
* will not set it back.
*/
- node_clear_state(0, N_NORMAL_MEMORY);
+ node_clear_state(0, N_MEMORY);
+ if (N_MEMORY != N_NORMAL_MEMORY)
+ node_clear_state(0, N_NORMAL_MEMORY);
zone_sizes_init();
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8573b83a63d..e27fbf887f3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -137,7 +137,7 @@ static void pgd_dtor(pgd_t *pgd)
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
- * -- wli
+ * -- nyc
*/
#ifdef CONFIG_X86_PAE
@@ -301,6 +301,13 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
+/*
+ * Used to set accessed or dirty bits in the page table entries
+ * on other architectures. On x86, the accessed and dirty bits
+ * are tracked by hardware. However, do_wp_page calls this function
+ * to also make the pte writeable at the same time the dirty bit is
+ * set. In that case we do actually need to write the PTE.
+ */
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
@@ -310,7 +317,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
if (changed && dirty) {
*ptep = entry;
pte_update_defer(vma->vm_mm, address, ptep);
- flush_tlb_page(vma, address);
}
return changed;
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 3af5a1e79c9..ee0af58ca5b 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_STA2X11) += sta2x11-fixup.o
obj-$(CONFIG_X86_VISWS) += visws.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_X86_INTEL_MID) += mrst.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 192397c9860..0c01261fe5a 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -12,6 +12,7 @@ struct pci_root_info {
char name[16];
unsigned int res_num;
struct resource *res;
+ resource_size_t *res_offset;
struct pci_sysdata sd;
#ifdef CONFIG_PCI_MMCONFIG
bool mcfg_added;
@@ -22,6 +23,7 @@ struct pci_root_info {
};
static bool pci_use_crs = true;
+static bool pci_ignore_seg = false;
static int __init set_use_crs(const struct dmi_system_id *id)
{
@@ -35,7 +37,14 @@ static int __init set_nouse_crs(const struct dmi_system_id *id)
return 0;
}
-static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+static int __init set_ignore_seg(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO "PCI: %s detected: ignoring ACPI _SEG\n", id->ident);
+ pci_ignore_seg = true;
+ return 0;
+}
+
+static const struct dmi_system_id pci_crs_quirks[] __initconst = {
/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
{
.callback = set_use_crs,
@@ -98,6 +107,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
+
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
+ {
+ .callback = set_ignore_seg,
+ .ident = "HP xw9300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
+ },
+ },
{}
};
@@ -108,7 +127,7 @@ void __init pci_acpi_crs_quirks(void)
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
pci_use_crs = false;
- dmi_check_system(pci_use_crs_table);
+ dmi_check_system(pci_crs_quirks);
/*
* If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
@@ -305,6 +324,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
res->flags = flags;
res->start = start;
res->end = end;
+ info->res_offset[info->res_num] = addr.translation_offset;
if (!pci_use_crs) {
dev_printk(KERN_DEBUG, &info->bridge->dev,
@@ -374,7 +394,8 @@ static void add_resources(struct pci_root_info *info,
"ignoring host bridge window %pR (conflicts with %s %pR)\n",
res, conflict->name, conflict);
else
- pci_add_resource(resources, res);
+ pci_add_resource_offset(resources, res,
+ info->res_offset[i]);
}
}
@@ -382,6 +403,8 @@ static void free_pci_root_info_res(struct pci_root_info *info)
{
kfree(info->res);
info->res = NULL;
+ kfree(info->res_offset);
+ info->res_offset = NULL;
info->res_num = 0;
}
@@ -432,10 +455,20 @@ probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
return;
size = sizeof(*info->res) * info->res_num;
- info->res_num = 0;
info->res = kzalloc(size, GFP_KERNEL);
- if (!info->res)
+ if (!info->res) {
+ info->res_num = 0;
+ return;
+ }
+
+ size = sizeof(*info->res_offset) * info->res_num;
+ info->res_num = 0;
+ info->res_offset = kzalloc(size, GFP_KERNEL);
+ if (!info->res_offset) {
+ kfree(info->res);
+ info->res = NULL;
return;
+ }
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
info);
@@ -455,6 +488,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
int pxm;
#endif
+ if (pci_ignore_seg)
+ domain = 0;
+
if (domain && !pci_domains_supported) {
printk(KERN_WARNING "pci_bus %04x:%02x: "
"ignored (multiple domains not supported)\n",
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 720e973fc34..412e1286d1f 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -17,6 +17,7 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>
+#include <asm/setup.h>
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
@@ -433,7 +434,8 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
.callback = set_scan_all,
.ident = "Stratus/NEC ftServer",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ftServer"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Stratus"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
},
},
{}
@@ -608,6 +610,35 @@ unsigned int pcibios_assign_all_busses(void)
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}
+int pcibios_add_device(struct pci_dev *dev)
+{
+ struct setup_data *data;
+ struct pci_setup_rom *rom;
+ u64 pa_data;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = phys_to_virt(pa_data);
+
+ if (data->type == SETUP_PCI) {
+ rom = (struct pci_setup_rom *)data;
+
+ if ((pci_domain_nr(dev->bus) == rom->segment) &&
+ (dev->bus->number == rom->bus) &&
+ (PCI_SLOT(dev->devfn) == rom->device) &&
+ (PCI_FUNC(dev->devfn) == rom->function) &&
+ (dev->vendor == rom->vendor) &&
+ (dev->device == rom->devid)) {
+ dev->rom = pa_data +
+ offsetof(struct pci_setup_rom, romdata);
+ dev->romlen = rom->pcilen;
+ }
+ }
+ pa_data = data->next;
+ }
+ return 0;
+}
+
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
@@ -626,7 +657,7 @@ void pcibios_disable_device (struct pci_dev *dev)
pcibios_disable_irq(dev);
}
-int pci_ext_cfg_avail(struct pci_dev *dev)
+int pci_ext_cfg_avail(void)
{
if (raw_pci_ext_ops)
return 1;
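
pcibios_add_device() above walks the firmware-provided setup_data chain and attaches a ROM image to the PCI device whose segment/bus/devfn and IDs match. The sketch below mirrors only the list walk and match, with hypothetical record types and ordinary pointers instead of phys_to_virt() (not the kernel structures):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical tagged record chained through 'next', loosely modelled
     * on the setup_data walk above. */
    struct record {
        struct record *next;
        uint32_t type;              /* 3 stands in for a SETUP_PCI-like tag */
        uint8_t bus, devfn;
        const char *payload;
    };

    #define TYPE_PCI_ROM 3

    static const char *find_rom(struct record *head, uint8_t bus, uint8_t devfn)
    {
        for (struct record *r = head; r; r = r->next)
            if (r->type == TYPE_PCI_ROM && r->bus == bus && r->devfn == devfn)
                return r->payload;
        return NULL;
    }

    int main(void)
    {
        struct record r2 = { NULL, TYPE_PCI_ROM, 0, 0x10, "ROM for 00:02.0" };
        struct record r1 = { &r2, 7, 0, 0x08, "unrelated record" };

        const char *rom = find_rom(&r1, 0, 0x10);
        printf("%s\n", rom ? rom : "no ROM found");
        return 0;
    }
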
diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c
new file mode 100644
index 00000000000..7307d9d12d1
--- /dev/null
+++ b/arch/x86/pci/numachip.c
@@ -0,0 +1,129 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-specific PCI code
+ *
+ * Copyright (C) 2012 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ * PCI accessor functions derived from mmconfig_64.c
+ *
+ */
+
+#include <linux/pci.h>
+#include <asm/pci_x86.h>
+
+static u8 limit __read_mostly;
+
+static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
+{
+ struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
+
+ if (cfg && cfg->virt)
+ return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
+ return NULL;
+}
+
+static int pci_mmcfg_read_numachip(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *value)
+{
+ char __iomem *addr;
+
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
+err: *value = -1;
+ return -EINVAL;
+ }
+
+ /* Ensure AMD Northbridges don't decode reads to other devices */
+ if (unlikely(bus == 0 && devfn >= limit)) {
+ *value = -1;
+ return 0;
+ }
+
+ rcu_read_lock();
+ addr = pci_dev_base(seg, bus, devfn);
+ if (!addr) {
+ rcu_read_unlock();
+ goto err;
+ }
+
+ switch (len) {
+ case 1:
+ *value = mmio_config_readb(addr + reg);
+ break;
+ case 2:
+ *value = mmio_config_readw(addr + reg);
+ break;
+ case 4:
+ *value = mmio_config_readl(addr + reg);
+ break;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 value)
+{
+ char __iomem *addr;
+
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
+ if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
+ return -EINVAL;
+
+ /* Ensure AMD Northbridges don't decode writes to other devices */
+ if (unlikely(bus == 0 && devfn >= limit))
+ return 0;
+
+ rcu_read_lock();
+ addr = pci_dev_base(seg, bus, devfn);
+ if (!addr) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ switch (len) {
+ case 1:
+ mmio_config_writeb(addr + reg, value);
+ break;
+ case 2:
+ mmio_config_writew(addr + reg, value);
+ break;
+ case 4:
+ mmio_config_writel(addr + reg, value);
+ break;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+const struct pci_raw_ops pci_mmcfg_numachip = {
+ .read = pci_mmcfg_read_numachip,
+ .write = pci_mmcfg_write_numachip,
+};
+
+int __init pci_numachip_init(void)
+{
+ int ret = 0;
+ u32 val;
+
+ /* For remote I/O, restrict bus 0 access to the actual number of AMD
+ Northbridges, which start at device number 0x18 */
+ ret = raw_pci_read(0, 0, PCI_DEVFN(0x18, 0), 0x60, sizeof(val), &val);
+ if (ret)
+ goto out;
+
+ /* HyperTransport fabric size in bits 6:4 */
+ limit = PCI_DEVFN(0x18 + ((val >> 4) & 7) + 1, 0);
+
+ /* Use NumaChip PCI accessors for non-extended and extended access */
+ raw_pci_ops = raw_pci_ext_ops = &pci_mmcfg_numachip;
+out:
+ return ret;
+}
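
pci_numachip_init() above derives the bus 0 device limit from the HyperTransport fabric size in bits 6:4 of the Northbridge register it reads. That arithmetic on its own, with an invented register value (PCI packs devfn as slot*8+function, as PCI_DEVFN() does):

    #include <stdint.h>
    #include <stdio.h>

    /* devfn packs a 5-bit slot and a 3-bit function number. */
    static unsigned int devfn(unsigned int slot, unsigned int func)
    {
        return (slot << 3) | (func & 7);
    }

    int main(void)
    {
        uint32_t val = 0x00000060;                   /* made-up register value */
        unsigned int nodes = ((val >> 4) & 7) + 1;   /* HT fabric size, bits 6:4 */
        unsigned int limit = devfn(0x18 + nodes, 0);

        printf("%u node(s); bus 0 access limited below devfn 0x%02x\n",
               nodes, limit);
        return 0;
    }
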
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index f6a0c1b8e51..d9c1b95af17 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -39,6 +39,8 @@ void efi_bgrt_init(void)
if (ACPI_FAILURE(status))
return;
+ if (bgrt_tab->header.length < sizeof(*bgrt_tab))
+ return;
if (bgrt_tab->version != 1)
return;
if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index 5917eb56b31..e6cb80f620a 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -23,6 +23,7 @@
#include <linux/moduleparam.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
@@ -62,29 +63,75 @@ static void iris_power_off(void)
* by reading its input port and seeing whether the read value is
* meaningful.
*/
-static int iris_init(void)
+static int iris_probe(struct platform_device *pdev)
{
- unsigned char status;
- if (force != 1) {
- printk(KERN_ERR "The force parameter has not been set to 1 so the Iris poweroff handler will not be installed.\n");
- return -ENODEV;
- }
- status = inb(IRIS_GIO_INPUT);
+ unsigned char status = inb(IRIS_GIO_INPUT);
if (status == IRIS_GIO_NODEV) {
- printk(KERN_ERR "This machine does not seem to be an Iris. Power_off handler not installed.\n");
+ printk(KERN_ERR "This machine does not seem to be an Iris. "
+ "Power off handler not installed.\n");
return -ENODEV;
}
old_pm_power_off = pm_power_off;
pm_power_off = &iris_power_off;
printk(KERN_INFO "Iris power_off handler installed.\n");
-
return 0;
}
-static void iris_exit(void)
+static int iris_remove(struct platform_device *pdev)
{
pm_power_off = old_pm_power_off;
printk(KERN_INFO "Iris power_off handler uninstalled.\n");
+ return 0;
+}
+
+static struct platform_driver iris_driver = {
+ .driver = {
+ .name = "iris",
+ .owner = THIS_MODULE,
+ },
+ .probe = iris_probe,
+ .remove = iris_remove,
+};
+
+static struct resource iris_resources[] = {
+ {
+ .start = IRIS_GIO_BASE,
+ .end = IRIS_GIO_OUTPUT,
+ .flags = IORESOURCE_IO,
+ .name = "address"
+ }
+};
+
+static struct platform_device *iris_device;
+
+static int iris_init(void)
+{
+ int ret;
+ if (force != 1) {
+ printk(KERN_ERR "The force parameter has not been set to 1."
+ " The Iris poweroff handler will not be installed.\n");
+ return -ENODEV;
+ }
+ ret = platform_driver_register(&iris_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register iris platform driver: %d\n",
+ ret);
+ return ret;
+ }
+ iris_device = platform_device_register_simple("iris", (-1),
+ iris_resources, ARRAY_SIZE(iris_resources));
+ if (IS_ERR(iris_device)) {
+ printk(KERN_ERR "Failed to register iris platform device\n");
+ platform_driver_unregister(&iris_driver);
+ return PTR_ERR(iris_device);
+ }
+ return 0;
+}
+
+static void iris_exit(void)
+{
+ platform_device_unregister(iris_device);
+ platform_driver_unregister(&iris_driver);
}
module_init(iris_init);
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index ee3c220ee50..28e3fa9056e 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -192,7 +192,7 @@
183 i386 getcwd sys_getcwd
184 i386 capget sys_capget
185 i386 capset sys_capset
-186 i386 sigaltstack ptregs_sigaltstack stub32_sigaltstack
+186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack
187 i386 sendfile sys_sendfile sys32_sendfile
188 i386 getpmsg
189 i386 putpmsg
@@ -356,3 +356,4 @@
347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
349 i386 kcmp sys_kcmp
+350 i386 finit_module sys_finit_module
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index a582bfed95b..dc97328bd90 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -137,7 +137,7 @@
128 64 rt_sigtimedwait sys_rt_sigtimedwait
129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
130 common rt_sigsuspend sys_rt_sigsuspend
-131 64 sigaltstack stub_sigaltstack
+131 64 sigaltstack sys_sigaltstack
132 common utime sys_utime
133 common mknod sys_mknod
134 64 uselib
@@ -319,6 +319,7 @@
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
312 common kcmp sys_kcmp
+313 common finit_module sys_finit_module
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -337,7 +338,7 @@
522 x32 rt_sigpending sys32_rt_sigpending
523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo
-525 x32 sigaltstack stub_x32_sigaltstack
+525 x32 sigaltstack compat_sys_sigaltstack
526 x32 timer_create compat_sys_timer_create
527 x32 mq_notify compat_sys_mq_notify
528 x32 kexec_load compat_sys_kexec_load
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 98399704196..53c90fd412d 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -13,8 +13,7 @@ endmenu
config UML_X86
def_bool y
select GENERIC_FIND_FIRST_BIT
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
+ select GENERIC_SIGALTSTACK
config 64BIT
bool "64-bit kernel" if SUBARCH = "x86"
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h
index 755133258c4..54f8102ccde 100644
--- a/arch/x86/um/asm/ptrace.h
+++ b/arch/x86/um/asm/ptrace.h
@@ -86,4 +86,5 @@ extern long arch_prctl(struct task_struct *task, int code,
unsigned long __user *addr);
#endif
+#define user_stack_pointer(regs) PT_REGS_SP(regs)
#endif /* __UM_X86_PTRACE_H */
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index bdaa08cfbcf..71cef48ea5c 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -342,9 +342,7 @@ static int copy_ucontext_to_user(struct ucontext __user *uc,
{
int err = 0;
- err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp);
- err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags);
- err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size);
+ err |= __save_altstack(&uc->uc_stack, sp);
err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0);
err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set));
return err;
@@ -529,10 +527,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(PT_REGS_SP(regs)),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0]);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 812e98c098e..a0c3b0d1a12 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -27,7 +27,6 @@
#define ptregs_iopl sys_iopl
#define ptregs_vm86old sys_vm86old
#define ptregs_vm86 sys_vm86
-#define ptregs_sigaltstack sys_sigaltstack
#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ;
#include <asm/syscalls_32.h>
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 170bd926a69..f2f0723070c 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -31,7 +31,6 @@
#define stub_fork sys_fork
#define stub_vfork sys_vfork
#define stub_execve sys_execve
-#define stub_sigaltstack sys_sigaltstack
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 4df6c373421..205ad328aa5 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -22,6 +22,7 @@
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
+#include <asm/pvclock.h>
#define gtod (&VVAR(vsyscall_gtod_data))
@@ -62,6 +63,76 @@ static notrace cycle_t vread_hpet(void)
return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
+#ifdef CONFIG_PARAVIRT_CLOCK
+
+static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
+{
+ const struct pvclock_vsyscall_time_info *pvti_base;
+ int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
+ int offset = cpu % (PAGE_SIZE/PVTI_SIZE);
+
+ BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);
+
+ pvti_base = (struct pvclock_vsyscall_time_info *)
+ __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);
+
+ return &pvti_base[offset];
+}
+
+static notrace cycle_t vread_pvclock(int *mode)
+{
+ const struct pvclock_vsyscall_time_info *pvti;
+ cycle_t ret;
+ u64 last;
+ u32 version;
+ u32 migrate_count;
+ u8 flags;
+ unsigned cpu, cpu1;
+
+
+ /*
+ * When looping to get a consistent (time-info, tsc) pair, we
+ * also need to deal with the possibility we can switch vcpus,
+ * so make sure we always re-fetch time-info for the current vcpu.
+ */
+ do {
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+ /* TODO: We can put vcpu id into higher bits of pvti.version.
+ * This will save a couple of cycles by getting rid of
+ * __getcpu() calls (Gleb).
+ */
+
+ pvti = get_pvti(cpu);
+
+ migrate_count = pvti->migrate_count;
+
+ version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
+
+ /*
+ * Test we're still on the cpu as well as the version.
+ * We could have been migrated just after the first
+ * vgetcpu but before fetching the version, so we
+ * wouldn't notice a version change.
+ */
+ cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1 ||
+ (pvti->pvti.version & 1) ||
+ pvti->pvti.version != version ||
+ pvti->migrate_count != migrate_count));
+
+ if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
+ *mode = VCLOCK_NONE;
+
+ /* refer to tsc.c read_tsc() comment for rationale */
+ last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+ if (likely(ret >= last))
+ return ret;
+
+ return last;
+}
+#endif
+
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
@@ -80,7 +151,7 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
}
-notrace static inline u64 vgetsns(void)
+notrace static inline u64 vgetsns(int *mode)
{
long v;
cycles_t cycles;
@@ -88,6 +159,10 @@ notrace static inline u64 vgetsns(void)
cycles = vread_tsc();
else if (gtod->clock.vclock_mode == VCLOCK_HPET)
cycles = vread_hpet();
+#ifdef CONFIG_PARAVIRT_CLOCK
+ else if (gtod->clock.vclock_mode == VCLOCK_PVCLOCK)
+ cycles = vread_pvclock(mode);
+#endif
else
return 0;
v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
@@ -107,7 +182,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->wall_time_snsec;
- ns += vgetsns();
+ ns += vgetsns(&mode);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -127,7 +202,7 @@ notrace static int do_monotonic(struct timespec *ts)
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
- ns += vgetsns();
+ ns += vgetsns(&mode);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
timespec_add_ns(ts, ns);
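
vread_pvclock() above keeps re-reading until the version is even and unchanged and the vCPU has not migrated in between. The same retry idea, stripped down to a generic seqcount-style reader in portable C11 (an illustration of the pattern only, not the pvclock ABI):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A writer would bump 'version' to an odd value before updating 'value'
     * and back to even afterwards; a reader retries until it sees a stable,
     * even version. */
    struct sample {
        atomic_uint version;
        uint64_t value;
    };

    static uint64_t read_stable(struct sample *s)
    {
        unsigned int v;
        uint64_t val;

        do {
            v = atomic_load_explicit(&s->version, memory_order_acquire);
            val = s->value;
            atomic_thread_fence(memory_order_acquire);
        } while ((v & 1) ||
                 v != atomic_load_explicit(&s->version, memory_order_relaxed));

        return val;
    }

    int main(void)
    {
        struct sample s = { ATOMIC_VAR_INIT(2), 12345 };

        printf("stable value: %llu\n", (unsigned long long)read_stable(&s));
        return 0;
    }
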
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 5463ad55857..2f94b039e55 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -17,15 +17,10 @@ __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
unsigned int p;
- if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
- /* Load per CPU data from RDTSCP */
- native_read_tscp(&p);
- } else {
- /* Load per CPU data from GDT */
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
- }
+ p = __getcpu();
+
if (cpu)
- *cpu = p & 0xfff;
+ *cpu = p & VGETCPU_CPU_MASK;
if (node)
*node = p >> 12;
return 0;
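
__vdso_getcpu() above unpacks one per-CPU word into a CPU number (low 12 bits) and a node number (the remaining high bits). The decode in plain C, with a made-up packed value:

    #include <stdio.h>

    #define CPU_MASK 0xfffu   /* low 12 bits carry the CPU number */

    int main(void)
    {
        unsigned int p = (2u << 12) | 37u;   /* hypothetical: node 2, cpu 37 */
        unsigned int cpu = p & CPU_MASK;
        unsigned int node = p >> 12;

        printf("cpu=%u node=%u\n", cpu, node);
        return 0;
    }
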
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 9a6775c9ddc..131dacd2748 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -6,6 +6,7 @@ config XEN
bool "Xen guest support"
select PARAVIRT
select PARAVIRT_CLOCK
+ select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
depends on X86_TSC
help
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 586d83812b6..138e5667409 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -193,10 +193,11 @@ void xen_vcpu_restore(void)
{
int cpu;
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
bool other_cpu = (cpu != smp_processor_id());
+ bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
- if (other_cpu &&
+ if (other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
BUG();
@@ -205,7 +206,7 @@ void xen_vcpu_restore(void)
if (have_vcpu_info_placement)
xen_vcpu_setup(cpu);
- if (other_cpu &&
+ if (other_cpu && is_up &&
HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
BUG();
}
@@ -223,6 +224,21 @@ static void __init xen_banner(void)
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
+/* Check if running on Xen version (major, minor) or later */
+bool
+xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+ unsigned int version;
+
+ if (!xen_domain())
+ return false;
+
+ version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+ ((version >> 16) > major))
+ return true;
+ return false;
+}
#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0
@@ -287,8 +303,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
static bool __init xen_check_mwait(void)
{
-#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
- !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
+#ifdef CONFIG_ACPI
struct xen_platform_op op = {
.cmd = XENPF_set_processor_pminfo,
.u.set_pminfo.id = -1,
@@ -309,6 +324,13 @@ static bool __init xen_check_mwait(void)
if (!xen_initial_domain())
return false;
+ /*
+ * When running on a platform earlier than Xen 4.2, do not expose
+ * mwait, to avoid the risk of loading the native ACPI PAD driver
+ */
+ if (!xen_running_on_version_or_later(4, 2))
+ return false;
+
ax = 1;
cx = 0;
@@ -1495,51 +1517,72 @@ asmlinkage void __init xen_start_kernel(void)
#endif
}
-void __ref xen_hvm_init_shared_info(void)
+#ifdef CONFIG_XEN_PVHVM
+#define HVM_SHARED_INFO_ADDR 0xFE700000UL
+static struct shared_info *xen_hvm_shared_info;
+static unsigned long xen_hvm_sip_phys;
+static int xen_major, xen_minor;
+
+static void xen_hvm_connect_shared_info(unsigned long pfn)
{
- int cpu;
struct xen_add_to_physmap xatp;
- static struct shared_info *shared_info_page = 0;
- if (!shared_info_page)
- shared_info_page = (struct shared_info *)
- extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+ xatp.gpfn = pfn;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
- HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
+}
+static void __init xen_hvm_set_shared_info(struct shared_info *sip)
+{
+ int cpu;
+
+ HYPERVISOR_shared_info = sip;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions. We don't need the vcpu_info placement
* optimizations because we don't use any pv_mmu or pv_irq op on
- * HVM.
- * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_init_shared_info is run at resume time too and
- * in that case multiple vcpus might be online. */
- for_each_online_cpu(cpu) {
+ * HVM. */
+ for_each_online_cpu(cpu)
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+}
+
+/* Reconnect the shared_info pfn to a (new) mfn */
+void xen_hvm_resume_shared_info(void)
+{
+ xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
+}
+
+/* Xen tools prior to Xen 4 do not provide an E820_Reserved area for guest usage.
+ * On these old tools the shared info page will be placed in E820_Ram.
+ * Xen 4 provides an E820_Reserved area at 0xFC000000, and this code expects
+ * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
+ * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
+ * here for the shared info page. */
+static void __init xen_hvm_init_shared_info(void)
+{
+ if (xen_major < 4) {
+ xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
+ } else {
+ xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
+ xen_hvm_shared_info =
+ (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
}
+ xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
+ xen_hvm_set_shared_info(xen_hvm_shared_info);
}
-#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
- int major, minor;
- uint32_t eax, ebx, ecx, edx, pages, msr, base;
+ uint32_t ecx, edx, pages, msr, base;
u64 pfn;
base = xen_cpuid_base();
- cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
- major = eax >> 16;
- minor = eax & 0xffff;
- printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
-
cpuid(base + 2, &pages, &msr, &ecx, &edx);
pfn = __pa(hypercall_page);
@@ -1590,12 +1633,22 @@ static void __init xen_hvm_guest_init(void)
static bool __init xen_hvm_platform(void)
{
+ uint32_t eax, ebx, ecx, edx, base;
+
if (xen_pv_domain())
return false;
- if (!xen_cpuid_base())
+ base = xen_cpuid_base();
+ if (!base)
return false;
+ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+ xen_major = eax >> 16;
+ xen_minor = eax & 0xffff;
+
+ printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
+
return true;
}
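
Both xen_running_on_version_or_later() and the new xen_hvm_platform() code above treat the hypervisor version as a word packed as (major << 16) | minor. The comparison on its own, with invented numbers:

    #include <stdbool.h>
    #include <stdio.h>

    static bool version_at_least(unsigned int version,
                                 unsigned int major, unsigned int minor)
    {
        unsigned int v_major = version >> 16;
        unsigned int v_minor = version & 0xffff;

        return v_major > major || (v_major == major && v_minor >= minor);
    }

    int main(void)
    {
        unsigned int version = (4 << 16) | 1;   /* pretend the hypervisor says 4.1 */

        printf("4.1 >= 4.2? %s\n", version_at_least(version, 4, 2) ? "yes" : "no");
        printf("4.1 >= 4.0? %s\n", version_at_least(version, 4, 0) ? "yes" : "no");
        return 0;
    }
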
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dcf5f2dd91e..01de35c7722 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2497,8 +2497,10 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid)
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
+
{
struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@@ -2542,3 +2544,14 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+/* Returns: 0 success */
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages)
+{
+ if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 353c50f1870..4f7d2599b48 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -254,7 +254,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
}
xen_init_lock_cpu(0);
- smp_store_cpu_info(0);
+ smp_store_boot_cpu_info();
cpu_data(0).x86_max_cores = 1;
for_each_possible_cpu(i) {
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 45329c8c226..ae8a00c39de 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_init_shared_info();
+ xen_hvm_resume_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad..d2e73d19d36 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
void xen_vcpu_restore(void);
void xen_callback_vector(void);
-void xen_hvm_init_shared_info(void);
+void xen_hvm_resume_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 2481f267be2..5aab1acabf1 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -13,10 +13,9 @@ config XTENSA
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_PCI_IOMAP
- select GENERIC_KERNEL_THREAD
- select GENERIC_KERNEL_EXECVE
select ARCH_WANT_OPTIONAL_GPIOLIB
select CLONE_BACKWARDS
+ select IRQ_DOMAIN
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
@@ -150,6 +149,15 @@ config XTENSA_PLATFORM_S6105
select SERIAL_CONSOLE
select NO_IOPORT
+config XTENSA_PLATFORM_XTFPGA
+ bool "XTFPGA"
+ select SERIAL_CONSOLE
+ select ETHOC
+ select XTENSA_CALIBRATE_CCOUNT
+ help
+ XTFPGA is the name of the Tensilica board family (LX60, LX110, LX200, ML605).
+ This hardware is capable of running a full Linux distribution.
+
endchoice
@@ -177,6 +185,17 @@ config CMDLINE
time by entering them here. As a minimum, you should specify the
memory size and the root device (e.g., mem=64M root=/dev/nfs).
+config USE_OF
+ bool "Flattened Device Tree support"
+ select OF
+ select OF_EARLY_FLATTREE
+ help
+ Include support for flattened device tree machine descriptions.
+
+config BUILTIN_DTB
+ string "DTB to build into the kernel image"
+ depends on OF
+
source "mm/Kconfig"
source "drivers/pcmcia/Kconfig"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index 11c585295dd..a34010e0e51 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,26 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-endmenu
+config LD_NO_RELAX
+ bool "Disable linker relaxation"
+ default n
+ help
+ Enable this option to disable link-time optimizations.
+ The default linker behavior is to combine identical literal
+ values to reduce code size and remove unnecessary overhead from
+ assembler-generated 'longcall' sequences.
+ Enabling this option improves the link time but increases the
+ code size, and possibly execution time.
+
+config S32C1I_SELFTEST
+ bool "Perform S32C1I instruction self-test at boot"
+ default y
+ help
+ Enable this option to test S32C1I instruction behavior at boot.
+ Correct operation of this instruction requires some cooperation from hardware
+ external to the processor (such as a bus bridge, bus fabric, or memory controller).
+ It is easy to get the hardware configuration wrong; this test should catch such problems early.
+ Say 'N' on stable hardware.
+endmenu
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index bb5ba61723f..0aa72702f17 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -38,6 +38,7 @@ endif
platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
platform-$(CONFIG_XTENSA_PLATFORM_S6105) := s6105
+platform-$(CONFIG_XTENSA_PLATFORM_XTFPGA) := xtfpga
PLATFORM = $(platform-y)
export PLATFORM
@@ -49,6 +50,17 @@ KBUILD_CFLAGS += -pipe -mlongcalls
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+ifneq ($(CONFIG_LD_NO_RELAX),)
+LDFLAGS := --no-relax
+endif
+
+ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EB__
+endif
+ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+CHECKFLAGS += -D__XTENSA_EL__
+endif
+
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
@@ -75,6 +87,10 @@ core-y += $(buildvar) $(buildplf)
libs-y += arch/xtensa/lib/ $(LIBGCC)
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+core-$(CONFIG_OF) += arch/xtensa/boot/
+endif
+
boot := arch/xtensa/boot
all: zImage
@@ -84,7 +100,9 @@ bzImage : zImage
zImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
+%.dtb:
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
define archhelp
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
endef
-
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 4018f899419..818647e815d 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -22,12 +22,35 @@ subdir-y := lib
# Subdirs for the boot loader(s)
bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
-bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
+bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
+bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+# Rule to build device tree blobs
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
+clean-files := *.dtb.S
+
zImage Image: $(bootdir-y)
$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
$(addprefix $(obj)/,$(host-progs))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
+OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+
+vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+vmlinux.bin.gz: vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+boot-elf: vmlinux.bin
+boot-redboot: vmlinux.bin.gz
+boot-uboot: vmlinux.bin.gz
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index f10992b8902..1fe01b78c12 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -4,9 +4,6 @@
# for more details.
#
-GZIP = gzip
-GZIP_FLAGS = -v9fc
-
ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be
else
@@ -20,18 +17,17 @@ boot-y := bootstrap.o
OBJS := $(addprefix $(obj)/,$(boot-y))
-vmlinux.tmp: vmlinux
- $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
- $^ $@
-
-Image: vmlinux.tmp $(OBJS) arch/$(ARCH)/boot/boot-elf/boot.lds
- $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
- --add-section image=vmlinux.tmp \
+$(obj)/Image.o: vmlinux.bin $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin \
--set-section-flags image=contents,alloc,load,load,data \
- $(OBJS) $@.tmp
- $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
- -T arch/$(ARCH)/boot/boot-elf/boot.lds \
- -o arch/$(ARCH)/boot/$@.elf $@.tmp
+ $(OBJS) $@
-zImage: Image
+$(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
+ $(Q)$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
+ -T $(obj)/boot.lds \
+ --build-id=none \
+ -o $@ $(obj)/Image.o
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+zImage: $(obj)/../Image.elf
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
index 25a78c6b153..8be8b943698 100644
--- a/arch/xtensa/boot/boot-redboot/Makefile
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -4,8 +4,6 @@
# for more details.
#
-GZIP = gzip
-GZIP_FLAGS = -v9fc
ifeq ($(BIG_ENDIAN),1)
OBJCOPY_ARGS := -O elf32-xtensa-be
else
@@ -21,17 +19,17 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-vmlinux.tmp: vmlinux
- $(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
- $^ $@
+$(obj)/zImage.o: vmlinux.bin.gz $(OBJS)
+ $(Q)$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
+ --add-section image=vmlinux.bin.gz \
+ --set-section-flags image=contents,alloc,load,load,data \
+ $(OBJS) $@
-vmlinux.tmp.gz: vmlinux.tmp
- $(GZIP) $(GZIP_FLAGS) $^ > $@
+$(obj)/zImage.elf: $(obj)/zImage.o $(LIBS)
+ $(Q)$(LD) $(LD_ARGS) -o $@ $^ -L/xtensa-elf/lib $(LIBGCC)
-zImage: vmlinux.tmp.gz $(OBJS) $(LIBS)
- $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
- --add-section image=vmlinux.tmp.gz \
- --set-section-flags image=contents,alloc,load,load,data \
- $(OBJS) $@.tmp
- $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
- $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/$@.redboot
+$(obj)/../zImage.redboot: $(obj)/zImage.elf
+ $(Q)$(OBJCOPY) -S -O binary $< $@
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+zImage: $(obj)/../zImage.redboot
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
new file mode 100644
index 00000000000..bfbf8af582f
--- /dev/null
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -0,0 +1,14 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+UIMAGE_LOADADDR = 0xd0001000
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/../uImage: vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+ $(Q)$(kecho) ' Kernel: $@ is ready'
+
+zImage: $(obj)/../uImage
diff --git a/arch/xtensa/boot/dts/lx60.dts b/arch/xtensa/boot/dts/lx60.dts
new file mode 100644
index 00000000000..2eab3658e1b
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx60.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-4m.dtsi"
+
+/ {
+ compatible = "xtensa,lx60";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/ml605.dts b/arch/xtensa/boot/dts/ml605.dts
new file mode 100644
index 00000000000..6ed51d6554e
--- /dev/null
+++ b/arch/xtensa/boot/dts/ml605.dts
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+ compatible = "xtensa,ml605";
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
new file mode 100644
index 00000000000..e5703c7beeb
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
@@ -0,0 +1,26 @@
+/ {
+ flash: flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0xf8000000 0x01000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x00400000>;
+ };
+ partition@0x400000 {
+ label = "kernel image";
+ reg = <0x00400000 0x00600000>;
+ };
+ partition@0xa00000 {
+ label = "data";
+ reg = <0x00a00000 0x005e0000>;
+ };
+ partition@0xfe0000 {
+ label = "boot environment";
+ reg = <0x00fe0000 0x00020000>;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
new file mode 100644
index 00000000000..6f9c10d6b68
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
@@ -0,0 +1,18 @@
+/ {
+ flash: flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0xf8000000 0x00400000>;
+ bank-width = <2>;
+ device-width = <2>;
+ partition@0x0 {
+ label = "boot loader area";
+ reg = <0x00000000 0x003f0000>;
+ };
+ partition@0x3f0000 {
+ label = "boot environment";
+ reg = <0x003f0000 0x00010000>;
+ };
+ };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
new file mode 100644
index 00000000000..7eda6ecf7ee
--- /dev/null
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -0,0 +1,56 @@
+/ {
+ compatible = "xtensa,xtfpga";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x06000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "xtensa,cpu";
+ reg = <0>;
+ /* Filled in by platform_setup from FPGA register
+ * clock-frequency = <100000000>;
+ */
+ };
+ };
+
+ pic: pic {
+ compatible = "xtensa,pic";
+ /* one cell: internal irq number,
+ * two cells: second cell == 0: internal irq number
+ * second cell == 1: external irq number
+ */
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ serial0: serial@fd050020 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ no-loopback-test;
+ reg = <0xfd050020 0x20>;
+ reg-shift = <2>;
+ interrupts = <0 1>; /* external irq 0 */
+ /* Filled in by platform_setup from FPGA register
+ * clock-frequency = <100000000>;
+ */
+ };
+
+ enet0: ethoc@fd030000 {
+ compatible = "opencores,ethoc";
+ reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+ interrupts = <1 1>; /* external irq 1 */
+ local-mac-address = [00 50 c2 13 6f 00];
+ };
+};
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 24f50cada70..c3f289174c1 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -66,19 +66,35 @@
*/
static inline void atomic_add(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/**
@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/
static inline void atomic_sub(int i, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/*
@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "add %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " add %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " add %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " add %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "sub %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " sub %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ " sub %0, %0, %2\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (i), "a" (v)
+ : "memory"
+ );
+
+ return result;
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+#endif
}
/**
@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- unsigned int all_f = -1;
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "xor %1, %4, %3 \n\t"
- "and %0, %0, %4 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval), "=a" (mask)
- : "a" (v), "a" (all_f), "1" (mask)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (~mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int all_f = -1;
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " xor %1, %4, %3\n"
+ " and %0, %0, %4\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval), "=a" (mask)
+ : "a" (v), "a" (all_f), "1" (mask)
+ : "a15", "memory"
+ );
+#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- unsigned int vval;
-
- __asm__ __volatile__(
- "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %2, 0 \n\t"
- "or %0, %0, %1 \n\t"
- "s32i %0, %2, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n"
- : "=&a" (vval)
- : "a" (mask), "a" (v)
- : "a15", "memory"
- );
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (mask), "a" (v)
+ : "memory"
+ );
+#else
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %2, 0\n"
+ " or %0, %0, %1\n"
+ " s32i %0, %2, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (vval)
+ : "a" (mask), "a" (v)
+ : "a15", "memory"
+ );
+#endif
}
/* Atomic operations are already serializing */
@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
-
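
The S32C1I variants above all share one shape: load the old value, compute the new one, compare-and-swap it in, and retry if another CPU won the race. The same loop expressed with portable C11 atomics (an illustration of the pattern, not the xtensa implementation):

    #include <stdatomic.h>
    #include <stdio.h>

    /* fetch-and-add built from a compare-exchange retry loop, mirroring the
     * l32i / s32c1i / bne sequence above. */
    static int cas_fetch_add(atomic_int *v, int i)
    {
        int old = atomic_load(v);

        while (!atomic_compare_exchange_weak(v, &old, old + i))
            ;   /* 'old' is refreshed on failure; just retry */
        return old;
    }

    int main(void)
    {
        atomic_int v = ATOMIC_VAR_INIT(40);
        int before = cas_fetch_add(&v, 2);

        printf("before=%d after=%d\n", before, atomic_load(&v));
        return 0;
    }
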
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 55707a8009d..ef021677d53 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
*/
#ifndef _XTENSA_SYSTEM_H
@@ -12,8 +12,8 @@
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)
-#define mb() barrier()
-#define rmb() mb()
+#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
#define wmb() mb()
#ifdef CONFIG_SMP
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 5270197ddd3..84afe58d5d3 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -29,7 +29,6 @@
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
-#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#if XCHAL_HAVE_NSA
@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word)
#endif
#include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " or %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " and %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %3, 0\n"
+ " wsr %1, scompare1\n"
+ " xor %0, %1, %2\n"
+ " s32c1i %0, %3, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
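
Each helper above first locates the word and the mask for a bit number: with 32-bit words the word index is bit >> 5 and the mask is 1 << (bit & 31). That indexing in isolation, non-atomic and purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static void set_bit32(unsigned int bit, uint32_t *words)
    {
        words[bit >> 5] |= UINT32_C(1) << (bit & 31);
    }

    static int test_bit32(unsigned int bit, const uint32_t *words)
    {
        return (words[bit >> 5] >> (bit & 31)) & 1;
    }

    int main(void)
    {
        uint32_t map[4] = { 0 };

        set_bit32(37, map);   /* lands in word 1, bit 5 */
        printf("bit 37: %d, word[1]=0x%08x\n", test_bit32(37, map), map[1]);
        return 0;
    }
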
diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h
index 9983f2c1b7e..0c25799faca 100644
--- a/arch/xtensa/include/asm/bootparam.h
+++ b/arch/xtensa/include/asm/bootparam.h
@@ -22,6 +22,7 @@
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+#define BP_TAG_FDT 0x1006 /* flat device tree addr */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
@@ -31,15 +32,15 @@
/* All records are aligned to 4 bytes */
typedef struct bp_tag {
- unsigned short id; /* tag id */
- unsigned short size; /* size of this record excluding the structure*/
- unsigned long data[0]; /* data */
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure*/
+ unsigned long data[0]; /* data */
} bp_tag_t;
typedef struct meminfo {
- unsigned long type;
- unsigned long start;
- unsigned long end;
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
} meminfo_t;
#define SYSMEM_BANKS_MAX 5
@@ -48,14 +49,11 @@ typedef struct meminfo {
#define MEMORY_TYPE_NONE 0x2000
typedef struct sysmem_info {
- int nr_banks;
- meminfo_t bank[SYSMEM_BANKS_MAX];
+ int nr_banks;
+ meminfo_t bank[SYSMEM_BANKS_MAX];
} sysmem_info_t;
extern sysmem_info_t sysmem;
#endif
#endif
-
-
-
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 2c20a58f94c..60e18773ecb 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -174,4 +174,3 @@
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm
-
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 569fec4f9a2..127cd48883c 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+extern void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
#else
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index e4d831a3077..aed7ad68ca4 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
- : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+ "=&r" (endaddr)
: "1" (iph), "2" (ihl)
: "memory");
@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst, int len,
+ __wsum sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+ return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len)
*err_ptr = -EFAULT;
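
The checksum helpers above only have their prototypes re-wrapped here, but the algorithm they implement is the Internet one's-complement checksum, which is easy to state in portable C (a generic RFC 1071-style sketch, not the xtensa assembly):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement sum over 16-bit words, folded back into 16 bits. */
    static uint16_t inet_checksum(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t sum = 0;

        while (len > 1) {
            sum += (uint32_t)p[0] << 8 | p[1];
            p += 2;
            len -= 2;
        }
        if (len)                        /* odd trailing byte */
            sum += (uint32_t)p[0] << 8;

        while (sum >> 16)               /* fold carries, like csum_fold() */
            sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

        printf("checksum: 0x%04x\n", inet_checksum(data, sizeof(data)));
        return 0;
    }
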
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 64dad04a9d2..d9ab131bc1a 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -22,17 +22,30 @@
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "bne %0, %2, 1f \n\t"
- "s32i %3, %1, 0 \n\t"
- "1: \n\t"
- "wsr a15, ps \n\t"
- "rsync \n\t"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
- : "a15", "memory");
- return old;
+#if XCHAL_HAVE_S32C1I
+ __asm__ __volatile__(
+ " wsr %2, scompare1\n"
+ " s32c1i %0, %1, 0\n"
+ : "+a" (new)
+ : "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return new;
+#else
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " bne %0, %2, 1f\n"
+ " s32i %3, %1, 0\n"
+ "1:\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+#endif
}
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
@@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
- unsigned long tmp;
- __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
- "l32i %0, %1, 0 \n\t"
- "s32i %2, %1, 0 \n\t"
- "wsr a15, ps \n\t"
- "rsync \n\t"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
- : "a15", "memory");
- return tmp;
+#if XCHAL_HAVE_S32C1I
+ unsigned long tmp, result;
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " mov %0, %3\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "memory"
+ );
+ return result;
+#else
+ unsigned long tmp;
+ __asm__ __volatile__(
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " l32i %0, %1, 0\n"
+ " s32i %2, %1, 0\n"
+ " wsr a15, ps\n"
+ " rsync\n"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+#endif
}
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
* This only works if the compiler isn't horribly bad at optimizing.
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 8d1eb5d7864..47e46dcf5d4 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
- l32i reg, reg, TI_TASK \
+ l32i reg, reg, TI_TASK \
#endif
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
index 58c0a4fd400..61fc5faeb46 100644
--- a/arch/xtensa/include/asm/delay.h
+++ b/arch/xtensa/include/asm/delay.h
@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops)
{
- /* 2 cycles per loop. */
- __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
- : "=r" (loops) : "0" (loops));
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
+ : "=r" (loops) : "0" (loops));
}
static __inline__ u32 xtensa_get_ccount(void)
@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
}
#endif
-
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 492c95790ad..4acb5feba1f 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,6 +16,8 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
/*
* DMA-consistent mapping functions.
*/
@@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
}
static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index 5293312bc6a..264d5fa450d 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
*/
#define ELF_PLAT_INIT(_r, load_addr) \
- do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
- _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
- _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
- _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
- } while (0)
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
typedef struct {
xtregs_opt_t opt;
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 0a046ca5a68..80be1512469 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -14,4 +14,3 @@
extern void flush_cache_kmaps(void);
#endif
-
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
new file mode 100644
index 00000000000..e1f8ba4061e
--- /dev/null
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -0,0 +1,55 @@
+/*
+ * arch/xtensa/include/asm/initialize_mmu.h
+ *
+ * Initializes MMU:
+ *
+ * For the new V3 MMU we remap the TLB from virtual == physical
+ * to the standard Linux mapping used in earlier MMUs.
+ *
+ * With the MMU we also support a new configuration register that
+ * specifies how the S32C1I instruction operates with the cache
+ * controller.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica, Inc.
+ *
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ */
+
+#ifndef _XTENSA_INITIALIZE_MMU_H
+#define _XTENSA_INITIALIZE_MMU_H
+
+#ifdef __ASSEMBLY__
+
+#define XTENSA_HWVERSION_RC_2009_0 230000
+
+ .macro initialize_mmu
+
+#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+/*
+ * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
+ * For details see Documentation/xtensa/atomctl.txt
+ */
+#if XCHAL_DCACHE_IS_COHERENT
+ movi a3, 0x25 /* For SMP/MX -- internal for writeback,
+ * RCW otherwise
+ */
+#else
+ movi a3, 0x29 /* non-MX -- Most cores use Std Memory
+				  * Controllers which usually can't use RCW
+ */
+#endif
+ wsr a3, atomctl
+#endif /* XCHAL_HAVE_S32C1I &&
+ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
+ */
+
+ .endm
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _XTENSA_INITIALIZE_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
index 04890d6e233..8554b2c8b17 100644
--- a/arch/xtensa/include/asm/mmu.h
+++ b/arch/xtensa/include/asm/mmu.h
@@ -12,7 +12,7 @@
#define _XTENSA_MMU_H
#ifndef CONFIG_MMU
-#include <asm/nommu.h>
+#include <asm-generic/mmu.h>
#else
/* Default "unsigned long" context */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index feb10af9651..d43525a286b 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+ struct task_struct *tsk)
{
unsigned long asid = asid_cache;
diff --git a/arch/xtensa/include/asm/nommu.h b/arch/xtensa/include/asm/nommu.h
deleted file mode 100644
index dce2c438c5b..00000000000
--- a/arch/xtensa/include/asm/nommu.h
+++ /dev/null
@@ -1,3 +0,0 @@
-typedef struct {
- unsigned long end_brk;
-} mm_context_t;
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 599e7a2e729..3407cf7989b 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
{
return 0;
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 7a5591a71f8..47f582333f6 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -29,19 +29,19 @@
* PAGE_SHIFT determines the page size
*/
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef CONFIG_MMU
-#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN XCHAL_KSEG_SIZE
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#else
-#define PAGE_OFFSET 0
-#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET 0
+#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif
-#define PGTABLE_START 0x80000000
+#define PGTABLE_START 0x80000000
/*
* Cache aliasing:
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) \
+ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 00fcbd7c534..0b68c76ec1e 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -35,7 +35,7 @@ struct pci_space {
struct pci_controller {
int index; /* used for pci_controller_num */
struct pci_controller *next;
- struct pci_bus *bus;
+ struct pci_bus *bus;
void *arch_data;
int first_busno;
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 05244f07dd3..614be031a79 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -53,7 +53,7 @@ struct pci_dev;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+ enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 40cf9bceda2..cf914c8c249 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
extern struct kmem_cache *pgtable_cache;
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b03c043ce75..c90ea5bfa1b 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -284,7 +284,7 @@ struct vm_area_struct;
static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep)
{
pte_t pte = *ptep;
if (!pte_young(pte))
@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_t pte = *ptep;
- update_pte(ptep, pte_wrprotect(pte));
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
}
/* to find an entry in a kernel page-table-directory */
@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
*/
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
- remap_pfn_range(vma, from, pfn, size, prot)
+ remap_pfn_range(vma, from, pfn, size, prot)
typedef pte_t *pte_addr_t;
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 7d936e58e9b..ec098b68fb9 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
extern void platform_calibrate_ccount (void);
#endif /* _XTENSA_PLATFORM_H */
-
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 2d630e7399c..e5fb6b0abdf 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -89,7 +89,7 @@
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
typedef struct {
- unsigned long seg;
+ unsigned long seg;
} mm_segment_t;
struct thread_struct {
@@ -145,10 +145,10 @@ struct thread_struct {
* set_thread_state in signal.c depends on it.
*/
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
- (1 << PS_CALLINC_SHIFT) | \
- (USER_RING << PS_RING_SHIFT) | \
- (1 << PS_UM_BIT) | \
- (1 << PS_EXCM_BIT))
+ (1 << PS_CALLINC_SHIFT) | \
+ (USER_RING << PS_RING_SHIFT) | \
+ (1 << PS_UM_BIT) | \
+ (1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h
new file mode 100644
index 00000000000..f3d7cd2c0de
--- /dev/null
+++ b/arch/xtensa/include/asm/prom.h
@@ -0,0 +1,6 @@
+#ifndef _XTENSA_ASM_PROM_H
+#define _XTENSA_ASM_PROM_H
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif /* _XTENSA_ASM_PROM_H */
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index da21c17f23a..682b1deac1f 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -37,7 +37,7 @@ struct pt_regs {
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
unsigned long icountlevel; /* 60 */
- int reserved[1]; /* 64 */
+ unsigned long scompare1; /* 64 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
@@ -55,7 +55,7 @@ struct pt_regs {
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
- (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
@@ -63,6 +63,8 @@ struct pt_regs {
# define profile_pc(regs) instruction_pointer(regs)
# endif
+#define user_stack_pointer(regs) ((regs)->areg[1])
+
#else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 8a8aa61ccc8..76096a4e5b8 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -52,6 +52,10 @@
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
+#define EXCCAUSE_INSTR_DATA_ERROR 12
+#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13
+#define EXCCAUSE_INSTR_ADDR_ERROR 14
+#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
@@ -105,4 +109,3 @@
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_SPECREG_H */
-
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 8ff23649581..03975906b36 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -11,6 +11,192 @@
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H
-#include <linux/spinlock.h>
+/*
+ * spinlock
+ *
+ * There is at most one owner of a spinlock. There are no different
+ * types of spinlock owners like there are for rwlocks (see below).
+ *
+ * When trying to obtain a spinlock, the function "spins" forever, or busy-
+ * waits, until the lock is obtained. When spinning, presumably some other
+ * owner will soon give up the spinlock, making it available to others. Use
+ * the trylock functions to avoid spinning forever.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the spinlock
+ * 1 somebody owns the spinlock
+ */
+
+#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&lock->slock)
+ : "memory");
+}
+
+/*
+ * rwlock
+ *
+ * Read-write locks are really a more flexible kind of spinlock. They allow
+ * multiple readers but only one writer. Write ownership is exclusive
+ * (i.e., all other readers and writers are blocked from ownership while
+ * there is a write owner). These rwlocks are unfair to writers. Writers
+ * can be starved for an indefinite time by readers.
+ *
+ * possible values:
+ *
+ * 0 nobody owns the rwlock
+ * >0 one or more readers own the rwlock
+ * (the positive value is the actual number of readers)
+ * 0x80000000 one writer owns the rwlock, no other writers, no readers
+ */
+
+#define __raw_write_can_lock(x) ((x)->lock == 0)
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ "1: movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ " bnez %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " wsr %0, scompare1\n"
+ " movi %0, 1\n"
+ " slli %0, %0, 31\n"
+ " s32c1i %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return tmp == 0 ? 1 : 0;
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " movi %0, 0\n"
+ " s32ri %0, %1, 0\n"
+ : "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+ unsigned long tmp;
+ unsigned long result;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " bltz %1, 1b\n"
+ " wsr %1, scompare1\n"
+ " addi %0, %1, 1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+}
+
+/* Returns 1 if the lock is obtained, 0 otherwise. */
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+ unsigned long result;
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ " l32i %1, %2, 0\n"
+ " addi %0, %1, 1\n"
+ " bltz %0, 1f\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " sub %0, %0, %1\n"
+ "1:\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (&rw->lock)
+ : "memory");
+
+ return result == 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: l32i %1, %2, 0\n"
+ " addi %0, %1, -1\n"
+ " wsr %1, scompare1\n"
+ " s32c1i %0, %2, 0\n"
+ " bne %0, %1, 1b\n"
+ : "=&a" (tmp1), "=&a" (tmp2)
+ : "a" (&rw->lock)
+ : "memory");
+}
#endif /* _XTENSA_SPINLOCK_H */
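
The rwlock encoding described in the comment above (0 = free, a positive reader count, 0x80000000 = write-owned) is what lets each operation get by with a single s32c1i. A rough user-space sketch of __raw_read_trylock under that encoding, again with a GCC __atomic builtin standing in for SCOMPARE1/S32C1I (an assumption for illustration, not the kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	static inline bool read_trylock_sketch(uint32_t *lock)
	{
		uint32_t old = *lock;		/* l32i */

		if (old & 0x80000000u)		/* sign bit set: write-owned
						 * (the assembly checks this with
						 * addi + bltz) */
			return false;

		/* wsr scompare1 + s32c1i: bump the reader count only if *lock
		 * still holds 'old' */
		return __atomic_compare_exchange_n(lock, &old, old + 1, true,
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}

A failed compare simply reports that another CPU got in first; the caller can retry (read_lock) or give up (read_trylock).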
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index b00c928d4cc..8d5e47fad09 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
/* Should probably move to linux/syscalls.h */
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
+ fd_set __user *exp, struct timespec __user *tsp,
+ void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
- struct timespec __user *tsp, const sigset_t __user *sigmask,
- size_t sigsetsize);
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
- size_t sigsetsize);
+ struct timespec __user *tsp,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
new file mode 100644
index 00000000000..54f70440185
--- /dev/null
+++ b/arch/xtensa/include/asm/traps.h
@@ -0,0 +1,23 @@
+/*
+ * arch/xtensa/include/asm/traps.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+#ifndef _XTENSA_TRAPS_H
+#define _XTENSA_TRAPS_H
+
+#include <asm/ptrace.h>
+
+/*
+ * handler must be either of the following:
+ * void (*)(struct pt_regs *regs);
+ * void (*)(struct pt_regs *regs, unsigned long exccause);
+ */
+extern void * __init trap_set_handler(int cause, void *handler);
+extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+
+#endif /* _XTENSA_TRAPS_H */
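
trap_set_handler() is used later in this series (the S32C1I self-test in setup.c) in a save/probe/restore pattern. A short sketch of that usage; my_probe_handler is a hypothetical handler matching the second prototype listed in the comment above:

	#include <linux/init.h>
	#include <asm/regs.h>
	#include <asm/traps.h>

	/* Hypothetical probe handler: would record exccause and skip the
	 * faulting instruction, as setup.c's do_probed_exception() does. */
	static void __init my_probe_handler(struct pt_regs *regs,
					    unsigned long exccause)
	{
	}

	static void __init probe_with_temporary_handler(void)
	{
		/* install the probe handler, remembering the previous one */
		void *old = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
					     my_probe_handler);

		/* ... execute the instruction being probed ... */

		trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, old);	/* restore */
	}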
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 6e4bb3b791a..fd686dc45d1 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -180,7 +180,8 @@
#define segment_eq(a,b) ((a).seg == (b).seg)
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __user_ok(addr,size) \
+ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
@@ -234,10 +235,10 @@ do { \
int __cb; \
retval = 0; \
switch (size) { \
- case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
- case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
- case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
- case 8: { \
+ case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
+ case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
+ case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
+ case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
break; \
@@ -291,7 +292,7 @@ do { \
* __check_align_* macros still work.
*/
#define __put_user_asm(x, addr, err, align, insn, cb) \
- __asm__ __volatile__( \
+__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
@@ -301,8 +302,8 @@ do { \
" .long 2b \n" \
"5: \n" \
" l32r %1, 4b \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
@@ -334,13 +335,13 @@ extern long __get_user_bad(void);
do { \
int __cb; \
retval = 0; \
- switch (size) { \
- case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
- case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
- case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
- case 8: retval = __copy_from_user(&x,ptr,8); break; \
- default: (x) = __get_user_bad(); \
- } \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
+ case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
+ case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
+ case 8: retval = __copy_from_user(&x,ptr,8); break; \
+ default: (x) = __get_user_bad(); \
+ } \
} while (0)
@@ -349,7 +350,7 @@ do { \
* __check_align_* macros still work.
*/
#define __get_user_asm(x, addr, err, align, insn, cb) \
- __asm__ __volatile__( \
+__asm__ __volatile__( \
__check_align_##align \
"1: "insn" %2, %3, 0 \n" \
"2: \n" \
@@ -360,8 +361,8 @@ do { \
"5: \n" \
" l32r %1, 4b \n" \
" movi %2, 0 \n" \
- " movi %0, %4 \n" \
- " jx %1 \n" \
+ " movi %0, %4 \n" \
+ " jx %1 \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+ __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) \
+ __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index e002dbcc88b..eb63ea87815 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -1,7 +1,6 @@
#ifndef _XTENSA_UNISTD_H
#define _XTENSA_UNISTD_H
-#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h
index b88ce96f2af..dacf716dd3e 100644
--- a/arch/xtensa/include/uapi/asm/signal.h
+++ b/arch/xtensa/include/uapi/asm/signal.h
@@ -97,12 +97,6 @@ typedef struct {
#define SA_RESTORER 0x04000000
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f36cef5a62f..c3a59d992ac 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -23,13 +23,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
#
# Replicate rules in scripts/Makefile.build
-sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
- -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
+sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
+ -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@
- cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
- | sed $(sed-y) >$@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+ | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 934ae58e2c7..aa2e87b8566 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -442,7 +442,7 @@ ENTRY(fast_unaligned)
mov a1, a2
rsr a0, ps
- bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
+ bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
@@ -450,6 +450,6 @@ ENTRY(fast_unaligned)
1: movi a0, _user_exception
jx a0
+ENDPROC(fast_unaligned)
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
-
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 7dc3f915718..0701fad170d 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -41,6 +41,7 @@ int main(void)
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
+ DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
@@ -91,7 +92,8 @@ int main(void)
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
- DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+ DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+ thread.current_ds));
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -108,4 +110,3 @@ int main(void)
return 0;
}
-
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 54c3be313bf..64765748486 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,10 +43,13 @@
/* IO protection is currently unsupported. */
ENTRY(fast_io_protect)
+
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
+ENDPROC(fast_io_protect)
+
#if XTENSA_HAVE_COPROCESSORS
/*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
*/
ENTRY(coprocessor_save)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_save)
+
ENTRY(coprocessor_load)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lload_cp_regs_jump_table
@@ -162,8 +169,10 @@ ENTRY(coprocessor_load)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_load)
+
/*
- * coprocessor_flush(struct task_info*, index)
+ * coprocessor_flush(struct task_info*, index)
* a2 a3
* coprocessor_restore(struct task_info*, index)
* a2 a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
ENTRY(coprocessor_flush)
+
entry a1, 32
s32i a0, a1, 0
movi a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_flush)
+
ENTRY(coprocessor_restore)
entry a1, 32
s32i a0, a1, 0
@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
1: l32i a0, a1, 0
retw
+ENDPROC(coprocessor_restore)
+
/*
* Entry condition:
*
@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
*/
ENTRY(fast_coprocessor_double)
+
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0
+ENDPROC(fast_coprocessor_double)
ENTRY(fast_coprocessor)
@@ -327,9 +343,14 @@ ENTRY(fast_coprocessor)
rfe
+ENDPROC(fast_coprocessor)
+
.data
+
ENTRY(coprocessor_owner)
+
.fill XCHAL_CP_MAX, 4, 0
-#endif /* XTENSA_HAVE_COPROCESSORS */
+END(coprocessor_owner)
+#endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 90bfc1dbc13..3777fec85e7 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -219,6 +219,7 @@ _user_exception:
j common_exception
+ENDPROC(user_exception)
/*
* First-level exit handler for kernel exceptions
@@ -371,6 +372,13 @@ common_exception:
s32i a2, a1, PT_LBEG
s32i a3, a1, PT_LEND
+ /* Save SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ rsr a2, scompare1
+ s32i a2, a1, PT_SCOMPARE1
+#endif
+
/* Save optional registers. */
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
@@ -432,6 +440,12 @@ common_exception_return:
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+ /* Restore SCOMPARE1 */
+
+#if XCHAL_HAVE_S32C1I
+ l32i a2, a1, PT_SCOMPARE1
+ wsr a2, scompare1
+#endif
wsr a3, ps /* disable interrupts */
_bbci.l a3, PS_UM_BIT, kernel_exception_exit
@@ -641,6 +655,8 @@ common_exception_exit:
l32i a1, a1, PT_AREG1
rfde
+ENDPROC(kernel_exception)
+
/*
* Debug exception handler.
*
@@ -701,6 +717,7 @@ ENTRY(debug_exception)
/* Debug exception while in exception mode. */
1: j 1b // FIXME!!
+ENDPROC(debug_exception)
/*
* We get here in case of an unrecoverable exception.
@@ -751,6 +768,7 @@ ENTRY(unrecoverable_exception)
1: j 1b
+ENDPROC(unrecoverable_exception)
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
@@ -856,7 +874,7 @@ ENTRY(fast_alloca)
_bnei a0, 1, 1f # no 'movsp a1, ax': jump
- /* Move the save area. This implies the use of the L32E
+ /* Move the save area. This implies the use of the L32E
* and S32E instructions, because this move must be done with
* the user's PS.RING privilege levels, not with ring 0
* (kernel's) privileges currently active with PS.EXCM
@@ -929,6 +947,7 @@ ENTRY(fast_alloca)
l32i a2, a2, PT_AREG2
rfe
+ENDPROC(fast_alloca)
/*
* fast system calls.
@@ -966,6 +985,8 @@ ENTRY(fast_syscall_kernel)
j kernel_exception
+ENDPROC(fast_syscall_kernel)
+
ENTRY(fast_syscall_user)
/* Skip syscall. */
@@ -983,19 +1004,21 @@ ENTRY(fast_syscall_user)
j user_exception
-ENTRY(fast_syscall_unrecoverable)
+ENDPROC(fast_syscall_user)
- /* Restore all states. */
+ENTRY(fast_syscall_unrecoverable)
- l32i a0, a2, PT_AREG0 # restore a0
- xsr a2, depc # restore a2, depc
- rsr a3, excsave1
+ /* Restore all states. */
- wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ l32i a0, a2, PT_AREG0 # restore a0
+ xsr a2, depc # restore a2, depc
+ rsr a3, excsave1
+ wsr a0, excsave1
+ movi a0, unrecoverable_exception
+ callx0 a0
+ENDPROC(fast_syscall_unrecoverable)
/*
* sysxtensa syscall handler
@@ -1101,7 +1124,7 @@ CATCH
movi a2, -EINVAL
rfe
-
+ENDPROC(fast_syscall_xtensa)
/* fast_syscall_spill_registers.
@@ -1160,6 +1183,8 @@ ENTRY(fast_syscall_spill_registers)
movi a2, 0
rfe
+ENDPROC(fast_syscall_spill_registers)
+
/* Fixup handler.
*
* We get here if the spill routine causes an exception, e.g. tlb miss.
@@ -1228,9 +1253,9 @@ fast_syscall_spill_registers_fixup:
movi a3, exc_table
rsr a0, exccause
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- jx a0
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ jx a0
fast_syscall_spill_registers_fixup_return:
@@ -1432,7 +1457,7 @@ ENTRY(_spill_registers)
rsr a0, ps
_bbci.l a0, PS_UM_BIT, 1f
- /* User space: Setup a dummy frame and kill application.
+ /* User space: Setup a dummy frame and kill application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
*/
@@ -1464,6 +1489,8 @@ ENTRY(_spill_registers)
callx0 a0 # should not return
1: j 1b
+ENDPROC(_spill_registers)
+
#ifdef CONFIG_MMU
/*
* We should never get here. Bail out!
@@ -1475,6 +1502,8 @@ ENTRY(fast_second_level_miss_double_kernel)
callx0 a0 # should not return
1: j 1b
+ENDPROC(fast_second_level_miss_double_kernel)
+
/* First-level entry handler for user, kernel, and double 2nd-level
* TLB miss exceptions. Note that for now, user and kernel miss
* exceptions share the same entry point and are handled identically.
@@ -1682,6 +1711,7 @@ ENTRY(fast_second_level_miss)
j _kernel_exception
1: j _user_exception
+ENDPROC(fast_second_level_miss)
/*
* StoreProhibitedException
@@ -1777,6 +1807,9 @@ ENTRY(fast_store_prohibited)
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
+
+ENDPROC(fast_store_prohibited)
+
#endif /* CONFIG_MMU */
/*
@@ -1787,6 +1820,7 @@ ENTRY(fast_store_prohibited)
*/
ENTRY(system_call)
+
entry a1, 32
/* regs->syscall = regs->areg[2] */
@@ -1831,6 +1865,8 @@ ENTRY(system_call)
callx4 a4
retw
+ENDPROC(system_call)
+
/*
* Task switch.
@@ -1899,6 +1935,7 @@ ENTRY(_switch_to)
retw
+ENDPROC(_switch_to)
ENTRY(ret_from_fork)
@@ -1914,6 +1951,8 @@ ENTRY(ret_from_fork)
j common_exception_return
+ENDPROC(ret_from_fork)
+
/*
* Kernel thread creation helper
* On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bdc50788f35..91d9095284d 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -18,6 +18,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -47,16 +48,19 @@
*/
__HEAD
- .globl _start
-_start: _j 2f
+ENTRY(_start)
+
+ _j 2f
.align 4
1: .word _startup
2: l32r a0, 1b
jx a0
+ENDPROC(_start)
+
.section .init.text, "ax"
- .align 4
-_startup:
+
+ENTRY(_startup)
/* Disable interrupts and exceptions. */
@@ -107,7 +111,7 @@ _startup:
/* Disable all timers. */
.set _index, 0
- .rept XCHAL_NUM_TIMERS - 1
+ .rept XCHAL_NUM_TIMERS
wsr a0, SREG_CCOMPARE + _index
.set _index, _index + 1
.endr
@@ -120,7 +124,7 @@ _startup:
/* Disable coprocessors. */
-#if XCHAL_CP_NUM > 0
+#if XCHAL_HAVE_CP
wsr a0, cpenable
#endif
@@ -152,6 +156,8 @@ _startup:
isync
+ initialize_mmu
+
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
@@ -230,6 +236,7 @@ _startup:
should_never_return:
j should_never_return
+ENDPROC(_startup)
/*
* BSS section
@@ -239,6 +246,8 @@ __PAGE_ALIGNED_BSS
#ifdef CONFIG_MMU
ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
#endif
ENTRY(empty_zero_page)
.fill PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a6ce3e56373..6f4f9749cff 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -18,6 +18,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
@@ -26,19 +28,22 @@ static unsigned int cached_irq_mask;
atomic_t irq_err_count;
+static struct irq_domain *root_domain;
+
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
-asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
+ int irq = irq_find_mapping(root_domain, hwirq);
- if (irq >= NR_IRQS) {
+ if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, irq);
+ __func__, hwirq);
}
irq_enter();
@@ -71,40 +76,39 @@ int arch_show_interrupts(struct seq_file *p, int prec)
static void xtensa_irq_mask(struct irq_data *d)
{
- cached_irq_mask &= ~(1 << d->irq);
+ cached_irq_mask &= ~(1 << d->hwirq);
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_unmask(struct irq_data *d)
{
- cached_irq_mask |= 1 << d->irq;
+ cached_irq_mask |= 1 << d->hwirq;
set_sr (cached_irq_mask, intenable);
}
static void xtensa_irq_enable(struct irq_data *d)
{
- variant_irq_enable(d->irq);
+ variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
- variant_irq_disable(d->irq);
+ variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
{
- set_sr(1 << d->irq, intclear);
+ set_sr(1 << d->hwirq, intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{
- set_sr (1 << d->irq, INTSET);
+ set_sr(1 << d->hwirq, intset);
return 1;
}
-
static struct irq_chip xtensa_irq_chip = {
.name = "xtensa",
.irq_enable = xtensa_irq_enable,
@@ -115,37 +119,99 @@ static struct irq_chip xtensa_irq_chip = {
.irq_retrigger = xtensa_irq_retrigger,
};
-void __init init_IRQ(void)
+static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
{
- int index;
-
- for (index = 0; index < XTENSA_NR_IRQS; index++) {
- int mask = 1 << index;
-
- if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_simple_irq);
+ u32 mask = 1 << hw;
+
+ if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
+ irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ handle_simple_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
+ irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
+ irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ } else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
+ irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
+ /* XCHAL_INTTYPE_MASK_NMI */
+
+ irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
- else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_edge_irq);
+static unsigned map_ext_irq(unsigned ext_irq)
+{
+ unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
+ XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
+ unsigned i;
- else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_level_irq);
+ for (i = 0; mask; ++i, mask >>= 1) {
+ if ((mask & 1) && ext_irq-- == 0)
+ return i;
+ }
+ return XCHAL_NUM_INTERRUPTS;
+}
- else if (mask & XCHAL_INTTYPE_MASK_TIMER)
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_edge_irq);
+/*
+ * Device Tree IRQ specifier translation function which works with one or
+ * two cell bindings. First cell value maps directly to the hwirq number.
+ * Second cell if present specifies whether hwirq number is external (1) or
+ * internal (0).
+ */
+int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1 || intsize > 2))
+ return -EINVAL;
+ if (intsize == 2 && intspec[1] == 1) {
+ unsigned int_irq = map_ext_irq(intspec[0]);
+ if (int_irq < XCHAL_NUM_INTERRUPTS)
+ *out_hwirq = int_irq;
+ else
+ return -EINVAL;
+ } else {
+ *out_hwirq = intspec[0];
+ }
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
- else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
- /* XCHAL_INTTYPE_MASK_NMI */
+static const struct irq_domain_ops xtensa_irq_domain_ops = {
+ .xlate = xtensa_irq_domain_xlate,
+ .map = xtensa_irq_map,
+};
- irq_set_chip_and_handler(index, &xtensa_irq_chip,
- handle_level_irq);
- }
+void __init init_IRQ(void)
+{
+ struct device_node *intc = NULL;
cached_irq_mask = 0;
+ set_sr(~0, intclear);
+
+#ifdef CONFIG_OF
+ /* The interrupt controller device node is mandatory */
+ intc = of_find_compatible_node(NULL, NULL, "xtensa,pic");
+ BUG_ON(!intc);
+
+ root_domain = irq_domain_add_linear(intc, NR_IRQS,
+ &xtensa_irq_domain_ops, NULL);
+#else
+ root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ &xtensa_irq_domain_ops, NULL);
+#endif
+ irq_set_default_host(root_domain);
variant_init_irq();
}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index 451dda928c9..b715237bae6 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
struct module *mod)
{
unsigned int i;
- Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned char *location;
uint32_t value;
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 97230e46cbe..44bf21c3769 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void),
ccount_per_jiffy = 10 * (1000000UL/HZ);
});
#endif
-
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 1accf28da5f..0dd5784416d 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -108,7 +108,7 @@ void coprocessor_flush_all(struct thread_info *ti)
void cpu_idle(void)
{
- local_irq_enable();
+ local_irq_enable();
/* endless idle loop with no priority at all */
while (1) {
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 33eea4c16f1..61fb2e9e903 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -154,7 +154,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
- ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+ ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
sizeof(xtregs_coprocessor_t));
#endif
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
}
-
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index b237988ba6d..24c1a57abb4 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -22,6 +22,11 @@
#include <linux/bootmem.h>
#include <linux/kernel.h>
+#ifdef CONFIG_OF
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#endif
+
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
# include <linux/console.h>
#endif
@@ -42,6 +47,7 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
+#include <asm/traps.h>
#include <platform/hardware.h>
@@ -64,6 +70,11 @@ int initrd_is_mapped = 0;
extern int initrd_below_start_ok;
#endif
+#ifdef CONFIG_OF
+extern u32 __dtb_start[];
+void *dtb_start = __dtb_start;
+#endif
+
unsigned char aux_device_present;
extern unsigned long loops_per_jiffy;
@@ -83,6 +94,8 @@ extern void init_mmu(void);
static inline void init_mmu(void) { }
#endif
+extern int mem_reserve(unsigned long, unsigned long, int);
+extern void bootmem_init(void);
extern void zones_init(void);
/*
@@ -104,28 +117,33 @@ typedef struct tagtable {
/* parse current tag */
-static int __init parse_tag_mem(const bp_tag_t *tag)
+static int __init add_sysmem_bank(unsigned long type, unsigned long start,
+ unsigned long end)
{
- meminfo_t *mi = (meminfo_t*)(tag->data);
-
- if (mi->type != MEMORY_TYPE_CONVENTIONAL)
- return -1;
-
if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
printk(KERN_WARNING
- "Ignoring memory bank 0x%08lx size %ldKB\n",
- (unsigned long)mi->start,
- (unsigned long)mi->end - (unsigned long)mi->start);
+ "Ignoring memory bank 0x%08lx size %ldKB\n",
+ start, end - start);
return -EINVAL;
}
- sysmem.bank[sysmem.nr_banks].type = mi->type;
- sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
- sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_MASK;
+ sysmem.bank[sysmem.nr_banks].type = type;
+ sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
+ sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK;
sysmem.nr_banks++;
return 0;
}
+static int __init parse_tag_mem(const bp_tag_t *tag)
+{
+ meminfo_t *mi = (meminfo_t *)(tag->data);
+
+ if (mi->type != MEMORY_TYPE_CONVENTIONAL)
+ return -1;
+
+ return add_sysmem_bank(mi->type, mi->start, mi->end);
+}
+
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
#ifdef CONFIG_BLK_DEV_INITRD
@@ -142,12 +160,31 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
__tagtable(BP_TAG_INITRD, parse_tag_initrd);
+#ifdef CONFIG_OF
+
+static int __init parse_tag_fdt(const bp_tag_t *tag)
+{
+ dtb_start = (void *)(tag->data[0]);
+ return 0;
+}
+
+__tagtable(BP_TAG_FDT, parse_tag_fdt);
+
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (void *)__va(start);
+ initrd_end = (void *)__va(end);
+ initrd_below_start_ok = 1;
+}
+
+#endif /* CONFIG_OF */
+
#endif /* CONFIG_BLK_DEV_INITRD */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
- strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
- command_line[COMMAND_LINE_SIZE - 1] = '\0';
+ strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
return 0;
}
@@ -185,6 +222,58 @@ static int __init parse_bootparam(const bp_tag_t* tag)
return 0;
}
+#ifdef CONFIG_OF
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ size &= PAGE_MASK;
+ add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __alloc_bootmem(size, align, 0);
+}
+
+void __init early_init_devtree(void *params)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+	/* Retrieve various information from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, TCE reserve, and more ...
+ */
+ if (!command_line[0])
+ of_scan_flat_dt(early_init_dt_scan_chosen, command_line);
+
+ /* Scan memory nodes and rebuild MEMBLOCKs */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ if (sysmem.nr_banks == 0)
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+static void __init copy_devtree(void)
+{
+ void *alloc = early_init_dt_alloc_memory_arch(
+ be32_to_cpu(initial_boot_params->totalsize), 0);
+ if (alloc) {
+ memcpy(alloc, initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+ initial_boot_params = alloc;
+ }
+}
+
+static int __init xtensa_device_probe(void)
+{
+ of_platform_populate(NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+device_initcall(xtensa_device_probe);
+
+#endif /* CONFIG_OF */
+
/*
* Initialize architecture. (Early stage)
*/
@@ -193,14 +282,14 @@ void __init init_arch(bp_tag_t *bp_start)
{
sysmem.nr_banks = 0;
-#ifdef CONFIG_CMDLINE_BOOL
- strcpy(command_line, default_command_line);
-#endif
-
/* Parse boot parameters */
- if (bp_start)
- parse_bootparam(bp_start);
+ if (bp_start)
+ parse_bootparam(bp_start);
+
+#ifdef CONFIG_OF
+ early_init_devtree(dtb_start);
+#endif
if (sysmem.nr_banks == 0) {
sysmem.nr_banks = 1;
@@ -209,6 +298,11 @@ void __init init_arch(bp_tag_t *bp_start)
+ PLATFORM_DEFAULT_MEM_SIZE;
}
+#ifdef CONFIG_CMDLINE_BOOL
+ if (!command_line[0])
+ strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+#endif
+
/* Early hook for platforms */
platform_init(bp_start);
@@ -235,15 +329,130 @@ extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start;
extern char _DoubleExceptionVector_text_end;
-void __init setup_arch(char **cmdline_p)
+
+#ifdef CONFIG_S32C1I_SELFTEST
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap that records the PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
+
+/* Handle probed exception */
+
+void __init do_probed_exception(struct pt_regs *regs, unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (SoC bring-up assist) */
+
+void __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ we can get rid of this panic for single core (not SMP) */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+   Display a reminder for early engineering test or demo chips / FPGA bitstreams */
+void __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+#else /* CONFIG_S32C1I_SELFTEST */
+
+void __init check_s32c1i(void)
{
- extern int mem_reserve(unsigned long, unsigned long, int);
- extern void bootmem_init(void);
+}
+
+#endif /* CONFIG_S32C1I_SELFTEST */
- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+void __init setup_arch(char **cmdline_p)
+{
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
+ check_s32c1i();
+
/* Reserve some memory regions */
#ifdef CONFIG_BLK_DEV_INITRD
@@ -251,7 +460,7 @@ void __init setup_arch(char **cmdline_p)
initrd_is_mapped = mem_reserve(__pa(initrd_start),
__pa(initrd_end), 0);
initrd_below_start_ok = 1;
- } else {
+ } else {
initrd_start = 0;
}
#endif
@@ -275,8 +484,12 @@ void __init setup_arch(char **cmdline_p)
bootmem_init();
- platform_setup(cmdline_p);
+#ifdef CONFIG_OF
+ copy_devtree();
+ unflatten_device_tree();
+#endif
+ platform_setup(cmdline_p);
paging_init();
zones_init();
@@ -326,7 +539,7 @@ c_show(struct seq_file *f, void *slot)
"core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n"
"byte order\t: %s\n"
- "cpu MHz\t\t: %lu.%02lu\n"
+ "cpu MHz\t\t: %lu.%02lu\n"
"bogomips\t: %lu.%02lu\n",
XCHAL_BUILD_UNIQUE_ID,
XCHAL_HAVE_BE ? "big" : "little",
@@ -381,6 +594,9 @@ c_show(struct seq_file *f, void *slot)
#if XCHAL_HAVE_FP
"fpu "
#endif
+#if XCHAL_HAVE_S32C1I
+ "s32c1i "
+#endif
"\n");
/* Registers. */
@@ -412,7 +628,7 @@ c_show(struct seq_file *f, void *slot)
"icache size\t: %d\n"
"icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n"
"dcache line size: %d\n"
@@ -420,10 +636,10 @@ c_show(struct seq_file *f, void *slot)
"dcache size\t: %d\n"
"dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
- "writeback"
+ "writeback "
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
- "lock"
+ "lock "
#endif
"\n",
XCHAL_ICACHE_LINESIZE,
@@ -465,4 +681,3 @@ const struct seq_operations cpuinfo_op =
};
#endif /* CONFIG_PROC_FS */
-
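
A side note on the CONFIG_S32C1I_SELFTEST code added above: the two probes check exactly the two halves of compare-and-swap semantics (a failing compare must not store and must return the old value; a matching compare must store and still return the old value). The same checks expressed in portable C, with a GCC __atomic builtin standing in for probed_compare_swap() (an assumption for illustration only):

	#include <assert.h>

	static void cas_semantics_sketch(void)
	{
		int word, expected;

		/* compare fails: memory unchanged, 'expected' reports the old value */
		word = 1;
		expected = 0;
		__atomic_compare_exchange_n(&word, &expected, 2, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		assert(word == 1 && expected == 1);

		/* compare succeeds: new value stored, old value was the expected one */
		word = 0x1234567;
		expected = 0x1234567;
		__atomic_compare_exchange_n(&word, &expected, 0xabcde, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		assert(word == 0xabcde && expected == 0x1234567);
	}

The kernel version additionally verifies that either both probes fault or neither does, since a configuration where S32C1I traps only sometimes cannot be worked around.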
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 63c566f627b..de34d6be91c 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -212,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
if (err)
return err;
- /* The signal handler may have used coprocessors in which
+ /* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy
* context-switching mechanisms of CP exception handling.
@@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
*/
/* Set up registers for signal handler */
- start_thread(regs, (unsigned long) ka->sa.sa_handler,
+ start_thread(regs, (unsigned long) ka->sa.sa_handler,
(unsigned long) frame);
/* Set up a stack frame for a call4
@@ -424,9 +424,9 @@ give_sigsegv:
return -EFAULT;
}
-asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
+asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
stack_t __user *uoss,
- long a2, long a3, long a4, long a5,
+ long a2, long a3, long a4, long a5,
struct pt_regs *regs)
{
return do_sigaltstack(uss, uoss, regs->areg[1]);
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5702065f472..54fa8425cee 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
{
return sys_fadvise64_64(fd, offset, len, advice);
}
-
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index ac62f9cf1e1..ffb47410431 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
#include <asm/timex.h>
#include <asm/platform.h>
@@ -31,7 +32,7 @@ unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif
-static cycle_t ccount_read(void)
+static cycle_t ccount_read(struct clocksource *cs)
{
return (cycle_t)get_ccount();
}
@@ -52,6 +53,7 @@ static struct irqaction timer_irqaction = {
void __init time_init(void)
{
+ unsigned int irq;
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
@@ -62,7 +64,8 @@ void __init time_init(void)
/* Initialize the linux timer interrupt. */
- setup_irq(LINUX_TIMER_INT, &timer_irqaction);
+ irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
+ setup_irq(irq, &timer_irqaction);
set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 5caf2b64d43..01e0111bf78 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -293,6 +293,17 @@ do_debug(struct pt_regs *regs)
}
+/* Set exception C handler - for temporary use when probing exceptions */
+
+void * __init trap_set_handler(int cause, void *handler)
+{
+ unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause];
+ void *previous = (void *)*entry;
+ *entry = (unsigned long)handler;
+ return previous;
+}
+
+
/*
* Initialize dispatch tables.
*
@@ -397,7 +408,8 @@ static inline void spill_registers(void)
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
:: "a" (&a0), "a" (&ps)
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+ : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+ "memory");
}
void show_trace(struct task_struct *task, unsigned long *sp)
@@ -452,7 +464,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp)
sp = stack_pointer(task);
- stack = sp;
+ stack = sp;
printk("\nStack: ");
@@ -523,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(err);
}
-
-
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 4462c1e595c..68df35f66ce 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
jx a0
+ENDPROC(_UserExceptionVector)
+
/*
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
*
@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
jx a0
+ENDPROC(_KernelExceptionVector)
/*
* Double exception vector (Exceptions with PS.EXCM == 1)
@@ -225,7 +228,13 @@ ENTRY(_DoubleExceptionVector)
/* Window overflow/underflow exception. Get stack pointer. */
mov a3, a2
- movi a2, exc_table
+ /* This explicit literal and the following references to it are made
+ * in order to fit DoubleExceptionVector.literals into the available
+ * 16-byte gap before DoubleExceptionVector.text in the absence of
+ * link time relaxation. See kernel/vmlinux.lds.S
+ */
+ .literal .Lexc_table, exc_table
+ l32r a2, .Lexc_table
l32i a2, a2, EXC_TABLE_KSTK
/* Check for overflow/underflow exception, jump if overflow. */
@@ -255,7 +264,7 @@ ENTRY(_DoubleExceptionVector)
s32i a0, a2, PT_AREG0
wsr a3, excsave1 # save a3
- movi a3, exc_table
+ l32r a3, .Lexc_table
rsr a0, exccause
s32i a0, a2, PT_DEPC # mark it as a regular exception
@@ -267,7 +276,7 @@ ENTRY(_DoubleExceptionVector)
/* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */
- movi a3, exc_table
+ l32r a3, .Lexc_table
s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable
/* Enter critical section. */
@@ -296,7 +305,7 @@ ENTRY(_DoubleExceptionVector)
/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
- movi a3, exc_table
+ l32r a3, .Lexc_table
rsr a0, exccause
addx4 a0, a0, a3
l32i a0, a0, EXC_TABLE_FAST_USER
@@ -338,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
.end literal_prefix
+ENDPROC(_DoubleExceptionVector)
/*
* Debug interrupt vector
@@ -349,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
.section .DebugInterruptVector.text, "ax"
ENTRY(_DebugInterruptVector)
+
xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
jx a0
+ENDPROC(_DebugInterruptVector)
/* Window overflow and underflow handlers.
@@ -363,38 +375,43 @@ ENTRY(_DebugInterruptVector)
* we try to access any page that would cause a page fault early.
*/
+#define ENTRY_ALIGN64(name) \
+ .globl name; \
+ .align 64; \
+ name:
+
.section .WindowVectors.text, "ax"
/* 4-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
s32e a0, a5, -16
s32e a1, a5, -12
s32e a2, a5, -8
s32e a3, a5, -4
rfwo
+ENDPROC(_WindowOverflow4)
+
/* 4-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
l32e a0, a5, -16
l32e a1, a5, -12
l32e a2, a5, -8
l32e a3, a5, -4
rfwu
+ENDPROC(_WindowUnderflow4)
/* 8-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
s32e a0, a9, -16
l32e a0, a1, -12
s32e a2, a9, -8
@@ -406,11 +423,12 @@ _WindowOverflow8:
s32e a7, a0, -20
rfwo
+ENDPROC(_WindowOverflow8)
+
/* 8-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
l32e a1, a9, -12
l32e a0, a9, -16
l32e a7, a1, -12
@@ -422,12 +440,12 @@ _WindowUnderflow8:
l32e a7, a7, -20
rfwu
+ENDPROC(_WindowUnderflow8)
/* 12-Register Window Overflow Vector (Handler) */
- .align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
s32e a0, a13, -16
l32e a0, a1, -12
s32e a1, a13, -12
@@ -443,11 +461,12 @@ _WindowOverflow12:
s32e a11, a0, -20
rfwo
+ENDPROC(_WindowOverflow12)
+
/* 12-Register Window Underflow Vector (Handler) */
- .align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
l32e a1, a13, -12
l32e a0, a13, -16
l32e a11, a1, -12
@@ -463,6 +482,6 @@ _WindowUnderflow12:
l32e a11, a11, -20
rfwu
- .text
-
+ENDPROC(_WindowUnderflow12)
+ .text
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index df397f932d0..4eb573d2720 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -41,10 +41,11 @@
.text
ENTRY(csum_partial)
- /*
- * Experiments with Ethernet and SLIP connections show that buf
- * is aligned on either a 2-byte or 4-byte boundary.
- */
+
+ /*
+ * Experiments with Ethernet and SLIP connections show that buf
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
entry sp, 32
extui a5, a2, 0, 2
bnez a5, 8f /* branch if 2-byte aligned */
@@ -170,7 +171,7 @@ ENTRY(csum_partial)
3:
j 5b /* branch to handle the remaining byte */
-
+ENDPROC(csum_partial)
/*
* Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +212,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
*/
ENTRY(csum_partial_copy_generic)
+
entry sp, 32
mov a12, a3
mov a11, a4
@@ -367,6 +369,8 @@ DST( s8i a8, a3, 1 )
6:
j 4b /* process the possible trailing odd byte */
+ENDPROC(csum_partial_copy_generic)
+
# Exception handler:
.section .fixup, "ax"
@@ -406,4 +410,3 @@ DST( s8i a8, a3, 1 )
retw
.previous
-
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index c48b80acb5f..b1c219acabe 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -210,8 +210,10 @@ memcpy:
_beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset
-#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
- lint or ferret client, or 0 to save a few cycles */
+
+/* set to 1 when running on ISS (simulator) with the
+ lint or ferret client, or 0 to save a few cycles */
+#define SIM_CHECKS_ALIGNMENT 1
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
and a11, a3, a8 # save unalignment offset for below
sub a3, a3, a11 # align a3
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index a71733ae119..34d05abbd92 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
unsigned char header_type;
struct pci_dev *dev = &pciauto_dev;
- pciauto_dev.bus = &pciauto_bus;
- pciauto_dev.sysdata = pci_ctrl;
+ pciauto_dev.bus = &pciauto_bus;
+ pciauto_dev.sysdata = pci_ctrl;
pciauto_bus.ops = pci_ctrl->ops;
/*
@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
}
return sub_bus;
}
-
-
-
-
-
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 9f603cdaaa6..1ad0ecf4536 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -166,7 +166,7 @@ __strncpy_user:
retw
.Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__
- extui a9, a9, 16, 16
+ extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s)
addi a11, a11, 1 # advance dst pointer
@@ -174,7 +174,7 @@ __strncpy_user:
retw
.Lz2: # byte 2 is zero
#ifdef __XTENSA_EB__
- extui a9, a9, 16, 16
+ extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s)
movi a9, 0
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 23f2a89816a..4c03b1e581e 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -145,4 +145,3 @@ __strnlen_user:
lenfixup:
movi a2, 0
retw
-
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 46d60314bb1..ace1892a875 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -318,4 +318,3 @@ l_fixup:
/* Ignore memset return value in a6. */
/* a2 still contains bytes not copied. */
retw
-
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 85df4655d32..81edeab82d1 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
* For now, flush the whole cache. FIXME??
*/
-void flush_cache_range(struct vm_area_struct* vma,
+void flush_cache_range(struct vm_area_struct* vma,
unsigned long start, unsigned long end)
{
__flush_invalidate_dcache_all();
@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma,
*/
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
- unsigned long pfn)
+ unsigned long pfn)
{
/* Note that we have to use the 'alias' address to avoid multi-hit */
@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
- unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page);
+ unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page(paddr);
- __flush_invalidate_dcache_page_alias(vaddr, phys);
- __invalidate_icache_page_alias(vaddr, phys);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
clear_bit(PG_arch_1, &page->flags);
}
@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
/* Flush and invalidate user page if aliased. */
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
- __flush_invalidate_dcache_page_alias(temp, phys);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
}
/* Copy data */
@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
*/
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_range((unsigned long) dst, len);
- if ((vma->vm_flags & VM_EXEC) != 0) {
- __invalidate_icache_page_alias(temp, phys);
- }
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
*/
if (alias) {
- unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
- __flush_invalidate_dcache_page_alias(temp, phys);
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
}
memcpy(dst, src, len);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 245b08f7eaf..4b7bc8db170 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Oops", regs, sig);
do_exit(sig);
}
-
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index db955179da2..7a5156ffebb 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
sysmem.nr_banks++;
}
sysmem.bank[i].end = start;
+
+ } else if (end < sysmem.bank[i].end) {
+ sysmem.bank[i].start = end;
+
} else {
- if (end < sysmem.bank[i].end)
- sysmem.bank[i].start = end;
- else {
- /* remove entry */
- sysmem.nr_banks--;
- sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
- sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
- }
+ /* remove entry */
+ sysmem.nr_banks--;
+ sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
+ sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
}
return -1;
}
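
The mem_reserve() rewrite above turns the nested else into three sibling branches: clip the tail of bank i when the reservation starts inside it (appending a new bank for any remainder past the reserved range), clip the head when only the front of the bank is covered, or drop the bank entirely by copying the last bank into its slot. Below is a standalone model of that decision with example addresses; the first condition is inferred from the surrounding function, which lies outside the visible hunk, so treat it as a sketch rather than the kernel code.

/* Standalone model of the bank-clipping logic, not the kernel function.
 * Example: reserve [0x02000000, 0x03000000) out of bank [0, 0x08000000). */
#include <stdio.h>

struct bank { unsigned long start, end; };

int main(void)
{
        struct bank b = { 0x00000000, 0x08000000 };
        struct bank tail = { 0, 0 };
        unsigned long start = 0x02000000, end = 0x03000000;

        if (start > b.start) {
                if (end < b.end) {      /* reservation splits the bank */
                        tail.start = end;
                        tail.end = b.end;       /* appended as a new bank */
                }
                b.end = start;          /* clip the tail of bank i */
        } else if (end < b.end) {
                b.start = end;          /* clip the head of bank i */
        } else {
                b.start = b.end = 0;    /* fully covered: entry removed */
        }

        printf("bank i:   [%#lx, %#lx)\n", b.start, b.end);
        if (tail.end)
                printf("new bank: [%#lx, %#lx)\n", tail.start, tail.end);
        return 0;
}
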
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d875..d97ed1ba7b0 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
*/
ENTRY(clear_page)
+
entry a1, 16
movi a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
retw
+ENDPROC(clear_page)
+
/*
* copy_page and copy_user_page are the same for non-cache-aliased configs.
*
@@ -53,6 +56,7 @@ ENTRY(clear_page)
*/
ENTRY(copy_page)
+
entry a1, 16
__loopi a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
retw
+ENDPROC(copy_page)
+
#ifdef CONFIG_MMU
/*
* If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
*/
ENTRY(clear_user_page)
+
entry a1, 32
/* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
retw
+ENDPROC(clear_user_page)
+
/*
* copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
* a2 a3 a4 a5
@@ -171,7 +180,7 @@ ENTRY(clear_user_page)
ENTRY(copy_user_page)
- entry a1, 32
+ entry a1, 32
/* Mark page dirty and determine alias for destination. */
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
retw
+ENDPROC(copy_user_page)
+
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
*/
ENTRY(__flush_invalidate_dcache_page_alias)
+
entry sp, 16
movi a7, 0 # required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
retw
+ENDPROC(__flush_invalidate_dcache_page_alias)
#endif
ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
ENTRY(__invalidate_icache_page_alias)
+
entry sp, 16
addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
isync
retw
+ENDPROC(__invalidate_icache_page_alias)
+
#endif
/* End of special treatment in tlb miss exception */
ENTRY(__tlbtemp_mapping_end)
+
#endif /* CONFIG_MMU */
/*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
*/
ENTRY(__invalidate_icache_page)
+
entry sp, 16
___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
retw
+ENDPROC(__invalidate_icache_page)
+
/*
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_dcache_page)
+
entry sp, 16
___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
retw
+ENDPROC(__invalidate_dcache_page)
+
/*
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_page)
+
entry sp, 16
___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
dsync
retw
+ENDPROC(__flush_invalidate_dcache_page)
+
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
+
entry sp, 16
___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
dsync
retw
+ENDPROC(__flush_dcache_page)
+
/*
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_icache_range)
+
entry sp, 16
___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
retw
+ENDPROC(__invalidate_icache_range)
+
/*
* void __flush_invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_range)
+
entry sp, 16
___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
retw
+ENDPROC(__flush_invalidate_dcache_range)
+
/*
* void _flush_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_dcache_range)
+
entry sp, 16
___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
retw
+ENDPROC(__flush_dcache_range)
+
/*
* void _invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_dcache_range)
+
entry sp, 16
___invalidate_dcache_range a2 a3 a4
retw
+ENDPROC(__invalidate_dcache_range)
+
/*
* void _invalidate_icache_all(void)
*/
ENTRY(__invalidate_icache_all)
+
entry sp, 16
___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
retw
+ENDPROC(__invalidate_icache_all)
+
/*
* void _flush_invalidate_dcache_all(void)
*/
ENTRY(__flush_invalidate_dcache_all)
+
entry sp, 16
___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
retw
+ENDPROC(__flush_invalidate_dcache_all)
+
/*
* void _invalidate_dcache_all(void)
*/
ENTRY(__invalidate_dcache_all)
+
entry sp, 16
___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
retw
+ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index ca81654f3ec..0f77f9d3bb8 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -37,7 +37,7 @@ void __init init_mmu(void)
/* Set rasid register to a known value. */
- set_rasid_register(ASID_USER_FIRST);
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index e2700b21395..5411aa67c68 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -63,7 +63,7 @@ void flush_tlb_all (void)
void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm) {
- int flags;
+ unsigned long flags;
local_save_flags(flags);
__get_new_mmu_context(mm);
__load_mmu_context(mm);
@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm)
#endif
void flush_tlb_range (struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
int oldpid = get_rasid_register();
set_rasid_register (ASID_INSERT(mm->context));
start &= PAGE_MASK;
- if (vma->vm_flags & VM_EXEC)
+ if (vma->vm_flags & VM_EXEC)
while(start < end) {
invalidate_itlb_mapping(start);
invalidate_dtlb_mapping(start);
@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_save_flags(flags);
- oldpid = get_rasid_register();
+ oldpid = get_rasid_register();
if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page);
@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags);
}
-
diff --git a/arch/xtensa/platforms/iss/include/platform/serial.h b/arch/xtensa/platforms/iss/include/platform/serial.h
index e69de29bb2d..16aec542d43 100644
--- a/arch/xtensa/platforms/iss/include/platform/serial.h
+++ b/arch/xtensa/platforms/iss/include/platform/serial.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_ISS_SERIAL_H
+#define __ASM_XTENSA_ISS_SERIAL_H
+
+/* Have no meaning on ISS, but needed for 8250_early.c */
+#define BASE_BAUD 0
+
+#endif /* __ASM_XTENSA_ISS_SERIAL_H */
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index bd78192e2fc..b5a4edf02d7 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -74,13 +74,12 @@ static inline int __simc(int a, int b, int c, int d, int e, int f)
"mov %1, a3\n"
: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
: "r"(c1), "r"(d1), "r"(e1), "r"(f1)
- : );
+ : "memory");
return ret;
}
static inline int simc_open(const char *file, int flags, int mode)
{
- wmb();
return __simc(SYS_open, (int) file, flags, mode, 0, 0);
}
@@ -91,19 +90,16 @@ static inline int simc_close(int fd)
static inline int simc_ioctl(int fd, int request, void *arg)
{
- wmb();
return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
}
static inline int simc_read(int fd, void *buf, size_t count)
{
- rmb();
return __simc(SYS_read, fd, (int) buf, count, 0, 0);
}
static inline int simc_write(int fd, const void *buf, size_t count)
{
- wmb();
return __simc(SYS_write, fd, (int) buf, count, 0, 0);
}
@@ -111,7 +107,6 @@ static inline int simc_poll(int fd)
{
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
- wmb();
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
0, 0);
}
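
The simcall.h hunks above drop the explicit wmb()/rmb() calls around the simulator traps and instead add a "memory" clobber to the inline asm. The clobber tells the compiler that the asm may read or write arbitrary memory, so any buffer handed to the simulator is written out before the trap and reloaded afterwards, which is what the hand-placed barriers were achieving. A self-contained illustration of the effect (plain userspace C, not kernel code):

#include <stdio.h>

static int shared;

static inline void fake_simcall(void)
{
        /* The "memory" clobber forces pending stores to be committed before
         * the asm and cached values to be reloaded after it, acting as a
         * compiler barrier around the (empty) asm statement. */
        __asm__ __volatile__("" ::: "memory");
}

int main(void)
{
        shared = 1;             /* must reach memory before the "call" */
        fake_simcall();         /* the host/simulator could act on it here */
        printf("%d\n", shared); /* re-read from memory, not a stale register */
        return 0;
}
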
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
new file mode 100644
index 00000000000..b9ae206340c
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -0,0 +1,9 @@
+# Makefile for the Tensilica xtavnet Emulation Board
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are in the main makefile...
+
+obj-y = setup.o lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
new file mode 100644
index 00000000000..4416773cbde
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -0,0 +1,69 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ */
+
+/*
+ * This file contains the hardware configuration of the XTAVNET boards.
+ */
+
+#ifndef __XTENSA_XTAVNET_HARDWARE_H
+#define __XTENSA_XTAVNET_HARDWARE_H
+
+/* By default NO_IRQ is defined to 0 in Linux, but we use the
+ interrupt 0 for UART... */
+#define NO_IRQ -1
+
+/* Memory configuration. */
+
+#define PLATFORM_DEFAULT_MEM_START 0x00000000
+#define PLATFORM_DEFAULT_MEM_SIZE 0x04000000
+
+/* Interrupt configuration. */
+
+#define PLATFORM_NR_IRQS 10
+
+/* Default assignment of LX60 devices to external interrupts. */
+
+#ifdef CONFIG_ARCH_HAS_SMP
+#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
+#define OETH_IRQ XCHAL_EXTINT4_NUM
+#else
+#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
+#define OETH_IRQ XCHAL_EXTINT1_NUM
+#endif
+
+/*
+ * Device addresses and parameters.
+ */
+
+/* UART */
+#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+/* Misc. */
+#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+/* Clock frequency in Hz (read-only): */
+#define XTFPGA_CLKFRQ_VADDR (XTFPGA_FPGAREGS_VADDR + 0x04)
+/* Setting of 8 DIP switches: */
+#define DIP_SWITCHES_VADDR (XTFPGA_FPGAREGS_VADDR + 0x0C)
+/* Software reset (write 0xdead): */
+#define XTFPGA_SWRST_VADDR (XTFPGA_FPGAREGS_VADDR + 0x10)
+
+/* OpenCores Ethernet controller: */
+ /* regs + RX/TX descriptors */
+#define OETH_REGS_PADDR (XCHAL_KIO_PADDR + 0x0D030000)
+#define OETH_REGS_SIZE 0x1000
+#define OETH_SRAMBUFF_PADDR (XCHAL_KIO_PADDR + 0x0D800000)
+
+ /* 5*rx buffs + 5*tx buffs */
+#define OETH_SRAMBUFF_SIZE (5 * 0x600 + 5 * 0x600)
+
+#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
new file mode 100644
index 00000000000..0e435645af5
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -0,0 +1,20 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/lcd.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __XTENSA_XTAVNET_LCD_H
+#define __XTENSA_XTAVNET_LCD_H
+
+/* Display string STR at position POS on the LCD. */
+void lcd_disp_at_pos(char *str, unsigned char pos);
+
+/* Shift the contents of the LCD display left or right. */
+void lcd_shiftleft(void);
+void lcd_shiftright(void);
+#endif
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/serial.h b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
new file mode 100644
index 00000000000..14d8f7beebf
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/include/platform/serial.h
@@ -0,0 +1,18 @@
+/*
+ * arch/xtensa/platform/xtavnet/include/platform/serial.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+#ifndef __ASM_XTENSA_XTAVNET_SERIAL_H
+#define __ASM_XTENSA_XTAVNET_SERIAL_H
+
+#include <platform/hardware.h>
+
+#define BASE_BAUD (*(long *)XTFPGA_CLKFRQ_VADDR / 16)
+
+#endif /* __ASM_XTENSA_XTAVNET_SERIAL_H */
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
new file mode 100644
index 00000000000..2872301598d
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -0,0 +1,76 @@
+/*
+ * Driver for the LCD display on the Tensilica LX60 Board.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
+ */
+
+/*
+ *
+ * FIXME: this code is from the examples from the LX60 user guide.
+ *
+ * The lcd_pause function does busy waiting, which is probably not
+ * great. Maybe the code could be changed to use kernel timers, or
+ * change the hardware to not need to wait.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <platform/hardware.h>
+#include <platform/lcd.h>
+#include <linux/delay.h>
+
+#define LCD_PAUSE_ITERATIONS 4000
+#define LCD_CLEAR 0x1
+#define LCD_DISPLAY_ON 0xc
+
+/* 8bit and 2 lines display */
+#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_POS 0x80
+#define LCD_SHIFT_LEFT 0x18
+#define LCD_SHIFT_RIGHT 0x1c
+
+static int __init lcd_init(void)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+ udelay(50);
+ *LCD_INSTR_ADDR = LCD_CLEAR;
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+}
+
+void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+ *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ udelay(100);
+ while (*str != 0) {
+ *LCD_DATA_ADDR = *str;
+ udelay(200);
+ str++;
+ }
+}
+
+void lcd_shiftleft(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ udelay(50);
+}
+
+void lcd_shiftright(void)
+{
+ *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ udelay(50);
+}
+
+arch_initcall(lcd_init);
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
new file mode 100644
index 00000000000..4b9951a4569
--- /dev/null
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -0,0 +1,301 @@
+/*
+ *
+ * arch/xtensa/platform/xtavnet/setup.c
+ *
+ * ...
+ *
+ * Authors: Chris Zankel <chris@zankel.net>
+ * Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2006 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/lcd.h>
+#include <platform/hardware.h>
+
+void platform_halt(void)
+{
+ lcd_disp_at_pos(" HALT ", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_power_off(void)
+{
+ lcd_disp_at_pos("POWEROFF", 0);
+ local_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+void platform_restart(void)
+{
+ /* Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector. */
+
+
+ __asm__ __volatile__ ("movi a2, 15\n\t"
+ "wsr a2, icountlevel\n\t"
+ "movi a2, 0\n\t"
+ "wsr a2, icount\n\t"
+ "wsr a2, ibreakenable\n\t"
+ "wsr a2, lcount\n\t"
+ "movi a2, 0x1f\n\t"
+ "wsr a2, ps\n\t"
+ "isync\n\t"
+ "jx %0\n\t"
+ :
+ : "a" (XCHAL_RESET_VECTOR_VADDR)
+ : "a2"
+ );
+
+ /* control never gets here */
+}
+
+void __init platform_setup(char **cmdline)
+{
+}
+
+#ifdef CONFIG_OF
+
+static void __init update_clock_frequency(struct device_node *node)
+{
+ struct property *newfreq;
+ u32 freq;
+
+ if (!of_property_read_u32(node, "clock-frequency", &freq) && freq != 0)
+ return;
+
+ newfreq = kzalloc(sizeof(*newfreq) + sizeof(u32), GFP_KERNEL);
+ if (!newfreq)
+ return;
+ newfreq->value = newfreq + 1;
+ newfreq->length = sizeof(freq);
+ newfreq->name = kstrdup("clock-frequency", GFP_KERNEL);
+ if (!newfreq->name) {
+ kfree(newfreq);
+ return;
+ }
+
+ *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
+ prom_update_property(node, newfreq);
+}
+
+#define MAC_LEN 6
+static void __init update_local_mac(struct device_node *node)
+{
+ struct property *newmac;
+ const u8* macaddr;
+ int prop_len;
+
+ macaddr = of_get_property(node, "local-mac-address", &prop_len);
+ if (macaddr == NULL || prop_len != MAC_LEN)
+ return;
+
+ newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL);
+ if (newmac == NULL)
+ return;
+
+ newmac->value = newmac + 1;
+ newmac->length = MAC_LEN;
+ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (newmac->name == NULL) {
+ kfree(newmac);
+ return;
+ }
+
+ memcpy(newmac->value, macaddr, MAC_LEN);
+ ((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f;
+ prom_update_property(node, newmac);
+}
+
+static int __init machine_setup(void)
+{
+ struct device_node *serial;
+ struct device_node *eth = NULL;
+
+ for_each_compatible_node(serial, NULL, "ns16550a")
+ update_clock_frequency(serial);
+
+ if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
+ update_local_mac(eth);
+ return 0;
+}
+arch_initcall(machine_setup);
+
+#endif
+
+/* early initialization */
+
+void __init platform_init(bp_tag_t *first)
+{
+}
+
+/* Heartbeat. */
+
+void platform_heartbeat(void)
+{
+}
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+
+void platform_calibrate_ccount(void)
+{
+ long clk_freq = 0;
+#ifdef CONFIG_OF
+ struct device_node *cpu =
+ of_find_compatible_node(NULL, NULL, "xtensa,cpu");
+ if (cpu) {
+ u32 freq;
+ update_clock_frequency(cpu);
+ if (!of_property_read_u32(cpu, "clock-frequency", &freq))
+ clk_freq = freq;
+ }
+#endif
+ if (!clk_freq)
+ clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+ ccount_per_jiffy = clk_freq / HZ;
+ nsec_per_ccount = 1000000000UL / clk_freq;
+}
+
+#endif
+
+#ifndef CONFIG_OF
+
+#include <linux/serial_8250.h>
+#include <linux/if.h>
+#include <net/ethoc.h>
+
+/*----------------------------------------------------------------------------
+ * Ethernet -- OpenCores Ethernet MAC (ethoc driver)
+ */
+
+static struct resource ethoc_res[] __initdata = {
+ [0] = { /* register space */
+ .start = OETH_REGS_PADDR,
+ .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* buffer space */
+ .start = OETH_SRAMBUFF_PADDR,
+ .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = { /* IRQ number */
+ .start = OETH_IRQ,
+ .end = OETH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct ethoc_platform_data ethoc_pdata __initdata = {
+ /*
+ * The MAC address for these boards is 00:50:c2:13:6f:xx.
+ * The last byte (here as zero) is read from the DIP switches on the
+ * board.
+ */
+ .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 },
+ .phy_id = -1,
+};
+
+static struct platform_device ethoc_device __initdata = {
+ .name = "ethoc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ethoc_res),
+ .resource = ethoc_res,
+ .dev = {
+ .platform_data = &ethoc_pdata,
+ },
+};
+
+/*----------------------------------------------------------------------------
+ * UART
+ */
+
+static struct resource serial_resource __initdata = {
+ .start = DUART16552_PADDR,
+ .end = DUART16552_PADDR + 0x1f,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct plat_serial8250_port serial_platform_data[] __initdata = {
+ [0] = {
+ .mapbase = DUART16552_PADDR,
+ .irq = DUART16552_INTNUM,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+ UPF_IOREMAP,
+ .iotype = UPIO_MEM32,
+ .regshift = 2,
+ .uartclk = 0, /* set in xtavnet_init() */
+ },
+ { },
+};
+
+static struct platform_device xtavnet_uart __initdata = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = 1,
+ .resource = &serial_resource,
+};
+
+/* platform devices */
+static struct platform_device *platform_devices[] __initdata = {
+ &ethoc_device,
+ &xtavnet_uart,
+};
+
+
+static int __init xtavnet_init(void)
+{
+ /* Ethernet MAC address. */
+ ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR;
+
+ /* Clock rate varies among FPGA bitstreams; board specific FPGA register
+ * reports the actual clock rate.
+ */
+ serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR;
+
+
+ /* register platform devices */
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+ /* ETHOC driver is a bit quiet; at least display Ethernet MAC, so user
+ * knows whether they set it correctly on the DIP switches.
+ */
+ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+
+ return 0;
+}
+
+/*
+ * Register to be done during do_initcalls().
+ */
+arch_initcall(xtavnet_init);
+
+#endif /* CONFIG_OF */
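
In setup.c above, platform_calibrate_ccount() reads the board clock frequency (from the device tree, or from the XTFPGA clock register as a fallback) and derives the two timing constants the xtensa time code uses. With assumed example values of a 50 MHz FPGA clock and HZ=100, the arithmetic works out as in this standalone check:

/* Standalone check of the calibration arithmetic; the clock rate and HZ
 * below are assumed example values, not taken from the patch. */
#include <stdio.h>

int main(void)
{
        unsigned long clk_freq = 50000000UL;    /* assumed FPGA clock, Hz */
        unsigned long hz = 100;                 /* assumed CONFIG_HZ */

        unsigned long ccount_per_jiffy = clk_freq / hz;          /* 500000 */
        unsigned long nsec_per_ccount = 1000000000UL / clk_freq; /* 20 ns  */

        printf("ccount_per_jiffy=%lu nsec_per_ccount=%lu\n",
               ccount_per_jiffy, nsec_per_ccount);
        return 0;
}
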
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index b89541ba39a..da9e85c13b0 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -164,7 +164,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
int cirq;
chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data));
+ chip->irq_ack(&desc->irq_data);
pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
cirq = IRQ_BASE - 1;
while (pending) {
@@ -173,7 +173,7 @@ static void demux_irqs(unsigned int irq, struct irq_desc *desc)
pending >>= n;
generic_handle_irq(cirq);
}
- chip->irq_unmask(&desc->irq_data));
+ chip->irq_unmask(&desc->irq_data);
}
extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
diff --git a/block/Kconfig b/block/Kconfig
index a7e40a7c821..4a85ccf8d4c 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
+ select PERCPU_RWSEM
help
Provide block layer support for the kernel.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3f6d39d23bb..b8858fb0caf 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c95c4d6e31..c973249d68c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,7 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
+ * restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
+ if (likely(!blk_queue_dead(q)))
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
@@ -293,6 +295,34 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on a queue if there are any pending requests.
+ * May be used to restart request handling after a request has completed.
+ * This variant runs the queue whether or not the queue has been
+ * stopped. Must be called with the queue lock held and interrupts
+ * disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+ if (unlikely(blk_queue_dead(q)))
+ return;
+
+ /*
+ * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+ * the queue lock internally. As a result multiple threads may be
+ * running such a request function concurrently. Keep track of the
+ * number of active request_fn invocations such that blk_drain_queue()
+ * can wait until all these request_fn calls have finished.
+ */
+ q->request_fn_active++;
+ q->request_fn(q);
+ q->request_fn_active--;
+}
+
+/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
*
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
+ * of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
- if (likely(!blk_queue_stopped(q)))
+ if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +379,7 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);
/**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
@@ -357,15 +387,17 @@ EXPORT_SYMBOL(blk_put_queue);
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
int i;
+ lockdep_assert_held(q->queue_lock);
+
while (true) {
bool drain = false;
- spin_lock_irq(q->queue_lock);
-
/*
* The caller might be trying to drain @q before its
* elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
__blk_run_queue(q);
drain |= q->nr_rqs_elvpriv;
+ drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
}
- spin_unlock_irq(q->queue_lock);
-
if (!drain)
break;
+
+ spin_unlock_irq(q->queue_lock);
+
msleep(10);
+
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (q->request_fn) {
struct request_list *rl;
- spin_lock_irq(q->queue_lock);
-
blk_queue_for_each_rl(rl, q)
for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
wake_up_all(&rl->wait[i]);
-
- spin_unlock_irq(q->queue_lock);
}
}
@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
if (drain) {
- blk_drain_queue(q, false);
+ spin_lock_irq(q->queue_lock);
+ __blk_drain_queue(q, false);
+ spin_unlock_irq(q->queue_lock);
+
/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
- /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
/*
- * Dead queue is permanently in bypass mode till released. Note
+ * A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /* drain all requests queued before DEAD marking */
- blk_drain_queue(q, true);
+ /*
+ * Drain all requests queued before DYING marking. Set DEAD flag to
+ * prevent that q->request_fn() gets invoked after draining finished.
+ */
+ spin_lock_irq(lock);
+ __blk_drain_queue(q, true);
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+ spin_unlock_irq(lock);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
- return blk_alloc_queue_node(gfp_mask, -1);
+ return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- return blk_init_queue_node(rfn, lock, -1);
+ return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ retry:
if (rq)
return rq;
- if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
{
trace_block_unplug(q, depth, !from_schedule);
- /*
- * Don't mess with dead queue.
- */
- if (unlikely(blk_queue_dead(q))) {
- spin_unlock(q->queue_lock);
- return;
- }
-
- /*
- * If we are punting this to kblockd, then we can safely drop
- * the queue_lock before waking kblockd (which needs to take
- * this lock).
- */
- if (from_schedule) {
- spin_unlock(q->queue_lock);
+ if (from_schedule)
blk_run_queue_async(q);
- } else {
+ else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
- }
-
+ spin_unlock(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
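
The blk-core.c changes above split the old single DEAD state in two: QUEUE_FLAG_DYING is set at the start of blk_cleanup_queue() so that allocation and lookup paths (blk_get_queue(), __get_request(), blk_insert_cloned_request(), ...) refuse new work, while QUEUE_FLAG_DEAD is set only after __blk_drain_queue() has finished; __blk_run_queue_uncond() checks it, together with the new request_fn_active counter, so request_fn() is never entered on a dead queue. The following single-threaded model (not kernel code; names mirror the patch for readability) sketches that ordering:

/* Standalone model of the DYING -> drain -> DEAD ordering introduced above. */
#include <stdbool.h>
#include <stdio.h>

struct queue_model {
        bool dying;             /* QUEUE_FLAG_DYING: refuse new requests  */
        bool dead;              /* QUEUE_FLAG_DEAD:  never run request_fn */
        int  pending;           /* queued requests                        */
        int  request_fn_active; /* nested request_fn invocations          */
};

static void run_queue_uncond(struct queue_model *q)
{
        if (q->dead)
                return;                 /* mirrors __blk_run_queue_uncond() */
        q->request_fn_active++;
        while (q->pending > 0)          /* "request_fn": complete requests  */
                q->pending--;
        q->request_fn_active--;
}

static bool submit(struct queue_model *q)
{
        if (q->dying)
                return false;           /* new I/O fails once DYING is set  */
        q->pending++;
        return true;
}

int main(void)
{
        struct queue_model q = { .pending = 3 };

        q.dying = true;                 /* blk_cleanup_queue(): mark DYING  */
        printf("submit after DYING: %d\n", submit(&q)); /* 0 = refused      */

        while (q.pending || q.request_fn_active)
                run_queue_uncond(&q);   /* __blk_drain_queue()              */

        q.dead = true;                  /* only now mark DEAD               */
        run_queue_uncond(&q);           /* no-op on a dead queue            */
        printf("drained, pending=%d\n", q.pending);
        return 0;
}
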
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f71eac35c1b..74638ec234c 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
if (is_pm_resume)
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab..b3a1f2b70b3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
- unsigned int max_discard_sectors;
- unsigned int granularity, alignment, mask;
+ sector_t max_discard_sectors;
+ sector_t granularity, alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
+ struct blk_plug plug;
if (!q)
return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- mask = granularity - 1;
- alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+ alignment = bdev_discard_alignment(bdev) >> 9;
+ alignment = sector_div(alignment, granularity);
/*
* Ensure that max_discard_sectors is of the proper
* granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- max_discard_sectors = round_down(max_discard_sectors, granularity);
+ sector_div(max_discard_sectors, granularity);
+ max_discard_sectors *= granularity;
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bb.flags = 1 << BIO_UPTODATE;
bb.wait = &wait;
+ blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
- if (req_sects < nr_sects && (end_sect & mask) != alignment) {
- end_sect =
- round_down(end_sect - alignment, granularity)
- + alignment;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
}
+ blk_finish_plug(&plug);
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
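
The blkdev_issue_discard() hunks above replace the power-of-two mask arithmetic with sector_div() so that a non-power-of-two discard_granularity is handled and the calculation stays in 64-bit sector_t; sector_div(x, y) divides x in place and returns the remainder. The end of each bio is pulled back so it stays congruent to the device's discard alignment modulo the granularity, as in this standalone check with example values:

/* Standalone check (not kernel code) of the end-sector rounding added to
 * blkdev_issue_discard(); plain / and % stand in for sector_div() here. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t granularity = 96;   /* e.g. 48 KiB in 512-byte sectors */
        uint64_t alignment   = 32;   /* device discard alignment, sectors */
        uint64_t end_sect    = 1000; /* proposed end of this discard bio */

        if (end_sect % granularity != alignment)
                end_sect = (end_sect - alignment) / granularity
                           * granularity + alignment;

        /* 1000 % 96 == 40, so end_sect is pulled back to 992 (== 32 mod 96) */
        printf("aligned end_sect = %llu\n", (unsigned long long)end_sect);
        return 0;
}
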
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 779bb7646bc..c50ecf0ea3b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
bottom = b->discard_granularity + alignment;
/* Verify that top and bottom intervals line up */
- if (max(top, bottom) & (min(top, bottom) - 1))
+ if ((max(top, bottom) % min(top, bottom)) != 0)
t->discard_misaligned = 1;
}
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
- t->discard_alignment = lcm(t->discard_alignment, alignment) &
- (t->discard_granularity - 1);
+ t->discard_alignment = lcm(t->discard_alignment, alignment) %
+ t->discard_granularity;
}
return ret;
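
The blk_stack_limits() change above has the same motivation: max & (min - 1) only detects misalignment when min is a power of two, whereas the modulo form works for any granularity. With example values top = 96 and bottom = 48, the old test falsely reports misalignment:

/* Standalone check (not kernel code); the values are illustrative. */
#include <stdio.h>

int main(void)
{
        unsigned int top = 96, bottom = 48;     /* both multiples of 48 */

        unsigned int old_test = (top > bottom ? top : bottom) &
                                ((top < bottom ? top : bottom) - 1);
        unsigned int new_test = (top > bottom ? top : bottom) %
                                (top < bottom ? top : bottom);

        /* old test: 96 & 47 == 32 -> falsely flags misalignment
         * new test: 96 % 48 == 0  -> correctly reports alignment */
        printf("old=%u new=%u\n", old_test, new_test);
        return 0;
}
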
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce620460882..788147797a7 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b60..31146225f3d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
/* if %NULL and @q is alive, fall back to root_tg */
if (!IS_ERR(blkg))
tg = blkg_to_tg(blkg);
- else if (!blk_queue_dead(q))
+ else if (!blk_queue_dying(q))
tg = td_root_tg(td);
}
diff --git a/block/blk.h b/block/blk.h
index ca51543b248..47fdfdd4152 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dead(q)) ||
+ if (unlikely(blk_queue_dying(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
+void __blk_run_queue_uncond(struct request_queue *q);
+
int blk_dev_init(void);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index deee61fbb74..650f427d915 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -151,19 +151,6 @@ failjob_rls_job:
return -ENOMEM;
}
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
- if (!q)
- return;
-
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
/**
* bsg_request_fn - generic handler for bsg requests
* @q: request queue to manage
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fb52df9744f..e62e9205b80 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1973,7 +1973,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 599b12e5380..90037b5eb17 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq_fifo_time(rq)))
return 1;
return 0;
diff --git a/block/elevator.c b/block/elevator.c
index 9b1d42b62f2..9edba1b8323 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
struct request *rq)
{
struct request *__rq;
+ bool ret;
if (blk_queue_nomerges(q))
return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
if (blk_queue_noxmerges(q))
return false;
+ ret = false;
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, blk_rq_pos(rq));
- if (__rq && blk_attempt_req_merge(q, __rq, rq))
- return true;
+ while (1) {
+ __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+ if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+ break;
- return false;
+ /* The merged request could be merged with others, try again */
+ ret = true;
+ rq = __rq;
+ }
+
+ return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
diff --git a/block/genhd.c b/block/genhd.c
index 6cace663a80..9a289d7c84b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -743,7 +743,6 @@ void __init printk_all_partitions(void)
struct hd_struct *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
- char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
/*
* Don't show empty devices or things that have been
@@ -762,16 +761,11 @@ void __init printk_all_partitions(void)
while ((part = disk_part_iter_next(&piter))) {
bool is_part0 = part == &disk->part0;
- uuid_buf[0] = '\0';
- if (part->info)
- snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
- part->info->uuid);
-
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
(unsigned long long)part_nr_sects_read(part) >> 1
, disk_name(disk, part->partno, name_buf),
- uuid_buf);
+ part->info ? part->info->uuid : "");
if (is_part0) {
if (disk->driverfs_dev != NULL &&
disk->driverfs_dev->driver != NULL)
@@ -1245,7 +1239,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
struct gendisk *alloc_disk(int minors)
{
- return alloc_disk_node(minors, -1);
+ return alloc_disk_node(minors, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_disk);
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b0..75a54e1adbb 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -234,8 +234,8 @@ config KARMA_PARTITION
uses a proprietary partition table.
config EFI_PARTITION
- bool "EFI GUID Partition support"
- depends on PARTITION_ADVANCED
+ bool "EFI GUID Partition support" if PARTITION_ADVANCED
+ default y
select CRC32
help
Say Y here if you would like to use hard disks under Linux which
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 6296b403c67..b62fb88b871 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -620,7 +620,6 @@ int efi_partition(struct parsed_partitions *state)
gpt_entry *ptes = NULL;
u32 i;
unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
- u8 unparsed_guid[37];
if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
@@ -649,11 +648,7 @@ int efi_partition(struct parsed_partitions *state)
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
info = &state->parts[i + 1].info;
- /* Instead of doing a manual swap to big endian, reuse the
- * common ASCII hex format as the interim.
- */
- efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
- part_pack_uuid(unparsed_guid, info->uuid);
+ efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid);
/* Naively convert UTF16-LE to 7 bits. */
label_max = min(sizeof(info->volname) - 1,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 5f79a6677c6..8752a5d2656 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -94,6 +94,17 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
return ret;
}
+static void set_info(struct parsed_partitions *state, int slot,
+ u32 disksig)
+{
+ struct partition_meta_info *info = &state->parts[slot].info;
+
+ snprintf(info->uuid, sizeof(info->uuid), "%08x-%02x", disksig,
+ slot);
+ info->volname[0] = 0;
+ state->parts[slot].has_info = true;
+}
+
/*
* Create devices for each logical partition in an extended partition.
* The logical partitions form a linked list, with each entry being
@@ -106,7 +117,8 @@ static int aix_magic_present(struct parsed_partitions *state, unsigned char *p)
*/
static void parse_extended(struct parsed_partitions *state,
- sector_t first_sector, sector_t first_size)
+ sector_t first_sector, sector_t first_size,
+ u32 disksig)
{
struct partition *p;
Sector sect;
@@ -166,6 +178,7 @@ static void parse_extended(struct parsed_partitions *state,
}
put_partition(state, state->next, next, size);
+ set_info(state, state->next, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
state->parts[state->next].flags = ADDPART_FLAG_RAID;
loopct = 0;
@@ -437,6 +450,7 @@ int msdos_partition(struct parsed_partitions *state)
struct partition *p;
struct fat_boot_sector *fb;
int slot;
+ u32 disksig;
data = read_part_sector(state, 0, &sect);
if (!data)
@@ -491,6 +505,8 @@ int msdos_partition(struct parsed_partitions *state)
#endif
p = (struct partition *) (data + 0x1be);
+ disksig = le32_to_cpup((__le32 *)(data + 0x1b8));
+
/*
* Look for partitions in two passes:
* First find the primary and DOS-type extended partitions.
@@ -515,11 +531,12 @@ int msdos_partition(struct parsed_partitions *state)
put_partition(state, slot, start, n);
strlcat(state->pp_buf, " <", PAGE_SIZE);
- parse_extended(state, start, size);
+ parse_extended(state, start, size, disksig);
strlcat(state->pp_buf, " >", PAGE_SIZE);
continue;
}
put_partition(state, slot, start, size);
+ set_info(state, slot, disksig);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
state->parts[slot].flags = ADDPART_FLAG_RAID;
if (SYS_IND(p) == DM6_PARTITION)
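
set_info() above gives each MBR partition a pseudo-uuid built from the 32-bit disk signature read at offset 0x1b8 and the partition slot number, formatted as "%08x-%02x", so MBR disks get a stable identifier analogous to the GPT partition GUID. A standalone illustration with a made-up signature:

/* Standalone illustration of the uuid string format used by set_info();
 * the signature value below is made up for the example. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t disksig = 0x2bd2c32a;  /* example NT disk signature */
        int slot = 1;                   /* partition slot */
        char uuid[16];

        snprintf(uuid, sizeof(uuid), "%08x-%02x", disksig, slot);
        printf("%s\n", uuid);           /* prints "2bd2c32a-01" */
        return 0;
}
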
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 6563366bae8..4641d95651d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -324,9 +324,19 @@ config CRYPTO_CRC32C
by iSCSI for header and data digests and by others.
See Castagnoli93. Module will be crc32c.
+config CRYPTO_CRC32C_X86_64
+ bool
+ depends on X86 && 64BIT
+ select CRYPTO_HASH
+ help
+ In Intel processor with SSE4.2 supported, the processor will
+ support CRC32C calculation using hardware accelerated CRC32
+ instruction optimized with PCLMULQDQ instruction when available.
+
config CRYPTO_CRC32C_INTEL
tristate "CRC32c INTEL hardware acceleration"
depends on X86
+ select CRYPTO_CRC32C_X86_64 if 64BIT
select CRYPTO_HASH
help
In Intel processor with SSE4.2 supported, the processor will
@@ -793,6 +803,28 @@ config CRYPTO_CAMELLIA_X86_64
See also:
<https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
+ tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
+ depends on X86 && 64BIT
+ depends on CRYPTO
+ select CRYPTO_ALGAPI
+ select CRYPTO_CRYPTD
+ select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_CAMELLIA_X86_64
+ select CRYPTO_LRW
+ select CRYPTO_XTS
+ help
+ Camellia cipher algorithm module (x86_64/AES-NI/AVX).
+
+ Camellia is a symmetric key block cipher developed jointly
+ at NTT and Mitsubishi Electric Corporation.
+
+ The Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+ See also:
+ <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+
config CRYPTO_CAMELLIA_SPARC64
tristate "Camellia cipher algorithm (SPARC64)"
depends on SPARC64
@@ -809,9 +841,16 @@ config CRYPTO_CAMELLIA_SPARC64
See also:
<https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+config CRYPTO_CAST_COMMON
+ tristate
+ help
+ Common parts of the CAST cipher algorithms shared by the
+ generic c and the assembler implementations.
+
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_CAST_COMMON
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
@@ -822,6 +861,7 @@ config CRYPTO_CAST5_AVX_X86_64
select CRYPTO_ALGAPI
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
+ select CRYPTO_CAST_COMMON
select CRYPTO_CAST5
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
@@ -833,6 +873,7 @@ config CRYPTO_CAST5_AVX_X86_64
config CRYPTO_CAST6
tristate "CAST6 (CAST-256) cipher algorithm"
select CRYPTO_ALGAPI
+ select CRYPTO_CAST_COMMON
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
described in RFC2612.
@@ -844,6 +885,7 @@ config CRYPTO_CAST6_AVX_X86_64
select CRYPTO_CRYPTD
select CRYPTO_ABLK_HELPER_X86
select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_CAST_COMMON
select CRYPTO_CAST6
select CRYPTO_LRW
select CRYPTO_XTS
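The CRYPTO_CRC32C_X86_64 help text above refers to the SSE4.2 CRC32 instruction. As a rough illustration of what that hardware path computes, here is a user-space sketch using the compiler's SSE4.2 intrinsics; it assumes a 64-bit build compiled with -msse4.2, the function name is hypothetical, and it omits the PCLMULQDQ folding the kernel driver uses for large buffers:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <nmmintrin.h>          /* SSE4.2 intrinsics: _mm_crc32_u8/_u64 */

/* Hypothetical helper: CRC32C (Castagnoli) over a buffer using the
 * CRC32 instruction, 8 bytes at a time plus a byte-wise tail.  By the
 * usual convention the caller passes ~0 as the initial crc and inverts
 * the returned value. */
static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len >= 8) {
		uint64_t v;

		memcpy(&v, p, sizeof(v));       /* unaligned-safe load */
		crc = (uint32_t)_mm_crc32_u64(crc, v);
		p += 8;
		len -= 8;
	}
	while (len--)
		crc = _mm_crc32_u8(crc, *p++);
	return crc;
}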
diff --git a/crypto/Makefile b/crypto/Makefile
index 8cf61ffe351..d59dec74980 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
+obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o
obj-$(CONFIG_CRYPTO_CAST6) += cast6_generic.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index bc525dbd8a4..5558f630a0e 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -30,275 +30,6 @@
#include <linux/types.h>
#include <crypto/cast5.h>
-
-const u32 cast5_s1[256] = {
- 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
- 0x9c004dd3, 0x6003e540, 0xcf9fc949,
- 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
- 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
- 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3,
- 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
- 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1,
- 0xaa54166b, 0x22568e3a, 0xa2d341d0,
- 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac,
- 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
- 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0,
- 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
- 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290,
- 0xe93b159f, 0xb48ee411, 0x4bff345d,
- 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad,
- 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
- 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f,
- 0xc59c5319, 0xb949e354, 0xb04669fe,
- 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5,
- 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
- 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5,
- 0xf61b1891, 0xbb72275e, 0xaa508167,
- 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427,
- 0xa2d1936b, 0x2ad286af, 0xaa56d291,
- 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d,
- 0x73e2bb14, 0xa0bebc3c, 0x54623779,
- 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e,
- 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
- 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf,
- 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
- 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241,
- 0x051ef495, 0xaa573b04, 0x4a805d8d,
- 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b,
- 0x50afd341, 0xa7c13275, 0x915a0bf5,
- 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265,
- 0xab85c5f3, 0x1b55db94, 0xaad4e324,
- 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3,
- 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
- 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6,
- 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
- 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6,
- 0x032268d4, 0xc9600acc, 0xce387e6d,
- 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da,
- 0x4736f464, 0x5ad328d8, 0xb347cc96,
- 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc,
- 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
- 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f,
- 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
- 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4,
- 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
- 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af,
- 0x51c85f4d, 0x56907596, 0xa5bb15e6,
- 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a,
- 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
- 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf,
- 0x700b45e1, 0xd5ea50f1, 0x85a92872,
- 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198,
- 0x0cd0ede7, 0x26470db8, 0xf881814c,
- 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db,
- 0xab838653, 0x6e2f1e23, 0x83719c9e,
- 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c,
- 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
- 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
- 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
-};
-EXPORT_SYMBOL_GPL(cast5_s1);
-const u32 cast5_s2[256] = {
- 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
- 0xeec5207a, 0x55889c94, 0x72fc0651,
- 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
- 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
- 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086,
- 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
- 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb,
- 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
- 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f,
- 0x77e83f4e, 0x79929269, 0x24fa9f7b,
- 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154,
- 0x0d554b63, 0x5d681121, 0xc866c359,
- 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181,
- 0x39f7627f, 0x361e3084, 0xe4eb573b,
- 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c,
- 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
- 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a,
- 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
- 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c,
- 0x1d804366, 0x721d9bfd, 0xa58684bb,
- 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1,
- 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
- 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9,
- 0xe0b56714, 0x21f043b7, 0xe5d05860,
- 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf,
- 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
- 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c,
- 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
- 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122,
- 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
- 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402,
- 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
- 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53,
- 0xe3214517, 0xb4542835, 0x9f63293c,
- 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6,
- 0x30a22c95, 0x31a70850, 0x60930f13,
- 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6,
- 0xa02b1741, 0x7cbad9a2, 0x2180036f,
- 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676,
- 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
- 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb,
- 0x846a3bae, 0x8ff77888, 0xee5d60f6,
- 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54,
- 0x157fd7fa, 0xef8579cc, 0xd152de58,
- 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5,
- 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
- 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8,
- 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
- 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc,
- 0x301e16e6, 0x273be979, 0xb0ffeaa6,
- 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a,
- 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
- 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e,
- 0x1a513742, 0xef6828bc, 0x520365d6,
- 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb,
- 0x5eea29cb, 0x145892f5, 0x91584f7f,
- 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4,
- 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
- 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3,
- 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
- 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589,
- 0xa345415e, 0x5c038323, 0x3e5d3bb9,
- 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
- 0x73bfbe70, 0x83877605, 0x4523ecf1
-};
-EXPORT_SYMBOL_GPL(cast5_s2);
-const u32 cast5_s3[256] = {
- 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
- 0x369fe44b, 0x8c1fc644, 0xaececa90,
- 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
- 0xf0ad0548, 0xe13c8d83, 0x927010d5,
- 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820,
- 0xfade82e0, 0xa067268b, 0x8272792e,
- 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee,
- 0x825b1bfd, 0x9255c5ed, 0x1257a240,
- 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf,
- 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
- 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1,
- 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
- 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c,
- 0x4a012d6e, 0xc5884a28, 0xccc36f71,
- 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850,
- 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
- 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e,
- 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
- 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0,
- 0x1eac5790, 0x796fb449, 0x8252dc15,
- 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403,
- 0xe83ec305, 0x4f91751a, 0x925669c2,
- 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574,
- 0x927985b2, 0x8276dbcb, 0x02778176,
- 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83,
- 0x340ce5c8, 0x96bbb682, 0x93b4b148,
- 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20,
- 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
- 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e,
- 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
- 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9,
- 0xbda8229c, 0x127dadaa, 0x438a074e,
- 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff,
- 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
- 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a,
- 0x76a2e214, 0xb9a40368, 0x925d958f,
- 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623,
- 0x193cbcfa, 0x27627545, 0x825cf47a,
- 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7,
- 0x8272a972, 0x9270c4a8, 0x127de50b,
- 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb,
- 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
- 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11,
- 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
- 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c,
- 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
- 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40,
- 0x7c34671c, 0x02717ef6, 0x4feb5536,
- 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1,
- 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
- 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33,
- 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
- 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff,
- 0x856302e0, 0x72dbd92b, 0xee971b69,
- 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2,
- 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
- 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38,
- 0x0ff0443d, 0x606e6dc6, 0x60543a49,
- 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f,
- 0x68458425, 0x99833be5, 0x600d457d,
- 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31,
- 0x9c305a00, 0x52bce688, 0x1b03588a,
- 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
- 0xa133c501, 0xe9d3531c, 0xee353783
-};
-EXPORT_SYMBOL_GPL(cast5_s3);
-const u32 cast5_s4[256] = {
- 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
- 0x64ad8c57, 0x85510443, 0xfa020ed1,
- 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
- 0x6497b7b1, 0xf3641f63, 0x241e4adf,
- 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30,
- 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
- 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f,
- 0x0c13fefe, 0x081b08ca, 0x05170121,
- 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f,
- 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
- 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400,
- 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
- 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061,
- 0x11b638e1, 0x72500e03, 0xf80eb2bb,
- 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400,
- 0x6920318f, 0x081dbb99, 0xffc304a5,
- 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea,
- 0x9f926f91, 0x9f46222f, 0x3991467d,
- 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8,
- 0x3fb6180c, 0x18f8931e, 0x281658e6,
- 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25,
- 0x79098b02, 0xe4eabb81, 0x28123b23,
- 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9,
- 0x0014377b, 0x041e8ac8, 0x09114003,
- 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de,
- 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
- 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0,
- 0x56c8c391, 0x6b65811c, 0x5e146119,
- 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d,
- 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
- 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a,
- 0xeca1d7c7, 0x041afa32, 0x1d16625a,
- 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb,
- 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
- 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3,
- 0xedda04eb, 0x17a9be04, 0x2c18f4df,
- 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254,
- 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
- 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2,
- 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
- 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86,
- 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
- 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1,
- 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
- 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca,
- 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
- 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5,
- 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
- 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415,
- 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
- 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7,
- 0x0ce454a9, 0xd60acd86, 0x015f1919,
- 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe,
- 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
- 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb,
- 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
- 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8,
- 0x296b299e, 0x492fc295, 0x9266beab,
- 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee,
- 0xf65324e6, 0x6afce36c, 0x0316cc04,
- 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979,
- 0x932bcdf6, 0xb657c34d, 0x4edfd282,
- 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
- 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
-};
-EXPORT_SYMBOL_GPL(cast5_s4);
static const u32 s5[256] = {
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff,
0x1dd358f5, 0x44dd9d44, 0x1731167f,
@@ -564,10 +295,10 @@ static const u32 sb8[256] = {
0xeaee6801, 0x8db2a283, 0xea8bf59e
};
-#define s1 cast5_s1
-#define s2 cast5_s2
-#define s3 cast5_s3
-#define s4 cast5_s4
+#define s1 cast_s1
+#define s2 cast_s2
+#define s3 cast_s3
+#define s4 cast_s4
#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
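After the rename, cast5 pulls s1..s4 from the shared cast_s* tables, and the F1 macro above is the first of the three CAST round functions. Written out as a standalone function it is roughly the following sketch (rol32 is the usual 32-bit left rotate; the kernel keeps this as a macro for speed):

#include <stdint.h>

extern const uint32_t cast_s1[256], cast_s2[256], cast_s3[256], cast_s4[256];

static inline uint32_t rol32(uint32_t v, unsigned int r)
{
	return (v << r) | (v >> ((32 - r) & 31));
}

/* Type-1 CAST round function: add, rotate, then mix the four S-boxes. */
static uint32_t cast_f1(uint32_t d, uint32_t m, uint8_t r)
{
	uint32_t i = rol32(m + d, r);

	return ((cast_s1[i >> 24] ^ cast_s2[(i >> 16) & 0xff])
		- cast_s3[(i >> 8) & 0xff]) + cast_s4[i & 0xff];
}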
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 1acd2f1c48f..de732528a43 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -27,10 +27,10 @@
#include <linux/types.h>
#include <crypto/cast6.h>
-#define s1 cast6_s1
-#define s2 cast6_s2
-#define s3 cast6_s3
-#define s4 cast6_s4
+#define s1 cast_s1
+#define s2 cast_s2
+#define s3 cast_s3
+#define s4 cast_s4
#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
@@ -39,278 +39,6 @@
#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
-const u32 cast6_s1[256] = {
- 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
- 0x9c004dd3, 0x6003e540, 0xcf9fc949,
- 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
- 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
- 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3,
- 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
- 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1,
- 0xaa54166b, 0x22568e3a, 0xa2d341d0,
- 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac,
- 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
- 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0,
- 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
- 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290,
- 0xe93b159f, 0xb48ee411, 0x4bff345d,
- 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad,
- 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
- 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f,
- 0xc59c5319, 0xb949e354, 0xb04669fe,
- 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5,
- 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
- 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5,
- 0xf61b1891, 0xbb72275e, 0xaa508167,
- 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427,
- 0xa2d1936b, 0x2ad286af, 0xaa56d291,
- 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d,
- 0x73e2bb14, 0xa0bebc3c, 0x54623779,
- 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e,
- 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
- 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf,
- 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
- 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241,
- 0x051ef495, 0xaa573b04, 0x4a805d8d,
- 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b,
- 0x50afd341, 0xa7c13275, 0x915a0bf5,
- 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265,
- 0xab85c5f3, 0x1b55db94, 0xaad4e324,
- 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3,
- 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
- 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6,
- 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
- 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6,
- 0x032268d4, 0xc9600acc, 0xce387e6d,
- 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da,
- 0x4736f464, 0x5ad328d8, 0xb347cc96,
- 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc,
- 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
- 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f,
- 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
- 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4,
- 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
- 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af,
- 0x51c85f4d, 0x56907596, 0xa5bb15e6,
- 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a,
- 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
- 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf,
- 0x700b45e1, 0xd5ea50f1, 0x85a92872,
- 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198,
- 0x0cd0ede7, 0x26470db8, 0xf881814c,
- 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db,
- 0xab838653, 0x6e2f1e23, 0x83719c9e,
- 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c,
- 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
- 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
- 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
-};
-EXPORT_SYMBOL_GPL(cast6_s1);
-
-const u32 cast6_s2[256] = {
- 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
- 0xeec5207a, 0x55889c94, 0x72fc0651,
- 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
- 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
- 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086,
- 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
- 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb,
- 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
- 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f,
- 0x77e83f4e, 0x79929269, 0x24fa9f7b,
- 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154,
- 0x0d554b63, 0x5d681121, 0xc866c359,
- 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181,
- 0x39f7627f, 0x361e3084, 0xe4eb573b,
- 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c,
- 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
- 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a,
- 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
- 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c,
- 0x1d804366, 0x721d9bfd, 0xa58684bb,
- 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1,
- 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
- 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9,
- 0xe0b56714, 0x21f043b7, 0xe5d05860,
- 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf,
- 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
- 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c,
- 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
- 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122,
- 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
- 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402,
- 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
- 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53,
- 0xe3214517, 0xb4542835, 0x9f63293c,
- 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6,
- 0x30a22c95, 0x31a70850, 0x60930f13,
- 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6,
- 0xa02b1741, 0x7cbad9a2, 0x2180036f,
- 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676,
- 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
- 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb,
- 0x846a3bae, 0x8ff77888, 0xee5d60f6,
- 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54,
- 0x157fd7fa, 0xef8579cc, 0xd152de58,
- 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5,
- 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
- 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8,
- 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
- 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc,
- 0x301e16e6, 0x273be979, 0xb0ffeaa6,
- 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a,
- 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
- 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e,
- 0x1a513742, 0xef6828bc, 0x520365d6,
- 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb,
- 0x5eea29cb, 0x145892f5, 0x91584f7f,
- 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4,
- 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
- 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3,
- 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
- 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589,
- 0xa345415e, 0x5c038323, 0x3e5d3bb9,
- 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
- 0x73bfbe70, 0x83877605, 0x4523ecf1
-};
-EXPORT_SYMBOL_GPL(cast6_s2);
-
-const u32 cast6_s3[256] = {
- 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
- 0x369fe44b, 0x8c1fc644, 0xaececa90,
- 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
- 0xf0ad0548, 0xe13c8d83, 0x927010d5,
- 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820,
- 0xfade82e0, 0xa067268b, 0x8272792e,
- 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee,
- 0x825b1bfd, 0x9255c5ed, 0x1257a240,
- 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf,
- 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
- 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1,
- 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
- 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c,
- 0x4a012d6e, 0xc5884a28, 0xccc36f71,
- 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850,
- 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
- 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e,
- 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
- 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0,
- 0x1eac5790, 0x796fb449, 0x8252dc15,
- 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403,
- 0xe83ec305, 0x4f91751a, 0x925669c2,
- 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574,
- 0x927985b2, 0x8276dbcb, 0x02778176,
- 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83,
- 0x340ce5c8, 0x96bbb682, 0x93b4b148,
- 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20,
- 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
- 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e,
- 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
- 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9,
- 0xbda8229c, 0x127dadaa, 0x438a074e,
- 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff,
- 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
- 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a,
- 0x76a2e214, 0xb9a40368, 0x925d958f,
- 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623,
- 0x193cbcfa, 0x27627545, 0x825cf47a,
- 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7,
- 0x8272a972, 0x9270c4a8, 0x127de50b,
- 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb,
- 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
- 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11,
- 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
- 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c,
- 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
- 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40,
- 0x7c34671c, 0x02717ef6, 0x4feb5536,
- 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1,
- 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
- 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33,
- 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
- 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff,
- 0x856302e0, 0x72dbd92b, 0xee971b69,
- 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2,
- 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
- 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38,
- 0x0ff0443d, 0x606e6dc6, 0x60543a49,
- 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f,
- 0x68458425, 0x99833be5, 0x600d457d,
- 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31,
- 0x9c305a00, 0x52bce688, 0x1b03588a,
- 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
- 0xa133c501, 0xe9d3531c, 0xee353783
-};
-EXPORT_SYMBOL_GPL(cast6_s3);
-
-const u32 cast6_s4[256] = {
- 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
- 0x64ad8c57, 0x85510443, 0xfa020ed1,
- 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
- 0x6497b7b1, 0xf3641f63, 0x241e4adf,
- 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30,
- 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
- 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f,
- 0x0c13fefe, 0x081b08ca, 0x05170121,
- 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f,
- 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
- 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400,
- 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
- 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061,
- 0x11b638e1, 0x72500e03, 0xf80eb2bb,
- 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400,
- 0x6920318f, 0x081dbb99, 0xffc304a5,
- 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea,
- 0x9f926f91, 0x9f46222f, 0x3991467d,
- 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8,
- 0x3fb6180c, 0x18f8931e, 0x281658e6,
- 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25,
- 0x79098b02, 0xe4eabb81, 0x28123b23,
- 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9,
- 0x0014377b, 0x041e8ac8, 0x09114003,
- 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de,
- 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
- 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0,
- 0x56c8c391, 0x6b65811c, 0x5e146119,
- 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d,
- 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
- 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a,
- 0xeca1d7c7, 0x041afa32, 0x1d16625a,
- 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb,
- 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
- 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3,
- 0xedda04eb, 0x17a9be04, 0x2c18f4df,
- 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254,
- 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
- 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2,
- 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
- 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86,
- 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
- 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1,
- 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
- 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca,
- 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
- 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5,
- 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
- 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415,
- 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
- 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7,
- 0x0ce454a9, 0xd60acd86, 0x015f1919,
- 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe,
- 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
- 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb,
- 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
- 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8,
- 0x296b299e, 0x492fc295, 0x9266beab,
- 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee,
- 0xf65324e6, 0x6afce36c, 0x0316cc04,
- 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979,
- 0x932bcdf6, 0xb657c34d, 0x4edfd282,
- 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
- 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
-};
-EXPORT_SYMBOL_GPL(cast6_s4);
-
static const u32 Tm[24][8] = {
{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
diff --git a/crypto/cast_common.c b/crypto/cast_common.c
new file mode 100644
index 00000000000..a15f523d5f5
--- /dev/null
+++ b/crypto/cast_common.c
@@ -0,0 +1,290 @@
+/*
+ * Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)
+ *
+ * Copyright © 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ * Copyright © 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>
+ * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <crypto/cast_common.h>
+
+const u32 cast_s1[256] = {
+ 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
+ 0x9c004dd3, 0x6003e540, 0xcf9fc949,
+ 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
+ 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
+ 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3,
+ 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
+ 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1,
+ 0xaa54166b, 0x22568e3a, 0xa2d341d0,
+ 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac,
+ 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
+ 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0,
+ 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
+ 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290,
+ 0xe93b159f, 0xb48ee411, 0x4bff345d,
+ 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad,
+ 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
+ 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f,
+ 0xc59c5319, 0xb949e354, 0xb04669fe,
+ 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5,
+ 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
+ 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5,
+ 0xf61b1891, 0xbb72275e, 0xaa508167,
+ 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427,
+ 0xa2d1936b, 0x2ad286af, 0xaa56d291,
+ 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d,
+ 0x73e2bb14, 0xa0bebc3c, 0x54623779,
+ 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e,
+ 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
+ 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf,
+ 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
+ 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241,
+ 0x051ef495, 0xaa573b04, 0x4a805d8d,
+ 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b,
+ 0x50afd341, 0xa7c13275, 0x915a0bf5,
+ 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265,
+ 0xab85c5f3, 0x1b55db94, 0xaad4e324,
+ 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3,
+ 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
+ 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6,
+ 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
+ 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6,
+ 0x032268d4, 0xc9600acc, 0xce387e6d,
+ 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da,
+ 0x4736f464, 0x5ad328d8, 0xb347cc96,
+ 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc,
+ 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
+ 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f,
+ 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
+ 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4,
+ 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
+ 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af,
+ 0x51c85f4d, 0x56907596, 0xa5bb15e6,
+ 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a,
+ 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
+ 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf,
+ 0x700b45e1, 0xd5ea50f1, 0x85a92872,
+ 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198,
+ 0x0cd0ede7, 0x26470db8, 0xf881814c,
+ 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db,
+ 0xab838653, 0x6e2f1e23, 0x83719c9e,
+ 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c,
+ 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
+ 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
+ 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
+};
+EXPORT_SYMBOL_GPL(cast_s1);
+
+const u32 cast_s2[256] = {
+ 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
+ 0xeec5207a, 0x55889c94, 0x72fc0651,
+ 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
+ 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
+ 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086,
+ 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
+ 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb,
+ 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
+ 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f,
+ 0x77e83f4e, 0x79929269, 0x24fa9f7b,
+ 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154,
+ 0x0d554b63, 0x5d681121, 0xc866c359,
+ 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181,
+ 0x39f7627f, 0x361e3084, 0xe4eb573b,
+ 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c,
+ 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
+ 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a,
+ 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
+ 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c,
+ 0x1d804366, 0x721d9bfd, 0xa58684bb,
+ 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1,
+ 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
+ 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9,
+ 0xe0b56714, 0x21f043b7, 0xe5d05860,
+ 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf,
+ 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
+ 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c,
+ 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
+ 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122,
+ 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
+ 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402,
+ 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
+ 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53,
+ 0xe3214517, 0xb4542835, 0x9f63293c,
+ 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6,
+ 0x30a22c95, 0x31a70850, 0x60930f13,
+ 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6,
+ 0xa02b1741, 0x7cbad9a2, 0x2180036f,
+ 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676,
+ 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
+ 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb,
+ 0x846a3bae, 0x8ff77888, 0xee5d60f6,
+ 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54,
+ 0x157fd7fa, 0xef8579cc, 0xd152de58,
+ 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5,
+ 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
+ 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8,
+ 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
+ 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc,
+ 0x301e16e6, 0x273be979, 0xb0ffeaa6,
+ 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a,
+ 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
+ 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e,
+ 0x1a513742, 0xef6828bc, 0x520365d6,
+ 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb,
+ 0x5eea29cb, 0x145892f5, 0x91584f7f,
+ 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4,
+ 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
+ 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3,
+ 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
+ 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589,
+ 0xa345415e, 0x5c038323, 0x3e5d3bb9,
+ 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
+ 0x73bfbe70, 0x83877605, 0x4523ecf1
+};
+EXPORT_SYMBOL_GPL(cast_s2);
+
+const u32 cast_s3[256] = {
+ 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
+ 0x369fe44b, 0x8c1fc644, 0xaececa90,
+ 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
+ 0xf0ad0548, 0xe13c8d83, 0x927010d5,
+ 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820,
+ 0xfade82e0, 0xa067268b, 0x8272792e,
+ 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee,
+ 0x825b1bfd, 0x9255c5ed, 0x1257a240,
+ 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf,
+ 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
+ 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1,
+ 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
+ 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c,
+ 0x4a012d6e, 0xc5884a28, 0xccc36f71,
+ 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850,
+ 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
+ 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e,
+ 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
+ 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0,
+ 0x1eac5790, 0x796fb449, 0x8252dc15,
+ 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403,
+ 0xe83ec305, 0x4f91751a, 0x925669c2,
+ 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574,
+ 0x927985b2, 0x8276dbcb, 0x02778176,
+ 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83,
+ 0x340ce5c8, 0x96bbb682, 0x93b4b148,
+ 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20,
+ 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
+ 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e,
+ 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
+ 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9,
+ 0xbda8229c, 0x127dadaa, 0x438a074e,
+ 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff,
+ 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
+ 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a,
+ 0x76a2e214, 0xb9a40368, 0x925d958f,
+ 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623,
+ 0x193cbcfa, 0x27627545, 0x825cf47a,
+ 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7,
+ 0x8272a972, 0x9270c4a8, 0x127de50b,
+ 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb,
+ 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
+ 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11,
+ 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
+ 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c,
+ 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
+ 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40,
+ 0x7c34671c, 0x02717ef6, 0x4feb5536,
+ 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1,
+ 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
+ 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33,
+ 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
+ 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff,
+ 0x856302e0, 0x72dbd92b, 0xee971b69,
+ 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2,
+ 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
+ 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38,
+ 0x0ff0443d, 0x606e6dc6, 0x60543a49,
+ 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f,
+ 0x68458425, 0x99833be5, 0x600d457d,
+ 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31,
+ 0x9c305a00, 0x52bce688, 0x1b03588a,
+ 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
+ 0xa133c501, 0xe9d3531c, 0xee353783
+};
+EXPORT_SYMBOL_GPL(cast_s3);
+
+const u32 cast_s4[256] = {
+ 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
+ 0x64ad8c57, 0x85510443, 0xfa020ed1,
+ 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
+ 0x6497b7b1, 0xf3641f63, 0x241e4adf,
+ 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30,
+ 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
+ 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f,
+ 0x0c13fefe, 0x081b08ca, 0x05170121,
+ 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f,
+ 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
+ 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400,
+ 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
+ 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061,
+ 0x11b638e1, 0x72500e03, 0xf80eb2bb,
+ 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400,
+ 0x6920318f, 0x081dbb99, 0xffc304a5,
+ 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea,
+ 0x9f926f91, 0x9f46222f, 0x3991467d,
+ 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8,
+ 0x3fb6180c, 0x18f8931e, 0x281658e6,
+ 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25,
+ 0x79098b02, 0xe4eabb81, 0x28123b23,
+ 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9,
+ 0x0014377b, 0x041e8ac8, 0x09114003,
+ 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de,
+ 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
+ 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0,
+ 0x56c8c391, 0x6b65811c, 0x5e146119,
+ 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d,
+ 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
+ 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a,
+ 0xeca1d7c7, 0x041afa32, 0x1d16625a,
+ 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb,
+ 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
+ 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3,
+ 0xedda04eb, 0x17a9be04, 0x2c18f4df,
+ 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254,
+ 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
+ 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2,
+ 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
+ 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86,
+ 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
+ 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1,
+ 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
+ 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca,
+ 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
+ 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5,
+ 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
+ 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415,
+ 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
+ 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7,
+ 0x0ce454a9, 0xd60acd86, 0x015f1919,
+ 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe,
+ 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
+ 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb,
+ 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
+ 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8,
+ 0x296b299e, 0x492fc295, 0x9266beab,
+ 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee,
+ 0xf65324e6, 0x6afce36c, 0x0316cc04,
+ 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979,
+ 0x932bcdf6, 0xb657c34d, 0x4edfd282,
+ 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
+ 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
+};
+EXPORT_SYMBOL_GPL(cast_s4);
+
+MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e87fa60f583..7ae2130e1b0 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -971,11 +971,13 @@ static int do_test(int m)
case 3:
ret += tcrypt_test("ecb(des)");
ret += tcrypt_test("cbc(des)");
+ ret += tcrypt_test("ctr(des)");
break;
case 4:
ret += tcrypt_test("ecb(des3_ede)");
ret += tcrypt_test("cbc(des3_ede)");
+ ret += tcrypt_test("ctr(des3_ede)");
break;
case 5:
@@ -1479,6 +1481,10 @@ static int do_test(int m)
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
+ case 319:
+ test_hash_speed("crc32c", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
@@ -1722,6 +1728,29 @@ static int do_test(int m)
speed_template_32_64);
break;
+ case 508:
+ test_acipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32);
+ test_acipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48);
+ test_acipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48);
+ test_acipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64);
+ test_acipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64);
+ break;
+
case 1000:
test_available();
break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 941d75cd1f7..edf4a081877 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1638,270 +1638,66 @@ static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "__cbc-cast5-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__cbc-cast6-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__cbc-serpent-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__cbc-serpent-sse2",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__cbc-twofish-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-cbc-aes-aesni",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
+ }, {
+ .alg = "__driver-cbc-camellia-aesni",
+ .test = alg_test_null,
}, {
.alg = "__driver-cbc-cast5-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-cbc-cast6-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-cbc-serpent-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-cbc-serpent-sse2",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-cbc-twofish-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-ecb-aes-aesni",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
+ }, {
+ .alg = "__driver-ecb-camellia-aesni",
+ .test = alg_test_null,
}, {
.alg = "__driver-ecb-cast5-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-ecb-cast6-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-ecb-serpent-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-ecb-serpent-sse2",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__driver-ecb-twofish-avx",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "__ghash-pclmulqdqni",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .hash = {
- .vecs = NULL,
- .count = 0
- }
- }
}, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
@@ -2130,135 +1926,39 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "cryptd(__driver-cbc-aes-aesni)",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
+ }, {
+ .alg = "cryptd(__driver-cbc-camellia-aesni)",
+ .test = alg_test_null,
}, {
.alg = "cryptd(__driver-ecb-aes-aesni)",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
+ }, {
+ .alg = "cryptd(__driver-ecb-camellia-aesni)",
+ .test = alg_test_null,
}, {
.alg = "cryptd(__driver-ecb-cast5-avx)",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__driver-ecb-cast6-avx)",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__driver-ecb-serpent-avx)",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__driver-ecb-serpent-sse2)",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__driver-ecb-twofish-avx)",
.test = alg_test_null,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__driver-gcm-aes-aesni)",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "cryptd(__ghash-pclmulqdqni)",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .hash = {
- .vecs = NULL,
- .count = 0
- }
- }
}, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
@@ -2336,6 +2036,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ctr(des)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = des_ctr_enc_tv_template,
+ .count = DES_CTR_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = des_ctr_dec_tv_template,
+ .count = DES_CTR_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
+ .alg = "ctr(des3_ede)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = des3_ede_ctr_enc_tv_template,
+ .count = DES3_EDE_CTR_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = des3_ede_ctr_dec_tv_template,
+ .count = DES3_EDE_CTR_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -2383,6 +2113,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "deflate",
.test = alg_test_comp,
+ .fips_allowed = 1,
.suite = {
.comp = {
.comp = {
@@ -2399,18 +2130,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.alg = "ecb(__aes-aesni)",
.test = alg_test_null,
.fips_allowed = 1,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = NULL,
- .count = 0
- },
- .dec = {
- .vecs = NULL,
- .count = 0
- }
- }
- }
}, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
@@ -2859,6 +2578,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "lzo",
.test = alg_test_comp,
+ .fips_allowed = 1,
.suite = {
.comp = {
.comp = {
@@ -3226,6 +2946,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "zlib",
.test = alg_test_pcomp,
+ .fips_allowed = 1,
.suite = {
.pcomp = {
.comp = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 76d7f6cc82f..b5721e0b979 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -41,7 +41,7 @@ struct hash_testvec {
char *plaintext;
char *digest;
unsigned char tap[MAX_TAP];
- unsigned char psize;
+ unsigned short psize;
unsigned char np;
unsigned char ksize;
};
@@ -1707,7 +1707,7 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-#define VMAC_AES_TEST_VECTORS 8
+#define VMAC_AES_TEST_VECTORS 11
static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
@@ -1723,6 +1723,19 @@ static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
'a', 'b', 'c', 'a', 'b', 'c',
};
+static char vmac_string4[17] = {'b', 'c', 'e', 'f',
+ 'i', 'j', 'l', 'm',
+ 'o', 'p', 'r', 's',
+ 't', 'u', 'w', 'x', 'z'};
+
+static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
+ 'o', 'l', 'k', ']', '%',
+ '9', '2', '7', '!', 'A'};
+
+static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
+ 'i', '!', '#', 'w', '0',
+ 'z', '/', '4', 'A', 'n'};
+
static struct hash_testvec aes_vmac128_tv_template[] = {
{
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
@@ -1776,6 +1789,24 @@ static struct hash_testvec aes_vmac128_tv_template[] = {
.digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .plaintext = vmac_string4,
+ .digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
+ .psize = sizeof(vmac_string4),
+ .ksize = 16,
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .plaintext = vmac_string5,
+ .digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
+ .psize = sizeof(vmac_string5),
+ .ksize = 16,
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .plaintext = vmac_string6,
+ .digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
+ .psize = sizeof(vmac_string6),
+ .ksize = 16,
},
};
@@ -1993,14 +2024,18 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
/*
* DES test vectors.
*/
-#define DES_ENC_TEST_VECTORS 10
-#define DES_DEC_TEST_VECTORS 4
-#define DES_CBC_ENC_TEST_VECTORS 5
-#define DES_CBC_DEC_TEST_VECTORS 4
-#define DES3_EDE_ENC_TEST_VECTORS 3
-#define DES3_EDE_DEC_TEST_VECTORS 3
-#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
-#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
+#define DES_ENC_TEST_VECTORS 11
+#define DES_DEC_TEST_VECTORS 5
+#define DES_CBC_ENC_TEST_VECTORS 6
+#define DES_CBC_DEC_TEST_VECTORS 5
+#define DES_CTR_ENC_TEST_VECTORS 2
+#define DES_CTR_DEC_TEST_VECTORS 2
+#define DES3_EDE_ENC_TEST_VECTORS 4
+#define DES3_EDE_DEC_TEST_VECTORS 4
+#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
+#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
+#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
+#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
static struct cipher_testvec des_enc_tv_template[] = {
{ /* From Applied Cryptography */
@@ -2103,6 +2138,76 @@ static struct cipher_testvec des_enc_tv_template[] = {
.rlen = 8,
.np = 8,
.tap = { 1, 1, 1, 1, 1, 1, 1, 1 }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .ilen = 248,
+ .result = "\x88\xCB\x1F\xAB\x2F\x2A\x49\x57"
+ "\x92\xB9\x77\xFF\x2F\x47\x58\xDD"
+ "\xD7\x8A\x91\x95\x26\x33\x78\xB2"
+ "\x33\xBA\xB2\x3E\x02\xF5\x1F\xEF"
+ "\x98\xC5\xA6\xD2\x7D\x79\xEC\xB3"
+ "\x45\xF3\x4C\x61\xAC\x6C\xC2\x55"
+ "\xE5\xD3\x06\x58\x8A\x42\x3E\xDD"
+ "\x3D\x20\x45\xE9\x6F\x0D\x25\xA8"
+ "\xA5\xC7\x69\xCE\xD5\x3B\x7B\xC9"
+ "\x9E\x65\xE7\xA3\xF2\xE4\x18\x94"
+ "\xD2\x81\xE9\x33\x2B\x2D\x49\xC4"
+ "\xFE\xDA\x7F\xE2\xF2\x8C\x9C\xDC"
+ "\x73\x58\x11\x1F\x81\xD7\x21\x1A"
+ "\x80\xD0\x0D\xE8\x45\xD6\xD8\xD5"
+ "\x2E\x51\x16\xCA\x09\x89\x54\x62"
+ "\xF7\x04\x3D\x75\xB9\xA3\x84\xF4"
+ "\x62\xF0\x02\x58\x83\xAF\x30\x87"
+ "\x85\x3F\x01\xCD\x8E\x58\x42\xC4"
+ "\x41\x73\xE0\x15\x0A\xE6\x2E\x80"
+ "\x94\xF8\x5B\x3A\x4E\xDF\x51\xB2"
+ "\x9D\xE4\xC4\x9D\xF7\x3F\xF8\x8E"
+ "\x37\x22\x4D\x00\x2A\xEF\xC1\x0F"
+ "\x14\xA0\x66\xAB\x79\x39\xD0\x8E"
+ "\xE9\x95\x61\x74\x12\xED\x07\xD7"
+ "\xDD\x95\xDC\x7B\x57\x25\x27\x9C"
+ "\x51\x96\x16\xF7\x94\x61\xB8\x87"
+ "\xF0\x21\x1B\x32\xFB\x07\x0F\x29"
+ "\x56\xBD\x9D\x22\xA2\x9F\xA2\xB9"
+ "\x46\x31\x4C\x5E\x2E\x95\x61\xEF"
+ "\xE1\x58\x39\x09\xB4\x8B\x40\xAC"
+ "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
},
};
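
[Aside, not part of the patch: the new large vectors above carry .also_non_np, .np and .tap fields. As I read the testmgr harness, .np/.tap describe how the input is split across scatterlist segments (here 240 + 8 bytes) so the chunked code path gets exercised, while .also_non_np asks for an additional run over a single contiguous buffer. A rough, simplified sketch of how such a split could be mapped onto a scatterlist; build_test_sg is a made-up name, and the real harness copies each chunk into its own page rather than pointing at the vector directly.]

	#include <linux/scatterlist.h>

	/* Hypothetical helper: lay out a test vector's input across .np
	 * segments of the sizes given in .tap, e.g. { 248 - 8, 8 } above. */
	static void build_test_sg(struct scatterlist *sg, const char *input,
				  const unsigned short *tap, unsigned int np)
	{
		unsigned int i, offset = 0;

		sg_init_table(sg, np);
		for (i = 0; i < np; i++) {
			sg_set_buf(&sg[i], input + offset, tap[i]);
			offset += tap[i];
		}
	}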
@@ -2143,6 +2248,76 @@ static struct cipher_testvec des_dec_tv_template[] = {
.rlen = 16,
.np = 3,
.tap = { 3, 12, 1 }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .input = "\x88\xCB\x1F\xAB\x2F\x2A\x49\x57"
+ "\x92\xB9\x77\xFF\x2F\x47\x58\xDD"
+ "\xD7\x8A\x91\x95\x26\x33\x78\xB2"
+ "\x33\xBA\xB2\x3E\x02\xF5\x1F\xEF"
+ "\x98\xC5\xA6\xD2\x7D\x79\xEC\xB3"
+ "\x45\xF3\x4C\x61\xAC\x6C\xC2\x55"
+ "\xE5\xD3\x06\x58\x8A\x42\x3E\xDD"
+ "\x3D\x20\x45\xE9\x6F\x0D\x25\xA8"
+ "\xA5\xC7\x69\xCE\xD5\x3B\x7B\xC9"
+ "\x9E\x65\xE7\xA3\xF2\xE4\x18\x94"
+ "\xD2\x81\xE9\x33\x2B\x2D\x49\xC4"
+ "\xFE\xDA\x7F\xE2\xF2\x8C\x9C\xDC"
+ "\x73\x58\x11\x1F\x81\xD7\x21\x1A"
+ "\x80\xD0\x0D\xE8\x45\xD6\xD8\xD5"
+ "\x2E\x51\x16\xCA\x09\x89\x54\x62"
+ "\xF7\x04\x3D\x75\xB9\xA3\x84\xF4"
+ "\x62\xF0\x02\x58\x83\xAF\x30\x87"
+ "\x85\x3F\x01\xCD\x8E\x58\x42\xC4"
+ "\x41\x73\xE0\x15\x0A\xE6\x2E\x80"
+ "\x94\xF8\x5B\x3A\x4E\xDF\x51\xB2"
+ "\x9D\xE4\xC4\x9D\xF7\x3F\xF8\x8E"
+ "\x37\x22\x4D\x00\x2A\xEF\xC1\x0F"
+ "\x14\xA0\x66\xAB\x79\x39\xD0\x8E"
+ "\xE9\x95\x61\x74\x12\xED\x07\xD7"
+ "\xDD\x95\xDC\x7B\x57\x25\x27\x9C"
+ "\x51\x96\x16\xF7\x94\x61\xB8\x87"
+ "\xF0\x21\x1B\x32\xFB\x07\x0F\x29"
+ "\x56\xBD\x9D\x22\xA2\x9F\xA2\xB9"
+ "\x46\x31\x4C\x5E\x2E\x95\x61\xEF"
+ "\xE1\x58\x39\x09\xB4\x8B\x40\xAC"
+ "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
+ .ilen = 248,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
},
};
@@ -2198,6 +2373,77 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
.rlen = 24,
.np = 2,
.tap = { 13, 11 }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .ilen = 248,
+ .result = "\x71\xCC\x56\x1C\x87\x2C\x43\x20"
+ "\x1C\x20\x13\x09\xF9\x2B\x40\x47"
+ "\x99\x10\xD1\x1B\x65\x33\x33\xBA"
+ "\x88\x0D\xA2\xD1\x86\xFF\x4D\xF4"
+ "\x5A\x0C\x12\x96\x32\x57\xAA\x26"
+ "\xA7\xF4\x32\x8D\xBC\x10\x31\x9E"
+ "\x81\x72\x74\xDE\x30\x19\x69\x49"
+ "\x54\x9C\xC3\xEB\x0B\x97\xDD\xD1"
+ "\xE8\x6D\x0D\x05\x83\xA5\x12\x08"
+ "\x47\xF8\x88\x03\x86\x51\x3C\xEF"
+ "\xE7\x11\x73\x4D\x44\x2B\xE2\x16"
+ "\xE8\xA5\x06\x50\x66\x70\x0E\x14"
+ "\xBA\x21\x3B\xD5\x23\x5B\xA7\x8F"
+ "\x56\xB6\xA7\x44\xDB\x86\xAB\x69"
+ "\x33\x3C\xBE\x64\xC4\x22\xD3\xFE"
+ "\x49\x90\x88\x6A\x09\x8F\x76\x59"
+ "\xCB\xB7\xA0\x2D\x79\x75\x92\x8A"
+ "\x82\x1D\xC2\xFE\x09\x1F\x78\x6B"
+ "\x2F\xD6\xA4\x87\x1E\xC4\x53\x63"
+ "\x80\x02\x61\x2F\xE3\x46\xB6\xB5"
+ "\xAA\x95\xF4\xEE\xA7\x64\x2B\x4F"
+ "\x20\xCF\xD2\x47\x4E\x39\x65\xB3"
+ "\x11\x87\xA2\x6C\x49\x7E\x36\xC7"
+ "\x62\x8B\x48\x0D\x6A\x64\x00\xBD"
+ "\x71\x91\x8C\xE9\x70\x19\x01\x4F"
+ "\x4E\x68\x23\xBA\xDA\x24\x2E\x45"
+ "\x02\x14\x33\x21\xAE\x58\x4B\xCF"
+ "\x3B\x4B\xE8\xF8\xF6\x4F\x34\x93"
+ "\xD7\x07\x8A\xD7\x18\x92\x36\x8C"
+ "\x82\xA9\xBD\x6A\x31\x91\x39\x11"
+ "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
},
};
@@ -2236,6 +2482,369 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
.rlen = 8,
.np = 2,
.tap = { 4, 4 }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .input = "\x71\xCC\x56\x1C\x87\x2C\x43\x20"
+ "\x1C\x20\x13\x09\xF9\x2B\x40\x47"
+ "\x99\x10\xD1\x1B\x65\x33\x33\xBA"
+ "\x88\x0D\xA2\xD1\x86\xFF\x4D\xF4"
+ "\x5A\x0C\x12\x96\x32\x57\xAA\x26"
+ "\xA7\xF4\x32\x8D\xBC\x10\x31\x9E"
+ "\x81\x72\x74\xDE\x30\x19\x69\x49"
+ "\x54\x9C\xC3\xEB\x0B\x97\xDD\xD1"
+ "\xE8\x6D\x0D\x05\x83\xA5\x12\x08"
+ "\x47\xF8\x88\x03\x86\x51\x3C\xEF"
+ "\xE7\x11\x73\x4D\x44\x2B\xE2\x16"
+ "\xE8\xA5\x06\x50\x66\x70\x0E\x14"
+ "\xBA\x21\x3B\xD5\x23\x5B\xA7\x8F"
+ "\x56\xB6\xA7\x44\xDB\x86\xAB\x69"
+ "\x33\x3C\xBE\x64\xC4\x22\xD3\xFE"
+ "\x49\x90\x88\x6A\x09\x8F\x76\x59"
+ "\xCB\xB7\xA0\x2D\x79\x75\x92\x8A"
+ "\x82\x1D\xC2\xFE\x09\x1F\x78\x6B"
+ "\x2F\xD6\xA4\x87\x1E\xC4\x53\x63"
+ "\x80\x02\x61\x2F\xE3\x46\xB6\xB5"
+ "\xAA\x95\xF4\xEE\xA7\x64\x2B\x4F"
+ "\x20\xCF\xD2\x47\x4E\x39\x65\xB3"
+ "\x11\x87\xA2\x6C\x49\x7E\x36\xC7"
+ "\x62\x8B\x48\x0D\x6A\x64\x00\xBD"
+ "\x71\x91\x8C\xE9\x70\x19\x01\x4F"
+ "\x4E\x68\x23\xBA\xDA\x24\x2E\x45"
+ "\x02\x14\x33\x21\xAE\x58\x4B\xCF"
+ "\x3B\x4B\xE8\xF8\xF6\x4F\x34\x93"
+ "\xD7\x07\x8A\xD7\x18\x92\x36\x8C"
+ "\x82\xA9\xBD\x6A\x31\x91\x39\x11"
+ "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
+ .ilen = 248,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
+ },
+};
+
+static struct cipher_testvec des_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .ilen = 248,
+ .result = "\x2F\x96\x06\x0F\x50\xC9\x68\x03"
+ "\x0F\x31\xD4\x64\xA5\x29\x77\x35"
+ "\xBC\x7A\x9F\x19\xE7\x0D\x33\x3E"
+ "\x12\x0B\x8C\xAE\x48\xAE\xD9\x02"
+ "\x0A\xD4\xB0\xD6\x37\xB2\x65\x1C"
+ "\x4B\x65\xEB\x24\xB5\x8E\xAD\x47"
+ "\x0D\xDA\x79\x77\xA0\x29\xA0\x2B"
+ "\xC8\x0F\x85\xDC\x03\x13\xA9\x04"
+ "\x19\x40\xBE\xBE\x5C\x49\x4A\x69"
+ "\xED\xE8\xE1\x9E\x14\x43\x74\xDE"
+ "\xEC\x6E\x11\x3F\x36\xEF\x7B\xFB"
+ "\xBE\x4C\x91\x43\x22\x65\x72\x48"
+ "\xE2\x12\xED\x88\xAC\xA7\xC9\x91"
+ "\x14\xA2\x36\x1C\x29\xFF\xC8\x4F"
+ "\x72\x5C\x4B\xB0\x1E\x93\xC2\xFA"
+ "\x9D\x53\x86\xA0\xAE\xC6\xB7\x3C"
+ "\x59\x0C\xD0\x8F\xA6\xD8\xA4\x31"
+ "\xB7\x30\x1C\x21\x38\xFB\x68\x8C"
+ "\x2E\xF5\x6E\x73\xC3\x16\x5F\x12"
+ "\x0C\x33\xB9\x1E\x7B\x70\xDE\x86"
+ "\x32\xB3\xC1\x16\xAB\xD9\x49\x0B"
+ "\x96\x28\x72\x6B\xF3\x30\xA9\xEB"
+ "\x69\xE2\x1E\x58\x46\xA2\x8E\xC7"
+ "\xC0\xEF\x07\xB7\x77\x2C\x00\x05"
+ "\x46\xBD\xFE\x53\x81\x8B\xA4\x03"
+ "\x20\x0F\xDB\x78\x0B\x1F\x53\x04"
+ "\x4C\x60\x4C\xC3\x2A\x86\x86\x7E"
+ "\x13\xD2\x26\xED\x5D\x3E\x9C\xF2"
+ "\x5C\xC4\x15\xC9\x9A\x21\xC5\xCD"
+ "\x19\x7F\x99\x19\x53\xCE\x1D\x14"
+ "\x69\x74\xA1\x06\x46\x0F\x4E\x75",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82",
+ .ilen = 247,
+ .result = "\x62\xE5\xF4\xDC\x99\xE7\x89\xE3"
+ "\xF4\x10\xCC\x21\x99\xEB\xDC\x15"
+ "\x19\x13\x93\x27\x9D\xB6\x6F\x45"
+ "\x17\x55\x61\x72\xC8\xD3\x7F\xA5"
+ "\x32\xD0\xD3\x02\x15\xA4\x05\x23"
+ "\x9C\x23\x61\x60\x77\x7B\x6C\x95"
+ "\x26\x49\x42\x2E\xF3\xC1\x8C\x6D"
+ "\xC8\x47\xD5\x94\xE7\x53\xC8\x23"
+ "\x1B\xA5\x0B\xCB\x12\xD3\x7A\x12"
+ "\xA4\x42\x15\x34\xF7\x5F\xDC\x58"
+ "\x5B\x58\x4C\xAD\xD1\x33\x8E\xE6"
+ "\xE5\xA0\xDA\x4D\x94\x3D\x63\xA8"
+ "\x02\x82\xBB\x16\xB8\xDC\xB5\x58"
+ "\xC3\x2D\x79\xE4\x25\x79\x43\xF9"
+ "\x6D\xD3\xCA\xC0\xE8\x12\xD4\x7E"
+ "\x04\x25\x79\xFD\x27\xFB\xC4\xEA"
+ "\x32\x94\x48\x92\xF3\x68\x1A\x7F"
+ "\x36\x33\x43\x79\xF7\xCA\xC2\x38"
+ "\xC0\x68\xD4\x53\xA9\xCC\x43\x0C"
+ "\x40\x57\x3E\xED\x00\x9F\x22\x6E"
+ "\x80\x99\x0B\xCC\x40\x63\x46\x8A"
+ "\xE8\xC4\x9B\x6D\x7A\x08\x6E\xA9"
+ "\x6F\x84\xBC\xB3\xF4\x95\x0B\x2D"
+ "\x6A\xBA\x37\x50\xC3\xCF\x9F\x7C"
+ "\x59\x5E\xDE\x0B\x30\xFA\x34\x8A"
+ "\xF8\xD1\xA2\xF8\x4E\xBD\x5D\x5E"
+ "\x7D\x71\x99\xE0\xF6\xE5\x7C\xE0"
+ "\x6D\xEE\x82\x89\x92\xD4\xF5\xD7"
+ "\xDF\x85\x2D\xE1\xB2\xD6\xAB\x94"
+ "\xA5\xA6\xE7\xB0\x51\x36\x52\x37"
+ "\x91\x45\x05\x3E\x58\xBF\x32",
+ .rlen = 247,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 247 - 8, 8 },
+ },
+};
+
+static struct cipher_testvec des_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x2F\x96\x06\x0F\x50\xC9\x68\x03"
+ "\x0F\x31\xD4\x64\xA5\x29\x77\x35"
+ "\xBC\x7A\x9F\x19\xE7\x0D\x33\x3E"
+ "\x12\x0B\x8C\xAE\x48\xAE\xD9\x02"
+ "\x0A\xD4\xB0\xD6\x37\xB2\x65\x1C"
+ "\x4B\x65\xEB\x24\xB5\x8E\xAD\x47"
+ "\x0D\xDA\x79\x77\xA0\x29\xA0\x2B"
+ "\xC8\x0F\x85\xDC\x03\x13\xA9\x04"
+ "\x19\x40\xBE\xBE\x5C\x49\x4A\x69"
+ "\xED\xE8\xE1\x9E\x14\x43\x74\xDE"
+ "\xEC\x6E\x11\x3F\x36\xEF\x7B\xFB"
+ "\xBE\x4C\x91\x43\x22\x65\x72\x48"
+ "\xE2\x12\xED\x88\xAC\xA7\xC9\x91"
+ "\x14\xA2\x36\x1C\x29\xFF\xC8\x4F"
+ "\x72\x5C\x4B\xB0\x1E\x93\xC2\xFA"
+ "\x9D\x53\x86\xA0\xAE\xC6\xB7\x3C"
+ "\x59\x0C\xD0\x8F\xA6\xD8\xA4\x31"
+ "\xB7\x30\x1C\x21\x38\xFB\x68\x8C"
+ "\x2E\xF5\x6E\x73\xC3\x16\x5F\x12"
+ "\x0C\x33\xB9\x1E\x7B\x70\xDE\x86"
+ "\x32\xB3\xC1\x16\xAB\xD9\x49\x0B"
+ "\x96\x28\x72\x6B\xF3\x30\xA9\xEB"
+ "\x69\xE2\x1E\x58\x46\xA2\x8E\xC7"
+ "\xC0\xEF\x07\xB7\x77\x2C\x00\x05"
+ "\x46\xBD\xFE\x53\x81\x8B\xA4\x03"
+ "\x20\x0F\xDB\x78\x0B\x1F\x53\x04"
+ "\x4C\x60\x4C\xC3\x2A\x86\x86\x7E"
+ "\x13\xD2\x26\xED\x5D\x3E\x9C\xF2"
+ "\x5C\xC4\x15\xC9\x9A\x21\xC5\xCD"
+ "\x19\x7F\x99\x19\x53\xCE\x1D\x14"
+ "\x69\x74\xA1\x06\x46\x0F\x4E\x75",
+ .ilen = 248,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
+ .rlen = 248,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 248 - 8, 8 },
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
+ .klen = 8,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47",
+ .input = "\x62\xE5\xF4\xDC\x99\xE7\x89\xE3"
+ "\xF4\x10\xCC\x21\x99\xEB\xDC\x15"
+ "\x19\x13\x93\x27\x9D\xB6\x6F\x45"
+ "\x17\x55\x61\x72\xC8\xD3\x7F\xA5"
+ "\x32\xD0\xD3\x02\x15\xA4\x05\x23"
+ "\x9C\x23\x61\x60\x77\x7B\x6C\x95"
+ "\x26\x49\x42\x2E\xF3\xC1\x8C\x6D"
+ "\xC8\x47\xD5\x94\xE7\x53\xC8\x23"
+ "\x1B\xA5\x0B\xCB\x12\xD3\x7A\x12"
+ "\xA4\x42\x15\x34\xF7\x5F\xDC\x58"
+ "\x5B\x58\x4C\xAD\xD1\x33\x8E\xE6"
+ "\xE5\xA0\xDA\x4D\x94\x3D\x63\xA8"
+ "\x02\x82\xBB\x16\xB8\xDC\xB5\x58"
+ "\xC3\x2D\x79\xE4\x25\x79\x43\xF9"
+ "\x6D\xD3\xCA\xC0\xE8\x12\xD4\x7E"
+ "\x04\x25\x79\xFD\x27\xFB\xC4\xEA"
+ "\x32\x94\x48\x92\xF3\x68\x1A\x7F"
+ "\x36\x33\x43\x79\xF7\xCA\xC2\x38"
+ "\xC0\x68\xD4\x53\xA9\xCC\x43\x0C"
+ "\x40\x57\x3E\xED\x00\x9F\x22\x6E"
+ "\x80\x99\x0B\xCC\x40\x63\x46\x8A"
+ "\xE8\xC4\x9B\x6D\x7A\x08\x6E\xA9"
+ "\x6F\x84\xBC\xB3\xF4\x95\x0B\x2D"
+ "\x6A\xBA\x37\x50\xC3\xCF\x9F\x7C"
+ "\x59\x5E\xDE\x0B\x30\xFA\x34\x8A"
+ "\xF8\xD1\xA2\xF8\x4E\xBD\x5D\x5E"
+ "\x7D\x71\x99\xE0\xF6\xE5\x7C\xE0"
+ "\x6D\xEE\x82\x89\x92\xD4\xF5\xD7"
+ "\xDF\x85\x2D\xE1\xB2\xD6\xAB\x94"
+ "\xA5\xA6\xE7\xB0\x51\x36\x52\x37"
+ "\x91\x45\x05\x3E\x58\xBF\x32",
+ .ilen = 247,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82",
+ .rlen = 247,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 247 - 8, 8 },
},
};
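
[Aside, not part of the patch: the first DES-CTR vector in each template uses the IV FF FF FF FF FF FF FF FD, a big-endian counter three short of all-ones, so the counter wraps to zero a few blocks into the 248-byte (31-block) message, presumably to exercise the overflow path of ctr(des). A minimal user-space sketch of that counter behaviour; in the kernel the equivalent stepping is done by crypto_inc().]

	#include <stdint.h>
	#include <stdio.h>

	/* Increment an 8-byte big-endian counter in place, wrapping to zero
	 * on overflow, the way CTR mode steps its counter block. */
	static void ctr64_inc(uint8_t ctr[8])
	{
		for (int i = 7; i >= 0; i--)
			if (++ctr[i])
				break;
	}

	int main(void)
	{
		uint8_t ctr[8] = { 0xFF, 0xFF, 0xFF, 0xFF,
				   0xFF, 0xFF, 0xFF, 0xFD };

		/* First five counter blocks for the 248-byte vector:
		 * ...FFFD, ...FFFE, ...FFFF, 0000000000000000, 0000000000000001 */
		for (int blk = 0; blk < 5; blk++) {
			for (int i = 0; i < 8; i++)
				printf("%02X", ctr[i]);
			printf("\n");
			ctr64_inc(ctr);
		}
		return 0;
	}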
@@ -2267,6 +2876,140 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
.ilen = 8,
.result = "\xe1\xef\x62\xc3\x32\xfe\x82\x5b",
.rlen = 8,
+ }, { /* Generated with Crypto++ */
+ .key = "\xF3\x9C\xD6\xF3\x9C\xB9\x5A\x67"
+ "\x00\x5A\x67\x00\x2D\xCE\xEB\x2D"
+ "\xCE\xEB\xB4\x51\x72\xB4\x51\x72",
+ .klen = 24,
+ .input = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .ilen = 496,
+ .result = "\x4E\x9A\x40\x3D\x61\x7D\x17\xFA"
+ "\x16\x86\x88\x0B\xD8\xAE\xF8\xE4"
+ "\x81\x01\x04\x00\x76\xFA\xED\xD3"
+ "\x44\x7E\x21\x9D\xF0\xFB\x2B\x64"
+ "\xCA\x4E\x90\xE0\xC0\x63\x28\x92"
+ "\xF3\x1F\xA4\x53\x2C\x77\xCC\x77"
+ "\x69\x56\xD0\x19\xAD\x00\x2D\x97"
+ "\xBC\xDE\x49\x6A\x82\xBC\x16\xE2"
+ "\x2F\x3E\x72\xEE\xD1\xCE\xFC\x1B"
+ "\xEA\x32\x56\xE4\x0B\xAF\x27\x36"
+ "\xAF\x08\xB9\x61\xB7\x48\x23\x27"
+ "\xEE\x4D\xC8\x79\x56\x06\xEB\xC7"
+ "\x5B\xCA\x0A\xC6\x5E\x5C\xCB\xB6"
+ "\x9D\xDA\x04\x59\xE2\x09\x48\x7E"
+ "\x6B\x37\xC6\xFE\x92\xA9\x1E\x6E"
+ "\x0D\x19\xFA\x33\x0F\xEE\x36\x68"
+ "\x11\xBB\xF9\x5A\x73\xAB\x3A\xEA"
+ "\xAC\x28\xD8\xD5\x27\xE8\x6B\x16"
+ "\x45\x86\x50\x01\x70\x35\x99\x92"
+ "\xDF\x0C\x07\x88\x8B\x7F\x9E\x4B"
+ "\xD2\x04\x84\x90\xC4\x27\xDF\x0A"
+ "\x49\xA8\xA7\x1A\x6D\x78\x16\xCA"
+ "\xB3\x18\x5C\xC3\x93\x63\x5A\x68"
+ "\x77\x02\xBA\xED\x62\x71\xB1\xD9"
+ "\x5E\xE5\x6F\x1A\xCC\x1D\xBE\x2E"
+ "\x11\xF3\xA6\x97\xCA\x8E\xBF\xB4"
+ "\x56\xA1\x36\x6B\xB1\x0A\x3E\x70"
+ "\xEA\xD7\xCD\x72\x7B\x79\xC8\xAD"
+ "\x6B\xFE\xFB\xBA\x64\xAE\x19\xC1"
+ "\x82\xCF\x8A\xA1\x50\x17\x7F\xB2"
+ "\x6F\x7B\x0F\x52\xC5\x3E\x4A\x52"
+ "\x3F\xD9\x3F\x01\xA6\x41\x1A\xB3"
+ "\xB3\x7A\x0E\x8E\x75\xB2\xB1\x5F"
+ "\xDB\xEA\x84\x13\x26\x6C\x85\x4E"
+ "\xAE\x6B\xDC\xE7\xE7\xAD\xB0\x06"
+ "\x5C\xBA\x92\xD0\x30\xBB\x8D\xD2"
+ "\xAE\x4C\x70\x85\xA0\x07\xE3\x2C"
+ "\xD1\x27\x9C\xCF\xDB\x13\xB7\xE5"
+ "\xF9\x6A\x02\xD0\x39\x9D\xB6\xE7"
+ "\xD1\x17\x25\x08\xF9\xA9\xA6\x67"
+ "\x38\x80\xD1\x22\xAB\x1A\xD7\x26"
+ "\xAD\xCA\x19\x1B\xFA\x18\xA7\x57"
+ "\x31\xEC\xC9\xED\xDB\x79\xC0\x48"
+ "\xAC\x31\x9F\x03\x8B\x62\x5B\x7E"
+ "\x0E\xA6\xD0\x64\xEE\xEA\x00\xFC"
+ "\x58\xC8\xDE\x51\x4E\x17\x15\x11"
+ "\x66\x58\xB6\x90\xDC\xDF\xA1\x49"
+ "\xCA\x79\xE9\x31\x31\x42\xDC\x56"
+ "\x0B\xCD\xB6\x0D\xC7\x64\xF7\x19"
+ "\xD9\x42\x05\x7F\xBC\x2F\xFC\x90"
+ "\xAE\x29\x86\xAA\x43\x7A\x4F\x6B"
+ "\xCE\xEA\xBC\x31\x8D\x65\x9D\x46"
+ "\xEA\x77\xB4\xF9\x58\xEA\x5D\x84"
+ "\xE4\xDC\x14\xBB\xBD\x15\x0E\xDA"
+ "\xD8\xE4\xA4\x5D\x61\xF9\x58\x0F"
+ "\xE4\x82\x77\xCE\x87\xC0\x09\xF0"
+ "\xD6\x10\x9E\x34\xE1\x0C\x67\x55"
+ "\x7B\x6D\xD5\x51\x4B\x00\xEE\xBA"
+ "\xF2\x7B\xBE\x75\x07\x42\x9D\x99"
+ "\x12\xE1\x71\x4A\xF9\x2A\xF5\xF6"
+ "\x93\x03\xD7\x51\x09\xFA\xBE\x68"
+ "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -2298,6 +3041,140 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
.ilen = 8,
.result = "\x00\x00\x00\x00\x00\x00\x00\x00",
.rlen = 8,
+ }, { /* Generated with Crypto++ */
+ .key = "\xF3\x9C\xD6\xF3\x9C\xB9\x5A\x67"
+ "\x00\x5A\x67\x00\x2D\xCE\xEB\x2D"
+ "\xCE\xEB\xB4\x51\x72\xB4\x51\x72",
+ .klen = 24,
+ .input = "\x4E\x9A\x40\x3D\x61\x7D\x17\xFA"
+ "\x16\x86\x88\x0B\xD8\xAE\xF8\xE4"
+ "\x81\x01\x04\x00\x76\xFA\xED\xD3"
+ "\x44\x7E\x21\x9D\xF0\xFB\x2B\x64"
+ "\xCA\x4E\x90\xE0\xC0\x63\x28\x92"
+ "\xF3\x1F\xA4\x53\x2C\x77\xCC\x77"
+ "\x69\x56\xD0\x19\xAD\x00\x2D\x97"
+ "\xBC\xDE\x49\x6A\x82\xBC\x16\xE2"
+ "\x2F\x3E\x72\xEE\xD1\xCE\xFC\x1B"
+ "\xEA\x32\x56\xE4\x0B\xAF\x27\x36"
+ "\xAF\x08\xB9\x61\xB7\x48\x23\x27"
+ "\xEE\x4D\xC8\x79\x56\x06\xEB\xC7"
+ "\x5B\xCA\x0A\xC6\x5E\x5C\xCB\xB6"
+ "\x9D\xDA\x04\x59\xE2\x09\x48\x7E"
+ "\x6B\x37\xC6\xFE\x92\xA9\x1E\x6E"
+ "\x0D\x19\xFA\x33\x0F\xEE\x36\x68"
+ "\x11\xBB\xF9\x5A\x73\xAB\x3A\xEA"
+ "\xAC\x28\xD8\xD5\x27\xE8\x6B\x16"
+ "\x45\x86\x50\x01\x70\x35\x99\x92"
+ "\xDF\x0C\x07\x88\x8B\x7F\x9E\x4B"
+ "\xD2\x04\x84\x90\xC4\x27\xDF\x0A"
+ "\x49\xA8\xA7\x1A\x6D\x78\x16\xCA"
+ "\xB3\x18\x5C\xC3\x93\x63\x5A\x68"
+ "\x77\x02\xBA\xED\x62\x71\xB1\xD9"
+ "\x5E\xE5\x6F\x1A\xCC\x1D\xBE\x2E"
+ "\x11\xF3\xA6\x97\xCA\x8E\xBF\xB4"
+ "\x56\xA1\x36\x6B\xB1\x0A\x3E\x70"
+ "\xEA\xD7\xCD\x72\x7B\x79\xC8\xAD"
+ "\x6B\xFE\xFB\xBA\x64\xAE\x19\xC1"
+ "\x82\xCF\x8A\xA1\x50\x17\x7F\xB2"
+ "\x6F\x7B\x0F\x52\xC5\x3E\x4A\x52"
+ "\x3F\xD9\x3F\x01\xA6\x41\x1A\xB3"
+ "\xB3\x7A\x0E\x8E\x75\xB2\xB1\x5F"
+ "\xDB\xEA\x84\x13\x26\x6C\x85\x4E"
+ "\xAE\x6B\xDC\xE7\xE7\xAD\xB0\x06"
+ "\x5C\xBA\x92\xD0\x30\xBB\x8D\xD2"
+ "\xAE\x4C\x70\x85\xA0\x07\xE3\x2C"
+ "\xD1\x27\x9C\xCF\xDB\x13\xB7\xE5"
+ "\xF9\x6A\x02\xD0\x39\x9D\xB6\xE7"
+ "\xD1\x17\x25\x08\xF9\xA9\xA6\x67"
+ "\x38\x80\xD1\x22\xAB\x1A\xD7\x26"
+ "\xAD\xCA\x19\x1B\xFA\x18\xA7\x57"
+ "\x31\xEC\xC9\xED\xDB\x79\xC0\x48"
+ "\xAC\x31\x9F\x03\x8B\x62\x5B\x7E"
+ "\x0E\xA6\xD0\x64\xEE\xEA\x00\xFC"
+ "\x58\xC8\xDE\x51\x4E\x17\x15\x11"
+ "\x66\x58\xB6\x90\xDC\xDF\xA1\x49"
+ "\xCA\x79\xE9\x31\x31\x42\xDC\x56"
+ "\x0B\xCD\xB6\x0D\xC7\x64\xF7\x19"
+ "\xD9\x42\x05\x7F\xBC\x2F\xFC\x90"
+ "\xAE\x29\x86\xAA\x43\x7A\x4F\x6B"
+ "\xCE\xEA\xBC\x31\x8D\x65\x9D\x46"
+ "\xEA\x77\xB4\xF9\x58\xEA\x5D\x84"
+ "\xE4\xDC\x14\xBB\xBD\x15\x0E\xDA"
+ "\xD8\xE4\xA4\x5D\x61\xF9\x58\x0F"
+ "\xE4\x82\x77\xCE\x87\xC0\x09\xF0"
+ "\xD6\x10\x9E\x34\xE1\x0C\x67\x55"
+ "\x7B\x6D\xD5\x51\x4B\x00\xEE\xBA"
+ "\xF2\x7B\xBE\x75\x07\x42\x9D\x99"
+ "\x12\xE1\x71\x4A\xF9\x2A\xF5\xF6"
+ "\x93\x03\xD7\x51\x09\xFA\xBE\x68"
+ "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
+ .ilen = 496,
+ .result = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -2342,6 +3219,142 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
"\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
"\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
.rlen = 128,
+ }, { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
+ "\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .input = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .ilen = 496,
+ .result = "\xF8\xF6\xB5\x60\x5C\x5A\x75\x84"
+ "\x87\x81\x53\xBA\xC9\x6F\xEC\xD5"
+ "\x1E\x68\x8E\x85\x12\x86\x1D\x38"
+ "\x1C\x91\x40\xCC\x69\x6A\xD5\x35"
+ "\x0D\x7C\xB5\x07\x7C\x7B\x2A\xAF"
+ "\x32\xBC\xA1\xB3\x84\x31\x1B\x3C"
+ "\x0A\x2B\xFA\xD3\x9F\xB0\x8C\x37"
+ "\x8F\x9D\xA7\x6D\x6C\xFA\xD7\x90"
+ "\xE3\x69\x54\xED\x3A\xC4\xF1\x6B"
+ "\xB1\xCC\xFB\x7D\xD8\x8E\x17\x0B"
+ "\x9C\xF6\x4C\xD6\xFF\x03\x4E\xD9"
+ "\xE6\xA5\xAD\x25\xE6\x17\x69\x63"
+ "\x11\x35\x61\x94\x88\x7B\x1C\x48"
+ "\xF1\x24\x20\x29\x6B\x93\x1A\x8E"
+ "\x43\x03\x89\xD8\xB1\xDA\x47\x7B"
+ "\x79\x3A\x83\x76\xDA\xAE\xC6\xBB"
+ "\x22\xF8\xE8\x3D\x9A\x65\x54\xD8"
+ "\x4C\xE9\xE7\xE4\x63\x2F\x5C\x73"
+ "\x5A\xC3\xAE\x46\xA8\xCD\x57\xE6"
+ "\x67\x88\xA5\x20\x6F\x5F\x97\xC7"
+ "\xCC\x15\xA2\x0A\x93\xEA\x33\xE7"
+ "\x03\x5F\xEC\x64\x30\x6F\xEE\xD7"
+ "\x7E\xDF\xD6\xE9\x6F\x3F\xD6\x1E"
+ "\xBE\x67\x6C\x5B\x97\xA0\x09\xE6"
+ "\xEE\xFE\x55\xA3\x29\x65\xE0\x12"
+ "\xA1\x6A\x8A\x6F\xF2\xE6\xF1\x96"
+ "\x87\xFB\x9C\x05\xDD\x80\xEC\xFF"
+ "\xC5\xED\x50\xFE\xFC\x91\xCD\xCE"
+ "\x25\x2C\x5F\xD9\xAD\x95\x7D\x99"
+ "\xF0\x05\xC4\x71\x46\x5F\xF9\x0D"
+ "\xD2\x63\xDF\x9B\x96\x2E\x2B\xA6"
+ "\x2B\x1C\xD5\xFB\x96\x24\x60\x60"
+ "\x54\x40\xB8\x62\xA4\xF8\x46\x95"
+ "\x73\x28\xA3\xA6\x16\x2B\x17\xE7"
+ "\x7A\xF8\x62\x54\x3B\x64\x69\xE1"
+ "\x71\x34\x29\x5B\x4E\x05\x9B\xFA"
+ "\x5E\xF1\x96\xB7\xCE\x16\x9B\x59"
+ "\xF1\x1A\x4C\x51\x26\xFD\x79\xE2"
+ "\x3B\x8E\x71\x69\x6A\x91\xB6\x65"
+ "\x32\x09\xB8\xE4\x09\x1F\xEA\x39"
+ "\xCE\x20\x65\x9F\xD6\xD1\xC7\xF0"
+ "\x73\x50\x08\x56\x20\x9B\x94\x23"
+ "\x14\x39\xB7\x2B\xB1\x2D\x6D\x6F"
+ "\x41\x5B\xCC\xE2\x18\xAE\x62\x89"
+ "\x78\x8E\x67\x23\xD0\xFB\x2B\xE5"
+ "\x25\xC9\x48\x97\xB5\xD3\x17\xD5"
+ "\x6A\x9F\xA7\x48\x0C\x2B\x73\x3B"
+ "\x57\x08\xAE\x91\xF2\xB7\x57\x89"
+ "\xF4\xD0\xB0\x07\xB0\x42\x6C\xAF"
+ "\x98\x1A\xE7\xD1\xAC\x1E\xB5\x02"
+ "\xD4\x56\x42\x79\x79\x7F\x2A\x77"
+ "\x25\xE9\x7D\xC1\x88\x19\x2B\x49"
+ "\x6F\x46\x59\xAB\x56\x1F\x61\xE0"
+ "\x0C\x24\x9C\xC9\x5B\x63\xA9\x12"
+ "\xCF\x88\x96\xB6\xA8\x24\xC6\xA8"
+ "\x21\x85\x1A\x62\x7E\x34\xBB\xEB"
+ "\xBD\x02\x2A\xC7\xD8\x89\x80\xC5"
+ "\xB1\xBB\x60\xA5\x22\xFC\x6F\x38"
+ "\x02\x80\xA3\x28\x22\x75\xE1\xE9"
+ "\x90\xE9\xFA\x4B\x00\x10\xAC\x58"
+ "\x83\x70\xFF\x86\xE6\xAA\x0F\x1F"
+ "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -2386,6 +3399,698 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
"\x63\x65\x65\x72\x73\x74\x54\x20"
"\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
.rlen = 128,
+ }, { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
+ "\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .input = "\xF8\xF6\xB5\x60\x5C\x5A\x75\x84"
+ "\x87\x81\x53\xBA\xC9\x6F\xEC\xD5"
+ "\x1E\x68\x8E\x85\x12\x86\x1D\x38"
+ "\x1C\x91\x40\xCC\x69\x6A\xD5\x35"
+ "\x0D\x7C\xB5\x07\x7C\x7B\x2A\xAF"
+ "\x32\xBC\xA1\xB3\x84\x31\x1B\x3C"
+ "\x0A\x2B\xFA\xD3\x9F\xB0\x8C\x37"
+ "\x8F\x9D\xA7\x6D\x6C\xFA\xD7\x90"
+ "\xE3\x69\x54\xED\x3A\xC4\xF1\x6B"
+ "\xB1\xCC\xFB\x7D\xD8\x8E\x17\x0B"
+ "\x9C\xF6\x4C\xD6\xFF\x03\x4E\xD9"
+ "\xE6\xA5\xAD\x25\xE6\x17\x69\x63"
+ "\x11\x35\x61\x94\x88\x7B\x1C\x48"
+ "\xF1\x24\x20\x29\x6B\x93\x1A\x8E"
+ "\x43\x03\x89\xD8\xB1\xDA\x47\x7B"
+ "\x79\x3A\x83\x76\xDA\xAE\xC6\xBB"
+ "\x22\xF8\xE8\x3D\x9A\x65\x54\xD8"
+ "\x4C\xE9\xE7\xE4\x63\x2F\x5C\x73"
+ "\x5A\xC3\xAE\x46\xA8\xCD\x57\xE6"
+ "\x67\x88\xA5\x20\x6F\x5F\x97\xC7"
+ "\xCC\x15\xA2\x0A\x93\xEA\x33\xE7"
+ "\x03\x5F\xEC\x64\x30\x6F\xEE\xD7"
+ "\x7E\xDF\xD6\xE9\x6F\x3F\xD6\x1E"
+ "\xBE\x67\x6C\x5B\x97\xA0\x09\xE6"
+ "\xEE\xFE\x55\xA3\x29\x65\xE0\x12"
+ "\xA1\x6A\x8A\x6F\xF2\xE6\xF1\x96"
+ "\x87\xFB\x9C\x05\xDD\x80\xEC\xFF"
+ "\xC5\xED\x50\xFE\xFC\x91\xCD\xCE"
+ "\x25\x2C\x5F\xD9\xAD\x95\x7D\x99"
+ "\xF0\x05\xC4\x71\x46\x5F\xF9\x0D"
+ "\xD2\x63\xDF\x9B\x96\x2E\x2B\xA6"
+ "\x2B\x1C\xD5\xFB\x96\x24\x60\x60"
+ "\x54\x40\xB8\x62\xA4\xF8\x46\x95"
+ "\x73\x28\xA3\xA6\x16\x2B\x17\xE7"
+ "\x7A\xF8\x62\x54\x3B\x64\x69\xE1"
+ "\x71\x34\x29\x5B\x4E\x05\x9B\xFA"
+ "\x5E\xF1\x96\xB7\xCE\x16\x9B\x59"
+ "\xF1\x1A\x4C\x51\x26\xFD\x79\xE2"
+ "\x3B\x8E\x71\x69\x6A\x91\xB6\x65"
+ "\x32\x09\xB8\xE4\x09\x1F\xEA\x39"
+ "\xCE\x20\x65\x9F\xD6\xD1\xC7\xF0"
+ "\x73\x50\x08\x56\x20\x9B\x94\x23"
+ "\x14\x39\xB7\x2B\xB1\x2D\x6D\x6F"
+ "\x41\x5B\xCC\xE2\x18\xAE\x62\x89"
+ "\x78\x8E\x67\x23\xD0\xFB\x2B\xE5"
+ "\x25\xC9\x48\x97\xB5\xD3\x17\xD5"
+ "\x6A\x9F\xA7\x48\x0C\x2B\x73\x3B"
+ "\x57\x08\xAE\x91\xF2\xB7\x57\x89"
+ "\xF4\xD0\xB0\x07\xB0\x42\x6C\xAF"
+ "\x98\x1A\xE7\xD1\xAC\x1E\xB5\x02"
+ "\xD4\x56\x42\x79\x79\x7F\x2A\x77"
+ "\x25\xE9\x7D\xC1\x88\x19\x2B\x49"
+ "\x6F\x46\x59\xAB\x56\x1F\x61\xE0"
+ "\x0C\x24\x9C\xC9\x5B\x63\xA9\x12"
+ "\xCF\x88\x96\xB6\xA8\x24\xC6\xA8"
+ "\x21\x85\x1A\x62\x7E\x34\xBB\xEB"
+ "\xBD\x02\x2A\xC7\xD8\x89\x80\xC5"
+ "\xB1\xBB\x60\xA5\x22\xFC\x6F\x38"
+ "\x02\x80\xA3\x28\x22\x75\xE1\xE9"
+ "\x90\xE9\xFA\x4B\x00\x10\xAC\x58"
+ "\x83\x70\xFF\x86\xE6\xAA\x0F\x1F"
+ "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
+ .ilen = 496,
+ .result = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
+ },
+};
+
+static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+ "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .ilen = 496,
+ .result = "\x07\xC2\x08\x20\x72\x1F\x49\xEF"
+ "\x19\xCD\x6F\x32\x53\x05\x22\x15"
+ "\xA2\x85\x2B\xDB\x85\xD2\xD8\xB9"
+ "\xDD\x0D\x1B\x45\xCB\x69\x11\xD4"
+ "\xEA\xBE\xB2\x45\x5D\x0C\xAE\xBE"
+ "\xA0\xC1\x27\xAC\x65\x9F\x53\x7E"
+ "\xAF\xC2\x1B\xB5\xB8\x6D\x36\x0C"
+ "\x25\xC0\xF8\x6D\x0B\x29\x01\xDA"
+ "\x13\x78\xDC\x89\x12\x12\x43\xFA"
+ "\xF6\x12\xEF\x8D\x87\x62\x78\x83"
+ "\xE2\xBE\x41\x20\x4C\x6D\x35\x1B"
+ "\xD1\x0C\x30\xCF\xE2\xDE\x2B\x03"
+ "\xBF\x45\x73\xD4\xE5\x59\x95\xD1"
+ "\xB3\x9B\x27\x62\x97\xBD\xDE\x7F"
+ "\xA4\xD2\x39\x80\xAA\x50\x23\xF0"
+ "\x74\x88\x3D\xA8\x6A\x18\x79\x3B"
+ "\xC4\x96\x6C\x8D\x22\x40\x92\x6E"
+ "\xD6\xAD\x2A\x1F\xDE\x63\xC0\xE7"
+ "\x07\xF7\x2D\xF7\xB5\xF3\xF0\xCC"
+ "\x01\x7C\x2A\x9B\xC2\x10\xCA\xAA"
+ "\xFD\x2B\x3F\xC5\xF3\xF6\xFC\x9B"
+ "\x45\xDB\x53\xE4\x5B\xF3\xC9\x7B"
+ "\x8E\x52\xFF\xC8\x02\xB8\xAC\x9D"
+ "\xA1\x00\x39\xDA\x3D\x2D\x0E\x01"
+ "\x09\x7D\x8D\x5E\xBE\x53\xB9\xB0"
+ "\x8E\xE7\xE2\x96\x6A\xB2\x78\xEA"
+ "\xDE\x23\x8B\xA5\xFA\x5C\xE3\xDA"
+ "\xBF\x8E\x31\x6A\x55\xD1\x6A\xB2"
+ "\xB5\x46\x6F\xA5\xF0\xEE\xBA\x1F"
+ "\x9F\x98\xB0\x66\x4F\xD0\x3F\xA9"
+ "\xDF\x5F\x58\xC4\xF4\xFF\x75\x5C"
+ "\x40\x3A\x09\x7E\x6E\x1C\x97\xD4"
+ "\xCC\xE7\xE7\x71\xCF\x0B\x15\x08"
+ "\x71\xFA\x07\x97\xCD\xE6\xCA\x1D"
+ "\x14\x28\x0C\xCF\x99\x13\x7A\xF1"
+ "\xEB\xFA\xFA\x92\x07\xDE\x1D\xA1"
+ "\xD3\x36\x69\xFE\x51\x4D\x9F\x2E"
+ "\x83\x37\x4F\x1F\x48\x30\xED\x04"
+ "\x4D\xA4\xEF\x3A\xCA\x76\xF4\x1C"
+ "\x41\x8F\x63\x37\x78\x2F\x86\xA6"
+ "\xEF\x41\x7E\xD2\xAF\x88\xAB\x67"
+ "\x52\x71\xC3\x8E\xF8\x26\x93\x72"
+ "\xAA\xD6\x0E\xE7\x0B\x46\xB1\x3A"
+ "\xB4\x08\xA9\xA8\xA0\xCF\x20\x0C"
+ "\x52\xBC\x8B\x05\x56\xB2\xBC\x31"
+ "\x9B\x74\xB9\x29\x29\x96\x9A\x50"
+ "\xDC\x45\xDC\x1A\xEB\x0C\x64\xD4"
+ "\xD3\x05\x7E\x59\x55\xC3\xF4\x90"
+ "\xC2\xAB\xF8\x9B\x8A\xDA\xCE\xA1"
+ "\xC3\xF4\xAD\x77\xDD\x44\xC8\xAC"
+ "\xA3\xF1\xC9\xD2\x19\x5C\xB0\xCA"
+ "\xA2\x34\xC1\xF7\x6C\xFD\xAC\x65"
+ "\x32\xDC\x48\xC4\xF2\x00\x6B\x77"
+ "\xF1\x7D\x76\xAC\xC0\x31\x63\x2A"
+ "\xA5\x3A\x62\xC8\x91\xB1\x03\x65"
+ "\xCB\x43\xD1\x06\xDF\xC3\x67\xBC"
+ "\xDC\xE0\xCD\x35\xCE\x49\x65\xA0"
+ "\x52\x7B\xA7\x0D\x07\xA9\x1B\xB0"
+ "\x40\x77\x72\xC2\xEA\x0E\x3A\x78"
+ "\x46\xB9\x91\xB6\xE7\x3D\x51\x42"
+ "\xFD\x51\xB0\xC6\x2C\x63\x13\x78"
+ "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
+ }, { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
+ "\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .input = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47"
+ "\x2E\xB1\x18",
+ .ilen = 499,
+ .result = "\x23\xFF\x5C\x99\x75\xBB\x1F\xD4"
+ "\xBC\x27\x9D\x36\x60\xA9\xC9\xF7"
+ "\x94\x9D\x1B\xFF\x8E\x95\x57\x89"
+ "\x8C\x2E\x33\x70\x43\x61\xE6\xD2"
+ "\x82\x33\x63\xB6\xC4\x34\x5E\xF8"
+ "\x96\x07\xA7\xD2\x3B\x8E\xC9\xAA"
+ "\x7C\xA0\x55\x89\x2E\xE1\x85\x25"
+ "\x14\x04\xDA\x6B\xE0\xEE\x56\xCF"
+ "\x08\x2E\x69\xD4\x54\xDE\x22\x84"
+ "\x69\xA6\xA7\xD3\x3A\x9A\xE8\x05"
+ "\x63\xDB\xBF\x46\x3A\x26\x2E\x0F"
+ "\x58\x5C\x46\xEA\x07\x40\xDA\xE1"
+ "\x14\x1D\xCD\x4F\x06\xC0\xCA\x54"
+ "\x1E\xC9\x45\x85\x67\x7C\xC2\xB5"
+ "\x97\x5D\x61\x78\x2E\x46\xEC\x6A"
+ "\x53\xF4\xD0\xAE\xFA\xB4\x86\x29"
+ "\x9F\x17\x33\x24\xD8\xB9\xB2\x05"
+ "\x93\x88\xEA\xF7\xA0\x70\x69\x49"
+ "\x88\x6B\x73\x40\x41\x8D\xD9\xD9"
+ "\x7E\x78\xE9\xBE\x6C\x14\x22\x7A"
+ "\x66\xE1\xDA\xED\x10\xFF\x69\x1D"
+ "\xB9\xAA\xF2\x56\x72\x1B\x23\xE2"
+ "\x45\x54\x8B\xA3\x70\x23\xB4\x5E"
+ "\x8E\x96\xC9\x05\x00\xB3\xB6\xC2"
+ "\x2A\x02\x43\x7A\x62\xD5\xC8\xD2"
+ "\xC2\xD0\xE4\x78\xA1\x7B\x3E\xE8"
+ "\x9F\x7F\x7D\x40\x54\x30\x3B\xC0"
+ "\xA5\x54\xFD\xCA\x25\xEC\x44\x3E"
+ "\x1A\x54\x7F\x88\xD0\xE1\xFE\x71"
+ "\xCE\x05\x49\x89\xBA\xD6\x72\xE7"
+ "\xD6\x5D\x3F\xA2\xD9\xAB\xC5\x02"
+ "\xD6\x43\x22\xAF\xA2\xE4\x80\x85"
+ "\xD7\x87\xB9\xEA\x43\xDB\xC8\xEF"
+ "\x5C\x82\x2E\x98\x0D\x30\x41\x6B"
+ "\x08\x48\x8D\xF0\xF8\x60\xD7\x9D"
+ "\xE9\xDE\x40\xAD\x0D\xAD\x0D\x58"
+ "\x2A\x98\x35\xFE\xF7\xDD\x4B\x40"
+ "\xDE\xB0\x05\xD9\x7B\x09\x4D\xBC"
+ "\x42\xC0\xF1\x15\x0B\xFA\x26\x6B"
+ "\xC6\x12\x13\x4F\xCB\x35\xBA\x35"
+ "\xDD\x7A\x36\x9C\x12\x57\x55\x83"
+ "\x78\x58\x09\xD0\xB0\xCF\x7C\x5C"
+ "\x38\xCF\xBD\x79\x5B\x13\x4D\x97"
+ "\xC1\x85\x6F\x97\xC9\xE8\xC2\xA4"
+ "\x98\xE2\xBD\x77\x6B\x53\x39\x1A"
+ "\x28\x10\xE7\xE0\xE7\xDE\x9D\x69"
+ "\x78\x6F\x8E\xD2\xD9\x5D\xD2\x15"
+ "\x9E\xB5\x4D\x8C\xC0\x78\x22\x2F"
+ "\x17\x11\x2E\x99\xD7\xE3\xA4\x4F"
+ "\x65\xA5\x6B\x03\x2C\x35\x6F\xDA"
+ "\x8A\x19\x08\xE1\x08\x48\x59\x51"
+ "\x53\x4B\xD1\xDF\xDA\x14\x50\x5F"
+ "\xDF\xB5\x8C\xDF\xC6\xFD\x85\xFA"
+ "\xD4\xF9\x64\x45\x65\x0D\x7D\xF4"
+ "\xC8\xCD\x3F\x32\xAF\xDD\x30\xED"
+ "\x7B\xAA\xAC\xF0\xDA\x7F\xDF\x75"
+ "\x1C\xA4\xF1\xCB\x5E\x4F\x0B\xB4"
+ "\x97\x73\x28\xDE\xCF\xAF\x82\xBD"
+ "\xC4\xBA\xB4\x9C\x0D\x16\x77\x42"
+ "\x42\x39\x7C\x53\xA4\xD4\xDD\x40"
+ "\x5C\x60\x1F\x6E\xA7\xE2\xDC\xE7"
+ "\x32\x0F\x05\x2F\xF2\x4C\x95\x3B"
+ "\xF2\x79\xD9",
+ .rlen = 499,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 499 - 16, 16 },
+ },
+};
+
+static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
+ { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+ "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x07\xC2\x08\x20\x72\x1F\x49\xEF"
+ "\x19\xCD\x6F\x32\x53\x05\x22\x15"
+ "\xA2\x85\x2B\xDB\x85\xD2\xD8\xB9"
+ "\xDD\x0D\x1B\x45\xCB\x69\x11\xD4"
+ "\xEA\xBE\xB2\x45\x5D\x0C\xAE\xBE"
+ "\xA0\xC1\x27\xAC\x65\x9F\x53\x7E"
+ "\xAF\xC2\x1B\xB5\xB8\x6D\x36\x0C"
+ "\x25\xC0\xF8\x6D\x0B\x29\x01\xDA"
+ "\x13\x78\xDC\x89\x12\x12\x43\xFA"
+ "\xF6\x12\xEF\x8D\x87\x62\x78\x83"
+ "\xE2\xBE\x41\x20\x4C\x6D\x35\x1B"
+ "\xD1\x0C\x30\xCF\xE2\xDE\x2B\x03"
+ "\xBF\x45\x73\xD4\xE5\x59\x95\xD1"
+ "\xB3\x9B\x27\x62\x97\xBD\xDE\x7F"
+ "\xA4\xD2\x39\x80\xAA\x50\x23\xF0"
+ "\x74\x88\x3D\xA8\x6A\x18\x79\x3B"
+ "\xC4\x96\x6C\x8D\x22\x40\x92\x6E"
+ "\xD6\xAD\x2A\x1F\xDE\x63\xC0\xE7"
+ "\x07\xF7\x2D\xF7\xB5\xF3\xF0\xCC"
+ "\x01\x7C\x2A\x9B\xC2\x10\xCA\xAA"
+ "\xFD\x2B\x3F\xC5\xF3\xF6\xFC\x9B"
+ "\x45\xDB\x53\xE4\x5B\xF3\xC9\x7B"
+ "\x8E\x52\xFF\xC8\x02\xB8\xAC\x9D"
+ "\xA1\x00\x39\xDA\x3D\x2D\x0E\x01"
+ "\x09\x7D\x8D\x5E\xBE\x53\xB9\xB0"
+ "\x8E\xE7\xE2\x96\x6A\xB2\x78\xEA"
+ "\xDE\x23\x8B\xA5\xFA\x5C\xE3\xDA"
+ "\xBF\x8E\x31\x6A\x55\xD1\x6A\xB2"
+ "\xB5\x46\x6F\xA5\xF0\xEE\xBA\x1F"
+ "\x9F\x98\xB0\x66\x4F\xD0\x3F\xA9"
+ "\xDF\x5F\x58\xC4\xF4\xFF\x75\x5C"
+ "\x40\x3A\x09\x7E\x6E\x1C\x97\xD4"
+ "\xCC\xE7\xE7\x71\xCF\x0B\x15\x08"
+ "\x71\xFA\x07\x97\xCD\xE6\xCA\x1D"
+ "\x14\x28\x0C\xCF\x99\x13\x7A\xF1"
+ "\xEB\xFA\xFA\x92\x07\xDE\x1D\xA1"
+ "\xD3\x36\x69\xFE\x51\x4D\x9F\x2E"
+ "\x83\x37\x4F\x1F\x48\x30\xED\x04"
+ "\x4D\xA4\xEF\x3A\xCA\x76\xF4\x1C"
+ "\x41\x8F\x63\x37\x78\x2F\x86\xA6"
+ "\xEF\x41\x7E\xD2\xAF\x88\xAB\x67"
+ "\x52\x71\xC3\x8E\xF8\x26\x93\x72"
+ "\xAA\xD6\x0E\xE7\x0B\x46\xB1\x3A"
+ "\xB4\x08\xA9\xA8\xA0\xCF\x20\x0C"
+ "\x52\xBC\x8B\x05\x56\xB2\xBC\x31"
+ "\x9B\x74\xB9\x29\x29\x96\x9A\x50"
+ "\xDC\x45\xDC\x1A\xEB\x0C\x64\xD4"
+ "\xD3\x05\x7E\x59\x55\xC3\xF4\x90"
+ "\xC2\xAB\xF8\x9B\x8A\xDA\xCE\xA1"
+ "\xC3\xF4\xAD\x77\xDD\x44\xC8\xAC"
+ "\xA3\xF1\xC9\xD2\x19\x5C\xB0\xCA"
+ "\xA2\x34\xC1\xF7\x6C\xFD\xAC\x65"
+ "\x32\xDC\x48\xC4\xF2\x00\x6B\x77"
+ "\xF1\x7D\x76\xAC\xC0\x31\x63\x2A"
+ "\xA5\x3A\x62\xC8\x91\xB1\x03\x65"
+ "\xCB\x43\xD1\x06\xDF\xC3\x67\xBC"
+ "\xDC\xE0\xCD\x35\xCE\x49\x65\xA0"
+ "\x52\x7B\xA7\x0D\x07\xA9\x1B\xB0"
+ "\x40\x77\x72\xC2\xEA\x0E\x3A\x78"
+ "\x46\xB9\x91\xB6\xE7\x3D\x51\x42"
+ "\xFD\x51\xB0\xC6\x2C\x63\x13\x78"
+ "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
+ .ilen = 496,
+ .result = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
+ }, { /* Generated with Crypto++ */
+ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
+ "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
+ "\xEB\xB4\x51\x72\xB4\x51\x72\x1F",
+ .klen = 24,
+ .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12"
+ "\xB7\x28\x4D\x83\x24\x59\xF2\x17",
+ .input = "\x23\xFF\x5C\x99\x75\xBB\x1F\xD4"
+ "\xBC\x27\x9D\x36\x60\xA9\xC9\xF7"
+ "\x94\x9D\x1B\xFF\x8E\x95\x57\x89"
+ "\x8C\x2E\x33\x70\x43\x61\xE6\xD2"
+ "\x82\x33\x63\xB6\xC4\x34\x5E\xF8"
+ "\x96\x07\xA7\xD2\x3B\x8E\xC9\xAA"
+ "\x7C\xA0\x55\x89\x2E\xE1\x85\x25"
+ "\x14\x04\xDA\x6B\xE0\xEE\x56\xCF"
+ "\x08\x2E\x69\xD4\x54\xDE\x22\x84"
+ "\x69\xA6\xA7\xD3\x3A\x9A\xE8\x05"
+ "\x63\xDB\xBF\x46\x3A\x26\x2E\x0F"
+ "\x58\x5C\x46\xEA\x07\x40\xDA\xE1"
+ "\x14\x1D\xCD\x4F\x06\xC0\xCA\x54"
+ "\x1E\xC9\x45\x85\x67\x7C\xC2\xB5"
+ "\x97\x5D\x61\x78\x2E\x46\xEC\x6A"
+ "\x53\xF4\xD0\xAE\xFA\xB4\x86\x29"
+ "\x9F\x17\x33\x24\xD8\xB9\xB2\x05"
+ "\x93\x88\xEA\xF7\xA0\x70\x69\x49"
+ "\x88\x6B\x73\x40\x41\x8D\xD9\xD9"
+ "\x7E\x78\xE9\xBE\x6C\x14\x22\x7A"
+ "\x66\xE1\xDA\xED\x10\xFF\x69\x1D"
+ "\xB9\xAA\xF2\x56\x72\x1B\x23\xE2"
+ "\x45\x54\x8B\xA3\x70\x23\xB4\x5E"
+ "\x8E\x96\xC9\x05\x00\xB3\xB6\xC2"
+ "\x2A\x02\x43\x7A\x62\xD5\xC8\xD2"
+ "\xC2\xD0\xE4\x78\xA1\x7B\x3E\xE8"
+ "\x9F\x7F\x7D\x40\x54\x30\x3B\xC0"
+ "\xA5\x54\xFD\xCA\x25\xEC\x44\x3E"
+ "\x1A\x54\x7F\x88\xD0\xE1\xFE\x71"
+ "\xCE\x05\x49\x89\xBA\xD6\x72\xE7"
+ "\xD6\x5D\x3F\xA2\xD9\xAB\xC5\x02"
+ "\xD6\x43\x22\xAF\xA2\xE4\x80\x85"
+ "\xD7\x87\xB9\xEA\x43\xDB\xC8\xEF"
+ "\x5C\x82\x2E\x98\x0D\x30\x41\x6B"
+ "\x08\x48\x8D\xF0\xF8\x60\xD7\x9D"
+ "\xE9\xDE\x40\xAD\x0D\xAD\x0D\x58"
+ "\x2A\x98\x35\xFE\xF7\xDD\x4B\x40"
+ "\xDE\xB0\x05\xD9\x7B\x09\x4D\xBC"
+ "\x42\xC0\xF1\x15\x0B\xFA\x26\x6B"
+ "\xC6\x12\x13\x4F\xCB\x35\xBA\x35"
+ "\xDD\x7A\x36\x9C\x12\x57\x55\x83"
+ "\x78\x58\x09\xD0\xB0\xCF\x7C\x5C"
+ "\x38\xCF\xBD\x79\x5B\x13\x4D\x97"
+ "\xC1\x85\x6F\x97\xC9\xE8\xC2\xA4"
+ "\x98\xE2\xBD\x77\x6B\x53\x39\x1A"
+ "\x28\x10\xE7\xE0\xE7\xDE\x9D\x69"
+ "\x78\x6F\x8E\xD2\xD9\x5D\xD2\x15"
+ "\x9E\xB5\x4D\x8C\xC0\x78\x22\x2F"
+ "\x17\x11\x2E\x99\xD7\xE3\xA4\x4F"
+ "\x65\xA5\x6B\x03\x2C\x35\x6F\xDA"
+ "\x8A\x19\x08\xE1\x08\x48\x59\x51"
+ "\x53\x4B\xD1\xDF\xDA\x14\x50\x5F"
+ "\xDF\xB5\x8C\xDF\xC6\xFD\x85\xFA"
+ "\xD4\xF9\x64\x45\x65\x0D\x7D\xF4"
+ "\xC8\xCD\x3F\x32\xAF\xDD\x30\xED"
+ "\x7B\xAA\xAC\xF0\xDA\x7F\xDF\x75"
+ "\x1C\xA4\xF1\xCB\x5E\x4F\x0B\xB4"
+ "\x97\x73\x28\xDE\xCF\xAF\x82\xBD"
+ "\xC4\xBA\xB4\x9C\x0D\x16\x77\x42"
+ "\x42\x39\x7C\x53\xA4\xD4\xDD\x40"
+ "\x5C\x60\x1F\x6E\xA7\xE2\xDC\xE7"
+ "\x32\x0F\x05\x2F\xF2\x4C\x95\x3B"
+ "\xF2\x79\xD9",
+ .ilen = 499,
+ .result = "\x05\xEC\x77\xFB\x42\xD5\x59\x20"
+ "\x8B\x12\x86\x69\xF0\x5B\xCF\x56"
+ "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4"
+ "\x48\xD3\xBA\x0D\xB1\x18\xE3\x4A"
+ "\xFE\x41\x28\x5C\x27\x8E\x11\x85"
+ "\x6C\xF7\x5E\xC2\x55\x3C\xA0\x0B"
+ "\x92\x65\xE9\x70\xDB\x4F\xD6\xB9"
+ "\x00\xB4\x1F\xE6\x49\xFD\x44\x2F"
+ "\x53\x3A\x8D\x14\x98\x63\xCA\x5D"
+ "\xC1\xA8\x33\xA7\x0E\x91\x78\xEC"
+ "\x77\xDE\x42\xD5\xBC\x07\x8B\x12"
+ "\xE5\x4C\xF0\x5B\x22\x56\x39\x80"
+ "\x6B\x9F\x66\xC9\x50\xC4\xAF\x36"
+ "\xBA\x0D\x94\x7F\xE3\x4A\xDD\x41"
+ "\x28\xB3\x1A\x8E\x11\xF8\x43\xF7"
+ "\x5E\x21\x55\x3C\x87\x6E\x92\x65"
+ "\xCC\x57\xDB\xA2\x35\xB9\x00\xEB"
+ "\x72\xE6\x49\xD0\x44\x2F\xB6\x19"
+ "\x8D\x14\xFF\x46\xCA\x5D\x24\xA8"
+ "\x33\x9A\x6D\x91\x78\xC3\x77\xDE"
+ "\xA1\x08\xBC\x07\xEE\x71\xE5\x4C"
+ "\xD7\x5B\x22\xB5\x1C\x80\x6B\xF2"
+ "\x45\xC9\x50\x3B\xAF\x36\x99\x60"
+ "\x94\x7F\xC6\x4A\xDD\xA4\x0F\xB3"
+ "\x1A\xED\x74\xF8\x43\x2A\x5E\x21"
+ "\x88\x13\x87\x6E\xF1\x58\xCC\x57"
+ "\x3E\xA2\x35\x9C\x67\xEB\x72\xC5"
+ "\x49\xD0\xBB\x02\xB6\x19\xE0\x4B"
+ "\xFF\x46\x29\x5D\x24\x8F\x16\x9A"
+ "\x6D\xF4\x5F\xC3\xAA\x3D\xA1\x08"
+ "\x93\x7A\xEE\x71\xD8\x4C\xD7\xBE"
+ "\x01\xB5\x1C\xE7\x4E\xF2\x45\x2C"
+ "\x50\x3B\x82\x15\x99\x60\xCB\x52"
+ "\xC6\xA9\x30\xA4\x0F\x96\x79\xED"
+ "\x74\xDF\x43\x2A\xBD\x04\x88\x13"
+ "\xFA\x4D\xF1\x58\x23\x57\x3E\x81"
+ "\x68\x9C\x67\xCE\x51\xC5\xAC\x37"
+ "\xBB\x02\x95\x7C\xE0\x4B\xD2\x46"
+ "\x29\xB0\x1B\x8F\x16\xF9\x40\xF4"
+ "\x5F\x26\xAA\x3D\x84\x6F\x93\x7A"
+ "\xCD\x54\xD8\xA3\x0A\xBE\x01\xE8"
+ "\x73\xE7\x4E\xD1\x45\x2C\xB7\x1E"
+ "\x82\x15\xFC\x47\xCB\x52\x25\xA9"
+ "\x30\x9B\x62\x96\x79\xC0\x74\xDF"
+ "\xA6\x09\xBD\x04\xEF\x76\xFA\x4D"
+ "\xD4\x58\x23\x8A\x1D\x81\x68\xF3"
+ "\x5A\xCE\x51\x38\xAC\x37\x9E\x61"
+ "\x95\x7C\xC7\x4B\xD2\xA5\x0C\xB0"
+ "\x1B\xE2\x75\xF9\x40\x2B\x5F\x26"
+ "\x89\x10\x84\x6F\xF6\x59\xCD\x54"
+ "\x3F\xA3\x0A\x9D\x64\xE8\x73\xDA"
+ "\x4E\xD1\xB8\x03\xB7\x1E\xE1\x48"
+ "\xFC\x47\x2E\x52\x25\x8C\x17\x9B"
+ "\x62\xF5\x5C\xC0\xAB\x32\xA6\x09"
+ "\x90\x7B\xEF\x76\xD9\x4D\xD4\xBF"
+ "\x06\x8A\x1D\xE4\x4F\xF3\x5A\x2D"
+ "\x51\x38\x83\x6A\x9E\x61\xC8\x53"
+ "\xC7\xAE\x31\xA5\x0C\x97\x7E\xE2"
+ "\x75\xDC\x40\x2B\xB2\x05\x89\x10"
+ "\xFB\x42\xF6\x59\x20\x54\x3F\x86"
+ "\x69\x9D\x64\xCF\x56\xDA\xAD\x34"
+ "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47"
+ "\x2E\xB1\x18",
+ .rlen = 499,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 499 - 16, 16 },
},
};
@@ -2461,17 +4166,133 @@ static struct cipher_testvec bf_enc_tv_template[] = {
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .ilen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .ilen = 504,
.result = "\x96\x87\x3D\x0C\x7B\xFB\xBD\x1F"
"\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D"
"\xD7\x87\xA1\xF2\xDF\x51\x71\x26"
"\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40"
- "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B",
- .rlen = 40,
+ "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B"
+ "\xD3\xB2\xD4\x61\xC7\x9F\x06\xE9"
+ "\xCD\xF3\x88\x39\x39\x7A\xDF\x19"
+ "\xE8\x03\x2A\x0B\x9E\xA0\x2B\x86"
+ "\x31\xF8\x9D\xB1\xEE\x78\x9D\xB5"
+ "\xCD\x8B\x7C\x2E\xF5\xA2\x2D\x5D"
+ "\x6E\x66\xAF\x38\x6C\xD3\x13\xED"
+ "\x14\xEA\x5D\xD0\x17\x77\x0F\x4A"
+ "\x50\xF2\xD0\x0F\xC8\xF7\x1E\x7B"
+ "\x9D\x5B\x54\x65\x4F\x16\x8A\x97"
+ "\xF3\xF6\xD4\xAA\x87\x36\x77\x72"
+ "\x99\x4A\xB5\x5E\x88\xC3\xCD\x7D"
+ "\x1D\x97\xF9\x11\xBD\xE0\x1F\x1F"
+ "\x96\x3E\x4B\x22\xF4\xC0\xE6\xB8"
+ "\x47\x82\x98\x23\x33\x36\xBC\x1B"
+ "\x36\xE7\xF6\xCF\x97\x37\x16\xC0"
+ "\x87\x31\x8B\xB0\xDB\x19\x42\xA5"
+ "\x1F\x90\x7E\x66\x34\xDD\x5E\xE9"
+ "\x4F\xB2\x2B\x9A\xDE\xB3\x5D\x71"
+ "\x4D\x68\xF0\xDC\xA6\xEA\xE3\x9B"
+ "\x60\x00\x55\x57\x06\x8B\xD5\xB3"
+ "\x86\x30\x78\xDA\x33\x9A\x9D\xCC"
+ "\xBA\x0B\x81\x06\x77\x43\xC7\xC9"
+ "\xDB\x37\x60\x11\x45\x59\x6D\x2D"
+ "\x90\x3D\x65\x3E\xD0\x13\xC6\x3C"
+ "\x0E\x78\x7D\x9A\x00\xD6\x2F\x0B"
+ "\x3B\x53\x19\x1E\xA8\x9B\x11\xD9"
+ "\x98\xE4\x7F\xC3\x6E\x51\x24\x70"
+ "\x9F\x04\x9C\xC2\x9E\x44\x84\xE3"
+ "\xE0\x8A\x44\xA2\x5C\x94\x74\x34"
+ "\x37\x52\x7C\x03\xE8\x8E\x97\xE1"
+ "\x5B\x5C\x0E\xB0\x70\xFE\x54\x3F"
+ "\xD8\x65\xA9\xC5\xCD\xEC\xF4\x45"
+ "\x55\xC5\xA7\xA3\x19\x80\x28\x51"
+ "\xBE\x64\x4A\xC1\xD4\xE1\xBE\xEB"
+ "\x73\x4C\xB6\xF9\x5F\x6D\x82\xBC"
+ "\x3E\x42\x14\x49\x88\x51\xBF\x68"
+ "\x45\x75\x27\x1B\x0A\x72\xED\xAF"
+ "\xDA\xC4\x4D\x67\x0D\xEE\x75\xE3"
+ "\x34\xDD\x91\x19\x42\x3A\xCB\xDA"
+ "\x38\xFA\x3C\x93\x62\xF2\xE3\x81"
+ "\xB3\xE4\xBB\xF6\x0D\x0B\x1D\x09"
+ "\x9C\x52\x0D\x50\x63\xA4\xB2\xD2"
+ "\x82\xA0\x23\x3F\x1F\xB6\xED\x6E"
+ "\xC2\x9C\x1C\xD0\x9A\x40\xB6\xFC"
+ "\x36\x56\x6E\x85\x73\xD7\x52\xBA"
+ "\x35\x5E\x32\x89\x5D\x42\xF5\x36"
+ "\x52\x8D\x46\x7D\xC8\x71\xAD\x33"
+ "\xE1\xAF\x6A\xA8\xEC\xBA\x1C\xDC"
+ "\xFE\x88\xE6\x16\xE4\xC8\x13\x00"
+ "\x3C\xDA\x59\x32\x38\x19\xD5\xEB"
+ "\xB6\x7F\x78\x45\x1B\x8E\x07\x8C"
+ "\x66\x52\x75\xFF\xAF\xCE\x2D\x2B"
+ "\x22\x29\xCA\xB3\x5F\x7F\xE3\x29"
+ "\xB2\xB8\x9D\xEB\x16\xC8\xC5\x1D"
+ "\xC9\x0D\x59\x82\x27\x57\x9D\x42"
+ "\x54\x59\x09\xA5\x3D\xC5\x84\x68"
+ "\x56\xEB\x36\x77\x3D\xAA\xB8\xF5"
+ "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
+ .rlen = 504,
.also_non_np = 1,
.np = 2,
- .tap = { 40 - 8, 8 },
+ .tap = { 504 - 8, 8 },
},
};
@@ -2537,17 +4358,133 @@ static struct cipher_testvec bf_dec_tv_template[] = {
"\xE3\xC1\x99\x6D\x39\xD4\xC2\x7D"
"\xD7\x87\xA1\xF2\xDF\x51\x71\x26"
"\xC2\xF4\x6D\xFF\xF6\xCD\x6B\x40"
- "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B",
- .ilen = 40,
+ "\xE1\xB3\xBF\xD4\x38\x2B\xC8\x3B"
+ "\xD3\xB2\xD4\x61\xC7\x9F\x06\xE9"
+ "\xCD\xF3\x88\x39\x39\x7A\xDF\x19"
+ "\xE8\x03\x2A\x0B\x9E\xA0\x2B\x86"
+ "\x31\xF8\x9D\xB1\xEE\x78\x9D\xB5"
+ "\xCD\x8B\x7C\x2E\xF5\xA2\x2D\x5D"
+ "\x6E\x66\xAF\x38\x6C\xD3\x13\xED"
+ "\x14\xEA\x5D\xD0\x17\x77\x0F\x4A"
+ "\x50\xF2\xD0\x0F\xC8\xF7\x1E\x7B"
+ "\x9D\x5B\x54\x65\x4F\x16\x8A\x97"
+ "\xF3\xF6\xD4\xAA\x87\x36\x77\x72"
+ "\x99\x4A\xB5\x5E\x88\xC3\xCD\x7D"
+ "\x1D\x97\xF9\x11\xBD\xE0\x1F\x1F"
+ "\x96\x3E\x4B\x22\xF4\xC0\xE6\xB8"
+ "\x47\x82\x98\x23\x33\x36\xBC\x1B"
+ "\x36\xE7\xF6\xCF\x97\x37\x16\xC0"
+ "\x87\x31\x8B\xB0\xDB\x19\x42\xA5"
+ "\x1F\x90\x7E\x66\x34\xDD\x5E\xE9"
+ "\x4F\xB2\x2B\x9A\xDE\xB3\x5D\x71"
+ "\x4D\x68\xF0\xDC\xA6\xEA\xE3\x9B"
+ "\x60\x00\x55\x57\x06\x8B\xD5\xB3"
+ "\x86\x30\x78\xDA\x33\x9A\x9D\xCC"
+ "\xBA\x0B\x81\x06\x77\x43\xC7\xC9"
+ "\xDB\x37\x60\x11\x45\x59\x6D\x2D"
+ "\x90\x3D\x65\x3E\xD0\x13\xC6\x3C"
+ "\x0E\x78\x7D\x9A\x00\xD6\x2F\x0B"
+ "\x3B\x53\x19\x1E\xA8\x9B\x11\xD9"
+ "\x98\xE4\x7F\xC3\x6E\x51\x24\x70"
+ "\x9F\x04\x9C\xC2\x9E\x44\x84\xE3"
+ "\xE0\x8A\x44\xA2\x5C\x94\x74\x34"
+ "\x37\x52\x7C\x03\xE8\x8E\x97\xE1"
+ "\x5B\x5C\x0E\xB0\x70\xFE\x54\x3F"
+ "\xD8\x65\xA9\xC5\xCD\xEC\xF4\x45"
+ "\x55\xC5\xA7\xA3\x19\x80\x28\x51"
+ "\xBE\x64\x4A\xC1\xD4\xE1\xBE\xEB"
+ "\x73\x4C\xB6\xF9\x5F\x6D\x82\xBC"
+ "\x3E\x42\x14\x49\x88\x51\xBF\x68"
+ "\x45\x75\x27\x1B\x0A\x72\xED\xAF"
+ "\xDA\xC4\x4D\x67\x0D\xEE\x75\xE3"
+ "\x34\xDD\x91\x19\x42\x3A\xCB\xDA"
+ "\x38\xFA\x3C\x93\x62\xF2\xE3\x81"
+ "\xB3\xE4\xBB\xF6\x0D\x0B\x1D\x09"
+ "\x9C\x52\x0D\x50\x63\xA4\xB2\xD2"
+ "\x82\xA0\x23\x3F\x1F\xB6\xED\x6E"
+ "\xC2\x9C\x1C\xD0\x9A\x40\xB6\xFC"
+ "\x36\x56\x6E\x85\x73\xD7\x52\xBA"
+ "\x35\x5E\x32\x89\x5D\x42\xF5\x36"
+ "\x52\x8D\x46\x7D\xC8\x71\xAD\x33"
+ "\xE1\xAF\x6A\xA8\xEC\xBA\x1C\xDC"
+ "\xFE\x88\xE6\x16\xE4\xC8\x13\x00"
+ "\x3C\xDA\x59\x32\x38\x19\xD5\xEB"
+ "\xB6\x7F\x78\x45\x1B\x8E\x07\x8C"
+ "\x66\x52\x75\xFF\xAF\xCE\x2D\x2B"
+ "\x22\x29\xCA\xB3\x5F\x7F\xE3\x29"
+ "\xB2\xB8\x9D\xEB\x16\xC8\xC5\x1D"
+ "\xC9\x0D\x59\x82\x27\x57\x9D\x42"
+ "\x54\x59\x09\xA5\x3D\xC5\x84\x68"
+ "\x56\xEB\x36\x77\x3D\xAA\xB8\xF5"
+ "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
+ .ilen = 504,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .rlen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .rlen = 504,
.also_non_np = 1,
.np = 2,
- .tap = { 40 - 8, 8 },
+ .tap = { 504 - 8, 8 },
},
};
@@ -2578,17 +4515,133 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .ilen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .ilen = 504,
.result = "\xB4\xFE\xA5\xBB\x3D\x2C\x27\x06"
"\x06\x2B\x3A\x92\xB2\xF5\x5E\x62"
"\x84\xCD\xF7\x66\x7E\x41\x6C\x8E"
"\x1B\xD9\x02\xB6\x48\xB0\x87\x25"
- "\x01\x9C\x93\x63\x51\x60\x82\xD2",
- .rlen = 40,
+ "\x01\x9C\x93\x63\x51\x60\x82\xD2"
+ "\x4D\xE5\xC2\xB7\xAE\x60\xD8\xAD"
+ "\x9F\xAB\x6C\xFA\x20\x05\xDA\x6F"
+ "\x1F\xD1\xD8\x36\x0F\xB5\x16\x69"
+ "\x3C\xAF\xB3\x30\x18\x33\xE6\xB5"
+ "\x43\x29\x9D\x94\xF4\x2F\x0A\x65"
+ "\x40\xB2\xB2\xB2\x42\x89\xEE\x8A"
+ "\x60\xD3\x52\xA8\xED\x91\xDF\xE1"
+ "\x91\x73\x7C\x28\xA1\x14\xC3\x4C"
+ "\x82\x72\x4B\x7D\x7D\x32\xD5\x19"
+ "\xE8\xB8\x6B\x30\x21\x09\x0E\x27"
+ "\x10\x9D\x2D\x3A\x6A\x4B\x7B\xE6"
+ "\x8D\x4E\x02\x32\xFF\x7F\x8E\x13"
+ "\xB0\x96\xF4\xC2\xA1\x60\x8A\x69"
+ "\xEF\x0F\x86\xD0\x25\x13\x1A\x7C"
+ "\x6E\xF0\x41\xA3\xFB\xB3\xAB\x40"
+ "\x7D\x19\xA0\x11\x4F\x3E\x1D\x43"
+ "\x65\xFE\x15\x40\xD0\x62\x41\x02"
+ "\xEA\x0C\x7A\xC3\x84\xEE\xB0\xBE"
+ "\xBE\xC8\x57\x51\xCD\x4F\xAD\x5C"
+ "\xCC\x79\xBA\x0D\x85\x3A\xED\x6B"
+ "\xAC\x6B\xA3\x4D\xBC\xE8\x02\x6A"
+ "\xC2\x6D\xBD\x5E\x89\x95\x86\x43"
+ "\x2C\x17\x4B\xC6\x40\xA2\xBD\x24"
+ "\x04\xF0\x86\x08\x78\x18\x42\xE0"
+ "\x39\x1B\x22\x9E\x89\x4C\x04\x6B"
+ "\x65\xC5\xB6\x0E\xF6\x63\xFC\xD7"
+ "\xAE\x9E\x87\x13\xCC\xD3\x1A\xEC"
+ "\xF0\x51\xCC\x93\x68\xFC\xE9\x19"
+ "\x7C\x4E\x9B\xCC\x17\xAD\xD2\xFC"
+ "\x97\x18\x92\xFF\x15\x11\xCE\xED"
+ "\x04\x41\x05\xA3\x92\xFF\x3B\xE6"
+ "\xB6\x8C\x90\xC6\xCD\x15\xA0\x04"
+ "\x25\x8B\x5D\x5B\x5F\xDB\xAE\x68"
+ "\xEF\xB3\x61\x18\xDB\x83\x9B\x39"
+ "\xCA\x82\xD1\x88\xF0\xA2\x5C\x02"
+ "\x87\xBD\x8D\x8F\xBB\x62\xF0\x35"
+ "\x75\x6F\x06\x81\x0A\x97\x4D\xF0"
+ "\x43\x12\x73\x77\xDB\x91\x83\x5B"
+ "\xE7\x3A\xA6\x07\x7B\xBF\x2C\x50"
+ "\x94\xDE\x7B\x65\xDA\x1C\xF1\x9F"
+ "\x7E\x12\x40\xB2\x3E\x19\x23\xF1"
+ "\x7C\x1B\x5F\xA8\xF3\xAC\x63\x87"
+ "\xEB\x3E\x0C\xBE\xA3\x63\x97\x88"
+ "\x8D\x27\xC6\x2A\xF8\xF2\x67\x9A"
+ "\x0D\x14\x16\x2B\x6F\xCB\xD4\x76"
+ "\x14\x48\x2E\xDE\x2A\x44\x5E\x45"
+ "\xF1\x97\x82\xEF\xB7\xAE\xED\x3A"
+ "\xED\x73\xD3\x79\xF7\x38\x1D\xD0"
+ "\xC5\xF8\x69\x83\x28\x84\x87\x56"
+ "\x3F\xAE\x81\x04\x79\x1F\xD1\x09"
+ "\xC5\xE5\x05\x0D\x64\x16\xCE\x42"
+ "\xC5\xF8\xDB\x57\x89\x33\x22\xFC"
+ "\xB4\xD7\x94\xB9\xF3\xCC\x02\x90"
+ "\x02\xBA\x55\x1E\x24\x3E\x02\x1D"
+ "\xC6\xCD\x8F\xD9\xBD\xED\xB0\x51"
+ "\xCD\xE9\xD5\x0C\xFE\x12\x39\xA9"
+ "\x93\x9B\xEE\xB5\x97\x41\xD2\xA0"
+ "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
+ .rlen = 504,
.also_non_np = 1,
.np = 2,
- .tap = { 40 - 8, 8 },
+ .tap = { 504 - 8, 8 },
},
};
@@ -2619,17 +4672,133 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
"\x06\x2B\x3A\x92\xB2\xF5\x5E\x62"
"\x84\xCD\xF7\x66\x7E\x41\x6C\x8E"
"\x1B\xD9\x02\xB6\x48\xB0\x87\x25"
- "\x01\x9C\x93\x63\x51\x60\x82\xD2",
- .ilen = 40,
+ "\x01\x9C\x93\x63\x51\x60\x82\xD2"
+ "\x4D\xE5\xC2\xB7\xAE\x60\xD8\xAD"
+ "\x9F\xAB\x6C\xFA\x20\x05\xDA\x6F"
+ "\x1F\xD1\xD8\x36\x0F\xB5\x16\x69"
+ "\x3C\xAF\xB3\x30\x18\x33\xE6\xB5"
+ "\x43\x29\x9D\x94\xF4\x2F\x0A\x65"
+ "\x40\xB2\xB2\xB2\x42\x89\xEE\x8A"
+ "\x60\xD3\x52\xA8\xED\x91\xDF\xE1"
+ "\x91\x73\x7C\x28\xA1\x14\xC3\x4C"
+ "\x82\x72\x4B\x7D\x7D\x32\xD5\x19"
+ "\xE8\xB8\x6B\x30\x21\x09\x0E\x27"
+ "\x10\x9D\x2D\x3A\x6A\x4B\x7B\xE6"
+ "\x8D\x4E\x02\x32\xFF\x7F\x8E\x13"
+ "\xB0\x96\xF4\xC2\xA1\x60\x8A\x69"
+ "\xEF\x0F\x86\xD0\x25\x13\x1A\x7C"
+ "\x6E\xF0\x41\xA3\xFB\xB3\xAB\x40"
+ "\x7D\x19\xA0\x11\x4F\x3E\x1D\x43"
+ "\x65\xFE\x15\x40\xD0\x62\x41\x02"
+ "\xEA\x0C\x7A\xC3\x84\xEE\xB0\xBE"
+ "\xBE\xC8\x57\x51\xCD\x4F\xAD\x5C"
+ "\xCC\x79\xBA\x0D\x85\x3A\xED\x6B"
+ "\xAC\x6B\xA3\x4D\xBC\xE8\x02\x6A"
+ "\xC2\x6D\xBD\x5E\x89\x95\x86\x43"
+ "\x2C\x17\x4B\xC6\x40\xA2\xBD\x24"
+ "\x04\xF0\x86\x08\x78\x18\x42\xE0"
+ "\x39\x1B\x22\x9E\x89\x4C\x04\x6B"
+ "\x65\xC5\xB6\x0E\xF6\x63\xFC\xD7"
+ "\xAE\x9E\x87\x13\xCC\xD3\x1A\xEC"
+ "\xF0\x51\xCC\x93\x68\xFC\xE9\x19"
+ "\x7C\x4E\x9B\xCC\x17\xAD\xD2\xFC"
+ "\x97\x18\x92\xFF\x15\x11\xCE\xED"
+ "\x04\x41\x05\xA3\x92\xFF\x3B\xE6"
+ "\xB6\x8C\x90\xC6\xCD\x15\xA0\x04"
+ "\x25\x8B\x5D\x5B\x5F\xDB\xAE\x68"
+ "\xEF\xB3\x61\x18\xDB\x83\x9B\x39"
+ "\xCA\x82\xD1\x88\xF0\xA2\x5C\x02"
+ "\x87\xBD\x8D\x8F\xBB\x62\xF0\x35"
+ "\x75\x6F\x06\x81\x0A\x97\x4D\xF0"
+ "\x43\x12\x73\x77\xDB\x91\x83\x5B"
+ "\xE7\x3A\xA6\x07\x7B\xBF\x2C\x50"
+ "\x94\xDE\x7B\x65\xDA\x1C\xF1\x9F"
+ "\x7E\x12\x40\xB2\x3E\x19\x23\xF1"
+ "\x7C\x1B\x5F\xA8\xF3\xAC\x63\x87"
+ "\xEB\x3E\x0C\xBE\xA3\x63\x97\x88"
+ "\x8D\x27\xC6\x2A\xF8\xF2\x67\x9A"
+ "\x0D\x14\x16\x2B\x6F\xCB\xD4\x76"
+ "\x14\x48\x2E\xDE\x2A\x44\x5E\x45"
+ "\xF1\x97\x82\xEF\xB7\xAE\xED\x3A"
+ "\xED\x73\xD3\x79\xF7\x38\x1D\xD0"
+ "\xC5\xF8\x69\x83\x28\x84\x87\x56"
+ "\x3F\xAE\x81\x04\x79\x1F\xD1\x09"
+ "\xC5\xE5\x05\x0D\x64\x16\xCE\x42"
+ "\xC5\xF8\xDB\x57\x89\x33\x22\xFC"
+ "\xB4\xD7\x94\xB9\xF3\xCC\x02\x90"
+ "\x02\xBA\x55\x1E\x24\x3E\x02\x1D"
+ "\xC6\xCD\x8F\xD9\xBD\xED\xB0\x51"
+ "\xCD\xE9\xD5\x0C\xFE\x12\x39\xA9"
+ "\x93\x9B\xEE\xB5\x97\x41\xD2\xA0"
+ "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
+ .ilen = 504,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .rlen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .rlen = 504,
.also_non_np = 1,
.np = 2,
- .tap = { 40 - 8, 8 },
+ .tap = { 504 - 8, 8 },
},
};
@@ -2645,14 +4814,130 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .ilen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .ilen = 504,
.result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
- "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC",
- .rlen = 40,
+ "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
+ "\x3D\xA7\xE9\x0A\x5C\x70\x4D\xDE"
+ "\x99\x38\x07\xCA\x1D\x21\xC1\x11"
+ "\x97\xEB\x98\x75\xC4\x73\x45\x83"
+ "\x46\x1C\x9C\x91\x87\xC1\xA0\x56"
+ "\x98\xA1\x8B\xDB\x22\x76\xBD\x62"
+ "\xA4\xBC\xE8\x86\xDA\xD2\x51\x13"
+ "\x13\xD2\x96\x68\x69\x10\x67\x0C"
+ "\xD0\x17\x25\x7C\xB2\xAE\x4F\x93"
+ "\xA6\x82\x20\xCF\x0F\xA6\x47\x79"
+ "\x88\x09\x40\x59\xBD\x12\x64\xB5"
+ "\x19\x38\x0D\xFF\x86\xD9\x42\x20"
+ "\x81\x0D\x96\x99\xAF\x22\x1F\x94"
+ "\x5C\x6E\xEC\xEA\xA3\x39\xCB\x09"
+ "\x43\x19\x7F\xD0\xBB\x10\xC2\x49"
+ "\xF7\xE9\xF2\xEE\xBF\xF7\xF8\xB3"
+ "\x0E\x1A\xF1\x8D\x70\x82\x0C\x04"
+ "\xFD\x29\x1A\xAC\xC0\x92\x48\x34"
+ "\x6A\xE3\x1D\x4F\xFC\x1C\x72\x6A"
+ "\x57\xCB\xAD\xD0\x98\xAB\xB1\x01"
+ "\x03\x6A\x45\xDD\x07\x71\x5F\x5B"
+ "\xB5\x4A\xE4\xE5\xB9\xB9\xBC\xAC"
+ "\x44\xF7\x41\xA4\x5F\x2E\xE9\x28"
+ "\xE3\x05\xD2\x94\x78\x4C\x33\x1B"
+ "\xBD\xC1\x6E\x51\xD9\xAD\xD9\x86"
+ "\x15\x4A\x78\xAE\x7B\xAD\x3B\xBC"
+ "\x2F\xE0\x0E\xC5\x7B\x54\x97\x5F"
+ "\x60\x51\x14\x65\xF9\x91\xE9\xDA"
+ "\x9A\xBC\xFC\x19\x29\x67\xAA\x63"
+ "\x5E\xF2\x48\x88\xEB\x79\xE1\xE4"
+ "\xF7\xF6\x4C\xA9\xE2\x8C\x3B\xE0"
+ "\xED\x52\xAE\x90\x8F\x5B\x98\x34"
+ "\x29\x94\x34\x7F\xF9\x6C\x1E\xB6"
+ "\xA4\xE7\x2D\x06\x54\x9D\xC3\x02"
+ "\xC1\x90\xA4\x72\x31\x6B\x24\x51"
+ "\x0B\xB3\x7C\x63\x15\xBA\xAF\x5D"
+ "\x41\xE0\x37\x6D\xBE\x41\x58\xDE"
+ "\xF2\x07\x62\x99\xBE\xC1\x8C\x0F"
+ "\x0F\x28\xFB\x8F\x0E\x1D\x91\xE2"
+ "\xDA\x99\x5C\x49\xBA\x9C\xA8\x86"
+ "\x82\x63\x11\xB3\x54\x49\x00\x08"
+ "\x07\xF2\xE8\x1F\x34\x49\x61\xF4"
+ "\x81\xE9\xF6\xA9\x5A\x28\x60\x1F"
+ "\x66\x99\x08\x06\xF2\xE8\x2D\xD1"
+ "\xD0\x67\xBA\x32\x1F\x02\x86\x7B"
+ "\xFB\x79\x3D\xC5\xB1\x7F\x15\xAF"
+ "\xD7\xBF\x31\x46\x22\x7F\xAE\x5B"
+ "\x8B\x95\x47\xC2\xB1\x62\xA1\xCE"
+ "\x52\xAC\x9C\x8B\xC2\x49\x7F\xBC"
+ "\x9C\x89\xB8\xB6\xCA\xE3\x8F\xEA"
+ "\xAC\xB4\x5D\xE4\x50\xDC\x3A\xB5"
+ "\x91\x04\x94\x99\x03\x3B\x42\x6D"
+ "\x9C\x4A\x02\xF5\xB5\x38\x98\xA8"
+ "\x5C\x97\x2E\x4D\x79\x67\x71\xAF"
+ "\xF0\x70\x77\xFF\x2D\xDA\xA0\x9E"
+ "\x23\x8D\xD6\xA6\x68\x10\x78\x9A"
+ "\x64\xBB\x15\xB8\x56\xCF\xEE\xE5"
+ "\x32\x44\x96\x1C\xD8\xEB\x95\xD2"
+ "\xF3\x71\xEF\xEB\x4E\xBB\x4D\x29",
+ .rlen = 504,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -2665,18 +4950,132 @@ static struct cipher_testvec bf_ctr_enc_tv_template[] = {
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B",
- .ilen = 43,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92",
+ .ilen = 503,
.result = "\xC7\xA3\xDF\xB9\x05\xF4\x9E\x8D"
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
- "\x3D\xA7\xE9",
- .rlen = 43,
+ "\x3D\xA7\xE9\x0A\x5C\x70\x4D\xDE"
+ "\x99\x38\x07\xCA\x1D\x21\xC1\x11"
+ "\x97\xEB\x98\x75\xC4\x73\x45\x83"
+ "\x46\x1C\x9C\x91\x87\xC1\xA0\x56"
+ "\x98\xA1\x8B\xDB\x22\x76\xBD\x62"
+ "\xA4\xBC\xE8\x86\xDA\xD2\x51\x13"
+ "\x13\xD2\x96\x68\x69\x10\x67\x0C"
+ "\xD0\x17\x25\x7C\xB2\xAE\x4F\x93"
+ "\xA6\x82\x20\xCF\x0F\xA6\x47\x79"
+ "\x88\x09\x40\x59\xBD\x12\x64\xB5"
+ "\x19\x38\x0D\xFF\x86\xD9\x42\x20"
+ "\x81\x0D\x96\x99\xAF\x22\x1F\x94"
+ "\x5C\x6E\xEC\xEA\xA3\x39\xCB\x09"
+ "\x43\x19\x7F\xD0\xBB\x10\xC2\x49"
+ "\xF7\xE9\xF2\xEE\xBF\xF7\xF8\xB3"
+ "\x0E\x1A\xF1\x8D\x70\x82\x0C\x04"
+ "\xFD\x29\x1A\xAC\xC0\x92\x48\x34"
+ "\x6A\xE3\x1D\x4F\xFC\x1C\x72\x6A"
+ "\x57\xCB\xAD\xD0\x98\xAB\xB1\x01"
+ "\x03\x6A\x45\xDD\x07\x71\x5F\x5B"
+ "\xB5\x4A\xE4\xE5\xB9\xB9\xBC\xAC"
+ "\x44\xF7\x41\xA4\x5F\x2E\xE9\x28"
+ "\xE3\x05\xD2\x94\x78\x4C\x33\x1B"
+ "\xBD\xC1\x6E\x51\xD9\xAD\xD9\x86"
+ "\x15\x4A\x78\xAE\x7B\xAD\x3B\xBC"
+ "\x2F\xE0\x0E\xC5\x7B\x54\x97\x5F"
+ "\x60\x51\x14\x65\xF9\x91\xE9\xDA"
+ "\x9A\xBC\xFC\x19\x29\x67\xAA\x63"
+ "\x5E\xF2\x48\x88\xEB\x79\xE1\xE4"
+ "\xF7\xF6\x4C\xA9\xE2\x8C\x3B\xE0"
+ "\xED\x52\xAE\x90\x8F\x5B\x98\x34"
+ "\x29\x94\x34\x7F\xF9\x6C\x1E\xB6"
+ "\xA4\xE7\x2D\x06\x54\x9D\xC3\x02"
+ "\xC1\x90\xA4\x72\x31\x6B\x24\x51"
+ "\x0B\xB3\x7C\x63\x15\xBA\xAF\x5D"
+ "\x41\xE0\x37\x6D\xBE\x41\x58\xDE"
+ "\xF2\x07\x62\x99\xBE\xC1\x8C\x0F"
+ "\x0F\x28\xFB\x8F\x0E\x1D\x91\xE2"
+ "\xDA\x99\x5C\x49\xBA\x9C\xA8\x86"
+ "\x82\x63\x11\xB3\x54\x49\x00\x08"
+ "\x07\xF2\xE8\x1F\x34\x49\x61\xF4"
+ "\x81\xE9\xF6\xA9\x5A\x28\x60\x1F"
+ "\x66\x99\x08\x06\xF2\xE8\x2D\xD1"
+ "\xD0\x67\xBA\x32\x1F\x02\x86\x7B"
+ "\xFB\x79\x3D\xC5\xB1\x7F\x15\xAF"
+ "\xD7\xBF\x31\x46\x22\x7F\xAE\x5B"
+ "\x8B\x95\x47\xC2\xB1\x62\xA1\xCE"
+ "\x52\xAC\x9C\x8B\xC2\x49\x7F\xBC"
+ "\x9C\x89\xB8\xB6\xCA\xE3\x8F\xEA"
+ "\xAC\xB4\x5D\xE4\x50\xDC\x3A\xB5"
+ "\x91\x04\x94\x99\x03\x3B\x42\x6D"
+ "\x9C\x4A\x02\xF5\xB5\x38\x98\xA8"
+ "\x5C\x97\x2E\x4D\x79\x67\x71\xAF"
+ "\xF0\x70\x77\xFF\x2D\xDA\xA0\x9E"
+ "\x23\x8D\xD6\xA6\x68\x10\x78\x9A"
+ "\x64\xBB\x15\xB8\x56\xCF\xEE\xE5"
+ "\x32\x44\x96\x1C\xD8\xEB\x95\xD2"
+ "\xF3\x71\xEF\xEB\x4E\xBB\x4D",
+ .rlen = 503,
.also_non_np = 1,
.np = 2,
- .tap = { 43 - 8, 8 },
+ .tap = { 503 - 8, 8 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -2827,14 +5226,130 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
"\x9E\xDF\x38\x18\x83\x07\xEF\xC1"
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
- "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC",
- .ilen = 40,
+ "\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
+ "\x3D\xA7\xE9\x0A\x5C\x70\x4D\xDE"
+ "\x99\x38\x07\xCA\x1D\x21\xC1\x11"
+ "\x97\xEB\x98\x75\xC4\x73\x45\x83"
+ "\x46\x1C\x9C\x91\x87\xC1\xA0\x56"
+ "\x98\xA1\x8B\xDB\x22\x76\xBD\x62"
+ "\xA4\xBC\xE8\x86\xDA\xD2\x51\x13"
+ "\x13\xD2\x96\x68\x69\x10\x67\x0C"
+ "\xD0\x17\x25\x7C\xB2\xAE\x4F\x93"
+ "\xA6\x82\x20\xCF\x0F\xA6\x47\x79"
+ "\x88\x09\x40\x59\xBD\x12\x64\xB5"
+ "\x19\x38\x0D\xFF\x86\xD9\x42\x20"
+ "\x81\x0D\x96\x99\xAF\x22\x1F\x94"
+ "\x5C\x6E\xEC\xEA\xA3\x39\xCB\x09"
+ "\x43\x19\x7F\xD0\xBB\x10\xC2\x49"
+ "\xF7\xE9\xF2\xEE\xBF\xF7\xF8\xB3"
+ "\x0E\x1A\xF1\x8D\x70\x82\x0C\x04"
+ "\xFD\x29\x1A\xAC\xC0\x92\x48\x34"
+ "\x6A\xE3\x1D\x4F\xFC\x1C\x72\x6A"
+ "\x57\xCB\xAD\xD0\x98\xAB\xB1\x01"
+ "\x03\x6A\x45\xDD\x07\x71\x5F\x5B"
+ "\xB5\x4A\xE4\xE5\xB9\xB9\xBC\xAC"
+ "\x44\xF7\x41\xA4\x5F\x2E\xE9\x28"
+ "\xE3\x05\xD2\x94\x78\x4C\x33\x1B"
+ "\xBD\xC1\x6E\x51\xD9\xAD\xD9\x86"
+ "\x15\x4A\x78\xAE\x7B\xAD\x3B\xBC"
+ "\x2F\xE0\x0E\xC5\x7B\x54\x97\x5F"
+ "\x60\x51\x14\x65\xF9\x91\xE9\xDA"
+ "\x9A\xBC\xFC\x19\x29\x67\xAA\x63"
+ "\x5E\xF2\x48\x88\xEB\x79\xE1\xE4"
+ "\xF7\xF6\x4C\xA9\xE2\x8C\x3B\xE0"
+ "\xED\x52\xAE\x90\x8F\x5B\x98\x34"
+ "\x29\x94\x34\x7F\xF9\x6C\x1E\xB6"
+ "\xA4\xE7\x2D\x06\x54\x9D\xC3\x02"
+ "\xC1\x90\xA4\x72\x31\x6B\x24\x51"
+ "\x0B\xB3\x7C\x63\x15\xBA\xAF\x5D"
+ "\x41\xE0\x37\x6D\xBE\x41\x58\xDE"
+ "\xF2\x07\x62\x99\xBE\xC1\x8C\x0F"
+ "\x0F\x28\xFB\x8F\x0E\x1D\x91\xE2"
+ "\xDA\x99\x5C\x49\xBA\x9C\xA8\x86"
+ "\x82\x63\x11\xB3\x54\x49\x00\x08"
+ "\x07\xF2\xE8\x1F\x34\x49\x61\xF4"
+ "\x81\xE9\xF6\xA9\x5A\x28\x60\x1F"
+ "\x66\x99\x08\x06\xF2\xE8\x2D\xD1"
+ "\xD0\x67\xBA\x32\x1F\x02\x86\x7B"
+ "\xFB\x79\x3D\xC5\xB1\x7F\x15\xAF"
+ "\xD7\xBF\x31\x46\x22\x7F\xAE\x5B"
+ "\x8B\x95\x47\xC2\xB1\x62\xA1\xCE"
+ "\x52\xAC\x9C\x8B\xC2\x49\x7F\xBC"
+ "\x9C\x89\xB8\xB6\xCA\xE3\x8F\xEA"
+ "\xAC\xB4\x5D\xE4\x50\xDC\x3A\xB5"
+ "\x91\x04\x94\x99\x03\x3B\x42\x6D"
+ "\x9C\x4A\x02\xF5\xB5\x38\x98\xA8"
+ "\x5C\x97\x2E\x4D\x79\x67\x71\xAF"
+ "\xF0\x70\x77\xFF\x2D\xDA\xA0\x9E"
+ "\x23\x8D\xD6\xA6\x68\x10\x78\x9A"
+ "\x64\xBB\x15\xB8\x56\xCF\xEE\xE5"
+ "\x32\x44\x96\x1C\xD8\xEB\x95\xD2"
+ "\xF3\x71\xEF\xEB\x4E\xBB\x4D\x29",
+ .ilen = 504,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
- "\x1E\x92\x29\xC0\x34\xCB\x62\xF9",
- .rlen = 40,
+ "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
+ .rlen = 504,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -2847,18 +5362,132 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
"\x93\x3C\xAA\xAA\xFE\x06\x42\xCC"
"\x0D\x70\x86\x5A\x44\xAD\x85\x17"
"\xE4\x1F\x5E\xA5\x89\xAC\x32\xBC"
- "\x3D\xA7\xE9",
- .ilen = 43,
+ "\x3D\xA7\xE9\x0A\x5C\x70\x4D\xDE"
+ "\x99\x38\x07\xCA\x1D\x21\xC1\x11"
+ "\x97\xEB\x98\x75\xC4\x73\x45\x83"
+ "\x46\x1C\x9C\x91\x87\xC1\xA0\x56"
+ "\x98\xA1\x8B\xDB\x22\x76\xBD\x62"
+ "\xA4\xBC\xE8\x86\xDA\xD2\x51\x13"
+ "\x13\xD2\x96\x68\x69\x10\x67\x0C"
+ "\xD0\x17\x25\x7C\xB2\xAE\x4F\x93"
+ "\xA6\x82\x20\xCF\x0F\xA6\x47\x79"
+ "\x88\x09\x40\x59\xBD\x12\x64\xB5"
+ "\x19\x38\x0D\xFF\x86\xD9\x42\x20"
+ "\x81\x0D\x96\x99\xAF\x22\x1F\x94"
+ "\x5C\x6E\xEC\xEA\xA3\x39\xCB\x09"
+ "\x43\x19\x7F\xD0\xBB\x10\xC2\x49"
+ "\xF7\xE9\xF2\xEE\xBF\xF7\xF8\xB3"
+ "\x0E\x1A\xF1\x8D\x70\x82\x0C\x04"
+ "\xFD\x29\x1A\xAC\xC0\x92\x48\x34"
+ "\x6A\xE3\x1D\x4F\xFC\x1C\x72\x6A"
+ "\x57\xCB\xAD\xD0\x98\xAB\xB1\x01"
+ "\x03\x6A\x45\xDD\x07\x71\x5F\x5B"
+ "\xB5\x4A\xE4\xE5\xB9\xB9\xBC\xAC"
+ "\x44\xF7\x41\xA4\x5F\x2E\xE9\x28"
+ "\xE3\x05\xD2\x94\x78\x4C\x33\x1B"
+ "\xBD\xC1\x6E\x51\xD9\xAD\xD9\x86"
+ "\x15\x4A\x78\xAE\x7B\xAD\x3B\xBC"
+ "\x2F\xE0\x0E\xC5\x7B\x54\x97\x5F"
+ "\x60\x51\x14\x65\xF9\x91\xE9\xDA"
+ "\x9A\xBC\xFC\x19\x29\x67\xAA\x63"
+ "\x5E\xF2\x48\x88\xEB\x79\xE1\xE4"
+ "\xF7\xF6\x4C\xA9\xE2\x8C\x3B\xE0"
+ "\xED\x52\xAE\x90\x8F\x5B\x98\x34"
+ "\x29\x94\x34\x7F\xF9\x6C\x1E\xB6"
+ "\xA4\xE7\x2D\x06\x54\x9D\xC3\x02"
+ "\xC1\x90\xA4\x72\x31\x6B\x24\x51"
+ "\x0B\xB3\x7C\x63\x15\xBA\xAF\x5D"
+ "\x41\xE0\x37\x6D\xBE\x41\x58\xDE"
+ "\xF2\x07\x62\x99\xBE\xC1\x8C\x0F"
+ "\x0F\x28\xFB\x8F\x0E\x1D\x91\xE2"
+ "\xDA\x99\x5C\x49\xBA\x9C\xA8\x86"
+ "\x82\x63\x11\xB3\x54\x49\x00\x08"
+ "\x07\xF2\xE8\x1F\x34\x49\x61\xF4"
+ "\x81\xE9\xF6\xA9\x5A\x28\x60\x1F"
+ "\x66\x99\x08\x06\xF2\xE8\x2D\xD1"
+ "\xD0\x67\xBA\x32\x1F\x02\x86\x7B"
+ "\xFB\x79\x3D\xC5\xB1\x7F\x15\xAF"
+ "\xD7\xBF\x31\x46\x22\x7F\xAE\x5B"
+ "\x8B\x95\x47\xC2\xB1\x62\xA1\xCE"
+ "\x52\xAC\x9C\x8B\xC2\x49\x7F\xBC"
+ "\x9C\x89\xB8\xB6\xCA\xE3\x8F\xEA"
+ "\xAC\xB4\x5D\xE4\x50\xDC\x3A\xB5"
+ "\x91\x04\x94\x99\x03\x3B\x42\x6D"
+ "\x9C\x4A\x02\xF5\xB5\x38\x98\xA8"
+ "\x5C\x97\x2E\x4D\x79\x67\x71\xAF"
+ "\xF0\x70\x77\xFF\x2D\xDA\xA0\x9E"
+ "\x23\x8D\xD6\xA6\x68\x10\x78\x9A"
+ "\x64\xBB\x15\xB8\x56\xCF\xEE\xE5"
+ "\x32\x44\x96\x1C\xD8\xEB\x95\xD2"
+ "\xF3\x71\xEF\xEB\x4E\xBB\x4D",
+ .ilen = 503,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B",
- .rlen = 43,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59\xF0\x64\xFB\x92",
+ .rlen = 503,
.also_non_np = 1,
.np = 2,
- .tap = { 43 - 8, 8 },
+ .tap = { 503 - 8, 8 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -5808,8 +8437,52 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .ilen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\xFB\xB0\x5D\xDE\xC0\xFE\xFC\xEB"
"\xB1\x80\x10\x43\xDE\x62\x70\xBD"
"\xFA\x8A\x93\xEA\x6B\xF7\xC5\xD7"
@@ -5827,11 +8500,55 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
"\x9D\x74\x2B\x77\x53\x2D\xE5\xBD"
"\x69\xDA\x7A\x01\xF5\x6A\x70\x39"
"\x30\xD4\x2C\xF2\x8E\x06\x4B\x39"
- "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6",
- .rlen = 144,
+ "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6"
+ "\xB6\x31\x36\x34\x38\x3C\x1D\x69"
+ "\x9F\x47\x28\x9A\x1D\x96\x70\x54"
+ "\x8E\x88\xCB\xE0\xF5\x6A\xAE\x0A"
+ "\x3C\xD5\x93\x1C\x21\xC9\x14\x3A"
+ "\x23\x9C\x9B\x79\xC7\x75\xC8\x39"
+ "\xA6\xAC\x65\x9A\x99\x37\xAF\x6D"
+ "\xBD\xB5\x32\xFD\xD8\x9C\x95\x7B"
+ "\xC6\x6A\x80\x64\xEA\xEF\x6D\x3F"
+ "\xA9\xFE\x5B\x16\xA3\xCF\x32\xC8"
+ "\xEF\x50\x22\x20\x93\x30\xBE\xE2"
+ "\x38\x05\x65\xAF\xBA\xB6\xE4\x72"
+ "\xA9\xEE\x05\x42\x88\xBD\x9D\x49"
+ "\xAD\x93\xCA\x4D\x45\x11\x43\x4D"
+ "\xB8\xF5\x74\x2B\x48\xE7\x21\xE4"
+ "\x4E\x3A\x4C\xDE\x65\x7A\x5A\xAD"
+ "\x86\xE6\x23\xEC\x6B\xA7\x17\xE6"
+ "\xF6\xA1\xAC\x29\xAE\xF9\x9B\x69"
+ "\x73\x65\x65\x51\xD6\x0B\x4E\x8C"
+ "\x17\x15\x9D\xB0\xCF\xB2\x42\x2B"
+ "\x51\xC3\x03\xE8\xB7\x7D\x2D\x39"
+ "\xE8\x10\x93\x16\xC8\x68\x4C\x60"
+ "\x87\x70\x14\xD0\x01\x57\xCB\x42"
+ "\x13\x59\xB1\x7F\x12\x4F\xBB\xC7"
+ "\xBD\x2B\xD4\xA9\x12\x26\x4F\xDE"
+ "\xFD\x72\xEC\xD7\x6F\x97\x14\x90"
+ "\x0E\x37\x13\xE6\x67\x1D\xE5\xFE"
+ "\x9E\x18\x3C\x8F\x3A\x3F\x59\x9B"
+ "\x71\x80\x05\x35\x3F\x40\x0B\x21"
+ "\x76\xE5\xEF\x42\x6C\xDB\x31\x05"
+ "\x5F\x05\xCF\x14\xE3\xF0\x61\xA2"
+ "\x49\x03\x5E\x77\x2E\x20\xBA\xA1"
+ "\xAF\x46\x51\xC0\x2B\xC4\x64\x1E"
+ "\x65\xCC\x51\x58\x0A\xDF\xF0\x5F"
+ "\x75\x9F\x48\xCD\x81\xEC\xC3\xF6"
+ "\xED\xC9\x4B\x7B\x4E\x26\x23\xE1"
+ "\xBB\xE9\x83\x0B\xCF\xE4\xDE\x00"
+ "\x48\xFF\xBF\x6C\xB4\x72\x16\xEF"
+ "\xC7\x46\xEE\x48\x8C\xB8\xAF\x45"
+ "\x91\x76\xE7\x6E\x65\x3D\x15\x86"
+ "\x10\xF8\xDB\x66\x97\x7C\x43\x4D"
+ "\x79\x12\x4E\xCE\x06\xD1\xD1\x6A"
+ "\x34\xC1\xC9\xF2\x28\x4A\xCD\x02"
+ "\x75\x55\x9B\xFF\x36\x73\xAB\x7C"
+ "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 144 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -5946,8 +8663,52 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
"\x9D\x74\x2B\x77\x53\x2D\xE5\xBD"
"\x69\xDA\x7A\x01\xF5\x6A\x70\x39"
"\x30\xD4\x2C\xF2\x8E\x06\x4B\x39"
- "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6",
- .ilen = 144,
+ "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6"
+ "\xB6\x31\x36\x34\x38\x3C\x1D\x69"
+ "\x9F\x47\x28\x9A\x1D\x96\x70\x54"
+ "\x8E\x88\xCB\xE0\xF5\x6A\xAE\x0A"
+ "\x3C\xD5\x93\x1C\x21\xC9\x14\x3A"
+ "\x23\x9C\x9B\x79\xC7\x75\xC8\x39"
+ "\xA6\xAC\x65\x9A\x99\x37\xAF\x6D"
+ "\xBD\xB5\x32\xFD\xD8\x9C\x95\x7B"
+ "\xC6\x6A\x80\x64\xEA\xEF\x6D\x3F"
+ "\xA9\xFE\x5B\x16\xA3\xCF\x32\xC8"
+ "\xEF\x50\x22\x20\x93\x30\xBE\xE2"
+ "\x38\x05\x65\xAF\xBA\xB6\xE4\x72"
+ "\xA9\xEE\x05\x42\x88\xBD\x9D\x49"
+ "\xAD\x93\xCA\x4D\x45\x11\x43\x4D"
+ "\xB8\xF5\x74\x2B\x48\xE7\x21\xE4"
+ "\x4E\x3A\x4C\xDE\x65\x7A\x5A\xAD"
+ "\x86\xE6\x23\xEC\x6B\xA7\x17\xE6"
+ "\xF6\xA1\xAC\x29\xAE\xF9\x9B\x69"
+ "\x73\x65\x65\x51\xD6\x0B\x4E\x8C"
+ "\x17\x15\x9D\xB0\xCF\xB2\x42\x2B"
+ "\x51\xC3\x03\xE8\xB7\x7D\x2D\x39"
+ "\xE8\x10\x93\x16\xC8\x68\x4C\x60"
+ "\x87\x70\x14\xD0\x01\x57\xCB\x42"
+ "\x13\x59\xB1\x7F\x12\x4F\xBB\xC7"
+ "\xBD\x2B\xD4\xA9\x12\x26\x4F\xDE"
+ "\xFD\x72\xEC\xD7\x6F\x97\x14\x90"
+ "\x0E\x37\x13\xE6\x67\x1D\xE5\xFE"
+ "\x9E\x18\x3C\x8F\x3A\x3F\x59\x9B"
+ "\x71\x80\x05\x35\x3F\x40\x0B\x21"
+ "\x76\xE5\xEF\x42\x6C\xDB\x31\x05"
+ "\x5F\x05\xCF\x14\xE3\xF0\x61\xA2"
+ "\x49\x03\x5E\x77\x2E\x20\xBA\xA1"
+ "\xAF\x46\x51\xC0\x2B\xC4\x64\x1E"
+ "\x65\xCC\x51\x58\x0A\xDF\xF0\x5F"
+ "\x75\x9F\x48\xCD\x81\xEC\xC3\xF6"
+ "\xED\xC9\x4B\x7B\x4E\x26\x23\xE1"
+ "\xBB\xE9\x83\x0B\xCF\xE4\xDE\x00"
+ "\x48\xFF\xBF\x6C\xB4\x72\x16\xEF"
+ "\xC7\x46\xEE\x48\x8C\xB8\xAF\x45"
+ "\x91\x76\xE7\x6E\x65\x3D\x15\x86"
+ "\x10\xF8\xDB\x66\x97\x7C\x43\x4D"
+ "\x79\x12\x4E\xCE\x06\xD1\xD1\x6A"
+ "\x34\xC1\xC9\xF2\x28\x4A\xCD\x02"
+ "\x75\x55\x9B\xFF\x36\x73\xAB\x7C"
+ "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -5965,11 +8726,55 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .rlen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 144 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -6040,8 +8845,52 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .ilen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\x80\xCF\x11\x41\x1A\xB9\x4B\x9C"
"\xFF\xB7\x6C\xEA\xF0\xAF\x77\x6E"
"\x71\x75\x95\x9D\x4E\x1C\xCF\xAD"
@@ -6059,11 +8908,55 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
"\x15\x26\xE1\xDB\xA4\x3D\x74\xD2"
"\x41\x1E\x3F\xA9\xC6\x7D\x2A\xAB"
"\x27\xDF\x89\x1D\x86\x3E\xF7\x5A"
- "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C",
- .rlen = 144,
+ "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C"
+ "\x2D\x12\xA5\x05\x92\xCB\xD7\x4A"
+ "\x4D\x1E\x88\x21\xE1\x63\xB4\xFC"
+ "\x4A\xF2\xCD\x35\xB9\xD7\x70\x97"
+ "\x5A\x5E\x7E\x96\x52\x20\xDC\x25"
+ "\xE9\x6B\x36\xB4\xE0\x98\x85\x2C"
+ "\x3C\xD2\xF7\x78\x8A\x73\x26\x9B"
+ "\xAF\x0B\x11\xE8\x4D\x67\x23\xE9"
+ "\x77\xDF\x58\xF6\x6F\x9E\xA4\xC5"
+ "\x10\xA1\x82\x0E\x80\xA0\x8F\x4B"
+ "\xA1\xC0\x12\x54\x4E\xC9\x20\x92"
+ "\x11\x00\x10\x4E\xB3\x7C\xCA\x63"
+ "\xE5\x3F\xD3\x41\x37\xCD\x74\xB7"
+ "\xA5\x7C\x61\xB8\x0B\x7A\x7F\x4D"
+ "\xFE\x96\x7D\x1B\xBE\x60\x37\xB7"
+ "\x81\x92\x66\x67\x15\x1E\x39\x98"
+ "\x52\xC0\xF4\x69\xC0\x99\x4F\x5A"
+ "\x2E\x32\xAD\x7C\x8B\xE9\xAD\x05"
+ "\x55\xF9\x0A\x1F\x97\x5C\xFA\x2B"
+ "\xF4\x99\x76\x3A\x6E\x4D\xE1\x4C"
+ "\x14\x4E\x6F\x87\xEE\x1A\x85\xA3"
+ "\x96\xC6\x66\x49\xDA\x0D\x71\xAC"
+ "\x04\x05\x46\xD3\x90\x0F\x64\x64"
+ "\x01\x66\x2C\x62\x5D\x34\xD1\xCB"
+ "\x3A\x24\xCE\x95\xEF\xAE\x2C\x97"
+ "\x0E\x0C\x1D\x36\x49\xEB\xE9\x3D"
+ "\x62\xA6\x19\x28\x9E\x26\xB4\x3F"
+ "\xD7\x55\x42\x3C\xCD\x72\x0A\xF0"
+ "\x7D\xE9\x95\x45\x86\xED\xB1\xE0"
+ "\x8D\xE9\xC5\x86\x13\x24\x28\x7D"
+ "\x74\xEF\xCA\x50\x12\x7E\x64\x8F"
+ "\x1B\xF5\x5B\xFE\xE2\xAC\xFA\xE7"
+ "\xBD\x38\x8C\x11\x20\xEF\xB1\xAA"
+ "\x7B\xE5\xE5\x78\xAD\x9D\x2D\xA2"
+ "\x8E\xDD\x48\xB3\xEF\x18\x92\x7E"
+ "\xE6\x75\x0D\x54\x64\x11\xA3\x3A"
+ "\xDB\x97\x0F\xD3\xDF\x07\xD3\x7E"
+ "\x1E\xD1\x87\xE4\x74\xBB\x46\xF4"
+ "\xBA\x23\x2D\x8D\x29\x07\x12\xCF"
+ "\x34\xCD\x72\x7F\x01\x30\xE7\xA0"
+ "\xF8\xDD\xA8\x08\xF0\xBC\xB1\xA2"
+ "\xCC\xE1\x6B\x5F\xBE\xEA\xF1\xE4"
+ "\x02\xC4\xAF\xFA\xAD\x31\xF4\xBF"
+ "\xFC\x66\xAA\x37\xF2\x37\x39\x6B"
+ "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 144 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -6093,8 +8986,52 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
"\x15\x26\xE1\xDB\xA4\x3D\x74\xD2"
"\x41\x1E\x3F\xA9\xC6\x7D\x2A\xAB"
"\x27\xDF\x89\x1D\x86\x3E\xF7\x5A"
- "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C",
- .ilen = 144,
+ "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C"
+ "\x2D\x12\xA5\x05\x92\xCB\xD7\x4A"
+ "\x4D\x1E\x88\x21\xE1\x63\xB4\xFC"
+ "\x4A\xF2\xCD\x35\xB9\xD7\x70\x97"
+ "\x5A\x5E\x7E\x96\x52\x20\xDC\x25"
+ "\xE9\x6B\x36\xB4\xE0\x98\x85\x2C"
+ "\x3C\xD2\xF7\x78\x8A\x73\x26\x9B"
+ "\xAF\x0B\x11\xE8\x4D\x67\x23\xE9"
+ "\x77\xDF\x58\xF6\x6F\x9E\xA4\xC5"
+ "\x10\xA1\x82\x0E\x80\xA0\x8F\x4B"
+ "\xA1\xC0\x12\x54\x4E\xC9\x20\x92"
+ "\x11\x00\x10\x4E\xB3\x7C\xCA\x63"
+ "\xE5\x3F\xD3\x41\x37\xCD\x74\xB7"
+ "\xA5\x7C\x61\xB8\x0B\x7A\x7F\x4D"
+ "\xFE\x96\x7D\x1B\xBE\x60\x37\xB7"
+ "\x81\x92\x66\x67\x15\x1E\x39\x98"
+ "\x52\xC0\xF4\x69\xC0\x99\x4F\x5A"
+ "\x2E\x32\xAD\x7C\x8B\xE9\xAD\x05"
+ "\x55\xF9\x0A\x1F\x97\x5C\xFA\x2B"
+ "\xF4\x99\x76\x3A\x6E\x4D\xE1\x4C"
+ "\x14\x4E\x6F\x87\xEE\x1A\x85\xA3"
+ "\x96\xC6\x66\x49\xDA\x0D\x71\xAC"
+ "\x04\x05\x46\xD3\x90\x0F\x64\x64"
+ "\x01\x66\x2C\x62\x5D\x34\xD1\xCB"
+ "\x3A\x24\xCE\x95\xEF\xAE\x2C\x97"
+ "\x0E\x0C\x1D\x36\x49\xEB\xE9\x3D"
+ "\x62\xA6\x19\x28\x9E\x26\xB4\x3F"
+ "\xD7\x55\x42\x3C\xCD\x72\x0A\xF0"
+ "\x7D\xE9\x95\x45\x86\xED\xB1\xE0"
+ "\x8D\xE9\xC5\x86\x13\x24\x28\x7D"
+ "\x74\xEF\xCA\x50\x12\x7E\x64\x8F"
+ "\x1B\xF5\x5B\xFE\xE2\xAC\xFA\xE7"
+ "\xBD\x38\x8C\x11\x20\xEF\xB1\xAA"
+ "\x7B\xE5\xE5\x78\xAD\x9D\x2D\xA2"
+ "\x8E\xDD\x48\xB3\xEF\x18\x92\x7E"
+ "\xE6\x75\x0D\x54\x64\x11\xA3\x3A"
+ "\xDB\x97\x0F\xD3\xDF\x07\xD3\x7E"
+ "\x1E\xD1\x87\xE4\x74\xBB\x46\xF4"
+ "\xBA\x23\x2D\x8D\x29\x07\x12\xCF"
+ "\x34\xCD\x72\x7F\x01\x30\xE7\xA0"
+ "\xF8\xDD\xA8\x08\xF0\xBC\xB1\xA2"
+ "\xCC\xE1\x6B\x5F\xBE\xEA\xF1\xE4"
+ "\x02\xC4\xAF\xFA\xAD\x31\xF4\xBF"
+ "\xFC\x66\xAA\x37\xF2\x37\x39\x6B"
+ "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -6112,11 +9049,55 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .rlen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 144 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -6146,8 +9127,52 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .ilen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
"\x37\x69\xE3\x3A\x22\x85\x48\x46"
"\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
@@ -6165,8 +9190,52 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
"\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
"\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
"\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
- "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9",
- .rlen = 144,
+ "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
+ "\xE6\xD0\x97\x81\xDE\xD1\xFB\x8A"
+ "\x30\xDB\xA3\x5D\xEC\x25\x0B\x86"
+ "\x71\xC8\xA7\x67\xE8\xBC\x7D\x4C"
+ "\xAE\x82\xD3\x73\x31\x09\xCB\xB3"
+ "\x4D\xD4\xC0\x8A\x2B\xFA\xA6\x55"
+ "\x39\x0A\xBC\x6E\x75\xAB\xC2\xE2"
+ "\x8A\xF2\x26\xCD\x63\x38\x35\xF7"
+ "\xAE\x12\x83\xCD\x8A\x9E\x7E\x4C"
+ "\xFE\x4D\xD7\xCE\x5C\x6E\x4C\xAF"
+ "\xE3\xCD\x76\xA7\x87\xA1\x54\x7C"
+ "\xEC\x32\xC7\x83\x2A\xFF\xF8\xEA"
+ "\x87\xB2\x47\xA3\x9D\xC2\x9C\xA2"
+ "\xB7\x2C\x7C\x1A\x24\xCB\x88\x61"
+ "\xFF\xA7\x1A\x16\x01\xDD\x4B\xFC"
+ "\x2E\xE0\x48\x67\x09\x42\xCC\x91"
+ "\xBE\x20\x38\xC0\x5E\x3B\x95\x00"
+ "\xA1\x96\x66\x0B\x8A\xE9\x9E\xF7"
+ "\x6B\x34\x0A\x51\xC0\x3B\xEB\x71"
+ "\x07\x97\x38\x4B\x5C\x56\x98\x67"
+ "\x78\x9C\xD0\x0E\x2B\xB5\x67\x90"
+ "\x75\xF8\xFE\x6D\x4E\x85\xCC\x0D"
+ "\x18\x06\x15\x9D\x5A\x10\x13\x37"
+ "\xA3\xD6\x68\xA2\xDF\x7E\xC7\x12"
+ "\xC9\x0D\x4D\x91\xB0\x2A\x55\xFF"
+ "\x6F\x73\x13\xDF\x28\xB5\x2A\x2C"
+ "\xE4\xFC\x20\xD9\xF1\x7A\x82\xB1"
+ "\xCB\x57\xB6\x3D\x8C\xF4\x8E\x27"
+ "\x37\xDC\x35\xF3\x79\x01\x53\xA4"
+ "\x7B\x37\xDE\x7C\x04\xAE\x50\xDB"
+ "\x9B\x1E\x8C\x07\xA7\x52\x49\x50"
+ "\x34\x25\x65\xDD\xA9\x8F\x7E\xBD"
+ "\x7A\xC9\x36\xAE\xDE\x21\x48\x64"
+ "\xC2\x02\xBA\xBE\x11\x1E\x3D\x9C"
+ "\x98\x52\xCC\x04\xBD\x5E\x61\x26"
+ "\x10\xD3\x21\xD9\x6E\x25\x98\x77"
+ "\x8E\x98\x63\xF6\xF6\x52\xFB\x13"
+ "\xAA\x30\xF2\xB9\xA4\x43\x53\x39"
+ "\x1C\x97\x07\x7E\x6B\xFF\x3D\x43"
+ "\xA6\x71\x6B\x66\x8F\x58\x3F\x71"
+ "\x90\x47\x40\x92\xE6\x69\xD1\x96"
+ "\x34\xB3\x3B\xE5\x43\xE4\xD5\x56"
+ "\xB2\xE6\x7E\x86\x7A\x12\x17\x5B"
+ "\x30\xF3\x9B\x0D\xFA\x57\xE4\x50"
+ "\x40\x53\x77\x8C\x15\xF8\x8D\x13",
+ .rlen = 496,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -6193,8 +9262,52 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
"\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
- "\xF1\x65\xFC",
- .ilen = 147,
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59",
+ .ilen = 499,
.result = "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
"\x37\x69\xE3\x3A\x22\x85\x48\x46"
"\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
@@ -6213,11 +9326,55 @@ static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
"\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
"\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
"\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
- "\xE6\xD0\x97",
- .rlen = 147,
+ "\xE6\xD0\x97\x81\xDE\xD1\xFB\x8A"
+ "\x30\xDB\xA3\x5D\xEC\x25\x0B\x86"
+ "\x71\xC8\xA7\x67\xE8\xBC\x7D\x4C"
+ "\xAE\x82\xD3\x73\x31\x09\xCB\xB3"
+ "\x4D\xD4\xC0\x8A\x2B\xFA\xA6\x55"
+ "\x39\x0A\xBC\x6E\x75\xAB\xC2\xE2"
+ "\x8A\xF2\x26\xCD\x63\x38\x35\xF7"
+ "\xAE\x12\x83\xCD\x8A\x9E\x7E\x4C"
+ "\xFE\x4D\xD7\xCE\x5C\x6E\x4C\xAF"
+ "\xE3\xCD\x76\xA7\x87\xA1\x54\x7C"
+ "\xEC\x32\xC7\x83\x2A\xFF\xF8\xEA"
+ "\x87\xB2\x47\xA3\x9D\xC2\x9C\xA2"
+ "\xB7\x2C\x7C\x1A\x24\xCB\x88\x61"
+ "\xFF\xA7\x1A\x16\x01\xDD\x4B\xFC"
+ "\x2E\xE0\x48\x67\x09\x42\xCC\x91"
+ "\xBE\x20\x38\xC0\x5E\x3B\x95\x00"
+ "\xA1\x96\x66\x0B\x8A\xE9\x9E\xF7"
+ "\x6B\x34\x0A\x51\xC0\x3B\xEB\x71"
+ "\x07\x97\x38\x4B\x5C\x56\x98\x67"
+ "\x78\x9C\xD0\x0E\x2B\xB5\x67\x90"
+ "\x75\xF8\xFE\x6D\x4E\x85\xCC\x0D"
+ "\x18\x06\x15\x9D\x5A\x10\x13\x37"
+ "\xA3\xD6\x68\xA2\xDF\x7E\xC7\x12"
+ "\xC9\x0D\x4D\x91\xB0\x2A\x55\xFF"
+ "\x6F\x73\x13\xDF\x28\xB5\x2A\x2C"
+ "\xE4\xFC\x20\xD9\xF1\x7A\x82\xB1"
+ "\xCB\x57\xB6\x3D\x8C\xF4\x8E\x27"
+ "\x37\xDC\x35\xF3\x79\x01\x53\xA4"
+ "\x7B\x37\xDE\x7C\x04\xAE\x50\xDB"
+ "\x9B\x1E\x8C\x07\xA7\x52\x49\x50"
+ "\x34\x25\x65\xDD\xA9\x8F\x7E\xBD"
+ "\x7A\xC9\x36\xAE\xDE\x21\x48\x64"
+ "\xC2\x02\xBA\xBE\x11\x1E\x3D\x9C"
+ "\x98\x52\xCC\x04\xBD\x5E\x61\x26"
+ "\x10\xD3\x21\xD9\x6E\x25\x98\x77"
+ "\x8E\x98\x63\xF6\xF6\x52\xFB\x13"
+ "\xAA\x30\xF2\xB9\xA4\x43\x53\x39"
+ "\x1C\x97\x07\x7E\x6B\xFF\x3D\x43"
+ "\xA6\x71\x6B\x66\x8F\x58\x3F\x71"
+ "\x90\x47\x40\x92\xE6\x69\xD1\x96"
+ "\x34\xB3\x3B\xE5\x43\xE4\xD5\x56"
+ "\xB2\xE6\x7E\x86\x7A\x12\x17\x5B"
+ "\x30\xF3\x9B\x0D\xFA\x57\xE4\x50"
+ "\x40\x53\x77\x8C\x15\xF8\x8D\x13"
+ "\x38\xE2\xE5",
+ .rlen = 499,
.also_non_np = 1,
.np = 2,
- .tap = { 147 - 16, 16 },
+ .tap = { 499 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -6381,8 +9538,52 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
"\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
"\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
"\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
- "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9",
- .ilen = 144,
+ "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
+ "\xE6\xD0\x97\x81\xDE\xD1\xFB\x8A"
+ "\x30\xDB\xA3\x5D\xEC\x25\x0B\x86"
+ "\x71\xC8\xA7\x67\xE8\xBC\x7D\x4C"
+ "\xAE\x82\xD3\x73\x31\x09\xCB\xB3"
+ "\x4D\xD4\xC0\x8A\x2B\xFA\xA6\x55"
+ "\x39\x0A\xBC\x6E\x75\xAB\xC2\xE2"
+ "\x8A\xF2\x26\xCD\x63\x38\x35\xF7"
+ "\xAE\x12\x83\xCD\x8A\x9E\x7E\x4C"
+ "\xFE\x4D\xD7\xCE\x5C\x6E\x4C\xAF"
+ "\xE3\xCD\x76\xA7\x87\xA1\x54\x7C"
+ "\xEC\x32\xC7\x83\x2A\xFF\xF8\xEA"
+ "\x87\xB2\x47\xA3\x9D\xC2\x9C\xA2"
+ "\xB7\x2C\x7C\x1A\x24\xCB\x88\x61"
+ "\xFF\xA7\x1A\x16\x01\xDD\x4B\xFC"
+ "\x2E\xE0\x48\x67\x09\x42\xCC\x91"
+ "\xBE\x20\x38\xC0\x5E\x3B\x95\x00"
+ "\xA1\x96\x66\x0B\x8A\xE9\x9E\xF7"
+ "\x6B\x34\x0A\x51\xC0\x3B\xEB\x71"
+ "\x07\x97\x38\x4B\x5C\x56\x98\x67"
+ "\x78\x9C\xD0\x0E\x2B\xB5\x67\x90"
+ "\x75\xF8\xFE\x6D\x4E\x85\xCC\x0D"
+ "\x18\x06\x15\x9D\x5A\x10\x13\x37"
+ "\xA3\xD6\x68\xA2\xDF\x7E\xC7\x12"
+ "\xC9\x0D\x4D\x91\xB0\x2A\x55\xFF"
+ "\x6F\x73\x13\xDF\x28\xB5\x2A\x2C"
+ "\xE4\xFC\x20\xD9\xF1\x7A\x82\xB1"
+ "\xCB\x57\xB6\x3D\x8C\xF4\x8E\x27"
+ "\x37\xDC\x35\xF3\x79\x01\x53\xA4"
+ "\x7B\x37\xDE\x7C\x04\xAE\x50\xDB"
+ "\x9B\x1E\x8C\x07\xA7\x52\x49\x50"
+ "\x34\x25\x65\xDD\xA9\x8F\x7E\xBD"
+ "\x7A\xC9\x36\xAE\xDE\x21\x48\x64"
+ "\xC2\x02\xBA\xBE\x11\x1E\x3D\x9C"
+ "\x98\x52\xCC\x04\xBD\x5E\x61\x26"
+ "\x10\xD3\x21\xD9\x6E\x25\x98\x77"
+ "\x8E\x98\x63\xF6\xF6\x52\xFB\x13"
+ "\xAA\x30\xF2\xB9\xA4\x43\x53\x39"
+ "\x1C\x97\x07\x7E\x6B\xFF\x3D\x43"
+ "\xA6\x71\x6B\x66\x8F\x58\x3F\x71"
+ "\x90\x47\x40\x92\xE6\x69\xD1\x96"
+ "\x34\xB3\x3B\xE5\x43\xE4\xD5\x56"
+ "\xB2\xE6\x7E\x86\x7A\x12\x17\x5B"
+ "\x30\xF3\x9B\x0D\xFA\x57\xE4\x50"
+ "\x40\x53\x77\x8C\x15\xF8\x8D\x13",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -6400,8 +9601,52 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
"\x29\xC0\x57\xEE\x62\xF9\x90\x04"
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
- "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
- .rlen = 144,
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -6428,8 +9673,52 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
"\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
"\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
"\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
- "\xE6\xD0\x97",
- .ilen = 147,
+ "\xE6\xD0\x97\x81\xDE\xD1\xFB\x8A"
+ "\x30\xDB\xA3\x5D\xEC\x25\x0B\x86"
+ "\x71\xC8\xA7\x67\xE8\xBC\x7D\x4C"
+ "\xAE\x82\xD3\x73\x31\x09\xCB\xB3"
+ "\x4D\xD4\xC0\x8A\x2B\xFA\xA6\x55"
+ "\x39\x0A\xBC\x6E\x75\xAB\xC2\xE2"
+ "\x8A\xF2\x26\xCD\x63\x38\x35\xF7"
+ "\xAE\x12\x83\xCD\x8A\x9E\x7E\x4C"
+ "\xFE\x4D\xD7\xCE\x5C\x6E\x4C\xAF"
+ "\xE3\xCD\x76\xA7\x87\xA1\x54\x7C"
+ "\xEC\x32\xC7\x83\x2A\xFF\xF8\xEA"
+ "\x87\xB2\x47\xA3\x9D\xC2\x9C\xA2"
+ "\xB7\x2C\x7C\x1A\x24\xCB\x88\x61"
+ "\xFF\xA7\x1A\x16\x01\xDD\x4B\xFC"
+ "\x2E\xE0\x48\x67\x09\x42\xCC\x91"
+ "\xBE\x20\x38\xC0\x5E\x3B\x95\x00"
+ "\xA1\x96\x66\x0B\x8A\xE9\x9E\xF7"
+ "\x6B\x34\x0A\x51\xC0\x3B\xEB\x71"
+ "\x07\x97\x38\x4B\x5C\x56\x98\x67"
+ "\x78\x9C\xD0\x0E\x2B\xB5\x67\x90"
+ "\x75\xF8\xFE\x6D\x4E\x85\xCC\x0D"
+ "\x18\x06\x15\x9D\x5A\x10\x13\x37"
+ "\xA3\xD6\x68\xA2\xDF\x7E\xC7\x12"
+ "\xC9\x0D\x4D\x91\xB0\x2A\x55\xFF"
+ "\x6F\x73\x13\xDF\x28\xB5\x2A\x2C"
+ "\xE4\xFC\x20\xD9\xF1\x7A\x82\xB1"
+ "\xCB\x57\xB6\x3D\x8C\xF4\x8E\x27"
+ "\x37\xDC\x35\xF3\x79\x01\x53\xA4"
+ "\x7B\x37\xDE\x7C\x04\xAE\x50\xDB"
+ "\x9B\x1E\x8C\x07\xA7\x52\x49\x50"
+ "\x34\x25\x65\xDD\xA9\x8F\x7E\xBD"
+ "\x7A\xC9\x36\xAE\xDE\x21\x48\x64"
+ "\xC2\x02\xBA\xBE\x11\x1E\x3D\x9C"
+ "\x98\x52\xCC\x04\xBD\x5E\x61\x26"
+ "\x10\xD3\x21\xD9\x6E\x25\x98\x77"
+ "\x8E\x98\x63\xF6\xF6\x52\xFB\x13"
+ "\xAA\x30\xF2\xB9\xA4\x43\x53\x39"
+ "\x1C\x97\x07\x7E\x6B\xFF\x3D\x43"
+ "\xA6\x71\x6B\x66\x8F\x58\x3F\x71"
+ "\x90\x47\x40\x92\xE6\x69\xD1\x96"
+ "\x34\xB3\x3B\xE5\x43\xE4\xD5\x56"
+ "\xB2\xE6\x7E\x86\x7A\x12\x17\x5B"
+ "\x30\xF3\x9B\x0D\xFA\x57\xE4\x50"
+ "\x40\x53\x77\x8C\x15\xF8\x8D\x13"
+ "\x38\xE2\xE5",
+ .ilen = 499,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
@@ -6448,11 +9737,55 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
"\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
"\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
"\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
- "\xF1\x65\xFC",
- .rlen = 147,
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59",
+ .rlen = 499,
.also_non_np = 1,
.np = 2,
- .tap = { 147 - 16, 16 },
+ .tap = { 499 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -9326,10 +12659,10 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
/*
* AES test vectors.
*/
-#define AES_ENC_TEST_VECTORS 3
-#define AES_DEC_TEST_VECTORS 3
-#define AES_CBC_ENC_TEST_VECTORS 4
-#define AES_CBC_DEC_TEST_VECTORS 4
+#define AES_ENC_TEST_VECTORS 4
+#define AES_DEC_TEST_VECTORS 4
+#define AES_CBC_ENC_TEST_VECTORS 5
+#define AES_CBC_DEC_TEST_VECTORS 5
#define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7
@@ -9337,8 +12670,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
#define AES_LRW_DEC_TEST_VECTORS 8
#define AES_XTS_ENC_TEST_VECTORS 5
#define AES_XTS_DEC_TEST_VECTORS 5
-#define AES_CTR_ENC_TEST_VECTORS 3
-#define AES_CTR_DEC_TEST_VECTORS 3
+#define AES_CTR_ENC_TEST_VECTORS 5
+#define AES_CTR_DEC_TEST_VECTORS 5
#define AES_OFB_ENC_TEST_VECTORS 1
#define AES_OFB_DEC_TEST_VECTORS 1
#define AES_CTR_3686_ENC_TEST_VECTORS 7
@@ -9386,6 +12719,141 @@ static struct cipher_testvec aes_enc_tv_template[] = {
.result = "\x8e\xa2\xb7\xca\x51\x67\x45\xbf"
"\xea\xfc\x49\x90\x4b\x49\x60\x89",
.rlen = 16,
+ }, { /* Generated with Crypto++ */
+ .key = "\xA6\xC9\x83\xA6\xC9\xEC\x0F\x32"
+ "\x55\x0F\x32\x55\x78\x9B\xBE\x78"
+ "\x9B\xBE\xE1\x04\x27\xE1\x04\x27"
+ "\x4A\x6D\x90\x4A\x6D\x90\xB3\xD6",
+ .klen = 32,
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .ilen = 496,
+ .result = "\x71\x73\xF7\xDB\x24\x93\x21\x6D"
+ "\x61\x1E\xBB\x63\x42\x79\xDB\x64"
+ "\x6F\x82\xC0\xCA\xA3\x9B\xFA\x0B"
+ "\xD9\x08\xC7\x4A\x90\xAE\x8F\x5F"
+ "\x5E\x06\xF0\x5F\x31\x51\x18\x37"
+ "\x45\xD7\xCA\x3A\xFD\x6C\x3F\xE1"
+ "\xDD\x8D\x22\x65\x2B\x00\x50\xCE"
+ "\xBA\x28\x67\xD7\xCE\x0E\x0D\xEA"
+ "\x78\x69\x7F\xAE\x8F\x8B\x69\x37"
+ "\x75\xE0\xDC\x96\xE0\xB7\xF4\x09"
+ "\xCB\x6D\xA2\xFB\xDA\xAF\x09\xF8"
+ "\x81\x82\x27\xFA\x45\x9C\x29\xA4"
+ "\x22\x8B\x78\x69\x5B\x46\xF9\x39"
+ "\x1B\xCC\xF9\x1D\x09\xEB\xBC\x5C"
+ "\x41\x72\x51\x97\x1D\x07\x49\xA0"
+ "\x1B\x8E\x65\x4B\xB2\x6A\x12\x03"
+ "\x6A\x60\x95\xAC\xBD\xAC\x1A\x64"
+ "\xDE\x5A\xA5\xF0\x83\x2F\xCB\xCA"
+ "\x22\x74\xA6\x6C\x9B\x73\xCE\x3F"
+ "\xE1\x8B\x22\x17\x59\x0C\x47\x89"
+ "\x33\xA1\xD6\x47\x03\x19\x4F\xA8"
+ "\x67\x69\xF0\x5B\xF0\x20\xAD\x06"
+ "\x27\x81\x92\xD8\xC5\xBA\x98\x12"
+ "\xBE\x24\xB5\x2F\x75\x02\xC2\xAD"
+ "\x12\x2F\x07\x32\xEE\x39\xAF\x64"
+ "\x05\x8F\xB3\xD4\xEB\x1B\x46\x6E"
+ "\xD9\x21\xF9\xC4\xB7\xC9\x45\x68"
+ "\xB4\xA1\x74\x9F\x82\x47\xEB\xCC"
+ "\xBD\x0A\x14\x95\x0F\x8B\xA8\x2F"
+ "\x4B\x1B\xA7\xBF\x82\xA6\x43\x0C"
+ "\xB9\x39\x4A\xA8\x10\x6F\x50\x7B"
+ "\x25\xFB\x26\x81\xE0\x2F\xF0\x96"
+ "\x8D\x8B\xAC\x92\x0F\xF6\xED\x64"
+ "\x63\x29\x4C\x8E\x18\x13\xC5\xBF"
+ "\xFC\xA0\xD9\xBF\x7C\x3A\x0E\x29"
+ "\x6F\xD1\x6C\x6F\xA5\xDA\xBF\xB1"
+ "\x30\xEA\x44\x2D\xC3\x8F\x16\xE1"
+ "\x66\xFA\xA3\x21\x3E\xFC\x13\xCA"
+ "\xF0\xF6\xF0\x59\xBD\x8F\x38\x50"
+ "\x31\xCB\x69\x3F\x96\x15\xD6\xF5"
+ "\xAE\xFF\xF6\xAA\x41\x85\x4C\x10"
+ "\x58\xE3\xF9\x44\xE6\x28\xDA\x9A"
+ "\xDC\x6A\x80\x34\x73\x97\x1B\xC5"
+ "\xCA\x26\x16\x77\x0E\x60\xAB\x89"
+ "\x0F\x04\x27\xBD\xCE\x3E\x71\xB4"
+ "\xA0\xD7\x22\x7E\xDB\xEB\x24\x70"
+ "\x42\x71\x51\x78\x70\xB3\xE0\x3D"
+ "\x84\x8E\x8D\x7B\xD0\x6D\xEA\x92"
+ "\x11\x08\x42\x4F\xE5\xAD\x26\x92"
+ "\xD2\x00\xAE\xA8\xE3\x4B\x37\x47"
+ "\x22\xC1\x95\xC1\x63\x7F\xCB\x03"
+ "\xF3\xE3\xD7\x9D\x60\xC7\xBC\xEA"
+ "\x35\xA2\xFD\x45\x52\x39\x13\x6F"
+ "\xC1\x53\xF3\x53\xDF\x33\x84\xD7"
+ "\xD2\xC8\x37\xB0\x75\xE3\x41\x46"
+ "\xB3\xC7\x83\x2E\x8A\xBB\xA4\xE5"
+ "\x7F\x3C\xFD\x8B\xEB\xEA\x63\xBD"
+ "\xB7\x46\xE7\xBF\x09\x9C\x0D\x0F"
+ "\x40\x86\x7F\x51\xE1\x11\x9C\xCB"
+ "\x88\xE6\x68\x47\xE3\x2B\xC5\xFF"
+ "\x09\x79\xA0\x43\x5C\x0D\x08\x58"
+ "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -9423,6 +12891,141 @@ static struct cipher_testvec aes_dec_tv_template[] = {
.result = "\x00\x11\x22\x33\x44\x55\x66\x77"
"\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
.rlen = 16,
+ }, { /* Generated with Crypto++ */
+ .key = "\xA6\xC9\x83\xA6\xC9\xEC\x0F\x32"
+ "\x55\x0F\x32\x55\x78\x9B\xBE\x78"
+ "\x9B\xBE\xE1\x04\x27\xE1\x04\x27"
+ "\x4A\x6D\x90\x4A\x6D\x90\xB3\xD6",
+ .klen = 32,
+ .input = "\x71\x73\xF7\xDB\x24\x93\x21\x6D"
+ "\x61\x1E\xBB\x63\x42\x79\xDB\x64"
+ "\x6F\x82\xC0\xCA\xA3\x9B\xFA\x0B"
+ "\xD9\x08\xC7\x4A\x90\xAE\x8F\x5F"
+ "\x5E\x06\xF0\x5F\x31\x51\x18\x37"
+ "\x45\xD7\xCA\x3A\xFD\x6C\x3F\xE1"
+ "\xDD\x8D\x22\x65\x2B\x00\x50\xCE"
+ "\xBA\x28\x67\xD7\xCE\x0E\x0D\xEA"
+ "\x78\x69\x7F\xAE\x8F\x8B\x69\x37"
+ "\x75\xE0\xDC\x96\xE0\xB7\xF4\x09"
+ "\xCB\x6D\xA2\xFB\xDA\xAF\x09\xF8"
+ "\x81\x82\x27\xFA\x45\x9C\x29\xA4"
+ "\x22\x8B\x78\x69\x5B\x46\xF9\x39"
+ "\x1B\xCC\xF9\x1D\x09\xEB\xBC\x5C"
+ "\x41\x72\x51\x97\x1D\x07\x49\xA0"
+ "\x1B\x8E\x65\x4B\xB2\x6A\x12\x03"
+ "\x6A\x60\x95\xAC\xBD\xAC\x1A\x64"
+ "\xDE\x5A\xA5\xF0\x83\x2F\xCB\xCA"
+ "\x22\x74\xA6\x6C\x9B\x73\xCE\x3F"
+ "\xE1\x8B\x22\x17\x59\x0C\x47\x89"
+ "\x33\xA1\xD6\x47\x03\x19\x4F\xA8"
+ "\x67\x69\xF0\x5B\xF0\x20\xAD\x06"
+ "\x27\x81\x92\xD8\xC5\xBA\x98\x12"
+ "\xBE\x24\xB5\x2F\x75\x02\xC2\xAD"
+ "\x12\x2F\x07\x32\xEE\x39\xAF\x64"
+ "\x05\x8F\xB3\xD4\xEB\x1B\x46\x6E"
+ "\xD9\x21\xF9\xC4\xB7\xC9\x45\x68"
+ "\xB4\xA1\x74\x9F\x82\x47\xEB\xCC"
+ "\xBD\x0A\x14\x95\x0F\x8B\xA8\x2F"
+ "\x4B\x1B\xA7\xBF\x82\xA6\x43\x0C"
+ "\xB9\x39\x4A\xA8\x10\x6F\x50\x7B"
+ "\x25\xFB\x26\x81\xE0\x2F\xF0\x96"
+ "\x8D\x8B\xAC\x92\x0F\xF6\xED\x64"
+ "\x63\x29\x4C\x8E\x18\x13\xC5\xBF"
+ "\xFC\xA0\xD9\xBF\x7C\x3A\x0E\x29"
+ "\x6F\xD1\x6C\x6F\xA5\xDA\xBF\xB1"
+ "\x30\xEA\x44\x2D\xC3\x8F\x16\xE1"
+ "\x66\xFA\xA3\x21\x3E\xFC\x13\xCA"
+ "\xF0\xF6\xF0\x59\xBD\x8F\x38\x50"
+ "\x31\xCB\x69\x3F\x96\x15\xD6\xF5"
+ "\xAE\xFF\xF6\xAA\x41\x85\x4C\x10"
+ "\x58\xE3\xF9\x44\xE6\x28\xDA\x9A"
+ "\xDC\x6A\x80\x34\x73\x97\x1B\xC5"
+ "\xCA\x26\x16\x77\x0E\x60\xAB\x89"
+ "\x0F\x04\x27\xBD\xCE\x3E\x71\xB4"
+ "\xA0\xD7\x22\x7E\xDB\xEB\x24\x70"
+ "\x42\x71\x51\x78\x70\xB3\xE0\x3D"
+ "\x84\x8E\x8D\x7B\xD0\x6D\xEA\x92"
+ "\x11\x08\x42\x4F\xE5\xAD\x26\x92"
+ "\xD2\x00\xAE\xA8\xE3\x4B\x37\x47"
+ "\x22\xC1\x95\xC1\x63\x7F\xCB\x03"
+ "\xF3\xE3\xD7\x9D\x60\xC7\xBC\xEA"
+ "\x35\xA2\xFD\x45\x52\x39\x13\x6F"
+ "\xC1\x53\xF3\x53\xDF\x33\x84\xD7"
+ "\xD2\xC8\x37\xB0\x75\xE3\x41\x46"
+ "\xB3\xC7\x83\x2E\x8A\xBB\xA4\xE5"
+ "\x7F\x3C\xFD\x8B\xEB\xEA\x63\xBD"
+ "\xB7\x46\xE7\xBF\x09\x9C\x0D\x0F"
+ "\x40\x86\x7F\x51\xE1\x11\x9C\xCB"
+ "\x88\xE6\x68\x47\xE3\x2B\xC5\xFF"
+ "\x09\x79\xA0\x43\x5C\x0D\x08\x58"
+ "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
+ .ilen = 496,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -9505,6 +13108,143 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
"\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
"\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
.rlen = 64,
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
+ "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .ilen = 496,
+ .result = "\xEA\x65\x8A\x19\xB0\x66\xC1\x3F"
+ "\xCE\xF1\x97\x75\xC1\xFD\xB5\xAF"
+ "\x52\x65\xF7\xFF\xBC\xD8\x2D\x9F"
+ "\x2F\xB9\x26\x9B\x6F\x10\xB7\xB8"
+ "\x26\xA1\x02\x46\xA2\xAD\xC6\xC0"
+ "\x11\x15\xFF\x6D\x1E\x82\x04\xA6"
+ "\xB1\x74\xD1\x08\x13\xFD\x90\x7C"
+ "\xF5\xED\xD3\xDB\x5A\x0A\x0C\x2F"
+ "\x0A\x70\xF1\x88\x07\xCF\x21\x26"
+ "\x40\x40\x8A\xF5\x53\xF7\x24\x4F"
+ "\x83\x38\x43\x5F\x08\x99\xEB\xE3"
+ "\xDC\x02\x64\x67\x50\x6E\x15\xC3"
+ "\x01\x1A\xA0\x81\x13\x65\xA6\x73"
+ "\x71\xA6\x3B\x91\x83\x77\xBE\xFA"
+ "\xDB\x71\x73\xA6\xC1\xAE\x43\xC3"
+ "\x36\xCE\xD6\xEB\xF9\x30\x1C\x4F"
+ "\x80\x38\x5E\x9C\x6E\xAB\x98\x2F"
+ "\x53\xAF\xCF\xC8\x9A\xB8\x86\x43"
+ "\x3E\x86\xE7\xA1\xF4\x2F\x30\x40"
+ "\x03\xA8\x6C\x50\x42\x9F\x77\x59"
+ "\x89\xA0\xC5\xEC\x9A\xB8\xDD\x99"
+ "\x16\x24\x02\x07\x48\xAE\xF2\x31"
+ "\x34\x0E\xC3\x85\xFE\x1C\x95\x99"
+ "\x87\x58\x98\x8B\xE7\xC6\xC5\x70"
+ "\x73\x81\x07\x7C\x56\x2F\xD8\x1B"
+ "\xB7\xB9\x2B\xAB\xE3\x01\x87\x0F"
+ "\xD8\xBB\xC0\x0D\xAC\x2C\x2F\x98"
+ "\x3C\x0B\xA2\x99\x4A\x8C\xF7\x04"
+ "\xE0\xE0\xCF\xD1\x81\x5B\xFE\xF5"
+ "\x24\x04\xFD\xB8\xDF\x13\xD8\xCD"
+ "\xF1\xE3\x3D\x98\x50\x02\x77\x9E"
+ "\xBC\x22\xAB\xFA\xC2\x43\x1F\x66"
+ "\x20\x02\x23\xDA\xDF\xA0\x89\xF6"
+ "\xD8\xF3\x45\x24\x53\x6F\x16\x77"
+ "\x02\x3E\x7B\x36\x5F\xA0\x3B\x78"
+ "\x63\xA2\xBD\xB5\xA4\xCA\x1E\xD3"
+ "\x57\xBC\x0B\x9F\x43\x51\x28\x4F"
+ "\x07\x50\x6C\x68\x12\x07\xCF\xFA"
+ "\x6B\x72\x0B\xEB\xF8\x88\x90\x2C"
+ "\x7E\xF5\x91\xD1\x03\xD8\xD5\xBD"
+ "\x22\x39\x7B\x16\x03\x01\x69\xAF"
+ "\x3D\x38\x66\x28\x0C\xBE\x5B\xC5"
+ "\x03\xB4\x2F\x51\x8A\x56\x17\x2B"
+ "\x88\x42\x6D\x40\x68\x8F\xD0\x11"
+ "\x19\xF9\x1F\x43\x79\x95\x31\xFA"
+ "\x28\x7A\x3D\xF7\x66\xEB\xEF\xAC"
+ "\x06\xB2\x01\xAD\xDB\x68\xDB\xEC"
+ "\x8D\x53\x6E\x72\x68\xA3\xC7\x63"
+ "\x43\x2B\x78\xE0\x04\x29\x8F\x72"
+ "\xB2\x2C\xE6\x84\x03\x30\x6D\xCD"
+ "\x26\x92\x37\xE1\x2F\xBB\x8B\x9D"
+ "\xE4\x4C\xF6\x93\xBC\xD9\xAD\x44"
+ "\x52\x65\xC7\xB0\x0E\x3F\x0E\x61"
+ "\x56\x5D\x1C\x6D\xA7\x05\x2E\xBC"
+ "\x58\x08\x15\xAB\x12\xAB\x17\x4A"
+ "\x5E\x1C\xF2\xCD\xB8\xA2\xAE\xFB"
+ "\x9B\x2E\x0E\x85\x34\x80\x0E\x3F"
+ "\x4C\xB8\xDB\xCE\x1C\x90\xA1\x61"
+ "\x6C\x69\x09\x35\x9E\xD4\xF4\xAD"
+ "\xBC\x06\x41\xE3\x01\xB4\x4E\x0A"
+ "\xE0\x1F\x91\xF8\x82\x96\x2D\x65"
+ "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -9587,6 +13327,143 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
+ "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .input = "\xEA\x65\x8A\x19\xB0\x66\xC1\x3F"
+ "\xCE\xF1\x97\x75\xC1\xFD\xB5\xAF"
+ "\x52\x65\xF7\xFF\xBC\xD8\x2D\x9F"
+ "\x2F\xB9\x26\x9B\x6F\x10\xB7\xB8"
+ "\x26\xA1\x02\x46\xA2\xAD\xC6\xC0"
+ "\x11\x15\xFF\x6D\x1E\x82\x04\xA6"
+ "\xB1\x74\xD1\x08\x13\xFD\x90\x7C"
+ "\xF5\xED\xD3\xDB\x5A\x0A\x0C\x2F"
+ "\x0A\x70\xF1\x88\x07\xCF\x21\x26"
+ "\x40\x40\x8A\xF5\x53\xF7\x24\x4F"
+ "\x83\x38\x43\x5F\x08\x99\xEB\xE3"
+ "\xDC\x02\x64\x67\x50\x6E\x15\xC3"
+ "\x01\x1A\xA0\x81\x13\x65\xA6\x73"
+ "\x71\xA6\x3B\x91\x83\x77\xBE\xFA"
+ "\xDB\x71\x73\xA6\xC1\xAE\x43\xC3"
+ "\x36\xCE\xD6\xEB\xF9\x30\x1C\x4F"
+ "\x80\x38\x5E\x9C\x6E\xAB\x98\x2F"
+ "\x53\xAF\xCF\xC8\x9A\xB8\x86\x43"
+ "\x3E\x86\xE7\xA1\xF4\x2F\x30\x40"
+ "\x03\xA8\x6C\x50\x42\x9F\x77\x59"
+ "\x89\xA0\xC5\xEC\x9A\xB8\xDD\x99"
+ "\x16\x24\x02\x07\x48\xAE\xF2\x31"
+ "\x34\x0E\xC3\x85\xFE\x1C\x95\x99"
+ "\x87\x58\x98\x8B\xE7\xC6\xC5\x70"
+ "\x73\x81\x07\x7C\x56\x2F\xD8\x1B"
+ "\xB7\xB9\x2B\xAB\xE3\x01\x87\x0F"
+ "\xD8\xBB\xC0\x0D\xAC\x2C\x2F\x98"
+ "\x3C\x0B\xA2\x99\x4A\x8C\xF7\x04"
+ "\xE0\xE0\xCF\xD1\x81\x5B\xFE\xF5"
+ "\x24\x04\xFD\xB8\xDF\x13\xD8\xCD"
+ "\xF1\xE3\x3D\x98\x50\x02\x77\x9E"
+ "\xBC\x22\xAB\xFA\xC2\x43\x1F\x66"
+ "\x20\x02\x23\xDA\xDF\xA0\x89\xF6"
+ "\xD8\xF3\x45\x24\x53\x6F\x16\x77"
+ "\x02\x3E\x7B\x36\x5F\xA0\x3B\x78"
+ "\x63\xA2\xBD\xB5\xA4\xCA\x1E\xD3"
+ "\x57\xBC\x0B\x9F\x43\x51\x28\x4F"
+ "\x07\x50\x6C\x68\x12\x07\xCF\xFA"
+ "\x6B\x72\x0B\xEB\xF8\x88\x90\x2C"
+ "\x7E\xF5\x91\xD1\x03\xD8\xD5\xBD"
+ "\x22\x39\x7B\x16\x03\x01\x69\xAF"
+ "\x3D\x38\x66\x28\x0C\xBE\x5B\xC5"
+ "\x03\xB4\x2F\x51\x8A\x56\x17\x2B"
+ "\x88\x42\x6D\x40\x68\x8F\xD0\x11"
+ "\x19\xF9\x1F\x43\x79\x95\x31\xFA"
+ "\x28\x7A\x3D\xF7\x66\xEB\xEF\xAC"
+ "\x06\xB2\x01\xAD\xDB\x68\xDB\xEC"
+ "\x8D\x53\x6E\x72\x68\xA3\xC7\x63"
+ "\x43\x2B\x78\xE0\x04\x29\x8F\x72"
+ "\xB2\x2C\xE6\x84\x03\x30\x6D\xCD"
+ "\x26\x92\x37\xE1\x2F\xBB\x8B\x9D"
+ "\xE4\x4C\xF6\x93\xBC\xD9\xAD\x44"
+ "\x52\x65\xC7\xB0\x0E\x3F\x0E\x61"
+ "\x56\x5D\x1C\x6D\xA7\x05\x2E\xBC"
+ "\x58\x08\x15\xAB\x12\xAB\x17\x4A"
+ "\x5E\x1C\xF2\xCD\xB8\xA2\xAE\xFB"
+ "\x9B\x2E\x0E\x85\x34\x80\x0E\x3F"
+ "\x4C\xB8\xDB\xCE\x1C\x90\xA1\x61"
+ "\x6C\x69\x09\x35\x9E\xD4\xF4\xAD"
+ "\xBC\x06\x41\xE3\x01\xB4\x4E\x0A"
+ "\xE0\x1F\x91\xF8\x82\x96\x2D\x65"
+ "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
+ .ilen = 496,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
},
};
@@ -11134,8 +15011,6 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
.klen = 64,
.iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- "\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -11478,8 +15353,6 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
.klen = 64,
.iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- "\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x1c\x3b\x3a\x10\x2f\x77\x03\x86"
"\xe4\x83\x6c\x99\xe3\x70\xcf\x9b"
"\xea\x00\x80\x3f\x5e\x48\x23\x57"
@@ -11693,7 +15566,283 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
"\xdf\xc9\xc5\x8d\xb6\x7a\xad\xa6"
"\x13\xc2\xdd\x08\x45\x79\x41\xa6",
.rlen = 64,
- }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+ "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .ilen = 496,
+ .result = "\x04\xF3\xD3\x88\x17\xEF\xDC\xEF"
+ "\x8B\x04\xF8\x3A\x66\x8D\x1A\x53"
+ "\x57\x1F\x4B\x23\xE4\xA0\xAF\xF9"
+ "\x69\x95\x35\x98\x8D\x4D\x8C\xC1"
+ "\xF0\xB2\x7F\x80\xBB\x54\x28\xA2"
+ "\x7A\x1B\x9F\x77\xEC\x0E\x6E\xDE"
+ "\xF0\xEC\xB8\xE4\x20\x62\xEE\xDB"
+ "\x5D\xF5\xDD\xE3\x54\xFC\xDD\xEB"
+ "\x6A\xEE\x65\xA1\x21\xD6\xD7\x81"
+ "\x47\x61\x12\x4D\xC2\x8C\xFA\x78"
+ "\x1F\x28\x02\x01\xC3\xFC\x1F\xEC"
+ "\x0F\x10\x4F\xB3\x12\x45\xC6\x3B"
+ "\x7E\x08\xF9\x5A\xD0\x5D\x73\x2D"
+ "\x58\xA4\xE5\xCB\x1C\xB4\xCE\x74"
+ "\x32\x41\x1F\x31\x9C\x08\xA2\x5D"
+ "\x67\xEB\x72\x1D\xF8\xE7\x70\x54"
+ "\x34\x4B\x31\x69\x84\x66\x96\x44"
+ "\x56\xCC\x1E\xD9\xE6\x13\x6A\xB9"
+ "\x2D\x0A\x05\x45\x2D\x90\xCC\xDF"
+ "\x16\x5C\x5F\x79\x34\x52\x54\xFE"
+ "\xFE\xCD\xAD\x04\x2E\xAD\x86\x06"
+ "\x1F\x37\xE8\x28\xBC\xD3\x8F\x5B"
+ "\x92\x66\x87\x3B\x8A\x0A\x1A\xCC"
+ "\x6E\xAB\x9F\x0B\xFA\x5C\xE6\xFD"
+ "\x3C\x98\x08\x12\xEC\xAA\x9E\x11"
+ "\xCA\xB2\x1F\xCE\x5E\x5B\xB2\x72"
+ "\x9C\xCC\x5D\xC5\xE0\x32\xC0\x56"
+ "\xD5\x45\x16\xD2\xAF\x13\x66\xF7"
+ "\x8C\x67\xAC\x79\xB2\xAF\x56\x27"
+ "\x3F\xCC\xFE\xCB\x1E\xC0\x75\xF1"
+ "\xA7\xC9\xC3\x1D\x8E\xDD\xF9\xD4"
+ "\x42\xC8\x21\x08\x16\xF7\x01\xD7"
+ "\xAC\x8E\x3F\x1D\x56\xC1\x06\xE4"
+ "\x9C\x62\xD6\xA5\x6A\x50\x44\xB3"
+ "\x35\x1C\x82\xB9\x10\xF9\x42\xA1"
+ "\xFC\x74\x9B\x44\x4F\x25\x02\xE3"
+ "\x08\xF5\xD4\x32\x39\x08\x11\xE8"
+ "\xD2\x6B\x50\x53\xD4\x08\xD1\x6B"
+ "\x3A\x4A\x68\x7B\x7C\xCD\x46\x5E"
+ "\x0D\x07\x19\xDB\x67\xD7\x98\x91"
+ "\xD7\x17\x10\x9B\x7B\x8A\x9B\x33"
+ "\xAE\xF3\x00\xA6\xD4\x15\xD9\xEA"
+ "\x85\x99\x22\xE8\x91\x38\x70\x83"
+ "\x93\x01\x24\x6C\xFA\x9A\xB9\x07"
+ "\xEA\x8D\x3B\xD9\x2A\x43\x59\x16"
+ "\x2F\x69\xEE\x84\x36\x44\x76\x98"
+ "\xF3\x04\x2A\x7C\x74\x3D\x29\x2B"
+ "\x0D\xAD\x8F\x44\x82\x9E\x57\x8D"
+ "\xAC\xED\x18\x1F\x50\xA4\xF5\x98"
+ "\x1F\xBD\x92\x91\x1B\x2D\xA6\xD6"
+ "\xD2\xE3\x02\xAA\x92\x3B\xC6\xB3"
+ "\x1B\x39\x72\xD5\x26\xCA\x04\xE0"
+ "\xFC\x58\x78\xBB\xB1\x3F\xA1\x9C"
+ "\x42\x24\x3E\x2E\x22\xBB\x4B\xBA"
+ "\xF4\x52\x0A\xE6\xAE\x47\xB4\x7D"
+ "\x1D\xA8\xBE\x81\x1A\x75\xDA\xAC"
+ "\xA6\x25\x1E\xEF\x3A\xC0\x6C\x63"
+ "\xEF\xDC\xC9\x79\x10\x26\xE8\x61"
+ "\x29\xFC\xA4\x05\xDF\x7D\x5C\x63"
+ "\x10\x09\x9B\x46\x9B\xF2\x2C\x2B"
+ "\xFA\x3A\x05\x4C\xFA\xD1\xFF\xFE"
+ "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
+ "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .input = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12"
+ "\x7B\xE4\x4D",
+ .ilen = 499,
+ .result = "\xDA\x4E\x3F\xBC\xE8\xB6\x3A\xA2"
+ "\xD5\x4D\x84\x4A\xA9\x0C\xE1\xA5"
+ "\xB8\x73\xBC\xF9\xBB\x59\x2F\x44"
+ "\x8B\xAB\x82\x6C\xB4\x32\x9A\xDE"
+ "\x5A\x0B\xDB\x7A\x6B\xF2\x38\x9F"
+ "\x06\xF7\xF7\xFF\xFF\xC0\x8A\x2E"
+ "\x76\xEA\x06\x32\x23\xF3\x59\x2E"
+ "\x75\xDE\x71\x86\x3C\x98\x23\x44"
+ "\x5B\xF2\xFA\x6A\x00\xBB\xC1\xAD"
+ "\x58\xBD\x3E\x6F\x2E\xB4\x19\x04"
+ "\x70\x8B\x92\x55\x23\xE9\x6A\x3A"
+ "\x78\x7A\x1B\x10\x85\x52\x9C\x12"
+ "\xE4\x55\x81\x21\xCE\x53\xD0\x3B"
+ "\x63\x77\x2C\x74\xD1\xF5\x60\xF3"
+ "\xA1\xDE\x44\x3C\x8F\x4D\x2F\xDD"
+ "\x8A\xFE\x3C\x42\x8E\xD3\xF2\x8E"
+ "\xA8\x28\x69\x65\x31\xE1\x45\x83"
+ "\xE4\x49\xC4\x9C\xA7\x28\xAA\x21"
+ "\xCD\x5D\x0F\x15\xB7\x93\x07\x26"
+ "\xB0\x65\x6D\x91\x90\x23\x7A\xC6"
+ "\xDB\x68\xB0\xA1\x8E\xA4\x76\x4E"
+ "\xC6\x91\x83\x20\x92\x4D\x63\x7A"
+ "\x45\x18\x18\x74\x19\xAD\x71\x01"
+ "\x6B\x23\xAD\x9D\x4E\xE4\x6E\x46"
+ "\xC9\x73\x7A\xF9\x02\x95\xF4\x07"
+ "\x0E\x7A\xA6\xC5\xAE\xFA\x15\x2C"
+ "\x51\x71\xF1\xDC\x22\xB6\xAC\xD8"
+ "\x19\x24\x44\xBC\x0C\xFB\x3C\x2D"
+ "\xB1\x50\x47\x15\x0E\xDB\xB6\xD7"
+ "\xE8\x61\xE5\x95\x52\x1E\x3E\x49"
+ "\x70\xE9\x66\x04\x4C\xE1\xAF\xBD"
+ "\xDD\x15\x3B\x20\x59\x24\xFF\xB0"
+ "\x39\xAA\xE7\xBF\x23\xA3\x6E\xD5"
+ "\x15\xF0\x61\x4F\xAE\x89\x10\x58"
+ "\x5A\x33\x95\x52\x2A\xB5\x77\x9C"
+ "\xA5\x43\x80\x40\x27\x2D\xAE\xD9"
+ "\x3F\xE0\x80\x94\x78\x79\xCB\x7E"
+ "\xAD\x12\x44\x4C\xEC\x27\xB0\xEE"
+ "\x0B\x05\x2A\x82\x99\x58\xBB\x7A"
+ "\x8D\x6D\x9D\x8E\xE2\x8E\xE7\x93"
+ "\x2F\xB3\x09\x8D\x06\xD5\xEE\x70"
+ "\x16\xAE\x35\xC5\x52\x0F\x46\x1F"
+ "\x71\xF9\x5E\xF2\x67\xDC\x98\x2F"
+ "\xA3\x23\xAA\xD5\xD0\x49\xF4\xA6"
+ "\xF6\xB8\x32\xCD\xD6\x85\x73\x60"
+ "\x59\x20\xE7\x55\x0E\x91\xE2\x0C"
+ "\x3F\x1C\xEB\x3D\xDF\x52\x64\xF2"
+ "\x7D\x8B\x5D\x63\x16\xB9\xB2\x5D"
+ "\x5E\xAB\xB2\x97\xAB\x78\x44\xE7"
+ "\xC6\x72\x20\xC5\x90\x9B\xDC\x5D"
+ "\xB0\xEF\x44\xEF\x87\x31\x8D\xF4"
+ "\xFB\x81\x5D\xF7\x96\x96\xD4\x50"
+ "\x89\xA7\xF6\xB9\x67\x76\x40\x9E"
+ "\x9D\x40\xD5\x2C\x30\xB8\x01\x8F"
+ "\xE4\x7B\x71\x48\xA9\xA0\xA0\x1D"
+ "\x87\x52\xA4\x91\xA9\xD7\xA9\x51"
+ "\xD9\x59\xF7\xCC\x63\x22\xC1\x8D"
+ "\x84\x7B\xD8\x22\x32\x5C\x6F\x1D"
+ "\x6E\x9F\xFA\xDD\x49\x40\xDC\x37"
+ "\x14\x8C\xE1\x80\x1B\xDD\x36\x2A"
+ "\xD0\xE9\x54\x99\x5D\xBA\x3B\x11"
+ "\xD8\xFE\xC9\x5B\x5C\x25\xE5\x76"
+ "\xFB\xF2\x3F",
+ .rlen = 499,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 499 - 16, 16 },
+ },
};
static struct cipher_testvec aes_ctr_dec_tv_template[] = {
@@ -11772,7 +15921,283 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
"\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
"\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
.rlen = 64,
- }
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+ "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD",
+ .input = "\x04\xF3\xD3\x88\x17\xEF\xDC\xEF"
+ "\x8B\x04\xF8\x3A\x66\x8D\x1A\x53"
+ "\x57\x1F\x4B\x23\xE4\xA0\xAF\xF9"
+ "\x69\x95\x35\x98\x8D\x4D\x8C\xC1"
+ "\xF0\xB2\x7F\x80\xBB\x54\x28\xA2"
+ "\x7A\x1B\x9F\x77\xEC\x0E\x6E\xDE"
+ "\xF0\xEC\xB8\xE4\x20\x62\xEE\xDB"
+ "\x5D\xF5\xDD\xE3\x54\xFC\xDD\xEB"
+ "\x6A\xEE\x65\xA1\x21\xD6\xD7\x81"
+ "\x47\x61\x12\x4D\xC2\x8C\xFA\x78"
+ "\x1F\x28\x02\x01\xC3\xFC\x1F\xEC"
+ "\x0F\x10\x4F\xB3\x12\x45\xC6\x3B"
+ "\x7E\x08\xF9\x5A\xD0\x5D\x73\x2D"
+ "\x58\xA4\xE5\xCB\x1C\xB4\xCE\x74"
+ "\x32\x41\x1F\x31\x9C\x08\xA2\x5D"
+ "\x67\xEB\x72\x1D\xF8\xE7\x70\x54"
+ "\x34\x4B\x31\x69\x84\x66\x96\x44"
+ "\x56\xCC\x1E\xD9\xE6\x13\x6A\xB9"
+ "\x2D\x0A\x05\x45\x2D\x90\xCC\xDF"
+ "\x16\x5C\x5F\x79\x34\x52\x54\xFE"
+ "\xFE\xCD\xAD\x04\x2E\xAD\x86\x06"
+ "\x1F\x37\xE8\x28\xBC\xD3\x8F\x5B"
+ "\x92\x66\x87\x3B\x8A\x0A\x1A\xCC"
+ "\x6E\xAB\x9F\x0B\xFA\x5C\xE6\xFD"
+ "\x3C\x98\x08\x12\xEC\xAA\x9E\x11"
+ "\xCA\xB2\x1F\xCE\x5E\x5B\xB2\x72"
+ "\x9C\xCC\x5D\xC5\xE0\x32\xC0\x56"
+ "\xD5\x45\x16\xD2\xAF\x13\x66\xF7"
+ "\x8C\x67\xAC\x79\xB2\xAF\x56\x27"
+ "\x3F\xCC\xFE\xCB\x1E\xC0\x75\xF1"
+ "\xA7\xC9\xC3\x1D\x8E\xDD\xF9\xD4"
+ "\x42\xC8\x21\x08\x16\xF7\x01\xD7"
+ "\xAC\x8E\x3F\x1D\x56\xC1\x06\xE4"
+ "\x9C\x62\xD6\xA5\x6A\x50\x44\xB3"
+ "\x35\x1C\x82\xB9\x10\xF9\x42\xA1"
+ "\xFC\x74\x9B\x44\x4F\x25\x02\xE3"
+ "\x08\xF5\xD4\x32\x39\x08\x11\xE8"
+ "\xD2\x6B\x50\x53\xD4\x08\xD1\x6B"
+ "\x3A\x4A\x68\x7B\x7C\xCD\x46\x5E"
+ "\x0D\x07\x19\xDB\x67\xD7\x98\x91"
+ "\xD7\x17\x10\x9B\x7B\x8A\x9B\x33"
+ "\xAE\xF3\x00\xA6\xD4\x15\xD9\xEA"
+ "\x85\x99\x22\xE8\x91\x38\x70\x83"
+ "\x93\x01\x24\x6C\xFA\x9A\xB9\x07"
+ "\xEA\x8D\x3B\xD9\x2A\x43\x59\x16"
+ "\x2F\x69\xEE\x84\x36\x44\x76\x98"
+ "\xF3\x04\x2A\x7C\x74\x3D\x29\x2B"
+ "\x0D\xAD\x8F\x44\x82\x9E\x57\x8D"
+ "\xAC\xED\x18\x1F\x50\xA4\xF5\x98"
+ "\x1F\xBD\x92\x91\x1B\x2D\xA6\xD6"
+ "\xD2\xE3\x02\xAA\x92\x3B\xC6\xB3"
+ "\x1B\x39\x72\xD5\x26\xCA\x04\xE0"
+ "\xFC\x58\x78\xBB\xB1\x3F\xA1\x9C"
+ "\x42\x24\x3E\x2E\x22\xBB\x4B\xBA"
+ "\xF4\x52\x0A\xE6\xAE\x47\xB4\x7D"
+ "\x1D\xA8\xBE\x81\x1A\x75\xDA\xAC"
+ "\xA6\x25\x1E\xEF\x3A\xC0\x6C\x63"
+ "\xEF\xDC\xC9\x79\x10\x26\xE8\x61"
+ "\x29\xFC\xA4\x05\xDF\x7D\x5C\x63"
+ "\x10\x09\x9B\x46\x9B\xF2\x2C\x2B"
+ "\xFA\x3A\x05\x4C\xFA\xD1\xFF\xFE"
+ "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
+ .ilen = 496,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
+ .rlen = 496,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 496 - 16, 16 },
+ }, { /* Generated with Crypto++ */
+ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
+ "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
+ "\xBE\xE1\x04\x27\xE1\x04\x27\x4A"
+ "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9",
+ .klen = 32,
+ .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47"
+ "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42",
+ .input = "\xDA\x4E\x3F\xBC\xE8\xB6\x3A\xA2"
+ "\xD5\x4D\x84\x4A\xA9\x0C\xE1\xA5"
+ "\xB8\x73\xBC\xF9\xBB\x59\x2F\x44"
+ "\x8B\xAB\x82\x6C\xB4\x32\x9A\xDE"
+ "\x5A\x0B\xDB\x7A\x6B\xF2\x38\x9F"
+ "\x06\xF7\xF7\xFF\xFF\xC0\x8A\x2E"
+ "\x76\xEA\x06\x32\x23\xF3\x59\x2E"
+ "\x75\xDE\x71\x86\x3C\x98\x23\x44"
+ "\x5B\xF2\xFA\x6A\x00\xBB\xC1\xAD"
+ "\x58\xBD\x3E\x6F\x2E\xB4\x19\x04"
+ "\x70\x8B\x92\x55\x23\xE9\x6A\x3A"
+ "\x78\x7A\x1B\x10\x85\x52\x9C\x12"
+ "\xE4\x55\x81\x21\xCE\x53\xD0\x3B"
+ "\x63\x77\x2C\x74\xD1\xF5\x60\xF3"
+ "\xA1\xDE\x44\x3C\x8F\x4D\x2F\xDD"
+ "\x8A\xFE\x3C\x42\x8E\xD3\xF2\x8E"
+ "\xA8\x28\x69\x65\x31\xE1\x45\x83"
+ "\xE4\x49\xC4\x9C\xA7\x28\xAA\x21"
+ "\xCD\x5D\x0F\x15\xB7\x93\x07\x26"
+ "\xB0\x65\x6D\x91\x90\x23\x7A\xC6"
+ "\xDB\x68\xB0\xA1\x8E\xA4\x76\x4E"
+ "\xC6\x91\x83\x20\x92\x4D\x63\x7A"
+ "\x45\x18\x18\x74\x19\xAD\x71\x01"
+ "\x6B\x23\xAD\x9D\x4E\xE4\x6E\x46"
+ "\xC9\x73\x7A\xF9\x02\x95\xF4\x07"
+ "\x0E\x7A\xA6\xC5\xAE\xFA\x15\x2C"
+ "\x51\x71\xF1\xDC\x22\xB6\xAC\xD8"
+ "\x19\x24\x44\xBC\x0C\xFB\x3C\x2D"
+ "\xB1\x50\x47\x15\x0E\xDB\xB6\xD7"
+ "\xE8\x61\xE5\x95\x52\x1E\x3E\x49"
+ "\x70\xE9\x66\x04\x4C\xE1\xAF\xBD"
+ "\xDD\x15\x3B\x20\x59\x24\xFF\xB0"
+ "\x39\xAA\xE7\xBF\x23\xA3\x6E\xD5"
+ "\x15\xF0\x61\x4F\xAE\x89\x10\x58"
+ "\x5A\x33\x95\x52\x2A\xB5\x77\x9C"
+ "\xA5\x43\x80\x40\x27\x2D\xAE\xD9"
+ "\x3F\xE0\x80\x94\x78\x79\xCB\x7E"
+ "\xAD\x12\x44\x4C\xEC\x27\xB0\xEE"
+ "\x0B\x05\x2A\x82\x99\x58\xBB\x7A"
+ "\x8D\x6D\x9D\x8E\xE2\x8E\xE7\x93"
+ "\x2F\xB3\x09\x8D\x06\xD5\xEE\x70"
+ "\x16\xAE\x35\xC5\x52\x0F\x46\x1F"
+ "\x71\xF9\x5E\xF2\x67\xDC\x98\x2F"
+ "\xA3\x23\xAA\xD5\xD0\x49\xF4\xA6"
+ "\xF6\xB8\x32\xCD\xD6\x85\x73\x60"
+ "\x59\x20\xE7\x55\x0E\x91\xE2\x0C"
+ "\x3F\x1C\xEB\x3D\xDF\x52\x64\xF2"
+ "\x7D\x8B\x5D\x63\x16\xB9\xB2\x5D"
+ "\x5E\xAB\xB2\x97\xAB\x78\x44\xE7"
+ "\xC6\x72\x20\xC5\x90\x9B\xDC\x5D"
+ "\xB0\xEF\x44\xEF\x87\x31\x8D\xF4"
+ "\xFB\x81\x5D\xF7\x96\x96\xD4\x50"
+ "\x89\xA7\xF6\xB9\x67\x76\x40\x9E"
+ "\x9D\x40\xD5\x2C\x30\xB8\x01\x8F"
+ "\xE4\x7B\x71\x48\xA9\xA0\xA0\x1D"
+ "\x87\x52\xA4\x91\xA9\xD7\xA9\x51"
+ "\xD9\x59\xF7\xCC\x63\x22\xC1\x8D"
+ "\x84\x7B\xD8\x22\x32\x5C\x6F\x1D"
+ "\x6E\x9F\xFA\xDD\x49\x40\xDC\x37"
+ "\x14\x8C\xE1\x80\x1B\xDD\x36\x2A"
+ "\xD0\xE9\x54\x99\x5D\xBA\x3B\x11"
+ "\xD8\xFE\xC9\x5B\x5C\x25\xE5\x76"
+ "\xFB\xF2\x3F",
+ .ilen = 499,
+ .result = "\x50\xB9\x22\xAE\x17\x80\x0C\x75"
+ "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03"
+ "\x6C\xF8\x61\xCA\x33\xBF\x28\x91"
+ "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F"
+ "\xAB\x14\x7D\x09\x72\xDB\x44\xD0"
+ "\x39\xA2\x0B\x97\x00\x69\xF5\x5E"
+ "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC"
+ "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A"
+ "\x06\x6F\xD8\x41\xCD\x36\x9F\x08"
+ "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9"
+ "\x22\x8B\x17\x80\xE9\x52\xDE\x47"
+ "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5"
+ "\x3E\xCA\x33\x9C\x05\x91\xFA\x63"
+ "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14"
+ "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2"
+ "\x0B\x74\x00\x69\xD2\x3B\xC7\x30"
+ "\x99\x02\x8E\xF7\x60\xEC\x55\xBE"
+ "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C"
+ "\xD8\x41\xAA\x13\x9F\x08\x71\xFD"
+ "\x66\xCF\x38\xC4\x2D\x96\x22\x8B"
+ "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19"
+ "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7"
+ "\x10\x9C\x05\x6E\xFA\x63\xCC\x35"
+ "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6"
+ "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74"
+ "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02"
+ "\x6B\xF7\x60\xC9\x32\xBE\x27\x90"
+ "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E"
+ "\xAA\x13\x7C\x08\x71\xDA\x43\xCF"
+ "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D"
+ "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB"
+ "\x54\xE0\x49\xB2\x1B\xA7\x10\x79"
+ "\x05\x6E\xD7\x40\xCC\x35\x9E\x07"
+ "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8"
+ "\x21\x8A\x16\x7F\xE8\x51\xDD\x46"
+ "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4"
+ "\x3D\xC9\x32\x9B\x04\x90\xF9\x62"
+ "\xEE\x57\xC0\x29\xB5\x1E\x87\x13"
+ "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1"
+ "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F"
+ "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD"
+ "\x26\xB2\x1B\x84\x10\x79\xE2\x4B"
+ "\xD7\x40\xA9\x12\x9E\x07\x70\xFC"
+ "\x65\xCE\x37\xC3\x2C\x95\x21\x8A"
+ "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18"
+ "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6"
+ "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34"
+ "\xC0\x29\x92\x1E\x87\xF0\x59\xE5"
+ "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73"
+ "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01"
+ "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F"
+ "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D"
+ "\xA9\x12\x7B\x07\x70\xD9\x42\xCE"
+ "\x37\xA0\x09\x95\xFE\x67\xF3\x5C"
+ "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA"
+ "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78"
+ "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06"
+ "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7"
+ "\x20\x89\x15\x7E\xE7\x50\xDC\x45"
+ "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3"
+ "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61"
+ "\xED\x56\xBF\x28\xB4\x1D\x86\x12"
+ "\x7B\xE4\x4D",
+ .rlen = 499,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 499 - 16, 16 },
+ },
};
static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
@@ -16291,8 +20716,7 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
.result = "\x9a\xcc\x23\x7d\xff\x16\xd7\x6c"
"\x20\xef\x7c\x91\x9e\x3a\x75\x09",
.rlen = 16,
- },
- { /* Generated with Crypto++ */
+ }, { /* Generated with Crypto++ */
.key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C"
"\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D"
"\x4A\x27\x04\xE1\x27\x04\xE1\xBE"
@@ -16303,18 +20727,130 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .ilen = 48,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\xED\xCD\xDB\xB8\x68\xCE\xBD\xEA"
"\x9D\x9D\xCD\x9F\x4F\xFC\x4D\xB7"
"\xA5\xFF\x6F\x43\x0F\xBA\x32\x04"
"\xB3\xC2\xB9\x03\xAA\x91\x56\x29"
"\x0D\xD0\xFD\xC4\x65\xA5\x69\xB9"
- "\xF1\xF6\xB1\xA5\xB2\x75\x4F\x8A",
- .rlen = 48,
+ "\xF1\xF6\xB1\xA5\xB2\x75\x4F\x8A"
+ "\x8D\x7D\x1B\x9B\xC7\x68\x72\xF8"
+ "\x01\x9B\x17\x0A\x29\xE7\x61\x28"
+ "\x7F\xA7\x50\xCA\x20\x2C\x96\x3B"
+ "\x6E\x5C\x5D\x3F\xB5\x7F\xF3\x2B"
+ "\x04\xEF\x9D\xD4\xCE\x41\x28\x8E"
+ "\x83\x54\xAE\x7C\x82\x46\x10\xC9"
+ "\xC4\x8A\x1E\x1F\x4C\xA9\xFC\xEC"
+ "\x3C\x8C\x30\xFC\x59\xD2\x54\xC4"
+ "\x6F\x50\xC6\xCA\x8C\x14\x5B\x9C"
+ "\x18\x56\x5B\xF8\x33\x0E\x4A\xDB"
+ "\xEC\xB5\x6E\x5B\x31\xC4\x0E\x98"
+ "\x9F\x32\xBA\xA2\x18\xCF\x55\x43"
+ "\xFE\x80\x8F\x60\xCF\x05\x30\x9B"
+ "\x70\x50\x1E\x9C\x08\x87\xE6\x20"
+ "\xD2\xF3\x27\xF8\x2A\x8D\x12\xB2"
+ "\xBC\x5F\xFE\x52\x52\xF6\x7F\xB6"
+ "\xB8\x30\x86\x3B\x0F\x94\x1E\x79"
+ "\x13\x94\x35\xA2\xB1\x35\x5B\x05"
+ "\x2A\x98\x6B\x96\x4C\xB1\x20\xBE"
+ "\xB6\x14\xC2\x06\xBF\xFD\x5F\x2A"
+ "\xF5\x33\xC8\x19\x45\x14\x44\x5D"
+ "\xFE\x94\x7B\xBB\x63\x13\x57\xC3"
+ "\x2A\x8F\x6C\x11\x2A\x07\xA7\x6A"
+ "\xBF\x20\xD3\x99\xC6\x00\x0B\xBF"
+ "\x83\x46\x25\x3A\xB0\xF6\xC5\xC8"
+ "\x00\xCA\xE5\x28\x4A\x7C\x95\x9C"
+ "\x7B\x43\xAB\xF9\xE4\xF8\x74\xAB"
+ "\xA7\xB8\x9C\x0F\x53\x7B\xB6\x74"
+ "\x60\x64\x0D\x1C\x80\xD1\x20\x9E"
+ "\xDC\x14\x27\x9B\xFC\xBD\x5C\x96"
+ "\xD2\x51\xDC\x96\xEE\xE5\xEA\x2B"
+ "\x02\x7C\xAA\x3C\xDC\x9D\x7B\x01"
+ "\x20\xC3\xE1\x0B\xDD\xAB\xF3\x1E"
+ "\x19\xA8\x84\x29\x5F\xCC\xC3\x5B"
+ "\xE4\x33\x59\xDC\x12\xEB\x2B\x4D"
+ "\x5B\x55\x23\xB7\x40\x31\xDE\xEE"
+ "\x18\xC9\x3C\x4D\xBC\xED\xE0\x42"
+ "\xAD\xDE\xA0\xA3\xC3\xFE\x44\xD3"
+ "\xE1\x9A\xDA\xAB\x32\xFC\x1A\xBF"
+ "\x63\xA9\xF0\x6A\x08\x46\xBD\x48"
+ "\x83\x06\xAB\x82\x99\x01\x16\x1A"
+ "\x03\x36\xC5\x59\x6B\xB8\x8C\x9F"
+ "\xC6\x51\x3D\xE5\x7F\xBF\xAB\xBC"
+ "\xC9\xA1\x88\x34\x5F\xA9\x7C\x3B"
+ "\x9F\x1B\x98\x2B\x4F\xFB\x9B\xF0"
+ "\xCD\xB6\x45\xB2\x29\x2E\x34\x23"
+ "\xA9\x97\xC0\x22\x8C\x42\x9B\x5F"
+ "\x40\xC8\xD7\x3D\x82\x9A\x6F\xAA"
+ "\x74\x83\x29\x05\xE8\xC4\x4D\x01"
+ "\xB5\xE5\x84\x3F\x7F\xD3\xE0\x99"
+ "\xDA\xE7\x6F\x30\xFD\xAA\x92\x30"
+ "\xA5\x46\x8B\xA2\xE6\x58\x62\x7C"
+ "\x2C\x35\x1B\x38\x85\x7D\xE8\xF3"
+ "\x87\x4F\xDA\xD8\x5F\xFC\xB6\x44"
+ "\xD0\xE3\x9B\x8B\xBF\xD6\xB8\xC4"
+ "\x73\xAE\x1D\x8B\x5B\x74\x8B\xCB",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 48 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -16352,8 +20888,7 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
.result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
"\xfe\xdc\xba\x98\x76\x54\x32\x10",
.rlen = 16,
- },
- { /* Generated with Crypto++ */
+ }, { /* Generated with Crypto++ */
.key = "\x3F\x85\x62\x3F\x1C\xF9\xD6\x1C"
"\xF9\xD6\xB3\x90\x6D\x4A\x90\x6D"
"\x4A\x27\x04\xE1\x27\x04\xE1\xBE"
@@ -16364,18 +20899,130 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
"\xA5\xFF\x6F\x43\x0F\xBA\x32\x04"
"\xB3\xC2\xB9\x03\xAA\x91\x56\x29"
"\x0D\xD0\xFD\xC4\x65\xA5\x69\xB9"
- "\xF1\xF6\xB1\xA5\xB2\x75\x4F\x8A",
- .ilen = 48,
+ "\xF1\xF6\xB1\xA5\xB2\x75\x4F\x8A"
+ "\x8D\x7D\x1B\x9B\xC7\x68\x72\xF8"
+ "\x01\x9B\x17\x0A\x29\xE7\x61\x28"
+ "\x7F\xA7\x50\xCA\x20\x2C\x96\x3B"
+ "\x6E\x5C\x5D\x3F\xB5\x7F\xF3\x2B"
+ "\x04\xEF\x9D\xD4\xCE\x41\x28\x8E"
+ "\x83\x54\xAE\x7C\x82\x46\x10\xC9"
+ "\xC4\x8A\x1E\x1F\x4C\xA9\xFC\xEC"
+ "\x3C\x8C\x30\xFC\x59\xD2\x54\xC4"
+ "\x6F\x50\xC6\xCA\x8C\x14\x5B\x9C"
+ "\x18\x56\x5B\xF8\x33\x0E\x4A\xDB"
+ "\xEC\xB5\x6E\x5B\x31\xC4\x0E\x98"
+ "\x9F\x32\xBA\xA2\x18\xCF\x55\x43"
+ "\xFE\x80\x8F\x60\xCF\x05\x30\x9B"
+ "\x70\x50\x1E\x9C\x08\x87\xE6\x20"
+ "\xD2\xF3\x27\xF8\x2A\x8D\x12\xB2"
+ "\xBC\x5F\xFE\x52\x52\xF6\x7F\xB6"
+ "\xB8\x30\x86\x3B\x0F\x94\x1E\x79"
+ "\x13\x94\x35\xA2\xB1\x35\x5B\x05"
+ "\x2A\x98\x6B\x96\x4C\xB1\x20\xBE"
+ "\xB6\x14\xC2\x06\xBF\xFD\x5F\x2A"
+ "\xF5\x33\xC8\x19\x45\x14\x44\x5D"
+ "\xFE\x94\x7B\xBB\x63\x13\x57\xC3"
+ "\x2A\x8F\x6C\x11\x2A\x07\xA7\x6A"
+ "\xBF\x20\xD3\x99\xC6\x00\x0B\xBF"
+ "\x83\x46\x25\x3A\xB0\xF6\xC5\xC8"
+ "\x00\xCA\xE5\x28\x4A\x7C\x95\x9C"
+ "\x7B\x43\xAB\xF9\xE4\xF8\x74\xAB"
+ "\xA7\xB8\x9C\x0F\x53\x7B\xB6\x74"
+ "\x60\x64\x0D\x1C\x80\xD1\x20\x9E"
+ "\xDC\x14\x27\x9B\xFC\xBD\x5C\x96"
+ "\xD2\x51\xDC\x96\xEE\xE5\xEA\x2B"
+ "\x02\x7C\xAA\x3C\xDC\x9D\x7B\x01"
+ "\x20\xC3\xE1\x0B\xDD\xAB\xF3\x1E"
+ "\x19\xA8\x84\x29\x5F\xCC\xC3\x5B"
+ "\xE4\x33\x59\xDC\x12\xEB\x2B\x4D"
+ "\x5B\x55\x23\xB7\x40\x31\xDE\xEE"
+ "\x18\xC9\x3C\x4D\xBC\xED\xE0\x42"
+ "\xAD\xDE\xA0\xA3\xC3\xFE\x44\xD3"
+ "\xE1\x9A\xDA\xAB\x32\xFC\x1A\xBF"
+ "\x63\xA9\xF0\x6A\x08\x46\xBD\x48"
+ "\x83\x06\xAB\x82\x99\x01\x16\x1A"
+ "\x03\x36\xC5\x59\x6B\xB8\x8C\x9F"
+ "\xC6\x51\x3D\xE5\x7F\xBF\xAB\xBC"
+ "\xC9\xA1\x88\x34\x5F\xA9\x7C\x3B"
+ "\x9F\x1B\x98\x2B\x4F\xFB\x9B\xF0"
+ "\xCD\xB6\x45\xB2\x29\x2E\x34\x23"
+ "\xA9\x97\xC0\x22\x8C\x42\x9B\x5F"
+ "\x40\xC8\xD7\x3D\x82\x9A\x6F\xAA"
+ "\x74\x83\x29\x05\xE8\xC4\x4D\x01"
+ "\xB5\xE5\x84\x3F\x7F\xD3\xE0\x99"
+ "\xDA\xE7\x6F\x30\xFD\xAA\x92\x30"
+ "\xA5\x46\x8B\xA2\xE6\x58\x62\x7C"
+ "\x2C\x35\x1B\x38\x85\x7D\xE8\xF3"
+ "\x87\x4F\xDA\xD8\x5F\xFC\xB6\x44"
+ "\xD0\xE3\x9B\x8B\xBF\xD6\xB8\xC4"
+ "\x73\xAE\x1D\x8B\x5B\x74\x8B\xCB",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .rlen = 48,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 48 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -16407,8 +21054,7 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
"\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
"\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
.rlen = 32,
- },
- { /* Generated with Crypto++ */
+ }, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
@@ -16421,18 +21067,130 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .ilen = 48,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\xCD\x3E\x2A\x3B\x3E\x94\xC5\x77"
"\xBA\xBB\x5B\xB1\xDE\x7B\xA4\x40"
"\x88\x39\xE3\xFD\x94\x4B\x25\x58"
"\xE1\x4B\xC4\x18\x7A\xFD\x17\x2B"
"\xB9\xF9\xC2\x27\x6A\xB6\x31\x27"
- "\xA6\xAD\xEF\xE5\x5D\xE4\x02\x01",
- .rlen = 48,
+ "\xA6\xAD\xEF\xE5\x5D\xE4\x02\x01"
+ "\x56\x2E\x10\xC2\x2C\xFF\xC6\x83"
+ "\xB5\xDC\x4F\x63\xAD\x0E\x63\x5E"
+ "\x56\xC8\x18\x3D\x79\x86\x97\xEF"
+ "\x57\x0E\x63\xA1\xC1\x41\x48\xB8"
+ "\x98\xB7\x51\x6D\x18\xF6\x19\x82"
+ "\x37\x49\x88\xA4\xEF\x91\x21\x47"
+ "\x03\x28\xEA\x42\xF4\xFB\x7A\x58"
+ "\x28\x90\x77\x46\xD8\xD2\x35\x16"
+ "\x44\xA9\x9E\x49\x52\x2A\xE4\x16"
+ "\x5D\xF7\x65\xEB\x0F\xC9\x29\xE6"
+ "\xCF\x76\x91\x89\x8A\x94\x39\xFA"
+ "\x6B\x5F\x63\x53\x74\x43\x91\xF5"
+ "\x3F\xBC\x88\x53\xB2\x1A\x02\x3F"
+ "\x9D\x32\x84\xEB\x56\x28\xD6\x06"
+ "\xD5\xB2\x20\xA9\xFC\xC3\x76\x62"
+ "\x32\xCC\x86\xC8\x36\x67\x5E\x7E"
+ "\xA4\xAA\x15\x63\x6B\xA9\x86\xAF"
+ "\x1A\x52\x82\x36\x5F\xF4\x3F\x7A"
+ "\x9B\x78\x62\x3B\x02\x28\x60\xB3"
+ "\xBA\x82\xB1\xDD\xC9\x60\x8F\x47"
+ "\xF1\x6B\xFE\xE5\x39\x34\xA0\x28"
+ "\xA4\xB3\xC9\x7E\xED\x28\x8D\x70"
+ "\xB2\x1D\xFD\xC6\x00\xCF\x1A\x94"
+ "\x28\xF8\xC1\x34\xB7\x58\xA5\x6C"
+ "\x1A\x9D\xE4\xE4\xF6\xB9\xB4\xB0"
+ "\x5D\x51\x54\x9A\x53\xA0\xF9\x32"
+ "\xBD\x31\x54\x14\x7B\x33\xEE\x17"
+ "\xD3\xC7\x1F\x48\xBF\x0B\x22\xA2"
+ "\x7D\x0C\xDF\xD0\x2E\x98\xFA\xD2"
+ "\xFA\xCF\x24\x1D\x99\x9B\xD0\x7E"
+ "\xF4\x4F\x88\xFF\x45\x99\x4A\xF4"
+ "\xF2\x0A\x5B\x3B\x21\xAB\x92\xAE"
+ "\x40\x78\x91\x95\xC4\x2F\xA3\xE8"
+ "\x18\xC7\x07\xA6\xC8\xC0\x66\x33"
+ "\x35\xC0\xB4\xA0\xF8\xEE\x1E\xF3"
+ "\x40\xF5\x40\x54\xF1\x84\x8C\xEA"
+ "\x27\x38\x1F\xF8\x77\xC7\xDF\xD8"
+ "\x1D\xE2\xD9\x59\x40\x4F\x59\xD4"
+ "\xF8\x17\x99\x8D\x58\x2D\x72\x44"
+ "\x9D\x1D\x91\x64\xD6\x3F\x0A\x82"
+ "\xC7\x57\x3D\xEF\xD3\x41\xFA\xA7"
+ "\x68\xA3\xB8\xA5\x93\x74\x2E\x85"
+ "\x4C\x9D\x69\x59\xCE\x15\xAE\xBF"
+ "\x9C\x8F\x14\x64\x5D\x7F\xCF\x0B"
+ "\xCE\x43\x5D\x28\xC0\x2F\xFB\x18"
+ "\x79\x9A\xFC\x43\x16\x7C\x6B\x7B"
+ "\x38\xB8\x48\x36\x66\x4E\x20\x43"
+ "\xBA\x76\x13\x9A\xC3\xF2\xEB\x52"
+ "\xD7\xDC\xB2\x67\x63\x14\x25\xCD"
+ "\xB1\x13\x4B\xDE\x8C\x59\x21\x84"
+ "\x81\x8D\x97\x23\x45\x33\x7C\xF3"
+ "\xC5\xBC\x79\x95\xAA\x84\x68\x31"
+ "\x2D\x1A\x68\xFE\xEC\x92\x94\xDA"
+ "\x94\x2A\x6F\xD6\xFE\xE5\x76\x97"
+ "\xF4\x6E\xEE\xCB\x2B\x95\x4E\x36"
+ "\x5F\x74\x8C\x86\x5B\x71\xD0\x20",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 48 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -16464,8 +21222,7 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
.rlen = 32,
- },
- { /* Generated with Crypto++ */
+ }, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
@@ -16478,18 +21235,130 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
"\x88\x39\xE3\xFD\x94\x4B\x25\x58"
"\xE1\x4B\xC4\x18\x7A\xFD\x17\x2B"
"\xB9\xF9\xC2\x27\x6A\xB6\x31\x27"
- "\xA6\xAD\xEF\xE5\x5D\xE4\x02\x01",
- .ilen = 48,
+ "\xA6\xAD\xEF\xE5\x5D\xE4\x02\x01"
+ "\x56\x2E\x10\xC2\x2C\xFF\xC6\x83"
+ "\xB5\xDC\x4F\x63\xAD\x0E\x63\x5E"
+ "\x56\xC8\x18\x3D\x79\x86\x97\xEF"
+ "\x57\x0E\x63\xA1\xC1\x41\x48\xB8"
+ "\x98\xB7\x51\x6D\x18\xF6\x19\x82"
+ "\x37\x49\x88\xA4\xEF\x91\x21\x47"
+ "\x03\x28\xEA\x42\xF4\xFB\x7A\x58"
+ "\x28\x90\x77\x46\xD8\xD2\x35\x16"
+ "\x44\xA9\x9E\x49\x52\x2A\xE4\x16"
+ "\x5D\xF7\x65\xEB\x0F\xC9\x29\xE6"
+ "\xCF\x76\x91\x89\x8A\x94\x39\xFA"
+ "\x6B\x5F\x63\x53\x74\x43\x91\xF5"
+ "\x3F\xBC\x88\x53\xB2\x1A\x02\x3F"
+ "\x9D\x32\x84\xEB\x56\x28\xD6\x06"
+ "\xD5\xB2\x20\xA9\xFC\xC3\x76\x62"
+ "\x32\xCC\x86\xC8\x36\x67\x5E\x7E"
+ "\xA4\xAA\x15\x63\x6B\xA9\x86\xAF"
+ "\x1A\x52\x82\x36\x5F\xF4\x3F\x7A"
+ "\x9B\x78\x62\x3B\x02\x28\x60\xB3"
+ "\xBA\x82\xB1\xDD\xC9\x60\x8F\x47"
+ "\xF1\x6B\xFE\xE5\x39\x34\xA0\x28"
+ "\xA4\xB3\xC9\x7E\xED\x28\x8D\x70"
+ "\xB2\x1D\xFD\xC6\x00\xCF\x1A\x94"
+ "\x28\xF8\xC1\x34\xB7\x58\xA5\x6C"
+ "\x1A\x9D\xE4\xE4\xF6\xB9\xB4\xB0"
+ "\x5D\x51\x54\x9A\x53\xA0\xF9\x32"
+ "\xBD\x31\x54\x14\x7B\x33\xEE\x17"
+ "\xD3\xC7\x1F\x48\xBF\x0B\x22\xA2"
+ "\x7D\x0C\xDF\xD0\x2E\x98\xFA\xD2"
+ "\xFA\xCF\x24\x1D\x99\x9B\xD0\x7E"
+ "\xF4\x4F\x88\xFF\x45\x99\x4A\xF4"
+ "\xF2\x0A\x5B\x3B\x21\xAB\x92\xAE"
+ "\x40\x78\x91\x95\xC4\x2F\xA3\xE8"
+ "\x18\xC7\x07\xA6\xC8\xC0\x66\x33"
+ "\x35\xC0\xB4\xA0\xF8\xEE\x1E\xF3"
+ "\x40\xF5\x40\x54\xF1\x84\x8C\xEA"
+ "\x27\x38\x1F\xF8\x77\xC7\xDF\xD8"
+ "\x1D\xE2\xD9\x59\x40\x4F\x59\xD4"
+ "\xF8\x17\x99\x8D\x58\x2D\x72\x44"
+ "\x9D\x1D\x91\x64\xD6\x3F\x0A\x82"
+ "\xC7\x57\x3D\xEF\xD3\x41\xFA\xA7"
+ "\x68\xA3\xB8\xA5\x93\x74\x2E\x85"
+ "\x4C\x9D\x69\x59\xCE\x15\xAE\xBF"
+ "\x9C\x8F\x14\x64\x5D\x7F\xCF\x0B"
+ "\xCE\x43\x5D\x28\xC0\x2F\xFB\x18"
+ "\x79\x9A\xFC\x43\x16\x7C\x6B\x7B"
+ "\x38\xB8\x48\x36\x66\x4E\x20\x43"
+ "\xBA\x76\x13\x9A\xC3\xF2\xEB\x52"
+ "\xD7\xDC\xB2\x67\x63\x14\x25\xCD"
+ "\xB1\x13\x4B\xDE\x8C\x59\x21\x84"
+ "\x81\x8D\x97\x23\x45\x33\x7C\xF3"
+ "\xC5\xBC\x79\x95\xAA\x84\x68\x31"
+ "\x2D\x1A\x68\xFE\xEC\x92\x94\xDA"
+ "\x94\x2A\x6F\xD6\xFE\xE5\x76\x97"
+ "\xF4\x6E\xEE\xCB\x2B\x95\x4E\x36"
+ "\x5F\x74\x8C\x86\x5B\x71\xD0\x20",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .rlen = 48,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
.also_non_np = 1,
.np = 2,
- .tap = { 48 - 16, 16 },
+ .tap = { 496 - 16, 16 },
},
};
@@ -16507,17 +21376,128 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .ilen = 48,
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .ilen = 496,
.result = "\xF3\x06\x3A\x84\xCD\xBA\x8E\x11"
"\xB7\x74\x6F\x5C\x97\xFB\x36\xFE"
"\xDE\x71\x58\xD4\x15\xD1\xC1\xA4"
"\xC9\x28\x74\xA6\x6B\xC7\x95\xA6"
"\x6C\x77\xF7\x2F\xDF\xC7\xBB\x85"
- "\x60\xFC\xE8\x94\xE8\xB5\x09\x2C",
- .rlen = 48,
- },
- { /* Generated with Crypto++ */
+ "\x60\xFC\xE8\x94\xE8\xB5\x09\x2C"
+ "\x1E\x43\xEF\x6C\xE9\x98\xC5\xA0"
+ "\x7B\x13\xE5\x7F\xF8\x49\x9A\x8C"
+ "\xE6\x7B\x08\xC3\x32\x66\x55\x4E"
+ "\xA5\x44\x1D\x2C\x18\xC7\x29\x1F"
+ "\x61\x28\x4A\xE3\xCD\xE5\x47\xB2"
+ "\x82\x2F\x66\x83\x91\x51\xAE\xD7"
+ "\x1C\x91\x3C\x57\xE3\x1D\x5A\xC9"
+ "\xFD\xC5\x58\x58\xEF\xCC\x33\xC9"
+ "\x0F\xEA\x26\x32\xD1\x15\x19\x2D"
+ "\x25\xB4\x7F\xB0\xDF\xFB\x88\x60"
+ "\x4E\x4D\x06\x7D\xCC\x1F\xED\x3B"
+ "\x68\x84\xD5\xB3\x1B\xE7\xB9\xA1"
+ "\x68\x8B\x2C\x1A\x44\xDA\x63\xD3"
+ "\x29\xE9\x59\x32\x1F\x30\x1C\x43"
+ "\xEA\x3A\xA3\x6B\x54\x3C\xAA\x11"
+ "\xAD\x38\x20\xC9\xB9\x8A\x64\x66"
+ "\x5A\x07\x49\xDF\xA1\x9C\xF9\x76"
+ "\x36\x65\xB6\x81\x8F\x76\x09\xE5"
+ "\xEB\xD1\x29\xA4\xE4\xF4\x4C\xCD"
+ "\xAF\xFC\xB9\x16\xD9\xC3\x73\x6A"
+ "\x33\x12\xF8\x7E\xBC\xCC\x7D\x80"
+ "\xBF\x3C\x25\x06\x13\x84\xFA\x35"
+ "\xF7\x40\xFA\xA1\x44\x13\x70\xD8"
+ "\x01\xF9\x85\x15\x63\xEC\x7D\xB9"
+ "\x02\xD8\xBA\x41\x6C\x92\x68\x66"
+ "\x95\xDD\xD6\x42\xE7\xBB\xE1\xFD"
+ "\x28\x3E\x94\xB6\xBD\xA7\xBF\x47"
+ "\x58\x8D\xFF\x19\x30\x75\x0D\x48"
+ "\x94\xE9\xA6\xCD\xB3\x8E\x1E\xCD"
+ "\x59\xBC\x1A\xAC\x3C\x4F\xA9\xEB"
+ "\xF4\xA7\xE4\x75\x4A\x18\x40\xC9"
+ "\x1E\xEC\x06\x9C\x28\x4B\xF7\x2B"
+ "\xE2\xEF\xD6\x42\x2E\xBB\xFC\x0A"
+ "\x79\xA2\x99\x28\x93\x1B\x00\x57"
+ "\x35\x1E\x1A\x93\x90\xA4\x68\x95"
+ "\x5E\x57\x40\xD5\xA9\xAA\x19\x48"
+ "\xEC\xFF\x76\x77\xDC\x78\x89\x76"
+ "\xE5\x3B\x00\xEC\x58\x4D\xD1\xE3"
+ "\xC8\x6C\x2C\x45\x5E\x5F\xD9\x4E"
+ "\x71\xA5\x36\x6D\x03\xF1\xC7\xD5"
+ "\xF3\x63\xC0\xD8\xCB\x2B\xF1\xA8"
+ "\xB9\x2B\xE6\x0B\xB9\x65\x78\xA0"
+ "\xC4\x46\xE6\x9B\x8B\x43\x2D\xAB"
+ "\x70\xA6\xE0\x59\x1E\xAC\x9D\xE0"
+ "\x76\x44\x45\xF3\x24\x11\x57\x98"
+ "\x9A\x86\xB4\x12\x80\x28\x86\x20"
+ "\x23\x9D\x2D\xE9\x38\x32\xB1\xE1"
+ "\xCF\x0A\x23\x73\x7D\xC5\x80\x3D"
+ "\x9F\x6D\xA0\xD0\xEE\x93\x8A\x79"
+ "\x3A\xDD\x1D\xBB\x9E\x26\x5D\x01"
+ "\x44\xD0\xD4\x4E\xC3\xF1\xE4\x38"
+ "\x09\x62\x0A\x1A\x4E\xD2\x63\x0F"
+ "\x6E\x3E\xD2\xA4\x3A\xF4\xF3\xFF"
+ "\x7E\x42\xEC\xB6\x6F\x4D\x6B\x48"
+ "\xE6\xA6\x50\x80\x78\x9E\xF1\xB0"
+ "\x4D\xB2\x0D\x3D\xFC\x40\x25\x4D",
+ .rlen = 496,
+ }, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
@@ -16531,19 +21511,131 @@ static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
- "\xDF\x76\x0D",
- .ilen = 51,
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59",
+ .ilen = 499,
.result = "\xF3\x06\x3A\x84\xCD\xBA\x8E\x11"
"\xB7\x74\x6F\x5C\x97\xFB\x36\xFE"
"\xDE\x71\x58\xD4\x15\xD1\xC1\xA4"
"\xC9\x28\x74\xA6\x6B\xC7\x95\xA6"
"\x6C\x77\xF7\x2F\xDF\xC7\xBB\x85"
"\x60\xFC\xE8\x94\xE8\xB5\x09\x2C"
- "\x1E\x43\xEF",
- .rlen = 51,
+ "\x1E\x43\xEF\x6C\xE9\x98\xC5\xA0"
+ "\x7B\x13\xE5\x7F\xF8\x49\x9A\x8C"
+ "\xE6\x7B\x08\xC3\x32\x66\x55\x4E"
+ "\xA5\x44\x1D\x2C\x18\xC7\x29\x1F"
+ "\x61\x28\x4A\xE3\xCD\xE5\x47\xB2"
+ "\x82\x2F\x66\x83\x91\x51\xAE\xD7"
+ "\x1C\x91\x3C\x57\xE3\x1D\x5A\xC9"
+ "\xFD\xC5\x58\x58\xEF\xCC\x33\xC9"
+ "\x0F\xEA\x26\x32\xD1\x15\x19\x2D"
+ "\x25\xB4\x7F\xB0\xDF\xFB\x88\x60"
+ "\x4E\x4D\x06\x7D\xCC\x1F\xED\x3B"
+ "\x68\x84\xD5\xB3\x1B\xE7\xB9\xA1"
+ "\x68\x8B\x2C\x1A\x44\xDA\x63\xD3"
+ "\x29\xE9\x59\x32\x1F\x30\x1C\x43"
+ "\xEA\x3A\xA3\x6B\x54\x3C\xAA\x11"
+ "\xAD\x38\x20\xC9\xB9\x8A\x64\x66"
+ "\x5A\x07\x49\xDF\xA1\x9C\xF9\x76"
+ "\x36\x65\xB6\x81\x8F\x76\x09\xE5"
+ "\xEB\xD1\x29\xA4\xE4\xF4\x4C\xCD"
+ "\xAF\xFC\xB9\x16\xD9\xC3\x73\x6A"
+ "\x33\x12\xF8\x7E\xBC\xCC\x7D\x80"
+ "\xBF\x3C\x25\x06\x13\x84\xFA\x35"
+ "\xF7\x40\xFA\xA1\x44\x13\x70\xD8"
+ "\x01\xF9\x85\x15\x63\xEC\x7D\xB9"
+ "\x02\xD8\xBA\x41\x6C\x92\x68\x66"
+ "\x95\xDD\xD6\x42\xE7\xBB\xE1\xFD"
+ "\x28\x3E\x94\xB6\xBD\xA7\xBF\x47"
+ "\x58\x8D\xFF\x19\x30\x75\x0D\x48"
+ "\x94\xE9\xA6\xCD\xB3\x8E\x1E\xCD"
+ "\x59\xBC\x1A\xAC\x3C\x4F\xA9\xEB"
+ "\xF4\xA7\xE4\x75\x4A\x18\x40\xC9"
+ "\x1E\xEC\x06\x9C\x28\x4B\xF7\x2B"
+ "\xE2\xEF\xD6\x42\x2E\xBB\xFC\x0A"
+ "\x79\xA2\x99\x28\x93\x1B\x00\x57"
+ "\x35\x1E\x1A\x93\x90\xA4\x68\x95"
+ "\x5E\x57\x40\xD5\xA9\xAA\x19\x48"
+ "\xEC\xFF\x76\x77\xDC\x78\x89\x76"
+ "\xE5\x3B\x00\xEC\x58\x4D\xD1\xE3"
+ "\xC8\x6C\x2C\x45\x5E\x5F\xD9\x4E"
+ "\x71\xA5\x36\x6D\x03\xF1\xC7\xD5"
+ "\xF3\x63\xC0\xD8\xCB\x2B\xF1\xA8"
+ "\xB9\x2B\xE6\x0B\xB9\x65\x78\xA0"
+ "\xC4\x46\xE6\x9B\x8B\x43\x2D\xAB"
+ "\x70\xA6\xE0\x59\x1E\xAC\x9D\xE0"
+ "\x76\x44\x45\xF3\x24\x11\x57\x98"
+ "\x9A\x86\xB4\x12\x80\x28\x86\x20"
+ "\x23\x9D\x2D\xE9\x38\x32\xB1\xE1"
+ "\xCF\x0A\x23\x73\x7D\xC5\x80\x3D"
+ "\x9F\x6D\xA0\xD0\xEE\x93\x8A\x79"
+ "\x3A\xDD\x1D\xBB\x9E\x26\x5D\x01"
+ "\x44\xD0\xD4\x4E\xC3\xF1\xE4\x38"
+ "\x09\x62\x0A\x1A\x4E\xD2\x63\x0F"
+ "\x6E\x3E\xD2\xA4\x3A\xF4\xF3\xFF"
+ "\x7E\x42\xEC\xB6\x6F\x4D\x6B\x48"
+ "\xE6\xA6\x50\x80\x78\x9E\xF1\xB0"
+ "\x4D\xB2\x0D\x3D\xFC\x40\x25\x4D"
+ "\x93\x11\x1C",
+ .rlen = 499,
.also_non_np = 1,
.np = 2,
- .tap = { 51 - 16, 16 },
+ .tap = { 499 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -16695,17 +21787,128 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
"\xDE\x71\x58\xD4\x15\xD1\xC1\xA4"
"\xC9\x28\x74\xA6\x6B\xC7\x95\xA6"
"\x6C\x77\xF7\x2F\xDF\xC7\xBB\x85"
- "\x60\xFC\xE8\x94\xE8\xB5\x09\x2C",
- .ilen = 48,
+ "\x60\xFC\xE8\x94\xE8\xB5\x09\x2C"
+ "\x1E\x43\xEF\x6C\xE9\x98\xC5\xA0"
+ "\x7B\x13\xE5\x7F\xF8\x49\x9A\x8C"
+ "\xE6\x7B\x08\xC3\x32\x66\x55\x4E"
+ "\xA5\x44\x1D\x2C\x18\xC7\x29\x1F"
+ "\x61\x28\x4A\xE3\xCD\xE5\x47\xB2"
+ "\x82\x2F\x66\x83\x91\x51\xAE\xD7"
+ "\x1C\x91\x3C\x57\xE3\x1D\x5A\xC9"
+ "\xFD\xC5\x58\x58\xEF\xCC\x33\xC9"
+ "\x0F\xEA\x26\x32\xD1\x15\x19\x2D"
+ "\x25\xB4\x7F\xB0\xDF\xFB\x88\x60"
+ "\x4E\x4D\x06\x7D\xCC\x1F\xED\x3B"
+ "\x68\x84\xD5\xB3\x1B\xE7\xB9\xA1"
+ "\x68\x8B\x2C\x1A\x44\xDA\x63\xD3"
+ "\x29\xE9\x59\x32\x1F\x30\x1C\x43"
+ "\xEA\x3A\xA3\x6B\x54\x3C\xAA\x11"
+ "\xAD\x38\x20\xC9\xB9\x8A\x64\x66"
+ "\x5A\x07\x49\xDF\xA1\x9C\xF9\x76"
+ "\x36\x65\xB6\x81\x8F\x76\x09\xE5"
+ "\xEB\xD1\x29\xA4\xE4\xF4\x4C\xCD"
+ "\xAF\xFC\xB9\x16\xD9\xC3\x73\x6A"
+ "\x33\x12\xF8\x7E\xBC\xCC\x7D\x80"
+ "\xBF\x3C\x25\x06\x13\x84\xFA\x35"
+ "\xF7\x40\xFA\xA1\x44\x13\x70\xD8"
+ "\x01\xF9\x85\x15\x63\xEC\x7D\xB9"
+ "\x02\xD8\xBA\x41\x6C\x92\x68\x66"
+ "\x95\xDD\xD6\x42\xE7\xBB\xE1\xFD"
+ "\x28\x3E\x94\xB6\xBD\xA7\xBF\x47"
+ "\x58\x8D\xFF\x19\x30\x75\x0D\x48"
+ "\x94\xE9\xA6\xCD\xB3\x8E\x1E\xCD"
+ "\x59\xBC\x1A\xAC\x3C\x4F\xA9\xEB"
+ "\xF4\xA7\xE4\x75\x4A\x18\x40\xC9"
+ "\x1E\xEC\x06\x9C\x28\x4B\xF7\x2B"
+ "\xE2\xEF\xD6\x42\x2E\xBB\xFC\x0A"
+ "\x79\xA2\x99\x28\x93\x1B\x00\x57"
+ "\x35\x1E\x1A\x93\x90\xA4\x68\x95"
+ "\x5E\x57\x40\xD5\xA9\xAA\x19\x48"
+ "\xEC\xFF\x76\x77\xDC\x78\x89\x76"
+ "\xE5\x3B\x00\xEC\x58\x4D\xD1\xE3"
+ "\xC8\x6C\x2C\x45\x5E\x5F\xD9\x4E"
+ "\x71\xA5\x36\x6D\x03\xF1\xC7\xD5"
+ "\xF3\x63\xC0\xD8\xCB\x2B\xF1\xA8"
+ "\xB9\x2B\xE6\x0B\xB9\x65\x78\xA0"
+ "\xC4\x46\xE6\x9B\x8B\x43\x2D\xAB"
+ "\x70\xA6\xE0\x59\x1E\xAC\x9D\xE0"
+ "\x76\x44\x45\xF3\x24\x11\x57\x98"
+ "\x9A\x86\xB4\x12\x80\x28\x86\x20"
+ "\x23\x9D\x2D\xE9\x38\x32\xB1\xE1"
+ "\xCF\x0A\x23\x73\x7D\xC5\x80\x3D"
+ "\x9F\x6D\xA0\xD0\xEE\x93\x8A\x79"
+ "\x3A\xDD\x1D\xBB\x9E\x26\x5D\x01"
+ "\x44\xD0\xD4\x4E\xC3\xF1\xE4\x38"
+ "\x09\x62\x0A\x1A\x4E\xD2\x63\x0F"
+ "\x6E\x3E\xD2\xA4\x3A\xF4\xF3\xFF"
+ "\x7E\x42\xEC\xB6\x6F\x4D\x6B\x48"
+ "\xE6\xA6\x50\x80\x78\x9E\xF1\xB0"
+ "\x4D\xB2\x0D\x3D\xFC\x40\x25\x4D",
+ .ilen = 496,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
- "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48",
- .rlen = 48,
- },
- { /* Generated with Crypto++ */
+ "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
+ .rlen = 496,
+ }, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
"\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
@@ -16719,19 +21922,131 @@ static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
"\xC9\x28\x74\xA6\x6B\xC7\x95\xA6"
"\x6C\x77\xF7\x2F\xDF\xC7\xBB\x85"
"\x60\xFC\xE8\x94\xE8\xB5\x09\x2C"
- "\x1E\x43\xEF",
- .ilen = 51,
+ "\x1E\x43\xEF\x6C\xE9\x98\xC5\xA0"
+ "\x7B\x13\xE5\x7F\xF8\x49\x9A\x8C"
+ "\xE6\x7B\x08\xC3\x32\x66\x55\x4E"
+ "\xA5\x44\x1D\x2C\x18\xC7\x29\x1F"
+ "\x61\x28\x4A\xE3\xCD\xE5\x47\xB2"
+ "\x82\x2F\x66\x83\x91\x51\xAE\xD7"
+ "\x1C\x91\x3C\x57\xE3\x1D\x5A\xC9"
+ "\xFD\xC5\x58\x58\xEF\xCC\x33\xC9"
+ "\x0F\xEA\x26\x32\xD1\x15\x19\x2D"
+ "\x25\xB4\x7F\xB0\xDF\xFB\x88\x60"
+ "\x4E\x4D\x06\x7D\xCC\x1F\xED\x3B"
+ "\x68\x84\xD5\xB3\x1B\xE7\xB9\xA1"
+ "\x68\x8B\x2C\x1A\x44\xDA\x63\xD3"
+ "\x29\xE9\x59\x32\x1F\x30\x1C\x43"
+ "\xEA\x3A\xA3\x6B\x54\x3C\xAA\x11"
+ "\xAD\x38\x20\xC9\xB9\x8A\x64\x66"
+ "\x5A\x07\x49\xDF\xA1\x9C\xF9\x76"
+ "\x36\x65\xB6\x81\x8F\x76\x09\xE5"
+ "\xEB\xD1\x29\xA4\xE4\xF4\x4C\xCD"
+ "\xAF\xFC\xB9\x16\xD9\xC3\x73\x6A"
+ "\x33\x12\xF8\x7E\xBC\xCC\x7D\x80"
+ "\xBF\x3C\x25\x06\x13\x84\xFA\x35"
+ "\xF7\x40\xFA\xA1\x44\x13\x70\xD8"
+ "\x01\xF9\x85\x15\x63\xEC\x7D\xB9"
+ "\x02\xD8\xBA\x41\x6C\x92\x68\x66"
+ "\x95\xDD\xD6\x42\xE7\xBB\xE1\xFD"
+ "\x28\x3E\x94\xB6\xBD\xA7\xBF\x47"
+ "\x58\x8D\xFF\x19\x30\x75\x0D\x48"
+ "\x94\xE9\xA6\xCD\xB3\x8E\x1E\xCD"
+ "\x59\xBC\x1A\xAC\x3C\x4F\xA9\xEB"
+ "\xF4\xA7\xE4\x75\x4A\x18\x40\xC9"
+ "\x1E\xEC\x06\x9C\x28\x4B\xF7\x2B"
+ "\xE2\xEF\xD6\x42\x2E\xBB\xFC\x0A"
+ "\x79\xA2\x99\x28\x93\x1B\x00\x57"
+ "\x35\x1E\x1A\x93\x90\xA4\x68\x95"
+ "\x5E\x57\x40\xD5\xA9\xAA\x19\x48"
+ "\xEC\xFF\x76\x77\xDC\x78\x89\x76"
+ "\xE5\x3B\x00\xEC\x58\x4D\xD1\xE3"
+ "\xC8\x6C\x2C\x45\x5E\x5F\xD9\x4E"
+ "\x71\xA5\x36\x6D\x03\xF1\xC7\xD5"
+ "\xF3\x63\xC0\xD8\xCB\x2B\xF1\xA8"
+ "\xB9\x2B\xE6\x0B\xB9\x65\x78\xA0"
+ "\xC4\x46\xE6\x9B\x8B\x43\x2D\xAB"
+ "\x70\xA6\xE0\x59\x1E\xAC\x9D\xE0"
+ "\x76\x44\x45\xF3\x24\x11\x57\x98"
+ "\x9A\x86\xB4\x12\x80\x28\x86\x20"
+ "\x23\x9D\x2D\xE9\x38\x32\xB1\xE1"
+ "\xCF\x0A\x23\x73\x7D\xC5\x80\x3D"
+ "\x9F\x6D\xA0\xD0\xEE\x93\x8A\x79"
+ "\x3A\xDD\x1D\xBB\x9E\x26\x5D\x01"
+ "\x44\xD0\xD4\x4E\xC3\xF1\xE4\x38"
+ "\x09\x62\x0A\x1A\x4E\xD2\x63\x0F"
+ "\x6E\x3E\xD2\xA4\x3A\xF4\xF3\xFF"
+ "\x7E\x42\xEC\xB6\x6F\x4D\x6B\x48"
+ "\xE6\xA6\x50\x80\x78\x9E\xF1\xB0"
+ "\x4D\xB2\x0D\x3D\xFC\x40\x25\x4D"
+ "\x93\x11\x1C",
+ .ilen = 499,
.result = "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
"\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
"\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
"\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
"\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
- "\xDF\x76\x0D",
- .rlen = 51,
+ "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+ "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+ "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+ "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+ "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+ "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+ "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+ "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+ "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+ "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+ "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+ "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+ "\xF1\x65\xFC\x93\x07\x9E\x35\xCC"
+ "\x40\xD7\x6E\x05\x79\x10\xA7\x1B"
+ "\xB2\x49\xE0\x54\xEB\x82\x19\x8D"
+ "\x24\xBB\x2F\xC6\x5D\xF4\x68\xFF"
+ "\x96\x0A\xA1\x38\xCF\x43\xDA\x71"
+ "\x08\x7C\x13\xAA\x1E\xB5\x4C\xE3"
+ "\x57\xEE\x85\x1C\x90\x27\xBE\x32"
+ "\xC9\x60\xF7\x6B\x02\x99\x0D\xA4"
+ "\x3B\xD2\x46\xDD\x74\x0B\x7F\x16"
+ "\xAD\x21\xB8\x4F\xE6\x5A\xF1\x88"
+ "\x1F\x93\x2A\xC1\x35\xCC\x63\xFA"
+ "\x6E\x05\x9C\x10\xA7\x3E\xD5\x49"
+ "\xE0\x77\x0E\x82\x19\xB0\x24\xBB"
+ "\x52\xE9\x5D\xF4\x8B\x22\x96\x2D"
+ "\xC4\x38\xCF\x66\xFD\x71\x08\x9F"
+ "\x13\xAA\x41\xD8\x4C\xE3\x7A\x11"
+ "\x85\x1C\xB3\x27\xBE\x55\xEC\x60"
+ "\xF7\x8E\x02\x99\x30\xC7\x3B\xD2"
+ "\x69\x00\x74\x0B\xA2\x16\xAD\x44"
+ "\xDB\x4F\xE6\x7D\x14\x88\x1F\xB6"
+ "\x2A\xC1\x58\xEF\x63\xFA\x91\x05"
+ "\x9C\x33\xCA\x3E\xD5\x6C\x03\x77"
+ "\x0E\xA5\x19\xB0\x47\xDE\x52\xE9"
+ "\x80\x17\x8B\x22\xB9\x2D\xC4\x5B"
+ "\xF2\x66\xFD\x94\x08\x9F\x36\xCD"
+ "\x41\xD8\x6F\x06\x7A\x11\xA8\x1C"
+ "\xB3\x4A\xE1\x55\xEC\x83\x1A\x8E"
+ "\x25\xBC\x30\xC7\x5E\xF5\x69\x00"
+ "\x97\x0B\xA2\x39\xD0\x44\xDB\x72"
+ "\x09\x7D\x14\xAB\x1F\xB6\x4D\xE4"
+ "\x58\xEF\x86\x1D\x91\x28\xBF\x33"
+ "\xCA\x61\xF8\x6C\x03\x9A\x0E\xA5"
+ "\x3C\xD3\x47\xDE\x75\x0C\x80\x17"
+ "\xAE\x22\xB9\x50\xE7\x5B\xF2\x89"
+ "\x20\x94\x2B\xC2\x36\xCD\x64\xFB"
+ "\x6F\x06\x9D\x11\xA8\x3F\xD6\x4A"
+ "\xE1\x78\x0F\x83\x1A\xB1\x25\xBC"
+ "\x53\xEA\x5E\xF5\x8C\x00\x97\x2E"
+ "\xC5\x39\xD0\x67\xFE\x72\x09\xA0"
+ "\x14\xAB\x42\xD9\x4D\xE4\x7B\x12"
+ "\x86\x1D\xB4\x28\xBF\x56\xED\x61"
+ "\xF8\x8F\x03\x9A\x31\xC8\x3C\xD3"
+ "\x6A\x01\x75\x0C\xA3\x17\xAE\x45"
+ "\xDC\x50\xE7\x7E\x15\x89\x20\xB7"
+ "\x2B\xC2\x59",
+ .rlen = 499,
.also_non_np = 1,
.np = 2,
- .tap = { 51 - 16, 16 },
+ .tap = { 499 - 16, 16 },
}, { /* Generated with Crypto++ */
.key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
@@ -19895,7 +25210,7 @@ static struct hash_testvec michael_mic_tv_template[] = {
/*
* CRC32C test vectors
*/
-#define CRC32C_TEST_VECTORS 14
+#define CRC32C_TEST_VECTORS 15
static struct hash_testvec crc32c_tv_template[] = {
{
@@ -20066,7 +25381,268 @@ static struct hash_testvec crc32c_tv_template[] = {
.digest = "\x75\xd3\xc5\x24",
.np = 2,
.tap = { 31, 209 }
- },
+ }, {
+ .key = "\xff\xff\xff\xff",
+ .ksize = 4,
+ .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
+ "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
+ "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
+ "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
+ "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
+ "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
+ "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
+ "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
+ "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
+ "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
+ "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
+ "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
+ "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
+ "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
+ "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
+ "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
+ "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
+ "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
+ "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
+ "\x58\xef\x63\xfa\x91\x05\x9c\x33"
+ "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
+ "\x19\xb0\x47\xde\x52\xe9\x80\x17"
+ "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
+ "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
+ "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
+ "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
+ "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
+ "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
+ "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
+ "\x86\x1d\x91\x28\xbf\x33\xca\x61"
+ "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
+ "\x47\xde\x75\x0c\x80\x17\xae\x22"
+ "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
+ "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
+ "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
+ "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
+ "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
+ "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
+ "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
+ "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
+ "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
+ "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
+ "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
+ "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
+ "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
+ "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
+ "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
+ "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
+ "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
+ "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
+ "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
+ "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
+ "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
+ "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
+ "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
+ "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
+ "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
+ "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
+ "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
+ "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
+ "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
+ "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
+ "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
+ "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
+ "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
+ "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
+ "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
+ "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
+ "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
+ "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
+ "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
+ "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
+ "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
+ "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
+ "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
+ "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
+ "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
+ "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
+ "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
+ "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
+ "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
+ "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
+ "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
+ "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
+ "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
+ "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
+ "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
+ "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
+ "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
+ "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
+ "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
+ "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
+ "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
+ "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
+ "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
+ "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
+ "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
+ "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
+ "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
+ "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
+ "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
+ "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
+ "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
+ "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
+ "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
+ "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
+ "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
+ "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
+ "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
+ "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
+ "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
+ "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
+ "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
+ "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
+ "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
+ "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
+ "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
+ "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
+ "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
+ "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
+ "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
+ "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
+ "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
+ "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
+ "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
+ "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
+ "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
+ "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
+ "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
+ "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
+ "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
+ "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
+ "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
+ "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
+ "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
+ "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
+ "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
+ "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
+ "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
+ "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
+ "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
+ "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
+ "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
+ "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
+ "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
+ "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
+ "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
+ "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
+ "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
+ "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
+ "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
+ "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
+ "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
+ "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
+ "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
+ "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
+ "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
+ "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
+ "\x47\xde\x52\xe9\x80\x17\x8b\x22"
+ "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
+ "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
+ "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
+ "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
+ "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
+ "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
+ "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
+ "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
+ "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
+ "\x75\x0c\x80\x17\xae\x22\xb9\x50"
+ "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
+ "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
+ "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
+ "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
+ "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
+ "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
+ "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
+ "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
+ "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
+ "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
+ "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
+ "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
+ "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
+ "\x48\xdf\x53\xea\x81\x18\x8c\x23"
+ "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
+ "\x09\xa0\x37\xce\x42\xd9\x70\x07"
+ "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
+ "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
+ "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
+ "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
+ "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
+ "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
+ "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
+ "\x76\x0d\x81\x18\xaf\x23\xba\x51"
+ "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
+ "\x37\xce\x65\xfc\x70\x07\x9e\x12"
+ "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
+ "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
+ "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
+ "\xff\x73\x0a\xa1\x15\xac\x43\xda"
+ "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
+ "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
+ "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
+ "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
+ "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
+ "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
+ "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
+ "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
+ "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
+ "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
+ "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
+ "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
+ "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
+ "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
+ "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
+ "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
+ "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
+ "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
+ "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
+ "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
+ "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
+ "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
+ "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
+ "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
+ "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
+ "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
+ "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
+ "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
+ "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
+ "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
+ "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
+ "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
+ "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
+ "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
+ "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
+ "\xef\x86\x1d\x91\x28\xbf\x33\xca"
+ "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
+ "\xd3\x47\xde\x75\x0c\x80\x17\xae"
+ "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
+ "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
+ "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
+ "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
+ "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
+ "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
+ "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
+ "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
+ "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
+ "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
+ "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
+ "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
+ "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
+ "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
+ "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
+ "\x67\xfe\x95\x09\xa0\x37\xce\x42"
+ "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
+ "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
+ "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
+ .psize = 2048,
+ .digest = "\xec\x26\x4d\x95",
+ }
};
/*
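
The CRC32C hunk above raises CRC32C_TEST_VECTORS to 15 and adds a 2048-byte vector seeded with an all-ones key. For orientation, a minimal bitwise CRC-32C sketch in plain C follows; it is an illustrative userspace routine, not the kernel's crc32c implementation, and the note about the seed arriving as the 4-byte key is an inference from the vector layout rather than something this diff states.

#include <stddef.h>
#include <stdint.h>

/*
 * Bitwise CRC-32C (Castagnoli polynomial, reflected form 0x82F63B78).
 * 'crc' is the running value; the conventional usage seeds it with
 * 0xFFFFFFFF and inverts the final result. The new test vector above
 * appears to pass the seed in as the 4-byte key.
 */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return crc;
}
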
diff --git a/crypto/vmac.c b/crypto/vmac.c
index f2338ca9836..2eb11a30c29 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -375,6 +375,11 @@ static void vhash_update(const unsigned char *m,
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
+ if (!mbytes)
+ return;
+
+ BUG_ON(mbytes % VMAC_NHBYTES);
+
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
@@ -454,7 +459,7 @@ do_l3:
}
static u64 vmac(unsigned char m[], unsigned int mbytes,
- unsigned char n[16], u64 *tagl,
+ const unsigned char n[16], u64 *tagl,
struct vmac_ctx_t *ctx)
{
u64 *in_n, *out_p;
@@ -559,8 +564,33 @@ static int vmac_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ int expand;
+ int min;
+
+ expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
+ VMAC_NHBYTES - ctx->partial_size : 0;
+
+ min = len < expand ? len : expand;
+
+ memcpy(ctx->partial + ctx->partial_size, p, min);
+ ctx->partial_size += min;
+
+ if (len < expand)
+ return 0;
- vhash_update(p, len, &ctx->__vmac_ctx);
+ vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
+ ctx->partial_size = 0;
+
+ len -= expand;
+ p += expand;
+
+ if (len % VMAC_NHBYTES) {
+ memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
+ len % VMAC_NHBYTES);
+ ctx->partial_size = len % VMAC_NHBYTES;
+ }
+
+ vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
return 0;
}
@@ -572,10 +602,20 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
vmac_t mac;
u8 nonce[16] = {};
- mac = vmac(NULL, 0, nonce, NULL, ctx);
+ /* vmac() ends up accessing outside the array bounds that
+ * we specify. It appears to access up to the next 2-word
+ * boundary. We'll just be uber cautious and zero the
+ * unwritten bytes in the buffer.
+ */
+ if (ctx->partial_size) {
+ memset(ctx->partial + ctx->partial_size, 0,
+ VMAC_NHBYTES - ctx->partial_size);
+ }
+ mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
memcpy(out, &mac, sizeof(vmac_t));
memset(&mac, 0, sizeof(vmac_t));
memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ ctx->partial_size = 0;
return 0;
}
@@ -673,4 +713,3 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
-
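
The vmac_update() rework above buffers incoming data so that vhash_update() only ever sees whole VMAC_NHBYTES blocks, carrying any remainder in ctx->partial until the next update or final. A simplified, standalone sketch of that buffering pattern is below; BLOCK_SIZE, partial_buf and process_full_blocks are illustrative placeholders rather than kernel API, and process_full_blocks only marks where the real block hash would run.

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 128			/* stands in for VMAC_NHBYTES */

struct partial_buf {
	unsigned char buf[BLOCK_SIZE];
	size_t used;			/* bytes currently buffered */
};

/* Stand-in for vhash_update(); tolerates len == 0, like the new guard. */
static void process_full_blocks(const unsigned char *p, size_t len)
{
	(void)p;
	(void)len;			/* hash whole blocks here */
}

/*
 * Same shape as the reworked vmac_update(): top up the partial buffer,
 * flush it once it holds a full block, stash the unaligned tail of the
 * new data, then process the aligned middle directly.
 */
static void buffered_update(struct partial_buf *pb,
			    const unsigned char *p, size_t len)
{
	size_t room = BLOCK_SIZE - pb->used;
	size_t take = len < room ? len : room;

	memcpy(pb->buf + pb->used, p, take);
	pb->used += take;
	if (len < room)
		return;			/* still not a full block */

	process_full_blocks(pb->buf, BLOCK_SIZE);
	pb->used = 0;

	p += take;
	len -= take;

	if (len % BLOCK_SIZE) {		/* keep the unaligned tail */
		memcpy(pb->buf, p + len - (len % BLOCK_SIZE),
		       len % BLOCK_SIZE);
		pb->used = len % BLOCK_SIZE;
	}
	process_full_blocks(p, len - len % BLOCK_SIZE);
}
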
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 0300bf61294..38c5078da11 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -267,6 +267,15 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
+config ACPI_INITRD_TABLE_OVERRIDE
+ bool "ACPI tables can be passed via uncompressed cpio in initrd"
+ default n
+ help
+ This option provides functionality to override arbitrary ACPI tables
+ via initrd. No functional change if no ACPI tables are passed via
+ initrd, therefore it's safe to say Y.
+ See Documentation/acpi/initrd_table_override.txt for details
+
config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 0df024e5fb6..d09c6b4bab2 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: dsopcode - Dispatcher suport for regions and fields
+ * Module Name: dsopcode - Dispatcher support for regions and fields
*
*****************************************************************************/
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6dc4a2b1e95..3ff26786154 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -534,6 +534,137 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
+
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum = (u8) (sum + *(buffer++));
+ return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+ ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+ ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+ ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+ ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+ ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+ ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+ ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+ ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+/* Non-fatal errors: Affected tables/files are ignored */
+#define INVALID_TABLE(x, path, name) \
+ { pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+/* Must not be raised above 10 without changing the code below */
+#define ACPI_OVERRIDE_TABLES 10
+
+void __init acpi_initrd_override(void *data, size_t size)
+{
+ int sig, no, table_nr = 0, total_offset = 0;
+ long offset = 0;
+ struct acpi_table_header *table;
+ char cpio_path[32] = "kernel/firmware/acpi/";
+ struct cpio_data file;
+ struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
+ char *p;
+
+ if (data == NULL || size == 0)
+ return;
+
+ for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
+ file = find_cpio_data(cpio_path, data, size, &offset);
+ if (!file.data)
+ break;
+
+ data += offset;
+ size -= offset;
+
+ if (file.size < sizeof(struct acpi_table_header))
+ INVALID_TABLE("Table smaller than ACPI header",
+ cpio_path, file.name);
+
+ table = file.data;
+
+ for (sig = 0; table_sigs[sig]; sig++)
+ if (!memcmp(table->signature, table_sigs[sig], 4))
+ break;
+
+ if (!table_sigs[sig])
+ INVALID_TABLE("Unknown signature",
+ cpio_path, file.name);
+ if (file.size != table->length)
+ INVALID_TABLE("File length does not match table length",
+ cpio_path, file.name);
+ if (acpi_table_checksum(file.data, table->length))
+ INVALID_TABLE("Bad table checksum",
+ cpio_path, file.name);
+
+ pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+ table->signature, cpio_path, file.name, table->length);
+
+ all_tables_size += table->length;
+ early_initrd_files[table_nr].data = file.data;
+ early_initrd_files[table_nr].size = file.size;
+ table_nr++;
+ }
+ if (table_nr == 0)
+ return;
+
+ acpi_tables_addr =
+ memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ all_tables_size, PAGE_SIZE);
+ if (!acpi_tables_addr) {
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Calling e820_add_reserve alone does not work: the tables become
+ * invalid later because the memory gets reused.
+ * memblock_reserve works as expected and the tables won't get modified,
+ * but on X86 it is not enough on its own, because ioremap (used by
+ * acpi_os_map_memory) will later complain that the pages to be mapped
+ * are not marked "reserved".
+ * Using both memblock_reserve and e820_add_region (via
+ * arch_reserve_mem_area) works fine.
+ */
+ memblock_reserve(acpi_tables_addr, all_tables_size);
+ arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+ p = early_ioremap(acpi_tables_addr, all_tables_size);
+
+ for (no = 0; no < table_nr; no++) {
+ memcpy(p + total_offset, early_initrd_files[no].data,
+ early_initrd_files[no].size);
+ total_offset += early_initrd_files[no].size;
+ }
+ early_iounmap(p, all_tables_size);
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
+
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn(PREFIX
+ "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+}
+
+
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
struct acpi_table_header ** new_table)
@@ -547,24 +678,73 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
if (strncmp(existing_table->signature, "DSDT", 4) == 0)
*new_table = (struct acpi_table_header *)AmlCode;
#endif
- if (*new_table != NULL) {
- printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
- "this is unsafe: tainting kernel\n",
- existing_table->signature,
- existing_table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
+ if (*new_table != NULL)
+ acpi_table_taint(existing_table);
return AE_OK;
}
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address * new_address,
- u32 *new_table_length)
+ acpi_physical_address *address,
+ u32 *table_length)
{
- return AE_SUPPORT;
-}
+#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+#else
+ int table_offset = 0;
+ struct acpi_table_header *table;
+
+ *table_length = 0;
+ *address = 0;
+
+ if (!acpi_tables_addr)
+ return AE_OK;
+
+ do {
+ if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
+ WARN_ON(1);
+ return AE_OK;
+ }
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
+
+ table_offset += table->length;
+
+ if (memcmp(existing_table->signature, table->signature, 4)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ /* Only override tables with matching oem id */
+ if (memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ table_offset -= table->length;
+ *table_length = table->length;
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ *address = acpi_tables_addr + table_offset;
+ break;
+ } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+
+ if (*address != 0)
+ acpi_table_taint(existing_table);
+ return AE_OK;
+#endif
+}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
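acpi_os_physical_table_override() above walks the tables that acpi_initrd_override() packed back to back into the reserved area, matching the existing table's signature and OEM table id before handing back a physical address. A simplified stand-alone sketch of that walk follows; struct hdr and find_override() are illustrative stand-ins, not the real struct acpi_table_header or any kernel API, and the real code additionally compares oem_table_id.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* Cut-down stand-in for struct acpi_table_header (real layout has more
 * fields after length).
 */
struct hdr {
	char     signature[4];
	uint32_t length;		/* whole table, header included */
};

/* Return the offset of the first table whose signature matches, or -1. */
static long find_override(const unsigned char *area, size_t area_size,
			  const char sig[4])
{
	size_t off = 0;

	while (off + sizeof(struct hdr) <= area_size) {
		const struct hdr *h =
			(const struct hdr *)(const void *)(area + off);

		if (h->length < sizeof(struct hdr) ||
		    off + h->length > area_size)
			break;			/* corrupt length: stop */
		if (!memcmp(h->signature, sig, 4))
			return (long)off;	/* candidate found */
		off += h->length;		/* tables are packed back to back */
	}
	return -1;
}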
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 2ef04098cc1..a1dee29beed 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -45,11 +45,12 @@ static int acpi_pci_unbind(struct acpi_device *device)
device_set_run_wake(&dev->dev, false);
pci_acpi_remove_pm_notifier(device);
+ acpi_power_resource_unregister_device(&dev->dev, device->handle);
if (!dev->subordinate)
goto out;
- acpi_pci_irq_del_prt(dev->subordinate);
+ acpi_pci_irq_del_prt(pci_domain_nr(dev->bus), dev->subordinate->number);
device->ops.bind = NULL;
device->ops.unbind = NULL;
@@ -63,7 +64,7 @@ static int acpi_pci_bind(struct acpi_device *device)
{
acpi_status status;
acpi_handle handle;
- struct pci_bus *bus;
+ unsigned char bus;
struct pci_dev *dev;
dev = acpi_get_pci_dev(device->handle);
@@ -71,6 +72,7 @@ static int acpi_pci_bind(struct acpi_device *device)
return 0;
pci_acpi_add_pm_notifier(device, dev);
+ acpi_power_resource_register_device(&dev->dev, device->handle);
if (device->wakeup.flags.run_wake)
device_set_run_wake(&dev->dev, true);
@@ -100,11 +102,11 @@ static int acpi_pci_bind(struct acpi_device *device)
goto out;
if (dev->subordinate)
- bus = dev->subordinate;
+ bus = dev->subordinate->number;
else
- bus = dev->bus;
+ bus = dev->bus->number;
- acpi_pci_irq_add_prt(device->handle, bus);
+ acpi_pci_irq_add_prt(device->handle, pci_domain_nr(dev->bus), bus);
out:
pci_dev_put(dev);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 23a03249013..68a921d0324 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -184,7 +184,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
struct acpi_pci_routing_table *prt)
{
struct acpi_prt_entry *entry;
@@ -198,8 +198,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
- entry->id.segment = pci_domain_nr(bus);
- entry->id.bus = bus->number;
+ entry->id.segment = segment;
+ entry->id.bus = bus;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
@@ -244,7 +244,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -273,7 +273,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, bus, entry);
+ acpi_pci_irq_add_entry(handle, segment, bus, entry);
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -282,17 +282,16 @@ int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
return 0;
}
-void acpi_pci_irq_del_prt(struct pci_bus *bus)
+void acpi_pci_irq_del_prt(int segment, int bus)
{
struct acpi_prt_entry *entry, *tmp;
printk(KERN_DEBUG
"ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ segment, bus);
spin_lock(&acpi_prt_lock);
list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (pci_domain_nr(bus) == entry->id.segment
- && bus->number == entry->id.bus) {
+ if (segment == entry->id.segment && bus == entry->id.bus) {
list_del(&entry->list);
kfree(entry);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index f70b9e5fc1b..7928d4dc705 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -454,6 +454,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
+ bool is_osc_granted = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -501,85 +502,47 @@ static int acpi_pci_root_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
- root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
-
- /*
- * All supported architectures that use ACPI have support for
- * PCI domains, so we indicate this in _OSC support capabilities.
- */
- flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
- acpi_pci_osc_support(root, flags);
-
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&root->node, &acpi_pci_roots);
- mutex_unlock(&acpi_pci_root_lock);
-
printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
/*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- printk(KERN_ERR PREFIX
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto out_del_root;
- }
-
- /*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- result = acpi_pci_bind_root(device);
- if (result)
- goto out_del_root;
-
- /*
* PCI Routing Table
* -----------------
* Evaluate and parse _PRT, if exists.
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->bus);
+ result = acpi_pci_irq_add_prt(device->handle, root->segment,
+ root->secondary.start);
+
+ root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
/*
- * Scan and bind all _ADR-Based Devices
+ * All supported architectures that use ACPI have support for
+ * PCI domains, so we indicate this in _OSC support capabilities.
*/
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
+ flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ acpi_pci_osc_support(root, flags);
/* Indicate support for various _OSC capabilities. */
- if (pci_ext_cfg_avail(root->bus->self))
+ if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
- if (pcie_aspm_support_enabled())
+ if (pcie_aspm_support_enabled()) {
flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+ OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+ }
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
if (flags != base_flags) {
status = acpi_pci_osc_support(root, flags);
if (ACPI_FAILURE(status)) {
- dev_info(root->bus->bridge, "ACPI _OSC support "
+ dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
pcie_no_aspm();
flags = base_flags;
}
}
-
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
@@ -588,40 +551,81 @@ static int acpi_pci_root_add(struct acpi_device *device)
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
- dev_dbg(root->bus->bridge,
+ dev_dbg(&device->dev,
"PCIe errors handled by BIOS.\n");
else
flags |= OSC_PCI_EXPRESS_AER_CONTROL;
}
- dev_info(root->bus->bridge,
+ dev_info(&device->dev,
"Requesting ACPI _OSC control (0x%02x)\n", flags);
status = acpi_pci_osc_control_set(device->handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_SUCCESS(status)) {
- dev_info(root->bus->bridge,
+ is_osc_granted = true;
+ dev_info(&device->dev,
"ACPI _OSC control (0x%02x) granted\n", flags);
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- /*
- * We have ASPM control, but the FADT indicates
- * that it's unsupported. Clear it.
- */
- pcie_clear_aspm(root->bus);
- }
} else {
- dev_info(root->bus->bridge,
+ is_osc_granted = false;
+ dev_info(&device->dev,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
- pr_info("ACPI _OSC control for PCIe not granted, "
- "disabling ASPM\n");
- pcie_no_aspm();
}
} else {
- dev_info(root->bus->bridge,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
+ dev_info(&device->dev,
+ "Unable to request _OSC control "
+ "(_OSC support mask: 0x%02x)\n", flags);
+ }
+
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ mutex_lock(&acpi_pci_root_lock);
+ list_add_tail(&root->node, &acpi_pci_roots);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ printk(KERN_ERR PREFIX
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto out_del_root;
+ }
+
+ /*
+ * Attach ACPI-PCI Context
+ * -----------------------
+ * Thus binding the ACPI and PCI devices.
+ */
+ result = acpi_pci_bind_root(device);
+ if (result)
+ goto out_del_root;
+
+ /*
+ * Scan and bind all _ADR-Based Devices
+ */
+ list_for_each_entry(child, &device->children, node)
+ acpi_pci_bridge_scan(child);
+
+ /* ASPM setting */
+ if (is_osc_granted) {
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
+ pcie_clear_aspm(root->bus);
+ } else {
+ pr_info("ACPI _OSC control for PCIe not granted, "
+ "disabling ASPM\n");
+ pcie_no_aspm();
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
@@ -634,6 +638,8 @@ out_del_root:
mutex_lock(&acpi_pci_root_lock);
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
+
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
end:
kfree(root);
return result;
@@ -644,12 +650,19 @@ static int acpi_pci_root_start(struct acpi_device *device)
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
+ if (system_state != SYSTEM_BOOTING)
+ pci_assign_unassigned_bus_resources(root->bus);
+
mutex_lock(&acpi_pci_root_lock);
list_for_each_entry(driver, &acpi_pci_drivers, node)
if (driver->add)
driver->add(root);
mutex_unlock(&acpi_pci_root_lock);
+ /* must be done after a hot-added ioapic is registered */
+ if (system_state != SYSTEM_BOOTING)
+ pci_enable_bridges(root->bus);
+
pci_bus_add_devices(root->bus);
return 0;
@@ -657,17 +670,29 @@ static int acpi_pci_root_start(struct acpi_device *device)
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
+ acpi_status status;
+ acpi_handle handle;
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
+ pci_stop_root_bus(root->bus);
+
mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(driver, &acpi_pci_drivers, node)
+ list_for_each_entry_reverse(driver, &acpi_pci_drivers, node)
if (driver->remove)
driver->remove(root);
+ mutex_unlock(&acpi_pci_root_lock);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
+ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
+ if (ACPI_SUCCESS(status))
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
+
+ pci_remove_root_bus(root->bus);
+
+ mutex_lock(&acpi_pci_root_lock);
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
kfree(root);
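Because this patch moves several blocks around, the resulting order inside acpi_pci_root_add() is easier to see condensed. The sketch below reuses the names from the hunks but is not the literal function: acpi_pci_root_add_sketch is a hypothetical name, the kernel context of pci_root.c is assumed, and locking, error handling and the extra _OSC support flags (MSI, AER, extended config) are omitted.

static int acpi_pci_root_add_sketch(struct acpi_device *device,
				    struct acpi_pci_root *root)
{
	u32 flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
	bool is_osc_granted;
	acpi_status status;

	/* 1. Parse _PRT and negotiate _OSC while only the ACPI handle
	 *    exists; messages are therefore logged against &device->dev,
	 *    not the not-yet-created root->bus->bridge.
	 */
	acpi_pci_irq_add_prt(device->handle, root->segment,
			     root->secondary.start);
	acpi_pci_osc_support(root, flags);
	status = acpi_pci_osc_control_set(device->handle, &flags,
					  OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
	is_osc_granted = ACPI_SUCCESS(status);

	/* 2. Only now create and bind the PCI side. */
	root->bus = pci_acpi_scan_root(root);
	acpi_pci_bind_root(device);

	/* 3. Apply the ASPM decision once root->bus exists. */
	if (!is_osc_granted)
		pcie_no_aspm();
	else if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
		pcie_clear_aspm(root->bus);

	return 0;
}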
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index bd5de08ad6f..0576a7dd32a 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -157,6 +157,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
+#ifdef CONFIG_PM_SLEEP
static int tegra_ahb_suspend(struct device *dev)
{
int i;
@@ -176,6 +177,7 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
+#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index b7078afddb7..1cc467bdb63 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -25,6 +25,8 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static void ahci_host_stop(struct ata_host *host);
+
enum ahci_type {
AHCI, /* standard platform ahci */
IMX53_AHCI, /* ahci on i.mx53 */
@@ -47,6 +49,15 @@ static struct platform_device_id ahci_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);
+static struct ata_port_operations ahci_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = ahci_host_stop,
+};
+
+static struct ata_port_operations ahci_platform_retry_srst_ops = {
+ .inherits = &ahci_pmp_retry_srst_ops,
+ .host_stop = ahci_host_stop,
+};
static const struct ata_port_info ahci_port_info[] = {
/* by features */
@@ -54,20 +65,20 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
[IMX53_AHCI] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_pmp_retry_srst_ops,
+ .port_ops = &ahci_platform_retry_srst_ops,
},
[STRICT_AHCI] = {
AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
};
@@ -75,7 +86,7 @@ static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
-static int __init ahci_probe(struct platform_device *pdev)
+static int __devinit ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
@@ -218,15 +229,12 @@ free_clk:
return rc;
}
-static int __devexit ahci_remove(struct platform_device *pdev)
+static void ahci_host_stop(struct ata_host *host)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = host->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
- ata_host_detach(host);
-
if (pdata && pdata->exit)
pdata->exit(dev);
@@ -234,8 +242,6 @@ static int __devexit ahci_remove(struct platform_device *pdev)
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -317,7 +323,7 @@ disable_unprepare_clk:
}
#endif
-SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
@@ -326,7 +332,8 @@ static const struct of_device_id ahci_of_match[] = {
MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver ahci_driver = {
- .remove = __devexit_p(ahci_remove),
+ .probe = ahci_probe,
+ .remove = ata_platform_remove_one,
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
@@ -335,18 +342,7 @@ static struct platform_driver ahci_driver = {
},
.id_table = ahci_devtype,
};
-
-static int __init ahci_init(void)
-{
- return platform_driver_probe(&ahci_driver, ahci_probe);
-}
-module_init(ahci_init);
-
-static void __exit ahci_exit(void)
-{
- platform_driver_unregister(&ahci_driver);
-}
-module_exit(ahci_exit);
+module_platform_driver(ahci_driver);
MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
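For context on the last hunk: module_platform_driver() generates roughly the init/exit boilerplate shown below, which is why .probe = ahci_probe now has to be set in the platform_driver itself instead of being passed to platform_driver_probe(). This is a simplified sketch, not the literal macro expansion.

/* Rough equivalent of module_platform_driver(ahci_driver); simplified. */
static int __init ahci_driver_init(void)
{
	return platform_driver_register(&ahci_driver);
}
module_init(ahci_driver_init);

static void __exit ahci_driver_exit(void)
{
	platform_driver_unregister(&ahci_driver);
}
module_exit(ahci_driver_exit);

The teardown formerly done in ahci_remove() now runs from the driver's ->host_stop() callback when the ata_host is detached via ata_platform_remove_one(), so no platform-level remove body is needed anymore.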
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ef773e12af7..acffcf0b3ad 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -164,28 +164,6 @@ struct piix_host_priv {
void __iomem *sidpr;
};
-static int piix_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void piix_remove_one(struct pci_dev *pdev);
-static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
-static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
-static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static int ich_pata_cable_detect(struct ata_port *ap);
-static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-static int piix_sidpr_scr_read(struct ata_link *link,
- unsigned int reg, u32 *val);
-static int piix_sidpr_scr_write(struct ata_link *link,
- unsigned int reg, u32 val);
-static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- unsigned hints);
-static bool piix_irq_check(struct ata_port *ap);
-static int piix_port_start(struct ata_port *ap);
-#ifdef CONFIG_PM
-static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int piix_pci_device_resume(struct pci_dev *pdev);
-#endif
-
static unsigned int in_module_init = 1;
static const struct pci_device_id piix_pci_tbl[] = {
@@ -342,64 +320,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ } /* terminate list */
};
-static struct pci_driver piix_pci_driver = {
- .name = DRV_NAME,
- .id_table = piix_pci_tbl,
- .probe = piix_init_one,
- .remove = piix_remove_one,
-#ifdef CONFIG_PM
- .suspend = piix_pci_device_suspend,
- .resume = piix_pci_device_resume,
-#endif
-};
-
-static struct scsi_host_template piix_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations piix_sata_ops = {
- .inherits = &ata_bmdma32_port_ops,
- .sff_irq_check = piix_irq_check,
- .port_start = piix_port_start,
-};
-
-static struct ata_port_operations piix_pata_ops = {
- .inherits = &piix_sata_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = piix_set_piomode,
- .set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
-};
-
-static struct ata_port_operations piix_vmw_ops = {
- .inherits = &piix_pata_ops,
- .bmdma_status = piix_vmw_bmdma_status,
-};
-
-static struct ata_port_operations ich_pata_ops = {
- .inherits = &piix_pata_ops,
- .cable_detect = ich_pata_cable_detect,
- .set_dmamode = ich_set_dmamode,
-};
-
-static struct device_attribute *piix_sidpr_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- NULL
-};
-
-static struct scsi_host_template piix_sidpr_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
- .shost_attrs = piix_sidpr_shost_attrs,
-};
-
-static struct ata_port_operations piix_sidpr_sata_ops = {
- .inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
- .scr_read = piix_sidpr_scr_read,
- .scr_write = piix_sidpr_scr_write,
- .set_lpm = piix_sidpr_set_lpm,
-};
-
static const struct piix_map_db ich5_map_db = {
.mask = 0x7,
.port_enable = 0x3,
@@ -504,147 +424,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_sata_snb] = &ich8_map_db,
};
-static struct ata_port_info piix_port_info[] = {
- [piix_pata_mwdma] = /* PIIX3 MWDMA only */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .port_ops = &piix_pata_ops,
- },
-
- [piix_pata_33] = /* PIIX4 at 33MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_pata_ops,
- },
-
- [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
- .udma_mask = ATA_UDMA2,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_66] = /* ICH controllers up to 66MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
- .udma_mask = ATA_UDMA4,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100_nomwdma1] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich5_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6m_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_2port_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [tolapai_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8m_apple_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [piix_pata_vmw] =
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_vmw_ops,
- },
-
- /*
- * some Sandybridge chipsets have broken 32 mode up to now,
- * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
- */
- [ich8_sata_snb] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
-};
-
static struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
@@ -1261,6 +1040,193 @@ static u8 piix_vmw_bmdma_status(struct ata_port *ap)
return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}
+static struct scsi_host_template piix_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_irq_check = piix_irq_check,
+ .port_start = piix_port_start,
+};
+
+static struct ata_port_operations piix_pata_ops = {
+ .inherits = &piix_sata_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+ .set_dmamode = piix_set_dmamode,
+ .prereset = piix_pata_prereset,
+};
+
+static struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+};
+
+static struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+};
+
+static struct device_attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ NULL
+};
+
+static struct scsi_host_template piix_sidpr_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .shost_attrs = piix_sidpr_shost_attrs,
+};
+
+static struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+ .scr_write = piix_sidpr_scr_write,
+ .set_lpm = piix_sidpr_set_lpm,
+};
+
+static struct ata_port_info piix_port_info[] = {
+ [piix_pata_mwdma] = /* PIIX3 MWDMA only */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .port_ops = &piix_pata_ops,
+ },
+
+ [piix_pata_33] = /* PIIX4 at 33MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_pata_ops,
+ },
+
+ [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_66] = /* ICH controllers up to 66MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100_nomwdma1] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich5_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6m_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_2port_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [tolapai_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8m_apple_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [piix_pata_vmw] =
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_vmw_ops,
+ },
+
+ /*
+ * some Sandybridge chipsets have broken 32 mode up to now,
+ * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+ */
+ [ich8_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+};
+
#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)
@@ -1585,12 +1551,31 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
},
{ } /* terminate list */
};
- const struct dmi_system_id *dmi = dmi_first_match(ignore_hyperv);
+ static const struct dmi_system_id allow_virtual_pc[] = {
+ {
+ /* In MS Virtual PC guests the DMI ident is nearly
+ * identical to that of a Hyper-V guest. One difference is the
+ * product version which is used here to identify
+ * a Virtual PC guest. This entry allows ata_piix to
+ * drive the emulated hardware.
+ */
+ .ident = "MS Virtual PC 2007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ },
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
+ const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
- if (dmi && prefer_ms_hyperv) {
+ if (ignore && !allow && prefer_ms_hyperv) {
host->flags |= ATA_HOST_IGNORE_ATA;
dev_info(host->dev, "%s detected, ATA device ignore set\n",
- dmi->ident);
+ ignore->ident);
}
#endif
}
@@ -1727,6 +1712,17 @@ static void piix_remove_one(struct pci_dev *pdev)
ata_pci_remove_one(pdev);
}
+static struct pci_driver piix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = piix_remove_one,
+#ifdef CONFIG_PM
+ .suspend = piix_pci_device_suspend,
+ .resume = piix_pci_device_resume,
+#endif
+};
+
static int __init piix_init(void)
{
int rc;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4201e535a8c..320712a7b9e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1384,7 +1384,7 @@ int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"applying PMP SRST workaround "
"and retrying\n");
rc = ahci_do_softreset(link, class, 0, deadline,
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 5b0ba3f20ed..ef01ac07502 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -76,6 +76,9 @@ acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
acpi_integer adr;
struct ata_port *ap = dev->link->ap;
+ if (dev->flags & ATA_DFLAG_ACPI_DISABLED)
+ return NULL;
+
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
@@ -945,6 +948,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
return rc;
}
+ dev->flags |= ATA_DFLAG_ACPI_DISABLED;
ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
/* We can safely continue if no _GTF command has been executed
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f46fbd3bd3f..9e8b99af400 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -67,6 +67,7 @@
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
#include "libata.h"
#include "libata-transport.h"
@@ -2560,6 +2561,7 @@ int ata_bus_probe(struct ata_port *ap)
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
@@ -6286,8 +6288,7 @@ void ata_host_detach(struct ata_host *host)
*/
void ata_pci_remove_one(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
ata_host_detach(host);
}
@@ -6356,7 +6357,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc = 0;
rc = ata_host_suspend(host, mesg);
@@ -6370,7 +6371,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
int ata_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
@@ -6382,6 +6383,26 @@ int ata_pci_device_resume(struct pci_dev *pdev)
#endif /* CONFIG_PCI */
+/**
+ * ata_platform_remove_one - Platform layer callback for device removal
+ * @pdev: Platform device that was removed
+ *
+ * The platform layer indicates to libata via this hook that a hot-unplug
+ * or module unload event has occurred. Detach all ports. Resource
+ * release is handled via devres.
+ *
+ * LOCKING:
+ * Inherited from platform layer (may sleep).
+ */
+int ata_platform_remove_one(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
@@ -6877,6 +6898,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e60437cd0d1..bf039b0e97b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2657,6 +2657,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a6df6a351d6..7c337e754da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ if (atadev && ap->ops->sw_activity_show &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY))
return ap->ops->sw_activity_show(atadev, buf);
return -EINVAL;
}
@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
enum sw_activity val;
int rc;
- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ if (atadev && ap->ops->sw_activity_store &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
val = simple_strtoul(buf, NULL, 0);
switch (val) {
case OFF: case BLINK_ON: case BLINK_OFF:
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 371fd2c698b..9764e80f932 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -674,13 +674,16 @@ void arasan_cf_error_handler(struct ata_port *ap)
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
+ struct ata_queued_cmd *qc = acdev->qc;
+ struct ata_port *ap = qc->ap;
+ struct ata_taskfile *tf = &qc->tf;
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
- u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 write = tf->flags & ATA_TFLAG_WRITE;
xfer_ctr |= write ? XFER_WRITE : XFER_READ;
writel(xfer_ctr, acdev->vbase + XFER_CTR);
- acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
+ ap->ops->sff_exec_command(ap, tf);
ata_sff_queue_work(&acdev->work);
}
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7ba01415b67..2949cfc2dd3 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -474,14 +474,14 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, &reg);
if (!port_ok)
- dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
+ dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
+ dev_notice(&pdev->dev, "Primary port is disabled\n");
ppi[0] = &ata_dummy_port_info;
}
if (port_ok && !(reg & CNTRL_CH1)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
+ dev_notice(&pdev->dev, "Secondary port is disabled\n");
ppi[1] = &ata_dummy_port_info;
}
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index dec1b6c4b35..0448860a207 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -38,6 +38,7 @@
#include <linux/delay.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86_32
#include <asm/msr.h>
@@ -80,6 +81,21 @@ enum {
IDE_ETC_UDMA_MASK = 0xc0,
};
+/* Some Bachmann OT200 devices have non-working UDMA support due to a
+ * missing resistor.
+ */
+static const struct dmi_system_id udma_quirk_dmi_table[] = {
+ {
+ .ident = "Bachmann electronic OT200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OT200"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1")
+ },
+ },
+ { }
+};
+
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
@@ -242,9 +258,23 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &cs5536_port_ops,
};
- const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ static const struct ata_port_info no_udma_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &cs5536_port_ops,
+ };
+
+
+ const struct ata_port_info *ppi[2];
u32 cfg;
+ if (dmi_check_system(udma_quirk_dmi_table))
+ ppi[0] = &no_udma_info;
+ else
+ ppi[0] = &info;
+
+ ppi[1] = &ata_dummy_port_info;
+
if (use_msr)
printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index e056406d6a1..3982cef91f3 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -822,8 +822,7 @@ static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
- ata_link_printk(al, KERN_ERR, "SRST failed (errno=%d)\n",
- rc);
+ ata_link_err(al, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -857,8 +856,7 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
/* Can become DEBUG later */
if (count)
- ata_port_printk(ap, KERN_DEBUG,
- "drained %d bytes to clear DRQ.\n", count);
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
}
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 87bb05b3caf..7d40b526ef1 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -60,7 +60,7 @@ static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index badb1789a91..0566e67b5e1 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -187,22 +187,13 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
}
-static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
-{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ixp4xx_pata_probe,
- .remove = __devexit_p(ixp4xx_pata_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index d2c102fd433..ec67f54dc56 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -663,18 +663,6 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
&mpc52xx_ata_sht);
}
-static struct mpc52xx_ata_priv *
-mpc52xx_ata_remove_one(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
- struct mpc52xx_ata_priv *priv = host->private_data;
-
- ata_host_detach(host);
-
- return priv;
-}
-
-
/* ======================================================================== */
/* OF Platform driver */
/* ======================================================================== */
@@ -815,11 +803,12 @@ mpc52xx_ata_probe(struct platform_device *op)
static int
mpc52xx_ata_remove(struct platform_device *op)
{
- struct mpc52xx_ata_priv *priv;
+ struct ata_host *host = platform_get_drvdata(op);
+ struct mpc52xx_ata_priv *priv = host->private_data;
int task_irq;
/* Deregister the ATA interface */
- priv = mpc52xx_ata_remove_one(&op->dev);
+ ata_platform_remove_one(op);
/* Clean up DMA */
task_irq = bcom_get_task_irq(priv->dmatsk);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1d61d5d278f..4e1194b4c27 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -5,19 +5,22 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2005 - 2009 Cavium Networks
+ * Copyright (C) 2005 - 2012 Cavium Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
-#include <linux/irq.h>
+#include <linux/hrtimer.h>
#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
+#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
/*
@@ -34,20 +37,36 @@
*/
#define DRV_NAME "pata_octeon_cf"
-#define DRV_VERSION "2.1"
+#define DRV_VERSION "2.2"
+
+/* Poll interval in ns. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50
struct octeon_cf_port {
- struct workqueue_struct *wq;
- struct delayed_work delayed_finish;
+ struct hrtimer delayed_finish;
struct ata_port *ap;
int dma_finished;
+ void *c0;
+ unsigned int cs0;
+ unsigned int cs1;
+ bool is_true_ide;
+ u64 dma_base;
};
static struct scsi_host_template octeon_cf_sht = {
ATA_PIO_SHT(DRV_NAME),
};
+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+ "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
/**
* Convert nanosecond based time to setting used in the
* boot bus timing register, based on timing multiple
@@ -66,12 +85,29 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
return val;
}
-static void octeon_cf_set_boot_reg_cfg(int cs)
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
union cvmx_mio_boot_reg_cfgx reg_cfg;
+ unsigned int tim_mult;
+
+ switch (multiplier) {
+ case 8:
+ tim_mult = 3;
+ break;
+ case 4:
+ tim_mult = 0;
+ break;
+ case 2:
+ tim_mult = 2;
+ break;
+ default:
+ tim_mult = 1;
+ break;
+ }
+
reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
- reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
+ reg_cfg.s.tim_mult = tim_mult; /* Timing multiplier */
reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
reg_cfg.s.sam = 0; /* Don't combine write and output enable */
reg_cfg.s.we_ext = 0; /* No write enable extension */
@@ -92,12 +128,12 @@ static void octeon_cf_set_boot_reg_cfg(int cs)
*/
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_reg_timx reg_tim;
- int cs = ocd->base_region;
int T;
struct ata_timing timing;
+ unsigned int div;
int use_iordy;
int trh;
int pause;
@@ -106,7 +142,15 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
int t2;
int t2i;
- T = (int)(2000000000000LL / octeon_get_clock_rate());
+ /*
+ * A divisor value of four will overflow the timing fields at
+ * clock rates greater than 800MHz
+ */
+ if (octeon_get_io_clock_rate() <= 800000000)
+ div = 4;
+ else
+ div = 8;
+ T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
BUG();
@@ -121,23 +165,26 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
if (t2i)
t2i--;
- trh = ns_to_tim_reg(2, 20);
+ trh = ns_to_tim_reg(div, 20);
if (trh)
trh--;
- pause = timing.cycle - timing.active - timing.setup - trh;
+ pause = (int)timing.cycle - (int)timing.active -
+ (int)timing.setup - trh;
+ if (pause < 0)
+ pause = 0;
if (pause)
pause--;
- octeon_cf_set_boot_reg_cfg(cs);
- if (ocd->dma_engine >= 0)
+ octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- octeon_cf_set_boot_reg_cfg(cs + 1);
+ octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
use_iordy = ata_pio_need_iordy(dev);
- reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
+ reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
/* Disable page mode */
reg_tim.s.pagem = 0;
/* Enable dynamic timing */
@@ -161,20 +208,22 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
/* How long read enable is asserted */
reg_tim.s.oe = t2;
/* Time after CE that read/write starts */
- reg_tim.s.ce = ns_to_tim_reg(2, 5);
+ reg_tim.s.ce = ns_to_tim_reg(div, 5);
/* Time before CE that address is valid */
reg_tim.s.adr = 0;
/* Program the bootbus region timing for the data port chip select. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
- if (ocd->dma_engine >= 0)
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+ reg_tim.u64);
}
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
+ union cvmx_mio_boot_pin_defs pin_defs;
union cvmx_mio_boot_dma_timx dma_tim;
unsigned int oe_a;
unsigned int oe_n;
@@ -183,6 +232,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
unsigned int pause;
unsigned int T0, Tkr, Td;
unsigned int tim_mult;
+ int c;
const struct ata_timing *timing;
@@ -199,13 +249,19 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
/* not spec'ed, value in eclocks, not affected by tim_mult */
dma_arq = 8;
pause = 25 - dma_arq * 1000 /
- (octeon_get_clock_rate() / 1000000); /* Tz */
+ (octeon_get_io_clock_rate() / 1000000); /* Tz */
oe_a = Td;
/* Tkr from cf spec, lengthened to meet T0 */
oe_n = max(T0 - oe_a, Tkr);
- dma_tim.s.dmack_pi = 1;
+ pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+ /* DMA channel number. */
+ c = (cf_port->dma_base & 8) >> 3;
+
+ /* Invert the polarity if the default is 0 */
+ dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;
dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
@@ -228,14 +284,11 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
- pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
- "%d, dmarq: %d, pause: %d\n",
+ pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
- dma_tim.u64);
-
+ cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
/**
@@ -489,15 +542,10 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
ata_wait_idle(ap);
}
-static void octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
-static void octeon_cf_irq_clear(struct ata_port *ap)
-{
- return;
-}
-
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -519,7 +567,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
*/
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
- struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = qc->ap->private_data;
union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
@@ -535,15 +583,16 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
*/
mio_boot_dma_int.u64 = 0;
mio_boot_dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);
/* Enable the interrupt. */
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);
/* Set the direction of the DMA */
mio_boot_dma_cfg.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ mio_boot_dma_cfg.s.endian = 1;
+#endif
mio_boot_dma_cfg.s.en = 1;
mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
@@ -569,8 +618,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
(void *)(unsigned long)mio_boot_dma_cfg.s.adr);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
- mio_boot_dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
/**
@@ -583,10 +631,9 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
- struct octeon_cf_port *cf_port;
u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -596,9 +643,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
- cf_port = ap->private_data;
-
- dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
if (dma_cfg.s.size != 0xfffff) {
/* Error, the transfer was not complete. */
qc->err_mask |= AC_ERR_HOST_BUS;
@@ -608,15 +653,15 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
/* Stop and clear the dma engine. */
dma_cfg.u64 = 0;
dma_cfg.s.size = -1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
/* Disable the interrupt. */
dma_int.u64 = 0;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
/* Clear the DMA complete status */
dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
status = ap->ops->sff_check_status(ap);
@@ -649,69 +694,68 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
struct ata_queued_cmd *qc;
union cvmx_mio_boot_dma_intx dma_int;
union cvmx_mio_boot_dma_cfgx dma_cfg;
- struct octeon_cf_data *ocd;
ap = host->ports[i];
- ocd = ap->dev->platform_data;
cf_port = ap->private_data;
- dma_int.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
- dma_cfg.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+
+ dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
- if (dma_int.s.done && !dma_cfg.s.en) {
- if (!sg_is_last(qc->cursg)) {
- qc->cursg = sg_next(qc->cursg);
- handled = 1;
- octeon_cf_dma_start(qc);
- continue;
- } else {
- cf_port->dma_finished = 1;
- }
- }
- if (!cf_port->dma_finished)
- continue;
- status = ioread8(ap->ioaddr.altstatus_addr);
- if (status & (ATA_BUSY | ATA_DRQ)) {
- /*
- * We are busy, try to handle it
- * later. This is the DMA finished
- * interrupt, and it could take a
- * little while for the card to be
- * ready for more commands.
- */
- /* Clear DMA irq. */
- dma_int.u64 = 0;
- dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- dma_int.u64);
-
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
+ continue;
+
+ if (dma_int.s.done && !dma_cfg.s.en) {
+ if (!sg_is_last(qc->cursg)) {
+ qc->cursg = sg_next(qc->cursg);
handled = 1;
+ octeon_cf_dma_start(qc);
+ continue;
} else {
- handled |= octeon_cf_dma_finished(ap, qc);
+ cf_port->dma_finished = 1;
}
}
+ if (!cf_port->dma_finished)
+ continue;
+ status = ioread8(ap->ioaddr.altstatus_addr);
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ /*
+ * We are busy, try to handle it later. This
+ * is the DMA finished interrupt, and it could
+ * take a little while for the card to be
+ * ready for more commands.
+ */
+ /* Clear DMA irq. */
+ dma_int.u64 = 0;
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT,
+ dma_int.u64);
+ hrtimer_start_range_ns(&cf_port->delayed_finish,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
+ OCTEON_CF_BUSY_POLL_INTERVAL / 5,
+ HRTIMER_MODE_REL);
+ handled = 1;
+ } else {
+ handled |= octeon_cf_dma_finished(ap, qc);
+ }
}
spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
-static void octeon_cf_delayed_finish(struct work_struct *work)
+static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
- struct octeon_cf_port *cf_port = container_of(work,
+ struct octeon_cf_port *cf_port = container_of(hrt,
struct octeon_cf_port,
- delayed_finish.work);
+ delayed_finish);
struct ata_port *ap = cf_port->ap;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
u8 status;
+ enum hrtimer_restart rv = HRTIMER_NORESTART;
spin_lock_irqsave(&host->lock, flags);
@@ -726,15 +770,17 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
status = ioread8(ap->ioaddr.altstatus_addr);
if (status & (ATA_BUSY | ATA_DRQ)) {
/* Still busy, try again. */
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ hrtimer_forward_now(hrt,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
+ rv = HRTIMER_RESTART;
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
+ return rv;
}
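The hunks above replace the delayed workqueue with a self-rearming hrtimer. For reference, the pattern reduces to the stand-alone sketch below; the function names and poll interval are illustrative only, while the driver itself polls the ATA status register and uses OCTEON_CF_BUSY_POLL_INTERVAL.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define EXAMPLE_POLL_NS (65 * NSEC_PER_USEC)	/* hypothetical interval */

static struct hrtimer example_timer;
static int example_todo = 10;			/* stand-in for "device still busy" */

static enum hrtimer_restart example_poll(struct hrtimer *hrt)
{
	if (--example_todo > 0) {
		/* Still busy: push the expiry forward and keep the timer running. */
		hrtimer_forward_now(hrt, ns_to_ktime(EXAMPLE_POLL_NS));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;		/* work finished, stop polling */
}

static void example_start_polling(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_poll;
	hrtimer_start(&example_timer, ns_to_ktime(EXAMPLE_POLL_NS),
		      HRTIMER_MODE_REL);
}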
static void octeon_cf_dev_config(struct ata_device *dev)
@@ -786,8 +832,8 @@ static struct ata_port_operations octeon_cf_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
- .sff_irq_on = octeon_cf_irq_on,
- .sff_irq_clear = octeon_cf_irq_clear,
+ .sff_irq_on = octeon_cf_ata_port_noaction,
+ .sff_irq_clear = octeon_cf_ata_port_noaction,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
@@ -798,46 +844,113 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
{
struct resource *res_cs0, *res_cs1;
+ bool is_16bit;
+ const __be32 *cs_num;
+ struct property *reg_prop;
+ int n_addr, n_size, reg_len;
+ struct device_node *node;
+ const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
struct ata_port *ap;
- struct octeon_cf_data *ocd;
int irq = 0;
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
- char version[32];
+ int rv = -ENOMEM;
- res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_cs0)
+ node = pdev->dev.of_node;
+ if (node == NULL)
return -EINVAL;
- ocd = pdev->dev.platform_data;
+ cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+ if (!cf_port)
+ return -ENOMEM;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
- resource_size(res_cs0));
+ cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
- if (!cs0)
- return -ENOMEM;
+ prop = of_get_property(node, "cavium,bus-width", NULL);
+ if (prop)
+ is_16bit = (be32_to_cpup(prop) == 16);
+ else
+ is_16bit = false;
- /* Determine from availability of DMA if True IDE mode or not */
- if (ocd->dma_engine >= 0) {
- res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_cs1)
- return -EINVAL;
+ n_addr = of_n_addr_cells(node);
+ n_size = of_n_size_cells(node);
+ reg_prop = of_find_property(node, "reg", &reg_len);
+ if (!reg_prop || reg_len < sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num = reg_prop->value;
+ cf_port->cs0 = be32_to_cpup(cs_num);
+
+ if (cf_port->is_true_ide) {
+ struct device_node *dma_node;
+ dma_node = of_parse_phandle(node,
+ "cavium,dma-engine-handle", 0);
+ if (dma_node) {
+ struct platform_device *dma_dev;
+ dma_dev = of_find_device_by_node(dma_node);
+ if (dma_dev) {
+ struct resource *res_dma;
+ int i;
+ res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+ if (!res_dma) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ resource_size(res_dma));
+
+ if (!cf_port->dma_base) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ irq_handler = octeon_cf_interrupt;
+ i = platform_get_irq(dma_dev, 0);
+ if (i > 0)
+ irq = i;
+ }
+ of_node_put(dma_node);
+ }
+ res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_cs1) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
- resource_size(res_cs1));
+ res_cs1->end - res_cs1->start + 1);
if (!cs1)
- return -ENOMEM;
+ goto free_cf_port;
+
+ if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num += n_addr + n_size;
+ cf_port->cs1 = be32_to_cpup(cs_num);
}
- cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
- if (!cf_port)
- return -ENOMEM;
+ res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res_cs0) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ resource_size(res_cs0));
+
+ if (!cs0)
+ goto free_cf_port;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -846,21 +959,22 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->private_data = cf_port;
+ pdev->dev.platform_data = cf_port;
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
- base = cs0 + ocd->base_region_bias;
- if (!ocd->is16bit) {
+ if (!is_16bit) {
+ base = cs0 + 0x800;
ap->ioaddr.cmd_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.altstatus_addr = base + 0xe;
ap->ioaddr.ctl_addr = base + 0xe;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
- } else if (cs1) {
- /* Presence of cs1 indicates True IDE mode. */
+ } else if (cf_port->is_true_ide) {
+ base = cs0;
ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
@@ -876,19 +990,15 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- ap->mwdma_mask = ATA_MWDMA4;
- irq = platform_get_irq(pdev, 0);
- irq_handler = octeon_cf_interrupt;
-
- /* True IDE mode needs delayed work to poll for not-busy. */
- cf_port->wq = create_singlethread_workqueue(DRV_NAME);
- if (!cf_port->wq)
- goto free_cf_port;
- INIT_DELAYED_WORK(&cf_port->delayed_finish,
- octeon_cf_delayed_finish);
+ ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
+ /* True IDE mode needs a timer to poll for not-busy. */
+ hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ cf_port->delayed_finish.function = octeon_cf_delayed_finish;
} else {
/* 16 bit but not True IDE */
+ base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
octeon_cf_ops.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
@@ -902,28 +1012,71 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = base + 0xe;
ap->ioaddr.altstatus_addr = base + 0xe;
}
+ cf_port->c0 = ap->ioaddr.ctl_addr;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
- snprintf(version, sizeof(version), "%s %d bit%s",
- DRV_VERSION,
- (ocd->is16bit) ? 16 : 8,
- (cs1) ? ", True IDE" : "");
- ata_print_version_once(&pdev->dev, version);
+ dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
+ is_16bit ? 16 : 8,
+ cf_port->is_true_ide ? ", True IDE" : "");
- return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
+ return ata_host_activate(host, irq, irq_handler,
+ IRQF_SHARED, &octeon_cf_sht);
free_cf_port:
kfree(cf_port);
- return -ENOMEM;
+ return rv;
+}
+
+static void octeon_cf_shutdown(struct device *dev)
+{
+ union cvmx_mio_boot_dma_cfgx dma_cfg;
+ union cvmx_mio_boot_dma_intx dma_int;
+
+ struct octeon_cf_port *cf_port = dev->platform_data;
+
+ if (cf_port->dma_base) {
+ /* Stop and clear the dma engine. */
+ dma_cfg.u64 = 0;
+ dma_cfg.s.size = -1;
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+ /* Disable the interrupt. */
+ dma_int.u64 = 0;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+ /* Clear the DMA complete status */
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+ __raw_writeb(0, cf_port->c0);
+ udelay(20);
+ __raw_writeb(ATA_SRST, cf_port->c0);
+ udelay(20);
+ __raw_writeb(0, cf_port->c0);
+ mdelay(100);
+ }
}
+static struct of_device_id octeon_cf_match[] = {
+ {
+ .compatible = "cavium,ebt3000-compact-flash",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = octeon_cf_match,
+ .shutdown = octeon_cf_shutdown
},
};
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1654dc27e7f..e5b234c370f 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
+#include <linux/libata.h>
static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
{
@@ -76,11 +77,6 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
reg_shift, pio_mask);
}
-static int __devexit pata_of_platform_remove(struct platform_device *ofdev)
-{
- return __pata_platform_remove(&ofdev->dev);
-}
-
static struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
{ .compatible = "electra-ide", },
@@ -95,7 +91,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove = __devexit_p(pata_of_platform_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 5ff31b68135..f9f79fc04a8 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -111,9 +111,7 @@ err1:
static __devexit int palmld_pata_remove(struct platform_device *dev)
{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
+ ata_platform_remove_one(dev);
/* power down the HDD */
gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index f1848aeda78..f4372d0c7ce 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -178,23 +178,6 @@ int __devinit __pata_platform_probe(struct device *dev,
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
-/**
- * __pata_platform_remove - unplug a platform interface
- * @dev: device
- *
- * A platform bus ATA device has been unplugged. Perform the needed
- * cleanup. Also called on module unload for any active devices.
- */
-int __pata_platform_remove(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pata_platform_remove);
-
static int __devinit pata_platform_probe(struct platform_device *pdev)
{
struct resource *io_res;
@@ -242,14 +225,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
pio_mask);
}
-static int __devexit pata_platform_remove(struct platform_device *pdev)
-{
- return __pata_platform_remove(&pdev->dev);
-}
-
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove = __devexit_p(pata_platform_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 32a3499e83e..e71f998dd90 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -327,7 +327,6 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
- unsigned long port_flags;
struct ata_host *host;
struct rdc_host_priv *hpriv;
int rc;
@@ -337,8 +336,6 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
- port_flags = port_info[0].flags;
-
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 937aeb34b31..2e391730e8b 100755..100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -43,6 +43,7 @@
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
+
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 400bf1c3e98..dc7d78eecb1 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -213,7 +213,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = 0x80;
+ tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
@@ -368,16 +368,6 @@ err0:
return rc;
}
-static int __devexit ahci_highbank_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
@@ -432,7 +422,7 @@ SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove = __devexit_p(ahci_highbank_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = "highbank-ahci",
.owner = THIS_MODULE,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index dc35f4d42b8..1e6827c8942 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -273,12 +273,10 @@ static void inic_reset_port(void __iomem *port_base)
static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
- void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
- addr = scr_addr + scr_map[sc_reg] * 4;
*val = readl(scr_addr + scr_map[sc_reg] * 4);
/* this controller has stuck DIAG.N, ignore it */
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 489c8176832..fb0dd87f889 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -147,6 +147,10 @@ struct pdc_port_priv {
dma_addr_t pkt_dma;
};
+struct pdc_host_priv {
+ spinlock_t hard_reset_lock;
+};
+
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
+ struct pdc_host_priv *hpriv = ap->host->private_data;
u8 tmp;
- spin_lock(&ap->host->lock);
+ spin_lock(&hpriv->hard_reset_lock);
tmp = readb(pcictl_b1_mmio);
tmp &= ~(0x10 << ata_no);
@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
writeb(tmp, pcictl_b1_mmio);
readb(pcictl_b1_mmio); /* flush */
- spin_unlock(&ap->host->lock);
+ spin_unlock(&hpriv->hard_reset_lock);
}
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
+ struct pdc_host_priv *hpriv;
void __iomem *host_mmio;
int n_ports, i, rc;
int is_sataii_tx4;
@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
+ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ spin_lock_init(&hpriv->hard_reset_lock);
+ host->private_data = hpriv;
host->iomap = pcim_iomap_table(pdev);
is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index a5f2a563a26..59f0d630d63 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -506,8 +506,6 @@ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
@@ -519,8 +517,6 @@ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 12260559316..7b7127a58f5 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -315,9 +315,8 @@ static int pdc_port_start(struct ata_port *ap)
return 0;
}
-static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
@@ -337,9 +336,8 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
buf32[dw], buf32[dw + 1]);
}
-static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
@@ -486,10 +484,10 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
/*
* Build ATA, host DMA packets
*/
- pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
- pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
if (qc->tf.flags & ATA_TFLAG_LBA48)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c909b7b7d5f..d47db401027 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -42,7 +42,8 @@
#include <linux/swab.h>
#include <linux/slab.h>
-#define VERSION "0.07"
+#define VERSION "1.04"
+#define DRIVER_VERSION 0x01
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
@@ -56,16 +57,21 @@
#define FLASH_BUSY 0x60
#define FPGA_MODE 0x5C
#define FLASH_MODE 0x58
+#define GPIO_STATUS 0x54
+#define DRIVER_VER 0x50
#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 2048
#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
-#define FPGA_PAGE 528 /* FPGA flash page size*/
-#define SOLOS_PAGE 512 /* Solos flash page size*/
-#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
-#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
+/* Old boards use ATMEL AD45DB161D flash */
+#define ATMEL_FPGA_PAGE 528 /* FPGA flash page size*/
+#define ATMEL_SOLOS_PAGE 512 /* Solos flash page size*/
+#define ATMEL_FPGA_BLOCK (ATMEL_FPGA_PAGE * 8) /* FPGA block size*/
+#define ATMEL_SOLOS_BLOCK (ATMEL_SOLOS_PAGE * 8) /* Solos block size*/
+/* Current boards use M25P/M25PE SPI flash */
+#define SPI_FLASH_BLOCK (256 * 64)
#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
@@ -122,11 +128,14 @@ struct solos_card {
struct sk_buff_head cli_queue[4];
struct sk_buff *tx_skb[4];
struct sk_buff *rx_skb[4];
+ unsigned char *dma_bounce;
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
+ int dma_alignment;
int fpga_version;
int buffer_size;
+ int atmel_flash;
};
@@ -451,7 +460,6 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
len = skb->len;
memcpy(buf, skb->data, len);
- dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
return len;
@@ -498,6 +506,78 @@ static ssize_t console_store(struct device *dev, struct device_attribute *attr,
return err?:count;
}
+struct geos_gpio_attr {
+ struct device_attribute attr;
+ int offset;
+};
+
+#define SOLOS_GPIO_ATTR(_name, _mode, _show, _store, _offset) \
+ struct geos_gpio_attr gpio_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .offset = _offset }
+
+static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ if (count != 1 && (count != 2 || buf[1] != '\n'))
+ return -EINVAL;
+
+ spin_lock_irq(&card->param_queue_lock);
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ if (buf[0] == '1') {
+ data32 |= 1 << gattr->offset;
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else if (buf[0] == '0') {
+ data32 &= ~(1 << gattr->offset);
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else {
+ count = -EINVAL;
+ }
+ spin_unlock_irq(&card->param_queue_lock);
+ return count;
+}
+
+static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ data32 = (data32 >> gattr->offset) & 1;
+
+ return sprintf(buf, "%d\n", data32);
+}
+
+static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ switch (gattr->offset) {
+ case 0:
+ /* HardwareVersion */
+ data32 = data32 & 0x1F;
+ break;
+ case 1:
+ /* HardwareVariant */
+ data32 = (data32 >> 5) & 0x0F;
+ break;
+ }
+ return sprintf(buf, "%d\n", data32);
+}
+
static DEVICE_ATTR(console, 0644, console_show, console_store);
@@ -506,6 +586,14 @@ static DEVICE_ATTR(console, 0644, console_show, console_store);
#include "solos-attrlist.c"
+static SOLOS_GPIO_ATTR(GPIO1, 0644, geos_gpio_show, geos_gpio_store, 9);
+static SOLOS_GPIO_ATTR(GPIO2, 0644, geos_gpio_show, geos_gpio_store, 10);
+static SOLOS_GPIO_ATTR(GPIO3, 0644, geos_gpio_show, geos_gpio_store, 11);
+static SOLOS_GPIO_ATTR(GPIO4, 0644, geos_gpio_show, geos_gpio_store, 12);
+static SOLOS_GPIO_ATTR(GPIO5, 0644, geos_gpio_show, geos_gpio_store, 13);
+static SOLOS_GPIO_ATTR(PushButton, 0444, geos_gpio_show, NULL, 14);
+static SOLOS_GPIO_ATTR(HardwareVersion, 0444, hardware_show, NULL, 0);
+static SOLOS_GPIO_ATTR(HardwareVariant, 0444, hardware_show, NULL, 1);
#undef SOLOS_ATTR_RO
#undef SOLOS_ATTR_RW
@@ -522,6 +610,23 @@ static struct attribute_group solos_attr_group = {
.name = "parameters",
};
+static struct attribute *gpio_attrs[] = {
+ &gpio_attr_GPIO1.attr.attr,
+ &gpio_attr_GPIO2.attr.attr,
+ &gpio_attr_GPIO3.attr.attr,
+ &gpio_attr_GPIO4.attr.attr,
+ &gpio_attr_GPIO5.attr.attr,
+ &gpio_attr_PushButton.attr.attr,
+ &gpio_attr_HardwareVersion.attr.attr,
+ &gpio_attr_HardwareVariant.attr.attr,
+ NULL
+};
+
+static struct attribute_group gpio_attr_group = {
+ .attrs = gpio_attrs,
+ .name = "gpio",
+};
+
static int flash_upgrade(struct solos_card *card, int chip)
{
const struct firmware *fw;
@@ -533,16 +638,25 @@ static int flash_upgrade(struct solos_card *card, int chip)
switch (chip) {
case 0:
fw_name = "solos-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 1:
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 2:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-db-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -552,7 +666,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
case 3:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -568,6 +685,9 @@ static int flash_upgrade(struct solos_card *card, int chip)
dev_info(&card->dev->dev, "Flash upgrade starting\n");
+ /* New FPGAs require driver version before permitting flash upgrades */
+ iowrite32(DRIVER_VERSION, card->config_regs + DRIVER_VER);
+
numblocks = fw->size / blocksize;
dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
@@ -597,9 +717,13 @@ static int flash_upgrade(struct solos_card *card, int chip)
/* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
- /* Copy block to buffer, swapping each 16 bits */
+ /* Copy block to buffer, swapping each 16 bits for Atmel flash */
for(i = 0; i < blocksize; i += 4) {
- uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
+ uint32_t word;
+ if (card->atmel_flash)
+ word = swahb32p((uint32_t *)(fw->data + offset + i));
+ else
+ word = *(uint32_t *)(fw->data + offset + i);
if(card->fpga_version > LEGACY_BUFFERS)
iowrite32(word, FLASH_BUF + i);
else
@@ -961,7 +1085,12 @@ static uint32_t fpga_tx(struct solos_card *card)
tx_started |= 1 << port;
oldskb = skb; /* We're done with this skb already */
} else if (skb && card->using_dma) {
- SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ unsigned char *data = skb->data;
+ if ((unsigned long)data & card->dma_alignment) {
+ data = card->dma_bounce + (BUF_SIZE * port);
+ memcpy(data, skb->data, skb->len);
+ }
+ SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
skb->len, PCI_DMA_TODEVICE);
card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
@@ -1133,18 +1262,33 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
+ /* Stopped using Atmel flash after 0.03-38 */
+ if (fpga_ver < 39)
+ card->atmel_flash = 1;
+ else
+ card->atmel_flash = 0;
+
+ data32 = ioread32(card->config_regs + PORTS);
+ card->nr_ports = (data32 & 0x000000FF);
+
if (card->fpga_version >= DMA_SUPPORTED) {
pci_set_master(dev);
card->using_dma = 1;
+ if (1) { /* All known FPGA versions so far */
+ card->dma_alignment = 3;
+ card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
+ if (!card->dma_bounce) {
+ dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
+ /* Fallback to MMIO doesn't work */
+ goto out_unmap_both;
+ }
+ }
} else {
card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
- data32 = ioread32(card->config_regs + PORTS);
- card->nr_ports = (data32 & 0x000000FF);
-
pci_set_drvdata(dev, card);
tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
@@ -1179,6 +1323,10 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto out_free_irq;
+ if (card->fpga_version >= DMA_SUPPORTED &&
+ sysfs_create_group(&card->dev->dev.kobj, &gpio_attr_group))
+ dev_err(&card->dev->dev, "Could not register parameter group for GPIOs\n");
+
return 0;
out_free_irq:
@@ -1187,6 +1335,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
tasklet_kill(&card->tlet);
out_unmap_both:
+ kfree(card->dma_bounce);
pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
@@ -1289,11 +1438,16 @@ static void fpga_remove(struct pci_dev *dev)
iowrite32(1, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
+ if (card->fpga_version >= DMA_SUPPORTED)
+ sysfs_remove_group(&card->dev->dev.kobj, &gpio_attr_group);
+
atm_remove(card);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
+ kfree(card->dma_bounce);
+
/* Release device from reset */
iowrite32(0, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 417913974df..a235085e343 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -171,6 +171,27 @@ ssize_t device_show_int(struct device *dev,
}
EXPORT_SYMBOL_GPL(device_show_int);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ if (strtobool(buf, ea->var) < 0)
+ return -EINVAL;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
+
/**
* device_release - free device structure.
* @kobj: device's kobject.
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 147d1a4dd26..17cf7cad601 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -148,7 +148,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 460e22dee36..a3f79c495a4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -298,6 +298,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
enum dma_data_direction direction)
{
+ might_sleep();
+
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3fbedc75e7c..0ce39a33b3c 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -218,6 +218,8 @@ void dmam_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dmam_release_declared_memory);
+#endif
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -236,8 +238,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
}
EXPORT_SYMBOL(dma_common_get_sgtable);
-#endif
-
/*
* Create userspace mapping for the DMA-coherent memory.
*/
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 294e3162621..fac124a7e1c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -227,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->dev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -644,6 +644,9 @@ static struct node_attr node_state_attr[] = {
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+#endif
[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};
@@ -654,6 +657,9 @@ static struct attribute *node_state_attrs[] = {
#ifdef CONFIG_HIGHMEM
&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ &node_state_attr[N_MEMORY].attr.attr,
+#endif
&node_state_attr[N_CPU].attr.attr,
NULL
};
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index a533af21836..d7b56a88c9f 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -65,6 +65,15 @@ config BCMA_DRIVER_GMAC_CMN
If unsure, say N
+config BCMA_DRIVER_GPIO
+ bool "BCMA GPIO driver"
+ depends on BCMA
+ select GPIOLIB
+ help
+ Driver to provide access to the GPIO pins of the bcma bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 8ad42d41b2f..734b32f09c0 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -6,6 +6,7 @@ bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
+bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 537ae53231c..4a2d72ec6d4 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -91,4 +91,14 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+/* driver_gpio.c */
+int bcma_gpio_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ return -ENOTSUPP;
+}
+#endif /* CONFIG_BCMA_DRIVER_GPIO */
+
#endif
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index dc96dd8ebff..e461ad25fda 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -114,6 +114,8 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->early_setup_done)
return;
+ spin_lock_init(&cc->gpio_lock);
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -202,28 +204,97 @@ u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
+/*
+ * If the bit is set to 0, chipcommon controls this GPIO,
+ * if the bit is set to 1, it is used by some part of the chip and not our code.
+ */
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
#ifdef CONFIG_BCMA_DRIVER_MIPS
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index e162999bf91..c62c788b328 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -13,12 +13,13 @@
#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
+u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
new file mode 100644
index 00000000000..9a6f585da2d
--- /dev/null
+++ b/drivers/bcma/driver_gpio.c
@@ -0,0 +1,98 @@
+/*
+ * Broadcom specific AMBA
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcma_private.h"
+
+static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
+{
+ return container_of(chip, struct bcma_drv_cc, gpio);
+}
+
+static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ return !!bcma_chipco_gpio_in(cc, 1 << gpio);
+}
+
+static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
+ return 0;
+}
+
+static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_control(cc, 1 << gpio, 0);
+ /* clear pulldown */
+ bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
+ /* set pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);
+
+ return 0;
+}
+
+static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ /* clear pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
+}
+
+int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+
+ chip->label = "bcma_gpio";
+ chip->owner = THIS_MODULE;
+ chip->request = bcma_gpio_request;
+ chip->free = bcma_gpio_free;
+ chip->get = bcma_gpio_get_value;
+ chip->set = bcma_gpio_set_value;
+ chip->direction_input = bcma_gpio_direction_input;
+ chip->direction_output = bcma_gpio_direction_output;
+ chip->ngpio = 16;
+ /* A SoC has exactly one bcma bus per device, so give its GPIOs a
+ * fixed base for deterministic numbering. Other host types can take
+ * a dynamically assigned base. */
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
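Once gpiochip_add() succeeds, consumers reach these pins through the ordinary gpiolib calls. A minimal consumer-side sketch, assuming an SoC bus (chip base 0) and a made-up LED on pin 3:

#include <linux/gpio.h>

static int example_blink_led(void)
{
	int err;

	err = gpio_request(3, "example-led");	/* pin 3 of the bcma chip */
	if (err)
		return err;

	gpio_direction_output(3, 1);		/* drive the pin high */
	gpio_set_value(3, 0);			/* ...and low again */
	gpio_free(3);
	return 0;
}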
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index debd4f142f9..53ba20ca17e 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -164,6 +164,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus, "Error registering NAND flash\n");
}
#endif
+ err = bcma_gpio_init(&bus->drv_cc);
+ if (err == -ENOTSUPP)
+ bcma_debug(bus, "GPIO driver not activated\n");
+ else if (err)
+ bcma_err(bus, "Error registering GPIO driver: %i\n", err);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
err = bcma_chipco_watchdog_register(&bus->drv_cc);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index d2ed7f18d1a..175649468c9 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "50"
+#define VERSION "81"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -10,7 +10,7 @@
#define AOE_PARTITIONS (16)
#endif
-#define WHITESPACE " \t\v\f\n"
+#define WHITESPACE " \t\v\f\n,"
enum {
AOECMD_ATA,
@@ -73,21 +73,29 @@ enum {
DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */
DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
DEVFL_GDALLOC = (1<<3), /* need to alloc gendisk */
- DEVFL_KICKME = (1<<4), /* slow polling network card catch */
- DEVFL_NEWSIZE = (1<<5), /* need to update dev size in block layer */
+ DEVFL_GD_NOW = (1<<4), /* allocating gendisk */
+ DEVFL_KICKME = (1<<5), /* slow polling network card catch */
+ DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
+ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
+ DEVFL_FREED = (1<<8), /* device has been cleaned up */
};
enum {
DEFAULTBCNT = 2 * 512, /* 2 sectors */
MIN_BUFS = 16,
- NTARGETS = 8,
+ NTARGETS = 4,
NAOEIFS = 8,
NSKBPOOLMAX = 256,
NFACTIVE = 61,
TIMERTICK = HZ / 10,
- MINTIMER = HZ >> 2,
- MAXTIMER = HZ << 1,
+ RTTSCALE = 8,
+ RTTDSCALE = 3,
+ RTTAVG_INIT = USEC_PER_SEC / 4 << RTTSCALE,
+ RTTDEV_INIT = RTTAVG_INIT / 4,
+
+ HARD_SCORN_SECS = 10, /* try another remote port after this */
+ MAX_TAINT = 1000, /* cap on aoetgt taint */
};
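The RTT constants above set up a fixed-point estimator: averages are stored left-shifted by RTTSCALE and mean deviations by RTTDSCALE, so integer arithmetic can track sub-jiffy round-trip times. A minimal sketch of the usual scaled update, reusing the enum values above (the driver's own version lives in aoecmd.c and may differ in detail):

#include <linux/types.h>

static void example_update_rtt(u32 *avg, u32 *dev, int rtt_us)
{
	int n;

	/* EWMA with gain 1/2^RTTSCALE, kept in the scaled domain */
	n = rtt_us - (int)(*avg >> RTTSCALE);
	*avg += n;
	if (n < 0)
		n = -n;
	/* mean deviation with gain 1/2^RTTDSCALE */
	n -= (int)(*dev >> RTTDSCALE);
	*dev += n;
}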
struct buf {
@@ -100,10 +108,17 @@ struct buf {
struct request *rq;
};
+enum frame_flags {
+ FFL_PROBE = 1,
+};
+
struct frame {
struct list_head head;
u32 tag;
+ struct timeval sent; /* high-res time packet was sent */
+ u32 sent_jiffs; /* low-res jiffies-based sent time */
ulong waited;
+ ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
@@ -112,6 +127,7 @@ struct frame {
struct bio_vec *bv;
ulong bcnt;
ulong bv_off;
+ char flags;
};
struct aoeif {
@@ -122,28 +138,31 @@ struct aoeif {
struct aoetgt {
unsigned char addr[6];
- ushort nframes;
+ ushort nframes; /* cap on frames to use */
struct aoedev *d; /* parent device I belong to */
struct list_head ffree; /* list of free frames */
struct aoeif ifs[NAOEIFS];
struct aoeif *ifp; /* current aoeif in use */
- ushort nout;
- ushort maxout;
- ulong falloc;
- ulong lastwadj; /* last window adjustment */
+ ushort nout; /* number of AoE commands outstanding */
+ ushort maxout; /* current value for max outstanding */
+ ushort next_cwnd; /* incr maxout after decrementing to zero */
+ ushort ssthresh; /* slow start threshold */
+ ulong falloc; /* number of allocated frames */
+ int taint; /* how much we want to avoid this aoetgt */
int minbcnt;
int wpkts, rpkts;
+ char nout_probes;
};
struct aoedev {
struct aoedev *next;
ulong sysminor;
ulong aoemajor;
+ u32 rttavg; /* scaled AoE round trip time average */
+ u32 rttdev; /* scaled round trip time mean deviation */
u16 aoeminor;
u16 flags;
u16 nopen; /* (bd_openers isn't available without sleeping) */
- u16 rttavg; /* round trip average of requests/responses */
- u16 mintimer;
u16 fw_ver; /* version of blade's firmware */
u16 lasttag; /* last tag sent */
u16 useme;
@@ -151,7 +170,7 @@ struct aoedev {
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
struct request_queue *blkq;
- struct hd_geometry geo;
+ struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
spinlock_t lock;
@@ -164,11 +183,12 @@ struct aoedev {
} ip;
ulong maxbcnt;
struct list_head factive[NFACTIVE]; /* hash of active frames */
- struct aoetgt *targets[NTARGETS];
+ struct list_head rexmitq; /* deferred retransmissions */
+ struct aoetgt **targets;
+ ulong ntargets; /* number of allocated aoetgt pointers */
struct aoetgt **tgt; /* target in use when working */
- struct aoetgt *htgt; /* target needing rexmit assistance */
- ulong ntargets;
ulong kicked;
+ char ident[512];
};
/* kthread tracking */
@@ -195,6 +215,7 @@ void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
struct sk_buff *aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
void aoecmd_sleepwork(struct work_struct *);
+void aoecmd_wreset(struct aoetgt *t);
void aoecmd_cleanslate(struct aoedev *);
void aoecmd_exit(void);
int aoecmd_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00dfc5008ad..a129f8c8073 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -16,11 +16,19 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+/* GPFS needs a larger value than the default. */
+static int aoe_maxsectors;
+module_param(aoe_maxsectors, int, 0644);
+MODULE_PARM_DESC(aoe_maxsectors,
+ "When nonzero, set the maximum number of sectors per I/O request");
+
static ssize_t aoedisk_show_state(struct device *dev,
struct device_attribute *attr, char *page)
{
@@ -59,7 +67,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
nd = nds;
ne = nd + ARRAY_SIZE(nds);
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++) {
ifp = (*t)->ifs;
e = ifp + NAOEIFS;
@@ -91,6 +99,14 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
+static ssize_t aoedisk_show_payload(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct aoedev *d = disk->private_data;
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+}
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
@@ -99,12 +115,14 @@ static struct device_attribute dev_attr_firmware_version = {
.attr = { .name = "firmware-version", .mode = S_IRUGO },
.show = aoedisk_show_fwver,
};
+static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
&dev_attr_state.attr,
&dev_attr_mac.attr,
&dev_attr_netif.attr,
&dev_attr_firmware_version.attr,
+ &dev_attr_payload.attr,
NULL,
};
@@ -129,9 +147,18 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
+ if (!virt_addr_valid(d)) {
+ pr_crit("aoe: invalid device pointer in %s\n",
+ __func__);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
+ return -ENODEV;
+
mutex_lock(&aoeblk_mutex);
spin_lock_irqsave(&d->lock, flags);
- if (d->flags & DEVFL_UP) {
+ if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
mutex_unlock(&aoeblk_mutex);
@@ -195,9 +222,38 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static int
+aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
+{
+ struct aoedev *d;
+
+ if (!arg)
+ return -EINVAL;
+
+ d = bdev->bd_disk->private_data;
+ if ((d->flags & DEVFL_UP) == 0) {
+ pr_err("aoe: disk not up\n");
+ return -ENODEV;
+ }
+
+ if (cmd == HDIO_GET_IDENTITY) {
+ if (!copy_to_user((void __user *) arg, &d->ident,
+ sizeof(d->ident)))
+ return 0;
+ return -EFAULT;
+ }
+
+ /* udev calls scsi_id, which uses SG_IO, resulting in noise */
+ if (cmd != SG_IO)
+ pr_info("aoe: unknown ioctl 0x%x\n", cmd);
+
+ return -ENOTTY;
+}
+
static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
+ .ioctl = aoeblk_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
@@ -212,6 +268,18 @@ aoeblk_gdalloc(void *vp)
struct request_queue *q;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
+ int late = 0;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_GDALLOC
+ && !(d->flags & DEVFL_TKILL)
+ && !(d->flags & DEVFL_GD_NOW))
+ d->flags |= DEVFL_GD_NOW;
+ else
+ late = 1;
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (late)
+ return;
gd = alloc_disk(AOE_PARTITIONS);
if (gd == NULL) {
@@ -231,23 +299,24 @@ aoeblk_gdalloc(void *vp)
if (q == NULL) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
- mempool_destroy(mp);
- goto err_disk;
+ goto err_mempool;
}
- d->blkq = blk_alloc_queue(GFP_KERNEL);
- if (!d->blkq)
- goto err_mempool;
- d->blkq->backing_dev_info.name = "aoe";
- if (bdi_init(&d->blkq->backing_dev_info))
- goto err_blkq;
spin_lock_irqsave(&d->lock, flags);
- blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ WARN_ON(!(d->flags & DEVFL_GDALLOC));
+ WARN_ON(d->flags & DEVFL_TKILL);
+ WARN_ON(d->gd);
+ WARN_ON(d->flags & DEVFL_UP);
+ blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
+ q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
d->gd = gd;
+ if (aoe_maxsectors)
+ blk_queue_max_hw_sectors(q, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->fops = &aoe_bdops;
@@ -263,18 +332,21 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ d->flags &= ~DEVFL_GD_NOW;
+ spin_unlock_irqrestore(&d->lock, flags);
return;
-err_blkq:
- blk_cleanup_queue(d->blkq);
- d->blkq = NULL;
err_mempool:
- mempool_destroy(d->bufpool);
+ mempool_destroy(mp);
err_disk:
put_disk(gd);
err:
spin_lock_irqsave(&d->lock, flags);
- d->flags &= ~DEVFL_GDALLOC;
+ d->flags &= ~DEVFL_GD_NOW;
+ schedule_work(&d->work);
spin_unlock_irqrestore(&d->lock, flags);
}
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index ed57a890c64..42e67ad6bd2 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -39,6 +39,11 @@ struct ErrMsg {
};
static DEFINE_MUTEX(aoechr_mutex);
+
+/* A ring buffer of error messages, to be read through
+ * "/dev/etherd/err". When no messages are present,
+ * readers will block waiting for messages to appear.
+ */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
@@ -282,7 +287,7 @@ aoechr_init(void)
int n, i;
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
- if (n < 0) {
+ if (n < 0) {
printk(KERN_ERR "aoe: can't register char device\n");
return n;
}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 9fe4f186555..25ef5c014fc 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -22,6 +22,7 @@
#define MAXIOC (8192) /* default meant to avoid most soft lockups */
static void ktcomplete(struct frame *, struct sk_buff *);
+static int count_targets(struct aoedev *d, int *untainted);
static struct buf *nextbuf(struct aoedev *);
@@ -29,7 +30,7 @@ static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
-static int aoe_maxout = 16;
+static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
@@ -43,6 +44,8 @@ static struct {
spinlock_t lock;
} iocq;
+static struct page *empty_page;
+
static struct sk_buff *
new_skb(ulong len)
{
@@ -59,6 +62,23 @@ new_skb(ulong len)
}
static struct frame *
+getframe_deferred(struct aoedev *d, u32 tag)
+{
+ struct list_head *head, *pos, *nx;
+ struct frame *f;
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ if (f->tag == tag) {
+ list_del(pos);
+ return f;
+ }
+ }
+ return NULL;
+}
+
+static struct frame *
getframe(struct aoedev *d, u32 tag)
{
struct frame *f;
@@ -162,8 +182,10 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
+ f->lba = 0;
f->bv = NULL;
f->r_skb = NULL;
+ f->flags = 0;
list_add(&f->head, &t->ffree);
}
@@ -217,20 +239,25 @@ newframe(struct aoedev *d)
struct frame *f;
struct aoetgt *t, **tt;
int totout = 0;
+ int use_tainted;
+ int has_untainted;
- if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */
+ if (!d->targets || !d->targets[0]) {
printk(KERN_ERR "aoe: NULL TARGETS!\n");
return NULL;
}
tt = d->tgt; /* last used target */
- for (;;) {
+ for (use_tainted = 0, has_untainted = 0;;) {
tt++;
- if (tt >= &d->targets[NTARGETS] || !*tt)
+ if (tt >= &d->targets[d->ntargets] || !*tt)
tt = d->targets;
t = *tt;
- totout += t->nout;
+ if (!t->taint) {
+ has_untainted = 1;
+ totout += t->nout;
+ }
if (t->nout < t->maxout
- && t != d->htgt
+ && (use_tainted || !t->taint)
&& t->ifp->nd) {
f = newtframe(d, t);
if (f) {
@@ -239,8 +266,12 @@ newframe(struct aoedev *d)
return f;
}
}
- if (tt == d->tgt) /* we've looped and found nada */
- break;
+ if (tt == d->tgt) { /* we've looped and found nada */
+ if (!use_tainted && !has_untainted)
+ use_tainted = 1;
+ else
+ break;
+ }
}
if (totout == 0) {
d->kicked++;
@@ -277,21 +308,68 @@ fhash(struct frame *f)
list_add_tail(&f->head, &d->factive[n]);
}
+static void
+ata_rw_frameinit(struct frame *f)
+{
+ struct aoetgt *t;
+ struct aoe_hdr *h;
+ struct aoe_atahdr *ah;
+ struct sk_buff *skb;
+ char writebit, extbit;
+
+ skb = f->skb;
+ h = (struct aoe_hdr *) skb_mac_header(skb);
+ ah = (struct aoe_atahdr *) (h + 1);
+ skb_put(skb, sizeof(*h) + sizeof(*ah));
+ memset(h, 0, skb->len);
+
+ writebit = 0x10;
+ extbit = 0x4;
+
+ t = f->t;
+ f->tag = aoehdr_atainit(t->d, t, h);
+ fhash(f);
+ t->nout++;
+ f->waited = 0;
+ f->waited_total = 0;
+ if (f->buf)
+ f->lba = f->buf->sector;
+
+ /* set up ata header */
+ ah->scnt = f->bcnt >> 9;
+ put_lba(ah, f->lba);
+ if (t->d->flags & DEVFL_EXT) {
+ ah->aflags |= AOEAFL_EXT;
+ } else {
+ extbit = 0;
+ ah->lba3 &= 0x0f;
+ ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
+ }
+ if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
+ skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ ah->aflags |= AOEAFL_WRITE;
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+ t->wpkts++;
+ } else {
+ t->rpkts++;
+ writebit = 0;
+ }
+
+ ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ skb->dev = t->ifp->nd;
+}
+
static int
aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct buf *buf;
struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
- char writebit, extbit;
-
- writebit = 0x10;
- extbit = 0x4;
buf = nextbuf(d);
if (buf == NULL)
@@ -326,50 +404,18 @@ aoecmd_ata_rw(struct aoedev *d)
} while (fbcnt);
/* initialize the headers & frame */
- skb = f->skb;
- h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
- skb_put(skb, sizeof *h + sizeof *ah);
- memset(h, 0, skb->len);
- f->tag = aoehdr_atainit(d, t, h);
- fhash(f);
- t->nout++;
- f->waited = 0;
f->buf = buf;
f->bcnt = bcnt;
- f->lba = buf->sector;
-
- /* set up ata header */
- ah->scnt = bcnt >> 9;
- put_lba(ah, buf->sector);
- if (d->flags & DEVFL_EXT) {
- ah->aflags |= AOEAFL_EXT;
- } else {
- extbit = 0;
- ah->lba3 &= 0x0f;
- ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
- }
- if (bio_data_dir(buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, bcnt);
- ah->aflags |= AOEAFL_WRITE;
- skb->len += bcnt;
- skb->data_len = bcnt;
- skb->truesize += bcnt;
- t->wpkts++;
- } else {
- t->rpkts++;
- writebit = 0;
- }
-
- ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ ata_rw_frameinit(f);
/* mark all tracking fields and load out */
buf->nframesout += 1;
buf->sector += bcnt >> 9;
- skb->dev = t->ifp->nd;
- skb = skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -442,11 +488,14 @@ resend(struct aoedev *d, struct frame *f)
h = (struct aoe_hdr *) skb_mac_header(skb);
ah = (struct aoe_atahdr *) (h+1);
- snprintf(buf, sizeof buf,
- "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
- "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
- h->src, h->dst, t->nout);
- aoechr_error(buf);
+ if (!(f->flags & FFL_PROBE)) {
+ snprintf(buf, sizeof(buf),
+ "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
+ "retransmit", d->aoemajor, d->aoeminor,
+ f->tag, jiffies, n,
+ h->src, h->dst, t->nout);
+ aoechr_error(buf);
+ }
f->tag = n;
fhash(f);
@@ -458,12 +507,46 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
static int
+tsince_hr(struct frame *f)
+{
+ struct timeval now;
+ int n;
+
+ do_gettimeofday(&now);
+ n = now.tv_usec - f->sent.tv_usec;
+ n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+
+ if (n < 0)
+ n = -n;
+
+ /* For relatively long periods, use jiffies to avoid
+ * discrepancies caused by updates to the system time.
+ *
+ * On system with HZ of 1000, 32-bits is over 49 days
+ * worth of jiffies, or over 71 minutes worth of usecs.
+ *
+ * Jiffies overflow is handled by subtraction of unsigned ints:
+ * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
+ * $3 = 4
+ * (gdb)
+ */
+ if (n > USEC_PER_SEC / 4) {
+ n = ((u32) jiffies) - f->sent_jiffs;
+ n *= USEC_PER_SEC / HZ;
+ }
+
+ return n;
+}
+
+static int
tsince(u32 tag)
{
int n;
@@ -472,7 +555,7 @@ tsince(u32 tag)
n -= tag & 0xffff;
if (n < 0)
n += 1<<16;
- return n;
+ return jiffies_to_usecs(n + 1);
}
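
The comment above leans on two facts worth spelling out: subtracting 32-bit unsigned values gives the correct elapsed count even across a jiffies wrap, and tsince_hr() only falls back to jiffies once the timeofday-based figure exceeds a quarter second. A minimal userspace sketch of the same arithmetic follows; USEC_PER_SEC and the HZ value here are stand-ins for illustration, not read from any kernel configuration.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000u
#define HZ 1000u	/* assumed tick rate, for illustration only */

/* elapsed ticks across a wrap: (u32)now - (u32)then is still right,
 * because the subtraction is modulo 2^32 */
static uint32_t elapsed(uint32_t then, uint32_t now)
{
	return now - then;
}

int main(void)
{
	/* the example from the comment: (unsigned) 2 - (unsigned) 0xfffffffe */
	printf("%u\n", elapsed(0xfffffffeu, 2u));	/* prints 4 */

	/* tsince_hr() style fallback: short intervals stay in usecs,
	 * long ones are rebuilt from the coarser jiffies counter */
	uint32_t n = 300000;	/* 0.3 s measured via gettimeofday-style clock */
	uint32_t sent_jiffs = 0xfffffff0u, now_jiffs = 0x00000050u;

	if (n > USEC_PER_SEC / 4)
		n = elapsed(sent_jiffs, now_jiffs) * (USEC_PER_SEC / HZ);
	printf("%u usec\n", n);	/* 96 jiffies -> 96000 usec */
	return 0;
}
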
static struct aoeif *
@@ -503,70 +586,189 @@ ejectif(struct aoetgt *t, struct aoeif *ifp)
dev_put(nd);
}
-static int
-sthtith(struct aoedev *d)
+static struct frame *
+reassign_frame(struct frame *f)
{
- struct frame *f, *nf;
- struct list_head *nx, *pos, *head;
+ struct frame *nf;
struct sk_buff *skb;
- struct aoetgt *ht = d->htgt;
- int i;
- for (i = 0; i < NFACTIVE; i++) {
- head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- if (f->t != ht)
- continue;
+ nf = newframe(f->t->d);
+ if (!nf)
+ return NULL;
+ if (nf->t == f->t) {
+ aoe_freetframe(nf);
+ return NULL;
+ }
- nf = newframe(d);
- if (!nf)
- return 0;
+ skb = nf->skb;
+ nf->skb = f->skb;
+ nf->buf = f->buf;
+ nf->bcnt = f->bcnt;
+ nf->lba = f->lba;
+ nf->bv = f->bv;
+ nf->bv_off = f->bv_off;
+ nf->waited = 0;
+ nf->waited_total = f->waited_total;
+ nf->sent = f->sent;
+ nf->sent_jiffs = f->sent_jiffs;
+ f->skb = skb;
+
+ return nf;
+}
- /* remove frame from active list */
- list_del(pos);
+static void
+probe(struct aoetgt *t)
+{
+ struct aoedev *d;
+ struct frame *f;
+ struct sk_buff *skb;
+ struct sk_buff_head queue;
+ size_t n, m;
+ int frag;
- /* reassign all pertinent bits to new outbound frame */
- skb = nf->skb;
- nf->skb = f->skb;
- nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
- nf->waited = 0;
- f->skb = skb;
+ d = t->d;
+ f = newtframe(d, t);
+ if (!f) {
+ pr_err("%s %pm for e%ld.%d: %s\n",
+ "aoe: cannot probe remote address",
+ t->addr,
+ (long) d->aoemajor, d->aoeminor,
+ "no frame available");
+ return;
+ }
+ f->flags |= FFL_PROBE;
+ ifrotate(t);
+ f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ ata_rw_frameinit(f);
+ skb = f->skb;
+ for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ if (n < PAGE_SIZE)
+ m = n;
+ else
+ m = PAGE_SIZE;
+ skb_fill_page_desc(skb, frag, empty_page, 0, m);
+ }
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+
+ skb = skb_clone(f->skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+ aoenet_xmit(&queue);
+ }
+}
+
+static long
+rto(struct aoedev *d)
+{
+ long t;
+
+ t = 2 * d->rttavg >> RTTSCALE;
+ t += 8 * d->rttdev >> RTTDSCALE;
+ if (t == 0)
+ t = 1;
+
+ return t;
+}
+
+static void
+rexmit_deferred(struct aoedev *d)
+{
+ struct aoetgt *t;
+ struct frame *f;
+ struct frame *nf;
+ struct list_head *pos, *nx, *head;
+ int since;
+ int untainted;
+
+ count_targets(d, &untainted);
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ t = f->t;
+ if (t->taint) {
+ if (!(f->flags & FFL_PROBE)) {
+ nf = reassign_frame(f);
+ if (nf) {
+ if (t->nout_probes == 0
+ && untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ list_replace(&f->head, &nf->head);
+ pos = &nf->head;
+ aoe_freetframe(f);
+ f = nf;
+ t = f->t;
+ }
+ } else if (untainted < 1) {
+ /* don't probe w/o other untainted aoetgts */
+ goto stop_probe;
+ } else if (tsince_hr(f) < t->taint * rto(d)) {
+ /* reprobe slowly when taint is high */
+ continue;
+ }
+ } else if (f->flags & FFL_PROBE) {
+stop_probe: /* don't probe untainted aoetgts */
+ list_del(pos);
aoe_freetframe(f);
- ht->nout--;
- nf->t->nout++;
- resend(d, nf);
+ /* leaving d->kicked, because this is routine */
+ f->t->d->flags |= DEVFL_KICKME;
+ continue;
}
+ if (t->nout >= t->maxout)
+ continue;
+ list_del(pos);
+ t->nout++;
+ if (f->flags & FFL_PROBE)
+ t->nout_probes++;
+ since = tsince_hr(f);
+ f->waited += since;
+ f->waited_total += since;
+ resend(d, f);
}
- /* We've cleaned up the outstanding so take away his
- * interfaces so he won't be used. We should remove him from
- * the target array here, but cleaning up a target is
- * involved. PUNT!
- */
- memset(ht->ifs, 0, sizeof ht->ifs);
- d->htgt = NULL;
- return 1;
}
-static inline unsigned char
-ata_scnt(unsigned char *packet) {
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
+/* An aoetgt accumulates demerits quickly, and successful
+ * probing redeems the aoetgt slowly.
+ */
+static void
+scorn(struct aoetgt *t)
+{
+ int n;
- h = (struct aoe_hdr *) packet;
- ah = (struct aoe_atahdr *) (h+1);
- return ah->scnt;
+ n = t->taint++;
+ t->taint += t->taint * 2;
+ if (n > t->taint)
+ t->taint = n;
+ if (t->taint > MAX_TAINT)
+ t->taint = MAX_TAINT;
+}
+
+static int
+count_targets(struct aoedev *d, int *untainted)
+{
+ int i, good;
+
+ for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
+ if (d->targets[i]->taint == 0)
+ good++;
+
+ if (untainted)
+ *untainted = good;
+ return i;
}
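
scorn() grows the taint roughly geometrically, new = 3 * (old + 1) clamped, while the completion path only walks it back one step per good response, so a misbehaving path is demoted fast and rehabilitated slowly. A small sketch of that growth; MAX_TAINT here is an arbitrary placeholder, since the real constant lives in aoe.h.

#include <stdio.h>

#define MAX_TAINT 1000	/* placeholder cap; the driver defines its own */

static int scorn_step(int taint)	/* mirrors scorn() */
{
	int n = taint++;		/* n holds the old value */

	taint += taint * 2;		/* i.e. 3 * (old + 1) */
	if (n > taint)			/* overflow guard */
		taint = n;
	if (taint > MAX_TAINT)
		taint = MAX_TAINT;
	return taint;
}

int main(void)
{
	int t = 0, i;

	for (i = 1; i <= 6; i++) {
		t = scorn_step(t);
		printf("after timeout %d: taint=%d\n", i, t);
	}
	/* prints 3, 12, 39, 120, 363, 1000 (clamped) */
	return 0;
}
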
static void
rexmit_timer(ulong vp)
{
struct aoedev *d;
- struct aoetgt *t, **tt, **te;
+ struct aoetgt *t;
struct aoeif *ifp;
struct frame *f;
struct list_head *head, *pos, *nx;
@@ -574,15 +776,18 @@ rexmit_timer(ulong vp)
register long timeout;
ulong flags, n;
int i;
+ int utgts; /* number of aoetgt descriptors (not slots) */
+ int since;
d = (struct aoedev *) vp;
- /* timeout is always ~150% of the moving average */
- timeout = d->rttavg;
- timeout += timeout >> 1;
-
spin_lock_irqsave(&d->lock, flags);
+ /* timeout based on observed timings and variations */
+ timeout = rto(d);
+
+ utgts = count_targets(d, NULL);
+
if (d->flags & DEVFL_TKILL) {
spin_unlock_irqrestore(&d->lock, flags);
return;
@@ -593,67 +798,61 @@ rexmit_timer(ulong vp)
head = &d->factive[i];
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
- if (tsince(f->tag) < timeout)
+ if (tsince_hr(f) < timeout)
break; /* end of expired frames */
/* move to flist for later processing */
list_move_tail(pos, &flist);
}
}
- /* window check */
- tt = d->targets;
- te = tt + d->ntargets;
- for (; tt < te && (t = *tt); tt++) {
- if (t->nout == t->maxout
- && t->maxout < t->nframes
- && (jiffies - t->lastwadj)/HZ > 10) {
- t->maxout++;
- t->lastwadj = jiffies;
- }
- }
-
- if (!list_empty(&flist)) { /* retransmissions necessary */
- n = d->rttavg <<= 1;
- if (n > MAXTIMER)
- d->rttavg = MAXTIMER;
- }
/* process expired frames */
while (!list_empty(&flist)) {
pos = flist.next;
f = list_entry(pos, struct frame, head);
- n = f->waited += timeout;
- n /= HZ;
- if (n > aoe_deadsecs) {
+ since = tsince_hr(f);
+ n = f->waited_total + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs
+ && n > aoe_deadsecs
+ && !(f->flags & FFL_PROBE)) {
/* Waited too long. Device failure.
* Hang all frames on first hash bucket for downdev
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
aoedev_downdev(d);
- break;
+ goto out;
}
- list_del(pos);
t = f->t;
- if (n > aoe_deadsecs/2)
- d->htgt = t; /* see if another target can help */
-
- if (t->nout == t->maxout) {
- if (t->maxout > 1)
- t->maxout--;
- t->lastwadj = jiffies;
+ n = f->waited + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs && utgts > 0
+ && (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
+ scorn(t); /* avoid this target */
+
+ if (t->maxout != 1) {
+ t->ssthresh = t->maxout / 2;
+ t->maxout = 1;
}
- ifp = getif(t, f->skb->dev);
- if (ifp && ++ifp->lost > (t->nframes << 1)
- && (ifp != t->ifs || t->ifs[1].nd)) {
- ejectif(t, ifp);
- ifp = NULL;
+ if (f->flags & FFL_PROBE) {
+ t->nout_probes--;
+ } else {
+ ifp = getif(t, f->skb->dev);
+ if (ifp && ++ifp->lost > (t->nframes << 1)
+ && (ifp != t->ifs || t->ifs[1].nd)) {
+ ejectif(t, ifp);
+ ifp = NULL;
+ }
}
- resend(d, f);
+ list_move_tail(pos, &d->rexmitq);
+ t->nout--;
}
+ rexmit_deferred(d);
- if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
+out:
+ if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
d->blkq->request_fn(d->blkq);
}
@@ -774,8 +973,7 @@ nextbuf(struct aoedev *d)
void
aoecmd_work(struct aoedev *d)
{
- if (d->htgt && !sthtith(d))
- return;
+ rexmit_deferred(d);
while (aoecmd_ata_rw(d))
;
}
@@ -809,6 +1007,17 @@ aoecmd_sleepwork(struct work_struct *work)
}
static void
+ata_ident_fixstring(u16 *id, int ns)
+{
+ u16 s;
+
+ while (ns-- > 0) {
+ s = *id;
+ *id++ = s >> 8 | s << 8;
+ }
+}
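
ata_ident_fixstring() exists because ATA IDENTIFY strings arrive with the two characters of each 16-bit word swapped; without the fixup the serial, firmware and model fields copied into d->ident would read as transposed character pairs. A standalone sketch of the same transformation on a made-up buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* same operation as ata_ident_fixstring(): swap the two bytes of each
 * 16-bit word in place, which swaps adjacent characters in memory */
static void fixstring(uint16_t *id, int nwords)
{
	while (nwords-- > 0) {
		uint16_t s = *id;
		*id++ = (uint16_t)(s >> 8 | s << 8);
	}
}

int main(void)
{
	uint16_t buf[5];

	/* character pairs as a target would return them */
	memcpy(buf, "tEehDrirev", 10);
	fixstring(buf, 5);
	printf("%.10s\n", (char *)buf);	/* prints "EtherDrive" */
	return 0;
}
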
+
+static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
u64 ssize;
@@ -843,6 +1052,11 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
}
+ ata_ident_fixstring((u16 *) &id[10<<1], 10); /* serial */
+ ata_ident_fixstring((u16 *) &id[23<<1], 4); /* firmware */
+ ata_ident_fixstring((u16 *) &id[27<<1], 20); /* model */
+ memcpy(d->ident, id, sizeof(d->ident));
+
if (d->ssize != ssize)
printk(KERN_INFO
"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
@@ -862,26 +1076,28 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
}
static void
-calc_rttavg(struct aoedev *d, int rtt)
+calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
register long n;
n = rtt;
- if (n < 0) {
- n = -rtt;
- if (n < MINTIMER)
- n = MINTIMER;
- else if (n > MAXTIMER)
- n = MAXTIMER;
- d->mintimer += (n - d->mintimer) >> 1;
- } else if (n < d->mintimer)
- n = d->mintimer;
- else if (n > MAXTIMER)
- n = MAXTIMER;
-
- /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
- n -= d->rttavg;
- d->rttavg += n >> 2;
+
+ /* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
+ n -= d->rttavg >> RTTSCALE;
+ d->rttavg += n;
+ if (n < 0)
+ n = -n;
+ n -= d->rttdev >> RTTDSCALE;
+ d->rttdev += n;
+
+ if (!t || t->maxout >= t->nframes)
+ return;
+ if (t->maxout < t->ssthresh)
+ t->maxout += 1;
+ else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
+ t->maxout += 1;
+ t->next_cwnd = t->maxout;
+ }
}
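
The rewritten calc_rttavg() together with rto() is the fixed-point estimator from the Jacobson/Karels paper the comment cites: d->rttavg and d->rttdev are scaled accumulators for the smoothed RTT and its mean deviation, and the retransmit timeout comes out as roughly twice the smoothed RTT plus eight deviations. A userspace sketch with assumed scale shifts follows; the driver's real RTTSCALE/RTTDSCALE values and the nonzero RTTAVG_INIT/RTTDEV_INIT seeds are defined in aoe.h.

#include <stdio.h>

#define RTTSCALE  8	/* assumed: rttavg holds srtt << RTTSCALE */
#define RTTDSCALE 3	/* assumed: rttdev holds mdev << RTTDSCALE */

struct est { long rttavg, rttdev; };

/* mirrors calc_rttavg(): gain 1/2^RTTSCALE on the average,
 * 1/2^RTTDSCALE on the mean deviation */
static void rtt_sample(struct est *e, long rtt_us)
{
	long n = rtt_us - (e->rttavg >> RTTSCALE);

	e->rttavg += n;
	if (n < 0)
		n = -n;
	n -= e->rttdev >> RTTDSCALE;
	e->rttdev += n;
}

/* mirrors rto(): about 2 * srtt + 8 * mdev, never less than 1 usec */
static long rto_us(struct est *e)
{
	long t = (2 * e->rttavg >> RTTSCALE) + (8 * e->rttdev >> RTTDSCALE);

	return t ? t : 1;
}

int main(void)
{
	struct est e = { 0, 0 };	/* the driver seeds these instead */
	long samples[] = { 500, 520, 480, 2000, 510 };	/* usec */
	int i;

	for (i = 0; i < 5; i++) {
		rtt_sample(&e, samples[i]);
		printf("rtt=%4ld  rto=%ld usec\n", samples[i], rto_us(&e));
	}
	return 0;
}
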
static struct aoetgt *
@@ -890,7 +1106,7 @@ gettgt(struct aoedev *d, char *addr)
struct aoetgt **t, **e;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
return *t;
@@ -966,19 +1182,22 @@ ktiocomplete(struct frame *f)
struct aoeif *ifp;
struct aoedev *d;
long n;
+ int untainted;
if (f == NULL)
return;
t = f->t;
d = t->d;
+ skb = f->r_skb;
+ buf = f->buf;
+ if (f->flags & FFL_PROBE)
+ goto out;
+ if (!skb) /* just fail the buf. */
+ goto noskb;
hout = (struct aoe_hdr *) skb_mac_header(f->skb);
ahout = (struct aoe_atahdr *) (hout+1);
- buf = f->buf;
- skb = f->r_skb;
- if (skb == NULL)
- goto noskb; /* just fail the buf. */
hin = (struct aoe_hdr *) skb->data;
skb_pull(skb, sizeof(*hin));
@@ -988,9 +1207,9 @@ ktiocomplete(struct frame *f)
pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
-noskb: if (buf)
+noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
- goto badrsp;
+ goto out;
}
n = ahout->scnt << 9;
@@ -998,8 +1217,10 @@ noskb: if (buf)
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
if (skb->len < n) {
- pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
- skb->len, n);
+ pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
+ "aoe: runt data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ skb->len, n);
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
@@ -1010,13 +1231,13 @@ noskb: if (buf)
ifp = getif(t, skb->dev);
if (ifp)
ifp->lost = 0;
- if (d->htgt == t) /* I'll help myself, thank you. */
- d->htgt = NULL;
spin_unlock_irq(&d->lock);
break;
case ATA_CMD_ID_ATA:
if (skb->len < 512) {
- pr_info("aoe: runt data size in ataid. skb->len=%d\n",
+ pr_info("%s e%ld.%d. skb->len=%d need=512\n",
+ "aoe: runt data size in ataid from",
+ (long) d->aoemajor, d->aoeminor,
skb->len);
break;
}
@@ -1032,16 +1253,23 @@ noskb: if (buf)
be16_to_cpu(get_unaligned(&hin->major)),
hin->minor);
}
-badrsp:
+out:
spin_lock_irq(&d->lock);
+ if (t->taint > 0
+ && --t->taint > 0
+ && t->nout_probes == 0) {
+ count_targets(d, &untainted);
+ if (untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ }
aoe_freetframe(f);
if (buf && --buf->nframesout == 0 && buf->resid == 0)
aoe_end_buf(d, buf);
- aoecmd_work(d);
-
spin_unlock_irq(&d->lock);
aoedev_put(d);
dev_kfree_skb(skb);
@@ -1141,7 +1369,6 @@ aoecmd_ata_rsp(struct sk_buff *skb)
struct aoedev *d;
struct aoe_hdr *h;
struct frame *f;
- struct aoetgt *t;
u32 n;
ulong flags;
char ebuf[128];
@@ -1162,23 +1389,32 @@ aoecmd_ata_rsp(struct sk_buff *skb)
n = be32_to_cpu(get_unaligned(&h->tag));
f = getframe(d, n);
- if (f == NULL) {
- calc_rttavg(d, -tsince(n));
- spin_unlock_irqrestore(&d->lock, flags);
- aoedev_put(d);
- snprintf(ebuf, sizeof ebuf,
- "%15s e%d.%d tag=%08x@%08lx\n",
- "unexpected rsp",
- get_unaligned_be16(&h->major),
- h->minor,
- get_unaligned_be32(&h->tag),
- jiffies);
- aoechr_error(ebuf);
- return skb;
+ if (f) {
+ calc_rttavg(d, f->t, tsince_hr(f));
+ f->t->nout--;
+ if (f->flags & FFL_PROBE)
+ f->t->nout_probes--;
+ } else {
+ f = getframe_deferred(d, n);
+ if (f) {
+ calc_rttavg(d, NULL, tsince_hr(f));
+ } else {
+ calc_rttavg(d, NULL, tsince(n));
+ spin_unlock_irqrestore(&d->lock, flags);
+ aoedev_put(d);
+ snprintf(ebuf, sizeof(ebuf),
+ "%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
+ "unexpected rsp",
+ get_unaligned_be16(&h->major),
+ h->minor,
+ get_unaligned_be32(&h->tag),
+ jiffies,
+ h->src,
+ h->dst);
+ aoechr_error(ebuf);
+ return skb;
+ }
}
- t = f->t;
- calc_rttavg(d, tsince(f->tag));
- t->nout--;
aoecmd_work(d);
spin_unlock_irqrestore(&d->lock, flags);
@@ -1201,7 +1437,7 @@ aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
aoenet_xmit(&queue);
}
-
+
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
@@ -1227,6 +1463,7 @@ aoecmd_ata_id(struct aoedev *d)
fhash(f);
t->nout++;
f->waited = 0;
+ f->waited_total = 0;
/* set up ata header */
ah->scnt = 1;
@@ -1235,41 +1472,69 @@ aoecmd_ata_id(struct aoedev *d)
skb->dev = t->ifp->nd;
- d->rttavg = MAXTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->timer.function = rexmit_timer;
- return skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ }
+
+ return skb;
}
-
+
+static struct aoetgt **
+grow_targets(struct aoedev *d)
+{
+ ulong oldn, newn;
+ struct aoetgt **tt;
+
+ oldn = d->ntargets;
+ newn = oldn * 2;
+ tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
+ if (!tt)
+ return NULL;
+ memmove(tt, d->targets, sizeof(*d->targets) * oldn);
+ d->tgt = tt + (d->tgt - d->targets);
+ kfree(d->targets);
+ d->targets = tt;
+ d->ntargets = newn;
+
+ return &d->targets[oldn];
+}
+
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
struct aoetgt *t, **tt, **te;
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && *tt; tt++)
;
if (tt == te) {
- printk(KERN_INFO
- "aoe: device addtgt failure; too many targets\n");
- return NULL;
+ tt = grow_targets(d);
+ if (!tt)
+ goto nomem;
}
t = kzalloc(sizeof(*t), GFP_ATOMIC);
- if (!t) {
- printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
- return NULL;
- }
-
- d->ntargets++;
+ if (!t)
+ goto nomem;
t->nframes = nframes;
t->d = d;
memcpy(t->addr, addr, sizeof t->addr);
t->ifp = t->ifs;
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
+ t->maxout = t->nframes / 2;
INIT_LIST_HEAD(&t->ffree);
return *tt = t;
+
+ nomem:
+ pr_info("aoe: cannot allocate memory to add target\n");
+ return NULL;
}
static void
@@ -1279,7 +1544,7 @@ setdbcnt(struct aoedev *d)
int bcnt = 0;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (bcnt == 0 || bcnt > (*t)->minbcnt)
bcnt = (*t)->minbcnt;
@@ -1373,7 +1638,11 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
spin_lock_irqsave(&d->lock, flags);
t = gettgt(d, h->src);
- if (!t) {
+ if (t) {
+ t->nframes = n;
+ if (n < t->maxout)
+ aoecmd_wreset(t);
+ } else {
t = addtgt(d, h->src, n);
if (!t)
goto bail;
@@ -1402,17 +1671,26 @@ bail:
}
void
+aoecmd_wreset(struct aoetgt *t)
+{
+ t->maxout = 1;
+ t->ssthresh = t->nframes / 2;
+ t->next_cwnd = t->nframes;
+}
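
aoecmd_wreset(), the multiplicative decrease in rexmit_timer() (ssthresh = maxout / 2, maxout back to 1) and the growth step in calc_rttavg() give each target a TCP-like congestion window: after a reset the window climbs by one frame per reply up to ssthresh, then by roughly one frame per full window. A toy model of one cycle; the frame counts are invented, and the real growth step additionally requires a full window in flight (t->nout == t->maxout), which the toy omits.

#include <stdio.h>

struct tgt { int nframes, maxout, ssthresh, next_cwnd; };

static void wreset(struct tgt *t)	/* mirrors aoecmd_wreset() */
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

static void on_timeout(struct tgt *t)	/* rexmit_timer() behaviour */
{
	if (t->maxout != 1) {
		t->ssthresh = t->maxout / 2;
		t->maxout = 1;
	}
}

static void on_reply(struct tgt *t)	/* growth step in calc_rttavg() */
{
	if (t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;			/* fast ramp below ssthresh */
	else if (t->next_cwnd-- == 0) {		/* ~one frame per window */
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}

int main(void)
{
	struct tgt t = { .nframes = 16 };
	int i;

	wreset(&t);
	for (i = 0; i < 40; i++)
		on_reply(&t);
	printf("window after 40 replies: %d\n", t.maxout);
	on_timeout(&t);
	printf("after a timeout: maxout=%d ssthresh=%d\n",
	       t.maxout, t.ssthresh);
	return 0;
}
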
+
+void
aoecmd_cleanslate(struct aoedev *d)
{
struct aoetgt **t, **te;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->maxbcnt = 0;
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++)
- (*t)->maxout = (*t)->nframes;
+ aoecmd_wreset(*t);
}
void
@@ -1460,6 +1738,14 @@ aoe_flush_iocq(void)
int __init
aoecmd_init(void)
{
+ void *p;
+
+ /* get_zeroed_page returns page with ref count 1 */
+ p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ if (!p)
+ return -ENOMEM;
+ empty_page = virt_to_page(p);
+
INIT_LIST_HEAD(&iocq.head);
spin_lock_init(&iocq.lock);
init_waitqueue_head(&ktiowq);
@@ -1475,4 +1761,7 @@ aoecmd_exit(void)
{
aoe_ktstop(&kts);
aoe_flush_iocq();
+
+ free_page((unsigned long) page_address(empty_page));
+ empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 90e5b537f94..98f2965778b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include "aoe.h"
static void dummy_timer(ulong);
-static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -69,25 +68,34 @@ minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
NPERSHELF = 16,
};
+ if (aoemin >= NPERSHELF) {
+ pr_err("aoe: %s %d slots per shelf\n",
+ "static minor device numbers support only",
+ NPERSHELF);
+ error = -1;
+ goto out;
+ }
+
n = aoemaj * NPERSHELF + aoemin;
- if (aoemin >= NPERSHELF || n >= N_DEVS) {
+ if (n >= N_DEVS) {
pr_err("aoe: %s with e%ld.%d\n",
"cannot use static minor device numbers",
aoemaj, aoemin);
error = -1;
- } else {
- spin_lock_irqsave(&used_minors_lock, flags);
- if (test_bit(n, used_minors)) {
- pr_err("aoe: %s %lu\n",
- "existing device already has static minor number",
- n);
- error = -1;
- } else
- set_bit(n, used_minors);
- spin_unlock_irqrestore(&used_minors_lock, flags);
+ goto out;
}
- *sysminor = n;
+ spin_lock_irqsave(&used_minors_lock, flags);
+ if (test_bit(n, used_minors)) {
+ pr_err("aoe: %s %lu\n",
+ "existing device already has static minor number",
+ n);
+ error = -1;
+ } else
+ set_bit(n, used_minors);
+ spin_unlock_irqrestore(&used_minors_lock, flags);
+ *sysminor = n * AOE_PARTITIONS;
+out:
return error;
}
@@ -170,41 +178,50 @@ aoe_failip(struct aoedev *d)
aoe_end_request(d, rq, 0);
}
+static void
+downdev_frame(struct list_head *pos)
+{
+ struct frame *f;
+
+ f = list_entry(pos, struct frame, head);
+ list_del(pos);
+ if (f->buf) {
+ f->buf->nframesout--;
+ aoe_failbuf(f->t->d, f->buf);
+ }
+ aoe_freetframe(f);
+}
+
void
aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
- struct frame *f;
struct list_head *head, *pos, *nx;
struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
- /* clean out active buffers */
+ /* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- list_del(pos);
- if (f->buf) {
- f->buf->nframesout--;
- aoe_failbuf(d, f->buf);
- }
- aoe_freetframe(f);
- }
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
}
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
+
/* reset window dressings */
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && (t = *tt); tt++) {
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
t->nout = 0;
}
/* clean out the in-process request (if any) */
aoe_failip(d);
- d->htgt = NULL;
/* fast fail all pending I/O */
if (d->blkq) {
@@ -218,12 +235,48 @@ aoedev_downdev(struct aoedev *d)
set_capacity(d->gd, 0);
}
+/* return whether the user asked for this particular
+ * device to be flushed
+ */
+static int
+user_req(char *s, size_t slen, struct aoedev *d)
+{
+ char *p;
+ size_t lim;
+
+ if (!d->gd)
+ return 0;
+ p = strrchr(d->gd->disk_name, '/');
+ if (!p)
+ p = d->gd->disk_name;
+ else
+ p += 1;
+ lim = sizeof(d->gd->disk_name);
+ lim -= p - d->gd->disk_name;
+ if (slen < lim)
+ lim = slen;
+
+ return !strncmp(s, p, lim);
+}
+
static void
-aoedev_freedev(struct aoedev *d)
+freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
+ int freeing = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ d->flags |= DEVFL_FREEING;
+ freeing = 1;
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (!freeing)
+ return;
- cancel_work_sync(&d->work);
+ del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
@@ -231,61 +284,113 @@ aoedev_freedev(struct aoedev *d)
blk_cleanup_queue(d->blkq);
}
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
if (d->bufpool)
mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
- kfree(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags |= DEVFL_FREED;
+ spin_unlock_irqrestore(&d->lock, flags);
}
-int
-aoedev_flush(const char __user *str, size_t cnt)
+enum flush_parms {
+ NOT_EXITING = 0,
+ EXITING = 1,
+};
+
+static int
+flush(const char __user *str, size_t cnt, int exiting)
{
ulong flags;
struct aoedev *d, **dd;
- struct aoedev *rmd = NULL;
char buf[16];
int all = 0;
+ int specified = 0; /* flush a specific device */
+ unsigned int skipflags;
+
+ skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;
- if (cnt >= 3) {
+ if (!exiting && cnt >= 3) {
if (cnt > sizeof buf)
cnt = sizeof buf;
if (copy_from_user(buf, str, cnt))
return -EFAULT;
all = !strncmp(buf, "all", 3);
+ if (!all)
+ specified = 1;
}
+ flush_scheduled_work();
+ /* pass one: without sleeping, do aoedev_downdev */
spin_lock_irqsave(&devlist_lock, flags);
- dd = &devlist;
- while ((d = *dd)) {
+ for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
- if ((!all && (d->flags & DEVFL_UP))
- || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
+ if (exiting) {
+ /* unconditionally take each device down */
+ } else if (specified) {
+ if (!user_req(buf, cnt, d))
+ goto cont;
+ } else if ((!all && (d->flags & DEVFL_UP))
+ || d->flags & skipflags
|| d->nopen
- || d->ref) {
- spin_unlock(&d->lock);
- dd = &d->next;
- continue;
- }
- *dd = d->next;
+ || d->ref)
+ goto cont;
+
aoedev_downdev(d);
d->flags |= DEVFL_TKILL;
+cont:
spin_unlock(&d->lock);
- d->next = rmd;
- rmd = d;
}
spin_unlock_irqrestore(&devlist_lock, flags);
- while ((d = rmd)) {
- rmd = d->next;
- del_timer_sync(&d->timer);
- aoedev_freedev(d); /* must be able to sleep */
+
+ /* pass two: call freedev, which might sleep,
+ * for aoedevs marked with DEVFL_TKILL
+ */
+restart:
+ spin_lock_irqsave(&devlist_lock, flags);
+ for (d = devlist; d; d = d->next) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&devlist_lock, flags);
+ freedev(d);
+ goto restart;
+ }
+ spin_unlock(&d->lock);
}
+
+ /* pass three: remove aoedevs marked with DEVFL_FREED */
+ for (dd = &devlist, d = *dd; d; d = *dd) {
+ struct aoedev *doomed = NULL;
+
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_FREED) {
+ *dd = d->next;
+ doomed = d;
+ } else {
+ dd = &d->next;
+ }
+ spin_unlock(&d->lock);
+ if (doomed)
+ kfree(doomed->targets);
+ kfree(doomed);
+ }
+ spin_unlock_irqrestore(&devlist_lock, flags);
+
return 0;
}
+int
+aoedev_flush(const char __user *str, size_t cnt)
+{
+ return flush(str, cnt, NOT_EXITING);
+}
+
/* This has been confirmed to occur once with Tms=3*1000 due to the
* driver changing link and not processing its transmit ring. The
* problem is hard enough to solve by returning an error that I'm
@@ -332,13 +437,20 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
struct aoedev *d;
int i;
ulong flags;
- ulong sysminor;
+ ulong sysminor = 0;
spin_lock_irqsave(&devlist_lock, flags);
for (d=devlist; d; d=d->next)
if (d->aoemajor == maj && d->aoeminor == min) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL) {
+ spin_unlock(&d->lock);
+ d = NULL;
+ goto out;
+ }
d->ref++;
+ spin_unlock(&d->lock);
break;
}
if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
@@ -346,6 +458,13 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
if (!d)
goto out;
+ d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
+ if (!d->targets) {
+ kfree(d);
+ d = NULL;
+ goto out;
+ }
+ d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
@@ -359,10 +478,12 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ref = 1;
for (i = 0; i < NFACTIVE; i++)
INIT_LIST_HEAD(&d->factive[i]);
+ INIT_LIST_HEAD(&d->rexmitq);
d->sysminor = sysminor;
d->aoemajor = maj;
d->aoeminor = min;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->next = devlist;
devlist = d;
out:
@@ -396,21 +517,9 @@ freetgt(struct aoedev *d, struct aoetgt *t)
void
aoedev_exit(void)
{
- struct aoedev *d;
- ulong flags;
-
+ flush_scheduled_work();
aoe_flush_iocq();
- while ((d = devlist)) {
- devlist = d->next;
-
- spin_lock_irqsave(&d->lock, flags);
- aoedev_downdev(d);
- d->flags |= DEVFL_TKILL;
- spin_unlock_irqrestore(&d->lock, flags);
-
- del_timer_sync(&d->timer);
- aoedev_freedev(d);
- }
+ flush(NULL, 0, EXITING);
}
int __init
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 04793c2c701..4b987c2fefb 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -105,7 +105,7 @@ aoe_init(void)
aoechr_exit();
chr_fail:
aoedev_exit();
-
+
printk(KERN_INFO "aoe: initialisation failure.\n");
return ret;
}
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 162c6471275..71d3ea8d300 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -31,7 +31,7 @@ enum {
static char aoe_iflist[IFLISTSZ];
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
-MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"");
+MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");
static wait_queue_head_t txwq;
static struct ktstate kts;
@@ -52,13 +52,18 @@ static struct sk_buff_head skbtxq;
/* enters with txlock held */
static int
-tx(void)
+tx(void) __must_hold(&txlock)
{
struct sk_buff *skb;
+ struct net_device *ifp;
while ((skb = skb_dequeue(&skbtxq))) {
spin_unlock_irq(&txlock);
- dev_queue_xmit(skb);
+ ifp = skb->dev;
+ if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
+ pr_warn("aoe: packet could not be sent on %s. %s\n",
+ ifp ? ifp->name : "netif",
+ "consider increasing tx_queue_len");
spin_lock_irq(&txlock);
}
return 0;
@@ -119,8 +124,8 @@ aoenet_xmit(struct sk_buff_head *queue)
}
}
-/*
- * (1) len doesn't include the header by default. I want this.
+/*
+ * (1) len doesn't include the header by default. I want this.
*/
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ca83f96756a..6526157edaf 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -41,8 +41,9 @@
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
@@ -978,8 +979,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h)
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
if (i == h->nr_cmds)
return NULL;
- } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ } while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
c = h->cmd_pool + i;
memset(c, 0, sizeof(CommandList_struct));
cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
@@ -1046,8 +1046,7 @@ static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
h->nr_frees++;
}
@@ -4268,10 +4267,7 @@ static void __devinit cciss_find_board_params(ctlr_info_t *h)
static inline bool CISS_signature_present(ctlr_info_t *h)
{
- if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
- (readb(&h->cfgtable->Signature[1]) != 'I') ||
- (readb(&h->cfgtable->Signature[2]) != 'S') ||
- (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
@@ -4812,8 +4808,7 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
{
- h->cmd_pool_bits = kmalloc(
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
+ h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
sizeof(unsigned long), GFP_KERNEL);
h->cmd_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(CommandList_struct),
@@ -5068,9 +5063,7 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
/* command and error info recs zeroed out before
they are used */
- memset(h->cmd_pool_bits, 0,
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
- * sizeof(unsigned long));
+ bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
h->num_luns = 0;
h->highest_lun = -1;
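
The cciss hunks above swap hand-rolled word/bit indexing for the generic bitmap helpers, but the allocator pattern is unchanged: scan for a clear bit with find_first_zero_bit(), claim it with test_and_set_bit(), and rescan if another CPU raced in. A single-threaded plain-C sketch of that pattern; the kernel's test_and_set_bit() is atomic, which this stand-in does not attempt to reproduce.

#include <limits.h>
#include <stdio.h>

#define NR_CMDS 100
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long pool_bits[BITS_TO_LONGS(NR_CMDS)];

static int test_and_set(unsigned long *map, int bit)	/* non-atomic stand-in */
{
	unsigned long mask = 1UL << (bit % BITS_PER_LONG);
	unsigned long *w = map + bit / BITS_PER_LONG;
	int old = !!(*w & mask);

	*w |= mask;
	return old;
}

static int find_first_zero(unsigned long *map, int nbits)
{
	int i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return nbits;
}

/* cmd_alloc() pattern: find a free slot, claim it, retry on races */
static int alloc_slot(void)
{
	int i;

	do {
		i = find_first_zero(pool_bits, NR_CMDS);
		if (i == NR_CMDS)
			return -1;
	} while (test_and_set(pool_bits, i) != 0);
	return i;
}

int main(void)
{
	int a = alloc_slot();
	int b = alloc_slot();
	int c = alloc_slot();

	printf("%d %d %d\n", a, b, c);	/* 0 1 2 */
	return 0;
}
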
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index df098378739..7845bd6ee41 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -2,13 +2,14 @@
# DRBD device driver configuration
#
-comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
- depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
+comment "DRBD disabled because PROC_FS or INET not selected"
+ depends on PROC_FS='n' || INET='n'
config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
- depends on PROC_FS && INET && CONNECTOR
+ depends on PROC_FS && INET
select LRU_CACHE
+ select LIBCRC32C
default n
help
@@ -58,7 +59,8 @@ config DRBD_FAULT_INJECTION
32 data read
64 read ahead
128 kmalloc of bitmap
- 256 allocation of EE (epoch_entries)
+ 256 allocation of peer_requests
+ 512 insert data corruption on receiving side
fault_devs: bitmask of minor numbers
fault_rate: frequency in percent
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index 0d3f337ff5f..8b450338075 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,5 +1,7 @@
drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
+drbd-y += drbd_interval.o drbd_state.o
+drbd-y += drbd_nla.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 3fbef018ce5..92510f8ad01 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -24,21 +24,73 @@
*/
#include <linux/slab.h>
+#include <linux/crc32c.h>
#include <linux/drbd.h>
+#include <linux/drbd_limits.h>
+#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
-/* We maintain a trivial checksum in our on disk activity log.
- * With that we can ensure correct operation even when the storage
- * device might do a partial (last) sector write while losing power.
- */
-struct __packed al_transaction {
- u32 magic;
- u32 tr_number;
- struct __packed {
- u32 pos;
- u32 extent; } updates[1 + AL_EXTENTS_PT];
- u32 xor_sum;
+
+enum al_transaction_types {
+ AL_TR_UPDATE = 0,
+ AL_TR_INITIALIZED = 0xffff
+};
+/* all fields on disc in big endian */
+struct __packed al_transaction_on_disk {
+ /* don't we all like magic */
+ __be32 magic;
+
+ /* to identify the most recent transaction block
+ * in the on disk ring buffer */
+ __be32 tr_number;
+
+ /* checksum on the full 4k block, with this field set to 0. */
+ __be32 crc32c;
+
+ /* type of transaction, special transaction types like:
+ * purge-all, set-all-idle, set-all-active, ... to-be-defined
+ * see also enum al_transaction_types */
+ __be16 transaction_type;
+
+ /* we currently allow only a few thousand extents,
+ * so 16bit will be enough for the slot number. */
+
+ /* how many updates in this transaction */
+ __be16 n_updates;
+
+ /* maximum slot number, "al-extents" in drbd.conf speak.
+ * Having this in each transaction should make reconfiguration
+ * of that parameter easier. */
+ __be16 context_size;
+
+ /* slot number the context starts with */
+ __be16 context_start_slot_nr;
+
+ /* Some reserved bytes. Expected usage is a 64bit counter of
+ * sectors-written since device creation, and other usage such as a
+ * data generation tag */
+ __be32 __reserved[4];
+
+ /* --- 36 byte used --- */
+
+ /* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
+ * in one transaction, then use the remaining byte in the 4k block for
+ * context information. "Flexible" number of updates per transaction
+ * does not help, as we have to account for the case when all update
+ * slots are used anyways, so it would only complicate code without
+ * additional benefit.
+ */
+ __be16 update_slot_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* but the extent number is 32bit, which at an extent size of 4 MiB
+ * allows covering device sizes of up to 2**54 bytes (16 PiB) */
+ __be32 update_extent_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* --- 420 bytes used (36 + 64*6) --- */
+
+ /* 4096 - 420 = 3676 = 919 * 4 */
+ __be32 context[AL_CONTEXT_PER_TRANSACTION];
};
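
The crc32c field sits inside the 4k block it protects, so the writer computes the checksum while the field still holds zero (the buffer is memset first and crc32c filled in last, as _al_write_transaction() below does), and a reader has to zero the field again before recomputing. A sketch of that round trip; the bitwise CRC-32C here is a textbook stand-in for the in-kernel crc32c() that the new LIBCRC32C dependency provides, and its seed handling may differ, but the verification logic is the same.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* textbook bitwise CRC-32C (Castagnoli polynomial, reflected) */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int k;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

/* reduced stand-in for the on-disk block: only what the round trip needs */
struct al_block {
	uint32_t magic;
	uint32_t tr_number;
	uint32_t crc;		/* big-endian on disk; kept host-order here */
	uint8_t  rest[4096 - 12];
};

int main(void)
{
	static struct al_block b;
	uint32_t stored, crc;

	/* writer: fill the block while the crc field is still zero */
	memset(&b, 0, sizeof(b));
	b.magic = 0x12345678;	/* arbitrary placeholder, not DRBD's magic */
	b.tr_number = 7;
	b.crc = crc32c(0, &b, sizeof(b));

	/* reader: save, zero, recompute, compare */
	stored = b.crc;
	b.crc = 0;
	crc = crc32c(0, &b, sizeof(b));
	printf("checksum %s\n", stored == crc ? "ok" : "BAD");
	return 0;
}
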
struct update_odbm_work {
@@ -48,22 +100,11 @@ struct update_odbm_work {
struct update_al_work {
struct drbd_work w;
- struct lc_element *al_ext;
struct completion event;
- unsigned int enr;
- /* if old_enr != LC_FREE, write corresponding bitmap sector, too */
- unsigned int old_enr;
-};
-
-struct drbd_atodb_wait {
- atomic_t count;
- struct completion io_done;
- struct drbd_conf *mdev;
- int error;
+ int err;
};
-
-int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+static int al_write_transaction(struct drbd_conf *mdev);
void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
@@ -82,22 +123,24 @@ void drbd_md_put_buffer(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
-static bool md_io_allowed(struct drbd_conf *mdev)
-{
- enum drbd_disk_state ds = mdev->state.disk;
- return ds >= D_NEGOTIATING || ds == D_ATTACHING;
-}
-
-void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
unsigned int *done)
{
- long dt = bdev->dc.disk_timeout * HZ / 10;
+ long dt;
+
+ rcu_read_lock();
+ dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
+ rcu_read_unlock();
+ dt = dt * HZ / 10;
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
- if (dt == 0)
+ dt = wait_event_timeout(mdev->misc_wait,
+ *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ }
}
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
@@ -106,7 +149,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
int rw, int size)
{
struct bio *bio;
- int ok;
+ int err;
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
@@ -118,8 +161,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
- ok = (bio_add_page(bio, page, size, 0) == size);
- if (!ok)
+ err = -EIO;
+ if (bio_add_page(bio, page, size, 0) != size)
goto out;
bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
@@ -127,7 +170,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
- ok = 0;
+ err = -ENODEV;
goto out;
}
@@ -137,86 +180,47 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
- ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
+ wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+ if (bio_flagged(bio, BIO_UPTODATE))
+ err = mdev->md_io.error;
out:
bio_put(bio);
- return ok;
+ return err;
}
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
- int logical_block_size, mask, ok;
- int offset = 0;
+ int err;
struct page *iop = mdev->md_io_page;
D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
- logical_block_size = bdev_logical_block_size(bdev->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- /* in case logical_block_size != 512 [ s390 only? ] */
- if (logical_block_size != MD_SECTOR_SIZE) {
- mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
- D_ASSERT(mask == 1 || mask == 3 || mask == 7);
- D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
- offset = sector & mask;
- sector = sector & ~mask;
- iop = mdev->md_io_tmpp;
-
- if (rw & WRITE) {
- /* these are GFP_KERNEL pages, pre-allocated
- * on device initialization */
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
- READ, logical_block_size);
-
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
- "READ [logical_block_size!=512]) failed!\n",
- (unsigned long long)sector);
- return 0;
- }
-
- memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
- }
- }
+ dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+ current->comm, current->pid, __func__,
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
if (sector < drbd_md_first_sector(bdev) ||
- sector > drbd_md_last_sector(bdev))
+ sector + 7 > drbd_md_last_sector(bdev))
dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- return 0;
- }
-
- if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
+ err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+ if (err) {
+ dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
}
-
- return ok;
+ return err;
}
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
struct lc_element *tmp;
- unsigned long al_flags = 0;
int wake;
spin_lock_irq(&mdev->al_lock);
@@ -231,76 +235,92 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
return NULL;
}
}
- al_ext = lc_get(mdev->act_log, enr);
- al_flags = mdev->act_log->flags;
+ al_ext = lc_get(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
-
- /*
- if (!al_ext) {
- if (al_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
- if (al_flags & LC_DIRTY)
- dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
- }
- */
-
return al_ext;
}
-void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
- struct lc_element *al_ext;
- struct update_al_work al_work;
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
+ bool locked = false;
+
+ D_ASSERT(first <= last);
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
+ for (enr = first; enr <= last; enr++)
+ wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+
+ /* Serialize multiple transactions.
+ * This uses test_and_set_bit, memory barrier is implicit.
+ */
+ wait_event(mdev->al_wait,
+ mdev->act_log->pending_changes == 0 ||
+ (locked = lc_try_lock_for_transaction(mdev->act_log)));
- if (al_ext->lc_number != enr) {
+ if (locked) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
* recurses into generic_make_request(), which
* disallows recursion, bios being serialized on the
* current->bio_tail list now.
* we have to delegate updates to the activity log
* to the worker thread. */
- init_completion(&al_work.event);
- al_work.al_ext = al_ext;
- al_work.enr = enr;
- al_work.old_enr = al_ext->lc_number;
- al_work.w.cb = w_al_write_transaction;
- drbd_queue_work_front(&mdev->data.work, &al_work.w);
- wait_for_completion(&al_work.event);
-
- mdev->al_writ_cnt++;
-
- spin_lock_irq(&mdev->al_lock);
- lc_changed(mdev->act_log, al_ext);
- spin_unlock_irq(&mdev->al_lock);
+
+ /* Double check: it may have been committed by someone else,
+ * while we have been waiting for the lock. */
+ if (mdev->act_log->pending_changes) {
+ bool write_al_updates;
+
+ rcu_read_lock();
+ write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ rcu_read_unlock();
+
+ if (write_al_updates) {
+ al_write_transaction(mdev);
+ mdev->al_writ_cnt++;
+ }
+
+ spin_lock_irq(&mdev->al_lock);
+ /* FIXME
+ if (err)
+ we need an "lc_cancel" here;
+ */
+ lc_committed(mdev->act_log);
+ spin_unlock_irq(&mdev->al_lock);
+ }
+ lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
}
}
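
The first/last computation above is just integer division of the request's sector range by the activity-log extent size; with the 4 MiB extents mentioned in the transaction layout (so a shift of 22, i.e. 8192 sectors per extent, inferred from that figure rather than quoted from the drbd headers), a bio that straddles an extent boundary activates two extents. A quick arithmetic check:

#include <stdio.h>

#define AL_EXTENT_SHIFT 22	/* assumed from the 4 MiB extent size */

int main(void)
{
	unsigned long long sector = 8190;	/* 512-byte sectors */
	unsigned int size = 32 * 1024;		/* a 32 KiB write */

	unsigned first = sector >> (AL_EXTENT_SHIFT - 9);
	unsigned last = (sector + (size >> 9) - 1) >> (AL_EXTENT_SHIFT - 9);

	/* sectors 8190..8253 straddle the 8192-sector boundary,
	 * so extents 0 and 1 both need to be active */
	printf("extents %u..%u\n", first, last);
	return 0;
}
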
-void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
struct lc_element *extent;
unsigned long flags;
+ D_ASSERT(first <= last);
spin_lock_irqsave(&mdev->al_lock, flags);
- extent = lc_find(mdev->act_log, enr);
-
- if (!extent) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
- return;
+ for (enr = first; enr <= last; enr++) {
+ extent = lc_find(mdev->act_log, enr);
+ if (!extent) {
+ dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+ continue;
+ }
+ lc_put(mdev->act_log, extent);
}
-
- if (lc_put(mdev->act_log, extent) == 0)
- wake_up(&mdev->al_wait);
-
spin_unlock_irqrestore(&mdev->al_lock, flags);
+ wake_up(&mdev->al_wait);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -326,296 +346,148 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
return rs_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
- /* al extent number to bit */
+ /* resync extent number to bit */
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
-int
-w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int
+_al_write_transaction(struct drbd_conf *mdev)
{
- struct update_al_work *aw = container_of(w, struct update_al_work, w);
- struct lc_element *updated = aw->al_ext;
- const unsigned int new_enr = aw->enr;
- const unsigned int evicted = aw->old_enr;
- struct al_transaction *buffer;
+ struct al_transaction_on_disk *buffer;
+ struct lc_element *e;
sector_t sector;
- int i, n, mx;
- unsigned int extent_nr;
- u32 xor_sum = 0;
+ int i, mx;
+ unsigned extent_nr;
+ unsigned crc = 0;
+ int err = 0;
if (!get_ldev(mdev)) {
- dev_err(DEV,
- "disk is %s, cannot start al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
- return 1;
+ dev_err(DEV, "disk is %s, cannot start al transaction\n",
+ drbd_disk_str(mdev->state.disk));
+ return -EIO;
}
- /* do we have to do a bitmap write, first?
- * TODO reduce maximum latency:
- * submit both bios, then wait for both,
- * instead of doing two synchronous sector writes.
- * For now, we must not write the transaction,
- * if we cannot write out the bitmap of the evicted extent. */
- if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
- drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
/* The bitmap write may have failed, causing a state change. */
if (mdev->state.disk < D_INCONSISTENT) {
dev_err(DEV,
- "disk is %s, cannot write al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
+ "disk is %s, cannot write al transaction\n",
+ drbd_disk_str(mdev->state.disk));
put_ldev(mdev);
- return 1;
+ return -EIO;
}
buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
if (!buffer) {
dev_err(DEV, "disk failed while waiting for md_io buffer\n");
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return -ENODEV;
}
- buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
- n = lc_index_of(mdev->act_log, updated);
+ i = 0;
+
+ /* Even though no one can start to change this list
+ * once we set the LC_LOCKED -- from drbd_al_begin_io(),
+ * lc_try_lock_for_transaction() --, someone may still
+ * be in the process of changing it. */
+ spin_lock_irq(&mdev->al_lock);
+ list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+ if (i == AL_UPDATES_PER_TRANSACTION) {
+ i++;
+ break;
+ }
+ buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
+ buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
+ if (e->lc_number != LC_FREE)
+ drbd_bm_mark_for_writeout(mdev,
+ al_extent_to_bm_page(e->lc_number));
+ i++;
+ }
+ spin_unlock_irq(&mdev->al_lock);
+ BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
- buffer->updates[0].pos = cpu_to_be32(n);
- buffer->updates[0].extent = cpu_to_be32(new_enr);
+ buffer->n_updates = cpu_to_be16(i);
+ for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
+ buffer->update_slot_nr[i] = cpu_to_be16(-1);
+ buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
+ }
- xor_sum ^= new_enr;
+ buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
+ buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
- mx = min_t(int, AL_EXTENTS_PT,
+ mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
mdev->act_log->nr_elements - mdev->al_tr_cycle);
for (i = 0; i < mx; i++) {
unsigned idx = mdev->al_tr_cycle + i;
extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
- buffer->updates[i+1].pos = cpu_to_be32(idx);
- buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
- xor_sum ^= extent_nr;
- }
- for (; i < AL_EXTENTS_PT; i++) {
- buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
- buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
- xor_sum ^= LC_FREE;
+ buffer->context[i] = cpu_to_be32(extent_nr);
}
- mdev->al_tr_cycle += AL_EXTENTS_PT;
+ for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
+ buffer->context[i] = cpu_to_be32(LC_FREE);
+
+ mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
- buffer->xor_sum = cpu_to_be32(xor_sum);
-
sector = mdev->ldev->md.md_offset
- + mdev->ldev->md.al_offset + mdev->al_tr_pos;
-
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ + mdev->ldev->md.al_offset
+ + mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
+ crc = crc32c(0, buffer, 4096);
+ buffer->crc32c = cpu_to_be32(crc);
- D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
- mdev->al_tr_number++;
+ if (drbd_bm_write_hinted(mdev))
+ err = -EIO;
+ /* drbd_chk_io_error done already */
+ else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ err = -EIO;
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ } else {
+ /* advance ringbuffer position and transaction counter */
+ mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
+ mdev->al_tr_number++;
+ }
drbd_md_put_buffer(mdev);
-
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return err;
}
-/**
- * drbd_al_read_tr() - Read a single transaction from the on disk activity log
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- * @b: pointer to an al_transaction.
- * @index: On disk slot of the transaction to read.
- *
- * Returns -1 on IO error, 0 on checksum error and 1 upon success.
- */
-static int drbd_al_read_tr(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev,
- struct al_transaction *b,
- int index)
-{
- sector_t sector;
- int rv, i;
- u32 xor_sum = 0;
-
- sector = bdev->md.md_offset + bdev->md.al_offset + index;
-
- /* Dont process error normally,
- * as this is done before disk is attached! */
- if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
- return -1;
-
- rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);
-
- for (i = 0; i < AL_EXTENTS_PT + 1; i++)
- xor_sum ^= be32_to_cpu(b->updates[i].extent);
- rv &= (xor_sum == be32_to_cpu(b->xor_sum));
- return rv;
-}
-
-/**
- * drbd_al_read_log() - Restores the activity log from its on disk representation.
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- *
- * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
- */
-int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int w_al_write_transaction(struct drbd_work *w, int unused)
{
- struct al_transaction *buffer;
- int i;
- int rv;
- int mx;
- int active_extents = 0;
- int transactions = 0;
- int found_valid = 0;
- int from = 0;
- int to = 0;
- u32 from_tnr = 0;
- u32 to_tnr = 0;
- u32 cnr;
-
- mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
-
- /* lock out all other meta data io for now,
- * and make sure the page is mapped.
- */
- buffer = drbd_md_get_buffer(mdev);
- if (!buffer)
- return 0;
-
- /* Find the valid transaction in the log */
- for (i = 0; i <= mx; i++) {
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- if (rv == 0)
- continue;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
- cnr = be32_to_cpu(buffer->tr_number);
-
- if (++found_valid == 1) {
- from = i;
- to = i;
- from_tnr = cnr;
- to_tnr = cnr;
- continue;
- }
- if ((int)cnr - (int)from_tnr < 0) {
- D_ASSERT(from_tnr - cnr + i - from == mx+1);
- from = i;
- from_tnr = cnr;
- }
- if ((int)cnr - (int)to_tnr > 0) {
- D_ASSERT(cnr - to_tnr == i - to);
- to = i;
- to_tnr = cnr;
- }
- }
-
- if (!found_valid) {
- dev_warn(DEV, "No usable activity log found.\n");
- drbd_md_put_buffer(mdev);
- return 1;
- }
-
- /* Read the valid transactions.
- * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
- i = from;
- while (1) {
- int j, pos;
- unsigned int extent_nr;
- unsigned int trn;
-
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- ERR_IF(rv == 0) goto cancel;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
-
- trn = be32_to_cpu(buffer->tr_number);
-
- spin_lock_irq(&mdev->al_lock);
-
- /* This loop runs backwards because in the cyclic
- elements there might be an old version of the
- updated element (in slot 0). So the element in slot 0
- can overwrite old versions. */
- for (j = AL_EXTENTS_PT; j >= 0; j--) {
- pos = be32_to_cpu(buffer->updates[j].pos);
- extent_nr = be32_to_cpu(buffer->updates[j].extent);
-
- if (extent_nr == LC_FREE)
- continue;
-
- lc_set(mdev->act_log, extent_nr, pos);
- active_extents++;
- }
- spin_unlock_irq(&mdev->al_lock);
-
- transactions++;
-
-cancel:
- if (i == to)
- break;
- i++;
- if (i > mx)
- i = 0;
- }
-
- mdev->al_tr_number = to_tnr+1;
- mdev->al_tr_pos = to;
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
-
- /* ok, we are done with it */
- drbd_md_put_buffer(mdev);
+ struct update_al_work *aw = container_of(w, struct update_al_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
- dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
- transactions, active_extents);
+ err = _al_write_transaction(mdev);
+ aw->err = err;
+ complete(&aw->event);
- return 1;
+ return err != -EIO ? err : 0;
}
-/**
- * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
- * @mdev: DRBD device.
- */
-void drbd_al_apply_to_bm(struct drbd_conf *mdev)
+/* Calls from worker context (see w_restart_disk_io()) need to write the
+   transaction directly. Others come through generic_make_request() and
+   need to delegate it to the worker. */
+static int al_write_transaction(struct drbd_conf *mdev)
{
- unsigned int enr;
- unsigned long add = 0;
- char ppb[10];
- int i, tmp;
-
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ struct update_al_work al_work;
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- enr = lc_element_by_index(mdev->act_log, i)->lc_number;
- if (enr == LC_FREE)
- continue;
- tmp = drbd_bm_ALe_set_all(mdev, enr);
- dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
- add += tmp;
- }
+ if (current == mdev->tconn->worker.task)
+ return _al_write_transaction(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
+ init_completion(&al_work.event);
+ al_work.w.cb = w_al_write_transaction;
+ al_work.w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ wait_for_completion(&al_work.event);
- dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
- ppsize(ppb, Bit2KB(add)));
+ return al_work.err;
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
@@ -645,7 +517,7 @@ void drbd_al_shrink(struct drbd_conf *mdev)
struct lc_element *al_ext;
int i;
- D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));
+ D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(mdev->act_log, i);
@@ -657,15 +529,17 @@ void drbd_al_shrink(struct drbd_conf *mdev)
wake_up(&mdev->al_wait);
}
-static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
if (!get_ldev(mdev)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
- return 1;
+ return 0;
}
drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
@@ -683,9 +557,9 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
break;
}
}
- drbd_bcast_sync_progress(mdev);
+ drbd_bcast_event(mdev, &sib);
- return 1;
+ return 0;
}
@@ -755,7 +629,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
}
ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count;
- lc_changed(mdev->resync, &ext->lce);
+ /* we don't keep a persistent log of the resync lru,
+ * we can commit any change right away. */
+ lc_committed(mdev->resync);
}
lc_put(mdev->resync, &ext->lce);
/* no race, we are within the al_lock! */
@@ -767,7 +643,8 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
- drbd_queue_work_front(&mdev->data.work, &udw->w);
+ udw->w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
@@ -813,16 +690,22 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int wake_up = 0;
unsigned long flags;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
+
+ if (!get_ldev(mdev))
+ return; /* no disk, no metadata, no bitmap to clear bits in */
+
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ goto out;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
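(Editorial aside: the hunks above and below replace the open-coded "(size & 0x1ff) != 0" test with IS_ALIGNED(size, 512); the two are equivalent for power-of-two alignments. A minimal user-space check of that equivalence, with a local stand-in for the kernel's IS_ALIGNED macro, illustrative only and not part of the patch:)

#include <assert.h>
#include <stdio.h>

/* user-space stand-in for the kernel's IS_ALIGNED(); 'a' must be a power of two */
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

int main(void)
{
        const int sizes[] = { 0, 512, 513, 1024, 4096, 0x1ff, 1048576 };
        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int size = sizes[i];
                /* the old bit-mask test and the new IS_ALIGNED() form must agree */
                assert(((size & 0x1ff) != 0) == !IS_ALIGNED(size, 512));
                printf("%7d -> %s\n", size,
                       IS_ALIGNED(size, 512) ? "aligned" : "unaligned");
        }
        return 0;
}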
@@ -830,7 +713,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* round up start sector, round down end sector. we make sure we only
* clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
- return;
+ goto out;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
@@ -838,14 +721,14 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
if (sbnr > ebnr)
- return;
+ goto out;
/*
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
- if (count && get_ldev(mdev)) {
+ if (count) {
drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
spin_lock_irqsave(&mdev->al_lock, flags);
drbd_try_clear_on_disk_bm(mdev, sector, count, true);
@@ -854,8 +737,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
/* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
- put_ldev(mdev);
}
+out:
+ put_ldev(mdev);
if (wake_up)
wake_up(&mdev->al_wait);
}
@@ -871,7 +755,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
- unsigned long sbnr, ebnr, lbnr, flags;
+ unsigned long sbnr, ebnr, flags;
sector_t esector, nr_sectors;
unsigned int enr, count = 0;
struct lc_element *e;
@@ -880,7 +764,7 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
if (size == 0)
return 0;
- if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
@@ -892,12 +776,10 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors)
+ if (!expect(sector < nr_sectors))
goto out;
- ERR_IF(esector >= nr_sectors)
- esector = (nr_sectors-1);
-
- lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
/* we set it out of sync,
* we do not need to round anything here */
@@ -940,7 +822,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
@@ -956,7 +838,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
}
return bm_ext;
@@ -964,26 +846,12 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
- struct lc_element *al_ext;
- int rv = 0;
+ int rv;
spin_lock_irq(&mdev->al_lock);
- if (unlikely(enr == mdev->act_log->new_number))
- rv = 1;
- else {
- al_ext = lc_find(mdev->act_log, enr);
- if (al_ext) {
- if (al_ext->refcnt)
- rv = 1;
- }
- }
+ rv = lc_is_used(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
- /*
- if (unlikely(rv)) {
- dev_info(DEV, "Delaying sync read until app's write is done\n");
- }
- */
return rv;
}
@@ -1113,13 +981,13 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wake_up(&mdev->al_wait);
D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
@@ -1130,8 +998,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- if (unlikely(al_enr+i == mdev->act_log->new_number))
- goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i))
goto try_again;
}
@@ -1266,7 +1132,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -1274,8 +1140,10 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ return;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d8456649674..8dc29502dc0 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -119,13 +119,9 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
if (!__ratelimit(&drbd_ratelimit_state))
return;
dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- func, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ func, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
@@ -142,13 +138,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
if (trylock_failed) {
dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- why, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ why, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
@@ -196,6 +188,9 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * we check if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT 28
+/* pages marked with this "HINT" will be considered for writeout
+ * on activity log transactions */
+#define BM_PAGE_HINT_WRITEOUT 27
/* store_page_idx uses non-atomic assignment. It is only used directly after
* allocating the page. All other bm_set_page_* and bm_clear_page_* need to
@@ -227,8 +222,7 @@ static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
- clear_bit(BM_PAGE_IO_LOCK, addr);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
wake_up(&mdev->bitmap->bm_io_wait);
}
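(Editorial aside: the hunk above replaces clear_bit() plus smp_mb__after_clear_bit() with clear_bit_unlock(), which clears the bit with release semantics. A minimal user-space sketch of the same unlock idiom, using C11 atomics as a stand-in for the kernel bitops; names are illustrative only, not part of the patch:)

#include <stdatomic.h>
#include <stdio.h>

#define BM_PAGE_IO_LOCK 31

/* clear the lock bit with release ordering, analogous to clear_bit_unlock() */
static void page_unlock(atomic_ulong *word)
{
        atomic_fetch_and_explicit(word, ~(1UL << BM_PAGE_IO_LOCK),
                                  memory_order_release);
        /* a waiter would be woken here, cf. wake_up(&bm_io_wait) above */
}

int main(void)
{
        atomic_ulong flags;

        atomic_init(&flags, (1UL << BM_PAGE_IO_LOCK) | 0x5UL);
        page_unlock(&flags);
        printf("flags after unlock: %#lx\n", (unsigned long)atomic_load(&flags));
        return 0;
}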
@@ -246,6 +240,27 @@ static void bm_set_page_need_writeout(struct page *page)
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
+/**
+ * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
+ * @mdev: DRBD device.
+ * @page_nr: the bitmap page to mark with the "hint" flag
+ *
+ * From within an activity log transaction, we mark a few pages with these
+ * hints, then call drbd_bm_write_hinted(), which will only write out changed
+ * pages which are flagged with this mark.
+ */
+void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
+{
+ struct page *page;
+ if (page_nr >= mdev->bitmap->bm_number_of_pages) {
+ dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
+ page_nr, (int)mdev->bitmap->bm_number_of_pages);
+ return;
+ }
+ page = mdev->bitmap->bm_pages[page_nr];
+ set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
+}
+
static int bm_test_page_unchanged(struct page *page)
{
volatile const unsigned long *addr = &page_private(page);
@@ -373,14 +388,16 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
return old_pages;
/* Trying kmalloc first, falling back to vmalloc.
- * GFP_KERNEL is ok, as this is done when a lower level disk is
- * "attached" to the drbd. Context is receiver thread or cqueue
- * thread. As we have no disk yet, we are not in the IO path,
- * not even the IO path of the peer. */
+ * GFP_NOIO, as this is called while drbd IO is "suspended",
+ * and during resize or attach on diskless Primary,
+ * we must not block on IO to ourselves.
+ * Context is receiver thread or drbdsetup. */
bytes = sizeof(struct page *)*want;
- new_pages = kzalloc(bytes, GFP_KERNEL);
+ new_pages = kzalloc(bytes, GFP_NOIO);
if (!new_pages) {
- new_pages = vzalloc(bytes);
+ new_pages = __vmalloc(bytes,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
if (!new_pages)
return NULL;
vmalloced = 1;
@@ -390,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
for (; i < want; i++) {
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
bm_vk_free(new_pages, vmalloced);
@@ -439,7 +456,8 @@ int drbd_bm_init(struct drbd_conf *mdev)
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
- ERR_IF(!mdev->bitmap) return 0;
+ if (!expect(mdev->bitmap))
+ return 0;
return mdev->bitmap->bm_dev_capacity;
}
@@ -447,7 +465,8 @@ sector_t drbd_bm_capacity(struct drbd_conf *mdev)
*/
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
- ERR_IF (!mdev->bitmap) return;
+ if (!expect(mdev->bitmap))
+ return;
bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
kfree(mdev->bitmap);
@@ -610,7 +629,8 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
int err = 0, growing;
int opages_vmalloced;
- ERR_IF(!b) return -ENOMEM;
+ if (!expect(b))
+ return -ENOMEM;
drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
@@ -732,8 +752,10 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long s;
unsigned long flags;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
s = b->bm_set;
@@ -756,8 +778,10 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
size_t drbd_bm_words(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
return b->bm_words;
}
@@ -765,7 +789,8 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
+ if (!expect(b))
+ return 0;
return b->bm_bits;
}
@@ -786,8 +811,10 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
if (number == 0)
return;
WARN_ON(offset >= b->bm_words);
@@ -831,8 +858,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
if ((offset >= b->bm_words) ||
@@ -860,8 +889,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
void drbd_bm_set_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0xff, b->bm_words);
@@ -874,8 +905,10 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0, b->bm_words);
@@ -889,7 +922,8 @@ struct bm_aio_ctx {
unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
-#define BM_WRITE_ALL_PAGES 2
+#define BM_AIO_WRITE_HINTED 2
+#define BM_WRITE_ALL_PAGES 4
int error;
struct kref kref;
};
@@ -977,17 +1011,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- void *src, *dest;
page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
- dest = kmap_atomic(page);
- src = kmap_atomic(b->bm_pages[page_nr]);
- memcpy(dest, src, PAGE_SIZE);
- kunmap_atomic(src);
- kunmap_atomic(dest);
+ copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
-
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
@@ -1060,6 +1088,11 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
break;
if (rw & WRITE) {
+ if ((flags & BM_AIO_WRITE_HINTED) &&
+ !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
+ &page_private(b->bm_pages[i])))
+ continue;
+
if (!(flags & BM_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
@@ -1088,13 +1121,15 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
- dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
- rw == WRITE ? "WRITE" : "READ",
- count, jiffies - now);
+ /* summary for global bitmap IO */
+ if (flags == 0)
+ dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ rw == WRITE ? "WRITE" : "READ",
+ count, jiffies - now);
if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
@@ -1103,7 +1138,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
if (atomic_read(&ctx->in_flight))
- err = -EIO; /* Disk failed during IO... */
+ err = -EIO; /* Disk timeout/force-detach during IO... */
now = jiffies;
if (rw == WRITE) {
@@ -1115,8 +1150,9 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
now = b->bm_set;
- dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
- ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ if (flags == 0)
+ dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+ ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@@ -1179,9 +1215,17 @@ int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
+/**
+ * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
+ * @mdev: DRBD device.
+ */
+int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
+}
/**
- * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
+ * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
* @mdev: DRBD device.
* @idx: bitmap page index
*
@@ -1222,11 +1266,11 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
if (ctx->error)
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- /* that should force detach, so the in memory bitmap will be
+ /* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
@@ -1289,8 +1333,10 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
struct drbd_bitmap *b = mdev->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
- ERR_IF(!b) return i;
- ERR_IF(!b->bm_pages) return i;
+ if (!expect(b))
+ return i;
+ if (!expect(b->bm_pages))
+ return i;
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
@@ -1391,8 +1437,10 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
struct drbd_bitmap *b = mdev->bitmap;
int c = 0;
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
@@ -1423,13 +1471,21 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
+ int changed = 0;
unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
- b->bm_set += BITS_PER_LONG - bits;
+ changed += BITS_PER_LONG - bits;
}
kunmap_atomic(paddr);
+ if (changed) {
+ /* We only need lazy writeout, the information is still in the
+ * remote bitmap as well, and is reconstructed during the next
+ * bitmap exchange, if lost locally due to a crash. */
+ bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
+ b->bm_set += changed;
+ }
}
/* Same thing as drbd_bm_set_bits,
@@ -1524,8 +1580,10 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
unsigned long *p_addr;
int i;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1559,8 +1617,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* robust in case we screwed up elsewhere, in that case pretend there
* was one dirty bit in the requested area, so we won't try to do a
* local read there (no bitmap probably implies no disk) */
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 1;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 1;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1573,11 +1633,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
bm_unmap(p_addr);
p_addr = bm_map_pidx(b, idx);
}
- ERR_IF (bitnr >= b->bm_bits) {
- dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
- } else {
+ if (expect(bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
- }
+ else
+ dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
@@ -1607,8 +1666,10 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
unsigned long flags;
unsigned long *p_addr, *bm;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1630,47 +1691,3 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
}
-
-/* Set all bits covered by the AL-extent al_enr.
- * Returns number of bits changed. */
-unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
-{
- struct drbd_bitmap *b = mdev->bitmap;
- unsigned long *p_addr, *bm;
- unsigned long weight;
- unsigned long s, e;
- int count, i, do_now;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
-
- spin_lock_irq(&b->bm_lock);
- if (BM_DONT_SET & b->bm_flags)
- bm_print_lock_info(mdev);
- weight = b->bm_set;
-
- s = al_enr * BM_WORDS_PER_AL_EXT;
- e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
- /* assert that s and e are on the same page */
- D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
- == s >> (PAGE_SHIFT - LN2_BPL + 3));
- count = 0;
- if (s < b->bm_words) {
- i = do_now = e-s;
- p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
- bm = p_addr + MLPP(s);
- while (i--) {
- count += hweight_long(*bm);
- *bm = -1UL;
- bm++;
- }
- bm_unmap(p_addr);
- b->bm_set += do_now*BITS_PER_LONG - count;
- if (e == b->bm_words)
- b->bm_set -= bm_clear_surplus(b);
- } else {
- dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
- }
- weight = b->bm_set - weight;
- spin_unlock_irq(&b->bm_lock);
- return weight;
-}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b953cc7c9c0..6b51afa1aae 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -39,9 +39,13 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
+#include <linux/drbd_genl_api.h>
+#include <linux/drbd.h>
+#include "drbd_state.h"
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
@@ -61,7 +65,6 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
-extern unsigned int cn_idx;
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@@ -86,34 +89,44 @@ extern char usermode_helper[];
*/
#define DRBD_SIGKILL SIGHUP
-/* All EEs on the free list should have ID_VACANT (== 0)
- * freshly allocated EEs get !ID_VACANT (== 1)
- * so if it says "cannot dereference null pointer at address 0x00000001",
- * it is most likely one of these :( */
-
#define ID_IN_SYNC (4711ULL)
#define ID_OUT_OF_SYNC (4712ULL)
-
#define ID_SYNCER (-1ULL)
-#define ID_VACANT 0
-#define is_syncer_block_id(id) ((id) == ID_SYNCER)
+
#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
struct drbd_conf;
+struct drbd_tconn;
/* to shorten dev_warn(DEV, "msg"); and related statements */
#define DEV (disk_to_dev(mdev->vdisk))
+#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
+ printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
+#define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
+#define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
+#define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
+#define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
+#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
+#define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
+#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
+
#define D_ASSERT(exp) if (!(exp)) \
dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
-#define ERR_IF(exp) if (({ \
- int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
- __func__, #exp, __FILE__, __LINE__); \
- _b; \
- }))
+/**
+ * expect - Make an assertion
+ *
+ * Unlike the assert macro, this macro returns a boolean result.
+ */
+#define expect(exp) ({ \
+ bool _bool = (exp); \
+ if (!_bool) \
+ dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
+ #exp, __func__); \
+ _bool; \
+ })
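(Editorial aside: unlike the old ERR_IF(), which wrapped the test for use inside an if(), the new expect() evaluates its argument once and hands back the boolean result, so callers write if (!expect(cond)) return ...; . A minimal user-space analogue of the pattern, assuming GCC/Clang statement expressions; the function names below are illustrative only, not part of the patch:)

#include <stdbool.h>
#include <stdio.h>

/* report a failed assumption, but let the caller degrade gracefully */
#define expect(exp) ({ \
        bool _bool = (exp); \
        if (!_bool) \
                fprintf(stderr, "ASSERTION %s FAILED in %s\n", \
                        #exp, __func__); \
        _bool; \
})

static long bm_capacity(const long *bitmap)
{
        if (!expect(bitmap))    /* complain once, then return a safe default */
                return 0;
        return *bitmap;
}

int main(void)
{
        long bits = 4096;

        printf("%ld %ld\n", bm_capacity(&bits), bm_capacity(NULL));
        return 0;
}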
/* Defines to control fault insertion */
enum {
@@ -150,15 +163,12 @@ drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
-/* drbd_meta-data.c (still in drbd_main.c) */
-/* 4th incarnation of the disk layout. */
-#define DRBD_MD_MAGIC (DRBD_MAGIC+4)
-
-extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;
+extern struct idr minors; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
/* on the wire */
-enum drbd_packets {
+enum drbd_packet {
/* receiver (data socket) */
P_DATA = 0x00,
P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
@@ -186,7 +196,7 @@ enum drbd_packets {
P_RECV_ACK = 0x15, /* Used in protocol B */
P_WRITE_ACK = 0x16, /* Used in protocol C */
P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
- P_DISCARD_ACK = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
@@ -207,77 +217,23 @@ enum drbd_packets {
P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
+ P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
+ P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
+ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
+ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
- P_MAX_CMD = 0x2A,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
/* special command ids for handshake */
- P_HAND_SHAKE_M = 0xfff1, /* First Packet on the MetaSock */
- P_HAND_SHAKE_S = 0xfff2, /* First Packet on the Socket */
+ P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
+ P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
- P_HAND_SHAKE = 0xfffe /* FIXED for the next century! */
+ P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
};
-static inline const char *cmdname(enum drbd_packets cmd)
-{
- /* THINK may need to become several global tables
- * when we want to support more than
- * one PRO_VERSION */
- static const char *cmdnames[] = {
- [P_DATA] = "Data",
- [P_DATA_REPLY] = "DataReply",
- [P_RS_DATA_REPLY] = "RSDataReply",
- [P_BARRIER] = "Barrier",
- [P_BITMAP] = "ReportBitMap",
- [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
- [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
- [P_UNPLUG_REMOTE] = "UnplugRemote",
- [P_DATA_REQUEST] = "DataRequest",
- [P_RS_DATA_REQUEST] = "RSDataRequest",
- [P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
- [P_PROTOCOL] = "ReportProtocol",
- [P_UUIDS] = "ReportUUIDs",
- [P_SIZES] = "ReportSizes",
- [P_STATE] = "ReportState",
- [P_SYNC_UUID] = "ReportSyncUUID",
- [P_AUTH_CHALLENGE] = "AuthChallenge",
- [P_AUTH_RESPONSE] = "AuthResponse",
- [P_PING] = "Ping",
- [P_PING_ACK] = "PingAck",
- [P_RECV_ACK] = "RecvAck",
- [P_WRITE_ACK] = "WriteAck",
- [P_RS_WRITE_ACK] = "RSWriteAck",
- [P_DISCARD_ACK] = "DiscardAck",
- [P_NEG_ACK] = "NegAck",
- [P_NEG_DREPLY] = "NegDReply",
- [P_NEG_RS_DREPLY] = "NegRSDReply",
- [P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
- [P_STATE_CHG_REPLY] = "StateChgReply",
- [P_OV_REQUEST] = "OVRequest",
- [P_OV_REPLY] = "OVReply",
- [P_OV_RESULT] = "OVResult",
- [P_CSUM_RS_REQUEST] = "CsumRSRequest",
- [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
- [P_COMPRESSED_BITMAP] = "CBitmap",
- [P_DELAY_PROBE] = "DelayProbe",
- [P_OUT_OF_SYNC] = "OutOfSync",
- [P_MAX_CMD] = NULL,
- };
-
- if (cmd == P_HAND_SHAKE_M)
- return "HandShakeM";
- if (cmd == P_HAND_SHAKE_S)
- return "HandShakeS";
- if (cmd == P_HAND_SHAKE)
- return "HandShake";
- if (cmd >= P_MAX_CMD)
- return "Unknown";
- return cmdnames[cmd];
-}
+extern const char *cmdname(enum drbd_packet cmd);
/* for sending/receiving the bitmap,
* possibly in some encoding scheme */
@@ -337,37 +293,24 @@ struct p_header80 {
u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
- u8 payload[0];
} __packed;
/* Header for big packets, Used for data packets exceeding 64kB */
struct p_header95 {
u16 magic; /* use DRBD_MAGIC_BIG here */
u16 command;
- u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */
- u8 payload[0];
+ u32 length;
} __packed;
-union p_header {
- struct p_header80 h80;
- struct p_header95 h95;
-};
-
-/*
- * short commands, packets without payload, plain p_header:
- * P_PING
- * P_PING_ACK
- * P_BECOME_SYNC_TARGET
- * P_BECOME_SYNC_SOURCE
- * P_UNPLUG_REMOTE
- */
+struct p_header100 {
+ u32 magic;
+ u16 volume;
+ u16 command;
+ u32 length;
+ u32 pad;
+} __packed;
-/*
- * commands with out-of-struct payload:
- * P_BITMAP (no additional fields)
- * P_DATA, P_DATA_REPLY (see p_data)
- * P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
- */
+extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER 1 /* deprecated */
@@ -377,9 +320,10 @@ union p_header {
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
+#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
struct p_data {
- union p_header head;
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
@@ -390,21 +334,18 @@ struct p_data {
* commands which share a struct:
* p_block_ack:
* P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
- * P_DISCARD_ACK (proto C, two-primaries conflict detection)
+ * P_SUPERSEDED (proto C, two-primaries conflict detection)
* p_block_req:
* P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
struct p_block_ack {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
u32 seq_num;
} __packed;
-
struct p_block_req {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
@@ -413,59 +354,52 @@ struct p_block_req {
/*
* commands with their own struct for additional fields:
- * P_HAND_SHAKE
+ * P_CONNECTION_FEATURES
* P_BARRIER
* P_BARRIER_ACK
* P_SYNC_PARAM
* ReportParams
*/
-struct p_handshake {
- struct p_header80 head; /* 8 bytes */
+struct p_connection_features {
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
/* should be more than enough for future enhancements
- * for now, feature_flags and the reserverd array shall be zero.
+ * for now, feature_flags and the reserved array shall be zero.
*/
u32 _pad;
- u64 reserverd[7];
+ u64 reserved[7];
} __packed;
-/* 80 bytes, FIXED for the next century */
struct p_barrier {
- struct p_header80 head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_barrier_ack {
- struct p_header80 head;
u32 barrier;
u32 set_size;
} __packed;
struct p_rs_param {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* Since protocol version 88 and higher. */
char verify_alg[0];
} __packed;
struct p_rs_param_89 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __packed;
struct p_rs_param_95 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
u32 c_plan_ahead;
@@ -475,12 +409,11 @@ struct p_rs_param_95 {
} __packed;
enum drbd_conn_flags {
- CF_WANT_LOSE = 1,
+ CF_DISCARD_MY_DATA = 1,
CF_DRY_RUN = 2,
};
struct p_protocol {
- struct p_header80 head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
@@ -494,17 +427,14 @@ struct p_protocol {
} __packed;
struct p_uuids {
- struct p_header80 head;
u64 uuid[UI_EXTENDED_SIZE];
} __packed;
struct p_rs_uuid {
- struct p_header80 head;
u64 uuid;
} __packed;
struct p_sizes {
- struct p_header80 head;
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
@@ -514,18 +444,15 @@ struct p_sizes {
} __packed;
struct p_state {
- struct p_header80 head;
u32 state;
} __packed;
struct p_req_state {
- struct p_header80 head;
u32 mask;
u32 val;
} __packed;
struct p_req_state_reply {
- struct p_header80 head;
u32 retcode;
} __packed;
@@ -539,15 +466,7 @@ struct p_drbd06_param {
u32 bit_map_gen[5];
} __packed;
-struct p_discard {
- struct p_header80 head;
- u64 block_id;
- u32 seq_num;
- u32 pad;
-} __packed;
-
struct p_block_desc {
- struct p_header80 head;
u64 sector;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
@@ -563,7 +482,6 @@ enum drbd_bitmap_code {
};
struct p_compressed_bm {
- struct p_header80 head;
/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
@@ -575,90 +493,22 @@ struct p_compressed_bm {
} __packed;
struct p_delay_probe93 {
- struct p_header80 head;
u32 seq_num; /* sequence number to match the two probe packets */
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
-/* DCBP: Drbd Compressed Bitmap Packet ... */
-static inline enum drbd_bitmap_code
-DCBP_get_code(struct p_compressed_bm *p)
-{
- return (enum drbd_bitmap_code)(p->encoding & 0x0f);
-}
-
-static inline void
-DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
-{
- BUG_ON(code & ~0xf);
- p->encoding = (p->encoding & ~0xf) | code;
-}
-
-static inline int
-DCBP_get_start(struct p_compressed_bm *p)
-{
- return (p->encoding & 0x80) != 0;
-}
-
-static inline void
-DCBP_set_start(struct p_compressed_bm *p, int set)
-{
- p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
-}
-
-static inline int
-DCBP_get_pad_bits(struct p_compressed_bm *p)
-{
- return (p->encoding >> 4) & 0x7;
-}
-
-static inline void
-DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
-{
- BUG_ON(n & ~0x7);
- p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
-}
-
-/* one bitmap packet, including the p_header,
- * should fit within one _architecture independend_ page.
- * so we need to use the fixed size 4KiB page size
- * most architectures have used for a long time.
+/*
+ * Bitmap packets need to fit within a single page on the sender and receiver,
+ * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
*/
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
-#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
-#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
-#if (PAGE_SIZE < 4096)
-/* drbd_send_bitmap / receive_bitmap would break horribly */
-#error "PAGE_SIZE too small"
-#endif
-
-union p_polymorph {
- union p_header header;
- struct p_handshake handshake;
- struct p_data data;
- struct p_block_ack block_ack;
- struct p_barrier barrier;
- struct p_barrier_ack barrier_ack;
- struct p_rs_param_89 rs_param_89;
- struct p_rs_param_95 rs_param_95;
- struct p_protocol protocol;
- struct p_sizes sizes;
- struct p_uuids uuids;
- struct p_state state;
- struct p_req_state req_state;
- struct p_req_state_reply req_state_reply;
- struct p_block_req block_req;
- struct p_delay_probe93 delay_probe93;
- struct p_rs_uuid rs_uuid;
- struct p_block_desc block_desc;
-} __packed;
+#define DRBD_SOCKET_BUFFER_SIZE 4096
/**********************************************************************/
enum drbd_thread_state {
- None,
- Running,
- Exiting,
- Restarting
+ NONE,
+ RUNNING,
+ EXITING,
+ RESTARTING
};
struct drbd_thread {
@@ -667,8 +517,9 @@ struct drbd_thread {
struct completion stop;
enum drbd_thread_state t_state;
int (*function) (struct drbd_thread *);
- struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
int reset_cpu_mask;
+ char name[9];
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
@@ -681,58 +532,54 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
return thi->t_state;
}
-struct drbd_work;
-typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
struct list_head list;
- drbd_work_cb cb;
+ int (*cb)(struct drbd_work *, int cancel);
+ union {
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+ };
};
-struct drbd_tl_epoch;
+#include "drbd_interval.h"
+
+extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
+
struct drbd_request {
struct drbd_work w;
- struct drbd_conf *mdev;
/* if local IO is not allowed, will be NULL.
* if local IO _is_ allowed, holds the locally submitted bio clone,
* or, after local IO completion, the ERR_PTR(error).
- * see drbd_endio_pri(). */
+ * see drbd_request_endio(). */
struct bio *private_bio;
- struct hlist_node collision;
- sector_t sector;
- unsigned int size;
- unsigned int epoch; /* barrier_nr */
+ struct drbd_interval i;
- /* barrier_nr: used to check on "completion" whether this req was in
+ /* epoch: used to check on "completion" whether this req was in
* the current epoch, and we therefore have to close it,
- * starting a new epoch...
+ * causing a p_barrier packet to be sent, starting a new epoch.
+ *
+ * This corresponds to "barrier" in struct p_barrier[_ack],
+ * and to "barrier_nr" in struct drbd_epoch (and various
+ * comments/function parameters/local variable names).
*/
+ unsigned int epoch;
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
- unsigned long rq_state; /* see comments above _req_mod() */
unsigned long start_time;
-};
-
-struct drbd_tl_epoch {
- struct drbd_work w;
- struct list_head requests; /* requests before */
- struct drbd_tl_epoch *next; /* pointer to the next barrier */
- unsigned int br_number; /* the barriers identifier. */
- int n_writes; /* number of requests attached before this barrier */
-};
-struct drbd_request;
+ /* once it hits 0, we may complete the master_bio */
+ atomic_t completion_ref;
+ /* once it hits 0, we may destroy this drbd_request object */
+ struct kref kref;
-/* These Tl_epoch_entries may be in one of 6 lists:
- active_ee .. data packet being written
- sync_ee .. syncer block being written
- done_ee .. block written, need to send P_WRITE_ACK
- read_ee .. [RS]P_DATA_REQUEST being read
-*/
+ unsigned rq_state; /* see comments above _req_mod() */
+};
struct drbd_epoch {
+ struct drbd_tconn *tconn;
struct list_head list;
unsigned int barrier_nr;
atomic_t epoch_size; /* increased on every request added. */
@@ -762,17 +609,14 @@ struct digest_info {
void *digest;
};
-struct drbd_epoch_entry {
+struct drbd_peer_request {
struct drbd_work w;
- struct hlist_node collision;
struct drbd_epoch *epoch; /* for writes */
- struct drbd_conf *mdev;
struct page *pages;
atomic_t pending_bios;
- unsigned int size;
+ struct drbd_interval i;
/* see comments on ee flag bits below */
unsigned long flags;
- sector_t sector;
union {
u64 block_id;
struct digest_info *digest;
@@ -793,31 +637,37 @@ enum {
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
- /* we may have several bios per epoch entry.
+ /* we may have several bios per peer request.
* if any of those fail, we set this flag atomically
* from the endio callback */
__EE_WAS_ERROR,
/* This ee has a pointer to a digest instead of a block id */
__EE_HAS_DIGEST,
+
+ /* Conflicting local requests need to be restarted after this request */
+ __EE_RESTART_REQUESTS,
+
+ /* The peer wants a write ACK for this (wire proto C) */
+ __EE_SEND_WRITE_ACK,
+
+ /* Is set when net_conf had two_primaries set while creating this peer_req */
+ __EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
+#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
+#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
+#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
-/* global flag bits */
+/* flag bits per mdev */
enum {
- CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
- SIGNAL_ASENDER, /* whether asender wants to be interrupted */
- SEND_PING, /* whether asender should send a ping asap */
-
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
- DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */
- CLUSTER_ST_CHANGE, /* Cluster wide state change going on... */
CL_ST_CHG_SUCCESS,
CL_ST_CHG_FAIL,
CRASHED_PRIMARY, /* This node was a crashed primary.
@@ -831,32 +681,18 @@ enum {
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
- WAS_IO_ERROR, /* Local disk failed returned IO error */
+ WAS_IO_ERROR, /* Local disk failed, returned IO error */
+ WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */
FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
- NET_CONGESTED, /* The data socket is congested */
-
- CONFIG_PENDING, /* serialization of (re)configuration requests.
- * if set, also prevents the device from dying */
- DEVICE_DYING, /* device became unconfigured,
- * but worker thread is still handling the cleanup.
- * reconfiguring (nl_disk_conf, nl_net_conf) is dissalowed,
- * while this is set. */
RESIZE_PENDING, /* Size change detected locally, waiting for the response from
* the peer, if it changed there as well. */
- CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
- GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
- STATE_SENT, /* Do not change state/UUIDs while this is set */
-
- CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
- * pending, from drbd worker context.
- * If set, bdi_write_congested() returns true,
- * so shrink_page_list() would not recurse into,
- * and potentially deadlock on, this drbd worker.
- */
+ B_RS_H_DONE, /* Before resync handler done (already executed) */
+ DISCARD_MY_DATA, /* discard_my_data flag per volume */
+ READ_BALANCE_RR,
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -894,24 +730,24 @@ enum bm_flag {
struct drbd_work_queue {
struct list_head q;
- struct semaphore s; /* producers up it, worker down()s it */
spinlock_t q_lock; /* to protect the list. */
+ wait_queue_head_t q_wait;
};
struct drbd_socket {
- struct drbd_work_queue work;
struct mutex mutex;
struct socket *socket;
/* this way we get our
* send/receive buffers off the stack */
- union p_polymorph sbuf;
- union p_polymorph rbuf;
+ void *sbuf;
+ void *rbuf;
};
struct drbd_md {
u64 md_offset; /* sector offset to 'super' block */
u64 la_size_sect; /* last agreed size, unit sectors */
+ spinlock_t uuid_lock;
u64 uuid[UI_SIZE];
u64 device_uuid;
u32 flags;
@@ -921,24 +757,16 @@ struct drbd_md {
s32 bm_offset; /* signed relative sector offset to bitmap */
/* u32 al_nr_extents; important for restoring the AL
- * is stored into sync_conf.al_extents, which in turn
+ * is stored into ldev->dc.al_extents, which in turn
* gets applied to act_log->nr_elements
*/
};
-/* for sync_conf and other types... */
-#define NL_PACKET(name, number, fields) struct name { fields };
-#define NL_INTEGER(pn,pr,member) int member;
-#define NL_INT64(pn,pr,member) __u64 member;
-#define NL_BIT(pn,pr,member) unsigned member:1;
-#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
-#include <linux/drbd_nl.h>
-
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
- struct disk_conf dc; /* The user provided config... */
+ struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@@ -962,18 +790,116 @@ enum write_ordering_e {
};
struct fifo_buffer {
- int *values;
unsigned int head_index;
unsigned int size;
+ int total; /* sum of all values */
+ int values[0];
+};
+extern struct fifo_buffer *fifo_alloc(int fifo_size);
+
+/* flag bits per tconn */
+enum {
+ NET_CONGESTED, /* The data socket is congested */
+ RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
+ SEND_PING, /* whether asender should send a ping asap */
+ SIGNAL_ASENDER, /* whether asender wants to be interrupted */
+ GOT_PING_ACK, /* set when we receive a ping_ack packet, ping_wait gets woken */
+ CONN_WD_ST_CHG_REQ, /* A cluster wide state change on the connection is active */
+ CONN_WD_ST_CHG_OKAY,
+ CONN_WD_ST_CHG_FAIL,
+ CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
+ CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
+ CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+ * pending, from drbd worker context.
+ * If set, bdi_write_congested() returns true,
+ * so shrink_page_list() would not recurse into,
+ * and potentially deadlock on, this drbd worker.
+ */
+ DISCONNECT_SENT,
+};
+
+struct drbd_tconn { /* is a resource from the config file */
+ char *name; /* Resource name */
+ struct list_head all_tconn; /* linked on global drbd_tconns */
+ struct kref kref;
+ struct idr volumes; /* <tconn, vnr> to mdev mapping */
+ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ unsigned susp:1; /* IO suspended by user */
+ unsigned susp_nod:1; /* IO suspended because no data */
+ unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
+ struct mutex cstate_mutex; /* Protects graceful disconnects */
+
+ unsigned long flags;
+ struct net_conf *net_conf; /* content protected by rcu */
+ struct mutex conf_update; /* mutex for read-copy-update of net_conf and disk_conf */
+ wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
+ struct res_opts res_opts;
+
+ struct sockaddr_storage my_addr;
+ int my_addr_len;
+ struct sockaddr_storage peer_addr;
+ int peer_addr_len;
+
+ struct drbd_socket data; /* data/barrier/cstate/parameter packets */
+ struct drbd_socket meta; /* ping/ack (metadata) packets */
+ int agreed_pro_version; /* actually used protocol version */
+ unsigned long last_received; /* in jiffies, either socket */
+ unsigned int ko_count;
+
+ spinlock_t req_lock;
+
+ struct list_head transfer_log; /* all requests not yet fully processed */
+
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */
+ struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *verify_tfm;
+ void *int_dig_in;
+ void *int_dig_vv;
+
+ /* receiver side */
+ struct drbd_epoch *current_epoch;
+ spinlock_t epoch_lock;
+ unsigned int epochs;
+ enum write_ordering_e write_ordering;
+ atomic_t current_tle_nr; /* transfer log epoch number */
+ unsigned current_tle_writes; /* writes seen within this tl epoch */
+
+ unsigned long last_reconnect_jif;
+ struct drbd_thread receiver;
+ struct drbd_thread worker;
+ struct drbd_thread asender;
+ cpumask_var_t cpu_mask;
+
+ /* sender side */
+ struct drbd_work_queue sender_work;
+
+ struct {
+ /* whether this sender thread
+ * has processed a single write yet. */
+ bool seen_any_write_yet;
+
+ /* Which barrier number to send with the next P_BARRIER */
+ int current_epoch_nr;
+
+ /* how many write requests have been sent
+ * with req->epoch == current_epoch_nr.
+ * If none, no P_BARRIER will be sent. */
+ unsigned current_epoch_writes;
+ } send;
};
struct drbd_conf {
+ struct drbd_tconn *tconn;
+ int vnr; /* volume number within the connection */
+ struct kref kref;
+
/* things that are stored as / read from meta data on disk */
unsigned long flags;
/* configured by drbdsetup */
- struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
- struct syncer_conf sync_conf;
struct drbd_backing_dev *ldev __protected_by(local);
sector_t p_size; /* partner's disk size */
@@ -981,11 +907,7 @@ struct drbd_conf {
struct block_device *this_bdev;
struct gendisk *vdisk;
- struct drbd_socket data; /* data/barrier/cstate/parameter packets */
- struct drbd_socket meta; /* ping/ack (metadata) packets */
- int agreed_pro_version; /* actually used protocol version */
- unsigned long last_received; /* in jiffies, either socket */
- unsigned int ko_count;
+ unsigned long last_reattach_jif;
struct drbd_work resync_work,
unplug_work,
go_diskless,
@@ -1005,10 +927,9 @@ struct drbd_conf {
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
- union drbd_state state;
+ union drbd_dev_state state;
wait_queue_head_t misc_wait;
wait_queue_head_t state_wait; /* upon each state change. */
- wait_queue_head_t net_cnt_wait;
unsigned int send_cnt;
unsigned int recv_cnt;
unsigned int read_cnt;
@@ -1018,17 +939,12 @@ struct drbd_conf {
atomic_t ap_bio_cnt; /* Requests we need to complete */
atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
- atomic_t unacked_cnt; /* Need to send replys for */
+ atomic_t unacked_cnt; /* Need to send replies for */
atomic_t local_cnt; /* Waiting for local completion */
- atomic_t net_cnt; /* Users of net_conf */
- spinlock_t req_lock;
- struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
- struct drbd_tl_epoch *newest_tle;
- struct drbd_tl_epoch *oldest_tle;
- struct list_head out_of_sequence_requests;
- struct list_head barrier_acked_requests;
- struct hlist_head *tl_hash;
- unsigned int tl_hash_s;
+
+ /* Interval tree of pending local requests */
+ struct rb_root read_requests;
+ struct rb_root write_requests;
/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
unsigned long rs_total;
@@ -1048,9 +964,11 @@ struct drbd_conf {
unsigned long rs_mark_time[DRBD_SYNC_MARKS];
/* current index into rs_mark_{left,time} */
int rs_last_mark;
+ unsigned long rs_last_bcast; /* [unit jiffies] */
/* where does the admin want us to start? (sector) */
sector_t ov_start_sector;
+ sector_t ov_stop_sector;
/* where are we now? (sector) */
sector_t ov_position;
/* Start sector of out of sync range (to merge printk reporting). */
@@ -1058,14 +976,7 @@ struct drbd_conf {
/* size of out-of-sync range in sectors. */
sector_t ov_last_oos_size;
unsigned long ov_left; /* in bits */
- struct crypto_hash *csums_tfm;
- struct crypto_hash *verify_tfm;
- unsigned long last_reattach_jif;
- unsigned long last_reconnect_jif;
- struct drbd_thread receiver;
- struct drbd_thread worker;
- struct drbd_thread asender;
struct drbd_bitmap *bitmap;
unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
@@ -1078,29 +989,19 @@ struct drbd_conf {
int open_cnt;
u64 *p_uuid;
- struct drbd_epoch *current_epoch;
- spinlock_t epoch_lock;
- unsigned int epochs;
- enum write_ordering_e write_ordering;
+
struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
- struct list_head done_ee; /* send ack */
- struct list_head read_ee; /* IO in progress (any read) */
+ struct list_head done_ee; /* need to send P_WRITE_ACK */
+ struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
struct list_head net_ee; /* zero-copy network send in progress */
- struct hlist_head *ee_hash; /* is proteced by req_lock! */
- unsigned int ee_hash_s;
-
- /* this one is protected by ee_lock, single thread */
- struct drbd_epoch_entry *last_write_w_barrier;
int next_barrier_nr;
- struct hlist_head *app_reads_hash; /* is proteced by req_lock */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
- struct page *md_io_tmpp; /* for logical_block_size != 512 */
struct drbd_md_io md_io;
atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
@@ -1109,22 +1010,16 @@ struct drbd_conf {
unsigned int al_tr_number;
int al_tr_cycle;
int al_tr_pos; /* position of the next transaction in the journal */
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
- struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
- void *int_dig_out;
- void *int_dig_in;
- void *int_dig_vv;
wait_queue_head_t seq_wait;
atomic_t packet_seq;
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
unsigned long comm_bm_set; /* communicated number of set bits. */
- cpumask_var_t cpu_mask;
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
- struct mutex state_mutex;
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
char congestion_reason; /* Why we were congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -1132,9 +1027,8 @@ struct drbd_conf {
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
int c_sync_rate; /* current resync rate after syncer throttle magic */
- struct fifo_buffer rs_plan_s; /* correction values of resync planer */
+ struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, tconn->conf_update) */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
- int rs_planed; /* resync sectors already planned */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
@@ -1142,11 +1036,7 @@ struct drbd_conf {
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
- struct drbd_conf *mdev;
-
- mdev = minor < minor_count ? minor_table[minor] : NULL;
-
- return mdev;
+ return (struct drbd_conf *)idr_find(&minors, minor);
}
static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
@@ -1154,29 +1044,9 @@ static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
return mdev->minor;
}
-/* returns 1 if it was successful,
- * returns 0 if there was no data socket.
- * so wherever you are going to use the data.socket, e.g. do
- * if (!drbd_get_data_sock(mdev))
- * return 0;
- * CODE();
- * drbd_put_data_sock(mdev);
- */
-static inline int drbd_get_data_sock(struct drbd_conf *mdev)
-{
- mutex_lock(&mdev->data.mutex);
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (unlikely(mdev->data.socket == NULL)) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
- return 1;
-}
-
-static inline void drbd_put_data_sock(struct drbd_conf *mdev)
+static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
{
- mutex_unlock(&mdev->data.mutex);
+ return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
}
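With the static minor_table[] gone, devices are now looked up through two idr instances: the global minors idr keyed by device minor, and the per-connection tconn->volumes idr keyed by volume number. A minimal sketch of a caller resolving both, using only the two helpers shown above (illustrative, not part of the patch):

/* Resolve a device either globally by minor or within a connection by
 * volume number; both lookups may legitimately return NULL. */
static struct drbd_conf *example_find_device(struct drbd_tconn *tconn,
					     unsigned int minor, int vnr)
{
	struct drbd_conf *mdev = minor_to_mdev(minor);

	if (!mdev && tconn)
		mdev = vnr_to_mdev(tconn, vnr);
	return mdev;
}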
/*
@@ -1185,106 +1055,77 @@ static inline void drbd_put_data_sock(struct drbd_conf *mdev)
/* drbd_main.c */
-enum chg_state_flags {
- CS_HARD = 1,
- CS_VERBOSE = 2,
- CS_WAIT_COMPLETE = 4,
- CS_SERIALIZE = 8,
- CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
-};
-
enum dds_flags {
DDSF_FORCED = 1,
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
-extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
- enum chg_state_flags f,
- union drbd_state mask,
- union drbd_state val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state,
- union drbd_state);
-extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
- union drbd_state,
- union drbd_state,
- enum chg_state_flags);
-extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
- enum chg_state_flags,
- struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state,
- union drbd_state, int);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
+extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
-extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
-extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
+extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
+extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
-extern void drbd_free_resources(struct drbd_conf *mdev);
-extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
unsigned int set_size);
-extern void tl_clear(struct drbd_conf *mdev);
-extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
-extern void drbd_free_sock(struct drbd_conf *mdev);
-extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_protocol(struct drbd_conf *mdev);
+extern void tl_clear(struct drbd_tconn *);
+extern void drbd_free_sock(struct drbd_tconn *tconn);
+extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+ void *buf, size_t size, unsigned msg_flags);
+extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+ unsigned);
+
+extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_tconn *tconn);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
+extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
extern int drbd_send_current_state(struct drbd_conf *mdev);
-extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags);
-#define USE_DATA_SOCKET 1
-#define USE_META_SOCKET 0
-extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size);
-extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
- char *data, size_t size);
-extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
-extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
- u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
-extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp);
-extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+extern int drbd_send_sync_param(struct drbd_conf *mdev);
+extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+ u32 set_size);
+extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
+extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp);
+extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size);
+extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id);
-extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
+extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
+extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
-extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector,int size,
- void *digest, int digest_size,
- enum drbd_packets cmd);
+extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
+ int size, void *digest, int digest_size,
+ enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
extern int drbd_send_bitmap(struct drbd_conf *mdev);
-extern int _drbd_send_bitmap(struct drbd_conf *mdev);
-extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
+extern void conn_md_sync(struct drbd_tconn *tconn);
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
@@ -1302,33 +1143,52 @@ extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
extern int drbd_bitmap_io(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
char *why, enum bm_flag flags);
+extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
-
/* Meta data layout
We reserve a 128MB Block (4k aligned)
* either at the end of the backing device
* or on a separate meta data device. */
-#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
/* The following numbers are sectors */
-#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
-#define MD_AL_MAX_SIZE 64 /* = 32 kb LOG ~ 3776 extents ~ 14 GB Storage */
-/* Allows up to about 3.8TB */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
-
-/* Since the smalles IO unit is usually 512 byte */
-#define MD_SECTOR_SHIFT 9
-#define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
-
-/* activity log */
-#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
-#define AL_EXTENT_SHIFT 22 /* One extent represents 4M Storage */
+/* Allows up to about 3.8TB, so if you want more,
+ * you need to use the "flexible" meta data format. */
+#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
+#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
+#define MD_AL_SECTORS 64 /* = 32 kB on disk activity log ring buffer */
+#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
+
+/* we do all meta data IO in 4k blocks */
+#define MD_BLOCK_SHIFT 12
+#define MD_BLOCK_SIZE (1<<MD_BLOCK_SHIFT)
+
+/* One activity log extent represents 4M of storage */
+#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
+/* We could make these currently hardcoded constants configurable
+ * variables at create-md time (or even re-configurable at runtime?).
+ * That would require some more changes to the DRBD "super block"
+ * and attach code.
+ *
+ * updates per transaction:
+ * This many changes to the active set can be logged with one transaction.
+ * This number is arbitrary.
+ * context per transaction:
+ * This many context extent numbers are logged with each transaction.
+ * This number results from the transaction block size (4k), the layout
+ * of the transaction header, and the number of updates per transaction.
+ * See drbd_actlog.c:struct al_transaction_on_disk
+ * */
+#define AL_UPDATES_PER_TRANSACTION 64 // arbitrary
+#define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4
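The 919 above can be checked mechanically. A hedged sketch of that check, assuming the 36-byte transaction header and 6 bytes per update implied by the comment (the authoritative layout is struct al_transaction_on_disk in drbd_actlog.c):

#if AL_CONTEXT_PER_TRANSACTION != (4096 - 36 - 6 * AL_UPDATES_PER_TRANSACTION) / 4
#error AL_CONTEXT_PER_TRANSACTION is out of sync with the 4k transaction layout
#endif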
+
#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
@@ -1364,11 +1224,14 @@ struct bm_extent {
#define SLEEP_TIME (HZ/10)
-#define BM_BLOCK_SHIFT 12 /* 4k per bit */
+/* We do bitmap IO in units of 4k blocks.
+ * We also still have a hardcoded 4k per bit relation. */
+#define BM_BLOCK_SHIFT 12 /* 4k per bit */
#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
-/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
- * per sector of on disk bitmap */
-#define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3) /* = 24 */
+/* The represented size of one bitmap extent, aka resync extent, is set
+ * mostly arbitrarily to 16 MiB (which is also 512 bytes' worth of bitmap
+ * at 4k-per-bit resolution) */
+#define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */
#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
@@ -1436,17 +1299,20 @@ struct bm_extent {
#endif
#endif
-/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
- * With a value of 8 all IO in one 128K block make it to the same slot of the
- * hash table. */
-#define HT_SHIFT 8
-#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
+/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
+ * so for a typical PAGE_CACHE_SIZE of 4k, that is (1<<20) bytes.
+ * Since we may live in a mixed-platform cluster,
+ * we limit ourselves to a platform-agnostic constant here for now.
+ * A followup commit may allow even bigger BIO sizes,
+ * once we have thought that through. */
+#define DRBD_MAX_BIO_SIZE (1U << 20)
+#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
-
-/* Number of elements in the app_reads_hash */
-#define APP_R_HSIZE 15
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
+#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
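The 1 MiB figure follows from the relation stated in the comment: 256 pages of 4k each is 1<<20 bytes. A compile-time restatement of that, assuming a platform where PAGE_CACHE_SHIFT expands to 12 (illustrative only; the #error guard above is what the code actually relies on):

#if defined(PAGE_CACHE_SHIFT) && PAGE_CACHE_SHIFT == 12
#if DRBD_MAX_BIO_SIZE != (256 << PAGE_CACHE_SHIFT)
#error DRBD_MAX_BIO_SIZE no longer matches 256 pages of 4k
#endif
#endif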
extern int drbd_bm_init(struct drbd_conf *mdev);
extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
@@ -1468,11 +1334,11 @@ extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
+extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
-extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
- unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
@@ -1497,7 +1363,7 @@ extern void drbd_bm_unlock(struct drbd_conf *mdev);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
-extern struct kmem_cache *drbd_ee_cache; /* epoch entries */
+extern struct kmem_cache *drbd_ee_cache; /* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
@@ -1537,12 +1403,22 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
-extern struct drbd_conf *drbd_new_device(unsigned int minor);
-extern void drbd_free_mdev(struct drbd_conf *mdev);
+extern int conn_lowest_minor(struct drbd_tconn *tconn);
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+extern void drbd_minor_destroy(struct kref *kref);
+
+extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
+extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern void conn_destroy(struct kref *kref);
+struct drbd_tconn *conn_get_by_name(const char *name);
+extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len);
+extern void conn_free_crypto(struct drbd_tconn *tconn);
extern int proc_details;
/* drbd_req */
+extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
@@ -1550,10 +1426,11 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
+extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
@@ -1561,13 +1438,14 @@ extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
enum drbd_role new_role,
int force);
-extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
-extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
+extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
+extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
-extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
+void drbd_resync_after_changed(struct drbd_conf *mdev);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
@@ -1576,13 +1454,13 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
- unsigned int *done);
-extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
+extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+ struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
-static inline void ov_oos_print(struct drbd_conf *mdev)
+static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
{
if (mdev->ov_last_oos_size) {
dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
@@ -1594,97 +1472,102 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
+ struct drbd_peer_request *, void *);
/* worker callbacks */
-extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
-extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
-extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
-extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_data_req(struct drbd_work *, int);
+extern int w_e_end_rsdata_req(struct drbd_work *, int);
+extern int w_e_end_csum_rs_req(struct drbd_work *, int);
+extern int w_e_end_ov_reply(struct drbd_work *, int);
+extern int w_e_end_ov_req(struct drbd_work *, int);
+extern int w_ov_finished(struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_work *, int);
+extern int w_send_write_hint(struct drbd_work *, int);
+extern int w_make_resync_request(struct drbd_work *, int);
+extern int w_send_dblock(struct drbd_work *, int);
+extern int w_send_read_req(struct drbd_work *, int);
+extern int w_prev_work_done(struct drbd_work *, int);
+extern int w_e_reissue(struct drbd_work *, int);
+extern int w_restart_disk_io(struct drbd_work *, int);
+extern int w_send_out_of_sync(struct drbd_work *, int);
+extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type);
-extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
-extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- int is_net);
-#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
-#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
-extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
-extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
+extern int drbd_submit_peer_request(struct drbd_conf *,
+ struct drbd_peer_request *, const unsigned,
+ const int);
+extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
+extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
+ sector_t, unsigned int,
+ gfp_t) __must_hold(local);
+extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+ int);
+#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
+#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void drbd_flush_workqueue(struct drbd_conf *mdev);
-extern void drbd_free_tl_hash(struct drbd_conf *mdev);
+extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern int drbd_connected(struct drbd_conf *mdev);
+static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
+{
+ conn_flush_workqueue(mdev->tconn);
+}
-/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
- * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
+/* Yes, there is kernel_setsockopt, but only since 2.6.18.
+ * So we have our own copy of it here. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char *optval, int optlen)
{
+ mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
int err;
+
+ uoptval = (char __user __force *)optval;
+
+ set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, optval, optlen);
+ err = sock_setsockopt(sock, level, optname, uoptval, optlen);
else
- err = sock->ops->setsockopt(sock, level, optname, optval,
+ err = sock->ops->setsockopt(sock, level, optname, uoptval,
optlen);
+ set_fs(oldfs);
return err;
}
static inline void drbd_tcp_cork(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_uncork(struct socket *sock)
{
- int __user val = 0;
+ int val = 0;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_nodelay(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_quickack(struct socket *sock)
{
- int __user val = 2;
+ int val = 2;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
@@ -1693,8 +1576,8 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1702,7 +1585,6 @@ extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
sector_t sector, int size);
-extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
@@ -1712,73 +1594,24 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);
-
/* drbd_nl.c */
-
-void drbd_nl_cleanup(void);
-int __init drbd_nl_init(void);
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
-void drbd_bcast_sync_progress(struct drbd_conf *mdev);
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e);
-
-
-/**
- * DOC: DRBD State macros
- *
- * These macros are used to express state changes in easily readable form.
- *
- * The NS macros expand to a mask and a value, that can be bit ored onto the
- * current state as soon as the spinlock (req_lock) was taken.
- *
- * The _NS macros are used for state functions that get called with the
- * spinlock. These macros expand directly to the new state value.
- *
- * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
- * to express state changes that affect more than one aspect of the state.
- *
- * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
- * Means that the network connection was established and that the peer
- * is in secondary role.
- */
-#define role_MASK R_MASK
-#define peer_MASK R_MASK
-#define disk_MASK D_MASK
-#define pdsk_MASK D_MASK
-#define conn_MASK C_MASK
-#define susp_MASK 1
-#define user_isp_MASK 1
-#define aftr_isp_MASK 1
-#define susp_nod_MASK 1
-#define susp_fen_MASK 1
-
-#define NS(T, S) \
- ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T = (S); val; })
-#define NS2(T1, S1, T2, S2) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val; })
-#define NS3(T1, S1, T2, S2, T3, S3) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val.T3 = (S3); val; })
-
-#define _NS(D, T, S) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
-#define _NS2(D, T1, S1, T2, S2) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns; })
-#define _NS3(D, T1, S1, T2, S2, T3, S3) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
+/* state info broadcast */
+struct sib_info {
+ enum drbd_state_info_bcast_reason sib_reason;
+ union {
+ struct {
+ char *helper_name;
+ unsigned helper_exit_code;
+ };
+ struct {
+ union drbd_state os;
+ union drbd_state ns;
+ };
+ };
+};
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
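A minimal sketch of a caller filling in a sib_info for a state transition, assuming a SIB_STATE_CHANGE value exists in enum drbd_state_info_bcast_reason (the enum itself is defined elsewhere, not in this hunk):

static void example_notify_state_change(struct drbd_conf *mdev,
					union drbd_state os, union drbd_state ns)
{
	struct sib_info sib = {
		.sib_reason = SIB_STATE_CHANGE,	/* assumed reason value */
		.os = os,
		.ns = ns,
	};

	drbd_bcast_event(mdev, &sib);
}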
/*
* inline helper functions
@@ -1795,9 +1628,10 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+
+static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
page_chain_for_each(page) {
if (page_count(page) > 1)
return 1;
@@ -1805,18 +1639,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
-static inline void drbd_state_lock(struct drbd_conf *mdev)
-{
- wait_event(mdev->misc_wait,
- !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
-}
-
-static inline void drbd_state_unlock(struct drbd_conf *mdev)
-{
- clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
- wake_up(&mdev->misc_wait);
-}
-
static inline enum drbd_state_rv
_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
@@ -1830,48 +1652,71 @@ _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
-/**
- * drbd_request_state() - Reqest a state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- *
- * This is the most graceful way of requesting a state change. It is verbose
- * quite verbose in case the state change is not possible, and all those
- * state changes are globally serialized.
- */
-static inline int drbd_request_state(struct drbd_conf *mdev,
- union drbd_state mask,
- union drbd_state val)
+static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
{
- return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+ union drbd_state rv;
+
+ rv.i = mdev->state.i;
+ rv.susp = mdev->tconn->susp;
+ rv.susp_nod = mdev->tconn->susp_nod;
+ rv.susp_fen = mdev->tconn->susp_fen;
+
+ return rv;
}
enum drbd_force_detach_flags {
- DRBD_IO_ERROR,
+ DRBD_READ_ERROR,
+ DRBD_WRITE_ERROR,
DRBD_META_IO_ERROR,
DRBD_FORCE_DETACH,
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
- enum drbd_force_detach_flags forcedetach,
+ enum drbd_force_detach_flags df,
const char *where)
{
- switch (mdev->ldev->dc.on_io_error) {
- case EP_PASS_ON:
- if (forcedetach == DRBD_IO_ERROR) {
+ enum drbd_io_error_p ep;
+
+ rcu_read_lock();
+ ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+ switch (ep) {
+ case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
+ if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through to detach case if forcedetach set */
+ /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
+ /* Remember whether we saw a READ or WRITE error.
+ *
+ * Recovery of the affected area for WRITE failure is covered
+ * by the activity log.
+ * READ errors may fall outside that area though. Certain READ
+ * errors can be "healed" by writing good data to the affected
+ * blocks, which triggers block re-allocation in lower layers.
+ *
+ * If we can not write the bitmap after a READ error,
+ * we may need to trigger a full sync (see w_go_diskless()).
+ *
+ * Force-detach is not really an IO error, but rather a
+ * desperate measure to try to deal with a completely
+ * unresponsive lower level IO stack.
+ * Still it should be treated as a WRITE error.
+ *
+ * Meta IO error is always WRITE error:
+ * we read meta data only once during attach,
+ * which will fail in case of errors.
+ */
set_bit(WAS_IO_ERROR, &mdev->flags);
- if (forcedetach == DRBD_FORCE_DETACH)
+ if (df == DRBD_READ_ERROR)
+ set_bit(WAS_READ_ERROR, &mdev->flags);
+ if (df == DRBD_FORCE_DETACH)
set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
@@ -1896,9 +1741,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
}
}
@@ -1910,9 +1755,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
* BTW, for internal meta data, this happens to be the maximum capacity
* we could agree upon with our peer node.
*/
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1922,13 +1767,30 @@ static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
}
}
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+{
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ return _drbd_md_first_sector(meta_dev_idx, bdev);
+}
+
/**
* drbd_md_last_sector() - Return the last sector number of the meta data area
* @bdev: Meta data block device.
*/
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + MD_AL_OFFSET - 1;
@@ -1956,12 +1818,18 @@ static inline sector_t drbd_get_capacity(struct block_device *bdev)
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
sector_t s;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
s = drbd_get_capacity(bdev->backing_bdev)
? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
- drbd_md_first_sector(bdev))
+ _drbd_md_first_sector(meta_dev_idx, bdev))
: 0;
break;
case DRBD_MD_INDEX_FLEX_EXT:
@@ -1987,9 +1855,15 @@ static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
default: /* external, some index */
- return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
+ return MD_RESERVED_SECT * meta_dev_idx;
case DRBD_MD_INDEX_INTERNAL:
/* with drbd08, internal meta data is always "flexible" */
case DRBD_MD_INDEX_FLEX_INT:
@@ -2015,9 +1889,8 @@ drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
static inline void
@@ -2026,41 +1899,35 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add_tail(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
-static inline void wake_asender(struct drbd_conf *mdev)
-{
- if (test_bit(SIGNAL_ASENDER, &mdev->flags))
- force_sig(DRBD_SIG, mdev->asender.task);
-}
-
-static inline void request_ping(struct drbd_conf *mdev)
+static inline void wake_asender(struct drbd_tconn *tconn)
{
- set_bit(SEND_PING, &mdev->flags);
- wake_asender(mdev);
+ if (test_bit(SIGNAL_ASENDER, &tconn->flags))
+ force_sig(DRBD_SIG, tconn->asender.task);
}
-static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
- enum drbd_packets cmd)
+static inline void request_ping(struct drbd_tconn *tconn)
{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
+ set_bit(SEND_PING, &tconn->flags);
+ wake_asender(tconn);
}
-static inline int drbd_send_ping(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
-}
+extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
+extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
+extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
-static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
-}
+extern int drbd_send_ping(struct drbd_tconn *tconn);
+extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
@@ -2082,21 +1949,21 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* or implicit barrier packets as necessary.
* increased:
* w_send_barrier
- * _req_mod(req, queue_for_net_write or queue_for_net_read);
+ * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
* it is much easier and equally valid to count what we queue for the
* worker, even before it actually was queued or sent.
* (drbd_make_request_common; recovery path on read io-error)
* decreased:
* got_BarrierAck (respective tl_clear, tl_clear_barrier)
- * _req_mod(req, data_received)
+ * _req_mod(req, DATA_RECEIVED)
* [from receive_DataReply]
- * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
* [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
* for some reason it is NOT decreased in got_NegAck,
* but in the resulting cleanup code from report_params.
* we should try to remember the reason for that...
- * _req_mod(req, send_failed or send_canceled)
- * _req_mod(req, connection_lost_while_pending)
+ * _req_mod(req, SEND_FAILED or SEND_CANCELED)
+ * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
static inline void inc_ap_pending(struct drbd_conf *mdev)
@@ -2104,17 +1971,19 @@ static inline void inc_ap_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->ap_pending_cnt);
}
-#define ERR_IF_CNT_IS_NEGATIVE(which) \
- if (atomic_read(&mdev->which) < 0) \
+#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
+ if (atomic_read(&mdev->which) < 0) \
dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
- __func__ , __LINE__ , \
- atomic_read(&mdev->which))
+ func, line, \
+ atomic_read(&mdev->which))
-#define dec_ap_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \
- wake_up(&mdev->misc_wait); \
- ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
+#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ if (atomic_dec_and_test(&mdev->ap_pending_cnt))
+ wake_up(&mdev->misc_wait);
+ ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
+}
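The wrapper pattern above just forwards the call site's identity, so any "ap_pending_cnt < 0" complaint names the caller rather than this header. A worked illustration of the expansion, using got_BarrierAck() (mentioned above) as the hypothetical call site:

/* At a call site such as got_BarrierAck():
 *
 *     dec_ap_pending(mdev);
 *
 * the preprocessor produces
 *
 *     _dec_ap_pending(mdev, __FUNCTION__, __LINE__);
 *
 * so the error message reads "in got_BarrierAck:<line>: ...".
 * The same pattern is repeated for dec_rs_pending() and dec_unacked() below.
 */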
/* counts how many resync-related answers we still expect from the peer
* increase decrease
@@ -2127,10 +1996,12 @@ static inline void inc_rs_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->rs_pending_cnt);
}
-#define dec_rs_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->rs_pending_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
+#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ atomic_dec(&mdev->rs_pending_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
+}
/* counts how many answers we still need to send to the peer.
* increased on
@@ -2146,38 +2017,18 @@ static inline void inc_unacked(struct drbd_conf *mdev)
atomic_inc(&mdev->unacked_cnt);
}
-#define dec_unacked(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-#define sub_unacked(mdev, n) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_sub(n, &mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-
-static inline void put_net_conf(struct drbd_conf *mdev)
+#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
{
- if (atomic_dec_and_test(&mdev->net_cnt))
- wake_up(&mdev->net_cnt_wait);
+ atomic_dec(&mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
-/**
- * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
- * @mdev: DRBD device.
- *
- * You have to call put_net_conf() when finished working with mdev->net_conf.
- */
-static inline int get_net_conf(struct drbd_conf *mdev)
+#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
+static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
{
- int have_net_conf;
-
- atomic_inc(&mdev->net_cnt);
- have_net_conf = mdev->state.conn >= C_UNCONNECTED;
- if (!have_net_conf)
- put_net_conf(mdev);
- return have_net_conf;
+ atomic_sub(n, &mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
@@ -2281,17 +2132,20 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
- int mxb = 1000000; /* arbitrary limit on open requests */
- if (get_net_conf(mdev)) {
- mxb = mdev->net_conf->max_buffers;
- put_net_conf(mdev);
- }
+ struct net_conf *nc;
+ int mxb;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
+ rcu_read_unlock();
+
return mxb;
}
static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
- union drbd_state s = mdev->state;
+ union drbd_dev_state s = mdev->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2325,7 +2179,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
- if (mdev->agreed_pro_version < 96)
+ if (mdev->tconn->agreed_pro_version < 96)
return 0;
break;
@@ -2347,7 +2201,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* disk state is stable as well. */
break;
- /* no new io accepted during tansitional states */
+ /* no new io accepted during transitional states */
case D_ATTACHING:
case D_NEGOTIATING:
case D_UNKNOWN:
@@ -2359,16 +2213,18 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
return 1;
}
-static inline int is_susp(union drbd_state s)
+static inline int drbd_suspended(struct drbd_conf *mdev)
{
- return s.susp || s.susp_nod || s.susp_fen;
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ return tconn->susp || tconn->susp_fen || tconn->susp_nod;
}
static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
int mxb = drbd_get_max_buffers(mdev);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return false;
if (test_bit(SUSPEND_IO, &mdev->flags))
return false;
@@ -2390,30 +2246,30 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
return true;
}
-static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
{
bool rv = false;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
rv = may_inc_ap_bio(mdev);
if (rv)
- atomic_add(count, &mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->req_lock);
+ atomic_inc(&mdev->ap_bio_cnt);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
-static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+static inline void inc_ap_bio(struct drbd_conf *mdev)
{
/* we wait here
* as long as the device is suspended
* until the bitmap is no longer on the fly during connection
- * handshake as long as we would exeed the max_buffer limit.
+ * handshake, and as long as we would exceed the max_buffer limit.
*
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
+ wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
}
static inline void dec_ap_bio(struct drbd_conf *mdev)
@@ -2425,7 +2281,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
@@ -2435,6 +2291,12 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
+static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
+{
+ return mdev->tconn->agreed_pro_version >= 97 &&
+ mdev->tconn->agreed_pro_version != 100;
+}
+
static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
int changed = mdev->ed_uuid != val;
@@ -2442,40 +2304,6 @@ static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
return changed;
}
-static inline int seq_cmp(u32 a, u32 b)
-{
- /* we assume wrap around at 32bit.
- * for wrap around at 24bit (old atomic_t),
- * we'd have to
- * a <<= 8; b <<= 8;
- */
- return (s32)(a) - (s32)(b);
-}
-#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
-#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
-#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
-#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
-/* CAUTION: please no side effects in arguments! */
-#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
-
-static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
-{
- unsigned int m;
- spin_lock(&mdev->peer_seq_lock);
- m = seq_max(mdev->peer_seq, new_seq);
- mdev->peer_seq = m;
- spin_unlock(&mdev->peer_seq_lock);
- if (m == new_seq)
- wake_up(&mdev->seq_wait);
-}
-
-static inline void drbd_update_congested(struct drbd_conf *mdev)
-{
- struct sock *sk = mdev->data.socket->sk;
- if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &mdev->flags);
-}
-
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
/* sorry, we currently have no working implementation
@@ -2490,10 +2318,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
+ if (mdev->ldev == NULL) {
+ dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
+ return;
+ }
+
if (test_bit(MD_NO_FUA, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
set_bit(MD_NO_FUA, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
new file mode 100644
index 00000000000..89c497c630b
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.c
@@ -0,0 +1,207 @@
+#include <asm/bug.h>
+#include <linux/rbtree_augmented.h>
+#include "drbd_interval.h"
+
+/**
+ * interval_end - return end of @node
+ */
+static inline
+sector_t interval_end(struct rb_node *node)
+{
+ struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
+ return this->end;
+}
+
+/**
+ * compute_subtree_last - compute end of @node
+ *
+ * The end of an interval is the highest (start + (size >> 9)) value of this
+ * node and of its children. Called for @node and its parents whenever the end
+ * may have changed.
+ */
+static inline sector_t
+compute_subtree_last(struct drbd_interval *node)
+{
+ sector_t max = node->sector + (node->size >> 9);
+
+ if (node->rb.rb_left) {
+ sector_t left = interval_end(node->rb.rb_left);
+ if (left > max)
+ max = left;
+ }
+ if (node->rb.rb_right) {
+ sector_t right = interval_end(node->rb.rb_right);
+ if (right > max)
+ max = right;
+ }
+ return max;
+}
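A worked example of the invariant maintained here: the cached ->end of a node is the maximum interval end anywhere in its subtree, which is what lets drbd_find_overlap() below prune whole subtrees.

/* Example: a node covering sectors [64, 72) (size 4096 bytes),
 * left subtree ending at sector 200, right subtree ending at 150:
 *
 *   own end    : 64 + (4096 >> 9) =  72
 *   left end   : 200
 *   right end  : 150
 *   subtree end: max(72, 200, 150) = 200
 */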
+
+static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
+{
+ while (rb != stop) {
+ struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
+ sector_t subtree_last = compute_subtree_last(node);
+ if (node->end == subtree_last)
+ break;
+ node->end = subtree_last;
+ rb = rb_parent(&node->rb);
+ }
+}
+
+static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+}
+
+static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+ old->end = compute_subtree_last(old);
+}
+
+static const struct rb_augment_callbacks augment_callbacks = {
+ augment_propagate,
+ augment_copy,
+ augment_rotate,
+};
+
+/**
+ * drbd_insert_interval - insert a new interval into a tree
+ */
+bool
+drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+
+ BUG_ON(!IS_ALIGNED(this->size, 512));
+
+ while (*new) {
+ struct drbd_interval *here =
+ rb_entry(*new, struct drbd_interval, rb);
+
+ parent = *new;
+ if (this->sector < here->sector)
+ new = &(*new)->rb_left;
+ else if (this->sector > here->sector)
+ new = &(*new)->rb_right;
+ else if (this < here)
+ new = &(*new)->rb_left;
+ else if (this > here)
+ new = &(*new)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&this->rb, parent, new);
+ rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ return true;
+}
+
+/**
+ * drbd_contains_interval - check if a tree contains a given interval
+ * @sector: start sector of @interval
+ * @interval: may not be a valid pointer
+ *
+ * Returns %true if the tree contains the node @interval with start sector @sector.
+ * Does not dereference @interval until @interval is known to be a valid object
+ * in @root. Returns %false if @interval is in the tree but with a different
+ * sector number.
+ */
+bool
+drbd_contains_interval(struct rb_root *root, sector_t sector,
+ struct drbd_interval *interval)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (sector < here->sector)
+ node = node->rb_left;
+ else if (sector > here->sector)
+ node = node->rb_right;
+ else if (interval < here)
+ node = node->rb_left;
+ else if (interval > here)
+ node = node->rb_right;
+ else
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drbd_remove_interval - remove an interval from a tree
+ */
+void
+drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ rb_erase_augmented(&this->rb, root, &augment_callbacks);
+}
+
+/**
+ * drbd_find_overlap - search for an interval overlapping with [sector, sector + size)
+ * @sector: start sector
+ * @size: size, aligned to 512 bytes
+ *
+ * Returns an interval overlapping with [sector, sector + size), or NULL if
+ * there is none. When there is more than one overlapping interval in the
+ * tree, the interval with the lowest start sector is returned, and all other
+ * overlapping intervals will be on the right side of the tree, reachable with
+ * rb_next().
+ */
+struct drbd_interval *
+drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)
+{
+ struct rb_node *node = root->rb_node;
+ struct drbd_interval *overlap = NULL;
+ sector_t end = sector + (size >> 9);
+
+ BUG_ON(!IS_ALIGNED(size, 512));
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (node->rb_left &&
+ sector < interval_end(node->rb_left)) {
+ /* Overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (here->sector < end &&
+ sector < here->sector + (here->size >> 9)) {
+ overlap = here;
+ break;
+ } else if (sector >= here->sector) {
+ /* Overlap if any must be on right side */
+ node = node->rb_right;
+ } else
+ break;
+ }
+ return overlap;
+}
+
+struct drbd_interval *
+drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size)
+{
+ sector_t end = sector + (size >> 9);
+ struct rb_node *node;
+
+ for (;;) {
+ node = rb_next(&i->rb);
+ if (!node)
+ return NULL;
+ i = rb_entry(node, struct drbd_interval, rb);
+ if (i->sector >= end)
+ return NULL;
+ if (sector < i->sector + (i->size >> 9))
+ return i;
+ }
+}
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
new file mode 100644
index 00000000000..f38fcb00c10
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.h
@@ -0,0 +1,40 @@
+#ifndef __DRBD_INTERVAL_H
+#define __DRBD_INTERVAL_H
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+
+struct drbd_interval {
+ struct rb_node rb;
+ sector_t sector; /* start sector of the interval */
+ unsigned int size; /* size in bytes */
+ sector_t end; /* highest interval end in subtree */
+ int local:1 /* local or remote request? */;
+ int waiting:1;
+};
+
+static inline void drbd_clear_interval(struct drbd_interval *i)
+{
+ RB_CLEAR_NODE(&i->rb);
+}
+
+static inline bool drbd_interval_empty(struct drbd_interval *i)
+{
+ return RB_EMPTY_NODE(&i->rb);
+}
+
+extern bool drbd_insert_interval(struct rb_root *, struct drbd_interval *);
+extern bool drbd_contains_interval(struct rb_root *, sector_t,
+ struct drbd_interval *);
+extern void drbd_remove_interval(struct rb_root *, struct drbd_interval *);
+extern struct drbd_interval *drbd_find_overlap(struct rb_root *, sector_t,
+ unsigned int);
+extern struct drbd_interval *drbd_next_overlap(struct drbd_interval *, sector_t,
+ unsigned int);
+
+#define drbd_for_each_overlap(i, root, sector, size) \
+ for (i = drbd_find_overlap(root, sector, size); \
+ i; \
+ i = drbd_next_overlap(i, sector, size))
+
+#endif /* __DRBD_INTERVAL_H */
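A minimal usage sketch of the API declared above, assuming the caller provides its own locking around the tree (the interval code itself takes no locks). Sizes are in bytes and must be 512-byte aligned, matching the BUG_ON()s in drbd_interval.c; the function name is hypothetical.

#include "drbd_interval.h"

/* Insert i into root unless an already-tracked interval overlaps the
 * range [sector, sector + (size >> 9)). */
static bool example_track_interval(struct rb_root *root,
				   struct drbd_interval *i,
				   sector_t sector, unsigned int size)
{
	struct drbd_interval *overlap;

	i->sector = sector;
	i->size = size;

	drbd_for_each_overlap(overlap, root, sector, size) {
		/* for the sketch, treat any overlap as a conflict and refuse;
		 * a real caller would inspect overlap->local / overlap->waiting */
		return false;
	}

	return drbd_insert_interval(root, i);
}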
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f55683ad4ff..8c13eeb83c5 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -56,14 +56,6 @@
#include "drbd_vli.h"
-struct after_state_chg_work {
- struct drbd_work w;
- union drbd_state os;
- union drbd_state ns;
- enum chg_state_flags flags;
- struct completion *done;
-};
-
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
@@ -72,21 +64,17 @@ int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags);
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void _tl_clear(struct drbd_conf *mdev);
+static int w_bitmap_io(struct drbd_work *w, int unused);
+static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
+MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
__stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
@@ -98,7 +86,6 @@ MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
-module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
@@ -120,7 +107,6 @@ module_param(fault_devs, int, 0644);
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
-unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/
/* Module parameter for setting the user mode helper program
@@ -132,10 +118,11 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct drbd_conf **minor_table;
+struct idr minors;
+struct list_head drbd_tconns; /* list of struct drbd_tconn */
struct kmem_cache *drbd_request_cache;
-struct kmem_cache *drbd_ee_cache; /* epoch entries */
+struct kmem_cache *drbd_ee_cache; /* peer requests */
struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
@@ -164,10 +151,15 @@ static const struct block_device_operations drbd_ops = {
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
+ struct bio *bio;
+
if (!drbd_md_io_bio_set)
return bio_alloc(gfp_mask, 1);
- return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ return bio;
}
#ifdef __CHECKER__
@@ -190,158 +182,87 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
#endif
/**
- * DOC: The transfer log
- *
- * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
- * of the list. There is always at least one &struct drbd_tl_epoch object.
- *
- * Each &struct drbd_tl_epoch has a circular double linked list of requests
- * attached.
- */
-static int tl_init(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* during device minor initialization, we may well use GFP_KERNEL */
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
- if (!b)
- return 0;
- INIT_LIST_HEAD(&b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->next = NULL;
- b->br_number = 4711;
- b->n_writes = 0;
- b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
-
- mdev->oldest_tle = b;
- mdev->newest_tle = b;
- INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
- INIT_LIST_HEAD(&mdev->barrier_acked_requests);
-
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-
- return 1;
-}
-
-static void tl_cleanup(struct drbd_conf *mdev)
-{
- D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
- kfree(mdev->oldest_tle);
- mdev->oldest_tle = NULL;
- kfree(mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-}
-
-/**
- * _tl_add_barrier() - Adds a barrier to the transfer log
- * @mdev: DRBD device.
- * @new: Barrier to be added before the current head of the TL.
- *
- * The caller must hold the req_lock.
- */
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
-{
- struct drbd_tl_epoch *newest_before;
-
- INIT_LIST_HEAD(&new->requests);
- INIT_LIST_HEAD(&new->w.list);
- new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
- new->next = NULL;
- new->n_writes = 0;
-
- newest_before = mdev->newest_tle;
- new->br_number = newest_before->br_number+1;
- if (mdev->newest_tle != new) {
- mdev->newest_tle->next = new;
- mdev->newest_tle = new;
- }
-}
-
-/**
- * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
- * @mdev: DRBD device.
+ * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
+ * @tconn: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
* In case the passed barrier_nr or set_size does not match the oldest
- * &struct drbd_tl_epoch objects this function will cause a termination
- * of the connection.
+ * epoch of not yet barrier-acked requests, this function will cause a
+ * termination of the connection.
*/
-void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
- unsigned int set_size)
+void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+ unsigned int set_size)
{
- struct drbd_tl_epoch *b, *nob; /* next old barrier */
- struct list_head *le, *tle;
struct drbd_request *r;
-
- spin_lock_irq(&mdev->req_lock);
-
- b = mdev->oldest_tle;
+ struct drbd_request *req = NULL;
+ int expect_epoch = 0;
+ int expect_size = 0;
+
+ spin_lock_irq(&tconn->req_lock);
+
+ /* find oldest not yet barrier-acked write request,
+ * count writes in its epoch. */
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ const unsigned s = r->rq_state;
+ if (!req) {
+ if (!(s & RQ_WRITE))
+ continue;
+ if (!(s & RQ_NET_MASK))
+ continue;
+ if (s & RQ_NET_DONE)
+ continue;
+ req = r;
+ expect_epoch = req->epoch;
+ expect_size++;
+ } else {
+ if (r->epoch != expect_epoch)
+ break;
+ if (!(s & RQ_WRITE))
+ continue;
+ /* if (s & RQ_DONE): not expected */
+ /* if (!(s & RQ_NET_MASK)): not expected */
+ expect_size++;
+ }
+ }
/* first some paranoia code */
- if (b == NULL) {
- dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
- barrier_nr);
+ if (req == NULL) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ barrier_nr);
goto bail;
}
- if (b->br_number != barrier_nr) {
- dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
- barrier_nr, b->br_number);
+ if (expect_epoch != barrier_nr) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ barrier_nr, expect_epoch);
goto bail;
}
- if (b->n_writes != set_size) {
- dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
- barrier_nr, set_size, b->n_writes);
+
+ if (expect_size != set_size) {
+ conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ barrier_nr, set_size, expect_size);
goto bail;
}
- /* Clean up list of requests processed during current epoch */
- list_for_each_safe(le, tle, &b->requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(r, barrier_acked);
- }
- /* There could be requests on the list waiting for completion
- of the write to the local disk. To avoid corruptions of
- slab's data structures we have to remove the lists head.
-
- Also there could have been a barrier ack out of sequence, overtaking
- the write acks - which would be a bug and violating write ordering.
- To not deadlock in case we lose connection while such requests are
- still pending, we need some way to find them for the
- _req_mode(connection_lost_while_pending).
-
- These have been list_move'd to the out_of_sequence_requests list in
- _req_mod(, barrier_acked) above.
- */
- list_splice_init(&b->requests, &mdev->barrier_acked_requests);
-
- nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, b);
- if (nob)
- mdev->oldest_tle = nob;
- /* if nob == NULL b was the only barrier, and becomes the new
- barrier. Therefore mdev->oldest_tle points already to b */
- } else {
- D_ASSERT(nob != NULL);
- mdev->oldest_tle = nob;
- kfree(b);
+ /* Clean up list of requests processed during current epoch. */
+ /* this extra list walk restart is paranoia,
+ * to catch requests being barrier-acked "unexpectedly".
+ * It usually should find the same req again, or some READ preceding it. */
+ list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ if (req->epoch == expect_epoch)
+ break;
+ list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ if (req->epoch != expect_epoch)
+ break;
+ _req_mod(req, BARRIER_ACKED);
}
-
- spin_unlock_irq(&mdev->req_lock);
- dec_ap_pending(mdev);
+ spin_unlock_irq(&tconn->req_lock);
return;
bail:
- spin_unlock_irq(&mdev->req_lock);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ spin_unlock_irq(&tconn->req_lock);
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
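The rewritten tl_release() above derives the expected epoch number and write count by scanning the flat, connection-wide transfer log instead of walking per-epoch lists. The following stand-alone sketch shows just that derivation; the struct layout, field names and sample data are invented for illustration, and the real code additionally checks RQ_NET_MASK before treating a write as part of an epoch.

#include <stdbool.h>
#include <stdio.h>

struct req {
	unsigned int epoch;
	bool is_write;
	bool net_done;	/* already barrier-acked */
};

/* Find the oldest epoch containing a not-yet-acked write, count its writes. */
static int oldest_unacked_epoch(const struct req *log, int n,
				unsigned int *epoch, unsigned int *writes)
{
	int i, found = 0;

	for (i = 0; i < n; i++) {
		if (!found) {
			if (!log[i].is_write || log[i].net_done)
				continue;
			*epoch = log[i].epoch;
			*writes = 1;
			found = 1;
		} else {
			if (log[i].epoch != *epoch)
				break;		/* requests are epoch-ordered */
			if (log[i].is_write)
				(*writes)++;
		}
	}
	return found;
}

int main(void)
{
	const struct req log[] = {
		{ 1, true, true }, { 2, false, false },
		{ 2, true, false }, { 2, true, false }, { 3, true, false },
	};
	unsigned int epoch, writes;

	if (oldest_unacked_epoch(log, 5, &epoch, &writes))
		printf("epoch %u, %u writes\n", epoch, writes);	/* epoch 2, 2 writes */
	return 0;
}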
@@ -350,85 +271,24 @@ bail:
* @mdev: DRBD device.
* @what: The action/event to perform with all request objects
*
- * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
- * restart_frozen_disk_io.
+ * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
+ * RESTART_FROZEN_DISK_IO.
*/
-static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- struct drbd_tl_epoch *b, *tmp, **pn;
- struct list_head *le, *tle, carry_reads;
- struct drbd_request *req;
- int rv, n_writes, n_reads;
-
- b = mdev->oldest_tle;
- pn = &mdev->oldest_tle;
- while (b) {
- n_writes = 0;
- n_reads = 0;
- INIT_LIST_HEAD(&carry_reads);
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- rv = _req_mod(req, what);
-
- n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
- n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
- }
- tmp = b->next;
-
- if (n_writes) {
- if (what == resend) {
- b->n_writes = n_writes;
- if (b->w.cb == NULL) {
- b->w.cb = w_send_barrier;
- inc_ap_pending(mdev);
- set_bit(CREATE_BARRIER, &mdev->flags);
- }
-
- drbd_queue_work(&mdev->data.work, &b->w);
- }
- pn = &b->next;
- } else {
- if (n_reads)
- list_add(&carry_reads, &b->requests);
- /* there could still be requests on that ring list,
- * in case local io is still pending */
- list_del(&b->requests);
-
- /* dec_ap_pending corresponding to queue_barrier.
- * the newest barrier may not have been queued yet,
- * in which case w.cb is still NULL. */
- if (b->w.cb != NULL)
- dec_ap_pending(mdev);
-
- if (b == mdev->newest_tle) {
- /* recycle, but reinit! */
- D_ASSERT(tmp == NULL);
- INIT_LIST_HEAD(&b->requests);
- list_splice(&carry_reads, &b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->w.cb = NULL;
- b->br_number = net_random();
- b->n_writes = 0;
-
- *pn = b;
- break;
- }
- *pn = tmp;
- kfree(b);
- }
- b = tmp;
- list_splice(&carry_reads, &b->requests);
- }
-
- /* Actions operating on the disk state, also want to work on
- requests that got barrier acked. */
+/* must hold resource->req_lock */
+void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
_req_mod(req, what);
- }
}
+void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, what);
+ spin_unlock_irq(&tconn->req_lock);
+}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
@@ -438,43 +298,9 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
* by the requests on the transfer gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_conf *mdev)
+void tl_clear(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void _tl_clear(struct drbd_conf *mdev)
-{
- struct list_head *le, *tle;
- struct drbd_request *r;
-
- _tl_restart(mdev, connection_lost_while_pending);
-
- /* we expect this list to be empty. */
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
-
- /* but just in case, clean it up anyways! */
- list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- /* It would be nice to complete outside of spinlock.
- * But this is easier for now. */
- _req_mod(r, connection_lost_while_pending);
- }
-
- /* ensure bit indicating barrier is required is clear */
- clear_bit(CREATE_BARRIER, &mdev->flags);
-
- memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
-
-}
-
-void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- spin_unlock_irq(&mdev->req_lock);
+ tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}
/**
@@ -483,1377 +309,131 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
*/
void tl_abort_disk_io(struct drbd_conf *mdev)
{
- struct drbd_tl_epoch *b;
- struct list_head *le, *tle;
- struct drbd_request *req;
-
- spin_lock_irq(&mdev->req_lock);
- b = mdev->oldest_tle;
- while (b) {
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- if (!(req->rq_state & RQ_LOCAL_PENDING))
- continue;
- _req_mod(req, abort_disk_io);
- }
- b = b->next;
- }
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ spin_lock_irq(&tconn->req_lock);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- _req_mod(req, abort_disk_io);
- }
-
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/**
- * cl_wide_st_chg() - true if the state change is a cluster wide one
- * @mdev: DRBD device.
- * @os: old (current) state.
- * @ns: new (wanted) state.
- */
-static int cl_wide_st_chg(struct drbd_conf *mdev,
- union drbd_state os, union drbd_state ns)
-{
- return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
- ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
- (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
- (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
- (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
-}
-
-enum drbd_state_rv
-drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state mask, union drbd_state val)
-{
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, NULL);
- ns = mdev->state;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_force_state() - Impose a change which happens outside our control on our state
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- */
-void drbd_force_state(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
-{
- drbd_change_state(mdev, CS_HARD, mask, val);
-}
-
-static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
-static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
- union drbd_state,
- union drbd_state);
-enum sanitize_state_warnings {
- NO_WARNING,
- ABORTED_ONLINE_VERIFY,
- ABORTED_RESYNC,
- CONNECTION_LOST_NEGOTIATING,
- IMPLICITLY_UPGRADED_DISK,
- IMPLICITLY_UPGRADED_PDSK,
-};
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn);
-int drbd_send_state_req(struct drbd_conf *,
- union drbd_state, union drbd_state);
-
-static enum drbd_state_rv
-_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val)
-{
- union drbd_state os, ns;
- unsigned long flags;
- enum drbd_state_rv rv;
-
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
- return SS_CW_SUCCESS;
-
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
- return SS_CW_FAILED_BY_PEER;
-
- rv = 0;
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (!cl_wide_st_chg(mdev, os, ns))
- rv = SS_CW_NO_NEED;
- if (!rv) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS) {
- rv = is_valid_state_transition(mdev, ns, os);
- if (rv == SS_SUCCESS)
- rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
- }
- }
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_req_state() - Perform an eventually cluster wide state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Should not be called directly, use drbd_request_state() or
- * _drbd_request_state().
- */
-static enum drbd_state_rv
-drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- struct completion done;
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- init_completion(&done);
-
- if (f & CS_SERIALIZE)
- mutex_lock(&mdev->state_mutex);
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (cl_wide_st_chg(mdev, os, ns)) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS)
- rv = is_valid_state_transition(mdev, ns, os);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (rv < SS_SUCCESS) {
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- drbd_state_lock(mdev);
- if (!drbd_send_state_req(mdev, mask, val)) {
- drbd_state_unlock(mdev);
- rv = SS_CW_FAILED_BY_PEER;
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- wait_event(mdev->state_wait,
- (rv = _req_st_cond(mdev, mask, val)));
-
- if (rv < SS_SUCCESS) {
- drbd_state_unlock(mdev);
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, &done);
- drbd_state_unlock(mdev);
- } else {
- rv = _drbd_set_state(mdev, ns, f, &done);
- }
-
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(current != mdev->worker.task);
- wait_for_completion(&done);
- }
-
-abort:
- if (f & CS_SERIALIZE)
- mutex_unlock(&mdev->state_mutex);
-
- return rv;
-}
-
-/**
- * _drbd_request_state() - Request a state change (with flags)
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
- * flag, or when logging of failed state change requests is not desired.
- */
-enum drbd_state_rv
-_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- enum drbd_state_rv rv;
-
- wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
-
- return rv;
-}
-
-static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
-{
- dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
- name,
- drbd_conn_str(ns.conn),
- drbd_role_str(ns.role),
- drbd_role_str(ns.peer),
- drbd_disk_str(ns.disk),
- drbd_disk_str(ns.pdsk),
- is_susp(ns) ? 's' : 'r',
- ns.aftr_isp ? 'a' : '-',
- ns.peer_isp ? 'p' : '-',
- ns.user_isp ? 'u' : '-'
- );
-}
-
-void print_st_err(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum drbd_state_rv err)
-{
- if (err == SS_IN_TRANSIENT_STATE)
- return;
- dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
- print_st(mdev, " state", os);
- print_st(mdev, "wanted", ns);
-}
-
-
-/**
- * is_valid_state() - Returns an SS_ error code if ns is not valid
- * @mdev: DRBD device.
- * @ns: State to consider.
- */
-static enum drbd_state_rv
-is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
-{
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- enum drbd_fencing_p fp;
- enum drbd_state_rv rv = SS_SUCCESS;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (get_net_conf(mdev)) {
- if (!mdev->net_conf->two_primaries &&
- ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
- rv = SS_TWO_PRIMARIES;
- put_net_conf(mdev);
- }
-
- if (rv <= 0)
- /* already found a reason to abort */;
- else if (ns.role == R_SECONDARY && mdev->open_cnt)
- rv = SS_DEVICE_IN_USE;
-
- else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (fp >= FP_RESOURCE &&
- ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
- rv = SS_PRIMARY_NOP;
-
- else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
- rv = SS_NO_LOCAL_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
- rv = SS_NO_REMOTE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if ((ns.conn == C_CONNECTED ||
- ns.conn == C_WF_BITMAP_S ||
- ns.conn == C_SYNC_SOURCE ||
- ns.conn == C_PAUSED_SYNC_S) &&
- ns.disk == D_OUTDATED)
- rv = SS_CONNECTED_OUTDATES;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- (mdev->sync_conf.verify_alg[0] == 0))
- rv = SS_NO_VERIFY_ALG;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- mdev->agreed_pro_version < 88)
- rv = SS_NOT_SUPPORTED;
-
- else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
- rv = SS_CONNECTED_OUTDATES;
-
- return rv;
-}
-
-/**
- * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
- * @mdev: DRBD device.
- * @ns: new state.
- * @os: old state.
- */
-static enum drbd_state_rv
-is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
- union drbd_state os)
-{
- enum drbd_state_rv rv = SS_SUCCESS;
-
- if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
- os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
- rv = SS_ALREADY_STANDALONE;
-
- if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
- rv = SS_IS_DISKLESS;
-
- if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
- rv = SS_NO_NET_CONFIG;
-
- if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
- rv = SS_LOWER_THAN_OUTDATED;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
- rv = SS_IN_TRANSIENT_STATE;
-
- if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
- rv = SS_IN_TRANSIENT_STATE;
-
- /* While establishing a connection only allow cstate to change.
- Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &mdev->flags) &&
- !(os.conn == C_WF_REPORT_PARAMS ||
- (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
- rv = SS_IN_TRANSIENT_STATE;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- ns.conn != os.conn && os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
- os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
- && os.conn < C_WF_REPORT_PARAMS)
- rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
-
- return rv;
-}
-
-static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
-{
- static const char *msg_table[] = {
- [NO_WARNING] = "",
- [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
- [ABORTED_RESYNC] = "Resync aborted.",
- [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
- [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
- [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
- };
-
- if (warn != NO_WARNING)
- dev_warn(DEV, "%s\n", msg_table[warn]);
-}
-
-/**
- * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @warn_sync_abort:
- *
- * When we loose connection, we have to set the state of the peers disk (pdsk)
- * to D_UNKNOWN. This rule and many more along those lines are in this function.
- */
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn)
-{
- enum drbd_fencing_p fp;
- enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
-
- if (warn)
- *warn = NO_WARNING;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Disallow Network errors to configure a device's network part */
- if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
- os.conn <= C_DISCONNECTING)
- ns.conn = os.conn;
-
- /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
- * If you try to go into some Sync* state, that shall fail (elsewhere). */
- if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
- ns.conn = os.conn;
-
- /* we cannot fail (again) if we already detached */
- if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
- ns.disk = D_DISKLESS;
-
- /* After C_DISCONNECTING only C_STANDALONE may follow */
- if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
- ns.conn = os.conn;
-
- if (ns.conn < C_CONNECTED) {
- ns.peer_isp = 0;
- ns.peer = R_UNKNOWN;
- if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
- ns.pdsk = D_UNKNOWN;
- }
-
- /* Clear the aftr_isp when becoming unconfigured */
- if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
- ns.aftr_isp = 0;
-
- /* Abort resync if a disk fails/detaches */
- if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
- (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn)
- *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
- ns.conn = C_CONNECTED;
- }
-
- /* Connection breaks down before we finished "Negotiating" */
- if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
- if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
- ns.disk = mdev->new_state_tmp.disk;
- ns.pdsk = mdev->new_state_tmp.pdsk;
- } else {
- if (warn)
- *warn = CONNECTION_LOST_NEGOTIATING;
- ns.disk = D_DISKLESS;
- ns.pdsk = D_UNKNOWN;
- }
- put_ldev(mdev);
- }
-
- /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
- if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
- if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
- ns.disk = D_UP_TO_DATE;
- if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
- ns.pdsk = D_UP_TO_DATE;
- }
-
- /* Implications of the connection stat on the disk states */
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_UNKNOWN;
- switch ((enum drbd_conns)ns.conn) {
- case C_WF_BITMAP_T:
- case C_PAUSED_SYNC_T:
- case C_STARTING_SYNC_T:
- case C_WF_SYNC_UUID:
- case C_BEHIND:
- disk_min = D_INCONSISTENT;
- disk_max = D_OUTDATED;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_VERIFY_S:
- case C_VERIFY_T:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_CONNECTED:
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_DISKLESS;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_WF_BITMAP_S:
- case C_PAUSED_SYNC_S:
- case C_STARTING_SYNC_S:
- case C_AHEAD:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
- break;
- case C_SYNC_TARGET:
- disk_min = D_INCONSISTENT;
- disk_max = D_INCONSISTENT;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_SYNC_SOURCE:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_INCONSISTENT;
- break;
- case C_STANDALONE:
- case C_DISCONNECTING:
- case C_UNCONNECTED:
- case C_TIMEOUT:
- case C_BROKEN_PIPE:
- case C_NETWORK_FAILURE:
- case C_PROTOCOL_ERROR:
- case C_TEAR_DOWN:
- case C_WF_CONNECTION:
- case C_WF_REPORT_PARAMS:
- case C_MASK:
- break;
- }
- if (ns.disk > disk_max)
- ns.disk = disk_max;
-
- if (ns.disk < disk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_DISK;
- ns.disk = disk_min;
- }
- if (ns.pdsk > pdsk_max)
- ns.pdsk = pdsk_max;
-
- if (ns.pdsk < pdsk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_PDSK;
- ns.pdsk = pdsk_min;
- }
-
- if (fp == FP_STONITH &&
- (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
- !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
- ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
-
- if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
- !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
- ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
-
- if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
- if (ns.conn == C_SYNC_SOURCE)
- ns.conn = C_PAUSED_SYNC_S;
- if (ns.conn == C_SYNC_TARGET)
- ns.conn = C_PAUSED_SYNC_T;
- } else {
- if (ns.conn == C_PAUSED_SYNC_S)
- ns.conn = C_SYNC_SOURCE;
- if (ns.conn == C_PAUSED_SYNC_T)
- ns.conn = C_SYNC_TARGET;
- }
-
- return ns;
-}
-
-/* helper for __drbd_set_state */
-static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
-{
- if (mdev->agreed_pro_version < 90)
- mdev->ov_start_sector = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- mdev->ov_position = 0;
- if (cs == C_VERIFY_T) {
- /* starting online verify from an arbitrary position
- * does not fit well into the existing protocol.
- * on C_VERIFY_T, we initialize ov_left and friends
- * implicitly in receive_DataRequest once the
- * first P_OV_REQUEST is received */
- mdev->ov_start_sector = ~(sector_t)0;
- } else {
- unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - 1);
- mdev->rs_total = 1;
- } else
- mdev->rs_total -= bit;
- mdev->ov_position = mdev->ov_start_sector;
- }
- mdev->ov_left = mdev->rs_total;
-}
-
-static void drbd_resume_al(struct drbd_conf *mdev)
-{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
- dev_info(DEV, "Resumed AL updates\n");
-}
-
-/**
- * __drbd_set_state() - Set a new DRBD state
- * @mdev: DRBD device.
- * @ns: new state.
- * @flags: Flags
- * @done: Optional completion, that will get completed after the after_state_ch() finished
- *
- * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
- */
-enum drbd_state_rv
-__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
- enum chg_state_flags flags, struct completion *done)
-{
- union drbd_state os;
- enum drbd_state_rv rv = SS_SUCCESS;
- enum sanitize_state_warnings ssw;
- struct after_state_chg_work *ascw;
-
- os = mdev->state;
-
- ns = sanitize_state(mdev, os, ns, &ssw);
-
- if (ns.i == os.i)
- return SS_NOTHING_TO_DO;
-
- if (!(flags & CS_HARD)) {
- /* pre-state-change checks ; only look at ns */
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- rv = is_valid_state(mdev, ns);
- if (rv < SS_SUCCESS) {
- /* If the old state was illegal as well, then let
- this happen...*/
-
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_state_transition(mdev, ns, os);
- } else
- rv = is_valid_state_transition(mdev, ns, os);
- }
-
- if (rv < SS_SUCCESS) {
- if (flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- return rv;
- }
-
- print_sanitize_warnings(mdev, ssw);
-
- {
- char *pbp, pb[300];
- pbp = pb;
- *pbp = 0;
- if (ns.role != os.role)
- pbp += sprintf(pbp, "role( %s -> %s ) ",
- drbd_role_str(os.role),
- drbd_role_str(ns.role));
- if (ns.peer != os.peer)
- pbp += sprintf(pbp, "peer( %s -> %s ) ",
- drbd_role_str(os.peer),
- drbd_role_str(ns.peer));
- if (ns.conn != os.conn)
- pbp += sprintf(pbp, "conn( %s -> %s ) ",
- drbd_conn_str(os.conn),
- drbd_conn_str(ns.conn));
- if (ns.disk != os.disk)
- pbp += sprintf(pbp, "disk( %s -> %s ) ",
- drbd_disk_str(os.disk),
- drbd_disk_str(ns.disk));
- if (ns.pdsk != os.pdsk)
- pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
- drbd_disk_str(os.pdsk),
- drbd_disk_str(ns.pdsk));
- if (is_susp(ns) != is_susp(os))
- pbp += sprintf(pbp, "susp( %d -> %d ) ",
- is_susp(os),
- is_susp(ns));
- if (ns.aftr_isp != os.aftr_isp)
- pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
- os.aftr_isp,
- ns.aftr_isp);
- if (ns.peer_isp != os.peer_isp)
- pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
- os.peer_isp,
- ns.peer_isp);
- if (ns.user_isp != os.user_isp)
- pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
- os.user_isp,
- ns.user_isp);
- dev_info(DEV, "%s\n", pb);
- }
-
- /* solve the race between becoming unconfigured,
- * worker doing the cleanup, and
- * admin reconfiguring us:
- * on (re)configure, first set CONFIG_PENDING,
- * then wait for a potentially exiting worker,
- * start the worker, and schedule one no_op.
- * then proceed with configuration.
- */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY &&
- !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
- set_bit(DEVICE_DYING, &mdev->flags);
-
- /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
- * on the ldev here, to be sure the transition -> D_DISKLESS resp.
- * drbd_ldev_destroy() won't happen before our corresponding
- * after_state_ch works run, where we put_ldev again. */
- if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
- atomic_inc(&mdev->local_cnt);
-
- mdev->state = ns;
-
- if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
- drbd_print_uuids(mdev, "attached to UUIDs");
-
- wake_up(&mdev->misc_wait);
- wake_up(&mdev->state_wait);
-
- /* aborted verify run. log the last position */
- if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
- ns.conn < C_CONNECTED) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
- }
-
- if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
- dev_info(DEV, "Syncer continues.\n");
- mdev->rs_paused += (long)jiffies
- -(long)mdev->rs_mark_time[mdev->rs_last_mark];
- if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
- }
-
- if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
- (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
- dev_info(DEV, "Resync suspended\n");
- mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
- }
-
- if (os.conn == C_CONNECTED &&
- (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
- unsigned long now = jiffies;
- int i;
-
- set_ov_position(mdev, ns.conn);
- mdev->rs_start = now;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->ov_last_oos_size = 0;
- mdev->ov_last_oos_start = 0;
-
- for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
- }
-
- drbd_rs_controller_reset(mdev);
-
- if (ns.conn == C_VERIFY_S) {
- dev_info(DEV, "Starting Online Verify from sector %llu\n",
- (unsigned long long)mdev->ov_position);
- mod_timer(&mdev->resync_timer, jiffies);
- }
- }
-
- if (get_ldev(mdev)) {
- u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
- MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
- MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
-
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
- mdf |= MDF_CRASHED_PRIMARY;
- if (mdev->state.role == R_PRIMARY ||
- (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
- mdf |= MDF_PRIMARY_IND;
- if (mdev->state.conn > C_WF_REPORT_PARAMS)
- mdf |= MDF_CONNECTED_IND;
- if (mdev->state.disk > D_INCONSISTENT)
- mdf |= MDF_CONSISTENT;
- if (mdev->state.disk > D_OUTDATED)
- mdf |= MDF_WAS_UP_TO_DATE;
- if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
- mdf |= MDF_PEER_OUT_DATED;
- if (mdf != mdev->ldev->md.flags) {
- mdev->ldev->md.flags = mdf;
- drbd_md_mark_dirty(mdev);
- }
- if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
- drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
- put_ldev(mdev);
- }
-
- /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
- if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
- os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
-
- /* Receiver should clean up itself */
- if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
- drbd_thread_stop_nowait(&mdev->receiver);
-
- /* Now the receiver finished cleaning up itself, it should die */
- if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
- drbd_thread_stop_nowait(&mdev->receiver);
-
- /* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_WF_CONNECTION &&
- ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
- drbd_thread_restart_nowait(&mdev->receiver);
-
- /* Resume AL writing if we get a connection */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- drbd_resume_al(mdev);
-
- /* remember last connect and attach times so request_timer_fn() won't
- * kill newly established sessions while we are still trying to thaw
- * previously frozen IO */
- if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
- mdev->last_reconnect_jif = jiffies;
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- mdev->last_reattach_jif = jiffies;
-
- ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
- if (ascw) {
- ascw->os = os;
- ascw->ns = ns;
- ascw->flags = flags;
- ascw->w.cb = w_after_state_ch;
- ascw->done = done;
- drbd_queue_work(&mdev->data.work, &ascw->w);
- } else {
- dev_warn(DEV, "Could not kmalloc an ascw\n");
- }
-
- return rv;
-}
-
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
-{
- struct after_state_chg_work *ascw =
- container_of(w, struct after_state_chg_work, w);
- after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & CS_WAIT_COMPLETE) {
- D_ASSERT(ascw->done != NULL);
- complete(ascw->done);
- }
- kfree(ascw);
-
- return 1;
-}
-
-static void abw_start_sync(struct drbd_conf *mdev, int rv)
-{
- if (rv) {
- dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
- return;
- }
-
- switch (mdev->state.conn) {
- case C_STARTING_SYNC_T:
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- break;
- case C_STARTING_SYNC_S:
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- break;
- }
-}
-
-int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- char *why, enum bm_flag flags)
-{
- int rv;
-
- D_ASSERT(current == mdev->worker.task);
-
- /* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
-
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
-
- drbd_resume_io(mdev);
-
- return rv;
-}
-
-/**
- * after_state_ch() - Perform after state change actions that may sleep
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @flags: Flags
- */
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags)
-{
- enum drbd_fencing_p fp;
- enum drbd_req_event what = nothing;
- union drbd_state nsm = (union drbd_state){ .i = -1 };
-
- if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (mdev->p_uuid)
- mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
- }
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Inform userspace about the change... */
- drbd_bcast_state(mdev, ns);
-
- if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
- drbd_khelper(mdev, "pri-on-incon-degr");
-
- /* Here we have the actions that are performed after a
- state change. This function might sleep */
-
- if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
- mod_timer(&mdev->request_timer, jiffies + HZ);
-
- nsm.i = -1;
- if (ns.susp_nod) {
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- what = resend;
-
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- what = restart_frozen_disk_io;
-
- if (what != nothing)
- nsm.susp_nod = 0;
- }
-
- if (ns.susp_fen) {
- /* case1: The outdate peer handler is successful: */
- if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- }
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
- /* case2: The connection was established again: */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- what = resend;
- nsm.susp_fen = 0;
- }
- }
-
- if (what != nothing) {
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- nsm.i &= mdev->state.i;
- _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
-
- /* Became sync source. With protocol >= 96, we still need to send out
- * the sync uuid now. Need to do that before any drbd_send_state, or
- * the other side may go "paused sync" before receiving the sync uuids,
- * which is unexpected. */
- if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
- mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
- drbd_gen_and_send_sync_uuid(mdev);
- put_ldev(mdev);
- }
-
- /* Do not change the order of the if above and the two below... */
- if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
- /* we probably will start a resync soon.
- * make sure those things are properly reset. */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- drbd_rs_cancel_all(mdev);
-
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
- /* No point in queuing send_bitmap if we don't have a connection
- * anymore, so check also the _current_ state, not only the new state
- * at the time this work was queued. */
- if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
- mdev->state.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
- "send_bitmap (WFBitMapS)",
- BM_LOCKED_TEST_ALLOWED);
-
- /* Lost contact to peer's copy of the data */
- if ((os.pdsk >= D_INCONSISTENT &&
- os.pdsk != D_UNKNOWN &&
- os.pdsk != D_OUTDATED)
- && (ns.pdsk < D_INCONSISTENT ||
- ns.pdsk == D_UNKNOWN ||
- ns.pdsk == D_OUTDATED)) {
- if (get_ldev(mdev)) {
- if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- if (is_susp(mdev->state)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
- } else {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- }
- put_ldev(mdev);
- }
- }
-
- if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- /* D_DISKLESS Peer becomes secondary */
- if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
- /* We may still be Primary ourselves.
- * No harm done if the bitmap still changes,
- * redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote diskless peer", BM_LOCKED_SET_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Write out all changed bits on demote.
- * Though, no need to da that just yet
- * if there is a resync going on still */
- if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
- mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- /* No changes to the bitmap expected this time, so assert that,
- * even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote", BM_LOCKED_TEST_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Last part of the attaching process ... */
- if (ns.conn >= C_CONNECTED &&
- os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
- drbd_send_sizes(mdev, 0, 0); /* to start sync... */
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
-
- /* We want to pause/continue resync, tell peer. */
- if (ns.conn >= C_CONNECTED &&
- ((os.aftr_isp != ns.aftr_isp) ||
- (os.user_isp != ns.user_isp)))
- drbd_send_state(mdev, ns);
-
- /* In case one of the isp bits got set, suspend other devices. */
- if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
- (ns.aftr_isp || ns.peer_isp || ns.user_isp))
- suspend_other_sg(mdev);
-
- /* Make sure the peer gets informed about eventual state
- changes (ISP bits) while we were in WFReportParams. */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev, ns);
-
- /* We are in the progress to start a full sync... */
- if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
- /* no other bitmap changes expected during this phase */
- drbd_queue_bitmap_io(mdev,
- &drbd_bmio_set_n_write, &abw_start_sync,
- "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
-
- /* We are invalidating our self... */
- if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
- os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- /* other bitmap operation expected during this phase */
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
- "set_n_write from invalidate", BM_LOCKED_MASK);
-
- /* first half of local IO error, failure to attach,
- * or administrative detach */
- if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
- int was_io_error = 0;
- /* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS.
- * But is is still not save to dreference ldev here, since
- * we might come from an failed Attach before ldev was set. */
- if (mdev->ldev) {
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
-
- /* Immediately allow completion of all application IO,
- * that waits for completion from the local disk,
- * if this was a force-detach due to disk_timeout
- * or administrator request (drbdsetup detach --force).
- * Do NOT abort otherwise.
- * Aborting local requests may cause serious problems,
- * if requests are completed to upper layers already,
- * and then later the already submitted local bio completes.
- * This can cause DMA into former bio pages that meanwhile
- * have been re-used for other things.
- * So aborting local requests may cause crashes,
- * or even worse, silent data corruption.
- */
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
- tl_abort_disk_io(mdev);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
- }
- put_ldev(mdev);
- }
-
- /* second half of local IO error, failure to attach,
- * or administrative detach,
- * after local_cnt references have reached zero again */
- if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* corresponding get_ldev in __drbd_set_state
- * this may finally trigger drbd_ldev_destroy. */
- put_ldev(mdev);
- }
-
- /* Notify peer that I had a local IO error, and did not detached.. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Disks got bigger while they were detached */
- if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
- if (ns.conn == C_CONNECTED)
- resync_after_online_grow(mdev);
- }
-
- /* A resync finished or aborted, wake paused devices... */
- if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
- (os.peer_isp && !ns.peer_isp) ||
- (os.user_isp && !ns.user_isp))
- resume_next_sg(mdev);
-
- /* sync target done with resync. Explicitly notify peer, even though
- * it should (at least for non-empty resyncs) already know itself. */
- if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Wake up role changes, that were delayed because of connection establishing */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &mdev->flags);
- wake_up(&mdev->state_wait);
- }
-
- /* This triggers bitmap writeout of potentially still unwritten pages
- * if the resync finished cleanly, or aborted because of peer disk
- * failure, or because of connection loss.
- * For resync aborted because of local disk failure, we cannot do
- * any bitmap writeout anymore.
- * No harm done if some bits change during this phase.
- */
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
- "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
- put_ldev(mdev);
- }
-
- /* free tl_hash if we Got thawed and are C_STANDALONE */
- if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
- drbd_free_tl_hash(mdev);
-
- /* Upon network connection, we need to start the receiver */
- if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
- drbd_thread_start(&mdev->receiver);
-
- /* Terminate worker thread if we are unconfigured - it will be
- restarted as needed... */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY) {
- if (os.aftr_isp != ns.aftr_isp)
- resume_next_sg(mdev);
- /* set in __drbd_set_state, unless CONFIG_PENDING was set */
- if (test_bit(DEVICE_DYING, &mdev->flags))
- drbd_thread_stop_nowait(&mdev->worker);
+ if (req->w.mdev != mdev)
+ continue;
+ _req_mod(req, ABORT_DISK_IO);
}
-
- drbd_md_sync(mdev);
+ spin_unlock_irq(&tconn->req_lock);
}
-
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
unsigned long flags;
int retval;
+ snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
+ thi->name[0], thi->tconn->name);
+
restart:
retval = thi->function(thi);
spin_lock_irqsave(&thi->t_lock, flags);
- /* if the receiver has been "Exiting", the last thing it did
+ /* if the receiver has been "EXITING", the last thing it did
* was set the conn state to "StandAlone",
* if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
- * drbd_thread_start needs to set "Restarting" in that case.
+ * drbd_thread_start needs to set "RESTARTING" in that case.
* t_state check and assignment needs to be within the same spinlock,
- * so either thread_start sees Exiting, and can remap to Restarting,
- * or thread_start see None, and can proceed as normal.
+ * so either thread_start sees EXITING, and can remap to RESTARTING,
+ * or thread_start sees NONE, and can proceed as normal.
*/
- if (thi->t_state == Restarting) {
- dev_info(DEV, "Restarting %s\n", current->comm);
- thi->t_state = Running;
+ if (thi->t_state == RESTARTING) {
+ conn_info(tconn, "Restarting %s thread\n", thi->name);
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
}
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
smp_mb();
- complete(&thi->stop);
+ complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- dev_info(DEV, "Terminating %s\n", current->comm);
+ conn_info(tconn, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
+
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return retval;
}
-static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *))
+static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
thi->function = func;
- thi->mdev = mdev;
+ thi->tconn = tconn;
+ strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
struct task_struct *nt;
unsigned long flags;
- const char *me =
- thi == &mdev->receiver ? "receiver" :
- thi == &mdev->asender ? "asender" :
- thi == &mdev->worker ? "worker" : "NONSENSE";
-
/* is used from state engine doing drbd_thread_stop_nowait,
* while holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
switch (thi->t_state) {
- case None:
- dev_info(DEV, "Starting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case NONE:
+ conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
+ conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
+ kref_get(&thi->tconn->kref);
+
init_completion(&thi->stop);
- D_ASSERT(thi->task == NULL);
thi->reset_cpu_mask = 1;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd%d_%s", mdev_to_minor(mdev), me);
+ "drbd_%c_%s", thi->name[0], thi->tconn->name);
if (IS_ERR(nt)) {
- dev_err(DEV, "Couldn't start thread\n");
+ conn_err(tconn, "Couldn't start thread\n");
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return false;
}
spin_lock_irqsave(&thi->t_lock, flags);
thi->task = nt;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
wake_up_process(nt);
break;
- case Exiting:
- thi->t_state = Restarting;
- dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case EXITING:
+ thi->t_state = RESTARTING;
+ conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* fall through */
- case Running:
- case Restarting:
+ case RUNNING:
+ case RESTARTING:
default:
spin_unlock_irqrestore(&thi->t_lock, flags);
break;
@@ -1867,12 +447,12 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
unsigned long flags;
- enum drbd_thread_state ns = restart ? Restarting : Exiting;
+ enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
/* may be called from state engine, holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
- if (thi->t_state == None) {
+ if (thi->t_state == NONE) {
spin_unlock_irqrestore(&thi->t_lock, flags);
if (restart)
drbd_thread_start(thi);
@@ -1890,7 +470,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
init_completion(&thi->stop);
if (thi->task != current)
force_sig(DRBD_SIGKILL, thi->task);
-
}
spin_unlock_irqrestore(&thi->t_lock, flags);
@@ -1899,6 +478,35 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
+static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi =
+ task == tconn->receiver.task ? &tconn->receiver :
+ task == tconn->asender.task ? &tconn->asender :
+ task == tconn->worker.task ? &tconn->worker : NULL;
+
+ return thi;
+}
+
+char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
+ return thi ? thi->name : task->comm;
+}
+
+int conn_lowest_minor(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr = 0, m;
+
+ rcu_read_lock();
+ mdev = idr_get_next(&tconn->volumes, &vnr);
+ m = mdev ? mdev_to_minor(mdev) : -1;
+ rcu_read_unlock();
+
+ return m;
+}
+
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
@@ -1907,238 +515,345 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
* Forces all threads of a device onto the same CPU. This is beneficial for
* DRBD's performance. May be overwritten by user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_conf *mdev)
+void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
int ord, cpu;
/* user override. */
- if (cpumask_weight(mdev->cpu_mask))
+ if (cpumask_weight(tconn->cpu_mask))
return;
- ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
+ ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
- cpumask_set_cpu(cpu, mdev->cpu_mask);
+ cpumask_set_cpu(cpu, tconn->cpu_mask);
return;
}
}
/* should not be reached */
- cpumask_setall(mdev->cpu_mask);
+ cpumask_setall(tconn->cpu_mask);
}
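/* Illustration (editor's example, not from the original patch): with 4
 * online CPUs and conn_lowest_minor(tconn) == 6, ord starts at 6 % 4 = 2,
 * so the loop above selects the third online CPU;
 * drbd_thread_current_set_cpu() then pins all of this connection's threads
 * onto that CPU unless the user supplied an explicit cpu_mask. The
 * cpumask_setall() fallback is only a safety net ("should not be reached").
 */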
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
* @mdev: DRBD device.
+ * @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
* prematurely.
*/
-void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
+void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
struct task_struct *p = current;
- struct drbd_thread *thi =
- p == mdev->asender.task ? &mdev->asender :
- p == mdev->receiver.task ? &mdev->receiver :
- p == mdev->worker.task ? &mdev->worker :
- NULL;
- ERR_IF(thi == NULL)
- return;
+
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, mdev->cpu_mask);
+ set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif
-/* the appropriate socket mutex must be held already */
-int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags)
+/**
+ * drbd_header_size - size of a packet header
+ *
+ * The header size is a multiple of 8, so any payload following the header is
+ * word aligned on 64-bit architectures. (The bitmap send and receive code
+ * relies on this.)
+ */
+unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
- int sent, ok;
+ if (tconn->agreed_pro_version >= 100) {
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
+ return sizeof(struct p_header100);
+ } else {
+ BUILD_BUG_ON(sizeof(struct p_header80) !=
+ sizeof(struct p_header95));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
+ return sizeof(struct p_header80);
+ }
+}
- ERR_IF(!h) return false;
- ERR_IF(!size) return false;
+static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be16(size);
+ return sizeof(struct p_header80);
+}
- h->magic = BE_DRBD_MAGIC;
+static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
h->command = cpu_to_be16(cmd);
- h->length = cpu_to_be16(size-sizeof(struct p_header80));
+ h->length = cpu_to_be32(size);
+ return sizeof(struct p_header95);
+}
- sent = drbd_send(mdev, sock, h, size, msg_flags);
+static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
+ int size, int vnr)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC_100);
+ h->volume = cpu_to_be16(vnr);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be32(size);
+ h->pad = 0;
+ return sizeof(struct p_header100);
+}
- ok = (sent == size);
- if (!ok && !signal_pending(current))
- dev_warn(DEV, "short sent %s size=%d sent=%d\n",
- cmdname(cmd), (int)size, sent);
- return ok;
+static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+ void *buffer, enum drbd_packet cmd, int size)
+{
+ if (tconn->agreed_pro_version >= 100)
+ return prepare_header100(buffer, cmd, size, vnr);
+ else if (tconn->agreed_pro_version >= 95 &&
+ size > DRBD_MAX_SIZE_H80_PACKET)
+ return prepare_header95(buffer, cmd, size);
+ else
+ return prepare_header80(buffer, cmd, size);
}
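/* Illustration (editor's sketch, not from the original patch), summarizing
 * the header selection above; everything here is read off
 * prepare_header80/95/100() and drbd_header_size():
 *
 *   apv >= 100                                       -> p_header100 (be32 magic, be16 volume, be16 command, be32 length, pad)
 *   apv 95..99 and size > DRBD_MAX_SIZE_H80_PACKET   -> p_header95  (be16 magic, be16 command, be32 length)
 *   otherwise                                        -> p_header80  (be32 magic, be16 command, be16 length)
 *
 * p_header80 and p_header95 are the same size, and every header size is a
 * multiple of 8, so the payload stays word aligned on 64-bit machines.
 */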
-/* don't pass the socket. we may only look at it
- * when we hold the appropriate socket mutex.
- */
-int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h, size_t size)
+static void *__conn_prepare_command(struct drbd_tconn *tconn,
+ struct drbd_socket *sock)
{
- int ok = 0;
- struct socket *sock;
+ if (!sock->socket)
+ return NULL;
+ return sock->sbuf + drbd_header_size(tconn);
+}
- if (use_data_socket) {
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
- } else {
- mutex_lock(&mdev->meta.mutex);
- sock = mdev->meta.socket;
- }
+void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+{
+ void *p;
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (likely(sock != NULL))
- ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
+ mutex_lock(&sock->mutex);
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ mutex_unlock(&sock->mutex);
- if (use_data_socket)
- mutex_unlock(&mdev->data.mutex);
- else
- mutex_unlock(&mdev->meta.mutex);
- return ok;
+ return p;
}
-int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
- size_t size)
+void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
- struct p_header80 h;
- int ok;
+ return conn_prepare_command(mdev->tconn, sock);
+}
- h.magic = BE_DRBD_MAGIC;
- h.command = cpu_to_be16(cmd);
- h.length = cpu_to_be16(size);
+static int __send_command(struct drbd_tconn *tconn, int vnr,
+ struct drbd_socket *sock, enum drbd_packet cmd,
+ unsigned int header_size, void *data,
+ unsigned int size)
+{
+ int msg_flags;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
+ /*
+ * Called with @data == NULL and the size of the data blocks in @size
+ * for commands that send data blocks. For those commands, omit the
+ * MSG_MORE flag: this will increase the likelihood that data blocks
+ * which are page aligned on the sender will end up page aligned on the
+ * receiver.
+ */
+ msg_flags = data ? MSG_MORE : 0;
+
+ header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size + size);
+ err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ msg_flags);
+ if (data && !err)
+ err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ return err;
+}
- ok = (sizeof(h) ==
- drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
- ok = ok && (size ==
- drbd_send(mdev, mdev->data.socket, data, size, 0));
+static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+}
- drbd_put_data_sock(mdev);
+int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ int err;
- return ok;
+ err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
}
-int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
+int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
{
+ int err;
+
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
+ data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
+}
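/* Illustration (editor's note, not from the original patch): the command
 * helpers above follow a prepare/send pairing. conn_prepare_command() and
 * drbd_prepare_command() take sock->mutex and hand back a pointer just past
 * the header inside the preallocated sock->sbuf (or NULL, with the mutex
 * already dropped); conn_send_command() and drbd_send_command() fill in the
 * header, transmit, and release the mutex. The double-underscore variants
 * (__conn_prepare_command(), __send_command(), __conn_send_command()) leave
 * the locking to the caller, as __drbd_send_protocol() relies on further
 * down. drbd_send_ping() below is the minimal example of the locked pattern.
 */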
+
+int drbd_send_ping(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+}
+
+int drbd_send_ping_ack(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+}
+
+int drbd_send_sync_param(struct drbd_conf *mdev)
+{
+ struct drbd_socket *sock;
struct p_rs_param_95 *p;
- struct socket *sock;
- int size, rv;
- const int apv = mdev->agreed_pro_version;
+ int size;
+ const int apv = mdev->tconn->agreed_pro_version;
+ enum drbd_packet cmd;
+ struct net_conf *nc;
+ struct disk_conf *dc;
+
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
- + strlen(mdev->sync_conf.verify_alg) + 1
+ + strlen(nc->verify_alg) + 1
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- /* used from admin command context and receiver/worker context.
- * to avoid kmalloc, grab the socket right here,
- * then use the pre-allocated sbuf there */
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
-
- if (likely(sock != NULL)) {
- enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
-
- p = &mdev->data.sbuf.rs_param_95;
+ cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
- /* initialize verify_alg and csums_alg */
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ /* initialize verify_alg and csums_alg */
+ memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- p->rate = cpu_to_be32(sc->rate);
- p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
- p->c_delay_target = cpu_to_be32(sc->c_delay_target);
- p->c_fill_target = cpu_to_be32(sc->c_fill_target);
- p->c_max_rate = cpu_to_be32(sc->c_max_rate);
-
- if (apv >= 88)
- strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
- if (apv >= 89)
- strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
-
- rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
- } else
- rv = 0; /* not ok */
+ if (get_ldev(mdev)) {
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ p->resync_rate = cpu_to_be32(dc->resync_rate);
+ p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
+ p->c_delay_target = cpu_to_be32(dc->c_delay_target);
+ p->c_fill_target = cpu_to_be32(dc->c_fill_target);
+ p->c_max_rate = cpu_to_be32(dc->c_max_rate);
+ put_ldev(mdev);
+ } else {
+ p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
+ p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
+ p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
+ p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
+ p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
+ }
- mutex_unlock(&mdev->data.mutex);
+ if (apv >= 88)
+ strcpy(p->verify_alg, nc->verify_alg);
+ if (apv >= 89)
+ strcpy(p->csums_alg, nc->csums_alg);
+ rcu_read_unlock();
- return rv;
+ return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_conf *mdev)
+int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
+ struct drbd_socket *sock;
struct p_protocol *p;
- int size, cf, rv;
+ struct net_conf *nc;
+ int size, cf;
- size = sizeof(struct p_protocol);
+ sock = &tconn->data;
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
- if (mdev->agreed_pro_version >= 87)
- size += strlen(mdev->net_conf->integrity_alg) + 1;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- /* we must not recurse into our own queue,
- * as that is blocked during handshake */
- p = kmalloc(size, GFP_NOIO);
- if (p == NULL)
- return 0;
+ if (nc->tentative && tconn->agreed_pro_version < 92) {
+ rcu_read_unlock();
+ mutex_unlock(&sock->mutex);
+ conn_err(tconn, "--dry-run is not supported by peer");
+ return -EOPNOTSUPP;
+ }
- p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
- p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
- p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
- p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
- p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
+ size = sizeof(*p);
+ if (tconn->agreed_pro_version >= 87)
+ size += strlen(nc->integrity_alg) + 1;
+ p->protocol = cpu_to_be32(nc->wire_protocol);
+ p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
+ p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
+ p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
+ p->two_primaries = cpu_to_be32(nc->two_primaries);
cf = 0;
- if (mdev->net_conf->want_lose)
- cf |= CF_WANT_LOSE;
- if (mdev->net_conf->dry_run) {
- if (mdev->agreed_pro_version >= 92)
- cf |= CF_DRY_RUN;
- else {
- dev_err(DEV, "--dry-run is not supported by peer");
- kfree(p);
- return -1;
- }
- }
+ if (nc->discard_my_data)
+ cf |= CF_DISCARD_MY_DATA;
+ if (nc->tentative)
+ cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (mdev->agreed_pro_version >= 87)
- strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
+ if (tconn->agreed_pro_version >= 87)
+ strcpy(p->integrity_alg, nc->integrity_alg);
+ rcu_read_unlock();
- rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
- (struct p_header80 *)p, size);
- kfree(p);
- return rv;
+ return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+}
+
+int drbd_send_protocol(struct drbd_tconn *tconn)
+{
+ int err;
+
+ mutex_lock(&tconn->data.mutex);
+ err = __drbd_send_protocol(tconn, P_PROTOCOL);
+ mutex_unlock(&tconn->data.mutex);
+
+ return err;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
- struct p_uuids p;
+ struct drbd_socket *sock;
+ struct p_uuids *p;
int i;
if (!get_ldev_if_state(mdev, D_NEGOTIATING))
- return 1;
+ return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p) {
+ put_ldev(mdev);
+ return -EIO;
+ }
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+ p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
- uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
+ p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ rcu_read_lock();
+ uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ rcu_read_unlock();
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
- p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
+ p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
put_ldev(mdev);
-
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
int drbd_send_uuids(struct drbd_conf *mdev)
@@ -2169,9 +884,10 @@ void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
}
}
-int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
- struct p_rs_uuid p;
+ struct drbd_socket *sock;
+ struct p_rs_uuid *p;
u64 uuid;
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
@@ -2184,24 +900,29 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
- p.uuid = cpu_to_be64(uuid);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->uuid = cpu_to_be64(uuid);
+ drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ }
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
- struct p_sizes p;
+ struct drbd_socket *sock;
+ struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- int ok;
if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
D_ASSERT(mdev->ldev->backing_bdev);
d_size = drbd_get_max_capacity(mdev->ldev);
- u_size = mdev->ldev->dc.disk_size;
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
q_order_type = drbd_queue_order_type(mdev);
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
@@ -2213,20 +934,23 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
- if (mdev->agreed_pro_version <= 94)
- max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
- p.d_size = cpu_to_be64(d_size);
- p.u_size = cpu_to_be64(u_size);
- p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
- p.max_bio_size = cpu_to_be32(max_bio_size);
- p.queue_order_type = cpu_to_be16(q_order_type);
- p.dds_flags = cpu_to_be16(flags);
+ if (mdev->tconn->agreed_pro_version <= 94)
+ max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ else if (mdev->tconn->agreed_pro_version < 100)
+ max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ p->d_size = cpu_to_be64(d_size);
+ p->u_size = cpu_to_be64(u_size);
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->max_bio_size = cpu_to_be32(max_bio_size);
+ p->queue_order_type = cpu_to_be16(q_order_type);
+ p->dds_flags = cpu_to_be16(flags);
+ return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
@@ -2235,34 +959,21 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
*/
int drbd_send_current_state(struct drbd_conf *mdev)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
-
- /* Grab state lock so we wont send state if we're in the middle
- * of a cluster wide state change on another thread */
- drbd_state_lock(mdev);
-
- mutex_lock(&mdev->data.mutex);
-
- p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- sock = mdev->data.socket;
+ struct drbd_socket *sock;
+ struct p_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
-
- mutex_unlock(&mdev->data.mutex);
-
- drbd_state_unlock(mdev);
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
- * @state: the state to send, not necessarily the current state.
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
* send the resulting new state to the peer. If more state changes happen
@@ -2271,50 +982,95 @@ int drbd_send_current_state(struct drbd_conf *mdev)
*/
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
+ struct drbd_socket *sock;
+ struct p_state *p;
- mutex_lock(&mdev->data.mutex);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
- p.state = cpu_to_be32(state.i);
- sock = mdev->data.socket;
+int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+{
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+}
- mutex_unlock(&mdev->data.mutex);
+int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_packet cmd;
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- return ok;
+ cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
+void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
- struct p_req_state p;
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
- p.mask = cpu_to_be32(mask.i);
- p.val = cpu_to_be32(val.i);
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ }
+}
+
+void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+{
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
+ enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ }
}
-int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
- struct p_req_state_reply p;
+ BUG_ON(code & ~0xf);
+ p->encoding = (p->encoding & ~0xf) | code;
+}
- p.retcode = cpu_to_be32(retcode);
+static void dcbp_set_start(struct p_compressed_bm *p, int set)
+{
+ p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
+}
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
- (struct p_header80 *)&p, sizeof(p));
+static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
+{
+ BUG_ON(n & ~0x7);
+ p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
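/* Illustration (editor's note, not from the original patch): the
 * dcbp_set_*() helpers above pack a single byte, p_compressed_bm->encoding:
 *
 *     bit 7        bits 6..4          bits 3..0
 *   [ start ]  [ pad_bits (0..7) ]  [ code (enum drbd_bitmap_code) ]
 *
 * fill_bitmap_rle_bits() below sets the start flag and the pad bits, and
 * send_bitmap_rle_or_plain() then stamps the code (RLE_VLI_Bits) before the
 * packet is sent.
 */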
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
- struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct p_compressed_bm *p,
+ unsigned int size,
+ struct bm_xfer_ctx *c)
{
struct bitstream bs;
unsigned long plain_bits;
@@ -2322,19 +1078,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
unsigned long rl;
unsigned len;
unsigned toggle;
- int bits;
+ int bits, use_rle;
/* may we use this feature? */
- if ((mdev->sync_conf.use_rle == 0) ||
- (mdev->agreed_pro_version < 90))
- return 0;
+ rcu_read_lock();
+ use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ rcu_read_unlock();
+ if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ return 0;
if (c->bit_offset >= c->bm_bits)
return 0; /* nothing to do. */
/* use at most thus many bytes */
- bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
- memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
+ bitstream_init(&bs, p->code, size, 0);
+ memset(p->code, 0, size);
/* plain bits covered in this code string */
plain_bits = 0;
@@ -2356,12 +1114,12 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (rl == 0) {
/* the first checked bit was set,
* store start value, */
- DCBP_set_start(p, 1);
+ dcbp_set_start(p, 1);
/* but skip encoding of zero run length */
toggle = !toggle;
continue;
}
- DCBP_set_start(p, 0);
+ dcbp_set_start(p, 0);
}
/* paranoia: catch zero runlength.
@@ -2401,7 +1159,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
bm_xfer_ctx_bit_to_word_offset(c);
/* store pad_bits */
- DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
+ dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
return len;
}
@@ -2413,48 +1171,52 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct p_header80 *h, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
- struct p_compressed_bm *p = (void*)h;
- unsigned long num_words;
- int len;
- int ok;
-
- len = fill_bitmap_rle_bits(mdev, p, c);
+ struct drbd_socket *sock = &mdev->tconn->data;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct p_compressed_bm *p = sock->sbuf + header_size;
+ int len, err;
+ len = fill_bitmap_rle_bits(mdev, p,
+ DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
- DCBP_set_code(p, RLE_VLI_Bits);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
- sizeof(*p) + len, 0);
-
+ dcbp_set_code(p, RLE_VLI_Bits);
+ err = __send_command(mdev->tconn, mdev->vnr, sock,
+ P_COMPRESSED_BITMAP, sizeof(*p) + len,
+ NULL, 0);
c->packets[0]++;
- c->bytes[0] += sizeof(*p) + len;
+ c->bytes[0] += header_size + sizeof(*p) + len;
if (c->bit_offset >= c->bm_bits)
len = 0; /* DONE */
} else {
/* was not compressible.
* send a buffer full of plain text bits instead. */
- num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- len = num_words * sizeof(long);
+ unsigned int data_size;
+ unsigned long num_words;
+ unsigned long *p = sock->sbuf + header_size;
+
+ data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
- h, sizeof(struct p_header80) + len, 0);
+ drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
c->packets[1]++;
- c->bytes[1] += sizeof(struct p_header80) + len;
+ c->bytes[1] += header_size + len;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
}
- if (ok) {
+ if (!err) {
if (len == 0) {
INFO_bm_xfer_stats(mdev, "send", c);
return 0;
@@ -2465,21 +1227,13 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
}
/* See the comment at receive_bitmap() */
-int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
- struct p_header80 *p;
int err;
- ERR_IF(!mdev->bitmap) return false;
-
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- p = (struct p_header80 *) __get_free_page(GFP_NOIO);
- if (!p) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
+ if (!expect(mdev->bitmap))
return false;
- }
if (get_ldev(mdev)) {
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
@@ -2504,37 +1258,39 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
};
do {
- err = send_bitmap_rle_or_plain(mdev, p, &c);
+ err = send_bitmap_rle_or_plain(mdev, &c);
} while (err > 0);
- free_page((unsigned long) p);
return err == 0;
}
int drbd_send_bitmap(struct drbd_conf *mdev)
{
- int err;
+ struct drbd_socket *sock = &mdev->tconn->data;
+ int err = -1;
- if (!drbd_get_data_sock(mdev))
- return -1;
- err = !_drbd_send_bitmap(mdev);
- drbd_put_data_sock(mdev);
+ mutex_lock(&sock->mutex);
+ if (sock->socket)
+ err = !_drbd_send_bitmap(mdev);
+ mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
- int ok;
- struct p_barrier_ack p;
+ struct drbd_socket *sock;
+ struct p_barrier_ack *p;
- p.barrier = barrier_nr;
- p.set_size = cpu_to_be32(set_size);
+ if (tconn->cstate < C_WF_REPORT_PARAMS)
+ return;
- if (mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return;
+ p->barrier = barrier_nr;
+ p->set_size = cpu_to_be32(set_size);
+ conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
@@ -2545,62 +1301,62 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
* @blksize: size in bytes, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- u64 sector,
- u32 blksize,
- u64 block_id)
+static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ u64 sector, u32 blksize, u64 block_id)
{
- int ok;
- struct p_block_ack p;
+ struct drbd_socket *sock;
+ struct p_block_ack *p;
- p.sector = sector;
- p.block_id = block_id;
- p.blksize = blksize;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ if (mdev->state.conn < C_CONNECTED)
+ return -EIO;
- if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = sector;
+ p->block_id = block_id;
+ p->blksize = blksize;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size)
+void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size)
{
- data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
- return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
- dp->block_id);
+ if (mdev->tconn->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ dp->block_id);
}
-int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp)
+void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp)
{
- return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
- * @cmd: Packet command code.
- * @e: Epoch entry.
+ * @mdev: DRBD device
+ * @cmd: packet command code
+ * @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev,
- enum drbd_packets cmd, struct drbd_epoch_entry *e)
+int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
return _drbd_send_ack(mdev, cmd,
- cpu_to_be64(e->sector),
- cpu_to_be32(e->size),
- e->block_id);
+ cpu_to_be64(peer_req->i.sector),
+ cpu_to_be32(peer_req->i.size),
+ peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
* are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
return _drbd_send_ack(mdev, cmd,
@@ -2612,85 +1368,87 @@ int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = block_id;
- p.blksize = cpu_to_be32(size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = block_id;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector, int size,
- void *digest, int digest_size,
- enum drbd_packets cmd)
+int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+ void *digest, int digest_size, enum drbd_packet cmd)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbeef;
- p.blksize = cpu_to_be32(size);
-
- p.head.magic = BE_DRBD_MAGIC;
- p.head.command = cpu_to_be16(cmd);
- p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- mutex_lock(&mdev->data.mutex);
+ /* FIXME: Put the digest into the preallocated socket buffer. */
- ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
- ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
-
- mutex_unlock(&mdev->data.mutex);
-
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p),
+ digest, digest_size);
}
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
- int ok;
- struct p_block_req p;
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbabe;
- p.blksize = cpu_to_be32(size);
-
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
int drop_it;
/* long elapsed = (long)(jiffies - mdev->last_received); */
- drop_it = mdev->meta.socket == sock
- || !mdev->asender.task
- || get_t_state(&mdev->asender) != Running
- || mdev->state.conn < C_CONNECTED;
+ drop_it = tconn->meta.socket == sock
+ || !tconn->asender.task
+ || get_t_state(&tconn->asender) != RUNNING
+ || tconn->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--mdev->ko_count;
+ drop_it = !--tconn->ko_count;
if (!drop_it) {
- dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, mdev->ko_count);
- request_ping(mdev);
+ conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, tconn->ko_count);
+ request_ping(tconn);
}
return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
+static void drbd_update_congested(struct drbd_tconn *tconn)
+{
+ struct sock *sk = tconn->data.socket->sk;
+ if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
+ set_bit(NET_CONGESTED, &tconn->flags);
+}
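/* Illustration (editor's example, not from the original patch): the 4/5
 * threshold above means that with, say, an sk_sndbuf of 128 KiB (an
 * illustrative value, not one taken from this patch), NET_CONGESTED is set
 * once more than ~102 KiB of unsent data sit in the socket's write queue;
 * drbd_send() and _drbd_send_page() clear the bit again when they finish
 * sending.
 */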
+
/* The idea of sendpage seems to be to put some kind of reference
* to the page into the skb, and to hand it over to the NIC. In
* this process get_page() gets called.
@@ -2713,21 +1471,28 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
* with page_count == 0 or PageSlab.
*/
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
- int offset, size_t size, unsigned msg_flags)
+ int offset, size_t size, unsigned msg_flags)
{
- int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
+ struct socket *socket;
+ void *addr;
+ int err;
+
+ socket = mdev->tconn->data.socket;
+ addr = kmap(page) + offset;
+ err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
kunmap(page);
- if (sent == size)
- mdev->send_cnt += size>>9;
- return sent == size;
+ if (!err)
+ mdev->send_cnt += size >> 9;
+ return err;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
+ struct socket *socket = mdev->tconn->data.socket;
mm_segment_t oldfs = get_fs();
- int sent, ok;
int len = size;
+ int err = -EIO;
/* e.g. XFS meta- & log-data is in slab pages, which have a
* page_count of 0 and/or have PageSlab() set.
@@ -2739,34 +1504,35 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev);
+ drbd_update_congested(mdev->tconn);
set_fs(KERNEL_DS);
do {
- sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
- offset, len,
- msg_flags);
- if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev,
- mdev->data.socket))
- break;
- else
- continue;
- }
+ int sent;
+
+ sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
+ if (sent == -EAGAIN) {
+ if (we_should_drop_the_connection(mdev->tconn, socket))
+ break;
+ continue;
+ }
dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
+ if (sent < 0)
+ err = sent;
break;
}
len -= sent;
offset += sent;
} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->flags);
+ clear_bit(NET_CONGESTED, &mdev->tconn->flags);
- ok = (len == 0);
- if (likely(ok))
- mdev->send_cnt += size>>9;
- return ok;
+ if (len == 0) {
+ err = 0;
+ mdev->send_cnt += size >> 9;
+ }
+ return err;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2775,12 +1541,15 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_no_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2789,32 +1558,40 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
- unsigned len = e->size;
+ struct page *page = peer_req->pages;
+ unsigned len = peer_req->i.size;
+ int err;
+
/* hint all but last page with MSG_MORE */
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- if (!_drbd_send_page(mdev, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0))
- return 0;
+
+ err = _drbd_send_page(mdev, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ if (err)
+ return err;
len -= l;
}
- return 1;
+ return 0;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
- if (mdev->agreed_pro_version >= 95)
+ if (mdev->tconn->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -2828,50 +1605,36 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
*/
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
- int ok = 1;
- struct p_data p;
+ struct drbd_socket *sock;
+ struct p_data *p;
unsigned int dp_flags = 0;
- void *dgb;
int dgs;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(P_DATA);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(P_DATA);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- }
-
- p.sector = cpu_to_be64(req->sector);
- p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->block_id = (unsigned long)req;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
-
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
-
- p.dp_flags = cpu_to_be32(dp_flags);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
- ok = (sizeof(p) ==
- drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok) {
+ if (mdev->tconn->agreed_pro_version >= 100) {
+ if (req->rq_state & RQ_EXP_RECEIVE_ACK)
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ if (req->rq_state & RQ_EXP_WRITE_ACK)
+ dp_flags |= DP_SEND_WRITE_ACK;
+ }
+ p->dp_flags = cpu_to_be32(dp_flags);
+ if (dgs)
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
* as soon as we handed it over to tcp, at which point the data
@@ -2883,92 +1646,76 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* out ok after sending on this side, but does not fit on the
* receiving side, we sure have detected corruption elsewhere.
*/
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
- ok = _drbd_send_bio(mdev, req->master_bio);
+ if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
+ err = _drbd_send_bio(mdev, req->master_bio);
else
- ok = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(mdev, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 bytes, 512 bits, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
- if (memcmp(mdev->int_dig_out, digest, dgs)) {
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ if (memcmp(p + 1, digest, dgs)) {
dev_warn(DEV,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
}
} /* else if (dgs > 64) {
... Be noisy about digest too large ...
} */
}
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- drbd_put_data_sock(mdev);
-
- return ok;
+ return err;
}
/* answer packet, used to send data back for read requests:
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e)
+int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
- int ok;
- struct p_data p;
- void *dgb;
+ struct drbd_socket *sock;
+ struct p_data *p;
+ int err;
int dgs;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(cmd);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(cmd);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- }
-
- p.sector = cpu_to_be64(e->sector);
- p.block_id = e->block_id;
- /* p.seq_num = 0; No sequence numbers here.. */
-
- /* Only called by our kernel thread.
- * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
- * in response to admin command or module unload.
- */
- if (!drbd_get_data_sock(mdev))
- return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
- ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok)
- ok = _drbd_send_zc_ee(mdev, e);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
- drbd_put_data_sock(mdev);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(peer_req->i.sector);
+ p->block_id = peer_req->block_id;
+ p->seq_num = 0; /* unused */
+ p->dp_flags = 0;
+ if (dgs)
+ drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ if (!err)
+ err = _drbd_send_zc_ee(mdev, peer_req);
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- return ok;
+ return err;
}
-int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
- struct p_block_desc p;
-
- p.sector = cpu_to_be64(req->sector);
- p.blksize = cpu_to_be32(req->size);
+ struct drbd_socket *sock;
+ struct p_block_desc *p;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->blksize = cpu_to_be32(req->i.size);
+ return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
@@ -2987,7 +1734,7 @@ int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_conf *mdev, struct socket *sock,
+int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -2995,7 +1742,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
int rv, sent = 0;
if (!sock)
- return -1000;
+ return -EBADR;
/* THINK if (signal_pending) return ... ? */
@@ -3008,9 +1755,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == mdev->data.socket) {
- mdev->ko_count = mdev->net_conf->ko_count;
- drbd_update_congested(mdev);
+ if (sock == tconn->data.socket) {
+ rcu_read_lock();
+ tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ rcu_read_unlock();
+ drbd_update_congested(tconn);
}
do {
/* STRANGE
@@ -3024,12 +1773,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(mdev, sock))
+ if (we_should_drop_the_connection(tconn, sock))
break;
else
continue;
}
- D_ASSERT(rv != 0);
if (rv == -EINTR) {
flush_signals(current);
rv = 0;
@@ -3041,22 +1789,40 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == mdev->data.socket)
- clear_bit(NET_CONGESTED, &mdev->flags);
+ if (sock == tconn->data.socket)
+ clear_bit(NET_CONGESTED, &tconn->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- dev_err(DEV, "%s_sendmsg returned %d\n",
- sock == mdev->meta.socket ? "msock" : "sock",
- rv);
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_err(tconn, "%s_sendmsg returned %d\n",
+ sock == tconn->meta.socket ? "msock" : "sock",
+ rv);
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- drbd_force_state(mdev, NS(conn, C_TIMEOUT));
+ conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
}
+/**
+ * drbd_send_all - Send an entire buffer
+ *
+ * Returns 0 upon success and a negative error value otherwise.
+ */
+int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+ size_t size, unsigned msg_flags)
+{
+ int err;
+
+ err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ if (err < 0)
+ return err;
+ if (err != size)
+ return -EIO;
+ return 0;
+}
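/* Illustration (editor's note, not from the original patch): drbd_send()
 * above reports how many bytes actually went out (possibly fewer than
 * requested, e.g. after a signal), while drbd_send_all() folds a short send
 * into -EIO, so callers such as __send_command() only have to check for a
 * nonzero error.
 */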
+
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
struct drbd_conf *mdev = bdev->bd_disk->private_data;
@@ -3064,7 +1830,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
@@ -3077,7 +1843,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (!rv)
mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -3094,35 +1860,14 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
static void drbd_set_defaults(struct drbd_conf *mdev)
{
- /* This way we get a compile error when sync_conf grows,
- and we forgot to initialize it here */
- mdev->sync_conf = (struct syncer_conf) {
- /* .rate = */ DRBD_RATE_DEF,
- /* .after = */ DRBD_AFTER_DEF,
- /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
- /* .verify_alg = */ {}, 0,
- /* .cpu_mask = */ {}, 0,
- /* .csums_alg = */ {}, 0,
- /* .use_rle = */ 0,
- /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
- /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
- /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
- /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
- /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
- /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
- };
-
- /* Have to use that way, because the layout differs between
- big endian and little endian */
- mdev->state = (union drbd_state) {
+ /* Beware! The actual layout differs
+ * between big endian and little endian */
+ mdev->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
- .susp = 0,
- .susp_nod = 0,
- .susp_fen = 0
} };
}
@@ -3138,28 +1883,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
atomic_set(&mdev->unacked_cnt, 0);
atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->net_cnt, 0);
- atomic_set(&mdev->packet_seq, 0);
- atomic_set(&mdev->pp_in_use, 0);
atomic_set(&mdev->pp_in_use_by_net, 0);
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->data.mutex);
- mutex_init(&mdev->meta.mutex);
- sema_init(&mdev->data.work.s, 0);
- sema_init(&mdev->meta.work.s, 0);
- mutex_init(&mdev->state_mutex);
-
- spin_lock_init(&mdev->data.work.q_lock);
- spin_lock_init(&mdev->meta.work.q_lock);
+ mutex_init(&mdev->own_state_mutex);
+ mdev->state_mutex = &mdev->own_state_mutex;
spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->req_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
@@ -3167,8 +1901,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->read_ee);
INIT_LIST_HEAD(&mdev->net_ee);
INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->data.work.q);
- INIT_LIST_HEAD(&mdev->meta.work.q);
INIT_LIST_HEAD(&mdev->resync_work.list);
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->go_diskless.list);
@@ -3182,6 +1914,14 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
mdev->start_resync_work.cb = w_start_resync;
+
+ mdev->resync_work.mdev = mdev;
+ mdev->unplug_work.mdev = mdev;
+ mdev->go_diskless.mdev = mdev;
+ mdev->md_sync_work.mdev = mdev;
+ mdev->bm_io_work.w.mdev = mdev;
+ mdev->start_resync_work.mdev = mdev;
+
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
init_timer(&mdev->start_resync_timer);
@@ -3197,17 +1937,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->net_cnt_wait);
init_waitqueue_head(&mdev->ee_wait);
init_waitqueue_head(&mdev->al_wait);
init_waitqueue_head(&mdev->seq_wait);
- drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
- drbd_thread_init(mdev, &mdev->worker, drbd_worker);
- drbd_thread_init(mdev, &mdev->asender, drbd_asender);
-
- mdev->agreed_pro_version = PRO_VERSION_MAX;
- mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
@@ -3216,13 +1949,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
int i;
- if (mdev->receiver.t_state != None)
+ if (mdev->tconn->receiver.t_state != NONE)
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->receiver.t_state);
+ mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
@@ -3239,7 +1969,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
mdev->rs_mark_left[i] = 0;
mdev->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->net_conf == NULL);
+ D_ASSERT(mdev->tconn->net_conf == NULL);
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
@@ -3248,21 +1978,18 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_bm_cleanup(mdev);
}
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
+
clear_bit(AL_SUSPENDED, &mdev->flags);
- /*
- * currently we drbd_init_ee only on module load, so
- * we may do drbd_release_ee only on module unload!
- */
D_ASSERT(list_empty(&mdev->active_ee));
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->net_ee));
D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->data.work.q));
- D_ASSERT(list_empty(&mdev->meta.work.q));
+ D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -3336,7 +2063,7 @@ static int drbd_create_mempools(void)
goto Enomem;
drbd_ee_cache = kmem_cache_create(
- "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
+ "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
@@ -3351,11 +2078,9 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
-#ifdef COMPAT_HAVE_BIOSET_CREATE
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
-#endif
drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_page_pool == NULL)
@@ -3404,73 +2129,53 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_ee_lists(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
int rr;
- rr = drbd_release_ee(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
if (rr)
dev_err(DEV, "%d EEs in active list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
if (rr)
dev_err(DEV, "%d EEs in sync list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
if (rr)
dev_err(DEV, "%d EEs in read list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
if (rr)
dev_err(DEV, "%d EEs in done list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (rr)
dev_err(DEV, "%d EEs in net list found!\n", rr);
}
-/* caution. no locking.
- * currently only used from module cleanup code. */
-static void drbd_delete_device(unsigned int minor)
+/* caution. no locking. */
+void drbd_minor_destroy(struct kref *kref)
{
- struct drbd_conf *mdev = minor_to_mdev(minor);
-
- if (!mdev)
- return;
+ struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
+ struct drbd_tconn *tconn = mdev->tconn;
del_timer_sync(&mdev->request_timer);
/* paranoia asserts */
- if (mdev->open_cnt != 0)
- dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
- __FILE__ , __LINE__);
-
- ERR_IF (!list_empty(&mdev->data.work.q)) {
- struct list_head *lp;
- list_for_each(lp, &mdev->data.work.q) {
- dev_err(DEV, "lp = %p\n", lp);
- }
- };
+ D_ASSERT(mdev->open_cnt == 0);
/* end paranoia asserts */
- del_gendisk(mdev->vdisk);
-
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
if (mdev->this_bdev)
bdput(mdev->this_bdev);
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
- drbd_release_ee_lists(mdev);
-
- /* should be freed on disconnect? */
- kfree(mdev->ee_hash);
- /*
- mdev->ee_hash_s = 0;
- mdev->ee_hash = NULL;
- */
+ drbd_release_all_peer_reqs(mdev);
lc_destroy(mdev->act_log);
lc_destroy(mdev->resync);
@@ -3478,19 +2183,101 @@ static void drbd_delete_device(unsigned int minor)
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
+ if (mdev->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(mdev);
+ __free_page(mdev->md_io_page);
+ put_disk(mdev->vdisk);
+ blk_cleanup_queue(mdev->rq_queue);
+ kfree(mdev->rs_plan_s);
+ kfree(mdev);
- /* cleanup the rest that has been
- * allocated from drbd_new_device
- * and actually free the mdev itself */
- drbd_free_mdev(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
}
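
The hunk above moves per-minor teardown into a kref release callback: the final kref_put() on the mdev runs drbd_minor_destroy(), which in turn drops the reference the minor held on its tconn, so the connection object can only go away after every minor referencing it is gone. As a minimal userspace sketch of that chained-refcount idea (C11 atomics standing in for struct kref; the conn/minor names and *_put helpers are illustrative, not DRBD API):

/* Minimal sketch of chained reference counting, loosely modeled on
 * drbd_minor_destroy()/conn_destroy().  Names and layout are illustrative only. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int ref;
	char name[16];
};

struct minor {
	atomic_int ref;
	struct conn *conn;	/* each minor pins its connection */
};

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {	/* last reference */
		printf("destroying connection %s\n", c->name);
		free(c);
	}
}

static void minor_put(struct minor *m)
{
	if (atomic_fetch_sub(&m->ref, 1) == 1) {
		struct conn *c = m->conn;
		printf("destroying minor\n");
		free(m);
		conn_put(c);	/* drop the ref the minor held, cf. kref_put(&tconn->kref, ...) */
	}
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));
	struct minor *m = calloc(1, sizeof(*m));

	atomic_init(&c->ref, 1);		/* ref held by the "resource list" */
	snprintf(c->name, sizeof(c->name), "r0");

	atomic_init(&m->ref, 1);
	atomic_fetch_add(&c->ref, 1);		/* cf. kref_get(&tconn->kref) in conn_new_minor() */
	m->conn = c;

	minor_put(m);	/* frees the minor and drops its conn reference */
	conn_put(c);	/* list drops its ref; the connection is freed last */
	return 0;
}
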
+/* One global retry thread, if we need to push back some bio and have it
+ * reinserted through our make request function.
+ */
+static struct retry_worker {
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t lock;
+ struct list_head writes;
+} retry;
+
+static void do_retry(struct work_struct *ws)
+{
+ struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
+ LIST_HEAD(writes);
+ struct drbd_request *req, *tmp;
+
+ spin_lock_irq(&retry->lock);
+ list_splice_init(&retry->writes, &writes);
+ spin_unlock_irq(&retry->lock);
+
+ list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->master_bio;
+ unsigned long start_time = req->start_time;
+ bool expected;
+
+ expected =
+ expect(atomic_read(&req->completion_ref) == 0) &&
+ expect(req->rq_state & RQ_POSTPONED) &&
+ expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
+ (req->rq_state & RQ_LOCAL_ABORTED) != 0);
+
+ if (!expected)
+ dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ req, atomic_read(&req->completion_ref),
+ req->rq_state);
+
+ /* We still need to put one kref associated with the
+ * "completion_ref" going zero in the code path that queued it
+ * here. The request object may still be referenced by a
+ * frozen local req->private_bio, in case we force-detached.
+ */
+ kref_put(&req->kref, drbd_req_destroy);
+
+ /* A single suspended or otherwise blocking device may stall
+ * all others as well. Fortunately, this code path is to
+ * recover from a situation that "should not happen":
+ * concurrent writes in multi-primary setup.
+ * In a "normal" lifecycle, this workqueue is supposed to be
+ * destroyed without ever doing anything.
+ * If it turns out to be an issue anyways, we can do per
+ * resource (replication group) or per device (minor) retry
+ * workqueues instead.
+ */
+
+ /* We are not just doing generic_make_request(),
+ * as we want to keep the start_time information. */
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
+ }
+}
+
+void drbd_restart_request(struct drbd_request *req)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&retry.lock, flags);
+ list_move_tail(&req->tl_requests, &retry.writes);
+ spin_unlock_irqrestore(&retry.lock, flags);
+
+ /* Drop the extra reference that would otherwise
+ * have been dropped by complete_master_bio.
+ * do_retry() needs to grab a new one. */
+ dec_ap_bio(req->w.mdev);
+
+ queue_work(retry.wq, &retry.worker);
+}
+
+
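
The retry machinery added above follows a common "hand off under a spinlock, process outside it" shape: drbd_restart_request() moves the request onto retry.writes while holding retry.lock and kicks the workqueue, while do_retry() splices the whole list onto a private head before walking it, so the lock is never held across resubmission. A hedged userspace sketch of that list-splice idiom, with a pthread mutex standing in for the spinlock and the queue_job/drain_jobs names invented for illustration:

/* Sketch of the "move to a private list under the lock, then process unlocked"
 * idiom used by drbd_restart_request()/do_retry().  Hypothetical names. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	struct job *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct job *pending;		/* cf. retry.writes */

static void queue_job(int id)		/* cf. drbd_restart_request() */
{
	struct job *j = malloc(sizeof(*j));
	j->id = id;
	pthread_mutex_lock(&lock);
	j->next = pending;		/* LIFO for brevity; DRBD keeps FIFO order */
	pending = j;
	pthread_mutex_unlock(&lock);
	/* a real worker would be woken here, cf. queue_work(retry.wq, ...) */
}

static void drain_jobs(void)		/* cf. do_retry() */
{
	struct job *list, *j;

	pthread_mutex_lock(&lock);
	list = pending;			/* splice everything off in one step */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while ((j = list)) {		/* process with the lock dropped */
		list = j->next;
		printf("resubmitting job %d\n", j->id);
		free(j);
	}
}

int main(void)
{
	queue_job(1);
	queue_job(2);
	drain_jobs();
	return 0;
}
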
static void drbd_cleanup(void)
{
unsigned int i;
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -3505,19 +2292,31 @@ static void drbd_cleanup(void)
if (drbd_proc)
remove_proc_entry("drbd", NULL);
- drbd_nl_cleanup();
+ if (retry.wq)
+ destroy_workqueue(retry.wq);
+
+ drbd_genl_unregister();
- if (minor_table) {
- i = minor_count;
- while (i--)
- drbd_delete_device(i);
- drbd_destroy_mempools();
+ idr_for_each_entry(&minors, mdev, i) {
+ idr_remove(&minors, mdev_to_minor(mdev));
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ del_gendisk(mdev->vdisk);
+ /* synchronize_rcu(); No other threads running at this point */
+ kref_put(&mdev->kref, &drbd_minor_destroy);
}
- kfree(minor_table);
+ /* not _rcu since there is no other updater anymore; genl already unregistered */
+ list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
+ /* synchronize_rcu(); */
+ kref_put(&tconn->kref, &conn_destroy);
+ }
+ drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
+ idr_destroy(&minors);
+
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -3542,7 +2341,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
@@ -3566,7 +2365,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
@@ -3576,20 +2375,243 @@ out:
return r;
}
-struct drbd_conf *drbd_new_device(unsigned int minor)
+static void drbd_init_workqueue(struct drbd_work_queue* wq)
+{
+ spin_lock_init(&wq->q_lock);
+ INIT_LIST_HEAD(&wq->q);
+ init_waitqueue_head(&wq->q_wait);
+}
+
+struct drbd_tconn *conn_get_by_name(const char *name)
+{
+ struct drbd_tconn *tconn;
+
+ if (!name || !name[0])
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (!strcmp(tconn->name, name)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len)
+{
+ struct drbd_tconn *tconn;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (tconn->my_addr_len == my_addr_len &&
+ tconn->peer_addr_len == peer_addr_len &&
+ !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+static int drbd_alloc_socket(struct drbd_socket *socket)
+{
+ socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->rbuf)
+ return -ENOMEM;
+ socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->sbuf)
+ return -ENOMEM;
+ return 0;
+}
+
+static void drbd_free_socket(struct drbd_socket *socket)
+{
+ free_page((unsigned long) socket->sbuf);
+ free_page((unsigned long) socket->rbuf);
+}
+
+void conn_free_crypto(struct drbd_tconn *tconn)
+{
+ drbd_free_sock(tconn);
+
+ crypto_free_hash(tconn->csums_tfm);
+ crypto_free_hash(tconn->verify_tfm);
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ crypto_free_hash(tconn->integrity_tfm);
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+
+ tconn->csums_tfm = NULL;
+ tconn->verify_tfm = NULL;
+ tconn->cram_hmac_tfm = NULL;
+ tconn->integrity_tfm = NULL;
+ tconn->peer_integrity_tfm = NULL;
+ tconn->int_dig_in = NULL;
+ tconn->int_dig_vv = NULL;
+}
+
+int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+{
+ cpumask_var_t new_cpu_mask;
+ int err;
+
+ if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+ /*
+ retcode = ERR_NOMEM;
+ drbd_msg_put_info("unable to allocate cpumask");
+ */
+
+ /* silently ignore cpu mask on UP kernel */
+ if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
+ /* FIXME: Get rid of constant 32 here */
+ err = bitmap_parse(res_opts->cpu_mask, 32,
+ cpumask_bits(new_cpu_mask), nr_cpu_ids);
+ if (err) {
+ conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ /* retcode = ERR_CPU_MASK_PARSE; */
+ goto fail;
+ }
+ }
+ tconn->res_opts = *res_opts;
+ if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(tconn->cpu_mask, new_cpu_mask);
+ drbd_calc_cpu_mask(tconn);
+ tconn->receiver.reset_cpu_mask = 1;
+ tconn->asender.reset_cpu_mask = 1;
+ tconn->worker.reset_cpu_mask = 1;
+ }
+ err = 0;
+
+fail:
+ free_cpumask_var(new_cpu_mask);
+ return err;
+
+}
+
+/* caller must be under genl_lock() */
+struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+{
+ struct drbd_tconn *tconn;
+
+ tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
+ if (!tconn)
+ return NULL;
+
+ tconn->name = kstrdup(name, GFP_KERNEL);
+ if (!tconn->name)
+ goto fail;
+
+ if (drbd_alloc_socket(&tconn->data))
+ goto fail;
+ if (drbd_alloc_socket(&tconn->meta))
+ goto fail;
+
+ if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ goto fail;
+
+ if (set_resource_options(tconn, res_opts))
+ goto fail;
+
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+
+ INIT_LIST_HEAD(&tconn->transfer_log);
+
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
+ tconn->write_ordering = WO_bdev_flush;
+
+ tconn->send.seen_any_write_yet = false;
+ tconn->send.current_epoch_nr = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ tconn->cstate = C_STANDALONE;
+ mutex_init(&tconn->cstate_mutex);
+ spin_lock_init(&tconn->req_lock);
+ mutex_init(&tconn->conf_update);
+ init_waitqueue_head(&tconn->ping_wait);
+ idr_init(&tconn->volumes);
+
+ drbd_init_workqueue(&tconn->sender_work);
+ mutex_init(&tconn->data.mutex);
+ mutex_init(&tconn->meta.mutex);
+
+ drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
+ drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
+ drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+
+ kref_init(&tconn->kref);
+ list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+
+ return tconn;
+
+fail:
+ kfree(tconn->current_epoch);
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn);
+
+ return NULL;
+}
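
conn_create() unwinds all of its partial allocations through a single fail: label; that works because every cleanup call on that path tolerates a NULL or never-allocated argument, so an early failure simply falls through the later frees. A standalone sketch of that single-exit error path, with the struct and field names invented:

/* Sketch of the single "fail:" unwind used by conn_create(): every cleanup
 * tolerates NULL, so one label can free whatever subset was allocated. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct resource {
	char *name;
	void *send_buf;
	void *recv_buf;
};

static struct resource *resource_create(const char *name)
{
	struct resource *r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;

	r->name = strdup(name);
	if (!r->name)
		goto fail;
	r->send_buf = malloc(4096);
	if (!r->send_buf)
		goto fail;
	r->recv_buf = malloc(4096);
	if (!r->recv_buf)
		goto fail;

	return r;

fail:
	/* free(NULL) is a no-op, so this is safe no matter where we bailed out */
	free(r->recv_buf);
	free(r->send_buf);
	free(r->name);
	free(r);
	return NULL;
}

int main(void)
{
	struct resource *r = resource_create("r0");
	printf("create %s\n", r ? "succeeded" : "failed");
	if (r) {
		free(r->recv_buf);
		free(r->send_buf);
		free(r->name);
		free(r);
	}
	return 0;
}
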
+
+void conn_destroy(struct kref *kref)
+{
+ struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
+ idr_destroy(&tconn->volumes);
+
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ kfree(tconn);
+}
+
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
struct gendisk *disk;
struct request_queue *q;
+ int vnr_got = vnr;
+ int minor_got = minor;
+ enum drbd_ret_code err = ERR_NOMEM;
+
+ mdev = minor_to_mdev(minor);
+ if (mdev)
+ return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
if (!mdev)
- return NULL;
- if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
- goto out_no_cpumask;
+ return ERR_NOMEM;
+
+ kref_get(&tconn->kref);
+ mdev->tconn = tconn;
mdev->minor = minor;
+ mdev->vnr = vnr;
drbd_init_set_defaults(mdev);
@@ -3627,7 +2649,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock;
+ q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
@@ -3635,30 +2657,44 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
if (drbd_bm_init(mdev))
goto out_no_bitmap;
- /* no need to lock access, we are still initializing this minor device. */
- if (!tl_init(mdev))
- goto out_no_tl;
-
- mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
- if (!mdev->app_reads_hash)
- goto out_no_app_reads;
-
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
- return mdev;
-
-/* out_whatever_else:
- kfree(mdev->current_epoch); */
-out_no_epoch:
- kfree(mdev->app_reads_hash);
-out_no_app_reads:
- tl_cleanup(mdev);
-out_no_tl:
+ mdev->read_requests = RB_ROOT;
+ mdev->write_requests = RB_ROOT;
+
+ if (!idr_pre_get(&minors, GFP_KERNEL))
+ goto out_no_minor_idr;
+ if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+ goto out_no_minor_idr;
+ if (minor_got != minor) {

+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
+ goto out_idr_remove_minor;
+ }
+
+ if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
+ goto out_idr_remove_minor;
+ if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+ goto out_idr_remove_minor;
+ if (vnr_got != vnr) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ goto out_idr_remove_vol;
+ }
+ add_disk(disk);
+ kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
+
+ /* inherit the connection state */
+ mdev->state.conn = tconn->cstate;
+ if (mdev->state.conn == C_WF_REPORT_PARAMS)
+ drbd_connected(mdev);
+
+ return NO_ERROR;
+
+out_idr_remove_vol:
+ idr_remove(&tconn->volumes, vnr_got);
+out_idr_remove_minor:
+ idr_remove(&minors, minor_got);
+ synchronize_rcu();
+out_no_minor_idr:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
@@ -3667,55 +2703,25 @@ out_no_io_page:
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- free_cpumask_var(mdev->cpu_mask);
-out_no_cpumask:
- kfree(mdev);
- return NULL;
-}
-
-/* counterpart of drbd_new_device.
- * last part of drbd_delete_device. */
-void drbd_free_mdev(struct drbd_conf *mdev)
-{
- kfree(mdev->current_epoch);
- kfree(mdev->app_reads_hash);
- tl_cleanup(mdev);
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- free_cpumask_var(mdev->cpu_mask);
- drbd_free_tl_hash(mdev);
kfree(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
+ return err;
}
-
int __init drbd_init(void)
{
int err;
- if (sizeof(struct p_handshake) != 80) {
- printk(KERN_ERR
- "drbd: never change the size or layout "
- "of the HandShake packet.\n");
- return -EINVAL;
- }
-
if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
printk(KERN_ERR
- "drbd: invalid minor_count (%d)\n", minor_count);
+ "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = 8;
+ minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
- err = drbd_nl_init();
- if (err)
- return err;
-
err = register_blkdev(DRBD_MAJOR, "drbd");
if (err) {
printk(KERN_ERR
@@ -3724,6 +2730,13 @@ int __init drbd_init(void)
return err;
}
+ err = drbd_genl_register();
+ if (err) {
+ printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+ goto fail;
+ }
+
+
register_reboot_notifier(&drbd_notifier);
/*
@@ -3734,22 +2747,29 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
- GFP_KERNEL);
- if (!minor_table)
- goto Enomem;
+ idr_init(&minors);
err = drbd_create_mempools();
if (err)
- goto Enomem;
+ goto fail;
drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
- goto Enomem;
+ goto fail;
}
rwlock_init(&global_state_lock);
+ INIT_LIST_HEAD(&drbd_tconns);
+
+ retry.wq = create_singlethread_workqueue("drbd-reissue");
+ if (!retry.wq) {
+ printk(KERN_ERR "drbd: unable to create retry workqueue\n");
+ goto fail;
+ }
+ INIT_WORK(&retry.worker, do_retry);
+ spin_lock_init(&retry.lock);
+ INIT_LIST_HEAD(&retry.writes);
printk(KERN_INFO "drbd: initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
@@ -3757,11 +2777,10 @@ int __init drbd_init(void)
printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
printk(KERN_INFO "drbd: registered as block device major %d\n",
DRBD_MAJOR);
- printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
return 0; /* Success! */
-Enomem:
+fail:
drbd_cleanup();
if (err == -ENOMEM)
/* currently always the case */
@@ -3782,47 +2801,42 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_conf *mdev)
+void drbd_free_sock(struct drbd_tconn *tconn)
{
- if (mdev->data.socket) {
- mutex_lock(&mdev->data.mutex);
- kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
- sock_release(mdev->data.socket);
- mdev->data.socket = NULL;
- mutex_unlock(&mdev->data.mutex);
+ if (tconn->data.socket) {
+ mutex_lock(&tconn->data.mutex);
+ kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
+ sock_release(tconn->data.socket);
+ tconn->data.socket = NULL;
+ mutex_unlock(&tconn->data.mutex);
}
- if (mdev->meta.socket) {
- mutex_lock(&mdev->meta.mutex);
- kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
- sock_release(mdev->meta.socket);
- mdev->meta.socket = NULL;
- mutex_unlock(&mdev->meta.mutex);
+ if (tconn->meta.socket) {
+ mutex_lock(&tconn->meta.mutex);
+ kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
+ sock_release(tconn->meta.socket);
+ tconn->meta.socket = NULL;
+ mutex_unlock(&tconn->meta.mutex);
}
}
+/* meta data management */
-void drbd_free_resources(struct drbd_conf *mdev)
+void conn_md_sync(struct drbd_tconn *tconn)
{
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = NULL;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = NULL;
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = NULL;
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = NULL;
-
- drbd_free_sock(mdev);
+ struct drbd_conf *mdev;
+ int vnr;
- __no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_md_sync(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
-/* meta data management */
-
struct meta_data_on_disk {
u64 la_size; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
@@ -3833,7 +2847,7 @@ struct meta_data_on_disk {
u32 md_size_sect;
u32 al_offset; /* offset to this block */
u32 al_nr_extents; /* important for restoring the AL */
- /* `-- act_log->nr_elements <-- sync_conf.al_extents */
+ /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
@@ -3871,7 +2885,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
- buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
+ buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
@@ -3885,7 +2899,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
sector = mdev->ldev->md.md_offset;
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
@@ -3906,11 +2920,12 @@ out:
* @bdev: Device from which the meta data should be read in.
*
* Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
- * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
+ * something goes wrong.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
+ u32 magic, flags;
int i, rv = NO_ERROR;
if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -3920,7 +2935,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!buffer)
goto out;
- if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
dev_err(DEV, "Error while reading metadata.\n");
@@ -3928,8 +2943,20 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
goto err;
}
- if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
- dev_err(DEV, "Error while reading metadata, magic not found.\n");
+ magic = be32_to_cpu(buffer->magic);
+ flags = be32_to_cpu(buffer->flags);
+ if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
+ (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
+ /* btw: that's Activity Log clean, not "all" clean. */
+ dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ rv = ERR_MD_UNCLEAN;
+ goto err;
+ }
+ if (magic != DRBD_MD_MAGIC_08) {
+ if (magic == DRBD_MD_MAGIC_07)
+ dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ else
+ dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
rv = ERR_MD_INVALID;
goto err;
}
@@ -3963,20 +2990,16 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev->md.flags = be32_to_cpu(buffer->flags);
- mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
+ spin_unlock_irq(&mdev->tconn->req_lock);
err:
drbd_md_put_buffer(mdev);
@@ -4011,7 +3034,7 @@ void drbd_md_mark_dirty(struct drbd_conf *mdev)
}
#endif
-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
@@ -4019,7 +3042,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
if (mdev->state.role == R_PRIMARY)
@@ -4034,14 +3057,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
drbd_md_mark_dirty(mdev);
}
+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
}
- _drbd_uuid_set(mdev, idx, val);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}
/**
@@ -4054,15 +3087,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
u64 val;
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid;
+
+ get_random_bytes(&val, sizeof(u64));
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(mdev, UI_CURRENT, val);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
- get_random_bytes(&val, sizeof(u64));
- _drbd_uuid_set(mdev, UI_CURRENT, val);
drbd_print_uuids(mdev, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(mdev);
@@ -4070,9 +3108,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
+ unsigned long flags;
if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -4084,6 +3124,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+
drbd_md_mark_dirty(mdev);
}
@@ -4135,9 +3177,10 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
return rv;
}
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_bitmap_io(struct drbd_work *w, int unused)
{
struct bm_io_work *work = container_of(w, struct bm_io_work, w);
+ struct drbd_conf *mdev = w->mdev;
int rv = -EIO;
D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
@@ -4149,8 +3192,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
put_ldev(mdev);
}
- clear_bit(BITMAP_IO, &mdev->flags);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BITMAP_IO, &mdev->flags);
wake_up(&mdev->misc_wait);
if (work->done)
@@ -4160,7 +3202,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
work->why = NULL;
work->flags = 0;
- return 1;
+ return 0;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
@@ -4173,29 +3215,51 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
drbd_free_bc(mdev->ldev);
mdev->ldev = NULL;);
- if (mdev->md_io_tmpp) {
- __free_page(mdev->md_io_tmpp);
- mdev->md_io_tmpp = NULL;
- }
clear_bit(GO_DISKLESS, &mdev->flags);
}
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_go_diskless(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
D_ASSERT(mdev->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
* again, it will be safe to free them. */
+
+ /* Try to write changed bitmap pages, read errors may have just
+ * set some bits outside the area covered by the activity log.
+ *
+ * If we have an IO error during the bitmap writeout,
+ * we will want a full sync next time, just in case.
+ * (Do we want a specific meta data flag for this?)
+ *
+ * If that does not make it to stable storage either,
+ * we cannot do anything about that anymore.
+ *
+ * We still need to check if both bitmap and ldev are present, we may
+ * end up here after a failed attach, before ldev was even assigned.
+ */
+ if (mdev->bitmap && mdev->ldev) {
+ if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ "detach", BM_LOCKED_MASK)) {
+ if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
+ drbd_md_set_flag(mdev, MDF_FULL_SYNC);
+ drbd_md_sync(mdev);
+ }
+ }
+ }
+
drbd_force_state(mdev, NS(disk, D_DISKLESS));
- return 1;
+ return 0;
}
void drbd_go_diskless(struct drbd_conf *mdev)
{
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
/**
@@ -4215,7 +3279,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
void (*done)(struct drbd_conf *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->worker.task);
+ D_ASSERT(current == mdev->tconn->worker.task);
D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
@@ -4229,13 +3293,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
mdev->bm_io_work.why = why;
mdev->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/**
@@ -4252,7 +3316,7 @@ int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
{
int rv;
- D_ASSERT(current != mdev->worker.task);
+ D_ASSERT(current != mdev->tconn->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
drbd_suspend_io(mdev);
@@ -4291,18 +3355,127 @@ static void md_sync_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
+ /* must not double-queue! */
+ if (list_empty(&mdev->md_sync_work.list))
+ drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_md_sync(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
drbd_md_sync(mdev);
- return 1;
+ return 0;
+}
+
+const char *cmdname(enum drbd_packet cmd)
+{
+ /* THINK may need to become several global tables
+ * when we want to support more than
+ * one PRO_VERSION */
+ static const char *cmdnames[] = {
+ [P_DATA] = "Data",
+ [P_DATA_REPLY] = "DataReply",
+ [P_RS_DATA_REPLY] = "RSDataReply",
+ [P_BARRIER] = "Barrier",
+ [P_BITMAP] = "ReportBitMap",
+ [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
+ [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
+ [P_UNPLUG_REMOTE] = "UnplugRemote",
+ [P_DATA_REQUEST] = "DataRequest",
+ [P_RS_DATA_REQUEST] = "RSDataRequest",
+ [P_SYNC_PARAM] = "SyncParam",
+ [P_SYNC_PARAM89] = "SyncParam89",
+ [P_PROTOCOL] = "ReportProtocol",
+ [P_UUIDS] = "ReportUUIDs",
+ [P_SIZES] = "ReportSizes",
+ [P_STATE] = "ReportState",
+ [P_SYNC_UUID] = "ReportSyncUUID",
+ [P_AUTH_CHALLENGE] = "AuthChallenge",
+ [P_AUTH_RESPONSE] = "AuthResponse",
+ [P_PING] = "Ping",
+ [P_PING_ACK] = "PingAck",
+ [P_RECV_ACK] = "RecvAck",
+ [P_WRITE_ACK] = "WriteAck",
+ [P_RS_WRITE_ACK] = "RSWriteAck",
+ [P_SUPERSEDED] = "Superseded",
+ [P_NEG_ACK] = "NegAck",
+ [P_NEG_DREPLY] = "NegDReply",
+ [P_NEG_RS_DREPLY] = "NegRSDReply",
+ [P_BARRIER_ACK] = "BarrierAck",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
+ [P_STATE_CHG_REPLY] = "StateChgReply",
+ [P_OV_REQUEST] = "OVRequest",
+ [P_OV_REPLY] = "OVReply",
+ [P_OV_RESULT] = "OVResult",
+ [P_CSUM_RS_REQUEST] = "CsumRSRequest",
+ [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
+ [P_OUT_OF_SYNC] = "OutOfSync",
+ [P_RETRY_WRITE] = "RetryWrite",
+ [P_RS_CANCEL] = "RSCancel",
+ [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
+ [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
+ [P_RETRY_WRITE] = "retry_write",
+ [P_PROTOCOL_UPDATE] = "protocol_update",
+
+ /* enum drbd_packet, but not commands - obsoleted flags:
+ * P_MAY_IGNORE
+ * P_MAX_OPT_CMD
+ */
+ };
+
+ /* too big for the array: 0xfffX */
+ if (cmd == P_INITIAL_META)
+ return "InitialMeta";
+ if (cmd == P_INITIAL_DATA)
+ return "InitialData";
+ if (cmd == P_CONNECTION_FEATURES)
+ return "ConnectionFeatures";
+ if (cmd >= ARRAY_SIZE(cmdnames))
+ return "Unknown";
+ return cmdnames[cmd];
+}
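
cmdname() maps a packet enum to a printable name through a designated-initializer table, with explicit handling for the few command values that lie outside the array and a bounds check before indexing. The same lookup-table pattern, reduced to a standalone sketch with invented packet names:

/* Standalone sketch of the designated-initializer name table used by cmdname().
 * The enum values and strings here are invented for illustration. */
#include <stdio.h>

enum pkt { PKT_DATA, PKT_ACK, PKT_BARRIER, PKT_MAX };

static const char *pktname(enum pkt cmd)
{
	static const char *names[] = {
		[PKT_DATA]    = "Data",
		[PKT_ACK]     = "Ack",
		[PKT_BARRIER] = "Barrier",
	};

	/* out-of-range values, or gaps left in the table, fall back to "Unknown" */
	if (cmd >= PKT_MAX || !names[cmd])
		return "Unknown";
	return names[cmd];
}

int main(void)
{
	printf("%s %s\n", pktname(PKT_ACK), pktname((enum pkt)42));
	return 0;
}
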
+
+/**
+ * drbd_wait_misc - wait for a request to make progress
+ * @mdev: device associated with the request
+ * @i: the struct drbd_interval embedded in struct drbd_request or
+ * struct drbd_peer_request
+ */
+int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ struct net_conf *nc;
+ DEFINE_WAIT(wait);
+ long timeout;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -ETIMEDOUT;
+ }
+ timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
+ rcu_read_unlock();
+
+ /* Indicate to wake up mdev->misc_wait on progress. */
+ i->waiting = true;
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ timeout = schedule_timeout(timeout);
+ finish_wait(&mdev->misc_wait, &wait);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (!timeout || mdev->state.conn < C_CONNECTED)
+ return -ETIMEDOUT;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ return 0;
}
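
drbd_wait_misc() above prepares a wait on mdev->misc_wait, drops req_lock around schedule_timeout(), then re-takes the lock before deciding between -ETIMEDOUT, -ERESTARTSYS and success. The closest userspace analogue is pthread_cond_timedwait(), which releases the mutex while sleeping and re-acquires it on wakeup; a hedged sketch under that analogy (wait_for_progress/made_progress are hypothetical names):

/* Userspace analogue of the "drop the lock, sleep with a timeout, re-take the
 * lock" pattern in drbd_wait_misc(), using pthread_cond_timedwait(). */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int made_progress;	/* set by the completing side, cf. wake_up(&mdev->misc_wait) */

/* Caller holds 'lock'; returns 0 on progress, -ETIMEDOUT otherwise. */
static int wait_for_progress(int timeout_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	while (!made_progress) {
		/* the mutex is released while we sleep and re-acquired on wakeup */
		int rc = pthread_cond_timedwait(&cond, &lock, &deadline);
		if (rc == ETIMEDOUT)
			return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	int rc = wait_for_progress(1);	/* nobody signals, so this times out */
	pthread_mutex_unlock(&lock);
	printf("wait returned %d\n", rc);
	return 0;
}
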
#ifdef CONFIG_DRBD_FAULT_INJECTION
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index edb490aad8b..2af26fc9528 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -29,159 +29,317 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
-#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
-#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
-#include <linux/compiler.h>
#include <linux/kthread.h>
-static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
-static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
-static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
-
-/* see get_sb_bdev and bd_claim */
+#include <net/genetlink.h>
+
+/* .doit */
+// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
+// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
+/* .dumpit */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+#include <linux/genl_magic_func.h>
+
+/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
-/* Generate the tag_list to struct functions */
-#define NL_PACKET(name, number, fields) \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) \
-{ \
- int tag; \
- int dlen; \
- \
- while ((tag = get_unaligned(tags++)) != TT_END) { \
- dlen = get_unaligned(tags++); \
- switch (tag_number(tag)) { \
- fields \
- default: \
- if (tag & T_MANDATORY) { \
- dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
- return 0; \
- } \
- } \
- tags = (unsigned short *)((char *)tags + dlen); \
- } \
- return 1; \
-}
-#define NL_INTEGER(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
- arg->member = get_unaligned((int *)(tags)); \
- break;
-#define NL_INT64(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
- arg->member = get_unaligned((u64 *)(tags)); \
+/* Configuration is strictly serialized, because generic netlink message
+ * processing is strictly serialized by the genl_lock().
+ * Which means we can use one static global drbd_config_context struct.
+ */
+static struct drbd_config_context {
+ /* assigned from drbd_genlmsghdr */
+ unsigned int minor;
+ /* assigned from request attributes, if present */
+ unsigned int volume;
+#define VOLUME_UNSPECIFIED (-1U)
+ /* pointer into the request skb,
+ * limited lifetime! */
+ char *resource_name;
+ struct nlattr *my_addr;
+ struct nlattr *peer_addr;
+
+ /* reply buffer */
+ struct sk_buff *reply_skb;
+ /* pointer into reply buffer */
+ struct drbd_genlmsghdr *reply_dh;
+ /* resolved from attributes, if possible */
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+} adm_ctx;
+
+static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
+ if (genlmsg_reply(skb, info))
+ printk(KERN_ERR "drbd: error sending genl reply\n");
+}
+
+/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
+ * reason it could fail would be lack of space in the skb, and there are 4k available. */
+int drbd_msg_put_info(const char *info)
+{
+ struct sk_buff *skb = adm_ctx.reply_skb;
+ struct nlattr *nla;
+ int err = -EMSGSIZE;
+
+ if (!info || !info[0])
+ return 0;
+
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ if (!nla)
+ return err;
+
+ err = nla_put_string(skb, T_info_text, info);
+ if (err) {
+ nla_nest_cancel(skb, nla);
+ return err;
+ } else
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
+/* This would be a good candidate for a "pre_doit" hook,
+ * and per-family private info->pointers.
+ * But we need to stay compatible with older kernels.
+ * If it returns successfully, adm_ctx members are valid.
+ */
+#define DRBD_ADM_NEED_MINOR 1
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
+static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
+ unsigned flags)
+{
+ struct drbd_genlmsghdr *d_in = info->userhdr;
+ const u8 cmd = info->genlhdr->cmd;
+ int err;
+
+ memset(&adm_ctx, 0, sizeof(adm_ctx));
+
+ /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
+ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!adm_ctx.reply_skb) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+ info, &drbd_genl_family, 0, cmd);
+ /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
+ * but check anyway */
+ if (!adm_ctx.reply_dh) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh->minor = d_in->minor;
+ adm_ctx.reply_dh->ret_code = NO_ERROR;
+
+ adm_ctx.volume = VOLUME_UNSPECIFIED;
+ if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
+ struct nlattr *nla;
+ /* parse and validate only */
+ err = drbd_cfg_context_from_attrs(NULL, info);
+ if (err)
+ goto fail;
+
+ /* It was present, and valid,
+ * copy it over to the reply skb. */
+ err = nla_put_nohdr(adm_ctx.reply_skb,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]);
+ if (err)
+ goto fail;
+
+ /* and assign stuff to the global adm_ctx */
+ nla = nested_attr_tb[__nla_type(T_ctx_volume)];
+ if (nla)
+ adm_ctx.volume = nla_get_u32(nla);
+ nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+ if (nla)
+ adm_ctx.resource_name = nla_data(nla);
+ adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+ adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+ if ((adm_ctx.my_addr &&
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ (adm_ctx.peer_addr &&
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ adm_ctx.minor = d_in->minor;
+ adm_ctx.mdev = minor_to_mdev(d_in->minor);
+ adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+
+ if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+ drbd_msg_put_info("unknown minor");
+ return ERR_MINOR_INVALID;
+ }
+ if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("unknown resource");
+ return ERR_INVALID_REQUEST;
+ }
+
+ if (flags & DRBD_ADM_NEED_CONNECTION) {
+ if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("no resource name expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev) {
+ drbd_msg_put_info("no minor number expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.my_addr && adm_ctx.peer_addr)
+ adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ nla_len(adm_ctx.my_addr),
+ nla_data(adm_ctx.peer_addr),
+ nla_len(adm_ctx.peer_addr));
+ if (!adm_ctx.tconn) {
+ drbd_msg_put_info("unknown connection");
+ return ERR_INVALID_REQUEST;
+ }
+ }
+
+ /* some more paranoia, if the request was over-determined */
+ if (adm_ctx.mdev && adm_ctx.tconn &&
+ adm_ctx.mdev->tconn != adm_ctx.tconn) {
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+ adm_ctx.minor, adm_ctx.resource_name,
+ adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists in different resource");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev &&
+ adm_ctx.volume != VOLUME_UNSPECIFIED &&
+ adm_ctx.volume != adm_ctx.mdev->vnr) {
+ pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+ adm_ctx.minor, adm_ctx.volume,
+ adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists as different volume");
+ return ERR_INVALID_REQUEST;
+ }
+
+ return NO_ERROR;
+
+fail:
+ nlmsg_free(adm_ctx.reply_skb);
+ adm_ctx.reply_skb = NULL;
+ return err;
+}
+
+static int drbd_adm_finish(struct genl_info *info, int retcode)
+{
+ if (adm_ctx.tconn) {
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+ adm_ctx.tconn = NULL;
+ }
+
+ if (!adm_ctx.reply_skb)
+ return -ENOMEM;
+
+ adm_ctx.reply_dh->ret_code = retcode;
+ drbd_adm_send_reply(adm_ctx.reply_skb, info);
+ return 0;
+}
+
+static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+{
+ char *afs;
+
+ /* FIXME: A future version will not allow this case. */
+ if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ return;
+
+ switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ case AF_INET6:
+ afs = "ipv6";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+ &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
break;
-#define NL_BIT(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
- arg->member = *(char *)(tags) ? 1 : 0; \
+ case AF_INET:
+ afs = "ipv4";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
break;
-#define NL_STRING(pn, pr, member, len) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
- if (dlen > len) { \
- dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
- #member, dlen, (unsigned int)len); \
- return 0; \
- } \
- arg->member ## _len = dlen; \
- memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
- break;
-#include <linux/drbd_nl.h>
-
-/* Generate the struct to tag_list functions */
-#define NL_PACKET(name, number, fields) \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) \
-{ \
- fields \
- return tags; \
-}
-
-#define NL_INTEGER(pn, pr, member) \
- put_unaligned(pn | pr | TT_INTEGER, tags++); \
- put_unaligned(sizeof(int), tags++); \
- put_unaligned(arg->member, (int *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(int));
-#define NL_INT64(pn, pr, member) \
- put_unaligned(pn | pr | TT_INT64, tags++); \
- put_unaligned(sizeof(u64), tags++); \
- put_unaligned(arg->member, (u64 *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(u64));
-#define NL_BIT(pn, pr, member) \
- put_unaligned(pn | pr | TT_BIT, tags++); \
- put_unaligned(sizeof(char), tags++); \
- *(char *)tags = arg->member; \
- tags = (unsigned short *)((char *)tags+sizeof(char));
-#define NL_STRING(pn, pr, member, len) \
- put_unaligned(pn | pr | TT_STRING, tags++); \
- put_unaligned(arg->member ## _len, tags++); \
- memcpy(tags, arg->member, arg->member ## _len); \
- tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include <linux/drbd_nl.h>
-
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
-void drbd_nl_send_reply(struct cn_msg *, int);
+ default:
+ afs = "ssocks";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ }
+ snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
- NULL, /* Will be set to address family */
- NULL, /* Will be set to address */
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
NULL };
-
- char mb[12], af[20], ad[60], *afs;
+ char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct sib_info sib;
int ret;
- if (current == mdev->worker.task)
- set_bit(CALLBACK_PENDING, &mdev->flags);
+ if (current == tconn->worker.task)
+ set_bit(CALLBACK_PENDING, &tconn->flags);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
-
- if (get_net_conf(mdev)) {
- switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
- case AF_INET6:
- afs = "ipv6";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
- break;
- case AF_INET:
- afs = "ipv4";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- break;
- default:
- afs = "ssocks";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- }
- snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
- envp[3]=af;
- envp[4]=ad;
- put_net_conf(mdev);
- }
+ setup_khelper_env(tconn, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
drbd_md_sync(mdev);
dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
-
- drbd_bcast_ev_helper(mdev, cmd);
+ sib.sib_reason = SIB_HELPER_PRE;
+ sib.helper_name = cmd;
+ drbd_bcast_event(mdev, &sib);
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -191,9 +349,46 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
+ sib.sib_reason = SIB_HELPER_POST;
+ sib.helper_exit_code = ret;
+ drbd_bcast_event(mdev, &sib);
+
+ if (current == tconn->worker.task)
+ clear_bit(CALLBACK_PENDING, &tconn->flags);
+
+ if (ret < 0) /* Ignore any ERRNOs we got. */
+ ret = 0;
+
+ return ret;
+}
+
+int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+{
+ char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
+ NULL };
+ char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+ int ret;
+
+ setup_khelper_env(tconn, envp);
+ conn_md_sync(tconn);
- if (current == mdev->worker.task)
- clear_bit(CALLBACK_PENDING, &mdev->flags);
+ conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+ /* TODO: conn_bcast_event() ?? */
+
+ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ if (ret)
+ conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ else
+ conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ /* TODO: conn_bcast_event() ?? */
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -201,116 +396,129 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
return ret;
}
-enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
+ enum drbd_fencing_p fp = FP_NOT_AVAIL;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (get_ldev_if_state(mdev, D_CONSISTENT)) {
+ fp = max_t(enum drbd_fencing_p, fp,
+ rcu_dereference(mdev->ldev->disk_conf)->fencing);
+ put_ldev(mdev);
+ }
+ }
+ rcu_read_unlock();
+
+ return fp;
+}
+
+bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+{
+ union drbd_state mask = { };
+ union drbd_state val = { };
+ enum drbd_fencing_p fp;
char *ex_to_string;
int r;
- enum drbd_disk_state nps;
- enum drbd_fencing_p fp;
- D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+ if (tconn->cstate >= C_WF_REPORT_PARAMS) {
+ conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ return false;
+ }
- if (get_ldev_if_state(mdev, D_CONSISTENT)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- } else {
- dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
- nps = mdev->state.pdsk;
+ fp = highest_fencing_policy(tconn);
+ switch (fp) {
+ case FP_NOT_AVAIL:
+ conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
goto out;
+ case FP_DONT_CARE:
+ return true;
+ default: ;
}
- r = drbd_khelper(mdev, "fence-peer");
+ r = conn_khelper(tconn, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
- nps = D_INCONSISTENT;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_INCONSISTENT;
break;
case 4: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
case 5: /* peer was down */
- if (mdev->state.disk == D_UP_TO_DATE) {
+ if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
} else {
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
- nps = mdev->state.pdsk;
}
break;
case 6: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
- dev_warn(DEV, "Peer is primary, outdating myself.\n");
- nps = D_UNKNOWN;
- _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
+ conn_warn(tconn, "Peer is primary, outdating myself.\n");
+ mask.disk = D_MASK;
+ val.disk = D_OUTDATED;
break;
case 7:
if (fp != FP_STONITH)
- dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
+ conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
- nps = D_UNKNOWN;
- dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
- return nps;
+ conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+ return false; /* Eventually leave IO frozen */
}
- dev_info(DEV, "fence-peer helper returned %d (%s)\n",
- (r>>8) & 0xff, ex_to_string);
+ conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+ (r>>8) & 0xff, ex_to_string);
-out:
- if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
- /* The handler was not successful... unfreeze here, the
- state engine can not unfreeze... */
- _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
- }
+ out:
- return nps;
+ /* Not using
+ conn_request_state(tconn, mask, val, CS_VERBOSE);
+ here, because we might have been able to re-establish the connection in the
+ meantime. */
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
+ _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
- struct drbd_conf *mdev = (struct drbd_conf *)data;
- enum drbd_disk_state nps;
- union drbd_state ns;
+ struct drbd_tconn *tconn = (struct drbd_tconn *)data;
- nps = drbd_try_outdate_peer(mdev);
-
- /* Not using
- drbd_request_state(mdev, NS(pdsk, nps));
- here, because we might were able to re-establish the connection
- in the meantime. This can only partially be solved in the state's
- engine is_valid_state() and is_valid_state_transition()
- functions.
-
- nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
- pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
- therefore we have to have the pre state change check here.
- */
- spin_lock_irq(&mdev->req_lock);
- ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
- ns.pdsk = nps;
- _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
+ conn_try_outdate_peer(tconn);
+ kref_put(&tconn->kref, &conn_destroy);
return 0;
}
-void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
+void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
struct task_struct *opa;
- opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
- if (IS_ERR(opa))
- dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
+ kref_get(&tconn->kref);
+ opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ if (IS_ERR(opa)) {
+ conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
+ kref_put(&tconn->kref, &conn_destroy);
+ }
}
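conn_try_outdate_peer_async() takes a reference on the connection before handing it to the helper thread and drops it either inside the thread or on the kthread_run() error path, so the tconn cannot go away while the fence-peer helper is still running. A generic sketch of that ownership hand-off, with made-up type and function names:

#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/err.h>

struct async_ctx {			/* stand-in for drbd_tconn */
	struct kref kref;
	/* ... */
};

static void async_ctx_destroy(struct kref *kref)
{
	kfree(container_of(kref, struct async_ctx, kref));
}

static int async_worker(void *data)
{
	struct async_ctx *ctx = data;

	/* ... do the long-running work ... */
	kref_put(&ctx->kref, async_ctx_destroy);	/* thread owns one ref */
	return 0;
}

static void start_async(struct async_ctx *ctx)
{
	struct task_struct *t;

	kref_get(&ctx->kref);				/* ref for the thread */
	t = kthread_run(async_worker, ctx, "async_worker");
	if (IS_ERR(t))
		kref_put(&ctx->kref, async_ctx_destroy);	/* never started */
}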
enum drbd_state_rv
@@ -318,15 +526,15 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
+ struct net_conf *nc;
int try = 0;
int forced = 0;
union drbd_state mask, val;
- enum drbd_disk_state nps;
if (new_role == R_PRIMARY)
- request_ping(mdev); /* Detect a dead peer ASAP */
+ request_ping(mdev->tconn); /* Detect a dead peer ASAP */
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
@@ -354,38 +562,34 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (rv == SS_NO_UP_TO_DATE_DISK &&
mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
- nps = drbd_try_outdate_peer(mdev);
- if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
+ if (conn_try_outdate_peer(mdev->tconn)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
-
- val.pdsk = nps;
- mask.pdsk = D_MASK;
-
continue;
}
if (rv == SS_NOTHING_TO_DO)
- goto fail;
+ goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
- nps = drbd_try_outdate_peer(mdev);
-
- if (force && nps > D_OUTDATED) {
+ if (!conn_try_outdate_peer(mdev->tconn) && force) {
dev_warn(DEV, "Forced into split brain situation!\n");
- nps = D_OUTDATED;
- }
-
- mask.pdsk = D_MASK;
- val.pdsk = nps;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
+ }
continue;
}
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
+ int timeo;
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
if (try < max_tries)
try = max_tries - 1;
continue;
@@ -394,13 +598,13 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
rv = _drbd_request_state(mdev, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
}
break;
}
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
if (forced)
dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
@@ -408,6 +612,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* Wait until nothing is on the fly :) */
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ /* FIXME also wait for all pending P_BARRIER_ACK? */
+
if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, true);
if (get_ldev(mdev)) {
@@ -415,10 +621,12 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
put_ldev(mdev);
}
} else {
- if (get_net_conf(mdev)) {
- mdev->net_conf->want_lose = 0;
- put_net_conf(mdev);
- }
+ mutex_lock(&mdev->tconn->conf_update);
+ nc = mdev->tconn->net_conf;
+ if (nc)
+ nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+ mutex_unlock(&mdev->tconn->conf_update);
+
set_disk_ro(mdev->vdisk, false);
if (get_ldev(mdev)) {
if (((mdev->state.conn < C_CONNECTED ||
@@ -444,67 +652,47 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
drbd_md_sync(mdev);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- fail:
- mutex_unlock(&mdev->state_mutex);
+out:
+ mutex_unlock(mdev->state_mutex);
return rv;
}
-static struct drbd_conf *ensure_mdev(int minor, int create)
+static const char *from_attrs_err_to_txt(int err)
{
- struct drbd_conf *mdev;
-
- if (minor >= minor_count)
- return NULL;
-
- mdev = minor_to_mdev(minor);
-
- if (!mdev && create) {
- struct gendisk *disk = NULL;
- mdev = drbd_new_device(minor);
-
- spin_lock_irq(&drbd_pp_lock);
- if (minor_table[minor] == NULL) {
- minor_table[minor] = mdev;
- disk = mdev->vdisk;
- mdev = NULL;
- } /* else: we lost the race */
- spin_unlock_irq(&drbd_pp_lock);
-
- if (disk) /* we won the race above */
- /* in case we ever add a drbd_delete_device(),
- * don't forget the del_gendisk! */
- add_disk(disk);
- else /* we lost the race above */
- drbd_free_mdev(mdev);
-
- mdev = minor_to_mdev(minor);
- }
-
- return mdev;
+ return err == -ENOMSG ? "required attribute missing" :
+ err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+ err == -EEXIST ? "can not change invariant setting" :
+ "invalid attribute value";
}
-static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
- struct primary primary_args;
-
- memset(&primary_args, 0, sizeof(struct primary));
- if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
- }
-
- reply->ret_code =
- drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
+ struct set_role_parms parms;
+ int err;
+ enum drbd_ret_code retcode;
- return 0;
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
-{
- reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
+ err = set_role_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
+ }
+ if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
+ retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+ else
+ retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
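All the new drbd_adm_* entry points introduced by this patch follow the same scaffold: drbd_adm_prepare() resolves the minor or resource from the generic-netlink request and fills adm_ctx, the handler parses its parms struct from the attributes, does the actual work, and drbd_adm_finish() sends the reply. A stripped-down sketch of that shape; the handler name, parms struct and attribute constant are placeholders, not part of the patch:

/* Hypothetical handler following the drbd_adm_* pattern above. */
int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct example_parms parms = { };	/* assumed parms struct */
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;			/* could not even reply */
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_EXAMPLE_PARMS]) {	/* assumed attribute */
		err = example_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	/* ... act on adm_ctx.mdev using parms ... */
out:
	drbd_adm_finish(info, retcode);
	return 0;
}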
@@ -514,7 +702,12 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+
+ switch (meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -533,7 +726,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
case DRBD_MD_INDEX_FLEX_INT:
bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
- bdev->md.al_offset = -MD_AL_MAX_SIZE;
+ bdev->md.al_offset = -MD_AL_SECTORS;
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -549,6 +742,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
break;
}
+ rcu_read_unlock();
}
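With this patch, ldev->disk_conf becomes an RCU-protected pointer: readers take rcu_read_lock(), fetch the pointer with rcu_dereference() and must not touch it after rcu_read_unlock(); writers publish a fresh copy (see drbd_adm_disk_opts below). A minimal sketch of the reader side, copying one field out while the read-side critical section is held:

#include <linux/rcupdate.h>

/* Copy one value out of the RCU-protected configuration; the copied
 * value, not the pointer, may be used after rcu_read_unlock(). */
static int example_get_meta_dev_idx(struct drbd_backing_dev *bdev)
{
	int idx;

	rcu_read_lock();
	idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
	rcu_read_unlock();

	return idx;
}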
/* input size is expected to be in KB */
@@ -581,10 +775,16 @@ char *ppsize(char *buf, unsigned long long size)
* R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
* peer may not initiate a resize.
*/
+/* Note these are not to be confused with
+ * drbd_adm_suspend_io/drbd_adm_resume_io,
+ * which are (sub) state changes triggered by admin (drbdsetup),
+ * and can be long-lived.
+ * This changes an mdev->flag, is triggered by drbd internals,
+ * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
set_bit(SUSPEND_IO, &mdev->flags);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}
@@ -605,7 +805,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
- sector_t la_size;
+ sector_t la_size, u_size;
sector_t size;
char ppb[10];
@@ -633,7 +833,10 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
@@ -696,12 +899,12 @@ out:
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
- sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
@@ -750,24 +953,21 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
- ERR_IF(mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
-
if (mdev->act_log &&
- mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+ mdev->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
t = mdev->act_log;
- n = lc_create("act_log", drbd_al_ext_cache,
- mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+ n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
+ dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -808,7 +1008,9 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- max_segments = mdev->ldev->dc.max_bio_bvecs;
+ rcu_read_lock();
+ max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+ rcu_read_unlock();
put_ldev(mdev);
}
@@ -852,12 +1054,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94) {
- peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ if (mdev->tconn->agreed_pro_version < 94)
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- } else if (mdev->agreed_pro_version == 94)
+ else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else /* drbd 8.3.8 onwards */
+ else if (mdev->tconn->agreed_pro_version < 100)
+ peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
+ else
peer = DRBD_MAX_BIO_SIZE;
}
@@ -872,36 +1076,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
drbd_setup_queue_param(mdev, new);
}
-/* serialize deconfig (worker exiting, doing cleanup)
- * and reconfig (drbdsetup disk, drbdsetup net)
- *
- * Wait for a potentially exiting worker, then restart it,
- * or start a new one. Flush any pending work, there may still be an
- * after_state_change queued.
- */
-static void drbd_reconfig_start(struct drbd_conf *mdev)
+/* Starts the worker thread */
+static void conn_reconfig_start(struct drbd_tconn *tconn)
{
- wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
- wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
- drbd_thread_start(&mdev->worker);
- drbd_flush_workqueue(mdev);
+ drbd_thread_start(&tconn->worker);
+ conn_flush_workqueue(tconn);
}
-/* if still unconfigured, stops worker again.
- * if configured now, clears CONFIG_PENDING.
- * wakes potential waiters */
-static void drbd_reconfig_done(struct drbd_conf *mdev)
+/* if still unconfigured, stops worker again. */
+static void conn_reconfig_done(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.disk == D_DISKLESS &&
- mdev->state.conn == C_STANDALONE &&
- mdev->state.role == R_SECONDARY) {
- set_bit(DEVICE_DYING, &mdev->flags);
- drbd_thread_stop_nowait(&mdev->worker);
- } else
- clear_bit(CONFIG_PENDING, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
- wake_up(&mdev->state_wait);
+ bool stop_threads;
+ spin_lock_irq(&tconn->req_lock);
+ stop_threads = conn_all_vols_unconf(tconn) &&
+ tconn->cstate == C_STANDALONE;
+ spin_unlock_irq(&tconn->req_lock);
+ if (stop_threads) {
+ /* asender is implicitly stopped by receiver
+ * in conn_disconnect() */
+ drbd_thread_stop(&tconn->receiver);
+ drbd_thread_stop(&tconn->worker);
+ }
}
/* Make sure IO is suspended before calling this function. */
@@ -909,42 +1104,187 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
{
int s = 0;
- if (lc_try_lock(mdev->act_log)) {
- drbd_al_shrink(mdev);
- lc_unlock(mdev->act_log);
- } else {
+ if (!lc_try_lock(mdev->act_log)) {
dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
return;
}
- spin_lock_irq(&mdev->req_lock);
+ drbd_al_shrink(mdev);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ lc_unlock(mdev->act_log);
if (s)
dev_info(DEV, "Suspended AL updates\n");
}
-/* does always return 0;
- * interesting return code is in reply->ret_code */
-static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+
+static bool should_set_defaults(struct genl_info *info)
+{
+ unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
+ return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
+}
+
+static void enforce_disk_conf_limits(struct disk_conf *dc)
+{
+ if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
+ dc->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
+ dc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+ if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+}
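enforce_disk_conf_limits() clamps user-supplied values into the DRBD_*_MIN/MAX range before they are applied. The same bounds could also be expressed with the kernel's clamp_t()/min_t() helpers; a behaviour-equivalent sketch, assuming the same struct and limit macros:

#include <linux/kernel.h>	/* clamp_t(), min_t() */

static void enforce_disk_conf_limits_alt(struct disk_conf *dc)
{
	dc->al_extents   = clamp_t(u32, dc->al_extents,
				   DRBD_AL_EXTENTS_MIN, DRBD_AL_EXTENTS_MAX);
	/* c_plan_ahead only has an upper bound here */
	dc->c_plan_ahead = min_t(u32, dc->c_plan_ahead, DRBD_C_PLAN_AHEAD_MAX);
}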
+
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
+ struct drbd_conf *mdev;
+ struct disk_conf *new_disk_conf, *old_disk_conf;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
+ int err, fifo_size;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* we also need a disk
+ * to change the options on */
+ if (!get_ldev(mdev)) {
+ retcode = ERR_NO_DISK;
+ goto out;
+ }
+
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ if (should_set_defaults(info))
+ set_disk_conf_defaults(new_disk_conf);
+
+ err = disk_conf_from_attrs_for_change(new_disk_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ }
+
+ if (!expect(new_disk_conf->resync_rate >= 1))
+ new_disk_conf->resync_rate = 1;
+
+ enforce_disk_conf_limits(new_disk_conf);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
+ dev_err(DEV, "kmalloc of fifo_buffer failed");
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+ }
+
+ drbd_suspend_io(mdev);
+ wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ drbd_al_shrink(mdev);
+ err = drbd_check_al_size(mdev, new_disk_conf);
+ lc_unlock(mdev->act_log);
+ wake_up(&mdev->al_wait);
+ drbd_resume_io(mdev);
+
+ if (err) {
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+
+ write_lock_irq(&global_state_lock);
+ retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ if (retcode == NO_ERROR) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ drbd_resync_after_changed(mdev);
+ }
+ write_unlock_irq(&global_state_lock);
+
+ if (retcode != NO_ERROR)
+ goto fail_unlock;
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+
+ if (new_disk_conf->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ if (new_disk_conf->md_flushes)
+ clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
+
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
+ drbd_md_sync(mdev);
+
+ if (mdev->state.conn >= C_CONNECTED)
+ drbd_send_sync_param(mdev);
+
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ kfree(old_plan);
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+ goto success;
+
+fail_unlock:
+ mutex_unlock(&mdev->tconn->conf_update);
+ fail:
+ kfree(new_disk_conf);
+ kfree(new_plan);
+success:
+ put_ldev(mdev);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
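drbd_adm_disk_opts() is the write side of the RCU scheme: allocate a new disk_conf, copy the old one, apply the changes while holding tconn->conf_update, publish the copy with rcu_assign_pointer(), and only kfree() the old one after synchronize_rcu() has guaranteed that no reader can still see it. Condensed to its skeleton (locking of the ldev and error handling omitted):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Skeleton of the copy/publish/free sequence used above. */
static int update_disk_conf_skeleton(struct drbd_conf *mdev)
{
	struct disk_conf *new_conf, *old_conf;

	new_conf = kmalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf)
		return -ENOMEM;

	mutex_lock(&mdev->tconn->conf_update);		/* serialize writers */
	old_conf = mdev->ldev->disk_conf;
	*new_conf = *old_conf;
	/* ... modify *new_conf ... */
	rcu_assign_pointer(mdev->ldev->disk_conf, new_conf);
	mutex_unlock(&mdev->tconn->conf_update);

	synchronize_rcu();				/* wait out readers */
	kfree(old_conf);
	return 0;
}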
+
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ int err;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+ struct disk_conf *new_disk_conf = NULL;
struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
+ struct fifo_buffer *new_plan = NULL;
union drbd_state ns, os;
enum drbd_state_rv rv;
- int cp_discovered = 0;
- int logical_block_size;
+ struct net_conf *nc;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto finish;
+
+ mdev = adm_ctx.mdev;
+ conn_reconfig_start(mdev->tconn);
/* if you want to reconfigure, please tear down first */
if (mdev->state.disk > D_DISKLESS) {
@@ -959,47 +1299,65 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* make sure there is no leftover from previous force-detach attempts */
clear_bit(FORCE_DETACH, &mdev->flags);
+ clear_bit(WAS_IO_ERROR, &mdev->flags);
+ clear_bit(WAS_READ_ERROR, &mdev->flags);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- /* allocation not in the IO path, cqueue thread context */
+ /* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
retcode = ERR_NOMEM;
goto fail;
}
+ spin_lock_init(&nbc->md.uuid_lock);
- nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
- nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
- nbc->dc.fencing = DRBD_FENCING_DEF;
- nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+ nbc->disk_conf = new_disk_conf;
- if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
+ set_disk_conf_defaults(new_disk_conf);
+ err = disk_conf_from_attrs(new_disk_conf, info);
+ if (err) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+ enforce_disk_conf_limits(new_disk_conf);
+
+ new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
+ if (!new_plan) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
- if (get_net_conf(mdev)) {
- int prot = mdev->net_conf->wire_protocol;
- put_net_conf(mdev);
- if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+ rcu_read_unlock();
retcode = ERR_STONITH_AND_PROT_A;
goto fail;
}
}
+ rcu_read_unlock();
- bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
@@ -1014,12 +1372,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- (nbc->dc.meta_dev_idx < 0) ?
+ (new_disk_conf->meta_dev_idx < 0) ?
(void *)mdev : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
@@ -1027,14 +1385,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->md_bdev = bdev;
if ((nbc->backing_bdev == nbc->md_bdev) !=
- (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
- nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+ (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
resync_lru = lc_create("resync", drbd_bm_ext_cache,
- 61, sizeof(struct bm_extent),
+ 1, 61, sizeof(struct bm_extent),
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
@@ -1044,21 +1402,21 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
drbd_md_set_sector_offsets(mdev, nbc);
- if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+ if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
- (unsigned long long) nbc->dc.disk_size);
+ (unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
- if (nbc->dc.meta_dev_idx < 0) {
+ if (new_disk_conf->meta_dev_idx < 0) {
max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
/* at least one MB, otherwise it does not make sense */
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
- min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+ min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1083,14 +1441,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
- if (nbc->dc.meta_dev_idx >= 0)
+ if (new_disk_conf->meta_dev_idx >= 0)
dev_warn(DEV, "==>> using internal or flexible "
"meta data may help <<==\n");
}
drbd_suspend_io(mdev);
/* also wait for the last barrier ack. */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
+ /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+ * We need a way to either ignore barrier acks for barriers sent before a device
+ * was attached, or a way to wait for all pending barrier acks to come in.
+ * As barriers are counted per resource,
+ * we'd need to suspend io on all devices of a resource.
+ */
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
/* and for any other previously queued work */
drbd_flush_workqueue(mdev);
@@ -1105,25 +1469,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_md_set_sector_offsets(mdev, nbc);
- /* allocate a second IO page if logical_block_size != 512 */
- logical_block_size = bdev_logical_block_size(nbc->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- if (logical_block_size != MD_SECTOR_SIZE) {
- if (!mdev->md_io_tmpp) {
- struct page *page = alloc_page(GFP_NOIO);
- if (!page)
- goto force_diskless_dec;
-
- dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
- logical_block_size, MD_SECTOR_SIZE);
- dev_warn(DEV, "Workaround engaged (has performance impact).\n");
-
- mdev->md_io_tmpp = page;
- }
- }
-
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
@@ -1145,30 +1490,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
/* Since we are diskless, fix the activity log first... */
- if (drbd_check_al_size(mdev)) {
+ if (drbd_check_al_size(mdev, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
+ drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
- if (!drbd_al_read_log(mdev, nbc)) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
-
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
- if (nbc->dc.no_md_flush)
- set_bit(MD_NO_FUA, &mdev->flags);
- else
+ if (new_disk_conf->md_flushes)
clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1177,11 +1517,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
D_ASSERT(mdev->ldev == NULL);
mdev->ldev = nbc;
mdev->resync = resync_lru;
+ mdev->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
+ new_disk_conf = NULL;
+ new_plan = NULL;
- mdev->write_ordering = WO_bdev_flush;
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1189,10 +1531,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
clear_bit(CRASHED_PRIMARY, &mdev->flags);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
+ !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
set_bit(CRASHED_PRIMARY, &mdev->flags);
- cp_discovered = 1;
- }
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
@@ -1228,7 +1568,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
} else if (dd == grew)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+ if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+ drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
@@ -1238,16 +1580,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
} else {
if (drbd_bitmap_io(mdev, &drbd_bm_read,
- "read from attaching", BM_LOCKED_MASK) < 0) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
- }
-
- if (cp_discovered) {
- drbd_al_apply_to_bm(mdev);
- if (drbd_bitmap_io(mdev, &drbd_bm_write,
- "crashed primary apply AL", BM_LOCKED_MASK)) {
+ "read from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -1256,9 +1589,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
drbd_suspend_al(mdev); /* IO is still suspended here... */
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- ns.i = os.i;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
+ ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
@@ -1276,8 +1609,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
- if ( ns.disk == D_CONSISTENT &&
- (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
+ rcu_read_lock();
+ if (ns.disk == D_CONSISTENT &&
+ (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1285,6 +1619,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
this point, because drbd_request_state() modifies these
flags. */
+ if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ rcu_read_unlock();
+
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (mdev->state.conn == C_CONNECTED) {
@@ -1300,12 +1641,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
if (mdev->state.role == R_PRIMARY)
mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
@@ -1316,16 +1658,17 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(mdev);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(mdev->tconn);
+ drbd_adm_finish(info, retcode);
return 0;
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
fail:
+ conn_reconfig_done(mdev->tconn);
if (nbc) {
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
@@ -1335,34 +1678,24 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
+ kfree(new_disk_conf);
lc_destroy(resync_lru);
+ kfree(new_plan);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ finish:
+ drbd_adm_finish(info, retcode);
return 0;
}
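drbd_adm_attach() is a long instance of the kernel's goto-based unwinding: resources are acquired in order, each failure jumps to a label that releases exactly what has been set up so far, and past the "point of no return" ownership moves into mdev, so the pointers are NULLed to keep the error path from freeing them again. A compact illustration of the idiom; the sizes and do_final_step() are placeholders:

#include <linux/slab.h>

/* Generic shape of the unwind ladder used in drbd_adm_attach(). */
static int setup_example(void)
{
	void *a, *b;
	int err;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(128, GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto free_a;
	}

	err = do_final_step(a, b);	/* placeholder for the real work */
	if (err)
		goto free_b;

	return 0;	/* success: ownership has been handed over */

free_b:
	kfree(b);
free_a:
	kfree(a);
	return err;
}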
-/* Detaching the disk is a process in multiple stages. First we need to lock
- * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
- * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
- * internal references as well.
- * Only then we have finally detached. */
-static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static int adm_detach(struct drbd_conf *mdev, int force)
{
- enum drbd_ret_code retcode;
+ enum drbd_state_rv retcode;
int ret;
- struct detach dt = {};
- if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- goto out;
- }
-
- if (dt.detach_force) {
+ if (force) {
set_bit(FORCE_DETACH, &mdev->flags);
drbd_force_state(mdev, NS(disk, D_FAILED));
- reply->ret_code = SS_SUCCESS;
+ retcode = SS_SUCCESS;
goto out;
}
@@ -1374,326 +1707,529 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
-
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
- reply->ret_code = retcode;
out:
- return 0;
+ return retcode;
}
-static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then we have finally detached. */
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
- int i, ns;
enum drbd_ret_code retcode;
- struct net_conf *new_conf = NULL;
- struct crypto_hash *tfm = NULL;
- struct crypto_hash *integrity_w_tfm = NULL;
- struct crypto_hash *integrity_r_tfm = NULL;
- struct hlist_head *new_tl_hash = NULL;
- struct hlist_head *new_ee_hash = NULL;
- struct drbd_conf *odev;
- char hmac_name[CRYPTO_MAX_ALG_NAME];
- void *int_dig_out = NULL;
- void *int_dig_in = NULL;
- void *int_dig_vv = NULL;
- struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+ struct detach_parms parms = { };
+ int err;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (mdev->state.conn > C_STANDALONE) {
- retcode = ERR_NET_CONFIGURED;
- goto fail;
+ if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+ err = detach_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
- /* allocation not in the IO path, cqueue thread context */
+ retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+static bool conn_resync_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_SYNC_SOURCE ||
+ mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_PAUSED_SYNC_S ||
+ mdev->state.conn == C_PAUSED_SYNC_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_VERIFY_S ||
+ mdev->state.conn == C_VERIFY_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
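Since a connection now carries several volumes, per-connection predicates such as conn_resync_running() and conn_ov_running() walk tconn->volumes with idr_for_each_entry() under rcu_read_lock() and stop at the first match. The same walk is the standard way to apply anything to every volume of a connection; a hypothetical example helper:

#include <linux/idr.h>
#include <linux/rcupdate.h>

/* Assumed helper, not in the patch: count configured volumes. */
static int conn_count_volumes_example(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, count = 0;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		count++;
	rcu_read_unlock();

	return count;
}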
+
+static enum drbd_ret_code
+_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+{
+ struct drbd_conf *mdev;
+ int i;
+
+ if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+ if (new_conf->wire_protocol != old_conf->wire_protocol)
+ return ERR_NEED_APV_100;
+
+ if (new_conf->two_primaries != old_conf->two_primaries)
+ return ERR_NEED_APV_100;
+
+ if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+ return ERR_NEED_APV_100;
+ }
+
+ if (!new_conf->two_primaries &&
+ conn_highest_role(tconn) == R_PRIMARY &&
+ conn_highest_peer(tconn) == R_PRIMARY)
+ return ERR_NEED_ALLOW_TWO_PRI;
+
+ if (new_conf->two_primaries &&
+ (new_conf->wire_protocol != DRBD_PROT_C))
+ return ERR_NOT_PROTO_C;
+
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (get_ldev(mdev)) {
+ enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+ return ERR_STONITH_AND_PROT_A;
+ }
+ if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+ return ERR_DISCARD_IMPOSSIBLE;
+ }
+
+ if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+ return ERR_CONG_NOT_PROTO_A;
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+{
+ enum drbd_ret_code rv;
+ struct drbd_conf *mdev;
+ int i;
+
+ rcu_read_lock();
+ rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+ rcu_read_unlock();
+
+ /* tconn->volumes protected by genl_lock() here */
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (!mdev->bitmap) {
+ if(drbd_bm_init(mdev))
+ return ERR_NOMEM;
+ }
+ }
+
+ return rv;
+}
+
+struct crypto {
+ struct crypto_hash *verify_tfm;
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm;
+};
+
+static int
+alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+{
+ if (!tfm_name[0])
+ return NO_ERROR;
+
+ *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(*tfm)) {
+ *tfm = NULL;
+ return err_alg;
+ }
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+{
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ enum drbd_ret_code rv;
+
+ rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+ ERR_CSUMS_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+ ERR_VERIFY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+ ERR_INTEGRITY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ if (new_conf->cram_hmac_alg[0] != 0) {
+ snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+ new_conf->cram_hmac_alg);
+
+ rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
+ ERR_AUTH_ALG);
+ }
+
+ return rv;
+}
+
+static void free_crypto(struct crypto *crypto)
+{
+ crypto_free_hash(crypto->cram_hmac_tfm);
+ crypto_free_hash(crypto->integrity_tfm);
+ crypto_free_hash(crypto->csums_tfm);
+ crypto_free_hash(crypto->verify_tfm);
+}
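alloc_crypto() concentrates all hash-tfm allocation in one place: each algorithm name is optional (an empty string means "off"), a failed crypto_alloc_hash() maps to a specific ERR_* code, and the error paths below rely on free_crypto() being safe on a partially filled struct. A usage sketch of the pair, along the lines of drbd_adm_net_opts() below; the helper name is an assumption:

/* Sketch (not from the patch): allocate the transforms for a candidate
 * net_conf and install just one of them, clearing the local pointer so
 * a later free_crypto() will not free what has been handed over. */
static enum drbd_ret_code install_verify_tfm_example(struct drbd_tconn *tconn,
						     struct net_conf *new_conf)
{
	struct crypto crypto = { };
	enum drbd_ret_code rv;

	rv = alloc_crypto(&crypto, new_conf);
	if (rv == NO_ERROR) {
		crypto_free_hash(tconn->verify_tfm);	/* drop the old one */
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;		/* now owned by tconn */
	}
	free_crypto(&crypto);		/* frees whatever was not handed over */
	return rv;
}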
+
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct net_conf *old_conf, *new_conf = NULL;
+ int err;
+ int ovr; /* online verify running */
+ int rsr; /* re-sync running */
+ struct crypto crypto = { };
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ tconn = adm_ctx.tconn;
+
new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
+ goto out;
+ }
+
+ conn_reconfig_start(tconn);
+
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+
+ if (!old_conf) {
+ drbd_msg_put_info("net conf missing, try connect");
+ retcode = ERR_INVALID_REQUEST;
goto fail;
}
- new_conf->timeout = DRBD_TIMEOUT_DEF;
- new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
- new_conf->ping_int = DRBD_PING_INT_DEF;
- new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
- new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
- new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
- new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
- new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
- new_conf->ko_count = DRBD_KO_COUNT_DEF;
- new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
- new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
- new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
- new_conf->want_lose = 0;
- new_conf->two_primaries = 0;
- new_conf->wire_protocol = DRBD_PROT_C;
- new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
- new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
- new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
- new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;
-
- if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
+ *new_conf = *old_conf;
+ if (should_set_defaults(info))
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs_for_change(new_conf, info);
+ if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (new_conf->two_primaries
- && (new_conf->wire_protocol != DRBD_PROT_C)) {
- retcode = ERR_NOT_PROTO_C;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- if (get_ldev(mdev)) {
- enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
- retcode = ERR_STONITH_AND_PROT_A;
- goto fail;
- }
+ /* re-sync running */
+ rsr = conn_resync_running(tconn);
+ if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+ retcode = ERR_CSUMS_RESYNC_RUNNING;
+ goto fail;
}
- if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
- retcode = ERR_CONG_NOT_PROTO_A;
+ /* online verify running */
+ ovr = conn_ov_running(tconn);
+ if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+ retcode = ERR_VERIFY_RUNNING;
goto fail;
}
- if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
- retcode = ERR_DISCARD;
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- retcode = NO_ERROR;
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- new_my_addr = (struct sockaddr *)&new_conf->my_addr;
- new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev || odev == mdev)
- continue;
- if (get_net_conf(odev)) {
- taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
- if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
- !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
- retcode = ERR_LOCAL_ADDR;
-
- taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
- if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
- !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
- retcode = ERR_PEER_ADDR;
-
- put_net_conf(odev);
- if (retcode != NO_ERROR)
- goto fail;
- }
+ if (!rsr) {
+ crypto_free_hash(tconn->csums_tfm);
+ tconn->csums_tfm = crypto.csums_tfm;
+ crypto.csums_tfm = NULL;
+ }
+ if (!ovr) {
+ crypto_free_hash(tconn->verify_tfm);
+ tconn->verify_tfm = crypto.verify_tfm;
+ crypto.verify_tfm = NULL;
}
- if (new_conf->cram_hmac_alg[0] != 0) {
- snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- new_conf->cram_hmac_alg);
- tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- tfm = NULL;
- retcode = ERR_AUTH_ALG;
- goto fail;
- }
+ crypto_free_hash(tconn->integrity_tfm);
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
+ /* Do this without trying to take tconn->data.mutex again. */
+ __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- retcode = ERR_AUTH_ALG_ND;
- goto fail;
- }
- }
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
- if (new_conf->integrity_alg[0]) {
- integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_w_tfm)) {
- integrity_w_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ synchronize_rcu();
+ kfree(old_conf);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
- retcode=ERR_INTEGRITY_ALG_ND;
- goto fail;
- }
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
- integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_r_tfm)) {
- integrity_r_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ goto done;
+
+ fail:
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ free_crypto(&crypto);
+ kfree(new_conf);
+ done:
+ conn_reconfig_done(tconn);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ struct net_conf *old_conf, *new_conf = NULL;
+ struct crypto crypto = { };
+ struct drbd_tconn *tconn;
+ enum drbd_ret_code retcode;
+ int i;
+ int err;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+ if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+ drbd_msg_put_info("connection endpoint(s) missing");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
}
- ns = new_conf->max_epoch_size/8;
- if (mdev->tl_hash_s != ns) {
- new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_tl_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ /* No need for _rcu here. All reconfiguration is
+ * strictly serialized on genl_lock(). We are protected against
+ * concurrent reconfiguration/addition/deletion */
+ list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+ if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
}
- }
- ns = new_conf->max_buffers/8;
- if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
- new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_ee_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
}
}
- ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+ tconn = adm_ctx.tconn;
+ conn_reconfig_start(tconn);
- if (integrity_w_tfm) {
- i = crypto_hash_digestsize(integrity_w_tfm);
- int_dig_out = kmalloc(i, GFP_KERNEL);
- if (!int_dig_out) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_in = kmalloc(i, GFP_KERNEL);
- if (!int_dig_in) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_vv = kmalloc(i, GFP_KERNEL);
- if (!int_dig_vv) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ if (tconn->cstate > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
+ goto fail;
}
- if (!mdev->bitmap) {
- if(drbd_bm_init(mdev)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ /* allocation not in the IO path, drbdsetup / netlink process context */
+ new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
+ if (!new_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
}
- drbd_flush_workqueue(mdev);
- spin_lock_irq(&mdev->req_lock);
- if (mdev->net_conf != NULL) {
- retcode = ERR_NET_CONFIGURED;
- spin_unlock_irq(&mdev->req_lock);
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs(new_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- mdev->net_conf = new_conf;
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
- if (new_tl_hash) {
- kfree(mdev->tl_hash);
- mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
- mdev->tl_hash = new_tl_hash;
- }
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
+
+ ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+
+ conn_flush_workqueue(tconn);
- if (new_ee_hash) {
- kfree(mdev->ee_hash);
- mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
- mdev->ee_hash = new_ee_hash;
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ if (old_conf) {
+ retcode = ERR_NET_CONFIGURED;
+ mutex_unlock(&tconn->conf_update);
+ goto fail;
}
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = tfm;
+ conn_free_crypto(tconn);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ tconn->csums_tfm = crypto.csums_tfm;
+ tconn->verify_tfm = crypto.verify_tfm;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = integrity_w_tfm;
+ tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+ tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = integrity_r_tfm;
+ mutex_unlock(&tconn->conf_update);
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
- mdev->int_dig_out=int_dig_out;
- mdev->int_dig_in=int_dig_in;
- mdev->int_dig_vv=int_dig_vv;
- retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ mdev->send_cnt = 0;
+ mdev->recv_cnt = 0;
+ }
+ rcu_read_unlock();
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ conn_reconfig_done(tconn);
+ drbd_adm_finish(info, retcode);
return 0;
fail:
- kfree(int_dig_out);
- kfree(int_dig_in);
- kfree(int_dig_vv);
- crypto_free_hash(tfm);
- crypto_free_hash(integrity_w_tfm);
- crypto_free_hash(integrity_r_tfm);
- kfree(new_tl_hash);
- kfree(new_ee_hash);
+ free_crypto(&crypto);
kfree(new_conf);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(tconn);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
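drbd_adm_connect() rejects endpoints that are already in use by comparing the netlink-supplied address attributes against every existing connection: same length and identical bytes means the same sockaddr. Factored out, that comparison looks like the sketch below; the helper name is an assumption:

#include <net/netlink.h>
#include <linux/string.h>

/* Assumed helper: does this address attribute match a stored address? */
static bool nla_addr_matches(const struct nlattr *nla,
			     const void *addr, int addr_len)
{
	return nla_len(nla) == addr_len &&
	       memcmp(nla_data(nla), addr, addr_len) == 0;
}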
-static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
- int retcode;
- struct disconnect dc;
-
- memset(&dc, 0, sizeof(struct disconnect));
- if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- if (dc.force) {
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.conn >= C_WF_CONNECTION)
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
- spin_unlock_irq(&mdev->req_lock);
- goto done;
- }
+ enum drbd_state_rv rv;
- retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ force ? CS_HARD : 0);
- if (retcode == SS_NOTHING_TO_DO)
- goto done;
- else if (retcode == SS_ALREADY_STANDALONE)
- goto done;
- else if (retcode == SS_PRIMARY_NOP) {
- /* Our statche checking code wants to see the peer outdated. */
- retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- pdsk, D_OUTDATED));
- } else if (retcode == SS_CW_FAILED_BY_PEER) {
+ switch (rv) {
+ case SS_NOTHING_TO_DO:
+ break;
+ case SS_ALREADY_STANDALONE:
+ return SS_SUCCESS;
+ case SS_PRIMARY_NOP:
+ /* Our state checking code wants to see the peer outdated. */
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ pdsk, D_OUTDATED), CS_VERBOSE);
+ break;
+ case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
- retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- disk, D_OUTDATED),
- CS_ORDERED);
- if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- retcode = SS_SUCCESS;
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ disk, D_OUTDATED), 0);
+ if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ CS_HARD);
}
+ break;
+ default:;
+ /* no special handling necessary */
+ }
+
+ if (rv >= SS_SUCCESS) {
+ enum drbd_state_rv rv2;
+ /* No one else can reconfigure the network while I am here.
+ * The state handling only uses drbd_thread_stop_nowait(),
+ * here we really want to wait until the receiver has stopped.
+ */
+ drbd_thread_stop(&adm_ctx.tconn->receiver);
+
+ /* Race breaker. This additional state change request may be
+ * necessary, if this was a forced disconnect during a receiver
+ * restart. We may have "killed" the receiver thread just
+ * after drbdd_init() returned. Typically, we should be
+ * C_STANDALONE already, now, and this becomes a no-op.
+ */
+ rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+ CS_VERBOSE | CS_HARD);
+ if (rv2 < SS_SUCCESS)
+ conn_err(tconn,
+ "unexpected rv2=%d in conn_try_disconnect()\n",
+ rv2);
}
+ return rv;
+}
- if (retcode < SS_SUCCESS)
- goto fail;
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct disconnect_parms parms;
+ struct drbd_tconn *tconn;
+ enum drbd_state_rv rv;
+ enum drbd_ret_code retcode;
+ int err;
- if (wait_event_interruptible(mdev->state_wait,
- mdev->state.conn != C_DISCONNECTING)) {
- /* Do not test for mdev->state.conn == C_STANDALONE, since
- someone else might connect us in the mean time! */
- retcode = ERR_INTR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ tconn = adm_ctx.tconn;
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
+ err = disconnect_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
- done:
- retcode = NO_ERROR;
+ rv = conn_try_disconnect(tconn, parms.force_disconnect);
+ if (rv < SS_SUCCESS)
+ retcode = rv; /* FIXME: Type mismatch. */
+ else
+ retcode = NO_ERROR;
fail:
- drbd_md_sync(mdev);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -1705,7 +2241,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1713,20 +2249,34 @@ void resync_after_online_grow(struct drbd_conf *mdev)
_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
-static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
- struct resize rs;
- int retcode = NO_ERROR;
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+ struct resize_parms rs;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
enum dds_flags ddsf;
+ sector_t u_size;
+ int err;
- memset(&rs, 0, sizeof(struct resize));
- if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
- retcode = ERR_MANDATORY_TAG;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ memset(&rs, 0, sizeof(struct resize_parms));
+ if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+ err = resize_parms_from_attrs(&rs, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
+ mdev = adm_ctx.mdev;
if (mdev->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail;
@@ -1743,15 +2293,36 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ if (u_size != (sector_t)rs.resize_size) {
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail_ldev;
+ }
+ }
+
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
+ if (new_disk_conf) {
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = (sector_t)rs.resize_size;
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ }
+
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
dd = drbd_determine_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
@@ -1770,7 +2341,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
fail:
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
fail_ldev:
@@ -1778,204 +2349,55 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
-static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct res_opts res_opts;
int err;
- int ovr; /* online verify running */
- int rsr; /* re-sync running */
- struct crypto_hash *verify_tfm = NULL;
- struct crypto_hash *csums_tfm = NULL;
- struct syncer_conf sc;
- cpumask_var_t new_cpu_mask;
- int *rs_plan_s = NULL;
- int fifo_size;
-
- if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
- memset(&sc, 0, sizeof(struct syncer_conf));
- sc.rate = DRBD_RATE_DEF;
- sc.after = DRBD_AFTER_DEF;
- sc.al_extents = DRBD_AL_EXTENTS_DEF;
- sc.on_no_data = DRBD_ON_NO_DATA_DEF;
- sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
- sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
- sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
- sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
- sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
- } else
- memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
-
- if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- /* re-sync running */
- rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T );
-
- if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
- retcode = ERR_CSUMS_RESYNC_RUNNING;
- goto fail;
- }
-
- if (!rsr && sc.csums_alg[0]) {
- csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(csums_tfm)) {
- csums_tfm = NULL;
- retcode = ERR_CSUMS_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
- retcode = ERR_CSUMS_ALG_ND;
- goto fail;
- }
- }
-
- /* online verify running */
- ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
- if (ovr) {
- if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
- retcode = ERR_VERIFY_RUNNING;
- goto fail;
- }
- }
-
- if (!ovr && sc.verify_alg[0]) {
- verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(verify_tfm)) {
- verify_tfm = NULL;
- retcode = ERR_VERIFY_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
- retcode = ERR_VERIFY_ALG_ND;
- goto fail;
- }
- }
-
- /* silently ignore cpu mask on UP kernel */
- if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
- err = bitmap_parse(sc.cpu_mask, 32,
- cpumask_bits(new_cpu_mask), nr_cpu_ids);
- if (err) {
- dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
- retcode = ERR_CPU_MASK_PARSE;
- goto fail;
- }
- }
-
- ERR_IF (sc.rate < 1) sc.rate = 1;
- ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
-#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
- if (sc.al_extents > AL_MAX) {
- dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
- sc.al_extents = AL_MAX;
- }
-#undef AL_MAX
-
- /* to avoid spurious errors when configuring minors before configuring
- * the minors they depend on: if necessary, first create the minor we
- * depend on */
- if (sc.after >= 0)
- ensure_mdev(sc.after, 1);
-
- /* most sanity checks done, try to assign the new sync-after
- * dependency. need to hold the global lock in there,
- * to avoid a race in the dependency loop check. */
- retcode = drbd_alter_sa(mdev, sc.after);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
if (retcode != NO_ERROR)
goto fail;
+ tconn = adm_ctx.tconn;
- fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
- retcode = ERR_NOMEM;
- goto fail;
- }
- }
+ res_opts = tconn->res_opts;
+ if (should_set_defaults(info))
+ set_res_opts_defaults(&res_opts);
- /* ok, assign the rest of it as well.
- * lock against receive_SyncParam() */
- spin_lock(&mdev->peer_seq_lock);
- mdev->sync_conf = sc;
-
- if (!rsr) {
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- csums_tfm = NULL;
- }
-
- if (!ovr) {
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- verify_tfm = NULL;
- }
-
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
- rs_plan_s = NULL;
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
}
- spin_unlock(&mdev->peer_seq_lock);
-
- if (get_ldev(mdev)) {
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- drbd_al_shrink(mdev);
- err = drbd_check_al_size(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
-
- put_ldev(mdev);
- drbd_md_sync(mdev);
-
- if (err) {
+ err = set_resource_options(tconn, &res_opts);
+ if (err) {
+ retcode = ERR_INVALID_REQUEST;
+ if (err == -ENOMEM)
retcode = ERR_NOMEM;
- goto fail;
- }
}
- if (mdev->state.conn >= C_CONNECTED)
- drbd_send_sync_param(mdev, &sc);
-
- if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
- cpumask_copy(mdev->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(mdev);
- mdev->receiver.reset_cpu_mask = 1;
- mdev->asender.reset_cpu_mask = 1;
- mdev->worker.reset_cpu_mask = 1;
- }
-
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
- kfree(rs_plan_s);
- free_cpumask_var(new_cpu_mask);
- crypto_free_hash(csums_tfm);
- crypto_free_hash(verify_tfm);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ struct drbd_conf *mdev;
+	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -1990,10 +2412,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (retcode != SS_NEED_CONNECTION)
break;
@@ -2002,7 +2424,25 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+ union drbd_state mask, union drbd_state val)
+{
+ enum drbd_ret_code retcode;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -2015,10 +2455,18 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
return rv;
}
-static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ int retcode; /* drbd_ret_code, drbd_state_rv */
+ struct drbd_conf *mdev;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -2028,16 +2476,15 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-
if (retcode < SS_SUCCESS) {
if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
- /* The peer will get a resync upon connect anyways. Just make that
- into a full resync. */
+ /* The peer will get a resync upon connect anyways.
+ * Just make that into a full resync. */
retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -2045,30 +2492,41 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
- retcode = ERR_PAUSE_IS_SET;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- reply->ret_code = retcode;
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ retcode = ERR_PAUSE_IS_SET;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
- union drbd_state s;
+ union drbd_dev_state s;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = mdev->state;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx.mdev->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2077,172 +2535,482 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
}
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
-
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
-static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_conf *mdev;
+	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
drbd_suspend_io(mdev);
- reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
- if (reply->ret_code == SS_SUCCESS) {
+ retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+ if (retcode == SS_SUCCESS) {
if (mdev->state.conn < C_CONNECTED)
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev, fail_frozen_disk_io);
+ tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
- unsigned short *tl;
+ struct nlattr *nla;
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ if (!nla)
+ goto nla_put_failure;
+ if (vnr != VOLUME_UNSPECIFIED &&
+ nla_put_u32(skb, T_ctx_volume, vnr))
+ goto nla_put_failure;
+ if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+ goto nla_put_failure;
+ if (tconn->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ goto nla_put_failure;
+ if (tconn->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ goto nla_put_failure;
+ nla_nest_end(skb, nla);
+ return 0;
- tl = reply->tag_list;
+nla_put_failure:
+ if (nla)
+ nla_nest_cancel(skb, nla);
+ return -EMSGSIZE;
+}
- if (get_ldev(mdev)) {
- tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
- put_ldev(mdev);
- }
+int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+ const struct sib_info *sib)
+{
+ struct state_info *si = NULL; /* for sizeof(si->member); */
+ struct net_conf *nc;
+ struct nlattr *nla;
+ int got_ldev;
+ int err = 0;
+ int exclude_sensitive;
+
+ /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
+	 * to. So we'd better exclude_sensitive information.
+ *
+ * If sib == NULL, this is drbd_adm_get_status, executed synchronously
+ * in the context of the requesting user process. Exclude sensitive
+ * information, unless current has superuser.
+ *
+ * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
+ * relies on the current implementation of netlink_dump(), which
+ * executes the dump callback successively from netlink_recvmsg(),
+ * always in the context of the receiving process */
+ exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
+
+ got_ldev = get_ldev(mdev);
+
+ /* We need to add connection name and volume number information still.
+ * Minor number is in drbd_genlmsghdr. */
+ if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+ goto nla_put_failure;
+
+ if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+ if (got_ldev)
+ if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
+ goto nla_put_failure;
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc)
+ err = net_conf_to_skb(skb, nc, exclude_sensitive);
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+
+ nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ if (!nla)
+ goto nla_put_failure;
+ if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+ nla_put_u32(skb, T_current_state, mdev->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ goto nla_put_failure;
+
+ if (got_ldev) {
+ int err;
- if (get_net_conf(mdev)) {
- tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
- put_net_conf(mdev);
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+ if (err)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ goto nla_put_failure;
+ if (C_SYNC_SOURCE <= mdev->state.conn &&
+ C_PAUSED_SYNC_T >= mdev->state.conn) {
+ if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ goto nla_put_failure;
+ }
}
- tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (sib) {
+ switch(sib->sib_reason) {
+ case SIB_SYNC_PROGRESS:
+ case SIB_GET_STATUS_REPLY:
+ break;
+ case SIB_STATE_CHANGE:
+ if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+ nla_put_u32(skb, T_new_state, sib->ns.i))
+ goto nla_put_failure;
+ break;
+ case SIB_HELPER_POST:
+ if (nla_put_u32(skb, T_helper_exit_code,
+ sib->helper_exit_code))
+ goto nla_put_failure;
+ /* fall through */
+ case SIB_HELPER_PRE:
+ if (nla_put_string(skb, T_helper, sib->helper_name))
+ goto nla_put_failure;
+ break;
+ }
+ }
+ nla_nest_end(skb, nla);
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (0)
+nla_put_failure:
+ err = -EMSGSIZE;
+ if (got_ldev)
+ put_ldev(mdev);
+ return err;
}
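nla_put_status_info() above follows the standard nested-attribute pattern: open a nest, emit attributes (bailing out with -EMSGSIZE on failure), then close the nest. A minimal sketch of that pattern with made-up attribute types, not DRBD's:

#include <net/netlink.h>

enum {
	DEMO_A_UNSPEC,
	DEMO_A_NEST,	/* nested container */
	DEMO_A_NAME,
	DEMO_A_COUNT,
};

static int demo_put_info(struct sk_buff *skb, const char *name, u32 count)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DEMO_A_NEST);
	if (!nla)
		return -EMSGSIZE;
	if (nla_put_string(skb, DEMO_A_NAME, name) ||
	    nla_put_u32(skb, DEMO_A_COUNT, count)) {
		nla_nest_cancel(skb, nla);	/* drop the partial nest */
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nla);			/* fix up the nest length */
	return 0;
}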
-static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short *tl = reply->tag_list;
- union drbd_state s = mdev->state;
- unsigned long rs_left;
- unsigned int res;
+ enum drbd_ret_code retcode;
+ int err;
- tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* no local ref, no bitmap, no syncer progress. */
- if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
- if (get_ldev(mdev)) {
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_ldev(mdev);
- }
+ err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- return (int)((char *)tl - (char *)reply->tag_list);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
-
- tl = reply->tag_list;
+ struct drbd_conf *mdev;
+ struct drbd_genlmsghdr *dh;
+ struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
+ struct drbd_tconn *tconn = NULL;
+ struct drbd_tconn *tmp;
+ unsigned volume = cb->args[1];
+
+ /* Open coded, deferred, iteration:
+ * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ * idr_for_each_entry(&tconn->volumes, mdev, i) {
+ * ...
+ * }
+ * }
+ * where tconn is cb->args[0];
+ * and i is cb->args[1];
+ *
+ * cb->args[2] indicates if we shall loop over all resources,
+ * or just dump all volumes of a single resource.
+ *
+ * This may miss entries inserted after this dump started,
+ * or entries deleted before they are reached.
+ *
+ * We need to make sure the mdev won't disappear while
+ * we are looking at it, and revalidate our iterators
+ * on each iteration.
+ */
- if (get_ldev(mdev)) {
- tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
- tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
- put_ldev(mdev);
+ /* synchronize with conn_create()/conn_destroy() */
+ rcu_read_lock();
+ /* revalidate iterator position */
+ list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+ if (pos == NULL) {
+ /* first iteration */
+ pos = tmp;
+ tconn = pos;
+ break;
+ }
+ if (tmp == pos) {
+ tconn = pos;
+ break;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (tconn) {
+next_tconn:
+ mdev = idr_get_next(&tconn->volumes, &volume);
+ if (!mdev) {
+ /* No more volumes to dump on this tconn.
+ * Advance tconn iterator. */
+ pos = list_entry_rcu(tconn->all_tconn.next,
+ struct drbd_tconn, all_tconn);
+ /* Did we dump any volume on this tconn yet? */
+ if (volume != 0) {
+ /* If we reached the end of the list,
+ * or only a single resource dump was requested,
+ * we are done. */
+ if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+ goto out;
+ volume = 0;
+ tconn = pos;
+ goto next_tconn;
+ }
+ }
+
+ dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &drbd_genl_family,
+ NLM_F_MULTI, DRBD_ADM_GET_STATUS);
+ if (!dh)
+ goto out;
+
+ if (!mdev) {
+ /* This is a tconn without a single volume.
+			 * Surprisingly enough, it may have a network
+ * configuration. */
+ struct net_conf *nc;
+ dh->minor = -1U;
+ dh->ret_code = NO_ERROR;
+ if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+ goto cancel;
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ goto done;
+ }
+
+ D_ASSERT(mdev->vnr == volume);
+ D_ASSERT(mdev->tconn == tconn);
+
+ dh->minor = mdev_to_minor(mdev);
+ dh->ret_code = NO_ERROR;
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
+ genlmsg_cancel(skb, dh);
+ goto out;
+ }
+done:
+ genlmsg_end(skb, dh);
+ }
+
+out:
+ rcu_read_unlock();
+ /* where to start the next iteration */
+ cb->args[0] = (long)pos;
+ cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+
+	/* If no more tconns/volumes/minors are found, the skb stays empty,
+	 * which terminates the dump. */
+ return skb->len;
}
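get_one_status() keeps its position in cb->args[] because a netlink dump callback is invoked repeatedly until it adds no more data. A minimal sketch of that resume-from-args pattern, with a hypothetical family, command and attribute (none of them DRBD's):

#include <linux/kernel.h>
#include <net/genetlink.h>

enum { DEMO_A_UNSPEC, DEMO_A_VALUE };
enum { DEMO_CMD_UNSPEC, DEMO_CMD_GET };

static struct genl_family demo_family;		/* assumed registered elsewhere */
static const u32 demo_values[] = { 10, 20, 30 };

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int i;

	for (i = cb->args[0]; i < ARRAY_SIZE(demo_values); i++) {
		void *hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, &demo_family,
					NLM_F_MULTI, DEMO_CMD_GET);
		if (!hdr)
			break;		/* skb full; resume at i on the next call */
		if (nla_put_u32(skb, DEMO_A_VALUE, demo_values[i])) {
			genlmsg_cancel(skb, hdr);
			break;
		}
		genlmsg_end(skb, hdr);
	}
	cb->args[0] = i;	/* where the next invocation continues */
	return skb->len;	/* no new entries => the dump terminates */
}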
-/**
- * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
- * @mdev: DRBD device.
- * @nlp: Netlink/connector packet from drbdsetup
- * @reply: Reply packet for drbdsetup
+/*
+ * Request status of all resources, or of all volumes within a single resource.
+ *
+ * This is a dump, as the answer may not fit in a single reply skb otherwise.
+ * Which means we cannot use the family->attrbuf or other such members, because
+ * dump is NOT protected by the genl_lock(). During dump, we only have access
+ * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
+ *
+ * Once things are setup properly, we call into get_one_status().
*/
-static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
- char rv;
+ const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
+ struct nlattr *nla;
+ const char *resource_name;
+ struct drbd_tconn *tconn;
+ int maxtype;
+
+ /* Is this a followup call? */
+ if (cb->args[0]) {
+ /* ... of a single resource dump,
+ * and the resource iterator has been advanced already? */
+ if (cb->args[2] && cb->args[2] != cb->args[0])
+ return 0; /* DONE. */
+ goto dump;
+ }
+
+ /* First call (from netlink_dump_start). We need to figure out
+ * which resource(s) the user wants us to dump. */
+ nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
+ nlmsg_attrlen(cb->nlh, hdrlen),
+ DRBD_NLA_CFG_CONTEXT);
+
+ /* No explicit context given. Dump all. */
+ if (!nla)
+ goto dump;
+ maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+ nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+ if (IS_ERR(nla))
+ return PTR_ERR(nla);
+ /* context given, but no name present? */
+ if (!nla)
+ return -EINVAL;
+ resource_name = nla_data(nla);
+ tconn = conn_get_by_name(resource_name);
+
+ if (!tconn)
+ return -ENODEV;
+
+ kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+
+ /* prime iterators, and set "filter" mode mark:
+ * only dump this tconn. */
+ cb->args[0] = (long)tconn;
+ /* cb->args[1] = 0; passed in this way. */
+ cb->args[2] = (long)tconn;
+
+dump:
+ return get_one_status(skb, cb);
+}
- tl = reply->tag_list;
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct timeout_parms tp;
+ int err;
- rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ tp.timeout_type =
+ adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+ UT_DEFAULT;
- return (int)((char *)tl - (char *)reply->tag_list);
+ err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
+ }
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- /* default to resume from last known position, if possible */
- struct start_ov args =
- { .start_sector = mdev->ov_start_sector };
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
+ struct start_ov_parms parms;
- if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* resume from last known position, if possible */
+ parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_stop_sector = ULLONG_MAX;
+ if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
+ int err = start_ov_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
+ /* w_make_ov_request expects position to be aligned */
+ mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ mdev->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-
- /* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
- reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
+ struct new_c_uuid_parms args;
- struct new_c_uuid args;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out_nolock;
- memset(&args, 0, sizeof(struct new_c_uuid));
- if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ mdev = adm_ctx.mdev;
+ memset(&args, 0, sizeof(args));
+ if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
+ err = new_c_uuid_parms_from_attrs(&args, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out_nolock;
+ }
}
- mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+ mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
@@ -2250,7 +3018,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
+ if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
dev_info(DEV, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
@@ -2273,10 +3041,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
drbd_send_uuids_skip_initial_sync(mdev);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
}
@@ -2284,416 +3052,284 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
out_dec:
put_ldev(mdev);
out:
- mutex_unlock(&mdev->state_mutex);
-
- reply->ret_code = retcode;
+ mutex_unlock(mdev->state_mutex);
+out_nolock:
+ drbd_adm_finish(info, retcode);
return 0;
}
-struct cn_handler_struct {
- int (*function)(struct drbd_conf *,
- struct drbd_nl_cfg_req *,
- struct drbd_nl_cfg_reply *);
- int reply_body_size;
-};
-
-static struct cn_handler_struct cnd_table[] = {
- [ P_primary ] = { &drbd_nl_primary, 0 },
- [ P_secondary ] = { &drbd_nl_secondary, 0 },
- [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
- [ P_detach ] = { &drbd_nl_detach, 0 },
- [ P_net_conf ] = { &drbd_nl_net_conf, 0 },
- [ P_disconnect ] = { &drbd_nl_disconnect, 0 },
- [ P_resize ] = { &drbd_nl_resize, 0 },
- [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
- [ P_invalidate ] = { &drbd_nl_invalidate, 0 },
- [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
- [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
- [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
- [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
- [ P_resume_io ] = { &drbd_nl_resume_io, 0 },
- [ P_outdate ] = { &drbd_nl_outdate, 0 },
- [ P_get_config ] = { &drbd_nl_get_config,
- sizeof(struct syncer_conf_tag_len_struct) +
- sizeof(struct disk_conf_tag_len_struct) +
- sizeof(struct net_conf_tag_len_struct) },
- [ P_get_state ] = { &drbd_nl_get_state,
- sizeof(struct get_state_tag_len_struct) +
- sizeof(struct sync_progress_tag_len_struct) },
- [ P_get_uuids ] = { &drbd_nl_get_uuids,
- sizeof(struct get_uuids_tag_len_struct) },
- [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
- sizeof(struct get_timeout_flag_tag_len_struct)},
- [ P_start_ov ] = { &drbd_nl_start_ov, 0 },
- [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
-};
-
-static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
+static enum drbd_ret_code
+drbd_check_resource_name(const char *name)
{
- struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
- struct cn_handler_struct *cm;
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- struct drbd_conf *mdev;
- int retcode, rr;
- int reply_size = sizeof(struct cn_msg)
- + sizeof(struct drbd_nl_cfg_reply)
- + sizeof(short int);
-
- if (!try_module_get(THIS_MODULE)) {
- printk(KERN_ERR "drbd: try_module_get() failed!\n");
- return;
+ if (!name || !name[0]) {
+ drbd_msg_put_info("resource name missing");
+ return ERR_MANDATORY_TAG;
}
-
- if (!capable(CAP_SYS_ADMIN)) {
- retcode = ERR_PERM;
- goto fail;
- }
-
- mdev = ensure_mdev(nlp->drbd_minor,
- (nlp->flags & DRBD_NL_CREATE_DEVICE));
- if (!mdev) {
- retcode = ERR_MINOR_INVALID;
- goto fail;
+ /* if we want to use these in sysfs/configfs/debugfs some day,
+ * we must not allow slashes */
+ if (strchr(name, '/')) {
+ drbd_msg_put_info("invalid resource name");
+ return ERR_INVALID_REQUEST;
}
+ return NO_ERROR;
+}
- if (nlp->packet_type >= P_nl_after_last_packet ||
- nlp->packet_type == P_return_code_only) {
- retcode = ERR_PACKET_NR;
- goto fail;
- }
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct res_opts res_opts;
+ int err;
- cm = cnd_table + nlp->packet_type;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* This may happen if packet number is 0: */
- if (cm->function == NULL) {
- retcode = ERR_PACKET_NR;
- goto fail;
+ set_res_opts_defaults(&res_opts);
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
}
- reply_size += cm->reply_body_size;
+ retcode = drbd_check_resource_name(adm_ctx.resource_name);
+ if (retcode != NO_ERROR)
+ goto out;
- /* allocation not in the IO path, cqueue thread context */
- cn_reply = kzalloc(reply_size, GFP_KERNEL);
- if (!cn_reply) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (adm_ctx.tconn) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
+ retcode = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("resource exists");
+ }
+ /* else: still NO_ERROR */
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
-
- reply->packet_type =
- cm->reply_body_size ? nlp->packet_type : P_return_code_only;
- reply->minor = nlp->drbd_minor;
- reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
- /* reply->tag_list; might be modified by cm->function. */
-
- rr = cm->function(mdev, nlp, reply);
-
- cn_reply->id = req->id;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
- cn_reply->flags = 0;
-
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
- kfree(cn_reply);
- module_put(THIS_MODULE);
- return;
- fail:
- drbd_nl_send_reply(req, retcode);
- module_put(THIS_MODULE);
+ if (!conn_create(adm_ctx.resource_name, &res_opts))
+ retcode = ERR_NOMEM;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-
-static unsigned short *
-__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
- unsigned short len, int nul_terminated)
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short l = tag_descriptions[tag_number(tag)].max_len;
- len = (len < l) ? len : l;
- put_unaligned(tag, tl++);
- put_unaligned(len, tl++);
- memcpy(tl, data, len);
- tl = (unsigned short*)((char*)tl + len);
- if (nul_terminated)
- *((char*)tl - 1) = 0;
- return tl;
-}
+ struct drbd_genlmsghdr *dh = info->userhdr;
+ enum drbd_ret_code retcode;
-static unsigned short *
-tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
-{
- return __tl_add_blob(tl, tag, data, len, 0);
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static unsigned short *
-tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
-{
- return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
-}
+ if (dh->minor > MINORMASK) {
+ drbd_msg_put_info("requested minor out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
+ if (adm_ctx.volume > DRBD_VOLUME_MAX) {
+ drbd_msg_put_info("requested volume id out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
-static unsigned short *
-tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
-{
- put_unaligned(tag, tl++);
- switch(tag_type(tag)) {
- case TT_INTEGER:
- put_unaligned(sizeof(int), tl++);
- put_unaligned(*(int *)val, (int *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(int));
- break;
- case TT_INT64:
- put_unaligned(sizeof(u64), tl++);
- put_unaligned(*(u64 *)val, (u64 *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(u64));
- break;
- default:
- /* someone did something stupid. */
- ;
+ /* drbd_adm_prepare made sure already
+ * that mdev->tconn and mdev->vnr match the request. */
+ if (adm_ctx.mdev) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+ retcode = ERR_MINOR_EXISTS;
+ /* else: still NO_ERROR */
+ goto out;
}
- return tl;
+
+ retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
+static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct get_state_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
-
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_get_state;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
-
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ if (mdev->state.disk == D_DISKLESS &&
+ /* no need to be mdev->state.conn == C_STANDALONE &&
+ * we may want to delete a minor from a live replication group.
+ */
+ mdev->state.role == R_SECONDARY) {
+ _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ CS_VERBOSE + CS_WAIT_COMPLETE);
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ idr_remove(&minors, mdev_to_minor(mdev));
+ del_gendisk(mdev->vdisk);
+ synchronize_rcu();
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return NO_ERROR;
+ } else
+ return ERR_MINOR_CONFIGURED;
}
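adm_delete_minor() above unpublishes the device, lets concurrent RCU lookups drain, and only then drops its reference. A minimal sketch of that unpublish / synchronize_rcu() / kref_put() ordering with a hypothetical device type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_minor {
	struct kref kref;
	/* ... */
};

static void demo_minor_destroy(struct kref *kref)
{
	struct demo_minor *m = container_of(kref, struct demo_minor, kref);
	kfree(m);
}

static void demo_minor_delete(struct demo_minor *m)
{
	/* 1. unpublish: remove from the idr/list used for lookups */
	/* 2. wait for lookups that raced with the removal to finish */
	synchronize_rcu();
	/* 3. drop the "registered" reference; the last put frees it */
	kref_put(&m->kref, demo_minor_destroy);
}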
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct call_helper_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = tl_add_str(tl, T_helper, helper_name);
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ enum drbd_ret_code retcode;
- reply->packet_type = P_call_helper;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = adm_delete_minor(adm_ctx.mdev);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e)
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- unsigned short *tl;
- struct page *page;
- unsigned len;
+	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
+ struct drbd_conf *mdev;
+ unsigned i;
- if (!e)
- return;
- if (!reason || !reason[0])
- return;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* apparently we have to memcpy twice, first to prepare the data for the
- * struct cn_msg, then within cn_netlink_send from the cn_msg to the
- * netlink skb. */
- /* receiver thread context, which is not in the writeout path (of this node),
- * but may be in the writeout path of the _other_ node.
- * GFP_NOIO to avoid potential "distributed deadlock". */
- cn_reply = kzalloc(
- sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct dump_ee_tag_len_struct)+
- sizeof(short int),
- GFP_NOIO);
-
- if (!cn_reply) {
- dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
- (unsigned long long)e->sector, e->size);
- return;
+ if (!adm_ctx.tconn) {
+ retcode = ERR_RES_NOT_KNOWN;
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
- tl = reply->tag_list;
-
- tl = tl_add_str(tl, T_dump_ee_reason, reason);
- tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
- tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
- tl = tl_add_int(tl, T_ee_sector, &e->sector);
- tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
-
- /* dump the first 32k */
- len = min_t(unsigned, e->size, 32 << 10);
- put_unaligned(T_ee_data, tl++);
- put_unaligned(len, tl++);
-
- page = e->pages;
- page_chain_for_each(page) {
- void *d = kmap_atomic(page);
- unsigned l = min_t(unsigned, len, PAGE_SIZE);
- memcpy(tl, d, l);
- kunmap_atomic(d);
- tl = (unsigned short*)((char*)tl + l);
- len -= l;
- if (len == 0)
- break;
+ /* demote */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to demote");
+ goto out;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
- cn_reply->ack = 0; // not used here.
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char*)tl - (char*)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_dump_ee;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- kfree(cn_reply);
-}
-
-void drbd_bcast_sync_progress(struct drbd_conf *mdev)
-{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct sync_progress_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
- unsigned long rs_left;
- unsigned int res;
+ retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to disconnect");
+ goto out;
+ }
- /* no local ref, no bitmap, no syncer progress, no broadcast. */
- if (!get_ldev(mdev))
- return;
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- put_ldev(mdev);
+ /* detach */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_detach(mdev, 0);
+ if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
+ drbd_msg_put_info("failed to detach");
+ goto out;
+ }
+ }
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ /* If we reach this, all volumes (of this tconn) are Secondary,
+ * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
+ * actually stopped, state handling only does drbd_thread_stop_nowait(). */
+ drbd_thread_stop(&adm_ctx.tconn->worker);
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
+ /* Now, nothing can fail anymore */
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ /* delete volumes */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_delete_minor(mdev);
+ if (retcode != NO_ERROR) {
+ /* "can not happen" */
+ drbd_msg_put_info("failed to delete volume");
+ goto out;
+ }
+ }
- reply->packet_type = P_sync_progress;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ /* delete connection */
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = NO_ERROR;
+ } else {
+ /* "can not happen" */
+ retcode = ERR_RES_IN_USE;
+ drbd_msg_put_info("failed to delete connection");
+ }
+ goto out;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-int __init drbd_nl_init(void)
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
- static struct cb_id cn_id_drbd;
- int err, try=10;
+ enum drbd_ret_code retcode;
- cn_id_drbd.val = CN_VAL_DRBD;
- do {
- cn_id_drbd.idx = cn_idx;
- err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
- if (!err)
- break;
- cn_idx = (cn_idx + CN_IDX_STEP);
- } while (try--);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (err) {
- printk(KERN_ERR "drbd: cn_drbd failed to register\n");
- return err;
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
+ retcode = NO_ERROR;
+ } else {
+ retcode = ERR_RES_IN_USE;
}
+ if (retcode == NO_ERROR)
+ drbd_thread_stop(&adm_ctx.tconn->worker);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-void drbd_nl_cleanup(void)
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
- static struct cb_id cn_id_drbd;
-
- cn_id_drbd.idx = cn_idx;
- cn_id_drbd.val = CN_VAL_DRBD;
-
- cn_del_callback(&cn_id_drbd);
-}
+ static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+ struct sk_buff *msg;
+ struct drbd_genlmsghdr *d_out;
+ unsigned seq;
+ int err = -ENOMEM;
+
+ if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+ if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+ mdev->rs_last_bcast = jiffies;
+ else
+ return;
+ }
-void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
-{
- char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- int rr;
+ seq = atomic_inc_return(&drbd_genl_seq);
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+ if (!msg)
+ goto failed;
- memset(buffer, 0, sizeof(buffer));
- cn_reply->id = req->id;
+ err = -EMSGSIZE;
+ d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
+	if (!d_out) /* cannot happen, but anyway. */
+ goto nla_put_failure;
+ d_out->minor = mdev_to_minor(mdev);
+ d_out->ret_code = NO_ERROR;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
- cn_reply->flags = 0;
+ if (nla_put_status_info(msg, mdev, sib))
+ goto nla_put_failure;
+ genlmsg_end(msg, d_out);
+ err = drbd_genl_multicast_events(msg, 0);
+ /* msg has been consumed or freed in netlink_broadcast() */
+ if (err && err != -ESRCH)
+ goto failed;
- reply->packet_type = P_return_code_only;
- reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
- reply->ret_code = ret_code;
+ return;
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+nla_put_failure:
+ nlmsg_free(msg);
+failed:
+ dev_err(DEV, "Error %d while broadcasting event. "
+ "Event seq:%u sib_reason:%u\n",
+ err, seq, sib->sib_reason);
}
-
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
new file mode 100644
index 00000000000..fa672b6df8d
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.c
@@ -0,0 +1,55 @@
+#include "drbd_wrappers.h"
+#include <linux/kernel.h>
+#include <net/netlink.h>
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+
+static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
+{
+ struct nlattr *head = nla_data(nla);
+ int len = nla_len(nla);
+ int rem;
+
+ /*
+ * validate_nla (called from nla_parse_nested) ignores attributes
+ * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
+ * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
+ * flag set also, check and remove that flag before calling
+ * nla_parse_nested.
+ */
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
+ nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
+ if (nla_type(nla) > maxtype)
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy)
+{
+ int err;
+
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (!err)
+ err = nla_parse_nested(tb, maxtype, nla, policy);
+
+ return err;
+}
+
+struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
+{
+ int err;
+ /*
+ * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
+ * we don't know about that attribute, reject all the nested
+ * attributes.
+ */
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (err)
+ return ERR_PTR(err);
+ return nla_find_nested(nla, attrtype);
+}
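A hypothetical caller of drbd_nla_parse_nested() could look like the sketch below (demo policy and attributes invented for illustration); this mirrors how the generated *_from_attrs() helpers are expected to use it:

#include <linux/kernel.h>
#include <net/netlink.h>
#include "drbd_nla.h"

enum {
	DEMO_A_UNSPEC,
	DEMO_A_COUNT,
	__DEMO_A_MAX,
};
#define DEMO_A_MAX (__DEMO_A_MAX - 1)

static const struct nla_policy demo_policy[DEMO_A_MAX + 1] = {
	[DEMO_A_COUNT] = { .type = NLA_U32 },
};

static int demo_parse(struct nlattr *nested)
{
	struct nlattr *tb[DEMO_A_MAX + 1];
	int err;

	/* strips DRBD_GENLA_F_MANDATORY before calling nla_parse_nested() */
	err = drbd_nla_parse_nested(tb, DEMO_A_MAX, nested, demo_policy);
	if (err)
		return err;
	if (tb[DEMO_A_COUNT])
		pr_info("count=%u\n", nla_get_u32(tb[DEMO_A_COUNT]));
	return 0;
}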
diff --git a/drivers/block/drbd/drbd_nla.h b/drivers/block/drbd/drbd_nla.h
new file mode 100644
index 00000000000..679c2d5b453
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.h
@@ -0,0 +1,8 @@
+#ifndef __DRBD_NLA_H
+#define __DRBD_NLA_H
+
+extern int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy);
+extern struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype);
+
+#endif /* __DRBD_NLA_H */
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 5496104f90b..56672a61eb9 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -167,18 +167,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bit_pos;
+ unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
+ mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
- else
+ if (verify_can_do_stop_sector(mdev))
+ stop_sector = mdev->ov_stop_sector;
+ } else
bit_pos = mdev->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
- "\t%3d%% sector pos: %llu/%llu\n",
+ "\t%3d%% sector pos: %llu/%llu",
(int)(bit_pos / (bm_bits/100+1)),
(unsigned long long)bit_pos * BM_SECT_PER_BIT,
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
+ if (stop_sector != 0 && stop_sector != ULLONG_MAX)
+ seq_printf(seq, " stop sector: %llu", stop_sector);
+ seq_printf(seq, "\n");
}
}
@@ -194,9 +200,11 @@ static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
static int drbd_seq_show(struct seq_file *seq, void *v)
{
- int i, hole = 0;
+ int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
+ struct net_conf *nc;
+ char wp;
static char write_ordering_chars[] = {
[WO_none] = 'n',
@@ -227,16 +235,11 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
oos .. known out-of-sync kB
*/
- for (i = 0; i < minor_count; i++) {
- mdev = minor_to_mdev(i);
- if (!mdev) {
- hole = 1;
- continue;
- }
- if (hole) {
- hole = 0;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, mdev, i) {
+ if (prev_i != i - 1)
seq_printf(seq, "\n");
- }
+ prev_i = i;
sn = drbd_conn_str(mdev->state.conn);
@@ -248,6 +251,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
/* reset mdev->congestion_reason */
bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
@@ -257,9 +262,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
- (mdev->net_conf == NULL ? ' ' :
- (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')),
- is_susp(mdev->state) ? 's' : 'r',
+ wp,
+ drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
@@ -276,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
atomic_read(&mdev->rs_pending_cnt),
atomic_read(&mdev->unacked_cnt),
atomic_read(&mdev->ap_bio_cnt),
- mdev->epochs,
- write_ordering_chars[mdev->write_ordering]
+ mdev->tconn->epochs,
+ write_ordering_chars[mdev->tconn->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
@@ -302,6 +306,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
}
}
}
+ rcu_read_unlock();
return 0;
}
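The /proc loop now walks the minors idr under rcu_read_lock() instead of scanning minor numbers and tracking holes. A minimal sketch of that iteration pattern with a hypothetical idr and element type:

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>

struct demo_dev { int dummy; };
static DEFINE_IDR(demo_minors);

static void demo_show_all(struct seq_file *seq)
{
	struct demo_dev *dev;
	int i;

	rcu_read_lock();		/* entries may be removed concurrently */
	idr_for_each_entry(&demo_minors, dev, i)
		seq_printf(seq, "%2d: present\n", i);
	rcu_read_unlock();
}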
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c74ca2df743..a9eccfc6079 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -48,17 +48,25 @@
#include "drbd_vli.h"
+struct packet_info {
+ enum drbd_packet cmd;
+ unsigned int size;
+ unsigned int vnr;
+ void *data;
+};
+
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
FE_RECYCLED,
};
-static int drbd_do_handshake(struct drbd_conf *mdev);
-static int drbd_do_auth(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_tconn *tconn);
+static int drbd_do_auth(struct drbd_tconn *tconn);
+static int drbd_disconnected(struct drbd_conf *mdev);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
-static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -142,11 +150,12 @@ static void page_chain_add(struct page **head,
*head = chain_first;
}
-static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
+static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+ unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
- int i = 0;
+ unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
@@ -175,7 +184,7 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return page;
/* Not enough pages immediately available this time.
- * No need to jump around here, drbd_pp_alloc will retry this
+ * No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
@@ -187,9 +196,10 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL;
}
-static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
+static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+ struct list_head *to_be_freed)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
@@ -198,8 +208,8 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_ee_has_active_page(e))
+ peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(le, to_be_freed);
}
@@ -208,18 +218,18 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
}
/**
- * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
+ * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
@@ -230,23 +240,31 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
*
* Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
+struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+ bool retry)
{
struct page *page = NULL;
+ struct net_conf *nc;
DEFINE_WAIT(wait);
+ int mxb;
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000;
+ rcu_read_unlock();
+
+ if (atomic_read(&mdev->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(mdev, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ if (atomic_read(&mdev->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(mdev, number);
if (page)
break;
}
@@ -255,7 +273,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
+ dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -268,11 +286,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
return page;
}
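The kernel-doc above says drbd_alloc_pages() hands back a page chain linked via page->private. Purely as an illustration (the driver's own page_chain_* helpers are not visible in this hunk, and walk_page_chain() below is a made-up name), such a chain can be walked by following page_private() until it yields NULL:

#include <linux/mm.h>	/* page_private() */

/* Illustrative only; not part of the patch. */
static void walk_page_chain(struct page *chain)
{
	struct page *page, *next;

	for (page = chain; page; page = next) {
		/* the next chain element is stashed in page->private */
		next = (struct page *)page_private(page);
		/* ... use 'page' here ... */
	}
}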
-/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
+ * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
@@ -280,7 +298,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -302,127 +320,130 @@ You need to hold the req_lock:
_drbd_wait_ee_list_empty()
You must not have the req_lock:
- drbd_free_ee()
- drbd_alloc_ee()
- drbd_init_ee()
- drbd_release_ee()
+ drbd_free_peer_req()
+ drbd_alloc_peer_req()
+ drbd_free_peer_reqs()
drbd_ee_fix_bhs()
- drbd_process_done_ee()
+ drbd_finish_peer_reqs()
drbd_clear_done_ee()
drbd_wait_ee_list_empty()
*/
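As a minimal illustration of the convention listed above (not part of the patch): take the connection's req_lock only around the underscore-prefixed helpers, and call the self-locking ones without it — the drbd_wait_ee_list_empty() wrapper further down in this patch follows exactly this shape.

	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	/* req_lock must be held */
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_reqs(mdev, &mdev->net_ee);		/* takes req_lock itself */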
-struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local)
+struct drbd_peer_request *
+drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+ unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
- e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
- if (!e) {
+ peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+ if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
+ dev_err(DEV, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
- INIT_HLIST_NODE(&e->collision);
- e->epoch = NULL;
- e->mdev = mdev;
- e->pages = page;
- atomic_set(&e->pending_bios, 0);
- e->size = data_size;
- e->flags = 0;
- e->sector = sector;
- e->block_id = id;
+ drbd_clear_interval(&peer_req->i);
+ peer_req->i.size = data_size;
+ peer_req->i.sector = sector;
+ peer_req->i.local = false;
+ peer_req->i.waiting = false;
+
+ peer_req->epoch = NULL;
+ peer_req->w.mdev = mdev;
+ peer_req->pages = page;
+ atomic_set(&peer_req->pending_bios, 0);
+ peer_req->flags = 0;
+ /*
+ * The block_id is opaque to the receiver. It is not endianness
+ * converted, and sent back to the sender unchanged.
+ */
+ peer_req->block_id = id;
- return e;
+ return peer_req;
fail:
- mempool_free(e, drbd_ee_mempool);
+ mempool_free(peer_req, drbd_ee_mempool);
return NULL;
}
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+ int is_net)
{
- if (e->flags & EE_HAS_DIGEST)
- kfree(e->digest);
- drbd_pp_free(mdev, e->pages, is_net);
- D_ASSERT(atomic_read(&e->pending_bios) == 0);
- D_ASSERT(hlist_unhashed(&e->collision));
- mempool_free(e, drbd_ee_mempool);
+ if (peer_req->flags & EE_HAS_DIGEST)
+ kfree(peer_req->digest);
+ drbd_free_pages(mdev, peer_req->pages, is_net);
+ D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
+ mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
LIST_HEAD(work_list);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &work_list, w.list) {
- drbd_free_some_ee(mdev, e, is_net);
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ __drbd_free_peer_req(mdev, peer_req, is_net);
count++;
}
return count;
}
-
/*
- * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
- * and receive_Barrier.
- *
- * Move entries from net_ee to done_ee, if ready.
- * Grab done_ee, call all callbacks, free the entries.
- * The callbacks typically send out ACKs.
+ * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_process_done_ee(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
- int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
+ struct drbd_peer_request *peer_req, *t;
+ int err = 0;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
/* possible callbacks here:
- * e_end_block, and e_end_resync_block, e_send_discard_ack.
+ * e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
*/
- list_for_each_entry_safe(e, t, &work_list, w.list) {
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ int err2;
+
/* list_del not necessary, next/prev members not touched */
- ok = e->w.cb(mdev, &e->w, !ok) && ok;
- drbd_free_ee(mdev, e);
+ err2 = peer_req->w.cb(&peer_req->w, !!err);
+ if (!err)
+ err = err2;
+ drbd_free_peer_req(mdev, peer_req);
}
wake_up(&mdev->ee_wait);
- return ok;
+ return err;
}
-void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -430,55 +451,22 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
-void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/* see also kernel_accept; which is only present since 2.6.18.
- * also we want to log which part of it failed, exactly */
-static int drbd_accept(struct drbd_conf *mdev, const char **what,
- struct socket *sock, struct socket **newsock)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- *what = "listen";
- err = sock->ops->listen(sock, 5);
- if (err < 0)
- goto out;
-
- *what = "sock_create_lite";
- err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
- newsock);
- if (err < 0)
- goto out;
-
- *what = "accept";
- err = sock->ops->accept(sock, *newsock, 0);
- if (err < 0) {
- sock_release(*newsock);
- *newsock = NULL;
- goto out;
- }
- (*newsock)->ops = sock->ops;
- __module_get((*newsock)->ops->owner);
-
-out:
- return err;
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
-static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, int flags)
+static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
mm_segment_t oldfs;
struct kvec iov = {
@@ -500,59 +488,62 @@ static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
return rv;
}
-static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
+static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
- mm_segment_t oldfs;
- struct kvec iov = {
- .iov_base = buf,
- .iov_len = size,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&iov,
- .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
- };
int rv;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
+ rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
- for (;;) {
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
- if (rv == size)
- break;
+ if (rv < 0) {
+ if (rv == -ECONNRESET)
+ conn_info(tconn, "sock was reset by peer\n");
+ else if (rv != -ERESTARTSYS)
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ } else if (rv == 0) {
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
- /* Note:
- * ECONNRESET other side closed the connection
- * ERESTARTSYS (on sock) we got a signal
- */
+ t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
- if (rv < 0) {
- if (rv == -ECONNRESET)
- dev_info(DEV, "sock was reset by peer\n");
- else if (rv != -ERESTARTSYS)
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
- break;
- } else if (rv == 0) {
- dev_info(DEV, "sock was shut down by peer\n");
- break;
- } else {
- /* signal came in, or peer/link went down,
- * after we read a partial message
- */
- /* D_ASSERT(signal_pending(current)); */
- break;
+ if (t)
+ goto out;
}
- };
-
- set_fs(oldfs);
+ conn_info(tconn, "sock was shut down by peer\n");
+ }
if (rv != size)
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+out:
return rv;
}
+static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv(tconn, buf, size);
+ if (err != size) {
+ if (err >= 0)
+ err = -EIO;
+ } else
+ err = 0;
+ return err;
+}
+
+static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv_all(tconn, buf, size);
+ if (err && !signal_pending(current))
+ conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ return err;
+}
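The two wrappers above collapse the old short-read bookkeeping into a single error code: 0 for a complete read, negative otherwise. Callers later in this patch reduce to the following shape (sketch):

	err = drbd_recv_all_warn(tconn, buf, size);
	if (err)
		return err;	/* short read or socket error, already warned about */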
+
/* quoting tcp(7):
* On individual connections, the socket buffer size must be set prior to the
* listen(2) or connect(2) calls in order to have it take effect.
@@ -572,29 +563,50 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
}
}
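The body of drbd_setbufsize() is outside this hunk's context. Going by the tcp(7) note above, a sketch of a helper that pins the buffer sizes before connect()/listen() might look as follows; this is an assumption about the helper, not a quote of it (SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK keep the kernel from auto-tuning the values away):

static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open-coded SO_SNDBUF / SO_RCVBUF, applied before listen()/connect() */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}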
-static struct socket *drbd_try_connect(struct drbd_conf *mdev)
+static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
const char *what;
struct socket *sock;
struct sockaddr_in6 src_in6;
- int err;
+ struct sockaddr_in6 peer_in6;
+ struct net_conf *nc;
+ int err, peer_addr_len, my_addr_len;
+ int sndbuf_size, rcvbuf_size, connect_int;
int disconnect_on_error = 1;
- if (!get_net_conf(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
return NULL;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+
+ if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ src_in6.sin6_port = 0;
+ else
+ ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
+
+ peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &sock);
+ err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
goto out;
}
sock->sk->sk_rcvtimeo =
- sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
- drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ sock->sk->sk_sndtimeo = connect_int * HZ;
+ drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
@@ -603,17 +615,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
- memcpy(&src_in6, mdev->net_conf->my_addr,
- min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
- if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
- src_in6.sin6_port = 0;
- else
- ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
-
what = "bind before connect";
- err = sock->ops->bind(sock,
- (struct sockaddr *) &src_in6,
- mdev->net_conf->my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -621,9 +624,7 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock,
- (struct sockaddr *)mdev->net_conf->peer_addr,
- mdev->net_conf->peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -641,91 +642,174 @@ out:
disconnect_on_error = 0;
break;
default:
- dev_err(DEV, "%s failed, err = %d\n", what, err);
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- put_net_conf(mdev);
+
return sock;
}
-static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
+struct accept_wait_data {
+ struct drbd_tconn *tconn;
+ struct socket *s_listen;
+ struct completion door_bell;
+ void (*original_sk_state_change)(struct sock *sk);
+
+};
+
+static void drbd_incoming_connection(struct sock *sk)
{
- int timeo, err;
- struct socket *s_estab = NULL, *s_listen;
+ struct accept_wait_data *ad = sk->sk_user_data;
+ void (*state_change)(struct sock *sk);
+
+ state_change = ad->original_sk_state_change;
+ if (sk->sk_state == TCP_ESTABLISHED)
+ complete(&ad->door_bell);
+ state_change(sk);
+}
+
+static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+{
+ int err, sndbuf_size, rcvbuf_size, my_addr_len;
+ struct sockaddr_in6 my_addr;
+ struct socket *s_listen;
+ struct net_conf *nc;
const char *what;
- if (!get_net_conf(mdev))
- return NULL;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -EIO;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &tconn->my_addr, my_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &s_listen);
+ err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
- timeo = mdev->net_conf->try_connect_int * HZ;
- timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
-
- s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- s_listen->sk->sk_rcvtimeo = timeo;
- s_listen->sk->sk_sndtimeo = timeo;
- drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen,
- (struct sockaddr *) mdev->net_conf->my_addr,
- mdev->net_conf->my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
if (err < 0)
goto out;
- err = drbd_accept(mdev, &what, s_listen, &s_estab);
+ ad->s_listen = s_listen;
+ write_lock_bh(&s_listen->sk->sk_callback_lock);
+ ad->original_sk_state_change = s_listen->sk->sk_state_change;
+ s_listen->sk->sk_state_change = drbd_incoming_connection;
+ s_listen->sk->sk_user_data = ad;
+ write_unlock_bh(&s_listen->sk->sk_callback_lock);
+
+ what = "listen";
+ err = s_listen->ops->listen(s_listen, 5);
+ if (err < 0)
+ goto out;
+ return 0;
out:
if (s_listen)
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- dev_err(DEV, "%s failed, err = %d\n", what, err);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
- put_net_conf(mdev);
- return s_estab;
+ return -EIO;
}
-static int drbd_send_fp(struct drbd_conf *mdev,
- struct socket *sock, enum drbd_packets cmd)
+static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.sbuf.header.h80;
-
- return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = ad->original_sk_state_change;
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
}
-static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
+static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
- int rr;
+ int timeo, connect_int, err = 0;
+ struct socket *s_estab = NULL;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ timeo = connect_int * HZ;
+ timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
+
+ err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
+ if (err <= 0)
+ return NULL;
+
+ err = kernel_accept(ad->s_listen, &s_estab, 0);
+ if (err < 0) {
+ if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
+ conn_err(tconn, "accept failed, err = %d\n", err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ }
+ }
+
+ if (s_estab)
+ unregister_state_change(s_estab->sk, ad);
+
+ return s_estab;
+}
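Taken together, prepare_listen_socket(), drbd_incoming_connection() and drbd_wait_for_connect() replace the old blocking accept with a completion-driven one: the listen socket's sk_state_change callback rings the door_bell, and only then is kernel_accept() called. A condensed sketch of that lifecycle, with error handling elided:

	struct accept_wait_data ad = {
		.tconn = tconn,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};
	struct socket *s_estab = NULL;

	prepare_listen_socket(tconn, &ad);	/* hooks sk_state_change */
	/* ... attempt outgoing connects meanwhile ... */
	if (wait_for_completion_interruptible_timeout(&ad.door_bell, timeo) > 0) {
		kernel_accept(ad.s_listen, &s_estab, 0);	/* peer already waiting */
		if (s_estab)
			unregister_state_change(s_estab->sk, &ad);
	}
	sock_release(ad.s_listen);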
- rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
+static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
- if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
- return be16_to_cpu(h->command);
+static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd)
+{
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
+}
- return 0xffff;
+static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
+{
+ unsigned int header_size = drbd_header_size(tconn);
+ struct packet_info pi;
+ int err;
+
+ err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ if (err != header_size) {
+ if (err >= 0)
+ err = -EIO;
+ return err;
+ }
+ err = decode_header(tconn, tconn->data.rbuf, &pi);
+ if (err)
+ return err;
+ return pi.cmd;
}
/**
* drbd_socket_okay() - Free the socket if its connection is not okay
- * @mdev: DRBD device.
* @sock: pointer to the pointer to the socket.
*/
-static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
+static int drbd_socket_okay(struct socket **sock)
{
int rr;
char tb[4];
@@ -733,7 +817,7 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
if (!*sock)
return false;
- rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
+ rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
return true;
@@ -743,6 +827,31 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
return false;
}
}
+/* Gets called if a connection is established, or if a new minor gets created
+ in a connection */
+int drbd_connected(struct drbd_conf *mdev)
+{
+ int err;
+
+ atomic_set(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+ &mdev->tconn->cstate_mutex :
+ &mdev->own_state_mutex;
+
+ err = drbd_send_sync_param(mdev);
+ if (!err)
+ err = drbd_send_sizes(mdev, 0, 0);
+ if (!err)
+ err = drbd_send_uuids(mdev);
+ if (!err)
+ err = drbd_send_current_state(mdev);
+ clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(RESIZE_PENDING, &mdev->flags);
+ mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ return err;
+}
/*
* return values:
@@ -752,232 +861,315 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int drbd_connect(struct drbd_conf *mdev)
+static int conn_connect(struct drbd_tconn *tconn)
{
- struct socket *s, *sock, *msock;
- int try, h, ok;
+ struct drbd_socket sock, msock;
+ struct drbd_conf *mdev;
+ struct net_conf *nc;
+ int vnr, timeout, h, ok;
+ bool discard_my_data;
enum drbd_state_rv rv;
+ struct accept_wait_data ad = {
+ .tconn = tconn,
+ .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
+ };
- D_ASSERT(!mdev->data.socket);
-
- if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &tconn->flags);
+ if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
- clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+ mutex_init(&sock.mutex);
+ sock.sbuf = tconn->data.sbuf;
+ sock.rbuf = tconn->data.rbuf;
+ sock.socket = NULL;
+ mutex_init(&msock.mutex);
+ msock.sbuf = tconn->meta.sbuf;
+ msock.rbuf = tconn->meta.rbuf;
+ msock.socket = NULL;
+
+ /* Assume that the peer only understands protocol 80 until we know better. */
+ tconn->agreed_pro_version = 80;
- sock = NULL;
- msock = NULL;
+ if (prepare_listen_socket(tconn, &ad))
+ return 0;
do {
- for (try = 0;;) {
- /* 3 tries, this should take less than a second! */
- s = drbd_try_connect(mdev);
- if (s || ++try >= 3)
- break;
- /* give the other side time to call bind() & listen() */
- schedule_timeout_interruptible(HZ / 10);
- }
+ struct socket *s;
+ s = drbd_try_connect(tconn);
if (s) {
- if (!sock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
- sock = s;
- s = NULL;
- } else if (!msock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
- msock = s;
- s = NULL;
+ if (!sock.socket) {
+ sock.socket = s;
+ send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ } else if (!msock.socket) {
+ clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ msock.socket = s;
+ send_first_packet(tconn, &msock, P_INITIAL_META);
} else {
- dev_err(DEV, "Logic error in drbd_connect()\n");
+ conn_err(tconn, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
- if (sock && msock) {
- schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
+ if (sock.socket && msock.socket) {
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ timeout = nc->ping_timeo * HZ / 10;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeout);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
if (ok)
break;
}
retry:
- s = drbd_wait_for_connect(mdev);
+ s = drbd_wait_for_connect(tconn, &ad);
if (s) {
- try = drbd_recv_fp(mdev, s);
- drbd_socket_okay(mdev, &sock);
- drbd_socket_okay(mdev, &msock);
- switch (try) {
- case P_HAND_SHAKE_S:
- if (sock) {
- dev_warn(DEV, "initial packet S crossed\n");
- sock_release(sock);
+ int fp = receive_first_packet(tconn, s);
+ drbd_socket_okay(&sock.socket);
+ drbd_socket_okay(&msock.socket);
+ switch (fp) {
+ case P_INITIAL_DATA:
+ if (sock.socket) {
+ conn_warn(tconn, "initial packet S crossed\n");
+ sock_release(sock.socket);
+ sock.socket = s;
+ goto randomize;
}
- sock = s;
+ sock.socket = s;
break;
- case P_HAND_SHAKE_M:
- if (msock) {
- dev_warn(DEV, "initial packet M crossed\n");
- sock_release(msock);
+ case P_INITIAL_META:
+ set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ if (msock.socket) {
+ conn_warn(tconn, "initial packet M crossed\n");
+ sock_release(msock.socket);
+ msock.socket = s;
+ goto randomize;
}
- msock = s;
- set_bit(DISCARD_CONCURRENT, &mdev->flags);
+ msock.socket = s;
break;
default:
- dev_warn(DEV, "Error receiving initial packet\n");
+ conn_warn(tconn, "Error receiving initial packet\n");
sock_release(s);
+randomize:
if (random32() & 1)
goto retry;
}
}
- if (mdev->state.conn <= C_DISCONNECTING)
+ if (tconn->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&mdev->receiver) == Exiting)
+ if (get_t_state(&tconn->receiver) == EXITING)
goto out_release_sockets;
}
- if (sock && msock) {
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
- if (ok)
- break;
- }
- } while (1);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
+ } while (!ok);
+
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
- msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_allocation = GFP_NOIO;
- msock->sk->sk_allocation = GFP_NOIO;
+ sock.socket->sk->sk_allocation = GFP_NOIO;
+ msock.socket->sk->sk_allocation = GFP_NOIO;
- sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
- msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
+ sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- * first set it to the P_HAND_SHAKE timeout,
+ * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ * first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
- sock->sk->sk_sndtimeo =
- sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ sock.socket->sk->sk_sndtimeo =
+ sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
+
+ msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
+ timeout = nc->timeout * HZ / 10;
+ discard_my_data = nc->discard_my_data;
+ rcu_read_unlock();
+
+ msock.socket->sk->sk_sndtimeo = timeout;
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock);
- drbd_tcp_nodelay(msock);
-
- mdev->data.socket = sock;
- mdev->meta.socket = msock;
- mdev->last_received = jiffies;
+ drbd_tcp_nodelay(sock.socket);
+ drbd_tcp_nodelay(msock.socket);
- D_ASSERT(mdev->asender.task == NULL);
+ tconn->data.socket = sock.socket;
+ tconn->meta.socket = msock.socket;
+ tconn->last_received = jiffies;
- h = drbd_do_handshake(mdev);
+ h = drbd_do_features(tconn);
if (h <= 0)
return h;
- if (mdev->cram_hmac_tfm) {
+ if (tconn->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(tconn)) {
case -1:
- dev_err(DEV, "Authentication of peer failed\n");
+ conn_err(tconn, "Authentication of peer failed\n");
return -1;
case 0:
- dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ conn_err(tconn, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ tconn->data.socket->sk->sk_sndtimeo = timeout;
+ tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
-
- if (drbd_send_protocol(mdev) == -1)
+ if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
return -1;
- set_bit(STATE_SENT, &mdev->flags);
- drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0, 0);
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
- if (mdev->state.conn != C_WF_REPORT_PARAMS)
- clear_bit(STATE_SENT, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ set_bit(STATE_SENT, &tconn->flags);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ /* Prevent a race between resync-handshake and
+ * being promoted to Primary.
+ *
+ * Grab and release the state mutex, so we know that any current
+ * drbd_set_role() is finished, and any incoming drbd_set_role
+ * will see the STATE_SENT flag, and wait for it to be cleared.
+ */
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
+
+ rcu_read_unlock();
+
+ if (discard_my_data)
+ set_bit(DISCARD_MY_DATA, &mdev->flags);
+ else
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
+
+ drbd_connected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
- if (rv < SS_SUCCESS)
+ rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &tconn->flags);
return 0;
+ }
- drbd_thread_start(&mdev->asender);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ drbd_thread_start(&tconn->asender);
- return 1;
+ mutex_lock(&tconn->conf_update);
+ /* The discard_my_data flag is a single-shot modifier to the next
+ * connection attempt, the handshake of which is now well underway.
+ * No need for rcu style copying of the whole struct
+ * just to clear a single value. */
+ tconn->net_conf->discard_my_data = 0;
+ mutex_unlock(&tconn->conf_update);
+
+ return h;
out_release_sockets:
- if (sock)
- sock_release(sock);
- if (msock)
- sock_release(msock);
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
+ if (sock.socket)
+ sock_release(sock.socket);
+ if (msock.socket)
+ sock_release(msock.socket);
return -1;
}
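Given the return values documented before conn_connect() (retry on 0, give up on a negative value, h from the feature handshake on success), a caller is expected to loop roughly like this; the receiver thread's real loop is not part of this hunk, so treat the sketch as an assumption:

	int h;

	do {
		h = conn_connect(tconn);
		if (h == 0)		/* retryable failure */
			schedule_timeout_interruptible(HZ);
		else if (h < 0)		/* hopeless: drop to standalone */
			conn_request_state(tconn, NS(conn, C_STANDALONE), CS_HARD);
	} while (h == 0);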
-static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
+static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
- union p_header *h = &mdev->data.rbuf.header;
- int r;
-
- r = drbd_recv(mdev, h, sizeof(*h));
- if (unlikely(r != sizeof(*h))) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
- return false;
- }
-
- if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
- *cmd = be16_to_cpu(h->h80.command);
- *packet_size = be16_to_cpu(h->h80.length);
- } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
- *cmd = be16_to_cpu(h->h95.command);
- *packet_size = be32_to_cpu(h->h95.length);
+ unsigned int header_size = drbd_header_size(tconn);
+
+ if (header_size == sizeof(struct p_header100) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
+ struct p_header100 *h = header;
+ if (h->pad != 0) {
+ conn_err(tconn, "Header padding is not zero\n");
+ return -EINVAL;
+ }
+ pi->vnr = be16_to_cpu(h->volume);
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ } else if (header_size == sizeof(struct p_header95) &&
+ *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
+ struct p_header95 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ pi->vnr = 0;
+ } else if (header_size == sizeof(struct p_header80) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
+ struct p_header80 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be16_to_cpu(h->length);
+ pi->vnr = 0;
} else {
- dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->h80.magic),
- be16_to_cpu(h->h80.command),
- be16_to_cpu(h->h80.length));
- return false;
+ conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ be32_to_cpu(*(__be32 *)header),
+ tconn->agreed_pro_version);
+ return -EINVAL;
}
- mdev->last_received = jiffies;
+ pi->data = header + header_size;
+ return 0;
+}
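Read backwards from decode_header(), the three on-the-wire header layouts come out roughly as below. The field order of p_header100 and the exact magic values are assumptions here; the authoritative definitions live in the DRBD protocol headers.

struct p_header80 {		/* pre-95 peers */
	__be32	magic;		/* DRBD_MAGIC */
	__be16	command;
	__be16	length;		/* payload bytes following the header */
} __packed;

struct p_header95 {		/* larger payloads, protocol 95..99 */
	__be16	magic;		/* DRBD_MAGIC_BIG */
	__be16	command;
	__be32	length;
} __packed;

struct p_header100 {		/* protocol 100+, adds a volume number */
	__be32	magic;		/* DRBD_MAGIC_100 */
	__be16	volume;
	__be16	command;
	__be32	length;
	__be32	pad;		/* must be zero */
} __packed;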
- return true;
+static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int err;
+
+ err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ if (err)
+ return err;
+
+ err = decode_header(tconn, buffer, pi);
+ tconn->last_received = jiffies;
+
+ return err;
}
-static void drbd_flush(struct drbd_conf *mdev)
+static void drbd_flush(struct drbd_tconn *tconn)
{
int rv;
+ struct drbd_conf *mdev;
+ int vnr;
- if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
- NULL);
- if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(mdev, WO_drain_io);
+ if (tconn->write_ordering >= WO_bdev_flush) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev(mdev))
+ continue;
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ GFP_NOIO, NULL);
+ if (rv) {
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ /* would rather check on EOPNOTSUPP, but that is not reliable.
+ * don't try again for ANY return value != 0
+ * if (rv == -EOPNOTSUPP) */
+ drbd_bump_write_ordering(tconn, WO_drain_io);
+ }
+ put_ldev(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+
+ rcu_read_lock();
+ if (rv)
+ break;
}
- put_ldev(mdev);
+ rcu_read_unlock();
}
}
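drbd_flush() above — like conn_wait_active_ee_empty() and the volume loop in conn_connect() elsewhere in this patch — iterates the connection's volumes with one recurring idiom: hold the RCU read lock only while stepping through the IDR, and pin the device with a kref across anything that may sleep. Stripped of the per-device work, the idiom is:

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);		/* keep mdev alive ... */
		rcu_read_unlock();		/* ... while we may sleep */

		/* blocking per-device work goes here */

		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();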
@@ -987,7 +1179,7 @@ static void drbd_flush(struct drbd_conf *mdev)
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -995,7 +1187,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
do {
next_epoch = NULL;
@@ -1017,18 +1209,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&mdev->epoch_lock);
- drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
- spin_lock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
+ drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
+ spin_lock(&tconn->epoch_lock);
}
+#if 0
+ /* FIXME: dec unacked on connection, once we have
+ * something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(mdev);
+ dec_unacked(epoch->tconn);
+#endif
- if (mdev->current_epoch != epoch) {
+ if (tconn->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- mdev->epochs--;
+ tconn->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1039,7 +1235,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
- wake_up(&mdev->ee_wait);
}
}
@@ -1049,40 +1244,52 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
epoch = next_epoch;
} while (1);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return rv;
}
/**
* drbd_bump_write_ordering() - Fall back to an other write ordering method
- * @mdev: DRBD device.
+ * @tconn: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
+ struct disk_conf *dc;
+ struct drbd_conf *mdev;
enum write_ordering_e pwo;
+ int vnr;
static char *write_ordering_str[] = {
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
};
- pwo = mdev->write_ordering;
+ pwo = tconn->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
- wo = WO_drain_io;
- if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
- wo = WO_none;
- mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
- dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
+ continue;
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+
+ if (wo == WO_bdev_flush && !dc->disk_flushes)
+ wo = WO_drain_io;
+ if (wo == WO_drain_io && !dc->disk_drain)
+ wo = WO_none;
+ put_ldev(mdev);
+ }
+ rcu_read_unlock();
+ tconn->write_ordering = wo;
+ if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
+ conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}
/**
- * drbd_submit_ee()
+ * drbd_submit_peer_request()
* @mdev: DRBD device.
- * @e: epoch entry
+ * @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
* May spread the pages to multiple bios,
@@ -1096,14 +1303,15 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type)
+int drbd_submit_peer_request(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req,
+ const unsigned rw, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
- struct page *page = e->pages;
- sector_t sector = e->sector;
- unsigned ds = e->size;
+ struct page *page = peer_req->pages;
+ sector_t sector = peer_req->i.sector;
+ unsigned ds = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
@@ -1122,12 +1330,12 @@ next_bio:
dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
- /* > e->sector, unless this is the first bio */
+ /* > peer_req->i.sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
- bio->bi_private = e;
- bio->bi_end_io = drbd_endio_sec;
+ bio->bi_private = peer_req;
+ bio->bi_end_io = drbd_peer_request_endio;
bio->bi_next = bios;
bios = bio;
@@ -1156,7 +1364,7 @@ next_bio:
D_ASSERT(page == NULL);
D_ASSERT(ds == 0);
- atomic_set(&e->pending_bios, n_bios);
+ atomic_set(&peer_req->pending_bios, n_bios);
do {
bio = bios;
bios = bios->bi_next;
@@ -1175,26 +1383,57 @@ fail:
return err;
}
-static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_interval *i = &peer_req->i;
+
+ drbd_remove_interval(&mdev->write_requests, i);
+ drbd_clear_interval(i);
+
+ /* Wake up any processes waiting for this peer request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
+void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
+
+static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
int rv;
- struct p_barrier *p = &mdev->data.rbuf.barrier;
+ struct p_barrier *p = pi->data;
struct drbd_epoch *epoch;
- inc_unacked(mdev);
-
- mdev->current_epoch->barrier_nr = p->barrier;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+ /* FIXME these are unacked on connection,
+ * not a specific (peer)device.
+ */
+ tconn->current_epoch->barrier_nr = p->barrier;
+ tconn->current_epoch->tconn = tconn;
+ rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (mdev->write_ordering) {
+ switch (tconn->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
- return true;
+ return 0;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
@@ -1202,81 +1441,75 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
if (epoch)
break;
else
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- drbd_flush(mdev);
+ conn_wait_active_ee_empty(tconn);
+ drbd_flush(tconn);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
- epoch = mdev->current_epoch;
- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
- D_ASSERT(atomic_read(&epoch->active) == 0);
- D_ASSERT(epoch->flags == 0);
-
- return true;
+ return 0;
default:
- dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
- return false;
+ conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ return -EIO;
}
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&mdev->epoch_lock);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
+ spin_lock(&tconn->epoch_lock);
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &tconn->current_epoch->list);
+ tconn->current_epoch = epoch;
+ tconn->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
- return true;
+ return 0;
}
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-static struct drbd_epoch_entry *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
+static struct drbd_peer_request *
+read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+ int data_size) __must_hold(local)
{
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page;
- int dgs, ds, rr;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
+ int dgs, ds, err;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
unsigned long *data;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
-
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data digest: read %d expected %d\n",
- rr, dgs);
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ /*
+ * FIXME: Receive the incoming digest into the receive buffer
+ * here, together with its struct p_data?
+ */
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
return NULL;
- }
+ data_size -= dgs;
}
- data_size -= dgs;
-
- ERR_IF(data_size & 0x1ff) return NULL;
- ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
+ if (!expect(IS_ALIGNED(data_size, 512)))
+ return NULL;
+ if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+ return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
@@ -1291,47 +1524,42 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ if (!peer_req)
return NULL;
if (!data_size)
- return e;
+ return peer_req;
ds = data_size;
- page = e->pages;
+ page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- rr = drbd_recv(mdev, data, len);
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
- if (rr != len) {
- drbd_free_ee(mdev, e);
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, len);
+ if (err) {
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
- ds -= rr;
+ ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
+ drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_bcast_ee(mdev, "digest failed",
- dgs, dig_in, dig_vv, e);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
}
mdev->recv_cnt += data_size>>9;
- return e;
+ return peer_req;
}
/* drbd_drain_block() just takes a data block
@@ -1340,30 +1568,26 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
struct page *page;
- int rr, rv = 1;
+ int err = 0;
void *data;
if (!data_size)
- return true;
+ return 0;
- page = drbd_pp_alloc(mdev, 1, 1);
+ page = drbd_alloc_pages(mdev, 1, 1);
data = kmap(page);
while (data_size) {
- rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
- if (rr != min_t(int, data_size, PAGE_SIZE)) {
- rv = 0;
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data: read %d expected %d\n",
- rr, min_t(int, data_size, PAGE_SIZE));
+ unsigned int len = min_t(int, data_size, PAGE_SIZE);
+
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
+ if (err)
break;
- }
- data_size -= rr;
+ data_size -= len;
}
kunmap(page);
- drbd_pp_free(mdev, page, 0);
- return rv;
+ drbd_free_pages(mdev, page, 0);
+ return err;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
@@ -1371,26 +1595,19 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
{
struct bio_vec *bvec;
struct bio *bio;
- int dgs, rr, i, expect;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
+ int dgs, err, i, expect;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data reply digest: read %d expected %d\n",
- rr, dgs);
- return 0;
- }
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
+ return err;
+ data_size -= dgs;
}
- data_size -= dgs;
-
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
mdev->recv_cnt += data_size>>9;
@@ -1399,63 +1616,61 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
D_ASSERT(sector == bio->bi_sector);
bio_for_each_segment(bvec, bio, i) {
+ void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
expect = min_t(int, data_size, bvec->bv_len);
- rr = drbd_recv(mdev,
- kmap(bvec->bv_page)+bvec->bv_offset,
- expect);
+ err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
kunmap(bvec->bv_page);
- if (rr != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data reply: "
- "read %d expected %d\n",
- rr, expect);
- return 0;
- }
- data_size -= rr;
+ if (err)
+ return err;
+ data_size -= expect;
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
- return 0;
+ return -EINVAL;
}
}
D_ASSERT(data_size == 0);
- return 1;
+ return 0;
}
-/* e_end_resync_block() is called via
- * drbd_process_done_ee() by asender only */
-static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+/*
+ * e_end_resync_block() is called in asender context via
+ * drbd_finish_peer_reqs().
+ */
+static int e_end_resync_block(struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err;
- D_ASSERT(hlist_unhashed(&e->collision));
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, e->size);
+ drbd_rs_failed_io(mdev, sector, peer_req->i.size);
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
}
dec_unacked(mdev);
- return ok;
+ return err;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
- e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e)
+ peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ if (!peer_req)
goto fail;
dec_rs_pending(mdev);
@@ -1464,64 +1679,88 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
- e->w.cb = e_end_resync_block;
+ peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
fail:
put_ldev(mdev);
- return false;
+ return -EIO;
}
-static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static struct drbd_request *
+find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+ sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
+
+ /* Request object according to our peer */
+ req = (struct drbd_request *)(unsigned long)id;
+ if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
+ return req;
+ if (!missing_ok) {
+ dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ (unsigned long)id, (unsigned long long)sector);
+ }
+ return NULL;
+}
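find_request() leans on the block_id convention noted at drbd_alloc_peer_req(): the id crosses the wire unmodified, so the sender can (presumably) store its request pointer in it, and the receiver validates the echoed value against its interval tree instead of dereferencing it blindly. Assumed sender-side counterpart, not shown in this hunk:

	p->block_id = (unsigned long)req;	/* opaque cookie, echoed back in ACKs */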
+
+static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct drbd_request *req;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
- req = _ar_id_to_req(mdev, p->block_id, sector);
- spin_unlock_irq(&mdev->req_lock);
- if (unlikely(!req)) {
- dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
- return false;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (unlikely(!req))
+ return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- ok = recv_dless_read(mdev, req, sector, data_size);
-
- if (ok)
- req_mod(req, data_received);
+ err = recv_dless_read(mdev, req, sector, pi->size);
+ if (!err)
+ req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
- return ok;
+ return err;
}
-static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
@@ -1529,42 +1768,63 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
if (get_ldev(mdev)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
- * or in drbd_endio_write_sec. */
- ok = recv_resync_read(mdev, sector, data_size);
+ * or in drbd_peer_request_endio. */
+ err = recv_resync_read(mdev, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not write resync data to local disk.\n");
- ok = drbd_drain_block(mdev, data_size);
+ err = drbd_drain_block(mdev, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
}
- atomic_add(data_size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &mdev->rs_sect_in);
- return ok;
+ return err;
}
-/* e_end_block() is called via drbd_process_done_ee().
- * this means this function only runs in the asender thread
- */
-static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void restart_conflicting_writes(struct drbd_conf *mdev,
+ sector_t sector, int size)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok = 1, pcmd;
+ struct drbd_interval *i;
+ struct drbd_request *req;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED))
+ continue;
+ /* as it is RQ_POSTPONED, this will cause it to
+ * be queued on the retry workqueue. */
+ __req_mod(req, CONFLICT_RESOLVED, NULL);
+ }
+}
+
+/*
+ * e_end_block() is called in asender context via drbd_finish_peer_reqs().
+ */
+static int e_end_block(struct drbd_work *w, int cancel)
+{
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err = 0, pcmd;
+
+ if (peer_req->flags & EE_SEND_WRITE_ACK) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
- e->flags & EE_MAY_SET_IN_SYNC) ?
+ peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- ok &= drbd_send_ack(mdev, pcmd, e);
+ err = drbd_send_ack(mdev, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, e->size);
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
} else {
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
@@ -1572,52 +1832,115 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
- if (mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- } else {
- D_ASSERT(hlist_unhashed(&e->collision));
- }
+ if (peer_req->flags & EE_IN_INTERVAL_TREE) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ D_ASSERT(!drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ if (peer_req->flags & EE_RESTART_REQUESTS)
+ restart_conflicting_writes(mdev, sector, peer_req->i.size);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ } else
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
- return ok;
+ return err;
}
-static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- int ok = 1;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ int err;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
+ err = drbd_send_ack(mdev, ack, peer_req);
+ dec_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
+ return err;
+}
- dec_unacked(mdev);
+static int e_send_superseded(struct drbd_work *w, int unused)
+{
+ return e_send_ack(w, P_SUPERSEDED);
+}
+
+static int e_send_retry_write(struct drbd_work *w, int unused)
+{
+ struct drbd_tconn *tconn = w->mdev->tconn;
+
+ return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ P_RETRY_WRITE : P_SUPERSEDED);
+}
+
+static bool seq_greater(u32 a, u32 b)
+{
+ /*
+ * We assume 32-bit wrap-around here.
+ * For 24-bit wrap-around, we would have to shift:
+ * a <<= 8; b <<= 8;
+ */
+ return (s32)a - (s32)b > 0;
+}
+
+static u32 seq_max(u32 a, u32 b)
+{
+ return seq_greater(a, b) ? a : b;
+}
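
For illustration only (not part of the patch): a minimal user-space sketch of the same wrap-around comparison. The helper name is hypothetical, and it uses the unsigned-subtract-then-cast form so the arithmetic stays well defined outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* same idea as seq_greater() above: the signed difference decides the order */
static int seq_greater_demo(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", seq_greater_demo(11, 10));          /* 1: plain case          */
	printf("%d\n", seq_greater_demo(1, 0xffffffffu));  /* 1: "newer" across wrap */
	printf("%d\n", seq_greater_demo(0xffffffffu, 1));  /* 0: not the reverse     */
	return 0;
}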
+
+static bool need_peer_seq(struct drbd_conf *mdev)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ int tp;
- return ok;
+ /*
+ * We only need to keep track of the last packet_seq number of our peer
+ * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
+ * handle_write_conflicts().
+ */
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
}
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
+ unsigned int newest_peer_seq;
- struct drbd_epoch_entry *rs_e;
+ if (need_peer_seq(mdev)) {
+ spin_lock(&mdev->peer_seq_lock);
+ newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ mdev->peer_seq = newest_peer_seq;
+ spin_unlock(&mdev->peer_seq_lock);
+ /* wake up only if we actually changed mdev->peer_seq */
+ if (peer_seq == newest_peer_seq)
+ wake_up(&mdev->seq_wait);
+ }
+}
+
+static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
+{
+ return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
+}
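
For illustration only (not part of the patch): the lengths here are byte counts, so l >> 9 converts them to 512-byte sectors before the half-open interval test. A small hypothetical check of that arithmetic:

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

static int overlaps_demo(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1 >> 9) <= s2) || (s1 >= s2 + (l2 >> 9)));
}

int main(void)
{
	assert(overlaps_demo(8, 4096, 12, 4096));   /* [8,16) vs [12,20): overlap  */
	assert(!overlaps_demo(8, 4096, 16, 4096));  /* [8,16) vs [16,24): adjacent */
	return 0;
}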
+
+/* maybe change sync_ee into interval trees as well? */
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+{
+ struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->req_lock);
- list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
- if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ if (overlaps(peer_req->i.sector, peer_req->i.size,
+ rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
@@ -1643,35 +1966,41 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_e
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
+static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
DEFINE_WAIT(wait);
- unsigned int p_seq;
long timeout;
- int ret = 0;
+ int ret;
+
+ if (!need_peer_seq(mdev))
+ return 0;
+
spin_lock(&mdev->peer_seq_lock);
for (;;) {
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- if (seq_le(packet_seq, mdev->peer_seq+1))
+ if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
+ mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ ret = 0;
break;
+ }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
- p_seq = mdev->peer_seq;
+ prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
- timeout = schedule_timeout(30*HZ);
+ rcu_read_lock();
+ timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ rcu_read_unlock();
+ timeout = schedule_timeout(timeout);
spin_lock(&mdev->peer_seq_lock);
- if (timeout == 0 && p_seq == mdev->peer_seq) {
+ if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
+ dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- finish_wait(&mdev->seq_wait, &wait);
- if (mdev->peer_seq+1 == packet_seq)
- mdev->peer_seq++;
spin_unlock(&mdev->peer_seq_lock);
+ finish_wait(&mdev->seq_wait, &wait);
return ret;
}
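
For reference (assuming DRBD's usual 0.1 s unit for ping-timeout): ping_timeo * HZ / 10 converts the configured value to jiffies, e.g. ping_timeo = 5 with HZ = 250 gives 5 * 250 / 10 = 125 jiffies, i.e. a 0.5 s wait per loop iteration before the function gives up with -ETIMEDOUT.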
@@ -1686,233 +2015,277 @@ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
+static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+ unsigned int size)
+{
+ struct drbd_interval *i;
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ struct drbd_request *req;
+ struct bio_and_error m;
+
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (!(req->rq_state & RQ_POSTPONED))
+ continue;
+ req->rq_state &= ~RQ_POSTPONED;
+ __req_mod(req, NEG_ACKED, &m);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ goto repeat;
+ }
+}
+
+static int handle_write_conflicts(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ sector_t sector = peer_req->i.sector;
+ const unsigned int size = peer_req->i.size;
+ struct drbd_interval *i;
+ bool equal;
+ int err;
+
+ /*
+ * Inserting the peer request into the write_requests tree will prevent
+ * new conflicting local requests from being added.
+ */
+ drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (i == &peer_req->i)
+ continue;
+
+ if (!i->local) {
+ /*
+ * Our peer has sent a conflicting remote request; this
+ * should not happen in a two-node setup. Wait for the
+ * earlier peer request to complete.
+ */
+ err = drbd_wait_misc(mdev, i);
+ if (err)
+ goto out;
+ goto repeat;
+ }
+
+ equal = i->sector == sector && i->size == size;
+ if (resolve_conflicts) {
+ /*
+ * If the peer request is fully contained within the
+ * overlapping request, it can be considered overwritten
+ * and thus superseded; otherwise, it will be retried
+ * once all overlapping requests have completed.
+ */
+ bool superseded = i->sector <= sector && i->sector +
+ (i->size >> 9) >= sector + (size >> 9);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u, "
+ "assuming %s came first\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size,
+ superseded ? "local" : "remote");
+
+ inc_unacked(mdev);
+ peer_req->w.cb = superseded ? e_send_superseded :
+ e_send_retry_write;
+ list_add_tail(&peer_req->w.list, &mdev->done_ee);
+ wake_asender(mdev->tconn);
+
+ err = -ENOENT;
+ goto out;
+ } else {
+ struct drbd_request *req =
+ container_of(i, struct drbd_request, i);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size);
+
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED)) {
+ /*
+ * Wait for the node with the discard flag to
+ * decide if this request has been superseded
+ * or needs to be retried.
+ * Requests that have been superseded will
+ * disappear from the write_requests tree.
+ *
+ * In addition, wait for the conflicting
+ * request to finish locally before submitting
+ * the conflicting peer request.
+ */
+ err = drbd_wait_misc(mdev, &req->i);
+ if (err) {
+ _conn_request_state(mdev->tconn,
+ NS(conn, C_TIMEOUT),
+ CS_HARD);
+ fail_postponed_requests(mdev, sector, size);
+ goto out;
+ }
+ goto repeat;
+ }
+ /*
+ * Remember to restart the conflicting requests after
+ * the new peer request has completed.
+ */
+ peer_req->flags |= EE_RESTART_REQUESTS;
+ }
+ }
+ err = 0;
+
+ out:
+ if (err)
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ return err;
+}
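
For illustration only (not part of the patch): a hypothetical stand-alone version of the containment test that decides "superseded" above. The peer write is only dropped when the overlapping local interval fully covers it; struct interval is a simplified stand-in for struct drbd_interval.

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;

struct interval { sector_t sector; unsigned int size; /* byte length */ };

static int peer_write_superseded(const struct interval *local,
				 sector_t sector, unsigned int size)
{
	return local->sector <= sector &&
	       local->sector + (local->size >> 9) >= sector + (size >> 9);
}

int main(void)
{
	struct interval local = { .sector = 8, .size = 8192 };	/* covers [8,24) */

	assert(peer_write_superseded(&local, 12, 4096));	/* [12,20): contained  */
	assert(!peer_write_superseded(&local, 20, 4096));	/* [20,28): sticks out */
	return 0;
}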
+
/* mirrored write */
-static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- struct drbd_epoch_entry *e;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct drbd_peer_request *peer_req;
+ struct p_data *p = pi->data;
+ u32 peer_seq = be32_to_cpu(p->seq_num);
int rw = WRITE;
u32 dp_flags;
+ int err, tp;
- if (!get_ldev(mdev)) {
- spin_lock(&mdev->peer_seq_lock);
- if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
- mdev->peer_seq++;
- spin_unlock(&mdev->peer_seq_lock);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
- atomic_inc(&mdev->current_epoch->epoch_size);
- return drbd_drain_block(mdev, data_size);
+ if (!get_ldev(mdev)) {
+ int err2;
+
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ atomic_inc(&tconn->current_epoch->epoch_size);
+ err2 = drbd_drain_block(mdev, pi->size);
+ if (!err)
+ err = err2;
+ return err;
}
- /* get_ldev(mdev) successful.
- * Corresponding put_ldev done either below (on various errors),
- * or in drbd_endio_write_sec, if we successfully submit the data at
- * the end of this function. */
+ /*
+ * Corresponding put_ldev done either below (on various errors), or in
+ * drbd_peer_request_endio, if we successfully submit the data at the
+ * end of this function.
+ */
sector = be64_to_cpu(p->sector);
- e = read_in_block(mdev, p->block_id, sector, data_size);
- if (!e) {
+ peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -EIO;
}
- e->w.cb = e_end_block;
+ peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(mdev, dp_flags);
- if (e->pages == NULL) {
- D_ASSERT(e->size == 0);
+ if (peer_req->pages == NULL) {
+ D_ASSERT(peer_req->i.size == 0);
D_ASSERT(dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
- e->flags |= EE_MAY_SET_IN_SYNC;
-
- spin_lock(&mdev->epoch_lock);
- e->epoch = mdev->current_epoch;
- atomic_inc(&e->epoch->epoch_size);
- atomic_inc(&e->epoch->active);
- spin_unlock(&mdev->epoch_lock);
-
- /* I'm the receiver, I do hold a net_cnt reference. */
- if (!mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- } else {
- /* don't get the req_lock yet,
- * we may sleep in drbd_wait_peer_seq */
- const int size = e->size;
- const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
- DEFINE_WAIT(wait);
- struct drbd_request *i;
- struct hlist_node *n;
- struct hlist_head *slot;
- int first;
-
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- BUG_ON(mdev->ee_hash == NULL);
- BUG_ON(mdev->tl_hash == NULL);
-
- /* conflict detection and handling:
- * 1. wait on the sequence number,
- * in case this data packet overtook ACK packets.
- * 2. check our hash tables for conflicting requests.
- * we only need to walk the tl_hash, since an ee can not
- * have a conflict with an other ee: on the submitting
- * node, the corresponding req had already been conflicting,
- * and a conflicting req is never sent.
- *
- * Note: for two_primaries, we are protocol C,
- * so there cannot be any request that is DONE
- * but still on the transfer log.
- *
- * unconditionally add to the ee_hash.
- *
- * if no conflicting request is found:
- * submit.
- *
- * if any conflicting request is found
- * that has not yet been acked,
- * AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the P_DISCARD_ACK; OUT.
- *
- * if any conflicting request is found:
- * block the receiver, waiting on misc_wait
- * until no more conflicting requests are there,
- * or we get interrupted (disconnect).
- *
- * we do not just write after local io completion of those
- * requests, but only after req is done completely, i.e.
- * we wait for the P_DISCARD_ACK to arrive!
- *
- * then proceed normally, i.e. submit.
- */
- if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
+ peer_req->flags |= EE_MAY_SET_IN_SYNC;
+
+ spin_lock(&tconn->epoch_lock);
+ peer_req->epoch = tconn->current_epoch;
+ atomic_inc(&peer_req->epoch->epoch_size);
+ atomic_inc(&peer_req->epoch->active);
+ spin_unlock(&tconn->epoch_lock);
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+ if (tp) {
+ peer_req->flags |= EE_IN_INTERVAL_TREE;
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ if (err)
goto out_interrupted;
-
- spin_lock_irq(&mdev->req_lock);
-
- hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- first = 1;
- for (;;) {
- int have_unacked = 0;
- int have_conflict = 0;
- prepare_to_wait(&mdev->misc_wait, &wait,
- TASK_INTERRUPTIBLE);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- /* only ALERT on first iteration,
- * we may be woken up early... */
- if (first)
- dev_alert(DEV, "%s[%u] Concurrent local write detected!"
- " new: %llus +%u; pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- if (i->rq_state & RQ_NET_PENDING)
- ++have_unacked;
- ++have_conflict;
- }
- }
-#undef OVERLAPS
- if (!have_conflict)
- break;
-
- /* Discard Ack only for the _first_ iteration */
- if (first && discard && have_unacked) {
- dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
- (unsigned long long)sector);
- inc_unacked(mdev);
- e->w.cb = e_send_discard_ack;
- list_add_tail(&e->w.list, &mdev->done_ee);
-
- spin_unlock_irq(&mdev->req_lock);
-
- /* we could probably send that P_DISCARD_ACK ourselves,
- * but I don't like the receiver using the msock */
-
+ spin_lock_irq(&mdev->tconn->req_lock);
+ err = handle_write_conflicts(mdev, peer_req);
+ if (err) {
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (err == -ENOENT) {
put_ldev(mdev);
- wake_asender(mdev);
- finish_wait(&mdev->misc_wait, &wait);
- return true;
+ return 0;
}
+ goto out_interrupted;
+ }
+ } else
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->active_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (signal_pending(current)) {
- hlist_del_init(&e->collision);
-
- spin_unlock_irq(&mdev->req_lock);
-
- finish_wait(&mdev->misc_wait, &wait);
- goto out_interrupted;
- }
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
- spin_unlock_irq(&mdev->req_lock);
- if (first) {
- first = 0;
- dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
- "sec=%llus\n", (unsigned long long)sector);
- } else if (discard) {
- /* we had none on the first iteration.
- * there must be none now. */
- D_ASSERT(have_unacked == 0);
- }
- schedule();
- spin_lock_irq(&mdev->req_lock);
+ if (mdev->tconn->agreed_pro_version < 100) {
+ rcu_read_lock();
+ switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ case DRBD_PROT_C:
+ dp_flags |= DP_SEND_WRITE_ACK;
+ break;
+ case DRBD_PROT_B:
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ break;
}
- finish_wait(&mdev->misc_wait, &wait);
+ rcu_read_unlock();
}
- list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
-
- switch (mdev->net_conf->wire_protocol) {
- case DRBD_PROT_C:
+ if (dp_flags & DP_SEND_WRITE_ACK) {
+ peer_req->flags |= EE_SEND_WRITE_ACK;
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
- break;
- case DRBD_PROT_B:
+ }
+
+ if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, P_RECV_ACK, e);
- break;
- case DRBD_PROT_A:
- /* nothing to do */
- break;
+ drbd_send_ack(mdev, P_RECV_ACK, peer_req);
}
if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, e->sector, e->size);
- e->flags |= EE_CALL_AL_COMPLETE_IO;
- e->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, e->sector);
+ drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+ peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+ drbd_al_begin_io(mdev, &peer_req->i);
}
- if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
- return true;
+ err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ if (!err)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- if (e->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, e->sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+ drbd_al_complete_io(mdev, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return err;
}
/* We may throttle resync, if the lower device seems to be busy,
@@ -1933,9 +2306,14 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
struct lc_element *tmp;
int curr_events;
int throttle = 0;
+ unsigned int c_min_rate;
+
+ rcu_read_lock();
+ c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ rcu_read_unlock();
/* feature disabled? */
- if (mdev->sync_conf.c_min_rate == 0)
+ if (c_min_rate == 0)
return 0;
spin_lock_irq(&mdev->al_lock);
@@ -1975,40 +2353,46 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
db = mdev->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
- if (dbdt > mdev->sync_conf.c_min_rate)
+ if (dbdt > c_min_rate)
throttle = 1;
}
return throttle;
}
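
As a rough worked example of the throttle test above (assuming the usual 4 KiB bitmap granularity, so Bit2KB(x) is 4 * x, with dt in seconds): resyncing db = 2560 bitmap bits in dt = 2 s gives dbdt = Bit2KB(2560 / 2) = 5120 KiB/s, so resync requests would be throttled whenever c_min_rate is configured below that value.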
-static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
+static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ sector_t capacity;
+ struct drbd_peer_request *peer_req;
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
- struct p_block_req *p = &mdev->data.rbuf.block_req;
+ struct p_block_req *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+ capacity = drbd_get_capacity(mdev->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (sector + (size>>9) > capacity) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
verb = 1;
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
break;
@@ -2023,35 +2407,34 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
+ BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possibly payload */
- return drbd_drain_block(mdev, digest_size);
+ return drbd_drain_block(mdev, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
- if (!e) {
+ peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -ENOMEM;
}
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
- e->w.cb = w_e_end_data_req;
+ peer_req->w.cb = w_e_end_data_req;
fault_type = DRBD_FAULT_DT_RD;
/* application IO, don't drbd_rs_begin_io */
goto submit;
case P_RS_DATA_REQUEST:
- e->w.cb = w_e_end_rsdata_req;
+ peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2060,28 +2443,28 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REPLY:
case P_CSUM_RS_REQUEST:
fault_type = DRBD_FAULT_RS_RD;
- di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
+ di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
if (!di)
goto out_free_e;
- di->digest_size = digest_size;
+ di->digest_size = pi->size;
di->digest = (((char *)di)+sizeof(struct digest_info));
- e->digest = di;
- e->flags |= EE_HAS_DIGEST;
+ peer_req->digest = di;
+ peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
+ if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
goto out_free_e;
- if (cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->agreed_pro_version >= 89);
- e->w.cb = w_e_end_csum_rs_req;
+ if (pi->cmd == P_CSUM_RS_REQUEST) {
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
- } else if (cmd == P_OV_REPLY) {
+ } else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
atomic_add(size >> 9, &mdev->rs_sect_in);
- e->w.cb = w_e_end_ov_reply;
+ peer_req->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
@@ -2091,7 +2474,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->agreed_pro_version >= 90) {
+ mdev->tconn->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
mdev->ov_start_sector = sector;
@@ -2105,15 +2488,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
dev_info(DEV, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
- e->w.cb = w_e_end_ov_req;
+ peer_req->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
- fault_type = DRBD_FAULT_MAX;
- goto out_free_e;
+ BUG();
}
/* Throttle, drbd_rs_begin_io and submit should become asynchronous
@@ -2148,30 +2528,31 @@ submit_for_resync:
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return -EIO;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
+ enum drbd_after_sb_p after_sb_0p;
self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
peer = mdev->p_uuid[UI_BITMAP] & 1;
@@ -2179,10 +2560,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
- switch (mdev->net_conf->after_sb_0p) {
+ rcu_read_lock();
+ after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ rcu_read_unlock();
+ switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
+ case ASB_VIOLENTLY:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2211,14 +2596,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
} else {
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+ if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
@@ -2227,7 +2612,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2243,13 +2628,18 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_1p;
- switch (mdev->net_conf->after_sb_1p) {
+ rcu_read_lock();
+ after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ rcu_read_unlock();
+ switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2292,8 +2682,12 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_2p;
- switch (mdev->net_conf->after_sb_2p) {
+ rcu_read_lock();
+ after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ rcu_read_unlock();
+ switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
@@ -2301,6 +2695,7 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_DISCARD_REMOTE:
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
@@ -2386,13 +2781,15 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_set_bm(mdev, 0UL);
+ drbd_uuid_move_history(mdev);
+ mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+ mdev->ldev->md.uuid[UI_BITMAP] = 0;
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2407,7 +2804,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2440,7 +2837,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
return dc ? -1 : 1;
}
}
@@ -2453,14 +2850,14 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of

resync as sync source modifications of the peer's UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
@@ -2490,18 +2887,18 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
resync as sync source modifications of our UUIDs. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
- _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2545,20 +2942,24 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
- int hg, rule_nr;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
+ struct net_conf *nc;
+ int hg, rule_nr, rr_conflict, tentative;
mydisk = mdev->state.disk;
if (mydisk == D_NEGOTIATING)
mydisk = mdev->new_state_tmp.disk;
dev_info(DEV, "drbd_sync_handshake:\n");
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
hg = drbd_uuid_compare(mdev, &rule_nr);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -2584,7 +2985,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (abs(hg) == 100)
drbd_khelper(mdev, "initial-split-brain");
- if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+
+ if (hg == 100 || (hg == -100 && nc->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -2613,9 +3017,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
@@ -2623,6 +3027,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -2641,7 +3048,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
- switch (mdev->net_conf->rr_conflict) {
+ switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
@@ -2654,7 +3061,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
@@ -2686,33 +3093,29 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
return rv;
}
-/* returns 1 if invalid */
-static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
+static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
- if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
- (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
- return 0;
+ if (peer == ASB_DISCARD_REMOTE)
+ return ASB_DISCARD_LOCAL;
/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
- if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
- self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
- return 1;
+ if (peer == ASB_DISCARD_LOCAL)
+ return ASB_DISCARD_REMOTE;
/* everything else is valid if they are equal on both sides. */
- if (peer == self)
- return 0;
-
- /* everything es is invalid. */
- return 1;
+ return peer;
}
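
For example, a peer that reports after-sb-0pri = discard-remote is mapped by convert_after_sb() to discard-local, which is what our own net_conf must then contain for the checks in receive_protocol() below to pass; every other policy is only accepted when it matches ours exactly.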
-static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_protocol *p = &mdev->data.rbuf.protocol;
- int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
- int p_want_lose, p_two_primaries, cf;
- char p_integrity_alg[SHARED_SECRET_MAX] = "";
+ struct p_protocol *p = pi->data;
+ enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
+ int p_proto, p_discard_my_data, p_two_primaries, cf;
+ struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
+ char integrity_alg[SHARED_SECRET_MAX] = "";
+ struct crypto_hash *peer_integrity_tfm = NULL;
+ void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
@@ -2720,63 +3123,138 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
p_two_primaries = be32_to_cpu(p->two_primaries);
cf = be32_to_cpu(p->conn_flags);
- p_want_lose = cf & CF_WANT_LOSE;
-
- clear_bit(CONN_DRY_RUN, &mdev->flags);
+ p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &mdev->flags);
+ if (tconn->agreed_pro_version >= 87) {
+ int err;
- if (p_proto != mdev->net_conf->wire_protocol) {
- dev_err(DEV, "incompatible communication protocols\n");
- goto disconnect;
+ if (pi->size > sizeof(integrity_alg))
+ return -EIO;
+ err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ if (err)
+ return err;
+ integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
- if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
- dev_err(DEV, "incompatible after-sb-0pri settings\n");
- goto disconnect;
- }
+ if (pi->cmd != P_PROTOCOL_UPDATE) {
+ clear_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
- dev_err(DEV, "incompatible after-sb-1pri settings\n");
- goto disconnect;
- }
+ if (cf & CF_DRY_RUN)
+ set_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
- dev_err(DEV, "incompatible after-sb-2pri settings\n");
- goto disconnect;
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- if (p_want_lose && mdev->net_conf->want_lose) {
- dev_err(DEV, "both sides have the 'want_lose' flag set\n");
- goto disconnect;
- }
+ if (p_proto != nc->wire_protocol) {
+ conn_err(tconn, "incompatible %s settings\n", "protocol");
+ goto disconnect_rcu_unlock;
+ }
- if (p_two_primaries != mdev->net_conf->two_primaries) {
- dev_err(DEV, "incompatible setting of the two-primaries options\n");
- goto disconnect;
+ if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_discard_my_data && nc->discard_my_data) {
+ conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_two_primaries != nc->two_primaries) {
+ conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (strcmp(integrity_alg, nc->integrity_alg)) {
+ conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ goto disconnect_rcu_unlock;
+ }
+
+ rcu_read_unlock();
}
- if (mdev->agreed_pro_version >= 87) {
- unsigned char *my_alg = mdev->net_conf->integrity_alg;
+ if (integrity_alg[0]) {
+ int hash_size;
- if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
- return false;
+ /*
+ * We can only change the peer data integrity algorithm
+ * here. Changing our own data integrity algorithm
+ * requires that we send a P_PROTOCOL_UPDATE packet at
+ * the same time; otherwise, the peer has no way to
+ * tell between which packets the algorithm should
+ * change.
+ */
- p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
- if (strcmp(p_integrity_alg, my_alg)) {
- dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
+ peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(peer_integrity_tfm)) {
+			peer_integrity_tfm = NULL;
+ conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ integrity_alg);
goto disconnect;
}
- dev_info(DEV, "data-integrity-alg: %s\n",
- my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
+
+ hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ int_dig_in = kmalloc(hash_size, GFP_KERNEL);
+ int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
+ if (!(int_dig_in && int_dig_vv)) {
+ conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ goto disconnect;
+ }
+ }
+
+ new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ conn_err(tconn, "Allocation of new net_conf failed\n");
+ goto disconnect;
}
- return true;
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_net_conf = tconn->net_conf;
+ *new_net_conf = *old_net_conf;
+
+ new_net_conf->wire_protocol = p_proto;
+ new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
+ new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
+ new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
+ new_net_conf->two_primaries = p_two_primaries;
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ tconn->peer_integrity_tfm = peer_integrity_tfm;
+ tconn->int_dig_in = int_dig_in;
+ tconn->int_dig_vv = int_dig_vv;
+
+ if (strcmp(old_net_conf->integrity_alg, integrity_alg))
+ conn_info(tconn, "peer data-integrity-alg: %s\n",
+ integrity_alg[0] ? integrity_alg : "(none)");
+
+ synchronize_rcu();
+ kfree(old_net_conf);
+ return 0;
+
+disconnect_rcu_unlock:
+ rcu_read_unlock();
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ crypto_free_hash(peer_integrity_tfm);
+ kfree(int_dig_in);
+ kfree(int_dig_vv);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* helper function
@@ -2798,24 +3276,64 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
alg, name, PTR_ERR(tfm));
return tfm;
}
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- crypto_free_hash(tfm);
- dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
- return ERR_PTR(-EINVAL);
- }
return tfm;
}
-static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
+static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int size = pi->size;
+
+ while (size) {
+ int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
+ s = drbd_recv(tconn, buffer, s);
+ if (s <= 0) {
+ if (s < 0)
+ return s;
+ break;
+ }
+ size -= s;
+ }
+ if (size)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * config_unknown_volume - device configuration command for unknown volume
+ *
+ * When a device is added to an existing connection, the node on which the
+ * device is added first will send configuration commands to its peer but the
+ * peer will not know about the device yet. It will warn and ignore these
+ * commands. Once the device is added on the second node, the second node will
+ * send the same device configuration commands, but in the other direction.
+ *
+ * (We can also end up here if drbd is misconfigured.)
+ */
+static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
{
- int ok = true;
- struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+ conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ cmdname(pi->cmd), pi->vnr);
+ return ignore_remaining_packet(tconn, pi);
+}
+
+static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
- const int apv = mdev->agreed_pro_version;
- int *rs_plan_s = NULL;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
+ struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
+ const int apv = tconn->agreed_pro_version;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -2823,32 +3341,49 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- if (packet_size > exp_max_sz) {
+ if (pi->size > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
- packet_size, exp_max_sz);
- return false;
+ pi->size, exp_max_sz);
+ return -EIO;
}
if (apv <= 88) {
- header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param);
+ data_size = pi->size - header_size;
} else if (apv <= 94) {
- header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_89);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
} else {
- header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_95);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
}
/* initialize verify_alg and csums_alg */
+ p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
- return false;
+ err = drbd_recv_all(mdev->tconn, p, header_size);
+ if (err)
+ return err;
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
+ mutex_lock(&mdev->tconn->conf_update);
+ old_net_conf = mdev->tconn->net_conf;
+ if (get_ldev(mdev)) {
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ put_ldev(mdev);
+ mutex_unlock(&mdev->tconn->conf_update);
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ return -ENOMEM;
+ }
+
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+
+ new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
+ }
if (apv >= 88) {
if (apv == 88) {
@@ -2856,12 +3391,13 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
dev_err(DEV, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
- return false;
+ err = -EIO;
+ goto reconnect;
}
- if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
- return false;
-
+ err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ if (err)
+ goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
D_ASSERT(p->verify_alg[data_size-1] == 0);
@@ -2876,10 +3412,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
- if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
+ if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.verify_alg, p->verify_alg);
+ old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2890,10 +3426,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
+ if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.csums_alg, p->csums_alg);
+ old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2904,57 +3440,91 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv > 94) {
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
- mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
- mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
- mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
- mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
-
- fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
+ if (apv > 94 && new_disk_conf) {
+ new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+ new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
+ new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
+ new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
dev_err(DEV, "kmalloc of fifo_buffer failed");
+ put_ldev(mdev);
goto disconnect;
}
}
}
- spin_lock(&mdev->peer_seq_lock);
- /* lock against drbd_nl_syncer_conf() */
- if (verify_tfm) {
- strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
- mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
- }
- if (csums_tfm) {
- strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
- mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
- }
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
+ if (verify_tfm || csums_tfm) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ dev_err(DEV, "Allocation of new net_conf failed\n");
+ goto disconnect;
+ }
+
+ *new_net_conf = *old_net_conf;
+
+ if (verify_tfm) {
+ strcpy(new_net_conf->verify_alg, p->verify_alg);
+ new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+ crypto_free_hash(mdev->tconn->verify_tfm);
+ mdev->tconn->verify_tfm = verify_tfm;
+ dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ }
+ if (csums_tfm) {
+ strcpy(new_net_conf->csums_alg, p->csums_alg);
+ new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+ crypto_free_hash(mdev->tconn->csums_tfm);
+ mdev->tconn->csums_tfm = csums_tfm;
+ dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ }
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
}
- spin_unlock(&mdev->peer_seq_lock);
}
- return ok;
+ if (new_disk_conf) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ put_ldev(mdev);
+ }
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ if (new_net_conf)
+ kfree(old_net_conf);
+ kfree(old_disk_conf);
+ kfree(old_plan);
+
+ return 0;
+
+reconnect:
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
+ return -EIO;
+
disconnect:
+ kfree(new_plan);
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
@@ -2970,59 +3540,77 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
(unsigned long long)a, (unsigned long long)b);
}
-static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_sizes *p = &mdev->data.rbuf.sizes;
+ struct drbd_conf *mdev;
+ struct p_sizes *p = pi->data;
enum determine_dev_size dd = unchanged;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
- if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
- dev_err(DEV, "some backing storage is needed\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
- }
-
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
mdev->p_size = p_size;
if (get_ldev(mdev)) {
+ rcu_read_lock();
+ my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+
warn_if_differ_considerably(mdev, "lower level device sizes",
p_size, drbd_get_max_capacity(mdev->ldev));
warn_if_differ_considerably(mdev, "user requested size",
- p_usize, mdev->ldev->dc.disk_size);
+ p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
if (mdev->state.conn == C_WF_REPORT_PARAMS)
- p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
- p_usize);
-
- my_usize = mdev->ldev->dc.disk_size;
-
- if (mdev->ldev->dc.disk_size != p_usize) {
- mdev->ldev->dc.disk_size = p_usize;
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
- (unsigned long)mdev->ldev->dc.disk_size);
- }
+ p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
+ if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
+ drbd_get_capacity(mdev->this_bdev) &&
+ mdev->state.disk >= D_OUTDATED &&
+ mdev->state.conn < C_CONNECTED) {
dev_err(DEV, "The peer's disk size is too small!\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- mdev->ldev->dc.disk_size = my_usize;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(mdev);
- return false;
+ return -EIO;
+ }
+
+ if (my_usize != p_usize) {
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(mdev);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = p_usize;
+
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+
+ dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ (unsigned long)my_usize);
}
+
put_ldev(mdev);
}
@@ -3031,7 +3619,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dd = drbd_determine_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
- return false;
+ return -EIO;
drbd_md_sync(mdev);
} else {
/* I am diskless, need to accept the peer's size. */
@@ -3070,16 +3658,25 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- return true;
+ return 0;
}
-static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_uuids *p = &mdev->data.rbuf.uuids;
+ struct drbd_conf *mdev;
+ struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ if (!p_uuid) {
+ dev_err(DEV, "kmalloc of p_uuid failed\n");
+ return -ENOMEM;
+ }
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
@@ -3093,14 +3690,14 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
(mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
- mdev->agreed_pro_version >= 90 &&
+ mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
@@ -3127,14 +3724,15 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
if (updated_uuids)
drbd_print_uuids(mdev, "receiver updated UUIDs to");
- return true;
+ return 0;
}
/**
@@ -3146,6 +3744,7 @@ static union drbd_state convert_state(union drbd_state ps)
union drbd_state ms;
static enum drbd_conns c_tab[] = {
+ [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
[C_CONNECTED] = C_CONNECTED,
[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
@@ -3167,40 +3766,74 @@ static union drbd_state convert_state(union drbd_state ps)
return ms;
}
-static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state *p = &mdev->data.rbuf.req_state;
+ struct drbd_conf *mdev;
+ struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
- test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
+ mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
- return true;
+ return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
-
drbd_send_sr_reply(mdev, rv);
+
drbd_md_sync(mdev);
- return true;
+ return 0;
}
-static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
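+/* Connection-wide state change request (P_CONN_ST_CHG_REQ): acts on the
+ * connection (tconn) itself instead of a single volume. */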
+static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_state *p = &mdev->data.rbuf.state;
+ struct p_req_state *p = pi->data;
+ union drbd_state mask, val;
+ enum drbd_state_rv rv;
+
+ mask.i = be32_to_cpu(p->mask);
+ val.i = be32_to_cpu(p->val);
+
+ if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
+ mutex_is_locked(&tconn->cstate_mutex)) {
+ conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ return 0;
+ }
+
+ mask = convert_state(mask);
+ val = convert_state(val);
+
+ rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(tconn, rv);
+
+ return 0;
+}
+
+static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
@@ -3209,16 +3842,16 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
- os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ os = ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
* we must not "re-establish" it here. */
if (os.conn <= C_TEAR_DOWN)
- return false;
+ return -ECONNRESET;
/* If this is the "end of sync" confirmation, usually the peer disk
* transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
@@ -3246,10 +3879,18 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
drbd_resync_finished(mdev);
- return true;
+ return 0;
}
}
+ /* explicit verify finished notification, stop sector reached. */
+ if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
+ peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
+ ov_out_of_sync_print(mdev);
+ drbd_resync_finished(mdev);
+ return 0;
+ }
+
/* peer says his disk is inconsistent, while we think it is uptodate,
* and this happens while the peer still thinks we have a sync going on,
* but we think we are already done with the sync.
@@ -3298,17 +3939,17 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
- return false;
+ if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ return -EIO;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
}
}
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.i != os.i)
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (os.i != drbd_read_state(mdev).i)
goto retry;
clear_bit(CONSIDER_RESYNC, &mdev->flags);
ns.peer = peer_state.role;
@@ -3317,25 +3958,25 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
test_bit(NEW_CUR_UUID, &mdev->flags)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
- return false;
+ conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ return -EIO;
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3349,16 +3990,21 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- mdev->net_conf->want_lose = 0;
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
- return true;
+ return 0;
}
-static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+ struct drbd_conf *mdev;
+ struct p_rs_uuid *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3381,7 +4027,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
- return true;
+ return 0;
}
/**
@@ -3391,27 +4037,27 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
- unsigned long *buffer, struct bm_xfer_ctx *c)
+receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+ unsigned long *p, struct bm_xfer_ctx *c)
{
- unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- unsigned want = num_words * sizeof(long);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
+ drbd_header_size(mdev->tconn);
+ unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ unsigned int want = num_words * sizeof(*p);
int err;
- if (want != data_size) {
- dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
+ if (want != size) {
+ dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv(mdev, buffer, want);
- if (err != want) {
- if (err >= 0)
- err = -EIO;
+ err = drbd_recv_all(mdev->tconn, p, want);
+ if (err)
return err;
- }
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
+ drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -3421,6 +4067,21 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
return 1;
}
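+/* Decode the compressed bitmap "encoding" byte: the low nibble holds the
+ * bitmap code, bit 7 the start toggle, bits 4-6 the number of pad bits. */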
+static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
+{
+ return (enum drbd_bitmap_code)(p->encoding & 0x0f);
+}
+
+static int dcbp_get_start(struct p_compressed_bm *p)
+{
+ return (p->encoding & 0x80) != 0;
+}
+
+static int dcbp_get_pad_bits(struct p_compressed_bm *p)
+{
+ return (p->encoding >> 4) & 0x7;
+}
+
/**
* recv_bm_rle_bits
*
@@ -3430,7 +4091,8 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
struct bitstream bs;
u64 look_ahead;
@@ -3438,12 +4100,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
u64 tmp;
unsigned long s = c->bit_offset;
unsigned long e;
- int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
- int toggle = DCBP_get_start(p);
+ int toggle = dcbp_get_start(p);
int have;
int bits;
- bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
+ bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
@@ -3495,17 +4156,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
static int
decode_bitmap_c(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
- if (DCBP_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c);
+ if (dcbp_get_code(p) == RLE_VLI_Bits)
+ return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
@@ -3513,11 +4175,13 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned plain = sizeof(struct p_header80) *
- ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
- + c->bm_words * sizeof(long);
- unsigned total = c->bytes[0] + c->bytes[1];
- unsigned r;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ unsigned int plain =
+ header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
+ c->bm_words * sizeof(unsigned long);
+ unsigned int total = c->bytes[0] + c->bytes[1];
+ unsigned int r;
/* total can not be zero. but just in case: */
if (total == 0)
@@ -3551,67 +4215,63 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
returns 0 on failure, 1 if we successfully received it. */
-static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
struct bm_xfer_ctx c;
- void *buffer;
int err;
- int ok = false;
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- buffer = (unsigned long *) __get_free_page(GFP_NOIO);
- if (!buffer) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
- goto out;
- }
-
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(mdev),
.bm_words = drbd_bm_words(mdev),
};
for(;;) {
- if (cmd == P_BITMAP) {
- err = receive_bitmap_plain(mdev, data_size, buffer, &c);
- } else if (cmd == P_COMPRESSED_BITMAP) {
+ if (pi->cmd == P_BITMAP)
+ err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
- struct p_compressed_bm *p;
+ struct p_compressed_bm *p = pi->data;
- if (data_size > BM_PACKET_PAYLOAD_BYTES) {
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
dev_err(DEV, "ReportCBitmap packet too large\n");
+ err = -EIO;
goto out;
}
- /* use the page buff */
- p = buffer;
- memcpy(p, h, sizeof(*h));
- if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
- goto out;
- if (data_size <= (sizeof(*p) - sizeof(p->head))) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
+ if (pi->size <= sizeof(*p)) {
+ dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ err = -EIO;
goto out;
}
- err = decode_bitmap_c(mdev, p, &c);
+ err = drbd_recv_all(mdev->tconn, p, pi->size);
+ if (err)
+ goto out;
+ err = decode_bitmap_c(mdev, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
+ dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ err = -EIO;
goto out;
}
- c.packets[cmd == P_BITMAP]++;
- c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
+ c.packets[pi->cmd == P_BITMAP]++;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- if (!drbd_recv_header(mdev, &cmd, &data_size))
+ err = drbd_recv_header(mdev->tconn, pi);
+ if (err)
goto out;
}
@@ -3620,8 +4280,8 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
if (mdev->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- ok = !drbd_send_bitmap(mdev);
- if (!ok)
+ err = drbd_send_bitmap(mdev);
+ if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
@@ -3632,47 +4292,40 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
drbd_conn_str(mdev->state.conn));
}
+ err = 0;
- ok = true;
out:
drbd_bm_unlock(mdev);
- if (ok && mdev->state.conn == C_WF_BITMAP_S)
+ if (!err && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
- free_page((unsigned long) buffer);
- return ok;
+ return err;
}
-static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
- /* TODO zero copy sink :) */
- static char sink[128];
- int size, want, r;
+ conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ pi->cmd, pi->size);
- dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
- cmd, data_size);
-
- size = data_size;
- while (size > 0) {
- want = min_t(int, size, sizeof(sink));
- r = drbd_recv(mdev, sink, want);
- ERR_IF(r <= 0) break;
- size -= r;
- }
- return size == 0;
+ return ignore_remaining_packet(tconn, pi);
}
-static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
+ drbd_tcp_quickack(tconn->data.socket);
- return true;
+ return 0;
}
-static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+ struct drbd_conf *mdev;
+ struct p_block_desc *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
switch (mdev->state.conn) {
case C_WF_SYNC_UUID:
@@ -3686,15 +4339,13 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
- return true;
+ return 0;
}
-typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
-
struct data_cmd {
int expect_payload;
size_t pkt_size;
- drbd_cmd_handler_f function;
+ int (*fn)(struct drbd_tconn *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -3702,13 +4353,13 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
[P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
[P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
- [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
+ [P_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
[P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
- [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
- [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
+ [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
+ [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
[P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
[P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
@@ -3720,124 +4371,75 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
- /* anything missing from this table is in
- * the asender_tbl, see get_asender_cmd */
- [P_MAX_CMD] = { 0, 0, NULL },
+ [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
+ [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
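+ /* anything missing from this table is handled on the meta socket,
+ * see asender_tbl[] and drbd_asender() */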
};
-/* All handler functions that expect a sub-header get that sub-heder in
- mdev->data.rbuf.header.head.payload.
-
- Usually in mdev->data.rbuf.header.head the callback can find the usual
- p_header, but they may not rely on that. Since there is also p_header95 !
- */
-
-static void drbdd(struct drbd_conf *mdev)
+static void drbdd(struct drbd_tconn *tconn)
{
- union p_header *header = &mdev->data.rbuf.header;
- unsigned int packet_size;
- enum drbd_packets cmd;
+ struct packet_info pi;
size_t shs; /* sub header size */
- int rv;
+ int err;
+
+ while (get_t_state(&tconn->receiver) == RUNNING) {
+ struct data_cmd *cmd;
- while (get_t_state(&mdev->receiver) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (!drbd_recv_header(mdev, &cmd, &packet_size))
+ drbd_thread_current_set_cpu(&tconn->receiver);
+ if (drbd_recv_header(tconn, &pi))
goto err_out;
- if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
- dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
+ cmd = &drbd_cmd_handler[pi.cmd];
+ if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
+ conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ cmdname(pi.cmd), pi.cmd);
goto err_out;
}
- shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
- if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
- dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
+ shs = cmd->pkt_size;
+ if (pi.size > shs && !cmd->expect_payload) {
+ conn_err(tconn, "No payload expected %s l:%d\n",
+ cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- rv = drbd_recv(mdev, &header->h80.payload, shs);
- if (unlikely(rv != shs)) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
+ err = drbd_recv_all_warn(tconn, pi.data, shs);
+ if (err)
goto err_out;
- }
+ pi.size -= shs;
}
- rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
-
- if (unlikely(!rv)) {
- dev_err(DEV, "error receiving %s, l: %d!\n",
- cmdname(cmd), packet_size);
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ cmdname(pi.cmd), err, pi.size);
goto err_out;
}
}
+ return;
- if (0) {
- err_out:
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
- }
- /* If we leave here, we probably want to update at least the
- * "Connected" indicator on stable storage. Do so explicitly here. */
- drbd_md_sync(mdev);
+ err_out:
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
-void drbd_flush_workqueue(struct drbd_conf *mdev)
+void conn_flush_workqueue(struct drbd_tconn *tconn)
{
struct drbd_wq_barrier barr;
barr.w.cb = w_prev_work_done;
+ barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&mdev->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
-void drbd_free_tl_hash(struct drbd_conf *mdev)
-{
- struct hlist_head *h;
-
- spin_lock_irq(&mdev->req_lock);
-
- if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
- spin_unlock_irq(&mdev->req_lock);
- return;
- }
- /* paranoia code */
- for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
- if (h->first)
- dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
- (int)(h - mdev->ee_hash), h->first);
- kfree(mdev->ee_hash);
- mdev->ee_hash = NULL;
- mdev->ee_hash_s = 0;
-
- /* We may not have had the chance to wait for all locally pending
- * application requests. The hlist_add_fake() prevents access after
- * free on master bio completion. */
- for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
- struct drbd_request *req;
- struct hlist_node *pos, *n;
- hlist_for_each_entry_safe(req, pos, n, h, collision) {
- hlist_del_init(&req->collision);
- hlist_add_fake(&req->collision);
- }
- }
-
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void drbd_disconnect(struct drbd_conf *mdev)
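+/* Tear down the whole connection: stop the asender, free the sockets, and
+ * run the per-volume cleanup (drbd_disconnected) for every volume. */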
+static void conn_disconnect(struct drbd_tconn *tconn)
{
- enum drbd_fencing_p fp;
- union drbd_state os, ns;
- int rv = SS_UNKNOWN_ERROR;
- unsigned int i;
+ struct drbd_conf *mdev;
+ enum drbd_conns oc;
+ int vnr;
- if (mdev->state.conn == C_STANDALONE)
+ if (tconn->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -3845,18 +4447,54 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&mdev->asender);
- drbd_free_sock(mdev);
+ drbd_thread_stop(&tconn->asender);
+ drbd_free_sock(tconn);
+
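+ /* drbd_disconnected() may sleep; take a reference on each volume and
+ * drop the RCU read lock around the call. */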
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_disconnected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ if (!list_empty(&tconn->current_epoch->list))
+ conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+ atomic_set(&tconn->current_epoch->epoch_size, 0);
+ tconn->send.seen_any_write_yet = false;
+
+ conn_info(tconn, "Connection closed\n");
+
+ if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(tconn);
+
+ spin_lock_irq(&tconn->req_lock);
+ oc = tconn->cstate;
+ if (oc >= C_UNCONNECTED)
+ _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ spin_unlock_irq(&tconn->req_lock);
+
+ if (oc == C_DISCONNECTING)
+ conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+}
+
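+/* Per-volume cleanup after connection loss: wait for pending peer requests,
+ * reset resync book-keeping and clear the transfer log. */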
+static int drbd_disconnected(struct drbd_conf *mdev)
+{
+ unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -3874,7 +4512,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- /* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -3883,50 +4520,25 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* to be "canceled" */
drbd_flush_workqueue(mdev);
- /* This also does reclaim_net_ee(). If we do this too early, we might
- * miss some resync ee and pages.*/
- drbd_process_done_ee(mdev);
+ drbd_finish_peer_reqs(mdev);
+
+ /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
+ might have queued new work again. The flush before drbd_finish_peer_reqs() is
+ necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
+ drbd_flush_workqueue(mdev);
+
+ /* need to do it again, drbd_finish_peer_reqs() may have populated it
+ * again via drbd_try_clear_on_disk_bm(). */
+ drbd_rs_cancel_all(mdev);
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
- if (!is_susp(mdev->state))
- tl_clear(mdev);
-
- dev_info(DEV, "Connection closed\n");
+ if (!drbd_suspended(mdev))
+ tl_clear(mdev->tconn);
drbd_md_sync(mdev);
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
- drbd_try_outdate_peer_async(mdev);
-
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- if (os.conn >= C_UNCONNECTED) {
- /* Do not restart in case we are C_DISCONNECTING */
- ns = os;
- ns.conn = C_UNCONNECTED;
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (os.conn == C_DISCONNECTING) {
- wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
-
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
-
- kfree(mdev->net_conf);
- mdev->net_conf = NULL;
- drbd_request_state(mdev, NS(conn, C_STANDALONE));
- }
-
/* serialize with bitmap writeout triggered by the state change,
* if any. */
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
@@ -3938,7 +4550,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_release_ee(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (i)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use_by_net);
@@ -3953,9 +4565,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
+ return 0;
}
/*
@@ -3967,29 +4577,19 @@ static void drbd_disconnect(struct drbd_conf *mdev)
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_handshake(struct drbd_conf *mdev)
+static int drbd_send_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.sbuf.handshake;
- int ok;
-
- if (mutex_lock_interruptible(&mdev->data.mutex)) {
- dev_err(DEV, "interrupted during initial handshake\n");
- return 0; /* interrupted. not ok. */
- }
-
- if (mdev->data.socket == NULL) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
+ struct drbd_socket *sock;
+ struct p_connection_features *p;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
- (struct p_header80 *)p, sizeof(*p), 0 );
- mutex_unlock(&mdev->data.mutex);
- return ok;
+ return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -3999,42 +4599,38 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_handshake(struct drbd_conf *mdev)
+static int drbd_do_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.rbuf.handshake;
- const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
- unsigned int length;
- enum drbd_packets cmd;
- int rv;
+ /* ASSERT current == tconn->receiver ... */
+ struct p_connection_features *p;
+ const int expect = sizeof(struct p_connection_features);
+ struct packet_info pi;
+ int err;
- rv = drbd_send_handshake(mdev);
- if (!rv)
+ err = drbd_send_features(tconn);
+ if (err)
return 0;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err)
return 0;
- if (cmd != P_HAND_SHAKE) {
- dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_CONNECTION_FEATURES) {
+ conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
return -1;
}
- if (length != expect) {
- dev_err(DEV, "expected HandShake length: %u, received: %u\n",
- expect, length);
+ if (pi.size != expect) {
+ conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ expect, pi.size);
return -1;
}
- rv = drbd_recv(mdev, &p->head.payload, expect);
-
- if (rv != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
+ p = pi.data;
+ err = drbd_recv_all_warn(tconn, p, expect);
+ if (err)
return 0;
- }
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
@@ -4045,15 +4641,15 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- dev_info(DEV, "Handshake successful: "
- "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+ conn_info(tconn, "Handshake successful: "
+ "Agreed network protocol version %d\n", tconn->agreed_pro_version);
return 1;
incompat:
- dev_err(DEV, "incompatible DRBD dialects: "
+ conn_err(tconn, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4061,7 +4657,7 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
}
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
@@ -4076,121 +4672,139 @@ static int drbd_do_auth(struct drbd_conf *mdev)
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
+ struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
struct hash_desc desc;
- enum drbd_packets cmd;
- unsigned int length;
- int rv;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+
+ /* FIXME: Put the challenge/response into the preallocated socket buffer. */
- desc.tfm = mdev->cram_hmac_tfm;
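+ /* Copy the shared secret to the stack under rcu_read_lock(); net_conf
+ * may be replaced concurrently. */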
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ key_len = strlen(nc->shared_secret);
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
+ desc.tfm = tconn->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
- (u8 *)mdev->net_conf->shared_secret, key_len);
+ rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
+ sock = &tconn->data;
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
+ goto fail;
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
+ rv = 0;
goto fail;
+ }
- if (cmd != P_AUTH_CHALLENGE) {
- dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_AUTH_CHALLENGE) {
+ conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- if (length > CHALLENGE_LEN * 2) {
- dev_err(DEV, "expected AuthChallenge payload too big.\n");
+ if (pi.size > CHALLENGE_LEN * 2) {
+ conn_err(tconn, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
- peers_ch = kmalloc(length, GFP_NOIO);
+ peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- dev_err(DEV, "kmalloc of peers_ch failed\n");
+ conn_err(tconn, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- rv = drbd_recv(mdev, peers_ch, length);
-
- if (rv != length) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- dev_err(DEV, "kmalloc of response failed\n");
+ conn_err(tconn, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, length);
+ sg_set_buf(&sg, peers_ch, pi.size);
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
- if (!rv)
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
goto fail;
-
- rv = drbd_recv_header(mdev, &cmd, &length);
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ response, resp_size);
if (!rv)
goto fail;
- if (cmd != P_AUTH_RESPONSE) {
- dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
rv = 0;
goto fail;
}
- if (length != resp_size) {
- dev_err(DEV, "expected AuthResponse payload of wrong size\n");
+ if (pi.cmd != P_AUTH_RESPONSE) {
+ conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- rv = drbd_recv(mdev, response , resp_size);
+ if (pi.size != resp_size) {
+ conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ rv = 0;
+ goto fail;
+ }
- if (rv != resp_size) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, response, resp_size);
+ if (err) {
rv = 0;
goto fail;
}
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- dev_err(DEV, "kmalloc of right_response failed\n");
+ conn_err(tconn, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4199,7 +4813,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4207,8 +4821,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = !memcmp(response, right_response, resp_size);
if (rv)
- dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
- resp_size, mdev->net_conf->cram_hmac_alg);
+ conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ resp_size);
else
rv = -1;
@@ -4223,82 +4837,106 @@ static int drbd_do_auth(struct drbd_conf *mdev)
int drbdd_init(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- unsigned int minor = mdev_to_minor(mdev);
+ struct drbd_tconn *tconn = thi->tconn;
int h;
- sprintf(current->comm, "drbd%d_receiver", minor);
-
- dev_info(DEV, "receiver (re)started\n");
+ conn_info(tconn, "receiver (re)started\n");
do {
- h = drbd_connect(mdev);
+ h = conn_connect(tconn);
if (h == 0) {
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- dev_warn(DEV, "Discarding network configuration.\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_warn(tconn, "Discarding network configuration.\n");
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
- if (h > 0) {
- if (get_net_conf(mdev)) {
- drbdd(mdev);
- put_net_conf(mdev);
- }
- }
+ if (h > 0)
+ drbdd(tconn);
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
- dev_info(DEV, "receiver terminated\n");
+ conn_info(tconn, "receiver terminated\n");
return 0;
}
/* ********* acknowledge sender ******** */
-static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
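+/* Peer's reply to a connection-wide state change request: record the outcome
+ * in the connection flags and wake up waiters on ping_wait. */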
+static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state_reply *p = (struct p_req_state_reply *)h;
+ struct p_req_state_reply *p = pi->data;
+ int retcode = be32_to_cpu(p->retcode);
+
+ if (retcode >= SS_SUCCESS) {
+ set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
+ } else {
+ set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
+ conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ drbd_set_st_err_str(retcode), retcode);
+ }
+ wake_up(&tconn->ping_wait);
+
+ return 0;
+}
+static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
+ D_ASSERT(tconn->agreed_pro_version < 100);
+ return got_conn_RqSReply(tconn, pi);
+ }
+
if (retcode >= SS_SUCCESS) {
set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
} else {
set_bit(CL_ST_CHG_FAIL, &mdev->flags);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
- drbd_set_st_err_str(retcode), retcode);
+ drbd_set_st_err_str(retcode), retcode);
}
wake_up(&mdev->state_wait);
- return true;
+ return 0;
}
-static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
- return drbd_send_ping_ack(mdev);
+ return drbd_send_ping_ack(tconn);
}
-static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
- wake_up(&mdev->misc_wait);
+ tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
+ wake_up(&tconn->ping_wait);
- return true;
+ return 0;
}
-static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4312,162 +4950,139 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
dec_rs_pending(mdev);
atomic_add(blksize >> 9, &mdev->rs_sect_in);
- return true;
-}
-
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = tl_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- if (req->sector != sector) {
- dev_err(DEV, "_ack_id_to_req: found req %p but it has "
- "wrong sector (%llus versus %llus)\n", req,
- (unsigned long long)req->sector,
- (unsigned long long)sector);
- break;
- }
- return req;
- }
- }
- return NULL;
+ return 0;
}
-typedef struct drbd_request *(req_validator_fn)
- (struct drbd_conf *mdev, u64 id, sector_t sector);
-
-static int validate_req_change_req_state(struct drbd_conf *mdev,
- u64 id, sector_t sector, req_validator_fn validator,
- const char *func, enum drbd_req_event what)
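+/* Look up the request for @id/@sector in @root and apply @what to it; returns
+ * -EIO if no such request is found (callers pass missing_ok when that is expected). */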
+static int
+validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+ struct rb_root *root, const char *func,
+ enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
- req = validator(mdev, id, sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
-
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
- (void *)(unsigned long)id, (unsigned long long)sector);
- return false;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
drbd_set_in_sync(mdev, sector, blksize);
dec_rs_pending(mdev);
- return true;
+ return 0;
}
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_RS_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
break;
- case P_DISCARD_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ case P_SUPERSEDED:
+ what = CONFLICT_RESOLVED;
+ break;
+ case P_RETRY_WRITE:
+ what = POSTPONE_WRITE;
break;
default:
- D_ASSERT(0);
- return false;
+ BUG();
}
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ack_id_to_req, __func__ , what);
+ &mdev->write_requests, __func__,
+ what, false);
}
-static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
- struct drbd_request *req;
- struct bio_and_error m;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
dec_rs_pending(mdev);
drbd_rs_failed_io(mdev, sector, size);
- return true;
+ return 0;
}
- spin_lock_irq(&mdev->req_lock);
- req = _ack_id_to_req(mdev, p->block_id, sector);
- if (!req) {
- spin_unlock_irq(&mdev->req_lock);
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
- mdev->net_conf->wire_protocol == DRBD_PROT_B) {
- /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
- The master bio might already be completed, therefore the
- request is no longer in the collision hash.
- => Do not try to validate block_id as request. */
- /* In Protocol B we might already have got a P_RECV_ACK
- but then get a P_NEG_ACK after wards. */
- drbd_set_out_of_sync(mdev, sector, size);
- return true;
- } else {
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
- (void *)(unsigned long)p->block_id, (unsigned long long)sector);
- return false;
- }
+ err = validate_req_change_req_state(mdev, p->block_id, sector,
+ &mdev->write_requests, __func__,
+ NEG_ACKED, true);
+ if (err) {
+ /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+ The master bio might already be completed, therefore the
+ request is no longer in the collision hash. */
+ /* In Protocol B we might already have got a P_RECV_ACK
+ but then get a P_NEG_ACK afterwards. */
+ drbd_set_out_of_sync(mdev, sector, size);
}
- __req_mod(req, neg_acked, &m);
- spin_unlock_irq(&mdev->req_lock);
-
- if (m.bio)
- complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
+
+ dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ar_id_to_req, __func__ , neg_acked);
+ &mdev->read_requests, __func__,
+ NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
int size;
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct p_block_ack *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4478,57 +5093,66 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_NEG_RS_DREPLY:
drbd_rs_failed_io(mdev, sector, size);
case P_RS_CANCEL:
break;
default:
- D_ASSERT(0);
- put_ldev(mdev);
- return false;
+ BUG();
}
put_ldev(mdev);
}
- return true;
+ return 0;
}
-static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_barrier_ack *p = (struct p_barrier_ack *)h;
-
- tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
-
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ struct p_barrier_ack *p = pi->data;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
+
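+ /* After a barrier ack, volumes in Ahead mode with no application writes
+ * in flight arm the timer that switches them back to resync. */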
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_AHEAD &&
+ atomic_read(&mdev->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ mdev->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&mdev->start_resync_timer);
+ }
}
+ rcu_read_unlock();
- return true;
+ return 0;
}
-static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
struct drbd_work *w;
sector_t sector;
int size;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
if (!get_ldev(mdev))
- return true;
+ return 0;
drbd_rs_complete_io(mdev, sector);
dec_rs_pending(mdev);
@@ -4543,114 +5167,137 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
- drbd_queue_work_front(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
}
put_ldev(mdev);
- return true;
+ return 0;
+}
+
+static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ return 0;
}
-static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
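+/* Flush the done_ee list of every volume on this connection; returns nonzero
+ * as soon as one volume fails to finish its peer requests. */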
+static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
- return true;
+ struct drbd_conf *mdev;
+ int vnr, not_empty = 0;
+
+ do {
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ flush_signals(current);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ if (drbd_finish_peer_reqs(mdev)) {
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return 1;
+ }
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ set_bit(SIGNAL_ASENDER, &tconn->flags);
+
+ spin_lock_irq(&tconn->req_lock);
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ not_empty = !list_empty(&mdev->done_ee);
+ if (not_empty)
+ break;
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ rcu_read_unlock();
+ } while (not_empty);
+
+ return 0;
}
struct asender_cmd {
size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
+ int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};
-static struct asender_cmd *get_asender_cmd(int cmd)
-{
- static struct asender_cmd asender_tbl[] = {
- /* anything missing from this table is in
- * the drbd_cmd_handler (drbd_default_handler) table,
- * see the beginning of drbdd() */
- [P_PING] = { sizeof(struct p_header80), got_Ping },
- [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
+static struct asender_cmd asender_tbl[] = {
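+ /* anything missing from this table is handled on the data socket,
+ * see drbd_cmd_handler[] and drbdd() */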
+ [P_PING] = { 0, got_Ping },
+ [P_PING_ACK] = { 0, got_PingAck },
[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
- [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
- [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
+ [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
- [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
- [P_MAX_CMD] = { 0, NULL },
- };
- if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
- return NULL;
- return &asender_tbl[cmd];
-}
+ [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
+ [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
+ [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
+};
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+ struct drbd_tconn *tconn = thi->tconn;
struct asender_cmd *cmd = NULL;
-
- int rv, len;
- void *buf = h;
+ struct packet_info pi;
+ int rv;
+ void *buf = tconn->meta.rbuf;
int received = 0;
- int expect = sizeof(struct p_header80);
- int empty;
- int ping_timeout_active = 0;
-
- sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
+ unsigned int header_size = drbd_header_size(tconn);
+ int expect = header_size;
+ bool ping_timeout_active = false;
+ struct net_conf *nc;
+ int ping_timeo, tcp_cork, ping_int;
current->policy = SCHED_RR; /* Make this a realtime task! */
current->rt_priority = 2; /* more important than all other tasks */
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
- ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
- mdev->meta.socket->sk->sk_rcvtimeo =
- mdev->net_conf->ping_timeo*HZ/10;
- ping_timeout_active = 1;
- }
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
- /* conditionally cork;
- * it may hurt latency if we cork without much to send */
- if (!mdev->net_conf->no_cork &&
- 3 < atomic_read(&mdev->unacked_cnt))
- drbd_tcp_cork(mdev->meta.socket);
- while (1) {
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
- flush_signals(current);
- if (!drbd_process_done_ee(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ ping_timeo = nc->ping_timeo;
+ tcp_cork = nc->tcp_cork;
+ ping_int = nc->ping_int;
+ rcu_read_unlock();
+
+ if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
+ if (drbd_send_ping(tconn)) {
+ conn_err(tconn, "drbd_send_ping has failed\n");
goto reconnect;
- /* to avoid race with newly queued ACKs */
- set_bit(SIGNAL_ASENDER, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
- /* new ack may have been queued right here,
- * but then there is also a signal pending,
- * and we start over... */
- if (empty)
- break;
+ }
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ ping_timeout_active = true;
+ }
+
+ /* TODO: conditionally cork; it may hurt latency if we cork without
+ much to send */
+ if (tcp_cork)
+ drbd_tcp_cork(tconn->meta.socket);
+ if (tconn_finish_peer_reqs(tconn)) {
+ conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ goto reconnect;
}
/* but unconditionally uncork unless disabled */
- if (!mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->meta.socket);
+ if (tcp_cork)
+ drbd_tcp_uncork(tconn->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(mdev, mdev->meta.socket,
- buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
flush_signals(current);
@@ -4668,80 +5315,91 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
- dev_err(DEV, "meta connection shut down by peer.\n");
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
+
+ t = wait_event_timeout(tconn->ping_wait,
+ tconn->cstate < C_WF_REPORT_PARAMS,
+ t);
+ if (t)
+ break;
+ }
+ conn_err(tconn, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(mdev->last_received,
- jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(tconn->last_received,
+ jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- dev_err(DEV, "PingAck did not arrive in time.\n");
+ conn_err(tconn, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &mdev->flags);
+ set_bit(SEND_PING, &tconn->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (unlikely(h->magic != BE_DRBD_MAGIC)) {
- dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ if (decode_header(tconn, tconn->meta.rbuf, &pi))
goto reconnect;
- }
- cmd = get_asender_cmd(be16_to_cpu(h->command));
- len = be16_to_cpu(h->length);
- if (unlikely(cmd == NULL)) {
- dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ cmd = &asender_tbl[pi.cmd];
+ if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
+ conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
- expect = cmd->pkt_size;
- ERR_IF(len != expect-sizeof(struct p_header80))
+ expect = header_size + cmd->pkt_size;
+ if (pi.size != expect - header_size) {
+ conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ pi.cmd, pi.size);
goto reconnect;
+ }
}
if (received == expect) {
- mdev->last_received = jiffies;
- D_ASSERT(cmd != NULL);
- if (!cmd->process(mdev, h))
+ bool err;
+
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "%pf failed\n", cmd->fn);
goto reconnect;
+ }
+
+ tconn->last_received = jiffies;
- /* the idle_timeout (ping-int)
- * has been restored in got_PingAck() */
- if (cmd == get_asender_cmd(P_PING_ACK))
- ping_timeout_active = 0;
+ if (cmd == &asender_tbl[P_PING_ACK]) {
+ /* restore idle timeout */
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ ping_timeout_active = false;
+ }
- buf = h;
+ buf = tconn->meta.rbuf;
received = 0;
- expect = sizeof(struct p_header80);
+ expect = header_size;
cmd = NULL;
}
}
if (0) {
reconnect:
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(tconn);
}
if (0) {
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
- D_ASSERT(mdev->state.conn < C_CONNECTED);
- dev_info(DEV, "asender terminated\n");
+ conn_info(tconn, "asender terminated\n");
return 0;
}
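
The reworked asender loop above accumulates bytes until a full header has arrived, validates the command against asender_tbl[], then keeps receiving until that command's fixed-size payload is complete before dispatching through cmd->fn(). The stand-alone sketch below shows just that accumulate/decode/dispatch pattern; the 4-byte header, the command numbers and all helper names are invented for illustration and are not DRBD's wire format (which is big-endian and larger), and the sketch assumes a little-endian host.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct pkt_info { uint16_t cmd; uint16_t size; };

static int got_ping(const struct pkt_info *pi)      { (void)pi; printf("ping\n"); return 0; }
static int got_block_ack(const struct pkt_info *pi) { printf("ack, %u payload bytes\n", pi->size); return 0; }

struct cmd { size_t pkt_size; int (*fn)(const struct pkt_info *); };

static const struct cmd tbl[] = {
	[0] = { 0, got_ping },
	[1] = { 8, got_block_ack },
};

/* feed a chunk of received bytes into the state machine; <0 on protocol error */
static int feed(const uint8_t *data, size_t len)
{
	static uint8_t buf[128];
	static size_t received, expect = 4;	/* 4-byte header: cmd + size */
	static int have_header;
	static struct pkt_info pi;

	while (len) {
		size_t want = expect - received;
		size_t n = want < len ? want : len;

		memcpy(buf + received, data, n);
		received += n;
		data += n;
		len -= n;

		if (received == expect && !have_header) {
			memcpy(&pi.cmd, buf, 2);	/* no byte-order handling, for brevity */
			memcpy(&pi.size, buf + 2, 2);
			if (pi.cmd >= sizeof(tbl) / sizeof(tbl[0]) || !tbl[pi.cmd].fn)
				return -1;		/* unexpected command */
			if (pi.size != tbl[pi.cmd].pkt_size)
				return -1;		/* wrong packet size */
			expect = 4 + pi.size;		/* now also wait for the payload */
			have_header = 1;
		}
		if (received == expect && have_header) {
			tbl[pi.cmd].fn(&pi);		/* dispatch */
			received = 0;
			expect = 4;
			have_header = 0;
		}
	}
	return 0;
}

int main(void)
{
	/* a ping (cmd 0, no payload) followed by a block ack (cmd 1, 8-byte payload) */
	uint8_t stream[] = { 0, 0, 0, 0,   1, 0, 8, 0,   1, 2, 3, 4, 5, 6, 7, 8 };

	return feed(stream, sizeof(stream)) ? 1 : 0;
}
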
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 01b2ac641c7..f58a4a4b4df 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -31,6 +31,8 @@
#include "drbd_req.h"
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
@@ -40,6 +42,8 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ (void) cpu; /* The macro invocations above want the cpu argument; the cast to void
+ silences the compiler warning about cpu being assigned but never used. */
part_inc_in_flight(&mdev->vdisk->part0, rw);
part_stat_unlock();
}
@@ -57,9 +61,51 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
part_stat_unlock();
}
-static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
+static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+ struct bio *bio_src)
+{
+ struct drbd_request *req;
+
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+ if (!req)
+ return NULL;
+
+ drbd_req_make_private_bio(req, bio_src);
+ req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
+ req->w.mdev = mdev;
+ req->master_bio = bio_src;
+ req->epoch = 0;
+
+ drbd_clear_interval(&req->i);
+ req->i.sector = bio_src->bi_sector;
+ req->i.size = bio_src->bi_size;
+ req->i.local = true;
+ req->i.waiting = false;
+
+ INIT_LIST_HEAD(&req->tl_requests);
+ INIT_LIST_HEAD(&req->w.list);
+
+ /* one reference to be put by __drbd_make_request */
+ atomic_set(&req->completion_ref, 1);
+ /* one kref as long as completion_ref > 0 */
+ kref_init(&req->kref);
+ return req;
+}
+
+void drbd_req_destroy(struct kref *kref)
{
- const unsigned long s = req->rq_state;
+ struct drbd_request *req = container_of(kref, struct drbd_request, kref);
+ struct drbd_conf *mdev = req->w.mdev;
+ const unsigned s = req->rq_state;
+
+ if ((req->master_bio && !(s & RQ_POSTPONED)) ||
+ atomic_read(&req->completion_ref) ||
+ (s & RQ_LOCAL_PENDING) ||
+ ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
+ dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
+ s, atomic_read(&req->completion_ref));
+ return;
+ }
/* remove it from the transfer log.
* well, only if it had been there in the first
@@ -67,24 +113,33 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* and never sent), it should still be "empty" as
* initialized in drbd_req_new(), so we can list_del() it
* here unconditionally */
- list_del(&req->tl_requests);
+ list_del_init(&req->tl_requests);
/* if it was a write, we may have to set the corresponding
* bit(s) out-of-sync first. If it had a local part, we need to
* release the reference to the activity log. */
- if (rw == WRITE) {
+ if (s & RQ_WRITE) {
/* Set out-of-sync unless both OK flags are set
* (local only or remote failed).
* Other places where we set out-of-sync:
* READ with local io-error */
- if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->sector, req->size);
- if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->sector, req->size);
+ /* There is a special case:
+ * we may notice late that IO was suspended,
+ * and postpone, or schedule for retry, a write,
+ * before it even was submitted or sent.
+ * In that case we do not want to touch the bitmap at all.
+ */
+ if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
+ if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+
+ if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
+ drbd_set_in_sync(mdev, req->i.sector, req->i.size);
+ }
/* one might be tempted to move the drbd_al_complete_io
- * to the local io completion callback drbd_endio_pri.
+ * to the local io completion callback drbd_request_endio.
* but, if this was a mirror write, we may only
* drbd_al_complete_io after this is RQ_NET_DONE,
* otherwise the extent could be dropped from the al
@@ -93,109 +148,35 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* but after the extent has been dropped from the al,
* we would forget to resync the corresponding extent.
*/
- if (s & RQ_LOCAL_MASK) {
+ if (s & RQ_IN_ACT_LOG) {
if (get_ldev_if_state(mdev, D_FAILED)) {
- if (s & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, req->sector);
+ drbd_al_complete_io(mdev, &req->i);
put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
- dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
- "but my Disk seems to have failed :(\n",
- (unsigned long long) req->sector);
+ dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+ "but my Disk seems to have failed :(\n",
+ (unsigned long long) req->i.sector, req->i.size);
}
}
}
- drbd_req_free(req);
+ mempool_free(req, drbd_request_mempool);
}
-static void queue_barrier(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* We are within the req_lock. Once we queued the barrier for sending,
- * we set the CREATE_BARRIER bit. It is cleared as soon as a new
- * barrier/epoch object is added. This is the only place this bit is
- * set. It indicates that the barrier for this epoch is already queued,
- * and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
- return;
-
- b = mdev->newest_tle;
- b->w.cb = w_send_barrier;
- /* inc_ap_pending done here, so we won't
- * get imbalanced on connection loss.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in tl_clear. */
- inc_ap_pending(mdev);
- drbd_queue_work(&mdev->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+static void wake_all_senders(struct drbd_tconn *tconn) {
+ wake_up(&tconn->sender_work.q_wait);
}
-static void _about_to_complete_local_write(struct drbd_conf *mdev,
- struct drbd_request *req)
+/* must hold resource->req_lock */
+static void start_new_tl_epoch(struct drbd_tconn *tconn)
{
- const unsigned long s = req->rq_state;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
-
- /* Before we can signal completion to the upper layers,
- * we may need to close the current epoch.
- * We can skip this, if this request has not even been sent, because we
- * did not have a fully established connection yet/anymore, during
- * bitmap exchange, or while we are C_AHEAD due to congestion policy.
- */
- if (mdev->state.conn >= C_CONNECTED &&
- (s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->newest_tle->br_number)
- queue_barrier(mdev);
-
- /* we need to do the conflict detection stuff,
- * if we have the ee_hash (two_primaries) and
- * this has been on the network */
- if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
- const sector_t sector = req->sector;
- const int size = req->size;
-
- /* ASSERT:
- * there must be no conflicting requests, since
- * they must have been failed on the spot */
-#define OVERLAPS overlaps(sector, size, i->sector, i->size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
- "other: %p %llus +%u\n",
- req, (unsigned long long)sector, size,
- i, (unsigned long long)i->sector, i->size);
- }
- }
+ /* no point in closing an epoch if it is empty anyway. */
+ if (tconn->current_tle_writes == 0)
+ return;
- /* maybe "wake" those conflicting epoch entries
- * that wait for this request to finish.
- *
- * currently, there can be only _one_ such ee
- * (well, or some more, which would be pending
- * P_DISCARD_ACK not yet sent by the asender...),
- * since we block the receiver thread upon the
- * first conflict detection, which will wait on
- * misc_wait. maybe we want to assert that?
- *
- * anyways, if we found one,
- * we just have to do a wake_up. */
-#undef OVERLAPS
-#define OVERLAPS overlaps(sector, size, e->sector, e->size)
- slot = ee_hash_slot(mdev, req->sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- wake_up(&mdev->misc_wait);
- break;
- }
- }
- }
-#undef OVERLAPS
+ tconn->current_tle_writes = 0;
+ atomic_inc(&tconn->current_tle_nr);
+ wake_all_senders(tconn);
}
void complete_master_bio(struct drbd_conf *mdev,
@@ -205,17 +186,33 @@ void complete_master_bio(struct drbd_conf *mdev,
dec_ap_bio(mdev);
}
+
+static void drbd_remove_request_interval(struct rb_root *root,
+ struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i = &req->i;
+
+ drbd_remove_interval(root, i);
+
+ /* Wake up any processes waiting for this request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
* if it has already been completed, or cannot be completed yet.
* If m->bio is set, the error status to be returned is placed in m->error.
*/
-void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
+static
+void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
- const unsigned long s = req->rq_state;
- struct drbd_conf *mdev = req->mdev;
- int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
+ const unsigned s = req->rq_state;
+ struct drbd_conf *mdev = req->w.mdev;
+ int rw;
+ int error, ok;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -226,165 +223,220 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
* the receiver,
* the bio_endio completion callbacks.
*/
- if (s & RQ_NET_QUEUED)
- return;
- if (s & RQ_NET_PENDING)
+ if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
+ (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
+ (s & RQ_COMPLETION_SUSP)) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
- if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
+ }
+
+ if (!req->master_bio) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
+ }
- if (req->master_bio) {
- /* this is data_received (remote read)
- * or protocol C P_WRITE_ACK
- * or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
- * or canceled or failed,
- * or killed from the transfer log due to connection loss.
- */
+ rw = bio_rw(req->master_bio);
- /*
- * figure out whether to report success or failure.
- *
- * report success when at least one of the operations succeeded.
- * or, to put the other way,
- * only report failure, when both operations failed.
- *
- * what to do about the failures is handled elsewhere.
- * what we need to do here is just: complete the master_bio.
- *
- * local completion error, if any, has been stored as ERR_PTR
- * in private_bio within drbd_endio_pri.
- */
- int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
- int error = PTR_ERR(req->private_bio);
+ /*
+ * figure out whether to report success or failure.
+ *
+ * report success when at least one of the operations succeeded.
+ * or, to put the other way,
+ * only report failure, when both operations failed.
+ *
+ * what to do about the failures is handled elsewhere.
+ * what we need to do here is just: complete the master_bio.
+ *
+ * local completion error, if any, has been stored as ERR_PTR
+ * in private_bio within drbd_request_endio.
+ */
+ ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
+ error = PTR_ERR(req->private_bio);
- /* remove the request from the conflict detection
- * respective block_id verification hash */
- if (!hlist_unhashed(&req->collision))
- hlist_del(&req->collision);
- else
- D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+ /* remove the request from the conflict detection
+ * respective block_id verification hash */
+ if (!drbd_interval_empty(&req->i)) {
+ struct rb_root *root;
- /* for writes we need to do some extra housekeeping */
if (rw == WRITE)
- _about_to_complete_local_write(mdev, req);
+ root = &mdev->write_requests;
+ else
+ root = &mdev->read_requests;
+ drbd_remove_request_interval(root, req);
+ } else if (!(s & RQ_POSTPONED))
+ D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
- /* Update disk stats */
- _drbd_end_io_acct(mdev, req);
+ /* Before we can signal completion to the upper layers,
+ * we may need to close the current transfer log epoch.
+ * We are within the request lock, so we can simply compare
+ * the request epoch number with the current transfer log
+ * epoch number. If they match, increase the current_tle_nr,
+ * and reset the transfer log epoch write_cnt.
+ */
+ if (rw == WRITE &&
+ req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
+ start_new_tl_epoch(mdev->tconn);
+
+ /* Update disk stats */
+ _drbd_end_io_acct(mdev, req);
+
+ /* If READ failed,
+ * have it be pushed back to the retry work queue,
+ * so it will re-enter __drbd_make_request(),
+ * and be re-assigned to a suitable local or remote path,
+ * or failed if we do not have access to good data anymore.
+ *
+ * Unless it was failed early by __drbd_make_request(),
+ * because no path was available, in which case
+ * it was not even added to the transfer_log.
+ *
+ * READA may fail, and will not be retried.
+ *
+ * WRITE should have used all available paths already.
+ */
+ if (!ok && rw == READ && !list_empty(&req->tl_requests))
+ req->rq_state |= RQ_POSTPONED;
+ if (!(req->rq_state & RQ_POSTPONED)) {
m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio;
req->master_bio = NULL;
}
+}
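
The status reported to the upper layer by drbd_req_complete() follows one rule: the master bio succeeds if at least one of the local and the network parts succeeded, and fails only when both failed, in which case the stored local error (or -EIO) is returned. A minimal restatement of just that decision, with made-up flag names and the GCC `?:` shorthand spelled out:

#include <errno.h>
#include <stdbool.h>

/* hypothetical per-request state bits, loosely modelled on RQ_LOCAL_OK/RQ_NET_OK */
#define REQ_LOCAL_OK  0x1
#define REQ_NET_OK    0x2

/* error to hand back with the master bio: 0 on success, the local error
 * (or -EIO) only if both the local and the network part failed */
static int master_bio_error(unsigned state, int local_error)
{
	bool ok = (state & REQ_LOCAL_OK) || (state & REQ_NET_OK);

	return ok ? 0 : (local_error ? local_error : -EIO);
}

int main(void)
{
	/* local write failed with -EIO, but the peer acked it: still success */
	return master_bio_error(REQ_NET_OK, -EIO);	/* returns 0 */
}
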
- if (s & RQ_LOCAL_PENDING)
- return;
+static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+
+ if (!atomic_sub_and_test(put, &req->completion_ref))
+ return 0;
- if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
- /* this is disconnected (local only) operation,
- * or protocol C P_WRITE_ACK,
- * or protocol A or B P_BARRIER_ACK,
- * or killed from the transfer log due to connection loss. */
- _req_is_done(mdev, req, rw);
+ drbd_req_complete(req, m);
+
+ if (req->rq_state & RQ_POSTPONED) {
+ /* don't destroy the req object just yet,
+ * but queue it for retry */
+ drbd_restart_request(req);
+ return 0;
}
- /* else: network part and not DONE yet. that is
- * protocol A or B, barrier ack still pending... */
+
+ return 1;
}
-static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+/* I'd like this to be the only place that manipulates
+ * req->completion_ref and req->kref. */
+static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
+ int clear, int set)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
+ unsigned s = req->rq_state;
+ int c_put = 0;
+ int k_put = 0;
- if (!is_susp(mdev->state))
- _req_may_be_done(req, m);
-}
+ if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
+ set |= RQ_COMPLETION_SUSP;
-/*
- * checks whether there was an overlapping request
- * or ee already registered.
- *
- * if so, return 1, in which case this request is completed on the spot,
- * without ever being submitted or send.
- *
- * return 0 if it is ok to submit this request.
- *
- * NOTE:
- * paranoia: assume something above us is broken, and issues different write
- * requests for the same block simultaneously...
- *
- * To ensure these won't be reordered differently on both nodes, resulting in
- * diverging data sets, we discard the later one(s). Not that this is supposed
- * to happen, but this is the rationale why we also have to check for
- * conflicting requests with local origin, and why we have to do so regardless
- * of whether we allowed multiple primaries.
- *
- * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
- * second hlist_for_each_entry becomes a noop. This is even simpler than to
- * grab a reference on the net_conf, and check for the two_primaries flag...
- */
-static int _req_conflicts(struct drbd_request *req)
-{
- struct drbd_conf *mdev = req->mdev;
- const sector_t sector = req->sector;
- const int size = req->size;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
+ /* apply */
- D_ASSERT(hlist_unhashed(&req->collision));
+ req->rq_state &= ~clear;
+ req->rq_state |= set;
- if (!get_net_conf(mdev))
- return 0;
+ /* no change? */
+ if (req->rq_state == s)
+ return;
- /* BUG_ON */
- ERR_IF (mdev->tl_hash_s == 0)
- goto out_no_conflict;
- BUG_ON(mdev->tl_hash == NULL);
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent local write detected! "
- "[DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- goto out_conflict;
- }
+ /* intent: get references */
+
+ if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
+ inc_ap_pending(mdev);
+ atomic_inc(&req->completion_ref);
}
- if (mdev->ee_hash_s) {
- /* now, check for overlapping requests with remote origin */
- BUG_ON(mdev->ee_hash == NULL);
-#undef OVERLAPS
-#define OVERLAPS overlaps(e->sector, e->size, sector, size)
- slot = ee_hash_slot(mdev, sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
- " [DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)e->sector, e->size);
- goto out_conflict;
- }
- }
+ if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
+ kref_get(&req->kref); /* wait for the DONE */
+
+ if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
+ atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
+
+ if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
+ atomic_inc(&req->completion_ref);
+
+ /* progress: put references */
+
+ if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
+ ++c_put;
+
+ if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
+ D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+ /* local completion may still come in later,
+ * we need to keep the req object around. */
+ kref_get(&req->kref);
+ ++c_put;
+ }
+
+ if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
+ if (req->rq_state & RQ_LOCAL_ABORTED)
+ ++k_put;
+ else
+ ++c_put;
}
-#undef OVERLAPS
-out_no_conflict:
- /* this is like it should be, and what we expected.
- * our users do behave after all... */
- put_net_conf(mdev);
- return 0;
+ if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
+ dec_ap_pending(mdev);
+ ++c_put;
+ }
-out_conflict:
- put_net_conf(mdev);
- return 1;
+ if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
+ ++c_put;
+
+ if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
+ if (req->rq_state & RQ_NET_SENT)
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+ ++k_put;
+ }
+
+ /* potentially complete and destroy */
+
+ if (k_put || c_put) {
+ /* Completion does its own kref_put. If we are going to
+ * kref_sub below, we need req to be still around then. */
+ int at_least = k_put + !!c_put;
+ int refcount = atomic_read(&req->kref.refcount);
+ if (refcount < at_least)
+ dev_err(DEV,
+ "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
+ s, req->rq_state, refcount, at_least);
+ }
+
+ /* If we made progress, retry conflicting peer requests, if any. */
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+
+ if (c_put)
+ k_put += drbd_req_put_completion_ref(req, m, c_put);
+ if (k_put)
+ kref_sub(&req->kref, k_put, drbd_req_destroy);
+}
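
mod_rq_state() above channels every request state change through two counters: completion_ref, which must drop to zero before the master bio may be completed, and the kref, which keeps the struct drbd_request itself alive (possibly longer, e.g. while a barrier ack is still expected). The sketch below is a user-space analogue of that two-counter scheme, using plain integers under a mutex instead of atomic_t/kref; all names are invented.

#include <stdlib.h>
#include <pthread.h>

/* toy request object: completion_ref gates completion,
 * refcount controls the lifetime of the object itself */
struct toy_req {
	pthread_mutex_t lock;
	int completion_ref;	/* outstanding reasons not to complete yet */
	int refcount;		/* holders of a pointer to this object */
	int completed;
};

static struct toy_req *toy_req_new(void)
{
	struct toy_req *req = calloc(1, sizeof(*req));

	pthread_mutex_init(&req->lock, NULL);
	req->completion_ref = 1;	/* put by the submitter when it is done */
	req->refcount = 1;		/* held as long as completion_ref > 0 */
	return req;
}

static void toy_req_destroy(struct toy_req *req)
{
	pthread_mutex_destroy(&req->lock);
	free(req);
}

/* drop `put` completion references; complete the request when the last
 * one goes away, then drop the lifetime reference held for completion */
static void toy_req_put_completion_ref(struct toy_req *req, int put)
{
	int destroy = 0;

	pthread_mutex_lock(&req->lock);
	req->completion_ref -= put;
	if (req->completion_ref == 0) {
		req->completed = 1;		/* "complete the master bio" here */
		destroy = (--req->refcount == 0);
	}
	pthread_mutex_unlock(&req->lock);

	if (destroy)
		toy_req_destroy(req);
}

int main(void)
{
	struct toy_req *req = toy_req_new();

	/* "local submission" and "network send" each pin completion once */
	pthread_mutex_lock(&req->lock);
	req->completion_ref += 2;
	pthread_mutex_unlock(&req->lock);

	toy_req_put_completion_ref(req, 1);	/* local IO finished */
	toy_req_put_completion_ref(req, 1);	/* peer ack arrived */
	toy_req_put_completion_ref(req, 1);	/* submitter's initial reference */
	return 0;
}
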
+
+static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ char b[BDEVNAME_SIZE];
+
+ if (!__ratelimit(&drbd_ratelimit_state))
+ return;
+
+ dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
+ (unsigned long long)req->i.sector,
+ req->i.size >> 9,
+ bdevname(mdev->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@@ -402,9 +454,12 @@ out_conflict:
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
- struct drbd_conf *mdev = req->mdev;
- int rv = 0;
- m->bio = NULL;
+ struct drbd_conf *mdev = req->w.mdev;
+ struct net_conf *nc;
+ int p, rv = 0;
+
+ if (m)
+ m->bio = NULL;
switch (what) {
default:
@@ -413,116 +468,91 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* does not happen...
* initialization done in drbd_req_new
- case created:
+ case CREATED:
break;
*/
- case to_be_send: /* via network */
- /* reached via drbd_make_request_common
+ case TO_BE_SENT: /* via network */
+ /* reached via __drbd_make_request
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->wire_protocol;
+ rcu_read_unlock();
+ req->rq_state |=
+ p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
+ p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
+ mod_rq_state(req, m, 0, RQ_NET_PENDING);
break;
- case to_be_submitted: /* locally */
- /* reached via drbd_make_request_common */
+ case TO_BE_SUBMITTED: /* locally */
+ /* reached via __drbd_make_request */
D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
- req->rq_state |= RQ_LOCAL_PENDING;
+ mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
- case completed_ok:
+ case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
- mdev->writ_cnt += req->size>>9;
+ mdev->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->size>>9;
+ mdev->read_cnt += req->i.size >> 9;
- req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING,
+ RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
break;
- case abort_disk_io:
- req->rq_state |= RQ_LOCAL_ABORTED;
- if (req->rq_state & RQ_WRITE)
- _req_may_be_done_not_susp(req, m);
- else
- goto goto_queue_for_net_read;
+ case ABORT_DISK_IO:
+ mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
break;
- case write_completed_with_error:
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- _req_may_be_done_not_susp(req, m);
+ case WRITE_COMPLETED_WITH_ERROR:
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_ahead_completed_with_error:
- /* it is legal to fail READA */
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
- _req_may_be_done_not_susp(req, m);
+ case READ_COMPLETED_WITH_ERROR:
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ /* fall through. */
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
+ /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_completed_with_error:
- drbd_set_out_of_sync(mdev, req->sector, req->size);
-
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- if (req->rq_state & RQ_LOCAL_ABORTED) {
- _req_may_be_done(req, m);
- break;
- }
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
-
- goto_queue_for_net_read:
-
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
-
- /* no point in retrying if there is no good remote data,
- * or we have no connection. */
- if (mdev->state.pdsk != D_UP_TO_DATE) {
- _req_may_be_done_not_susp(req, m);
- break;
- }
-
- /* _req_mod(req,to_be_send); oops, recursion... */
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
-
- case queue_for_net_read:
+ case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
- /* from drbd_make_request_common
+ /* from __drbd_make_request
* or from bio_endio during read io-error recovery */
- /* so we can verify the handle in the answer packet
- * corresponding hlist_del is in _req_may_be_done() */
- hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
+ /* So we can verify the handle in the answer packet.
+ * Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &mdev->flags);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
- ? w_read_retry_remote
- : w_send_read_req;
- drbd_queue_work(&mdev->data.work, &req->w);
+ D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_read_req;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
/* assert something? */
- /* from drbd_make_request_common only */
+ /* from __drbd_make_request only */
- hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
- /* corresponding hlist_del is in _req_may_be_done() */
+ /* Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@@ -533,7 +563,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
*
* _req_add_to_epoch(req); this has to be after the
* _maybe_start_new_epoch(req); which happened in
- * drbd_make_request_common, because we now may set the bit
+ * __drbd_make_request, because we now may set the bit
* again ourselves to close the current epoch.
*
* Add req to the (now) current epoch (barrier). */
@@ -543,202 +573,187 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &mdev->flags);
- /* see drbd_make_request_common,
- * just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
-
- req->epoch = mdev->newest_tle->br_number;
-
- /* increment size of current epoch */
- mdev->newest_tle->n_writes++;
-
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
- queue_barrier(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->max_epoch_size;
+ rcu_read_unlock();
+ if (mdev->tconn->current_tle_writes >= p)
+ start_new_tl_epoch(mdev->tconn);
break;
- case queue_for_send_oos:
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = w_send_oos;
- drbd_queue_work(&mdev->data.work, &req->w);
+ case QUEUE_FOR_SEND_OOS:
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_out_of_sync;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case read_retry_remote_canceled:
- case send_canceled:
- case send_failed:
+ case READ_RETRY_REMOTE_CANCELED:
+ case SEND_CANCELED:
+ case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
- req->rq_state &= ~RQ_NET_QUEUED;
- /* if we did it right, tl_clear should be scheduled only after
- * this, so this should not be necessary! */
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, 0);
break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
/* assert something? */
- if (bio_data_dir(req->master_bio) == WRITE)
- atomic_add(req->size>>9, &mdev->ap_in_flight);
-
if (bio_data_dir(req->master_bio) == WRITE &&
- mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+ !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
/* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= RQ_NET_OK;
- } /* else: neg-ack was faster... */
+ if (req->rq_state & RQ_NET_PENDING)
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
+ /* else: neg-ack was faster... */
/* it is still not yet RQ_NET_DONE until the
* corresponding epoch barrier got acked as well,
* so we know what to dirty on connection loss */
}
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_SENT;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
/* Was not set PENDING, no longer QUEUED, so is now DONE
* as far as this connection is concerned. */
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
break;
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING)
- dec_ap_pending(mdev);
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
- req->rq_state |= RQ_NET_DONE;
- if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
-
- /* if it is still queued, we may not complete it here.
- * it will be canceled soon. */
- if (!(req->rq_state & RQ_NET_QUEUED))
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m,
+ RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
+ RQ_NET_DONE);
break;
- case conflict_discarded_by_peer:
- /* for discarded conflicting writes of multiple primaries,
+ case CONFLICT_RESOLVED:
+ /* for superseded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
- * node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
- dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
- " DRBD is not a random data generator!\n",
- (unsigned long long)req->sector, req->size);
- req->rq_state |= RQ_NET_DONE;
- /* fall through */
- case write_acked_by_peer_and_sis:
- case write_acked_by_peer:
- if (what == write_acked_by_peer_and_sis)
- req->rq_state |= RQ_NET_SIS;
+ * node crashes are covered by the activity log.
+ *
+ * If this request had been marked as RQ_POSTPONED before,
+ * it will actually not be completed, but "restarted",
+ * resubmitted from the retry worker context. */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
+ break;
+
+ case WRITE_ACKED_BY_PEER_AND_SIS:
+ req->rq_state |= RQ_NET_SIS;
+ case WRITE_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
/* protocol C; successfully written on peer.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
* for volatile write-back caches on lower level devices. */
- case recv_acked_by_peer:
+ goto ack_common;
+ case RECV_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
- req->rq_state |= RQ_NET_OK;
+ ack_common:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- req->rq_state &= ~RQ_NET_PENDING;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
- case neg_acked:
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
+ case POSTPONE_WRITE:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ /* If this node has already detected the write conflict, the
+ * worker will be waiting on misc_wait. Wake it up once this
+ * request has completed locally.
+ */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ req->rq_state |= RQ_POSTPONED;
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+ /* Do not clear RQ_NET_PENDING. This request will make further
+ * progress via restart_conflicting_writes() or
+ * fail_postponed_requests(). Hopefully. */
+ break;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ case NEG_ACKED:
+ mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
-
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
- req->rq_state &= ~RQ_LOCAL_COMPLETED;
+ mod_rq_state(req, m,
+ RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
+ RQ_LOCAL_PENDING);
rv = MR_READ;
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
- get_ldev(mdev);
+ get_ldev(mdev); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case resend:
+ case RESEND:
/* Simply complete (local only) READs. */
if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
- _req_may_be_done(req, m);
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
}
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
- before the connection loss (B&C only); only P_BARRIER_ACK was missing.
- Trowing them out of the TL here by pretending we got a BARRIER_ACK
- We ensure that the peer was not rebooted */
+ before the connection loss (B&C only); only P_BARRIER_ACK
+ (or the local completion?) was missing when we suspended.
+ Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+ During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
+ /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+ * in that case we must not set RQ_NET_PENDING. */
+
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
- }
+ } /* else: FIXME can this happen? */
break;
}
- /* else, fall through to barrier_acked */
+ /* else, fall through to BARRIER_ACKED */
- case barrier_acked:
+ case BARRIER_ACKED:
+ /* barrier ack for READ requests does not make sense */
if (!(req->rq_state & RQ_WRITE))
break;
if (req->rq_state & RQ_NET_PENDING) {
- /* barrier came in before all requests have been acked.
+ /* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
- list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
}
- if ((req->rq_state & RQ_NET_MASK) != 0) {
- req->rq_state |= RQ_NET_DONE;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ /* Allowed to complete requests, even while suspended.
+ * As this is called for all requests within a matching epoch,
+ * we need to filter, and only set RQ_NET_DONE for those that
+ * have actually been on the wire. */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP,
+ (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
break;
- case data_received:
+ case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
};
@@ -752,75 +767,265 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
-static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
if (mdev->state.disk == D_UP_TO_DATE)
- return 1;
- if (mdev->state.disk >= D_OUTDATED)
- return 0;
- if (mdev->state.disk < D_INCONSISTENT)
- return 0;
- /* state.disk == D_INCONSISTENT We will have a look at the BitMap */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ return true;
+ if (mdev->state.disk != D_INCONSISTENT)
+ return false;
esector = sector + (size >> 9) - 1;
-
+ nr_sectors = drbd_get_capacity(mdev->this_bdev);
D_ASSERT(sector < nr_sectors);
D_ASSERT(esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
+ return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
+}
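
drbd_may_do_local_read() asks whether every bitmap bit covered by the request is clean, i.e. whether drbd_bm_count_bits() over the affected bit range returns zero. The sketch below assumes DRBD's 4 KiB-per-bit bitmap granularity (so the sector-to-bit conversion is a shift by 3) and represents the bitmap as one byte per bit purely for clarity; the helper names are invented.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SECT_PER_BIT_SHIFT 3	/* 4096-byte bitmap block / 512-byte sector */

/* stands in for drbd_bm_count_bits(); one byte per bitmap bit here */
static unsigned count_set_bits(const uint8_t *bm, uint64_t s, uint64_t e)
{
	unsigned n = 0;

	for (uint64_t i = s; i <= e; i++)
		n += bm[i];
	return n;
}

static bool may_do_local_read(const uint8_t *bm, uint64_t sector, int size)
{
	uint64_t esector = sector + (size >> 9) - 1;
	uint64_t sbnr = sector >> SECT_PER_BIT_SHIFT;
	uint64_t ebnr = esector >> SECT_PER_BIT_SHIFT;

	return count_set_bits(bm, sbnr, ebnr) == 0;
}

int main(void)
{
	uint8_t bitmap[16] = { 0 };	/* all clean */

	bitmap[2] = 1;			/* sectors 16..23 are out of sync */
	printf("read at sector 0, 4 KiB: %s\n",
	       may_do_local_read(bitmap, 0, 4096) ? "local ok" : "not local");
	printf("read at sector 16, 4 KiB: %s\n",
	       may_do_local_read(bitmap, 16, 4096) ? "local ok" : "not local");
	return 0;
}
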
+
+static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
+ enum drbd_read_balancing rbm)
+{
+ struct backing_dev_info *bdi;
+ int stripe_shift;
+
+ switch (rbm) {
+ case RB_CONGESTED_REMOTE:
+ bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ return bdi_read_congested(bdi);
+ case RB_LEAST_PENDING:
+ return atomic_read(&mdev->local_cnt) >
+ atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ case RB_32K_STRIPING: /* stripe_shift = 15 */
+ case RB_64K_STRIPING:
+ case RB_128K_STRIPING:
+ case RB_256K_STRIPING:
+ case RB_512K_STRIPING:
+ case RB_1M_STRIPING: /* stripe_shift = 20 */
+ stripe_shift = (rbm - RB_32K_STRIPING + 15);
+ return (sector >> (stripe_shift - 9)) & 1;
+ case RB_ROUND_ROBIN:
+ return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ case RB_PREFER_REMOTE:
+ return true;
+ case RB_PREFER_LOCAL:
+ default:
+ return false;
+ }
+}
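
For the fixed-size striping policies, remote_due_to_read_balancing() maps the starting sector to a stripe of 2^stripe_shift bytes and sends the read to the peer when the stripe number is odd, so the local and the remote disk alternate stripe by stripe. A small stand-alone illustration of that bit arithmetic (hypothetical helper name):

#include <stdio.h>
#include <stdint.h>

/* With 512-byte sectors, a stripe of (1 << stripe_shift) bytes covers
 * (1 << (stripe_shift - 9)) sectors, hence the shift by (stripe_shift - 9);
 * odd-numbered stripes go to the peer. */
static int read_goes_remote(uint64_t sector, int stripe_shift)
{
	return (sector >> (stripe_shift - 9)) & 1;
}

int main(void)
{
	/* 64K striping: stripe_shift = 16, i.e. 128 sectors per stripe */
	for (uint64_t s = 0; s < 512; s += 64)
		printf("sector %4llu -> %s\n", (unsigned long long)s,
		       read_goes_remote(s, 16) ? "remote" : "local");
	return 0;
}
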
+
+/*
+ * complete_conflicting_writes - wait for any conflicting write requests
+ *
+ * The write_requests tree contains all active write requests which we
+ * currently know about. Wait for any requests to complete which conflict with
+ * the new one.
+ *
+ * Only way out: remove the conflicting intervals from the tree.
+ */
+static void complete_conflicting_writes(struct drbd_request *req)
+{
+ DEFINE_WAIT(wait);
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i;
+ sector_t sector = req->i.sector;
+ int size = req->i.size;
+
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ return;
+
+ for (;;) {
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ break;
+ /* Indicate to wake up mdev->misc_wait on progress. */
+ i->waiting = true;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ schedule();
+ spin_lock_irq(&mdev->tconn->req_lock);
+ }
+ finish_wait(&mdev->misc_wait, &wait);
}
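
complete_conflicting_writes() keeps waiting as long as drbd_find_overlap() reports any pending write whose (sector, size) interval intersects the new request. The overlap test itself is plain interval arithmetic; the sketch below uses a linear scan over an array where the driver uses an rb-tree based interval tree, and all names are invented.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct ival { uint64_t sector; unsigned size; };	/* size in bytes */

/* two intervals overlap iff each one starts before the other ends;
 * sectors are 512 bytes, sizes are given in bytes */
static bool overlaps(const struct ival *a, const struct ival *b)
{
	uint64_t a_end = a->sector + (a->size >> 9);
	uint64_t b_end = b->sector + (b->size >> 9);

	return a->sector < b_end && b->sector < a_end;
}

static const struct ival *find_overlap(const struct ival *pending, int n,
				       const struct ival *req)
{
	for (int i = 0; i < n; i++)
		if (overlaps(&pending[i], req))
			return &pending[i];
	return NULL;	/* no conflicting write pending */
}

int main(void)
{
	struct ival pending[] = { { 0, 4096 }, { 1024, 8192 } };
	struct ival new_write = { 4, 4096 };	/* sectors 4..11 */
	const struct ival *hit = find_overlap(pending, 2, &new_write);

	printf("conflict with pending write at sector %llu? %s\n",
	       hit ? (unsigned long long)hit->sector : 0, hit ? "yes" : "no");
	return 0;
}
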
+/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_conf *mdev)
{
- int congested = 0;
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct net_conf *nc;
+ bool congested = false;
+ enum drbd_on_congestion on_congestion;
+
+ nc = rcu_dereference(tconn->net_conf);
+ on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+ if (on_congestion == OC_BLOCK ||
+ tconn->agreed_pro_version < 96)
+ return;
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
* sure mdev->act_log is there.
- * Note: caller has to make sure that net_conf is there.
*/
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
return;
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ if (nc->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
+ congested = true;
}
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ if (mdev->act_log->used >= nc->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
+ congested = true;
}
if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
+ /* start a new epoch for non-mirrored writes */
+ start_new_tl_epoch(mdev->tconn);
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
}
put_ldev(mdev);
}
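
maybe_pull_ahead() declares congestion when the amount of in-flight application data reaches cong_fill (if set) or the activity log already uses cong_extents extents, and then either pulls ahead of the peer or disconnects, depending on the on-congestion policy. A stand-alone restatement of just that decision, with invented names and no locking or state handling:

#include <stdio.h>
#include <stdbool.h>

enum congestion_action { KEEP_REPLICATING, PULL_AHEAD, DISCONNECT };

struct congestion_conf {
	unsigned cong_fill;	/* in-flight sectors; 0 disables this check */
	unsigned cong_extents;	/* activity-log extents considered "full" */
	bool on_congestion_disconnect;
};

static enum congestion_action
check_congestion(const struct congestion_conf *cfg,
		 unsigned ap_in_flight, unsigned al_used)
{
	bool congested = (cfg->cong_fill && ap_in_flight >= cfg->cong_fill) ||
			 al_used >= cfg->cong_extents;

	if (!congested)
		return KEEP_REPLICATING;
	return cfg->on_congestion_disconnect ? DISCONNECT : PULL_AHEAD;
}

int main(void)
{
	struct congestion_conf cfg = { .cong_fill = 1000, .cong_extents = 127,
				       .on_congestion_disconnect = false };

	printf("%d\n", check_congestion(&cfg, 1500, 10));	/* prints 1: PULL_AHEAD */
	return 0;
}
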
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+/* If this returns false, and req->private_bio is still set,
+ * this should be submitted locally.
+ *
+ * If it returns false, but req->private_bio is not set,
+ * we do not have access to good data :(
+ *
+ * Otherwise, this destroys req->private_bio, if any,
+ * and returns true.
+ */
+static bool do_remote_read(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ enum drbd_read_balancing rbm;
+
+ if (req->private_bio) {
+ if (!drbd_may_do_local_read(mdev,
+ req->i.sector, req->i.size)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ }
+
+ if (mdev->state.pdsk != D_UP_TO_DATE)
+ return false;
+
+ if (req->private_bio == NULL)
+ return true;
+
+ /* TODO: improve read balancing decisions, take into account drbd
+ * protocol, pending requests etc. */
+
+ rcu_read_lock();
+ rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rcu_read_unlock();
+
+ if (rbm == RB_PREFER_LOCAL && req->private_bio)
+ return false; /* submit locally */
+
+ if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/* returns number of connections (== 1, for drbd 8.4)
+ * expected to actually write this data,
+ * which does NOT include those that we are L_AHEAD for. */
+static int drbd_process_write_request(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ int remote, send_oos;
+
+ rcu_read_lock();
+ remote = drbd_should_do_remote(mdev->state);
+ if (remote) {
+ maybe_pull_ahead(mdev);
+ remote = drbd_should_do_remote(mdev->state);
+ }
+ send_oos = drbd_should_send_out_of_sync(mdev->state);
+ rcu_read_unlock();
+
+ /* Need to replicate writes. Unless it is an empty flush,
+ * which is better mapped to a DRBD P_BARRIER packet,
+ * also for drbd wire protocol compatibility reasons.
+ * If this was a flush, just start a new epoch.
+ * Unless the current epoch was empty anyways, or we are not currently
+ * replicating, in which case there is no point. */
+ if (unlikely(req->i.size == 0)) {
+ /* The only size==0 bios we expect are empty flushes. */
+ D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+ if (remote)
+ start_new_tl_epoch(mdev->tconn);
+ return 0;
+ }
+
+ if (!remote && !send_oos)
+ return 0;
+
+ D_ASSERT(!(remote && send_oos));
+
+ if (remote) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_WRITE);
+ } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
+
+ return remote;
+}
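
Per write, drbd_process_write_request() makes a three-way routing decision: replicate the data, only mark it out-of-sync on the peer (e.g. while ahead due to congestion), or keep it purely local, with empty flushes special-cased into closing the current epoch. A hedged, stand-alone restatement with invented names (the real code expresses this via _req_mod() events):

#include <stdbool.h>
#include <assert.h>

enum write_route {
	WRITE_LOCAL_ONLY,
	WRITE_REPLICATE,	/* TO_BE_SENT + QUEUE_FOR_NET_WRITE */
	WRITE_SEND_OOS,		/* QUEUE_FOR_SEND_OOS */
	WRITE_NEW_EPOCH_ONLY,	/* empty flush mapped to a P_BARRIER */
};

/* `remote` and `send_oos` mirror drbd_should_do_remote() /
 * drbd_should_send_out_of_sync(); they are never both true */
static enum write_route route_write(bool remote, bool send_oos, bool empty_flush)
{
	assert(!(remote && send_oos));

	if (empty_flush)
		return remote ? WRITE_NEW_EPOCH_ONLY : WRITE_LOCAL_ONLY;
	if (remote)
		return WRITE_REPLICATE;
	if (send_oos)
		return WRITE_SEND_OOS;
	return WRITE_LOCAL_ONLY;
}

int main(void)
{
	return route_write(true, false, false) == WRITE_REPLICATE ? 0 : 1;
}
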
+
+static void
+drbd_submit_req_private_bio(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->private_bio;
+ const int rw = bio_rw(bio);
+
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+
+ /* State may have changed since we grabbed our reference on the
+ * ->ldev member. Double check, and short-circuit to endio.
+ * In case the last activity log transaction failed to get on
+ * stable storage, and this is a WRITE, we may not even submit
+ * this bio. */
+ if (get_ldev(mdev)) {
+ if (drbd_insert_fault(mdev,
+ rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
+ bio_endio(bio, -EIO);
+ else
+ generic_make_request(bio);
+ put_ldev(mdev);
+ } else
+ bio_endio(bio, -EIO);
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
- const int size = bio->bi_size;
- const sector_t sector = bio->bi_sector;
- struct drbd_tl_epoch *b = NULL;
+ struct bio_and_error m = { NULL, };
struct drbd_request *req;
- int local, remote, send_oos = 0;
- int err = -EIO;
- int ret = 0;
- union drbd_state s;
+ bool no_remote = false;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -830,55 +1035,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
- return 0;
+ return;
}
req->start_time = start_time;
- local = get_ldev(mdev);
- if (!local) {
- bio_put(req->private_bio); /* or we get a bio leak */
+ if (!get_ldev(mdev)) {
+ bio_put(req->private_bio);
req->private_bio = NULL;
}
- if (rw == WRITE) {
- /* Need to replicate writes. Unless it is an empty flush,
- * which is better mapped to a DRBD P_BARRIER packet,
- * also for drbd wire protocol compatibility reasons. */
- if (unlikely(size == 0)) {
- /* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(bio->bi_rw & REQ_FLUSH);
- remote = 0;
- } else
- remote = 1;
- } else {
- /* READ || READA */
- if (local) {
- if (!drbd_may_do_local_read(mdev, sector, size)) {
- /* we could kick the syncer to
- * sync this extent asap, wait for
- * it, then continue locally.
- * Or just issue the request remotely.
- */
- local = 0;
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
- }
- }
- remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
- }
-
- /* If we have a disk, but a READA request is mapped to remote,
- * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
- * Just fail that READA request right here.
- *
- * THINK: maybe fail all READA when not local?
- * or make this configurable...
- * if network is slow, READA won't do any good.
- */
- if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
- err = -EWOULDBLOCK;
- goto fail_and_free_req;
- }
/* For WRITES going to the local disk, grab a reference on the target
* extent. This waits for any resync activity in the corresponding
@@ -887,348 +1051,131 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* of transactional on-disk meta data updates.
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
- if (rw == WRITE && local && size
+ if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
req->rq_state |= RQ_IN_ACT_LOG;
- drbd_al_begin_io(mdev, sector);
- }
-
- s = mdev->state;
- remote = remote && drbd_should_do_remote(s);
- send_oos = rw == WRITE && drbd_should_send_oos(s);
- D_ASSERT(!(remote && send_oos));
-
- if (!(local || remote) && !is_susp(mdev->state)) {
- if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- goto fail_free_complete;
+ drbd_al_begin_io(mdev, &req->i);
}
- /* For WRITE request, we have to make sure that we have an
- * unused_spare_tle, in case we need to start a new epoch.
- * I try to be smart and avoid to pre-allocate always "just in case",
- * but there is a race between testing the bit and pointer outside the
- * spinlock, and grabbing the spinlock.
- * if we lost that race, we retry. */
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
-allocate_barrier:
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
- if (!b) {
- dev_err(DEV, "Failed to alloc barrier.\n");
- err = -ENOMEM;
- goto fail_free_complete;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (rw == WRITE) {
+ /* This may temporarily give up the req_lock,
+ * but will re-acquire it before it returns here.
+ * Needs to be before the check on drbd_suspended() */
+ complete_conflicting_writes(req);
}
- /* GOOD, everything prepared, grab the spin_lock */
- spin_lock_irq(&mdev->req_lock);
-
- if (is_susp(mdev->state)) {
- /* If we got suspended, use the retry mechanism of
- drbd_make_request() to restart processing of this
- bio. In the next call to drbd_make_request
- we sleep in inc_ap_bio() */
- ret = 1;
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
- }
+ /* no more giving up req_lock from now on! */
- if (remote || send_oos) {
- remote = drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
- D_ASSERT(!(remote && send_oos));
-
- if (!(remote || send_oos))
- dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
- if (!(local || remote)) {
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
+ if (drbd_suspended(mdev)) {
+ /* push back and retry: */
+ req->rq_state |= RQ_POSTPONED;
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
}
+ goto out;
}
- if (b && mdev->unused_spare_tle == NULL) {
- mdev->unused_spare_tle = b;
- b = NULL;
- }
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
- /* someone closed the current epoch
- * while we were grabbing the spinlock */
- spin_unlock_irq(&mdev->req_lock);
- goto allocate_barrier;
- }
-
-
/* Update disk stats */
_drbd_start_io_acct(mdev, req, bio);
- /* _maybe_start_new_epoch(mdev);
- * If we need to generate a write barrier packet, we have to add the
- * new epoch (barrier) object, and queue the barrier packet for sending,
- * and queue the req's data after it _within the same lock_, otherwise
- * we have race conditions were the reorder domains could be mixed up.
- *
- * Even read requests may start a new epoch and queue the corresponding
- * barrier packet. To get the write ordering right, we only have to
- * make sure that, if this is a write request and it triggered a
- * barrier packet, this request is queued within the same spinlock. */
- if ((remote || send_oos) && mdev->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- } else {
- D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+ /* We fail READ/READA early if we cannot serve it.
+ * We must do this before req is registered on any lists.
+ * Otherwise, drbd_req_complete() will queue the failed READ for retry. */
+ if (rw != WRITE) {
+ if (!do_remote_read(req) && !req->private_bio)
+ goto nodata;
}
- /* NOTE
- * Actually, 'local' may be wrong here already, since we may have failed
- * to write to the meta data, and may become wrong anytime because of
- * local io-error for some other request, which would lead to us
- * "detaching" the local disk.
- *
- * 'remote' may become wrong any time because the network could fail.
- *
- * This is a harmless race condition, though, since it is handled
- * correctly at the appropriate places; so it just defers the failure
- * of the respective operation.
- */
-
- /* mark them early for readability.
- * this just sets some state flags. */
- if (remote)
- _req_mod(req, to_be_send);
- if (local)
- _req_mod(req, to_be_submitted);
-
- /* check this request on the collision detection hash tables.
- * if we have a conflict, just complete it here.
- * THINK do we want to check reads, too? (I don't think so...) */
- if (rw == WRITE && _req_conflicts(req))
- goto fail_conflicting;
+ /* which transfer log epoch does this belong to? */
+ req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
- if (likely(size!=0))
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ if (likely(req->i.size!=0)) {
+ if (rw == WRITE)
+ mdev->tconn->current_tle_writes++;
- /* NOTE remote first: to get the concurrent write detection right,
- * we must register the request before start of local IO. */
- if (remote) {
- /* either WRITE and C_CONNECTED,
- * or READ, and no local disk,
- * or READ, but not in sync.
- */
- _req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
}
- if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
- if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
- maybe_pull_ahead(mdev);
-
- /* If this was a flush, queue a drbd barrier/start a new epoch.
- * Unless the current epoch was empty anyways, or we are not currently
- * replicating, in which case there is no point. */
- if (unlikely(bio->bi_rw & REQ_FLUSH)
- && mdev->newest_tle->n_writes
- && drbd_should_do_remote(mdev->state))
- queue_barrier(mdev);
-
- spin_unlock_irq(&mdev->req_lock);
- kfree(b); /* if someone else has beaten us to it... */
-
- if (local) {
- req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
-
- /* State may have changed since we grabbed our reference on the
- * mdev->ldev member. Double check, and short-circuit to endio.
- * In case the last activity log transaction failed to get on
- * stable storage, and this is a WRITE, we may not even submit
- * this bio. */
- if (get_ldev(mdev)) {
- if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
- bio_endio(req->private_bio, -EIO);
- else
- generic_make_request(req->private_bio);
- put_ldev(mdev);
+ if (rw == WRITE) {
+ if (!drbd_process_write_request(req))
+ no_remote = true;
+ } else {
+ /* We either have a private_bio, or we can read from remote.
+ * Otherwise we would have done the goto nodata above. */
+ if (req->private_bio == NULL) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_READ);
} else
- bio_endio(req->private_bio, -EIO);
+ no_remote = true;
}
- return 0;
-
-fail_conflicting:
- /* this is a conflicting request.
- * even though it may have been only _partially_
- * overlapping with one of the currently pending requests,
- * without even submitting or sending it, we will
- * pretend that it was successfully served right now.
- */
- _drbd_end_io_acct(mdev, req);
- spin_unlock_irq(&mdev->req_lock);
- if (remote)
- dec_ap_pending(mdev);
- /* THINK: do we want to fail it (-EIO), or pretend success?
- * this pretends success. */
- err = 0;
-
-fail_free_complete:
- if (req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, sector);
-fail_and_free_req:
- if (local) {
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
+ if (req->private_bio) {
+ /* needs to be marked within the same spinlock */
+ _req_mod(req, TO_BE_SUBMITTED);
+ /* but we need to give up the spinlock to submit */
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ drbd_submit_req_private_bio(req);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ } else if (no_remote) {
+nodata:
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ (unsigned long long)req->i.sector, req->i.size >> 9);
+ /* A write may have been queued for send_oos, however.
+ * So we cannot simply free it; we must go through drbd_req_put_completion_ref(). */
}
- if (!ret)
- bio_endio(bio, err);
-
- drbd_req_free(req);
- dec_ap_bio(mdev);
- kfree(b);
-
- return ret;
-}
-/* helper function for drbd_make_request
- * if we can determine just by the mdev (state) that this request will fail,
- * return 1
- * otherwise return 0
- */
-static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
-{
- if (mdev->state.role != R_PRIMARY &&
- (!allow_oos || is_write)) {
- if (__ratelimit(&drbd_ratelimit_state)) {
- dev_err(DEV, "Process %s[%u] tried to %s; "
- "since we are not in Primary state, "
- "we cannot allow this\n",
- current->comm, current->pid,
- is_write ? "WRITE" : "READ");
- }
- return 1;
- }
+out:
+ if (drbd_req_put_completion_ref(req, &m, 1))
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- return 0;
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ return;
}
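As a reading aid, the epilogue above boils down to the idiom below (a sketch only; req, m and mdev are the locals of __drbd_make_request). It replaces the old error-return paths and fail_* labels:

	/* Two counts now govern a request's lifetime:
	 *  - req->completion_ref > 0: the master bio must not be completed yet
	 *  - req->kref           > 0: the struct drbd_request must not be freed
	 * Dropping the completion reference may hand the master bio back via m;
	 * the final kref_put() releases the request object itself. */
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* the master bio is completed only after req_lock has been dropped */
	if (m.bio)
		complete_master_bio(mdev, &m);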
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
- unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
unsigned long start_time;
- if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
- bio_endio(bio, -EPERM);
- return;
- }
-
start_time = jiffies;
/*
* what we "blindly" assume:
*/
- D_ASSERT((bio->bi_size & 0x1ff) == 0);
-
- /* to make some things easier, force alignment of requests within the
- * granularity of our hash tables */
- s_enr = bio->bi_sector >> HT_SHIFT;
- e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
-
- if (likely(s_enr == e_enr)) {
- do {
- inc_ap_bio(mdev, 1);
- } while (drbd_make_request_common(mdev, bio, start_time));
- return;
- }
-
- /* can this bio be split generically?
- * Maybe add our own split-arbitrary-bios function. */
- if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
- /* rather error out here than BUG in bio_split */
- dev_err(DEV, "bio would need to, but cannot, be split: "
- "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
- bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- (unsigned long long)bio->bi_sector);
- bio_endio(bio, -EINVAL);
- } else {
- /* This bio crosses some boundary, so we have to split it. */
- struct bio_pair *bp;
- /* works for the "do not cross hash slot boundaries" case
- * e.g. sector 262269, size 4096
- * s_enr = 262269 >> 6 = 4097
- * e_enr = (262269+8-1) >> 6 = 4098
- * HT_SHIFT = 6
- * sps = 64, mask = 63
- * first_sectors = 64 - (262269 & 63) = 3
- */
- const sector_t sect = bio->bi_sector;
- const int sps = 1 << HT_SHIFT; /* sectors per slot */
- const int mask = sps - 1;
- const sector_t first_sectors = sps - (sect & mask);
- bp = bio_split(bio, first_sectors);
+ D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
- /* we need to get a "reference count" (ap_bio_cnt)
- * to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get three references
- * atomically, otherwise we might deadlock when trying to submit the
- * second one! */
- inc_ap_bio(mdev, 3);
-
- D_ASSERT(e_enr == s_enr + 1);
-
- while (drbd_make_request_common(mdev, &bp->bio1, start_time))
- inc_ap_bio(mdev, 1);
-
- while (drbd_make_request_common(mdev, &bp->bio2, start_time))
- inc_ap_bio(mdev, 1);
-
- dec_ap_bio(mdev);
-
- bio_pair_release(bp);
- }
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
}
-/* This is called by bio_add_page(). With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
- * units (was AL_EXTENTs).
+/* This is called by bio_add_page().
+ *
+ * q->max_hw_sectors and other global limits are already enforced there.
*
- * we do the calculation within the lower 32bit of the byte offsets,
- * since we don't care for actual offset, but only check whether it
- * would cross "activity log extent" boundaries.
+ * We need to call down to our lower level device,
+ * in case it has special restrictions.
+ *
+ * We also may need to enforce configured max-bio-bvecs limits.
*
* As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset. so the resulting bio may still
- * cross extent boundaries. those are dealt with (bio_split) in
- * drbd_make_request.
+ * regardless of size and offset, so no need to ask lower levels.
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
- unsigned int bio_offset =
- (unsigned int)bvm->bi_sector << 9; /* 32 bit */
unsigned int bio_size = bvm->bi_size;
- int limit, backing_limit;
-
- limit = DRBD_MAX_BIO_SIZE
- - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
- if (limit < 0)
- limit = 0;
- if (bio_size == 0) {
- if (limit <= bvec->bv_len)
- limit = bvec->bv_len;
- } else if (limit && get_ldev(mdev)) {
+ int limit = DRBD_MAX_BIO_SIZE;
+ int backing_limit;
+
+ if (bio_size && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
@@ -1240,24 +1187,38 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
return limit;
}
+struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+{
+ /* Walk the transfer log,
+ * and find the oldest not yet completed request */
+ struct drbd_request *r;
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ if (atomic_read(&r->completion_ref))
+ return r;
+ }
+ return NULL;
+}
+
void request_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_tconn *tconn = mdev->tconn;
struct drbd_request *req; /* oldest request */
- struct list_head *le;
+ struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
- if (get_net_conf(mdev)) {
- if (mdev->state.conn >= C_WF_REPORT_PARAMS)
- ent = mdev->net_conf->timeout*HZ/10
- * mdev->net_conf->ko_count;
- put_net_conf(mdev);
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = nc->timeout * HZ/10 * nc->ko_count;
+
if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
- dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(mdev);
}
+ rcu_read_unlock();
+
et = min_not_zero(dt, ent);
if (!et)
@@ -1265,17 +1226,14 @@ void request_timer_fn(unsigned long data)
now = jiffies;
- spin_lock_irq(&mdev->req_lock);
- le = &mdev->oldest_tle->requests;
- if (list_empty(le)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&tconn->req_lock);
+ req = find_oldest_request(tconn);
+ if (!req) {
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, now + et);
return;
}
- le = le->prev;
- req = list_entry(le, struct drbd_request, tl_requests);
-
/* The request is considered timed out, if
* - we have some effective timeout from the configuration,
* with above state restrictions applied,
@@ -1294,17 +1252,17 @@ void request_timer_fn(unsigned long data)
*/
if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) &&
- !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
time_after(now, req->start_time + dt) &&
!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, nt);
}
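For orientation, a small worked example of the effective-timeout arithmetic above (numbers are illustrative, not defaults taken from this patch; both the net timeout and the disk-timeout are configured in units of 0.1 s, hence the * HZ/10):

	ent = 60 * HZ/10 * 7;		/* net timeout 6.0 s, ko-count 7 -> 42 s */
	dt  = 0;			/* no disk-timeout configured            */
	et  = min_not_zero(dt, ent);	/* effective timeout: the 42 s value     */
	/* the timer is then re-armed for req->start_time + et (or now + et) */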
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 3d211191948..016de6b8bb5 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -77,40 +77,41 @@
*/
enum drbd_req_event {
- created,
- to_be_send,
- to_be_submitted,
+ CREATED,
+ TO_BE_SENT,
+ TO_BE_SUBMITTED,
/* XXX yes, now I am inconsistent...
* these are not "events" but "actions"
* oh, well... */
- queue_for_net_write,
- queue_for_net_read,
- queue_for_send_oos,
-
- send_canceled,
- send_failed,
- handed_over_to_network,
- oos_handed_to_network,
- connection_lost_while_pending,
- read_retry_remote_canceled,
- recv_acked_by_peer,
- write_acked_by_peer,
- write_acked_by_peer_and_sis, /* and set_in_sync */
- conflict_discarded_by_peer,
- neg_acked,
- barrier_acked, /* in protocol A and B */
- data_received, /* (remote read) */
-
- read_completed_with_error,
- read_ahead_completed_with_error,
- write_completed_with_error,
- abort_disk_io,
- completed_ok,
- resend,
- fail_frozen_disk_io,
- restart_frozen_disk_io,
- nothing, /* for tracing only */
+ QUEUE_FOR_NET_WRITE,
+ QUEUE_FOR_NET_READ,
+ QUEUE_FOR_SEND_OOS,
+
+ SEND_CANCELED,
+ SEND_FAILED,
+ HANDED_OVER_TO_NETWORK,
+ OOS_HANDED_TO_NETWORK,
+ CONNECTION_LOST_WHILE_PENDING,
+ READ_RETRY_REMOTE_CANCELED,
+ RECV_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+ CONFLICT_RESOLVED,
+ POSTPONE_WRITE,
+ NEG_ACKED,
+ BARRIER_ACKED, /* in protocol A and B */
+ DATA_RECEIVED, /* (remote read) */
+
+ READ_COMPLETED_WITH_ERROR,
+ READ_AHEAD_COMPLETED_WITH_ERROR,
+ WRITE_COMPLETED_WITH_ERROR,
+ ABORT_DISK_IO,
+ COMPLETED_OK,
+ RESEND,
+ FAIL_FROZEN_DISK_IO,
+ RESTART_FROZEN_DISK_IO,
+ NOTHING,
};
/* encoding of request states for now. we don't actually need that many bits.
@@ -142,8 +143,8 @@ enum drbd_req_state_bits {
* recv_ack (B) or implicit "ack" (A),
* still waiting for the barrier ack.
* master_bio may already be completed and invalidated.
- * 11100: write_acked (C),
- * data_received (for remote read, any protocol)
+ * 11100: write acked (C),
+ * data received (for remote read, any protocol)
* or finally the barrier ack has arrived (B,A)...
* request can be freed
* 01100: neg-acked (write, protocol C)
@@ -198,6 +199,22 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+
+ /* The peer has sent a retry ACK */
+ __RQ_POSTPONED,
+
+ /* would have been completed,
+ * but was not, because of drbd_suspended() */
+ __RQ_COMPLETION_SUSP,
+
+ /* We expect a receive ACK (wire proto B) */
+ __RQ_EXP_RECEIVE_ACK,
+
+ /* We expect a write ACK (wite proto C) */
+ /* We expect a write ACK (wire proto C) */
+
+ /* waiting for a barrier ack, did an extra kref_get */
+ __RQ_EXP_BARR_ACK,
};
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
@@ -219,56 +236,16 @@ enum drbd_req_state_bits {
#define RQ_WRITE (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
+#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
+#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
+#define RQ_EXP_WRITE_ACK (1UL << __RQ_EXP_WRITE_ACK)
+#define RQ_EXP_BARR_ACK (1UL << __RQ_EXP_BARR_ACK)
/* For waking up the frozen transfer log mod_req() has to return if the request
should be counted in the epoch object*/
-#define MR_WRITE_SHIFT 0
-#define MR_WRITE (1 << MR_WRITE_SHIFT)
-#define MR_READ_SHIFT 1
-#define MR_READ (1 << MR_READ_SHIFT)
-
-/* epoch entries */
-static inline
-struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->ee_hash_s == 0);
- return mdev->ee_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
-}
-
-/* transfer log (drbd_request objects) */
-static inline
-struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->tl_hash_s == 0);
- return mdev->tl_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
-}
-
-/* application reads (drbd_request objects) */
-static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- return mdev->app_reads_hash
- + ((unsigned int)(sector) % APP_R_HSIZE);
-}
-
-/* when we receive the answer for a read request,
- * verify that we actually know about it */
-static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = ar_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- D_ASSERT(req->sector == sector);
- return req;
- }
- }
- return NULL;
-}
+#define MR_WRITE 1
+#define MR_READ 2
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
@@ -278,41 +255,10 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
req->private_bio = bio;
bio->bi_private = req;
- bio->bi_end_io = drbd_endio_pri;
+ bio->bi_end_io = drbd_request_endio;
bio->bi_next = NULL;
}
-static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
- struct bio *bio_src)
-{
- struct drbd_request *req =
- mempool_alloc(drbd_request_mempool, GFP_NOIO);
- if (likely(req)) {
- drbd_req_make_private_bio(req, bio_src);
-
- req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->mdev = mdev;
- req->master_bio = bio_src;
- req->epoch = 0;
- req->sector = bio_src->bi_sector;
- req->size = bio_src->bi_size;
- INIT_HLIST_NODE(&req->collision);
- INIT_LIST_HEAD(&req->tl_requests);
- INIT_LIST_HEAD(&req->w.list);
- }
- return req;
-}
-
-static inline void drbd_req_free(struct drbd_request *req)
-{
- mempool_free(req, drbd_request_mempool);
-}
-
-static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
-{
- return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_size, or similar. But that would be too ugly. */
@@ -321,6 +267,7 @@ struct bio_and_error {
int error;
};
+extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
@@ -328,13 +275,17 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
extern void complete_master_bio(struct drbd_conf *mdev,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
+extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+
+/* this is in drbd_main.c */
+extern void drbd_restart_request(struct drbd_request *req);
/* use this if you don't want to deal with calling complete_master_bio()
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
@@ -354,13 +305,13 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -368,7 +319,7 @@ static inline int req_mod(struct drbd_request *req,
return rv;
}
-static inline bool drbd_should_do_remote(union drbd_state s)
+static inline bool drbd_should_do_remote(union drbd_dev_state s)
{
return s.pdsk == D_UP_TO_DATE ||
(s.pdsk >= D_INCONSISTENT &&
@@ -378,7 +329,7 @@ static inline bool drbd_should_do_remote(union drbd_state s)
That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
states. */
}
-static inline bool drbd_should_send_oos(union drbd_state s)
+static inline bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
new file mode 100644
index 00000000000..53bf6182bac
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.c
@@ -0,0 +1,1856 @@
+/*
+ drbd_state.c
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
+ from Logicworks, Inc. for making SDP replication support possible.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/drbd_limits.h>
+#include "drbd_int.h"
+#include "drbd_req.h"
+
+/* in drbd_main.c */
+extern void tl_abort_disk_io(struct drbd_conf *mdev);
+
+struct after_state_chg_work {
+ struct drbd_work w;
+ union drbd_state os;
+ union drbd_state ns;
+ enum chg_state_flags flags;
+ struct completion *done;
+};
+
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
+
+static int w_after_state_ch(struct drbd_work *w, int unused);
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags);
+static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
+static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
+static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn);
+
+static inline bool is_susp(union drbd_state s)
+{
+ return s.susp || s.susp_nod || s.susp_fen;
+}
+
+bool conn_all_vols_unconf(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = true;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.disk != D_DISKLESS ||
+ mdev->state.conn != C_STANDALONE ||
+ mdev->state.role != R_SECONDARY) {
+ rv = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/* Unfortunately the states were not correctly ordered when
+   they were defined; therefore we cannot use max_t() here. */
+static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_PRIMARY || role2 == R_PRIMARY)
+ return R_PRIMARY;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_UNKNOWN;
+}
+static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
+ return R_UNKNOWN;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_PRIMARY;
+}
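Why max_t() would be wrong here: it compares the raw enum values, which do not follow the Primary > Secondary > Unknown ranking these helpers implement. A standalone illustration (the numbering of the toy enum is an assumption mirroring the usual drbd.h order, not taken from this patch):

	#include <stdio.h>

	/* hypothetical mirror of the role numbering: unknown=0, primary=1, secondary=2 */
	enum toy_role { TOY_UNKNOWN = 0, TOY_PRIMARY = 1, TOY_SECONDARY = 2 };

	#define NUMERIC_MAX(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		/* numerically, SECONDARY(2) beats PRIMARY(1) ... */
		printf("numeric max: %d\n", NUMERIC_MAX(TOY_PRIMARY, TOY_SECONDARY));
		/* ... but the "highest" role of a Primary/Secondary pair is Primary,
		 * which is exactly what max_role() above hard-codes. */
		return 0;
	}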
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
+{
+ enum drbd_role role = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ role = max_role(role, mdev->state.role);
+ rcu_read_unlock();
+
+ return role;
+}
+
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
+{
+ enum drbd_role peer = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ peer = max_role(peer, mdev->state.peer);
+ rcu_read_unlock();
+
+ return peer;
+}
+
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
+{
+ enum drbd_conns conn = C_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ rcu_read_unlock();
+
+ return conn;
+}
+
+static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+ bool rv = true;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ if (mdev->state.conn == C_WF_REPORT_PARAMS) {
+ rv = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+
+/**
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
+ * @mdev: DRBD device.
+ * @os: old (current) state.
+ * @ns: new (wanted) state.
+ */
+static int cl_wide_st_chg(struct drbd_conf *mdev,
+ union drbd_state os, union drbd_state ns)
+{
+ return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
+ ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
+ (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
+ (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
+ (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
+ (os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
+}
+
+static union drbd_state
+apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
+{
+ union drbd_state ns;
+ ns.i = (os.i & ~mask.i) | val.i;
+ return ns;
+}
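The mask/val convention used throughout the state code is plain bit arithmetic on the packed state word: clear the masked bits, then OR in the new values. The NS() helper used by the call sites later in this file essentially builds such mask/val pairs for a single field. A self-contained toy version (simplified, made-up bitfield layout; illustration only):

	#include <stdio.h>
	#include <stdint.h>

	/* toy stand-in for union drbd_state: two small fields in one 32-bit word */
	union toy_state {
		struct {
			unsigned role:2;
			unsigned conn:5;
		} f;
		uint32_t i;
	};

	static union toy_state apply_mask_val_toy(union toy_state os,
						  union toy_state mask,
						  union toy_state val)
	{
		union toy_state ns;
		ns.i = (os.i & ~mask.i) | val.i;	/* same expression as above */
		return ns;
	}

	int main(void)
	{
		union toy_state os = { .i = 0 }, mask = { .i = 0 }, val = { .i = 0 };

		os.f.role = 2;
		os.f.conn = 10;
		mask.f.role = 3;	/* select all bits of the role field only */
		val.f.role  = 1;	/* the new value for that field           */

		union toy_state ns = apply_mask_val_toy(os, mask, val);
		printf("role %d -> %d, conn stays %d\n", os.f.role, ns.f.role, ns.f.conn);
		return 0;
	}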
+
+enum drbd_state_rv
+drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+ union drbd_state mask, union drbd_state val)
+{
+ unsigned long flags;
+ union drbd_state ns;
+ enum drbd_state_rv rv;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, NULL);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_force_state() - Impose a change which happens outside our control on our state
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ */
+void drbd_force_state(struct drbd_conf *mdev,
+ union drbd_state mask, union drbd_state val)
+{
+ drbd_change_state(mdev, CS_HARD, mask, val);
+}
+
+static enum drbd_state_rv
+_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val)
+{
+ union drbd_state os, ns;
+ unsigned long flags;
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+
+ if (!cl_wide_st_chg(mdev, os, ns))
+ rv = SS_CW_NO_NEED;
+ if (rv == SS_UNKNOWN_ERROR) {
+ rv = is_valid_state(mdev, ns);
+ if (rv >= SS_SUCCESS) {
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+ }
+ }
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_req_state() - Perform a possibly cluster-wide state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Should not be called directly, use drbd_request_state() or
+ * _drbd_request_state().
+ */
+static enum drbd_state_rv
+drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ struct completion done;
+ unsigned long flags;
+ union drbd_state os, ns;
+ enum drbd_state_rv rv;
+
+ init_completion(&done);
+
+ if (f & CS_SERIALIZE)
+ mutex_lock(mdev->state_mutex);
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS) {
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ goto abort;
+ }
+
+ if (cl_wide_st_chg(mdev, os, ns)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv == SS_SUCCESS)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ if (drbd_send_state_req(mdev, mask, val)) {
+ rv = SS_CW_FAILED_BY_PEER;
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ wait_event(mdev->state_wait,
+ (rv = _req_st_cond(mdev, mask, val)));
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ } else {
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ }
+
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
+ D_ASSERT(current != mdev->tconn->worker.task);
+ wait_for_completion(&done);
+ }
+
+abort:
+ if (f & CS_SERIALIZE)
+ mutex_unlock(mdev->state_mutex);
+
+ return rv;
+}
+
+/**
+ * _drbd_request_state() - Request a state change (with flags)
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
+ * flag, or when logging of failed state change requests is not desired.
+ */
+enum drbd_state_rv
+_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ enum drbd_state_rv rv;
+
+ wait_event(mdev->state_wait,
+ (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+
+ return rv;
+}
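Typical call sites pair a mask/val built by NS() with the desired flags; the calls further down in this file follow the same pattern. A minimal usage sketch (illustrative; error handling trimmed):

	enum drbd_state_rv rv;

	/* ask for a connection state change, waiting out transient states */
	rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
	if (rv < SS_SUCCESS)
		return rv;	/* declined with one of the SS_* codes checked above */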
+
+static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+{
+ dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
+ name,
+ drbd_conn_str(ns.conn),
+ drbd_role_str(ns.role),
+ drbd_role_str(ns.peer),
+ drbd_disk_str(ns.disk),
+ drbd_disk_str(ns.pdsk),
+ is_susp(ns) ? 's' : 'r',
+ ns.aftr_isp ? 'a' : '-',
+ ns.peer_isp ? 'p' : '-',
+ ns.user_isp ? 'u' : '-',
+ ns.susp_fen ? 'F' : '-',
+ ns.susp_nod ? 'N' : '-'
+ );
+}
+
+void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum drbd_state_rv err)
+{
+ if (err == SS_IN_TRANSIENT_STATE)
+ return;
+ dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
+ print_st(mdev, " state", os);
+ print_st(mdev, "wanted", ns);
+}
+
+static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char *pbp;
+ pbp = pb;
+ *pbp = 0;
+
+ if (ns.role != os.role && flags & CS_DC_ROLE)
+ pbp += sprintf(pbp, "role( %s -> %s ) ",
+ drbd_role_str(os.role),
+ drbd_role_str(ns.role));
+ if (ns.peer != os.peer && flags & CS_DC_PEER)
+ pbp += sprintf(pbp, "peer( %s -> %s ) ",
+ drbd_role_str(os.peer),
+ drbd_role_str(ns.peer));
+ if (ns.conn != os.conn && flags & CS_DC_CONN)
+ pbp += sprintf(pbp, "conn( %s -> %s ) ",
+ drbd_conn_str(os.conn),
+ drbd_conn_str(ns.conn));
+ if (ns.disk != os.disk && flags & CS_DC_DISK)
+ pbp += sprintf(pbp, "disk( %s -> %s ) ",
+ drbd_disk_str(os.disk),
+ drbd_disk_str(ns.disk));
+ if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
+ pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
+ drbd_disk_str(os.pdsk),
+ drbd_disk_str(ns.pdsk));
+
+ return pbp - pb;
+}
+
+static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);
+
+ if (ns.aftr_isp != os.aftr_isp)
+ pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
+ os.aftr_isp,
+ ns.aftr_isp);
+ if (ns.peer_isp != os.peer_isp)
+ pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
+ os.peer_isp,
+ ns.peer_isp);
+ if (ns.user_isp != os.user_isp)
+ pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
+ os.user_isp,
+ ns.user_isp);
+
+ if (pbp != pb)
+ dev_info(DEV, "%s\n", pb);
+}
+
+static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags);
+
+ if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
+ pbp += sprintf(pbp, "susp( %d -> %d ) ",
+ is_susp(os),
+ is_susp(ns));
+
+ if (pbp != pb)
+ conn_info(tconn, "%s\n", pb);
+}
+
+
+/**
+ * is_valid_state() - Returns an SS_ error code if ns is not valid
+ * @mdev: DRBD device.
+ * @ns: State to consider.
+ */
+static enum drbd_state_rv
+is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+{
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ enum drbd_fencing_p fp;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ }
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (!nc->two_primaries && ns.role == R_PRIMARY) {
+ if (ns.peer == R_PRIMARY)
+ rv = SS_TWO_PRIMARIES;
+ else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
+ rv = SS_O_VOL_PEER_PRI;
+ }
+ }
+
+ if (rv <= 0)
+ /* already found a reason to abort */;
+ else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ rv = SS_DEVICE_IN_USE;
+
+ else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (fp >= FP_RESOURCE &&
+ ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
+ rv = SS_PRIMARY_NOP;
+
+ else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
+ rv = SS_NO_LOCAL_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
+ rv = SS_NO_REMOTE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if ((ns.conn == C_CONNECTED ||
+ ns.conn == C_WF_BITMAP_S ||
+ ns.conn == C_SYNC_SOURCE ||
+ ns.conn == C_PAUSED_SYNC_S) &&
+ ns.disk == D_OUTDATED)
+ rv = SS_CONNECTED_OUTDATES;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ (nc->verify_alg[0] == 0))
+ rv = SS_NO_VERIFY_ALG;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ mdev->tconn->agreed_pro_version < 88)
+ rv = SS_NOT_SUPPORTED;
+
+ else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
+ rv = SS_CONNECTED_OUTDATES;
+
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/**
+ * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
+ * This function limits state transitions that may be declined by DRBD,
+ * i.e. user requests (aka soft transitions).
+ * @os: old state.
+ * @ns: new state.
+ * @tconn: the connection the transition applies to.
+ */
+static enum drbd_state_rv
+is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+
+ if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
+ os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
+ rv = SS_ALREADY_STANDALONE;
+
+ if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
+ rv = SS_NO_NET_CONFIG;
+
+ if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
+ rv = SS_LOWER_THAN_OUTDATED;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
+ rv = SS_IN_TRANSIENT_STATE;
+
+ /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
+ rv = SS_IN_TRANSIENT_STATE; */
+
+ /* While establishing a connection only allow cstate to change.
+ Delay/refuse role changes, detach attach etc... */
+ if (test_bit(STATE_SENT, &tconn->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ ns.conn != os.conn && os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
+ && os.conn < C_WF_REPORT_PARAMS)
+ rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+
+ return rv;
+}
+
+static enum drbd_state_rv
+is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
+{
+ /* no change -> nothing to do, at least for the connection part */
+ if (oc == nc)
+ return SS_NOTHING_TO_DO;
+
+ /* disconnect of an unconfigured connection does not make sense */
+ if (oc == C_STANDALONE && nc == C_DISCONNECTING)
+ return SS_ALREADY_STANDALONE;
+
+ /* from C_STANDALONE, we start with C_UNCONNECTED */
+ if (oc == C_STANDALONE && nc != C_UNCONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* When establishing a connection we need to go through WF_REPORT_PARAMS!
+ Necessary to do the right thing upon invalidate-remote on a disconnected resource */
+ if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
+ if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
+ return SS_IN_TRANSIENT_STATE;
+
+ /* After C_DISCONNECTING only C_STANDALONE may follow */
+ if (oc == C_DISCONNECTING && nc != C_STANDALONE)
+ return SS_IN_TRANSIENT_STATE;
+
+ return SS_SUCCESS;
+}
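For orientation, a few example verdicts of the checks above, evaluated in order (assuming the drbd.h ordering of enum drbd_conns; comments only, illustration):

	/*  C_STANDALONE      -> C_UNCONNECTED   : SS_SUCCESS (the only way out)
	 *  C_STANDALONE      -> C_DISCONNECTING : SS_ALREADY_STANDALONE
	 *  C_NETWORK_FAILURE -> C_WF_CONNECTION : SS_IN_TRANSIENT_STATE
	 *                       (only C_UNCONNECTED or C_DISCONNECTING may follow)
	 *  C_DISCONNECTING   -> C_STANDALONE    : SS_SUCCESS
	 */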
+
+
+/**
+ * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
+ * This limits hard state transitions. Hard state transitions are facts that are
+ * imposed on DRBD by the environment, e.g. the disk broke or the network broke down.
+ * But even those hard state transitions are not allowed to do everything.
+ * @ns: new state.
+ * @os: old state.
+ */
+static enum drbd_state_rv
+is_valid_transition(union drbd_state os, union drbd_state ns)
+{
+ enum drbd_state_rv rv;
+
+ rv = is_valid_conn_transition(os.conn, ns.conn);
+
+ /* we cannot fail (again) if we already detached */
+ if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ return rv;
+}
+
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
+/**
+ * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
+ * @mdev: DRBD device.
+ * @ns: new state.
+ * @warn: if not NULL, receives a warning code for the caller to report.
+ *
+ * When we lose the connection, we have to set the state of the peer's disk (pdsk)
+ * to D_UNKNOWN. This rule and many more along those lines are in this function.
+ */
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn)
+{
+ enum drbd_fencing_p fp;
+ enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+
+ if (warn)
+ *warn = NO_WARNING;
+
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ rcu_read_lock();
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ rcu_read_unlock();
+ put_ldev(mdev);
+ }
+
+ /* Implications from connection to peer and peer_isp */
+ if (ns.conn < C_CONNECTED) {
+ ns.peer_isp = 0;
+ ns.peer = R_UNKNOWN;
+ if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
+ ns.pdsk = D_UNKNOWN;
+ }
+
+ /* Clear the aftr_isp when becoming unconfigured */
+ if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
+ ns.aftr_isp = 0;
+
+ /* An implication of the disk states onto the connection state */
+ /* Abort resync if a disk fails/detaches */
+ if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
+ if (warn)
+ *warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
+ ns.conn = C_CONNECTED;
+ }
+
+ /* Connection breaks down before we finished "Negotiating" */
+ if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
+ get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
+ ns.disk = mdev->new_state_tmp.disk;
+ ns.pdsk = mdev->new_state_tmp.pdsk;
+ } else {
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
+ ns.disk = D_DISKLESS;
+ ns.pdsk = D_UNKNOWN;
+ }
+ put_ldev(mdev);
+ }
+
+ /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
+ if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
+ if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
+ ns.disk = D_UP_TO_DATE;
+ if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
+ ns.pdsk = D_UP_TO_DATE;
+ }
+
+ /* Implications of the connection state on the disk states */
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_UNKNOWN;
+ switch ((enum drbd_conns)ns.conn) {
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ case C_STARTING_SYNC_T:
+ case C_WF_SYNC_UUID:
+ case C_BEHIND:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_OUTDATED;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_VERIFY_S:
+ case C_VERIFY_T:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_CONNECTED:
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_DISKLESS;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_WF_BITMAP_S:
+ case C_PAUSED_SYNC_S:
+ case C_STARTING_SYNC_S:
+ case C_AHEAD:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
+ break;
+ case C_SYNC_TARGET:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_INCONSISTENT;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_SYNC_SOURCE:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_INCONSISTENT;
+ break;
+ case C_STANDALONE:
+ case C_DISCONNECTING:
+ case C_UNCONNECTED:
+ case C_TIMEOUT:
+ case C_BROKEN_PIPE:
+ case C_NETWORK_FAILURE:
+ case C_PROTOCOL_ERROR:
+ case C_TEAR_DOWN:
+ case C_WF_CONNECTION:
+ case C_WF_REPORT_PARAMS:
+ case C_MASK:
+ break;
+ }
+ if (ns.disk > disk_max)
+ ns.disk = disk_max;
+
+ if (ns.disk < disk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
+ ns.disk = disk_min;
+ }
+ if (ns.pdsk > pdsk_max)
+ ns.pdsk = pdsk_max;
+
+ if (ns.pdsk < pdsk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
+ ns.pdsk = pdsk_min;
+ }
+
+ if (fp == FP_STONITH &&
+ (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
+ ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
+
+ if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data) */
+
+ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
+ if (ns.conn == C_SYNC_SOURCE)
+ ns.conn = C_PAUSED_SYNC_S;
+ if (ns.conn == C_SYNC_TARGET)
+ ns.conn = C_PAUSED_SYNC_T;
+ } else {
+ if (ns.conn == C_PAUSED_SYNC_S)
+ ns.conn = C_SYNC_SOURCE;
+ if (ns.conn == C_PAUSED_SYNC_T)
+ ns.conn = C_SYNC_TARGET;
+ }
+
+ return ns;
+}
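A small sketch of what these implications do in practice for the common "connection just broke down" case (illustration only; passing NULL for warn skips the warning plumbing):

	union drbd_state ns = drbd_read_state(mdev);

	ns.conn = C_UNCONNECTED;	/* e.g. the network just went away */
	ns = sanitize_state(mdev, ns, NULL);
	/* now ns.peer == R_UNKNOWN, ns.peer_isp == 0, and a previously
	 * up-to-date ns.pdsk has been pulled back to D_UNKNOWN (see the
	 * "Implications from connection to peer and peer_isp" block above) */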
+
+void drbd_resume_al(struct drbd_conf *mdev)
+{
+ if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+ dev_info(DEV, "Resumed AL updates\n");
+}
+
+/* helper for __drbd_set_state */
+static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+{
+ if (mdev->tconn->agreed_pro_version < 90)
+ mdev->ov_start_sector = 0;
+ mdev->rs_total = drbd_bm_bits(mdev);
+ mdev->ov_position = 0;
+ if (cs == C_VERIFY_T) {
+ /* starting online verify from an arbitrary position
+ * does not fit well into the existing protocol.
+ * on C_VERIFY_T, we initialize ov_left and friends
+ * implicitly in receive_DataRequest once the
+ * first P_OV_REQUEST is received */
+ mdev->ov_start_sector = ~(sector_t)0;
+ } else {
+ unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
+ if (bit >= mdev->rs_total) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(mdev->rs_total - 1);
+ mdev->rs_total = 1;
+ } else
+ mdev->rs_total -= bit;
+ mdev->ov_position = mdev->ov_start_sector;
+ }
+ mdev->ov_left = mdev->rs_total;
+}
+
+/**
+ * __drbd_set_state() - Set a new DRBD state
+ * @mdev: DRBD device.
+ * @ns: new state.
+ * @flags: Flags
+ * @done: Optional completion that will be completed after after_state_ch() has finished
+ *
+ * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
+ */
+enum drbd_state_rv
+__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum chg_state_flags flags, struct completion *done)
+{
+ union drbd_state os;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ enum sanitize_state_warnings ssw;
+ struct after_state_chg_work *ascw;
+
+ os = drbd_read_state(mdev);
+
+ ns = sanitize_state(mdev, ns, &ssw);
+ if (ns.i == os.i)
+ return SS_NOTHING_TO_DO;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ return rv;
+
+ if (!(flags & CS_HARD)) {
+ /* pre-state-change checks ; only look at ns */
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ /* If the old state was illegal as well, then let
+ this happen...*/
+
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ }
+
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ return rv;
+ }
+
+ print_sanitize_warnings(mdev, ssw);
+
+ drbd_pr_state_change(mdev, os, ns, flags);
+
+ /* Display changes to the susp* flags that were caused by the call to
+ sanitize_state(). Only display them here if we were not called from
+ _conn_request_state() */
+ if (!(flags & CS_DC_SUSP))
+ conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
+
+ /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+ * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+ * drbd_ldev_destroy() won't happen before our corresponding
+ * after_state_ch works run, where we put_ldev again. */
+ if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+ atomic_inc(&mdev->local_cnt);
+
+ mdev->state.i = ns.i;
+ mdev->tconn->susp = ns.susp;
+ mdev->tconn->susp_nod = ns.susp_nod;
+ mdev->tconn->susp_fen = ns.susp_fen;
+
+ if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
+ drbd_print_uuids(mdev, "attached to UUIDs");
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
+ no_peer_wf_report_params(mdev->tconn))
+ clear_bit(STATE_SENT, &mdev->tconn->flags);
+
+ wake_up(&mdev->misc_wait);
+ wake_up(&mdev->state_wait);
+ wake_up(&mdev->tconn->ping_wait);
+
+ /* Aborted verify run, or we reached the stop sector.
+ * Log the last position, unless end-of-device. */
+ if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
+ ns.conn <= C_CONNECTED) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
+ if (mdev->ov_left)
+ dev_info(DEV, "Online Verify reached sector %llu\n",
+ (unsigned long long)mdev->ov_start_sector);
+ }
+
+ if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
+ dev_info(DEV, "Syncer continues.\n");
+ mdev->rs_paused += (long)jiffies
+ -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+ if (ns.conn == C_SYNC_TARGET)
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+
+ if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
+ (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
+ dev_info(DEV, "Resync suspended\n");
+ mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
+ }
+
+ if (os.conn == C_CONNECTED &&
+ (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
+ unsigned long now = jiffies;
+ int i;
+
+ set_ov_position(mdev, ns.conn);
+ mdev->rs_start = now;
+ mdev->rs_last_events = 0;
+ mdev->rs_last_sect_ev = 0;
+ mdev->ov_last_oos_size = 0;
+ mdev->ov_last_oos_start = 0;
+
+ for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+ mdev->rs_mark_left[i] = mdev->ov_left;
+ mdev->rs_mark_time[i] = now;
+ }
+
+ drbd_rs_controller_reset(mdev);
+
+ if (ns.conn == C_VERIFY_S) {
+ dev_info(DEV, "Starting Online Verify from sector %llu\n",
+ (unsigned long long)mdev->ov_position);
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+ }
+
+ if (get_ldev(mdev)) {
+ u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
+ MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
+
+ mdf &= ~MDF_AL_CLEAN;
+ if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ mdf |= MDF_CRASHED_PRIMARY;
+ if (mdev->state.role == R_PRIMARY ||
+ (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ mdf |= MDF_PRIMARY_IND;
+ if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ mdf |= MDF_CONNECTED_IND;
+ if (mdev->state.disk > D_INCONSISTENT)
+ mdf |= MDF_CONSISTENT;
+ if (mdev->state.disk > D_OUTDATED)
+ mdf |= MDF_WAS_UP_TO_DATE;
+ if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ mdf |= MDF_PEER_OUT_DATED;
+ if (mdf != mdev->ldev->md.flags) {
+ mdev->ldev->md.flags = mdf;
+ drbd_md_mark_dirty(mdev);
+ }
+ if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
+ drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
+ put_ldev(mdev);
+ }
+
+ /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
+ if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
+ os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
+ set_bit(CONSIDER_RESYNC, &mdev->flags);
+
+ /* Receiver should clean up itself */
+ if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Now the receiver finished cleaning up itself, it should die */
+ if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Upon network failure, we need to restart the receiver. */
+ if (os.conn > C_WF_CONNECTION &&
+ ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
+ drbd_thread_restart_nowait(&mdev->tconn->receiver);
+
+ /* Resume AL writing if we get a connection */
+ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+ drbd_resume_al(mdev);
+
+ /* remember last attach time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
+ ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
+ if (ascw) {
+ ascw->os = os;
+ ascw->ns = ns;
+ ascw->flags = flags;
+ ascw->w.cb = w_after_state_ch;
+ ascw->w.mdev = mdev;
+ ascw->done = done;
+ drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
+ } else {
+ dev_err(DEV, "Could not kmalloc an ascw\n");
+ }
+
+ return rv;
+}
+
+static int w_after_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_state_chg_work *ascw =
+ container_of(w, struct after_state_chg_work, w);
+ struct drbd_conf *mdev = w->mdev;
+
+ after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
+ if (ascw->flags & CS_WAIT_COMPLETE) {
+ D_ASSERT(ascw->done != NULL);
+ complete(ascw->done);
+ }
+ kfree(ascw);
+
+ return 0;
+}
+
+static void abw_start_sync(struct drbd_conf *mdev, int rv)
+{
+ if (rv) {
+ dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
+ _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+ return;
+ }
+
+ switch (mdev->state.conn) {
+ case C_STARTING_SYNC_T:
+ _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ break;
+ case C_STARTING_SYNC_S:
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ break;
+ }
+}
+
+int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags)
+{
+ int rv;
+
+ D_ASSERT(current == mdev->tconn->worker.task);
+
+ /* open coded non-blocking drbd_suspend_io(mdev); */
+ set_bit(SUSPEND_IO, &mdev->flags);
+
+ drbd_bm_lock(mdev, why, flags);
+ rv = io_fn(mdev);
+ drbd_bm_unlock(mdev);
+
+ drbd_resume_io(mdev);
+
+ return rv;
+}
+
+/**
+ * after_state_ch() - Perform after state change actions that may sleep
+ * @mdev: DRBD device.
+ * @os: old state.
+ * @ns: new state.
+ * @flags: Flags
+ */
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags)
+{
+ struct sib_info sib;
+
+ sib.sib_reason = SIB_STATE_CHANGE;
+ sib.os = os;
+ sib.ns = ns;
+
+ if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
+ clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (mdev->p_uuid)
+ mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+ }
+
+ /* Inform userspace about the change... */
+ drbd_bcast_event(mdev, &sib);
+
+ if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ drbd_khelper(mdev, "pri-on-incon-degr");
+
+ /* Here we have the actions that are performed after a
+ state change. This function might sleep */
+
+ if (ns.susp_nod) {
+ struct drbd_tconn *tconn = mdev->tconn;
+ enum drbd_req_event what = NOTHING;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
+ what = RESEND;
+
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ conn_lowest_disk(tconn) > D_NEGOTIATING)
+ what = RESTART_FROZEN_DISK_IO;
+
+ if (tconn->susp_nod && what != NOTHING) {
+ _tl_restart(tconn, what);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_nod = 1 } },
+ (union drbd_state) { { .susp_nod = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ if (ns.susp_fen) {
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
+ /* case2: The connection was established again: */
+ struct drbd_conf *odev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, odev, vnr)
+ clear_bit(NEW_CUR_UUID, &odev->flags);
+ rcu_read_unlock();
+ _tl_restart(tconn, RESEND);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ /* Became sync source. With protocol >= 96, we still need to send out
+ * the sync uuid now. Need to do that before any drbd_send_state, or
+ * the other side may go "paused sync" before receiving the sync uuids,
+ * which is unexpected. */
+ if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+ mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
+ drbd_gen_and_send_sync_uuid(mdev);
+ put_ldev(mdev);
+ }
+
+ /* Do not change the order of the if above and the two below... */
+ if (os.pdsk == D_DISKLESS &&
+ ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
+ /* we probably will start a resync soon.
+ * make sure those things are properly reset. */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(mdev);
+
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+ /* No point in queuing send_bitmap if we don't have a connection
+ * anymore, so check also the _current_ state, not only the new state
+ * at the time this work was queued. */
+ if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
+ mdev->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ "send_bitmap (WFBitMapS)",
+ BM_LOCKED_TEST_ALLOWED);
+
+ /* Lost contact to peer's copy of the data */
+ if ((os.pdsk >= D_INCONSISTENT &&
+ os.pdsk != D_UNKNOWN &&
+ os.pdsk != D_OUTDATED)
+ && (ns.pdsk < D_INCONSISTENT ||
+ ns.pdsk == D_UNKNOWN ||
+ ns.pdsk == D_OUTDATED)) {
+ if (get_ldev(mdev)) {
+ if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ if (drbd_suspended(mdev)) {
+ set_bit(NEW_CUR_UUID, &mdev->flags);
+ } else {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ }
+ put_ldev(mdev);
+ }
+ }
+
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ /* D_DISKLESS Peer becomes secondary */
+ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
+ /* We may still be Primary ourselves.
+ * No harm done if the bitmap still changes,
+ * redirtied pages will follow later. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Write out all changed bits on demote.
+ * Though, no need to do that just yet
+ * if there is a resync still going on */
+ if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
+ mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ /* No changes to the bitmap expected this time, so assert that,
+ * even though no harm was done if it did change. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote", BM_LOCKED_TEST_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Last part of the attaching process ... */
+ if (ns.conn >= C_CONNECTED &&
+ os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+
+ /* We want to pause/continue resync, tell peer. */
+ if (ns.conn >= C_CONNECTED &&
+ ((os.aftr_isp != ns.aftr_isp) ||
+ (os.user_isp != ns.user_isp)))
+ drbd_send_state(mdev, ns);
+
+ /* In case one of the isp bits got set, suspend other devices. */
+ if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
+ (ns.aftr_isp || ns.peer_isp || ns.user_isp))
+ suspend_other_sg(mdev);
+
+ /* Make sure the peer gets informed about possible state
+ changes (ISP bits) while we were in WFReportParams. */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
+ drbd_send_state(mdev, ns);
+
+ /* We are in the process of starting a full sync... */
+ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
+ /* no other bitmap changes expected during this phase */
+ drbd_queue_bitmap_io(mdev,
+ &drbd_bmio_set_n_write, &abw_start_sync,
+ "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
+
+ /* We are invalidating ourselves... */
+ if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
+ os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
+ /* other bitmap operation expected during this phase */
+ drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
+ "set_n_write from invalidate", BM_LOCKED_MASK);
+
+ /* first half of local IO error, failure to attach,
+ * or administrative detach */
+ if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
+ /* corresponding get_ldev was in __drbd_set_state, to serialize
+ * our cleanup here with the transition to D_DISKLESS.
+ * But it is still not safe to dereference ldev here, since
+ * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ rcu_read_lock();
+ eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ if (was_io_error && eh == EP_CALL_HELPER)
+ drbd_khelper(mdev, "local-io-error");
+
+ /* Immediately allow completion of all application IO,
+ * that waits for completion from the local disk,
+ * if this was a force-detach due to disk_timeout
+ * or administrator request (drbdsetup detach --force).
+ * Do NOT abort otherwise.
+ * Aborting local requests may cause serious problems,
+ * if requests are completed to upper layers already,
+ * and then later the already submitted local bio completes.
+ * This can cause DMA into former bio pages that meanwhile
+ * have been re-used for other things.
+ * So aborting local requests may cause crashes,
+ * or even worse, silent data corruption.
+ */
+ if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
+ put_ldev(mdev);
+ }
+
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (mdev->state.disk != D_DISKLESS)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+ /* corresponding get_ldev in __drbd_set_state
+ * this may finally trigger drbd_ldev_destroy. */
+ put_ldev(mdev);
+ }
+
+ /* Notify peer that I had a local IO error and did not detach. */
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Disks got bigger while they were detached */
+ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
+ test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ if (ns.conn == C_CONNECTED)
+ resync_after_online_grow(mdev);
+ }
+
+ /* A resync finished or aborted, wake paused devices... */
+ if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
+ (os.peer_isp && !ns.peer_isp) ||
+ (os.user_isp && !ns.user_isp))
+ resume_next_sg(mdev);
+
+ /* sync target done with resync. Explicitly notify peer, even though
+ * it should (at least for non-empty resyncs) already know itself. */
+ if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Verify finished, or reached stop sector. Peer did not know about
+ * the stop sector, and we may even have changed the stop sector during
+ * verify to interrupt/stop early. Send the new state. */
+ if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
+ && verify_can_do_stop_sector(mdev))
+ drbd_send_state(mdev, ns);
+
+ /* This triggers bitmap writeout of potentially still unwritten pages
+ * if the resync finished cleanly, or aborted because of peer disk
+ * failure, or because of connection loss.
+ * For resync aborted because of local disk failure, we cannot do
+ * any bitmap writeout anymore.
+ * No harm done if some bits change during this phase.
+ */
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ if (ns.disk == D_DISKLESS &&
+ ns.conn == C_STANDALONE &&
+ ns.role == R_SECONDARY) {
+ if (os.aftr_isp != ns.aftr_isp)
+ resume_next_sg(mdev);
+ }
+
+ drbd_md_sync(mdev);
+}
+
+struct after_conn_state_chg_work {
+ struct drbd_work w;
+ enum drbd_conns oc;
+ union drbd_state ns_min;
+ union drbd_state ns_max; /* new, max state, over all mdevs */
+ enum chg_state_flags flags;
+};
+
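+/* Worker callback for connection-wide state changes, queued by
+ * _conn_request_state(): starts the receiver on C_STANDALONE ->
+ * C_UNCONNECTED, tears down the network configuration on C_DISCONNECTING ->
+ * C_STANDALONE, and resolves the susp_fen case once the peer is known to
+ * be outdated. */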
+static int w_after_conn_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_conn_state_chg_work *acscw =
+ container_of(w, struct after_conn_state_chg_work, w);
+ struct drbd_tconn *tconn = w->tconn;
+ enum drbd_conns oc = acscw->oc;
+ union drbd_state ns_max = acscw->ns_max;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ kfree(acscw);
+
+ /* Upon network configuration, we need to start the receiver */
+ if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
+ drbd_thread_start(&tconn->receiver);
+
+ if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
+ struct net_conf *old_conf;
+
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ tconn->my_addr_len = 0;
+ tconn->peer_addr_len = 0;
+ rcu_assign_pointer(tconn->net_conf, NULL);
+ conn_free_crypto(tconn);
+ mutex_unlock(&tconn->conf_update);
+
+ synchronize_rcu();
+ kfree(old_conf);
+ }
+
+ if (ns_max.susp_fen) {
+ /* case1: The outdate peer handler is successful: */
+ if (ns_max.pdsk <= D_OUTDATED) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ drbd_uuid_new_current(mdev);
+ clear_bit(NEW_CUR_UUID, &mdev->flags);
+ }
+ }
+ rcu_read_unlock();
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+ }
+ }
+ kref_put(&tconn->kref, &conn_destroy);
+
+ conn_md_sync(tconn);
+
+ return 0;
+}
+
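+/* Determine the "old" state common to all volumes of the connection, and
+ * reduce the CS_DC_* flags so that only state fields which are identical
+ * across all volumes remain marked as a connection-wide change. */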
+void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+{
+ enum chg_state_flags flags = ~0;
+ struct drbd_conf *mdev;
+ int vnr, first_vol = 1;
+ union drbd_dev_state os, cs = {
+ { .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = tconn->cstate,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN,
+ } };
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = mdev->state;
+
+ if (first_vol) {
+ cs = os;
+ first_vol = 0;
+ continue;
+ }
+
+ if (cs.role != os.role)
+ flags &= ~CS_DC_ROLE;
+
+ if (cs.peer != os.peer)
+ flags &= ~CS_DC_PEER;
+
+ if (cs.conn != os.conn)
+ flags &= ~CS_DC_CONN;
+
+ if (cs.disk != os.disk)
+ flags &= ~CS_DC_DISK;
+
+ if (cs.pdsk != os.pdsk)
+ flags &= ~CS_DC_PDSK;
+ }
+ rcu_read_unlock();
+
+ *pf |= CS_DC_MASK;
+ *pf &= flags;
+ (*pcs).i = cs.i;
+}
+
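+/* Check whether applying mask/val yields a valid transition on every volume
+ * of the connection; stops at the first volume that rejects it and, with
+ * CS_VERBOSE, prints the offending old/new state pair. */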
+static enum drbd_state_rv
+conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ union drbd_state ns, os;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ if (ns.i == os.i)
+ continue;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ break;
+
+ if (!(flags & CS_HARD)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, tconn);
+ }
+ if (rv < SS_SUCCESS)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+
+ return rv;
+}
+
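+/* Apply mask/val to every volume of the connection via __drbd_set_state()
+ * and return the field-wise minimum and maximum of the resulting states in
+ * *pns_min / *pns_max (with the connection-level susp bits merged in). */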
+void
+conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
+{
+ union drbd_state ns, os, ns_max = { };
+ union drbd_state ns_min = {
+ { .role = R_MASK,
+ .peer = R_MASK,
+ .conn = val.conn,
+ .disk = D_MASK,
+ .pdsk = D_MASK
+ } };
+ struct drbd_conf *mdev;
+ enum drbd_state_rv rv;
+ int vnr, number_of_volumes = 0;
+
+ if (mask.conn == C_MASK) {
+ /* remember last connect time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+ tconn->last_reconnect_jif = jiffies;
+
+ tconn->cstate = val.conn;
+ }
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ number_of_volumes++;
+ os = drbd_read_state(mdev);
+ ns = apply_mask_val(os, mask, val);
+ ns = sanitize_state(mdev, ns, NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ rv = __drbd_set_state(mdev, ns, flags, NULL);
+ if (rv < SS_SUCCESS)
+ BUG();
+
+ ns.i = mdev->state.i;
+ ns_max.role = max_role(ns.role, ns_max.role);
+ ns_max.peer = max_role(ns.peer, ns_max.peer);
+ ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
+ ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
+ ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
+
+ ns_min.role = min_role(ns.role, ns_min.role);
+ ns_min.peer = min_role(ns.peer, ns_min.peer);
+ ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
+ ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
+ ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
+ }
+ rcu_read_unlock();
+
+ if (number_of_volumes == 0) {
+ ns_min = ns_max = (union drbd_state) { {
+ .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = val.conn,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN
+ } };
+ }
+
+ ns_min.susp = ns_max.susp = tconn->susp;
+ ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
+ ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+
+ *pns_min = ns_min;
+ *pns_max = ns_max;
+}
+
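+/* Wait condition for the cluster-wide state change in _conn_request_state():
+ * reports success/failure once the peer has answered, SS_CW_NO_NEED if we
+ * are no longer in C_WF_REPORT_PARAMS, and otherwise keeps the caller
+ * waiting as long as the transition would still be valid. */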
+static enum drbd_state_rv
+_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
+
+ if (rv == SS_UNKNOWN_ERROR)
+ rv = conn_is_valid_transition(tconn, mask, val, 0);
+
+ if (rv == SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
+
+ return rv;
+}
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct after_conn_state_chg_work *acscw;
+ enum drbd_conns oc = tconn->cstate;
+ union drbd_state ns_max, ns_min, os;
+ bool have_mutex = false;
+
+ if (mask.conn) {
+ rv = is_valid_conn_transition(oc, val.conn);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ rv = conn_is_valid_transition(tconn, mask, val, flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+
+ if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
+ !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
+
+ /* This will be a cluster-wide state change.
+ * Need to give up the spinlock, grab the mutex,
+ * then send the state change request, ... */
+ spin_unlock_irq(&tconn->req_lock);
+ mutex_lock(&tconn->cstate_mutex);
+ have_mutex = true;
+
+ set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (conn_send_state_req(tconn, mask, val)) {
+ /* sending failed. */
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ rv = SS_CW_FAILED_BY_PEER;
+ /* need to re-acquire the spin lock, though */
+ goto abort_unlocked;
+ }
+
+ if (val.conn == C_DISCONNECTING)
+ set_bit(DISCONNECT_SENT, &tconn->flags);
+
+ /* ... and re-acquire the spinlock.
+ * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
+ * conn_set_state() within the same spinlock. */
+ spin_lock_irq(&tconn->req_lock);
+ wait_event_lock_irq(tconn->ping_wait,
+ (rv = _conn_rq_cond(tconn, mask, val)),
+ tconn->req_lock);
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ conn_old_common_state(tconn, &os, &flags);
+ flags |= CS_DC_SUSP;
+ conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
+ conn_pr_state_change(tconn, os, ns_max, flags);
+
+ acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
+ if (acscw) {
+ acscw->oc = os.conn;
+ acscw->ns_min = ns_min;
+ acscw->ns_max = ns_max;
+ acscw->flags = flags;
+ acscw->w.cb = w_after_conn_state_ch;
+ kref_get(&tconn->kref);
+ acscw->w.tconn = tconn;
+ drbd_queue_work(&tconn->sender_work, &acscw->w);
+ } else {
+ conn_err(tconn, "Could not kmalloc an acscw\n");
+ }
+
+ abort:
+ if (have_mutex) {
+ /* mutex_unlock() "... must not be used in interrupt context.",
+ * so give up the spinlock, then re-acquire it */
+ spin_unlock_irq(&tconn->req_lock);
+ abort_unlocked:
+ mutex_unlock(&tconn->cstate_mutex);
+ spin_lock_irq(&tconn->req_lock);
+ }
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
+ conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
+ conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+ conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+ }
+ return rv;
+}
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv;
+
+ spin_lock_irq(&tconn->req_lock);
+ rv = _conn_request_state(tconn, mask, val, flags);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return rv;
+}
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
new file mode 100644
index 00000000000..a3c361bbc4b
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.h
@@ -0,0 +1,161 @@
+#ifndef DRBD_STATE_H
+#define DRBD_STATE_H
+
+struct drbd_conf;
+struct drbd_tconn;
+
+/**
+ * DOC: DRBD State macros
+ *
+ * These macros are used to express state changes in easily readable form.
+ *
+ * The NS macros expand to a mask and a value that can be bit-ored onto the
+ * current state once the spinlock (req_lock) has been taken.
+ *
+ * The _NS macros are used for state functions that get called with the
+ * spinlock held. These macros expand directly to the new state value.
+ *
+ * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are
+ * defined to express state changes that affect more than one aspect of the
+ * state.
+ *
+ * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
+ * means that the network connection was established and that the peer
+ * is in the secondary role.
+ */
+#define role_MASK R_MASK
+#define peer_MASK R_MASK
+#define disk_MASK D_MASK
+#define pdsk_MASK D_MASK
+#define conn_MASK C_MASK
+#define susp_MASK 1
+#define user_isp_MASK 1
+#define aftr_isp_MASK 1
+#define susp_nod_MASK 1
+#define susp_fen_MASK 1
+
+#define NS(T, S) \
+ ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T = (S); val; })
+#define NS2(T1, S1, T2, S2) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val; })
+#define NS3(T1, S1, T2, S2, T3, S3) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val.T3 = (S3); val; })
+
+#define _NS(D, T, S) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T = (S); __ns; })
+#define _NS2(D, T1, S1, T2, S2) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns; })
+#define _NS3(D, T1, S1, T2, S2, T3, S3) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
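+/* Example (cf. abw_start_sync() in drbd_state.c):
+ *   _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ * Here NS(conn, C_WF_SYNC_UUID) expands to a mask selecting only the .conn
+ * field plus a value with .conn = C_WF_SYNC_UUID; the _NS() variants
+ * instead produce the complete new state directly. */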
+
+enum chg_state_flags {
+ CS_HARD = 1 << 0,
+ CS_VERBOSE = 1 << 1,
+ CS_WAIT_COMPLETE = 1 << 2,
+ CS_SERIALIZE = 1 << 3,
+ CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
+ CS_LOCAL_ONLY = 1 << 4, /* Do not consider a device pair wide state change */
+ CS_DC_ROLE = 1 << 5, /* DC = display as connection state change */
+ CS_DC_PEER = 1 << 6,
+ CS_DC_CONN = 1 << 7,
+ CS_DC_DISK = 1 << 8,
+ CS_DC_PDSK = 1 << 9,
+ CS_DC_SUSP = 1 << 10,
+ CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
+ CS_IGN_OUTD_FAIL = 1 << 11,
+};
+
+/* drbd_dev_state and drbd_state are different types. This is to stress the
+ small difference. There is no suspended flag (.susp), and no "suspended
+ while fence handler runs" flag (susp_fen). */
+union drbd_dev_state {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned _unused:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned peer_isp:1 ;
+ unsigned user_isp:1 ;
+ unsigned _pad:11; /* 0 unused */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned _pad:11;
+ unsigned user_isp:1 ;
+ unsigned peer_isp:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned _unused:1 ;
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+#else
+# error "this endianness is not supported"
+#endif
+ };
+ unsigned int i;
+};
+
+extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+ enum chg_state_flags f,
+ union drbd_state mask,
+ union drbd_state val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+ union drbd_state);
+extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+ union drbd_state,
+ union drbd_state,
+ enum chg_state_flags);
+extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+ enum chg_state_flags,
+ struct completion *done);
+extern void print_st_err(struct drbd_conf *, union drbd_state,
+ union drbd_state, int);
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+extern void drbd_resume_al(struct drbd_conf *mdev);
+extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+
+/**
+ * drbd_request_state() - Request a state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ *
+ * This is the most graceful way of requesting a state change. It is
+ * quite verbose in case the state change is not possible, and all those
+ * state changes are globally serialized.
+ */
+static inline int drbd_request_state(struct drbd_conf *mdev,
+ union drbd_state mask,
+ union drbd_state val)
+{
+ return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+}
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+
+#endif
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index c44a2a60277..9a664bd2740 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+ [-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
const char *drbd_conn_str(enum drbd_conns s)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 6bce2cc179d..424dc7bdf9b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -38,16 +38,13 @@
#include "drbd_int.h"
#include "drbd_req.h"
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel);
-
+static int w_make_ov_request(struct drbd_work *w, int cancel);
/* endio handlers:
* drbd_md_io_complete (defined here)
- * drbd_endio_pri (defined here)
- * drbd_endio_sec (defined here)
+ * drbd_request_endio (defined here)
+ * drbd_peer_request_endio (defined here)
* bm_async_io_complete (defined in drbd_bitmap.c)
*
* For all these callbacks, note the following:
@@ -60,7 +57,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
/* About the global_state_lock
Each state transition on an device holds a read lock. In case we have
- to evaluate the sync after dependencies, we grab a write lock, because
+ to evaluate the resync after dependencies, we grab a write lock, because
we need stable states on all devices for that. */
rwlock_t global_state_lock;
@@ -98,97 +95,93 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
-
- D_ASSERT(e->block_id != ID_VACANT);
+ struct drbd_conf *mdev = peer_req->w.mdev;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->read_cnt += e->size >> 9;
- list_del(&e->w.list);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->read_cnt += peer_req->i.size >> 9;
+ list_del(&peer_req->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- drbd_queue_work(&mdev->data.work, &e->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
- sector_t e_sector;
+ struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_interval i;
int do_wake;
- int is_syncer_req;
+ u64 block_id;
int do_al_complete_io;
- D_ASSERT(e->block_id != ID_VACANT);
-
- /* after we moved e to done_ee,
+ /* after we moved peer_req to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
- e_sector = e->sector;
- do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
- is_syncer_req = is_syncer_block_id(e->block_id);
+ i = peer_req->i;
+ do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+ block_id = peer_req->block_id;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- list_del(&e->w.list); /* has been on active_ee or sync_ee */
- list_add_tail(&e->w.list, &mdev->done_ee);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->writ_cnt += peer_req->i.size >> 9;
+ list_move_tail(&peer_req->w.list, &mdev->done_ee);
- /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
- * neither did we wake possibly waiting conflicting requests.
- * done from "drbd_process_done_ee" within the appropriate w.cb
- * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
+ /*
+ * Do not remove from the write_requests tree here: we did not send the
+ * Ack yet and did not wake possibly waiting conflicting requests.
+ * Removed from the tree from "drbd_process_done_ee" within the
+ * appropriate w.cb (e_end_block/e_end_resync_block) or from
+ * _drbd_clear_done_ee.
+ */
- do_wake = is_syncer_req
- ? list_empty(&mdev->sync_ee)
- : list_empty(&mdev->active_ee);
+ do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- if (is_syncer_req)
- drbd_rs_complete_io(mdev, e_sector);
+ if (block_id == ID_SYNCER)
+ drbd_rs_complete_io(mdev, i.sector);
if (do_wake)
wake_up(&mdev->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, e_sector);
+ drbd_al_complete_io(mdev, &i);
- wake_asender(mdev);
+ wake_asender(mdev->tconn);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
-void drbd_endio_sec(struct bio *bio, int error)
+void drbd_peer_request_endio(struct bio *bio, int error)
{
- struct drbd_epoch_entry *e = bio->bi_private;
- struct drbd_conf *mdev = e->mdev;
+ struct drbd_peer_request *peer_req = bio->bi_private;
+ struct drbd_conf *mdev = peer_req->w.mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
if (error && __ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?! */
@@ -196,24 +189,24 @@ void drbd_endio_sec(struct bio *bio, int error)
}
if (error)
- set_bit(__EE_WAS_ERROR, &e->flags);
+ set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
- if (atomic_dec_and_test(&e->pending_bios)) {
+ if (atomic_dec_and_test(&peer_req->pending_bios)) {
if (is_write)
- drbd_endio_write_sec_final(e);
+ drbd_endio_write_sec_final(peer_req);
else
- drbd_endio_read_sec_final(e);
+ drbd_endio_read_sec_final(peer_req);
}
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
-void drbd_endio_pri(struct bio *bio, int error)
+void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -227,53 +220,72 @@ void drbd_endio_pri(struct bio *bio, int error)
error = -EIO;
}
+
+ /* If this request was aborted locally before,
+ * but now was completed "successfully",
+ * chances are that this caused arbitrary data corruption.
+ *
+ * "aborting" requests, or force-detaching the disk, is intended for
+ * completely blocked/hung local backing devices which no longer
+ * complete requests at all, not even error completions. In this
+ * situation, usually a hard-reset and failover is the only way out.
+ *
+ * By "aborting", basically faking a local error-completion,
+ * we allow for a more graceful switchover by cleanly migrating services.
+ * Still the affected node has to be rebooted "soon".
+ *
+ * By completing these requests, we allow the upper layers to re-use
+ * the associated data pages.
+ *
+ * If later the local backing device "recovers", and now DMAs some data
+ * from disk into the original request pages, in the best case it will
+ * just put random data into unused pages; but typically it will corrupt
+ * meanwhile completely unrelated data, causing all sorts of damage.
+ *
+ * Which means delayed successful completion,
+ * especially for READ requests,
+ * is a reason to panic().
+ *
+ * We assume that a delayed *error* completion is OK,
+ * though we still will complain noisily about it.
+ */
+ if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+
+ if (!error)
+ panic("possible random memory corruption caused by delayed completion of aborted local request\n");
+ }
+
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
+ ? WRITE_COMPLETED_WITH_ERROR
: (bio_rw(bio) == READ)
- ? read_completed_with_error
- : read_ahead_completed_with_error;
+ ? READ_COMPLETED_WITH_ERROR
+ : READ_AHEAD_COMPLETED_WITH_ERROR;
} else
- what = completed_ok;
+ what = COMPLETED_OK;
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
}
-int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct drbd_request *req = container_of(w, struct drbd_request, w);
-
- /* We should not detach for read io-error,
- * but try to WRITE the P_DATA_REPLY to the failed location,
- * to give the disk the chance to relocate that block */
-
- spin_lock_irq(&mdev->req_lock);
- if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
- _req_mod(req, read_retry_remote_canceled);
- spin_unlock_irq(&mdev->req_lock);
- return 1;
- }
- spin_unlock_irq(&mdev->req_lock);
-
- return w_send_read_req(mdev, w, 0);
-}
-
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
+ struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
@@ -290,7 +302,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_e
page = tmp;
}
/* and now the last, possibly only partially used page */
- len = e->size & (PAGE_SIZE - 1);
+ len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
crypto_hash_update(&desc, &sg, sg.length);
crypto_hash_final(&desc, digest);
@@ -316,59 +328,58 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
crypto_hash_final(&desc, digest);
}
-/* TODO merge common code with w_e_end_ov_req */
-int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* MAYBE merge common code with w_e_end_ov_req */
+static int w_e_send_csum(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
int digest_size;
void *digest;
- int ok = 1;
-
- D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
+ int err = 0;
if (unlikely(cancel))
goto out;
- if (likely((e->flags & EE_WAS_ERROR) != 0))
+ if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- sector_t sector = e->sector;
- unsigned int size = e->size;
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
- /* Free e and pages before send.
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ /* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_CSUM_RS_REQUEST);
+ err = drbd_send_drequest_csum(mdev, sector, size,
+ digest, digest_size,
+ P_CSUM_RS_REQUEST);
kfree(digest);
} else {
dev_err(DEV, "kmalloc() of digest failed.\n");
- ok = 0;
+ err = -ENOMEM;
}
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
- return ok;
+ return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
if (!get_ldev(mdev))
return -EIO;
@@ -378,45 +389,47 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
- e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
+ size, GFP_TRY);
+ if (!peer_req)
goto defer;
- e->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ peer_req->w.cb = w_e_send_csum;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
defer:
put_ldev(mdev);
return -EAGAIN;
}
-int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_resync_timer(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
switch (mdev->state.conn) {
case C_VERIFY_S:
- w_make_ov_request(mdev, w, cancel);
+ w_make_ov_request(w, cancel);
break;
case C_SYNC_TARGET:
- w_make_resync_request(mdev, w, cancel);
+ w_make_resync_request(w, cancel);
break;
}
- return 1;
+ return 0;
}
void resync_timer_fn(unsigned long data)
@@ -424,7 +437,7 @@ void resync_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->data.work, &mdev->resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -456,8 +469,24 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
fb->values[i] += value;
}
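+
+/* Allocate a zero-initialized fifo_buffer with @fifo_size value slots
+ * (GFP_NOIO); returns NULL if the allocation fails. */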
+struct fifo_buffer *fifo_alloc(int fifo_size)
+{
+ struct fifo_buffer *fb;
+
+ fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+ if (!fb)
+ return NULL;
+
+ fb->head_index = 0;
+ fb->size = fifo_size;
+ fb->total = 0;
+
+ return fb;
+}
+
static int drbd_rs_controller(struct drbd_conf *mdev)
{
+ struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
int req_sect; /* Number of sectors to request in this turn */
@@ -466,38 +495,39 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
int steps; /* Number of time steps to plan ahead */
int curr_corr;
int max_sect;
+ struct fifo_buffer *plan;
sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
mdev->rs_in_flight -= sect_in;
- spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ plan = rcu_dereference(mdev->rs_plan_s);
- steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
+ steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
- want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
+ want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
- want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
- sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
+ want = dc->c_fill_target ? dc->c_fill_target :
+ sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
- correction = want - mdev->rs_in_flight - mdev->rs_planed;
+ correction = want - mdev->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
- fifo_add_val(&mdev->rs_plan_s, cps);
- mdev->rs_planed += cps * steps;
+ fifo_add_val(plan, cps);
+ plan->total += cps * steps;
/* What we do in this step */
- curr_corr = fifo_push(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
- mdev->rs_planed -= curr_corr;
+ curr_corr = fifo_push(plan, 0);
+ plan->total -= curr_corr;
req_sect = sect_in + curr_corr;
if (req_sect < 0)
req_sect = 0;
- max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
+ max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
if (req_sect > max_sect)
req_sect = max_sect;
@@ -513,22 +543,25 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
int number;
- if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+
+ rcu_read_lock();
+ if (rcu_dereference(mdev->rs_plan_s)->size) {
number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
- mdev->c_sync_rate = mdev->sync_conf.rate;
+ mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
+ rcu_read_unlock();
/* ignore the amount of pending requests, the resync controller should
* throttle down to incoming reply rate soon enough anyways. */
return number;
}
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel)
+int w_make_resync_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
@@ -538,12 +571,12 @@ static int w_make_resync_request(struct drbd_conf *mdev,
int i = 0;
if (unlikely(cancel))
- return 1;
+ return 0;
if (mdev->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
if (!get_ldev(mdev)) {
@@ -552,7 +585,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
- return 1;
+ return 0;
}
max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
@@ -562,15 +595,15 @@ static int w_make_resync_request(struct drbd_conf *mdev,
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket) {
- queued = mdev->data.socket->sk->sk_wmem_queued;
- sndbuf = mdev->data.socket->sk->sk_sndbuf;
+ mutex_lock(&mdev->tconn->data.mutex);
+ if (mdev->tconn->data.socket) {
+ queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
+ sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
} else {
queued = 1;
sndbuf = 0;
}
- mutex_unlock(&mdev->data.mutex);
+ mutex_unlock(&mdev->tconn->data.mutex);
if (queued > sndbuf / 2)
goto requeue;
@@ -581,7 +614,7 @@ next_sector:
if (bit == DRBD_END_OF_BITMAP) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
put_ldev(mdev);
- return 1;
+ return 0;
}
sector = BM_BIT_TO_SECT(bit);
@@ -640,11 +673,11 @@ next_sector:
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
+ if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
switch (read_for_csum(mdev, sector, size)) {
case -EIO: /* Disk failure */
put_ldev(mdev);
- return 0;
+ return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(mdev, sector);
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -657,13 +690,16 @@ next_sector:
BUG();
}
} else {
+ int err;
+
inc_rs_pending(mdev);
- if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
- sector, size, ID_SYNCER)) {
+ err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ sector, size, ID_SYNCER);
+ if (err) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
put_ldev(mdev);
- return 0;
+ return err;
}
}
}
@@ -676,21 +712,23 @@ next_sector:
* until then resync "work" is "inactive" ...
*/
put_ldev(mdev);
- return 1;
+ return 0;
}
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
put_ldev(mdev);
- return 1;
+ return 0;
}
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_make_ov_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
int number, i, size;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
@@ -699,9 +737,17 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
- if (sector >= capacity) {
+ if (sector >= capacity)
return 1;
- }
+
+ /* We check for "finished" only in the reply path:
+ * w_e_end_ov_reply().
+ * We need to send at least one request out. */
+ stop_sector_reached = i > 0
+ && verify_can_do_stop_sector(mdev)
+ && sector >= mdev->ov_stop_sector;
+ if (stop_sector_reached)
+ break;
size = BM_BLOCK_SIZE;
@@ -715,7 +761,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
size = (capacity-sector)<<9;
inc_rs_pending(mdev);
- if (!drbd_send_ov_request(mdev, sector, size)) {
+ if (drbd_send_ov_request(mdev, sector, size)) {
dec_rs_pending(mdev);
return 0;
}
@@ -725,56 +771,39 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- return 1;
-}
-
-
-void start_resync_timer_fn(unsigned long data)
-{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
-
- drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
-}
-
-int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
- dev_warn(DEV, "w_start_resync later...\n");
- mdev->start_resync_timer.expires = jiffies + HZ/10;
- add_timer(&mdev->start_resync_timer);
- return 1;
- }
-
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ if (i == 0 || !stop_sector_reached)
+ mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
-int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_ov_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
-static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_resync_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
static void ping_peer(struct drbd_conf *mdev)
{
- clear_bit(GOT_PING_ACK, &mdev->flags);
- request_ping(mdev);
- wait_event(mdev->misc_wait,
- test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ clear_bit(GOT_PING_ACK, &tconn->flags);
+ request_ping(tconn);
+ wait_event(tconn->ping_wait,
+ test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
@@ -799,7 +828,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
if (w) {
w->cb = w_resync_finished;
- drbd_queue_work(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
return 1;
}
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -808,7 +838,12 @@ int drbd_resync_finished(struct drbd_conf *mdev)
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
+
db = mdev->rs_total;
+ /* adjust for verify start and stop sectors, respectively the reached position */
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ db -= mdev->ov_left;
+
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
@@ -817,8 +852,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ping_peer(mdev);
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -831,7 +866,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- verify_done ? "Online verify " : "Resync",
+ verify_done ? "Online verify" : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
@@ -848,7 +883,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
- if (mdev->csums_tfm && mdev->rs_total) {
+ if (mdev->tconn->csums_tfm && mdev->rs_total) {
const unsigned long s = mdev->rs_same_csum;
const unsigned long t = mdev->rs_total;
const int ratio =
@@ -906,13 +941,15 @@ int drbd_resync_finished(struct drbd_conf *mdev)
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
put_ldev(mdev);
out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
- if (verify_done)
+
+ /* reset start sector, if we reached end of device */
+ if (verify_done && mdev->ov_left == 0)
mdev->ov_start_sector = 0;
drbd_md_sync(mdev);
@@ -924,19 +961,19 @@ out:
}
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
- if (drbd_ee_has_active_page(e)) {
+ if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
- int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &mdev->pp_in_use_by_net);
atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->net_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
}
/**
@@ -945,174 +982,177 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_data_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- ok = drbd_send_block(mdev, P_DATA_REPLY, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
/**
- * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUESTRS
+ * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
if (mdev->state.conn == C_AHEAD) {
- ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
- } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
"partner DISKLESS!\n");
- ok = 1;
+ err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(mdev, e->sector, e->size);
+ drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
-int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
int digest_size;
void *digest = NULL;
- int ok, eq = 0;
+ int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
- if (mdev->csums_tfm) {
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ if (mdev->tconn->csums_tfm) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
D_ASSERT(digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
- drbd_set_in_sync(mdev, e->sector, e->size);
+ drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
- mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
- ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
+ mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+ err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
} else {
inc_rs_pending(mdev);
- e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
- e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
+ peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
+ peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
}
} else {
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block/ack() failed\n");
- return ok;
+ return err;
}
-/* TODO merge common code with w_e_send_csum */
-int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
void *digest;
- int ok = 1;
+ int err = 0;
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
- ok = 0; /* terminate the connection in case the allocation failed */
+ err = 1; /* terminate the connection in case the allocation failed */
goto out;
}
- if (likely(!(e->flags & EE_WAS_ERROR)))
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ if (likely(!(peer_req->flags & EE_WAS_ERROR)))
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
@@ -1120,25 +1160,23 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_OV_REPLY);
- if (!ok)
+ err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ if (err)
dec_rs_pending(mdev);
kfree(digest);
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return ok;
+ return err;
}
-void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
{
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
@@ -1149,36 +1187,38 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
drbd_set_out_of_sync(mdev, sector, size);
}
-int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
void *digest;
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
- int ok, eq = 0;
+ int err, eq = 0;
+ bool stop_sector_reached = false;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
@@ -1186,19 +1226,19 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
}
- /* Free e and pages before send.
- * In case we block on congestion, we could otherwise run into
- * some distributed deadlock, if the other side blocks on
- * congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
+ /* Free peer_req and pages before send.
+ * In case we block on congestion, we could otherwise run into
+ * some distributed deadlock, if the other side blocks on
+ * congestion as well, because our receiver blocks in
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
if (!eq)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
- ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
- eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
+ err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
dec_unacked(mdev);
@@ -1208,73 +1248,102 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if ((mdev->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(mdev, mdev->ov_left);
- if (mdev->ov_left == 0) {
- ov_oos_print(mdev);
+ stop_sector_reached = verify_can_do_stop_sector(mdev) &&
+ (sector + (size>>9)) >= mdev->ov_stop_sector;
+
+ if (mdev->ov_left == 0 || stop_sector_reached) {
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
- return ok;
+ return err;
}
-int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_prev_work_done(struct drbd_work *w, int cancel)
{
struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
+
complete(&b->done);
- return 1;
+ return 0;
}
-int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* FIXME
+ * We need to track the number of pending barrier acks,
+ * and to be able to wait for them.
+ * See also comment in drbd_adm_attach before drbd_suspend_io.
+ */
+int drbd_send_barrier(struct drbd_tconn *tconn)
{
- struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
- struct p_barrier *p = &mdev->data.sbuf.barrier;
- int ok = 1;
-
- /* really avoid racing with tl_clear. w.cb may have been referenced
- * just before it was reassigned and re-queued, so double check that.
- * actually, this race was harmless, since we only try to send the
- * barrier packet here, and otherwise do nothing with the object.
- * but compare with the head of w_clear_epoch */
- spin_lock_irq(&mdev->req_lock);
- if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
- cancel = 1;
- spin_unlock_irq(&mdev->req_lock);
- if (cancel)
- return 1;
+ struct p_barrier *p;
+ struct drbd_socket *sock;
- if (!drbd_get_data_sock(mdev))
- return 0;
- p->barrier = b->br_number;
- /* inc_ap_pending was done where this was queued.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
- (struct p_header80 *)p, sizeof(*p), 0);
- drbd_put_data_sock(mdev);
-
- return ok;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->barrier = tconn->send.current_epoch_nr;
+ p->pad = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
-int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_write_hint(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_socket *sock;
+
if (cancel)
- return 1;
- return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
+ return 0;
+ sock = &mdev->tconn->data;
+ if (!drbd_prepare_command(mdev, sock))
+ return -EIO;
+ return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
-int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ if (!tconn->send.seen_any_write_yet) {
+ tconn->send.seen_any_write_yet = true;
+ tconn->send.current_epoch_nr = epoch;
+ tconn->send.current_epoch_writes = 0;
+ }
+}
+
+static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ /* nothing to close yet if we have not seen any write on this connection */
+ if (!tconn->send.seen_any_write_yet)
+ return;
+ if (tconn->send.current_epoch_nr != epoch) {
+ if (tconn->send.current_epoch_writes)
+ drbd_send_barrier(tconn);
+ tconn->send.current_epoch_nr = epoch;
+ }
+}
+
+int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_oos(mdev, req);
- req_mod(req, oos_handed_to_network);
+ /* this time, no tconn->send.current_epoch_writes++;
+ * If it was sent, it was the closing barrier for the last
+ * replicated epoch, before we went into AHEAD mode.
+ * No more barriers will be sent, until we leave AHEAD mode again. */
+ maybe_send_barrier(tconn, req->epoch);
+
+ err = drbd_send_out_of_sync(mdev, req);
+ req_mod(req, OOS_HANDED_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1283,20 +1352,26 @@ int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ re_init_if_first_write(tconn, req->epoch);
+ maybe_send_barrier(tconn, req->epoch);
+ tconn->send.current_epoch_writes++;
+
+ err = drbd_send_dblock(mdev, req);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1305,57 +1380,61 @@ int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
- (unsigned long)req);
+ /* Even read requests may close a write epoch,
+ * if there was any yet. */
+ maybe_send_barrier(tconn, req->epoch);
- if (!ok) {
- /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
- * so this is probably redundant */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- }
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ (unsigned long)req);
+
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
-int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
+ struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, req->sector);
- /* Calling drbd_al_begin_io() out of the worker might deadlocks
- theoretically. Practically it can not deadlock, since this is
- only used when unfreezing IOs. All the extents of the requests
- that made it into the TL are already active */
+ drbd_al_begin_io(mdev, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio);
- return 1;
+ return 0;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
struct drbd_conf *odev = mdev;
+ int resync_after;
while (1) {
- if (odev->sync_conf.after == -1)
+ if (!odev->ldev)
+ return 1;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
+ if (resync_after == -1)
+ return 1;
+ odev = minor_to_mdev(resync_after);
+ if (!expect(odev))
return 1;
- odev = minor_to_mdev(odev->sync_conf.after);
- ERR_IF(!odev) return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
@@ -1375,16 +1454,15 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
!= SS_NOTHING_TO_DO);
}
+ rcu_read_unlock();
return rv;
}
@@ -1400,10 +1478,8 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
@@ -1413,6 +1489,7 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
!= SS_NOTHING_TO_DO) ;
}
}
+ rcu_read_unlock();
return rv;
}
@@ -1430,57 +1507,86 @@ void suspend_other_sg(struct drbd_conf *mdev)
write_unlock_irq(&global_state_lock);
}
-static int sync_after_error(struct drbd_conf *mdev, int o_minor)
+/* caller must hold global_state_lock */
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
{
struct drbd_conf *odev;
+ int resync_after;
if (o_minor == -1)
return NO_ERROR;
if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
- return ERR_SYNC_AFTER;
+ return ERR_RESYNC_AFTER;
/* check for loops */
odev = minor_to_mdev(o_minor);
while (1) {
if (odev == mdev)
- return ERR_SYNC_AFTER_CYCLE;
+ return ERR_RESYNC_AFTER_CYCLE;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
/* dependency chain ends here, no cycles. */
- if (odev->sync_conf.after == -1)
+ if (resync_after == -1)
return NO_ERROR;
/* follow the dependency chain */
- odev = minor_to_mdev(odev->sync_conf.after);
+ odev = minor_to_mdev(resync_after);
}
}
-int drbd_alter_sa(struct drbd_conf *mdev, int na)
+/* caller must hold global_state_lock */
+void drbd_resync_after_changed(struct drbd_conf *mdev)
{
int changes;
- int retcode;
- write_lock_irq(&global_state_lock);
- retcode = sync_after_error(mdev, na);
- if (retcode == NO_ERROR) {
- mdev->sync_conf.after = na;
- do {
- changes = _drbd_pause_after(mdev);
- changes |= _drbd_resume_next(mdev);
- } while (changes);
- }
- write_unlock_irq(&global_state_lock);
- return retcode;
+ do {
+ changes = _drbd_pause_after(mdev);
+ changes |= _drbd_resume_next(mdev);
+ } while (changes);
}
void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
+ struct fifo_buffer *plan;
+
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
mdev->rs_in_flight = 0;
- mdev->rs_planed = 0;
- spin_lock(&mdev->peer_seq_lock);
- fifo_set(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
+
+ /* Updating the RCU protected object in place is necessary since
+ this function gets called from atomic context.
+ It is valid since all other updates also lead to a completely
+ empty fifo */
+ rcu_read_lock();
+ plan = rcu_dereference(mdev->rs_plan_s);
+ plan->total = 0;
+ fifo_set(plan, 0);
+ rcu_read_unlock();
+}
+
+void start_resync_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_work *w, int cancel)
+{
+ struct drbd_conf *mdev = w->mdev;
+
+ if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+ dev_warn(DEV, "w_start_resync later...\n");
+ mdev->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&mdev->start_resync_timer);
+ return 0;
+ }
+
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ return 0;
}
/**
@@ -1501,43 +1607,58 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (side == C_SYNC_TARGET) {
- /* Since application IO was locked out during C_WF_BITMAP_T and
- C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
- we check that we might make the data inconsistent. */
- r = drbd_khelper(mdev, "before-resync-target");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- dev_info(DEV, "before-resync-target handler returned %d, "
- "dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
- }
- } else /* C_SYNC_SOURCE */ {
- r = drbd_khelper(mdev, "before-resync-source");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- if (r == 3) {
- dev_info(DEV, "before-resync-source handler returned %d, "
- "ignoring. Old userland tools?", r);
- } else {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
+ if (side == C_SYNC_TARGET) {
+ /* Since application IO was locked out during C_WF_BITMAP_T and
+ C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
+ we check that we might make the data inconsistent. */
+ r = drbd_khelper(mdev, "before-resync-target");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
+ } else /* C_SYNC_SOURCE */ {
+ r = drbd_khelper(mdev, "before-resync-source");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ if (r == 3) {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "ignoring. Old userland tools?", r);
+ } else {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "dropping connection.\n", r);
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return;
+ }
+ }
}
}
- drbd_state_lock(mdev);
+ if (current == mdev->tconn->worker.task) {
+ /* The worker should not sleep waiting for state_mutex,
+ that can take long */
+ if (!mutex_trylock(mdev->state_mutex)) {
+ set_bit(B_RS_H_DONE, &mdev->flags);
+ mdev->start_resync_timer.expires = jiffies + HZ/5;
+ add_timer(&mdev->start_resync_timer);
+ return;
+ }
+ } else {
+ mutex_lock(mdev->state_mutex);
+ }
+ clear_bit(B_RS_H_DONE, &mdev->flags);
+
write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
return;
}
- ns.i = mdev->state.i;
+ ns = drbd_read_state(mdev);
ns.aftr_isp = !_drbd_may_sync_now(mdev);
@@ -1549,7 +1670,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
ns.pdsk = D_INCONSISTENT;
r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
+ ns = drbd_read_state(mdev);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
@@ -1575,6 +1696,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
+ /* reset rs_last_bcast when a resync or verify is started,
+ * to deal with potential jiffies wrap. */
+ mdev->rs_last_bcast = jiffies - HZ;
+
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
@@ -1589,10 +1714,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
- if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+ if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
drbd_gen_and_send_sync_uuid(mdev);
- if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
+ if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
@@ -1603,10 +1728,16 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* detect connection loss, then waiting for a ping
* response (implicit in drbd_resync_finished) reduces
* the race considerably, but does not solve it. */
- if (side == C_SYNC_SOURCE)
- schedule_timeout_interruptible(
- mdev->net_conf->ping_int * HZ +
- mdev->net_conf->ping_timeo*HZ/9);
+ if (side == C_SYNC_SOURCE) {
+ struct net_conf *nc;
+ int timeo;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
+ }
drbd_resync_finished(mdev);
}
@@ -1621,114 +1752,180 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_md_sync(mdev);
}
put_ldev(mdev);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
}
-int drbd_worker(struct drbd_thread *thi)
+/* If the resource already closed the current epoch, but we did not
+ * (because we have not yet seen new requests), we should send the
+ * corresponding barrier now. Must be checked within the same spinlock
+ * that is used to check for new requests. */
+bool need_to_send_barrier(struct drbd_tconn *connection)
{
- struct drbd_conf *mdev = thi->mdev;
- struct drbd_work *w = NULL;
- LIST_HEAD(work_list);
- int intr = 0, i;
+ if (!connection->send.seen_any_write_yet)
+ return false;
+
+ /* Skip barriers that do not contain any writes.
+ * This may happen during AHEAD mode. */
+ if (!connection->send.current_epoch_writes)
+ return false;
+
+ /* ->req_lock is held when requests are queued on
+ * connection->sender_work, and put into ->transfer_log.
+ * It is also held when ->current_tle_nr is increased.
+ * So either there are already new requests queued,
+ * and corresponding barriers will be sent there.
+ * Or nothing new is queued yet, so the difference will be 1.
+ */
+ if (atomic_read(&connection->current_tle_nr) !=
+ connection->send.current_epoch_nr + 1)
+ return false;
+
+ return true;
+}
+
+bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ list_splice_init(&queue->q, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
- sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
+bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ if (!list_empty(&queue->q))
+ list_move(queue->q.next, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
+void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+{
+ DEFINE_WAIT(wait);
+ struct net_conf *nc;
+ int uncork, cork;
- if (down_trylock(&mdev->data.work.s)) {
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ dequeue_work_item(&connection->sender_work, work_list);
+ if (!list_empty(work_list))
+ return;
- intr = down_interruptible(&mdev->data.work.s);
+ /* Still nothing to do?
+ * Maybe we still need to close the current epoch,
+ * even if no new requests are queued yet.
+ *
+ * Also, poke TCP, just in case.
+ * Then wait for new work (or signal). */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ uncork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ if (uncork) {
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket)
+ drbd_tcp_uncork(connection->data.socket);
+ mutex_unlock(&connection->data.mutex);
+ }
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_cork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ for (;;) {
+ int send_barrier;
+ prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_lock_irq(&connection->req_lock);
+ spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ /* dequeue single item only,
+ * we still use drbd_queue_work_front() in some places */
+ if (!list_empty(&connection->sender_work.q))
+ list_move(connection->sender_work.q.next, work_list);
+ spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ if (!list_empty(work_list) || signal_pending(current)) {
+ spin_unlock_irq(&connection->req_lock);
+ break;
}
+ send_barrier = need_to_send_barrier(connection);
+ spin_unlock_irq(&connection->req_lock);
+ if (send_barrier) {
+ drbd_send_barrier(connection);
+ connection->send.current_epoch_nr++;
+ }
+ schedule();
+ /* may be woken up for things other than new work, too,
+ * e.g. if the current epoch got closed.
+ * In which case we send the barrier above. */
+ }
+ finish_wait(&connection->sender_work.q_wait, &wait);
+
+ /* someone may have changed the config while we have been waiting above. */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ cork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket) {
+ if (cork)
+ drbd_tcp_cork(connection->data.socket);
+ else if (!uncork)
+ drbd_tcp_uncork(connection->data.socket);
+ }
+ mutex_unlock(&connection->data.mutex);
+}
- if (intr) {
- D_ASSERT(intr == -EINTR);
+int drbd_worker(struct drbd_thread *thi)
+{
+ struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_work *w = NULL;
+ struct drbd_conf *mdev;
+ LIST_HEAD(work_list);
+ int vnr;
+
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
+
+ /* as long as we use drbd_queue_work_front(),
+ * we may only dequeue single work items here, not batches. */
+ if (list_empty(&work_list))
+ wait_for_work(tconn, &work_list);
+
+ if (signal_pending(current)) {
flush_signals(current);
- ERR_IF (get_t_state(thi) == Running)
+ if (get_t_state(thi) == RUNNING) {
+ conn_warn(tconn, "Worker got an unexpected signal\n");
continue;
+ }
break;
}
- if (get_t_state(thi) != Running)
+ if (get_t_state(thi) != RUNNING)
break;
- /* With this break, we have done a down() but not consumed
- the entry from the list. The cleanup code takes care of
- this... */
-
- w = NULL;
- spin_lock_irq(&mdev->data.work.q_lock);
- ERR_IF(list_empty(&mdev->data.work.q)) {
- /* something terribly wrong in our logic.
- * we were able to down() the semaphore,
- * but the list is empty... doh.
- *
- * what is the best thing to do now?
- * try again from scratch, restarting the receiver,
- * asender, whatnot? could break even more ugly,
- * e.g. when we are primary, but no good local data.
- *
- * I'll try to get away just starting over this loop.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
- continue;
- }
- w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
- list_del_init(&w->list);
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
- /* dev_warn(DEV, "worker: a callback failed! \n"); */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev,
- NS(conn, C_NETWORK_FAILURE));
+
+ while (!list_empty(&work_list)) {
+ w = list_first_entry(&work_list, struct drbd_work, list);
+ list_del_init(&w->list);
+ if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+ continue;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
- D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
- D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
-
- spin_lock_irq(&mdev->data.work.q_lock);
- i = 0;
- while (!list_empty(&mdev->data.work.q)) {
- list_splice_init(&mdev->data.work.q, &work_list);
- spin_unlock_irq(&mdev->data.work.q_lock);
+ do {
while (!list_empty(&work_list)) {
- w = list_entry(work_list.next, struct drbd_work, list);
+ w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
- w->cb(mdev, w, 1);
- i++; /* dead debugging code */
+ w->cb(w, 1);
}
-
- spin_lock_irq(&mdev->data.work.q_lock);
+ dequeue_work_batch(&tconn->sender_work, &work_list);
+ } while (!list_empty(&work_list));
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_mdev_cleanup(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
}
- sema_init(&mdev->data.work.s, 0);
- /* DANGEROUS race: if someone did queue his work within the spinlock,
- * but up() ed outside the spinlock, we could get an up() on the
- * semaphore without corresponding list entry.
- * So don't do that.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
- /* _drbd_set_state only uses stop_nowait.
- * wait here for the Exiting receiver. */
- drbd_thread_stop(&mdev->receiver);
- drbd_mdev_cleanup(mdev);
-
- dev_info(DEV, "worker terminated\n");
-
- clear_bit(DEVICE_DYING, &mdev->flags);
- clear_bit(CONFIG_PENDING, &mdev->flags);
- wake_up(&mdev->state_wait);
+ rcu_read_unlock();
return 0;
}
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 151f1a37478..328f18e4b4e 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/mm.h>
+#include "drbd_int.h"
/* see get_sb_bdev and bd_claim */
extern char *drbd_sec_holder;
@@ -20,8 +21,8 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_sec(struct bio *bio, int error);
-extern void drbd_endio_pri(struct bio *bio, int error);
+extern void drbd_peer_request_endio(struct bio *bio, int error);
+extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
@@ -45,12 +46,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio);
}
-static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
-{
- return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
- == CRYPTO_ALG_TYPE_HASH;
-}
-
#ifndef __CHECKER__
# undef __cond_lock
# define __cond_lock(x,c) (c)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 54046e51160..ae125127062 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -463,6 +463,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
+ lo->lo_bio_count++;
bio_list_add(&lo->lo_bio_list, bio);
}
@@ -471,6 +472,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
+ lo->lo_bio_count--;
return bio_list_pop(&lo->lo_bio_list);
}
@@ -489,6 +491,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto out;
if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
goto out;
+ if (lo->lo_bio_count >= q->nr_congestion_on)
+ wait_event_lock_irq(lo->lo_req_wait,
+ lo->lo_bio_count < q->nr_congestion_off,
+ lo->lo_lock);
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
@@ -546,6 +552,8 @@ static int loop_thread(void *data)
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
+ if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
+ wake_up(&lo->lo_req_wait);
spin_unlock_irq(&lo->lo_lock);
BUG_ON(!bio);
@@ -873,6 +881,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
+ lo->lo_bio_count = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
@@ -1673,6 +1682,7 @@ static int loop_add(struct loop_device **l, int i)
lo->lo_number = i;
lo->lo_thread = NULL;
init_waitqueue_head(&lo->lo_event);
+ init_waitqueue_head(&lo->lo_req_wait);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bb3d9be3b1b..89576a0b3f2 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,15 +61,29 @@
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
-#define RBD_MAX_SNAP_NAME_LEN 32
+#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
+#define RBD_MAX_SNAP_NAME_LEN \
+ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
+
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
+/* This allows a single page to hold an image name sent by OSD */
+#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
+
#define RBD_OBJ_PREFIX_LEN_MAX 64
+/* Feature bits */
+
+#define RBD_FEATURE_LAYERING 1
+
+/* Features supported by this (client software) implementation. */
+
+#define RBD_FEATURES_ALL (0)
+
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
@@ -101,6 +115,27 @@ struct rbd_image_header {
u64 obj_version;
};
+/*
+ * An rbd image specification.
+ *
+ * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
+ * identify an image.
+ */
+struct rbd_spec {
+ u64 pool_id;
+ char *pool_name;
+
+ char *image_id;
+ size_t image_id_len;
+ char *image_name;
+ size_t image_name_len;
+
+ u64 snap_id;
+ char *snap_name;
+
+ struct kref kref;
+};
+
struct rbd_options {
bool read_only;
};
@@ -155,11 +190,8 @@ struct rbd_snap {
};
struct rbd_mapping {
- char *snap_name;
- u64 snap_id;
u64 size;
u64 features;
- bool snap_exists;
bool read_only;
};
@@ -173,7 +205,6 @@ struct rbd_device {
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
- struct rbd_options rbd_opts;
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
@@ -181,17 +212,17 @@ struct rbd_device {
spinlock_t lock; /* queue lock */
struct rbd_image_header header;
- char *image_id;
- size_t image_id_len;
- char *image_name;
- size_t image_name_len;
+ bool exists;
+ struct rbd_spec *spec;
+
char *header_name;
- char *pool_name;
- int pool_id;
struct ceph_osd_event *watch_event;
struct ceph_osd_request *watch_request;
+ struct rbd_spec *parent_spec;
+ u64 parent_overlap;
+
/* protects updating the header */
struct rw_semaphore header_rwsem;
@@ -204,6 +235,7 @@ struct rbd_device {
/* sysfs related */
struct device dev;
+ unsigned long open_count;
};
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
@@ -218,7 +250,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static void __rbd_remove_snap_dev(struct rbd_snap *snap);
+static void rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
@@ -258,17 +290,8 @@ static struct device rbd_root_dev = {
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
-static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
-{
- return get_device(&rbd_dev->dev);
-}
-
-static void rbd_put_dev(struct rbd_device *rbd_dev)
-{
- put_device(&rbd_dev->dev);
-}
-
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -277,8 +300,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
- rbd_get_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ (void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
+ rbd_dev->open_count++;
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -287,7 +313,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
- rbd_put_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ rbd_assert(rbd_dev->open_count > 0);
+ rbd_dev->open_count--;
+ put_device(&rbd_dev->dev);
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -388,7 +418,7 @@ enum {
static match_table_t rbd_opts_tokens = {
/* int args above */
/* string args above */
- {Opt_read_only, "mapping.read_only"},
+ {Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
@@ -441,33 +471,17 @@ static int parse_rbd_opts_token(char *c, void *private)
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
-static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
- size_t mon_addr_len, char *options)
+static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
- struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
- struct ceph_options *ceph_opts;
struct rbd_client *rbdc;
- rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
-
- ceph_opts = ceph_parse_options(options, mon_addr,
- mon_addr + mon_addr_len,
- parse_rbd_opts_token, rbd_opts);
- if (IS_ERR(ceph_opts))
- return PTR_ERR(ceph_opts);
-
rbdc = rbd_client_find(ceph_opts);
- if (rbdc) {
- /* using an existing client */
+ if (rbdc) /* using an existing client */
ceph_destroy_options(ceph_opts);
- } else {
+ else
rbdc = rbd_client_create(ceph_opts);
- if (IS_ERR(rbdc))
- return PTR_ERR(rbdc);
- }
- rbd_dev->rbd_client = rbdc;
- return 0;
+ return rbdc;
}
/*
@@ -492,10 +506,10 @@ static void rbd_client_release(struct kref *kref)
* Drop reference to ceph client node. If it's not referenced anymore, release
* it.
*/
-static void rbd_put_client(struct rbd_device *rbd_dev)
+static void rbd_put_client(struct rbd_client *rbdc)
{
- kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- rbd_dev->rbd_client = NULL;
+ if (rbdc)
+ kref_put(&rbdc->kref, rbd_client_release);
}
/*
@@ -524,6 +538,16 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
return false;
+ /* The bio layer requires at least sector-sized I/O */
+
+ if (ondisk->options.order < SECTOR_SHIFT)
+ return false;
+
+ /* If we use u64 in a few spots we may be able to loosen this */
+
+ if (ondisk->options.order > 8 * sizeof (int) - 1)
+ return false;
+
/*
* The size of a snapshot header has to fit in a size_t, and
* that limits the number of snapshots.
@@ -635,6 +659,20 @@ out_err:
return -ENOMEM;
}
+static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+{
+ struct rbd_snap *snap;
+
+ if (snap_id == CEPH_NOSNAP)
+ return RBD_SNAP_HEAD_NAME;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node)
+ if (snap_id == snap->id)
+ return snap->name;
+
+ return NULL;
+}
+
static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
@@ -642,7 +680,7 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
list_for_each_entry(snap, &rbd_dev->snaps, node) {
if (!strcmp(snap_name, snap->name)) {
- rbd_dev->mapping.snap_id = snap->id;
+ rbd_dev->spec->snap_id = snap->id;
rbd_dev->mapping.size = snap->size;
rbd_dev->mapping.features = snap->features;
@@ -653,26 +691,23 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
return -ENOENT;
}
-static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
+static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
int ret;
- if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
+ if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->mapping.snap_id = CEPH_NOSNAP;
+ rbd_dev->spec->snap_id = CEPH_NOSNAP;
rbd_dev->mapping.size = rbd_dev->header.image_size;
rbd_dev->mapping.features = rbd_dev->header.features;
- rbd_dev->mapping.snap_exists = false;
- rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
ret = 0;
} else {
- ret = snap_by_name(rbd_dev, snap_name);
+ ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
if (ret < 0)
goto done;
- rbd_dev->mapping.snap_exists = true;
rbd_dev->mapping.read_only = true;
}
- rbd_dev->mapping.snap_name = snap_name;
+ rbd_dev->exists = true;
done:
return ret;
}
@@ -695,13 +730,13 @@ static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
u64 segment;
int ret;
- name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
- ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
+ ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
rbd_dev->header.object_prefix, segment);
- if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
+ if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
@@ -800,77 +835,144 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
}
/*
- * bio_chain_clone - clone a chain of bios up to a certain length.
- * might return a bio_pair that will need to be released.
+ * Clone a portion of a bio, starting at the given byte offset
+ * and continuing for the number of bytes indicated.
*/
-static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
- struct bio_pair **bp,
- int len, gfp_t gfpmask)
-{
- struct bio *old_chain = *old;
- struct bio *new_chain = NULL;
- struct bio *tail;
- int total = 0;
-
- if (*bp) {
- bio_pair_release(*bp);
- *bp = NULL;
- }
+static struct bio *bio_clone_range(struct bio *bio_src,
+ unsigned int offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio_vec *bv;
+ unsigned int resid;
+ unsigned short idx;
+ unsigned int voff;
+ unsigned short end_idx;
+ unsigned short vcnt;
+ struct bio *bio;
- while (old_chain && (total < len)) {
- struct bio *tmp;
+ /* Handle the easy case for the caller */
- tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
- if (!tmp)
- goto err_out;
- gfpmask &= ~__GFP_WAIT; /* can't wait after the first */
+ if (!offset && len == bio_src->bi_size)
+ return bio_clone(bio_src, gfpmask);
- if (total + old_chain->bi_size > len) {
- struct bio_pair *bp;
+ if (WARN_ON_ONCE(!len))
+ return NULL;
+ if (WARN_ON_ONCE(len > bio_src->bi_size))
+ return NULL;
+ if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+ return NULL;
- /*
- * this split can only happen with a single paged bio,
- * split_bio will BUG_ON if this is not the case
- */
- dout("bio_chain_clone split! total=%d remaining=%d"
- "bi_size=%u\n",
- total, len - total, old_chain->bi_size);
+ /* Find first affected segment... */
- /* split the bio. We'll release it either in the next
- call, or it will have to be released outside */
- bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
- if (!bp)
- goto err_out;
+ resid = offset;
+ __bio_for_each_segment(bv, bio_src, idx, 0) {
+ if (resid < bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ voff = resid;
- __bio_clone(tmp, &bp->bio1);
+ /* ...and the last affected segment */
- *next = &bp->bio2;
- } else {
- __bio_clone(tmp, old_chain);
- *next = old_chain->bi_next;
- }
+ resid += len;
+ __bio_for_each_segment(bv, bio_src, end_idx, idx) {
+ if (resid <= bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ vcnt = end_idx - idx + 1;
+
+ /* Build the clone */
- tmp->bi_bdev = NULL;
- tmp->bi_next = NULL;
- if (new_chain)
- tail->bi_next = tmp;
- else
- new_chain = tmp;
- tail = tmp;
- old_chain = old_chain->bi_next;
+ bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ if (!bio)
+ return NULL; /* ENOMEM */
- total += tmp->bi_size;
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_flags |= 1 << BIO_CLONED;
+
+ /*
+ * Copy over our part of the bio_vec, then update the first
+ * and last (or only) entries.
+ */
+ memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
+ vcnt * sizeof (struct bio_vec));
+ bio->bi_io_vec[0].bv_offset += voff;
+ if (vcnt > 1) {
+ bio->bi_io_vec[0].bv_len -= voff;
+ bio->bi_io_vec[vcnt - 1].bv_len = resid;
+ } else {
+ bio->bi_io_vec[0].bv_len = len;
}
- rbd_assert(total == len);
+ bio->bi_vcnt = vcnt;
+ bio->bi_size = len;
+ bio->bi_idx = 0;
+
+ return bio;
+}
+
+/*
+ * Clone a portion of a bio chain, starting at the given byte offset
+ * into the first bio in the source chain and continuing for the
+ * number of bytes indicated. The result is another bio chain of
+ * exactly the given length, or a null pointer on error.
+ *
+ * The bio_src and offset parameters are both in-out. On entry they
+ * refer to the first source bio and the offset into that bio where
+ * the start of data to be cloned is located.
+ *
+ * On return, bio_src is updated to refer to the bio in the source
+ * chain that contains the first un-cloned byte, and *offset will
+ * contain the offset of that byte within that bio.
+ */
+static struct bio *bio_chain_clone_range(struct bio **bio_src,
+ unsigned int *offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio *bi = *bio_src;
+ unsigned int off = *offset;
+ struct bio *chain = NULL;
+ struct bio **end;
+
+ /* Build up a chain of clone bios up to the limit */
+
+ if (!bi || off >= bi->bi_size || !len)
+ return NULL; /* Nothing to clone */
- *old = old_chain;
+ end = &chain;
+ while (len) {
+ unsigned int bi_size;
+ struct bio *bio;
+
+ if (!bi)
+ goto out_err; /* EINVAL; ran out of bio's */
+ bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bio = bio_clone_range(bi, off, bi_size, gfpmask);
+ if (!bio)
+ goto out_err; /* ENOMEM */
+
+ *end = bio;
+ end = &bio->bi_next;
+
+ off += bi_size;
+ if (off == bi->bi_size) {
+ bi = bi->bi_next;
+ off = 0;
+ }
+ len -= bi_size;
+ }
+ *bio_src = bi;
+ *offset = off;
- return new_chain;
+ return chain;
+out_err:
+ bio_chain_put(chain);
-err_out:
- dout("bio_chain_clone with err\n");
- bio_chain_put(new_chain);
return NULL;
}
@@ -988,8 +1090,9 @@ static int rbd_do_request(struct request *rq,
req_data->coll_index = coll_index;
}
- dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
- (unsigned long long) ofs, (unsigned long long) len);
+ dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
+ object_name, (unsigned long long) ofs,
+ (unsigned long long) len, coll, coll_index);
osdc = &rbd_dev->rbd_client->client->osdc;
req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
@@ -1019,7 +1122,7 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
+ layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
rbd_assert(ret == 0);
@@ -1154,8 +1257,6 @@ done:
static int rbd_do_op(struct request *rq,
struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
- u64 snapid,
- int opcode, int flags,
u64 ofs, u64 len,
struct bio *bio,
struct rbd_req_coll *coll,
@@ -1167,6 +1268,9 @@ static int rbd_do_op(struct request *rq,
int ret;
struct ceph_osd_req_op *ops;
u32 payload_len;
+ int opcode;
+ int flags;
+ u64 snapid;
seg_name = rbd_segment_name(rbd_dev, ofs);
if (!seg_name)
@@ -1174,7 +1278,18 @@ static int rbd_do_op(struct request *rq,
seg_len = rbd_segment_length(rbd_dev, ofs, len);
seg_ofs = rbd_segment_offset(rbd_dev, ofs);
- payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
+ if (rq_data_dir(rq) == WRITE) {
+ opcode = CEPH_OSD_OP_WRITE;
+ flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
+ snapid = CEPH_NOSNAP;
+ payload_len = seg_len;
+ } else {
+ opcode = CEPH_OSD_OP_READ;
+ flags = CEPH_OSD_FLAG_READ;
+ snapc = NULL;
+ snapid = rbd_dev->spec->snap_id;
+ payload_len = 0;
+ }
ret = -ENOMEM;
ops = rbd_create_rw_ops(1, opcode, payload_len);
@@ -1202,41 +1317,6 @@ done:
}
/*
- * Request async osd write
- */
-static int rbd_req_write(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
- * Request async osd read
- */
-static int rbd_req_read(struct request *rq,
- struct rbd_device *rbd_dev,
- u64 snapid,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, NULL,
- snapid,
- CEPH_OSD_OP_READ,
- CEPH_OSD_FLAG_READ,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_read(struct rbd_device *rbd_dev,
@@ -1304,7 +1384,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
rbd_dev->header_name, (unsigned long long) notify_id,
(unsigned int) opcode);
- rc = rbd_refresh_header(rbd_dev, &hver);
+ rc = rbd_dev_refresh(rbd_dev, &hver);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
" update snaps: %d\n", rbd_dev->major, rc);
@@ -1460,18 +1540,16 @@ static void rbd_rq_fn(struct request_queue *q)
{
struct rbd_device *rbd_dev = q->queuedata;
struct request *rq;
- struct bio_pair *bp = NULL;
while ((rq = blk_fetch_request(q))) {
struct bio *bio;
- struct bio *rq_bio, *next_bio = NULL;
bool do_write;
unsigned int size;
- u64 op_size = 0;
u64 ofs;
int num_segs, cur_seg = 0;
struct rbd_req_coll *coll;
struct ceph_snap_context *snapc;
+ unsigned int bio_offset;
dout("fetched request\n");
@@ -1483,10 +1561,6 @@ static void rbd_rq_fn(struct request_queue *q)
/* deduce our operation (read, write) */
do_write = (rq_data_dir(rq) == WRITE);
-
- size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * SECTOR_SIZE;
- rq_bio = rq->bio;
if (do_write && rbd_dev->mapping.read_only) {
__blk_end_request_all(rq, -EROFS);
continue;
@@ -1496,8 +1570,8 @@ static void rbd_rq_fn(struct request_queue *q)
down_read(&rbd_dev->header_rwsem);
- if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
- !rbd_dev->mapping.snap_exists) {
+ if (!rbd_dev->exists) {
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
up_read(&rbd_dev->header_rwsem);
dout("request for non-existent snapshot");
spin_lock_irq(q->queue_lock);
@@ -1509,6 +1583,10 @@ static void rbd_rq_fn(struct request_queue *q)
up_read(&rbd_dev->header_rwsem);
+ size = blk_rq_bytes(rq);
+ ofs = blk_rq_pos(rq) * SECTOR_SIZE;
+ bio = rq->bio;
+
dout("%s 0x%x bytes at 0x%llx\n",
do_write ? "write" : "read",
size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
@@ -1528,45 +1606,37 @@ static void rbd_rq_fn(struct request_queue *q)
continue;
}
+ bio_offset = 0;
do {
- /* a bio clone to be passed down to OSD req */
+ u64 limit = rbd_segment_length(rbd_dev, ofs, size);
+ unsigned int chain_size;
+ struct bio *bio_chain;
+
+ BUG_ON(limit > (u64) UINT_MAX);
+ chain_size = (unsigned int) limit;
dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- op_size = rbd_segment_length(rbd_dev, ofs, size);
+
kref_get(&coll->kref);
- bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
- op_size, GFP_ATOMIC);
- if (!bio) {
- rbd_coll_end_req_index(rq, coll, cur_seg,
- -ENOMEM, op_size);
- goto next_seg;
- }
+ /* Pass a cloned bio chain via an osd request */
- /* init OSD command: write or read */
- if (do_write)
- rbd_req_write(rq, rbd_dev,
- snapc,
- ofs,
- op_size, bio,
- coll, cur_seg);
+ bio_chain = bio_chain_clone_range(&bio,
+ &bio_offset, chain_size,
+ GFP_ATOMIC);
+ if (bio_chain)
+ (void) rbd_do_op(rq, rbd_dev, snapc,
+ ofs, chain_size,
+ bio_chain, coll, cur_seg);
else
- rbd_req_read(rq, rbd_dev,
- rbd_dev->mapping.snap_id,
- ofs,
- op_size, bio,
- coll, cur_seg);
-
-next_seg:
- size -= op_size;
- ofs += op_size;
+ rbd_coll_end_req_index(rq, coll, cur_seg,
+ -ENOMEM, chain_size);
+ size -= chain_size;
+ ofs += chain_size;
cur_seg++;
- rq_bio = next_bio;
} while (size > 0);
kref_put(&coll->kref, rbd_coll_release);
- if (bp)
- bio_pair_release(bp);
spin_lock_irq(q->queue_lock);
ceph_put_snap_context(snapc);
@@ -1576,28 +1646,47 @@ next_seg:
/*
* a queue callback. Makes sure that we don't create a bio that spans across
* multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone
+ * which we handle later at bio_chain_clone_range()
*/
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct rbd_device *rbd_dev = q->queuedata;
- unsigned int chunk_sectors;
- sector_t sector;
- unsigned int bio_sectors;
- int max;
+ sector_t sector_offset;
+ sector_t sectors_per_obj;
+ sector_t obj_sector_offset;
+ int ret;
- chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
- sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
- bio_sectors = bmd->bi_size >> SECTOR_SHIFT;
+ /*
+ * Find how far into its rbd object the bio's start sector
+ * falls. The bio start is partition-relative, so first make
+ * it relative to the enclosing (whole) device.
+ */
+ sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
+ sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
+ obj_sector_offset = sector_offset & (sectors_per_obj - 1);
+
+ /*
+ * Compute the number of bytes from that offset to the end
+ * of the object. Account for what's already used by the bio.
+ */
+ ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
+ if (ret > bmd->bi_size)
+ ret -= bmd->bi_size;
+ else
+ ret = 0;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << SECTOR_SHIFT;
- if (max < 0)
- max = 0; /* bio_add cannot handle a negative return */
- if (max <= bvec->bv_len && bio_sectors == 0)
- return bvec->bv_len;
- return max;
+ /*
+ * Don't send back more than was asked for. And if the bio
+ * was empty, let the whole thing through because: "Note
+ * that a block device *must* allow a single page to be
+ * added to an empty bio."
+ */
+ rbd_assert(bvec->bv_len <= PAGE_SIZE);
+ if (ret > (int) bvec->bv_len || !bmd->bi_size)
+ ret = (int) bvec->bv_len;
+
+ return ret;
}
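The merge callback above boils down to a mask and two shifts. As a rough standalone illustration in userspace C (the object order of 22, the start sector of 8000, and the hard-coded sector shift of 9 are invented for the example, not taken from a running device):

#include <stdio.h>

int main(void)
{
	unsigned int obj_order = 22;			/* assumed: 4 MiB objects */
	unsigned long long sectors_per_obj = 1ULL << (obj_order - 9);
	unsigned long long sector_offset = 8000;	/* assumed bio start, device-relative */
	unsigned long long obj_sector_offset = sector_offset & (sectors_per_obj - 1);
	long long bytes_to_obj_end = (long long) (sectors_per_obj - obj_sector_offset) << 9;

	/* 8192 sectors per object, 192 sectors (98304 bytes) left in this object */
	printf("%llu sectors/object, %lld bytes to object end\n",
	       sectors_per_obj, bytes_to_obj_end);
	return 0;
}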
static void rbd_free_disk(struct rbd_device *rbd_dev)
@@ -1663,13 +1752,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
ret = -ENXIO;
pr_warning("short header read for image %s"
" (want %zd got %d)\n",
- rbd_dev->image_name, size, ret);
+ rbd_dev->spec->image_name, size, ret);
goto out_err;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
pr_warning("invalid header for image %s\n",
- rbd_dev->image_name);
+ rbd_dev->spec->image_name);
goto out_err;
}
@@ -1707,19 +1796,32 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
return ret;
}
-static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
struct rbd_snap *snap;
struct rbd_snap *next;
list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
- __rbd_remove_snap_dev(snap);
+ rbd_remove_snap_dev(snap);
+}
+
+static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
+{
+ sector_t size;
+
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ return;
+
+ size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long) size);
+ rbd_dev->mapping.size = (u64) size;
+ set_capacity(rbd_dev->disk, size);
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
struct rbd_image_header h;
@@ -1730,17 +1832,9 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
down_write(&rbd_dev->header_rwsem);
- /* resized? */
- if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
- sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
-
- if (size != (sector_t) rbd_dev->mapping.size) {
- dout("setting size to %llu sectors",
- (unsigned long long) size);
- rbd_dev->mapping.size = (u64) size;
- set_capacity(rbd_dev->disk, size);
- }
- }
+ /* Update image size, and check for resize of mapped image */
+ rbd_dev->header.image_size = h.image_size;
+ rbd_update_mapping_size(rbd_dev);
/* rbd_dev->header.object_prefix shouldn't change */
kfree(rbd_dev->header.snap_sizes);
@@ -1768,12 +1862,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
return ret;
}
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- ret = __rbd_refresh_header(rbd_dev, hver);
+ if (rbd_dev->image_format == 1)
+ ret = rbd_dev_v1_refresh(rbd_dev, hver);
+ else
+ ret = rbd_dev_v2_refresh(rbd_dev, hver);
mutex_unlock(&ctl_mutex);
return ret;
@@ -1885,7 +1983,7 @@ static ssize_t rbd_pool_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->pool_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
@@ -1893,7 +1991,8 @@ static ssize_t rbd_pool_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%d\n", rbd_dev->pool_id);
+ return sprintf(buf, "%llu\n",
+ (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
@@ -1901,7 +2000,10 @@ static ssize_t rbd_name_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_name);
+ if (rbd_dev->spec->image_name)
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
+
+ return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
@@ -1909,7 +2011,7 @@ static ssize_t rbd_image_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_id);
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
@@ -1922,7 +2024,50 @@ static ssize_t rbd_snap_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
+}
+
+/*
+ * For an rbd v2 image, shows the pool id, image id, and snapshot id
+ * for the parent image. If there is no parent, simply shows
+ * "(no parent image)".
+ */
+static ssize_t rbd_parent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ struct rbd_spec *spec = rbd_dev->parent_spec;
+ int count;
+ char *bufp = buf;
+
+ if (!spec)
+ return sprintf(buf, "(no parent image)\n");
+
+ count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
+ (unsigned long long) spec->pool_id, spec->pool_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
+ spec->image_name ? spec->image_name : "(unknown)");
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
+ (unsigned long long) spec->snap_id, spec->snap_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ return (ssize_t) (bufp - buf);
}
static ssize_t rbd_image_refresh(struct device *dev,
@@ -1933,7 +2078,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
- ret = rbd_refresh_header(rbd_dev, NULL);
+ ret = rbd_dev_refresh(rbd_dev, NULL);
return ret < 0 ? ret : size;
}
@@ -1948,6 +2093,7 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1959,6 +2105,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
+ &dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
};
@@ -2047,6 +2194,74 @@ static struct device_type rbd_snap_device_type = {
.release = rbd_snap_dev_release,
};
+static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
+{
+ kref_get(&spec->kref);
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref);
+static void rbd_spec_put(struct rbd_spec *spec)
+{
+ if (spec)
+ kref_put(&spec->kref, rbd_spec_free);
+}
+
+static struct rbd_spec *rbd_spec_alloc(void)
+{
+ struct rbd_spec *spec;
+
+ spec = kzalloc(sizeof (*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+ kref_init(&spec->kref);
+
+ rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref)
+{
+ struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
+
+ kfree(spec->pool_name);
+ kfree(spec->image_id);
+ kfree(spec->image_name);
+ kfree(spec->snap_name);
+ kfree(spec);
+}
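rbd_spec_get()/rbd_spec_put() above give the spec a kref-managed lifetime so it can be shared safely. A minimal userspace analogue of that alloc/get/put pattern (a plain integer counter stands in for the kernel kref; the struct and field names are illustrative only):

#include <stdlib.h>

struct spec {
	int refcount;
	char *pool_name;
};

static struct spec *spec_alloc(void)
{
	struct spec *s = calloc(1, sizeof(*s));

	if (s)
		s->refcount = 1;		/* creator holds the first reference */
	return s;
}

static struct spec *spec_get(struct spec *s)
{
	s->refcount++;
	return s;
}

static void spec_put(struct spec *s)
{
	if (s && --s->refcount == 0) {
		free(s->pool_name);
		free(s);
	}
}

int main(void)
{
	struct spec *s = spec_alloc();		/* refcount == 1 */
	struct spec *shared = spec_get(s);	/* second user, refcount == 2 */

	spec_put(shared);			/* back to 1 */
	spec_put(s);				/* last put frees the spec */
	return 0;
}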
+
+struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ struct rbd_spec *spec)
+{
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
+ if (!rbd_dev)
+ return NULL;
+
+ spin_lock_init(&rbd_dev->lock);
+ INIT_LIST_HEAD(&rbd_dev->node);
+ INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header_rwsem);
+
+ rbd_dev->spec = spec;
+ rbd_dev->rbd_client = rbdc;
+
+ return rbd_dev;
+}
+
+static void rbd_dev_destroy(struct rbd_device *rbd_dev)
+{
+ rbd_spec_put(rbd_dev->parent_spec);
+ kfree(rbd_dev->header_name);
+ rbd_put_client(rbd_dev->rbd_client);
+ rbd_spec_put(rbd_dev->spec);
+ kfree(rbd_dev);
+}
+
static bool rbd_snap_registered(struct rbd_snap *snap)
{
bool ret = snap->dev.type == &rbd_snap_device_type;
@@ -2057,7 +2272,7 @@ static bool rbd_snap_registered(struct rbd_snap *snap)
return ret;
}
-static void __rbd_remove_snap_dev(struct rbd_snap *snap)
+static void rbd_remove_snap_dev(struct rbd_snap *snap)
{
list_del(&snap->node);
if (device_is_registered(&snap->dev))
@@ -2073,7 +2288,7 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
dev->type = &rbd_snap_device_type;
dev->parent = parent;
dev->release = rbd_snap_dev_release;
- dev_set_name(dev, "snap_%s", snap->name);
+ dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
dout("%s: registering device for snapshot %s\n", __func__, snap->name);
ret = device_register(dev);
@@ -2189,6 +2404,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2216,6 +2432,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} features_buf = { 0 };
+ u64 incompat;
int ret;
ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
@@ -2226,6 +2443,11 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+
+ incompat = le64_to_cpu(features_buf.incompat);
+ if (incompat & ~RBD_FEATURES_ALL)
+ return -ENXIO;
+
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
@@ -2242,6 +2464,183 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
&rbd_dev->header.features);
}
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *parent_spec;
+ size_t size;
+ void *reply_buf = NULL;
+ __le64 snapid;
+ void *p;
+ void *end;
+ char *image_id;
+ u64 overlap;
+ size_t len = 0;
+ int ret;
+
+ parent_spec = rbd_spec_alloc();
+ if (!parent_spec)
+ return -ENOMEM;
+
+ size = sizeof (__le64) + /* pool_id */
+ sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
+ sizeof (__le64) + /* snap_id */
+ sizeof (__le64); /* overlap */
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ snapid = cpu_to_le64(CEPH_NOSNAP);
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_parent",
+ (char *) &snapid, sizeof (snapid),
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out_err;
+
+ ret = -ERANGE;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
+ if (parent_spec->pool_id == CEPH_NOPOOL)
+ goto out; /* No parent? No problem. */
+
+ image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_id)) {
+ ret = PTR_ERR(image_id);
+ goto out_err;
+ }
+ parent_spec->image_id = image_id;
+ parent_spec->image_id_len = len;
+ ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
+ ceph_decode_64_safe(&p, end, overlap, out_err);
+
+ rbd_dev->parent_overlap = overlap;
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
+out:
+ ret = 0;
+out_err:
+ kfree(reply_buf);
+ rbd_spec_put(parent_spec);
+
+ return ret;
+}
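Judging by the sizes and decode calls above, the "get_parent" reply carries a little-endian 64-bit pool id, a length-prefixed image id string, a 64-bit snapshot id, and a 64-bit overlap. A userspace sketch of walking such a layout (the buffer contents are fabricated, and get_le64()/get_le32() are local stand-ins for the kernel's bounds-checked ceph_decode_* helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_le32(const unsigned char *p)
{
	return p[0] | ((uint32_t) p[1] << 8) |
	       ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

int main(void)
{
	/* Fabricated reply: pool_id=2, image_id="abc123", snap_id=4, overlap=8 MiB */
	unsigned char buf[] = {
		2, 0, 0, 0, 0, 0, 0, 0,				/* pool_id (le64) */
		6, 0, 0, 0, 'a', 'b', 'c', '1', '2', '3',	/* image_id (le32 len + bytes) */
		4, 0, 0, 0, 0, 0, 0, 0,				/* snap_id (le64) */
		0, 0, 0x80, 0, 0, 0, 0, 0,			/* overlap (le64) = 8 MiB */
	};
	const unsigned char *p = buf;
	char image_id[32] = { 0 };
	uint64_t pool_id, snap_id, overlap;
	uint32_t id_len;

	pool_id = get_le64(p);		p += 8;
	id_len = get_le32(p);		p += 4;
	memcpy(image_id, p, id_len);	p += id_len;
	snap_id = get_le64(p);		p += 8;
	overlap = get_le64(p);

	printf("pool %llu image %s snap %llu overlap %llu\n",
	       (unsigned long long) pool_id, image_id,
	       (unsigned long long) snap_id, (unsigned long long) overlap);
	return 0;
}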
+
+static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
+{
+ size_t image_id_size;
+ char *image_id;
+ void *p;
+ void *end;
+ size_t size;
+ void *reply_buf = NULL;
+ size_t len = 0;
+ char *image_name = NULL;
+ int ret;
+
+ rbd_assert(!rbd_dev->spec->image_name);
+
+ image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+ image_id = kmalloc(image_id_size, GFP_KERNEL);
+ if (!image_id)
+ return NULL;
+
+ p = image_id;
+ end = (char *) image_id + image_id_size;
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id,
+ (u32) rbd_dev->spec->image_id_len);
+
+ size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf)
+ goto out;
+
+ ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+ "rbd", "dir_get_name",
+ image_id, image_id_size,
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ if (ret < 0)
+ goto out;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_name))
+ image_name = NULL;
+ else
+ dout("%s: name is %s len is %zd\n", __func__, image_name, len);
+out:
+ kfree(reply_buf);
+ kfree(image_id);
+
+ return image_name;
+}
+
+/*
+ * When a parent image gets probed, we only have the pool, image,
+ * and snapshot ids but not the names of any of them. This call
+ * is made later to fill in those names. It has to be done after
+ * rbd_dev_snaps_update() has completed because some of the
+ * information (in particular, snapshot name) is not available
+ * until then.
+ */
+static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc;
+ const char *name;
+ void *reply_buf = NULL;
+ int ret;
+
+ if (rbd_dev->spec->pool_name)
+ return 0; /* Already have the names */
+
+ /* Look up the pool name */
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
+ if (!name)
+ return -EIO; /* pool id too large (>= 2^31) */
+
+ rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->pool_name)
+ return -ENOMEM;
+
+ /* Fetch the image name; tolerate failure here */
+
+ name = rbd_dev_image_name(rbd_dev);
+ if (name) {
+ rbd_dev->spec->image_name_len = strlen(name);
+ rbd_dev->spec->image_name = (char *) name;
+ } else {
+ pr_warning(RBD_DRV_NAME "%d "
+ "unable to get image name for image id %s\n",
+ rbd_dev->major, rbd_dev->spec->image_id);
+ }
+
+ /* Look up the snapshot name. */
+
+ name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
+ if (!name) {
+ ret = -EIO;
+ goto out_err;
+ }
+ rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->snap_name) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ kfree(reply_buf);
+ kfree(rbd_dev->spec->pool_name);
+ rbd_dev->spec->pool_name = NULL;
+
+ return ret;
+}
+
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
size_t size;
@@ -2328,7 +2727,6 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
int ret;
void *p;
void *end;
- size_t snap_name_len;
char *snap_name;
size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
@@ -2348,9 +2746,7 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
p = reply_buf;
end = (char *) reply_buf + size;
- snap_name_len = 0;
- snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
- GFP_KERNEL);
+ snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out;
@@ -2397,6 +2793,41 @@ static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
return ERR_PTR(-EINVAL);
}
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
+{
+ int ret;
+ __u8 obj_order;
+
+ down_write(&rbd_dev->header_rwsem);
+
+ /* Grab old order first, to see if it changes */
+
+ obj_order = rbd_dev->header.obj_order;
+ ret = rbd_dev_v2_image_size(rbd_dev);
+ if (ret)
+ goto out;
+ if (rbd_dev->header.obj_order != obj_order) {
+ ret = -EIO;
+ goto out;
+ }
+ rbd_update_mapping_size(rbd_dev);
+
+ ret = rbd_dev_v2_snap_context(rbd_dev, hver);
+ dout("rbd_dev_v2_snap_context returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_update(rbd_dev);
+ dout("rbd_dev_snaps_update returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_register(rbd_dev);
+ dout("rbd_dev_snaps_register returned %d\n", ret);
+out:
+ up_write(&rbd_dev->header_rwsem);
+
+ return ret;
+}
+
/*
* Scan the rbd device's current snapshot list and compare it to the
* newly-received snapshot context. Remove any existing snapshots
@@ -2436,12 +2867,12 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
/* Existing snapshot not in the new snap context */
- if (rbd_dev->mapping.snap_id == snap->id)
- rbd_dev->mapping.snap_exists = false;
- __rbd_remove_snap_dev(snap);
+ if (rbd_dev->spec->snap_id == snap->id)
+ rbd_dev->exists = false;
+ rbd_remove_snap_dev(snap);
dout("%ssnap id %llu has been removed\n",
- rbd_dev->mapping.snap_id == snap->id ?
- "mapped " : "",
+ rbd_dev->spec->snap_id == snap->id ?
+ "mapped " : "",
(unsigned long long) snap->id);
/* Done with this list entry; advance */
@@ -2559,7 +2990,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
do {
ret = rbd_req_sync_watch(rbd_dev);
if (ret == -ERANGE) {
- rc = rbd_refresh_header(rbd_dev, NULL);
+ rc = rbd_dev_refresh(rbd_dev, NULL);
if (rc < 0)
return rc;
}
@@ -2621,8 +3052,8 @@ static void rbd_dev_id_put(struct rbd_device *rbd_dev)
struct rbd_device *rbd_dev;
rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_id > max_id)
- max_id = rbd_id;
+ if (rbd_dev->dev_id > max_id)
+ max_id = rbd_dev->dev_id;
}
spin_unlock(&rbd_dev_list_lock);
@@ -2722,73 +3153,140 @@ static inline char *dup_token(const char **buf, size_t *lenp)
}
/*
- * This fills in the pool_name, image_name, image_name_len, rbd_dev,
- * rbd_md_name, and name fields of the given rbd_dev, based on the
- * list of monitor addresses and other options provided via
- * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
- * copy of the snapshot name to map if successful, or a
- * pointer-coded error otherwise.
+ * Parse the options provided for an "rbd add" (i.e., rbd image
+ * mapping) request. These arrive via a write to /sys/bus/rbd/add,
+ * and the data written is passed here via a NUL-terminated buffer.
+ * Returns 0 if successful or an error code otherwise.
+ *
+ * The information extracted from these options is recorded in
+ * the other parameters which return dynamically-allocated
+ * structures:
+ * ceph_opts
+ * The address of a pointer that will refer to a ceph options
+ * structure. Caller must release the returned pointer using
+ * ceph_destroy_options() when it is no longer needed.
+ * rbd_opts
+ * Address of an rbd options pointer. Fully initialized by
+ * this function; caller must release with kfree().
+ * spec
+ * Address of an rbd image specification pointer. Fully
+ * initialized by this function based on parsed options.
+ * Caller must release with rbd_spec_put().
*
- * Note: rbd_dev is assumed to have been initially zero-filled.
+ * The options passed take this form:
+ * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
+ * where:
+ * <mon_addrs>
+ * A comma-separated list of one or more monitor addresses.
+ * A monitor address is an ip address, optionally followed
+ * by a port number (separated by a colon).
+ * I.e.: ip1[:port1][,ip2[:port2]...]
+ * <options>
+ * A comma-separated list of ceph and/or rbd options.
+ * <pool_name>
+ * The name of the rados pool containing the rbd image.
+ * <image_name>
+ * The name of the image in that pool to map.
+ * <snap_id>
+ * An optional snapshot id. If provided, the mapping will
+ * present data from the image at the time that snapshot was
+ * created. The image head is used if no snapshot id is
+ * provided. Snapshot mappings are always read-only.
*/
-static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
- const char *buf,
- const char **mon_addrs,
- size_t *mon_addrs_size,
- char *options,
- size_t options_size)
+static int rbd_add_parse_args(const char *buf,
+ struct ceph_options **ceph_opts,
+ struct rbd_options **opts,
+ struct rbd_spec **rbd_spec)
{
size_t len;
- char *err_ptr = ERR_PTR(-EINVAL);
- char *snap_name;
+ char *options;
+ const char *mon_addrs;
+ size_t mon_addrs_size;
+ struct rbd_spec *spec = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct ceph_options *copts;
+ int ret;
/* The first four tokens are required */
len = next_token(&buf);
if (!len)
- return err_ptr;
- *mon_addrs_size = len + 1;
- *mon_addrs = buf;
-
+ return -EINVAL; /* Missing monitor address(es) */
+ mon_addrs = buf;
+ mon_addrs_size = len + 1;
buf += len;
- len = copy_token(&buf, options, options_size);
- if (!len || len >= options_size)
- return err_ptr;
+ ret = -EINVAL;
+ options = dup_token(&buf, NULL);
+ if (!options)
+ return -ENOMEM;
+ if (!*options)
+ goto out_err; /* Missing options */
- err_ptr = ERR_PTR(-ENOMEM);
- rbd_dev->pool_name = dup_token(&buf, NULL);
- if (!rbd_dev->pool_name)
- goto out_err;
+ spec = rbd_spec_alloc();
+ if (!spec)
+ goto out_mem;
- rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
- if (!rbd_dev->image_name)
- goto out_err;
+ spec->pool_name = dup_token(&buf, NULL);
+ if (!spec->pool_name)
+ goto out_mem;
+ if (!*spec->pool_name)
+ goto out_err; /* Missing pool name */
- /* Snapshot name is optional */
+ spec->image_name = dup_token(&buf, &spec->image_name_len);
+ if (!spec->image_name)
+ goto out_mem;
+ if (!*spec->image_name)
+ goto out_err; /* Missing image name */
+
+ /*
+ * Snapshot name is optional; default is to use "-"
+ * (indicating the head/no snapshot).
+ */
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
- }
- snap_name = kmalloc(len + 1, GFP_KERNEL);
- if (!snap_name)
+ } else if (len > RBD_MAX_SNAP_NAME_LEN) {
+ ret = -ENAMETOOLONG;
goto out_err;
- memcpy(snap_name, buf, len);
- *(snap_name + len) = '\0';
+ }
+ spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+ if (!spec->snap_name)
+ goto out_mem;
+ memcpy(spec->snap_name, buf, len);
+ *(spec->snap_name + len) = '\0';
-dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
+ /* Initialize all rbd options to the defaults */
- return snap_name;
+ rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
+ if (!rbd_opts)
+ goto out_mem;
+
+ rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+
+ copts = ceph_parse_options(options, mon_addrs,
+ mon_addrs + mon_addrs_size - 1,
+ parse_rbd_opts_token, rbd_opts);
+ if (IS_ERR(copts)) {
+ ret = PTR_ERR(copts);
+ goto out_err;
+ }
+ kfree(options);
+ *ceph_opts = copts;
+ *opts = rbd_opts;
+ *rbd_spec = spec;
+
+ return 0;
+out_mem:
+ ret = -ENOMEM;
out_err:
- kfree(rbd_dev->image_name);
- rbd_dev->image_name = NULL;
- rbd_dev->image_name_len = 0;
- kfree(rbd_dev->pool_name);
- rbd_dev->pool_name = NULL;
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+ kfree(options);
- return err_ptr;
+ return ret;
}
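As the comment block above describes, a mapping request is one line written to /sys/bus/rbd/add. A minimal userspace sketch of issuing such a request (the monitor address, user name, pool, and image names are placeholders, not values from this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* <mon_addrs> <options> <pool_name> <image_name> [<snap_id>] */
	const char *spec = "1.2.3.4:6789 name=admin rbd myimage";
	int fd = open("/sys/bus/rbd/add", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/bus/rbd/add");
		return 1;
	}
	if (write(fd, spec, strlen(spec)) < 0)
		perror("write");
	close(fd);
	return 0;
}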
/*
@@ -2814,14 +3312,22 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
void *p;
/*
+ * When probing a parent image, the image id is already
+ * known (and the image name likely is not). There's no
+ * need to fetch the image id again in this case.
+ */
+ if (rbd_dev->spec->image_id)
+ return 0;
+
+ /*
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
- size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
+ size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
- sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
+ sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
dout("rbd id object name is %s\n", object_name);
/* Response will be an encoded string, which includes a length */
@@ -2841,17 +3347,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = response;
- rbd_dev->image_id = ceph_extract_encoded_string(&p,
+ rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
p + RBD_IMAGE_ID_LEN_MAX,
- &rbd_dev->image_id_len,
+ &rbd_dev->spec->image_id_len,
GFP_NOIO);
- if (IS_ERR(rbd_dev->image_id)) {
- ret = PTR_ERR(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ if (IS_ERR(rbd_dev->spec->image_id)) {
+ ret = PTR_ERR(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
} else {
- dout("image_id is %s\n", rbd_dev->image_id);
+ dout("image_id is %s\n", rbd_dev->spec->image_id);
}
out:
kfree(response);
@@ -2867,26 +3374,33 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
/* Version 1 images have no id; empty string is used */
- rbd_dev->image_id = kstrdup("", GFP_KERNEL);
- if (!rbd_dev->image_id)
+ rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
+ if (!rbd_dev->spec->image_id)
return -ENOMEM;
- rbd_dev->image_id_len = 0;
+ rbd_dev->spec->image_id_len = 0;
/* Record the header object name for this rbd image. */
- size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
+ size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name) {
ret = -ENOMEM;
goto out_err;
}
- sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
+ sprintf(rbd_dev->header_name, "%s%s",
+ rbd_dev->spec->image_name, RBD_SUFFIX);
/* Populate rbd image metadata */
ret = rbd_read_header(rbd_dev, &rbd_dev->header);
if (ret < 0)
goto out_err;
+
+ /* Version 1 images have no parent (no layering) */
+
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
+
rbd_dev->image_format = 1;
dout("discovered version 1 image, header name is %s\n",
@@ -2897,8 +3411,8 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
out_err:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
- kfree(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
return ret;
}
@@ -2913,12 +3427,12 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
* Image id was filled in by the caller. Record the header
* object name for this rbd image.
*/
- size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
+ size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, rbd_dev->image_id);
+ RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
/* Get the size and object order for the image */
@@ -2932,12 +3446,20 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
if (ret < 0)
goto out_err;
- /* Get the features for the image */
+ /* Get and check the features for the image */
ret = rbd_dev_v2_features(rbd_dev);
if (ret < 0)
goto out_err;
+ /* If the image supports layering, get the parent info */
+
+ if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret < 0)
+ goto out_err;
+ }
+
/* crypto and compression type aren't (yet) supported for v2 images */
rbd_dev->header.crypt_type = 0;
@@ -2955,8 +3477,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
dout("discovered version 2 image, header name is %s\n",
rbd_dev->header_name);
- return -ENOTSUPP;
+ return 0;
out_err:
+ rbd_dev->parent_overlap = 0;
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
kfree(rbd_dev->header.object_prefix);
@@ -2965,91 +3490,22 @@ out_err:
return ret;
}
-/*
- * Probe for the existence of the header object for the given rbd
- * device. For format 2 images this includes determining the image
- * id.
- */
-static int rbd_dev_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
int ret;
- /*
- * Get the id from the image id object. If it's not a
- * format 2 image, we'll get ENOENT back, and we'll assume
- * it's a format 1 image.
- */
- ret = rbd_dev_image_id(rbd_dev);
- if (ret)
- ret = rbd_dev_v1_probe(rbd_dev);
- else
- ret = rbd_dev_v2_probe(rbd_dev);
+ /* no need to lock here, as rbd_dev is not registered yet */
+ ret = rbd_dev_snaps_update(rbd_dev);
if (ret)
- dout("probe failed, returning %d\n", ret);
-
- return ret;
-}
-
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
-{
- char *options;
- struct rbd_device *rbd_dev = NULL;
- const char *mon_addrs = NULL;
- size_t mon_addrs_size = 0;
- struct ceph_osd_client *osdc;
- int rc = -ENOMEM;
- char *snap_name;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- options = kmalloc(count, GFP_KERNEL);
- if (!options)
- goto err_out_mem;
- rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
- if (!rbd_dev)
- goto err_out_mem;
-
- /* static rbd_device initialization */
- spin_lock_init(&rbd_dev->lock);
- INIT_LIST_HEAD(&rbd_dev->node);
- INIT_LIST_HEAD(&rbd_dev->snaps);
- init_rwsem(&rbd_dev->header_rwsem);
-
- /* parse add command */
- snap_name = rbd_add_parse_args(rbd_dev, buf,
- &mon_addrs, &mon_addrs_size, options, count);
- if (IS_ERR(snap_name)) {
- rc = PTR_ERR(snap_name);
- goto err_out_mem;
- }
-
- rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
- if (rc < 0)
- goto err_out_args;
-
- /* pick the pool */
- osdc = &rbd_dev->rbd_client->client->osdc;
- rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
- if (rc < 0)
- goto err_out_client;
- rbd_dev->pool_id = rc;
-
- rc = rbd_dev_probe(rbd_dev);
- if (rc < 0)
- goto err_out_client;
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ return ret;
- /* no need to lock here, as rbd_dev is not registered yet */
- rc = rbd_dev_snaps_update(rbd_dev);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_probe_update_spec(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
- rc = rbd_dev_set_mapping(rbd_dev, snap_name);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_set_mapping(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
@@ -3061,34 +3517,33 @@ static ssize_t rbd_add(struct bus_type *bus,
/* Get our block major device number. */
- rc = register_blkdev(0, rbd_dev->name);
- if (rc < 0)
+ ret = register_blkdev(0, rbd_dev->name);
+ if (ret < 0)
goto err_out_id;
- rbd_dev->major = rc;
+ rbd_dev->major = ret;
/* Set up the blkdev mapping. */
- rc = rbd_init_disk(rbd_dev);
- if (rc)
+ ret = rbd_init_disk(rbd_dev);
+ if (ret)
goto err_out_blkdev;
- rc = rbd_bus_add_dev(rbd_dev);
- if (rc)
+ ret = rbd_bus_add_dev(rbd_dev);
+ if (ret)
goto err_out_disk;
/*
* At this point cleanup in the event of an error is the job
* of the sysfs code (initiated by rbd_bus_del_dev()).
*/
-
down_write(&rbd_dev->header_rwsem);
- rc = rbd_dev_snaps_register(rbd_dev);
+ ret = rbd_dev_snaps_register(rbd_dev);
up_write(&rbd_dev->header_rwsem);
- if (rc)
+ if (ret)
goto err_out_bus;
- rc = rbd_init_watch_dev(rbd_dev);
- if (rc)
+ ret = rbd_init_watch_dev(rbd_dev);
+ if (ret)
goto err_out_bus;
/* Everything's ready. Announce the disk to the world. */
@@ -3098,37 +3553,119 @@ static ssize_t rbd_add(struct bus_type *bus,
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
- return count;
-
+ return ret;
err_out_bus:
/* this will also clean up rest of rbd_dev stuff */
rbd_bus_del_dev(rbd_dev);
- kfree(options);
- return rc;
+ return ret;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
-err_out_header:
- rbd_header_free(&rbd_dev->header);
+err_out_snaps:
+ rbd_remove_all_snaps(rbd_dev);
+
+ return ret;
+}
+
+/*
+ * Probe for the existence of the header object for the given rbd
+ * device. For format 2 images this includes determining the image
+ * id.
+ */
+static int rbd_dev_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ /*
+ * Get the id from the image id object. If it's not a
+ * format 2 image, we'll get ENOENT back, and we'll assume
+ * it's a format 1 image.
+ */
+ ret = rbd_dev_image_id(rbd_dev);
+ if (ret)
+ ret = rbd_dev_v1_probe(rbd_dev);
+ else
+ ret = rbd_dev_v2_probe(rbd_dev);
+ if (ret) {
+ dout("probe failed, returning %d\n", ret);
+
+ return ret;
+ }
+
+ ret = rbd_dev_probe_finish(rbd_dev);
+ if (ret)
+ rbd_header_free(&rbd_dev->header);
+
+ return ret;
+}
+
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ struct ceph_options *ceph_opts = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct rbd_spec *spec = NULL;
+ struct rbd_client *rbdc;
+ struct ceph_osd_client *osdc;
+ int rc = -ENOMEM;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ /* parse add command */
+ rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
+ if (rc < 0)
+ goto err_out_module;
+
+ rbdc = rbd_get_client(ceph_opts);
+ if (IS_ERR(rbdc)) {
+ rc = PTR_ERR(rbdc);
+ goto err_out_args;
+ }
+ ceph_opts = NULL; /* rbd_dev client now owns this */
+
+ /* pick the pool */
+ osdc = &rbdc->client->osdc;
+ rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
+ if (rc < 0)
+ goto err_out_client;
+ spec->pool_id = (u64) rc;
+
+ rbd_dev = rbd_dev_create(rbdc, spec);
+ if (!rbd_dev)
+ goto err_out_client;
+ rbdc = NULL; /* rbd_dev now owns this */
+ spec = NULL; /* rbd_dev now owns this */
+
+ rbd_dev->mapping.read_only = rbd_opts->read_only;
+ kfree(rbd_opts);
+ rbd_opts = NULL; /* done with this */
+
+ rc = rbd_dev_probe(rbd_dev);
+ if (rc < 0)
+ goto err_out_rbd_dev;
+
+ return count;
+err_out_rbd_dev:
+ rbd_dev_destroy(rbd_dev);
err_out_client:
- kfree(rbd_dev->header_name);
- rbd_put_client(rbd_dev);
- kfree(rbd_dev->image_id);
+ rbd_put_client(rbdc);
err_out_args:
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_name);
- kfree(rbd_dev->pool_name);
-err_out_mem:
- kfree(rbd_dev);
- kfree(options);
+ if (ceph_opts)
+ ceph_destroy_options(ceph_opts);
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+err_out_module:
+ module_put(THIS_MODULE);
dout("Error adding device %s\n", buf);
- module_put(THIS_MODULE);
return (ssize_t) rc;
}
@@ -3163,7 +3700,6 @@ static void rbd_dev_release(struct device *dev)
if (rbd_dev->watch_event)
rbd_req_sync_unwatch(rbd_dev);
- rbd_put_client(rbd_dev);
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
@@ -3173,13 +3709,9 @@ static void rbd_dev_release(struct device *dev)
rbd_header_free(&rbd_dev->header);
/* done with the id, and with the rbd_dev */
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_id);
- kfree(rbd_dev->header_name);
- kfree(rbd_dev->pool_name);
- kfree(rbd_dev->image_name);
rbd_dev_id_put(rbd_dev);
- kfree(rbd_dev);
+ rbd_assert(rbd_dev->rbd_client != NULL);
+ rbd_dev_destroy(rbd_dev);
/* release module ref */
module_put(THIS_MODULE);
@@ -3211,7 +3743,12 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- __rbd_remove_all_snaps(rbd_dev);
+ if (rbd_dev->open_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
done:
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index cbe77fa105b..49d77cbcf8b 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -46,8 +46,6 @@
#define RBD_MIN_OBJ_ORDER 16
#define RBD_MAX_OBJ_ORDER 30
-#define RBD_MAX_SEG_NAME_LEN 128
-
#define RBD_COMP_NONE 0
#define RBD_CRYPT_NONE 0
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 280a13846e6..74374fb762a 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,7 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/bitmap.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -79,6 +80,7 @@ struct pending_req {
unsigned short operation;
int status;
struct list_head free_list;
+ DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
};
#define BLKBACK_INVALID_HANDLE (~0)
@@ -99,6 +101,36 @@ struct xen_blkbk {
static struct xen_blkbk *blkbk;
/*
+ * Maximum number of grant pages that can be mapped in blkback.
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
+ * pages that blkback will persistently map.
+ * Currently, this is:
+ * RING_SIZE = 32 (for all known ring types)
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
+ * sizeof(struct persistent_gnt) = 48
+ * So the maximum memory used to store the grants is:
+ * 32 * 11 * 48 = 16896 bytes
+ */
+static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
+{
+ switch (protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_32:
+ return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_64:
+ return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ default:
+ BUG();
+ }
+ return 0;
+}
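The bound worked out in the comment above is easy to sanity-check with a standalone program (ring size, segments per request, and the structure size are copied from that comment and treated as plain constants here):

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 32;		/* RING_SIZE, per the comment above */
	unsigned int segs_per_req = 11;		/* BLKIF_MAX_SEGMENTS_PER_REQUEST */
	unsigned int gnt_struct_size = 48;	/* sizeof(struct persistent_gnt), per the comment */

	unsigned int max_grants = ring_size * segs_per_req;		/* 352 mapped pages */
	unsigned int tracking_bytes = max_grants * gnt_struct_size;	/* 16896 bytes */

	printf("%u grants, %u bytes of tracking structures\n",
	       max_grants, tracking_bytes);
	return 0;
}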
+
+
+/*
* Little helpful macro to figure out the index and virtual address of the
* pending_pages[..]. For each 'pending_req' we have up to
* BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
@@ -129,6 +161,90 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
+#define foreach_grant(pos, rbtree, node) \
+ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+ &(pos)->node != NULL; \
+ (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+
+
+static void add_persistent_gnt(struct rb_root *root,
+ struct persistent_gnt *persistent_gnt)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct persistent_gnt *this;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ this = container_of(*new, struct persistent_gnt, node);
+
+ parent = *new;
+ if (persistent_gnt->gnt < this->gnt)
+ new = &((*new)->rb_left);
+ else if (persistent_gnt->gnt > this->gnt)
+ new = &((*new)->rb_right);
+ else {
+ pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
+ BUG();
+ }
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&(persistent_gnt->node), parent, new);
+ rb_insert_color(&(persistent_gnt->node), root);
+}
+
+static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
+ grant_ref_t gref)
+{
+ struct persistent_gnt *data;
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ data = container_of(node, struct persistent_gnt, node);
+
+ if (gref < data->gnt)
+ node = node->rb_left;
+ else if (gref > data->gnt)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt;
+ int ret = 0;
+ int segs_to_unmap = 0;
+
+ foreach_grant(persistent_gnt, root, node) {
+ BUG_ON(persistent_gnt->handle ==
+ BLKBACK_INVALID_HANDLE);
+ gnttab_set_unmap_op(&unmap[segs_to_unmap],
+ (unsigned long) pfn_to_kaddr(page_to_pfn(
+ persistent_gnt->page)),
+ GNTMAP_host_map,
+ persistent_gnt->handle);
+
+ pages[segs_to_unmap] = persistent_gnt->page;
+ rb_erase(&persistent_gnt->node, root);
+ kfree(persistent_gnt);
+ num--;
+
+ if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
+ !rb_next(&persistent_gnt->node)) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
+ BUG_ON(ret);
+ segs_to_unmap = 0;
+ }
+ }
+ BUG_ON(num != 0);
+}
+
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
@@ -302,6 +418,14 @@ int xen_blkif_schedule(void *arg)
print_stats(blkif);
}
+ /* Free all persistent grant pages */
+ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
+ free_persistent_gnts(&blkif->persistent_gnts,
+ blkif->persistent_gnt_c);
+
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+ blkif->persistent_gnt_c = 0;
+
if (log_stats)
print_stats(blkif);
@@ -328,6 +452,8 @@ static void xen_blkbk_unmap(struct pending_req *req)
int ret;
for (i = 0; i < req->nr_pages; i++) {
+ if (!test_bit(i, req->unmap_seg))
+ continue;
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
@@ -344,12 +470,26 @@ static void xen_blkbk_unmap(struct pending_req *req)
static int xen_blkbk_map(struct blkif_request *req,
struct pending_req *pending_req,
- struct seg_buf seg[])
+ struct seg_buf seg[],
+ struct page *pages[])
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- int i;
+ struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt = NULL;
+ struct xen_blkif *blkif = pending_req->blkif;
+ phys_addr_t addr = 0;
+ int i, j;
+ bool new_map;
int nseg = req->u.rw.nr_segments;
+ int segs_to_map = 0;
int ret = 0;
+ int use_persistent_gnts;
+
+ use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
+
+ BUG_ON(blkif->persistent_gnt_c >
+ max_mapped_grant_pages(pending_req->blkif->blk_protocol));
/*
* Fill out preq.nr_sects with proper amount of sectors, and setup
@@ -359,36 +499,146 @@ static int xen_blkbk_map(struct blkif_request *req,
for (i = 0; i < nseg; i++) {
uint32_t flags;
- flags = GNTMAP_host_map;
- if (pending_req->operation != BLKIF_OP_READ)
- flags |= GNTMAP_readonly;
- gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
- req->u.rw.seg[i].gref,
- pending_req->blkif->domid);
+ if (use_persistent_gnts)
+ persistent_gnt = get_persistent_gnt(
+ &blkif->persistent_gnts,
+ req->u.rw.seg[i].gref);
+
+ if (persistent_gnt) {
+ /*
+ * We are using persistent grants and
+ * the grant is already mapped
+ */
+ new_map = false;
+ } else if (use_persistent_gnts &&
+ blkif->persistent_gnt_c <
+ max_mapped_grant_pages(blkif->blk_protocol)) {
+ /*
+ * We are using persistent grants, the grant is
+ * not mapped but we have room for it
+ */
+ new_map = true;
+ persistent_gnt = kmalloc(
+ sizeof(struct persistent_gnt),
+ GFP_KERNEL);
+ if (!persistent_gnt)
+ return -ENOMEM;
+ persistent_gnt->page = alloc_page(GFP_KERNEL);
+ if (!persistent_gnt->page) {
+ kfree(persistent_gnt);
+ return -ENOMEM;
+ }
+ persistent_gnt->gnt = req->u.rw.seg[i].gref;
+ persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
+
+ pages_to_gnt[segs_to_map] =
+ persistent_gnt->page;
+ addr = (unsigned long) pfn_to_kaddr(
+ page_to_pfn(persistent_gnt->page));
+
+ add_persistent_gnt(&blkif->persistent_gnts,
+ persistent_gnt);
+ blkif->persistent_gnt_c++;
+ pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ persistent_gnt->gnt, blkif->persistent_gnt_c,
+ max_mapped_grant_pages(blkif->blk_protocol));
+ } else {
+ /*
+ * We are either using persistent grants and
+ * hit the maximum limit of grants mapped,
+ * or we are not using persistent grants.
+ */
+ if (use_persistent_gnts &&
+ !blkif->vbd.overflow_max_grants) {
+ blkif->vbd.overflow_max_grants = 1;
+ pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ blkif->domid, blkif->vbd.handle);
+ }
+ new_map = true;
+ pages[i] = blkbk->pending_page(pending_req, i);
+ addr = vaddr(pending_req, i);
+ pages_to_gnt[segs_to_map] =
+ blkbk->pending_page(pending_req, i);
+ }
+
+ if (persistent_gnt) {
+ pages[i] = persistent_gnt->page;
+ persistent_gnts[i] = persistent_gnt;
+ } else {
+ persistent_gnts[i] = NULL;
+ }
+
+ if (new_map) {
+ flags = GNTMAP_host_map;
+ if (!persistent_gnt &&
+ (pending_req->operation != BLKIF_OP_READ))
+ flags |= GNTMAP_readonly;
+ gnttab_set_map_op(&map[segs_to_map++], addr,
+ flags, req->u.rw.seg[i].gref,
+ blkif->domid);
+ }
}
- ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
- BUG_ON(ret);
+ if (segs_to_map) {
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+ BUG_ON(ret);
+ }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
* so that when we access vaddr(pending_req,i) it has the contents of
* the page from the other domain.
*/
- for (i = 0; i < nseg; i++) {
- if (unlikely(map[i].status != 0)) {
- pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
- map[i].handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0, j = 0; i < nseg; i++) {
+ if (!persistent_gnts[i] ||
+ persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
+ /* This is a newly mapped grant */
+ BUG_ON(j >= segs_to_map);
+ if (unlikely(map[j].status != 0)) {
+ pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+ map[j].handle = BLKBACK_INVALID_HANDLE;
+ ret |= 1;
+ if (persistent_gnts[i]) {
+ rb_erase(&persistent_gnts[i]->node,
+ &blkif->persistent_gnts);
+ blkif->persistent_gnt_c--;
+ kfree(persistent_gnts[i]);
+ persistent_gnts[i] = NULL;
+ }
+ }
+ }
+ if (persistent_gnts[i]) {
+ if (persistent_gnts[i]->handle ==
+ BLKBACK_INVALID_HANDLE) {
+ /*
+ * If this is a new persistent grant
+ * save the handle
+ */
+ persistent_gnts[i]->handle = map[j].handle;
+ persistent_gnts[i]->dev_bus_addr =
+ map[j++].dev_bus_addr;
+ }
+ pending_handle(pending_req, i) =
+ persistent_gnts[i]->handle;
+
+ if (ret)
+ continue;
+
+ seg[i].buf = persistent_gnts[i]->dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
+ } else {
+ pending_handle(pending_req, i) = map[j].handle;
+ bitmap_set(pending_req->unmap_seg, i, 1);
+
+ if (ret) {
+ j++;
+ continue;
+ }
+
+ seg[i].buf = map[j++].dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
}
-
- pending_handle(pending_req, i) = map[i].handle;
-
- if (ret)
- continue;
-
- seg[i].buf = map[i].dev_bus_addr |
- (req->u.rw.seg[i].first_sect << 9);
}
return ret;
}
@@ -591,6 +841,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
int operation;
struct blk_plug plug;
bool drain = false;
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
switch (req->operation) {
case BLKIF_OP_READ:
@@ -677,7 +928,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (xen_blkbk_map(req, pending_req, seg))
+ if (xen_blkbk_map(req, pending_req, seg, pages))
goto fail_flush;
/*
@@ -689,7 +940,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
- blkbk->pending_page(pending_req, i),
+ pages[i],
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9a54623e52d..6072390c7f5 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
+#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
@@ -160,10 +161,21 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ unsigned int feature_gnt_persistent:1;
+ unsigned int overflow_max_grants:1;
};
struct backend_info;
+
+struct persistent_gnt {
+ struct page *page;
+ grant_ref_t gnt;
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+ struct rb_node node;
+};
+
struct xen_blkif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -190,6 +202,10 @@ struct xen_blkif {
struct task_struct *xenblkd;
unsigned int waiting_reqs;
+ /* tree to store persistent grants */
+ struct rb_root persistent_gnts;
+ unsigned int persistent_gnt_c;
+
/* statistics */
unsigned long st_print;
int st_rd_req;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f58434c2617..63980722db4 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -117,6 +117,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
+ blkif->persistent_gnts.rb_node = NULL;
return blkif;
}
@@ -672,6 +673,13 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
+ dev->nodename);
+ goto abort;
+ }
+
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
@@ -720,6 +728,7 @@ static int connect_ring(struct backend_info *be)
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
unsigned int evtchn;
+ unsigned int pers_grants;
char protocol[64] = "";
int err;
@@ -749,8 +758,18 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
}
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
- ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "feature-persistent", "%u",
+ &pers_grants, NULL);
+ if (err)
+ pers_grants = 0;
+
+ be->blkif->vbd.feature_gnt_persistent = pers_grants;
+ be->blkif->vbd.overflow_max_grants = 0;
+
+ pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol,
+ pers_grants ? "persistent grants" : "");
/* Map the shared frame, irq etc. */
err = xen_blkif_map(be->blkif, ring_ref, evtchn);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 007db8986e8..96e9b00db08 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -44,6 +44,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
+#include <linux/llist.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -64,10 +65,17 @@ enum blkif_state {
BLKIF_STATE_SUSPENDED,
};
+struct grant {
+ grant_ref_t gref;
+ unsigned long pfn;
+ struct llist_node node;
+};
+
struct blk_shadow {
struct blkif_request req;
struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
@@ -97,6 +105,8 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
+ struct llist_head persistent_gnts;
+ unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
@@ -104,6 +114,7 @@ struct blkfront_info
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int feature_persistent:1;
int is_ready;
};
@@ -287,21 +298,36 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref;
+
+ /*
+ * Used to store if we are able to queue the request by just using
+ * existing persistent grants, or if we have to get new grants,
+ * as there are not sufficiently many free.
+ */
+ bool new_persistent_gnts;
grant_ref_t gref_head;
+ struct page *granted_page;
+ struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (gnttab_alloc_grant_references(
- BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
- gnttab_request_free_callback(
- &info->callback,
- blkif_restart_queue_callback,
- info,
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
- return 1;
- }
+ /* Check if we have enough grants to allocate a request */
+ if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ new_persistent_gnts = 1;
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c,
+ &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+ } else
+ new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
@@ -341,18 +367,73 @@ static int blkif_queue_request(struct request *req)
BLKIF_MAX_SEGMENTS_PER_REQUEST);
for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
- buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
- gnttab_grant_foreign_access_ref(
- ref,
+ if (info->persistent_gnts_c) {
+ BUG_ON(llist_empty(&info->persistent_gnts));
+ gnt_list_entry = llist_entry(
+ llist_del_first(&info->persistent_gnts),
+ struct grant, node);
+
+ ref = gnt_list_entry->gref;
+ buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ info->persistent_gnts_c--;
+ } else {
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnt_list_entry =
+ kmalloc(sizeof(struct grant),
+ GFP_ATOMIC);
+ if (!gnt_list_entry)
+ return -ENOMEM;
+
+ granted_page = alloc_page(GFP_ATOMIC);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+ return -ENOMEM;
+ }
+
+ gnt_list_entry->pfn =
+ page_to_pfn(granted_page);
+ gnt_list_entry->gref = ref;
+
+ buffer_mfn = pfn_to_mfn(page_to_pfn(
+ granted_page));
+ gnttab_grant_foreign_access_ref(ref,
info->xbdev->otherend_id,
- buffer_mfn,
- rq_data_dir(req));
+ buffer_mfn, 0);
+ }
+
+ info->shadow[id].grants_used[i] = gnt_list_entry;
+
+ if (rq_data_dir(req)) {
+ char *bvec_data;
+ void *shared_data;
+
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+
+ shared_data = kmap_atomic(
+ pfn_to_page(gnt_list_entry->pfn));
+ bvec_data = kmap_atomic(sg_page(sg));
+
+ /*
+ * this does not wipe data stored outside the
+ * range sg->offset..sg->offset+sg->length.
+ * Therefore, blkback *could* see data from
+ * previous requests. This is OK as long as
+ * persistent grants are shared with just one
+ * domain. It may need refactoring if this
+ * changes
+ */
+ memcpy(shared_data + sg->offset,
+ bvec_data + sg->offset,
+ sg->length);
+
+ kunmap_atomic(bvec_data);
+ kunmap_atomic(shared_data);
+ }
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
ring_req->u.rw.seg[i] =
@@ -368,7 +449,8 @@ static int blkif_queue_request(struct request *req)
/* Keep a private copy so we can reissue requests when recovering. */
info->shadow[id].req = *ring_req;
- gnttab_free_grant_references(gref_head);
+ if (new_persistent_gnts)
+ gnttab_free_grant_references(gref_head);
return 0;
}
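Since cached persistent grants are consumed before any freshly claimed references, a request only needs to reserve the shortfall up front. A toy calculation of that shortfall (the cached count is invented; 11 mirrors BLKIF_MAX_SEGMENTS_PER_REQUEST):

#include <stdio.h>

int main(void)
{
	unsigned int max_segs = 11;	/* BLKIF_MAX_SEGMENTS_PER_REQUEST */
	unsigned int cached = 7;	/* invented: grants already on the persistent list */
	unsigned int to_allocate = cached < max_segs ? max_segs - cached : 0;

	printf("reuse %u cached grants, allocate %u new references\n",
	       cached < max_segs ? cached : max_segs, to_allocate);
	return 0;
}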
@@ -480,12 +562,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_flush(info->rq, info->feature_flush);
- printk(KERN_INFO "blkfront: %s: %s: %s\n",
+ printk(KERN_INFO "blkfront: %s: %s: %s %s\n",
info->gd->disk_name,
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
"flush diskcache" : "barrier or flush"),
- info->feature_flush ? "enabled" : "disabled");
+ info->feature_flush ? "enabled" : "disabled",
+ info->feature_persistent ? "using persistent grants" : "");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -707,6 +790,9 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
+ struct llist_node *all_gnts;
+ struct grant *persistent_gnt;
+
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
info->connected = suspend ?
@@ -714,6 +800,18 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* No more blkif_request(). */
if (info->rq)
blk_stop_queue(info->rq);
+
+ /* Remove all persistent grants */
+ if (info->persistent_gnts_c) {
+ all_gnts = llist_del_all(&info->persistent_gnts);
+ llist_for_each_entry(persistent_gnt, all_gnts, node) {
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ __free_page(pfn_to_page(persistent_gnt->pfn));
+ kfree(persistent_gnt);
+ }
+ info->persistent_gnts_c = 0;
+ }
+
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock);
@@ -734,13 +832,43 @@ static void blkif_free(struct blkfront_info *info, int suspend)
}
-static void blkif_completion(struct blk_shadow *s)
+static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ struct blkif_response *bret)
{
int i;
- /* Do not let BLKIF_OP_DISCARD as nr_segment is in the same place
- * flag. */
- for (i = 0; i < s->req.u.rw.nr_segments; i++)
- gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
+ struct bio_vec *bvec;
+ struct req_iterator iter;
+ unsigned long flags;
+ char *bvec_data;
+ void *shared_data;
+ unsigned int offset = 0;
+
+ if (bret->operation == BLKIF_OP_READ) {
+ /*
+ * Copy the data received from the backend into the bvec.
+ * Since bv_offset can be different than 0, and bv_len different
+ * than PAGE_SIZE, we have to keep track of the current offset,
+ * to be sure we are copying the data from the right shared page.
+ */
+ rq_for_each_segment(bvec, s->request, iter) {
+ BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
+ i = offset >> PAGE_SHIFT;
+ BUG_ON(i >= s->req.u.rw.nr_segments);
+ shared_data = kmap_atomic(
+ pfn_to_page(s->grants_used[i]->pfn));
+ bvec_data = bvec_kmap_irq(bvec, &flags);
+ memcpy(bvec_data, shared_data + bvec->bv_offset,
+ bvec->bv_len);
+ bvec_kunmap_irq(bvec_data, &flags);
+ kunmap_atomic(shared_data);
+ offset += bvec->bv_len;
+ }
+ }
+ /* Add the persistent grant into the list of free grants */
+ for (i = 0; i < s->req.u.rw.nr_segments; i++) {
+ llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ }
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -783,7 +911,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
- blkif_completion(&info->shadow[id]);
+ blkif_completion(&info->shadow[id], info, bret);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
@@ -942,6 +1070,11 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-persistent", "%u", 1);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing persistent grants feature to xenbus");
err = xenbus_transaction_end(xbt, 0);
if (err) {
@@ -1029,6 +1162,8 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
+ init_llist_head(&info->persistent_gnts);
+ info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@@ -1093,7 +1228,7 @@ static int blkif_recover(struct blkfront_info *info)
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
- rq_data_dir(info->shadow[req->u.rw.id].request));
+ 0);
}
info->shadow[req->u.rw.id].req = *req;
@@ -1225,7 +1360,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard;
+ int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1303,6 +1438,14 @@ static void blkfront_connect(struct blkfront_info *info)
if (!err && discard)
blkfront_setup_discard(info);
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index bbec35d21fe..0f51ed687dc 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,6 +6,7 @@ menu "Bus devices"
config OMAP_OCP2SCP
tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
help
Driver to enable ocp2scp module which transforms ocp interface
protocol to scp protocol. In OMAP4, USB PHY is connected via
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc..1042c1b9037 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED 0x00000002
-#define HSW_PTE_UNCACHED 0x00000000
-#define GEN6_PTE_LLC 0x00000004
-#define GEN6_PTE_LLC_MLC 0x00000006
-#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab..dbd901e94ea 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
- } else if (INTEL_GTT_GEN == 6) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen_size = MB(192);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen_size = MB(288);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen_size = MB(320);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen_size = MB(384);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen_size = MB(416);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen_size = MB(448);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen_size = MB(480);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen_size = MB(512);
- break;
- }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
- int size;
-
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else if (INTEL_GTT_GEN == 6) {
- u16 snb_gmch_ctl;
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- size = MB(2);
- break;
- }
- return size/4;
- } else {
+ else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
- if (INTEL_GTT_GEN >= 6)
- return true;
-
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool gen6_check_flags(unsigned int flags)
-{
- return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else {
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-
- writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr;
+ u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- if (INTEL_GTT_GEN >= 7)
- size = MB(2);
-
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- if (INTEL_GTT_GEN == 3) {
- u32 gtt_addr;
-
+ switch (INTEL_GTT_GEN) {
+ case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- } else {
- u32 gtt_offset;
-
- switch (INTEL_GTT_GEN) {
- case 5:
- case 6:
- case 7:
- gtt_offset = MB(2);
- break;
- case 4:
- default:
- gtt_offset = KB(512);
- break;
- }
- intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ break;
+ case 5:
+ intel_private.gtt_bus_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_bus_addr = reg_addr + KB(512);
+ break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = gen6_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = haswell_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
- .gen = 7,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = valleyview_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
-};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
- "ValleyView", &valleyview_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c58ea9b80b1..c5a0262251b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -216,7 +216,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
- depends on HW_RANDOM && PLAT_NOMADIK
+ depends on HW_RANDOM && ARCH_NOMADIK
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a0c84bb3085..053201b062a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3789,7 +3789,7 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
- /* It's an asyncronous event. */
+ /* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 20ab5b3a891..cfdfecd5bc7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -155,7 +155,7 @@ enum si_stat_indexes {
/* Number of watchdog pretimeouts. */
SI_STAT_watchdog_pretimeouts,
- /* Number of asyncronous messages received. */
+ /* Number of asynchronous messages received. */
SI_STAT_incoming_messages,
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d0d824ebf2c..1cd49241e60 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -251,12 +251,8 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
break;
}
- if (signal_pending (current)) {
- if (!bytes_written) {
- bytes_written = -EINTR;
- }
+ if (signal_pending (current))
break;
- }
cond_resched();
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b86eae9b77d..85e81ec1451 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -399,7 +399,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
-#if 0
static bool debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
@@ -410,9 +409,6 @@ module_param(debug, bool, 0644);
blocking_pool.entropy_count,\
nonblocking_pool.entropy_count,\
## arg); } while (0)
-#else
-#define DEBUG_ENT(fmt, arg...) do {} while (0)
-#endif
/**********************************************************************
*
@@ -437,6 +433,7 @@ struct entropy_store {
int entropy_count;
int entropy_total;
unsigned int initialized:1;
+ bool last_data_init;
__u8 last_data[EXTRACT_SIZE];
};
@@ -829,7 +826,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = min_t(int, bytes, sizeof(tmp));
DEBUG_ENT("going to reseed %s with %d bits "
- "(%d of %d requested)\n",
+ "(%zu of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes = extract_entropy(r->pull, tmp, bytes,
@@ -860,7 +857,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
spin_lock_irqsave(&r->lock, flags);
BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
- DEBUG_ENT("trying to extract %d bits from %s\n",
+ DEBUG_ENT("trying to extract %zu bits from %s\n",
nbytes * 8, r->name);
/* Can we pull enough? */
@@ -882,7 +879,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
}
}
- DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+ DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
spin_unlock_irqrestore(&r->lock, flags);
@@ -957,6 +954,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
+ if (fips_enabled && !r->last_data_init)
+ nbytes += EXTRACT_SIZE;
+
trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -967,6 +968,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
if (fips_enabled) {
unsigned long flags;
+
+ /* prime last_data value if need be, per fips 140-2 */
+ if (!r->last_data_init) {
+ spin_lock_irqsave(&r->lock, flags);
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ r->last_data_init = true;
+ nbytes -= EXTRACT_SIZE;
+ spin_unlock_irqrestore(&r->lock, flags);
+ extract_buf(r, tmp);
+ }
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -1086,6 +1098,7 @@ static void init_std_data(struct entropy_store *r)
r->entropy_count = 0;
r->entropy_total = 0;
+ r->last_data_init = false;
mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
@@ -1142,11 +1155,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
- DEBUG_ENT("reading %d bits\n", n*8);
+ DEBUG_ENT("reading %zu bits\n", n*8);
n = extract_entropy_user(&blocking_pool, buf, n);
- DEBUG_ENT("read got %d bits (%d still needed)\n",
+ if (n < 0) {
+ retval = n;
+ break;
+ }
+
+ DEBUG_ENT("read got %zd bits (%zd still needed)\n",
n*8, (nbytes-n)*8);
if (n == 0) {
@@ -1171,10 +1189,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
continue;
}
- if (n < 0) {
- retval = n;
- break;
- }
count += n;
buf += n;
nbytes -= n;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 7da840d487d..9978609d93b 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -38,8 +38,6 @@ static struct vio_device_id tpm_ibmvtpm_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
-DECLARE_WAIT_QUEUE_HEAD(wq);
-
/**
* ibmvtpm_send_crq - Send a CRQ request
* @vdev: vio device struct
@@ -83,6 +81,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm;
u16 len;
+ int sig;
ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
@@ -91,22 +90,23 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return 0;
}
- wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+ sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+ if (sig)
+ return -EINTR;
+
+ len = ibmvtpm->res_len;
- if (count < ibmvtpm->crq_res.len) {
+ if (count < len) {
dev_err(ibmvtpm->dev,
"Invalid size in recv: count=%ld, crq_size=%d\n",
- count, ibmvtpm->crq_res.len);
+ count, len);
return -EIO;
}
spin_lock(&ibmvtpm->rtce_lock);
- memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
- memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
- ibmvtpm->crq_res.valid = 0;
- ibmvtpm->crq_res.msg = 0;
- len = ibmvtpm->crq_res.len;
- ibmvtpm->crq_res.len = 0;
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
spin_unlock(&ibmvtpm->rtce_lock);
return len;
}
@@ -273,7 +273,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
int rc = 0;
free_irq(vdev->irq, ibmvtpm);
- tasklet_kill(&ibmvtpm->tasklet);
do {
if (rc)
@@ -372,7 +371,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
- unsigned long flags;
int rc = 0;
do {
@@ -387,10 +385,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
@@ -467,7 +466,7 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
if (crq->valid & VTPM_MSG_RES) {
if (++crq_q->index == crq_q->num_entry)
crq_q->index = 0;
- rmb();
+ smp_rmb();
} else
crq = NULL;
return crq;
@@ -535,11 +534,9 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
ibmvtpm->vtpm_version = crq->data;
return;
case VTPM_TPM_COMMAND_RES:
- ibmvtpm->crq_res.valid = crq->valid;
- ibmvtpm->crq_res.msg = crq->msg;
- ibmvtpm->crq_res.len = crq->len;
- ibmvtpm->crq_res.data = crq->data;
- wake_up_interruptible(&wq);
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = crq->len;
+ wake_up_interruptible(&ibmvtpm->wq);
return;
default:
return;
@@ -559,38 +556,19 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data: ibm vtpm device struct
- *
- * Returns:
- * Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
- struct ibmvtpm_dev *ibmvtpm = data;
struct ibmvtpm_crq *crq;
- unsigned long flags;
- spin_lock_irqsave(&ibmvtpm->lock, flags);
+ /* The while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
crq->valid = 0;
- wmb();
+ smp_wmb();
}
- vio_enable_interrupts(ibmvtpm->vdev);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ return IRQ_HANDLED;
}
/**
@@ -650,9 +628,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto reg_crq_cleanup;
}
- tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
- (unsigned long)ibmvtpm);
-
rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
tpm_ibmvtpm_driver_name, ibmvtpm);
if (rc) {
@@ -666,13 +641,14 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
+ init_waitqueue_head(&ibmvtpm->wq);
+
crq_q->index = 0;
ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
chip->vendor.data = (void *)ibmvtpm;
- spin_lock_init(&ibmvtpm->lock);
spin_lock_init(&ibmvtpm->rtce_lock);
rc = ibmvtpm_crq_send_init(ibmvtpm);
@@ -689,7 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
return rc;
init_irq_cleanup:
- tasklet_kill(&ibmvtpm->tasklet);
do {
rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index 4296eb4b4d8..bd82a791f99 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -38,13 +38,12 @@ struct ibmvtpm_dev {
struct vio_dev *vdev;
struct ibmvtpm_crq_queue crq_queue;
dma_addr_t crq_dma_handle;
- spinlock_t lock;
- struct tasklet_struct tasklet;
u32 rtce_size;
void __iomem *rtce_buf;
dma_addr_t rtce_dma_handle;
spinlock_t rtce_lock;
- struct ibmvtpm_crq crq_res;
+ wait_queue_head_t wq;
+ u16 res_len;
u32 vtpm_version;
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 90493d4ead1..c594cb16c37 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
#include "../tty/hvc/hvc_console.h"
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -111,6 +115,21 @@ struct port_buffer {
size_t len;
/* offset in the buf from which to consume data */
size_t offset;
+
+ /* DMA address of buffer */
+ dma_addr_t dma;
+
+ /* Device we got DMA memory from */
+ struct device *dev;
+
+ /* List of pending dma buffers to free */
+ struct list_head list;
+
+ /* If sgpages == 0 then buf is used */
+ unsigned int sgpages;
+
+ /* sg is used if sgpages > 0. sg must be the last field in this struct */
+ struct scatterlist sg[0];
};
/*
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
return false;
}
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+ return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
static inline bool use_multiport(struct ports_device *portdev)
{
/*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
}
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
{
- kfree(buf->buf);
+ unsigned int i;
+
+ for (i = 0; i < buf->sgpages; i++) {
+ struct page *page = sg_page(&buf->sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+
+ if (!buf->dev) {
+ kfree(buf->buf);
+ } else if (is_rproc_enabled) {
+ unsigned long flags;
+
+ /* dma_free_coherent requires interrupts to be enabled. */
+ if (!can_sleep) {
+ /* queue up dma-buffers to be freed later */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_add_tail(&buf->list, &pending_free_dma_bufs);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+ return;
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+ /* Release device refcnt and allow it to be freed */
+ put_device(buf->dev);
+ }
+
kfree(buf);
}
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+ unsigned long flags;
+ struct port_buffer *buf, *tmp;
+ LIST_HEAD(tmp_list);
+
+ if (list_empty(&pending_free_dma_bufs))
+ return;
+
+ /* Create a copy of the pending_free_dma_bufs while holding the lock */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_cut_position(&tmp_list, &pending_free_dma_bufs,
+ pending_free_dma_bufs.prev);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+ /* Release the dma buffers after dropping the spinlock */
+ list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+ list_del(&buf->list);
+ free_buf(buf, true);
+ }
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+ int pages)
{
struct port_buffer *buf;
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ reclaim_dma_bufs();
+
+ /*
+ * Allocate buffer and the sg list. The sg list array is allocated
+ * directly after the port_buffer struct.
+ */
+ buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+ GFP_KERNEL);
if (!buf)
goto fail;
- buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+ buf->sgpages = pages;
+ if (pages > 0) {
+ buf->dev = NULL;
+ buf->buf = NULL;
+ return buf;
+ }
+
+ if (is_rproc_serial(vq->vdev)) {
+ /*
+ * Allocate DMA memory from ancestor. When a virtio
+ * device is created by remoteproc, the DMA memory is
+ * associated with the grandparent device:
+ * vdev => rproc => platform-dev.
+ * The code here would have been less quirky if
+ * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+ * in dma-coherent.c
+ */
+ if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+ goto free_buf;
+ buf->dev = vq->vdev->dev.parent->parent;
+
+ /* Increase device refcnt to avoid freeing it */
+ get_device(buf->dev);
+ buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+ GFP_KERNEL);
+ } else {
+ buf->dev = NULL;
+ buf->buf = kmalloc(buf_size, GFP_KERNEL);
+ }
+
if (!buf->buf)
goto free_buf;
buf->len = 0;
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
+ if (!ret)
+ ret = vq->num_free;
return ret;
}
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
port->stats.bytes_discarded += buf->len - buf->offset;
if (add_inbuf(port->in_vq, buf) < 0) {
err++;
- free_buf(buf);
+ free_buf(buf, false);
}
port->inbuf = NULL;
buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
return 0;
}
-struct buffer_token {
- union {
- void *buf;
- struct scatterlist *sg;
- } u;
- /* If sgpages == 0 then buf is used, else sg is used */
- unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
- int i;
- struct page *page;
-
- for (i = 0; i < nrpages; i++) {
- page = sg_page(&sg[i]);
- if (!page)
- break;
- put_page(page);
- }
- kfree(sg);
-}
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
- struct buffer_token *tok;
+ struct port_buffer *buf;
unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
- if (tok->sgpages)
- reclaim_sg_pages(tok->u.sg, tok->sgpages);
- else
- kfree(tok->u.buf);
- kfree(tok);
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ free_buf(buf, false);
port->outvq_full = false;
}
}
static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
int nents, size_t in_count,
- struct buffer_token *tok, bool nonblock)
+ void *data, bool nonblock)
{
struct virtqueue *out_vq;
- ssize_t ret;
+ int err;
unsigned long flags;
unsigned int len;
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
reclaim_consumed_buffers(port);
- ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+ err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
- if (ret < 0) {
+ if (err) {
in_count = 0;
goto done;
}
- if (ret == 0)
+ if (out_vq->num_free == 0)
port->outvq_full = true;
if (nonblock)
@@ -572,37 +662,6 @@ done:
return in_count;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
- bool nonblock)
-{
- struct scatterlist sg[1];
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = 0;
- tok->u.buf = in_buf;
-
- sg_init_one(sg, in_buf, in_count);
-
- return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
- size_t in_count, bool nonblock)
-{
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = nents;
- tok->u.sg = sg;
-
- return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
/*
* Give out the data that's requested from the buffer that we have
* queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct port *port;
- char *buf;
+ struct port_buffer *buf;
ssize_t ret;
bool nonblock;
+ struct scatterlist sg[1];
/* Userspace could be out to fool us */
if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
count = min((size_t)(32 * 1024), count);
- buf = kmalloc(count, GFP_KERNEL);
+ buf = alloc_buf(port->out_vq, count, 0);
if (!buf)
return -ENOMEM;
- ret = copy_from_user(buf, ubuf, count);
+ ret = copy_from_user(buf->buf, ubuf, count);
if (ret) {
ret = -EFAULT;
goto free_buf;
@@ -784,13 +844,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
* through to the host.
*/
nonblock = true;
- ret = send_buf(port, buf, count, nonblock);
+ sg_init_one(sg, buf->buf, count);
+ ret = __send_to_port(port, sg, 1, count, buf, nonblock);
if (nonblock && ret > 0)
goto out;
free_buf:
- kfree(buf);
+ free_buf(buf, true);
out:
return ret;
}
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
struct port *port = filp->private_data;
struct sg_list sgl;
ssize_t ret;
+ struct port_buffer *buf;
struct splice_desc sd = {
.total_len = len,
.flags = flags,
@@ -863,22 +925,34 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
.u.data = &sgl,
};
+ /*
+ * Rproc_serial does not yet support splice. To support splice,
+ * pipe_to_sg() must allocate dma-buffers and copy content from
+ * regular pages to dma pages. And alloc_buf and free_buf must
+ * support allocating and freeing such a list of dma-buffers.
+ */
+ if (is_rproc_serial(port->out_vq->vdev))
+ return -EINVAL;
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
+ buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+ if (!buf)
+ return -ENOMEM;
+
sgl.n = 0;
sgl.len = 0;
sgl.size = pipe->nrbufs;
- sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
- if (unlikely(!sgl.sg))
- return -ENOMEM;
-
+ sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
if (likely(ret > 0))
- ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+ ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
+ if (unlikely(ret <= 0))
+ free_buf(buf, true);
return ret;
}
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+ reclaim_dma_bufs();
/*
* Locks aren't necessary here as a port can't be opened after
* unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
+ struct scatterlist sg[1];
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
if (!port)
return -EPIPE;
- return send_buf(port, (void *)buf, count, false);
+ sg_init_one(sg, buf, count);
+ return __send_to_port(port, sg, 1, count, (void *)buf, false);
}
/*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+ /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
hvc_resize(port->cons.hvc, port->cons.ws);
}
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
nr_added_bufs = 0;
do {
- buf = alloc_buf(PAGE_SIZE);
+ buf = alloc_buf(vq, PAGE_SIZE, 0);
if (!buf)
break;
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
- free_buf(buf);
+ free_buf(buf, true);
break;
}
nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
goto free_device;
}
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
+ if (is_rproc_serial(port->portdev->vdev))
+ /*
+ * For rproc_serial assume remote processor is connected.
+ * rproc_serial does not want the console port, only
+ * the generic port implementation.
+ */
+ port->host_connected = true;
+ else if (!use_multiport(port->portdev)) {
+ /*
+ * If we're not using multiport support,
+ * this has to be a console port.
+ */
err = init_port_console(port);
if (err)
goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
free_inbufs:
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
/* Remove buffers we queued up for the Host to send us data in. */
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
+
+ /* Free pending buffers from the out-queue. */
+ while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+ free_buf(buf, true);
}
/*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
if (add_inbuf(portdev->c_ivq, buf) < 0) {
dev_warn(&portdev->vdev->dev,
"Error adding buffer to queue\n");
- free_buf(buf);
+ free_buf(buf, false);
}
}
spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
return;
while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
+ free_buf(buf, true);
while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
+ free_buf(buf, true);
}
/*
@@ -1882,11 +1974,15 @@ static int virtcons_probe(struct virtio_device *vdev)
multiport = false;
portdev->config.max_nr_ports = 1;
- if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
- offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports) == 0)
+
+ /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports) == 0) {
multiport = true;
+ }
err = init_vqs(portdev);
if (err < 0) {
@@ -1996,6 +2092,16 @@ static unsigned int features[] = {
VIRTIO_CONSOLE_F_MULTIPORT,
};
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+ { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+ { 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
#ifdef CONFIG_PM
static int virtcons_freeze(struct virtio_device *vdev)
{
@@ -2080,6 +2186,20 @@ static struct virtio_driver virtio_console = {
#endif
};
+/*
+ * virtio_rproc_serial refers to a __devinit function, which causes
+ * section mismatch warnings. So use __refdata to silence warnings.
+ */
+static struct virtio_driver __refdata virtio_rproc_serial = {
+ .feature_table = rproc_serial_features,
+ .feature_table_size = ARRAY_SIZE(rproc_serial_features),
+ .driver.name = "virtio_rproc_serial",
+ .driver.owner = THIS_MODULE,
+ .id_table = rproc_serial_id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+};
+
static int __init init(void)
{
int err;
@@ -2104,7 +2224,15 @@ static int __init init(void)
pr_err("Error %d registering virtio driver\n", err);
goto free;
}
+ err = register_virtio_driver(&virtio_rproc_serial);
+ if (err < 0) {
+ pr_err("Error %d registering virtio rproc serial driver\n",
+ err);
+ goto unregister;
+ }
return 0;
+unregister:
+ unregister_virtio_driver(&virtio_console);
free:
if (pdrvdata.debugfs_dir)
debugfs_remove_recursive(pdrvdata.debugfs_dir);
@@ -2114,7 +2242,10 @@ free:
static void __exit fini(void)
{
+ reclaim_dma_bufs();
+
unregister_virtio_driver(&virtio_console);
+ unregister_virtio_driver(&virtio_rproc_serial);
class_destroy(pdrvdata.class);
if (pdrvdata.debugfs_dir)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 823f62d900b..a47e6ee98b8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -64,3 +64,5 @@ config CLK_TWL6040
as functional clock.
endmenu
+
+source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index a96bda3d3b8..ee90e87e767 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o
+obj-$(CONFIG_PLAT_ORION) += mvebu/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
@@ -20,6 +21,7 @@ obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_SUNXI) += clk-sunxi.o
+obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
# Chip specific
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 517a8ff7121..6b4c70f7d23 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -20,6 +20,7 @@ void __init nomadik_clk_init(void)
clk_register_clkdev(clk, NULL, "gpio.2");
clk_register_clkdev(clk, NULL, "gpio.3");
clk_register_clkdev(clk, NULL, "rng");
+ clk_register_clkdev(clk, NULL, "fsmc-nand");
/*
* The 2.4 MHz TIMCLK reference clock is active at boot time, this is
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
new file mode 100644
index 00000000000..37a30514fd6
--- /dev/null
+++ b/drivers/clk/clk-zynq.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2012 National Instruments
+ *
+ * Josh Cartwright <josh.cartwright@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+
+static void __iomem *slcr_base;
+
+struct zynq_pll_clk {
+ struct clk_hw hw;
+ void __iomem *pll_ctrl;
+ void __iomem *pll_cfg;
+};
+
+#define to_zynq_pll_clk(hw) container_of(hw, struct zynq_pll_clk, hw)
+
+#define CTRL_PLL_FDIV(x) ((x) >> 12)
+
+static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
+ return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
+}
+
+static const struct clk_ops zynq_pll_clk_ops = {
+ .recalc_rate = zynq_pll_recalc_rate,
+};
+
+static void __init zynq_pll_clk_setup(struct device_node *np)
+{
+ struct clk_init_data init;
+ struct zynq_pll_clk *pll;
+ const char *parent_name;
+ struct clk *clk;
+ u32 regs[2];
+ int ret;
+
+ ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
+ if (WARN_ON(ret))
+ return;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (WARN_ON(!pll))
+ return;
+
+ pll->pll_ctrl = slcr_base + regs[0];
+ pll->pll_cfg = slcr_base + regs[1];
+
+ of_property_read_string(np, "clock-output-names", &init.name);
+
+ init.ops = &zynq_pll_clk_ops;
+ parent_name = of_clk_get_parent_name(np, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(ret))
+ return;
+}
+
+struct zynq_periph_clk {
+ struct clk_hw hw;
+ struct clk_onecell_data onecell_data;
+ struct clk *gates[2];
+ void __iomem *clk_ctrl;
+ spinlock_t clkact_lock;
+};
+
+#define to_zynq_periph_clk(hw) container_of(hw, struct zynq_periph_clk, hw)
+
+static const u8 periph_clk_parent_map[] = {
+ 0, 0, 1, 2
+};
+#define PERIPH_CLK_CTRL_SRC(x) (periph_clk_parent_map[((x) & 0x30) >> 4])
+#define PERIPH_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
+
+static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
+ return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
+}
+
+static u8 zynq_periph_get_parent(struct clk_hw *hw)
+{
+ struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
+ return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
+}
+
+static const struct clk_ops zynq_periph_clk_ops = {
+ .recalc_rate = zynq_periph_recalc_rate,
+ .get_parent = zynq_periph_get_parent,
+};
+
+static void __init zynq_periph_clk_setup(struct device_node *np)
+{
+ struct zynq_periph_clk *periph;
+ const char *parent_names[3];
+ struct clk_init_data init;
+ int clk_num = 0, err;
+ const char *name;
+ struct clk *clk;
+ u32 reg;
+ int i;
+
+ err = of_property_read_u32(np, "reg", &reg);
+ if (WARN_ON(err))
+ return;
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (WARN_ON(!periph))
+ return;
+
+ periph->clk_ctrl = slcr_base + reg;
+ spin_lock_init(&periph->clkact_lock);
+
+ init.name = np->name;
+ init.ops = &zynq_periph_clk_ops;
+ for (i = 0; i < ARRAY_SIZE(parent_names); i++)
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ init.parent_names = parent_names;
+ init.num_parents = ARRAY_SIZE(parent_names);
+
+ periph->hw.init = &init;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(err))
+ return;
+
+ err = of_property_read_string_index(np, "clock-output-names", 0,
+ &name);
+ if (WARN_ON(err))
+ return;
+
+ periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
+ periph->clk_ctrl, 0, 0,
+ &periph->clkact_lock);
+ if (WARN_ON(IS_ERR(periph->gates[0])))
+ return;
+ clk_num++;
+
+ /* some periph clks have 2 downstream gates */
+ err = of_property_read_string_index(np, "clock-output-names", 1,
+ &name);
+ if (err != -ENODATA) {
+ periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
+ periph->clk_ctrl, 1, 0,
+ &periph->clkact_lock);
+ if (WARN_ON(IS_ERR(periph->gates[1])))
+ return;
+ clk_num++;
+ }
+
+ periph->onecell_data.clks = periph->gates;
+ periph->onecell_data.clk_num = clk_num;
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &periph->onecell_data);
+ if (WARN_ON(err))
+ return;
+}
+
+/* CPU Clock domain is modelled as a mux with 4 child subclks, whose
+ * derivative rates depend on CLK_621_TRUE.
+ */
+
+struct zynq_cpu_clk {
+ struct clk_hw hw;
+ struct clk_onecell_data onecell_data;
+ struct clk *subclks[4];
+ void __iomem *clk_ctrl;
+ spinlock_t clkact_lock;
+};
+
+#define to_zynq_cpu_clk(hw) container_of(hw, struct zynq_cpu_clk, hw)
+
+static const u8 zynq_cpu_clk_parent_map[] = {
+ 1, 1, 2, 0
+};
+#define CPU_CLK_SRCSEL(x) (zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
+#define CPU_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
+
+static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
+{
+ struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
+ return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
+}
+
+static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
+ return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
+}
+
+static const struct clk_ops zynq_cpu_clk_ops = {
+ .get_parent = zynq_cpu_clk_get_parent,
+ .recalc_rate = zynq_cpu_clk_recalc_rate,
+};
+
+struct zynq_cpu_subclk {
+ struct clk_hw hw;
+ void __iomem *clk_621;
+ enum {
+ CPU_SUBCLK_6X4X,
+ CPU_SUBCLK_3X2X,
+ CPU_SUBCLK_2X,
+ CPU_SUBCLK_1X,
+ } which;
+};
+
+#define CLK_621_TRUE(x) ((x) & 1)
+
+#define to_zynq_cpu_subclk(hw) container_of(hw, struct zynq_cpu_subclk, hw)
+
+static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long uninitialized_var(rate);
+ struct zynq_cpu_subclk *subclk;
+ bool is_621;
+
+ subclk = to_zynq_cpu_subclk(hw);
+ is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));
+
+ switch (subclk->which) {
+ case CPU_SUBCLK_6X4X:
+ rate = parent_rate;
+ break;
+ case CPU_SUBCLK_3X2X:
+ rate = parent_rate / 2;
+ break;
+ case CPU_SUBCLK_2X:
+ rate = parent_rate / (is_621 ? 3 : 2);
+ break;
+ case CPU_SUBCLK_1X:
+ rate = parent_rate / (is_621 ? 6 : 4);
+ break;
+ }
+
+ return rate;
+}
+
+static const struct clk_ops zynq_cpu_subclk_ops = {
+ .recalc_rate = zynq_cpu_subclk_recalc_rate,
+};
+
+static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
+ void __iomem *clk_621)
+{
+ struct zynq_cpu_subclk *subclk;
+ struct clk_init_data init;
+ struct clk *clk;
+ int err;
+
+ err = of_property_read_string_index(np, "clock-output-names",
+ which, &init.name);
+ if (WARN_ON(err))
+ goto err_read_output_name;
+
+ subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
+ if (!subclk)
+ goto err_subclk_alloc;
+
+ subclk->clk_621 = clk_621;
+ subclk->which = which;
+
+ init.ops = &zynq_cpu_subclk_ops;
+ init.parent_names = &np->name;
+ init.num_parents = 1;
+
+ subclk->hw.init = &init;
+
+ clk = clk_register(NULL, &subclk->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ goto err_clk_register;
+
+ return clk;
+
+err_clk_register:
+ kfree(subclk);
+err_subclk_alloc:
+err_read_output_name:
+ return ERR_PTR(-EINVAL);
+}
+
+static void __init zynq_cpu_clk_setup(struct device_node *np)
+{
+ struct zynq_cpu_clk *cpuclk;
+ const char *parent_names[3];
+ struct clk_init_data init;
+ void __iomem *clk_621;
+ struct clk *clk;
+ u32 reg[2];
+ int err;
+ int i;
+
+ err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (WARN_ON(err))
+ return;
+
+ cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
+ if (WARN_ON(!cpuclk))
+ return;
+
+ cpuclk->clk_ctrl = slcr_base + reg[0];
+ clk_621 = slcr_base + reg[1];
+ spin_lock_init(&cpuclk->clkact_lock);
+
+ init.name = np->name;
+ init.ops = &zynq_cpu_clk_ops;
+ for (i = 0; i < ARRAY_SIZE(parent_names); i++)
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ init.parent_names = parent_names;
+ init.num_parents = ARRAY_SIZE(parent_names);
+
+ cpuclk->hw.init = &init;
+
+ clk = clk_register(NULL, &cpuclk->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(err))
+ return;
+
+ for (i = 0; i < 4; i++) {
+ cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
+ if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
+ return;
+ }
+
+ cpuclk->onecell_data.clks = cpuclk->subclks;
+ cpuclk->onecell_data.clk_num = i;
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &cpuclk->onecell_data);
+ if (WARN_ON(err))
+ return;
+}
+
+static const __initconst struct of_device_id zynq_clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
+ { .compatible = "xlnx,zynq-periph-clock",
+ .data = zynq_periph_clk_setup, },
+ { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
+ {}
+};
+
+void __init xilinx_zynq_clocks_init(void __iomem *slcr)
+{
+ slcr_base = slcr;
+ of_clk_init(zynq_clk_match);
+}
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
new file mode 100644
index 00000000000..57323fd15ec
--- /dev/null
+++ b/drivers/clk/mvebu/Kconfig
@@ -0,0 +1,8 @@
+config MVEBU_CLK_CORE
+ bool
+
+config MVEBU_CLK_CPU
+ bool
+
+config MVEBU_CLK_GATING
+ bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
new file mode 100644
index 00000000000..58df3dc4936
--- /dev/null
+++ b/drivers/clk/mvebu/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MVEBU_CLK_CORE) += clk.o clk-core.o
+obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
+obj-$(CONFIG_MVEBU_CLK_GATING) += clk-gating-ctrl.o
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
new file mode 100644
index 00000000000..69056a7479e
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.c
@@ -0,0 +1,675 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "clk-core.h"
+
+struct core_ratio {
+ int id;
+ const char *name;
+};
+
+struct core_clocks {
+ u32 (*get_tclk_freq)(void __iomem *sar);
+ u32 (*get_cpu_freq)(void __iomem *sar);
+ void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
+ const struct core_ratio *ratios;
+ int num_ratios;
+};
+
+static struct clk_onecell_data clk_data;
+
+static void __init mvebu_clk_core_setup(struct device_node *np,
+ struct core_clocks *coreclk)
+{
+ const char *tclk_name = "tclk";
+ const char *cpuclk_name = "cpuclk";
+ void __iomem *base;
+ unsigned long rate;
+ int n;
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ /*
+ * Allocate the clk pointer array for TCLK, the CPU clock and the core ratio clocks
+ */
+ clk_data.clk_num = 2 + coreclk->num_ratios;
+ clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_data.clks))
+ return;
+
+ /*
+ * Register TCLK
+ */
+ of_property_read_string_index(np, "clock-output-names", 0,
+ &tclk_name);
+ rate = coreclk->get_tclk_freq(base);
+ clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[0]));
+
+ /*
+ * Register CPU clock
+ */
+ of_property_read_string_index(np, "clock-output-names", 1,
+ &cpuclk_name);
+ rate = coreclk->get_cpu_freq(base);
+ clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[1]));
+
+ /*
+ * Register fixed-factor clocks derived from CPU clock
+ */
+ for (n = 0; n < coreclk->num_ratios; n++) {
+ const char *rclk_name = coreclk->ratios[n].name;
+ int mult, div;
+
+ of_property_read_string_index(np, "clock-output-names",
+ 2+n, &rclk_name);
+ coreclk->get_clk_ratio(base, coreclk->ratios[n].id,
+ &mult, &div);
+ clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
+ cpuclk_name, 0, mult, div);
+ WARN_ON(IS_ERR(clk_data.clks[2+n]));
+ }
+
+ /*
+ * SAR register isn't needed anymore
+ */
+ iounmap(base);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+#ifdef CONFIG_MACH_ARMADA_370_XP
+/*
+ * The Armada 370/XP Sample At Reset value is a 64-bit bitfield split
+ * across two 32-bit registers
+ */
+
+#define SARL 0 /* Low part [0:31] */
+#define SARL_AXP_PCLK_FREQ_OPT 21
+#define SARL_AXP_PCLK_FREQ_OPT_MASK 0x7
+#define SARL_A370_PCLK_FREQ_OPT 11
+#define SARL_A370_PCLK_FREQ_OPT_MASK 0xF
+#define SARL_AXP_FAB_FREQ_OPT 24
+#define SARL_AXP_FAB_FREQ_OPT_MASK 0xF
+#define SARL_A370_FAB_FREQ_OPT 15
+#define SARL_A370_FAB_FREQ_OPT_MASK 0x1F
+#define SARL_A370_TCLK_FREQ_OPT 20
+#define SARL_A370_TCLK_FREQ_OPT_MASK 0x1
+#define SARH 4 /* High part [32:63] */
+#define SARH_AXP_PCLK_FREQ_OPT (52-32)
+#define SARH_AXP_PCLK_FREQ_OPT_MASK 0x1
+#define SARH_AXP_PCLK_FREQ_OPT_SHIFT 3
+#define SARH_AXP_FAB_FREQ_OPT (51-32)
+#define SARH_AXP_FAB_FREQ_OPT_MASK 0x1
+#define SARH_AXP_FAB_FREQ_OPT_SHIFT 4
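+
+/*
+ * The Armada XP PCLK and FAB options are therefore 4- and 5-bit values whose
+ * most significant bit lives in the high SAR word: the low bits come from
+ * SARL and the single SARH bit is shifted up by *_SHIFT before being OR-ed
+ * in (see armada_xp_get_cpu_freq() and armada_xp_get_clk_ratio() below).
+ */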
+
+static const u32 __initconst armada_370_tclk_frequencies[] = {
+ 16600000,
+ 20000000,
+};
+
+static u32 __init armada_370_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select = 0;
+
+ tclk_freq_select = ((readl(sar) >> SARL_A370_TCLK_FREQ_OPT) &
+ SARL_A370_TCLK_FREQ_OPT_MASK);
+ return armada_370_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 __initconst armada_370_cpu_frequencies[] = {
+ 400000000,
+ 533000000,
+ 667000000,
+ 800000000,
+ 1000000000,
+ 1067000000,
+ 1200000000,
+};
+
+static u32 __init armada_370_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) &
+ SARL_A370_PCLK_FREQ_OPT_MASK);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_370_cpu_frequencies)) {
+ pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_370_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
+
+enum { A370_XP_NBCLK, A370_XP_HCLK, A370_XP_DRAMCLK };
+
+static const struct core_ratio __initconst armada_370_xp_core_ratios[] = {
+ { .id = A370_XP_NBCLK, .name = "nbclk" },
+ { .id = A370_XP_HCLK, .name = "hclk" },
+ { .id = A370_XP_DRAMCLK, .name = "dramclk" },
+};
+
+static const int __initconst armada_370_xp_nbclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 2}, {2, 2},
+ {1, 2}, {1, 2}, {1, 1}, {2, 3},
+ {0, 1}, {1, 2}, {2, 4}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {2, 2},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_hclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 6}, {2, 3},
+ {1, 3}, {1, 4}, {1, 2}, {2, 6},
+ {0, 1}, {1, 6}, {2, 10}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 2},
+ {2, 6}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_dramclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 3}, {2, 3},
+ {1, 3}, {1, 2}, {1, 2}, {2, 6},
+ {0, 1}, {1, 3}, {2, 5}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_370_xp_get_clk_ratio(u32 opt,
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case A370_XP_NBCLK:
+ *mult = armada_370_xp_nbclk_ratios[opt][0];
+ *div = armada_370_xp_nbclk_ratios[opt][1];
+ break;
+ case A370_XP_HCLK:
+ *mult = armada_370_xp_hclk_ratios[opt][0];
+ *div = armada_370_xp_hclk_ratios[opt][1];
+ break;
+ case A370_XP_DRAMCLK:
+ *mult = armada_370_xp_dramclk_ratios[opt][0];
+ *div = armada_370_xp_dramclk_ratios[opt][1];
+ break;
+ }
+}
+
+static void __init armada_370_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ u32 opt = ((readl(sar) >> SARL_A370_FAB_FREQ_OPT) &
+ SARL_A370_FAB_FREQ_OPT_MASK);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+
+static const struct core_clocks armada_370_core_clocks = {
+ .get_tclk_freq = armada_370_get_tclk_freq,
+ .get_cpu_freq = armada_370_get_cpu_freq,
+ .get_clk_ratio = armada_370_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+static const u32 __initconst armada_xp_cpu_frequencies[] = {
+ 1000000000,
+ 1066000000,
+ 1200000000,
+ 1333000000,
+ 1500000000,
+ 1666000000,
+ 1800000000,
+ 2000000000,
+ 667000000,
+ 0,
+ 800000000,
+ 1600000000,
+};
+
+/* For Armada XP, the TCLK frequency is fixed at 250 MHz */
+static u32 __init armada_xp_get_tclk_freq(void __iomem *sar)
+{
+ return 250 * 1000 * 1000;
+}
+
+static u32 __init armada_xp_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_AXP_PCLK_FREQ_OPT) &
+ SARL_AXP_PCLK_FREQ_OPT_MASK);
+ /*
+ * The upper bit is not contiguous with the others and is
+ * located in the high part of the SAR registers
+ */
+ cpu_freq_select |= (((readl(sar+4) >> SARH_AXP_PCLK_FREQ_OPT) &
+ SARH_AXP_PCLK_FREQ_OPT_MASK)
+ << SARH_AXP_PCLK_FREQ_OPT_SHIFT);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_xp_cpu_frequencies)) {
+ pr_err("CPU freq select unsupported: %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_xp_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
+
+static void __init armada_xp_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+
+ u32 opt = ((readl(sar) >> SARL_AXP_FAB_FREQ_OPT) &
+ SARL_AXP_FAB_FREQ_OPT_MASK);
+ /*
+ * The upper bit is not contiguous with the others and is
+ * located in the high part of the SAR registers
+ */
+ opt |= (((readl(sar+4) >> SARH_AXP_FAB_FREQ_OPT) &
+ SARH_AXP_FAB_FREQ_OPT_MASK)
+ << SARH_AXP_FAB_FREQ_OPT_SHIFT);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+static const struct core_clocks armada_xp_core_clocks = {
+ .get_tclk_freq = armada_xp_get_tclk_freq,
+ .get_cpu_freq = armada_xp_get_cpu_freq,
+ .get_clk_ratio = armada_xp_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+#endif /* CONFIG_MACH_ARMADA_370_XP */
+
+/*
+ * Dove PLL sample-at-reset configuration
+ *
+ * SAR0[8:5] : CPU frequency
+ * 5 = 1000 MHz
+ * 6 = 933 MHz
+ * 7 = 933 MHz
+ * 8 = 800 MHz
+ * 9 = 800 MHz
+ * 10 = 800 MHz
+ * 11 = 1067 MHz
+ * 12 = 667 MHz
+ * 13 = 533 MHz
+ * 14 = 400 MHz
+ * 15 = 333 MHz
+ * others reserved.
+ *
+ * SAR0[11:9] : CPU to L2 Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[15:12] : CPU to DDR DRAM Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 3 = (2/5) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 8 = (1/5) * CPU
+ * 10 = (1/6) * CPU
+ * 12 = (1/7) * CPU
+ * 14 = (1/8) * CPU
+ * 15 = (1/10) * CPU
+ * others reserved.
+ *
+ * SAR0[24:23] : TCLK frequency
+ * 0 = 166 MHz
+ * 1 = 125 MHz
+ * others reserved.
+ */
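+
+/*
+ * Worked example (illustrative SAR0 value): with SAR0[8:5] = 5, SAR0[11:9] = 2
+ * and SAR0[15:12] = 4, the tables below give a 1000 MHz CPU clock, a 500 MHz
+ * L2 clock (1/2 ratio) and a ~333 MHz DDR clock (1/3 ratio).
+ */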
+#ifdef CONFIG_ARCH_DOVE
+#define SAR_DOVE_CPU_FREQ 5
+#define SAR_DOVE_CPU_FREQ_MASK 0xf
+#define SAR_DOVE_L2_RATIO 9
+#define SAR_DOVE_L2_RATIO_MASK 0x7
+#define SAR_DOVE_DDR_RATIO 12
+#define SAR_DOVE_DDR_RATIO_MASK 0xf
+#define SAR_DOVE_TCLK_FREQ 23
+#define SAR_DOVE_TCLK_FREQ_MASK 0x3
+
+static const u32 __initconst dove_tclk_frequencies[] = {
+ 166666667,
+ 125000000,
+ 0, 0
+};
+
+static u32 __init dove_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_TCLK_FREQ) &
+ SAR_DOVE_TCLK_FREQ_MASK;
+ return dove_tclk_frequencies[opt];
+}
+
+static const u32 __initconst dove_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 1000000000,
+ 933333333, 933333333,
+ 800000000, 800000000, 800000000,
+ 1066666667,
+ 666666667,
+ 533333333,
+ 400000000,
+ 333333333
+};
+
+static u32 __init dove_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_CPU_FREQ) &
+ SAR_DOVE_CPU_FREQ_MASK;
+ return dove_cpu_frequencies[opt];
+}
+
+enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
+
+static const struct core_ratio __initconst dove_core_ratios[] = {
+ { .id = DOVE_CPU_TO_L2, .name = "l2clk", },
+ { .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static const int __initconst dove_cpu_l2_ratios[8][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
+};
+
+static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
+ { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
+ { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 }
+};
+
+static void __init dove_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case DOVE_CPU_TO_L2:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_L2_RATIO) &
+ SAR_DOVE_L2_RATIO_MASK;
+ *mult = dove_cpu_l2_ratios[opt][0];
+ *div = dove_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case DOVE_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_DDR_RATIO) &
+ SAR_DOVE_DDR_RATIO_MASK;
+ *mult = dove_cpu_ddr_ratios[opt][0];
+ *div = dove_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks dove_core_clocks = {
+ .get_tclk_freq = dove_get_tclk_freq,
+ .get_cpu_freq = dove_get_cpu_freq,
+ .get_clk_ratio = dove_get_clk_ratio,
+ .ratios = dove_core_ratios,
+ .num_ratios = ARRAY_SIZE(dove_core_ratios),
+};
+#endif /* CONFIG_ARCH_DOVE */
+
+/*
+ * Kirkwood PLL sample-at-reset configuration
+ * (6180 has different SAR layout than other Kirkwood SoCs)
+ *
+ * SAR0[4:3,22,1] : CPU frequency (6281,6292,6282)
+ * 4 = 600 MHz
+ * 6 = 800 MHz
+ * 7 = 1000 MHz
+ * 9 = 1200 MHz
+ * 12 = 1500 MHz
+ * 13 = 1600 MHz
+ * 14 = 1800 MHz
+ * 15 = 2000 MHz
+ * others reserved.
+ *
+ * SAR0[19,10:9] : CPU to L2 Clock divider ratio (6281,6292,6282)
+ * 1 = (1/2) * CPU
+ * 3 = (1/3) * CPU
+ * 5 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[8:5] : CPU to DDR DRAM Clock divider ratio (6281,6292,6282)
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 7 = (2/9) * CPU
+ * 8 = (1/5) * CPU
+ * 9 = (1/6) * CPU
+ * others reserved.
+ *
+ * SAR0[4:2] : Kirkwood 6180 cpu/l2/ddr clock configuration (6180 only)
+ * 5 = [CPU = 600 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/3) * CPU]
+ * 6 = [CPU = 800 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/4) * CPU]
+ * 7 = [CPU = 1000 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/5) * CPU]
+ * others reserved.
+ *
+ * SAR0[21] : TCLK frequency
+ * 0 = 200 MHz
+ * 1 = 166 MHz
+ * others reserved.
+ */
+#ifdef CONFIG_ARCH_KIRKWOOD
+#define SAR_KIRKWOOD_CPU_FREQ(x) \
+ (((x & (1 << 1)) >> 1) | \
+ ((x & (1 << 22)) >> 21) | \
+ ((x & (3 << 3)) >> 1))
+#define SAR_KIRKWOOD_L2_RATIO(x) \
+ (((x & (3 << 9)) >> 9) | \
+ (((x & (1 << 19)) >> 17)))
+#define SAR_KIRKWOOD_DDR_RATIO 5
+#define SAR_KIRKWOOD_DDR_RATIO_MASK 0xf
+#define SAR_MV88F6180_CLK 2
+#define SAR_MV88F6180_CLK_MASK 0x7
+#define SAR_KIRKWOOD_TCLK_FREQ 21
+#define SAR_KIRKWOOD_TCLK_FREQ_MASK 0x1
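+
+/*
+ * The macros above reassemble the scattered SAR0 fields documented in the
+ * comment block: CPU_FREQ packs bit 1 into bit 0, bit 22 into bit 1 and
+ * bits 4:3 into bits 3:2; L2_RATIO packs bits 10:9 into bits 1:0 and bit 19
+ * into bit 2.
+ */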
+
+enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
+
+static const struct core_ratio __initconst kirkwood_core_ratios[] = {
+ { .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
+ { .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_TCLK_FREQ) &
+ SAR_KIRKWOOD_TCLK_FREQ_MASK;
+ return (opt) ? 166666667 : 200000000;
+}
+
+static const u32 __initconst kirkwood_cpu_frequencies[] = {
+ 0, 0, 0, 0,
+ 600000000,
+ 0,
+ 800000000,
+ 1000000000,
+ 0,
+ 1200000000,
+ 0, 0,
+ 1500000000,
+ 1600000000,
+ 1800000000,
+ 2000000000
+};
+
+static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = SAR_KIRKWOOD_CPU_FREQ(readl(sar));
+ return kirkwood_cpu_frequencies[opt];
+}
+
+static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+ { 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
+ { 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
+};
+
+static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+ { 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
+ { 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
+};
+
+static void __init kirkwood_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ u32 opt = SAR_KIRKWOOD_L2_RATIO(readl(sar));
+ *mult = kirkwood_cpu_l2_ratios[opt][0];
+ *div = kirkwood_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_DDR_RATIO) &
+ SAR_KIRKWOOD_DDR_RATIO_MASK;
+ *mult = kirkwood_cpu_ddr_ratios[opt][0];
+ *div = kirkwood_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks kirkwood_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = kirkwood_get_cpu_freq,
+ .get_clk_ratio = kirkwood_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+
+static const u32 __initconst mv88f6180_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 600000000,
+ 800000000,
+ 1000000000
+};
+
+static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & SAR_MV88F6180_CLK_MASK;
+ return mv88f6180_cpu_frequencies[opt];
+}
+
+static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
+};
+
+static void __init mv88f6180_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ /* mv88f6180 has a fixed 1:2 CPU-to-L2 ratio */
+ *mult = 1;
+ *div = 2;
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) &
+ SAR_MV88F6180_CLK_MASK;
+ *mult = mv88f6180_cpu_ddr_ratios[opt][0];
+ *div = mv88f6180_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks mv88f6180_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = mv88f6180_get_cpu_freq,
+ .get_clk_ratio = mv88f6180_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+#endif /* CONFIG_ARCH_KIRKWOOD */
+
+static const __initdata struct of_device_id clk_core_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370_XP
+ {
+ .compatible = "marvell,armada-370-core-clock",
+ .data = &armada_370_core_clocks,
+ },
+ {
+ .compatible = "marvell,armada-xp-core-clock",
+ .data = &armada_xp_core_clocks,
+ },
+#endif
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-core-clock",
+ .data = &dove_core_clocks,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-core-clock",
+ .data = &kirkwood_core_clocks,
+ },
+ {
+ .compatible = "marvell,mv88f6180-core-clock",
+ .data = &mv88f6180_core_clocks,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_core_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_core_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_core_match, np);
+ mvebu_clk_core_setup(np, (struct core_clocks *)match->data);
+ }
+}
diff --git a/drivers/clk/mvebu/clk-core.h b/drivers/clk/mvebu/clk-core.h
new file mode 100644
index 00000000000..28b5e02e988
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.h
@@ -0,0 +1,18 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CORE_H
+#define __MVEBU_CLK_CORE_H
+
+void __init mvebu_core_clk_init(void);
+
+#endif
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
new file mode 100644
index 00000000000..ff004578a11
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -0,0 +1,186 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include "clk-cpu.h"
+
+#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
+#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
+
+#define MAX_CPU 4
+struct cpu_clk {
+ struct clk_hw hw;
+ int cpu;
+ const char *clk_name;
+ const char *parent_name;
+ void __iomem *reg_base;
+};
+
+static struct clk **clks;
+
+static struct clk_onecell_data clk_data;
+
+#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
+
+static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
+ return parent_rate / div;
+}
+
+static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Valid ratios are 1:1, 1:2 and 1:3 */
+ u32 div;
+
+ div = *parent_rate / rate;
+ if (div == 0)
+ div = 1;
+ else if (div > 3)
+ div = 3;
+
+ return *parent_rate / div;
+}
+
+static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+ u32 reload_mask;
+
+ div = parent_rate / rate;
+ reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
+ & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
+ | (div << (cpuclk->cpu * 8));
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ /* Set clock divider reload smooth bit mask */
+ reload_mask = 1 << (20 + cpuclk->cpu);
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | reload_mask;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Now trigger the clock update */
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | 1 << 24;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Wait for clocks to settle down then clear reload request */
+ udelay(1000);
+ reg &= ~(reload_mask | 1 << 24);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ udelay(1000);
+
+ return 0;
+}
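+
+/*
+ * Minimal consumer sketch (hypothetical caller, standard clk API): a driver
+ * that wants to rescale one of the CPU clocks registered below could do
+ *
+ *	struct clk *cpu = of_clk_get(cpu_clock_node, cpu_index);
+ *
+ *	if (!IS_ERR(cpu))
+ *		clk_set_rate(cpu, clk_round_rate(cpu, target_rate));
+ *
+ * round_rate() clamps the divider to 1..3, so only the 1:1, 1:2 and 1:3
+ * ratios of the parent clock can actually be programmed.
+ */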
+
+static const struct clk_ops cpu_ops = {
+ .recalc_rate = clk_cpu_recalc_rate,
+ .round_rate = clk_cpu_round_rate,
+ .set_rate = clk_cpu_set_rate,
+};
+
+void __init of_cpu_clk_setup(struct device_node *node)
+{
+ struct cpu_clk *cpuclk;
+ void __iomem *clock_complex_base = of_iomap(node, 0);
+ int ncpus = 0;
+ struct device_node *dn;
+
+ if (clock_complex_base == NULL) {
+ pr_err("%s: clock-complex base register not set\n",
+ __func__);
+ return;
+ }
+
+ for_each_node_by_type(dn, "cpu")
+ ncpus++;
+
+ cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
+ if (WARN_ON(!cpuclk))
+ return;
+
+ clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
+ if (WARN_ON(!clks))
+ return;
+
+ for_each_node_by_type(dn, "cpu") {
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk *parent_clk;
+ char *clk_name = kzalloc(5, GFP_KERNEL);
+ int cpu, err;
+
+ if (WARN_ON(!clk_name))
+ return;
+
+ err = of_property_read_u32(dn, "reg", &cpu);
+ if (WARN_ON(err))
+ return;
+
+ sprintf(clk_name, "cpu%d", cpu);
+ parent_clk = of_clk_get(node, 0);
+
+ cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+ cpuclk[cpu].clk_name = clk_name;
+ cpuclk[cpu].cpu = cpu;
+ cpuclk[cpu].reg_base = clock_complex_base;
+ cpuclk[cpu].hw.init = &init;
+
+ init.name = cpuclk[cpu].clk_name;
+ init.ops = &cpu_ops;
+ init.flags = 0;
+ init.parent_names = &cpuclk[cpu].parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &cpuclk[cpu].hw);
+ if (WARN_ON(IS_ERR(clk)))
+ goto bail_out;
+ clks[cpu] = clk;
+ }
+ clk_data.clk_num = MAX_CPU;
+ clk_data.clks = clks;
+ of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+
+ return;
+bail_out:
+ kfree(clks);
+ kfree(cpuclk);
+}
+
+static const __initconst struct of_device_id clk_cpu_match[] = {
+ {
+ .compatible = "marvell,armada-xp-cpu-clock",
+ .data = of_cpu_clk_setup,
+ },
+ {
+ /* sentinel */
+ },
+};
+
+void __init mvebu_cpu_clk_init(void)
+{
+ of_clk_init(clk_cpu_match);
+}
diff --git a/drivers/clk/mvebu/clk-cpu.h b/drivers/clk/mvebu/clk-cpu.h
new file mode 100644
index 00000000000..08e2affba4e
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CPU_H
+#define __MVEBU_CLK_CPU_H
+
+#ifdef CONFIG_MVEBU_CLK_CPU
+void __init mvebu_cpu_clk_init(void);
+#else
+static inline void mvebu_cpu_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
new file mode 100644
index 00000000000..c6d3c263b07
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -0,0 +1,249 @@
+/*
+ * Marvell MVEBU clock gating control.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct mvebu_gating_ctrl {
+ spinlock_t lock;
+ struct clk **gates;
+ int num_gates;
+};
+
+struct mvebu_soc_descr {
+ const char *name;
+ const char *parent;
+ int bit_idx;
+};
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static struct clk __init *mvebu_clk_gating_get_src(
+ struct of_phandle_args *clkspec, void *data)
+{
+ struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
+ int n;
+
+ if (clkspec->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ struct clk_gate *gate =
+ to_clk_gate(__clk_get_hw(ctrl->gates[n]));
+ if (clkspec->args[0] == gate->bit_idx)
+ return ctrl->gates[n];
+ }
+ return ERR_PTR(-ENODEV);
+}
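+
+/*
+ * Consumers select a gate by its hardware bit index in the gating control
+ * register, e.g. a DT clock specifier of <&gateclk 5> would resolve to the
+ * "pex0" gate on Armada 370 according to the table below (the "gateclk"
+ * label is illustrative).
+ */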
+
+static void __init mvebu_clk_gating_setup(
+ struct device_node *np, const struct mvebu_soc_descr *descr)
+{
+ struct mvebu_gating_ctrl *ctrl;
+ struct clk *clk;
+ void __iomem *base;
+ const char *default_parent = NULL;
+ int n;
+
+ base = of_iomap(np, 0);
+
+ clk = of_clk_get(np, 0);
+ if (!IS_ERR(clk)) {
+ default_parent = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
+ if (WARN_ON(!ctrl))
+ return;
+
+ spin_lock_init(&ctrl->lock);
+
+ /*
+ * Count, allocate, and register clock gates
+ */
+ for (n = 0; descr[n].name;)
+ n++;
+
+ ctrl->num_gates = n;
+ ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!ctrl->gates)) {
+ kfree(ctrl);
+ return;
+ }
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ u8 flags = 0;
+ const char *parent =
+ (descr[n].parent) ? descr[n].parent : default_parent;
+
+ /*
+ * On Armada 370, the DDR clock is a special case: it
+ * isn't claimed by any driver, but must nevertheless be
+ * kept enabled, so we mark it as IGNORE_UNUSED for
+ * now.
+ */
+ if (!strcmp(descr[n].name, "ddr"))
+ flags |= CLK_IGNORE_UNUSED;
+
+ ctrl->gates[n] = clk_register_gate(NULL, descr[n].name, parent,
+ flags, base, descr[n].bit_idx, 0, &ctrl->lock);
+ WARN_ON(IS_ERR(ctrl->gates[n]));
+ }
+ of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
+}
+
+/*
+ * SoC specific clock gating control
+ */
+
+#ifdef CONFIG_MACH_ARMADA_370
+static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "pex0_en", NULL, 1 },
+ { "pex1_en", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 9 },
+ { "sata0", NULL, 15 },
+ { "sdio", NULL, 17 },
+ { "tdm", NULL, 25 },
+ { "ddr", NULL, 28 },
+ { "sata1", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "ge3", NULL, 1 },
+ { "ge2", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 6 },
+ { "pex2", NULL, 7 },
+ { "pex3", NULL, 8 },
+ { "bp", NULL, 13 },
+ { "sata0lnk", NULL, 14 },
+ { "sata0", "sata0lnk", 15 },
+ { "lcd", NULL, 16 },
+ { "sdio", NULL, 17 },
+ { "usb0", NULL, 18 },
+ { "usb1", NULL, 19 },
+ { "usb2", NULL, 20 },
+ { "xor0", NULL, 22 },
+ { "crypto", NULL, 23 },
+ { "tdm", NULL, 25 },
+ { "xor1", NULL, 28 },
+ { "sata1lnk", NULL, 29 },
+ { "sata1", "sata1lnk", 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+static const struct mvebu_soc_descr __initconst dove_gating_descr[] = {
+ { "usb0", NULL, 0 },
+ { "usb1", NULL, 1 },
+ { "ge", "gephy", 2 },
+ { "sata", NULL, 3 },
+ { "pex0", NULL, 4 },
+ { "pex1", NULL, 5 },
+ { "sdio0", NULL, 8 },
+ { "sdio1", NULL, 9 },
+ { "nand", NULL, 10 },
+ { "camera", NULL, 11 },
+ { "i2s0", NULL, 12 },
+ { "i2s1", NULL, 13 },
+ { "crypto", NULL, 15 },
+ { "ac97", NULL, 21 },
+ { "pdma", NULL, 22 },
+ { "xor0", NULL, 23 },
+ { "xor1", NULL, 24 },
+ { "gephy", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
+ { "ge0", NULL, 0 },
+ { "pex0", NULL, 2 },
+ { "usb0", NULL, 3 },
+ { "sdio", NULL, 4 },
+ { "tsu", NULL, 5 },
+ { "runit", NULL, 7 },
+ { "xor0", NULL, 8 },
+ { "audio", NULL, 9 },
+ { "sata0", NULL, 14 },
+ { "sata1", NULL, 15 },
+ { "xor1", NULL, 16 },
+ { "crypto", NULL, 17 },
+ { "pex1", NULL, 18 },
+ { "ge1", NULL, 19 },
+ { "tdm", NULL, 20 },
+ { }
+};
+#endif
+
+static const __initdata struct of_device_id clk_gating_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370
+ {
+ .compatible = "marvell,armada-370-gating-clock",
+ .data = armada_370_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+ {
+ .compatible = "marvell,armada-xp-gating-clock",
+ .data = armada_xp_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-gating-clock",
+ .data = dove_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-gating-clock",
+ .data = kirkwood_gating_descr,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_gating_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_gating_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_gating_match, np);
+ mvebu_clk_gating_setup(np,
+ (const struct mvebu_soc_descr *)match->data);
+ }
+}
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.h b/drivers/clk/mvebu/clk-gating-ctrl.h
new file mode 100644
index 00000000000..9275d1e51f1
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell EBU gating clock handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_GATING_H
+#define __MVEBU_CLK_GATING_H
+
+#ifdef CONFIG_MVEBU_CLK_GATING
+void __init mvebu_gating_clk_init(void);
+#else
+static inline void mvebu_gating_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk.c b/drivers/clk/mvebu/clk.c
new file mode 100644
index 00000000000..855681b8a9d
--- /dev/null
+++ b/drivers/clk/mvebu/clk.c
@@ -0,0 +1,27 @@
+/*
+ * Marvell EBU SoC clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include "clk-core.h"
+#include "clk-cpu.h"
+#include "clk-gating-ctrl.h"
+
+void __init mvebu_clocks_init(void)
+{
+ mvebu_core_clk_init();
+ mvebu_gating_clk_init();
+ mvebu_cpu_clk_init();
+}
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 147e25f0040..ed9af427861 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -20,6 +20,7 @@
#include <mach/spear.h>
#include "clk.h"
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
/* PLL related registers and bit values */
#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
/* PLL_CFG bit values */
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index 7d0e0258f20..6b889a0e90b 100644
--- a/drivers/clk/ux500/u8500_clk.c
+++ b/drivers/clk/ux500/u8500_clk.c
@@ -12,7 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
-
+#include <mach/db8500-regs.h>
#include "clk.h"
void u8500_clk_init(void)
@@ -160,12 +160,6 @@ void u8500_clk_init(void)
clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "uicc");
- /*
- * FIXME: The MTU clocks might need some kind of "parent muxed join"
- * and these have no K-clocks. For now, we ignore the missing
- * connection to the corresponding P-clocks, p6_mtu0_clk and
- * p6_mtu1_clk. Instead timclk is used which is the valid parent.
- */
clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "mtu0");
clk_register_clkdev(clk, NULL, "mtu1");
@@ -405,8 +399,11 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", U8500_CLKRST6_BASE,
BIT(6), 0);
+ clk_register_clkdev(clk, "apb_pclk", "mtu0");
+
clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", U8500_CLKRST6_BASE,
BIT(7), 0);
+ clk_register_clkdev(clk, "apb_pclk", "mtu1");
/* PRCC K-clocks
*
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a0985732f1e..7fdcbd3f4da 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -25,6 +25,21 @@ config ARMADA_370_XP_TIMER
config SUNXI_TIMER
bool
+config CLKSRC_NOMADIK_MTU
+ bool
+ depends on (ARCH_NOMADIK || ARCH_U8500)
+ select CLKSRC_MMIO
+ help
+ Support for the Multi Timer Unit. The MTU provides access
+ to multiple interrupt-generating, programmable 32-bit
+ free-running decrementing counters.
+
+config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
+ bool
+ depends on CLKSRC_NOMADIK_MTU
+ help
+ Use the Multi Timer Unit as the sched_clock.
+
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB8500
@@ -34,7 +49,7 @@ config CLKSRC_DBX500_PRCMU
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
- depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
default y
help
Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 36f06de4c5a..f93453d0167 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
diff --git a/arch/arm/plat-nomadik/timer.c b/drivers/clocksource/nomadik-mtu.c
index 9222e5522a4..8914c3c1c88 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/plat-nomadik/timer.c
- *
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
* Copyright (C) 2010 Linus Walleij for ST-Ericsson
@@ -14,9 +12,11 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
+#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
+#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
@@ -174,12 +174,18 @@ void nmdk_clksrc_reset(void)
mtu_base + MTU_CR(0));
}
-void __init nmdk_timer_init(void __iomem *base)
+void __init nmdk_timer_init(void __iomem *base, int irq)
{
unsigned long rate;
- struct clk *clk0;
+ struct clk *clk0, *pclk0;
mtu_base = base;
+
+ pclk0 = clk_get_sys("mtu0", "apb_pclk");
+ BUG_ON(IS_ERR(pclk0));
+ BUG_ON(clk_prepare(pclk0) < 0);
+ BUG_ON(clk_enable(pclk0) < 0);
+
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
BUG_ON(clk_prepare(clk0) < 0);
@@ -201,7 +207,8 @@ void __init nmdk_timer_init(void __iomem *base)
clk_prescale = MTU_CRn_PRESCALE_1;
}
- nmdk_cycle = (rate + HZ/2) / HZ;
+ /* Cycles for periodic mode */
+ nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ);
/* Timer 0 is the free running clocksource */
@@ -217,7 +224,7 @@ void __init nmdk_timer_init(void __iomem *base)
#endif
/* Timer 1 is used for events, register irq and clockevents */
- setup_irq(IRQ_MTU0, &nmdk_timer_irq);
+ setup_irq(irq, &nmdk_timer_irq);
nmdk_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
}
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4674f94957c..a4605fd7e30 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -167,7 +168,6 @@ void __init armada_370_xp_timer_init(void)
u32 u;
struct device_node *np;
unsigned int timer_clk;
- int ret;
np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
@@ -179,13 +179,14 @@ void __init armada_370_xp_timer_init(void)
timer_base + TIMER_CTRL_OFF);
timer_clk = 25000000;
} else {
- u32 clk = 0;
- ret = of_property_read_u32(np, "clock-frequency", &clk);
- WARN_ON(!clk || ret < 0);
+ unsigned long rate = 0;
+ struct clk *clk = of_clk_get(np, 0);
+ WARN_ON(IS_ERR(clk));
+ rate = clk_get_rate(clk);
u = readl(timer_base + TIMER_CTRL_OFF);
writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
timer_base + TIMER_CTRL_OFF);
- timer_clk = clk / TIMER_DIVIDER;
+ timer_clk = rate / TIMER_DIVIDER;
}
/* We use timer 0 as clocksource, and timer 1 for
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f6644f59fd9..87ec4d027c2 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -254,6 +254,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3
select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 0ce62573867..6c4c000671c 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/vio.h>
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -1014,26 +1013,23 @@ error_out:
* NOTIFY_BAD encoded with error number on failure, use
* notifier_to_errno() to decode this value
*/
-static int nx842_OF_notifier(struct notifier_block *np,
- unsigned long action,
- void *update)
+static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
+ void *update)
{
- struct pSeries_reconfig_prop_update *upd;
+ struct of_prop_reconfig *upd = update;
struct nx842_devdata *local_devdata;
struct device_node *node = NULL;
- upd = (struct pSeries_reconfig_prop_update *)update;
-
rcu_read_lock();
local_devdata = rcu_dereference(devdata);
if (local_devdata)
node = local_devdata->dev->of_node;
if (local_devdata &&
- action == PSERIES_UPDATE_PROPERTY &&
- !strcmp(upd->node->name, node->name)) {
+ action == OF_RECONFIG_UPDATE_PROPERTY &&
+ !strcmp(upd->dn->name, node->name)) {
rcu_read_unlock();
- nx842_OF_upd(upd->property);
+ nx842_OF_upd(upd->prop);
} else
rcu_read_unlock();
@@ -1182,7 +1178,7 @@ static int __init nx842_probe(struct vio_dev *viodev,
synchronize_rcu();
kfree(old_devdata);
- pSeries_reconfig_notifier_register(&nx842_of_nb);
+ of_reconfig_notifier_register(&nx842_of_nb);
ret = nx842_OF_upd(NULL);
if (ret && ret != -ENODEV) {
@@ -1228,7 +1224,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
- pSeries_reconfig_notifier_unregister(&nx842_of_nb);
+ of_reconfig_notifier_unregister(&nx842_of_nb);
rcu_assign_pointer(devdata, NULL);
spin_unlock_irqrestore(&devdata_mutex, flags);
synchronize_rcu();
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 638110efae9..f7a8a16aa7d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -33,7 +33,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 649a146e138..e66e8ee5a9a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -29,7 +29,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
number. For example 7:0 */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d76fe06b941..1d75e6f95a5 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,7 +37,7 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/irqs.h>
#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 410a03c01ca..c983f869d2b 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1863,6 +1863,7 @@ static int __devexit spacc_remove(struct platform_device *pdev)
static const struct platform_device_id spacc_id_table[] = {
{ "picochip,spacc-ipsec", },
{ "picochip,spacc-l2", },
+ { }
};
static struct platform_driver spacc_driver = {
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index a22714412cd..49ad8cbade6 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -30,7 +30,7 @@
#include <crypto/ctr.h>
#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index da1112765a4..09b184adf31 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -936,8 +936,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
- link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
- + cryptlen);
+ be16_add_cpu(&link_tbl_ptr->len, cryptlen);
/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index e69f3bc473b..eb32fd8cad1 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -672,8 +672,10 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
mutex_lock(&aes_lock);
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
ctx->dd = dd;
dd->ctx = ctx;
@@ -757,8 +759,10 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
aes_set_key(dd);
@@ -1029,7 +1033,7 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (IS_ERR(dd->aes_clk))
+ if (!IS_ERR(dd->aes_clk))
clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index bc615cc5626..8bc5fef07e7 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
@@ -30,8 +31,6 @@
#include <crypto/des.h>
#include <crypto/scatterwalk.h>
-#include <plat/ste_dma40.h>
-
#include <linux/platform_data/crypto-ux500.h>
#include <mach/hardware.h>
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 24225f0fdcd..64b048d7fba 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -228,6 +228,20 @@ static void dmatest_callback(void *arg)
wake_up_all(done->wait);
}
+static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
+}
+
+static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -353,15 +367,35 @@ static int dmatest_func(void *data)
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev->dev, dma_srcs[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "src_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ src_off, len);
+ failed_tests++;
+ continue;
+ }
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dma_dsts[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ dst_off, test_buf_size);
+ failed_tests++;
+ continue;
+ }
}
-
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off,
@@ -383,13 +417,8 @@ static int dmatest_func(void *data)
}
if (!tx) {
- for (i = 0; i < src_cnt; i++)
- dma_unmap_single(dev->dev, dma_srcs[i], len,
- DMA_TO_DEVICE);
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i],
- test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
@@ -443,9 +472,7 @@ static int dmatest_func(void *data)
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
error_count = 0;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d12ad00da4c..ac71f555dd7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
@@ -34,14 +37,14 @@
static void mv_xor_issue_pending(struct dma_chan *chan);
#define to_mv_xor_chan(chan) \
- container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev) \
- container_of(dev, struct mv_xor_device, common)
+ container_of(chan, struct mv_xor_chan, dmachan)
#define to_mv_xor_slot(tx) \
container_of(tx, struct mv_xor_desc_slot, async_tx)
+#define mv_chan_to_devp(chan) \
+ ((chan)->dmadev.dev)
+
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
- dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+ dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
op_mode = XOR_OPERATION_MODE_MEMSET;
break;
default:
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error: unsupported operation %d.\n",
- type);
+ dev_err(mv_chan_to_devp(chan),
+ "error: unsupported operation %d.\n",
+ type);
BUG();
return;
}
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
{
u32 activation;
- dev_dbg(chan->device->common.dev, " activate chan.\n");
+ dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
activation = __raw_readl(XOR_ACTIVATION(chan));
activation |= 0x1;
__raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *slot)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
__func__, __LINE__, slot);
slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *sw_desc)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
}
mv_chan->pending += sw_desc->slot_cnt;
- mv_xor_issue_pending(&mv_chan->common);
+ mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &mv_chan->device->pdev->dev;
+ struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
completed_node) {
@@ -369,7 +371,7 @@ static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
__func__, __LINE__, desc, desc->async_tx.flags);
list_del(&desc->chain_node);
/* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
- dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
mv_xor_clean_completed_slots(mv_chan);
/* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
}
if (cookie > 0)
- mv_chan->common.completed_cookie = cookie;
+ mv_chan->dmachan.completed_cookie = cookie;
}
static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
int new_hw_chain = 1;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
- struct mv_xor_platform_data *plat_data =
- mv_chan->device->pdev->dev.platform_data;
- int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+ int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
/* Allocate descriptor slots */
idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+ hw_desc = (char *) mv_chan->dma_desc_pool_virt;
slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->device->dma_desc_pool;
+ hw_desc = (char *) mv_chan->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot,
slot_node);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"allocated %d descriptor slots last_used: %p\n",
mv_chan->slots_allocated, mv_chan->last_used);
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x src %x len: %u flags: %ld\n",
__func__, dest, src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x len: %u flags: %ld\n",
__func__, dest, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
sw_desc->unmap_len = len;
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
__func__, src_cnt, len, dest, flags);
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
mv_chan->last_used = NULL;
- dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
__func__, mv_chan->slots_allocated);
spin_unlock_bh(&mv_chan->lock);
if (in_use_descs)
- dev_err(mv_chan->device->common.dev,
+ dev_err(mv_chan_to_devp(mv_chan),
"freeing %d in use descriptors!\n", in_use_descs);
}
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "config 0x%08x.\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "activation 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr cause 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr mask 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error cause 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error addr 0x%08x.\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
if (intr_cause & (1 << 4)) {
- dev_dbg(chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(chan),
"ignore this error\n");
return;
}
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error on chan %d. intr cause 0x%08x.\n",
- chan->idx, intr_cause);
+ dev_err(mv_chan_to_devp(chan),
+ "error on chan %d. intr cause 0x%08x.\n",
+ chan->idx, intr_cause);
mv_dump_xor_regs(chan);
BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
struct mv_xor_chan *chan = data;
u32 intr_cause = mv_chan_get_intr_cause(chan);
- dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+ dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
if (mv_is_err_intr(intr_cause))
mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
*/
#define MV_XOR_TEST_SIZE 2000
-static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
int i;
void *src, *dest;
@@ -910,7 +910,6 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
int err = 0;
- struct mv_xor_chan *mv_chan;
src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
if (!src)
@@ -926,10 +925,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
for (i = 0; i < MV_XOR_TEST_SIZE; i++)
((u8 *) src)[i] = (u8)i;
- /* Start copy, using first DMA channel */
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -950,18 +946,17 @@ static int mv_xor_memcpy_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy failed compare, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
@@ -976,7 +971,7 @@ out:
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
-mv_xor_xor_self_test(struct mv_xor_device *device)
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
int i, src_idx;
struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
- struct mv_xor_chan *mv_chan;
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
memset(page_address(dest), 0, PAGE_SIZE);
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ dev_err(dma_chan->device->dev,
+ "Self-test xor failed compare, disabling."
+ " index %d, data %x, expected %x\n", i,
+ ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1079,62 +1070,66 @@ out:
return err;
}
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
- struct mv_xor_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
- struct mv_xor_chan *mv_chan;
- struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+ struct device *dev = mv_chan->dmadev.dev;
- dma_async_device_unregister(&device->common);
+ dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(&dev->dev, plat_data->pool_size,
- device->dma_desc_pool_virt, device->dma_desc_pool);
+ dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
- list_for_each_entry_safe(chan, _chan, &device->common.channels,
- device_node) {
- mv_chan = to_mv_xor_chan(chan);
+ list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
+ device_node) {
list_del(&chan->device_node);
}
+ free_irq(mv_chan->irq, mv_chan);
+
return 0;
}
-static int mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+ struct platform_device *pdev,
+ int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
- int irq;
- struct mv_xor_device *adev;
struct mv_xor_chan *mv_chan;
struct dma_device *dma_dev;
- struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+ mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+ if (!mv_chan) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
- adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
- if (!adev)
- return -ENOMEM;
+ mv_chan->idx = idx;
+ mv_chan->irq = irq;
- dma_dev = &adev->common;
+ dma_dev = &mv_chan->dmadev;
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
- if (!adev->dma_desc_pool_virt)
- return -ENOMEM;
-
- adev->id = plat_data->hw_id;
+ mv_chan->dma_desc_pool_virt =
+ dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+ &mv_chan->dma_desc_pool, GFP_KERNEL);
+ if (!mv_chan->dma_desc_pool_virt)
+ return ERR_PTR(-ENOMEM);
/* discover transaction capabilities from the platform data */
- dma_dev->cap_mask = plat_data->cap_mask;
- adev->pdev = pdev;
- platform_set_drvdata(pdev, adev);
-
- adev->shared = platform_get_drvdata(plat_data->shared);
+ dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1143,6 +1138,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
+ dma_dev->device_control = mv_xor_control;
dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
- mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
- mv_chan->device = adev;
- mv_chan->idx = plat_data->hw_id;
- mv_chan->mmr_base = adev->shared->xor_base;
-
+ mv_chan->mmr_base = xordev->xor_base;
if (!mv_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_dma;
@@ -1174,14 +1162,8 @@ static int mv_xor_probe(struct platform_device *pdev)
/* clear errors before enabling interrupts */
mv_xor_device_clear_err_status(mv_chan);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_free_dma;
- }
- ret = devm_request_irq(&pdev->dev, irq,
- mv_xor_interrupt_handler,
- 0, dev_name(&pdev->dev), mv_chan);
+ ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+ 0, dev_name(&pdev->dev), mv_chan);
if (ret)
goto err_free_dma;
@@ -1193,26 +1175,26 @@ static int mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
- mv_chan->common.device = dma_dev;
- dma_cookie_init(&mv_chan->common);
+ mv_chan->dmachan.device = dma_dev;
+ dma_cookie_init(&mv_chan->dmachan);
- list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+ list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- ret = mv_xor_memcpy_self_test(adev);
+ ret = mv_xor_memcpy_self_test(mv_chan);
dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- ret = mv_xor_xor_self_test(adev);
+ ret = mv_xor_xor_self_test(mv_chan);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
- dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+ dev_info(&pdev->dev, "Marvell XOR: "
"( %s%s%s%s)\n",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1202,21 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
- goto out;
+ return mv_chan;
+err_free_irq:
+ free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
- adev->dma_desc_pool_virt, adev->dma_desc_pool);
- out:
- return ret;
+ dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+ return ERR_PTR(ret);
}
static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = msp->xor_base;
+ void __iomem *base = xordev->xor_base;
u32 win_enable = 0;
int i;
@@ -1258,99 +1241,176 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
-static struct platform_driver mv_xor_driver = {
- .probe = mv_xor_probe,
- .remove = mv_xor_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_NAME,
- },
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
- struct mv_xor_shared_private *msp;
+ struct mv_xor_device *xordev;
+ struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
+ int i, ret;
- dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell XOR driver\n");
- msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
- if (!msp)
+ xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+ if (!xordev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_base)
+ xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_base)
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
- msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_high_base)
+ xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_high_base)
return -EBUSY;
- platform_set_drvdata(pdev, msp);
+ platform_set_drvdata(pdev, xordev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
- mv_xor_conf_mbus_windows(msp, dram);
+ mv_xor_conf_mbus_windows(xordev, dram);
/* Not all platforms can gate the clock, so it is not
* an error if the clock does not exist.
*/
- msp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ xordev->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(xordev->clk))
+ clk_prepare_enable(xordev->clk);
+
+ if (pdev->dev.of_node) {
+ struct device_node *np;
+ int i = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ dma_cap_mask_t cap_mask;
+ int irq;
+
+ dma_cap_zero(cap_mask);
+ if (of_property_read_bool(np, "dmacap,memcpy"))
+ dma_cap_set(DMA_MEMCPY, cap_mask);
+ if (of_property_read_bool(np, "dmacap,xor"))
+ dma_cap_set(DMA_XOR, cap_mask);
+ if (of_property_read_bool(np, "dmacap,memset"))
+ dma_cap_set(DMA_MEMSET, cap_mask);
+ if (of_property_read_bool(np, "dmacap,interrupt"))
+ dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ xordev->channels[i] = NULL;
+ irq_dispose_mapping(irq);
+ goto err_channel_add;
+ }
+
+ i++;
+ }
+ } else if (pdata && pdata->channels) {
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_channel_data *cd;
+ int irq;
+
+ cd = &pdata->channels[i];
+ if (!cd) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ goto err_channel_add;
+ }
+ }
+ }
return 0;
+
+err_channel_add:
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+ if (xordev->channels[i]) {
+ if (pdev->dev.of_node)
+ irq_dispose_mapping(xordev->channels[i]->irq);
+ mv_xor_channel_remove(xordev->channels[i]);
+ }
+
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
+ return ret;
}
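For the legacy platform-data path above, each entry of pdata->channels[] only needs to carry a capability mask for mv_xor_channel_add(); the per-channel IRQs come from platform_get_irq(pdev, i). A minimal board-side sketch, using only the channels and cap_mask fields this patch actually dereferences (the array size, names and the omitted MMIO/IRQ resources are illustrative assumptions):

/* Editorial sketch, not part of the patch: platform data feeding the
 * pdata->channels[] loop in mv_xor_probe() above. */
static struct mv_xor_channel_data board_xor_channels[2];

static struct mv_xor_platform_data board_xor_pdata = {
	.channels = board_xor_channels,
};

static void __init board_xor_setup(void)
{
	/* channel 0: memcpy + xor capable; channel 1 left empty here */
	dma_cap_zero(board_xor_channels[0].cap_mask);
	dma_cap_set(DMA_MEMCPY, board_xor_channels[0].cap_mask);
	dma_cap_set(DMA_XOR, board_xor_channels[0].cap_mask);
}

The board would register this as pdev->dev.platform_data together with the two MMIO resources and one IRQ resource per described channel that the probe expects.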
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int mv_xor_remove(struct platform_device *pdev)
{
- struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+ struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ if (xordev->channels[i])
+ mv_xor_channel_remove(xordev->channels[i]);
+ }
- if (!IS_ERR(msp->clk)) {
- clk_disable_unprepare(msp->clk);
- clk_put(msp->clk);
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
}
return 0;
}
-static struct platform_driver mv_xor_shared_driver = {
- .probe = mv_xor_shared_probe,
- .remove = mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] = {
+ { .compatible = "marvell,orion-xor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+ .probe = mv_xor_probe,
+ .remove = mv_xor_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_SHARED_NAME,
+ .owner = THIS_MODULE,
+ .name = MV_XOR_NAME,
+ .of_match_table = of_match_ptr(mv_xor_dt_ids),
},
};
static int __init mv_xor_init(void)
{
- int rc;
-
- rc = platform_driver_register(&mv_xor_shared_driver);
- if (!rc) {
- rc = platform_driver_register(&mv_xor_driver);
- if (rc)
- platform_driver_unregister(&mv_xor_shared_driver);
- }
- return rc;
+ return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
@@ -1359,7 +1419,6 @@ module_init(mv_xor_init);
static void __exit mv_xor_exit(void)
{
platform_driver_unregister(&mv_xor_driver);
- platform_driver_unregister(&mv_xor_shared_driver);
return;
}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index a5b422f5a8a..c632a4761fc 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,8 +24,10 @@
#include <linux/interrupt.h>
#define USE_TIMER
+#define MV_XOR_POOL_SIZE PAGE_SIZE
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
+#define MV_XOR_MAX_CHANNELS 2
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
@@ -51,29 +53,13 @@
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
-struct mv_xor_shared_private {
- void __iomem *xor_base;
- void __iomem *xor_high_base;
- struct clk *clk;
-};
-
-
-/**
- * struct mv_xor_device - internal representation of a XOR device
- * @pdev: Platform device
- * @id: HW XOR Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
struct mv_xor_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
- struct mv_xor_shared_private *shared;
+ void __iomem *xor_base;
+ void __iomem *xor_high_base;
+ struct clk *clk;
+ struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
};
/**
@@ -96,11 +82,15 @@ struct mv_xor_chan {
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
unsigned int idx;
+ int irq;
enum dma_transaction_type current_type;
struct list_head chain;
struct list_head completed_slots;
- struct mv_xor_device *device;
- struct dma_chan common;
+ dma_addr_t dma_desc_pool;
+ void *dma_desc_pool_virt;
+ size_t pool_size;
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
struct mv_xor_desc_slot *last_used;
struct list_head all_slots;
int slots_allocated;
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7d35c237fbf..5a31264f2bd 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -19,8 +19,6 @@
#include "virt-dma.h"
-#include <plat-omap/dma-omap.h>
-
struct omap_dmadev {
struct dma_device ddev;
spinlock_t lock;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index ae55091c227..23c5573e62d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -19,8 +19,7 @@
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
-
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include "dmaengine.h"
#include "ste_dma40_ll.h"
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index cad9e1daedf..851ad56e840 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -6,7 +6,7 @@
*/
#include <linux/kernel.h>
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include "ste_dma40_ll.h"
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index bb82d6be793..4c6c876d9dc 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC || TILE || ARM
+ depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -27,6 +27,9 @@ menuconfig EDAC
There is also a mailing list for the EDAC project, which can
be found via the sourceforge page.
+config EDAC_SUPPORT
+ bool
+
if EDAC
comment "Reporting subsystems"
@@ -316,4 +319,32 @@ config EDAC_HIGHBANK_L2
Support for error detection and correction on the
Calxeda Highbank memory controller.
+config EDAC_OCTEON_PC
+ tristate "Cavium Octeon Primary Caches"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the primary caches of
+ the cnMIPS cores of Cavium Octeon family SOCs.
+
+config EDAC_OCTEON_L2C
+ tristate "Cavium Octeon Secondary Caches (L2C)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_LMC
+ tristate "Cavium Octeon DRAM Memory Controller (LMC)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_PCI
+ tristate "Cavium Octeon PCI Controller"
+ depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 7e5129a733f..5608a9ba61b 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -58,3 +58,8 @@ obj-$(CONFIG_EDAC_TILE) += tile_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
+
+obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o
+obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
+obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
+obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
new file mode 100644
index 00000000000..40fde6a51ed
--- /dev/null
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_MOD_STR "octeon-l2c"
+
+static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c)
+{
+ union cvmx_l2t_err l2t_err, l2t_err_reset;
+ union cvmx_l2d_err l2d_err, l2d_err_reset;
+
+ l2t_err_reset.u64 = 0;
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 0,
+ "Tag Single bit error (corrected)");
+ l2t_err_reset.s.sec_err = 1;
+ }
+ if (l2t_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 0,
+ "Tag Double bit error (detected)");
+ l2t_err_reset.s.ded_err = 1;
+ }
+ if (l2t_err_reset.u64)
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64);
+
+ l2d_err_reset.u64 = 0;
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ if (l2d_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 1,
+ "Data Single bit error (corrected)");
+ l2d_err_reset.s.sec_err = 1;
+ }
+ if (l2d_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 1,
+ "Data Double bit error (detected)");
+ l2d_err_reset.s.ded_err = 1;
+ }
+ if (l2d_err_reset.u64)
+ cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64);
+
+}
+
+static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad)
+{
+ union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset;
+ union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset;
+ char buf1[64];
+ char buf2[80];
+
+ err_tdtx_reset.u64 = 0;
+ err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
+ if (err_tdtx.s.dbe || err_tdtx.s.sbe ||
+ err_tdtx.s.vdbe || err_tdtx.s.vsbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx);
+
+ if (err_tdtx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vdbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.vdbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vsbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.vsbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64);
+
+ err_ttgx_reset.u64 = 0;
+ err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad));
+
+ if (err_ttgx.s.dbe || err_ttgx.s.sbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx);
+
+ if (err_ttgx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Double bit error (detected):%s", buf1);
+ err_ttgx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Single bit error (corrected):%s", buf1);
+ err_ttgx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64);
+}
+
+static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c)
+{
+ int i;
+ for (i = 0; i < l2c->nr_instances; i++)
+ _octeon_l2c_poll_oct2(l2c, i);
+}
+
+static int __devinit octeon_l2c_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c;
+
+ int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1;
+
+ /* 'Tags' are block 0, 'Data' is block 1 */
+ l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
+ NULL, 0, edac_device_alloc_index());
+ if (!l2c)
+ return -ENOMEM;
+
+ l2c->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l2c);
+ l2c->dev_name = dev_name(&pdev->dev);
+
+ l2c->mod_name = "octeon-l2c";
+ l2c->ctl_name = "octeon_l2c_err";
+
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_l2t_err l2t_err;
+ union cvmx_l2d_err l2d_err;
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.sec_intena = 0; /* We poll */
+ l2t_err.s.ded_intena = 0;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ l2d_err.s.sec_intena = 0; /* We poll */
+ l2d_err.s.ded_intena = 0;
+ cvmx_write_csr(CVMX_L2D_ERR, l2d_err.u64);
+
+ l2c->edac_check = octeon_l2c_poll_oct1;
+ } else {
+ /* OCTEON II */
+ l2c->edac_check = octeon_l2c_poll_oct2;
+ }
+
+ if (edac_device_add_device(l2c) > 0) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err;
+ }
+
+
+ return 0;
+
+err:
+ edac_device_free_ctl_info(l2c);
+
+ return -ENXIO;
+}
+
+static int octeon_l2c_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(l2c);
+
+ return 0;
+}
+
+static struct platform_driver octeon_l2c_driver = {
+ .probe = octeon_l2c_probe,
+ .remove = octeon_l2c_remove,
+ .driver = {
+ .name = "octeon_l2c_edac",
+ }
+};
+module_platform_driver(octeon_l2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
new file mode 100644
index 00000000000..33bca766e37
--- /dev/null
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -0,0 +1,186 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-lmcx-defs.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define OCTEON_MAX_MC 4
+
+static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_mem_cfg0 cfg0;
+ bool do_clear = false;
+ char msg[64];
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
+ if (cfg0.s.sec_err || cfg0.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
+ fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
+ }
+
+ if (cfg0.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (cfg0.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
+}
+
+static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_int int_reg;
+ bool do_clear = false;
+ char msg[64];
+
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ if (int_reg.s.sec_err || int_reg.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
+ fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
+ }
+
+ if (int_reg.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (int_reg.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+}
+
+static int __devinit octeon_lmc_edac_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ int mc = pdev->id;
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_lmcx_mem_cfg0 cfg0;
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
+ if (!cfg0.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "octeon-lmc-err";
+ mci->edac_check = octeon_lmc_edac_poll;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
+ cfg0.s.intr_ded_ena = 0; /* We poll */
+ cfg0.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
+ } else {
+ /* OCTEON II */
+ union cvmx_lmcx_int_en en;
+ union cvmx_lmcx_config config;
+
+ config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
+ if (!config.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "co_lmc_err";
+ mci->edac_check = octeon_lmc_edac_poll_o2;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ en.u64 = cvmx_read_csr(CVMX_LMCX_INT_EN(mc));
+ en.s.intr_ded_ena = 0; /* We poll */
+ en.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_INT_EN(mc), en.u64);
+ }
+ platform_set_drvdata(pdev, mci);
+
+ return 0;
+}
+
+static int octeon_lmc_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver octeon_lmc_edac_driver = {
+ .probe = octeon_lmc_edac_probe,
+ .remove = octeon_lmc_edac_remove,
+ .driver = {
+ .name = "octeon_lmc_edac",
+ }
+};
+module_platform_driver(octeon_lmc_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
new file mode 100644
index 00000000000..14a5e57f2b3
--- /dev/null
+++ b/drivers/edac/octeon_edac-pc.c
@@ -0,0 +1,143 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#include <asm/octeon/cvmx.h>
+#include <asm/mipsregs.h>
+
+extern int register_co_cache_error_notifier(struct notifier_block *nb);
+extern int unregister_co_cache_error_notifier(struct notifier_block *nb);
+
+extern unsigned long long cache_err_dcache[NR_CPUS];
+
+struct co_cache_error {
+ struct notifier_block notifier;
+ struct edac_device_ctl_info *ed;
+};
+
+/**
+ * co_cache_error_event - EDAC CPU cache error callback
+ *
+ * @event: non-zero if unrecoverable.
+ */
+static int co_cache_error_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct co_cache_error *p = container_of(this, struct co_cache_error,
+ notifier);
+
+ unsigned int core = cvmx_get_core_num();
+ unsigned int cpu = smp_processor_id();
+ u64 icache_err = read_octeon_c0_icacheerr();
+ u64 dcache_err;
+
+ if (event) {
+ dcache_err = cache_err_dcache[core];
+ cache_err_dcache[core] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ if (icache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)icache_err, core, cpu,
+ read_c0_errorepc());
+ write_octeon_c0_icacheerr(0);
+ edac_device_handle_ce(p->ed, cpu, 1, "icache");
+ }
+ if (dcache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)dcache_err, core, cpu,
+ read_c0_errorepc());
+ if (event)
+ edac_device_handle_ue(p->ed, cpu, 0, "dcache");
+ else
+ edac_device_handle_ce(p->ed, cpu, 0, "dcache");
+
+ /* Clear the error indication */
+ if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+ write_octeon_c0_dcacheerr(1);
+ else
+ write_octeon_c0_dcacheerr(0);
+ }
+
+ return NOTIFY_STOP;
+}
+
+static int __devinit co_cache_error_probe(struct platform_device *pdev)
+{
+ struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->notifier.notifier_call = co_cache_error_event;
+ platform_set_drvdata(pdev, p);
+
+ p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
+ "cache", 2, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!p->ed)
+ goto err;
+
+ p->ed->dev = &pdev->dev;
+
+ p->ed->dev_name = dev_name(&pdev->dev);
+
+ p->ed->mod_name = "octeon-cpu";
+ p->ed->ctl_name = "cache";
+
+ if (edac_device_add_device(p->ed)) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err1;
+ }
+
+ register_co_cache_error_notifier(&p->notifier);
+
+ return 0;
+
+err1:
+ edac_device_free_ctl_info(p->ed);
+err:
+ return -ENXIO;
+}
+
+static int co_cache_error_remove(struct platform_device *pdev)
+{
+ struct co_cache_error *p = platform_get_drvdata(pdev);
+
+ unregister_co_cache_error_notifier(&p->notifier);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(p->ed);
+ return 0;
+}
+
+static struct platform_driver co_cache_error_driver = {
+ .probe = co_cache_error_probe,
+ .remove = co_cache_error_remove,
+ .driver = {
+ .name = "octeon_pc_edac",
+ }
+};
+module_platform_driver(co_cache_error_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
new file mode 100644
index 00000000000..758c1ef5fc9
--- /dev/null
+++ b/drivers/edac/octeon_edac-pci.c
@@ -0,0 +1,111 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/octeon.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
+{
+ union cvmx_pci_cfg01 cfg01;
+
+ cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01);
+ if (cfg01.s.dpe) { /* Detected parity error */
+ edac_pci_handle_pe(pci, pci->ctl_name);
+ cfg01.s.dpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sse) {
+ edac_pci_handle_npe(pci, "Signaled System Error");
+ cfg01.s.sse = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rma) {
+ edac_pci_handle_npe(pci, "Received Master Abort");
+ cfg01.s.rma = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rta) {
+ edac_pci_handle_npe(pci, "Received Target Abort");
+ cfg01.s.rta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sta) {
+ edac_pci_handle_npe(pci, "Signaled Target Abort");
+ cfg01.s.sta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.mdpe) {
+ edac_pci_handle_npe(pci, "Master Data Parity Error");
+ cfg01.s.mdpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+}
+
+static int __devinit octeon_pci_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ int res = 0;
+
+ pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pci);
+ pci->dev_name = dev_name(&pdev->dev);
+
+ pci->mod_name = "octeon-pci";
+ pci->ctl_name = "octeon_pci_err";
+ pci->edac_check = octeon_pci_poll;
+
+ if (edac_pci_add_device(pci, 0) > 0) {
+ pr_err("%s: edac_pci_add_device() failed\n", __func__);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ edac_pci_free_ctl_info(pci);
+
+ return res;
+}
+
+static int octeon_pci_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ edac_pci_del_device(&pdev->dev);
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver octeon_pci_driver = {
+ .probe = octeon_pci_probe,
+ .remove = octeon_pci_remove,
+ .driver = {
+ .name = "octeon_pci_edac",
+ }
+};
+module_platform_driver(octeon_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/eisa/eisa.ids b/drivers/eisa/eisa.ids
index 6cbb7a51443..2d864b48a48 100644
--- a/drivers/eisa/eisa.ids
+++ b/drivers/eisa/eisa.ids
@@ -985,8 +985,8 @@ ISABD00 "Racal-Interlan NP600A Ethernet 16bit"
ISABD02 "Racal-Interlan NI5210/8 Ethernet"
ISABD03 "Racal-Interlan NI5210/16 Ethernet"
ISABE00 "Qua Tech PXB-1608 Parallel Expansion Board"
-ISABE01 "Qua Tech ES-100 8 Channel Asyncronous"
-ISABE02 "Qua Tech QS-100M 4 Channel Asyncronous"
+ISABE01 "Qua Tech ES-100 8 Channel Asynchronous"
+ISABE02 "Qua Tech QS-100M 4 Channel Asynchronous"
ISABE03 "Qua Tech MXI-100 IEEE 488 GPIB"
ISABE04 "Qua Tech DS-201 Dual Channel RS-422"
ISABE05 "Qua Tech PXB-721 Parallel Expansion"
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index f10f05d4ee9..414aed50b1b 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -166,6 +166,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+ mutex_unlock(&info->lock);
return IRQ_NONE;
}
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index d398821097f..60adc04b056 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -472,7 +472,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
if (obj->cable_index < 0)
- return -ENODEV;
+ return obj->cable_index;
obj->user_nb = nb;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index b656dfa401a..8c17b65eb74 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -657,17 +657,17 @@ static int max77693_muic_probe(struct platform_device *pdev)
int ret, i;
u8 id;
- info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
info->max77693 = max77693;
- if (info->max77693->regmap_muic)
+ if (info->max77693->regmap_muic) {
dev_dbg(&pdev->dev, "allocate register map\n");
- else {
+ } else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
info->max77693->muic,
&max77693_muic_regmap_config);
@@ -675,7 +675,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = PTR_ERR(info->max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ return ret;
}
}
platform_set_drvdata(pdev, info);
@@ -686,11 +686,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max77693_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
ret = request_threaded_irq(virq, NULL,
@@ -702,14 +704,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
" error :%d)\n",
muic_irq->irq, ret);
- for (i = i - 1; i >= 0; i--)
- free_irq(muic_irq->virq, info);
goto err_irq;
}
}
/* Initialize extcon device */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -720,7 +721,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize MUIC register by using platform data */
@@ -753,7 +754,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_extcon;
+ goto err_irq;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
@@ -765,12 +766,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
-err_regmap:
- kfree(info);
-err_kfree:
+ while (--i >= 0)
+ free_irq(muic_irqs[i].virq, info);
return ret;
}
@@ -783,8 +781,6 @@ static int max77693_muic_remove(struct platform_device *pdev)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
return 0;
}
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index bad76f51161..93009fe6ef0 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,7 +1,7 @@
/*
* extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Donggeun Kim <dg77.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -433,11 +433,11 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_muic_info *info;
int ret, i;
- info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
@@ -450,14 +450,16 @@ static int max8997_muic_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL,max8997_muic_irq_handler,
+ ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
0, muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
@@ -469,7 +471,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
/* External connector */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -480,7 +483,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize registers according to platform data */
@@ -498,13 +501,9 @@ static int max8997_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
- kfree(info);
-err_kfree:
return ret;
}
@@ -519,9 +518,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
-
return 0;
}
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index a9a347adb35..2cc89ce745c 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -149,10 +149,10 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
- /* Accept asyncronous transfer requests from all nodes for now */
+ /* Accept asynchronous transfer requests from all nodes for now */
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
- /* Specify asyncronous transfer retries */
+ /* Specify asynchronous transfer retries */
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES<<4) |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 08c674957af..e7a711f53a6 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -828,7 +828,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
- struct fw_card *card;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
@@ -840,7 +839,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
unsigned long flags;
dev = data;
- card = dev->card;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
@@ -861,8 +859,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length,
- source_node_id, -1, true);
+ fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ context->card->generation, true);
}
packet.payload_length = dev->rcv_buffer_size;
@@ -958,7 +956,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
break;
}
- skb_pull(skb, ptask->max_payload);
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ skb_pull(skb,
+ ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
+ } else {
+ skb_pull(skb, ptask->max_payload);
+ }
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
dg_size, fg_off, datagram_label);
@@ -1062,7 +1065,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
smp_rmb();
node_id = dev->card->node_id;
- p = skb_push(ptask->skb, 8);
+ p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
| RFC2734_SW_VERSION, &p[4]);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index f25610bb314..6ce6e07c38c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1281,7 +1281,7 @@ static int at_context_queue_packet(struct context *ctx,
d[0].res_count = cpu_to_le16(packet->timestamp);
/*
- * The DMA format for asyncronous link packets is different
+ * The DMA format for asynchronous link packets is different
* from the IEEE1394 layout, so shift the fields around
* accordingly.
*/
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index bb1b392f5cd..1162d6b3bf8 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1546,8 +1546,6 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index b298158cb92..fd3ae6290d7 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -16,6 +16,7 @@
*/
static char dmi_empty_string[] = " ";
+static u16 __initdata dmi_ver;
/*
* Catch too early calls to dmi_check_system():
*/
@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
return 0;
}
-static int __init dmi_checksum(const u8 *buf)
+static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
- for (a = 0; a < 15; a++)
+ for (a = 0; a < len; a++)
sum += buf[a];
return sum == 0;
@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
return;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
- if(d[i] != 0x00) is_ff = 0;
- if(d[i] != 0xFF) is_00 = 0;
+ if (d[i] != 0x00)
+ is_00 = 0;
+ if (d[i] != 0xFF)
+ is_ff = 0;
}
if (is_ff || is_00)
@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
if (!s)
return;
- sprintf(s, "%pUB", d);
+ /*
+ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+ * the UUID are supposed to be little-endian encoded. The specification
+ * says that this is the defacto standard.
+ */
+ if (dmi_ver >= 0x0206)
+ sprintf(s, "%pUL", d);
+ else
+ sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
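Since the only difference between the two branches above is the byte order of the first three UUID fields, a worked illustration (editorial, not part of the patch):

/*
 * The same 16 raw bytes from the SMBIOS table, rendered both ways:
 *
 *   raw:    00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff
 *   "%pUB": 00112233-4455-6677-8899-AABBCCDDEEFF
 *   "%pUL": 33221100-5544-7766-8899-AABBCCDDEEFF
 *
 * Only the leading 4-, 2- and 2-byte fields are swapped for the
 * little-endian interpretation required from SMBIOS 2.6 onwards.
 */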
@@ -404,29 +415,57 @@ static int __init dmi_present(const char __iomem *p)
u8 buf[15];
memcpy_fromio(buf, p, 15);
- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ if (dmi_checksum(buf, 15)) {
dmi_num = (buf[13] << 8) | buf[12];
dmi_len = (buf[7] << 8) | buf[6];
dmi_base = (buf[11] << 24) | (buf[10] << 16) |
(buf[9] << 8) | buf[8];
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if (buf[14] != 0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14] >> 4, buf[14] & 0xF);
- else
- printk(KERN_INFO "DMI present.\n");
if (dmi_walk_early(dmi_decode) == 0) {
+ if (dmi_ver)
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ else {
+ dmi_ver = (buf[14] & 0xF0) << 4 |
+ (buf[14] & 0x0F);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ }
dmi_dump_ids();
return 0;
}
}
+ dmi_ver = 0;
return 1;
}
+static int __init smbios_present(const char __iomem *p)
+{
+ u8 buf[32];
+ int offset = 0;
+
+ memcpy_fromio(buf, p, 32);
+ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+ dmi_ver = (buf[6] << 8) + buf[7];
+
+ /* Some BIOS report weird SMBIOS version, fix that up */
+ switch (dmi_ver) {
+ case 0x021F:
+ case 0x0221:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
+ dmi_ver & 0xFF, 3);
+ dmi_ver = 0x0203;
+ break;
+ case 0x0233:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
+ dmi_ver = 0x0206;
+ break;
+ }
+ offset = 16;
+ }
+ return dmi_present(buf + offset);
+}
+
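smbios_present() is easier to follow next to the entry-point bytes it consumes; the offsets below are the ones the code above actually reads (editorial note, the remaining SMBIOS 2.x fields are omitted):

/*
 * Layout of the 32 bytes copied by smbios_present(), as used above:
 *
 *   buf[0..3]   "_SM_" anchor (checked by the scan in dmi_scan_machine())
 *   buf[5]      entry point length, i.e. the range fed to dmi_checksum()
 *   buf[6..7]   SMBIOS major/minor version, combined into dmi_ver
 *   buf[16..]   legacy "_DMI_" intermediate block, handed to dmi_present()
 */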
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
if (p == NULL)
goto error;
- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+ rc = smbios_present(p);
dmi_iounmap(p, 32);
if (!rc) {
dmi_available = 1;
@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
goto error;
for (q = p; q < p + 0x10000; q += 16) {
- rc = dmi_present(q);
+ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
+ rc = smbios_present(q);
+ else if (memcmp(q, "_DMI_", 5) == 0)
+ rc = dmi_present(q);
+ else
+ continue;
if (!rc) {
dmi_available = 1;
dmi_iounmap(p, 0x10000);
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 6e51c1e81f1..7b1c37497c9 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -80,6 +80,10 @@
#include <linux/slab.h>
#include <linux/pstore.h>
+#include <linux/fs.h>
+#include <linux/ramfs.h>
+#include <linux/pagemap.h>
+
#include <asm/uaccess.h>
#define EFIVARS_VERSION "0.08"
@@ -93,6 +97,12 @@ MODULE_VERSION(EFIVARS_VERSION);
#define DUMP_NAME_LEN 52
/*
+ * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
+ * not including the trailing NUL
+ */
+#define GUID_LEN 36
+
+/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
* space in each part of the structure,
@@ -108,7 +118,6 @@ struct efi_variable {
__u32 Attributes;
} __attribute__((packed));
-
struct efivar_entry {
struct efivars *efivars;
struct efi_variable var;
@@ -122,6 +131,9 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+static struct efivars __efivars;
+static struct efivar_operations ops;
+
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
@@ -629,14 +641,481 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
-static struct pstore_info efi_pstore_info;
-
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ssize_t efivarfs_file_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars;
+ efi_status_t status;
+ void *data;
+ u32 attributes;
+ struct inode *inode = file->f_mapping->host;
+ unsigned long datasize = count - sizeof(attributes);
+ unsigned long newdatasize;
+ u64 storage_size, remaining_size, max_size;
+ ssize_t bytes = 0;
+
+ if (count < sizeof(attributes))
+ return -EINVAL;
+
+ if (copy_from_user(&attributes, userbuf, sizeof(attributes)))
+ return -EFAULT;
+
+ if (attributes & ~(EFI_VARIABLE_MASK))
+ return -EINVAL;
+
+ efivars = var->efivars;
+
+ /*
+ * Ensure that the user can't allocate arbitrarily large
+ * amounts of memory. Pick a default size of 64K if
+ * QueryVariableInfo() isn't supported by the firmware.
+ */
+ spin_lock(&efivars->lock);
+
+ if (!efivars->ops->query_variable_info)
+ status = EFI_UNSUPPORTED;
+ else {
+ const struct efivar_operations *fops = efivars->ops;
+ status = fops->query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ }
+
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ if (status != EFI_UNSUPPORTED)
+ return efi_status_to_err(status);
+
+ remaining_size = 65536;
+ }
+
+ if (datasize > remaining_size)
+ return -ENOSPC;
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
+ bytes = -EFAULT;
+ goto out;
+ }
+
+ if (validate_var(&var->var, data, datasize) == false) {
+ bytes = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The lock here protects the get_variable call, the conditional
+ * set_variable call, and removal of the variable from the efivars
+ * list (in the case of an authenticated delete).
+ */
+ spin_lock(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ attributes, datasize,
+ data);
+
+ if (status != EFI_SUCCESS) {
+ spin_unlock(&efivars->lock);
+ kfree(data);
+
+ return efi_status_to_err(status);
+ }
+
+ bytes = count;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ newdatasize = 0;
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ NULL, &newdatasize,
+ NULL);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ spin_unlock(&efivars->lock);
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, newdatasize + sizeof(attributes));
+ mutex_unlock(&inode->i_mutex);
+
+ } else if (status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(inode);
+ dput(file->f_dentry);
+
+ } else {
+ spin_unlock(&efivars->lock);
+ pr_warn("efivarfs: inconsistent EFI variable implementation? "
+ "status = %lx\n", status);
+ }
+
+out:
+ kfree(data);
+
+ return bytes;
+}
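/*
 * Editorial sketch (not part of the patch above): what the write path above
 * expects from userspace, namely a 4-byte attribute word immediately
 * followed by the variable data in a single write(). The file path is a
 * placeholder and 0x7 is simply NON_VOLATILE | BOOTSERVICE_ACCESS |
 * RUNTIME_ACCESS from the UEFI specification.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "Test-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	const char payload[] = "hello";
	uint32_t attributes = 0x7;
	char buf[sizeof(attributes) + sizeof(payload)];
	int fd;

	memcpy(buf, &attributes, sizeof(attributes));
	memcpy(buf + sizeof(attributes), payload, sizeof(payload));

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		close(fd);
		return 1;
	}
	return close(fd);
}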
+
+static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+ unsigned long datasize = 0;
+ u32 attributes;
+ void *data;
+ ssize_t size = 0;
+
+ spin_lock(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize, NULL);
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize,
+ (data + sizeof(attributes)));
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ size = efi_status_to_err(status);
+ goto out_free;
+ }
+
+ memcpy(data, &attributes, sizeof(attributes));
+ size = simple_read_from_buffer(userbuf, count, ppos,
+ data, datasize + sizeof(attributes));
+out_free:
+ kfree(data);
+
+ return size;
+}
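/*
 * Editorial sketch (not part of the patch above): reads mirror the write
 * format, returning the 4-byte attribute word followed by the variable data.
 * The path is a placeholder; 1024 matches the VariableName + Data limit
 * mentioned further up in this file.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "Test-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	uint32_t attributes;
	char buf[sizeof(attributes) + 1024];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	if (n < (ssize_t)sizeof(attributes)) {
		close(fd);
		return 1;
	}
	memcpy(&attributes, buf, sizeof(attributes));
	printf("attributes=%#x, %zd data bytes\n",
	       (unsigned int)attributes, n - (ssize_t)sizeof(attributes));
	return close(fd);
}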
+
+static void efivarfs_evict_inode(struct inode *inode)
+{
+ clear_inode(inode);
+}
+
+static const struct super_operations efivarfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .evict_inode = efivarfs_evict_inode,
+ .show_options = generic_show_options,
+};
+
+static struct super_block *efivarfs_sb;
+
+static const struct inode_operations efivarfs_dir_inode_operations;
+
+static const struct file_operations efivarfs_file_operations = {
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .llseek = no_llseek,
+};
+
+static struct inode *efivarfs_get_inode(struct super_block *sb,
+ const struct inode *dir, int mode, dev_t dev)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_fop = &efivarfs_file_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &efivarfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ break;
+ }
+ }
+ return inode;
+}
+
+static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+{
+ guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
+ guid->b[1] = hex_to_bin(str[4]) << 4 | hex_to_bin(str[5]);
+ guid->b[2] = hex_to_bin(str[2]) << 4 | hex_to_bin(str[3]);
+ guid->b[3] = hex_to_bin(str[0]) << 4 | hex_to_bin(str[1]);
+ guid->b[4] = hex_to_bin(str[11]) << 4 | hex_to_bin(str[12]);
+ guid->b[5] = hex_to_bin(str[9]) << 4 | hex_to_bin(str[10]);
+ guid->b[6] = hex_to_bin(str[16]) << 4 | hex_to_bin(str[17]);
+ guid->b[7] = hex_to_bin(str[14]) << 4 | hex_to_bin(str[15]);
+ guid->b[8] = hex_to_bin(str[19]) << 4 | hex_to_bin(str[20]);
+ guid->b[9] = hex_to_bin(str[21]) << 4 | hex_to_bin(str[22]);
+ guid->b[10] = hex_to_bin(str[24]) << 4 | hex_to_bin(str[25]);
+ guid->b[11] = hex_to_bin(str[26]) << 4 | hex_to_bin(str[27]);
+ guid->b[12] = hex_to_bin(str[28]) << 4 | hex_to_bin(str[29]);
+ guid->b[13] = hex_to_bin(str[30]) << 4 | hex_to_bin(str[31]);
+ guid->b[14] = hex_to_bin(str[32]) << 4 | hex_to_bin(str[33]);
+ guid->b[15] = hex_to_bin(str[34]) << 4 | hex_to_bin(str[35]);
+}
+
+static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ struct inode *inode;
+ struct efivars *efivars = &__efivars;
+ struct efivar_entry *var;
+ int namelen, i = 0, err = 0;
+
+ /*
+ * We need a GUID, plus at least one letter for the variable name,
+ * plus the '-' separator
+ */
+ if (dentry->d_name.len < GUID_LEN + 2)
+ return -EINVAL;
+
+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+ if (!inode)
+ return -ENOMEM;
+
+ var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+ if (!var) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* length of the variable name itself: remove GUID and separator */
+ namelen = dentry->d_name.len - GUID_LEN - 1;
+
+ efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ &var->var.VendorGuid);
+
+ for (i = 0; i < namelen; i++)
+ var->var.VariableName[i] = dentry->d_name.name[i];
+
+ var->var.VariableName[i] = '\0';
+
+ inode->i_private = var;
+ var->efivars = efivars;
+ var->kobj.kset = efivars->kset;
+
+ err = kobject_init_and_add(&var->kobj, &efivar_ktype, NULL, "%s",
+ dentry->d_name.name);
+ if (err)
+ goto out;
+
+ kobject_uevent(&var->kobj, KOBJ_ADD);
+ spin_lock(&efivars->lock);
+ list_add(&var->list, &efivars->list);
+ spin_unlock(&efivars->lock);
+ d_instantiate(dentry, inode);
+ dget(dentry);
+out:
+ if (err) {
+ kfree(var);
+ iput(inode);
+ }
+ return err;
+}
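/*
 * Editorial sketch (not part of the patch above): the name/GUID arithmetic in
 * efivarfs_create(), worked for a plausible file name. "Boot0000" and the
 * EFI global-variable GUID are only an example; GUID_LEN is 36 as defined
 * earlier in this file.
 */
#include <assert.h>
#include <string.h>

#define GUID_LEN 36

int main(void)
{
	const char *name = "Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	size_t len = strlen(name);		/* 45 = 8 + 1 + 36      */
	size_t namelen = len - GUID_LEN - 1;	/* 8, i.e. "Boot0000"   */

	assert(len >= GUID_LEN + 2);		/* the d_name.len check */
	assert(namelen == 8);
	/* The GUID portion handed to efivarfs_hex_to_guid() starts here. */
	assert(strcmp(name + namelen + 1,
		      "8be4df61-93ca-11d2-aa0d-00e098032b8c") == 0);
	return 0;
}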
+
+static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct efivar_entry *var = dentry->d_inode->i_private;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+
+ spin_lock(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ 0, 0, NULL);
+
+ if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(dir);
+ dput(dentry);
+ return 0;
+ }
+
+ spin_unlock(&efivars->lock);
+ return -EINVAL;
+};
+
+static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct efivar_entry *entry, *n;
+ struct efivars *efivars = &__efivars;
+ char *name;
+
+ efivarfs_sb = sb;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = EFIVARFS_MAGIC;
+ sb->s_op = &efivarfs_ops;
+ sb->s_time_gran = 1;
+
+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_op = &efivarfs_dir_inode_operations;
+
+ root = d_make_root(inode);
+ sb->s_root = root;
+ if (!root)
+ return -ENOMEM;
+
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+ struct dentry *dentry, *root = efivarfs_sb->s_root;
+ unsigned long size = 0;
+ int len, i;
+
+ inode = NULL;
+
+ len = utf16_strlen(entry->var.VariableName);
+
+ /* name, plus '-', plus GUID, plus NUL */
+ name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
+ if (!name)
+ goto fail;
+
+ for (i = 0; i < len; i++)
+ name[i] = entry->var.VariableName[i] & 0xFF;
+
+ name[len] = '-';
+
+ efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
+
+ name[len+GUID_LEN+1] = '\0';
+
+ inode = efivarfs_get_inode(efivarfs_sb, root->d_inode,
+ S_IFREG | 0644, 0);
+ if (!inode)
+ goto fail_name;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_inode;
+
+ /* d_alloc_name() copied the name into the dentry; free our local copy. */
+ kfree(name);
+
+ spin_lock(&efivars->lock);
+ efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ &entry->var.Attributes,
+ &size,
+ NULL);
+ spin_unlock(&efivars->lock);
+
+ mutex_lock(&inode->i_mutex);
+ inode->i_private = entry;
+ i_size_write(inode, size+4);
+ mutex_unlock(&inode->i_mutex);
+ d_add(dentry, inode);
+ }
+
+ return 0;
+
+fail_inode:
+ iput(inode);
+fail_name:
+ kfree(name);
+fail:
+ return -ENOMEM;
+}
+
+static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_single(fs_type, flags, data, efivarfs_fill_super);
+}
+
+static void efivarfs_kill_sb(struct super_block *sb)
+{
+ kill_litter_super(sb);
+ efivarfs_sb = NULL;
+}
+
+static struct file_system_type efivarfs_type = {
+ .name = "efivarfs",
+ .mount = efivarfs_mount,
+ .kill_sb = efivarfs_kill_sb,
+};
+
+static const struct inode_operations efivarfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = efivarfs_unlink,
+ .create = efivarfs_create,
+};
+
+static struct pstore_info efi_pstore_info;
+
#ifdef CONFIG_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
@@ -1065,11 +1544,18 @@ efivar_create_sysfs_entry(struct efivars *efivars,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid)
{
- int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
+ int i, short_name_size;
char *short_name;
struct efivar_entry *new_efivar;
- short_name = kzalloc(short_name_size + 1, GFP_KERNEL);
+ /*
+ * Length of the variable name in ASCII, plus the '-' separator,
+ * plus the GUID, plus the trailing NUL
+ */
+ short_name_size = variable_name_size / sizeof(efi_char16_t)
+ + 1 + GUID_LEN + 1;
+
+ short_name = kzalloc(short_name_size, GFP_KERNEL);
new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
if (!short_name || !new_efivar) {
@@ -1189,6 +1675,7 @@ void unregister_efivars(struct efivars *efivars)
sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
kfree(efivars->new_var);
kfree(efivars->del_var);
+ kobject_put(efivars->kobject);
kset_unregister(efivars->kset);
}
EXPORT_SYMBOL_GPL(unregister_efivars);
@@ -1220,6 +1707,14 @@ int register_efivars(struct efivars *efivars,
goto out;
}
+ efivars->kobject = kobject_create_and_add("efivars", parent_kobj);
+ if (!efivars->kobject) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ error = -ENOMEM;
+ kset_unregister(efivars->kset);
+ goto out;
+ }
+
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
@@ -1262,6 +1757,8 @@ int register_efivars(struct efivars *efivars,
pstore_register(&efivars->efi_pstore_info);
}
+ register_filesystem(&efivarfs_type);
+
out:
kfree(variable_name);
@@ -1269,9 +1766,6 @@ out:
}
EXPORT_SYMBOL_GPL(register_efivars);
-static struct efivars __efivars;
-static struct efivar_operations ops;
-
/*
* For now we register the efi subsystem with the firmware subsystem
* and the vars subsystem with the efi subsystem. In the future, it
@@ -1302,6 +1796,7 @@ efivars_init(void)
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
ops.query_variable_info = efi.query_variable_info;
+
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
goto err_put;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 14a6c2913e4..682de754d63 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -171,7 +171,8 @@ config GPIO_MSM_V2
config GPIO_MVEBU
def_bool y
- depends on ARCH_MVEBU
+ depends on PLAT_ORION
+ depends on OF
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -504,7 +505,7 @@ config GPIO_ADNP
help
This option enables support for N GPIOs found on Avionic Design
I2C GPIO expanders. The register space will be extended by powers
- of two, so the controller will need to accomodate for that. For
+ of two, so the controller will need to accommodate for that. For
example: if a controller provides 48 pins, 6 registers will be
enough to represent all pins, but the driver will assume a
register layout for 64 pins (8 registers).
@@ -683,4 +684,17 @@ config GPIO_MSIC
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
+comment "USB GPIO expanders:"
+
+config GPIO_VIPERBOARD
+ tristate "Viperboard GPIO a & b support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the GPIO signals of Nano River
+ Technologies Viperboard. There are two GPIO chips on the
+ board: gpioa and gpiob.
+ See the viperboard API specification and Nano
+ River Tech's viperboard.h for the detailed meaning
+ of the module parameters.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 76b34468325..c5aebd008dd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VT8500) += gpio-vt8500.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index a05aacd2777..29b11e9b6a7 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -185,7 +185,11 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
struct da9052_gpio *gpio = to_da9052_gpio(gc);
struct da9052 *da9052 = gpio->da9052;
- return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+ int irq;
+
+ irq = regmap_irq_get_virq(da9052->irq_data, DA9052_IRQ_GPI0 + offset);
+
+ return irq;
}
static struct gpio_chip reference_gp = {
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6cc87ac8e01..6f2306db859 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -390,6 +390,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
err = ichx_gpio_request_regions(res_base, pdev->name,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d767b534c4a..7d9bd94be8d 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -41,7 +41,6 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
/*
@@ -469,19 +468,6 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
-static struct platform_device_id mvebu_gpio_ids[] = {
- {
- .name = "orion-gpio",
- }, {
- .name = "mv78200-gpio",
- }, {
- .name = "armadaxp-gpio",
- }, {
- /* sentinel */
- },
-};
-MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);
-
static struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
@@ -555,9 +541,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = 0;
-#ifdef CONFIG_OF
mvchip->chip.of_node = np;
-#endif
spin_lock_init(&mvchip->lock);
mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
@@ -698,7 +682,6 @@ static struct platform_driver mvebu_gpio_driver = {
.of_match_table = mvebu_gpio_of_match,
},
.probe = mvebu_gpio_probe,
- .id_table = mvebu_gpio_ids,
};
static int __init mvebu_gpio_init(void)
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index a006f0db15a..01f7fe95559 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -42,12 +42,6 @@
#include <plat/gpio-fns.h>
#include <plat/pm.h>
-#ifndef DEBUG_GPIO
-#define gpio_dbg(x...) do { } while (0)
-#else
-#define gpio_dbg(x...) printk(KERN_DEBUG x)
-#endif
-
int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
unsigned int off, samsung_gpio_pull_t pull)
{
@@ -596,10 +590,13 @@ static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
unsigned long con;
con = __raw_readl(base + GPIOCON_OFF);
- con &= ~(0xf << con_4bit_shift(offset));
+ if (ourchip->bitmap_gpio_int & BIT(offset))
+ con |= 0xf << con_4bit_shift(offset);
+ else
+ con &= ~(0xf << con_4bit_shift(offset));
__raw_writel(con, base + GPIOCON_OFF);
- gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
+ pr_debug("%s: %p: CON now %08lx\n", __func__, base, con);
return 0;
}
@@ -627,7 +624,7 @@ static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
__raw_writel(con, base + GPIOCON_OFF);
__raw_writel(dat, base + GPIODAT_OFF);
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+ pr_debug("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
return 0;
}
@@ -671,7 +668,7 @@ static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
con &= ~(0xf << con_4bit_shift(offset));
__raw_writel(con, regcon);
- gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
+ pr_debug("%s: %p: CON %08lx\n", __func__, base, con);
return 0;
}
@@ -706,7 +703,7 @@ static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
__raw_writel(con, regcon);
__raw_writel(dat, base + GPIODAT_OFF);
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+ pr_debug("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
return 0;
}
@@ -926,10 +923,10 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
#ifdef CONFIG_PM
if (chip->pm != NULL) {
if (!chip->pm->save || !chip->pm->resume)
- printk(KERN_ERR "gpio: %s has missing PM functions\n",
+ pr_err("gpio: %s has missing PM functions\n",
gc->label);
} else
- printk(KERN_ERR "gpio: %s has no PM function\n", gc->label);
+ pr_err("gpio: %s has no PM function\n", gc->label);
#endif
/* gpiochip_add() prints own failure message on error. */
@@ -1081,6 +1078,8 @@ static void __init samsung_gpiolib_add_4bit_chips(struct samsung_gpio_chip *chip
if ((base != NULL) && (chip->base == NULL))
chip->base = base + ((i) * 0x20);
+ chip->bitmap_gpio_int = 0;
+
samsung_gpiolib_add(chip);
}
}
@@ -2797,27 +2796,6 @@ static __init void exynos4_gpiolib_init(void)
int group = 0;
void __iomem *gpx_base;
-#ifdef CONFIG_PINCTRL_SAMSUNG
- /*
- * This gpio driver includes support for device tree support and
- * there are platforms using it. In order to maintain
- * compatibility with those platforms, and to allow non-dt
- * Exynos4210 platforms to use this gpiolib support, a check
- * is added to find out if there is a active pin-controller
- * driver support available. If it is available, this gpiolib
- * support is ignored and the gpiolib support available in
- * pin-controller driver is used. This is a temporary check and
- * will go away when all of the Exynos4210 platforms have
- * switched to using device tree and the pin-ctrl driver.
- */
- struct device_node *pctrl_np;
- const char *pctrl_compat = "samsung,pinctrl-exynos4210";
- pctrl_np = of_find_compatible_node(NULL, NULL, pctrl_compat);
- if (pctrl_np)
- if (of_device_is_available(pctrl_np))
- return;
-#endif
-
/* gpio part1 */
gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
if (gpio_base1 == NULL) {
@@ -3032,6 +3010,28 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
+#ifdef CONFIG_PINCTRL_SAMSUNG
+ /*
+ * This gpio driver includes device tree support and there
+ * are platforms using it. In order to maintain compatibility with those
+ * platforms, and to allow non-dt Exynos4210 platforms to use this
+ * gpiolib support, a check is added to find out if there is an active
+ * pin-controller driver support available. If it is available, this
+ * gpiolib support is ignored and the gpiolib support available in
+ * pin-controller driver is used. This is a temporary check and will go
+ * away when all of the Exynos4210 platforms have switched to using
+ * device tree and the pin-ctrl driver.
+ */
+ struct device_node *pctrl_np;
+ static const struct of_device_id exynos_pinctrl_ids[] = {
+ { .compatible = "samsung,pinctrl-exynos4210", },
+ { .compatible = "samsung,pinctrl-exynos4x12", },
+ };
+ for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
+ if (pctrl_np && of_device_is_available(pctrl_np))
+ return -ENODEV;
+#endif
+
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
if (soc_is_s3c24xx()) {
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index c1b82da5650..29e8e750bd4 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -80,6 +80,14 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
val, mask);
}
+static int tps6586x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+ return tps6586x_irq_get_virq(tps6586x_gpio->parent,
+ TPS6586X_INT_PLDO_0 + offset);
+}
+
static int tps6586x_gpio_probe(struct platform_device *pdev)
{
struct tps6586x_platform_data *pdata;
@@ -106,6 +114,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
+ tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
#ifdef CONFIG_OF_GPIO
tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 00329f2fc05..9572aa137e6 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -355,13 +355,13 @@ static struct gpio_chip twl_gpiochip = {
static int gpio_twl4030_pulls(u32 ups, u32 downs)
{
- u8 message[6];
+ u8 message[5];
unsigned i, gpio_bit;
/* For most pins, a pulldown was enabled by default.
* We should have data that's specific to this board.
*/
- for (gpio_bit = 1, i = 1; i < 6; i++) {
+ for (gpio_bit = 1, i = 0; i < 5; i++) {
u8 bit_mask;
unsigned j;
@@ -380,16 +380,16 @@ static int gpio_twl4030_pulls(u32 ups, u32 downs)
static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
- u8 message[4];
+ u8 message[3];
/* 30 msec of debouncing is always used for MMC card detect,
* and is optional for everything else.
*/
- message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
+ message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
debounce >>= 8;
- message[2] = (debounce & 0xff);
+ message[1] = (debounce & 0xff);
debounce >>= 8;
- message[3] = (debounce & 0x03);
+ message[2] = (debounce & 0x03);
return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
new file mode 100644
index 00000000000..13772996cf2
--- /dev/null
+++ b/drivers/gpio/gpio-viperboard.c
@@ -0,0 +1,517 @@
+/*
+ * Nano River Technologies viperboard GPIO lib driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_GPIOA_CLK_1MHZ 0
+#define VPRBRD_GPIOA_CLK_100KHZ 1
+#define VPRBRD_GPIOA_CLK_10KHZ 2
+#define VPRBRD_GPIOA_CLK_1KHZ 3
+#define VPRBRD_GPIOA_CLK_100HZ 4
+#define VPRBRD_GPIOA_CLK_10HZ 5
+
+#define VPRBRD_GPIOA_FREQ_DEFAULT 1000
+
+#define VPRBRD_GPIOA_CMD_CONT 0x00
+#define VPRBRD_GPIOA_CMD_PULSE 0x01
+#define VPRBRD_GPIOA_CMD_PWM 0x02
+#define VPRBRD_GPIOA_CMD_SETOUT 0x03
+#define VPRBRD_GPIOA_CMD_SETIN 0x04
+#define VPRBRD_GPIOA_CMD_SETINT 0x05
+#define VPRBRD_GPIOA_CMD_GETIN 0x06
+
+#define VPRBRD_GPIOB_CMD_SETDIR 0x00
+#define VPRBRD_GPIOB_CMD_SETVAL 0x01
+
+struct vprbrd_gpioa_msg {
+ u8 cmd;
+ u8 clk;
+ u8 offset;
+ u8 t1;
+ u8 t2;
+ u8 invert;
+ u8 pwmlevel;
+ u8 outval;
+ u8 risefall;
+ u8 answer;
+ u8 __fill;
+} __packed;
+
+struct vprbrd_gpiob_msg {
+ u8 cmd;
+ u16 val;
+ u16 mask;
+} __packed;
+
+struct vprbrd_gpio {
+ struct gpio_chip gpioa; /* gpio a related things */
+ u32 gpioa_out;
+ u32 gpioa_val;
+ struct gpio_chip gpiob; /* gpio b related things */
+ u32 gpiob_out;
+ u32 gpiob_val;
+ struct vprbrd *vb;
+};
+
+/* gpioa sampling clock module parameter */
+static unsigned char gpioa_clk;
+static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT;
+module_param(gpioa_freq, uint, 0);
+MODULE_PARM_DESC(gpioa_freq,
+ "gpio-a sampling freq in Hz (default is 1000Hz) valid values: 10, 100, 1000, 10000, 100000, 1000000");
+
+/* ----- begin of gpio a chip -------------------------------------------- */
+
+static int vprbrd_gpioa_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret, answer, error = 0;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpioa_out & (1 << offset))
+ return gpio->gpioa_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ answer = gamsg->answer & 0x01;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ if (error)
+ return error;
+
+ return answer;
+}
+
+static void vprbrd_gpioa_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ if (gpio->gpioa_out & (1 << offset)) {
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN;
+ gamsg->clk = gpioa_clk;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out |= (1 << offset);
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+/* ----- end of gpio a chip ---------------------------------------------- */
+
+/* ----- begin of gpio b chip -------------------------------------------- */
+
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
+ unsigned dir)
+{
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+ int ret;
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR;
+ gbmsg->val = cpu_to_be16(dir << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpiob_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ u16 val;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpiob_out & (1 << offset))
+ return gpio->gpiob_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ val = gbmsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return ret;
+
+ /* cache the read values */
+ gpio->gpiob_val = be16_to_cpu(val);
+
+ return (gpio->gpiob_val >> offset) & 0x1;
+}
+
+static void vprbrd_gpiob_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ if (gpio->gpiob_out & (1 << offset)) {
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 0);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to input\n");
+
+ return ret;
+}
+
+static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out |= (1 << offset);
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 1);
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to output\n");
+
+ mutex_unlock(&vb->lock);
+
+ vprbrd_gpiob_set(chip, offset, value);
+
+ return ret;
+}
+
+/* ----- end of gpio b chip ---------------------------------------------- */
+
+static int __devinit vprbrd_gpio_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_gpio *vb_gpio;
+ int ret;
+
+ vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL);
+ if (vb_gpio == NULL)
+ return -ENOMEM;
+
+ vb_gpio->vb = vb;
+ /* registering gpio a */
+ vb_gpio->gpioa.label = "viperboard gpio a";
+ vb_gpio->gpioa.dev = &pdev->dev;
+ vb_gpio->gpioa.owner = THIS_MODULE;
+ vb_gpio->gpioa.base = -1;
+ vb_gpio->gpioa.ngpio = 16;
+ vb_gpio->gpioa.can_sleep = 1;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
+ vb_gpio->gpioa.get = vprbrd_gpioa_get;
+ vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
+ vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpioa);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpioa.dev, "could not add gpio a\n");
+ goto err_gpioa;
+ }
+
+ /* registering gpio b */
+ vb_gpio->gpiob.label = "viperboard gpio b";
+ vb_gpio->gpiob.dev = &pdev->dev;
+ vb_gpio->gpiob.owner = THIS_MODULE;
+ vb_gpio->gpiob.base = -1;
+ vb_gpio->gpiob.ngpio = 16;
+ vb_gpio->gpiob.can_sleep = 1;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
+ vb_gpio->gpiob.get = vprbrd_gpiob_get;
+ vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
+ vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpiob);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpiob.dev, "could not add gpio b\n");
+ goto err_gpiob;
+ }
+
+ platform_set_drvdata(pdev, vb_gpio);
+
+ return ret;
+
+err_gpiob:
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+err_gpioa:
+ return ret;
+}
+
+static int __devexit vprbrd_gpio_remove(struct platform_device *pdev)
+{
+ struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&vb_gpio->gpiob);
+ if (ret == 0)
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_gpio_driver = {
+ .driver.name = "viperboard-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_gpio_probe,
+ .remove = __devexit_p(vprbrd_gpio_remove),
+};
+
+static int __init vprbrd_gpio_init(void)
+{
+ switch (gpioa_freq) {
+ case 1000000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ;
+ break;
+ case 100000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ;
+ break;
+ case 10000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ;
+ break;
+ case 1000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ break;
+ case 100:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100HZ;
+ break;
+ case 10:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10HZ;
+ break;
+ default:
+ pr_warn("invalid gpioa_freq (%d)\n", gpioa_freq);
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_gpio_driver);
+}
+subsys_initcall(vprbrd_gpio_init);
+
+static void __exit vprbrd_gpio_exit(void)
+{
+ platform_driver_unregister(&vprbrd_gpio_driver);
+}
+module_exit(vprbrd_gpio_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-gpio");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b88..983201b450f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ea..6f58c81cfcb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb..3602731a611 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c899..dcd1a8c029e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
+
+ return 0;
}
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- cirrus_kick_out_firmware_fb(pdev);
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c83..1413a26e490 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d3..f2d667b8bee 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
if (connector->funcs->reset)
connector->funcs->reset(connector);
+ }
}
EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74db..7b2d378b257 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
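/*
 * Editorial sketch (not part of the patch above): a hypothetical driver load
 * path using the new helper. foo_modeset_init() and the commented setup step
 * are placeholders; drm_mode_config_init() and
 * drm_helper_move_panel_connectors_to_head() are the only real DRM calls.
 */
static int foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	/*
	 * ... driver-specific CRTC/encoder/connector setup goes here, in
	 * whatever order is convenient (external DP/HDMI first, internal
	 * eDP/LVDS panel last, or vice versa) ...
	 */

	/* Sort the panel(s) to the front so userspace treats one as primary. */
	drm_helper_move_panel_connectors_to_head(dev);

	return 0;
}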
+
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
}
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an HDMI link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
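The ->output_poll_changed() hook called here is typically wired to the fbdev helpers; a sketch of the usual glue in a driver (my_* names and the private structure are illustrative):

	static void my_output_poll_changed(struct drm_device *dev)
	{
		struct my_drm_private *priv = dev->dev_private;

		drm_fb_helper_hotplug_event(&priv->fb_helper);
	}

	static const struct drm_mode_config_funcs my_mode_config_funcs = {
		.fb_create = my_fb_create,		/* driver-specific */
		.output_poll_changed = my_output_poll_changed,
	};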
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* if this is HPD or polled don't check it -
- TV out for instance */
- if (!connector->polled)
+ /* Ignore forced connectors. */
+ if (connector->force)
continue;
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
if (!dev->mode_config.poll_enabled)
return;
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
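Drivers with hardware hotplug interrupts are expected to call this from (work scheduled by) their HPD interrupt handler instead of kicking the poll worker; a minimal sketch with hypothetical driver structures:

	static void my_hotplug_work_func(struct work_struct *work)
	{
		struct my_drm_private *priv =
			container_of(work, struct my_drm_private, hotplug_work);

		/* Re-detects only DRM_CONNECTOR_POLL_HPD connectors and fires
		 * drm_kms_helper_hotplug_event() if any status changed. */
		drm_helper_hpd_irq_event(priv->dev);
	}

	static irqreturn_t my_hpd_irq_handler(int irq, void *arg)
	{
		struct my_drm_private *priv = arg;

		schedule_work(&priv->hotplug_work);
		return IRQ_HANDLED;
	}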
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f21245..89e19662716 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
-
}
static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
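A sketch of how a driver might register such an adapter; struct my_dp (embedding the i2c_adapter and the i2c_algo_dp_aux_data) and my_aux_ch()/my_hw_aux_transfer() are assumptions, while the field names come from &i2c_algo_dp_aux_data:

	static int my_aux_ch(struct i2c_adapter *adapter, int mode,
			     uint8_t write_byte, uint8_t *read_byte)
	{
		/* Run one AUX transaction on the hardware; return the number
		 * of bytes transferred or a negative error code. */
		return my_hw_aux_transfer(adapter, mode, write_byte, read_byte);
	}

	static int my_register_dp_aux(struct my_dp *dp, struct device *dev)
	{
		dp->algo.running = false;
		dp->algo.address = 0;
		dp->algo.aux_ch = my_aux_ch;

		dp->adapter.owner = THIS_MODULE;
		dp->adapter.class = I2C_CLASS_DDC;
		dp->adapter.algo_data = &dp->algo;
		dp->adapter.dev.parent = dev;
		strlcpy(dp->adapter.name, "my dp aux", sizeof(dp->adapter.name));

		return i2c_dp_aux_add_bus(&dp->adapter);
	}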
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
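A fragment of how a driver's clock-recovery loop might use these helpers (dp, dpcd[], lane_count, my_read_link_status() and my_set_training() are hypothetical; the dpcd array is assumed to hold the receiver capabilities read earlier over the aux channel):

	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 train[4];
	int lane;

	drm_dp_link_train_clock_recovery_delay(dpcd);
	if (!my_read_link_status(dp, link_status))
		return false;

	if (drm_dp_clock_recovery_ok(link_status, lane_count))
		return true;	/* CR done on every lane, move to channel eq */

	/* The sink asked for new drive settings; pick them up and retry. */
	for (lane = 0; lane < lane_count; lane++)
		train[lane] =
			drm_dp_get_adjust_request_voltage(link_status, lane) |
			drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	my_set_training(dp, train);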
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff19..5a3770fbd77 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- int i;
- u32 *raw_edid = (u32 *)in_edid;
+ if (memchr_inv(in_edid, 0, length))
+ return false;
- for (i = 0; i < length / 4; i++)
- if (*(raw_edid + i) != 0)
- return false;
return true;
}
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+/*
+ * Looks for a CEA mode matching given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
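The typical consumer is the HDMI AVI infoframe code in a driver; a short sketch (the infoframe structure and its write-out are driver-specific assumptions):

	u8 vic = drm_mode_cea_vic(adjusted_mode);

	/* 0 means "not a CEA-861 mode"; the AVI infoframe VIC field can be
	 * programmed with the return value directly. */
	my_avi_infoframe.video_code = vic;
	my_write_avi_infoframe(encoder, &my_avi_infoframe);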
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3..954d175bd7f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently of the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
default:
- case DRM_FORCE_ON: s = "ON"; break;
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- printk(KERN_ERR "panic occurred, switching back to text console\n");
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
+ pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp) {
+ if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict) {
+ if (strict)
enable = connector->status == connector_status_connected;
- } else {
+ else
enable = connector->status != connector_status_disconnected;
- }
+
return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
- }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
+out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
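For reference, the usual driver-side sequence that ends up here, with error handling trimmed and my_*/MY_* names as illustrative stand-ins (fbdev->helper.funcs must provide at least ->fb_probe()):

	fbdev->helper.funcs = &my_fb_helper_funcs;	/* provides ->fb_probe() */

	ret = drm_fb_helper_init(dev, &fbdev->helper,
				 num_crtc, MY_MAX_CONNECTORS);
	if (ret)
		return ret;

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32 /* bpp_sel */);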
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d..80254547a3f 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f2..e77bd8b57df 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
default:
return -EINVAL;
}
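Userspace can query this capability before interpreting vblank or page-flip timestamps; a hedged userspace sketch using libdrm's drmGetCap(), assuming headers new enough to define DRM_CAP_TIMESTAMP_MONOTONIC:

	#include <stdint.h>
	#include <xf86drm.h>

	static int timestamps_are_monotonic(int fd)
	{
		uint64_t cap = 0;

		/* Older kernels reject the cap; they use gettimeofday-based
		 * timestamps, so treat failure as "not monotonic". */
		if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap))
			return 0;
		return cap == 1;
	}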
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b..19c01ca3cc7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- struct timeval stime, raw_time;
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- do_gettimeofday(&stime);
+ stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- do_gettimeofday(&raw_time);
+ etime = ktime_get();
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return gettimeofday timestamp as best estimate.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
*/
- do_gettimeofday(tvblank);
+ *tvblank = get_drm_timestamp();
return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
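A sketch of a page-flip completion path built on this helper (struct my_crtc and its pending_event member are assumptions); note the kernel-doc requirement that dev->event_lock is held around the call:

	static void my_finish_page_flip(struct drm_device *dev, int pipe,
					struct my_crtc *mc)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		if (mc->pending_event) {
			/* Fills in sequence/timestamp and wakes the waiter. */
			drm_send_vblank_event(dev, pipe, mc->pending_event);
			mc->pending_event = NULL;
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);

		drm_vblank_put(dev, pipe);
	}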
+
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb..2bf9670ba29 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, false);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
EXPORT_SYMBOL(drm_mm_insert_node);
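The preallocate-then-insert pattern these wrappers enable looks roughly like this (a sketch; mm is assumed to have been set up with drm_mm_init() and color is a driver-defined value):

	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
	if (ret) {
		/* -ENOSPC: no hole large enough for size/alignment/color. */
		kfree(node);
		return ret;
	}

	/* node->start now holds the offset picked inside the manager;
	 * release later with drm_mm_remove_node() followed by kfree(). */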
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -275,22 +283,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, false);
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
start, end);
-
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf9..d8da30e90db 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e..754bc96e10c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba..200e104f1fa 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (file_priv->minor->master)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
+ kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062a..02296653a05 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb0..1d1f1e5e33f 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+ Choose this option if you want to use IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a..639b49e1ec0 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1..bef43e0342a 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b322..9601bad47a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region will be allocated; otherwise the memory only needs to
+	 * be as physically contiguous as possible.
+ */
+ if (flags & EXYNOS_BO_CONTIG)
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
+	 * mapping; otherwise use a cacheable mapping.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err2;
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
}
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ nr_pages = buf->size >> PAGE_SHIFT;
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
return ret;
}
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
-
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba..25cf1628503 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4..2efa4b031d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886..61d5a8402eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir)
+ return &exynos_attach->sgt;
+
+ /* reattaching is not allowed. */
+ if (WARN_ON(exynos_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ sgt = &exynos_attach->sgt;
+
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sgt = ERR_PTR(-EIO);
goto err_unlock;
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+	 * this case could be CONTIG or NONCONTIG type but for now
+	 * it is set to NONCONTIG.
+	 * TODO: find a way for the exporter to notify the importer
+	 * of the type of its own buffer.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd..e0a8e8024b0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +51,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for the exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+	 * create a mapping to manage the iommu table and store a pointer
+	 * to the iommu mapping structure in iommu_mapping of the private
+	 * data. this iommu_mapping can also be used to check whether the
+	 * iommu is supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a3423103649..f5a97745bf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ * with iommu, the device address space starts from this address,
+ * otherwise the default one is used.
+ * @da_space_size: size of the device address space.
+ * if 0, the default value is used.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * this function registers the exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f..301485215a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
+}
+
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops && overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a..88bb25a2a91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f993..5426cc5a5e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+ * without iommu support, physically non-contiguous memory cannot be
+ * used for a framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* make sure that overlay data are updated before releasing fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414c..f433eb7533a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
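For context, a minimal user-space sketch of how this fb_mmap path is typically exercised through the standard fbdev interface; the device path and sizes are illustrative, not something defined by this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	struct fb_var_screeninfo var;
	void *fb;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0 ||
	    ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;

	/* the mapped size must not exceed the backing buffer,
	 * otherwise the driver's fb_mmap above returns -EINVAL */
	fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;

	printf("mapped %u bytes of a %ux%u framebuffer\n",
	       fix.smem_len, var.xres, var.yres);
	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}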
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+ /* map pages with kernel virtual space. */
+ if (!buffer->kvaddr) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
/* buffer count to framebuffer always is 1 at booting time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
- offset);
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 00000000000..61ea24296b5
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width, height).
+ * 4. add check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv)
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @ddata: fimc driver data.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ if (pattern)
+ cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
+
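The window offsets computed above are simply the margins left around the crop rectangle inside the source image; a quick standalone check with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical 1920x1080 source with a 1280x720 crop at (100, 60) */
	unsigned int hsize = 1920, vsize = 1080;
	unsigned int x = 100, y = 60, w = 1280, h = 720;

	unsigned int h1 = x;			/* left margin:   100 */
	unsigned int h2 = hsize - w - x;	/* right margin:  540 */
	unsigned int v1 = y;			/* top margin:     60 */
	unsigned int v2 = vsize - h - y;	/* bottom margin: 300 */

	printf("h1=%u h2=%u v1=%u v2=%u\n", h1, h2, v1, v2);
	return 0;
}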
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
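The src/dst set_transf callbacks above express rotation as an optional 90-degree rotate plus X/Y mirrors, with a user-requested flip toggling the corresponding mirror bit rather than stacking on top of it. A standalone sketch of that composition (hypothetical helper, not part of the patch):

#include <stdio.h>
#include <stdbool.h>

struct transf { bool rot90, mirror_x, mirror_y; };

static struct transf compose(unsigned int degree, bool flip_v, bool flip_h)
{
	struct transf t = { false, false, false };

	if (degree == 90 || degree == 270)
		t.rot90 = true;
	if (degree == 180 || degree == 270)
		t.mirror_x = t.mirror_y = true;
	/* a flip toggles the matching mirror */
	if (flip_v)
		t.mirror_x = !t.mirror_x;
	if (flip_h)
		t.mirror_y = !t.mirror_y;
	return t;
}

int main(void)
{
	/* 180 degrees plus a vertical flip collapses to a Y mirror only */
	struct transf t = compose(180, true, false);

	printf("rot90=%d mirror_x=%d mirror_y=%d\n",
	       t.rot90, t.mirror_x, t.mirror_y);
	return 0;
}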
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
+
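fimc_get_ratio_shift() above picks the largest power-of-two prescale (up to 32:1) that the prescaler can perform before handing the remainder to the main scaler. A rough standalone equivalent with a worked 1920-to-640 case (hypothetical helper, not part of the patch):

#include <stdio.h>

static int ratio_shift(unsigned int src, unsigned int dst,
		       unsigned int *ratio, unsigned int *shift)
{
	unsigned int r;

	if (src >= dst * 64)
		return -1;	/* more than 64:1 cannot be prescaled */

	for (r = 32; r >= 2; r >>= 1)
		if (src >= dst * r)
			break;
	/* the loop falls through with r == 1 when src < dst * 2 */

	*ratio = r;
	*shift = __builtin_ctz(r);	/* 32 -> 5, 16 -> 4, ..., 1 -> 0 */
	return 0;
}

int main(void)
{
	unsigned int ratio, shift;

	/* 1920 >= 640 * 2 but < 640 * 4, so ratio = 2 and shift = 1 */
	ratio_shift(1920, 640, &ratio, &shift);
	printf("ratio=%u shift=%u\n", ratio, shift);
	return 0;
}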
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w) ? true : false;
+ sc->up_v = (dst_h >= src_h) ? true : false;
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
+
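To make the fixed-point arithmetic above concrete: with a hypothetical 1920x1080 source scaled down to 640x540, the 2:1 prescale leaves a 1.5x horizontal and 1.0x vertical job for the main scaler, expressed in 14-bit fixed point (16384 == 1.0):

#include <stdio.h>

#define FIMC_SHFACTOR	10

int main(void)
{
	unsigned int src_w = 1920, src_h = 1080;
	unsigned int dst_w = 640, dst_h = 540;
	/* from the ratio/shift step: pre_hratio = pre_vratio = 2 */
	unsigned int hfactor = 1, vfactor = 1;

	unsigned int pre_dst_w = src_w >> hfactor;	/* 960 */
	unsigned int pre_dst_h = src_h >> vfactor;	/* 540 */
	unsigned int hratio = (src_w << 14) / (dst_w << hfactor); /* 24576 == 1.5 */
	unsigned int vratio = (src_h << 14) / (dst_h << vfactor); /* 16384 == 1.0 */
	unsigned int shfactor = FIMC_SHFACTOR - (hfactor + vfactor); /* 8 */

	printf("pre %ux%u hratio %u vratio %u shfactor %u\n",
	       pre_dst_w, pre_dst_h, hratio, vratio, shfactor);
	return 0;
}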
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
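The buf_seq count above is just a population count of the 32-bit CIFCNTSEQ enable mask; inside the kernel hweight32() expresses the same thing, and a plain-C equivalent for illustration is:

#include <stdio.h>

static unsigned int count_enabled_bufs(unsigned int seq_mask)
{
	unsigned int n = 0;

	/* count the set bits, one per enqueued destination buffer */
	while (seq_mask) {
		n += seq_mask & 1;
		seq_mask >>= 1;
	}
	return n;
}

int main(void)
{
	/* buffers 0, 3 and 7 enqueued -> 3 enabled slots */
	printf("%u\n", count_enabled_bufs(0x89));
	return 0;
}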
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= (~mask);
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx, false);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* If set to true, the screen contents can be saved as JPEG. */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture */
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ ret = PTR_ERR(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ ret = PTR_ERR(ctx->fimc_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_clk = clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ ret = PTR_ERR(ctx->wb_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ ret = PTR_ERR(ctx->wb_b_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ goto err_ctx;
+ }
+
+ parent_clk = clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ ret = PTR_ERR(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ ret = -EINVAL;
+ clk_put(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ clk_put(parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4410_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4410_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = __devexit_p(fimc_remove),
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 00000000000..dc970fa0d88
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1..bf0d9baca2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
/*
* FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
+ bool resume;
};
struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
u32 vidcon1;
bool suspended;
struct mutex lock;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
struct exynos_drm_panel_info *panel;
};
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,exynos4-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ if (of_id)
+ return (struct fimd_driver_data *)of_id->data;
+#endif
+
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
+ VIDTCON2_HOZVAL(timing->xres - 1) |
+ VIDTCON2_LINEVAL_E(timing->yres - 1) |
+ VIDTCON2_HOZVAL_E(timing->xres - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
}
}
+static void fimd_wait_for_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal the VSYNC interrupt, or return after
+ * a 50ms timeout (i.e. a refresh rate of 20Hz).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
};
static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
/* buffer size */
val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
writel(val, ctx->regs + VIDOSD_A(win));
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static void fimd_wait_for_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- int ret;
-
- ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
- VIDCON1_VSTATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
- .wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
out:
return IRQ_HANDLED;
}
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_attach_device(drm_dev, dev);
+
return 0;
}
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
return 0;
}
+static void fimd_window_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ fimd_win_disable(dev, i);
+ }
+ fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
+ struct device *dev = ctx->subdrv.dev;
if (enable) {
int ret;
- struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
+
+ fimd_window_resume(dev);
} else {
+ fimd_window_suspend(dev);
+
fimd_clock(ctx, false);
ctx->suspended = true;
}
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = clk_get(dev, "fimd");
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
+ return PTR_ERR(ctx->bus_clk);
}
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
+ return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_clk;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return -ENXIO;
}
ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return ret;
}
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
ctx->panel = panel;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- return ret;
}
static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
return 0;
}
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
+ .of_match_table = of_match_ptr(fimd_driver_dt_match),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46..6ffa0763c07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+#define MAX_BUF_ADDR_NR 6
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- u32 data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
struct list_head list;
- unsigned int handle;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int gem_nr;
+ unsigned int map_nr;
+ unsigned long handles[MAX_BUF_ADDR_NR];
+ unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
+ struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL);
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+ atomic_dec(&g2d_userptr->refcount);
+
+ if (atomic_read(&g2d_userptr->refcount) > 0)
+ return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+ g2d_userptr->sgt = NULL;
+
+ kfree(g2d_userptr->pages);
+ g2d_userptr->pages = NULL;
+ kfree(g2d_userptr);
+ g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check the size because the same address could be
+ * used with a different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+ /*
+ * at this moment the g2d dma may still be accessing this
+ * g2d_userptr memory region, so remove this g2d_userptr
+ * object from userptr_list so it is not referenced again,
+ * and also exclude it from the userptr pool so that it is
+ * released after the dma access completes.
+ */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr) {
+ DRM_ERROR("failed to allocate g2d_userptr.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
+
+ pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ kfree(g2d_userptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+ ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+ npages, pages, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ if (!sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ kfree(pages);
+ kfree(g2d_userptr);
+ pages = NULL;
+ g2d_userptr = NULL;
+
+ return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
struct g2d_cmdlist *cmdlist = node->cmdlist;
- dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->gem_nr; i++) {
- struct g2d_gem_node *gem_node;
-
- gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
- if (!gem_node) {
- dev_err(g2d_priv->dev, "failed to allocate gem node\n");
- return -ENOMEM;
- }
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle;
+ dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
- gem_node->handle = cmdlist->data[offset];
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
- file);
- if (IS_ERR(addr)) {
- node->gem_nr = i;
- kfree(gem_node);
- return PTR_ERR(addr);
+ handle = cmdlist->data[offset];
+
+ if (node->obj_type[i] == BUF_TYPE_GEM) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
}
cmdlist->data[offset] = *addr;
- list_add_tail(&gem_node->list, &g2d_priv->gem_list);
- g2d_priv->gem_nr++;
+ node->handles[i] = handle;
}
return 0;
}
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_gem_node *node, *n;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int i;
- list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
- if (!nr)
- break;
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle = node->handles[i];
- exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
- list_del_init(&node->list);
- kfree(node);
- nr--;
+ if (node->obj_type[i] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ node->handles[i] = 0;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * commands in run_cmdlist have been completed so unmap all gem
+ * objects in each command node so that they are unreferenced.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
+
+ if (for_addr) {
+ /* check userptr buffer type. */
+ reg_offset = (cmdlist->data[index] &
+ ~0x7fffffff) >> 31;
+ if (reg_offset) {
+ node->obj_type[i] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ }
+ }
+
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
+
+ if (node->obj_type[i] != BUF_TYPE_USERPTR)
+ node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->gem_nr = req->cmd_gem_nr;
- if (req->cmd_gem_nr) {
- struct drm_exynos_g2d_cmd *cmd_gem;
+ node->map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_gem,
- sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_gem_nr * 2;
+ cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+ g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->gem_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects that have not completed.
+ *
+ * P.S. if the current process was terminated forcibly then
+ * there may still be some commands in inuse_cmdlist, so
+ * unmap them.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
mutex_unlock(&g2d->cmdlist_mutex);
- g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+ /* release all g2d_userptr objects in the pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_destroy_workqueue;
-
- g2d->gate_clk = clk_get(dev, "fimg2d");
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ g2d->max_pool = MAX_POOL;
+
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
- clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664..d48183e7e05 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
+ /* TODO */
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+ if (!buf->sgt)
+ return -EINTR;
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
-
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+ /*
+ * take a reference to this mapping of the object. This reference
+ * is dropped by the corresponding vm_close call.
+ */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+ /*
+ * Set the specific mapper's fops; it will be restored by
+ * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+ * This is used to call the specific mapper temporarily.
+ */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+ /*
+ * Set the gem object to private_data so that the specific mapper
+ * can get the gem object. It will be restored by
+ * exynos_drm_gem_mmap_buffer to drm_file.
+ */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region is mmapped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
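+
+/*
+ * Illustrative usage sketch, not part of this patch: a caller is expected
+ * to pair exynos_gem_get_pages_from_userptr() with
+ * exynos_gem_put_pages_to_userptr() on the same page array and vma. The
+ * helper name below is hypothetical.
+ */
+#if 0
+static int example_pin_userptr(unsigned long start, unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ struct page **pages;
+ int ret;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ /* pin (or, for VM_PFNMAP regions, look up) the backing pages */
+ ret = exynos_gem_get_pages_from_userptr(start, npages, pages, vma);
+ if (ret < 0)
+ goto free_pages;
+
+ /* ... build an sg table from pages[] and map it for DMA ... */
+
+ /* mark the pages dirty and drop the references taken above */
+ exynos_gem_put_pages_to_userptr(pages, npages, vma);
+
+free_pages:
+ kfree(pages);
+ return ret;
+}
+#endif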
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -EIO;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
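+
+/*
+ * Illustrative usage sketch, not part of this patch: map an sg table for
+ * the device, run the transfer, then unmap it with the same direction.
+ * The helper name below is hypothetical.
+ */
+#if 0
+static int example_dma_with_sgt(struct drm_device *drm_dev,
+ struct sg_table *sgt)
+{
+ int ret;
+
+ /* make the pages visible to the device */
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
+ if (ret < 0)
+ return ret;
+
+ /* ... program the device with sg_dma_address(sgt->sgl) ... */
+
+ /* tear the mapping down with the same direction */
+ exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
+ return 0;
+}
+#endif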
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f7..f11f2afd5bf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mmapped with
+ * VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
- struct sg_table *sgt;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
struct page **pages;
- unsigned long page_size;
+ struct sg_table *sgt;
unsigned long size;
+ bool pfnmap;
};
/*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
+ * @vma: a pointer to vm_area.
* @flags: indicate memory type of the allocated buffer and cache attribute.
*
* P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
+ struct vm_area_struct *vma;
unsigned int flags;
};
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* map user space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attribute can be changed to another one here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 00000000000..5639353d47b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. add check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see section 49.2 (features) of the user manual.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: indicates whether the device is suspended (clock gated).
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
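+
+/*
+ * Illustration only, not part of this patch: gsc_handle_irq(ctx, true,
+ * false, true) enables the interrupt line, keeps the overflow interrupt
+ * masked (overflow == false sets GSC_IRQ_OR_MASK) and unmasks the
+ * frame-done interrupt (done == true clears GSC_IRQ_FRMDONE_MASK).
+ */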
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_IN_RGB_SD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 8) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 4)
+ *ratio = 4;
+ else if (src >= dst * 2)
+ *ratio = 2;
+ else
+ *ratio = 1;
+
+ return 0;
+}
+
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+ __func__, sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+ sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
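+
+/*
+ * Worked example (illustration only, not part of this patch): scaling a
+ * 1920x1080 source down to 960x540 with no rotation.
+ * gsc_get_ratio_shift(1920, 960): 1920 >= 960 * 2 but < 960 * 4, so
+ * pre_hratio = 2; likewise pre_vratio = 2 for 1080 -> 540, and
+ * gsc_get_prescaler_shfactor(2, 2) yields pre_shfactor = 2.
+ * The main ratios are 16.16 fixed point: (1920 << 16) / 960 = 131072,
+ * which equals GSC_SC_DOWN_RATIO_4_8, so gsc_set_h_coef()/gsc_set_v_coef()
+ * pick the "~8:4" coefficient bank (sc_ratio = 4).
+ */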
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
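+
+/*
+ * Worked example (illustration only, not part of this patch): if
+ * GSC_OUT_BASE_ADDR_Y_MASK reads back 0x0003, bits 0 and 1 are masked
+ * (dequeued), so buf_num = GSC_REG_SZ (16) - 2 = 14 enqueued buffers.
+ */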
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EFAULT;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+ dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+ cfg = readl(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ ret = PTR_ERR(ctx->gsc_clk);
+ goto err_ctx;
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->gsc_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->gsc_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = __devexit_p(gsc_remove),
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 00000000000..b3c3bc618c0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b4518..55793c46e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
/* Common hdmi subdrv needs to access the hdmi and mixer through context.
* These should be initialized by the respective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
+int exynos_platform_device_hdmi_register(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ return -EEXIST;
+
+ exynos_drm_hdmi_pdev = platform_device_register_simple(
+ "exynos-drm-hdmi", -1, NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+ return PTR_ERR(exynos_drm_hdmi_pdev);
+
+ return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (mixer_ops && mixer_ops->wait_for_vblank)
+ mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .wait_for_vblank = drm_hdmi_wait_for_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
- .wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
return 0;
}
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct drm_hdmi_context *ctx;
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
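For context, a minimal sketch (not part of the patch) of how the new exynos_platform_device_hdmi_register()/unregister() helpers are expected to be driven. The actual call site lives outside the hunks shown above and is assumed to be the common driver's module init/exit path; the exynos_drm_common_hdmi_driver name below is illustrative.

	static int __init exynos_drm_init(void)
	{
		int ret;

		/* create the virtual "exynos-drm-hdmi" device first so the
		 * common hdmi platform driver has something to bind to */
		ret = exynos_platform_device_hdmi_register();
		if (ret < 0)
			return ret;

		ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
		if (ret < 0)
			exynos_platform_device_hdmi_unregister();

		return ret;
	}

	static void __exit exynos_drm_exit(void)
	{
		platform_driver_unregister(&exynos_drm_common_hdmi_driver);
		exynos_platform_device_hdmi_unregister();
	}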
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a05..fcc3093ec8f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
+ int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*wait_for_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
- void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
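As a usage note (a sketch, not part of the patch): with wait_for_vblank moved from the overlay block to the manager block and the new iommu_on hook added, a mixer backend would now populate exynos_mixer_ops roughly as below. The mixer-side function names are illustrative, since the mixer driver changes are outside this section.

	static struct exynos_mixer_ops mixer_ops = {
		/* manager */
		.iommu_on		= mixer_iommu_on,
		.enable_vblank		= mixer_enable_vblank,
		.disable_vblank		= mixer_disable_vblank,
		.wait_for_vblank	= mixer_wait_for_vblank,
		.dpms			= mixer_dpms,
		/* overlay */
		.win_mode_set		= mixer_win_mode_set,
		.win_commit		= mixer_win_commit,
		.win_disable		= mixer_win_disable,
	};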
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 00000000000..2482b7f9634
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					GFP_KERNEL);
+	if (!dev->dma_parms) {
+		arm_iommu_release_mapping(mapping);
+		return -ENOMEM;
+	}
+
+	dma_set_max_seg_size(dev, 0xffffffffu);
+	dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * If mapping->kref drops to 0, then everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to
+ * the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+					sizeof(*subdrv_dev->dma_parms),
+					GFP_KERNEL);
+	if (!subdrv_dev->dma_parms)
+		return -ENOMEM;
+
+	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+	 * Set dma_ops on the drm_device just one time.
+	 *
+	 * The dma mapping api needs a device object and is used to
+	 * allocate physical memory and map it through the iommu table.
+	 * If the iommu attach succeeded, the sub driver has the iommu
+	 * dma_ops, and all sub drivers share the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from
+ * the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
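To summarize the intended lifecycle of these helpers, here is a sketch (not part of the patch, caller names hypothetical): the core driver creates the mapping once at load time, sub drivers attach their devices to it, and teardown is the mirror image. Note that drm_iommu_detach_device() above already releases the mapping itself.

	static int example_iommu_setup(struct drm_device *drm_dev,
					struct device *subdrv_dev)
	{
		int ret;

		/* once, when the drm device is loaded */
		ret = drm_create_iommu_mapping(drm_dev);
		if (ret)
			return ret;

		/* per sub driver, typically from its probe callback */
		ret = drm_iommu_attach_device(drm_dev, subdrv_dev);
		if (ret) {
			drm_release_iommu_mapping(drm_dev);
			return ret;
		}

		return 0;
	}

	static void example_iommu_teardown(struct drm_device *drm_dev,
					struct device *subdrv_dev)
	{
		/* detach also releases the mapping, see above */
		drm_iommu_detach_device(drm_dev, subdrv_dev);
	}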
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 00000000000..18a0ca190b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif
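Because the !CONFIG_DRM_EXYNOS_IOMMU stubs above simply return 0 or false, callers can guard the attach/detach calls with is_drm_iommu_supported() and compile unchanged either way. The ipp sub driver later in this patch uses exactly this pattern; sketched here with the variables as they appear in ipp_subdrv_probe() further down:

	if (is_drm_iommu_supported(drm_dev)) {
		ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
		if (ret) {
			DRM_ERROR("failed to activate iommu\n");
			goto err_iommu;
		}
	}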
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 00000000000..49eebe948ed
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image
+ * scaler/rotator and input/output DMA operations, using FIMC, GSC,
+ * Rotator and so on.
+ * IPP is an integration device driver for hardware of the same kind.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open for multi-open handling.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("failed to get idr.\n");
+ return -ENOMEM;
+ }
+
+ /* do the allocation under our mutexlock */
+ mutex_lock(lock);
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ mutex_unlock(lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+ /*
+	 * Check the dedicated flag, and for WB/OUTPUT operations also
+	 * check the power-on state.
+ */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+ return ippdrv;
+ }
+
+ /*
+		 * WB and OUTPUT operations do not support multi-operation,
+		 * so mark the device dedicated in the set property ioctl.
+		 * When the ipp driver finishes, the dedicated flag is cleared.
+ */
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_ERROR("already used choose device.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+		 * This is necessary to find the correct device among the
+		 * ipp drivers. ipp drivers have different abilities, so the
+		 * property needs to be checked.
+ */
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_ERROR("not support property.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ippdrv;
+ } else {
+ /*
+		 * In this case the user application did not set ipp_id,
+		 * so the ipp subsystem searches the driver list for a
+		 * suitable driver.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_DEBUG_KMS("%s:used device.\n", __func__);
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_DEBUG_KMS("%s:not support property.\n",
+ __func__);
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+ DRM_ERROR("not support ipp driver operations.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+	 * This case searches for the ipp driver by the prop_id handle.
+	 * The ipp subsystem sometimes finds the driver by prop_id,
+	 * e.g. PAUSE state, queue buf, command control.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+ count++, (int)ippdrv);
+
+ if (!list_empty(&ippdrv->cmd_list)) {
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+ if (c_node->property.prop_id == prop_id)
+ return ippdrv;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+ /*
+		 * Report the ippdrv list count to the user application.
+		 * In the first step the user application gets the ippdrv
+		 * count, and in the second step it gets the ippdrv
+		 * capability using ipp_id.
+ */
+ prop_list->count = count;
+ } else {
+ /*
+		 * Get the ippdrv capability by ipp_id.
+		 * Some devices do not support the wb or output interface,
+		 * so the user application detects the correct ipp driver
+		 * using this ioctl.
+ */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+		if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("not found ipp%d driver.\n",
+ prop_list->ipp_id);
+ return -EINVAL;
+ }
+
+ prop_list = ippdrv->prop_list;
+ }
+
+ return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+ __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Find the command node using the command list in ippdrv.
+	 * When the command node is found by prop_id, the property
+	 * information is set in that command node.
+ */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work) {
+ DRM_ERROR("failed to alloc cmd_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work) {
+ DRM_ERROR("failed to alloc event_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Log the property set by the user application.
+	 * The user application can set various properties.
+ */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+ /*
+	 * The set property ioctl normally generates a new prop_id, but in
+	 * this case a prop_id was already assigned by an earlier set
+	 * property call, e.g. PAUSE state. Find the current prop_id and
+	 * use it instead of allocating a new one.
+ */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+ DRM_ERROR("failed to allocate map node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+ /* stored property information and ippdrv in private data */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+	c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		ret = PTR_ERR(c_node->start_work);
+		goto err_clear;
+	}
+
+	c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR_OR_NULL(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		ret = PTR_ERR(c_node->stop_work);
+		goto err_free_start;
+	}
+
+	c_node->event_work = ipp_create_event_work();
+	if (IS_ERR_OR_NULL(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		ret = PTR_ERR(c_node->event_work);
+		goto err_free_stop;
+	}
+
+ mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ /* make dedicated state without m2m */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* delete list */
+ list_del(&c_node->list);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+ i ? "dst" : "src");
+ continue;
+ }
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+ /*
+	 * M2M operations need paired memory addresses, so check the
+	 * minimum of the src and dst counts. Other cases do not use
+	 * paired memory, so use the maximum count.
+ */
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+ __func__, count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+ DRM_ERROR("failed to allocate queue node.\n");
+ goto err_unlock;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+ __func__, i, buf_info.base[i],
+ (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+ mutex_unlock(&c_node->mem_lock);
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ if (list_empty(&m_node->list)) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ qbuf->ops_id, qbuf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ list_add_tail(&e->base.link, &c_node->event_list);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+ __func__, count++, (int)e);
+
+ /*
+		 * qbuf == NULL means delete all events: the stop operation
+		 * wants to clear the whole event list. Otherwise delete
+		 * only the event with the same buf id.
+ */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ return;
+ }
+ }
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /*
+	 * If the destination buffer is set and the clock is enabled,
+	 * then m2m operations need to start at queue_buf.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+ /* delete list */
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+	if (IS_ERR_OR_NULL(c_node)) {
+ DRM_ERROR("failed to get command node.\n");
+ return -EFAULT;
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+ /*
+		 * First, get an event for the destination buffer.
+		 * Second, in the M2M case, run with the destination
+		 * buffer if needed.
+ */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+ /*
+			 * In the M2M case, run play control for the streaming
+			 * feature. In other cases, set the address and wait.
+ */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->cmd_lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->cmd_lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ break;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+	if (IS_ERR_OR_NULL(c_node)) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+ DRM_ERROR("could not support this state currently.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+ DRM_ERROR("not support tranf.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->cmd = c_node;
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->cmd = NULL;
+ return ret;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+ __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->cmd_lock);
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ /*
+		 * The M2M case waits for completion of the transfer,
+		 * because M2M performs a single unit operation with
+		 * multiple queues, so it must wait until the data
+		 * transfer is complete.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+ /*
+	 * The command node has an event list for destination buffers.
+	 * When a destination buffer is enqueued to the mem list, an event
+	 * is created and linked to the tail of the event list, so the
+	 * first event corresponds to the first enqueued buffer.
+ */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ if (!e) {
+ DRM_ERROR("empty event.\n");
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+ , __func__, now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+ event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->cmd;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+ /*
+	 * IPP synchronizes the command thread and the event thread.
+	 * If IPP is closed immediately from userland, synchronize with
+	 * the command thread by signaling completion; otherwise just
+	 * bail out of the operation.
+ */
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+ __func__, c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ mutex_lock(&c_node->event_lock);
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+
+ mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ippdrv->ipp_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_idr;
+ }
+
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+ count++, (int)ippdrv, ippdrv->ipp_id);
+
+ if (ippdrv->ipp_id == 0) {
+ DRM_ERROR("failed to get ipp_id[%d]\n",
+ ippdrv->ipp_id);
+ goto err_idr;
+ }
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err_iommu;
+ }
+ }
+ }
+
+ return 0;
+
+err_iommu:
+ /* get ipp driver entry */
+ list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ippdrv->drm_dev = NULL;
+ exynos_drm_ippdrv_unregister(ippdrv);
+ }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate priv.\n");
+ return -ENOMEM;
+ }
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ goto err_clear;
+ }
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (list_empty(&ippdrv->cmd_list))
+ continue;
+
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+ __func__, count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+ /*
+				 * Userland went into an abnormal state: the
+				 * process was killed and the file closed, so
+				 * IPP never received the stop cmd ctrl.
+				 * Perform the stop operation here instead.
+ */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ }
+
+err_clear:
+ kfree(priv);
+ return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+ /*
+ * Create a single-threaded workqueue for IPP events.
+ * IPP drivers queue their event_work items on this
+ * workqueue, and the event thread then delivers the
+ * resulting events to the user process.
+ */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+ /*
+ * Create a single-threaded workqueue for IPP commands.
+ * The user process creates a command node with the
+ * set-property ioctl, builds a start_work item and
+ * queues it on this workqueue; the command thread
+ * then starts the property.
+ */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ /* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+err_clear:
+ kfree(ctx);
+ return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ /* remove,destroy ipp idr */
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = __devexit_p(ipp_remove),
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 00000000000..28ffac95386
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
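+
+/*
+ * These helpers simply walk the per-operation (source/destination) and
+ * per-plane (Y/Cb/Cr) index ranges, e.g. (illustrative usage):
+ *
+ *	int i;
+ *	for_each_ipp_planar(i)
+ *		addr[i] = buf_info->base[i];
+ */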
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source/destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex cmd_lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr gem handle of each plane.
+ * @base: Y, Cb, Cr base (dma) address of each plane.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: start operation of the ipp device.
+ * @stop: stop operation of the ipp device.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ u32 ipp_id;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *cmd;
+ struct list_head cmd_list;
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb210..83efc662d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
* CRTC ----------------
* ^ start ^ end
*
- * There are six cases from a to b.
+ * There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
}
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
- else
- src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
- else
- src_y += crtc_h;
crtc_y = 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 00000000000..1c2366083c7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The Rotator supports image crop/rotation and input/output DMA operations.
+ * The input DMA reads image data from memory.
+ * The output DMA writes image data back to memory.
+ *
+ * M2M operation: supports crop, rotation and flip (no scaling or csc).
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. need to add supported list in prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct rot_context, ippdrv)
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
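+
+/*
+ * Note: rot_read()/rot_write() rely on a local variable named 'rot'
+ * (struct rot_context *) being in scope at the call site.
+ */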
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+ if (enable)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else
+ DRM_ERROR("the SFR is set illegally\n");
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
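+ /*
+ * e.g. align = 3 gives mask = ~0x7 (8-pixel alignment); the
+ * ROT_ALIGN/ROT_MIN/ROT_MAX helpers below use this mask to keep
+ * the size aligned and clamped into the [min, max] range.
+ */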
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:not support format\n", __func__);
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot) {
+ dev_err(dev, "failed to allocate rot\n");
+ return -ENOMEM;
+ }
+
+ rot->limit_tbl = (struct rot_limit_table *)
+ platform_get_device_id(pdev)->driver_data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rot->regs_res) {
+ dev_err(dev, "failed to find registers\n");
+ ret = -ENOENT;
+ goto err_get_resource;
+ }
+
+ rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+ if (!rot->regs) {
+ dev_err(dev, "failed to map register\n");
+ ret = -ENXIO;
+ goto err_get_resource;
+ }
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = rot->irq;
+ goto err_get_irq;
+ }
+
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+ IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_get_irq;
+ }
+
+ rot->clock = clk_get(dev, "rotator");
+ if (IS_ERR_OR_NULL(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(rot->clock);
+ goto err_clk_get;
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+ DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+err_clk_get:
+ free_irq(rot->irq, rot);
+err_get_irq:
+ devm_iounmap(dev, rot->regs);
+err_get_resource:
+ devm_kfree(dev, rot);
+ return ret;
+}
+
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+ clk_put(rot->clock);
+
+ free_irq(rot->irq, rot);
+ devm_iounmap(dev, rot->regs);
+
+ devm_kfree(dev, rot);
+
+ return 0;
+}
+
+struct rot_limit_table rot_limit_tbl = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+struct platform_device_id rotator_driver_ids[] = {
+ {
+ .name = "exynos-rot",
+ .driver_data = (unsigned long)&rot_limit_tbl,
+ },
+ {},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return rotator_clk_ctrl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = __devexit_p(rotator_remove),
+ .id_table = rotator_driver_ids,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 00000000000..a2d7a14a52b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f..99bfc38dfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
-
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
+ drm_vblank_put(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a..2c46b6c0b82 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION 0x02
+#define HDMI_AVI_LENGTH 0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0A
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+ /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+ /* InfoFrame packet type */
+ HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+ /* Vendor-Specific InfoFrame */
+ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+ /* Auxiliary Video information InfoFrame */
+ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+ /* Audio information InfoFrame */
+ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
+ void *parent_ctx;
int external_irq;
int internal_irq;
@@ -84,7 +108,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
- void *parent_ctx;
int hpd_gpio;
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+ &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
+struct hdmi_infoframe {
+ enum HDMI_PACKET_TYPE type;
+ u8 ver;
+ u8 len;
+};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+ /*
+ * hdr_sum : header0 + header1 + header2
+ * start   : start address of packet byte1
+ * len     : packet bytes - 1
+ */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
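+
+/*
+ * Checksum example (illustrative): an AVI InfoFrame header is type 0x82,
+ * version 0x02 and length 0x0d, so hdr_sum starts at 0x91.  If the payload
+ * bytes summed to 0x4f, the running total would be 0xe0 and the value
+ * written to the checksum register would be (u8)(~0xe0 + 1) = 0x20, so
+ * that the whole packet sums to zero modulo 256.
+ */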
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ struct hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 aspect_ratio;
+ u32 mod;
+ u32 vic;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->type) {
+ case HDMI_PACKET_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+ /* Output format hardcoded to zero (RGB / YCbCr selection) */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
+ aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+ if (hdata->type == HDMI_TYPE13)
+ vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+ else
+ vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_PACKET_TYPE_AUI:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
+ kfree(raw_edid);
} else {
return -ENODEV;
}
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
+ struct hdmi_infoframe infoframe;
+
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
+ infoframe.type = HDMI_PACKET_TYPE_AVI;
+ infoframe.ver = HDMI_AVI_VERSION;
+ infoframe.len = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.type = HDMI_PACKET_TYPE_AUI;
+ infoframe.ver = HDMI_AUI_VERSION;
+ infoframe.len = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
/* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
mdelay(10);
}
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
break;
}
}
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
- pm_runtime_get_sync(hdata->dev);
-
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
+ hdmiphy_poweroff(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
+ if (pm_runtime_suspended(hdata->dev))
+ pm_runtime_get_sync(hdata->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- hdmi_poweroff(hdata);
+ if (!pm_runtime_suspended(hdata->dev))
+ pm_runtime_put_sync(hdata->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
+ res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = clk_get(dev, "hdmiphy");
+ res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2217,28 +2373,6 @@ fail:
return -ENODEV;
}
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR_OR_NULL(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR_OR_NULL(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
-
- return 0;
-}
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
}
};
+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+#endif
static int __devinit hdmi_probe(struct platform_device *pdev)
{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
+ if (match == NULL)
+ return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- goto err_data;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
+ return -ENOENT;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
- ret = -ENXIO;
- goto err_resource;
+ return -ENXIO;
}
- ret = gpio_request(hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- goto err_resource;
+ return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- ret = -ENOENT;
- goto err_gpio;
+ return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_gpio:
- gpio_free(hdata->hpd_gpio);
-err_resource:
- hdmi_resources_cleanup(hdata);
-err_data:
return ret;
}
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
- gpio_free(hdata->hpd_gpio);
-
- hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
disable_irq(hdata->internal_irq);
disable_irq(hdata->external_irq);
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
hdmi_poweroff(hdata);
return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
enable_irq(hdata->external_irq);
enable_irq(hdata->internal_irq);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ hdmi_poweron(hdata);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweron(hdata);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+ SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = hdmi_match_types,
+ .of_match_table = of_match_ptr(hdmi_match_types),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bb..6206056f4a3 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
+ .of_match_table = of_match_ptr(hdmiphy_match_types),
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8..21db89530fc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
dma_addr_t dma_addr;
- void __iomem *vaddr;
dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
uint32_t pixel_format;
unsigned int bpp;
unsigned int crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
+ bool enabled;
+ bool resume;
};
struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
+ struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -90,6 +92,9 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
+ void *parent_ctx;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static void mixer_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
- return;
- }
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
-
- pm_runtime_get_sync(ctx->dev);
-
- clk_enable(res->mixer);
- if (ctx->vp_enabled) {
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
- }
-
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
- mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *mdata = ctx;
+ struct drm_device *drm_dev;
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
- mutex_unlock(&ctx->mixer_mutex);
+ drm_hdmi_ctx = mdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ if (is_drm_iommu_supported(drm_dev)) {
+ if (enable)
+ return drm_iommu_attach_device(drm_dev, mdata->dev);
- clk_disable(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
+ drm_iommu_detach_device(drm_dev, mdata->dev);
}
-
- pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
+ return 0;
}
static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_dpms(void *ctx, int mode)
-{
- struct mixer_context *mixer_ctx = ctx;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- mixer_poweron(mixer_ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mixer_ctx);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
- int ret;
-
- ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
- MXR_INT_STATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
win_data = &mixer_ctx->win_data[win];
win_data->dma_addr = overlay->dma_addr[0];
- win_data->vaddr = overlay->vaddr[0];
win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->chroma_vaddr = overlay->vaddr[1];
win_data->pixel_format = overlay->pixel_format;
win_data->bpp = overlay->bpp;
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for MIXER to signal VSYNC interrupt or return after a
+ * 50ms timeout (one frame at a 20Hz refresh rate).
+ */
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(ctx, i);
+ }
+ mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ }
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_window_suspend(ctx);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+ }
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_get_sync(mixer_ctx->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_put_sync(mixer_ctx->dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
}
static struct exynos_mixer_ops mixer_ops = {
/* manager */
+ .iommu_on = mixer_iommu_on,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
.dpms = mixer_dpms,
/* overlay */
- .wait_for_vblank = mixer_wait_for_vblank,
.win_mode_set = mixer_win_mode_set,
.win_commit = mixer_win_commit,
.win_disable = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
- bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
- is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(drm_dev, crtc);
}
- if (is_checked)
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
}
out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = clk_get(dev, "mixer");
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail;
+ return ret;
}
mixer_res->irq = res->start;
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
- clk_put(mixer_res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(mixer_res->mixer))
- clk_put(mixer_res->mixer);
- return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- int ret;
- mixer_res->vp = clk_get(dev, "vp");
+ mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
- clk_put(mixer_res->sclk_dac);
- if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
- clk_put(mixer_res->sclk_mixer);
- if (!IS_ERR_OR_NULL(mixer_res->vp))
- clk_put(mixer_res->vp);
- return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
+ ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
mixer_poweroff(ctx);
return 0;
}
+
+static int mixer_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
#endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+ SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
struct platform_driver mixer_driver = {
.driver = {
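The mixer rework above replaces the old register-polling mixer_wait_for_vblank() with an atomic flag plus a waitqueue that the IRQ handler completes on the next VSYNC; the DRM_INIT_WAITQUEUE/DRM_WAKEUP/DRM_HZ wrappers used in the patch sit on top of the generic kernel primitives shown here. A reduced sketch with a hypothetical foo_ctx (locking and the rest of the real driver omitted):

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

struct foo_ctx {
	wait_queue_head_t wait_vsync_queue;
	atomic_t wait_vsync_event;
};

static void foo_ctx_init(struct foo_ctx *ctx)
{
	init_waitqueue_head(&ctx->wait_vsync_queue);
	atomic_set(&ctx->wait_vsync_event, 0);
}

static void foo_wait_for_vblank(struct foo_ctx *ctx)
{
	atomic_set(&ctx->wait_vsync_event, 1);

	/* Sleep until the IRQ handler clears the flag, or give up after
	 * 50ms (one frame at a 20Hz refresh rate). */
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				HZ / 20))
		pr_debug("vblank wait timed out\n");
}

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct foo_ctx *ctx = arg;

	/* Clear the flag and wake any waiter on every VSYNC. */
	if (atomic_read(&ctx->wait_vsync_event)) {
		atomic_set(&ctx->wait_vsync_event, 0);
		wake_up(&ctx->wait_vsync_queue);
	}
	return IRQ_HANDLED;
}

Unlike the removed polling loop, a waiter here consumes no CPU while blocked and is woken by the first interrupt after it armed the flag.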
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 00000000000..b4f9ca1fd85
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21 (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22 (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31 (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory for output DMA */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory for input DMA */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x) (((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
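The EXYNOS_CIOYSA()/EXYNOS_CIOCBSA()/EXYNOS_CIOCRSA() helpers above hide the fact that the 32 per-plane output buffers live in two non-contiguous register blocks: buffers 1-4 at the original offsets and buffers 5-32 in the FIMC v5.1 extension block. A small host-side check of that index-to-offset mapping (register offsets are copied from the header; the main() harness is only for illustration):

/* Host-side demonstration of the ping-pong index-to-offset mapping used
 * by EXYNOS_CIOYSA(): indexes 0..3 fall in the original CIOYSA1..4 block,
 * indexes 4..31 in the FIMC v5.1 CIOYSA5..32 block.
 */
#include <assert.h>
#include <stdio.h>

#define EXYNOS_CIOYSA1	(0x18)
#define EXYNOS_CIOYSA5	(0x200)
#define DEF_PP		4

#define EXYNOS_CIOYSA(__x) \
	(((__x) < DEF_PP) ? \
	 (EXYNOS_CIOYSA1 + (__x) * 4) : \
	 (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))

int main(void)
{
	/* First four buffers sit in the original start-address block. */
	assert(EXYNOS_CIOYSA(0) == 0x18);	/* EXYNOS_CIOYSA1 */
	assert(EXYNOS_CIOYSA(3) == 0x24);	/* EXYNOS_CIOYSA4 */

	/* Buffer 5 onwards jumps to the v5.1 extension block at 0x200. */
	assert(EXYNOS_CIOYSA(4) == 0x200);	/* EXYNOS_CIOYSA5  */
	assert(EXYNOS_CIOYSA(31) == 0x26c);	/* EXYNOS_CIOYSA32 */

	for (int i = 0; i < 32; i++)
		printf("buf %2d -> Y base address register 0x%03x\n",
		       i, EXYNOS_CIOYSA(i));
	return 0;
}

The CB and CR variants follow the same shape with their own base offsets.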
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 00000000000..9ad592707aa
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x) ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
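Most of the G-Scaler definitions above come in pairs of a shifted-field macro and a matching mask, so register values are composed by OR-ing fields and decoded by masking and shifting back. A host-side illustration using the GSC_SRCIMG_SIZE fields (values copied from the header; the harness itself is not part of the patch):

/* Compose and decompose a G-Scaler source-image-size register value. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GSC_SRCIMG_HEIGHT_MASK	(0x1fff << 16)
#define GSC_SRCIMG_HEIGHT(x)	((x) << 16)
#define GSC_SRCIMG_WIDTH_MASK	(0x3fff << 0)
#define GSC_SRCIMG_WIDTH(x)	((x) << 0)

int main(void)
{
	uint32_t cfg;

	/* Pack a 1920x1080 source image into the register layout. */
	cfg = GSC_SRCIMG_WIDTH(1920) | GSC_SRCIMG_HEIGHT(1080);

	/* Unpack with the matching masks. */
	assert((cfg & GSC_SRCIMG_WIDTH_MASK) == 1920);
	assert(((cfg & GSC_SRCIMG_HEIGHT_MASK) >> 16) == 1080);

	printf("GSC_SRCIMG_SIZE = 0x%08x\n", cfg);
	return 0;
}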
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718..ef1b3eb3ba6 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
/* Video related registers */
#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
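The HDMI_AVI_BYTE()/HDMI_AUI_BYTE() change above shifts the offset calculation by one register, which only makes sense if callers pass 1-based infoframe byte numbers: byte 1 should land on the first data register at core offset 0x0720. A quick arithmetic check (HDMI_CORE_BASE() is defined elsewhere in regs-hdmi.h; the identity stand-in below is an assumption made only so the offsets can be computed in isolation):

#include <assert.h>

#define HDMI_CORE_BASE(x)	(x)	/* stand-in for the real base macro */

#define HDMI_AVI_BYTE_OLD(n)	HDMI_CORE_BASE(0x0720 + 4 * (n))
#define HDMI_AVI_BYTE_NEW(n)	HDMI_CORE_BASE(0x0720 + 4 * (n - 1))

int main(void)
{
	/* The old macro skipped the first register for 1-based callers. */
	assert(HDMI_AVI_BYTE_OLD(1) == 0x0724);

	/* After the fix, byte 1 lands on 0x0720 and byte 13 on 0x0750. */
	assert(HDMI_AVI_BYTE_NEW(1) == 0x0720);
	assert(HDMI_AVI_BYTE_NEW(13) == 0x0750);
	return 0;
}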
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 00000000000..a09ac6e180d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
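The rotator header above also carries small arithmetic helpers: ROT_ALIGN() rounds a length to the nearest multiple of 1 << align through a caller-supplied mask, and the buffer-size macros pack width and height into one register. A host-side check of both (values copied from the header; the harness is only for illustration):

#include <assert.h>

#define ROT_SET_BUF_SIZE_H(x)	((x) << 16)
#define ROT_SET_BUF_SIZE_W(x)	((x) << 0)
#define ROT_GET_BUF_SIZE_H(x)	((x) >> 16)
#define ROT_GET_BUF_SIZE_W(x)	((x) & 0xffff)

#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask))

int main(void)
{
	unsigned int size;

	/* align = 2 -> round to the nearest multiple of 4 (mask ~0x3). */
	assert(ROT_ALIGN(9,  2, ~0x3u) == 8);
	assert(ROT_ALIGN(10, 2, ~0x3u) == 12);
	assert(ROT_ALIGN(11, 2, ~0x3u) == 12);

	/* Width/height packed into one 32-bit buffer-size register. */
	size = ROT_SET_BUF_SIZE_W(1280) | ROT_SET_BUF_SIZE_H(720);
	assert(ROT_GET_BUF_SIZE_W(size) == 1280);
	assert(ROT_GET_BUF_SIZE_H(size) == 720);
	return 0;
}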
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b6..23e14e93991 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
/* Cedarview */
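The gma500 hunks that follow are a mechanical conversion from the connector-specific property helpers to the generic drm_mode_object ones; judging from the call sites, the new helpers take the connector's embedded &connector->base instead of the connector itself. A sketch of the pattern (the surrounding function is illustrative, not from the driver):

#include <drm/drm_crtc.h>

/* Illustrative only: shows the old call next to its replacement. */
static void example_attach_scaling_prop(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	/* Old: drm_connector_attach_property(connector,
	 *		dev->mode_config.scaling_mode_property,
	 *		DRM_MODE_SCALE_FULLSCREEN);
	 * New: operate on the embedded mode object instead: */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_FULLSCREEN);
}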
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf32..51044cc55cf 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edf..e223b500022 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property, &curValue))
return -1;
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5..d81dbc3368f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curValue))
return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e..2d4ab48f07a 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector, property, &val))
+ if (drm_object_property_get_value(&connector->base, property, &val))
goto set_prop_error;
if (val == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector, property,
+ if (drm_object_property_set_value(&connector->base, property,
value))
goto set_prop_error;
else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
- if (!dev || ((pipe != 0) && (pipe != 2))) {
+ if (pipe != 0 && pipe != 2) {
DRM_ERROR("Invalid parameter\n");
return;
}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*attach properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c..74485dc4394 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a536..30adbbe2302 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
extern void oaktrail_hdmi_save(struct drm_device *dev);
extern void oaktrail_hdmi_restore(struct drm_device *dev);
extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72..3071526bc3c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
+ if (pipe == 1) {
+ oaktrail_crtc_hdmi_dpms(crtc, mode);
+ return;
+ }
+
if (!gma_power_begin(dev, true))
return;
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
+ if (pipe == 1)
+ return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
if (!gma_power_begin(dev, true))
return 0;
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a..08747fd7105 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
- .hdmi_mask = (1 << 0),
+ .hdmi_mask = (1 << 1),
.lvds_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f3..f036f1fc161 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
+static void wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
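The two values in the comment can be reproduced by hand from the scaling above. A standalone check, assuming the usual DMT timings for those modes (1024x768@60: htotal 1344, clock 65000 kHz; 1280x1024@60: htotal 1688, clock 108000 kHz — these timing numbers are not taken from the driver):

#include <stdio.h>

static unsigned int scaled_htotal(unsigned int crtc_htotal, unsigned int clock_khz)
{
	/* Same scaling as htotal_calculate() above */
	return (crtc_htotal - 1) * 200 * 1000 / clock_khz;
}

int main(void)
{
	printf("1024x768:  0x%04x\n", scaled_htotal(1344, 65000));	/* 0x1024 */
	printf("1280x1024: 0x%04x\n", scaled_htotal(1688, 108000));	/* 0x0c34 */
	return 0;
}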
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
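The 1024x768 example in the comment checks out against the formulas above, assuming the usual 65000 kHz pixel clock for that mode and the refclk of 25000 that the mode_set code below passes in: with np = 1 and nr = 39 (stored as nr - 1 = 0x26), nf = round(65000 * 10 * 1 * 39 / 25000) = 1014 = 0x3f6, and 0x3f6 << 14 gives the quoted 0x0fd8000. A tiny standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int target = 65000, refclk = 25000;	/* kHz, assumed */
	unsigned int np = 1, nr = 39;			/* stored as nr - 1 = 0x26 */
	/* DIV_ROUND_CLOSEST equivalent */
	unsigned int nf = (target * 10 * np * nr + refclk / 2) / refclk;

	printf("nf = 0x%x, stored nf = 0x%x\n", nf, nf << 14);	/* 0x3f6, 0xfd8000 */
	return 0;
}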
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+ udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+ /* break out if the SCU doesn't clear the busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+/*
+ * You don't want to know, you really really don't want to know....
+ *
+ * This is magic. However it's safe magic because of the way the platform
+ * works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void __iomem *base;
+ unsigned long scu_ipc_mmio = 0xff11c000UL;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map scu mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* Reset controller */
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Set the DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ /* wait for pipe off */
+ udelay(150);
+
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+
+ /* wait for dpll off */
+ udelay(150);
+
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+ }
+
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+
+ /* LNC Chicken Bits - Squawk! */
+ REG_WRITE(0x70400, 0x4000);
+
+ return;
+}
+
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
struct i2c_adapter *i2c_adap;
struct edid *edid;
- struct drm_display_mode *mode, *t;
- int i = 0, ret = 0;
+ int ret = 0;
+ /*
+ * FIXME: We need to figure this lot out. In theory we can
+ * read the EDID somehow but I've yet to find working reference
+ * code.
+ */
i2c_adap = i2c_get_adapter(3);
if (i2c_adap == NULL) {
DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
-
- /*
- * prune modes that require frame buffer bigger than stolen mem
- */
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
- i++;
- drm_mode_remove(connector, mode);
- }
- }
- return ret - i;
+ return ret;
}
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_sysfs_connector_add(connector);
+ dev_info(dev->dev, "HDMI initialised.\n");
return;
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
dev_priv->hdmi_priv = hdmi_dev;
oaktrail_hdmi_audio_disable(dev);
+
+ dev_info(dev->dev, "HDMI hardware present.\n");
+
return;
free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55e..325013a9c48 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
- drm_connector_property_get_value(
- connector,
+ drm_object_property_get_value(
+ &connector->base,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e..9fa5fa2e619 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705db..a4cc777ab7a 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e..b865d0728e2 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
- drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
- drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_connector_attach_property(connector, conf->tv_mode_property,
+ drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
- drm_connector_attach_property(connector, conf->tv_brightness_property,
+ drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
- drm_connector_attach_property(connector, conf->tv_contrast_property,
+ drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
- drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_connector_attach_property(connector, priv->scale_property,
+ drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7..e6a11ca85ea 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
- if (!work->pending) {
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Stall check enabled, ");
else
seq_printf(m, "Stall check waiting for page flip ioctl, ");
- seq_printf(m, "%d prepares\n", work->pending);
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- seq_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- seq_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
+ seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1;
+ u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
- I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_READ_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode read of freq table timed out\n");
- continue;
- }
- ia_freq = I915_READ(GEN6_PCODE_DATA);
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(dev_priv,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx) {
seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
+ describe_obj(m, dev_priv->ips.pwrctx);
seq_printf(m, "\n");
}
- if (dev_priv->renderctx) {
+ if (dev_priv->ips.renderctx) {
seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
+ describe_obj(m, dev_priv->ips.renderctx);
seq_printf(m, "\n");
}
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 61ae104dca8..99daa896105 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
}
/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
-
- memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
- 0, PAGE_SIZE);
-
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
- return 0;
-}
-
-/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
- struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(ring);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->counter;
+ return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1014,6 +986,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1068,7 +1046,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1088,6 +1066,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+ ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1305,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
+ INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1491,19 +1472,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- ret = -EIO;
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
goto put_bridge;
- }
-
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto put_gmch;
- }
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1561,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
- /* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret)
- goto out_gem_unload;
- }
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1621,6 +1584,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1643,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- intel_gmch_remove();
+ i915_gem_gtt_fini(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1721,6 +1686,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
+ cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b..530db83ef32 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect [default], 1=lid open, "
- "-1=lid closed)");
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- int id;
+ unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ console_resume_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
- intel_modeset_setup_hw_state(dev);
- drm_mode_config_reset(dev);
+ intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
}
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
- console_lock();
- intel_fbdev_set_suspend(dev, 0);
- console_unlock();
+ /*
+ * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ if (console_trylock()) {
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ } else {
+ schedule_work(&dev_priv->console_resume_work);
+ }
+
+ return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ intel_gt_reset(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ __i915_drm_thaw(dev);
+
return error;
}
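The console-lock handling above is a trylock-or-defer pattern: take the lock inline when it is free, otherwise push the fbdev unsuspend into console_resume_work rather than stalling resume. A stripped-down sketch of the same shape (everything except console_trylock/console_unlock/schedule_work is illustrative):

#include <linux/console.h>
#include <linux/workqueue.h>

/* Slow path: runs from the workqueue when the console lock was contended. */
static void deferred_console_work(struct work_struct *work)
{
	console_lock();
	/* ... printk-heavy work, e.g. fbdev unsuspend ... */
	console_unlock();
}

/* Fast path: called from resume; never blocks on the console lock. */
static void resume_fast_path(struct work_struct *deferred)
{
	if (console_trylock()) {
		/* lock was free: do the work inline */
		console_unlock();
	} else {
		/* contended: defer instead of stalling resume */
		schedule_work(deferred);
	}
}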
int i915_resume(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- ret = i915_drm_thaw(dev);
+ intel_gt_reset(dev);
+
+ /*
+ * Platforms with opregion should have a sane BIOS; older ones (gen3 and
+ * earlier) need this since the BIOS might clear all our scratch PTEs.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ !dev_priv->opregion.header) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ ret = __i915_drm_thaw(dev);
if (ret)
return ret;
@@ -833,7 +883,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (intel_info->is_haswell || intel_info->is_valleyview)
+ if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -1140,12 +1190,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
+ switch (reg) {
+ case _3D_CHICKEN3:
+ case IVB_CHICKEN3:
+ case GEN7_COMMON_SLICE_CHICKEN1:
+ case GEN7_L3CNTLREG1:
+ case GEN7_L3_CHICKEN_MODE_REGISTER:
+ case GEN7_ROW_CHICKEN2:
+ case GEN7_L3SQCREG4:
+ case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+ case GEN7_HALF_SLICE_CHICKEN1:
+ case GEN6_MBCTL:
+ case GEN6_UCGCTL2:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+ * chip from rc6 before touching it for real. MI_MODE is masked, hence
+ * harmless to write 0 into. */
+ I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1255,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+ I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f416..ed305957557 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
/* Interface history:
*
* 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
- bool force_bit;
+ u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- const struct intel_device_info *info;
-
- int relative_constants_mode;
-
- void __iomem *regs;
-
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
-
- struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of the gmbus and gpio block.
- */
- uint32_t gpio_mmio_base;
-
- struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- uint32_t counter;
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-
- struct resource mch_res;
-
- atomic_t irq_received;
-
- /* protects the irq masks */
- spinlock_t irq_lock;
-
- /* DPIO indirect register protection */
- spinlock_t dpio_lock;
-
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
-
- u32 hotplug_supported_mask;
- struct work_struct hotplug_work;
-
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- struct intel_opregion opregion;
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- struct notifier_block lid_notifier;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
- struct workqueue_struct *wq;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- /* PCH chipset type */
- enum intel_pch pch_type;
-
- unsigned long quirks;
-
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The variables below and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct delayed_work delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ struct pci_dev *bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ struct resource mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct work_struct hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct error_work;
+ struct completion error_completion;
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
- const struct intel_gtt *gtt;
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -706,9 +779,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- u32 *l3_remap_info;
-
struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
/**
* List of objects currently involved in rendering.
@@ -785,19 +857,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
- } dri1;
-
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +870,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -820,46 +880,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
+ struct intel_l3_parity l3_parity;
+
/* gen6+ rps state */
- struct {
- struct work_struct work;
- u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
-
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- } rps;
+ struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
- struct {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
- } ips;
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -871,14 +902,27 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console may be contended at resume, but we don't
+ * want resume to block on it.
+ */
+ struct work_struct console_resume_work;
+
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
+
+ bool fdi_rx_polarity_reversed;
+
+ struct i915_suspend_saved_registers regfile;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1057,6 +1101,7 @@ struct drm_i915_gem_object {
*/
atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1120,9 +1165,17 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1148,6 +1201,9 @@ struct drm_i915_file_private {
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1168,6 +1224,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
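These _DEVICE_ID_TYPE values are meant to be compared against the PCH (ISA bridge) PCI device id after masking with INTEL_PCH_DEVICE_ID_MASK, which is presumably also where the new pch_id field gets its value; the driver-side detection is intel_detect_pch(), declared further below. A standalone sketch of the masking, with a made-up sample id:

/* Sketch: classify a PCH by its masked PCI device id. The mask and type
 * values are copied from the defines above; the sample id is invented. */
#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE    0x8c00

static const char *classify_pch(unsigned short isa_bridge_device)
{
        switch (isa_bridge_device & INTEL_PCH_DEVICE_ID_MASK) {
        case INTEL_PCH_IBX_DEVICE_ID_TYPE: return "IbexPeak (IBX)";
        case INTEL_PCH_CPT_DEVICE_ID_TYPE: return "CougarPoint (CPT)";
        case INTEL_PCH_LPT_DEVICE_ID_TYPE: return "LynxPoint (LPT)";
        default:                           return "unknown";
        }
}

int main(void)
{
        printf("%s\n", classify_pch(0x1c46));   /* hypothetical id -> CPT */
        return 0;
}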
@@ -1250,6 +1313,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1321,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1368,8 +1433,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno);
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1387,7 +1451,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1563,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1667,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1701,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b285da4449..da3c82e301b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return ret;
}
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
- if (!obj->map_and_fenceable) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- goto unlock;
- }
- if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
- if (ret)
- goto unlock;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
- }
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret)
+ goto unlock;
- if (!obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto unlock;
-
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ goto unpin;
obj->fault_mappable = true;
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+ i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1528,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
if (obj->base.map_list.map)
return 0;
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
/* Badly fragmented mmap space? The only way we can recover
* space is by destroying unwanted objects. We can't randomly release
@@ -1542,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
i915_gem_shrink_all(dev_priv);
- return drm_gem_create_mmap_offset(&obj->base);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1707,10 +1702,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ list_del(&obj->gtt_list);
+
ops->put_pages(obj);
obj->pages = NULL;
- list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1868,11 +1867,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1933,26 +1932,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
+ /* The hardware uses various monotonic 32-bit counters; if we
+ * detect that they will wrap around, we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
- return seqno;
+ return 0;
}
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
- return ring->outstanding_lazy_request;
+ *seqno = dev_priv->next_seqno++;
+ return 0;
}
int
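Note that only the hardware semaphore counters need the idle-and-reset treatment above; the driver's own ordering checks go through i915_seqno_passed(), whose signed-difference comparison (shown earlier in this patch) stays correct across a 32-bit wrap. A minimal standalone check of that comparison, not part of the patch:

/* Wrap-safe seqno comparison, mirroring i915_seqno_passed(). */
#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        /* The signed difference stays small across the 0xffffffff -> 0 wrap. */
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 3));                     /* ordinary order     */
        assert(seqno_passed(2, 0xfffffffeu));           /* 2 is "after" wrap  */
        assert(!seqno_passed(0xfffffffeu, 2));
        return 0;
}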
@@ -1963,7 +1990,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
- u32 seqno;
int was_empty;
int ret;
@@ -1982,7 +2008,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
- seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1991,15 +2016,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring, &seqno);
+ ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
- trace_i915_gem_request_add(ring, seqno);
-
- request->seqno = seqno;
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
@@ -2017,23 +2040,24 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
+ trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work, HZ);
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
if (out_seqno)
- *out_seqno = seqno;
+ *out_seqno = request->seqno;
return 0;
}
@@ -2131,7 +2155,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
@@ -2140,10 +2163,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2218,7 +2237,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
return;
}
@@ -2236,7 +2256,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -2386,7 +2407,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno);
if (!ret)
- from->sync_seqno[idx] = seqno;
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2469,14 +2494,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
- if (list_empty(&ring->active_list))
- return 0;
-
- return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2506,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
@@ -2879,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
+ struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2923,74 +2940,63 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ i915_gem_object_pin_pages(obj);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
search_free:
if (map_and_fenceable)
- free_space =
- drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
+ 0, dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- false);
-
- if (free_space != NULL) {
- if (map_and_fenceable)
- obj->gtt_space =
- drm_mm_get_block_range_generic(free_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
- else
- obj->gtt_space =
- drm_mm_get_block_generic(free_space,
- size, alignment, obj->cache_level,
- false);
- }
- if (obj->gtt_space == NULL) {
+ ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level);
+ if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
- if (ret)
- return ret;
+ if (ret == 0)
+ goto search_free;
- goto search_free;
+ i915_gem_object_unpin_pages(obj);
+ kfree(node);
+ return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level))) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return -EINVAL;
}
-
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return ret;
}
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
-
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_offset = obj->gtt_space->start;
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
fenceable =
- obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
+ i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3059,7 +3065,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3454,11 +3460,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3832,7 +3843,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
- if (!dev_priv->mm.l3_remap_info)
+ if (!dev_priv->l3_parity.remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3852,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+ if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->mm.l3_remap_info[i/4])
+ if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3887,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
-
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
- }
-}
-
static bool
intel_enable_blt(struct drm_device *dev)
{
@@ -3960,7 +3909,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (!intel_enable_gtt())
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4244,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4382,7 +4331,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return 0;
}
@@ -4407,6 +4356,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4417,10 +4379,18 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
+ bool unlock = true;
int cnt;
- if (!mutex_trylock(&dev->struct_mutex))
- return 0;
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4436,6 +4406,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
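The shrinker change above appears intended to let reclaim proceed when the shrinker is entered from an allocation made while struct_mutex is already held by the current task: if the trylock fails but mutex_is_locked_by() reports the current task as owner and shrinker_no_lock_stealing is clear, the scan runs and the final unlock is skipped. A standalone sketch of that shape, with a toy lock standing in for the kernel mutex:

/* Toy model of the trylock-or-reuse pattern used by the shrinker above.
 * The explicit owner field stands in for mutex->owner. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_mutex {
        bool locked;
        const void *owner;
};

static bool toy_trylock(struct toy_mutex *m, const void *task)
{
        if (m->locked)
                return false;
        m->locked = true;
        m->owner = task;
        return true;
}

static long toy_shrink(struct toy_mutex *m, const void *task,
                       bool no_lock_stealing)
{
        bool unlock = true;

        if (!toy_trylock(m, task)) {
                if (m->owner != task || no_lock_stealing)
                        return 0;       /* held by someone else: give up */
                unlock = false;         /* we already hold it: reuse it  */
        }

        /* ... count / reclaim objects here ... */

        if (unlock) {
                m->owner = NULL;
                m->locked = false;
        }
        return 42;      /* pretend we found 42 reclaimable pages */
}

int main(void)
{
        struct toy_mutex m = { false, NULL };
        int me;

        printf("%ld\n", toy_shrink(&m, &me, false));    /* lock free: 42   */
        toy_trylock(&m, &me);
        printf("%ld\n", toy_shrink(&m, &me, false));    /* reentrant: 42   */
        printf("%ld\n", toy_shrink(&m, &me, true));     /* stealing off: 0 */
        return 0;
}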
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d..a3f06bcad55 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
- ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
- u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from_obj, ring, seqno);
+ i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c2..7be4241e824 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f..d6a994a0739 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (unlikely(target_offset == 0)) {
- DRM_DEBUG("No GTT space found for object %d\n",
- reloc->target_handle);
- return ret;
- }
-
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring, seqno);
+ i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
}
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 seqno;
u32 mask;
+ u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -983,26 +983,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but let's be paranoid and do it
+ * unconditionally for now. */
+ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
- seqno = i915_gem_next_request_seqno(ring);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
- if (seqno < ring->sync_seqno[i]) {
- /* The GPU can not handle its semaphore value wrapping,
- * so every billion or so execbuffers, we need to stall
- * the GPU in order to reset the counters.
- */
- ret = i915_gpu_idle(dev);
- if (ret)
- goto err;
- i915_gem_retire_requests(dev);
-
- BUG_ON(ring->sync_seqno[i]);
- }
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1028,8 +1019,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- trace_i915_gem_ring_dispatch(ring, seqno);
-
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1040,17 +1029,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len);
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
} else {
- ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
- i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d3..2c150dee78a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
#include "i915_trace.h"
#include "intel_drv.h"
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_LLC_MLC:
+ /* Haswell doesn't set L3 this way */
+ if (IS_HASWELL(dev))
+ pte |= GEN6_PTE_CACHE_LLC;
+ else
+ pte |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ if (IS_HASWELL(dev))
+ pte |= HSW_PTE_UNCACHED;
+ else
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+
+ return pte;
+}
+
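pte_encode() packs a 32-bit PTE: bits 31:12 carry the low address bits, bits 11:4 carry physical address bits 39:32 via GEN6_GTT_ADDR_ENCODE(), and the low bits carry the valid and cache-level flags defined above. A quick standalone check of that bit packing, using a made-up 36-bit page address:

/* Standalone check of the GEN6 PTE bit packing used by pte_encode(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_VALID                  (1 << 0)
#define GEN6_PTE_CACHE_LLC              (2 << 1)

int main(void)
{
        uint64_t addr = 0x1234567000ull;        /* made-up 36-bit page address */
        uint32_t pte = GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC |
                       (uint32_t)GEN6_GTT_ADDR_ENCODE(addr);

        /* bits 31:12 = addr[31:12], bits 11:4 = addr[39:32] */
        assert((pte & 0xfffff000u) == (uint32_t)(addr & 0xfffff000u));
        assert(((pte >> 4) & 0xff) == ((addr >> 32) & 0xff));
        printf("pte = 0x%08x\n", pte);
        return 0;
}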
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- uint32_t *pt_vaddr;
- uint32_t scratch_pte;
+ gtt_pte_t *pt_vaddr;
+ gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
- scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+ scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
+ ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
- uint32_t pte_flags)
+ enum i915_cache_level cache_level)
{
- uint32_t *pt_vaddr, pte;
+ gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[j] = pte | pte_flags;
+ pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+ cache_level);
/* grab the next page */
if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- uint32_t pte_flags = GEN6_PTE_VALID;
-
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte_flags |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- if (IS_HASWELL(obj->base.dev))
- pte_flags |= HSW_PTE_UNCACHED;
- else
- pte_flags |= GEN6_PTE_UNCACHED;
- break;
- default:
- BUG();
- }
-
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
- enum i915_cache_level cache_level)
+void i915_gem_init_ppgtt(struct drm_device *dev)
{
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- if (INTEL_INFO(dev)->gen >= 6)
- return AGP_USER_CACHED_MEMORY_LLC_MLC;
- /* Older chipsets do not have this extra level of CPU
- * cacheing, so fallthrough and request the PTE simply
- * as cached.
- */
- case I915_CACHE_LLC:
- return AGP_USER_CACHED_MEMORY;
- default:
- case I915_CACHE_NONE:
- return AGP_USER_MEMORY;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6) {
+ intel_gtt_clear_range(first_entry, num_entries);
+ return;
+ }
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st = obj->pages;
+ struct scatterlist *sg = st->sgl;
+ const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ int unused, i = 0;
+ unsigned int len, m = 0;
+ dma_addr_t addr;
+
+ for_each_sg(st->sgl, sg, st->nents, unused) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ i++;
+ }
+ }
+
+ BUG_ON(i > max_entries);
+ BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that even though
+ * registers and PTEs are within the same BAR, they may be subject to
+ * different NUMA access patterns. Therefore, even with the way we assume
+ * hardware should work, we must keep this posting read for paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+ if (INTEL_INFO(dev)->gen < 6) {
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ flags);
+ } else {
+ gen6_ggtt_bind_object(obj, cache_level);
+ }
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ i915_ggtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+ dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dma_addr))
+ return -EINVAL;
+#else
+ dma_addr = page_to_phys(page);
+#endif
+ dev_priv->mm.gtt->scratch_page = page;
+ dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+ return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(dev_priv->mm.gtt->scratch_page);
+ __free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+ static const int stolen_decoder[] = {
+ 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+ snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+ return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
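The helpers above turn GMCH control fields into sizes: the GGTT field counts 1 MiB units of PTE space (<< 20), the gen6 stolen field counts 32 MiB units (<< 25), and gen7 stolen goes through the decoder table. Worked arithmetic for some invented field values (the SNB_GMCH_*/IVB_GMCH_* shift and mask constants are defined outside this diff):

/* Worked example of the size math above, for made-up register fields. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical GGMS field of 2 -> 2 MiB of PTE space. */
        unsigned int gtt_bytes = 2 << 20;
        unsigned int entries = gtt_bytes / sizeof(uint32_t);   /* 4-byte PTEs */

        printf("GTT: %u MiB of PTEs = %u entries = %llu MiB mappable\n",
               gtt_bytes >> 20, entries,
               (unsigned long long)entries * 4096 >> 20);

        /* Hypothetical gen6 GMS field of 3 -> 3 * 32 MiB of stolen memory. */
        printf("gen6 stolen: %u MiB\n", (3u << 25) >> 20);

        /* Hypothetical gen7 GMS field of 9, via the decoder table above. */
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
        printf("gen7 stolen: %d MiB\n", stolen_decoder[9]);
        return 0;
}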
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ phys_addr_t gtt_bus_addr;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* On modern platforms we need not worry ourselves with the legacy
+ * hostbridge query stuff. Skip it entirely.
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ intel_gmch_remove();
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+ if (!dev_priv->mm.gtt)
+ return -ENOMEM;
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+#ifdef CONFIG_INTEL_IOMMU
+ dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+ /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+ gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+ dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+ /* i9xx_setup */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ dev_priv->mm.gtt->gtt_total_entries =
+ gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ else
+ dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+ dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+ /* 64/512MB is the current min/max we actually know of, but this is just a
+ * coarse sanity check.
+ */
+ if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+ dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+ DRM_ERROR("Unknown GMADR entries (%d)\n",
+ dev_priv->mm.gtt->gtt_mappable_entries);
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ goto err_out;
+ }
+
+ dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+ dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+ if (!dev_priv->mm.gtt->gtt) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ teardown_scratch_page(dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+ DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+ DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+ DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+ return 0;
+
+err_out:
+ kfree(dev_priv->mm.gtt);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->mm.gtt->gtt);
+ teardown_scratch_page(dev);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ kfree(dev_priv->mm.gtt);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b..2220dec3e5d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
- vbl = I915_READ(VBLANK(pipe));
+ vbl = I915_READ(VBLANK(cpu_transcoder));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay);
}
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- parity_error_work);
+ l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
}
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clearing the CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]);
}
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
- if (HAS_PCH_CPT(dev))
- hotplug_mask = SDE_HOTPLUG_MASK_CPT;
- else
- hotplug_mask = SDE_HOTPLUG_MASK;
-
ret = IRQ_HANDLED;
if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- if (pch_iir & hotplug_mask)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
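
The new HAS_BROKEN_CS_TLB branch records the render ring's private scratch object whenever the last command-streamer head address (ACTHD) falls inside that object's GTT range. A standalone sketch of the range test, with invented offsets and sizes:

/* Does an address fall inside [gtt_offset, gtt_offset + size)? */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct fake_obj {
        uint32_t gtt_offset;
        uint32_t size;
};

static bool acthd_in_object(uint32_t acthd, const struct fake_obj *obj)
{
        return acthd >= obj->gtt_offset &&
               acthd < obj->gtt_offset + obj->size;
}

int main(void)
{
        struct fake_obj scratch = { .gtt_offset = 0x100000, .size = 4096 };

        printf("inside=%d outside=%d\n",
               acthd_in_object(0x100010, &scratch),
               acthd_in_object(0x200000, &scratch));
        return 0;
}
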
@@ -1120,6 +1132,8 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1478,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || work->pending || !work->enable_stall_check) {
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
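
unpin_work->pending is now read atomically and compared against INTEL_FLIP_COMPLETE, so the stall check can distinguish a flip that is still queued from one whose completion IRQ has already arrived. A small C11 sketch of that pattern (the enum values stand in for the driver's INTEL_FLIP_* constants):

/* Only run the stall check while the flip is still pending. */
#include <stdio.h>
#include <stdatomic.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

struct unpin_work {
        atomic_int pending;
        int enable_stall_check;
};

static int flip_needs_check(struct unpin_work *work)
{
        if (!work)
                return 0;
        if (atomic_load(&work->pending) >= FLIP_COMPLETE)
                return 0;  /* completion IRQ already arrived */
        return work->enable_stall_check;
}

int main(void)
{
        struct unpin_work w = { .pending = FLIP_PENDING, .enable_stall_check = 1 };

        printf("while pending: %d\n", flip_needs_check(&w));
        atomic_store(&w.pending, FLIP_COMPLETE);
        printf("after complete: %d\n", flip_needs_check(&w));
        return 0;
}
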
@@ -1751,7 +1767,7 @@ void i915_hangcheck_elapsed(unsigned long data)
repeat:
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
/* drm_dma.h hooks
@@ -1956,6 +1972,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2012,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- dev_priv->gt_irq_mask = ~0;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2027,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2034,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
-#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2129,7 +2135,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2313,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2551,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2697,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
- INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c..186ee5c85b5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
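
_TRANSCODER() follows the same convention as _PIPE() and _PORT(): given the MMIO offsets of instance A and instance B, instance n is taken to live at A + n*(B - A). A standalone demo of the arithmetic, with illustrative A/B offsets rather than the header's real values:

/* Linear extrapolation from two register instances to the n-th one. */
#include <stdio.h>

#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))

#define FAKE_HTOTAL_A 0x60000
#define FAKE_HTOTAL_B 0x61000
#define FAKE_HTOTAL(trans) _TRANSCODER(trans, FAKE_HTOTAL_A, FAKE_HTOTAL_B)

int main(void)
{
        int t;

        for (t = 0; t < 3; t++)
                printf("HTOTAL(%d) = 0x%x\n", t, (unsigned)FAKE_HTOTAL(t));
        return 0;
}
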
@@ -40,6 +41,14 @@
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define IVB_GMCH_GMS_SHIFT 4
+#define IVB_GMCH_GMS_MASK 0xf
+
/* PCI config space */
@@ -105,23 +114,6 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
#define DPIO_FASTCLK_DISABLE 0x8100
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
+
/*
* Fence registers
*/
@@ -514,6 +517,7 @@
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
@@ -521,6 +525,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
@@ -528,7 +533,8 @@
# define MI_FLUSH_ENABLE (1 << 12)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -547,6 +553,8 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
@@ -661,6 +669,7 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +679,8 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1570,14 @@
#define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -2641,6 +2652,7 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2723,7 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3010,19 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3043,8 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3054,8 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3101,8 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3143,6 +3168,7 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3203,8 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3225,8 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@@ -3210,8 +3240,10 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -3246,12 +3278,6 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3327,22 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3451,13 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3473,21 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -3686,7 +3719,7 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3795,18 +3828,26 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3857,7 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3919,7 @@
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
@@ -3901,16 +3944,21 @@
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
-#define _FDI_RXA_MISC 0xf0010
-#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4003,6 +4051,11 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4103,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4161,8 @@
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4275,10 @@
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
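
GEN6_DECODE_RC6_VID maps a VID to millivolts as mv = vid*5 + 245 for vid > 0, so the inverse would be vid = (mv - 245)/5; the encode macro as added here evaluates a comparison instead, which does not look like that inverse. A sketch of the intended round trip, using illustrative helper functions rather than the driver's macros:

/* vid <-> millivolt conversion implied by the decode macro above. */
#include <stdio.h>

static int decode_rc6_vid(int vid)
{
        return vid > 0 ? vid * 5 + 245 : 0;
}

static int encode_rc6_vid(int mv)
{
        return mv >= 245 ? (mv - 245) / 5 : 0;
}

int main(void)
{
        int vid;

        for (vid = 1; vid <= 5; vid++) {
                int mv = decode_rc6_vid(vid);
                printf("vid %d -> %d mV -> vid %d\n", vid, mv, encode_rc6_vid(mv));
        }
        return 0;
}
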
@@ -4251,6 +4310,15 @@
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4448,39 @@
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
-#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
-#define PIPE_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+ TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
-#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
-#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_MASK (7<<20)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_PVSYNC (1<<17)
-#define PIPE_DDI_PHSYNC (1<<16)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
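
The per-pipe DDI function control registers become per-transcoder ones (including a dedicated eDP instance and new eDP input-select bits). A standalone sketch of decoding a TRANS_DDI_FUNC_CTL-style value with the masks defined above; the register value itself is invented:

/* Pull the enable bit, port, mode-select and bpc fields out of a
 * TRANS_DDI_FUNC_CTL-style value.  Bit positions follow the hunk above. */
#include <stdio.h>
#include <stdint.h>

#define TRANS_DDI_FUNC_ENABLE        (1u << 31)
#define TRANS_DDI_PORT_MASK          (7u << 28)
#define TRANS_DDI_MODE_SELECT_MASK   (7u << 24)
#define TRANS_DDI_MODE_SELECT_DP_SST (2u << 24)
#define TRANS_DDI_BPC_MASK           (7u << 20)

int main(void)
{
        uint32_t val = TRANS_DDI_FUNC_ENABLE |
                       (1u << 28) |                   /* say, port B */
                       TRANS_DDI_MODE_SELECT_DP_SST;  /* BPC field left at 8 bpc (0) */

        printf("enabled=%u port=%u mode=%u bpc_field=%u\n",
               (unsigned)!!(val & TRANS_DDI_FUNC_ENABLE),
               (unsigned)((val & TRANS_DDI_PORT_MASK) >> 28),
               (unsigned)((val & TRANS_DDI_MODE_SELECT_MASK) >> 24),
               (unsigned)((val & TRANS_DDI_BPC_MASK) >> 20));
        return 0;
}
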
@@ -4420,12 +4494,16 @@
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
@@ -4444,6 +4522,7 @@
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4539,10 @@
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4560,12 @@
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4490,8 +4575,8 @@
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SCC (1<<28)
-#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4500,7 +4585,7 @@
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4602,36 @@
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
-
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+ _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9..63d4d30c39d 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
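
Throughout i915_suspend.c the flat dev_priv->saveFOO fields move under dev_priv->regfile, collecting the suspend/resume register snapshot in one substruct. A minimal sketch of that layout change; the struct and field names below are illustrative, not the driver's real definitions:

/* Group register snapshots in a nested struct instead of loose fields. */
#include <stdio.h>
#include <stdint.h>

struct saved_registers {
        uint8_t  saveMSR;
        uint32_t saveDSPACNTR;
        uint64_t saveFENCE[16];
};

struct fake_i915_private {
        /* ... other driver state ... */
        struct saved_registers regfile;
};

int main(void)
{
        struct fake_i915_private priv;

        priv.regfile.saveMSR = 0x67;            /* was: priv.saveMSR */
        priv.regfile.saveDSPACNTR = 0x80000000; /* was: priv.saveDSPACNTR */

        printf("MSR=0x%02x DSPACNTR=0x%08x\n",
               (unsigned)priv.regfile.saveMSR,
               (unsigned)priv.regfile.saveDSPACNTR);
        return 0;
}
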
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
+ dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
+ dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->saveGR[0x10] =
+ dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
+ dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
+ dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
+ dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
+ dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
+ dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
+ dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
+ dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
+ dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->saveCURABASE = I915_READ(_CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(_FPA0);
- dev_priv->saveFPA1 = I915_READ(_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(_FPB0);
- dev_priv->saveFPB1 = I915_READ(_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
return;
}
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
case 7:
case 6:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
}
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
- I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
- I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
- I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
- I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
- I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
- I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
- I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
- I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
return;
}
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't save them in KMS mode */
+ /* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->saveDP_B = I915_READ(DP_B);
- dev_priv->saveDP_C = I915_READ(DP_C);
- dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: save TV & SDVO state */
-
- /* Only save FBC state on the platform that supports FBC */
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+ }
+
+ /* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
-
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->saveLVDS);
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
- dev_priv->saveMCHBAR_RENDER_STANDBY);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->saveDP_B);
- I915_WRITE(DP_C, dev_priv->saveDP_C);
- I915_WRITE(DP_D, dev_priv->saveDP_D);
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
}
- /* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
i915_save_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
}
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
mutex_unlock(&dev->struct_mutex);
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->saveIER);
- I915_WRITE(IMR, dev_priv->saveIMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117..9462081b1e6 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return -EPERM;
if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- if (!dev_priv->mm.l3_remap_info) {
+ if (!dev_priv->l3_parity.remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->mm.l3_remap_info = temp;
+ dev_priv->l3_parity.remap_info = temp;
- memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+ memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a..3db4a681771 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
+ __field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
+ __entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+ __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 56846ed5ee5..55ffba1f581 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
- if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6345878ae1e..9293878ec7e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
- adpa = ADPA_HOTPLUG_BITS;
+ if (HAS_PCH_SPLIT(dev))
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
+ }
+
}
/*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_HASWELL(dev) || IS_I830(dev))
+ if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- crt->base.get_hw_state = intel_crt_get_hw_state;
+ if (IS_HASWELL(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
- if (HAS_PCH_SPLIT(dev)) {
- u32 adpa;
-
- adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
-
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
- }
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+ /*
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity reversal bit or not, instead of relying on the BIOS.
+ */
+ if (HAS_PCH_LPT(dev))
+ dev_priv->fdi_rx_polarity_reversed =
+ !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0..4bad0f72401 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+}
+
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_800MV_3_5DB_HSW
};
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
-
- /* Configure CPU PLL, wait for warmup */
- I915_WRITE(SPLL_CTL,
- SPLL_PLL_ENABLE |
- SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SCC);
+ u32 temp, i, rx_ctl_val;
- /* Use SPLL to drive the output when in FDI mode */
- I915_WRITE(PORT_CLK_SEL(PORT_E),
- PORT_CLK_SEL_SPLL);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(PORT_E));
-
- udelay(20);
-
- /* Start the training iterating through available voltages and emphasis */
- for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+ * "mode set sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 19);
+ if (dev_priv->fdi_rx_polarity_reversed)
+ rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
- temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
- temp |
- DDI_BUF_CTL_ENABLE |
- DDI_PORT_WIDTH_X2 |
- hsw_ddi_buf_ctl_values[i]);
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
udelay(600);
- /* We need to program FDI_RX_MISC with the default TP1 to TP2
- * values before enabling the receiver, and configure the delay
- * for the FDI timing generator to 90h. Luckily, all the other
- * bits are supposed to be zeroed, so we can write those values
- * directly.
- */
- I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
- FDI_RX_FDI_DELAY_90);
-
- /* Enable CPU FDI Receiver with auto-training */
- reg = FDI_RX_CTL(pipe);
- I915_WRITE(reg,
- I915_READ(reg) |
- FDI_LINK_TRAIN_AUTO |
- FDI_RX_ENABLE |
- FDI_LINK_TRAIN_PATTERN_1_CPT |
- FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_PORT_WIDTH_2X_LPT |
- FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
- PIPE_DDI_MODE_SELECT_FDI |
- PIPE_DDI_FUNC_ENABLE |
- PIPE_DDI_PORT_WIDTH_X2;
- I915_WRITE(DDI_FUNC_CTL(pipe),
- temp);
- break;
- } else {
- DRM_ERROR("Error training BUF_CTL %d\n", i);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
- /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) &
- ~DP_TP_CTL_ENABLE);
- I915_WRITE(FDI_RX_CTL(pipe),
- I915_READ(FDI_RX_CTL(pipe)) &
- ~FDI_RX_PLL_ENABLE);
- continue;
+ return;
}
- }
- DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
- /* For now, we don't do any proper output detection and assume that we
- * handle HDMI only */
-
- switch(port){
- case PORT_A:
- /* We don't handle eDP and DP yet */
- DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
- break;
- /* Assume that the ports B, C and D are working in HDMI mode for now */
- case PORT_B:
- case PORT_C:
- case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
- break;
- default:
- DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
- port);
- break;
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
}
+
+ DRM_ERROR("FDI link training failed!\n");
}
/* WRPLL clock dividers */
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{298000, 2, 21, 19},
};
-void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int port = intel_hdmi->ddi_port;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
- int p, n2, r2;
- u32 temp, i;
+ int type = intel_encoder->type;
- /* On Haswell, we need to enable the clocks and prepare DDI function to
- * work in HDMI mode for this pipe.
- */
- DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ intel_dp->DP |= DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ WARN(1, "Unexpected DP lane count %d\n",
+ intel_dp->lane_count);
+ break;
+ }
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic
+ * and a new set of registers, so we leave it for future
+ * patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+ intel_crtc->pipe);
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+ u32 i;
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
- if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ if (clock <= wrpll_tmds_clock_table[i].clock)
break;
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ *p = wrpll_tmds_clock_table[i].p;
+ *n2 = wrpll_tmds_clock_table[i].n2;
+ *r2 = wrpll_tmds_clock_table[i].r2;
- if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
- DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
- wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+ if (wrpll_tmds_clock_table[i].clock != clock)
+ DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, clock);
- DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- crtc->mode.clock, p, n2, r2);
+ DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, *p, *n2, *r2);
+}
- /* Enable LCPLL if disabled */
- temp = I915_READ(LCPLL_CTL);
- if (temp & LCPLL_PLL_DISABLE)
- I915_WRITE(LCPLL_CTL,
- temp & ~LCPLL_PLL_DISABLE);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t reg, val;
- /* Configure WR PLL 1, program the correct divider values for
- * the desired frequency and wait for warmup */
- I915_WRITE(WRPLL_CTL1,
- WRPLL_PLL_ENABLE |
- WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p));
+ /* TODO: reuse PLLs when possible (compare values) */
- udelay(20);
+ intel_ddi_put_crtc_pll(crtc);
- /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
- * this port for connection.
- */
- I915_WRITE(PORT_CLK_SEL(port),
- PORT_CLK_SEL_WRPLL1);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(port));
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ int p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+ WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+ "WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ reg = SPLL_CTL;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ }
+
+ WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+ "SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
+ }
+
+ I915_WRITE(reg, val);
udelay(20);
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic and a new set
- * of registers, so we leave it for future patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ return true;
+}
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ temp |= TRANS_MSA_8_BPC;
+ WARN(1, "%d bpp unsupported by DDI function\n",
+ intel_crtc->bpp);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
+}
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (intel_crtc->bpp) {
case 18:
- temp |= PIPE_DDI_BPC_6;
+ temp |= TRANS_DDI_BPC_6;
break;
case 24:
- temp |= PIPE_DDI_BPC_8;
+ temp |= TRANS_DDI_BPC_8;
break;
case 30:
- temp |= PIPE_DDI_BPC_10;
+ temp |= TRANS_DDI_BPC_10;
break;
case 36:
- temp |= PIPE_DDI_BPC_12;
+ temp |= TRANS_DDI_BPC_12;
break;
default:
- WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ WARN(1, "%d bpp unsupported by transcoder DDI function\n",
intel_crtc->bpp);
}
- if (intel_hdmi->has_hdmi_sink)
- temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ temp |= TRANS_DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ temp |= TRANS_DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ WARN(1, "Unsupported lane count %d\n",
+ intel_dp->lane_count);
+ }
+
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %d\n",
+ intel_encoder->type, pipe);
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
else
- temp |= PIPE_DDI_MODE_SELECT_DVI;
+ cpu_transcoder = pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- temp |= PIPE_DDI_PVSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- temp |= PIPE_DDI_PHSYNC;
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
- I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ default:
+ return false;
+ }
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
u32 tmp;
int i;
- tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+ tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
- for_each_pipe(i) {
- tmp = I915_READ(DDI_FUNC_CTL(i));
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if ((tmp & PIPE_DDI_PORT_MASK)
- == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
- *pipe = i;
- return true;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
}
}
- DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
return true;
}
-void intel_enable_ddi(struct intel_encoder *encoder)
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ ret = I915_READ(PORT_CLK_SEL(port));
+
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+ pipe_name(pipe), port_name(port), ret);
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
- temp = I915_READ(DDI_BUF_CTL(port));
- temp |= DDI_BUF_CTL_ENABLE;
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
- * and swing/emphasis values are ignored so nothing special needs
- * to be done besides enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
}
-void intel_disable_ddi(struct intel_encoder *encoder)
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
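+ /* For eDP, power up the panel before touching the port; then select
+ * the port clock and, for DP/eDP, train the link. */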
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
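+ /* Disable the DDI buffer, put DP_TP_CTL back into training pattern 1,
+ * wait for the buffer to go idle, then cut the port clock. */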
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
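+/* Report the CDCLK frequency in MHz, as limited by the fuse straps and
+ * the current LCPLL configuration. */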
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450;
+ else if (IS_ULT(dev_priv->dev))
+ return 338;
+ else
+ return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ bool wait = false;
+ uint32_t val;
+
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
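+ /* Re-enable the DP transport in SST mode with training pattern 1 and
+ * turn the DDI buffer back on so the link can be retrained. */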
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int type = intel_encoder->type;
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ else
+ return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_fixup = intel_ddi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!dp_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ if (port != PORT_A) {
+ hdmi_connector = kzalloc(sizeof(struct intel_connector),
+ GFP_KERNEL);
+ if (!hdmi_connector) {
+ kfree(dp_connector);
+ kfree(intel_dig_port);
+ return;
+ }
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+ if (hdmi_connector)
+ intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+ else
+ intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
- temp = I915_READ(DDI_BUF_CTL(port));
- temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (hdmi_connector)
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ intel_dp_init_connector(intel_dig_port, dp_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b426d44a2b0..a9fb046b94a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .vco = { .min = 4000000, .max = 5994000},
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
};
static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 162000, .max = 270000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 60, .max = 300 }, /* guess */
+ .m = { .min = 22, .max = 450 },
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- HAS_eDP)
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->cpu_transcoder;
+}
+
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (INTEL_INFO(dev)->gen >= 4) {
- int reg = PIPECONF(pipe);
+ int reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
- reg = DDI_FUNC_CTL(pipe);
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
- cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
- return;
- } else {
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
- }
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
- return;
- }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* SBI access */
static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
+ u32 tmp;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_DATA,
- value);
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRWR);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
}
static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRRD);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
}
/**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
pll->on = false;
}
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- int reg;
- u32 val, pipeconf_val;
+ struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
- return;
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
+
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- int reg;
- u32 val;
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+ if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(_TRANSACONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(_TRANSACONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
}
/**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum transcoder pch_transcoder;
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
+
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pipe);
- assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int reg;
u32 val;
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
}
/**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
{
int tile_rows, tiles;
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
+
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth != 16)
- return -EINVAL;
-
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
break;
- case 24:
- case 32:
- if (fb->depth == 24)
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- else if (fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- return -EINVAL;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
POSTING_READ(reg);
return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
- if (!dev->primary->master)
- return 0;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return 0;
-
- if (intel_crtc->pipe) {
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- } else {
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- }
+ intel_crtc_update_sarea_pos(crtc, x, y);
return 0;
}
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
+ /* When everything is off, disable fdi C so that we can enable fdi B
+ * with all lanes. XXX: This misses the case where a pipe is not using
+ * any pch resources and so doesn't need any fdi lanes. */
+ if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- if (HAS_PCH_IBX(dev)) {
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
- }
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
break;
}
}
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
}
}
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
- /* Write the TU size bits so error detection works */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
-}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_EN));
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
* must be driven by its own crtc; no sharing is possible.
*/
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
- /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
- * CPU handles all others */
- if (IS_HASWELL(dev)) {
- /* It is still unclear how this will work on PPT, so throw up a warning */
- WARN_ON(!HAS_PCH_LPT(dev));
-
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
- DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
- return true;
- } else {
- DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- intel_encoder->type);
- return false;
- }
- }
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
- intel_sbi_read(dev_priv, SBI_SSCCTL6) |
- SBI_SSCCTL_DISABLE);
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
phaseinc);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
- intel_sbi_write(dev_priv,
- SBI_SSCDIVINTPHASE6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv,
- SBI_SSCAUXDIV6,
- temp);
-
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv,
- SBI_SSCCTL6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
/* Wait for initialization time */
udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_transcoder_disabled(dev_priv, pipe);
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(intel_crtc);
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_pch_pll(intel_crtc);
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
- lpt_program_iclkip(crtc);
- } else if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- if (!IS_HASWELL(dev))
- intel_fdi_normal_train(crtc);
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- temp |= TRANS_DP_PORT_SEL_B;
- break;
+ BUG();
}
I915_WRITE(reg, temp);
}
- intel_enable_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ int dslreg = PIPEDSL(pipe);
u32 temp;
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
- /* Without this, mode sets may fail silently on FDI */
- I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
- udelay(250);
- I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = intel_crtc_driving_pch(crtc);
+ is_pch_port = ironlake_crtc_driving_pch(crtc);
if (is_pch_port) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ /* Enable panel fitting for eDP */
+ if (dev_priv->pch_pf_size &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_pipe_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ lpt_pch_enable(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_disable(crtc);
- intel_disable_transcoder(dev_priv, pipe);
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ bool is_pch_port;
+
+ if (!intel_crtc->active)
+ return;
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (is_pch_port) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
}
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+ * start using it. */
+ intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+ intel_ddi_put_crtc_pll(crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
- int refclk, int num_connectors)
+ int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_hdmi;
+ bool is_sdvo;
+ u32 temp;
- is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+ dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+ dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
bestn = clock->n;
bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bestp1 = clock->p1;
bestp2 = clock->p2;
- /* Enable DPIO clock input */
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
-
+ /*
+ * In Valleyview, the PLL and the lane control registers are
+ * programmed through the DPIO interface.
+ */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
- pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
- (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+ (5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
- intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
- if (is_hdmi) {
- u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
-
- I915_WRITE(DPLL_MD(pipe), temp);
- POSTING_READ(DPLL_MD(pipe));
}
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
- intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+ /* Now program lane control registers */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ temp = 0x1000C4;
+ if (pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ }
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
u32 dpll;
bool is_sdvo;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
dpll = DPLL_VGA_MODE_DIS;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
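+/* Program the pipe timing registers from the adjusted mode, addressing
+ * them through the CPU transcoder so that the EDP transcoder works too. */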
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t vsyncshift;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf, vsyncshift;
+ u32 dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
- i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
- &reduced_clock : NULL);
-
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ i8xx_update_pll(crtc, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else if (IS_VALLEYVIEW(dev))
- vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
- refclk, num_connectors);
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2;
- } else {
+ else
pipeconf |= PIPECONF_PROGRESSIVE;
- vsyncshift = 0;
- }
- if (!IS_GEN3(dev))
- I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (!has_vga)
+ return;
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+ 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+ tmp &= ~(0x3 << 6);
+ tmp |= (1 << 6) | (1 << 0);
+ intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+ }
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
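
Nearly every step in the sequence above is the same sideband read-modify-write: fetch a register over SBI, clear a field, set the new bits, write it back. A small sketch of that idiom with hypothetical stand-ins for the sideband accessors (none of the names or the fake register model come from this patch):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accessors standing in for intel_sbi_read()/intel_sbi_write();
 * here they just model one register's worth of state. */
static uint32_t fake_reg;
static uint32_t sbi_read(uint16_t reg)            { (void)reg; return fake_reg; }
static void     sbi_write(uint16_t reg, uint32_t v) { (void)reg; fake_reg = v; }

/* One sideband read-modify-write step: clear 'mask', set 'bits'. This is
 * the shape of almost every step in lpt_init_pch_refclk() above. */
static void sbi_rmw(uint16_t reg, uint32_t mask, uint32_t bits)
{
	uint32_t tmp = sbi_read(reg);

	tmp &= ~mask;
	tmp |= bits;
	sbi_write(reg, tmp);
}

int main(void)
{
	/* e.g. the 0x8008 step above: replace bits 31:24 with 0x12. */
	fake_reg = 0xABCD1234;
	sbi_rmw(0x8008, 0xFFu << 24, 0x12u << 24);
	printf("0x%08X\n", fake_reg); /* prints 0x12CD1234 */
	return 0;
}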
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val |= PIPE_12BPC;
break;
default:
- val |= PIPE_8BPC;
- break;
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
}
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
POSTING_READ(PIPECONF(pipe));
}
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(cpu_transcoder));
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK_HSW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *fb)
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ if (intel_crtc->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 4;
+
+ return false;
+ }
+
+ if (dev_priv->num_pipe == 2)
+ return true;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+
+ if (intel_crtc->fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ case PIPE_C:
+ if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+ if (intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ default:
+ BUG();
+ }
+}
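
As I read the checks above: on a three-pipe part, pipes B and C share one bank of FDI lanes, so pipe B may only take more than two lanes while pipe C is idle, and the B/C bifurcation bit is selected whenever both halves are in use. A hypothetical helper restating that constraint, for illustration only (it is not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Restatement of the ironlake_check_fdi_lanes() checks above: each pipe
 * gets at most 4 FDI lanes, and pipes B and C can only run together when
 * neither needs more than 2 lanes. */
static bool fdi_bc_config_ok(int lanes_b, int lanes_c)
{
	if (lanes_b > 4 || lanes_c > 4)
		return false;

	if (lanes_b > 0 && lanes_c > 0)
		return lanes_b <= 2 && lanes_c <= 2;

	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       fdi_bc_config_ok(4, 0),  /* B alone may use all 4 lanes */
	       fdi_bc_config_ok(2, 2),  /* shared: fine at 2 + 2 */
	       fdi_bc_config_ok(3, 1)); /* B too wide once C is active */
	return 0;
}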
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
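
The helper above folds a 5% spread-spectrum margin into a ceiling-style division: the required bandwidth is target_clock * bpp * 21/20, and the divisor link_bw * 8 is the per-lane budget in the same units. A minimal standalone sketch of the same arithmetic; the example numbers are assumptions, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Same formula as ironlake_get_lanes_required() above: pad the required
 * bandwidth by 5% for spread spectrum, then divide by the per-lane
 * budget (link_bw * 8) and round up by adding one. */
static int lanes_required(int target_clock, int link_bw, int bpp)
{
	uint32_t bps = (uint32_t)target_clock * bpp * 21 / 20;

	return bps / (link_bw * 8) + 1;
}

int main(void)
{
	/* Assumed example: ~154000 kHz pixel clock, 24 bpp, 270000 kHz
	 * link clock; prints "FDI lanes needed: 2". */
	printf("FDI lanes needed: %d\n", lanes_required(154000, 270000, 24));
	return 0;
}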
+
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *encoder, *edp_encoder = NULL;
- int ret;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct intel_encoder *intel_encoder, *edp_encoder = NULL;
struct fdi_m_n m_n = {0};
- u32 temp;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
- bool is_cpu_edp = false, is_pch_edp = false;
+ int target_clock, pixel_multiplier, lane, link_bw;
+ bool is_dp = false, is_cpu_edp = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- if (encoder->needs_tv_clock)
- is_tv = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- is_pch_edp = true;
- else
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
is_cpu_edp = true;
- edp_encoder = encoder;
+ edp_encoder = intel_encoder;
break;
}
-
- num_connectors++;
- }
-
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
target_clock = adjusted_mode->clock;
- /* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
- adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
-
- if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
- pipe_bpp != 36) {
- WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- pipe_bpp = 24;
- }
- intel_crtc->bpp = pipe_bpp;
-
- if (!lane) {
- /*
- * Account for spread spectrum to avoid
- * oversubscribing the link. Max center spread
- * is 2.5%; use 5% for safety's sake.
- */
- u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
- lane = bps / (link_bw * 8) + 1;
- }
+ if (!lane)
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->bpp);
intel_crtc->fdi_lanes = lane;
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, u32 fp)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, pixel_multiplier, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false, is_tv = false;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m < factor * clock.n)
+ if (clock->m < factor * clock->n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (clock.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -4939,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
+ return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither, fdi_config_ok;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
- * pre-Haswell/LPT generation */
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
- pipe);
- } else if (!is_cpu_edp) {
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!is_cpu_edp) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- I915_WRITE(VSYNCSHIFT(pipe),
- adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2);
- } else {
- I915_WRITE(VSYNCSHIFT(pipe), 0);
- }
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
- /* pipesrc controls the size that is scaled from, which should
- * always be the user's requested size.
- */
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ /* Note: this also computes intel_crtc->fdi_lanes, which is used below in
+ * ironlake_check_fdi_lanes. */
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+ return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
+ /* We are not sure yet this won't happen. */
+ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+ INTEL_PCH_TYPE(dev));
+
+ WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+ num_connectors, pipe_name(pipe));
+
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+ WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+ if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+ return -EINVAL;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock,
+ &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+ fp);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its
+ * own on pre-Haswell/LPT generation */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are
+ * enabled. This is an exception to the general rule that
+ * mode_set doesn't turn things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether
+ * we're going to set the DPLLs for dual-channel mode or
+ * not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP |
+ LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode
+ * (LVDS_A3_POWER_UP) appropriately here, but we need to
+ * look more thoroughly into how panels behave in the
+ * two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
+ }
+ }
+
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery
+ * setting. */
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
+
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ if (!is_dp || is_cpu_edp)
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- return ret;
+ if (ret != 0)
+ return ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+
+ return 0;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- goto fail;
+ return false;
}
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- goto fail;
+ return false;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
-
return true;
-fail:
- connector->encoder = NULL;
- encoder->crtc = NULL;
- return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct drm_display_mode *mode;
- int htot = I915_READ(HTOTAL(pipe));
- int hsync = I915_READ(HSYNC(pipe));
- int vtot = I915_READ(VTOTAL(pipe));
- int vsync = I915_READ(VSYNC(pipe));
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
+ struct drm_device *dev = work->crtc->dev;
- mutex_lock(&work->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(work->dev);
- mutex_unlock(&work->dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
kfree(work);
}
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
- struct drm_pending_vblank_event *e;
- struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || !work->pending) {
+
+ /* Ensure we don't miss a work->pending update ... */
+ smp_rmb();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
- intel_crtc->unpin_work = NULL;
-
- if (work->event) {
- e = work->event;
- e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+ /* and that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- e->event.tv_sec = tvbl.tv_sec;
- e->event.tv_usec = tvbl.tv_usec;
+ intel_crtc->unpin_work = NULL;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
drm_vblank_put(dev, intel_crtc->pipe);
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
-
wake_up(&dev_priv->pending_flip_queue);
- schedule_work(&work->work);
+
+ queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work) {
- if ((++intel_crtc->unpin_work->pending) > 1)
- DRM_ERROR("Prepared flip multiple times\n");
- } else {
- DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
- }
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ smp_wmb();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ smp_wmb();
+}
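
The smp_wmb() pair above is matched by the smp_rmb() calls added to do_intel_finish_page_flip(): the queueing path publishes the work item before marking it pending, and the completion path must see the pending state before it trusts the rest of the structure. A rough user-space analogue of that handshake using C11 release/acquire atomics; the types and states below only mirror the kernel's INTEL_FLIP_* protocol and are not the driver's API:

#include <stdatomic.h>
#include <stdbool.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

struct flip_work {
	void *old_fb;            /* stands in for the unpin work fields */
	_Atomic int pending;
};

/* Queueing path: fill in the work, then publish it. The release store
 * plays the role of the smp_wmb()/atomic_set() pair above. */
static void mark_flip_active(struct flip_work *work, void *old_fb)
{
	work->old_fb = old_fb;
	atomic_store_explicit(&work->pending, FLIP_PENDING, memory_order_release);
}

/* Prepare path: bump PENDING to COMPLETE, but never resurrect an
 * inactive work item (the atomic_inc_not_zero() in the patch). */
static void prepare_flip(struct flip_work *work)
{
	int expected = FLIP_PENDING;

	atomic_compare_exchange_strong(&work->pending, &expected, FLIP_COMPLETE);
}

/* Finish path: only touch the work once it is visibly complete; the
 * acquire load mirrors the smp_rmb() calls in the finish handler. */
static bool finish_flip(struct flip_work *work, void **old_fb)
{
	if (atomic_load_explicit(&work->pending, memory_order_acquire) < FLIP_COMPLETE)
		return false;

	*old_fb = work->old_fb;
	return true;
}

int main(void)
{
	struct flip_work work = { 0 };
	void *fb;

	mark_flip_active(&work, &work);  /* queue: publish the work */
	prepare_flip(&work);             /* irq: PENDING -> COMPLETE */
	return finish_flip(&work, &fb) ? 0 : 1;
}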
+
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
- work->dev = crtc->dev;
+ work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
dev->mode_config.dpms_property;
connector->dpms = DRM_MODE_DPMS_ON;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
x, y, fb);
if (!ret)
goto done;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != &intel_crtc->base)
- continue;
-
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
- mode->base.id, mode->name);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->mode_set(encoder, mode, adjusted_mode);
- }
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7259,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
-
- /* Disable all disconnected encoders. */
- if (connector->base.status == connector_status_disconnected)
- connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
@@ -7420,6 +8301,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (IS_HASWELL(dev))
+ intel_ddi_pll_init(dev);
+}
+
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8346,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
+ intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
@@ -7551,17 +8439,9 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (HAS_PCH_SPLIT(dev)) {
- dpd_is_edp = intel_dpd_is_edp(dev);
-
- if (has_edp_a(dev))
- intel_dp_init(dev, DP_A, PORT_A);
-
- if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D, PORT_D);
- }
-
- intel_crt_init(dev);
+ if (!(IS_HASWELL(dev) &&
+ (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ intel_crt_init(dev);
if (IS_HASWELL(dev)) {
int found;
@@ -7584,6 +8464,10 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8487,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
- if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
+
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8508,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
- /* Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -7676,8 +8561,9 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_encoder_clones(encoder);
}
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7718,27 +8604,51 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
+ /* FIXME <= Gen4 stride limits are a bit unclear */
+ if (mode_cmd->pitches[0] > 32768)
+ return -EINVAL;
+
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride)
+ return -EINVAL;
+
+ /* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3)
+ return -EINVAL;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- /* RGB formats are common across chipsets */
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 6)
+ return -EINVAL;
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format %u\n",
- mode_cmd->pixel_format);
+ DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7776,7 +8686,13 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8743,8 @@ static void intel_init_display(struct drm_device *dev)
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
@@ -8058,6 +8976,7 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
/* Just disable it once at startup */
@@ -8127,7 +9046,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->pipe);
+ reg = PIPECONF(crtc->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8244,9 +9163,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
+static void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
@@ -8255,10 +9192,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
+ if (IS_HASWELL(dev)) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc->cpu_transcoder = TRANSCODER_EDP;
+
+ DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+ pipe_name(pipe));
+ }
+ }
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- tmp = I915_READ(PIPECONF(pipe));
+ tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
@@ -8271,6 +9233,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
+ if (IS_HASWELL(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
@@ -8317,9 +9282,21 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
intel_sanitize_crtc(crtc);
}
- intel_modeset_update_staged_output_state(dev);
+ if (force_restore) {
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_set_mode(&crtc->base, &crtc->base.mode,
+ crtc->base.x, crtc->base.y, crtc->base.fb);
+ }
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9305,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, false);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -8447,6 +9424,7 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
+ enum transcoder cpu_transcoder;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9432,8 @@ intel_display_capture_error_state(struct drm_device *dev)
return NULL;
for_each_pipe(i) {
+ cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9448,14 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(i));
- error->pipe[i].hblank = I915_READ(HBLANK(i));
- error->pipe[i].hsync = I915_READ(HSYNC(i));
- error->pipe[i].vtotal = I915_READ(VTOTAL(i));
- error->pipe[i].vblank = I915_READ(VBLANK(i));
- error->pipe[i].vsync = I915_READ(VSYNC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef160..1b63d55318a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/**
@@ -49,7 +47,9 @@
*/
static bool is_edp(struct intel_dp *intel_dp)
{
- return intel_dp->base.type == INTEL_OUTPUT_EDP;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
- return container_of(encoder, struct intel_dp, base.base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dp, base);
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
/**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
return is_pch_edp(intel_dp);
}
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
*lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
- *link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
- *link_bw = 270000;
+ *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
- if (intel_dp->panel_fixed_mode)
- return intel_dp->panel_fixed_mode->clock;
+ if (intel_connector->panel.fixed_mode)
+ return intel_connector->panel.fixed_mode->clock;
else
return mode->clock;
}
static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
- int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
- return max_lane_count;
-}
-
-static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int try, precharge;
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ ch_data = DPA_AUX_CH_DATA1;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ ch_data = PCH_DPB_AUX_CH_DATA1;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ ch_data = PCH_DPC_AUX_CH_DATA1;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ ch_data = PCH_DPD_AUX_CH_DATA1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_HASWELL(dev))
+ aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+ else if (IS_VALLEYVIEW(dev))
+ aux_clock_divider = 100;
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+ aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
-static bool
+bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
/*
* Find the lane count in the intel_encoder private
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version >= 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
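+
+/*
+ * Worked example (values assumed for illustration): with link_bw set to
+ * DP_LINK_BW_2_7 (0x0a), lane_count 4, and a sink whose DPCD reports
+ * rev >= 1.1 with DP_ENHANCED_FRAME_CAP set, the buffer built above and
+ * later written out by intel_dp_start_link_train() is:
+ *
+ *   link_configuration[0] = 0x0a          (2.7 GHz link rate)
+ *   link_configuration[1] = 0x04 | 0x80   (4 lanes + enhanced framing)
+ *   link_configuration[8] = 0x01          (ANSI 8B/10B channel coding)
+ */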
+
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
+
+ intel_dp_init_link_config(intel_dp);
/* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
return control;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
ironlake_wait_panel_off(intel_dp);
}
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ intel_panel_enable_backlight(dev, pipe);
}
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (!is_edp(intel_dp))
return;
+ intel_panel_disable_backlight(dev);
+
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
}
/* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
return true;
}
}
- }
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
return true;
}
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = {
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
uint8_t v = 0;
uint8_t p = 0;
int lane;
- uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
}
}
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int s = (lane & 1) * 4;
- uint8_t l = link_status[lane>>1];
-
- return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
{
- int lane;
- uint8_t lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
- DP_LANE_CHANNEL_EQ_DONE|\
- DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
- uint8_t lane_align;
- uint8_t lane_status;
- int lane;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
- lane_align = intel_dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- return false;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
}
- return true;
}
static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
int ret;
+ uint32_t temp;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ if (IS_HASWELL(dev)) {
+ temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) &&
+ (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
/* Enable corresponding port and start training pattern 1 */
-static void
+void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
+ if (IS_HASWELL(dev))
+ intel_ddi_prepare_link_retrain(encoder);
+
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(
+ intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+ signal_levels);
+ /* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
- /* Set training pattern 1 */
- udelay(100);
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static void
+void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_LINK_SCRAMBLING_DISABLE))
break;
- udelay(400);
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp, link_status)) {
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ /*
+ * DDI code has a strict mode set sequence and we should try to respect
+ * it, otherwise we might hang the machine in many different ways. So we
+ * really should be disabling the port only on a complete crtc_disable
+ * sequence. This function is just called under two conditions in DDI
+ * code:
+ * - Link train failed while doing crtc_enable, and in this case we
+ * really should respect the mode set sequence and wait for a
+ * crtc_disable.
+ * - Someone turned the monitor off and intel_dp_check_link_status
+ * called us. We don't need to disable the whole port in this case, so
+ * when someone turns the monitor on again,
+ * intel_ddi_prepare_link_retrain will take care of redoing the link
+ * train.
+ */
+ if (IS_HASWELL(dev))
+ return;
+
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 4. Check link status on receipt of hot-plug interrupt
*/
-static void
+void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->base.connectors_active)
+ if (!intel_encoder->connectors_active)
return;
- if (WARN_ON(!intel_dp->base.base.crtc))
+ if (WARN_ON(!intel_encoder->base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp, link_status)) {
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_dp->base.base));
+ drm_get_encoder_name(&intel_encoder->base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
- status = intel_panel_detect(intel_dp->base.base.dev);
+ status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct edid *edid;
- int size;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- if (!intel_dp->edid)
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
return NULL;
- size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
- memcpy(edid, intel_dp->edid, size);
+ memcpy(edid, intel_connector->edid, size);
return edid;
}
- edid = drm_get_edid(connector, adapter);
- return edid;
+ return drm_get_edid(connector, adapter);
}
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int ret;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- drm_mode_connector_update_edid_property(connector,
- intel_dp->edid);
- ret = drm_add_edid_modes(connector, intel_dp->edid);
- drm_edid_to_eld(connector,
- intel_dp->edid);
- return intel_dp->edid_mode_count;
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
}
- ret = intel_ddc_get_modes(connector, adapter);
- return ret;
+ return intel_ddc_get_modes(connector, adapter);
}
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
- intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
- intel_dp->dpcd[6], intel_dp->dpcd[7]);
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
if (status != connector_status_connected)
return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
- if (ret) {
- if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
- struct drm_display_mode *newmode;
- list_for_each_entry(newmode, &connector->probed_modes,
- head) {
- if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, newmode);
- break;
- }
- }
- }
+ if (ret)
return ret;
- }
- /* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_dp->panel_fixed_mode) {
- intel_dp->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
- if (intel_dp->panel_fixed_mode) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no-scaling mode not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
return -EINVAL;
done:
- if (intel_dp->base.base.crtc) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ if (intel_encoder->base.crtc) {
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp))
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ if (is_edp(intel_dp)) {
intel_panel_destroy_backlight(dev);
+ intel_panel_fini(&intel_connector->panel);
+ }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
- kfree(intel_dp);
+ kfree(intel_dig_port);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
intel_dp_check_link_status(intel_dp);
}
@@ -2435,13 +2525,14 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
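+
+/*
+ * Illustrative flow: the fitting_mode stored here is what
+ * intel_dp_mode_fixup() later passes to intel_pch_panel_fitting() for eDP,
+ * so changing the scaling property in intel_dp_set_property() takes effect
+ * on the modeset it triggers.
+ */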
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(dev_priv);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but
+ * zero-based in the hw (so we need to add 100 ms). The sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
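+
+ /*
+ * Illustrative numbers, assuming the pure spec-fallback case (no usable
+ * register or VBT values): the final.* fields are in the hardware's
+ * 100us units, so get_delay() = DIV_ROUND_UP(x, 10) yields milliseconds:
+ *   t1_t3   = 2100 -> panel_power_up_delay    = 210 ms
+ *   t10     = 5000 -> panel_power_down_delay  = 500 ms
+ *   t11_t12 = 6100 -> panel_power_cycle_delay = 610 ms
+ */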
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ /* Compute the divisor for the pp clock, simply match the Bspec
+ * formula. */
+ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+ << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
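+
+ /*
+ * Example with an assumed PCH raw clock of 125 MHz (intel_pch_rawclk()
+ * returning 125, consistent with the old fixed AUX divider of 63 above):
+ * the reference divider field is (100 * 125) / 2 - 1 = 6249, and a
+ * 610 ms power cycle delay (t11_t12 = 6100) is stored as
+ * DIV_ROUND_UP(6100, 1000) = 7 in the PANEL_POWER_CYCLE_DELAY field.
+ */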
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (is_cpu_edp(intel_dp))
+ pp_on |= PANEL_POWER_PORT_DP_A;
+ else
+ pp_on |= PANEL_POWER_PORT_DP_D;
+ }
+
+ I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+ I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+ I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(PCH_PP_ON_DELAYS),
+ I915_READ(PCH_PP_OFF_DELAYS),
+ I915_READ(PCH_PP_DIVISOR));
}
void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_dp *intel_dp;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
- intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
- if (!intel_dp)
- return;
-
- intel_dp->output_reg = output_reg;
- intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_dp);
- return;
- }
- intel_encoder = &intel_dp->base;
-
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ /*
+ * FIXME: We need to initialize built-in panels before external panels.
+ * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
+ */
+ if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
+ /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+ * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+ * rewrite it.
+ */
type = DRM_MODE_CONNECTOR_DisplayPort;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- intel_encoder->cloneable = false;
-
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
-
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->disable = intel_disable_dp;
- intel_encoder->post_disable = intel_post_disable_dp;
- intel_encoder->get_hw_state = intel_dp_get_hw_state;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
switch (port) {
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
break;
}
- /* Cache some DPCD data in the eDP case */
- if (is_edp(intel_dp)) {
- struct edp_power_seq cur, vbt;
- u32 pp_on, pp_off, pp_div;
-
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
-
- if (!pp_on || !pp_off || !pp_div) {
- DRM_INFO("bad panel power sequencing delays, disabling panel\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
- return;
- }
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
-
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
-
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- }
+ if (is_edp(intel_dp))
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_i2c_init(intel_dp, intel_connector, name);
+ /* Cache DPCD and EDID for eDP. */
if (is_edp(intel_dp)) {
bool ret;
+ struct drm_display_mode *scan;
struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_encoder->base);
+ intel_dp_destroy(connector);
return;
}
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector,
- edid);
- intel_dp->edid_mode_count =
- drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- intel_dp->edid = edid;
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
}
+
+ /* fall back to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
ironlake_edp_panel_vdd_off(intel_dp, false);
}
- intel_encoder->hot_plug = intel_dp_hot_plug;
-
if (is_edp(intel_dp)) {
- dev_priv->int_edp_connector = connector;
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
}
intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ intel_dp_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f4..8a1bd4a3ad0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
int crtc_mask;
};
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -179,12 +185,19 @@ struct intel_connector {
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
+ enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ atomic_t unpin_work_count;
+
/* Display surface base address adjustement for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
+ uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
+ bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
- struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
- int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
- struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
- enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
+ struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
};
static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
- struct drm_device *dev;
+ struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
- int pending;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
bool enable_stall_check;
};
@@ -395,6 +415,8 @@ struct intel_fbc_work {
int interval;
};
+int intel_pch_rawclk(struct drm_device *dev);
+
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
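+
+/*
+ * Usage sketch (illustrative): with the DP and HDMI state embedded in one
+ * intel_digital_port, code holding any of the three views can reach the
+ * others, e.g.:
+ *
+ *   struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ *   struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ *   enum port port = dig_port->port; /* shared by the dp and hdmi members */
+ */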
+
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch);
+
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee..2ee9821b9d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_hdmi, base.base);
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_hdmi, base);
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
intel_set_infoframe(encoder, &avi_if);
}
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -786,6 +794,9 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_hdmi->base.base.crtc) {
- struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ if (intel_dig_port->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .mode_fixup = intel_hdmi_mode_fixup,
- .mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
-};
-
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
-
- intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
- if (!intel_hdmi)
- return;
-
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_hdmi);
- return;
- }
-
- intel_encoder = &intel_hdmi->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ enum port port = intel_dig_port->port;
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_encoder->type = INTEL_OUTPUT_HDMI;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
-
- intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
BUG();
}
- intel_hdmi->sdvox_reg = sdvox_reg;
-
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev)) {
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs_hsw);
- } else {
- intel_encoder->enable = intel_enable_hdmi;
- intel_encoder->disable = intel_disable_hdmi;
- intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs);
- }
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
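An illustrative, self-contained sketch of the embedding pattern the enc_to_intel_hdmi()/intel_hdmi_to_dev() helpers above rely on: a container struct holds the shared encoder plus per-protocol state, and a pointer to the embedded member is turned back into the container with container_of(). The struct and function names below are hypothetical stand-ins, not the driver's actual types.

/* Minimal user-space sketch of the container_of() upcast used above. */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };

struct digital_port {
	struct encoder base;	/* embedded, shared by HDMI and DP state */
	int hdmi_reg;
	int dp_reg;
};

static struct digital_port *enc_to_port(struct encoder *encoder)
{
	return container_of(encoder, struct digital_port, base);
}

int main(void)
{
	struct digital_port port = { .base = { .id = 1 }, .hdmi_reg = 0x61160 };
	struct encoder *enc = &port.base;

	/* Given only the embedded encoder, recover the owning port. */
	return enc_to_port(enc)->hdmi_reg == 0x61160 ? 0 : 1;
}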
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971..3ef5af15b81 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- bus->force_bit = true;
+ bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
- bus->force_bit = true;
+ bus->force_bit = 1;
intel_gpio_setup(bus, port);
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- bus->force_bit = force_bit;
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
}
void intel_teardown_gmbus(struct drm_device *dev)
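A small sketch of the idea behind changing force_bit from a boolean into a counter above: each request to force GPIO bit-banging increments the count and each release decrements it, so nested enable/disable calls stay balanced. Names below are hypothetical, and the sketch assumes callers always pair an enable with a disable.

#include <assert.h>
#include <stdbool.h>

struct bus {
	int force_bit;	/* > 0 means bit-banging is currently forced on */
};

static void bus_force_bit(struct bus *bus, bool force)
{
	/* Reference-count style toggle: nesting is safe. */
	bus->force_bit += force ? 1 : -1;
	assert(bus->force_bit >= 0);
}

int main(void)
{
	struct bus b = { .force_bit = 0 };

	bus_force_bit(&b, true);	/* first user forces bit-banging */
	bus_force_bit(&b, true);	/* second, nested user           */
	bus_force_bit(&b, false);	/* still forced: count is 1      */
	bus_force_bit(&b, false);	/* back to hardware GMBUS: 0     */

	return b.force_bit;
}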
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474..b9a660a5367 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_connector {
+ struct intel_connector base;
- struct edid *edid;
+ struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
- int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
- struct drm_display_mode *fixed_mode;
+ struct intel_lvds_connector *attached_connector;
};
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds, base.base);
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_lvds, base);
+ return container_of(connector, struct intel_lvds_connector, base.base);
}
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_dirty) {
+ if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
+ lvds_encoder->pfit_control,
+ lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+ lvds_encoder->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (intel_lvds->pfit_control) {
+ if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_dirty = true;
+ lvds_encoder->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
/*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- switch (intel_lvds->fitting_mode) {
+ switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- if (pfit_control != intel_lvds->pfit_control ||
- pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
- intel_lvds->pfit_dirty = true;
+ if (pfit_control != lvds_encoder->pfit_control ||
+ pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+ lvds_encoder->pfit_control = pfit_control;
+ lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid)
- return drm_add_edid_modes(connector, intel_lvds->edid);
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
- mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, lid_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of the LVDS connector after receiving
* the LID notification event.

*/
- if (connector)
- connector->status = connector->funcs->detect(connector,
- false);
+ connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- intel_modeset_check_state(dev);
+ intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
- intel_panel_destroy_backlight(dev);
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ kfree(lvds_connector->base.edid);
+
+ intel_panel_destroy_backlight(connector->dev);
+ intel_panel_fini(&lvds_connector->base.panel);
- if (dev_priv->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
- struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+ struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (intel_lvds->fitting_mode == value) {
+ if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -912,12 +925,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
+ struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@@ -945,23 +961,25 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
- if (!intel_lvds) {
+ lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
return false;
- }
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_lvds);
+ lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ if (!lvds_connector) {
+ kfree(lvds_encoder);
return false;
}
+ lvds_encoder->attached_connector = lvds_connector;
+
if (!HAS_PCH_SPLIT(dev)) {
- intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
- intel_encoder = &intel_lvds->base;
+ intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1011,10 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- /*
- * the initial panel fitting mode will be FULL_SCREEN.
- */
-
- drm_connector_attach_property(&intel_connector->base,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1015,20 +1029,21 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_lvds->edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- pin));
- if (intel_lvds->edid) {
- if (drm_add_edid_modes(connector,
- intel_lvds->edid)) {
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ edid);
} else {
- kfree(intel_lvds->edid);
- intel_lvds->edid = NULL;
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
}
+ } else {
+ edid = ERR_PTR(-ENOENT);
}
- if (!intel_lvds->edid) {
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
* handed to valid_mode for checking
@@ -1041,22 +1056,26 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, scan);
- intel_find_lvds_downclock(dev,
- intel_lvds->fixed_mode,
- connector);
- goto out;
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1076,16 +1095,17 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!intel_lvds->fixed_mode)
+ if (!fixed_mode)
goto failed;
out:
@@ -1100,16 +1120,15 @@ out:
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
- dev_priv->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- dev_priv->lid_notifier.notifier_call = NULL;
+ lvds_connector->lid_notifier.notifier_call = NULL;
}
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
return true;
@@ -1117,7 +1136,9 @@ failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_lvds);
- kfree(intel_connector);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ kfree(lvds_encoder);
+ kfree(lvds_connector);
return false;
}
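The LVDS changes above cache the probed EDID in a single pointer that can also carry an error code (ERR_PTR(-ENOENT) when nothing was found, ERR_PTR(-EINVAL) when the data was unusable) and test it with IS_ERR_OR_NULL(). A simplified user-space sketch of that convention, using stand-in helpers rather than the kernel's own definitions:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int edid_data = 42;
	void *cached;

	cached = ERR_PTR(-ENOENT);		/* probe found nothing */
	printf("cached? %d\n", !IS_ERR_OR_NULL(cached));

	cached = &edid_data;			/* probe returned real data */
	printf("cached? %d\n", !IS_ERR_OR_NULL(cached));

	if (IS_ERR_OR_NULL(cached))
		printf("error %ld\n", PTR_ERR(cached));
	return 0;
}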
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66e..b00f1c83adc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- kfree(edid);
return ret;
}
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d..7741c22c934 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
+ DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2aacd32954..bee8cb6108a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->saveBLC_PWM_CTL2 == 0) {
- dev_priv->saveBLC_PWM_CTL2 = val;
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
- I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL2;
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
} else {
val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->saveBLC_PWM_CTL == 0) {
- dev_priv->saveBLC_PWM_CTL = val;
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
- I915_WRITE(BLC_PWM_CTL,
- dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL;
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
- max = i915_read_blc_pwm_ctl(dev_priv);
+ max = i915_read_blc_pwm_ctl(dev);
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
-#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
- if (i915_panel_ignore_lid)
- return i915_panel_ignore_lid > 0 ?
- connector_status_connected :
- connector_status_disconnected;
- /* opregion lid state on HP 2540p is wrong at boot up,
- * appears to be either the BIOS or Linux ACPI fault */
-#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
- if (dev_priv->opregion.lid_state)
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
-#endif
+ }
- return connector_status_unknown;
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
}
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
.get_brightness = intel_panel_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
- struct drm_connector *connector;
intel_panel_init_backlight(dev);
- if (dev_priv->int_lvds_connector)
- connector = dev_priv->int_lvds_connector;
- else if (dev_priv->int_edp_connector)
- connector = dev_priv->int_edp_connector;
- else
- return -ENODEV;
-
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
backlight_device_unregister(dev_priv->backlight);
}
#else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
- intel_panel_init_backlight(dev);
+ intel_panel_init_backlight(connector->dev);
return 0;
}
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
return;
}
#endif
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
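The new intel_panel_init()/intel_panel_fini() pair above gives the duplicated fixed mode a single owner with a matching teardown path. A minimal sketch of that init/fini ownership pairing, with hypothetical stand-in types and plain malloc/free in place of the DRM mode helpers:

#include <stdlib.h>

struct display_mode { int hdisplay, vdisplay, clock; };

struct panel { struct display_mode *fixed_mode; };

static int panel_init(struct panel *panel, struct display_mode *fixed_mode)
{
	panel->fixed_mode = fixed_mode;	/* panel now owns the mode */
	return 0;
}

static void panel_fini(struct panel *panel)
{
	free(panel->fixed_mode);	/* released exactly once, here */
	panel->fixed_mode = NULL;
}

int main(void)
{
	struct display_mode *mode = malloc(sizeof(*mode));
	struct panel panel;

	if (!mode)
		return 1;
	*mode = (struct display_mode){ 1366, 768, 69300 };

	panel_init(&panel, mode);	/* e.g. from EDID, VBT or the BIOS */
	panel_fini(&panel);		/* single teardown path            */
	return 0;
}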
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 442968f8b20..e6f54ffab3b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -405,7 +405,7 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled &&
+ if (to_intel_crtc(tmp_crtc)->active &&
!to_intel_crtc(tmp_crtc)->primary_disabled &&
tmp_crtc->fb) {
if (crtc) {
@@ -992,7 +992,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1086,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1215,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1286,7 @@ static void valleyview_update_wm(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
vlv_update_drain_latency(dev);
@@ -1302,17 +1303,23 @@ static void valleyview_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1325,10 +1332,11 @@ static void valleyview_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_device *dev)
@@ -1351,17 +1359,18 @@ static void g4x_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1374,11 +1383,11 @@ static void g4x_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
@@ -1467,10 +1476,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
@@ -1478,10 +1490,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@@ -1571,8 +1586,7 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
+ 4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1805,8 +1819,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
- if ((dev_priv->num_pipe == 3) &&
- g4x_compute_wm0(dev, 2,
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermark.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -1869,12 +1985,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
- /* WM3 */
+ /* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
@@ -1923,7 +2044,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
*sprite_wm = display->guard_size;
return false;
}
@@ -2323,7 +2444,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
@@ -2398,12 +2519,12 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
- u32 pcu_mbox, rc6_mask = 0;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i;
+ int i, ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@@ -2497,30 +2618,16 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->rps.max_delay = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (ret && pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2534,6 +2641,20 @@ static void gen6_enable_rps(struct drm_device *dev)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
gen6_gt_force_wake_put(dev_priv);
}
@@ -2541,10 +2662,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
+ int gpu_freq;
+ unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
max_ia_freq = cpufreq_quick_get_max(0);
/*
@@ -2575,17 +2697,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq | gpu_freq);
}
}
@@ -2593,16 +2709,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
}
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
}
}
@@ -2628,14 +2744,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
return -ENOMEM;
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@@ -2647,6 +2763,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
int ret;
/* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2778,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
if (ret)
return;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
/*
* GPU can automatically power down the render unit if given a page
* to save state.
@@ -2668,12 +2788,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
return;
}
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2809,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
return;
}
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -3304,37 +3426,72 @@ static void intel_init_emon(struct drm_device *dev)
void intel_disable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+ mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
}
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
void intel_enable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev);
- gen6_update_ring_freq(dev);
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
}
}
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3499,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@@ -3354,9 +3509,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@@ -3378,33 +3531,70 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
}
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* The below fixes the weird display corruption, a few pixels shifted
+ * downward, on (only) LVDS of some HP laptops with IVY.
+ */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -3454,11 +3644,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3473,6 +3664,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3680,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3708,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3736,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3547,27 +3749,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
+ lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3789,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
@@ -3607,6 +3831,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3620,39 +3845,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3704,6 +3949,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue
+ * Disable clock gating on the GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3974,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +4033,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
@@ -3840,7 +4063,7 @@ void intel_init_power_wells(struct drm_device *dev)
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+ if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
@@ -3878,11 +4101,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@@ -3905,7 +4123,7 @@ void intel_init_pm(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -3993,6 +4211,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
DRM_ERROR("GT thread status wait timed out\n");
}
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4006,7 +4230,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4240,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
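/*
 * Editorial aside, not part of the patch: the _MASKED_BIT_ENABLE() /
 * _MASKED_BIT_DISABLE() macros used in these forcewake writes follow the
 * GT "masked register" convention, where the upper 16 bits of the written
 * value select which of the lower 16 bits the hardware actually updates.
 * A minimal sketch of that convention (helper names are illustrative only):
 */
static inline unsigned int masked_bit_enable(unsigned int bit)
{
	return (bit << 16) | bit;	/* unmask the bit and set it */
}

static inline unsigned int masked_bit_disable(unsigned int bit)
{
	return bit << 16;		/* unmask the bit and leave it clear */
}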
+
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4029,7 +4259,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4073,7 +4303,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4111,13 +4341,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+}
+
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4363,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
+void intel_gt_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
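/*
 * Editorial note: intel_gt_reset() drops any forcewake reference that the
 * BIOS or a previous driver instance may have left asserted, so the
 * driver's own forcewake refcounting starts from a known-idle state.
 * intel_gt_init() below calls it before installing the get/put hooks.
 */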
+
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->gt_lock);
+ intel_gt_reset(dev);
+
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
}
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
}
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
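/*
 * Editorial note on the two pcode helpers above: both sides of the mailbox
 * follow the same handshake. The caller may only start a transaction while
 * GEN6_PCODE_READY is clear (otherwise -EAGAIN), it then loads
 * GEN6_PCODE_DATA, writes the mailbox command with the READY bit set, and
 * polls up to 500 ms for firmware to clear READY again (-ETIMEDOUT on
 * failure). Only the read variant fetches the result back out of
 * GEN6_PCODE_DATA afterwards.
 */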
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbb..ae253e04c39 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
static inline int ring_space(struct intel_ring_buffer *ring)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
return space;
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
/*
* TLB invalidate requires a post-sync write.
*/
- flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
ret = intel_ring_begin(ring, 4);
@@ -547,23 +547,24 @@ static int init_render_ring(struct intel_ring_buffer *ring)
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
+
if (!ring->private)
return;
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
cleanup_pipe_control(ring);
}
static void
update_mboxes(struct intel_ring_buffer *ring,
- u32 seqno,
- u32 mmio_offset)
+ u32 mmio_offset)
{
- intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
}
/**
@@ -576,8 +577,7 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring,
- u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
@@ -590,13 +590,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_next_request_seqno(ring);
-
- update_mboxes(ring, *seqno, mbox1_reg);
- update_mboxes(ring, *seqno, mbox2_reg);
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
@@ -653,10 +651,8 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -677,7 +673,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +692,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -888,25 +883,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno;
int ret;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- seqno = i915_gem_next_request_seqno(ring);
-
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -964,7 +954,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
{
int ret;
@@ -975,35 +967,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- MI_BATCH_NON_SECURE_I965);
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
+/* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which now has all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
return 0;
}
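/*
 * Editorial note on the pinned/unpinned split above: on i830-class hardware
 * the command streamer's TLB invalidation is unreliable, so unless the
 * caller passes I915_DISPATCH_PINNED the batch is first copied with
 * XY_SRC_COPY_BLT into the pre-pinned scratch buffer kept in ring->private
 * (capped at I830_BATCH_LIMIT) and executed from that stable address
 * instead of from the original object.
 */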
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1012,7 +1040,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -1075,6 +1103,29 @@ err:
return ret;
}
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@@ -1086,6 +1137,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
init_waitqueue_head(&ring->irq_queue);
@@ -1093,6 +1145,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = init_status_page(ring);
if (ret)
return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1214,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@@ -1176,28 +1233,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
- uint32_t __iomem *virt;
- int rem = ring->size - ring->tail;
-
- if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(ring, rem);
- if (ret)
- return ret;
- }
-
- virt = ring->virtual_start + ring->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ring->tail = 0;
- ring->space = ring_space(ring);
-
- return 0;
-}
-
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
@@ -1231,7 +1266,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (request->tail == -1)
continue;
- space = request->tail - (ring->tail + 8);
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
@@ -1266,7 +1301,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
return 0;
}
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1344,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return -EBUSY;
}
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ uint32_t __iomem *virt;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
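/*
 * Editorial note: intel_wrap_ring_buffer() was moved below
 * ring_wait_for_space() (the former intel_wait_ring_buffer(), now static)
 * so it can call it directly; it pads the remainder of the ring with
 * MI_NOOPs and resets tail to 0 before new commands are emitted.
 */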
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
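/*
 * Editorial note: intel_ring_begin() below calls intel_ring_alloc_seqno()
 * so that a seqno for the "outstanding lazy request" exists before any
 * commands touch the ring. The add_request callbacks earlier in this patch
 * were converted to read ring->outstanding_lazy_request directly instead
 * of being handed a freshly allocated seqno, which is why this
 * preallocation has to happen first.
 */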
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
@@ -1320,6 +1409,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
@@ -1327,7 +1421,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
}
if (unlikely(ring->space < n)) {
- ret = intel_wait_ring_buffer(ring, n);
+ ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}
@@ -1391,10 +1485,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1402,8 +1503,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
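/*
 * Editorial note on the MI_FLUSH_DW changes above: per the quoted Bspec
 * text, a TLB invalidate is only honoured when the flush also carries a
 * post-sync operation, so the invalidate path now adds
 * MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW and points the dummy
 * store at the I915_GEM_HWS_SCRATCH_ADDR slot defined in
 * intel_ringbuffer.h later in this patch. The blitter flush further down
 * gets the same treatment.
 */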
static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
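/*
 * Editorial note: every dispatch_execbuffer implementation in this file now
 * takes a flags argument. Unless the caller passes I915_DISPATCH_SECURE,
 * the batch is marked non-secure via the generation-specific
 * MI_BATCH_NON_SECURE* bit, and the new Haswell variant above additionally
 * selects the PPGTT address space with MI_BATCH_PPGTT_HSW.
 * I915_DISPATCH_PINNED is only consumed by the i830 path to skip its CS TLB
 * workaround copy.
 */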
+
+static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1411,7 +1534,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1432,10 +1557,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB;
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1490,7 +1622,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,10 +1635,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
- if (!I915_NEED_GFX_HWS(dev)) {
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
}
return intel_init_ring_buffer(dev, ring);
@@ -1514,6 +1663,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1551,16 +1701,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
- if (!I915_NEED_GFX_HWS(dev))
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
- if (IS_I830(ring->dev))
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1717,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1618,7 +1771,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
-
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f..6af87cd0572 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
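/*
 * Editorial sketch, not part of the patch: the 64-byte guard above is what
 * ring_space() in intel_ringbuffer.c now subtracts before wrapping. A
 * self-contained version of that computation (names are illustrative):
 */
static inline int ring_space_sketch(int head, int tail, int size)
{
	int space = head - (tail + 64);	/* 64 == I915_RING_FREE_SPACE */

	if (space < 0)
		space += size;		/* tail has wrapped past head */
	return space;
}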
struct intel_hw_status_page {
u32 *page_addr;
unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring,
- u32 *seqno);
+ int (*add_request)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,10 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length);
+ u32 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
@@ -181,27 +194,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
- return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-
void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
@@ -217,6 +224,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
return ring->tail;
}
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a6ac0b41696..c275bf0fa36 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- u8 retry = 5;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
* command to be complete.
*
* Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
*/
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
- udelay(15);
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
+
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX: the port was moved to transcoder A
+ * when it was disabled, so restore the selection before enabling. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ temp |= SDVO_PIPE_B_SELECT;
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
-
- /* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- msleep(30);
-
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_sdvo_connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- pin = GMBUS_PORT_DPB;
- if (mapping->initialized)
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
- if (intel_gmbus_is_port_valid(pin)) {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
- intel_gmbus_force_bit(sdvo->i2c, true);
- } else {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- }
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
}
static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
- kfree(intel_sdvo);
- return false;
- }
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009..827dcd4edf1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- int pixel_size;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe));
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ sprsurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(SPRLINOFF(pipe), offset);
- }
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPRSCALE(pipe), sprscale);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
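/*
 * Editorial note on the offset rework above (mirrored in ilk_update_plane
 * below): intel_gen4_compute_offset_xtiled() rounds the (x, y) start of the
 * scanout down to a tile-aligned surface offset and leaves the residual x/y
 * in place. The tile-aligned part is added to SPRSURF, while whatever
 * linear byte offset remains is programmed via SPRLINOFF for untiled
 * framebuffers, or expressed as the residual (x, y) in
 * SPRTILEOFF/SPROFFSET for tiled ones.
 */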
@@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
- I915_WRITE(SPRSCALE(pipe), 0);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe, pixel_size;
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ dvssurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(DVSLINOFF(pipe), offset);
- }
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
- if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@@ -473,6 +485,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
goto out;
/*
+ * We may not have a scaler, e.g. HSW does not have it anymore
+ */
+ if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+ return -EINVAL;
+
+ /*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
@@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
case 7:
+ if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+ intel_plane->can_scale = false;
+ else
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135..ea93520c127 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
ARRAY_SIZE(tv_modes),
tv_format_names);
- drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae3370..70dd3c5529d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
struct apertures_struct *aper = alloc_apertures(1);
+ if (!aper)
+ return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
aper->ranges[0].base = mdev->mc.vram_base;
aper->ranges[0].size = mdev->mc.vram_window;
- aper->count = 1;
remove_conflicting_framebuffers(aper, "mgafb", true);
+ kfree(aper);
if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c..8fc9d920194 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
static int mgag200_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, mgag200_bo_ttm_destroy);
if (ret)
return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
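The mgag200_ttm.c changes track two TTM interface reductions in this series: ttm_bo_move_memcpy() and ttm_bo_validate() no longer take a no_wait_reserve flag, and ttm_bo_init() drops one now-unused argument. A condensed sketch of the updated pin path, assuming the reduced ttm_bo_validate(bo, placement, interruptible, no_wait_gpu) signature visible in the hunk:

    /* Sketch: pinning a BO with the two-flag ttm_bo_validate() used above
     * (interruptible = false, no_wait_gpu = false).  Condensed from the
     * mgag200_bo_pin() hunk; gpu_addr bookkeeping and error paths trimmed,
     * and mgag200_ttm_placement() is assumed visible via the driver header. */
    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_placement.h>
    #include "mgag200_drv.h"

    static int example_bo_pin(struct mgag200_bo *bo, u32 pl_flag)
    {
            int i;

            mgag200_ttm_placement(bo, pl_flag);
            for (i = 0; i < bo->placement.num_placement; i++)
                    bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

            return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
    }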
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c0..ab25752a0b1 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb5..84c71fad2b6 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
return nouveau_gpuobj_fini(&engctx->base, suspend);
}
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_engctx *engctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ *pobject = nv_object(engctx);
+ return ret;
+}
+
void
_nouveau_engctx_dtor(struct nouveau_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 00000000000..6b0843c3387
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_falcon *falcon = (void *)object;
+ const struct firmware *fw;
+ char name[32] = "internal";
+ int ret, i;
+ u32 caps;
+
+ /* enable engine, and determine its capabilities */
+ ret = nouveau_engine_init(&falcon->base);
+ if (ret)
+ return ret;
+
+ if (device->chipset < 0xa3 ||
+ device->chipset == 0xaa || device->chipset == 0xac) {
+ falcon->version = 0;
+ falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
+ } else {
+ caps = nv_ro32(falcon, 0x12c);
+ falcon->version = (caps & 0x0000000f);
+ falcon->secret = (caps & 0x00000030) >> 4;
+ }
+
+ caps = nv_ro32(falcon, 0x108);
+ falcon->code.limit = (caps & 0x000001ff) << 8;
+ falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+ nv_debug(falcon, "falcon version: %d\n", falcon->version);
+ nv_debug(falcon, "secret level: %d\n", falcon->secret);
+ nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+ nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+ /* wait for 'uc halted' to be signalled before continuing */
+ if (falcon->secret) {
+ nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ nv_wo32(falcon, 0x004, 0x00000010);
+ }
+
+ /* disable all interrupts */
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ /* no default ucode provided by the engine implementation, try and
+ * locate a "self-bootstrapping" firmware image for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret == 0) {
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ falcon->data.data = NULL;
+ falcon->data.size = 0;
+ release_firmware(fw);
+ }
+
+ falcon->external = true;
+ }
+
+ /* next step is to try and load "static code/data segment" firmware
+ * images for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware data\n");
+ return ret;
+ }
+
+ falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->data.data)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware code\n");
+ return ret;
+ }
+
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->code.data)
+ return -ENOMEM;
+ }
+
+ nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+ "static code/data segments" : "self-bootstrapping");
+
+ /* ensure any "self-bootstrapping" firmware image is in vram */
+ if (!falcon->data.data && !falcon->core) {
+ ret = nouveau_gpuobj_new(object->parent, NULL,
+ falcon->code.size, 256, 0,
+ &falcon->core);
+ if (ret) {
+ nv_error(falcon, "core allocation failed, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < falcon->code.size; i += 4)
+ nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ }
+
+ /* upload firmware bootloader (or the full code segments) */
+ if (falcon->core) {
+ if (device->card_type < NV_C0)
+ nv_wo32(falcon, 0x618, 0x04000000);
+ else
+ nv_wo32(falcon, 0x618, 0x00000114);
+ nv_wo32(falcon, 0x11c, 0);
+ nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+ nv_wo32(falcon, 0x114, 0);
+ nv_wo32(falcon, 0x118, 0x00006610);
+ } else {
+ if (falcon->code.size > falcon->code.limit ||
+ falcon->data.size > falcon->data.limit) {
+ nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+ return -EINVAL;
+ }
+
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00100000);
+ for (i = 0; i < falcon->code.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+ } else {
+ nv_wo32(falcon, 0x180, 0x01000000);
+ for (i = 0; i < falcon->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wo32(falcon, 0x188, i >> 6);
+ nv_wo32(falcon, 0x184, falcon->code.data[i]);
+ }
+ }
+ }
+
+ /* upload data segment (if necessary), zeroing the remainder */
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+ for (; i < falcon->data.limit; i += 4)
+ nv_wo32(falcon, 0xff4, 0x00000000);
+ } else {
+ nv_wo32(falcon, 0x1c0, 0x01000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+ for (; i < falcon->data.limit / 4; i++)
+ nv_wo32(falcon, 0x1c4, 0x00000000);
+ }
+
+ /* start it running */
+ nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+ nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+ nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+ nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+ return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+
+ if (!suspend) {
+ nouveau_gpuobj_ref(NULL, &falcon->core);
+ if (falcon->external) {
+ kfree(falcon->data.data);
+ kfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
+
+ nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_falcon *falcon;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
+ falcon = *pobject;
+ if (ret)
+ return ret;
+
+ falcon->addr = addr;
+ return 0;
+}
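For engines that do not embed their ucode, _nouveau_falcon_init() above derives firmware file names from the chipset and the falcon's MMIO base (addr >> 12): first a self-bootstrapping image, then split data/code images with "d"/"c" suffixes. A small stand-alone illustration of the names this produces; the chipset and base address are example values taken from the nvc0 BSP engine added later in this patch:

    /* Illustrative only: prints the firmware names _nouveau_falcon_init()
     * would request for a falcon at 0x084000 on an 0xc0 chipset. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int chipset = 0xc0;       /* example chipset  */
            unsigned int addr    = 0x084000;   /* PBSP falcon base */
            char name[32];

            snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
                     chipset, addr >> 12);
            printf("bootstrap image: %s\n", name);  /* nouveau/nvc0_fuc084  */

            snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
                     chipset, addr >> 12);
            printf("data image:      %s\n", name);  /* nouveau/nvc0_fuc084d */

            snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
                     chipset, addr >> 12);
            printf("code image:      %s\n", name);  /* nouveau/nvc0_fuc084c */
            return 0;
    }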
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69c..560b2214cf1 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
}
u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
}
void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f..0261a11b2ae 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
+ if (nouveau_mm_initialised(mm)) {
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
+ }
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (WARN_ON(nodes++ == mm->heap_nodes))
- return -EBUSY;
+ kfree(heap);
}
- kfree(heap);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907e..1d9f614cb97 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/bsp.h>
struct nv84_bsp_priv {
- struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
- struct nouveau_bsp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_bsp_chan *priv;
- int ret;
-
- ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_bsp_context_ctor,
- .dtor = nv84_bsp_context_dtor,
- .init = nv84_bsp_context_init,
- .fini = nv84_bsp_context_fini,
- .rd32 = _nouveau_bsp_context_rd32,
- .wr32 = _nouveau_bsp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
* BSP engine/subdev functions
******************************************************************************/
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_bsp_priv *priv;
int ret;
- ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- return nouveau_bsp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
- .dtor = nv84_bsp_dtor,
- .init = nv84_bsp_init,
- .fini = nv84_bsp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
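The nv84 BSP conversion above is the pattern repeated for several engines in this patch: per-engine context and engine boilerplate that only forwarded to the base class is dropped in favour of the generic _nouveau_engctx_* and _nouveau_engine_* helpers, one of which (_nouveau_engctx_ctor) is introduced in core/core/engctx.c earlier in this diff. For an engine with no hardware context state of its own, the whole context class reduces to the sketch below (handle value is illustrative):

    /* Sketch: a stub engine context class wired entirely to the generic
     * helpers, as nv84_bsp_cclass now is.  Includes mirror the converted
     * file; the handle is illustrative. */
    #include <core/engctx.h>
    #include <core/class.h>

    static struct nouveau_oclass
    example_stub_cclass = {
            .handle = NV_ENGCTX(BSP, 0x84),
            .ofuncs = &(struct nouveau_ofuncs) {
                    .ctor = _nouveau_engctx_ctor,
                    .dtor = _nouveau_engctx_dtor,
                    .init = _nouveau_engctx_init,
                    .fini = _nouveau_engctx_fini,
                    .rd32 = _nouveau_engctx_rd32,
                    .wr32 = _nouveau_engctx_wr32,
            },
    };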
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 00000000000..0a5aa6bb087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+ { 0x90b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+ struct nvc0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+ nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 00000000000..d4f23bbd75b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+ { 0x95b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+ struct nve0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nve0_bsp_cclass;
+ nv_engine(priv)->sclass = nve0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af74..283248c7b05 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
@@ -36,11 +35,7 @@
#include "fuc/nva3.fuc.h"
struct nva3_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
{}
};
-static void
+void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_falcon *falcon = (void *)subdev;
struct nouveau_object *engctx;
- struct nva3_copy_priv *priv = (void *)subdev;
- u32 dispatch = nv_rd32(priv, 0x10401c);
- u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040) >> 16;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+ u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+ u32 addr = nv_ro32(falcon, 0x040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044);
+ u32 data = nv_ro32(falcon, 0x044);
int chid;
engctx = nouveau_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
+ nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004, 0x00000040);
+ nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004, stat);
+ nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+ nv_wo32(falcon, 0x004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_copy_priv *priv;
int ret;
- ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ nv_falcon(priv)->code.data = nva3_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+ nv_falcon(priv)->data.data = nva3_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
return 0;
}
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
- struct nva3_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188, i >> 6);
- nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x10410c, 0x00000000);
- nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
- struct nva3_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = nva3_copy_init,
- .fini = nva3_copy_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
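Two things change in the PCE0 conversion above. First, the open-coded ucode upload and start sequence in nva3_copy_init()/nva3_copy_fini() moves into the shared _nouveau_falcon_init()/_fini(); the ctor only has to point the falcon at the built-in fuc arrays. Second, the interrupt handler switches from absolute register addresses to falcon-relative accessors: with falcon->addr set to 0x104000 in the ctor, nv_ro32(falcon, 0x01c) resolves to the same register as the old nv_rd32(priv, 0x10401c), via _nouveau_falcon_rd32() from core/core/falcon.c shown earlier. A tiny sketch of that translation:

    /* Sketch: how falcon-relative offsets map back onto the old absolute
     * PCE0 registers used before this hunk.  The base value comes from
     * nva3_copy_ctor() above; the helper name is hypothetical. */
    #include <stdint.h>

    static inline uint32_t pce0_reg(uint32_t falcon_offset)
    {
            const uint32_t falcon_base = 0x104000;   /* falcon->addr */
            return falcon_base + falcon_offset;      /* 0x01c -> 0x10401c,
                                                        0x008 -> 0x104008,
                                                        0x004 -> 0x104004 */
    }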
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a879105..b3ed2737e21 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <engine/fifo.h>
#include <engine/copy.h>
@@ -33,11 +32,7 @@
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
- .ctor = nvc0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
};
static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
* PCOPY engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)subdev;
- u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
- u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
- u32 stat = intr & disp & ~(disp >> 16);
- u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
- u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
- int chid;
-
- engctx = nouveau_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
- nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
- stat &= ~0x00000040;
- }
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret;
- if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
- }
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
- nouveau_engctx_put(engctx);
+ nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ return 0;
}
static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy1_cclass;
nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
- nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
- nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
- nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nvc0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
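On Fermi the two copy engines become two falcon instances at different MMIO bases (0x104000 for PCE0, 0x105000 for PCE1), so the idx * 0x1000 arithmetic and the local interrupt handler disappear: both engines reuse nva3_copy_intr(), and the only per-instance init left is writing the engine index into falcon register 0x084 (the old 0x104084 + idx * 0x1000). Condensed from nvc0_copy_init() above:

    /* Sketch of the per-instance setup that remains after the conversion;
     * this mirrors nvc0_copy_init() in the hunk above and relies on the
     * driver's core headers being in scope. */
    static int example_copy_init(struct nouveau_object *object)
    {
            struct nvc0_copy_priv *priv = (void *)object;
            int ret;

            ret = nouveau_falcon_init(&priv->base);
            if (ret)
                    return ret;

            /* engine index (0 or 1); the falcon base address supplies the
             * per-engine register stride the old code computed by hand */
            nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
            return 0;
    }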
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac..dbbe9e8998f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
#include <engine/copy.h>
struct nve0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
- .ctor = nve0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
};
static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca..b9749051272 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
#include <engine/crypt.h>
struct nv84_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv84_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
struct nv84_crypt_priv *priv = (void *)object;
int ret;
- ret = nouveau_crypt_init(&priv->base);
+ ret = nouveau_engine_init(&priv->base);
if (ret)
return ret;
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
+ .dtor = _nouveau_engine_dtor,
.init = nv84_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b9..21986f3bf0c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/falcon.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
@@ -36,11 +37,7 @@
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv98_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nv98_crypt_cclass;
nv_engine(priv)->sclass = nv98_crypt_sclass;
nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
- return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
- struct nv98_crypt_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_crypt_init(&priv->base);
- if (ret)
- return ret;
-
- /* wait for exit interrupt to signal */
- nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(priv, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(priv, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
- /* start it running */
- nv_wr32(priv, 0x08710c, 0x00000000);
- nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+ nv_falcon(priv)->code.data = nv98_pcrypt_code;
+ nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+ nv_falcon(priv)->data.data = nv98_pcrypt_data;
+ nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
return 0;
}
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
- .init = nv98_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 00000000000..d0817d94454
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+ (data & NV50_DISP_DAC_PWR_VSYNC) |
+ (data & NV50_DISP_DAC_PWR_DATA) |
+ (data & NV50_DISP_DAC_PWR_STATE);
+ const u32 doff = (or * 0x800);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+ const u32 doff = (or * 0x800);
+ int load = -EINVAL;
+ nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ udelay(9500);
+ nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+ load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+ nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_DAC_PWR:
+ ret = priv->dac.power(priv, or, data[0]);
+ break;
+ case NV50_DISP_DAC_LOAD:
+ ret = priv->dac.sense(priv, or, data[0]);
+ if (ret >= 0) {
+ data[0] = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
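nv50_dac_mthd() above is the method dispatcher for the new per-OR DAC controls: the OR index sits in the low bits of the method (masked with NV50_DISP_DAC_MTHD_OR), the method class is selected on mthd & ~0x3f, and for NV50_DISP_DAC_LOAD the sense value is returned in place through data[0]. A hedged sketch of a caller; the way the OR is folded into the method number and the object used to reach the display engine are assumptions for illustration, not the driver's real call path:

    /* Sketch only: run a load-detect cycle through the method interface and
     * return the 3-bit sense value that nv50_dac_sense() reads back. */
    static int example_dac_load_detect(struct nouveau_object *disp_object,
                                       int or, u32 loadval)
    {
            u32 data = loadval;
            int ret;

            ret = nv50_dac_mthd(disp_object, NV50_DISP_DAC_LOAD | or,
                                &data, sizeof(data));
            if (ret)
                    return ret;

            return data;
    }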
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 00000000000..373dbcc523b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x800);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 00000000000..dc57e24fc1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x030);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 00000000000..0d36bdc5141
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+ nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+ nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+ nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 00000000000..f065fc248ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 soff = (or * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+ nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+ nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+ nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+ nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 00000000000..5151bb26183
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+ nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+ nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+	/* purpose unknown, but audio doesn't work without it */
+ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c84ce..0f09af13541 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,20 +22,740 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nv50_disp_priv {
- struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_base *base = (void *)parent;
+ struct nv50_disp_chan *chan;
+ int ret;
+
+ if (base->chan & (1 << chid))
+ return -EBUSY;
+ base->chan |= (1 << chid);
+
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
+ chan = *pobject;
+ if (ret)
+ return ret;
+
+ chan->chid = chid;
+ return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+ base->chan &= ~(1 << chan->chid);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 chid = chan->chid;
+ u32 data = (chid << 28) | (addr << 10) | chid;
+ return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+ dmac = *pobject;
+ if (ret)
+ return ret;
+
+ dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!dmac->pushdma)
+ return -ENOENT;
+
+ switch (nv_mclass(dmac->pushdma)) {
+ case 0x0002:
+ case 0x003d:
+ if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmac->pushdma->target) {
+ case NV_MEM_TARGET_VRAM:
+ dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_dmac *dmac = (void *)object;
+ nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+ nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+ if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+ nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204, mast->push);
+ nv_wr32(priv, 0x610208, 0x00010000);
+ nv_wr32(priv, 0x61020c, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+ .ctor = nv50_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_mast_init,
+ .fini = nv50_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+ .ctor = nv50_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 3 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+ .ctor = nv50_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+ nv_error(pioc, "timeout0: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "timeout1: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+ .ctor = nv50_disp_oimm_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+ .ctor = nv50_disp_curs_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+	/* The register copies below appear to inform EVO of the display
+	 * capabilities, or something similar.  What the 0x614004 caps are
+	 * for is not known.
+ */
+ tmp = nv_rd32(priv, 0x614004);
+ nv_wr32(priv, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+ nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... EXT caps */
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+ nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x610024) & 0x00000100) {
+ nv_wr32(priv, 0x610024, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x61002c, 0x00000370);
+ nv_wr32(priv, 0x610028, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x610024, 0x00000000);
+ nv_wr32(priv, 0x610020, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
+ .init = nv50_disp_base_init,
+ .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+ { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- {},
+ { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nouveau_engctx *ectx;
+ int ret = -EBUSY;
+
+ /* no context needed for channel objects... */
+ if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ atomic_inc(&parent->refcount);
+ *pobject = parent;
+ return 0;
+ }
+
+ /* allocate display hardware to client */
+ mutex_lock(&nv_subdev(priv)->mutex);
+ if (list_empty(&nv_engine(priv)->contexts)) {
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0x10000, 0x10000,
+ NVOBJ_FLAG_HEAP, &ectx);
+ *pobject = nv_object(ectx);
+ }
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+ .handle = NV_ENGCTX(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_data_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+ u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
+
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
+ continue;
+
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+ chid, addr & 0xffc, data, addr);
+ }
+}
+
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
@@ -80,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 3;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 3;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return false;
+
+ data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+ struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 3;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 3;
+ }
+
+ if (!(ctrl & (1 << head)))
+ return 0x0000;
+
+ data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+ if (!data)
+ return 0x0000;
+
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = outp,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000060) >> 5) - 1;
+ if (head >= 0) {
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 1);
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000010);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
+{
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 symbol = 100000;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000;
+ u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, bits, r;
+
+ /* calculate packed data rate for each lane */
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ if (clksor & 0x000c0000)
+ link_bw = 270000;
+ else
+ link_bw = 162000;
+
+ if ((ctrl & 0xf0000) == 0x60000) bits = 30;
+ else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+ else bits = 18;
+
+ link_data_rate = (pclk * bits / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ nv_error(priv, "unable to find suitable dp config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+ struct dcb_output outp;
+ u32 addr, mask, data;
+ int head;
+
+ /* finish detaching encoder? */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 2);
+
+ /* check whether a vpll change is required */
+ head = ffs((super & 0x00000600) >> 9) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+ }
+
+ /* (re)attach the relevant OR to the head */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+ if (conf) {
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0xffffffff;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+ addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0x00000707;
+ data = (conf & 0x0100) ? 0x0101 : 0x0000;
+ }
+
+ nv_mask(priv, addr, mask, data);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000020);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ u8 ver, hdr;
+
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+ if (outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_tmds(priv, &outp);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000040);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+ u32 super = nv_rd32(priv, 0x610030);
+
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+ if (intr1 & 0x00000010)
+ nv50_disp_intr_unk10(priv, super);
+ if (intr1 & 0x00000020)
+ nv50_disp_intr_unk20(priv, super);
+ if (intr1 & 0x00000040)
+ nv50_disp_intr_unk40(priv, super);
+}
+
+void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
- u32 stat1 = nv_rd32(priv, 0x610024);
+ u32 intr0 = nv_rd32(priv, 0x610020);
+ u32 intr1 = nv_rd32(priv, 0x610024);
- if (stat1 & 0x00000004) {
+ if (intr0 & 0x001f0000) {
+ nv50_disp_intr_error(priv);
+ intr0 &= ~0x001f0000;
+ }
+
+ if (intr1 & 0x00000004) {
nv50_disp_intr_vblank(priv, 0);
nv_wr32(priv, 0x610024, 0x00000004);
- stat1 &= ~0x00000004;
+ intr1 &= ~0x00000004;
}
- if (stat1 & 0x00000008) {
+ if (intr1 & 0x00000008) {
nv50_disp_intr_vblank(priv, 1);
nv_wr32(priv, 0x610024, 0x00000008);
- stat1 &= ~0x00000008;
+ intr1 &= ~0x00000008;
}
+ if (intr1 & 0x00000070) {
+ nv50_disp_intr_super(priv, intr1);
+ intr1 &= ~0x00000070;
+ }
}
static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
@@ -114,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv50_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
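
For reference, the TU/VTU search in nv50_disp_intr_unk20_dp() above boils down to: compute the ratio of the packed per-lane pixel data rate to the DP link symbol rate, scaled by symbol = 100000, then walk TU from 64 down to 32 looking for the integer/fraction/adjust triple (VTUi/VTUf/VTUa) that best represents that ratio in the hardware fields. The stand-alone model below reproduces that arithmetic in user space using plain 64-bit division instead of do_div(); the pixel clock, bits per pixel, lane count and link rate are made-up example figures, not values read from the registers used in the patch.

/* Simplified user-space model of the TU/VTU search; illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t symbol = 100000;          /* scale factor used by the driver */
        uint32_t pclk = 148500;                  /* kHz, hypothetical 1080p mode */
        uint32_t bits = 24;                      /* assumed bits per pixel */
        uint32_t link_nr = 2, link_bw = 270000;  /* assumed 2 lanes at HBR (270MHz symbols) */
        uint64_t link_data_rate = (uint64_t)pclk * bits / 8 / link_nr;
        uint64_t link_ratio = link_data_rate * symbol / link_bw;
        uint32_t best_diff = 64 * symbol;
        int TU, bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;

        for (TU = 64; TU >= 32; TU--) {
                /* average number of valid symbols per TU, scaled by 'symbol' */
                uint32_t tu_valid = link_ratio * TU;
                uint32_t VTUi = tu_valid / symbol;
                uint32_t calc = VTUi * symbol;
                uint32_t diff = tu_valid - calc;
                uint32_t VTUf = 1, VTUa = 0;

                if (diff) {
                        if (diff >= symbol / 2) {
                                VTUf = symbol / (symbol - diff);
                                if (symbol - (VTUf * diff))
                                        VTUf++;
                                if (VTUf <= 15) {
                                        VTUa = 1;
                                        calc += symbol - (symbol / VTUf);
                                } else {
                                        VTUa = 0;
                                        VTUf = 1;
                                        calc += symbol;
                                }
                        } else {
                                VTUa = 0;
                                VTUf = (symbol / diff) <= 15 ? (symbol / diff) : 15;
                                calc += symbol / VTUf;
                        }
                        diff = calc - tu_valid;
                } else {
                        /* hw dislikes a zero fraction: borrow one whole symbol */
                        VTUa = 0;
                        VTUf = 1;
                        VTUi--;
                }

                if (diff < best_diff) {
                        best_diff = diff;
                        bestTU = TU;
                        bestVTUa = VTUa;
                        bestVTUf = VTUf;
                        bestVTUi = VTUi;
                        if (diff == 0)
                                break;
                }
        }

        printf("TU=%d VTUa=%d VTUf=%d VTUi=%d diff=%u\n",
               bestTU, bestVTUa, bestVTUf, bestVTUi, (unsigned)best_diff);
        return 0;
}
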
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 00000000000..a6bb931450f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+ struct nouveau_oclass *sclass;
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+ int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+ int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+ int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+ u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+ int lane, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ u32 lvdsconf;
+ } sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+struct nv50_disp_base {
+ struct nouveau_parent base;
+ struct nouveau_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan {
+ struct nouveau_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a) \
+ nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nouveau_namedb_fini(&(a)->base, (b))
+
+int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nouveau_dmaobj *pushdma;
+ u32 push;
+};
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 00000000000..fc84eacdfbe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+ { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+ { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv84_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x82),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 00000000000..ba9dfd4669a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+ { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+ { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv94_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x88),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 00000000000..5d63902cded
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+ { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+ { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva0_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x83),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 00000000000..e9192ca389f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+ { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+ { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva3_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nva3_hda_eld;
+ priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x85),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b..9e38ebff5fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nvd0_disp_priv {
- struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
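+/* These hooks appear to manage RAMHT (handle hash table) entries for DMA
+ * objects bound to an EVO channel: the context value packs the channel id
+ * and the object's instance offset so the display engine can resolve the
+ * handles passed in via channel methods.
+ */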
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+ return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494, mast->push);
+ nv_wr32(priv, 0x610498, 0x00010000);
+ nv_wr32(priv, 0x61049c, 0x00000001);
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+ .ctor = nvd0_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_mast_init,
+ .fini = nvd0_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+ .ctor = nvd0_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 5 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+ .ctor = nvd0_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+ .ctor = nvd0_disp_oimm_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+ .ctor = nvd0_disp_curs_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The register copies below appear to inform EVO of the display
+ * capabilities, or something similar.
+ */
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+ nv_wr32(priv, 0x6100ac, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x610090, 0x00000000);
+ nv_wr32(priv, 0x6100a0, 0x00000000);
+ nv_wr32(priv, 0x6100b0, 0x00000307);
+
+ return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x6100b0, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+ .ctor = nvd0_disp_base_ctor,
+ .dtor = nvd0_disp_base_dtor,
+ .init = nvd0_disp_base_init,
+ .fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- {},
+ { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
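+/* Translate the output-control value latched by the hardware into a DCB
+ * entry and its VBIOS output table.  Outputs 0-3 are the analog DACs;
+ * for SORs, bits 8..11 of the control value appear to select the
+ * protocol (LVDS/TMDS/DP) and sublink.  The result feeds the script and
+ * clock-compare execution helpers below.
+ */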
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ dcb->sorconf.link = mask;
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
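+/* Look up the output's clock configuration table, run the script matching
+ * the current pixel clock, and hand the configuration value back to the
+ * caller (which appears to use it to program the SOR/DAC controls).
+ */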
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+ u32 ctrl, int id, u32 pclk)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return 0x0000;
+
+ switch (dcb.type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
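+/* The three handlers below service the stages of the "supervisor"
+ * interrupt sequence (status bits 1, 2 and 4 of 0x6100ac).  Their exact
+ * semantics aren't fully understood (hence the unk* names), but from the
+ * code: stage 1 runs pre-modeset scripts, stage 2 reprograms the head's
+ * pixel clock and runs the first clock-compare script, and stage 4 runs
+ * the second.  Each stage finishes by clearing 0x6101d4/0x6109d4 and
+ * writing 0x80000000 to 0x6101d0, presumably to let the sequence continue.
+ */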
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 1);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+ const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
+ const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+ const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
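+ /* The arithmetic below appears to derive the DisplayPort
+ * transfer-unit fill parameter: "ratio" is the share of link
+ * bandwidth consumed by the pixel stream, scaled by "symbol"
+ * (100000) to keep precision in integer math, and the value
+ * written to 0x616610 encodes how much of each 64-symbol TU
+ * carries active pixel data.
+ */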
+
+ if ((conf & 0x3c0) == 0x180) bits = 30;
+ else if ((conf & 0x3c0) == 0x140) bits = 24;
+ else bits = 18;
+ datarate = (pclk * bits) / 8;
+
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ u32 pclk;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 2);
+ }
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+ if (pclk && (mask & 0x00010000)) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+ if (mcp & (1 << head)) {
+ if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+ u32 addr, mask, data = 0x00000000;
+ if (i < 4) {
+ addr = 0x612280 + ((i - 0) * 0x800);
+ mask = 0xffffffff;
+ } else {
+ switch (mcp & 0x00000f00) {
+ case 0x00000800:
+ case 0x00000900:
+ nvd0_display_unk2_calc_tu(priv, head, i - 4);
+ break;
+ default:
+ break;
+ }
+
+ addr = 0x612300 + ((i - 4) * 0x800);
+ mask = 0x00000707;
+ if (cfg & 0x00000100)
+ data = 0x00000101;
+ }
+ nv_mask(priv, addr, mask, data);
+ }
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int pclk, i;
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+ if (mcp & (1 << head))
+ exec_clkcmp(priv, head, i, mcp, 1, pclk);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
-static void
+void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
- struct nvd0_disp_priv *priv = (void *)subdev;
+ struct nv50_disp_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x610088);
int i;
- for (i = 0; i < 4; i++) {
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x61008c);
+ nv_wr32(priv, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
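+ /* EVO channel exception: report the offending method/data and clear
+ * the event so the channel can continue.
+ */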
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(priv, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0) {
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+ }
+
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(priv, 0x6100ac);
+ u32 mask = 0, crtc = ~0;
+
+ while (!mask && ++crtc < priv->head.nr)
+ mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+ if (stat & 0x00000001) {
+ nv_wr32(priv, 0x6100ac, 0x00000001);
+ nvd0_display_unk1_handler(priv, crtc, mask);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ nv_wr32(priv, 0x6100ac, 0x00000002);
+ nvd0_display_unk2_handler(priv, crtc, mask);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000004) {
+ nv_wr32(priv, 0x6100ac, 0x00000004);
+ nvd0_display_unk4_handler(priv, crtc, mask);
+ stat &= ~0x00000004;
+ }
+
+ if (stat) {
+ nv_info(priv, "unknown intr24 0x%08x\n", stat);
+ nv_wr32(priv, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < priv->head.nr; i++) {
u32 mask = 0x01000000 << i;
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvd0_disp_priv *priv;
+ struct nv50_disp_priv *priv;
int ret;
ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nvd0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass
nvd0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0xd0),
+ .handle = NV_ENGINE(DISP, 0x90),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvd0_disp_ctor,
.dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 00000000000..259537c4587
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+ { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nve0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x91),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 00000000000..39b6b67732d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+ return 0;
+}
+
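+/* Common dispatcher for SOR-class methods.  The method offset encodes the
+ * target OR, link and head; these are decoded into a DCB match mask and
+ * the request is routed to the per-chipset hooks installed in priv->sor
+ * by the display engine constructors.
+ */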
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+ const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+ const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+ const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
+ const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret = -EINVAL;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+ data = *(u32 *)args;
+
+ if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+ return -ENODEV;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_SOR_PWR:
+ ret = priv->sor.power(priv, or, data);
+ break;
+ case NVA3_DISP_SOR_HDA_ELD:
+ ret = priv->sor.hda_eld(priv, or, args, size);
+ break;
+ case NV84_DISP_SOR_HDMI_PWR:
+ ret = priv->sor.hdmi(priv, head, or, data);
+ break;
+ case NV50_DISP_SOR_LVDS_SCRIPT:
+ priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+ ret = 0;
+ break;
+ case NV94_DISP_SOR_DP_TRAIN:
+ switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+ case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+ ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+ ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+ ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+ break;
+ default:
+ break;
+ }
+ break;
+ case NV94_DISP_SOR_DP_LNKCTL:
+ ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_DRVCTL(0):
+ case NV94_DISP_SOR_DP_DRVCTL(1):
+ case NV94_DISP_SOR_DP_DRVCTL(2):
+ case NV94_DISP_SOR_DP_DRVCTL(3):
+ ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+ type, mask, data, &outp);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 00000000000..f6edd009762
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
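+/* Map a logical DP lane index to the bit shift used in the per-lane
+ * drive/pre-emphasis registers; chipset 0xaf (seen in some Apple
+ * systems) appears to wire the lanes in a different order.
+ */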
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
+}
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+ init.offset = info.script[2];
+ else
+ init.offset = info.script[3];
+ nvbios_exec(&init);
+
+ init.offset = info.script[0];
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[1],
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ /* -> 10 kHz units */
+ link_bw *= 2700;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
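+ /* walk the VBIOS link-compare table for the first entry whose
+ * rate does not exceed the requested bandwidth, and run the
+ * script it points at (presumably per-rate link configuration).
+ */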
+ while (link_bw < nv_ro16(bios, info.lnkcmp))
+ info.lnkcmp += 4;
+ init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+ nvbios_exec(&init);
+ }
+
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+ if (link_bw > 16200)
+ clksor |= 0x00040000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 00000000000..c37ce7e29f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvd0[] = { 16, 8, 0, 24 };
+ return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (nv_ro08(bios, info.lnkcmp) < link_bw)
+ info.lnkcmp += 3;
+ init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+ nvbios_exec(&init);
+ }
+
+ clksor |= link_bw << 18;
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d3976..5103e88d187 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
#include <subdev/fb.h>
#include <engine/dmaobj.h>
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nouveau_dmaobj *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
struct nv_dma_class *args = data;
- struct nouveau_dmaobj *object;
int ret;
if (size < sizeof(*args))
return -EINVAL;
- ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
- object = *pobject;
+ ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+ *pobject = nv_object(dmaobj);
if (ret)
return ret;
switch (args->flags & NV_DMA_TARGET_MASK) {
case NV_DMA_TARGET_VM:
- object->target = NV_MEM_TARGET_VM;
+ dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_TARGET_VRAM:
- object->target = NV_MEM_TARGET_VRAM;
+ dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_TARGET_PCI:
- object->target = NV_MEM_TARGET_PCI;
+ dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_TARGET_PCI_US:
case NV_DMA_TARGET_AGP:
- object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
switch (args->flags & NV_DMA_ACCESS_MASK) {
case NV_DMA_ACCESS_VM:
- object->access = NV_MEM_ACCESS_VM;
+ dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_ACCESS_RD:
- object->access = NV_MEM_ACCESS_RO;
+ dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_ACCESS_WR:
- object->access = NV_MEM_ACCESS_WO;
+ dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_ACCESS_RDWR:
- object->access = NV_MEM_ACCESS_RW;
+ dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- object->start = args->start;
- object->limit = args->limit;
- return 0;
+ dmaobj->start = args->start;
+ dmaobj->limit = args->limit;
+ dmaobj->conf0 = args->conf0;
+
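+ /* binding to a gpuobj is deferred (or skipped) for objects created
+ * directly against the device; anything else is bound immediately and
+ * the caller gets the resulting gpuobj back in place of the dmaobj.
+ */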
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ /* delayed, or no, binding */
+ break;
+ default:
+ ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+ if (ret == 0) {
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ }
+ break;
+ }
+
+ return ret;
}
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+ .ctor = nouveau_dmaobj_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+ { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f3199..027d8217c0f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv04_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
u32 length = dmaobj->limit - dmaobj->start;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV03_CHANNEL_DMA_CLASS:
+ case NV10_CHANNEL_DMA_CLASS:
+ case NV17_CHANNEL_DMA_CLASS:
+ case NV40_CHANNEL_DMA_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (dmaobj->target == NV_MEM_TARGET_VM) {
if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv04_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
- .ctor = nv04_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
- { 0x0002, &nv04_dmaobj_ofuncs },
- { 0x0003, &nv04_dmaobj_ofuncs },
- { 0x003d, &nv04_dmaobj_ofuncs },
- {}
-};
-
-static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv04_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv04_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e28..750183f7c05 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv50_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags = nv_mclass(dmaobj);
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV50_CHANNEL_DMA_CLASS:
+ case NV84_CHANNEL_DMA_CLASS:
+ case NV50_CHANNEL_IND_CLASS:
+ case NV84_CHANNEL_IND_CLASS:
+ case NV50_DISP_MAST_CLASS:
+ case NV84_DISP_MAST_CLASS:
+ case NV94_DISP_MAST_CLASS:
+ case NVA0_DISP_MAST_CLASS:
+ case NVA3_DISP_MAST_CLASS:
+ case NV50_DISP_SYNC_CLASS:
+ case NV84_DISP_SYNC_CLASS:
+ case NV94_DISP_SYNC_CLASS:
+ case NVA0_DISP_SYNC_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NV50_DISP_OVLY_CLASS:
+ case NV84_DISP_OVLY_CLASS:
+ case NV94_DISP_OVLY_CLASS:
+ case NVA0_DISP_OVLY_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
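+ /* if the class interface didn't specify CONF0, pick defaults: VM-backed
+ * objects get the VM paging/compression settings, anything else is
+ * treated as linear memory with user privilege and no compression.
+ */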
+ if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
switch (dmaobj->target) {
case NV_MEM_TARGET_VM:
- flags |= 0x00000000;
- flags |= 0x60000000; /* COMPRESSION_USEVM */
- flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags |= 0x00010000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags |= 0x00020000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags |= 0x00030000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00030000;
break;
default:
return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags |= 0x00040000;
+ flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags |= 0x00080000;
+ flags0 |= 0x00080000;
break;
}
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x00, flags0);
nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
upper_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
}
return ret;
}
static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv50_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
- .ctor = nv50_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
- { 0x0002, &nv50_dmaobj_ofuncs },
- { 0x0003, &nv50_dmaobj_ofuncs },
- { 0x003d, &nv50_dmaobj_ofuncs },
- {}
-};
-
-static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv50_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv50_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa0869553..cd3970d03b8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/device.h>
#include <core/gpuobj.h>
+#include <core/class.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nvc0_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
{
- struct nvc0_dmaobj_priv *dmaobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
- ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVA3_DISP_MAST_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= 0x00020000;
+ }
+ }
- if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
return -EINVAL;
+ }
- return 0;
-}
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags0 |= 0x00080000;
+ break;
+ }
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
- .ctor = nvc0_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
+ }
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
- { 0x0002, &nvc0_dmaobj_ofuncs },
- { 0x0003, &nvc0_dmaobj_ofuncs },
- { 0x003d, &nvc0_dmaobj_ofuncs },
- {}
-};
+ return ret;
+}
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nvc0_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvc0_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 00000000000..d1528752980
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags0 = 0x00000000;
+ int ret;
+
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVD0_DISP_MAST_CLASS:
+ case NVD0_DISP_SYNC_CLASS:
+ case NVD0_DISP_OVLY_CLASS:
+ case NVE0_DISP_MAST_CLASS:
+ case NVE0_DISP_SYNC_CLASS:
+ case NVE0_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+ } else {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00000009;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+ nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvd0_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2a..c2b9db33581 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
#include <core/object.h>
#include <core/handle.h>
+#include <core/class.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int bar, u32 addr, u32 size, u32 pushbuf,
- u32 engmask, int len, void **ptr)
+ u64 engmask, int len, void **ptr)
{
struct nouveau_device *device = nv_device(engine);
struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case 0x0002:
- case 0x003d:
+ case NV_DMA_FROM_MEMORY_CLASS:
+ case NV_DMA_IN_MEMORY_CLASS:
break;
default:
return -EINVAL;
}
- if (dmaeng->bind) {
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
- if (ret)
- return ret;
- }
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
/* find a free fifo channel */
spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
}
u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
return ioread32_native(chan->user + addr);
}
void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
iowrite32_native(data, chan->user + addr);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c..a47a8548f9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ nv_error(priv, "CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
u32 ib_get = nv_rd32(priv, 0x003334);
u32 ib_put = nv_rd32(priv, 0x003330);
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x003334, ib_put);
}
} else {
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
if (device->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
- nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status) {
- nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_error(priv, "still angry after %d spins, halt\n", cnt);
nv_wr32(priv, 0x002140, 0);
nv_wr32(priv, 0x000140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89..2c927c1d173 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b..a9cb51d38c5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
&chan);
*pobject = nv_object(chan);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b8..2b1f9172122 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x1000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00..bd096364f68 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
/* HW bug workaround:
*
* PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
ret = -EBUSY;
}
-
nv_wr32(priv, 0x00b860, me);
+
+ if (ret == 0) {
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ }
+
return ret;
}
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f16..1eb1c512f50 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
return -EBUSY;
}
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be60055..b4365dde185 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
args->pushbuf,
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_COPY1), &chan);
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_COPY1) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_PPP), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- nv_info(priv, "unknown status 0x00000100\n");
+ nv_warn(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafb..c930da99c2c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
- int subdev;
- u32 mask;
+ u64 subdev;
+ u64 mask;
} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
+ _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
_(NVDEV_ENGINE_VP , 0),
_(NVDEV_ENGINE_PPP , 0),
_(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda276..62ab231cd6b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
.b16 #nve4_gpc_mmio_tail
.b16 #nve4_tpc_mmio_head
.b16 #nve4_tpc_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0..09ee4702c8b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
0x00000000,
/* 0x0064: chipsets */
0x000000e4,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
0x000000e7,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
+ 0x000000e6,
+ 0x0110008c,
+ 0x01580110,
0x00000000,
-/* 0x0080: nve4_gpc_mmio_head */
+/* 0x008c: nve4_gpc_mmio_head */
0x00000380,
0x04000400,
0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
0x14003100,
0x000031d0,
0x040031e0,
-/* 0x0104: nve4_gpc_mmio_tail */
-/* 0x0104: nve4_tpc_mmio_head */
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
0x00000048,
0x00000064,
0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654b..0bcfa4d447e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f48..bb03d2a1d57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
0x01fa0613,
0xf803f806,
/* 0x0829: ctx_xfer */
- 0x0611f400,
-/* 0x082f: ctx_xfer_pre */
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
-/* 0x083d: ctx_xfer_pre_load */
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
-/* 0x0856: ctx_xfer_exec */
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
-/* 0x08dd: ctx_xfer_post_save_wait */
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
-/* 0x08e9: ctx_xfer_post */
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
-/* 0x0914: ctx_xfer_no_post_mmio */
- 0x064921f5,
-/* 0x0918: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00f7f100,
+ 0x06f4b60c,
+ 0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+ 0xfecf80fe,
+ 0x00e4f100,
+ 0xf91bf420,
+ 0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x9821f510,
+ 0x3121f506,
+ 0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf5065721,
+ 0xf5066621,
+ 0xbd067821,
+ 0x5721f5f4,
+ 0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+ 0x16019806,
+ 0x041427f1,
+ 0xd00624b6,
+ 0xe7f10020,
+ 0xe3f0a500,
+ 0x021fb941,
+ 0xb68d21f4,
+ 0xfcf004e0,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0x21f405f2,
+ 0xfc17f18d,
+ 0x0213f04a,
+ 0xd00c27f0,
+ 0x21f50012,
+ 0x27f10207,
+ 0x23f047fc,
+ 0x0020d002,
+ 0xb6012cf0,
+ 0x12d00320,
+ 0x01acf000,
+ 0xf006a5f0,
+ 0x0c9800b7,
+ 0x150d9814,
+ 0xf500e7f0,
+ 0xf0015c21,
+ 0x21f508a7,
+ 0x21f50103,
+ 0x01f40207,
+ 0x0ca7f022,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd065721,
+ 0x9821f5f4,
+ 0x2621f506,
+ 0x6621f502,
+ 0xf5f4bd06,
+ 0xf4065721,
+ 0x01981011,
+ 0x0511fd80,
+ 0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+ 0xf507df21,
+/* 0x092f: ctx_xfer_done */
+ 0xf8064921,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa2866..7fe9d7cf486 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
.b8 0xe7 0 0 0
.b16 #nve4_hub_mmio_head
.b16 #nve4_hub_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
.b8 0 0 0 0
nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3..e3421af68ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
0x00000000,
/* 0x005c: chipsets */
0x000000e4,
- 0x013c0070,
+ 0x01440078,
0x000000e7,
- 0x013c0070,
+ 0x01440078,
+ 0x000000e6,
+ 0x01440078,
0x00000000,
-/* 0x0070: nve4_hub_mmio_head */
+/* 0x0078: nve4_hub_mmio_head */
0x0417e91c,
0x04400204,
0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
0x00408840,
0x08408900,
0x00408980,
-/* 0x013c: nve4_hub_mmio_tail */
- 0x00000000,
- 0x00000000,
+/* 0x0144: nve4_hub_mmio_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
0x0613f002,
0xf80601fa,
/* 0x07fb: ctx_xfer */
- 0xf400f803,
- 0x02f40611,
-/* 0x0801: ctx_xfer_pre */
- 0x10f7f00d,
- 0x067221f5,
-/* 0x080b: ctx_xfer_pre_load */
- 0xf01c11f4,
- 0x21f502f7,
- 0x21f50631,
- 0x21f50640,
- 0xf4bd0652,
- 0x063121f5,
- 0x069221f5,
-/* 0x0824: ctx_xfer_exec */
- 0xf1160198,
- 0xb6041427,
- 0x20d00624,
- 0x00e7f100,
- 0x41e3f0a5,
- 0xf4021fb9,
- 0xe0b68d21,
- 0x01fcf004,
- 0xb6022cf0,
- 0xf2fd0124,
- 0x8d21f405,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f006a5,
- 0x140c9800,
- 0xf0150d98,
- 0x21f500e7,
- 0xa7f0015c,
- 0x0321f508,
- 0x0721f501,
- 0x2201f402,
- 0xf40ca7f0,
- 0x17f1c921,
- 0x14b60a10,
- 0x0527f006,
-/* 0x08ab: ctx_xfer_post_save_wait */
- 0xcf0012d0,
- 0x22fd0012,
- 0xfa1bf405,
-/* 0x08b7: ctx_xfer_post */
- 0xf02e02f4,
- 0x21f502f7,
- 0xf4bd0631,
- 0x067221f5,
- 0x022621f5,
- 0x064021f5,
- 0x21f5f4bd,
- 0x11f40631,
- 0x80019810,
- 0xf40511fd,
- 0x21f5070b,
-/* 0x08e2: ctx_xfer_no_post_mmio */
-/* 0x08e2: ctx_xfer_done */
- 0x00f807b1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xf100f803,
+ 0xb60c00f7,
+ 0xe7f006f4,
+ 0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+ 0xf100fecf,
+ 0xf42000e4,
+ 0x11f4f91b,
+ 0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x3121f502,
+ 0x4021f506,
+ 0x5221f506,
+ 0xf5f4bd06,
+ 0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+ 0x98069221,
+ 0x27f11601,
+ 0x24b60414,
+ 0x0020d006,
+ 0xa500e7f1,
+ 0xb941e3f0,
+ 0x21f4021f,
+ 0x04e0b68d,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0xf18d21f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00c,
+ 0x020721f5,
+ 0x47fc27f1,
+ 0xd00223f0,
+ 0x2cf00020,
+ 0x0320b601,
+ 0xf00012d0,
+ 0xa5f001ac,
+ 0x00b7f006,
+ 0x98140c98,
+ 0xe7f0150d,
+ 0x5c21f500,
+ 0x08a7f001,
+ 0x010321f5,
+ 0x020721f5,
+ 0xf02201f4,
+ 0x21f40ca7,
+ 0x1017f1c9,
+ 0x0614b60a,
+ 0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+ 0x12cf0012,
+ 0x0522fd00,
+ 0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x3121f502,
+ 0xf5f4bd06,
+ 0xf5067221,
+ 0xf5022621,
+ 0xbd064021,
+ 0x3121f5f4,
+ 0x1011f406,
+ 0xfd800198,
+ 0x0bf40511,
+ 0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+ 0x0000f807,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 61852824845..e30a9c5ff1f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv03_graph_gdi_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_gdi_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_iifc_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_chroma },
- { 0x018c, nv01_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifm_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifm_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_surf3d_omthds[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{}
};
static struct nouveau_omthds
nv03_graph_ttri_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
{}
};
static struct nouveau_omthds
nv01_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77..5c0f843ea24 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv17_celcius_omthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+ { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a7..5b20401bf91 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
- if (nv_device(engine)->card_type == NV_20) {
+ if (nv_device(engine)->chipset != 0x34) {
nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
printk(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
printk("\n");
- nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, subc, class, mthd, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index cc6574eeb80..0b36dd3deeb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
switch (nv_device(priv)->chipset) {
case 0x40:
- case 0x41: /* guess */
+ case 0x41:
case 0x42:
case 0x43:
- case 0x45: /* guess */
+ case 0x45:
case 0x4e:
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
break;
case 0x44:
case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
+ case 0x4c:
case 0x47:
case 0x49:
case 0x4b:
- case 0x4c:
+ case 0x63:
case 0x67:
- default:
+ case 0x68:
nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
break;
}
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf47..b1c3d835b4c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
return 0;
}
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+ { 0x00000001, "BUSY" }, /* set when any bit is set */
+ { 0x00000002, "DISPATCH" },
+ { 0x00000004, "UNK2" },
+ { 0x00000008, "UNK3" },
+ { 0x00000010, "UNK4" },
+ { 0x00000020, "UNK5" },
+ { 0x00000040, "M2MF" },
+ { 0x00000080, "UNK7" },
+ { 0x00000100, "CTXPROG" },
+ { 0x00000200, "VFETCH" },
+ { 0x00000400, "CCACHE_UNK4" },
+ { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+ { 0x00001000, "UNK14XX" },
+ { 0x00002000, "UNK24XX_CSCHED" },
+ { 0x00004000, "UNK1CXX" },
+ { 0x00008000, "CLIPID" },
+ { 0x00010000, "ZCULL" },
+ { 0x00020000, "ENG2D" },
+ { 0x00040000, "UNK34XX" },
+ { 0x00080000, "TPRAST" },
+ { 0x00100000, "TPROP" },
+ { 0x00200000, "TEX" },
+ { 0x00400000, "TPVP" },
+ { 0x00800000, "MP" },
+ { 0x01000000, "ROP" },
+ {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+ "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+ "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+ "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+ "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+ const char *const units[], u32 status)
+{
+ int i;
+
+ nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+ for (i = 0; units[i] && status; i++) {
+ if ((status & 7) == 1)
+ pr_cont(" %s", units[i]);
+ status >>= 3;
+ }
+ if (status)
+ pr_cont(" (invalid: 0x%x)", status);
+ pr_cont("\n");
+}
+
static int
nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
!(timeout = ptimer->read(ptimer) - start > 2000000000));
if (timeout) {
- nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
- "0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
- nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+ nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+ tmp = nv_rd32(priv, 0x400700);
+ nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
+ nouveau_bitfield_print(nv50_pgraph_status, tmp);
+ pr_cont("\n");
+
+ nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+ nv_rd32(priv, 0x400380));
+ nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+ nv_rd32(priv, 0x400384));
+ nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+ nv_rd32(priv, 0x400388));
}
nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
if (ustatus) {
if (display)
- nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(priv, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- nv_info(priv, "%s - No TPs claiming errors?\n", name);
+ nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}
static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
printk("\n");
nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
"mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, subc, class, mthd, data);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0..45aff5f5085 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
- bool enable = true;
int ret, i;
- switch (device->chipset) {
- case 0xd9: /* known broken without binary driver firmware */
- enable = false;
- break;
- default:
- break;
- }
-
- ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -814,7 +805,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a100, 0x00000002);
nv_wr32(priv, 0x409100, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
- nv_info(priv, "0x409800 wait failed\n");
+ nv_warn(priv, "0x409800 wait failed\n");
nv_wr32(priv, 0x409840, 0xffffffff);
nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12e..a1e78de4645 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
return 0x9297;
case 0xe4:
case 0xe7:
+ case 0xe6:
return 0xa097;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f19..9f82e9702b4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_graph_priv *priv;
int ret, i;
- ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->magic_not_rop_nr = 1;
break;
case 0xe7:
+ case 0xe6:
priv->magic_not_rop_nr = 1;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cec..fde8e24415e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e..9fd86375f4c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
static struct nouveau_omthds
nv31_mpeg_omthds[] = {
- { 0x0190, nv31_mpeg_mthd_dma },
- { 0x01a0, nv31_mpeg_mthd_dma },
- { 0x01b0, nv31_mpeg_mthd_dma },
+ { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+ { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+ { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d5..bc7d12b30fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b100, stat);
nv_wr32(priv, 0x00b230, 0x00000001);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da198..5a5b2a773ed 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_ppp base;
+ struct nouveau_engine base;
};
struct nv98_ppp_chan {
- struct nouveau_ppp_chan base;
+ struct nouveau_engctx base;
};
/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
* PPPP context
******************************************************************************/
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_ppp_chan *priv;
- int ret;
-
- ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_ppp_context_ctor,
- .dtor = nv98_ppp_context_dtor,
- .init = nv98_ppp_context_init,
- .fini = nv98_ppp_context_fini,
- .rd32 = _nouveau_ppp_context_rd32,
- .wr32 = _nouveau_ppp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
* PPPP engine/subdev functions
******************************************************************************/
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00400002;
- nv_subdev(priv)->intr = nv98_ppp_intr;
nv_engine(priv)->cclass = &nv98_ppp_cclass;
nv_engine(priv)->sclass = nv98_ppp_sclass;
return 0;
}
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- return nouveau_ppp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = nv98_ppp_dtor,
- .init = nv98_ppp_init,
- .fini = nv98_ppp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 00000000000..ebf0d860e2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+ { 0x90b3, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+ .handle = NV_ENGCTX(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+ struct nvc0_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000fff2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_ppp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PPPP", "ppp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+ nv_engine(priv)->sclass = nvc0_ppp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+ .handle = NV_ENGINE(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_ppp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b..2a859a31c30 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv04_software_omthds[] = {
- { 0x0150, nv04_software_set_ref },
- { 0x0500, nv04_software_flip },
+ { 0x0150, 0x0150, nv04_software_set_ref },
+ { 0x0500, 0x0500, nv04_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb..a019364b1e1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv10_software_omthds[] = {
- { 0x0500, nv10_software_flip },
+ { 0x0500, 0x0500, nv10_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544..b0e7e1c01ce 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv50_software_omthds[] = {
- { 0x018c, nv50_software_mthd_dma_vblsem },
- { 0x0400, nv50_software_mthd_vblsem_offset },
- { 0x0404, nv50_software_mthd_vblsem_value },
- { 0x0408, nv50_software_mthd_vblsem_release },
- { 0x0500, nv50_software_mthd_flip },
+ { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+ { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+ { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d..282a1cd1bc2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nvc0_software_omthds[] = {
- { 0x0400, nvc0_software_mthd_vblsem_offset },
- { 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, nvc0_software_mthd_vblsem_value },
- { 0x040c, nvc0_software_mthd_vblsem_release },
- { 0x0500, nvc0_software_mthd_flip },
+ { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+ { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+ { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nvc0_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e540..261cd96e695 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/vp.h>
struct nv84_vp_priv {
- struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
- struct nouveau_vp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
* PVP context
******************************************************************************/
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_vp_chan *priv;
- int ret;
-
- ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_chan *priv = (void *)object;
- return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_vp_context_ctor,
- .dtor = nv84_vp_context_dtor,
- .init = nv84_vp_context_init,
- .fini = nv84_vp_context_fini,
- .rd32 = _nouveau_vp_context_rd32,
- .wr32 = _nouveau_vp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
* PVP engine/subdev functions
******************************************************************************/
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_vp_priv *priv;
int ret;
- ret = nouveau_vp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_subdev(priv)->intr = nv84_vp_intr;
nv_engine(priv)->cclass = &nv84_vp_cclass;
nv_engine(priv)->sclass = nv84_vp_sclass;
return 0;
}
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_priv *priv = (void *)object;
- return nouveau_vp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_vp_oclass = {
.handle = NV_ENGINE(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_vp_ctor,
- .dtor = nv84_vp_dtor,
- .init = nv84_vp_init,
- .fini = nv84_vp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 00000000000..f761949d703
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+ { 0x90b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+ struct nvc0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nvc0_vp_cclass;
+ nv_engine(priv)->sclass = nvc0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 00000000000..2384ce5dbe1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+ { 0x95b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+ struct nve0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nve0_vp_cclass;
+ nv_engine(priv)->sclass = nve0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800f..47c4b3a5bd3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
struct nv_device_class {
u64 device; /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE 0x80000000
+#define NV50_DMA_CONF0_PRIV 0x00300000
+#define NV50_DMA_CONF0_PRIV_VM 0x00000000
+#define NV50_DMA_CONF0_PRIV_US 0x00100000
+#define NV50_DMA_CONF0_PRIV__S 0x00200000
+#define NV50_DMA_CONF0_PART 0x00030000
+#define NV50_DMA_CONF0_PART_VM 0x00000000
+#define NV50_DMA_CONF0_PART_256 0x00010000
+#define NV50_DMA_CONF0_PART_1KB 0x00020000
+#define NV50_DMA_CONF0_COMP 0x00000180
+#define NV50_DMA_CONF0_COMP_NONE 0x00000000
+#define NV50_DMA_CONF0_COMP_VM 0x00000180
+#define NV50_DMA_CONF0_TYPE 0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE 0x80000000
+#define NVC0_DMA_CONF0_PRIV 0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
+#define NVC0_DMA_CONF0_PRIV_US 0x00100000
+#define NVC0_DMA_CONF0_PRIV__S 0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
+#define NVC0_DMA_CONF0_TYPE 0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE 0x80000000
+#define NVD0_DMA_CONF0_PAGE 0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
+#define NVD0_DMA_CONF0_TYPE 0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
+
struct nv_dma_class {
u32 flags;
u32 pad0;
u64 start;
u64 limit;
+ u32 conf0;
};
/* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS 0x00005070
+#define NV84_DISP_CLASS 0x00008270
+#define NVA0_DISP_CLASS 0x00008370
+#define NV94_DISP_CLASS 0x00008870
+#define NVA3_DISP_CLASS 0x00008570
+#define NVD0_DISP_CLASS 0x00009070
+#define NVE0_DISP_CLASS 0x00009170
+
+#define NV50_DISP_SOR_MTHD 0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
+#define NV50_DISP_SOR_MTHD_LINK 0x00000004
+#define NV50_DISP_SOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_SOR_PWR 0x00010000
+#define NV50_DISP_SOR_PWR_STATE 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
+#define NVA3_DISP_SOR_HDA_ELD 0x00010100
+#define NV84_DISP_SOR_HDMI_PWR 0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN 0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
+
+#define NV50_DISP_DAC_MTHD 0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
+#define NV50_DISP_DAC_MTHD_OR 0x00000003
+
+#define NV50_DISP_DAC_PWR 0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
+#define NV50_DISP_DAC_PWR_DATA 0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
+#define NV50_DISP_DAC_PWR_STATE 0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
+#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS 0x0000507a
+#define NV84_DISP_CURS_CLASS 0x0000827a
+#define NVA0_DISP_CURS_CLASS 0x0000837a
+#define NV94_DISP_CURS_CLASS 0x0000887a
+#define NVA3_DISP_CURS_CLASS 0x0000857a
+#define NVD0_DISP_CURS_CLASS 0x0000907a
+#define NVE0_DISP_CURS_CLASS 0x0000917a
+
+struct nv50_display_curs_class {
+ u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS 0x0000507b
+#define NV84_DISP_OIMM_CLASS 0x0000827b
+#define NVA0_DISP_OIMM_CLASS 0x0000837b
+#define NV94_DISP_OIMM_CLASS 0x0000887b
+#define NVA3_DISP_OIMM_CLASS 0x0000857b
+#define NVD0_DISP_OIMM_CLASS 0x0000907b
+#define NVE0_DISP_OIMM_CLASS 0x0000917b
+
+struct nv50_display_oimm_class {
+ u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS 0x0000507c
+#define NV84_DISP_SYNC_CLASS 0x0000827c
+#define NVA0_DISP_SYNC_CLASS 0x0000837c
+#define NV94_DISP_SYNC_CLASS 0x0000887c
+#define NVA3_DISP_SYNC_CLASS 0x0000857c
+#define NVD0_DISP_SYNC_CLASS 0x0000907c
+#define NVE0_DISP_SYNC_CLASS 0x0000917c
+
+struct nv50_display_sync_class {
+ u32 pushbuf;
+ u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS 0x0000507d
+#define NV84_DISP_MAST_CLASS 0x0000827d
+#define NVA0_DISP_MAST_CLASS 0x0000837d
+#define NV94_DISP_MAST_CLASS 0x0000887d
+#define NVA3_DISP_MAST_CLASS 0x0000857d
+#define NVD0_DISP_MAST_CLASS 0x0000907d
+#define NVE0_DISP_MAST_CLASS 0x0000917d
+
+struct nv50_display_mast_class {
+ u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS 0x0000507e
+#define NV84_DISP_OVLY_CLASS 0x0000827e
+#define NVA0_DISP_OVLY_CLASS 0x0000837e
+#define NV94_DISP_OVLY_CLASS 0x0000887e
+#define NVA3_DISP_OVLY_CLASS 0x0000857e
+#define NVD0_DISP_OVLY_CLASS 0x0000907e
+#define NVE0_DISP_OVLY_CLASS 0x0000917e
+
+struct nv50_display_ovly_class {
+ u32 pushbuf;
+ u32 head;
+};
+
#endif
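The class.h hunks above add a conf0 word to struct nv_dma_class together with per-generation NV50/NVC0/NVD0_DMA_CONF0_* bit definitions, plus class identifiers and method defines for the NV50-family display objects. Below is a minimal illustrative sketch, not part of the patch, of how a caller might populate the new field from the constants defined above; the variable name and the choice of bits folded into .flags are assumed purely for illustration.

	struct nv_dma_class args = {
		/* target/access bits come from the pre-existing NV_DMA_* defines */
		.flags = NV_DMA_ACCESS_RDWR,
		.start = 0,
		.limit = (1ULL << 40) - 1,
		/* linear layout, VM privilege, 256-byte partition, no compression */
		.conf0 = NV50_DMA_CONF0_ENABLE | NV50_DMA_CONF0_PRIV_VM |
			 NV50_DMA_CONF0_PART_256 | NV50_DMA_CONF0_COMP_NONE |
			 NV50_DMA_CONF0_TYPE_LINEAR,
	};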
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872e..2fd48b564c7 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
int nouveau_engctx_init(struct nouveau_engctx *);
int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
void _nouveau_engctx_dtor(struct nouveau_object *);
int _nouveau_engctx_init(struct nouveau_object *);
int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 00000000000..1edec386ab3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+ bool external;
+};
+
+struct nouveau_falcon {
+ struct nouveau_engine base;
+
+ u32 addr;
+ u8 version;
+ u8 secret;
+
+ struct nouveau_gpuobj *core;
+ bool external;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
+ nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p) \
+ nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_init(nv_object(falcon)); \
+})
+#define nouveau_falcon_fini(p,s) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_fini(nv_object(falcon), (s)); \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, bool, const char *,
+ const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int _nouveau_falcon_init(struct nouveau_object *);
+int _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
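The new <core/falcon.h> header above wraps the generic engine/engctx objects with falcon-specific state (base address, version/secret bytes, ucode code/data blocks) and forwards the context ofuncs straight to the engctx helpers; the nvc0/nve0 PPP and VP engines earlier in this diff are built on it. The nouveau_falcon_init()/nouveau_falcon_fini() macros use a GCC statement expression so the argument is bound to a typed local before the generic helper runs. A small standalone sketch of that macro pattern, with every name invented for illustration only:

	#include <stdio.h>

	struct engine { int id; };

	static int engine_init_object(void *obj)
	{
		struct engine *eng = obj;
		printf("init engine %d\n", eng->id);
		return 0;
	}

	/* same shape as nouveau_falcon_init(): type-check, then call the generic helper */
	#define engine_init(p) ({				\
		struct engine *_eng = (p);			\
		engine_init_object(_eng);			\
	})

	int main(void)
	{
		struct engine e = { .id = 3 };
		return engine_init(&e);
	}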
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377a..b3b9ce4e9d3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
void _nouveau_gpuobj_dtor(struct nouveau_object *);
int _nouveau_gpuobj_init(struct nouveau_object *);
int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a..2514e81ade0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
int heap_nodes;
};
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+ return mm->block_size != 0;
+}
+
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
int nouveau_mm_fini(struct nouveau_mm *);
int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
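The nouveau_mm_initialised() helper added above lets callers test whether nouveau_mm_init() has already been run on an allocator (block_size is only non-zero after a successful init). A minimal usage sketch, assuming an enclosing function with an mm pointer and the usual offset/length/block arguments in scope:

	if (!nouveau_mm_initialised(mm)) {
		ret = nouveau_mm_init(mm, offset, length, block);
		if (ret)
			return ret;
	}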
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9217f..5982935ee23 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
}
struct nouveau_omthds {
- u32 method;
+ u32 start;
+ u32 limit;
int (*call)(struct nouveau_object *, u32, void *, u32);
};
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
- u8 (*rd08)(struct nouveau_object *, u32 offset);
- u16 (*rd16)(struct nouveau_object *, u32 offset);
- u32 (*rd32)(struct nouveau_object *, u32 offset);
- void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
- void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
- void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+ u8 (*rd08)(struct nouveau_object *, u64 offset);
+ u16 (*rd16)(struct nouveau_object *, u64 offset);
+ u32 (*rd32)(struct nouveau_object *, u64 offset);
+ void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+ void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+ void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
};
static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
{
struct nouveau_omthds *method = nv_oclass(obj)->omthds;
while (method && method->call) {
- if (method->method == mthd)
- return method->call(obj, mthd, &data, sizeof(data));
+ if (mthd >= method->start && mthd <= method->limit)
+ return method->call(obj, mthd, data, size);
method++;
}
return -EINVAL;
}
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+ return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
}
static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
}
static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +154,28 @@ nv_ro32(void *obj, u32 addr)
}
static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
{
nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
{
nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
{
nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
{
u32 temp = nv_ro32(obj, addr);
nv_wo32(obj, addr, (temp & ~mask) | data);
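The object.h changes above widen the rd/wr accessor offsets from u32 to u64 and replace the single-method struct nouveau_omthds entry with a start/limit pair; nv_exec() walks the table and dispatches any method that falls inside an entry's range, while nv_call() becomes a thin wrapper passing a single u32. A small standalone C sketch of the same range-based dispatch; the table contents and handler are invented purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t u32;

	struct omthds {
		u32 start;
		u32 limit;
		int (*call)(u32 mthd, void *data, u32 size);
	};

	static int mthd_vblsem(u32 mthd, void *data, u32 size)
	{
		(void)data;
		printf("vblsem method 0x%04x, %u bytes\n", mthd, size);
		return 0;
	}

	/* one handler claims the whole 0x0400..0x040c range */
	static const struct omthds methods[] = {
		{ 0x0400, 0x040c, mthd_vblsem },
		{ 0, 0, NULL }
	};

	/* same walk as nv_exec() above: first entry whose [start, limit] covers mthd wins */
	static int exec(u32 mthd, void *data, u32 size)
	{
		const struct omthds *m = methods;
		while (m->call) {
			if (mthd >= m->start && mthd <= m->limit)
				return m->call(mthd, data, size);
			m++;
		}
		return -1;	/* -EINVAL in the kernel version */
	}

	int main(void)
	{
		u32 value = 0;
		return exec(0x0404, &value, sizeof(value));
	}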
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f..31cd852c96d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
struct nouveau_object base;
struct nouveau_sclass *sclass;
- u32 engine;
+ u64 engine;
int (*context_attach)(struct nouveau_object *,
struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85f..13ccdf54dfa 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_BSP_H__
#define __NOUVEAU_BSP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
- struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf..8cad2cf28ce 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
#ifndef __NOUVEAU_COPY_H__
#define __NOUVEAU_COPY_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
- struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d) \
- nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
extern struct nouveau_oclass nva3_copy_oclass;
extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baa..db975618e93 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
#ifndef __NOUVEAU_CRYPT_H__
#define __NOUVEAU_CRYPT_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
- struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_crypt_oclass;
extern struct nouveau_oclass nv98_crypt_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cba..46948285f3e 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
extern struct nouveau_oclass nv04_disp_oclass;
extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941..b28914ed175 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
+ u32 conf0;
};
-#define nouveau_dmaobj_create(p,e,c,a,s,d) \
- nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p) \
- nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
struct nouveau_dmaeng {
struct nouveau_engine base;
- int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
- struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+ /* creates a "physical" dma object from a struct nouveau_dmaobj */
+ int (*bind)(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **);
};
#define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
extern struct nouveau_oclass nv04_dmaeng_oclass;
extern struct nouveau_oclass nv50_dmaeng_oclass;
extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e397..f18846c8c6f 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
struct nouveau_object *,
struct nouveau_oclass *,
int bar, u32 addr, u32 size, u32 push,
- u32 engmask, int len, void **);
+ u64 engmask, int len, void **);
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_init _nouveau_namedb_init
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb328..0a66781e8cf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
#ifndef __NOUVEAU_PPP_H__
#define __NOUVEAU_PPP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
- struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba37..d7b287b115b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_VP_H__
#define __NOUVEAU_VP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
- struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be..5bd1ca8cd20 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
u8 chip;
u8 minor;
u8 micro;
+ u8 patch;
} version;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb62583..b79025da581 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
uint8_t bus;
uint8_t location;
uint8_t or;
+ uint8_t link;
bool duallink_possible;
union {
struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+ struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+ struct dcb_output *);
int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
(struct nouveau_bios *, void *, int index, u16 entry));
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 00000000000..c35937e2f6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+ u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub,
+ struct nvbios_disp *);
+
+struct nvbios_outp {
+ u16 type;
+ u16 mask;
+ u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+ u16 match;
+ u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75..6e54218b55f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+ u16 type;
+ u16 mask;
+ u8 flags;
+ u32 script[5];
+ u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+ u8 drv;
+ u8 pre;
+ u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a3..e6563b5cb08 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
u8 param;
};
-u16 dcb_gpio_table(struct nouveau_bios *);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
-int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e9..ca2f6bf37f4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
u32 nested;
u16 repeat;
u16 repend;
+ u32 ramcfg;
};
int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f..da470e6851b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
} type;
u64 stolen;
u64 size;
+
int ranks;
+ int parts;
+ int (*init)(struct nouveau_fb *);
int (*get)(struct nouveau_fb *, u64 size, u32 align,
u32 size_nc, u32 type, struct nouveau_mem **);
void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
int regions;
void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
+ void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
void (*fini)(struct nouveau_fb *, int i,
struct nouveau_fb_tile *);
void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
#define nouveau_fb_create(p,e,c,d) \
nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int nouveau_fb_created(struct nouveau_fb *);
+int nouveau_fb_preinit(struct nouveau_fb *);
void nouveau_fb_destroy(struct nouveau_fb *);
int nouveau_fb_init(struct nouveau_fb *);
#define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
extern struct nouveau_oclass nv04_fb_oclass;
extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
extern struct nouveau_oclass nv50_fb_oclass;
extern struct nouveau_oclass nvc0_fb_oclass;
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+int nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+
+int nv41_fb_vram_init(struct nouveau_fb *);
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_vram_init(struct nouveau_fb *);
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15..b75e8f18e52 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
struct nouveau_subdev base;
/* hardware interfaces */
- void (*reset)(struct nouveau_gpio *);
+ void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007..d70ba342aa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
}
static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_barobj *barobj = (void *)object;
return ioread32_native(barobj->iomem + addr);
}
static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_barobj *barobj = (void *)object;
iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa..f621f69fa1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
struct pci_dev *pdev = nv_device(bios)->pdev;
struct device_node *dn;
const u32 *data;
- int size, i;
+ int size;
dn = pci_device_to_OF_node(pdev);
if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
- for (i = 0; bios->data && i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
+ if (bios->data) {
+ /* disobey the acpi spec - much faster on at least w530 ... */
+ ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+ if (ret != bios->size ||
+ nvbios_checksum(bios->data, bios->size)) {
+ /* ... that didn't work, ok, i'll be good now */
+ for (i = 0; i < bios->size; i += cnt) {
+ cnt = min((bios->size - i), (u32)4096);
+ ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+ if (ret != cnt)
+ break;
+ }
+ }
}
}
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
}
static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return bios->data[addr];
}
static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le16(&bios->data[addr]);
}
static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le32(&bios->data[addr]);
}
static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
struct nouveau_bios *bios = (void *)object;
bios->data[addr] = data;
}
static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le16(data, &bios->data[addr]);
}
static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le32(data, &bios->data[addr]);
@@ -439,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+ bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
} else
if (bmp_version(bios)) {
bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -447,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
}
- nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+ nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
bios->version.major, bios->version.chip,
- bios->version.minor, bios->version.micro);
+ bios->version.minor, bios->version.micro, bios->version.patch);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c5119715774..0fd87df99dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+{
+ u16 dcb = dcb_outp(bios, idx, ver, len);
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+ outp->or = (conn & 0x0f000000) >> 24;
+ outp->location = (conn & 0x00300000) >> 20;
+ outp->bus = (conn & 0x000f0000) >> 16;
+ outp->connector = (conn & 0x0000f000) >> 12;
+ outp->heads = (conn & 0x00000f00) >> 8;
+ outp->i2c_index = (conn & 0x000000f0) >> 4;
+ outp->type = (conn & 0x0000000f);
+ outp->link = 0;
+ } else {
+ dcb = 0x0000;
+ }
+
+ if (*ver >= 0x40) {
+ u32 conf = nv_ro32(bios, dcb + 0x04);
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ outp->link = (conf & 0x00000030) >> 4;
+ outp->sorconf.link = outp->link; /*XXX*/
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *len, struct dcb_output *outp)
+{
+ u16 dcb, idx = 0;
+ while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+ if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hashm(outp) & mask) == mask)
+ break;
+ }
+ }
+ return dcb;
+}
+
int
dcb_outp_foreach(struct nouveau_bios *bios, void *data,
int (*exec)(struct nouveau_bios *, void *, int, u16))
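A minimal sketch (not part of this patch; the helper name is hypothetical) of the new hash-style DCB lookup added above; the mask packs (heads << 8) | (link << 6) | or, mirroring dcb_outp_hashm():

#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

/* Find a TMDS output on the given output-resource ("or") mask, any head/link. */
static u16
example_find_tmds(struct nouveau_bios *bios, u8 or_mask, struct dcb_output *outp)
{
        u8 ver, len;
        return dcb_outp_match(bios, DCB_OUTPUT_TMDS, or_mask, &ver, &len, outp);
}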
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 00000000000..7f16e52d9be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+ struct bit_entry U;
+
+ if (!bit_entry(bios, 'U', &U)) {
+ if (U.version == 1) {
+ u16 data = nv_ro16(bios, U.offset);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x20:
+ case 0x21:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ *sub = nv_ro08(bios, data + 0x04);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub)
+{
+ u8 hdr, cnt;
+ u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+ if (data && idx < cnt)
+ return data + hdr + (idx * *len);
+ *ver = 0x00;
+ return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub,
+ struct nvbios_disp *info)
+{
+ u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+ if (data && *len >= 2) {
+ info->data = nv_ro16(bios, data + 0);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct nvbios_disp info;
+ u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+ if (data) {
+ *cnt = nv_ro08(bios, info.data + 0x05);
+ *len = 0x06;
+ data = info.data;
+ }
+ return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *hdr >= 0x0a) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro32(bios, data + 0x02);
+ if (*ver <= 0x20) /* match any link */
+ info->mask |= 0x00c0;
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->script[2] = 0x0000;
+ if (*hdr >= 0x0c)
+ info->script[2] = nv_ro16(bios, data + 0x0a);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+ return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ info->match = nv_ro16(bios, data + 0x00);
+ info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+ info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+ if (info->match == type)
+ break;
+ }
+ return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+ while (cmp) {
+ if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+ return nv_ro16(bios, cmp + 0x02);
+ cmp += 0x04;
+ }
+ return 0x0000;
+}
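The match loop above keeps iterating while *ver is still non-zero because the enumeration helpers only clear *ver once the table itself is exhausted; an entry that fails to parse returns 0x0000 but must not end the walk. A minimal enumeration sketch (not part of this patch; the helper name is hypothetical):

static void
example_dump_outps(struct nouveau_bios *bios)
{
        struct nvbios_outp info;
        u8 ver, hdr, cnt, len, idx = 0;
        u16 data;

        /* walk every output table entry, skipping ones that don't parse */
        while ((data = nvbios_outp_parse(bios, idx++, &ver, &hdr, &cnt, &len,
                                         &info)) || ver) {
                if (data)
                        nv_info(bios, "outp %04x: type %04x mask %08x\n",
                                data, info.type, info.mask);
        }
}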
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5..663853bcca8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
#include "subdev/bios.h"
#include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
#include "subdev/bios/dp.h"
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct bit_entry bit_d;
+ struct bit_entry d;
- if (!bit_entry(bios, 'd', &bit_d)) {
- if (bit_d.version == 1) {
- u16 data = nv_ro16(bios, bit_d.offset);
+ if (!bit_entry(bios, 'd', &d)) {
+ if (d.version == 1 && d.length >= 2) {
+ u16 data = nv_ro16(bios, d.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *len = nv_ro08(bios, data + 2);
- *cnt = nv_ro08(bios, data + 3);
- return data;
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
}
}
}
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return 0x0000;
}
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+ switch (*ver * !!outp) {
+ case 0x21:
+ case 0x30:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *len = nv_ro08(bios, data + 0x05);
+ *cnt = nv_ro08(bios, outp + 0x04);
+ break;
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *cnt = 0;
+ *len = 0;
+ break;
+ default:
+ break;
+ }
+ return outp;
+ }
+ *ver = 0x00;
+ return 0x0000;
+}
+
u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 hdr, cnt;
- u16 table = dp_table(bios, ver, &hdr, &cnt, len);
- if (table && idx < cnt)
- return nv_ro16(bios, table + hdr + (idx * *len));
- return 0xffff;
+ u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *ver) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro16(bios, data + 0x02);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ info->flags = nv_ro08(bios, data + 0x05);
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->lnkcmp = nv_ro16(bios, data + 0x0a);
+ info->script[2] = nv_ro16(bios, data + 0x0c);
+ info->script[3] = nv_ro16(bios, data + 0x0e);
+ info->script[4] = nv_ro16(bios, data + 0x10);
+ break;
+ case 0x40:
+ info->flags = nv_ro08(bios, data + 0x04);
+ info->script[0] = nv_ro16(bios, data + 0x05);
+ info->script[1] = nv_ro16(bios, data + 0x07);
+ info->lnkcmp = nv_ro16(bios, data + 0x09);
+ info->script[2] = nv_ro16(bios, data + 0x0b);
+ info->script[3] = nv_ro16(bios, data + 0x0d);
+ info->script[4] = nv_ro16(bios, data + 0x0f);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
}
u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
- u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 idx = 0;
- u16 data;
- while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
- if (data) {
- u32 hash = nv_ro32(bios, data);
- if (dcb_hash_match(outp, hash))
- return data;
+ u16 data, idx = 0;
+ while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
}
}
+ return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (*ver >= 0x40) {
+ outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ *hdr = *hdr + (*len * *cnt);
+ *len = nv_ro08(bios, outp + 0x06);
+ *cnt = nv_ro08(bios, outp + 0x07);
+ }
+
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+
return 0x0000;
}
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ switch (*ver) {
+ case 0x21:
+ info->drv = nv_ro08(bios, data + 0x02);
+ info->pre = nv_ro08(bios, data + 0x03);
+ info->unk = nv_ro08(bios, data + 0x04);
+ break;
+ case 0x30:
+ case 0x40:
+ info->drv = nv_ro08(bios, data + 0x01);
+ info->pre = nv_ro08(bios, data + 0x02);
+ info->unk = nv_ro08(bios, data + 0x03);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u8 idx = 0xff;
+ u16 data;
+
+ if (*ver >= 0x30) {
+ const u8 vsoff[] = { 0, 4, 7, 9 };
+ idx = (un * 10) + vsoff[vs] + pe;
+ } else {
+ while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+ ver, hdr, cnt, len))) {
+ if (nv_ro08(bios, data + 0x00) == vs &&
+ nv_ro08(bios, data + 0x01) == pe)
+ break;
+ idx++;
+ }
+ }
+
+ return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+}
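As a worked example of the version-3.0+ index calculation above (assuming, as the code implies, ten config entries per "un" group laid out by voltage swing then pre-emphasis): for un = 1, vs = 2 and pe = 1, idx = (1 * 10) + vsoff[2] + 1 = 10 + 7 + 1 = 18, i.e. the nineteenth entry after the config header.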
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e50816..c84e93fa6d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
#include <subdev/bios/gpio.h>
u16
-dcb_gpio_table(struct nouveau_bios *bios)
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 ver, hdr, cnt, len;
- u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+ u16 data = 0x0000;
+ u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
- if (ver >= 0x30 && hdr >= 0x0c)
- return nv_ro16(bios, dcb + 0x0a);
- if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
- return nv_ro16(bios, dcb - 0x0f);
+ if (*ver >= 0x30 && *hdr >= 0x0c)
+ data = nv_ro16(bios, dcb + 0x0a);
+ else
+ if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+ data = nv_ro16(bios, dcb - 0x0f);
+
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ if (*ver < 0x30) {
+ *hdr = 3;
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x01);
+ } else
+ if (*ver <= 0x41) {
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ } else {
+ data = 0x0000;
+ }
+ }
}
- return 0x0000;
+ return data;
}
u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u16 gpio = dcb_gpio_table(bios);
- if (gpio) {
- *ver = nv_ro08(bios, gpio);
- if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
- return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
- else if (ent < nv_ro08(bios, gpio + 2))
- return gpio + nv_ro08(bios, gpio + 1) +
- (ent * nv_ro08(bios, gpio + 3));
- }
+ u8 hdr, cnt;
+ u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ if (gpio && ent < cnt)
+ return gpio + hdr + (ent * *len);
return 0x0000;
}
-int
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
- u8 ver, hdr, cnt, len;
- u16 entry;
- int i = -1;
-
- while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
- if (ver < 0x40) {
- u16 data = nv_ro16(bios, entry);
+ u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+ if (data) {
+ if (*ver < 0x40) {
+ u16 info = nv_ro16(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- .param = !!(data & 0x8000),
+ .line = (info & 0x001f) >> 0,
+ .func = (info & 0x07e0) >> 5,
+ .log[0] = (info & 0x1800) >> 11,
+ .log[1] = (info & 0x6000) >> 13,
+ .param = !!(info & 0x8000),
};
} else
- if (ver < 0x41) {
- u32 data = nv_ro32(bios, entry);
+ if (*ver < 0x41) {
+ u32 info = nv_ro32(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000001f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data & 0x18000000) >> 27,
- .log[1] = (data & 0x60000000) >> 29,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000001f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info & 0x18000000) >> 27,
+ .log[1] = (info & 0x60000000) >> 29,
+ .param = !!(info & 0x80000000),
};
} else {
- u32 data = nv_ro32(bios, entry + 0);
- u8 data1 = nv_ro32(bios, entry + 4);
+ u32 info = nv_ro32(bios, data + 0);
+ u8 info1 = nv_ro32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000003f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data1 & 0x30) >> 4,
- .log[1] = (data1 & 0xc0) >> 6,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000003f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info1 & 0x30) >> 4,
+ .log[1] = (info1 & 0xc0) >> 6,
+ .param = !!(info & 0x80000000),
};
}
+ }
+
+ return data;
+}
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+ u8 hdr, cnt, i = 0;
+ u16 data;
+
+ while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
if ((line == 0xff || line == gpio->line) &&
(func == 0xff || func == gpio->func))
- return 0;
+ return data;
}
/* DCB 2.2, fixed TVDAC GPIO data */
- if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
- u8 conf = nv_ro08(bios, entry - 5);
- u8 addr = nv_ro08(bios, entry - 4);
+ if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+ if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+ u8 conf = nv_ro08(bios, data - 5);
+ u8 addr = nv_ro08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
.log[0] = !!(conf & 0x02),
.log[1] = !(conf & 0x02),
};
- return 0;
+ *ver = 0x00;
+ return data;
}
}
}
- return -EINVAL;
+ return 0x0000;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4..2917d552689 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
#include <core/device.h>
#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
}
static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+ /* This appears to be the behaviour of the VBIOS parser, and *is*
+ * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
+ * avoid fucking up the memory controller (somehow) by reading it
+ * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+ *
+ * Preserving the non-caching behaviour on earlier chipsets just
+ * in case *not* re-reading the strap causes similar breakage.
+ */
+ if (!init->ramcfg || init->bios->version.major < 0x70)
+ init->ramcfg = init_rd32(init, 0x101000);
+ return (init->ramcfg & 0x0000003c) >> 2;
+}
+
+static u8
init_ram_restrict(struct nvbios_init *init)
{
- u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+ u8 strap = init_ram_restrict_strap(init);
u16 table = init_ram_restrict_table(init);
if (table)
return nv_ro08(init->bios, table + strap);
@@ -743,9 +760,10 @@ static void
init_dp_condition(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
+ struct nvbios_dpout info;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 unkn = nv_ro08(bios, init->offset + 2);
- u8 ver, len;
+ u8 ver, hdr, cnt, len;
u16 data;
trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +777,12 @@ init_dp_condition(struct nvbios_init *init)
case 1:
case 2:
if ( init->outp &&
- (data = dp_outp_match(bios, init->outp, &ver, &len))) {
- if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
- init_exec_set(init, false);
- if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+ (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+ (init->outp->or << 0) |
+ (init->outp->sorconf.link << 6),
+ &ver, &hdr, &cnt, &len, &info)))
+ {
+ if (!(info.flags & cond))
init_exec_set(init, false);
break;
}
@@ -1778,7 +1798,7 @@ init_gpio(struct nvbios_init *init)
init->offset += 1;
if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
/**
@@ -1992,6 +2012,47 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ struct nouveau_gpio *gpio = nouveau_gpio(bios);
+ struct dcb_gpio_func func;
+ u8 count = nv_ro08(bios, init->offset + 1);
+ u8 idx = 0, ver, len;
+ u16 data, i;
+
+ trace("GPIO_NE\t");
+ init->offset += 2;
+
+ for (i = init->offset; i < init->offset + count; i++)
+ cont("0x%02x ", nv_ro08(bios, i));
+ cont("\n");
+
+ while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+ if (func.func != DCB_GPIO_UNUSED) {
+ for (i = init->offset; i < init->offset + count; i++) {
+ if (func.func == nv_ro08(bios, i))
+ break;
+ }
+
+ trace("\tFUNC[0x%02x]", func.func);
+ if (i == (init->offset + count)) {
+ cont(" *");
+ if (init_exec(init) && gpio && gpio->reset)
+ gpio->reset(gpio, func.func);
+ }
+ cont("\n");
+ }
+ }
+
+ init->offset += count;
+}
+
static struct nvbios_init_opcode {
void (*exec)(struct nvbios_init *);
} init_opcode[] = {
@@ -2056,6 +2117,7 @@ static struct nvbios_init_opcode {
[0x98] = { init_auxch },
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
+ [0xa9] = { init_gpio_ne },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
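For reference, a hypothetical INIT_GPIO_NE byte stream as init_gpio_ne() above consumes it: opcode 0xa9, one count byte, then count GPIO function ids. Every function present in the DCB GPIO table that is neither DCB_GPIO_UNUSED nor listed in the stream has its reset() hook called; for example (bytes invented for illustration) the sequence a9 02 10 11 would reset every defined GPIO function except 0x10 and 0x11.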
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8..f8a7ed4166c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
-#include <core/device.h>
#include <core/option.h>
#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
static const u64 disable_map[] = {
[NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
[NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
[NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
[NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine frequency of timing crystal */
if ( device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset <= 0x25))
+ (device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
}
static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
return nv_rd08(object->engine, addr);
}
static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
return nv_rd16(object->engine, addr);
}
static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object->engine, addr);
}
static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
nv_wr08(object->engine, addr, data);
}
static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
nv_wr16(object->engine, addr, data);
}
static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
nv_wr32(object->engine, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e3..9c40b0fb23f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b..74f88f48e1c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6ccc..0ac1b2c4f61 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0..41d59689a02 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6f..6ccfd8585ba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x86:
device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x92:
device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x94:
device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x96:
device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x98:
device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa0:
device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
break;
case 0xaa:
device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xac:
device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa3:
device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa5:
device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa8:
device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xaf:
device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c29..f0461685a42 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc4:
device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc3:
device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xcf:
device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc1:
device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xd9:
device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
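The identify hunks above all follow one pattern: each chipset case fills a table of implementation classes indexed by an engine/subdev enum, and this series only changes which class a given slot points at. A minimal stand-alone sketch of that table-dispatch idea (every name below is illustrative, not the nouveau API):

#include <stdio.h>

enum demo_engine { DEMO_ENGINE_GR, DEMO_ENGINE_DISP, DEMO_ENGINE_NR };

struct demo_oclass { const char *name; };

static const struct demo_oclass demo_gr   = { "graphics" };
static const struct demo_oclass demo_disp = { "display"  };

int main(void)
{
	const struct demo_oclass *oclass[DEMO_ENGINE_NR] = { NULL };
	int chipset = 0xce;			/* pretend GF114 */

	switch (chipset) {
	case 0xce:
		oclass[DEMO_ENGINE_GR]   = &demo_gr;
		oclass[DEMO_ENGINE_DISP] = &demo_disp;
		break;
	default:
		fprintf(stderr, "unknown chipset\n");
		return 1;
	}

	for (int i = 0; i < DEMO_ENGINE_NR; i++)
		if (oclass[i])
			printf("engine %d -> %s\n", i, oclass[i]->name);
	return 0;
}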
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab85..03a652876e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
#include <engine/graph.h>
#include <engine/disp.h>
#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
int
nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
case 0xe7:
device->cname = "GK107";
@@ -92,13 +98,44 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e..ae7249b0979 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/vga.h>
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
static int
nv50_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_bios *bios = nouveau_bios(object);
struct nv50_devinit_priv *priv = (void *)object;
+ struct nvbios_outp info;
+ struct dcb_output outp;
+ u8 ver = 0xff, hdr, cnt, len;
+ int ret, i = 0;
if (!priv->base.post) {
if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
- return nouveau_devinit_init(&priv->base);
+ ret = nouveau_devinit_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* if we ran the init tables, execute first script pointer for each
+ * display table output entry that has a matching dcb entry.
+ */
+ while (priv->base.post && ver) {
+ u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+ if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[0],
+ .outp = &outp,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ };
+
+ return 0;
}
static int
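The new code in nv50_devinit_init() walks the VBIOS display output table after the init tables have run, executing the first script pointer of every entry that has a matching DCB entry. A stand-alone sketch of that walk-until-the-version-reads-zero loop, with stub helpers standing in for nvbios_outp_parse()/dcb_outp_match()/nvbios_exec() (the stubs and their data are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_outp { uint16_t script0; };

/* pretend table: three entries, then "version 0" terminates the walk */
static bool demo_parse(int i, uint8_t *ver, struct demo_outp *info)
{
	static const uint16_t scripts[] = { 0x1000, 0x1040, 0x1080 };
	if (i >= 3) {
		*ver = 0;
		return false;
	}
	*ver = 0x40;
	info->script0 = scripts[i];
	return true;
}

static bool demo_match(const struct demo_outp *info)
{
	return info->script0 != 0x1040;	/* pretend one entry has no DCB match */
}

int main(void)
{
	struct demo_outp info;
	uint8_t ver = 0xff;
	int i = 0;

	while (ver) {
		bool ok = demo_parse(i++, &ver, &info);
		if (ok && demo_match(&info))
			printf("execute script at 0x%04x\n", info.script0);
	}
	return 0;
}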
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af3..d6d16007ec1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
}
int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
{
- int ret, i;
+ static const char *name[] = {
+ [NV_MEM_TYPE_UNKNOWN] = "unknown",
+ [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+ [NV_MEM_TYPE_SGRAM ] = "SGRAM",
+ [NV_MEM_TYPE_SDRAM ] = "SDRAM",
+ [NV_MEM_TYPE_DDR1 ] = "DDR1",
+ [NV_MEM_TYPE_DDR2 ] = "DDR2",
+ [NV_MEM_TYPE_DDR3 ] = "DDR3",
+ [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
+ [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
+ [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
+ [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
+ };
+ int ret, tags;
- ret = nouveau_subdev_init(&pfb->base);
- if (ret)
- return ret;
+ tags = pfb->ram.init(pfb);
+ if (tags < 0 || !pfb->ram.size) {
+ nv_fatal(pfb, "error detecting memory configuration!!\n");
+ return (tags < 0) ? tags : -ERANGE;
+ }
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+ if (!nouveau_mm_initialised(&pfb->vram)) {
+ ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+ if (ret)
+ return ret;
+ }
- return 0;
-}
+ if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+ ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+ if (ret)
+ return ret;
+ }
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
- struct nouveau_fb *pfb = (void *)object;
- return nouveau_fb_init(pfb);
+ nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+ nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+ nv_info(pfb, " ZCOMP: %d tags\n", tags);
+ return 0;
}
void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
- if (pfb->tags.block_size)
- nouveau_mm_fini(&pfb->tags);
-
- if (pfb->vram.block_size)
- nouveau_mm_fini(&pfb->vram);
+ nouveau_mm_fini(&pfb->tags);
+ nouveau_mm_fini(&pfb->vram);
nouveau_subdev_destroy(&pfb->base);
}
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
struct nouveau_fb *pfb = (void *)object;
nouveau_fb_destroy(pfb);
}
-
int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
{
- static const char *name[] = {
- [NV_MEM_TYPE_UNKNOWN] = "unknown",
- [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
- [NV_MEM_TYPE_SGRAM ] = "SGRAM",
- [NV_MEM_TYPE_SDRAM ] = "SDRAM",
- [NV_MEM_TYPE_DDR1 ] = "DDR1",
- [NV_MEM_TYPE_DDR2 ] = "DDR2",
- [NV_MEM_TYPE_DDR3 ] = "DDR3",
- [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
- [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
- [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
- [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
- };
+ int ret, i;
- if (pfb->ram.size == 0) {
- nv_fatal(pfb, "no vram detected!!\n");
- return -ERANGE;
- }
+ ret = nouveau_subdev_init(&pfb->base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pfb->tile.regions; i++)
+ pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
- nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
- nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
return 0;
}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object;
+ return nouveau_fb_init(pfb);
+}
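The restructured nouveau_fb_preinit() probes RAM via the new per-chipset ram.init() hook, rejects a zero size, sizes the VRAM allocator in 4 KiB pages and the tag allocator in tags + 1 entries, then reports the total in MiB. A reduced user-space sketch of that flow (the struct and the probe function are illustrative, not the driver's types):

#include <stdint.h>
#include <stdio.h>

struct demo_fb {
	uint64_t ram_size;	/* bytes */
	int	 tags;		/* compression tags reported by probe */
};

static int demo_ram_probe(struct demo_fb *fb)
{
	fb->ram_size = 256ull << 20;	/* pretend 256 MiB */
	return 4096;			/* pretend 4096 ZCOMP tags */
}

int main(void)
{
	struct demo_fb fb;
	int tags = demo_ram_probe(&fb);

	if (tags < 0 || !fb.ram_size) {
		fprintf(stderr, "error detecting memory configuration\n");
		return 1;
	}
	fb.tags = tags;

	/* the VRAM allocator works in 4 KiB pages, hence the >> 12 */
	printf("vram allocator: %llu pages\n",
	       (unsigned long long)(fb.ram_size >> 12));
	/* tag space is tags + 1 entries, matching the ++tags in the patch */
	printf("tag allocator:  %d entries\n", fb.tags + 1);
	printf("RAM size: %d MiB\n", (int)(fb.ram_size >> 20));
	return 0;
}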
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f..6e369f85361 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+ if (boot0 & 0x00000100) {
+ pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ pfb->ram.size *= 1024 * 1024;
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ pfb->ram.size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ pfb->ram.size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ pfb->ram.size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ pfb->ram.size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ pfb->ram.type = NV_MEM_TYPE_SGRAM;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+ return 0;
+}
+
+static int
nv04_fb_init(struct nouveau_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv04_fb_priv *priv;
- u32 boot0;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
- if (boot0 & 0x00000100) {
- priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
- priv->base.ram.size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- priv->base.ram.size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- priv->base.ram.size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- priv->base.ram.size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- priv->base.ram.size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- priv->base.ram.type = NV_MEM_TYPE_SGRAM;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- return nouveau_fb_created(&priv->base);
+ priv->base.ram.init = nv04_fb_vram_init;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
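A stand-alone decode of the NV04 PFB_BOOT_0 fields used above, limited to the literal bit tests that appear in nv04_fb_vram_init(); the sample register value is made up purely to exercise the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t boot0 = 0x00003108;	/* hypothetical register value */
	uint64_t size;

	if (boot0 & 0x00000100) {
		/* bits 15:12 encode (MiB - 2) / 2 */
		size = (((boot0 >> 12) & 0xf) * 2 + 2) * 1024ull * 1024;
		printf("size: %llu MiB\n", (unsigned long long)(size >> 20));
	} else {
		printf("size comes from the RAM_AMOUNT field instead\n");
	}

	printf("type: %s\n",
	       (boot0 & 0x00000038) <= 0x10 ? "SGRAM" : "SDRAM");
	return 0;
}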
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f..edbbe26e858 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
struct nouveau_fb base;
};
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 cfg0 = nv_rd32(pfb, 0x100200);
+ if (cfg0 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+void
nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-static void
+void
nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
}
static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv10_fb_priv *priv;
int ret;
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- if (device->chipset == 0x1a || device->chipset == 0x1f) {
- struct pci_dev *bridge;
- u32 mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- nv_fatal(device, "no bridge device\n");
- return 0;
- }
-
- if (device->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- priv->base.ram.size = mib * 1024 * 1024;
- } else {
- u32 cfg0 = nv_rd32(priv, 0x100200);
- if (cfg0 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
- }
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv10_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv10_fb_tile_init;
priv->base.tile.fini = nv10_fb_tile_fini;
priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
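The nv10 probe above takes the amount of VRAM straight from the top byte of register 0x10020c: masking with 0xff000000 leaves a byte count that is a multiple of 16 MiB. A quick arithmetic check with a made-up readout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg_10020c = 0x08000123;	/* hypothetical readout */
	uint32_t size = reg_10020c & 0xff000000;

	printf("%u bytes = %u MiB\n", size, size >> 20);	/* 128 MiB */
	return 0;
}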
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 00000000000..48366841db4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct pci_dev *bridge;
+ u32 mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ nv_fatal(pfb, "no bridge device\n");
+ return -ENODEV;
+ }
+
+ if (nv_device(pfb)->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.size = mib * 1024 * 1024;
+ return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv1a_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv1a_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv10_fb_tile_init;
+ priv->base.tile.fini = nv10_fb_tile_fini;
+ priv->base.tile.prog = nv10_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x1a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv1a_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
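The nv1a path reads the stolen-memory size from the host bridge's PCI config space rather than from PFB registers. A stand-alone check of the two field decodes used above, with made-up config dwords:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cfg7c = 0x000003c0;	/* hypothetical dword at offset 0x7c */
	uint32_t cfg84 = 0x000001f0;	/* hypothetical dword at offset 0x84 */

	/* nForce (0x1a): bits 10:6 hold MiB - 1 */
	printf("0x1a: %u MiB\n", ((cfg7c >> 6) & 31) + 1);
	/* nForce2 (0x1f): bits 10:4 hold MiB - 1 */
	printf("0x1f: %u MiB\n", ((cfg84 >> 4) & 127) + 1);
	return 0;
}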
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7f..5d14612a2c8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
struct nouveau_fb base;
};
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+ }
+ pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- struct nouveau_device *device = nv_device(pfb);
- int bpp = (flags & 2) ? 32 : 16;
-
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- size /= 256;
if (flags & 4) {
- if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
- /* Enable Z compression */
- tile->zcomp = tile->tag->offset;
- if (device->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= 0x00100000;
- else
- tile->zcomp |= 0x00200000;
- } else {
- tile->zcomp |= 0x80000000;
- if (bpp != 16)
- tile->zcomp |= 0x04000000;
- }
- }
-
+ pfb->tile.comp(pfb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+ else tile->zcomp = 0x04000000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+ tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x08000000;
+#endif
+ }
+}
+
+void
nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nouveau_mm_free(&pfb->tags, &tile->tag);
}
-static void
+void
nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv20_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
- if (device->chipset >= 0x25)
- ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
- else
- ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
- if (ret)
- return ret;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv20_fb_tile_comp;
priv->base.tile.fini = nv20_fb_tile_fini;
priv->base.tile.prog = nv20_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
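The comp() hooks introduced in this series all size their tag allocation the same way: the tiled region is split into 0x40-byte tiles, divided across the memory partitions, and rounded up to a 0x40 boundary. A stand-alone version of that arithmetic, with local equivalents of the kernel's DIV_ROUND_UP()/round_up() helpers and invented inputs:

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DEMO_ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint32_t size  = 0x00100000;	/* hypothetical 1 MiB tiled region */
	uint32_t parts = 2;		/* hypothetical partition count */

	uint32_t tiles = DEMO_DIV_ROUND_UP(size, 0x40);
	uint32_t tags  = DEMO_ROUND_UP(tiles / parts, 0x40);

	printf("tiles=%u tags=%u\n", tiles, tags);	/* tiles=16384 tags=8192 */
	return 0;
}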
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 00000000000..0042ace6bef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+ else tile->zcomp = 0x00200000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x01000000;
+#endif
+ }
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv25_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv25_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc9139..a7ba0d048ae 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- tile->addr = addr | 1;
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) {
+ tile->addr = (0 << 4);
+ } else {
+ if (pfb->tile.comp) /* z compression */
+ pfb->tile.comp(pfb, i, size, flags, tile);
+ tile->addr = (1 << 4);
+ }
+
+ tile->addr |= 0x00000001; /* enable */
+ tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- tile->addr = 0;
- tile->limit = 0;
- tile->pitch = 0;
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+ else tile->zcomp |= 0x02000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x10000000;
+#endif
+ }
}
static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
return x;
}
-static int
+int
nv30_fb_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv30_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv30_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
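nv30_fb_tile_comp() above packs the allocated tag range into a single register value: the start of the range (in 64-tag units, hence the >> 6) goes in the low bits and the end of the range goes 12 bits higher; the nv35 and nv36 variants in the later hunks use the same scheme with the upper field at bits 13 and 14 instead. A quick stand-alone illustration of that packing, using made-up offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset = 0x1000;	/* hypothetical tag-space offset */
	uint32_t tags   = 0x0800;	/* hypothetical tag count */
	uint32_t zcomp  = 0x02000000;	/* Z24S8 format bit from the patch */

	zcomp |= (offset >> 6);			   /* first block */
	zcomp |= ((offset + tags - 1) >> 6) << 12; /* last block  */

	printf("zcomp = 0x%08x\n", zcomp);
	return 0;
}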
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 00000000000..092f6f4f352
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+ else tile->zcomp |= 0x08000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv35_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv35_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 00000000000..797ab3b821b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+ else tile->zcomp |= 0x20000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x80000000;
+#endif
+ }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv36_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv36_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x36),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv36_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad..65e131b90f3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
struct nouveau_fb base;
};
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
- if ((device->chipset & 0xf0) == 0x60)
- return 1;
-
- return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ }
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
- nv_wr32(priv, 0x100800, 0x00000001);
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
}
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100800, 0x00000001);
+ u32 tiles = DIV_ROUND_UP(size, 0x80);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
+ if ( (flags & 2) &&
+ !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
+ tile->zcomp |= ((tile->tag->offset ) >> 8);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
}
static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
if (ret)
return ret;
- switch (nv_device(priv)->chipset) {
- case 0x40:
- case 0x45:
- nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
- break;
- default:
- if (nv44_graph_class(nv_device(priv)))
- nv44_fb_init_gart(priv);
- else
- nv40_fb_init_gart(priv);
- break;
- }
-
+ nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
return 0;
}
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv40_fb_priv *priv;
int ret;
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (device->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (device->chipset == 0x49 || device->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(priv, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (device->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(priv, 0x100474);
- if (pfb474 & 0x00000004)
- priv->base.ram.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- } else {
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- switch (device->chipset) {
- case 0x40:
- case 0x45:
- priv->base.tile.regions = 8;
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- case 0x4c:
- priv->base.tile.regions = 15;
- break;
- default:
- priv->base.tile.regions = 12;
- break;
- }
+ priv->base.ram.init = nv40_fb_vram_init;
+ priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- if (device->chipset == 0x40)
- priv->base.tile.prog = nv10_fb_tile_prog;
- else
- priv->base.tile.prog = nv40_fb_tile_prog;
-
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 00000000000..e9e5a08c41a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+ nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+ struct nv41_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv41_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x41),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv41_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
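Note that the reworked tile_prog() implementations in this series (nv10, nv20, nv41, nv44) follow the final nv_wr32() of the tile address with an nv_rd32() of the same register; reading the register back like this presumably acts as a posting read, forcing the write to reach the hardware before the next tile region is programmed.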
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 00000000000..ae89b5006f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ tile->addr = 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+ struct nv44_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100850, 0x80000000);
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv44_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 00000000000..589b93ea299
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+ struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) tile->addr = (0 << 3);
+ else tile->addr = (1 << 3);
+
+ tile->addr |= 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv46_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x46),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv46_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 00000000000..818bba35b36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv47_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x47),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv47_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 00000000000..84a31af16ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv49_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv49_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x49),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv49_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 00000000000..797fd558170
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv4e_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
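
nv4e is the simplest case: the stolen-memory path only reads the size register and tags the RAM as stolen. The structural change is the same one repeated across this series, though: the constructor installs a ram.init callback and then calls nouveau_fb_preinit(), which invokes it. Below is a simplified, self-contained model of that hook pattern; fake_fb, fake_vram_init and fake_fb_preinit are invented stand-ins, not the real nouveau structures:

    #include <stdio.h>

    struct fake_fb {
        unsigned long long ram_size;        /* bytes */
        int (*ram_init)(struct fake_fb *);  /* per-chipset probe hook */
    };

    static int fake_vram_init(struct fake_fb *fb)
    {
        fb->ram_size = 64ULL << 20;         /* pretend we probed 64 MiB */
        return 0;
    }

    static int fake_fb_preinit(struct fake_fb *fb)
    {
        /* generic code calls whatever hook the constructor installed */
        return fb->ram_init ? fb->ram_init(fb) : 0;
    }

    int main(void)
    {
        struct fake_fb fb = { .ram_init = fake_vram_init };

        if (fake_fb_preinit(&fb) == 0)
            printf("probed %llu MiB\n", fb.ram_size >> 20);
        return 0;
    }
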
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143..487cb8c6c20 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
return types[(memtype & 0xff00) >> 8] != 0;
}
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(pfb, 0x100200);
+ r4 = nv_rd32(pfb, 0x100204);
+ rt = nv_rd32(pfb, 0x100250);
+ ru = nv_rd32(pfb, 0x001540);
+ nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != pfb->ram.size) {
+ nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+ (u32)(pfb->ram.size >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nouveau_bios *bios = nouveau_bios(device);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 size;
+ int ret;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c);
+ pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+ ((pfb->ram.size & 0x000000ff) << 32);
+
+ size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ switch (device->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf: /* IGPs, no reordering, no real VRAM */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+ if (ret)
+ return ret;
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ break;
+ default:
+ switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+ case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ pfb->ram.type = NV_MEM_TYPE_DDR3;
+ else
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+ case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+ nv50_fb_vram_rblock(pfb) >> 12);
+ if (ret)
+ return ret;
+
+ pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ break;
+ }
+
+ return nv_rd32(pfb, 0x100320);
+}
+
static int
nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
kfree(mem);
}
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru, rblock_size;
-
- r0 = nv_rd32(priv, 0x100200);
- r4 = nv_rd32(priv, 0x100204);
- rt = nv_rd32(priv, 0x100250);
- ru = nv_rd32(priv, 0x001540);
- nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != priv->base.ram.size) {
- nv_warn(priv, "memory controller reports %d MiB VRAM\n",
- (u32)(priv->base.ram.size >> 20));
- }
-
- rblock_size = rowsize;
- if (rt & 1)
- rblock_size *= 3;
-
- nv_debug(priv, "rblock %d bytes\n", rblock_size);
- return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bios *bios = nouveau_bios(device);
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nv50_fb_priv *priv;
- u32 tags;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- switch (nv_rd32(priv, 0x100714) & 0x00000007) {
- case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- priv->base.ram.type = NV_MEM_TYPE_DDR3;
- else
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- break;
- case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
- case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c);
- priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
- ((priv->base.ram.size & 0x000000ff) << 32);
-
- tags = nv_rd32(priv, 0x100320);
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
-
- nv_debug(priv, "%d compression tags\n", tags);
-
- size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- switch (device->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf: /* IGPs, no reordering, no real VRAM */
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
- if (ret)
- return ret;
-
- priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- break;
- default:
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
- nv50_vram_rblock(priv) >> 12);
- if (ret)
- return ret;
-
- priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
- break;
- }
-
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c08_page) {
- priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c08))
- nv_warn(priv, "failed 0x100c08 page map\n");
- } else {
- nv_warn(priv, "failed 0x100c08 page alloc\n");
- }
-
- priv->base.memtype_valid = nv50_fb_memtype_valid;
- priv->base.ram.get = nv50_fb_vram_new;
- priv->base.ram.put = nv50_fb_vram_del;
- return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
-
- if (priv->r100c08_page) {
- pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- }
-
- nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_fb_init(&priv->base);
- if (ret)
- return ret;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- switch (device->chipset) {
- case 0x50:
- nv_wr32(priv, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(priv, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(priv, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(priv, 0x100c90, 0x001d07ff);
- break;
- }
-
- return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
- },
-};
-
static const struct nouveau_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
{}
};
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nv50_fb_priv *priv = (void *)pfb;
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
}
nv_wr32(priv, 0x100c90, idx | 0x80000000);
- if (!display)
- return;
-
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
else
printk("0x%08x\n", st1);
}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (priv->r100c08_page) {
+ priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+ nv_warn(priv, "failed 0x100c08 page map\n");
+ } else {
+ nv_warn(priv, "failed 0x100c08 page alloc\n");
+ }
+
+ priv->base.memtype_valid = nv50_fb_memtype_valid;
+ priv->base.ram.init = nv50_fb_vram_init;
+ priv->base.ram.get = nv50_fb_vram_new;
+ priv->base.ram.put = nv50_fb_vram_del;
+ nv_subdev(priv)->intr = nv50_fb_intr;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (device->chipset) {
+ case 0x50:
+ nv_wr32(priv, 0x100c90, 0x000707ff);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(priv, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(priv, 0x100c90, 0x089d1fff);
+ break;
+ default:
+ nv_wr32(priv, 0x100c90, 0x001d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
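
The nv50 changes move both the row-block calculation and the VRAM type probe into nv50_fb_vram_init(), and route FB faults through a proper subdev interrupt handler instead of the exported nv50_fb_trap(). The arithmetic in nv50_fb_vram_rblock() is easy to sanity-check in isolation; the register values below are invented for illustration, not dumped from hardware:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative register values only, not real hardware dumps. */
        uint32_t r0 = 0x00000004;   /* bit 2 set: add the rowbitsb term */
        uint32_t r4 = 0x01559000;   /* colbits=9, rowbits=13/13, 8 banks */
        uint32_t rt = 0x00000000;   /* bit 0 clear: no 3x reorder block */
        uint32_t ru = 0x00030000;   /* two partition-enable bits set */
        int i, parts = 0;
        int colbits, rowbitsa, rowbitsb, banks;
        uint64_t rowsize, predicted;
        uint32_t rblock;

        for (i = 0; i < 8; i++)
            if (ru & (0x00010000u << i))
                parts++;

        colbits  = (r4 & 0x0000f000) >> 12;
        rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
        rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
        banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

        rowsize   = (uint64_t)parts * banks * (1 << colbits) * 8;
        predicted = rowsize << rowbitsa;
        if (r0 & 0x00000004)
            predicted += rowsize << rowbitsb;

        rblock = (uint32_t)rowsize;
        if (rt & 1)
            rblock *= 3;

        printf("parts=%d banks=%d rowsize=%llu bytes\n",
               parts, banks, (unsigned long long)rowsize);
        printf("predicted %llu MiB, rblock %u bytes\n",
               (unsigned long long)(predicted >> 20), rblock);
        return 0;
    }

With these inputs the predicted size works out to 1024 MiB and the reorder block to 65536 bytes, which, shifted to pages, is what nv50_fb_vram_init() hands to nouveau_mm_init() as the block size.
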
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf007..306bdf12145 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 parts = nv_rd32(pfb, 0x022438);
+ u32 pmask = nv_rd32(pfb, 0x022554);
+ u32 bsize = nv_rd32(pfb, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
+
+ nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+ nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ pfb->ram.type = nouveau_fb_bios_memtype(bios);
+ pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+ /* read amount of vram attached to each memory controller */
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+ pfb->ram.size += (u64)psize << 20;
+ }
+ }
+
+ /* if all controllers have the same amount attached, there's no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&pfb->vram, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&pfb->vram);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
}
static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nouveau_fb *pfb = &priv->base;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(priv, 0x022438);
- u32 pmask = nv_rd32(priv, 0x022554);
- u32 bsize = nv_rd32(priv, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
- nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- priv->base.ram.type = nouveau_fb_bios_memtype(bios);
- priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
- priv->base.ram.size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&pfb->vram, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&pfb->vram);
- return ret;
- }
-
- return 0;
-}
-
-static int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.memtype_valid = nvc0_fb_memtype_valid;
+ priv->base.ram.init = nvc0_fb_vram_init;
priv->base.ram.get = nvc0_fb_vram_new;
priv->base.ram.put = nv50_fb_vram_del;
- ret = nvc0_vram_detect(priv);
- if (ret)
- return ret;
-
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page)
return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (pci_dma_mapping_error(device->pdev, priv->r100c10))
return -EFAULT;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
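
For nvc0 the detection logic is unchanged, it simply moves from nvc0_vram_detect() into the new ram.init hook. The interesting part is how a non-uniform configuration is laid out: the amount common to every partition is addressed from 0, and the remainder is placed above an 8 GiB gap. The recomputation below uses a made-up three-partition configuration (256/256/192 MiB) and the same page-unit formulas as the code above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t rsvd_head = ( 256 * 1024) >> 12;  /* vga memory, pages */
        const uint32_t rsvd_tail = (1024 * 1024) >> 12;  /* vbios etc, pages */
        uint32_t psize_mib[] = { 256, 256, 192 };        /* per-partition, MiB */
        int parts = 3, i;
        uint32_t bsize = psize_mib[0];                   /* lowest common amount */
        uint64_t total = 0, lo_off, lo_len, hi_off, hi_len;

        for (i = 0; i < parts; i++) {
            if (psize_mib[i] < bsize)
                bsize = psize_mib[i];
            total += (uint64_t)psize_mib[i] << 20;       /* MiB -> bytes */
        }

        /* region 1: the common amount from every partition, addressed
         * from 0 (1 MiB = 256 pages of 4 KiB) */
        lo_off = rsvd_head;
        lo_len = (uint64_t)(bsize << 8) * parts;

        /* region 2: everything else, from 8 GiB + common_size */
        hi_off = (0x0200000000ULL >> 12) + (bsize << 8);
        hi_len = (total >> 12) - (bsize << 8) - rsvd_tail;

        printf("low  region: page 0x%llx, %llu pages\n",
               (unsigned long long)lo_off, (unsigned long long)lo_len);
        printf("high region: page 0x%llx, %llu pages\n",
               (unsigned long long)hi_off, (unsigned long long)hi_len);
        return 0;
    }

The two nouveau_mm_init() calls in nvc0_fb_vram_init() correspond to exactly these two regions; the uniform case collapses to a single region starting at rsvd_head.
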
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf..9fb0f9b92d4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
struct dcb_gpio_func *func)
{
+ struct nouveau_bios *bios = nouveau_bios(gpio);
+ u8 ver, len;
+ u16 data;
+
if (line == 0xff && tag == 0xff)
return -EINVAL;
- if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+ data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+ if (data)
return 0;
/* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
int ret = nouveau_subdev_init(&gpio->base);
if (ret == 0 && gpio->reset) {
if (dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd..bf13a1200f2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
};
static void
-nv50_gpio_reset(struct nouveau_gpio *gpio)
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nv50_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
static const u32 regs[] = { 0xe100, 0xe28c };
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
u32 val = (unk1 << 16) | unk0;
u32 reg = regs[line >> 4]; line &= 0x0f;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e..83e8b8f16e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
};
static void
-nvd0_gpio_reset(struct nouveau_gpio *gpio)
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nvd0_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000003f);
u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
u8 unk0 = (data & 0x00ff0000) >> 16;
u8 unk1 = (data & 0x1f000000) >> 24;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
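
Both GPIO reset paths (nv50 and nvd0 above) gain a match argument: reset can now be restricted to one DCB GPIO function, with DCB_GPIO_UNUSED acting as a wildcard that preserves the old reset-everything behaviour, and unpopulated entries are skipped either way. A small sketch of just that filter; the function IDs are invented, and GPIO_UNUSED mirrors the driver's 0xff sentinel:

    #include <stdio.h>
    #include <stdint.h>

    #define GPIO_UNUSED 0xff    /* stand-in for DCB_GPIO_UNUSED */

    static void reset_entry(uint8_t func, uint8_t match)
    {
        if (func == GPIO_UNUSED ||
            (match != GPIO_UNUSED && match != func)) {
            printf("skip  func 0x%02x\n", func);
            return;
        }
        printf("reset func 0x%02x\n", func);
    }

    int main(void)
    {
        uint8_t funcs[] = { 0x01, 0x0c, GPIO_UNUSED };
        int i;

        /* wildcard match: old reset-everything behaviour */
        for (i = 0; i < 3; i++)
            reset_entry(funcs[i], GPIO_UNUSED);

        /* reset only function 0x0c */
        for (i = 0; i < 3; i++)
            reset_entry(funcs[i], 0x0c);
        return 0;
    }
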
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba..dc27e794a85 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
udelay(1);
if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x", ctrl);
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
return -EBUSY;
}
} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b5036..f5bbd383411 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instobj_priv *node = (void *)object;
return nv_ro32(object->engine, node->mem->offset + addr);
}
static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instobj_priv *node = (void *)object;
nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
}
static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
return nv_wr32(object, 0x700000 + addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd593..da64253201e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10..cfc7e31461d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
}
static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c..8379aafa6e1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
struct nouveau_mc *pmc = nouveau_mc(subdev);
const struct nouveau_mc_intr *map = pmc->intr_map;
struct nouveau_subdev *unit;
- u32 stat;
+ u32 stat, intr;
- stat = nv_rd32(pmc, 0x000100);
+ intr = stat = nv_rd32(pmc, 0x000100);
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(subdev, map->unit);
if (unit && unit->intr)
unit->intr(unit);
- stat &= ~map->stat;
+ intr &= ~map->stat;
}
map++;
}
- if (stat) {
+ if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
}
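
The mc/base.c hunk keeps the raw 0x000100 status word intact and clears handled bits from a separate intr copy, so the "unknown intr" warning still fires only when bits remain unhandled but can now report the complete status value. A simplified model of that dispatch loop, with an invented interrupt map and status value:

    #include <stdio.h>
    #include <stdint.h>

    struct intr_map {
        uint32_t stat;
        const char *unit;
    };

    static const struct intr_map map[] = {
        { 0x00000100, "FIFO"  },
        { 0x00001000, "GR"    },
        { 0x00100000, "TIMER" },
        { 0, NULL }
    };

    int main(void)
    {
        uint32_t stat = 0x00001104;  /* example: GR + FIFO + one unknown bit */
        uint32_t intr = stat;
        const struct intr_map *m;

        for (m = map; stat && m->stat; m++) {
            if (stat & m->stat) {
                printf("dispatch to %s\n", m->unit);
                intr &= ~m->stat;    /* handled; raw stat stays intact */
            }
        }

        if (intr)
            printf("unknown intr 0x%08x (raw status 0x%08x)\n", intr, stat);
        return 0;
    }
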
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b0297..8d759f83032 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0000d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38..ceb5c83f945 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0040d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17..92796682722 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303..e286e132c7e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nv_ro08(bios, data))) {
- nv_info(mxm, "no VBIOS data, nothing to do\n");
+ nv_debug(mxm, "no VBIOS data, nothing to do\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cbf1fc60a38..41241922263 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, -ENODEV);
client = nv_client(abi16->client);
-
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return nouveau_abi16_put(abi16, -EINVAL);
-
device = nv_device(abi16->device);
imem = nouveau_instmem(device);
pfb = nouveau_fb(device);
+ /* hack to allow channel engine type specification on kepler */
+ if (device->card_type >= NV_E0) {
+ if (init->fb_ctxdma_handle != ~0)
+ init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ else
+ init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+ /* allow flips to be executed if this is a graphics channel */
+ init->tt_ctxdma_handle = 0;
+ if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ init->tt_ctxdma_handle = 1;
+ }
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return nouveau_abi16_put(abi16, -EINVAL);
+
/* allocate "abi16 channel" data and make up a handle for it */
init->channel = ffsll(~abi16->handles);
if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
- if (device->card_type >= NV_E0) {
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
- init->tt_ctxdma_handle = 0;
- }
-
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret;
if (unlikely(!abi16))
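
The abi16 change overloads the two ctxdma handles on Kepler (NV_E0 and later), where no user-visible ctxdma objects exist: fb_ctxdma_handle ends up carrying the requested engine type (forced to GR when userspace passed a real handle, otherwise taken from tt_ctxdma_handle), and tt_ctxdma_handle is repurposed as a graphics-channel flag so that flips and the software object are only set up for GR channels. A rough model of that translation; HANDLE_NONE, ENGINE_GR and chan_req are invented stand-ins, not the real NVE0_CHANNEL_IND_* values:

    #include <stdio.h>
    #include <stdint.h>

    #define HANDLE_NONE (~0u)
    #define ENGINE_GR   0x01u   /* invented stand-in for the GR engine id */

    struct chan_req {
        uint32_t fb_handle;     /* reused as engine type on Kepler */
        uint32_t tt_handle;     /* reused as "graphics channel" flag */
    };

    static void kepler_translate(struct chan_req *req)
    {
        if (req->fb_handle != HANDLE_NONE)
            req->fb_handle = ENGINE_GR;      /* legacy request: assume GR */
        else
            req->fb_handle = req->tt_handle; /* engine type passed via tt */

        /* flips and the software object only for graphics channels */
        req->tt_handle = (req->fb_handle == ENGINE_GR) ? 1 : 0;
    }

    int main(void)
    {
        struct chan_req legacy = { 0xbeef0001, 0xbeef0002 };
        struct chan_req other  = { HANDLE_NONE, 0x02 };  /* some other engine */

        kepler_translate(&legacy);
        kepler_translate(&other);
        printf("legacy: engine=0x%x gr-flag=%u\n",
               (unsigned)legacy.fb_handle, (unsigned)legacy.tt_handle);
        printf("other:  engine=0x%x gr-flag=%u\n",
               (unsigned)other.fb_handle, (unsigned)other.tt_handle);
        return 0;
    }
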
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114..d97f20069d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
acpi_handle rom_handle;
} nouveau_dsm_priv;
+bool nouveau_is_optimus(void) {
+ return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void) {
+ return nouveau_dsm_priv.dsm_detected;
+}
+
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
- /* perhaps the _DSM functions are mutually exclusive, but prepare for
- * the future */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
has_optimus = 1;
}
- if (vga_count == 2 && has_dsm && guid_valid) {
+ /* find the optimus DSM or the old v1 DSM */
+ if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.dsm_detected = true;
+ nouveau_dsm_priv.optimus_detected = true;
ret = true;
- }
-
- if (has_optimus == 1) {
+ } else if (vga_count == 2 && has_dsm && guid_valid) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
+
return ret;
}
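
In nouveau_acpi.c the switcheroo hooks now only act when the v1 DSM was detected, and detection is reordered so the Optimus DSM wins when both are present; the new nouveau_is_optimus() and nouveau_is_v1_dsm() helpers expose that state to the rest of the driver. A stripped-down model of the precedence logic, using plain booleans in place of ACPI probing and an invented dsm_state structure:

    #include <stdio.h>
    #include <stdbool.h>

    struct dsm_state {
        bool optimus_detected;
        bool dsm_detected;
    };

    static bool dsm_detect(struct dsm_state *s, int vga_count,
                           bool has_dsm, bool guid_valid, bool has_optimus)
    {
        if (has_optimus) {
            s->optimus_detected = true;      /* Optimus takes precedence */
            return true;
        }
        if (vga_count == 2 && has_dsm && guid_valid) {
            s->dsm_detected = true;          /* legacy v1 DSM switching */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct dsm_state s = { false, false };

        /* both methods present: only Optimus is recorded */
        dsm_detect(&s, 2, true, true, true);
        printf("optimus=%d v1=%d\n", s.optimus_detected, s.dsm_detected);
        return 0;
    }

Because the two flags are now mutually exclusive, the simpler "bail out unless dsm_detected" checks in the switcheroo callbacks are sufficient.
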
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b5..d0da230d770 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
+static inline bool nouveau_is_optimus(void) { return false; };
+static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef23588..865eddfa30a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
- struct dcb_output *dcbent, int crtc)
-{
- /*
- * The display script table is located by the BIT 'U' table.
- *
- * It contains an array of pointers to various tables describing
- * a particular output type. The first 32-bits of the output
- * tables contains similar information to a DCB entry, and is
- * used to decide whether that particular table is suitable for
- * the output you want to access.
- *
- * The "record header length" field here seems to indicate the
- * offset of the first configuration entry in the output tables.
- * This is 10 on most cards I've seen, but 12 has been witnessed
- * on DP cards, and there's another script pointer within the
- * header.
- *
- * offset + 0 ( 8 bits): version
- * offset + 1 ( 8 bits): header length
- * offset + 2 ( 8 bits): record length
- * offset + 3 ( 8 bits): number of records
- * offset + 4 ( 8 bits): record header length
- * offset + 5 (16 bits): pointer to first output script table
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- uint8_t *table = &bios->data[bios->display.script_table_ptr];
- uint8_t *otable = NULL;
- uint16_t script;
- int i;
-
- if (!bios->display.script_table_ptr) {
- NV_ERROR(drm, "No pointer to output script table\n");
- return 1;
- }
-
- /*
- * Nothing useful has been in any of the pre-2.0 tables I've seen,
- * so until they are, we really don't need to care.
- */
- if (table[0] < 0x20)
- return 1;
-
- if (table[0] != 0x20 && table[0] != 0x21) {
- NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
- table[0]);
- return 1;
- }
-
- /*
- * The output script tables describing a particular output type
- * look as follows:
- *
- * offset + 0 (32 bits): output this table matches (hash of DCB)
- * offset + 4 ( 8 bits): unknown
- * offset + 5 ( 8 bits): number of configurations
- * offset + 6 (16 bits): pointer to some script
- * offset + 8 (16 bits): pointer to some script
- *
- * headerlen == 10
- * offset + 10 : configuration 0
- *
- * headerlen == 12
- * offset + 10 : pointer to some script
- * offset + 12 : configuration 0
- *
- * Each config entry is as follows:
- *
- * offset + 0 (16 bits): unknown, assumed to be a match value
- * offset + 2 (16 bits): pointer to script table (clock set?)
- * offset + 4 (16 bits): pointer to script table (reset?)
- *
- * There doesn't appear to be a count value to say how many
- * entries exist in each script table, instead, a 0 value in
- * the first 16-bit word seems to indicate both the end of the
- * list and the default entry. The second 16-bit word in the
- * script tables is a pointer to the script to execute.
- */
-
- NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
- dcbent->type, dcbent->location, dcbent->or);
- for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
- break;
- }
-
- if (!otable) {
- NV_DEBUG(drm, "failed to match any output table\n");
- return 1;
- }
-
- if (pclk < -2 || pclk > 0) {
- /* Try to find matching script table entry */
- for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == type)
- break;
- }
-
- if (i == otable[5]) {
- NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
- "using first\n",
- type, dcbent->type, dcbent->or);
- i = 0;
- }
- }
-
- if (pclk == 0) {
- script = ROM16(otable[6]);
- if (!script) {
- NV_DEBUG(drm, "output script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -1) {
- script = ROM16(otable[8]);
- if (!script) {
- NV_DEBUG(drm, "output script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -2) {
- if (table[4] >= 12)
- script = ROM16(otable[10]);
- else
- script = 0;
- if (!script) {
- NV_DEBUG(drm, "output script 2 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk > 0) {
- script = ROM16(otable[table[4] + i*6 + 2]);
- if (script)
- script = clkcmptable(bios, script, pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk < 0) {
- script = ROM16(otable[table[4] + i*6 + 4]);
- if (script)
- script = clkcmptable(bios, script, -pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- }
-
- return 0;
-}
-
-
int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_entry *bitentry)
-{
- /*
- * Parses the pointer to the G80 output script tables
- *
- * Starting at bitentry->offset:
- *
- * offset + 0 (16 bits): output script table pointer
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- uint16_t outputscripttableptr;
-
- if (bitentry->length != 3) {
- NV_ERROR(drm, "Do not understand BIT U table\n");
- return -EINVAL;
- }
-
- outputscripttableptr = ROM16(bios->data[bitentry->offset]);
- bios->display.script_table_ptr = outputscripttableptr;
- return 0;
-}
-
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
- parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
return 0;
}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- int i, ret = 0;
+ int ret = 0;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
bios->fp.lvds_init_run = false;
}
- if (nv_device(drm->device)->card_type >= NV_50) {
- for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev, 0, 0,
- &bios->dcb.entry[i], -1);
- }
- }
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a5..f68c54ca422 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
} state;
struct {
- struct dcb_output *output;
- int crtc;
- uint16_t script_table_ptr;
- } display;
-
- struct {
uint16_t fptablepointer; /* also used by tmds */
uint16_t fpxlatetableptr;
int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
int nouveau_run_vbios_init(struct drm_device *);
struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
- struct dcb_output *, int crtc);
bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
int head, int pxclk);
int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab..5614c89148c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+ align >> PAGE_SHIFT, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ interruptible, no_wait_gpu);
if (ret)
return ret;
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+ no_wait_gpu, new_mem);
nouveau_fence_unref(&fence);
return ret;
}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
no_wait_gpu, new_mem);
}
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1098,8 +1094,7 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* CPU copy if we have no accelerated method available */
if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
goto out;
}
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
+ return nouveau_bo_validate(nvbo, false, false);
}
static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
}
static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
{
return nouveau_fence_done(sync_obj);
}
static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
return nouveau_fence_wait(sync_obj, lazy, intr);
}
static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
{
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098f..25ca37989d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9..174300b6a02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
+ if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
}
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_fb *pfb = nouveau_fb(device);
struct nouveau_software_chan *swch;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret, i;
/* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
/* allocate software object class (used for fences on <= nv05, and
* to signal flip completion), bind it to a subchannel.
*/
- if (chan != chan->drm->cechan) {
+ if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
ret = nouveau_object_new(nv_object(client), chan->handle,
NvSw, nouveau_abi16_swclass(chan->drm),
NULL, 0, &object);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d3595b23434..ac340ba3201 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev = nv_connector->base.dev;
drm = nouveau_drm(dev);
gpio = nouveau_gpio(drm->device);
- NV_DEBUG(drm, "\n");
if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
int type, ret = 0;
bool dummy;
- NV_DEBUG(drm, "\n");
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
/* Init DVI-I specific properties */
if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_hborder_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_vborder_property,
0);
}
/* Add hue and saturation options */
if (disp->vibrant_hue_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
90);
if (disp->color_vibrance_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
150);
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (nv_device(drm->device)->card_type >= NV_50) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
if (disp->dithering_mode) {
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->dithering_mode);
}
if (disp->dithering_depth) {
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->dithering_depth);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8..20eb84cce9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
#define __NOUVEAU_CONNECTOR_H__
#include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
struct nouveau_i2c_port;
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb013..d1e5890784d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
return &crtc->base;
}
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width,
- uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4..e4188f24fc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
nv_fb->r_dma = NvEvoVRAM_LP;
switch (fb->depth) {
- case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
- case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
- case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
case 24:
- case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
- case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
default:
NV_ERROR(drm, "unknown depth %d\n", fb->depth);
return -EINVAL;
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen == 1) {
+ if (gen >= 1) {
disp->vibrant_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
if (nv_device(drm->device)->card_type < NV_50)
ret = nv04_display_create(dev);
else
- if (nv_device(drm->device)->card_type < NV_D0)
ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
if (ret)
goto disp_create_err;
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
if (disp->dtor)
disp->dtor(dev);
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- if (nv_device(drm->device)->card_type >= NV_D0)
- ret = nvd0_display_flip_next(crtc, fb, chan, 0);
- else
- ret = nv50_display_flip_next(crtc, fb, chan);
+ ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret) {
mutex_unlock(&chan->cli->mutex);
goto fail_unreserve;
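
For readability, this is the nouveau_display_destroy() teardown order once the hunk above is applied, reconstructed directly from the diff: the KMS helpers are shut down before the hardware-specific destructor runs, so no poll work can touch state the backend is tearing down.

	nouveau_backlight_exit(dev);
	drm_vblank_cleanup(dev);

	drm_kms_helper_poll_fini(dev);	/* stop output polling first */
	drm_mode_config_cleanup(dev);	/* then release KMS objects */

	if (disp->dtor)
		disp->dtor(dev);	/* backend destructor last */

	nouveau_drm(dev)->display = NULL;
	kfree(disp);
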
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a..59838651ee8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <core/class.h>
+
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry d;
- u8 *table;
- int i;
-
- if (bit_table(dev, 'd', &d)) {
- NV_ERROR(drm, "BIT 'd' table not found\n");
- return NULL;
- }
-
- if (d.version != 1) {
- NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
- return NULL;
- }
-
- table = ROMPTR(dev, d.data[0]);
- if (!table) {
- NV_ERROR(drm, "displayport table pointer invalid\n");
- return NULL;
- }
-
- switch (table[0]) {
- case 0x20:
- case 0x21:
- case 0x30:
- case 0x40:
- break;
- default:
- NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
- return NULL;
- }
-
- for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
- return table;
- }
-
- NV_ERROR(drm, "displayport encoder table not found\n");
- return NULL;
-}
-
/******************************************************************************
* link training
*****************************************************************************/
struct dp_state {
struct nouveau_i2c_port *auxch;
- struct dp_train_func *func;
+ struct nouveau_object *core;
struct dcb_output *dcb;
int crtc;
u8 *dpcd;
@@ -97,13 +54,20 @@ static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink[2];
+ u32 data;
NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
/* set desired link configuration on the source */
- dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
- dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
/* inform the sink of the new configuration */
sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink_tp;
NV_DEBUG(drm, "training pattern %d\n", pattern);
- dp->func->train_set(dev, dp->dcb, pattern);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
int i;
for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
- dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
}
return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
}
static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30) {
- if (enable) script = ROM16(entry[12]);
- else script = ROM16(entry[14]);
- } else
- if (table[0] == 0x40) {
- if (enable) script = ROM16(entry[11]);
- else script = ROM16(entry[13]);
- }
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[6]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[5]);
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+ NV94_DISP_SOR_DP_TRAIN_OP_INIT);
}
static void
dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[8]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[7]);
- }
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+ NV94_DISP_SOR_DP_TRAIN_OP_FINI);
}
static bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
if (!dp.auxch)
return false;
- dp.func = func;
+ dp.core = core;
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
*/
gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
- /* enable down-spreading, if possible */
- dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* execute pre-train script from vbios */
- dp_link_train_init(dev, &dp);
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
/* start off at highest link rate supported by encoder and display */
while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
void
nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nv_wraux(auxch, DP_SET_POWER, &status, 1);
if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, func);
+ nouveau_dp_link_train(encoder, datarate, core);
}
static void
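
Each converted call site above derives the same per-output method offset before issuing nv_call() on the core display object. Pulled out as a standalone helper purely for illustration (nv_call() and the NV94_DISP_SOR_DP_* methods are nouveau-internal, used here exactly as in the hunks above; this helper itself is not part of the patch):

	/* Pack (crtc, sublink, or) into the method offset used by the
	 * NV94_DISP_SOR_DP_* calls above. */
	static u32
	example_dp_method_offset(struct dcb_output *dcb, int crtc)
	{
		const u32 or = ffs(dcb->or) - 1;		/* output resource index */
		const u32 link = !(dcb->sorconf.link & 1);	/* sublink select */

		return (crtc << 3) | (link << 2) | or;
	}

With that offset, the link-rate write becomes nv_call(core, NV94_DISP_SOR_DP_LNKCTL + moff, data), replacing the per-chipset dp_train_func callbacks that the hunk removes.
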
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2ea570..01c403ddb99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
-#include "nouveau_ttm.h"
-
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
@@ -149,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
- arg1 = 0;
+ arg1 = 1;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -224,6 +222,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ kfree(aper);
ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
nouveau_config, nouveau_debug, &device);
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
}
int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- pm_state.event == PM_EVENT_PRETHAW)
- return 0;
-
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending fbcon...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
goto fail_client;
nouveau_agp_fini(drm);
-
- pci_save_state(pdev);
- if (pm_state.event == PM_EVENT_SUSPEND) {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
return 0;
fail_client:
@@ -457,24 +444,33 @@ fail_client:
return ret;
}
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(drm, "re-enabling device...\n");
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
+ ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
- pci_set_master(pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli;
+
+ NV_INFO(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
return 0;
}
+int nouveau_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
{}
};
+static const struct dev_pm_ops nouveau_pm_ops = {
+ .suspend = nouveau_pmops_suspend,
+ .resume = nouveau_pmops_resume,
+ .freeze = nouveau_pmops_freeze,
+ .thaw = nouveau_pmops_thaw,
+ .poweroff = nouveau_pmops_freeze,
+ .restore = nouveau_pmops_resume,
+};
+
static struct pci_driver
nouveau_drm_pci_driver = {
.name = "nouveau",
.id_table = nouveau_drm_pci_table,
.probe = nouveau_drm_probe,
.remove = nouveau_drm_remove,
- .suspend = nouveau_drm_suspend,
- .resume = nouveau_drm_resume,
+ .driver.pm = &nouveau_pm_ops,
};
static int __init
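
Reconstructed from the hunks above, the new power-management split looks like this; the PCI save/disable/D3hot handling now lives only in the system-sleep path, while freeze/thaw reuse the driver-side suspend/resume without touching PCI power state.

	/*
	 *  system suspend:   nouveau_pmops_suspend()
	 *      -> nouveau_do_suspend(drm_dev)            driver teardown
	 *      -> pci_save_state(); pci_disable_device(); pci_set_power_state(D3hot)
	 *
	 *  hibernate freeze: nouveau_pmops_freeze()
	 *      -> nouveau_do_suspend(drm_dev) only       (PCI core handles the rest)
	 *
	 *  resume and thaw mirror these via nouveau_do_resume(), with
	 *  nouveau_pmops_resume() re-enabling the PCI device first.
	 */

The legacy .suspend/.resume pci_driver hooks are dropped in favour of .driver.pm = &nouveau_pm_ops, which is also what lets nouveau_vga.c (later in this patch) call the pmops entry points directly with &pdev->dev.
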
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a1016992708..aa89eb938b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
return nv_device(nouveau_drm(dev)->device);
}
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a..d0d95bd511a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
/* nouveau_dp.c */
bool nouveau_dp_detect(struct drm_encoder *);
void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+ struct nouveau_object *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f1..8bf695c52f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_validate(nvbo, true, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc88..00000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- if (nv_device(drm->device)->chipset < 0xa3 ||
- nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return false;
- return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- if (!hdmi_sor(encoder))
- return 0x616500 + (nv_crtc->index * 0x800);
- return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
- u32 tmp = hdmi_rd32(encoder, reg);
- hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
- return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- u32 or = nv_encoder->or * 0x800;
-
- if (hdmi_sor(encoder))
- nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_connector *nv_connector;
- u32 or = nv_encoder->or * 0x800;
- int i;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid)) {
- nouveau_audio_disconnect(encoder);
- return;
- }
-
- if (hdmi_sor(encoder)) {
- nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
- nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
- }
- }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
- /* calculate checksum for the infoframe */
- u8 sum = 0, i;
- for (i = 0; i < frame[2]; i++)
- sum += frame[i];
- frame[3] = 256 - sum;
-
- /* disable infoframe, and write header */
- hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
- hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
- /* register scans tell me the audio infoframe has only one set of
- * subpack regs, according to tegra (gee nvidia, it'd be nice if we
- * could get those docs too!), the hdmi block pads out the rest of
- * the packet on its own.
- */
- if (ctrl == 0x020)
- frame[2] = 6;
-
- /* write out checksum and data, weird weird 7 byte register pairs */
- for (i = 0; i < frame[2] + 1; i += 7) {
- u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
- u32 *subpack = (u32 *)&frame[3 + i];
- hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
- hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
- }
-
- /* enable the infoframe */
- hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
- const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
- const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
- u8 frame[20];
-
- frame[0x00] = 0x82; /* AVI infoframe */
- frame[0x01] = 0x02; /* version */
- frame[0x02] = 0x0d; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
- frame[0x05] = (C << 6) | (M << 4) | R;
- frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
- frame[0x07] = VIC;
- frame[0x08] = PR;
- frame[0x09] = bar_top & 0xff;
- frame[0x0a] = bar_top >> 8;
- frame[0x0b] = bar_bottom & 0xff;
- frame[0x0c] = bar_bottom >> 8;
- frame[0x0d] = bar_left & 0xff;
- frame[0x0e] = bar_left >> 8;
- frame[0x0f] = bar_right & 0xff;
- frame[0x10] = bar_right >> 8;
- frame[0x11] = 0x00;
- frame[0x12] = 0x00;
- frame[0x13] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
- const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
- u8 frame[12];
-
- frame[0x00] = 0x84; /* Audio infoframe */
- frame[0x01] = 0x01; /* version */
- frame[0x02] = 0x0a; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (CT << 4) | CC;
- frame[0x05] = (SF << 2) | ceaSS;
- frame[0x06] = FMT;
- frame[0x07] = CA;
- frame[0x08] = (DM_INH << 7) | (LSV << 3);
- frame[0x09] = 0x00;
- frame[0x0a] = 0x00;
- frame[0x0b] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
- nouveau_audio_disconnect(encoder);
-
- /* disable audio and avi infoframes */
- hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
- hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
- /* disable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- u32 max_ac_packet, rekey;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!mode || !nv_connector || !nv_connector->edid ||
- !drm_detect_hdmi_monitor(nv_connector->edid)) {
- nouveau_hdmi_disconnect(encoder);
- return;
- }
-
- nouveau_hdmi_video_infoframe(encoder, mode);
- nouveau_hdmi_audio_infoframe(encoder, mode);
-
- hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
- nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* value matches nvidia binary driver, and tegra constant */
- rekey = 56;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* enable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
- 0x1f000000 | /* unknown */
- max_ac_packet << 16 |
- rekey);
-
- nouveau_audio_mode_set(encoder, mode);
-}
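
For context on the deleted nouveau_hdmi_infoframe() above: HDMI/CEA-861 infoframes carry a checksum byte chosen so that the three header bytes, the checksum slot and the payload sum to zero modulo 256. A generic sketch of that rule, illustrative only and not part of the deleted file:

	/* 'length' is the infoframe length field (e.g. 0x0d for AVI);
	 * the checksum slot must be zeroed before calling. */
	static u8
	example_infoframe_checksum(const u8 *frame, int length)
	{
		u8 sum = 0;
		int i;

		/* 3 header bytes + zeroed checksum slot + 'length' payload bytes */
		for (i = 0; i < 4 + length; i++)
			sum += frame[i];

		return 256 - sum;	/* u8 wrap-around makes the total zero */
	}
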
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28..1303680affd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
nv_subdev(pmc)->intr(nv_subdev(pmc));
-
- if (dev->mode_config.num_crtc) {
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
- }
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2..3543fec2355 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
return ret;
nvbo = *pnvbo;
- /* we restrict allowed domains on nv50+ to only the types
- * that were requested at creation time. not possibly on
- * earlier chips without busting the ABI.
- */
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873d..25d3495725e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- nouveau_drm_resume(pdev);
+ nouveau_pmops_resume(&pdev->dev);
drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
- nouveau_drm_suspend(pdev, pmm);
+ nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda..6578cd28c55 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
if (ret)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
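
The error-path change above (repeated for nv10_fence.c a little further down) makes the unwind order symmetric with setup: if nouveau_bo_map() fails after a successful pin, the buffer is unpinned before the reference is dropped. As a standalone sketch of the pattern, using the same calls as the hunk (the function name and 'out_bo' are placeholders):

	static int
	example_alloc_pinned_map(struct drm_device *dev, struct nouveau_bo **out_bo)
	{
		int ret;

		ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, out_bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(*out_bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto err_ref;

		ret = nouveau_bo_map(*out_bo);
		if (ret)
			goto err_unpin;

		return 0;

	err_unpin:
		nouveau_bo_unpin(*out_bo);	/* undo the pin before dropping the ref */
	err_ref:
		nouveau_bo_ref(NULL, out_bo);
		return ret;
	}
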
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c2..2cd6fb8c548 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- NV_DEBUG(drm, "\n");
-
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
void
nv04_display_destroy(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(drm, "\n");
-
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4..7ae7f97a6d4 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2..2ca276ada50 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
break;
}
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
tv_enc->select_subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_mode_property,
tv_enc->tv_norm);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
tv_enc->flicker);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_saturation_property,
tv_enc->saturation);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_hue_property,
tv_enc->hue);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_overscan_property,
tv_enc->overscan);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d626..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- NV_DEBUG(drm, "\n");
-
- for (i = 0; i < 256; i++) {
- writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
- }
-
- if (nv_crtc->lut.depth == 30) {
- writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
- }
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int index = nv_crtc->index, ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
- NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
- if (blanked) {
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
- if (ret) {
- NV_ERROR(drm, "no space while blanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- } else {
- if (nv_crtc->cursor.visible)
- nv_crtc->cursor.show(nv_crtc, false);
- else
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
- if (ret) {
- NV_ERROR(drm, "no space while unblanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, nv_crtc->lut.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF :
- NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- if (nv_device(drm->device)->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- }
-
- nv_crtc->fb.blanked = blanked;
- return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- int head = nv_crtc->index, ret;
- u32 mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
- OUT_RING (evo, mode);
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
- }
-
- return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
- int adj;
- u32 hue, vib;
-
- NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
- nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(drm, "no space while setting color vibrance\n");
- return ret;
- }
-
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, (hue << 20) | (vib << 8));
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
-
- return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_connector *connector;
- struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
- /* The safest approach is to find an encoder with the right crtc, that
- * is also linked to a connector. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
- }
-
- return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_connector *nv_connector;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *umode = &crtc->mode;
- struct drm_display_mode *omode;
- int scaling_mode, ret;
- u32 ctrl = 0, oX, oY;
-
- NV_DEBUG(drm, "\n");
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(drm, "no native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
- } else {
- scaling_mode = nv_connector->scaling_mode;
- }
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- if (scaling_mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- if (umode->hdisplay != oX || umode->vdisplay != oY ||
- umode->flags & DRM_MODE_FLAG_INTERLACE ||
- umode->flags & DRM_MODE_FLAG_DBLSCAN)
- ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
- ret = RING_SPACE(evo, 5);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- OUT_RING (evo, ctrl);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING (evo, oY << 16 | oX);
- OUT_RING (evo, oY << 16 | oX);
-
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
- }
-
- return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *clk = nouveau_clock(device);
-
- return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
- NV_DEBUG(drm, "\n");
-
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- drm_crtc_cleanup(&nv_crtc->base);
- kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_bo *cursor = NULL;
- struct drm_gem_object *gem;
- int ret = 0, i;
-
- if (!buffer_handle) {
- nv_crtc->cursor.hide(nv_crtc, true);
- return 0;
- }
-
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
- if (!gem)
- return -ENOENT;
- cursor = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(cursor);
- if (ret)
- goto out;
-
- /* The simple will do for now. */
- for (i = 0; i < 64 * 64; i++)
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
- nouveau_bo_unmap(cursor);
-
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
- nv_crtc->cursor.show(nv_crtc, true);
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->cursor.set_pos(nv_crtc, x, y);
- return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- /* We need to know the depth before we upload, but it's possible to
- * get called before a framebuffer is bound. If this is the case,
- * mark the lut values as dirty by setting depth==0, and it'll be
- * uploaded on the first mode_set_base()
- */
- if (!nv_crtc->base.fb) {
- nv_crtc->lut.depth = 0;
- return;
- }
-
- nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
- .save = nv50_crtc_save,
- .restore = nv50_crtc_restore,
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = nouveau_crtc_page_flip,
- .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_display_flip_stop(crtc);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
- nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_crtc_blank(nv_crtc, false);
- drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *passed_fb,
- int x, int y, bool atomic)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
- int ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- /* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
- */
- if (atomic) {
- drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
- } else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
- }
-
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
- nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
- nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
- if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- OUT_RING (evo, fb->r_dma);
- }
-
- ret = RING_SPACE(evo, 12);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING (evo, nv_crtc->fb.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
- OUT_RING (evo, fb->r_pitch);
- OUT_RING (evo, fb->r_format);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING (evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING (evo, (y << 16) | x);
-
- if (nv_crtc->lut.depth != fb->base.depth) {
- nv_crtc->lut.depth = fb->base.depth;
- nv50_crtc_lut_load(crtc);
- }
-
- return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 head = nv_crtc->index * 0x400;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- int ret;
-
- /* hw timing description looks like this:
- *
- * <sync> <back porch> <---------display---------> <front porch>
- * ______
- * |____________|---------------------------|____________|
- *
- * ^ synce ^ blanke ^ blanks ^ active
- *
- * interlaced modes also have 2 additional values pointing at the end
- * and start of the next field's blanking period.
- */
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = RING_SPACE(evo, 18);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0804 + head, 2);
- OUT_RING (evo, 0x00800000 | mode->clock);
- OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_NV04(evo, 0, 0x0810 + head, 6);
- OUT_RING (evo, 0x00000000); /* border colour */
- OUT_RING (evo, (vactive << 16) | hactive);
- OUT_RING (evo, ( vsynce << 16) | hsynce);
- OUT_RING (evo, (vblanke << 16) | hblanke);
- OUT_RING (evo, (vblanks << 16) | hblanks);
- OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_NV04(evo, 0, 0x082c + head, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0900 + head, 1);
- OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
- OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
- OUT_RING (evo, 0x00000000); /* screen position */
- }
-
- nv_crtc->set_dither(nv_crtc, false);
- nv_crtc->set_scale(nv_crtc, false);
- nv_crtc->set_color_vibrance(nv_crtc, false);
-
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
- if (ret)
- return ret;
-
- ret = nv50_display_sync(crtc->dev);
- if (ret)
- return ret;
-
- return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
- if (ret)
- return ret;
-
- return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = NULL;
- int ret, i;
-
- NV_DEBUG(drm, "\n");
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
- nv_crtc->color_vibrance = 50;
- nv_crtc->vibrant_hue = 0;
- nv_crtc->lut.depth = 0;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
- ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
-
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- nv50_cursor_init(nv_crtc);
-out:
- if (ret)
- nv50_crtc_destroy(&nv_crtc->base);
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113cee..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while unhiding cursor\n");
- return;
- }
-
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
- OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = true;
- }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && !nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while hiding cursor\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
- }
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = false;
- }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
- struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
- nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
- ((y & 0xFFFF) << 16) | (x & 0xFFFF));
- /* Needed to make the cursor move. */
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
- if (offset == nv_crtc->cursor.offset)
- return;
-
- nv_crtc->cursor.offset = offset;
- if (nv_crtc->cursor.visible) {
- nv_crtc->cursor.visible = false;
- nv_crtc->cursor.show(nv_crtc, true);
- }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
- nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
- nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
- nv_crtc->cursor.hide = nv50_cursor_hide;
- nv_crtc->cursor.show = nv50_cursor_show;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a174857..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- enum drm_connector_status status = connector_status_disconnected;
- uint32_t dpms_state, load_pattern, load_state;
- int or = nv_encoder->or;
-
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
- dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return status;
- }
-
- /* Use bios provided value if possible. */
- if (drm->vbios.dactestval) {
- load_pattern = drm->vbios.dactestval;
- NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
- load_pattern);
- } else {
- load_pattern = 340;
- NV_DEBUG(drm, "Using default load_pattern of %d\n",
- load_pattern);
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
- NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
- mdelay(45); /* give it some time to process */
- load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
- if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
- NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
- status = connector_status_connected;
-
- if (status == connector_status_connected)
- NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
- else
- NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
- return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return;
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
- if (mode != DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
- switch (mode) {
- case DRM_MODE_DPMS_STANDBY:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- case DRM_MODE_DPMS_OFF:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- default:
- break;
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- uint32_t mode_ctl = 0, mode_ctl2 = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
- /* Lacking a working tv-out, this is not 100% certain. */
- if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
- mode_ctl |= 0x40;
- else
- if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- mode_ctl |= 0x100;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
- ret = RING_SPACE(evo, 3);
- if (ret) {
- NV_ERROR(drm, "no space while connecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
- OUT_RING(evo, mode_ctl);
- OUT_RING(evo, mode_ctl2);
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
- .dpms = nv50_dac_dpms,
- .save = nv50_dac_save,
- .restore = nv50_dac_restore,
- .mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .get_crtc = nv50_dac_crtc_get,
- .detect = nv50_dac_detect,
- .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- if (!encoder)
- return;
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
- .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
-
- drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
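
The deleted nv50_dac_detect() above implemented analog load detection: pulse the DPMS control, drive a load pattern onto the DAC (the BIOS-provided dactestval when present, otherwise 340), wait ~45ms, read LOAD_CTRL back, and report the connector as connected only if the PRESENT bits read back set. A minimal sketch of just that final decision; the mask below is a placeholder, not the real NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT layout from nouveau_reg.h:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DAC_LOAD_PRESENT 0x00000007u    /* placeholder bit mask */

static bool dac_load_detected(uint32_t load_state)
{
        /* connected only if every PRESENT bit reads back set */
        return (load_state & DAC_LOAD_PRESENT) == DAC_LOAD_PRESENT;
}

int main(void)
{
        printf("loaded: %d, unloaded: %d\n",
               dac_load_detected(DAC_LOAD_PRESENT), dac_load_detected(0));
        return 0;
}
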
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6b..35874085a61 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+ /*
+ * Copyright 2011 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
#include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nv50_display.h"
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <subdev/timer.h>
+#include <core/class.h>
-static void nv50_display_bh(unsigned long);
-
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
+ (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
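
The macros above pack the channel type and head index into object handles and carve the shared sync buffer into 0x100-byte per-channel slots. A small standalone restatement of that arithmetic (the NV50_DISP_*_CLASS values used by EVO_PUSH_HANDLE are not reproduced here):

#include <stdio.h>

#define EVO_SYNC(c, o)        ((c) * 0x0100 + (o))
#define EVO_CHAN_HANDLE(t, i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))

int main(void)
{
        /* sync-bo offsets: one 0x100-byte slot per channel */
        printf("EVO_MAST_NTFY    = 0x%04x\n", EVO_SYNC(0, 0x00));
        printf("EVO_FLIP_SEM0(1) = 0x%04x\n", EVO_SYNC(1, 0x00));
        printf("EVO_FLIP_SEM1(1) = 0x%04x\n", EVO_SYNC(1, 0x10));

        /* channel handles: type in bits 8..15, head/index in bits 0..7 */
        printf("EVO_CHAN_HANDLE  = 0x%08x\n", EVO_CHAN_HANDLE(0x5c, 1));
        return 0;
}
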
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+ struct nouveau_object *user;
+ u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+ const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+ int ret;
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0)
- return 2;
+ ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+ oclass, data, size, &chan->user);
+ if (ret)
+ return ret;
- return 4;
+ chan->handle = handle;
+ return 0;
}
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 mask = 0;
- int i;
-
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0) {
- for (i = 0; i < 2; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- } else {
- for (i = 0; i < 4; i++)
- mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
- }
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ if (chan->handle)
+ nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
- for (i = 0; i < 3; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
- return mask & 3;
-}
+struct nv50_pioc {
+ struct nv50_chan base;
+};
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
{
- return 0;
+ nv50_chan_destroy(core, &pioc->base);
}
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_pioc *pioc)
{
+ return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
}
-int
-nv50_display_sync(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- int ret;
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
- ret = RING_SPACE(evo, 6);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
+struct nv50_dmac {
+ struct nv50_chan base;
+ dma_addr_t handle;
+ u32 *ptr;
+};
- nv_wo32(disp->ramin, 0x2000, 0x00000000);
- FIRE_RING (evo);
-
- if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
- return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+ if (dmac->ptr) {
+ struct pci_dev *pdev = nv_device(core)->pdev;
+ pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
- return 0;
+ nv50_chan_destroy(core, &dmac->base);
}
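
The pushbuffer torn down here is a single page of DMA-coherent memory, allocated in nv50_dmac_create() below with pci_alloc_consistent(). For orientation only (the driver keeps the PCI wrappers), the same allocation expressed with the generic DMA API would look roughly like this:

#include <linux/dma-mapping.h>

/* illustrative sketch: one coherent page for an EVO pushbuffer */
static void *pushbuf_alloc(struct device *dev, dma_addr_t *handle)
{
        return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
}

static void pushbuf_free(struct device *dev, void *ptr, dma_addr_t handle)
{
        dma_free_coherent(dev, PAGE_SIZE, ptr, handle);
}
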
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_channel *evo;
- int ret, i;
- u32 val;
-
- NV_DEBUG(drm, "\n");
-
- nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
- /*
- * I think the 0x006101XX range is some kind of main control area
- * that enables things.
- */
- /* CRTC? */
- for (i = 0; i < 2; i++) {
- val = nv_rd32(device, 0x00616100 + (i * 0x800));
- nv_wr32(device, 0x00610190 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616104 + (i * 0x800));
- nv_wr32(device, 0x00610194 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616108 + (i * 0x800));
- nv_wr32(device, 0x00610198 + (i * 0x10), val);
- val = nv_rd32(device, 0x0061610c + (i * 0x800));
- nv_wr32(device, 0x0061019c + (i * 0x10), val);
- }
-
- /* DAC */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061a000 + (i * 0x800));
- nv_wr32(device, 0x006101d0 + (i * 0x04), val);
- }
-
- /* SOR */
- for (i = 0; i < nv50_sor_nr(dev); i++) {
- val = nv_rd32(device, 0x0061c000 + (i * 0x800));
- nv_wr32(device, 0x006101e0 + (i * 0x04), val);
- }
-
- /* EXT */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061e000 + (i * 0x800));
- nv_wr32(device, 0x006101f0 + (i * 0x04), val);
- }
-
- for (i = 0; i < 3; i++) {
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
- }
-
- /* The precise purpose is unknown; I suspect it has something to do
- * with text mode.
- */
- if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
- nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
- nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
- if (!nv_wait(device, 0x006194e8, 2, 0)) {
- NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
- NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
-
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
- NV_ERROR(drm, "timeout: "
- "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
- NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
- }
-
- nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
- ret = nv50_evo_init(dev);
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- evo = nv50_display(dev)->master;
-
- nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
- ret = RING_SPACE(evo, 3);
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING (evo, NvEvoSync);
- return nv50_display_sync(dev);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
}
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- struct drm_crtc *drm_crtc;
- int ret, i;
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- NV_DEBUG(drm, "\n");
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- nv50_crtc_blank(crtc, true);
- }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- ret = RING_SPACE(evo, 2);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- }
- FIRE_RING(evo);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
- * cleaning up?
- */
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
- uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, u64 syncbuf,
+ struct nv50_dmac *dmac)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ u32 pushbuf = *(u32 *)data;
+ int ret;
- if (!crtc->base.enabled)
- continue;
+ dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+ &dmac->handle);
+ if (!dmac->ptr)
+ return -ENOMEM;
- nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
- NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
- "0x%08x\n", mask, mask);
- NV_ERROR(drm, "0x610024 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_INTR_1));
- }
- }
+ ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+ NV_DMA_FROM_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_PCI_US |
+ NV_DMA_ACCESS_RD,
+ .start = dmac->handle + 0x0000,
+ .limit = dmac->handle + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- }
- }
+ ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ if (ret)
+ return ret;
- nv50_evo_fini(dev);
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = syncbuf + 0x0000,
+ .limit = syncbuf + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 3; i++) {
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
- }
- }
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- /* disable interrupts. */
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+ if (nv_device(core)->card_type < NV_C0)
+ ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ if (nv_device(core)->card_type < NV_D0)
+ ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+ return ret;
}
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+ struct nv50_dmac base;
+};
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+struct nv50_sync {
+ struct nv50_dmac base;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_ovly {
+ struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
+struct nv50_head {
+ struct nouveau_crtc base;
+ struct nv50_curs curs;
+ struct nv50_sync sync;
+ struct nv50_ovly ovly;
+ struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
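
The accessor macros above rely on struct embedding: struct nv50_head starts with a struct nouveau_crtc, which in turn embeds the DRM crtc as its base member, so a base pointer can be converted back to the containing object with a plain cast. A minimal standalone illustration of that layout trick (the type names below are stand-ins, not the driver's):

#include <assert.h>
#include <stdio.h>

struct base_crtc  { int index; };
struct outer_head { struct base_crtc base; int extra; };

/* valid only because 'base' is the first member of the outer struct */
#define to_outer(c) ((struct outer_head *)(c))

int main(void)
{
        struct outer_head head = { .base = { .index = 1 }, .extra = 42 };
        struct base_crtc *crtc = &head.base;

        assert(to_outer(crtc) == &head);
        printf("head %d, extra %d\n", to_outer(crtc)->base.index,
               to_outer(crtc)->extra);
        return 0;
}
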
+struct nv50_disp {
+ struct nouveau_object *core;
+ struct nv50_mast mast;
+
+ u32 modeset;
+
+ struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *ct;
- struct nv50_display *priv;
- int ret, i;
+ return nouveau_display(dev)->priv;
+}
- NV_DEBUG(drm, "\n");
+#define nv50_mast(d) (&nv50_disp(d)->mast)
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = priv;
- nouveau_display(dev)->dtor = nv50_display_destroy;
- nouveau_display(dev)->init = nv50_display_init;
- nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
- /* Create CRTC objects */
- for (i = 0; i < 2; i++) {
- ret = nv50_crtc_create(dev, i);
- if (ret)
- return ret;
- }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+ struct nv50_dmac *dmac = evoc;
+ u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
- /* We setup the encoders from the BIOS table */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_output *entry = &dcb->entry[i];
+ if (put + nr >= (PAGE_SIZE / 4) - 8) {
+ dmac->ptr[put] = 0x20000000;
- if (entry->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
- entry->type, ffs(entry->or) - 1);
- continue;
+ nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+ if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ NV_ERROR(dmac->base.user, "channel stalled\n");
+ return NULL;
}
- connector = nouveau_connector_create(dev, entry->connector);
- if (IS_ERR(connector))
- continue;
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, entry);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, entry);
- break;
- default:
- NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
- continue;
- }
+ put = 0;
}
- list_for_each_entry_safe(connector, ct,
- &dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
- }
+ return dmac->ptr + put;
+}
- tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+ struct nv50_dmac *dmac = evoc;
+ nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
- ret = nv50_evo_create(dev);
- if (ret) {
- nv50_display_destroy(dev);
- return ret;
- }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
- return 0;
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
}
-void
-nv50_display_destroy(struct drm_device *dev)
+static int
+evo_sync(struct drm_device *dev)
{
- struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ u32 *push = evo_wait(mast, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ return 0;
+ }
- nv50_evo_destroy(dev);
- kfree(disp);
+ return -EBUSY;
}
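
evo_wait(), evo_mthd()/evo_data() and evo_kick() above form a small software ring on top of the coherent pushbuffer page: evo_wait() reads the current PUT offset from the channel and, when fewer than the requested dwords remain before the end-of-page guard, writes a jump token (0x20000000), resets PUT and waits for the channel to drain; commands are then a method header of ((size << 18) | method) followed by size data dwords, and evo_kick() advances PUT past them. evo_sync() uses exactly this to arm the master-notify word and poll for its completion. A standalone sketch of the header encoding only, with the channel plumbing omitted:

#include <stdio.h>
#include <stdint.h>

/* same encoding as the evo_mthd() macro above */
static uint32_t evo_hdr(uint32_t method, uint32_t size)
{
        return (size << 18) | method;
}

int main(void)
{
        uint32_t push[2], *p = push;

        *p++ = evo_hdr(0x0080, 1);      /* one-dword method 0x0080 ("update") */
        *p++ = 0x00000000;

        printf("header 0x%08x, %ld dwords queued\n", push[0], (long)(p - push));
        return 0;
}
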
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
- return nv50_display(dev)->crtc[crtc].sem.bo;
+ return nv50_disp(dev)->sync;
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_display *disp = nv50_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
- int ret;
-
- ret = RING_SPACE(evo, 8);
- if (ret) {
- WARN_ON(1);
- return;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
+
+ push = evo_wait(sync, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
}
-
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0094, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan)
+ struct nouveau_channel *chan, u32 swap_interval)
{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
int ret;
- ret = RING_SPACE(evo, chan ? 25 : 27);
- if (unlikely(ret))
- return ret;
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(sync, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
ret = RING_SPACE(chan, 10);
- if (ret) {
- WIND_RING(evo);
+ if (ret)
return ret;
- }
- if (nv_device(drm->device)->chipset < 0xc0) {
- BEGIN_NV04(chan, 0, 0x0060, 2);
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, dispc->sem.offset);
- BEGIN_NV04(chan, 0, 0x006c, 1);
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_NV04(chan, 0, 0x0064, 2);
- OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, sync->sem.offset);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, 0x0060, 1);
- if (nv_device(drm->device)->chipset < 0x84)
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram);
} else {
u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ offset += sync->sem.offset;
+
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
+
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
- 0xf00d0000 | dispc->sem.value);
+ nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+ 0xf00d0000 | sync->sem.value);
+ evo_sync(crtc->dev);
}
- /* queue the flip on the crtc's "display sync" channel */
- BEGIN_NV04(evo, 0, 0x0100, 1);
- OUT_RING (evo, 0xfffe0000);
- if (chan) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000100);
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, sync->sem.offset);
+ evo_data(push, 0xf00d0000 | sync->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
} else {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000010);
- /* allows gamma somehow; PDISP will complain if you
- * don't wait for vblank before changing this.
- */
- BEGIN_NV04(evo, 0, 0x00e0, 1);
- OUT_RING (evo, 0x40000000);
- }
- BEGIN_NV04(evo, 0, 0x0088, 4);
- OUT_RING (evo, dispc->sem.offset);
- OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
- OUT_RING (evo, 0x74b1e000);
- OUT_RING (evo, NvEvoSync);
- BEGIN_NV04(evo, 0, 0x00a0, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, nv_fb->r_dma);
- BEGIN_NV04(evo, 0, 0x0110, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0800, 5);
- OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (fb->height << 16) | fb->width);
- OUT_RING (evo, nv_fb->r_pitch);
- OUT_RING (evo, nv_fb->r_format);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
-
- dispc->sem.offset ^= 0x10;
- dispc->sem.value++;
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ }
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
+
+ sync->sem.offset ^= 0x10;
+ sync->sem.value++;
return 0;
}
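
The flip path above synchronises renderer and display through a pair of semaphore words in the shared sync bo: the rendering channel is asked to release 0xf00d0000 | value at sem.offset and then wait on 0x74b1e000 at sem.offset ^ 0x10, while the display channel is given the same offset/value/ack triple in the 0x0088 method block, and once the flip is queued the offset is toggled and the value bumped for the next frame. A tiny standalone sketch of just that bookkeeping, seen from the display side, with the actual channel programming left out:

#include <stdio.h>
#include <stdint.h>

/* per-CRTC flip-semaphore state, mirroring sync->sem above */
struct flip_sem {
        uint32_t offset;        /* byte offset into the shared sync bo */
        uint16_t value;
};

static void queue_flip(struct flip_sem *sem)
{
        uint32_t release = 0xf00d0000 | sem->value;

        printf("wait 0x%08x at 0x%03x, ack 0x74b1e000 at 0x%03x\n",
               release, sem->offset, sem->offset ^ 0x10);

        sem->offset ^= 0x10;    /* ping-pong between the two words */
        sem->value++;
}

int main(void)
{
        struct flip_sem sem = { .offset = 0x100, .value = 0 };

        queue_flip(&sem);
        queue_flip(&sem);
        return 0;
}
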
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
- u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_connector *nv_connector = NULL;
- struct drm_encoder *encoder;
- struct nvbios *bios = &drm->vbios;
- u32 script = 0, or;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (nv_encoder->dcb != dcb)
- continue;
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+ evo_data(push, mode);
+ } else
+ if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ } else {
+ evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ }
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- break;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
}
- or = ffs(dcb->or) - 1;
- switch (dcb->type) {
- case DCB_OUTPUT_LVDS:
- script = (mc >> 8) & 0xf;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- script |= 0x0100;
- if (bios->fp.if_is_24bit)
- script |= 0x0200;
- } else {
- /* determine number of lvds links */
- if (nv_connector && nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- /* http://www.spwg.org */
- if (((u8 *)nv_connector->edid)[121] == 2)
- script |= 0x0100;
- } else
- if (pxclk >= bios->fp.duallink_transition_clk) {
- script |= 0x0100;
- }
+ return 0;
+}
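
A pattern that repeats throughout the rewritten file: the same logical control sits at a different method offset and per-head stride depending on the core channel class, 0x400 bytes per head on the pre-NVD0 layout versus 0x300 from NVD0 onwards, with NVE0 shifting the base again (0x08a0 / 0x0490 / 0x04a0 for the dither control above). A trivial standalone sketch of the address computation:

#include <stdio.h>

/* dither-control method offsets as selected in nv50_crtc_set_dither() */
static unsigned int dither_mthd(int head, int gen)
{
        if (gen == 0)                           /* pre-NVD0 core channel */
                return 0x08a0 + head * 0x0400;
        if (gen == 1)                           /* NVD0 core channel */
                return 0x0490 + head * 0x0300;
        return 0x04a0 + head * 0x0300;          /* NVE0 and later */
}

int main(void)
{
        int head;

        for (head = 0; head < 2; head++)
                printf("head %d: 0x%04x 0x%04x 0x%04x\n", head,
                       dither_mthd(head, 0), dither_mthd(head, 1),
                       dither_mthd(head, 2));
        return 0;
}
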
- /* determine panel depth */
- if (script & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- script |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- script |= 0x0200;
- }
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct nouveau_connector *nv_connector;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+
+ /* start off at the resolution we programmed the crtc for; this
+ * effectively handles NONE/FULL scaling
+ */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary; this will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
- if (nv_connector && nv_connector->edid &&
- (nv_connector->edid->revision >= 4) &&
- (nv_connector->edid->input & 0x70) >= 0x20)
- script |= 0x0200;
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DCB_OUTPUT_TMDS:
- script = (mc >> 8) & 0xf;
- if (pxclk >= 165000)
- script |= 0x0100;
- break;
- case DCB_OUTPUT_DP:
- script = (mc >> 8) & 0xf;
- break;
- case DCB_OUTPUT_ANALOG:
- script = 0xff;
- break;
default:
- NV_ERROR(drm, "modeset on unsupported output type!\n");
break;
}
- return script;
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ /*XXX: SCALE_CTRL_ACTIVE??? */
+ evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ } else {
+ evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ }
+
+ evo_kick(push, mast);
+
+ if (update) {
+ nv50_display_flip_stop(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ }
+ }
+
+ return 0;
}
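
The overscan and CENTER/ASPECT paths above do their aspect-ratio math in 19-bit fixed point: aspect = (oY << 19) / oX, then the dependent dimension is ((other * aspect) + aspect / 2) >> 19, i.e. multiply, add half a unit for rounding, shift back. A standalone sketch of the default-underscan branch (no explicit hborder/vborder), with a made-up 1920x1080 mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t oX = 1920, oY = 1080;

        uint32_t aspect = (oY << 19) / oX;              /* 19-bit fixed point */

        /* default horizontal underscan: oX/16 + 32 pixels, as in the code */
        oX -= (oX >> 4) + 32;
        oY  = ((oX * aspect) + (aspect / 2)) >> 19;     /* rounded rescale */

        printf("underscanned: %ux%u\n", oX, oY);        /* aspect preserved */
        return 0;
}
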
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), mc;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push, hue, vib;
+ int adj;
+
+ adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+ vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+ hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ } else {
+ evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
+ return 0;
+}
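
nv50_crtc_set_color_vibrance() maps the user-facing percentage properties onto 12-bit hardware fields: the value is scaled by 2047/100, the vibrance term rounded up by half a step only when positive (the adj term, hue omits it), and the result masked to 12 bits; negative vibrance wraps into the low 12 bits of its two's-complement form. A small standalone check of that scaling (the -100..+100 property range is an assumption here):

#include <stdio.h>

/* same scaling as the vib computation above */
static unsigned int scale_12bit(int percent)
{
        int adj = (percent > 0) ? 50 : 0;

        return ((percent * 2047 + adj) / 100) & 0xfff;
}

int main(void)
{
        printf("   0%% -> 0x%03x\n", scale_12bit(0));
        printf("+100%% -> 0x%03x\n", scale_12bit(100));
        printf("-100%% -> 0x%03x\n", scale_12bit(-100));
        return 0;
}
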
- /* Determine which CRTC we're dealing with; only one will ever be
- * signalled at a time with the current nouveau code.
- */
- crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Find which encoder was connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ int x, int y, bool update)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (y << 16) | x);
+ if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->r_dma);
+ }
+ } else {
+ evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_data(push, nvfb->r_dma);
+ evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (y << 16) | x);
+ }
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
}
+ evo_kick(push, mast);
+ }
- or = i;
+ nv_crtc->fb.tile_flags = nvfb->r_dma;
+ return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ }
+ evo_kick(push, mast);
}
+}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+}
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+ if (show)
+ nv50_crtc_cursor_show(nv_crtc);
+ else
+ nv50_crtc_cursor_hide(nv_crtc);
+
+ if (update) {
+ u32 *push = evo_wait(mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ nv50_display_flip_stop(crtc);
+
+ push = evo_wait(mast, 2);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x03000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ push = evo_wait(mast, 32);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM_LP);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, 0x83000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0xffffff00);
}
- or = i;
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ nvfb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(nvfb->nvbo);
}
- /* There was no encoder to disable */
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 *push;
+ int ret;
+
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ }
- /* Disable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
- if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
- disp->irq.dcb = dcb;
- goto ack;
+ push = evo_wait(mast, 64);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00800000 | mode->clock);
+ evo_data(push, (ilace == 2) ? 2 : 0);
+ evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ } else {
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000); /* ??? */
+ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, mode->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, mode->clock * 1000);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
}
+
+ evo_kick(push, mast);
}
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ nv50_crtc_set_dither(nv_crtc, false);
+ nv50_crtc_set_scale(nv_crtc, false);
+ nv50_crtc_set_color_vibrance(nv_crtc, false);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ if (!crtc->fb) {
+ NV_DEBUG(drm, "No FB bound\n");
+ return 0;
+ }
+
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ return 0;
}
static void
-nv50_display_unk20_handler(struct drm_device *dev)
+nv50_crtc_lut_load(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
- struct dcb_output *dcb;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- dcb = disp->irq.dcb;
- if (dcb) {
- nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
- disp->irq.dcb = NULL;
- }
-
- /* CRTC clock change requested? */
- crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
- if (crtc >= 0) {
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
- pclk &= 0x003fffff;
- if (pclk)
- nv50_crtc_set_clock(dev, crtc, pclk);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
- tmp &= ~0x000000f;
- nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
- }
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
- /* Find which encoder is connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ for (i = 0; i < 256; i++) {
+ u16 r = nv_crtc->lut.r[i] >> 2;
+ u16 g = nv_crtc->lut.g[i] >> 2;
+ u16 b = nv_crtc->lut.b[i] >> 2;
+
+ if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ writew(r + 0x0000, lut + (i * 0x08) + 0);
+ writew(g + 0x0000, lut + (i * 0x08) + 2);
+ writew(b + 0x0000, lut + (i * 0x08) + 4);
+ } else {
+ writew(r + 0x6000, lut + (i * 0x20) + 0);
+ writew(g + 0x6000, lut + (i * 0x20) + 2);
+ writew(b + 0x6000, lut + (i * 0x20) + 4);
+ }
+ }
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool visible = (handle != 0);
+ int i, ret = 0;
+
+ if (visible) {
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(!gem))
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret == 0) {
+ for (i = 0; i < 64 * 64; i++) {
+ u32 v = nouveau_bo_rd32(nvbo, i);
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+ }
+ nouveau_bo_unmap(nvbo);
}
- or = i;
+ drm_gem_object_unreference_unlocked(gem);
}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
+ if (visible != nv_crtc->cursor.visible) {
+ nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+ nv_crtc->cursor.visible = visible;
+ }
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ return ret;
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
- }
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nv50_curs *curs = nv50_curs(crtc);
+ struct nv50_chan *chan = nv50_chan(curs);
+ nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nv_wo32(chan->user, 0x0080, 0x00000000);
+ return 0;
+}
- or = i;
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 end = max(start + size, (u32)256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
}
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ nv50_crtc_lut_load(crtc);
+}
- /* Enable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)))
- break;
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ nv50_dmac_destroy(disp->core, &head->ovly.base);
+ nv50_pioc_destroy(disp->core, &head->oimm.base);
+ nv50_dmac_destroy(disp->core, &head->sync.base);
+ nv50_pioc_destroy(disp->core, &head->curs.base);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
+ if (nv_crtc->lut.nvbo)
+ nouveau_bo_unpin(nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_head *head;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ head->base.index = index;
+ head->base.set_dither = nv50_crtc_set_dither;
+ head->base.set_scale = nv50_crtc_set_scale;
+ head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+ head->base.color_vibrance = 50;
+ head->base.vibrant_hue = 0;
+ head->base.cursor.set_offset = nv50_cursor_set_offset;
+ head->base.cursor.set_pos = nv50_cursor_set_pos;
+ for (i = 0; i < 256; i++) {
+ head->base.lut.r[i] = i << 8;
+ head->base.lut.g[i] = i << 8;
+ head->base.lut.b[i] = i << 8;
}
- if (i == drm->vbios.dcb.entries) {
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
- goto ack;
+ crtc = &head->base.base;
+ drm_crtc_init(dev, crtc, &nv50_crtc_func);
+ drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.lut.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.lut.nvbo);
}
- script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
+ if (ret)
+ goto out;
- if (type == DCB_OUTPUT_DP) {
- int link = !(dcb->dpconf.sor.link & 1);
- if ((mc & 0x000f0000) == 0x00020000)
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
- else
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+ nv50_crtc_lut_load(crtc);
+
+ /* allocate cursor resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+ &(struct nv50_display_curs_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_curs_class),
+ &head->curs.base);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
}
- if (dcb->type != DCB_OUTPUT_ANALOG) {
- tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
- tmp &= ~0x00000f0f;
- if (script & 0x0100)
- tmp |= 0x00000101;
- nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
- } else {
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ if (ret)
+ goto out;
+
+ /* allocate page flip / sync resources */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+ &(struct nv50_display_sync_class) {
+ .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+ .head = index,
+ }, sizeof(struct nv50_display_sync_class),
+ disp->sync->bo.offset, &head->sync.base);
+ if (ret)
+ goto out;
+
+ head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+ /* allocate overlay resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+ &(struct nv50_display_oimm_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_oimm_class),
+ &head->oimm.base);
+ if (ret)
+ goto out;
+
+ ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+ &(struct nv50_display_ovly_class) {
+ .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+ .head = index,
+ }, sizeof(struct nv50_display_ovly_class),
+ disp->sync->bo.offset, &head->ovly.base);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv50_crtc_destroy(crtc);
+ return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ dpms_ctrl = 0x00000000;
+ if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000001;
+ if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000004;
+
+ nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
}
- disp->irq.dcb = dcb;
- disp->irq.pclk = pclk;
- disp->irq.script = script;
+ return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ u32 *push;
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 syncs = 0x00000000;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000002;
+
+ evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+ evo_data(push, 1 << nv_crtc->index);
+ evo_data(push, syncs);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, 1 << nv_crtc->index);
+ }
+
+ evo_kick(push, mast);
+ }
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_encoder->crtc = encoder->crtc;
}
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int ret, or = nouveau_encoder(encoder)->or;
+ u32 load = 0;
+
+ ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+ if (ret || load != 7)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+ .dpms = nv50_dac_dpms,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_disconnect,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .disable = nv50_dac_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+ .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+ .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u32 tmp;
- if (dcb->type != DCB_OUTPUT_TMDS)
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+ nv_connector->base.eld,
+ nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+ NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+ (max_ac_packet << 16) | rekey);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
- nv_encoder->dcb->or & (1 << or)) {
- tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+ nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+ nv50_audio_disconnect(encoder);
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *partner;
+ int or = nv_encoder->or;
+
+ nv_encoder->last_dpms = mode;
+
+ list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+ if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+ continue;
+
+ if (nv_partner != nv_encoder &&
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
+ if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+ return;
break;
}
}
+
+ nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
}
static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct dcb_output *dcb = disp->irq.dcb;
- u16 script = disp->irq.script;
- u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0600 + (or * 0x40), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0200 + (or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
- if (!dcb)
- goto ack;
+ nv50_hdmi_disconnect(encoder);
+ }
- nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
- nv50_display_unk40_dp_set_tmds(dev, dcb);
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->crtc = NULL;
+}
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
- nv_wr32(device, 0x610030, 0x80000000);
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+ nv50_sor_disconnect(encoder);
+ if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+ evo_sync(encoder->dev);
}
static void
-nv50_display_bh(unsigned long data)
+nv50_sor_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nvbios *bios = &drm->vbios;
+ u32 *push, lvds = 0;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto = 0xf;
+ u8 depth = 0x0;
- for (;;) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (mode->clock < 165000)
+ proto = 0x1;
+ else
+ proto = 0x5;
+ } else {
+ proto = 0x2;
+ }
- NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ nv50_hdmi_mode_set(encoder, mode);
+ break;
+ case DCB_OUTPUT_LVDS:
+ proto = 0x0;
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
- nv50_display_unk10_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
- nv50_display_unk20_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
- nv50_display_unk40_handler(dev);
+ if (bios->fp_no_ddc) {
+ if (bios->fp.dual_link)
+ lvds |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ lvds |= 0x0200;
+ } else {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ lvds |= 0x0100;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ lvds |= 0x0100;
+ }
+
+ if (lvds & 0x0100) {
+ if (bios->fp.strapless_is_24bit & 2)
+ lvds |= 0x0200;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ lvds |= 0x0200;
+ }
+
+ if (nv_connector->base.display_info.bpc == 8)
+ lvds |= 0x0200;
+ }
+
+ nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ break;
+ case DCB_OUTPUT_DP:
+ if (nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ depth = 0x2;
+ } else
+ if (nv_connector->base.display_info.bpc == 8) {
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ depth = 0x5;
+ } else {
+ nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ depth = 0x6;
+ }
+
+ if (nv_encoder->dcb->sorconf.link & 1)
+ proto = 0x8;
else
- break;
+ proto = 0x9;
+ break;
+ default:
+ BUG_ON(1);
+ break;
}
- nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(nv50_mast(dev), 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, (depth << 16) | (proto << 8) | owner);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, owner | (proto << 8));
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
}
static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
- u32 addr, data;
- int chid;
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
- for (chid = 0; chid < 5; chid++) {
- if (!(channels & (1 << chid)))
- continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+ .dpms = nv50_sor_dpms,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .disable = nv50_sor_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+ .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
- addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
- data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
- NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
- "(0x%04x 0x%02x)\n", chid,
- addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
- nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+ u32 *push = evo_wait(nv50_mast(dev), 32);
+ if (push) {
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return evo_sync(dev);
}
+
+ return -EBUSY;
}
void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+
+ nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+ nouveau_bo_unmap(disp->sync);
+ if (disp->sync)
+ nouveau_bo_unpin(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
{
+ static const u16 oclass[] = {
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- uint32_t delayed = 0;
-
- while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
- uint32_t clock;
+ struct dcb_table *dcb = &drm->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct nv50_disp *disp;
+ struct dcb_output *dcbe;
+ int crtcs, ret, i;
- NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
- if (!intr0 && !(intr1 & ~delayed))
- break;
+ nouveau_display(dev)->priv = disp;
+ nouveau_display(dev)->dtor = nv50_display_destroy;
+ nouveau_display(dev)->init = nv50_display_init;
+ nouveau_display(dev)->fini = nv50_display_fini;
- if (intr0 & 0x001f0000) {
- nv50_display_error_handler(dev);
- intr0 &= ~0x001f0000;
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_unpin(disp->sync);
}
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
- if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
- intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- }
+ if (ret)
+ goto out;
+
+ /* attempt to allocate a supported evo display class */
+ ret = -ENODEV;
+ for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ 0xd1500000, oclass[i], NULL, 0,
+ &disp->core);
+ }
- clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_1_CLK_UNK40));
- if (clock) {
- nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
- tasklet_schedule(&disp->tasklet);
- delayed |= clock;
- intr1 &= ~clock;
- }
+ if (ret)
+ goto out;
+
+ /* allocate master evo channel */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+ &(struct nv50_display_mast_class) {
+ .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+ }, sizeof(struct nv50_display_mast_class),
+ disp->sync->bo.offset, &disp->mast.base);
+ if (ret)
+ goto out;
+
+ /* create crtc objects to represent the hw heads */
+ if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+ crtcs = nv_rd32(device, 0x022448);
+ else
+ crtcs = 2;
+
+ for (i = 0; i < crtcs; i++) {
+ ret = nv50_crtc_create(dev, disp->core, i);
+ if (ret)
+ goto out;
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe->connector);
+ if (IS_ERR(connector))
+ continue;
- if (intr0) {
- NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+ if (dcbe->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
- if (intr1) {
- NV_ERROR(drm,
- "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
- nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
}
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->encoder_ids[0])
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+
+out:
+ if (ret)
+ nv50_display_destroy(dev);
+ return ret;
}
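
The CRTC timing values programmed by nv50_crtc_mode_set() in the hunk above are plain arithmetic on the incoming mode. The following standalone sketch repeats that arithmetic outside the driver; the sample_mode struct and the 1920x1080@60 numbers are illustrative stand-ins, not part of the patch.

/* Standalone illustration of the timing maths in nv50_crtc_mode_set().
 * The struct below is a stripped-down stand-in for drm_display_mode;
 * the 1920x1080@60 values are sample numbers, not taken from the patch.
 */
#include <stdio.h>

struct sample_mode {
	unsigned clock;                       /* kHz */
	unsigned hdisplay, hsync_start, hsync_end, htotal;
	unsigned vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* CEA-861 1920x1080@60 timing, used purely as an example */
	struct sample_mode m = {
		.clock = 148500,
		.hdisplay = 1920, .hsync_start = 2008,
		.hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084,
		.vsync_end = 1089, .vtotal = 1125,
	};
	unsigned ilace = 1, vscan = 1;        /* progressive, no doublescan */

	/* horizontal: same expressions as the patch */
	unsigned hactive = m.htotal;
	unsigned hsynce  = m.hsync_end - m.hsync_start - 1;
	unsigned hbackp  = m.htotal - m.hsync_end;
	unsigned hblanke = hsynce + hbackp;
	unsigned hfrontp = m.hsync_start - m.hdisplay;
	unsigned hblanks = m.htotal - hfrontp - 1;

	/* vertical: scaled by vscan/ilace exactly as in the patch */
	unsigned vactive = m.vtotal * vscan / ilace;
	unsigned vsynce  = (m.vsync_end - m.vsync_start) * vscan / ilace - 1;
	unsigned vbackp  = (m.vtotal - m.vsync_end) * vscan / ilace;
	unsigned vblanke = vsynce + vbackp;
	unsigned vfrontp = (m.vsync_start - m.vdisplay) * vscan / ilace;
	unsigned vblanks = vactive - vfrontp - 1;

	printf("h: active %u sync %u blanke %u blanks %u\n",
	       hactive, hsynce, hblanke, hblanks);
	printf("v: active %u sync %u blanke %u blanks %u\n",
	       vactive, vsynce, vblanke, vblanks);
	return 0;
}
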
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a..70da347aa8c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
#include "nouveau_display.h"
#include "nouveau_crtc.h"
#include "nouveau_reg.h"
-#include "nv50_evo.h"
-struct nv50_display_crtc {
- struct nouveau_channel *sync;
- struct {
- struct nouveau_bo *bo;
- u32 offset;
- u16 value;
- } sem;
-};
+int nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
-struct nv50_display {
- struct nouveau_channel *master;
-
- struct nouveau_gpuobj *ramin;
- u32 dmao;
- u32 hash;
-
- struct nv50_display_crtc crtc[2];
-
- struct tasklet_struct tasklet;
- struct {
- struct dcb_output *dcb;
- u16 script;
- u32 pclk;
- } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32 nv50_display_active_crtcs(struct drm_device *);
-
-int nv50_display_sync(struct drm_device *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
-
-int nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
- u64 size);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **);
-
-int nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *, u32 swap_interval);
struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa7..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
- struct nouveau_channel *evo = *pevo;
-
- if (!evo)
- return;
- *pevo = NULL;
-
- nouveau_bo_unmap(evo->push.buffer);
- nouveau_bo_ref(NULL, &evo->push.buffer);
-
- if (evo->object)
- iounmap(evo->object->oclass->ofuncs);
-
- kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
- struct drm_device *dev = evo->fence;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 dmao = disp->dmao;
- u32 hash = disp->hash;
- u32 flags5;
-
- if (nv_device(drm->device)->chipset < 0xc0) {
- /* not supported on 0x50, specified in format mthd */
- if (nv_device(drm->device)->chipset == 0x50)
- memtype = 0;
- flags5 = 0x00010000;
- } else {
- if (memtype & 0x80000000)
- flags5 = 0x00000000; /* large pages */
- else
- flags5 = 0x00020000;
- }
-
- nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
- nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
- nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
- upper_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
- nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
- nv_wo32(disp->ramin, hash + 0x00, handle);
- nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
- evo->handle);
-
- disp->dmao += 0x20;
- disp->hash += 0x08;
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
- struct nouveau_channel **pevo)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret;
-
- evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!evo)
- return -ENOMEM;
- *pevo = evo;
-
- evo->drm = drm;
- evo->handle = chid;
- evo->fence = dev;
- evo->user_get = 4;
- evo->user_put = 0;
-
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
- &evo->push.buffer);
- if (ret == 0)
- ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- ret = nouveau_bo_map(evo->push.buffer);
- if (ret) {
- NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
- evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
- evo->object->parent = nv_object(disp->ramin)->parent;
- evo->object->engine = nv_object(disp->ramin)->engine;
- evo->object->oclass =
- kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
- evo->object->oclass->ofuncs =
- kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
- evo->object->oclass->ofuncs->rd08 =
- ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
- return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle, ret, i;
- u64 pushbuf = evo->push.buffer->bo.offset;
- u32 tmp;
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x009f0000) == 0x00020000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x003f0000) == 0x00030000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
- /* initialise fifo */
- nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
- NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_EVO_DMA_CB_VALID);
- nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
- nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
- nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- return -EBUSY;
- }
-
- /* enable error reporting on the channel */
- nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.max &= ~7;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
- if (ret)
- return ret;
-
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
-
- return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle;
-
- nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sem.bo) {
- nouveau_bo_unmap(disp->crtc[i].sem.bo);
- nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
- }
- nv50_evo_channel_del(&disp->crtc[i].sync);
- }
- nv50_evo_channel_del(&disp->master);
- nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret, i, j;
-
- /* setup object management on it, any other evo channel will
- * use this also as there's no per-channel support on the
- * hardware
- */
- ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
- if (ret) {
- NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
- goto err;
- }
-
- disp->hash = 0x0000;
- disp->dmao = 0x1000;
-
- /* create primary evo channel, the one we use for modesetting
- * purposes
- */
- ret = nv50_evo_channel_new(dev, 0, &disp->master);
- if (ret)
- return ret;
- evo = disp->master;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
- disp->ramin->addr + 0x2000, 0x1000, NULL);
- if (ret)
- goto err;
-
- /* create some default objects for the scanout memtypes we support */
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- /* create "display sync" channels and other structures we need
- * to implement page flipping
- */
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- u64 offset;
-
- ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
- if (ret)
- goto err;
-
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &dispc->sem.bo);
- if (!ret) {
- ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(dispc->sem.bo);
- if (ret)
- nouveau_bo_ref(NULL, &dispc->sem.bo);
- offset = dispc->sem.bo->bo.offset;
- }
-
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
- offset, 4096, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- for (j = 0; j < 4096; j += 4)
- nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
- dispc->sem.offset = 0;
- }
-
- return 0;
-
-err:
- nv50_evo_destroy(dev);
- return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int ret, i;
-
- ret = nv50_evo_channel_init(disp->master);
- if (ret)
- return ret;
-
- for (i = 0; i < 2; i++) {
- ret = nv50_evo_channel_init(disp->crtc[i].sync);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sync)
- nv50_evo_channel_fini(disp->crtc[i].sync);
- }
-
- if (disp->master)
- nv50_evo_channel_fini(disp->master);
-}
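
The deleted nv50_evo_dmaobj_new() above built a six-word DMA-object descriptor in display instance memory. The sketch below packs the same words into an array purely for illustration; the evo_dmaobj_words() helper is not a real driver function, and the memtype/base/size values in main() are arbitrary samples.

/* Layout of the descriptor written by the deleted nv50_evo_dmaobj_new(). */
#include <stdio.h>
#include <stdint.h>

static void evo_dmaobj_words(uint32_t memtype, uint64_t base, uint64_t size,
			     uint32_t flags5, uint32_t w[6])
{
	w[0] = 0x0019003d | (memtype << 22);              /* class + access   */
	w[1] = (uint32_t)(base + size - 1);               /* limit, low 32    */
	w[2] = (uint32_t)base;                            /* base, low 32     */
	w[3] = ((uint32_t)((base + size - 1) >> 32) << 24) |
	       (uint32_t)(base >> 32);                    /* high address bits */
	w[4] = 0x00000000;
	w[5] = flags5;                                    /* page-size flags  */
}

int main(void)
{
	uint32_t w[6];
	int i;

	/* sample values: memtype 0, 256 MiB window from offset 0 */
	evo_dmaobj_words(0, 0, 256ull << 20, 0x00010000, w);
	for (i = 0; i < 6; i++)
		printf("+0x%02x: 0x%08x\n", i * 4, w[i]);
	return 0;
}
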
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc83..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE 0x00000080
-#define NV50_EVO_UNK84 0x00000084
-#define NV50_EVO_UNK84_NOTIFY 0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
-#define NV50_EVO_DMA_NOTIFY 0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
-#define NV50_EVO_UNK8C 0x0000008C
-
-#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL 0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
-
-#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL 0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
-
-#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800 0x00000800
-#define NV50_EVO_CRTC_CLOCK 0x00000804
-#define NV50_EVO_CRTC_INTERLACE 0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
-#define NV50_EVO_CRTC_UNK0820 0x00000820
-#define NV50_EVO_CRTC_UNK0824 0x00000824
-#define NV50_EVO_CRTC_UNK082C 0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
-#define NV50_EVO_CRTC_FB_SIZE 0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
-#define NV50_EVO_CRTC_FB_DMA 0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
-#define NV50_EVO_CRTC_FB_POS 0x000008c0
-#define NV50_EVO_CRTC_REAL_RES 0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
- ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
-#define NV50_EVO_CRTC_UNK900 0x00000900
-#define NV50_EVO_CRTC_UNK904 0x00000904
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee..c20f2727ea0 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
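
The nv50_fence.c hunk above, like the buffer setup paths in the new nv50_display.c, follows the same allocate/pin/map ladder with stepwise unwinding on failure. Below is a minimal standalone sketch of that pattern; the bo_* helpers are stubs standing in for the real nouveau_bo_* calls, not the driver API.

/* Allocate/pin/map with stepwise unwind, mirroring the patch's pattern. */
#include <stdio.h>

struct fake_bo { int pinned, mapped; };

static int bo_new(struct fake_bo **bo) { static struct fake_bo b; *bo = &b; return 0; }
static int bo_pin(struct fake_bo *bo)  { bo->pinned = 1; return 0; }
static int bo_map(struct fake_bo *bo)  { bo->mapped = 1; return 0; /* flip to -1 to test unwind */ }
static void bo_unpin(struct fake_bo *bo) { bo->pinned = 0; }
static void bo_unref(struct fake_bo **bo) { *bo = NULL; }

static int setup_bo(struct fake_bo **bo)
{
	int ret = bo_new(bo);
	if (!ret) {
		ret = bo_pin(*bo);
		if (!ret) {
			ret = bo_map(*bo);
			if (ret)
				bo_unpin(*bo);   /* map failed: drop the pin */
		}
		if (ret)
			bo_unref(bo);            /* pin or map failed: drop the bo */
	}
	return ret;
}

int main(void)
{
	struct fake_bo *bo;
	printf("setup_bo -> %d\n", setup_bo(&bo));
	return 0;
}
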
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1c..8bd5d2781ba 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nouveau_dev(dev);
- u32 crtc_mask = nv50_display_active_crtcs(dev);
+ u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
struct nouveau_mem_exec_func exec = {
.dev = dev,
.precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e132..00000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv50[] = { 16, 8, 0, 24 };
- if (nv_device(drm->device)->chipset == 0xaf)
- return nvaf[lane];
- return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- config = entry + table[4];
- while (config[0] != swing || config[1] != preem) {
- config += table[5];
- if (config >= entry + table[4] + entry[4] * table[5])
- return;
- }
-
- nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
- nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
- u8 *table, *entry, mask;
- int i;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- entry = ROMPTR(dev, entry[10]);
- if (entry) {
- while (link_bw < ROM16(entry[0]) * 10)
- entry += 4;
-
- nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
- }
-
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- if (link_bw > 162000)
- clksor |= 0x00040000;
-
- nv_wr32(device, 0x614300 + (or * 0x800), clksor);
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
- mask = 0;
- for (i = 0; i < link_nr; i++)
- mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
- if (clksor & 0x000c0000)
- *bw = 270000;
- else
- *bw = 162000;
-
- if (dpctrl > 0x00030000) *nr = 4;
- else if (dpctrl > 0x00010000) *nr = 2;
- else *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(drm, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
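
For reference, the removed nv50_sor_dp_calc_tu() above searches for a transfer-unit size TU and a fixed-point fraction (VTUi, VTUf, VTUa) describing how much of each TU carries pixel data. A rough reading of the deleted code in its own notation (everything scaled by symbol = 100000; this is an interpretation of the code, not taken from hardware documentation):

    /* per-lane payload rate, and its share of the link symbol rate */
    link_data_rate = (clk * bpp / 8) / link_nr;
    link_ratio     = link_data_rate / link_bw;

    /* valid symbols per transfer unit, split into integer + fraction */
    link_ratio * TU ~= VTUi + f;

    /* f is encoded via VTUf: roughly 1/VTUf when VTUa == 0, and
     * 1 - 1/VTUf when VTUa == 1 (i.e. when the fraction is >= 1/2). */
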
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting SOR\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nouveau_hdmi_mode_set(encoder, NULL);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder *enc;
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
- nv_encoder->last_dpms = mode;
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
- if (nvenc == nv_encoder ||
- (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
- nvenc->dcb->type != DCB_OUTPUT_LVDS &&
- nvenc->dcb->type != DCB_OUTPUT_DP) ||
- nvenc->dcb->or != nv_encoder->dcb->or)
- continue;
-
- if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
- return;
- }
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
- if (mode == DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
- else
- val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
- nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
- }
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nv50_sor_dp_link_set,
- .train_set = nv50_sor_dp_train_set,
- .train_adj = nv50_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv50_sor_disconnect(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- /* avoid race between link training and supervisor intr */
- nv50_display_sync(encoder->dev);
- }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- uint32_t mode_ctl = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv_encoder->crtc = encoder->crtc;
-
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctl = 0x0100;
- else
- mode_ctl = 0x0500;
- } else
- mode_ctl = 0x0200;
-
- nouveau_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_DP:
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- mode_ctl |= 0x00020000;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- mode_ctl |= 0x00050000;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctl |= 0x00000800;
- else
- mode_ctl |= 0x00000900;
- break;
- default:
- break;
- }
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(drm, "no space while connecting SOR\n");
- nv_encoder->crtc = NULL;
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
- .dpms = nv50_sor_dpms,
- .save = nv50_sor_save,
- .restore = nv50_sor_restore,
- .mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .get_crtc = nv50_sor_crtc_get,
- .detect = NULL,
- .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
-
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
- .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder = NULL;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder *encoder;
- int type;
-
- NV_DEBUG(drm, "\n");
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_DP:
- type = DRM_MODE_ENCODER_TMDS;
- break;
- case DCB_OUTPUT_LVDS:
- type = DRM_MODE_ENCODER_LVDS;
- break;
- default:
- return -EINVAL;
- }
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
- drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac967..2a56b1b551c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
struct nvc0_fence_chan *fctx = chan->fence;
int i;
- if (nv_device(chan->drm->device)->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
- } else
- if (nv_device(chan->drm->device)->card_type >= NV_50) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (nv_device(chan->drm->device)->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(chan->drm->dev, i);
- else
- bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
{
struct nvc0_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0)
+ if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
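
The nvc0_fence hunks above make two related changes: nvc0_fence_destroy() now unpins the semaphore buffer (guarded by a NULL check) before dropping the last reference, and nvc0_fence_create() unwinds the pin when the subsequent map fails, mirroring the nv50_fence.c fix earlier in this diff. A hypothetical stand-alone sketch of that guarded, reverse-order teardown (plain stubs, not nouveau API):

    /* Teardown mirrors setup in reverse: unmap what was mapped last,
     * then unpin, then drop the final reference. */
    struct obj { int pinned, mapped; };

    static void teardown(struct obj *bo)
    {
            if (!bo)
                    return;             /* buffer may never have been created */
            if (bo->mapped)
                    bo->mapped = 0;     /* unmap first */
            if (bo->pinned)
                    bo->pinned = 0;     /* then unpin  */
            /* the final reference would be dropped here */
    }

    int main(void)
    {
            struct obj bo = { .pinned = 1, .mapped = 1 };
            teardown(&bo);
            return bo.pinned || bo.mapped;
    }
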
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b..00000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
-
-struct evo {
- int idx;
- dma_addr_t handle;
- u32 *ptr;
- struct {
- u32 offset;
- u16 value;
- } sem;
-};
-
-struct nvd0_display {
- struct nouveau_gpuobj *mem;
- struct nouveau_bo *sync;
- struct evo evo[9];
-
- struct tasklet_struct tasklet;
- u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- int ret = 0;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
- nv_wr32(device, 0x610704 + (id * 0x10), data);
- nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
- if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
- ret = -EBUSY;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
- return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
- if (put + nr >= (PAGE_SIZE / 4)) {
- disp->evo[id].ptr[put] = 0x20000000;
-
- nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
- if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
- NV_ERROR(drm, "evo %d dma stalled\n", id);
- return NULL;
- }
-
- put = 0;
- }
-
- return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
-
- nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 flags;
-
- flags = 0x00000000;
- if (ch == EVO_MASTER)
- flags |= 0x01000000;
-
- nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
- nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
- nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 *push = evo_wait(dev, ch, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, ch);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
- return 0;
- }
-
- return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
- return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u32 *push;
-
- push = evo_wait(crtc->dev, evo->idx, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
- }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u64 offset;
- u32 *push;
- int ret;
-
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
-
- push = evo_wait(crtc->dev, evo->idx, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
-
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
-
-
- offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += evo->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | evo->sem.value);
- OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
- FIRE_RING (chan);
- } else {
- nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
- 0xf00d0000 | evo->sem.value);
- evo_sync(crtc->dev, EVO_MASTER);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, evo->sem.offset);
- evo_data(push, 0xf00d0000 | evo->sem.value);
- evo_data(push, 0x74b1e000);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
-
- evo->sem.offset ^= 0x10;
- evo->sem.value++;
- return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
- u32 mthd;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- if (nv_device(drm->device)->card_type < NV_E0)
- mthd = 0x0490 + (nv_crtc->index * 0x0300);
- else
- mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, mthd, 1);
- evo_data(push, mode);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, dev, EVO_MASTER);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
- mode = nv_connector->scaling_mode;
-
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
- evo_kick(push, dev, EVO_MASTER);
- if (update) {
- nvd0_display_flip_stop(crtc);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- }
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- u32 *push;
-
- push = evo_wait(fb->dev, EVO_MASTER, 16);
- if (push) {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, fb->dev, EVO_MASTER);
- }
-
- nv_crtc->fb.tile_flags = nvfb->r_dma;
- return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, EVO_MASTER, 16);
- if (push) {
- if (show) {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- }
-
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
-
- evo_kick(push, dev, EVO_MASTER);
- }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- nvd0_display_flip_stop(crtc);
-
- push = evo_wait(crtc->dev, EVO_MASTER, 2);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 32);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
- evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
- int ret;
-
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 64);
- if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, false);
- nvd0_crtc_set_scale(nv_crtc, false);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
-
- if (!crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
- return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- for (i = 0; i < 256; i++) {
- writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
- writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
- writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
- }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
-
- if (visible) {
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
- }
-
- if (visible != nv_crtc->cursor.visible) {
- nvd0_crtc_cursor_show(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
- }
-
- return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ch = EVO_CURS(nv_crtc->index);
-
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
- evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
- return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
- u32 i;
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
- .dpms = nvd0_crtc_dpms,
- .prepare = nvd0_crtc_prepare,
- .commit = nvd0_crtc_commit,
- .mode_fixup = nvd0_crtc_mode_fixup,
- .mode_set = nvd0_crtc_mode_set,
- .mode_set_base = nvd0_crtc_mode_set_base,
- .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
- .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
- .cursor_set = nvd0_crtc_cursor_set,
- .cursor_move = nvd0_crtc_cursor_move,
- .gamma_set = nvd0_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = nvd0_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_crtc *nv_crtc;
- struct drm_crtc *crtc;
- int ret, i;
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nvd0_crtc_set_dither;
- nv_crtc->set_scale = nvd0_crtc_set_scale;
- nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
- nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- crtc = &nv_crtc->base;
- drm_crtc_init(dev, crtc, &nvd0_crtc_func);
- drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- nvd0_crtc_lut_load(crtc);
-
-out:
- if (ret)
- nvd0_crtc_destroy(crtc);
- return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x80000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
-
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 syncs, magic, *push;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- push = evo_wait(encoder->dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
- evo_data(push, 1 << nv_crtc->index);
- evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = NULL;
- }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- enum drm_connector_status status = connector_status_disconnected;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 load;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
- udelay(9500);
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
- load = nv_rd32(device, 0x61a00c + (or * 0x800));
- if ((load & 0x38000000) == 0x38000000)
- status = connector_status_connected;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
- return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
- .dpms = nvd0_dac_dpms,
- .mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_disconnect,
- .commit = nvd0_dac_commit,
- .mode_set = nvd0_dac_mode_set,
- .disable = nvd0_dac_disconnect,
- .get_crtc = nvd0_display_crtc_get,
- .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
- .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int i, or = nv_encoder->or * 0x30;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid))
- return;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
-
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
- nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
- }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or * 0x30;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
- u32 rekey = 56; /* binary driver, and tegra constant */
- u32 max_ac_packet;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_hdmi_monitor(nv_connector->edid))
- return;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* AVI InfoFrame */
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x61671c + head, 0x000d0282);
- nv_wr32(device, 0x616720 + head, 0x0000006f);
- nv_wr32(device, 0x616724 + head, 0x00000000);
- nv_wr32(device, 0x616728 + head, 0x00000000);
- nv_wr32(device, 0x61672c + head, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x6167ac + head, 0x00000010);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
- max_ac_packet << 16);
-
- /* NFI, audio doesn't work without it though.. */
- nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
- nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
-
- nvd0_audio_disconnect(encoder);
-
- nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config = NULL;
-
- switch (swing) {
- case 0: preem += 0; break;
- case 1: preem += 4; break;
- case 2: preem += 7; break;
- case 3: preem += 9; break;
- }
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) {
- config = entry + table[4];
- config += table[5] * preem;
- } else
- if (table[0] == 0x40) {
- config = table + table[1];
- config += table[2] * table[3];
- config += table[6] * preem;
- }
- }
-
- if (!config) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
- nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
- nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
- nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
- u32 script = 0x0000, lane_mask = 0;
- u8 *table, *entry;
- int i;
-
- link_bw /= 27000;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
- else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
- else entry = NULL;
-
- while (entry) {
- if (entry[0] >= link_bw)
- break;
- entry += 3;
- }
-
- nouveau_bios_run_init_table(dev, script, dcb, crtc);
- }
-
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- for (i = 0; i < link_nr; i++)
- lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
- nv_wr32(device, 0x612300 + soff, clksor);
- nv_wr32(device, 0x61c10c + loff, dpctrl);
- nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
- u32 *link_nr, u32 *link_bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x612300 + soff);
-
- if (dpctrl > 0x00030000) *link_nr = 4;
- else if (dpctrl > 0x00010000) *link_nr = 2;
- else *link_nr = 1;
-
- *link_bw = (clksor & 0x007c0000) >> 18;
- *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
- u32 crtc, u32 datarate)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 link_nr, link_bw;
- u64 ratio, value;
-
- nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- struct drm_encoder *partner;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
-
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
-
- dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
- dpms_ctrl |= 0x80000000;
-
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nvd0_sor_dp_link_set,
- .train_set = nvd0_sor_dp_train_set,
- .train_adj = nvd0_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
- nvd0_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct nvbios *bios = &drm->vbios;
- u32 mode_ctrl = (1 << nv_crtc->index);
- u32 syncs, magic, *push;
- u32 or_config;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctrl |= 0x00000100;
- else
- mode_ctrl |= 0x00000500;
- } else {
- mode_ctrl |= 0x00000200;
- }
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (mode->clock >= 165000)
- or_config |= 0x0100;
-
- nvd0_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_LVDS:
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- or_config |= 0x0100;
- if (bios->fp.if_is_24bit)
- or_config |= 0x0200;
- } else {
- if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- if (((u8 *)nv_connector->edid)[121] == 2)
- or_config |= 0x0100;
- } else
- if (mode->clock >= bios->fp.duallink_transition_clk) {
- or_config |= 0x0100;
- }
-
- if (or_config & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- or_config |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- or_config |= 0x0200;
- }
-
- if (nv_connector->base.display_info.bpc == 8)
- or_config |= 0x0200;
-
- }
- break;
- case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000002 << 6;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000005 << 6;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctrl |= 0x00000800;
- else
- mode_ctrl |= 0x00000900;
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
- nv_encoder->dp.datarate);
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
- evo_data(push, mode_ctrl);
- evo_data(push, or_config);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
- .dpms = nvd0_sor_dpms,
- .mode_fixup = nvd0_sor_mode_fixup,
- .prepare = nvd0_sor_prepare,
- .commit = nvd0_sor_commit,
- .mode_set = nvd0_sor_mode_set,
- .disable = nvd0_sor_disconnect,
- .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
- .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int type, or, i, link = -1;
-
- if (id < 4) {
- type = DCB_OUTPUT_ANALOG;
- or = id;
- } else {
- switch (mc & 0x00000f00) {
- case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
- case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
- case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
- case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
- return NULL;
- }
-
- or = id - 4;
- }
-
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)) &&
- (link < 0 || link == !(dcb->sorconf.link & 1)))
- return dcb;
- }
-
- NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
- return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb;
- u32 or, tmp, pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
- }
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
- NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
- crtc, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- nv50_crtc_set_clock(dev, crtc, pclk);
- }
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
- or = ffs(dcb->or) - 1;
-
- nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
- nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
- switch (dcb->type) {
- case DCB_OUTPUT_ANALOG:
- nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
- break;
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (cfg & 0x00000100)
- tmp = 0x00000101;
- else
- tmp = 0x00000000;
-
- nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
- break;
- default:
- break;
- }
-
- break;
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int pclk, i;
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 mask = 0, crtc = ~0;
- int i;
-
- if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
- NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
- NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(device, 0x6101d0),
- nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
- for (i = 0; i < 8; i++) {
- NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
- i < 4 ? "DAC" : "SOR", i,
- nv_rd32(device, 0x640180 + (i * 0x20)),
- nv_rd32(device, 0x660180 + (i * 0x20)));
- }
- }
-
- while (!mask && ++crtc < dev->mode_config.num_crtc)
- mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
- if (disp->modeset & 0x00000001)
- nvd0_display_unk1_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000002)
- nvd0_display_unk2_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000004)
- nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 intr = nv_rd32(device, 0x610088);
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(device, 0x61008c);
- nv_wr32(device, 0x61008c, stat);
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(device, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
- NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(device, 0x61009c, (1 << chid));
- nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(device, 0x6100ac);
-
- if (stat & 0x00000007) {
- disp->modeset = stat;
- tasklet_schedule(&disp->tasklet);
-
- nv_wr32(device, 0x6100ac, (stat & 0x00000007));
- stat &= ~0x00000007;
- }
-
- if (stat) {
- NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
- nv_wr32(device, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- intr &= ~0x0f000000; /* vblank, handled in core */
- if (intr)
- NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
- int i;
-
- /* fini cursors + overlays + flips */
- for (i = 1; i >= 0; i--) {
- evo_fini_pio(dev, EVO_CURS(i));
- evo_fini_pio(dev, EVO_OIMM(i));
- evo_fini_dma(dev, EVO_OVLY(i));
- evo_fini_dma(dev, EVO_FLIP(i));
- }
-
- /* fini master */
- evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret, i;
- u32 *push;
-
- if (nv_rd32(device, 0x6100ac) & 0x00000100) {
- nv_wr32(device, 0x6100ac, 0x00000100);
- nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
- NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
- * work at all unless you do the SOR part below.
- */
- for (i = 0; i < 3; i++) {
- u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
- nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
- }
-
- for (i = 0; i < 4; i++) {
- u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
- nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
- u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
- u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
- nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
- nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
- nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
- }
-
- /* point at our hash table / objects, enable interrupts */
- nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
- nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
- /* init master */
- ret = evo_init_dma(dev, EVO_MASTER);
- if (ret)
- goto error;
-
- /* init flips + overlays + cursors */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
- (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
- (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
- (ret = evo_init_pio(dev, EVO_CURS(i))))
- goto error;
- }
-
- push = evo_wait(dev, EVO_MASTER, 32);
- if (!push) {
- ret = -EBUSY;
- goto error;
- }
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000);
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
-
-error:
- if (ret)
- nvd0_display_fini(dev);
- return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct pci_dev *pdev = dev->pdev;
- int i;
-
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
- }
-
- nouveau_gpuobj_ref(NULL, &disp->mem);
- nouveau_bo_unmap(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
-
- nouveau_display(dev)->priv = NULL;
- kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bar *bar = nouveau_bar(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *tmp;
- struct pci_dev *pdev = dev->pdev;
- struct nvd0_display *disp;
- struct dcb_output *dcbe;
- int crtcs, ret, i;
-
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = disp;
- nouveau_display(dev)->dtor = nvd0_display_destroy;
- nouveau_display(dev)->init = nvd0_display_init;
- nouveau_display(dev)->fini = nvd0_display_fini;
-
- /* create crtc objects to represent the hw heads */
- crtcs = nv_rd32(device, 0x022448);
- for (i = 0; i < crtcs; i++) {
- ret = nvd0_crtc_create(dev, i);
- if (ret)
- goto out;
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
-
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nvd0_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nvd0_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
- }
-
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
- continue;
-
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
-
- /* setup interrupt handling */
- tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
- /* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
- }
-
- if (ret)
- goto out;
-
- /* hash table and dma objects for the memory areas we care about */
- ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
- if (ret)
- goto out;
-
- /* create evo dma channels */
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- u64 offset = disp->sync->bo.offset;
- u32 dmao = 0x1000 + (i * 0x100);
- u32 hash = 0x0000 + (i * 0x040);
-
- evo->idx = i;
- evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
- evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
- if (!evo->ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
- nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
- nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
- nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
- nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
- ((dmao + 0x00) << 9));
-
- nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
- nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
- nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
- ((dmao + 0x20) << 9));
-
- nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
- nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
- nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
- nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
- ((dmao + 0x40) << 9));
-
- nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
- nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
- nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
- nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
- ((dmao + 0x60) << 9));
- }
-
- bar->flush(bar);
-
-out:
- if (ret)
- nvd0_display_destroy(dev);
- return ret;
-}
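For readers following the nvd0 SOR code removed above: nvd0_sor_dp_calc_tu() boils down to one fixed-point computation of the DP transfer-unit fill. The standalone sketch below reproduces that arithmetic with the same constants (symbol = 100000, TU = 64, the +5 fudge and the 0x08000000 enable bit taken from the deleted code); the unit conventions and the example numbers are illustrative assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the deleted nvd0_sor_dp_calc_tu(); datarate and
 * link_bw follow the deleted code's conventions (datarate = pixel
 * clock in kHz * bpp / 8, link_bw = DP bw code * 27000). */
static uint32_t calc_tu_fill(uint64_t datarate, uint32_t link_nr, uint32_t link_bw)
{
	const uint64_t symbol = 100000;
	const uint64_t TU = 64;
	uint64_t ratio, value;

	ratio = datarate * symbol / (link_nr * link_bw);
	value = (symbol - ratio) * TU * ratio / symbol / symbol;
	return (uint32_t)(value + 5) | 0x08000000;	/* +5 fudge, enable bit */
}

int main(void)
{
	/* hypothetical: 148.5 MHz pixel clock, 24 bpp, 4 lanes at 2.7 GHz */
	printf("0x%08x\n", (unsigned)calc_tu_fill(148500 * 24 / 8, 4, 270000));
	return 0;
}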
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 24d932f5320..9175615bbd8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e..064023bed48 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
-#define DP_LINK_STATUS_SIZE 6
-#define DP_DPCD_SIZE 8
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/***** general DP utility functions *****/
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
- int s = (lane & 1) * 4;
- u8 l = dp_link_status(link_status, i);
- return (l >> s) & 0xf;
-}
-
-static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- int lane;
- u8 lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
-
-static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- u8 lane_align;
- u8 lane_status;
- int lane;
-
- lane_align = dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
- return false;
- }
- return true;
-}
-
-static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane;
for (lane = 0; lane < lane_count; lane++) {
- u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
- u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
return (link_rate * lane_num * 8) / bpp;
}
-static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
-{
- switch (dpcd[DP_MAX_LINK_RATE]) {
- case DP_LINK_BW_1_62:
- default:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- }
-}
-
-static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static u8 dp_get_dp_link_rate_coded(int link_rate)
-{
- switch (link_rate) {
- case 162000:
- default:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- }
-}
-
/***** radeon specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = dp_get_max_link_rate(dpcd);
- int max_lane_num = dp_get_max_lane_number(dpcd);
+ int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
- return dp_get_max_link_rate(dpcd);
+ return drm_dp_max_link_rate(dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- u8 msg[25];
+ u8 msg[DP_DPCD_SIZE];
int ret, i;
- ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE, 0);
if (ret > 0) {
- memcpy(dig_connector->dpcd, msg, 8);
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < 8; i++)
+ for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
- if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
int enc_id;
int dp_clock;
int dp_lane_count;
- int rd_interval;
bool tp3_supported;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
- tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(100);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(400);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
- dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
- memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+ memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
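The atombios_dp.c hunks above drop the driver-local DPCD helpers in favour of the shared drm_dp_helper versions (drm_dp_max_link_rate(), drm_dp_max_lane_count(), drm_dp_link_rate_to_bw_code(), drm_dp_channel_eq_ok(), ...). The conversion they all agree on is the one visible in the deleted code; the sketch below restates it on its own. The numeric bw-code values are the standard DPCD constants and do not appear in the hunk, so treat them as an assumption.

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_BW_1_62	0x06	/* standard DPCD codes (assumption, not in the hunk) */
#define DP_LINK_BW_2_7	0x0a
#define DP_LINK_BW_5_4	0x14

/* mirrors the deleted dp_get_max_link_rate() */
static int bw_code_to_link_rate(uint8_t code)
{
	switch (code) {
	case DP_LINK_BW_2_7:	return 270000;
	case DP_LINK_BW_5_4:	return 540000;
	case DP_LINK_BW_1_62:
	default:		return 162000;
	}
}

/* mirrors the deleted dp_get_dp_link_rate_coded() */
static uint8_t link_rate_to_bw_code(int link_rate)
{
	switch (link_rate) {
	case 270000:	return DP_LINK_BW_2_7;
	case 540000:	return DP_LINK_BW_5_4;
	case 162000:
	default:	return DP_LINK_BW_1_62;
	}
}

int main(void)
{
	/* round trip: 2.7 Gbps <-> bw code 0x0a */
	printf("%d <-> 0x%02x\n", bw_code_to_link_rate(DP_LINK_BW_2_7),
	       link_rate_to_bw_code(270000));
	return 0;
}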
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 010bae19554..4552d4aff31 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
+ radeon_dp_set_link_config(connector, adjusted_mode);
}
return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 219942c660d..f95d7fc1f5e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1650,7 +1650,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
- rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 4;
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
- rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2403,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
+ tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ WREG32(CAYMAN_DMA1_CNTL, tmp);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_CAYMAN)
+ WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3120,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3116,9 +3153,19 @@ restart_ih:
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ if (rdev->family >= CHIP_CAYMAN) {
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3144,6 +3191,143 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, followed by a DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3207,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3221,12 +3411,23 @@ static int evergreen_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
r = evergreen_cp_resume(rdev);
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3273,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3366,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3393,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
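The new evergreen_copy_dma() above splits a copy into DMA_PACKET_COPY packets of at most 0xFFFFF dwords each and reserves five ring dwords per packet plus eleven for the semaphore/fence packets. A minimal standalone sketch of that sizing, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT = 12) and a made-up copy size:

#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT	12		/* 4 KiB GPU pages, as in the driver */
#define DMA_MAX_COPY_DW		0xfffff	/* max dwords per DMA_PACKET_COPY */

int main(void)
{
	unsigned num_gpu_pages = 2048;		/* hypothetical 8 MiB copy */
	unsigned size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = (size_in_dw + DMA_MAX_COPY_DW - 1) / DMA_MAX_COPY_DW;
	unsigned ring_dw = num_loops * 5 + 11;	/* same budget as the hunk above */

	printf("%u dwords -> %u copy packets, %u ring dwords reserved\n",
	       size_in_dw, num_loops, ring_dw);
	return 0;
}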
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c042e497e45..7a445666e71 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
/* height is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
+ /* always assume 8x8 htile */
+ /* alignment is the htile alignment * 8; the htile alignment varies
+ * with the number of pipes, the tile width and nby
+ */
switch (track->npipes) {
case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 64 * 8);
break;
case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 32 * 8);
break;
case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 32 * 8);
break;
case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 16 * 8);
break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
}
}
/* compute number of htile */
- nbx = nbx / 8;
- nby = nby / 8;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_SURFACE:
/* 8x8 only */
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size, info;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ info = radeon_get_ib_value(p, idx+1);
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem-to-mem copies require a dw aligned count */
+ if (size % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ /* GDS is ok */
+ if (((info & 0x60000000) >> 29) != 1) {
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ if (((info & 0x60000000) >> 29) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ } else if (((info & 0x60000000) >> 29) != 2) {
+ DRM_ERROR("bad CP DMA SRC_SEL\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ /* GDS is ok */
+ if (((info & 0x00300000) >> 20) != 1) {
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ if (((info & 0x00300000) >> 20) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad CP DMA DST_SEL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2540,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2715,6 +2858,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/*
+ * DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
+
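The GET_DMA_* macros just added unpack one 32-bit DMA IB header dword: command in bits 31:28, the "new packet" flag in bit 26, the tiled flag in bit 23, a misc sub-opcode in bits 22:20 and the transfer count in bits 19:0 (bytes or dwords depending on the packet). A quick standalone decode of a made-up header, reusing the same masks:

#include <stdint.h>
#include <stdio.h>

#define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
#define GET_DMA_T(h)     (((h) & 0x00800000) >> 23)
#define GET_DMA_NEW(h)   (((h) & 0x04000000) >> 26)
#define GET_DMA_MISC(h)  (((h) & 0x0700000) >> 20)

int main(void)
{
	uint32_t header = 0x44900010;	/* hypothetical header value */

	printf("cmd=%u count=%u tiled=%u new=%u misc=%u\n",
	       (unsigned)GET_DMA_CMD(header), (unsigned)GET_DMA_COUNT(header),
	       (unsigned)GET_DMA_T(header), (unsigned)GET_DMA_NEW(header),
	       (unsigned)GET_DMA_MISC(header));
	return 0;
}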
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset, dst2_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 7;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("T2T Partial is cayman only!\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst2_offset = ib[idx+2];
+ dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+ src_offset = ib[idx+3];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib->length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
+
/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
@@ -2724,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
+ case WAIT_UNTIL:
case GRBM_GFX_INDEX:
case CP_STRMOUT_CNTL:
case CP_COHER_CNTL:
@@ -2843,6 +3436,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2917,6 +3511,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem-to-mem copies require a dw-aligned count */
+ if ((command & 0x1fffff) % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
return -EINVAL;
}
@@ -2958,3 +3610,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
return ret;
}
+
+/**
+ * evergreen_dma_ib_parse - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib: radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks it for errors (Cayman-SI).
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ u32 idx = 0;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+
+ do {
+ header = ib->ptr[idx];
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ if (tiled)
+ idx += count + 7;
+ else
+ idx += count + 3;
+ break;
+ case DMA_PACKET_COPY:
+ if (tiled) {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (idx < ib->length_dw);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2bc0f6a1b42..cb9baaac9e8 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
/* Registers */
@@ -355,6 +357,54 @@
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
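For illustration only (the values are made up), an 8-channel LPCM descriptor advertising 32/44.1/48 kHz could be composed from the fields above like this:

	/* illustrative only: 8 channels (7 = channels minus one),
	 * frequency mask bits 0-2 = 32/44.1/48 kHz
	 */
	u32 tmp = MAX_CHANNELS(7) |
		  SUPPORTED_FREQUENCIES(0x7) |
		  SUPPORTED_FREQUENCIES_STEREO(0x7);
	WREG32(AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, tmp);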
+
+#define AZ_HOT_PLUG_CONTROL 0x5e78
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -651,6 +701,7 @@
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT1_CNTL2 0x1434
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -672,6 +723,8 @@
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define WAIT_UNTIL 0x8040
@@ -854,6 +907,37 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* ASYNC DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xD0B8
+
+#define CAYMAN_DMA1_CNTL 0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
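A minimal sketch of how a DMA header dword round-trips through the layout above; the decode shifts simply mirror the DMA_PACKET encode and are shown only for illustration:

	/* illustrative only: 1-dword, untiled DMA write packet header */
	u32 header = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); /* 0x20000001 */
	u32 cmd    = (header >> 28) & 0xF;  /* DMA_PACKET_WRITE */
	u32 tiled  = (header >> 23) & 0x1;  /* 0 = linear */
	u32 count  = header & 0xFFFFF;      /* 1 payload dword */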
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -951,6 +1035,53 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
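Following the six-dword layout described above, a plain memory-to-memory CP DMA copy could be encoded roughly as below; src_addr, dst_addr and byte_count are placeholder variables used only for this sketch:

	/* illustrative only: mem-to-mem copy, no swapping, default command flags */
	u32 body[5];
	body[0] = lower_32_bits(src_addr);            /* SRC_ADDR_LO          */
	body[1] = PACKET3_CP_DMA_SRC_SEL(0) |         /* src = memory         */
		  PACKET3_CP_DMA_DST_SEL(0) |         /* dst = memory         */
		  (upper_32_bits(src_addr) & 0xff);   /* SRC_ADDR_HI [7:0]    */
	body[2] = lower_32_bits(dst_addr);            /* DST_ADDR_LO          */
	body[3] = upper_32_bits(dst_addr) & 0xff;     /* DST_ADDR_HI [7:0]    */
	body[4] = byte_count & 0x1fffff;              /* COMMAND | BYTE_COUNT */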
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 81e6a568c29..7bdbcb00aaf 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
@@ -1059,7 +1072,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
WREG32(SCRATCH_UMSK, 0xff);
@@ -1076,7 +1089,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
#endif
WREG32(cp_rb_cntl[i], rb_cntl);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
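A short illustration of the padding rule used above: the INDIRECT_BUFFER packet is three dwords, so starting it at offset 5 within an 8-dword group makes it end exactly on the boundary. The helper below only demonstrates that arithmetic and is not driver code:

/* illustrative only: NOP dwords needed before the 3-dword IB packet */
static inline u32 dma_ib_pad_dw(u32 wptr)
{
	u32 pad = 0;

	while (((wptr + pad) & 7) != 5)
		pad++;
	return pad; /* wptr % 8 == 0 -> 5 NOPs, wptr % 8 == 5 -> none */
}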
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
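As a quick check of the ring-size encoding used above (drm_order() gives the log2 order, and the size field sits at bit 1 of DMA_RB_CNTL), the 64 KiB rings allocated in cayman_init() work out as follows; this is only an illustration:

	/* illustrative only: 64 KiB ring = 16384 dwords -> order 14 */
	u32 rb_bufsz = drm_order((64 * 1024) / 4); /* 14 */
	u32 rb_cntl  = DMA_RB_SIZE(rb_bufsz);      /* same as rb_bufsz << 1 */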
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
return cayman_gpu_soft_reset(rdev);
}
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ else
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = cayman_cp_load_microcode(rdev);
if (r)
return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
cayman_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
}
-
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
}
}
}
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
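The flush above emits the same three-dword SRBM write sequence for each register; a compact sketch of that pattern as a hypothetical helper:

/* illustrative only: one SRBM register write on a DMA ring */
static void dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
	radeon_ring_write(ring, val);
}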
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907..b93186b8ee4 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,62 @@
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_MODE 0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd..8ff7cac222d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
}
}
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157d..2aaf147969b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ dma_status_reg = RREG32(DMA_STATUS_REG);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
@@ -1424,13 +1447,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
-
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
/*
* GPU scratch registers helpers function.
@@ -2252,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -2315,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
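A usage sketch of the semaphore helper above (the ring and semaphore variables are placeholders): the signalling ring passes emit_wait = false, the waiting ring passes true.

	/* illustrative only: ring_a signals, ring_b waits on the same semaphore */
	r600_dma_semaphore_ring_emit(rdev, ring_a, sem, false); /* s = 1, signal */
	r600_dma_semaphore_ring_emit(rdev, ring_b, sem, true);  /* s = 0, wait   */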
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2334,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
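A quick check of the chunking arithmetic above, assuming 4 KiB GPU pages; the numbers are illustrative only:

	/* illustrative only: dword count and COPY packets for 1024 pages */
	u32 size_in_dw = 1024 * (4096 / 4);                /* 1048576 dwords  */
	u32 num_loops  = DIV_ROUND_UP(size_in_dw, 0xffff); /* 17 COPY packets */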
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
static int r600_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -2394,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2403,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
+
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2416,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2544,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2556,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2572,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2674,6 +3023,104 @@ free_scratch:
return r;
}
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
/*
* Interrupts
*
@@ -2865,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
}
WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3927,10 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d0..be85f75aedd 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gfx_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe..9ea13d07cc5 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
- /* htile widht & nby (8 or 4) make 2 bits number */
- tmp = track->htile_surface & 3;
+ /* always assume 8x8 htile */
/* align is htile align * 8, htile align vary according to
* number of pipe and tile width and nby
*/
switch (track->npipes) {
case 8:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 64 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
break;
case 4:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 2:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 1:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 8 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
break;
default:
dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
}
/* compute number of htile */
- nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
- nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
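/* Worked example of the new size computation above (illustrative values):
 * with 8x8 htiles, npipes = 4 and nbx = nby = 256 after alignment:
 *   nbx >> 3 = 32 htiles across, nby >> 3 = 32 htiles down
 *   raw size = 32 * 32 * 4 bytes            = 4096 bytes
 *   aligned  = roundup(4096, 4 * (2 << 10)) = 8192 bytes
 * i.e. on a 4-pipe part the htile buffer is padded to an 8 KiB multiple.
 */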
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_HTILE_SURFACE:
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2276,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2496,3 +2543,196 @@ void r600_cs_legacy_init(void)
{
r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
+
+/*
+ * DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: reloc information
+ *
+ * Return the next reloc from the CS relocation chunk and advance the
+ * parser's dma_reloc_idx.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ unsigned idx;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ idx = p->dma_reloc_idx;
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs_ptr[idx];
+ p->dma_reloc_idx++;
+ return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
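/* These helpers just extract fields from the 32-bit DMA packet header.
 * Decoding a hypothetical linear-write header, for example:
 *   header = 0x20000100
 *   GET_DMA_CMD(header)   = 0x2   -> DMA_PACKET_WRITE
 *   GET_DMA_T(header)     = 0     -> linear (not tiled)
 *   GET_DMA_COUNT(header) = 0x100 -> 256 dwords of payload
 */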
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ u32 header, cmd, count, tiled;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 5;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+5];
+ dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+5];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 7;
+ } else {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("Constant Fill is 7xx only !\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
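/* For reference, a minimal linear DMA_PACKET_WRITE stream accepted by the
 * parser above looks like this (layout inferred from the WRITE case; the
 * destination reloc comes from the CS ioctl's relocation chunk):
 *   dw0        DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 4)   header, count = 4
 *   dw1        dst address bits 31:0  (patched with the reloc GPU offset)
 *   dw2        dst address bits 39:32 (patched with the reloc upper bits)
 *   dw3..dw6   4 dwords of payload
 *   dw7        DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)      optional padding
 * p->idx advances by count + 3 for the write and by 1 for each NOP.
 */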
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18..909219b1bf8 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
#define R600_CONFIG_F0_BASE 0x542C
#define R600_CONFIG_APER_SIZE 0x5430
+#define R600_BIF_FB_EN 0x5490
+#define R600_FB_READ_EN (1 << 0)
+#define R600_FB_WRITE_EN (1 << 1)
+
+#define R600_CITF_CNTL 0x200c
+#define R600_BLACKOUT_MASK 0x00000003
+
+#define R700_MC_CITF_CNTL 0x25c0
+
#define R600_ROM_CNTL 0x1600
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba..4a53402b185 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
+/* async DMA */
+#define DMA_TILING_CONFIG 0x3ec4
+#define DMA_CONFIG 0x3e4c
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_MODE 0xd0bc
+
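/* Sketch of how the ring-buffer bits are typically combined at bring-up
 * (the actual code lives in r600_dma_resume(), outside this excerpt; the
 * SIZE field is assumed to take log2 of the ring size in dwords, as the
 * log2 annotations above suggest):
 *
 *   #include <linux/log2.h>   for order_base_2()
 *
 *   u32 rb_cntl = DMA_RB_ENABLE |
 *                 DMA_RB_SIZE(order_base_2(64 * 1024 / 4)) |
 *                 DMA_RPTR_WRITEBACK_ENABLE;
 *   WREG32(DMA_RB_CNTL, rb_cntl);
 */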
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
+#define DMA_PACKET_NOP 0xf
+
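/* Examples of headers built with the DMA_PACKET() macro (illustrative):
 *   DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 256) = 0x20000100   linear write
 *   DMA_PACKET(DMA_PACKET_COPY,  1, 0, 64)  = 0x30800040   tiled copy
 *   DMA_PACKET(DMA_PACKET_NOP,   0, 0, 0)   = 0xf0000000   padding
 */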
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
-# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
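/* Given the checks added to r600_packet3_check() above (SAS/DAS register
 * transfers and SAIC/DAIC are rejected for user submissions), the only
 * COMMAND layout the CS ioctl accepts is a plain memory-to-memory copy.
 * A hypothetical COMMAND dword for a 4 KiB copy with no byte swapping:
 *
 *   u32 command = PACKET3_CP_DMA_CMD_SRC_SWAP(0) |
 *                 PACKET3_CP_DMA_CMD_DST_SWAP(0) |
 *                 4096;   BYTE_COUNT in bits 20:0
 */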
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e2..9b9422c4403 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 5
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX 3
+/* cayman adds a second async DMA ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -220,12 +225,13 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
@@ -313,6 +319,7 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 placements[3];
+ u32 busy_placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -787,6 +794,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
/*
* CS.
*/
@@ -824,6 +840,7 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ unsigned dma_reloc_idx;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
@@ -883,7 +900,9 @@ struct radeon_wb {
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1539,6 +1558,8 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
+ /* protects concurrent MM_INDEX/DATA based register access */
+ spinlock_t mmio_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -1614,8 +1635,10 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -1631,9 +1654,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1683,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
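/* The new _IDX variants replace open-coded RADEON_MM_INDEX/RADEON_MM_DATA
 * sequences elsewhere in this patch (see radeon_combios.c and
 * radeon_cursor.c below). A before/after sketch; the indirect path is
 * presumably serialized by the new rdev->mmio_idx_lock:
 *
 *   before:  WREG32(RADEON_MM_INDEX, reg);
 *            WREG32(RADEON_MM_DATA, val);
 *
 *   after:   WREG32_IDX(reg, val);
 */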
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab..596bcbe80ed 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
}
},
.irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = NULL,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &si_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &si_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be..5f4882cc215 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
struct rv515_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[2];
};
int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cf..4af89126e22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
while (ram--) {
addr = ram * 1024 * 1024;
/* write to each page */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- WREG32(RADEON_MM_DATA, 0xdeadbeef);
+ WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
/* read back and verify */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+ if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c..47bf162ab9c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
*/
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b6..9143fc45e35 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
}
}
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
- u32 ret;
-
- if (addr < 0x10000)
- ret = DRM_READ32(dev_priv->mmio, addr);
- else {
- DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
- ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
- }
-
- return ret;
-}
-
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563f..396baba0141 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return 0;
}
chunk = &p->chunks[p->chunk_relocs_idx];
+ p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
} else
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
+ case RADEON_CS_RING_DMA:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->priority > 0)
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+ } else if (p->rdev->family >= CHIP_R600) {
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ } else {
+ return -EINVAL;
+ }
+ break;
}
return 0;
}
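/* Outcome of the new RADEON_CS_RING_DMA branch above, for reference:
 *   family >= CHIP_CAYMAN, priority > 0  -> R600_RING_TYPE_DMA_INDEX
 *   family >= CHIP_CAYMAN, otherwise     -> CAYMAN_RING_TYPE_DMA1_INDEX
 *   CHIP_R600 .. pre-CAYMAN              -> R600_RING_TYPE_DMA_INDEX
 *   pre-R600                             -> -EINVAL (no async DMA engine)
 */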
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64b..ad6df625e8b 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ u32 reg;
switch (radeon_crtc->crtc_id) {
case 0:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ reg = RADEON_CRTC_GEN_CNTL;
break;
case 1:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ reg = RADEON_CRTC2_GEN_CNTL;
break;
default:
return;
}
- WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+ WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c37..cd756262924 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
+ spin_lock_init(&rdev->mmio_idx_lock);
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
@@ -1163,6 +1164,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
+ bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1205,8 +1207,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_empty_locked(rdev, i);
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* defer GPU reset until resume */
+ force_completion = true;
+ }
+ }
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1337,7 +1347,6 @@ retry:
}
radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1357,11 +1366,14 @@ retry:
}
}
} else {
+ radeon_fence_driver_force_completion(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
+ drm_helper_resume_force_mode(rdev->ddev);
+
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a601572..310c0e5254b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->old_rbo = rbo;
obj = new_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
if (rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a..ff7593498a7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,13 @@
* 2.22.0 - r600 only: RESOLVE_BOX allowed
* 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
* 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * 2.25.0 - eg+: new info request for num SE and num SH
+ * 2.26.0 - r600-eg: fix htile size computation
+ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * 2.28.0 - r600-eg: Add MEM_WRITE packet support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 24
+#define KMS_DRIVER_MINOR 28
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +285,15 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
-static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -295,13 +302,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
kfree(ap);
+
+ return 0;
}
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ int ret;
+
/* Get rid of things like offb */
- radeon_kick_out_firmware_fb(pdev);
+ ret = radeon_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &kms_driver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d0..e7fdf163a8c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c274..34356252567 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
- while(1) {
- int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
if (r == -EDEADLK) {
- mutex_unlock(&rdev->ring_lock);
- r = radeon_gpu_reset(rdev);
- mutex_lock(&rdev->ring_lock);
- if (!r)
- continue;
- }
- if (r) {
- dev_err(rdev->dev, "error waiting for ring to become"
- " idle (%d)\n", r);
+ return -EDEADLK;
}
- return;
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ ring, r);
}
+ return 0;
}
/**
@@ -772,7 +766,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
int r;
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- if (rdev->wb.use_event) {
+ if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- int ring;
+ int ring, r;
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_empty_locked(rdev, ring);
+ r = radeon_fence_wait_empty_locked(rdev, ring);
+ if (r) {
+ /* no need to trigger GPU reset as we are unloading */
+ radeon_fence_driver_force_completion(rdev);
+ }
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure, make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ }
+}
+
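radeon_fence_driver_force_completion() above unblocks waiters by writing the last emitted sequence number back for each ring. A minimal standalone sketch of the underlying idea (the driver's actual comparison may differ in its wrap handling): a fence counts as signaled once the ring's written-back sequence has reached the fence's own sequence, so writing sync_seq[ring] satisfies every outstanding fence on that ring.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: forcing the written-back value to the last emitted
 * sequence makes this predicate true for all pending fences on the ring. */
static bool fence_signaled_example(uint64_t written_back_seq, uint64_t fence_seq)
{
	return written_back_seq >= fence_seq;
}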
/*
* Fence debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa..6e24f84755b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
{
struct radeon_bo_va *bo_va;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96..9c312f9afb6 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_MAX_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.num_ses;
+ else
+ value = 1;
+ break;
+ case RADEON_INFO_MAX_SH_PER_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_sh_per_se;
+ else
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
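A hedged userspace sketch of how the new RADEON_INFO_MAX_SE request might be issued; it assumes the usual drm_radeon_info convention (as used by libdrm) where 'value' carries a pointer to a 32-bit result buffer.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int query_max_shader_engines_example(int drm_fd, uint32_t *max_se)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SE;	/* request handled in the hunk above */
	info.value = (uintptr_t)max_se;		/* kernel copies the result here */

	return ioctl(drm_fd, DRM_IOCTL_RADEON_INFO, &info);
}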
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf0..d818b503b42 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -427,7 +427,7 @@ struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef8..883c95d8d90 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
- rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (domain & RADEON_GEM_DOMAIN_GTT)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- if (domain & RADEON_GEM_DOMAIN_CPU)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ }
+ }
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
+
+ c = 0;
+ rbo->placement.busy_placement = rbo->busy_placements;
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
rbo->placement.num_busy_placement = c;
}
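The placement rework above maps GTT and CPU domains write-combined on AGP and cached otherwise, and hands TTM a separate GTT-only busy list; a hypothetical helper (kernel context assumed: radeon.h and the TTM placement flags) showing just the caching decision for the GTT case:

/* Sketch of the caching choice made in the hunk above. */
static inline u32 radeon_gtt_placement_flags_example(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP)
		return TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;		/* AGP aperture: write-combined */
	return TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;		/* PCIe GART: cached pages */
}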
@@ -140,7 +157,7 @@ int radeon_bo_create(struct radeon_device *rdev,
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL,
+ &bo->placement, page_align, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
- u32 domain;
int r;
r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
- domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
-
- retry:
- radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false, false);
+ true, false);
if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- goto retry;
- }
return r;
}
}
@@ -384,7 +392,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (!bo->tiling_flags)
return 0;
@@ -510,7 +518,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
if (pitch)
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2..5fc86b03043 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
- return !!atomic_read(&bo->tbo.reserved);
+ return ttm_bo_is_reserved(&bo->tbo);
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4f..0bfa656aa87 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
- int i;
+ int i, r;
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
- if (ring->ready)
- radeon_fence_wait_empty_locked(rdev, i);
+ if (!ring->ready) {
+ continue;
+ }
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* needs a GPU reset; don't reset here */
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return;
+ }
}
radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 47634f27f2e..ebd69562ef6 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -459,7 +459,7 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *r
*
* @ring: radeon_ring structure holding ring information
*
- * Reset the driver's copy of the wtpr (all asics).
+ * Reset the driver's copy of the wptr (all asics).
*/
void radeon_ring_undo(struct radeon_ring *ring)
{
@@ -503,7 +503,7 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
}
/**
- * radeon_ring_force_activity - update lockup variables
+ * radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba..fda09c9ea68 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
#include "radeon_reg.h"
#include "radeon.h"
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA 0
+
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
- int r;
+ int r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+ ring = radeon_copy_dma_ring_index(rdev);
+ break;
+ case RADEON_TEST_COPY_BLIT:
+ ring = radeon_copy_blit_ring_index(rdev);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ return;
+ }
size = 1024 * 1024;
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
}
}
+void radeon_test_moves(struct radeon_device *rdev)
+{
+ if (rdev->asic->copy.dma)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+ if (rdev->asic->copy.blit)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db..1d8ff2f850b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -320,7 +320,7 @@ out_cleanup:
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -359,7 +360,7 @@ out_cleanup:
static int radeon_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
return r;
}
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}
-static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int radeon_sync_obj_flush(void *sync_obj)
{
return 0;
}
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
return radeon_fence_ref((struct radeon_fence *)sync_obj);
}
-static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool radeon_sync_obj_signaled(void *sync_obj)
{
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b2..2bb6d0e84b3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
void rv515_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- /* Stop all video */
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ /* disable VGA render */
WREG32(R_000300_VGA_RENDER_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, 0);
- WREG32(R_006880_D2CRTC_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
- WREG32(R_000330_D1VGA_CONTROL, 0);
- WREG32(R_000338_D2VGA_CONTROL, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_RV770)
+ blackout = RREG32(R700_MC_CITF_CNTL);
+ else
+ blackout = RREG32(R600_CITF_CNTL);
+ if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+ /* Block CPU access */
+ WREG32(R600_BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout |= R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, blackout);
+ else
+ WREG32(R600_CITF_CNTL, blackout);
+ }
+ }
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
- /* Unlock host access */
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->family >= CHIP_RV770) {
+ if (i == 1) {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ } else {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ }
+ }
+ WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+ WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ if (rdev->family >= CHIP_R600) {
+ /* unblackout the MC */
+ if (rdev->family >= CHIP_RV770)
+ tmp = RREG32(R700_MC_CITF_CNTL);
+ else
+ tmp = RREG32(R600_CITF_CNTL);
+ tmp &= ~R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, tmp);
+ else
+ WREG32(R600_CITF_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+ }
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e..87c979c4f72 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = rv770_cp_load_microcode(rdev);
if (r)
return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d7..20e29d23d34 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
+#define DMA_TILING_CONFIG 0x3ec8
+#define DMA_TILING_CONFIG2 0xd0b8
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
#define WAIT_UNTIL 0x8040
+/* async DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+
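Worked example of the 4-argument header encoding defined above (rv770 flavour, 16-bit count field):

/* DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)
 *   = (0xF << 28) | (0 << 23) | (0 << 22) | 0   = 0xF0000000
 * DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 16)
 *   = (0x2 << 28) | 16                          = 0x20000010
 */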
#define SRBM_STATUS 0x0E50
/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 4422d630b33..ef683653f0b 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
udelay(50);
}
@@ -2007,7 +2012,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB0_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -2040,7 +2045,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB1_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
@@ -2066,7 +2071,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB2_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-15 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
si_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2633,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -2809,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- uint64_t value;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+ radeon_ring_write(ring, pe); /* dst addr */
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ radeon_ring_write(ring, r600_flags); /* mask */
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, value); /* value */
+ radeon_ring_write(ring, upper_32_bits(value));
+ radeon_ring_write(ring, incr); /* increment size */
+ radeon_ring_write(ring, 0);
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
}
}
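Each page-table entry written above is 64 bits, i.e. two ring dwords, which is where the count * 2 terms and the per-packet caps come from; a small sketch of that arithmetic (helper name hypothetical):

/* Sketch: how many entries fit in one packet for the paths above, e.g.
 * GFX WRITE_DATA: max_ndw = 0x3FFE, header_dw = 2;
 * DMA WRITE:      max_ndw = 0xFFFFE, header_dw = 0 (the cap is applied to ndw). */
static inline unsigned si_pt_entries_per_packet_example(unsigned max_ndw,
							unsigned header_dw)
{
	return (max_ndw - header_dw) / 2;	/* two dwords per 64-bit entry */
}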
@@ -2880,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
/*
* RLC
*/
@@ -3048,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
+ tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3849,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
@@ -3707,9 +3882,17 @@ restart_ih:
break;
}
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3733,6 +3916,80 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
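Worked example of the chunking in si_copy_dma() above: each DMA COPY packet moves at most 0xFFFFF bytes and costs five ring dwords, so a 1024-page move breaks down as follows (the extra 11 reserved dwords leave headroom for the optional semaphore sync and the fence emit):

/* size_in_bytes = 1024 << RADEON_GPU_PAGE_SHIFT = 4194304 (4 MiB)
 * num_loops     = DIV_ROUND_UP(4194304, 0xfffff) = 5
 *   -> four COPY packets of 0xFFFFF (1048575) bytes plus a final 4-byte one;
 *      ring space reserved = num_loops * 5 + 11 = 36 dwords.
 */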
/*
* startup/shutdown callbacks
*/
@@ -3804,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -3834,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
r = si_cp_load_microcode(rdev);
if (r)
return r;
@@ -3841,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
si_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ cayman_dma_stop(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -3962,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3974,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
@@ -4002,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
#endif
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index a8871afc5b4..62b46215d42 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -91,7 +91,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +115,9 @@
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
@@ -835,6 +849,54 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -922,4 +984,61 @@
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
#define PACKET3_SWITCH_BUFFER 0x8B
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
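Worked example of the 5-argument SI header encoding above (20-bit count field), matching the values emitted by si_copy_dma() and si_dma_vm_flush() earlier in this patch:

/* DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)
 *   = (0xF << 28)                              = 0xF0000000
 * DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, n)
 *   = (0x3 << 28) | (1 << 26) | (n & 0xFFFFF)  = 0x34000000 | n
 * DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)
 *   = (0x9 << 28)                              = 0x90000000
 */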
#endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0..d917a411ca8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
connector->encoder = encoder;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 00000000000..be1daf7344d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
+config DRM_TEGRA
+ tristate "NVIDIA Tegra DRM"
+ depends on DRM && OF && ARCH_TEGRA
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 00000000000..80f73d1315d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := drm.o fb.o dc.o host1x.o
+tegra-drm-y += output.o rgb.o hdmi.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 00000000000..656b2e3334a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,833 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <mach/clk.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_dc_window {
+ fixed20_12 x;
+ fixed20_12 y;
+ fixed20_12 w;
+ fixed20_12 h;
+ unsigned int outx;
+ unsigned int outy;
+ unsigned int outw;
+ unsigned int outh;
+ unsigned int stride;
+ unsigned int fmt;
+};
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+ unsigned int bpp)
+{
+ fixed20_12 outf = dfixed_init(out);
+ u32 dda_inc;
+ int max;
+
+ if (v)
+ max = 15;
+ else {
+ switch (bpp) {
+ case 2:
+ max = 8;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ /* fallthrough */
+ case 4:
+ max = 4;
+ break;
+ }
+ }
+
+ outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+ inf.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(inf, outf);
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
+
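A hedged worked example for compute_dda_inc() above, using the 20.12 fixed-point helpers it relies on: for an unscaled window (in == out) the increment works out to 1.0.

/* in = out = dfixed_const(1920), horizontal, bpp = 4:
 *   outf    = max(1920.0 - 1.0, 1.0) = 1919.0
 *   inf     = 1920.0 - 1.0           = 1919.0
 *   dda_inc = dfixed_div(1919.0, 1919.0) = 1.0 = 0x1000 in 20.12 format,
 *   then clamped to dfixed_const(4) (the 'max' for 32bpp), which leaves it
 *   unchanged.
 */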
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+ return dfixed_frac(in);
+}
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+ struct drm_display_mode *mode)
+{
+ /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+ unsigned int h_ref_to_sync = 0;
+ unsigned int v_ref_to_sync = 0;
+ unsigned long value;
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = (v_ref_to_sync << 16) | h_ref_to_sync;
+ tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+ value = ((mode->vsync_end - mode->vsync_start) << 16) |
+ ((mode->hsync_end - mode->hsync_start) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+ value = ((mode->vtotal - mode->vsync_end) << 16) |
+ ((mode->htotal - mode->hsync_end) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+ value = ((mode->vsync_start - mode->vdisplay) << 16) |
+ ((mode->hsync_start - mode->hdisplay) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+ value = (mode->vdisplay << 16) | mode->hdisplay;
+ tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+ return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ unsigned long *div)
+{
+ unsigned long pclk = mode->clock * 1000, rate;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_output *output = NULL;
+ struct drm_encoder *encoder;
+ long err;
+
+ list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc) {
+ output = encoder_to_output(encoder);
+ break;
+ }
+
+ if (!output)
+ return -ENODEV;
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+ return err;
+ }
+
+ rate = clk_get_rate(dc->clk);
+ *div = (rate * 2 / pclk) - 2;
+
+ DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+ return 0;
+}
+
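Worked example of the divider math in tegra_crtc_setup_clk() above, assuming the parent clock lands exactly at twice the requested pixel clock; the resulting div is what later feeds SHIFT_CLK_DIVIDER() in tegra_crtc_mode_set():

/* mode->clock = 74250 -> pclk = 74,250,000 Hz
 * rate = clk_get_rate(dc->clk) = 148,500,000 Hz (pclk * 2 was requested)
 * div  = (148500000 * 2 / 74250000) - 2 = 4 - 2 = 2
 * check: 2 * rate / (div + 2) = 297000000 / 4 = 74,250,000 Hz
 */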
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int h_dda, v_dda, bpp;
+ struct tegra_dc_window win;
+ unsigned long div, value;
+ int err;
+
+ err = tegra_crtc_setup_clk(crtc, mode, &div);
+ if (err) {
+ dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+ return err;
+ }
+
+ /* program display mode */
+ tegra_dc_set_timings(dc, mode);
+
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+ value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+ /* setup window parameters */
+ memset(&win, 0, sizeof(win));
+ win.x.full = dfixed_const(0);
+ win.y.full = dfixed_const(0);
+ win.w.full = dfixed_const(mode->hdisplay);
+ win.h.full = dfixed_const(mode->vdisplay);
+ win.outx = 0;
+ win.outy = 0;
+ win.outw = mode->hdisplay;
+ win.outh = mode->vdisplay;
+
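+ /*
+ * Map the framebuffer format to a window color depth. Note that
+ * DRM_FORMAT_XRGB8888 is laid out as B, G, R, X bytes in memory
+ * (little-endian), hence the B8G8R8A8 window format; RGB565
+ * correspondingly maps to B5G6R5.
+ */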
+ switch (crtc->fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
+
+ case DRM_FORMAT_RGB565:
+ win.fmt = WIN_COLOR_DEPTH_B5G6R5;
+ break;
+
+ default:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ WARN_ON(1);
+ break;
+ }
+
+ bpp = crtc->fb->bits_per_pixel / 8;
+ win.stride = crtc->fb->pitches[0];
+
+ /* program window registers */
+ value = WINDOW_A_SELECT;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(win.outy) | H_POSITION(win.outx);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(win.outh) | H_SIZE(win.outw);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
+ H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
+ v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(win.x);
+ v_dda = compute_initial_dda(win.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
+ tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
+ DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (crtc->fb->bits_per_pixel < 24)
+ value |= COLOR_EXPAND;
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+
+ return 0;
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int syncpt;
+ unsigned long value;
+
+ /* hardware initialization */
+ tegra_periph_reset_deassert(dc->clk);
+ usleep_range(10000, 20000);
+
+ if (dc->pipe)
+ syncpt = SYNCPT_VBLANK1;
+ else
+ syncpt = SYNCPT_VBLANK0;
+
+ /* initialize display controller */
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ /* initialize display memory high-priority thresholds and timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned long update_mask;
+ unsigned long value;
+
+ update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+
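+ /*
+ * Per dc.h, the *_UPDATE bits in DC_CMD_STATE_CONTROL are the
+ * *_ACT_REQ bits shifted left by 8, so writing update_mask << 8
+ * presumably latches the newly programmed (double-buffered) values,
+ * and the final write of update_mask below requests their activation.
+ */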
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+ .dpms = tegra_crtc_dpms,
+ .mode_fixup = tegra_crtc_mode_fixup,
+ .mode_set = tegra_crtc_mode_set,
+ .prepare = tegra_crtc_prepare,
+ .commit = tegra_crtc_commit,
+ .load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_drm_irq(int irq, void *data)
+{
+ struct tegra_dc *dc = data;
+ unsigned long status;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ if (status & FRAME_END_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+ */
+ }
+
+ if (status & VBLANK_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+ */
+ drm_handle_vblank(dc->base.dev, dc->pipe);
+ }
+
+ if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+ */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
+ tegra_dc_readl(dc, name))
+
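+ /* each dumped line shows the register name, word offset and value */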
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+ DUMP_REG(DC_COM_CRC_CONTROL);
+ DUMP_REG(DC_COM_CRC_CHECKSUM);
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+ DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+ DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+ DUMP_REG(DC_COM_SPI_CONTROL);
+ DUMP_REG(DC_COM_SPI_START_BYTE);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+ DUMP_REG(DC_COM_HSPI_CS_DC);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+ DUMP_REG(DC_COM_GPIO_CTRL);
+ DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+ DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+ DUMP_REG(DC_DISP_SD_CONTROL);
+ DUMP_REG(DC_DISP_SD_CSC_COEFF);
+ DUMP_REG(DC_DISP_SD_LUT(0));
+ DUMP_REG(DC_DISP_SD_LUT(1));
+ DUMP_REG(DC_DISP_SD_LUT(2));
+ DUMP_REG(DC_DISP_SD_LUT(3));
+ DUMP_REG(DC_DISP_SD_LUT(4));
+ DUMP_REG(DC_DISP_SD_LUT(5));
+ DUMP_REG(DC_DISP_SD_LUT(6));
+ DUMP_REG(DC_DISP_SD_LUT(7));
+ DUMP_REG(DC_DISP_SD_LUT(8));
+ DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+ DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+ DUMP_REG(DC_DISP_SD_BL_TF(0));
+ DUMP_REG(DC_DISP_SD_BL_TF(1));
+ DUMP_REG(DC_DISP_SD_BL_TF(2));
+ DUMP_REG(DC_DISP_SD_BL_TF(3));
+ DUMP_REG(DC_DISP_SD_BL_CONTROL);
+ DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+ DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INC);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+ DUMP_REG(DC_WIN_DV_CONTROL);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+ unsigned int i;
+ char *name;
+ int err;
+
+ name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+ dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ if (!dc->debugfs)
+ return -ENOMEM;
+
+ dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dc->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ dc->debugfs_files[i].data = dc;
+
+ err = drm_debugfs_create_files(dc->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ dc->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ dc->minor = minor;
+
+ return 0;
+
+free:
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+remove:
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+ drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+ dc->minor);
+ dc->minor = NULL;
+
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ dc->pipe = drm->mode_config.num_crtc;
+
+ drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&dc->base, 256);
+ drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+ err = tegra_dc_rgb_init(drm, dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_init(dc, drm->primary);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ dev_name(dc->dev), dc);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ devm_free_irq(dc->dev, dc->irq, dc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_exit(dc);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
+ err = tegra_dc_rgb_exit(dc);
+ if (err) {
+ dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+ .drm_init = tegra_dc_drm_init,
+ .drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct resource *regs;
+ struct tegra_dc *dc;
+ int err;
+
+ dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dc->list);
+ dc->dev = &pdev->dev;
+
+ dc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dc->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dc->clk);
+ }
+
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!dc->regs) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ return -ENXIO;
+ }
+
+ dc->irq = platform_get_irq(pdev, 0);
+ if (dc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
+ err = tegra_dc_rgb_probe(dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ return err;
+ }
+
+ err = host1x_register_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, dc);
+
+ return 0;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_dc *dc = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(dc->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_dc_of_match[] = {
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra20-dc", },
+ { },
+};
+
+struct platform_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegra-dc",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_dc_of_match,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 00000000000..99977b5d5c3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT (1 << 2)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+
+#define DC_CMD_STATE_ACCESS 0x040
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+#define NC_HOST_TRIG (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PIN_PM0_CONTROL 0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PIN_PM1_CONTROL 0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
+
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
+#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) << 0)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (4 << 0)
+#define DISP_DATA_FORMAT_DF3S (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB (0 << 8)
+#define DISP_ALIGNMENT_LSB (1 << 8)
+#define DISP_ORDER_RED_BLUE (0 << 9)
+#define DISP_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK (0 << 0)
+#define DE_SELECT_ACTIVE (1 << 0)
+#define DE_SELECT_ACTIVE_IS (2 << 0)
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+#define DC_DISP_SD_CONTROL 0x4c2
+#define DC_DISP_SD_CSC_COEFF 0x4c3
+#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
+#define DC_DISP_DC_PIXEL_COUNT 0x4ce
+#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS 0x4d7
+#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL 0x4dc
+#define DC_DISP_SD_HW_K_VALUES 0x4dd
+#define DC_DISP_SD_MAN_K_VALUES 0x4de
+
+#define DC_WIN_WIN_OPTIONS 0x700
+#define COLOR_EXPAND (1 << 6)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP (0 << 0)
+#define BYTE_SWAP_SWAP2 (1 << 0)
+#define BYTE_SWAP_SWAP4 (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST (0 << 0)
+#define BUFFER_CONTROL_VI (1 << 0)
+#define BUFFER_CONTROL_EPP (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH 0x703
+#define WIN_COLOR_DEPTH_P1 0
+#define WIN_COLOR_DEPTH_P2 1
+#define WIN_COLOR_DEPTH_P4 2
+#define WIN_COLOR_DEPTH_P8 3
+#define WIN_COLOR_DEPTH_B4G4R4A4 4
+#define WIN_COLOR_DEPTH_B5G5R5A 5
+#define WIN_COLOR_DEPTH_B5G6R5 6
+#define WIN_COLOR_DEPTH_AB5G5R5 7
+#define WIN_COLOR_DEPTH_B8G8R8A8 12
+#define WIN_COLOR_DEPTH_R8G8B8A8 13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422 16
+#define WIN_COLOR_DEPTH_YUV422 17
+#define WIN_COLOR_DEPTH_YCbCr420P 18
+#define WIN_COLOR_DEPTH_YUV420P 19
+#define WIN_COLOR_DEPTH_YCbCr422P 20
+#define WIN_COLOR_DEPTH_YUV422P 21
+#define WIN_COLOR_DEPTH_YCbCr422R 22
+#define WIN_COLOR_DEPTH_YUV422R 23
+#define WIN_COLOR_DEPTH_YCbCr422RA 24
+#define WIN_COLOR_DEPTH_YUV422RA 25
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0x1fff) << 0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0x1fff) << 0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INC 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_DV_CONTROL 0x70e
+
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND32WIN_XY 0x713
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 00000000000..3a503c9e468
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <mach/clk.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include "drm.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct device *dev = drm->dev;
+ struct host1x *host1x;
+ int err;
+
+ host1x = dev_get_drvdata(dev);
+ drm->dev_private = host1x;
+ host1x->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_drm_init(host1x, drm);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ return 0;
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(host1x->fbdev);
+}
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .lastclose = tegra_drm_lastclose,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 00000000000..741b5dc2742
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DRM_H
+#define TEGRA_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fixed.h>
+
+struct tegra_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj;
+};
+
+static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct tegra_framebuffer, base);
+}
+
+struct host1x {
+ struct drm_device *drm;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int syncpt;
+ int irq;
+
+ struct mutex drm_clients_lock;
+ struct list_head drm_clients;
+ struct list_head drm_active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+
+ struct drm_fbdev_cma *fbdev;
+ struct tegra_framebuffer fb;
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+ int (*drm_exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct host1x *host1x;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x *host1x);
+
+extern int host1x_register_client(struct host1x *host1x,
+ struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+ struct host1x_client client;
+
+ struct host1x *host1x;
+ struct device *dev;
+
+ struct drm_crtc base;
+ int pipe;
+
+ struct clk *clk;
+
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct tegra_dc, base);
+}
+
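+/*
+ * Display controller registers are addressed by 32-bit word offset in
+ * this driver, hence the "reg << 2" conversion to a byte offset in the
+ * accessors below.
+ */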
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+ unsigned long reg)
+{
+ writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_output_ops {
+ int (*enable)(struct tegra_output *output);
+ int (*disable)(struct tegra_output *output);
+ int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+ unsigned long pclk);
+ int (*check_mode)(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+ TEGRA_OUTPUT_RGB,
+ TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+ struct device_node *of_node;
+ struct device *dev;
+
+ const struct tegra_output_ops *ops;
+ enum tegra_output_type type;
+
+ struct i2c_adapter *ddc;
+ const struct edid *edid;
+ unsigned int hpd_irq;
+ int hpd_gpio;
+
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+ return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+ return container_of(c, struct tegra_output, connector);
+}
+
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->enable)
+ return output->ops->enable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->disable)
+ return output->ops->disable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ if (output && output->ops && output->ops->setup_clock)
+ return output->ops->setup_clock(output, clk, pclk);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ if (output && output->ops && output->ops->check_mode)
+ return output->ops->check_mode(output, mode, status);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from fb.c */
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_host1x_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 00000000000..97993c6835f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(host1x->fbdev);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = tegra_drm_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+ struct drm_fbdev_cma *fbdev;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+ fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
+
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ drm_fbdev_cma_restore_mode(fbdev);
+#endif
+
+ host1x->fbdev = fbdev;
+
+ return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_fini(host1x->fbdev);
+}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 00000000000..e060c7e6434
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_hdmi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ struct regulator *vdd;
+ struct regulator *pll;
+
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct clk *clk_parent;
+ struct clk *clk;
+
+ unsigned int audio_source;
+ unsigned int audio_freq;
+ bool stereo;
+ bool dvi;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+ AUTO = 0,
+ SPDIF,
+ HDA,
+};
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+ unsigned int pclk;
+ unsigned int n;
+ unsigned int cts;
+ unsigned int aval;
+};
+
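+/*
+ * Audio clock regeneration parameters, indexed by pixel clock. N and
+ * CTS follow the usual HDMI relation 128 * fs = pclk * N / CTS, e.g.
+ * for 48 kHz at a 25.2 MHz pixel clock: 25200000 * 6144 / 25200 =
+ * 6144000 = 128 * 48000. The aval value is what gets written to the
+ * SOR_AUDIO_AVAL registers on Tegra30, see tegra_hdmi_setup_audio().
+ */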
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ { 25200000, 4096, 25200, 24000 },
+ { 27000000, 4096, 27000, 24000 },
+ { 74250000, 4096, 74250, 24000 },
+ { 148500000, 4096, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ { 25200000, 5880, 26250, 25000 },
+ { 27000000, 5880, 28125, 25000 },
+ { 74250000, 4704, 61875, 20000 },
+ { 148500000, 4704, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ { 25200000, 6144, 25200, 24000 },
+ { 27000000, 6144, 27000, 24000 },
+ { 74250000, 6144, 74250, 24000 },
+ { 148500000, 6144, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+ { 25200000, 11760, 26250, 25000 },
+ { 27000000, 11760, 28125, 25000 },
+ { 74250000, 9408, 61875, 20000 },
+ { 148500000, 9408, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+ { 25200000, 12288, 25200, 24000 },
+ { 27000000, 12288, 27000, 24000 },
+ { 74250000, 12288, 74250, 24000 },
+ { 148500000, 12288, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+ { 25200000, 23520, 26250, 25000 },
+ { 27000000, 23520, 28125, 25000 },
+ { 74250000, 18816, 61875, 20000 },
+ { 148500000, 18816, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+ { 25200000, 24576, 25200, 24000 },
+ { 27000000, 24576, 27000, 24000 },
+ { 74250000, 24576, 74250, 24000 },
+ { 148500000, 24576, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+};
+
+static const struct tmds_config tegra2_tmds_config[] = {
+ { /* slow pixel clock modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* high pixel clock modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 1080p modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+};
+
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case 32000:
+ table = tegra_hdmi_audio_32k;
+ break;
+
+ case 44100:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+
+ case 48000:
+ table = tegra_hdmi_audio_48k;
+ break;
+
+ case 88200:
+ table = tegra_hdmi_audio_88_2k;
+ break;
+
+ case 96000:
+ table = tegra_hdmi_audio_96k;
+ break;
+
+ case 176400:
+ table = tegra_hdmi_audio_176_4k;
+ break;
+
+ case 192000:
+ table = tegra_hdmi_audio_192k;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ while (table->pclk) {
+ if (table->pclk == pclk)
+ return table;
+
+ table++;
+ }
+
+ return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+ const unsigned int freqs[] = {
+ 32000, 44100, 48000, 88200, 96000, 176400, 192000
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned int f = freqs[i];
+ unsigned int eight_half;
+ unsigned long value;
+ unsigned int delta;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 48000)
+ delta = 6;
+ else
+ delta = 9;
+
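+ /*
+ * eight_half is 8 * 216 MHz (HDMI_AUDIOCLK_FREQ) divided by 128 * fs;
+ * the AUDIO_FS register is then programmed with a window from
+ * eight_half - delta to eight_half + delta around that value.
+ */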
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ value = AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+ struct device_node *node = hdmi->dev->of_node;
+ const struct tegra_hdmi_audio_config *config;
+ unsigned int offset = 0;
+ unsigned long value;
+
+ switch (hdmi->audio_source) {
+ case HDA:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ break;
+
+ case SPDIF:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ break;
+
+ default:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ break;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ } else {
+ value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+ value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ }
+
+ config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ if (!config) {
+ dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
+ hdmi->audio_freq, pclk);
+ return -EINVAL;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+ AUDIO_N_VALUE(config->n - 1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ value = ACR_SUBPACK_CTS(config->cts);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+ value &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ switch (hdmi->audio_freq) {
+ case 32000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+ break;
+
+ case 44100:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+ break;
+
+ case 48000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+ break;
+
+ case 88200:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+ break;
+
+ case 96000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+ break;
+
+ case 176400:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+ break;
+
+ case 192000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+ break;
+ }
+
+ tegra_hdmi_writel(hdmi, config->aval, offset);
+ }
+
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+ return 0;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
+ unsigned int offset, u8 type,
+ u8 version, void *data, size_t size)
+{
+ unsigned long value;
+ u8 *ptr = data;
+ u32 subpack[2];
+ size_t i;
+ u8 csum;
+
+ /* first byte of data is the checksum */
+ csum = type + version + size - 1;
+
+ for (i = 1; i < size; i++)
+ csum += ptr[i];
+
+ ptr[0] = 0x100 - csum;
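+ /*
+ * ptr[0] now makes the 8-bit sum of the header bytes (type, version,
+ * length) and the payload equal zero, as required for infoframe
+ * checksums.
+ */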
+
+ value = INFOFRAME_HEADER_TYPE(type) |
+ INFOFRAME_HEADER_VERSION(version) |
+ INFOFRAME_HEADER_LEN(size - 1);
+ tegra_hdmi_writel(hdmi, value, offset);
+
+ /* The audio infoframe only has one set of subpack registers. The HDMI
+ * block pads the rest of the data as per the spec, so we have to fix up
+ * the length before filling in the subpacks.
+ */
+ if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+ size = 6;
+
+ /* each subpack is 7 bytes, divided into:
+ * subpack_low - bytes 0 - 3
+ * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 0; i < size; i++) {
+ size_t index = i % 7;
+
+ if (index == 0)
+ memset(subpack, 0x0, sizeof(subpack));
+
+ ((u8 *)subpack)[index] = ptr[i];
+
+ if (index == 6 || (i + 1 == size)) {
+ unsigned int reg = offset + 1 + (i / 7) * 2;
+
+ tegra_hdmi_writel(hdmi, subpack[0], reg);
+ tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+ }
+ }
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ unsigned int h_front_porch;
+ unsigned int hsize = 16;
+ unsigned int vsize = 9;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ return;
+ }
+
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ memset(&frame, 0, sizeof(frame));
+ frame.r = HDMI_AVI_R_SAME;
+
+ switch (mode->vdisplay) {
+ case 480:
+ if (mode->hdisplay == 640) {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 1;
+ } else {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 3;
+ }
+ break;
+
+ case 576:
+ if (((hsize * 10) / vsize) > 14) {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 18;
+ } else {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 17;
+ }
+ break;
+
+ case 720:
+ case 1470: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ if (h_front_porch == 110)
+ frame.vic = 4;
+ else
+ frame.vic = 19;
+ break;
+
+ case 1080:
+ case 2205: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ switch (h_front_porch) {
+ case 88:
+ frame.vic = 16;
+ break;
+
+ case 528:
+ frame.vic = 31;
+ break;
+
+ default:
+ frame.vic = 32;
+ break;
+ }
+ break;
+
+ default:
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 0;
+ break;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
+ frame.cc = HDMI_AUDIO_CC_2;
+
+ tegra_hdmi_write_infopack(hdmi,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AUDIO,
+ HDMI_AUDIO_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_stereo_infoframe frame;
+ unsigned long value;
+
+ if (!hdmi->stereo) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
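+ /* IEEE OUI 0x000c03 (HDMI vendor infoframe), least significant byte first */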
+ frame.regid0 = 0x03;
+ frame.regid1 = 0x0c;
+ frame.regid2 = 0x00;
+ frame.hdmi_video_format = 2;
+
+ /* TODO: 74 MHz limit? */
+ if (1) {
+ frame._3d_structure = 0;
+ } else {
+ frame._3d_structure = 8;
+ frame._3d_ext_data = 0;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_VENDOR_VERSION, &frame, 6);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+ const struct tmds_config *tmds)
+{
+ unsigned long value;
+
+ tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+ tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+ value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+ unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct device_node *node = hdmi->dev->of_node;
+ unsigned int pulse_start, div82, pclk;
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+ unsigned long value;
+ int retries = 1000;
+ int err;
+
+ pclk = mode->clock * 1000;
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_back_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(hdmi->pll);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+ return err;
+ }
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(hdmi->clk, pclk);
+ if (err < 0)
+ return err;
+
+ err = clk_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ tegra_periph_reset_assert(hdmi->clk);
+ usleep_range(1000, 2000);
+ tegra_periph_reset_deassert(hdmi->clk);
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+ DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+ tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ if (dc->pipe)
+ value = HDMI_SRC_DISPLAYB;
+ else
+ value = HDMI_SRC_DISPLAYA;
+
+ if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+ (mode->vdisplay == 576)))
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_FULL,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+ else
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
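+ /*
+ * The SOR reference clock divider is programmed in 0.25 MHz steps:
+ * an integer MHz part plus a 2-bit fractional part.
+ */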
+ div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+ if (!hdmi->dvi) {
+ err = tegra_hdmi_setup_audio(hdmi, pclk);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+ /*
+ * TODO: add ELD support
+ */
+ }
+
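+ /*
+ * Program the rekey interval and the maximum number of data packets
+ * (in units of 32 pixels) that fit into the horizontal blanking
+ * interval.
+ */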
+ rekey = HDMI_REKEY_DEFAULT;
+ value = HDMI_CTRL_REKEY(rekey);
+ value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+ h_front_porch - rekey - 18) / 32);
+
+ if (!hdmi->dvi)
+ value |= HDMI_CTRL_ENABLE;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (hdmi->dvi)
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ else
+ tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+ /* TMDS CONFIG */
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+ tmds = tegra3_tmds_config;
+ } else {
+ num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+ tmds = tegra2_tmds_config;
+ }
+
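+ /* use the first TMDS configuration that supports the pixel clock */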
+ for (i = 0; i < num_tmds; i++) {
+ if (pclk <= tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+ break;
+ }
+ }
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+ value = 0x1c800;
+ value &= ~SOR_CSTM_ROTCLK(~0);
+ value |= SOR_CSTM_ROTCLK(2);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* start SOR */
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
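+ /* wait for the new power state settings to be latched */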
+ do {
+ BUG_ON(--retries < 0);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+ value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ SOR_STATE_ASY_DEPOL_POS;
+
+ /* setup sync polarities */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+ value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* TODO: add HDCP support */
+
+ return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ tegra_periph_reset_assert(hdmi->clk);
+ clk_disable(hdmi->clk);
+ regulator_disable(hdmi->pll);
+ regulator_disable(hdmi->vdd);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct clk *base;
+ int err;
+
+ err = clk_set_parent(clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(output->dev, "failed to set parent: %d\n", err);
+ return err;
+ }
+
+ base = clk_get_parent(hdmi->clk_parent);
+
+ /*
+ * This assumes that the parent clock is either pll_d_out0 or
+ * pll_d2_out, each of which divides the base pll_d by 2.
+ */
+ err = clk_set_rate(base, pclk * 2);
+ if (err < 0)
+ dev_err(output->dev,
+ "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long pclk = mode->clock * 1000;
+ struct clk *parent;
+ long err;
+
+ parent = clk_get_parent(hdmi->clk_parent);
+
+ err = clk_round_rate(parent, pclk * 4);
+ if (err < 0)
+ *status = MODE_NOCLOCK;
+ else
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+ .enable = tegra_output_hdmi_enable,
+ .disable = tegra_output_hdmi_disable,
+ .setup_clock = tegra_output_hdmi_setup_clock,
+ .check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
+ tegra_hdmi_readl(hdmi, name))
+
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+ struct drm_minor *minor)
+{
+ unsigned int i;
+ int err;
+
+ hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+ if (!hdmi->debugfs)
+ return -ENOMEM;
+
+ hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!hdmi->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ hdmi->debugfs_files[i].data = hdmi;
+
+ err = drm_debugfs_create_files(hdmi->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ hdmi->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ hdmi->minor = minor;
+
+ return 0;
+
+free:
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+remove:
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+ drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+ hdmi->minor);
+ hdmi->minor = NULL;
+
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ hdmi->output.type = TEGRA_OUTPUT_HDMI;
+ hdmi->output.dev = client->dev;
+ hdmi->output.ops = &hdmi_ops;
+
+ err = tegra_output_init(drm, &hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+ if (err < 0)
+ dev_err(client->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_exit(hdmi);
+ if (err < 0)
+ dev_err(client->dev, "debugfs cleanup failed: %d\n",
+ err);
+ }
+
+ err = tegra_output_disable(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+ .drm_init = tegra_hdmi_drm_init,
+ .drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi;
+ struct resource *regs;
+ int err;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = &pdev->dev;
+ hdmi->audio_source = AUTO;
+ hdmi->audio_freq = 44100;
+ hdmi->stereo = false;
+ hdmi->dvi = false;
+
+ hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdmi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(hdmi->clk);
+ }
+
+ err = clk_prepare(hdmi->clk);
+ if (err < 0)
+ return err;
+
+ hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(hdmi->clk_parent))
+ return PTR_ERR(hdmi->clk_parent);
+
+ err = clk_prepare(hdmi->clk_parent);
+ if (err < 0)
+ return err;
+
+ err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+ return err;
+ }
+
+ hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(hdmi->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD regulator\n");
+ return PTR_ERR(hdmi->vdd);
+ }
+
+ hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+ if (IS_ERR(hdmi->pll)) {
+ dev_err(&pdev->dev, "failed to get PLL regulator\n");
+ return PTR_ERR(hdmi->pll);
+ }
+
+ hdmi->output.dev = &pdev->dev;
+
+ err = tegra_output_parse_dt(&hdmi->output);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!hdmi->regs)
+ return -EADDRNOTAVAIL;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ hdmi->irq = err;
+
+ hdmi->client.ops = &hdmi_client_ops;
+ INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.dev = &pdev->dev;
+
+ err = host1x_register_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_unprepare(hdmi->clk_parent);
+ clk_unprepare(hdmi->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+ .driver = {
+ .name = "tegra-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_hdmi_of_match,
+ },
+ .probe = tegra_hdmi_probe,
+ .remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 00000000000..1477f36eb45
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned s:2; /* scan information */
+ unsigned b:2; /* bar info data valid */
+ unsigned a:1; /* active info present */
+ unsigned y:2; /* RGB or YCbCr */
+ unsigned res1:1;
+
+ /* PB2 */
+ unsigned r:4; /* active format aspect ratio */
+ unsigned m:2; /* picture aspect ratio */
+ unsigned c:2; /* colorimetry */
+
+ /* PB3 */
+ unsigned sc:2; /* non-uniform picture scaling */
+ unsigned q:2; /* quantization range */
+ unsigned ec:3; /* extended colorimetry */
+ unsigned itc:1; /* IT content */
+
+ /* PB4 */
+ unsigned vic:7; /* video format id code */
+ unsigned res4:1;
+
+ /* PB5 */
+ unsigned pr:4; /* pixel repetition factor */
+ unsigned cn:2; /* IT content type */
+ unsigned yq:2; /* ycc quantization range */
+
+ /* PB6-7 */
+ u16 top_bar_end_line;
+
+ /* PB8-9 */
+ u16 bot_bar_start_line;
+
+ /* PB10-11 */
+ u16 left_bar_end_pixel;
+
+ /* PB12-13 */
+ u16 right_bar_start_pixel;
+} __packed;
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned cc:3; /* channel count */
+ unsigned res1:1;
+ unsigned ct:4; /* coding type */
+
+ /* PB2 */
+ unsigned ss:2; /* sample size */
+ unsigned sf:3; /* sample frequency */
+ unsigned res2:3;
+
+ /* PB3 */
+ unsigned cxt:5; /* coding extension type */
+ unsigned res3:3;
+
+ /* PB4 */
+ u8 ca; /* channel/speaker allocation */
+
+ /* PB5 */
+ unsigned res5:3;
+ unsigned lsv:4; /* level shift value */
+ unsigned dm_inh:1; /* downmix inhibit */
+
+ /* PB6-10 reserved */
+ u8 res6;
+ u8 res7;
+ u8 res8;
+ u8 res9;
+ u8 res10;
+} __packed;
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUIDO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ u8 regid0;
+
+ /* PB2 */
+ u8 regid1;
+
+ /* PB3 */
+ u8 regid2;
+
+ /* PB4 */
+ unsigned res1:5;
+ unsigned hdmi_video_format:3;
+
+ /* PB5 */
+ unsigned res2:4;
+ unsigned _3d_structure:4;
+
+ /* PB6 */
+ unsigned res3:4;
+ unsigned _3d_ext_data:4;
+} __packed;
+
+#define HDMI_VENDOR_VERSION 0x01
+
+/* register definitions */
+#define HDMI_CTXSW 0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
+#define ACR_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCAPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 00000000000..5d17b113a6f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+
+struct host1x_drm_client {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
+{
+ struct host1x_drm_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->list);
+ client->np = of_node_get(np);
+
+ list_add_tail(&client->list, &host1x->drm_clients);
+
+ return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *drm,
+ struct host1x_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&drm->list);
+ list_add_tail(&drm->list, &host1x->drm_active);
+ drm->client = client;
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ of_node_put(client->np);
+ kfree(client);
+
+ return 0;
+}
+
+static int host1x_parse_dt(struct host1x *host1x)
+{
+ static const char * const compat[] = {
+ "nvidia,tegra20-dc",
+ "nvidia,tegra20-hdmi",
+ "nvidia,tegra30-dc",
+ "nvidia,tegra30-hdmi",
+ };
+ unsigned int i;
+ int err;
+
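+ /* add every available display controller and HDMI node as a DRM client */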
+ for (i = 0; i < ARRAY_SIZE(compat); i++) {
+ struct device_node *np;
+
+ for_each_child_of_node(host1x->dev->of_node, np) {
+ if (of_device_is_compatible(np, compat[i]) &&
+ of_device_is_available(np)) {
+ err = host1x_add_drm_client(host1x, np);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_host1x_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x;
+ struct resource *regs;
+ int err;
+
+ host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+ if (!host1x)
+ return -ENOMEM;
+
+ mutex_init(&host1x->drm_clients_lock);
+ INIT_LIST_HEAD(&host1x->drm_clients);
+ INIT_LIST_HEAD(&host1x->drm_active);
+ mutex_init(&host1x->clients_lock);
+ INIT_LIST_HEAD(&host1x->clients);
+ host1x->dev = &pdev->dev;
+
+ err = host1x_parse_dt(host1x);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+ return err;
+ }
+
+ host1x->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host1x->clk))
+ return PTR_ERR(host1x->clk);
+
+ err = clk_prepare_enable(host1x->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ err = -ENXIO;
+ goto err;
+ }
+
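+ /* interrupt 0 is used for syncpoints, interrupt 1 is the general interrupt */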
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto err;
+
+ host1x->syncpt = err;
+
+ err = platform_get_irq(pdev, 1);
+ if (err < 0)
+ goto err;
+
+ host1x->irq = err;
+
+ host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!host1x->regs) {
+ err = -EADDRNOTAVAIL;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, host1x);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(host1x->clk);
+ return err;
+}
+
+static int tegra_host1x_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(host1x->clk);
+
+ return 0;
+}
+
+int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
+{
+ struct host1x_client *client;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_init) {
+ int err = client->ops->drm_init(client, drm);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM setup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+int host1x_drm_exit(struct host1x *host1x)
+{
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+ struct host1x_client *client;
+
+ if (!host1x->drm)
+ return 0;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry_reverse(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_exit) {
+ int err = client->ops->drm_exit(client);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM cleanup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ drm_platform_exit(&tegra_drm_driver, pdev);
+ host1x->drm = NULL;
+
+ return 0;
+}
+
+int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ mutex_lock(&host1x->clients_lock);
+ list_add_tail(&client->list, &host1x->clients);
+ mutex_unlock(&host1x->clients_lock);
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+ if (drm->np == client->dev->of_node)
+ host1x_activate_drm_client(host1x, drm, client);
+
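+ /* initialize the DRM device once all DRM clients found in the DT have registered */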
+ if (list_empty(&host1x->drm_clients)) {
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+
+ err = drm_platform_init(&tegra_drm_driver, pdev);
+ if (err < 0) {
+ dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+ return err;
+ }
+ }
+
+ client->host1x = host1x;
+
+ return 0;
+}
+
+int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+ if (drm->client == client) {
+ err = host1x_drm_exit(host1x);
+ if (err < 0) {
+ dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+ err);
+ return err;
+ }
+
+ host1x_remove_drm_client(host1x, drm);
+ break;
+ }
+ }
+
+ mutex_lock(&host1x->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+static struct of_device_id tegra_host1x_of_match[] = {
+ { .compatible = "nvidia,tegra30-host1x", },
+ { .compatible = "nvidia,tegra20-host1x", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
+
+struct platform_driver tegra_host1x_driver = {
+ .driver = {
+ .name = "tegra-host1x",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_host1x_of_match,
+ },
+ .probe = tegra_host1x_probe,
+ .remove = tegra_host1x_remove,
+};
+
+static int __init tegra_host1x_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ return 0;
+
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+ return err;
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+ platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 00000000000..8140fc6c34d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct edid *edid = NULL;
+ int err = 0;
+
+ if (output->edid)
+ edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ else if (output->ddc)
+ edid = drm_get_edid(connector, output->ddc);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ err = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_mode_status status = MODE_OK;
+ int err;
+
+ err = tegra_output_check_mode(output, mode, &status);
+ if (err < 0)
+ return MODE_ERROR;
+
+ return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+
+ return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = tegra_connector_get_modes,
+ .mode_valid = tegra_connector_mode_valid,
+ .best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_connector_status status = connector_status_unknown;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ if (gpio_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ status = connector_status_connected;
+ }
+
+ return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = tegra_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ int err;
+
+ err = tegra_output_enable(output);
+ if (err < 0)
+ dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = tegra_encoder_dpms,
+ .mode_fixup = tegra_encoder_mode_fixup,
+ .prepare = tegra_encoder_prepare,
+ .commit = tegra_encoder_commit,
+ .mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+ struct tegra_output *output = data;
+
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+ enum of_gpio_flags flags;
+ struct device_node *ddc;
+ size_t size;
+ int err;
+
+ if (!output->of_node)
+ output->of_node = output->dev->of_node;
+
+ output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+ ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+ if (ddc) {
+ output->ddc = of_find_i2c_adapter_by_node(ddc);
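+ /* the I2C adapter may not have been probed yet, so defer */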
+ if (!output->ddc) {
+ err = -EPROBE_DEFER;
+ of_node_put(ddc);
+ return err;
+ }
+
+ of_node_put(ddc);
+ }
+
+ if (!output->edid && !output->ddc)
+ return -ENODEV;
+
+ output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+ "nvidia,hpd-gpio", 0,
+ &flags);
+
+ return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector, encoder, err;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ unsigned long flags;
+
+ err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+ "HDMI hotplug detect");
+ if (err < 0) {
+ dev_err(output->dev, "gpio_request_one(): %d\n", err);
+ return err;
+ }
+
+ err = gpio_to_irq(output->hpd_gpio);
+ if (err < 0) {
+ dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+ goto free_hpd;
+ }
+
+ output->hpd_irq = err;
+
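+ /* trigger on both edges to detect both plug and unplug events */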
+ flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT;
+
+ err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+ flags, "hpd", output);
+ if (err < 0) {
+ dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ output->hpd_irq, err);
+ goto free_hpd;
+ }
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ switch (output->type) {
+ case TEGRA_OUTPUT_RGB:
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ break;
+
+ case TEGRA_OUTPUT_HDMI:
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ break;
+
+ default:
+ connector = DRM_MODE_CONNECTOR_Unknown;
+ encoder = DRM_MODE_ENCODER_NONE;
+ break;
+ }
+
+ drm_connector_init(drm, &output->connector, &connector_funcs,
+ connector);
+ drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+ drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+ drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+ drm_sysfs_connector_add(&output->connector);
+
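+ /* outputs can be attached to either of the two display controllers */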
+ output->encoder.possible_crtcs = 0x3;
+
+ return 0;
+
+free_hpd:
+ gpio_free(output->hpd_gpio);
+
+ return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+ if (gpio_is_valid(output->hpd_gpio)) {
+ free_irq(output->hpd_irq, output);
+ gpio_free(output->hpd_gpio);
+ }
+
+ if (output->ddc)
+ put_device(&output->ddc->dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 00000000000..ed4416f2026
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+ struct tegra_output output;
+ struct clk *clk_parent;
+ struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+ unsigned long offset;
+ unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+ const struct reg_entry *table,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_rgb *rgb = to_rgb(output);
+
+ return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay. There are
+ * unresolved issues with clk_round_rate(), which doesn't always
+ * reliably report whether a frequency can be set or not.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops rgb_ops = {
+ .enable = tegra_output_rgb_enable,
+ .disable = tegra_output_rgb_disable,
+ .setup_clock = tegra_output_rgb_setup_clock,
+ .check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ struct tegra_rgb *rgb;
+ int err;
+
+ np = of_get_child_by_name(dc->dev->of_node, "rgb");
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return -ENOMEM;
+
+ rgb->clk = devm_clk_get(dc->dev, NULL);
+ if (IS_ERR(rgb->clk)) {
+ dev_err(dc->dev, "failed to get clock\n");
+ return PTR_ERR(rgb->clk);
+ }
+
+ rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ if (IS_ERR(rgb->clk_parent)) {
+ dev_err(dc->dev, "failed to get parent clock\n");
+ return PTR_ERR(rgb->clk_parent);
+ }
+
+ err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+
+ err = tegra_output_parse_dt(&rgb->output);
+ if (err < 0)
+ return err;
+
+ dc->rgb = &rgb->output;
+
+ return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct tegra_rgb *rgb = to_rgb(dc->rgb);
+ int err;
+
+ if (!dc->rgb)
+ return -ENODEV;
+
+ rgb->output.type = TEGRA_OUTPUT_RGB;
+ rgb->output.ops = &rgb_ops;
+
+ err = tegra_output_init(dc->base.dev, &rgb->output);
+ if (err < 0) {
+ dev_err(dc->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ /*
+ * By default, outputs can be associated with each display controller.
+ * RGB outputs are an exception, so we make sure they can be attached
+ * to only their parent display controller.
+ */
+ rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+ return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+ if (dc->rgb) {
+ int err;
+
+ err = tegra_output_disable(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ dc->rgb = NULL;
+ }
+
+ return 0;
+}
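
The possible_crtcs handling above is a plain bitmask: tegra_output_init() defaults to 0x3 (either display controller), while tegra_dc_rgb_init() narrows an RGB output to 1 << dc->pipe so it can only scan out from its parent controller. A trivial standalone illustration of the two masks (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int default_mask = 0x3;	/* CRTC 0 or CRTC 1 */
	unsigned int pipe;

	for (pipe = 0; pipe < 2; pipe++)
		printf("pipe %u: rgb-only mask 0x%x, generic default 0x%x\n",
		       pipe, 1u << pipe, default_mask);
	return 0;
}
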
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b..33d20be87db 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->reserved) == 0);
+ !ttm_bo_is_reserved(bo));
} else {
- wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
return 0;
}
}
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
return ret;
}
+ atomic_set(&bo->reserved, 1);
if (use_sequence) {
/**
* Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, mem);
+ no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -487,40 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
/*
- * Make processes trying to reserve really pick it up.
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
*/
- smp_mb__after_atomic_dec();
- wake_up_all(&bo->event_queue);
+ smp_mb__before_atomic_dec();
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver;
+ struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
- void *sync_obj_arg;
int put_count;
int ret;
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
- if (!bo->sync_obj) {
-
- spin_lock(&glob->lru_lock);
-
- /**
- * Lock inversion between bo:reserve and bdev::fence_lock here,
- * but that's OK, since we're only trylocking.
- */
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY))
- goto queue;
-
+ if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
@@ -530,22 +524,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_list_ref_sub(bo, put_count, true);
return;
- } else {
- spin_lock(&glob->lru_lock);
}
-queue:
- driver = bdev->driver;
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
+ spin_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bdev->fence_lock);
if (sync_obj) {
- driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
schedule_delayed_work(&bdev->wq,
@@ -553,68 +547,84 @@ queue:
}
/**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
* @interruptible Any sleeps should occur interruptibly.
- * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
*/
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_reserve,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
- int ret = 0;
+ int ret;
-retry:
spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
- if (unlikely(ret != 0))
- return ret;
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
-retry_reserve:
- spin_lock(&glob->lru_lock);
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- return 0;
- }
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
- if (unlikely(ret != 0))
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
return ret;
- goto retry_reserve;
- }
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
- BUG_ON(ret != 0);
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread recently starting an accelerated
- * eviction.
- */
+ /*
+ * We raced and lost: someone else holds the reservation now and
+ * is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if that is not the case, we have finished waiting, so any
+ * delayed destruction would succeed anyway; just return success
+ * here.
+ */
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(bo->sync_obj)) {
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
- !remove_all);
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ spin_unlock(&glob->lru_lock);
+
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
@@ -697,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
@@ -708,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
- write_lock(&bdev->vm_lock);
}
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
- struct ttm_bo_device *bdev = bo->bdev;
*p_bo = NULL;
- write_lock(&bdev->vm_lock);
kref_put(&bo->kref, ttm_bo_release);
- write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -738,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
@@ -756,7 +767,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -769,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -794,49 +805,33 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
- int ret, put_count = 0;
+ int ret = -EBUSY, put_count;
-retry:
spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
}
- bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (ret) {
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(bo, interruptible,
- no_wait_reserve, no_wait_gpu);
- kref_put(&bo->list_kref, ttm_bo_release_list);
-
return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
+ kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
-
- /**
- * We *need* to retry after releasing the lru lock.
- */
-
- if (unlikely(ret != 0))
- return ret;
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +841,7 @@ retry:
ttm_bo_list_ref_sub(bo, put_count, true);
- ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -950,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1054,26 +1048,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
- if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
- return -EBUSY;
-
- return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/*
* FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1111,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1179,7 +1166,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
@@ -1200,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
pr_err("Illegal buffer object size\n");
@@ -1233,7 +1218,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->seq_valid = false;
@@ -1257,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1306,7 +1290,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
@@ -1321,8 +1304,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL, NULL);
+ interruptible, persistent_swap_storage, acc_size,
+ NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1344,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1577,7 +1560,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
goto out_no_addr_mm;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- bdev->nice_mode = true;
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
bdev->glob = glob;
@@ -1721,7 +1703,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
- void *sync_obj_arg;
int ret = 0;
if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1710,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
while (bo->sync_obj) {
- if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1724,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return -EBUSY;
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
spin_unlock(&bdev->fence_lock);
- ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+ ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1733,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return ret;
}
spin_lock(&bdev->fence_lock);
- if (likely(bo->sync_obj == sync_obj &&
- bo->sync_obj_arg == sync_obj_arg)) {
+ if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1776,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
- if (atomic_dec_and_test(&bo->cpu_writers))
- wake_up_all(&bo->event_queue);
+ atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
@@ -1817,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
- while (ret == -EBUSY) {
- if (unlikely(list_empty(&glob->swap_lru))) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- bo = list_first_entry(&glob->swap_lru,
- struct ttm_buffer_object, swap);
- kref_get(&bo->list_kref);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
- if (!list_empty(&bo->ddestroy)) {
- spin_unlock(&glob->lru_lock);
- (void) ttm_bo_cleanup_refs(bo, false, false, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- continue;
- }
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return ret;
+ }
- /**
- * Reserve buffer. Since we unlock while sleeping, we need
- * to re-check that nobody removed us from the swap-list while
- * we slept.
- */
+ kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait_unreserved(bo, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- }
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
}
- BUG_ON(ret != 0);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
@@ -1876,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false, false);
+ false, false);
if (unlikely(ret != 0))
goto out;
}
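
The ttm_bo.c rework above replaces the old reserve/sleep/retry loops in ttm_mem_evict_first() and ttm_bo_swapout() with a single pass over the LRU under lru_lock, picking the first buffer whose trylock-style reservation succeeds. A small userspace model of that pattern (the obj/evict_first names are invented for this sketch, and the reserved flag stands in for ttm_bo_reserve_locked(); illustrative only, not part of the patch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	const char *name;
	bool reserved;		/* protected by lru_lock in this model */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk the LRU under the lock and take the first reservable object. */
static struct obj *evict_first(struct obj *lru, int n)
{
	struct obj *victim = NULL;
	int i;

	pthread_mutex_lock(&lru_lock);
	for (i = 0; i < n; i++) {
		if (!lru[i].reserved) {		/* trylock-style reserve */
			lru[i].reserved = true;
			victim = &lru[i];
			break;
		}
	}
	pthread_mutex_unlock(&lru_lock);
	return victim;				/* NULL maps to -EBUSY in the driver */
}

int main(void)
{
	struct obj lru[] = {
		{ "bo0", true },		/* busy: someone else reserved it */
		{ "bo1", false },
		{ "bo2", false },
	};
	struct obj *victim = evict_first(lru, 3);

	printf("evicting %s\n", victim ? victim->name : "nothing (-EBUSY)");
	return 0;
}
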
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e..9e9c5d2a5c7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -611,8 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- void *sync_obj_arg,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
@@ -630,7 +629,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4b..74705f329d9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c..cd9e4523dc5 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_cpu(bo, false);
- if (ret)
- return ret;
- goto retry;
+ return -EBUSY;
}
}
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
driver = bdev->driver;
glob = bo->glob;
- spin_lock(&bdev->fence_lock);
spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = entry->new_sync_obj_arg;
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
- spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467c..dbc2def887c 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
- init_waitqueue_head(&glob->queue);
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956..58a5f3261c0 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
*/
struct ttm_object_device {
- rwlock_t object_lock;
+ spinlock_t object_lock;
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
- write_lock(&tdev->object_lock);
kref_init(&base->refcount);
- ret = drm_ht_just_insert_please(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
container_of(kref, struct ttm_base_object, refcount);
struct ttm_object_device *tdev = base->tfile->tdev;
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
+
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
}
- write_lock(&tdev->object_lock);
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
- struct ttm_object_device *tdev = base->tfile->tdev;
*p_base = NULL;
- /*
- * Need to take the lock here to avoid racing with
- * users trying to look up the object.
- */
-
- write_lock(&tdev->object_lock);
kref_put(&base->refcount, ttm_release_base);
- write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- read_lock(&tdev->object_lock);
- ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
- kref_get(&base->refcount);
+ ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
}
- read_unlock(&tdev->object_lock);
+ rcu_read_unlock();
if (unlikely(ret != 0))
return NULL;
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
return NULL;
tdev->mem_glob = mem_glob;
- rwlock_init(&tdev->object_lock);
+ spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- write_lock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- write_unlock(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
kfree(tdev);
}
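
Both the ttm_bo_vm.c lookup and ttm_base_object_lookup() above now rely on kref_get_unless_zero()-style semantics: under rcu_read_lock() (or the vm_lock read side) a reference is taken only while the refcount is still non-zero, so a lookup that races with the final unref simply fails instead of resurrecting a dying object. A standalone C11 sketch of that rule (ref_get_unless_zero() here is a userspace stand-in, not the kernel helper; not part of the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool ref_get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* On failure, old is reloaded with the current value and we retry. */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* took a reference on a live object */
	}
	return false;			/* object is already being released */
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live object:  %s\n",
	       ref_get_unless_zero(&live) ? "got ref" : "lookup fails");
	printf("dying object: %s\n",
	       ref_get_unless_zero(&dying) ? "got ref" : "lookup fails");
	return 0;
}
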
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf674..512f44add89 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -84,7 +84,8 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder*
+udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
@@ -97,8 +98,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
return encoder;
}
-int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
- uint64_t val)
+static int udl_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
{
return 0;
}
@@ -110,13 +112,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
.best_encoder = udl_best_single_encoder,
};
-struct drm_connector_funcs udl_connector_funcs = {
+static struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_sysfs_connector_add(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11..2cc6cd91ac1 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_surface.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 00000000000..8369c3ba10f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ * 1. Red, bump W, luminance and depth are stored in the first channel.
+ * 2. Green, bump V and stencil are stored in the second channel.
+ * 3. Blue and bump U are stored in the third channel.
+ * 4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte in length.
+ * 3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type) \
+ struct { \
+ union { \
+ type blue; \
+ type u; \
+ type uv_video; \
+ type u_video; \
+ }; \
+ union { \
+ type green; \
+ type v; \
+ type stencil; \
+ type v_video; \
+ }; \
+ union { \
+ type red; \
+ type w; \
+ type luminance; \
+ type y; \
+ type depth; \
+ type data; \
+ }; \
+ union { \
+ type alpha; \
+ type q; \
+ type exp; \
+ }; \
+ }
+
+struct svga3d_surface_desc {
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ struct {
+ u32 total;
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_depth;
+
+ struct {
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
+ {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
+ {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
+ {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
+ {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
+ {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
+ {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
+ {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
+
+ {SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
+
+ {SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
+ {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
+
+ {SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
+
+ {SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
+
+ {SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
+ {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
+ {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+ uint64_t tmp = (uint64_t) a*b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
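+
+/*
+ * Informal example of the saturating multiply above (values are
+ * illustrative only): clamped_umul32(3, 4) returns 12, while
+ * clamped_umul32(0x10000, 0x10000) would mathematically be 0x100000000,
+ * which exceeds the 32-bit range and is therefore clamped to 0xffffffff.
+ */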
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
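+
+/*
+ * Worked example (illustrative): for a 256x128x1 base level, mip level 3
+ * yields max(256 >> 3, 1) x max(128 >> 3, 1) x max(1 >> 3, 1) = 32x16x1.
+ * Shifting a dimension all the way to zero clamps it back to 1, so
+ * sufficiently high mip levels degenerate to 1x1x1.
+ */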
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
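+
+/*
+ * Illustrative pitch calculations based on the descriptor table above:
+ * a 640-pixel-wide SVGA3D_R8G8_UNORM image uses 1x1 blocks of 2 bytes,
+ * giving DIV_ROUND_UP(640, 1) * 2 = 1280 bytes per row, while a
+ * 256-pixel-wide SVGA3D_BC1_TYPELESS image uses 4x4 blocks of 8 bytes,
+ * giving DIV_ROUND_UP(256, 4) * 8 = 512 bytes per row of blocks.
+ */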
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
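+
+/*
+ * Illustrative example: a 256x256x1 SVGA3D_BC1_TYPELESS image is
+ * 64x64x1 blocks of 8 bytes; with a tightly packed pitch of
+ * 64 * 8 = 512 bytes the slice size is 64 * 512 = 32768 bytes, which is
+ * also the total image buffer size since the depth is 1.
+ */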
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ bool cubemap)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ if (cubemap)
+ total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+ return total_size;
+}
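+
+/*
+ * Illustrative example: an 8x8 SVGA3D_R8G8_UNORM surface (2 bytes per
+ * pixel) with 4 mip levels serializes to 128 + 32 + 8 + 2 = 170 bytes
+ * per face, or 6 * 170 = 1020 bytes for a cubemap.
+ */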
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: X coordinate of the pixel.
+ * @y: Y coordinate of the pixel.
+ * @z: Z coordinate of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
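+
+/*
+ * Illustrative example: for a 64x64 SVGA3D_BC1_TYPELESS image the block
+ * size is 4x4x1 with 8 bytes per block, so the row stride is
+ * DIV_ROUND_UP(64, 4) * 8 = 128 bytes and the pixel at (x=5, y=6, z=0)
+ * lands at offset (6 / 4) * 128 + (5 / 4) * 8 = 136 bytes.
+ */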
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
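+
+/*
+ * Illustrative example: reusing the 8x8, 4-mip SVGA3D_R8G8_UNORM surface
+ * above, one full mip chain is 170 bytes, so face 2, mip 1 starts at
+ * 170 * 2 + 128 = 468 bytes into the serialized surface.
+ */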
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc8815..96dc84dc34d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-/**
- * FIXME: Proper access checks on buffers.
- */
-
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ttm_object_file *tfile =
+ vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+ return vmw_user_dmabuf_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
-static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int vmw_sync_obj_flush(void *sync_obj)
{
vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
-static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vmw_sync_obj_signaled(void *sync_obj)
{
- unsigned long flags = (unsigned long) sync_arg;
return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags);
+ DRM_VMW_FENCE_FLAG_EXEC);
}
-static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
- unsigned long flags = (unsigned long) sync_arg;
-
return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags,
+ DRM_VMW_FENCE_FLAG_EXEC,
lazy, interruptible,
VMW_FENCE_WAIT_TIMEOUT);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 00000000000..00ae0925aca
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+ .object_type = VMW_RES_CONTEXT,
+ .base_obj_to_res = vmw_user_context_base_to_res,
+ .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+ &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "legacy contexts",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd;
+
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, false,
+ res_free, &vmw_legacy_context_func);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ctx, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = ctx->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d1498bfd787..5fae06ad7e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
ttm_bo_unreserve(bo);
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_gmr_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
old_mem_type != VMW_PL_GMR);
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
placement.num_placement = 1;
placement.placement = &pl_flags;
- ret = ttm_bo_validate(bo, &placement, false, true, true);
+ ret = ttm_bo_validate(bo, &placement, false, true);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2dd185e42f2..161f8b2549a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
PAGE_SIZE,
ttm_bo_type_device,
&vmw_vram_sys_placement,
- 0, 0, false, NULL,
+ 0, false, NULL,
&dev_priv->dummy_query_bo);
}
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
+ enum vmw_res_type i;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
- idr_init(&dev_priv->context_idr);
- idr_init(&dev_priv->surface_idr);
- idr_init(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i) {
+ idr_init(&dev_priv->res_idr[i]);
+ INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+ }
+
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->surface_lru);
+
dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL))
goto out_no_fman;
- /* Need to start the fifo to check if we can do screen objects */
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
vmw_kms_save_vga(dev_priv);
/* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_kms;
vmw_overlay_init(dev_priv);
- /* 3D Depends on Screen Objects being used. */
- DRM_INFO("Detected %sdevice 3D availability.\n",
- vmw_fifo_have_3d(dev_priv) ?
- "" : "no ");
-
- /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv, true);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
vmw_fb_init(dev_priv);
- } else {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- }
-
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed installing irq: %d\n", ret);
- goto out_no_irq;
- }
}
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
-out_no_irq:
- if (dev_priv->enable_fb)
- vmw_fb_close(dev_priv);
+out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- /* We still have a 3D resource reference held */
- if (dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
- }
-out_no_fifo:
+ vmw_kms_restore_vga(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -684,9 +674,9 @@ out_err2:
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
+
kfree(dev_priv);
return ret;
}
@@ -694,13 +684,14 @@ out_err0:
static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.res_ht_initialized)
+ drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
kfree(dev_priv);
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
out_no_active_lock:
if (!dev_priv->enable_fb) {
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
return ret;
}
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de..13aeda71280 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
- struct list_head validate_list;
- bool gmr_bound;
- uint32_t cur_validate_node;
- bool on_validate_list;
+ struct list_head res_list;
};
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also contains driver-private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
- struct idr *idr;
int id;
- enum ttm_object_type res_type;
bool avail;
- void (*remove_from_lists) (struct vmw_resource *res);
- void (*hw_destroy) (struct vmw_resource *res);
+ unsigned long backup_size;
+ bool res_dirty; /* Protected by backup buffer reserved */
+ bool backup_dirty; /* Protected by backup buffer reserved */
+ struct vmw_dma_buffer *backup;
+ unsigned long backup_offset;
+ const struct vmw_res_func *func;
+ struct list_head lru_head; /* Protected by the resource lock */
+ struct list_head mob_head; /* Protected by @backup reserved */
void (*res_free) (struct vmw_resource *res);
- struct list_head validate_head;
- struct list_head query_head; /* Protected by the cmdbuf mutex */
- /* TODO is a generic snooper needed? */
-#if 0
- void (*snoop)(struct vmw_resource *res,
- struct ttm_object_file *tfile,
- SVGA3dCmdHeader *header);
- void *snoop_priv;
-#endif
+ void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+ vmw_res_context,
+ vmw_res_surface,
+ vmw_res_stream,
+ vmw_res_max
};
struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
uint32_t num_sizes;
-
bool scanout;
-
/* TODO so far just a extra pointer */
struct vmw_cursor_snooper snooper;
- struct ttm_buffer_object *backup;
struct vmw_surface_offset *offsets;
- uint32_t backup_size;
+ SVGA3dTextureFilter autogen_filter;
+ uint32_t multisample_count;
};
struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
uint32_t index;
};
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+ bool valid;
+ uint32_t handle;
+ struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+};
+
struct vmw_sw_context{
- struct ida bo_list;
- uint32_t last_cid;
- bool cid_valid;
+ struct drm_open_hash res_ht;
+ bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */
- struct vmw_resource *cur_ctx;
- uint32_t last_sid;
- uint32_t sid_translation;
- bool sid_valid;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+ struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
- struct list_head query_list;
struct ttm_buffer_object *cur_query_bo;
- uint32_t cur_query_cid;
- bool query_cid_valid;
+ struct list_head res_relocations;
+ uint32_t *buf_start;
+ struct vmw_res_cache_entry res_cache[vmw_res_max];
+ struct vmw_resource *last_query_ctx;
+ bool needs_post_query_barrier;
+ struct vmw_resource *error_resource;
};
struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
*/
rwlock_t resource_lock;
- struct idr context_idr;
- struct idr surface_idr;
- struct idr stream_idr;
-
+ struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
*/
@@ -320,6 +347,7 @@ struct vmw_private {
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
+ uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
/*
@@ -329,10 +357,15 @@ struct vmw_private {
* protected by the cmdbuf mutex for simplicity.
*/
- struct list_head surface_lru;
+ struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
};
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_surface, res);
+}
+
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
* Resource utilities - vmwgfx_resource.c
*/
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+ struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *converter,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
struct vmw_dma_buffer *tmp_buf = *buf;
- struct ttm_buffer_object *bo = &tmp_buf->base;
+
*buf = NULL;
+ if (tmp_buf != NULL) {
+ struct ttm_buffer_object *bo = &tmp_buf->base;
- ttm_bo_unref(&bo);
+ ttm_bo_unref(&bo);
+ }
}
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc97..394e6476105 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset into the command buffer, in 4-byte units, where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+ struct list_head head;
+ const struct vmw_resource *res;
+ unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @switch_backup: Boolean whether to switch backup buffer on unreserve.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate a backup buffer on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *new_backup;
+ unsigned long new_backup_offset;
+ bool first_usage;
+ bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - Unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+ bool backoff)
+{
+ struct vmw_resource_val_node *val;
+
+ list_for_each_entry(val, list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *new_backup =
+ backoff ? NULL : val->new_backup;
+
+ vmw_resource_unreserve(res, new_backup,
+ val->new_backup_offset);
+ vmw_dmabuf_unreference(&val->new_backup);
+ }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_node)
+{
+ struct vmw_resource_val_node *node;
+ struct drm_hash_item *hash;
+ int ret;
+
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+ &hash) == 0)) {
+ node = container_of(hash, struct vmw_resource_val_node, hash);
+ node->first_usage = false;
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+ return 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(node == NULL)) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ kfree(node);
+ return ret;
+ }
+ list_add_tail(&node->head, &sw_context->resource_list);
+ node->res = vmw_resource_reference(res);
+ node->first_usage = true;
+
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+ const struct vmw_resource *res,
+ unsigned long offset)
+{
+ struct vmw_resource_relocation *rel;
+
+ rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ if (unlikely(rel == NULL)) {
+ DRM_ERROR("Failed to allocate a resource relocation.\n");
+ return -ENOMEM;
+ }
+
+ rel->res = res;
+ rel->offset = offset;
+ list_add_tail(&rel->head, list);
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+ struct vmw_resource_relocation *rel, *n;
+
+ list_for_each_entry_safe(rel, n, list, head) {
+ list_del(&rel->head);
+ kfree(rel);
+ }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+ struct list_head *list)
+{
+ struct vmw_resource_relocation *rel;
+
+ list_for_each_entry(rel, list, head)
+ cb[rel->offset] = rel->res->id;
+}
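+
+/*
+ * Illustrative example: a relocation recorded at offset 5 for a resource
+ * whose device id resolved to 7 rewrites the 32-bit word at index 5 of
+ * the patched command buffer (cb[5]) to 7 when the list is applied.
+ */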
+
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
- uint32_t fence_flags,
uint32_t *p_val_node)
{
uint32_t val_node;
+ struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
+ struct drm_hash_item *hash;
+ int ret;
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
- val_buf->new_sync_obj_arg = NULL;
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ &hash) == 0)) {
+ vval_buf = container_of(hash, struct vmw_validate_buffer,
+ hash);
+ val_buf = &vval_buf->base;
+ val_node = vval_buf - sw_context->val_bufs;
+ } else {
+ val_node = sw_context->cur_val_buf;
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission "
+ "exceeded.\n");
+ return -EINVAL;
+ }
+ vval_buf = &sw_context->val_bufs[val_node];
+ vval_buf->hash.key = (unsigned long) bo;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a buffer validation "
+ "entry.\n");
+ return ret;
+ }
+ ++sw_context->cur_val_buf;
+ val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
}
- val_buf->new_sync_obj_arg = (void *)
- ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
- sw_context->fence_flags |= fence_flags;
+ sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
if (p_val_node)
*p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return 0;
}
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since vmware's command submission currently is protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at once will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource *ctx;
-
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- __le32 cid;
- } *cmd;
+ struct vmw_resource_val_node *val;
int ret;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
- return 0;
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use context %u\n",
- (unsigned) cmd->cid);
- return ret;
+ ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, bo, NULL);
+
+ if (unlikely(ret != 0))
+ return ret;
+ }
}
+ return 0;
+}
- sw_context->last_cid = cmd->cid;
- sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ ret = vmw_resource_validate(res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+ }
return 0;
}
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t *sid)
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id,
+ struct vmw_resource_val_node **p_val)
{
- struct vmw_surface *srf;
- int ret;
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[res_type];
struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+ int ret;
- if (*sid == SVGA3D_INVALID_ID)
+ if (*id == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
+ /*
+ * Fastpath in case of repeated commands referencing the same
+ * resource
+ */
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
+ if (likely(rcache->valid && *id == rcache->handle)) {
+ const struct vmw_resource *res = rcache->res;
+
+ rcache->node->first_usage = false;
+ if (p_val)
+ *p_val = rcache->node;
+
+ return vmw_resource_relocation_add
+ (&sw_context->res_relocations, res,
+ id - sw_context->buf_start);
}
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_user_resource_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *id,
+ converter,
+ &res);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned) *id);
+ dump_stack();
return ret;
}
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ rcache->valid = true;
+ rcache->res = res;
+ rcache->handle = *id;
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ rcache->node = node;
+ if (p_val)
+ *p_val = node;
+ vmw_resource_unreference(&res);
return 0;
+
+out_no_reloc:
+ BUG_ON(sw_context->error_resource != NULL);
+ sw_context->error_resource = res;
+
+ return ret;
}
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->cid, NULL);
+}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.target.sid, NULL);
return ret;
}
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->body.sid,
+ NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
+
+ BUG_ON(!ctx_entry->valid);
+ sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
+ sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
}
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
return 0;
}
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronus unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
/*
* The validate list should still hold references to all
* contexts here.
*/
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
+ if (sw_context->needs_post_query_barrier) {
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
+ struct vmw_resource *ctx;
+ int ret;
- BUG_ON(list_empty(&ctx->validate_head));
+ BUG_ON(!ctx_entry->valid);
+ ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ttm_bo_unref(&dev_priv->pinned_bo);
}
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ if (!sw_context->needs_post_query_barrier) {
+ vmw_bo_pin(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
+ /*
+ * We pin also the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ BUG_ON(sw_context->last_query_ctx == NULL);
+ dev_priv->query_cid = sw_context->last_query_ctx->id;
+ dev_priv->query_cid_valid = true;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
}
}
/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
*
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
*
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
*/
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
- &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -479,6 +731,37 @@ out_no_reloc:
return ret;
}
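
The relocation comment above describes a record-now, patch-later scheme: while parsing commands, only the address of the word to patch and the index of the buffer that will supply its value are stored, and the actual write happens once buffer placement is known. The following is a self-contained sketch of that pattern with invented names (reloc, bo_offsets, apply_relocations); it assumes nothing about the real SVGAGuestPtr layout.

#include <stdint.h>
#include <stdio.h>

/* One deferred patch: where to write, and which buffer supplies the value. */
struct reloc {
	uint32_t *location;	/* word inside the command buffer to patch */
	uint32_t bo_index;	/* index into the validation/buffer table */
};

/* Pretend buffer table: final placements only become known after validation. */
static uint32_t bo_offsets[4];

static void apply_relocations(struct reloc *relocs, int n)
{
	for (int i = 0; i < n; i++)
		*relocs[i].location = bo_offsets[relocs[i].bo_index];
}

int main(void)
{
	uint32_t cmd_buf[8] = { 0 };	/* stand-in for the FIFO command stream */
	struct reloc relocs[1];

	/* Record the relocation while parsing; the value is not known yet. */
	relocs[0].location = &cmd_buf[3];
	relocs[0].bo_index = 2;

	/* ... later, once validation has placed the buffer ... */
	bo_offsets[2] = 0x1000;
	apply_relocations(relocs, 1);

	printf("patched word: 0x%x\n", (unsigned int)cmd_buf[3]);
	return 0;
}
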
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- bo = &vmw_bo->base;
- ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
- if (ret) {
- DRM_ERROR("could not find surface\n");
- goto out_no_reloc;
- }
-
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
+ if (unlikely(ret != -ERESTARTSYS))
+ DRM_ERROR("could not find surface for DMA.\n");
+ goto out_no_surface;
}
- /*
- * Patch command stream with device SID.
- */
- cmd->dma.host.sid = srf->res.id;
- vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- return 0;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
-out_no_validate:
- vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &decl->array.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &range->indexArray.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &cur_state->value);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_cmd,
+ header);
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return 0;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check)
+ &vmw_cmd_blt_surf_screen_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
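
The VMW_CMD_DEF() table pairs each SVGA command id with a validator, so vmw_cmd_check() reduces to a bounds check plus an indexed call. Below is a compilable sketch of the same dispatch-table idea, using made-up command ids and validators rather than the real SVGA definitions.

#include <stdio.h>

enum { CMD_DEFINE, CMD_DESTROY, CMD_DRAW, CMD_MAX };

typedef int (*cmd_func)(const void *body);

static int check_ok(const void *body)	   { (void)body; return 0; }
static int check_invalid(const void *body) { (void)body; return -1; }

/* Table indexed by command id, mirroring the VMW_CMD_DEF() idea above. */
static const cmd_func cmd_funcs[CMD_MAX] = {
	[CMD_DEFINE]  = check_ok,
	[CMD_DESTROY] = check_ok,
	[CMD_DRAW]    = check_invalid,	/* pretend this command is rejected */
};

static int check_command(unsigned int id, const void *body)
{
	if (id >= CMD_MAX || cmd_funcs[id] == NULL)
		return -1;
	return cmd_funcs[id](body);
}

int main(void)
{
	printf("define: %d, draw: %d\n",
	       check_command(CMD_DEFINE, NULL), check_command(CMD_DRAW, NULL));
	return 0;
}
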
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int32_t cur_size = size;
int ret;
+ sw_context->buf_start = buf;
+
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index];
+ validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- } else
+ break;
+ case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
+ break;
+ default:
+ BUG();
+ }
}
vmw_free_relocations(sw_context);
}
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+ struct vmw_resource_val_node *val, *val_next;
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+
+ list_for_each_entry_safe(val, val_next, list, head) {
+ list_del_init(&val->head);
+ vmw_resource_unreference(&val->res);
+ kfree(val);
+ }
+}
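
vmw_resource_list_unreference() above uses the _safe list iterator because each entry is released while the list is being walked. The plain-C equivalent of that idiom is sketched below, with an ordinary singly linked list standing in for the kernel list head and free() standing in for vmw_resource_unreference().

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *next;

	/* Build a short list. */
	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->value = i;
		n->next = head;
		head = n;
	}

	/*
	 * "Safe" traversal: read the successor before releasing the current
	 * entry, which is the same reason list_for_each_entry_safe() exists.
	 */
	for (n = head; n != NULL; n = next) {
		next = n->next;
		printf("dropping %d\n", n->value);
		free(n);	/* stands in for vmw_resource_unreference() */
	}
	return 0;
}
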
+
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
+ struct vmw_validate_buffer *entry, *next;
+ struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- head) {
- list_del(&entry->head);
- vmw_dmabuf_validate_clear(entry->bo);
- ttm_bo_unref(&entry->bo);
+ base.head) {
+ list_del(&entry->base.head);
+ ttm_bo_unref(&entry->base.bo);
+ (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
+ list_for_each_entry(val, &sw_context->resource_list, head)
+ (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
*/
DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry;
+ struct vmw_validate_buffer *entry;
int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
+ struct vmw_resource *error_resource;
+ struct list_head resource_list;
uint32_t handle;
void *cmd;
int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
- sw_context->cid_valid = false;
- sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+ sw_context->last_query_ctx = NULL;
+ sw_context->needs_post_query_barrier = false;
+ memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
+ INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (!sw_context->res_ht_initialized) {
+ ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ sw_context->res_ht_initialized = true;
+ }
+ INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- vmw_apply_relocations(sw_context);
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_throttle;
+ goto out_err;
}
+ vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
+
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *) fence);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
+ list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+
return 0;
out_err:
+ vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
+ list_splice_init(&sw_context->resource_list, &resource_list);
+ error_resource = sw_context->error_resource;
+ sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+ if (unlikely(error_resource != NULL))
+ vmw_resource_unreference(&error_resource);
+
return ret;
}
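
A recurring detail in vmw_execbuf_process() is that resource references collected under cmdbuf_mutex are spliced onto a local list and only dropped after the mutex is released, because a destructor may need that same lock. The sketch below shows only that unlock-before-unreference pattern; the obj, obj_unreference and big_lock names are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A refcounted object whose destructor must not run under big_lock. */
struct obj {
	int refcount;
};

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void obj_unreference(struct obj **p)
{
	struct obj *o = *p;

	*p = NULL;
	if (--o->refcount == 0)
		free(o);	/* destruction path that may need big_lock itself */
}

int main(void)
{
	struct obj *held = malloc(sizeof(*held));
	struct obj *to_drop = NULL;

	if (!held)
		return 1;
	held->refcount = 1;

	pthread_mutex_lock(&big_lock);
	/* ... work that requires the lock ... */
	to_drop = held;		/* move the reference out of the locked section */
	held = NULL;
	pthread_mutex_unlock(&big_lock);

	/* Drop the reference only after the lock has been released. */
	obj_unreference(&to_drop);
	printf("done\n");
	return 0;
}
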
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
*/
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
+ struct vmw_fence_obj *lfence = NULL;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
INIT_LIST_HEAD(&validate_list);
- pinned_val.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
list_add_tail(&pinned_val.head, &validate_list);
- query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
goto out_no_reserve;
}
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
+ if (dev_priv->query_cid_valid) {
+ BUG_ON(fence != NULL);
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+ dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (fence == NULL) {
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+ NULL);
+ fence = lfence;
+ }
ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+ if (lfence != NULL)
+ vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ if (dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
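
The split into __vmw_execbuf_release_pinned_bo() (caller already holds cmdbuf_mutex) and vmw_execbuf_release_pinned_bo() (takes the mutex itself) follows the kernel's locked/unlocked function-pair convention. A small sketch of the same structure is shown below; the kernel spells the locked variant with a double-underscore prefix, while here it is named release_pinned_locked() to keep the example strictly portable.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmdbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pinned = 1;

/* Variant that assumes the caller already holds cmdbuf_mutex. */
static void release_pinned_locked(void)
{
	pinned = 0;
}

/* Public wrapper: acquires the lock, then calls the locked variant. */
static void release_pinned(void)
{
	pthread_mutex_lock(&cmdbuf_mutex);
	release_pinned_locked();
	pthread_mutex_unlock(&cmdbuf_mutex);
}

int main(void)
{
	release_pinned();		/* external callers use the wrapper */

	pthread_mutex_lock(&cmdbuf_mutex);
	release_pinned_locked();	/* callers that already hold the lock */
	pthread_mutex_unlock(&cmdbuf_mutex);

	printf("pinned = %d\n", pinned);
	return 0;
}
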
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58..c62d20e8a6f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
- kfree(ufence);
+ ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 7290811f89b..d9fbbe19107 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *clips = NULL;
struct drm_mode_object *obj;
struct vmw_framebuffer *vfb;
+ struct vmw_resource *res;
uint32_t num_clips;
int ret;
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_no_ttm_lock;
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
- &surface);
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+ user_surface_converter,
+ &res);
if (ret)
goto out_no_surface;
+ surface = vmw_res_to_srf(res);
ret = vmw_kms_present(dev_priv, file_priv,
vfb, surface, arg->sid,
arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5a..79f7e8e6052 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b6637..87e39f68e9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
#include "svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
return 0;
}
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->overlay_priv != NULL &&
+ ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+ VMW_OVERLAY_CAP_MASK));
+}
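
vmw_overlay_available() requires every bit in VMW_OVERLAY_CAP_MASK to be set, which the masked comparison expresses in one line (the check removed from vmw_overlay_init() further down appears to have combined the two capability tests incorrectly). A tiny sketch of that all-bits-present test with invented capability flags:

#include <stdbool.h>
#include <stdio.h>

#define CAP_VIDEO	 (1u << 0)
#define CAP_ESCAPE	 (1u << 1)
#define OVERLAY_CAP_MASK (CAP_VIDEO | CAP_ESCAPE)

/* Overlays are usable only when every bit in the mask is present. */
static bool overlay_available(unsigned int fifo_caps)
{
	return (fifo_caps & OVERLAY_CAP_MASK) == OVERLAY_CAP_MASK;
}

int main(void)
{
	printf("%d %d\n",
	       overlay_available(CAP_VIDEO),		   /* 0: escape missing */
	       overlay_available(CAP_VIDEO | CAP_ESCAPE)); /* 1: both present */
	return 0;
}
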
+
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
int ret;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return -ENOSYS;
ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
- if (!dev_priv->overlay_priv)
+ if (!vmw_overlay_available(dev_priv))
return 0;
return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, k;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return 0;
mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
if (dev_priv->overlay_priv)
return -EINVAL;
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
- (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
- DRM_INFO("hardware doesn't support overlays\n");
- return -ENOSYS;
- }
-
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a..e01a17b407b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
-
-struct vmw_user_context {
- struct ttm_base_object base;
- struct vmw_resource res;
-};
-
-struct vmw_user_surface {
- struct ttm_base_object base;
- struct vmw_surface srf;
- uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
struct vmw_user_dma_buffer {
struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
-struct vmw_surface_offset {
- uint32_t face;
- uint32_t mip;
- uint32_t bo_offset;
-};
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
+static const struct vmw_res_func vmw_stream_func = {
+ .res_type = vmw_res_stream,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "video streams",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
*
* Release the resource id to the resource id manager and set it to -1
*/
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock);
if (res->id != -1)
- idr_remove(res->idr, res->id);
+ idr_remove(idr, res->id);
res->id = -1;
write_unlock(&dev_priv->resource_lock);
}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
- int id = res->id;
- struct idr *idr = res->idr;
+ int id;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
res->avail = false;
- if (res->remove_from_lists != NULL)
- res->remove_from_lists(res);
+ list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ if (!list_empty(&res->mob_head) &&
+ res->func->unbind != NULL) {
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+ res->func->unbind(res, false, &val_buf);
+ }
+ res->backup_dirty = false;
+ list_del_init(&res->mob_head);
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&res->backup);
+ }
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
+ id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
/**
* vmw_resource_alloc_id - release a resource id to the id manager.
*
- * @dev_priv: Pointer to the device private structure.
* @res: Pointer to the resource.
*
* Allocate the lowest free resource from the resource manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
- struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
{
+ struct vmw_private *dev_priv = res->dev_priv;
int ret;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(res->idr, res, 1, &res->id);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
return ret;
}
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- bool delay_id,
- void (*res_free) (struct vmw_resource *res),
- void (*remove_from_lists)
- (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @res: The struct vmw_resource to initialize.
+ * @obj_type: Resource object type.
+ * @delay_id: Boolean whether to defer device id allocation until
+ * the first validation.
+ * @res_free: Resource destructor.
+ * @func: Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->remove_from_lists = remove_from_lists;
- res->res_type = obj_type;
- res->idr = idr;
res->avail = false;
res->dev_priv = dev_priv;
- INIT_LIST_HEAD(&res->query_head);
- INIT_LIST_HEAD(&res->validate_head);
+ res->func = func;
+ INIT_LIST_HEAD(&res->lru_head);
+ INIT_LIST_HEAD(&res->mob_head);
res->id = -1;
+ res->backup = NULL;
+ res->backup_offset = 0;
+ res->backup_dirty = false;
+ res->res_dirty = false;
if (delay_id)
return 0;
else
- return vmw_resource_alloc_id(dev_priv, res);
+ return vmw_resource_alloc_id(res);
}
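
vmw_resource_init() now leaves res->id at -1 when delay_id is set, so the device id is only allocated at first validation. Below is a compact sketch of that deferred-id idea, with a plain counter standing in for the per-type idr; the names resource_init and resource_alloc_id are only loose analogues of the kernel functions.

#include <stdbool.h>
#include <stdio.h>

struct resource {
	int id;			/* -1 until a device id is actually needed */
};

static int next_id = 1;		/* stand-in for the per-type idr */

static int resource_alloc_id(struct resource *res)
{
	res->id = next_id++;
	return 0;
}

/* Mirrors the delay_id idea: the device id may be assigned much later. */
static int resource_init(struct resource *res, bool delay_id)
{
	res->id = -1;
	if (delay_id)
		return 0;	/* id handed out at first validation instead */
	return resource_alloc_id(res);
}

int main(void)
{
	struct resource early, late;

	resource_init(&early, false);
	resource_init(&late, true);
	printf("early=%d late=%d\n", early.id, late.id);

	resource_alloc_id(&late);	/* "first validation" happens here */
	printf("late after validation=%d\n", late.id);
	return 0;
}
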
/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
-
-static void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
}
/**
- * Context management:
- */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyContext body;
- } *cmd;
-
-
- vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineContext body;
- } *cmd;
-
- ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, false, res_free, NULL);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a resource id.\n");
- goto out_early;
- }
-
- if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
- DRM_ERROR("Out of hw context ids.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_context_destroy);
- return 0;
-
-out_early:
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
- return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
-
- kfree(ctx);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_context *ctx =
- container_of(base, struct vmw_user_context, base);
- struct vmw_resource *res = &ctx->res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_user_context *ctx;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_context_free) {
- ret = -EINVAL;
- goto out;
- }
-
- ctx = container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
-
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_unlock;
- }
-
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- res = &ctx->res;
- ctx->base.shareable = false;
- ctx->base.tfile = NULL;
-
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&ctx->res);
- ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
- }
-
- arg->cid = res->id;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res;
- int ret = 0;
-
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->context_idr, id);
- if (res && res->avail) {
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable)
- ret = -EPERM;
- if (p_res)
- *p_res = vmw_resource_reference(res);
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
-
- return ret;
-}
-
-struct vmw_bpp {
- uint8_t bpp;
- uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
- [SVGA3D_FORMAT_INVALID] = {0, 0},
- [SVGA3D_X8R8G8B8] = {32, 32},
- [SVGA3D_A8R8G8B8] = {32, 32},
- [SVGA3D_R5G6B5] = {16, 16},
- [SVGA3D_X1R5G5B5] = {16, 16},
- [SVGA3D_A1R5G5B5] = {16, 16},
- [SVGA3D_A4R4G4B4] = {16, 16},
- [SVGA3D_Z_D32] = {32, 32},
- [SVGA3D_Z_D16] = {16, 16},
- [SVGA3D_Z_D24S8] = {32, 32},
- [SVGA3D_Z_D15S1] = {16, 16},
- [SVGA3D_LUMINANCE8] = {8, 8},
- [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
- [SVGA3D_LUMINANCE16] = {16, 16},
- [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
- [SVGA3D_DXT1] = {4, 16},
- [SVGA3D_DXT2] = {8, 32},
- [SVGA3D_DXT3] = {8, 32},
- [SVGA3D_DXT4] = {8, 32},
- [SVGA3D_DXT5] = {8, 32},
- [SVGA3D_BUMPU8V8] = {16, 16},
- [SVGA3D_BUMPL6V5U5] = {16, 16},
- [SVGA3D_BUMPX8L8V8U8] = {32, 32},
- [SVGA3D_ARGB_S10E5] = {16, 16},
- [SVGA3D_ARGB_S23E8] = {32, 32},
- [SVGA3D_A2R10G10B10] = {32, 32},
- [SVGA3D_V8U8] = {16, 16},
- [SVGA3D_Q8W8V8U8] = {32, 32},
- [SVGA3D_CxV8U8] = {16, 16},
- [SVGA3D_X8L8V8U8] = {32, 32},
- [SVGA3D_A2W10V10U10] = {32, 32},
- [SVGA3D_ALPHA8] = {8, 8},
- [SVGA3D_R_S10E5] = {16, 16},
- [SVGA3D_R_S23E8] = {32, 32},
- [SVGA3D_RG_S10E5] = {16, 16},
- [SVGA3D_RG_S23E8] = {32, 32},
- [SVGA3D_BUFFER] = {8, 8},
- [SVGA3D_Z_D24X8] = {32, 32},
- [SVGA3D_V16U16] = {32, 32},
- [SVGA3D_G16R16] = {32, 32},
- [SVGA3D_A16B16G16R16] = {64, 64},
- [SVGA3D_UYVY] = {12, 12},
- [SVGA3D_YUY2] = {12, 12},
- [SVGA3D_NV12] = {12, 8},
- [SVGA3D_AYUV] = {32, 32},
- [SVGA3D_BC4_UNORM] = {4, 16},
- [SVGA3D_BC5_UNORM] = {8, 32},
- [SVGA3D_Z_DF16] = {16, 16},
- [SVGA3D_Z_DF24] = {24, 24},
- [SVGA3D_Z_D24S8_INT] = {32, 32}
-};
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
- SVGA3dCopyBox cb;
- SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
- sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
*
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
- return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
- void *cmd_space)
-{
- struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
- cmd_space;
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return, the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
*
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
*/
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
- void *cmd_space)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter,
+ struct vmw_resource **p_res)
{
- struct vmw_surface_define *cmd = (struct vmw_surface_define *)
- cmd_space;
- struct drm_vmw_size *src_size;
- SVGA3dSize *cmd_size;
- uint32_t cmd_len;
- int i;
-
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = src_size->width;
- cmd_size->height = src_size->height;
- cmd_size->depth = src_size->depth;
- }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
- void *cmd_space,
- const SVGAGuestPtr *ptr,
- bool to_surface)
-{
- uint32_t i;
- uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
- uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
- struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
- for (i = 0; i < srf->num_sizes; ++i) {
- SVGA3dCmdHeader *header = &cmd->header;
- SVGA3dCmdSurfaceDMA *body = &cmd->body;
- SVGA3dCopyBox *cb = &cmd->cb;
- SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
- const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
- header->id = SVGA_3D_CMD_SURFACE_DMA;
- header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
- body->guest.ptr = *ptr;
- body->guest.ptr.offset += cur_offset->bo_offset;
- body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
- body->host.sid = srf->res.id;
- body->host.face = cur_offset->face;
- body->host.mipmap = cur_offset->mip;
- body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM);
- cb->x = 0;
- cb->y = 0;
- cb->z = 0;
- cb->srcx = 0;
- cb->srcy = 0;
- cb->srcz = 0;
- cb->w = cur_size->width;
- cb->h = cur_size->height;
- cb->d = cur_size->depth;
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = body->guest.pitch*cur_size->height*
- cur_size->depth*bpp / stride_bpp;
- suffix->flags.discard = 0;
- suffix->flags.unsynchronized = 0;
- suffix->flags.reserved = 0;
- ++cmd;
- }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
- void *cmd;
-
- if (res->id != -1) {
-
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
- /*
- * used_memory_size_atomic, or separate lock
- * to avoid taking dev_priv::cmdbuf_mutex in
- * the destroy path.
- */
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = container_of(res, struct vmw_surface, res);
- dev_priv->used_memory_size -= srf->backup_size;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- }
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
-
- if (likely(res->id != -1))
- return 0;
-
- if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
- dev_priv->memory_size))
- return -EBUSY;
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- if (srf->backup) {
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)((unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
- }
-
- /*
- * Alloc id for the resource.
- */
-
- ret = vmw_resource_alloc_id(dev_priv, res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a surface id.\n");
- goto out_no_id;
- }
- if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
- ret = -EBUSY;
- goto out_no_fifo;
- }
-
-
- /*
- * Encode surface define- and dma commands.
- */
-
- submit_size = vmw_surface_define_size(srf);
- if (srf->backup)
- submit_size += vmw_surface_dma_size(srf);
-
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "validation.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_surface_define_encode(srf, cmd);
- if (srf->backup) {
- SVGAGuestPtr ptr;
-
- cmd += vmw_surface_define_size(srf);
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, true);
- }
-
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Create a fence object and fence the backup buffer.
- */
-
- if (srf->backup) {
- struct vmw_fence_obj *fence;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- }
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size += srf->backup_size;
-
- return 0;
-
-out_no_fifo:
- vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- if (srf->backup)
- ttm_bo_unref(&val_buf.bo);
- return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
- struct vmw_fence_obj *fence;
- SVGAGuestPtr ptr;
-
- BUG_ON(res->id == -1);
-
- /*
- * Create a surface backup buffer object.
- */
-
- if (!srf->backup) {
- ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
- ttm_bo_type_device,
- &vmw_srf_placement, 0, 0, true,
- NULL, &srf->backup);
- if (unlikely(ret != 0))
- return ret;
- }
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
-
-
- /*
- * Encode the dma- and surface destroy commands.
- */
-
- submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, false);
- cmd += vmw_surface_dma_size(srf);
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size -= srf->backup_size;
-
- /*
- * Create a fence object and fence the DMA buffer.
- */
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
-
- /*
- * Release the surface ID.
- */
-
- vmw_resource_release_id(res);
-
- return 0;
-
-out_no_fifo:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- int ret;
- struct vmw_surface *evict_srf;
-
- do {
- write_lock(&dev_priv->resource_lock);
- list_del_init(&srf->lru_head);
- write_unlock(&dev_priv->resource_lock);
-
- ret = vmw_surface_do_validate(dev_priv, srf);
- if (likely(ret != -EBUSY))
- break;
-
- write_lock(&dev_priv->resource_lock);
- if (list_empty(&dev_priv->surface_lru)) {
- DRM_ERROR("Out of device memory for surfaces.\n");
- ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
- break;
- }
-
- evict_srf = vmw_surface_reference
- (list_first_entry(&dev_priv->surface_lru,
- struct vmw_surface,
- lru_head));
- list_del_init(&evict_srf->lru_head);
-
- write_unlock(&dev_priv->resource_lock);
- (void) vmw_surface_evict(dev_priv, evict_srf);
-
- vmw_surface_unreference(&evict_srf);
-
- } while (1);
-
- if (unlikely(ret != 0 && srf->res.id != -1)) {
- write_lock(&dev_priv->resource_lock);
- list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
- write_unlock(&dev_priv->resource_lock);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
- struct vmw_resource *res = &srf->res;
-
- BUG_ON(res_free == NULL);
- INIT_LIST_HEAD(&srf->lru_head);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, true, res_free,
- vmw_surface_remove_from_lists);
-
- if (unlikely(ret != 0))
- res_free(res);
-
- /*
- * The surface won't be visible to hardware until a
- * surface validate.
- */
-
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
- return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *user_srf =
- container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(user_srf);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- rwlock_t *lock = NULL;
-
- list_for_each_entry(res, list, validate_head) {
-
- if (res->res_free != &vmw_surface_res_free &&
- res->res_free != &vmw_user_surface_free)
- continue;
-
- if (unlikely(lock == NULL)) {
- lock = &res->dev_priv->resource_lock;
- write_lock(lock);
- }
-
- srf = container_of(res, struct vmw_surface, res);
- list_del_init(&srf->lru_head);
- list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
- }
-
- if (lock != NULL)
- write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
-{
- int ret;
-
- BUG_ON(*out_surf || *out_buf);
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
- if (!ret)
- return 0;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
- return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_surface **out)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
+ struct vmw_resource *res;
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(base->object_type != converter->object_type))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
- res = &srf->res;
+ res = converter->base_obj_to_res(base);
read_lock(&dev_priv->resource_lock);
-
- if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
- *out = srf;
+ *p_res = res;
ret = 0;
out_bad_resource:
@@ -1254,286 +319,32 @@ out_bad_resource:
return ret;
}
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
- struct vmw_resource *res = &user_srf->srf.res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by @out_surf and @out_buf must be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
struct vmw_resource *res;
- struct vmw_resource *tmp;
- union drm_vmw_surface_create_arg *arg =
- (union drm_vmw_surface_create_arg *)data;
- struct drm_vmw_surface_create_req *req = &arg->req;
- struct drm_vmw_surface_arg *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
- int i, j;
- uint32_t cur_bo_offset;
- struct drm_vmw_size *cur_size;
- struct vmw_surface_offset *cur_offset;
- uint32_t stride_bpp;
- uint32_t bpp;
- uint32_t num_sizes;
- uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- num_sizes += req->mip_levels[i];
-
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
-
- size = vmw_user_surface_size + 128 +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
- srf->backup = NULL;
-
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
- user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_sizes;
- }
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_offsets;
- }
-
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- cur_bo_offset = 0;
- cur_offset = srf->offsets;
- cur_size = srf->sizes;
-
- bpp = vmw_sf_bpp[srf->format].bpp;
- stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
- uint32_t stride =
- (cur_size->width * stride_bpp + 7) >> 3;
-
- cur_offset->face = i;
- cur_offset->mip = j;
- cur_offset->bo_offset = cur_bo_offset;
- cur_bo_offset += stride * cur_size->height *
- cur_size->depth * bpp / stride_bpp;
- ++cur_offset;
- ++cur_size;
- }
- }
- srf->backup_size = cur_bo_offset;
-
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- /* allocate image area and clear it */
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
- }
- srf->snooper.crtc = NULL;
-
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
-
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->sid = user_srf->base.hash.key;
- if (rep->sid == SVGA3D_INVALID_ID)
- DRM_ERROR("Created bad Surface ID.\n");
-
- vmw_resource_unreference(&res);
-
- ttm_read_unlock(&vmaster->lock);
- return 0;
-out_no_copy:
- kfree(srf->offsets);
-out_no_offsets:
- kfree(srf->sizes);
-out_no_sizes:
- kfree(user_srf);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_vmw_surface_reference_arg *arg =
- (union drm_vmw_surface_reference_arg *)data;
- struct drm_vmw_surface_arg *req = &arg->req;
- struct drm_vmw_surface_create_req *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct drm_vmw_size __user *user_sizes;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
-
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
- rep->flags = srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- rep->size_addr;
+ BUG_ON(*out_surf || *out_buf);
- if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
- ret = -EFAULT;
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+ *out_surf = vmw_res_to_srf(res);
+ return 0;
}
-out_bad_resource:
-out_no_reference:
- ttm_base_object_unref(&base);
-
- return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id)
-{
- struct ttm_base_object *base;
- struct vmw_user_surface *user_srf;
-
- int ret = -EPERM;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_surface;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- *id = user_srf->srf.res.id;
- ret = 0;
-
-out_bad_surface:
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
-
- ttm_base_object_unref(&base);
+ *out_surf = NULL;
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
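A minimal sketch of how a caller might drive the helper above; dev_priv, tfile and handle are assumed to come from the surrounding ioctl, and both output pointers must start out NULL, as the BUG_ON requires:

	struct vmw_surface *surf = NULL;
	struct vmw_dma_buffer *buf = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (unlikely(ret != 0))
		return ret;	/* handle names neither a surface nor a buffer */

	if (surf)
		vmw_surface_unreference(&surf);	/* ... once done with the surface */
	else
		vmw_dmabuf_unreference(&buf);	/* ... once done with the buffer */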
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->validate_list);
+ INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
- 0, 0, interruptible,
+ 0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- kfree(vmw_user_bo);
+ ttm_base_object_kfree(vmw_user_bo, base);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(user_bo == NULL)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->dma.base);
+ ret = ttm_base_object_init(tfile,
+ &user_bo->base,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_dma_buf = &user_bo->dma;
+ *handle = user_bo->base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
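As a usage note, a hedged sketch of an in-kernel caller allocating a user dma buffer and handing the handle back to user-space; this mirrors the reworked vmw_dmabuf_alloc_ioctl() further down, and size, tfile and rep stand in for whatever the calling ioctl provides:

	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false /* !shareable */,
				    &handle, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	rep->handle = handle;
	/* The local reference is dropped once the handle has been exported. */
	vmw_dmabuf_unreference(&dma_buf);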
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_dma_buffer(bo);
+ return (vmw_user_bo->base.tfile == tfile ||
+ vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
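For illustration only, a TTM verify_access hook could delegate to the helper above roughly as follows; the wrapper name and the way the ttm_object_file is recovered from the struct file are assumptions, not part of this patch:

	static int example_verify_access(struct ttm_buffer_object *bo,
					 struct file *filp)
	{
		struct drm_file *file_priv = filp->private_data;

		return vmw_user_dmabuf_verify_access(bo,
						     vmw_fpriv(file_priv)->tfile);
	}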
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
+ uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (unlikely(vmw_user_bo == NULL))
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0)) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
goto out_no_dmabuf;
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0))
- goto out_no_base_object;
- else {
- rep->handle = vmw_user_bo->base.hash.key;
- rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
- rep->cur_gmr_id = vmw_user_bo->base.hash.key;
- rep->cur_gmr_offset = 0;
- }
+ rep->handle = handle;
+ rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_dmabuf_unreference(&dma_buf);
-out_no_base_object:
- ttm_bo_unref(&tmp);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- if (likely(vmw_bo->on_validate_list))
- return vmw_bo->cur_validate_node;
-
- vmw_bo->cur_validate_node = cur_validate_node;
- vmw_bo->on_validate_list = true;
-
- return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_bo->on_validate_list = false;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+
+ if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+ return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
/*
* Stream management
*/
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &stream->res;
int ret;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, false, res_free, NULL);
+ ret = vmw_resource_init(dev_priv, res, false, res_free,
+ &vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
return 0;
}
-/**
- * User-space context management:
- */
-
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
- kfree(stream);
+ ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+ res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource *res;
int ret;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+ *inout_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ bool interruptible)
+{
+ unsigned long size =
+ (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ struct vmw_dma_buffer *backup;
+ int ret;
+
+ if (likely(res->backup)) {
+ BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ return 0;
+ }
+
+ backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+ if (unlikely(backup == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ res->func->backup_placement,
+ interruptible,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ res->backup = backup;
+
+out_no_dmabuf:
+ return ret;
+}
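As a worked example of the rounding above: with 4 KiB pages, a backup_size of 10000 bytes becomes (10000 + 4095) & ~4095 = 12288 bytes, i.e. three whole pages; the expression is the open-coded equivalent of PAGE_ALIGN(res->backup_size).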
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ * @val_buf: Information about a buffer possibly
+ * containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ int ret = 0;
+ const struct vmw_res_func *func = res->func;
+
+ if (unlikely(res->id == -1)) {
+ ret = func->create(res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (func->bind &&
+ ((func->needs_backup && list_empty(&res->mob_head) &&
+ val_buf->bo != NULL) ||
+ (!func->needs_backup && val_buf->bo != NULL))) {
+ ret = func->bind(res, val_buf);
+ if (unlikely(ret != 0))
+ goto out_bind_failed;
+ if (func->needs_backup)
+ list_add_tail(&res->mob_head, &res->backup->res_list);
+ }
+
+ /*
+ * Only do this on write operations, and move to
+ * vmw_resource_unreserve if it can be called after
+ * backup buffers have been unreserved. Otherwise
+ * sort out locking.
+ */
+ res->res_dirty = true;
+
+ return 0;
+
+out_bind_failed:
+ func->destroy(res);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res: Pointer to the struct vmw_resource to unreserve.
+ * @new_backup: Pointer to new backup buffer if command submission
+ * switched backup buffers.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (!list_empty(&res->lru_head))
+ return;
+
+ if (new_backup && new_backup != res->backup) {
+
+ if (res->backup) {
+ BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ res->backup = vmw_dmabuf_reference(new_backup);
+ BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ }
+ if (new_backup)
+ res->backup_offset = new_backup_offset;
+
+ if (!res->func->may_evict)
+ return;
+
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&res->lru_head,
+ &res->dev_priv->res_lru[res->func->res_type]);
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ * for a resource, and if so, allocate one, then reserve and validate it.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ * @val_buf: On successful return contains data about the
+ * reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+ bool interruptible,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+ bool backup_dirty = false;
+ int ret;
+
+ if (unlikely(res->backup == NULL)) {
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf->bo = ttm_bo_reference(&res->backup->base);
+ list_add_tail(&val_buf->head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ if (res->func->needs_backup && list_empty(&res->mob_head))
+ return 0;
+
+ backup_dirty = res->backup_dirty;
+ ret = ttm_bo_validate(&res->backup->base,
+ res->func->backup_placement,
+ true, false);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf->bo);
+ if (backup_dirty)
+ vmw_dmabuf_unreference(&res->backup);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res: The resource to reserve.
+ * @no_backup: If true, do not allocate a backup buffer for the resource
+ * even if its type normally needs one.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (res->func->needs_backup && res->backup == NULL &&
+ !no_backup) {
+ ret = vmw_resource_buf_alloc(res, true);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
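To show how the pieces introduced in this file fit together, a simplified sketch of one validation cycle for a single resource; the real caller (the command submission path) additionally reserves, validates and fences the backup buffer, which is omitted here:

	int ret;

	ret = vmw_resource_reserve(res, false);	/* off the LRU, backup allocated */
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_validate(res);	/* create/bind, evicting if needed */

	/* Back on the LRU; pass a new backup buffer only if it was switched. */
	vmw_resource_unreserve(res, NULL, 0);

	return ret;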
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ * backup buffer
+ *
+ * @val_buf: Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+
+ if (likely(val_buf->bo == NULL))
+ return;
+
+ INIT_LIST_HEAD(&val_list);
+ list_add_tail(&val_buf->head, &val_list);
+ ttm_eu_backoff_reservation(&val_list);
+ ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+ int ret;
+
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+ ret = vmw_resource_check_buffer(res, true, &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(func->unbind != NULL &&
+ (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ ret = func->unbind(res, res->res_dirty, &val_buf);
+ if (unlikely(ret != 0))
+ goto out_no_unbind;
+ list_del_init(&res->mob_head);
+ }
+ ret = func->destroy(res);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+out_no_unbind:
+ vmw_resource_backoff_reservation(&val_buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+ int ret;
+ struct vmw_resource *evict_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+
+ val_buf.bo = NULL;
+ if (res->backup)
+ val_buf.bo = &res->backup->base;
+ do {
+ ret = vmw_resource_do_validate(res, &val_buf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+ DRM_ERROR("Out of device device id entries "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_res = vmw_resource_reference
+ (list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ else if (!res->func->needs_backup && res->backup) {
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ return 0;
+
+out_no_validate:
+ return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct vmw_fence_obj *old_fence_obj;
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL)
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ driver->sync_obj_ref(fence);
+
+ spin_lock(&bdev->fence_lock);
+
+ old_fence_obj = bo->sync_obj;
+ bo->sync_obj = fence;
+
+ spin_unlock(&bdev->fence_lock);
+
+ if (old_fence_obj)
+ vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res: The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+ return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @type: The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+{
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+
+ if (list_empty(lru_list))
+ goto out_unlock;
+
+ evict_res = vmw_resource_reference(
+ list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+out_unlock:
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv: Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+ enum vmw_res_type type;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ for (type = 0; type < vmw_res_max; ++type)
+ vmw_resource_evict_type(dev_priv, type);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 00000000000..f3adeed2854
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+ enum ttm_object_type object_type;
+ struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+ void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type: Enum that identifies the lru list to use for eviction.
+ * @needs_backup: Whether the resource is guest-backed and needs
+ * persistent buffer storage.
+ * @type_name: String that identifies the resource type.
+ * @backup_placement: TTM placement for backup buffers.
+ * @may_evict: Whether the resource may be evicted.
+ * @create: Create a hardware resource.
+ * @destroy: Destroy a hardware resource.
+ * @bind: Bind a hardware resource to persistent buffer storage.
+ * @unbind: Unbind a hardware resource from persistent
+ * buffer storage.
+ */
+
+struct vmw_res_func {
+ enum vmw_res_type res_type;
+ bool needs_backup;
+ const char *type_name;
+ struct ttm_placement *backup_placement;
+ bool may_evict;
+
+ int (*create) (struct vmw_resource *res);
+ int (*destroy) (struct vmw_resource *res);
+ int (*bind) (struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+ int (*unbind) (struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *));
+#endif
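To illustrate how the interface declared above is meant to be consumed, a hypothetical resource type (all example_* names are assumptions, not part of this patch) would describe itself with a struct vmw_res_func table and register instances through vmw_resource_init() and vmw_resource_activate(); vmwgfx_surface.c below does exactly this for legacy surfaces:

	static int example_create(struct vmw_resource *res)  { return 0; }
	static int example_destroy(struct vmw_resource *res) { return 0; }

	static const struct vmw_res_func example_func = {
		.res_type = vmw_res_stream,	/* reuse an existing LRU list */
		.needs_backup = false,
		.may_evict = false,
		.type_name = "example resources",
		.backup_placement = NULL,	/* no backup storage needed */
		.create = example_create,
		.destroy = example_destroy,
		.bind = NULL,
		.unbind = NULL,
	};

	static int example_init(struct vmw_private *dev_priv,
				struct vmw_resource *res,
				void (*res_free)(struct vmw_resource *res),
				void (*hw_destroy)(struct vmw_resource *res))
	{
		int ret = vmw_resource_init(dev_priv, res, false, res_free,
					    &example_func);

		if (likely(ret == 0))
			vmw_resource_activate(res, hw_destroy);
		return ret;
	}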
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab..26387c3d5a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
return -EINVAL;
}
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+ if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 00000000000..58281433974
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base: The TTM base object handling user-space visibility.
+ * @srf: The surface metadata.
+ * @size: TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+ uint32_t size;
+ uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face: Surface face.
+ * @mip: Mip level.
+ * @bo_offset: Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+ .object_type = VMW_RES_SURFACE,
+ .base_obj_to_res = vmw_user_surface_base_to_res,
+ .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+ &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = false,
+ .may_evict = true,
+ .type_name = "legacy surfaces",
+ .backup_placement = &vmw_srf_placement,
+ .create = &vmw_legacy_srf_create,
+ .destroy = &vmw_legacy_srf_destroy,
+ .bind = &vmw_legacy_srf_bind,
+ .unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
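For instance, a cube map with six faces and four mip levels per face has num_sizes == 24, so backing up or restoring it needs 24 * sizeof(struct vmw_surface_dma) bytes of FIFO space, one header/body/copy-box/suffix group per mip image.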
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(srf->format);
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+ cur_size);
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset =
+ svga3dsurface_get_image_buffer_size(desc, cur_size,
+ body->guest.pitch);
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ void *cmd;
+
+ if (res->id != -1) {
+
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * TODO: Consider using used_memory_size_atomic, or a separate lock,
+ * to avoid taking dev_priv::cmdbuf_mutex in the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = vmw_res_to_srf(res);
+ dev_priv->used_memory_size -= res->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * Creates a device surface if @res doesn't already have a hardware id.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ srf = vmw_res_to_srf(res);
+ if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Alloc id for the resource.
+ */
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ /*
+ * Encode the surface define command.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ vmw_surface_define_encode(srf, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += res->backup_size;
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ * @bind: Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf,
+ bool bind)
+{
+ SVGAGuestPtr ptr;
+ struct vmw_fence_obj *fence;
+ uint32_t submit_size;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint8_t *cmd;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ BUG_ON(val_buf->bo == NULL);
+
+ submit_size = vmw_surface_dma_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "DMA.\n");
+ return -ENOMEM;
+ }
+ vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ * surface validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (!res->backup_dirty)
+ return 0;
+
+ return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ * surface eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @readback: Whether to copy the surface contents back to the backup
+ * buffer before unbinding.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data from the surface if @readback is
+ * true.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (unlikely(readback))
+ return vmw_legacy_srf_dma(res, val_buf, false);
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ * resource eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Encode the surface destroy command.
+ */
+
+ submit_size = vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ return -ENOMEM;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= res->backup_size;
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
+ return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to the struct vmw_surface to initialize.
+ * @res_free: Pointer to a resource destructor used to free
+ * the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_legacy_surface_func);
+
+ if (unlikely(ret != 0)) {
+ vmw_3d_resource_dec(dev_priv, false);
+ res_free(res);
+ return ret;
+ }
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ * user visible surfaces
+ *
+ * @base: Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res: A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+
+ kfree(srf->offsets);
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ ttm_base_object_kfree(user_srf, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base: Pointer to a pointer to a TTM base object
+ * embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and sets the
+ * pointer pointed to by @p_base to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ * the user surface destroy functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ srf->base_size = *srf->sizes;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = 1;
+
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride = svga3dsurface_calculate_pitch
+ (desc, cur_size);
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += svga3dsurface_get_image_buffer_size
+ (desc, cur_size, stride);
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ res->backup_size = cur_bo_offset;
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_no_copy;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
+ kfree(srf->sizes);
+out_no_sizes:
+ ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
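+/*
+ * A minimal worked example of the offset accumulation in the loop above,
+ * assuming the svga3dsurface_calculate_pitch() and
+ * svga3dsurface_get_image_buffer_size() helpers return the row pitch and
+ * per-image byte size for the format, with 4 bytes per pixel and no row
+ * padding: a single-face 64x64 SVGA3D_A8R8G8B8 surface with two mip
+ * levels (64x64 and 32x32) would lay out as
+ *
+ *   mip 0: bo_offset 0,     64 * 64 * 4 = 16384 bytes
+ *   mip 1: bo_offset 16384, 32 * 32 * 4 =  4096 bytes
+ *
+ * giving a backup_size of 20480 bytes.
+ */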
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index f34838839b0..29437eabe09 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,7 +1,7 @@
config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
- depends on PCI
+ depends on (PCI && !S390)
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab..fa60add0ff6 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/dmi.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
- const char *pdev_name;
int ret;
bool delay = false, can_switch;
bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
if (can_switch) {
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
struct vga_switcheroo_client *client;
- const char *pdev_name;
int ret;
int err = -EINVAL;
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
if (!client || !check_can_switch())
goto err;
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage2(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1630150ad2b..e7d6a13ec6a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -265,6 +265,15 @@ config HID_GYRATION
---help---
Support for Gyration remote control.
+config HID_ICADE
+ tristate "ION iCade arcade controller"
+ depends on BT_HIDP
+ ---help---
+ Support for the ION iCade arcade controller to work as a joystick.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-icade.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on USB_HID
@@ -728,4 +737,6 @@ endif # HID
source "drivers/hid/usbhid/Kconfig"
+source "drivers/hid/i2c-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index cef68ca859d..b62215716b2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
+obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -93,8 +94,8 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_PS3REMOTE) += hid-ps3remote.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
- hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \
- hid-roccat-savu.o
+ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-lua.o \
+ hid-roccat-pyra.o hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
@@ -118,3 +119,4 @@ obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
+obj-$(CONFIG_I2C_HID) += i2c-hid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fd7722aecf7..d0f7662aacc 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -439,7 +439,8 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO),
- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
APPLE_RDESC_JIS },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f4109fd657f..eb2ee11b641 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -92,6 +92,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
{
struct hid_field *field;
+ int i;
if (report->maxfield == HID_MAX_FIELDS) {
hid_err(report->device, "too many fields in report\n");
@@ -110,6 +111,9 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field->value = (s32 *)(field->usage + usages);
field->report = report;
+ for (i = 0; i < usages; i++)
+ field->usage[i].usage_index = i;
+
return field;
}
@@ -315,6 +319,7 @@ static s32 item_sdata(struct hid_item *item)
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
+ __u32 raw_value;
switch (item->tag) {
case HID_GLOBAL_ITEM_TAG_PUSH:
@@ -365,7 +370,14 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
- parser->global.unit_exponent = item_sdata(item);
+ /* A negative unit exponent is encoded as a 4-bit two's
+ * complement value.
+ * See "6.2.2.7 Global Items" for more information. */
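+ /* Worked example (illustrative only, not taken from the spec text):
+ *   raw_value = 0x0e -> no bits above the low nibble, so it is
+ *   decoded with hid_snto32(0x0e, 4); bit 3 is set, giving
+ *   0x0e | (-1 << 4) == -2.
+ *   raw_value = 0x101 -> high bits set, stored unchanged as before. */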
+ raw_value = item_udata(item);
+ if (!(raw_value & 0xfffffff0))
+ parser->global.unit_exponent = hid_snto32(raw_value, 4);
+ else
+ parser->global.unit_exponent = raw_value;
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT:
@@ -713,7 +725,12 @@ static int hid_scan_report(struct hid_device *hid)
hid_scan_usage(hid, u);
break;
}
- }
+ } else if (page == HID_UP_SENSOR &&
+ item.type == HID_ITEM_TYPE_MAIN &&
+ item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
+ (item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL &&
+ hid->bus == BUS_USB)
+ hid->group = HID_GROUP_SENSOR_HUB;
}
return 0;
@@ -865,6 +882,12 @@ static s32 snto32(__u32 value, unsigned n)
return value & (1 << (n - 1)) ? value | (-1 << n) : value;
}
+s32 hid_snto32(__u32 value, unsigned n)
+{
+ return snto32(value, n);
+}
+EXPORT_SYMBOL_GPL(hid_snto32);
+
/*
* Convert a signed 32-bit integer to a signed n-bit integer.
*/
@@ -1465,6 +1488,10 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
* there is a proper autodetection and autoloading in place (based on presence
* of HID_DG_CONTACTID), so those devices don't need to be added to this list,
* as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
*/
static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
@@ -1538,6 +1565,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
@@ -1571,10 +1599,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086, USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086, USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087, USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087, USB_DEVICE_ID_SENSOR_HUB_09FA) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
@@ -1658,6 +1683,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
@@ -1672,7 +1698,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_SENSOR_HUB_7014) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2150,8 +2175,13 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ }
};
-static bool hid_ignore(struct hid_device *hdev)
+bool hid_ignore(struct hid_device *hdev)
{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
switch (hdev->vendor) {
case USB_VENDOR_ID_CODEMERCS:
/* ignore all Code Mercenaries IOWarrior devices */
@@ -2188,7 +2218,16 @@ static bool hid_ignore(struct hid_device *hdev)
if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
hdev->type == HID_TYPE_USBNONE)
return true;
- break;
+ break;
+ case USB_VENDOR_ID_DWAV:
+ /* These are handled by usbtouchscreen. hdev->type is probably
+ * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match
+ * usbtouchscreen. */
+ if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER ||
+ hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
@@ -2197,6 +2236,7 @@ static bool hid_ignore(struct hid_device *hdev)
return !!hid_match_id(hdev, hid_ignore_list);
}
+EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -2208,8 +2248,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (!(hdev->quirks & HID_QUIRK_NO_IGNORE)
- && (hid_ignore(hdev) || (hdev->quirks & HID_QUIRK_IGNORE)))
+ if (hid_ignore(hdev))
return -ENODEV;
/*
diff --git a/drivers/hid/hid-icade.c b/drivers/hid/hid-icade.c
new file mode 100644
index 00000000000..1d6565e37ba
--- /dev/null
+++ b/drivers/hid/hid-icade.c
@@ -0,0 +1,259 @@
+/*
+ * ION iCade input driver
+ *
+ * Copyright (c) 2012 Bastien Nocera <hadess@hadess.net>
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+/*
+ * ↑ A C Y L
+ * ← →
+ * ↓ B X Z R
+ *
+ *
+ * UP ON,OFF = w,e
+ * RT ON,OFF = d,c
+ * DN ON,OFF = x,z
+ * LT ON,OFF = a,q
+ * A ON,OFF = y,t
+ * B ON,OFF = h,r
+ * C ON,OFF = u,f
+ * X ON,OFF = j,n
+ * Y ON,OFF = i,m
+ * Z ON,OFF = k,p
+ * L ON,OFF = o,g
+ * R ON,OFF = l,v
+ */
+
+/* The translation code uses HID usage instead of input layer
+ * keys. This code generates a lookup table that makes
+ * translation quick.
+ *
+ * #include <linux/input.h>
+ * #include <stdio.h>
+ * #include <assert.h>
+ *
+ * #define unk KEY_UNKNOWN
+ *
+ * < copy of hid_keyboard[] from hid-input.c >
+ *
+ * struct icade_key_translation {
+ * int from;
+ * const char *to;
+ * int press;
+ * };
+ *
+ * static const struct icade_key_translation icade_keys[] = {
+ * { KEY_W, "KEY_UP", 1 },
+ * { KEY_E, "KEY_UP", 0 },
+ * { KEY_D, "KEY_RIGHT", 1 },
+ * { KEY_C, "KEY_RIGHT", 0 },
+ * { KEY_X, "KEY_DOWN", 1 },
+ * { KEY_Z, "KEY_DOWN", 0 },
+ * { KEY_A, "KEY_LEFT", 1 },
+ * { KEY_Q, "KEY_LEFT", 0 },
+ * { KEY_Y, "BTN_A", 1 },
+ * { KEY_T, "BTN_A", 0 },
+ * { KEY_H, "BTN_B", 1 },
+ * { KEY_R, "BTN_B", 0 },
+ * { KEY_U, "BTN_C", 1 },
+ * { KEY_F, "BTN_C", 0 },
+ * { KEY_J, "BTN_X", 1 },
+ * { KEY_N, "BTN_X", 0 },
+ * { KEY_I, "BTN_Y", 1 },
+ * { KEY_M, "BTN_Y", 0 },
+ * { KEY_K, "BTN_Z", 1 },
+ * { KEY_P, "BTN_Z", 0 },
+ * { KEY_O, "BTN_THUMBL", 1 },
+ * { KEY_G, "BTN_THUMBL", 0 },
+ * { KEY_L, "BTN_THUMBR", 1 },
+ * { KEY_V, "BTN_THUMBR", 0 },
+ *
+ * { }
+ * };
+ *
+ * static int
+ * usage_for_key (int key)
+ * {
+ * int i;
+ * for (i = 0; i < 256; i++) {
+ * if (hid_keyboard[i] == key)
+ * return i;
+ * }
+ * assert(0);
+ * }
+ *
+ * int main (int argc, char **argv)
+ * {
+ * const struct icade_key_translation *trans;
+ * int max_usage = 0;
+ *
+ * for (trans = icade_keys; trans->from; trans++) {
+ * int usage = usage_for_key (trans->from);
+ * max_usage = usage > max_usage ? usage : max_usage;
+ * }
+ *
+ * printf ("#define ICADE_MAX_USAGE %d\n\n", max_usage);
+ * printf ("struct icade_key {\n");
+ * printf ("\tu16 to;\n");
+ * printf ("\tu8 press:1;\n");
+ * printf ("};\n\n");
+ * printf ("static const struct icade_key "
+ * "icade_usage_table[%d] = {\n", max_usage + 1);
+ * for (trans = icade_keys; trans->from; trans++) {
+ * printf ("\t[%d] = { %s, %d },\n",
+ * usage_for_key (trans->from), trans->to, trans->press);
+ * }
+ * printf ("};\n");
+ *
+ * return 0;
+ * }
+ */
+
+#define ICADE_MAX_USAGE 29
+
+struct icade_key {
+ u16 to;
+ u8 press:1;
+};
+
+static const struct icade_key icade_usage_table[30] = {
+ [26] = { KEY_UP, 1 },
+ [8] = { KEY_UP, 0 },
+ [7] = { KEY_RIGHT, 1 },
+ [6] = { KEY_RIGHT, 0 },
+ [27] = { KEY_DOWN, 1 },
+ [29] = { KEY_DOWN, 0 },
+ [4] = { KEY_LEFT, 1 },
+ [20] = { KEY_LEFT, 0 },
+ [28] = { BTN_A, 1 },
+ [23] = { BTN_A, 0 },
+ [11] = { BTN_B, 1 },
+ [21] = { BTN_B, 0 },
+ [24] = { BTN_C, 1 },
+ [9] = { BTN_C, 0 },
+ [13] = { BTN_X, 1 },
+ [17] = { BTN_X, 0 },
+ [12] = { BTN_Y, 1 },
+ [16] = { BTN_Y, 0 },
+ [14] = { BTN_Z, 1 },
+ [19] = { BTN_Z, 0 },
+ [18] = { BTN_THUMBL, 1 },
+ [10] = { BTN_THUMBL, 0 },
+ [15] = { BTN_THUMBR, 1 },
+ [25] = { BTN_THUMBR, 0 },
+};
+
+static const struct icade_key *icade_find_translation(u16 from)
+{
+ if (from > ICADE_MAX_USAGE)
+ return NULL;
+ return &icade_usage_table[from];
+}
+
+static int icade_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ const struct icade_key *trans;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
+ !usage->type)
+ return 0;
+
+ /* We ignore the fake key up, and act only on key down */
+ if (!value)
+ return 1;
+
+ trans = icade_find_translation(usage->hid & HID_USAGE);
+
+ if (!trans)
+ return 1;
+
+ input_event(field->hidinput->input, usage->type,
+ trans->to, trans->press);
+
+ return 1;
+}
+
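+/*
+ * Example of the translation above, based on the key table at the top of
+ * this file: pressing the joystick UP makes the iCade send a key-down for
+ * 'w' (keyboard usage 26), which icade_event() reports as KEY_UP with
+ * press = 1; releasing it sends a key-down for 'e' (usage 8), reported as
+ * KEY_UP with press = 0. The key-up half of each keystroke carries no
+ * button information (the "fake key up" above), so value == 0 events are
+ * dropped.
+ */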
+static int icade_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ const struct icade_key *trans;
+
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_KEYBOARD) {
+ trans = icade_find_translation(usage->hid & HID_USAGE);
+
+ if (!trans)
+ return -1;
+
+ hid_map_usage(hi, usage, bit, max, EV_KEY, trans->to);
+ set_bit(trans->to, hi->input->keybit);
+
+ return 1;
+ }
+
+ /* ignore others */
+ return -1;
+
+}
+
+static int icade_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY)
+ set_bit(usage->type, hi->input->evbit);
+
+ return -1;
+}
+
+static const struct hid_device_id icade_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+
+ { }
+};
+MODULE_DEVICE_TABLE(hid, icade_devices);
+
+static struct hid_driver icade_driver = {
+ .name = "icade",
+ .id_table = icade_devices,
+ .event = icade_event,
+ .input_mapped = icade_input_mapped,
+ .input_mapping = icade_input_mapping,
+};
+
+static int __init icade_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&icade_driver);
+ if (ret)
+ pr_err("can't register icade driver\n");
+
+ return ret;
+}
+
+static void __exit icade_exit(void)
+{
+ hid_unregister_driver(&icade_driver);
+}
+
+module_init(icade_init);
+module_exit(icade_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("ION iCade input driver");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9d7a42857ea..4dfa605e2d1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -257,6 +257,7 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_TOUCHCONTROLLER 0x0002
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207 0x7207
@@ -423,6 +424,9 @@
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
+#define USB_VENDOR_ID_ION 0x15e4
+#define USB_DEVICE_ID_ICADE 0x0132
+
#define USB_VENDOR_ID_HOLTEK 0x1241
#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
@@ -432,11 +436,6 @@
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
-#define USB_VENDOR_ID_INTEL_8086 0x8086
-#define USB_VENDOR_ID_INTEL_8087 0x8087
-#define USB_DEVICE_ID_SENSOR_HUB_1020 0x1020
-#define USB_DEVICE_ID_SENSOR_HUB_09FA 0x09FA
-
#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
@@ -603,6 +602,7 @@
#define USB_VENDOR_ID_NOVATEK 0x0603
#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
+#define USB_DEVICE_ID_NOVATEK_MOUSE 0x1602
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
@@ -677,7 +677,9 @@
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
+#define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
+#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
@@ -696,6 +698,9 @@
#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
+#define USB_VENDOR_ID_SIGMATEL 0x066F
+#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
@@ -714,7 +719,6 @@
#define USB_VENDOR_ID_STANTUM_STM 0x0483
#define USB_DEVICE_ID_MTP_STM 0x3261
-#define USB_DEVICE_ID_SENSOR_HUB_7014 0x7014
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
@@ -762,6 +766,9 @@
#define USB_VENDOR_ID_TOUCHPACK 0x1bfd
#define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
+#define USB_VENDOR_ID_TPV 0x25aa
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d917c0d5368..21b196c394b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,7 +192,6 @@ static int hidinput_setkeycode(struct input_dev *dev,
return -EINVAL;
}
-
/**
* hidinput_calc_abs_res - calculate an absolute axis resolution
* @field: the HID report field to calculate resolution for
@@ -208,7 +207,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
* Only exponent 1 length units are processed. Centimeters and inches are
* converted to millimeters. Degrees are converted to radians.
*/
-static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
+__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
{
__s32 unit_exponent = field->unit_exponent;
__s32 logical_extents = field->logical_maximum -
@@ -229,17 +228,29 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_X:
case ABS_Y:
case ABS_Z:
- if (field->unit == 0x11) { /* If centimeters */
+ case ABS_MT_POSITION_X:
+ case ABS_MT_POSITION_Y:
+ case ABS_MT_TOOL_X:
+ case ABS_MT_TOOL_Y:
+ case ABS_MT_TOUCH_MAJOR:
+ case ABS_MT_TOUCH_MINOR:
+ if (field->unit & 0xffffff00) /* Not a length */
+ return 0;
+ unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
+ switch (field->unit & 0xf) {
+ case 0x1: /* If centimeters */
/* Convert to millimeters */
unit_exponent += 1;
- } else if (field->unit == 0x13) { /* If inches */
+ break;
+ case 0x3: /* If inches */
/* Convert to millimeters */
prev = physical_extents;
physical_extents *= 254;
if (physical_extents < prev)
return 0;
unit_exponent -= 1;
- } else {
+ break;
+ default:
return 0;
}
break;
@@ -281,8 +292,9 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
}
/* Calculate resolution */
- return logical_extents / physical_extents;
+ return DIV_ROUND_CLOSEST(logical_extents, physical_extents);
}
+EXPORT_SYMBOL_GPL(hidinput_calc_abs_res);
#ifdef CONFIG_HID_BATTERY_STRENGTH
static enum power_supply_property hidinput_battery_props[] = {
@@ -299,6 +311,9 @@ static enum power_supply_property hidinput_battery_props[] = {
static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
@@ -502,9 +517,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (code <= 0xf)
code += BTN_JOYSTICK;
else
- code += BTN_TRIGGER_HAPPY;
+ code += BTN_TRIGGER_HAPPY - 0x10;
+ break;
+ case HID_GD_GAMEPAD:
+ if (code <= 0xf)
+ code += BTN_GAMEPAD;
+ else
+ code += BTN_TRIGGER_HAPPY - 0x10;
break;
- case HID_GD_GAMEPAD: code += BTN_GAMEPAD; break;
default:
switch (field->physical) {
case HID_GD_MOUSE:
@@ -1146,6 +1166,38 @@ static void report_features(struct hid_device *hid)
}
}
+static struct hid_input *hidinput_allocate(struct hid_device *hid)
+{
+ struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
+ struct input_dev *input_dev = input_allocate_device();
+ if (!hidinput || !input_dev) {
+ kfree(hidinput);
+ input_free_device(input_dev);
+ hid_err(hid, "Out of memory during hid input probe\n");
+ return NULL;
+ }
+
+ input_set_drvdata(input_dev, hid);
+ input_dev->event = hid->ll_driver->hidinput_input_event;
+ input_dev->open = hidinput_open;
+ input_dev->close = hidinput_close;
+ input_dev->setkeycode = hidinput_setkeycode;
+ input_dev->getkeycode = hidinput_getkeycode;
+
+ input_dev->name = hid->name;
+ input_dev->phys = hid->phys;
+ input_dev->uniq = hid->uniq;
+ input_dev->id.bustype = hid->bus;
+ input_dev->id.vendor = hid->vendor;
+ input_dev->id.product = hid->product;
+ input_dev->id.version = hid->version;
+ input_dev->dev.parent = hid->dev.parent;
+ hidinput->input = input_dev;
+ list_add_tail(&hidinput->list, &hid->inputs);
+
+ return hidinput;
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1157,7 +1209,6 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
struct hid_driver *drv = hid->driver;
struct hid_report *report;
struct hid_input *hidinput = NULL;
- struct input_dev *input_dev;
int i, j, k;
INIT_LIST_HEAD(&hid->inputs);
@@ -1188,33 +1239,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
continue;
if (!hidinput) {
- hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!hidinput || !input_dev) {
- kfree(hidinput);
- input_free_device(input_dev);
- hid_err(hid, "Out of memory during hid input probe\n");
+ hidinput = hidinput_allocate(hid);
+ if (!hidinput)
goto out_unwind;
- }
-
- input_set_drvdata(input_dev, hid);
- input_dev->event =
- hid->ll_driver->hidinput_input_event;
- input_dev->open = hidinput_open;
- input_dev->close = hidinput_close;
- input_dev->setkeycode = hidinput_setkeycode;
- input_dev->getkeycode = hidinput_getkeycode;
-
- input_dev->name = hid->name;
- input_dev->phys = hid->phys;
- input_dev->uniq = hid->uniq;
- input_dev->id.bustype = hid->bus;
- input_dev->id.vendor = hid->vendor;
- input_dev->id.product = hid->product;
- input_dev->id.version = hid->version;
- input_dev->dev.parent = hid->dev.parent;
- hidinput->input = input_dev;
- list_add_tail(&hidinput->list, &hid->inputs);
}
for (i = 0; i < report->maxfield; i++)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7867d69f0ef..61543c02ea0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -52,11 +52,14 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
#define MT_QUIRK_NO_AREA (1 << 9)
+#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
+#define MT_QUIRK_HOVERING (1 << 11)
struct mt_slot {
- __s32 x, y, p, w, h;
+ __s32 x, y, cx, cy, p, w, h;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
+ bool inrange_state; /* is the finger in proximity of the sensor? */
};
struct mt_class {
@@ -121,6 +124,7 @@ struct mt_device {
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
#define MT_DEFAULT_MAXCONTACT 10
+#define MT_MAX_MAXCONTACT 250
#define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
#define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
@@ -282,11 +286,26 @@ static void mt_feature_mapping(struct hid_device *hdev,
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
td->maxcontacts = field->value[0];
+ if (!td->maxcontacts &&
+ field->logical_maximum <= MT_MAX_MAXCONTACT)
+ td->maxcontacts = field->logical_maximum;
if (td->mtclass.maxcontacts)
/* check if the maxcontacts is given by the class */
td->maxcontacts = td->mtclass.maxcontacts;
break;
+ case 0xff0000c5:
+ if (field->report_count == 256 && field->report_size == 8) {
+ /* Win 8 devices need special quirks */
+ __s32 *quirks = &td->mtclass.quirks;
+ *quirks |= MT_QUIRK_ALWAYS_VALID;
+ *quirks |= MT_QUIRK_IGNORE_DUPLICATES;
+ *quirks |= MT_QUIRK_HOVERING;
+ *quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
+ *quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
+ *quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
+ }
+ break;
}
}
@@ -297,6 +316,7 @@ static void set_abs(struct input_dev *input, unsigned int code,
int fmax = field->logical_maximum;
int fuzz = snratio ? (fmax - fmin) / snratio : 0;
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+ input_abs_set_res(input, code, hidinput_calc_abs_res(field, code));
}
static void mt_store_field(struct hid_usage *usage, struct mt_device *td,
@@ -317,6 +337,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
int code;
+ struct hid_usage *prev_usage = NULL;
/* Only map fields from TouchScreen or TouchPad collections.
* We need to ignore fields that belong to other collections
@@ -339,23 +360,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->physical == HID_DG_STYLUS)
return -1;
+ if (usage->usage_index)
+ prev_usage = &field->usage[usage->usage_index - 1];
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- hid_map_usage(hi, usage, bit, max,
+ if (prev_usage && (prev_usage->hid == usage->hid)) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOOL_X);
+ set_abs(hi->input, ABS_MT_TOOL_X, field,
+ cls->sn_move);
+ } else {
+ hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
- set_abs(hi->input, ABS_MT_POSITION_X, field,
- cls->sn_move);
+ set_abs(hi->input, ABS_MT_POSITION_X, field,
+ cls->sn_move);
+ }
+
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
case HID_GD_Y:
- hid_map_usage(hi, usage, bit, max,
+ if (prev_usage && (prev_usage->hid == usage->hid)) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOOL_Y);
+ set_abs(hi->input, ABS_MT_TOOL_Y, field,
+ cls->sn_move);
+ } else {
+ hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
- set_abs(hi->input, ABS_MT_POSITION_Y, field,
- cls->sn_move);
+ set_abs(hi->input, ABS_MT_POSITION_Y, field,
+ cls->sn_move);
+ }
+
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
@@ -365,6 +405,12 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
+ if (cls->quirks & MT_QUIRK_HOVERING) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_DISTANCE);
+ input_set_abs_params(hi->input,
+ ABS_MT_DISTANCE, 0, 1, 0, 0);
+ }
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
@@ -477,18 +523,26 @@ static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
{
- if (td->curvalid) {
+ if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
+ struct input_mt *mt = input->mt;
if (slotnum < 0 || slotnum >= td->maxcontacts)
return;
+ if ((td->mtclass.quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
+ struct input_mt_slot *slot = &mt->slots[slotnum];
+ if (input_mt_is_active(slot) &&
+ input_mt_is_used(mt, slot))
+ return;
+ }
+
input_mt_slot(input, slotnum);
input_mt_report_slot_state(input, MT_TOOL_FINGER,
- s->touch_state);
- if (s->touch_state) {
- /* this finger is on the screen */
+ s->touch_state || s->inrange_state);
+ if (s->touch_state || s->inrange_state) {
+ /* this finger is in proximity of the sensor */
int wide = (s->w > s->h);
/* divided by two to match visual scale of touch */
int major = max(s->w, s->h) >> 1;
@@ -496,6 +550,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+ input_event(input, EV_ABS, ABS_MT_TOOL_X, s->cx);
+ input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
+ input_event(input, EV_ABS, ABS_MT_DISTANCE,
+ !s->touch_state);
input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
@@ -526,10 +584,10 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT) {
switch (usage->hid) {
case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_ALWAYS_VALID)
- td->curvalid = true;
- else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ if (quirks & MT_QUIRK_VALID_IS_INRANGE)
td->curvalid = value;
+ if (quirks & MT_QUIRK_HOVERING)
+ td->curdata.inrange_state = value;
break;
case HID_DG_TIPSWITCH:
if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
@@ -547,10 +605,16 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
td->curdata.p = value;
break;
case HID_GD_X:
- td->curdata.x = value;
+ if (usage->code == ABS_MT_TOOL_X)
+ td->curdata.cx = value;
+ else
+ td->curdata.x = value;
break;
case HID_GD_Y:
- td->curdata.y = value;
+ if (usage->code == ABS_MT_TOOL_Y)
+ td->curdata.cy = value;
+ else
+ td->curdata.y = value;
break;
case HID_DG_WIDTH:
td->curdata.w = value;
@@ -575,12 +639,15 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
- if (usage->hid == td->last_slot_field)
- mt_complete_slot(td, field->hidinput->input);
+ if (usage->usage_index + 1 == field->report_count) {
+ /* we only take into account the last report. */
+ if (usage->hid == td->last_slot_field)
+ mt_complete_slot(td, field->hidinput->input);
- if (field->index == td->last_field_index
- && td->num_received >= td->num_expected)
- mt_sync_frame(td, field->hidinput->input);
+ if (field->index == td->last_field_index
+ && td->num_received >= td->num_expected)
+ mt_sync_frame(td, field->hidinput->input);
+ }
}
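/*
 * A sketch of the hovering behaviour added above: with MT_QUIRK_HOVERING
 * set, HID_DG_INRANGE feeds curdata.inrange_state and is also mapped to
 * ABS_MT_DISTANCE. A finger hovering over the sensor (inrange_state = 1,
 * touch_state = 0) therefore keeps its slot active and reports
 * ABS_MT_DISTANCE = 1, while a touching finger reports ABS_MT_DISTANCE = 0.
 */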
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 13ca9191b63..a79e95bb9fb 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -116,7 +116,7 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
rdev->priv = data;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
rdev->input_name = data->hdev->name;
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 5669916c294..1219998a02d 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -167,7 +167,7 @@ static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj
loff_t off, size_t count) \
{ \
return isku_sysfs_write(fp, kobj, buf, off, count, \
- sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+ ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}
#define ISKU_SYSFS_R(thingy, THINGY) \
@@ -176,32 +176,32 @@ static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj,
loff_t off, size_t count) \
{ \
return isku_sysfs_read(fp, kobj, buf, off, count, \
- sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+ ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}
#define ISKU_SYSFS_RW(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY)
-#define ISKU_BIN_ATTR_RW(thingy) \
+#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0660 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
.write = isku_sysfs_write_ ## thingy \
}
-#define ISKU_BIN_ATTR_R(thingy) \
+#define ISKU_BIN_ATTR_R(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0440 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
}
-#define ISKU_BIN_ATTR_W(thingy) \
+#define ISKU_BIN_ATTR_W(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0220 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.write = isku_sysfs_write_ ## thingy \
}
@@ -218,21 +218,23 @@ ISKU_SYSFS_RW(last_set, LAST_SET)
ISKU_SYSFS_W(talk, TALK)
ISKU_SYSFS_R(info, INFO)
ISKU_SYSFS_W(control, CONTROL)
+ISKU_SYSFS_W(reset, RESET)
static struct bin_attribute isku_bin_attributes[] = {
- ISKU_BIN_ATTR_RW(macro),
- ISKU_BIN_ATTR_RW(keys_function),
- ISKU_BIN_ATTR_RW(keys_easyzone),
- ISKU_BIN_ATTR_RW(keys_media),
- ISKU_BIN_ATTR_RW(keys_thumbster),
- ISKU_BIN_ATTR_RW(keys_macro),
- ISKU_BIN_ATTR_RW(keys_capslock),
- ISKU_BIN_ATTR_RW(light),
- ISKU_BIN_ATTR_RW(key_mask),
- ISKU_BIN_ATTR_RW(last_set),
- ISKU_BIN_ATTR_W(talk),
- ISKU_BIN_ATTR_R(info),
- ISKU_BIN_ATTR_W(control),
+ ISKU_BIN_ATTR_RW(macro, MACRO),
+ ISKU_BIN_ATTR_RW(keys_function, KEYS_FUNCTION),
+ ISKU_BIN_ATTR_RW(keys_easyzone, KEYS_EASYZONE),
+ ISKU_BIN_ATTR_RW(keys_media, KEYS_MEDIA),
+ ISKU_BIN_ATTR_RW(keys_thumbster, KEYS_THUMBSTER),
+ ISKU_BIN_ATTR_RW(keys_macro, KEYS_MACRO),
+ ISKU_BIN_ATTR_RW(keys_capslock, KEYS_CAPSLOCK),
+ ISKU_BIN_ATTR_RW(light, LIGHT),
+ ISKU_BIN_ATTR_RW(key_mask, KEY_MASK),
+ ISKU_BIN_ATTR_RW(last_set, LAST_SET),
+ ISKU_BIN_ATTR_W(talk, TALK),
+ ISKU_BIN_ATTR_R(info, INFO),
+ ISKU_BIN_ATTR_W(control, CONTROL),
+ ISKU_BIN_ATTR_W(reset, RESET),
__ATTR_NULL
};
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
index 605b3ce2163..cf6896c8386 100644
--- a/drivers/hid/hid-roccat-isku.h
+++ b/drivers/hid/hid-roccat-isku.h
@@ -15,76 +15,33 @@
#include <linux/types.h>
enum {
+ ISKU_SIZE_CONTROL = 0x03,
+ ISKU_SIZE_INFO = 0x06,
+ ISKU_SIZE_KEY_MASK = 0x06,
+ ISKU_SIZE_KEYS_FUNCTION = 0x29,
+ ISKU_SIZE_KEYS_EASYZONE = 0x41,
+ ISKU_SIZE_KEYS_MEDIA = 0x1d,
+ ISKU_SIZE_KEYS_THUMBSTER = 0x17,
+ ISKU_SIZE_KEYS_MACRO = 0x23,
+ ISKU_SIZE_KEYS_CAPSLOCK = 0x06,
+ ISKU_SIZE_LAST_SET = 0x14,
+ ISKU_SIZE_LIGHT = 0x0a,
+ ISKU_SIZE_MACRO = 0x823,
+ ISKU_SIZE_RESET = 0x03,
+ ISKU_SIZE_TALK = 0x10,
+};
+
+enum {
ISKU_PROFILE_NUM = 5,
ISKU_USB_INTERFACE_PROTOCOL = 0,
};
-struct isku_control {
- uint8_t command; /* ISKU_COMMAND_CONTROL */
- uint8_t value;
- uint8_t request;
-} __packed;
-
struct isku_actual_profile {
uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
uint8_t actual_profile;
} __packed;
-struct isku_key_mask {
- uint8_t command; /* ISKU_COMMAND_KEY_MASK */
- uint8_t size; /* 6 */
- uint8_t profile_number; /* 0-4 */
- uint8_t mask;
- uint16_t checksum;
-} __packed;
-
-struct isku_keys_function {
- uint8_t data[0x29];
-} __packed;
-
-struct isku_keys_easyzone {
- uint8_t data[0x41];
-} __packed;
-
-struct isku_keys_media {
- uint8_t data[0x1d];
-} __packed;
-
-struct isku_keys_thumbster {
- uint8_t data[0x17];
-} __packed;
-
-struct isku_keys_macro {
- uint8_t data[0x23];
-} __packed;
-
-struct isku_keys_capslock {
- uint8_t data[0x6];
-} __packed;
-
-struct isku_macro {
- uint8_t data[0x823];
-} __packed;
-
-struct isku_light {
- uint8_t data[0xa];
-} __packed;
-
-struct isku_info {
- uint8_t data[2];
- uint8_t firmware_version;
- uint8_t unknown[3];
-} __packed;
-
-struct isku_talk {
- uint8_t data[0x10];
-} __packed;
-
-struct isku_last_set {
- uint8_t data[0x14];
-} __packed;
-
enum isku_commands {
ISKU_COMMAND_CONTROL = 0x4,
ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
@@ -97,6 +54,7 @@ enum isku_commands {
ISKU_COMMAND_MACRO = 0xe,
ISKU_COMMAND_INFO = 0xf,
ISKU_COMMAND_LIGHT = 0x10,
+ ISKU_COMMAND_RESET = 0x11,
ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
ISKU_COMMAND_LAST_SET = 0x14,
ISKU_COMMAND_15 = 0x15,
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index f5602fec486..6a48fa3c7da 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -14,6 +14,7 @@
/*
* Roccat Kone[+] is an updated/improved version of the Kone with more memory
* and functionality and without the non-standard behaviours the Kone had.
+ * The KoneXTD has the same capabilities but an updated sensor.
*/
#include <linux/device.h>
@@ -55,56 +56,6 @@ static int koneplus_send_control(struct usb_device *usb_dev, uint value,
&control, sizeof(struct roccat_common2_control));
}
-static int koneplus_get_info(struct usb_device *usb_dev,
- struct koneplus_info *buf)
-{
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
- buf, sizeof(struct koneplus_info));
-}
-
-static int koneplus_get_profile_settings(struct usb_device *usb_dev,
- struct koneplus_profile_settings *buf, uint number)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number,
- KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
- if (retval)
- return retval;
-
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct koneplus_profile_settings));
-}
-
-static int koneplus_set_profile_settings(struct usb_device *usb_dev,
- struct koneplus_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- KONEPLUS_COMMAND_PROFILE_SETTINGS,
- settings, sizeof(struct koneplus_profile_settings));
-}
-
-static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
- struct koneplus_profile_buttons *buf, int number)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number,
- KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
- if (retval)
- return retval;
-
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct koneplus_profile_buttons));
-}
-
-static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
- struct koneplus_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- KONEPLUS_COMMAND_PROFILE_BUTTONS,
- buttons, sizeof(struct koneplus_profile_buttons));
-}
/* retval is 0-4 on success, < 0 on error */
static int koneplus_get_actual_profile(struct usb_device *usb_dev)
@@ -113,7 +64,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
int retval;
retval = roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
- &buf, sizeof(struct koneplus_actual_profile));
+ &buf, KONEPLUS_SIZE_ACTUAL_PROFILE);
return retval ? retval : buf.actual_profile;
}
@@ -124,12 +75,12 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
struct koneplus_actual_profile buf;
buf.command = KONEPLUS_COMMAND_ACTUAL_PROFILE;
- buf.size = sizeof(struct koneplus_actual_profile);
+ buf.size = KONEPLUS_SIZE_ACTUAL_PROFILE;
buf.actual_profile = new_profile;
return roccat_common2_send_with_status(usb_dev,
KONEPLUS_COMMAND_ACTUAL_PROFILE,
- &buf, sizeof(struct koneplus_actual_profile));
+ &buf, KONEPLUS_SIZE_ACTUAL_PROFILE);
}
static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
@@ -182,111 +133,77 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return real_size;
}
-static ssize_t koneplus_sysfs_write_talk(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_talk), KONEPLUS_COMMAND_TALK);
+#define KONEPLUS_SYSFS_W(thingy, THINGY) \
+static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return koneplus_sysfs_write(fp, kobj, buf, off, count, \
+ KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
}
-static ssize_t koneplus_sysfs_write_macro(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_macro), KONEPLUS_COMMAND_MACRO);
+#define KONEPLUS_SYSFS_R(thingy, THINGY) \
+static ssize_t koneplus_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return koneplus_sysfs_read(fp, kobj, buf, off, count, \
+ KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
}
-static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
-}
+#define KONEPLUS_SYSFS_RW(thingy, THINGY) \
+KONEPLUS_SYSFS_W(thingy, THINGY) \
+KONEPLUS_SYSFS_R(thingy, THINGY)
-static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
+#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .read = koneplus_sysfs_read_ ## thingy, \
+ .write = koneplus_sysfs_write_ ## thingy \
}
-static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu), KONEPLUS_COMMAND_TCU);
+#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .read = koneplus_sysfs_read_ ## thingy, \
}
-static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu_image), KONEPLUS_COMMAND_TCU);
+#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .write = koneplus_sysfs_write_ ## thingy \
}
-static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct koneplus_profile_settings))
- return 0;
+KONEPLUS_SYSFS_W(control, CONTROL)
+KONEPLUS_SYSFS_RW(info, INFO)
+KONEPLUS_SYSFS_W(talk, TALK)
+KONEPLUS_SYSFS_W(macro, MACRO)
+KONEPLUS_SYSFS_RW(sensor, SENSOR)
+KONEPLUS_SYSFS_RW(tcu, TCU)
+KONEPLUS_SYSFS_R(tcu_image, TCU_IMAGE)
+KONEPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+KONEPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
- if (off + count > sizeof(struct koneplus_profile_settings))
- count = sizeof(struct koneplus_profile_settings) - off;
-
- mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&koneplus->koneplus_lock);
-
- return count;
-}
-
-static ssize_t koneplus_sysfs_write_profile_settings(struct file *fp,
+static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct koneplus_profile_settings *profile_settings;
-
- if (off != 0 || count != sizeof(struct koneplus_profile_settings))
- return -EINVAL;
-
- profile_number = ((struct koneplus_profile_settings const *)buf)->number;
- profile_settings = &koneplus->profile_settings[profile_number];
-
- mutex_lock(&koneplus->koneplus_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct koneplus_profile_settings));
- if (difference) {
- retval = koneplus_set_profile_settings(usb_dev,
- (struct koneplus_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct koneplus_profile_settings));
- }
- mutex_unlock(&koneplus->koneplus_lock);
+ ssize_t retval;
+ retval = koneplus_send_control(usb_dev, *(uint *)(attr->private),
+ KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return sizeof(struct koneplus_profile_settings);
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ KONEPLUS_SIZE_PROFILE_SETTINGS,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS);
}
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
@@ -295,57 +212,17 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct koneplus_profile_buttons))
- return 0;
-
- if (off + count > sizeof(struct koneplus_profile_buttons))
- count = sizeof(struct koneplus_profile_buttons) - off;
-
- mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&koneplus->koneplus_lock);
-
- return count;
-}
-
-static ssize_t koneplus_sysfs_write_profile_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- uint profile_number;
- struct koneplus_profile_buttons *profile_buttons;
-
- if (off != 0 || count != sizeof(struct koneplus_profile_buttons))
- return -EINVAL;
-
- profile_number = ((struct koneplus_profile_buttons const *)buf)->number;
- profile_buttons = &koneplus->profile_buttons[profile_number];
-
- mutex_lock(&koneplus->koneplus_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct koneplus_profile_buttons));
- if (difference) {
- retval = koneplus_set_profile_buttons(usb_dev,
- (struct koneplus_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct koneplus_profile_buttons));
- }
- mutex_unlock(&koneplus->koneplus_lock);
+ ssize_t retval;
+ retval = koneplus_send_control(usb_dev, *(uint *)(attr->private),
+ KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return sizeof(struct koneplus_profile_buttons);
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ KONEPLUS_SIZE_PROFILE_BUTTONS,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS);
}
static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
@@ -401,9 +278,20 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct koneplus_device *koneplus =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->info.firmware_version);
+ struct koneplus_device *koneplus;
+ struct usb_device *usb_dev;
+ struct koneplus_info info;
+
+ dev = dev->parent->parent;
+ koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&koneplus->koneplus_lock);
+ roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
+ &info, KONEPLUS_SIZE_INFO);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute koneplus_attributes[] = {
@@ -419,132 +307,85 @@ static struct device_attribute koneplus_attributes[] = {
};
static struct bin_attribute koneplus_bin_attributes[] = {
- {
- .attr = { .name = "sensor", .mode = 0660 },
- .size = sizeof(struct koneplus_sensor),
- .read = koneplus_sysfs_read_sensor,
- .write = koneplus_sysfs_write_sensor
- },
- {
- .attr = { .name = "tcu", .mode = 0220 },
- .size = sizeof(struct koneplus_tcu),
- .write = koneplus_sysfs_write_tcu
- },
- {
- .attr = { .name = "tcu_image", .mode = 0440 },
- .size = sizeof(struct koneplus_tcu_image),
- .read = koneplus_sysfs_read_tcu_image
- },
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct koneplus_profile_settings),
- .write = koneplus_sysfs_write_profile_settings
- },
+ KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
+ KONEPLUS_BIN_ATTRIBUTE_RW(info, INFO),
+ KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK),
+ KONEPLUS_BIN_ATTRIBUTE_W(macro, MACRO),
+ KONEPLUS_BIN_ATTRIBUTE_RW(sensor, SENSOR),
+ KONEPLUS_BIN_ATTRIBUTE_RW(tcu, TCU),
+ KONEPLUS_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE),
+ KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct koneplus_profile_buttons),
- .write = koneplus_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
- {
- .attr = { .name = "macro", .mode = 0220 },
- .size = sizeof(struct koneplus_macro),
- .write = koneplus_sysfs_write_macro
- },
- {
- .attr = { .name = "talk", .mode = 0220 },
- .size = sizeof(struct koneplus_talk),
- .write = koneplus_sysfs_write_talk
- },
__ATTR_NULL
};
static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
struct koneplus_device *koneplus)
{
- int retval, i;
- static uint wait = 200;
+ int retval;
mutex_init(&koneplus->koneplus_lock);
- retval = koneplus_get_info(usb_dev, &koneplus->info);
- if (retval)
- return retval;
-
- for (i = 0; i < 5; ++i) {
- msleep(wait);
- retval = koneplus_get_profile_settings(usb_dev,
- &koneplus->profile_settings[i], i);
- if (retval)
- return retval;
-
- msleep(wait);
- retval = koneplus_get_profile_buttons(usb_dev,
- &koneplus->profile_buttons[i], i);
- if (retval)
- return retval;
- }
-
- msleep(wait);
retval = koneplus_get_actual_profile(usb_dev);
if (retval < 0)
return retval;
@@ -709,6 +550,7 @@ static int koneplus_raw_event(struct hid_device *hdev,
static const struct hid_device_id koneplus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
{ }
};
@@ -749,5 +591,5 @@ module_init(koneplus_init);
module_exit(koneplus_exit);
MODULE_AUTHOR("Stefan Achatz");
-MODULE_DESCRIPTION("USB Roccat Kone[+] driver");
+MODULE_DESCRIPTION("USB Roccat Kone[+]/XTD driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index 7074b2a4b94..af7f57e8cf3 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -14,11 +14,19 @@
#include <linux/types.h>
-struct koneplus_talk {
- uint8_t command; /* KONEPLUS_COMMAND_TALK */
- uint8_t size; /* always 0x10 */
- uint8_t data[14];
-} __packed;
+enum {
+ KONEPLUS_SIZE_ACTUAL_PROFILE = 0x03,
+ KONEPLUS_SIZE_CONTROL = 0x03,
+ KONEPLUS_SIZE_FIRMWARE_WRITE = 0x0402,
+ KONEPLUS_SIZE_INFO = 0x06,
+ KONEPLUS_SIZE_MACRO = 0x0822,
+ KONEPLUS_SIZE_PROFILE_SETTINGS = 0x2b,
+ KONEPLUS_SIZE_PROFILE_BUTTONS = 0x4d,
+ KONEPLUS_SIZE_SENSOR = 0x06,
+ KONEPLUS_SIZE_TALK = 0x10,
+ KONEPLUS_SIZE_TCU = 0x04,
+ KONEPLUS_SIZE_TCU_IMAGE = 0x0404,
+};
enum koneplus_control_requests {
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
@@ -31,45 +39,6 @@ struct koneplus_actual_profile {
uint8_t actual_profile; /* Range 0-4! */
} __attribute__ ((__packed__));
-struct koneplus_profile_settings {
- uint8_t command; /* KONEPLUS_COMMAND_PROFILE_SETTINGS */
- uint8_t size; /* always 43 */
- uint8_t number; /* range 0-4 */
- uint8_t advanced_sensitivity;
- uint8_t sensitivity_x;
- uint8_t sensitivity_y;
- uint8_t cpi_levels_enabled;
- uint8_t cpi_levels_x[5];
- uint8_t cpi_startup_level; /* range 0-4 */
- uint8_t cpi_levels_y[5]; /* range 1-60 means 100-6000 cpi */
- uint8_t unknown1;
- uint8_t polling_rate;
- uint8_t lights_enabled;
- uint8_t light_effect_mode;
- uint8_t color_flow_effect;
- uint8_t light_effect_type;
- uint8_t light_effect_speed;
- uint8_t lights[16];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
-struct koneplus_profile_buttons {
- uint8_t command; /* KONEPLUS_COMMAND_PROFILE_BUTTONS */
- uint8_t size; /* always 77 */
- uint8_t number; /* range 0-4 */
- uint8_t data[72];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
-struct koneplus_macro {
- uint8_t command; /* KONEPLUS_COMMAND_MACRO */
- uint16_t size; /* always 0x822 little endian */
- uint8_t profile; /* range 0-4 */
- uint8_t button; /* range 0-23 */
- uint8_t data[2075];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
struct koneplus_info {
uint8_t command; /* KONEPLUS_COMMAND_INFO */
uint8_t size; /* always 6 */
@@ -77,51 +46,15 @@ struct koneplus_info {
uint8_t unknown[3];
} __attribute__ ((__packed__));
-struct koneplus_e {
- uint8_t command; /* KONEPLUS_COMMAND_E */
- uint8_t size; /* always 3 */
- uint8_t unknown; /* TODO 1; 0 before firmware update */
-} __attribute__ ((__packed__));
-
-struct koneplus_sensor {
- uint8_t command; /* KONEPLUS_COMMAND_SENSOR */
- uint8_t size; /* always 6 */
- uint8_t data[4];
-} __attribute__ ((__packed__));
-
-struct koneplus_firmware_write {
- uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE */
- uint8_t unknown[1025];
-} __attribute__ ((__packed__));
-
-struct koneplus_firmware_write_control {
- uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL */
- /*
- * value is 1 on success
- * 3 means "not finished yet"
- */
- uint8_t value;
- uint8_t unknown; /* always 0x75 */
-} __attribute__ ((__packed__));
-
-struct koneplus_tcu {
- uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
- uint8_t data[2];
-} __attribute__ ((__packed__));
-
-struct koneplus_tcu_image {
- uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
- uint8_t data[1024];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
enum koneplus_commands {
KONEPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KONEPLUS_COMMAND_CONTROL = 0x4,
KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KONEPLUS_COMMAND_MACRO = 0x8,
KONEPLUS_COMMAND_INFO = 0x9,
KONEPLUS_COMMAND_TCU = 0xc,
+ KONEPLUS_COMMAND_TCU_IMAGE = 0xc,
KONEPLUS_COMMAND_E = 0xe,
KONEPLUS_COMMAND_SENSOR = 0xf,
KONEPLUS_COMMAND_TALK = 0x10,
@@ -187,10 +120,6 @@ struct koneplus_device {
int chrdev_minor;
struct mutex koneplus_lock;
-
- struct koneplus_info info;
- struct koneplus_profile_settings profile_settings[5];
- struct koneplus_profile_buttons profile_buttons[5];
};
#endif
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index ca6527ac655..b8b37789b86 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -70,13 +70,6 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
return kovaplus_send_control(usb_dev, number, request);
}
-static int kovaplus_get_info(struct usb_device *usb_dev,
- struct kovaplus_info *buf)
-{
- return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
- buf, sizeof(struct kovaplus_info));
-}
-
static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings *buf, uint number)
{
@@ -88,15 +81,7 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
return retval;
return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct kovaplus_profile_settings));
-}
-
-static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
- struct kovaplus_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- KOVAPLUS_COMMAND_PROFILE_SETTINGS,
- settings, sizeof(struct kovaplus_profile_settings));
+ buf, KOVAPLUS_SIZE_PROFILE_SETTINGS);
}
static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
@@ -110,15 +95,7 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
return retval;
return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct kovaplus_profile_buttons));
-}
-
-static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
- struct kovaplus_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- KOVAPLUS_COMMAND_PROFILE_BUTTONS,
- buttons, sizeof(struct kovaplus_profile_buttons));
+ buf, KOVAPLUS_SIZE_PROFILE_BUTTONS);
}
/* retval is 0-4 on success, < 0 on error */
@@ -147,122 +124,141 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
&buf, sizeof(struct kovaplus_actual_profile));
}
-static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t kovaplus_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct kovaplus_profile_settings))
+ if (off >= real_size)
return 0;
- if (off + count > sizeof(struct kovaplus_profile_settings))
- count = sizeof(struct kovaplus_profile_settings) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&kovaplus->kovaplus_lock);
- memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&kovaplus->kovaplus_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_index;
- struct kovaplus_profile_settings *profile_settings;
+ int retval;
- if (off != 0 || count != sizeof(struct kovaplus_profile_settings))
+ if (off != 0 || count != real_size)
return -EINVAL;
- profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index;
- profile_settings = &kovaplus->profile_settings[profile_index];
-
mutex_lock(&kovaplus->kovaplus_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct kovaplus_profile_settings));
- if (difference) {
- retval = kovaplus_set_profile_settings(usb_dev,
- (struct kovaplus_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct kovaplus_profile_settings));
- }
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ buf, real_size);
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
- return sizeof(struct kovaplus_profile_settings);
+ return real_size;
}
-static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+#define KOVAPLUS_SYSFS_W(thingy, THINGY) \
+static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return kovaplus_sysfs_write(fp, kobj, buf, off, count, \
+ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
+}
- if (off >= sizeof(struct kovaplus_profile_buttons))
- return 0;
+#define KOVAPLUS_SYSFS_R(thingy, THINGY) \
+static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count, \
+ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
+}
- if (off + count > sizeof(struct kovaplus_profile_buttons))
- count = sizeof(struct kovaplus_profile_buttons) - off;
+#define KOVAPLUS_SYSFS_RW(thingy, THINGY) \
+KOVAPLUS_SYSFS_W(thingy, THINGY) \
+KOVAPLUS_SYSFS_R(thingy, THINGY)
- mutex_lock(&kovaplus->kovaplus_lock);
- memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&kovaplus->kovaplus_lock);
+#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .read = kovaplus_sysfs_read_ ## thingy, \
+ .write = kovaplus_sysfs_write_ ## thingy \
+}
+
+#define KOVAPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .read = kovaplus_sysfs_read_ ## thingy, \
+}
- return count;
+#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .write = kovaplus_sysfs_write_ ## thingy \
}
-static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp,
+KOVAPLUS_SYSFS_W(control, CONTROL)
+KOVAPLUS_SYSFS_RW(info, INFO)
+KOVAPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+KOVAPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
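For reference, here is what one of these invocations produces once the preprocessor has run; this hand expansion of KOVAPLUS_SYSFS_R(info, INFO) is only an illustration derived from the macros above, not an additional hunk of the patch:

static ssize_t kovaplus_sysfs_read_info(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	/* 0x06 bytes, command 0x9, per the new KOVAPLUS_SIZE_/COMMAND_ enums */
	return kovaplus_sysfs_read(fp, kobj, buf, off, count,
			KOVAPLUS_SIZE_INFO, KOVAPLUS_COMMAND_INFO);
}

The RW variant simply emits the matching kovaplus_sysfs_write_info() wrapper as well.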
+
+static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- uint profile_index;
- struct kovaplus_profile_buttons *profile_buttons;
+ ssize_t retval;
- if (off != 0 || count != sizeof(struct kovaplus_profile_buttons))
- return -EINVAL;
+ retval = kovaplus_select_profile(usb_dev, *(uint *)(attr->private),
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+ if (retval)
+ return retval;
- profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index;
- profile_buttons = &kovaplus->profile_buttons[profile_index];
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count,
+ KOVAPLUS_SIZE_PROFILE_SETTINGS,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS);
+}
- mutex_lock(&kovaplus->kovaplus_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct kovaplus_profile_buttons));
- if (difference) {
- retval = kovaplus_set_profile_buttons(usb_dev,
- (struct kovaplus_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct kovaplus_profile_buttons));
- }
- mutex_unlock(&kovaplus->kovaplus_lock);
+static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ ssize_t retval;
+ retval = kovaplus_select_profile(usb_dev, *(uint *)(attr->private),
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return sizeof(struct kovaplus_profile_buttons);
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count,
+ KOVAPLUS_SIZE_PROFILE_BUTTONS,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS);
}
static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
@@ -342,9 +338,20 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kovaplus_device *kovaplus =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version);
+ struct kovaplus_device *kovaplus;
+ struct usb_device *usb_dev;
+ struct kovaplus_info info;
+
+ dev = dev->parent->parent;
+ kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
+ &info, KOVAPLUS_SIZE_INFO);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute kovaplus_attributes[] = {
@@ -363,73 +370,67 @@ static struct device_attribute kovaplus_attributes[] = {
};
static struct bin_attribute kovaplus_bin_attributes[] = {
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct kovaplus_profile_settings),
- .write = kovaplus_sysfs_write_profile_settings
- },
+ KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct kovaplus_profile_buttons),
- .write = kovaplus_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
@@ -444,10 +445,6 @@ static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
mutex_init(&kovaplus->kovaplus_lock);
- retval = kovaplus_get_info(usb_dev, &kovaplus->info);
- if (retval)
- return retval;
-
for (i = 0; i < 5; ++i) {
msleep(wait);
retval = kovaplus_get_profile_settings(usb_dev,
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index f82daa1cdcb..fbb7a16a7e5 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -14,6 +14,13 @@
#include <linux/types.h>
+enum {
+ KOVAPLUS_SIZE_CONTROL = 0x03,
+ KOVAPLUS_SIZE_INFO = 0x06,
+ KOVAPLUS_SIZE_PROFILE_SETTINGS = 0x10,
+ KOVAPLUS_SIZE_PROFILE_BUTTONS = 0x17,
+};
+
enum kovaplus_control_requests {
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
@@ -53,15 +60,9 @@ struct kovaplus_info {
uint8_t unknown[3];
} __packed;
-/* writes 1 on plugin */
-struct kovaplus_a {
- uint8_t command; /* KOVAPLUS_COMMAND_A */
- uint8_t size; /* 3 */
- uint8_t unknown;
-} __packed;
-
enum kovaplus_commands {
KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KOVAPLUS_COMMAND_CONTROL = 0x4,
KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KOVAPLUS_COMMAND_INFO = 0x9,
@@ -125,7 +126,6 @@ struct kovaplus_device {
int roccat_claimed;
int chrdev_minor;
struct mutex kovaplus_lock;
- struct kovaplus_info info;
struct kovaplus_profile_settings profile_settings[5];
struct kovaplus_profile_buttons profile_buttons[5];
};
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
new file mode 100644
index 00000000000..5084fb4b7e9
--- /dev/null
+++ b/drivers/hid/hid-roccat-lua.c
@@ -0,0 +1,227 @@
+/*
+ * Roccat Lua driver for Linux
+ *
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Lua is a gamer mouse whose cpi, button and light settings can be
+ * configured.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-lua.h"
+
+static ssize_t lua_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&lua->lua_lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&lua->lua_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&lua->lua_lock);
+ retval = roccat_common2_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&lua->lua_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define LUA_SYSFS_W(thingy, THINGY) \
+static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ return lua_sysfs_write(fp, kobj, buf, off, count, \
+ LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
+}
+
+#define LUA_SYSFS_R(thingy, THINGY) \
+static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ return lua_sysfs_read(fp, kobj, buf, off, count, \
+ LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
+}
+
+#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+LUA_SYSFS_W(thingy, THINGY) \
+LUA_SYSFS_R(thingy, THINGY) \
+static struct bin_attribute lua_ ## thingy ## _attr = { \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = LUA_SIZE_ ## THINGY, \
+ .read = lua_sysfs_read_ ## thingy, \
+ .write = lua_sysfs_write_ ## thingy \
+};
+
+LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
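The single invocation above is where lua_control_attr comes from: expanding LUA_BIN_ATTRIBUTE_RW(control, CONTROL) by hand (again only as an illustration, not part of the patch) yields lua_sysfs_write_control(), lua_sysfs_read_control() and the attribute that lua_create_sysfs_attributes() registers below:

static struct bin_attribute lua_control_attr = {
	.attr = { .name = "control", .mode = 0660 },
	.size = LUA_SIZE_CONTROL,	/* 8 bytes, see hid-roccat-lua.h */
	.read = lua_sysfs_read_control,
	.write = lua_sysfs_write_control
};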
+
+static int lua_create_sysfs_attributes(struct usb_interface *intf)
+{
+ return sysfs_create_bin_file(&intf->dev.kobj, &lua_control_attr);
+}
+
+static void lua_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &lua_control_attr);
+}
+
+static int lua_init_lua_device_struct(struct usb_device *usb_dev,
+ struct lua_device *lua)
+{
+ mutex_init(&lua->lua_lock);
+
+ return 0;
+}
+
+static int lua_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct lua_device *lua;
+ int retval;
+
+ lua = kzalloc(sizeof(*lua), GFP_KERNEL);
+ if (!lua) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, lua);
+
+ retval = lua_init_lua_device_struct(usb_dev, lua);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct lua_device\n");
+ goto exit;
+ }
+
+ retval = lua_create_sysfs_attributes(intf);
+ if (retval) {
+ hid_err(hdev, "cannot create sysfs files\n");
+ goto exit;
+ }
+
+ return 0;
+exit:
+ kfree(lua);
+ return retval;
+}
+
+static void lua_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct lua_device *lua;
+
+ lua_remove_sysfs_attributes(intf);
+
+ lua = hid_get_drvdata(hdev);
+ kfree(lua);
+}
+
+static int lua_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = lua_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void lua_remove(struct hid_device *hdev)
+{
+ lua_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id lua_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, lua_devices);
+
+static struct hid_driver lua_driver = {
+ .name = "lua",
+ .id_table = lua_devices,
+ .probe = lua_probe,
+ .remove = lua_remove
+};
+
+static int __init lua_init(void)
+{
+ return hid_register_driver(&lua_driver);
+}
+
+static void __exit lua_exit(void)
+{
+ hid_unregister_driver(&lua_driver);
+}
+
+module_init(lua_init);
+module_exit(lua_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Lua driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-lua.h b/drivers/hid/hid-roccat-lua.h
new file mode 100644
index 00000000000..547d77a375d
--- /dev/null
+++ b/drivers/hid/hid-roccat-lua.h
@@ -0,0 +1,29 @@
+#ifndef __HID_ROCCAT_LUA_H
+#define __HID_ROCCAT_LUA_H
+
+/*
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ LUA_SIZE_CONTROL = 8,
+};
+
+enum lua_commands {
+ LUA_COMMAND_CONTROL = 3,
+};
+
+struct lua_device {
+ struct mutex lua_lock;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1317c177a3e..d4f1e3bee59 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -66,48 +66,14 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct pyra_profile_settings));
-}
-
-static int pyra_get_profile_buttons(struct usb_device *usb_dev,
- struct pyra_profile_buttons *buf, int number)
-{
- int retval;
- retval = pyra_send_control(usb_dev, number,
- PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
- if (retval)
- return retval;
- return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct pyra_profile_buttons));
+ buf, PYRA_SIZE_PROFILE_SETTINGS);
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
- buf, sizeof(struct pyra_settings));
-}
-
-static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
-{
- return roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
- buf, sizeof(struct pyra_info));
-}
-
-static int pyra_set_profile_settings(struct usb_device *usb_dev,
- struct pyra_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- PYRA_COMMAND_PROFILE_SETTINGS, settings,
- sizeof(struct pyra_profile_settings));
-}
-
-static int pyra_set_profile_buttons(struct usb_device *usb_dev,
- struct pyra_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- PYRA_COMMAND_PROFILE_BUTTONS, buttons,
- sizeof(struct pyra_profile_buttons));
+ buf, PYRA_SIZE_SETTINGS);
}
static int pyra_set_settings(struct usb_device *usb_dev,
@@ -115,146 +81,144 @@ static int pyra_set_settings(struct usb_device *usb_dev,
{
return roccat_common2_send_with_status(usb_dev,
PYRA_COMMAND_SETTINGS, settings,
- sizeof(struct pyra_settings));
+ PYRA_SIZE_SETTINGS);
}
-static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pyra_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct pyra_profile_settings))
+ if (off >= real_size)
return 0;
- if (off + count > sizeof(struct pyra_profile_settings))
- count = sizeof(struct pyra_profile_settings) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_settings[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&pyra->pyra_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct pyra_profile_buttons))
- return 0;
-
- if (off + count > sizeof(struct pyra_profile_buttons))
- count = sizeof(struct pyra_profile_buttons) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_send_with_status(usb_dev, command, (void *)buf, real_size);
mutex_unlock(&pyra->pyra_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct pyra_profile_settings *profile_settings;
+#define PYRA_SYSFS_W(thingy, THINGY) \
+static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return pyra_sysfs_write(fp, kobj, buf, off, count, \
+ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
+}
- if (off != 0 || count != sizeof(struct pyra_profile_settings))
- return -EINVAL;
+#define PYRA_SYSFS_R(thingy, THINGY) \
+static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return pyra_sysfs_read(fp, kobj, buf, off, count, \
+ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
+}
- profile_number = ((struct pyra_profile_settings const *)buf)->number;
- profile_settings = &pyra->profile_settings[profile_number];
+#define PYRA_SYSFS_RW(thingy, THINGY) \
+PYRA_SYSFS_W(thingy, THINGY) \
+PYRA_SYSFS_R(thingy, THINGY)
- mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct pyra_profile_settings));
- if (difference) {
- retval = pyra_set_profile_settings(usb_dev,
- (struct pyra_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct pyra_profile_settings));
- }
- mutex_unlock(&pyra->pyra_lock);
+#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .read = pyra_sysfs_read_ ## thingy, \
+ .write = pyra_sysfs_write_ ## thingy \
+}
- if (retval)
- return retval;
+#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .read = pyra_sysfs_read_ ## thingy, \
+}
- return sizeof(struct pyra_profile_settings);
+#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .write = pyra_sysfs_write_ ## thingy \
}
-static ssize_t pyra_sysfs_write_profile_buttons(struct file *fp,
+PYRA_SYSFS_W(control, CONTROL)
+PYRA_SYSFS_RW(info, INFO)
+PYRA_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+PYRA_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
+PYRA_SYSFS_R(settings, SETTINGS)
+
+static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct pyra_profile_buttons *profile_buttons;
-
- if (off != 0 || count != sizeof(struct pyra_profile_buttons))
- return -EINVAL;
-
- profile_number = ((struct pyra_profile_buttons const *)buf)->number;
- profile_buttons = &pyra->profile_buttons[profile_number];
-
- mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct pyra_profile_buttons));
- if (difference) {
- retval = pyra_set_profile_buttons(usb_dev,
- (struct pyra_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct pyra_profile_buttons));
- }
- mutex_unlock(&pyra->pyra_lock);
+ ssize_t retval;
+ retval = pyra_send_control(usb_dev, *(uint *)(attr->private),
+ PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return sizeof(struct pyra_profile_buttons);
+ return pyra_sysfs_read(fp, kobj, buf, off, count,
+ PYRA_SIZE_PROFILE_SETTINGS,
+ PYRA_COMMAND_PROFILE_SETTINGS);
}
-static ssize_t pyra_sysfs_read_settings(struct file *fp,
+static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct pyra_settings))
- return 0;
-
- if (off + count > sizeof(struct pyra_settings))
- count = sizeof(struct pyra_settings) - off;
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ ssize_t retval;
- mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->settings) + off, count);
- mutex_unlock(&pyra->pyra_lock);
+ retval = pyra_send_control(usb_dev, *(uint *)(attr->private),
+ PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
+ if (retval)
+ return retval;
- return count;
+ return pyra_sysfs_read(fp, kobj, buf, off, count,
+ PYRA_SIZE_PROFILE_BUTTONS,
+ PYRA_COMMAND_PROFILE_BUTTONS);
}
static ssize_t pyra_sysfs_write_settings(struct file *fp,
@@ -266,35 +230,32 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
- int difference;
struct pyra_roccat_report roccat_report;
+ struct pyra_settings const *settings;
- if (off != 0 || count != sizeof(struct pyra_settings))
+ if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, &pyra->settings, sizeof(struct pyra_settings));
- if (difference) {
- retval = pyra_set_settings(usb_dev,
- (struct pyra_settings const *)buf);
- if (retval) {
- mutex_unlock(&pyra->pyra_lock);
- return retval;
- }
-
- memcpy(&pyra->settings, buf,
- sizeof(struct pyra_settings));
- profile_activated(pyra, pyra->settings.startup_profile);
+ settings = (struct pyra_settings const *)buf;
- roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
- roccat_report.value = pyra->settings.startup_profile + 1;
- roccat_report.key = 0;
- roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report);
+ retval = pyra_set_settings(usb_dev, settings);
+ if (retval) {
+ mutex_unlock(&pyra->pyra_lock);
+ return retval;
}
+
+ profile_activated(pyra, settings->startup_profile);
+
+ roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
+ roccat_report.value = settings->startup_profile + 1;
+ roccat_report.key = 0;
+ roccat_report_event(pyra->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+
mutex_unlock(&pyra->pyra_lock);
- return sizeof(struct pyra_settings);
+ return PYRA_SIZE_SETTINGS;
}
@@ -311,23 +272,34 @@ static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
{
struct pyra_device *pyra =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct pyra_settings settings;
+
+ mutex_lock(&pyra->pyra_lock);
+ roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
+ &settings, PYRA_SIZE_SETTINGS);
+ mutex_unlock(&pyra->pyra_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", settings.startup_profile);
}
static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
-}
+ struct pyra_device *pyra;
+ struct usb_device *usb_dev;
+ struct pyra_info info;
-static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pyra_device *pyra =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
+ dev = dev->parent->parent;
+ pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&pyra->pyra_lock);
+ roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
+ &info, PYRA_SIZE_INFO);
+ mutex_unlock(&pyra->pyra_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute pyra_attributes[] = {
@@ -336,105 +308,88 @@ static struct device_attribute pyra_attributes[] = {
__ATTR(firmware_version, 0440,
pyra_sysfs_show_firmware_version, NULL),
__ATTR(startup_profile, 0440,
- pyra_sysfs_show_startup_profile, NULL),
+ pyra_sysfs_show_actual_profile, NULL),
__ATTR_NULL
};
static struct bin_attribute pyra_bin_attributes[] = {
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct pyra_profile_settings),
- .write = pyra_sysfs_write_profile_settings
- },
+ PYRA_BIN_ATTRIBUTE_W(control, CONTROL),
+ PYRA_BIN_ATTRIBUTE_RW(info, INFO),
+ PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
+ PYRA_BIN_ATTRIBUTE_RW(settings, SETTINGS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct pyra_profile_buttons),
- .write = pyra_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
- {
- .attr = { .name = "settings", .mode = 0660 },
- .size = sizeof(struct pyra_settings),
- .read = pyra_sysfs_read_settings,
- .write = pyra_sysfs_write_settings
- },
__ATTR_NULL
};
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
- struct pyra_info info;
+ struct pyra_settings settings;
int retval, i;
mutex_init(&pyra->pyra_lock);
- retval = pyra_get_info(usb_dev, &info);
- if (retval)
- return retval;
-
- pyra->firmware_version = info.firmware_version;
-
- retval = pyra_get_settings(usb_dev, &pyra->settings);
+ retval = pyra_get_settings(usb_dev, &settings);
if (retval)
return retval;
@@ -443,14 +398,9 @@ static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
&pyra->profile_settings[i], i);
if (retval)
return retval;
-
- retval = pyra_get_profile_buttons(usb_dev,
- &pyra->profile_buttons[i], i);
- if (retval)
- return retval;
}
- profile_activated(pyra, pyra->settings.startup_profile);
+ profile_activated(pyra, settings.startup_profile);
return 0;
}
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index eada7830fa9..beedcf001ce 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -14,11 +14,13 @@
#include <linux/types.h>
-struct pyra_b {
- uint8_t command; /* PYRA_COMMAND_B */
- uint8_t size; /* always 3 */
- uint8_t unknown; /* 1 */
-} __attribute__ ((__packed__));
+enum {
+ PYRA_SIZE_CONTROL = 0x03,
+ PYRA_SIZE_INFO = 0x06,
+ PYRA_SIZE_PROFILE_SETTINGS = 0x0d,
+ PYRA_SIZE_PROFILE_BUTTONS = 0x13,
+ PYRA_SIZE_SETTINGS = 0x03,
+};
enum pyra_control_requests {
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
@@ -46,14 +48,6 @@ struct pyra_profile_settings {
uint16_t checksum; /* byte sum */
} __attribute__ ((__packed__));
-struct pyra_profile_buttons {
- uint8_t command; /* PYRA_COMMAND_PROFILE_BUTTONS */
- uint8_t size; /* always 0x13 */
- uint8_t number; /* Range 0-4 */
- uint8_t buttons[14];
- uint16_t checksum; /* byte sum */
-} __attribute__ ((__packed__));
-
struct pyra_info {
uint8_t command; /* PYRA_COMMAND_INFO */
uint8_t size; /* always 6 */
@@ -64,6 +58,7 @@ struct pyra_info {
} __attribute__ ((__packed__));
enum pyra_commands {
+ PYRA_COMMAND_CONTROL = 0x4,
PYRA_COMMAND_SETTINGS = 0x5,
PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
@@ -148,13 +143,10 @@ struct pyra_roccat_report {
struct pyra_device {
int actual_profile;
int actual_cpi;
- int firmware_version;
int roccat_claimed;
int chrdev_minor;
struct mutex pyra_lock;
- struct pyra_settings settings;
struct pyra_profile_settings profile_settings[5];
- struct pyra_profile_buttons profile_buttons[5];
};
#endif
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 014afba407e..31747a29c09 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -120,7 +120,7 @@ SAVU_SYSFS_RW(profile, PROFILE)
SAVU_SYSFS_RW(general, GENERAL)
SAVU_SYSFS_RW(buttons, BUTTONS)
SAVU_SYSFS_RW(macro, MACRO)
-SAVU_SYSFS_R(info, INFO)
+SAVU_SYSFS_RW(info, INFO)
SAVU_SYSFS_RW(sensor, SENSOR)
static struct bin_attribute savu_bin_attributes[] = {
@@ -129,7 +129,7 @@ static struct bin_attribute savu_bin_attributes[] = {
SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
- SAVU_BIN_ATTRIBUTE_R(info, INFO),
+ SAVU_BIN_ATTRIBUTE_RW(info, INFO),
SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
__ATTR_NULL
};
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index d9d73e9163e..0bc58bd8d4f 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -82,23 +82,6 @@ struct hid_sensor_hub_callbacks_list {
void *priv;
};
-static int sensor_hub_check_for_sensor_page(struct hid_device *hdev)
-{
- int i;
- int ret = -EINVAL;
-
- for (i = 0; i < hdev->maxcollection; i++) {
- struct hid_collection *col = &hdev->collection[i];
- if (col->type == HID_COLLECTION_PHYSICAL &&
- (col->usage & HID_USAGE_PAGE) == HID_UP_SENSOR) {
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
static struct hid_report *sensor_hub_report(int id, struct hid_device *hdev,
int dir)
{
@@ -437,9 +420,6 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
ptr = raw_data;
ptr++; /*Skip report id*/
- if (!report)
- goto err_report;
-
spin_lock_irqsave(&pdata->lock, flags);
for (i = 0; i < report->maxfield; ++i) {
@@ -485,7 +465,6 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
callback->pdev);
spin_unlock_irqrestore(&pdata->lock, flags);
-err_report:
return 1;
}
@@ -524,10 +503,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
hid_err(hdev, "parse failed\n");
goto err_free;
}
- if (sensor_hub_check_for_sensor_page(hdev) < 0) {
- hid_err(hdev, "sensor page not found\n");
- goto err_free;
- }
INIT_LIST_HEAD(&hdev->inputs);
ret = hid_hw_start(hdev, 0);
@@ -630,16 +605,7 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086,
- USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087,
- USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086,
- USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087,
- USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
- USB_DEVICE_ID_SENSOR_HUB_7014) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, sensor_hub_devices);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 7c47fc3f7b2..413a73187d3 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -57,10 +57,6 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
set_current_state(TASK_INTERRUPTIBLE);
while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -69,6 +65,10 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
ret = -EIO;
break;
}
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
/* allow O_NONBLOCK to work well from other threads */
mutex_unlock(&list->read_mutex);
@@ -295,6 +295,13 @@ out:
}
+static int hidraw_fasync(int fd, struct file *file, int on)
+{
+ struct hidraw_list *list = file->private_data;
+
+ return fasync_helper(fd, file, on, &list->fasync);
+}
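With the fasync hook wired up, a hidraw reader can ask for SIGIO instead of blocking or polling. A minimal userspace sketch, assuming the usual /dev/hidrawX node and that the driver signals readers when a report arrives (the delivery side is not part of this hunk):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	/* a report is ready; read() it from the main loop */
}

int main(void)
{
	int fd = open("/dev/hidraw0", O_RDONLY | O_NONBLOCK);	/* example node */

	if (fd < 0)
		return 1;
	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());				/* route SIGIO here */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enable fasync */
	pause();						/* wait for the signal */
	close(fd);
	return 0;
}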
+
static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
@@ -438,6 +445,7 @@ static const struct file_operations hidraw_ops = {
.open = hidraw_open,
.release = hidraw_release,
.unlocked_ioctl = hidraw_ioctl,
+ .fasync = hidraw_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = hidraw_ioctl,
#endif
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
new file mode 100644
index 00000000000..b66617a020b
--- /dev/null
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -0,0 +1,18 @@
+menu "I2C HID support"
+ depends on I2C
+
+config I2C_HID
+ tristate "HID over I2C transport layer"
+ default n
+ depends on I2C && INPUT
+ select HID
+ ---help---
+ Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
+ other HID based device that is connected to your computer via I2C.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid.
+
+endmenu
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
new file mode 100644
index 00000000000..832d8f9aaba
--- /dev/null
+++ b/drivers/hid/i2c-hid/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the I2C input drivers
+#
+
+obj-$(CONFIG_I2C_HID) += i2c-hid.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
new file mode 100644
index 00000000000..9ef222442ca
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -0,0 +1,979 @@
+/*
+ * HID over I2C protocol implementation
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code is partly based on "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+
+#include <linux/i2c/i2c-hid.h>
+
+/* flags */
+#define I2C_HID_STARTED (1 << 0)
+#define I2C_HID_RESET_PENDING (1 << 1)
+#define I2C_HID_READ_PENDING (1 << 2)
+
+#define I2C_HID_PWR_ON 0x00
+#define I2C_HID_PWR_SLEEP 0x01
+
+/* debug option */
+static bool debug;
+module_param(debug, bool, 0444);
+MODULE_PARM_DESC(debug, "print a lot of debug information");
+
+#define i2c_hid_dbg(ihid, fmt, arg...) \
+do { \
+ if (debug) \
+ dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
+} while (0)
+
+struct i2c_hid_desc {
+ __le16 wHIDDescLength;
+ __le16 bcdVersion;
+ __le16 wReportDescLength;
+ __le16 wReportDescRegister;
+ __le16 wInputRegister;
+ __le16 wMaxInputLength;
+ __le16 wOutputRegister;
+ __le16 wMaxOutputLength;
+ __le16 wCommandRegister;
+ __le16 wDataRegister;
+ __le16 wVendorID;
+ __le16 wProductID;
+ __le16 wVersionID;
+ __le32 reserved;
+} __packed;
+
+struct i2c_hid_cmd {
+ unsigned int registerIndex;
+ __u8 opcode;
+ unsigned int length;
+ bool wait;
+};
+
+union command {
+ u8 data[0];
+ struct cmd {
+ __le16 reg;
+ __u8 reportTypeID;
+ __u8 opcode;
+ } __packed c;
+};
+
+#define I2C_HID_CMD(opcode_) \
+ .opcode = opcode_, .length = 4, \
+ .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
+
+/* fetch HID descriptor */
+static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
+/* fetch report descriptors */
+static const struct i2c_hid_cmd hid_report_descr_cmd = {
+ .registerIndex = offsetof(struct i2c_hid_desc,
+ wReportDescRegister),
+ .opcode = 0x00,
+ .length = 2 };
+/* commands */
+static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
+ .wait = true };
+static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
+static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
+static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
+
+/*
+ * These definitions are not used here, but are defined by the spec.
+ * Keeping them here for documentation purposes.
+ *
+ * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
+ * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
+ * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
+ * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
+ */
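To make the command definitions above concrete: for a 4-byte command, __i2c_hid_command() below lays the bytes out as the target register (little endian), then reportTypeID, then opcode. A SET_POWER(ON) request would therefore go out as the message sketched here; the register value 0x0005 is a made-up example, since wCommandRegister really comes from the device's HID descriptor:

	/* hypothetical wCommandRegister = 0x0005 */
	u8 set_power_on_msg[4] = {
		0x05, 0x00,	/* wCommandRegister, LSB first */
		0x00,		/* reportTypeID = reportID | (reportType << 4); power state ON */
		0x08		/* opcode of hid_set_power_cmd */
	};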
+
+static DEFINE_MUTEX(i2c_hid_open_mut);
+
+/* The main device structure */
+struct i2c_hid {
+ struct i2c_client *client; /* i2c client */
+ struct hid_device *hid; /* pointer to corresponding HID dev */
+ union {
+ __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
+ struct i2c_hid_desc hdesc; /* the HID Descriptor */
+ };
+ __le16 wHIDDescRegister; /* location of the i2c
+ * register of the HID
+ * descriptor. */
+ unsigned int bufsize; /* i2c buffer size */
+ char *inbuf; /* Input buffer */
+ char *cmdbuf; /* Command buffer */
+ char *argsbuf; /* Command arguments buffer */
+
+ unsigned long flags; /* device flags */
+
+ wait_queue_head_t wait; /* For waiting on the interrupt */
+};
+
+static int __i2c_hid_command(struct i2c_client *client,
+ const struct i2c_hid_cmd *command, u8 reportID,
+ u8 reportType, u8 *args, int args_len,
+ unsigned char *buf_recv, int data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ union command *cmd = (union command *)ihid->cmdbuf;
+ int ret;
+ struct i2c_msg msg[2];
+ int msg_num = 1;
+
+ int length = command->length;
+ bool wait = command->wait;
+ unsigned int registerIndex = command->registerIndex;
+
+ /* special case for hid_descr_cmd */
+ if (command == &hid_descr_cmd) {
+ cmd->c.reg = ihid->wHIDDescRegister;
+ } else {
+ cmd->data[0] = ihid->hdesc_buffer[registerIndex];
+ cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
+ }
+
+ if (length > 2) {
+ cmd->c.opcode = command->opcode;
+ cmd->c.reportTypeID = reportID | reportType << 4;
+ }
+
+ memcpy(cmd->data + length, args, args_len);
+ length += args_len;
+
+ i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags & I2C_M_TEN;
+ msg[0].len = length;
+ msg[0].buf = cmd->data;
+ if (data_len > 0) {
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags & I2C_M_TEN;
+ msg[1].flags |= I2C_M_RD;
+ msg[1].len = data_len;
+ msg[1].buf = buf_recv;
+ msg_num = 2;
+ set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+ }
+
+ if (wait)
+ set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+
+ ret = i2c_transfer(client->adapter, msg, msg_num);
+
+ if (data_len > 0)
+ clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+
+ if (ret != msg_num)
+ return ret < 0 ? ret : -EIO;
+
+ ret = 0;
+
+ if (wait) {
+ i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
+ if (!wait_event_timeout(ihid->wait,
+ !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
+ msecs_to_jiffies(5000)))
+ ret = -ENODATA;
+ i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
+ }
+
+ return ret;
+}
+
+static int i2c_hid_command(struct i2c_client *client,
+ const struct i2c_hid_cmd *command,
+ unsigned char *buf_recv, int data_len)
+{
+ return __i2c_hid_command(client, command, 0, 0, NULL, 0,
+ buf_recv, data_len);
+}
+
+static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
+ u8 reportID, unsigned char *buf_recv, int data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 args[3];
+ int ret;
+ int args_len = 0;
+ u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
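+	/* Report IDs that do not fit in the 4-bit field are sent in an extra
+	 * byte, with the field itself set to the 0x0F sentinel. */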
+ if (reportID >= 0x0F) {
+ args[args_len++] = reportID;
+ reportID = 0x0F;
+ }
+
+ args[args_len++] = readRegister & 0xFF;
+ args[args_len++] = readRegister >> 8;
+
+ ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
+ reportType, args, args_len, buf_recv, data_len);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to retrieve report from device.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
+ u8 reportID, unsigned char *buf, size_t data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 *args = ihid->argsbuf;
+ int ret;
+ u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+
+ /* hidraw already checked that data_len < HID_MAX_BUFFER_SIZE */
+ u16 size = 2 /* size */ +
+ (reportID ? 1 : 0) /* reportID */ +
+ data_len /* buf */;
+ int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ 2 /* dataRegister */ +
+ size /* args */;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (reportID >= 0x0F) {
+ args[index++] = reportID;
+ reportID = 0x0F;
+ }
+
+ args[index++] = dataRegister & 0xFF;
+ args[index++] = dataRegister >> 8;
+
+ args[index++] = size & 0xFF;
+ args[index++] = size >> 8;
+
+ if (reportID)
+ args[index++] = reportID;
+
+ memcpy(&args[index], buf, data_len);
+
+ ret = __i2c_hid_command(client, &hid_set_report_cmd, reportID,
+ reportType, args, args_len, NULL, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to set a report to device.\n");
+ return ret;
+ }
+
+ return data_len;
+}
+
+static int i2c_hid_set_power(struct i2c_client *client, int power_state)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
+ 0, NULL, 0, NULL, 0);
+ if (ret)
+ dev_err(&client->dev, "failed to change power setting.\n");
+
+ return ret;
+}
+
+static int i2c_hid_hwreset(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ret)
+ return ret;
+
+ i2c_hid_dbg(ihid, "resetting...\n");
+
+ ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to reset device.\n");
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i2c_hid_get_input(struct i2c_hid *ihid)
+{
+ int ret, ret_size;
+ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+ ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ if (ret != size) {
+ if (ret < 0)
+ return;
+
+		dev_err(&ihid->client->dev, "%s: got %d bytes instead of %d\n",
+ __func__, ret, size);
+ return;
+ }
+
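+	/* The first two bytes of an input report carry its total length;
+	 * a zero length signals that a reset has completed. */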
+ ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
+
+ if (!ret_size) {
+ /* host or device initiated RESET completed */
+ if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
+ wake_up(&ihid->wait);
+ return;
+ }
+
+ if (ret_size > size) {
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ __func__, size, ret_size);
+ return;
+ }
+
+ i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+
+ if (test_bit(I2C_HID_STARTED, &ihid->flags))
+ hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
+ ret_size - 2, 1);
+
+ return;
+}
+
+static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+{
+ struct i2c_hid *ihid = dev_id;
+
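+	/* Ignore the interrupt while a command transfer holds the bus; the
+	 * incoming data belongs to __i2c_hid_command() in that case. */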
+ if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+ return IRQ_HANDLED;
+
+ i2c_hid_get_input(ihid);
+
+ return IRQ_HANDLED;
+}
+
+static int i2c_hid_get_report_length(struct hid_report *report)
+{
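+	/* Payload length (report->size bits rounded up to bytes), plus one
+	 * byte for the report ID when reports are numbered, plus the 2-byte
+	 * length prefix. */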
+ return ((report->size - 1) >> 3) + 1 +
+ report->device->report_enum[report->type].numbered + 2;
+}
+
+static void i2c_hid_init_report(struct hid_report *report, u8 *buffer,
+ size_t bufsize)
+{
+ struct hid_device *hid = report->device;
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned int size, ret_size;
+
+ size = i2c_hid_get_report_length(report);
+ if (i2c_hid_get_report(client,
+ report->type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+ report->id, buffer, size))
+ return;
+
+ i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, ihid->inbuf);
+
+ ret_size = buffer[0] | (buffer[1] << 8);
+
+ if (ret_size != size) {
+ dev_err(&client->dev, "error in %s size:%d / ret_size:%d\n",
+ __func__, size, ret_size);
+ return;
+ }
+
+	/* hid->driver_lock is held as we are in the probe function;
+	 * we only need to set up the input fields, so using
+	 * hid_report_raw_event is safe. */
+ hid_report_raw_event(hid, report->type, buffer + 2, size - 2, 1);
+}
+
+/*
+ * Initialize all reports
+ */
+static void i2c_hid_init_reports(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 *inbuf = kzalloc(ihid->bufsize, GFP_KERNEL);
+
+ if (!inbuf) {
+ dev_err(&client->dev, "can not retrieve initial reports\n");
+ return;
+ }
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_INPUT_REPORT].report_list, list)
+ i2c_hid_init_report(report, inbuf, ihid->bufsize);
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
+ i2c_hid_init_report(report, inbuf, ihid->bufsize);
+
+ kfree(inbuf);
+}
+
+/*
+ * Traverse the supplied list of reports and find the longest
+ */
+static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ unsigned int *max)
+{
+ struct hid_report *report;
+ unsigned int size;
+
+ /* We should not rely on wMaxInputLength, as some devices may set it to
+ * a wrong length. */
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+ size = i2c_hid_get_report_length(report);
+ if (*max < size)
+ *max = size;
+ }
+}
+
+static void i2c_hid_free_buffers(struct i2c_hid *ihid)
+{
+ kfree(ihid->inbuf);
+ kfree(ihid->argsbuf);
+ kfree(ihid->cmdbuf);
+ ihid->inbuf = NULL;
+ ihid->cmdbuf = NULL;
+ ihid->argsbuf = NULL;
+ ihid->bufsize = 0;
+}
+
+static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+{
+	/* The worst case is computed from the set_report command with a
+	 * report ID >= 0x0F and the maximum report length. */
+ int args_len = sizeof(__u8) + /* optional ReportID byte */
+ sizeof(__u16) + /* data register */
+ sizeof(__u16) + /* size of the report */
+ report_size; /* report */
+
+ ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+ ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
+ ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
+
+ if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+ i2c_hid_free_buffers(ihid);
+ return -ENOMEM;
+ }
+
+ ihid->bufsize = report_size;
+
+ return 0;
+}
+
+static int i2c_hid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ size_t ret_count, ask_count;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ /* +2 bytes to include the size of the reply in the query buffer */
+ ask_count = min(count + 2, (size_t)ihid->bufsize);
+
+ ret = i2c_hid_get_report(client,
+ report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+ report_number, ihid->inbuf, ask_count);
+
+ if (ret < 0)
+ return ret;
+
+ ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
+
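+	/* A reply of two bytes or less contains only the length prefix and
+	 * no report data. */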
+ if (ret_count <= 2)
+ return 0;
+
+ ret_count = min(ret_count, ask_count);
+
+	/* The query buffer starts with the 2-byte size; drop it from the reply */
+ count = min(count, ret_count - 2);
+ memcpy(buf, ihid->inbuf + 2, count);
+
+ return count;
+}
+
+static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+ size_t count, unsigned char report_type)
+{
+ struct i2c_client *client = hid->driver_data;
+ int report_id = buf[0];
+
+ if (report_type == HID_INPUT_REPORT)
+ return -EINVAL;
+
+ return i2c_hid_set_report(client,
+ report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
+ report_id, buf, count);
+}
+
+static int i2c_hid_parse(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct i2c_hid_desc *hdesc = &ihid->hdesc;
+ unsigned int rsize;
+ char *rdesc;
+ int ret;
+ int tries = 3;
+
+ i2c_hid_dbg(ihid, "entering %s\n", __func__);
+
+ rsize = le16_to_cpu(hdesc->wReportDescLength);
+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ dbg_hid("weird size of report descriptor (%u)\n", rsize);
+ return -EINVAL;
+ }
+
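+	/* Power on and reset the device, retrying on failure with a one
+	 * second delay between attempts. */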
+ do {
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ msleep(1000);
+ } while (tries-- > 0 && ret);
+
+ if (ret)
+ return ret;
+
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
+
+ i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
+
+ ret = hid_parse_report(hid, rdesc, rsize);
+ kfree(rdesc);
+ if (ret) {
+ dbg_hid("parsing report descriptor failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_hid_start(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+ unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+
+ i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+ i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+ i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
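+	/* Reallocate the transfer buffers if any report is larger than what
+	 * was allocated at probe time. */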
+ if (bufsize > ihid->bufsize) {
+ i2c_hid_free_buffers(ihid);
+
+ ret = i2c_hid_alloc_buffers(ihid, bufsize);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
+ i2c_hid_init_reports(hid);
+
+ return 0;
+}
+
+static void i2c_hid_stop(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ hid->claimed = 0;
+
+ i2c_hid_free_buffers(ihid);
+}
+
+static int i2c_hid_open(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret = 0;
+
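+	/* Power the device up on first open; the mutex serializes updates to
+	 * the open count against concurrent open/close calls. */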
+ mutex_lock(&i2c_hid_open_mut);
+ if (!hid->open++) {
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ret) {
+ hid->open--;
+ goto done;
+ }
+ set_bit(I2C_HID_STARTED, &ihid->flags);
+ }
+done:
+ mutex_unlock(&i2c_hid_open_mut);
+ return ret;
+}
+
+static void i2c_hid_close(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ /* protecting hid->open to make sure we don't restart
+	 * data acquisition due to a resumption we no longer
+ * care about
+ */
+ mutex_lock(&i2c_hid_open_mut);
+ if (!--hid->open) {
+ clear_bit(I2C_HID_STARTED, &ihid->flags);
+
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ }
+ mutex_unlock(&i2c_hid_open_mut);
+}
+
+static int i2c_hid_power(struct hid_device *hid, int lvl)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret = 0;
+
+ i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
+
+ switch (lvl) {
+ case PM_HINT_FULLON:
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ break;
+ case PM_HINT_NORMAL:
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ break;
+ }
+ return ret;
+}
+
+static int i2c_hid_hidinput_input_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct hid_field *field;
+ int offset;
+
+ if (type == EV_FF)
+ return input_ff_event(dev, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(hid, type, code, &field);
+
+ if (offset == -1) {
+ hid_warn(dev, "event field not found\n");
+ return -1;
+ }
+
+ return hid_set_field(field, offset, value);
+}
+
+static struct hid_ll_driver i2c_hid_ll_driver = {
+ .parse = i2c_hid_parse,
+ .start = i2c_hid_start,
+ .stop = i2c_hid_stop,
+ .open = i2c_hid_open,
+ .close = i2c_hid_close,
+ .power = i2c_hid_power,
+ .hidinput_input_event = i2c_hid_hidinput_input_event,
+};
+
+static int __devinit i2c_hid_init_irq(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, ihid);
+ if (ret < 0) {
+ dev_warn(&client->dev,
+ "Could not register for %s interrupt, irq = %d,"
+ " ret = %d\n",
+ client->name, client->irq, ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
+{
+ struct i2c_client *client = ihid->client;
+ struct i2c_hid_desc *hdesc = &ihid->hdesc;
+ unsigned int dsize;
+ int ret;
+
+	/* Fetch the length of the HID descriptor by retrieving its first 4 bytes:
+ * bytes 0-1 -> length
+ * bytes 2-3 -> bcdVersion (has to be 1.00) */
+ ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer, 4);
+
+ i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %*ph\n",
+ __func__, 4, ihid->hdesc_buffer);
+
+ if (ret) {
+ dev_err(&client->dev,
+ "unable to fetch the size of HID descriptor (ret=%d)\n",
+ ret);
+ return -ENODEV;
+ }
+
+ dsize = le16_to_cpu(hdesc->wHIDDescLength);
+	/*
+	 * The HID descriptor must be at least 4 bytes long (its size field
+	 * plus bcdVersion) and must not be larger than
+	 * sizeof(struct i2c_hid_desc), as we fill that struct directly
+	 * through i2c_hid_command.
+	 */
+ if (dsize < 4 || dsize > sizeof(struct i2c_hid_desc)) {
+ dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
+ dsize);
+ return -ENODEV;
+ }
+
+ /* check bcdVersion == 1.0 */
+ if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
+ dev_err(&client->dev,
+ "unexpected HID descriptor bcdVersion (0x%04hx)\n",
+ le16_to_cpu(hdesc->bcdVersion));
+ return -ENODEV;
+ }
+
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
+ dsize);
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd Fail\n");
+ return -ENODEV;
+ }
+
+ i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
+
+ return 0;
+}
+
+static int __devinit i2c_hid_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int ret;
+ struct i2c_hid *ihid;
+ struct hid_device *hid;
+ __u16 hidRegister;
+ struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
+
+ dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+
+ if (!platform_data) {
+ dev_err(&client->dev, "HID register address not provided\n");
+ return -EINVAL;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
+ if (!ihid)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ihid);
+
+ ihid->client = client;
+
+ hidRegister = platform_data->hid_descriptor_address;
+ ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
+
+ init_waitqueue_head(&ihid->wait);
+
+ /* we need to allocate the command buffer without knowing the maximum
+ * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
+ * real computation later. */
+ ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
+ if (ret < 0)
+ goto err;
+
+ ret = i2c_hid_fetch_hid_descriptor(ihid);
+ if (ret < 0)
+ goto err;
+
+ ret = i2c_hid_init_irq(client);
+ if (ret < 0)
+ goto err;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_irq;
+ }
+
+ ihid->hid = hid;
+
+ hid->driver_data = client;
+ hid->ll_driver = &i2c_hid_ll_driver;
+ hid->hid_get_raw_report = i2c_hid_get_raw_report;
+ hid->hid_output_raw_report = i2c_hid_output_raw_report;
+ hid->dev.parent = &client->dev;
+ hid->bus = BUS_I2C;
+ hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+
+ snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+ client->name, hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ if (ret != -ENODEV)
+ hid_err(client, "can't add hid device: %d\n", ret);
+ goto err_mem_free;
+ }
+
+ return 0;
+
+err_mem_free:
+ hid_destroy_device(hid);
+
+err_irq:
+ free_irq(client->irq, ihid);
+
+err:
+ i2c_hid_free_buffers(ihid);
+ kfree(ihid);
+ return ret;
+}
+
+static int __devexit i2c_hid_remove(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct hid_device *hid;
+
+ hid = ihid->hid;
+ hid_destroy_device(hid);
+
+ free_irq(client->irq, ihid);
+
+ if (ihid->bufsize)
+ i2c_hid_free_buffers(ihid);
+
+ kfree(ihid);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int i2c_hid_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+
+ return 0;
+}
+
+static int i2c_hid_resume(struct device *dev)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_hid_pm, i2c_hid_suspend, i2c_hid_resume);
+
+static const struct i2c_device_id i2c_hid_id_table[] = {
+ { "hid", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
+
+
+static struct i2c_driver i2c_hid_driver = {
+ .driver = {
+ .name = "i2c_hid",
+ .owner = THIS_MODULE,
+ .pm = &i2c_hid_pm,
+ },
+
+ .probe = i2c_hid_probe,
+ .remove = __devexit_p(i2c_hid_remove),
+
+ .id_table = i2c_hid_id_table,
+};
+
+module_i2c_driver(i2c_hid_driver);
+
+MODULE_DESCRIPTION("HID over I2C core driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 11c7932dc7e..ac9e3522825 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
@@ -79,9 +80,11 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 14599e25679..87bd64959a9 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -361,10 +361,6 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
prepare_to_wait(&list->hiddev->wait, &wait, TASK_INTERRUPTIBLE);
while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
@@ -373,6 +369,10 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
retval = -EIO;
break;
}
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
/* let O_NONBLOCK tasks run */
mutex_unlock(&list->thread_lock);
@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case HIDIOCAPPLICATION:
- if (arg < 0 || arg >= hid->maxapplication)
+ if (arg >= hid->maxapplication)
break;
for (i = 0; i < hid->maxcollection; i++)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4800d4c2a7b..32f238f3cae 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1208,6 +1208,14 @@ config SENSORS_TWL4030_MADC
This driver can also be built as a module. If so it will be called
twl4030-madc-hwmon.
+config SENSORS_VEXPRESS
+ tristate "Versatile Express"
+ depends on VEXPRESS_CONFIG
+ help
+	  This driver provides support for hardware sensors available on
+	  ARM Ltd's Versatile Express platform. It can provide a wide
+	  range of information such as temperature, power and energy.
+
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a930f0997d2..5da287443f6 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index a98c917b588..789bd4fb329 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -187,7 +187,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
* Sysfs callback functions
*/
-static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
+static const s16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
char *buf)
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 9f26400713f..89cfd64b337 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -115,6 +115,12 @@ int vid_from_reg(int val, u8 vrm)
return (val < 32) ? 1550 - 25 * val
: 775 - (25 * (val - 31)) / 2;
+ case 26: /* AMD family 10h to 15h, serial VID */
+ val &= 0x7f;
+ if (val >= 0x7c)
+ return 0;
+ return DIV_ROUND_CLOSEST(15500 - 125 * val, 10);
+
case 91: /* VRM 9.1 */
case 90: /* VRM 9.0 */
val &= 0x1f;
@@ -195,6 +201,10 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0xF, 0x40, 0x7F, ANY, 24}, /* NPT family 0Fh */
{X86_VENDOR_AMD, 0xF, 0x80, ANY, ANY, 25}, /* future fam. 0Fh */
{X86_VENDOR_AMD, 0x10, 0x0, ANY, ANY, 25}, /* NPT family 10h */
+ {X86_VENDOR_AMD, 0x11, 0x0, ANY, ANY, 26}, /* family 11h */
+ {X86_VENDOR_AMD, 0x12, 0x0, ANY, ANY, 26}, /* family 12h */
+ {X86_VENDOR_AMD, 0x14, 0x0, ANY, ANY, 26}, /* family 14h */
+ {X86_VENDOR_AMD, 0x15, 0x0, ANY, ANY, 26}, /* family 15h */
{X86_VENDOR_INTEL, 0x6, 0x0, 0x6, ANY, 82}, /* Pentium Pro,
* Pentium II, Xeon,
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c3c471ca202..646314f7c83 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -84,19 +84,21 @@ static void __init hwmon_pci_quirks(void)
/* Open access to 0x295-0x296 on MSI MS-7031 */
sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL);
- if (sb &&
- (sb->subsystem_vendor == 0x1462 && /* MSI */
- sb->subsystem_device == 0x0031)) { /* MS-7031 */
-
- pci_read_config_byte(sb, 0x48, &enable);
- pci_read_config_word(sb, 0x64, &base);
-
- if (base == 0 && !(enable & BIT(2))) {
- dev_info(&sb->dev,
- "Opening wide generic port at 0x295\n");
- pci_write_config_word(sb, 0x64, 0x295);
- pci_write_config_byte(sb, 0x48, enable | BIT(2));
+ if (sb) {
+ if (sb->subsystem_vendor == 0x1462 && /* MSI */
+ sb->subsystem_device == 0x0031) { /* MS-7031 */
+ pci_read_config_byte(sb, 0x48, &enable);
+ pci_read_config_word(sb, 0x64, &base);
+
+ if (base == 0 && !(enable & BIT(2))) {
+ dev_info(&sb->dev,
+ "Opening wide generic port at 0x295\n");
+ pci_write_config_word(sb, 0x64, 0x295);
+ pci_write_config_byte(sb, 0x48,
+ enable | BIT(2));
+ }
}
+ pci_dev_put(sb);
}
#endif
}
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d32aa354cbd..117d66fcded 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -203,6 +203,8 @@ static const u8 IT87_REG_FAN[] = { 0x0d, 0x0e, 0x0f, 0x80, 0x82 };
static const u8 IT87_REG_FAN_MIN[] = { 0x10, 0x11, 0x12, 0x84, 0x86 };
static const u8 IT87_REG_FANX[] = { 0x18, 0x19, 0x1a, 0x81, 0x83 };
static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
+static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
+
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
#define IT87_REG_PWM(nr) (0x15 + (nr))
@@ -226,6 +228,83 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+struct it87_devices {
+ const char *name;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
+};
+
+#define FEAT_12MV_ADC (1 << 0)
+#define FEAT_NEWER_AUTOPWM (1 << 1)
+#define FEAT_OLD_AUTOPWM (1 << 2)
+#define FEAT_16BIT_FANS (1 << 3)
+#define FEAT_TEMP_OFFSET (1 << 4)
+#define FEAT_TEMP_PECI (1 << 5)
+#define FEAT_TEMP_OLD_PECI (1 << 6)
+
+static const struct it87_devices it87_devices[] = {
+ [it87] = {
+ .name = "it87",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8712] = {
+ .name = "it8712",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8716] = {
+ .name = "it8716",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET,
+ },
+ [it8718] = {
+ .name = "it8718",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8720] = {
+ .name = "it8720",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8721] = {
+ .name = "it8721",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI,
+ .peci_mask = 0x05,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
+ [it8728] = {
+ .name = "it8728",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ .peci_mask = 0x07,
+ },
+ [it8782] = {
+ .name = "it8782",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8783] = {
+ .name = "it8783",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+};
+
+#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
+#define has_12mv_adc(data) ((data)->features & FEAT_12MV_ADC)
+#define has_newer_autopwm(data) ((data)->features & FEAT_NEWER_AUTOPWM)
+#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
+#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
+#define has_temp_peci(data, nr) (((data)->features & FEAT_TEMP_PECI) && \
+ ((data)->peci_mask & (1 << nr)))
+#define has_temp_old_peci(data, nr) \
+ (((data)->features & FEAT_TEMP_OLD_PECI) && \
+ ((data)->old_peci_mask & (1 << nr)))
struct it87_sio_data {
enum chips type;
@@ -249,7 +328,9 @@ struct it87_sio_data {
struct it87_data {
struct device *hwmon_dev;
enum chips type;
- u8 revision;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
unsigned short addr;
const char *name;
@@ -258,17 +339,13 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[9]; /* Register value */
- u8 in_max[8]; /* Register value */
- u8 in_min[8]; /* Register value */
+ u8 in[9][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
- u16 fan[5]; /* Register values, possibly combined */
- u16 fan_min[5]; /* Register values, possibly combined */
+ u16 fan[5][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
- s8 temp[3]; /* Register value */
- s8 temp_high[3]; /* Register value */
- s8 temp_low[3]; /* Register value */
- u8 sensor; /* Register value */
+ s8 temp[3][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+ u8 sensor; /* Register value (IT87_REG_TEMP_ENABLE) */
+ u8 extra; /* Register value (IT87_REG_TEMP_EXTRA) */
u8 fan_div[3]; /* Register encoding, shifted right */
u8 vid; /* Register encoding, combined */
u8 vrm;
@@ -296,26 +373,6 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
-static inline int has_12mv_adc(const struct it87_data *data)
-{
- /*
- * IT8721F and later have a 12 mV ADC, also with internal scaling
- * on selected inputs.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
-static inline int has_newer_autopwm(const struct it87_data *data)
-{
- /*
- * IT8721F and later have separate registers for the temperature
- * mapping and the manual duty cycle.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
static int adc_lsb(const struct it87_data *data, int nr)
{
int lsb = has_12mv_adc(data) ? 12 : 16;
@@ -398,35 +455,6 @@ static const unsigned int pwm_freq[8] = {
750000 / 128,
};
-static inline int has_16bit_fans(const struct it87_data *data)
-{
- /*
- * IT8705F Datasheet 0.4.1, 3h == Version G.
- * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
- * These are the first revisions with 16-bit tachometer support.
- */
- return (data->type == it87 && data->revision >= 0x03)
- || (data->type == it8712 && data->revision >= 0x08)
- || data->type == it8716
- || data->type == it8718
- || data->type == it8720
- || data->type == it8721
- || data->type == it8728
- || data->type == it8782
- || data->type == it8783;
-}
-
-static inline int has_old_autopwm(const struct it87_data *data)
-{
- /*
- * The old automatic fan speed control interface is implemented
- * by IT8705F chips up to revision F and IT8712F chips up to
- * revision G.
- */
- return (data->type == it87 && data->revision < 0x03)
- || (data->type == it8712 && data->revision < 0x08);
-}
-
static int it87_probe(struct platform_device *pdev);
static int it87_remove(struct platform_device *pdev);
@@ -447,59 +475,22 @@ static struct platform_driver it87_driver = {
};
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr]));
-}
-
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_min[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_max[nr]));
-}
-
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->in_min[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MIN(nr),
- data->in_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -508,140 +499,167 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->in_max[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MAX(nr),
- data->in_max[nr]);
+ data->in[nr][index] = in_to_reg(data, nr, val);
+ it87_write_value(data,
+ index == 1 ? IT87_REG_VIN_MIN(nr)
+ : IT87_REG_VIN_MAX(nr),
+ data->in[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset);
-
-#define limit_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-limit_in_offset(0);
-show_in_offset(1);
-limit_in_offset(1);
-show_in_offset(2);
-limit_in_offset(2);
-show_in_offset(3);
-limit_in_offset(3);
-show_in_offset(4);
-limit_in_offset(4);
-show_in_offset(5);
-limit_in_offset(5);
-show_in_offset(6);
-limit_in_offset(6);
-show_in_offset(7);
-limit_in_offset(7);
-show_in_offset(8);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 2);
+
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 1);
+static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 2);
+
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 1);
+static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 2);
+
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 1);
+static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 2);
+
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 1);
+static SENSOR_DEVICE_ATTR_2(in6_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 2);
+
+static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 7, 0);
+static SENSOR_DEVICE_ATTR_2(in7_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 1);
+static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 2);
+
+static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
/* 3 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
-}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_high[nr]));
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_low[nr]));
-}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
+ u8 reg, regval;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->temp_high[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
+ switch (index) {
+ default:
+ case 1:
+ reg = IT87_REG_TEMP_LOW(nr);
+ break;
+ case 2:
+ reg = IT87_REG_TEMP_HIGH(nr);
+ break;
+ case 3:
+ regval = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ if (!(regval & 0x80)) {
+ regval |= 0x80;
+ it87_write_value(data, IT87_REG_BEEP_ENABLE, regval);
+ }
+ data->valid = 0;
+ reg = IT87_REG_TEMP_OFFSET[nr];
+ break;
+ }
- mutex_lock(&data->update_lock);
- data->temp_low[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
+ data->temp[nr][index] = TEMP_TO_REG(val);
+ it87_write_value(data, reg, data->temp[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1);
-
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
-
-static ssize_t show_sensor(struct device *dev, struct device_attribute *attr,
- char *buf)
+
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 2);
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 2, 3);
+
+static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
u8 reg = data->sensor; /* In case value is updated while used */
+ u8 extra = data->extra;
+ if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
+ || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+ return sprintf(buf, "6\n"); /* Intel PECI */
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
if (reg & (8 << nr))
return sprintf(buf, "4\n"); /* thermistor */
return sprintf(buf, "0\n"); /* disabled */
}
-static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
- u8 reg;
+ u8 reg, extra;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
@@ -649,33 +667,45 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
reg &= ~(1 << nr);
reg &= ~(8 << nr);
+ if (has_temp_peci(data, nr) && (reg >> 6 == nr + 1 || val == 6))
+ reg &= 0x3f;
+ extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+ if (has_temp_old_peci(data, nr) && ((extra & 0x80) || val == 6))
+ extra &= 0x7f;
if (val == 2) { /* backwards compatibility */
- dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
- "instead\n");
+ dev_warn(dev,
+ "Sensor type 2 is deprecated, please use 4 instead\n");
val = 4;
}
- /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
+ /* 3 = thermal diode; 4 = thermistor; 6 = Intel PECI; 0 = disabled */
if (val == 3)
reg |= 1 << nr;
else if (val == 4)
reg |= 8 << nr;
+ else if (has_temp_peci(data, nr) && val == 6)
+ reg |= (nr + 1) << 6;
+ else if (has_temp_old_peci(data, nr) && val == 6)
+ extra |= 0x80;
else if (val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
data->sensor = reg;
+ data->extra = extra;
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
+ if (has_temp_old_peci(data, nr))
+ it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
-#define show_sensor_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_sensor, set_sensor, offset - 1);
-show_sensor_offset(1);
-show_sensor_offset(2);
-show_sensor_offset(3);
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 0);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 1);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 2);
/* 3 Fans */
@@ -692,25 +722,21 @@ static int pwm_mode(const struct it87_data *data, int nr)
}
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
+ int speed;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
- DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
- DIV_FROM_REG(data->fan_div[nr])));
+ speed = has_16bit_fans(data) ?
+ FAN16_FROM_REG(data->fan[nr][index]) :
+ FAN_FROM_REG(data->fan[nr][index],
+ DIV_FROM_REG(data->fan_div[nr]));
+ return sprintf(buf, "%d\n", speed);
}
+
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -747,11 +773,13 @@ static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", pwm_freq[index]);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
@@ -761,24 +789,36 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = it87_read_value(data, IT87_REG_FAN_DIV);
- switch (nr) {
- case 0:
- data->fan_div[nr] = reg & 0x07;
- break;
- case 1:
- data->fan_div[nr] = (reg >> 3) & 0x07;
- break;
- case 2:
- data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
- break;
+
+ if (has_16bit_fans(data)) {
+ data->fan[nr][index] = FAN16_TO_REG(val);
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index] & 0xff);
+ it87_write_value(data, IT87_REG_FANX_MIN[nr],
+ data->fan[nr][index] >> 8);
+ } else {
+ reg = it87_read_value(data, IT87_REG_FAN_DIV);
+ switch (nr) {
+ case 0:
+ data->fan_div[nr] = reg & 0x07;
+ break;
+ case 1:
+ data->fan_div[nr] = (reg >> 3) & 0x07;
+ break;
+ case 2:
+ data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
+ break;
+ }
+ data->fan[nr][index] =
+ FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index]);
}
- data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -797,7 +837,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
old = it87_read_value(data, IT87_REG_FAN_DIV);
/* Save fan min limit */
- min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
+ min = FAN_FROM_REG(data->fan[nr][1], DIV_FROM_REG(data->fan_div[nr]));
switch (nr) {
case 0:
@@ -818,8 +858,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_FAN_DIV, val);
/* Restore fan min limit */
- data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
+ data->fan[nr][1] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan[nr][1]);
mutex_unlock(&data->update_lock);
return count;
@@ -843,8 +883,8 @@ static int check_trip_points(struct device *dev, int nr)
}
if (err) {
- dev_err(dev, "Inconsistent trip points, not switching to "
- "automatic mode\n");
+ dev_err(dev,
+ "Inconsistent trip points, not switching to automatic mode\n");
dev_err(dev, "Adjust the trip points and try again\n");
}
return err;
@@ -1092,118 +1132,106 @@ static ssize_t set_auto_temp(struct device *dev,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-
-#define show_pwm_offset(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static DEVICE_ATTR(pwm##offset##_freq, \
- (offset == 1 ? S_IRUGO | S_IWUSR : S_IRUGO), \
- show_pwm_freq, (offset == 1 ? set_pwm_freq : NULL)); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels_temp, \
- S_IRUGO | S_IWUSR, show_pwm_temp_map, set_pwm_temp_map, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_pwm, \
- S_IRUGO, show_auto_pwm, NULL, offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp_hyst, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 4);
-
-show_pwm_offset(1);
-show_pwm_offset(2);
-show_pwm_offset(3);
-
-/* A different set of callbacks for 16-bit fans */
-static ssize_t show_fan16(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan[nr]));
-}
-
-static ssize_t show_fan16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan_min[nr]));
-}
-
-static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->fan_min[nr] = FAN16_TO_REG(val);
- it87_write_value(data, IT87_REG_FAN_MIN[nr],
- data->fan_min[nr] & 0xff);
- it87_write_value(data, IT87_REG_FANX_MIN[nr],
- data->fan_min[nr] >> 8);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-/*
- * We want to use the same sysfs file names as 8-bit fans, but we need
- * different variable names, so we have to use SENSOR_ATTR instead of
- * SENSOR_DEVICE_ATTR.
- */
-#define show_fan16_offset(offset) \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_input16 \
- = SENSOR_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan16, NULL, offset - 1); \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_min16 \
- = SENSOR_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan16_min, set_fan16_min, offset - 1)
-
-show_fan16_offset(1);
-show_fan16_offset(2);
-show_fan16_offset(3);
-show_fan16_offset(4);
-show_fan16_offset(5);
+static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 0, 1);
+static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 0);
+
+static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 1, 1);
+static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 2, 1);
+static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 2);
+
+static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 3, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan5_input, S_IRUGO, show_fan, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(fan5_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 4, 1);
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
+static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 4);
+
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
+static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 4);
+
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
+static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 4);
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
@@ -1471,6 +1499,12 @@ static const struct attribute_group it87_group_temp[3] = {
{ .attrs = it87_attributes_temp[2] },
};
+static struct attribute *it87_attributes_temp_offset[] = {
+ &sensor_dev_attr_temp1_offset.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
+};
+
static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
@@ -1500,73 +1534,47 @@ static struct attribute *it87_attributes_temp_beep[] = {
&sensor_dev_attr_temp3_beep.dev_attr.attr,
};
-static struct attribute *it87_attributes_fan16[5][3+1] = { {
- &sensor_dev_attr_fan1_input16.dev_attr.attr,
- &sensor_dev_attr_fan1_min16.dev_attr.attr,
+static struct attribute *it87_attributes_fan[5][3+1] = { {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan2_input16.dev_attr.attr,
- &sensor_dev_attr_fan2_min16.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan3_input16.dev_attr.attr,
- &sensor_dev_attr_fan3_min16.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan4_input16.dev_attr.attr,
- &sensor_dev_attr_fan4_min16.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan5_input16.dev_attr.attr,
- &sensor_dev_attr_fan5_min16.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
NULL
} };
-static const struct attribute_group it87_group_fan16[5] = {
- { .attrs = it87_attributes_fan16[0] },
- { .attrs = it87_attributes_fan16[1] },
- { .attrs = it87_attributes_fan16[2] },
- { .attrs = it87_attributes_fan16[3] },
- { .attrs = it87_attributes_fan16[4] },
+static const struct attribute_group it87_group_fan[5] = {
+ { .attrs = it87_attributes_fan[0] },
+ { .attrs = it87_attributes_fan[1] },
+ { .attrs = it87_attributes_fan[2] },
+ { .attrs = it87_attributes_fan[3] },
+ { .attrs = it87_attributes_fan[4] },
};
-static struct attribute *it87_attributes_fan[3][4+1] = { {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
+static const struct attribute *it87_attributes_fan_div[] = {
&sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
- &sensor_dev_attr_fan3_alarm.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_fan[3] = {
- { .attrs = it87_attributes_fan[0] },
- { .attrs = it87_attributes_fan[1] },
- { .attrs = it87_attributes_fan[2] },
};
-static const struct attribute_group *
-it87_get_fan_group(const struct it87_data *data)
-{
- return has_16bit_fans(data) ? it87_group_fan16 : it87_group_fan;
-}
-
static struct attribute *it87_attributes_pwm[3][4+1] = { {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
@@ -1925,7 +1933,6 @@ static void it87_remove_files(struct device *dev)
{
struct it87_data *data = platform_get_drvdata(pdev);
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group = it87_get_fan_group(data);
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
@@ -1941,6 +1948,9 @@ static void it87_remove_files(struct device *dev)
if (!(data->has_temp & (1 << i)))
continue;
sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
+ if (has_temp_offset(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -1948,10 +1958,13 @@ static void it87_remove_files(struct device *dev)
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- sysfs_remove_group(&dev->kobj, &fan_group[i]);
+ sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_fan_beep[i]);
+ if (i < 3 && !has_16bit_fans(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
}
for (i = 0; i < 3; i++) {
if (sio_data->skip_pwm & (1 << 0))
@@ -1972,21 +1985,9 @@ static int it87_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group;
int err = 0, i;
int enable_pwm_interface;
int fan_beep_need_rw;
- static const char * const names[] = {
- "it87",
- "it8712",
- "it8716",
- "it8718",
- "it8720",
- "it8721",
- "it8728",
- "it8782",
- "it8783",
- };
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
@@ -2003,8 +2004,31 @@ static int it87_probe(struct platform_device *pdev)
data->addr = res->start;
data->type = sio_data->type;
- data->revision = sio_data->revision;
- data->name = names[sio_data->type];
+ data->features = it87_devices[sio_data->type].features;
+ data->peci_mask = it87_devices[sio_data->type].peci_mask;
+ data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+ data->name = it87_devices[sio_data->type].name;
+ /*
+ * IT8705F Datasheet 0.4.1, 3h == Version G.
+ * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+ * These are the first revisions with 16-bit tachometer support.
+ */
+ switch (data->type) {
+ case it87:
+ if (sio_data->revision >= 0x03) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ case it8712:
+ if (sio_data->revision >= 0x08) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ default:
+ break;
+ }
/* Now, we do the remaining detection. */
if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
@@ -2068,6 +2092,12 @@ static int it87_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
if (err)
goto error;
+ if (has_temp_offset(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
+ if (err)
+ goto error;
+ }
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -2077,15 +2107,21 @@ static int it87_probe(struct platform_device *pdev)
}
/* Do not create fan files for disabled fans */
- fan_group = it87_get_fan_group(data);
fan_beep_need_rw = 1;
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- err = sysfs_create_group(&dev->kobj, &fan_group[i]);
+ err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
if (err)
goto error;
+ if (i < 3 && !has_16bit_fans(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
+ if (err)
+ goto error;
+ }
+
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_fan_beep[i]);
@@ -2221,8 +2257,8 @@ static int it87_check_pwm(struct device *dev)
* PWM interface).
*/
if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(dev, "Reconfiguring PWM to "
- "active high polarity\n");
+ dev_info(dev,
+ "Reconfiguring PWM to active high polarity\n");
it87_write_value(data, IT87_REG_FAN_CTL,
tmp | 0x87);
for (i = 0; i < 3; i++)
@@ -2232,16 +2268,16 @@ static int it87_check_pwm(struct device *dev)
return 1;
}
- dev_info(dev, "PWM configuration is "
- "too broken to be fixed\n");
+ dev_info(dev,
+ "PWM configuration is too broken to be fixed\n");
}
- dev_info(dev, "Detected broken BIOS "
- "defaults, disabling PWM interface\n");
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
return 0;
} else if (fix_pwm_polarity) {
- dev_info(dev, "PWM configuration looks "
- "sane, won't touch\n");
+ dev_info(dev,
+ "PWM configuration looks sane, won't touch\n");
}
return 1;
@@ -2389,42 +2425,46 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_read_value(data, IT87_REG_CONFIG) | 0x40);
}
for (i = 0; i <= 7; i++) {
- data->in[i] =
+ data->in[i][0] =
it87_read_value(data, IT87_REG_VIN(i));
- data->in_min[i] =
+ data->in[i][1] =
it87_read_value(data, IT87_REG_VIN_MIN(i));
- data->in_max[i] =
+ data->in[i][2] =
it87_read_value(data, IT87_REG_VIN_MAX(i));
}
/* in8 (battery) has no limit registers */
- data->in[8] = it87_read_value(data, IT87_REG_VIN(8));
+ data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
if (!(data->has_fan & (1 << i)))
continue;
- data->fan_min[i] =
+ data->fan[i][1] =
it87_read_value(data, IT87_REG_FAN_MIN[i]);
- data->fan[i] = it87_read_value(data,
+ data->fan[i][0] = it87_read_value(data,
IT87_REG_FAN[i]);
/* Add high byte if in 16-bit mode */
if (has_16bit_fans(data)) {
- data->fan[i] |= it87_read_value(data,
+ data->fan[i][0] |= it87_read_value(data,
IT87_REG_FANX[i]) << 8;
- data->fan_min[i] |= it87_read_value(data,
+ data->fan[i][1] |= it87_read_value(data,
IT87_REG_FANX_MIN[i]) << 8;
}
}
for (i = 0; i < 3; i++) {
if (!(data->has_temp & (1 << i)))
continue;
- data->temp[i] =
+ data->temp[i][0] =
it87_read_value(data, IT87_REG_TEMP(i));
- data->temp_high[i] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- data->temp_low[i] =
+ data->temp[i][1] =
it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ data->temp[i][2] =
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ if (has_temp_offset(data))
+ data->temp[i][3] =
+ it87_read_value(data,
+ IT87_REG_TEMP_OFFSET[i]);
}
/* Newer chips don't have clock dividers */
@@ -2448,6 +2488,7 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_update_pwm_ctrl(data, i);
data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
/*
* The IT8705F does not have VID capability.
* The IT8718F and later don't use IT87_REG_VID for the
@@ -2549,8 +2590,7 @@ static void __exit sm_it87_exit(void)
}
-MODULE_AUTHOR("Chris Gauthron, "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 8fa2632cbba..7272176a9ec 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long temp;
short value;
+ s32 err;
int status = kstrtol(buf, 10, &temp);
if (status < 0)
@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
/* Write value */
value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
(LM73_TEMP_MAX*4)) << 5;
- i2c_smbus_write_word_swapped(client, attr->index, value);
- return count;
+ err = i2c_smbus_write_word_swapped(client, attr->index, value);
+ return (err < 0) ? err : count;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct i2c_client *client = to_i2c_client(dev);
+ int temp;
+
+ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+ if (err < 0)
+ return err;
+
/* use integer division instead of equivalent right shift to
guarantee arithmetic shift and preserve the sign */
- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
- attr->index))*250) / 32;
- return sprintf(buf, "%d\n", temp);
+ temp = (((s16) err) * 250) / 32;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 149d44a7c58..6c6d440bb2d 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -130,7 +130,7 @@ static int twl4030_madc_hwmon_remove(struct platform_device *pdev)
static struct platform_driver twl4030_madc_hwmon_driver = {
.probe = twl4030_madc_hwmon_probe,
- .remove = __exit_p(twl4030_madc_hwmon_remove),
+ .remove = twl4030_madc_hwmon_remove,
.driver = {
.name = "twl4030_madc_hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
new file mode 100644
index 00000000000..59fd1268e58
--- /dev/null
+++ b/drivers/hwmon/vexpress.c
@@ -0,0 +1,229 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#define DRVNAME "vexpress-hwmon"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/vexpress.h>
+
+struct vexpress_hwmon_data {
+ struct device *hwmon_dev;
+ struct vexpress_config_func *func;
+};
+
+static ssize_t vexpress_hwmon_name_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ const char *compatible = of_get_property(dev->of_node, "compatible",
+ NULL);
+
+ return sprintf(buffer, "%s\n", compatible);
+}
+
+static ssize_t vexpress_hwmon_label_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ const char *label = of_get_property(dev->of_node, "label", NULL);
+
+ if (!label)
+ return -ENOENT;
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", label);
+}
+
+static ssize_t vexpress_hwmon_u32_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
+ int err;
+ u32 value;
+
+ err = vexpress_config_read(data->func, 0, &value);
+ if (err)
+ return err;
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", value /
+ to_sensor_dev_attr(dev_attr)->index);
+}
+
+static ssize_t vexpress_hwmon_u64_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
+ int err;
+ u32 value_hi, value_lo;
+
+ err = vexpress_config_read(data->func, 0, &value_lo);
+ if (err)
+ return err;
+
+ err = vexpress_config_read(data->func, 1, &value_hi);
+ if (err)
+ return err;
+
+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ div_u64(((u64)value_hi << 32) | value_lo,
+ to_sensor_dev_attr(dev_attr)->index));
+}
+
+static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
+
+#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
+struct attribute *vexpress_hwmon_attrs_##_name[] = { \
+ &dev_attr_name.attr, \
+ &dev_attr_##_label_attr.attr, \
+ &sensor_dev_attr_##_input_attr.dev_attr.attr, \
+ NULL \
+}
+
+#if !defined(CONFIG_REGULATOR_VEXPRESS)
+static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
+static struct attribute_group vexpress_hwmon_group_volt = {
+ .attrs = vexpress_hwmon_attrs_volt,
+};
+#endif
+
+static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
+static struct attribute_group vexpress_hwmon_group_amp = {
+ .attrs = vexpress_hwmon_attrs_amp,
+};
+
+static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
+static struct attribute_group vexpress_hwmon_group_temp = {
+ .attrs = vexpress_hwmon_attrs_temp,
+};
+
+static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1);
+static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
+static struct attribute_group vexpress_hwmon_group_power = {
+ .attrs = vexpress_hwmon_attrs_power,
+};
+
+static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
+ NULL, 1);
+static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
+static struct attribute_group vexpress_hwmon_group_energy = {
+ .attrs = vexpress_hwmon_attrs_energy,
+};
+
+static struct of_device_id vexpress_hwmon_of_match[] = {
+#if !defined(CONFIG_REGULATOR_VEXPRESS)
+ {
+ .compatible = "arm,vexpress-volt",
+ .data = &vexpress_hwmon_group_volt,
+ },
+#endif
+ {
+ .compatible = "arm,vexpress-amp",
+ .data = &vexpress_hwmon_group_amp,
+ }, {
+ .compatible = "arm,vexpress-temp",
+ .data = &vexpress_hwmon_group_temp,
+ }, {
+ .compatible = "arm,vexpress-power",
+ .data = &vexpress_hwmon_group_power,
+ }, {
+ .compatible = "arm,vexpress-energy",
+ .data = &vexpress_hwmon_group_energy,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
+
+static int vexpress_hwmon_probe(struct platform_device *pdev)
+{
+ int err;
+ const struct of_device_id *match;
+ struct vexpress_hwmon_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, data);
+
+ match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ data->func = vexpress_config_func_get_by_dev(&pdev->dev);
+ if (!data->func)
+ return -ENODEV;
+
+ err = sysfs_create_group(&pdev->dev.kobj, match->data);
+ if (err)
+ goto error;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ sysfs_remove_group(&pdev->dev.kobj, match->data);
+ vexpress_config_func_put(data->func);
+ return err;
+}
+
+static int __devexit vexpress_hwmon_remove(struct platform_device *pdev)
+{
+ struct vexpress_hwmon_data *data = platform_get_drvdata(pdev);
+ const struct of_device_id *match;
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
+ sysfs_remove_group(&pdev->dev.kobj, match->data);
+
+ vexpress_config_func_put(data->func);
+
+ return 0;
+}
+
+static struct platform_driver vexpress_hwmon_driver = {
+ .probe = vexpress_hwmon_probe,
+ .remove = __devexit_p(vexpress_hwmon_remove),
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vexpress_hwmon_of_match,
+ },
+};
+
+module_platform_driver(vexpress_hwmon_driver);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Versatile Express hwmon sensors driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:vexpress-hwmon");
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 55ac41c0556..0e8ffd6059a 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
* w83627ehf - Driver for the hardware monitoring functionality of
* the Winbond W83627EHF Super-I/O chip
- * Copyright (C) 2005-2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -502,6 +502,13 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 vbat;
+ u8 fandiv1;
+ u8 fandiv2;
+#endif
};
struct w83627ehf_sio_data {
@@ -898,6 +905,8 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->temp_max_hyst[i]
= w83627ehf_read_temp(data,
data->reg_temp_hyst[i]);
+ if (i > 2)
+ continue;
if (data->have_temp_offset & (1 << i))
data->temp_offset[i]
= w83627ehf_read_value(data,
@@ -2608,10 +2617,98 @@ static int w83627ehf_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int w83627ehf_suspend(struct device *dev)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ mutex_lock(&data->update_lock);
+ data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
+ if (sio_data->kind == nct6775) {
+ data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627ehf_resume(struct device *dev)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ int i;
+
+ mutex_lock(&data->update_lock);
+ data->bank = 0xff; /* Force initial bank selection */
+
+ /* Restore limits */
+ for (i = 0; i < data->in_num; i++) {
+ if ((i == 6) && data->in6_skip)
+ continue;
+
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MIN(i),
+ data->in_min[i]);
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MAX(i),
+ data->in_max[i]);
+ }
+
+ for (i = 0; i < 5; i++) {
+ if (!(data->has_fan_min & (1 << i)))
+ continue;
+
+ w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ data->fan_min[i]);
+ }
+
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+
+ if (data->reg_temp_over[i])
+ w83627ehf_write_temp(data, data->reg_temp_over[i],
+ data->temp_max[i]);
+ if (data->reg_temp_hyst[i])
+ w83627ehf_write_temp(data, data->reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ if (i > 2)
+ continue;
+ if (data->have_temp_offset & (1 << i))
+ w83627ehf_write_value(data,
+ W83627EHF_REG_TEMP_OFFSET[i],
+ data->temp_offset[i]);
+ }
+
+ /* Restore other settings */
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
+ if (sio_data->kind == nct6775) {
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
+ }
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
+ .suspend = w83627ehf_suspend,
+ .resume = w83627ehf_resume,
+};
+
+#define W83627EHF_DEV_PM_OPS (&w83627ehf_dev_pm_ops)
+#else
+#define W83627EHF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627ehf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627EHF_DEV_PM_OPS,
},
.probe = w83627ehf_probe,
.remove = w83627ehf_remove,
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 7f68b8309d1..81f486520ce 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
* Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -389,6 +389,12 @@ struct w83627hf_data {
*/
u8 vrm;
u8 vrm_ovt; /* Register value, 627THF/637HF/687THF only */
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 scfg1;
+ u8 scfg2;
+#endif
};
@@ -401,10 +407,77 @@ static void w83627hf_update_fan_div(struct w83627hf_data *data);
static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_device(struct platform_device *pdev);
+#ifdef CONFIG_PM
+static int w83627hf_suspend(struct device *dev)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+
+ mutex_lock(&data->update_lock);
+ data->scfg1 = w83627hf_read_value(data, W83781D_REG_SCFG1);
+ data->scfg2 = w83627hf_read_value(data, W83781D_REG_SCFG2);
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627hf_resume(struct device *dev)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ int i, num_temps = (data->type == w83697hf) ? 2 : 3;
+
+ /* Restore limits */
+ mutex_lock(&data->update_lock);
+ for (i = 0; i <= 8; i++) {
+ /* skip missing sensors */
+ if (((data->type == w83697hf) && (i == 1)) ||
+ ((data->type != w83627hf && data->type != w83697hf)
+ && (i == 5 || i == 6)))
+ continue;
+ w83627hf_write_value(data, W83781D_REG_IN_MAX(i),
+ data->in_max[i]);
+ w83627hf_write_value(data, W83781D_REG_IN_MIN(i),
+ data->in_min[i]);
+ }
+ for (i = 0; i <= 2; i++)
+ w83627hf_write_value(data, W83627HF_REG_FAN_MIN(i),
+ data->fan_min[i]);
+ for (i = 0; i < num_temps; i++) {
+ w83627hf_write_value(data, w83627hf_reg_temp_over[i],
+ data->temp_max[i]);
+ w83627hf_write_value(data, w83627hf_reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ }
+
+ /* Fixup BIOS bugs */
+ if (data->type == w83627thf || data->type == w83637hf ||
+ data->type == w83687thf)
+ w83627hf_write_value(data, W83627THF_REG_VRM_OVT_CFG,
+ data->vrm_ovt);
+ w83627hf_write_value(data, W83781D_REG_SCFG1, data->scfg1);
+ w83627hf_write_value(data, W83781D_REG_SCFG2, data->scfg2);
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627hf_dev_pm_ops = {
+ .suspend = w83627hf_suspend,
+ .resume = w83627hf_resume,
+};
+
+#define W83627HF_DEV_PM_OPS (&w83627hf_dev_pm_ops)
+#else
+#define W83627HF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627hf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627HF_DEV_PM_OPS,
},
.probe = w83627hf_probe,
.remove = w83627hf_remove,
@@ -1659,8 +1732,10 @@ static void w83627hf_init_device(struct platform_device *pdev)
/* Minimize conflicts with other winbond i2c-only clients... */
/* disable i2c subclients... how to disable main i2c client?? */
/* force i2c address to relatively uncommon address */
- w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
- w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ if (type == w83627hf) {
+ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
+ w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ }
/* Read VID only once */
if (type == w83627hf || type == w83637hf) {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e9df4612b7e..bdca5111eb9 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -337,6 +337,16 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
help
The unit of the TWI clock is kHz.
+config I2C_CBUS_GPIO
+ tristate "CBUS I2C driver"
+ depends on GENERIC_GPIO
+ help
+ Support for CBUS access using I2C API. Mostly relevant for Nokia
+ Internet Tablets (770, N800 and N810).
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-cbus-gpio.
+
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
depends on (CPM1 || CPM2) && OF_I2C
@@ -818,6 +828,16 @@ config I2C_TINY_USB
This driver can also be built as a module. If so, the module
will be called i2c-tiny-usb.
+config I2C_VIPERBOARD
+ tristate "Viperboard I2C master support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the I2C part of the Nano River
+ Technologies Viperboard as an I2C master.
+ See the Viperboard API specification and Nano River
+ Technologies' viperboard.h for the detailed meaning of
+ the module parameters.
+
comment "Other I2C/SMBus bus drivers"
config I2C_ACORN
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 395b516ffa0..6181f3ff263 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
+obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
# Other I2C/SMBus bus drivers
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 125cd8e0ad2..3f491815e2c 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -139,7 +139,7 @@ static unsigned short ali1535_offset;
Note the differences between kernels with the old PCI BIOS interface and
newer kernels with the real PCI interface. In compat.h some things are
defined to make the transition easier. */
-static int __devinit ali1535_setup(struct pci_dev *dev)
+static int ali1535_setup(struct pci_dev *dev)
{
int retval;
unsigned char temp;
@@ -502,7 +502,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = {
MODULE_DEVICE_TABLE(pci, ali1535_ids);
-static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
@@ -518,7 +518,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali1535_adapter);
}
-static void __devexit ali1535_remove(struct pci_dev *dev)
+static void ali1535_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1535_adapter);
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
@@ -528,7 +528,7 @@ static struct pci_driver ali1535_driver = {
.name = "ali1535_smbus",
.id_table = ali1535_ids,
.probe = ali1535_probe,
- .remove = __devexit_p(ali1535_remove),
+ .remove = ali1535_remove,
};
module_pci_driver(ali1535_driver);
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index e02d9f86c6a..84ccd9496a5 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -326,7 +326,7 @@ static u32 ali1563_func(struct i2c_adapter * a)
}
-static int __devinit ali1563_setup(struct pci_dev * dev)
+static int ali1563_setup(struct pci_dev *dev)
{
u16 ctrl;
@@ -390,8 +390,8 @@ static struct i2c_adapter ali1563_adapter = {
.algo = &ali1563_algorithm,
};
-static int __devinit ali1563_probe(struct pci_dev * dev,
- const struct pci_device_id * id_table)
+static int ali1563_probe(struct pci_dev *dev,
+ const struct pci_device_id *id_table)
{
int error;
@@ -411,7 +411,7 @@ exit:
return error;
}
-static void __devexit ali1563_remove(struct pci_dev * dev)
+static void ali1563_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1563_adapter);
ali1563_shutdown(dev);
@@ -428,7 +428,7 @@ static struct pci_driver ali1563_pci_driver = {
.name = "ali1563_smbus",
.id_table = ali1563_id_table,
.probe = ali1563_probe,
- .remove = __devexit_p(ali1563_remove),
+ .remove = ali1563_remove,
};
module_pci_driver(ali1563_pci_driver);
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index ce8d26d053a..26bcc6127ce 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(force_addr,
static struct pci_driver ali15x3_driver;
static unsigned short ali15x3_smba;
-static int __devinit ali15x3_setup(struct pci_dev *ALI15X3_dev)
+static int ali15x3_setup(struct pci_dev *ALI15X3_dev)
{
u16 a;
unsigned char temp;
@@ -484,7 +484,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
MODULE_DEVICE_TABLE (pci, ali15x3_ids);
-static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
@@ -500,7 +500,7 @@ static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali15x3_adapter);
}
-static void __devexit ali15x3_remove(struct pci_dev *dev)
+static void ali15x3_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali15x3_adapter);
release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
@@ -510,7 +510,7 @@ static struct pci_driver ali15x3_driver = {
.name = "ali15x3_smbus",
.id_table = ali15x3_ids,
.probe = ali15x3_probe,
- .remove = __devexit_p(ali15x3_remove),
+ .remove = ali15x3_remove,
};
module_pci_driver(ali15x3_driver);
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 304aa03b57b..e13e2aa2d05 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -324,8 +324,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
MODULE_DEVICE_TABLE (pci, amd756_ids);
-static int __devinit amd756_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int nforce = (id->driver_data == NFORCE);
int error;
@@ -397,7 +396,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
return error;
}
-static void __devexit amd756_remove(struct pci_dev *dev)
+static void amd756_remove(struct pci_dev *dev)
{
i2c_del_adapter(&amd756_smbus);
release_region(amd756_ioport, SMB_IOSIZE);
@@ -407,7 +406,7 @@ static struct pci_driver amd756_driver = {
.name = "amd756_smbus",
.id_table = amd756_ids,
.probe = amd756_probe,
- .remove = __devexit_p(amd756_remove),
+ .remove = amd756_remove,
};
module_pci_driver(amd756_driver);
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 0919ac1d99a..a44e6e77c5a 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -422,8 +422,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
MODULE_DEVICE_TABLE (pci, amd8111_ids);
-static int __devinit amd8111_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct amd_smbus *smbus;
int error;
@@ -475,7 +474,7 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
return error;
}
-static void __devexit amd8111_remove(struct pci_dev *dev)
+static void amd8111_remove(struct pci_dev *dev)
{
struct amd_smbus *smbus = pci_get_drvdata(dev);
@@ -488,7 +487,7 @@ static struct pci_driver amd8111_driver = {
.name = "amd8111_smbus2",
.id_table = amd8111_ids,
.probe = amd8111_probe,
- .remove = __devexit_p(amd8111_remove),
+ .remove = amd8111_remove,
};
module_pci_driver(amd8111_driver);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c02bf208084..2bfc04d0a1b 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -19,6 +19,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -29,9 +31,11 @@
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/platform_data/dma-atmel.h>
#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
+#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -66,24 +70,39 @@
#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
struct at91_twi_pdata {
- unsigned clk_max_div;
- unsigned clk_offset;
- bool has_unre_flag;
+ unsigned clk_max_div;
+ unsigned clk_offset;
+ bool has_unre_flag;
+ bool has_dma_support;
+ struct at_dma_slave dma_slave;
+};
+
+struct at91_twi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sg;
+ struct dma_async_tx_descriptor *data_desc;
+ enum dma_data_direction direction;
+ bool buf_mapped;
+ bool xfer_in_progress;
};
struct at91_twi_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct clk *clk;
- u8 *buf;
- size_t buf_len;
- struct i2c_msg *msg;
- int irq;
- unsigned transfer_status;
- struct i2c_adapter adapter;
- unsigned twi_cwgr_reg;
- struct at91_twi_pdata *pdata;
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct clk *clk;
+ u8 *buf;
+ size_t buf_len;
+ struct i2c_msg *msg;
+ int irq;
+ unsigned imr;
+ unsigned transfer_status;
+ struct i2c_adapter adapter;
+ unsigned twi_cwgr_reg;
+ struct at91_twi_pdata *pdata;
+ bool use_dma;
+ struct at91_twi_dma dma;
};
static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
@@ -102,6 +121,17 @@ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
}
+static void at91_twi_irq_save(struct at91_twi_dev *dev)
+{
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
+ at91_disable_twi_interrupts(dev);
+}
+
+static void at91_twi_irq_restore(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IER, dev->imr);
+}
+
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
at91_disable_twi_interrupts(dev);
@@ -115,7 +145,7 @@ static void at91_init_twi_bus(struct at91_twi_dev *dev)
* Calculate symmetric clock as stated in datasheet:
* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
*/
-static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
+static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
int ckdiv, cdiv, div;
struct at91_twi_pdata *pdata = dev->pdata;
@@ -138,6 +168,28 @@ static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
+static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
+{
+ struct at91_twi_dma *dma = &dev->dma;
+
+ at91_twi_irq_save(dev);
+
+ if (dma->xfer_in_progress) {
+ if (dma->direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_all(dma->chan_rx);
+ else
+ dmaengine_terminate_all(dma->chan_tx);
+ dma->xfer_in_progress = false;
+ }
+ if (dma->buf_mapped) {
+ dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
+ dev->buf_len, dma->direction);
+ dma->buf_mapped = false;
+ }
+
+ at91_twi_irq_restore(dev);
+}
+
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -154,6 +206,60 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_write_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_MEM_TO_DEV);
+
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+}
+
+static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *txdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_tx = dma->chan_tx;
+
+ if (dev->buf_len <= 0)
+ return;
+
+ dma->direction = DMA_TO_DEVICE;
+
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ sg_dma_len(&dma->sg) = dev->buf_len;
+ sg_dma_address(&dma->sg) = dma_addr;
+
+ txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ txdesc->callback = at91_twi_write_data_dma_callback;
+ txdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(chan_tx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
+
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -179,6 +285,61 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_read_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_DEV_TO_MEM);
+
+ /* The last two bytes have to be read without using dma */
+ dev->buf += dev->buf_len - 2;
+ dev->buf_len = 2;
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
+}
+
+static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_rx = dma->chan_rx;
+
+ dma->direction = DMA_FROM_DEVICE;
+
+ /* Keep in mind that we won't use dma to read the last two bytes */
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ dma->sg.dma_address = dma_addr;
+ sg_dma_len(&dma->sg) = dev->buf_len - 2;
+
+ rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ rxdesc->callback = at91_twi_read_data_dma_callback;
+ rxdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dma->chan_rx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
+
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
struct at91_twi_dev *dev = dev_id;
@@ -229,12 +390,36 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
- at91_twi_write(dev, AT91_TWI_IER,
+ /*
+ * When using DMA, the last byte has to be read manually so
+ * that the stop command is not sent too late, which would
+ * cause extra data to be received. In practice, reading n-1
+ * bytes with DMA is unreliable because of latency, so reading
+ * n-2 bytes with DMA and the last two manually works best.
+ */
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_read_data_dma(dev);
+ /*
+ * It is important to enable TXCOMP irq here because
+ * doing it only when transferring the last two bytes
+ * will mask NACK errors since TXCOMP is set when a
+ * NACK occurs.
+ */
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP);
+ } else
+ at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
} else {
- at91_twi_write_next_byte(dev);
- at91_twi_write(dev, AT91_TWI_IER,
- AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_write_data_dma(dev);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else {
+ at91_twi_write_next_byte(dev);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ }
}
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
@@ -242,23 +427,31 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (ret == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_NACK) {
dev_dbg(dev->dev, "received nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_OVRE) {
dev_err(dev->dev, "overrun while reading\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
dev_err(dev->dev, "underrun while writing\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
dev_dbg(dev->dev, "transfer complete\n");
return 0;
+
+error:
+ at91_twi_dma_cleanup(dev);
+ return ret;
}
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
@@ -329,36 +522,42 @@ static struct at91_twi_pdata at91rm9200_config = {
.clk_max_div = 5,
.clk_offset = 3,
.has_unre_flag = true,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9261_config = {
.clk_max_div = 5,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9260_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9x5_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = true,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -405,7 +604,91 @@ MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#define atmel_twi_dt_ids NULL
#endif
-static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
+{
+ int ret = 0;
+ struct at_dma_slave *sdata;
+ struct dma_slave_config slave_config;
+ struct at91_twi_dma *dma = &dev->dma;
+
+ sdata = &dev->pdata->dma_slave;
+
+ memset(&slave_config, 0, sizeof(slave_config));
+ slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ if (sdata && sdata->dma_dev) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma->chan_tx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_tx) {
+ dev_err(dev->dev, "no DMA channel available for tx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ dma->chan_rx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_rx) {
+ dev_err(dev->dev, "no DMA channel available for rx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure tx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure rx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ sg_init_table(&dma->sg, 1);
+ dma->buf_mapped = false;
+ dma->xfer_in_progress = false;
+
+ dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+ return ret;
+
+error:
+ dev_info(dev->dev, "can't use DMA\n");
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+ return ret;
+}
+
+static struct at91_twi_pdata *at91_twi_get_driver_data(
struct platform_device *pdev)
{
if (pdev->dev.of_node) {
@@ -413,16 +696,17 @@ static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
if (!match)
return NULL;
- return match->data;
+ return (struct at91_twi_pdata *)match->data;
}
return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
}
-static int __devinit at91_twi_probe(struct platform_device *pdev)
+static int at91_twi_probe(struct platform_device *pdev)
{
struct at91_twi_dev *dev;
struct resource *mem;
int rc;
+ u32 phy_addr;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -433,6 +717,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -ENODEV;
+ phy_addr = mem->start;
dev->pdata = at91_twi_get_driver_data(pdev);
if (!dev->pdata)
@@ -462,6 +747,11 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
}
clk_prepare_enable(dev->clk);
+ if (dev->pdata->has_dma_support) {
+ if (at91_twi_configure_dma(dev, phy_addr) == 0)
+ dev->use_dma = true;
+ }
+
at91_calc_twi_clock(dev, TWI_CLK_HZ);
at91_init_twi_bus(dev);
@@ -489,7 +779,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit at91_twi_remove(struct platform_device *pdev)
+static int at91_twi_remove(struct platform_device *pdev)
{
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
int rc;
@@ -530,7 +820,7 @@ static const struct dev_pm_ops at91_twi_pm = {
static struct platform_driver at91_twi_driver = {
.probe = at91_twi_probe,
- .remove = __devexit_p(at91_twi_remove),
+ .remove = at91_twi_remove,
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 582d616db34..b278298787d 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -313,7 +313,7 @@ static void i2c_au1550_disable(struct i2c_au1550_data *priv)
* Prior to calling us, the 50MHz clock frequency and routing
* must have been set up for the PSC indicated by the adapter.
*/
-static int __devinit
+static int
i2c_au1550_probe(struct platform_device *pdev)
{
struct i2c_au1550_data *priv;
@@ -372,7 +372,7 @@ out:
return ret;
}
-static int __devexit i2c_au1550_remove(struct platform_device *pdev)
+static int i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
@@ -423,7 +423,7 @@ static struct platform_driver au1xpsc_smbus_driver = {
.pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
- .remove = __devexit_p(i2c_au1550_remove),
+ .remove = i2c_au1550_remove,
};
module_platform_driver(au1xpsc_smbus_driver);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
new file mode 100644
index 00000000000..98386d65931
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -0,0 +1,300 @@
+/*
+ * CBUS I2C driver for Nokia Internet Tablets.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and
+ * Felipe Balbi. Converted to I2C driver by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-cbus-gpio.h>
+
+/*
+ * Bit counts are derived from Nokia implementation. These should be checked
+ * if other CBUS implementations appear.
+ */
+#define CBUS_ADDR_BITS 3
+#define CBUS_REG_BITS 5
+
+struct cbus_host {
+ spinlock_t lock; /* host lock */
+ struct device *dev;
+ int clk_gpio;
+ int dat_gpio;
+ int sel_gpio;
+};
+
+/**
+ * cbus_send_bit - sends one bit over the bus
+ * @host: the host we're using
+ * @bit: one bit of information to send
+ */
+static void cbus_send_bit(struct cbus_host *host, unsigned bit)
+{
+ gpio_set_value(host->dat_gpio, bit ? 1 : 0);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+}
+
+/**
+ * cbus_send_data - sends @len amount of data over the bus
+ * @host: the host we're using
+ * @data: the data to send
+ * @len: size of the transfer
+ */
+static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len)
+{
+ int i;
+
+ for (i = len; i > 0; i--)
+ cbus_send_bit(host, data & (1 << (i - 1)));
+}
+
+/**
+ * cbus_receive_bit - receives one bit from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_bit(struct cbus_host *host)
+{
+ int ret;
+
+ gpio_set_value(host->clk_gpio, 1);
+ ret = gpio_get_value(host->dat_gpio);
+ gpio_set_value(host->clk_gpio, 0);
+ return ret;
+}
+
+/**
+ * cbus_receive_word - receives 16-bit word from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_word(struct cbus_host *host)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 16; i > 0; i--) {
+ int bit = cbus_receive_bit(host);
+
+ if (bit < 0)
+ return bit;
+
+ if (bit)
+ ret |= 1 << (i - 1);
+ }
+ return ret;
+}
+
+/**
+ * cbus_transfer - transfers data over the bus
+ * @host: the host we're using
+ * @rw: read/write flag
+ * @dev: device address
+ * @reg: register address
+ * @data: if @rw == I2C_SMBUS_WRITE data to send otherwise 0
+ */
+static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev,
+ unsigned reg, unsigned data)
+{
+ unsigned long flags;
+ int ret;
+
+ /* We don't want interrupts disturbing our transfer */
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Reset state and start of transfer, SEL stays down during transfer */
+ gpio_set_value(host->sel_gpio, 0);
+
+ /* Set the DAT pin to output */
+ gpio_direction_output(host->dat_gpio, 1);
+
+ /* Send the device address */
+ cbus_send_data(host, dev, CBUS_ADDR_BITS);
+
+ /* Send the rw flag */
+ cbus_send_bit(host, rw == I2C_SMBUS_READ);
+
+ /* Send the register address */
+ cbus_send_data(host, reg, CBUS_REG_BITS);
+
+ if (rw == I2C_SMBUS_WRITE) {
+ cbus_send_data(host, data, 16);
+ ret = 0;
+ } else {
+ ret = gpio_direction_input(host->dat_gpio);
+ if (ret) {
+ dev_dbg(host->dev, "failed setting direction\n");
+ goto out;
+ }
+ gpio_set_value(host->clk_gpio, 1);
+
+ ret = cbus_receive_word(host);
+ if (ret < 0) {
+ dev_dbg(host->dev, "failed receiving data\n");
+ goto out;
+ }
+ }
+
+ /* Indicate end of transfer, SEL goes up until next transfer */
+ gpio_set_value(host->sel_gpio, 1);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+
+static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter,
+ u16 addr,
+ unsigned short flags,
+ char read_write,
+ u8 command,
+ int size,
+ union i2c_smbus_data *data)
+{
+ struct cbus_host *chost = i2c_get_adapdata(adapter);
+ int ret;
+
+ if (size != I2C_SMBUS_WORD_DATA)
+ return -EINVAL;
+
+ ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr,
+ command, data->word);
+ if (ret < 0)
+ return ret;
+
+ if (read_write == I2C_SMBUS_READ)
+ data->word = ret;
+
+ return 0;
+}
+
+static u32 cbus_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+}
+
+static const struct i2c_algorithm cbus_i2c_algo = {
+ .smbus_xfer = cbus_i2c_smbus_xfer,
+ .functionality = cbus_i2c_func,
+};
+
+static int cbus_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+ return i2c_del_adapter(adapter);
+}
+
+static int cbus_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct cbus_host *chost;
+ int ret;
+
+ adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter),
+ GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL);
+ if (!chost)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ struct device_node *dnode = pdev->dev.of_node;
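+ /* The DT node must carry exactly three GPIOs, in clk, dat, sel order */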
+ if (of_gpio_count(dnode) != 3)
+ return -ENODEV;
+ chost->clk_gpio = of_get_gpio(dnode, 0);
+ chost->dat_gpio = of_get_gpio(dnode, 1);
+ chost->sel_gpio = of_get_gpio(dnode, 2);
+ } else if (pdev->dev.platform_data) {
+ struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
+ chost->clk_gpio = pdata->clk_gpio;
+ chost->dat_gpio = pdata->dat_gpio;
+ chost->sel_gpio = pdata->sel_gpio;
+ } else {
+ return -ENODEV;
+ }
+
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+ adapter->nr = pdev->id;
+ adapter->timeout = HZ;
+ adapter->algo = &cbus_i2c_algo;
+ strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+
+ spin_lock_init(&chost->lock);
+ chost->dev = &pdev->dev;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->clk_gpio,
+ GPIOF_OUT_INIT_LOW, "CBUS clk");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->dat_gpio, GPIOF_IN,
+ "CBUS data");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->sel_gpio,
+ GPIOF_OUT_INIT_HIGH, "CBUS sel");
+ if (ret)
+ return ret;
+
+ i2c_set_adapdata(adapter, chost);
+ platform_set_drvdata(pdev, adapter);
+
+ return i2c_add_numbered_adapter(adapter);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id i2c_cbus_dt_ids[] = {
+ { .compatible = "i2c-cbus-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
+#endif
+
+static struct platform_driver cbus_i2c_driver = {
+ .probe = cbus_i2c_probe,
+ .remove = cbus_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "i2c-cbus-gpio",
+ },
+};
+module_platform_driver(cbus_i2c_driver);
+
+MODULE_ALIAS("platform:i2c-cbus-gpio");
+MODULE_DESCRIPTION("CBUS I2C driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index c1e1096ba06..2e79c102419 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -426,7 +426,7 @@ static const struct i2c_adapter cpm_ops = {
.algo = &cpm_i2c_algo,
};
-static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
+static int cpm_i2c_setup(struct cpm_i2c *cpm)
{
struct platform_device *ofdev = cpm->ofdev;
const u32 *data;
@@ -634,7 +634,7 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm)
cpm_muram_free(cpm->i2c_addr);
}
-static int __devinit cpm_i2c_probe(struct platform_device *ofdev)
+static int cpm_i2c_probe(struct platform_device *ofdev)
{
int result, len;
struct cpm_i2c *cpm;
@@ -688,7 +688,7 @@ out_free:
return result;
}
-static int __devexit cpm_i2c_remove(struct platform_device *ofdev)
+static int cpm_i2c_remove(struct platform_device *ofdev)
{
struct cpm_i2c *cpm = dev_get_drvdata(&ofdev->dev);
@@ -716,7 +716,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
- .remove = __devexit_p(cpm_i2c_remove),
+ .remove = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 92a1e2c15ba..6add851e9de 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -207,7 +207,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return dev->controller->clk_khz;
}
-static int __devinit i2c_dw_pci_probe(struct pci_dev *pdev,
+static int i2c_dw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct dw_i2c_dev *dev;
@@ -328,7 +328,7 @@ exit:
return r;
}
-static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
+static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
@@ -368,7 +368,7 @@ static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
.id_table = i2_designware_pci_ids,
.probe = i2c_dw_pci_probe,
- .remove = __devexit_p(i2c_dw_pci_remove),
+ .remove = i2c_dw_pci_remove,
.driver = {
.pm = &i2c_dw_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0506fef8dc0..343357a2b5b 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -50,7 +50,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk)/1000;
}
-static int __devinit dw_i2c_probe(struct platform_device *pdev)
+static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -169,7 +169,7 @@ err_release_region:
return r;
}
-static int __devexit dw_i2c_remove(struct platform_device *pdev)
+static int dw_i2c_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
@@ -228,7 +228,7 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
MODULE_ALIAS("platform:i2c_designware");
static struct platform_driver dw_i2c_driver = {
- .remove = __devexit_p(dw_i2c_remove),
+ .remove = dw_i2c_remove,
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 259f7697bf2..5e7886e7136 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -758,7 +758,7 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
}
-static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+static int pch_i2c_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *base_addr;
@@ -851,7 +851,7 @@ err_pci_enable:
return ret;
}
-static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+static void pch_i2c_remove(struct pci_dev *pdev)
{
int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
@@ -948,7 +948,7 @@ static struct pci_driver pch_pcidriver = {
.name = KBUILD_MODNAME,
.id_table = pch_pcidev_id,
.probe = pch_i2c_probe,
- .remove = __devexit_p(pch_i2c_remove),
+ .remove = pch_i2c_remove,
.suspend = pch_i2c_suspend,
.resume = pch_i2c_resume
};
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 37e2e82a9c8..485497066ed 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -205,7 +205,7 @@ static struct i2c_adapter pcf_isa_ops = {
.name = "i2c-elektor",
};
-static int __devinit elektor_match(struct device *dev, unsigned int id)
+static int elektor_match(struct device *dev, unsigned int id)
{
#ifdef __alpha__
/* check to see we have memory mapped PCF8584 connected to the
@@ -264,7 +264,7 @@ static int __devinit elektor_match(struct device *dev, unsigned int id)
return 1;
}
-static int __devinit elektor_probe(struct device *dev, unsigned int id)
+static int elektor_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pcf_wait);
if (pcf_isa_init())
@@ -293,7 +293,7 @@ static int __devinit elektor_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit elektor_remove(struct device *dev, unsigned int id)
+static int elektor_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pcf_isa_ops);
@@ -316,7 +316,7 @@ static int __devexit elektor_remove(struct device *dev, unsigned int id)
static struct isa_driver i2c_elektor_driver = {
.match = elektor_match,
.probe = elektor_probe,
- .remove = __devexit_p(elektor_remove),
+ .remove = elektor_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-elektor",
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e62d2d93862..f3fa4332bbd 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -85,7 +85,7 @@ static int i2c_gpio_getscl(void *data)
return gpio_get_value(pdata->scl_pin);
}
-static int __devinit of_i2c_gpio_probe(struct device_node *np,
+static int of_i2c_gpio_probe(struct device_node *np,
struct i2c_gpio_platform_data *pdata)
{
u32 reg;
@@ -117,7 +117,7 @@ static int __devinit of_i2c_gpio_probe(struct device_node *np,
return 0;
}
-static int __devinit i2c_gpio_probe(struct platform_device *pdev)
+static int i2c_gpio_probe(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -184,7 +184,11 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
bit_data->data = pdata;
adap->owner = THIS_MODULE;
- snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+ if (pdev->dev.of_node)
+ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ else
+ snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->dev.parent = &pdev->dev;
@@ -214,7 +218,7 @@ err_request_sda:
return ret;
}
-static int __devexit i2c_gpio_remove(struct platform_device *pdev)
+static int i2c_gpio_remove(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -247,7 +251,7 @@ static struct platform_driver i2c_gpio_driver = {
.of_match_table = of_match_ptr(i2c_gpio_dt_ids),
},
.probe = i2c_gpio_probe,
- .remove = __devexit_p(i2c_gpio_remove),
+ .remove = i2c_gpio_remove,
};
static int __init i2c_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 19515df6102..3351cc7ed11 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -356,7 +356,7 @@ static const struct i2c_algorithm highlander_i2c_algo = {
.functionality = highlander_i2c_func,
};
-static int __devinit highlander_i2c_probe(struct platform_device *pdev)
+static int highlander_i2c_probe(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -441,7 +441,7 @@ err:
return ret;
}
-static int __devexit highlander_i2c_remove(struct platform_device *pdev)
+static int highlander_i2c_remove(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev = platform_get_drvdata(pdev);
@@ -465,7 +465,7 @@ static struct platform_driver highlander_i2c_driver = {
},
.probe = highlander_i2c_probe,
- .remove = __devexit_p(highlander_i2c_remove),
+ .remove = highlander_i2c_remove,
};
module_platform_driver(highlander_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c9f95e1666a..79c3d9069a4 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -112,7 +112,7 @@ static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
MODULE_DEVICE_TABLE (pci, hydra_ids);
-static int __devinit hydra_probe(struct pci_dev *dev,
+static int hydra_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned long base = pci_resource_start(dev, 0);
@@ -139,7 +139,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit hydra_remove(struct pci_dev *dev)
+static void hydra_remove(struct pci_dev *dev)
{
pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */
i2c_del_adapter(&hydra_adap);
@@ -153,7 +153,7 @@ static struct pci_driver hydra_driver = {
.name = "hydra_smbus",
.id_table = hydra_ids,
.probe = hydra_probe,
- .remove = __devexit_p(hydra_remove),
+ .remove = hydra_remove,
};
module_pci_driver(hydra_driver);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6abc00d5988..3092387f6ef 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -81,6 +81,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/err.h>
+#include <linux/of_i2c.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
@@ -840,14 +841,14 @@ struct dmi_onboard_device_info {
const char *i2c_type;
};
-static struct dmi_onboard_device_info __devinitdata dmi_devices[] = {
+static const struct dmi_onboard_device_info dmi_devices[] = {
{ "Syleus", DMI_DEV_TYPE_OTHER, 0x73, "fscsyl" },
{ "Hermes", DMI_DEV_TYPE_OTHER, 0x73, "fscher" },
{ "Hades", DMI_DEV_TYPE_OTHER, 0x73, "fschds" },
};
-static void __devinit dmi_check_onboard_device(u8 type, const char *name,
- struct i2c_adapter *adap)
+static void dmi_check_onboard_device(u8 type, const char *name,
+ struct i2c_adapter *adap)
{
int i;
struct i2c_board_info info;
@@ -870,8 +871,7 @@ static void __devinit dmi_check_onboard_device(u8 type, const char *name,
/* We use our own function to check for onboard devices instead of
dmi_find_device() as some buggy BIOS's have the devices we are interested
in marked as disabled */
-static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
- void *adap)
+static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
{
int i, count;
@@ -900,7 +900,7 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
/* Register optional slaves */
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
+static void i801_probe_optional_slaves(struct i801_priv *priv)
{
/* Only register slaves on main SMBus channel */
if (priv->features & FEATURE_IDF)
@@ -920,7 +920,7 @@ static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
}
#else
static void __init input_apanel_init(void) {}
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
+static void i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
@@ -943,7 +943,7 @@ static struct i801_mux_config i801_mux_config_asus_z8_d18 = {
.n_gpios = 2,
};
-static struct dmi_system_id __devinitdata mux_dmi_table[] = {
+static const struct dmi_system_id mux_dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
@@ -1011,7 +1011,7 @@ static struct dmi_system_id __devinitdata mux_dmi_table[] = {
};
/* Setup multiplexing if needed */
-static int __devinit i801_add_mux(struct i801_priv *priv)
+static int i801_add_mux(struct i801_priv *priv)
{
struct device *dev = &priv->adapter.dev;
const struct i801_mux_config *mux_config;
@@ -1047,13 +1047,13 @@ static int __devinit i801_add_mux(struct i801_priv *priv)
return 0;
}
-static void __devexit i801_del_mux(struct i801_priv *priv)
+static void i801_del_mux(struct i801_priv *priv)
{
if (priv->mux_pdev)
platform_device_unregister(priv->mux_pdev);
}
-static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv)
+static unsigned int i801_get_adapter_class(struct i801_priv *priv)
{
const struct dmi_system_id *id;
const struct i801_mux_config *mux_config;
@@ -1083,8 +1083,7 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
int err, i;
@@ -1108,6 +1107,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
/* fall through */
default:
priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
priv->features |= FEATURE_SMBUS_PEC;
@@ -1120,16 +1120,6 @@ static int __devinit i801_probe(struct pci_dev *dev,
break;
}
- /* IRQ processing tested on CougarPoint PCH, ICH5, ICH7-M and ICH10 */
- if (dev->device == PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS ||
- dev->device == PCI_DEVICE_ID_INTEL_82801EB_3 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH7_17 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH8_5 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH9_6 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_4 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_5)
- priv->features |= FEATURE_IRQ;
-
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
if (priv->features & disable_features & (1 << i))
@@ -1215,6 +1205,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
goto exit_free_irq;
}
+ of_i2c_register_devices(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
@@ -1233,7 +1224,7 @@ exit:
return err;
}
-static void __devexit i801_remove(struct pci_dev *dev)
+static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
@@ -1279,7 +1270,7 @@ static struct pci_driver i801_driver = {
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
- .remove = __devexit_p(i801_remove),
+ .remove = i801_remove,
.suspend = i801_suspend,
.resume = i801_resume,
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 806e225f3de..33a2abb6c06 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -660,7 +660,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
return (u8)((opb + 9) / 10 - 1);
}
-static int __devinit iic_request_irq(struct platform_device *ofdev,
+static int iic_request_irq(struct platform_device *ofdev,
struct ibm_iic_private *dev)
{
struct device_node *np = ofdev->dev.of_node;
@@ -691,7 +691,7 @@ static int __devinit iic_request_irq(struct platform_device *ofdev,
/*
* Register single IIC interface
*/
-static int __devinit iic_probe(struct platform_device *ofdev)
+static int iic_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
@@ -781,7 +781,7 @@ error_cleanup:
/*
* Cleanup initialized IIC interface
*/
-static int __devexit iic_remove(struct platform_device *ofdev)
+static int iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
@@ -812,7 +812,7 @@ static struct platform_driver ibm_iic_driver = {
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
- .remove = __devexit_p(iic_remove),
+ .remove = iic_remove,
};
module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index 7c28f10f95c..de3736bf646 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -947,7 +947,7 @@ static const struct dev_pm_ops intel_mid_i2c_pm_ops = {
* 5. Call intel_mid_i2c_hwinit() for hardware initialization
* 6. Register I2C adapter in i2c-core
*/
-static int __devinit intel_mid_i2c_probe(struct pci_dev *dev,
+static int intel_mid_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct intel_mid_i2c_private *mrst;
@@ -1079,7 +1079,7 @@ exit:
return err;
}
-static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
+static void intel_mid_i2c_remove(struct pci_dev *dev)
{
struct intel_mid_i2c_private *mrst = pci_get_drvdata(dev);
intel_mid_i2c_disable(&mrst->adap);
@@ -1113,7 +1113,7 @@ static struct pci_driver intel_mid_i2c_driver = {
.name = DRIVER_NAME,
.id_table = intel_mid_i2c_ids,
.probe = intel_mid_i2c_probe,
- .remove = __devexit_p(intel_mid_i2c_remove),
+ .remove = intel_mid_i2c_remove,
};
module_pci_driver(intel_mid_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index f90a6057508..4099f79c228 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -249,7 +249,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit smbus_sch_probe(struct platform_device *dev)
+static int smbus_sch_probe(struct platform_device *dev)
{
struct resource *res;
int retval;
@@ -284,7 +284,7 @@ static int __devinit smbus_sch_probe(struct platform_device *dev)
return retval;
}
-static int __devexit smbus_sch_remove(struct platform_device *pdev)
+static int smbus_sch_remove(struct platform_device *pdev)
{
struct resource *res;
if (sch_smba) {
@@ -303,7 +303,7 @@ static struct platform_driver smbus_sch_driver = {
.owner = THIS_MODULE,
},
.probe = smbus_sch_probe,
- .remove = __devexit_p(smbus_sch_remove),
+ .remove = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index ca86430cb4a..a69459e5c3f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -175,7 +175,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
}
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
-static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
{28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02},
{36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28},
@@ -196,7 +196,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
{10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f}
};
-static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
int prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -230,7 +230,7 @@ static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
return (int)div->fdr;
}
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -252,7 +252,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
fdr);
}
#else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -260,7 +260,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
#endif /* CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x */
#ifdef CONFIG_PPC_MPC512x
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -288,7 +288,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
mpc_i2c_setup_52xx(node, i2c, clock, prescaler);
}
#else /* CONFIG_PPC_MPC512x */
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -296,7 +296,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
#endif /* CONFIG_PPC_MPC512x */
#ifdef CONFIG_FSL_SOC
-static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
{160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123},
{288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102},
{416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127},
@@ -316,7 +316,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
{49152, 0x011e}, {61440, 0x011f}
};
-static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
+static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
struct device_node *node = NULL;
u32 __iomem *reg;
@@ -345,7 +345,7 @@ static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
return val;
}
-static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
u32 prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -383,7 +383,7 @@ static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
return div ? (int)div->fdr : -EINVAL;
}
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -408,7 +408,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
}
#else /* !CONFIG_FSL_SOC */
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -615,7 +615,7 @@ static struct i2c_adapter mpc_ops = {
};
static const struct of_device_id mpc_i2c_of_match[];
-static int __devinit fsl_i2c_probe(struct platform_device *op)
+static int fsl_i2c_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct mpc_i2c *i2c;
@@ -706,7 +706,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
return result;
};
-static int __devexit fsl_i2c_remove(struct platform_device *op)
+static int fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
@@ -746,24 +746,24 @@ static int mpc_i2c_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
#endif
-static const struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
};
-static const struct mpc_i2c_data mpc_i2c_data_52xx __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_52xx = {
.setup = mpc_i2c_setup_52xx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8313 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8313 = {
.setup = mpc_i2c_setup_8xxx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8543 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8543 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 2,
};
-static const struct mpc_i2c_data mpc_i2c_data_8544 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8544 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 3,
};
@@ -785,7 +785,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
- .remove = __devexit_p(fsl_i2c_remove),
+ .remove = fsl_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 2e9d56719e9..8b20ef8524a 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -495,7 +495,7 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
*
*****************************************************************************
*/
-static int __devinit
+static int
mv64xxx_i2c_map_regs(struct platform_device *pd,
struct mv64xxx_i2c_data *drv_data)
{
@@ -530,13 +530,13 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
}
#ifdef CONFIG_OF
-static int __devinit
+static int
mv64xxx_calc_freq(const int tclk, const int n, const int m)
{
return tclk / (10 * (m + 1) * (2 << n));
}
-static bool __devinit
+static bool
mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
u32 *best_m)
{
@@ -560,7 +560,7 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
return true;
}
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -597,7 +597,7 @@ out:
#endif
}
#else /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -605,7 +605,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
}
#endif /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
@@ -697,7 +697,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
return rc;
}
-static int __devexit
+static int
mv64xxx_i2c_remove(struct platform_device *dev)
{
struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
@@ -718,7 +718,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
return rc;
}
-static const struct of_device_id mv64xxx_i2c_of_match_table[] __devinitdata = {
+static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "marvell,mv64xxx-i2c", },
{}
};
@@ -726,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
- .remove = __devexit_p(mv64xxx_i2c_remove),
+ .remove = mv64xxx_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 0670da79ee5..1b1a936eccc 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -359,7 +359,7 @@ static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 mxs_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
@@ -432,7 +432,7 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
return 0;
}
-static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+static int mxs_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
@@ -515,7 +515,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+static int mxs_i2c_remove(struct platform_device *pdev)
{
struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
int ret;
@@ -546,7 +546,7 @@ static struct platform_driver mxs_i2c_driver = {
.owner = THIS_MODULE,
.of_match_table = mxs_i2c_dt_ids,
},
- .remove = __devexit_p(mxs_i2c_remove),
+ .remove = mxs_i2c_remove,
};
static int __init mxs_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 392303b4be0..adac8542771 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -117,7 +117,7 @@ struct nforce2_smbus {
#define MAX_TIMEOUT 100
/* We disable the second SMBus channel on these boards */
-static struct dmi_system_id __devinitdata nforce2_dmi_blacklist2[] = {
+static const struct dmi_system_id nforce2_dmi_blacklist2[] = {
{
.ident = "DFI Lanparty NF4 Expert",
.matches = {
@@ -330,8 +330,8 @@ static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
MODULE_DEVICE_TABLE (pci, nforce2_ids);
-static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
- int alt_reg, struct nforce2_smbus *smbus, const char *name)
+static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
+ struct nforce2_smbus *smbus, const char *name)
{
int error;
@@ -382,7 +382,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
}
-static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct nforce2_smbus *smbuses;
int res1, res2;
@@ -430,7 +430,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
}
-static void __devexit nforce2_remove(struct pci_dev *dev)
+static void nforce2_remove(struct pci_dev *dev)
{
struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
@@ -450,7 +450,7 @@ static struct pci_driver nforce2_driver = {
.name = "nForce2_smbus",
.id_table = nforce2_ids,
.probe = nforce2_probe,
- .remove = __devexit_p(nforce2_remove),
+ .remove = nforce2_remove,
};
module_pci_driver(nforce2_driver);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 02c3115a2df..8b2ffcf4532 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -435,13 +435,6 @@ static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
@@ -523,13 +516,6 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index f41502ef3f5..865ee350adb 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -304,7 +304,7 @@ retry_write:
case STATE_READ:
/* we have a byte of data in the data register, do
- * something with it, and then work out wether we are
+ * something with it, and then work out whether we are
* going to do any more read/write
*/
@@ -518,7 +518,7 @@ static const struct i2c_algorithm nuc900_i2c_algorithm = {
* called by the bus driver when a suitable device is found
*/
-static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
+static int nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
@@ -663,7 +663,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
* called when device is removed from the bus
*/
-static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
+static int nuc900_i2c_remove(struct platform_device *pdev)
{
struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
@@ -684,7 +684,7 @@ static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
static struct platform_driver nuc900_i2c_driver = {
.probe = nuc900_i2c_probe,
- .remove = __devexit_p(nuc900_i2c_remove),
+ .remove = nuc900_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "nuc900-i2c0",
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 15da1ac7cf9..a873d0ad1ac 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -4,6 +4,9 @@
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
+ * Support for the GRLIB port of the controller by
+ * Andreas Larsson <andreas@gaisler.com>
+ *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
@@ -34,6 +37,8 @@ struct ocores_i2c {
int nmsgs;
int state; /* see STATE_ */
int clock_khz;
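+ /* register accessors, chosen at probe time from reg-io-width or the GRLIB binding */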
+ void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
+ u8 (*getreg)(struct ocores_i2c *i2c, int reg);
};
/* registers */
@@ -67,24 +72,47 @@ struct ocores_i2c {
#define STATE_READ 3
#define STATE_ERROR 4
+#define TYPE_OCORES 0
+#define TYPE_GRLIB 1
+
+static void oc_setreg_8(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_16(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite16(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite32(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_8(struct ocores_i2c *i2c, int reg)
+{
+ return ioread8(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_16(struct ocores_i2c *i2c, int reg)
+{
+ return ioread16(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg)
+{
+ return ioread32(i2c->base + (reg << i2c->reg_shift));
+}
+
static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
{
- if (i2c->reg_io_width == 4)
- iowrite32(value, i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- iowrite16(value, i2c->base + (reg << i2c->reg_shift));
- else
- iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+ i2c->setreg(i2c, reg, value);
}
static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg)
{
- if (i2c->reg_io_width == 4)
- return ioread32(i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- return ioread16(i2c->base + (reg << i2c->reg_shift));
- else
- return ioread8(i2c->base + (reg << i2c->reg_shift));
+ return i2c->getreg(i2c, reg);
}
static void ocores_process(struct ocores_i2c *i2c)
@@ -223,11 +251,59 @@ static struct i2c_adapter ocores_adapter = {
.algo = &ocores_algorithm,
};
+static struct of_device_id ocores_i2c_match[] = {
+ {
+ .compatible = "opencores,i2c-ocores",
+ .data = (void *)TYPE_OCORES,
+ },
+ {
+ .compatible = "aeroflexgaisler,i2cmst",
+ .data = (void *)TYPE_GRLIB,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+
#ifdef CONFIG_OF
+/* Read and write functions for the GRLIB port of the controller. Registers are
+ * 32-bit big endian and the PRELOW and PREHIGH registers are merged into one
+ * register. The subsequent registers have their offsets decreased accordingly. */
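+/*
+ * Concretely, PRELOW occupies bits [7:0] and PREHIGH bits [15:8] of the
+ * first 32-bit word, so every following register sits one word earlier
+ * than its nominal index (index 2 at word 1, index 3 at word 2, etc.).
+ */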
+static u8 oc_getreg_grlib(struct ocores_i2c *i2c, int reg)
+{
+ u32 rd;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ rd = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PREHIGH)
+ return (u8)(rd >> 8);
+ else
+ return (u8)rd;
+}
+
+static void oc_setreg_grlib(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ u32 curr, wr;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ if (reg == OCI2C_PRELOW || reg == OCI2C_PREHIGH) {
+ curr = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PRELOW)
+ wr = (curr & 0xff00) | value;
+ else
+ wr = (((u32)value) << 8) | (curr & 0xff);
+ } else {
+ wr = value;
+ }
+ iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift));
+}
+
static int ocores_i2c_of_probe(struct platform_device *pdev,
struct ocores_i2c *i2c)
{
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
u32 val;
if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
@@ -253,17 +329,26 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "reg-io-width",
&i2c->reg_io_width);
+
+ match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
+ if (match && (int)match->data == TYPE_GRLIB) {
+ dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
+ i2c->setreg = oc_setreg_grlib;
+ i2c->getreg = oc_getreg_grlib;
+ }
+
return 0;
}
#else
#define ocores_i2c_of_probe(pdev,i2c) -ENODEV
#endif
-static int __devinit ocores_i2c_probe(struct platform_device *pdev)
+static int ocores_i2c_probe(struct platform_device *pdev)
{
struct ocores_i2c *i2c;
struct ocores_i2c_platform_data *pdata;
- struct resource *res, *res2;
+ struct resource *res;
+ int irq;
int ret;
int i;
@@ -271,26 +356,17 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Memory region busy\n");
- return -EBUSY;
- }
-
- i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!i2c->base) {
- dev_err(&pdev->dev, "Unable to map registers\n");
- return -EIO;
- }
+ i2c->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!i2c->base)
+ return -EADDRNOTAVAIL;
pdata = pdev->dev.platform_data;
if (pdata) {
@@ -306,10 +382,34 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (i2c->reg_io_width == 0)
i2c->reg_io_width = 1; /* Set to default value */
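+ /* Keep any accessors already installed (e.g. the GRLIB ones from OF probe) */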
+ if (!i2c->setreg || !i2c->getreg) {
+ switch (i2c->reg_io_width) {
+ case 1:
+ i2c->setreg = oc_setreg_8;
+ i2c->getreg = oc_getreg_8;
+ break;
+
+ case 2:
+ i2c->setreg = oc_setreg_16;
+ i2c->getreg = oc_getreg_16;
+ break;
+
+ case 4:
+ i2c->setreg = oc_setreg_32;
+ i2c->getreg = oc_getreg_32;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
+ i2c->reg_io_width);
+ return -EINVAL;
+ }
+ }
+
ocores_init(i2c);
init_waitqueue_head(&i2c->wait);
- ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
@@ -341,7 +441,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ocores_i2c_remove(struct platform_device *pdev)
+static int ocores_i2c_remove(struct platform_device *pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
@@ -383,15 +483,9 @@ static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
#define OCORES_I2C_PM NULL
#endif
-static struct of_device_id ocores_i2c_match[] = {
- { .compatible = "opencores,i2c-ocores", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
- .remove = __devexit_p(ocores_i2c_remove),
+ .remove = ocores_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index f44c83549fe..484ca771fdf 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -446,7 +446,7 @@ static struct i2c_adapter octeon_i2c_ops = {
/**
* octeon_i2c_setclock - Calculate and set clock divisors.
*/
-static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
+static int octeon_i2c_setclock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -489,7 +489,7 @@ static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
return 0;
}
-static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
+static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
{
u8 status;
int tries;
@@ -510,7 +510,7 @@ static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
return -EIO;
}
-static int __devinit octeon_i2c_probe(struct platform_device *pdev)
+static int octeon_i2c_probe(struct platform_device *pdev)
{
int irq, result = 0;
struct octeon_i2c *i2c;
@@ -609,7 +609,7 @@ out:
return result;
};
-static int __devexit octeon_i2c_remove(struct platform_device *pdev)
+static int octeon_i2c_remove(struct platform_device *pdev)
{
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
@@ -628,7 +628,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match);
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
- .remove = __devexit_p(octeon_i2c_remove),
+ .remove = octeon_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3525c9e62cb..20d41bfa7c1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,14 +43,16 @@
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
/* I2C controller revisions present on specific hardware */
-#define OMAP_I2C_REV_ON_2430 0x36
-#define OMAP_I2C_REV_ON_3430_3530 0x3C
-#define OMAP_I2C_REV_ON_3630_4430 0x40
+#define OMAP_I2C_REV_ON_2430 0x00000036
+#define OMAP_I2C_REV_ON_3430_3530 0x0000003C
+#define OMAP_I2C_REV_ON_3630 0x00000040
+#define OMAP_I2C_REV_ON_4430_PLUS 0x50400002
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -190,7 +192,6 @@ struct omap_i2c_dev {
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
u32 speed; /* Speed of bus in kHz */
- u32 dtrev; /* extra revision from DT */
u32 flags;
u16 cmd_err;
u8 *buf;
@@ -202,17 +203,18 @@ struct omap_i2c_dev {
* fifo_size==0 implies no fifo
* if set, should be trsh+1
*/
- u8 rev;
+ u32 rev;
unsigned b_hw:1; /* bad h/w fixes */
unsigned receiver:1; /* true when we're in receiver mode */
u16 iestate; /* Saved interrupt register */
u16 pscstate;
u16 scllstate;
u16 sclhstate;
- u16 bufstate;
u16 syscstate;
u16 westate;
u16 errata;
+
+ struct pinctrl *pins;
};
static const u8 reg_map_ip_v1[] = {
@@ -275,16 +277,39 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
-static int omap_i2c_init(struct omap_i2c_dev *dev)
+static void __omap_i2c_init(struct omap_i2c_dev *dev)
+{
+
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+
+ /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
+ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
+
+ /* SCL low and high time values */
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+
+ /* Take the I2C module out of reset: */
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+
+ /*
+ * Don't write to this register if the IE state is 0 as it can
+ * cause deadlock.
+ */
+ if (dev->iestate)
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+}
+
+static int omap_i2c_reset(struct omap_i2c_dev *dev)
{
- u16 psc = 0, scll = 0, sclh = 0, buf = 0;
- u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
- unsigned long fclk_rate = 12000000;
unsigned long timeout;
- unsigned long internal_clk = 0;
- struct clk *fclk;
+ u16 sysc;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
+ sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
+
/* Disable I2C controller before soft reset */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -306,32 +331,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
/* SYSC register is cleared by the reset; rewrite it */
- if (dev->rev == OMAP_I2C_REV_ON_2430) {
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- SYSC_AUTOIDLE_MASK);
-
- } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
- dev->syscstate = SYSC_AUTOIDLE_MASK;
- dev->syscstate |= SYSC_ENAWAKEUP_MASK;
- dev->syscstate |= (SYSC_IDLEMODE_SMART <<
- __ffs(SYSC_SIDLEMODE_MASK));
- dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK <<
- __ffs(SYSC_CLOCKACTIVITY_MASK));
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- dev->syscstate);
- /*
- * Enabling all wakup sources to stop I2C freezing on
- * WFI instruction.
- * REVISIT: Some wkup sources might not be needed.
- */
- dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
- dev->westate);
- }
+ omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+
+ }
+ return 0;
+}
+
+static int omap_i2c_init(struct omap_i2c_dev *dev)
+{
+ u16 psc = 0, scll = 0, sclh = 0;
+ u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
+ unsigned long fclk_rate = 12000000;
+ unsigned long internal_clk = 0;
+ struct clk *fclk;
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
+ /*
+ * Enabling all wakeup sources to stop I2C freezing on
+ * WFI instruction.
+ * REVISIT: Some wkup sources might not be needed.
+ */
+ dev->westate = OMAP_I2C_WE_ALL;
}
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
@@ -416,28 +437,17 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
}
- /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
- omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);
-
- /* SCL low and high time values */
- omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
-
- /* Take the I2C module out of reset: */
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
-
- /* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- dev->pscstate = psc;
- dev->scllstate = scll;
- dev->sclhstate = sclh;
- dev->bufstate = buf;
- }
+
+ dev->pscstate = psc;
+ dev->scllstate = scll;
+ dev->sclhstate = sclh;
+
+ __omap_i2c_init(dev);
+
return 0;
}
@@ -490,7 +500,7 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -586,7 +596,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
OMAP_I2C_TIMEOUT);
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -ETIMEDOUT;
}
@@ -596,7 +607,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
/* We have an error */
if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
OMAP_I2C_STAT_XUDF)) {
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -EIO;
}
@@ -642,13 +654,14 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
break;
}
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
-
if (r == 0)
r = num;
omap_i2c_wait_for_bb(dev);
+
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
+
out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1025,9 +1038,7 @@ static const struct i2c_algorithm omap_i2c_algo = {
#ifdef CONFIG_OF
static struct omap_i2c_bus_platform_data omap3_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_i2c_bus_platform_data omap4_pdata = {
@@ -1048,7 +1059,17 @@ static const struct of_device_id omap_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
#endif
-static int __devinit
+#define OMAP_I2C_SCHEME(rev) ((rev & 0xc000) >> 14)
+
+#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev) (rev >> 4)
+#define OMAP_I2C_REV_SCHEME_0_MINOR(rev) (rev & 0xf)
+
+#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev) ((rev & 0x0700) >> 7)
+#define OMAP_I2C_REV_SCHEME_1_MINOR(rev) (rev & 0x1f)
+#define OMAP_I2C_SCHEME_0 0
+#define OMAP_I2C_SCHEME_1 1
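+
+/*
+ * Scheme 0 packs the major revision in bits [7:4] and the minor in bits
+ * [3:0] of the 8-bit REV register; scheme 1 (IP V2) derives them from
+ * bits [10:8] and [4:0] of the combined 32-bit revision value.
+ */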
+
+static int
omap_i2c_probe(struct platform_device *pdev)
{
struct omap_i2c_dev *dev;
@@ -1060,6 +1081,8 @@ omap_i2c_probe(struct platform_device *pdev)
const struct of_device_id *match;
int irq;
int r;
+ u32 rev;
+ u16 minor, major, scheme;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1091,7 +1114,6 @@ omap_i2c_probe(struct platform_device *pdev)
u32 freq = 100000; /* default to 100000 Hz */
pdata = match->data;
- dev->dtrev = pdata->rev;
dev->flags = pdata->flags;
of_property_read_u32(node, "clock-frequency", &freq);
@@ -1101,7 +1123,16 @@ omap_i2c_probe(struct platform_device *pdev)
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- dev->dtrev = pdata->rev;
+ }
+
+ dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(dev->pins)) {
+ if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
+ PTR_ERR(dev->pins));
+ dev->pins = NULL;
}
dev->dev = &pdev->dev;
@@ -1114,11 +1145,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
- dev->regs = (u8 *)reg_map_ip_v2;
- else
- dev->regs = (u8 *)reg_map_ip_v1;
-
pm_runtime_enable(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(dev->dev);
@@ -1127,11 +1153,37 @@ omap_i2c_probe(struct platform_device *pdev)
if (IS_ERR_VALUE(r))
goto err_free_mem;
- dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
+ /*
+ * Read bits [15:14] of the hi revision register, i.e. the scheme: a
+ * value of 1 indicates an IP version 2 controller. On omap1/2/3 offset
+ * 0x04 is the IE register, whose bits [15:14] are 0 at reset. Since
+ * omap_i2c_read_reg() relies on reg_map_ip_*, which is not known yet,
+ * a raw_readw() is done here.
+ */
+ rev = __raw_readw(dev->base + 0x04);
+
+ scheme = OMAP_I2C_SCHEME(rev);
+ switch (scheme) {
+ case OMAP_I2C_SCHEME_0:
+ dev->regs = (u8 *)reg_map_ip_v1;
+ dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
+ minor = OMAP_I2C_REV_SCHEME_0_MINOR(dev->rev);
+ major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
+ break;
+ case OMAP_I2C_SCHEME_1:
+ /* FALLTHROUGH */
+ default:
+ dev->regs = (u8 *)reg_map_ip_v2;
+ rev = (rev << 16) |
+ omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
+ minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
+ major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
+ dev->rev = rev;
+ }
dev->errata = 0;
- if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
+ dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
dev->errata |= I2C_OMAP_ERRATA_I207;
if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
@@ -1152,7 +1204,7 @@ omap_i2c_probe(struct platform_device *pdev)
dev->fifo_size = (dev->fifo_size / 2);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -1195,8 +1247,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", adap->nr,
- dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
+ major, minor, dev->speed);
of_i2c_register_devices(adap);
@@ -1215,7 +1267,7 @@ err_free_mem:
return r;
}
-static int __devexit omap_i2c_remove(struct platform_device *pdev)
+static int omap_i2c_remove(struct platform_device *pdev)
{
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
@@ -1239,14 +1291,13 @@ static int omap_i2c_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- u16 iv;
_dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
- iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+ omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
@@ -1262,23 +1313,10 @@ static int omap_i2c_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0);
- omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate);
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- }
+ if (!_dev->regs)
+ return 0;
- /*
- * Don't write to this register if the IE state is 0 as it can
- * cause deadlock.
- */
- if (_dev->iestate)
- omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate);
+ __omap_i2c_init(_dev);
return 0;
}
@@ -1295,7 +1333,7 @@ static struct dev_pm_ops omap_i2c_pm_ops = {
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove = __devexit_p(omap_i2c_remove),
+ .remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 4b95f7a63a3..aa957788192 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -135,7 +135,7 @@ static struct lineop parport_ctrl_irq = {
.port = PORT_CTRL,
};
-static int __devinit i2c_parport_probe(struct platform_device *pdev)
+static int i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -169,7 +169,7 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit i2c_parport_remove(struct platform_device *pdev)
+static int i2c_parport_remove(struct platform_device *pdev)
{
if (ara) {
line_set(0, &parport_ctrl_irq);
@@ -191,7 +191,7 @@ static struct platform_driver i2c_parport_driver = {
.name = DRVNAME,
},
.probe = i2c_parport_probe,
- .remove = __devexit_p(i2c_parport_remove),
+ .remove = i2c_parport_remove,
};
static int __init i2c_parport_device_add(u16 address)
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 12edefd4183..615f632c846 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -340,7 +340,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = pasemi_smb_func,
};
-static int __devinit pasemi_smb_probe(struct pci_dev *dev,
+static int pasemi_smb_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct pasemi_smbus *smbus;
@@ -392,7 +392,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
return error;
}
-static void __devexit pasemi_smb_remove(struct pci_dev *dev)
+static void pasemi_smb_remove(struct pci_dev *dev)
{
struct pasemi_smbus *smbus = pci_get_drvdata(dev);
@@ -412,7 +412,7 @@ static struct pci_driver pasemi_smb_driver = {
.name = "i2c-pasemi",
.id_table = pasemi_smb_ids,
.probe = pasemi_smb_probe,
- .remove = __devexit_p(pasemi_smb_remove),
+ .remove = pasemi_smb_remove,
};
module_pci_driver(pasemi_smb_driver);
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 29933f87d8f..323f061a316 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -119,7 +119,7 @@ static struct i2c_adapter pca_isa_ops = {
.timeout = HZ,
};
-static int __devinit pca_isa_match(struct device *dev, unsigned int id)
+static int pca_isa_match(struct device *dev, unsigned int id)
{
int match = base != 0;
@@ -132,7 +132,7 @@ static int __devinit pca_isa_match(struct device *dev, unsigned int id)
return match;
}
-static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
+static int pca_isa_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pca_wait);
@@ -174,7 +174,7 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
+static int pca_isa_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pca_isa_ops);
@@ -190,7 +190,7 @@ static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
static struct isa_driver pca_isa_driver = {
.match = pca_isa_match,
.probe = pca_isa_probe,
- .remove = __devexit_p(pca_isa_remove),
+ .remove = pca_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER,
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 675878f49f7..a30d2f613c0 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -131,7 +131,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
}
-static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
+static int i2c_pca_pf_probe(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c;
struct resource *res;
@@ -257,7 +257,7 @@ e_print:
return ret;
}
-static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
+static int i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
@@ -279,7 +279,7 @@ static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
static struct platform_driver i2c_pca_pf_driver = {
.probe = i2c_pca_pf_probe,
- .remove = __devexit_p(i2c_pca_pf_remove),
+ .remove = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8bbd6ece7c4..39ab78c1a02 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(force_addr,
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
-static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
+static const struct dmi_system_id piix4_dmi_blacklist[] = {
{
.ident = "Sapphire AM2RD790",
.matches = {
@@ -119,7 +119,7 @@ static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
/* The IBM entry is in a separate table because we only check it
on Intel-based systems */
-static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
+static const struct dmi_system_id piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
@@ -131,8 +131,8 @@ struct i2c_piix4_adapdata {
unsigned short smba;
};
-static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
unsigned short piix4_smba;
@@ -204,9 +204,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
*/
pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
temp | 1);
- dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
- "WARNING: SMBus interface has been "
- "FORCEFULLY ENABLED!\n");
+ dev_notice(&PIIX4_dev->dev,
+ "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
@@ -231,8 +230,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
@@ -295,9 +294,9 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_aux(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id,
- unsigned short base_reg_addr)
+static int piix4_setup_aux(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id,
+ unsigned short base_reg_addr)
{
/* Set up auxiliary SMBus controllers found on some
* AMD chipsets e.g. SP5100 (SB700 derivative) */
@@ -541,9 +540,8 @@ MODULE_DEVICE_TABLE (pci, piix4_ids);
static struct i2c_adapter *piix4_main_adapter;
static struct i2c_adapter *piix4_aux_adapter;
-static int __devinit piix4_add_adapter(struct pci_dev *dev,
- unsigned short smba,
- struct i2c_adapter **padap)
+static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
struct i2c_piix4_adapdata *adapdata;
@@ -589,8 +587,7 @@ static int __devinit piix4_add_adapter(struct pci_dev *dev,
return 0;
}
-static int __devinit piix4_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
@@ -627,7 +624,7 @@ static int __devinit piix4_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
+static void piix4_adap_remove(struct i2c_adapter *adap)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
@@ -639,7 +636,7 @@ static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
}
}
-static void __devexit piix4_remove(struct pci_dev *dev)
+static void piix4_remove(struct pci_dev *dev)
{
if (piix4_main_adapter) {
piix4_adap_remove(piix4_main_adapter);
@@ -656,7 +653,7 @@ static struct pci_driver piix4_driver = {
.name = "piix4_smbus",
.id_table = piix4_ids,
.probe = piix4_probe,
- .remove = __devexit_p(piix4_remove),
+ .remove = piix4_remove,
};
module_pci_driver(piix4_driver);
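
The piix4_setup() hunk above folds a three-line dev_printk(KERN_NOTICE, ...) call into dev_notice(). The two spellings are interchangeable; dev_notice(dev, fmt, ...) is simply the KERN_NOTICE shorthand, as the minimal sketch below shows (the message text here is only an example):

    /* equivalent ways to log at NOTICE level against a struct device */
    dev_printk(KERN_NOTICE, &PIIX4_dev->dev, "SMBus interface enabled\n");
    dev_notice(&PIIX4_dev->dev, "SMBus interface enabled\n");
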
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 3d71395ae1f..083d68cfaf0 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -270,7 +270,7 @@ static irqreturn_t pmcmsptwi_interrupt(int irq, void *ptr)
/*
* Probe for and register the device and return 0 if there is one.
*/
-static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
+static int pmcmsptwi_probe(struct platform_device *pldev)
{
struct resource *res;
int rc = -ENODEV;
@@ -368,7 +368,7 @@ ret_err:
/*
* Release the device and return 0 if there is one.
*/
-static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
+static int pmcmsptwi_remove(struct platform_device *pldev)
{
struct resource *res;
@@ -628,7 +628,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
- .remove = __devexit_p(pmcmsptwi_remove),
+ .remove = pmcmsptwi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 8488bddfe46..ce4097012e9 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -619,7 +619,7 @@ static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
#define PNX_I2C_PM NULL
#endif
-static int __devinit i2c_pnx_probe(struct platform_device *pdev)
+static int i2c_pnx_probe(struct platform_device *pdev)
{
unsigned long tmp;
int ret = 0;
@@ -765,7 +765,7 @@ err_kzalloc:
return ret;
}
-static int __devexit i2c_pnx_remove(struct platform_device *pdev)
+static int i2c_pnx_remove(struct platform_device *pdev)
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
@@ -797,7 +797,7 @@ static struct platform_driver i2c_pnx_driver = {
.pm = PNX_I2C_PM,
},
.probe = i2c_pnx_probe,
- .remove = __devexit_p(i2c_pnx_remove),
+ .remove = i2c_pnx_remove,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 5285f8565de..0dd5b334d09 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -210,7 +210,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
};
-static int __devexit i2c_powermac_remove(struct platform_device *dev)
+static int i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
int rc;
@@ -227,7 +227,7 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
-static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
+static u32 i2c_powermac_get_addr(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
struct device_node *node)
{
@@ -255,7 +255,7 @@ static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
return 0xffffffff;
}
-static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
+static void i2c_powermac_create_one(struct i2c_adapter *adap,
const char *type,
u32 addr)
{
@@ -271,7 +271,7 @@ static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
type);
}
-static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
+static void i2c_powermac_add_missing(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
bool found_onyx)
{
@@ -297,7 +297,7 @@ static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
}
}
-static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
+static bool i2c_powermac_get_type(struct i2c_adapter *adap,
struct device_node *node,
u32 addr, char *type, int type_size)
{
@@ -336,7 +336,7 @@ static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
return false;
}
-static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
+static void i2c_powermac_register_devices(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus)
{
struct i2c_client *newdev;
@@ -403,7 +403,7 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
i2c_powermac_add_missing(adap, bus, found_onyx);
}
-static int __devinit i2c_powermac_probe(struct platform_device *dev)
+static int i2c_powermac_probe(struct platform_device *dev)
{
struct pmac_i2c_bus *bus = dev->dev.platform_data;
struct device_node *parent = NULL;
@@ -467,7 +467,7 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
- .remove = __devexit_p(i2c_powermac_remove),
+ .remove = i2c_powermac_remove,
.driver = {
.name = "i2c-powermac",
.bus = &platform_bus_type,
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index d8515be00b9..d7c512d717a 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -184,7 +184,7 @@ static struct i2c_algorithm puv3_i2c_algorithm = {
/*
* Main initialization routine.
*/
-static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+static int puv3_i2c_probe(struct platform_device *pdev)
{
struct i2c_adapter *adapter;
struct resource *mem;
@@ -231,7 +231,7 @@ fail_nomem:
return rc;
}
-static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+static int puv3_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct resource *mem;
@@ -276,7 +276,7 @@ static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
- .remove = __devexit_p(puv3_i2c_remove),
+ .remove = puv3_i2c_remove,
.driver = {
.name = "PKUnity-v3-I2C",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 4dc9bef17d7..3d4985695ae 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -94,7 +94,7 @@ out:
return ERR_PTR(ret);
}
-static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
+static int ce4100_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
@@ -135,7 +135,7 @@ err_mem:
return ret;
}
-static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
+static void ce4100_i2c_remove(struct pci_dev *dev)
{
struct ce4100_devices *sds;
unsigned int i;
@@ -160,7 +160,7 @@ static struct pci_driver ce4100_i2c_driver = {
.name = "ce4100_i2c",
.id_table = ce4100_i2c_devices,
.probe = ce4100_i2c_probe,
- .remove = __devexit_p(ce4100_i2c_remove),
+ .remove = ce4100_i2c_remove,
};
module_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index f9399d163af..9bd4d73d29e 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -613,7 +613,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.functionality = rcar_i2c_func,
};
-static int __devinit rcar_i2c_probe(struct platform_device *pdev)
+static int rcar_i2c_probe(struct platform_device *pdev)
{
struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
struct rcar_i2c_priv *priv;
@@ -642,7 +642,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->io = devm_ioremap(dev, res->start, resource_size(res));
+ priv->io = devm_request_and_ioremap(dev, res);
if (!priv->io) {
dev_err(dev, "cannot ioremap\n");
return -ENODEV;
@@ -682,7 +682,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rcar_i2c_remove(struct platform_device *pdev)
+static int rcar_i2c_remove(struct platform_device *pdev)
{
struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -693,16 +693,16 @@ static int __devexit rcar_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver rcar_i2c_drv = {
+static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.owner = THIS_MODULE,
},
.probe = rcar_i2c_probe,
- .remove = __devexit_p(rcar_i2c_remove),
+ .remove = rcar_i2c_remove,
};
-module_platform_driver(rcar_i2c_drv);
+module_platform_driver(rcar_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas R-Car I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 9d902725bac..a290d089cea 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -38,6 +38,7 @@
#include <linux/io.h>
#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/irq.h>
@@ -49,6 +50,9 @@
#define QUIRK_HDMIPHY (1 << 1)
#define QUIRK_NO_GPIO (1 << 2)
+/* Max time to wait for bus to become idle after a xfer (in us) */
+#define S3C2410_IDLE_TIMEOUT 5000
+
/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
@@ -59,7 +63,6 @@ enum s3c24xx_i2c_state {
};
struct s3c24xx_i2c {
- spinlock_t lock;
wait_queue_head_t wait;
unsigned int quirks;
unsigned int suspended:1;
@@ -78,11 +81,11 @@ struct s3c24xx_i2c {
void __iomem *regs;
struct clk *clk;
struct device *dev;
- struct resource *ioarea;
struct i2c_adapter adap;
struct s3c2410_platform_i2c *pdata;
int gpios[2];
+ struct pinctrl *pctrl;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -208,7 +211,7 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
if (msg->flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
- /* todo - check for wether ack wanted or not */
+ /* todo - check for whether ack wanted or not */
s3c24xx_i2c_enable_ack(i2c);
iiccon = readl(i2c->regs + S3C2410_IICCON);
@@ -235,8 +238,47 @@ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
dev_dbg(i2c->dev, "STOP\n");
- /* stop the transfer */
- iicstat &= ~S3C2410_IICSTAT_START;
+ /*
+ * The datasheet says that the STOP sequence should be:
+ * 1) I2CSTAT.5 = 0 - Clear BUSY (or 'generate STOP')
+ * 2) I2CCON.4 = 0 - Clear IRQPEND
+ * 3) Wait until the stop condition takes effect.
+ * 4*) I2CSTAT.4 = 0 - Clear TXRXEN
+ *
+ * Where, step "4*" is only for buses with the "HDMIPHY" quirk.
+ *
+ * However, after much experimentation, it appears that:
+ * a) normal buses automatically clear BUSY and transition from
+ * Master->Slave when they complete generating a STOP condition.
+ * Therefore, step (3) can be done in doxfer() by polling I2CCON.4
+ * after starting the STOP generation here.
+ * b) HDMIPHY bus does neither, so there is no way to do step 3.
+ * There is no indication when this bus has finished generating
+ * STOP.
+ *
+ * In fact, we have found that as soon as the IRQPEND bit is cleared in
+ * step 2, the HDMIPHY bus generates the STOP condition, and then
+ * immediately starts transferring another data byte, even though the
+ * bus is supposedly stopped. This is presumably because the bus is
+ * still in "Master" mode, and its BUSY bit is still set.
+ *
+ * To avoid these extra post-STOP transactions on HDMI phy devices, we
+ * just disable Serial Output on the bus (I2CSTAT.4 = 0) directly,
+ * instead of first generating a proper STOP condition. This should
+ * float SDA & SCK terminating the transfer. Subsequent transfers
+ * start with a proper START condition, and proceed normally.
+ *
+ * The HDMIPHY bus is an internal bus that always has exactly two
+ * devices, the host as Master and the HDMIPHY device as the slave.
+ * Skipping the STOP condition has been tested on this bus and works.
+ */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ /* Stop driving the I2C pins */
+ iicstat &= ~S3C2410_IICSTAT_TXRXEN;
+ } else {
+ /* stop the transfer */
+ iicstat &= ~S3C2410_IICSTAT_START;
+ }
writel(iicstat, i2c->regs + S3C2410_IICSTAT);
i2c->state = STATE_STOP;
@@ -397,7 +439,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
case STATE_READ:
/* we have a byte of data in the data register, do
- * something with it, and then work out wether we are
+ * something with it, and then work out whether we are
* going to do any more read/write
*/
@@ -490,13 +532,6 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
- /* the timeout for HDMIPHY is reduced to 10 ms because
- * the hangup is expected to happen, so waiting 400 ms
- * causes only unnecessary system hangup
- */
- if (i2c->quirks & QUIRK_HDMIPHY)
- timeout = 10;
-
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -506,16 +541,61 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
- /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
- if (i2c->quirks & QUIRK_HDMIPHY) {
- writel(0, i2c->regs + S3C2410_IICCON);
- writel(0, i2c->regs + S3C2410_IICSTAT);
- writel(0, i2c->regs + S3C2410_IICDS);
+ return -ETIMEDOUT;
+}
- return 0;
+/* s3c24xx_i2c_wait_idle
+ *
+ * wait for the i2c bus to become idle.
+*/
+
+static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+{
+ unsigned long iicstat;
+ ktime_t start, now;
+ unsigned long delay;
+ int spins;
+
+ /* ensure the stop has been through the bus */
+
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ start = now = ktime_get();
+
+ /*
+ * Most of the time, the bus is already idle within a few usec of the
+ * end of a transaction. However, really slow i2c devices can stretch
+ * the clock, delaying STOP generation.
+ *
+ * On slower SoCs this typically happens within a very small number of
+ * instructions so busy wait briefly to avoid scheduling overhead.
+ */
+ spins = 3;
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ while ((iicstat & S3C2410_IICSTAT_START) && --spins) {
+ cpu_relax();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
}
- return -ETIMEDOUT;
+ /*
+ * If we do get an appreciable delay then, as a compromise between idle
+ * detection latency for the normal, fast case and system load in the
+ * slow-device case, use an exponential back off in the polling loop,
+ * up to 1/10th of the total timeout, then continue to poll at a
+ * constant rate up to the timeout.
+ */
+ delay = 1;
+ while ((iicstat & S3C2410_IICSTAT_START) &&
+ ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) {
+ usleep_range(delay, 2 * delay);
+ if (delay < S3C2410_IDLE_TIMEOUT / 10)
+ delay <<= 1;
+ now = ktime_get();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
}
/* s3c24xx_i2c_doxfer
@@ -526,8 +606,7 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long iicstat, timeout;
- int spins = 20;
+ unsigned long timeout;
int ret;
if (i2c->suspended)
@@ -540,8 +619,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
goto out;
}
- spin_lock_irq(&i2c->lock);
-
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
@@ -550,7 +627,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
- spin_unlock_irq(&i2c->lock);
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
@@ -564,24 +640,11 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
- /* ensure the stop has been through the bus */
-
- dev_dbg(i2c->dev, "waiting for bus idle\n");
-
- /* first, try busy waiting briefly */
- do {
- cpu_relax();
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
-
- /* if that timed out sleep */
- if (!spins) {
- msleep(1);
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- }
+ /* For QUIRK_HDMIPHY, bus is already disabled */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ goto out;
- if (iicstat & S3C2410_IICSTAT_START)
- dev_warn(i2c->dev, "timeout waiting for bus idle\n");
+ s3c24xx_i2c_wait_idle(i2c);
out:
return ret;
@@ -740,7 +803,6 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
- unsigned long flags;
unsigned int got;
int delta_f;
int ret;
@@ -754,9 +816,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- spin_lock_irqsave(&i2c->lock, flags);
+ i2c_lock_adapter(&i2c->adap);
ret = s3c24xx_i2c_clockrate(i2c, &got);
- spin_unlock_irqrestore(&i2c->lock, flags);
+ i2c_unlock_adapter(&i2c->adap);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency\n");
@@ -858,14 +920,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
pdata = i2c->pdata;
- /* inititalise the gpio */
-
- if (pdata->cfg_gpio)
- pdata->cfg_gpio(to_platform_device(i2c->dev));
- else
- if (s3c24xx_i2c_parse_dt_gpio(i2c))
- return -EINVAL;
-
/* write slave address */
writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);
@@ -963,7 +1017,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
i2c->tx_setup = 50;
- spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
@@ -989,36 +1042,38 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (i2c->ioarea == NULL) {
- dev_err(&pdev->dev, "cannot request IO\n");
- ret = -ENXIO;
- goto err_clk;
- }
-
- i2c->regs = ioremap(res->start, resource_size(res));
+ i2c->regs = devm_request_and_ioremap(&pdev->dev, res);
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
- goto err_ioarea;
+ goto err_clk;
}
- dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
- i2c->regs, i2c->ioarea, res);
+ dev_dbg(&pdev->dev, "registers %p (%p)\n",
+ i2c->regs, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
+ i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
+
+ /* initialise the i2c gpio lines */
+
+ if (i2c->pdata->cfg_gpio) {
+ i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
+ } else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
/* initialise the i2c controller */
ret = s3c24xx_i2c_init(i2c);
if (ret != 0)
- goto err_iomap;
+ goto err_clk;
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
@@ -1027,7 +1082,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto err_iomap;
+ goto err_clk;
}
ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
@@ -1035,7 +1090,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- goto err_iomap;
+ goto err_clk;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
@@ -1075,13 +1130,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
err_irq:
free_irq(i2c->irq, i2c);
- err_iomap:
- iounmap(i2c->regs);
-
- err_ioarea:
- release_resource(i2c->ioarea);
- kfree(i2c->ioarea);
-
err_clk:
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
@@ -1110,16 +1158,13 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
- iounmap(i2c->regs);
-
- release_resource(i2c->ioarea);
- s3c24xx_i2c_dt_gpio_free(i2c);
- kfree(i2c->ioarea);
+ if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
+ s3c24xx_i2c_dt_gpio_free(i2c);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1142,10 +1187,14 @@ static int s3c24xx_i2c_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
+#endif
};
#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
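
The s3c24xx_i2c_wait_idle() helper added above replaces the old fixed busy-wait plus msleep(1) with a short spin followed by an exponential back-off capped at one tenth of the 5 ms idle timeout. The stand-alone model below (plain user-space C, reusing the patch's constant) prints the resulting sleep schedule; it only mirrors the loop's timing, not the register polling, and uses the minimum of each usleep_range() for the elapsed-time estimate:

    #include <stdio.h>

    #define S3C2410_IDLE_TIMEOUT 5000       /* us, as in the patch */

    int main(void)
    {
            unsigned long delay = 1, elapsed = 0;
            int polls = 0;

            /* sleep, re-poll, and double the delay until the cap is hit */
            while (elapsed < S3C2410_IDLE_TIMEOUT) {
                    printf("poll %2d after ~%4lu us (elapsed ~%4lu us)\n",
                           ++polls, delay, elapsed);
                    elapsed += delay;
                    if (delay < S3C2410_IDLE_TIMEOUT / 10)
                            delay <<= 1;
            }
            printf("worst case: %d wakeups inside the %d us budget\n",
                   polls, S3C2410_IDLE_TIMEOUT);
            return 0;
    }

With these numbers the worst case is about eighteen wakeups across the 5 ms window, rather than thousands of constant-interval polls.
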
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index b76a29d1f8e..008836409ef 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -248,7 +248,7 @@ static struct i2c_algorithm s6i2c_algorithm = {
.functionality = s6i2c_functionality,
};
-static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
+static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
{
u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000;
if (dividend > 0xffff)
@@ -256,7 +256,7 @@ static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
return dividend;
}
-static int __devinit s6i2c_probe(struct platform_device *dev)
+static int s6i2c_probe(struct platform_device *dev)
{
struct s6i2c_if *iface = &s6i2c_if;
struct i2c_adapter *p_adap;
@@ -361,7 +361,7 @@ err_out:
return rc;
}
-static int __devexit s6i2c_remove(struct platform_device *pdev)
+static int s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
@@ -378,7 +378,7 @@ static int __devexit s6i2c_remove(struct platform_device *pdev)
static struct platform_driver s6i2c_driver = {
.probe = s6i2c_probe,
- .remove = __devexit_p(s6i2c_remove),
+ .remove = s6i2c_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c0c9dffbdb1..3a2253e1bf5 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -390,7 +390,7 @@ static const struct i2c_algorithm sh7760_i2c_algo = {
* iclk = mclk/(CDF + 1). iclk must be < 20MHz.
* scl = iclk/(SCGD*8 + 20).
*/
-static int __devinit calc_CCR(unsigned long scl_hz)
+static int calc_CCR(unsigned long scl_hz)
{
struct clk *mclk;
unsigned long mck, m1, dff, odff, iclk;
@@ -430,7 +430,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
return ((scgdm << 2) | cdfm);
}
-static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
+static int sh7760_i2c_probe(struct platform_device *pdev)
{
struct sh7760_i2c_platdata *pd;
struct resource *res;
@@ -536,7 +536,7 @@ out0:
return ret;
}
-static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
+static int sh7760_i2c_remove(struct platform_device *pdev)
{
struct cami2c *id = platform_get_drvdata(pdev);
@@ -557,7 +557,7 @@ static struct platform_driver sh7760_i2c_drv = {
.owner = THIS_MODULE,
},
.probe = sh7760_i2c_probe,
- .remove = __devexit_p(sh7760_i2c_remove),
+ .remove = sh7760_i2c_remove,
};
module_platform_driver(sh7760_i2c_drv);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8110ca45f34..b6e7a83a829 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -120,11 +120,12 @@ struct sh_mobile_i2c_data {
void __iomem *reg;
struct i2c_adapter adap;
unsigned long bus_speed;
+ unsigned int clks_per_count;
struct clk *clk;
u_int8_t icic;
- u_int8_t iccl;
- u_int8_t icch;
u_int8_t flags;
+ u_int16_t iccl;
+ u_int16_t icch;
spinlock_t lock;
wait_queue_head_t wait;
@@ -135,7 +136,8 @@ struct sh_mobile_i2c_data {
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
-#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
+#define STANDARD_MODE 100000
+#define FAST_MODE 400000
/* Register offsets */
#define ICDR 0x00
@@ -187,57 +189,90 @@ static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
}
-static void activate_ch(struct sh_mobile_i2c_data *pd)
+static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf, int offset)
{
- unsigned long i2c_clk;
- u_int32_t num;
- u_int32_t denom;
- u_int32_t tmp;
-
- /* Wake up device and enable clock */
- pm_runtime_get_sync(pd->dev);
- clk_enable(pd->clk);
-
- /* Get clock rate after clock is enabled */
- i2c_clk = clk_get_rate(pd->clk);
+ /*
+ * Conditional expression:
+ * ICCL >= COUNT_CLK * (tLOW + tf)
+ *
+ * SH-Mobile IIC hardware starts counting the LOW period of
+ * the SCL signal (tLOW) as soon as it pulls the SCL line.
+ * In order to meet the tLOW timing spec, we need to take into
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+ return (((count_khz * (tLOW + tf)) + 5000) / 10000) + offset;
+}
- /* Calculate the value for iccl. From the data sheet:
- * iccl = (p clock / transfer rate) * (L / (L + H))
- * where L and H are the SCL low/high ratio (5/4 in this case).
- * We also round off the result.
+static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf, int offset)
+{
+ /*
+ * Conditional expression:
+ * ICCH >= COUNT_CLK * (tHIGH + tf)
+ *
+ * SH-Mobile IIC hardware is aware of SCL transition period 'tr',
+ * and can ignore it. SH-Mobile IIC controller starts counting
+ * the HIGH period of the SCL signal (tHIGH) after the SCL input
+ * voltage increases at VIH.
+ *
+ * Afterward it turned out calculating ICCH using only tHIGH spec
+ * will result in violation of the tHD;STA timing spec. We need
+ * to take into account the fall time of SDA signal (tf) at START
+ * condition, in order to meet both tHIGH and tHD;STA specs.
*/
- num = i2c_clk * 5;
- denom = pd->bus_speed * 9;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->iccl = (u_int8_t)((num/denom) + 1);
- else
- pd->iccl = (u_int8_t)(num/denom);
+ return (((count_khz * (tHIGH + tf)) + 5000) / 10000) + offset;
+}
- /* one more bit of ICCL in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCLB8;
- else
- pd->icic &= ~ICIC_ICCLB8;
+static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
+{
+ unsigned long i2c_clk_khz;
+ u32 tHIGH, tLOW, tf;
+ int offset;
+
+ /* Get clock rate after clock is enabled */
+ clk_enable(pd->clk);
+ i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
+ i2c_clk_khz /= pd->clks_per_count;
+
+ if (pd->bus_speed == STANDARD_MODE) {
+ tLOW = 47; /* tLOW = 4.7 us */
+ tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else if (pd->bus_speed == FAST_MODE) {
+ tLOW = 13; /* tLOW = 1.3 us */
+ tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else {
+ dev_err(pd->dev, "unrecognized bus speed %lu Hz\n",
+ pd->bus_speed);
+ goto out;
}
- /* Calculate the value for icch. From the data sheet:
- icch = (p clock / transfer rate) * (H / (L + H)) */
- num = i2c_clk * 4;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->icch = (u_int8_t)((num/denom) + 1);
+ pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf, offset);
+ /* one more bit of ICCL in ICIC */
+ if ((pd->iccl > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCLB8;
else
- pd->icch = (u_int8_t)(num/denom);
+ pd->icic &= ~ICIC_ICCLB8;
+ pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf, offset);
/* one more bit of ICCH in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCHB8;
- else
- pd->icic &= ~ICIC_ICCHB8;
- }
+ if ((pd->icch > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCHB8;
+ else
+ pd->icic &= ~ICIC_ICCHB8;
+
+out:
+ clk_disable(pd->clk);
+}
+
+static void activate_ch(struct sh_mobile_i2c_data *pd)
+{
+ /* Wake up device and enable clock */
+ pm_runtime_get_sync(pd->dev);
+ clk_enable(pd->clk);
/* Enable channel and configure rx ack */
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -246,8 +281,8 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
iic_wr(pd, ICIC, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
}
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
@@ -434,6 +469,9 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
wake_up(&pd->wait);
}
+ /* defeat write posting to avoid spurious WAIT interrupts */
+ iic_rd(pd, ICSR);
+
return IRQ_HANDLED;
}
@@ -451,8 +489,8 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
pd->msg = usr_msg;
pd->pos = -1;
@@ -621,10 +659,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_irq;
}
- /* Use platformd data bus speed or NORMAL_SPEED */
- pd->bus_speed = NORMAL_SPEED;
+ /* Use platform data bus speed or STANDARD_MODE */
+ pd->bus_speed = STANDARD_MODE;
if (pdata && pdata->bus_speed)
pd->bus_speed = pdata->bus_speed;
+ pd->clks_per_count = 1;
+ if (pdata && pdata->clks_per_count)
+ pd->clks_per_count = pdata->clks_per_count;
/* The IIC blocks on SH-Mobile ARM processors
* come with two new bits in ICIC.
@@ -632,6 +673,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
if (size > 0x17)
pd->flags |= IIC_FLAG_HAS_ICIC67;
+ sh_mobile_i2c_init(pd);
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
@@ -667,8 +710,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_all;
}
- dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
- adap->nr, pd->bus_speed);
+ dev_info(&dev->dev,
+ "I2C adapter %d with bus speed %lu Hz (L/H=%x/%x)\n",
+ adap->nr, pd->bus_speed, pd->iccl, pd->icch);
of_i2c_register_devices(adap);
return 0;
@@ -714,7 +758,7 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
-static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,rmobile-iic", },
{},
};
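
sh_mobile_i2c_init() above derives ICCL/ICCH from datasheet timings given in tenths of a microsecond, with the +5000/10000 step providing round-to-nearest and the ninth bit spilling into ICIC_ICCLB8/ICCHB8. The small check below reproduces that arithmetic for standard mode; the 104 MHz input clock and clks_per_count of 2 are assumed purely for illustration, and offset is zero for both modes in this patch:

    #include <stdio.h>

    /* same rounding as the patch: t is in units of 0.1 us, count in kHz */
    static unsigned long icc(unsigned long count_khz, unsigned long t_tenths)
    {
            return ((count_khz * t_tenths) + 5000) / 10000;
    }

    int main(void)
    {
            /* assumed example: 104 MHz input clock, clks_per_count = 2 */
            unsigned long count_khz = 104000 / 2;
            /* standard mode: tLOW = 4.7 us, tHIGH = 4.0 us, tf = 0.3 us */
            unsigned long iccl = icc(count_khz, 47 + 3);    /* -> 260 */
            unsigned long icch = icc(count_khz, 40 + 3);    /* -> 224 */

            printf("ICCL=%lu reg=0x%02lx ICCLB8=%d\n",
                   iccl, iccl & 0xff, iccl > 0xff);
            printf("ICCH=%lu reg=0x%02lx ICCHB8=%d\n",
                   icch, icch & 0xff, icch > 0xff);
            return 0;
    }

For this assumed clock ICCL comes out at 260, so ICIC_ICCLB8 gets set and the ICCL register itself holds 0x04, while ICCH fits in eight bits; the new dev_info() line would report L/H=104/e0 in that case.
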
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 5574a47792f..3f1818b8797 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -258,7 +258,7 @@ static const struct i2c_algorithm i2c_sirfsoc_algo = {
.functionality = i2c_sirfsoc_func,
};
-static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
+static int i2c_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_i2c *siic;
struct i2c_adapter *adap;
@@ -385,7 +385,7 @@ err_get_clk:
return err;
}
-static int __devexit i2c_sirfsoc_remove(struct platform_device *pdev)
+static int i2c_sirfsoc_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct sirfsoc_i2c *siic = adapter->algo_data;
@@ -433,7 +433,7 @@ static const struct dev_pm_ops i2c_sirfsoc_pm_ops = {
};
#endif
-static const struct of_device_id sirfsoc_i2c_of_match[] __devinitconst = {
+static const struct of_device_id sirfsoc_i2c_of_match[] = {
{ .compatible = "sirf,prima2-i2c", },
{},
};
@@ -449,7 +449,7 @@ static struct platform_driver i2c_sirfsoc_driver = {
.of_match_table = sirfsoc_i2c_of_match,
},
.probe = i2c_sirfsoc_probe,
- .remove = __devexit_p(i2c_sirfsoc_remove),
+ .remove = i2c_sirfsoc_remove,
};
module_platform_driver(i2c_sirfsoc_driver);
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 87e5126d449..79fd96a0438 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -142,7 +142,7 @@ static void sis5595_write(u8 reg, u8 data)
outb(data, sis5595_base + SMB_DAT);
}
-static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+static int sis5595_setup(struct pci_dev *SIS5595_dev)
{
u16 a;
u8 val;
@@ -376,7 +376,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
MODULE_DEVICE_TABLE (pci, sis5595_ids);
-static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 5d6723b7525..de6dddb9f86 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -389,7 +389,7 @@ static u32 sis630_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_BLOCK_DATA;
}
-static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+static int sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
@@ -480,7 +480,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
MODULE_DEVICE_TABLE (pci, sis630_ids);
-static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
@@ -496,7 +496,7 @@ static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_i
return i2c_add_adapter(&sis630_adapter);
}
-static void __devexit sis630_remove(struct pci_dev *dev)
+static void sis630_remove(struct pci_dev *dev)
{
if (acpi_base) {
i2c_del_adapter(&sis630_adapter);
@@ -510,7 +510,7 @@ static struct pci_driver sis630_driver = {
.name = "sis630_smbus",
.id_table = sis630_ids,
.probe = sis630_probe,
- .remove = __devexit_p(sis630_remove),
+ .remove = sis630_remove,
};
module_pci_driver(sis630_driver);
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 7b72614a9bc..b9faf9b6002 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -252,7 +252,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
MODULE_DEVICE_TABLE (pci, sis96x_ids);
-static int __devinit sis96x_probe(struct pci_dev *dev,
+static int sis96x_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 ww = 0;
@@ -308,7 +308,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
return retval;
}
-static void __devexit sis96x_remove(struct pci_dev *dev)
+static void sis96x_remove(struct pci_dev *dev)
{
if (sis96x_smbus_base) {
i2c_del_adapter(&sis96x_adapter);
@@ -321,7 +321,7 @@ static struct pci_driver sis96x_driver = {
.name = "sis96x_smbus",
.id_table = sis96x_ids,
.probe = sis96x_probe,
- .remove = __devexit_p(sis96x_remove),
+ .remove = sis96x_remove,
};
module_pci_driver(sis96x_driver);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index dcea77bf6f5..7b38877ffec 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -642,7 +642,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
@@ -651,7 +651,7 @@ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
#endif
-static int __devinit tegra_i2c_probe(struct platform_device *pdev)
+static int tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -769,7 +769,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tegra_i2c_remove(struct platform_device *pdev)
+static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -817,7 +817,7 @@ static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
- .remove = __devexit_p(tegra_i2c_remove),
+ .remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ffee71ca19..be662511c58 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -96,7 +96,7 @@ static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
MODULE_DEVICE_TABLE (pci, vt586b_ids);
-static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u16 base;
u8 rev;
@@ -146,7 +146,7 @@ static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_i
return 0;
}
-static void __devexit vt586b_remove(struct pci_dev *dev)
+static void vt586b_remove(struct pci_dev *dev)
{
i2c_del_adapter(&vt586b_adapter);
release_region(I2C_DIR, IOSPACE);
@@ -158,7 +158,7 @@ static struct pci_driver vt586b_driver = {
.name = "vt586b_smbus",
.id_table = vt586b_ids,
.probe = vt586b_probe,
- .remove = __devexit_p(vt586b_remove),
+ .remove = vt586b_remove,
};
module_pci_driver(vt586b_driver);
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 271c9a2b0fd..b2d90e105f4 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -320,8 +320,8 @@ static struct i2c_adapter vt596_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit vt596_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int vt596_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
unsigned char temp;
int error;
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
new file mode 100644
index 00000000000..f45c32c1ace
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -0,0 +1,480 @@
+/*
+ * Nano River Technologies viperboard i2c master driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/viperboard.h>
+
+struct vprbrd_i2c {
+ struct i2c_adapter i2c;
+ u8 bus_freq_param;
+};
+
+/* i2c bus frequency module parameter */
+static u8 i2c_bus_param;
+static unsigned int i2c_bus_freq = 100;
+module_param(i2c_bus_freq, int, 0);
+MODULE_PARM_DESC(i2c_bus_freq,
+ "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000");
+
+static int vprbrd_i2c_status(struct i2c_adapter *i2c,
+ struct vprbrd_i2c_status *status, bool prev_error)
+{
+ u16 bytes_xfer;
+ int ret;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+
+ /* check for protocol error */
+ bytes_xfer = sizeof(struct vprbrd_i2c_status);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000,
+ status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != bytes_xfer)
+ prev_error = true;
+
+ if (prev_error) {
+ dev_err(&i2c->dev, "failure in usb communication\n");
+ return -EREMOTEIO;
+ }
+
+ dev_dbg(&i2c->dev, " status = %d\n", status->status);
+ if (status->status != 0x00) {
+ dev_err(&i2c->dev, "failure: i2c protocol error\n");
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_receive(struct usb_device *usb_dev,
+ struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer)
+{
+ int ret, bytes_actual;
+ int error = 0;
+
+ /* send the read request */
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg,
+ sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0)
+ || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ error = -EREMOTEIO;
+ }
+
+ /* read the actual data */
+ ret = usb_bulk_msg(usb_dev,
+ usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) || (bytes_xfer != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure receiving usb\n");
+ error = -EREMOTEIO;
+ }
+ return error;
+}
+
+static int vprbrd_i2c_addr(struct usb_device *usb_dev,
+ struct vprbrd_i2c_addr_msg *amsg)
+{
+ int ret, bytes_actual;
+
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg,
+ sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) ||
+ (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret;
+ u16 remain_len, bytes_xfer, len1, len2,
+ start = 0x0000;
+ struct vprbrd_i2c_read_msg *rmsg =
+ (struct vprbrd_i2c_read_msg *)vb->buf;
+
+ remain_len = msg->len;
+ rmsg->header.cmd = VPRBRD_I2C_CMD_READ;
+ while (remain_len > 0) {
+ rmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len <= 255) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len;
+ rmsg->header.len1 = 0x00;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 510) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 255;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 512) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 510;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 767) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 512;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ bytes_xfer = remain_len;
+ remain_len = 0;
+ } else if (remain_len <= 1022) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 767;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 1024) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 1022;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len = 0;
+ } else {
+ len1 = 512;
+ len2 = 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x02;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len -= 1024;
+ start += 1024;
+ }
+ rmsg->header.tf1 = cpu_to_le16(len1);
+ rmsg->header.tf2 = cpu_to_le16(len2);
+
+ /* first read transfer */
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start, rmsg, len1);
+
+ /* second read transfer if necessary */
+ if (len2 > 0) {
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start + 512, rmsg, len2);
+ }
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret, bytes_actual;
+ u16 remain_len, bytes_xfer,
+ start = 0x0000;
+ struct vprbrd_i2c_write_msg *wmsg =
+ (struct vprbrd_i2c_write_msg *)vb->buf;
+
+ remain_len = msg->len;
+ wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE;
+ wmsg->header.last = 0x00;
+ wmsg->header.chan = 0x00;
+ wmsg->header.spi = 0x0000;
+ while (remain_len > 0) {
+ wmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len > 503) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = 0xf8;
+ remain_len -= 503;
+ bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr);
+ start += 503;
+ } else if (remain_len > 255) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = (remain_len - 255);
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ } else {
+ wmsg->header.len1 = remain_len;
+ wmsg->header.len2 = 0x00;
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ }
+ memcpy(wmsg->data, msg->buf + start,
+ bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr));
+
+ ret = usb_bulk_msg(vb->usb_dev,
+ usb_sndbulkpipe(vb->usb_dev,
+ VPRBRD_EP_OUT), wmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+ if ((ret < 0) || (bytes_xfer != bytes_actual))
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_msg *pmsg;
+ int i, ret,
+ error = 0;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+ struct vprbrd_i2c_addr_msg *amsg =
+ (struct vprbrd_i2c_addr_msg *)vb->buf;
+ struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf;
+
+ dev_dbg(&i2c->dev, "master xfer %d messages:\n", num);
+
+ for (i = 0 ; i < num ; i++) {
+ pmsg = &msgs[i];
+
+ dev_dbg(&i2c->dev,
+ " %d: %s (flags %d) %d bytes to 0x%02x\n",
+ i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ pmsg->flags, pmsg->len, pmsg->addr);
+
+ /* msgs longer than 2048 bytes are not supported by adapter */
+ if (pmsg->len > 2048)
+ return -EINVAL;
+
+ mutex_lock(&vb->lock);
+ /* directly send the message */
+ if (pmsg->flags & I2C_M_RD) {
+ /* read data */
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x01;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr and len we're interested in to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_read(vb, pmsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+ /* in case of protocol error, return the error */
+ if (error < 0)
+ goto error;
+ } else {
+ /* write data */
+ ret = vprbrd_i2c_write(vb, pmsg);
+
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x00;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr, the data goes to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+
+ if (error < 0)
+ goto error;
+ }
+ mutex_unlock(&vb->lock);
+ }
+ return 0;
+error:
+ mutex_unlock(&vb->lock);
+ return error;
+}
+
+static u32 vprbrd_i2c_func(struct i2c_adapter *i2c)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* This is the actual algorithm we define */
+static const struct i2c_algorithm vprbrd_algorithm = {
+ .master_xfer = vprbrd_i2c_xfer,
+ .functionality = vprbrd_i2c_func,
+};
+
+static int vprbrd_i2c_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_i2c *vb_i2c;
+ int ret;
+ int pipe;
+
+ vb_i2c = kzalloc(sizeof(*vb_i2c), GFP_KERNEL);
+ if (vb_i2c == NULL)
+ return -ENOMEM;
+
+ /* setup i2c adapter description */
+ vb_i2c->i2c.owner = THIS_MODULE;
+ vb_i2c->i2c.class = I2C_CLASS_HWMON;
+ vb_i2c->i2c.algo = &vprbrd_algorithm;
+ vb_i2c->i2c.algo_data = vb;
+ /* save the param in usb capable memory */
+ vb_i2c->bus_freq_param = i2c_bus_param;
+
+ snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name),
+ "viperboard at bus %03d device %03d",
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ /* setting the bus frequency */
+ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ)
+ && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) {
+ pipe = usb_sndctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe,
+ VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != 1) {
+ dev_err(&pdev->dev,
+ "failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+
+ vb_i2c->i2c.dev.parent = &pdev->dev;
+
+ /* attach to i2c layer */
+ i2c_add_adapter(&vb_i2c->i2c);
+
+ platform_set_drvdata(pdev, vb_i2c);
+
+ return 0;
+
+error:
+ kfree(vb_i2c);
+ return ret;
+}
+
+static int vprbrd_i2c_remove(struct platform_device *pdev)
+{
+ struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&vb_i2c->i2c);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_i2c_driver = {
+ .driver.name = "viperboard-i2c",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_i2c_probe,
+ .remove = vprbrd_i2c_remove,
+};
+
+static int __init vprbrd_i2c_init(void)
+{
+ switch (i2c_bus_freq) {
+ case 6000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ;
+ break;
+ case 3000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ;
+ break;
+ case 1000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ;
+ break;
+ case 400:
+ i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ;
+ break;
+ case 200:
+ i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ;
+ break;
+ case 100:
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ break;
+ case 10:
+ i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ;
+ break;
+ default:
+ pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq);
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_i2c_driver);
+}
+subsys_initcall(vprbrd_i2c_init);
+
+static void __exit vprbrd_i2c_exit(void)
+{
+ platform_driver_unregister(&vprbrd_i2c_driver);
+}
+module_exit(vprbrd_i2c_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-i2c");
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 641d0e5e330..f042f6da0ac 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -689,7 +689,7 @@ static struct i2c_adapter xiic_adapter = {
};
-static int __devinit xiic_i2c_probe(struct platform_device *pdev)
+static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
struct xiic_i2c_platform_data *pdata;
@@ -774,7 +774,7 @@ resource_missing:
return -ENOENT;
}
-static int __devexit xiic_i2c_remove(struct platform_device* pdev)
+static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
struct resource *res;
@@ -800,7 +800,7 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
}
#if defined(CONFIG_OF)
-static const struct of_device_id xiic_of_match[] __devinitconst = {
+static const struct of_device_id xiic_of_match[] = {
{ .compatible = "xlnx,xps-iic-2.00.a", },
{},
};
@@ -809,7 +809,7 @@ MODULE_DEVICE_TABLE(of, xiic_of_match);
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
- .remove = __devexit_p(xiic_i2c_remove),
+ .remove = xiic_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 96d3fabd888..a005265461d 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -214,7 +214,7 @@ static struct i2c_algorithm xlr_i2c_algo = {
.functionality = xlr_func,
};
-static int __devinit xlr_i2c_probe(struct platform_device *pdev)
+static int xlr_i2c_probe(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
struct resource *res;
@@ -251,7 +251,7 @@ static int __devinit xlr_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit xlr_i2c_remove(struct platform_device *pdev)
+static int xlr_i2c_remove(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
@@ -263,7 +263,7 @@ static int __devexit xlr_i2c_remove(struct platform_device *pdev)
static struct platform_driver xlr_i2c_driver = {
.probe = xlr_i2c_probe,
- .remove = __devexit_p(xlr_i2c_remove),
+ .remove = xlr_i2c_remove,
.driver = {
.name = "xlr-i2cbus",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 08aab57337d..3862a953239 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -389,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
static struct scx200_acb_iface *scx200_acb_list;
static DEFINE_MUTEX(scx200_acb_list_mutex);
-static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
+static int scx200_acb_probe(struct scx200_acb_iface *iface)
{
u8 val;
@@ -424,7 +424,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
return 0;
}
-static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
+static struct scx200_acb_iface *scx200_create_iface(const char *text,
struct device *dev, int index)
{
struct scx200_acb_iface *iface;
@@ -449,7 +449,7 @@ static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
return iface;
}
-static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
+static int scx200_acb_create(struct scx200_acb_iface *iface)
{
struct i2c_adapter *adapter;
int rc;
@@ -480,7 +480,7 @@ static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
return 0;
}
-static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+static struct scx200_acb_iface *scx200_create_dev(const char *text,
unsigned long base, int index, struct device *dev)
{
struct scx200_acb_iface *iface;
@@ -508,7 +508,7 @@ static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
return NULL;
}
-static int __devinit scx200_probe(struct platform_device *pdev)
+static int scx200_probe(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
struct resource *res;
@@ -530,14 +530,14 @@ static int __devinit scx200_probe(struct platform_device *pdev)
return 0;
}
-static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+static void scx200_cleanup_iface(struct scx200_acb_iface *iface)
{
i2c_del_adapter(&iface->adapter);
release_region(iface->base, 8);
kfree(iface);
}
-static int __devexit scx200_remove(struct platform_device *pdev)
+static int scx200_remove(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
@@ -554,7 +554,7 @@ static struct platform_driver scx200_pci_driver = {
.owner = THIS_MODULE,
},
.probe = scx200_probe,
- .remove = __devexit_p(scx200_remove),
+ .remove = scx200_remove,
};
static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 566a6757a33..9f50ef04a4b 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/of_i2c.h>
+#include <linux/of_gpio.h>
struct gpiomux {
struct i2c_adapter *parent;
@@ -51,35 +53,116 @@ static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
return 0;
}
-static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip,
+static int match_gpio_chip_by_label(struct gpio_chip *chip,
void *data)
{
return !strcmp(chip->label, data);
}
-static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *adapter_np, *child;
+ struct i2c_adapter *adapter;
+ unsigned *values, *gpios;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(&pdev->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->data.parent = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ mux->data.n_values = of_get_child_count(np);
+
+ values = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.values) * mux->data.n_values,
+ GFP_KERNEL);
+ if (!values) {
+ dev_err(&pdev->dev, "Cannot allocate values array");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ of_property_read_u32(child, "reg", values + i);
+ i++;
+ }
+ mux->data.values = values;
+
+ if (of_property_read_u32(np, "idle-state", &mux->data.idle))
+ mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
+
+ mux->data.n_gpios = of_gpio_named_count(np, "mux-gpios");
+ if (mux->data.n_gpios < 0) {
+ dev_err(&pdev->dev, "Missing mux-gpios property in the DT.\n");
+ return -EINVAL;
+ }
+
+ gpios = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.gpios) * mux->data.n_gpios, GFP_KERNEL);
+ if (!gpios) {
+ dev_err(&pdev->dev, "Cannot allocate gpios array");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
+
+ mux->data.gpios = gpios;
+
+ return 0;
+}
+#else
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state, gpio_base;
int i, ret;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "Missing platform data\n");
- return -ENODEV;
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
+ return -ENOMEM;
}
+ platform_set_drvdata(pdev, mux);
+
+ if (!pdev->dev.platform_data) {
+ ret = i2c_mux_gpio_probe_dt(mux, pdev);
+ if (ret < 0)
+ return ret;
+ } else
+ memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
+
/*
* If a GPIO chip name is provided, the GPIO pin numbers provided are
* relative to its base GPIO number. Otherwise they are absolute.
*/
- if (pdata->gpio_chip) {
+ if (mux->data.gpio_chip) {
struct gpio_chip *gpio;
- gpio = gpiochip_find(pdata->gpio_chip,
+ gpio = gpiochip_find(mux->data.gpio_chip,
match_gpio_chip_by_label);
if (!gpio)
return -EPROBE_DEFER;
@@ -89,49 +172,44 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
gpio_base = 0;
}
- parent = i2c_get_adapter(pdata->parent);
+ parent = i2c_get_adapter(mux->data.parent);
if (!parent) {
dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
- pdata->parent);
+ mux->data.parent);
return -ENODEV;
}
- mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- ret = -ENOMEM;
- goto alloc_failed;
- }
-
mux->parent = parent;
- mux->data = *pdata;
mux->gpio_base = gpio_base;
+
mux->adap = devm_kzalloc(&pdev->dev,
- sizeof(*mux->adap) * pdata->n_values,
+ sizeof(*mux->adap) * mux->data.n_values,
GFP_KERNEL);
if (!mux->adap) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
ret = -ENOMEM;
goto alloc_failed;
}
- if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
- initial_state = pdata->idle;
+ if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
+ initial_state = mux->data.idle;
deselect = i2c_mux_gpio_deselect;
} else {
- initial_state = pdata->values[0];
+ initial_state = mux->data.values[0];
deselect = NULL;
}
- for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio");
+ for (i = 0; i < mux->data.n_gpios; i++) {
+ ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
- gpio_direction_output(gpio_base + pdata->gpios[i],
+ gpio_direction_output(gpio_base + mux->data.gpios[i],
initial_state & (1 << i));
}
- for (i = 0; i < pdata->n_values; i++) {
- u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- unsigned int class = pdata->classes ? pdata->classes[i] : 0;
+ for (i = 0; i < mux->data.n_values; i++) {
+ u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
+ unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
i, class,
@@ -144,26 +222,24 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%d port mux on %s adapter\n",
- pdata->n_values, parent->name);
-
- platform_set_drvdata(pdev, mux);
+ mux->data.n_values, parent->name);
return 0;
add_adapter_failed:
for (; i > 0; i--)
i2c_del_mux_adapter(mux->adap[i - 1]);
- i = pdata->n_gpios;
+ i = mux->data.n_gpios;
err_request_gpio:
for (; i > 0; i--)
- gpio_free(gpio_base + pdata->gpios[i - 1]);
+ gpio_free(gpio_base + mux->data.gpios[i - 1]);
alloc_failed:
i2c_put_adapter(parent);
return ret;
}
-static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
+static int i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -180,12 +256,19 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id i2c_mux_gpio_of_match[] = {
+ { .compatible = "i2c-mux-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match);
+
static struct platform_driver i2c_mux_gpio_driver = {
.probe = i2c_mux_gpio_probe,
- .remove = __devexit_p(i2c_mux_gpio_remove),
+ .remove = i2c_mux_gpio_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-mux-gpio",
+ .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
},
};
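The rework above lets i2c-mux-gpio probe either from device tree (via i2c_mux_gpio_probe_dt()) or, as before, from platform data copied into mux->data. A minimal board-code sketch of the platform-data path follows; the GPIO numbers, parent adapter number and device names are assumptions made for the example, not values taken from the patch.

#include <linux/kernel.h>
#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>

/* hypothetical values for the example */
static const unsigned example_mux_values[] = { 0, 1, 2, 3 };
static const unsigned example_mux_gpios[]  = { 50, 51 };

static struct i2c_mux_gpio_platform_data example_mux_pdata = {
	.parent   = 1,			/* parent bus i2c-1 is assumed here */
	.base_nr  = 0,			/* dynamically number the child buses */
	.values   = example_mux_values,
	.n_values = ARRAY_SIZE(example_mux_values),
	.gpios    = example_mux_gpios,
	.n_gpios  = ARRAY_SIZE(example_mux_gpios),
	.idle     = I2C_MUX_GPIO_NO_IDLE,
};

static struct platform_device example_mux_device = {
	.name = "i2c-mux-gpio",
	.id   = 0,
	.dev  = {
		.platform_data = &example_mux_pdata,
	},
};

Board code would then call platform_device_register(&example_mux_device) to create four child adapters selected by the two mux GPIOs.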
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 7fa5b24b16d..1e44d04d1b2 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -129,7 +129,7 @@ static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
}
#endif
-static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux;
int (*deselect)(struct i2c_adapter *, void *, u32);
@@ -241,7 +241,7 @@ err:
return ret;
}
-static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
int i;
@@ -255,7 +255,7 @@ static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+static const struct of_device_id i2c_mux_pinctrl_of_match[] = {
{ .compatible = "i2c-mux-pinctrl", },
{},
};
@@ -269,7 +269,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = {
.of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
},
.probe = i2c_mux_pinctrl_probe,
- .remove = __devexit_p(i2c_mux_pinctrl_remove),
+ .remove = i2c_mux_pinctrl_remove,
};
module_platform_driver(i2c_mux_pinctrl_driver);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 961b8d0a4ba..fe822a14d13 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -125,4 +125,18 @@ config TI_ADC081C
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
+config TI_AM335X_ADC
+ tristate "TI's ADC driver"
+ depends on MFD_TI_AM335X_TSCADC
+ help
+ Say yes here to build support for the Texas Instruments ADC
+ driver, which is also an MFD client.
+
+config VIPERBOARD_ADC
+ tristate "Viperboard ADC support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the ADC part of the Nano River
+ Technologies Viperboard.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 472fd7cd241..2d5f10080d8 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -13,4 +13,5 @@ obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
-
+obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
new file mode 100644
index 00000000000..02a43c87a8a
--- /dev/null
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -0,0 +1,260 @@
+/*
+ * TI ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+struct tiadc_device {
+ struct ti_tscadc_dev *mfd_tscadc;
+ int channels;
+};
+
+static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
+{
+ return readl(adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_step_config(struct tiadc_device *adc_dev)
+{
+ unsigned int stepconfig;
+ int i, channels = 0, steps;
+
+ /*
+ * There are 16 configurable steps and 8 analog input
+ * lines available, shared between the touchscreen and the ADC.
+ *
+ * Steps are assigned backwards, i.e. from 16 towards 0,
+ * depending on the number of input lines the ADC needs.
+ * The channel selects which analog input is fed to the
+ * ADC to be digitized.
+ */
+
+ steps = TOTAL_STEPS - adc_dev->channels;
+ channels = TOTAL_CHANNELS - adc_dev->channels;
+
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+
+ for (i = (steps + 1); i <= TOTAL_STEPS; i++) {
+ tiadc_writel(adc_dev, REG_STEPCONFIG(i),
+ stepconfig | STEPCONFIG_INP(channels));
+ tiadc_writel(adc_dev, REG_STEPDELAY(i),
+ STEPCONFIG_OPENDLY);
+ channels++;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+}
+
+static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
+{
+ struct iio_chan_spec *chan_array;
+ int i;
+
+ indio_dev->num_channels = channels;
+ chan_array = kcalloc(indio_dev->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (chan_array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < (indio_dev->num_channels); i++) {
+ struct iio_chan_spec *chan = chan_array + i;
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = i;
+ chan->info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT;
+ }
+
+ indio_dev->channels = chan_array;
+
+ return indio_dev->num_channels;
+}
+
+static void tiadc_channels_remove(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->channels);
+}
+
+static int tiadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i;
+ unsigned int fifo1count, readx1;
+
+ /*
+ * When the sub-system is first enabled, the
+ * sequencer always starts with the lowest step (1)
+ * and continues up to step (16).
+ * For example, if 4 ADC channels are enabled but
+ * only 1 of them is currently used, the sequencer
+ * still runs all 4 steps, producing 3 unwanted
+ * samples.
+ * Hence we need to flush out this data.
+ */
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++) {
+ readx1 = tiadc_readl(adc_dev, REG_FIFO1);
+ if (i == chan->channel)
+ *val = readx1 & 0xfff;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info tiadc_info = {
+ .read_raw = &tiadc_read_raw,
+};
+
+static int __devinit tiadc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct tiadc_device *adc_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+ if (!pdata || !pdata->adc_init) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ indio_dev = iio_device_alloc(sizeof(struct tiadc_device));
+ if (indio_dev == NULL) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ err = -ENOMEM;
+ goto err_ret;
+ }
+ adc_dev = iio_priv(indio_dev);
+
+ adc_dev->mfd_tscadc = tscadc_dev;
+ adc_dev->channels = pdata->adc_init->adc_channels;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tiadc_info;
+
+ tiadc_step_config(adc_dev);
+
+ err = tiadc_channel_init(indio_dev, adc_dev->channels);
+ if (err < 0)
+ goto err_free_device;
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_free_channels;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_channels:
+ tiadc_channels_remove(indio_dev);
+err_free_device:
+ iio_device_free(indio_dev);
+err_ret:
+ return err;
+}
+
+static int __devexit tiadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ tiadc_channels_remove(indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tiadc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ unsigned int idle;
+
+ if (!device_may_wakeup(tscadc_dev->dev)) {
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
+ }
+
+ return 0;
+}
+
+static int tiadc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int restore;
+
+ /* Make sure ADC is powered up */
+ restore = tiadc_readl(adc_dev, REG_CTRL);
+ restore &= ~(CNTRLREG_POWERDOWN);
+ tiadc_writel(adc_dev, REG_CTRL, restore);
+
+ tiadc_step_config(adc_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tiadc_pm_ops = {
+ .suspend = tiadc_suspend,
+ .resume = tiadc_resume,
+};
+#define TIADC_PM_OPS (&tiadc_pm_ops)
+#else
+#define TIADC_PM_OPS NULL
+#endif
+
+static struct platform_driver tiadc_driver = {
+ .driver = {
+ .name = "tiadc",
+ .owner = THIS_MODULE,
+ .pm = TIADC_PM_OPS,
+ },
+ .probe = tiadc_probe,
+ .remove = __devexit_p(tiadc_remove),
+};
+
+module_platform_driver(tiadc_driver);
+
+MODULE_DESCRIPTION("TI ADC controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
new file mode 100644
index 00000000000..10136a8b20d
--- /dev/null
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -0,0 +1,181 @@
+/*
+ * Nano River Technologies viperboard IIO ADC driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_ADC_CMD_GET 0x00
+
+struct vprbrd_adc_msg {
+ u8 cmd;
+ u8 chan;
+ u8 val;
+} __packed;
+
+struct vprbrd_adc {
+ struct vprbrd *vb;
+};
+
+#define VPRBRD_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
+}
+
+static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
+ VPRBRD_ADC_CHANNEL(0),
+ VPRBRD_ADC_CHANNEL(1),
+ VPRBRD_ADC_CHANNEL(2),
+ VPRBRD_ADC_CHANNEL(3),
+};
+
+static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ int ret, error = 0;
+ struct vprbrd_adc *adc = iio_priv(iio_dev);
+ struct vprbrd *vb = adc->vb;
+ struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&vb->lock);
+
+ admsg->cmd = VPRBRD_ADC_CMD_GET;
+ admsg->chan = chan->scan_index;
+ admsg->val = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb send error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ *val = admsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb recv error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ if (error)
+ goto error;
+
+ return IIO_VAL_INT;
+ default:
+ error = -EINVAL;
+ break;
+ }
+error:
+ return error;
+}
+
+static const struct iio_info vprbrd_adc_iio_info = {
+ .read_raw = &vprbrd_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit vprbrd_adc_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_adc *adc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* registering iio */
+ indio_dev = iio_device_alloc(sizeof(*adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(indio_dev);
+ adc->vb = vb;
+ indio_dev->name = "viperboard adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &vprbrd_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vprbrd_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register iio (adc)");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+error:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int __devexit vprbrd_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver vprbrd_adc_driver = {
+ .driver = {
+ .name = "viperboard-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = vprbrd_adc_probe,
+ .remove = __devexit_p(vprbrd_adc_remove),
+};
+
+module_platform_driver(vprbrd_adc_driver);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-adc");
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a7568c34a1a..d789eea3216 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -345,17 +345,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu
err = ib_query_port(device, port_num, &props);
if (err)
- return 1;
+ return err;
for (i = 0; i < props.gid_tbl_len; ++i) {
err = ib_query_gid(device, port_num, i, &tmp);
if (err)
- return 1;
+ return err;
if (!memcmp(&tmp, gid, sizeof tmp))
return 0;
}
- return -EAGAIN;
+ return -EADDRNOTAVAIL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,8 +388,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
if (!ret) {
id_priv->id.port_num = port;
goto out;
- } else if (ret == 1)
- break;
+ }
}
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 32d34e88d5c..706cf97cbe8 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -311,6 +311,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
+ break;
}
default:
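The one-line hunk above adds the break that was missing at the end of the case that delivers the completion-queue event, so control no longer falls through into the default label after the event handler runs. A standalone illustration of that pitfall (not taken from the driver):

#include <stdio.h>

static void dispatch(int event)
{
	switch (event) {
	case 1: {
		printf("handled event %d\n", event);
		break;	/* without this, the default branch would also run */
	}
	default:
		printf("unhandled event %d\n", event);
	}
}

int main(void)
{
	dispatch(1);	/* prints only "handled event 1" */
	return 0;
}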
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index aaf88ef9409..3e094cd6a0e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,9 +128,8 @@ static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
+ WARN(1, "%s timer stopped when its not running! ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -1756,9 +1755,8 @@ static void ep_timeout(unsigned long arg)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+ WARN(1, "%s unexpected state ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
abort = 0;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6cfd4d8fd0b..c13745cde7f 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/if_vlan.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
+#include <net/tcp.h>
#include "iw_cxgb4.h"
@@ -61,6 +63,14 @@ static char *states[] = {
NULL,
};
+static int nocong;
+module_param(nocong, int, 0644);
+MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
+
+static int enable_ecn;
+module_param(enable_ecn, int, 0644);
+MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
+
static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
@@ -151,9 +161,8 @@ static void stop_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! "
+ WARN(1, "%s timer stopped when its not running! "
"ep %p state %u\n", __func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -266,6 +275,7 @@ void _c4iw_free_ep(struct kref *kref)
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
}
kfree(ep);
}
@@ -442,6 +452,50 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+ (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+ (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
+ struct l2t_entry *l2t)
+{
+ unsigned int ntuple = 0;
+ u32 viid;
+
+ switch (dev->rdev.lldi.filt_mode) {
+
+ /* default filter mode */
+ case HW_TPL_FR_MT_PR_IV_P_FC:
+ if (l2t->vlan == VLAN_NONE)
+ ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+ else {
+ ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ }
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ case HW_TPL_FR_MT_PR_OV_P_FC: {
+ viid = cxgb4_port_viid(l2t->neigh->dev);
+
+ ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+ ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ }
+ default:
+ break;
+ }
+ return ntuple;
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -464,7 +518,8 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
@@ -475,6 +530,7 @@ static int send_connect(struct c4iw_ep *ep)
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
@@ -493,8 +549,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = ep->com.local_addr.sin_addr.s_addr;
req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = 0;
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
req->opt2 = cpu_to_be32(opt2);
+ set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -771,6 +828,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
/* setup the hwtid for this connection */
ep->hwtid = tid;
cxgb4_insert_tid(t, ep, tid);
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -778,7 +836,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_emss(ep, ntohs(req->tcp_opt));
/* dealloc the atid */
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
+ set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
send_flowc(ep, NULL);
@@ -804,6 +864,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
@@ -812,6 +873,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
close_complete_upcall(ep);
state_set(&ep->com, ABORTING);
+ set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
@@ -826,6 +888,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
PDBG("peer close delivered ep %p cm_id %p tid %u\n",
ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(DISCONN_UPCALL, &ep->com.history);
}
}
@@ -844,6 +907,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -876,6 +940,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
ep->hwtid, status);
+ set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
if (status < 0) {
@@ -916,6 +981,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
ep->parent_ep->com.cm_id,
&event);
}
+ set_bit(CONNREQ_UPCALL, &ep->com.history);
c4iw_put_ep(&ep->parent_ep->com);
ep->parent_ep = NULL;
}
@@ -932,6 +998,7 @@ static void established_upcall(struct c4iw_ep *ep)
if (ep->com.cm_id) {
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(ESTAB_UPCALL, &ep->com.history);
}
}
@@ -1317,6 +1384,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int dlen = ntohs(hdr->len);
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
+ __u8 status = hdr->status;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1339,9 +1407,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
case MPA_REP_SENT:
break;
default:
- printk(KERN_ERR MOD "%s Unexpected streaming data."
- " ep %p state %d tid %u\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
+ pr_err("%s Unexpected streaming data." \
+ " ep %p state %d tid %u status %d\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid, status);
/*
* The ep will timeout and inform the ULP of the failure.
@@ -1384,6 +1452,63 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+{
+ struct sk_buff *skb;
+ struct fw_ofld_connection_wr *req;
+ unsigned int mtu_idx;
+ int wscale;
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ ep->l2t));
+ req->le.lport = ep->com.local_addr.sin_port;
+ req->le.pport = ep->com.remote_addr.sin_port;
+ req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
+ req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+ req->tcb.cplrxdataack_cplpassacceptrpl =
+ htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+ req->tcb.tx_max = jiffies;
+ req->tcb.rcv_adv = htons(1);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ req->tcb.opt0 = TCAM_BYPASS(1) |
+ (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
+ DELACK(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ ULP_MODE(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ(rcv_win >> 10);
+ req->tcb.opt2 = PACE(1) |
+ TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ req->tcb.opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ req->tcb.opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ req->tcb.opt2 |= WND_SCALE_EN(1);
+ req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ set_bit(ACT_OFLD_CONN, &ep->com.history);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
/*
* Return whether a failed active open has allocated a TID
*/
@@ -1393,6 +1518,111 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_ARP_MISS;
}
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+ int err = 0;
+ struct rtable *rt;
+ struct port_info *pi;
+ struct net_device *pdev;
+ int step;
+ struct neighbour *neigh;
+
+ PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+ init_timer(&ep->timer);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ pr_err("%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+ insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+ /* find a route */
+ rt = find_route(ep->com.dev,
+ ep->com.cm_id->local_addr.sin_addr.s_addr,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->com.cm_id->local_addr.sin_port,
+ ep->com.cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ pr_err("%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->dst;
+
+ neigh = dst_neigh_lookup(ep->dst,
+ &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ /* get a l2t entry */
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(neigh->dev);
+ ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+ 0x7F) << 1;
+ }
+
+ step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = pi->port_id * step;
+ ep->ctrlq_idx = pi->port_id;
+ step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+ if (!ep->l2t) {
+ pr_err("%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ /*
+ * Remember to send a notification to the upper layer.
+ * The upper layer is not aware that this is a
+ * re-connect attempt, so it is still waiting for the
+ * response to the first connect request.
+ */
+ connect_reply_upcall(ep, -ECONNRESET);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
@@ -1413,6 +1643,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+ set_bit(ACT_OPEN_RPL, &ep->com.history);
+
/*
* Log interesting failures.
*/
@@ -1420,6 +1652,29 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_RESET:
case CPL_ERR_CONN_TIMEDOUT:
break;
+ case CPL_ERR_TCAM_FULL:
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.tcam_full++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ send_fw_act_open_req(ep,
+ GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status))));
+ return 0;
+ }
+ break;
+ case CPL_ERR_CONN_EXIST:
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+ atid);
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_reconnect(ep);
+ return 0;
+ }
+ break;
default:
printk(KERN_INFO MOD "Active open failure - "
"atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
@@ -1437,6 +1692,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (status && act_open_has_tid(status))
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -1453,13 +1709,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
if (!ep) {
- printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
- return 0;
+ PDBG("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+out:
return 0;
}
@@ -1511,14 +1768,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
skb_get(skb);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
TX_CHAN(ep->tx_chan) |
SMAC_SEL(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP(ep->tos >> 2) |
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
@@ -1530,6 +1788,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (enable_ecn) {
+ const struct tcphdr *tcph;
+ u32 hlen = ntohl(req->hdr_len);
+
+ tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
+ G_IP_HDR_LEN(hlen);
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN(1);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
@@ -1646,22 +1913,30 @@ out:
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *child_ep, *parent_ep;
+ struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
struct rtable *rt;
- __be32 local_ip, peer_ip;
+ __be32 local_ip, peer_ip = 0;
__be16 local_port, peer_port;
int err;
+ u16 peer_mss = ntohs(req->tcpopt.mss);
parent_ep = lookup_stid(t, stid);
- PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+ if (!parent_ep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+ PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+ "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+ ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+
if (state_read(&parent_ep->com) != LISTEN) {
printk(KERN_ERR "%s - listening ep not in LISTEN\n",
__func__);
@@ -1695,6 +1970,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ if (peer_mss && child_ep->mtu > (peer_mss + 40))
+ child_ep->mtu = peer_mss + 40;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1716,6 +1994,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
accept_cr(child_ep, peer_ip, skb, req);
+ set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
reject:
reject_cr(dev, hwtid, peer_ip, skb);
@@ -1735,12 +2014,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ ntohs(req->tcp_opt));
+
set_emss(ep, ntohs(req->tcp_opt));
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
dst_confirm(ep->dst);
state_set(&ep->com, MPA_REQ_WAIT);
start_ep_timer(ep);
send_flowc(ep, skb);
+ set_bit(PASS_ESTAB, &ep->com.history);
return 0;
}
@@ -1760,6 +2044,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
+ set_bit(PEER_CLOSE, &ep->com.history);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
@@ -1839,74 +2124,6 @@ static int is_neg_adv_abort(unsigned int status)
status == CPL_ERR_PERSIST_NEG_ADVICE;
}
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
- struct rtable *rt;
- int err = 0;
-
- PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
- if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(ep->com.dev,
- ep->com.cm_id->local_addr.sin_addr.s_addr,
- ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->com.cm_id->local_addr.sin_port,
- ep->com.cm_id->remote_addr.sin_port, 0);
- if (!rt) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
-
- err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->dst, ep->com.dev, false);
- if (err) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail4;
- }
-
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
-
- state_set(&ep->com, CONNECTING);
- ep->tos = 0;
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- cxgb4_l2t_release(ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
- /*
- * remember to send notification to upper layer.
- * We are in here so the upper layer is not aware that this is
- * re-connect attempt and so, upper layer is still waiting for
- * response of 1st connect request.
- */
- connect_reply_upcall(ep, -ECONNRESET);
- c4iw_put_ep(&ep->com);
-out:
- return err;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1927,6 +2144,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(PEER_ABORT, &ep->com.history);
/*
* Wake up any threads in rdma_init() or rdma_fini().
@@ -2141,6 +2359,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
+ set_bit(ULP_REJECT, &ep->com.history);
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2170,6 +2389,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
+ set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2293,6 +2513,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto fail2;
}
+ insert_handle(dev, &dev->atid_idr, ep, ep->atid);
PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2338,6 +2559,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
fail4:
dst_release(ep->dst);
fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
cm_id->rem_ref(cm_id);
@@ -2352,7 +2574,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
-
might_sleep();
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2371,30 +2592,54 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (dev->rdev.lldi.enable_fw_ofld_conn)
+ ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+ else
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
err = -ENOMEM;
goto fail2;
}
-
+ insert_handle(dev, &dev->stid_idr, ep, ep->stid);
state_set(&ep->com, LISTEN);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.local_addr.sin_addr.s_addr,
- ep->com.local_addr.sin_port,
- ep->com.dev->rdev.lldi.rxq_ids[0]);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ do {
+ err = cxgb4_create_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ 0,
+ 0);
+ if (err == -EBUSY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(100));
+ }
+ } while (err == -EBUSY);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+ ep->stid, ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (!err)
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+ &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
if (!err) {
cm_id->provider_data = ep;
goto out;
}
-fail3:
+ pr_err("%s cxgb4_create_server/filter failed err %d " \
+ "stid %d laddr %08x lport %d\n", \
+ __func__, err, ep->stid,
+ ntohl(ep->com.local_addr.sin_addr.s_addr),
+ ntohs(ep->com.local_addr.sin_port));
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
cm_id->rem_ref(cm_id);
@@ -2413,12 +2658,19 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = listen_stop(ep);
- if (err)
- goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+ err = cxgb4_remove_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
cm_id->rem_ref(cm_id);
@@ -2482,10 +2734,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
+ set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep);
ret = send_abort(ep, NULL, gfp);
- } else
+ } else {
+ set_bit(EP_DISC_CLOSE, &ep->com.history);
ret = send_halfclose(ep, gfp);
+ }
if (ret)
fatal = 1;
}
@@ -2495,10 +2750,323 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct c4iw_ep *ep;
+ int atid = be32_to_cpu(req->tid);
+
+ ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+ if (!ep)
+ return;
+
+ switch (req->retval) {
+ case FW_ENOMEM:
+ set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ case FW_EADDRINUSE:
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ break;
+ default:
+ pr_info("%s unexpected ofld conn wr retval %d\n",
+ __func__, req->retval);
+ break;
+ }
+ pr_err("active ofld_connect_wr failure %d atid %d\n",
+ req->retval, atid);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.act_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ connect_reply_upcall(ep, status2errno(req->retval));
+ state_set(&ep->com, DEAD);
+ remove_handle(dev, &dev->atid_idr, atid);
+ cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct sk_buff *rpl_skb;
+ struct cpl_pass_accept_req *cpl;
+ int ret;
+
+ rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+ BUG_ON(!rpl_skb);
+ if (req->retval) {
+ PDBG("%s passive open failure %d\n", __func__, req->retval);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.pas_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ kfree_skb(rpl_skb);
+ } else {
+ cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+ htonl(req->tid)));
+ ret = pass_accept_req(dev, rpl_skb);
+ if (!ret)
+ kfree_skb(rpl_skb);
+ }
+ return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+ switch (rpl->type) {
+ case FW6_TYPE_CQE:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+ req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+ switch (req->t_state) {
+ case TCP_SYN_SENT:
+ active_ofld_conn_reply(dev, skb, req);
+ break;
+ case TCP_SYN_RECV:
+ passive_ofld_conn_reply(dev, skb, req);
+ break;
+ default:
+ pr_err("%s unexpected ofld conn wr state %d\n",
+ __func__, req->t_state);
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
+{
+ u32 l2info;
+ u16 vlantag, len, hdr_len;
+ u8 intf;
+ struct cpl_rx_pkt *cpl = cplhdr(skb);
+ struct cpl_pass_accept_req *req;
+ struct tcp_options_received tmp_opt;
+
+ /* Store values from cpl_rx_pkt in a temporary location. */
+ vlantag = cpl->vlan;
+ len = cpl->len;
+ l2info = cpl->l2info;
+ hdr_len = cpl->hdr_len;
+ intf = cpl->iff;
+
+ __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+ /*
+ * We need to parse the TCP options from the SYN packet
+ * to generate the cpl_pass_accept_req.
+ */
+ memset(&tmp_opt, 0, sizeof(tmp_opt));
+ tcp_clear_options(&tmp_opt);
+ tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+
+ req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+ V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+ F_SYN_XACT_MATCH);
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
+ V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
+ V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
+ req->vlan = vlantag;
+ req->len = len;
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+ PASS_OPEN_TOS(tos));
+ req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+ if (tmp_opt.wscale_ok)
+ req->tcpopt.wsf = tmp_opt.snd_wscale;
+ req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+ if (tmp_opt.sack_ok)
+ req->tcpopt.sack = 1;
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+ return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+ __be32 laddr, __be16 lport,
+ __be32 raddr, __be16 rport,
+ u32 rcv_isn, u32 filter, u16 window,
+ u32 rss_qid, u8 port_id)
+{
+ struct sk_buff *req_skb;
+ struct fw_ofld_connection_wr *req;
+ struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+ req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+ req->le.filter = filter;
+ req->le.lport = lport;
+ req->le.pport = rport;
+ req->le.u.ipv4.lip = laddr;
+ req->le.u.ipv4.pip = raddr;
+ req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+ req->tcb.rcv_adv = htons(window);
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+ V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(
+ GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+ /*
+ * We store the qid in opt2, which the firmware will use
+ * to send us the wr response.
+ */
+ req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+ /*
+ * We initialize the MSS index in the TCB to 0xF so that
+ * when the driver sends the cpl_pass_accept_rpl the TCB
+ * picks up the correct value. If this were 0, TP would
+ * ignore any value > 0 for the MSS index.
+ */
+ req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->cookie = cpu_to_be64((u64)skb);
+
+ set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+ cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for CPL_RX_PKT messages. These arrive when a filter,
+ * rather than a server entry, is used to redirect a SYN packet.
+ * Packets that hit the filter are redirected to the offload queue
+ * and the driver then tries to establish the connection using a
+ * firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ int stid;
+ unsigned int filter;
+ struct ethhdr *eh = NULL;
+ struct vlan_ethhdr *vlan_eh = NULL;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct rss_header *rss = (void *)skb->data;
+ struct cpl_rx_pkt *cpl = (void *)skb->data;
+ struct cpl_pass_accept_req *req = (void *)(rss + 1);
+ struct l2t_entry *e;
+ struct dst_entry *dst;
+ struct rtable *rt;
+ struct c4iw_ep *lep;
+ u16 window;
+ struct port_info *pi;
+ struct net_device *pdev;
+ u16 rss_qid;
+ int step;
+ u32 tx_chan;
+ struct neighbour *neigh;
+
+ /* Drop all non-SYN packets */
+ if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ goto reject;
+
+ /*
+ * Drop all packets which did not hit the filter.
+ * Unlikely to happen.
+ */
+ if (!(rss->filter_hit && rss->filter_tid))
+ goto reject;
+
+ /*
+ * Calculate the server tid from the filter hit index in cpl_rx_pkt.
+ */
+ stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+ + dev->rdev.lldi.tids->nstids;
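+ /*
+ * For illustration only (example values, not taken from this code):
+ * with sftid_base = 4096 and nstids = 2048, a filter hit index of
+ * 4100 would give stid = 4100 - 4096 + 2048 = 2052.
+ */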
+
+ lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+ if (!lep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
+
+ if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ skb->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ if (iph->version != 0x4)
+ goto reject;
+
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)rss);
+ skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+ skb_get(skb);
+
+ PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+ ntohs(tcph->source), iph->tos);
+
+ rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+ iph->tos);
+ if (!rt) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->dst;
+ neigh = dst_neigh_lookup_skb(dst, skb);
+
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, iph->daddr);
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ tx_chan = cxgb4_port_chan(pdev);
+ dev_put(pdev);
+ } else {
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ tx_chan = cxgb4_port_chan(neigh->dev);
+ }
+ if (!e) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ goto free_dst;
+ }
+
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+ window = htons(tcph->window);
+
+ /* Calculate the filter portion for the LE region. */
+ filter = cpu_to_be32(select_ntuple(dev, dst, e));
+
+ /*
+ * Synthesize the cpl_pass_accept_req. We have everything except the
+ * TID. Once firmware sends a reply with TID we update the TID field
+ * in cpl and pass it through the regular cpl_pass_accept_req path.
+ */
+ build_cpl_pass_accept_req(skb, stid, iph->tos);
+ send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+ tcph->source, ntohl(tcph->seq), filter, window,
+ rss_qid, pi->port_id);
+ cxgb4_l2t_release(e);
+free_dst:
+ dst_release(dst);
+reject:
return 0;
}
@@ -2521,7 +3089,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
- [CPL_FW6_MSG] = async_event
+ [CPL_FW6_MSG] = deferred_fw6_msg,
+ [CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2532,6 +3101,7 @@ static void process_timeout(struct c4iw_ep *ep)
mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
__state_set(&ep->com, ABORTING);
@@ -2551,9 +3121,8 @@ static void process_timeout(struct c4iw_ep *ep)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+ WARN(1, "%s unexpected state ep %p tid %u state %u\n",
__func__, ep, ep->hwtid, ep->com.state);
- WARN_ON(1);
abort = 0;
}
mutex_unlock(&ep->com.mutex);
@@ -2653,7 +3222,7 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s type %u\n", __func__, rpl->type);
switch (rpl->type) {
- case 1:
+ case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
@@ -2661,7 +3230,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_wake_up(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
- case 2:
+ case FW6_TYPE_CQE:
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
sched(dev, skb);
break;
default:
@@ -2724,7 +3294,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
- [CPL_FW6_MSG] = fw6_msg
+ [CPL_FW6_MSG] = fw6_msg,
+ [CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd78370..ba11c76c0b5 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " DB State: %s Transitions %llu\n",
db_state_str[dev->db_state],
dev->rdev.stats.db_state_transitions);
+ seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+ seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.act_ofld_conn_fails);
+ seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.pas_ofld_conn_fails);
return 0;
}
@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.db_empty = 0;
dev->rdev.stats.db_drop = 0;
dev->rdev.stats.db_state_transitions = 0;
+ dev->rdev.stats.tcam_full = 0;
+ dev->rdev.stats.act_ofld_conn_fails = 0;
+ dev->rdev.stats.pas_ofld_conn_fails = 0;
mutex_unlock(&dev->rdev.stats.lock);
return count;
}
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
.write = stats_clear,
};
+static int dump_ep(int id, void *p, void *data)
+{
+ struct c4iw_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+ "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+ ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+ ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port),
+ &ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(ep->com.remote_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+ struct c4iw_listen_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+ "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+ ep->com.flags, ep->stid, ep->backlog,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd = file->private_data;
+ if (!epd) {
+ pr_info("%s null qpd?\n", __func__);
+ return 0;
+ }
+ vfree(epd->buf);
+ kfree(epd);
+ return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd;
+ int ret = 0;
+ int count = 1;
+
+ epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+ if (!epd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ epd->devp = inode->i_private;
+ epd->pos = 0;
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+ spin_unlock_irq(&epd->devp->lock);
+
+ epd->bufsize = count * 160;
+ epd->buf = vmalloc(epd->bufsize);
+ if (!epd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+ spin_unlock_irq(&epd->devp->lock);
+
+ file->private_data = epd;
+ goto out;
+err1:
+ kfree(epd);
+out:
+ return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = ep_open,
+ .release = ep_release,
+ .read = debugfs_read,
+};
+
static int setup_debugfs(struct c4iw_dev *devp)
{
struct dentry *de;
@@ -344,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+ de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &ep_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+
return 0;
}
@@ -475,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
idr_destroy(&ctx->dev->cqidr);
idr_destroy(&ctx->dev->qpidr);
idr_destroy(&ctx->dev->mmidr);
+ idr_destroy(&ctx->dev->hwtid_idr);
+ idr_destroy(&ctx->dev->stid_idr);
+ idr_destroy(&ctx->dev->atid_idr);
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
ctx->dev = NULL;
@@ -532,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
idr_init(&devp->cqidr);
idr_init(&devp->qpidr);
idr_init(&devp->mmidr);
+ idr_init(&devp->hwtid_idr);
+ idr_init(&devp->stid_idr);
+ idr_init(&devp->atid_idr);
spin_lock_init(&devp->lock);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
@@ -577,14 +703,76 @@ out:
return ctx;
}
+static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ const __be64 *rsp,
+ u32 pktshift)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Allocate space for the cpl_pass_accept_req which will be synthesized
+ * by the driver. Once the driver synthesizes the request, the skb will go
+ * through the regular cpl_pass_accept_req processing.
+ * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+ * cpl_rx_pkt.
+ */
+ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
+
+ /*
+ * This skb will contain:
+ * rss_header from the rspq descriptor (1 flit)
+ * cpl_rx_pkt struct from the rspq descriptor (2 flits)
+ * space for the difference between the size of an
+ * rx_pkt and pass_accept_req cpl (1 flit)
+ * the packet data from the gl
+ */
+ skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header));
+ skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
+ sizeof(struct cpl_pass_accept_req),
+ gl->va + pktshift,
+ gl->tot_len - pktshift);
+ return skb;
+}
+
+static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
+ const __be64 *rsp)
+{
+ unsigned int opcode = *(u8 *)rsp;
+ struct sk_buff *skb;
+
+ if (opcode != CPL_RX_PKT)
+ goto out;
+
+ skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
+ if (skb == NULL)
+ goto out;
+
+ if (c4iw_handlers[opcode] == NULL) {
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+ kfree_skb(skb);
+ goto out;
+ }
+ c4iw_handlers[opcode](dev, skb);
+ return 1;
+out:
+ return 0;
+}
+
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct uld_ctx *ctx = handle;
struct c4iw_dev *dev = ctx->dev;
struct sk_buff *skb;
- const struct cpl_act_establish *rpl;
- unsigned int opcode;
+ u8 opcode;
if (gl == NULL) {
/* omit RSS and rsp_ctrl at end of descriptor */
@@ -601,19 +789,29 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
u32 qid = be32_to_cpu(rc->pldbuflen_qid);
c4iw_ev_handler(dev, qid);
return 0;
+ } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
+ if (recv_rx_pkt(dev, gl, rsp))
+ return 0;
+
+ pr_info("%s: unexpected FL contents at %p, " \
+ "RSS %#llx, FL %#llx, len %u\n",
+ pci_name(ctx->lldi.pdev), gl->va,
+ (unsigned long long)be64_to_cpu(*rsp),
+ (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+ gl->tot_len);
+
+ return 0;
} else {
skb = cxgb4_pktgl_to_skb(gl, 128, 128);
if (unlikely(!skb))
goto nomem;
}
- rpl = cplhdr(skb);
- opcode = rpl->ot.opcode;
-
+ opcode = *(u8 *)rsp;
if (c4iw_handlers[opcode])
c4iw_handlers[opcode](dev, skb);
else
- printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
opcode);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9beb3a9f033..9c1644fb025 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -130,6 +130,9 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
+ u64 tcam_full;
+ u64 act_ofld_conn_fails;
+ u64 pas_ofld_conn_fails;
};
struct c4iw_rdev {
@@ -223,6 +226,9 @@ struct c4iw_dev {
struct dentry *debugfs_root;
enum db_state db_state;
int qpcnt;
+ struct idr hwtid_idr;
+ struct idr atid_idr;
+ struct idr stid_idr;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -712,6 +718,31 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
};
+enum c4iw_ep_history {
+ ACT_OPEN_REQ = 0,
+ ACT_OFLD_CONN = 1,
+ ACT_OPEN_RPL = 2,
+ ACT_ESTAB = 3,
+ PASS_ACCEPT_REQ = 4,
+ PASS_ESTAB = 5,
+ ABORT_UPCALL = 6,
+ ESTAB_UPCALL = 7,
+ CLOSE_UPCALL = 8,
+ ULP_ACCEPT = 9,
+ ULP_REJECT = 10,
+ TIMEDOUT = 11,
+ PEER_ABORT = 12,
+ PEER_CLOSE = 13,
+ CONNREQ_UPCALL = 14,
+ ABORT_CONN = 15,
+ DISCONN_UPCALL = 16,
+ EP_DISC_CLOSE = 17,
+ EP_DISC_ABORT = 18,
+ CONN_RPL_UPCALL = 19,
+ ACT_RETRY_NOMEM = 20,
+ ACT_RETRY_INUSE = 21
+};
+
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
@@ -723,6 +754,7 @@ struct c4iw_ep_common {
struct sockaddr_in remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
+ unsigned long history;
};
struct c4iw_listen_ep {
@@ -760,6 +792,7 @@ struct c4iw_ep {
u8 tos;
u8 retry_with_mpa_v1;
u8 tried_with_mpa_v1;
+ unsigned int retry_count;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 2d41d04fd95..89517ffb438 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -90,26 +90,6 @@
static DEFINE_SPINLOCK(hcall_lock);
-static u32 get_longbusy_msecs(int longbusy_rc)
-{
- switch (longbusy_rc) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
static long ehca_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 49b09c697c7..be2a60e142b 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -719,16 +719,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
goto done;
/*
- * we ignore most issues after reporting them, but have to specially
- * handle hardware-disabled chips.
- */
- if (ret == 2) {
- /* unique error, known to ipath_init_one */
- ret = -EPERM;
- goto done;
- }
-
- /*
* We could bump this to allow for full rcvegrcnt + rcvtidcnt,
* but then it no longer nicely fits power of two, and since
* we now use routines that backend onto __get_free_pages, the
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 80079e5a2e3..dbc99d41605 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;
- spin_lock_irqsave(&sriov->going_down_lock, flags);
spin_lock(&sriov->id_map_lock);
+ spin_lock_irqsave(&sriov->going_down_lock, flags);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
- spin_unlock(&sriov->id_map_lock);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ spin_unlock(&sriov->id_map_lock);
}
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index c9eb6a6815c..ae67df35dd4 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
- return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
+ return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,8 +77,9 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+ struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
- return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
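The cqe + 1 / cqe_inc adjustments introduced throughout this file follow one convention: struct mlx4_cqe remains 32 bytes, so when the device uses a 64-byte CQE stride the driver treats the valid CQE fields as living in the second half of each entry and advances the pointer by one struct, exactly as get_sw_cqe() does above. A minimal sketch of that convention (the helper name is illustrative and not part of this patch):

	/* Return the 32-byte CQE payload within a 32- or 64-byte entry. */
	static inline struct mlx4_cqe *cqe_payload(struct mlx4_cqe *cqe,
						   int entry_size)
	{
		/* assumes sizeof(struct mlx4_cqe) == 32 */
		return entry_size == 64 ? cqe + 1 : cqe;
	}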
@@ -99,12 +100,13 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
{
int err;
- err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+ err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
+ buf->entry_size = dev->dev->caps.cqe_size;
err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
&buf->mtt);
if (err)
@@ -120,8 +122,7 @@ err_mtt:
mlx4_mtt_cleanup(dev->dev, &buf->mtt);
err_buf:
- mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
- &buf->buf);
+ mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
out:
return err;
@@ -129,7 +130,7 @@ out:
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
- mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+ mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
@@ -137,8 +138,9 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
u64 buf_addr, int cqe)
{
int err;
+ int cqe_size = dev->dev->caps.cqe_size;
- *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+ *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -331,16 +333,23 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
struct mlx4_cqe *cqe, *new_cqe;
int i;
+ int cqe_size = cq->buf.entry_size;
+ int cqe_inc = cqe_size == 64 ? 1 : 0;
i = cq->mcq.cons_index;
cqe = get_cqe(cq, i & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
(i + 1) & cq->resize_buf->cqe);
- memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+ memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
+ new_cqe += cqe_inc;
+
new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+ cqe += cqe_inc;
}
++cq->mcq.cons_index;
}
@@ -438,6 +447,7 @@ err_buf:
out:
mutex_unlock(&cq->resize_mutex);
+
return err;
}
@@ -586,6 +596,9 @@ repoll:
if (!cqe)
return -EAGAIN;
+ if (cq->buf.entry_size == 64)
+ cqe++;
+
++cq->mcq.cons_index;
/*
@@ -807,6 +820,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
int nfreed = 0;
struct mlx4_cqe *cqe, *dest;
u8 owner_bit;
+ int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
/*
* First we need to find the current producer index, so we
@@ -825,12 +839,16 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
*/
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
++nfreed;
} else if (nfreed) {
dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+ dest += cqe_inc;
+
owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
memcpy(dest, cqe, sizeof *cqe);
dest->owner_sr_opcode = owner_bit |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 718ec6b2bad..e7d81c0d1ac 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,15 +563,24 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- resp.qp_tab_size = dev->dev->caps.num_qps;
- resp.bf_reg_size = dev->dev->caps.bf_reg_size;
- resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+ resp_v3.qp_tab_size = dev->dev->caps.num_qps;
+ resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ } else {
+ resp.dev_caps = dev->dev->caps.userspace_caps;
+ resp.qp_tab_size = dev->dev->caps.num_qps;
+ resp.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ resp.cqe_size = dev->dev->caps.cqe_size;
+ }
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context)
@@ -586,7 +595,11 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- err = ib_copy_to_udata(udata, &resp, sizeof resp);
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+ err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
+ else
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
kfree(context);
@@ -1342,7 +1355,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->pdev->dev;
- ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ if (dev->caps.userspace_caps)
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ else
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
+
ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e04cbc9a54a..dcd845bc30f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,6 +90,7 @@ struct mlx4_ib_xrcd {
struct mlx4_ib_cq_buf {
struct mlx4_buf buf;
struct mlx4_mtt mtt;
+ int entry_size;
};
struct mlx4_ib_cq_resize {
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 13beedeeef9..07e6769ef43 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -40,7 +40,9 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define MLX4_IB_UVERBS_ABI_VERSION 3
+
+#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
+#define MLX4_IB_UVERBS_ABI_VERSION 4
/*
* Make sure that all structs defined in this file remain laid out so
@@ -50,10 +52,18 @@
* instead.
*/
+struct mlx4_ib_alloc_ucontext_resp_v3 {
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+};
+
struct mlx4_ib_alloc_ucontext_resp {
+ __u32 dev_caps;
__u32 qp_tab_size;
__u16 bf_reg_size;
__u16 bf_regs_per_page;
+ __u32 cqe_size;
};
struct mlx4_ib_alloc_pd_resp {
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 5cac29e6bc1..33cc58941a3 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -532,6 +532,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
void nes_recheck_link_status(struct work_struct *work);
+void nes_terminate_timeout(unsigned long context);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index cfaacaf6bf5..22ea67eea5d 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -629,11 +629,9 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a
case SEND_RDMA_READ_ZERO:
default:
- if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
- printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
- __func__, __LINE__, cm_node->send_rdma0_op);
- WARN_ON(1);
- }
+ if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
+ WARN(1, "Unsupported RDMA0 len operation=%u\n",
+ cm_node->send_rdma0_op);
nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
@@ -671,7 +669,6 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core = cm_node->cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
- u32 was_timer_set;
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send)
@@ -723,12 +720,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
}
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
-
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = new_send->timetosend;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, new_send->timetosend);
return ret;
}
@@ -946,10 +939,8 @@ static void nes_cm_timer_tick(unsigned long pass)
}
if (settimer) {
- if (!timer_pending(&cm_core->tcp_timer)) {
- cm_core->tcp_timer.expires = nexttimeout;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, nexttimeout);
}
}
@@ -1314,8 +1305,6 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- u32 was_timer_set;
-
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
@@ -1325,11 +1314,8 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, (jiffies + NES_SHORT_TIME));
return 0;
}
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index fe7965ee409..67647e26461 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
-static void nes_terminate_timeout(unsigned long context);
static void nes_terminate_start_timer(struct nes_qp *nesqp);
#ifdef CONFIG_INFINIBAND_NES_DEBUG
@@ -3520,7 +3519,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
}
/* Timeout routine in case terminate fails to complete */
-static void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(unsigned long context)
{
struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
@@ -3530,11 +3529,7 @@ static void nes_terminate_timeout(unsigned long context)
/* Set a timer in case hw cannot complete the terminate sequence */
static void nes_terminate_start_timer(struct nes_qp *nesqp)
{
- init_timer(&nesqp->terminate_timer);
- nesqp->terminate_timer.function = nes_terminate_timeout;
- nesqp->terminate_timer.expires = jiffies + HZ;
- nesqp->terminate_timer.data = (unsigned long)nesqp;
- add_timer(&nesqp->terminate_timer);
+ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
}
/**
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 3ba7be36945..416645259b0 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -210,6 +210,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
}
while (1) {
+ if (skb_queue_empty(&nesqp->pau_list))
+ goto out;
+
seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
if (seq == nextseq) {
if (skb->len || processacks)
@@ -218,14 +221,13 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
goto out;
}
- if (skb->next == (struct sk_buff *)&nesqp->pau_list)
- goto out;
-
old_skb = skb;
skb = skb->next;
skb_unlink(old_skb, &nesqp->pau_list);
nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
nes_rem_ref_cm_node(nesqp->cm_node);
+ if (skb == (struct sk_buff *)&nesqp->pau_list)
+ goto out;
}
return skb;
@@ -245,7 +247,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
struct nes_rskb_cb *cb;
struct pau_fpdu_info *fpdu_info = NULL;
struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
- unsigned long flags;
u32 fpdu_len = 0;
u32 tmp_len;
int frag_cnt = 0;
@@ -260,12 +261,10 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
*pau_fpdu_info = NULL;
- spin_lock_irqsave(&nesqp->pau_lock, flags);
skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- }
+
cb = (struct nes_rskb_cb *)&skb->cb[0];
if (skb->len) {
fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
@@ -290,10 +289,9 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
skb = nes_get_next_skb(nesdev, nesqp, skb,
nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- } else if (rst_rcvd) {
+ if (rst_rcvd) {
/* rst received in the middle of fpdu */
for (; i >= 0; i--) {
skb_unlink(frags[i].skb, &nesqp->pau_list);
@@ -320,8 +318,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
frag_cnt = 1;
}
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-
/* Found one */
fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
if (fpdu_info == NULL) {
@@ -383,9 +379,8 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
if (frags[i].skb->len == 0) {
/* Pull skb off the list - it will be freed in the callback */
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- skb_unlink(frags[i].skb, &nesqp->pau_list);
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb_queue_empty(&nesqp->pau_list))
+ skb_unlink(frags[i].skb, &nesqp->pau_list);
} else {
/* Last skb still has data so update the seq */
iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
@@ -414,14 +409,18 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
struct pau_fpdu_info *fpdu_info;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
+ unsigned long flags;
u64 u64tmp;
u32 u32tmp;
int rc;
while (1) {
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
- if (fpdu_info == NULL)
+ if (rc || (fpdu_info == NULL)) {
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
return rc;
+ }
cqp_request = fpdu_info->cqp_request;
cqp_wqe = &cqp_request->cqp_wqe;
@@ -447,7 +446,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
lower_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
- upper_32_bits(u64tmp >> 32));
+ upper_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
lower_32_bits(fpdu_info->frags[0].physaddr));
@@ -475,6 +474,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
atomic_set(&cqp_request->refcount, 1);
nes_post_cqp_request(nesdev, cqp_request);
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}
return 0;
@@ -649,11 +649,9 @@ static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request
nesqp = qh_chg->nesqp;
/* Should we handle the bad completion */
- if (cqp_request->major_code) {
- printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
+ if (cqp_request->major_code)
+ WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
cqp_request->major_code);
- WARN_ON(1);
- }
switch (nesqp->pau_state) {
case PAU_DEL_QH:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 0564be757d8..9542e1644a5 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -944,12 +944,13 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
- macaddr_high = ((u16) addr[0]) << 8;
- macaddr_high += (u16) addr[1];
- macaddr_low = ((u32) addr[2]) << 24;
- macaddr_low += ((u32) addr[3]) << 16;
- macaddr_low += ((u32) addr[4]) << 8;
- macaddr_low += (u32) addr[5];
+ macaddr_high = ((u8) addr[0]) << 8;
+ macaddr_high += (u8) addr[1];
+ macaddr_low = ((u8) addr[2]) << 24;
+ macaddr_low += ((u8) addr[3]) << 16;
+ macaddr_low += ((u8) addr[4]) << 8;
+ macaddr_low += (u8) addr[5];
+
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index cd0ecb215cc..07e4fbad987 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+ init_timer(&nesqp->terminate_timer);
+ nesqp->terminate_timer.function = nes_terminate_timeout;
+ nesqp->terminate_timer.data = (unsigned long)nesqp;
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return &nesqp->ibqp;
}
-
/**
* nes_clean_cq
*/
@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ibmr;
case IWNES_MEMREG_TYPE_QP:
case IWNES_MEMREG_TYPE_CQ:
+ if (!region->length) {
+ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
+ ib_umem_release(region);
+ return ERR_PTR(-EINVAL);
+ }
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 72ae63f0072..03103d2bd64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -752,6 +752,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
dev->trans_start = jiffies;
++tx->tx_head;
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10221f4080..a1bca70e20a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -615,8 +615,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
address->last_send = priv->tx_head;
++priv->tx_head;
- skb_orphan(skb);
+ skb_orphan(skb);
+ skb_dst_drop(skb);
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 922d845f76b..d5088ce7829 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -222,27 +222,29 @@ static int srp_new_cm_id(struct srp_target_port *target)
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
+ struct ib_cq *recv_cq, *send_cq;
+ struct ib_qp *qp;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
- target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
- if (IS_ERR(target->recv_cq)) {
- ret = PTR_ERR(target->recv_cq);
+ recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(recv_cq)) {
+ ret = PTR_ERR(recv_cq);
goto err;
}
- target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
- if (IS_ERR(target->send_cq)) {
- ret = PTR_ERR(target->send_cq);
+ send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(send_cq)) {
+ ret = PTR_ERR(send_cq);
goto err_recv_cq;
}
- ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -251,30 +253,41 @@ static int srp_create_target_ib(struct srp_target_port *target)
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->send_cq;
- init_attr->recv_cq = target->recv_cq;
+ init_attr->send_cq = send_cq;
+ init_attr->recv_cq = recv_cq;
- target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
- if (IS_ERR(target->qp)) {
- ret = PTR_ERR(target->qp);
+ qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto err_send_cq;
}
- ret = srp_init_qp(target, target->qp);
+ ret = srp_init_qp(target, qp);
if (ret)
goto err_qp;
+ if (target->qp)
+ ib_destroy_qp(target->qp);
+ if (target->recv_cq)
+ ib_destroy_cq(target->recv_cq);
+ if (target->send_cq)
+ ib_destroy_cq(target->send_cq);
+
+ target->qp = qp;
+ target->recv_cq = recv_cq;
+ target->send_cq = send_cq;
+
kfree(init_attr);
return 0;
err_qp:
- ib_destroy_qp(target->qp);
+ ib_destroy_qp(qp);
err_send_cq:
- ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(send_cq);
err_recv_cq:
- ib_destroy_cq(target->recv_cq);
+ ib_destroy_cq(recv_cq);
err:
kfree(init_attr);
@@ -289,6 +302,9 @@ static void srp_free_target_ib(struct srp_target_port *target)
ib_destroy_cq(target->send_cq);
ib_destroy_cq(target->recv_cq);
+ target->qp = NULL;
+ target->send_cq = target->recv_cq = NULL;
+
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
for (i = 0; i < SRP_SQ_SIZE; ++i)
@@ -428,34 +444,50 @@ static int srp_send_req(struct srp_target_port *target)
return status;
}
-static void srp_disconnect_target(struct srp_target_port *target)
+static bool srp_queue_remove_work(struct srp_target_port *target)
{
- /* XXX should send SRP_I_LOGOUT request */
+ bool changed = false;
- init_completion(&target->done);
- if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
- shost_printk(KERN_DEBUG, target->scsi_host,
- PFX "Sending CM DREQ failed\n");
- return;
+ spin_lock_irq(&target->lock);
+ if (target->state != SRP_TARGET_REMOVED) {
+ target->state = SRP_TARGET_REMOVED;
+ changed = true;
}
- wait_for_completion(&target->done);
+ spin_unlock_irq(&target->lock);
+
+ if (changed)
+ queue_work(system_long_wq, &target->remove_work);
+
+ return changed;
}
-static bool srp_change_state(struct srp_target_port *target,
- enum srp_target_state old,
- enum srp_target_state new)
+static bool srp_change_conn_state(struct srp_target_port *target,
+ bool connected)
{
bool changed = false;
spin_lock_irq(&target->lock);
- if (target->state == old) {
- target->state = new;
+ if (target->connected != connected) {
+ target->connected = connected;
changed = true;
}
spin_unlock_irq(&target->lock);
+
return changed;
}
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+ if (srp_change_conn_state(target, false)) {
+ /* XXX should send SRP_I_LOGOUT request */
+
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM DREQ failed\n");
+ }
+ }
+}
+
static void srp_free_req_data(struct srp_target_port *target)
{
struct ib_device *ibdev = target->srp_host->srp_dev->dev;
@@ -489,32 +521,50 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
device_remove_file(&shost->shost_dev, *attr);
}
-static void srp_remove_work(struct work_struct *work)
+static void srp_remove_target(struct srp_target_port *target)
{
- struct srp_target_port *target =
- container_of(work, struct srp_target_port, work);
-
- if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
- return;
-
- spin_lock(&target->srp_host->target_lock);
- list_del(&target->list);
- spin_unlock(&target->srp_host->target_lock);
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
+ srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
srp_free_req_data(target);
scsi_host_put(target->scsi_host);
}
+static void srp_remove_work(struct work_struct *work)
+{
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, remove_work);
+
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
+
+ srp_remove_target(target);
+}
+
+static void srp_rport_delete(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+
+ srp_queue_remove_work(target);
+}
+
static int srp_connect_target(struct srp_target_port *target)
{
int retries = 3;
int ret;
+ WARN_ON_ONCE(target->connected);
+
+ target->qp_in_error = false;
+
ret = srp_lookup_path(target);
if (ret)
return ret;
@@ -534,6 +584,7 @@ static int srp_connect_target(struct srp_target_port *target)
*/
switch (target->status) {
case 0:
+ srp_change_conn_state(target, true);
return 0;
case SRP_PORT_REDIRECT:
@@ -646,13 +697,14 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
- struct ib_qp_attr qp_attr;
- struct ib_wc wc;
+ struct Scsi_Host *shost = target->scsi_host;
int i, ret;
- if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
+ if (target->state != SRP_TARGET_LIVE)
return -EAGAIN;
+ scsi_target_block(&shost->shost_gendev);
+
srp_disconnect_target(target);
/*
* Now get a new local CM ID so that we avoid confusing the
@@ -660,21 +712,11 @@ static int srp_reconnect_target(struct srp_target_port *target)
*/
ret = srp_new_cm_id(target);
if (ret)
- goto err;
+ goto unblock;
- qp_attr.qp_state = IB_QPS_RESET;
- ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
- if (ret)
- goto err;
-
- ret = srp_init_qp(target, target->qp);
+ ret = srp_create_target_ib(target);
if (ret)
- goto err;
-
- while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
- ; /* nothing */
- while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
- ; /* nothing */
+ goto unblock;
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
@@ -686,13 +728,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
- target->qp_in_error = 0;
ret = srp_connect_target(target);
+
+unblock:
+ scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+ SDEV_TRANSPORT_OFFLINE);
+
if (ret)
goto err;
- if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
- ret = -EAGAIN;
+ shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
return ret;
@@ -705,17 +750,8 @@ err:
* However, we have to defer the real removal because we
* are in the context of the SCSI error handler now, which
* will deadlock if we call scsi_remove_host().
- *
- * Schedule our work inside the lock to avoid a race with
- * the flush_scheduled_work() in srp_remove_one().
*/
- spin_lock_irq(&target->lock);
- if (target->state == SRP_TARGET_CONNECTING) {
- target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work);
- queue_work(ib_wq, &target->work);
- }
- spin_unlock_irq(&target->lock);
+ srp_queue_remove_work(target);
return ret;
}
@@ -1262,6 +1298,19 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
+static void srp_handle_qp_err(enum ib_wc_status wc_status,
+ enum ib_wc_opcode wc_opcode,
+ struct srp_target_port *target)
+{
+ if (target->connected && !target->qp_in_error) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %d\n",
+ wc_opcode & IB_WC_RECV ? "receive" : "send",
+ wc_status);
+ }
+ target->qp_in_error = true;
+}
+
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
@@ -1269,15 +1318,11 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed receive status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ srp_handle_recv(target, &wc);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- srp_handle_recv(target, &wc);
}
}
@@ -1288,16 +1333,12 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
struct srp_iu *iu;
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed send status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
- list_add(&iu->list, &target->free_tx);
}
}
@@ -1311,16 +1352,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
unsigned long flags;
int len;
- if (target->state == SRP_TARGET_CONNECTING)
- goto err;
-
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED) {
- scmnd->result = DID_BAD_TARGET << 16;
- scmnd->scsi_done(scmnd);
- return 0;
- }
-
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
@@ -1377,7 +1408,6 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
-err:
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1419,6 +1449,33 @@ err:
return -ENOMEM;
}
+static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
+{
+ uint64_t T_tr_ns, max_compl_time_ms;
+ uint32_t rq_tmo_jiffies;
+
+ /*
+ * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
+ * table 91), both the QP timeout and the retry count have to be set
+ * for RC QP's during the RTR to RTS transition.
+ */
+ WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
+ (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
+
+ /*
+ * Set target->rq_tmo_jiffies to one second more than the largest time
+ * it can take before an error completion is generated. See also
+ * C9-140..142 in the IBTA spec for more information about how to
+ * convert the QP Local ACK Timeout value to nanoseconds.
+ */
+ T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
+ max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
+ do_div(max_compl_time_ms, NSEC_PER_MSEC);
+ rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
+
+ return rq_tmo_jiffies;
+}
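As a rough worked example of the computation above (illustrative QP attributes, not values taken from this patch): with qp_attr->timeout = 14 and qp_attr->retry_cnt = 7,

	T_tr_ns           = 4096 * (1ULL << 14)              /* 67,108,864 ns, ~67 ms */
	max_compl_time_ms = 7 * 4 * 67108864 / NSEC_PER_MSEC /* ~1879 ms */
	rq_tmo_jiffies    = msecs_to_jiffies(1879 + 1000)    /* ~2.9 seconds */

which srp_slave_configure() below then raises to at least 30 * HZ for disk devices.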
+
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_login_rsp *lrsp,
struct srp_target_port *target)
@@ -1478,6 +1535,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
if (ret)
goto error_free;
@@ -1599,6 +1658,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
case IB_CM_DREQ_RECEIVED:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "DREQ received - connection closed\n");
+ srp_change_conn_state(target, false);
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
@@ -1608,7 +1668,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
- comp = 1;
target->status = 0;
break;
@@ -1636,10 +1695,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED)
- return -1;
-
init_completion(&target->tsk_mgmt_done);
spin_lock_irq(&target->lock);
@@ -1729,6 +1784,21 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return ret;
}
+static int srp_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct request_queue *q = sdev->request_queue;
+ unsigned long timeout;
+
+ if (sdev->type == TYPE_DISK) {
+ timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
+ blk_queue_rq_timeout(q, timeout);
+ }
+
+ return 0;
+}
+
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1861,6 +1931,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.eh_abort_handler = srp_abort,
@@ -1894,11 +1965,14 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
return PTR_ERR(rport);
}
+ rport->lld_data = target;
+
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE;
+ target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
@@ -2188,6 +2262,7 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+ INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
@@ -2232,7 +2307,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_free_ib;
- target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret) {
shost_printk(KERN_ERR, target->scsi_host,
@@ -2422,8 +2496,7 @@ static void srp_remove_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct srp_host *host, *tmp_host;
- LIST_HEAD(target_list);
- struct srp_target_port *target, *tmp_target;
+ struct srp_target_port *target;
srp_dev = ib_get_client_data(device, &srp_client);
@@ -2436,35 +2509,17 @@ static void srp_remove_one(struct ib_device *device)
wait_for_completion(&host->released);
/*
- * Mark all target ports as removed, so we stop queueing
- * commands and don't try to reconnect.
+ * Remove all target ports.
*/
spin_lock(&host->target_lock);
- list_for_each_entry(target, &host->target_list, list) {
- spin_lock_irq(&target->lock);
- target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(&target->lock);
- }
+ list_for_each_entry(target, &host->target_list, list)
+ srp_queue_remove_work(target);
spin_unlock(&host->target_lock);
/*
- * Wait for any reconnection tasks that may have
- * started before we marked our target ports as
- * removed, and any target port removal tasks.
+ * Wait for target port removal tasks.
*/
- flush_workqueue(ib_wq);
-
- list_for_each_entry_safe(target, tmp_target,
- &host->target_list, list) {
- srp_del_scsi_host_attr(target->scsi_host);
- srp_remove_host(target->scsi_host);
- scsi_remove_host(target->scsi_host);
- srp_disconnect_target(target);
- ib_destroy_cm_id(target->cm_id);
- srp_free_target_ib(target);
- srp_free_req_data(target);
- scsi_host_put(target->scsi_host);
- }
+ flush_workqueue(system_long_wq);
kfree(host);
}
@@ -2478,6 +2533,7 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
+ .rport_delete = srp_rport_delete,
};
static int __init srp_init_module(void)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 020caf0c378..de2d0b3c0bf 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -80,9 +80,7 @@ enum {
enum srp_target_state {
SRP_TARGET_LIVE,
- SRP_TARGET_CONNECTING,
- SRP_TARGET_DEAD,
- SRP_TARGET_REMOVED
+ SRP_TARGET_REMOVED,
};
enum srp_iu_type {
@@ -163,6 +161,9 @@ struct srp_target_port {
struct ib_sa_query *path_query;
int path_query_id;
+ u32 rq_tmo_jiffies;
+ bool connected;
+
struct ib_cm_id *cm_id;
int max_ti_iu_len;
@@ -173,12 +174,12 @@ struct srp_target_port {
struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
- struct work_struct work;
+ struct work_struct remove_work;
struct list_head list;
struct completion done;
int status;
- int qp_in_error;
+ bool qp_in_error;
struct completion tsk_mgmt_done;
u8 tsk_mgmt_status;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index cf23c46185b..c09d41b1a2f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- kref_init(&ioctx->kref);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rbuf = 0;
@@ -1291,39 +1290,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
-
- BUG_ON(!ioctx);
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- transport_generic_free_cmd(&ioctx->cmd, 0);
-
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
- }
-
- spin_lock_irqsave(&ch->spinlock, flags);
- list_add(&ioctx->free_list, &ch->free_list);
- spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
- srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
-/**
* srpt_abort_cmd() - Abort a SCSI command.
* @ioctx: I/O context associated with the SCSI command.
* @context: Preferred execution context.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (state == SRPT_STATE_DONE)
+ if (state == SRPT_STATE_DONE) {
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ BUG_ON(ch->sess == NULL);
+
+ target_put_sess_cmd(ch->sess, &ioctx->cmd);
goto out;
+ }
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
default:
WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
&& state != SRPT_STATE_DONE))
pr_debug("state = %d\n", state);
- if (state != SRPT_STATE_DONE)
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- else
+ if (state != SRPT_STATE_DONE) {
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ } else {
printk(KERN_ERR "IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
+ }
}
/**
@@ -1712,10 +1686,10 @@ out_err:
static int srpt_check_stop_free(struct se_cmd *cmd)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
/**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
- int ret;
+ sense_reason_t ret;
+ int rc;
BUG_ON(!send_ioctx);
srp_cmd = recv_ioctx->ioctx.buf;
- kref_get(&send_ioctx->kref);
cmd = &send_ioctx->cmd;
send_ioctx->tag = srp_cmd->tag;
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
- ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
- if (ret) {
+ if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
}
- cmd->data_length = data_len;
- cmd->data_direction = dir;
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
sizeof(srp_cmd->lun));
- if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+ &send_ioctx->sense_data[0], unpacked_lun, data_len,
+ MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
}
- ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
- if (ret < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
- if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
- srpt_queue_status(cmd);
- return 0;
- } else
- goto send_sense;
- }
-
- transport_handle_cdb_direct(cmd);
return 0;
send_sense:
- transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
- 0);
+ transport_send_check_condition_and_sense(cmd, ret, 0);
return -1;
}
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
{
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
+ struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
+ uint32_t tag = 0;
int tcm_tmr;
- int res;
+ int rc;
BUG_ON(!send_ioctx);
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto process_tmr;
- }
- res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
- if (res < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
- goto process_tmr;
+ goto fail;
}
-
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
- res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
- if (res) {
- pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
- goto process_tmr;
- }
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
- srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
- kref_get(&send_ioctx->kref);
- if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
- transport_generic_handle_tmr(&send_ioctx->cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+ rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+ if (rc < 0) {
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_DOES_NOT_EXIST;
+ goto fail;
+ }
+ tag = srp_tsk->task_tag;
+ }
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+ srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto fail;
+ }
+ return;
+fail:
+ transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
}
- transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG,
- send_ioctx->sense_data);
-
switch (srp_cmd->opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
sdev = ch->sport->sdev;
BUG_ON(!sdev);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
+ se_sess = ch->sess;
+ BUG_ON(!se_sess);
+
+ target_wait_for_sess_cmds(se_sess, 0);
+
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
ch->sess = NULL;
srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
+ struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ unsigned long flags;
+
+ WARN_ON(ioctx->state != SRPT_STATE_DONE);
+ WARN_ON(ioctx->mapped_sg_count != 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
}
/**
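The ib_srpt side drops its private kref in favour of the target core's per-session command references: commands and TMRs are submitted with target_submit_cmd()/target_submit_tmr() passing TARGET_SCF_ACK_KREF, completion and abort paths call target_put_sess_cmd(), final cleanup moves into the .release_cmd callback, and srpt_release_channel_work() drains outstanding commands with target_wait_for_sess_cmds() before deregistering the session. A sketch of that reference flow, using demo_* stand-ins rather than the real srpt types:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

struct demo_ch {
        struct se_session       *sess;
        spinlock_t              spinlock;
        struct list_head        free_list;
};

struct demo_ioctx {
        struct se_cmd           cmd;
        struct demo_ch          *ch;
        struct list_head        free_list;
};

/* Completion path: drop the ACK_KREF reference taken at submit time. */
static void demo_response_sent(struct demo_ioctx *ioctx)
{
        target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}

/* .release_cmd fabric callback: last reference gone, recycle the context. */
static void demo_release_cmd(struct se_cmd *se_cmd)
{
        struct demo_ioctx *ioctx =
                container_of(se_cmd, struct demo_ioctx, cmd);
        unsigned long flags;

        spin_lock_irqsave(&ioctx->ch->spinlock, flags);
        list_add(&ioctx->free_list, &ioctx->ch->free_list);
        spin_unlock_irqrestore(&ioctx->ch->spinlock, flags);
}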
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 61e52b83081..4caf55cda7b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
- struct kref kref;
struct rdma_iu *rdma_ius;
struct srp_direct_buf *rbufs;
struct srp_direct_buf single_rbuf;
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index daceafe7ee7..fa7a95c1da0 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -57,7 +57,7 @@ static const struct pci_device_id emu_tbl[] = {
MODULE_DEVICE_TABLE(pci, emu_tbl);
-static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct emu *emu;
struct gameport *port;
@@ -107,7 +107,7 @@ static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id
return error;
}
-static void __devexit emu_remove(struct pci_dev *pdev)
+static void emu_remove(struct pci_dev *pdev)
{
struct emu *emu = pci_get_drvdata(pdev);
@@ -122,7 +122,7 @@ static struct pci_driver emu_driver = {
.name = "Emu10k1_gameport",
.id_table = emu_tbl,
.probe = emu_probe,
- .remove = __devexit_p(emu_remove),
+ .remove = emu_remove,
};
module_pci_driver(emu_driver);
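This driver and most of the input patches that follow are part of the tree-wide removal of __devinit/__devexit/__devexit_p(): with CONFIG_HOTPLUG on its way out those annotations no longer discard any code, so probe and remove callbacks become ordinary functions. Every converted PCI driver ends up with the same shape; all names below are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return 0;                       /* no __devinit */
}

static void demo_remove(struct pci_dev *pdev)
{
                                        /* no __devexit */
}

static struct pci_driver demo_driver = {
        .name   = "demo_pci_driver",
        .probe  = demo_probe,
        .remove = demo_remove,          /* no __devexit_p() wrapper */
};
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");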
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 48ad3829ff2..ae912d3aee4 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -78,7 +78,7 @@ static int fm801_gp_open(struct gameport *gameport, int mode)
return 0;
}
-static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
+static int fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct fm801_gp *gp;
struct gameport *port;
@@ -129,7 +129,7 @@ static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device
return error;
}
-static void __devexit fm801_gp_remove(struct pci_dev *pci)
+static void fm801_gp_remove(struct pci_dev *pci)
{
struct fm801_gp *gp = pci_get_drvdata(pci);
@@ -150,7 +150,7 @@ static struct pci_driver fm801_gp_driver = {
.name = "FM801_gameport",
.id_table = fm801_gp_id_table,
.probe = fm801_gp_probe,
- .remove = __devexit_p(fm801_gp_remove),
+ .remove = fm801_gp_remove,
};
module_pci_driver(fm801_gp_driver);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 1abbc170d8b..47a6009dbf4 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -194,7 +194,7 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
if (!mt)
return;
- oldest = 0;
+ oldest = NULL;
oldid = mt->trkid;
count = 0;
@@ -251,7 +251,7 @@ void input_mt_sync_frame(struct input_dev *dev)
if (mt->flags & INPUT_MT_DROP_UNUSED) {
for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
- if (s->frame == mt->frame)
+ if (input_mt_is_used(mt, s))
continue;
input_mt_slot(dev, s - mt->slots);
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 53a0ddee787..ce01332f7b3 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -534,8 +534,11 @@ EXPORT_SYMBOL(input_grab_device);
static void __input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
+ struct input_handle *grabber;
- if (dev->grab == handle) {
+ grabber = rcu_dereference_protected(dev->grab,
+ lockdep_is_held(&dev->mutex));
+ if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
/* Make sure input_pass_event() notices that grab is gone */
synchronize_rcu();
@@ -1723,7 +1726,7 @@ EXPORT_SYMBOL_GPL(input_class);
/**
* input_allocate_device - allocate memory for new input device
*
- * Returns prepared struct input_dev or NULL.
+ * Returns prepared struct input_dev or %NULL.
*
* NOTE: Use input_free_device() to free devices that have not been
* registered; input_unregister_device() should be used for already
@@ -1750,6 +1753,70 @@ struct input_dev *input_allocate_device(void)
}
EXPORT_SYMBOL(input_allocate_device);
+struct input_devres {
+ struct input_dev *input;
+};
+
+static int devm_input_device_match(struct device *dev, void *res, void *data)
+{
+ struct input_devres *devres = res;
+
+ return devres->input == data;
+}
+
+static void devm_input_device_release(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: dropping reference to %s\n",
+ __func__, dev_name(&input->dev));
+ input_put_device(input);
+}
+
+/**
+ * devm_input_allocate_device - allocate managed input device
+ * @dev: device owning the input device being created
+ *
+ * Returns prepared struct input_dev or %NULL.
+ *
+ * Managed input devices do not need to be explicitly unregistered or
+ * freed as it will be done automatically when owner device unbinds from
+ * its driver (or binding fails). Once managed input device is allocated,
+ * it is ready to be set up and registered in the same fashion as regular
+ * input device. There are no special devm_input_device_[un]register()
+ * variants, regular ones work with both managed and unmanaged devices.
+ *
+ * NOTE: the owner device is set up as parent of input device and users
+ * should not override it.
+ */
+
+struct input_dev *devm_input_allocate_device(struct device *dev)
+{
+ struct input_dev *input;
+ struct input_devres *devres;
+
+ devres = devres_alloc(devm_input_device_release,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return NULL;
+
+ input = input_allocate_device();
+ if (!input) {
+ devres_free(devres);
+ return NULL;
+ }
+
+ input->dev.parent = dev;
+ input->devres_managed = true;
+
+ devres->input = input;
+ devres_add(dev, devres);
+
+ return input;
+}
+EXPORT_SYMBOL(devm_input_allocate_device);
+
/**
* input_free_device - free memory occupied by input_dev structure
* @dev: input device to free
@@ -1766,8 +1833,14 @@ EXPORT_SYMBOL(input_allocate_device);
*/
void input_free_device(struct input_dev *dev)
{
- if (dev)
+ if (dev) {
+ if (dev->devres_managed)
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_release,
+ devm_input_device_match,
+ dev));
input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_free_device);
@@ -1888,6 +1961,38 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
INPUT_CLEANSE_BITMASK(dev, SW, sw);
}
+static void __input_unregister_device(struct input_dev *dev)
+{
+ struct input_handle *handle, *next;
+
+ input_disconnect_device(dev);
+
+ mutex_lock(&input_mutex);
+
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
+
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
+
+ input_wakeup_procfs_readers();
+
+ mutex_unlock(&input_mutex);
+
+ device_del(&dev->dev);
+}
+
+static void devm_input_device_unregister(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: unregistering device %s\n",
+ __func__, dev_name(&input->dev));
+ __input_unregister_device(input);
+}
+
/**
* input_register_device - register device with input core
* @dev: device to be registered
@@ -1903,11 +2008,21 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
int input_register_device(struct input_dev *dev)
{
static atomic_t input_no = ATOMIC_INIT(0);
+ struct input_devres *devres = NULL;
struct input_handler *handler;
unsigned int packet_size;
const char *path;
int error;
+ if (dev->devres_managed) {
+ devres = devres_alloc(devm_input_device_unregister,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->input = dev;
+ }
+
/* Every input device generates EV_SYN/SYN_REPORT events. */
__set_bit(EV_SYN, dev->evbit);
@@ -1923,8 +2038,10 @@ int input_register_device(struct input_dev *dev)
dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
- if (!dev->vals)
- return -ENOMEM;
+ if (!dev->vals) {
+ error = -ENOMEM;
+ goto err_devres_free;
+ }
/*
* If delay and period are pre-set by the driver, then autorepeating
@@ -1949,7 +2066,7 @@ int input_register_device(struct input_dev *dev)
error = device_add(&dev->dev);
if (error)
- return error;
+ goto err_free_vals;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
pr_info("%s as %s\n",
@@ -1958,10 +2075,8 @@ int input_register_device(struct input_dev *dev)
kfree(path);
error = mutex_lock_interruptible(&input_mutex);
- if (error) {
- device_del(&dev->dev);
- return error;
- }
+ if (error)
+ goto err_device_del;
list_add_tail(&dev->node, &input_dev_list);
@@ -1972,7 +2087,21 @@ int input_register_device(struct input_dev *dev)
mutex_unlock(&input_mutex);
+ if (dev->devres_managed) {
+ dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
+ __func__, dev_name(&dev->dev));
+ devres_add(dev->dev.parent, devres);
+ }
return 0;
+
+err_device_del:
+ device_del(&dev->dev);
+err_free_vals:
+ kfree(dev->vals);
+ dev->vals = NULL;
+err_devres_free:
+ devres_free(devres);
+ return error;
}
EXPORT_SYMBOL(input_register_device);
@@ -1985,24 +2114,20 @@ EXPORT_SYMBOL(input_register_device);
*/
void input_unregister_device(struct input_dev *dev)
{
- struct input_handle *handle, *next;
-
- input_disconnect_device(dev);
-
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
-
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
-
- device_unregister(&dev->dev);
+ if (dev->devres_managed) {
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_unregister,
+ devm_input_device_match,
+ dev));
+ __input_unregister_device(dev);
+ /*
+ * We do not do input_put_device() here because it will be done
+ * when 2nd devres fires up.
+ */
+ } else {
+ __input_unregister_device(dev);
+ input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_unregister_device);
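The input core hunks above introduce devres-managed input devices: devm_input_allocate_device() ties the allocation to the owning device, input_register_device() adds a second devres entry that unregisters on unbind, and input_free_device()/input_unregister_device() detect the managed case. A hypothetical driver using the new API needs no explicit teardown; every "demo" name below is invented for illustration:

#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_keys_probe(struct platform_device *pdev)
{
        struct input_dev *input;
        int error;

        input = devm_input_allocate_device(&pdev->dev);
        if (!input)
                return -ENOMEM;

        input->name = "demo-keys";
        input_set_capability(input, EV_KEY, KEY_POWER);

        error = input_register_device(input);
        if (error)
                return error;   /* devres already frees the allocation */

        /*
         * No remove() cleanup needed: when the platform device unbinds,
         * the devres callbacks unregister and drop the input device.
         */
        return 0;
}

static struct platform_driver demo_keys_driver = {
        .probe  = demo_keys_probe,
        .driver = {
                .name   = "demo-keys",
                .owner  = THIS_MODULE,
        },
};
module_platform_driver(demo_keys_driver);

MODULE_LICENSE("GPL");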
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index c96653b5886..121cd63d333 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -85,7 +85,10 @@ static int as5011_i2c_write(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr, avalue };
struct i2c_msg msg = {
- client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+ .addr = client->addr,
+ .flags = I2C_M_IGNORE_NAK,
+ .len = 2,
+ .buf = (uint8_t *)data
};
int error;
@@ -98,8 +101,18 @@ static int as5011_i2c_read(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr };
struct i2c_msg msg_set[2] = {
- { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
- { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_REV_DIR_ADDR,
+ .len = 1,
+ .buf = (uint8_t *)data
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 1,
+ .buf = (uint8_t *)data
+ }
};
int error;
@@ -144,7 +157,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+static int as5011_configure_chip(struct as5011_device *as5011,
const struct as5011_platform_data *plat_dat)
{
struct i2c_client *client = as5011->i2c_client;
@@ -212,8 +225,8 @@ static int __devinit as5011_configure_chip(struct as5011_device *as5011,
return 0;
}
-static int __devinit as5011_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int as5011_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct as5011_platform_data *plat_data;
struct as5011_device *as5011;
@@ -328,7 +341,7 @@ err_free_mem:
return error;
}
-static int __devexit as5011_remove(struct i2c_client *client)
+static int as5011_remove(struct i2c_client *client)
{
struct as5011_device *as5011 = i2c_get_clientdata(client);
@@ -353,7 +366,7 @@ static struct i2c_driver as5011_driver = {
.name = "as5011",
},
.probe = as5011_probe,
- .remove = __devexit_p(as5011_remove),
+ .remove = as5011_remove,
.id_table = as5011_id,
};
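The as5011 hunks are purely cosmetic: the positional struct i2c_msg initializers become designated initializers, so a change in the header's field order can no longer silently break the messages. The same style applied to a generic one-byte register read (hypothetical helper, standard I2C flags only):

#include <linux/i2c.h>
#include <linux/kernel.h>

static int demo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                {
                        .addr   = client->addr,
                        .flags  = 0,            /* write register address */
                        .len    = 1,
                        .buf    = &reg,
                },
                {
                        .addr   = client->addr,
                        .flags  = I2C_M_RD,     /* read one byte back */
                        .len    = 1,
                        .buf    = val,
                },
        };
        int ret;

        ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
        if (ret < 0)
                return ret;

        return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}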
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index 77cfde571bd..59c10ec5a2a 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -78,7 +78,7 @@ static void dc_pad_close(struct input_dev *dev)
}
/* allow the controller to be used */
-static int __devinit probe_maple_controller(struct device *dev)
+static int probe_maple_controller(struct device *dev)
{
static const short btn_bit[32] = {
BTN_C, BTN_B, BTN_A, BTN_START, -1, -1, -1, -1,
@@ -157,7 +157,7 @@ fail:
return error;
}
-static int __devexit remove_maple_controller(struct device *dev)
+static int remove_maple_controller(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_pad *pad = maple_get_drvdata(mdev);
@@ -175,7 +175,7 @@ static struct maple_driver dc_pad_driver = {
.drv = {
.name = "Dreamcast_controller",
.probe = probe_maple_controller,
- .remove = __devexit_p(remove_maple_controller),
+ .remove = remove_maple_controller,
},
};
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 4dfa1eed4b7..f8f892b076e 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev)
struct walkera_dev *w = input_get_drvdata(dev);
parport_disable_irq(w->parport);
+ hrtimer_cancel(&w->timer);
}
static int walkera0701_connect(struct walkera_dev *w, int parport)
@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
if (parport_claim(w->pardevice))
goto init_err1;
+ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ w->timer.function = timer_handler;
+
w->input_dev = input_allocate_device();
if (!w->input_dev)
goto init_err2;
@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
if (err)
goto init_err3;
- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- w->timer.function = timer_handler;
return 0;
init_err3:
@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
static void walkera0701_disconnect(struct walkera_dev *w)
{
- hrtimer_cancel(&w->timer);
input_unregister_device(w->input_dev);
parport_release(w->pardevice);
parport_unregister_device(w->pardevice);
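The walkera0701 reordering is a lifetime fix: the hrtimer is initialized before the input device can be opened and is cancelled in close() rather than only at disconnect, so it never runs while nobody is listening. The pattern in isolation, not the driver's exact flow (demo_* names are placeholders):

#include <linux/hrtimer.h>
#include <linux/input.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
        /* Sample the hardware here, rearm with hrtimer_forward() if needed. */
        return HRTIMER_NORESTART;
}

static int demo_open(struct input_dev *dev)
{
        hrtimer_start(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
                      HRTIMER_MODE_REL);
        return 0;
}

static void demo_close(struct input_dev *dev)
{
        hrtimer_cancel(&demo_timer);    /* stop it whenever nobody listens */
}

static void demo_setup(void)
{
        /* Initialize before input_register_device() can call demo_open(). */
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
}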
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83811e45d63..d6cbfe9df21 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -118,11 +118,12 @@ static const struct xpad_device {
u8 xtype;
} xpad_device[] = {
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
- { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
- { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
@@ -136,9 +137,12 @@ static const struct xpad_device {
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
@@ -148,24 +152,28 @@ static const struct xpad_device {
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
- { 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
- { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -235,7 +243,7 @@ static const signed short xpad_abs_triggers[] = {
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
-static struct usb_device_id xpad_table [] = {
+static struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
@@ -248,10 +256,11 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
{ }
};
-MODULE_DEVICE_TABLE (usb, xpad_table);
+MODULE_DEVICE_TABLE(usb, xpad_table);
struct usb_xpad {
struct input_dev *dev; /* input device interface */
@@ -783,7 +792,7 @@ static int xpad_open(struct input_dev *dev)
struct usb_xpad *xpad = input_get_drvdata(dev);
/* URB was submitted in probe */
- if(xpad->xtype == XTYPE_XBOX360W)
+ if (xpad->xtype == XTYPE_XBOX360W)
return 0;
xpad->irq_in->dev = xpad->udev;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index de0874054e9..5a240c60342 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -134,7 +134,7 @@ config KEYBOARD_QT1070
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Atmel AT42QT2160 Touch
Sensor chip as a keyboard input.
@@ -409,7 +409,7 @@ config KEYBOARD_NEWTON
config KEYBOARD_NOMADIK
tristate "ST-Ericsson Nomadik SKE keyboard"
- depends on PLAT_NOMADIK
+ depends on (ARCH_NOMADIK || ARCH_U8500)
select INPUT_MATRIXKMAP
help
Say Y here if you want to use a keypad provided on the SKE controller
@@ -544,6 +544,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
+ depends on ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index e9e8674dfda..ef26b17fb15 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -69,7 +69,7 @@ static int adp5520_keys_notifier(struct notifier_block *nb,
return 0;
}
-static int __devinit adp5520_keys_probe(struct platform_device *pdev)
+static int adp5520_keys_probe(struct platform_device *pdev)
{
struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
struct input_dev *input;
@@ -182,7 +182,7 @@ err:
return ret;
}
-static int __devexit adp5520_keys_remove(struct platform_device *pdev)
+static int adp5520_keys_remove(struct platform_device *pdev)
{
struct adp5520_keys *dev = platform_get_drvdata(pdev);
@@ -200,7 +200,7 @@ static struct platform_driver adp5520_keys_driver = {
.owner = THIS_MODULE,
},
.probe = adp5520_keys_probe,
- .remove = __devexit_p(adp5520_keys_remove),
+ .remove = adp5520_keys_remove,
};
module_platform_driver(adp5520_keys_driver);
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b083bf10f13..dbd2047f164 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -145,7 +145,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
+static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
const struct adp5588_kpad_platform_data *pdata)
{
bool pin_used[ADP5588_MAXGPIO];
@@ -170,7 +170,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
-static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
+static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -224,7 +224,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
-static void __devexit adp5588_gpio_remove(struct adp5588_kpad *kpad)
+static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -319,7 +319,7 @@ static irqreturn_t adp5588_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5588_setup(struct i2c_client *client)
+static int adp5588_setup(struct i2c_client *client)
{
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
@@ -382,7 +382,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
return 0;
}
-static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
+static void adp5588_report_switch_state(struct adp5588_kpad *kpad)
{
int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1);
int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2);
@@ -420,8 +420,8 @@ static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
}
-static int __devinit adp5588_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5588_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
@@ -587,7 +587,7 @@ static int __devinit adp5588_probe(struct i2c_client *client,
return error;
}
-static int __devexit adp5588_remove(struct i2c_client *client)
+static int adp5588_remove(struct i2c_client *client)
{
struct adp5588_kpad *kpad = i2c_get_clientdata(client);
@@ -650,7 +650,7 @@ static struct i2c_driver adp5588_driver = {
#endif
},
.probe = adp5588_probe,
- .remove = __devexit_p(adp5588_remove),
+ .remove = adp5588_remove,
.id_table = adp5588_id,
};
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 74e60321338..67d12b3427c 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -464,7 +464,7 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
+static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
const struct adp5589_kpad_platform_data *pdata)
{
bool pin_used[ADP5589_MAXGPIO];
@@ -496,7 +496,7 @@ static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
return n_unused;
}
-static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
+static int adp5589_gpio_add(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -550,7 +550,7 @@ static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
return 0;
}
-static void __devexit adp5589_gpio_remove(struct adp5589_kpad *kpad)
+static void adp5589_gpio_remove(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -641,8 +641,7 @@ static irqreturn_t adp5589_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
- unsigned short key)
+static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
{
int i;
@@ -655,7 +654,7 @@ static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
return -EINVAL;
}
-static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
+static int adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
@@ -820,7 +819,7 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
return 0;
}
-static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
+static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
{
int gpi_stat_tmp, pin_loc;
int i;
@@ -860,8 +859,8 @@ static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
input_sync(kpad->input);
}
-static int __devinit adp5589_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5589_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5589_kpad *kpad;
const struct adp5589_kpad_platform_data *pdata =
@@ -1045,7 +1044,7 @@ err_free_mem:
return error;
}
-static int __devexit adp5589_remove(struct i2c_client *client)
+static int adp5589_remove(struct i2c_client *client)
{
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
@@ -1104,7 +1103,7 @@ static struct i2c_driver adp5589_driver = {
.pm = &adp5589_dev_pm_ops,
},
.probe = adp5589_probe,
- .remove = __devexit_p(adp5589_remove),
+ .remove = adp5589_remove,
.id_table = adp5589_id,
};
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 8eb9116e0a5..20b9fa91fb9 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -177,7 +177,7 @@ static irqreturn_t bfin_kpad_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_kpad_probe(struct platform_device *pdev)
+static int bfin_kpad_probe(struct platform_device *pdev)
{
struct bf54x_kpad *bf54x_kpad;
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
@@ -331,7 +331,7 @@ out:
return error;
}
-static int __devexit bfin_kpad_remove(struct platform_device *pdev)
+static int bfin_kpad_remove(struct platform_device *pdev)
{
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
@@ -390,7 +390,7 @@ static struct platform_driver bfin_kpad_device_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_kpad_probe,
- .remove = __devexit_p(bfin_kpad_remove),
+ .remove = bfin_kpad_remove,
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index d5bacbb479b..4e4e453ea15 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -303,7 +303,7 @@ fail1:
return error;
}
-static int __devexit davinci_ks_remove(struct platform_device *pdev)
+static int davinci_ks_remove(struct platform_device *pdev)
{
struct davinci_ks *davinci_ks = platform_get_drvdata(pdev);
@@ -326,7 +326,7 @@ static struct platform_driver davinci_ks_driver = {
.name = "davinci_keyscan",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(davinci_ks_remove),
+ .remove = davinci_ks_remove,
};
static int __init davinci_ks_init(void)
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7363402de8d..9857e8fd098 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -232,7 +232,7 @@ static int ep93xx_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
-static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
+static int ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
const struct matrix_keymap_data *keymap_data;
@@ -346,7 +346,7 @@ failed_free:
return err;
}
-static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
+static int ep93xx_keypad_remove(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -380,7 +380,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
- .remove = __devexit_p(ep93xx_keypad_remove),
+ .remove = ep93xx_keypad_remove,
};
module_platform_driver(ep93xx_keypad_driver);
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6a68041c261..d327f5a2bb0 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -423,10 +423,10 @@ out:
return IRQ_HANDLED;
}
-static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
- struct input_dev *input,
- struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+static int gpio_keys_setup_key(struct platform_device *pdev,
+ struct input_dev *input,
+ struct gpio_button_data *bdata,
+ const struct gpio_keys_button *button)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -440,21 +440,13 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (gpio_is_valid(button->gpio)) {
- error = gpio_request(button->gpio, desc);
+ error = gpio_request_one(button->gpio, GPIOF_IN, desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
- error = gpio_direction_input(button->gpio);
- if (error < 0) {
- dev_err(dev,
- "Failed to configure direction for GPIO %d, error %d\n",
- button->gpio, error);
- goto fail;
- }
-
if (button->debounce_interval) {
error = gpio_set_debounce(button->gpio,
button->debounce_interval * 1000);
@@ -526,12 +518,35 @@ fail:
return error;
}
+static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
+{
+ struct input_dev *input = ddata->input;
+ int i;
+
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_keys_gpio_report_event(bdata);
+ }
+ input_sync(input);
+}
+
static int gpio_keys_open(struct input_dev *input)
{
struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = ddata->pdata;
+ int error;
+
+ if (pdata->enable) {
+ error = pdata->enable(input->dev.parent);
+ if (error)
+ return error;
+ }
+
+ /* Report current state of buttons that are connected to GPIOs */
+ gpio_keys_report_state(ddata);
- return pdata->enable ? pdata->enable(input->dev.parent) : 0;
+ return 0;
}
static void gpio_keys_close(struct input_dev *input)
@@ -551,7 +566,7 @@ static void gpio_keys_close(struct input_dev *input)
/*
* Translate OpenFirmware node properties into platform_data
*/
-static struct gpio_keys_platform_data * __devinit
+static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
@@ -658,7 +673,7 @@ static void gpio_remove_key(struct gpio_button_data *bdata)
gpio_free(bdata->button->gpio);
}
-static int __devinit gpio_keys_probe(struct platform_device *pdev)
+static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -731,14 +746,6 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
goto fail3;
}
- /* get current state of buttons that are connected to GPIOs */
- for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
- }
- input_sync(input);
-
device_init_wakeup(&pdev->dev, wakeup);
return 0;
@@ -760,7 +767,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_keys_remove(struct platform_device *pdev)
+static int gpio_keys_remove(struct platform_device *pdev)
{
struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
struct input_dev *input = ddata->input;
@@ -788,6 +795,7 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
static int gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
int i;
if (device_may_wakeup(dev)) {
@@ -796,6 +804,11 @@ static int gpio_keys_suspend(struct device *dev)
if (bdata->button->wakeup)
enable_irq_wake(bdata->irq);
}
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users)
+ gpio_keys_close(input);
+ mutex_unlock(&input->mutex);
}
return 0;
@@ -804,18 +817,27 @@ static int gpio_keys_suspend(struct device *dev)
static int gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
+ int error = 0;
int i;
- for (i = 0; i < ddata->pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (bdata->button->wakeup && device_may_wakeup(dev))
- disable_irq_wake(bdata->irq);
-
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
+ if (device_may_wakeup(dev)) {
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (bdata->button->wakeup)
+ disable_irq_wake(bdata->irq);
+ }
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users)
+ error = gpio_keys_open(input);
+ mutex_unlock(&input->mutex);
}
- input_sync(ddata->input);
+ if (error)
+ return error;
+
+ gpio_keys_report_state(ddata);
return 0;
}
#endif
@@ -824,7 +846,7 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
- .remove = __devexit_p(gpio_keys_remove),
+ .remove = gpio_keys_remove,
.driver = {
.name = "gpio-keys",
.owner = THIS_MODULE,
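gpio_keys above folds gpio_request() plus gpio_direction_input() into a single gpio_request_one(gpio, GPIOF_IN, desc) call, reports the initial button state from open() instead of probe(), and closes/reopens the device across suspend when it is not a wakeup source. The request helper on its own (illustrative names):

#include <linux/gpio.h>
#include <linux/printk.h>

static int demo_claim_button_gpio(unsigned int gpio, const char *label)
{
        int error;

        /* Replaces gpio_request() followed by gpio_direction_input(). */
        error = gpio_request_one(gpio, GPIOF_IN, label);
        if (error < 0)
                pr_err("failed to request GPIO %u, error %d\n", gpio, error);

        return error;
}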
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index f2142de789e..f686fd97055 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -103,8 +103,7 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
}
#ifdef CONFIG_OF
-static struct gpio_keys_platform_data * __devinit
-gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
@@ -196,7 +195,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
}
#endif
-static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
+static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -246,7 +245,6 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input = poll_dev->input;
- input->evbit[0] = BIT(EV_KEY);
input->name = pdev->name;
input->phys = DRV_NAME"/input0";
input->dev.parent = &pdev->dev;
@@ -256,6 +254,10 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
for (i = 0; i < pdata->nbuttons; i++) {
struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
@@ -268,22 +270,14 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
goto err_free_gpio;
}
- error = gpio_request(gpio,
- button->desc ? button->desc : DRV_NAME);
+ error = gpio_request_one(gpio, GPIOF_IN,
+ button->desc ?: DRV_NAME);
if (error) {
dev_err(dev, "unable to claim gpio %u, err=%d\n",
gpio, error);
goto err_free_gpio;
}
- error = gpio_direction_input(gpio);
- if (error) {
- dev_err(dev,
- "unable to set direction on gpio %u, err=%d\n",
- gpio, error);
- goto err_free_gpio;
- }
-
bdata->can_sleep = gpio_cansleep(gpio);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
@@ -329,7 +323,7 @@ err_free_pdata:
return error;
}
-static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
+static int gpio_keys_polled_remove(struct platform_device *pdev)
{
struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
@@ -357,7 +351,7 @@ static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
- .remove = __devexit_p(gpio_keys_polled_remove),
+ .remove = gpio_keys_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 5f72440b50c..198dc07a1be 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -200,7 +200,7 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len)
/* initialize HIL */
-static int __devinit hil_keyb_init(void)
+static int hil_keyb_init(void)
{
unsigned char c;
unsigned int i, kbid;
@@ -286,7 +286,7 @@ err1:
return err;
}
-static void __devexit hil_keyb_exit(void)
+static void hil_keyb_exit(void)
{
if (HIL_IRQ)
free_irq(HIL_IRQ, hil_dev.dev_id);
@@ -299,7 +299,7 @@ static void __devexit hil_keyb_exit(void)
}
#if defined(CONFIG_PARISC)
-static int __devinit hil_probe_chip(struct parisc_device *dev)
+static int hil_probe_chip(struct parisc_device *dev)
{
/* Only allow one HIL keyboard */
if (hil_dev.dev)
@@ -320,7 +320,7 @@ static int __devinit hil_probe_chip(struct parisc_device *dev)
return hil_keyb_init();
}
-static int __devexit hil_remove_chip(struct parisc_device *dev)
+static int hil_remove_chip(struct parisc_device *dev)
{
hil_keyb_exit();
@@ -341,7 +341,7 @@ static struct parisc_driver hil_driver = {
.name = "hil",
.id_table = hil_tbl,
.probe = hil_probe_chip,
- .remove = __devexit_p(hil_remove_chip),
+ .remove = hil_remove_chip,
};
static int __init hil_init(void)
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index cdc252612c0..6d150e3e1f5 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -362,7 +362,8 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
writew(reg_val, keypad->mmio_base + KPSR);
/* Columns as open drain and disable all rows */
- writew(0xff00, keypad->mmio_base + KPCR);
+ reg_val = (keypad->cols_en_mask & 0xff) << 8;
+ writew(reg_val, keypad->mmio_base + KPCR);
}
static void imx_keypad_close(struct input_dev *dev)
@@ -413,7 +414,7 @@ open_err:
return -EIO;
}
-static int __devinit imx_keypad_probe(struct platform_device *pdev)
+static int imx_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
struct imx_keypad *keypad;
@@ -554,7 +555,7 @@ failed_rel_mem:
return error;
}
-static int __devexit imx_keypad_remove(struct platform_device *pdev)
+static int imx_keypad_remove(struct platform_device *pdev)
{
struct imx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -632,7 +633,7 @@ static struct platform_driver imx_keypad_driver = {
.pm = &imx_kbd_pm_ops,
},
.probe = imx_keypad_probe,
- .remove = __devexit_p(imx_keypad_remove),
+ .remove = imx_keypad_remove,
};
module_platform_driver(imx_keypad_driver);
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 24f3ea01c4d..74e75a6e8de 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -179,7 +179,7 @@ static void jornadakbd680_poll(struct input_polled_dev *dev)
memcpy(jornadakbd->old_scan, jornadakbd->new_scan, JORNADA_SCAN_SIZE);
}
-static int __devinit jornada680kbd_probe(struct platform_device *pdev)
+static int jornada680kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_polled_dev *poll_dev;
@@ -240,7 +240,7 @@ static int __devinit jornada680kbd_probe(struct platform_device *pdev)
}
-static int __devexit jornada680kbd_remove(struct platform_device *pdev)
+static int jornada680kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -258,7 +258,7 @@ static struct platform_driver jornada680kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada680kbd_probe,
- .remove = __devexit_p(jornada680kbd_remove),
+ .remove = jornada680kbd_remove,
};
module_platform_driver(jornada680kbd_driver);
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 9d639fa1afb..5ceef636df2 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -94,7 +94,7 @@ static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
+static int jornada720_kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_dev *input_dev;
@@ -152,7 +152,7 @@ static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
return err;
};
-static int __devexit jornada720_kbd_remove(struct platform_device *pdev)
+static int jornada720_kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -173,6 +173,6 @@ static struct platform_driver jornada720_kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada720_kbd_probe,
- .remove = __devexit_p(jornada720_kbd_remove),
+ .remove = jornada720_kbd_remove,
};
module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 39ac2787e27..93c81266213 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -624,7 +624,7 @@ static ssize_t lm8323_set_disable(struct device *dev,
}
static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
-static int __devinit lm8323_probe(struct i2c_client *client,
+static int lm8323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm8323_platform_data *pdata = client->dev.platform_data;
@@ -764,7 +764,7 @@ fail1:
return err;
}
-static int __devexit lm8323_remove(struct i2c_client *client)
+static int lm8323_remove(struct i2c_client *client)
{
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -846,7 +846,7 @@ static struct i2c_driver lm8323_i2c_driver = {
.pm = &lm8323_pm_ops,
},
.probe = lm8323_probe,
- .remove = __devexit_p(lm8323_remove),
+ .remove = lm8323_remove,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 081fd9effa8..5a8ca35dc9a 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -128,7 +128,7 @@ static irqreturn_t lm8333_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit lm8333_probe(struct i2c_client *client,
+static int lm8333_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct lm8333_platform_data *pdata = client->dev.platform_data;
@@ -202,7 +202,7 @@ static int __devinit lm8333_probe(struct i2c_client *client,
return err;
}
-static int __devexit lm8333_remove(struct i2c_client *client)
+static int lm8333_remove(struct i2c_client *client)
{
struct lm8333 *lm8333 = i2c_get_clientdata(client);
@@ -225,7 +225,7 @@ static struct i2c_driver lm8333_driver = {
.owner = THIS_MODULE,
},
.probe = lm8333_probe,
- .remove = __devexit_p(lm8333_remove),
+ .remove = lm8333_remove,
.id_table = lm8333_id,
};
module_i2c_driver(lm8333_driver);
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index b1ab29861e1..c94d610b9d7 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -46,7 +46,7 @@ MODULE_LICENSE("GPL");
#define KEY_CENTER KEY_F15
static const unsigned char
-locomokbd_keycode[LOCOMOKBD_NUMKEYS] __devinitconst = {
+locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
0, KEY_ESC, KEY_ACTIVITY, 0, 0, 0, 0, 0, 0, 0, /* 0 - 9 */
0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_HOME, KEY_CONTACT, /* 10 - 19 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 29 */
@@ -236,7 +236,7 @@ static void locomokbd_close(struct input_dev *dev)
locomo_writel(r, locomokbd->base + LOCOMO_KIC);
}
-static int __devinit locomokbd_probe(struct locomo_dev *dev)
+static int locomokbd_probe(struct locomo_dev *dev)
{
struct locomokbd *locomokbd;
struct input_dev *input_dev;
@@ -321,7 +321,7 @@ static int __devinit locomokbd_probe(struct locomo_dev *dev)
return err;
}
-static int __devexit locomokbd_remove(struct locomo_dev *dev)
+static int locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
@@ -345,7 +345,7 @@ static struct locomo_driver keyboard_driver = {
},
.devid = LOCOMO_DEVID_KEYBOARD,
.probe = locomokbd_probe,
- .remove = __devexit_p(locomokbd_remove),
+ .remove = locomokbd_remove,
};
static int __init locomokbd_init(void)
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index dd786c8a758..1b8add6cfb9 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -139,7 +139,7 @@ static void lpc32xx_kscan_close(struct input_dev *dev)
clk_disable_unprepare(kscandat->clk);
}
-static int __devinit lpc32xx_parse_dt(struct device *dev,
+static int lpc32xx_parse_dt(struct device *dev,
struct lpc32xx_kscan_drv *kscandat)
{
struct device_node *np = dev->of_node;
@@ -166,7 +166,7 @@ static int __devinit lpc32xx_parse_dt(struct device *dev,
return 0;
}
-static int __devinit lpc32xx_kscan_probe(struct platform_device *pdev)
+static int lpc32xx_kscan_probe(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat;
struct input_dev *input;
@@ -310,7 +310,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_kscan_remove(struct platform_device *pdev)
+static int lpc32xx_kscan_remove(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = __devexit_p(lpc32xx_kscan_remove),
+ .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 18b72372028..f4ff0dda759 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -23,6 +23,9 @@
#include <linux/gpio.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
struct matrix_keypad {
const struct matrix_keypad_platform_data *pdata;
@@ -37,8 +40,6 @@ struct matrix_keypad {
bool scan_pending;
bool stopped;
bool gpio_all_disabled;
-
- unsigned short keycodes[];
};
/*
@@ -118,6 +119,7 @@ static void matrix_keypad_scan(struct work_struct *work)
struct matrix_keypad *keypad =
container_of(work, struct matrix_keypad, work.work);
struct input_dev *input_dev = keypad->input_dev;
+ const unsigned short *keycodes = input_dev->keycode;
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
@@ -153,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work)
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev,
- keypad->keycodes[code],
+ keycodes[code],
new_state[col] & (1 << row));
}
}
@@ -299,8 +301,8 @@ static int matrix_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
matrix_keypad_suspend, matrix_keypad_resume);
-static int __devinit matrix_keypad_init_gpio(struct platform_device *pdev,
- struct matrix_keypad *keypad)
+static int matrix_keypad_init_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i, err;
@@ -394,33 +396,95 @@ static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
gpio_free(pdata->col_gpios[i]);
}
-static int __devinit matrix_keypad_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ struct matrix_keypad_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+ unsigned int *gpios;
+ int i;
+
+ if (!np) {
+ dev_err(dev, "device lacks DT data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->num_row_gpios = of_gpio_named_count(np, "row-gpios");
+ pdata->num_col_gpios = of_gpio_named_count(np, "col-gpios");
+ if (!pdata->num_row_gpios || !pdata->num_col_gpios) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_get_property(np, "linux,no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,wakeup", NULL))
+ pdata->wakeup = true;
+ if (of_get_property(np, "gpio-activelow", NULL))
+ pdata->active_low = true;
+
+ of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms);
+ of_property_read_u32(np, "col-scan-delay-us",
+ &pdata->col_scan_delay_us);
+
+ gpios = devm_kzalloc(dev,
+ sizeof(unsigned int) *
+ (pdata->num_row_gpios + pdata->num_col_gpios),
+ GFP_KERNEL);
+ if (!gpios) {
+ dev_err(dev, "could not allocate memory for gpios\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpios[pdata->num_row_gpios + i] =
+ of_get_named_gpio(np, "col-gpios", i);
+
+ pdata->row_gpios = gpios;
+ pdata->col_gpios = &gpios[pdata->num_row_gpios];
+
+ return pdata;
+}
+#else
+static inline struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data defined\n");
+
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int matrix_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
- const struct matrix_keymap_data *keymap_data;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
- unsigned int row_shift;
- size_t keymap_size;
int err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- keymap_data = pdata->keymap_data;
- if (!keymap_data) {
+ pdata = matrix_keypad_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return PTR_ERR(pdata);
+ }
+ } else if (!pdata->keymap_data) {
dev_err(&pdev->dev, "no keymap data defined\n");
return -EINVAL;
}
- row_shift = get_count_order(pdata->num_col_gpios);
- keymap_size = (pdata->num_row_gpios << row_shift) *
- sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(struct matrix_keypad) + keymap_size,
- GFP_KERNEL);
+ keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
input_dev = input_allocate_device();
if (!keypad || !input_dev) {
err = -ENOMEM;
@@ -429,7 +493,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
keypad->pdata = pdata;
- keypad->row_shift = row_shift;
+ keypad->row_shift = get_count_order(pdata->num_col_gpios);
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
spin_lock_init(&keypad->lock);
@@ -440,12 +504,14 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- err = matrix_keypad_build_keymap(keymap_data, NULL,
+ err = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
pdata->num_row_gpios,
pdata->num_col_gpios,
- keypad->keycodes, input_dev);
- if (err)
+ NULL, input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
goto err_free_mem;
+ }
if (!pdata->no_autorepeat)
__set_bit(EV_REP, input_dev->evbit);
@@ -473,7 +539,7 @@ err_free_mem:
return err;
}
-static int __devexit matrix_keypad_remove(struct platform_device *pdev)
+static int matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
@@ -488,13 +554,22 @@ static int __devexit matrix_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id matrix_keypad_dt_match[] = {
+ { .compatible = "gpio-matrix-keypad" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
+#endif
+
static struct platform_driver matrix_keypad_driver = {
.probe = matrix_keypad_probe,
- .remove = __devexit_p(matrix_keypad_remove),
+ .remove = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
.owner = THIS_MODULE,
.pm = &matrix_keypad_pm_ops,
+ .of_match_table = of_match_ptr(matrix_keypad_dt_match),
},
};
module_platform_driver(matrix_keypad_driver);
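Two details carry the matrix_keypad conversion above: the driver no longer embeds its own keycodes[] array, because passing a NULL keymap to matrix_keypad_build_keymap() makes the core allocate a managed table and attach it to input_dev->keycode, and the scan path then reads keycodes back through the input device. A compressed sketch of that flow, with hypothetical foo_* helpers standing in for the driver code:

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static int foo_build_keymap(struct input_dev *input,
			    const struct matrix_keymap_data *keymap_data,
			    unsigned int rows, unsigned int cols)
{
	/* NULL keymap: the helper allocates a managed table itself */
	return matrix_keypad_build_keymap(keymap_data, NULL,
					  rows, cols, NULL, input);
}

static void foo_report_key(struct input_dev *input, unsigned int row,
			   unsigned int col, unsigned int row_shift,
			   bool pressed)
{
	/* the keymap now lives on the input device, not in driver data */
	const unsigned short *keycodes = input->keycode;
	unsigned int code = MATRIX_SCAN_CODE(row, col, row_shift);

	input_event(input, EV_MSC, MSC_SCAN, code);
	input_report_key(input, keycodes[code], pressed);
	input_sync(input);
}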
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 8edada8ae71..7c7af2b01e6 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -179,7 +179,7 @@ static void max7359_initialize(struct i2c_client *client)
max7359_fall_deepsleep(client);
}
-static int __devinit max7359_probe(struct i2c_client *client,
+static int max7359_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
@@ -260,7 +260,7 @@ failed_free_mem:
return error;
}
-static int __devexit max7359_remove(struct i2c_client *client)
+static int max7359_remove(struct i2c_client *client)
{
struct max7359_keypad *keypad = i2c_get_clientdata(client);
@@ -312,7 +312,7 @@ static struct i2c_driver max7359_i2c_driver = {
.pm = &max7359_pm,
},
.probe = max7359_probe,
- .remove = __devexit_p(max7359_remove),
+ .remove = max7359_remove,
.id_table = max7359_ids,
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 0d77f6c8495..7c236f9c6a5 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -97,7 +97,7 @@ static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mcs_touchkey_probe(struct i2c_client *client,
+static int mcs_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mcs_platform_data *pdata;
@@ -200,7 +200,7 @@ err_free_mem:
return error;
}
-static int __devexit mcs_touchkey_remove(struct i2c_client *client)
+static int mcs_touchkey_remove(struct i2c_client *client)
{
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
@@ -270,7 +270,7 @@ static struct i2c_driver mcs_touchkey_driver = {
.pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
- .remove = __devexit_p(mcs_touchkey_remove),
+ .remove = mcs_touchkey_remove,
.shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 7613f1cac95..f7f3e9a9fd3 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -71,7 +71,7 @@ struct mpr121_init_register {
u8 val;
};
-static const struct mpr121_init_register init_reg_table[] __devinitconst = {
+static const struct mpr121_init_register init_reg_table[] = {
{ MHD_RISING_ADDR, 0x1 },
{ NHD_RISING_ADDR, 0x1 },
{ MHD_FALLING_ADDR, 0x1 },
@@ -123,7 +123,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
+static int mpr121_phys_init(const struct mpr121_platform_data *pdata,
struct mpr121_touchkey *mpr121,
struct i2c_client *client)
{
@@ -185,8 +185,8 @@ err_i2c_write:
return ret;
}
-static int __devinit mpr_touchkey_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mpr_touchkey_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct mpr121_platform_data *pdata = client->dev.platform_data;
struct mpr121_touchkey *mpr121;
@@ -272,7 +272,7 @@ err_free_mem:
return error;
}
-static int __devexit mpr_touchkey_remove(struct i2c_client *client)
+static int mpr_touchkey_remove(struct i2c_client *client)
{
struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
@@ -327,7 +327,7 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = __devexit_p(mpr_touchkey_remove),
+ .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 49f5fa64e0b..0e6a8151fee 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -67,6 +67,7 @@ struct ske_keypad {
const struct ske_keypad_platform_data *board;
unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
struct clk *clk;
+ struct clk *pclk;
spinlock_t ske_keypad_lock;
};
@@ -271,11 +272,18 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
goto err_free_mem_region;
}
+ keypad->pclk = clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(keypad->pclk)) {
+ dev_err(&pdev->dev, "failed to get pclk\n");
+ error = PTR_ERR(keypad->pclk);
+ goto err_iounmap;
+ }
+
keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ goto err_pclk;
}
input->id.bustype = BUS_HOST;
@@ -287,14 +295,25 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->keymap, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_iounmap;
+ goto err_clk;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- clk_enable(keypad->clk);
+ error = clk_prepare_enable(keypad->pclk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable pclk\n");
+ goto err_clk;
+ }
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable clk\n");
+ goto err_pclk_disable;
+ }
+
/* go through board initialization helpers */
if (keypad->board->init)
@@ -330,8 +349,13 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
err_free_irq:
free_irq(keypad->irq, keypad);
err_clk_disable:
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
+err_pclk_disable:
+ clk_disable_unprepare(keypad->pclk);
+err_clk:
clk_put(keypad->clk);
+err_pclk:
+ clk_put(keypad->pclk);
err_iounmap:
iounmap(keypad->reg_base);
err_free_mem_region:
@@ -342,7 +366,7 @@ err_free_mem:
return error;
}
-static int __devexit ske_keypad_remove(struct platform_device *pdev)
+static int ske_keypad_remove(struct platform_device *pdev)
{
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -351,7 +375,7 @@ static int __devexit ske_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input);
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
@@ -403,7 +427,7 @@ static struct platform_driver ske_keypad_driver = {
.owner = THIS_MODULE,
.pm = &ske_keypad_dev_pm_ops,
},
- .remove = __devexit_p(ske_keypad_remove),
+ .remove = ske_keypad_remove,
};
static int __init ske_keypad_init(void)
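The nomadik-ske changes add a second (bus) clock and move from bare clk_enable() to clk_prepare_enable(), checking the return value and unwinding in reverse order on failure. The core of that pattern, reduced to a hypothetical helper:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_enable_clocks(struct device *dev,
			     struct clk *pclk, struct clk *clk)
{
	int error;

	error = clk_prepare_enable(pclk);
	if (error) {
		dev_err(dev, "failed to prepare/enable pclk: %d\n", error);
		return error;
	}

	error = clk_prepare_enable(clk);
	if (error) {
		dev_err(dev, "failed to prepare/enable clk: %d\n", error);
		/* undo only what already succeeded */
		clk_disable_unprepare(pclk);
		return error;
	}

	return 0;
}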
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 4a5fcc8026f..d0d5226d9cd 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -244,7 +244,7 @@ static int omap_kp_resume(struct platform_device *dev)
#define omap_kp_resume NULL
#endif
-static int __devinit omap_kp_probe(struct platform_device *pdev)
+static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
struct input_dev *input_dev;
@@ -357,7 +357,7 @@ err2:
return -EINVAL;
}
-static int __devexit omap_kp_remove(struct platform_device *pdev)
+static int omap_kp_remove(struct platform_device *pdev)
{
struct omap_kp *omap_kp = platform_get_drvdata(pdev);
@@ -379,7 +379,7 @@ static int __devexit omap_kp_remove(struct platform_device *pdev)
static struct platform_driver omap_kp_driver = {
.probe = omap_kp_probe,
- .remove = __devexit_p(omap_kp_remove),
+ .remove = omap_kp_remove,
.suspend = omap_kp_suspend,
.resume = omap_kp_resume,
.driver = {
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c05f98c4141..e25b022692c 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -211,8 +211,8 @@ static void omap4_keypad_close(struct input_dev *input)
}
#ifdef CONFIG_OF
-static int __devinit omap4_keypad_parse_dt(struct device *dev,
- struct omap4_keypad *keypad_data)
+static int omap4_keypad_parse_dt(struct device *dev,
+ struct omap4_keypad *keypad_data)
{
struct device_node *np = dev->of_node;
@@ -241,7 +241,7 @@ static inline int omap4_keypad_parse_dt(struct device *dev,
}
#endif
-static int __devinit omap4_keypad_probe(struct platform_device *pdev)
+static int omap4_keypad_probe(struct platform_device *pdev)
{
const struct omap4_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -406,7 +406,7 @@ err_free_keypad:
return error;
}
-static int __devexit omap4_keypad_remove(struct platform_device *pdev)
+static int omap4_keypad_remove(struct platform_device *pdev)
{
struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
struct resource *res;
@@ -440,7 +440,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
static struct platform_driver omap4_keypad_driver = {
.probe = omap4_keypad_probe,
- .remove = __devexit_p(omap4_keypad_remove),
+ .remove = omap4_keypad_remove,
.driver = {
.name = "omap4-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index abe728c7b88..7ac5f174c6f 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -37,7 +37,7 @@ static irqreturn_t opencores_kbd_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit opencores_kbd_probe(struct platform_device *pdev)
+static int opencores_kbd_probe(struct platform_device *pdev)
{
struct input_dev *input;
struct opencores_kbd *opencores_kbd;
@@ -139,7 +139,7 @@ static int __devinit opencores_kbd_probe(struct platform_device *pdev)
return error;
}
-static int __devexit opencores_kbd_remove(struct platform_device *pdev)
+static int opencores_kbd_remove(struct platform_device *pdev)
{
struct opencores_kbd *opencores_kbd = platform_get_drvdata(pdev);
@@ -158,7 +158,7 @@ static int __devexit opencores_kbd_remove(struct platform_device *pdev)
static struct platform_driver opencores_kbd_device_driver = {
.probe = opencores_kbd_probe,
- .remove = __devexit_p(opencores_kbd_remove),
+ .remove = opencores_kbd_remove,
.driver = {
.name = "opencores-kbd",
},
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 52c34657d30..74339e139d4 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -397,7 +397,7 @@ static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
+static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
int bits, rc, cycles;
u8 scan_val = 0, ctrl_val = 0;
@@ -447,7 +447,7 @@ static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
}
-static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
+static int pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
{
int rc, i;
@@ -518,7 +518,7 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
* - set irq edge type.
* - enable the keypad controller.
*/
-static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
+static int pmic8xxx_kp_probe(struct platform_device *pdev)
{
const struct pm8xxx_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -712,7 +712,7 @@ err_alloc_device:
return rc;
}
-static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
+static int pmic8xxx_kp_remove(struct platform_device *pdev)
{
struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
@@ -773,7 +773,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
static struct platform_driver pmic8xxx_kp_driver = {
.probe = pmic8xxx_kp_probe,
- .remove = __devexit_p(pmic8xxx_kp_remove),
+ .remove = pmic8xxx_kp_remove,
.driver = {
.name = PM8XXX_KEYPAD_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index cad9d5dd597..5330d8fbf6c 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops pxa27x_keypad_pm_ops = {
};
#endif
-static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
+static int pxa27x_keypad_probe(struct platform_device *pdev)
{
struct pxa27x_keypad_platform_data *pdata = pdev->dev.platform_data;
struct pxa27x_keypad *keypad;
@@ -595,7 +595,7 @@ failed_free:
return error;
}
-static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
+static int pxa27x_keypad_remove(struct platform_device *pdev)
{
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -620,7 +620,7 @@ MODULE_ALIAS("platform:pxa27x-keypad");
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = __devexit_p(pxa27x_keypad_remove),
+ .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 41488f9add2..bcad95be73a 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -82,7 +82,7 @@ static void pxa930_rotary_close(struct input_dev *dev)
clear_sbcr(r);
}
-static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
+static int pxa930_rotary_probe(struct platform_device *pdev)
{
struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
struct pxa930_rotary *r;
@@ -174,7 +174,7 @@ failed_free:
return err;
}
-static int __devexit pxa930_rotary_remove(struct platform_device *pdev)
+static int pxa930_rotary_remove(struct platform_device *pdev)
{
struct pxa930_rotary *r = platform_get_drvdata(pdev);
@@ -193,7 +193,7 @@ static struct platform_driver pxa930_rotary_driver = {
.owner = THIS_MODULE,
},
.probe = pxa930_rotary_probe,
- .remove = __devexit_p(pxa930_rotary_remove),
+ .remove = pxa930_rotary_remove,
};
module_platform_driver(pxa930_rotary_driver);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca68f2992d7..42b773b3125 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -91,7 +91,7 @@ static int qt1070_write(struct i2c_client *client, u8 reg, u8 data)
return ret;
}
-static bool __devinit qt1070_identify(struct i2c_client *client)
+static bool qt1070_identify(struct i2c_client *client)
{
int id, ver;
@@ -140,7 +140,7 @@ static irqreturn_t qt1070_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit qt1070_probe(struct i2c_client *client,
+static int qt1070_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct qt1070_data *data;
@@ -230,7 +230,7 @@ err_free_mem:
return err;
}
-static int __devexit qt1070_remove(struct i2c_client *client)
+static int qt1070_remove(struct i2c_client *client)
{
struct qt1070_data *data = i2c_get_clientdata(client);
@@ -256,7 +256,7 @@ static struct i2c_driver qt1070_driver = {
},
.id_table = qt1070_id,
.probe = qt1070_probe,
- .remove = __devexit_p(qt1070_remove),
+ .remove = qt1070_remove,
};
module_i2c_driver(qt1070_driver);
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 76b7d430d03..3dc2b0f27b0 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -183,7 +183,7 @@ static void qt2160_worker(struct work_struct *work)
qt2160_schedule_read(qt2160);
}
-static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
+static int qt2160_read(struct i2c_client *client, u8 reg)
{
int ret;
@@ -204,29 +204,20 @@ static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
return ret;
}
-static int __devinit qt2160_write(struct i2c_client *client, u8 reg, u8 data)
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
{
- int error;
-
- error = i2c_smbus_write_byte(client, reg);
- if (error) {
- dev_err(&client->dev,
- "couldn't send request. Returned %d\n", error);
- return error;
- }
+ int ret;
- error = i2c_smbus_write_byte(client, data);
- if (error) {
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
dev_err(&client->dev,
- "couldn't write data. Returned %d\n", error);
- return error;
- }
+ "couldn't write data. Returned %d\n", ret);
- return error;
+ return ret;
}
-static bool __devinit qt2160_identify(struct i2c_client *client)
+static bool qt2160_identify(struct i2c_client *client)
{
int id, ver, rev;
@@ -257,7 +248,7 @@ static bool __devinit qt2160_identify(struct i2c_client *client)
return true;
}
-static int __devinit qt2160_probe(struct i2c_client *client,
+static int qt2160_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct qt2160_data *qt2160;
@@ -344,7 +335,7 @@ err_free_mem:
return error;
}
-static int __devexit qt2160_remove(struct i2c_client *client)
+static int qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
@@ -375,7 +366,7 @@ static struct i2c_driver qt2160_driver = {
.id_table = qt2160_idtable,
.probe = qt2160_probe,
- .remove = __devexit_p(qt2160_remove),
+ .remove = qt2160_remove,
};
module_i2c_driver(qt2160_driver);
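Besides the annotation removal, qt2160_write() above is rewritten so a register write is a single SMBus "byte data" transaction rather than two back-to-back byte writes. Essentially the same shape, shown standalone under a hypothetical foo_write name:

#include <linux/i2c.h>

static int foo_write(struct i2c_client *client, u8 reg, u8 data)
{
	int ret;

	/* one register+value transaction; negative errno on failure */
	ret = i2c_smbus_write_byte_data(client, reg, data);
	if (ret < 0)
		dev_err(&client->dev,
			"couldn't write data, error: %d\n", ret);

	return ret;
}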
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 9d7a111486f..22e357b5102 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -309,7 +309,7 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
struct samsung_keypad *keypad)
{
struct device_node *np = dev->of_node;
- int gpio, ret, row, col;
+ int gpio, error, row, col;
for (row = 0; row < keypad->rows; row++) {
gpio = of_get_named_gpio(np, "row-gpios", row);
@@ -320,10 +320,11 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-row");
- if (ret)
- dev_err(dev, "keypad row[%d] gpio request failed\n",
- row);
+ error = devm_gpio_request(dev, gpio, "keypad-row");
+ if (error)
+ dev_err(dev,
+ "keypad row[%d] gpio request failed: %d\n",
+ row, error);
}
for (col = 0; col < keypad->cols; col++) {
@@ -335,38 +336,22 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-col");
- if (ret)
- dev_err(dev, "keypad column[%d] gpio request failed\n",
- col);
+ error = devm_gpio_request(dev, gpio, "keypad-col");
+ if (error)
+ dev_err(dev,
+ "keypad column[%d] gpio request failed: %d\n",
+ col, error);
}
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
- int cnt;
-
- for (cnt = 0; cnt < keypad->rows; cnt++)
- if (gpio_is_valid(keypad->row_gpios[cnt]))
- gpio_free(keypad->row_gpios[cnt]);
-
- for (cnt = 0; cnt < keypad->cols; cnt++)
- if (gpio_is_valid(keypad->col_gpios[cnt]))
- gpio_free(keypad->col_gpios[cnt]);
-}
#else
static
struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
{
return NULL;
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
-}
#endif
-static int __devinit samsung_keypad_probe(struct platform_device *pdev)
+static int samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -405,36 +390,30 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
row_shift = get_count_order(pdata->cols);
keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(*keypad) + keymap_size, GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!keypad || !input_dev)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- error = -ENODEV;
- goto err_free_mem;
- }
+ if (!res)
+ return -ENODEV;
- keypad->base = ioremap(res->start, resource_size(res));
- if (!keypad->base) {
- error = -EBUSY;
- goto err_free_mem;
- }
+ keypad->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!keypad->base)
+ return -EBUSY;
- keypad->clk = clk_get(&pdev->dev, "keypad");
+ keypad->clk = devm_clk_get(&pdev->dev, "keypad");
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_unmap_base;
+ return PTR_ERR(keypad->clk);
}
error = clk_prepare(keypad->clk);
if (error) {
dev_err(&pdev->dev, "keypad clock prepare failed\n");
- goto err_put_clk;
+ return error;
}
keypad->input_dev = input_dev;
@@ -479,14 +458,15 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
error = keypad->irq;
- goto err_put_clk;
+ goto err_unprepare_clk;
}
- error = request_threaded_irq(keypad->irq, NULL, samsung_keypad_irq,
- IRQF_ONESHOT, dev_name(&pdev->dev), keypad);
+ error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
+ samsung_keypad_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), keypad);
if (error) {
dev_err(&pdev->dev, "failed to register keypad interrupt\n");
- goto err_put_clk;
+ goto err_unprepare_clk;
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
@@ -495,7 +475,7 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
error = input_register_device(keypad->input_dev);
if (error)
- goto err_free_irq;
+ goto err_disable_runtime_pm;
if (pdev->dev.of_node) {
devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
@@ -504,26 +484,16 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
return 0;
-err_free_irq:
- free_irq(keypad->irq, keypad);
+err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
err_unprepare_clk:
clk_unprepare(keypad->clk);
-err_put_clk:
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-err_unmap_base:
- iounmap(keypad->base);
-err_free_mem:
- input_free_device(input_dev);
- kfree(keypad);
-
return error;
}
-static int __devexit samsung_keypad_remove(struct platform_device *pdev)
+static int samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
@@ -533,18 +503,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
- /*
- * It is safe to free IRQ after unregistering device because
- * samsung_keypad_close will shut off interrupts.
- */
- free_irq(keypad->irq, keypad);
-
clk_unprepare(keypad->clk);
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-
- iounmap(keypad->base);
- kfree(keypad);
return 0;
}
@@ -685,7 +644,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = __devexit_p(samsung_keypad_remove),
+ .remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
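The samsung-keypad rework leans on managed (devm_*) resources, which is why most error labels and much of the remove path disappear: memory, the input device, the clock and the IRQ are all released automatically when the device unbinds. A skeletal probe in that style, with hypothetical foo_* names and no claim to match the driver's real logic:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_keypad {
	struct input_dev *input;
	struct clk *clk;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_keypad *keypad;
	int irq, error;

	keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL);
	if (!keypad)
		return -ENOMEM;

	keypad->input = devm_input_allocate_device(&pdev->dev);
	if (!keypad->input)
		return -ENOMEM;

	keypad->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(keypad->clk))
		return PTR_ERR(keypad->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	error = devm_request_threaded_irq(&pdev->dev, irq, NULL, foo_irq,
					  IRQF_ONESHOT, dev_name(&pdev->dev),
					  keypad);
	if (error)
		return error;

	keypad->input->name = "foo keypad";

	/* nothing to undo by hand: devm releases it all on failure/unbind */
	return input_register_device(keypad->input);
}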
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index da54ad5db15..fdb9eb2df38 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -162,7 +162,7 @@ static irqreturn_t sh_keysc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit sh_keysc_probe(struct platform_device *pdev)
+static int sh_keysc_probe(struct platform_device *pdev)
{
struct sh_keysc_priv *priv;
struct sh_keysc_info *pdata;
@@ -272,7 +272,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sh_keysc_remove(struct platform_device *pdev)
+static int sh_keysc_remove(struct platform_device *pdev)
{
struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
@@ -331,7 +331,7 @@ static SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
static struct platform_driver sh_keysc_device_driver = {
.probe = sh_keysc_probe,
- .remove = __devexit_p(sh_keysc_remove),
+ .remove = sh_keysc_remove,
.driver = {
.name = "sh_keysc",
.pm = &sh_keysc_dev_pm_ops,
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index c7ca97f44bf..695d237417d 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -55,15 +55,15 @@
struct spear_kbd {
struct input_dev *input;
- struct resource *res;
void __iomem *io_base;
struct clk *clk;
unsigned int irq;
unsigned int mode;
+ unsigned int suspended_rate;
unsigned short last_key;
unsigned short keycodes[NUM_ROWS * NUM_COLS];
bool rep;
- unsigned int suspended_rate;
+ bool irq_wake_enabled;
u32 mode_ctl_reg;
};
@@ -146,7 +146,7 @@ static void spear_kbd_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit spear_kbd_parse_dt(struct platform_device *pdev,
+static int spear_kbd_parse_dt(struct platform_device *pdev,
struct spear_kbd *kbd)
{
struct device_node *np = pdev->dev.of_node;
@@ -181,7 +181,7 @@ static inline int spear_kbd_parse_dt(struct platform_device *pdev,
}
#endif
-static int __devinit spear_kbd_probe(struct platform_device *pdev)
+static int spear_kbd_probe(struct platform_device *pdev)
{
struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
@@ -203,12 +203,16 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
return irq;
}
- kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!kbd || !input_dev) {
- dev_err(&pdev->dev, "out of memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
+ if (!kbd) {
+ dev_err(&pdev->dev, "not enough memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "unable to allocate input device\n");
+ return -ENOMEM;
}
kbd->input = input_dev;
@@ -217,37 +221,25 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
if (!pdata) {
error = spear_kbd_parse_dt(pdev, kbd);
if (error)
- goto err_free_mem;
+ return error;
} else {
kbd->mode = pdata->mode;
kbd->rep = pdata->rep;
kbd->suspended_rate = pdata->suspended_rate;
}
- kbd->res = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kbd->res) {
- dev_err(&pdev->dev, "keyboard region already claimed\n");
- error = -EBUSY;
- goto err_free_mem;
- }
-
- kbd->io_base = ioremap(res->start, resource_size(res));
+ kbd->io_base = devm_request_and_ioremap(&pdev->dev, res);
if (!kbd->io_base) {
- dev_err(&pdev->dev, "ioremap failed for kbd_region\n");
- error = -ENOMEM;
- goto err_release_mem_region;
+ dev_err(&pdev->dev, "request-ioremap failed for kbd_region\n");
+ return -ENOMEM;
}
- kbd->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(kbd->clk)) {
- error = PTR_ERR(kbd->clk);
- goto err_iounmap;
- }
+ kbd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kbd->clk))
+ return PTR_ERR(kbd->clk);
input_dev->name = "Spear Keyboard";
input_dev->phys = "keyboard/input0";
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0001;
@@ -259,7 +251,7 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
kbd->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_put_clk;
+ return error;
}
if (kbd->rep)
@@ -268,48 +260,36 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, kbd);
- error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
+ error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
+ "keyboard", kbd);
if (error) {
- dev_err(&pdev->dev, "request_irq fail\n");
- goto err_put_clk;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return error;
}
+ error = clk_prepare(kbd->clk);
+ if (error)
+ return error;
+
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "Unable to register keyboard device\n");
- goto err_free_irq;
+ clk_unprepare(kbd->clk);
+ return error;
}
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, kbd);
return 0;
-
-err_free_irq:
- free_irq(kbd->irq, kbd);
-err_put_clk:
- clk_put(kbd->clk);
-err_iounmap:
- iounmap(kbd->io_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(kbd);
-
- return error;
}
-static int __devexit spear_kbd_remove(struct platform_device *pdev)
+static int spear_kbd_remove(struct platform_device *pdev)
{
struct spear_kbd *kbd = platform_get_drvdata(pdev);
- free_irq(kbd->irq, kbd);
input_unregister_device(kbd->input);
- clk_put(kbd->clk);
- iounmap(kbd->io_base);
- release_mem_region(kbd->res->start, resource_size(kbd->res));
- kfree(kbd);
+ clk_unprepare(kbd->clk);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -333,7 +313,8 @@ static int spear_kbd_suspend(struct device *dev)
mode_ctl_reg = readl_relaxed(kbd->io_base + MODE_CTL_REG);
if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(kbd->irq);
+ if (!enable_irq_wake(kbd->irq))
+ kbd->irq_wake_enabled = true;
/*
* reprogram the keyboard operating frequency as on some
@@ -379,7 +360,10 @@ static int spear_kbd_resume(struct device *dev)
mutex_lock(&input_dev->mutex);
if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(kbd->irq);
+ if (kbd->irq_wake_enabled) {
+ kbd->irq_wake_enabled = false;
+ disable_irq_wake(kbd->irq);
+ }
} else {
if (input_dev->users)
clk_enable(kbd->clk);
@@ -407,7 +391,7 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove = __devexit_p(spear_kbd_remove),
+ .remove = spear_kbd_remove,
.driver = {
.name = "keyboard",
.owner = THIS_MODULE,
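One behavioural detail in the spear-keyboard suspend path: the new irq_wake_enabled flag records whether enable_irq_wake() actually succeeded, so resume only calls disable_irq_wake() when there is something to undo and the wake reference count stays balanced. In isolation, with hypothetical foo_ names:

#include <linux/interrupt.h>
#include <linux/types.h>

struct foo_kbd {
	unsigned int irq;
	bool irq_wake_enabled;
};

static void foo_kbd_suspend(struct foo_kbd *kbd, bool may_wakeup)
{
	/* enable_irq_wake() returns 0 on success */
	if (may_wakeup && !enable_irq_wake(kbd->irq))
		kbd->irq_wake_enabled = true;
}

static void foo_kbd_resume(struct foo_kbd *kbd)
{
	if (kbd->irq_wake_enabled) {
		kbd->irq_wake_enabled = false;
		disable_irq_wake(kbd->irq);
	}
}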
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 470a8778dec..5cbec56f772 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -166,7 +166,7 @@ static irqreturn_t stmpe_keypad_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
unsigned int col_gpios = variant->col_gpios;
@@ -207,7 +207,7 @@ static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
}
-static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -257,105 +257,131 @@ static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
(plat->debounce_ms << 1));
}
-static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
{
- struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ int row, col;
+
+ for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
+ for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ int code = MATRIX_SCAN_CODE(row, col,
+ STMPE_KEYPAD_ROW_SHIFT);
+ if (keypad->keymap[code] != KEY_RESERVED) {
+ keypad->rows |= 1 << row;
+ keypad->cols |= 1 << col;
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_OF
+static const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
struct stmpe_keypad_platform_data *plat;
+
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &plat->scan_count);
+
+ plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
+ return plat;
+}
+#else
+static inline const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int stmpe_keypad_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ const struct stmpe_keypad_platform_data *plat;
struct stmpe_keypad *keypad;
struct input_dev *input;
- int ret;
+ int error;
int irq;
- int i;
plat = stmpe->pdata->keypad;
- if (!plat)
- return -ENODEV;
+ if (!plat) {
+ plat = stmpe_keypad_of_probe(&pdev->dev);
+ if (IS_ERR(plat))
+ return PTR_ERR(plat);
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- keypad = kzalloc(sizeof(struct stmpe_keypad), GFP_KERNEL);
+ keypad = devm_kzalloc(&pdev->dev, sizeof(struct stmpe_keypad),
+ GFP_KERNEL);
if (!keypad)
return -ENOMEM;
- input = input_allocate_device();
- if (!input) {
- ret = -ENOMEM;
- goto out_freekeypad;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
input->name = "STMPE keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- ret = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
- keypad->keymap, input);
- if (ret)
- goto out_freeinput;
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ STMPE_KEYPAD_MAX_ROWS,
+ STMPE_KEYPAD_MAX_COLS,
+ keypad->keymap, input);
+ if (error)
+ return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- for (i = 0; i < plat->keymap_data->keymap_size; i++) {
- unsigned int key = plat->keymap_data->keymap[i];
-
- keypad->cols |= 1 << KEY_COL(key);
- keypad->rows |= 1 << KEY_ROW(key);
- }
+ stmpe_keypad_fill_used_pins(keypad);
keypad->stmpe = stmpe;
keypad->plat = plat;
keypad->input = input;
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
- ret = stmpe_keypad_chip_init(keypad);
- if (ret < 0)
- goto out_freeinput;
+ error = stmpe_keypad_chip_init(keypad);
+ if (error < 0)
+ return error;
- ret = input_register_device(input);
- if (ret) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", ret);
- goto out_freeinput;
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, stmpe_keypad_irq,
+ IRQF_ONESHOT, "stmpe-keypad", keypad);
+ if (error) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", error);
+ return error;
}
- ret = request_threaded_irq(irq, NULL, stmpe_keypad_irq, IRQF_ONESHOT,
- "stmpe-keypad", keypad);
- if (ret) {
- dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_unregisterinput;
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", error);
+ return error;
}
platform_set_drvdata(pdev, keypad);
return 0;
-
-out_unregisterinput:
- input_unregister_device(input);
- input = NULL;
-out_freeinput:
- input_free_device(input);
-out_freekeypad:
- kfree(keypad);
- return ret;
}
-static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
+static int stmpe_keypad_remove(struct platform_device *pdev)
{
struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
- struct stmpe *stmpe = keypad->stmpe;
- int irq = platform_get_irq(pdev, 0);
-
- stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
- free_irq(irq, keypad);
- input_unregister_device(keypad->input);
- platform_set_drvdata(pdev, NULL);
- kfree(keypad);
+ stmpe_disable(keypad->stmpe, STMPE_BLOCK_KEYPAD);
return 0;
}
@@ -364,7 +390,7 @@ static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
.probe = stmpe_keypad_probe,
- .remove = __devexit_p(stmpe_keypad_remove),
+ .remove = stmpe_keypad_remove,
};
module_platform_driver(stmpe_keypad_driver);
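The stmpe-keypad probe now accepts either classic platform data or a device-tree node, with the DT helper returning its result through ERR_PTR()/IS_ERR() so the caller can propagate the errno unchanged. A stripped-down sketch of that fallback, using hypothetical foo_* names and properties:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_pdata {
	u32 debounce_ms;
	bool no_autorepeat;
};

static const struct foo_pdata *foo_of_probe(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct foo_pdata *pdata;

	if (!np)
		return ERR_PTR(-ENODEV);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32(np, "debounce-interval", &pdata->debounce_ms);
	pdata->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");

	return pdata;
}

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		pdata = foo_of_probe(&pdev->dev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	/* ... use pdata->debounce_ms, pdata->no_autorepeat ... */
	return 0;
}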
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 7d498e69850..2fb0d76a04c 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -299,7 +299,7 @@ static void tc3589x_keypad_close(struct input_dev *input)
tc3589x_keypad_disable(keypad);
}
-static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
+static int tc3589x_keypad_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
struct tc_keypad *keypad;
@@ -382,7 +382,7 @@ err_free_mem:
return error;
}
-static int __devexit tc3589x_keypad_remove(struct platform_device *pdev)
+static int tc3589x_keypad_remove(struct platform_device *pdev)
{
struct tc_keypad *keypad = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -448,7 +448,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.pm = &tc3589x_keypad_dev_pm_ops,
},
.probe = tc3589x_keypad_probe,
- .remove = __devexit_p(tc3589x_keypad_remove),
+ .remove = tc3589x_keypad_remove,
};
module_platform_driver(tc3589x_keypad_driver);
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index c355cdde8d2..bfc832c35a7 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -166,7 +166,7 @@ static void tca6416_keys_close(struct input_dev *dev)
disable_irq(chip->irqnum);
}
-static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
{
int error;
@@ -197,7 +197,7 @@ static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
return 0;
}
-static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+static int tca6416_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tca6416_keys_platform_data *pdata;
@@ -313,7 +313,7 @@ fail1:
return error;
}
-static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+static int tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
@@ -361,7 +361,7 @@ static struct i2c_driver tca6416_keypad_driver = {
.pm = &tca6416_keypad_dev_pm_ops,
},
.probe = tca6416_keypad_probe,
- .remove = __devexit_p(tca6416_keypad_remove),
+ .remove = tca6416_keypad_remove,
.id_table = tca6416_id,
};
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 893869b29ed..50e9c5e195e 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -35,6 +35,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/tca8418_keypad.h>
+#include <linux/of.h>
/* TCA8418 hardware limits */
#define TCA8418_MAX_ROWS 8
@@ -109,25 +110,11 @@
#define KEY_EVENT_CODE 0x7f
#define KEY_EVENT_VALUE 0x80
-
-static const struct i2c_device_id tca8418_id[] = {
- { TCA8418_NAME, 8418, },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tca8418_id);
-
struct tca8418_keypad {
- unsigned int rows;
- unsigned int cols;
- unsigned int keypad_mask; /* Mask for keypad col/rol regs */
- unsigned int irq;
- unsigned int row_shift;
-
struct i2c_client *client;
struct input_dev *input;
- /* Flexible array member, must be at end of struct */
- unsigned short keymap[];
+ unsigned int row_shift;
};
/*
@@ -172,6 +159,8 @@ static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
{
+ struct input_dev *input = keypad_data->input;
+ unsigned short *keymap = input->keycode;
int error, col, row;
u8 reg, state, code;
@@ -190,9 +179,8 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
- input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
- input_report_key(keypad_data->input,
- keypad_data->keymap[code], state);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keymap[code], state);
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
@@ -202,7 +190,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
dev_err(&keypad_data->client->dev,
"unable to read REG_KEY_EVENT_A\n");
- input_sync(keypad_data->input);
+ input_sync(input);
}
/*
@@ -218,16 +206,18 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
if (error) {
dev_err(&keypad_data->client->dev,
"unable to read REG_INT_STAT\n");
- goto exit;
+ return IRQ_NONE;
}
+ if (!reg)
+ return IRQ_NONE;
+
if (reg & INT_STAT_OVR_FLOW_INT)
dev_warn(&keypad_data->client->dev, "overflow occurred\n");
if (reg & INT_STAT_K_INT)
tca8418_read_keypad(keypad_data);
-exit:
/* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
reg = 0xff;
error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
@@ -241,7 +231,8 @@ exit:
/*
* Configure the TCA8418 for keypad operation
*/
-static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+static int tca8418_configure(struct tca8418_keypad *keypad_data,
+ u32 rows, u32 cols)
{
int reg, error;
@@ -253,9 +244,8 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
/* Assemble a mask for row and column registers */
- reg = ~(~0 << keypad_data->rows);
- reg += (~(~0 << keypad_data->cols)) << 8;
- keypad_data->keypad_mask = reg;
+ reg = ~(~0 << rows);
+ reg += (~(~0 << cols)) << 8;
/* Set registers to keypad mode */
error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
@@ -270,145 +260,144 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
return error;
}
-static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+static int tca8418_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
const struct tca8418_keypad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(dev);
struct tca8418_keypad *keypad_data;
struct input_dev *input;
+ const struct matrix_keymap_data *keymap_data = NULL;
+ u32 rows = 0, cols = 0;
+ bool rep = false;
+ bool irq_is_gpio = false;
+ int irq;
int error, row_shift, max_keys;
/* Copy the platform data */
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
- }
-
- if (!pdata->keymap_data) {
- dev_err(&client->dev, "no keymap data defined\n");
- return -EINVAL;
+ if (pdata) {
+ if (!pdata->keymap_data) {
+ dev_err(dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+ keymap_data = pdata->keymap_data;
+ rows = pdata->rows;
+ cols = pdata->cols;
+ rep = pdata->rep;
+ irq_is_gpio = pdata->irq_is_gpio;
+ } else {
+ struct device_node *np = dev->of_node;
+ of_property_read_u32(np, "keypad,num-rows", &rows);
+ of_property_read_u32(np, "keypad,num-columns", &cols);
+ rep = of_property_read_bool(np, "keypad,autorepeat");
}
- if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
- dev_err(&client->dev, "invalid rows\n");
+ if (!rows || rows > TCA8418_MAX_ROWS) {
+ dev_err(dev, "invalid rows\n");
return -EINVAL;
}
- if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
- dev_err(&client->dev, "invalid columns\n");
+ if (!cols || cols > TCA8418_MAX_COLS) {
+ dev_err(dev, "invalid columns\n");
return -EINVAL;
}
/* Check i2c driver capabilities */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(&client->dev, "%s adapter not supported\n",
+ dev_err(dev, "%s adapter not supported\n",
dev_driver_string(&client->adapter->dev));
return -ENODEV;
}
- row_shift = get_count_order(pdata->cols);
- max_keys = pdata->rows << row_shift;
+ row_shift = get_count_order(cols);
+ max_keys = rows << row_shift;
- /* Allocate memory for keypad_data, keymap and input device */
- keypad_data = kzalloc(sizeof(*keypad_data) +
- max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ /* Allocate memory for keypad_data and input device */
+ keypad_data = devm_kzalloc(dev, sizeof(*keypad_data), GFP_KERNEL);
if (!keypad_data)
return -ENOMEM;
- keypad_data->rows = pdata->rows;
- keypad_data->cols = pdata->cols;
keypad_data->client = client;
keypad_data->row_shift = row_shift;
/* Initialize the chip or fail if chip isn't present */
- error = tca8418_configure(keypad_data);
+ error = tca8418_configure(keypad_data, rows, cols);
if (error < 0)
- goto fail1;
+ return error;
/* Configure input device */
- input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto fail1;
- }
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
keypad_data->input = input;
input->name = client->name;
- input->dev.parent = &client->dev;
-
input->id.bustype = BUS_I2C;
input->id.vendor = 0x0001;
input->id.product = 0x001;
input->id.version = 0x0001;
- error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
- pdata->rows, pdata->cols,
- keypad_data->keymap, input);
+ error = matrix_keypad_build_keymap(keymap_data, NULL, rows, cols,
+ NULL, input);
if (error) {
- dev_dbg(&client->dev, "Failed to build keymap\n");
- goto fail2;
+ dev_err(dev, "Failed to build keymap\n");
+ return error;
}
- if (pdata->rep)
+ if (rep)
__set_bit(EV_REP, input->evbit);
input_set_capability(input, EV_MSC, MSC_SCAN);
input_set_drvdata(input, keypad_data);
- if (pdata->irq_is_gpio)
- client->irq = gpio_to_irq(client->irq);
+ irq = client->irq;
+ if (irq_is_gpio)
+ irq = gpio_to_irq(irq);
- error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, keypad_data);
+ error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_SHARED |
+ IRQF_ONESHOT,
+ client->name, keypad_data);
if (error) {
- dev_dbg(&client->dev,
- "Unable to claim irq %d; error %d\n",
+ dev_err(dev, "Unable to claim irq %d; error %d\n",
client->irq, error);
- goto fail2;
+ return error;
}
error = input_register_device(input);
if (error) {
- dev_dbg(&client->dev,
- "Unable to register input device, error: %d\n", error);
- goto fail3;
+ dev_err(dev, "Unable to register input device, error: %d\n",
+ error);
+ return error;
}
- i2c_set_clientdata(client, keypad_data);
return 0;
-
-fail3:
- free_irq(client->irq, keypad_data);
-fail2:
- input_free_device(input);
-fail1:
- kfree(keypad_data);
- return error;
}
-static int __devexit tca8418_keypad_remove(struct i2c_client *client)
-{
- struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
-
- free_irq(keypad_data->client->irq, keypad_data);
-
- input_unregister_device(keypad_data->input);
-
- kfree(keypad_data);
-
- return 0;
-}
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+#ifdef CONFIG_OF
+static const struct of_device_id tca8418_dt_ids[] = {
+ { .compatible = "ti,tca8418", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+#endif
static struct i2c_driver tca8418_keypad_driver = {
.driver = {
.name = TCA8418_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tca8418_dt_ids),
},
.probe = tca8418_keypad_probe,
- .remove = __devexit_p(tca8418_keypad_remove),
.id_table = tca8418_id,
};
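The tca8418 hunk also adds the standard OF matching boilerplate: an of_device_id table compiled only under CONFIG_OF, exported via MODULE_DEVICE_TABLE(of, ...), and referenced through of_match_ptr() so the pointer collapses to NULL on non-DT builds. The generic shape, with a hypothetical "vendor,foo" compatible and shown for a platform driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo", },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_dt_ids);
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		/* NULL when CONFIG_OF is not set */
		.of_match_table	= of_match_ptr(foo_dt_ids),
	},
};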
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 5faaf2553e3..c76f96872d3 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -87,7 +87,7 @@ struct tegra_kbc {
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] __devinitdata = {
+static const u32 tegra_kbc_default_keymap[] = {
KEY(0, 2, KEY_W),
KEY(0, 3, KEY_S),
KEY(0, 4, KEY_A),
@@ -223,7 +223,7 @@ static const u32 tegra_kbc_default_keymap[] __devinitdata = {
};
static const
-struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
+struct matrix_keymap_data tegra_kbc_default_keymap_data = {
.keymap = tegra_kbc_default_keymap,
.keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
};
@@ -573,7 +573,7 @@ static void tegra_kbc_close(struct input_dev *dev)
return tegra_kbc_stop(kbc);
}
-static bool __devinit
+static bool
tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
struct device *dev, unsigned int *num_rows)
{
@@ -619,7 +619,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
}
#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data * __devinit tegra_kbc_dt_parse_pdata(
+static struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
struct platform_device *pdev)
{
struct tegra_kbc_platform_data *pdata;
@@ -670,7 +670,7 @@ static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
}
#endif
-static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
+static int tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
{
const struct tegra_kbc_platform_data *pdata = kbc->pdata;
const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
@@ -697,7 +697,7 @@ static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
return retval;
}
-static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+static int tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
struct tegra_kbc *kbc;
@@ -838,7 +838,7 @@ err_free_pdata:
return err;
}
-static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+static int tegra_kbc_remove(struct platform_device *pdev)
{
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
struct resource *res;
@@ -954,7 +954,7 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
- .remove = __devexit_p(tegra_kbc_remove),
+ .remove = tegra_kbc_remove,
.driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 4c34f21fbe2..ee163501129 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -153,7 +153,7 @@ static void keypad_stop(struct input_dev *dev)
clk_disable(kp->clk);
}
-static int __devinit keypad_probe(struct platform_device *pdev)
+static int keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -301,7 +301,7 @@ error_res:
return error;
}
-static int __devexit keypad_remove(struct platform_device *pdev)
+static int keypad_remove(struct platform_device *pdev)
{
struct keypad_data *kp = platform_get_drvdata(pdev);
@@ -319,7 +319,7 @@ static int __devexit keypad_remove(struct platform_device *pdev)
static struct platform_driver keypad_driver = {
.probe = keypad_probe,
- .remove = __devexit_p(keypad_remove),
+ .remove = keypad_remove,
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a2c6f79aa10..04f84fd5717 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -271,7 +271,7 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
return IRQ_HANDLED;
}
-static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
+static int twl4030_kp_program(struct twl4030_keypad *kp)
{
u8 reg;
int i;
@@ -328,7 +328,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
* Registers keypad device with input subsystem
* and configures TWL4030 keypad registers
*/
-static int __devinit twl4030_kp_probe(struct platform_device *pdev)
+static int twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
const struct matrix_keymap_data *keymap_data;
@@ -432,7 +432,7 @@ err1:
return error;
}
-static int __devexit twl4030_kp_remove(struct platform_device *pdev)
+static int twl4030_kp_remove(struct platform_device *pdev)
{
struct twl4030_keypad *kp = platform_get_drvdata(pdev);
@@ -452,7 +452,7 @@ static int __devexit twl4030_kp_remove(struct platform_device *pdev)
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
- .remove = __devexit_p(twl4030_kp_remove),
+ .remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index e0f6cd1ad0f..ee163bee8cc 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -118,7 +118,7 @@ static void w90p910_keypad_close(struct input_dev *dev)
clk_disable(keypad->clk);
}
-static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
+static int w90p910_keypad_probe(struct platform_device *pdev)
{
const struct w90p910_keypad_platform_data *pdata =
pdev->dev.platform_data;
@@ -234,7 +234,7 @@ failed_free:
return error;
}
-static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
+static int w90p910_keypad_remove(struct platform_device *pdev)
{
struct w90p910_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -257,7 +257,7 @@ static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
static struct platform_driver w90p910_keypad_driver = {
.probe = w90p910_keypad_probe,
- .remove = __devexit_p(w90p910_keypad_remove),
+ .remove = w90p910_keypad_remove,
.driver = {
.name = "nuc900-kpi",
.owner = THIS_MODULE,
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index d88d9be1d1b..3ae496ea5fe 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -18,6 +18,7 @@
*/
#include <linux/device.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/input.h>
@@ -123,6 +124,11 @@ static int matrix_keypad_parse_of_keymap(const char *propname,
 * it will attempt to load the keymap from the property specified by @keymap_name
* argument (or "linux,keymap" if @keymap_name is %NULL).
*
+ * If @keymap is %NULL the function will automatically allocate a managed
+ * block of memory to store the keymap. This memory will be associated with
+ * the parent device and automatically freed when the device unbinds from
+ * the driver.
+ *
* Callers are expected to set up input_dev->dev.parent before calling this
* function.
*/
@@ -133,12 +139,27 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
struct input_dev *input_dev)
{
unsigned int row_shift = get_count_order(cols);
+ size_t max_keys = rows << row_shift;
int i;
int error;
+ if (WARN_ON(!input_dev->dev.parent))
+ return -EINVAL;
+
+ if (!keymap) {
+ keymap = devm_kzalloc(input_dev->dev.parent,
+ max_keys * sizeof(*keymap),
+ GFP_KERNEL);
+ if (!keymap) {
+ dev_err(input_dev->dev.parent,
+ "Unable to allocate memory for keymap");
+ return -ENOMEM;
+ }
+ }
+
input_dev->keycode = keymap;
input_dev->keycodesize = sizeof(*keymap);
- input_dev->keycodemax = rows << row_shift;
+ input_dev->keycodemax = max_keys;
__set_bit(EV_KEY, input_dev->evbit);
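For illustration only, a minimal caller sketch of the new behaviour added above (the function example_keypad_setup and the EXAMPLE_ROWS/EXAMPLE_COLS constants are hypothetical and not part of this patch): a driver may now pass a NULL keymap and let matrix_keypad_build_keymap() devm-allocate the keycode table, provided input_dev->dev.parent has been set first.

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_device.h>

#define EXAMPLE_ROWS	4	/* hypothetical matrix size */
#define EXAMPLE_COLS	4

static int example_keypad_setup(struct platform_device *pdev,
				struct input_dev *input_dev,
				const struct matrix_keymap_data *keymap_data)
{
	/* Required: the devm allocation is tied to this parent device. */
	input_dev->dev.parent = &pdev->dev;

	/* Passing keymap == NULL asks the core to devm_kzalloc() the table. */
	return matrix_keypad_build_keymap(keymap_data, NULL,
					  EXAMPLE_ROWS, EXAMPLE_COLS,
					  NULL, input_dev);
}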
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index 7f26e7b6c22..ee43e5b7c88 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t pm80x_onkey_handler(int irq, void *data)
static SIMPLE_DEV_PM_OPS(pm80x_onkey_pm_ops, pm80x_dev_suspend,
pm80x_dev_resume);
-static int __devinit pm80x_onkey_probe(struct platform_device *pdev)
+static int pm80x_onkey_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -139,7 +139,7 @@ out:
return err;
}
-static int __devexit pm80x_onkey_remove(struct platform_device *pdev)
+static int pm80x_onkey_remove(struct platform_device *pdev)
{
struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
@@ -157,7 +157,7 @@ static struct platform_driver pm80x_onkey_driver = {
.pm = &pm80x_onkey_pm_ops,
},
.probe = pm80x_onkey_probe,
- .remove = __devexit_p(pm80x_onkey_remove),
+ .remove = pm80x_onkey_remove,
};
module_platform_driver(pm80x_onkey_driver);
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index f9ce1835e4d..abd8453e521 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -56,7 +56,7 @@ static irqreturn_t pm860x_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
+static int pm860x_onkey_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_onkey_info *info;
@@ -121,7 +121,7 @@ out:
return ret;
}
-static int __devexit pm860x_onkey_remove(struct platform_device *pdev)
+static int pm860x_onkey_remove(struct platform_device *pdev)
{
struct pm860x_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,7 +161,7 @@ static struct platform_driver pm860x_onkey_driver = {
.pm = &pm860x_onkey_pm_ops,
},
.probe = pm860x_onkey_probe,
- .remove = __devexit_p(pm860x_onkey_remove),
+ .remove = pm860x_onkey_remove,
};
module_platform_driver(pm860x_onkey_driver);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7c0f1ecfdd7..259ef31abb1 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -72,6 +72,16 @@ config INPUT_AD714X_SPI
To compile this driver as a module, choose M here: the
module will be called ad714x-spi.
+config INPUT_ARIZONA_HAPTICS
+ tristate "Arizona haptics support"
+ depends on MFD_ARIZONA && SND_SOC
+ select INPUT_FF_MEMLESS
+ help
+ Say Y to enable support for the haptics module in Arizona CODECs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called arizona-haptics.
+
config INPUT_BMA150
tristate "BMA150/SMB380 acceleration sensor support"
depends on I2C
@@ -290,8 +300,7 @@ config INPUT_ATI_REMOTE2
called ati_remote2.
config INPUT_KEYSPAN_REMOTE
- tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Keyspan DMR USB remote control"
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -340,7 +349,6 @@ config INPUT_POWERMATE
config INPUT_YEALINK
tristate "Yealink usb-p1k voip phone"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -356,7 +364,6 @@ config INPUT_YEALINK
config INPUT_CM109
tristate "C-Media CM109 USB I/O Controller"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -367,6 +374,16 @@ config INPUT_CM109
To compile this driver as a module, choose M here: the module will be
called cm109.
+config INPUT_RETU_PWRBUTTON
+ tristate "Retu Power button Driver"
+ depends on MFD_RETU
+ help
+ Say Y here if you want to enable power key reporting via the
+ Retu chips found in Nokia Internet Tablets (770, N800, N810).
+
+ To compile this driver as a module, choose M here. The module will
+ be called retu-pwrbutton.
+
config INPUT_TWL4030_PWRBUTTON
tristate "TWL4030 Power button Driver"
depends on TWL4030_CORE
@@ -434,7 +451,7 @@ config INPUT_PCF50633_PMU
config INPUT_PCF8574
tristate "PCF8574 Keypad input device"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
Say Y here if you want to support a keypad connected via I2C
with a PCF8574.
@@ -444,7 +461,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on HAVE_PWM
+ depends on HAVE_PWM || PWM
help
Say Y here to get support for PWM based beeper devices.
@@ -486,6 +503,16 @@ config INPUT_DA9052_ONKEY
To compile this driver as a module, choose M here: the
module will be called da9052_onkey.
+config INPUT_DA9055_ONKEY
+ tristate "Dialog Semiconductor DA9055 ONKEY"
+ depends on MFD_DA9055
+ help
+ Support the ONKEY of DA9055 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called da9055_onkey.
+
config INPUT_DM355EVM
tristate "TI DaVinci DM355 EVM Keypad and IR Remote"
depends on MFD_DM355EVM_MSP
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 83fe6f5b77d..1f1e1b109d9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o
obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o
obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
+obj-$(CONFIG_INPUT_ARIZONA_HAPTICS) += arizona-haptics.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
+obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
@@ -45,6 +47,7 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
+obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 84ec691c05a..2f090b46e71 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -45,7 +45,7 @@ static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
+static int ab8500_ponkey_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_ponkey *ponkey;
@@ -118,7 +118,7 @@ err_free_mem:
return error;
}
-static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
+static int ab8500_ponkey_remove(struct platform_device *pdev)
{
struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
@@ -146,7 +146,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
- .remove = __devexit_p(ab8500_ponkey_remove),
+ .remove = ab8500_ponkey_remove,
};
module_platform_driver(ab8500_ponkey_driver);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index c8a79015472..29d2064c26f 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -72,7 +72,7 @@ static int ad714x_i2c_read(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+static int ad714x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad714x_chip *chip;
@@ -87,7 +87,7 @@ static int __devinit ad714x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+static int ad714x_i2c_remove(struct i2c_client *client)
{
struct ad714x_chip *chip = i2c_get_clientdata(client);
@@ -112,7 +112,7 @@ static struct i2c_driver ad714x_i2c_driver = {
.pm = &ad714x_i2c_pm,
},
.probe = ad714x_i2c_probe,
- .remove = __devexit_p(ad714x_i2c_remove),
+ .remove = ad714x_i2c_remove,
.id_table = ad714x_id,
};
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 75f6136d608..bdccca42d13 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -83,7 +83,7 @@ static int ad714x_spi_write(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_spi_probe(struct spi_device *spi)
+static int ad714x_spi_probe(struct spi_device *spi)
{
struct ad714x_chip *chip;
int err;
@@ -103,7 +103,7 @@ static int __devinit ad714x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad714x_spi_remove(struct spi_device *spi)
+static int ad714x_spi_remove(struct spi_device *spi)
{
struct ad714x_chip *chip = spi_get_drvdata(spi);
@@ -120,7 +120,7 @@ static struct spi_driver ad714x_spi_driver = {
.pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
- .remove = __devexit_p(ad714x_spi_remove),
+ .remove = ad714x_spi_remove,
};
module_spi_driver(ad714x_spi_driver);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index dd1d1c145a7..535dda48cac 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -73,7 +73,7 @@ static const struct adxl34x_bus_ops adxl34x_i2c_bops = {
.read_block = adxl34x_i2c_read_block,
};
-static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
+static int adxl34x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adxl34x *ac;
@@ -98,7 +98,7 @@ static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit adxl34x_i2c_remove(struct i2c_client *client)
+static int adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -144,7 +144,7 @@ static struct i2c_driver adxl34x_driver = {
.pm = &adxl34x_i2c_pm,
},
.probe = adxl34x_i2c_probe,
- .remove = __devexit_p(adxl34x_i2c_remove),
+ .remove = adxl34x_i2c_remove,
.id_table = adxl34x_id,
};
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 820a802a1e6..ad5f40d37e4 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -65,7 +65,7 @@ static const struct adxl34x_bus_ops adxl34x_spi_bops = {
.read_block = adxl34x_spi_read_block,
};
-static int __devinit adxl34x_spi_probe(struct spi_device *spi)
+static int adxl34x_spi_probe(struct spi_device *spi)
{
struct adxl34x *ac;
@@ -87,7 +87,7 @@ static int __devinit adxl34x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit adxl34x_spi_remove(struct spi_device *spi)
+static int adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
@@ -126,7 +126,7 @@ static struct spi_driver adxl34x_driver = {
.pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
- .remove = __devexit_p(adxl34x_spi_remove),
+ .remove = adxl34x_spi_remove,
};
module_spi_driver(adxl34x_driver);
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
new file mode 100644
index 00000000000..7a04f54ef96
--- /dev/null
+++ b/drivers/input/misc/arizona-haptics.c
@@ -0,0 +1,255 @@
+/*
+ * Arizona haptics driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+struct arizona_haptics {
+ struct arizona *arizona;
+ struct input_dev *input_dev;
+ struct work_struct work;
+
+ struct mutex mutex;
+ u8 intensity;
+};
+
+static void arizona_haptics_work(struct work_struct *work)
+{
+ struct arizona_haptics *haptics = container_of(work,
+ struct arizona_haptics,
+ work);
+ struct arizona *arizona = haptics->arizona;
+ struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
+ int ret;
+
+ if (!haptics->arizona->dapm) {
+ dev_err(arizona->dev, "No DAPM context\n");
+ return;
+ }
+
+ if (haptics->intensity) {
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_PHASE_2_INTENSITY,
+ ARIZONA_PHASE2_INTENSITY_MASK,
+ haptics->intensity);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set intensity: %d\n",
+ ret);
+ return;
+ }
+
+ /* This enable sequence will be a noop if already enabled */
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
+ 1 << ARIZONA_HAP_CTRL_SHIFT);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start haptics: %d\n",
+ ret);
+ return;
+ }
+
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ mutex_unlock(dapm_mutex);
+
+ } else {
+ /* This disable sequence will be a noop if already disabled */
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ mutex_unlock(dapm_mutex);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
+ 1 << ARIZONA_HAP_CTRL_SHIFT);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to stop haptics: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
+static int arizona_haptics_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct arizona *arizona = haptics->arizona;
+
+ if (!arizona->dapm) {
+ dev_err(arizona->dev, "No DAPM context\n");
+ return -EBUSY;
+ }
+
+ if (effect->u.rumble.strong_magnitude) {
+ /* Scale the magnitude into the range the device supports */
+ if (arizona->pdata.hap_act) {
+ haptics->intensity =
+ effect->u.rumble.strong_magnitude >> 9;
+ if (effect->direction < 0x8000)
+ haptics->intensity += 0x7f;
+ } else {
+ haptics->intensity =
+ effect->u.rumble.strong_magnitude >> 8;
+ }
+ } else {
+ haptics->intensity = 0;
+ }
+
+ schedule_work(&haptics->work);
+
+ return 0;
+}
+
+static void arizona_haptics_close(struct input_dev *input)
+{
+ struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
+
+ cancel_work_sync(&haptics->work);
+
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ if (haptics->arizona->dapm)
+ snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+
+ mutex_unlock(dapm_mutex);
+}
+
+static int arizona_haptics_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct arizona_haptics *haptics;
+ int ret;
+
+ haptics = devm_kzalloc(&pdev->dev, sizeof(*haptics), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->arizona = arizona;
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_ACT, arizona->pdata.hap_act);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set haptics actuator: %d\n",
+ ret);
+ return ret;
+ }
+
+ INIT_WORK(&haptics->work, arizona_haptics_work);
+
+ haptics->input_dev = input_allocate_device();
+ if (haptics->input_dev == NULL) {
+ dev_err(arizona->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(haptics->input_dev, haptics);
+
+ haptics->input_dev->name = "arizona:haptics";
+ haptics->input_dev->dev.parent = pdev->dev.parent;
+ haptics->input_dev->close = arizona_haptics_close;
+ __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
+
+ ret = input_ff_create_memless(haptics->input_dev, NULL,
+ arizona_haptics_play);
+ if (ret < 0) {
+ dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
+ ret);
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(haptics->input_dev);
+ if (ret < 0) {
+ dev_err(arizona->dev, "couldn't register input device: %d\n",
+ ret);
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, haptics);
+
+ return 0;
+
+err_iff:
+ if (haptics->input_dev)
+ input_ff_destroy(haptics->input_dev);
+err_ialloc:
+ input_free_device(haptics->input_dev);
+
+ return ret;
+}
+
+static int arizona_haptics_remove(struct platform_device *pdev)
+{
+ struct arizona_haptics *haptics = platform_get_drvdata(pdev);
+
+ input_unregister_device(haptics->input_dev);
+
+ return 0;
+}
+
+static struct platform_driver arizona_haptics_driver = {
+ .probe = arizona_haptics_probe,
+ .remove = arizona_haptics_remove,
+ .driver = {
+ .name = "arizona-haptics",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(arizona_haptics_driver);
+
+MODULE_ALIAS("platform:arizona-haptics");
+MODULE_DESCRIPTION("Arizona haptics driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 1c4146fccfd..a6666e142a9 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -90,7 +90,7 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_rotary_probe(struct platform_device *pdev)
+static int bfin_rotary_probe(struct platform_device *pdev)
{
struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
struct bfin_rot *rotary;
@@ -196,7 +196,7 @@ out1:
return error;
}
-static int __devexit bfin_rotary_remove(struct platform_device *pdev)
+static int bfin_rotary_remove(struct platform_device *pdev)
{
struct bfin_rot *rotary = platform_get_drvdata(pdev);
@@ -255,7 +255,7 @@ static const struct dev_pm_ops bfin_rotary_pm_ops = {
static struct platform_driver bfin_rotary_device_driver = {
.probe = bfin_rotary_probe,
- .remove = __devexit_p(bfin_rotary_remove),
+ .remove = bfin_rotary_remove,
.driver = {
.name = "bfin-rotary",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index e2f1e9f952b..08ffcabd722 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -158,7 +158,7 @@ struct bma150_data {
* are stated and verified by Bosch Sensortec where they are configured
* to provide a generic sensitivity performance.
*/
-static struct bma150_cfg default_cfg __devinitdata = {
+static struct bma150_cfg default_cfg = {
.any_motion_int = 1,
.hg_int = 1,
.lg_int = 1,
@@ -224,7 +224,7 @@ static int bma150_set_mode(struct bma150_data *bma150, u8 mode)
return 0;
}
-static int __devinit bma150_soft_reset(struct bma150_data *bma150)
+static int bma150_soft_reset(struct bma150_data *bma150)
{
int error;
@@ -237,19 +237,19 @@ static int __devinit bma150_soft_reset(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_set_range(struct bma150_data *bma150, u8 range)
+static int bma150_set_range(struct bma150_data *bma150, u8 range)
{
return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
BMA150_RANGE_MSK, BMA150_RANGE_REG);
}
-static int __devinit bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
+static int bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
{
return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS,
BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG);
}
-static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_low_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -273,7 +273,7 @@ static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
BMA150_LOW_G_EN_REG);
}
-static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_high_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -300,7 +300,7 @@ static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
}
-static int __devinit bma150_set_any_motion_interrupt(struct bma150_data *bma150,
+static int bma150_set_any_motion_interrupt(struct bma150_data *bma150,
u8 enable, u8 dur, u8 thres)
{
int error;
@@ -424,7 +424,7 @@ static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
bma150_close(bma150);
}
-static int __devinit bma150_initialize(struct bma150_data *bma150,
+static int bma150_initialize(struct bma150_data *bma150,
const struct bma150_cfg *cfg)
{
int error;
@@ -465,7 +465,7 @@ static int __devinit bma150_initialize(struct bma150_data *bma150,
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static void __devinit bma150_init_input_device(struct bma150_data *bma150,
+static void bma150_init_input_device(struct bma150_data *bma150,
struct input_dev *idev)
{
idev->name = BMA150_DRIVER;
@@ -479,7 +479,7 @@ static void __devinit bma150_init_input_device(struct bma150_data *bma150,
input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
}
-static int __devinit bma150_register_input_device(struct bma150_data *bma150)
+static int bma150_register_input_device(struct bma150_data *bma150)
{
struct input_dev *idev;
int error;
@@ -504,7 +504,7 @@ static int __devinit bma150_register_input_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
+static int bma150_register_polled_device(struct bma150_data *bma150)
{
struct input_polled_dev *ipoll_dev;
int error;
@@ -535,7 +535,7 @@ static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_probe(struct i2c_client *client,
+static int bma150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct bma150_platform_data *pdata = client->dev.platform_data;
@@ -613,7 +613,7 @@ err_free_mem:
return error;
}
-static int __devexit bma150_remove(struct i2c_client *client)
+static int bma150_remove(struct i2c_client *client)
{
struct bma150_data *bma150 = i2c_get_clientdata(client);
@@ -670,7 +670,7 @@ static struct i2c_driver bma150_driver = {
.class = I2C_CLASS_HWMON,
.id_table = bma150_id,
.probe = bma150_probe,
- .remove = __devexit_p(bma150_remove),
+ .remove = bma150_remove,
};
module_i2c_driver(bma150_driver);
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index fe9b85f0779..4fdef98ceb5 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -55,7 +55,7 @@ static const struct cma3000_bus_ops cma3000_i2c_bops = {
.write = cma3000_i2c_set,
};
-static int __devinit cma3000_i2c_probe(struct i2c_client *client,
+static int cma3000_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cma3000_accl_data *data;
@@ -69,7 +69,7 @@ static int __devinit cma3000_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cma3000_i2c_remove(struct i2c_client *client)
+static int cma3000_i2c_remove(struct i2c_client *client)
{
struct cma3000_accl_data *data = i2c_get_clientdata(client);
@@ -114,7 +114,7 @@ MODULE_DEVICE_TABLE(i2c, cma3000_i2c_id);
static struct i2c_driver cma3000_i2c_driver = {
.probe = cma3000_i2c_probe,
- .remove = __devexit_p(cma3000_i2c_remove),
+ .remove = cma3000_i2c_remove,
.id_table = cma3000_i2c_id,
.driver = {
.name = "cma3000_i2c_accl",
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 53e43d29514..4f77f87847e 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -73,7 +73,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
+static int cobalt_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -135,7 +135,7 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
+static int cobalt_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -157,7 +157,7 @@ MODULE_ALIAS("platform:Cobalt buttons");
static struct platform_driver cobalt_buttons_driver = {
.probe = cobalt_buttons_probe,
- .remove = __devexit_p(cobalt_buttons_remove),
+ .remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 3c843cd725f..020569a499f 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -24,7 +24,6 @@ struct da9052_onkey {
struct da9052 *da9052;
struct input_dev *input;
struct delayed_work work;
- unsigned int irq;
};
static void da9052_onkey_query(struct da9052_onkey *onkey)
@@ -71,12 +70,11 @@ static irqreturn_t da9052_onkey_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit da9052_onkey_probe(struct platform_device *pdev)
+static int da9052_onkey_probe(struct platform_device *pdev)
{
struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
struct da9052_onkey *onkey;
struct input_dev *input_dev;
- int irq;
int error;
if (!da9052) {
@@ -84,13 +82,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq = platform_get_irq_byname(pdev, "ONKEY");
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Failed to get an IRQ for input device, %d\n", irq);
- return -EINVAL;
- }
-
onkey = kzalloc(sizeof(*onkey), GFP_KERNEL);
input_dev = input_allocate_device();
if (!onkey || !input_dev) {
@@ -101,7 +92,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
onkey->input = input_dev;
onkey->da9052 = da9052;
- onkey->irq = irq;
INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);
input_dev->name = "da9052-onkey";
@@ -111,13 +101,11 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY);
__set_bit(KEY_POWER, input_dev->keybit);
- error = request_threaded_irq(onkey->irq, NULL, da9052_onkey_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ONKEY", onkey);
+ error = da9052_request_irq(onkey->da9052, DA9052_IRQ_NONKEY, "ONKEY",
+ da9052_onkey_irq, onkey);
if (error < 0) {
dev_err(onkey->da9052->dev,
- "Failed to register ONKEY IRQ %d, error = %d\n",
- onkey->irq, error);
+ "Failed to register ONKEY IRQ: %d\n", error);
goto err_free_mem;
}
@@ -132,7 +120,7 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return 0;
err_free_irq:
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
err_free_mem:
input_free_device(input_dev);
@@ -141,11 +129,11 @@ err_free_mem:
return error;
}
-static int __devexit da9052_onkey_remove(struct platform_device *pdev)
+static int da9052_onkey_remove(struct platform_device *pdev)
{
struct da9052_onkey *onkey = platform_get_drvdata(pdev);
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
input_unregister_device(onkey->input);
@@ -156,7 +144,7 @@ static int __devexit da9052_onkey_remove(struct platform_device *pdev)
static struct platform_driver da9052_onkey_driver = {
.probe = da9052_onkey_probe,
- .remove = __devexit_p(da9052_onkey_remove),
+ .remove = da9052_onkey_remove,
.driver = {
.name = "da9052-onkey",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
new file mode 100644
index 00000000000..ee6ae3a0017
--- /dev/null
+++ b/drivers/input/misc/da9055_onkey.c
@@ -0,0 +1,171 @@
+/*
+ * ON pin driver for Dialog DA9055 PMICs
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+
+struct da9055_onkey {
+ struct da9055 *da9055;
+ struct input_dev *input;
+ struct delayed_work work;
+};
+
+static void da9055_onkey_query(struct da9055_onkey *onkey)
+{
+ int key_stat;
+
+ key_stat = da9055_reg_read(onkey->da9055, DA9055_REG_STATUS_A);
+ if (key_stat < 0) {
+ dev_err(onkey->da9055->dev,
+ "Failed to read onkey event %d\n", key_stat);
+ } else {
+ key_stat &= DA9055_NOKEY_STS;
+ /*
+ * Onkey status bit is cleared when the onkey button is released.
+ */
+ if (!key_stat) {
+ input_report_key(onkey->input, KEY_POWER, 0);
+ input_sync(onkey->input);
+ }
+ }
+
+ /*
+ * Interrupt is generated only when the ONKEY pin is asserted.
+ * Hence the deassertion of the pin is simulated through a work queue.
+ */
+ if (key_stat)
+ schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
+
+}
+
+static void da9055_onkey_work(struct work_struct *work)
+{
+ struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
+ work.work);
+
+ da9055_onkey_query(onkey);
+}
+
+static irqreturn_t da9055_onkey_irq(int irq, void *data)
+{
+ struct da9055_onkey *onkey = data;
+
+ input_report_key(onkey->input, KEY_POWER, 1);
+ input_sync(onkey->input);
+
+ da9055_onkey_query(onkey);
+
+ return IRQ_HANDLED;
+}
+
+static int da9055_onkey_probe(struct platform_device *pdev)
+{
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_onkey *onkey;
+ struct input_dev *input_dev;
+ int irq, err;
+
+ irq = platform_get_irq_byname(pdev, "ONKEY");
+ if (irq < 0) {
+ dev_err(&pdev->dev,
+ "Failed to get an IRQ for input device, %d\n", irq);
+ return -EINVAL;
+ }
+
+ onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onkey->input = input_dev;
+ onkey->da9055 = da9055;
+ input_dev->name = "da9055-onkey";
+ input_dev->phys = "da9055-onkey/input0";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ __set_bit(KEY_POWER, input_dev->keybit);
+
+ INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
+
+ irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ONKEY", onkey);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to register ONKEY IRQ %d, error = %d\n",
+ irq, err);
+ goto err_free_input;
+ }
+
+ err = input_register_device(input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register input device, %d\n",
+ err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, onkey);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+err_free_input:
+ input_free_device(input_dev);
+
+ return err;
+}
+
+static int da9055_onkey_remove(struct platform_device *pdev)
+{
+ struct da9055_onkey *onkey = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, "ONKEY");
+
+ irq = regmap_irq_get_virq(onkey->da9055->irq_data, irq);
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+ input_unregister_device(onkey->input);
+
+ return 0;
+}
+
+static struct platform_driver da9055_onkey_driver = {
+ .probe = da9055_onkey_probe,
+ .remove = da9055_onkey_remove,
+ .driver = {
+ .name = "da9055-onkey",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9055_onkey_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Onkey driver for DA9055");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-onkey");
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index c1313d8535c..a309a5c0899 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -173,7 +173,7 @@ static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
/*----------------------------------------------------------------------*/
-static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
+static int dm355evm_keys_probe(struct platform_device *pdev)
{
struct dm355evm_keys *keys;
struct input_dev *input;
@@ -239,7 +239,7 @@ fail1:
return status;
}
-static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
+static int dm355evm_keys_remove(struct platform_device *pdev)
{
struct dm355evm_keys *keys = platform_get_drvdata(pdev);
@@ -262,7 +262,7 @@ static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
*/
static struct platform_driver dm355evm_keys_driver = {
.probe = dm355evm_keys_probe,
- .remove = __devexit_p(dm355evm_keys_remove),
+ .remove = dm355evm_keys_remove,
.driver = {
.owner = THIS_MODULE,
.name = "dm355evm_keys",
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index b6664cfa340..fe30bd0fe4b 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -98,7 +98,7 @@ static void gp2a_device_close(struct input_dev *dev)
"unable to deactivate, err %d\n", error);
}
-static int __devinit gp2a_initialize(struct gp2a_data *dt)
+static int gp2a_initialize(struct gp2a_data *dt)
{
int error;
@@ -122,7 +122,7 @@ static int __devinit gp2a_initialize(struct gp2a_data *dt)
return error;
}
-static int __devinit gp2a_probe(struct i2c_client *client,
+static int gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct gp2a_platform_data *pdata = client->dev.platform_data;
@@ -205,7 +205,7 @@ err_hw_shutdown:
return error;
}
-static int __devexit gp2a_remove(struct i2c_client *client)
+static int gp2a_remove(struct i2c_client *client)
{
struct gp2a_data *dt = i2c_get_clientdata(client);
const struct gp2a_platform_data *pdata = dt->pdata;
@@ -277,7 +277,7 @@ static struct i2c_driver gp2a_i2c_driver = {
.pm = &gp2a_pm,
},
.probe = gp2a_probe,
- .remove = __devexit_p(gp2a_remove),
+ .remove = gp2a_remove,
.id_table = gp2a_i2c_id,
};
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 277a0574c19..da05cca8b56 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -96,7 +96,7 @@ static void gpio_tilt_polled_close(struct input_polled_dev *dev)
pdata->disable(tdev->dev);
}
-static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+static int gpio_tilt_polled_probe(struct platform_device *pdev)
{
const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -179,7 +179,7 @@ err_free_tdev:
return error;
}
-static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+static int gpio_tilt_polled_remove(struct platform_device *pdev)
{
struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
const struct gpio_tilt_platform_data *pdata = tdev->pdata;
@@ -198,7 +198,7 @@ static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_tilt_polled_driver = {
.probe = gpio_tilt_polled_probe,
- .remove = __devexit_p(gpio_tilt_polled_remove),
+ .remove = gpio_tilt_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 50e28306830..6ab3decc86e 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -87,7 +87,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
+static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -132,7 +132,7 @@ static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
return err;
}
-static int __devexit ixp4xx_spkr_remove(struct platform_device *dev)
+static int ixp4xx_spkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
@@ -165,7 +165,7 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ixp4xx_spkr_probe,
- .remove = __devexit_p(ixp4xx_spkr_remove),
+ .remove = ixp4xx_spkr_remove,
.shutdown = ixp4xx_spkr_shutdown,
};
module_platform_driver(ixp4xx_spkr_platform_driver);
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index f46139f19ff..a993b67a8a5 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -295,7 +295,7 @@ static void kxtj9_input_close(struct input_dev *dev)
kxtj9_disable(tj9);
}
-static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
+static void kxtj9_init_input_device(struct kxtj9_data *tj9,
struct input_dev *input_dev)
{
__set_bit(EV_ABS, input_dev->evbit);
@@ -308,7 +308,7 @@ static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
input_dev->dev.parent = &tj9->client->dev;
}
-static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_input_device(struct kxtj9_data *tj9)
{
struct input_dev *input_dev;
int err;
@@ -433,7 +433,7 @@ static void kxtj9_polled_input_close(struct input_polled_dev *dev)
kxtj9_disable(tj9);
}
-static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
{
int err;
struct input_polled_dev *poll_dev;
@@ -466,7 +466,7 @@ static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
return 0;
}
-static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
+static void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
{
input_unregister_polled_device(tj9->poll_dev);
input_free_polled_device(tj9->poll_dev);
@@ -485,7 +485,7 @@ static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
#endif
-static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
+static int kxtj9_verify(struct kxtj9_data *tj9)
{
int retval;
@@ -506,7 +506,7 @@ out:
return retval;
}
-static int __devinit kxtj9_probe(struct i2c_client *client,
+static int kxtj9_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct kxtj9_platform_data *pdata = client->dev.platform_data;
@@ -594,7 +594,7 @@ err_free_mem:
return err;
}
-static int __devexit kxtj9_remove(struct i2c_client *client)
+static int kxtj9_remove(struct i2c_client *client)
{
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -663,7 +663,7 @@ static struct i2c_driver kxtj9_driver = {
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
- .remove = __devexit_p(kxtj9_remove),
+ .remove = kxtj9_remove,
.id_table = kxtj9_id,
};
diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c
index 0c64d9bb718..b40ee4b47f4 100644
--- a/drivers/input/misc/m68kspkr.c
+++ b/drivers/input/misc/m68kspkr.c
@@ -48,7 +48,7 @@ static int m68kspkr_event(struct input_dev *dev, unsigned int type, unsigned int
return 0;
}
-static int __devinit m68kspkr_probe(struct platform_device *dev)
+static int m68kspkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -80,7 +80,7 @@ static int __devinit m68kspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit m68kspkr_remove(struct platform_device *dev)
+static int m68kspkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
@@ -104,7 +104,7 @@ static struct platform_driver m68kspkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = m68kspkr_probe,
- .remove = __devexit_p(m68kspkr_remove),
+ .remove = m68kspkr_remove,
.shutdown = m68kspkr_shutdown,
};
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 0a12b74140d..369a39de4ff 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t max8925_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+static int max8925_onkey_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_onkey_info *info;
@@ -141,7 +141,7 @@ err_free_mem:
return error;
}
-static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+static int max8925_onkey_remove(struct platform_device *pdev)
{
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -195,7 +195,7 @@ static struct platform_driver max8925_onkey_driver = {
.pm = &max8925_onkey_pm_ops,
},
.probe = max8925_onkey_probe,
- .remove = __devexit_p(max8925_onkey_remove),
+ .remove = max8925_onkey_remove,
};
module_platform_driver(max8925_onkey_driver);
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 05b7b8bfaf0..e973133212a 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -241,7 +241,7 @@ static void max8997_haptic_close(struct input_dev *dev)
max8997_haptic_disable(chip);
}
-static int __devinit max8997_haptic_probe(struct platform_device *pdev)
+static int max8997_haptic_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
@@ -354,7 +354,7 @@ err_free_mem:
return error;
}
-static int __devexit max8997_haptic_remove(struct platform_device *pdev)
+static int max8997_haptic_remove(struct platform_device *pdev)
{
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -396,7 +396,7 @@ static struct platform_driver max8997_haptic_driver = {
.pm = &max8997_haptic_pm_ops,
},
.probe = max8997_haptic_probe,
- .remove = __devexit_p(max8997_haptic_remove),
+ .remove = max8997_haptic_remove,
.id_table = max8997_haptic_id,
};
module_platform_driver(max8997_haptic_driver);
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 8428f1e8e83..0906ca593d5 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -89,7 +89,7 @@ static irqreturn_t button_irq(int irq, void *_priv)
return IRQ_HANDLED;
}
-static int __devinit mc13783_pwrbutton_probe(struct platform_device *pdev)
+static int mc13783_pwrbutton_probe(struct platform_device *pdev)
{
const struct mc13xxx_buttons_platform_data *pdata;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
@@ -230,7 +230,7 @@ free_input_dev:
return err;
}
-static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
+static int mc13783_pwrbutton_remove(struct platform_device *pdev)
{
struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
const struct mc13xxx_buttons_platform_data *pdata;
@@ -257,7 +257,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
- .remove = __devexit_p(mc13783_pwrbutton_remove),
+ .remove = mc13783_pwrbutton_remove,
.driver = {
.name = "mc13783-pwrbutton",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 873ebced544..480557f14f2 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -167,7 +167,7 @@ static void mma8450_close(struct input_polled_dev *dev)
/*
* I2C init/probing/exit functions
*/
-static int __devinit mma8450_probe(struct i2c_client *c,
+static int mma8450_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
struct input_polled_dev *idev;
@@ -212,7 +212,7 @@ err_free_mem:
return err;
}
-static int __devexit mma8450_remove(struct i2c_client *c)
+static int mma8450_remove(struct i2c_client *c)
{
struct mma8450 *m = i2c_get_clientdata(c);
struct input_polled_dev *idev = m->idev;
@@ -243,7 +243,7 @@ static struct i2c_driver mma8450_driver = {
.of_match_table = mma8450_dt_ids,
},
.probe = mma8450_probe,
- .remove = __devexit_p(mma8450_remove),
+ .remove = mma8450_remove,
.id_table = mma8450_id,
};
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 306f84c2d8f..dce0d95943c 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -257,7 +257,7 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
*
* Called during device probe; configures the sampling method.
*/
-static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+static int mpu3050_hw_init(struct mpu3050_sensor *sensor)
{
struct i2c_client *client = sensor->client;
int ret;
@@ -306,7 +306,7 @@ static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
*
* If present install the relevant sysfs interfaces and input device.
*/
-static int __devinit mpu3050_probe(struct i2c_client *client,
+static int mpu3050_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mpu3050_sensor *sensor;
@@ -402,7 +402,7 @@ err_free_mem:
*
* Our sensor is going away, clean up the resources.
*/
-static int __devexit mpu3050_remove(struct i2c_client *client)
+static int mpu3050_remove(struct i2c_client *client)
{
struct mpu3050_sensor *sensor = i2c_get_clientdata(client);
@@ -471,7 +471,7 @@ static struct i2c_driver mpu3050_i2c_driver = {
.of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
- .remove = __devexit_p(mpu3050_remove),
+ .remove = mpu3050_remove,
.id_table = mpu3050_ids,
};
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index e09b4fe8191..40ac9a5adf8 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -48,7 +48,7 @@ static irqreturn_t pcap_keys_handler(int irq, void *_pcap_keys)
return IRQ_HANDLED;
}
-static int __devinit pcap_keys_probe(struct platform_device *pdev)
+static int pcap_keys_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
struct pcap_keys *pcap_keys;
@@ -104,7 +104,7 @@ fail:
return err;
}
-static int __devexit pcap_keys_remove(struct platform_device *pdev)
+static int pcap_keys_remove(struct platform_device *pdev)
{
struct pcap_keys *pcap_keys = platform_get_drvdata(pdev);
@@ -119,7 +119,7 @@ static int __devexit pcap_keys_remove(struct platform_device *pdev)
static struct platform_driver pcap_keys_device_driver = {
.probe = pcap_keys_probe,
- .remove = __devexit_p(pcap_keys_remove),
+ .remove = pcap_keys_remove,
.driver = {
.name = "pcap-keys",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 53891de80b0..73b13ebabe5 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -53,7 +53,7 @@ pcf50633_input_irq(int irq, void *data)
input_sync(input->input_dev);
}
-static int __devinit pcf50633_input_probe(struct platform_device *pdev)
+static int pcf50633_input_probe(struct platform_device *pdev)
{
struct pcf50633_input *input;
struct input_dev *input_dev;
@@ -93,7 +93,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_input_remove(struct platform_device *pdev)
+static int pcf50633_input_remove(struct platform_device *pdev)
{
struct pcf50633_input *input = platform_get_drvdata(pdev);
@@ -111,7 +111,7 @@ static struct platform_driver pcf50633_input_driver = {
.name = "pcf50633-input",
},
.probe = pcf50633_input_probe,
- .remove = __devexit_p(pcf50633_input_remove),
+ .remove = pcf50633_input_remove,
};
module_platform_driver(pcf50633_input_driver);
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 544c6635abe..e37392976fd 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -82,7 +82,7 @@ static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int i, ret;
struct input_dev *idev;
@@ -156,7 +156,7 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
return ret;
}
-static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+static int pcf8574_kp_remove(struct i2c_client *client)
{
struct kp_data *lp = i2c_get_clientdata(client);
@@ -212,7 +212,7 @@ static struct i2c_driver pcf8574_kp_driver = {
#endif
},
.probe = pcf8574_kp_probe,
- .remove = __devexit_p(pcf8574_kp_remove),
+ .remove = pcf8574_kp_remove,
.id_table = pcf8574_kp_id,
};
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index b2484aa07f3..199db78acc4 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -63,7 +63,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
return 0;
}
-static int __devinit pcspkr_probe(struct platform_device *dev)
+static int pcspkr_probe(struct platform_device *dev)
{
struct input_dev *pcspkr_dev;
int err;
@@ -95,7 +95,7 @@ static int __devinit pcspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit pcspkr_remove(struct platform_device *dev)
+static int pcspkr_remove(struct platform_device *dev)
{
struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
@@ -131,7 +131,7 @@ static struct platform_driver pcspkr_platform_driver = {
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
- .remove = __devexit_p(pcspkr_remove),
+ .remove = pcspkr_remove,
.shutdown = pcspkr_shutdown,
};
module_platform_driver(pcspkr_platform_driver);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index dfbfb463ea5..a9da65e41c5 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -178,7 +178,7 @@ static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
return 0;
}
-static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
+static int pm8xxx_vib_probe(struct platform_device *pdev)
{
struct pm8xxx_vib *vib;
@@ -242,7 +242,7 @@ err_free_mem:
return error;
}
-static int __devexit pm8xxx_vib_remove(struct platform_device *pdev)
+static int pm8xxx_vib_remove(struct platform_device *pdev)
{
struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
@@ -270,7 +270,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
- .remove = __devexit_p(pm8xxx_vib_remove),
+ .remove = pm8xxx_vib_remove,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 0f83d0f1d01..4b811be7397 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -81,7 +81,7 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
-static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
int key_release_irq = platform_get_irq(pdev, 0);
@@ -187,7 +187,7 @@ free_pwrkey:
return err;
}
-static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
{
struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
int key_release_irq = platform_get_irq(pdev, 0);
@@ -206,7 +206,7 @@ static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
static struct platform_driver pmic8xxx_pwrkey_driver = {
.probe = pmic8xxx_pwrkey_probe,
- .remove = __devexit_p(pmic8xxx_pwrkey_remove),
+ .remove = pmic8xxx_pwrkey_remove,
.driver = {
.name = PM8XXX_PWRKEY_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index fc84c8a5114..0808868461d 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -65,7 +65,7 @@ static int pwm_beeper_event(struct input_dev *input,
return 0;
}
-static int __devinit pwm_beeper_probe(struct platform_device *pdev)
+static int pwm_beeper_probe(struct platform_device *pdev)
{
unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
struct pwm_beeper *beeper;
@@ -75,7 +75,11 @@ static int __devinit pwm_beeper_probe(struct platform_device *pdev)
if (!beeper)
return -ENOMEM;
- beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ beeper->pwm = pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(beeper->pwm)) {
+ dev_dbg(&pdev->dev, "unable to request PWM, trying legacy API\n");
+ beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ }
if (IS_ERR(beeper->pwm)) {
error = PTR_ERR(beeper->pwm);
@@ -125,7 +129,7 @@ err_free:
return error;
}
-static int __devexit pwm_beeper_remove(struct platform_device *pdev)
+static int pwm_beeper_remove(struct platform_device *pdev)
{
struct pwm_beeper *beeper = platform_get_drvdata(pdev);
@@ -171,13 +175,21 @@ static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
#define PWM_BEEPER_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id pwm_beeper_match[] = {
+ { .compatible = "pwm-beeper", },
+ { },
+};
+#endif
+
static struct platform_driver pwm_beeper_driver = {
.probe = pwm_beeper_probe,
- .remove = __devexit_p(pwm_beeper_remove),
+ .remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
.owner = THIS_MODULE,
.pm = PWM_BEEPER_PM_OPS,
+ .of_match_table = of_match_ptr(pwm_beeper_match),
},
};
module_platform_driver(pwm_beeper_driver);
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index aeb02bcf723..fb4f8ac3343 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -51,7 +51,7 @@ static void rb532_button_poll(struct input_polled_dev *poll_dev)
input_sync(poll_dev->input);
}
-static int __devinit rb532_button_probe(struct platform_device *pdev)
+static int rb532_button_probe(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev;
int error;
@@ -81,7 +81,7 @@ static int __devinit rb532_button_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rb532_button_remove(struct platform_device *pdev)
+static int rb532_button_remove(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
@@ -94,7 +94,7 @@ static int __devexit rb532_button_remove(struct platform_device *pdev)
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
- .remove = __devexit_p(rb532_button_remove),
+ .remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
new file mode 100644
index 00000000000..7ca09baa001
--- /dev/null
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -0,0 +1,99 @@
+/*
+ * Retu power button driver.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Original code written by Ari Saastamoinen, Juha Yrjölä and Felipe Balbi.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#define RETU_STATUS_PWRONX (1 << 5)
+
+static irqreturn_t retu_pwrbutton_irq(int irq, void *_pwr)
+{
+ struct input_dev *idev = _pwr;
+ struct retu_dev *rdev = input_get_drvdata(idev);
+ bool state;
+
+ state = !(retu_read(rdev, RETU_REG_STATUS) & RETU_STATUS_PWRONX);
+ input_report_key(idev, KEY_POWER, state);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static int retu_pwrbutton_probe(struct platform_device *pdev)
+{
+ struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *idev;
+ int irq;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = "retu-pwrbutton";
+ idev->dev.parent = &pdev->dev;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+ input_set_drvdata(idev, rdev);
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, retu_pwrbutton_irq, 0,
+ "retu-pwrbutton", idev);
+ if (error)
+ return error;
+
+ error = input_register_device(idev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int retu_pwrbutton_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver retu_pwrbutton_driver = {
+ .probe = retu_pwrbutton_probe,
+ .remove = retu_pwrbutton_remove,
+ .driver = {
+ .name = "retu-pwrbutton",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(retu_pwrbutton_driver);
+
+MODULE_ALIAS("platform:retu-pwrbutton");
+MODULE_DESCRIPTION("Retu Power Button");
+MODULE_AUTHOR("Ari Saastamoinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
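
The new driver above leans entirely on managed (devm_*) resources, which is why its error paths are bare returns and retu_pwrbutton_remove() has nothing left to free. A minimal sketch of that pattern; example_probe/example_irq are hypothetical names used only for illustration:

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	/* devm_* allocations are released automatically, in reverse order,
	 * when probe() fails or the device is unbound. */
	struct input_dev *idev = devm_input_allocate_device(&pdev->dev);

	if (!idev)
		return -ENOMEM;		/* nothing to unwind by hand */

	/* capability setup and input_register_device() omitted */
	return devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
				example_irq, 0, "example", idev);
}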
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 99a49e4968d..aff47b2c38f 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -149,8 +149,7 @@ static struct of_device_id rotary_encoder_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-static struct rotary_encoder_platform_data * __devinit
-rotary_encoder_parse_dt(struct device *dev)
+static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
{
const struct of_device_id *of_id =
of_match_device(rotary_encoder_of_match, dev);
@@ -192,7 +191,7 @@ rotary_encoder_parse_dt(struct device *dev)
}
#endif
-static int __devinit rotary_encoder_probe(struct platform_device *pdev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
@@ -302,7 +301,7 @@ exit_free_mem:
return err;
}
-static int __devexit rotary_encoder_remove(struct platform_device *pdev)
+static int rotary_encoder_remove(struct platform_device *pdev)
{
struct rotary_encoder *encoder = platform_get_drvdata(pdev);
const struct rotary_encoder_platform_data *pdata = encoder->pdata;
@@ -325,7 +324,7 @@ static int __devexit rotary_encoder_remove(struct platform_device *pdev)
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = __devexit_p(rotary_encoder_remove),
+ .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 5d9fd557119..ad6415ceaf5 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -91,7 +91,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit sgi_buttons_probe(struct platform_device *pdev)
+static int sgi_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -143,7 +143,7 @@ static int __devinit sgi_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sgi_buttons_remove(struct platform_device *pdev)
+static int sgi_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -158,7 +158,7 @@ static int __devexit sgi_buttons_remove(struct platform_device *pdev)
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
- .remove = __devexit_p(sgi_buttons_remove),
+ .remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0122f535157..a53586a7fbd 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -139,7 +139,7 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return 0;
}
-static int __devinit sparcspkr_probe(struct device *dev)
+static int sparcspkr_probe(struct device *dev)
{
struct sparcspkr_state *state = dev_get_drvdata(dev);
struct input_dev *input_dev;
@@ -182,7 +182,7 @@ static void sparcspkr_shutdown(struct platform_device *dev)
state->event(input_dev, EV_SND, SND_BELL, 0);
}
-static int __devinit bbc_beep_probe(struct platform_device *op)
+static int bbc_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct bbc_beep_info *info;
@@ -229,7 +229,7 @@ out_err:
return err;
}
-static int __devexit bbc_remove(struct platform_device *op)
+static int bbc_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct input_dev *input_dev = state->input_dev;
@@ -263,11 +263,11 @@ static struct platform_driver bbc_beep_driver = {
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
- .remove = __devexit_p(bbc_remove),
+ .remove = bbc_remove,
.shutdown = sparcspkr_shutdown,
};
-static int __devinit grover_beep_probe(struct platform_device *op)
+static int grover_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct grover_beep_info *info;
@@ -310,7 +310,7 @@ out_err:
return err;
}
-static int __devexit grover_remove(struct platform_device *op)
+static int grover_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct grover_beep_info *info = &state->u.grover;
@@ -345,7 +345,7 @@ static struct platform_driver grover_beep_driver = {
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
- .remove = __devexit_p(grover_remove),
+ .remove = grover_remove,
.shutdown = sparcspkr_shutdown,
};
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b3dd96d6448..27c2bc8aa89 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,8 +39,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
int err;
u8 value;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
- STS_HW_CONDITIONS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &value, STS_HW_CONDITIONS);
if (!err) {
pm_wakeup_event(pwr->dev.parent, 0);
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 2194a3c7236..78eb6b30580 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -207,7 +207,7 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
return false;
}
-static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
+static int twl4030_vibra_probe(struct platform_device *pdev)
{
struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
@@ -269,7 +269,7 @@ err_kzalloc:
return ret;
}
-static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
+static int twl4030_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -283,7 +283,7 @@ static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
- .remove = __devexit_p(twl4030_vibra_remove),
+ .remove = twl4030_vibra_remove,
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index c8a288ae1d5..71a28ee699f 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -255,7 +255,7 @@ static int twl6040_vibra_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
-static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
+static int twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
struct device *twl6040_core_dev = pdev->dev.parent;
@@ -418,7 +418,7 @@ err_kzalloc:
return ret;
}
-static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
+static int twl6040_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -433,7 +433,7 @@ static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
- .remove = __devexit_p(twl6040_vibra_remove),
+ .remove = twl6040_vibra_remove,
.driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index e2bdfd4bea7..56536f4b957 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -170,7 +170,7 @@ static u16 bios_pop_queue(void)
return regs.eax;
}
-static void __devinit bios_attach(void)
+static void bios_attach(void)
{
struct regs regs;
@@ -190,7 +190,7 @@ static void bios_detach(void)
call_bios(&regs);
}
-static u8 __devinit bios_get_cmos_address(void)
+static u8 bios_get_cmos_address(void)
{
struct regs regs;
@@ -202,7 +202,7 @@ static u8 __devinit bios_get_cmos_address(void)
return regs.ecx;
}
-static u16 __devinit bios_get_default_setting(u8 subsys)
+static u16 bios_get_default_setting(u8 subsys)
{
struct regs regs;
@@ -1052,7 +1052,7 @@ static struct led_classdev wistron_wifi_led = {
.brightness_set = wistron_wifi_led_set,
};
-static void __devinit wistron_led_init(struct device *parent)
+static void wistron_led_init(struct device *parent)
{
if (leds_present & FE_WIFI_LED) {
u16 wifi = bios_get_default_setting(WIFI);
@@ -1077,7 +1077,7 @@ static void __devinit wistron_led_init(struct device *parent)
}
}
-static void __devexit wistron_led_remove(void)
+static void wistron_led_remove(void)
{
if (leds_present & FE_MAIL_LED)
led_classdev_unregister(&wistron_mail_led);
@@ -1168,7 +1168,7 @@ static void wistron_poll(struct input_polled_dev *dev)
dev->poll_interval = POLL_INTERVAL_DEFAULT;
}
-static int __devinit wistron_setup_keymap(struct input_dev *dev,
+static int wistron_setup_keymap(struct input_dev *dev,
struct key_entry *entry)
{
switch (entry->type) {
@@ -1199,7 +1199,7 @@ static int __devinit wistron_setup_keymap(struct input_dev *dev,
return 0;
}
-static int __devinit setup_input_dev(void)
+static int setup_input_dev(void)
{
struct input_dev *input_dev;
int error;
@@ -1237,7 +1237,7 @@ static int __devinit setup_input_dev(void)
/* Driver core */
-static int __devinit wistron_probe(struct platform_device *dev)
+static int wistron_probe(struct platform_device *dev)
{
int err;
@@ -1277,7 +1277,7 @@ static int __devinit wistron_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wistron_remove(struct platform_device *dev)
+static int wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
input_unregister_polled_device(wistron_idev);
@@ -1334,7 +1334,7 @@ static struct platform_driver wistron_driver = {
#endif
},
.probe = wistron_probe,
- .remove = __devexit_p(wistron_remove),
+ .remove = wistron_remove,
};
static int __init wb_module_init(void)
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 6790a812a1d..558767d8ebf 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -69,14 +69,15 @@ static irqreturn_t wm831x_on_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit wm831x_on_probe(struct platform_device *pdev)
+static int wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
- wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
+ wm831x_on = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_on),
+ GFP_KERNEL);
if (!wm831x_on) {
dev_err(&pdev->dev, "Can't allocate data\n");
return -ENOMEM;
@@ -120,11 +121,10 @@ err_irq:
err_input_dev:
input_free_device(wm831x_on->dev);
err:
- kfree(wm831x_on);
return ret;
}
-static int __devexit wm831x_on_remove(struct platform_device *pdev)
+static int wm831x_on_remove(struct platform_device *pdev)
{
struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -132,14 +132,13 @@ static int __devexit wm831x_on_remove(struct platform_device *pdev)
free_irq(irq, wm831x_on);
cancel_delayed_work_sync(&wm831x_on->work);
input_unregister_device(wm831x_on->dev);
- kfree(wm831x_on);
return 0;
}
static struct platform_driver wm831x_on_driver = {
.probe = wm831x_on_probe,
- .remove = __devexit_p(wm831x_on_remove),
+ .remove = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6f7d9901303..e21c1816a8f 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -104,7 +104,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit xenkbd_probe(struct xenbus_device *dev,
+static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i, abs;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index cf5af1f495e..e229fa3cad9 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -767,9 +767,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
psmouse->packet[5]) & 0x80) ||
(!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5], psmouse->packet[6]);
+ "refusing packet %4ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
return PSMOUSE_BAD_DATA;
}
@@ -831,9 +830,8 @@ static void alps_flush_packet(unsigned long data)
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5]);
+ "refusing packet %3ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
} else {
alps_process_packet(psmouse);
}
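
The rewritten psmouse_dbg() calls above rely on the kernel's %*ph printf extension (lib/vsprintf.c): the field width is the number of bytes to dump, up to 64, so "%4ph" replaces four separate "%x" conversions. A tiny illustration with made-up bytes:

u8 pkt[4] = { 0x1a, 0x2b, 0x3c, 0x4d };

pr_debug("packet %x %x %x %x\n", pkt[0], pkt[1], pkt[2], pkt[3]);
pr_debug("packet %4ph\n", pkt);	/* prints "packet 1a 2b 3c 4d" */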
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 39fe9b737ca..532eaca4cc5 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
input_sync(input);
}
-static int __devinit gpio_mouse_probe(struct platform_device *pdev)
+static int gpio_mouse_probe(struct platform_device *pdev)
{
struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
struct input_polled_dev *input_poll;
@@ -150,7 +150,7 @@ static int __devinit gpio_mouse_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_mouse_remove(struct platform_device *pdev)
+static int gpio_mouse_remove(struct platform_device *pdev)
{
struct input_polled_dev *input = platform_get_drvdata(pdev);
struct gpio_mouse_platform_data *pdata = input->private;
@@ -172,7 +172,7 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
static struct platform_driver gpio_mouse_device_driver = {
.probe = gpio_mouse_probe,
- .remove = __devexit_p(gpio_mouse_remove),
+ .remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
index 5f278176eb9..0a60717b91c 100644
--- a/drivers/input/mouse/maplemouse.c
+++ b/drivers/input/mouse/maplemouse.c
@@ -64,7 +64,7 @@ static void dc_mouse_close(struct input_dev *dev)
}
/* allow the mouse to be used */
-static int __devinit probe_maple_mouse(struct device *dev)
+static int probe_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
@@ -114,7 +114,7 @@ fail:
return error;
}
-static int __devexit remove_maple_mouse(struct device *dev)
+static int remove_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_mouse *mse = maple_get_drvdata(mdev);
@@ -132,7 +132,7 @@ static struct maple_driver dc_mouse_driver = {
.drv = {
.name = "Dreamcast_mouse",
.probe = probe_maple_mouse,
- .remove = __devexit_p(remove_maple_mouse),
+ .remove = remove_maple_mouse,
},
};
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index c29ae7654d5..8e1b98ea564 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -206,7 +206,7 @@ static void navpoint_close(struct input_dev *input)
navpoint_down(navpoint);
}
-static int __devinit navpoint_probe(struct platform_device *pdev)
+static int navpoint_probe(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -299,7 +299,7 @@ err_free_gpio:
return error;
}
-static int __devexit navpoint_remove(struct platform_device *pdev)
+static int navpoint_remove(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -353,7 +353,7 @@ static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
static struct platform_driver navpoint_driver = {
.probe = navpoint_probe,
- .remove = __devexit_p(navpoint_remove),
+ .remove = navpoint_remove,
.driver = {
.name = "navpoint",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 4fe055f2c53..0ecb9e7945e 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -143,7 +143,7 @@ static void pxa930_trkball_close(struct input_dev *dev)
pxa930_trkball_disable(trkball);
}
-static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
+static int pxa930_trkball_probe(struct platform_device *pdev)
{
struct pxa930_trkball *trkball;
struct input_dev *input;
@@ -230,7 +230,7 @@ failed:
return error;
}
-static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
+static int pxa930_trkball_remove(struct platform_device *pdev)
{
struct pxa930_trkball *trkball = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -248,7 +248,7 @@ static struct platform_driver pxa930_trkball_driver = {
.name = "pxa930-trkball",
},
.probe = pxa930_trkball_probe,
- .remove = __devexit_p(pxa930_trkball_remove),
+ .remove = pxa930_trkball_remove,
};
module_platform_driver(pxa930_trkball_driver);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 063a174d3a8..ad822608f6e 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -535,7 +535,7 @@ static struct synaptics_i2c *synaptics_i2c_touch_create(struct i2c_client *clien
return touch;
}
-static int __devinit synaptics_i2c_probe(struct i2c_client *client,
+static int synaptics_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
int ret;
@@ -601,7 +601,7 @@ err_mem_free:
return ret;
}
-static int __devexit synaptics_i2c_remove(struct i2c_client *client)
+static int synaptics_i2c_remove(struct i2c_client *client)
{
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -662,7 +662,7 @@ static struct i2c_driver synaptics_i2c_driver = {
},
.probe = synaptics_i2c_probe,
- .remove = __devexit_p(synaptics_i2c_remove),
+ .remove = synaptics_i2c_remove,
.id_table = synaptics_i2c_id_table,
};
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 55f2c2293ec..4a4e182c33e 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -234,4 +234,13 @@ config SERIO_PS2MULT
To compile this driver as a module, choose M here: the
module will be called ps2mult.
+config SERIO_ARC_PS2
+ tristate "ARC PS/2 support"
+ help
+ Say Y here if you have an ARC FPGA platform with a PS/2
+ controller in it.
+
+ To compile this driver as a module, choose M here: the module
+ will be called arc_ps2.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index dbbe37616c9..4b0c8f84f1c 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SERIO_RAW) += serio_raw.o
obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
+obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index cc11f4efe11..479ce5fe895 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -81,7 +81,7 @@ static void altera_ps2_close(struct serio *io)
/*
* Add one device to this driver.
*/
-static int __devinit altera_ps2_probe(struct platform_device *pdev)
+static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -159,7 +159,7 @@ static int __devinit altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int __devexit altera_ps2_remove(struct platform_device *pdev)
+static int altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match);
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = __devexit_p(altera_ps2_remove),
+ .remove = altera_ps2_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 2e77246c2e5..4e2fd44865e 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable_unprepare(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev,
+static int amba_kmi_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct amba_kmi_port *kmi;
@@ -163,7 +163,7 @@ static int __devinit amba_kmi_probe(struct amba_device *dev,
return ret;
}
-static int __devexit amba_kmi_remove(struct amba_device *dev)
+static int amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -204,7 +204,7 @@ static struct amba_driver ambakmi_driver = {
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
- .remove = __devexit_p(amba_kmi_remove),
+ .remove = amba_kmi_remove,
.resume = amba_kmi_resume,
};
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
new file mode 100644
index 00000000000..b571eb3e4ef
--- /dev/null
+++ b/drivers/input/serio/arc_ps2.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The driver was originally developed by Pavel Sokolov <psokolov@synopsys.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#define ARC_PS2_PORTS 2
+
+#define ARC_ARC_PS2_ID 0x0001f609
+
+#define STAT_TIMEOUT 128
+
+#define PS2_STAT_RX_FRM_ERR (1)
+#define PS2_STAT_RX_BUF_OVER (1 << 1)
+#define PS2_STAT_RX_INT_EN (1 << 2)
+#define PS2_STAT_RX_VAL (1 << 3)
+#define PS2_STAT_TX_ISNOT_FUL (1 << 4)
+#define PS2_STAT_TX_INT_EN (1 << 5)
+
+struct arc_ps2_port {
+ void __iomem *data_addr;
+ void __iomem *status_addr;
+ struct serio *io;
+};
+
+struct arc_ps2_data {
+ struct arc_ps2_port port[ARC_PS2_PORTS];
+ void __iomem *addr;
+ unsigned int frame_error;
+ unsigned int buf_overflow;
+ unsigned int total_int;
+};
+
+static void arc_ps2_check_rx(struct arc_ps2_data *arc_ps2,
+ struct arc_ps2_port *port)
+{
+ unsigned int timeout = 1000;
+ unsigned int flag, status;
+ unsigned char data;
+
+ do {
+ status = ioread32(port->status_addr);
+ if (!(status & PS2_STAT_RX_VAL))
+ return;
+
+ data = ioread32(port->data_addr) & 0xff;
+
+ flag = 0;
+ arc_ps2->total_int++;
+ if (status & PS2_STAT_RX_FRM_ERR) {
+ arc_ps2->frame_error++;
+ flag |= SERIO_PARITY;
+ } else if (status & PS2_STAT_RX_BUF_OVER) {
+ arc_ps2->buf_overflow++;
+ flag |= SERIO_FRAME;
+ }
+
+ serio_interrupt(port->io, data, flag);
+ } while (--timeout);
+
+ dev_err(&port->io->dev, "PS/2 hardware stuck\n");
+}
+
+static irqreturn_t arc_ps2_interrupt(int irq, void *dev)
+{
+ struct arc_ps2_data *arc_ps2 = dev;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ arc_ps2_check_rx(arc_ps2, &arc_ps2->port[i]);
+
+ return IRQ_HANDLED;
+}
+
+static int arc_ps2_write(struct serio *io, unsigned char val)
+{
+ unsigned status;
+ struct arc_ps2_port *port = io->port_data;
+ int timeout = STAT_TIMEOUT;
+
+ do {
+ status = ioread32(port->status_addr);
+ cpu_relax();
+
+ if (status & PS2_STAT_TX_ISNOT_FUL) {
+ iowrite32(val & 0xff, port->data_addr);
+ return 0;
+ }
+
+ } while (--timeout);
+
+ dev_err(&io->dev, "write timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int arc_ps2_open(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(PS2_STAT_RX_INT_EN, port->status_addr);
+
+ return 0;
+}
+
+static void arc_ps2_close(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(ioread32(port->status_addr) & ~PS2_STAT_RX_INT_EN,
+ port->status_addr);
+}
+
+static void __iomem *arc_ps2_calc_addr(struct arc_ps2_data *arc_ps2,
+ int index, bool status)
+{
+ void __iomem *addr;
+
+ addr = arc_ps2->addr + 4 + 4 * index;
+ if (status)
+ addr += ARC_PS2_PORTS * 4;
+
+ return addr;
+}
+
+static void arc_ps2_inhibit_ports(struct arc_ps2_data *arc_ps2)
+{
+ void __iomem *addr;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ addr = arc_ps2_calc_addr(arc_ps2, i, true);
+ val = ioread32(addr);
+ val &= ~(PS2_STAT_RX_INT_EN | PS2_STAT_TX_INT_EN);
+ iowrite32(val, addr);
+ }
+}
+
+static int arc_ps2_create_port(struct platform_device *pdev,
+ struct arc_ps2_data *arc_ps2,
+ int index)
+{
+ struct arc_ps2_port *port = &arc_ps2->port[index];
+ struct serio *io;
+
+ io = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!io)
+ return -ENOMEM;
+
+ io->id.type = SERIO_8042;
+ io->write = arc_ps2_write;
+ io->open = arc_ps2_open;
+ io->close = arc_ps2_close;
+ snprintf(io->name, sizeof(io->name), "ARC PS/2 port%d", index);
+ snprintf(io->phys, sizeof(io->phys), "arc/serio%d", index);
+ io->port_data = port;
+
+ port->io = io;
+
+ port->data_addr = arc_ps2_calc_addr(arc_ps2, index, false);
+ port->status_addr = arc_ps2_calc_addr(arc_ps2, index, true);
+
+ dev_dbg(&pdev->dev, "port%d is allocated (data = 0x%p, status = 0x%p)\n",
+ index, port->data_addr, port->status_addr);
+
+ serio_register_port(port->io);
+ return 0;
+}
+
+static int arc_ps2_probe(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2;
+ struct resource *res;
+ int irq;
+ int error, id, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no IO memory defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq_byname(pdev, "arc_ps2_irq");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ defined\n");
+ return -EINVAL;
+ }
+
+ arc_ps2 = devm_kzalloc(&pdev->dev, sizeof(struct arc_ps2_data),
+ GFP_KERNEL);
+ if (!arc_ps2) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ arc_ps2->addr = devm_request_and_ioremap(&pdev->dev, res);
+ if (!arc_ps2->addr)
+ return -EBUSY;
+
+ dev_info(&pdev->dev, "irq = %d, address = 0x%p, ports = %i\n",
+ irq, arc_ps2->addr, ARC_PS2_PORTS);
+
+ id = ioread32(arc_ps2->addr);
+ if (id != ARC_ARC_PS2_ID) {
+ dev_err(&pdev->dev, "device id does not match\n");
+ return -ENXIO;
+ }
+
+ arc_ps2_inhibit_ports(arc_ps2);
+
+ error = devm_request_irq(&pdev->dev, irq, arc_ps2_interrupt,
+ 0, "arc_ps2", arc_ps2);
+ if (error) {
+ dev_err(&pdev->dev, "Could not allocate IRQ\n");
+ return error;
+ }
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ error = arc_ps2_create_port(pdev, arc_ps2, i);
+ if (error) {
+ while (--i >= 0)
+ serio_unregister_port(arc_ps2->port[i].io);
+ return error;
+ }
+ }
+
+ platform_set_drvdata(pdev, arc_ps2);
+
+ return 0;
+}
+
+static int arc_ps2_remove(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ serio_unregister_port(arc_ps2->port[i].io);
+
+ dev_dbg(&pdev->dev, "interrupt count = %i\n", arc_ps2->total_int);
+ dev_dbg(&pdev->dev, "frame error count = %i\n", arc_ps2->frame_error);
+ dev_dbg(&pdev->dev, "buffer overflow count = %i\n",
+ arc_ps2->buf_overflow);
+
+ return 0;
+}
+
+static struct platform_driver arc_ps2_driver = {
+ .driver = {
+ .name = "arc_ps2",
+ .owner = THIS_MODULE,
+ },
+ .probe = arc_ps2_probe,
+ .remove = arc_ps2_remove,
+};
+
+module_platform_driver(arc_ps2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Sokolov <psokolov@synopsys.com>");
+MODULE_DESCRIPTION("ARC PS/2 Driver");
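
For readers of the new driver above, arc_ps2_calc_addr() implies the register layout below; this is derived purely from the code (base + 4 + 4 * index, plus ARC_PS2_PORTS * 4 for the status bank), not from a datasheet:

/* Offsets from arc_ps2->addr, with ARC_PS2_PORTS == 2:
 *   0x00  ID register (probe expects ARC_ARC_PS2_ID, 0x0001f609)
 *   0x04  port 0 data       0x08  port 1 data
 *   0x0c  port 0 status     0x10  port 1 status
 */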
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 85281656724..cfe549d4eaa 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -175,7 +175,7 @@ static int __init ct82c710_detect(void)
return 0;
}
-static int __devinit ct82c710_probe(struct platform_device *dev)
+static int ct82c710_probe(struct platform_device *dev)
{
ct82c710_port = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ct82c710_port)
@@ -199,7 +199,7 @@ static int __devinit ct82c710_probe(struct platform_device *dev)
return 0;
}
-static int __devexit ct82c710_remove(struct platform_device *dev)
+static int ct82c710_remove(struct platform_device *dev)
{
serio_unregister_port(ct82c710_port);
@@ -212,7 +212,7 @@ static struct platform_driver ct82c710_driver = {
.owner = THIS_MODULE,
},
.probe = ct82c710_probe,
- .remove = __devexit_p(ct82c710_remove),
+ .remove = ct82c710_remove,
};
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 4225f5d6b15..8d9ba0c3827 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -327,7 +327,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int __devinit gscps2_probe(struct parisc_device *dev)
+static int gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -414,7 +414,7 @@ fail_nomem:
* @return: success/error report
*/
-static int __devexit gscps2_remove(struct parisc_device *dev)
+static int gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
@@ -444,7 +444,7 @@ static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = __devexit_p(gscps2_remove),
+ .remove = gscps2_remove,
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index bfd3865d886..65605e4ef3c 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -686,13 +686,12 @@ static int hilse_donode(hil_mlc *mlc)
write_lock_irqsave(&mlc->lock, flags);
pack = node->object.packet;
out:
- if (mlc->istarted)
- goto out2;
- /* Prepare to receive input */
- if ((node + 1)->act & HILSE_IN)
- hilse_setup_input(mlc, node + 1);
+ if (!mlc->istarted) {
+ /* Prepare to receive input */
+ if ((node + 1)->act & HILSE_IN)
+ hilse_setup_input(mlc, node + 1);
+ }
- out2:
write_unlock_irqrestore(&mlc->lock, flags);
if (down_trylock(&mlc->osem)) {
@@ -1010,8 +1009,6 @@ static int __init hil_mlc_init(void)
static void __exit hil_mlc_exit(void)
{
del_timer_sync(&hil_mlcs_kicker);
-
- tasklet_disable(&hil_mlcs_tasklet);
tasklet_kill(&hil_mlcs_tasklet);
}
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 5d48bb66aa7..a5eed2ade53 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -76,7 +76,7 @@ static inline int i8042_platform_init(void)
if (check_legacy_ioport(I8042_DATA_REG))
return -ENODEV;
#endif
-#if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__)
+#if !defined(__sh__) && !defined(__alpha__)
if (!request_region(I8042_DATA_REG, 16, "i8042"))
return -EBUSY;
#endif
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 395a9af3adc..d6aa4c67dbb 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -49,7 +49,7 @@ static inline void i8042_write_command(int val)
#define OBP_PS2MS_NAME1 "kdmouse"
#define OBP_PS2MS_NAME2 "mouse"
-static int __devinit sparc_i8042_probe(struct platform_device *op)
+static int sparc_i8042_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -80,7 +80,7 @@ static int __devinit sparc_i8042_probe(struct platform_device *op)
return 0;
}
-static int __devexit sparc_i8042_remove(struct platform_device *op)
+static int sparc_i8042_remove(struct platform_device *op)
{
of_iounmap(kbd_res, kbd_iobase, 8);
@@ -102,7 +102,7 @@ static struct platform_driver sparc_i8042_driver = {
.of_match_table = sparc_i8042_match,
},
.probe = sparc_i8042_probe,
- .remove = __devexit_p(sparc_i8042_remove),
+ .remove = sparc_i8042_remove,
};
static int __init i8042_platform_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d6cc77a53c7..5f306f79da0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void)
int retval;
#ifdef CONFIG_X86
+ u8 a20_on = 0xdf;
/* Just return if pre-detection shows no i8042 controller exist */
if (!x86_platform.i8042_detect())
return -ENODEV;
@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (in MSI laptops) require A20 to be enabled via the i8042 to
+ * resume from S3. So we do it here and hope that nothing breaks.
+ */
+ i8042_command(&a20_on, 0x10d1);
+ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
#endif /* CONFIG_X86 */
return retval;
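
The magic constants in the A20 hunk above follow the i8042 command encoding used in drivers/input/serio/i8042.h (command byte in bits 0-7, parameter bytes to send in bits 12-15, bytes to receive in bits 8-11). Decoded, with the caveat that this is read off the code rather than a controller datasheet:

/*
 *   0x10d1  controller command 0xD1 "write output port", one parameter byte
 *   0xdf    the parameter: output-port value with bit 1 (the A20 gate) set
 *   0x00ff  command 0xFF with no parameters (the "null" kick for SMM firmware)
 */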
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 86564414b75..78e4de42efa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1284,7 +1284,7 @@ static void __init i8042_register_ports(void)
}
}
-static void __devexit i8042_unregister_ports(void)
+static void i8042_unregister_ports(void)
{
int i;
@@ -1437,7 +1437,7 @@ static int __init i8042_probe(struct platform_device *dev)
return error;
}
-static int __devexit i8042_remove(struct platform_device *dev)
+static int i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
@@ -1455,7 +1455,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
- .remove = __devexit_p(i8042_remove),
+ .remove = i8042_remove,
.shutdown = i8042_shutdown,
};
diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c
index 61da763b120..bc85e1cc66d 100644
--- a/drivers/input/serio/maceps2.c
+++ b/drivers/input/serio/maceps2.c
@@ -116,7 +116,7 @@ static void maceps2_close(struct serio *dev)
}
-static struct serio * __devinit maceps2_allocate_port(int idx)
+static struct serio *maceps2_allocate_port(int idx)
{
struct serio *serio;
@@ -135,7 +135,7 @@ static struct serio * __devinit maceps2_allocate_port(int idx)
return serio;
}
-static int __devinit maceps2_probe(struct platform_device *dev)
+static int maceps2_probe(struct platform_device *dev)
{
maceps2_port[0] = maceps2_allocate_port(0);
maceps2_port[1] = maceps2_allocate_port(1);
@@ -151,7 +151,7 @@ static int __devinit maceps2_probe(struct platform_device *dev)
return 0;
}
-static int __devexit maceps2_remove(struct platform_device *dev)
+static int maceps2_remove(struct platform_device *dev)
{
serio_unregister_port(maceps2_port[0]);
serio_unregister_port(maceps2_port[1]);
@@ -165,7 +165,7 @@ static struct platform_driver maceps2_driver = {
.owner = THIS_MODULE,
},
.probe = maceps2_probe,
- .remove = __devexit_p(maceps2_remove),
+ .remove = maceps2_remove,
};
static int __init maceps2_init(void)
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 0c42497aaaf..76f83836fd5 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -127,7 +127,7 @@ static void pcips2_close(struct serio *io)
free_irq(ps2if->dev->irq, ps2if);
}
-static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pcips2_data *ps2if;
struct serio *serio;
@@ -176,7 +176,7 @@ static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_i
return ret;
}
-static void __devexit pcips2_remove(struct pci_dev *dev)
+static void pcips2_remove(struct pci_dev *dev)
{
struct pcips2_data *ps2if = pci_get_drvdata(dev);
@@ -212,7 +212,7 @@ static struct pci_driver pcips2_driver = {
.name = "pcips2",
.id_table = pcips2_ids,
.probe = pcips2_probe,
- .remove = __devexit_p(pcips2_remove),
+ .remove = pcips2_remove,
};
module_pci_driver(pcips2_driver);
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 0c0df7f7380..70fe542839f 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -122,7 +122,7 @@ static void q40kbd_close(struct serio *port)
q40kbd_flush(q40kbd);
}
-static int __devinit q40kbd_probe(struct platform_device *pdev)
+static int q40kbd_probe(struct platform_device *pdev)
{
struct q40kbd *q40kbd;
struct serio *port;
@@ -168,7 +168,7 @@ err_free_mem:
return error;
}
-static int __devexit q40kbd_remove(struct platform_device *pdev)
+static int q40kbd_remove(struct platform_device *pdev)
{
struct q40kbd *q40kbd = platform_get_drvdata(pdev);
@@ -190,7 +190,7 @@ static struct platform_driver q40kbd_driver = {
.name = "q40kbd",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(q40kbd_remove),
+ .remove = q40kbd_remove,
};
static int __init q40kbd_init(void)
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 2af5df6a8fb..567566ae0da 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -114,7 +114,7 @@ static void rpckbd_close(struct serio *port)
* Allocate and initialize serio structure for subsequent registration
* with serio core.
*/
-static int __devinit rpckbd_probe(struct platform_device *dev)
+static int rpckbd_probe(struct platform_device *dev)
{
struct rpckbd_data *rpckbd;
struct serio *serio;
@@ -153,7 +153,7 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
return 0;
}
-static int __devexit rpckbd_remove(struct platform_device *dev)
+static int rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
struct rpckbd_data *rpckbd = serio->port_data;
@@ -166,7 +166,7 @@ static int __devexit rpckbd_remove(struct platform_device *dev)
static struct platform_driver rpckbd_driver = {
.probe = rpckbd_probe,
- .remove = __devexit_p(rpckbd_remove),
+ .remove = rpckbd_remove,
.driver = {
.name = "kart",
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 38976670753..b3e688911fd 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -193,7 +193,7 @@ static void ps2_close(struct serio *io)
/*
* Clear the input buffer.
*/
-static void __devinit ps2_clear_input(struct ps2if *ps2if)
+static void ps2_clear_input(struct ps2if *ps2if)
{
int maxread = 100;
@@ -203,7 +203,7 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
}
}
-static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
+static unsigned int ps2_test_one(struct ps2if *ps2if,
unsigned int mask)
{
unsigned int val;
@@ -220,7 +220,7 @@ static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
-static int __devinit ps2_test(struct ps2if *ps2if)
+static int ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
@@ -251,7 +251,7 @@ static int __devinit ps2_test(struct ps2if *ps2if)
/*
* Add one device to this driver.
*/
-static int __devinit ps2_probe(struct sa1111_dev *dev)
+static int ps2_probe(struct sa1111_dev *dev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -334,7 +334,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int __devexit ps2_remove(struct sa1111_dev *dev)
+static int ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -357,7 +357,7 @@ static struct sa1111_driver ps2_driver = {
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
- .remove = __devexit_p(ps2_remove),
+ .remove = ps2_remove,
};
static int __init ps2_init(void)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index d0f7533dbf8..25fc5971f42 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -891,8 +891,6 @@ static int serio_bus_match(struct device *dev, struct device_driver *drv)
return serio_match_port(serio_drv->id_table, serio);
}
-#ifdef CONFIG_HOTPLUG
-
#define SERIO_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(env, fmt, val); \
@@ -920,15 +918,6 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#undef SERIO_ADD_UEVENT_VAR
-#else
-
-static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_HOTPLUG */
-
#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 1e983bec7d8..17be85948ff 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -233,7 +233,7 @@ static void sxps2_close(struct serio *pserio)
* It returns 0, if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
-static int __devinit xps2_of_probe(struct platform_device *ofdev)
+static int xps2_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -333,7 +333,7 @@ failed1:
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
-static int __devexit xps2_of_remove(struct platform_device *of_dev)
+static int xps2_of_remove(struct platform_device *of_dev)
{
struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
@@ -355,7 +355,7 @@ static int __devexit xps2_of_remove(struct platform_device *of_dev)
}
/* Match table for of_platform binding */
-static const struct of_device_id xps2_of_match[] __devinitconst = {
+static const struct of_device_id xps2_of_match[] = {
{ .compatible = "xlnx,xps-ps2-1.00.a", },
{ /* end of list */ },
};
@@ -368,7 +368,7 @@ static struct platform_driver xps2_of_driver = {
.of_match_table = xps2_of_match,
},
.probe = xps2_of_probe,
- .remove = __devexit_p(xps2_of_remove),
+ .remove = xps2_of_remove,
};
module_platform_driver(xps2_of_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 858ad446de9..f92d34f45a1 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -386,23 +386,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
- if (features->type == TABLETPC2FG) {
- /* need to reset back */
+
+ switch (features->type) {
+ case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
- }
+ break;
- if (features->type == MTSCREEN || features->type == WACOM_24HDT)
+ case MTSCREEN:
+ case WACOM_24HDT:
features->pktlen = WACOM_PKGLEN_MTOUCH;
+ break;
- if (features->type == BAMBOO_PT) {
- /* need to reset back */
+ case MTTPC:
+ features->pktlen = WACOM_PKGLEN_MTTPC;
+ break;
+
+ case BAMBOO_PT:
features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ break;
+
+ default:
+ features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ break;
+ }
+
+ switch (features->type) {
+ case BAMBOO_PT:
features->x_phy =
get_unaligned_le16(&report[i + 5]);
features->x_max =
get_unaligned_le16(&report[i + 8]);
i += 15;
- } else if (features->type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -410,7 +427,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i - 1];
features->unitExpo = report[i - 3];
i += 12;
- } else {
+ break;
+
+ default:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -418,10 +437,11 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
+ break;
}
} else if (pen) {
/* penabled only accepts exact bytes of data */
- if (features->type == TABLETPC2FG)
+ if (features->type >= TABLETPC)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
@@ -434,32 +454,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- int type = features->type;
-
- if (type == TABLETPC2FG || type == MTSCREEN) {
+ switch (features->type) {
+ case TABLETPC2FG:
+ case MTSCREEN:
+ case MTTPC:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
- } else if (type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i - 2]);
i += 7;
- } else if (type == BAMBOO_PT) {
+ break;
+
+ case BAMBOO_PT:
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
get_unaligned_le16(&report[i + 6]);
i += 12;
- } else {
+ break;
+
+ default:
features->y_max =
features->x_max;
features->y_phy =
get_unaligned_le16(&report[i + 3]);
i += 4;
+ break;
}
} else if (pen) {
features->y_max =
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0a67031ffc1..264138f3217 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -467,9 +467,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
- (features->type >= WACOM_21UX2 && features->type <= WACOM_24HD)) {
+ if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -877,6 +875,11 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
int i;
int current_num_contacts = data[2];
int contacts_to_send = 0;
+ int x_offset = 0;
+
+ /* MTTPC does not support Height and Width */
+ if (wacom->features.type == MTTPC)
+ x_offset = -4;
/*
* First packet resets the counter since only the first
@@ -889,7 +892,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
contacts_to_send = min(5, wacom->num_contacts_left);
for (i = 0; i < contacts_to_send; i++) {
- int offset = (WACOM_BYTES_PER_MT_PACKET * i) + 3;
+ int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
bool touch = data[offset] & 0x1;
int id = le16_to_cpup((__le16 *)&data[offset + 1]);
int slot = find_slot_from_contactid(wacom, id);
@@ -900,8 +903,8 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = le16_to_cpup((__le16 *)&data[offset + 7]);
- int y = le16_to_cpup((__le16 *)&data[offset + 9]);
+ int x = le16_to_cpup((__le16 *)&data[offset + x_offset + 7]);
+ int y = le16_to_cpup((__le16 *)&data[offset + x_offset + 9]);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
@@ -1336,6 +1339,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case TABLETPCE:
case TABLETPC2FG:
case MTSCREEN:
+ case MTTPC:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1657,6 +1661,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case MTSCREEN:
+ case MTTPC:
if (features->device_type == BTN_TOOL_FINGER) {
wacom_wac->slots = kmalloc(features->touch_max *
sizeof(int),
@@ -2018,6 +2023,15 @@ static const struct wacom_features wacom_features_0xED =
static const struct wacom_features wacom_features_0xEF =
{ "Wacom ISDv4 EF", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x100 =
+ { "Wacom ISDv4 100", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x101 =
+ { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x4001 =
+ { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2034,7 +2048,8 @@ static const struct wacom_features wacom_features_0xD1 =
.touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
@@ -2194,6 +2209,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0xED) },
{ USB_DEVICE_WACOM(0xEF) },
+ { USB_DEVICE_WACOM(0x100) },
+ { USB_DEVICE_WACOM(0x101) },
+ { USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
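
The MTTPC offset arithmetic in wacom_mt_touch() above works out as follows, derived from the code itself: with WACOM_BYTES_PER_MT_PACKET = 11 and x_offset = -4, each MTTPC contact record shrinks to 7 bytes and contact i starts at data[7 * i + 3]:

/*
 *   byte 0      touch flag (bit 0)
 *   bytes 1-2   contact id (le16)
 *   bytes 3-4   X position (le16)   [offset + x_offset + 7]
 *   bytes 5-6   Y position (le16)   [offset + x_offset + 9]
 *
 * i.e. the height/width fields of the full 11-byte MT record are absent,
 * matching the "MTTPC does not support Height and Width" comment.
 */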
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 345f1e76975..9396d7769f8 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -26,6 +26,7 @@
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
#define WACOM_PKGLEN_MTOUCH 62
+#define WACOM_PKGLEN_MTTPC 40
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -88,6 +89,7 @@ enum {
TABLETPCE,
TABLETPC2FG,
MTSCREEN,
+ MTTPC,
MAX_TYPE
};
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 326218dbd6e..c7068942ebe 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -115,7 +115,7 @@ static void pm860x_touch_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
+static int pm860x_touch_dt_init(struct platform_device *pdev,
struct pm860x_chip *chip,
int *res_x)
{
@@ -169,7 +169,7 @@ static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
#define pm860x_touch_dt_init(x, y, z) (-1)
#endif
-static int __devinit pm860x_touch_probe(struct platform_device *pdev)
+static int pm860x_touch_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_touch_pdata *pdata = pdev->dev.platform_data;
@@ -293,7 +293,7 @@ out:
return ret;
}
-static int __devexit pm860x_touch_remove(struct platform_device *pdev)
+static int pm860x_touch_remove(struct platform_device *pdev)
{
struct pm860x_touch *touch = platform_get_drvdata(pdev);
@@ -310,7 +310,7 @@ static struct platform_driver pm860x_touch_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_touch_probe,
- .remove = __devexit_p(pm860x_touch_remove),
+ .remove = pm860x_touch_remove,
};
module_platform_driver(pm860x_touch_driver);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f7668b24c37..515cfe79054 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -111,18 +111,6 @@ config TOUCHSCREEN_AUO_PIXCIR
To compile this driver as a module, choose M here: the
module will be called auo-pixcir-ts.
-config TOUCHSCREEN_BITSY
- tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
- depends on SA1100_BITSY
- select SERIO
- help
- Say Y here if you have the h3600 (Bitsy) touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called h3600_ts_input.
-
config TOUCHSCREEN_BU21013
tristate "BU21013 based touch panel controllers"
depends on I2C
@@ -529,9 +517,9 @@ config TOUCHSCREEN_TOUCHWIN
To compile this driver as a module, choose M here: the
module will be called touchwin.
-config TOUCHSCREEN_TI_TSCADC
+config TOUCHSCREEN_TI_AM335X_TSC
tristate "TI Touchscreen Interface"
- depends on ARCH_OMAP2PLUS
+ depends on MFD_TI_AM335X_TSCADC
help
Say Y here if you have 4/5/8 wire touchscreen controller
to be connected to the ADC controller on your TI AM335x SoC.
@@ -539,7 +527,7 @@ config TOUCHSCREEN_TI_TSCADC
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called ti_tscadc.
+ module will be called ti_am335x_tsc.
config TOUCHSCREEN_ATMEL_TSADCC
tristate "Atmel Touchscreen Interface"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 178eb128d90..6bfbeab67c9 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
-obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
@@ -52,7 +51,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
-obj-$(CONFIG_TOUCHSCREEN_TI_TSCADC) += ti_tscadc.o
+obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 2c7692108e6..23fa829b869 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -682,7 +682,7 @@ static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
}
}
-static int __devinit ad7877_probe(struct spi_device *spi)
+static int ad7877_probe(struct spi_device *spi)
{
struct ad7877 *ts;
struct input_dev *input_dev;
@@ -810,7 +810,7 @@ err_free_mem:
return err;
}
-static int __devexit ad7877_remove(struct spi_device *spi)
+static int ad7877_remove(struct spi_device *spi)
{
struct ad7877 *ts = dev_get_drvdata(&spi->dev);
@@ -857,7 +857,7 @@ static struct spi_driver ad7877_driver = {
.pm = &ad7877_pm,
},
.probe = ad7877_probe,
- .remove = __devexit_p(ad7877_remove),
+ .remove = ad7877_remove,
};
module_spi_driver(ad7877_driver);
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index 3054354d0dd..dcf39077154 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -54,7 +54,7 @@ static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
.write = ad7879_i2c_write,
};
-static int __devinit ad7879_i2c_probe(struct i2c_client *client,
+static int ad7879_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7879 *ts;
@@ -75,7 +75,7 @@ static int __devinit ad7879_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad7879_i2c_remove(struct i2c_client *client)
+static int ad7879_i2c_remove(struct i2c_client *client)
{
struct ad7879 *ts = i2c_get_clientdata(client);
@@ -98,7 +98,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
- .remove = __devexit_p(ad7879_i2c_remove),
+ .remove = ad7879_i2c_remove,
.id_table = ad7879_id,
};
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index db49abf056b..606da5bd611 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -110,7 +110,7 @@ static const struct ad7879_bus_ops ad7879_spi_bus_ops = {
.write = ad7879_spi_write,
};
-static int __devinit ad7879_spi_probe(struct spi_device *spi)
+static int ad7879_spi_probe(struct spi_device *spi)
{
struct ad7879 *ts;
int err;
@@ -137,7 +137,7 @@ static int __devinit ad7879_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad7879_spi_remove(struct spi_device *spi)
+static int ad7879_spi_remove(struct spi_device *spi)
{
struct ad7879 *ts = spi_get_drvdata(spi);
@@ -154,7 +154,7 @@ static struct spi_driver ad7879_spi_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
- .remove = __devexit_p(ad7879_spi_remove),
+ .remove = ad7879_spi_remove,
};
module_spi_driver(ad7879_spi_driver);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 78e5d9ab0ba..4f702b3ec1a 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -955,7 +955,7 @@ static int ads7846_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
-static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+static int ads7846_setup_pendown(struct spi_device *spi,
struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
@@ -997,7 +997,7 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi,
* Set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
-static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
+static void ads7846_setup_spi_msg(struct ads7846 *ts,
const struct ads7846_platform_data *pdata)
{
struct spi_message *m = &ts->msg[0];
@@ -1196,7 +1196,7 @@ static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
spi_message_add_tail(x, m);
}
-static int __devinit ads7846_probe(struct spi_device *spi)
+static int ads7846_probe(struct spi_device *spi)
{
struct ads7846 *ts;
struct ads7846_packet *packet;
@@ -1390,7 +1390,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
return err;
}
-static int __devexit ads7846_remove(struct spi_device *spi)
+static int ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
@@ -1434,7 +1434,7 @@ static struct spi_driver ads7846_driver = {
.pm = &ads7846_pm,
},
.probe = ads7846_probe,
- .remove = __devexit_p(ads7846_remove),
+ .remove = ads7846_remove,
};
module_spi_driver(ads7846_driver);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1df2396af00..d04f810cb1d 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1095,7 +1095,7 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-static int __devinit mxt_probe(struct i2c_client *client,
+static int mxt_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mxt_platform_data *pdata = client->dev.platform_data;
@@ -1200,7 +1200,7 @@ err_free_mem:
return error;
}
-static int __devexit mxt_remove(struct i2c_client *client)
+static int mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
@@ -1270,7 +1270,7 @@ static struct i2c_driver mxt_driver = {
.pm = &mxt_pm_ops,
},
.probe = mxt_probe,
- .remove = __devexit_p(mxt_remove),
+ .remove = mxt_remove,
.id_table = mxt_id,
};
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index ea392ee138e..95f6785a94b 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -177,7 +177,7 @@ static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev)
* The functions for inserting/removing us as a module.
*/
-static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
+static int atmel_tsadcc_probe(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev;
struct input_dev *input_dev;
@@ -323,7 +323,7 @@ err_free_mem:
return err;
}
-static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
+static int atmel_tsadcc_remove(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev = dev_get_drvdata(&pdev->dev);
struct resource *res;
@@ -346,7 +346,7 @@ static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
static struct platform_driver atmel_tsadcc_driver = {
.probe = atmel_tsadcc_probe,
- .remove = __devexit_p(atmel_tsadcc_remove),
+ .remove = atmel_tsadcc_remove,
.driver = {
.name = "atmel_tsadcc",
},
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index c7047b6bb02..c6e19a96348 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -286,7 +286,7 @@ static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
return 0;
}
-static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
int int_setting)
{
struct i2c_client *client = ts->client;
@@ -482,7 +482,7 @@ unlock:
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
auo_pixcir_resume);
-static int __devinit auo_pixcir_probe(struct i2c_client *client,
+static int auo_pixcir_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -599,7 +599,7 @@ err_gpio_int:
return ret;
}
-static int __devexit auo_pixcir_remove(struct i2c_client *client)
+static int auo_pixcir_remove(struct i2c_client *client)
{
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -631,7 +631,7 @@ static struct i2c_driver auo_pixcir_driver = {
.pm = &auo_pixcir_pm_ops,
},
.probe = auo_pixcir_probe,
- .remove = __devexit_p(auo_pixcir_remove),
+ .remove = auo_pixcir_remove,
.id_table = auo_pixcir_idtable,
};
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 5c487d23f11..b9b5ddad665 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -14,6 +14,9 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define PEN_DOWN_INTR 0
#define MAX_FINGERS 2
@@ -148,11 +151,12 @@
struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
- bool touch_stopped;
const struct bu21013_platform_device *chip;
struct input_dev *in_dev;
- unsigned int intr_pin;
struct regulator *regulator;
+ unsigned int irq;
+ unsigned int intr_pin;
+ bool touch_stopped;
};
/**
@@ -262,7 +266,7 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
return IRQ_NONE;
}
- data->intr_pin = data->chip->irq_read_val();
+ data->intr_pin = gpio_get_value(data->chip->touch_pin);
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
msecs_to_jiffies(2));
@@ -418,8 +422,70 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
{
bu21013_data->touch_stopped = true;
wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
+ free_irq(bu21013_data->irq, bu21013_data);
+}
+
+/**
+ * bu21013_cs_disable() - deconfigures the touch panel controller
+ * @bu21013_data: device structure pointer
+ *
+ * This function is used to deconfigure the chip selection
+ * for touch panel controller.
+ */
+static void bu21013_cs_disable(struct bu21013_ts_data *bu21013_data)
+{
+ int error;
+
+ error = gpio_direction_output(bu21013_data->chip->cs_pin, 0);
+ if (error < 0)
+ dev_warn(&bu21013_data->client->dev,
+ "%s: gpio direction failed, error: %d\n",
+ __func__, error);
+ else
+ gpio_set_value(bu21013_data->chip->cs_pin, 0);
+
+ gpio_free(bu21013_data->chip->cs_pin);
+}
+
+#ifdef CONFIG_OF
+static const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct bu21013_platform_device *pdata;
+
+ if (!np) {
+ dev_err(dev, "no device tree or platform data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->y_flip = pdata->x_flip = false;
+
+ pdata->x_flip = of_property_read_bool(np, "rohm,flip-x");
+ pdata->y_flip = of_property_read_bool(np, "rohm,flip-y");
+
+ of_property_read_u32(np, "rohm,touch-max-x", &pdata->touch_x_max);
+ of_property_read_u32(np, "rohm,touch-max-y", &pdata->touch_y_max);
+
+ pdata->touch_pin = of_get_named_gpio(np, "touch-gpio", 0);
+ pdata->cs_pin = of_get_named_gpio(np, "reset-gpio", 0);
+
+ pdata->ext_clk = false;
+
+ return pdata;
}
+#else
+static inline const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data available\n");
+ return ERR_PTR(-EINVAL);
+}
+#endif
/**
* bu21013_probe() - initializes the i2c-client touchscreen driver
@@ -429,13 +495,13 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
* This function used to initializes the i2c-client touchscreen
* driver and returns integer.
*/
-static int __devinit bu21013_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bu21013_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
+ const struct bu21013_platform_device *pdata =
+ dev_get_platdata(&client->dev);
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
- client->dev.platform_data;
int error;
if (!i2c_check_functionality(client->adapter,
@@ -445,7 +511,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
}
if (!pdata) {
- dev_err(&client->dev, "platform data not defined\n");
+ pdata = bu21013_parse_dt(&client->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ if (!gpio_is_valid(pdata->touch_pin)) {
+ dev_err(&client->dev, "invalid touch_pin supplied\n");
return -EINVAL;
}
@@ -460,8 +532,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
+ bu21013_data->irq = gpio_to_irq(pdata->touch_pin);
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
dev_err(&client->dev, "regulator_get failed\n");
error = PTR_ERR(bu21013_data->regulator);
@@ -478,12 +551,11 @@ static int __devinit bu21013_probe(struct i2c_client *client,
init_waitqueue_head(&bu21013_data->wait);
/* configure the gpio pins */
- if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
- dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
- }
+ error = gpio_request_one(pdata->cs_pin, GPIOF_OUT_INIT_HIGH,
+ "touchp_reset");
+ if (error < 0) {
+ dev_err(&client->dev, "Unable to request gpio reset_pin\n");
+ goto err_disable_regulator;
}
/* configure the touch panel controller */
@@ -508,12 +580,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
pdata->touch_y_max, 0, 0);
input_set_drvdata(in_dev, bu21013_data);
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ error = request_threaded_irq(bu21013_data->irq, NULL, bu21013_gpio_irq,
IRQF_TRIGGER_FALLING | IRQF_SHARED |
IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
- dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
+ dev_err(&client->dev, "request irq %d failed\n",
+ bu21013_data->irq);
goto err_cs_disable;
}
@@ -531,7 +604,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
err_free_irq:
bu21013_free_irq(bu21013_data);
err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
+ bu21013_cs_disable(bu21013_data);
err_disable_regulator:
regulator_disable(bu21013_data->regulator);
err_put_regulator:
@@ -549,13 +622,13 @@ err_free_mem:
* This function uses to remove the i2c-client
* touchscreen driver and returns integer.
*/
-static int __devexit bu21013_remove(struct i2c_client *client)
+static int bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
bu21013_free_irq(bu21013_data);
- bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
+ bu21013_cs_disable(bu21013_data);
input_unregister_device(bu21013_data->in_dev);
@@ -584,9 +657,9 @@ static int bu21013_suspend(struct device *dev)
bu21013_data->touch_stopped = true;
if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
+ enable_irq_wake(bu21013_data->irq);
else
- disable_irq(bu21013_data->chip->irq);
+ disable_irq(bu21013_data->irq);
regulator_disable(bu21013_data->regulator);
@@ -621,9 +694,9 @@ static int bu21013_resume(struct device *dev)
bu21013_data->touch_stopped = false;
if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
+ disable_irq_wake(bu21013_data->irq);
else
- enable_irq(bu21013_data->chip->irq);
+ enable_irq(bu21013_data->irq);
return 0;
}
@@ -649,7 +722,7 @@ static struct i2c_driver bu21013_driver = {
#endif
},
.probe = bu21013_probe,
- .remove = __devexit_p(bu21013_remove),
+ .remove = bu21013_remove,
.id_table = bu21013_id,
};
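
[Note: the bu21013 conversion above replaces the board-supplied cs_en/cs_dis/irq_read_val callbacks with direct gpiolib calls and derives the interrupt from the touch GPIO. A compressed, illustrative sketch of that setup/teardown pairing, assuming the same fields shown in the hunks (error handling trimmed):]

#include <linux/gpio.h>
#include <linux/interrupt.h>

static int example_bu21013_setup(struct bu21013_ts_data *data)
{
	int error;

	/* drive the reset/chip-select line high while the panel is in use */
	error = gpio_request_one(data->chip->cs_pin, GPIOF_OUT_INIT_HIGH,
				 "touchp_reset");
	if (error)
		return error;

	data->irq = gpio_to_irq(data->chip->touch_pin);
	error = request_threaded_irq(data->irq, NULL, bu21013_gpio_irq,
				     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				     DRIVER_TP, data);
	if (error)
		gpio_free(data->chip->cs_pin);
	return error;
}

static void example_bu21013_teardown(struct bu21013_ts_data *data)
{
	free_irq(data->irq, data);
	gpio_set_value(data->chip->cs_pin, 0);
	gpio_free(data->chip->cs_pin);
}
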
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 464f1bf4b61..96e0eedcc7e 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -99,9 +99,18 @@ static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc,
int ret;
struct i2c_msg msg[2] = {
/* first write slave position to i2c devices */
- { client->addr, 0, 1, &cmd },
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &cmd
+ },
/* Second read data from position */
- { client->addr, I2C_M_RD, len, data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data
+ }
};
ret = i2c_transfer(client->adapter, msg, 2);
@@ -166,7 +175,7 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit cy8ctmg110_probe(struct i2c_client *client,
+static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct cy8ctmg110_pdata *pdata = client->dev.platform_data;
@@ -314,7 +323,7 @@ static int cy8ctmg110_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
#endif
-static int __devexit cy8ctmg110_remove(struct i2c_client *client)
+static int cy8ctmg110_remove(struct i2c_client *client)
{
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -348,7 +357,7 @@ static struct i2c_driver cy8ctmg110_driver = {
},
.id_table = cy8ctmg110_idtable,
.probe = cy8ctmg110_probe,
- .remove = __devexit_p(cy8ctmg110_remove),
+ .remove = cy8ctmg110_remove,
};
module_i2c_driver(cy8ctmg110_driver);
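
[Note: the cy8ctmg110 hunk above replaces positional struct i2c_msg initializers with designated initializers for its write-then-read register access, which names exactly the fields the transfer uses. A minimal sketch of the same pattern as a generic helper; the function name is illustrative:]

#include <linux/i2c.h>

/* Write a register address, then read 'len' bytes back in one transfer. */
static int example_read_regs(struct i2c_client *client, u8 reg,
			     u8 *buf, int len)
{
	struct i2c_msg msg[2] = {
		{
			.addr	= client->addr,
			.flags	= 0,		/* write the register address */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,	/* then read the data back */
			.len	= len,
			.buf	= buf,
		},
	};
	int ret;

	ret = i2c_transfer(client->adapter, msg, 2);
	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}
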
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 2af1d0c52bc..4dbdf44b8fc 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -81,7 +81,7 @@ static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
.read = cyttsp_i2c_read_block_data,
};
-static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
+static int cyttsp_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cyttsp *ts;
@@ -102,7 +102,7 @@ static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cyttsp_i2c_remove(struct i2c_client *client)
+static int cyttsp_i2c_remove(struct i2c_client *client)
{
struct cyttsp *ts = i2c_get_clientdata(client);
@@ -124,7 +124,7 @@ static struct i2c_driver cyttsp_i2c_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
- .remove = __devexit_p(cyttsp_i2c_remove),
+ .remove = cyttsp_i2c_remove,
.id_table = cyttsp_i2c_id,
};
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 9f263410407..638e20310f1 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -147,7 +147,7 @@ static const struct cyttsp_bus_ops cyttsp_spi_bus_ops = {
.read = cyttsp_spi_read_block_data,
};
-static int __devinit cyttsp_spi_probe(struct spi_device *spi)
+static int cyttsp_spi_probe(struct spi_device *spi)
{
struct cyttsp *ts;
int error;
@@ -172,7 +172,7 @@ static int __devinit cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit cyttsp_spi_remove(struct spi_device *spi)
+static int cyttsp_spi_remove(struct spi_device *spi)
{
struct cyttsp *ts = spi_get_drvdata(spi);
@@ -188,7 +188,7 @@ static struct spi_driver cyttsp_spi_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
- .remove = __devexit_p(cyttsp_spi_remove),
+ .remove = cyttsp_spi_remove,
};
module_spi_driver(cyttsp_spi_driver);
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 36b65cf10d7..34ad84105e6 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -297,7 +297,7 @@ static void da9034_touch_close(struct input_dev *dev)
}
-static int __devinit da9034_touch_probe(struct platform_device *pdev)
+static int da9034_touch_probe(struct platform_device *pdev)
{
struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
struct da9034_touch *touch;
@@ -361,7 +361,7 @@ err_free_touch:
return ret;
}
-static int __devexit da9034_touch_remove(struct platform_device *pdev)
+static int da9034_touch_remove(struct platform_device *pdev)
{
struct da9034_touch *touch = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ static struct platform_driver da9034_touch_driver = {
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
- .remove = __devexit_p(da9034_touch_remove),
+ .remove = da9034_touch_remove,
};
module_platform_driver(da9034_touch_driver);
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
index e8df341090c..8f561e22bdd 100644
--- a/drivers/input/touchscreen/da9052_tsi.c
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -27,8 +27,6 @@ struct da9052_tsi {
struct input_dev *dev;
struct delayed_work ts_pen_work;
struct mutex mutex;
- unsigned int irq_pendwn;
- unsigned int irq_datardy;
bool stopped;
bool adc_on;
};
@@ -45,8 +43,8 @@ static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
if (!tsi->stopped) {
/* Mask PEN_DOWN event and unmask TSI_READY event */
- disable_irq_nosync(tsi->irq_pendwn);
- enable_irq(tsi->irq_datardy);
+ da9052_disable_irq_nosync(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, true);
@@ -137,13 +135,13 @@ static void da9052_ts_pen_work(struct work_struct *work)
return;
/* Mask TSI_READY event and unmask PEN_DOWN event */
- disable_irq(tsi->irq_datardy);
- enable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
}
}
-static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
+static int da9052_ts_configure_gpio(struct da9052 *da9052)
{
int error;
@@ -162,7 +160,7 @@ static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
return 0;
}
-static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
+static int da9052_configure_tsi(struct da9052_tsi *tsi)
{
int error;
@@ -197,7 +195,7 @@ static int da9052_ts_input_open(struct input_dev *input_dev)
mb();
/* Unmask PEN_DOWN event */
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
/* Enable Pen Detect Circuit */
return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
@@ -210,11 +208,11 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
tsi->stopped = true;
mb();
- disable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
cancel_delayed_work_sync(&tsi->ts_pen_work);
if (tsi->adc_on) {
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, false);
/*
@@ -222,33 +220,24 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
* twice and we need to enable it to keep enable/disable
* counter balanced. IRQ is still off though.
*/
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
/* Disable Pen Detect Circuit */
da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
}
-static int __devinit da9052_ts_probe(struct platform_device *pdev)
+static int da9052_ts_probe(struct platform_device *pdev)
{
struct da9052 *da9052;
struct da9052_tsi *tsi;
struct input_dev *input_dev;
- int irq_pendwn;
- int irq_datardy;
int error;
da9052 = dev_get_drvdata(pdev->dev.parent);
if (!da9052)
return -EINVAL;
- irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
- irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
- if (irq_pendwn < 0 || irq_datardy < 0) {
- dev_err(da9052->dev, "Unable to determine device interrupts\n");
- return -ENXIO;
- }
-
tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
input_dev = input_allocate_device();
if (!tsi || !input_dev) {
@@ -258,8 +247,6 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
tsi->da9052 = da9052;
tsi->dev = input_dev;
- tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
- tsi->irq_datardy = da9052->irq_base + irq_datardy;
tsi->stopped = true;
INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
@@ -287,31 +274,25 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
/* Disable ADC */
da9052_ts_adc_toggle(tsi, false);
- error = request_threaded_irq(tsi->irq_pendwn,
- NULL, da9052_ts_pendwn_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "PENDWN", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_PENDOWN,
+ "pendown-irq", da9052_ts_pendwn_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register PENDWN IRQ %d, error = %d\n",
- tsi->irq_pendwn, error);
+ "Failed to register PENDWN IRQ: %d\n", error);
goto err_free_mem;
}
- error = request_threaded_irq(tsi->irq_datardy,
- NULL, da9052_ts_datardy_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "TSIRDY", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_TSIREADY,
+ "tsiready-irq", da9052_ts_datardy_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register TSIRDY IRQ %d, error = %d\n",
- tsi->irq_datardy, error);
+ "Failed to register TSIRDY IRQ :%d\n", error);
goto err_free_pendwn_irq;
}
/* Mask PEN_DOWN and TSI_READY events */
- disable_irq(tsi->irq_pendwn);
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
error = da9052_configure_tsi(tsi);
if (error)
@@ -326,9 +307,9 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
return 0;
err_free_datardy_irq:
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
err_free_pendwn_irq:
- free_irq(tsi->irq_pendwn, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
err_free_mem:
kfree(tsi);
input_free_device(input_dev);
@@ -336,14 +317,14 @@ err_free_mem:
return error;
}
-static int __devexit da9052_ts_remove(struct platform_device *pdev)
+static int da9052_ts_remove(struct platform_device *pdev)
{
struct da9052_tsi *tsi = platform_get_drvdata(pdev);
da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
- free_irq(tsi->irq_pendwn, tsi);
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
input_unregister_device(tsi->dev);
kfree(tsi);
@@ -355,7 +336,7 @@ static int __devexit da9052_ts_remove(struct platform_device *pdev)
static struct platform_driver da9052_tsi_driver = {
.probe = da9052_ts_probe,
- .remove = __devexit_p(da9052_ts_remove),
+ .remove = da9052_ts_remove,
.driver = {
.name = "da9052-tsi",
.owner = THIS_MODULE,
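
[Note: the da9052 hunks above stop requesting raw VIRQ numbers via platform_get_irq_byname() and instead go through the MFD core's helpers keyed by DA9052_IRQ_* identifiers. An illustrative pairing of those helpers, with signatures taken from the calls in the hunks and error handling trimmed:]

#include <linux/mfd/da9052/da9052.h>

static int example_request(struct da9052_tsi *tsi)
{
	int error;

	error = da9052_request_irq(tsi->da9052, DA9052_IRQ_PENDOWN,
				   "pendown-irq", da9052_ts_pendwn_irq, tsi);
	if (error)
		return error;

	/* keep the source masked until the input device is opened */
	da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
	return 0;
}

static void example_release(struct da9052_tsi *tsi)
{
	da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
}
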
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 099d144ab7c..a9170157b44 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -491,14 +491,6 @@ static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode)
DEFINE_SIMPLE_ATTRIBUTE(debugfs_mode_fops, edt_ft5x06_debugfs_mode_get,
edt_ft5x06_debugfs_mode_set, "%llu\n");
-static int edt_ft5x06_debugfs_raw_data_open(struct inode *inode,
- struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file,
char __user *buf, size_t count, loff_t *off)
{
@@ -579,11 +571,11 @@ out:
static const struct file_operations debugfs_raw_data_fops = {
- .open = edt_ft5x06_debugfs_raw_data_open,
+ .open = simple_open,
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void __devinit
+static void
edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
const char *debugfs_name)
{
@@ -600,7 +592,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
}
-static void __devexit
+static void
edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
if (tsdata->debug_dir)
@@ -625,7 +617,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
-static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
+static int edt_ft5x06_ts_reset(struct i2c_client *client,
int reset_pin)
{
int error;
@@ -649,7 +641,7 @@ static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
return 0;
}
-static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
+static int edt_ft5x06_ts_identify(struct i2c_client *client,
char *model_name,
char *fw_version)
{
@@ -683,7 +675,7 @@ static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
-static void __devinit
+static void
edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
const struct edt_ft5x06_platform_data *pdata)
{
@@ -697,7 +689,7 @@ edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE);
}
-static void __devinit
+static void
edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
{
tsdata->threshold = edt_ft5x06_register_read(tsdata,
@@ -710,7 +702,7 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y);
}
-static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client,
+static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct edt_ft5x06_platform_data *pdata =
@@ -830,7 +822,7 @@ err_free_mem:
return error;
}
-static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
+static int edt_ft5x06_ts_remove(struct i2c_client *client)
{
const struct edt_ft5x06_platform_data *pdata =
dev_get_platdata(&client->dev);
@@ -891,7 +883,7 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
- .remove = __devexit_p(edt_ft5x06_ts_remove),
+ .remove = edt_ft5x06_ts_remove,
};
module_i2c_driver(edt_ft5x06_ts_driver);
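
[Note: the edt-ft5x06 hunk above drops a hand-rolled debugfs open handler in favour of the generic simple_open(), which performs the same file->private_data = inode->i_private assignment. A minimal sketch of a debugfs file wired up that way; the file name and read handler are illustrative:]

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_raw_read(struct file *file, char __user *buf,
				size_t count, loff_t *off)
{
	struct edt_ft5x06_ts_data *tsdata = file->private_data;

	/* copy sensor data for tsdata out to user space here */
	return 0;
}

static const struct file_operations example_raw_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,	/* file->private_data = inode->i_private */
	.read	= example_raw_read,
};

/* in the debugfs setup path: */
	debugfs_create_file("raw_data", S_IRUSR, tsdata->debug_dir,
			    tsdata, &example_raw_fops);
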
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 908407efc67..55255a94007 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -154,7 +154,7 @@ static void eeti_ts_close(struct input_dev *dev)
eeti_ts_stop(priv);
}
-static int __devinit eeti_ts_probe(struct i2c_client *client,
+static int eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
struct eeti_ts_platform_data *pdata = client->dev.platform_data;
@@ -248,7 +248,7 @@ err0:
return err;
}
-static int __devexit eeti_ts_remove(struct i2c_client *client)
+static int eeti_ts_remove(struct i2c_client *client)
{
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -321,7 +321,7 @@ static struct i2c_driver eeti_ts_driver = {
#endif
},
.probe = eeti_ts_probe,
- .remove = __devexit_p(eeti_ts_remove),
+ .remove = eeti_ts_remove,
.id_table = eeti_ts_id,
};
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 13fa62fdfb0..17c9097f3b5 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -153,7 +153,7 @@ static int egalax_wake_up_device(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_firmware_version(struct i2c_client *client)
+static int egalax_firmware_version(struct i2c_client *client)
{
static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
int ret;
@@ -165,7 +165,7 @@ static int __devinit egalax_firmware_version(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_ts_probe(struct i2c_client *client,
+static int egalax_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct egalax_ts *ts;
@@ -246,7 +246,7 @@ err_free_ts:
return error;
}
-static __devexit int egalax_ts_remove(struct i2c_client *client)
+static int egalax_ts_remove(struct i2c_client *client)
{
struct egalax_ts *ts = i2c_get_clientdata(client);
@@ -301,7 +301,7 @@ static struct i2c_driver egalax_ts_driver = {
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
- .remove = __devexit_p(egalax_ts_remove),
+ .remove = egalax_ts_remove,
};
module_i2c_driver(egalax_ts_driver);
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
deleted file mode 100644
index b9e8686a6f1..00000000000
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Copyright (c) 2001 "Crazy" James Simmons jsimmons@transvirtual.com
- *
- * Sponsored by Transvirtual Technology.
- *
- * Derived from the code in h3600_ts.[ch] by Charles Flynn
- */
-
-/*
- * Driver for the h3600 Touch Screen and other Atmel controlled devices.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so by
- * e-mail - mail your message to <jsimmons@transvirtual.com>.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/serio.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-/* SA1100 serial defines */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#define DRIVER_DESC "H3600 touchscreen driver"
-
-MODULE_AUTHOR("James Simmons <jsimmons@transvirtual.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
-/*
- * Definitions & global arrays.
- */
-
-/* The start and end of frame characters SOF and EOF */
-#define CHAR_SOF 0x02
-#define CHAR_EOF 0x03
-#define FRAME_OVERHEAD 3 /* CHAR_SOF,CHAR_EOF,LENGTH = 3 */
-
-/*
- Atmel events and response IDs contained in frame.
- Programmer has no control over these numbers.
- TODO there are holes - specifically 1,7,0x0a
-*/
-#define VERSION_ID 0 /* Get Version (request/response) */
-#define KEYBD_ID 2 /* Keyboard (event) */
-#define TOUCHS_ID 3 /* Touch Screen (event)*/
-#define EEPROM_READ_ID 4 /* (request/response) */
-#define EEPROM_WRITE_ID 5 /* (request/response) */
-#define THERMAL_ID 6 /* (request/response) */
-#define NOTIFY_LED_ID 8 /* (request/response) */
-#define BATTERY_ID 9 /* (request/response) */
-#define SPI_READ_ID 0x0b /* ( request/response) */
-#define SPI_WRITE_ID 0x0c /* ( request/response) */
-#define FLITE_ID 0x0d /* backlight ( request/response) */
-#define STX_ID 0xa1 /* extension pack status (req/resp) */
-
-#define MAX_ID 14
-
-#define H3600_MAX_LENGTH 16
-#define H3600_KEY 0xf
-
-#define H3600_SCANCODE_RECORD 1 /* 1 -> record button */
-#define H3600_SCANCODE_CALENDAR 2 /* 2 -> calendar */
-#define H3600_SCANCODE_CONTACTS 3 /* 3 -> contact */
-#define H3600_SCANCODE_Q 4 /* 4 -> Q button */
-#define H3600_SCANCODE_START 5 /* 5 -> start menu */
-#define H3600_SCANCODE_UP 6 /* 6 -> up */
-#define H3600_SCANCODE_RIGHT 7 /* 7 -> right */
-#define H3600_SCANCODE_LEFT 8 /* 8 -> left */
-#define H3600_SCANCODE_DOWN 9 /* 9 -> down */
-
-/*
- * Per-touchscreen data.
- */
-struct h3600_dev {
- struct input_dev *dev;
- struct serio *serio;
- unsigned char event; /* event ID from packet */
- unsigned char chksum;
- unsigned char len;
- unsigned char idx;
- unsigned char buf[H3600_MAX_LENGTH];
- char phys[32];
-};
-
-static irqreturn_t action_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_ACTION_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- input_report_key(dev, KEY_ENTER, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t npower_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_NPOWER_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- /*
- * This interrupt is only called when we release the key. So we have
- * to fake a key press.
- */
- input_report_key(dev, KEY_SUSPEND, 1);
- input_report_key(dev, KEY_SUSPEND, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-
-static int flite_brightness = 25;
-
-enum flite_pwr {
- FLITE_PWR_OFF = 0,
- FLITE_PWR_ON = 1
-};
-
-/*
- * h3600_flite_power: enables or disables power to frontlight, using last bright */
-unsigned int h3600_flite_power(struct input_dev *dev, enum flite_pwr pwr)
-{
- unsigned char brightness = (pwr == FLITE_PWR_OFF) ? 0 : flite_brightness;
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- /* Must be in this order */
- serio_write(ts->serio, 1);
- serio_write(ts->serio, pwr);
- serio_write(ts->serio, brightness);
-
- return 0;
-}
-
-#endif
-
-/*
- * This function translates the native event packets to linux input event
- * packets. Some packets coming from serial are not touchscreen related. In
- * this case we send them off to be processed elsewhere.
- */
-static void h3600ts_process_packet(struct h3600_dev *ts)
-{
- struct input_dev *dev = ts->dev;
- static int touched = 0;
- int key, down = 0;
-
- switch (ts->event) {
- /*
- Buttons - returned as a single byte
- 7 6 5 4 3 2 1 0
- S x x x N N N N
-
- S switch state ( 0=pressed 1=released)
- x Unused.
- NNNN switch number 0-15
-
- Note: This is true for non interrupt generated key events.
- */
- case KEYBD_ID:
- down = (ts->buf[0] & 0x80) ? 0 : 1;
-
- switch (ts->buf[0] & 0x7f) {
- case H3600_SCANCODE_RECORD:
- key = KEY_RECORD;
- break;
- case H3600_SCANCODE_CALENDAR:
- key = KEY_PROG1;
- break;
- case H3600_SCANCODE_CONTACTS:
- key = KEY_PROG2;
- break;
- case H3600_SCANCODE_Q:
- key = KEY_Q;
- break;
- case H3600_SCANCODE_START:
- key = KEY_PROG3;
- break;
- case H3600_SCANCODE_UP:
- key = KEY_UP;
- break;
- case H3600_SCANCODE_RIGHT:
- key = KEY_RIGHT;
- break;
- case H3600_SCANCODE_LEFT:
- key = KEY_LEFT;
- break;
- case H3600_SCANCODE_DOWN:
- key = KEY_DOWN;
- break;
- default:
- key = 0;
- }
- if (key)
- input_report_key(dev, key, down);
- break;
- /*
- * Native touchscreen event data is formatted as shown below:-
- *
- * +-------+-------+-------+-------+
- * | Xmsb | Xlsb | Ymsb | Ylsb |
- * +-------+-------+-------+-------+
- * byte 0 1 2 3
- */
- case TOUCHS_ID:
- if (!touched) {
- input_report_key(dev, BTN_TOUCH, 1);
- touched = 1;
- }
-
- if (ts->len) {
- unsigned short x, y;
-
- x = ts->buf[0]; x <<= 8; x += ts->buf[1];
- y = ts->buf[2]; y <<= 8; y += ts->buf[3];
-
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
- } else {
- input_report_key(dev, BTN_TOUCH, 0);
- touched = 0;
- }
- break;
- default:
- /* Send a non input event elsewhere */
- break;
- }
-
- input_sync(dev);
-}
-
-/*
- * h3600ts_event() handles events from the input module.
- */
-static int h3600ts_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
-{
-#if 0
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- switch (type) {
- case EV_LED: {
- // serio_write(ts->serio, SOME_CMD);
- return 0;
- }
- }
- return -1;
-#endif
- return 0;
-}
-
-/*
- Frame format
- byte 1 2 3 len + 4
- +-------+---------------+---------------+--=------------+
- |SOF |id |len | len bytes | Chksum |
- +-------+---------------+---------------+--=------------+
- bit 0 7 8 11 12 15 16
-
- +-------+---------------+-------+
- |SOF |id |0 |Chksum | - Note Chksum does not include SOF
- +-------+---------------+-------+
- bit 0 7 8 11 12 15 16
-
-*/
-
-static int state;
-
-/* decode States */
-#define STATE_SOF 0 /* start of FRAME */
-#define STATE_ID 1 /* state where we decode the ID & len */
-#define STATE_DATA 2 /* state where we decode data */
-#define STATE_EOF 3 /* state where we decode checksum or EOF */
-
-static irqreturn_t h3600ts_interrupt(struct serio *serio, unsigned char data,
- unsigned int flags)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- /*
- * We have a new frame coming in.
- */
- switch (state) {
- case STATE_SOF:
- if (data == CHAR_SOF)
- state = STATE_ID;
- break;
- case STATE_ID:
- ts->event = (data & 0xf0) >> 4;
- ts->len = (data & 0xf);
- ts->idx = 0;
- if (ts->event >= MAX_ID) {
- state = STATE_SOF;
- break;
- }
- ts->chksum = data;
- state = (ts->len > 0) ? STATE_DATA : STATE_EOF;
- break;
- case STATE_DATA:
- ts->chksum += data;
- ts->buf[ts->idx]= data;
- if (++ts->idx == ts->len)
- state = STATE_EOF;
- break;
- case STATE_EOF:
- state = STATE_SOF;
- if (data == CHAR_EOF || data == ts->chksum)
- h3600ts_process_packet(ts);
- break;
- default:
- printk("Error3\n");
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * h3600ts_connect() is the routine that is called when someone adds a
- * new serio device that supports H3600 protocol and registers it as
- * an input device.
- */
-static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
-{
- struct h3600_dev *ts;
- struct input_dev *input_dev;
- int err;
-
- ts = kzalloc(sizeof(struct h3600_dev), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto fail1;
- }
-
- ts->serio = serio;
- ts->dev = input_dev;
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", serio->phys);
-
- input_dev->name = "H3600 TouchScreen";
- input_dev->phys = ts->phys;
- input_dev->id.bustype = BUS_RS232;
- input_dev->id.vendor = SERIO_H3600;
- input_dev->id.product = 0x0666; /* FIXME !!! We can ask the hardware */
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &serio->dev;
-
- input_set_drvdata(input_dev, ts);
-
- input_dev->event = h3600ts_event;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_LED) | BIT_MASK(EV_PWR);
- input_dev->ledbit[0] = BIT_MASK(LED_SLEEP);
- input_set_abs_params(input_dev, ABS_X, 60, 985, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 35, 1024, 0, 0);
-
- set_bit(KEY_RECORD, input_dev->keybit);
- set_bit(KEY_Q, input_dev->keybit);
- set_bit(KEY_PROG1, input_dev->keybit);
- set_bit(KEY_PROG2, input_dev->keybit);
- set_bit(KEY_PROG3, input_dev->keybit);
- set_bit(KEY_UP, input_dev->keybit);
- set_bit(KEY_RIGHT, input_dev->keybit);
- set_bit(KEY_LEFT, input_dev->keybit);
- set_bit(KEY_DOWN, input_dev->keybit);
- set_bit(KEY_ENTER, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(BTN_TOUCH, input_dev->keybit);
-
- /* Device specific stuff */
- set_GPIO_IRQ_edge(GPIO_BITSY_ACTION_BUTTON, GPIO_BOTH_EDGES);
- set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
-
- if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
- IRQF_SHARED, "h3600_action", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
- err = -EBUSY;
- goto fail1;
- }
-
- if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
- IRQF_SHARED, "h3600_suspend", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
- err = -EBUSY;
- goto fail2;
- }
-
- serio_set_drvdata(serio, ts);
-
- err = serio_open(serio, drv);
- if (err)
- goto fail3;
-
- //h3600_flite_control(1, 25); /* default brightness */
- err = input_register_device(ts->dev);
- if (err)
- goto fail4;
-
- return 0;
-
-fail4: serio_close(serio);
-fail3: serio_set_drvdata(serio, NULL);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
-fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: input_free_device(input_dev);
- kfree(ts);
- return err;
-}
-
-/*
- * h3600ts_disconnect() is the opposite of h3600ts_connect()
- */
-
-static void h3600ts_disconnect(struct serio *serio)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
- input_get_device(ts->dev);
- input_unregister_device(ts->dev);
- serio_close(serio);
- serio_set_drvdata(serio, NULL);
- input_put_device(ts->dev);
- kfree(ts);
-}
-
-/*
- * The serio driver structure.
- */
-
-static struct serio_device_id h3600ts_serio_ids[] = {
- {
- .type = SERIO_RS232,
- .proto = SERIO_H3600,
- .id = SERIO_ANY,
- .extra = SERIO_ANY,
- },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(serio, h3600ts_serio_ids);
-
-static struct serio_driver h3600ts_drv = {
- .driver = {
- .name = "h3600ts",
- },
- .description = DRIVER_DESC,
- .id_table = h3600ts_serio_ids,
- .interrupt = h3600ts_interrupt,
- .connect = h3600ts_connect,
- .disconnect = h3600ts_disconnect,
-};
-
-module_serio_driver(h3600ts_drv);
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index d13143b68b3..6c4fb843695 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -102,7 +102,7 @@ static void htcpen_close(struct input_dev *dev)
synchronize_irq(HTCPEN_IRQ);
}
-static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
+static int htcpen_isa_probe(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev;
int err = -EBUSY;
@@ -174,7 +174,7 @@ static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id)
+static int htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
@@ -210,7 +210,7 @@ static int htcpen_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver htcpen_isa_driver = {
.probe = htcpen_isa_probe,
- .remove = __devexit_p(htcpen_isa_remove),
+ .remove = htcpen_isa_remove,
#ifdef CONFIG_PM
.suspend = htcpen_isa_suspend,
.resume = htcpen_isa_resume,
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4ac69760ec0..1418bdda61b 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -180,7 +180,7 @@ static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
};
-static int __devinit ili210x_i2c_probe(struct i2c_client *client,
+static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -298,7 +298,7 @@ err_free_mem:
return error;
}
-static int __devexit ili210x_i2c_remove(struct i2c_client *client)
+static int ili210x_i2c_remove(struct i2c_client *client)
{
struct ili210x *priv = i2c_get_clientdata(client);
@@ -350,7 +350,7 @@ static struct i2c_driver ili210x_ts_driver = {
},
.id_table = ili210x_i2c_id,
.probe = ili210x_i2c_probe,
- .remove = __devexit_p(ili210x_i2c_remove),
+ .remove = ili210x_i2c_remove,
};
module_i2c_driver(ili210x_ts_driver);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index cf299377fc4..465db5dba8b 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -427,7 +427,7 @@ out:
}
/* Utility to read PMIC ID */
-static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
+static int mrstouch_read_pmic_id(uint *vendor, uint *rev)
{
int err;
u8 r;
@@ -446,7 +446,7 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
* Parse ADC channels to find end of the channel configured by other ADC user
* NEC and MAXIM requires 4 channels and FreeScale needs 18 channels
*/
-static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
+static int mrstouch_chan_parse(struct mrstouch_dev *tsdev)
{
int found = 0;
int err, i;
@@ -478,7 +478,7 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
/*
* Writes touch screen channels to ADC address selection registers
*/
-static int __devinit mrstouch_ts_chan_set(uint offset)
+static int mrstouch_ts_chan_set(uint offset)
{
u16 chan;
@@ -494,7 +494,7 @@ static int __devinit mrstouch_ts_chan_set(uint offset)
}
/* Initialize ADC */
-static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
+static int mrstouch_adc_init(struct mrstouch_dev *tsdev)
{
int err, start;
u8 ra, rm;
@@ -568,7 +568,7 @@ static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
/* Probe function for touch screen driver */
-static int __devinit mrstouch_probe(struct platform_device *pdev)
+static int mrstouch_probe(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev;
struct input_dev *input;
@@ -643,7 +643,7 @@ err_free_mem:
return err;
}
-static int __devexit mrstouch_remove(struct platform_device *pdev)
+static int mrstouch_remove(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
@@ -662,7 +662,7 @@ static struct platform_driver mrstouch_driver = {
.owner = THIS_MODULE,
},
.probe = mrstouch_probe,
- .remove = __devexit_p(mrstouch_remove),
+ .remove = mrstouch_remove,
};
module_platform_driver(mrstouch_driver);
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 7f03d1bd916..282d7c7ad2f 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -99,7 +99,7 @@ static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit jornada720_ts_probe(struct platform_device *pdev)
+static int jornada720_ts_probe(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts;
struct input_dev *input_dev;
@@ -151,7 +151,7 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
return error;
}
-static int __devexit jornada720_ts_remove(struct platform_device *pdev)
+static int jornada720_ts_remove(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
@@ -168,7 +168,7 @@ MODULE_ALIAS("platform:jornada_ts");
static struct platform_driver jornada720_ts_driver = {
.probe = jornada720_ts_probe,
- .remove = __devexit_p(jornada720_ts_remove),
+ .remove = jornada720_ts_remove,
.driver = {
.name = "jornada_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 4c2b8ed3bf1..9101ee529c9 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -203,7 +203,7 @@ static void lpc32xx_ts_close(struct input_dev *dev)
lpc32xx_stop_tsc(tsc);
}
-static int __devinit lpc32xx_ts_probe(struct platform_device *pdev)
+static int lpc32xx_ts_probe(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc;
struct input_dev *input;
@@ -309,7 +309,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_ts_remove(struct platform_device *pdev)
+static int lpc32xx_ts_remove(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
struct resource *res;
@@ -394,7 +394,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
- .remove = __devexit_p(lpc32xx_ts_remove),
+ .remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4eab50b856d..00bc6caa27f 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -156,7 +156,7 @@ out:
return IRQ_HANDLED;
}
-static void __devinit max11801_ts_phy_init(struct max11801_data *data)
+static void max11801_ts_phy_init(struct max11801_data *data)
{
struct i2c_client *client = data->client;
@@ -174,7 +174,7 @@ static void __devinit max11801_ts_phy_init(struct max11801_data *data)
max11801_write_reg(client, OP_MODE_CONF_REG, 0x36);
}
-static int __devinit max11801_ts_probe(struct i2c_client *client,
+static int max11801_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max11801_data *data;
@@ -228,7 +228,7 @@ err_free_mem:
return error;
}
-static __devexit int max11801_ts_remove(struct i2c_client *client)
+static int max11801_ts_remove(struct i2c_client *client)
{
struct max11801_data *data = i2c_get_clientdata(client);
@@ -252,7 +252,7 @@ static struct i2c_driver max11801_ts_driver = {
},
.id_table = max11801_ts_id,
.probe = max11801_ts_probe,
- .remove = __devexit_p(max11801_ts_remove),
+ .remove = max11801_ts_remove,
};
module_i2c_driver(max11801_ts_driver);
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 48dc5b0d26f..02103b6abb3 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -229,7 +229,7 @@ err_free_mem:
return ret;
}
-static int __devexit mc13783_ts_remove(struct platform_device *pdev)
+static int mc13783_ts_remove(struct platform_device *pdev)
{
struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
@@ -243,7 +243,7 @@ static int __devexit mc13783_ts_remove(struct platform_device *pdev)
}
static struct platform_driver mc13783_ts_driver = {
- .remove = __devexit_p(mc13783_ts_remove),
+ .remove = mc13783_ts_remove,
.driver = {
.owner = THIS_MODULE,
.name = MC13783_TS_NAME,
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index b528511861c..f9f4e0c56ed 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -187,7 +187,7 @@ static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
OP_MODE_ACTIVE | REPORT_RATE_80);
}
-static int __devinit mcs5000_ts_probe(struct i2c_client *client,
+static int mcs5000_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcs5000_ts_data *data;
@@ -249,7 +249,7 @@ err_free_mem:
return ret;
}
-static int __devexit mcs5000_ts_remove(struct i2c_client *client)
+static int mcs5000_ts_remove(struct i2c_client *client)
{
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
static struct i2c_driver mcs5000_ts_driver = {
.probe = mcs5000_ts_probe,
- .remove = __devexit_p(mcs5000_ts_remove),
+ .remove = mcs5000_ts_remove,
.driver = {
.name = "mcs5000_ts",
#ifdef CONFIG_PM
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 560cf09d1c5..98841d8aa63 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i2c/mms114.h>
#include <linux/input/mt.h>
@@ -360,14 +361,63 @@ static void mms114_input_close(struct input_dev *dev)
mms114_stop(data);
}
-static int __devinit mms114_probe(struct i2c_client *client,
+#ifdef CONFIG_OF
+static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ struct mms114_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate platform data\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
+ dev_err(dev, "failed to get x-size property\n");
+ return NULL;
+ };
+
+ if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
+ dev_err(dev, "failed to get y-size property\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "contact-threshold",
+ &pdata->contact_threshold);
+ of_property_read_u32(np, "moving-threshold",
+ &pdata->moving_threshold);
+
+ if (of_find_property(np, "x-invert", NULL))
+ pdata->x_invert = true;
+ if (of_find_property(np, "y-invert", NULL))
+ pdata->y_invert = true;
+
+ return pdata;
+}
+#else
+static inline struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
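
For reference (editorial aside, not part of the patch): of_property_read_u32() returns 0 only when the property was found and parsed, which is why the mandatory "x-size"/"y-size" reads above bail out on a non-zero return while the threshold properties simply keep the devm_kzalloc()-zeroed defaults when absent. A minimal standalone sketch of that convention, using a made-up stub reader:

#include <stdio.h>
#include <string.h>

/* fake_read_u32() is a hypothetical stand-in for of_property_read_u32():
 * it returns 0 on success and a negative errno when the property is
 * missing, which is the convention mms114_parse_dt() relies on. */
static int fake_read_u32(const char *prop, unsigned int *out)
{
	if (strcmp(prop, "x-size") == 0) {	/* pretend only x-size is set */
		*out = 720;
		return 0;
	}
	return -22;				/* -EINVAL */
}

int main(void)
{
	unsigned int x_size = 0, contact_threshold = 0;

	if (fake_read_u32("x-size", &x_size))
		printf("mandatory property missing -> parse would fail\n");

	/* optional property: the zeroed default survives when it is absent */
	fake_read_u32("contact-threshold", &contact_threshold);

	printf("x_size=%u contact_threshold=%u\n", x_size, contact_threshold);
	return 0;
}
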
+
+static int mms114_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct mms114_platform_data *pdata;
struct mms114_data *data;
struct input_dev *input_dev;
int error;
- if (!client->dev.platform_data) {
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata)
+ pdata = mms114_parse_dt(&client->dev);
+
+ if (!pdata) {
dev_err(&client->dev, "Need platform data\n");
return -EINVAL;
}
@@ -389,7 +439,7 @@ static int __devinit mms114_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- data->pdata = client->dev.platform_data;
+ data->pdata = pdata;
input_dev->name = "MELPAS MMS114 Touchscreen";
input_dev->id.bustype = BUS_I2C;
@@ -458,7 +508,7 @@ err_free_mem:
return error;
}
-static int __devexit mms114_remove(struct i2c_client *client)
+static int mms114_remove(struct i2c_client *client)
{
struct mms114_data *data = i2c_get_clientdata(client);
@@ -525,14 +575,22 @@ static const struct i2c_device_id mms114_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mms114_id);
+#ifdef CONFIG_OF
+static struct of_device_id mms114_dt_match[] = {
+ { .compatible = "melfas,mms114" },
+ { }
+};
+#endif
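
Editorial aside, not part of the patch: the match table above is only referenced through of_match_ptr() in the driver struct below. That macro (per <linux/of.h>) passes the pointer through on CONFIG_OF builds and degrades to NULL otherwise, so the #ifdef avoids a defined-but-unused table on non-DT kernels. Roughly:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)	/* DT build: use the table */
#else
#define of_match_ptr(_ptr)	NULL	/* non-DT build: no table referenced */
#endif
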
+
static struct i2c_driver mms114_driver = {
.driver = {
.name = "mms114",
.owner = THIS_MODULE,
.pm = &mms114_pm_ops,
+ .of_match_table = of_match_ptr(mms114_dt_match),
},
.probe = mms114_probe,
- .remove = __devexit_p(mms114_remove),
+ .remove = mms114_remove,
.id_table = mms114_id,
};
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index f57aeb80f7e..f22e04dd4e1 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -137,7 +137,7 @@ static void pcap_ts_close(struct input_dev *dev)
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
}
-static int __devinit pcap_ts_probe(struct platform_device *pdev)
+static int pcap_ts_probe(struct platform_device *pdev)
{
struct input_dev *input_dev;
struct pcap_ts *pcap_ts;
@@ -202,7 +202,7 @@ fail:
return err;
}
-static int __devexit pcap_ts_remove(struct platform_device *pdev)
+static int pcap_ts_remove(struct platform_device *pdev)
{
struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
@@ -245,7 +245,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = {
static struct platform_driver pcap_ts_driver = {
.probe = pcap_ts_probe,
- .remove = __devexit_p(pcap_ts_remove),
+ .remove = pcap_ts_remove,
.driver = {
.name = "pcap-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 953b4c105ca..6cc6b36663f 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -125,7 +125,7 @@ static int pixcir_i2c_ts_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
-static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
@@ -189,7 +189,7 @@ err_free_mem:
return error;
}
-static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+static int pixcir_i2c_ts_remove(struct i2c_client *client)
{
struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
@@ -218,7 +218,7 @@ static struct i2c_driver pixcir_i2c_ts_driver = {
.pm = &pixcir_dev_pm_ops,
},
.probe = pixcir_i2c_ts_probe,
- .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .remove = pixcir_i2c_ts_remove,
.id_table = pixcir_i2c_ts_id,
};
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 549fa29548f..b061af2c837 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -238,7 +238,7 @@ static void s3c24xx_ts_select(struct s3c_adc_client *client, unsigned select)
* Initialise, find and allocate any resources we need to run and then
* register with the ADC and input systems.
*/
-static int __devinit s3c2410ts_probe(struct platform_device *pdev)
+static int s3c2410ts_probe(struct platform_device *pdev)
{
struct s3c2410_ts_mach_info *info;
struct device *dev = &pdev->dev;
@@ -365,7 +365,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
*
* Free up our state ready to be removed.
*/
-static int __devexit s3c2410ts_remove(struct platform_device *pdev)
+static int s3c2410ts_remove(struct platform_device *pdev)
{
free_irq(ts.irq_tc, ts.input);
del_timer_sync(&touch_timer);
@@ -430,7 +430,7 @@ static struct platform_driver s3c_ts_driver = {
},
.id_table = s3cts_driver_ids,
.probe = s3c2410ts_probe,
- .remove = __devexit_p(s3c2410ts_remove),
+ .remove = s3c2410ts_remove,
};
module_platform_driver(s3c_ts_driver);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6cb68a1981b..d9d05e22242 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -139,7 +139,7 @@ end:
return IRQ_HANDLED;
}
-static int __devinit st1232_ts_probe(struct i2c_client *client,
+static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
@@ -206,7 +206,7 @@ err_free_mem:
return error;
}
-static int __devexit st1232_ts_remove(struct i2c_client *client)
+static int st1232_ts_remove(struct i2c_client *client)
{
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -255,7 +255,7 @@ static const struct i2c_device_id st1232_ts_id[] = {
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
#ifdef CONFIG_OF
-static const struct of_device_id st1232_ts_dt_ids[] __devinitconst = {
+static const struct of_device_id st1232_ts_dt_ids[] = {
{ .compatible = "sitronix,st1232", },
{ }
};
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
- .remove = __devexit_p(st1232_ts_remove),
+ .remove = st1232_ts_remove,
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 692b685720c..84d884b4ec3 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -1,4 +1,5 @@
-/* STMicroelectronics STMPE811 Touchscreen Driver
+/*
+ * STMicroelectronics STMPE811 Touchscreen Driver
*
* (C) 2010 Luotao Fu <l.fu@pengutronix.de>
* All rights reserved.
@@ -16,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -166,7 +168,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit stmpe_init_hw(struct stmpe_touch *ts)
+static int stmpe_init_hw(struct stmpe_touch *ts)
{
int ret;
u8 adc_ctrl1, adc_ctrl1_mask, tsc_cfg, tsc_cfg_mask;
@@ -261,41 +263,18 @@ static void stmpe_ts_close(struct input_dev *dev)
STMPE_TSC_CTRL_TSC_EN, 0);
}
-static int __devinit stmpe_input_probe(struct platform_device *pdev)
+static void stmpe_ts_get_platform_info(struct platform_device *pdev,
+ struct stmpe_touch *ts)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- struct stmpe_platform_data *pdata = stmpe->pdata;
- struct stmpe_touch *ts;
- struct input_dev *idev;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_ts_platform_data *ts_pdata = NULL;
- int ret;
- int ts_irq;
-
- ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
- if (ts_irq < 0)
- return ts_irq;
-
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- if (!ts) {
- ret = -ENOMEM;
- goto err_out;
- }
- idev = input_allocate_device();
- if (!idev) {
- ret = -ENOMEM;
- goto err_free_ts;
- }
-
- platform_set_drvdata(pdev, ts);
ts->stmpe = stmpe;
- ts->idev = idev;
- ts->dev = &pdev->dev;
- if (pdata)
- ts_pdata = pdata->ts;
+ if (stmpe->pdata && stmpe->pdata->ts) {
+ ts_pdata = stmpe->pdata->ts;
- if (ts_pdata) {
ts->sample_time = ts_pdata->sample_time;
ts->mod_12b = ts_pdata->mod_12b;
ts->ref_sel = ts_pdata->ref_sel;
@@ -305,22 +284,71 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
ts->settling = ts_pdata->settling;
ts->fraction_z = ts_pdata->fraction_z;
ts->i_drive = ts_pdata->i_drive;
+ } else if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "st,sample-time", &val))
+ ts->sample_time = val;
+ if (!of_property_read_u32(np, "st,mod-12b", &val))
+ ts->mod_12b = val;
+ if (!of_property_read_u32(np, "st,ref-sel", &val))
+ ts->ref_sel = val;
+ if (!of_property_read_u32(np, "st,adc-freq", &val))
+ ts->adc_freq = val;
+ if (!of_property_read_u32(np, "st,ave-ctrl", &val))
+ ts->ave_ctrl = val;
+ if (!of_property_read_u32(np, "st,touch-det-delay", &val))
+ ts->touch_det_delay = val;
+ if (!of_property_read_u32(np, "st,settling", &val))
+ ts->settling = val;
+ if (!of_property_read_u32(np, "st,fraction-z", &val))
+ ts->fraction_z = val;
+ if (!of_property_read_u32(np, "st,i-drive", &val))
+ ts->i_drive = val;
}
+}
+
+static int stmpe_input_probe(struct platform_device *pdev)
+{
+ struct stmpe_touch *ts;
+ struct input_dev *idev;
+ int error;
+ int ts_irq;
+
+ ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
+ if (ts_irq < 0)
+ return ts_irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ts);
+ ts->idev = idev;
+ ts->dev = &pdev->dev;
+
+ stmpe_ts_get_platform_info(pdev, ts);
INIT_DELAYED_WORK(&ts->work, stmpe_work);
- ret = request_threaded_irq(ts_irq, NULL, stmpe_ts_handler,
- IRQF_ONESHOT, STMPE_TS_NAME, ts);
- if (ret) {
+ error = devm_request_threaded_irq(&pdev->dev, ts_irq,
+ NULL, stmpe_ts_handler,
+ IRQF_ONESHOT, STMPE_TS_NAME, ts);
+ if (error) {
dev_err(&pdev->dev, "Failed to request IRQ %d\n", ts_irq);
- goto err_free_input;
+ return error;
}
- ret = stmpe_init_hw(ts);
- if (ret)
- goto err_free_irq;
+ error = stmpe_init_hw(ts);
+ if (error)
+ return error;
idev->name = STMPE_TS_NAME;
+ idev->phys = STMPE_TS_NAME"/input0";
idev->id.bustype = BUS_I2C;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
@@ -334,40 +362,21 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
input_set_abs_params(idev, ABS_Y, 0, XY_MASK, 0, 0);
input_set_abs_params(idev, ABS_PRESSURE, 0x0, 0xff, 0, 0);
- ret = input_register_device(idev);
- if (ret) {
+ error = input_register_device(idev);
+ if (error) {
dev_err(&pdev->dev, "Could not register input device\n");
- goto err_free_irq;
+ return error;
}
- return ret;
-
-err_free_irq:
- free_irq(ts_irq, ts);
-err_free_input:
- input_free_device(idev);
- platform_set_drvdata(pdev, NULL);
-err_free_ts:
- kfree(ts);
-err_out:
- return ret;
+ return 0;
}
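
Editorial aside, not part of the patch: the error labels and most of the old cleanup could be dropped here because every resource is now devm-managed and is released automatically, in reverse order, on probe failure and on unbind. A minimal sketch of the shape such a probe takes (example_probe, example_handler and the "example" name are made up):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	void *priv;
	int irq, error;

	priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* nothing to unwind */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* priv is freed for us */

	error = devm_request_irq(&pdev->dev, irq, example_handler, 0,
				 "example", priv);
	if (error)
		return error;		/* irq and priv both auto-released */

	return 0;
}
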
-static int __devexit stmpe_ts_remove(struct platform_device *pdev)
+static int stmpe_ts_remove(struct platform_device *pdev)
{
struct stmpe_touch *ts = platform_get_drvdata(pdev);
- unsigned int ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
- free_irq(ts_irq, ts);
-
- platform_set_drvdata(pdev, NULL);
-
- input_unregister_device(ts->idev);
-
- kfree(ts);
-
return 0;
}
@@ -377,7 +386,7 @@ static struct platform_driver stmpe_ts_driver = {
.owner = THIS_MODULE,
},
.probe = stmpe_input_probe,
- .remove = __devexit_p(stmpe_ts_remove),
+ .remove = stmpe_ts_remove,
};
module_platform_driver(stmpe_ts_driver);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
new file mode 100644
index 00000000000..51e7b87827a
--- /dev/null
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -0,0 +1,398 @@
+/*
+ * TI Touch Screen driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+
+#define ADCFSM_STEPID 0x10
+#define SEQ_SETTLE 275
+#define MAX_12BIT ((1 << 12) - 1)
+
+struct titsc {
+ struct input_dev *input;
+ struct ti_tscadc_dev *mfd_tscadc;
+ unsigned int irq;
+ unsigned int wires;
+ unsigned int x_plate_resistance;
+ bool pen_down;
+ int steps_to_configure;
+};
+
+static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
+{
+ return readl(ts->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_writel(struct titsc *tsc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, tsc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_step_config(struct titsc *ts_dev)
+{
+ unsigned int config;
+ int i, total_steps;
+
+ /* Configure the Step registers */
+ total_steps = 2 * ts_dev->steps_to_configure;
+
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_XPP;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ case 5:
+ config |= STEPCONFIG_YNN |
+ STEPCONFIG_INP_AN4 | STEPCONFIG_XNN |
+ STEPCONFIG_YPP;
+ break;
+ case 8:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ }
+
+ for (i = 1; i <= ts_dev->steps_to_configure; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YNN |
+ STEPCONFIG_INM_ADCREFM | STEPCONFIG_FIFO1;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_YPP;
+ break;
+ case 5:
+ config |= STEPCONFIG_XPP | STEPCONFIG_INP_AN4 |
+ STEPCONFIG_XNP | STEPCONFIG_YPN;
+ break;
+ case 8:
+ config |= STEPCONFIG_YPP;
+ break;
+ }
+
+ for (i = (ts_dev->steps_to_configure + 1); i <= total_steps; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ /* Charge step configuration */
+ config = STEPCONFIG_XPP | STEPCONFIG_YNN |
+ STEPCHARGE_RFP_XPUL | STEPCHARGE_RFM_XNUR |
+ STEPCHARGE_INM_AN1 | STEPCHARGE_INP_AN1;
+
+ titsc_writel(ts_dev, REG_CHARGECONFIG, config);
+ titsc_writel(ts_dev, REG_CHARGEDELAY, CHARGEDLY_OPENDLY);
+
+ config = 0;
+ /* Configure to calculate pressure */
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YPP |
+ STEPCONFIG_XNN | STEPCONFIG_INM_ADCREFM;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 1), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 1),
+ STEPCONFIG_OPENDLY);
+
+ config |= STEPCONFIG_INP_AN3 | STEPCONFIG_FIFO1;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 2), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 2),
+ STEPCONFIG_OPENDLY);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+}
+
+static void titsc_read_coordinates(struct titsc *ts_dev,
+ unsigned int *x, unsigned int *y)
+{
+ unsigned int fifocount = titsc_readl(ts_dev, REG_FIFO0CNT);
+ unsigned int prev_val_x = ~0, prev_val_y = ~0;
+ unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
+ unsigned int read, diff;
+ unsigned int i, channel;
+
+ /*
+ * The delta filter removes large variations in the sampled ADC
+ * values. The filter tries to predict where the next coordinate
+ * could be, by taking the previous sample and subtracting it from
+ * the current one. The sample whose difference from its
+ * predecessor is smallest wins and is the value reported to the
+ * input subsystem.
+ */
+ for (i = 0; i < fifocount - 1; i++) {
+ read = titsc_readl(ts_dev, REG_FIFO0);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= 0) && (channel < ts_dev->steps_to_configure)) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_x);
+ if (diff < prev_diff_x) {
+ prev_diff_x = diff;
+ *x = read;
+ }
+ prev_val_x = read;
+ }
+
+ read = titsc_readl(ts_dev, REG_FIFO1);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= ts_dev->steps_to_configure) &&
+ (channel < (2 * ts_dev->steps_to_configure - 1))) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_y);
+ if (diff < prev_diff_y) {
+ prev_diff_y = diff;
+ *y = read;
+ }
+ prev_val_y = read;
+ }
+ }
+}
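
A small standalone illustration (not kernel code, values made up) of the delta filter above: among consecutive FIFO samples, the one closest to its predecessor is kept, so an isolated spike such as 0x3ff below is rejected.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical FIFO samples for one axis; 0x3ff is a noise spike. */
	unsigned int samples[] = { 0x200, 0x205, 0x3ff, 0x208, 0x20a };
	unsigned int prev_val = ~0u, prev_diff = ~0u, best = 0;
	unsigned int i, diff;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		diff = abs((int)(samples[i] - prev_val));
		if (diff < prev_diff) {		/* same rule as the driver */
			prev_diff = diff;
			best = samples[i];
		}
		prev_val = samples[i];
	}

	printf("reported: 0x%x\n", best);	/* 0x20a; 0x3ff is rejected */
	return 0;
}
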
+
+static irqreturn_t titsc_irq(int irq, void *dev)
+{
+ struct titsc *ts_dev = dev;
+ struct input_dev *input_dev = ts_dev->input;
+ unsigned int status, irqclr = 0;
+ unsigned int x = 0, y = 0;
+ unsigned int z1, z2, z;
+ unsigned int fsm;
+ unsigned int fifo1count, fifo0count;
+ int i;
+
+ status = titsc_readl(ts_dev, REG_IRQSTATUS);
+ if (status & IRQENB_FIFO0THRES) {
+ titsc_read_coordinates(ts_dev, &x, &y);
+
+ z1 = titsc_readl(ts_dev, REG_FIFO0) & 0xfff;
+ z2 = titsc_readl(ts_dev, REG_FIFO1) & 0xfff;
+
+ fifo1count = titsc_readl(ts_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ titsc_readl(ts_dev, REG_FIFO1);
+
+ fifo0count = titsc_readl(ts_dev, REG_FIFO0CNT);
+ for (i = 0; i < fifo0count; i++)
+ titsc_readl(ts_dev, REG_FIFO0);
+
+ if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
+ /*
+ * Calculate pressure using formula
+ * Resistance(touch) = x plate resistance *
+ * x position/4096 * ((z2 / z1) - 1)
+ */
+ z = z2 - z1;
+ z *= x;
+ z *= ts_dev->x_plate_resistance;
+ z /= z1;
+ z = (z + 2047) >> 12;
+
+ if (z <= MAX_12BIT) {
+ input_report_abs(input_dev, ABS_X, x);
+ input_report_abs(input_dev, ABS_Y, y);
+ input_report_abs(input_dev, ABS_PRESSURE, z);
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_sync(input_dev);
+ }
+ }
+ irqclr |= IRQENB_FIFO0THRES;
+ }
+
+ /*
+ * Time for sequencer to settle, to read
+ * correct state of the sequencer.
+ */
+ udelay(SEQ_SETTLE);
+
+ status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
+ if (status & IRQENB_PENUP) {
+ /* Pen up event */
+ fsm = titsc_readl(ts_dev, REG_ADCFSM);
+ if (fsm == ADCFSM_STEPID) {
+ ts_dev->pen_down = false;
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ } else {
+ ts_dev->pen_down = true;
+ }
+ irqclr |= IRQENB_PENUP;
+ }
+
+ titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+ return IRQ_HANDLED;
+}
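
A worked example (not part of the patch, values made up) of the pressure computation in the handler above; it is the integer form of Rtouch = Rx-plate * (x / 4096) * (z2 / z1 - 1):

#include <stdio.h>

int main(void)
{
	/* x-plate resistance 600 ohm, x = 2048 (mid scale of 4096),
	 * z1 = 400, z2 = 600.
	 * Expected: 600 * (2048/4096) * (600/400 - 1) = 150. */
	unsigned int x = 2048, z1 = 400, z2 = 600, rx = 600, z;

	z = z2 - z1;		/* 200                         */
	z *= x;			/* 409600                      */
	z *= rx;		/* 245760000 (fits in 32 bits) */
	z /= z1;		/* 614400                      */
	z = (z + 2047) >> 12;	/* rounded divide by 4096      */

	printf("pressure = %u\n", z);	/* 150, well below MAX_12BIT */
	return 0;
}
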
+
+/*
+ * The functions for inserting/removing driver as a module.
+ */
+
+static int titsc_probe(struct platform_device *pdev)
+{
+ struct titsc *ts_dev;
+ struct input_dev *input_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts_dev || !input_dev) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tscadc_dev->tsc = ts_dev;
+ ts_dev->mfd_tscadc = tscadc_dev;
+ ts_dev->input = input_dev;
+ ts_dev->irq = tscadc_dev->irq;
+ ts_dev->wires = pdata->tsc_init->wires;
+ ts_dev->x_plate_resistance = pdata->tsc_init->x_plate_resistance;
+ ts_dev->steps_to_configure = pdata->tsc_init->steps_to_configure;
+
+ err = request_irq(ts_dev->irq, titsc_irq,
+ 0, pdev->dev.driver->name, ts_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_free_mem;
+ }
+
+ titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR, ts_dev->steps_to_configure);
+
+ input_dev->name = "ti-tsc";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+
+ /* register to the input system */
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, ts_dev);
+ return 0;
+
+err_free_irq:
+ free_irq(ts_dev->irq, ts_dev);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(ts_dev);
+ return err;
+}
+
+static int titsc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ free_irq(ts_dev->irq, ts_dev);
+
+ input_unregister_device(ts_dev->input);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int titsc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+ unsigned int idle;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ idle = titsc_readl(ts_dev, REG_IRQENABLE);
+ titsc_writel(ts_dev, REG_IRQENABLE,
+ (idle | IRQENB_HW_PEN));
+ titsc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
+ }
+ return 0;
+}
+
+static int titsc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ titsc_writel(ts_dev, REG_IRQWAKEUP,
+ 0x00);
+ titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
+ }
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR,
+ ts_dev->steps_to_configure);
+ return 0;
+}
+
+static const struct dev_pm_ops titsc_pm_ops = {
+ .suspend = titsc_suspend,
+ .resume = titsc_resume,
+};
+#define TITSC_PM_OPS (&titsc_pm_ops)
+#else
+#define TITSC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tsc_driver = {
+ .probe = titsc_probe,
+ .remove = titsc_remove,
+ .driver = {
+ .name = "tsc",
+ .owner = THIS_MODULE,
+ .pm = TITSC_PM_OPS,
+ },
+};
+module_platform_driver(ti_tsc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ti_tscadc.c b/drivers/input/touchscreen/ti_tscadc.c
deleted file mode 100644
index d229c741d54..00000000000
--- a/drivers/input/touchscreen/ti_tscadc.c
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * TI Touch Screen driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/input/ti_tscadc.h>
-#include <linux/delay.h>
-
-#define REG_IRQEOI 0x020
-#define REG_RAWIRQSTATUS 0x024
-#define REG_IRQSTATUS 0x028
-#define REG_IRQENABLE 0x02C
-#define REG_IRQWAKEUP 0x034
-#define REG_CTRL 0x040
-#define REG_ADCFSM 0x044
-#define REG_CLKDIV 0x04C
-#define REG_SE 0x054
-#define REG_IDLECONFIG 0x058
-#define REG_CHARGECONFIG 0x05C
-#define REG_CHARGEDELAY 0x060
-#define REG_STEPCONFIG(n) (0x64 + ((n - 1) * 8))
-#define REG_STEPDELAY(n) (0x68 + ((n - 1) * 8))
-#define REG_STEPCONFIG13 0x0C4
-#define REG_STEPDELAY13 0x0C8
-#define REG_STEPCONFIG14 0x0CC
-#define REG_STEPDELAY14 0x0D0
-#define REG_FIFO0CNT 0xE4
-#define REG_FIFO1THR 0xF4
-#define REG_FIFO0 0x100
-#define REG_FIFO1 0x200
-
-/* Register Bitfields */
-#define IRQWKUP_ENB BIT(0)
-#define STPENB_STEPENB 0x7FFF
-#define IRQENB_FIFO1THRES BIT(5)
-#define IRQENB_PENUP BIT(9)
-#define STEPCONFIG_MODE_HWSYNC 0x2
-#define STEPCONFIG_SAMPLES_AVG (1 << 4)
-#define STEPCONFIG_XPP (1 << 5)
-#define STEPCONFIG_XNN (1 << 6)
-#define STEPCONFIG_YPP (1 << 7)
-#define STEPCONFIG_YNN (1 << 8)
-#define STEPCONFIG_XNP (1 << 9)
-#define STEPCONFIG_YPN (1 << 10)
-#define STEPCONFIG_INM (1 << 18)
-#define STEPCONFIG_INP (1 << 20)
-#define STEPCONFIG_INP_5 (1 << 21)
-#define STEPCONFIG_FIFO1 (1 << 26)
-#define STEPCONFIG_OPENDLY 0xff
-#define STEPCONFIG_Z1 (3 << 19)
-#define STEPIDLE_INP (1 << 22)
-#define STEPCHARGE_RFP (1 << 12)
-#define STEPCHARGE_INM (1 << 15)
-#define STEPCHARGE_INP (1 << 19)
-#define STEPCHARGE_RFM (1 << 23)
-#define STEPCHARGE_DELAY 0x1
-#define CNTRLREG_TSCSSENB (1 << 0)
-#define CNTRLREG_STEPID (1 << 1)
-#define CNTRLREG_STEPCONFIGWRT (1 << 2)
-#define CNTRLREG_4WIRE (1 << 5)
-#define CNTRLREG_5WIRE (1 << 6)
-#define CNTRLREG_8WIRE (3 << 5)
-#define CNTRLREG_TSCENB (1 << 7)
-#define ADCFSM_STEPID 0x10
-
-#define SEQ_SETTLE 275
-#define ADC_CLK 3000000
-#define MAX_12BIT ((1 << 12) - 1)
-#define TSCADC_DELTA_X 15
-#define TSCADC_DELTA_Y 15
-
-struct tscadc {
- struct input_dev *input;
- struct clk *tsc_ick;
- void __iomem *tsc_base;
- unsigned int irq;
- unsigned int wires;
- unsigned int x_plate_resistance;
- bool pen_down;
-};
-
-static unsigned int tscadc_readl(struct tscadc *ts, unsigned int reg)
-{
- return readl(ts->tsc_base + reg);
-}
-
-static void tscadc_writel(struct tscadc *tsc, unsigned int reg,
- unsigned int val)
-{
- writel(val, tsc->tsc_base + reg);
-}
-
-static void tscadc_step_config(struct tscadc *ts_dev)
-{
- unsigned int config;
- int i;
-
- /* Configure the Step registers */
-
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_XPP;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- case 5:
- config |= STEPCONFIG_YNN |
- STEPCONFIG_INP_5 | STEPCONFIG_XNN |
- STEPCONFIG_YPP;
- break;
- case 8:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- }
-
- for (i = 1; i < 7; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YNN |
- STEPCONFIG_INM | STEPCONFIG_FIFO1;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_YPP;
- break;
- case 5:
- config |= STEPCONFIG_XPP | STEPCONFIG_INP_5 |
- STEPCONFIG_XNP | STEPCONFIG_YPN;
- break;
- case 8:
- config |= STEPCONFIG_YPP;
- break;
- }
-
- for (i = 7; i < 13; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- /* Charge step configuration */
- config = STEPCONFIG_XPP | STEPCONFIG_YNN |
- STEPCHARGE_RFP | STEPCHARGE_RFM |
- STEPCHARGE_INM | STEPCHARGE_INP;
-
- tscadc_writel(ts_dev, REG_CHARGECONFIG, config);
- tscadc_writel(ts_dev, REG_CHARGEDELAY, STEPCHARGE_DELAY);
-
- config = 0;
- /* Configure to calculate pressure */
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YPP |
- STEPCONFIG_XNN | STEPCONFIG_INM;
- tscadc_writel(ts_dev, REG_STEPCONFIG13, config);
- tscadc_writel(ts_dev, REG_STEPDELAY13, STEPCONFIG_OPENDLY);
-
- config |= STEPCONFIG_Z1 | STEPCONFIG_FIFO1;
- tscadc_writel(ts_dev, REG_STEPCONFIG14, config);
- tscadc_writel(ts_dev, REG_STEPDELAY14, STEPCONFIG_OPENDLY);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
-}
-
-static void tscadc_idle_config(struct tscadc *ts_config)
-{
- unsigned int idleconfig;
-
- idleconfig = STEPCONFIG_YNN |
- STEPCONFIG_INM |
- STEPCONFIG_YPN | STEPIDLE_INP;
- tscadc_writel(ts_config, REG_IDLECONFIG, idleconfig);
-}
-
-static void tscadc_read_coordinates(struct tscadc *ts_dev,
- unsigned int *x, unsigned int *y)
-{
- unsigned int fifocount = tscadc_readl(ts_dev, REG_FIFO0CNT);
- unsigned int prev_val_x = ~0, prev_val_y = ~0;
- unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
- unsigned int read, diff;
- unsigned int i;
-
- /*
- * Delta filter is used to remove large variations in sampled
- * values from ADC. The filter tries to predict where the next
- * coordinate could be. This is done by taking a previous
- * coordinate and subtracting it form current one. Further the
- * algorithm compares the difference with that of a present value,
- * if true the value is reported to the sub system.
- */
- for (i = 0; i < fifocount - 1; i++) {
- read = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- diff = abs(read - prev_val_x);
- if (diff < prev_diff_x) {
- prev_diff_x = diff;
- *x = read;
- }
- prev_val_x = read;
-
- read = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
- diff = abs(read - prev_val_y);
- if (diff < prev_diff_y) {
- prev_diff_y = diff;
- *y = read;
- }
- prev_val_y = read;
- }
-}
-
-static irqreturn_t tscadc_irq(int irq, void *dev)
-{
- struct tscadc *ts_dev = dev;
- struct input_dev *input_dev = ts_dev->input;
- unsigned int status, irqclr = 0;
- unsigned int x = 0, y = 0;
- unsigned int z1, z2, z;
- unsigned int fsm;
-
- status = tscadc_readl(ts_dev, REG_IRQSTATUS);
- if (status & IRQENB_FIFO1THRES) {
- tscadc_read_coordinates(ts_dev, &x, &y);
-
- z1 = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- z2 = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
-
- if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
- /*
- * Calculate pressure using formula
- * Resistance(touch) = x plate resistance *
- * x postion/4096 * ((z2 / z1) - 1)
- */
- z = z2 - z1;
- z *= x;
- z *= ts_dev->x_plate_resistance;
- z /= z1;
- z = (z + 2047) >> 12;
-
- if (z <= MAX_12BIT) {
- input_report_abs(input_dev, ABS_X, x);
- input_report_abs(input_dev, ABS_Y, y);
- input_report_abs(input_dev, ABS_PRESSURE, z);
- input_report_key(input_dev, BTN_TOUCH, 1);
- input_sync(input_dev);
- }
- }
- irqclr |= IRQENB_FIFO1THRES;
- }
-
- /*
- * Time for sequencer to settle, to read
- * correct state of the sequencer.
- */
- udelay(SEQ_SETTLE);
-
- status = tscadc_readl(ts_dev, REG_RAWIRQSTATUS);
- if (status & IRQENB_PENUP) {
- /* Pen up event */
- fsm = tscadc_readl(ts_dev, REG_ADCFSM);
- if (fsm == ADCFSM_STEPID) {
- ts_dev->pen_down = false;
- input_report_key(input_dev, BTN_TOUCH, 0);
- input_report_abs(input_dev, ABS_PRESSURE, 0);
- input_sync(input_dev);
- } else {
- ts_dev->pen_down = true;
- }
- irqclr |= IRQENB_PENUP;
- }
-
- tscadc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- /* check pending interrupts */
- tscadc_writel(ts_dev, REG_IRQEOI, 0x0);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
- return IRQ_HANDLED;
-}
-
-/*
- * The functions for inserting/removing driver as a module.
- */
-
-static int __devinit tscadc_probe(struct platform_device *pdev)
-{
- const struct tsc_data *pdata = pdev->dev.platform_data;
- struct resource *res;
- struct tscadc *ts_dev;
- struct input_dev *input_dev;
- struct clk *clk;
- int err;
- int clk_value, ctrl, irq;
-
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform data.\n");
- return -EINVAL;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined.\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq ID is specified.\n");
- return -EINVAL;
- }
-
- /* Allocate memory for device */
- ts_dev = kzalloc(sizeof(struct tscadc), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts_dev || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory.\n");
- err = -ENOMEM;
- goto err_free_mem;
- }
-
- ts_dev->input = input_dev;
- ts_dev->irq = irq;
- ts_dev->wires = pdata->wires;
- ts_dev->x_plate_resistance = pdata->x_plate_resistance;
-
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to reserve registers.\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- ts_dev->tsc_base = ioremap(res->start, resource_size(res));
- if (!ts_dev->tsc_base) {
- dev_err(&pdev->dev, "failed to map registers.\n");
- err = -ENOMEM;
- goto err_release_mem_region;
- }
-
- err = request_irq(ts_dev->irq, tscadc_irq,
- 0, pdev->dev.driver->name, ts_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to allocate irq.\n");
- goto err_unmap_regs;
- }
-
- ts_dev->tsc_ick = clk_get(&pdev->dev, "adc_tsc_ick");
- if (IS_ERR(ts_dev->tsc_ick)) {
- dev_err(&pdev->dev, "failed to get TSC ick\n");
- goto err_free_irq;
- }
- clk_enable(ts_dev->tsc_ick);
-
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get TSC fck\n");
- err = PTR_ERR(clk);
- goto err_disable_clk;
- }
-
- clk_value = clk_get_rate(clk) / ADC_CLK;
- clk_put(clk);
-
- if (clk_value < 7) {
- dev_err(&pdev->dev, "clock input less than min clock requirement\n");
- goto err_disable_clk;
- }
- /* CLKDIV needs to be configured to the value minus 1 */
- tscadc_writel(ts_dev, REG_CLKDIV, clk_value - 1);
-
- /* Enable wake-up of the SoC using touchscreen */
- tscadc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
-
- ctrl = CNTRLREG_STEPCONFIGWRT |
- CNTRLREG_TSCENB |
- CNTRLREG_STEPID;
- switch (ts_dev->wires) {
- case 4:
- ctrl |= CNTRLREG_4WIRE;
- break;
- case 5:
- ctrl |= CNTRLREG_5WIRE;
- break;
- case 8:
- ctrl |= CNTRLREG_8WIRE;
- break;
- }
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- tscadc_idle_config(ts_dev);
- tscadc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
- tscadc_step_config(ts_dev);
- tscadc_writel(ts_dev, REG_FIFO1THR, 6);
-
- ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- input_dev->name = "ti-tsc-adc";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
-
- /* register to the input system */
- err = input_register_device(input_dev);
- if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ts_dev);
- return 0;
-
-err_disable_clk:
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-err_free_irq:
- free_irq(ts_dev->irq, ts_dev);
-err_unmap_regs:
- iounmap(ts_dev->tsc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts_dev);
- return err;
-}
-
-static int __devexit tscadc_remove(struct platform_device *pdev)
-{
- struct tscadc *ts_dev = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(ts_dev->irq, ts_dev);
-
- input_unregister_device(ts_dev->input);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(ts_dev->tsc_base);
- release_mem_region(res->start, resource_size(res));
-
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-
- kfree(ts_dev);
-
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-static struct platform_driver ti_tsc_driver = {
- .probe = tscadc_probe,
- .remove = __devexit_p(tscadc_remove),
- .driver = {
- .name = "tsc",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_tsc_driver);
-
-MODULE_DESCRIPTION("TI touchscreen controller driver");
-MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 368d2c6cf78..acfb87607b8 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -243,7 +243,7 @@ static void tsc_stop(struct input_dev *dev)
clk_disable(ts->clk);
}
-static int __devinit tsc_probe(struct platform_device *pdev)
+static int tsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tsc_data *ts;
@@ -357,7 +357,7 @@ error_res:
return error;
}
-static int __devexit tsc_remove(struct platform_device *pdev)
+static int tsc_remove(struct platform_device *pdev)
{
struct tsc_data *ts = platform_get_drvdata(pdev);
@@ -374,7 +374,7 @@ static int __devexit tsc_remove(struct platform_device *pdev)
static struct platform_driver tsc_driver = {
.probe = tsc_probe,
- .remove = __devexit_p(tsc_remove),
+ .remove = tsc_remove,
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index f7eda3d00fa..820a066c3b8 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -345,7 +345,7 @@ err0:
return error;
}
-static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+static int tps6507x_ts_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
struct tps6507x_ts *tsc = tps6507x_dev->ts;
@@ -367,7 +367,7 @@ static struct platform_driver tps6507x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_ts_probe,
- .remove = __devexit_p(tps6507x_ts_remove),
+ .remove = tps6507x_ts_remove,
};
module_platform_driver(tps6507x_ts_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 5ce3fa8ce64..9c0cdc7ea44 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -555,7 +555,7 @@ static void tsc2005_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
+static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
{
tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
@@ -569,7 +569,7 @@ static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
}
-static int __devinit tsc2005_probe(struct spi_device *spi)
+static int tsc2005_probe(struct spi_device *spi)
{
const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
struct tsc2005 *ts;
@@ -686,7 +686,7 @@ err_free_mem:
return error;
}
-static int __devexit tsc2005_remove(struct spi_device *spi)
+static int tsc2005_remove(struct spi_device *spi)
{
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -745,7 +745,7 @@ static struct spi_driver tsc2005_driver = {
.pm = &tsc2005_pm_ops,
},
.probe = tsc2005_probe,
- .remove = __devexit_p(tsc2005_remove),
+ .remove = tsc2005_remove,
};
module_spi_driver(tsc2005_driver);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 1473d2382af..0b67ba476b4 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -273,7 +273,7 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-static int __devinit tsc2007_probe(struct i2c_client *client,
+static int tsc2007_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tsc2007 *ts;
@@ -366,7 +366,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
return err;
}
-static int __devexit tsc2007_remove(struct i2c_client *client)
+static int tsc2007_remove(struct i2c_client *client)
{
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
@@ -396,7 +396,7 @@ static struct i2c_driver tsc2007_driver = {
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
- .remove = __devexit_p(tsc2007_remove),
+ .remove = tsc2007_remove,
};
module_i2c_driver(tsc2007_driver);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 46e83ad53f4..1271f97b407 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -274,7 +274,7 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -318,7 +318,7 @@ static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
return 0;
}
-static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
+static int ucb1400_ts_probe(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
@@ -397,7 +397,7 @@ err:
return error;
}
-static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
+static int ucb1400_ts_remove(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
@@ -442,7 +442,7 @@ static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = __devexit_p(ucb1400_ts_remove),
+ .remove = ucb1400_ts_remove,
.driver = {
.name = "ucb1400_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 9396b21d0e8..d2ef8f05c66 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -215,7 +215,7 @@ static void w90p910_close(struct input_dev *dev)
clk_disable(w90p910_ts->clk);
}
-static int __devinit w90x900ts_probe(struct platform_device *pdev)
+static int w90x900ts_probe(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts;
struct input_dev *input_dev;
@@ -301,7 +301,7 @@ fail1: input_free_device(input_dev);
return err;
}
-static int __devexit w90x900ts_remove(struct platform_device *pdev)
+static int w90x900ts_remove(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
struct resource *res;
@@ -325,7 +325,7 @@ static int __devexit w90x900ts_remove(struct platform_device *pdev)
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
- .remove = __devexit_p(w90x900ts_remove),
+ .remove = w90x900ts_remove,
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 0c01657132f..bf0d07620ba 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -144,7 +144,7 @@ static void wacom_i2c_close(struct input_dev *dev)
disable_irq(client->irq);
}
-static int __devinit wacom_i2c_probe(struct i2c_client *client,
+static int wacom_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wacom_i2c *wac_i2c;
@@ -225,7 +225,7 @@ err_free_mem:
return error;
}
-static int __devexit wacom_i2c_remove(struct i2c_client *client)
+static int wacom_i2c_remove(struct i2c_client *client)
{
struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
@@ -272,7 +272,7 @@ static struct i2c_driver wacom_i2c_driver = {
},
.probe = wacom_i2c_probe,
- .remove = __devexit_p(wacom_i2c_remove),
+ .remove = wacom_i2c_remove,
.id_table = wacom_i2c_id,
};
module_i2c_driver(wacom_i2c_driver);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 52abb98a8ae..f88fab56178 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -233,7 +233,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
}
}
-static __devinit int wm831x_ts_probe(struct platform_device *pdev)
+static int wm831x_ts_probe(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts;
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -245,7 +245,8 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
if (core_pdata)
pdata = core_pdata->touch;
- wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
+ wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
+ GFP_KERNEL);
input_dev = input_allocate_device();
if (!wm831x_ts || !input_dev) {
error = -ENOMEM;
@@ -376,21 +377,18 @@ err_data_irq:
free_irq(wm831x_ts->data_irq, wm831x_ts);
err_alloc:
input_free_device(input_dev);
- kfree(wm831x_ts);
return error;
}
-static __devexit int wm831x_ts_remove(struct platform_device *pdev)
+static int wm831x_ts_remove(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
free_irq(wm831x_ts->pd_irq, wm831x_ts);
free_irq(wm831x_ts->data_irq, wm831x_ts);
input_unregister_device(wm831x_ts->input_dev);
- kfree(wm831x_ts);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -400,7 +398,7 @@ static struct platform_driver wm831x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = wm831x_ts_probe,
- .remove = __devexit_p(wm831x_ts_remove),
+ .remove = wm831x_ts_remove,
};
module_platform_driver(wm831x_ts_driver);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20e..c1c74e030a5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
* physically contiguous memory regions it is mapping into page sizes
* that we support.
*
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512GB Pages are not supported due to a hardware bug
*/
-#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
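
For reference, a standalone check (not kernel code) of the arithmetic behind the new mask: bit n of the pgsize bitmap advertises a 2^n-byte page size, ~0xFFF sets every bit from 12 upward, and 2ULL << 38 is bit 39, i.e. exactly the 512GiB page size being withdrawn. ULL constants are used here only so the sketch builds as plain userspace C.

#include <stdio.h>

int main(void)
{
	unsigned long long pgsizes = (~0xFFFULL) & ~(2ULL << 38);

	printf("4KiB   advertised: %d\n", !!(pgsizes & (1ULL << 12)));
	printf("2MiB   advertised: %d\n", !!(pgsizes & (1ULL << 21)));
	printf("1GiB   advertised: %d\n", !!(pgsizes & (1ULL << 30)));
	printf("512GiB advertised: %d\n", !!(pgsizes & (1ULL << 39)));
	printf("512GiB == 2^39 bytes: %d\n",
	       (1ULL << 39) == 512ULL * 1024 * 1024 * 1024);
	return 0;
}
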
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
list_del(&dev_data->dev_data_list);
spin_unlock_irqrestore(&dev_data_list_lock, flags);
+ if (dev_data->group)
+ iommu_group_put(dev_data->group);
+
kfree(dev_data);
}
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
*from = to;
}
-#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
- struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
- struct iommu_dev_data *dev_data;
- struct iommu_group *group;
- u16 alias;
- int ret;
-
- if (dev->archdata.iommu)
- return 0;
-
- dev_data = find_dev_data(get_device_id(dev));
- if (!dev_data)
- return -ENOMEM;
-
- alias = amd_iommu_alias_table[dev_data->devid];
- if (alias != dev_data->devid) {
- struct iommu_dev_data *alias_data;
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ return ERR_PTR(-ENODEV);
+ }
- alias_data = find_dev_data(alias);
- if (alias_data == NULL) {
- pr_err("AMD-Vi: Warning: Unhandled device %s\n",
- dev_name(dev));
- free_dev_data(dev_data);
- return -ENOTSUPP;
- }
- dev_data->alias_data = alias_data;
+ return bus;
+}
- dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
- }
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
- if (dma_pdev == NULL)
- dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+ struct pci_dev *dma_pdev = pdev;
/* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
* Finding the next device may require skipping virtual buses.
*/
while (!pci_is_root_bus(dma_pdev->bus)) {
- struct pci_bus *bus = dma_pdev->bus;
-
- while (!bus->self) {
- if (!pci_is_root_bus(bus))
- bus = bus->parent;
- else
- goto root_bus;
- }
+ struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+ if (IS_ERR(bus))
+ break;
if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
-root_bus:
- group = iommu_group_get(&dma_pdev->dev);
- pci_dev_put(dma_pdev);
+ return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+ struct iommu_group *group = iommu_group_get(&pdev->dev);
+ int ret;
+
if (!group) {
group = iommu_group_alloc();
if (IS_ERR(group))
return PTR_ERR(group);
+
+ WARN_ON(&pdev->dev != dev);
}
ret = iommu_group_add_device(group, dev);
-
iommu_group_put(group);
+ return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+ struct device *dev)
+{
+ if (!dev_data->group) {
+ struct iommu_group *group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ dev_data->group = group;
+ }
+
+ return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_group *group;
+ struct pci_dev *dma_pdev;
+ int ret;
+
+ group = iommu_group_get(dev);
+ if (group) {
+ iommu_group_put(group);
+ return 0;
+ }
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ if (dev_data->alias_data) {
+ u16 alias;
+ struct pci_bus *bus;
+
+ if (dev_data->alias_data->group)
+ goto use_group;
+
+ /*
+ * If the alias device exists, it's effectively just a first
+ * level quirk for finding the DMA source.
+ */
+ alias = amd_iommu_alias_table[dev_data->devid];
+ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+ if (dma_pdev) {
+ dma_pdev = get_isolation_root(dma_pdev);
+ goto use_pdev;
+ }
+
+ /*
+ * If the alias is virtual, try to find a parent device
+ * and test whether the IOMMU group is actually rooted above
+ * the alias. Be careful to also test the parent device if
+ * we think the alias is the root of the group.
+ */
+ bus = pci_find_bus(0, alias >> 8);
+ if (!bus)
+ goto use_group;
+
+ bus = find_hosted_bus(bus);
+ if (IS_ERR(bus) || !bus->self)
+ goto use_group;
+
+ dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+ if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+ goto use_pdev;
+
+ pci_dev_put(dma_pdev);
+ goto use_group;
+ }
+
+ dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+ ret = use_pdev_iommu_group(dma_pdev, dev);
+ pci_dev_put(dma_pdev);
+ return ret;
+use_group:
+ return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iommu_dev_data *dev_data;
+ u16 alias;
+ int ret;
+
+ if (dev->archdata.iommu)
+ return 0;
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ struct iommu_dev_data *alias_data;
+
+ alias_data = find_dev_data(alias);
+ if (alias_data == NULL) {
+ pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+ dev_name(dev));
+ free_dev_data(dev_data);
+ return -ENOTSUPP;
+ }
+ dev_data->alias_data = alias_data;
+ }
+ ret = init_iommu_group(dev);
if (ret)
return ret;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff..e38ab438bb3 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
+ struct iommu_group *group; /* IOMMU group for virtual aliases */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0badfa48b32..c2c07a4a7f2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1827,10 +1827,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (!pte)
return -ENOMEM;
/* It is large page*/
- if (largepage_lvl > 1)
+ if (largepage_lvl > 1) {
pteval |= DMA_PTE_LARGE_PAGE;
- else
+ /* Ensure that old small page tables are removed to make room
+  * for superpage, if they exist. */
+ dma_pte_clear_range(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ dma_pte_free_pagetable(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ } else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ }
}
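
Editorial aside (not part of the patch): lvl_to_nr_pages() sizes the range being cleared and freed above. Assuming the usual 9-bit stride per page-table level, a level-2 (2MiB) superpage covers 512 4KiB frames and a level-3 (1GiB) superpage covers 262144, which is what this standalone sketch prints:

#include <stdio.h>

/* Not kernel code: 4KiB page frames spanned per level, assuming a
 * 9-bit stride (level 2 maps 2MiB, level 3 maps 1GiB). */
static unsigned long nr_pages_for_level(unsigned int lvl)
{
	return 1UL << ((lvl - 1) * 9);
}

int main(void)
{
	printf("level 2: %lu x 4KiB = %lu KiB\n",
	       nr_pages_for_level(2), nr_pages_for_level(2) * 4);
	printf("level 3: %lu x 4KiB = %lu MiB\n",
	       nr_pages_for_level(3), nr_pages_for_level(3) * 4 / 1024);
	return 0;
}
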
/* We don't need lock here, nobody else
@@ -2320,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static bool device_has_rmrr(struct pci_dev *dev)
+{
+ struct dmar_rmrr_unit *rmrr;
+ int i;
+
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ /*
+ * Return TRUE if this RMRR contains the device that
+ * is passed in.
+ */
+ if (rmrr->devices[i] == dev)
+ return true;
+ }
+ }
+ return false;
+}
+
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
+
+ /*
+ * We want to prevent any device associated with an RMRR from
+ * getting placed into the SI Domain. This is done because
+ * problems exist when devices are moved in and out of domains
+ * and their respective RMRR info is lost. We exempt USB devices
+ * from this process due to their usage of RMRRs that are known
+ * to not be needed after BIOS hand-off to OS.
+ */
+ if (device_has_rmrr(pdev) &&
+ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+ return 0;
+
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return 1;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index badc17c2bcb..18108c1405e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,13 +16,13 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <asm/cacheflush.h>
@@ -143,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj)
+ if (!obj || !pdata)
return -EINVAL;
if (!arch_iommu)
return -ENODEV;
- clk_enable(obj->clk);
+ if (pdata->deassert_reset) {
+ err = pdata->deassert_reset(pdev, pdata->reset_name);
+ if (err) {
+ dev_err(obj->dev, "deassert_reset failed: %d\n", err);
+ return err;
+ }
+ }
+
+ pm_runtime_get_sync(obj->dev);
err = arch_iommu->enable(obj);
- clk_disable(obj->clk);
return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
- if (!obj)
- return;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- clk_enable(obj->clk);
+ if (!obj || !pdata)
+ return;
arch_iommu->disable(obj);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
+
+ if (pdata->assert_reset)
+ pdata->assert_reset(pdev, pdata->reset_name);
}
/*
@@ -290,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
if (!obj || !obj->nr_tlb_entries || !e)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &l);
if (l.base == obj->nr_tlb_entries) {
@@ -320,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
cr = iotlb_alloc_cr(obj, e);
if (IS_ERR(cr)) {
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return PTR_ERR(cr);
}
@@ -334,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return err;
}
@@ -364,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
int i;
struct cr_regs cr;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
@@ -383,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -397,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
{
struct iotlb_lock l;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
l.base = 0;
l.vict = 0;
@@ -405,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
iommu_write_reg(obj, 1, MMU_GFLUSH);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -415,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
if (!obj || !buf)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return bytes;
}
@@ -433,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
struct cr_regs tmp;
struct cr_regs *p = crs;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &saved);
for_each_iotlb_cr(obj, num, i, tmp) {
@@ -443,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
iotlb_lock_set(obj, &saved);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return p - crs;
}
@@ -807,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!obj->refcount)
return IRQ_NONE;
- clk_enable(obj->clk);
errs = iommu_report_fault(obj, &da);
- clk_disable(obj->clk);
if (errs == 0)
return IRQ_HANDLED;
@@ -931,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (pdev->num_resources != 2)
- return -EINVAL;
-
obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
@@ -984,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
goto err_irq;
platform_set_drvdata(pdev, obj);
+ pm_runtime_irq_safe(obj->dev);
+ pm_runtime_enable(obj->dev);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
@@ -992,8 +999,6 @@ err_irq:
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_mem:
- clk_put(obj->clk);
-err_clk:
kfree(obj);
return err;
}
@@ -1014,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
iounmap(obj->regbase);
- clk_put(obj->clk);
+ pm_runtime_disable(obj->dev);
+
dev_info(&pdev->dev, "%s removed\n", obj->name);
kfree(obj);
return 0;
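
The omap-iommu hunks above replace every clk_enable()/clk_disable() pair around MMU register access with pm_runtime_get_sync()/pm_runtime_put_sync(), and enable runtime PM in probe. A minimal sketch of the resulting access pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

/*
 * Sketch only (helper name hypothetical): the device is powered up just
 * for the duration of the register access, mirroring the former
 * clk_enable()/clk_disable() bracketing.
 */
static u32 example_mmu_read(struct device *dev, void __iomem *regbase,
			    unsigned long offset)
{
	u32 val;

	pm_runtime_get_sync(dev);	/* was: clk_enable(obj->clk)  */
	val = readl(regbase + offset);
	pm_runtime_put_sync(dev);	/* was: clk_disable(obj->clk) */

	return val;
}
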
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 2b5f3c04d16..12008420660 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,7 +29,6 @@ struct iotlb_entry {
struct omap_iommu {
const char *name;
struct module *owner;
- struct clk *clk;
void __iomem *regbase;
struct device *dev;
void *isr_priv;
@@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
* MMU Register offsets
*/
#define MMU_REVISION 0x00
-#define MMU_SYSCONFIG 0x10
-#define MMU_SYSSTATUS 0x14
#define MMU_IRQSTATUS 0x18
#define MMU_IRQENABLE 0x1c
#define MMU_WALKING_ST 0x40
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
index c0202029237..d745094a69d 100644
--- a/drivers/iommu/omap-iommu2.c
+++ b/drivers/iommu/omap-iommu2.c
@@ -28,19 +28,6 @@
*/
#define IOMMU_ARCH_VERSION 0x00000011
-/* SYSCONF */
-#define MMU_SYS_IDLE_SHIFT 3
-#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT)
-#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT)
-
-#define MMU_SYS_SOFTRESET (1 << 1)
-#define MMU_SYS_AUTOIDLE 1
-
-/* SYSSTATUS */
-#define MMU_SYS_RESETDONE 1
-
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
@@ -97,7 +84,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on)
static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
- unsigned long timeout;
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
@@ -106,29 +92,10 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
- iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
-
- timeout = jiffies + msecs_to_jiffies(20);
- do {
- l = iommu_read_reg(obj, MMU_SYSSTATUS);
- if (l & MMU_SYS_RESETDONE)
- break;
- } while (!time_after(jiffies, timeout));
-
- if (!(l & MMU_SYS_RESETDONE)) {
- dev_err(obj->dev, "can't take mmu out of reset\n");
- return -ENODEV;
- }
-
l = iommu_read_reg(obj, MMU_REVISION);
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
(l >> 4) & 0xf, l & 0xf);
- l = iommu_read_reg(obj, MMU_SYSCONFIG);
- l &= ~MMU_SYS_IDLE_MASK;
- l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
- iommu_write_reg(obj, l, MMU_SYSCONFIG);
-
iommu_write_reg(obj, pa, MMU_TTB);
__iommu_set_twl(obj, true);
@@ -142,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
l &= ~MMU_CNTL_MASK;
iommu_write_reg(obj, l, MMU_CNTL);
- iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
@@ -271,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
char *p = buf;
pr_reg(REVISION);
- pr_reg(SYSCONFIG);
- pr_reg(SYSSTATUS);
pr_reg(IRQSTATUS);
pr_reg(IRQENABLE);
pr_reg(WALKING_ST);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4b..4c9db62814f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
+ bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0;
fail:
@@ -450,7 +451,6 @@ static struct platform_driver tegra_gart_driver = {
static int __devinit tegra_gart_init(void)
{
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return platform_driver_register(&tegra_gart_driver);
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4252d743963..25c1210c083 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -694,10 +694,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
*pte = _PTE_VACANT(iova);
FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
- if (!--(*count)) {
+ if (!--(*count))
free_ptbl(as, iova);
- smmu_flush_regs(as->smmu, 0);
- }
}
static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1232,6 +1230,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
smmu_debugfs_create(smmu);
smmu_handle = smmu;
+ bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return 0;
}
@@ -1276,7 +1275,6 @@ static struct platform_driver tegra_smmu_driver = {
static int __devinit tegra_smmu_init(void)
{
- bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return platform_driver_register(&tegra_smmu_driver);
}
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 02bd37a6187..bf4609a5bd9 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,3 +1,4 @@
-obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
-obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
-obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
new file mode 100644
index 00000000000..80e1d2fd9d4
--- /dev/null
+++ b/drivers/irqchip/spear-shirq.c
@@ -0,0 +1,316 @@
+/*
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009-2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/spear-shirq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(lock);
+
+/* spear300 shared irq registers offsets and masks */
+#define SPEAR300_INT_ENB_MASK_REG 0x54
+#define SPEAR300_INT_STS_MASK_REG 0x58
+
+static struct spear_shirq spear300_shirq_ras1 = {
+ .irq_nr = 9,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = SPEAR300_INT_ENB_MASK_REG,
+ .status_reg = SPEAR300_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear300_shirq_blocks[] = {
+ &spear300_shirq_ras1,
+};
+
+/* spear310 shared irq registers offsets and masks */
+#define SPEAR310_INT_STS_MASK_REG 0x04
+
+static struct spear_shirq spear310_shirq_ras1 = {
+ .irq_nr = 8,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras2 = {
+ .irq_nr = 5,
+ .irq_bit_off = 8,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras3 = {
+ .irq_nr = 1,
+ .irq_bit_off = 13,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_intrcomm_ras = {
+ .irq_nr = 3,
+ .irq_bit_off = 14,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear310_shirq_blocks[] = {
+ &spear310_shirq_ras1,
+ &spear310_shirq_ras2,
+ &spear310_shirq_ras3,
+ &spear310_shirq_intrcomm_ras,
+};
+
+/* spear320 shared irq registers offsets and masks */
+#define SPEAR320_INT_STS_MASK_REG 0x04
+#define SPEAR320_INT_CLR_MASK_REG 0x04
+#define SPEAR320_INT_ENB_MASK_REG 0x08
+
+static struct spear_shirq spear320_shirq_ras1 = {
+ .irq_nr = 3,
+ .irq_bit_off = 7,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras2 = {
+ .irq_nr = 1,
+ .irq_bit_off = 10,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras3 = {
+ .irq_nr = 3,
+ .irq_bit_off = 0,
+ .invalid_irq = 1,
+ .regs = {
+ .enb_reg = SPEAR320_INT_ENB_MASK_REG,
+ .reset_to_enb = 1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_intrcomm_ras = {
+ .irq_nr = 11,
+ .irq_bit_off = 11,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq *spear320_shirq_blocks[] = {
+ &spear320_shirq_ras3,
+ &spear320_shirq_ras1,
+ &spear320_shirq_ras2,
+ &spear320_shirq_intrcomm_ras,
+};
+
+static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+ u32 val, offset = d->irq - shirq->irq_base;
+ unsigned long flags;
+
+ if (shirq->regs.enb_reg == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->base + shirq->regs.enb_reg);
+
+ if (mask ^ shirq->regs.reset_to_enb)
+ val &= ~(0x1 << shirq->irq_bit_off << offset);
+ else
+ val |= 0x1 << shirq->irq_bit_off << offset;
+
+ writel(val, shirq->base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+
+}
+
+static void shirq_irq_mask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 1);
+}
+
+static void shirq_irq_unmask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 0);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear-shirq",
+ .irq_ack = shirq_irq_mask,
+ .irq_mask = shirq_irq_mask,
+ .irq_unmask = shirq_irq_unmask,
+};
+
+static void shirq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 i, j, val, mask, tmp;
+ struct irq_chip *chip;
+ struct spear_shirq *shirq = irq_get_handler_data(irq);
+
+ chip = irq_get_chip(irq);
+ chip->irq_ack(&desc->irq_data);
+
+ mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
+ while ((val = readl(shirq->base + shirq->regs.status_reg) &
+ mask)) {
+
+ val >>= shirq->irq_bit_off;
+ for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
+
+ if (!(j & val))
+ continue;
+
+ generic_handle_irq(shirq->irq_base + i);
+
+ /* clear interrupt */
+ if (shirq->regs.clear_reg == -1)
+ continue;
+
+ tmp = readl(shirq->base + shirq->regs.clear_reg);
+ if (shirq->regs.reset_to_clear)
+ tmp &= ~(j << shirq->irq_bit_off);
+ else
+ tmp |= (j << shirq->irq_bit_off);
+ writel(tmp, shirq->base + shirq->regs.clear_reg);
+ }
+ }
+ chip->irq_unmask(&desc->irq_data);
+}
+
+static void __init spear_shirq_register(struct spear_shirq *shirq)
+{
+ int i;
+
+ if (shirq->invalid_irq)
+ return;
+
+ irq_set_chained_handler(shirq->irq, shirq_handler);
+ for (i = 0; i < shirq->irq_nr; i++) {
+ irq_set_chip_and_handler(shirq->irq_base + i,
+ &shirq_chip, handle_simple_irq);
+ set_irq_flags(shirq->irq_base + i, IRQF_VALID);
+ irq_set_chip_data(shirq->irq_base + i, shirq);
+ }
+
+ irq_set_handler_data(shirq->irq, shirq);
+}
+
+static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
+ struct device_node *np)
+{
+ int i, irq_base, hwirq = 0, irq_nr = 0;
+ static struct irq_domain *shirq_domain;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map shirq registers\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < block_nr; i++)
+ irq_nr += shirq_blocks[i]->irq_nr;
+
+ irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: irq desc alloc failed\n", __func__);
+ goto err_unmap;
+ }
+
+ shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (WARN_ON(!shirq_domain)) {
+ pr_warn("%s: irq domain init failed\n", __func__);
+ goto err_free_desc;
+ }
+
+ for (i = 0; i < block_nr; i++) {
+ shirq_blocks[i]->base = base;
+ shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
+ hwirq);
+ shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
+
+ spear_shirq_register(shirq_blocks[i]);
+ hwirq += shirq_blocks[i]->irq_nr;
+ }
+
+ return 0;
+
+err_free_desc:
+ irq_free_descs(irq_base, irq_nr);
+err_unmap:
+ iounmap(base);
+ return -ENXIO;
+}
+
+int __init spear300_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear300_shirq_blocks,
+ ARRAY_SIZE(spear300_shirq_blocks), np);
+}
+
+int __init spear310_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear310_shirq_blocks,
+ ARRAY_SIZE(spear310_shirq_blocks), np);
+}
+
+int __init spear320_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear320_shirq_blocks,
+ ARRAY_SIZE(spear320_shirq_blocks), np);
+}
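
The demux in shirq_handler() above reduces to bit arithmetic on each block descriptor. The sketch below, a standalone C program using the spear320 ras1 values from this file (irq_nr = 3, irq_bit_off = 7), shows the status mask it produces:

#include <stdio.h>

/* Sketch only: reproduces the mask computation from shirq_handler()
 * for spear320_shirq_ras1 (irq_nr = 3, irq_bit_off = 7).
 */
int main(void)
{
	unsigned int irq_nr = 3, irq_bit_off = 7;
	unsigned int mask = ((1u << irq_nr) - 1) << irq_bit_off;

	printf("status mask: 0x%x\n", mask);	/* 0x380: bits 7..9 */
	return 0;
}
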
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 89342f7e0c5..525471e776a 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -628,7 +628,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg)
if ((cs->dc.amd7930.ph_state == 8)) {
/* b-channels off, PH-AR cleared
* change to F3 */
- Amd7930_ph_command(cs, 0x20, "HW_RESET REQEST"); //LMR1 bit 5
+ Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5
spin_unlock_irqrestore(&cs->lock, flags);
} else {
Amd7930_ph_command(cs, 0x40, "HW_RESET REQUEST");
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index ff5e139f485..7fdf34704fe 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1417,7 +1417,7 @@ modeisar(struct BCState *bcs, int mode, int bc)
&bcs->hw.isar.reg->Flags))
bcs->hw.isar.dpath = 1;
else {
- printk(KERN_WARNING"isar modeisar both pathes in use\n");
+ printk(KERN_WARNING"isar modeisar both paths in use\n");
return (1);
}
break;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 28c99c623bc..22b720ec80c 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1217,8 +1217,7 @@ static void __exit dsp_cleanup(void)
{
mISDN_unregister_Bprotocol(&DSP);
- if (timer_pending(&dsp_spl_tl))
- del_timer(&dsp_spl_tl);
+ del_timer_sync(&dsp_spl_tl);
if (!list_empty(&dsp_ilist)) {
printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 48cce18e9d6..a20752f562b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -211,7 +211,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
led_trigger_set_default(led_cdev);
#endif
- printk(KERN_DEBUG "Registered led device: %s\n",
+ dev_dbg(parent, "Registered led device: %s\n",
led_cdev->name);
return 0;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 262eb419371..3c972b2f989 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -166,6 +166,19 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig)
+{
+ /* new name must be on a temporary string to prevent races */
+ BUG_ON(name == trig->name);
+
+ down_write(&triggers_list_lock);
+	/* this assumes that trig->name was originally allocated to
+	 * non-constant storage */
+ strcpy((char *)trig->name, name);
+ up_write(&triggers_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_trigger_rename_static);
+
/* LED Trigger Interface */
int led_trigger_register(struct led_trigger *trig)
@@ -300,13 +313,13 @@ void led_trigger_register_simple(const char *name, struct led_trigger **tp)
if (err < 0) {
kfree(trig);
trig = NULL;
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (%d)\n", name, err);
+ pr_warn("LED trigger %s failed to register (%d)\n",
+ name, err);
}
- } else
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (no memory)\n", name);
-
+ } else {
+ pr_warn("LED trigger %s failed to register (no memory)\n",
+ name);
+ }
*tp = trig;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index b7e8cc0957f..6be2edd4117 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -165,15 +165,13 @@ static int pm860x_led_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "control");
if (!res) {
dev_err(&pdev->dev, "No REG resource for control\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_control = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "blink");
if (!res) {
dev_err(&pdev->dev, "No REG resource for blink\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_blink = res->start;
memset(data->name, 0, MFD_NAME_SIZE);
@@ -224,9 +222,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
}
pm860x_led_set(&data->cdev, 0);
return 0;
-out:
- devm_kfree(&pdev->dev, data);
- return ret;
}
static int pm860x_led_remove(struct platform_device *pdev)
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index dcd9128a51a..e8072abe76e 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -5,10 +5,10 @@
*
* Loosely derived from leds-da903x:
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* Licensed under the GPL-2 or later.
*/
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 9abe8de40ed..851517030cc 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -26,8 +26,8 @@
#define BD2802_LED_OFFSET 0xa
#define BD2802_COLOR_OFFSET 0x3
-#define BD2802_REG_CLKSETUP 0x00
-#define BD2802_REG_CONTROL 0x01
+#define BD2802_REG_CLKSETUP 0x00
+#define BD2802_REG_CONTROL 0x01
#define BD2802_REG_HOURSETUP 0x02
#define BD2802_REG_CURRENT1SETUP 0x03
#define BD2802_REG_CURRENT2SETUP 0x04
@@ -93,7 +93,7 @@ struct bd2802_led {
* In ADF mode, user can set registers of BD2802GU directly,
* therefore BD2802GU doesn't enter reset state.
*/
- int adf_on;
+ int adf_on;
enum led_ids led_id;
enum led_colors color;
@@ -328,7 +328,7 @@ static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
@@ -492,7 +492,7 @@ static ssize_t bd2802_store_##attr_name(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index b02547052e1..6a8405df76a 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@ -26,7 +27,7 @@ static struct platform_device *pdev;
static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
{
- printk(KERN_INFO KBUILD_MODNAME ": '%s' found\n", id->ident);
+ pr_info("'%s' found\n", id->ident);
return 1;
}
@@ -135,8 +136,7 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev,
status = 0;
} else {
- printk(KERN_DEBUG KBUILD_MODNAME
- ": clevo_mail_led_blink(..., %lu, %lu),"
+ pr_debug("clevo_mail_led_blink(..., %lu, %lu),"
" returning -EINVAL (unsupported)\n",
*delay_on, *delay_off);
}
@@ -183,7 +183,7 @@ static int __init clevo_mail_led_init(void)
count = dmi_check_system(clevo_mail_led_dmi_table);
} else {
count = 1;
- printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
+ pr_err("Skipping DMI detection. "
"If the driver works on your hardware please "
"report model and the output of dmidecode in tracker "
"at http://sourceforge.net/projects/clevo-mailled/\n");
@@ -197,8 +197,7 @@ static int __init clevo_mail_led_init(void)
error = platform_driver_probe(&clevo_mail_led_driver,
clevo_mail_led_probe);
if (error) {
- printk(KERN_ERR KBUILD_MODNAME
- ": Can't probe platform driver\n");
+ pr_err("Can't probe platform driver\n");
platform_device_unregister(pdev);
}
} else
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index ffa99303b62..8abcb66db01 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -43,7 +43,7 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
@@ -52,12 +52,11 @@ static int cobalt_qube_led_probe(struct platform_device *pdev)
retval = led_classdev_register(&pdev->dev, &qube_front_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
return 0;
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
@@ -67,10 +66,8 @@ static int cobalt_qube_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&qube_front_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index d52e47de396..001088b3137 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -85,13 +85,13 @@ static int cobalt_raq_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
retval = led_classdev_register(&pdev->dev, &raq_power_off_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
retval = led_classdev_register(&pdev->dev, &raq_web_led);
if (retval)
@@ -102,8 +102,7 @@ static int cobalt_raq_led_probe(struct platform_device *pdev)
err_unregister:
led_classdev_unregister(&raq_power_off_led);
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
@@ -114,10 +113,8 @@ static int cobalt_raq_led_remove(struct platform_device *pdev)
led_classdev_unregister(&raq_power_off_led);
led_classdev_unregister(&raq_web_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 6f31b776765..c263a21db82 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -2,10 +2,10 @@
* LEDs driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -85,7 +85,7 @@ static void da903x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct da903x_led *led;
-
+
led = container_of(led_cdev, struct da903x_led, cdev);
led->new_brightness = value;
schedule_work(&led->work);
@@ -156,7 +156,7 @@ static struct platform_driver da903x_led_driver = {
module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
-MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
- "Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-led");
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index b9053fa6e25..b4d5a44cc41 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -20,8 +20,8 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/io.h>
#define FSG_LED_WLAN_BIT 0
#define FSG_LED_WAN_BIT 1
@@ -149,11 +149,10 @@ static int fsg_led_probe(struct platform_device *pdev)
int ret;
/* Map the LED chip select address space */
- latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address) {
- ret = -ENOMEM;
- goto failremap;
- }
+ latch_address = (unsigned short *) devm_ioremap(&pdev->dev,
+ IXP4XX_EXP_BUS_BASE(2), 512);
+ if (!latch_address)
+ return -ENOMEM;
latch_value = 0xffff;
*latch_address = latch_value;
@@ -195,8 +194,6 @@ static int fsg_led_probe(struct platform_device *pdev)
failwan:
led_classdev_unregister(&fsg_wlan_led);
failwlan:
- iounmap(latch_address);
- failremap:
return ret;
}
@@ -210,8 +207,6 @@ static int fsg_led_remove(struct platform_device *pdev)
led_classdev_unregister(&fsg_sync_led);
led_classdev_unregister(&fsg_ring_led);
- iounmap(latch_address);
-
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 291c20797ca..a0d931bcb37 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/err.h>
struct gpio_led_data {
struct led_classdev cdev;
@@ -101,15 +102,11 @@ static int create_gpio_led(const struct gpio_led *template,
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
+ dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -129,20 +126,20 @@ static int create_gpio_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ (led_dat->active_low ^ state) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ template->name);
if (ret < 0)
- goto err;
-
+ return ret;
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_gpio_led(struct gpio_led_data *led)
@@ -151,7 +148,6 @@ static void delete_gpio_led(struct gpio_led_data *led)
return;
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
struct gpio_leds_priv {
@@ -176,12 +172,16 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
/* count LEDs in this device, so we know how much to allocate */
count = of_get_child_count(np);
if (!count)
- return NULL;
+ return ERR_PTR(-ENODEV);
+
+ for_each_child_of_node(np, child)
+ if (of_get_gpio(child, 0) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(count),
GFP_KERNEL);
if (!priv)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for_each_child_of_node(np, child) {
struct gpio_led led = {};
@@ -216,7 +216,7 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static const struct of_device_id of_gpio_leds_match[] = {
@@ -226,7 +226,7 @@ static const struct of_device_id of_gpio_leds_match[] = {
#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF_GPIO */
@@ -264,8 +264,8 @@ static int gpio_led_probe(struct platform_device *pdev)
}
} else {
priv = gpio_leds_create_of(pdev);
- if (!priv)
- return -ENODEV;
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
}
platform_set_drvdata(pdev, priv);
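
The leds-gpio changes above switch gpio_leds_create_of() from returning NULL on failure to returning ERR_PTR() codes, so that -EPROBE_DEFER reported by of_get_gpio() reaches the driver core instead of being flattened to -ENODEV. A minimal sketch of that convention, with hypothetical function names:

#include <linux/err.h>
#include <linux/errno.h>

/* Sketch only (names hypothetical): the creator encodes the failure
 * reason in the returned pointer, and the caller forwards it verbatim.
 */
static void *example_create(int err)
{
	if (err)
		return ERR_PTR(err);	/* e.g. -EPROBE_DEFER or -ENOMEM */
	return NULL;
}

static int example_probe(void)
{
	void *priv = example_create(-EPROBE_DEFER);

	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* -EPROBE_DEFER propagates */
	return 0;
}
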
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index b13ce037191..65d79284c48 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -408,10 +408,10 @@ static ssize_t lm3556_indicator_pattern_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
+static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
static const struct regmap_config lm355x_regmap = {
.reg_bits = 8,
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 215a7c1e56c..07b3dde9061 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -201,13 +201,13 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(torch_pin, 0666, NULL, lm3642_torch_pin_store);
+static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
static void lm3642_deferred_torch_brightness_set(struct work_struct *work)
{
@@ -258,13 +258,13 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(strobe_pin, 0666, NULL, lm3642_strobe_pin_store);
+static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
static void lm3642_deferred_strobe_brightness_set(struct work_struct *work)
{
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index b081f67e1de..0c4386e656c 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -86,7 +86,7 @@ static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
tmp = i2c_smbus_read_byte_data(client, reg);
if (tmp < 0)
- return -EINVAL;
+ return tmp;
*value = tmp;
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 966f158a07d..cb8a5220200 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -152,7 +152,7 @@ static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -616,7 +616,7 @@ static ssize_t store_led_pattern(struct device *dev,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
@@ -788,10 +788,17 @@ static int lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
+ if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&client->dev,
+ "unexpected data in register (expected 0x%x got 0x%x)\n",
+ LP5521_REG_R_CURR_DEFAULT, buf);
+ ret = -EINVAL;
+ goto fail2;
+ }
usleep_range(10000, 20000);
ret = lp5521_detect(client);
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 7e304b7ff77..7f5be8948cd 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -171,7 +171,7 @@ static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
s32 ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -248,7 +248,10 @@ static int lp5523_configure(struct i2c_client *client)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp5523_read(client, LP5523_REG_STATUS, &status);
+ ret = lp5523_read(client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
status &= LP5523_ENG_STATUS_MASK;
if (status == LP5523_ENG_STATUS_MASK) {
@@ -464,10 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev,
LP5523_EN_LEDTEST | 16);
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ if (ret < 0)
+ goto fail;
+
vdd--; /* There may be some fluctuation in measurement */
for (i = 0; i < LP5523_LEDS; i++) {
@@ -489,9 +498,14 @@ static ssize_t lp5523_selftest(struct device *dev,
/* ADC conversion time is 2.7 ms typically */
usleep_range(3000, 6000);
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000);/* Was not ready. Wait. */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ if (ret < 0)
+ goto fail;
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
pos += sprintf(buf + pos, "LED %d FAIL\n", i);
@@ -696,7 +710,7 @@ static ssize_t store_current(struct device *dev,
ssize_t ret;
unsigned long curr;
- if (strict_strtoul(buf, 0, &curr))
+ if (kstrtoul(buf, 0, &curr))
return -EINVAL;
if (curr > led->max_current)
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 34b3ba4376f..c9b9e1fec58 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -89,15 +89,11 @@ static int create_lt3593_led(const struct gpio_led *template,
/* skip leds on GPIOs that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
+ dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
KBUILD_MODNAME, template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -110,24 +106,21 @@ static int create_lt3593_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ GPIOF_DIR_OUT | state, template->name);
if (ret < 0)
- goto err;
+ return ret;
INIT_WORK(&led_dat->work, lt3593_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
- printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
+ dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
KBUILD_MODNAME, template->name, template->gpio);
return 0;
-
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_lt3593_led(struct lt3593_led_data *led)
@@ -137,7 +130,6 @@ static void delete_lt3593_led(struct lt3593_led_data *led)
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
static int lt3593_led_probe(struct platform_device *pdev)
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index f117f7326c5..27d06c52824 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/nsc_gpio.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 58a800b17dc..c61c5ebcc08 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -243,7 +243,7 @@ static ssize_t netxbig_led_sata_store(struct device *dev,
int mode_val;
int ret;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 7b75affb308..d978171c25b 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -150,7 +150,7 @@ static ssize_t ns2_led_sata_store(struct device *dev,
unsigned long enable;
enum ns2_led_modes mode;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
@@ -192,29 +192,22 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
int ret;
enum ns2_led_modes mode;
- ret = gpio_request(template->cmd, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->cmd,
- gpio_get_value(template->cmd));
- if (ret)
- gpio_free(template->cmd);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->cmd,
+ GPIOF_DIR_OUT | gpio_get_value(template->cmd),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
template->name);
+ return ret;
}
- ret = gpio_request(template->slow, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->slow,
- gpio_get_value(template->slow));
- if (ret)
- gpio_free(template->slow);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->slow,
+ GPIOF_DIR_OUT | gpio_get_value(template->slow),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
template->name);
- goto err_free_cmd;
+ return ret;
}
rwlock_init(&led_dat->rw_lock);
@@ -229,7 +222,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = ns2_led_get_mode(led_dat, &mode);
if (ret < 0)
- goto err_free_slow;
+ return ret;
/* Set LED initial state. */
led_dat->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
@@ -238,7 +231,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0)
- goto err_free_slow;
+ return ret;
ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
if (ret < 0)
@@ -248,11 +241,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
err_free_cdev:
led_classdev_unregister(&led_dat->cdev);
-err_free_slow:
- gpio_free(led_dat->slow);
-err_free_cmd:
- gpio_free(led_dat->cmd);
-
return ret;
}
@@ -260,8 +248,6 @@ static void delete_ns2_led(struct ns2_led_data *led_dat)
{
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
- gpio_free(led_dat->cmd);
- gpio_free(led_dat->slow);
}
#ifdef CONFIG_OF_GPIO
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 706791af8fc..edf485b773c 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -277,7 +277,7 @@ static int pca955x_probe(struct i2c_client *client,
return -ENODEV;
}
- printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
+ dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index e51ff7a3cd8..2157524f277 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -26,7 +26,7 @@
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
- unsigned int active_low;
+ unsigned int active_low;
unsigned int period;
};
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index 9ebdd5011a7..2e746d257b0 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -16,7 +16,7 @@
#include <asm/mach-rc32434/rb.h>
static void rb532_led_set(struct led_classdev *cdev,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
if (brightness)
set_latch_u5(LO_ULED, 0);
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index bc8984795a3..e0fff1ca592 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -204,10 +204,10 @@ static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
if (p->pin_state == R_TPU_PIN_GPIO_FN)
gpio_free(cfg->pin_gpio_fn);
- if (new_state == R_TPU_PIN_GPIO) {
- gpio_request(cfg->pin_gpio, cfg->name);
- gpio_direction_output(cfg->pin_gpio, !!brightness);
- }
+ if (new_state == R_TPU_PIN_GPIO)
+ gpio_request_one(cfg->pin_gpio, GPIOF_DIR_OUT | !!brightness,
+ cfg->name);
+
if (new_state == R_TPU_PIN_GPIO_FN)
gpio_request(cfg->pin_gpio_fn, cfg->name);
@@ -263,18 +263,18 @@ static int r_tpu_probe(struct platform_device *pdev)
}
/* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ p->mapbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
if (p->mapbase == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
}
/* get hold of clock */
- p->clk = clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err0;
+ return PTR_ERR(p->clk);
}
p->pdev = pdev;
@@ -293,7 +293,7 @@ static int r_tpu_probe(struct platform_device *pdev)
p->ldev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &p->ldev);
if (ret < 0)
- goto err1;
+ goto err0;
/* max_brightness may be updated by the LED core code */
p->min_rate = p->ldev.max_brightness * p->refresh_rate;
@@ -301,11 +301,8 @@ static int r_tpu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
- err1:
- r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
- clk_put(p->clk);
err0:
- iounmap(p->mapbase);
+ r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
return ret;
}
@@ -320,9 +317,7 @@ static int r_tpu_remove(struct platform_device *pdev)
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
pm_runtime_disable(&pdev->dev);
- clk_put(p->clk);
- iounmap(p->mapbase);
return 0;
}
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 6469849e826..ec9b287ecfb 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -459,7 +459,7 @@ static ssize_t nas_led_blink_store(struct device *dev,
struct led_classdev *led = dev_get_drvdata(dev);
unsigned long blink_state;
- ret = strict_strtoul(buf, 10, &blink_state);
+ ret = kstrtoul(buf, 10, &blink_state);
if (ret)
return ret;
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 88f23f84559..ed15157c8f6 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -216,13 +216,13 @@ static int wm8350_led_probe(struct platform_device *pdev)
isink = devm_regulator_get(&pdev->dev, "led_isink");
if (IS_ERR(isink)) {
- printk(KERN_ERR "%s: can't get ISINK\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get ISINK\n", __func__);
return PTR_ERR(isink);
}
dcdc = devm_regulator_get(&pdev->dev, "led_vcc");
if (IS_ERR(dcdc)) {
- printk(KERN_ERR "%s: can't get DCDC\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get DCDC\n", __func__);
return PTR_ERR(dcdc);
}
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 6e21e654bb0..b358cc05eff 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index b941685f222..027a2b15d7d 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -40,7 +40,7 @@ static int fb_notifier_callback(struct notifier_block *p,
int new_status = *blank ? BLANK : UNBLANK;
switch (event) {
- case FB_EVENT_BLANK :
+ case FB_EVENT_BLANK:
if (new_status == n->old_status)
break;
@@ -76,7 +76,7 @@ static ssize_t bl_trig_invert_store(struct device *dev,
unsigned long invert;
int ret;
- ret = strict_strtoul(buf, 10, &invert);
+ ret = kstrtoul(buf, 10, &invert);
if (ret < 0)
return ret;
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ba215dc42f9..72e3ebfc281 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -110,7 +110,7 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
unsigned long inverted;
int ret;
- ret = strict_strtoul(buf, 10, &inverted);
+ ret = kstrtoul(buf, 10, &inverted);
if (ret < 0)
return ret;
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index b5fdcb78a75..a5ebc0083d8 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
* eventfd (ie. the appropriate virtqueue thread)?
*/
if (!send_notify_to_eventfd(cpu)) {
- /* OK, we tell the main Laucher. */
+ /* OK, we tell the main Launcher. */
if (put_user(cpu->pending_notify, user))
return -EFAULT;
return sizeof(cpu->pending_notify);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 19636800900..9c6b9641486 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -997,7 +997,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
"%02x !\n", id, hdr->id);
goto failure;
}
- if (prom_add_property(smu->of_node, prop)) {
+ if (of_add_property(smu->of_node, prop)) {
printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
"property !\n", id);
goto failure;
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index b3411edb324..fd6ed15a979 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -593,19 +593,7 @@ static struct i2c_driver wf_fcu_driver = {
.id_table = wf_fcu_id,
};
-static int __init wf_fcu_init(void)
-{
- return i2c_add_driver(&wf_fcu_driver);
-}
-
-static void __exit wf_fcu_exit(void)
-{
- i2c_del_driver(&wf_fcu_driver);
-}
-
-
-module_init(wf_fcu_init);
-module_exit(wf_fcu_exit);
+module_i2c_driver(wf_fcu_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("FCU control objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index b0c2d3695b3..9ef32b3df91 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -174,19 +174,7 @@ static struct i2c_driver wf_lm75_driver = {
.id_table = wf_lm75_id,
};
-static int __init wf_lm75_sensor_init(void)
-{
- return i2c_add_driver(&wf_lm75_driver);
-}
-
-static void __exit wf_lm75_sensor_exit(void)
-{
- i2c_del_driver(&wf_lm75_driver);
-}
-
-
-module_init(wf_lm75_sensor_init);
-module_exit(wf_lm75_sensor_exit);
+module_i2c_driver(wf_lm75_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("LM75 sensor objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 371b058d2f7..945a25b2f31 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -130,18 +130,7 @@ static struct i2c_driver wf_max6690_driver = {
.id_table = wf_max6690_id,
};
-static int __init wf_max6690_sensor_init(void)
-{
- return i2c_add_driver(&wf_max6690_driver);
-}
-
-static void __exit wf_max6690_sensor_exit(void)
-{
- i2c_del_driver(&wf_max6690_driver);
-}
-
-module_init(wf_max6690_sensor_init);
-module_exit(wf_max6690_sensor_exit);
+module_i2c_driver(wf_max6690_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("MAX6690 sensor objects for PowerMac thermal control");
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 426e810233d..d87f5ee04ca 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -364,18 +364,7 @@ static struct i2c_driver wf_sat_driver = {
.id_table = wf_sat_id,
};
-static int __init sat_sensors_init(void)
-{
- return i2c_add_driver(&wf_sat_driver);
-}
-
-static void __exit sat_sensors_exit(void)
-{
- i2c_del_driver(&wf_sat_driver);
-}
-
-module_init(sat_sensors_init);
-module_exit(sat_sensors_exit);
+module_i2c_driver(wf_sat_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("SMU satellite sensors for PowerMac thermal control");
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index e4e84156745..aefb78e3cbf 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -208,31 +208,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again. In these situations we know that no other
- * bio may be in the cell. This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- BUG_ON(cell->holder != bio);
- BUG_ON(!bio_list_empty(&cell->bios));
-
- __cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
-
- spin_lock_irqsave(&prison->lock, flags);
- __cell_release_singleton(cell, bio);
- spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
-/*
* Sometimes we don't want the holder, just the additional bios.
*/
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 4e0ac376700..53d1a7a84e2 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
struct bio *inmate, struct dm_bio_prison_cell **ref);
void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison_cell *cell);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bbf459bca61..f7369f9d859 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1689,8 +1689,7 @@ bad:
return ret;
}
-static int crypt_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int crypt_map(struct dm_target *ti, struct bio *bio)
{
struct dm_crypt_io *io;
struct crypt_config *cc = ti->private;
@@ -1846,7 +1845,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f53846f9ab5..cc1bd048acb 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -274,8 +274,7 @@ static void delay_resume(struct dm_target *ti)
atomic_set(&dc->may_delay, 1);
}
-static int delay_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int delay_map(struct dm_target *ti, struct bio *bio)
{
struct delay_c *dc = ti->private;
@@ -338,7 +337,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cc15543a6ad..9721f2ffb1a 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -39,6 +39,10 @@ enum feature_flag_bits {
DROP_WRITES
};
+struct per_bio_data {
+ bool bio_submitted;
+};
+
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
@@ -214,6 +218,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -265,11 +270,12 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
}
}
-static int flakey_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ pb->bio_submitted = false;
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
@@ -277,7 +283,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/*
* Flag this bio as submitted while down.
*/
- map_context->ll = 1;
+ pb->bio_submitted = true;
/*
* Map reads as normal.
@@ -314,17 +320,16 @@ map_bio:
return DM_MAPIO_REMAPPED;
}
-static int flakey_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct flakey_c *fc = ti->private;
- unsigned bio_submitted_while_down = map_context->ll;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
/*
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
@@ -406,7 +411,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
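
The dm-flakey conversion above drops the union map_info cookie in favour of per-bio data: the target declares its storage size in the constructor and looks the same storage up from the bio in map and end_io. A minimal sketch of the pattern, with hypothetical struct and function names:

#include <linux/device-mapper.h>

/* Sketch only (names hypothetical): per-bio state replaces map_context->ll. */
struct example_pb {
	bool bio_submitted;
};

static void example_ctr_fragment(struct dm_target *ti)
{
	ti->per_bio_data_size = sizeof(struct example_pb);
}

static void example_map_fragment(struct bio *bio)
{
	struct example_pb *pb = dm_per_bio_data(bio, sizeof(struct example_pb));

	pb->bio_submitted = true;	/* was: map_context->ll = 1 */
}
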
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 1c46f97d666..ea49834377c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -287,7 +287,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
- sector_t discard_sectors;
+ unsigned short logical_block_size = queue_logical_block_size(q);
+ sector_t num_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -297,7 +298,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Allocate a suitably sized-bio.
*/
- if (rw & REQ_DISCARD)
+ if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
num_bvecs = 1;
else
num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
@@ -310,9 +311,21 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
- discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = discard_sectors << SECTOR_SHIFT;
- remaining -= discard_sectors;
+ num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+ remaining -= num_sectors;
+ } else if (rw & REQ_WRITE_SAME) {
+ /*
+ * WRITE SAME only uses a single page.
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ bio_add_page(bio, page, logical_block_size, offset);
+ num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+
+ offset = 0;
+ remaining -= num_sectors;
+ dp->next_page(dp);
} else while (remaining) {
/*
* Try and add as many pages as possible.
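To make the WRITE SAME branch above concrete: unlike a discard, the bio carries exactly one payload page of one logical block, and the extent it covers is expressed only through bi_size. A sketch using the same 3.7-era bio fields (hypothetical helper name):

/* Sketch only - payload and range setup for a WRITE SAME bio. */
static void example_prep_write_same(struct bio *bio, struct page *page,
				    unsigned offset,
				    unsigned short logical_block_size,
				    sector_t num_sectors)
{
	/* One logical block of payload... */
	bio_add_page(bio, page, logical_block_size, offset);
	/* ...while bi_size spans the whole region to be written. */
	bio->bi_size = num_sectors << SECTOR_SHIFT;
}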
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afd95986d09..0666b5d14b8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1543,7 +1543,21 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
+
+static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
+{
+ if (param_flags & DM_WIPE_BUFFER)
+ memset(param, 0, param_size);
+
+ if (param_flags & DM_PARAMS_VMALLOC)
+ vfree(param);
+ else
+ kfree(param);
+}
+
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl tmp, *dmi;
int secure_data;
@@ -1556,7 +1570,21 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
- dmi = vmalloc(tmp.data_size);
+ *param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+
+ /*
+ * Try to avoid low memory issues when a device is suspended.
+ * Use kmalloc() rather than vmalloc() when we can.
+ */
+ dmi = NULL;
+ if (tmp.data_size <= KMALLOC_MAX_SIZE)
+ dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (!dmi) {
+ dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ *param_flags |= DM_PARAMS_VMALLOC;
+ }
+
if (!dmi) {
if (secure_data && clear_user(user, tmp.data_size))
return -EFAULT;
@@ -1566,6 +1594,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
if (copy_from_user(dmi, user, tmp.data_size))
goto bad;
+ /*
+ * Abort if something changed the ioctl data while it was being copied.
+ */
+ if (dmi->data_size != tmp.data_size) {
+ DMERR("rejecting ioctl: data size modified while processing parameters");
+ goto bad;
+ }
+
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, tmp.data_size))
goto bad;
@@ -1574,9 +1610,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
return 0;
bad:
- if (secure_data)
- memset(dmi, 0, tmp.data_size);
- vfree(dmi);
+ free_params(dmi, tmp.data_size, *param_flags);
+
return -EFAULT;
}
@@ -1613,7 +1648,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
- int wipe_buffer;
+ int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
@@ -1649,24 +1684,14 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
}
/*
- * Trying to avoid low memory issues when a device is
- * suspended.
- */
- current->flags |= PF_MEMALLOC;
-
- /*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param);
-
- current->flags &= ~PF_MEMALLOC;
+ r = copy_params(user, &param, &param_flags);
if (r)
return r;
input_param_size = param->data_size;
- wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;
-
r = validate_params(cmd, param);
if (r)
goto out;
@@ -1681,10 +1706,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
r = -EFAULT;
out:
- if (wipe_buffer)
- memset(param, 0, input_param_size);
-
- vfree(param);
+ free_params(param, input_param_size, param_flags);
return r;
}
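The copy_params() change above replaces the PF_MEMALLOC trick with an explicit allocation policy: try a cheap, non-blocking kmalloc() first, fall back to vmalloc(), and record which allocator won so free_params() can release (and, for secure data, wipe) the buffer correctly. A condensed sketch of just that policy (hypothetical helper, flags as defined in the patch):

/* Sketch only - the kmalloc-then-vmalloc fallback used by copy_params(). */
static void *example_alloc_params(size_t size, int *param_flags)
{
	void *p = NULL;

	*param_flags = 0;
	if (size <= KMALLOC_MAX_SIZE)
		p = kmalloc(size, GFP_NOIO | __GFP_NORETRY |
				  __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!p) {
		p = __vmalloc(size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH,
			      PAGE_KERNEL);
		if (p)
			*param_flags |= DM_PARAMS_VMALLOC;
	}
	return p;
}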
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index bed444c93d8..68c02673263 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -349,7 +349,7 @@ static void complete_io(unsigned long error, void *context)
struct dm_kcopyd_client *kc = job->kc;
if (error) {
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err |= error;
else
job->read_err = 1;
@@ -361,7 +361,7 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
push(&kc->complete_jobs, job);
else {
@@ -432,7 +432,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -585,6 +585,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
struct kcopyd_job *job;
+ int i;
/*
* Allocate an array of jobs consisting of one master job
@@ -611,7 +612,16 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
memset(&job->source, 0, sizeof job->source);
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
- job->rw = WRITE;
+
+ /*
+ * Use WRITE SAME to optimize zeroing if all dests support it.
+ */
+ job->rw = WRITE | REQ_WRITE_SAME;
+ for (i = 0; i < job->num_dests; i++)
+ if (!bdev_write_same(job->dests[i].bdev)) {
+ job->rw = WRITE;
+ break;
+ }
}
job->fn = fn;
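The zeroing branch of dm_kcopyd_copy() now opportunistically uses WRITE SAME, but only if every destination reports support; otherwise it degrades to plain writes of the zero page. The decision in isolation (hypothetical helper name):

/* Sketch only - choose the rw flags for a kcopyd zeroing job. */
static int example_zero_rw(struct dm_io_region *dests, unsigned num_dests)
{
	unsigned i;

	for (i = 0; i < num_dests; i++)
		if (!bdev_write_same(dests[i].bdev))
			return WRITE;			/* one device can't */

	return WRITE | REQ_WRITE_SAME;			/* all devices can */
}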
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 1bf19a93eef..328cad5617a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -55,6 +55,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->num_write_same_requests = 1;
ti->private = lc;
return 0;
@@ -87,8 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
-static int linear_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int linear_map(struct dm_target *ti, struct bio *bio)
{
linear_map_bio(ti, bio);
@@ -155,7 +155,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 45d94a7e7f6..3d8984edeff 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -295,9 +295,11 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
* Choose a reasonable default. All figures in sectors.
*/
if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+ if (min_region_size & (min_region_size - 1))
+ region_size = 1 << fls(region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
- region_size = min_region_size;
} else {
DMINFO("Choosing default region size of 4MiB");
region_size = 1 << 13; /* sectors */
@@ -1216,7 +1218,7 @@ static void raid_dtr(struct dm_target *ti)
context_free(rs);
}
-static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+static int raid_map(struct dm_target *ti, struct bio *bio)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
@@ -1430,7 +1432,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 3, 1},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fd61f98ee1f..fa519185ebb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -61,7 +61,6 @@ struct mirror_set {
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
- mempool_t *read_record_pool;
/* recovery */
region_t nr_regions;
@@ -139,14 +138,13 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
queue_bio(ms, bio, WRITE);
}
-#define MIN_READ_RECORDS 20
-struct dm_raid1_read_record {
+struct dm_raid1_bio_record {
struct mirror *m;
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
+ region_t write_region;
};
-static struct kmem_cache *_dm_raid1_read_record_cache;
-
/*
* Every mirror should look like this one.
*/
@@ -876,19 +874,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
- ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
- _dm_raid1_read_record_cache);
-
- if (!ms->read_record_pool) {
- ti->error = "Error creating mirror read_record_pool";
- kfree(ms);
- return NULL;
- }
-
ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -900,7 +888,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -916,7 +903,6 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
dm_io_client_destroy(ms->io_client);
dm_region_hash_destroy(ms->rh);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
}
@@ -1088,6 +1074,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
@@ -1155,18 +1142,20 @@ static void mirror_dtr(struct dm_target *ti)
/*
* Mirror mapping function
*/
-static int mirror_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int mirror_map(struct dm_target *ti, struct bio *bio)
{
int r, rw = bio_rw(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
- struct dm_raid1_read_record *read_record = NULL;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
+
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
- map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
+ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
@@ -1194,33 +1183,29 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
if (unlikely(!m))
return -EIO;
- read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
- if (likely(read_record)) {
- dm_bio_record(&read_record->details, bio);
- map_context->ptr = read_record;
- read_record->m = m;
- }
+ dm_bio_record(&bio_record->details, bio);
+ bio_record->m = m;
map_bio(m, bio);
return DM_MAPIO_REMAPPED;
}
-static int mirror_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
int rw = bio_rw(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
- struct dm_raid1_read_record *read_record = map_context->ptr;
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
/*
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
- dm_rh_dec(ms->rh, map_context->ll);
+ dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
@@ -1231,7 +1216,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(error)) {
- if (!read_record) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1241,7 +1226,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
return -EIO;
}
- m = read_record->m;
+ m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
m->dev->name);
@@ -1253,22 +1238,18 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* mirror.
*/
if (default_ok(m) || mirror_available(ms, bio)) {
- bd = &read_record->details;
+ bd = &bio_record->details;
dm_bio_restore(bd, bio);
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
+ bio_record->details.bi_bdev = NULL;
queue_bio(ms, bio, rw);
- return 1;
+ return DM_ENDIO_INCOMPLETE;
}
DMERR("All replicated volumes dead, failing I/O");
}
out:
- if (read_record) {
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
- }
+ bio_record->details.bi_bdev = NULL;
return error;
}
@@ -1422,7 +1403,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 12, 1},
+ .version = {1, 13, 1},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1439,13 +1420,6 @@ static int __init dm_mirror_init(void)
{
int r;
- _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
- if (!_dm_raid1_read_record_cache) {
- DMERR("Can't allocate dm_raid1_read_record cache");
- r = -ENOMEM;
- goto bad_cache;
- }
-
r = dm_register_target(&mirror_target);
if (r < 0) {
DMERR("Failed to register mirror target");
@@ -1455,15 +1429,12 @@ static int __init dm_mirror_init(void)
return 0;
bad_target:
- kmem_cache_destroy(_dm_raid1_read_record_cache);
-bad_cache:
return r;
}
static void __exit dm_mirror_exit(void)
{
dm_unregister_target(&mirror_target);
- kmem_cache_destroy(_dm_raid1_read_record_cache);
}
/* Module hooks */
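With per-bio data always available, dm-raid1 no longer needs to distinguish "allocation failed" from "nothing recorded"; it uses a NULL details.bi_bdev as the sentinel for "no read details saved". The convention in miniature (hypothetical helper):

/* Sketch only - the NULL-bi_bdev sentinel consulted by mirror_end_io(). */
static bool example_details_saved(struct bio *bio)
{
	struct dm_raid1_bio_record *rec =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/* dm_bio_record() fills details.bi_bdev; mirror_map() clears it first. */
	return rec->details.bi_bdev != NULL;
}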
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a143921feaf..59fc18ae52c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -79,7 +79,6 @@ struct dm_snapshot {
/* Chunks with outstanding reads */
spinlock_t tracked_chunk_lock;
- mempool_t *tracked_chunk_pool;
struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
/* The on disk metadata handler */
@@ -191,35 +190,38 @@ struct dm_snap_tracked_chunk {
chunk_t chunk;
};
-static struct kmem_cache *tracked_chunk_cache;
+static void init_tracked_chunk(struct bio *bio)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ INIT_HLIST_NODE(&c->node);
+}
-static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
- chunk_t chunk)
+static bool is_bio_tracked(struct bio *bio)
{
- struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
- GFP_NOIO);
- unsigned long flags;
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ return !hlist_unhashed(&c->node);
+}
+
+static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
c->chunk = chunk;
- spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ spin_lock_irq(&s->tracked_chunk_lock);
hlist_add_head(&c->node,
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
- spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- return c;
+ spin_unlock_irq(&s->tracked_chunk_lock);
}
-static void stop_tracking_chunk(struct dm_snapshot *s,
- struct dm_snap_tracked_chunk *c)
+static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
unsigned long flags;
spin_lock_irqsave(&s->tracked_chunk_lock, flags);
hlist_del(&c->node);
spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
@@ -1120,14 +1122,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_pending_pool;
}
- s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
- tracked_chunk_cache);
- if (!s->tracked_chunk_pool) {
- ti->error = "Could not allocate tracked_chunk mempool for "
- "tracking reads";
- goto bad_tracked_chunk_pool;
- }
-
for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
@@ -1135,6 +1129,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->num_flush_requests = num_flush_requests;
+ ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -1183,9 +1178,6 @@ bad_read_metadata:
unregister_snapshot(s);
bad_load_and_register:
- mempool_destroy(s->tracked_chunk_pool);
-
-bad_tracked_chunk_pool:
mempool_destroy(s->pending_pool);
bad_pending_pool:
@@ -1290,8 +1282,6 @@ static void snapshot_dtr(struct dm_target *ti)
BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif
- mempool_destroy(s->tracked_chunk_pool);
-
__free_exceptions(s);
mempool_destroy(s->pending_pool);
@@ -1577,8 +1567,7 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
s->store->chunk_mask);
}
-static int snapshot_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
@@ -1586,6 +1575,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
@@ -1670,7 +1661,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
} else {
bio->bi_bdev = s->origin->bdev;
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
}
out_unlock:
@@ -1691,20 +1682,20 @@ out:
* If merging is currently taking place on the chunk in question, the
* I/O is deferred by adding it to s->bios_queued_during_merge.
*/
-static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
- if (!map_context->target_request_nr)
+ if (!dm_bio_get_target_request_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
- map_context->ptr = NULL;
return DM_MAPIO_REMAPPED;
}
@@ -1733,7 +1724,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
remap_exception(s, e, bio, chunk);
if (bio_rw(bio) == WRITE)
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
goto out_unlock;
}
@@ -1751,14 +1742,12 @@ out_unlock:
return r;
}
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct dm_snapshot *s = ti->private;
- struct dm_snap_tracked_chunk *c = map_context->ptr;
- if (c)
- stop_tracking_chunk(s, c);
+ if (is_bio_tracked(bio))
+ stop_tracking_chunk(s, bio);
return 0;
}
@@ -2127,8 +2116,7 @@ static void origin_dtr(struct dm_target *ti)
dm_put_device(ti, dev);
}
-static int origin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
@@ -2193,7 +2181,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 7, 1},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2206,7 +2194,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2220,7 +2208,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2281,17 +2269,8 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
- tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
- if (!tracked_chunk_cache) {
- DMERR("Couldn't create cache to track chunks in use.");
- r = -ENOMEM;
- goto bad_tracked_chunk_cache;
- }
-
return 0;
-bad_tracked_chunk_cache:
- kmem_cache_destroy(pending_cache);
bad_pending_cache:
kmem_cache_destroy(exception_cache);
bad_exception_cache:
@@ -2317,7 +2296,6 @@ static void __exit dm_snapshot_exit(void)
exit_origin_hash();
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
- kmem_cache_destroy(tracked_chunk_cache);
dm_exception_store_exit();
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e2f87653974..c89cde86d40 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -162,6 +162,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = stripes;
ti->num_discard_requests = stripes;
+ ti->num_write_same_requests = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -251,8 +252,8 @@ static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
*result += sc->chunk_size; /* next chunk */
}
-static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
- uint32_t target_stripe)
+static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
+ uint32_t target_stripe)
{
sector_t begin, end;
@@ -271,23 +272,23 @@ static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
}
}
-static int stripe_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
unsigned target_request_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = map_context->target_request_nr;
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- target_request_nr = map_context->target_request_nr;
+ if (unlikely(bio->bi_rw & REQ_DISCARD) ||
+ unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_discard(sc, bio, target_request_nr);
+ return stripe_map_range(sc, bio, target_request_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -342,8 +343,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
return 0;
}
-static int stripe_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
{
unsigned i;
char major_minor[16];
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 100368eb799..daf25d0890b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t)
int dm_table_alloc_md_mempools(struct dm_table *t)
{
unsigned type = dm_table_get_type(t);
+ unsigned per_bio_data_size = 0;
+ struct dm_target *tgt;
+ unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
+ if (type == DM_TYPE_BIO_BASED)
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+ }
+
+ t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
if (!t->mempools)
return -ENOMEM;
@@ -1414,6 +1423,33 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return 1;
}
+static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !q->limits.max_write_same_sectors;
+}
+
+static bool dm_table_supports_write_same(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_write_same_requests)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+ return false;
+ }
+
+ return true;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1445,6 +1481,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+ if (!dm_table_supports_write_same(t))
+ q->limits.max_write_same_sectors = 0;
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 8da366cf381..617d21a7725 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -126,15 +126,14 @@ static void io_err_dtr(struct dm_target *tt)
/* empty */
}
-static int io_err_map(struct dm_target *tt, struct bio *bio,
- union map_info *map_context)
+static int io_err_map(struct dm_target *tt, struct bio *bio)
{
return -EIO;
}
static struct target_type error_target = {
.name = "error",
- .version = {1, 0, 1},
+ .version = {1, 1, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 693e149e972..4d6e85367b8 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -408,7 +408,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->tl_info.tm = pmd->tm;
pmd->tl_info.levels = 1;
- pmd->tl_info.value_type.context = &pmd->info;
+ pmd->tl_info.value_type.context = &pmd->bl_info;
pmd->tl_info.value_type.size = sizeof(__le64);
pmd->tl_info.value_type.inc = subtree_inc;
pmd->tl_info.value_type.dec = subtree_dec;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 058acf3a5ba..675ae527401 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -186,7 +186,6 @@ struct pool {
struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
- mempool_t *endio_hook_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
@@ -304,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -368,6 +367,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
dm_thin_changed_this_transaction(tc->td);
}
+static void inc_all_io_entry(struct pool *pool, struct bio *bio)
+{
+ struct dm_thin_endio_hook *h;
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
+ h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
+}
+
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
@@ -474,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct dm_thin_new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
@@ -499,8 +509,7 @@ static void overwrite_endio(struct bio *bio, int err)
/*
* This sends the bios in the cell back to the deferred_bios list.
*/
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
- dm_block_t data_block)
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
struct pool *pool = tc->pool;
unsigned long flags;
@@ -513,17 +522,13 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
}
/*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
*/
-static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- struct bio_list bios;
struct pool *pool = tc->pool;
unsigned long flags;
- bio_list_init(&bios);
-
spin_lock_irqsave(&pool->lock, flags);
dm_cell_release_no_holder(cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -561,7 +566,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR("dm_thin_insert_block() failed");
+ DMERR_LIMIT("dm_thin_insert_block() failed");
dm_cell_error(m->cell);
goto out;
}
@@ -573,10 +578,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
* the bios in the cell.
*/
if (bio) {
- cell_defer_except(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell);
bio_endio(bio, 0);
} else
- cell_defer(tc, m->cell, m->data_block);
+ cell_defer(tc, m->cell);
out:
list_del(&m->list);
@@ -588,8 +593,8 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
struct thin_c *tc = m->tc;
bio_io_error(m->bio);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -597,13 +602,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ inc_all_io_entry(tc->pool, m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
+
if (m->pass_discard)
remap_and_issue(tc, m->bio, m->data_block);
else
bio_endio(m->bio, 0);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -614,7 +621,7 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
r = dm_thin_remove_block(tc->td, m->virt_block);
if (r)
- DMERR("dm_thin_remove_block() failed");
+ DMERR_LIMIT("dm_thin_remove_block() failed");
process_prepared_discard_passdown(m);
}
@@ -706,11 +713,12 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_dest);
} else {
struct dm_io_region from, to;
@@ -727,7 +735,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_copy() failed");
+ DMERR_LIMIT("dm_kcopyd_copy() failed");
dm_cell_error(cell);
}
}
@@ -775,11 +783,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_block);
} else {
int r;
@@ -792,7 +801,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_zero() failed");
+ DMERR_LIMIT("dm_kcopyd_zero() failed");
dm_cell_error(cell);
}
}
@@ -804,7 +813,7 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR("commit failed, error = %d", r);
+ DMERR_LIMIT("commit failed: error = %d", r);
return r;
}
@@ -889,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -936,7 +945,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
build_data_key(tc->td, lookup_result.block, &key2);
if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
break;
}
@@ -962,13 +971,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
} else {
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell2);
+
/*
* The DM core makes sure that the discard doesn't span
* a block boundary. So we submit the discard of a
* partial block appropriately.
*/
- dm_cell_release_singleton(cell, bio);
- dm_cell_release_singleton(cell2, bio);
if ((!lookup_result.shared) && pool->pf.discard_passdown)
remap_and_issue(tc, bio, lookup_result.block);
else
@@ -980,13 +991,14 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
/*
* It isn't provisioned, just forget it.
*/
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
break;
default:
- DMERR("discard: find block unexpectedly returned %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1012,7 +1024,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
dm_cell_error(cell);
break;
}
@@ -1037,11 +1050,12 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE && bio->bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
- dm_cell_release_singleton(cell, bio);
remap_and_issue(tc, bio, lookup_result->block);
}
}
@@ -1056,7 +1070,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, 0);
return;
}
@@ -1066,7 +1082,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
*/
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
return;
}
@@ -1085,7 +1101,8 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
set_pool_mode(tc->pool, PM_READ_ONLY);
dm_cell_error(cell);
break;
@@ -1111,34 +1128,31 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- /*
- * We can release this cell now. This thread is the only
- * one that puts bios into a cell, and we know there were
- * no preceding bios.
- */
- /*
- * TODO: this will probably have to change when discard goes
- * back in.
- */
- dm_cell_release_singleton(cell, bio);
-
- if (lookup_result.shared)
+ if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- else
+ cell_defer_no_holder(tc, cell);
+ } else {
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_to_origin_and_issue(tc, bio);
} else
provision_block(tc, bio, block, cell);
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1156,8 +1170,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
bio_io_error(bio);
- else
+ else {
+ inc_all_io_entry(tc->pool, bio);
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
@@ -1167,6 +1183,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
}
if (tc->origin_dev) {
+ inc_all_io_entry(tc->pool, bio);
remap_to_origin_and_issue(tc, bio);
break;
}
@@ -1176,7 +1193,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
bio_io_error(bio);
break;
}
@@ -1207,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
/*
@@ -1340,32 +1358,30 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
- struct pool *pool = tc->pool;
- struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->tc = tc;
h->shared_read_entry = NULL;
- h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
+ h->all_io_entry = NULL;
h->overwrite_mapping = NULL;
-
- return h;
}
/*
* Non-blocking function called from the thin target's map function.
*/
-static int thin_bio_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct thin_c *tc = ti->private;
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
+ struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_cell_key key;
- map_context->ptr = thin_hook_bio(tc, bio);
+ thin_hook_bio(tc, bio);
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
@@ -1400,12 +1416,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* shared flag will be set in their case.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- } else {
- remap(tc, bio, result.block);
- r = DM_MAPIO_REMAPPED;
+ return DM_MAPIO_SUBMITTED;
}
- break;
+
+ build_virtual_key(tc->td, block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ return DM_MAPIO_SUBMITTED;
+
+ build_data_key(tc->td, result.block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+ cell_defer_no_holder(tc, cell1);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell2);
+ cell_defer_no_holder(tc, cell1);
+
+ remap(tc, bio, result.block);
+ return DM_MAPIO_REMAPPED;
case -ENODATA:
if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
@@ -1414,8 +1443,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* of doing so. Just error it.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
/* fall through */
@@ -1425,8 +1453,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* provide the hint to load the metadata into cache.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
default:
/*
@@ -1435,11 +1462,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* pool is switched to fail-io mode.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
-
- return r;
}
static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -1566,14 +1590,12 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, pool->mapping_pool);
mempool_destroy(pool->mapping_pool);
- mempool_destroy(pool->endio_hook_pool);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
}
static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
@@ -1667,13 +1689,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_mapping_pool;
}
- pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
- _endio_hook_cache);
- if (!pool->endio_hook_pool) {
- *error = "Error creating pool's endio_hook mempool";
- err_p = ERR_PTR(-ENOMEM);
- goto bad_endio_hook_pool;
- }
pool->ref_count = 1;
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
@@ -1682,8 +1697,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
-bad_endio_hook_pool:
- mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
@@ -1966,8 +1979,7 @@ out_unlock:
return r;
}
-static int pool_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int pool_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct pool_c *pt = ti->private;
@@ -2358,7 +2370,9 @@ static int pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+ if (!pool->pf.discard_enabled)
+ DMEMIT("ignore_discard");
+ else if (pool->pf.discard_passdown)
DMEMIT("discard_passdown");
else
DMEMIT("no_discard_passdown");
@@ -2454,7 +2468,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2576,6 +2590,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_requests = 1;
ti->flush_supported = true;
+ ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
@@ -2609,20 +2624,17 @@ out_unlock:
return r;
}
-static int thin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_map(struct dm_target *ti, struct bio *bio)
{
bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
- return thin_bio_map(ti, bio, map_context);
+ return thin_bio_map(ti, bio);
}
-static int thin_endio(struct dm_target *ti,
- struct bio *bio, int err,
- union map_info *map_context)
+static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = map_context->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct list_head work;
struct dm_thin_new_mapping *m, *tmp;
struct pool *pool = h->tc->pool;
@@ -2643,14 +2655,15 @@ static int thin_endio(struct dm_target *ti,
if (h->all_io_entry) {
INIT_LIST_HEAD(&work);
dm_deferred_entry_dec(h->all_io_entry, &work);
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry_safe(m, tmp, &work, list)
- list_add(&m->list, &pool->prepared_discards);
- spin_unlock_irqrestore(&pool->lock, flags);
+ if (!list_empty(&work)) {
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry_safe(m, tmp, &work, list)
+ list_add(&m->list, &pool->prepared_discards);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ wake_worker(pool);
+ }
}
- mempool_free(h, pool->endio_hook_pool);
-
return 0;
}
@@ -2745,7 +2758,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -2779,14 +2792,8 @@ static int __init dm_thin_init(void)
if (!_new_mapping_cache)
goto bad_new_mapping_cache;
- _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
- if (!_endio_hook_cache)
- goto bad_endio_hook_cache;
-
return 0;
-bad_endio_hook_cache:
- kmem_cache_destroy(_new_mapping_cache);
bad_new_mapping_cache:
dm_unregister_target(&pool_target);
bad_pool_target:
@@ -2801,7 +2808,6 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
- kmem_cache_destroy(_endio_hook_cache);
}
module_init(dm_thin_init);
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 9e7328bb403..52cde982164 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -55,7 +55,6 @@ struct dm_verity {
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
- mempool_t *io_mempool; /* mempool of struct dm_verity_io */
mempool_t *vec_mempool; /* mempool of bio vector */
struct workqueue_struct *verify_wq;
@@ -66,7 +65,6 @@ struct dm_verity {
struct dm_verity_io {
struct dm_verity *v;
- struct bio *bio;
/* original values of bio->bi_end_io and bio->bi_private */
bio_end_io_t *orig_bi_end_io;
@@ -389,8 +387,8 @@ test_block_hash:
*/
static void verity_finish_io(struct dm_verity_io *io, int error)
{
- struct bio *bio = io->bio;
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
@@ -398,8 +396,6 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
if (io->io_vec != io->io_vec_inline)
mempool_free(io->io_vec, v->vec_mempool);
- mempool_free(io, v->io_mempool);
-
bio_endio(bio, error);
}
@@ -462,8 +458,7 @@ no_prefetch_cluster:
* Bio map function. It allocates dm_verity_io structure and bio vector and
* fills them. Then it issues prefetches and the I/O.
*/
-static int verity_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
@@ -486,9 +481,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
return -EIO;
- io = mempool_alloc(v->io_mempool, GFP_NOIO);
+ io = dm_per_bio_data(bio, ti->per_bio_data_size);
io->v = v;
- io->bio = bio;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -610,9 +604,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->vec_mempool)
mempool_destroy(v->vec_mempool);
- if (v->io_mempool)
- mempool_destroy(v->io_mempool);
-
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -841,13 +832,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
- sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
- if (!v->io_mempool) {
- ti->error = "Cannot allocate io mempool";
- r = -ENOMEM;
- goto bad;
- }
+ ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
BIO_MAX_PAGES * sizeof(struct bio_vec));
@@ -875,7 +860,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
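dm-verity's per-bio state has a variable-length tail (hash descriptor plus two digests), so the size handed to the core is rounded up to the structure's alignment; the same size is what lets dm_bio_from_per_bio_data() map the state back to its bio. Roughly (hypothetical wrapper names):

/* Sketch only - variable-sized per-bio data and the reverse lookup. */
static void example_set_io_size(struct dm_target *ti, struct dm_verity *v)
{
	ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) +
					v->shash_descsize + v->digest_size * 2,
					__alignof__(struct dm_verity_io));
}

static struct bio *example_bio_of(struct dm_verity *v, struct dm_verity_io *io)
{
	return dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
}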
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index cc2b3cb8194..69a5c3b3b34 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -33,8 +33,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Return zeros only on reads
*/
-static int zero_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int zero_map(struct dm_target *ti, struct bio *bio)
{
switch(bio_rw(bio)) {
case READ:
@@ -56,7 +55,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
static struct target_type zero_target = {
.name = "zero",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = zero_ctr,
.map = zero_map,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 77e6eff41ca..c72e4d5a961 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,18 +63,6 @@ struct dm_io {
};
/*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- union map_info info;
- struct bio clone;
-};
-
-/*
* For request-based dm.
* One of these is allocated per request.
*/
@@ -657,7 +645,7 @@ static void clone_endio(struct bio *bio, int error)
error = -EIO;
if (endio) {
- r = endio(tio->ti, bio, error, &tio->info);
+ r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
/*
* error and requeue request are handled
@@ -1016,7 +1004,7 @@ static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
- r = ti->type->map(ti, clone, &tio->info);
+ r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1111,6 +1099,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
+ tio->target_request_nr = 0;
return tio;
}
@@ -1121,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
struct bio *clone = &tio->clone;
- tio->info.target_request_nr = request_nr;
+ tio->target_request_nr = request_nr;
/*
* Discard requests require the bio's inline iovecs be initialized.
@@ -1174,7 +1163,28 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
ci->sector_count = 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+
+static unsigned get_num_discard_requests(struct dm_target *ti)
+{
+ return ti->num_discard_requests;
+}
+
+static unsigned get_num_write_same_requests(struct dm_target *ti)
+{
+ return ti->num_write_same_requests;
+}
+
+typedef bool (*is_split_required_fn)(struct dm_target *ti);
+
+static bool is_split_required_for_discard(struct dm_target *ti)
+{
+ return ti->split_discard_requests;
+}
+
+static int __clone_and_map_changing_extent_only(struct clone_info *ci,
+ get_num_requests_fn get_num_requests,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
@@ -1185,15 +1195,15 @@ static int __clone_and_map_discard(struct clone_info *ci)
return -EIO;
/*
- * Even though the device advertised discard support,
- * that does not mean every target supports it, and
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
* reconfiguration might also have changed that since the
* check was performed.
*/
- if (!ti->num_discard_requests)
+ if (!get_num_requests || !get_num_requests(ti))
return -EOPNOTSUPP;
- if (!ti->split_discard_requests)
+ if (is_split_required && !is_split_required(ti))
len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
@@ -1206,6 +1216,17 @@ static int __clone_and_map_discard(struct clone_info *ci)
return 0;
}
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
+ is_split_required_for_discard);
+}
+
+static int __clone_and_map_write_same(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *bio = ci->bio;
@@ -1215,6 +1236,8 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __clone_and_map_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __clone_and_map_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
@@ -1946,13 +1969,20 @@ static void free_dev(struct mapped_device *md)
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
- struct dm_md_mempools *p;
+ struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
- /* the md already has necessary mempools */
+ if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+ /*
+ * The md already has necessary mempools. Reload just the
+ * bioset because front_pad may have changed because
+ * a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
goto out;
+ }
- p = dm_table_get_md_mempools(t);
BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
md->io_pool = p->io_pool;
@@ -2711,7 +2741,7 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2719,6 +2749,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
if (!pools)
return NULL;
+ per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
mempool_create_slab_pool(MIN_IOS, _io_cache) :
mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2734,7 +2766,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
pools->bs = (type == DM_TYPE_BIO_BASED) ?
bioset_create(pool_size,
- offsetof(struct dm_target_io, clone)) :
+ per_bio_data_size + offsetof(struct dm_target_io, clone)) :
bioset_create(pool_size,
offsetof(struct dm_rq_clone_bio_info, clone));
if (!pools->bs)
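The dm core refactoring above folds discard and WRITE SAME splitting into one helper parameterised by callbacks, so supporting another extent-changing bio type later would only mean supplying its own request counter, e.g. (purely illustrative names, not in this patch):

/* Sketch only - hooking a hypothetical new bio type into the generic helper. */
static unsigned get_num_example_requests(struct dm_target *ti)
{
	return ti->num_discard_requests;	/* stand-in for a real counter */
}

static int __clone_and_map_example(struct clone_info *ci)
{
	return __clone_and_map_changing_extent_only(ci, get_num_example_requests,
						    NULL);
}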
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6a99fefaa74..45b97da1bd0 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -159,7 +159,7 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 61200717687..3db3d1b271f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
- mddev->write_lock, /*nothing*/);
+ mddev->write_lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
@@ -1414,12 +1414,11 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
- int i;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
- for (i=0; size>=4; size -= 4 )
+ for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
@@ -4124,7 +4123,7 @@ static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
-/* Metdata version.
+/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
@@ -4753,6 +4752,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
+ if (entry->store == new_dev_store)
+ flush_workqueue(md_misc_wq);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->store(mddev, page, length);
@@ -6346,24 +6347,23 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* Commands dealing with the RAID driver but not any
* particular array:
*/
- switch (cmd)
- {
- case RAID_VERSION:
- err = get_version(argp);
- goto done;
+ switch (cmd) {
+ case RAID_VERSION:
+ err = get_version(argp);
+ goto done;
- case PRINT_RAID_DEBUG:
- err = 0;
- md_print_devices();
- goto done;
+ case PRINT_RAID_DEBUG:
+ err = 0;
+ md_print_devices();
+ goto done;
#ifndef MODULE
- case RAID_AUTORUN:
- err = 0;
- autostart_arrays(arg);
- goto done;
+ case RAID_AUTORUN:
+ err = 0;
+ autostart_arrays(arg);
+ goto done;
#endif
- default:;
+ default:;
}
/*
@@ -6398,6 +6398,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
+ if (cmd == ADD_NEW_DISK)
+ /* need to ensure md_delayed_delete() has completed */
+ flush_workqueue(md_misc_wq);
+
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6406,50 +6410,44 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
- switch (cmd)
- {
- case SET_ARRAY_INFO:
- {
- mdu_array_info_t info;
- if (!arg)
- memset(&info, 0, sizeof(info));
- else if (copy_from_user(&info, argp, sizeof(info))) {
- err = -EFAULT;
- goto abort_unlock;
- }
- if (mddev->pers) {
- err = update_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
- goto abort_unlock;
- }
- goto done_unlock;
- }
- if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- err = set_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
- goto abort_unlock;
- }
+ if (cmd == SET_ARRAY_INFO) {
+ mdu_array_info_t info;
+ if (!arg)
+ memset(&info, 0, sizeof(info));
+ else if (copy_from_user(&info, argp, sizeof(info))) {
+ err = -EFAULT;
+ goto abort_unlock;
+ }
+ if (mddev->pers) {
+ err = update_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't update"
+ " array info. %d\n", err);
+ goto abort_unlock;
}
goto done_unlock;
-
- default:;
+ }
+ if (!list_empty(&mddev->disks)) {
+ printk(KERN_WARNING
+ "md: array %s already has disks!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ if (mddev->raid_disks) {
+ printk(KERN_WARNING
+ "md: array %s already initialised!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ err = set_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't set"
+ " array info. %d\n", err);
+ goto abort_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6468,52 +6466,51 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/*
* Commands even a read-only array can execute:
*/
- switch (cmd)
- {
- case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto done_unlock;
+ switch (cmd) {
+ case GET_BITMAP_FILE:
+ err = get_bitmap_file(mddev, argp);
+ goto done_unlock;
- case RESTART_ARRAY_RW:
- err = restart_array(mddev);
- goto done_unlock;
+ case RESTART_ARRAY_RW:
+ err = restart_array(mddev);
+ goto done_unlock;
- case STOP_ARRAY:
- err = do_md_stop(mddev, 0, bdev);
- goto done_unlock;
+ case STOP_ARRAY:
+ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
- case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, bdev);
- goto done_unlock;
+ case STOP_ARRAY_RO:
+ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
- case BLKROSET:
- if (get_user(ro, (int __user *)(arg))) {
- err = -EFAULT;
- goto done_unlock;
- }
- err = -EINVAL;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
- /* if the bdev is going readonly the value of mddev->ro
- * does not matter, no writes are coming
- */
- if (ro)
- goto done_unlock;
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
- /* are we are already prepared for writes? */
- if (mddev->ro != 1)
- goto done_unlock;
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
- /* transitioning to readauto need only happen for
- * arrays that call md_write_start
- */
- if (mddev->pers) {
- err = restart_array(mddev);
- if (err == 0) {
- mddev->ro = 2;
- set_disk_ro(mddev->gendisk, 0);
- }
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
}
- goto done_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6535,37 +6532,36 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
}
- switch (cmd)
+ switch (cmd) {
+ case ADD_NEW_DISK:
{
- case ADD_NEW_DISK:
- {
- mdu_disk_info_t info;
- if (copy_from_user(&info, argp, sizeof(info)))
- err = -EFAULT;
- else
- err = add_new_disk(mddev, &info);
- goto done_unlock;
- }
+ mdu_disk_info_t info;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ err = -EFAULT;
+ else
+ err = add_new_disk(mddev, &info);
+ goto done_unlock;
+ }
- case HOT_REMOVE_DISK:
- err = hot_remove_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_REMOVE_DISK:
+ err = hot_remove_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case HOT_ADD_DISK:
- err = hot_add_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_ADD_DISK:
+ err = hot_add_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case RUN_ARRAY:
- err = do_md_run(mddev);
- goto done_unlock;
+ case RUN_ARRAY:
+ err = do_md_run(mddev);
+ goto done_unlock;
- case SET_BITMAP_FILE:
- err = set_bitmap_file(mddev, (int)arg);
- goto done_unlock;
+ case SET_BITMAP_FILE:
+ err = set_bitmap_file(mddev, (int)arg);
+ goto done_unlock;
- default:
- err = -EINVAL;
- goto abort_unlock;
+ default:
+ err = -EINVAL;
+ goto abort_unlock;
}
done_unlock:
@@ -7184,6 +7180,7 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
// stop recovery, signal do_sync ....
}
@@ -7281,6 +7278,7 @@ EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
+#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
@@ -7289,6 +7287,7 @@ void md_do_sync(struct md_thread *thread)
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
+ unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
@@ -7448,6 +7447,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
+ update_time = jiffies;
blk_start_plug(&plug);
while (j < max_sectors) {
@@ -7459,6 +7459,7 @@ void md_do_sync(struct md_thread *thread)
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
+ time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
@@ -7466,6 +7467,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ j > mddev->recovery_cp)
+ mddev->recovery_cp = j;
+ update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7570,8 +7575,13 @@ void md_do_sync(struct md_thread *thread)
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
- mddev->recovery_cp =
- mddev->curr_resync_completed;
+ if (test_bit(MD_RECOVERY_ERROR,
+ &mddev->recovery))
+ mddev->recovery_cp =
+ mddev->curr_resync_completed;
+ else
+ mddev->recovery_cp =
+ mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
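
The md_do_sync() hunks above add UPDATE_FREQUENCY (5*60*HZ) and a time_after_eq() test so that curr_resync_completed, and hence recovery_cp for a resync, is refreshed at least every five minutes rather than only when enough sectors have passed. A small userspace sketch of that rate-limited-checkpoint pattern, using wall-clock seconds in place of jiffies (the interval and the "resync work" loop are illustrative):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define UPDATE_FREQUENCY 2 /* seconds here; the patch uses 5*60*HZ jiffies */

int main(void)
{
	time_t update_time = time(NULL);

	for (int step = 0; step < 10; step++) {
		sleep(1); /* stand-in for resyncing a chunk of sectors */
		if (time(NULL) >= update_time + UPDATE_FREQUENCY) {
			printf("checkpoint recorded after step %d\n", step);
			update_time = time(NULL);
		}
	}
	return 0;
}
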
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af443ab868d..eca59c3074e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -307,6 +307,7 @@ struct mddev {
* REQUEST: user-space has requested a sync (used with SYNC)
* CHECK: user-space request for check-only, no repair
* RESHAPE: A reshape is happening
+ * ERROR: sync-action interrupted because of an io-error
*
* If neither SYNC or RESHAPE are set, then it is a recovery.
*/
@@ -320,6 +321,7 @@ struct mddev {
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
#define MD_RECOVERY_FROZEN 9
+#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -551,32 +553,6 @@ struct md_thread {
#define THREAD_WAKEUP 0
-#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, cmd); \
-} while (0)
-
static inline void safe_put_page(struct page *p)
{
if (p) put_page(p);
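
The md.h hunk above deletes the private __wait_event_lock_irq()/wait_event_lock_irq() macros; the raid1, raid10 and raid5 hunks later in this diff switch to the generic three-argument wait_event_lock_irq(), and to wait_event_lock_irq_cmd() where flush_pending_writes() must run before sleeping. The pattern being centralised is: drop the lock, sleep until the condition holds, retake the lock before rechecking. Below is a userspace analogue of that pattern using pthreads, not the kernel API itself; pthread_cond_wait() releases the mutex while blocked and reacquires it before returning. The names and counts are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int nr_pending = 3;

/* Analogue of wait_event_lock_irq(wq, !nr_pending, lock): the lock is
 * released while sleeping and held again when the condition is rechecked. */
static void wait_for_idle(void)
{
	pthread_mutex_lock(&lock);
	while (nr_pending != 0)
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; i++) {
		pthread_mutex_lock(&lock);
		nr_pending--;
		pthread_cond_broadcast(&wq);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_idle();
	pthread_join(t, NULL);
	printf("all pending IO drained\n");
	return 0;
}
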
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 5ba277768d9..28c3ed072a7 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -25,7 +25,7 @@
* may be held at once. This is just an implementation detail.
*
* ii) Recursive locking attempts are detected and return EINVAL. A stack
- * trace is also emitted for the previous lock aquisition.
+ * trace is also emitted for the previous lock acquisition.
*
* iii) Priority is given to write locks.
*/
@@ -109,7 +109,7 @@ static int __check_holder(struct block_lock *lock)
DMERR("previously held here:");
print_stack_trace(lock->traces + i, 4);
- DMERR("subsequent aquisition attempted here:");
+ DMERR("subsequent acquisition attempted here:");
t.nr_entries = 0;
t.max_entries = MAX_STACK;
t.entries = entries;
@@ -428,15 +428,17 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
- if (unlikely(r))
+ if (unlikely(r)) {
+ DMERR_LIMIT("%s validator check failed for block %llu", v->name,
+ (unsigned long long) dm_bufio_get_block_number(buf));
return r;
+ }
aux->validator = v;
} else {
if (unlikely(aux->validator != v)) {
- DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
- aux->validator->name, v ? v->name : "NULL",
- (unsigned long long)
- dm_bufio_get_block_number(buf));
+ DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
+ aux->validator->name, v ? v->name : "NULL",
+ (unsigned long long) dm_bufio_get_block_number(buf));
return -EINVAL;
}
}
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 5709bfeab1e..accbb05f17b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -36,13 +36,13 @@ struct node_header {
__le32 padding;
} __packed;
-struct node {
+struct btree_node {
struct node_header header;
__le64 keys[0];
} __packed;
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
int new_block(struct dm_btree_info *info, struct dm_block **result);
@@ -64,7 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
-struct node *ro_node(struct ro_spine *s);
+struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
struct dm_btree_info *info;
@@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
/*
* Some inlines.
*/
-static inline __le64 *key_ptr(struct node *n, uint32_t index)
+static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
return n->keys + index;
}
-static inline void *value_base(struct node *n)
+static inline void *value_base(struct btree_node *n)
{
return &n->keys[le32_to_cpu(n->header.max_entries)];
}
-static inline void *value_ptr(struct node *n, uint32_t index)
+static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
return value_base(n) + (value_size * index);
@@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
/*
* Assumes the values are suitably-aligned and converts to core format.
*/
-static inline uint64_t value64(struct node *n, uint32_t index)
+static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
__le64 *values_le = value_base(n);
@@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
/*
* Searching for a key within a single node.
*/
-int lower_bound(struct node *n, uint64_t key);
+int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index aa71e2359a0..c4f28133ef8 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -53,7 +53,7 @@
/*
* Some little utilities for moving node data around.
*/
-static void node_shift(struct node *n, int shift)
+static void node_shift(struct btree_node *n, int shift)
{
uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
uint32_t value_size = le32_to_cpu(n->header.value_size);
@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
}
}
-static void node_copy(struct node *left, struct node *right, int shift)
+static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
/*
* Delete a specific entry from a leaf node.
*/
-static void delete_at(struct node *n, unsigned index)
+static void delete_at(struct btree_node *n, unsigned index)
{
unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
unsigned nr_to_copy = nr_entries - (index + 1);
@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned merge_threshold(struct node *n)
+static unsigned merge_threshold(struct btree_node *n)
{
return le32_to_cpu(n->header.max_entries) / 3;
}
@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
struct child {
unsigned index;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
};
static struct dm_btree_value_type le64_type = {
@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
.equal = NULL
};
-static int init_child(struct dm_btree_info *info, struct node *parent,
+static int init_child(struct dm_btree_info *info, struct btree_node *parent,
unsigned index, struct child *result)
{
int r, inc;
@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
return dm_tm_unlock(info->tm, c->block);
}
-static void shift(struct node *left, struct node *right, int count)
+static void shift(struct btree_node *left, struct btree_node *right, int count)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
right->header.nr_entries = cpu_to_le32(nr_right + count);
}
-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *r)
{
- struct node *left = l->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
unsigned threshold = 2 * merge_threshold(left) + 1;
@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent;
+ struct btree_node *parent;
struct child left, right;
parent = dm_block_data(shadow_current(s));
@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
* in right, then rebalance2. This wastes some cpu, but I want something
* simple atm.
*/
-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
/*
* Redistributes entries among 3 sibling nodes.
*/
-static void redistribute3(struct dm_btree_info *info, struct node *parent,
+static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
int s;
@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
*key_ptr(parent, r->index) = right->keys[0];
}
-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r)
{
- struct node *left = l->n;
- struct node *center = c->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *center = c->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent = dm_block_data(shadow_current(s));
+ struct btree_node *parent = dm_block_data(shadow_current(s));
struct child left, center, right;
/*
@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
{
int r;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
if (r)
@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
{
int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;
- struct node *n;
+ struct btree_node *n;
n = dm_block_data(shadow_current(s));
@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
return r;
}
-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
int i = lower_bound(n, key);
@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
uint64_t key, unsigned *index)
{
int i = *index, r;
- struct node *n;
+ struct btree_node *n;
for (;;) {
r = shadow_step(s, root, vt);
@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
unsigned level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index d9a7912ee8e..f199a0c4ed0 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
h->blocknr = cpu_to_le64(dm_block_location(b));
@@ -38,15 +38,15 @@ static int node_check(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
size_t value_size;
__le32 csum_disk;
uint32_t flags;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
- DMERR("node_check failed blocknr %llu wanted %llu",
- le64_to_cpu(h->blocknr), dm_block_location(b));
+ DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(h->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -54,8 +54,8 @@ static int node_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
if (csum_disk != h->csum) {
- DMERR("node_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
+ DMERR_LIMIT("node_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
return -EILSEQ;
}
@@ -63,12 +63,12 @@ static int node_check(struct dm_block_validator *v,
if (sizeof(struct node_header) +
(sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
- DMERR("node_check failed: max_entries too large");
+ DMERR_LIMIT("node_check failed: max_entries too large");
return -EILSEQ;
}
if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
- DMERR("node_check failed, too many entries");
+ DMERR_LIMIT("node_check failed: too many entries");
return -EILSEQ;
}
@@ -77,7 +77,7 @@ static int node_check(struct dm_block_validator *v,
*/
flags = le32_to_cpu(h->flags);
if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
- DMERR("node_check failed, node is neither INTERNAL or LEAF");
+ DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
return -EILSEQ;
}
@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
-struct node *ro_node(struct ro_spine *s)
+struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index d12b2cc51f1..4caf66918cd 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
/*----------------------------------------------------------------*/
/* makes the assumption that no two keys are the same. */
-static int bsearch(struct node *n, uint64_t key, int want_hi)
+static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
return want_hi ? hi : lo;
}
-int lower_bound(struct node *n, uint64_t key)
+int lower_bound(struct btree_node *n, uint64_t key)
{
return bsearch(n, key, 0);
}
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt)
{
unsigned i;
@@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
vt->inc(vt->context, value_ptr(n, i));
}
-static int insert_at(size_t value_size, struct node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
int r;
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
size_t block_size;
uint32_t max_entries;
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
#define MAX_SPINE_DEPTH 64
struct frame {
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
unsigned level;
unsigned nr_children;
unsigned current_child;
@@ -230,6 +230,11 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -241,7 +246,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s->tm = info->tm;
s->top = -1;
- r = push_frame(s, root, 1);
+ r = push_frame(s, root, 0);
if (r)
goto out;
@@ -267,7 +272,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
if (r)
goto out;
- } else if (f->level != (info->levels - 1)) {
+ } else if (is_internal_level(info, f)) {
b = value64(f->n, f->current_child);
f->current_child++;
r = push_frame(s, b, f->level + 1);
@@ -295,7 +300,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
/*----------------------------------------------------------------*/
static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
- int (*search_fn)(struct node *, uint64_t),
+ int (*search_fn)(struct btree_node *, uint64_t),
uint64_t *result_key, void *v, size_t value_size)
{
int i, r;
@@ -406,7 +411,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *parent;
- struct node *ln, *rn, *pn;
+ struct btree_node *ln, *rn, *pn;
__le64 location;
left = shadow_current(s);
@@ -491,7 +496,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
- struct node *pn, *ln, *rn;
+ struct btree_node *pn, *ln, *rn;
__le64 val;
new_parent = shadow_current(s);
@@ -576,7 +581,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
uint64_t key, unsigned *index)
{
int r, i = *index, top = 1;
- struct node *node;
+ struct btree_node *node;
for (;;) {
r = shadow_step(s, root, vt);
@@ -643,7 +648,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
unsigned level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
struct dm_btree_value_type le64_type;
le64_type.context = NULL;
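
The bsearch()/lower_bound() context shown earlier in this file (now taking struct btree_node) keeps the invariant keys[lo] <= key < keys[hi], with lo starting at -1 and hi at nr_entries, and lower_bound() returns lo. A standalone sketch of the same search over a plain sorted array, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the btree bsearch(): keys[lo] <= key < keys[hi],
 * lo starts at -1, hi at nr_entries; lower_bound returns lo. */
static int lower_bound(const uint64_t *keys, int nr_entries, uint64_t key)
{
	int lo = -1, hi = nr_entries;

	while (hi - lo > 1) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] <= key)
			lo = mid;
		else
			hi = mid;
	}
	return lo; /* index of the greatest key <= key, or -1 if none */
}

int main(void)
{
	uint64_t keys[] = { 10, 20, 30, 40 };

	printf("%d %d %d\n",
	       lower_bound(keys, 4, 5),   /* -1 */
	       lower_bound(keys, 4, 25),  /*  1 */
	       lower_bound(keys, 4, 40)); /*  3 */
	return 0;
}
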
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index ae02c84410f..a2cd50441ca 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -35,7 +35,7 @@ struct dm_transaction_manager;
*/
/*
- * Infomation about the values stored within the btree.
+ * Information about the values stored within the btree.
*/
struct dm_btree_value_type {
void *context;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index f3a9af8cdec..3e7a88d99eb 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -39,8 +39,8 @@ static int index_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
- DMERR("index_check failed blocknr %llu wanted %llu",
- le64_to_cpu(mi_le->blocknr), dm_block_location(b));
+ DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(mi_le->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -48,8 +48,8 @@ static int index_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
INDEX_CSUM_XOR));
if (csum_disk != mi_le->csum) {
- DMERR("index_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
+ DMERR_LIMIT("index_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
return -EILSEQ;
}
@@ -89,8 +89,8 @@ static int bitmap_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
- DMERR("bitmap check failed blocknr %llu wanted %llu",
- le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -98,8 +98,8 @@ static int bitmap_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BITMAP_CSUM_XOR));
if (csum_disk != disk_header->csum) {
- DMERR("bitmap check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
return -EILSEQ;
}
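
The validator hunks above follow one pattern: check that the stored block number matches the location (-ENOTBLK on mismatch), then check the stored checksum (-EILSEQ on mismatch), with the error messages now rate-limited via DMERR_LIMIT. A hedged userspace sketch of that validator shape, using a toy XOR checksum as a stand-in for dm_bm_checksum() and an illustrative header layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

struct block_header {
	uint64_t blocknr;
	uint32_t csum;     /* checksum of the payload that follows */
	uint8_t  data[52];
};

/* Toy stand-in for dm_bm_checksum(): XOR of the payload bytes. */
static uint32_t toy_csum(const uint8_t *p, size_t len)
{
	uint32_t c = 0;

	while (len--)
		c ^= *p++;
	return c;
}

static int block_check(const struct block_header *h, uint64_t location)
{
	if (h->blocknr != location) {
		fprintf(stderr, "check failed: blocknr %llu != wanted %llu\n",
			(unsigned long long)h->blocknr,
			(unsigned long long)location);
		return -ENOTBLK;
	}
	if (toy_csum(h->data, sizeof(h->data)) != h->csum) {
		fprintf(stderr, "check failed: bad checksum\n");
		return -EILSEQ;
	}
	return 0;
}

int main(void)
{
	struct block_header h = { .blocknr = 7 };

	memset(h.data, 0xab, sizeof(h.data));
	h.csum = toy_csum(h.data, sizeof(h.data));
	printf("check: %d\n", block_check(&h, 7)); /* 0 on success */
	return 0;
}
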
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index e89ae5e7a51..906cf3df71a 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -337,7 +337,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
int r = sm_metadata_new_block_(sm, b);
if (r)
- DMERR("out of metadata space");
+ DMERR("unable to allocate new metadata block");
return r;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a0f73092176..d5bddfc4010 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9acbd71713..64d48249c03 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a4502686e7a..19d77a02663 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
#include "md.h"
#include "raid5.h"
#include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -466,7 +470,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
@@ -480,8 +484,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
- conf->device_lock,
- );
+ conf->device_lock);
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -671,6 +674,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ bi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(bi);
}
if (rrdev) {
@@ -698,6 +704,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
+ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ rbi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
@@ -1576,7 +1585,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* This happens in stages:
* 1/ create a new kmem_cache and allocate the required number of
* stripe_heads.
- * 2/ gather all the old stripe_heads and tranfer the pages across
+ * 2/ gather all the old stripe_heads and transfer the pages across
* to the new stripe_heads. This will have the side effect of
* freezing the array as once all stripe_heads have been collected,
* no IO will be possible. Old stripe heads are freed once their
@@ -1646,8 +1655,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
- conf->device_lock,
- );
+ conf->device_lock);
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -2855,8 +2863,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0)
+ if (rmw < rcw && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+ (unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2867,7 +2877,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
pr_debug("Read_old block "
- "%d for r-m-w\n", i);
+ "%d for r-m-w\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
@@ -2877,8 +2887,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
+ }
if (rcw <= rmw && rcw > 0) {
/* want reconstruct write, but need to get some data */
+ int qread = 0;
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2897,12 +2909,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
+ qread++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
+ if (rcw)
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+ (unsigned long long)sh->sector,
+ rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
/* now if nothing is locked, and if we have enough data,
* we can start a write request
@@ -3224,10 +3241,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
/* done submitting copies, wait for them to complete */
- if (tx) {
- async_tx_ack(tx);
- dma_wait_for_async_tx(tx);
- }
+ async_tx_quiesce(&tx);
}
/*
@@ -3903,6 +3917,8 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
+ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+ raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4003,10 +4019,13 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ align_bi, disk_devt(mddev->gendisk),
+ raid_bio->bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4081,6 +4100,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct stripe_head *sh;
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
+ int cnt = 0;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4095,9 +4115,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
__release_stripe(conf, sh);
+ cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
}
@@ -4355,6 +4377,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
}
}
@@ -4731,8 +4755,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0)
+ if (remaining == 0) {
+ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+ raid_bio, 0);
bio_endio(raid_bio, 0);
+ }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
@@ -6095,7 +6122,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
wait_event_lock_irq(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
conf->quiesce = 1;
spin_unlock_irq(&conf->device_lock);
/* allow reshape to continue */
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 121b0110af3..d2a436ce77f 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -1,3 +1,10 @@
+# Used by common drivers, when they need to ask questions
+config MEDIA_COMMON_OPTIONS
+ bool
+
+comment "common driver options"
+ depends on MEDIA_COMMON_OPTIONS
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/b2c2/Kconfig b/drivers/media/common/b2c2/Kconfig
index 1df9e578daa..a8c6cdfaa2f 100644
--- a/drivers/media/common/b2c2/Kconfig
+++ b/drivers/media/common/b2c2/Kconfig
@@ -17,11 +17,6 @@ config DVB_B2C2_FLEXCOP
select DVB_CX24123 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_CX24113 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the digital TV receiver chip made by B2C2 Inc. included in
- Technisats PCI cards and USB boxes.
-
- Say Y if you own such a device and want to use it.
# Selected via the PCI or USB flexcop drivers
config DVB_B2C2_FLEXCOP_DEBUG
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
index 425aeadfb49..68f0f604678 100644
--- a/drivers/media/common/siano/Kconfig
+++ b/drivers/media/common/siano/Kconfig
@@ -4,14 +4,16 @@
config SMS_SIANO_MDTV
tristate
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ depends on !RC_CORE || RC_CORE
depends on SMS_USB_DRV || SMS_SDIO_DRV
default y
- ---help---
- Choose Y or M here if you have MDTV receiver with a Siano chipset.
-
- To compile this driver as a module, choose M here
- (The module will be called smsmdtv).
- Further documentation on this driver can be found on the WWW
- at http://www.siano-ms.com/
+config SMS_SIANO_RC
+ bool "Enable Remote Controller support for Siano devices"
+ depends on SMS_SIANO_MDTV && RC_CORE
+ depends on SMS_USB_DRV || SMS_SDIO_DRV
+ depends on MEDIA_COMMON_OPTIONS
+ default y
+ ---help---
+ Choose Y to select Remote Controller support for Siano driver.
diff --git a/drivers/media/common/siano/Makefile b/drivers/media/common/siano/Makefile
index 2a09279e064..81b1e985bea 100644
--- a/drivers/media/common/siano/Makefile
+++ b/drivers/media/common/siano/Makefile
@@ -1,7 +1,11 @@
-smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
+smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o
obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
+ifeq ($(CONFIG_SMS_SIANO_RC),y)
+ smsmdtv-objs += smsir.o
+endif
+
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 9cc55546cc3..1842e64e633 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1092,7 +1092,7 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
* @return pointer to descriptor on success, NULL on error.
*/
-struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
+static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
{
struct smscore_buffer_t *cb = NULL;
unsigned long flags;
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 37bc5c4b8ad..b8c5cad7853 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -88,7 +88,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
dev->priv = coredev;
dev->driver_type = RC_DRIVER_IR_RAW;
- dev->allowed_protos = RC_TYPE_ALL;
+ dev->allowed_protos = RC_BIT_ALL;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index ae92b3a8587..69b59b9eee2 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -46,10 +46,19 @@ struct ir_t {
u32 controller;
};
+#ifdef CONFIG_SMS_SIANO_RC
int sms_ir_init(struct smscore_device_t *coredev);
void sms_ir_exit(struct smscore_device_t *coredev);
void sms_ir_event(struct smscore_device_t *coredev,
const char *buf, int len);
+#else
+static inline int sms_ir_init(struct smscore_device_t *coredev) {
+ return 0;
+}
+static inline void sms_ir_exit(struct smscore_device_t *coredev) {}
+static inline void sms_ir_event(struct smscore_device_t *coredev,
+ const char *buf, int len) {}
+#endif
#endif /* __SMS_IR_H__ */
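
The smsir.h hunk above uses the usual config-split idiom: when CONFIG_SMS_SIANO_RC is off, static inline no-op stubs replace the real prototypes so callers compile unchanged and the calls optimise away. A runnable sketch of the same pattern with a hypothetical FEATURE_RC macro standing in for the Kconfig symbol:

#include <stdio.h>

/* Build with -DFEATURE_RC for the real implementation; without it the
 * stubs below compile away, mirroring the CONFIG_SMS_SIANO_RC split. */
#ifdef FEATURE_RC
static int rc_init(void) { printf("rc: init\n"); return 0; }
static void rc_event(const char *buf, int len)
{
	printf("rc: %d byte event from %s\n", len, buf);
}
#else
static inline int rc_init(void) { return 0; }
static inline void rc_event(const char *buf, int len)
{
	(void)buf;
	(void)len;
}
#endif

int main(void)
{
	if (rc_init() == 0)
		rc_event("remote", 4);
	return 0;
}
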
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 889c9c16c6d..d81dbb22aa8 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -877,7 +877,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
- if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
+ if ((unsigned)params->pes_type > DMX_PES_OTHER)
return -EINVAL;
dmxdevfilter->type = DMXDEV_TYPE_PES;
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 02ebe28f830..48c6cf92ab9 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
+#include <linux/time.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/fs.h>
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 58e0220447c..388c2eb4d74 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -250,6 +250,7 @@
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 7e92793260f..49d95040096 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1029,12 +1029,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
/* Get */
_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
_DTV_CMD(DTV_API_VERSION, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_HP, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_LP, 0, 0),
- _DTV_CMD(DTV_GUARD_INTERVAL, 0, 0),
- _DTV_CMD(DTV_TRANSMISSION_MODE, 0, 0),
- _DTV_CMD(DTV_HIERARCHY, 0, 0),
- _DTV_CMD(DTV_INTERLEAVING, 0, 0),
_DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
@@ -1042,13 +1036,11 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
_DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
- _DTV_CMD(DTV_ATSCMH_PARADE_ID, 0, 0),
_DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
_DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
@@ -1056,8 +1048,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
-
- _DTV_CMD(DTV_LNA, 0, 0),
};
static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index f2a90f990ce..3d399d9a634 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -139,7 +139,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
@@ -152,7 +152,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
p->modulation != QAM_64)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 7e28b4ee7d4..68c88ab58e7 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -338,7 +338,7 @@ static int cx24123_set_fec(struct cx24123_state *state, fe_code_rate_t fec)
{
u8 nom_reg = cx24123_readreg(state, 0x0e) & ~0x07;
- if ((fec < FEC_NONE) || (fec > FEC_AUTO))
+ if (((int)fec < FEC_NONE) || (fec > FEC_AUTO))
fec = FEC_AUTO;
/* Set the soft decision threshold */
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index b5781a48034..de1cc91fd83 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -97,7 +97,7 @@ static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb
return -ENODEV;
}
-int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
+static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 6d9853750d2..e71cc60851e 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1748,7 +1748,8 @@ static int DRX_Stop(struct drxd_state *state)
return status;
}
-int SetOperationMode(struct drxd_state *state, int oMode)
+#if 0 /* Currently unused */
+static int SetOperationMode(struct drxd_state *state, int oMode)
{
int status;
@@ -1788,6 +1789,7 @@ int SetOperationMode(struct drxd_state *state, int oMode)
state->operation_mode = oMode;
return status;
}
+#endif
static int StartDiversity(struct drxd_state *state)
{
@@ -2612,7 +2614,7 @@ static int CDRXD(struct drxd_state *state, u32 IntermediateFrequency)
return 0;
}
-int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
+static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
{
int status = 0;
u32 driverVersion;
@@ -2774,7 +2776,7 @@ int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
return status;
}
-int DRXD_status(struct drxd_state *state, u32 * pLockStatus)
+static int DRXD_status(struct drxd_state *state, u32 *pLockStatus)
{
DRX_GetLockStatus(state, pLockStatus);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 8b4c6d5f8f3..c2fc7da0d6b 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -65,16 +65,6 @@ static bool IsQAM(struct drxk_state *state)
state->m_OperationMode == OM_QAM_ITU_C;
}
-bool IsA1WithPatchCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_PATCH_CODE;
-}
-
-bool IsA1WithRomCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_ROM_CODE;
-}
-
#define NOA1ROM 0
#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
@@ -189,7 +179,7 @@ static inline u32 MulDiv32(u32 a, u32 b, u32 c)
return (u32) tmp64;
}
-inline u32 Frac28a(u32 a, u32 c)
+static inline u32 Frac28a(u32 a, u32 c)
{
int i = 0;
u32 Q1 = 0;
@@ -587,7 +577,7 @@ static int write_block(struct drxk_state *state, u32 Address,
#define DRXK_MAX_RETRIES_POWERUP 20
#endif
-int PowerUpDevice(struct drxk_state *state)
+static int PowerUpDevice(struct drxk_state *state)
{
int status;
u8 data = 0;
@@ -720,11 +710,6 @@ static int init_state(struct drxk_state *state)
state->m_bPowerDown = (ulPowerDown != 0);
- state->m_DRXK_A1_PATCH_CODE = false;
- state->m_DRXK_A1_ROM_CODE = false;
- state->m_DRXK_A2_ROM_CODE = false;
- state->m_DRXK_A3_ROM_CODE = false;
- state->m_DRXK_A2_PATCH_CODE = false;
state->m_DRXK_A3_PATCH_CODE = false;
/* Init AGC and PGA parameters */
@@ -921,7 +906,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
if (status < 0)
goto error;
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
@@ -948,7 +933,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
state->m_oscClockFreq = 20250;
break;
default:
- printk(KERN_ERR "drxk: Clock Frequency is unkonwn\n");
+ printk(KERN_ERR "drxk: Clock Frequency is unknown\n");
return -EINVAL;
}
/*
@@ -1217,7 +1202,7 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
goto error;
/* MPEG TS pad configuration */
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
@@ -5461,6 +5446,7 @@ static int QAMDemodulatorCommand(struct drxk_state *state,
} else {
printk(KERN_WARNING "drxk: Unknown QAM demodulator parameter "
"count %d\n", numberOfParameters);
+ status = -EINVAL;
}
error:
diff --git a/drivers/media/dvb-frontends/drxk_hard.h b/drivers/media/dvb-frontends/drxk_hard.h
index 6bb9fc4a7b9..d18a896a983 100644
--- a/drivers/media/dvb-frontends/drxk_hard.h
+++ b/drivers/media/dvb-frontends/drxk_hard.h
@@ -320,11 +320,7 @@ struct drxk_state {
u8 *m_microcode;
int m_microcode_length;
- bool m_DRXK_A1_PATCH_CODE;
- bool m_DRXK_A1_ROM_CODE;
- bool m_DRXK_A2_ROM_CODE;
- bool m_DRXK_A3_ROM_CODE;
- bool m_DRXK_A2_PATCH_CODE;
+ bool m_DRXK_A3_ROM_CODE;
bool m_DRXK_A3_PATCH_CODE;
bool m_rfmirror;
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 5b639087ce4..60a529e3833 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -30,7 +30,6 @@
#include "ds3000.h"
static int debug;
-static int force_fw_upload;
#define dprintk(args...) \
do { \
@@ -234,7 +233,6 @@ struct ds3000_state {
struct i2c_adapter *i2c;
const struct ds3000_config *config;
struct dvb_frontend frontend;
- u8 skip_fw_load;
/* previous uncorrected block counter for DVB-S2 */
u16 prevUCBS2;
};
@@ -397,9 +395,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- if (state->skip_fw_load || !force_fw_upload)
- return 0; /* Firmware already uploaded, skipping */
-
/* Load firmware */
/* request the firmware, this will block until someone uploads it */
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
@@ -413,9 +408,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
return ret;
}
- /* Make sure we don't recurse back through here during loading */
- state->skip_fw_load = 1;
-
ret = ds3000_load_firmware(fe, fw);
if (ret)
printk("%s: Writing firmware to device failed\n", __func__);
@@ -425,9 +417,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
dprintk("%s: Firmware upload %s\n", __func__,
ret == 0 ? "complete" : "failed");
- /* Ensure firmware is always loaded if required */
- state->skip_fw_load = 0;
-
return ret;
}
@@ -1309,10 +1298,8 @@ static struct dvb_frontend_ops ds3000_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-module_param(force_fw_upload, int, 0644);
-MODULE_PARM_DESC(force_fw_upload, "Force firmware upload (default:0)");
-
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
"DS3000/TS2020 hardware");
MODULE_AUTHOR("Konstantin Dimitrov");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 36fcf559e36..ddf866c46f8 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -180,11 +180,11 @@ static int apply_frontend_param(struct dvb_frontend *fe)
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index e20bf13aa86..ec388c1d691 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -549,7 +549,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->frequency > fe->ops.info.frequency_max))
return -EINVAL;
- if ((p->inversion < INVERSION_OFF)
+ if (((int)p->inversion < INVERSION_OFF)
|| (p->inversion > INVERSION_ON))
return -EINVAL;
@@ -557,7 +557,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->symbol_rate > fe->ops.info.symbol_rate_max))
return -EINVAL;
- if ((p->fec_inner < FEC_NONE)
+ if (((int)p->fec_inner < FEC_NONE)
|| (p->fec_inner > FEC_AUTO))
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index b0f6ec03d1e..362d26d11e8 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -130,7 +130,7 @@ static int rtl2830_rd_reg(struct rtl2830_priv *priv, u16 reg, u8 *val)
}
/* write single register with mask */
-int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
+static int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -150,7 +150,7 @@ int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
+static int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
@@ -256,7 +256,7 @@ static int rtl2830_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2830_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 80c8e5f1182..73887690b04 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -265,7 +265,7 @@ static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
return rtl2832_rd_regs(priv, reg, page, val, 1);
}
-int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
+static int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
{
int ret;
@@ -305,7 +305,7 @@ err:
}
-int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
+static int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
{
int ret, i;
u8 len;
@@ -510,7 +510,7 @@ static int rtl2832_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2832_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
struct rtl2832_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 79e29de87fb..cc278b3d6d5 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1260,7 +1260,7 @@ static inline void CONVERT32(u32 x, char *str)
*str = '\0';
}
-int stb0899_get_dev_id(struct stb0899_state *state)
+static int stb0899_get_dev_id(struct stb0899_state *state)
{
u8 chip_id, release;
u16 id;
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 2a8aaeb1112..0c8e45949b1 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -879,7 +879,8 @@ static u8 stv0367_readbits(struct stv0367_state *state, u32 label)
return val;
}
-u8 stv0367_getbits(u8 reg, u32 label)
+#if 0 /* Currently, unused */
+static u8 stv0367_getbits(u8 reg, u32 label)
{
u8 mask, pos;
@@ -887,7 +888,7 @@ u8 stv0367_getbits(u8 reg, u32 label)
return (reg & mask) >> pos;
}
-
+#endif
static int stv0367ter_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
@@ -1263,8 +1264,8 @@ stv0367_ter_signal_type stv0367ter_check_cpamp(struct stv0367_state *state,
return CPAMPStatus;
}
-enum
-stv0367_ter_signal_type stv0367ter_lock_algo(struct stv0367_state *state)
+static enum stv0367_ter_signal_type
+stv0367ter_lock_algo(struct stv0367_state *state)
{
enum stv0367_ter_signal_type ret_flag;
short int wd, tempo;
@@ -1528,7 +1529,7 @@ static int stv0367ter_sleep(struct dvb_frontend *fe)
return stv0367ter_standby(fe, 1);
}
-int stv0367ter_init(struct dvb_frontend *fe)
+static int stv0367ter_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
@@ -2378,9 +2379,9 @@ static u32 stv0367cab_get_adc_freq(struct dvb_frontend *fe, u32 ExtClk_Hz)
return ADCClk_Hz;
}
-enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
- u32 SymbolRate,
- enum stv0367cab_mod QAMSize)
+static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
+ u32 SymbolRate,
+ enum stv0367cab_mod QAMSize)
{
/* Set QAM size */
stv0367_writebits(state, F367CAB_QAM_MODE, QAMSize);
@@ -2762,7 +2763,7 @@ static int stv0367cab_sleep(struct dvb_frontend *fe)
return stv0367cab_standby(fe, 1);
}
-int stv0367cab_init(struct dvb_frontend *fe)
+static int stv0367cab_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index a83bf680234..16a4bc54dbe 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -96,7 +96,8 @@ static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
}
/* write single register with mask */
-int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
+static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -116,7 +117,8 @@ int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int tda10071_rd_reg_mask(struct tda10071_priv *priv, u8 reg, u8 *val, u8 mask)
+static int tda10071_rd_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index ad7c72e8f51..d281f77d5c2 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -32,6 +32,7 @@
#include <asm/div64.h>
#include "dvb_frontend.h"
+#include "tda18271c2dd.h"
struct SStandardParam {
s32 m_IFFrequency;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 4fdcd8cb753..c2ba085e0d2 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -13,6 +13,7 @@
#ifndef _FIREDTV_H
#define _FIREDTV_H
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/list.h>
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 18a38b38fcb..df163800c8e 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -3,10 +3,10 @@
*
* Copyright (C) 2008--2011 Nokia Corporation
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Contributors:
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
* Tuukka Toivonen <tuukkat76@gmail.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index e1d4c89d714..10c3c1db4cd 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -681,18 +681,7 @@ static struct i2c_driver adv7183_driver = {
.id_table = adv7183_id,
};
-static __init int adv7183_init(void)
-{
- return i2c_add_driver(&adv7183_driver);
-}
-
-static __exit void adv7183_exit(void)
-{
- i2c_del_driver(&adv7183_driver);
-}
-
-module_init(adv7183_init);
-module_exit(adv7183_exit);
+module_i2c_driver(adv7183_driver);
MODULE_DESCRIPTION("Analog Devices ADV7183 video decoder driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 05f8950f6f9..f47555b1000 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -486,9 +486,19 @@ static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
struct i2c_client *client = state->i2c_edid;
u8 msgbuf0[1] = { 0 };
u8 msgbuf1[256];
- struct i2c_msg msg[2] = { { client->addr, 0, 1, msgbuf0 },
- { client->addr, 0 | I2C_M_RD, len, msgbuf1 }
- };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = msgbuf0
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = msgbuf1
+ },
+ };
if (i2c_transfer(client->adapter, msg, 2) < 0)
return -EIO;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 2cee69e3418..f4149eb4d7b 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -2065,7 +2065,7 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
#define DIF_BPF_COEFF3435 (0x38c)
#define DIF_BPF_COEFF36 (0x390)
-void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
+static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
u64 pll_freq;
u32 pll_freq_word;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 04f192a0398..08ae067b2b6 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -284,7 +284,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
char *ir_codes = NULL;
const char *name = NULL;
- u64 rc_type = RC_TYPE_UNKNOWN;
+ u64 rc_type = RC_BIT_UNKNOWN;
struct IR_i2c *ir;
struct rc_dev *rc = NULL;
struct i2c_adapter *adap = client->adapter;
@@ -303,7 +303,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x64:
name = "Pixelview";
ir->get_key = get_key_pixelview;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x18:
@@ -311,31 +311,31 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x1a:
name = "Hauppauge";
ir->get_key = get_key_haup;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 49c1b3abb42..2750de63427 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -343,7 +343,7 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
}
regs_num = le32_to_cpu(get_unaligned_le32(fw->data));
- v4l2_dbg(3, debug, sd, "FW: %s size %d register sets %d\n",
+ v4l2_dbg(3, debug, sd, "FW: %s size %zu register sets %d\n",
S5K4ECGX_FIRMWARE, fw->size, regs_num);
regs_num++; /* Add header */
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index a577614bd84..d8d5da7c52d 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -58,7 +58,7 @@ static int bounds_check(struct device *dev, uint32_t val,
if (val >= min && val <= max)
return 0;
- dev_warn(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
+ dev_dbg(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
return -EINVAL;
}
@@ -87,14 +87,14 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
}
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
- struct smiapp_pll *pll)
+static int __smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll, uint32_t mul,
+ uint32_t div, uint32_t lane_op_clock_ratio)
{
uint32_t sys_div;
uint32_t best_pix_div = INT_MAX >> 1;
uint32_t vt_op_binning_div;
- uint32_t lane_op_clock_ratio;
- uint32_t mul, div;
uint32_t more_mul_min, more_mul_max;
uint32_t more_mul_factor;
uint32_t min_vt_div, max_vt_div, vt_div;
@@ -102,54 +102,6 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
unsigned int i;
int rval;
- if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
- lane_op_clock_ratio = pll->lanes;
- else
- lane_op_clock_ratio = 1;
- dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
-
- dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
- pll->binning_vertical);
-
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- pll->pll_op_clk_freq_hz = pll->link_freq * 2
- * (pll->lanes / lane_op_clock_ratio);
-
- /* Figure out limits for pre-pll divider based on extclk */
- dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
- limits->max_pre_pll_clk_div =
- min_t(uint16_t, limits->max_pre_pll_clk_div,
- clk_div_even(pll->ext_clk_freq_hz /
- limits->min_pll_ip_freq_hz));
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(pll->ext_clk_freq_hz,
- limits->max_pll_ip_freq_hz)));
- dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
- mul = div_u64(pll->pll_op_clk_freq_hz, i);
- div = pll->ext_clk_freq_hz / i;
- dev_dbg(dev, "mul %d / div %d\n", mul, div);
-
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
- limits->max_pll_op_freq_hz)));
- dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- if (limits->min_pre_pll_clk_div > limits->max_pre_pll_clk_div) {
- dev_err(dev, "unable to compute pre_pll divisor\n");
- return -EINVAL;
- }
-
- pll->pre_pll_clk_div = limits->min_pre_pll_clk_div;
-
/*
* Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
* too high.
@@ -162,7 +114,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above max pll op frequency. */
more_mul_max =
- min_t(int,
+ min_t(uint32_t,
more_mul_max,
limits->max_pll_op_freq_hz
/ (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
@@ -170,7 +122,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above the division capability of op sys clock divider. */
more_mul_max = min(more_mul_max,
- limits->max_op_sys_clk_div * pll->pre_pll_clk_div
+ limits->op.max_sys_clk_div * pll->pre_pll_clk_div
/ div);
dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
more_mul_max);
@@ -193,14 +145,14 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_min);
if (more_mul_min > more_mul_max) {
- dev_warn(dev,
- "unable to compute more_mul_min and more_mul_max");
+ dev_dbg(dev,
+ "unable to compute more_mul_min and more_mul_max\n");
return -EINVAL;
}
more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
- more_mul_factor = lcm(more_mul_factor, limits->min_op_sys_clk_div);
+ more_mul_factor = lcm(more_mul_factor, limits->op.min_sys_clk_div);
dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
more_mul_factor);
i = roundup(more_mul_min, more_mul_factor);
@@ -209,7 +161,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "final more_mul: %d\n", i);
if (i > more_mul_max) {
- dev_warn(dev, "final more_mul is bad, max %d", more_mul_max);
+ dev_dbg(dev, "final more_mul is bad, max %d\n", more_mul_max);
return -EINVAL;
}
@@ -268,19 +220,19 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
min_vt_div = max(min_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz));
+ limits->vt.max_pix_clk_freq_hz));
dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
min_vt_div);
min_vt_div = max_t(uint32_t, min_vt_div,
- limits->min_vt_pix_clk_div
- * limits->min_vt_sys_clk_div);
+ limits->vt.min_pix_clk_div
+ * limits->vt.min_sys_clk_div);
dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
- max_vt_div = limits->max_vt_sys_clk_div * limits->max_vt_pix_clk_div;
+ max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div;
dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
max_vt_div = min(max_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
max_vt_div);
@@ -288,28 +240,28 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
* Find limitsits for sys_clk_div. Not all values are possible
* with all values of pix_clk_div.
*/
- min_sys_div = limits->min_vt_sys_clk_div;
+ min_sys_div = limits->vt.min_sys_clk_div;
dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
DIV_ROUND_UP(min_vt_div,
- limits->max_vt_pix_clk_div));
+ limits->vt.max_pix_clk_div));
dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
pll->pll_op_clk_freq_hz
- / limits->max_vt_sys_clk_freq_hz);
+ / limits->vt.max_sys_clk_freq_hz);
dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
min_sys_div = clk_div_even_up(min_sys_div);
dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
- max_sys_div = limits->max_vt_sys_clk_div;
+ max_sys_div = limits->vt.max_sys_clk_div;
dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(max_vt_div,
- limits->min_vt_pix_clk_div));
+ limits->vt.min_pix_clk_div));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
/*
@@ -322,15 +274,15 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
for (sys_div = min_sys_div;
sys_div <= max_sys_div;
sys_div += 2 - (sys_div & 1)) {
- int pix_div = DIV_ROUND_UP(vt_div, sys_div);
+ uint16_t pix_div = DIV_ROUND_UP(vt_div, sys_div);
- if (pix_div < limits->min_vt_pix_clk_div
- || pix_div > limits->max_vt_pix_clk_div) {
+ if (pix_div < limits->vt.min_pix_clk_div
+ || pix_div > limits->vt.max_pix_clk_div) {
dev_dbg(dev,
"pix_div %d too small or too big (%d--%d)\n",
pix_div,
- limits->min_vt_pix_clk_div,
- limits->max_vt_pix_clk_div);
+ limits->vt.min_pix_clk_div,
+ limits->vt.max_pix_clk_div);
continue;
}
@@ -354,16 +306,10 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
pll->pixel_rate_csi =
pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
- print_pll(dev, pll);
-
- rval = bounds_check(dev, pll->pre_pll_clk_div,
- limits->min_pre_pll_clk_div,
- limits->max_pre_pll_clk_div, "pre_pll_clk_div");
- if (!rval)
- rval = bounds_check(
- dev, pll->pll_ip_clk_freq_hz,
- limits->min_pll_ip_freq_hz, limits->max_pll_ip_freq_hz,
- "pll_ip_clk_freq_hz");
+ rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz,
+ limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->pll_multiplier,
@@ -377,42 +323,121 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_div,
- limits->min_op_sys_clk_div, limits->max_op_sys_clk_div,
+ limits->op.min_sys_clk_div, limits->op.max_sys_clk_div,
"op_sys_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_div,
- limits->min_op_pix_clk_div, limits->max_op_pix_clk_div,
+ limits->op.min_pix_clk_div, limits->op.max_pix_clk_div,
"op_pix_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_freq_hz,
- limits->min_op_sys_clk_freq_hz,
- limits->max_op_sys_clk_freq_hz,
+ limits->op.min_sys_clk_freq_hz,
+ limits->op.max_sys_clk_freq_hz,
"op_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_freq_hz,
- limits->min_op_pix_clk_freq_hz,
- limits->max_op_pix_clk_freq_hz,
+ limits->op.min_pix_clk_freq_hz,
+ limits->op.max_pix_clk_freq_hz,
"op_pix_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_sys_clk_freq_hz,
- limits->min_vt_sys_clk_freq_hz,
- limits->max_vt_sys_clk_freq_hz,
+ limits->vt.min_sys_clk_freq_hz,
+ limits->vt.max_sys_clk_freq_hz,
"vt_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_pix_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz,
+ limits->vt.min_pix_clk_freq_hz,
+ limits->vt.max_pix_clk_freq_hz,
"vt_pix_clk_freq_hz");
return rval;
}
+
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll)
+{
+ uint16_t min_pre_pll_clk_div;
+ uint16_t max_pre_pll_clk_div;
+ uint32_t lane_op_clock_ratio;
+ uint32_t mul, div;
+ unsigned int i;
+ int rval = -EINVAL;
+
+ if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
+ lane_op_clock_ratio = pll->csi2.lanes;
+ else
+ lane_op_clock_ratio = 1;
+ dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+
+ dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ pll->binning_vertical);
+
+ switch (pll->bus_type) {
+ case SMIAPP_PLL_BUS_TYPE_CSI2:
+ /* CSI transfers 2 bits per clock per lane; thus times 2 */
+ pll->pll_op_clk_freq_hz = pll->link_freq * 2
+ * (pll->csi2.lanes / lane_op_clock_ratio);
+ break;
+ case SMIAPP_PLL_BUS_TYPE_PARALLEL:
+ pll->pll_op_clk_freq_hz = pll->link_freq * pll->bits_per_pixel
+ / DIV_ROUND_UP(pll->bits_per_pixel,
+ pll->parallel.bus_width);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Figure out limits for pre-pll divider based on extclk */
+ dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+ max_pre_pll_clk_div =
+ min_t(uint16_t, limits->max_pre_pll_clk_div,
+ clk_div_even(pll->ext_clk_freq_hz /
+ limits->min_pll_ip_freq_hz));
+ min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ limits->max_pll_ip_freq_hz)));
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
+ mul = div_u64(pll->pll_op_clk_freq_hz, i);
+ div = pll->ext_clk_freq_hz / i;
+ dev_dbg(dev, "mul %d / div %d\n", mul, div);
+
+ min_pre_pll_clk_div =
+ max_t(uint16_t, min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
+ limits->max_pll_op_freq_hz)));
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ for (pll->pre_pll_clk_div = min_pre_pll_clk_div;
+ pll->pre_pll_clk_div <= max_pre_pll_clk_div;
+ pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) {
+ rval = __smiapp_pll_calculate(dev, limits, pll, mul, div,
+ lane_op_clock_ratio);
+ if (rval)
+ continue;
+
+ print_pll(dev, pll);
+ return 0;
+ }
+
+ dev_info(dev, "unable to compute pre_pll divisor\n");
+ return rval;
+}
EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ PLL calculator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index cb2d2db5d02..a4a649834a1 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -27,16 +27,34 @@
#include <linux/device.h>
+/* CSI-2 or CCP-2 */
+#define SMIAPP_PLL_BUS_TYPE_CSI2 0x00
+#define SMIAPP_PLL_BUS_TYPE_PARALLEL 0x01
+
+/* op pix clock is for all lanes in total normally */
+#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+
struct smiapp_pll {
- uint8_t lanes;
+ /* input values */
+ uint8_t bus_type;
+ union {
+ struct {
+ uint8_t lanes;
+ } csi2;
+ struct {
+ uint8_t bus_width;
+ } parallel;
+ };
+ uint8_t flags;
uint8_t binning_horizontal;
uint8_t binning_vertical;
uint8_t scale_m;
uint8_t scale_n;
uint8_t bits_per_pixel;
- uint16_t flags;
uint32_t link_freq;
+ /* output values */
uint16_t pre_pll_clk_div;
uint16_t pll_multiplier;
uint16_t op_sys_clk_div;
@@ -55,6 +73,17 @@ struct smiapp_pll {
uint32_t pixel_rate_csi;
};
+struct smiapp_pll_branch_limits {
+ uint16_t min_sys_clk_div;
+ uint16_t max_sys_clk_div;
+ uint32_t min_sys_clk_freq_hz;
+ uint32_t max_sys_clk_freq_hz;
+ uint16_t min_pix_clk_div;
+ uint16_t max_pix_clk_div;
+ uint32_t min_pix_clk_freq_hz;
+ uint32_t max_pix_clk_freq_hz;
+};
+
struct smiapp_pll_limits {
/* Strict PLL limits */
uint32_t min_ext_clk_freq_hz;
@@ -68,36 +97,18 @@ struct smiapp_pll_limits {
uint32_t min_pll_op_freq_hz;
uint32_t max_pll_op_freq_hz;
- uint16_t min_vt_sys_clk_div;
- uint16_t max_vt_sys_clk_div;
- uint32_t min_vt_sys_clk_freq_hz;
- uint32_t max_vt_sys_clk_freq_hz;
- uint16_t min_vt_pix_clk_div;
- uint16_t max_vt_pix_clk_div;
- uint32_t min_vt_pix_clk_freq_hz;
- uint32_t max_vt_pix_clk_freq_hz;
-
- uint16_t min_op_sys_clk_div;
- uint16_t max_op_sys_clk_div;
- uint32_t min_op_sys_clk_freq_hz;
- uint32_t max_op_sys_clk_freq_hz;
- uint16_t min_op_pix_clk_div;
- uint16_t max_op_pix_clk_div;
- uint32_t min_op_pix_clk_freq_hz;
- uint32_t max_op_pix_clk_freq_hz;
+ struct smiapp_pll_branch_limits vt;
+ struct smiapp_pll_branch_limits op;
/* Other relevant limits */
uint32_t min_line_length_pck_bin;
uint32_t min_line_length_pck;
};
-/* op pix clock is for all lanes in total normally */
-#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
-#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
-
struct device;
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
struct smiapp_pll *pll);
#endif /* SMIAPP_PLL_H */
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e08e588ad24..83c7ed7ffcc 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Based on smiapp driver by Vimarsh Zutshi
* Based on jt8ev1.c by Vimarsh Zutshi
@@ -252,23 +252,23 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
.min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
.max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
- .min_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
- .max_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
- .min_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
- .max_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
- .min_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
- .max_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
- .min_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
- .max_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
-
- .min_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
- .max_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
- .min_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
- .max_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
- .min_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
- .max_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
- .min_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
- .max_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
+ .op.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
+ .op.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
+ .op.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
+ .op.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
+ .op.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
+ .op.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
+ .op.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
+ .op.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
+
+ .vt.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
+ .vt.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
+ .vt.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
+ .vt.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
+ .vt.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
+ .vt.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
+ .vt.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
+ .vt.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
.min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
.min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
@@ -276,11 +276,6 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
struct smiapp_pll *pll = &sensor->pll;
int rval;
- memset(&sensor->pll, 0, sizeof(sensor->pll));
-
- pll->lanes = sensor->platform_data->lanes;
- pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
-
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
/*
* Fill in operational clock divisors limits from the
@@ -288,28 +283,14 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
* requirements regarding them are essentially the
* same as on VT ones.
*/
- lim.min_op_sys_clk_div = lim.min_vt_sys_clk_div;
- lim.max_op_sys_clk_div = lim.max_vt_sys_clk_div;
- lim.min_op_pix_clk_div = lim.min_vt_pix_clk_div;
- lim.max_op_pix_clk_div = lim.max_vt_pix_clk_div;
- lim.min_op_sys_clk_freq_hz = lim.min_vt_sys_clk_freq_hz;
- lim.max_op_sys_clk_freq_hz = lim.max_vt_sys_clk_freq_hz;
- lim.min_op_pix_clk_freq_hz = lim.min_vt_pix_clk_freq_hz;
- lim.max_op_pix_clk_freq_hz = lim.max_vt_pix_clk_freq_hz;
- /* Profile 0 sensors have no separate OP clock branch. */
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ lim.op = lim.vt;
}
- if (smiapp_needs_quirk(sensor,
- SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
- pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
-
pll->binning_horizontal = sensor->binning_horizontal;
pll->binning_vertical = sensor->binning_vertical;
pll->link_freq =
sensor->link_freq->qmenu_int[sensor->link_freq->val];
pll->scale_m = sensor->scale_m;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
pll->bits_per_pixel = sensor->csi_format->compressed;
rval = smiapp_pll_calculate(&client->dev, &lim, pll);
@@ -1010,7 +991,7 @@ static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
* do not change, or if you do at least know what you're
* doing. :-)
*
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> 2010-10-25
+ * Sakari Ailus <sakari.ailus@iki.fi> 2010-10-25
*
* flash_strobe_length [us] / 10^6 = (tFlash_strobe_width_ctrl
* / EXTCLK freq [Hz]) * flash_strobe_adjustment
@@ -2369,6 +2350,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_pll *pll = &sensor->pll;
struct smiapp_subdev *last = NULL;
u32 tmp;
unsigned int i;
@@ -2635,6 +2617,18 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (rval < 0)
goto out_nvm_release;
+ /* prepare PLL configuration input values */
+ pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ pll->csi2.lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ if (smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
rval = smiapp_update_mode(sensor);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -2893,6 +2887,6 @@ static struct i2c_driver smiapp_i2c_driver = {
module_i2c_driver(smiapp_i2c_driver);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.c b/drivers/media/i2c/smiapp/smiapp-limits.c
index fb2f81ad8c3..847cb235e19 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.c
+++ b/drivers/media/i2c/smiapp/smiapp-limits.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.h b/drivers/media/i2c/smiapp/smiapp-limits.h
index 9ae765e23ea..343e9c3827f 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.h
+++ b/drivers/media/i2c/smiapp/smiapp-limits.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index 725cf62836c..bb8c506e0e3 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.h b/drivers/media/i2c/smiapp/smiapp-quirk.h
index 86fd3e8bfb0..504a6d80ced 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.h
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg-defs.h b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
index defa7c5adeb..3aa0ca948d8 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg-defs.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 54568ca2fe6..b0dcbb8fa5e 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 70e0d8db013..4fac32cfcb3 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.h b/drivers/media/i2c/smiapp/smiapp-regs.h
index 7f9013b4797..eefc6c84d5f 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.h
+++ b/drivers/media/i2c/smiapp/smiapp-regs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 4182a695ab5..7cc5aae662f 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 333ef178d6f..d40a8858be0 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -15,6 +15,7 @@
#include <linux/log2.h>
#include <linux/module.h>
+#include <media/mt9v022.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
@@ -50,6 +51,7 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_PIXEL_OPERATION_MODE 0x0f
#define MT9V022_LED_OUT_CONTROL 0x1b
#define MT9V022_ADC_MODE_CONTROL 0x1c
+#define MT9V022_REG32 0x20
#define MT9V022_ANALOG_GAIN 0x35
#define MT9V022_BLACK_LEVEL_CALIB_CTRL 0x47
#define MT9V022_PIXCLK_FV_LV 0x74
@@ -71,7 +73,15 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_COLUMN_SKIP 1
#define MT9V022_ROW_SKIP 4
-#define is_mt9v024(id) (id == 0x1324)
+#define MT9V022_HORIZONTAL_BLANKING_MIN 43
+#define MT9V022_HORIZONTAL_BLANKING_MAX 1023
+#define MT9V022_HORIZONTAL_BLANKING_DEF 94
+#define MT9V022_VERTICAL_BLANKING_MIN 2
+#define MT9V022_VERTICAL_BLANKING_MAX 3000
+#define MT9V022_VERTICAL_BLANKING_DEF 45
+
+#define is_mt9v022_rev3(id) (id == 0x1313)
+#define is_mt9v024(id) (id == 0x1324)
/* MT9V022 has only one fixed colorspace per pixelcode */
struct mt9v022_datafmt {
@@ -136,6 +146,8 @@ struct mt9v022 {
struct v4l2_ctrl *autogain;
struct v4l2_ctrl *gain;
};
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
struct v4l2_rect rect; /* Sensor window */
const struct mt9v022_datafmt *fmt;
const struct mt9v022_datafmt *fmts;
@@ -143,6 +155,7 @@ struct mt9v022 {
int num_fmts;
int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
u16 chip_control;
+ u16 chip_version;
unsigned short y_skip_top; /* Lines to skip at the top */
};
@@ -225,12 +238,32 @@ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- if (enable)
+ if (enable) {
/* Switch to master "normal" mode */
mt9v022->chip_control &= ~0x10;
- else
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Unset snapshot mode specific settings: clear bit 9
+ * and bit 2 in reg. 0x20 when in normal mode.
+ */
+ if (reg_clear(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ } else {
/* Switch to snapshot mode */
mt9v022->chip_control |= 0x10;
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Required settings for snapshot mode: set bit 9
+ * (RST enable) and bit 2 (CR enable) in reg. 0x20
+ * See TechNote TN0960 or TN-09-225.
+ */
+ if (reg_set(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ }
if (reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control) < 0)
return -EIO;
@@ -282,11 +315,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
* Default 94, Phytec driver says:
* "width + horizontal blank >= 660"
*/
- ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
- rect.width > 660 - 43 ? 43 :
- 660 - rect.width);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->hblank,
+ rect.width > 660 - 43 ? 43 : 660 - rect.width);
if (!ret)
- ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->vblank, 45);
if (!ret)
ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width);
if (!ret)
@@ -509,6 +541,18 @@ static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
range = exp->maximum - exp->minimum;
exp->val = ((data - 1) * range + 239) / 479 + exp->minimum;
return 0;
+ case V4L2_CID_HBLANK:
+ data = reg_read(client, MT9V022_HORIZONTAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
+ case V4L2_CID_VBLANK:
+ data = reg_read(client, MT9V022_VERTICAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
}
return -EINVAL;
}
@@ -590,6 +634,16 @@ static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
return -EIO;
}
return 0;
+ case V4L2_CID_HBLANK:
+ if (reg_write(client, MT9V022_HORIZONTAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_VBLANK:
+ if (reg_write(client, MT9V022_VERTICAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
}
return -EINVAL;
}
@@ -621,6 +675,8 @@ static int mt9v022_video_probe(struct i2c_client *client)
goto ei2c;
}
+ mt9v022->chip_version = data;
+
mt9v022->reg = is_mt9v024(data) ? &mt9v024_register :
&mt9v022_register;
@@ -819,6 +875,7 @@ static int mt9v022_probe(struct i2c_client *client,
struct mt9v022 *mt9v022;
struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct mt9v022_platform_data *pdata = icl->priv;
int ret;
if (!icl) {
@@ -857,10 +914,21 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+ mt9v022->hblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_HBLANK, MT9V022_HORIZONTAL_BLANKING_MIN,
+ MT9V022_HORIZONTAL_BLANKING_MAX, 1,
+ MT9V022_HORIZONTAL_BLANKING_DEF);
+
+ mt9v022->vblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_VBLANK, MT9V022_VERTICAL_BLANKING_MIN,
+ MT9V022_VERTICAL_BLANKING_MAX, 1,
+ MT9V022_VERTICAL_BLANKING_DEF);
+
mt9v022->subdev.ctrl_handler = &mt9v022->hdl;
if (mt9v022->hdl.error) {
int err = mt9v022->hdl.error;
+ dev_err(&client->dev, "control initialisation err %d\n", err);
kfree(mt9v022);
return err;
}
@@ -871,10 +939,10 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT;
/*
- * MT9V022 _really_ corrupts the first read out line.
- * TODO: verify on i.MX31
+ * On some platforms the first read out line is corrupted.
+ * Workaround it by skipping if indicated by platform data.
*/
- mt9v022->y_skip_top = 1;
+ mt9v022->y_skip_top = pdata ? pdata->y_skip_top : 0;
mt9v022->rect.left = MT9V022_COLUMN_SKIP;
mt9v022->rect.top = MT9V022_ROW_SKIP;
mt9v022->rect.width = MT9V022_MAX_WIDTH;
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index d2d298b6354..66698a83bda 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -586,9 +586,20 @@ static const struct regval_list ov2640_format_change_preamble_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_yuv422_regs[] = {
+static const struct regval_list ov2640_yuyv_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_YUV422 },
+ { 0xd7, 0x03 },
+ { 0x33, 0xa0 },
+ { 0xe5, 0x1f },
+ { 0xe1, 0x67 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_uyvy_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_YUV422 },
- { 0xD7, 0x01 },
+ { 0xd7, 0x01 },
{ 0x33, 0xa0 },
{ 0xe1, 0x67 },
{ RESET, 0x00 },
@@ -596,7 +607,15 @@ static const struct regval_list ov2640_yuv422_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_rgb565_regs[] = {
+static const struct regval_list ov2640_rgb565_be_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_RGB565 },
+ { 0xd7, 0x03 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_rgb565_le_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_RGB565 },
{ 0xd7, 0x03 },
{ RESET, 0x00 },
@@ -605,7 +624,9 @@ static const struct regval_list ov2640_rgb565_regs[] = {
};
static enum v4l2_mbus_pixelcode ov2640_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_RGB565_2X8_BE,
V4L2_MBUS_FMT_RGB565_2X8_LE,
};
@@ -787,14 +808,22 @@ static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
/* select format */
priv->cfmt_code = 0;
switch (code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_be_regs;
+ break;
case V4L2_MBUS_FMT_RGB565_2X8_LE:
- dev_dbg(&client->dev, "%s: Selected cfmt RGB565", __func__);
- selected_cfmt_regs = ov2640_rgb565_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_le_regs;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
+ selected_cfmt_regs = ov2640_yuyv_regs;
break;
default:
case V4L2_MBUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "%s: Selected cfmt YUV422", __func__);
- selected_cfmt_regs = ov2640_yuv422_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
+ selected_cfmt_regs = ov2640_uyvy_regs;
}
/* reset hardware */
@@ -859,10 +888,12 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
mf->code = priv->cfmt_code;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -879,11 +910,13 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -896,21 +929,21 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
static int ov2640_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- const struct ov2640_win_size *win;
-
/*
- * select suitable win
+ * select suitable win, but don't store it
*/
- win = ov2640_select_win(&mf->width, &mf->height);
+ ov2640_select_win(&mf->width, &mf->height);
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 42ae9dc9c57..f434a19b9bc 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -910,18 +910,7 @@ static struct i2c_driver vs6624_driver = {
.id_table = vs6624_id,
};
-static __init int vs6624_init(void)
-{
- return i2c_add_driver(&vs6624_driver);
-}
-
-static __exit void vs6624_exit(void)
-{
- i2c_del_driver(&vs6624_driver);
-}
-
-module_init(vs6624_init);
-module_exit(vs6624_exit);
+module_i2c_driver(vs6624_driver);
MODULE_DESCRIPTION("VS6624 sensor driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index fa62475be3b..aa05ad3c1cc 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_SDIO_DRV
tristate "Siano SMS1xxx based MDTV via SDIO interface"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
depends on MMC
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for SDIO interface
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 56c6c77793d..de6f41f1918 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -200,7 +200,7 @@ static void flush_request_modules(struct bttv *dev)
}
#else
#define request_modules(dev)
-#define flush_request_modules(dev)
+#define flush_request_modules(dev) do {} while(0)
#endif /* CONFIG_MODULES */
@@ -301,11 +301,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* totalwidth */ 1135,
/* sqwidth */ 944,
/* vdelay */ 0x20,
- /* sheight */ 576,
- /* videostart0 */ 23)
/* bt878 (and bt848?) can capture another
line below active video. */
- .cropcap.bounds.height = (576 + 2) + 0x20 - 2,
+ /* sheight */ (576 + 2) + 0x20 - 2,
+ /* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
.name = "NTSC",
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 6d2a98246b6..8e971ff6058 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -197,7 +197,7 @@ err_exit:
return ret;
}
-int cx18_alsa_load(struct cx18 *cx)
+static int __init cx18_alsa_load(struct cx18 *cx)
{
struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
struct cx18_stream *s;
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 7a5b84a86bb..180077c4912 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "cx18-streams.h"
#include "cx18-fileops.h"
#include "cx18-alsa.h"
+#include "cx18-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 51609d5c88c..4908eb7bcf6 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -98,7 +98,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
case CX18_HW_Z8F0811_IR_RX_HAUP:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = cx->card_name;
info.platform_data = init_data;
break;
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 72af9b5c2d7..843c62b2f48 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -97,7 +97,7 @@ static struct {
};
-void cx18_dma_free(struct videobuf_queue *q,
+static void cx18_dma_free(struct videobuf_queue *q,
struct cx18_stream *s, struct cx18_videobuf_buffer *buf)
{
videobuf_waiton(q, &buf->vb, 0, 0);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 495781ee471..2926f7fadcc 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -263,7 +263,7 @@ static int netup_fpga_op_rw(struct fpga_internal *inter, int addr,
}
/* flag - mem/io, read - read/write */
-int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 val)
{
@@ -298,31 +298,32 @@ int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
return mem;
}
-int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr)
+static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr)
{
return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr, u8 data)
+static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL,
NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
- u8 addr, u8 data)
+static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data);
}
-int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -365,13 +366,13 @@ int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
-int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -448,8 +449,8 @@ int altera_ci_irq(void *dev)
}
EXPORT_SYMBOL(altera_ci_irq);
-int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
- int open)
+static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct altera_ci_state *state = en50221->data;
@@ -459,7 +460,7 @@ int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
return state->status;
}
-void altera_hw_filt_release(void *main_dev, int filt_nr)
+static void altera_hw_filt_release(void *main_dev, int filt_nr)
{
struct fpga_inode *temp_int = find_inode(main_dev);
struct netup_hw_pid_filter *pid_filt = NULL;
@@ -581,7 +582,7 @@ static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt,
mutex_unlock(&inter->fpga_mutex);
}
-int altera_pid_feed_control(void *demux_dev, int filt_nr,
+static int altera_pid_feed_control(void *demux_dev, int filt_nr,
struct dvb_demux_feed *feed, int onoff)
{
struct fpga_inode *temp_int = find_dinode(demux_dev);
@@ -603,41 +604,41 @@ int altera_pid_feed_control(void *demux_dev, int filt_nr,
}
EXPORT_SYMBOL(altera_pid_feed_control);
-int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 1);
return 0;
}
-int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 0);
return 0;
}
-int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 1);
}
-int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 1);
}
-int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 2);
}
-int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 2);
}
-int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
+static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
{
struct netup_hw_pid_filter *pid_filt = NULL;
struct fpga_inode *temp_int = find_inode(config->dev);
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 6617774a326..7344849183a 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "cimax2.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
@@ -87,7 +88,7 @@ struct netup_ci_state {
};
-int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -120,7 +121,7 @@ int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -147,7 +148,7 @@ int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_ci_get_mem(struct cx23885_dev *dev)
+static int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
@@ -166,7 +167,7 @@ int netup_ci_get_mem(struct cx23885_dev *dev)
return mem & 0xff;
}
-int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
@@ -248,7 +249,8 @@ int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
@@ -295,7 +297,7 @@ int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
+static int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
@@ -399,7 +401,8 @@ int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
return 1;
}
-int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
+int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct netup_ci_state *state = en50221->data;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 795169237e7..c6c9bd58f8b 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -45,8 +45,10 @@
#define AUDIO_SRAM_CHANNEL SRAM_CH07
-#define dprintk(level, fmt, arg...) if (audio_debug >= level) \
- printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (audio_debug + 1 > level) \
+ printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \
+} while(0)
#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
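The dprintk macro above is rewritten as a do { ... } while (0) block so a conditional debug print behaves as a single statement (the "debug + 1 > level" form is just another spelling of "debug >= level" for integer levels). A minimal user-space sketch of the pattern; the debug variable, message text and helper names are placeholders, not from this patch:

#include <stdio.h>

static int debug = 1;

/* Unsafe form: expands to a bare if, so a dangling `else` after a call
 * would pair with the macro's own if instead of the caller's. */
#define dprintk_unsafe(level, fmt, arg...) \
	if (debug >= (level)) \
		printf(fmt , ## arg)

/* Safe form: do/while(0) turns the body into one statement that takes a ';'. */
#define dprintk(level, fmt, arg...) do { \
	if (debug >= (level)) \
		printf(fmt , ## arg); \
} while (0)

int main(void)
{
	int streaming = 0;

	if (streaming)
		dprintk(1, "streaming\n");
	else				/* binds to if (streaming), as intended */
		dprintk(1, "idle\n");

	return 0;
}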
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index 134ebddd860..e958a01fd55 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -22,6 +22,7 @@
*/
#include "cx23885.h"
+#include "cx23885-av.h"
void cx23885_av_work_handler(struct work_struct *work)
{
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 5acdf954ff6..6277e145f0b 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1427,7 +1427,7 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
}
}
-int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
+static int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
{
int data;
int tdo = 0;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 697728f0943..065ecd54bda 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -303,7 +303,7 @@ static struct sram_channel cx23887_sram_channels[] = {
},
};
-void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
+static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
@@ -1516,8 +1516,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
buf = list_entry(q->queued.next, struct cx23885_buffer,
vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx23885_start_dma(port, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -1528,8 +1527,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
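list_del() immediately followed by list_add_tail() on the same entry is exactly what list_move_tail() does in one call, which is why the queue-restart paths above collapse the two lines. A kernel-style sketch under assumed types; struct item and both list heads are illustrative, not from this driver:

#include <linux/list.h>

struct item {
	int id;
	struct list_head queue;		/* links the item into queued/active */
};

/* Move the first queued item to the tail of the active list. */
static void promote_first(struct list_head *queued, struct list_head *active)
{
	struct item *it;

	if (list_empty(queued))
		return;

	it = list_first_entry(queued, struct item, queue);

	/* One call instead of list_del() + list_add_tail() on the same node. */
	list_move_tail(&it->queue, active);
}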
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4379d8a6dad..2f5b902e63a 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -659,7 +659,7 @@ static struct mt2063_config terratec_mt2063_config[] = {
},
};
-int netup_altera_fpga_rw(void *device, int flag, int data, int read)
+static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index 93998f22098..5444cc52600 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -29,6 +29,7 @@
*/
#include "cx23885.h"
+#include "cx23885-f300.h"
#define F300_DATA GPIO_0
#define F300_RESET GPIO_1
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 2c925f77cf2..4f1055a194b 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -40,6 +40,7 @@
#include <media/v4l2-subdev.h>
#include "cx23885.h"
+#include "cx23885-input.h"
#define MODULE_NAME "cx23885"
@@ -270,21 +271,21 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_NEC;
+ allowed_protos = RC_BIT_NEC;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
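The RC_TYPE_* values above become RC_BIT_* because fields such as allowed_protos hold a protocol bitmap rather than a single protocol number, and the RC_BIT_* defines are the bit-flag spellings for such masks. A small kernel-style sketch; the helper and the particular combination are made up for illustration:

#include <media/rc-core.h>

/* Hypothetical helper: build the allowed_protos mask for a board. */
static u64 board_allowed_protos(bool nec_only)
{
	if (nec_only)
		return RC_BIT_NEC;		/* a single protocol bit */

	/* RC_BIT_* values are bit flags, so several protocols can be ORed. */
	return RC_BIT_RC5 | RC_BIT_NEC;
}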
diff --git a/drivers/media/pci/cx23885/cx23885-input.h b/drivers/media/pci/cx23885/cx23885-input.h
index 75ef15d3f52..87dc44e6997 100644
--- a/drivers/media/pci/cx23885/cx23885-input.h
+++ b/drivers/media/pci/cx23885/cx23885-input.h
@@ -23,7 +23,7 @@
#ifndef _CX23885_INPUT_H_
#define _CX23885_INPUT_H_
-int cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
+void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
int cx23885_input_init(struct cx23885_dev *dev);
void cx23885_input_fini(struct cx23885_dev *dev);
diff --git a/drivers/media/pci/cx23885/cx23885-ioctl.c b/drivers/media/pci/cx23885/cx23885-ioctl.c
index 44812ca7889..ea9a614f3bb 100644
--- a/drivers/media/pci/cx23885/cx23885-ioctl.c
+++ b/drivers/media/pci/cx23885/cx23885-ioctl.c
@@ -22,6 +22,8 @@
*/
#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
#include <media/v4l2-chip-ident.h>
int cx23885_g_chip_ident(struct file *file, void *fh,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 7125247dd25..bfef1935929 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -24,6 +24,7 @@
#include <media/v4l2-device.h>
#include "cx23885.h"
+#include "cx23885-ir.h"
#include "cx23885-input.h"
#define CX23885_IR_RX_FIFO_SERVICE_REQ 0
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c2bc39c58f8..c4bd1e95d33 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -29,6 +29,7 @@
#include <media/rc-core.h>
#include "cx23885.h"
+#include "cx23888-ir.h"
static unsigned int ir_888_debug;
module_param(ir_888_debug, int, 0644);
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index f4893e69cd8..0044fef7ca2 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "netup-init.h"
static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
{
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 8b2a99975c2..87491ca05ee 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -44,7 +44,7 @@ MODULE_LICENSE("GPL");
static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF |
FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR;
-int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
@@ -133,7 +133,7 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
struct pci_dev *pci,
unsigned int bpl, unsigned int lines)
{
@@ -197,7 +197,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
return 0;
}
-void cx25821_free_memory_audio(struct cx25821_dev *dev)
+static void cx25821_free_memory_audio(struct cx25821_dev *dev)
{
if (dev->_risc_virt_addr) {
pci_free_consistent(dev->pci, dev->_audiorisc_size,
@@ -256,7 +256,7 @@ void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
cx25821_free_memory_audio(dev);
}
-int cx25821_get_audio_data(struct cx25821_dev *dev,
+static int cx25821_get_audio_data(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -351,7 +351,7 @@ static void cx25821_audioups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile_audio(struct cx25821_dev *dev,
+static int cx25821_openfile_audio(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -490,7 +490,7 @@ error:
return ret;
}
-int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
+static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
u32 status)
{
int i = 0;
@@ -634,8 +634,8 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
}
-int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -700,9 +700,7 @@ fail_irq:
int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
{
struct sram_channel *sram_ch;
- int retval = 0;
int err = 0;
- int str_length = 0;
if (dev->_audio_is_running) {
pr_warn("Audio Channel is still running so return!\n");
@@ -731,27 +729,29 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
_line_size = AUDIO_LINE_SIZE;
if (dev->input_audiofilename) {
- str_length = strlen(dev->input_audiofilename);
- dev->_audiofilename = kmemdup(dev->input_audiofilename,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(dev->input_audiofilename,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
/* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0)
dev->_audiofilename = "/root/audioGOOD.wav";
} else {
- str_length = strlen(_defaultAudioName);
- dev->_audiofilename = kmemdup(_defaultAudioName,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(_defaultAudioName,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
}
- retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
- _line_size, 0);
+ cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
+ _line_size, 0);
dev->audio_upstream_riscbuf_size =
AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
@@ -759,9 +759,9 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
+ err = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
_line_size);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Audio upstream buffers!\n",
dev->name);
goto error;
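kmemdup(src, strlen(src) + 1, GFP_KERNEL) is an open-coded kstrdup(), and the rewritten error paths above now return -ENOMEM explicitly when the copy fails instead of falling through. A minimal kernel-style sketch of the same pattern; the helper and parameter names are hypothetical:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical helper: duplicate a requested name or fall back to a default. */
static int set_stream_name(char **dst, const char *requested,
			   const char *fallback)
{
	*dst = kstrdup(requested ? requested : fallback, GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;	/* report the failure instead of continuing */

	return 0;
}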
diff --git a/drivers/media/pci/cx25821/cx25821-biffuncs.h b/drivers/media/pci/cx25821/cx25821-biffuncs.h
index 9326a7c729e..937f5a70fb7 100644
--- a/drivers/media/pci/cx25821/cx25821-biffuncs.h
+++ b/drivers/media/pci/cx25821/cx25821-biffuncs.h
@@ -25,17 +25,17 @@
#define SetBit(Bit) (1 << Bit)
-inline u8 getBit(u32 sample, u8 index)
+static inline u8 getBit(u32 sample, u8 index)
{
return (u8) ((sample >> index) & 1);
}
-inline u32 clearBitAtPos(u32 value, u8 bit)
+static inline u32 clearBitAtPos(u32 value, u8 bit)
{
return value & ~(1 << bit);
}
-inline u32 setBitAtPos(u32 sample, u8 bit)
+static inline u32 setBitAtPos(u32 sample, u8 bit)
{
sample |= (1 << bit);
return sample;
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 9844549764c..a8dc945bbe1 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -329,7 +329,8 @@ int cx25821_i2c_unregister(struct cx25821_i2c *bus)
return 0;
}
-void cx25821_av_clk(struct cx25821_dev *dev, int enable)
+#if 0 /* Currently unused */
+static void cx25821_av_clk(struct cx25821_dev *dev, int enable)
{
/* write 0 to bus 2 addr 0x144 via i2x_xfer() */
char buffer[3];
@@ -351,6 +352,7 @@ void cx25821_av_clk(struct cx25821_dev *dev, int enable)
i2c_xfer(&dev->i2c_bus[0].i2c_adap, &msg, 1);
}
+#endif
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
index d33fc1a2303..cf2723c7197 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
@@ -123,10 +123,11 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset, unsigned int bpl,
- unsigned int lines)
+static int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl,
+ unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -255,7 +256,8 @@ void cx25821_free_mem_upstream_ch2(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index_ch2;
@@ -360,7 +362,8 @@ static void cx25821_vidups_handler_ch2(struct work_struct *work)
_channel2_upstream_select].sram_channels);
}
-int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -507,8 +510,9 @@ error:
return ret;
}
-int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev,
+ int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -647,8 +651,8 @@ static void cx25821_set_pixelengine_ch2(struct cx25821_dev *dev,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -704,11 +708,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
- int str_length = 0;
if (dev->_is_running_ch2) {
pr_info("Video Channel is still running so return!\n");
@@ -744,20 +746,16 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
risc_buffer_size = dev->_isNTSC_ch2 ?
NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
- if (dev->input_filename_ch2) {
- str_length = strlen(dev->input_filename_ch2);
- dev->_filename_ch2 = kmemdup(dev->input_filename_ch2,
- str_length + 1, GFP_KERNEL);
-
- if (!dev->_filename_ch2)
- goto error;
- } else {
- str_length = strlen(dev->_defaultname_ch2);
- dev->_filename_ch2 = kmemdup(dev->_defaultname_ch2,
- str_length + 1, GFP_KERNEL);
+ if (dev->input_filename_ch2)
+ dev->_filename_ch2 = kstrdup(dev->input_filename_ch2,
+ GFP_KERNEL);
+ else
+ dev->_filename_ch2 = kstrdup(dev->_defaultname_ch2,
+ GFP_KERNEL);
- if (!dev->_filename_ch2)
- goto error;
+ if (!dev->_filename_ch2) {
+ err = -ENOENT;
+ goto error;
}
/* Default if filename is empty string */
@@ -773,7 +771,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
}
}
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size_ch2, 0);
/* setup fifo + format */
@@ -783,9 +781,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size_ch2 = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
+ err = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
dev->_line_size_ch2);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 6759fff8eb6..7fc97110d97 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -173,10 +173,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
return rp;
}
-int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset,
- unsigned int bpl, unsigned int lines)
+static int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl, unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -300,7 +300,8 @@ void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index;
@@ -405,7 +406,8 @@ static void cx25821_vidups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -486,8 +488,9 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
return 0;
}
-int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
- struct sram_channel *sram_ch, int bpl)
+static int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch,
+ int bpl)
{
int ret = 0;
dma_addr_t dma_addr;
@@ -548,8 +551,8 @@ error:
return ret;
}
-int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -664,8 +667,9 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
- int pix_format)
+static void cx25821_set_pixelengine(struct cx25821_dev *dev,
+ struct sram_channel *ch,
+ int pix_format)
{
int width = WIDTH_D1;
int height = dev->_lines_count;
@@ -696,8 +700,8 @@ void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -753,7 +757,6 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
@@ -796,15 +799,19 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_filename = kmemdup(dev->input_filename, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
} else {
str_length = strlen(dev->_defaultname);
dev->_filename = kmemdup(dev->_defaultname, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
}
/* Default if filename is empty string */
@@ -828,7 +835,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ?
(WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size, 0);
/* setup fifo + format */
@@ -838,8 +845,8 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
- if (retval < 0) {
+ err = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 0a80245165d..53b16dd7032 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -291,9 +291,9 @@ int cx25821_start_video_dma(struct cx25821_dev *dev,
return 0;
}
-int cx25821_restart_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q,
- struct sram_channel *channel)
+static int cx25821_restart_video_queue(struct cx25821_dev *dev,
+ struct cx25821_dmaqueue *q,
+ struct sram_channel *channel)
{
struct cx25821_buffer *buf, *prev;
struct list_head *item;
@@ -342,7 +342,7 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
}
}
-void cx25821_vid_timeout(unsigned long data)
+static void cx25821_vid_timeout(unsigned long data)
{
struct cx25821_data *timeout_data = (struct cx25821_data *)data;
struct cx25821_dev *dev = timeout_data->dev;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 3aa6856ead3..d2de1a913e1 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -45,11 +45,15 @@
#include "cx88.h"
#include "cx88-reg.h"
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg)
-
-#define dprintk_core(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
+
+#define dprintk_core(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
/****************************************************************************
Data type declarations - Can be moded to a header file later
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 62184eb919e..a6ff8a6f4fc 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -53,9 +53,10 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg)
-
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
+} while(0)
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index c97b174be3a..19a58754c6e 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -646,22 +646,22 @@ int cx88_reset(struct cx88_core *core)
/* ------------------------------------------------------------------ */
-static unsigned int inline norm_swidth(v4l2_std_id norm)
+static inline unsigned int norm_swidth(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
}
-static unsigned int inline norm_hdelay(v4l2_std_id norm)
+static inline unsigned int norm_hdelay(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 135 : 186;
}
-static unsigned int inline norm_vdelay(v4l2_std_id norm)
+static inline unsigned int norm_vdelay(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 0x24 : 0x18;
}
-static unsigned int inline norm_fsc8(v4l2_std_id norm)
+static inline unsigned int norm_fsc8(v4l2_std_id norm)
{
if (norm & V4L2_STD_PAL_M)
return 28604892; // 3.575611 MHz
@@ -681,7 +681,7 @@ static unsigned int inline norm_fsc8(v4l2_std_id norm)
return 35468950; // 4.43361875 MHz +/- 5 Hz
}
-static unsigned int inline norm_htotal(v4l2_std_id norm)
+static inline unsigned int norm_htotal(v4l2_std_id norm)
{
unsigned int fsc4=norm_fsc8(norm)/2;
@@ -692,7 +692,7 @@ static unsigned int inline norm_htotal(v4l2_std_id norm)
((fsc4+262)/525*1001+15000)/30000;
}
-static unsigned int inline norm_vbipack(v4l2_std_id norm)
+static inline unsigned int norm_vbipack(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 511 : 400;
}
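"static unsigned int inline" is valid C, but kernel style puts inline ahead of the type, and gcc's -Wold-style-declaration typically warns about the trailing placement; hence the reordering in the hunks above. A trivial before/after sketch using made-up helper names:

/* Old ordering: valid C, but gcc -Wold-style-declaration warns that
 * 'inline' is not at the beginning of the declaration. */
static unsigned int inline norm_maxw_old(int wide)
{
	return wide ? 768 : 720;
}

/* Preferred ordering: storage class, then inline, then the return type. */
static inline unsigned int norm_maxw_new(int wide)
{
	return wide ? 768 : 720;
}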
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index ebf448c48ca..f29e18c72f4 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -248,7 +248,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
struct cx88_IR *ir;
struct rc_dev *dev;
char *ir_codes = NULL;
- u64 rc_type = RC_TYPE_OTHER;
+ u64 rc_type = RC_BIT_OTHER;
int err = -ENOMEM;
u32 hardware_mask = 0; /* For devices with a hardware mask, when
* used with a full-code IR table
@@ -416,7 +416,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
- rc_type = RC_TYPE_NEC;
+ rc_type = RC_BIT_NEC;
ir->sampling = 0xff00; /* address */
break;
}
@@ -592,7 +592,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
case CX88_BOARD_LEADTEK_PVR2000:
addr_list = pvr2000_addr_list;
core->init_data.name = "cx88 Leadtek PVR 2000 remote";
- core->init_data.type = RC_TYPE_UNKNOWN;
+ core->init_data.type = RC_BIT_UNKNOWN;
core->init_data.get_key = get_key_pvr2000;
core->init_data.ir_codes = RC_MAP_EMPTY;
break;
@@ -613,7 +613,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
/* Hauppauge XVR */
core->init_data.name = "cx88 Hauppauge XVR remote";
core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- core->init_data.type = RC_TYPE_RC5;
+ core->init_data.type = RC_BIT_RC5;
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index d154bc19735..d46b008a46b 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -45,11 +45,15 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
+} while(0)
-#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg)
+#define mpeg_dbg(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
+} while(0)
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
@@ -217,8 +221,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
return 0;
buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx8802_start_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -229,8 +232,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 44ffc8b3d45..ba0dba4a4d2 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -94,13 +94,13 @@ enum cx8802_board_access {
/* ----------------------------------------------------------- */
/* tv norms */
-static unsigned int inline norm_maxw(v4l2_std_id norm)
+static inline unsigned int norm_maxw(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768;
}
-static unsigned int inline norm_maxh(v4l2_std_id norm)
+static inline unsigned int norm_maxh(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 576 : 480;
}
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a609b3a9b14..f288ffcc4b6 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -736,7 +736,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
+static int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct rc_dev *dev;
int err = -ENOMEM;
@@ -776,7 +776,7 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
return 0;
}
-void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
+static void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
{
rc_unregister_device(dm1105->ir.dev);
}
@@ -1128,8 +1128,10 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
INIT_WORK(&dev->work, dm1105_dmx_buffer);
sprintf(dev->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num);
dev->wq = create_singlethread_workqueue(dev->wqn);
- if (!dev->wq)
+ if (!dev->wq) {
+ ret = -ENOMEM;
goto err_dvb_net;
+ }
ret = request_irq(pdev->irq, dm1105_irq, IRQF_SHARED,
DRIVER_NAME, dev);
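The dm1105 probe fix above sets ret = -ENOMEM before jumping to the unwind label, so a failed create_singlethread_workqueue() no longer exits through the error path with a stale (likely zero) return value. A condensed sketch of the pattern; the function and label names are illustrative:

#include <linux/workqueue.h>
#include <linux/errno.h>

/* Hypothetical probe step: allocate a workqueue, unwinding on failure. */
static int demo_probe_step(struct workqueue_struct **wq, const char *name)
{
	int ret = 0;

	*wq = create_singlethread_workqueue(name);
	if (!*wq) {
		ret = -ENOMEM;	/* set the error code before taking the exit path */
		goto err_out;
	}

	return 0;

err_out:
	/* earlier resources would be released here */
	return ret;
}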
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8deab1629b3..4a221c69399 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -205,7 +205,7 @@ err_exit:
return ret;
}
-int ivtv_alsa_load(struct ivtv *itv)
+static int __init ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index f7022bd58ff..e1863dbf4ed 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "ivtv-streams.h"
#include "ivtv-fileops.h"
#include "ivtv-alsa.h"
+#include "ivtv-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
@@ -69,8 +70,9 @@ static struct snd_pcm_hardware snd_ivtv_hw_capture = {
.periods_max = 98, /* 12544, */
};
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc, u8 *pcm_data,
- size_t num_bytes)
+static void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc,
+ u8 *pcm_data,
+ size_t num_bytes)
{
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 5ab18319ea4..23dfe0d1240 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -21,7 +21,3 @@
*/
int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
-
-/* Used by ivtv driver to announce the PCM data to the module */
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *card, u8 *pcm_data,
- size_t num_bytes);
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 6ec7705af55..68387d4369d 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -276,7 +276,7 @@ void ivtv_init_mpeg_decoder(struct ivtv *itv)
}
/* Try to restart the card & restore previous settings */
-int ivtv_firmware_restart(struct ivtv *itv)
+static int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index d47f41a0ef6..46e262becb6 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -200,21 +200,21 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
- init_data->type = RC_TYPE_OTHER;
+ init_data->type = RC_BIT_OTHER;
init_data->name = "AVerMedia AVerTV card";
break;
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -222,7 +222,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->name = itv->card_name;
/* FIXME: The protocol and RC_MAP needs to be corrected */
init_data->ir_codes = RC_MAP_EMPTY;
- init_data->type = RC_TYPE_UNKNOWN;
+ init_data->type = RC_BIT_UNKNOWN;
break;
}
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 949ae230e11..7a8b0d0b612 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -993,7 +993,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
v4l2_std_id std;
int i;
- if (inp < 0 || inp >= itv->nof_inputs)
+ if (inp >= itv->nof_inputs)
return -EINVAL;
if (inp == itv->active_input) {
@@ -1168,7 +1168,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
}
}
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = fh2id(fh)->itv;
diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c
index db6d54d3fec..0e5252e5c0e 100644
--- a/drivers/media/pci/mantis/mantis_input.c
+++ b/drivers/media/pci/mantis/mantis_input.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#if 0 /* Currently unused */
+
#include <media/rc-core.h>
#include <linux/pci.h>
@@ -150,10 +152,11 @@ out:
return err;
}
-int mantis_exit(struct mantis_pci *mantis)
+int mantis_init_exit(struct mantis_pci *mantis)
{
rc_unregister_device(mantis->rc);
rc_map_unregister(&ir_mantis_map);
return 0;
}
+#endif
diff --git a/drivers/media/pci/mantis/mantis_uart.c b/drivers/media/pci/mantis/mantis_uart.c
index 85e977861b4..a7071921863 100644
--- a/drivers/media/pci/mantis/mantis_uart.c
+++ b/drivers/media/pci/mantis/mantis_uart.c
@@ -61,7 +61,7 @@ static struct {
#define UART_MAX_BUF 16
-int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
+static int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
{
struct mantis_hwconfig *config = mantis->hwconfig;
u32 stat = 0, i;
diff --git a/drivers/media/pci/mantis/mantis_vp1033.c b/drivers/media/pci/mantis/mantis_vp1033.c
index ad013e93ed1..115003e8d19 100644
--- a/drivers/media/pci/mantis/mantis_vp1033.c
+++ b/drivers/media/pci/mantis/mantis_vp1033.c
@@ -83,7 +83,7 @@ u8 lgtdqcs001f_inittab[] = {
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
-int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
+static int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
@@ -115,8 +115,8 @@ int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
return 0;
}
-int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
- u32 srate, u32 ratio)
+static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index e5a76da8608..ae7d32027bf 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1945,7 +1945,7 @@ static struct pci_driver meye_driver = {
static int __init meye_init(void)
{
gbuffers = max(2, min((int)gbuffers, MEYE_MAX_BUFNBRS));
- if (gbufsize < 0 || gbufsize > MEYE_MAX_BUFSIZE)
+ if (gbufsize > MEYE_MAX_BUFSIZE)
gbufsize = MEYE_MAX_BUFSIZE;
gbufsize = PAGE_ALIGN(gbufsize);
printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 96a13ed197d..b38bce52956 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -425,8 +425,10 @@ static int ReadEEProm(struct i2c_adapter *adapter,
status = i2c_read_eeprom(adapter, 0x50, Addr, data, Length);
if (!status) {
*pLength = EETag[2];
+#if 0
if (Length < EETag[2])
- ; /*status=STATUS_BUFFER_OVERFLOW; */
+ status = STATUS_BUFFER_OVERFLOW;
+#endif
}
}
return status;
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index c8e0d5b99d4..8eeec4f50cc 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -752,8 +752,8 @@ void set_transfer(struct ngene_channel *chan, int state)
if (chan->mode & NGENE_IO_TSIN)
chan->pBufferExchange = tsin_exchange;
spin_unlock_irq(&chan->state_lock);
- } else
- ;/* printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
+ }
+ /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
ngreadl(0x9310)); */
ret = ngene_command_stream_control(dev, chan->number,
@@ -1691,7 +1691,8 @@ int __devinit ngene_probe(struct pci_dev *pci_dev,
dev->i2c_current_bus = -1;
/* Register DVB adapters and devices for both channels */
- if (init_channels(dev) < 0)
+ stat = init_channels(dev);
+ if (stat < 0)
goto fail2;
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index f2b37e05b96..8976d0e6581 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -944,8 +944,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
/* board config */
dev->board = pci_id->driver_data;
- if (card[dev->nr] >= 0 &&
- card[dev->nr] < saa7134_bcount)
+ if ((unsigned)card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
if (SAA7134_BOARD_UNKNOWN == dev->board)
must_configure_manually(0);
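card[] is a signed module parameter, so the original test needed both a lower and an upper bound; casting the index to unsigned folds the negative case into the upper-bound comparison. A tiny stand-alone illustration of the idiom, with made-up names:

#include <stdio.h>

/* One unsigned compare rejects both negative and too-large indices. */
static int index_in_range(int idx, unsigned int count)
{
	return (unsigned int)idx < count;
}

int main(void)
{
	printf("%d %d %d\n",
	       index_in_range(-1, 8),	/* 0: negative wraps to a huge value */
	       index_in_range(3, 8),	/* 1 */
	       index_in_range(8, 8));	/* 0 */
	return 0;
}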
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 0f78f5e537e..e761262f747 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -990,7 +990,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
dev->init_data.ir_codes = RC_MAP_BEHOLD;
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 4a77124ee70..3abf52711e1 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2511,7 +2511,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
/* sanitycheck insmod options */
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
- if (gbufsize < 0 || gbufsize > gbufsize_max)
+ if (gbufsize > gbufsize_max)
gbufsize = gbufsize_max;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index eff7135cf0e..e042963d377 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -165,7 +165,7 @@ int saa7164_api_set_vbi_format(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_gop_size(struct saa7164_port *port)
+static int saa7164_api_set_gop_size(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct tmComResEncVideoGopStructure gs;
@@ -619,7 +619,7 @@ int saa7164_api_get_videomux(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
+static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
{
struct saa7164_dev *dev = port->dev;
@@ -822,8 +822,8 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
&reg[0], 128, buf);
}
-int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
- struct saa7164_port *port)
+static int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
+ struct saa7164_port *port)
{
struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;
@@ -858,9 +858,10 @@ int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResTSFormatDescrHeader *tsfmt)
+static int
+saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResTSFormatDescrHeader *tsfmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
@@ -892,9 +893,10 @@ int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResPSFormatDescrHeader *fmt)
+static int
+saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResPSFormatDescrHeader *fmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
@@ -925,7 +927,7 @@ int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
+static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
struct saa7164_port *tsport = NULL;
struct saa7164_port *encport = NULL;
@@ -1486,7 +1488,7 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
return ret == SAA_OK ? 0 : -EIO;
}
-int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
+static int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
u8 pin, u8 state)
{
int ret;
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a7f58a99875..5f6f3094c44 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -81,7 +81,7 @@ void saa7164_bus_dump(struct saa7164_dev *dev)
}
/* Intensionally throw a BUG() if the state of the message bus looks corrupt */
-void saa7164_bus_verify(struct saa7164_dev *dev)
+static void saa7164_bus_verify(struct saa7164_dev *dev)
{
struct tmComResBusInfo *b = &dev->bus;
int bug = 0;
@@ -106,8 +106,8 @@ void saa7164_bus_verify(struct saa7164_dev *dev)
}
}
-void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m,
- void *buf)
+static void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo *m,
+ void *buf)
{
dprintk(DBGLVL_BUS, "Dumping msg structure:\n");
dprintk(DBGLVL_BUS, " .id = %d\n", m->id);
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 62fac7f9d04..cfabcbacc33 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -23,7 +23,7 @@
#include "saa7164.h"
-int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
+static int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
{
int i, ret = -1;
@@ -42,7 +42,7 @@ int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
return ret;
}
-void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -54,7 +54,7 @@ void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -64,7 +64,7 @@ void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
+static u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
{
int ret = 0;
@@ -132,7 +132,7 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
/* Commands to the f/w get marshelled to/from this code then onto the PCI
* -bus/c running buffer. */
-int saa7164_cmd_dequeue(struct saa7164_dev *dev)
+static int saa7164_cmd_dequeue(struct saa7164_dev *dev)
{
int loop = 1;
int ret;
@@ -186,8 +186,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
return SAA_OK;
}
-int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
- void *buf)
+static int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
+ void *buf)
{
struct tmComResBusInfo *bus = &dev->bus;
u8 cmd_sent;
@@ -259,7 +259,7 @@ out:
/* Wait for a signal event, without holding a mutex. Either return TIMEOUT if
* the event never occurred, or SAA_OK if it was signaled during the wait.
*/
-int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
+static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
{
wait_queue_head_t *q = NULL;
int ret = SAA_BUS_TIMEOUT;
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 2c9ad878bef..063047f5676 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -410,7 +410,7 @@ static void saa7164_work_enchandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
@@ -486,7 +486,7 @@ static void saa7164_work_vbihandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index a9ed686ad08..994018e2d0d 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1101,7 +1101,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -1287,8 +1288,8 @@ static const struct v4l2_file_operations mpeg_fops = {
.unlocked_ioctl = video_ioctl2,
};
-int saa7164_g_chip_ident(struct file *file, void *fh,
- struct v4l2_dbg_chip_ident *chip)
+static int saa7164_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *chip)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1297,8 +1298,8 @@ int saa7164_g_chip_ident(struct file *file, void *fh,
return 0;
}
-int saa7164_g_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1310,8 +1311,8 @@ int saa7164_g_register(struct file *file, void *fh,
return 0;
}
-int saa7164_s_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index a266bf0169e..86763203d61 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -37,7 +37,7 @@ struct fw_header {
u32 version;
};
-int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while ((saa7164_readl(reg) & 0x01) == 0) {
@@ -53,7 +53,7 @@ int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
return 0;
}
-int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while (saa7164_readl(reg) & 0x01) {
@@ -71,8 +71,8 @@ int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
/* TODO: move dlflags into dev-> and change to write/readl/b */
/* TODO: Excessive levels of debug */
-int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
- u32 dlflags, u8 *dst, u32 dstsize)
+static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
+ u32 dlflags, u8 *dst, u32 dstsize)
{
u32 reg, timeout, offset;
u8 *srcbuf = NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index d8e6c8f1407..b4532299c0e 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -984,7 +984,8 @@ out:
return ret;
}
-int saa7164_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int saa7164_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
/* ntsc */
f->fmt.vbi.samples_per_line = 1600;
@@ -1047,7 +1048,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 88b3b2d6cc0..a378662b1dc 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/time.h>
#include <linux/dvb/video.h>
#include <linux/dvb/audio.h>
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 12ddb53c58d..1f8b1bb0bf9 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1477,8 +1477,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (saa7113_init(budget_av) == 0) {
budget_av->has_saa7113 = 1;
-
- if (0 != saa7146_vv_init(dev, &vv_data)) {
+ err = saa7146_vv_init(dev, &vv_data);
+ if (err != 0) {
/* fixme: proper cleanup here */
ERR("cannot init vv subsystem\n");
return err;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 181c7686e41..3dcfea612c4 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -109,6 +109,18 @@ config VIDEO_OMAP3_DEBUG
---help---
Enable debug messages on OMAP 3 camera controller driver.
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (PLAT_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/s5p-fimc/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index baaa55026c8..4817d280217 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VIDEO_CODA) += coda.o
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index cb2eb26850b..ec476ef5b70 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -1050,19 +1050,7 @@ static struct platform_driver bcap_driver = {
.probe = bcap_probe,
.remove = __devexit_p(bcap_remove),
};
-
-static __init int bcap_init(void)
-{
- return platform_driver_register(&bcap_driver);
-}
-
-static __exit void bcap_exit(void)
-{
- platform_driver_unregister(&bcap_driver);
-}
-
-module_init(bcap_init);
-module_exit(bcap_exit);
+module_platform_driver(bcap_driver);
MODULE_DESCRIPTION("Analog Devices blackfin video capture driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index cd04ae252c3..7b8b547f2d5 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1540,7 +1540,7 @@ static irqreturn_t coda_irq_handler(int irq, void *data)
u32 wr_ptr, start_ptr;
struct coda_ctx *ctx;
- __cancel_delayed_work(&dev->timeout);
+ cancel_delayed_work(&dev->timeout);
/* read status register to attend the IRQ */
coda_read(dev, CODA_REG_BIT_INT_STATUS);
@@ -1877,7 +1877,7 @@ static const struct coda_devtype coda_devdata[] = {
static struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_7541 },
+ { .name = "coda-imx53", .driver_data = CODA_IMX53 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, coda_platform_ids);
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 78e26d24f63..3c56037c82f 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -101,7 +101,7 @@ config VIDEO_DM644X_VPBE
tristate "DM644X VPBE HW module"
depends on ARCH_DAVINCI_DM644x
select VIDEO_VPSS_SYSTEM
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
help
Enables VPBE modules used for display on a DM644x
SoC.
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index ce0e4131c06..030950dcfb1 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -1003,7 +1003,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1014,7 +1014,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1034,8 +1034,10 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1050,6 +1052,8 @@ static int dm355_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
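The dm355_ccdc hunks above, and the dm644x_ccdc and isif hunks that follow, replace clk_enable()/clk_disable() with clk_prepare_enable()/clk_disable_unprepare(): with the common clock framework a clock must be prepared (a step that may sleep) before the atomic enable, and the combined helpers keep the two steps paired. A minimal sketch of the pattern, with hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>

static int foo_clock_on(struct device *dev, struct clk **out)
{
	struct clk *mclk = clk_get(dev, "master");	/* "master" is illustrative */

	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* prepare (may sleep) + enable in one call */
	if (clk_prepare_enable(mclk)) {
		clk_put(mclk);
		return -ENODEV;
	}
	*out = mclk;
	return 0;
}

/* teardown mirrors it: clk_disable_unprepare(mclk); clk_put(mclk); */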
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index ee7942b1996..0215ab6ebc9 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -994,7 +994,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1005,7 +1005,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1013,8 +1013,10 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1029,6 +1031,8 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
@@ -1046,8 +1050,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
/* Disable CCDC */
ccdc_enable(0);
/* Disable both master and slave clock */
- clk_disable(ccdc_cfg.mclk);
- clk_disable(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
return 0;
}
@@ -1055,8 +1059,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
static int dm644x_ccdc_resume(struct device *dev)
{
/* Enable both master and slave clock */
- clk_enable(ccdc_cfg.mclk);
- clk_enable(ccdc_cfg.sclk);
+ clk_prepare_enable(ccdc_cfg.mclk);
+ clk_prepare_enable(ccdc_cfg.sclk);
/* Restore CCDC context */
ccdc_restore_context();
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index b99d5423e3a..2c26c3e1837 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1053,7 +1053,7 @@ static int __devinit isif_probe(struct platform_device *pdev)
status = PTR_ERR(isif_cfg.mclk);
goto fail_mclk;
}
- if (clk_enable(isif_cfg.mclk)) {
+ if (clk_prepare_enable(isif_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1125,6 +1125,7 @@ fail_nobase_res:
i--;
}
fail_mclk:
+ clk_disable_unprepare(isif_cfg.mclk);
clk_put(isif_cfg.mclk);
vpfe_unregister_ccdc_device(&isif_hw_dev);
return status;
@@ -1145,6 +1146,8 @@ static int isif_remove(struct platform_device *pdev)
i++;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
+ clk_disable_unprepare(isif_cfg.mclk);
+ clk_put(isif_cfg.mclk);
return 0;
}
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 69d7a58c92c..7f5cf9b347b 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -612,7 +612,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
ret = PTR_ERR(vpbe_dev->dac_clk);
goto fail_mutex_unlock;
}
- if (clk_enable(vpbe_dev->dac_clk)) {
+ if (clk_prepare_enable(vpbe_dev->dac_clk)) {
ret = -ENODEV;
goto fail_mutex_unlock;
}
@@ -759,8 +759,10 @@ fail_kfree_encoders:
fail_dev_unregister:
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
fail_clk_put:
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
fail_mutex_unlock:
mutex_unlock(&vpbe_dev->lock);
return ret;
@@ -777,8 +779,10 @@ fail_mutex_unlock:
static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
{
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
kfree(vpbe_dev->amp);
kfree(vpbe_dev->encoders);
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 161c77650e2..2bfde7958fe 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -47,6 +47,9 @@ static int debug;
module_param(debug, int, 0644);
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer);
+
static int venc_is_second_field(struct vpbe_display *disp_dev)
{
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
@@ -73,10 +76,11 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
if (layer->cur_frm == layer->next_frm)
return;
ktime_get_ts(&timevalue);
- layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
- layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
- layer->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&layer->cur_frm->done);
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_sec =
+ timevalue.tv_sec;
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_usec =
+ timevalue.tv_nsec / NSEC_PER_USEC;
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
layer->cur_frm = layer->next_frm;
}
@@ -99,16 +103,14 @@ static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
* otherwise hold on current frame
* Get next from the buffer queue
*/
- layer->next_frm = list_entry(
- layer->dma_queue.next,
- struct videobuf_buffer,
- queue);
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
/* Remove that from the buffer queue */
- list_del(&layer->next_frm->queue);
+ list_del(&layer->next_frm->list);
spin_unlock(&disp_obj->dma_queue_lock);
/* Mark state of the frame to active */
- layer->next_frm->state = VIDEOBUF_ACTIVE;
- addr = videobuf_to_dma_contig(layer->next_frm);
+ layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
addr,
@@ -199,39 +201,29 @@ static irqreturn_t venc_isr(int irq, void *arg)
/*
* vpbe_buffer_prepare()
- * This is the callback function called from videobuf_qbuf() function
+ * This is the callback function called from vb2_qbuf() function
* the buffer is prepared and user space virtual address is converted into
* physical address
*/
-static int vpbe_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vpbe_buffer_prepare(struct vb2_buffer *vb)
{
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
unsigned long addr;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"vpbe_buffer_prepare\n");
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = layer->pix_fmt.width;
- vb->height = layer->pix_fmt.height;
- vb->size = layer->pix_fmt.sizeimage;
- vb->field = field;
-
- ret = videobuf_iolock(q, vb, NULL);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
- user address\n");
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- }
-
- addr = videobuf_to_dma_contig(vb);
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (q->streaming) {
if (!IS_ALIGNED(addr, 8)) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -240,7 +232,6 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
return -EINVAL;
}
}
- vb->state = VIDEOBUF_PREPARED;
}
return 0;
}
@@ -249,22 +240,26 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
* vpbe_buffer_setup()
* This function allocates memory for the buffers
*/
-static int vpbe_buffer_setup(struct videobuf_queue *q,
- unsigned int *count,
- unsigned int *size)
+static int
+vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
- *size = layer->pix_fmt.sizeimage;
-
/* Store number of buffers allocated in numbuffer member */
- if (*count < VPBE_DEFAULT_NUM_BUFS)
- *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+ if (*nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ *nplanes = 1;
+ sizes[0] = layer->pix_fmt.sizeimage;
+ alloc_ctxs[0] = layer->alloc_ctx;
return 0;
}
@@ -273,11 +268,12 @@ static int vpbe_buffer_setup(struct videobuf_queue *q,
* vpbe_buffer_queue()
* This function adds the buffer to DMA queue
*/
-static void vpbe_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buffer_queue(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
struct vpbe_layer *layer = fh->layer;
struct vpbe_display *disp = fh->disp_dev;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
@@ -288,39 +284,125 @@ static void vpbe_buffer_queue(struct videobuf_queue *q,
/* add the buffer to the DMA queue */
spin_lock_irqsave(&disp->dma_queue_lock, flags);
- list_add_tail(&vb->queue, &layer->dma_queue);
+ list_add_tail(&buf->list, &layer->dma_queue);
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
}
/*
- * vpbe_buffer_release()
- * This function is called from the videobuf layer to free memory allocated to
+ * vpbe_buf_cleanup()
+ * This function is called from the vb2 layer to free memory allocated to
* the buffers
*/
-static void vpbe_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buf_cleanup(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+ unsigned long flags;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buffer_release\n");
+ "vpbe_buf_cleanup\n");
+
+ spin_lock_irqsave(&layer->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&layer->irqlock, flags);
+}
+
+static void vpbe_wait_prepare(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_unlock(&layer->opslock);
+}
+
+static void vpbe_wait_finish(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_lock(&layer->opslock);
+}
+
+static int vpbe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&layer->dma_queue)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EINVAL;
+ }
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->list);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(fh->disp_dev, layer);
+ if (ret < 0)
+ return ret;
- if (V4L2_MEMORY_USERPTR != layer->memory)
- videobuf_dma_contig_free(q, vb);
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->started = 1;
+ layer->layer_first_int = 1;
+
+ return ret;
+}
+
+static int vpbe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+
+ /* release all active buffers */
+ while (!list_empty(&layer->dma_queue)) {
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ list_del(&layer->next_frm->list);
+ vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
- vb->state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
-static struct videobuf_queue_ops video_qops = {
- .buf_setup = vpbe_buffer_setup,
+static struct vb2_ops video_qops = {
+ .queue_setup = vpbe_buffer_queue_setup,
+ .wait_prepare = vpbe_wait_prepare,
+ .wait_finish = vpbe_wait_finish,
+ .buf_init = vpbe_buffer_init,
.buf_prepare = vpbe_buffer_prepare,
+ .start_streaming = vpbe_start_streaming,
+ .stop_streaming = vpbe_stop_streaming,
+ .buf_cleanup = vpbe_buf_cleanup,
.buf_queue = vpbe_buffer_queue,
- .buf_release = vpbe_buffer_release,
};
static
@@ -345,7 +427,7 @@ static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
unsigned long addr;
int ret;
- addr = videobuf_to_dma_contig(layer->cur_frm);
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
/* Set address in the display registers */
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
@@ -620,9 +702,12 @@ static int vpbe_display_querycap(struct file *file, void *priv,
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
cap->version = VPBE_DISPLAY_VERSION_CODE;
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
- strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->driver, sizeof(cap->driver), "%s",
+ dev_name(vpbe_dev->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpbe_dev->pdev));
strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
return 0;
@@ -1161,7 +1246,7 @@ static int vpbe_display_streamoff(struct file *file, void *priv,
osd_device->ops.disable_layer(osd_device,
layer->layer_info.id);
layer->started = 0;
- ret = videobuf_streamoff(&layer->buffer_queue);
+ ret = vb2_streamoff(&layer->buffer_queue, buf_type);
return ret;
}
@@ -1199,46 +1284,15 @@ static int vpbe_display_streamon(struct file *file, void *priv,
}
/*
- * Call videobuf_streamon to start streaming
+ * Call vb2_streamon to start streaming
* in videobuf
*/
- ret = videobuf_streamon(&layer->buffer_queue);
+ ret = vb2_streamon(&layer->buffer_queue, buf_type);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
- "error in videobuf_streamon\n");
+ "error in vb2_streamon\n");
return ret;
}
- /* If buffer queue is empty, return error */
- if (list_empty(&layer->dma_queue)) {
- v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- goto streamoff;
- }
- /* Get the next frame from the buffer queue */
- layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
- struct videobuf_buffer, queue);
- /* Remove buffer from the buffer queue */
- list_del(&layer->cur_frm->queue);
- /* Mark state of the current frame to active */
- layer->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- layer->field_id = 0;
-
- /* Set parameters in OSD and VENC */
- ret = vpbe_set_osd_display_params(disp_dev, layer);
- if (ret < 0)
- goto streamoff;
-
- /*
- * if request format is yuv420 semiplanar, need to
- * enable both video windows
- */
- layer->started = 1;
-
- layer->layer_first_int = 1;
-
- return ret;
-streamoff:
- ret = videobuf_streamoff(&layer->buffer_queue);
return ret;
}
@@ -1265,10 +1319,10 @@ static int vpbe_display_dqbuf(struct file *file, void *priv,
}
if (file->f_flags & O_NONBLOCK)
/* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
else
/* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
return ret;
}
@@ -1295,7 +1349,7 @@ static int vpbe_display_qbuf(struct file *file, void *priv,
return -EACCES;
}
- return videobuf_qbuf(&layer->buffer_queue, p);
+ return vb2_qbuf(&layer->buffer_queue, p);
}
static int vpbe_display_querybuf(struct file *file, void *priv,
@@ -1304,7 +1358,6 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_QUERYBUF, layer id = %d\n",
@@ -1314,11 +1367,8 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
return -EINVAL;
}
-
- /* Call videobuf_querybuf to get information */
- ret = videobuf_querybuf(&layer->buffer_queue, buf);
-
- return ret;
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&layer->buffer_queue, buf);
}
static int vpbe_display_reqbufs(struct file *file, void *priv,
@@ -1327,8 +1377,8 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vb2_queue *q;
int ret;
-
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
@@ -1342,15 +1392,26 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
return -EBUSY;
}
/* Initialize videobuf queue as per the buffer type */
- videobuf_queue_dma_contig_init(&layer->buffer_queue,
- &video_qops,
- vpbe_dev->pdev,
- &layer->irqlock,
- V4L2_BUF_TYPE_VIDEO_OUTPUT,
- layer->pix_fmt.field,
- sizeof(struct videobuf_buffer),
- fh, NULL);
+ layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
+ if (!layer->alloc_ctx) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
+ return -EINVAL;
+ }
+ q = &layer->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
+ return ret;
+ }
/* Set io allowed member of file handle to TRUE */
fh->io_allowed = 1;
/* Increment io usrs member of layer object to 1 */
@@ -1360,9 +1421,7 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
/* Initialize buffer queue */
INIT_LIST_HEAD(&layer->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
-
- return ret;
+ return vb2_reqbufs(q, req_buf);
}
/*
@@ -1381,7 +1440,7 @@ static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&layer->opslock))
return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&layer->buffer_queue, vma);
+ ret = vb2_mmap(&layer->buffer_queue, vma);
mutex_unlock(&layer->opslock);
return ret;
}
@@ -1398,7 +1457,7 @@ static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
if (layer->started) {
mutex_lock(&layer->opslock);
- err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+ err = vb2_poll(&layer->buffer_queue, filep, wait);
mutex_unlock(&layer->opslock);
}
return err;
@@ -1488,8 +1547,8 @@ static int vpbe_display_release(struct file *file)
layer->layer_info.id);
layer->started = 0;
/* Free buffers allocated */
- videobuf_queue_cancel(&layer->buffer_queue);
- videobuf_mmap_free(&layer->buffer_queue);
+ vb2_queue_release(&layer->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(&layer->buffer_queue);
}
/* Decrement layer usrs counter */
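The vpbe_display conversion above moves the driver from the old videobuf framework to videobuf2: driver buffers now embed a struct vb2_buffer plus a list head, the queue is described by a struct vb2_queue with vb2_ops callbacks, and completed buffers are handed back with vb2_buffer_done(). A minimal hedged sketch of the buffer-wrapping idiom (struct foo_buffer and foo_buf_queue are hypothetical):

#include <linux/list.h>
#include <media/videobuf2-core.h>

struct foo_buffer {
	struct vb2_buffer vb;	/* embedded vb2 buffer */
	struct list_head list;	/* linkage on the driver's DMA queue */
};

static void foo_buf_queue(struct vb2_buffer *vb)
{
	/* recover the driver buffer from the vb2 buffer handed to the callback */
	struct foo_buffer *buf = container_of(vb, struct foo_buffer, vb);

	/* add buf->list to the driver DMA queue under the appropriate lock */
}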
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index bba299dbf39..707f243f810 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -62,7 +62,7 @@ static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) | mask;
writel(val, addr);
@@ -74,7 +74,7 @@ static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) & ~mask;
writel(val, addr);
@@ -87,7 +87,7 @@ static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 new_val = (readl(addr) & ~mask) | (val & mask);
writel(new_val, addr);
@@ -1559,8 +1559,7 @@ static int osd_probe(struct platform_device *pdev)
ret = -ENODEV;
goto free_mem;
}
- osd->osd_base = (unsigned long)ioremap_nocache(res->start,
- osd->osd_size);
+ osd->osd_base = ioremap_nocache(res->start, osd->osd_size);
if (!osd->osd_base) {
dev_err(osd->dev, "Unable to map the OSD region\n");
ret = -ENODEV;
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index cff3c0ab501..0d6cc8e4deb 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -444,7 +444,7 @@ static int __devinit vpif_probe(struct platform_device *pdev)
status = PTR_ERR(vpif_clk);
goto clk_fail;
}
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
@@ -460,7 +460,7 @@ fail:
static int __devexit vpif_remove(struct platform_device *pdev)
{
if (vpif_clk) {
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
clk_put(vpif_clk);
}
@@ -472,13 +472,13 @@ static int __devexit vpif_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int vpif_suspend(struct device *dev)
{
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
return 0;
}
static int vpif_resume(struct device *dev)
{
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
return 0;
}
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index fcabc023885..a409ccefb38 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -201,13 +201,16 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_cap_buffer *buf = container_of(vb,
struct vpif_cap_buffer, vb);
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
vpif_dbg(2, debug, "vpif_buffer_queue\n");
+ spin_lock_irqsave(&common->irqlock, flags);
/* add the buffer to the DMA queue */
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/**
@@ -278,10 +281,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
- /* If buffer queue is empty, return error */
+ /* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_dbg(1, debug, "buffer queue is empty\n");
return -EIO;
}
@@ -291,6 +297,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_cap_buffer, list);
/* Remove buffer from the buffer queue */
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
/* Initialize field_id and started member */
@@ -362,6 +369,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -369,12 +377,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
@@ -420,10 +430,12 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
@@ -468,8 +480,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
/* Check the field format */
if (1 == ch->vpifparams.std_info.frm_fmt) {
/* Progressive mode */
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
if (!channel_first_int[i][channel_id])
vpif_process_buffer_complete(common);
@@ -513,9 +529,13 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
vpif_process_buffer_complete(common);
} else if (1 == fid) {
/* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue) ||
- (common->cur_frm != common->next_frm))
+ (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
vpif_schedule_next_buffer(common);
}
@@ -1004,9 +1024,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1715,7 +1735,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
@@ -1735,7 +1755,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -ENODATA;
return ret;
}
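The vpif_capture hunks above wrap every access to common->dma_queue in common->irqlock, because the list is manipulated both from process context (buffer queueing, start/stop streaming) and from the channel ISR. The usual shape is irqsave/irqrestore in process context and a plain spin_lock() inside the interrupt handler; a sketch with hypothetical names:

/* process context */
struct foo_buffer *next;
unsigned long flags;

spin_lock_irqsave(&common->irqlock, flags);
list_add_tail(&buf->list, &common->dma_queue);
spin_unlock_irqrestore(&common->irqlock, flags);

/* interrupt context: interrupts are already disabled here */
spin_lock(&common->irqlock);
if (!list_empty(&common->dma_queue)) {
	next = list_first_entry(&common->dma_queue, struct foo_buffer, list);
	list_del(&next->list);
}
spin_unlock(&common->irqlock);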
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b716fbd4241..9f2b603be9c 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -177,11 +177,14 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_disp_buffer, vb);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
/* add the buffer to the DMA queue */
+ spin_lock_irqsave(&common->irqlock, flags);
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/*
@@ -246,10 +249,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
/* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_err("buffer queue is empty\n");
return -EIO;
}
@@ -260,6 +266,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_disp_buffer, list);
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -330,6 +337,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -337,12 +345,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
@@ -363,11 +373,13 @@ static void process_progressive_mode(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
/* Get the next buffer from buffer queue */
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
/* Mark status of the buffer as active */
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -398,16 +410,18 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
common->cur_frm = common->next_frm;
} else if (1 == fid) { /* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue)
|| (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
return;
}
+ spin_unlock(&common->irqlock);
/* one field is displayed configure the next
* frame if it is available else hold on current
* frame */
/* Get next from the buffer queue */
process_progressive_mode(common);
-
}
}
@@ -437,8 +451,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
continue;
if (1 == ch->vpifparams.std_info.frm_fmt) {
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
/* Progressive mode */
if (!channel_first_int[i][channel_id]) {
@@ -972,9 +990,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
}
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -1380,7 +1398,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
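Both vpif drivers also switch the vb2_dma_contig_init_ctx() failure check from a NULL test to IS_ERR()/PTR_ERR(): the allocator reports failure through an ERR_PTR-encoded pointer, so a NULL test never catches the error and the real error code was being replaced by -EINVAL. Sketch of the idiom (variable names hypothetical):

#include <linux/err.h>

alloc_ctx = vb2_dma_contig_init_ctx(dev);
if (IS_ERR(alloc_ctx))			/* failure is encoded in the pointer itself */
	return PTR_ERR(alloc_ctx);	/* propagate the real error, not -EINVAL */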
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 19cbb12a12a..cc7b218d047 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -982,7 +982,7 @@ static void *gsc_get_drv_data(struct platform_device *pdev)
match = of_match_node(of_match_ptr(exynos_gsc_match),
pdev->dev.of_node);
if (match)
- driver_data = match->data;
+ driver_data = (struct gsc_driverdata *)match->data;
} else {
driver_data = (struct gsc_driverdata *)
platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index c065d040ed9..c267c57c76f 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -122,7 +122,7 @@ static void gsc_m2m_device_run(void *priv)
struct gsc_ctx *ctx = priv;
struct gsc_dev *gsc;
unsigned long flags;
- u32 ret;
+ int ret;
bool is_set = false;
if (WARN(!ctx, "null hardware context\n"))
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 31ac4dc6924..a8ddb0cacab 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -352,8 +352,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
return 0;
buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
if (prev == NULL) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
dprintk(1, "Restarting video dma\n");
viu_stop_dma(vidq->dev);
@@ -367,8 +366,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
buf->vb.state = VIDEOBUF_ACTIVE;
dprintk(2, "[%p/%d] restart_queue - move to active\n",
buf, buf->vb.i);
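The fsl-viu hunks above fold a list_del() immediately followed by list_add_tail() on the same node into a single list_move_tail() call, which is the dedicated helper for exactly that operation. Sketch (list and buffer names as in the hunk):

/* before */
list_del(&buf->vb.queue);
list_add_tail(&buf->vb.queue, &vidq->active);

/* after: one call, same effect */
list_move_tail(&buf->vb.queue, &vidq->active);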
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 45164c4f845..05c560f2ef0 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -218,15 +218,14 @@ static void dma_callback(void *data)
static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
int do_callback)
{
- struct deinterlace_q_data *s_q_data, *d_q_data;
+ struct deinterlace_q_data *s_q_data;
struct vb2_buffer *src_buf, *dst_buf;
struct deinterlace_dev *pcdev = ctx->dev;
struct dma_chan *chan = pcdev->dma_chan;
struct dma_device *dmadev = chan->device;
struct dma_async_tx_descriptor *tx;
unsigned int s_width, s_height;
- unsigned int d_width, d_height;
- unsigned int d_size, s_size;
+ unsigned int s_size;
dma_addr_t p_in, p_out;
enum dma_ctrl_flags flags;
@@ -238,11 +237,6 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
s_height = s_q_data->height;
s_size = s_width * s_height;
- d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
- d_width = d_q_data->width;
- d_height = d_q_data->height;
- d_size = d_width * d_height;
-
p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
if (!p_in || !p_out) {
@@ -1108,17 +1102,5 @@ static struct platform_driver deinterlace_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit deinterlace_exit(void)
-{
- platform_driver_unregister(&deinterlace_pdrv);
-}
-
-static int __init deinterlace_init(void)
-{
- return platform_driver_register(&deinterlace_pdrv);
-}
-
-module_init(deinterlace_init);
-module_exit(deinterlace_exit);
+module_platform_driver(deinterlace_pdrv);
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 2e2121e9813..7487d7208de 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -839,7 +839,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &m2mtest_qops;
@@ -850,7 +850,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &m2mtest_qops;
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 8f22ce543cf..6b155d7be8e 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -371,7 +371,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
if (!curr_ctx->aborting) {
if ((irqst & PRP_INTR_ST_RDERR) ||
(irqst & PRP_INTR_ST_CH2WERR)) {
- pr_err("PrP bus error ocurred, this transfer is probably corrupted\n");
+ pr_err("PrP bus error occurred, this transfer is probably corrupted\n");
writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
} else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
@@ -1013,16 +1013,4 @@ static struct platform_driver emmaprp_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit emmaprp_exit(void)
-{
- platform_driver_unregister(&emmaprp_pdrv);
-}
-
-static int __init emmaprp_init(void)
-{
- return platform_driver_register(&emmaprp_pdrv);
-}
-
-module_init(emmaprp_init);
-module_exit(emmaprp_exit);
+module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4b1becc86e5..35cc526e6c9 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -44,8 +44,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <plat/cpu.h>
-#include <plat-omap/dma-omap.h>
#include <video/omapvrfb.h>
#include <video/omapdss.h>
@@ -1174,13 +1172,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* set default crop and win */
omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
- /* Save the changes in the overlay strcuture */
- ret = omapvid_init(vout, 0);
- if (ret) {
- v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
- goto s_fmt_vid_out_exit;
- }
-
ret = 0;
s_fmt_vid_out_exit:
@@ -1684,20 +1675,6 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
omap_dispc_register_isr(omap_vout_isr, vout, mask);
- for (j = 0; j < ovid->num_overlays; j++) {
- struct omap_overlay *ovl = ovid->overlays[j];
-
- if (ovl->get_device(ovl)) {
- struct omap_overlay_info info;
- ovl->get_overlay_info(ovl, &info);
- info.paddr = addr;
- if (ovl->set_overlay_info(ovl, &info)) {
- ret = -EINVAL;
- goto streamon_err1;
- }
- }
- }
-
/* First save the configuration in ovelray structure */
ret = omapvid_init(vout, addr);
if (ret)
@@ -2064,7 +2041,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.id = k + 1;
/* Set VRFB as rotation_type for omap2 and omap3 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
vout->vid_info.rotation_type = VOUT_ROT_VRFB;
/* Setup the default configuration for the video devices
@@ -2094,11 +2071,12 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
}
video_set_drvdata(vfd, vout);
- /* Configure the overlay structure */
- ret = omapvid_init(vid_dev->vouts[k], 0);
- if (!ret)
- goto success;
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ continue;
error2:
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
omap_vout_release_vrfb(vout);
@@ -2108,12 +2086,6 @@ error1:
error:
kfree(vout);
return ret;
-
-success:
- dev_info(&pdev->dev, ": registered and initialized"
- " video device %d\n", vfd->minor);
- if (k == (pdev->num_resources - 1))
- return 0;
}
return -ENODEV;
@@ -2186,14 +2158,23 @@ static int __init omap_vout_probe(struct platform_device *pdev)
struct omap_dss_device *def_display;
struct omap2video_device *vid_dev = NULL;
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dss\n");
+ return ret;
+ }
+
if (pdev->num_resources == 0) {
dev_err(&pdev->dev, "probed for an unknown device\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_dss_init;
}
vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
- if (vid_dev == NULL)
- return -ENOMEM;
+ if (vid_dev == NULL) {
+ ret = -ENOMEM;
+ goto err_dss_init;
+ }
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
@@ -2288,6 +2269,8 @@ probe_err1:
}
probe_err0:
kfree(vid_dev);
+err_dss_init:
+ omapdss_compat_uninit();
return ret;
}
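The omap_vout probe rework above calls omapdss_compat_init() first and funnels every subsequent failure through the new err_dss_init label, so the compat layer is always torn down on error. This is the standard goto-unwind shape for kernel probe functions; a minimal sketch with hypothetical helpers:

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_acquire_first(pdev);
	if (ret)
		return ret;

	ret = foo_acquire_second(pdev);
	if (ret)
		goto err_first;

	return 0;

err_first:
	foo_release_first(pdev);	/* unwind in reverse acquisition order */
	return ret;
}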
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 8340445a0ee..cf1c437a868 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -16,7 +16,7 @@
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <video/omapvrfb.h>
#include "omap_voutdef.h"
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 115408b9274..80b0d88f125 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
-#include <plat/cpu.h>
+#include <video/omapdss.h>
#include "omap_voutlib.h"
@@ -124,7 +124,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
win->chromakey = new_win->chromakey;
/* Adjust the cropping window to allow for resizing limitation */
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
/* For 24xx limit is 8x to 1/2x scaling. */
if ((crop->height/win->w.height) >= 2)
crop->height = win->w.height * 2;
@@ -140,7 +140,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
if (crop->height != win->w.height)
crop->width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
/* For 34xx limit is 8x to 1/4x scaling. */
if ((crop->height/win->w.height) >= 4)
crop->height = win->w.height * 4;
@@ -196,7 +196,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width <= 0 || try_crop.height <= 0)
return -EINVAL;
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
@@ -207,9 +207,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* vertical resizing */
vresize = (1024 * try_crop.height) / win->w.height;
- if (cpu_is_omap24xx() && (vresize > 2048))
+ if (omap_vout_dss_omap24xx() && (vresize > 2048))
vresize = 2048;
- else if (cpu_is_omap34xx() && (vresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (vresize > 4096))
vresize = 4096;
win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
@@ -226,9 +226,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* horizontal resizing */
hresize = (1024 * try_crop.width) / win->w.width;
- if (cpu_is_omap24xx() && (hresize > 2048))
+ if (omap_vout_dss_omap24xx() && (hresize > 2048))
hresize = 2048;
- else if (cpu_is_omap34xx() && (hresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (hresize > 4096))
hresize = 4096;
win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
@@ -243,7 +243,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width == 0)
try_crop.width = 2;
}
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if ((try_crop.height/win->w.height) >= 2)
try_crop.height = win->w.height * 2;
@@ -258,7 +258,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.height != win->w.height)
try_crop.width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
if ((try_crop.height/win->w.height) >= 4)
try_crop.height = win->w.height * 4;
@@ -337,3 +337,21 @@ void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
}
free_pages((unsigned long) virtaddr, order);
}
+
+bool omap_vout_dss_omap24xx(void)
+{
+ return omapdss_get_version() == OMAPDSS_VER_OMAP24xx;
+}
+
+bool omap_vout_dss_omap34xx(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
index e51750a597e..f9d1c0779f3 100644
--- a/drivers/media/platform/omap/omap_voutlib.h
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -32,5 +32,8 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_window *win);
unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+
+bool omap_vout_dss_omap24xx(void);
+bool omap_vout_dss_omap34xx(void);
#endif /* #ifndef OMAP_VOUTLIB_H */
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 7f182f0ff3d..2e8c0cb79c3 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -71,8 +71,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
-#include <plat/cpu.h>
-
#include "isp.h"
#include "ispreg.h"
#include "ispccdc.h"
@@ -103,7 +101,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_RESZ |
1 << OMAP3_ISP_IOMEM_SBL |
1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY2,
+ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+ 1 << OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
},
{
.isp_rev = ISP_REVISION_15_0,
@@ -120,7 +119,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
1 << OMAP3_ISP_IOMEM_CSIPHY1 |
- 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2 |
+ 1 << OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
},
};
@@ -1331,7 +1331,8 @@ void omap3isp_subclk_disable(struct isp_device *isp,
* isp_enable_clocks - Enable ISP clocks
* @isp: OMAP3 ISP device
*
- * Return 0 if successful, or clk_enable return value if any of tthem fails.
+ * Return 0 if successful, or clk_prepare_enable return value if any of them
+ * fails.
*/
static int isp_enable_clocks(struct isp_device *isp)
{
@@ -1348,14 +1349,11 @@ static int isp_enable_clocks(struct isp_device *isp)
* has to be twice of what is set on OMAP3430 to get
* the required value for cam_mclk
*/
- if (cpu_is_omap3630())
- divisor = 1;
- else
- divisor = 2;
+ divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
- r = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_ick failed\n");
+ dev_err(isp->dev, "failed to enable cam_ick clock\n");
goto out_clk_enable_ick;
}
r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
@@ -1364,9 +1362,9 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
goto out_clk_enable_mclk;
}
- r = clk_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_mclk failed\n");
+ dev_err(isp->dev, "failed to enable cam_mclk clock\n");
goto out_clk_enable_mclk;
}
rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1374,17 +1372,17 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
" expected : %d\n"
" actual : %ld\n", CM_CAM_MCLK_HZ, rate);
- r = clk_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
if (r) {
- dev_err(isp->dev, "clk_enable csi2_fck failed\n");
+ dev_err(isp->dev, "failed to enable csi2_fck clock\n");
goto out_clk_enable_csi2_fclk;
}
return 0;
out_clk_enable_csi2_fclk:
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
out_clk_enable_mclk:
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
out_clk_enable_ick:
return r;
}
@@ -1395,9 +1393,9 @@ out_clk_enable_ick:
*/
static void isp_disable_clocks(struct isp_device *isp)
{
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
- clk_disable(isp->clock[ISP_CLK_CSI2_FCK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
}
static const char *isp_clocks[] = {
@@ -1678,7 +1676,7 @@ isp_register_subdev_group(struct isp_device *isp,
adapter = i2c_get_adapter(board_info->i2c_adapter_id);
if (adapter == NULL) {
- printk(KERN_ERR "%s: Unable to get I2C adapter %d for "
+ dev_err(isp->dev, "%s: Unable to get I2C adapter %d for "
"device %s\n", __func__,
board_info->i2c_adapter_id,
board_info->board_info->type);
@@ -1688,7 +1686,7 @@ isp_register_subdev_group(struct isp_device *isp,
subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
board_info->board_info, NULL);
if (subdev == NULL) {
- printk(KERN_ERR "%s: Unable to register subdev %s\n",
+ dev_err(isp->dev, "%s: Unable to register subdev %s\n",
__func__, board_info->board_info->type);
continue;
}
@@ -1713,7 +1711,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->media_dev.link_notify = isp_pipeline_link_notify;
ret = media_device_register(&isp->media_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: Media device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: Media device registration failed (%d)\n",
__func__, ret);
return ret;
}
@@ -1721,7 +1719,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->v4l2_dev.mdev = &isp->media_dev;
ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: V4L2 device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
__func__, ret);
goto done;
}
@@ -1766,6 +1764,7 @@ static int isp_register_entities(struct isp_device *isp)
struct media_entity *input;
unsigned int flags;
unsigned int pad;
+ unsigned int i;
sensor = isp_register_subdev_group(isp, subdevs->subdevs);
if (sensor == NULL)
@@ -1807,13 +1806,25 @@ static int isp_register_entities(struct isp_device *isp)
break;
default:
- printk(KERN_ERR "%s: invalid interface type %u\n",
- __func__, subdevs->interface);
+ dev_err(isp->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
ret = -EINVAL;
goto done;
}
- ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+ for (i = 0; i < sensor->entity.num_pads; i++) {
+ if (sensor->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->entity.num_pads) {
+ dev_err(isp->dev,
+ "%s: no source pad in external entity\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, i, input, pad,
flags);
if (ret < 0)
goto done;
@@ -2096,7 +2107,11 @@ static int __devinit isp_probe(struct platform_device *pdev)
isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
- /* Clocks */
+ /* Clocks
+ *
+ * The ISP clock tree is revision-dependent. We thus need to enable ICLK
+ * manually to read the revision before calling __omap3isp_get().
+ */
ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
if (ret < 0)
goto error;
@@ -2105,6 +2120,16 @@ static int __devinit isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
+ ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (ret < 0)
+ goto error;
+
+ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ dev_info(isp->dev, "Revision %d.%d found\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+
if (__omap3isp_get(isp, false) == NULL) {
ret = -ENODEV;
goto error;
@@ -2115,10 +2140,6 @@ static int __devinit isp_probe(struct platform_device *pdev)
goto error_isp;
/* Memory resources */
- isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
- dev_info(isp->dev, "Revision %d.%d found\n",
- (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
-
for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
if (isp->revision == isp_res_maps[m].isp_rev)
break;
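A smaller theme in the omap3isp hunks above is the switch from bare printk(KERN_ERR ...) to dev_err(isp->dev, ...), which prefixes each message with the driver and device name and keeps the severity tied to the owning struct device. Sketch (message text illustrative):

/* before */
printk(KERN_ERR "%s: registration failed (%d)\n", __func__, ret);

/* after */
dev_err(isp->dev, "%s: registration failed (%d)\n", __func__, ret);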
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 8d6866942b8..517d348ce32 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -70,6 +70,8 @@ enum isp_mem_resources {
OMAP3_ISP_IOMEM_CSI2C_REGS1,
OMAP3_ISP_IOMEM_CSIPHY1,
OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
OMAP3_ISP_IOMEM_LAST
};
@@ -125,9 +127,6 @@ struct isp_reg {
struct isp_platform_callback {
u32 (*set_xclk)(struct isp_device *isp, u32 xclk, u8 xclksel);
- int (*csiphy_config)(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes);
};
/*
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6a3ff792af7..783f4b05b15 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -517,7 +517,7 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (soft_reset_retries < 5);
if (soft_reset_retries == 5) {
- printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n");
+ dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
return -EBUSY;
}
@@ -535,8 +535,8 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (--i > 0);
if (i == 0) {
- printk(KERN_ERR
- "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ dev_err(isp->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
return -EBUSY;
}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 348f67ebbbc..3d56b33f85e 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -32,34 +32,92 @@
#include "ispreg.h"
#include "ispcsiphy.h"
-/*
- * csiphy_lanes_config - Configuration of CSIPHY lanes.
- *
- * Updates HW configuration.
- * Called with phy->mutex taken.
- */
-static void csiphy_lanes_config(struct isp_csiphy *phy)
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
+ bool ccp2_strobe)
{
- unsigned int i;
- u32 reg;
+ u32 reg = isp_reg_readl(
+ phy->isp, OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ u32 shift, mode;
+
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2C_PHY1:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ case ISP_INTERFACE_CCP2B_PHY2:
+ reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2A_PHY2:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ default:
+ /* Unsupported interface: nothing to configure. */
+ return;
+ }
- reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ /* Select data/clock or data/strobe mode for CCP2 */
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ if (ccp2_strobe)
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
+ else
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
+ }
- for (i = 0; i < phy->num_data_lanes; i++) {
- reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
- ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
- reg |= (phy->lanes.data[i].pol <<
- ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
- reg |= (phy->lanes.data[i].pos <<
- ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
+ reg |= mode << shift;
+
+ isp_reg_writel(phy->isp, reg,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+}
+
+static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ
+ | OMAP343X_CONTROL_CSIRXFE_RESET;
+
+ /* Only the CCP2B on PHY1 is configurable. */
+ if (iface != ISP_INTERFACE_CCP2B_PHY1)
+ return;
+
+ if (!on) {
+ isp_reg_writel(phy->isp, 0,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ return;
}
- reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
- ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
- reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
- reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+ if (ccp2_strobe)
+ csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
- isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ isp_reg_writel(phy->isp, csirxfe,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+}
+
+/*
+ * csiphy_routing_cfg - Configure OMAP 3 CSI PHY routing
+ * @phy: relevant phy device
+ * @iface: ISP_INTERFACE_*
+ * @on: power on or off
+ * @ccp2_strobe: false: data/clock, true: data/strobe
+ *
+ * Note that the underlying routing configuration registers are part of the
+ * control (SCM) register space and part of the CORE power domain on both 3430
+ * and 3630, so they will not hold their contents in off-mode. This isn't an
+ * issue since the MPU power domain is forced on whilst the ISP is in use.
+ */
+static void csiphy_routing_cfg(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
+ && on)
+ return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE])
+ return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
}
/*
@@ -99,7 +157,7 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
} while ((reg != power >> 2) && (retry_count < 100));
if (retry_count == 100) {
- printk(KERN_ERR "CSI2 CIO set power failed!\n");
+ dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
return -EBUSY;
}
@@ -107,43 +165,28 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
}
/*
- * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
- *
- * Called with phy->mutex taken.
+ * The TCLK timing parameters are OK at their reset values.
*/
-static void csiphy_dphy_config(struct isp_csiphy *phy)
-{
- u32 reg;
-
- /* Set up ISPCSIPHY_REG0 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
-
- reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
- ISPCSIPHY_REG0_THS_SETTLE_MASK);
- reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT;
- reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
-
- /* Set up ISPCSIPHY_REG1 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
-
- reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
- ISPCSIPHY_REG1_TCLK_MISS_MASK |
- ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
- reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
- reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
- reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
-}
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
-static int csiphy_config(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes)
+static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct isp_csiphy_lanes_cfg *lanes;
+ int csi2_ddrclk_khz;
unsigned int used_lanes = 0;
unsigned int i;
+ u32 reg;
+
+ if (subdevs->interface == ISP_INTERFACE_CCP2B_PHY1
+ || subdevs->interface == ISP_INTERFACE_CCP2B_PHY2)
+ lanes = &subdevs->bus.ccp2.lanecfg;
+ else
+ lanes = &subdevs->bus.csi2.lanecfg;
/* Clock and data lanes verification */
for (i = 0; i < phy->num_data_lanes; i++) {
@@ -162,10 +205,61 @@ static int csiphy_config(struct isp_csiphy *phy,
if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
return -EINVAL;
- mutex_lock(&phy->mutex);
- phy->dphy = *dphy;
- phy->lanes = *lanes;
- mutex_unlock(&phy->mutex);
+ /*
+ * The PHY configuration is lost in off mode; that's not an
+ * issue, since the MPU power domain is forced on whilst the
+ * ISP is in use.
+ */
+ csiphy_routing_cfg(phy, subdevs->interface, true,
+ subdevs->bus.ccp2.phy_layer);
+
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * hweight32(used_lanes)) * pipe->external_width;
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+ ISPCSIPHY_REG0_THS_SETTLE_MASK);
+ /* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
+ reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
+ << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+ /* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
+ reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
+ << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+ ISPCSIPHY_REG1_TCLK_MISS_MASK |
+ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+ reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+ reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+ reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+
+ /* DPHY lane configuration */
+ reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ for (i = 0; i < phy->num_data_lanes; i++) {
+ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (lanes->data[i].pol <<
+ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+ reg |= (lanes->data[i].pos <<
+ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+ reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+ reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
return 0;
}
@@ -190,8 +284,9 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
- csiphy_dphy_config(phy);
- csiphy_lanes_config(phy);
+ rval = omap3isp_csiphy_config(phy);
+ if (rval < 0)
+ goto done;
rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
if (rval) {
@@ -211,6 +306,14 @@ void omap3isp_csiphy_release(struct isp_csiphy *phy)
{
mutex_lock(&phy->mutex);
if (phy->phy_in_use) {
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs =
+ pipe->external->host_priv;
+
+ csiphy_routing_cfg(phy, subdevs->interface, false,
+ subdevs->bus.ccp2.phy_layer);
csiphy_power_autoswitch_enable(phy, false);
csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
regulator_disable(phy->vdd);
@@ -227,8 +330,6 @@ int omap3isp_csiphy_init(struct isp_device *isp)
struct isp_csiphy *phy1 = &isp->isp_csiphy1;
struct isp_csiphy *phy2 = &isp->isp_csiphy2;
- isp->platform_cb.csiphy_config = csiphy_config;
-
phy2->isp = isp;
phy2->csi2 = &isp->isp_csi2a;
phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
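A minimal user-space sketch of the D-PHY timing arithmetic above, using assumed example numbers (a 96 MHz pixel clock, 10 bits per sample and two data lanes) rather than values taken from any real sensor pipeline:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example values, standing in for pipe->external_rate,
	 * pipe->external_width and hweight32(used_lanes). */
	unsigned int pixel_rate_hz = 96000000;
	unsigned int bits_per_sample = 10;
	unsigned int data_lanes = 2;

	/* CSI-2 is DDR and only the used data lanes count. */
	unsigned int ddrclk_khz = pixel_rate_hz / 1000
				/ (2 * data_lanes) * bits_per_sample;

	/* THS_TERM = ceil(12.5 ns / DDRClk period) - 1 */
	unsigned int ths_term = DIV_ROUND_UP(25 * ddrclk_khz, 2000000) - 1;
	/* THS_SETTLE = ceil(90 ns / DDRClk period) + 3 */
	unsigned int ths_settle = DIV_ROUND_UP(90 * ddrclk_khz, 1000000) + 3;

	/* Prints: DDR clock 240000 kHz, THS_TERM 2, THS_SETTLE 25 */
	printf("DDR clock %u kHz, THS_TERM %u, THS_SETTLE %u\n",
	       ddrclk_khz, ths_term, ths_settle);
	return 0;
}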
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index e93a661e65d..14551fd7769 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -32,14 +32,6 @@
struct isp_csi2_device;
struct regulator;
-struct isp_csiphy_dphy_cfg {
- u8 ths_term;
- u8 ths_settle;
- u8 tclk_term;
- unsigned tclk_miss:1;
- u8 tclk_settle;
-};
-
struct isp_csiphy {
struct isp_device *isp;
struct mutex mutex; /* serialize csiphy configuration */
@@ -52,8 +44,6 @@ struct isp_csiphy {
unsigned int phy_regs;
u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
- struct isp_csiphy_lanes_cfg lanes;
- struct isp_csiphy_dphy_cfg dphy;
};
int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index e7f9c4292cc..2d759c56f37 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -74,11 +74,14 @@ static void hist_reset_mem(struct ispstat *hist)
static void hist_dma_config(struct ispstat *hist)
{
+ struct isp_device *isp = hist->isp;
+
hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
hist->dma_config.frame_count = 1;
hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
- hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA;
+ hist->dma_config.src_start = isp->mmio_base_phys[OMAP3_ISP_IOMEM_HIST]
+ + ISPHIST_DATA;
hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
}
@@ -479,6 +482,8 @@ int omap3isp_hist_init(struct isp_device *isp)
return -ENOMEM;
memset(hist, 0, sizeof(*hist));
+ hist->isp = isp;
+
if (HIST_CONFIG_DMA)
ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
hist_dma_cb, hist, &hist->dma_ch);
@@ -496,7 +501,6 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->ops = &hist_ops;
hist->priv = hist_cfg;
hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
- hist->isp = isp;
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 1ae1c0909ed..691b92a3c3e 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -200,10 +200,10 @@ static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable)
if (enable)
isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
else
isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
}
/*
@@ -1014,7 +1014,7 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
/*
* preview_config_input_format - Configure the input format
* @prev: The preview engine
- * @format: Format on the preview engine sink pad
+ * @info: Sink pad format information
*
* Enable and configure CFA interpolation for Bayer formats and disable it for
* greyscale formats.
@@ -1025,22 +1025,29 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
* reordered to support non-GRBG Bayer patterns.
*/
static void preview_config_input_format(struct isp_prev_device *prev,
- const struct v4l2_mbus_framefmt *format)
+ const struct isp_format_info *info)
{
struct isp_device *isp = to_isp_device(prev);
struct prev_params *params;
- switch (format->code) {
- case V4L2_MBUS_FMT_SGRBG10_1X10:
+ if (info->width == 8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+
+ switch (info->flavor) {
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
prev->params.cfa_order = 0;
break;
- case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
prev->params.cfa_order = 1;
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
prev->params.cfa_order = 2;
break;
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
prev->params.cfa_order = 3;
break;
default:
@@ -1081,7 +1088,8 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
@@ -1389,6 +1397,7 @@ static unsigned int preview_max_out_width(struct isp_prev_device *prev)
static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
+ const struct isp_format_info *info;
struct v4l2_mbus_framefmt *format;
unsigned long flags;
u32 update;
@@ -1402,17 +1411,18 @@ static void preview_configure(struct isp_prev_device *prev)
/* PREV_PAD_SINK */
format = &prev->formats[PREV_PAD_SINK];
+ info = omap3isp_video_format_info(format->code);
preview_adjust_bandwidth(prev);
- preview_config_input_format(prev, format);
+ preview_config_input_format(prev, info);
preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_CCDC)
preview_config_inlineoffset(prev, 0);
else
- preview_config_inlineoffset(prev,
- ALIGN(format->width, 0x20) * 2);
+ preview_config_inlineoffset(prev, ALIGN(format->width, 0x20) *
+ info->bpp);
preview_setup_hw(prev, update, active);
@@ -1709,6 +1719,11 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
V4L2_MBUS_FMT_Y10_1X10,
V4L2_MBUS_FMT_SGRBG10_1X10,
V4L2_MBUS_FMT_SRGGB10_1X10,
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index e2c57f334c5..b7d90e6fb01 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -29,83 +29,6 @@
#define CM_CAM_MCLK_HZ 172800000 /* Hz */
-/* ISP Submodules offset */
-
-#define L4_34XX_BASE 0x48000000
-#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
-
-#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE
-#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset))
-
-#define OMAP3ISP_CCP2_REG_OFFSET 0x0400
-#define OMAP3ISP_CCP2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCP2_REG_OFFSET)
-#define OMAP3ISP_CCP2_REG(offset) (OMAP3ISP_CCP2_REG_BASE + (offset))
-
-#define OMAP3ISP_CCDC_REG_OFFSET 0x0600
-#define OMAP3ISP_CCDC_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCDC_REG_OFFSET)
-#define OMAP3ISP_CCDC_REG(offset) (OMAP3ISP_CCDC_REG_BASE + (offset))
-
-#define OMAP3ISP_HIST_REG_OFFSET 0x0A00
-#define OMAP3ISP_HIST_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_HIST_REG_OFFSET)
-#define OMAP3ISP_HIST_REG(offset) (OMAP3ISP_HIST_REG_BASE + (offset))
-
-#define OMAP3ISP_H3A_REG_OFFSET 0x0C00
-#define OMAP3ISP_H3A_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_H3A_REG_OFFSET)
-#define OMAP3ISP_H3A_REG(offset) (OMAP3ISP_H3A_REG_BASE + (offset))
-
-#define OMAP3ISP_PREV_REG_OFFSET 0x0E00
-#define OMAP3ISP_PREV_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_PREV_REG_OFFSET)
-#define OMAP3ISP_PREV_REG(offset) (OMAP3ISP_PREV_REG_BASE + (offset))
-
-#define OMAP3ISP_RESZ_REG_OFFSET 0x1000
-#define OMAP3ISP_RESZ_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_RESZ_REG_OFFSET)
-#define OMAP3ISP_RESZ_REG(offset) (OMAP3ISP_RESZ_REG_BASE + (offset))
-
-#define OMAP3ISP_SBL_REG_OFFSET 0x1200
-#define OMAP3ISP_SBL_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_SBL_REG_OFFSET)
-#define OMAP3ISP_SBL_REG(offset) (OMAP3ISP_SBL_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS1_REG_OFFSET 0x1800
-#define OMAP3ISP_CSI2A_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS1_REG(offset) \
- (OMAP3ISP_CSI2A_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY2_REG_OFFSET 0x1970
-#define OMAP3ISP_CSIPHY2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY2_REG_OFFSET)
-#define OMAP3ISP_CSIPHY2_REG(offset) (OMAP3ISP_CSIPHY2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS2_REG_OFFSET 0x19C0
-#define OMAP3ISP_CSI2A_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS2_REG(offset) \
- (OMAP3ISP_CSI2A_REGS2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS1_REG_OFFSET 0x1C00
-#define OMAP3ISP_CSI2C_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS1_REG(offset) \
- (OMAP3ISP_CSI2C_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY1_REG_OFFSET 0x1D70
-#define OMAP3ISP_CSIPHY1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY1_REG_OFFSET)
-#define OMAP3ISP_CSIPHY1_REG(offset) (OMAP3ISP_CSIPHY1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS2_REG_OFFSET 0x1DC0
-#define OMAP3ISP_CSI2C_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS2_REG(offset) \
- (OMAP3ISP_CSI2C_REGS2_REG_BASE + (offset))
-
/* ISP module register offset */
#define ISP_REVISION (0x000)
@@ -1583,4 +1506,26 @@
#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK \
(0x7fffff << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
+/* -----------------------------------------------------------------------------
+ * CONTROL registers for CSI-2 phy routing
+ */
+
+/* OMAP343X_CONTROL_CSIRXFE */
+#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV (1 << 7)
+#define OMAP343X_CONTROL_CSIRXFE_RESENABLE (1 << 8)
+#define OMAP343X_CONTROL_CSIRXFE_SELFORM (1 << 10)
+#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ (1 << 12)
+#define OMAP343X_CONTROL_CSIRXFE_RESET (1 << 13)
+
+/* OMAP3630_CONTROL_CAMERA_PHY_CTRL */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT 2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT 0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY 0x0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE 0x1
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK 0x2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_GPI 0x3
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK 0x3
+/* CCP2B: set to receive data from PHY2 instead of PHY1 */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 (1 << 4)
+
#endif /* OMAP3_ISP_REG_H */
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index e7939869bda..61e17f9bd8b 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -257,7 +257,7 @@ static int isp_stat_buf_queue(struct ispstat *stat)
if (!stat->active_buf)
return STAT_NO_BUF;
- do_gettimeofday(&stat->active_buf->ts);
+ ktime_get_ts(&stat->active_buf->ts);
stat->active_buf->buf_size = stat->buf_size;
if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
@@ -537,7 +537,8 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
return PTR_ERR(buf);
}
- data->ts = buf->ts;
+ data->ts.tv_sec = buf->ts.tv_sec;
+ data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
data->config_counter = buf->config_counter;
data->frame_number = buf->frame_number;
data->buf_size = buf->buf_size;
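The statistics timestamp is now taken from the monotonic clock and only converted to a struct timeval when handed to user space. A minimal user-space sketch of that conversion, with clock_gettime() standing in for the kernel's ktime_get_ts():

#include <stdio.h>
#include <time.h>
#include <sys/time.h>

#define NSEC_PER_USEC	1000L

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	/* Monotonic timestamp, analogous to ktime_get_ts() above. */
	clock_gettime(CLOCK_MONOTONIC, &ts);

	/* Same conversion as omap3isp_stat_request_statistics(). */
	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}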
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 99cf10449cf..9a047c929b9 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/omap3isp.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <media/v4l2-event.h>
#include "isp.h"
@@ -50,7 +50,7 @@ struct ispstat_buffer {
struct iovm_struct *iovm;
void *virt_addr;
dma_addr_t dma_addr;
- struct timeval ts;
+ struct timespec ts;
u32 buf_size;
u32 frame_number;
u16 config_counter;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 3311d6bb345..e0d73a64218 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1392,7 +1392,8 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
if (ret < 0)
- printk(KERN_ERR "%s: could not register video device (%d)\n",
+ dev_err(video->isp->dev,
+ "%s: could not register video device (%d)\n",
__func__, ret);
return ret;
diff --git a/drivers/media/platform/s3c-camif/Makefile b/drivers/media/platform/s3c-camif/Makefile
new file mode 100644
index 00000000000..50bf8c59b99
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/Makefile
@@ -0,0 +1,5 @@
+# Makefile for s3c244x/s3c64xx CAMIF driver
+
+s3c-camif-objs := camif-core.o camif-capture.o camif-regs.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif.o
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
new file mode 100644
index 00000000000..a55793c3d81
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -0,0 +1,1672 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Based on drivers/media/platform/s5p-fimc,
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+#include "camif-regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+/* Locking: called with vp->camif->slock spinlock held */
+static void camif_cfg_video_path(struct camif_vp *vp)
+{
+ WARN_ON(s3c_camif_get_scaler_config(vp, &vp->scaler));
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_target_format(vp);
+ camif_hw_set_output_dma(vp);
+}
+
+static void camif_prepare_dma_offset(struct camif_vp *vp)
+{
+ struct camif_frame *f = &vp->out_frame;
+
+ f->dma_offset.initial = f->rect.top * f->f_width + f->rect.left;
+ f->dma_offset.line = f->f_width - (f->rect.left + f->rect.width);
+
+ pr_debug("dma_offset: initial: %d, line: %d\n",
+ f->dma_offset.initial, f->dma_offset.line);
+}
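+
+/*
+ * Worked example for the offsets above (assumed numbers): with a 640-pixel
+ * wide frame buffer (f_width = 640) and a 320x240 output rectangle at
+ * (160, 120), the DMA engine skips 120 * 640 + 160 = 76960 pixels before
+ * the first line and 640 - (160 + 320) = 160 pixels at the end of each line.
+ */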
+
+/* Locking: called with camif->slock spinlock held */
+static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+
+ if (camif->sensor.sd == NULL || vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (variant->ip_revision == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_hw_set_camera_bus(camif);
+ camif_hw_set_source_format(camif);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ camif_hw_set_input_path(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+
+ return 0;
+}
+
+/*
+ * Initialize the video path from the scaler stage onwards only; the camera
+ * input interface setup is skipped. This is useful for enabling one of the
+ * video paths while the other is already running.
+ * Locking: called with camif->slock spinlock held.
+ */
+static int s3c_camif_hw_vp_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ camif_prepare_dma_offset(vp);
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+ return 0;
+}
+
+static int sensor_set_power(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
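+ /* Call s_power only on the first power-up and the last power-down. */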
+ if (!on == camif->sensor.power_count)
+ err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (!err)
+ sensor->power_count += on ? 1 : -1;
+
+ pr_debug("on: %d, power_count: %d, err: %d\n",
+ on, sensor->power_count, err);
+
+ return err;
+}
+
+static int sensor_set_streaming(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (!on == camif->sensor.stream_count)
+ err = v4l2_subdev_call(sensor->sd, video, s_stream, on);
+ if (!err)
+ sensor->stream_count += on ? 1 : -1;
+
+ pr_debug("on: %d, stream_count: %d, err: %d\n",
+ on, sensor->stream_count, err);
+
+ return err;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Return any buffers to vb2, perform CAMIF software reset and
+ * turn off streaming at the data pipeline (sensor) if required.
+ */
+static int camif_reinitialize(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ streaming = vp->state & ST_VP_SENSOR_STREAMING;
+
+ vp->state &= ~(ST_VP_PENDING | ST_VP_RUNNING | ST_VP_OFF |
+ ST_VP_ABORTING | ST_VP_STREAMING |
+ ST_VP_SENSOR_STREAMING | ST_VP_LASTIRQ);
+
+ /* Release unused buffers */
+ while (!list_empty(&vp->pending_buf_q)) {
+ buf = camif_pending_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vp->active_buf_q)) {
+ buf = camif_active_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!streaming)
+ return 0;
+
+ return sensor_set_streaming(camif, 0);
+}
+
+static bool s3c_vp_active(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ ret = (vp->state & ST_VP_RUNNING) || (vp->state & ST_VP_PENDING);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return ret;
+}
+
+static bool camif_is_streaming(struct camif_dev *camif)
+{
+ unsigned long flags;
+ bool status;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ status = camif->stream_count > 0;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return status;
+}
+
+static int camif_stop_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ if (!s3c_vp_active(vp))
+ return 0;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->state &= ~(ST_VP_OFF | ST_VP_LASTIRQ);
+ vp->state |= ST_VP_ABORTING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ ret = wait_event_timeout(vp->irq_queue,
+ !(vp->state & ST_VP_ABORTING),
+ msecs_to_jiffies(CAMIF_STOP_TIMEOUT));
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (ret == 0 && !(vp->state & ST_VP_OFF)) {
+ /* Timed out, forcibly stop capture */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return camif_reinitialize(vp);
+}
+
+static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
+ struct camif_addr *paddr)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->rect.width * frame->rect.height;
+
+ pr_debug("colplanes: %d, pix_size: %u\n",
+ vp->out_fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ switch (vp->out_fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
+ else /* 420 */
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
+
+ if (vp->out_fmt->color == IMG_FMT_YCRCB420)
+ swap(paddr->cb, paddr->cr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("DMA address: y: %#x cb: %#x cr: %#x\n",
+ paddr->y, paddr->cb, paddr->cr);
+
+ return 0;
+}
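+
+/*
+ * Worked example for the plane layout above (assumed numbers): for a
+ * 640x480 three-plane 4:2:0 buffer, pix_size = 640 * 480 = 307200, so Cb
+ * starts at y + 307200 and Cr at cb + 307200 / 4 = cb + 76800; for
+ * IMG_FMT_YCBCR422P the chroma planes are twice as large and Cr starts at
+ * cb + 307200 / 2 = cb + 153600 instead.
+ */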
+
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
+{
+ struct camif_vp *vp = priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ unsigned int status;
+
+ spin_lock(&camif->slock);
+
+ if (ip_rev == S3C6410_CAMIF_IP_REV)
+ camif_hw_clear_pending_irq(vp);
+
+ status = camif_hw_get_status(vp);
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV && (status & CISTATUS_OVF_MASK)) {
+ camif_hw_clear_fifo_overflow(vp);
+ goto unlock;
+ }
+
+ if (vp->state & ST_VP_ABORTING) {
+ if (vp->state & ST_VP_OFF) {
+ /* Last IRQ */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+ wake_up(&vp->irq_queue);
+ goto unlock;
+ } else if (vp->state & ST_VP_LASTIRQ) {
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ camif_hw_set_lastirq(vp, false);
+ vp->state |= ST_VP_OFF;
+ } else {
+ /* Request one last IRQ; capture is disabled when it arrives */
+ camif_hw_set_lastirq(vp, true);
+ vp->state |= ST_VP_LASTIRQ;
+ }
+ }
+
+ if (!list_empty(&vp->pending_buf_q) && (vp->state & ST_VP_RUNNING) &&
+ !list_empty(&vp->active_buf_q)) {
+ unsigned int index;
+ struct camif_buffer *vbuf;
+ struct timeval *tv;
+ struct timespec ts;
+ /*
+ * Get previous DMA write buffer index:
+ * 0 => DMA buffer 0, 2;
+ * 1 => DMA buffer 1, 3.
+ */
+ index = (CISTATUS_FRAMECNT(status) + 2) & 1;
+
+ ktime_get_ts(&ts);
+ vbuf = camif_active_queue_peek(vp, index);
+
+ if (!WARN_ON(vbuf == NULL)) {
+ /* Dequeue a filled buffer */
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++;
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+ /* Set up an empty buffer at the DMA engine */
+ vbuf = camif_pending_queue_pop(vp);
+ vbuf->index = index;
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index);
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index + 2);
+
+ /* Scheduled in H/W, add to the queue */
+ camif_active_queue_add(vp, vbuf);
+ }
+ } else if (!(vp->state & ST_VP_ABORTING) &&
+ (vp->state & ST_VP_PENDING)) {
+ vp->state |= ST_VP_RUNNING;
+ }
+
+ if (vp->state & ST_VP_CONFIG) {
+ camif_prepare_dma_offset(vp);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (camif->variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ vp->state &= ~ST_VP_CONFIG;
+ }
+unlock:
+ spin_unlock(&camif->slock);
+ return IRQ_HANDLED;
+}
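+
+/*
+ * Worked example for the buffer index arithmetic in the handler above
+ * (assumed counter values): a frame counter of 3 gives (3 + 2) & 1 = 1,
+ * i.e. the buffer programmed into DMA address slots 1 and 3, while a
+ * counter of 2 gives (2 + 2) & 1 = 0, i.e. the buffer in slots 0 and 2;
+ * each buffer is written to both slots of its ping-pong pair via the two
+ * camif_hw_set_output_addr() calls with index and index + 2.
+ */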
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * We assume the codec capture path is always activated
+ * first, before the preview path starts streaming.
+ * This is required to avoid internal FIFO overflow and
+ * a need for CAMIF software reset.
+ */
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (camif->stream_count == 0) {
+ camif_hw_reset(camif);
+ ret = s3c_camif_hw_init(camif, vp);
+ } else {
+ ret = s3c_camif_hw_vp_init(camif, vp);
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (ret < 0) {
+ camif_reinitialize(vp);
+ return ret;
+ }
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->frame_sequence = 0;
+ vp->state |= ST_VP_PENDING;
+
+ if (!list_empty(&vp->pending_buf_q) &&
+ (!(vp->state & ST_VP_STREAMING) ||
+ !(vp->state & ST_VP_SENSOR_STREAMING))) {
+
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ vp->state |= ST_VP_STREAMING;
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ ret = sensor_set_streaming(camif, 1);
+ if (ret)
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+
+ return ret;
+ }
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ return camif_stop_capture(vp);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format *pix = NULL;
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int size;
+
+ if (pfmt) {
+ pix = &pfmt->fmt.pix;
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, -1);
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ if (pix)
+ size = (pix->width * pix->height * fmt->depth) / 8;
+ else
+ size = (frame->f_width * frame->f_height * fmt->depth) / 8;
+
+ *num_planes = 1;
+
+ if (pix)
+ sizes[0] = max(size, pix->sizeimage);
+ else
+ sizes[0] = size;
+ allocators[0] = camif->alloc_ctx;
+
+ pr_debug("size: %u\n", sizes[0]);
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < vp->payload) {
+ v4l2_err(&vp->vdev, "buffer too small: %lu, required: %u\n",
+ vb2_plane_size(vb, 0), vp->payload);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, vp->payload);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb);
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr));
+
+ if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
+ /* Schedule an empty buffer in H/W */
+ buf->index = vp->buf_index;
+
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index);
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index + 2);
+
+ camif_active_queue_add(vp, buf);
+ vp->buf_index = !vp->buf_index;
+ } else {
+ camif_pending_queue_add(vp, buf);
+ }
+
+ if (vb2_is_streaming(&vp->vb_queue) && !list_empty(&vp->pending_buf_q)
+ && !(vp->state & ST_VP_STREAMING)) {
+
+ vp->state |= ST_VP_STREAMING;
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ if (sensor_set_streaming(camif, 1) == 0)
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ else
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+ }
+ return;
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+}
+
+static void camif_lock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_lock(&vp->camif->lock);
+}
+
+static void camif_unlock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_unlock(&vp->camif->lock);
+}
+
+static const struct vb2_ops s3c_camif_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = camif_unlock,
+ .wait_finish = camif_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int s3c_camif_open(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ if (mutex_lock_interruptible(&camif->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(camif->dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = sensor_set_power(camif, 1);
+ if (!ret)
+ goto unlock;
+
+ pm_runtime_put(camif->dev);
+err_pm:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_close(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ mutex_lock(&camif->lock);
+
+ if (vp->owner == file->private_data) {
+ camif_stop_capture(vp);
+ vb2_queue_release(&vp->vb_queue);
+ vp->owner = NULL;
+ }
+
+ sensor_set_power(camif, 0);
+
+ pm_runtime_put(camif->dev);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static unsigned int s3c_camif_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ mutex_lock(&camif->lock);
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_poll(&vp->vb_queue, file, wait);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_mmap(&vp->vb_queue, vma);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations s3c_camif_fops = {
+ .owner = THIS_MODULE,
+ .open = s3c_camif_open,
+ .release = s3c_camif_close,
+ .poll = s3c_camif_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s3c_camif_mmap,
+};
+
+/*
+ * Video node IOCTLs
+ */
+
+static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ strlcpy(cap->driver, S3C_CAMIF_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(vp->camif->dev), vp->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_subdev *sensor = vp->camif->sensor.sd;
+
+ if (input->index || sensor == NULL)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(input->name, sensor->name, sizeof(input->name));
+ return 0;
+}
+
+static int s3c_camif_vidioc_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ return i == 0 ? 0 : -EINVAL;
+}
+
+static int s3c_camif_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ pr_debug("fmt(%d): %s\n", f->index, f->description);
+ return 0;
+}
+
+static int s3c_camif_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+
+ pix->bytesperline = frame->f_width * fmt->ybpp;
+ pix->sizeimage = vp->payload;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->width = frame->f_width;
+ pix->height = frame->f_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int __camif_video_try_format(struct camif_vp *vp,
+ struct v4l2_pix_format *pix,
+ const struct camif_fmt **ffmt)
+{
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ unsigned int wmin, hmin, sc_hrmax, sc_vrmax;
+ const struct vp_pix_limits *pix_lim;
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, 0);
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ if (ffmt)
+ *ffmt = fmt;
+
+ pix_lim = &camif->variant->vp_pix_limits[vp->id];
+
+ pr_debug("fmt: %ux%u, crop: %ux%u, bytesperline: %u\n",
+ pix->width, pix->height, crop->width, crop->height,
+ pix->bytesperline);
+ /*
+ * Calculate minimum width and height according to the configured
+ * camera input interface crop rectangle and the resizer's capabilities.
+ */
+ sc_hrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->width) - 3));
+ sc_vrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->height) - 1));
+
+ wmin = max_t(u32, pix_lim->min_out_width, crop->width / sc_hrmax);
+ wmin = round_up(wmin, pix_lim->out_width_align);
+ hmin = max_t(u32, 8, crop->height / sc_vrmax);
+ hmin = round_up(hmin, 8);
+
+ v4l_bound_align_image(&pix->width, wmin, pix_lim->max_sc_out_width,
+ ffs(pix_lim->out_width_align) - 1,
+ &pix->height, hmin, pix_lim->max_height, 0, 0);
+
+ pix->bytesperline = pix->width * fmt->ybpp;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) / 8;
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+
+ pr_debug("%ux%u, wmin: %d, hmin: %d, sc_hrmax: %d, sc_vrmax: %d\n",
+ pix->width, pix->height, wmin, hmin, sc_hrmax, sc_vrmax);
+
+ return 0;
+}
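+
+/*
+ * Worked example for the scaler limits above (assumed 640x480 camera crop,
+ * and assuming SCALER_MAX_RATIO does not cap the result): ffs(640) = 8 and
+ * ffs(480) = 6, so the resizer may divide by at most 1 << (8 - 3) = 32
+ * horizontally and 1 << (6 - 1) = 32 vertically; the minimum output size is
+ * then max(min_out_width, 640 / 32 = 20) rounded up to the output width
+ * alignment, by max(8, 480 / 32 = 15) rounded up to 16 lines.
+ */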
+
+static int s3c_camif_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return __camif_video_try_format(vp, &f->fmt.pix, NULL);
+}
+
+static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_frame *out_frame = &vp->out_frame;
+ const struct camif_fmt *fmt = NULL;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vb2_is_busy(&vp->vb_queue))
+ return -EBUSY;
+
+ ret = __camif_video_try_format(vp, &f->fmt.pix, &fmt);
+ if (ret < 0)
+ return ret;
+
+ vp->out_fmt = fmt;
+ vp->payload = pix->sizeimage;
+ out_frame->f_width = pix->width;
+ out_frame->f_height = pix->height;
+
+ /* Reset composition rectangle */
+ out_frame->rect.width = pix->width;
+ out_frame->rect.height = pix->height;
+ out_frame->rect.left = 0;
+ out_frame->rect.top = 0;
+
+ if (vp->owner == NULL)
+ vp->owner = priv;
+
+ pr_debug("%ux%u. payload: %u. fmt: %s. %d %d. sizeimage: %d. bpl: %d\n",
+ out_frame->f_width, out_frame->f_height, vp->payload, fmt->name,
+ pix->width * pix->height * fmt->depth, fmt->depth,
+ pix->sizeimage, pix->bytesperline);
+
+ return 0;
+}
+
+/* Only check pixel formats at the sensor and the camif subdev pads */
+static int camif_pipeline_validate(struct camif_dev *camif)
+{
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ /* Retrieve format at the sensor subdev source pad */
+ pad = media_entity_remote_source(&camif->pads[0]);
+ if (!pad || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EPIPE;
+
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(camif->sensor.sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != camif->mbus_fmt.width ||
+ src_fmt.format.height != camif->mbus_fmt.height ||
+ src_fmt.format.code != camif->mbus_fmt.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int s3c_camif_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct media_entity *sensor = &camif->sensor.sd->entity;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (s3c_vp_active(vp))
+ return 0;
+
+ ret = media_entity_pipeline_start(sensor, camif->m_pipeline);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_pipeline_validate(camif);
+ if (ret < 0) {
+ media_entity_pipeline_stop(sensor);
+ return ret;
+ }
+
+ return vb2_streamon(&vp->vb_queue, type);
+}
+
+static int s3c_camif_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ ret = vb2_streamoff(&vp->vb_queue, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&camif->sensor.sd->entity);
+ return ret;
+}
+
+static int s3c_camif_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
+ vp->id, rb->count, vp->owner, priv);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (rb->count)
+ rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
+ else
+ vp->owner = NULL;
+
+ ret = vb2_reqbufs(&vp->vb_queue, rb);
+ if (!ret) {
+ vp->reqbufs_count = rb->count;
+ if (vp->owner == NULL && rb->count > 0)
+ vp->owner = priv;
+ }
+
+ return ret;
+}
+
+static int s3c_camif_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_querybuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_qbuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int s3c_camif_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ create->count = max_t(u32, 1, create->count);
+ ret = vb2_create_bufs(&vp->vb_queue, create);
+
+ if (!ret && vp->owner == NULL)
+ vp->owner = priv;
+
+ return ret;
+}
+
+static int s3c_camif_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_prepare_buf(&vp->vb_queue, b);
+}
+
+static int s3c_camif_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = vp->out_frame.f_width;
+ sel->r.height = vp->out_frame.f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vp->out_frame.rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void __camif_try_compose(struct camif_dev *camif, struct camif_vp *vp,
+ struct v4l2_rect *r)
+{
+ /* s3c244x doesn't support composition */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ *r = vp->out_frame.rect;
+ return;
+ }
+
+ /* TODO: s3c64xx */
+}
+
+static int s3c_camif_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ __camif_try_compose(camif, vp, &rect);
+
+ sel->r = rect;
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->out_frame.rect = rect;
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s3c_camif_ioctl_ops = {
+ .vidioc_querycap = s3c_camif_vidioc_querycap,
+ .vidioc_enum_input = s3c_camif_vidioc_enum_input,
+ .vidioc_g_input = s3c_camif_vidioc_g_input,
+ .vidioc_s_input = s3c_camif_vidioc_s_input,
+ .vidioc_enum_fmt_vid_cap = s3c_camif_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = s3c_camif_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = s3c_camif_vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = s3c_camif_vidioc_g_fmt,
+ .vidioc_g_selection = s3c_camif_g_selection,
+ .vidioc_s_selection = s3c_camif_s_selection,
+ .vidioc_reqbufs = s3c_camif_reqbufs,
+ .vidioc_querybuf = s3c_camif_querybuf,
+ .vidioc_prepare_buf = s3c_camif_prepare_buf,
+ .vidioc_create_bufs = s3c_camif_create_bufs,
+ .vidioc_qbuf = s3c_camif_qbuf,
+ .vidioc_dqbuf = s3c_camif_dqbuf,
+ .vidioc_streamon = s3c_camif_streamon,
+ .vidioc_streamoff = s3c_camif_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+/*
+ * Video node controls
+ */
+static int s3c_camif_video_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_vp *vp = ctrl->priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ pr_debug("[vp%d] ctrl: %s, value: %d\n", vp->id,
+ ctrl->name, ctrl->val);
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ vp->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ vp->vflip = ctrl->val;
+ break;
+ }
+
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+/* Codec and preview video node control ops */
+static const struct v4l2_ctrl_ops s3c_camif_video_ctrl_ops = {
+ .s_ctrl = s3c_camif_video_s_ctrl,
+};
+
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+{
+ struct camif_vp *vp = &camif->vp[idx];
+ struct vb2_queue *q = &vp->vb_queue;
+ struct video_device *vfd = &vp->vdev;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "camif-%s",
+ vp->id == 0 ? "codec" : "preview");
+
+ vfd->fops = &s3c_camif_fops;
+ vfd->ioctl_ops = &s3c_camif_ioctl_ops;
+ vfd->v4l2_dev = &camif->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &camif->lock;
+ vp->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&vp->pending_buf_q);
+ INIT_LIST_HEAD(&vp->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &s3c_camif_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct camif_buffer);
+ q->drv_priv = vp;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_vd_rel;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vp->pad, 0);
+ if (ret)
+ goto err_vd_rel;
+
+ video_set_drvdata(vfd, vp);
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+
+ v4l2_ctrl_handler_init(&vp->ctrl_handler, 1);
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+
+ ret = vp->ctrl_handler.error;
+ if (ret < 0)
+ goto err_me_cleanup;
+
+ vfd->ctrl_handler = &vp->ctrl_handler;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrlh_free;
+
+ v4l2_info(&camif->v4l2_dev, "registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_vd_rel:
+ video_device_release(vfd);
+ return ret;
+}
+
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
+{
+ struct video_device *vfd = &camif->vp[idx].vdev;
+
+ if (video_is_registered(vfd)) {
+ video_unregister_device(vfd);
+ media_entity_cleanup(&vfd->entity);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ }
+}
+
+/* Media bus pixel formats supported at the camif input */
+static const enum v4l2_mbus_pixelcode camif_mbus_formats[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_YVYU8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_VYUY8_2X8,
+};
+
+/*
+ * Camera input interface subdev operations
+ */
+
+static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(camif_mbus_formats))
+ return -EINVAL;
+
+ code->code = camif_mbus_formats[code->index];
+ return 0;
+}
+
+static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ /* full camera input pixel size */
+ *mf = camif->mbus_fmt;
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* crop rectangle at camera interface input */
+ mf->width = camif->camif_crop.width;
+ mf->height = camif->camif_crop.height;
+ mf->code = camif->mbus_fmt.code;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static void __camif_subdev_try_format(struct camif_dev *camif,
+ struct v4l2_mbus_framefmt *mf, int pad)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+ const struct vp_pix_limits *pix_lim;
+ int i = ARRAY_SIZE(camif_mbus_formats);
+
+ /* FIXME: constraints against codec or preview path ? */
+ pix_lim = &variant->vp_pix_limits[VP_CODEC];
+
+ /* Fall back to the first supported media bus code if none matches. */
+ while (--i)
+ if (camif_mbus_formats[i] == mf->code)
+ break;
+
+ mf->code = camif_mbus_formats[i];
+
+ if (pad == CAMIF_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, CAMIF_MAX_PIX_HEIGHT, 0,
+ 0);
+ } else {
+ struct v4l2_rect *crop = &camif->camif_crop;
+ v4l_bound_align_image(&mf->width, 8, crop->width,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, crop->height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &camif->subdev, "%ux%u\n", mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %ux%u\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&camif->lock);
+
+ /*
+ * No pixel format change at the camera input is allowed
+ * while streaming.
+ */
+ if (vb2_is_busy(&camif->vp[VP_CODEC].vb_queue) ||
+ vb2_is_busy(&camif->vp[VP_PREVIEW].vb_queue)) {
+ mutex_unlock(&camif->lock);
+ return -EBUSY;
+ }
+
+ __camif_subdev_try_format(camif, mf, fmt->pad);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&camif->lock);
+ return 0;
+ }
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ camif->mbus_fmt = *mf;
+ /* Reset sink crop rectangle. */
+ crop->width = mf->width;
+ crop->height = mf->height;
+ crop->left = 0;
+ crop->top = 0;
+ /*
+ * Reset source format (the camif's crop rectangle)
+ * and the video output resolution.
+ */
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_frame *frame = &camif->vp[i].out_frame;
+ frame->rect = *crop;
+ frame->f_width = mf->width;
+ frame->f_height = mf->height;
+ }
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* The pixel format can only be changed on the sink pad. */
+ mf->code = camif->mbus_fmt.code;
+ mf->width = crop->width;
+ mf->height = crop->height;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ return 0;
+}
+
+static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = *crop;
+ } else { /* crop bounds */
+ sel->r.width = mf->width;
+ sel->r.height = mf->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ __func__, crop->left, crop->top, crop->width,
+ crop->height, mf->width, mf->height);
+
+ return 0;
+}
+
+static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ const struct camif_pix_limits *pix_lim = &camif->variant->pix_limits;
+ unsigned int left = 2 * r->left;
+ unsigned int top = 2 * r->top;
+
+ /*
+ * The following constraints must be met:
+ * - r->width + 2 * r->left = mf->width;
+ * - r->height + 2 * r->top = mf->height;
+ * - crop rectangle size and position must be aligned
+ * to 8 or 2 pixels, depending on SoC version.
+ */
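+ /*
+ * Example: for a 640x480 mbus format, a requested 320x240 crop at
+ * (160, 120) already satisfies 320 + 2 * 160 = 640 and
+ * 240 + 2 * 120 = 480 and is kept as is. If the requested offsets
+ * are inconsistent with the size, the (doubled) offsets are aligned
+ * and clamped first and the width/height are recomputed from them.
+ */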
+ v4l_bound_align_image(&r->width, 0, mf->width,
+ ffs(pix_lim->win_hor_offset_align) - 1,
+ &r->height, 0, mf->height, 1, 0);
+
+ v4l_bound_align_image(&left, 0, mf->width - r->width,
+ ffs(pix_lim->win_hor_offset_align),
+ &top, 0, mf->height - r->height, 2, 0);
+
+ r->left = left / 2;
+ r->top = top / 2;
+ r->width = mf->width - left;
+ r->height = mf->height - top;
+ /*
+ * Make sure we either downscale or upscale both the pixel
+ * width and height. Just return current crop rectangle if
+ * this scaler constraint is not met.
+ */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV &&
+ camif_is_streaming(camif)) {
+ unsigned int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct v4l2_rect *or = &camif->vp[i].out_frame.rect;
+ if ((or->width > r->width) == (or->height > r->height))
+ continue;
+ *r = camif->camif_crop;
+ pr_debug("Width/height scaling direction limitation\n");
+ break;
+ }
+ }
+
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ r->left, r->top, r->width, r->height, mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct camif_scaler scaler;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&camif->lock);
+ __camif_try_crop(camif, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ *crop = sel->r;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ scaler = vp->scaler;
+ if (s3c_camif_get_scaler_config(vp, &scaler))
+ continue;
+ vp->scaler = scaler;
+ vp->state |= ST_VP_CONFIG;
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ }
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ __func__, crop->left, crop->top, crop->width, crop->height,
+ camif->mbus_fmt.width, camif->mbus_fmt.height);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
+ .enum_mbus_code = s3c_camif_subdev_enum_mbus_code,
+ .get_selection = s3c_camif_subdev_get_selection,
+ .set_selection = s3c_camif_subdev_set_selection,
+ .get_fmt = s3c_camif_subdev_get_fmt,
+ .set_fmt = s3c_camif_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops s3c_camif_subdev_ops = {
+ .pad = &s3c_camif_subdev_pad_ops,
+};
+
+static int s3c_camif_subdev_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_dev *camif = container_of(ctrl->handler, struct camif_dev,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ camif->colorfx = camif->ctrl_colorfx->val;
+ /* Set Cb, Cr */
+ switch (ctrl->val) {
+ case V4L2_COLORFX_SEPIA:
+ camif->colorfx_cb = 115;
+ camif->colorfx_cr = 145;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ camif->colorfx_cb = camif->ctrl_colorfx_cbcr->val >> 8;
+ camif->colorfx_cr = camif->ctrl_colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ /* for V4L2_COLORFX_BW and others */
+ camif->colorfx_cb = 128;
+ camif->colorfx_cr = 128;
+ }
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ camif->test_pattern = camif->ctrl_test_pattern->val;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ camif->vp[VP_CODEC].state |= ST_VP_CONFIG;
+ camif->vp[VP_PREVIEW].state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s3c_camif_subdev_ctrl_ops = {
+ .s_ctrl = s3c_camif_subdev_s_ctrl,
+};
+
+static const char * const s3c_camif_test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ "Horizontal increment",
+ "Vertical increment",
+};
+
+int s3c_camif_create_subdev(struct camif_dev *camif)
+{
+ struct v4l2_ctrl_handler *handler = &camif->ctrl_handler;
+ struct v4l2_subdev *sd = &camif->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &s3c_camif_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strlcpy(sd->name, "S3C-CAMIF", sizeof(sd->name));
+
+ camif->pads[CAMIF_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ camif->pads[CAMIF_SD_PAD_SOURCE_C].flags = MEDIA_PAD_FL_SOURCE;
+ camif->pads[CAMIF_SD_PAD_SOURCE_P].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_init(&sd->entity, CAMIF_SD_PADS_NUM,
+ camif->pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 3);
+ camif->ctrl_test_pattern = v4l2_ctrl_new_std_menu_items(handler,
+ &s3c_camif_subdev_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
+ s3c_camif_test_pattern_menu);
+
+ camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x981f, V4L2_COLORFX_NONE);
+
+ camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+ if (handler->error) {
+ v4l2_ctrl_handler_free(handler);
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
+ V4L2_COLORFX_SET_CBCR, false);
+ if (!camif->variant->has_img_effect) {
+ camif->ctrl_colorfx->flags |= V4L2_CTRL_FLAG_DISABLED;
+ camif->ctrl_colorfx_cbcr->flags |= V4L2_CTRL_FLAG_DISABLED;
+ }
+ sd->ctrl_handler = handler;
+ v4l2_set_subdevdata(sd, camif);
+
+ return 0;
+}
+
+void s3c_camif_unregister_subdev(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = &camif->subdev;
+
+ /* Return if not registered */
+ if (v4l2_get_subdevdata(sd) == NULL)
+ return;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&camif->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+int s3c_camif_set_defaults(struct camif_dev *camif)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ struct camif_frame *f = &vp->out_frame;
+
+ vp->camif = camif;
+ vp->id = i;
+ vp->offset = camif->variant->vp_offset;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ vp->fmt_flags = i ? FMT_FL_S3C24XX_PREVIEW :
+ FMT_FL_S3C24XX_CODEC;
+ else
+ vp->fmt_flags = FMT_FL_S3C64XX;
+
+ vp->out_fmt = s3c_camif_find_format(vp, NULL, 0);
+ BUG_ON(vp->out_fmt == NULL);
+
+ memset(f, 0, sizeof(*f));
+ f->f_width = CAMIF_DEF_WIDTH;
+ f->f_height = CAMIF_DEF_HEIGHT;
+ f->rect.width = CAMIF_DEF_WIDTH;
+ f->rect.height = CAMIF_DEF_HEIGHT;
+
+ /* Scaler is always enabled */
+ vp->scaler.enable = 1;
+
+ vp->payload = (f->f_width * f->f_height *
+ vp->out_fmt->depth) / 8;
+ }
+
+ memset(&camif->mbus_fmt, 0, sizeof(camif->mbus_fmt));
+ camif->mbus_fmt.width = CAMIF_DEF_WIDTH;
+ camif->mbus_fmt.height = CAMIF_DEF_HEIGHT;
+ camif->mbus_fmt.code = camif_mbus_formats[0];
+
+ memset(&camif->camif_crop, 0, sizeof(camif->camif_crop));
+ camif->camif_crop.width = CAMIF_DEF_WIDTH;
+ camif->camif_crop.height = CAMIF_DEF_HEIGHT;
+
+ return 0;
+}
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
new file mode 100644
index 00000000000..0dd65376c06
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -0,0 +1,662 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+
+static char *camif_clocks[CLK_MAX_NUM] = {
+ /* HCLK CAMIF clock */
+ [CLK_GATE] = "camif",
+ /* CAMIF / external camera sensor master clock */
+ [CLK_CAM] = "camera",
+};
+
+static const struct camif_fmt camif_formats[] = {
+ {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR422P,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YVU 4:2:0 planar, Y/Cr/Cb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCRCB420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "RGB565, 16 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .ybpp = 2,
+ .color = IMG_FMT_RGB565,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "XRGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_XRGB8888,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_RGB666,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C64XX,
+ }
+};
+
+/**
+ * s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (codec or preview) to match against, or NULL to ignore
+ * @pixelformat: fourcc to match, ignored if null
+ * @index: index to the camif_formats array, ignored if negative
+ */
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat,
+ int index)
+{
+ const struct camif_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(camif_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
+ fmt = &camif_formats[i];
+ if (vp && !(vp->fmt_flags & fmt->flags))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
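+/*
+ * For example, s3c_camif_find_format(vp, &fourcc, -1) returns the entry
+ * matching @fourcc that is usable on the @vp path, or NULL if there is
+ * none, while s3c_camif_find_format(vp, NULL, 0) returns the first
+ * format usable on that path and serves as the default format lookup.
+ */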
+
+static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ unsigned int sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
+ while (sh--) {
+ unsigned int tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
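+/*
+ * Example: camif_get_scaler_factor(1280, 320, &ratio, &shift) picks the
+ * largest power-of-two ratio that does not exceed src/tar: 320 * 4 <= 1280,
+ * so ratio = 4 and shift = 2. A source at least 64 times larger than the
+ * target is rejected with -EINVAL.
+ */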
+
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler)
+{
+ struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
+ int source_x = camif_crop->width;
+ int source_y = camif_crop->height;
+ int target_x = vp->out_frame.rect.width;
+ int target_y = vp->out_frame.rect.height;
+ int ret;
+
+ if (vp->rotation == 90 || vp->rotation == 270)
+ swap(target_x, target_y);
+
+ ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
+ &scaler->h_shift);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
+ &scaler->v_shift);
+ if (ret < 0)
+ return ret;
+
+ scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
+ scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
+
+ scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
+ scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
+
+ scaler->scaleup_h = (target_x >= source_x);
+ scaler->scaleup_v = (target_y >= source_y);
+
+ scaler->copy = 0;
+
+ pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
+ scaler->pre_h_ratio, scaler->h_shift,
+ scaler->pre_v_ratio, scaler->v_shift);
+
+ pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
+ source_x, source_y, target_x, target_y,
+ scaler->scaleup_h, scaler->scaleup_v);
+
+ return 0;
+}
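+/*
+ * Continuing the example above for a 1280 pixel wide source scaled down to
+ * a 320 pixel wide target: pre_h_ratio = 4, h_shift = 2,
+ * pre_dst_width = 1280 / 4 = 320 and main_h_ratio = (1280 << 8) /
+ * (320 << 2) = 0x100, i.e. the pre-scaler does the whole reduction and the
+ * main scaler runs at a 1:1 ratio.
+ */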
+
+static int camif_register_sensor(struct camif_dev *camif)
+{
+ struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ camif->sensor.sd = NULL;
+
+ if (sensor->i2c_board_info.addr == 0)
+ return -EINVAL;
+
+ adapter = i2c_get_adapter(sensor->i2c_bus_num);
+ if (adapter == NULL) {
+ v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
+ sensor->i2c_bus_num);
+ return -EPROBE_DEFER;
+ }
+
+ sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
+ &sensor->i2c_board_info, NULL);
+ if (sd == NULL) {
+ i2c_put_adapter(adapter);
+ v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
+ sensor->i2c_board_info.type);
+ return -EPROBE_DEFER;
+ }
+ camif->sensor.sd = sd;
+
+ v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
+
+ /* Get initial pixel format and set it at the camif sink pad */
+ format.pad = 0;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+
+ if (ret < 0)
+ return 0;
+
+ format.pad = CAMIF_SD_PAD_SINK;
+ v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
+
+ v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
+ format.format.width, format.format.height,
+ format.format.code);
+ return 0;
+}
+
+static void camif_unregister_sensor(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = camif->sensor.sd;
+ struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
+ struct i2c_adapter *adapter;
+
+ if (client == NULL)
+ return;
+
+ adapter = client->adapter;
+ v4l2_device_unregister_subdev(sd);
+ camif->sensor.sd = NULL;
+ i2c_unregister_device(client);
+ if (adapter)
+ i2c_put_adapter(adapter);
+}
+
+static int camif_create_media_links(struct camif_dev *camif)
+{
+ int i, ret;
+
+ ret = media_entity_create_link(&camif->sensor.sd->entity, 0,
+ &camif->subdev.entity, CAMIF_SD_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
+ ret = media_entity_create_link(&camif->subdev.entity, i,
+ &camif->vp[i - 1].vdev.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ }
+
+ return ret;
+}
+
+static int camif_register_video_nodes(struct camif_dev *camif)
+{
+ int ret = s3c_camif_register_video_node(camif, VP_CODEC);
+ if (ret < 0)
+ return ret;
+
+ return s3c_camif_register_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_video_nodes(struct camif_dev *camif)
+{
+ s3c_camif_unregister_video_node(camif, VP_CODEC);
+ s3c_camif_unregister_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_media_entities(struct camif_dev *camif)
+{
+ camif_unregister_video_nodes(camif);
+ camif_unregister_sensor(camif);
+ s3c_camif_unregister_subdev(camif);
+}
+
+/*
+ * Media device
+ */
+static int camif_media_dev_register(struct camif_dev *camif)
+{
+ struct media_device *md = &camif->media_dev;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int ret;
+
+ memset(md, 0, sizeof(*md));
+ snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF",
+ ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
+ strlcpy(md->bus_info, "platform", sizeof(md->bus_info));
+ md->hw_revision = ip_rev;
+ md->driver_version = KERNEL_VERSION(1, 0, 0);
+
+ md->dev = camif->dev;
+
+ strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
+ v4l2_dev->mdev = md;
+
+ ret = v4l2_device_register(camif->dev, v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = media_device_register(md);
+ if (ret < 0)
+ v4l2_device_unregister(v4l2_dev);
+
+ return ret;
+}
+
+static void camif_clk_put(struct camif_dev *camif)
+{
+ int i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ if (IS_ERR_OR_NULL(camif->clock[i]))
+ continue;
+ clk_unprepare(camif->clock[i]);
+ clk_put(camif->clock[i]);
+ }
+}
+
+static int camif_clk_get(struct camif_dev *camif)
+{
+ int ret, i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
+ if (IS_ERR(camif->clock[i])) {
+ ret = PTR_ERR(camif->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(camif->clock[i]);
+ if (ret < 0) {
+ clk_put(camif->clock[i]);
+ camif->clock[i] = NULL;
+ goto err;
+ }
+ }
+ return 0;
+err:
+ camif_clk_put(camif);
+ dev_err(camif->dev, "failed to get clock: %s\n",
+ camif_clocks[i]);
+ return ret;
+}
+
+/*
+ * The CAMIF device has two relatively independent data processing paths
+ * that can source data from memory or the common camera input frontend.
+ * Register interrupts for each data processing path (camif_vp).
+ */
+static int camif_request_irqs(struct platform_device *pdev,
+ struct camif_dev *camif)
+{
+ int irq, ret, i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+
+ init_waitqueue_head(&vp->irq_queue);
+
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ %d\n", i);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
+ 0, dev_name(&pdev->dev), vp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int s3c_camif_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s3c_camif_plat_data *pdata = dev->platform_data;
+ struct s3c_camif_drvdata *drvdata;
+ struct camif_dev *camif;
+ struct resource *mres;
+ int ret = 0;
+
+ camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
+ if (!camif)
+ return -ENOMEM;
+
+ spin_lock_init(&camif->slock);
+ mutex_init(&camif->lock);
+
+ camif->dev = dev;
+
+ if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
+ dev_err(dev, "missing or invalid platform data\n");
+ return -EINVAL;
+ }
+
+ camif->pdata = *pdata;
+ drvdata = (void *)platform_get_device_id(pdev)->driver_data;
+ camif->variant = drvdata->variant;
+
+ mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ camif->io_base = devm_request_and_ioremap(dev, mres);
+ if (!camif->io_base) {
+ dev_err(dev, "failed to obtain I/O memory\n");
+ return -ENOENT;
+ }
+
+ ret = camif_request_irqs(pdev, camif);
+ if (ret < 0)
+ return ret;
+
+ ret = pdata->gpio_get();
+ if (ret < 0)
+ return ret;
+
+ ret = s3c_camif_create_subdev(camif);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = camif_clk_get(camif);
+ if (ret < 0)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, camif);
+ clk_set_rate(camif->clock[CLK_CAM],
+ camif->pdata.sensor.clock_frequency);
+
+ dev_info(dev, "sensor clock frequency: %lu\n",
+ clk_get_rate(camif->clock[CLK_CAM]));
+ /*
+ * Set initial pixel format, resolution and crop rectangle.
+ * Must be done before the sensor subdev is registered, as some
+ * settings are overridden with values from the sensor subdev.
+ */
+ s3c_camif_set_defaults(camif);
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ /* Initialize contiguous memory allocator */
+ camif->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(camif->alloc_ctx)) {
+ ret = PTR_ERR(camif->alloc_ctx);
+ goto err_alloc;
+ }
+
+ ret = camif_media_dev_register(camif);
+ if (ret < 0)
+ goto err_mdev;
+
+ ret = camif_register_sensor(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
+ if (ret < 0)
+ goto err_sens;
+
+ mutex_lock(&camif->media_dev.graph_mutex);
+
+ ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_register_video_nodes(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_create_media_links(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ mutex_unlock(&camif->media_dev.graph_mutex);
+ pm_runtime_put(dev);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&camif->media_dev.graph_mutex);
+err_sens:
+ v4l2_device_unregister(&camif->v4l2_dev);
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+err_mdev:
+ vb2_dma_contig_cleanup_ctx(camif->alloc_ctx);
+err_alloc:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+err_pm:
+ camif_clk_put(camif);
+err_clk:
+ s3c_camif_unregister_subdev(camif);
+err_sd:
+ pdata->gpio_put();
+ return ret;
+}
+
+static int __devexit s3c_camif_remove(struct platform_device *pdev)
+{
+ struct camif_dev *camif = platform_get_drvdata(pdev);
+ struct s3c_camif_plat_data *pdata = &camif->pdata;
+
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+ v4l2_device_unregister(&camif->v4l2_dev);
+
+ pm_runtime_disable(&pdev->dev);
+ camif_clk_put(camif);
+ pdata->gpio_put();
+
+ return 0;
+}
+
+static int s3c_camif_runtime_resume(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ clk_enable(camif->clock[CLK_GATE]);
+ /* null op on s3c244x */
+ clk_enable(camif->clock[CLK_CAM]);
+ return 0;
+}
+
+static int s3c_camif_runtime_suspend(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ /* null op on s3c244x */
+ clk_disable(camif->clock[CLK_CAM]);
+
+ clk_disable(camif->clock[CLK_GATE]);
+ return 0;
+}
+
+static const struct s3c_camif_variant s3c244x_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 640,
+ .max_sc_out_width = 640,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 480,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C244X_CAMIF_IP_REV,
+};
+
+static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
+ .variant = &s3c244x_camif_variant,
+ .bus_clk_freq = 24000000UL,
+};
+
+static const struct s3c_camif_variant s3c6410_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 720,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C6410_CAMIF_IP_REV,
+ .has_img_effect = 1,
+ .vp_offset = 0x20,
+};
+
+static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
+ .variant = &s3c6410_camif_variant,
+ .bus_clk_freq = 133000000UL,
+};
+
+static struct platform_device_id s3c_camif_driver_ids[] = {
+ {
+ .name = "s3c2440-camif",
+ .driver_data = (unsigned long)&s3c244x_camif_drvdata,
+ }, {
+ .name = "s3c6410-camif",
+ .driver_data = (unsigned long)&s3c6410_camif_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
+
+static const struct dev_pm_ops s3c_camif_pm_ops = {
+ .runtime_suspend = s3c_camif_runtime_suspend,
+ .runtime_resume = s3c_camif_runtime_resume,
+};
+
+static struct platform_driver s3c_camif_driver = {
+ .probe = s3c_camif_probe,
+ .remove = __devexit_p(s3c_camif_remove),
+ .id_table = s3c_camif_driver_ids,
+ .driver = {
+ .name = S3C_CAMIF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s3c_camif_pm_ops,
+ }
+};
+
+module_platform_driver(s3c_camif_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
new file mode 100644
index 00000000000..261134baa65
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -0,0 +1,393 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_CORE_H_
+#define CAMIF_CORE_H_
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-core.h>
+#include <media/s3c_camif.h>
+
+#define S3C_CAMIF_DRIVER_NAME "s3c-camif"
+#define CAMIF_REQ_BUFS_MIN 3
+#define CAMIF_MAX_OUT_BUFS 4
+#define CAMIF_MAX_PIX_WIDTH 4096
+#define CAMIF_MAX_PIX_HEIGHT 4096
+#define SCALER_MAX_RATIO 64
+#define CAMIF_DEF_WIDTH 640
+#define CAMIF_DEF_HEIGHT 480
+#define CAMIF_STOP_TIMEOUT 1500 /* ms */
+
+#define S3C244X_CAMIF_IP_REV 0x20 /* 2.0 */
+#define S3C2450_CAMIF_IP_REV 0x30 /* 3.0 - not implemented, not tested */
+#define S3C6400_CAMIF_IP_REV 0x31 /* 3.1 - not implemented, not tested */
+#define S3C6410_CAMIF_IP_REV 0x32 /* 3.2 */
+
+/* struct camif_vp::state */
+
+#define ST_VP_PENDING (1 << 0)
+#define ST_VP_RUNNING (1 << 1)
+#define ST_VP_STREAMING (1 << 2)
+#define ST_VP_SENSOR_STREAMING (1 << 3)
+
+#define ST_VP_ABORTING (1 << 4)
+#define ST_VP_OFF (1 << 5)
+#define ST_VP_LASTIRQ (1 << 6)
+
+#define ST_VP_CONFIG (1 << 8)
+
+#define CAMIF_SD_PAD_SINK 0
+#define CAMIF_SD_PAD_SOURCE_C 1
+#define CAMIF_SD_PAD_SOURCE_P 2
+#define CAMIF_SD_PADS_NUM 3
+
+enum img_fmt {
+ IMG_FMT_RGB565 = 0x0010,
+ IMG_FMT_RGB666,
+ IMG_FMT_XRGB8888,
+ IMG_FMT_YCBCR420 = 0x0020,
+ IMG_FMT_YCRCB420,
+ IMG_FMT_YCBCR422P,
+ IMG_FMT_YCBYCR422 = 0x0040,
+ IMG_FMT_YCRYCB422,
+ IMG_FMT_CBYCRY422,
+ IMG_FMT_CRYCBY422,
+};
+
+#define img_fmt_is_rgb(x) ((x) & 0x10)
+#define img_fmt_is_ycbcr(x) ((x) & 0x60)
+
+/* Possible values for struct camif_fmt::flags */
+#define FMT_FL_S3C24XX_CODEC (1 << 0)
+#define FMT_FL_S3C24XX_PREVIEW (1 << 1)
+#define FMT_FL_S3C64XX (1 << 2)
+
+/**
+ * struct camif_fmt - pixel format description
+ * @name: format description
+ * @fourcc: fourcc code for this format, 0 if not applicable
+ * @color: a corresponding enum img_fmt
+ * @colplanes: number of physically contiguous data planes
+ * @flags: indicate for which SoCs revisions this format is valid
+ * @depth: bits per pixel (total)
+ * @ybpp: number of luminance bytes per pixel
+ */
+struct camif_fmt {
+ char *name;
+ u32 fourcc;
+ u32 color;
+ u16 colplanes;
+ u16 flags;
+ u8 depth;
+ u8 ybpp;
+};
+
+/**
+ * struct camif_dma_offset - pixel offset information for DMA
+ * @initial: offset (in pixels) to first pixel
+ * @line: offset (in pixels) from end of line to start of next line
+ */
+struct camif_dma_offset {
+ int initial;
+ int line;
+};
+
+/**
+ * struct camif_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @dma_offset: DMA offset configuration
+ */
+struct camif_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ struct camif_dma_offset dma_offset;
+};
+
+/* CAMIF clocks enumeration */
+enum {
+ CLK_GATE,
+ CLK_CAM,
+ CLK_MAX_NUM,
+};
+
+struct vp_pix_limits {
+ u16 max_out_width;
+ u16 max_sc_out_width;
+ u16 out_width_align;
+ u16 max_height;
+ u8 min_out_width;
+ u16 out_hor_offset_align;
+};
+
+struct camif_pix_limits {
+ u16 win_hor_offset_align;
+};
+
+/**
+ * struct s3c_camif_variant - CAMIF variant structure
+ * @vp_pix_limits: pixel limits for the codec and preview paths
+ * @pix_limits: pixel limits for the camera input interface
+ * @ip_revision: the CAMIF IP revision: 0x20 for s3c244x, 0x32 for s3c6410
+ * @has_img_effect: set when this CAMIF version implements the image effect block
+ * @vp_offset: register address offset adjustment specific to this CAMIF version
+ */
+struct s3c_camif_variant {
+ struct vp_pix_limits vp_pix_limits[2];
+ struct camif_pix_limits pix_limits;
+ u8 ip_revision;
+ u8 has_img_effect;
+ unsigned int vp_offset;
+};
+
+struct s3c_camif_drvdata {
+ const struct s3c_camif_variant *variant;
+ unsigned long bus_clk_freq;
+};
+
+struct camif_scaler {
+ u8 scaleup_h;
+ u8 scaleup_v;
+ u8 copy;
+ u8 enable;
+ u32 h_shift;
+ u32 v_shift;
+ u32 pre_h_ratio;
+ u32 pre_v_ratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_h_ratio;
+ u32 main_v_ratio;
+};
+
+struct camif_dev;
+
+/**
+ * struct camif_vp - CAMIF data processing path structure (codec/preview)
+ * @irq_queue: interrupt handling waitqueue
+ * @irq: interrupt number for this data path
+ * @camif: pointer to the camif structure
+ * @pad: media pad for the video node
+ * @vdev: video device
+ * @ctrl_handler: video node controls handler
+ * @owner: file handle that owns the streaming
+ * @vb_queue: videobuf2 buffer queue
+ * @pending_buf_q: pending (empty) buffers queue head
+ * @active_buf_q: active (being written) buffers queue head
+ * @active_buffers: number of buffers set up at the DMA engine
+ * @buf_index: index of the last empty buffer set up in H/W
+ * @frame_sequence: image frame sequence counter
+ * @reqbufs_count: the number of buffers requested
+ * @scaler: the scaler structure
+ * @out_fmt: pixel format at this video path output
+ * @payload: the output data frame payload size
+ * @out_frame: the output pixel resolution
+ * @state: the video path's state
+ * @fmt_flags: flags determining supported pixel formats
+ * @id: CAMIF id, 0 - codec, 1 - preview
+ * @rotation: current image rotation value
+ * @hflip: apply horizontal flip if set
+ * @vflip: apply vertical flip if set
+ */
+struct camif_vp {
+ wait_queue_head_t irq_queue;
+ int irq;
+ struct camif_dev *camif;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_fh *owner;
+ struct vb2_queue vb_queue;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ unsigned int active_buffers;
+ unsigned int buf_index;
+ unsigned int frame_sequence;
+ unsigned int reqbufs_count;
+ struct camif_scaler scaler;
+ const struct camif_fmt *out_fmt;
+ unsigned int payload;
+ struct camif_frame out_frame;
+ unsigned int state;
+ u16 fmt_flags;
+ u8 id;
+ u8 rotation;
+ u8 hflip;
+ u8 vflip;
+ unsigned int offset;
+};
+
+/* Video processing path enumeration */
+#define VP_CODEC 0
+#define VP_PREVIEW 1
+#define CAMIF_VP_NUM 2
+
+/**
+ * struct camif_dev - the CAMIF driver private data structure
+ * @media_dev: top-level media device structure
+ * @v4l2_dev: root v4l2_device
+ * @subdev: camera interface ("catchcam") subdev
+ * @mbus_fmt: camera input media bus format
+ * @camif_crop: camera input interface crop rectangle
+ * @pads: the camif subdev's media pads
+ * @stream_count: the camera interface streaming reference counter
+ * @sensor: image sensor data structure
+ * @m_pipeline: video entity pipeline description
+ * @ctrl_handler: v4l2 control handler (owned by @subdev)
+ * @test_pattern: test pattern controls
+ * @vp: video path (DMA) description (codec/preview)
+ * @alloc_ctx: memory buffer allocator context
+ * @variant: variant information for this device
+ * @dev: the CAMIF device's struct device pointer
+ * @pdata: a copy of the driver's platform data
+ * @clock: clocks required for the CAMIF operation
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting CAMIF registers
+ * @io_base: start address of the mmaped CAMIF registers
+ */
+struct camif_dev {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_rect camif_crop;
+ struct media_pad pads[CAMIF_SD_PADS_NUM];
+ int stream_count;
+
+ struct cam_sensor {
+ struct v4l2_subdev *sd;
+ short power_count;
+ short stream_count;
+ } sensor;
+ struct media_pipeline *m_pipeline;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_test_pattern;
+ struct {
+ struct v4l2_ctrl *ctrl_colorfx;
+ struct v4l2_ctrl *ctrl_colorfx_cbcr;
+ };
+ u8 test_pattern;
+ u8 colorfx;
+ u8 colorfx_cb;
+ u8 colorfx_cr;
+
+ struct camif_vp vp[CAMIF_VP_NUM];
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ const struct s3c_camif_variant *variant;
+ struct device *dev;
+ struct s3c_camif_plat_data pdata;
+ struct clk *clock[CLK_MAX_NUM];
+ struct mutex lock;
+ spinlock_t slock;
+ void __iomem *io_base;
+};
+
+/**
+ * struct camif_addr - Y/Cb/Cr DMA start address structure
+ * @y: luminance plane dma address
+ * @cb: Cb plane dma address
+ * @cr: Cr plane dma address
+ */
+struct camif_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/**
+ * struct camif_buffer - the camif video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA start addresses
+ * @index: an identifier of this buffer at the DMA engine
+ */
+struct camif_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ struct camif_addr paddr;
+ unsigned int index;
+};
+
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat, int index);
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx);
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx);
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv);
+int s3c_camif_create_subdev(struct camif_dev *camif);
+void s3c_camif_unregister_subdev(struct camif_dev *camif);
+int s3c_camif_set_defaults(struct camif_dev *camif);
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler);
+
+static inline void camif_active_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->active_buf_q);
+ vp->active_buffers++;
+}
+
+static inline struct camif_buffer *camif_active_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->active_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+}
+
+static inline struct camif_buffer *camif_active_queue_peek(
+ struct camif_vp *vp, int index)
+{
+ struct camif_buffer *tmp, *buf;
+
+ if (WARN_ON(list_empty(&vp->active_buf_q)))
+ return NULL;
+
+ list_for_each_entry_safe(buf, tmp, &vp->active_buf_q, list) {
+ if (buf->index == index) {
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+ }
+ }
+
+ return NULL;
+}
+
+static inline void camif_pending_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->pending_buf_q);
+}
+
+static inline struct camif_buffer *camif_pending_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->pending_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* CAMIF_CORE_H_ */
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
new file mode 100644
index 00000000000..1a3b4fc05ec
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -0,0 +1,606 @@
+/*
+ * Samsung s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include "camif-regs.h"
+
+#define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off))
+#define camif_read(_camif, _off) readl((_camif)->io_base + (_off))
+
+void camif_hw_reset(struct camif_dev *camif)
+{
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg |= CISRCFMT_ITU601_8BIT;
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+
+ /* S/W reset */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_SWRST;
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIGCTRL_IRQ_LEVEL;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_SWRST;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+}
+
+void camif_hw_clear_pending_irq(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_IRQ_CLR(vp->id);
+ camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+/*
+ * Sets video test pattern (off, color bar, horizontal or vertical gradient).
+ * External sensor pixel clock must be active for the test pattern to work.
+ */
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern)
+{
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_TESTPATTERN_MASK;
+ cfg |= (pattern << 27);
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb)
+{
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS },
+ { V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE },
+ { V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE },
+ { V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING },
+ { V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE },
+ { V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY },
+ };
+ unsigned int i, cfg;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++)
+ if (colorfx[i].id == effect)
+ break;
+
+ if (i == ARRAY_SIZE(colorfx))
+ return;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset));
+ /* Set effect */
+ cfg &= ~CIIMGEFF_FIN_MASK;
+ cfg |= colorfx[i].value;
+ /* Set both paths */
+ if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) {
+ if (effect == V4L2_COLORFX_NONE)
+ cfg &= ~CIIMGEFF_IE_ENABLE_MASK;
+ else
+ cfg |= CIIMGEFF_IE_ENABLE_MASK;
+ }
+ cfg &= ~CIIMGEFF_PAT_CBCR_MASK;
+ cfg |= cr | (cb << 13);
+ camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg);
+}
+
+static const u32 src_pixfmt_map[8][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
+};
+
+/* Set camera input pixel format and resolution */
+void camif_hw_set_source_format(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ int i;
+ u32 cfg;
+
+ for (i = ARRAY_SIZE(src_pixfmt_map) - 1; i >= 0; i--) {
+ if (src_pixfmt_map[i][0] == mf->code)
+ break;
+ }
+
+ if (i < 0) {
+ i = 0;
+ dev_err(camif->dev,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK);
+ cfg |= (mf->width << 16) | mf->height;
+ cfg |= src_pixfmt_map[i][1];
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void camif_hw_set_camera_crop(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ /* Note: s3c244x requirement: left = (f_width - rect.width) / 2 */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN);
+ cfg |= (crop->left << 16) | crop->top;
+ if (crop->left != 0 || crop->top != 0)
+ cfg |= CIWDOFST_WINOFSEN;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ hoff2 = mf->width - crop->width - crop->left;
+ voff2 = mf->height - crop->height - crop->top;
+ cfg = (hoff2 << 16) | voff2;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg);
+ }
+}
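+/*
+ * Example: with a 640x480 mbus format and a 320x240 crop at (160, 120),
+ * CIWDOFST gets the left/top offsets (160, 120) and, on s3c6410,
+ * CIWDOFST2 gets the offsets from the right and bottom edges:
+ * hoff2 = 640 - 320 - 160 = 160, voff2 = 480 - 240 - 120 = 120.
+ */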
+
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ if (vp->id == 0)
+ cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB |
+ CIWDOFST_CLROVCOFICR);
+ else
+ cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB |
+ CIWDOFST_CLROVPRFICR);
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+}
+
+/* Set the camera bus signal polarities */
+void camif_hw_set_camera_bus(struct camif_dev *camif)
+{
+ unsigned int flags = camif->pdata.sensor.flags;
+
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+
+ cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC |
+ CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= CIGCTRL_INVPOLVSYNC;
+ /*
+ * HREF is normally high during frame active data
+ * transmission and low during horizontal synchronization
+ * period. Thus HREF active high means HSYNC active low.
+ */
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CIGCTRL_INVPOLFIELD;
+ cfg |= CIGCTRL_FIELDMODE;
+ }
+
+ pr_debug("Setting CIGCTRL to: %#x\n", cfg);
+
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_output_addr(struct camif_vp *vp,
+ struct camif_addr *paddr, int i)
+{
+ struct camif_dev *camif = vp->camif;
+
+ camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y);
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV
+ || vp->id == VP_CODEC) {
+ camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i),
+ paddr->cb);
+ camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i),
+ paddr->cr);
+ }
+
+ pr_debug("dst_buf[%d]: %#X, cb: %#X, cr: %#X\n",
+ i, paddr->y, paddr->cb, paddr->cr);
+}
+
+static void camif_hw_set_out_dma_size(struct camif_vp *vp)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst)
+{
+ unsigned int nwords = width * ybpp / 4;
+ unsigned int div, rem;
+
+ if (WARN_ON(width < 8 || (width * ybpp) & 7))
+ return;
+
+ for (div = 16; div >= 2; div /= 2) {
+ if (nwords < div)
+ continue;
+
+ rem = nwords & (div - 1);
+ if (rem == 0) {
+ *mburst = div;
+ *rburst = div;
+ break;
+ }
+ if (rem == div / 2 || rem == div / 4) {
+ *mburst = div;
+ *rburst = rem;
+ break;
+ }
+ }
+}
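+/*
+ * Example: a 640 pixel wide line of a planar YUV format (ybpp = 1) gives
+ * nwords = 160, which divides evenly by 16, so the main and remaining
+ * bursts are both 16 words. A 720 pixel line gives nwords = 180 =
+ * 11 * 16 + 4, so the main burst is 16 and the remaining burst is 4.
+ */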
+
+void camif_hw_set_output_dma(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int ymburst = 0, yrburst = 0;
+ u32 cfg;
+
+ camif_hw_set_out_dma_size(vp);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ struct camif_dma_offset *offset = &frame->dma_offset;
+ /* Set the input dma offsets. */
+ cfg = S3C_CISS_OFFS_INITIAL(offset->initial);
+ cfg |= S3C_CISS_OFFS_LINE(offset->line);
+ camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg);
+ }
+
+ /* Configure DMA burst values */
+ camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset));
+ cfg &= ~CICTRL_BURST_MASK;
+
+ cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst);
+ cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2);
+
+ camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst);
+}
+
+void camif_hw_set_input_path(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id));
+ cfg &= ~MSCTRL_SEL_DMA_CAM;
+ camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg);
+}
+
+void camif_hw_set_target_format(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width,
+ frame->f_height, vp->out_fmt->color);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ /* We currently support only YCbCr 4:2:2 at the camera input */
+ cfg |= CITRGFMT_IN422;
+ cfg &= ~CITRGFMT_OUT422;
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ cfg |= CITRGFMT_OUT422;
+ } else {
+ cfg &= ~CITRGFMT_OUTFORMAT_MASK;
+ switch (vp->out_fmt->color) {
+ case IMG_FMT_RGB565...IMG_FMT_XRGB8888:
+ cfg |= CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ case IMG_FMT_YCBCR422P:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422I;
+ break;
+ }
+ }
+
+ /* Rotation is only supported by s3c64xx */
+ if (vp->rotation == 90 || vp->rotation == 270)
+ cfg |= (frame->f_height << 16) | frame->f_width;
+ else
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+
+ /* Target area, output pixel width * height */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset));
+ cfg &= ~CITAREA_MASK;
+ cfg |= (frame->f_width * frame->f_height);
+ camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg);
+}
+
+void camif_hw_set_flip(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif,
+ S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+
+ cfg &= ~CITRGFMT_FLIP_MASK;
+
+ if (vp->hflip)
+ cfg |= CITRGFMT_FLIP_Y_MIRROR;
+ if (vp->vflip)
+ cfg |= CITRGFMT_FLIP_X_MIRROR;
+
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_hw_set_prescaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *sc = &vp->scaler;
+ u32 cfg, shfactor, addr;
+
+ addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset);
+
+ shfactor = 10 - (sc->h_shift + sc->v_shift);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio;
+ camif_write(camif, addr, cfg);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg);
+}
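+/*
+ * Example: for a pre-scaler configured with h_shift = v_shift = 2 (a 4:1
+ * reduction in each direction) the shift factor written above is
+ * 10 - (2 + 2) = 6.
+ */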
+
+void camif_s3c244x_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS |
+ CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT);
+
+ if (scaler->enable) {
+ if (scaler->scaleup_h) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_H;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_H;
+ }
+ if (scaler->scaleup_v) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_V;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_V;
+ }
+ } else {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALERBYPASS;
+ }
+
+ cfg |= ((scaler->main_h_ratio & 0x1ff) << 16);
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ if (vp->id == VP_PREVIEW) {
+ if (color == IMG_FMT_XRGB8888)
+ cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT;
+ cfg |= CIPRSCCTRL_SAMPLE;
+ }
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE
+ | CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V
+ | CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE
+ | CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK
+ | CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION
+ | CISCCTRL_MAIN_RATIO_MASK);
+
+ cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE);
+
+ if (!scaler->enable) {
+ cfg |= CISCCTRL_SCALERBYPASS;
+ } else {
+ if (scaler->scaleup_h)
+ cfg |= CISCCTRL_SCALEUP_H;
+ if (scaler->scaleup_v)
+ cfg |= CISCCTRL_SCALEUP_V;
+ if (scaler->copy)
+ cfg |= CISCCTRL_ONE2ONE;
+ }
+
+ switch (color) {
+ case IMG_FMT_RGB666:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB666;
+ break;
+ case IMG_FMT_XRGB8888:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB888;
+ break;
+ }
+
+ cfg |= (scaler->main_h_ratio & 0x1ff) << 16;
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_hw_set_scaler(struct camif_vp *vp)
+{
+ unsigned int ip_rev = vp->camif->variant->ip_revision;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_s3c244x_hw_set_scaler(vp);
+ else
+ camif_s3c64xx_hw_set_scaler(vp);
+}
+
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on)
+{
+ u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (on)
+ cfg |= CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~CISCCTRL_SCALERSTART;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable)
+{
+ u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (enable)
+ cfg |= CICTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~CICTRL_LASTIRQ_ENABLE;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_enable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ camif->stream_count++;
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id);
+
+ if (vp->scaler.enable)
+ cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (camif->stream_count == 1)
+ cfg |= CIIMGCPT_IMGCPTEN;
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+}
+
+void camif_hw_disable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (WARN_ON(--(camif->stream_count) < 0))
+ camif->stream_count = 0;
+
+ if (camif->stream_count == 0)
+ cfg &= ~CIIMGCPT_IMGCPTEN;
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+}
+
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" },
+ { S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" },
+ { S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" },
+ { S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" },
+ { S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" },
+ { S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" },
+ { S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" },
+ { S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" },
+ { S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" },
+ { S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" },
+ { S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" },
+ { S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" },
+ { S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" },
+ { S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" },
+ { S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" },
+ { S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" },
+ { S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" },
+ { S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" },
+ { S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" },
+ { S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" },
+ { S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" },
+ { S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" },
+ { S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" },
+ { S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" },
+ { S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" },
+ { S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" },
+ { S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" },
+ { S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" },
+ { S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" },
+ { S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" },
+ { S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" },
+ { S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" },
+ { S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(camif->io_base + registers[i].offset);
+ printk(KERN_INFO "%s:\t0x%08x\n", registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h
new file mode 100644
index 00000000000..af2d472ea1d
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.h
@@ -0,0 +1,269 @@
+/*
+ * Register definition file for s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_REGS_H_
+#define CAMIF_REGS_H_
+
+#include "camif-core.h"
+#include <media/s3c_camif.h>
+
+/*
+ * The id argument indicates the processing path:
+ * id = 0 - codec (FIMC C), 1 - preview (FIMC P).
+ */
+
+/* Camera input format */
+#define S3C_CAMIF_REG_CISRCFMT 0x00
+#define CISRCFMT_ITU601_8BIT (1 << 31)
+#define CISRCFMT_ITU656_8BIT (0 << 31)
+#define CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define CISRCFMT_ORDER422_CRYCBY (3 << 14)
+#define CISRCFMT_ORDER422_MASK (3 << 14)
+#define CISRCFMT_SIZE_CAM_MASK (0x1fff << 16 | 0x1fff)
+
+/* Window offset */
+#define S3C_CAMIF_REG_CIWDOFST 0x04
+#define CIWDOFST_WINOFSEN (1 << 31)
+#define CIWDOFST_CLROVCOFIY (1 << 30)
+#define CIWDOFST_CLROVRLB_PR (1 << 28)
+/* #define CIWDOFST_CLROVPRFIY (1 << 27) */
+#define CIWDOFST_CLROVCOFICB (1 << 15)
+#define CIWDOFST_CLROVCOFICR (1 << 14)
+#define CIWDOFST_CLROVPRFICB (1 << 13)
+#define CIWDOFST_CLROVPRFICR (1 << 12)
+#define CIWDOFST_OFST_MASK (0x7ff << 16 | 0x7ff)
+
+/* Window offset 2 */
+#define S3C_CAMIF_REG_CIWDOFST2 0x14
+#define CIWDOFST2_OFST2_MASK (0xfff << 16 | 0xfff)
+
+/* Global control */
+#define S3C_CAMIF_REG_CIGCTRL 0x08
+#define CIGCTRL_SWRST (1 << 31)
+#define CIGCTRL_CAMRST (1 << 30)
+#define CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define CIGCTRL_INVPOLPCLK (1 << 26)
+#define CIGCTRL_INVPOLVSYNC (1 << 25)
+#define CIGCTRL_INVPOLHREF (1 << 24)
+#define CIGCTRL_IRQ_OVFEN (1 << 22)
+#define CIGCTRL_HREF_MASK (1 << 21)
+#define CIGCTRL_IRQ_LEVEL (1 << 20)
+/* IRQ_CLR_C, IRQ_CLR_P */
+#define CIGCTRL_IRQ_CLR(id) (1 << (19 - (id)))
+#define CIGCTRL_FIELDMODE (1 << 2)
+#define CIGCTRL_INVPOLFIELD (1 << 1)
+#define CIGCTRL_CAM_INTERLACE (1 << 0)
+
+/* Y DMA output frame start address. n = 0..3. */
+#define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4)
+/* Cb plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICBSA(id, n) (0x28 + (id) * 0x54 + (n) * 4)
+/* Cr plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICRSA(id, n) (0x38 + (id) * 0x54 + (n) * 4)
+
+/* CICOTRGFMT, CIPRTRGFMT - Target format */
+#define S3C_CAMIF_REG_CITRGFMT(id, _offs) (0x48 + (id) * (0x34 + (_offs)))
+#define CITRGFMT_IN422 (1 << 31) /* only for s3c24xx */
+#define CITRGFMT_OUT422 (1 << 30) /* only for s3c24xx */
+#define CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422I (2 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_RGB (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_MASK (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_TARGETHSIZE(x) ((x) << 16)
+#define CITRGFMT_FLIP_NORMAL (0 << 14)
+#define CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define CITRGFMT_FLIP_180 (3 << 14)
+#define CITRGFMT_FLIP_MASK (3 << 14)
+/* Preview path only */
+#define CITRGFMT_ROT90_PR (1 << 13)
+#define CITRGFMT_TARGETVSIZE(x) ((x) << 0)
+#define CITRGFMT_TARGETSIZE_MASK ((0x1fff << 16) | 0x1fff)
+
+/* CICOCTRL, CIPRCTRL. Output DMA control. */
+#define S3C_CAMIF_REG_CICTRL(id, _offs) (0x4c + (id) * (0x34 + (_offs)))
+#define CICTRL_BURST_MASK (0xfffff << 4)
+/* xBURSTn - 5-bits width */
+#define CICTRL_YBURST1(x) ((x) << 19)
+#define CICTRL_YBURST2(x) ((x) << 14)
+#define CICTRL_RGBBURST1(x) ((x) << 19)
+#define CICTRL_RGBBURST2(x) ((x) << 14)
+#define CICTRL_CBURST1(x) ((x) << 9)
+#define CICTRL_CBURST2(x) ((x) << 4)
+#define CICTRL_LASTIRQ_ENABLE (1 << 2)
+#define CICTRL_ORDER422_MASK (3 << 0)
+
+/* CICOSCPRERATIO, CIPRSCPRERATIO. Pre-scaler control 1. */
+#define S3C_CAMIF_REG_CISCPRERATIO(id, _offs) (0x50 + (id) * (0x34 + (_offs)))
+
+/* CICOSCPREDST, CIPRSCPREDST. Pre-scaler control 2. */
+#define S3C_CAMIF_REG_CISCPREDST(id, _offs) (0x54 + (id) * (0x34 + (_offs)))
+
+/* CICOSCCTRL, CIPRSCCTRL. Main scaler control. */
+#define S3C_CAMIF_REG_CISCCTRL(id, _offs) (0x58 + (id) * (0x34 + (_offs)))
+#define CISCCTRL_SCALERBYPASS (1 << 31)
+/* s3c244x preview path only, s3c64xx both */
+#define CIPRSCCTRL_SAMPLE (1 << 31)
+/* 0 - 16-bit RGB, 1 - 24-bit RGB */
+#define CIPRSCCTRL_RGB_FORMAT_24BIT (1 << 30) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_H (1 << 29) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_V (1 << 28) /* only for s3c244x */
+/* s3c64xx */
+#define CISCCTRL_SCALEUP_H (1 << 30)
+#define CISCCTRL_SCALEUP_V (1 << 29)
+#define CISCCTRL_SCALEUP_MASK (0x3 << 29)
+#define CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define CISCCTRL_INTERLACE (1 << 25)
+#define CISCCTRL_SCALERSTART (1 << 15)
+#define CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define CISCCTRL_ONE2ONE (1 << 9)
+#define CISCCTRL_MAIN_RATIO_MASK (0x1ff << 16 | 0x1ff)
+
+/* CICOTAREA, CIPRTAREA. Target area for DMA (Hsize x Vsize). */
+#define S3C_CAMIF_REG_CITAREA(id, _offs) (0x5c + (id) * (0x34 + (_offs)))
+#define CITAREA_MASK 0xfffffff
+
+/* Codec (id = 0) or preview (id = 1) path status. */
+#define S3C_CAMIF_REG_CISTATUS(id, _offs) (0x64 + (id) * (0x34 + (_offs)))
+#define CISTATUS_OVFIY_STATUS (1 << 31)
+#define CISTATUS_OVFICB_STATUS (1 << 30)
+#define CISTATUS_OVFICR_STATUS (1 << 29)
+#define CISTATUS_OVF_MASK (0x7 << 29)
+#define CIPRSTATUS_OVF_MASK (0x3 << 30)
+#define CISTATUS_VSYNC_STATUS (1 << 28)
+#define CISTATUS_FRAMECNT_MASK (3 << 26)
+#define CISTATUS_FRAMECNT(__reg) (((__reg) >> 26) & 0x3)
+#define CISTATUS_WINOFSTEN_STATUS (1 << 25)
+#define CISTATUS_IMGCPTEN_STATUS (1 << 22)
+#define CISTATUS_IMGCPTENSC_STATUS (1 << 21)
+#define CISTATUS_VSYNC_A_STATUS (1 << 20)
+#define CISTATUS_FRAMEEND_STATUS (1 << 19) /* 17 on s3c64xx */
+
+/* Image capture enable */
+#define S3C_CAMIF_REG_CIIMGCPT(_offs) (0xa0 + (_offs))
+#define CIIMGCPT_IMGCPTEN (1 << 31)
+#define CIIMGCPT_IMGCPTEN_SC(id) (1 << (30 - (id)))
+/* Frame control: 1 - one-shot, 0 - free run */
+#define CIIMGCPT_CPT_FREN_ENABLE(id) (1 << (25 - (id)))
+#define CIIMGCPT_CPT_FRMOD_ENABLE (0 << 18)
+#define CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Capture sequence */
+#define S3C_CAMIF_REG_CICPTSEQ 0xc4
+
+/* Image effects */
+#define S3C_CAMIF_REG_CIIMGEFF(_offs) (0xb0 + (_offs))
+#define CIIMGEFF_IE_ENABLE(id) (1 << (30 + (id)))
+#define CIIMGEFF_IE_ENABLE_MASK (3 << 30)
+/* Image effect: 1 - after scaler, 0 - before scaler */
+#define CIIMGEFF_IE_AFTER_SC (1 << 29)
+#define CIIMGEFF_FIN_MASK (7 << 26)
+#define CIIMGEFF_FIN_BYPASS (0 << 26)
+#define CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+#define CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define CIIMGEFF_PAT_CR(x) (x)
+
+/* MSCOY0SA, MSPRY0SA. Y/Cb/Cr frame start address for input DMA. */
+#define S3C_CAMIF_REG_MSY0SA(id) (0xd4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0SA(id) (0xd8 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0SA(id) (0xdc + ((id) * 0x2c))
+
+/* MSCOY0END, MSPRY0END. Y/Cb/Cr frame end address for input DMA. */
+#define S3C_CAMIF_REG_MSY0END(id) (0xe0 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0END(id) (0xe4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0END(id) (0xe8 + ((id) * 0x2c))
+
+/* MSCOYOFF, MSPRYOFF. Y/Cb/Cr offset. n: 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSYOFF(id) (0x118 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCBOFF(id) (0x11c + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCROFF(id) (0x120 + ((id) * 0x2c))
+
+/* Real input DMA data size. n = 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSWIDTH(id) (0xf8 + (id) * 0x2c)
+#define AUTOLOAD_ENABLE (1 << 31)
+#define ADDR_CH_DIS (1 << 30)
+#define MSHEIGHT(x) (((x) & 0x3ff) << 16)
+#define MSWIDTH(x) ((x) & 0x3ff)
+
+/* Input DMA control. n = 0 - codec, 1 - preview */
+#define S3C_CAMIF_REG_MSCTRL(id) (0xfc + (id) * 0x2c)
+#define MSCTRL_ORDER422_M_YCBYCR (0 << 4)
+#define MSCTRL_ORDER422_M_YCRYCB (1 << 4)
+#define MSCTRL_ORDER422_M_CBYCRY (2 << 4)
+#define MSCTRL_ORDER422_M_CRYCBY (3 << 4)
+/* 0 - camera, 1 - DMA */
+#define MSCTRL_SEL_DMA_CAM (1 << 3)
+#define MSCTRL_INFORMAT_M_YCBCR420 (0 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422 (1 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422I (2 << 1)
+#define MSCTRL_INFORMAT_M_RGB (3 << 1)
+#define MSCTRL_ENVID_M (1 << 0)
+
+/* CICOSCOSY, CIPRSCOSY. Scan line Y/Cb/Cr offset. */
+#define S3C_CAMIF_REG_CISSY(id) (0x12c + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCB(id) (0x130 + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCR(id) (0x134 + (id) * 0x0c)
+#define S3C_CISS_OFFS_INITIAL(x) ((x) << 16)
+#define S3C_CISS_OFFS_LINE(x) ((x) << 0)
+
+/* ------------------------------------------------------------------ */
+
+void camif_hw_reset(struct camif_dev *camif);
+void camif_hw_clear_pending_irq(struct camif_vp *vp);
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp);
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable);
+void camif_hw_set_input_path(struct camif_vp *vp);
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on);
+void camif_hw_enable_capture(struct camif_vp *vp);
+void camif_hw_disable_capture(struct camif_vp *vp);
+void camif_hw_set_camera_bus(struct camif_dev *camif);
+void camif_hw_set_source_format(struct camif_dev *camif);
+void camif_hw_set_camera_crop(struct camif_dev *camif);
+void camif_hw_set_scaler(struct camif_vp *vp);
+void camif_hw_set_flip(struct camif_vp *vp);
+void camif_hw_set_output_dma(struct camif_vp *vp);
+void camif_hw_set_target_format(struct camif_vp *vp);
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern);
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb);
+void camif_hw_set_output_addr(struct camif_vp *vp, struct camif_addr *paddr,
+ int index);
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label);
+
+static inline u32 camif_hw_get_status(struct camif_vp *vp)
+{
+ return readl(vp->camif->io_base + S3C_CAMIF_REG_CISTATUS(vp->id,
+ vp->offset));
+}
+
+#endif /* CAMIF_REGS_H_ */
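Editor's note: for context on the path-indexed macros above, a minimal illustrative sketch (not part of the patch) of programming the per-frame output DMA addresses. camif_write() comes from the driver's camif-regs.c, and the y/cb/cr field names of struct camif_addr are assumptions of this example.

/* Sketch only: program Y/Cb/Cr output address slot 'index' on path 'id'. */
static void camif_example_set_addr(struct camif_dev *camif, int id, int index,
				   struct camif_addr *paddr)
{
	/* e.g. S3C_CAMIF_REG_CIYSA(0, 1) = 0x1c, S3C_CAMIF_REG_CIYSA(1, 1) = 0x70 */
	camif_write(camif, S3C_CAMIF_REG_CIYSA(id, index), paddr->y);
	if (id == 0) {
		/* Cb/Cr plane address registers exist on the codec path only */
		camif_write(camif, S3C_CAMIF_REG_CICBSA(id, index), paddr->cb);
		camif_write(camif, S3C_CAMIF_REG_CICRSA(id, index), paddr->cr);
	}
}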
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index 891ee873c62..fdb6740248a 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -1230,6 +1230,14 @@ static int fimc_cap_qbuf(struct file *file, void *priv,
return vb2_qbuf(&fimc->vid_cap.vbq, buf);
}
+static int fimc_cap_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return vb2_expbuf(&fimc->vid_cap.vbq, eb);
+}
+
static int fimc_cap_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
@@ -1354,6 +1362,7 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_qbuf = fimc_cap_qbuf,
.vidioc_dqbuf = fimc_cap_dqbuf,
+ .vidioc_expbuf = fimc_cap_expbuf,
.vidioc_prepare_buf = fimc_cap_prepare_buf,
.vidioc_create_bufs = fimc_cap_create_bufs,
@@ -1729,7 +1738,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
q = &fimc->vid_cap.vbq;
memset(q, 0, sizeof(*q));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->drv_priv = fimc->vid_cap.ctx;
q->ops = &fimc_capture_qops;
q->mem_ops = &vb2_dma_contig_memops;
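Editor's note: the hunks above wire VIDIOC_EXPBUF into the capture node and allow DMABUF I/O on its queue. Below is a minimal userspace sketch (not part of the patch) of exporting one plane of a previously requested buffer as a DMA-BUF fd; the field names follow the documented v4l2_exportbuffer API and the capture-mplane buffer type is an assumption of the example.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* vid_fd: open V4L2 device node; buffers allocated earlier with VIDIOC_REQBUFS */
int export_first_plane(int vid_fd)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	expbuf.index = 0;		/* buffer 0 */
	expbuf.plane = 0;		/* first plane */
	expbuf.flags = O_CLOEXEC;

	if (ioctl(vid_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	return expbuf.fd;		/* DMA-BUF file descriptor */
}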
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 62afed3162e..1d21da4bd24 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -105,7 +105,7 @@ static void fimc_device_run(void *priv)
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
unsigned long flags;
- u32 ret;
+ int ret;
if (WARN(!ctx, "Null context\n"))
return;
@@ -439,6 +439,15 @@ static int fimc_m2m_dqbuf(struct file *file, void *fh,
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
+static int fimc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+
static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
@@ -607,6 +616,7 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querybuf = fimc_m2m_querybuf,
.vidioc_qbuf = fimc_m2m_qbuf,
.vidioc_dqbuf = fimc_m2m_dqbuf,
+ .vidioc_expbuf = fimc_m2m_expbuf,
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
@@ -622,7 +632,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &fimc_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
@@ -633,7 +643,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &fimc_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index 0531ab70a94..1bd5678cfeb 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -213,7 +213,7 @@ static int fimc_pipeline_close(struct fimc_pipeline *p)
* @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
int i, ret;
@@ -547,7 +547,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (ret)
break;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
if (flags == 0 || sensor == NULL)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index eb6a70b0f82..6dad9a74f61 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -636,6 +636,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EINVAL;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -813,6 +826,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_crop = vidioc_g_crop,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2af6d522f4a..f92f6ddd739 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1165,6 +1165,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return ret;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -1542,7 +1555,7 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
}
static int vidioc_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
+ const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
@@ -1568,6 +1581,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_s_parm = vidioc_s_parm,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 367db755228..2895333866f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -28,7 +28,7 @@ static struct s5p_mfc_pm *pm;
static struct s5p_mfc_dev *p_dev;
#ifdef CLK_DEBUG
-atomic_t clk_ref;
+static atomic_t clk_ref;
#endif
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 0c1cd895ff6..7379e77bf4e 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -19,7 +19,6 @@
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -698,6 +697,15 @@ static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
+static int mxr_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_expbuf(&layer->vb_queue, eb);
+}
+
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct mxr_layer *layer = video_drvdata(file);
@@ -725,6 +733,7 @@ static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
.vidioc_querybuf = mxr_querybuf,
.vidioc_qbuf = mxr_qbuf,
.vidioc_dqbuf = mxr_dqbuf,
+ .vidioc_expbuf = mxr_expbuf,
/* Streaming control */
.vidioc_streamon = mxr_streamon,
.vidioc_streamoff = mxr_streamoff,
@@ -1093,7 +1102,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
layer->vb_queue = (struct vb2_queue) {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- .io_modes = VB2_MMAP | VB2_USERPTR,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
.drv_priv = layer,
.buf_struct_size = sizeof(struct mxr_buffer),
.ops = &mxr_video_qops,
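Editor's note: with VB2_DMABUF added to io_modes above, the mixer's output queue can consume buffers exported elsewhere (for instance by the FIMC capture node patched earlier in this series). A hedged userspace sketch of queueing such an fd, assuming the queue was set up with VIDIOC_REQBUFS using V4L2_MEMORY_DMABUF and that each buffer has a single plane; the function and variable names are made up for the example.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int queue_dmabuf(int vid_fd, int dmabuf_fd, unsigned int length)
{
	struct v4l2_plane plane;
	struct v4l2_buffer buf;

	memset(&plane, 0, sizeof(plane));
	plane.m.fd = dmabuf_fd;		/* fd obtained via VIDIOC_EXPBUF */
	plane.length = length;
	plane.bytesused = length;	/* payload size for an output buffer */

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = 0;
	buf.m.planes = &plane;
	buf.length = 1;			/* number of planes */

	return ioctl(vid_fd, VIDIOC_QBUF, &buf);
}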
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 9afe1e7bde7..cb6791e62bd 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -19,6 +19,7 @@ config MX1_VIDEO
config VIDEO_MX1
tristate "i.MX1/i.MXL CMOS Sensor Interface driver"
+ depends on BROKEN
depends on VIDEO_DEV && ARCH_MX1 && SOC_CAMERA
select FIQ
select VIDEOBUF_DMA_CONTIG
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index d3cfe65c049..39a77f0b886 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -34,7 +34,7 @@
#include <media/videobuf-dma-contig.h>
#include <media/videobuf-dma-sg.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#define DRIVER_NAME "omap1-camera"
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index d3f0b84e2d7..4e3735679f1 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -645,11 +645,17 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct soc_camera_device *icd = file->private_data;
- int err = -EINVAL;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ dev_dbg(icd->pdev, "read called, buf %p\n", buf);
+
+ if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ)
+ return vb2_read(&icd->vb2_vidq, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
dev_err(icd->pdev, "camera device read not implemented\n");
- return err;
+ return -EINVAL;
}
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1048,10 +1054,8 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
- int ret;
-
icd->parent = ici->v4l2_dev.dev;
- ret = soc_camera_probe(icd);
+ soc_camera_probe(icd);
}
}
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index b366b050a3d..0d59b9db83c 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -39,7 +39,6 @@
/* Wake up at about 30 fps */
#define WAKE_NUMERATOR 30
#define WAKE_DENOMINATOR 1001
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
@@ -352,11 +351,6 @@ static void precalculate_bars(struct vivi_dev *dev)
}
}
-#define TSTAMP_MIN_Y 24
-#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
-#define TSTAMP_INPUT_X 10
-#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-
/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
{
@@ -1308,7 +1302,7 @@ static int __init vivi_create_instance(int inst)
/* initialize queue */
q = &dev->vb_vidq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivi_buffer);
q->ops = &vivi_video_qops;
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 12c70e876f5..a739ad492e7 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -82,7 +82,7 @@ static struct radio_isa_card *rtrack_alloc(void)
#define AIMS_BIT_VOL_UP (1 << 6) /* active low */
#define AIMS_BIT_VOL_DN (1 << 7) /* active low */
-void rtrack_set_pins(void *handle, u8 pins)
+static void rtrack_set_pins(void *handle, u8 pins)
{
struct radio_isa_card *isa = handle;
struct rtrack *rt = container_of(isa, struct rtrack, isa);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 697a421c994..643d80ac28f 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -645,7 +645,8 @@ static int __init cadet_init(void)
set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+ res = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr);
+ if (res < 0)
goto err_hdl;
v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io);
return 0;
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 3c0067de432..84b7b9f4385 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -191,7 +191,7 @@ static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io)
return false;
}
-struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
+static struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
struct device *pdev)
{
struct v4l2_device *v4l2_dev;
@@ -207,8 +207,9 @@ struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
return isa;
}
-int radio_isa_common_probe(struct radio_isa_card *isa, struct device *pdev,
- int radio_nr, unsigned region_size)
+static int radio_isa_common_probe(struct radio_isa_card *isa,
+ struct device *pdev,
+ int radio_nr, unsigned region_size)
{
const struct radio_isa_driver *drv = isa->drv;
const struct radio_isa_ops *ops = drv->ops;
@@ -287,7 +288,8 @@ err_dev_reg:
return res;
}
-int radio_isa_common_remove(struct radio_isa_card *isa, unsigned region_size)
+static int radio_isa_common_remove(struct radio_isa_card *isa,
+ unsigned region_size)
{
const struct radio_isa_ops *ops = isa->drv->ops;
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 227dcdb54df..c260a2a354b 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -64,7 +64,7 @@ bool pnp_attached;
#define FMI_BIT_VOL_SW (1 << 3)
#define FMI_BIT_TUN_STRQ (1 << 4)
-void fmi_set_pins(void *handle, u8 pins)
+static void fmi_set_pins(void *handle, u8 pins)
{
struct fmi *fmi = handle;
u8 bits = FMI_BIT_TUN_STRQ;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index d0c90531007..36aec575e0e 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -145,7 +145,7 @@ struct tea5764_device {
};
/* I2C code related */
-int tea5764_i2c_read(struct tea5764_device *radio)
+static int tea5764_i2c_read(struct tea5764_device *radio)
{
int i;
u16 *p = (u16 *) &radio->regs;
@@ -165,7 +165,7 @@ int tea5764_i2c_read(struct tea5764_device *radio)
return 0;
}
-int tea5764_i2c_write(struct tea5764_device *radio)
+static int tea5764_i2c_write(struct tea5764_device *radio)
{
struct tea5764_write_regs wr;
struct tea5764_regs *r = &radio->regs;
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index e3079c142c5..bd61b3bd0ca 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1769,7 +1769,7 @@ exit:
}
/* si4713_ioctl - deal with private ioctls (only rnl for now) */
-long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct si4713_device *sdev = to_si4713_device(sd);
struct si4713_rnl *rnl = arg;
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index d84ad9dad32..aac0f025f76 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -60,7 +60,7 @@
#define fmdbg(format, ...) \
printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
#else /* DEBUG */
-#define fmdbg(format, ...)
+#define fmdbg(format, ...) do {} while(0)
#endif
enum {
FM_MODE_OFF,
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index bf867a6b5ea..602ef7ac8c2 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -742,7 +742,7 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
break;
- if (blk_idx < FM_RDS_BLK_IDX_A || blk_idx > FM_RDS_BLK_IDX_D) {
+ if (blk_idx > FM_RDS_BLK_IDX_D) {
fmdbg("Block sequence mismatch\n");
rds->last_blk_idx = -1;
break;
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index 3dd9fc097c4..ebf09a3927d 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -305,7 +305,7 @@ int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
- if (vol_to_set < FM_RX_VOLUME_MIN || vol_to_set > FM_RX_VOLUME_MAX) {
+ if (vol_to_set > FM_RX_VOLUME_MAX) {
fmerr("Volume is not within(%d-%d) range\n",
FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX);
return -EINVAL;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 49bb356ed14..2d6fb26a017 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -784,7 +784,7 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote)
rdev->priv = ati_remote;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER;
+ rdev->allowed_protos = RC_BIT_OTHER;
rdev->driver_name = "ati_remote";
rdev->open = ati_remote_rc_open;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d05ac15b5de..22231dd4f62 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -329,7 +329,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
}
/* Sense current received carrier */
-void ene_rx_sense_carrier(struct ene_device *dev)
+static void ene_rx_sense_carrier(struct ene_device *dev)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -1003,7 +1003,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
- goto error1;
+ goto failure;
/* validate resources */
error = -ENODEV;
@@ -1014,10 +1014,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
- goto error;
+ goto failure;
if (!pnp_irq_valid(pnp_dev, 0))
- goto error;
+ goto failure;
spin_lock_init(&dev->hw_lock);
@@ -1033,7 +1033,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
- goto error;
+ goto failure;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
@@ -1046,7 +1046,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
learning_mode_force = false;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
@@ -1078,30 +1078,27 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
- dev->hw_io = -1;
- dev->irq = -1;
- goto error;
+ goto failure;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
- dev->irq = -1;
- goto error;
+ goto failure2;
}
error = rc_register_device(rdev);
if (error < 0)
- goto error;
+ goto failure3;
pr_notice("driver has been successfully loaded\n");
return 0;
-error:
- if (dev && dev->irq >= 0)
- free_irq(dev->irq, dev);
- if (dev && dev->hw_io >= 0)
- release_region(dev->hw_io, ENE_IO_SIZE);
-error1:
+
+failure3:
+ free_irq(dev->irq, dev);
+failure2:
+ release_region(dev->hw_io, ENE_IO_SIZE);
+failure:
rc_free_device(rdev);
kfree(dev);
return error;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 52fd7696b1b..936c3f79b62 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -541,7 +541,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* Set up the rc device */
rdev->priv = fintek;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->input_name = FINTEK_DESCRIPTION;
@@ -684,12 +684,12 @@ static struct pnp_driver fintek_driver = {
.shutdown = fintek_shutdown,
};
-int fintek_init(void)
+static int fintek_init(void)
{
return pnp_register_driver(&fintek_driver);
}
-void fintek_exit(void)
+static void fintek_exit(void)
{
pnp_unregister_driver(&fintek_driver);
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 04cb272db16..ba1a1eb356c 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -95,7 +95,7 @@ static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
if (pdata->allowed_protos)
rcdev->allowed_protos = pdata->allowed_protos;
else
- rcdev->allowed_protos = RC_TYPE_ALL;
+ rcdev->allowed_protos = RC_BIT_ALL;
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 51d7057aca0..5a9163da63c 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -499,7 +499,7 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 5dd0386604f..8f6a28921ed 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1001,7 +1001,7 @@ static void imon_touch_display_timeout(unsigned long data)
* it is not, so we must acquire it prior to calling send_packet, which
* requires that the lock is held.
*/
-static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
int retval;
struct imon_context *ictx = rc->priv;
@@ -1010,31 +1010,27 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
- if (rc_type && !(rc_type & rc->allowed_protos))
+ if (*rc_type && !(*rc_type & rc->allowed_protos))
dev_warn(dev, "Looks like you're trying to use an IR protocol "
"this device does not support\n");
- switch (rc_type) {
- case RC_TYPE_RC6:
+ if (*rc_type & RC_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
- break;
- case RC_TYPE_UNKNOWN:
- case RC_TYPE_OTHER:
+ *rc_type = RC_BIT_RC6_MCE;
+ } else if (*rc_type & RC_BIT_OTHER) {
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
- default:
+ *rc_type = RC_BIT_OTHER;
+ } else {
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
+ *rc_type = RC_BIT_OTHER;
}
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
@@ -1048,7 +1044,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
if (retval)
goto out;
- ictx->rc_type = rc_type;
+ ictx->rc_type = *rc_type;
ictx->pad_mouse = false;
out:
@@ -1323,7 +1319,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
rel_x = buf[2];
rel_y = buf[3];
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
@@ -1390,7 +1386,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
buf[0] = 0x01;
buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
@@ -1511,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
kc = imon_panel_key_lookup(scancode);
} else {
scancode = be32_to_cpu(*((u32 *)buf));
- if (ictx->rc_type == RC_TYPE_RC6) {
+ if (ictx->rc_type == RC_BIT_RC6_MCE) {
ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
@@ -1744,7 +1740,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
{
u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
- u64 allowed_protos = RC_TYPE_OTHER;
+ u64 allowed_protos = RC_BIT_OTHER;
switch (ffdc_cfg_byte) {
/* iMON Knob, no display, iMON IR + vol knob */
@@ -1775,13 +1771,13 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
/* iMON LCD, MCE IR */
case 0x9f:
dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_LCD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
default:
dev_info(ictx->dev, "Unknown 0xffdc device, "
@@ -1789,7 +1785,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/* We don't know which one it is, allow user to set the
* RC6 one from userspace if OTHER wasn't correct. */
- allowed_protos |= RC_TYPE_RC6;
+ allowed_protos |= RC_BIT_RC6_MCE;
break;
}
@@ -1875,7 +1871,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->priv = ictx;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
+ rdev->allowed_protos = RC_BIT_OTHER | RC_BIT_RC6_MCE; /* iMON PAD or MCE */
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
@@ -1893,7 +1889,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
imon_set_display_type(ictx);
- if (ictx->rc_type == RC_TYPE_RC6)
+ if (ictx->rc_type == RC_BIT_RC6_MCE)
rdev->map_name = RC_MAP_IMON_MCE;
else
rdev->map_name = RC_MAP_IMON_PAD;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 035668e27f6..69edffb9fe9 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -47,7 +47,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
- if (!(dev->raw->enabled_protocols & RC_TYPE_JVC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_JVC))
return 0;
if (!is_timing_event(ev)) {
@@ -174,7 +174,7 @@ out:
}
static struct ir_raw_handler jvc_handler = {
- .protocols = RC_TYPE_JVC,
+ .protocols = RC_BIT_JVC,
.decode = ir_jvc_decode,
};
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 870c93052fd..9945e5e7f61 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!(dev->raw->enabled_protocols & RC_TYPE_LIRC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_LIRC))
return 0;
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
@@ -408,7 +408,7 @@ static int ir_lirc_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler lirc_handler = {
- .protocols = RC_TYPE_LIRC,
+ .protocols = RC_BIT_LIRC,
.decode = ir_lirc_decode,
.raw_register = ir_lirc_register,
.raw_unregister = ir_lirc_unregister,
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 3784ebf80ec..33fafa4cf7c 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -216,7 +216,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
unsigned long delay;
- if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
+ if (!(dev->raw->enabled_protocols & RC_BIT_MCE_KBD))
return 0;
if (!is_timing_event(ev)) {
@@ -422,7 +422,7 @@ static int ir_mce_kbd_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler mce_kbd_handler = {
- .protocols = RC_TYPE_MCE_KBD,
+ .protocols = RC_BIT_MCE_KBD,
.decode = ir_mce_kbd_decode,
.raw_register = ir_mce_kbd_register,
.raw_unregister = ir_mce_kbd_unregister,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 2ca509e6e16..a47ee363496 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -52,7 +52,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 address, not_address, command, not_command;
bool send_32bits = false;
- if (!(dev->raw->enabled_protocols & RC_TYPE_NEC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_NEC))
return 0;
if (!is_timing_event(ev)) {
@@ -201,7 +201,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler nec_handler = {
- .protocols = RC_TYPE_NEC,
+ .protocols = RC_BIT_NEC,
.decode = ir_nec_decode,
};
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 9ab663a507a..5b4d1ddeac4 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -52,8 +52,8 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5))
- return 0;
+ if (!(dev->raw->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X)))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,6 +128,10 @@ again:
if (data->wanted_bits == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5X)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
xdata = (data->bits & 0x0003F) >> 0;
command = (data->bits & 0x00FC0) >> 6;
system = (data->bits & 0x1F000) >> 12;
@@ -141,6 +145,10 @@ again:
} else {
/* RC5 */
u8 command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
command = (data->bits & 0x0003F) >> 0;
system = (data->bits & 0x007C0) >> 6;
toggle = (data->bits & 0x00800) ? 1 : 0;
@@ -164,7 +172,7 @@ out:
}
static struct ir_raw_handler rc5_handler = {
- .protocols = RC_TYPE_RC5,
+ .protocols = RC_BIT_RC5 | RC_BIT_RC5X,
.decode = ir_rc5_decode,
};
diff --git a/drivers/media/rc/ir-rc5-sz-decoder.c b/drivers/media/rc/ir-rc5-sz-decoder.c
index ec8d4a2e2c5..fd807a8308d 100644
--- a/drivers/media/rc/ir-rc5-sz-decoder.c
+++ b/drivers/media/rc/ir-rc5-sz-decoder.c
@@ -48,8 +48,8 @@ static int ir_rc5_sz_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle, command, system;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5_SZ))
- return 0;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5_SZ))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,7 +128,7 @@ out:
}
static struct ir_raw_handler rc5_sz_handler = {
- .protocols = RC_TYPE_RC5_SZ,
+ .protocols = RC_BIT_RC5_SZ,
.decode = ir_rc5_sz_decode,
};
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 4cfdd7fa4bb..e19072ffb36 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -89,7 +89,9 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 toggle;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC6))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
return 0;
if (!is_timing_event(ev)) {
@@ -271,7 +273,9 @@ out:
}
static struct ir_raw_handler rc6_handler = {
- .protocols = RC_TYPE_RC6,
+ .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
};
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 82e6c1e282d..9e76c7b40af 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -479,18 +479,7 @@ struct platform_driver lirc_rx51_platform_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init lirc_rx51_init(void)
-{
- return platform_driver_register(&lirc_rx51_platform_driver);
-}
-module_init(lirc_rx51_init);
-
-static void __exit lirc_rx51_exit(void)
-{
- platform_driver_unregister(&lirc_rx51_platform_driver);
-}
-module_exit(lirc_rx51_exit);
+module_platform_driver(lirc_rx51_platform_driver);
MODULE_DESCRIPTION("LIRC TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7e54ec57bcf..7e69a3b6537 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -58,7 +58,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 address, command, not_command;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
+ if (!(dev->raw->enabled_protocols & RC_BIT_SANYO))
return 0;
if (!is_timing_event(ev)) {
@@ -179,7 +179,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler sanyo_handler = {
- .protocols = RC_TYPE_SANYO,
+ .protocols = RC_BIT_SANYO,
.decode = ir_sanyo_decode,
};
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index dab98b37621..fb914342cf4 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -45,7 +45,8 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 device, subdevice, function;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SONY))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20)))
return 0;
if (!is_timing_event(ev)) {
@@ -123,16 +124,28 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
switch (data->count) {
case 12:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY12)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits << 3) & 0xF8);
subdevice = 0;
function = bitrev8((data->bits >> 4) & 0xFE);
break;
case 15:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY15)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 0) & 0xFF);
subdevice = 0;
function = bitrev8((data->bits >> 7) & 0xFE);
break;
case 20:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY20)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 5) & 0xF8);
subdevice = bitrev8((data->bits >> 0) & 0xFF);
function = bitrev8((data->bits >> 12) & 0xFE);
@@ -157,7 +170,7 @@ out:
}
static struct ir_raw_handler sony_handler = {
- .protocols = RC_TYPE_SONY,
+ .protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
.decode = ir_sony_decode,
};
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 24c77a42fc3..5e5a7f2b818 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1563,7 +1563,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* set up ir-core props */
rdev->priv = itdev;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
@@ -1708,12 +1708,12 @@ static struct pnp_driver ite_driver = {
.shutdown = ite_shutdown,
};
-int ite_init(void)
+static int ite_init(void)
{
return pnp_register_driver(&ite_driver);
}
-void ite_exit(void)
+static void ite_exit(void)
{
pnp_unregister_driver(&ite_driver);
}
diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c
index 124c7228ba8..f0da960560b 100644
--- a/drivers/media/rc/keymaps/rc-imon-mce.c
+++ b/drivers/media/rc/keymaps/rc-imon-mce.c
@@ -121,7 +121,7 @@ static struct rc_map_list imon_mce_map = {
.scan = imon_mce,
.size = ARRAY_SIZE(imon_mce),
/* its RC6, but w/a hardware decoder */
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_IMON_MCE,
}
};
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 753e43ec787..ef4006fe4de 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -97,7 +97,7 @@ static struct rc_map_list rc6_mce_map = {
.map = {
.scan = rc6_mce,
.size = ARRAY_SIZE(rc6_mce),
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_RC6_MCE,
}
};
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 850547fe711..b2146cd99fd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1205,7 +1205,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 2ea913a44ae..e4ea89a11ee 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -472,6 +472,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}
+#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
@@ -504,7 +505,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
return carrier;
}
-
+#endif
/*
* set carrier frequency
*
@@ -620,7 +621,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
DEFINE_IR_RAW_EVENT(rawir);
- u32 carrier;
u8 sample;
int i;
@@ -629,9 +629,6 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
if (debug)
nvt_dump_rx_buf(nvt);
- if (nvt->carrier_detect_enabled)
- carrier = nvt_rx_carrier_detect(nvt);
-
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
init_ir_raw_event(&rawir);
@@ -1045,7 +1042,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
@@ -1220,12 +1217,12 @@ static struct pnp_driver nvt_driver = {
.shutdown = nvt_shutdown,
};
-int nvt_init(void)
+static int nvt_init(void)
{
return pnp_register_driver(&nvt_driver);
}
-void nvt_exit(void)
+static void nvt_exit(void)
{
pnp_unregister_driver(&nvt_driver);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 0d5e0872a2e..7c3674ff5ea 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -103,7 +103,6 @@ struct nvt_dev {
/* rx settings */
bool learning_enabled;
- bool carrier_detect_enabled;
/* track cir wake state */
u8 wake_state;
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index f9be68132c6..53d02827a47 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -195,7 +195,7 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index cabc19c1051..601d1ac1c68 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -725,25 +725,36 @@ static struct class ir_input_class = {
.devnode = ir_devnode,
};
+/*
+ * These are the protocol textual descriptions that are
+ * used by the sysfs protocols file. Note that the order
+ * of the entries is relevant.
+ */
static struct {
u64 type;
char *name;
} proto_names[] = {
- { RC_TYPE_UNKNOWN, "unknown" },
- { RC_TYPE_RC5, "rc-5" },
- { RC_TYPE_NEC, "nec" },
- { RC_TYPE_RC6, "rc-6" },
- { RC_TYPE_JVC, "jvc" },
- { RC_TYPE_SONY, "sony" },
- { RC_TYPE_RC5_SZ, "rc-5-sz" },
- { RC_TYPE_SANYO, "sanyo" },
- { RC_TYPE_MCE_KBD, "mce_kbd" },
- { RC_TYPE_LIRC, "lirc" },
- { RC_TYPE_OTHER, "other" },
+ { RC_BIT_NONE, "none" },
+ { RC_BIT_OTHER, "other" },
+ { RC_BIT_UNKNOWN, "unknown" },
+ { RC_BIT_RC5 |
+ RC_BIT_RC5X, "rc-5" },
+ { RC_BIT_NEC, "nec" },
+ { RC_BIT_RC6_0 |
+ RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE, "rc-6" },
+ { RC_BIT_JVC, "jvc" },
+ { RC_BIT_SONY12 |
+ RC_BIT_SONY15 |
+ RC_BIT_SONY20, "sony" },
+ { RC_BIT_RC5_SZ, "rc-5-sz" },
+ { RC_BIT_SANYO, "sanyo" },
+ { RC_BIT_MCE_KBD, "mce_kbd" },
+ { RC_BIT_LIRC, "lirc" },
};
-#define PROTO_NONE "none"
-
/**
* show_protocols() - shows the current IR protocol(s)
* @device: the device descriptor
@@ -790,6 +801,9 @@ static ssize_t show_protocols(struct device *device,
tmp += sprintf(tmp, "[%s] ", proto_names[i].name);
else if (allowed & proto_names[i].type)
tmp += sprintf(tmp, "%s ", proto_names[i].name);
+
+ if (allowed & proto_names[i].type)
+ allowed &= ~proto_names[i].type;
}
if (tmp != buf)
@@ -867,26 +881,20 @@ static ssize_t store_protocols(struct device *device,
disable = false;
}
- if (!enable && !disable && !strncasecmp(tmp, PROTO_NONE, sizeof(PROTO_NONE))) {
- tmp += sizeof(PROTO_NONE);
- mask = 0;
- count++;
- } else {
- for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
- if (!strcasecmp(tmp, proto_names[i].name)) {
- tmp += strlen(proto_names[i].name);
- mask = proto_names[i].type;
- break;
- }
- }
- if (i == ARRAY_SIZE(proto_names)) {
- IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- ret = -EINVAL;
- goto out;
+ for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
+ if (!strcasecmp(tmp, proto_names[i].name)) {
+ mask = proto_names[i].type;
+ break;
}
- count++;
}
+ if (i == ARRAY_SIZE(proto_names)) {
+ IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
+ return -EINVAL;
+ }
+
+ count++;
+
if (enable)
type |= mask;
else if (disable)
@@ -902,7 +910,7 @@ static ssize_t store_protocols(struct device *device,
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, type);
+ rc = dev->change_protocol(dev, &type);
if (rc < 0) {
IR_dprintk(1, "Error setting protocols to 0x%llx\n",
(long long)type);
@@ -1117,7 +1125,8 @@ int rc_register_device(struct rc_dev *dev)
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, rc_map->rc_type);
+ u64 rc_type = (1 << rc_map->rc_type);
+ rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_raw;
}
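Editor's note: as the rc-main.c and imon.c hunks above show, change_protocol now receives a pointer to the requested RC_BIT_* mask and is expected to write back the subset it actually enabled. A minimal hypothetical driver-side sketch; foo_dev and foo_hw_set_rc5() are made-up names, not part of the patch.

static int foo_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
	struct foo_dev *foo = rc->priv;

	if (*rc_type & RC_BIT_RC5) {
		foo_hw_set_rc5(foo);
		*rc_type = RC_BIT_RC5;	/* report what was really enabled */
		return 0;
	}
	return -EINVAL;
}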
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 9f5a17bb5ef..a8887aba9fa 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1082,7 +1082,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->dev.parent = dev;
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index d6f4bfe0939..c720f12f661 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -322,7 +322,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index fef05235234..f0921b5483e 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -316,7 +316,7 @@ static int __devinit ttusbir_probe(struct usb_interface *intf,
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 7c9b5f33113..7f3c476dde0 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -7,6 +7,7 @@
* with minor modifications.
*
* Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2012 Sean Young <sean@mess.org>
* Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
*
* Dedicated to my daughter Matilda, without whose loving attention this
@@ -22,9 +23,7 @@
* o IR Receive
* o IR Transmit
* o Wake-On-CIR functionality
- *
- * To do:
- * o Learning
+ * o Carrier detection
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -149,6 +148,12 @@
#define WBCIR_REGSEL_MASK 0x20
/* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */
#define WBCIR_REG_ADDR0 0x00
+/* Enable carrier counter */
+#define WBCIR_CNTR_EN 0x01
+/* Reset carrier counter */
+#define WBCIR_CNTR_R 0x02
+/* Invert TX */
+#define WBCIR_IRTX_INV 0x04
/* Valid banks for the SP3 UART */
enum wbcir_bank {
@@ -184,7 +189,7 @@ enum wbcir_txstate {
};
/* Misc */
-#define WBCIR_NAME "winbond-cir"
+#define WBCIR_NAME "Winbond CIR"
#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
@@ -207,7 +212,8 @@ struct wbcir_data {
/* RX state */
enum wbcir_rxstate rxstate;
struct led_trigger *rxtrigger;
- struct ir_raw_event rxev;
+ int carrier_report_enabled;
+ u32 pulse_duration;
/* TX state */
enum wbcir_txstate txstate;
@@ -330,6 +336,30 @@ wbcir_to_rc6cells(u8 val)
*****************************************************************************/
static void
+wbcir_carrier_report(struct wbcir_data *data)
+{
+ unsigned counter = inb(data->ebase + WBCIR_REG_ECEIR_CNT_LO) |
+ inb(data->ebase + WBCIR_REG_ECEIR_CNT_HI) << 8;
+
+ if (counter > 0 && counter < 0xffff) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.carrier_report = 1;
+ ev.carrier = DIV_ROUND_CLOSEST(counter * 1000000u,
+ data->pulse_duration);
+
+ ir_raw_event_store(data->dev, &ev);
+ }
+
+ /* reset and restart the counter */
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+}
+
+static void
wbcir_idle_rx(struct rc_dev *dev, bool idle)
{
struct wbcir_data *data = dev->priv;
@@ -339,9 +369,16 @@ wbcir_idle_rx(struct rc_dev *dev, bool idle)
led_trigger_event(data->rxtrigger, LED_FULL);
}
- if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE)
+ if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) {
+ data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ led_trigger_event(data->rxtrigger, LED_OFF);
+
+ if (data->carrier_report_enabled)
+ wbcir_carrier_report(data);
+
/* Tell hardware to go idle by setting RXINACTIVE */
outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR);
+ }
}
static void
@@ -349,21 +386,22 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
{
u8 irdata;
DEFINE_IR_RAW_EVENT(rawir);
+ unsigned duration;
/* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) {
irdata = inb(data->sbase + WBCIR_REG_SP3_RXDATA);
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
+
+ duration = ((irdata & 0x7F) + 1) * 2;
rawir.pulse = irdata & 0x80 ? false : true;
- rawir.duration = US_TO_NS(((irdata & 0x7F) + 1) * 10);
- ir_raw_event_store_with_filter(data->dev, &rawir);
- }
+ rawir.duration = US_TO_NS(duration);
- /* Check if we should go idle */
- if (data->dev->idle) {
- led_trigger_event(data->rxtrigger, LED_OFF);
- data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ if (rawir.pulse)
+ data->pulse_duration += duration;
+
+ ir_raw_event_store_with_filter(data->dev, &rawir);
}
ir_raw_event_handle(data->dev);
@@ -492,6 +530,33 @@ wbcir_irq_handler(int irqno, void *cookie)
*****************************************************************************/
static int
+wbcir_set_carrier_report(struct rc_dev *dev, int enable)
+{
+ struct wbcir_data *data = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->spinlock, flags);
+
+ if (data->carrier_report_enabled == enable) {
+ spin_unlock_irqrestore(&data->spinlock, flags);
+ return 0;
+ }
+
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ if (enable && data->dev->idle)
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
+ WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ data->carrier_report_enabled = enable;
+ spin_unlock_irqrestore(&data->spinlock, flags);
+
+ return 0;
+}
+
+static int
wbcir_txcarrier(struct rc_dev *dev, u32 carrier)
{
struct wbcir_data *data = dev->priv;
@@ -837,7 +902,7 @@ wbcir_init_hw(struct wbcir_data *data)
/* Set IRTX_INV */
if (invert)
- outb(0x04, data->ebase + WBCIR_REG_ECEIR_CCTL);
+ outb(WBCIR_IRTX_INV, data->ebase + WBCIR_REG_ECEIR_CCTL);
else
outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL);
@@ -866,8 +931,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to sample every 10 us */
- outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
+ /* Set baud divisor to sample every 2 us */
+ outb(0x03, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -876,9 +941,12 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /* Disable RX demod, enable run-length enc/dec, set freq span */
+ /*
+ * Disable RX demod, enable run-length enc/dec, set freq span and
+ * enable over-sampling
+ */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0xd0, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
@@ -915,9 +983,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* Clear RX state */
data->rxstate = WBCIR_RXSTATE_INACTIVE;
- data->rxev.duration = 0;
ir_raw_event_reset(data->dev);
- ir_raw_event_handle(data->dev);
+ ir_raw_event_set_idle(data->dev, true);
/* Clear TX state */
if (data->txstate == WBCIR_TXSTATE_ACTIVE) {
@@ -1007,7 +1074,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
}
data->dev->driver_type = RC_DRIVER_IR_RAW;
- data->dev->driver_name = WBCIR_NAME;
+ data->dev->driver_name = DRVNAME;
data->dev->input_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
data->dev->input_id.bustype = BUS_HOST;
@@ -1016,13 +1083,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->input_id.version = WBCIR_ID_CHIP;
data->dev->map_name = RC_MAP_RC6_MCE;
data->dev->s_idle = wbcir_idle_rx;
+ data->dev->s_carrier_report = wbcir_set_carrier_report;
data->dev->s_tx_mask = wbcir_txmask;
data->dev->s_tx_carrier = wbcir_txcarrier;
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
data->dev->timeout = MS_TO_NS(100);
- data->dev->allowed_protos = RC_TYPE_ALL;
+ data->dev->rx_resolution = US_TO_NS(2);
+ data->dev->allowed_protos = RC_BIT_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
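The wbcir_carrier_report() helper added above derives the carrier frequency by dividing the hardware cycle counter by the pulse time accumulated in microseconds from the 2 us RX samples. A minimal standalone sketch of that arithmetic (illustration only, not part of the patch; the sample values are made up):

#include <stdio.h>

/* Mirrors DIV_ROUND_CLOSEST(counter * 1000000u, pulse_duration) */
static unsigned int carrier_hz(unsigned int counter, unsigned int pulse_us)
{
	return (counter * 1000000u + pulse_us / 2) / pulse_us;
}

int main(void)
{
	/* e.g. 3800 carrier cycles counted during 100000 us of mark time */
	printf("%u Hz\n", carrier_hz(3800, 100000));	/* prints 38000 Hz */
	return 0;
}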
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index aff39ae457a..81f38aae9c6 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -35,8 +35,6 @@
* Currently it blind writes bunch of static registers from the
* fc2580_freq_regs_lut[] when fc2580_set_params() is called. Add some
* logic to reduce unneeded register writes.
- * There is also don't-care registers, initialized with value 0xff, and those
- * are also written to the chip currently (yes, not wise).
*/
/* write multiple registers */
@@ -111,6 +109,17 @@ static int fc2580_rd_reg(struct fc2580_priv *priv, u8 reg, u8 *val)
return fc2580_rd_regs(priv, reg, val, 1);
}
+/* write a single register conditionally, only when the value differs from 0xff
+ * XXX: This is a special routine meant only for writing fc2580_freq_regs_lut[]
+ * values. Do not use it for other purposes. */
+static int fc2580_wr_reg_ff(struct fc2580_priv *priv, u8 reg, u8 val)
+{
+ if (val == 0xff)
+ return 0;
+ else
+ return fc2580_wr_regs(priv, reg, &val, 1);
+}
+
static int fc2580_set_params(struct dvb_frontend *fe)
{
struct fc2580_priv *priv = fe->tuner_priv;
@@ -213,99 +222,99 @@ static int fc2580_set_params(struct dvb_frontend *fe)
if (i == ARRAY_SIZE(fc2580_freq_regs_lut))
goto err;
- ret = fc2580_wr_reg(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
+ ret = fc2580_wr_reg_ff(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
+ ret = fc2580_wr_reg_ff(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
+ ret = fc2580_wr_reg_ff(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
+ ret = fc2580_wr_reg_ff(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
+ ret = fc2580_wr_reg_ff(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
+ ret = fc2580_wr_reg_ff(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
+ ret = fc2580_wr_reg_ff(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
+ ret = fc2580_wr_reg_ff(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
+ ret = fc2580_wr_reg_ff(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
+ ret = fc2580_wr_reg_ff(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
+ ret = fc2580_wr_reg_ff(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
+ ret = fc2580_wr_reg_ff(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
+ ret = fc2580_wr_reg_ff(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
+ ret = fc2580_wr_reg_ff(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
if (ret < 0)
goto err;
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index ba84936aafd..95ed46f2cd2 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -161,7 +161,7 @@ static int max2165_set_bandwidth(struct max2165_priv *priv, u32 bw)
return 0;
}
-int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
+static int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
{
u32 remainder;
u32 q, f = 0;
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 38966847407..83a6240f64d 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -136,7 +136,7 @@ static int tua9001_set_params(struct dvb_frontend *fe)
{
struct tua9001_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
+ int ret = 0, i;
u16 val;
u32 frequency;
struct reg_val data[2];
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 4937712278f..5c0fd787cc8 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -934,7 +934,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
int rc = 0, is_retry = 0;
u16 hwmodel;
v4l2_std_id std0;
- u8 hw_major, hw_minor, fw_major, fw_minor;
+ u8 hw_major = 0, hw_minor = 0, fw_major = 0, fw_minor = 0;
dprintk(1, "%s called\n", __func__);
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 448361c6a13..0cb7c28dcb1 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -25,7 +25,7 @@
#include "media/tuner.h"
#include "media/v4l2-common.h"
-void hvr950q_cs5340_audio(void *priv, int enable)
+static void hvr950q_cs5340_audio(void *priv, int enable)
{
/* Because the HVR-950q shares an i2s bus between the cs5340 and the
au8522, we need to hold cs5340 in reset when using the au8522 */
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index b328f6550d0..9a6f15613a3 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -272,7 +272,6 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
struct au0828_dev *dev = container_of(work, struct au0828_dev,
restart_streaming);
struct au0828_dvb *dvb = &dev->dvb;
- int ret;
if (dev->urb_streaming == 0)
return;
@@ -282,7 +281,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
mutex_lock(&dvb->lock);
/* Stop transport */
- ret = stop_urb_transfer(dev);
+ stop_urb_transfer(dev);
au0828_write(dev, 0x608, 0x00);
au0828_write(dev, 0x609, 0x00);
au0828_write(dev, 0x60a, 0x00);
@@ -293,7 +292,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
au0828_write(dev, 0x609, 0x72);
au0828_write(dev, 0x60a, 0x71);
au0828_write(dev, 0x60b, 0x01);
- ret = start_urb_transfer(dev);
+ start_urb_transfer(dev);
mutex_unlock(&dvb->lock);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 87058557057..45387aab10c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -158,7 +158,7 @@ static void au0828_irq_callback(struct urb *urb)
/*
* Stop and Deallocate URBs
*/
-void au0828_uninit_isoc(struct au0828_dev *dev)
+static void au0828_uninit_isoc(struct au0828_dev *dev)
{
struct urb *urb;
int i;
@@ -197,9 +197,9 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
/*
* Allocate URBs and start IRQ
*/
-int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
- int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
+static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
{
struct au0828_dmaqueue *dma_q = &dev->vidq;
int i;
@@ -783,7 +783,7 @@ static int au0828_i2s_init(struct au0828_dev *dev)
* Auvitek au0828 analog stream enable
* Please set interface0 to AS5 before enable the stream
*/
-int au0828_analog_stream_enable(struct au0828_dev *d)
+static int au0828_analog_stream_enable(struct au0828_dev *d)
{
dprintk(1, "au0828_analog_stream_enable called\n");
au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
@@ -810,7 +810,7 @@ int au0828_analog_stream_disable(struct au0828_dev *d)
return 0;
}
-void au0828_analog_stream_reset(struct au0828_dev *dev)
+static void au0828_analog_stream_reset(struct au0828_dev *dev)
{
dprintk(1, "au0828_analog_stream_reset called\n");
au0828_writereg(dev, AU0828_SENSORCTRL_100, 0x0);
@@ -913,7 +913,7 @@ static int get_ressource(struct au0828_fh *fh)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vidq;
@@ -937,7 +937,7 @@ void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vbiq;
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 447148eff95..72220791374 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1068,12 +1068,12 @@ int cx231xx_unmute_audio(struct cx231xx *dev)
}
EXPORT_SYMBOL_GPL(cx231xx_unmute_audio);
-int stopAudioFirmware(struct cx231xx *dev)
+static int stopAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03);
}
-int restartAudioFirmware(struct cx231xx *dev)
+static int restartAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13);
}
@@ -2631,11 +2631,6 @@ int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
rc = cx231xx_stop_stream(dev, ep_mask);
}
- if (dev->mode == CX231XX_ANALOG_MODE)
- ;/* do any in Analog mode */
- else
- ;/* do any in digital mode */
-
return rc;
}
EXPORT_SYMBOL_GPL(cx231xx_capture_start);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index b84ebc54d91..bbed1e40eed 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -686,7 +686,7 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
}
EXPORT_SYMBOL_GPL(cx231xx_tuner_callback);
-void cx231xx_reset_out(struct cx231xx *dev)
+static void cx231xx_reset_out(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
msleep(200);
@@ -694,11 +694,13 @@ void cx231xx_reset_out(struct cx231xx *dev)
msleep(200);
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
}
-void cx231xx_enable_OSC(struct cx231xx *dev)
+
+static void cx231xx_enable_OSC(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1);
}
-void cx231xx_sleep_s5h1432(struct cx231xx *dev)
+
+static void cx231xx_sleep_s5h1432(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0);
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 781feed406f..96a5a096539 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -72,8 +72,8 @@ static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus,
/*
* cx231xx_i2c_send_bytes()
*/
-int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
- const struct i2c_msg *msg)
+static int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
+ const struct i2c_msg *msg)
{
struct cx231xx_i2c *bus = i2c_adap->algo_data;
struct cx231xx *dev = bus->dev;
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 96176e9db5a..0f7b4244682 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -99,7 +99,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
/* The i2c micro-controller only outputs the cmd part of NEC protocol */
dev->init_data.rc_dev->scanmask = 0xff;
dev->init_data.rc_dev->driver_name = "cx231xx";
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x30;
/* Load and bind ir-kbd-i2c */
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 3d7526e28d4..943d9342370 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1306,7 +1306,7 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = af9015_rc_query;
rc->interval = 500;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index ea27eaff4e3..61ae7f9d0b2 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1023,10 +1023,10 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
switch (tmp) {
case 0: /* NEC */
default:
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
break;
case 1: /* RC6 */
- rc->allowed_protos = RC_TYPE_RC6;
+ rc->allowed_protos = RC_BIT_RC6_MCE;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index ec540140c81..d05c5b563da 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -1048,7 +1048,7 @@ static int anysee_rc_query(struct dvb_usb_device *d)
static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = anysee_rc_query;
rc->interval = 250; /* windows driver uses 500ms */
@@ -1170,7 +1170,7 @@ static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot,
struct dvb_usb_device *d = ci->data;
struct anysee_state *state = d_to_priv(d);
int ret;
- u8 tmp;
+ u8 tmp = 0;
ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40);
if (ret)
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 54f1221d930..d75dbf27e99 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -826,7 +826,7 @@ static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
pr_debug("Getting az6007 Remote Control properties\n");
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = az6007_rc_query;
rc->interval = 400;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index bae16a1189d..059291b892b 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -137,7 +137,7 @@ struct dvb_usb_driver_info {
struct dvb_usb_rc {
const char *map_name;
u64 allowed_protos;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
int (*query) (struct dvb_usb_device *d);
unsigned int interval;
const enum rc_driver_type driver_type;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index ba51f65204d..671b4fa232b 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -224,7 +224,7 @@ static void dvb_usb_data_complete_raw(struct usb_data_stream *stream, u8 *buf,
dvb_dmx_swfilter_raw(&adap->demux, buf, len);
}
-int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -236,7 +236,7 @@ int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
return usb_urb_initv2(&adap->stream, &adap->props->stream);
}
-int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -368,7 +368,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return dvb_usb_ctrl_feed(dvbdmxfeed, -1);
}
-int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -440,7 +440,7 @@ err_dvb_register_adapter:
return ret;
}
-int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -456,7 +456,7 @@ int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
return 0;
}
-int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
+static int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
@@ -553,7 +553,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
{
int ret, i, count_registered = 0;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -622,7 +622,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
int i;
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 695f9106bc5..47204280b8b 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -659,13 +659,19 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_SW_RST, 0x1);
it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x0f);
it913x_wr_reg(d, DEV_0, EP0_TX_NAK, 0x1b);
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
+ if (st->proprietary_ir == false) /* Enable endpoint 3 */
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x3f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
ret = it913x_wr_reg(d, DEV_0, EP4_MAX_PKT, pkt_size);
} else if (adap->id == 1 && adap->fe[0]) {
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
+ if (st->proprietary_ir == false)
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x7f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
@@ -698,7 +704,7 @@ static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = it913x_rc_query;
rc->interval = 250;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index c41d9d9ec7b..6427ac359f2 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -799,7 +799,7 @@ static const char fw_c_rs2000[] = LME2510_C_RS2000;
static const char fw_lg[] = LME2510_LG;
static const char fw_s0194[] = LME2510_S0194;
-const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
+static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
{
struct lme2510_state *st = d->priv;
struct usb_device *udev = d->udev;
@@ -1253,7 +1253,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
static int lme2510_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 093f1acce40..a4c302d0aa3 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1197,7 +1197,7 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2831u_rc_query;
rc->interval = 400;
@@ -1269,7 +1269,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2832u_rc_query;
rc->interval = 400;
@@ -1338,6 +1338,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
&rtl2832u_props, "NOXON DAB/DAB+ USB dongle", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
+ &rtl2832u_props, "NOXON DAB/DAB+ USB dongle (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 5989b659037..7346f85f3f2 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -112,7 +112,7 @@ int usb_urb_submitv2(struct usb_data_stream *stream,
return 0;
}
-int usb_urb_free_urbs(struct usb_data_stream *stream)
+static int usb_urb_free_urbs(struct usb_data_stream *stream)
{
int i;
@@ -205,7 +205,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
return 0;
}
-int usb_free_stream_buffers(struct usb_data_stream *stream)
+static int usb_free_stream_buffers(struct usb_data_stream *stream)
{
if (stream->state & USB_STATE_URB_BUF) {
while (stream->buf_num) {
@@ -223,8 +223,8 @@ int usb_free_stream_buffers(struct usb_data_stream *stream)
return 0;
}
-int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
- unsigned long size)
+static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
+ unsigned long size)
{
stream->buf_num = 0;
stream->buf_size = size;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 5e45ae60542..91e0119e8a8 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -298,7 +298,8 @@ struct stb6100_config az6027_stb6100_config = {
/* check for mutex FIXME */
-int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret = -1;
if (mutex_lock_interruptible(&d->usb_mutex))
@@ -1051,10 +1052,10 @@ static struct i2c_algorithm az6027_i2c_algo = {
.functionality = az6027_i2c_func,
};
-int az6027_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
- int *cold)
+static int az6027_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
{
u8 *b;
s16 ret;
diff --git a/drivers/media/usb/dvb-usb/dib0700.h b/drivers/media/usb/dvb-usb/dib0700.h
index 7de125c0b36..637b6123f39 100644
--- a/drivers/media/usb/dvb-usb/dib0700.h
+++ b/drivers/media/usb/dvb-usb/dib0700.h
@@ -64,7 +64,7 @@ extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
extern struct i2c_algorithm dib0700_i2c_algo;
extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc, int *cold);
-extern int dib0700_change_protocol(struct rc_dev *dev, u64 rc_type);
+extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_type);
extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
extern int dib0700_device_count;
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index ef87229de6a..19b5ed2825d 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -605,7 +605,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
-int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
+int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct dvb_usb_device *d = rc->priv;
struct dib0700_state *st = d->priv;
@@ -621,17 +621,19 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
st->buf[2] = 0;
/* Set the IR mode */
- if (rc_type == RC_TYPE_RC5)
+ if (*rc_type & RC_BIT_RC5) {
new_proto = 1;
- else if (rc_type == RC_TYPE_NEC)
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
new_proto = 0;
- else if (rc_type == RC_TYPE_RC6) {
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type & RC_BIT_RC6_MCE) {
if (st->fw_version < 0x10200) {
ret = -EINVAL;
goto out;
}
-
new_proto = 2;
+ *rc_type = RC_BIT_RC6_MCE;
} else {
ret = -EINVAL;
goto out;
@@ -645,7 +647,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
goto out;
}
- d->props.rc.core.protocol = rc_type;
+ d->props.rc.core.protocol = *rc_type;
out:
mutex_unlock(&d->usb_mutex);
@@ -707,7 +709,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
purb->actual_length);
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
toggle = 0;
/* NEC protocol sends repeat code as 0 0 0 FF */
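Throughout this series the scalar RC_TYPE_* protocol values become RC_BIT_* single-bit flags, and change_protocol() now takes a pointer so the driver can narrow the requested mask to the one protocol it actually programmed, as dib0700_change_protocol() does above. A minimal sketch of the callback pattern (illustration only; the RC_BIT_* values below are placeholders, not the kernel's definitions):

#include <stdint.h>
#include <errno.h>

#define RC_BIT_NEC	(1ULL << 0)	/* placeholder bit values */
#define RC_BIT_RC5	(1ULL << 1)

static int example_change_protocol(uint64_t *rc_type)
{
	if (*rc_type & RC_BIT_RC5)
		*rc_type = RC_BIT_RC5;	/* hardware switched to RC-5 */
	else if (*rc_type & RC_BIT_NEC)
		*rc_type = RC_BIT_NEC;	/* hardware switched to NEC */
	else
		return -EINVAL;		/* nothing we can decode */

	return 0;			/* caller sees the narrowed mask */
}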
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 510001da6e8..11798426fa8 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -518,7 +518,7 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
d->last_event = 0;
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
(key[3] == 0xff))
@@ -3658,9 +3658,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3698,9 +3698,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3763,9 +3763,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3808,9 +3808,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3890,9 +3890,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3936,9 +3936,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3987,9 +3987,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4055,9 +4055,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4106,9 +4106,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4177,9 +4177,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4215,9 +4215,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4295,9 +4295,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4341,9 +4341,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4394,9 +4394,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4433,9 +4433,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4472,9 +4472,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4511,9 +4511,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4550,9 +4550,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4589,9 +4589,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4644,9 +4644,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4681,9 +4681,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4721,9 +4721,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4761,9 +4761,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4802,9 +4802,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
},
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index aab0f99bc89..ce4c4e3b58b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -202,7 +202,7 @@ struct dvb_rc {
u64 protocol;
u64 allowed_protos;
enum rc_driver_type driver_type;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
char *module_name;
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 02e878577c3..d1ddfa13de8 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -927,7 +927,7 @@ static struct dvb_usb_device_properties pctv452e_properties = {
.rc.core = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
@@ -980,7 +980,7 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
.rc.core = {
.rc_codes = RC_MAP_TT_1500,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 7a8c8c18590..40832a1aef6 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -732,7 +732,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.rc_codes = RC_MAP_TECHNISAT_USB2,
.module_name = "technisat-usb2",
.rc_query = technisat_usb2_rc_query,
- .allowed_protos = RC_TYPE_ALL,
+ .allowed_protos = RC_BIT_ALL,
.driver_type = RC_DRIVER_IR_RAW,
}
};
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index 6a50cdea3bc..bcdac225ebe 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -741,7 +741,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
.rc_interval = 150, /* Less than IR_KEYPRESS_TIMEOUT */
.rc_codes = RC_MAP_TT_1500,
.rc_query = tt3650_rc_query,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_adapters = 1,
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 07c673a6e76..22cf9f96cb9 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -56,7 +56,7 @@ static int vp702x_usb_in_op_unlocked(struct dvb_usb_device *d, u8 req,
}
int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+ u16 index, u8 *b, int blen)
{
int ret;
@@ -67,8 +67,8 @@ int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
-int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+static int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret;
deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index);
@@ -86,7 +86,7 @@ int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
return 0;
}
-int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
int ret;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 16a84f9f46d..619bffbab3b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1979,6 +1979,15 @@ struct em28xx_board em28xx_boards[] = {
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ [EM2884_BOARD_TERRATEC_HTC_USB_XS] = {
+ .name = "Terratec Cinergy HTC USB XS",
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ .tuner_type = TUNER_ABSENT,
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -2057,9 +2066,9 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0043),
.driver_info = EM2870_BOARD_TERRATEC_XS },
{ USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
@@ -3297,7 +3306,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->num_alt = interface->num_altsetting;
- if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
+ if ((unsigned)card[nr] < em28xx_bcount)
dev->model = card[nr];
/* save our data pointer in this interface device */
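The probe change above folds the two-sided bounds test on card[nr] into a single unsigned comparison; the old >= 0 check shows the array is signed, so casting a negative (unset) entry wraps it to a huge unsigned value that fails the bound test just as it failed the sign test before. A tiny standalone sketch of the idiom (illustration only):

#include <stdio.h>

static int in_range(int val, unsigned int count)
{
	/* a negative val becomes a huge unsigned value and fails the test */
	return (unsigned int)val < count;
}

int main(void)
{
	printf("%d %d %d\n", in_range(-1, 10), in_range(5, 10), in_range(10, 10));
	/* prints: 0 1 0 */
	return 0;
}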
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 13ae821949e..63f2e7070c0 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -331,7 +331,7 @@ static struct drxk_config hauppauge_930c_drxk = {
.load_firmware_sync = true,
};
-struct drxk_config terratec_htc_stick_drxk = {
+static struct drxk_config terratec_htc_stick_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
@@ -520,7 +520,10 @@ static void terratec_htc_stick_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- /* Init the analog decoder? */
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
struct {
unsigned char r[4];
int len;
@@ -547,6 +550,64 @@ static void terratec_htc_stick_init(struct em28xx *dev)
em28xx_gpio_set(dev, terratec_htc_stick_end);
};
+static void terratec_htc_usb_xs_init(struct em28xx *dev)
+{
+ int i;
+
+ struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 100},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xb6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ {EM2874_R80_GPIO, 0xa6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_init);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_end);
+};
+
static void pctv_520e_init(struct em28xx *dev)
{
/*
@@ -1155,6 +1216,25 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2884_BOARD_TERRATEC_HTC_USB_XS:
+ terratec_htc_usb_xs_init(dev);
+
+ /* attach demodulator */
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
+ &dev->i2c_adap);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* Attach the demodulator. */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap,
+ &em28xx_cxd2820r_tda18271_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 97d36b4f19d..660bf803c9e 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -345,7 +345,7 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
{
int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
@@ -354,14 +354,16 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
/* Adjust xclk based on IR table for RC5/NEC tables */
- if (rc_type == RC_TYPE_RC5) {
+ if (*rc_type & RC_BIT_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
- } else if (rc_type == RC_TYPE_NEC) {
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else if (rc_type != RC_TYPE_UNKNOWN)
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type != RC_BIT_UNKNOWN)
rc = -EINVAL;
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
@@ -524,6 +526,7 @@ static int em28xx_ir_init(struct em28xx *dev)
struct em28xx_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (dev->board.ir_codes == NULL) {
/* No remote control support */
@@ -546,14 +549,15 @@ static int em28xx_ir_init(struct em28xx *dev)
* em2874 supports more protocols. For now, let's just announce
* the two protocols that were already tested
*/
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
rc->priv = ir;
rc->change_protocol = em28xx_ir_change_protocol;
rc->open = em28xx_ir_start;
rc->close = em28xx_ir_stop;
/* By default, keep protocol field untouched */
- err = em28xx_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ err = em28xx_ir_change_protocol(rc, &rc_type);
if (err)
goto err_out_free;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 8757523e686..86e90d86da6 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -128,6 +128,7 @@
#define EM2874_BOARD_MAXMEDIA_UB425_TC 84
#define EM2884_BOARD_PCTV_510E 85
#define EM2884_BOARD_PCTV_520E 86
+#define EM2884_BOARD_TERRATEC_HTC_USB_XS 87
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index a2b934146eb..e0a431bb0d4 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1586,8 +1586,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
- if (v4l2_buf->index < 0
- || v4l2_buf->index >= gspca_dev->nframes)
+ if (v4l2_buf->index >= gspca_dev->nframes)
return -EINVAL;
frame = &gspca_dev->frame[v4l2_buf->index];
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index e3eab82cd4e..352317d7acd 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -32,7 +32,7 @@ do { \
#define D_USBO 0x00
#define D_V4L2 0x0100
#else
-#define PDEBUG(level, fmt, ...)
+#define PDEBUG(level, fmt, ...) do {} while(0)
#endif
#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
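The empty PDEBUG() stub above gains a do { } while (0) body so that a compiled-out call still behaves as one complete statement: used as the sole body of an if, its trailing semicolon no longer leaves a bare empty statement behind, which some compilers warn about. A minimal sketch of the idiom with a hypothetical macro (illustration only):

#include <stdio.h>

#define NOP_DEBUG(level, fmt, ...)	do {} while (0)	/* hypothetical stub */

int main(void)
{
	int err = 0;

	if (err)
		NOP_DEBUG(1, "error %d", err);	/* still one full statement */
	else
		printf("ok\n");

	return 0;
}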
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index b897aa86f31..1ba29fe7fad 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -114,7 +114,7 @@ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
}
/* Responses are one byte only */
-static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
+static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char *response)
{
int retval;
@@ -123,7 +123,7 @@ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
retval = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x84),
gspca_dev->usb_buf, 1, NULL, 500);
- response = gspca_dev->usb_buf[0];
+ *response = gspca_dev->usb_buf[0];
if (retval < 0) {
pr_err("read command [%02x] error %d\n",
gspca_dev->usb_buf[0], retval);
@@ -260,7 +260,7 @@ static int jlj_start(struct gspca_dev *gspca_dev)
if (start_commands[i].delay)
msleep(start_commands[i].delay);
if (start_commands[i].ack_wanted)
- jlj_read1(gspca_dev, response);
+ jlj_read1(gspca_dev, &response);
}
setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
msleep(2);
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
index cc8ec3f7e8d..c8e1572eb50 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
@@ -74,6 +74,12 @@ static
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2548")
}
}, {
+ .ident = "Fujitsu-Siemens Amilo Pi 2530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 2530")
+ }
+ }, {
.ident = "MSI GX700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2d5c6d8343a..4f5869a9808 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -29,14 +29,13 @@
* Register page 0:
*
* Address Description
- * 0x02 Red balance control
- * 0x03 Green balance control
- * 0x04 Blue balance control
- * Valus are inverted (0=max, 255=min).
+ * 0x01 Red balance control
+ * 0x02 Green balance control
+ * 0x03 Blue balance control
* The Windows driver uses a quadratic approach to map
* the settable values (0-200) on register values:
- * min=0x80, default=0x40, max=0x20
- * 0x0f-0x20 Colors, saturation and exposure control
+ * min=0x20, default=0x40, max=0x80
+ * 0x0f-0x20 Color and saturation control
* 0xa2-0xab Brightness, contrast and gamma control
* 0xb6 Sharpness control (bits 0-4)
*
@@ -78,12 +77,12 @@
*
* Page | Register | Function
* -----+------------+---------------------------------------------------
+ * 0 | 0x01 | setredbalance()
+ * 0 | 0x03 | setbluebalance()
* 0 | 0x0f..0x20 | setcolors()
* 0 | 0xa2..0xab | setbrightcont()
* 0 | 0xb6 | setsharpness()
- * 0 | 0xc5 | setredbalance()
* 0 | 0xc6 | setwhitebalance()
- * 0 | 0xc7 | setbluebalance()
* 0 | 0xdc | setbrightcont(), setcolors()
* 3 | 0x02 | setexposure()
* 3 | 0x10, 0x12 | setgain()
@@ -99,10 +98,13 @@
/* Include pac common sof detection functions */
#include "pac_common.h"
-#define PAC7302_GAIN_DEFAULT 15
-#define PAC7302_GAIN_KNEE 42
-#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
-#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
+#define PAC7302_RGB_BALANCE_MIN 0
+#define PAC7302_RGB_BALANCE_MAX 200
+#define PAC7302_RGB_BALANCE_DEFAULT 100
+#define PAC7302_GAIN_DEFAULT 15
+#define PAC7302_GAIN_KNEE 42
+#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
+#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
"Thomas Kaiser thomas@kaiser-linux.li");
@@ -439,12 +441,31 @@ static void setwhitebalance(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xdc, 0x01);
}
+static u8 rgbbalance_ctrl_to_reg_value(s32 rgb_ctrl_val)
+{
+ const unsigned int k = 1000; /* precision factor */
+ unsigned int norm;
+
+ /* Normed value [0...k] */
+ norm = k * (rgb_ctrl_val - PAC7302_RGB_BALANCE_MIN)
+ / (PAC7302_RGB_BALANCE_MAX - PAC7302_RGB_BALANCE_MIN);
+ /* Quadratic approach improves control at small (register) values: */
+ return 64 * norm * norm / (k*k) + 32 * norm / k + 32;
+ /* Y = 64*X*X + 32*X + 32
+ * => register values 0x20-0x80; Windows driver uses these limits */
+
+ /* NOTE: for full value range (0x00-0xff) use
+ * Y = 254*X*X + X
+ * => 254 * norm * norm / (k*k) + 1 * norm / k */
+}
+
static void setredbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc5, sd->red_balance->val);
+ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
+ reg_w(gspca_dev, 0x01,
+ rgbbalance_ctrl_to_reg_value(sd->red_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -454,7 +475,8 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc7, sd->blue_balance->val);
+ reg_w(gspca_dev, 0x03,
+ rgbbalance_ctrl_to_reg_value(sd->blue_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -643,9 +665,15 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_WHITE_BALANCE_TEMPERATURE,
0, 255, 1, 55);
sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_RED_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_BLUE_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
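
rgbbalance_ctrl_to_reg_value() above maps the new 0..200 balance controls onto register values with Y = 64*X*X + 32*X + 32, where X is the control value normalised to 0..1. A quick stand-alone check (verification sketch only, not driver code) reproduces the documented end points min=0x20, default=0x40, max=0x80:

    #include <stdio.h>

    /* Same arithmetic as the patch, constants spelled out. */
    static unsigned int ctrl_to_reg(unsigned int val)
    {
            const unsigned int k = 1000;            /* precision factor */
            unsigned int norm = k * val / 200;      /* MIN = 0, MAX = 200 */

            return 64 * norm * norm / (k * k) + 32 * norm / k + 32;
    }

    int main(void)
    {
            printf("ctrl   0 -> 0x%02x\n", ctrl_to_reg(0));      /* 0x20 */
            printf("ctrl 100 -> 0x%02x\n", ctrl_to_reg(100));    /* 0x40 */
            printf("ctrl 200 -> 0x%02x\n", ctrl_to_reg(200));    /* 0x80 */
            return 0;
    }
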
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index fd1f8d2d3b0..70511d5f953 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1449,6 +1449,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
+ {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
{USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
{USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
{USB_DEVICE(0x0c45, 0x602a), SB(HV7131D, 102)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 304f43ef59e..84dc26fe80e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -401,12 +401,14 @@ static int hdpvr_probe(struct usb_interface *interface,
client = hdpvr_register_ir_rx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
client = hdpvr_register_ir_tx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 82e819fa91c..031cf024304 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -55,7 +55,7 @@ struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = "HD-PVR";
init_data->polling_interval = 405; /* ms, duplicated from Windows */
hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index fb828ba1dbb..299751a8b06 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3563,9 +3563,9 @@ void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,
enum pvr2_v4l_type index,int v)
{
switch (index) {
- case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v;
- case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v;
- case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v;
+ case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v;break;
+ case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v;break;
+ case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v;break;
default: break;
}
}
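
The pvrusb2 hunk above adds the break statements the switch was missing: without them each assignment fell through to the next case, so storing e.g. the video minor number also overwrote the VBI and radio minors. A tiny stand-alone illustration of that fallthrough (made-up values):

    #include <stdio.h>

    int main(void)
    {
            int video = -1, vbi = -1, radio = -1;
            int index = 0;                  /* "video" */

            switch (index) {                /* no break: later cases run too */
            case 0: video = 5;
            case 1: vbi = 5;
            case 2: radio = 5;
            }
            printf("video=%d vbi=%d radio=%d\n", video, vbi, radio);
            /* prints 5 5 5 -- vbi and radio were clobbered */
            return 0;
    }
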
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 885ce11f222..9ab596c78a4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -581,7 +581,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
/* IR Receiver */
@@ -596,7 +596,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
/* IR Receiver */
info.addr = 0x71;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index db249cad3cd..6930676051e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -196,7 +196,7 @@ static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
return ret;
}
-int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
+static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
@@ -365,7 +365,7 @@ static int pvr2_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
vt->audmode);
}
-int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+static int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 1f506fde97d..3a1618580ed 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -179,6 +179,8 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
return -EINVAL;
if (frames < 4)
frames = 4;
+ else if (size > PSZ_QCIF && frames > 15)
+ frames = 15;
else if (frames > 25)
frames = 25;
frames = frames2frames[frames];
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 42e36bac4d7..5210239cbae 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -155,7 +155,7 @@ static struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
-struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
+static struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
unsigned long flags = 0;
struct pwc_frame_buf *buf = NULL;
@@ -1000,7 +1000,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
pdev->vb_queue.ops = &pwc_vb_queue_ops;
pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- vb2_queue_init(&pdev->vb_queue);
+ rc = vb2_queue_init(&pdev->vb_queue);
+ if (rc < 0) {
+ PWC_ERROR("Oops, could not initialize vb2 queue.\n");
+ goto err_free_mem;
+ }
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 2191f6ddf9e..8ebec0d7bf5 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1651,7 +1651,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
int is_ntsc = 0;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
- if (fe->index < 0 || fe->index >= NUM_FRAME_ENUMS)
+ if (fe->index >= NUM_FRAME_ENUMS)
return -EINVAL;
switch (fe->width) {
case 640:
diff --git a/drivers/media/usb/siano/Kconfig b/drivers/media/usb/siano/Kconfig
index 3c76e62d820..5afbd9a4b55 100644
--- a/drivers/media/usb/siano/Kconfig
+++ b/drivers/media/usb/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_USB_DRV
tristate "Siano SMS1xxx based MDTV receiver"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for USB interface
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 5bfc8e2f018..73605864fff 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -2481,11 +2481,13 @@ sn9c102_vidioc_enum_framesizes(struct sn9c102_device* cam, void __user * arg)
if (frmsize.pixel_format != V4L2_PIX_FMT_SN9C10X &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
if (frmsize.pixel_format != V4L2_PIX_FMT_JPEG &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
}
frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 176ac937306..850cf285ada 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -116,7 +116,7 @@ static int stk1160_i2c_read_reg(struct stk1160 *dev, u8 addr,
if (rc < 0)
return rc;
- stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
+ rc = stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
if (rc < 0)
return rc;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 8bdfb027531..fa3671de02a 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -475,7 +475,11 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
if (!dev->isoc_ctl.transfer_buffer[i]) {
stk1160_err("cannot alloc %d bytes for tx[%d] buffer\n",
sb_size, i);
- goto free_i_bufs;
+
+ /* Not enough transfer buffers, so just give up */
+ if (i < STK1160_MIN_BUFS)
+ goto free_i_bufs;
+ goto nomore_tx_bufs;
}
memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
@@ -506,13 +510,28 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
}
}
- stk1160_dbg("urbs allocated\n");
+ stk1160_dbg("%d urbs allocated\n", num_bufs);
/* At last we can say we have some buffers */
dev->isoc_ctl.num_bufs = num_bufs;
return 0;
+nomore_tx_bufs:
+ /*
+ * Failed to allocate desired buffer count. However, we may have
+ * enough to work fine, so we just free the extra urb,
+ * store the allocated count and keep going, fingers crossed!
+ */
+ usb_free_urb(dev->isoc_ctl.urb[i]);
+ dev->isoc_ctl.urb[i] = NULL;
+
+ stk1160_warn("%d urbs allocated. Trying to continue...\n", i - 1);
+
+ dev->isoc_ctl.num_bufs = i - 1;
+
+ return 0;
+
free_i_bufs:
/* Save the allocated buffers so far, so we can properly free them */
dev->isoc_ctl.num_bufs = i+1;
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 68c8707d36a..05b05b160e1 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -30,11 +30,12 @@
#define STK1160_VERSION "0.9.5"
#define STK1160_VERSION_NUM 0x000905
-/* TODO: Decide on number of packets for each buffer */
+/* Decide on number of packets for each buffer */
#define STK1160_NUM_PACKETS 64
/* Number of buffers for isoc transfers */
-#define STK1160_NUM_BUFS 16 /* TODO */
+#define STK1160_NUM_BUFS 16
+#define STK1160_MIN_BUFS 1
/* TODO: This endpoint address should be retrieved */
#define STK1160_EP_VIDEO 0x82
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 86a0fc56c33..5d3c032d733 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -54,10 +54,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jaime Velasco Juan <jsagarribay@gmail.com> and Nicolas VIVIEN");
MODULE_DESCRIPTION("Syntek DC1125 webcam driver");
-
-/* bool for webcam LED management */
-int first_init = 1;
-
/* Some cameras have audio interfaces, we aren't interested in those */
static struct usb_device_id stkwebcam_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) },
@@ -554,6 +550,7 @@ static void stk_free_buffers(struct stk_camera *dev)
static int v4l_stk_open(struct file *fp)
{
+ static int first_init = 1; /* webcam LED management */
struct stk_camera *dev;
struct video_device *vdev;
diff --git a/drivers/media/usb/tlg2300/pd-dvb.c b/drivers/media/usb/tlg2300/pd-dvb.c
index 30fcb117e89..ca4994a5190 100644
--- a/drivers/media/usb/tlg2300/pd-dvb.c
+++ b/drivers/media/usb/tlg2300/pd-dvb.c
@@ -1,6 +1,7 @@
#include "pd-common.h"
#include <linux/kernel.h>
#include <linux/usb.h>
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/media/usb/tlg2300/pd-video.c
index 1f448ac7a49..3082bfa9b2c 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/media/usb/tlg2300/pd-video.c
@@ -888,7 +888,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *in)
{
struct front_face *front = fh;
- if (in->index < 0 || in->index >= POSEIDON_INPUTS)
+ if (in->index >= POSEIDON_INPUTS)
return -EINVAL;
strcpy(in->name, pd_inputs[in->index].name);
in->type = V4L2_INPUT_TYPE_TUNER;
@@ -923,7 +923,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
struct poseidon *pd = front->pd;
s32 ret, cmd_status;
- if (i < 0 || i >= POSEIDON_INPUTS)
+ if (i >= POSEIDON_INPUTS)
return -EINVAL;
ret = send_set_req(pd, SGNL_SRC_SEL,
pd_inputs[i].tlg_src, &cmd_status);
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index dffbd4bd47b..8a6bbf1d80e 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -109,12 +109,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
*/
switch (ir->rc_type) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
leader = 900; /* ms */
pulse = 700; /* ms - the actual value would be 562 */
break;
default:
- case RC_TYPE_RC5:
+ case RC_BIT_RC5:
leader = 900; /* ms - from the NEC decoding */
pulse = 1780; /* ms - The actual value would be 1776 */
break;
@@ -122,12 +122,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
pulse = ir_clock_mhz * pulse;
leader = ir_clock_mhz * leader;
- if (ir->rc_type == RC_TYPE_NEC)
+ if (ir->rc_type == RC_BIT_NEC)
leader = leader | 0x8000;
dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n",
__func__,
- (ir->rc_type == RC_TYPE_NEC) ? "NEC" : "RC-5",
+ (ir->rc_type == RC_BIT_NEC) ? "NEC" : "RC-5",
ir_clock_mhz, leader, pulse);
/* Remote WAKEUP = enable, normal mode, from IR decoder output */
@@ -297,7 +297,7 @@ static void tm6000_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct tm6000_IR *ir = rc->priv;
@@ -306,10 +306,10 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
dprintk(2, "%s\n",__func__);
- if ((rc->rc_map.scan) && (rc_type == RC_TYPE_NEC))
+ if ((rc->rc_map.scan) && (*rc_type == RC_BIT_NEC))
ir->key_addr = ((rc->rc_map.scan[0].scancode >> 8) & 0xffff);
- ir->rc_type = rc_type;
+ ir->rc_type = *rc_type;
tm6000_ir_config(ir);
/* TODO */
@@ -398,6 +398,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
struct tm6000_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (!enable_ir)
return -ENODEV;
@@ -421,7 +422,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
ir->rc = rc;
/* input setup */
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
/* Needed in order to support NEC remotes with 24 or 32 bits */
rc->scanmask = 0xffff;
rc->priv = ir;
@@ -444,7 +445,8 @@ int tm6000_ir_init(struct tm6000_core *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- tm6000_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ tm6000_ir_change_protocol(rc, &rc_type);
rc->input_name = ir->name;
rc->input_phys = ir->phys;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 4342cd4f5c8..f656fd7a39a 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1802,6 +1802,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (!dev->radio_dev) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
+ ret = -ENXIO;
return ret; /* FIXME release resource */
}
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 43cf61fe494..8a25876d72c 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -167,7 +167,7 @@ enum {
/* This macro restricts an int variable to an inclusive range */
#define RESTRICT_TO_RANGE(v, mi, ma) \
- { if ((v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
+ { if (((int)v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
/*
* We use macros to do YUV -> RGB conversion because this is
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index f7061a5ef1d..2bb7613ddeb 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -927,7 +927,7 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
- return -EINVAL;
+ return -EACCES;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
@@ -1452,8 +1452,12 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
if (step == 0)
step = 1;
- xctrl->value = min + (xctrl->value - min + step/2) / step * step;
- xctrl->value = clamp(xctrl->value, min, max);
+ xctrl->value = min + ((u32)(xctrl->value - min) + step / 2)
+ / step * step;
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
+ xctrl->value = clamp(xctrl->value, min, max);
+ else
+ xctrl->value = clamp_t(u32, xctrl->value, min, max);
value = xctrl->value;
break;
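
The clamping change above rounds relative to min using u32 arithmetic and picks clamp() or clamp_t(u32, ...) based on the mapping's data type, presumably so that unsigned controls whose range does not fit in a signed 32-bit value are not mis-clamped by signed comparisons. A stand-alone sketch of that failure mode (illustrative ranges, plain C instead of the kernel macros):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t min = 0x10u, max = 0x80000000u;    /* unsigned control range */
            uint32_t value = 0x7fffffffu;               /* legal value            */

            /* signed view: max becomes INT32_MIN, so "value > max" is true */
            int32_t smax = (int32_t)max;
            int32_t sval = (int32_t)value;
            if (sval > smax)
                    sval = smax;
            printf("signed clamp  : 0x%08x (wrongly clamped)\n", (uint32_t)sval);

            /* unsigned view: the value is inside [min, max] and stays put */
            uint32_t uval = value;
            if (uval < min)
                    uval = min;
            else if (uval > max)
                    uval = max;
            printf("unsigned clamp: 0x%08x (left alone)\n", uval);
            return 0;
    }
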
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5967081747c..5dbefa68b1d 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1562,6 +1562,9 @@ static int uvc_scan_device(struct uvc_device *dev)
INIT_LIST_HEAD(&chain->entities);
mutex_init(&chain->ctrl_mutex);
chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
kfree(chain);
@@ -1722,6 +1725,8 @@ static int uvc_register_video(struct uvc_device *dev,
vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
+ vdev->prio = &stream->chain->prio;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags);
if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vdev->vfl_dir = VFL_DIR_TX;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1741,6 +1746,11 @@ static int uvc_register_video(struct uvc_device *dev,
return ret;
}
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ stream->chain->caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else
+ stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT;
+
atomic_inc(&dev->nstreams);
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 29e239911d0..dc56a59ecad 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -93,6 +93,8 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
} else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ if (entity->flags & UVC_ENTITY_FLAG_DEFAULT)
+ entity->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
} else
ret = 0;
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 18a91fae6bc..778addc5caf 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -128,7 +128,7 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
int ret;
queue->queue.type = type;
- queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
+ queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue->queue.drv_priv = queue;
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f00db3060e0..f2ee8c6b0d8 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -165,17 +165,18 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fcc[0], fcc[1], fcc[2], fcc[3],
fmt->fmt.pix.width, fmt->fmt.pix.height);
- /* Check if the hardware supports the requested format. */
+ /* Check if the hardware supports the requested format, use the default
+ * format otherwise.
+ */
for (i = 0; i < stream->nformats; ++i) {
format = &stream->format[i];
if (format->fcc == fmt->fmt.pix.pixelformat)
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
- uvc_trace(UVC_TRACE_FORMAT, "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
- return -EINVAL;
+ if (i == stream->nformats) {
+ format = stream->def_format;
+ fmt->fmt.pix.pixelformat = format->fcc;
}
/* Find the closest image size. The distance between image sizes is
@@ -564,15 +565,30 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
usb_make_path(stream->dev->udev,
cap->bus_info, sizeof(cap->bus_info));
cap->version = LINUX_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | chain->caps;
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
break;
}
+ /* Priority */
+ case VIDIOC_G_PRIORITY:
+ *(u32 *)arg = v4l2_prio_max(vdev->prio);
+ break;
+
+ case VIDIOC_S_PRIORITY:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_prio_change(vdev->prio, &handle->vfh.prio,
+ *(u32 *)arg);
+
/* Get, Set & Query control */
case VIDIOC_QUERYCTRL:
return uvc_query_v4l2_ctrl(chain, arg);
@@ -601,6 +617,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_control *ctrl = arg;
struct v4l2_ext_control xctrl;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
memset(&xctrl, 0, sizeof xctrl);
xctrl.id = ctrl->id;
xctrl.value = ctrl->value;
@@ -637,7 +657,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_get(chain, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = i;
+ ctrls->error_idx = ret == -ENOENT
+ ? ctrls->count : i;
return ret;
}
}
@@ -647,6 +668,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_EXT_CTRLS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+ /* Fall through */
case VIDIOC_TRY_EXT_CTRLS:
{
struct v4l2_ext_controls *ctrls = arg;
@@ -661,7 +686,9 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = i;
+ ctrls->error_idx = (ret == -ENOENT &&
+ cmd == VIDIOC_S_EXT_CTRLS)
+ ? ctrls->count : i;
return ret;
}
}
@@ -739,6 +766,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
u32 input = *(u32 *)arg + 1;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -792,6 +823,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_FMT:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -894,6 +929,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_v4l2_get_streamparm(stream, arg);
case VIDIOC_S_PARM:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -924,10 +963,14 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_CROP:
case VIDIOC_S_CROP:
- return -EINVAL;
+ return -ENOTTY;
/* Buffers & streaming */
case VIDIOC_REQBUFS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -973,6 +1016,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -991,6 +1038,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -1030,7 +1081,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_ENUMOUTPUT:
uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd);
- return -EINVAL;
+ return -ENOTTY;
case UVCIOC_CTRL_MAP:
return uvc_ioctl_ctrl_map(chain, arg);
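
The uvc_v4l2.c changes above hook the V4L2 priority mechanism into the driver: VIDIOC_G/S_PRIORITY are handled per chain and the state-changing ioctls (S_CTRL, S_EXT_CTRLS, S_INPUT, S_FMT, S_PARM, REQBUFS, STREAMON/OFF) are gated by v4l2_prio_check(). Seen from user space it works roughly as below (a hedged sketch; the device path and error handling are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);
            enum v4l2_priority prio = V4L2_PRIORITY_RECORD;

            if (fd < 0)
                    return 1;

            /* Raise this file handle's priority... */
            if (ioctl(fd, VIDIOC_S_PRIORITY, &prio) < 0)
                    perror("VIDIOC_S_PRIORITY");

            /* ...lower-priority handles now get -EBUSY from the gated
             * ioctls until this handle is closed. */
            if (ioctl(fd, VIDIOC_G_PRIORITY, &prio) == 0)
                    printf("highest active priority: %d\n", prio);

            return 0;
    }
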
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 57c3076a462..3394c343201 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1812,6 +1812,7 @@ int uvc_video_init(struct uvc_streaming *stream)
probe->bFormatIndex = format->index;
probe->bFrameIndex = frame->bFrameIndex;
+ stream->def_format = format;
stream->cur_format = format;
stream->cur_frame = frame;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index af216ec45e3..af505fdd9b3 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -225,10 +225,14 @@ struct uvc_format_desc {
* always be accessed with the UVC_ENTITY_* macros and never directly.
*/
+#define UVC_ENTITY_FLAG_DEFAULT (1 << 0)
+
struct uvc_entity {
struct list_head list; /* Entity as part of a UVC device. */
struct list_head chain; /* Entity as part of a video device
* chain. */
+ unsigned int flags;
+
__u8 id;
__u16 type;
char name[64];
@@ -371,6 +375,9 @@ struct uvc_video_chain {
struct uvc_entity *selector; /* Selector unit */
struct mutex ctrl_mutex; /* Protects ctrl.info */
+
+ struct v4l2_prio_state prio; /* V4L2 priority state */
+ u32 caps; /* V4L2 chain-wide caps */
};
struct uvc_stats_frame {
@@ -436,6 +443,7 @@ struct uvc_streaming {
struct uvc_format *format;
struct uvc_streaming_control ctrl;
+ struct uvc_format *def_format;
struct uvc_format *cur_format;
struct uvc_frame *cur_frame;
/* Protect access to ctrl, cur_format, cur_frame and hardware video
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 9afab35878b..39edd444293 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1007,8 +1007,7 @@ static void read_pipe_completion(struct urb *purb)
return;
}
- if (purb->actual_length < 0 ||
- purb->actual_length > pipe_info->transfer_size) {
+ if (purb->actual_length > pipe_info->transfer_size) {
dev_err(&cam->udev->dev, "wrong number of bytes\n");
return;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 0c54e19d994..65875c3aba1 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -59,6 +59,7 @@ config VIDEOBUF_DVB
# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
tristate
config VIDEOBUF2_MEMOPS
@@ -68,11 +69,13 @@ config VIDEOBUF2_DMA_CONTIG
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_VMALLOC
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_DMA_SG
tristate
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f995dd31151..380ddd89fa4 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -837,7 +837,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
struct v4l2_dv_timings *fmt)
{
int pix_clk;
- int v_fp, v_bp, h_fp, h_bp, hsync;
+ int v_fp, v_bp, h_fp, hsync;
int frame_width, image_height, image_width;
bool default_gtf;
int h_blank;
@@ -885,7 +885,6 @@ bool v4l2_detect_gtf(unsigned frame_height,
hsync = hsync - hsync % GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
- h_bp = h_blank / 2;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 83ffb6436ba..7157af301b1 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -297,6 +297,7 @@ struct v4l2_plane32 {
union {
__u32 mem_offset;
compat_long_t userptr;
+ __s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
@@ -318,6 +319,7 @@ struct v4l2_buffer32 {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
+ __s32 fd;
} m;
__u32 length;
__u32 reserved2;
@@ -341,6 +343,9 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
up_pln = compat_ptr(p);
if (put_user((unsigned long)up_pln, &up->m.userptr))
return -EFAULT;
+ } else if (memory == V4L2_MEMORY_DMABUF) {
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ return -EFAULT;
} else {
if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
sizeof(__u32)))
@@ -364,6 +369,11 @@ static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
sizeof(__u32)))
return -EFAULT;
+ /* For DMABUF, driver might've set up the fd, so copy it back. */
+ if (memory == V4L2_MEMORY_DMABUF)
+ if (copy_in_user(&up32->m.fd, &up->m.fd,
+ sizeof(int)))
+ return -EFAULT;
return 0;
}
@@ -446,6 +456,10 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (get_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -510,6 +524,10 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (put_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -1000,6 +1018,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_FBUF32:
case VIDIOC_OVERLAY32:
case VIDIOC_QBUF32:
+ case VIDIOC_EXPBUF:
case VIDIOC_DQBUF32:
case VIDIOC_STREAMON32:
case VIDIOC_STREAMOFF32:
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a2df842e510..98dcad9c8a3 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -571,6 +571,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 18a040b935a..c7200921815 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 9e3fc040ea2..e57c002b415 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8f388ff31eb..aa6e7c788db 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ static const char *v4l2_memory_names[] = {
[V4L2_MEMORY_MMAP] = "mmap",
[V4L2_MEMORY_USERPTR] = "userptr",
[V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
};
#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
@@ -453,6 +454,15 @@ static void v4l_print_buffer(const void *arg, bool write_only)
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
@@ -1960,6 +1970,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 3ac83583ad7..438ea45d107 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -369,6 +369,19 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+/**
* v4l2_m2m_streamon() - turn on streaming for a video queue
*/
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -510,12 +523,10 @@ struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops)
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+ WARN_ON(!m2m_ops->job_abort))
return ERR_PTR(-EINVAL);
- BUG_ON(!m2m_ops->device_run);
- BUG_ON(!m2m_ops->job_abort);
-
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index dced41c1d99..996c248dea4 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,20 +412,20 @@ static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{
- switch (media_entity_type(pad->entity)) {
- case MEDIA_ENT_T_V4L2_SUBDEV:
+ if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
- return v4l2_subdev_call(media_entity_to_v4l2_subdev(
- pad->entity),
- pad, get_fmt, NULL, fmt);
- default:
- WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
- media_entity_type(pad->entity), pad->entity->name);
- /* Fall through */
- case MEDIA_ENT_T_DEVNODE_V4L:
- return -EINVAL;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}
+
+ WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->type, pad->entity->name);
+
+ return -EINVAL;
}
int v4l2_subdev_link_validate(struct media_link *link)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index bf7a326b1cd..5449e8aa984 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -335,6 +335,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case V4L2_MEMORY_OVERLAY:
b->m.offset = vb->boff;
break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in videobuf framework */
+ break;
}
b->flags = 0;
@@ -405,6 +408,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
break;
case V4L2_MEMORY_USERPTR:
case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
/* nothing */
break;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 432df119af2..9f81be23a81 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -109,6 +109,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_memop(q, unmap_dmabuf, p->mem_priv);
+
+ call_memop(q, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
/* Free MMAP buffers or release USERPTR buffers */
if (q->memory == V4L2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
}
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
else if (q->memory == V4L2_MEMORY_USERPTR)
b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = vb->v4l2_planes[0].m.fd;
}
/*
@@ -454,13 +488,28 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* __verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*/
static int __verify_memory_type(struct vb2_queue *q,
enum v4l2_memory memory, enum v4l2_buf_type type)
{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+ memory != V4L2_MEMORY_DMABUF) {
dprintk(1, "reqbufs: unsupported memory type\n");
return -EINVAL;
}
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
return -EINVAL;
}
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
/*
* Place the busy tests at the end: -EBUSY can be ignored when
* create_bufs is called with count == 0, but count == 0 should still
@@ -790,6 +844,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
+ unsigned int plane;
if (vb->state != VB2_BUF_STATE_ACTIVE)
return;
@@ -800,6 +855,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, vb->state);
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, finish, vb->planes[plane].mem_priv);
+
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
@@ -845,6 +904,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
b->m.planes[plane].length;
}
}
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ v4l2_planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
+ }
+ }
} else {
/*
* Single-planar buffers do not use planes array,
@@ -859,6 +928,13 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
v4l2_planes[0].m.userptr = b->m.userptr;
v4l2_planes[0].length = b->length;
}
+
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ v4l2_planes[0].m.fd = b->m.fd;
+ v4l2_planes[0].length = b->length;
+ v4l2_planes[0].data_offset = 0;
+ }
+
}
vb->v4l2_buf.field = b->field;
@@ -959,14 +1035,121 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by the userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < planes[plane].data_offset +
+ q->plane_sizes[plane]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->v4l2_planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, planes[plane].length, write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment().. but
+ * really we want to do this just before the DMA, not while queueing
+ * the buffer(s)..
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/**
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->queued_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, prepare, vb->planes[plane].mem_priv);
+
q->ops->buf_queue(vb);
}
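
__qbuf_dmabuf() above resolves the file descriptor supplied by user space into a struct dma_buf, attaches and maps it per plane, and only then commits the plane information. On the user-space side, importing into a single-planar queue looks roughly like this (a hedged sketch; the dma-buf fd is assumed to come from some exporter, e.g. another device's VIDIOC_EXPBUF):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Queue one imported dma-buf into a single-planar capture queue. */
    static int queue_dmabuf(int video_fd, int dmabuf_fd, unsigned int index)
    {
            struct v4l2_requestbuffers req;
            struct v4l2_buffer buf;

            memset(&req, 0, sizeof(req));
            req.count = 4;
            req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            req.memory = V4L2_MEMORY_DMABUF;
            if (ioctl(video_fd, VIDIOC_REQBUFS, &req) < 0)
                    return -1;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_DMABUF;
            buf.index = index;
            buf.m.fd = dmabuf_fd;           /* the imported buffer */
            return ioctl(video_fd, VIDIOC_QBUF, &buf);
    }
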
@@ -982,6 +1165,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
case V4L2_MEMORY_USERPTR:
ret = __qbuf_userptr(vb, b);
break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
@@ -1303,6 +1489,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int i;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ /* unmap DMABUF buffer */
+ if (q->memory == V4L2_MEMORY_DMABUF)
+ for (i = 0; i < vb->num_planes; ++i) {
+ if (!vb->planes[i].dbuf_mapped)
+ continue;
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+}
+
+/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
* @q: videobuf2 queue
* @b: buffer structure passed from userspace to vidioc_dqbuf handler
@@ -1363,11 +1573,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
dprintk(1, "dqbuf of buffer %d, with state %d\n",
vb->v4l2_buf.index, vb->state);
- vb->state = VB2_BUF_STATE_DEQUEUED;
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1406,7 +1617,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Reinitialize all buffers for next use.
*/
for (i = 0; i < q->num_buffers; ++i)
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+ __vb2_dqbuf(q->bufs[i]);
}
/**
@@ -1540,6 +1751,79 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
}
/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(1, "Queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (eb->flags & ~O_CLOEXEC) {
+ dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ return -EINVAL;
+ }
+
+ if (eb->type != q->type) {
+ dprintk(1, "qbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (eb->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[eb->index];
+
+ if (eb->plane >= vb->num_planes) {
+ dprintk(1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ vb_plane = &vb->planes[eb->plane];
+
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "Failed to export buffer %d, plane %d\n",
+ eb->index, eb->plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, eb->flags);
+ if (ret < 0) {
+ dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+ eb->index, eb->plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+ eb->index, eb->plane, ret);
+ eb->fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+/**
* vb2_mmap() - map video buffers into application address space
* @q: videobuf2 queue
* @vma: vma passed to the mmap file operation handler in the driver
@@ -2245,6 +2529,16 @@ int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
/* v4l2_file_operations helpers */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
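
vb2_expbuf() above implements the new VIDIOC_EXPBUF ioctl: it validates the request against an MMAP queue, asks the allocator's get_dmabuf op for a struct dma_buf and converts it into a file descriptor with dma_buf_fd(). The matching exporter-side call from user space looks roughly like this (a hedged sketch; assumes the buffers were set up with V4L2_MEMORY_MMAP):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Export plane 0 of MMAP buffer 'index' as a dma-buf file descriptor. */
    static int export_buffer(int video_fd, unsigned int index)
    {
            struct v4l2_exportbuffer expbuf;

            memset(&expbuf, 0, sizeof(expbuf));
            expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            expbuf.index = index;
            expbuf.plane = 0;
            expbuf.flags = O_CLOEXEC;       /* the only flag the patch accepts */

            if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
                    return -1;

            return expbuf.fd;               /* hand to an importer or mmap() it */
    }
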
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a9..10beaee7f0a 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
* the Free Software Foundation.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
};
struct vb2_dc_buf {
- struct vb2_dc_conf *conf;
+ struct device *dev;
void *vaddr;
- dma_addr_t dma_addr;
unsigned long size;
- struct vm_area_struct *vma;
- atomic_t refcount;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+
+ /* MMAP related */
struct vb2_vmarea_handler handler;
+ atomic_t refcount;
+ struct sg_table *sgt_base;
+
+ /* USERPTR related */
+ struct vm_area_struct *vma;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
};
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+ void (*cb)(struct page *pg))
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ struct page *page = sg_page(s);
+ unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+ >> PAGE_SHIFT;
+ unsigned int j;
+
+ for (j = 0; j < n_pages; ++j, ++page)
+ cb(page);
+ }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!atomic_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ put_device(buf->dev);
+ kfree(buf);
+}
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
struct vb2_dc_conf *conf = alloc_ctx;
+ struct device *dev = conf->dev;
struct vb2_dc_buf *buf;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
- GFP_KERNEL);
+ /* align image size to PAGE_SIZE */
+ size = PAGE_ALIGN(size);
+
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
if (!buf->vaddr) {
- dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
- size);
+ dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- buf->conf = conf;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
buf->size = size;
buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dma_contig_put;
+ buf->handler.put = vb2_dc_put;
buf->handler.arg = buf;
atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
return buf;
}
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dc_buf *buf = buf_priv;
+ int ret;
- if (atomic_dec_and_test(&buf->refcount)) {
- dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->dma_addr);
- kfree(buf);
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ /*
+ * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+ * map whole buffer
+ */
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
}
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
}
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
- return &buf->dma_addr;
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
}
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
- if (!buf)
- return NULL;
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* steal the dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dir == dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ attach->dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dir = dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
return buf->vaddr;
}
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
{
- struct vb2_dc_buf *buf = buf_priv;
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
- return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .kmap = vb2_dc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+ buf->size);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
}
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
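
vb2_dc_get_dmabuf() is what backs the new VIDIOC_EXPBUF ioctl. A hedged userspace sketch of exporting a buffer and mapping the returned fd (the buffer type and index are assumptions, not taken from this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int export_and_map(int vfd, unsigned int index, size_t length, void **mem)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;
	if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;

	/* the DMABUF fd can be mmap()ed here or passed to another device */
	*mem = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    expbuf.fd, 0);
	return *mem == MAP_FAILED ? -1 : expbuf.fd;
}
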
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+ int n_pages, struct vm_area_struct *vma, int write)
+{
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ pages[i] = pfn_to_page(pfn);
+ }
+ } else {
+ int n;
+
+ n = get_user_pages(current, current->mm, start & PAGE_MASK,
+ n_pages, write, 1, pages, NULL);
+ /* negative error means that no page was pinned */
+ n = max(n, 0);
+ if (n != n_pages) {
+ pr_err("got only %d of %d user pages\n", n, n_pages);
+ while (n)
+ put_page(pages[--n]);
+ return -EFAULT;
+ }
}
- return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
- &vb2_common_vm_ops, &buf->handler);
+ return 0;
}
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
{
+ set_page_dirty_lock(page);
+ put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
+ unsigned long start;
+ unsigned long end;
+ unsigned long offset;
+ struct page **pages;
+ int n_pages;
+ int ret = 0;
struct vm_area_struct *vma;
- dma_addr_t dma_addr = 0;
- int ret;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+
+ /* Only cache-aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+ buf->dev = conf->dev;
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ start = vaddr & PAGE_MASK;
+ offset = vaddr & ~PAGE_MASK;
+ end = PAGE_ALIGN(vaddr + size);
+ n_pages = (end - start) >> PAGE_SHIFT;
+
+ pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate pages table\n");
+ goto fail_buf;
+ }
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ pr_err("no vma for address %lu\n", vaddr);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ pr_err("failed to copy vma\n");
+ ret = -ENOMEM;
+ goto fail_pages;
+ }
+
+ /* extract page list from userspace mapping */
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
- printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
- vaddr);
- kfree(buf);
- return ERR_PTR(ret);
+ pr_err("failed to get user pages\n");
+ goto fail_vma;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_get_user_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
}
+ /* pages are no longer needed */
+ kfree(pages);
+ pages = NULL;
+
+ sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
+ if (sgt->nents <= 0) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
buf->size = size;
- buf->dma_addr = dma_addr;
- buf->vma = vma;
+ buf->dma_sgt = sgt;
return buf;
+
+fail_map_sg:
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, put_page);
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_get_user_pages:
+ if (pages && !vma_is_io(buf->vma))
+ while (n_pages)
+ put_page(pages[--n_pages]);
+
+fail_vma:
+ vb2_put_vma(buf->vma);
+
+fail_pages:
+ kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
}
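
The rewritten vb2_dc_get_userptr() now rejects pointers and sizes that are not cache-line aligned, so userspace should allocate aligned memory and keep the size aligned as well (page alignment covers both). A sketch of queuing such a buffer, assuming the queue was set up in USERPTR mode:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int queue_userptr(int vfd, unsigned int index, size_t size)
{
	struct v4l2_buffer buf;
	void *mem;

	/* page alignment comfortably satisfies dma_get_cache_alignment() */
	if (posix_memalign(&mem, 4096, size))
		return -1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = index;
	buf.m.userptr = (unsigned long)mem;
	buf.length = size;

	return ioctl(vfd, VIDIOC_QBUF, &buf);
}
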
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
- if (!buf)
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR_OR_NULL(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* check whether the dmabuf is big enough to hold a contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu b\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
return;
+ }
- vb2_put_vma(buf->vma);
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
kfree(buf);
}
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dma_contig_alloc,
- .put = vb2_dma_contig_put,
- .cookie = vb2_dma_contig_cookie,
- .vaddr = vb2_dma_contig_vaddr,
- .mmap = vb2_dma_contig_mmap,
- .get_userptr = vb2_dma_contig_get_userptr,
- .put_userptr = vb2_dma_contig_put_userptr,
- .num_users = vb2_dma_contig_num_users,
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
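
For drivers, the reworked allocator is still plugged in the same way; only the io_modes mask grows a VB2_DMABUF bit. A minimal sketch of the queue setup, where my_dev, my_buffer and my_vb2_ops are placeholders for a real driver's types:

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct my_dev;					/* placeholder driver state */
struct my_buffer { struct vb2_buffer vb; };	/* per-buffer bookkeeping */
static const struct vb2_ops my_vb2_ops;		/* queue_setup etc. omitted */

static int my_init_queue(struct vb2_queue *q, struct my_dev *dev)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;

	return vb2_queue_init(q);
}

The allocator context obtained from vb2_dma_contig_init_ctx() is then handed out through the driver's queue_setup callback, as before.
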
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 051ea3571b2..81c1ad8b2cf 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -137,46 +137,6 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
/**
- * vb2_mmap_pfn_range() - map physical pages to userspace
- * @vma: virtual memory region for the mapping
- * @paddr: starting physical address of the memory to be mapped
- * @size: size of the memory to be mapped
- * @vm_ops: vm operations to be assigned to the created area
- * @priv: private data to be associated with the area
- *
- * Returns 0 on success.
- */
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
- unsigned long size,
- const struct vm_operations_struct *vm_ops,
- void *priv)
-{
- int ret;
-
- size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (ret) {
- printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
- return ret;
- }
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = priv;
- vma->vm_ops = vm_ops;
-
- vma->vm_ops->open(vma);
-
- pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, paddr, vma->vm_start, size);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
-
-/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 94efa04d8d5..a47fd4f589a 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -30,6 +30,7 @@ struct vb2_vmalloc_buf {
unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
};
static void vb2_vmalloc_put(void *buf_priv);
@@ -207,11 +208,66 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+ return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->write = write;
+ buf->size = size;
+
+ return buf;
+}
+
+
const struct vb2_mem_ops vb2_vmalloc_memops = {
.alloc = vb2_vmalloc_alloc,
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
.vaddr = vb2_vmalloc_vaddr,
.mmap = vb2_vmalloc_mmap,
.num_users = vb2_vmalloc_num_users,
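
Both memops now implement attach/map, which is what lets a queue accept buffers in V4L2_MEMORY_DMABUF mode. A rough userspace sketch of the importer side (where the fd comes from, and the type and index, are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* hand a DMABUF fd (e.g. from another V4L2 or DRM device) to the queue */
int queue_dmabuf(int vfd, unsigned int index, int dmabuf_fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.fd = dmabuf_fd;

	return ioctl(vfd, VIDIOC_QBUF, &buf);
}
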
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0c3ced70707..164afa71bba 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,6 +792,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* than an unsolicited DID_ABORT.
*/
sc->result = DID_RESET << 16;
+ break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
if (ioc->bus_type == FC)
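
The one-line mptscsih change adds a missing break, so the DID_RESET case no longer falls through into the EXT_TERMINATED handling. A tiny standalone illustration of that class of bug, not the driver's actual code:

#include <stdio.h>

static void classify(int status)
{
	switch (status) {
	case 1:
		puts("reset");
		break;	/* without this break, "terminated" would also run */
	case 2:
		puts("terminated");
		break;
	}
}

int main(void)
{
	classify(1);
	return 0;
}
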
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
index 5fb195af43e..4a7d2ebdfc9 100644
--- a/drivers/message/i2o/README.ioctl
+++ b/drivers/message/i2o/README.ioctl
@@ -138,7 +138,7 @@ VI. Setting Parameters
The return value is the size in bytes of the data written into
ops->resbuf if no errors occur. If an error occurs, -1 is returned
- and errno is set appropriatly:
+ and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -222,7 +222,7 @@ VIII. Downloading Software
RETURNS
This function returns 0 no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -264,7 +264,7 @@ IX. Uploading Software
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -301,7 +301,7 @@ X. Removing Software
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -325,7 +325,7 @@ X. Validating Configuration
RETURNS
This function returns 0 if no erro occur. If an error occurs, -1 is
- returned and errno is set appropriatly:
+ returned and errno is set appropriately:
ETIMEDOUT Timeout waiting for reply message
ENXIO Invalid IOP number
@@ -360,7 +360,7 @@ XI. Configuration Dialog
RETURNS
This function returns 0 if no error occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 4796bbf0ae4..49e86aed2bc 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -609,7 +609,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
u8 operation;
/*
- * This is to deail with the case of an application
+ * This is to deal with the case of an application
* opening a device and then the device disappears while
* it's in use, and then the application tries to release
* it. ex: Unmounting a deleted RAID volume at reboot.
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 9a49c243a6a..5451beff183 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -189,7 +189,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
return -ENXIO;
/*
- * Stop users being able to try and allocate arbitary amounts
+ * Stop users being able to try and allocate arbitrary amounts
* of DMA space. 64K is way more than sufficient for this.
*/
if (kcmd.oplen > 65536)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 94bdf83b4bc..1c0abd4dfc4 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -104,6 +104,17 @@ config MFD_TI_SSP
To compile this driver as a module, choose M here: the
module will be called ti-ssp.
+config MFD_TI_AM335X_TSCADC
+ tristate "TI ADC / Touch Screen chip support"
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments series
+ of Touch Screen / ADC chips.
+ To compile this driver as a module, choose M here: the
+ module will be called ti_am335x_tscadc.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -211,7 +222,6 @@ config MFD_TPS6586X
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
- depends on REGULATOR
help
If you say yes here you get support for the TPS6586X series of
Power Management chips.
@@ -254,6 +264,20 @@ config MFD_TPS65912_SPI
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
+config MFD_TPS80031
+ bool "TI TPS80031/TPS80032 Power Management chips"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the Texas Instruments
+ TPS80031/TPS80032 Fully Integrated Power Management with Power
+ Path and Battery Charger. The device provides five configurable
+ step-down converters, 11 general purpose LDOs, USB OTG Module,
+ ADC, RTC, 2 PWM, System Voltage Regulator/Battery Charger with
+ Power Path from USB, 32K clock generator.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -310,10 +334,10 @@ config MFD_TWL4030_AUDIO
config TWL6040_CORE
bool "Support for TWL6040 audio codec"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
- select IRQ_DOMAIN
+ select REGMAP_IRQ
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
@@ -991,6 +1015,7 @@ config MFD_TPS65090
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
If you say yes here you get support for the TPS65090 series of
Power Management chips.
@@ -1035,6 +1060,7 @@ config MFD_STA2X11
bool "STA2X11 multi function device support"
depends on STA2X11
select MFD_CORE
+ select REGMAP_MMIO
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
@@ -1054,6 +1080,38 @@ config MFD_PALMAS
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
+config MFD_VIPERBOARD
+ tristate "Support for Nano River Technologies Viperboard"
+ select MFD_CORE
+ depends on USB
+ default n
+ help
+ Say yes here if you want support for Nano River Technologies
+ Viperboard.
+ There are MFD cell drivers available for the I2C master, the ADC and
+ both GPIOs found on the board. The SPI part does not yet
+ have a driver.
+ You need to select the mfd cell drivers separately.
+ The drivers do not support all features the board exposes.
+
+config MFD_RETU
+ tristate "Support for Retu multi-function device"
+ select MFD_CORE
+ depends on I2C
+ select REGMAP_IRQ
+ help
+ Retu is a multi-function device found on Nokia Internet Tablets
+ (770, N800 and N810).
+
+config MFD_AS3711
+ bool "Support for AS3711"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C=y
+ help
+ Support for the AS3711 PMIC from AMS.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 69f260ae022..8b977f8045a 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
@@ -55,18 +56,19 @@ obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o
tps65912-objs := tps65912-core.o tps65912-irq.o
obj-$(CONFIG_MFD_TPS65912) += tps65912.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
+obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
-obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
+obj-$(CONFIG_TWL6040_CORE) += twl6040.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+obj-$(CONFIG_PMIC_DA9052) += da9052-irq.o
obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
@@ -137,8 +140,11 @@ obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
+obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
+obj-$(CONFIG_MFD_RETU) += retu-mfd.o
+obj-$(CONFIG_MFD_AS3711) += as3711.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 127b00aadae..e1650badd10 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -565,15 +565,10 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
else
num_irqs = AB8500_NR_IRQS;
- if (ab8500->irq_base) {
- ab8500->domain = irq_domain_add_legacy(
- NULL, num_irqs, ab8500->irq_base,
- 0, &ab8500_irq_ops, ab8500);
- }
- else {
- ab8500->domain = irq_domain_add_linear(
- np, num_irqs, &ab8500_irq_ops, ab8500);
- }
+ /* If ->irq_base is zero this will give a linear mapping */
+ ab8500->domain = irq_domain_add_simple(NULL,
+ num_irqs, ab8500->irq_base,
+ &ab8500_irq_ops, ab8500);
if (!ab8500->domain) {
dev_err(ab8500->dev, "Failed to create irqdomain\n");
@@ -591,38 +586,6 @@ int ab8500_suspend(struct ab8500 *ab8500)
return 0;
}
-/* AB8500 GPIO Resources */
-static struct resource __devinitdata ab8500_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-/* AB9540 GPIO Resources */
-static struct resource __devinitdata ab9540_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT14",
- .start = AB9540_INT_GPIO50R,
- .end = AB9540_INT_GPIO54R,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT15",
- .start = AB9540_INT_GPIO50F,
- .end = AB9540_INT_GPIO54F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
static struct resource ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
@@ -984,6 +947,10 @@ static struct mfd_cell abx500_common_devs[] = {
.of_compatible = "stericsson,ab8500-regulator",
},
{
+ .name = "abx500-clk",
+ .of_compatible = "stericsson,abx500-clk",
+ },
+ {
.name = "ab8500-gpadc",
.of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
@@ -1041,23 +1008,43 @@ static struct mfd_cell abx500_common_devs[] = {
static struct mfd_cell ab8500_bm_devs[] = {
{
.name = "ab8500-charger",
+ .of_compatible = "stericsson,ab8500-charger",
.num_resources = ARRAY_SIZE(ab8500_charger_resources),
.resources = ab8500_charger_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-btemp",
+ .of_compatible = "stericsson,ab8500-btemp",
.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
.resources = ab8500_btemp_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-fg",
+ .of_compatible = "stericsson,ab8500-fg",
.num_resources = ARRAY_SIZE(ab8500_fg_resources),
.resources = ab8500_fg_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-chargalg",
+ .of_compatible = "stericsson,ab8500-chargalg",
.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
.resources = ab8500_chargalg_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
};
@@ -1065,8 +1052,6 @@ static struct mfd_cell ab8500_devs[] = {
{
.name = "ab8500-gpio",
.of_compatible = "stericsson,ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
- .resources = ab8500_gpio_resources,
},
{
.name = "ab8500-usb",
@@ -1083,8 +1068,6 @@ static struct mfd_cell ab8500_devs[] = {
static struct mfd_cell ab9540_devs[] = {
{
.name = "ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab9540_gpio_resources),
- .resources = ab9540_gpio_resources,
},
{
.name = "ab9540-usb",
@@ -1269,7 +1252,7 @@ static int ab8500_probe(struct platform_device *pdev)
int i;
u8 value;
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL);
if (!ab8500)
return -ENOMEM;
@@ -1279,10 +1262,8 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->dev = &pdev->dev;
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!resource) {
- ret = -ENODEV;
- goto out_free_ab8500;
- }
+ if (!resource)
+ return -ENODEV;
ab8500->irq = resource->start;
@@ -1305,7 +1286,7 @@ static int ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->version = value;
}
@@ -1313,7 +1294,7 @@ static int ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->chip_id = value;
@@ -1330,14 +1311,13 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->mask_size = AB8500_NUM_IRQ_REGS;
ab8500->irq_reg_offset = ab8500_irq_regoffset;
}
- ab8500->mask = kzalloc(ab8500->mask_size, GFP_KERNEL);
+ ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
if (!ab8500->mask)
return -ENOMEM;
- ab8500->oldmask = kzalloc(ab8500->mask_size, GFP_KERNEL);
- if (!ab8500->oldmask) {
- ret = -ENOMEM;
- goto out_freemask;
- }
+ ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+ if (!ab8500->oldmask)
+ return -ENOMEM;
+
/*
* ab8500 has switched off due to (SWITCH_OFF_STATUS):
* 0x01 Swoff bit programming
@@ -1391,37 +1371,37 @@ static int ab8500_probe(struct platform_device *pdev)
ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
if (ret)
- goto out_freeoldmask;
+ return ret;
for (i = 0; i < ab8500->mask_size; i++)
ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
ret = ab8500_irq_init(ab8500, np);
if (ret)
- goto out_freeoldmask;
+ return ret;
/* Activate this feature only in ab9540 */
/* till tests are done on ab8500 1p2 or later*/
if (is_ab9540(ab8500)) {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_hierarchical_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
}
else {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
- goto out_freeoldmask;
+ return ret;
}
ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
ARRAY_SIZE(abx500_common_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
@@ -1432,14 +1412,14 @@ static int ab8500_probe(struct platform_device *pdev)
ARRAY_SIZE(ab8500_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500) || is_ab8505(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
ARRAY_SIZE(ab9540_ab8505_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (!no_bm) {
/* Add battery management devices */
@@ -1460,17 +1440,6 @@ static int ab8500_probe(struct platform_device *pdev)
dev_err(ab8500->dev, "error creating sysfs entries\n");
return ret;
-
-out_freeirq:
- free_irq(ab8500->irq, ab8500);
-out_freeoldmask:
- kfree(ab8500->oldmask);
-out_freemask:
- kfree(ab8500->mask);
-out_free_ab8500:
- kfree(ab8500);
-
- return ret;
}
static int ab8500_remove(struct platform_device *pdev)
@@ -1483,11 +1452,6 @@ static int ab8500_remove(struct platform_device *pdev)
sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
mfd_remove_devices(ab8500->dev);
- free_irq(ab8500->irq, ab8500);
-
- kfree(ab8500->oldmask);
- kfree(ab8500->mask);
- kfree(ab8500);
return 0;
}
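
The ab8500 conversion above leans on managed (devm_*) resources, which is why the out_free*/out_freeirq unwind labels disappear. A generic sketch of that idiom, with example_chip and example_irq_thread as stand-ins for real driver code:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_chip { int state; };

static irqreturn_t example_irq_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_chip *chip;
	int irq, ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					example_irq_thread, IRQF_ONESHOT,
					"example", chip);
	if (ret)
		return ret;	/* everything allocated so far is auto-freed */

	return 0;
}
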
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 1a6f943f733..bc8a3edb6bb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -272,6 +272,7 @@ static struct mfd_cell early_devs[] = {
static struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
+ { .name = "arizona-haptics" },
{ .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5102-codec" },
@@ -280,6 +281,7 @@ static struct mfd_cell wm5102_devs[] = {
static struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
+ { .name = "arizona-haptics" },
{ .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5110-codec" },
@@ -290,6 +292,7 @@ int arizona_dev_init(struct arizona *arizona)
struct device *dev = arizona->dev;
const char *type_name;
unsigned int reg, val;
+ int (*apply_patch)(struct arizona *) = NULL;
int ret, i;
dev_set_drvdata(arizona->dev, arizona);
@@ -389,7 +392,7 @@ int arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5102;
}
- ret = wm5102_patch(arizona);
+ apply_patch = wm5102_patch;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -400,7 +403,7 @@ int arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5110;
}
- ret = wm5110_patch(arizona);
+ apply_patch = wm5110_patch;
break;
#endif
default:
@@ -410,9 +413,6 @@ int arizona_dev_init(struct arizona *arizona)
dev_info(dev, "%s revision %c\n", type_name, arizona->rev + 'A');
- if (ret != 0)
- dev_err(arizona->dev, "Failed to apply patch: %d\n", ret);
-
/* If we have a /RESET GPIO we'll already be reset */
if (!arizona->pdata.reset) {
regcache_mark_dirty(arizona->regmap);
@@ -436,6 +436,15 @@ int arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ if (apply_patch) {
+ ret = apply_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to apply patch: %d\n",
+ ret);
+ goto err_reset;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
if (!arizona->pdata.gpio_defaults[i])
continue;
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index b1b00917740..74713bf5371 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -224,6 +224,7 @@ int arizona_irq_init(struct arizona *arizona)
arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
arizona);
if (!arizona->virq) {
+ dev_err(arizona->dev, "Failed to add core IRQ domain\n");
ret = -EINVAL;
goto err;
}
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
new file mode 100644
index 00000000000..e994c969112
--- /dev/null
+++ b/drivers/mfd/as3711.c
@@ -0,0 +1,217 @@
+/*
+ * AS3711 PMIC MFD driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ AS3711_REGULATOR,
+ AS3711_BACKLIGHT,
+};
+
+/*
+ * Ok to have it static: it is only used during probing and multiple I2C devices
+ * cannot be probed simultaneously. Just make sure to avoid stale data.
+ */
+static struct mfd_cell as3711_subdevs[] = {
+ [AS3711_REGULATOR] = {.name = "as3711-regulator",},
+ [AS3711_BACKLIGHT] = {.name = "as3711-backlight",},
+};
+
+static bool as3711_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_REG_STATUS:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_precious_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_SD_1_VOLTAGE:
+ case AS3711_SD_2_VOLTAGE:
+ case AS3711_SD_3_VOLTAGE:
+ case AS3711_SD_4_VOLTAGE:
+ case AS3711_LDO_1_VOLTAGE:
+ case AS3711_LDO_2_VOLTAGE:
+ case AS3711_LDO_3_VOLTAGE:
+ case AS3711_LDO_4_VOLTAGE:
+ case AS3711_LDO_5_VOLTAGE:
+ case AS3711_LDO_6_VOLTAGE:
+ case AS3711_LDO_7_VOLTAGE:
+ case AS3711_LDO_8_VOLTAGE:
+ case AS3711_SD_CONTROL:
+ case AS3711_GPIO_SIGNAL_OUT:
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_SD_CONTROL_1:
+ case AS3711_SD_CONTROL_2:
+ case AS3711_CURR_CONTROL:
+ case AS3711_CURR1_VALUE:
+ case AS3711_CURR2_VALUE:
+ case AS3711_CURR3_VALUE:
+ case AS3711_STEPUP_CONTROL_1:
+ case AS3711_STEPUP_CONTROL_2:
+ case AS3711_STEPUP_CONTROL_4:
+ case AS3711_STEPUP_CONTROL_5:
+ case AS3711_REG_STATUS:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_ASIC_ID_1:
+ case AS3711_ASIC_ID_2:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config as3711_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = as3711_volatile_reg,
+ .readable_reg = as3711_readable_reg,
+ .precious_reg = as3711_precious_reg,
+ .max_register = AS3711_MAX_REGS,
+ .num_reg_defaults_raw = AS3711_MAX_REGS,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int as3711_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct as3711 *as3711;
+ struct as3711_platform_data *pdata = client->dev.platform_data;
+ unsigned int id1, id2;
+ int ret;
+
+ if (!pdata)
+ dev_dbg(&client->dev, "Platform data not found\n");
+
+ as3711 = devm_kzalloc(&client->dev, sizeof(struct as3711), GFP_KERNEL);
+ if (!as3711) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ as3711->dev = &client->dev;
+ i2c_set_clientdata(client, as3711);
+
+ if (client->irq)
+ dev_notice(&client->dev, "IRQ not supported yet\n");
+
+ as3711->regmap = devm_regmap_init_i2c(client, &as3711_regmap_config);
+ if (IS_ERR(as3711->regmap)) {
+ ret = PTR_ERR(as3711->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_1, &id1);
+ if (!ret)
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_2, &id2);
+ if (ret < 0) {
+ dev_err(&client->dev, "regmap_read() failed: %d\n", ret);
+ return ret;
+ }
+ if (id1 != 0x8b)
+ return -ENODEV;
+ dev_info(as3711->dev, "AS3711 detected: %x:%x\n", id1, id2);
+
+ /* We can reuse as3711_subdevs[]; it will be copied in mfd_add_devices() */
+ if (pdata) {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = &pdata->regulator;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = sizeof(pdata->regulator);
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = &pdata->backlight;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = sizeof(pdata->backlight);
+ } else {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = NULL;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = 0;
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = NULL;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = 0;
+ }
+
+ ret = mfd_add_devices(as3711->dev, -1, as3711_subdevs,
+ ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
+ if (ret < 0)
+ dev_err(&client->dev, "add mfd devices failed: %d\n", ret);
+
+ return ret;
+}
+
+static int as3711_i2c_remove(struct i2c_client *client)
+{
+ struct as3711 *as3711 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(as3711->dev);
+ return 0;
+}
+
+static const struct i2c_device_id as3711_i2c_id[] = {
+ {.name = "as3711", .driver_data = 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, as3711_i2c_id);
+
+static struct i2c_driver as3711_i2c_driver = {
+ .driver = {
+ .name = "as3711",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_i2c_probe,
+ .remove = as3711_i2c_remove,
+ .id_table = as3711_i2c_id,
+};
+
+static int __init as3711_i2c_init(void)
+{
+ return i2c_add_driver(&as3711_i2c_driver);
+}
+/* Initialise early */
+subsys_initcall(as3711_i2c_init);
+
+static void __exit as3711_i2c_exit(void)
+{
+ i2c_del_driver(&as3711_i2c_driver);
+}
+module_exit(as3711_i2c_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("AS3711 PMIC driver");
+MODULE_LICENSE("GPL v2");
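
The as3711 driver expects its sub-device platform data through the I2C client. A hypothetical board-file sketch follows; the 0x40 slave address and the empty platform data are assumptions, not part of this patch:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mfd/as3711.h>

/* regulator/backlight initialisation data would be filled in by the board */
static struct as3711_platform_data as3711_pdata;

static struct i2c_board_info as3711_board_info __initdata = {
	I2C_BOARD_INFO("as3711", 0x40),
	.platform_data = &as3711_pdata,
};

/* called from the machine init code, before the I2C adapter probes */
static int __init example_register_as3711(void)
{
	return i2c_register_board_info(0, &as3711_board_info, 1);
}
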
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 689b747416a..a3c9613f916 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -24,16 +23,6 @@
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/reg.h>
-#define DA9052_NUM_IRQ_REGS 4
-#define DA9052_IRQ_MASK_POS_1 0x01
-#define DA9052_IRQ_MASK_POS_2 0x02
-#define DA9052_IRQ_MASK_POS_3 0x04
-#define DA9052_IRQ_MASK_POS_4 0x08
-#define DA9052_IRQ_MASK_POS_5 0x10
-#define DA9052_IRQ_MASK_POS_6 0x20
-#define DA9052_IRQ_MASK_POS_7 0x40
-#define DA9052_IRQ_MASK_POS_8 0x80
-
static bool da9052_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -425,15 +414,6 @@ err:
}
EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
-static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
-{
- struct da9052 *da9052 = irq_data;
-
- complete(&da9052->done);
-
- return IRQ_HANDLED;
-}
-
int da9052_adc_read_temp(struct da9052 *da9052)
{
int tbat;
@@ -447,74 +427,6 @@ int da9052_adc_read_temp(struct da9052 *da9052)
}
EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
-static struct resource da9052_rtc_resource = {
- .name = "ALM",
- .start = DA9052_IRQ_ALARM,
- .end = DA9052_IRQ_ALARM,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_onkey_resource = {
- .name = "ONKEY",
- .start = DA9052_IRQ_NONKEY,
- .end = DA9052_IRQ_NONKEY,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_bat_resources[] = {
- {
- .name = "BATT TEMP",
- .start = DA9052_IRQ_TBAT,
- .end = DA9052_IRQ_TBAT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN DET",
- .start = DA9052_IRQ_DCIN,
- .end = DA9052_IRQ_DCIN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN REM",
- .start = DA9052_IRQ_DCINREM,
- .end = DA9052_IRQ_DCINREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS DET",
- .start = DA9052_IRQ_VBUS,
- .end = DA9052_IRQ_VBUS,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS REM",
- .start = DA9052_IRQ_VBUSREM,
- .end = DA9052_IRQ_VBUSREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "CHG END",
- .start = DA9052_IRQ_CHGEND,
- .end = DA9052_IRQ_CHGEND,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource da9052_tsi_resources[] = {
- {
- .name = "PENDWN",
- .start = DA9052_IRQ_PENDOWN,
- .end = DA9052_IRQ_PENDOWN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TSIRDY",
- .start = DA9052_IRQ_TSIREADY,
- .end = DA9052_IRQ_TSIREADY,
- .flags = IORESOURCE_IRQ,
- },
-};
-
static struct mfd_cell da9052_subdev_info[] = {
{
.name = "da9052-regulator",
@@ -574,13 +486,9 @@ static struct mfd_cell da9052_subdev_info[] = {
},
{
.name = "da9052-onkey",
- .resources = &da9052_onkey_resource,
- .num_resources = 1,
},
{
.name = "da9052-rtc",
- .resources = &da9052_rtc_resource,
- .num_resources = 1,
},
{
.name = "da9052-gpio",
@@ -602,160 +510,15 @@ static struct mfd_cell da9052_subdev_info[] = {
},
{
.name = "da9052-tsi",
- .resources = da9052_tsi_resources,
- .num_resources = ARRAY_SIZE(da9052_tsi_resources),
},
{
.name = "da9052-bat",
- .resources = da9052_bat_resources,
- .num_resources = ARRAY_SIZE(da9052_bat_resources),
},
{
.name = "da9052-watchdog",
},
};
-static struct regmap_irq da9052_irqs[] = {
- [DA9052_IRQ_DCIN] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_VBUS] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_DCINREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_VBUSREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_VDDLOW] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ALARM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_SEQRDY] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_COMP1V2] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_NONKEY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_IDFLOAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_IDGND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_CHGEND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_TBAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ADC_EOM] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_PENDOWN] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_TSIREADY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI0] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI1] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI2] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI3] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI4] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI5] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI6] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI7] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI8] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI9] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI10] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI11] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI12] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI13] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI14] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI15] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
-};
-
-static struct regmap_irq_chip da9052_regmap_irq_chip = {
- .name = "da9052_irq",
- .status_base = DA9052_EVENT_A_REG,
- .mask_base = DA9052_IRQ_MASK_A_REG,
- .ack_base = DA9052_EVENT_A_REG,
- .num_regs = DA9052_NUM_IRQ_REGS,
- .irqs = da9052_irqs,
- .num_irqs = ARRAY_SIZE(da9052_irqs),
-};
-
struct regmap_config da9052_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -782,45 +545,31 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->chip_id = chip_id;
- if (!pdata || !pdata->irq_base)
- da9052->irq_base = -1;
- else
- da9052->irq_base = pdata->irq_base;
-
- ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- da9052->irq_base, &da9052_regmap_irq_chip,
- &da9052->irq_data);
- if (ret < 0)
- goto regmap_err;
-
- da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
-
- ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "adc irq", da9052);
- if (ret != 0)
- dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+ ret = da9052_irq_init(da9052);
+ if (ret != 0) {
+ dev_err(da9052->dev, "da9052_irq_init failed: %d\n", ret);
+ return ret;
+ }
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
- if (ret)
+ if (ret) {
+ dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
goto err;
+ }
return 0;
err:
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- mfd_remove_devices(da9052->dev);
-regmap_err:
+ da9052_irq_exit(da9052);
+
return ret;
}
void da9052_device_exit(struct da9052 *da9052)
{
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
+ da9052_irq_exit(da9052);
}
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/mfd/da9052-irq.c b/drivers/mfd/da9052-irq.c
new file mode 100644
index 00000000000..57ae7841f53
--- /dev/null
+++ b/drivers/mfd/da9052-irq.c
@@ -0,0 +1,288 @@
+/*
+ * DA9052 interrupt support
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ * Based on arizona-irq.c, which is:
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+static int da9052_map_irq(struct da9052 *da9052, int irq)
+{
+ return regmap_irq_get_virq(da9052->irq_data, irq);
+}
+
+int da9052_enable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ enable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_enable_irq);
+
+int da9052_disable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq);
+
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq_nosync(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);
+
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ name, data);
+}
+EXPORT_SYMBOL_GPL(da9052_request_irq);
+
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+EXPORT_SYMBOL_GPL(da9052_free_irq);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_irq_init(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ -1, &da9052_regmap_irq_chip,
+ &da9052->irq_data);
+ if (ret < 0) {
+ dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
+ goto regmap_err;
+ }
+
+ ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
+ da9052_auxadc_irq, da9052);
+
+ if (ret != 0) {
+ dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
+ goto request_irq_err;
+ }
+
+ return 0;
+
+request_irq_err:
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+regmap_err:
+ return ret;
+
+
+int da9052_irq_exit(struct da9052 *da9052)
+{
+ da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+
+ return 0;
+}
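
With the IRQ handling moved behind regmap-irq, the da9052 cell drivers no longer read IRQ numbers from mfd resources; they go through the new helpers instead. A sketch of how a cell driver (here an imaginary onkey handler) might use them:

#include <linux/interrupt.h>
#include <linux/mfd/da9052/da9052.h>

static irqreturn_t example_onkey_irq(int irq, void *data)
{
	/* react to the key event using the driver state passed in data */
	return IRQ_HANDLED;
}

static int example_onkey_setup(struct da9052 *da9052, void *priv)
{
	return da9052_request_irq(da9052, DA9052_IRQ_NONKEY, "ONKEY",
				  example_onkey_irq, priv);
}
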
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index dc569156937..dc8826d8d69 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2743,9 +2743,15 @@ static struct irq_domain_ops db8500_irq_ops = {
static int db8500_irq_init(struct device_node *np)
{
- db8500_irq_domain = irq_domain_add_legacy(
- np, NUM_PRCMU_WAKEUPS, IRQ_PRCMU_BASE,
- 0, &db8500_irq_ops, NULL);
+ int irq_base = -1;
+
+ /* In the device tree case, just take some IRQs */
+ if (!np)
+ irq_base = IRQ_PRCMU_BASE;
+
+ db8500_irq_domain = irq_domain_add_simple(
+ np, NUM_PRCMU_WAKEUPS, irq_base,
+ &db8500_irq_ops, NULL);
if (!db8500_irq_domain) {
pr_err("Failed to create irqdomain\n");
@@ -2757,7 +2763,7 @@ static int db8500_irq_init(struct device_node *np)
void __init db8500_prcmu_early_init(void)
{
- if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2() || cpu_is_u9540()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
@@ -2775,7 +2781,11 @@ void __init db8500_prcmu_early_init(void)
iounmap(tcpm_base);
}
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+ if (cpu_is_u9540())
+ tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
+ SZ_4K + SZ_8K) + SZ_8K;
+ else
+ tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
} else {
pr_err("prcmu: Unsupported chip version\n");
BUG();
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 0b8b55bb9b1..e80587f1a79 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -211,7 +211,7 @@ static int jz4740_adc_probe(struct platform_device *pdev)
int ret;
int irq_base;
- adc = kmalloc(sizeof(*adc), GFP_KERNEL);
+ adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
if (!adc) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
return -ENOMEM;
@@ -221,30 +221,27 @@ static int jz4740_adc_probe(struct platform_device *pdev)
if (adc->irq < 0) {
ret = adc->irq;
dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
- goto err_free;
+ return ret;
}
irq_base = platform_get_irq(pdev, 1);
if (irq_base < 0) {
- ret = irq_base;
- dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
- goto err_free;
+ dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
+ return irq_base;
}
mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_base) {
- ret = -ENOENT;
dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- goto err_free;
+ return -ENOENT;
}
/* Only request the shared registers for the MFD driver */
adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
pdev->name);
if (!adc->mem) {
- ret = -EBUSY;
dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- goto err_free;
+ return -EBUSY;
}
adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
@@ -301,9 +298,6 @@ err_iounmap:
iounmap(adc->base);
err_release_mem_region:
release_mem_region(adc->mem->start, resource_size(adc->mem));
-err_free:
- kfree(adc);
-
return ret;
}
@@ -325,8 +319,6 @@ static int jz4740_adc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- kfree(adc);
-
return 0;
}
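
The jz4740-adc conversion above replaces kmalloc()/kfree() with a device-managed allocation, which is what lets every early error path become a plain return and removes the err_free label. A minimal sketch of the same devm pattern, with hypothetical names:

struct demo_priv {
        int irq;
};

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *priv;

        /* Freed automatically when the device is unbound; no kfree() needed. */
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;       /* no error label, no leak */

        platform_set_drvdata(pdev, priv);
        return 0;
}
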
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 2ad24caa07d..d9d930302e9 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -734,7 +734,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
lpc_ich_cells[LPC_GPIO].num_resources--;
goto gpe0_done;
}
@@ -760,7 +760,7 @@ gpe0_done:
pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
ret = -ENODEV;
goto gpio_done;
}
@@ -810,7 +810,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -830,12 +830,15 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
*/
- if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+ /* Don't register iomem for TCO ver 1 */
+ lpc_ich_cells[LPC_WDT].num_resources--;
+ } else {
pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0xffffc000;
if (!(base_addr_cfg & 1)) {
- pr_err("RCBA is disabled by hardware/BIOS, "
- "device disabled\n");
+ dev_notice(&dev->dev, "RCBA is disabled by "
+ "hardware/BIOS, device disabled\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -871,6 +874,7 @@ static int lpc_ich_probe(struct pci_dev *dev,
* successfully.
*/
if (!cell_added) {
+ dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
return -ENODEV;
}
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index f123517065e..abd5c80c7cf 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -21,8 +21,10 @@
* This driver is based on max8998.c
*/
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -47,6 +49,13 @@ static struct mfd_cell max8997_devs[] = {
{ .name = "max8997-led", .id = 2 },
};
+#ifdef CONFIG_OF
+static struct of_device_id __devinitdata max8997_pmic_dt_match[] = {
+ { .compatible = "maxim,max8997-pmic", .data = TYPE_MAX8997 },
+ {},
+};
+#endif
+
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
@@ -123,6 +132,58 @@ int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
}
EXPORT_SYMBOL_GPL(max8997_update_reg);
+#ifdef CONFIG_OF
+/*
+ * Only the common platform data elements for max8997 are parsed here from the
+ * device tree. Other sub-modules of max8997 such as pmic, rtc and others have
+ * to parse their own platform data elements from device tree.
+ *
+ * The max8997 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ struct max8997_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pd->ono = irq_of_parse_and_map(dev->of_node, 1);
+
+ /*
+ * ToDo: the 'wakeup' member in the platform data is Linux-specific
+ * information, so there is no device tree binding for it yet and it is
+ * not parsed here.
+ */
+
+ return pd;
+}
+#else
+static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+#ifdef CONFIG_OF
+ if (i2c->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
+ return (int)match->data;
+ }
+#endif
+ return (int)id->driver_data;
+}
+
static int max8997_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -137,12 +198,21 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, max8997);
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
- max8997->type = id->driver_data;
+ max8997->type = max8997_i2c_get_driver_data(i2c, id);
max8997->irq = i2c->irq;
+ if (max8997->dev->of_node) {
+ pdata = max8997_i2c_parse_dt_pdata(max8997->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err;
+ }
+ }
+
if (!pdata)
goto err;
+ max8997->pdata = pdata;
max8997->ono = pdata->ono;
mutex_init(&max8997->iolock);
@@ -434,6 +504,7 @@ static struct i2c_driver max8997_i2c_driver = {
.name = "max8997",
.owner = THIS_MODULE,
.pm = &max8997_pm,
+ .of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
.probe = max8997_i2c_probe,
.remove = max8997_i2c_remove,
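
With the shared platform data now stored by the core (max8997->pdata above), a sub-device can reuse the structure parsed from the device tree instead of building its own copy. A hedged sketch of such a consumer, assuming the max8997_dev structure gained the pdata pointer stored above and that cells follow the parent-drvdata convention; the "demo" cell is illustrative only:

static int max8997_demo_probe(struct platform_device *pdev)
{
        struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
        struct max8997_platform_data *pdata = max8997->pdata;

        if (!pdata)
                return -ENODEV;

        /* The common fields (e.g. the ONO irq) were filled in by the core. */
        dev_dbg(&pdev->dev, "ono irq from shared pdata: %d\n", pdata->ono);
        return 0;
}
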
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1aba0238f42..2a9b100c482 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -119,6 +119,11 @@
#define MC13XXX_REVISION_FAB (0x03 << 11)
#define MC13XXX_REVISION_ICIDCODE (0x3f << 13)
+#define MC34708_REVISION_REVMETAL (0x07 << 0)
+#define MC34708_REVISION_REVFULL (0x07 << 3)
+#define MC34708_REVISION_FIN (0x07 << 6)
+#define MC34708_REVISION_FAB (0x07 << 9)
+
#define MC13XXX_ADC1 44
#define MC13XXX_ADC1_ADEN (1 << 0)
#define MC13XXX_ADC1_RAND (1 << 1)
@@ -410,62 +415,52 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-static const char *mc13xxx_chipname[] = {
- [MC13XXX_ID_MC13783] = "mc13783",
- [MC13XXX_ID_MC13892] = "mc13892",
-};
-
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx)
+static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
- u32 icid;
- u32 revision;
- int ret;
-
- /*
- * Get the generation ID from register 46, as apparently some older
- * IC revisions only have this info at this location. Newer ICs seem to
- * have both.
- */
- ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
- if (ret)
- return ret;
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
+ "fin: %d, fab: %d, icid: %d/%d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC13XXX_REVISION_REVFULL),
+ maskval(revision, MC13XXX_REVISION_REVMETAL),
+ maskval(revision, MC13XXX_REVISION_FIN),
+ maskval(revision, MC13XXX_REVISION_FAB),
+ maskval(revision, MC13XXX_REVISION_ICID),
+ maskval(revision, MC13XXX_REVISION_ICIDCODE));
+}
- icid = (icid >> 6) & 0x7;
+static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
+{
+ dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC34708_REVISION_REVFULL),
+ maskval(revision, MC34708_REVISION_REVMETAL),
+ maskval(revision, MC34708_REVISION_FIN),
+ maskval(revision, MC34708_REVISION_FAB));
+}
- switch (icid) {
- case 2:
- mc13xxx->ictype = MC13XXX_ID_MC13783;
- break;
- case 7:
- mc13xxx->ictype = MC13XXX_ID_MC13892;
- break;
- default:
- mc13xxx->ictype = MC13XXX_ID_INVALID;
- break;
- }
+/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
+struct mc13xxx_variant mc13xxx_variant_mc13783 = {
+ .name = "mc13783",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13783);
- if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
- mc13xxx->ictype == MC13XXX_ID_MC13892) {
- ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-
- dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
- "fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[mc13xxx->ictype],
- maskval(revision, MC13XXX_REVISION_REVFULL),
- maskval(revision, MC13XXX_REVISION_REVMETAL),
- maskval(revision, MC13XXX_REVISION_FIN),
- maskval(revision, MC13XXX_REVISION_FAB),
- maskval(revision, MC13XXX_REVISION_ICID),
- maskval(revision, MC13XXX_REVISION_ICIDCODE));
- }
+struct mc13xxx_variant mc13xxx_variant_mc13892 = {
+ .name = "mc13892",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13892);
- return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
-}
+struct mc13xxx_variant mc13xxx_variant_mc34708 = {
+ .name = "mc34708",
+ .print_revision = mc34708_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc34708);
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- return mc13xxx_chipname[mc13xxx->ictype];
+ return mc13xxx->variant->name;
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -653,13 +648,16 @@ int mc13xxx_common_init(struct mc13xxx *mc13xxx,
struct mc13xxx_platform_data *pdata, int irq)
{
int ret;
+ u32 revision;
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
if (ret)
goto err_revision;
+ mc13xxx->variant->print_revision(mc13xxx, revision);
+
/* mask all irqs */
ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
if (ret)
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 7957999f30b..f745e27ee87 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -24,7 +24,10 @@
static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
{
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -34,7 +37,10 @@ MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
{
.compatible = "fsl,mc13892",
- .data = (void *) &mc13xxx_i2c_device_id[0],
+ .data = &mc13xxx_variant_mc13892,
+ }, {
+ .compatible = "fsl,mc34708",
+ .data = &mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -76,11 +82,15 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+ if (client->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &client->dev);
+ mc13xxx->variant = of_id->data;
+ } else {
+ mc13xxx->variant = (void *)id->driver_data;
+ }
- if (ret == 0 && (id->driver_data != mc13xxx->ictype))
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
return ret;
}
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index cb32f69d80b..3032bae20b6 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -28,10 +28,13 @@
static const struct spi_device_id mc13xxx_device_id[] = {
{
.name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13783,
}, {
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -39,8 +42,9 @@ static const struct spi_device_id mc13xxx_device_id[] = {
MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { .compatible = "fsl,mc13783", .data = &mc13xxx_variant_mc13783, },
+ { .compatible = "fsl,mc13892", .data = &mc13xxx_variant_mc13892, },
+ { .compatible = "fsl,mc34708", .data = &mc13xxx_variant_mc34708, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
@@ -144,19 +148,18 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+ if (spi->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (ret) {
- dev_set_drvdata(&spi->dev, NULL);
+ mc13xxx->variant = of_id->data;
} else {
- const struct spi_device_id *devid =
- spi_get_device_id(spi);
- if (!devid || devid->driver_data != mc13xxx->ictype)
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+ mc13xxx->variant = (void *)id_entry->driver_data;
}
- return ret;
+ return mc13xxx_common_init(mc13xxx, pdata, spi->irq);
}
static int mc13xxx_spi_remove(struct spi_device *spi)
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
index bbba06feea0..460ec5c7b18 100644
--- a/drivers/mfd/mc13xxx.h
+++ b/drivers/mfd/mc13xxx.h
@@ -13,19 +13,25 @@
#include <linux/regmap.h>
#include <linux/mfd/mc13xxx.h>
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx;
+
+struct mc13xxx_variant {
+ const char *name;
+ void (*print_revision)(struct mc13xxx *mc13xxx, u32 revision);
};
-#define MC13XXX_NUMREGS 0x3f
+extern struct mc13xxx_variant
+ mc13xxx_variant_mc13783,
+ mc13xxx_variant_mc13892,
+ mc13xxx_variant_mc34708;
struct mc13xxx {
struct regmap *regmap;
struct device *dev;
- enum mc13xxx_id ictype;
+ const struct mc13xxx_variant *variant;
struct mutex lock;
int irq;
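
The variant descriptors replace the old run-time chip identification, so supporting another PMIC in this family reduces to one more struct mc13xxx_variant plus entries in the i2c/spi/of tables. A sketch with a hypothetical chip name, shown only to illustrate the mechanism:

/* In mc13xxx-core.c (hypothetical "mc99999" chip, illustrative only). */
static void mc99999_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
        dev_info(mc13xxx->dev, "%s: raw revision register: 0x%06x\n",
                 mc13xxx->variant->name, revision);
}

struct mc13xxx_variant mc13xxx_variant_mc99999 = {
        .name = "mc99999",
        .print_revision = mc99999_print_revision,
};
EXPORT_SYMBOL_GPL(mc13xxx_variant_mc99999);
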
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f8b77711ad2..7604f4e5df4 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -21,6 +21,10 @@
#include <linux/irqdomain.h>
#include <linux/of.h>
+static struct device_type mfd_dev_type = {
+ .name = "mfd_device",
+};
+
int mfd_cell_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
@@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
+ pdev->dev.type = &mfd_dev_type;
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
@@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct platform_device *pdev;
+ const struct mfd_cell *cell;
atomic_t **usage_count = c;
+ if (dev->type != &mfd_dev_type)
+ return 0;
+
+ pdev = to_platform_device(dev);
+ cell = mfd_get_cell(pdev);
+
/* find the base address of usage_count pointers (for freeing) */
if (!*usage_count || (cell->usage_count < *usage_count))
*usage_count = cell->usage_count;
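
Tagging every cell with mfd_dev_type lets the removal walk skip children that other code may have hung off the same parent device. A stripped-down sketch of the check, meaningful only inside mfd-core.c where the static mfd_dev_type is visible; the callback name is illustrative:

static int demo_remove_fn(struct device *dev, void *unused)
{
        /* Not one of our MFD cells (e.g. a regulator the parent registered). */
        if (dev->type != &mfd_dev_type)
                return 0;

        platform_device_unregister(to_platform_device(dev));
        return 0;
}

/* Invoked as: device_for_each_child(parent, NULL, demo_remove_fn); */
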
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 770a0d01e0b..05164d7f054 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
-#include <plat/cpu.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/pm_runtime.h>
@@ -384,7 +383,7 @@ static void omap_usbhs_init(struct device *dev)
reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
/* Bypass the TLL module for PHY mode operation */
- if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ if (pdata->single_ulpi_bypass) {
dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
if (is_ehci_phy_mode(pdata->port_mode[0]) ||
is_ehci_phy_mode(pdata->port_mode[1]) ||
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index fe00cdd6f83..b41db596870 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -345,7 +345,7 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
mutex_init(&rc5t583->irq_lock);
/* Initialize all interrupt registers to 0 */
- for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) {
+ for (i = 0; i < RC5T583_MAX_INTERRUPT_EN_REGS; i++) {
ret = rc5t583_write(rc5t583->dev, irq_en_add[i],
rc5t583->irq_en_reg[i]);
if (ret < 0)
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
new file mode 100644
index 00000000000..7ff4a37ab0c
--- /dev/null
+++ b/drivers/mfd/retu-mfd.c
@@ -0,0 +1,264 @@
+/*
+ * Retu MFD driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall and Mikko Ylinen.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+/* Registers */
+#define RETU_REG_ASICR 0x00 /* ASIC ID and revision */
+#define RETU_REG_ASICR_VILMA (1 << 7) /* Bit indicating Vilma */
+#define RETU_REG_IDR 0x01 /* Interrupt ID */
+#define RETU_REG_IMR 0x02 /* Interrupt mask */
+
+/* Interrupt sources */
+#define RETU_INT_PWR 0 /* Power button */
+
+struct retu_dev {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex mutex;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static struct resource retu_pwrbutton_res[] = {
+ {
+ .name = "retu-pwrbutton",
+ .start = RETU_INT_PWR,
+ .end = RETU_INT_PWR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell retu_devs[] = {
+ {
+ .name = "retu-wdt"
+ },
+ {
+ .name = "retu-pwrbutton",
+ .resources = retu_pwrbutton_res,
+ .num_resources = ARRAY_SIZE(retu_pwrbutton_res),
+ }
+};
+
+static struct regmap_irq retu_irqs[] = {
+ [RETU_INT_PWR] = {
+ .mask = 1 << RETU_INT_PWR,
+ }
+};
+
+static struct regmap_irq_chip retu_irq_chip = {
+ .name = "RETU",
+ .irqs = retu_irqs,
+ .num_irqs = ARRAY_SIZE(retu_irqs),
+ .num_regs = 1,
+ .status_base = RETU_REG_IDR,
+ .mask_base = RETU_REG_IMR,
+ .ack_base = RETU_REG_IDR,
+};
+
+/* Retu device registered for the power off. */
+static struct retu_dev *retu_pm_power_off;
+
+int retu_read(struct retu_dev *rdev, u8 reg)
+{
+ int ret;
+ int value;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_read(rdev->regmap, reg, &value);
+ mutex_unlock(&rdev->mutex);
+
+ return ret ? ret : value;
+}
+EXPORT_SYMBOL_GPL(retu_read);
+
+int retu_write(struct retu_dev *rdev, u8 reg, u16 data)
+{
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_write(rdev->regmap, reg, data);
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(retu_write);
+
+static void retu_power_off(void)
+{
+ struct retu_dev *rdev = retu_pm_power_off;
+ int reg;
+
+ mutex_lock(&retu_pm_power_off->mutex);
+
+ /* Ignore power button state */
+ regmap_read(rdev->regmap, RETU_REG_CC1, &reg);
+ regmap_write(rdev->regmap, RETU_REG_CC1, reg | 2);
+
+ /* Expire watchdog immediately */
+ regmap_write(rdev->regmap, RETU_REG_WATCHDOG, 0);
+
+ /* Wait for poweroff */
+ for (;;)
+ cpu_relax();
+
+ mutex_unlock(&retu_pm_power_off->mutex);
+}
+
+static int retu_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(reg_size != 1 || val_size != 2);
+
+ ret = i2c_smbus_read_word_data(i2c, *(u8 const *)reg);
+ if (ret < 0)
+ return ret;
+
+ *(u16 *)val = ret;
+ return 0;
+}
+
+static int retu_regmap_write(void *context, const void *data, size_t count)
+{
+ u8 reg;
+ u16 val;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(count != sizeof(reg) + sizeof(val));
+ memcpy(&reg, data, sizeof(reg));
+ memcpy(&val, data + sizeof(reg), sizeof(val));
+ return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus retu_bus = {
+ .read = retu_regmap_read,
+ .write = retu_regmap_write,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct regmap_config retu_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
+
+static int __devinit retu_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct retu_dev *rdev;
+ int ret;
+
+ rdev = devm_kzalloc(&i2c->dev, sizeof(*rdev), GFP_KERNEL);
+ if (rdev == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rdev);
+ rdev->dev = &i2c->dev;
+ mutex_init(&rdev->mutex);
+ rdev->regmap = devm_regmap_init(&i2c->dev, &retu_bus, &i2c->dev,
+ &retu_config);
+ if (IS_ERR(rdev->regmap))
+ return PTR_ERR(rdev->regmap);
+
+ ret = retu_read(rdev, RETU_REG_ASICR);
+ if (ret < 0) {
+ dev_err(rdev->dev, "could not read Retu revision: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(rdev->dev, "Retu%s v%d.%d found\n",
+ (ret & RETU_REG_ASICR_VILMA) ? " & Vilma" : "",
+ (ret >> 4) & 0x7, ret & 0xf);
+
+ /* Mask all RETU interrupts. */
+ ret = retu_write(rdev, RETU_REG_IMR, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_add_irq_chip(rdev->regmap, i2c->irq, IRQF_ONESHOT, -1,
+ &retu_irq_chip, &rdev->irq_data);
+ if (ret < 0)
+ return ret;
+
+ ret = mfd_add_devices(rdev->dev, -1, retu_devs, ARRAY_SIZE(retu_devs),
+ NULL, regmap_irq_chip_get_base(rdev->irq_data),
+ NULL);
+ if (ret < 0) {
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+ return ret;
+ }
+
+ if (!pm_power_off) {
+ retu_pm_power_off = rdev;
+ pm_power_off = retu_power_off;
+ }
+
+ return 0;
+}
+
+static int __devexit retu_remove(struct i2c_client *i2c)
+{
+ struct retu_dev *rdev = i2c_get_clientdata(i2c);
+
+ if (retu_pm_power_off == rdev) {
+ pm_power_off = NULL;
+ retu_pm_power_off = NULL;
+ }
+ mfd_remove_devices(rdev->dev);
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id retu_id[] = {
+ { "retu-mfd", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, retu_id);
+
+static struct i2c_driver retu_driver = {
+ .driver = {
+ .name = "retu-mfd",
+ .owner = THIS_MODULE,
+ },
+ .probe = retu_probe,
+ .remove = retu_remove,
+ .id_table = retu_id,
+};
+module_i2c_driver(retu_driver);
+
+MODULE_DESCRIPTION("Retu MFD driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
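
retu_read() returns the register value or a negative errno, and both accessors serialise on the device mutex, so Retu cell drivers never touch the regmap directly. A hedged consumer sketch: the parent-drvdata convention mirrors retu_probe() above, while the function name and watchdog-style usage are illustrative only:

static int retu_demo_kick_watchdog(struct platform_device *pdev, u16 timeout)
{
        /* retu_probe() stored the struct retu_dev as the parent's drvdata. */
        struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
        int ret;

        ret = retu_write(rdev, RETU_REG_WATCHDOG, timeout);
        if (ret < 0)
                return ret;

        /* Value on success, negative errno on failure. */
        return retu_read(rdev, RETU_REG_WATCHDOG);
}
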
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 56d4377c62c..3a44efa2920 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index c901fa50fea..0dd84e99081 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -24,67 +24,67 @@
static struct regmap_irq s2mps11_irqs[] = {
[S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONF_MASK,
},
[S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONR_MASK,
},
[S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBF_MASK,
},
[S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBR_MASK,
},
[S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBF_MASK,
},
[S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBR_MASK,
},
[S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRON1S_MASK,
},
[S2MPS11_IRQ_MRB] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_MRB_MASK,
},
[S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
[S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA1_MASK,
},
[S2MPS11_IRQ_RTCA2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA2_MASK,
},
[S2MPS11_IRQ_SMPL] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_SMPL_MASK,
},
[S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC1S_MASK,
},
[S2MPS11_IRQ_WTSR] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_WTSR_MASK,
},
[S2MPS11_IRQ_INT120C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT120C_MASK,
},
[S2MPS11_IRQ_INT140C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT140C_MASK,
},
};
@@ -92,146 +92,146 @@ static struct regmap_irq s2mps11_irqs[] = {
static struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRR_MASK,
},
[S5M8767_IRQ_PWRF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRF_MASK,
},
[S5M8767_IRQ_PWR1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWR1S_MASK,
},
[S5M8767_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGR_MASK,
},
[S5M8767_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGF_MASK,
},
[S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT2_MASK,
},
[S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT1_MASK,
},
[S5M8767_IRQ_MRB] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_MRB_MASK,
},
[S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK2_MASK,
},
[S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK3_MASK,
},
[S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK4_MASK,
},
[S5M8767_IRQ_RTC60S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC60S_MASK,
},
[S5M8767_IRQ_RTCA1] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA1_MASK,
},
[S5M8767_IRQ_RTCA2] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA2_MASK,
},
[S5M8767_IRQ_SMPL] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_SMPL_MASK,
},
[S5M8767_IRQ_RTC1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC1S_MASK,
},
[S5M8767_IRQ_WTSR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_WTSR_MASK,
},
};
static struct regmap_irq s5m8763_irqs[] = {
[S5M8763_IRQ_DCINF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINF_MASK,
},
[S5M8763_IRQ_DCINR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINR_MASK,
},
[S5M8763_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGF_MASK,
},
[S5M8763_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGR_MASK,
},
[S5M8763_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONF_MASK,
},
[S5M8763_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONR_MASK,
},
[S5M8763_IRQ_WTSREVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_WTSREVNT_MASK,
},
[S5M8763_IRQ_SMPLEVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_SMPLEVNT_MASK,
},
[S5M8763_IRQ_ALARM1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM1_MASK,
},
[S5M8763_IRQ_ALARM0] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM0_MASK,
},
[S5M8763_IRQ_ONKEY1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_ONKEY1S_MASK,
},
[S5M8763_IRQ_TOPOFFR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_TOPOFFR_MASK,
},
[S5M8763_IRQ_DCINOVPR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DCINOVPR_MASK,
},
[S5M8763_IRQ_CHGRSTF] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGRSTF_MASK,
},
[S5M8763_IRQ_DONER] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DONER_MASK,
},
[S5M8763_IRQ_CHGFAULT] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGFAULT_MASK,
},
[S5M8763_IRQ_LOBAT1] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT1_MASK,
},
[S5M8763_IRQ_LOBAT2] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT2_MASK,
},
};
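
The sec-irq change above is purely about indexing: regmap-irq addresses each status/mask register as status_base (or mask_base) plus reg_offset, so the first interrupt register must use offset 0 rather than 1. A minimal sketch of a two-register chip under that convention; the register names, addresses and bits are made up for illustration:

static struct regmap_irq demo_irqs[] = {
        { .reg_offset = 0, .mask = BIT(0) },    /* lives in INT1SRC = status_base + 0 */
        { .reg_offset = 1, .mask = BIT(3) },    /* lives in INT2SRC = status_base + 1 */
};

static struct regmap_irq_chip demo_irq_chip = {
        .name = "demo",
        .irqs = demo_irqs,
        .num_irqs = ARRAY_SIZE(demo_irqs),
        .num_regs = 2,
        .status_base = 0x01,    /* INT1SRC */
        .mask_base = 0x03,      /* INT1MSK */
};
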
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index d6284cacd27..1225dcbcfcf 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,21 +27,28 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sta2x11-mfd.h>
+#include <linux/regmap.h>
#include <asm/sta2x11.h>
+static inline int __reg_within_range(unsigned int r,
+ unsigned int start,
+ unsigned int end)
+{
+ return ((r >= start) && (r <= end));
+}
+
/* This describes STA2X11 MFD chip for us, we may have several */
struct sta2x11_mfd {
struct sta2x11_instance *instance;
- spinlock_t lock;
+ struct regmap *regmap[sta2x11_n_mfd_plat_devs];
+ spinlock_t lock[sta2x11_n_mfd_plat_devs];
struct list_head list;
- void __iomem *sctl_regs;
- void __iomem *apbreg_regs;
+ void __iomem *regs[sta2x11_n_mfd_plat_devs];
};
static LIST_HEAD(sta2x11_mfd_list);
@@ -71,6 +78,7 @@ static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
{
+ int i;
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
struct sta2x11_instance *instance;
@@ -83,7 +91,8 @@ static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
if (!mfd)
return -ENOMEM;
INIT_LIST_HEAD(&mfd->list);
- spin_lock_init(&mfd->lock);
+ for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
+ spin_lock_init(&mfd->lock[i]);
mfd->instance = instance;
list_add(&mfd->list, &sta2x11_mfd_list);
return 0;
@@ -100,161 +109,276 @@ static int mfd_remove(struct pci_dev *pdev)
return 0;
}
-/* These two functions are exported and are not expected to fail */
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+/* This function is exported and is not expected to fail */
+u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
+ enum sta2x11_mfd_plat_dev index)
{
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
u32 r;
unsigned long flags;
+ void __iomem *regs;
if (!mfd) {
dev_warn(&pdev->dev, ": can't access sctl regs\n");
return 0;
}
- if (!mfd->sctl_regs) {
+
+ regs = mfd->regs[index];
+ if (!regs) {
dev_warn(&pdev->dev, ": system ctl not initialized\n");
return 0;
}
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->sctl_regs + reg);
+ spin_lock_irqsave(&mfd->lock[index], flags);
+ r = readl(regs + reg);
r &= ~mask;
r |= val;
if (mask)
- writel(r, mfd->sctl_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
+ writel(r, regs + reg);
+ spin_unlock_irqrestore(&mfd->lock[index], flags);
return r;
}
-EXPORT_SYMBOL(sta2x11_sctl_mask);
+EXPORT_SYMBOL(__sta2x11_mfd_mask);
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+int sta2x11_mfd_get_regs_data(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index,
+ void __iomem **regs,
+ spinlock_t **lock)
{
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
- u32 r;
- unsigned long flags;
+ struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ struct sta2x11_mfd *mfd;
- if (!mfd) {
- dev_warn(&pdev->dev, ": can't access apb regs\n");
- return 0;
- }
- if (!mfd->apbreg_regs) {
- dev_warn(&pdev->dev, ": apb bridge not initialized\n");
- return 0;
- }
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->apbreg_regs + reg);
- r &= ~mask;
- r |= val;
- if (mask)
- writel(r, mfd->apbreg_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
- return r;
+ if (!pdev)
+ return -ENODEV;
+ mfd = sta2x11_mfd_find(pdev);
+ if (!mfd)
+ return -ENODEV;
+ if (index >= sta2x11_n_mfd_plat_devs)
+ return -ENODEV;
+ *regs = mfd->regs[index];
+ *lock = &mfd->lock[index];
+ pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
+ return *regs ? 0 : -ENODEV;
}
-EXPORT_SYMBOL(sta2x11_apbreg_mask);
-
-/* Two debugfs files, for our registers (FIXME: one instance only) */
-#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
-static struct debugfs_reg32 sta2x11_sctl_regs[] = {
- REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
- REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
- REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
- REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
- REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
- REG(SCCLKSTAT2), REG(SCRSTSTA),
-};
-#undef REG
+EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
-static struct debugfs_regset32 sctl_regset = {
- .regs = sta2x11_sctl_regs,
- .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
-};
+/*
+ * Special sta2x11-mfd regmap lock/unlock functions
+ */
+
+static void sta2x11_regmap_lock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_lock(lock);
+}
-#define REG(regname) {.name = #regname, .offset = regname}
-static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
- REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
- REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+static void sta2x11_regmap_unlock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_unlock(lock);
+}
+
+/* OTP (one time programmable) registers do not require locking */
+static void sta2x11_regmap_nolock(void *__lock)
+{
+}
+
+static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
+ [sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
+ [sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
+ [sta2x11_scr] = STA2X11_MFD_SCR_NAME,
};
-#undef REG
-static struct debugfs_regset32 apbreg_regset = {
- .regs = sta2x11_apbreg_regs,
- .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
+}
+
+static struct regmap_config sta2x11_sctl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = SCTL_SCRSTSTA,
+ .writeable_reg = sta2x11_sctl_writeable_reg,
};
-static struct dentry *sta2x11_sctl_debugfs;
-static struct dentry *sta2x11_apbreg_debugfs;
+static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == STA2X11_SECR_CR) ||
+ __reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
+}
-/* Probe for the two platform devices */
-static int sta2x11_sctl_probe(struct platform_device *dev)
+static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
{
- struct pci_dev **pdev;
- struct sta2x11_mfd *mfd;
- struct resource *res;
+ return false;
+}
- pdev = dev->dev.platform_data;
- mfd = sta2x11_mfd_find(*pdev);
- if (!mfd)
- return -ENODEV;
+static struct regmap_config sta2x11_scr_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_nolock,
+ .unlock = sta2x11_regmap_nolock,
+ .max_register = STA2X11_SECR_FVR1,
+ .readable_reg = sta2x11_scr_readable_reg,
+ .writeable_reg = sta2x11_scr_writeable_reg,
+};
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* Two blocks (CAN and MLB, SARAC) 0x100 bytes apart */
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ switch (reg) {
+ case APBREG_BSR:
+ case APBREG_PAER:
+ case APBREG_PWAC:
+ case APBREG_PRAC:
+ case APBREG_PCG:
+ case APBREG_PUR:
+ case APBREG_EMU_PCG:
+ return true;
+ default:
+ return false;
+ }
+}
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-sctl"))
- return -EBUSY;
+static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ if (!sta2x11_apbreg_readable_reg(dev, reg))
+ return false;
+ return reg != APBREG_PAER;
+}
- mfd->sctl_regs = ioremap(res->start, resource_size(res));
- if (!mfd->sctl_regs) {
- release_mem_region(res->start, resource_size(res));
- return -ENOMEM;
+static struct regmap_config sta2x11_apbreg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = APBREG_EMU_PCG_SARAC,
+ .readable_reg = sta2x11_apbreg_readable_reg,
+ .writeable_reg = sta2x11_apbreg_writeable_reg,
+};
+
+static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
+ unsigned int reg)
+{
+ return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
+ __reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
+ __reg_within_range(reg, MASTER_LOCK_REG,
+ SYSTEM_CONFIG_STATUS_REG) ||
+ reg == MSP_CLK_CTRL_REG ||
+ __reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
+}
+
+static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
+ unsigned int reg)
+{
+ if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
+ return false;
+ switch (reg) {
+ case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
+ case SYSTEM_CONFIG_STATUS_REG:
+ case COMPENSATION_REG1:
+ case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
+ case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
+ return false;
+ default:
+ return true;
}
- sctl_regset.base = mfd->sctl_regs;
- sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
- S_IFREG | S_IRUGO,
- NULL, &sctl_regset);
- return 0;
}
-static int sta2x11_apbreg_probe(struct platform_device *dev)
+static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = TEST_CTL_REG,
+ .readable_reg = sta2x11_apb_soc_regs_readable_reg,
+ .writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
+};
+
+static struct regmap_config *
+sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = &sta2x11_sctl_regmap_config,
+ [sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
+ [sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
+ [sta2x11_scr] = &sta2x11_scr_regmap_config,
+};
+
+/* Probe for the four platform devices */
+
+static int sta2x11_mfd_platform_probe(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index)
{
struct pci_dev **pdev;
struct sta2x11_mfd *mfd;
struct resource *res;
+ const char *name = sta2x11_mfd_names[index];
+ struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
pdev = dev->dev.platform_data;
- dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
- dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
-
mfd = sta2x11_mfd_find(*pdev);
if (!mfd)
return -ENODEV;
+ if (!regmap_config)
+ return -ENODEV;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-apbreg"))
+ if (!request_mem_region(res->start, resource_size(res), name))
return -EBUSY;
- mfd->apbreg_regs = ioremap(res->start, resource_size(res));
- if (!mfd->apbreg_regs) {
+ mfd->regs[index] = ioremap(res->start, resource_size(res));
+ if (!mfd->regs[index]) {
release_mem_region(res->start, resource_size(res));
return -ENOMEM;
}
- dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+ regmap_config->lock_arg = &mfd->lock[index];
+ /*
+ * No caching: registers can be reached both via regmap and via
+ * raw void __iomem * accesses.
+ */
+ regmap_config->cache_type = REGCACHE_NONE;
+ mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
+ regmap_config);
+ WARN_ON(!mfd->regmap[index]);
- apbreg_regset.base = mfd->apbreg_regs;
- sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
- S_IFREG | S_IRUGO,
- NULL, &apbreg_regset);
return 0;
}
-/* The two platform drivers */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
+}
+
+static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
+}
+
+static int sta2x11_scr_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
+}
+
+/* The four platform drivers */
static struct platform_driver sta2x11_sctl_platform_driver = {
.driver = {
- .name = "sta2x11-sctl",
+ .name = STA2X11_MFD_SCTL_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_sctl_probe,
@@ -268,7 +392,7 @@ static int __init sta2x11_sctl_init(void)
static struct platform_driver sta2x11_platform_driver = {
.driver = {
- .name = "sta2x11-apbreg",
+ .name = STA2X11_MFD_APBREG_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_apbreg_probe,
@@ -280,13 +404,44 @@ static int __init sta2x11_apbreg_init(void)
return platform_driver_register(&sta2x11_platform_driver);
}
+static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_APB_SOC_REGS_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apb_soc_regs_probe,
+};
+
+static int __init sta2x11_apb_soc_regs_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_apb_soc_regs_platform_driver);
+}
+
+static struct platform_driver sta2x11_scr_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_SCR_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_scr_probe,
+};
+
+static int __init sta2x11_scr_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_scr_platform_driver);
+}
+
+
/*
- * What follows is the PCI device that hosts the above two pdevs.
+ * What follows are the PCI devices that host the above pdevs.
* Each logic block is 4kB and they are all consecutive: we use this info.
*/
-/* Bar 0 */
-enum bar0_cells {
+/* Mfd 0 device */
+
+/* Mfd 0, Bar 0 */
+enum mfd0_bar0_cells {
STA2X11_GPIO_0 = 0,
STA2X11_GPIO_1,
STA2X11_GPIO_2,
@@ -295,8 +450,8 @@ enum bar0_cells {
STA2X11_SCR,
STA2X11_TIME,
};
-/* Bar 1 */
-enum bar1_cells {
+/* Mfd 0 , Bar 1 */
+enum mfd0_bar1_cells {
STA2X11_APBREG = 0,
};
#define CELL_4K(_name, _cell) { \
@@ -307,40 +462,71 @@ enum bar1_cells {
static const struct resource gpio_resources[] = {
{
- .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ /* 4 consecutive cells, 1 driver */
+ .name = STA2X11_MFD_GPIO_NAME,
.start = 0,
.end = (4 * 4096) - 1,
.flags = IORESOURCE_MEM,
}
};
static const struct resource sctl_resources[] = {
- CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+ CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
};
static const struct resource scr_resources[] = {
- CELL_4K("sta2x11-scr", STA2X11_SCR),
+ CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
};
static const struct resource time_resources[] = {
- CELL_4K("sta2x11-time", STA2X11_TIME),
+ CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
};
static const struct resource apbreg_resources[] = {
- CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+ CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
};
#define DEV(_name, _r) \
{ .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
-static struct mfd_cell sta2x11_mfd_bar0[] = {
- DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
- DEV("sta2x11-sctl", sctl_resources),
- DEV("sta2x11-scr", scr_resources),
- DEV("sta2x11-time", time_resources),
+static struct mfd_cell sta2x11_mfd0_bar0[] = {
+ /* offset 0: we add pdata later */
+ DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
+ DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
+ DEV(STA2X11_MFD_SCR_NAME, scr_resources),
+ DEV(STA2X11_MFD_TIME_NAME, time_resources),
};
-static struct mfd_cell sta2x11_mfd_bar1[] = {
- DEV("sta2x11-apbreg", apbreg_resources),
+static struct mfd_cell sta2x11_mfd0_bar1[] = {
+ DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
};
+/* Mfd 1 devices */
+
+/* Mfd 1, Bar 0 */
+enum mfd1_bar0_cells {
+ STA2X11_VIC = 0,
+};
+
+/* Mfd 1, Bar 1 */
+enum mfd1_bar1_cells {
+ STA2X11_APB_SOC_REGS = 0,
+};
+
+static const __devinitconst struct resource vic_resources[] = {
+ CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
+};
+
+static const __devinitconst struct resource apb_soc_regs_resources[] = {
+ CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd1_bar0[] = {
+ DEV(STA2X11_MFD_VIC_NAME, vic_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd1_bar1[] = {
+ DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
+};
+
+
static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
@@ -363,11 +549,63 @@ static int sta2x11_mfd_resume(struct pci_dev *pdev)
return 0;
}
+struct sta2x11_mfd_bar_setup_data {
+ struct mfd_cell *cells;
+ int ncells;
+};
+
+struct sta2x11_mfd_setup_data {
+ struct sta2x11_mfd_bar_setup_data bars[2];
+};
+
+#define STA2X11_MFD0 0
+#define STA2X11_MFD1 1
+
+static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
+ /* Mfd 0: gpio, sctl, scr, timers / apbregs */
+ [STA2X11_MFD0] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd0_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd0_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
+ },
+ },
+ },
+ /* Mfd 1: vic / apb-soc-regs */
+ [STA2X11_MFD1] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd1_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd1_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
+ },
+ },
+ },
+};
+
+static void sta2x11_mfd_setup(struct pci_dev *pdev,
+ struct sta2x11_mfd_setup_data *sd)
+{
+ int i, j;
+ for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
+ for (j = 0; j < sd->bars[i].ncells; j++) {
+ sd->bars[i].cells[j].pdata_size = sizeof(pdev);
+ sd->bars[i].cells[j].platform_data = &pdev;
+ }
+}
+
static int sta2x11_mfd_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+ const struct pci_device_id *pci_id)
{
int err, i;
- struct sta2x11_gpio_pdata *gpio_data;
+ struct sta2x11_mfd_setup_data *setup_data;
dev_info(&pdev->dev, "%s\n", __func__);
@@ -381,46 +619,29 @@ static int sta2x11_mfd_probe(struct pci_dev *pdev,
if (err)
dev_info(&pdev->dev, "Enable msi failed\n");
- /* Read gpio config data as pci device's platform data */
- gpio_data = dev_get_platdata(&pdev->dev);
- if (!gpio_data)
- dev_warn(&pdev->dev, "no gpio configuration\n");
-
- dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
- gpio_data, &gpio_data);
- dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
- pdev, &pdev);
+ setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
+ &mfd_setup_data[STA2X11_MFD0] :
+ &mfd_setup_data[STA2X11_MFD1];
/* platform data is the pci device for all of them */
- for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
- sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar0[i].platform_data = &pdev;
- }
- sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar1[0].platform_data = &pdev;
+ sta2x11_mfd_setup(pdev, setup_data);
/* Record this pdev before mfd_add_devices: their probe looks for it */
- sta2x11_mfd_add(pdev, GFP_ATOMIC);
-
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar0,
- ARRAY_SIZE(sta2x11_mfd_bar0),
- &pdev->resource[0],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
- goto err_disable;
- }
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar1,
- ARRAY_SIZE(sta2x11_mfd_bar1),
- &pdev->resource[1],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
- goto err_disable;
+ if (!sta2x11_mfd_find(pdev))
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+ /* Just 2 bars for all mfd's at present */
+ for (i = 0; i < 2; i++) {
+ err = mfd_add_devices(&pdev->dev, -1,
+ setup_data->bars[i].cells,
+ setup_data->bars[i].ncells,
+ &pdev->resource[i],
+ 0, NULL);
+ if (err) {
+ dev_err(&pdev->dev,
+ "mfd_add_devices[%d] failed: %d\n", i, err);
+ goto err_disable;
+ }
}
return 0;
@@ -434,6 +655,7 @@ err_disable:
static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
{0,},
};
@@ -459,6 +681,8 @@ static int __init sta2x11_mfd_init(void)
*/
subsys_initcall(sta2x11_apbreg_init);
subsys_initcall(sta2x11_sctl_init);
+subsys_initcall(sta2x11_apb_soc_regs_init);
+subsys_initcall(sta2x11_scr_init);
rootfs_initcall(sta2x11_mfd_init);
MODULE_LICENSE("GPL v2");
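
With the per-block generalisation, the old sctl/apbreg accessors become thin wrappers that select a block through the new index argument of __sta2x11_mfd_mask(). The wrapper below is a sketch of that calling pattern, not a claim about what the header actually exports:

/* Illustrative wrapper: pick the sctl block via the new index argument. */
static inline u32 demo_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
{
        return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
}
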
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 36df1877802..fd5fcb63068 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -82,11 +82,13 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, stmpe_id);
static struct i2c_driver stmpe_i2c_driver = {
- .driver.name = "stmpe-i2c",
- .driver.owner = THIS_MODULE,
+ .driver = {
+ .name = "stmpe-i2c",
+ .owner = THIS_MODULE,
#ifdef CONFIG_PM
- .driver.pm = &stmpe_dev_pm_ops,
+ .pm = &stmpe_dev_pm_ops,
#endif
+ },
.probe = stmpe_i2c_probe,
.remove = stmpe_i2c_remove,
.id_table = stmpe_i2c_id,
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 79e88d1fd99..5e8e6927cfc 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -7,11 +7,15 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
@@ -312,20 +316,17 @@ static struct mfd_cell stmpe_gpio_cell_noirq = {
static struct resource stmpe_keypad_resources[] = {
{
.name = "KEYPAD",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "KEYPAD_OVER",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_keypad_cell = {
.name = "stmpe-keypad",
+ .of_compatible = "st,stmpe-keypad",
.resources = stmpe_keypad_resources,
.num_resources = ARRAY_SIZE(stmpe_keypad_resources),
};
@@ -399,20 +400,17 @@ static struct stmpe_variant_info stmpe801_noirq = {
static struct resource stmpe_ts_resources[] = {
{
.name = "TOUCH_DET",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "FIFO_TH",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_ts_cell = {
.name = "stmpe-ts",
+ .of_compatible = "st,stmpe-ts",
.resources = stmpe_ts_resources,
.num_resources = ARRAY_SIZE(stmpe_ts_resources),
};
@@ -528,12 +526,12 @@ static const u8 stmpe1601_regs[] = {
static struct stmpe_variant_block stmpe1601_blocks[] = {
{
.cell = &stmpe_gpio_cell,
- .irq = STMPE24XX_IRQ_GPIOC,
+ .irq = STMPE1601_IRQ_GPIOC,
.block = STMPE_BLOCK_GPIO,
},
{
.cell = &stmpe_keypad_cell,
- .irq = STMPE24XX_IRQ_KEYPAD,
+ .irq = STMPE1601_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
};
@@ -767,7 +765,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int i;
if (variant->id_val == STMPE801_ID) {
- handle_nested_irq(stmpe->irq_base);
+ int base = irq_create_mapping(stmpe->domain, 0);
+
+ handle_nested_irq(base);
return IRQ_HANDLED;
}
@@ -788,8 +788,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
while (status) {
int bit = __ffs(status);
int line = bank * 8 + bit;
+ int nestedirq = irq_create_mapping(stmpe->domain, line);
- handle_nested_irq(stmpe->irq_base + line);
+ handle_nested_irq(nestedirq);
status &= ~(1 << bit);
}
@@ -830,7 +831,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
static void stmpe_irq_mask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -840,7 +841,7 @@ static void stmpe_irq_mask(struct irq_data *data)
static void stmpe_irq_unmask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -855,43 +856,59 @@ static struct irq_chip stmpe_irq_chip = {
.irq_unmask = stmpe_irq_unmask,
};
-static int __devinit stmpe_irq_init(struct stmpe *stmpe)
+static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
{
+ struct stmpe *stmpe = d->host_data;
struct irq_chip *chip = NULL;
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
if (stmpe->variant->id_val != STMPE801_ID)
chip = &stmpe_irq_chip;
- for (irq = base; irq < base + num_irqs; irq++) {
- irq_set_chip_data(irq, stmpe);
- irq_set_chip_and_handler(irq, chip, handle_edge_irq);
- irq_set_nested_thread(irq, 1);
+ irq_set_chip_data(virq, stmpe);
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
+ set_irq_flags(virq, IRQF_VALID);
#else
- irq_set_noprobe(irq);
+ irq_set_noprobe(virq);
#endif
- }
return 0;
}
-static void stmpe_irq_remove(struct stmpe *stmpe)
+static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
{
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
-
- for (irq = base; irq < base + num_irqs; irq++) {
#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
+ set_irq_flags(virq, 0);
#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops stmpe_irq_ops = {
+ .map = stmpe_irq_map,
+ .unmap = stmpe_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __devinit stmpe_irq_init(struct stmpe *stmpe,
+ struct device_node *np)
+{
+ int base = 0;
+ int num_irqs = stmpe->variant->num_irqs;
+
+ if (!np)
+ base = stmpe->irq_base;
+
+ stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
+ &stmpe_irq_ops, stmpe);
+ if (!stmpe->domain) {
+ dev_err(stmpe->dev, "Failed to create irqdomain\n");
+ return -ENOSYS;
}
+
+ return 0;
}
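
The conversion above replaces irq_base arithmetic with an irq_domain: per-virq setup moves into a .map callback, and the threaded handler asks the domain for a virq with irq_create_mapping() before calling handle_nested_irq(). A minimal sketch of that pattern, kept separate from stmpe.c itself (the demo_* names and the dummy_irq_chip stand-in are placeholders, not the driver's code):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

struct demo_expander {
	struct irq_domain *domain;
};

static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hwirq)
{
	/* Runs once per virq when a mapping is created. */
	irq_set_chip_data(virq, d->host_data);
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_edge_irq);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);
	return 0;
}

static struct irq_domain_ops demo_irq_ops = {
	.map	= demo_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

static int demo_irq_init(struct demo_expander *exp, struct device_node *np,
			 int irq_base, int num_irqs)
{
	/* Legacy base when booting from platform data, linear when from DT. */
	exp->domain = irq_domain_add_simple(np, num_irqs, np ? 0 : irq_base,
					    &demo_irq_ops, exp);
	return exp->domain ? 0 : -ENOSYS;
}

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct demo_expander *exp = data;
	unsigned long status = 0x05;	/* pretend hwirqs 0 and 2 fired */
	int bit;

	for_each_set_bit(bit, &status, 8)
		handle_nested_irq(irq_create_mapping(exp->domain, bit));

	return IRQ_HANDLED;
}
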
static int __devinit stmpe_chip_init(struct stmpe *stmpe)
@@ -942,13 +959,6 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
else
icr |= STMPE_ICR_LSB_HIGH;
}
-
- if (stmpe->pdata->irq_invert_polarity) {
- if (id == STMPE801_ID)
- icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
- else
- icr ^= STMPE_ICR_LSB_HIGH;
- }
}
if (stmpe->pdata->autosleep) {
@@ -961,10 +971,10 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
}
static int __devinit stmpe_add_device(struct stmpe *stmpe,
- struct mfd_cell *cell, int irq)
+ struct mfd_cell *cell)
{
return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
- NULL, stmpe->irq_base + irq, NULL);
+ NULL, stmpe->irq_base, stmpe->domain);
}
static int __devinit stmpe_devices_init(struct stmpe *stmpe)
@@ -972,7 +982,7 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
struct stmpe_variant_info *variant = stmpe->variant;
unsigned int platform_blocks = stmpe->pdata->blocks;
int ret = -EINVAL;
- int i;
+ int i, j;
for (i = 0; i < variant->num_blocks; i++) {
struct stmpe_variant_block *block = &variant->blocks[i];
@@ -980,8 +990,17 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
if (!(platform_blocks & block->block))
continue;
+ for (j = 0; j < block->cell->num_resources; j++) {
+ struct resource *res =
+ (struct resource *) &block->cell->resources[j];
+
+ /* Dynamically fill in a variant's IRQ. */
+ if (res->flags & IORESOURCE_IRQ)
+ res->start = res->end = block->irq + j;
+ }
+
platform_blocks &= ~block->block;
- ret = stmpe_add_device(stmpe, block->cell, block->irq);
+ ret = stmpe_add_device(stmpe, block->cell);
if (ret)
return ret;
}
@@ -994,17 +1013,56 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
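
Because the cells are now registered against the irq_domain instead of a numeric irq_base, their IORESOURCE_IRQ entries carry hardware IRQ numbers, which is why the loop above patches res->start/res->end with the variant's base hwirq before calling stmpe_add_device(); mfd_add_devices() then maps those hwirqs to virqs of the supplied domain for the child device. A stripped-down sketch of that fill step (demo_* is a placeholder name, not stmpe.c code):

#include <linux/ioport.h>

/* Rewrite a cell's IRQ resources so they hold hwirqs relative to the
 * variant's first interrupt; memory resources are left untouched. */
static void demo_fill_irq_resources(struct resource *res, int num, int hwirq)
{
	int i;

	for (i = 0; i < num; i++)
		if (res[i].flags & IORESOURCE_IRQ)
			res[i].start = res[i].end = hwirq + i;
}
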
+void __devinit stmpe_of_probe(struct stmpe_platform_data *pdata,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ pdata->id = -1;
+ pdata->irq_trigger = IRQF_TRIGGER_NONE;
+
+ of_property_read_u32(np, "st,autosleep-timeout",
+ &pdata->autosleep_timeout);
+
+ pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
+
+ for_each_child_of_node(np, child) {
+ if (!strcmp(child->name, "stmpe_gpio")) {
+ pdata->blocks |= STMPE_BLOCK_GPIO;
+ } else if (!strcmp(child->name, "stmpe_keypad")) {
+ pdata->blocks |= STMPE_BLOCK_KEYPAD;
+ } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+ pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
+ } else if (!strcmp(child->name, "stmpe_adc")) {
+ pdata->blocks |= STMPE_BLOCK_ADC;
+ } else if (!strcmp(child->name, "stmpe_pwm")) {
+ pdata->blocks |= STMPE_BLOCK_PWM;
+ } else if (!strcmp(child->name, "stmpe_rotator")) {
+ pdata->blocks |= STMPE_BLOCK_ROTATOR;
+ }
+ }
+}
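
stmpe_of_probe() builds the same stmpe_platform_data a board file would otherwise supply, with the enabled blocks derived from the child node names checked above. For comparison, a hypothetical board-file equivalent might look like this (field and flag names are the ones used in this file; the header path and the concrete values are assumptions):

#include <linux/interrupt.h>
#include <linux/mfd/stmpe.h>	/* assumed location of struct stmpe_platform_data */

static struct stmpe_platform_data demo_stmpe_pdata = {
	.id		   = -1,
	.blocks		   = STMPE_BLOCK_GPIO | STMPE_BLOCK_TOUCHSCREEN,
	.irq_trigger	   = IRQF_TRIGGER_FALLING,	/* example trigger */
	.autosleep	   = true,
	.autosleep_timeout = 1024,			/* example timeout */
};
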
+
/* Called from client specific probe routines */
int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
{
struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+ struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ if (!np)
+ return -EINVAL;
+
+ pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ stmpe_of_probe(pdata, np);
+ }
- stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL);
+ stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
return -ENOMEM;
@@ -1026,11 +1084,12 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
ci->init(stmpe);
if (pdata->irq_over_gpio) {
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+ ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
+ GPIOF_DIR_IN, "stmpe");
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
ret);
- goto out_free;
+ return ret;
}
stmpe->irq = gpio_to_irq(pdata->irq_gpio);
@@ -1047,51 +1106,40 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
dev_err(stmpe->dev,
"%s does not support no-irq mode!\n",
stmpe->variant->name);
- ret = -ENODEV;
- goto free_gpio;
+ return -ENODEV;
}
stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
+ } else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
+ pdata->irq_trigger =
+ irqd_get_trigger_type(irq_get_irq_data(stmpe->irq));
}
ret = stmpe_chip_init(stmpe);
if (ret)
- goto free_gpio;
+ return ret;
if (stmpe->irq >= 0) {
- ret = stmpe_irq_init(stmpe);
+ ret = stmpe_irq_init(stmpe, np);
if (ret)
- goto free_gpio;
+ return ret;
- ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
- pdata->irq_trigger | IRQF_ONESHOT,
+ ret = devm_request_threaded_irq(ci->dev, stmpe->irq, NULL,
+ stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT,
"stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n",
ret);
- goto out_removeirq;
+ return ret;
}
}
ret = stmpe_devices_init(stmpe);
- if (ret) {
- dev_err(stmpe->dev, "failed to add children\n");
- goto out_removedevs;
- }
-
- return 0;
+ if (!ret)
+ return 0;
-out_removedevs:
+ dev_err(stmpe->dev, "failed to add children\n");
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0)
- free_irq(stmpe->irq, stmpe);
-out_removeirq:
- if (stmpe->irq >= 0)
- stmpe_irq_remove(stmpe);
-free_gpio:
- if (pdata->irq_over_gpio)
- gpio_free(pdata->irq_gpio);
-out_free:
- kfree(stmpe);
+
return ret;
}
@@ -1099,16 +1147,6 @@ int stmpe_remove(struct stmpe *stmpe)
{
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0) {
- free_irq(stmpe->irq, stmpe);
- stmpe_irq_remove(stmpe);
- }
-
- if (stmpe->pdata->irq_over_gpio)
- gpio_free(stmpe->pdata->irq_gpio);
-
- kfree(stmpe);
-
return 0;
}
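
The error-path labels and most of stmpe_remove() disappear because every resource is now device-managed: devm_kzalloc(), devm_gpio_request_one() and devm_request_threaded_irq() are released by the driver core when probe fails or the device is unbound. A minimal, self-contained illustration of that pattern (not stmpe code; demo_* names are invented):

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct demo_priv {
	int irq;
};

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct device *dev, int gpio)
{
	struct demo_priv *priv;
	int ret;

	/* Freed automatically on probe failure or device removal. */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* GPIO and IRQ are likewise released by the devres core. */
	ret = devm_gpio_request_one(dev, gpio, GPIOF_DIR_IN, "demo");
	if (ret)
		return ret;

	priv->irq = gpio_to_irq(gpio);
	return devm_request_threaded_irq(dev, priv->irq, NULL, demo_thread_fn,
					 IRQF_ONESHOT, "demo", priv);
}
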
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
new file mode 100644
index 00000000000..8ca3bf023fb
--- /dev/null
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -0,0 +1,274 @@
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
+{
+ unsigned int val;
+
+ regmap_read(tsadc->regmap_tscadc, reg, &val);
+ return val;
+}
+
+static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
+ unsigned int val)
+{
+ regmap_write(tsadc->regmap_tscadc, reg, val);
+}
+
+static const struct regmap_config tscadc_regmap_config = {
+ .name = "ti_tscadc",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static void tscadc_idle_config(struct ti_tscadc_dev *config)
+{
+ unsigned int idleconfig;
+
+ idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
+
+ tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+}
+
+static int __devinit ti_tscadc_probe(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc;
+ struct resource *res;
+ struct clk *clk;
+ struct mfd_tscadc_board *pdata = pdev->dev.platform_data;
+ struct mfd_cell *cell;
+ int err, ctrl;
+ int clk_value, clock_rate;
+ int tsc_wires, adc_channels = 0, total_channels;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdata->adc_init)
+ adc_channels = pdata->adc_init->adc_channels;
+
+ tsc_wires = pdata->tsc_init->wires;
+ total_channels = tsc_wires + adc_channels;
+
+ if (total_channels > 8) {
+ dev_err(&pdev->dev, "Number of i/p channels more than 8\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined.\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ tscadc = devm_kzalloc(&pdev->dev,
+ sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+ if (!tscadc) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ tscadc->dev = &pdev->dev;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "no irq ID is specified.\n");
+ goto ret;
+ } else
+ tscadc->irq = err;
+
+ res = devm_request_mem_region(&pdev->dev,
+ res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to reserve registers.\n");
+ return -EBUSY;
+ }
+
+ tscadc->tscadc_base = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!tscadc->tscadc_base) {
+ dev_err(&pdev->dev, "failed to map registers.\n");
+ return -ENOMEM;
+ }
+
+ tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+ tscadc->tscadc_base, &tscadc_regmap_config);
+ if (IS_ERR(tscadc->regmap_tscadc)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ err = PTR_ERR(tscadc->regmap_tscadc);
+ goto ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /*
+ * The TSC_ADC_Subsystem has 2 clock domains
+ * OCP_CLK and ADC_CLK.
+ * The ADC clock is expected to run at target of 3MHz,
+ * and expected to capture 12-bit data at a rate of 200 KSPS.
+ * The TSC_ADC_SS controller design assumes the OCP clock is
+ * at least 6x faster than the ADC clock.
+ */
+ clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get TSC fck\n");
+ err = PTR_ERR(clk);
+ goto err_disable_clk;
+ }
+ clock_rate = clk_get_rate(clk);
+ clk_put(clk);
+ clk_value = clock_rate / ADC_CLK;
+ if (clk_value < MAX_CLK_DIV) {
+ dev_err(&pdev->dev, "clock input less than min clock requirement\n");
+ err = -EINVAL;
+ goto err_disable_clk;
+ }
+ /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
+ clk_value = clk_value - 1;
+ tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+
+ /* Set the control register bits */
+ ctrl = CNTRLREG_STEPCONFIGWRT |
+ CNTRLREG_TSCENB |
+ CNTRLREG_STEPID |
+ CNTRLREG_4WIRE;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* Set register bits for Idle Config Mode */
+ tscadc_idle_config(tscadc);
+
+ /* Enable the TSC module enable bit */
+ ctrl = tscadc_readl(tscadc, REG_CTRL);
+ ctrl |= CNTRLREG_TSCSSENB;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* TSC Cell */
+ cell = &tscadc->cells[TSC_CELL];
+ cell->name = "tsc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ /* ADC Cell */
+ cell = &tscadc->cells[ADC_CELL];
+ cell->name = "tiadc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
+ TSCADC_CELLS, NULL, 0, NULL);
+ if (err < 0)
+ goto err_disable_clk;
+
+ device_init_wakeup(&pdev->dev, true);
+ platform_set_drvdata(pdev, tscadc);
+
+ return 0;
+
+err_disable_clk:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ret:
+ return err;
+}
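
The divider programmed into REG_CLKDIV above is simply the functional-clock rate divided by the target ADC rate, minus one. As a worked example, assuming ADC_CLK names the 3 MHz target from the comment: a 24 MHz adc_tsc_fck gives 24000000 / 3000000 = 8, so REG_CLKDIV is written with 7. An illustrative helper mirroring that computation:

static unsigned long demo_tscadc_clkdiv(unsigned long fck_rate,
					unsigned long adc_rate)
{
	/* e.g. 24000000 / 3000000 - 1 = 7 */
	return fck_rate / adc_rate - 1;
}
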
+
+static int __devexit ti_tscadc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
+
+ tscadc_writel(tscadc, REG_SE, 0x00);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ mfd_remove_devices(tscadc->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tscadc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+
+ tscadc_writel(tscadc_dev, REG_SE, 0x00);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tscadc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ unsigned int restore, ctrl;
+
+ pm_runtime_get_sync(dev);
+
+ /* context restore */
+ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
+ CNTRLREG_STEPID | CNTRLREG_4WIRE;
+ tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ tscadc_idle_config(tscadc_dev);
+ tscadc_writel(tscadc_dev, REG_SE, STPENB_STEPENB);
+ restore = tscadc_readl(tscadc_dev, REG_CTRL);
+ tscadc_writel(tscadc_dev, REG_CTRL,
+ (restore | CNTRLREG_TSCSSENB));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tscadc_pm_ops = {
+ .suspend = tscadc_suspend,
+ .resume = tscadc_resume,
+};
+#define TSCADC_PM_OPS (&tscadc_pm_ops)
+#else
+#define TSCADC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tscadc_driver = {
+ .driver = {
+ .name = "ti_tscadc",
+ .owner = THIS_MODULE,
+ .pm = TSCADC_PM_OPS,
+ },
+ .probe = ti_tscadc_probe,
+ .remove = __devexit_p(ti_tscadc_remove),
+
+};
+
+module_platform_driver(ti_tscadc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen / ADC MFD controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 1b203499c74..409afa23d5d 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -86,9 +86,9 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct tps6507x_dev *tps6507x;
- int ret = 0;
- tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ tps6507x = devm_kzalloc(&i2c->dev, sizeof(struct tps6507x_dev),
+ GFP_KERNEL);
if (tps6507x == NULL)
return -ENOMEM;
@@ -98,19 +98,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
tps6507x->read_dev = tps6507x_i2c_read_device;
tps6507x->write_dev = tps6507x_i2c_write_device;
- ret = mfd_add_devices(tps6507x->dev, -1,
- tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
- NULL, 0, NULL);
-
- if (ret < 0)
- goto err;
-
- return ret;
-
-err:
- mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
- return ret;
+ return mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
+ ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
}
static int tps6507x_i2c_remove(struct i2c_client *i2c)
@@ -118,8 +107,6 @@ static int tps6507x_i2c_remove(struct i2c_client *i2c)
struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
-
return 0;
}
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 382a857b0dd..8d12a8e00d9 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -25,7 +25,6 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
-#include <linux/regmap.h>
#include <linux/err.h>
#define NUM_INT_REG 2
@@ -39,204 +38,102 @@
#define TPS65090_INT_MSK 0x2
#define TPS65090_INT_MSK2 0x3
-struct tps65090_irq_data {
- u8 mask_reg;
- u8 mask_pos;
-};
-
-#define TPS65090_IRQ(_reg, _mask_pos) \
- { \
- .mask_reg = (_reg), \
- .mask_pos = (_mask_pos), \
- }
-
-static const struct tps65090_irq_data tps65090_irqs[] = {
- [0] = TPS65090_IRQ(0, 0),
- [1] = TPS65090_IRQ(0, 1),
- [2] = TPS65090_IRQ(0, 2),
- [3] = TPS65090_IRQ(0, 3),
- [4] = TPS65090_IRQ(0, 4),
- [5] = TPS65090_IRQ(0, 5),
- [6] = TPS65090_IRQ(0, 6),
- [7] = TPS65090_IRQ(0, 7),
- [8] = TPS65090_IRQ(1, 0),
- [9] = TPS65090_IRQ(1, 1),
- [10] = TPS65090_IRQ(1, 2),
- [11] = TPS65090_IRQ(1, 3),
- [12] = TPS65090_IRQ(1, 4),
- [13] = TPS65090_IRQ(1, 5),
- [14] = TPS65090_IRQ(1, 6),
- [15] = TPS65090_IRQ(1, 7),
-};
+#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1
+#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2
+#define TPS65090_INT1_MASK_BAT_STATUS_CHANGE 3
+#define TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE 4
+#define TPS65090_INT1_MASK_CHARGING_COMPLETE 5
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC1 6
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC2 7
+#define TPS65090_INT2_MASK_OVERLOAD_DCDC3 0
+#define TPS65090_INT2_MASK_OVERLOAD_FET1 1
+#define TPS65090_INT2_MASK_OVERLOAD_FET2 2
+#define TPS65090_INT2_MASK_OVERLOAD_FET3 3
+#define TPS65090_INT2_MASK_OVERLOAD_FET4 4
+#define TPS65090_INT2_MASK_OVERLOAD_FET5 5
+#define TPS65090_INT2_MASK_OVERLOAD_FET6 6
+#define TPS65090_INT2_MASK_OVERLOAD_FET7 7
static struct mfd_cell tps65090s[] = {
{
.name = "tps65090-pmic",
},
{
- .name = "tps65090-regulator",
+ .name = "tps65090-charger",
},
};
-int tps65090_write(struct device *dev, int reg, uint8_t val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_write(tps->rmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65090_write);
-
-int tps65090_read(struct device *dev, int reg, uint8_t *val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- unsigned int temp_val;
- int ret;
- ret = regmap_read(tps->rmap, reg, &temp_val);
- if (!ret)
- *val = temp_val;
- return ret;
-}
-EXPORT_SYMBOL_GPL(tps65090_read);
-
-int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_set_bits);
-
-int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_clr_bits);
-
-static void tps65090_irq_lock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65090->irq_lock);
-}
-
-static void tps65090_irq_mask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->hwirq;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_unmask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps65090->irq_base;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_clr_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_unlock(&tps65090->irq_lock);
-}
-
-static irqreturn_t tps65090_irq(int irq, void *data)
-{
- struct tps65090 *tps65090 = data;
- int ret = 0;
- u8 status, mask;
- unsigned long int acks = 0;
- int i;
-
- for (i = 0; i < NUM_INT_REG; i++) {
- ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read mask reg [addr:%d]\n",
- TPS65090_INT_MSK + i);
- return IRQ_NONE;
- }
- ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
- &status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read status reg [addr:%d]\n",
- TPS65090_INT_STS + i);
- return IRQ_NONE;
- }
- if (status) {
- /* Ack only those interrupts which are not masked */
- status &= (~mask);
- ret = tps65090_write(tps65090->dev,
- TPS65090_INT_STS + i, status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to write interrupt status\n");
- return IRQ_NONE;
- }
- acks |= (status << (i * 8));
- }
- }
-
- for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
- handle_nested_irq(tps65090->irq_base + i);
- return acks ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int tps65090_irq_init(struct tps65090 *tps65090, int irq,
- int irq_base)
-{
- int i, ret;
-
- if (!irq_base) {
- dev_err(tps65090->dev, "IRQ base not set\n");
- return -EINVAL;
- }
-
- mutex_init(&tps65090->irq_lock);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);
-
- tps65090->irq_base = irq_base;
- tps65090->irq_chip.name = "tps65090";
- tps65090->irq_chip.irq_mask = tps65090_irq_mask;
- tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
- tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
- tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
- int __irq = i + tps65090->irq_base;
- irq_set_chip_data(__irq, tps65090);
- irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
- }
-
- ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
- "tps65090", tps65090);
- if (!ret) {
- device_init_wakeup(tps65090->dev, 1);
- enable_irq_wake(irq);
- }
+static const struct regmap_irq tps65090_irqs[] = {
+ /* INT1 IRQs */
+ [TPS65090_IRQ_VAC_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_VSYS_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_BAT_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_COMPLETE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC1] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC2] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2,
+ },
+ /* INT2 IRQs */
+ [TPS65090_IRQ_OVERLOAD_DCDC3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET1] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET1,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET2] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET2,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET4] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET4,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET5] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET5,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET6] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET6,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET7] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET7,
+ },
+};
- return ret;
-}
+static struct regmap_irq_chip tps65090_irq_chip = {
+ .name = "tps65090",
+ .irqs = tps65090_irqs,
+ .num_irqs = ARRAY_SIZE(tps65090_irqs),
+ .num_regs = NUM_INT_REG,
+ .status_base = TPS65090_INT_STS,
+ .mask_base = TPS65090_INT_MSK,
+ .mask_invert = true,
+};
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
- if (reg == TPS65090_INT_STS)
+ if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS2))
return true;
else
return false;
@@ -263,36 +160,36 @@ static int tps65090_i2c_probe(struct i2c_client *client,
return -EINVAL;
}
- tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
- GFP_KERNEL);
- if (tps65090 == NULL)
+ tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
+ if (!tps65090) {
+ dev_err(&client->dev, "mem alloc for tps65090 failed\n");
return -ENOMEM;
+ }
- tps65090->client = client;
tps65090->dev = &client->dev;
i2c_set_clientdata(client, tps65090);
- mutex_init(&tps65090->lock);
-
- if (client->irq) {
- ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
- if (ret) {
- dev_err(&client->dev, "IRQ init failed with err: %d\n",
- ret);
- goto err_exit;
- }
- }
-
- tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(client, &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
ret = PTR_ERR(tps65090->rmap);
dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
- goto err_irq_exit;
+ return ret;
+ }
+
+ if (client->irq) {
+ ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+ &tps65090_irq_chip, &tps65090->irq_data);
+ if (ret) {
+ dev_err(&client->dev,
+ "IRQ init failed with err: %d\n", ret);
+ return ret;
+ }
}
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
- ARRAY_SIZE(tps65090s), NULL, 0, NULL);
+ ARRAY_SIZE(tps65090s), NULL,
+ regmap_irq_chip_get_base(tps65090->irq_data), NULL);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
@@ -303,8 +200,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
err_irq_exit:
if (client->irq)
- free_irq(client->irq, tps65090);
-err_exit:
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return ret;
}
@@ -314,7 +210,7 @@ static int tps65090_i2c_remove(struct i2c_client *client)
mfd_remove_devices(tps65090->dev);
if (client->irq)
- free_irq(client->irq, tps65090);
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return 0;
}
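
The net effect of this hunk is that the hand-written irq_chip, threaded handler and per-IRQ setup are replaced by the generic regmap-irq helper: probe registers the chip with regmap_add_irq_chip(), remove tears it down with regmap_del_irq_chip(), and the children receive their interrupt base through regmap_irq_chip_get_base(). Condensed into a sketch (error handling trimmed; it reuses the tps65090_irq_chip defined above and is not meant as a drop-in):

#include <linux/interrupt.h>
#include <linux/regmap.h>

static int demo_tps65090_irq_setup(struct regmap *rmap, int irq, int irq_base,
				   struct regmap_irq_chip_data **irq_data)
{
	int ret;

	ret = regmap_add_irq_chip(rmap, irq,
				  IRQF_ONESHOT | IRQF_TRIGGER_LOW, irq_base,
				  &tps65090_irq_chip, irq_data);
	if (ret)
		return ret;

	/* With a legacy irq_base, this is where the children's virqs start. */
	return regmap_irq_chip_get_base(*irq_data);
}

static void demo_tps65090_irq_teardown(int irq,
				       struct regmap_irq_chip_data *irq_data)
{
	regmap_del_irq_chip(irq, irq_data);
}
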
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index e14e252e347..b8f48647661 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -160,6 +160,7 @@ static int tps65217_probe(struct i2c_client *client,
unsigned int version;
unsigned int chip_id = ids->driver_data;
const struct of_device_id *match;
+ bool status_off = false;
int ret;
if (client->dev.of_node) {
@@ -170,6 +171,8 @@ static int tps65217_probe(struct i2c_client *client,
return -EINVAL;
}
chip_id = (unsigned int)match->data;
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
}
if (!chip_id) {
@@ -207,6 +210,15 @@ static int tps65217_probe(struct i2c_client *client,
return ret;
}
+ /* Set the PMIC to shutdown on PWR_EN toggle */
+ if (status_off) {
+ ret = tps65217_set_bits(tps, TPS65217_REG_STATUS,
+ TPS65217_STATUS_OFF, TPS65217_STATUS_OFF,
+ TPS65217_PROTECT_NONE);
+ if (ret)
+ dev_warn(tps->dev, "unable to set the status OFF\n");
+ }
+
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
(version & TPS65217_CHIPID_CHIP_MASK) >> 4,
version & TPS65217_CHIPID_REV_MASK);
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 9f92c3b2209..721b9186a5d 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -17,15 +17,15 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/machine.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
@@ -94,12 +94,25 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
};
+static struct resource tps6586x_rtc_resources[] = {
+ {
+ .start = TPS6586X_INT_RTC_ALM1,
+ .end = TPS6586X_INT_RTC_ALM1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell tps6586x_cell[] = {
{
.name = "tps6586x-gpio",
},
{
+ .name = "tps6586x-pmic",
+ },
+ {
.name = "tps6586x-rtc",
+ .num_resources = ARRAY_SIZE(tps6586x_rtc_resources),
+ .resources = &tps6586x_rtc_resources[0],
},
{
.name = "tps6586x-onkey",
@@ -116,6 +129,7 @@ struct tps6586x {
int irq_base;
u32 irq_en;
u8 mask_reg[5];
+ struct irq_domain *irq_domain;
};
static inline struct tps6586x *dev_to_tps6586x(struct device *dev)
@@ -184,6 +198,14 @@ int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
}
EXPORT_SYMBOL_GPL(tps6586x_update);
+int tps6586x_irq_get_virq(struct device *dev, int irq)
+{
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return irq_create_mapping(tps6586x->irq_domain, irq);
+}
+EXPORT_SYMBOL_GPL(tps6586x_irq_get_virq);
+
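tps6586x_irq_get_virq() gives sub-drivers a way to translate a TPS6586x hardware interrupt number, such as the TPS6586X_INT_RTC_ALM1 used in the new RTC resource above, into a Linux virq via the chip's irq_domain. A hypothetical consumer (assuming the helper is declared in the tps6586x header) would use it roughly like this:

#include <linux/platform_device.h>
#include <linux/mfd/tps6586x.h>	/* assumed to declare tps6586x_irq_get_virq() */

static int demo_child_probe(struct platform_device *pdev)
{
	/* pdev->dev.parent is the tps6586x MFD device. */
	int virq = tps6586x_irq_get_virq(pdev->dev.parent,
					 TPS6586X_INT_RTC_ALM1);

	if (virq <= 0)
		return -ENXIO;

	/* request_threaded_irq(virq, ...) would follow here. */
	return 0;
}
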
static int __remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
@@ -205,7 +227,7 @@ static void tps6586x_irq_lock(struct irq_data *data)
static void tps6586x_irq_enable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
@@ -216,7 +238,7 @@ static void tps6586x_irq_disable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
@@ -239,6 +261,39 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&tps6586x->irq_lock);
}
+static struct irq_chip tps6586x_irq_chip = {
+ .name = "tps6586x",
+ .irq_bus_lock = tps6586x_irq_lock,
+ .irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
+ .irq_disable = tps6586x_irq_disable,
+ .irq_enable = tps6586x_irq_enable,
+};
+
+static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps6586x *tps6586x = h->host_data;
+
+ irq_set_chip_data(virq, tps6586x);
+ irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps6586x_domain_ops = {
+ .map = tps6586x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static irqreturn_t tps6586x_irq(int irq, void *data)
{
struct tps6586x *tps6586x = data;
@@ -259,7 +314,8 @@ static irqreturn_t tps6586x_irq(int irq, void *data)
int i = __ffs(acks);
if (tps6586x->irq_en & (1 << i))
- handle_nested_irq(tps6586x->irq_base + i);
+ handle_nested_irq(
+ irq_find_mapping(tps6586x->irq_domain, i));
acks &= ~(1 << i);
}
@@ -272,11 +328,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
{
int i, ret;
u8 tmp[4];
-
- if (!irq_base) {
- dev_warn(tps6586x->dev, "No interrupt support on IRQ base\n");
- return -EINVAL;
- }
+ int new_irq_base;
+ int irq_num = ARRAY_SIZE(tps6586x_irqs);
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
@@ -286,25 +339,24 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
- tps6586x->irq_base = irq_base;
-
- tps6586x->irq_chip.name = "tps6586x";
- tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
- tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
- tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
- tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
- int __irq = i + tps6586x->irq_base;
- irq_set_chip_data(__irq, tps6586x);
- irq_set_chip_and_handler(__irq, &tps6586x->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
+ if (irq_base > 0) {
+ new_irq_base = irq_alloc_descs(irq_base, 0, irq_num, -1);
+ if (new_irq_base < 0) {
+ dev_err(tps6586x->dev,
+ "Failed to alloc IRQs: %d\n", new_irq_base);
+ return new_irq_base;
+ }
+ } else {
+ new_irq_base = 0;
}
+ tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node,
+ irq_num, new_irq_base, &tps6586x_domain_ops,
+ tps6586x);
+ if (!tps6586x->irq_domain) {
+ dev_err(tps6586x->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
"tps6586x", tps6586x);
@@ -350,80 +402,19 @@ failed:
}
#ifdef CONFIG_OF
-static struct of_regulator_match tps6586x_matches[] = {
- { .name = "sys", .driver_data = (void *)TPS6586X_ID_SYS },
- { .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 },
- { .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 },
- { .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 },
- { .name = "ldo0", .driver_data = (void *)TPS6586X_ID_LDO_0 },
- { .name = "ldo1", .driver_data = (void *)TPS6586X_ID_LDO_1 },
- { .name = "ldo2", .driver_data = (void *)TPS6586X_ID_LDO_2 },
- { .name = "ldo3", .driver_data = (void *)TPS6586X_ID_LDO_3 },
- { .name = "ldo4", .driver_data = (void *)TPS6586X_ID_LDO_4 },
- { .name = "ldo5", .driver_data = (void *)TPS6586X_ID_LDO_5 },
- { .name = "ldo6", .driver_data = (void *)TPS6586X_ID_LDO_6 },
- { .name = "ldo7", .driver_data = (void *)TPS6586X_ID_LDO_7 },
- { .name = "ldo8", .driver_data = (void *)TPS6586X_ID_LDO_8 },
- { .name = "ldo9", .driver_data = (void *)TPS6586X_ID_LDO_9 },
- { .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC },
-};
-
static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *client)
{
- const unsigned int num = ARRAY_SIZE(tps6586x_matches);
struct device_node *np = client->dev.of_node;
struct tps6586x_platform_data *pdata;
- struct tps6586x_subdev_info *devs;
- struct device_node *regs;
- const char *sys_rail_name = NULL;
- unsigned int count;
- unsigned int i, j;
- int err;
-
- regs = of_find_node_by_name(np, "regulators");
- if (!regs)
- return NULL;
-
- err = of_regulator_match(&client->dev, regs, tps6586x_matches, num);
- if (err < 0) {
- of_node_put(regs);
- return NULL;
- }
-
- of_node_put(regs);
- count = err;
-
- devs = devm_kzalloc(&client->dev, count * sizeof(*devs), GFP_KERNEL);
- if (!devs)
- return NULL;
-
- for (i = 0, j = 0; i < num && j < count; i++) {
- struct regulator_init_data *reg_idata;
-
- if (!tps6586x_matches[i].init_data)
- continue;
-
- reg_idata = tps6586x_matches[i].init_data;
- devs[j].name = "tps6586x-regulator";
- devs[j].platform_data = tps6586x_matches[i].init_data;
- devs[j].id = (int)tps6586x_matches[i].driver_data;
- if (devs[j].id == TPS6586X_ID_SYS)
- sys_rail_name = reg_idata->constraints.name;
-
- if ((devs[j].id == TPS6586X_ID_LDO_5) ||
- (devs[j].id == TPS6586X_ID_LDO_RTC))
- reg_idata->supply_regulator = sys_rail_name;
-
- devs[j].of_node = tps6586x_matches[i].of_node;
- j++;
- }
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ if (!pdata) {
+ dev_err(&client->dev, "Memory allocation failed\n");
return NULL;
+ }
- pdata->num_subdevs = count;
- pdata->subdevs = devs;
+ pdata->num_subdevs = 0;
+ pdata->subdevs = NULL;
pdata->gpio_base = -1;
pdata->irq_base = -1;
pdata->pm_off = of_property_read_bool(np, "ti,system-power-controller");
@@ -521,7 +512,7 @@ static int tps6586x_i2c_probe(struct i2c_client *client,
ret = mfd_add_devices(tps6586x->dev, -1,
tps6586x_cell, ARRAY_SIZE(tps6586x_cell),
- NULL, 0, NULL);
+ NULL, 0, tps6586x->irq_domain);
if (ret < 0) {
dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
goto err_mfd_add;
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
deleted file mode 100644
index 09aab3e4776..00000000000
--- a/drivers/mfd/tps65910-irq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * tps65910-irq.c -- TI TPS6591x
- *
- * Copyright 2010 Texas Instruments Inc.
- *
- * Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65910.h>
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads. We're also assuming that
- * it's rare to get lots of interrupts firing simultaneously so try to
- * minimise I/O.
- */
-static irqreturn_t tps65910_irq(int irq, void *irq_data)
-{
- struct tps65910 *tps65910 = irq_data;
- unsigned int reg;
- u32 irq_sts;
- u32 irq_mask;
- int i;
-
- tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
- irq_sts = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
- irq_sts |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
- irq_sts |= reg << 16;
- }
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- irq_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- irq_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- irq_mask |= reg << 16;
- }
-
- irq_sts &= ~irq_mask;
-
- if (!irq_sts)
- return IRQ_NONE;
-
- for (i = 0; i < tps65910->irq_num; i++) {
-
- if (!(irq_sts & (1 << i)))
- continue;
-
- handle_nested_irq(irq_find_mapping(tps65910->domain, i));
- }
-
- /* Write the STS register back to clear IRQs we handled */
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
- reg = irq_sts & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = irq_sts >> 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
- }
-
- return IRQ_HANDLED;
-}
-
-static void tps65910_irq_lock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- u32 reg_mask;
- unsigned int reg;
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- reg_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- reg_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- reg_mask |= reg << 16;
- }
-
- if (tps65910->irq_mask != reg_mask) {
- reg = tps65910->irq_mask & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
- reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = tps65910->irq_mask >> 16;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
- }
- }
- mutex_unlock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_enable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask &= ~(1 << data->hwirq);
-}
-
-static void tps65910_irq_disable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask |= (1 << data->hwirq);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tps65910_irq_set_wake(struct irq_data *data, unsigned int enable)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- return irq_set_irq_wake(tps65910->chip_irq, enable);
-}
-#else
-#define tps65910_irq_set_wake NULL
-#endif
-
-static struct irq_chip tps65910_irq_chip = {
- .name = "tps65910",
- .irq_bus_lock = tps65910_irq_lock,
- .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
- .irq_disable = tps65910_irq_disable,
- .irq_enable = tps65910_irq_enable,
- .irq_set_wake = tps65910_irq_set_wake,
-};
-
-static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct tps65910 *tps65910 = h->host_data;
-
- irq_set_chip_data(virq, tps65910);
- irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
- irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
- irq_set_noprobe(virq);
-#endif
-
- return 0;
-}
-
-static struct irq_domain_ops tps65910_domain_ops = {
- .map = tps65910_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
- struct tps65910_platform_data *pdata)
-{
- int ret;
- int flags = IRQF_ONESHOT;
-
- if (!irq) {
- dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
- return -EINVAL;
- }
-
- if (!pdata) {
- dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
- return -EINVAL;
- }
-
- switch (tps65910_chip_id(tps65910)) {
- case TPS65910:
- tps65910->irq_num = TPS65910_NUM_IRQ;
- break;
- case TPS65911:
- tps65910->irq_num = TPS65911_NUM_IRQ;
- break;
- }
-
- if (pdata->irq_base > 0) {
- pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
- tps65910->irq_num, -1);
- if (pdata->irq_base < 0) {
- dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
- pdata->irq_base);
- return pdata->irq_base;
- }
- }
-
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
- if (pdata->irq_base > 0)
- tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
- tps65910->irq_num,
- pdata->irq_base,
- 0,
- &tps65910_domain_ops, tps65910);
- else
- tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
- tps65910->irq_num,
- &tps65910_domain_ops, tps65910);
-
- if (!tps65910->domain) {
- dev_err(tps65910->dev, "Failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
- "tps65910", tps65910);
-
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
-
- if (ret != 0)
- dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
-
- return ret;
-}
-
-int tps65910_irq_exit(struct tps65910 *tps65910)
-{
- if (tps65910->chip_irq)
- free_irq(tps65910->chip_irq, tps65910);
- return 0;
-}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index ce054654f5b..d7927720483 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,6 +19,9 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
@@ -50,6 +53,219 @@ static struct mfd_cell tps65910s[] = {
};
+static const struct regmap_irq tps65911_irqs[] = {
+ /* INT_STS */
+ [TPS65911_IRQ_PWRHOLD_F] = {
+ .mask = INT_MSK_PWRHOLD_F_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_VBAT_VMHI] = {
+ .mask = INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON] = {
+ .mask = INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON_LP] = {
+ .mask = INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRHOLD_R] = {
+ .mask = INT_MSK_PWRHOLD_R_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_HOTDIE] = {
+ .mask = INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_ALARM] = {
+ .mask = INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_PERIOD] = {
+ .mask = INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65911_IRQ_GPIO0_R] = {
+ .mask = INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO0_F] = {
+ .mask = INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_R] = {
+ .mask = INT_MSK2_GPIO1_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_F] = {
+ .mask = INT_MSK2_GPIO1_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_R] = {
+ .mask = INT_MSK2_GPIO2_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_F] = {
+ .mask = INT_MSK2_GPIO2_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_R] = {
+ .mask = INT_MSK2_GPIO3_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_F] = {
+ .mask = INT_MSK2_GPIO3_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+
+ /* INT_STS3 */
+ [TPS65911_IRQ_GPIO4_R] = {
+ .mask = INT_MSK3_GPIO4_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO4_F] = {
+ .mask = INT_MSK3_GPIO4_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_R] = {
+ .mask = INT_MSK3_GPIO5_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_F] = {
+ .mask = INT_MSK3_GPIO5_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_WTCHDG] = {
+ .mask = INT_MSK3_WTCHDG_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_H] = {
+ .mask = INT_MSK3_VMBCH2_H_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_L] = {
+ .mask = INT_MSK3_VMBCH2_L_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_PWRDN] = {
+ .mask = INT_MSK3_PWRDN_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+};
+
+static const struct regmap_irq tps65910_irqs[] = {
+ /* INT_STS */
+ [TPS65910_IRQ_VBAT_VMBDCH] = {
+ .mask = TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_VBAT_VMHI] = {
+ .mask = TPS65910_INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON] = {
+ .mask = TPS65910_INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON_LP] = {
+ .mask = TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRHOLD] = {
+ .mask = TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_HOTDIE] = {
+ .mask = TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_ALARM] = {
+ .mask = TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_PERIOD] = {
+ .mask = TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65910_IRQ_GPIO_R] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65910_IRQ_GPIO_F] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+};
+
+static struct regmap_irq_chip tps65911_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65911_irqs,
+ .num_irqs = ARRAY_SIZE(tps65911_irqs),
+ .num_regs = 3,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
+
+static struct regmap_irq_chip tps65910_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65910_irqs,
+ .num_irqs = ARRAY_SIZE(tps65910_irqs),
+ .num_regs = 2,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
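
The irq_reg_stride of 2 is what makes the interleaved TPS65910 register layout work with regmap-irq: as far as the regmap-irq core is concerned, the i-th status and mask registers sit at base + i * irq_reg_stride, and ack_base equal to the status base means handled bits are written straight back to the status registers.

/*
 * Register addressing implied by the two chip descriptions above
 * (i = 0 .. num_regs - 1):
 *
 *   status reg i = TPS65910_INT_STS + 2 * i  ->  INT_STS, INT_STS2, INT_STS3
 *   mask   reg i = TPS65910_INT_MSK + 2 * i  ->  INT_MSK, INT_MSK2, INT_MSK3
 *
 * which matches the STS/MSK pairs the deleted tps65910-irq.c read by hand.
 */
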
+
+static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata)
+{
+ int ret = 0;
+ static struct regmap_irq_chip *tps6591x_irqs_chip;
+
+ if (!irq) {
+ dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
+ }
+
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
+ return -EINVAL;
+ }
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps6591x_irqs_chip = &tps65910_irq_chip;
+ break;
+ case TPS65911:
+ tps6591x_irqs_chip = &tps65911_irq_chip;
+ break;
+ }
+
+ tps65910->chip_irq = irq;
+ ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
+ IRQF_ONESHOT, pdata->irq_base,
+ tps6591x_irqs_chip, &tps65910->irq_data);
+ if (ret < 0)
+ dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
+ return ret;
+}
+
+static int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+ if (tps65910->chip_irq > 0)
+ regmap_del_irq_chip(tps65910->chip_irq, tps65910->irq_data);
+ return 0;
+}
+
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -270,7 +486,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
tps65910->id = chip_id;
- mutex_init(&tps65910->io_mutex);
tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
@@ -279,14 +494,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(tps65910->dev, -1,
- tps65910s, ARRAY_SIZE(tps65910s),
- NULL, 0, NULL);
- if (ret < 0) {
- dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
- return ret;
- }
-
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
@@ -299,6 +506,15 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
pm_power_off = tps65910_power_off;
}
+ ret = mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0,
+ regmap_irq_get_domain(tps65910->irq_data));
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
+
return ret;
}
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
new file mode 100644
index 00000000000..10b51f7dfff
--- /dev/null
+++ b/drivers/mfd/tps80031.c
@@ -0,0 +1,574 @@
+/*
+ * tps80031.c -- TI TPS80031/TPS80032 mfd core driver.
+ *
+ * MFD core driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static struct resource tps80031_rtc_resources[] = {
+ {
+ .start = TPS80031_INT_RTC_ALARM,
+ .end = TPS80031_INT_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* TPS80031 sub mfd devices */
+static struct mfd_cell tps80031_cell[] = {
+ {
+ .name = "tps80031-pmic",
+ },
+ {
+ .name = "tps80031-clock",
+ },
+ {
+ .name = "tps80031-rtc",
+ .num_resources = ARRAY_SIZE(tps80031_rtc_resources),
+ .resources = tps80031_rtc_resources,
+ },
+ {
+ .name = "tps80031-gpadc",
+ },
+ {
+ .name = "tps80031-fuel-gauge",
+ },
+ {
+ .name = "tps80031-charger",
+ },
+};
+
+static int tps80031_slave_address[TPS80031_NUM_SLAVES] = {
+ TPS80031_I2C_ID0_ADDR,
+ TPS80031_I2C_ID1_ADDR,
+ TPS80031_I2C_ID2_ADDR,
+ TPS80031_I2C_ID3_ADDR,
+};
+
+struct tps80031_pupd_data {
+ u8 reg;
+ u8 pullup_bit;
+ u8 pulldown_bit;
+};
+
+#define TPS80031_IRQ(_reg, _mask) \
+ { \
+ .reg_offset = (TPS80031_INT_MSK_LINE_##_reg) - \
+ TPS80031_INT_MSK_LINE_A, \
+ .mask = BIT(_mask), \
+ }
+
+static const struct regmap_irq tps80031_main_irqs[] = {
+ [TPS80031_INT_PWRON] = TPS80031_IRQ(A, 0),
+ [TPS80031_INT_RPWRON] = TPS80031_IRQ(A, 1),
+ [TPS80031_INT_SYS_VLOW] = TPS80031_IRQ(A, 2),
+ [TPS80031_INT_RTC_ALARM] = TPS80031_IRQ(A, 3),
+ [TPS80031_INT_RTC_PERIOD] = TPS80031_IRQ(A, 4),
+ [TPS80031_INT_HOT_DIE] = TPS80031_IRQ(A, 5),
+ [TPS80031_INT_VXX_SHORT] = TPS80031_IRQ(A, 6),
+ [TPS80031_INT_SPDURATION] = TPS80031_IRQ(A, 7),
+ [TPS80031_INT_WATCHDOG] = TPS80031_IRQ(B, 0),
+ [TPS80031_INT_BAT] = TPS80031_IRQ(B, 1),
+ [TPS80031_INT_SIM] = TPS80031_IRQ(B, 2),
+ [TPS80031_INT_MMC] = TPS80031_IRQ(B, 3),
+ [TPS80031_INT_RES] = TPS80031_IRQ(B, 4),
+ [TPS80031_INT_GPADC_RT] = TPS80031_IRQ(B, 5),
+ [TPS80031_INT_GPADC_SW2_EOC] = TPS80031_IRQ(B, 6),
+ [TPS80031_INT_CC_AUTOCAL] = TPS80031_IRQ(B, 7),
+ [TPS80031_INT_ID_WKUP] = TPS80031_IRQ(C, 0),
+ [TPS80031_INT_VBUSS_WKUP] = TPS80031_IRQ(C, 1),
+ [TPS80031_INT_ID] = TPS80031_IRQ(C, 2),
+ [TPS80031_INT_VBUS] = TPS80031_IRQ(C, 3),
+ [TPS80031_INT_CHRG_CTRL] = TPS80031_IRQ(C, 4),
+ [TPS80031_INT_EXT_CHRG] = TPS80031_IRQ(C, 5),
+ [TPS80031_INT_INT_CHRG] = TPS80031_IRQ(C, 6),
+ [TPS80031_INT_RES2] = TPS80031_IRQ(C, 7),
+};
+
+static struct regmap_irq_chip tps80031_irq_chip = {
+ .name = "tps80031",
+ .irqs = tps80031_main_irqs,
+ .num_irqs = ARRAY_SIZE(tps80031_main_irqs),
+ .num_regs = 3,
+ .status_base = TPS80031_INT_STS_A,
+ .mask_base = TPS80031_INT_MSK_LINE_A,
+};
+
+#define PUPD_DATA(_reg, _pulldown_bit, _pullup_bit) \
+ { \
+ .reg = TPS80031_CFG_INPUT_PUPD##_reg, \
+ .pulldown_bit = _pulldown_bit, \
+ .pullup_bit = _pullup_bit, \
+ }
+
+static const struct tps80031_pupd_data tps80031_pupds[] = {
+ [TPS80031_PREQ1] = PUPD_DATA(1, BIT(0), BIT(1)),
+ [TPS80031_PREQ2A] = PUPD_DATA(1, BIT(2), BIT(3)),
+ [TPS80031_PREQ2B] = PUPD_DATA(1, BIT(4), BIT(5)),
+ [TPS80031_PREQ2C] = PUPD_DATA(1, BIT(6), BIT(7)),
+ [TPS80031_PREQ3] = PUPD_DATA(2, BIT(0), BIT(1)),
+ [TPS80031_NRES_WARM] = PUPD_DATA(2, 0, BIT(2)),
+ [TPS80031_PWM_FORCE] = PUPD_DATA(2, BIT(5), 0),
+ [TPS80031_CHRG_EXT_CHRG_STATZ] = PUPD_DATA(2, 0, BIT(6)),
+ [TPS80031_SIM] = PUPD_DATA(3, BIT(0), BIT(1)),
+ [TPS80031_MMC] = PUPD_DATA(3, BIT(2), BIT(3)),
+ [TPS80031_GPADC_START] = PUPD_DATA(3, BIT(4), 0),
+ [TPS80031_DVSI2C_SCL] = PUPD_DATA(4, 0, BIT(0)),
+ [TPS80031_DVSI2C_SDA] = PUPD_DATA(4, 0, BIT(1)),
+ [TPS80031_CTLI2C_SCL] = PUPD_DATA(4, 0, BIT(2)),
+ [TPS80031_CTLI2C_SDA] = PUPD_DATA(4, 0, BIT(3)),
+};
+static struct tps80031 *tps80031_power_off_dev;
+
+int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add)
+{
+ u8 res_ass_reg = 0;
+ int preq_mask_bit = 0;
+ int ret;
+
+ if (!(ext_ctrl_flag & TPS80031_EXT_PWR_REQ))
+ return 0;
+
+ if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ1) {
+ res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 5;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ2) {
+ res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 6;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ3) {
+ res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 7;
+ }
+
+ /* Configure REQ_ASS registers */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, res_ass_reg,
+ BIT(preq_bit & 0x7));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x setbit failed, err = %d\n",
+ res_ass_reg, ret);
+ return ret;
+ }
+
+ /* Unmask the PREQ */
+ ret = tps80031_clr_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x clrbit failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+
+ /* Switch regulator control to resource now */
+ if (ext_ctrl_flag & (TPS80031_PWR_REQ_INPUT_PREQ2 |
+ TPS80031_PWR_REQ_INPUT_PREQ3)) {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, state_reg_add,
+ 0x0, TPS80031_STATE_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ state_reg_add, ret);
+ } else {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, trans_reg_add,
+ TPS80031_TRANS_SLEEP_OFF,
+ TPS80031_TRANS_SLEEP_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ trans_reg_add, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config);
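
The resource-assignment registers appear to be laid out as consecutive 8-bit registers per PREQ line (the clear loop in tps80031_init_ext_control() below walks TPS80031_PREQ1_RES_ASS_A + i over nine registers), which is what the shift-and-mask arithmetic above relies on. A worked example for preq_bit = 10 with PREQ1 selected:

/*
 *   res_ass_reg   = TPS80031_PREQ1_RES_ASS_A + (10 >> 3)  ->  ..._RES_ASS_B
 *   bit to set    = BIT(10 & 0x7)                         ->  BIT(2)
 *   preq_mask_bit = 5, so BIT(5) is cleared in
 *   TPS80031_PHOENIX_MSK_TRANSITION to unmask PREQ1.
 */
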
+
+static void tps80031_power_off(void)
+{
+ dev_info(tps80031_power_off_dev->dev, "switching off PMU\n");
+ tps80031_write(tps80031_power_off_dev->dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_DEV_ON, TPS80031_DEVOFF);
+}
+
+static void tps80031_pupd_init(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct tps80031_pupd_init_data *pupd_init_data = pdata->pupd_init_data;
+ int data_size = pdata->pupd_init_data_size;
+ int i;
+
+ for (i = 0; i < data_size; ++i) {
+ struct tps80031_pupd_init_data *pupd_init = &pupd_init_data[i];
+ const struct tps80031_pupd_data *pupd =
+ &tps80031_pupds[pupd_init->input_pin];
+ u8 update_value = 0;
+ u8 update_mask = pupd->pulldown_bit | pupd->pullup_bit;
+
+ if (pupd_init->setting == TPS80031_PUPD_PULLDOWN)
+ update_value = pupd->pulldown_bit;
+ else if (pupd_init->setting == TPS80031_PUPD_PULLUP)
+ update_value = pupd->pullup_bit;
+
+ tps80031_update(tps80031->dev, TPS80031_SLAVE_ID1, pupd->reg,
+ update_value, update_mask);
+ }
+}
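
The pull-up/pull-down table is consumed from platform data. A hypothetical board entry that pulls PREQ1 down and MMC up would look roughly like this (struct, field and enum names are the ones referenced above; their exact declarations in <linux/mfd/tps80031.h> are assumed):

#include <linux/kernel.h>
#include <linux/mfd/tps80031.h>

static struct tps80031_pupd_init_data demo_pupd_init[] = {
	{
		.input_pin = TPS80031_PREQ1,
		.setting   = TPS80031_PUPD_PULLDOWN,
	},
	{
		.input_pin = TPS80031_MMC,
		.setting   = TPS80031_PUPD_PULLUP,
	},
};

static struct tps80031_platform_data demo_pdata = {
	.pupd_init_data	     = demo_pupd_init,
	.pupd_init_data_size = ARRAY_SIZE(demo_pupd_init),
};
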
+
+static int tps80031_init_ext_control(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct device *dev = tps80031->dev;
+ int ret;
+ int i;
+
+ /* Clear all external control for this rail */
+ for (i = 0; i < 9; ++i) {
+ ret = tps80031_write(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PREQ1_RES_ASS_A + i, 0);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x write failed, err = %d\n",
+ TPS80031_PREQ1_RES_ASS_A + i, ret);
+ return ret;
+ }
+ }
+
+ /* Mask the PREQ */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x set_bits failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int __devinit tps80031_irq_init(struct tps80031 *tps80031, int irq,
+ int irq_base)
+{
+ struct device *dev = tps80031->dev;
+ int i, ret;
+
+ /*
+ * The MASK register is used to update the status register when an
+ * interrupt occurs, and the LINE register is used to pass the status
+ * on to the actual interrupt line. As per the datasheet:
+ * When INT_MSK_LINE [i] is set to 1, the associated interrupt
+ * number i is INT line masked, which means that no interrupt is
+ * generated on the INT line.
+ * When INT_MSK_LINE [i] is set to 0, the associated interrupt
+ * number i is line enabled: An interrupt is generated on the
+ * INT line.
+	 * In either case, whether the INT_STS [i] status bit is updated
+	 * depends only on the INT_MSK_STS [i] configuration register bit.
+ *
+ * When INT_MSK_STS [i] is set to 1, the associated interrupt number
+ * i is status masked, which means that no interrupt is stored in
+ * the INT_STS[i] status bit. Note that no interrupt number i is
+ * generated on the INT line, even if the INT_MSK_LINE [i] register
+ * bit is set to 0.
+ * When INT_MSK_STS [i] is set to 0, the associated interrupt number i
+ * is status enabled: An interrupt status is updated in the INT_STS [i]
+ * register. The interrupt may or may not be generated on the INT line,
+ * depending on the INT_MSK_LINE [i] configuration register bit.
+ */
+ for (i = 0; i < 3; i++)
+ tps80031_write(dev, TPS80031_SLAVE_ID2,
+ TPS80031_INT_MSK_STS_A + i, 0x00);
+
+ ret = regmap_add_irq_chip(tps80031->regmap[TPS80031_SLAVE_ID2], irq,
+ IRQF_ONESHOT, irq_base,
+ &tps80031_irq_chip, &tps80031->irq_data);
+ if (ret < 0) {
+ dev_err(dev, "add irq failed, err = %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static bool rd_wr_reg_id0(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS1_CFG_FORCE ... TPS80031_SMPS2_CFG_VOLTAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SECONDS_REG ... TPS80031_RTC_RESET_STATUS_REG:
+ case TPS80031_VALIDITY0 ... TPS80031_VALIDITY7:
+ case TPS80031_PHOENIX_START_CONDITION ... TPS80031_KEY_PRESS_DUR_CFG:
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_BROADCAST_ADDR_ALL ... TPS80031_BROADCAST_ADDR_CLK_RST:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ case TPS80031_BACKUP_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_volatile_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id2(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_USB_VENDOR_ID_LSB ... TPS80031_USB_OTG_REVISION:
+ case TPS80031_GPADC_CTRL ... TPS80031_CTRL_P1:
+ case TPS80031_RTCH0_LSB ... TPS80031_GPCH0_MSB:
+ case TPS80031_TOGGLE1 ... TPS80031_VIBMODE:
+ case TPS80031_PWM1ON ... TPS80031_PWM2OFF:
+ case TPS80031_FG_REG_00 ... TPS80031_FG_REG_11:
+ case TPS80031_INT_STS_A ... TPS80031_INT_MSK_STS_C:
+ case TPS80031_CONTROLLER_CTRL2 ... TPS80031_LED_PWM_CTRL2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id3(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_GPADC_TRIM0 ... TPS80031_GPADC_TRIM18:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tps80031_regmap_configs[] = {
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id0,
+ .readable_reg = rd_wr_reg_id0,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id1,
+ .readable_reg = rd_wr_reg_id1,
+ .volatile_reg = is_volatile_reg_id1,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id2,
+ .readable_reg = rd_wr_reg_id2,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id3,
+ .readable_reg = rd_wr_reg_id3,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+};
+
+static int __devinit tps80031_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps80031_platform_data *pdata = client->dev.platform_data;
+ struct tps80031 *tps80031;
+ int ret;
+ uint8_t es_version;
+ uint8_t ep_ver;
+ int i;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps80031 requires platform data\n");
+ return -EINVAL;
+ }
+
+ tps80031 = devm_kzalloc(&client->dev, sizeof(*tps80031), GFP_KERNEL);
+ if (!tps80031) {
+ dev_err(&client->dev, "Malloc failed for tps80031\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031_slave_address[i] == client->addr)
+ tps80031->clients[i] = client;
+ else
+ tps80031->clients[i] = i2c_new_dummy(client->adapter,
+ tps80031_slave_address[i]);
+ if (!tps80031->clients[i]) {
+ dev_err(&client->dev, "can't attach client %d\n", i);
+ ret = -ENOMEM;
+ goto fail_client_reg;
+ }
+
+ i2c_set_clientdata(tps80031->clients[i], tps80031);
+ tps80031->regmap[i] = devm_regmap_init_i2c(tps80031->clients[i],
+ &tps80031_regmap_configs[i]);
+ if (IS_ERR(tps80031->regmap[i])) {
+ ret = PTR_ERR(tps80031->regmap[i]);
+ dev_err(&client->dev,
+ "regmap %d init failed, err %d\n", i, ret);
+ goto fail_client_reg;
+ }
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_JTAGVERNUM, &es_version);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Silicon version number read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_EPROM_REV, &ep_ver);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Silicon eeprom version read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n",
+ es_version, ep_ver);
+ tps80031->es_version = es_version;
+ tps80031->dev = &client->dev;
+ i2c_set_clientdata(client, tps80031);
+ tps80031->chip_info = id->driver_data;
+
+ ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ tps80031_pupd_init(tps80031, pdata);
+
+ tps80031_init_ext_control(tps80031, pdata);
+
+ ret = mfd_add_devices(tps80031->dev, -1,
+ tps80031_cell, ARRAY_SIZE(tps80031_cell),
+ NULL, 0,
+ regmap_irq_get_domain(tps80031->irq_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
+ goto fail_mfd_add;
+ }
+
+ if (pdata->use_power_off && !pm_power_off) {
+ tps80031_power_off_dev = tps80031;
+ pm_power_off = tps80031_power_off;
+ }
+ return 0;
+
+fail_mfd_add:
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+fail_client_reg:
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] && (tps80031->clients[i] != client))
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return ret;
+}
+
+static int __devexit tps80031_remove(struct i2c_client *client)
+{
+ struct tps80031 *tps80031 = i2c_get_clientdata(client);
+ int i;
+
+ if (tps80031_power_off_dev == tps80031) {
+ tps80031_power_off_dev = NULL;
+ pm_power_off = NULL;
+ }
+
+ mfd_remove_devices(tps80031->dev);
+
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] != client)
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id tps80031_id_table[] = {
+ { "tps80031", TPS80031 },
+ { "tps80032", TPS80032 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
+
+static struct i2c_driver tps80031_driver = {
+ .driver = {
+ .name = "tps80031",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_probe,
+ .remove = __devexit_p(tps80031_remove),
+ .id_table = tps80031_id_table,
+};
+
+static int __init tps80031_init(void)
+{
+ return i2c_add_driver(&tps80031_driver);
+}
+subsys_initcall(tps80031_init);
+
+static void __exit tps80031_exit(void)
+{
+ i2c_del_driver(&tps80031_driver);
+}
+module_exit(tps80031_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("TPS80031 core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 11b76c0109f..4f3baadd003 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>
@@ -65,9 +66,6 @@
/* Triton Core internal information (BEGIN) */
-/* Last - for index max*/
-#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
-
#define TWL_NUM_SLAVES 4
#define SUB_CHIP_ID0 0
@@ -171,13 +169,7 @@ EXPORT_SYMBOL(twl_rev);
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
- u8 address;
-
- /* max numb of i2c_msg required is for read =2 */
- struct i2c_msg xfer_msg[2];
-
- /* To lock access to xfer_msg */
- struct mutex xfer_lock;
+ struct regmap *regmap;
};
static struct twl_client twl_modules[TWL_NUM_SLAVES];
@@ -189,7 +181,7 @@ struct twl_mapping {
};
static struct twl_mapping *twl_map;
-static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[] = {
/*
* NOTE: don't change this table without updating the
* <linux/i2c/twl.h> defines for TWL4030_MODULE_*
@@ -197,34 +189,62 @@ static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
*/
{ 0, TWL4030_BASEADD_USB },
-
{ 1, TWL4030_BASEADD_AUDIO_VOICE },
{ 1, TWL4030_BASEADD_GPIO },
{ 1, TWL4030_BASEADD_INTBR },
{ 1, TWL4030_BASEADD_PIH },
- { 1, TWL4030_BASEADD_TEST },
+ { 1, TWL4030_BASEADD_TEST },
{ 2, TWL4030_BASEADD_KEYPAD },
{ 2, TWL4030_BASEADD_MADC },
{ 2, TWL4030_BASEADD_INTERRUPTS },
{ 2, TWL4030_BASEADD_LED },
+
{ 2, TWL4030_BASEADD_MAIN_CHARGE },
{ 2, TWL4030_BASEADD_PRECHARGE },
{ 2, TWL4030_BASEADD_PWM0 },
{ 2, TWL4030_BASEADD_PWM1 },
{ 2, TWL4030_BASEADD_PWMA },
+
{ 2, TWL4030_BASEADD_PWMB },
{ 2, TWL5031_BASEADD_ACCESSORY },
{ 2, TWL5031_BASEADD_INTERRUPTS },
-
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
+
{ 3, TWL4030_BASEADD_PM_MASTER },
{ 3, TWL4030_BASEADD_PM_RECEIVER },
{ 3, TWL4030_BASEADD_RTC },
{ 3, TWL4030_BASEADD_SECURED_REG },
};
+static struct regmap_config twl4030_regmap_config[4] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4b */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
static struct twl_mapping twl6030_map[] = {
/*
* NOTE: don't change this table without updating the
@@ -254,14 +274,35 @@ static struct twl_mapping twl6030_map[] = {
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
-
{ SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
{ SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
{ SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
};
+static struct regmap_config twl6030_regmap_config[3] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
/*----------------------------------------------------------------------*/
/* Exported Functions */
@@ -283,9 +324,8 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int ret;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -301,32 +341,14 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /*
- * [MSG1]: fill the register address data
- * fill the data Tx buffer
- */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = num_bytes + 1;
- msg->flags = 0;
- msg->buf = value;
- /* over write the first byte of buffer with the register address */
- *value = twl_map[mod_no].base + reg;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 1) {
- pr_err("%s: i2c_write failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_write);
@@ -342,12 +364,10 @@ EXPORT_SYMBOL(twl_i2c_write);
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
- u8 val;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -363,34 +383,14 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /* [MSG1] fill the register address data */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = 1;
- msg->flags = 0; /* Read the register value */
- val = twl_map[mod_no].base + reg;
- msg->buf = &val;
- /* [MSG2] fill the data rx buffer */
- msg = &twl->xfer_msg[1];
- msg->addr = twl->address;
- msg->flags = I2C_M_RD; /* Read the register value */
- msg->len = num_bytes; /* only n bytes */
- msg->buf = value;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 2) {
- pr_err("%s: i2c_read failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_read);
@@ -404,12 +404,7 @@ EXPORT_SYMBOL(twl_i2c_read);
*/
int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
-
- /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
- u8 temp_buffer[2] = { 0 };
- /* offset 1 contains the data */
- temp_buffer[1] = value;
- return twl_i2c_write(mod_no, temp_buffer, reg, 1);
+ return twl_i2c_write(mod_no, &value, reg, 1);
}
EXPORT_SYMBOL(twl_i2c_write_u8);
@@ -646,8 +641,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc) {
- child = add_child(2, "twl4030_madc",
+ if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
+ twl_class_is_4030()) {
+ child = add_child(SUB_CHIP_ID2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -663,15 +659,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* HW security concerns, and "least privilege".
*/
sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
- child = add_child(sub_chip_id, "twl_rtc",
- NULL, 0,
+ child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
true, irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_PWM_TWL6030) && twl_class_is_6030()) {
- child = add_child(SUB_CHIP_ID1, "twl6030-pwm", NULL, 0,
+ if (IS_ENABLED(CONFIG_PWM_TWL)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+ false, 0, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
+ if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -723,9 +725,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
- child = add_child(0, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq0 = USB_PRES, irq1 = USB */
irq_base + USB_PRES_INTR_OFFSET,
irq_base + USB_INTR_OFFSET);
@@ -773,9 +774,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
pdata->usb->features = features;
- child = add_child(0, "twl6030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq1 = VBUS_PRES, irq0 = USB ID */
irq_base + USBOTG_INTR_OFFSET,
irq_base + USB_PRES_INTR_OFFSET);
@@ -799,22 +799,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
+ false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(1, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
+ true, irq_base + 8 + 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
twl_class_is_4030()) {
- sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl4030-audio",
+ child = add_child(SUB_CHIP_ID1, "twl4030-audio",
pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
@@ -1054,7 +1054,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(3, "twl4030_bci",
+ child = add_child(SUB_CHIP_ID3, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci), false,
/* irq0 = CHG_PRES, irq1 = BCI */
irq_base + BCI_PRES_INTR_OFFSET,
@@ -1077,8 +1077,8 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1086,12 +1086,10 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1176,6 +1174,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct twl4030_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
+ struct regmap_config *twl_regmap_config;
int irq_base = 0;
int status;
unsigned i, num_slaves;
@@ -1229,22 +1228,23 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
if ((id->driver_data) & TWL6030_CLASS) {
twl_id = TWL6030_CLASS_ID;
twl_map = &twl6030_map[0];
+ twl_regmap_config = twl6030_regmap_config;
num_slaves = TWL_NUM_SLAVES - 1;
} else {
twl_id = TWL4030_CLASS_ID;
twl_map = &twl4030_map[0];
+ twl_regmap_config = twl4030_regmap_config;
num_slaves = TWL_NUM_SLAVES;
}
for (i = 0; i < num_slaves; i++) {
struct twl_client *twl = &twl_modules[i];
- twl->address = client->addr + i;
if (i == 0) {
twl->client = client;
} else {
twl->client = i2c_new_dummy(client->adapter,
- twl->address);
+ client->addr + i);
if (!twl->client) {
dev_err(&client->dev,
"can't attach client %d\n", i);
@@ -1252,7 +1252,16 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto fail;
}
}
- mutex_init(&twl->xfer_lock);
+
+ twl->regmap = devm_regmap_init_i2c(twl->client,
+ &twl_regmap_config[i]);
+ if (IS_ERR(twl->regmap)) {
+ status = PTR_ERR(twl->regmap);
+ dev_err(&client->dev,
+ "Failed to allocate regmap %d, err: %d\n", i,
+ status);
+ goto fail;
+ }
}
inuse = true;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index cdd1173ed4e..a5f9888aa19 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -295,8 +295,8 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid)
irqreturn_t ret;
u8 pih_isr;
- ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
- REG_PIH_ISR_P1);
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
if (ret) {
pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
return IRQ_NONE;
@@ -501,7 +501,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
} imr;
/* byte[0] gets overwritten as we write ... */
- imr.word = cpu_to_le32(agent->imr << 8);
+ imr.word = cpu_to_le32(agent->imr);
agent->imr_change_pending = false;
/* write the whole mask ... simpler than subsetting it */
@@ -526,7 +526,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
* any processor on the other IRQ line, EDR registers are
* shared.
*/
- status = twl_i2c_read(sih->module, bytes + 1,
+ status = twl_i2c_read(sih->module, bytes,
sih->edr_offset, sih->bytes_edr);
if (status) {
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -538,7 +538,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
while (edge_change) {
int i = fls(edge_change) - 1;
struct irq_data *idata;
- int byte = 1 + (i >> 2);
+ int byte = i >> 2;
int off = (i & 0x3) * 2;
unsigned int type;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index a39dcf3e213..88ff9dc8330 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -173,7 +173,7 @@ static int twl4030battery_temperature(int raw_volt)
volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
	/* Getting and calculating the supply current in micro amperes */
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
REG_BCICTL2);
if (ret < 0)
return ret;
@@ -196,7 +196,7 @@ static int twl4030battery_current(int raw_volt)
int ret;
u8 val;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
TWL4030_BCI_BCICTL1);
if (ret)
return ret;
@@ -635,7 +635,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
int ret;
u8 regval;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
@@ -646,7 +646,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
else
regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
@@ -668,7 +668,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
u8 regval;
int ret;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_MADC_CTRL1);
if (ret) {
dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
@@ -725,7 +725,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_current_generator;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
@@ -733,7 +733,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
goto err_i2c;
}
regval |= TWL4030_BCI_MESBAT;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index a5332063183..4dae241e501 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -128,12 +128,10 @@ static int twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_MEMORY_ADDRESS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_MEMORY_ADDRESS);
if (err)
goto out;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
- R_MEMORY_DATA);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, byte, R_MEMORY_DATA);
out:
return err;
}
@@ -189,19 +187,16 @@ static int twl4030_config_wakeup3_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 wakeup sequence for P3 config error\n");
@@ -214,43 +209,38 @@ static int twl4030_config_wakeup12_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
}
@@ -267,8 +257,7 @@ static int twl4030_config_sleep_sequence(u8 address)
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_A2S);
if (err)
pr_err("TWL4030 sleep sequence config error\n");
@@ -282,42 +271,35 @@ static int twl4030_config_warmreset_sequence(u8 address)
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 warmreset seq config error\n");
@@ -341,7 +323,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &grp,
rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
@@ -352,7 +334,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
grp &= ~DEV_GRP_MASK;
grp |= rconfig->devgroup << DEV_GRP_SHIFT;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
@@ -361,7 +343,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set resource types */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
@@ -379,7 +361,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
type |= rconfig->type2 << TYPE2_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
@@ -387,7 +369,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set remap states */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d remap could not be read\n",
@@ -405,7 +387,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
@@ -463,49 +445,47 @@ int twl4030_remove_script(u8 flags)
{
int err = 0;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
if (flags & TWL4030_WRST_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_WARM);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP12_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A12);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP3_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A3);
if (err)
return err;
}
if (flags & TWL4030_SLEEP_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_A2S);
if (err)
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
@@ -521,7 +501,7 @@ void twl4030_power_off(void)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
pr_err("TWL4030 Unable to power off\n");
@@ -534,15 +514,13 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
struct twl4030_resconfig *resconfig;
u8 val, address = twl4030_start_script_address;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
@@ -567,14 +545,14 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
/* Board has to be wired properly to use this feature */
if (twl4030_scripts->use_poweroff && !pm_power_off) {
/* Default for SEQ_OFFSYNC is set, lets ensure this */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_warning("TWL4030 Unable to read registers\n");
} else if (!(val & SEQ_OFFSYNC)) {
val |= SEQ_OFFSYNC;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
@@ -586,8 +564,8 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
relock:
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index b76902f1e44..277a8dba42d 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -355,7 +355,7 @@ int twl6030_init_irq(struct device *dev, int irq_num)
static struct irq_chip twl6030_irq_chip;
int status = 0;
int i;
- u8 mask[4];
+ u8 mask[3];
nr_irqs = TWL6030_NR_IRQS;
@@ -370,9 +370,9 @@ int twl6030_init_irq(struct device *dev, int irq_num)
irq_end = irq_base + nr_irqs;
+ mask[0] = 0xFF;
mask[1] = 0xFF;
mask[2] = 0xFF;
- mask[3] = 0xFF;
/* mask all int lines */
twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
deleted file mode 100644
index 4b42543da22..00000000000
--- a/drivers/mfd/twl6040-irq.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Interrupt controller support for TWL6040
- *
- * Author: Misael Lopez Cruz <misael.lopez@ti.com>
- *
- * Copyright: (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl6040.h>
-
-struct twl6040_irq_data {
- int mask;
- int status;
-};
-
-static struct twl6040_irq_data twl6040_irqs[] = {
- {
- .mask = TWL6040_THMSK,
- .status = TWL6040_THINT,
- },
- {
- .mask = TWL6040_PLUGMSK,
- .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
- },
- {
- .mask = TWL6040_HOOKMSK,
- .status = TWL6040_HOOKINT,
- },
- {
- .mask = TWL6040_HFMSK,
- .status = TWL6040_HFINT,
- },
- {
- .mask = TWL6040_VIBMSK,
- .status = TWL6040_VIBINT,
- },
- {
- .mask = TWL6040_READYMSK,
- .status = TWL6040_READYINT,
- },
-};
-
-static inline
-struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
- int irq)
-{
- return &twl6040_irqs[irq - twl6040->irq_base];
-}
-
-static void twl6040_irq_lock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_sync_unlock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- /* write back to hardware any change in irq mask */
- if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
- twl6040->irq_masks_cache = twl6040->irq_masks_cur;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
- twl6040->irq_masks_cur);
- }
-
- mutex_unlock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_enable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur &= ~irq_data->mask;
-}
-
-static void twl6040_irq_disable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur |= irq_data->mask;
-}
-
-static struct irq_chip twl6040_irq_chip = {
- .name = "twl6040",
- .irq_bus_lock = twl6040_irq_lock,
- .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
- .irq_enable = twl6040_irq_enable,
- .irq_disable = twl6040_irq_disable,
-};
-
-static irqreturn_t twl6040_irq_thread(int irq, void *data)
-{
- struct twl6040 *twl6040 = data;
- u8 intid;
- int i;
-
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* apply masking and report (backwards to handle READYINT first) */
- for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
- if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
- intid &= ~twl6040_irqs[i].status;
- if (intid & twl6040_irqs[i].status)
- handle_nested_irq(twl6040->irq_base + i);
- }
-
- /* ack unmasked irqs */
- twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
-
- return IRQ_HANDLED;
-}
-
-int twl6040_irq_init(struct twl6040 *twl6040)
-{
- struct device_node *node = twl6040->dev->of_node;
- int i, nr_irqs, irq_base, ret;
- u8 val;
-
- mutex_init(&twl6040->irq_mutex);
-
- /* mask the individual interrupt sources */
- twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
- twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
-
- nr_irqs = ARRAY_SIZE(twl6040_irqs);
-
- irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
- dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
- return irq_base;
- }
- twl6040->irq_base = irq_base;
-
- irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
- &irq_domain_simple_ops, NULL);
-
- /* Register them with genirq */
- for (i = irq_base; i < irq_base + nr_irqs; i++) {
- irq_set_chip_data(i, twl6040);
- irq_set_chip_and_handler(i, &twl6040_irq_chip,
- handle_level_irq);
- irq_set_nested_thread(i, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(i, IRQF_VALID);
-#else
- irq_set_noprobe(i);
-#endif
- }
-
- ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
- IRQF_ONESHOT, "twl6040", twl6040);
- if (ret) {
- dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
- twl6040->irq, ret);
- return ret;
- }
-
- /* reset interrupts */
- val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* interrupts cleared on write */
- twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
-
- return 0;
-}
-EXPORT_SYMBOL(twl6040_irq_init);
-
-void twl6040_irq_exit(struct twl6040 *twl6040)
-{
- free_irq(twl6040->irq, twl6040);
-}
-EXPORT_SYMBOL(twl6040_irq_exit);
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040.c
index 3f2a1cf02fc..583be76e36a 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040.c
@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
#include <linux/regulator/consumer.h>
@@ -104,7 +103,7 @@ int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
EXPORT_SYMBOL(twl6040_clear_bits);
/* twl6040 codec manual power-up sequence */
-static int twl6040_power_up(struct twl6040 *twl6040)
+static int twl6040_power_up_manual(struct twl6040 *twl6040)
{
u8 ldoctl, ncpctl, lppllctl;
int ret;
@@ -158,11 +157,12 @@ ncp_err:
ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ dev_err(twl6040->dev, "manual power-up failed\n");
return ret;
}
/* twl6040 manual power-down sequence */
-static void twl6040_power_down(struct twl6040 *twl6040)
+static void twl6040_power_down_manual(struct twl6040 *twl6040)
{
u8 ncpctl, ldoctl, lppllctl;
@@ -192,45 +192,48 @@ static void twl6040_power_down(struct twl6040 *twl6040)
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
}
-static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+static irqreturn_t twl6040_readyint_handler(int irq, void *data)
{
struct twl6040 *twl6040 = data;
- u8 intid, status;
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ complete(&twl6040->ready);
- if (intid & TWL6040_READYINT)
- complete(&twl6040->ready);
+ return IRQ_HANDLED;
+}
- if (intid & TWL6040_THINT) {
- status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
- if (status & TWL6040_TSHUTDET) {
- dev_warn(twl6040->dev,
- "Thermal shutdown, powering-off");
- twl6040_power(twl6040, 0);
- } else {
- dev_warn(twl6040->dev,
- "Leaving thermal shutdown, powering-on");
- twl6040_power(twl6040, 1);
- }
+static irqreturn_t twl6040_thint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 status;
+
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_TSHUTDET) {
+ dev_warn(twl6040->dev, "Thermal shutdown, powering-off");
+ twl6040_power(twl6040, 0);
+ } else {
+ dev_warn(twl6040->dev, "Leaving thermal shutdown, powering-on");
+ twl6040_power(twl6040, 1);
}
return IRQ_HANDLED;
}
-static int twl6040_power_up_completion(struct twl6040 *twl6040,
- int naudint)
+static int twl6040_power_up_automatic(struct twl6040 *twl6040)
{
int time_left;
- u8 intid;
+
+ gpio_set_value(twl6040->audpwron, 1);
time_left = wait_for_completion_timeout(&twl6040->ready,
msecs_to_jiffies(144));
if (!time_left) {
+ u8 intid;
+
+ dev_warn(twl6040->dev, "timeout waiting for READYINT\n");
intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
if (!(intid & TWL6040_READYINT)) {
- dev_err(twl6040->dev,
- "timeout waiting for READYINT\n");
+ dev_err(twl6040->dev, "automatic power-up failed\n");
+ gpio_set_value(twl6040->audpwron, 0);
return -ETIMEDOUT;
}
}
@@ -240,8 +243,6 @@ static int twl6040_power_up_completion(struct twl6040 *twl6040,
int twl6040_power(struct twl6040 *twl6040, int on)
{
- int audpwron = twl6040->audpwron;
- int naudint = twl6040->irq;
int ret = 0;
mutex_lock(&twl6040->mutex);
@@ -251,23 +252,17 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (twl6040->power_count++)
goto out;
- if (gpio_is_valid(audpwron)) {
- /* use AUDPWRON line */
- gpio_set_value(audpwron, 1);
- /* wait for power-up completion */
- ret = twl6040_power_up_completion(twl6040, naudint);
+ if (gpio_is_valid(twl6040->audpwron)) {
+ /* use automatic power-up sequence */
+ ret = twl6040_power_up_automatic(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "automatic power-down failed\n");
twl6040->power_count = 0;
goto out;
}
} else {
/* use manual power-up sequence */
- ret = twl6040_power_up(twl6040);
+ ret = twl6040_power_up_manual(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "manual power-up failed\n");
twl6040->power_count = 0;
goto out;
}
@@ -288,15 +283,15 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (--twl6040->power_count)
goto out;
- if (gpio_is_valid(audpwron)) {
+ if (gpio_is_valid(twl6040->audpwron)) {
/* use AUDPWRON line */
- gpio_set_value(audpwron, 0);
+ gpio_set_value(twl6040->audpwron, 0);
/* power-down sequence latency */
usleep_range(500, 700);
} else {
/* use manual power-down sequence */
- twl6040_power_down(twl6040);
+ twl6040_power_down_manual(twl6040);
}
twl6040->sysclk = 0;
twl6040->mclk = 0;
@@ -503,6 +498,25 @@ static struct regmap_config twl6040_regmap_config = {
.readable_reg = twl6040_readable_reg,
};
+static const struct regmap_irq twl6040_irqs[] = {
+ { .reg_offset = 0, .mask = TWL6040_THINT, },
+ { .reg_offset = 0, .mask = TWL6040_PLUGINT | TWL6040_UNPLUGINT, },
+ { .reg_offset = 0, .mask = TWL6040_HOOKINT, },
+ { .reg_offset = 0, .mask = TWL6040_HFINT, },
+ { .reg_offset = 0, .mask = TWL6040_VIBINT, },
+ { .reg_offset = 0, .mask = TWL6040_READYINT, },
+};
+
+static struct regmap_irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irqs = twl6040_irqs,
+ .num_irqs = ARRAY_SIZE(twl6040_irqs),
+
+ .num_regs = 1,
+ .status_base = TWL6040_REG_INTID,
+ .mask_base = TWL6040_REG_INTMR,
+};
+
static int __devinit twl6040_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -578,18 +592,31 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto gpio_err;
}
- /* codec interrupt */
- ret = twl6040_irq_init(twl6040);
- if (ret)
+ ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
+ IRQF_ONESHOT, 0, &twl6040_irq_chip,
+ &twl6040->irq_data);
+ if (ret < 0)
goto irq_init_err;
- ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
- NULL, twl6040_naudint_handler, IRQF_ONESHOT,
+ twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_READY);
+ twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_TH);
+
+ ret = request_threaded_irq(twl6040->irq_ready, NULL,
+ twl6040_readyint_handler, IRQF_ONESHOT,
"twl6040_irq_ready", twl6040);
if (ret) {
- dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
- ret);
- goto irq_err;
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
+ goto readyirq_err;
+ }
+
+ ret = request_threaded_irq(twl6040->irq_th, NULL,
+ twl6040_thint_handler, IRQF_ONESHOT,
+ "twl6040_irq_th", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
+ goto thirq_err;
}
/* dual-access registers controlled by I2C only */
@@ -601,7 +628,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
* The ASoC codec can work without pdata, pass the platform_data only if
* it has been provided.
*/
- irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
cell = &twl6040->cells[children];
cell->name = "twl6040-codec";
twl6040_codec_rsrc[0].start = irq;
@@ -615,7 +642,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
children++;
if (twl6040_has_vibra(pdata, node)) {
- irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -654,9 +681,11 @@ static int __devinit twl6040_probe(struct i2c_client *client,
return 0;
mfd_err:
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
-irq_err:
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+thirq_err:
+ free_irq(twl6040->irq_ready, twl6040);
+readyirq_err:
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
@@ -680,8 +709,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_ready, twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
new file mode 100644
index 00000000000..af2a6703f34
--- /dev/null
+++ b/drivers/mfd/viperboard.c
@@ -0,0 +1,137 @@
+/*
+ * Nano River Technologies viperboard driver
+ *
+ * This is the core driver for the viperboard. There are cell drivers
+ * available for I2C, ADC and both GPIOs. SPI is not yet supported.
+ * The drivers do not support all features the board exposes. See the
+ * user manual of the viperboard.
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/viperboard.h>
+
+#include <linux/usb.h>
+
+
+static const struct usb_device_id vprbrd_table[] = {
+ { USB_DEVICE(0x2058, 0x1005) }, /* Nano River Technologies */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, vprbrd_table);
+
+static struct mfd_cell vprbrd_devs[] = {
+ {
+ .name = "viperboard-gpio",
+ },
+ {
+ .name = "viperboard-i2c",
+ },
+ {
+ .name = "viperboard-adc",
+ },
+};
+
+static int vprbrd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct vprbrd *vb;
+
+ u16 version = 0;
+ int pipe, ret;
+
+ /* allocate memory for our device state and initialize it */
+ vb = kzalloc(sizeof(*vb), GFP_KERNEL);
+ if (vb == NULL) {
+ dev_err(&interface->dev, "Out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&vb->lock);
+
+ vb->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, vb);
+ dev_set_drvdata(&vb->pdev.dev, vb);
+
+	/* get version information, major first, then minor */
+ pipe = usb_rcvctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MAJOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1)
+ version = vb->buf[0];
+
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MINOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1) {
+ version <<= 8;
+ version = version | vb->buf[0];
+ }
+
+ dev_info(&interface->dev,
+ "version %x.%02x found at bus %03d address %03d\n",
+ version >> 8, version & 0xff,
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
+ ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+ if (ret != 0) {
+ dev_err(&interface->dev, "Failed to add mfd devices to core.");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (vb) {
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+ }
+
+ return ret;
+}
+
+static void vprbrd_disconnect(struct usb_interface *interface)
+{
+ struct vprbrd *vb = usb_get_intfdata(interface);
+
+ mfd_remove_devices(&interface->dev);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+
+ dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver vprbrd_driver = {
+ .name = "viperboard",
+ .probe = vprbrd_probe,
+ .disconnect = vprbrd_disconnect,
+ .id_table = vprbrd_table,
+};
+
+module_usb_driver(vprbrd_driver);
+
+MODULE_DESCRIPTION("Nano River Technologies viperboard mfd core driver");
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 14490cc785d..088872ab633 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -56,6 +56,18 @@ static const struct reg_default wm5102_reva_patch[] = {
{ 0x80, 0x0000 },
};
+static const struct reg_default wm5102_revb_patch[] = {
+ { 0x80, 0x0003 },
+ { 0x081, 0xE022 },
+ { 0x410, 0x6080 },
+ { 0x418, 0x6080 },
+ { 0x420, 0x6080 },
+ { 0x428, 0xC000 },
+ { 0x441, 0x8014 },
+ { 0x458, 0x000b },
+ { 0x80, 0x0000 },
+};
+
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
@@ -65,7 +77,9 @@ int wm5102_patch(struct arizona *arizona)
wm5102_reva_patch,
ARRAY_SIZE(wm5102_reva_patch));
default:
- return 0;
+ return regmap_register_patch(arizona->regmap,
+ wm5102_revb_patch,
+ ARRAY_SIZE(wm5102_revb_patch));
}
}
@@ -258,6 +272,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
{ 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
@@ -290,6 +305,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x00000212, 0x0001 }, /* R530 - LDO1 Control 2 */
{ 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
@@ -1047,6 +1063,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_RATE_ESTIMATOR_3:
case ARIZONA_RATE_ESTIMATOR_4:
case ARIZONA_RATE_ESTIMATOR_5:
+ case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
case ARIZONA_FLL1_CONTROL_1:
case ARIZONA_FLL1_CONTROL_2:
case ARIZONA_FLL1_CONTROL_3:
@@ -1054,6 +1071,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1069,6 +1087,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
@@ -1079,6 +1098,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_GPIO_CLOCK:
case ARIZONA_MIC_CHARGE_PUMP_1:
case ARIZONA_LDO1_CONTROL_1:
+ case ARIZONA_LDO1_CONTROL_2:
case ARIZONA_LDO2_CONTROL_1:
case ARIZONA_MIC_BIAS_CTRL_1:
case ARIZONA_MIC_BIAS_CTRL_2:
@@ -1802,6 +1822,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
return true;
default:
return false;
@@ -1810,15 +1831,23 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
+ if (reg > 0xffff)
+ return true;
+
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FX_CTRL2:
case ARIZONA_INTERRUPT_STATUS_1:
case ARIZONA_INTERRUPT_STATUS_2:
@@ -1844,6 +1873,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_MIC_DETECT_3:
return true;
@@ -1852,12 +1882,14 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
}
}
+#define WM5102_MAX_REGISTER 0x1a8fff
+
const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
@@ -1871,7 +1903,7 @@ const struct regmap_config wm5102_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
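The rev B table above is delivered through the same regmap patch mechanism already used for the rev A path. A minimal sketch of that API follows; the table contents and helper name are invented for illustration, not taken from the patch.

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Illustrative values only -- not the real wm5102 patch data */
static const struct reg_default example_revb_patch[] = {
	{ 0x080, 0x0003 },	/* enter patch mode (device specific) */
	{ 0x410, 0x6080 },	/* tuned analogue defaults */
	{ 0x080, 0x0000 },	/* leave patch mode */
};

static int example_apply_patch(struct regmap *regmap, int rev)
{
	switch (rev) {
	case 0:
		return 0;	/* rev A handled elsewhere in this sketch */
	default:
		/* Written immediately and re-applied after a cache sync */
		return regmap_register_patch(regmap, example_revb_patch,
					     ARRAY_SIZE(example_revb_patch));
	}
}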
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index c7f62ac544a..57c488d42d3 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -401,13 +401,19 @@ static const struct reg_default wm1811_reva_patch[] = {
*/
static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
- struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct wm8994_pdata *pdata;
struct regmap_config *regmap_config;
const struct reg_default *regmap_patch = NULL;
const char *devname;
int ret, i, patch_regs;
int pulls = 0;
+ if (dev_get_platdata(wm8994->dev)) {
+ pdata = dev_get_platdata(wm8994->dev);
+ wm8994->pdata = *pdata;
+ }
+ pdata = &wm8994->pdata;
+
dev_set_drvdata(wm8994->dev, wm8994);
/* Add the on-chip regulators first for bootstrapping */
@@ -529,11 +535,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
case 2:
case 3:
+ default:
regmap_patch = wm8994_revc_patch;
patch_regs = ARRAY_SIZE(wm8994_revc_patch);
break;
- default:
- break;
}
break;
@@ -552,17 +557,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
/* Revision C did not change the relevant layer */
if (wm8994->revision > 1)
wm8994->revision++;
- switch (wm8994->revision) {
- case 0:
- case 1:
- case 2:
- case 3:
- regmap_patch = wm1811_reva_patch;
- patch_regs = ARRAY_SIZE(wm1811_reva_patch);
- break;
- default:
- break;
- }
+
+ regmap_patch = wm1811_reva_patch;
+ patch_regs = ARRAY_SIZE(wm1811_reva_patch);
break;
default:
@@ -604,24 +601,21 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
}
- if (pdata) {
- wm8994->irq_base = pdata->irq_base;
- wm8994->gpio_base = pdata->gpio_base;
-
- /* GPIO configuration is only applied if it's non-zero */
- for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
- if (pdata->gpio_defaults[i]) {
- wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
- 0xffff,
- pdata->gpio_defaults[i]);
- }
+ wm8994->irq_base = pdata->irq_base;
+ wm8994->gpio_base = pdata->gpio_base;
+
+ /* GPIO configuration is only applied if it's non-zero */
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+ if (pdata->gpio_defaults[i]) {
+ wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
+ 0xffff, pdata->gpio_defaults[i]);
}
+ }
- wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
+ wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
- if (pdata->spkmode_pu)
- pulls |= WM8994_SPKMODE_PU;
- }
+ if (pdata->spkmode_pu)
+ pulls |= WM8994_SPKMODE_PU;
/* Disable unneeded pulls */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
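The reworked init copies any board-supplied platform data into the wm8994 structure and then points pdata at that embedded copy, so the GPIO and pull configuration above no longer needs a NULL check. A minimal sketch of the pattern, with hypothetical structure and helper names:

#include <linux/device.h>

/* Hypothetical board data and driver state for the sketch */
struct example_pdata {
	int irq_base;
	int gpio_base;
};

struct example_chip {
	struct device *dev;
	struct example_pdata pdata;	/* embedded copy, zeroed by the allocator */
};

static struct example_pdata *example_init_pdata(struct example_chip *chip)
{
	struct example_pdata *pdata = dev_get_platdata(chip->dev);

	/* Copy board data when the machine supplies it ... */
	if (pdata)
		chip->pdata = *pdata;

	/* ... then hand back the embedded copy, so callers never need
	 * a NULL check on pdata. */
	return &chip->pdata;
}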
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index c58f9abcb35..158da5a81a6 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
static LIST_HEAD(ssc_list);
@@ -29,7 +31,13 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
spin_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
- if (ssc->pdev->id == ssc_num) {
+ if (ssc->pdev->dev.of_node) {
+ if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
+ == ssc_num) {
+ ssc_valid = 1;
+ break;
+ }
+ } else if (ssc->pdev->id == ssc_num) {
ssc_valid = 1;
break;
}
@@ -68,39 +76,93 @@ void ssc_free(struct ssc_device *ssc)
}
EXPORT_SYMBOL(ssc_free);
-static int __init ssc_probe(struct platform_device *pdev)
+static struct atmel_ssc_platform_data at91rm9200_config = {
+ .use_dma = 0,
+};
+
+static struct atmel_ssc_platform_data at91sam9g45_config = {
+ .use_dma = 1,
+};
+
+static const struct platform_device_id atmel_ssc_devtypes[] = {
+ {
+ .name = "at91rm9200_ssc",
+ .driver_data = (unsigned long) &at91rm9200_config,
+ }, {
+ .name = "at91sam9g45_ssc",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ssc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-ssc",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-ssc",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
+#endif
+
+static inline const struct atmel_ssc_platform_data * __init
+ atmel_ssc_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_ssc_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+
+ return (struct atmel_ssc_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int ssc_probe(struct platform_device *pdev)
{
- int retval = 0;
struct resource *regs;
struct ssc_device *ssc;
+ const struct atmel_ssc_platform_data *plat_dat;
- ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL);
+ ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
dev_dbg(&pdev->dev, "out of memory\n");
- retval = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
+ ssc->pdev = pdev;
+
+ plat_dat = atmel_ssc_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
+ ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_dbg(&pdev->dev, "no mmio resource defined\n");
- retval = -ENXIO;
- goto out_free;
+ return -ENXIO;
}
- ssc->clk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(ssc->clk)) {
- dev_dbg(&pdev->dev, "no pclk clock defined\n");
- retval = -ENXIO;
- goto out_free;
- }
-
- ssc->pdev = pdev;
- ssc->regs = ioremap(regs->start, resource_size(regs));
+ ssc->regs = devm_request_and_ioremap(&pdev->dev, regs);
if (!ssc->regs) {
dev_dbg(&pdev->dev, "ioremap failed\n");
- retval = -EINVAL;
- goto out_clk;
+ return -EINVAL;
+ }
+
+ ssc->phybase = regs->start;
+
+ ssc->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(ssc->clk)) {
+ dev_dbg(&pdev->dev, "no pclk clock defined\n");
+ return -ENXIO;
}
/* disable all interrupts */
@@ -112,8 +174,7 @@ static int __init ssc_probe(struct platform_device *pdev)
ssc->irq = platform_get_irq(pdev, 0);
if (!ssc->irq) {
dev_dbg(&pdev->dev, "could not get irq\n");
- retval = -ENXIO;
- goto out_unmap;
+ return -ENXIO;
}
spin_lock(&user_lock);
@@ -125,16 +186,7 @@ static int __init ssc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
ssc->regs, ssc->irq);
- goto out;
-
-out_unmap:
- iounmap(ssc->regs);
-out_clk:
- clk_put(ssc->clk);
-out_free:
- kfree(ssc);
-out:
- return retval;
+ return 0;
}
static int ssc_remove(struct platform_device *pdev)
@@ -142,34 +194,23 @@ static int ssc_remove(struct platform_device *pdev)
struct ssc_device *ssc = platform_get_drvdata(pdev);
spin_lock(&user_lock);
- iounmap(ssc->regs);
- clk_put(ssc->clk);
list_del(&ssc->list);
- kfree(ssc);
spin_unlock(&user_lock);
return 0;
}
static struct platform_driver ssc_driver = {
- .remove = ssc_remove,
.driver = {
.name = "ssc",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_ssc_dt_ids),
},
+ .id_table = atmel_ssc_devtypes,
+ .probe = ssc_probe,
+ .remove = ssc_remove,
};
-
-static int __init ssc_init(void)
-{
- return platform_driver_probe(&ssc_driver, ssc_probe);
-}
-module_init(ssc_init);
-
-static void __exit ssc_exit(void)
-{
- platform_driver_unregister(&ssc_driver);
-}
-module_exit(ssc_exit);
+module_platform_driver(ssc_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 636409f9667..9299a8c29a6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -370,7 +370,7 @@ void mei_watchdog_register(struct mei_device *dev)
void mei_watchdog_unregister(struct mei_device *dev)
{
- if (test_bit(WDOG_UNREGISTERED, &amt_wd_dev.status))
+ if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
return;
watchdog_set_drvdata(&amt_wd_dev, NULL);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8d082b46426..d971817182f 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -53,6 +53,10 @@
#include <linux/kthread.h>
#include "xpc.h"
+#ifdef CONFIG_X86_64
+#include <asm/traps.h>
+#endif
+
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE;
}
+/* Used to only allow one cpu to complete disconnect */
+static unsigned int xpc_die_disconnecting;
+
/*
* Notify other partitions to deactivate from us by first disengaging from all
* references to our memory.
@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
long keep_waiting;
long wait_to_print;
+ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
+ return;
+
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
* about the lack of a heartbeat.
*/
static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64 /* !!! temporary kludge */
switch (event) {
@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
break;
}
#else
- xpc_die_deactivate();
+ struct die_args *die_args = _die_args;
+
+ switch (event) {
+ case DIE_TRAP:
+ if (die_args->trapnr == X86_TRAP_DF)
+ xpc_die_deactivate();
+
+ if (((die_args->trapnr == X86_TRAP_MF) ||
+ (die_args->trapnr == X86_TRAP_XF)) &&
+ !user_mode_vm(die_args->regs))
+ xpc_die_deactivate();
+
+ break;
+ case DIE_INT3:
+ case DIE_DEBUG:
+ break;
+ case DIE_OOPS:
+ case DIE_GPF:
+ default:
+ xpc_die_deactivate();
+ }
#endif
return NOTIFY_DONE;
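The new xpc_die_disconnecting flag makes the disconnect path one-shot: only the first CPU that enters the die notifier performs the deactivation. A minimal sketch of the cmpxchg guard, with invented names:

#include <linux/atomic.h>

static unsigned int example_once;

static void example_run_once(void (*fn)(void))
{
	/* The first caller flips 0 -> 1 and does the work; cmpxchg()
	 * returns the old value, so every later caller sees 1 and
	 * bails out immediately. */
	if (cmpxchg(&example_once, 0, 1))
		return;

	fn();
}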
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 737e4edc241..8d13c659452 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -533,7 +533,7 @@ config MMC_DW_PLTFM
If unsure, say Y.
config MMC_DW_EXYNOS
- tristate "Exynos specific extentions for Synopsys DW Memory Card Interface"
+ tristate "Exynos specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
select MMC_DW_PLTFM
help
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b648058d718..e4e218c930b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -49,6 +49,8 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 12eff6f8cab..571915dfb21 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -21,6 +21,7 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -382,8 +383,6 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
0xFF, (u8)data->blocks);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
0xFF, (u8)(data->blocks >> 8));
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- CARD_DATA_SOURCE, 0x01, RING_BUFFER);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
DMA_DONE_INT, DMA_DONE_INT);
@@ -407,6 +406,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
0x01, RING_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
trans_mode | SD_TRANSFER_START);
rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 94539312995..7c057a05adb 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,19 +26,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
-#include <linux/magic.h>
#include <linux/module.h>
+#include <uapi/linux/magic.h>
+
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-#ifndef SQUASHFS_MAGIC
-#define SQUASHFS_MAGIC 0x73717368
-#endif
-
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 63d2a64331f..6eeb84c81bc 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -37,8 +37,7 @@
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
-#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
@@ -79,7 +78,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
unsigned int rootfsaddr, kerneladdr, spareaddr;
unsigned int rootfslen, kernellen, sparelen, totallen;
unsigned int cfelen, nvramlen;
- int namelen = 0;
+ unsigned int cfe_erasesize;
int i;
u32 computed_crc;
bool rootfs_first = false;
@@ -87,8 +86,11 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
if (bcm63xx_detect_cfe(master))
return -EINVAL;
- cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
- nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM63XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = cfe_erasesize;
/* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
@@ -121,7 +123,6 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
spareaddr = roundup(totallen, master->erasesize) + cfelen;
- sparelen = master->size - spareaddr - nvramlen;
if (rootfsaddr < kerneladdr) {
/* default Broadcom layout */
@@ -139,19 +140,15 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
rootfslen = 0;
rootfsaddr = 0;
spareaddr = cfelen;
- sparelen = master->size - cfelen - nvramlen;
}
+ sparelen = master->size - spareaddr - nvramlen;
/* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
+ if (rootfslen > 0)
nrparts++;
- namelen += 6;
- }
- if (kernellen > 0) {
+
+ if (kernellen > 0)
nrparts++;
- namelen += 6;
- }
/* Ask kernel for more memory */
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
@@ -193,17 +190,16 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
+ curpart++;
/* Global partition "linux" to make easy firmware upgrade */
- curpart++;
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
for (i = 0; i < nrparts; i++)
- pr_info("Partition %d is %s offset %lx and length %lx\n", i,
- parts[i].name, (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
pr_info("Spare partition is offset %x and length %x\n", spareaddr,
sparelen);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 5ff5c4a1694..b86197286f2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1536,8 +1536,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1);
}
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
xip_enable(map, chip, adr);
/* FIXME - should have reset delay before continuing */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index aed1b8a63c9..c533f27d863 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -56,8 +56,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING UINT_MAX
-#define OFFSET_CONTINUOUS UINT_MAX
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -89,7 +89,7 @@ static struct mtd_partition * newpart(char *s,
int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size, offset = OFFSET_CONTINUOUS;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -104,7 +104,8 @@ static struct mtd_partition * newpart(char *s,
} else {
size = memparse(s, &s);
if (size < PAGE_SIZE) {
- printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+ printk(KERN_ERR ERRP "partition size too small (%llx)\n",
+ size);
return ERR_PTR(-EINVAL);
}
}
@@ -296,7 +297,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
- unsigned long offset;
+ unsigned long long offset;
int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
@@ -308,48 +309,52 @@ static int parse_cmdline_partitions(struct mtd_info *master,
return err;
}
+ /*
+ * Search for the partition definition matching master->name.
+	 * If master->name is not set, stop at the first partition definition.
+ */
for (part = partitions; part; part = part->next) {
- if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) {
- for (i = 0, offset = 0; i < part->num_parts; i++) {
- if (part->parts[i].offset == OFFSET_CONTINUOUS)
- part->parts[i].offset = offset;
- else
- offset = part->parts[i].offset;
-
- if (part->parts[i].size == SIZE_REMAINING)
- part->parts[i].size = master->size - offset;
-
- if (part->parts[i].size == 0) {
- printk(KERN_WARNING ERRP
- "%s: skipping zero sized partition\n",
- part->mtd_id);
- part->num_parts--;
- memmove(&part->parts[i],
- &part->parts[i + 1],
- sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
- }
- offset += part->parts[i].size;
- }
-
- *pparts = kmemdup(part->parts,
- sizeof(*part->parts) * part->num_parts,
- GFP_KERNEL);
- if (!*pparts)
- return -ENOMEM;
-
- return part->num_parts;
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ break;
+ }
+
+ if (!part)
+ return 0;
+
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ continue;
}
+
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
}
- return 0;
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
}
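Widening the size and offset variables to unsigned long long matches memparse(), which already returns a 64-bit value, so command-line partitions of 4 GiB and above survive on 32-bit systems. A small sketch; the helper name is invented.

#include <linux/kernel.h>

static unsigned long long example_parse_size(const char *arg)
{
	char *end;

	/* "64k" -> 65536, "8g" -> 8589934592, no truncation on 32-bit */
	return memparse(arg, &end);
}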
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 2dc5a6f3fd5..4714584aa99 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -66,7 +66,7 @@ out:
return err;
}
-static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
@@ -77,7 +77,7 @@ static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcma_sflash_driver = {
- .remove = __devexit_p(bcm47xxsflash_remove),
+ .remove = bcm47xxsflash_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 681e2ee0f2d..e081bfeaaf7 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -62,6 +62,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
memset(page_address(page), 0xff, PAGE_SIZE);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
break;
}
@@ -152,6 +153,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
memcpy(page_address(page) + offset, buf, cpylen);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
}
page_cache_release(page);
@@ -433,7 +435,7 @@ static int __init block2mtd_init(void)
}
-static void __devexit block2mtd_exit(void)
+static void block2mtd_exit(void)
{
struct list_head *pos, *next;
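The added balance_dirty_pages_ratelimited() calls throttle block2mtd after each dirtied page, so large erases and writes do not flood memory with dirty page-cache pages. A minimal sketch of the pattern; the helper name is invented.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static void example_dirty_page(struct address_space *mapping,
			       struct page *page)
{
	set_page_dirty(page);
	unlock_page(page);
	/* Let the writeback code pace this writer if it gets ahead */
	balance_dirty_pages_ratelimited(mapping);
}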
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index d34d83b8f9c..8510ccb9c6f 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1440,7 +1440,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
oobdelta = mtd->ecclayout->oobavail;
break;
default:
- oobdelta = 0;
+ return -EINVAL;
}
if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
(ofs % DOC_LAYOUT_PAGE_SIZE))
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 706b847b46b..88b3fd3e18a 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -70,8 +70,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 03838bab1f5..4eeeb2d7f6e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,6 @@
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 5
-#ifdef CONFIG_M25PXX_USE_FAST_READ
-#define OPCODE_READ OPCODE_FAST_READ
-#define FAST_READ_DUMMY_BYTE 1
-#else
-#define OPCODE_READ OPCODE_NORM_READ
-#define FAST_READ_DUMMY_BYTE 0
-#endif
-
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
/****************************************************************************/
@@ -93,6 +85,7 @@ struct m25p {
u16 addr_width;
u8 erase_opcode;
u8 *command;
+ bool fast_read;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -168,6 +161,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
switch (JEDEC_MFR(jedec_id)) {
case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
return spi_write(flash->spi, flash->command, 1);
default:
@@ -342,6 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct m25p *flash = mtd_to_m25p(mtd);
struct spi_transfer t[2];
struct spi_message m;
+ uint8_t opcode;
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
@@ -354,7 +349,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
* Should add 1 byte DUMMY_BYTE.
*/
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
+ t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
@@ -376,12 +371,14 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
/* Set up the write data buffer. */
- flash->command[0] = OPCODE_READ;
+ opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
+ flash->command[0] = opcode;
m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
+ *retlen = m.actual_length - m25p_cmdsz(flash) -
+ (flash->fast_read ? 1 : 0);
mutex_unlock(&flash->lock);
@@ -664,7 +661,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
/* Micron */
- { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
@@ -745,6 +743,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -756,7 +756,7 @@ static const struct spi_device_id m25p_ids[] = {
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
-static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
+static const struct spi_device_id *jedec_probe(struct spi_device *spi)
{
int tmp;
u8 code = OPCODE_RDID;
@@ -801,7 +801,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
* matches what the READ command supports, at least until this driver
* understands FAST_READ (for clocks over 25 MHz).
*/
-static int __devinit m25p_probe(struct spi_device *spi)
+static int m25p_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct flash_platform_data *data;
@@ -809,9 +809,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct flash_info *info;
unsigned i;
struct mtd_part_parser_data ppdata;
+ struct device_node __maybe_unused *np = spi->dev.of_node;
#ifdef CONFIG_MTD_OF_PARTS
- if (!of_device_is_available(spi->dev.of_node))
+ if (!of_device_is_available(np))
return -ENODEV;
#endif
@@ -863,7 +864,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+ flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
+ GFP_KERNEL);
if (!flash->command) {
kfree(flash);
return -ENOMEM;
@@ -920,6 +922,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
+ flash->fast_read = false;
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(np, "m25p,fast-read"))
+ flash->fast_read = true;
+#endif
+
+#ifdef CONFIG_M25PXX_USE_FAST_READ
+ flash->fast_read = true;
+#endif
+
if (info->addr_width)
flash->addr_width = info->addr_width;
else {
@@ -961,7 +973,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
}
-static int __devexit m25p_remove(struct spi_device *spi)
+static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -983,7 +995,7 @@ static struct spi_driver m25p80_driver = {
},
.id_table = m25p_ids,
.probe = m25p_probe,
- .remove = __devexit_p(m25p_remove),
+ .remove = m25p_remove,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
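Fast read is no longer a compile-time choice: it can be enabled per device through the "m25p,fast-read" property (or the old Kconfig option), which selects the 0x0B opcode and adds the dummy byte to the read command. A hedged sketch; only the property name comes from the patch.

#include <linux/of.h>

static bool example_wants_fast_read(struct device_node *np)
{
	/* Absent node or absent property means normal read (0x03) */
	return np && of_property_read_bool(np, "m25p,fast-read");
}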
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 928fb0e6d73..ea7ea7b595d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -618,7 +618,7 @@ static char *otp_setup(struct mtd_info *device, char revision)
/*
* Register DataFlash device with MTD subsystem.
*/
-static int __devinit
+static int
add_dataflash_otp(struct spi_device *spi, char *name,
int nr_pages, int pagesize, int pageoffset, char revision)
{
@@ -679,7 +679,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
return err;
}
-static inline int __devinit
+static inline int
add_dataflash(struct spi_device *spi, char *name,
int nr_pages, int pagesize, int pageoffset)
{
@@ -705,7 +705,7 @@ struct flash_info {
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
-static struct flash_info __devinitdata dataflash_data [] = {
+static struct flash_info dataflash_data[] = {
/*
* NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -740,7 +740,7 @@ static struct flash_info __devinitdata dataflash_data [] = {
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
};
-static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+static struct flash_info *jedec_probe(struct spi_device *spi)
{
int tmp;
uint8_t code = OP_READ_ID;
@@ -823,7 +823,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
-static int __devinit dataflash_probe(struct spi_device *spi)
+static int dataflash_probe(struct spi_device *spi)
{
int status;
struct flash_info *info;
@@ -897,7 +897,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
return status;
}
-static int __devexit dataflash_remove(struct spi_device *spi)
+static int dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -920,7 +920,7 @@ static struct spi_driver dataflash_driver = {
},
.probe = dataflash_probe,
- .remove = __devexit_p(dataflash_remove),
+ .remove = dataflash_remove,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index dcc3c951153..2d2c2a5d4d2 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -756,7 +756,7 @@ err_probe:
#ifdef CONFIG_OF
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
@@ -799,7 +799,7 @@ static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
return 0;
}
#else
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
return -ENOSYS;
@@ -901,7 +901,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
* and do proper init for any found one.
* Returns 0 on success, non zero otherwise
*/
-static int __devinit spear_smi_probe(struct platform_device *pdev)
+static int spear_smi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
@@ -1016,7 +1016,7 @@ err:
*
* free all allocations and delete the partitions.
*/
-static int __devexit spear_smi_remove(struct platform_device *pdev)
+static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
@@ -1092,20 +1092,9 @@ static struct platform_driver spear_smi_driver = {
#endif
},
.probe = spear_smi_probe,
- .remove = __devexit_p(spear_smi_remove),
+ .remove = spear_smi_remove,
};
-
-static int spear_smi_init(void)
-{
- return platform_driver_register(&spear_smi_driver);
-}
-module_init(spear_smi_init);
-
-static void spear_smi_exit(void)
-{
- platform_driver_unregister(&spear_smi_driver);
-}
-module_exit(spear_smi_exit);
+module_platform_driver(spear_smi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index ab8a2f4c8d6..8091b016369 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -64,7 +64,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __devinitdata sst25l_flash_info[] = {
+static struct flash_info sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -313,7 +313,7 @@ out:
return ret;
}
-static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -353,7 +353,7 @@ static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __devinit sst25l_probe(struct spi_device *spi)
+static int sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
@@ -411,7 +411,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __devexit sst25l_remove(struct spi_device *spi)
+static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
@@ -428,7 +428,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __devexit_p(sst25l_remove),
+ .remove = sst25l_remove,
};
module_spi_driver(sst25l_driver);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index df304868beb..62ba82c396c 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -358,13 +358,6 @@ config MTD_IXP2000
IXP2000 based board and would like to use the flash chips on it,
say 'Y'.
-config MTD_FORTUNET
- tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && SA1100_FORTUNET
- help
- This enables access to the Flash on the FortuNet board. If you
- have such a board, say 'Y'.
-
config MTD_AUTCPU12
bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a0240edd196..4ded28711bc 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
-obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index e2875d6fe12..f7207b0a76d 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
}
-static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -289,7 +289,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
+static void amd76xrom_remove_one(struct pci_dev *pdev)
{
struct amd76xrom_window *window = &amd76xrom_window;
@@ -347,4 +347,3 @@ module_exit(cleanup_amd76xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
-
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 76fb594bb1d..a2dc2ae4b24 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -33,7 +33,7 @@ struct autcpu12_nvram_priv {
struct map_info map;
};
-static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
+static int autcpu12_nvram_probe(struct platform_device *pdev)
{
map_word tmp, save0, save1;
struct resource *res;
@@ -105,7 +105,7 @@ static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
return -ENOMEM;
}
-static int __devexit autcpu12_nvram_remove(struct platform_device *pdev)
+static int autcpu12_nvram_remove(struct platform_device *pdev)
{
struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
@@ -121,7 +121,7 @@ static struct platform_driver autcpu12_nvram_driver = {
.owner = THIS_MODULE,
},
.probe = autcpu12_nvram_probe,
- .remove = __devexit_p(autcpu12_nvram_remove),
+ .remove = autcpu12_nvram_remove,
};
module_platform_driver(autcpu12_nvram_driver);
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index ef5cde84a8b..f833edfaab7 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -30,7 +30,8 @@
#include <linux/io.h>
#include <asm/unaligned.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "bfin-async-flash"
@@ -123,7 +124,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit bfin_flash_probe(struct platform_device *pdev)
+static int bfin_flash_probe(struct platform_device *pdev)
{
int ret;
struct physmap_flash_data *pdata = pdev->dev.platform_data;
@@ -172,7 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bfin_flash_remove(struct platform_device *pdev)
+static int bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
@@ -184,7 +185,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
static struct platform_driver bfin_flash_driver = {
.probe = bfin_flash_probe,
- .remove = __devexit_p(bfin_flash_remove),
+ .remove = bfin_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d0e762fa5f..586a1c77e48 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -320,7 +320,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
+static void ck804xrom_remove_one(struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 08322b1c3e8..ff8681a2583 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,7 +144,7 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int __devinit esb2rom_init_one(struct pci_dev *pdev,
+static int esb2rom_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
@@ -378,13 +378,13 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
return 0;
}
-static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
+static void esb2rom_remove_one(struct pci_dev *pdev)
{
struct esb2rom_window *window = &esb2rom_window;
esb2rom_cleanup(window);
}
-static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
+static struct pci_device_id esb2rom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
deleted file mode 100644
index 956e2e4f30e..00000000000
--- a/drivers/mtd/maps/fortunet.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/* fortunet.c memory map
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define MAX_NUM_REGIONS 4
-#define MAX_NUM_PARTITIONS 8
-
-#define DEF_WINDOW_ADDR_PHY 0x00000000
-#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
-
-#define MTD_FORTUNET_PK "MTD FortuNet: "
-
-#define MAX_NAME_SIZE 128
-
-struct map_region
-{
- int window_addr_physical;
- int altbankwidth;
- struct map_info map_info;
- struct mtd_info *mymtd;
- struct mtd_partition parts[MAX_NUM_PARTITIONS];
- char map_name[MAX_NAME_SIZE];
- char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
-};
-
-static struct map_region map_regions[MAX_NUM_REGIONS];
-static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
-static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
-
-
-
-struct map_info default_map = {
- .size = DEF_WINDOW_SIZE,
- .bankwidth = 4,
-};
-
-static char * __init get_string_option(char *dest,int dest_size,char *sor)
-{
- if(!dest_size)
- return sor;
- dest_size--;
- while(*sor)
- {
- if(*sor==',')
- {
- sor++;
- break;
- }
- else if(*sor=='\"')
- {
- sor++;
- while(*sor)
- {
- if(*sor=='\"')
- {
- sor++;
- break;
- }
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- else
- {
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- *dest = 0;
- return sor;
-}
-
-static int __init MTD_New_Region(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[6];
- get_options (get_string_option(string,sizeof(string),line),6,params);
- if(params[0]<1)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
- " name,region-number[,base,size,bankwidth,altbankwidth]\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
- memcpy(&map_regions[params[1]].map_info,
- &default_map,sizeof(map_regions[params[1]].map_info));
- map_regions_set[params[1]] = 1;
- map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[params[1]].altbankwidth = 2;
- map_regions[params[1]].mymtd = NULL;
- map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
- strcpy(map_regions[params[1]].map_info.name,string);
- if(params[0]>1)
- {
- map_regions[params[1]].window_addr_physical = params[2];
- }
- if(params[0]>2)
- {
- map_regions[params[1]].map_info.size = params[3];
- }
- if(params[0]>3)
- {
- map_regions[params[1]].map_info.bankwidth = params[4];
- }
- if(params[0]>4)
- {
- map_regions[params[1]].altbankwidth = params[5];
- }
- return 1;
-}
-
-static int __init MTD_New_Partition(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[4];
- get_options (get_string_option(string,sizeof(string),line),4,params);
- if(params[0]<3)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
- " name,region-number,size,offset\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
- {
- printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
- return 1;
- }
- map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
- map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
- strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
- map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
- params[2];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
- params[3];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
- map_regions_parts[params[1]]++;
- return 1;
-}
-
-__setup("MTD_Region=", MTD_New_Region);
-__setup("MTD_Partition=", MTD_New_Partition);
-
-/* Backwards-spelling-compatibility */
-__setup("MTD_Partion=", MTD_New_Partition);
-
-static int __init init_fortunet(void)
-{
- int ix,iy;
- for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_parts[ix]&&(!map_regions_set[ix]))
- {
- printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
- ix);
- memset(&map_regions[ix],0,sizeof(map_regions[ix]));
- memcpy(&map_regions[ix].map_info,&default_map,
- sizeof(map_regions[ix].map_info));
- map_regions_set[ix] = 1;
- map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[ix].altbankwidth = 2;
- map_regions[ix].mymtd = NULL;
- map_regions[ix].map_info.name = map_regions[ix].map_name;
- strcpy(map_regions[ix].map_info.name,"FORTUNET");
- }
- if(map_regions_set[ix])
- {
- iy++;
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
- " address %x size %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
-
- map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
-
- map_regions[ix].map_info.virt =
- ioremap_nocache(
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
- if(!map_regions[ix].map_info.virt)
- {
- int j = 0;
- printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
- map_regions[ix].map_info.name);
- for (j = 0 ; j < ix; j++)
- iounmap(map_regions[j].map_info.virt);
- return -ENXIO;
- }
- simple_map_init(&map_regions[ix].map_info);
-
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].map_info.virt);
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- if((!map_regions[ix].mymtd)&&(
- map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
- {
- printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
- "for %s flash.\n",
- map_regions[ix].map_info.name);
- map_regions[ix].map_info.bankwidth =
- map_regions[ix].altbankwidth;
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- }
- map_regions[ix].mymtd->owner = THIS_MODULE;
- mtd_device_register(map_regions[ix].mymtd,
- map_regions[ix].parts,
- map_regions_parts[ix]);
- }
- }
- if(iy)
- return 0;
- return -ENXIO;
-}
-
-static void __exit cleanup_fortunet(void)
-{
- int ix;
- for(ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_set[ix])
- {
- if( map_regions[ix].mymtd )
- {
- mtd_device_unregister(map_regions[ix].mymtd);
- map_destroy( map_regions[ix].mymtd );
- }
- iounmap((void *)map_regions[ix].map_info.virt);
- }
- }
-}
-
-module_init(init_fortunet);
-module_exit(cleanup_fortunet);
-
-MODULE_AUTHOR("FortuNet, Inc.");
-MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index e4de96ba52b..7b643de2500 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -26,7 +26,8 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "gpio-addr-flash"
#define PFX DRIVER_NAME ": "
@@ -142,7 +143,8 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
*
* See gf_copy_from() caveat.
*/
-static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void gf_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
{
struct async_state *state = gf_map_info_to_state(map);
@@ -185,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
* ...
* };
*/
-static int __devinit gpio_flash_probe(struct platform_device *pdev)
+static int gpio_flash_probe(struct platform_device *pdev)
{
size_t i, arr_size;
struct physmap_flash_data *pdata;
@@ -258,7 +260,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit gpio_flash_remove(struct platform_device *pdev)
+static int gpio_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
size_t i = 0;
@@ -273,7 +275,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
static struct platform_driver gpio_flash_driver = {
.probe = gpio_flash_probe,
- .remove = __devexit_p(gpio_flash_remove),
+ .remove = gpio_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6689dcb3124..c7478e18f48 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int __devinit ichxrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
+static void ichxrom_remove_one(struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
-static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
+static struct pci_device_id ichxrom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 93f03175c82..3ee2ad1dcbe 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -63,24 +63,24 @@ struct vr_nor_mtd {
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
-static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
-static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
-static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
-static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char *probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
@@ -96,7 +96,7 @@ static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
return 0;
}
-static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
@@ -116,7 +116,7 @@ static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
-static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
@@ -176,7 +176,7 @@ static struct pci_device_id vr_nor_pci_ids[] = {
{0,}
};
-static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
+static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
@@ -189,7 +189,7 @@ static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static int __devinit
+static int
vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
@@ -256,7 +256,7 @@ vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
- .remove = __devexit_p(vr_nor_pci_remove),
+ .remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index c03456f1700..3c3c791eb96 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,7 +45,7 @@ struct ltq_mtd {
};
static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = {
+static const char *ltq_probe_types[] = {
"cmdlinepart", "ofpart", NULL };
static map_word
@@ -109,7 +109,7 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __devinit
+static int
ltq_mtd_probe(struct platform_device *pdev)
{
struct mtd_part_parser_data ppdata;
@@ -185,7 +185,7 @@ err_out:
return err;
}
-static int __devexit
+static int
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
static struct platform_driver ltq_mtd_driver = {
.probe = ltq_mtd_probe,
- .remove = __devexit_p(ltq_mtd_remove),
+ .remove = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 3c7ad17fca7..ab0fead56b8 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -125,7 +125,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
return 0;
}
-static int __devinit latch_addr_flash_probe(struct platform_device *dev)
+static int latch_addr_flash_probe(struct platform_device *dev)
{
struct latch_addr_flash_data *latch_addr_data;
struct latch_addr_flash_info *info;
@@ -218,7 +218,7 @@ done:
static struct platform_driver latch_addr_flash_driver = {
.probe = latch_addr_flash_probe,
- .remove = __devexit_p(latch_addr_flash_remove),
+ .remove = latch_addr_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1c30c1a307f..ed82914966f 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -253,7 +253,7 @@ static struct pci_device_id mtd_pci_ids[] = {
* Generic code follows.
*/
-static int __devinit
+static int
mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
@@ -308,7 +308,7 @@ out:
return err;
}
-static void __devexit
+static void
mtd_pci_remove(struct pci_dev *dev)
{
struct mtd_info *mtd = pci_get_drvdata(dev);
@@ -326,7 +326,7 @@ mtd_pci_remove(struct pci_dev *dev)
static struct pci_driver mtd_pci_driver = {
.name = "MTD PCI",
.probe = mtd_pci_probe,
- .remove = __devexit_p(mtd_pci_remove),
+ .remove = mtd_pci_remove,
.id_table = mtd_pci_ids,
};
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 6f19acadb06..37cdc201652 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -77,7 +77,7 @@ static int of_flash_remove(struct platform_device *dev)
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
-static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
+static struct mtd_info *obsolete_probe(struct platform_device *dev,
struct map_info *map)
{
struct device_node *dp = dev->dev.of_node;
@@ -116,7 +116,7 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
information. */
static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
"ofpart", "ofoldpart", NULL };
-static const char ** __devinit of_get_probes(struct device_node *dp)
+static const char **of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
@@ -145,14 +145,14 @@ static const char ** __devinit of_get_probes(struct device_node *dp)
return res;
}
-static void __devinit of_free_probes(const char **probes)
+static void of_free_probes(const char **probes)
{
if (probes != part_probe_types_def)
kfree(probes);
}
static struct of_device_id of_flash_match[];
-static int __devinit of_flash_probe(struct platform_device *dev)
+static int of_flash_probe(struct platform_device *dev)
{
const char **part_probe_types;
const struct of_device_id *match;
@@ -170,6 +170,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
bool map_indirect;
+ const char *mtd_name;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -178,6 +179,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
/*
* Get number of "reg" tuples. Scan for MTD devices on area's
* described by each "reg" region. This makes it possible (including
@@ -234,7 +237,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
@@ -282,6 +285,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
err = 0;
+ info->cmtd = NULL;
if (info->list_size == 1) {
info->cmtd = info->list[0].mtd;
} else if (info->list_size > 1) {
@@ -290,9 +294,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
*/
info->cmtd = mtd_concat_create(mtd_list, info->list_size,
dev_name(&dev->dev));
- if (info->cmtd == NULL)
- err = -ENXIO;
}
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+
if (err)
goto err_out;
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 65bd1cd4d62..afea93b515d 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
pismo->vpp(pismo->vpp_data, on);
}
-static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
+static unsigned int pismo_width_to_bytes(unsigned int width)
{
width &= 15;
if (width > 2)
@@ -66,7 +66,7 @@ static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
return 1 << width;
}
-static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
+static int pismo_eeprom_read(struct i2c_client *client, void *buf,
u8 addr, size_t size)
{
int ret;
@@ -88,7 +88,7 @@ static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
-static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
+static int pismo_add_device(struct pismo_data *pismo, int i,
struct pismo_mem *region, const char *name, void *pdata, size_t psize)
{
struct platform_device *dev;
@@ -129,7 +129,7 @@ static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
return ret;
}
-static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
+static int pismo_add_nor(struct pismo_data *pismo, int i,
struct pismo_mem *region)
{
struct physmap_flash_data data = {
@@ -143,7 +143,7 @@ static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
+static int pismo_add_sram(struct pismo_data *pismo, int i,
struct pismo_mem *region)
{
struct platdata_mtd_ram data = {
@@ -154,7 +154,7 @@ static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
+static void pismo_add_one(struct pismo_data *pismo, int i,
const struct pismo_cs_block *cs, phys_addr_t base)
{
struct device *dev = &pismo->client->dev;
@@ -197,7 +197,7 @@ static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int __devexit pismo_remove(struct i2c_client *client)
+static int pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -210,7 +210,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
return 0;
}
-static int __devinit pismo_probe(struct i2c_client *client,
+static int pismo_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -267,7 +267,7 @@ static struct i2c_driver pismo_driver = {
.owner = THIS_MODULE,
},
.probe = pismo_probe,
- .remove = __devexit_p(pismo_remove),
+ .remove = pismo_remove,
.id_table = pismo_id,
};
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 891558de3ec..2de66b062f0 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -219,7 +219,7 @@ static int platram_probe(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RW);
- /* check to see if there are any available partitions, or wether
+ /* check to see if there are any available partitions, or whether
* to add this device whole */
err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 81884c27740..43e3dbb976d 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -49,7 +49,7 @@ struct pxa2xx_flash_info {
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
+static int pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = pdev->dev.platform_data;
struct pxa2xx_flash_info *info;
@@ -105,7 +105,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
+static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -139,7 +139,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
- .remove = __devexit_p(pxa2xx_flash_remove),
+ .remove = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a675bdbcb0f..f694417cf7e 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -149,8 +149,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
-static struct sa_info *__devinit
-sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
{
struct sa_info *info;
int nr, size, i, ret = 0;
@@ -246,7 +246,7 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
+static int sa1100_mtd_probe(struct platform_device *pdev)
{
struct flash_platform_data *plat = pdev->dev.platform_data;
struct sa_info *info;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 9dcbc684abd..71796137e97 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -69,7 +69,7 @@ static struct map_info scb2_map = {
};
static int region_fail;
-static int __devinit
+static int
scb2_fixup_mtd(struct mtd_info *mtd)
{
int i;
@@ -133,7 +133,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR 0x41
#define CSB5_FCR_DECODE_ALL 0x0e
-static int __devinit
+static int
scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
u8 reg;
@@ -197,7 +197,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
return 0;
}
-static void __devexit
+static void
scb2_flash_remove(struct pci_dev *dev)
{
if (!scb2_mtd)
@@ -231,7 +231,7 @@ static struct pci_driver scb2_flash_driver = {
.name = "Intel SCB2 BIOS Flash",
.id_table = scb2_flash_pci_ids,
.probe = scb2_flash_probe,
- .remove = __devexit_p(scb2_flash_remove),
+ .remove = scb2_flash_remove,
};
module_pci_driver(scb2_flash_driver);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 175e537b444..d467f3b11c9 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op)
+static int uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -121,7 +121,7 @@ static int __devinit uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int __devexit uflash_remove(struct platform_device *op)
+static int uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -155,7 +155,7 @@ static struct platform_driver uflash_driver = {
.of_match_table = uflash_match,
},
.probe = uflash_probe,
- .remove = __devexit_p(uflash_remove),
+ .remove = uflash_remove,
};
module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 2e2b0945edc..6b223cfe92b 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -596,7 +596,7 @@ fail_name:
}
/* Handles very basic info about the flash, queries for details */
-static int __devinit vmu_connect(struct maple_device *mdev)
+static int vmu_connect(struct maple_device *mdev)
{
unsigned long test_flash_data, basic_flash_data;
int c, error;
@@ -690,7 +690,7 @@ fail_nomem:
return error;
}
-static void __devexit vmu_disconnect(struct maple_device *mdev)
+static void vmu_disconnect(struct maple_device *mdev)
{
struct memcard *card;
struct mdev_part *mpart;
@@ -772,7 +772,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
}
-static int __devinit probe_maple_vmu(struct device *dev)
+static int probe_maple_vmu(struct device *dev)
{
int error;
struct maple_device *mdev = to_maple_dev(dev);
@@ -789,7 +789,7 @@ static int __devinit probe_maple_vmu(struct device *dev)
return 0;
}
-static int __devexit remove_maple_vmu(struct device *dev)
+static int remove_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
@@ -802,7 +802,7 @@ static struct maple_driver vmu_flash_driver = {
.drv = {
.name = "Dreamcast_visual_memory",
.probe = probe_maple_vmu,
- .remove = __devexit_p(remove_maple_vmu),
+ .remove = remove_maple_vmu,
},
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f1f06715d4e..5ad39bb5ab4 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,7 +32,6 @@
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "mtdcore.h"
@@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
- if (kthread_should_stop())
- return 1;
-
return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static int mtd_blktrans_thread(void *arg)
+static void mtd_blktrans_work(struct work_struct *work)
{
- struct mtd_blktrans_dev *dev = arg;
+ struct mtd_blktrans_dev *dev =
+ container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
@@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
+ while (1) {
int res;
dev->bg_stop = false;
@@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg)
background_done = !dev->bg_stop;
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- set_current_state(TASK_RUNNING);
-
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
+ break;
}
spin_unlock_irq(rq->queue_lock);
@@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
-
- return 0;
}
static void mtd_blktrans_request(struct request_queue *rq)
@@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else {
- dev->bg_stop = true;
- wake_up_process(dev->thread);
- }
+ else
+ queue_work(dev->wq, &dev->work);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -325,7 +310,7 @@ unlock:
return ret;
}
-static const struct block_device_operations mtd_blktrans_ops = {
+static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
@@ -401,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
- gd->fops = &mtd_blktrans_ops;
+ gd->fops = &mtd_block_ops;
if (tr->part_bits)
if (new->devnum < 26)
@@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing thread */
- /* TODO: workqueue ? */
- new->thread = kthread_run(mtd_blktrans_thread, new,
- "%s%d", tr->name, new->mtd->index);
- if (IS_ERR(new->thread)) {
- ret = PTR_ERR(new->thread);
+ /* Create processing workqueue */
+ new->wq = alloc_workqueue("%s%d", 0, 0,
+ tr->name, new->mtd->index);
+ if (!new->wq)
goto error4;
- }
+ INIT_WORK(&new->work, mtd_blktrans_work);
+
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
-
- /* Stop the thread */
- kthread_stop(old->thread);
+ /* Stop the workqueue. This will run any pending request to completion. */
+ destroy_workqueue(old->wq);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
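The mtd_blkdevs.c hunks above replace the per-device background kthread with a workqueue. A minimal, self-contained sketch of the same pattern follows; the demo_* names are illustrative assumptions, not identifiers from the driver.

/*
 * Minimal sketch of the kthread -> workqueue pattern used above; the
 * demo_* names are illustrative and not part of mtd_blkdevs.c.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* Runs in process context, like mtd_blktrans_work() above. */
static void demo_work_fn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, work);

	/* Drain the request queue here, then simply return. */
	(void)d;
}

static int demo_add_dev(struct demo_dev *d, int index)
{
	/* One workqueue per device, mirroring alloc_workqueue("%s%d", ...). */
	d->wq = alloc_workqueue("demo%d", 0, 0, index);
	if (!d->wq)
		return -ENOMEM;
	INIT_WORK(&d->work, demo_work_fn);
	return 0;
}

static void demo_kick(struct demo_dev *d)
{
	/* Replaces dev->bg_stop = true; wake_up_process(dev->thread); */
	queue_work(d->wq, &d->work);
}

static void demo_del_dev(struct demo_dev *d)
{
	/* Replaces kthread_stop(); waits for any pending work to finish. */
	destroy_workqueue(d->wq);
}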
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f5b3f91fa1c..97bb8f6304d 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -271,7 +271,7 @@ static void find_next_position(struct mtdoops_context *cxt)
if (count[0] == 0xffffffff && count[1] == 0xffffffff)
mark_page_unused(cxt, page);
- if (count[0] == 0xffffffff)
+ if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
@@ -289,14 +289,13 @@ static void find_next_position(struct mtdoops_context *cxt)
}
}
if (maxcount == 0xffffffff) {
- cxt->nextpage = 0;
- cxt->nextcount = 1;
- schedule_work(&cxt->work_erase);
- return;
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+ } else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
}
-
- cxt->nextpage = maxpos;
- cxt->nextcount = maxcount;
mtdoops_inc_counter(cxt);
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 531807dec6b..5819eb57521 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -50,16 +50,30 @@ config MTD_NAND_MUSEUM_IDS
of these chips were reused by later, larger chips.
config MTD_NAND_DENALI
- depends on PCI
+ tristate "Support Denali NAND controller"
+ help
+ Enable support for the Denali NAND controller. This should be
+ combined with either the PCI or platform drivers to provide device
+ registration.
+
+config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
+ depends on PCI && MTD_NAND_DENALI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
-
+
+config MTD_NAND_DENALI_DT
+ tristate "Support Denali NAND controller as a DT device"
+ depends on HAVE_CLK && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
- depends on MTD_NAND_DENALI
+ depends on MTD_NAND_DENALI_PCI
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
@@ -433,6 +447,14 @@ config MTD_NAND_GPMI_NAND
block, such as SD card. So pay attention to it when you enable
the GPMI.
+config MTD_NAND_BCM47XXNFLASH
+ tristate "Support for NAND flash on BCM4706 BCMA bus"
+ depends on BCMA_NFLASH
+ help
+ The BCMA bus can have various flash memories attached; they are
+ registered by bcma as platform devices. This enables the driver for
+ NAND flash memories. For now only BCM4706 is supported.
+
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
depends on HAS_IOMEM
@@ -499,12 +521,6 @@ config MTD_NAND_MXC
This enables the driver for the NAND flash controller on the
MXC processors.
-config MTD_NAND_NOMADIK
- tristate "ST Nomadik 8815 NAND support"
- depends on ARCH_NOMADIK
- help
- Driver for the NAND flash controller on the Nomadik, with ECC.
-
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on SUPERH || ARCH_SHMOBILE
@@ -546,7 +562,7 @@ config MTD_NAND_JZ4740
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
- depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6c7f2b3ca8a..d76d9120569 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -45,11 +47,11 @@ obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
-obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e7723aa7ac..f1d71cdc8aa 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -173,7 +173,7 @@ static const struct gpio _mandatory_gpio[] = {
/*
* Main initialization routine
*/
-static int __devinit ams_delta_init(struct platform_device *pdev)
+static int ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -270,7 +270,7 @@ out_free:
/*
* Clean up routine
*/
-static int __devexit ams_delta_cleanup(struct platform_device *pdev)
+static int ams_delta_cleanup(struct platform_device *pdev)
{
void __iomem *io_base = platform_get_drvdata(pdev);
@@ -289,7 +289,7 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
static struct platform_driver ams_delta_nand_driver = {
.probe = ams_delta_init,
- .remove = __devexit_p(ams_delta_cleanup),
+ .remove = ams_delta_cleanup,
.driver = {
.name = "ams-delta-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 92623ac2015..90bdca61c79 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -331,13 +331,13 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* 12-bits 20-bytes 21-bytes
* 24-bits 39-bytes 42-bytes
*/
-static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
int m = 12 + sector_size / 512;
return (m * cap + 7) / 8;
}
-static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
int oobsize, int ecc_len)
{
int i;
@@ -353,7 +353,7 @@ static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
oobsize - ecc_len - layout->oobfree[0].offset;
}
-static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
{
int table_size;
@@ -375,7 +375,7 @@ static void pmecc_data_free(struct atmel_nand_host *host)
kfree(host->pmecc_delta);
}
-static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
+static int pmecc_data_alloc(struct atmel_nand_host *host)
{
const int cap = host->pmecc_corr_cap;
@@ -724,6 +724,7 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
struct atmel_nand_host *host = nand_chip->priv;
int i, err_nbr, eccbytes;
uint8_t *buf_pos;
+ int total_err = 0;
eccbytes = nand_chip->ecc.bytes;
for (i = 0; i < eccbytes; i++)
@@ -751,12 +752,13 @@ normal_check:
pmecc_correct_data(mtd, buf_pos, ecc, i,
host->pmecc_bytes_per_sector, err_nbr);
mtd->ecc_stats.corrected += err_nbr;
+ total_err += err_nbr;
}
}
pmecc_stat >>= 1;
}
- return 0;
+ return total_err;
}
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
@@ -768,6 +770,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
unsigned long end_time;
+ int bitflips = 0;
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -790,11 +793,14 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
}
stat = pmecc_readl_relaxed(host->ecc, ISR);
- if (stat != 0)
- if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
- return -EIO;
+ if (stat != 0) {
+ bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ if (bitflips < 0)
+ /* uncorrectable errors */
+ return 0;
+ }
- return 0;
+ return bitflips;
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -1206,7 +1212,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
#if defined(CONFIG_OF)
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
u32 val, table_offset;
@@ -1293,7 +1299,7 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
return 0;
}
#else
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
return -EINVAL;
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5c47b200045..217459d02b2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -382,7 +382,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-static int __devinit find_nand_cs(unsigned long nand_base)
+static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
@@ -403,7 +403,7 @@ static int __devinit find_nand_cs(unsigned long nand_base)
return -ENODEV;
}
-static int __devinit au1550nd_probe(struct platform_device *pdev)
+static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
@@ -491,7 +491,7 @@ out1:
return ret;
}
-static int __devexit au1550nd_remove(struct platform_device *pdev)
+static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -509,7 +509,7 @@ static struct platform_driver au1550nd_driver = {
.owner = THIS_MODULE,
},
.probe = au1550nd_probe,
- .remove = __devexit_p(au1550nd_remove),
+ .remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 00000000000..f05b119e134
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 00000000000..0bdb2ce4da7
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,22 @@
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 00000000000..2b8b05bec3d
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,108 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ int err = 0;
+
+ b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
+ if (!b47n) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ b47n->nand_chip.priv = b47n;
+ b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.priv = &b47n->nand_chip; /* Required */
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ goto err_init;
+ }
+
+ err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ goto err_dev_reg;
+ }
+
+ return 0;
+
+err_dev_reg:
+err_init:
+ kfree(b47n);
+out:
+ return err;
+}
+
+static int __devexit bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+
+ if (nflash->mtd)
+ mtd_device_unregister(nflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .remove = __devexit_p(bcm47xxnflash_remove),
+ .driver = {
+ .name = "bcma_nflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm47xxnflash_init(void)
+{
+ int err;
+
+ /*
+ * Platform device "bcma_nflash" exists on SoCs and is registered very
+ * early; it won't be added at runtime, so we use platform_driver_probe.
+ */
+ err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ if (err)
+ pr_err("Failed to register serial flash driver: %d\n", err);
+
+ return err;
+}
+
+static void __exit bcm47xxnflash_exit(void)
+{
+ platform_driver_unregister(&bcm47xxnflash_driver);
+}
+
+module_init(bcm47xxnflash_init);
+module_exit(bcm47xxnflash_exit);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 00000000000..86c9a79b89b
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,413 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+/* Broadcom uses 1'000'000 but that seems to be too many. Tests on a WNDR4500
+ * have shown 164 retries as the maximum. */
+#define NFLASH_READY_RETRIES 1000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift; it may be bigger
+ * when accessing OOB */
+
+ while (len) {
+ /* We can read a maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+ if (i == toread - 4) /* Last read goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift; it may be bigger
+ * when accessing OOB */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+ if (i == len - 4) /* Last write goes without NCTL_CSA */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+ int chip)
+{
+ return;
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match the BCM4706 hardware
+ * layout. For example, reading the chip id is performed in a non-standard way.
+ * Setting the column and page is also handled differently: we use special
+ * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
+ * standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ pr_warn("Chip reset not implemented yet\n");
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+ * Reading is special: the last read has to go without the NCTL_CSA
+ * bit. We don't know how many reads the NAND subsystem is going
+ * to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ freq = 100000000;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq * 0xFFF) >> 3;
+ freq = (freq * 25000000) >> 3;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->mtd, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = b47n->nand_chip.chipsize >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
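The wait-counter setup in bcm47xxnflash_ops_bcm4706_init() above converts nanosecond timings into controller clock cycles before packing them into BCMA_CC_NFLASH_WAITCNT0. A worked example of that arithmetic, assuming a 100 MHz ChipCommon clock (illustrative only), is sketched below.

/*
 * Worked example, assuming a 100 MHz ChipCommon clock (illustrative only):
 *   ns_to_cycle(15, 100)  = ((15  * 1000 * 100) / 1000000) + 1 = 2 cycles  (w0)
 *   ns_to_cycle(20, 100)  = ((20  * 1000 * 100) / 1000000) + 1 = 3 cycles  (w1)
 *   ns_to_cycle(10, 100)  = ((10  * 1000 * 100) / 1000000) + 1 = 2 cycles  (w2, w3)
 *   ns_to_cycle(100, 100) = ((100 * 1000 * 100) / 1000000) + 1 = 11 cycles (w4)
 * These are then packed as (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0)
 * and written to BCMA_CC_NFLASH_WAITCNT0.
 */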
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index ab0caa74eb4..4271e948d1e 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -658,7 +658,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
/*
* Device management interface
*/
-static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
struct mtd_partition *parts = info->platform->partitions;
@@ -667,7 +667,7 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
return mtd_device_register(mtd, parts, nr);
}
-static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
+static int bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
@@ -725,7 +725,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
+static int bf5xx_nand_probe(struct platform_device *pdev)
{
struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
struct bf5xx_nand_info *info = NULL;
@@ -865,7 +865,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
- .remove = __devexit_p(bf5xx_nand_remove),
+ .remove = bf5xx_nand_remove,
.suspend = bf5xx_nand_suspend,
.resume = bf5xx_nand_resume,
.driver = {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2bb7170502c..010d6126653 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -585,7 +585,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
}
/* F_2[X]/(X**6+X+1) */
-static unsigned short __devinit gf64_mul(u8 a, u8 b)
+static unsigned short gf64_mul(u8 a, u8 b)
{
u8 c;
unsigned int i;
@@ -604,7 +604,7 @@ static unsigned short __devinit gf64_mul(u8 a, u8 b)
}
/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
-static u16 __devinit gf4096_mul(u16 a, u16 b)
+static u16 gf4096_mul(u16 a, u16 b)
{
u8 ah, al, bh, bl, ch, cl;
@@ -619,14 +619,14 @@ static u16 __devinit gf4096_mul(u16 a, u16 b)
return (ch << 6) ^ cl;
}
-static int __devinit cafe_mul(int x)
+static int cafe_mul(int x)
{
if (x == 0)
return 1;
return gf4096_mul(x, 0xe01);
}
-static int __devinit cafe_nand_probe(struct pci_dev *pdev,
+static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mtd_info *mtd;
@@ -821,7 +821,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
return err;
}
-static void __devexit cafe_nand_remove(struct pci_dev *pdev)
+static void cafe_nand_remove(struct pci_dev *pdev)
{
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
@@ -887,7 +887,7 @@ static struct pci_driver cafe_nand_pci_driver = {
.name = "CAFÉ NAND",
.id_table = cafe_nand_tbl,
.probe = cafe_nand_probe,
- .remove = __devexit_p(cafe_nand_remove),
+ .remove = cafe_nand_remove,
.resume = cafe_nand_resume,
};
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index adb6c3ef37f..2cdeab8bebc 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
this->ecc.hwctl = cs_enable_hwecc;
this->ecc.calculate = cs_calculate_ecc;
this->ecc.correct = nand_correct_data;
+ this->ecc.strength = 1;
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
@@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_ior;
}
- this->ecc.strength = 1;
-
new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
cs553x_mtd[cs] = new_mtd;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 945047ad095..3502606f648 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -821,9 +821,16 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
- pdata->nr_parts);
-
+ if (pdata->parts)
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata->parts, pdata->nr_parts);
+ else {
+ struct mtd_part_parser_data ppdata;
+
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
+ NULL, 0);
+ }
if (ret < 0)
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index e706a237170..0c8bb6bf842 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,14 +16,12 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -89,13 +87,6 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
* format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
-/* List of platforms this NAND controller has be integrated into */
-static const struct pci_device_id denali_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
- { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
- { /* end: all zeroes */ }
-};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -699,7 +690,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
if (comp_res == 0) {
/* timeout */
- printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
intr_status, irq_mask);
intr_status = 0;
@@ -1305,8 +1296,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
/* TODO: Read OOB data */
break;
default:
- printk(KERN_ERR ": unsupported command"
- " received 0x%x\n", cmd);
+ pr_err(": unsupported command received 0x%x\n", cmd);
break;
}
}
@@ -1425,107 +1415,48 @@ void denali_drv_init(struct denali_nand_info *denali)
denali->irq_status = 0;
}
-/* driver entry point */
-static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int denali_init(struct denali_nand_info *denali)
{
- int ret = -ENODEV;
- resource_size_t csr_base, mem_base;
- unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
-
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
- if (!denali)
- return -ENOMEM;
+ int ret;
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
- }
-
- if (id->driver_data == INTEL_CE4100) {
+ if (denali->platform == INTEL_CE4100) {
/* Due to a silicon limitation, we can only support
* ONFI timing mode 1 and below.
*/
if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
- printk(KERN_ERR "Intel CE4100 only supports"
- " ONFI timing mode 1 or below\n");
- ret = -EINVAL;
- goto failed_enable_dev;
- }
- denali->platform = INTEL_CE4100;
- mem_base = pci_resource_start(dev, 0);
- mem_len = pci_resource_len(dev, 1);
- csr_base = pci_resource_start(dev, 1);
- csr_len = pci_resource_len(dev, 1);
- } else {
- denali->platform = INTEL_MRST;
- csr_base = pci_resource_start(dev, 0);
- csr_len = pci_resource_len(dev, 0);
- mem_base = pci_resource_start(dev, 1);
- mem_len = pci_resource_len(dev, 1);
- if (!mem_len) {
- mem_base = csr_base + csr_len;
- mem_len = csr_len;
+ pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+ return -EINVAL;
}
}
/* Is 32-bit DMA supported? */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
if (ret) {
- printk(KERN_ERR "Spectra: no usable DMA configuration\n");
- goto failed_enable_dev;
+ pr_err("Spectra: no usable DMA configuration\n");
+ return ret;
}
- denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
DENALI_BUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
- dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
- goto failed_enable_dev;
- }
-
- pci_set_master(dev);
- denali->dev = &dev->dev;
- denali->mtd.dev.parent = &dev->dev;
-
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request memory regions\n");
- goto failed_dma_map;
- }
-
- denali->flash_reg = ioremap_nocache(csr_base, csr_len);
- if (!denali->flash_reg) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
- }
-
- denali->flash_mem = ioremap_nocache(mem_base, mem_len);
- if (!denali->flash_mem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- ret = -ENOMEM;
- goto failed_remap_reg;
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ return -EIO;
}
-
+ denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
/* denali_isr register is done after all the hardware
* initialization is finished */
- if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
DENALI_NAND_NAME, denali)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- goto failed_remap_mem;
+ pr_err("Spectra: Unable to allocate IRQ\n");
+ return -ENODEV;
}
/* now that our ISR is registered, we can enable interrupts */
denali_set_intr_modes(denali, true);
-
- pci_set_drvdata(dev, denali);
-
denali->mtd.name = "denali-nand";
denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
@@ -1549,8 +1480,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
*/
if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
ret = -ENODEV;
- printk(KERN_ERR "Spectra: device size not supported by this "
- "version of MTD.");
+ pr_err("Spectra: device size not supported by this version of MTD.");
goto failed_req_irq;
}
@@ -1602,8 +1532,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
ECC_8BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE))) {
- printk(KERN_ERR "Your NAND chip OOB is not large enough to"
- " contain 8bit ECC correction codes");
+ pr_err("Your NAND chip OOB is not large enough to \
+ contain 8bit ECC correction codes");
goto failed_req_irq;
} else {
denali->nand.ecc.strength = 8;
@@ -1655,56 +1585,24 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
- dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
ret);
goto failed_req_irq;
}
return 0;
failed_req_irq:
- denali_irq_cleanup(dev->irq, denali);
-failed_remap_mem:
- iounmap(denali->flash_mem);
-failed_remap_reg:
- iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_dma_map:
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+
return ret;
}
+EXPORT_SYMBOL(denali_init);
/* driver exit point */
-static void denali_pci_remove(struct pci_dev *dev)
+void denali_remove(struct denali_nand_info *denali)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
-
- nand_release(&denali->mtd);
-
- denali_irq_cleanup(dev->irq, denali);
-
- iounmap(denali->flash_reg);
- iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- pci_set_drvdata(dev, NULL);
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+ dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
}
-
-MODULE_DEVICE_TABLE(pci, denali_pci_ids);
-
-static struct pci_driver denali_pci_driver = {
- .name = DENALI_NAND_NAME,
- .id_table = denali_pci_ids,
- .probe = denali_pci_probe,
- .remove = denali_pci_remove,
-};
-
-module_pci_driver(denali_pci_driver);
+EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index fabb9d56b39..cec5712862c 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -466,6 +466,7 @@ struct nand_buf {
#define INTEL_CE4100 1
#define INTEL_MRST 2
+#define DT 3
struct denali_nand_info {
struct mtd_info mtd;
@@ -487,6 +488,7 @@ struct denali_nand_info {
uint32_t irq_status;
int irq_debug_array[32];
int idx;
+ int irq;
uint32_t devnum; /* represent how many nands connected */
uint32_t fwblks; /* represent how many blocks FW used */
@@ -496,4 +498,7 @@ struct denali_nand_info {
uint32_t max_banks;
};
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
#endif /*_LLD_NAND_*/
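The denali.h changes above turn the Denali core into a library: a bus-glue driver fills in the bus-specific fields of struct denali_nand_info and hands it to denali_init()/denali_remove(). A minimal sketch of that contract follows; the demo_* names and the way resources are obtained are assumptions for illustration only, and the real users are the denali_pci.c and denali_dt.c files that follow.

/*
 * Sketch of the glue-driver contract, not a real driver: fill in the
 * bus-specific fields of struct denali_nand_info, then let the shared
 * core do the rest via denali_init()/denali_remove().
 */
#include <linux/slab.h>
#include "denali.h"

static int demo_glue_attach(struct denali_nand_info **out, struct device *dev,
			    void __iomem *regs, void __iomem *mem, int irq)
{
	struct denali_nand_info *denali;
	int ret;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	denali->platform = DT;		/* or INTEL_CE4100 / INTEL_MRST */
	denali->dev = dev;		/* used for DMA mapping and dev_err() */
	denali->irq = irq;		/* the core requests this IRQ itself */
	denali->flash_reg = regs;	/* controller register window */
	denali->flash_mem = mem;	/* data/command window */

	ret = denali_init(denali);
	if (ret) {
		kfree(denali);
		return ret;
	}

	*out = denali;
	return 0;
}

static void demo_glue_detach(struct denali_nand_info *denali)
{
	denali_remove(denali);
	kfree(denali);
}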
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 00000000000..546f8cb5688
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,167 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_nand_info denali;
+ struct clk *clk;
+};
+
+static void __iomem *request_and_map(struct device *dev,
+ const struct resource *res)
+{
+ void __iomem *ptr;
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ "denali-dt")) {
+ dev_err(dev, "unable to request %s\n", res->name);
+ return NULL;
+ }
+
+ ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!ptr)
+ dev_err(dev, "ioremap_nocache of %s failed!\n", res->name);
+
+ return ptr;
+}
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ { .compatible = "denali,denali-nand-dt" },
+ { /* sentinel */ }
+ };
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *denali_reg, *nand_data;
+ struct denali_dt *dt;
+ struct denali_nand_info *denali;
+ int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENODEV;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->denali;
+
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ if (!denali_reg || !nand_data) {
+ dev_err(&ofdev->dev, "resources not completely defined\n");
+ return -EINVAL;
+ }
+
+ denali->platform = DT;
+ denali->dev = &ofdev->dev;
+ denali->irq = platform_get_irq(ofdev, 0);
+ if (denali->irq < 0) {
+ dev_err(&ofdev->dev, "no irq defined\n");
+ return -ENXIO;
+ }
+
+ denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
+ if (!denali->flash_reg)
+ return -ENOMEM;
+
+ denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
+ if (!denali->flash_mem)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(ofdev->dev.of_node,
+ "dma-mask", (u32 *)&denali_dma_mask)) {
+ denali->dev->dma_mask = &denali_dma_mask;
+ } else {
+ denali->dev->dma_mask = NULL;
+ }
+
+ dt->clk = clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(dt->clk)) {
+ dev_err(&ofdev->dev, "no clk available\n");
+ return PTR_ERR(dt->clk);
+ }
+ clk_prepare_enable(dt->clk);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+ denali_remove(&dt->denali);
+ clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(denali_nand_dt_ids),
+ },
+};
+
+static int __init denali_init_dt(void)
+{
+ return platform_driver_register(&denali_dt_driver);
+}
+module_init(denali_init_dt);
+
+static void __exit denali_exit_dt(void)
+{
+ platform_driver_unregister(&denali_dt_driver);
+}
+module_exit(denali_exit_dt);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 00000000000..e3e46623b2b
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,144 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+/* List of platforms this NAND controller has been integrated into */
+static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ pr_err("Spectra: pci_enable_device failed.\n");
+ goto failed_alloc_memory;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ pr_err("Spectra: Unable to request memory regions\n");
+ goto failed_enable_dev;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ pr_err("Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ pr_err("Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ goto failed_remap_mem;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memory:
+ kfree(denali);
+
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int denali_init_pci(void)
+{
+ pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+module_init(denali_init_pci);
+
+static void denali_exit_pci(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 256eb30f618..81fa5784f98 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -53,8 +53,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 799da5d1c85..18fa4489e52 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -46,6 +46,25 @@
#include <linux/bitrev.h>
/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data. The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk. Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode. The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address). This module parameter enables you to use this
+ * driver to write the SPL. When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ... Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
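+
+/*
+ * Illustrative usage only (not part of this patch): load the module with
+ * "modprobe docg4 reliable_mode=1", then write the SPL image starting at an
+ * even-numbered 2k page boundary, e.g. with mtd-utils' nandwrite, in chunks
+ * of no more than 2k as described above.
+ */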
+
+/*
* You'll want to ignore badblocks if you're reading a partition that contains
* data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
* it does not use mtd nand's method for marking bad blocks (using oob area).
@@ -113,6 +132,7 @@ struct docg4_priv {
#define DOCG4_SEQ_PAGEWRITE 0x16
#define DOCG4_SEQ_PAGEPROG 0x1e
#define DOCG4_SEQ_BLOCKERASE 0x24
+#define DOCG4_SEQ_SETMODE 0x45
/* DOC_FLASHCOMMAND register commands */
#define DOCG4_CMD_PAGE_READ 0x00
@@ -122,6 +142,8 @@ struct docg4_priv {
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOCG4_CMD_PAGEWRITE 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_RESET 0xff
/* DOC_POWERMODE register bits */
@@ -190,17 +212,20 @@ struct docg4_priv {
#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
/*
- * Oob bytes 0 - 6 are available to the user.
- * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
* Byte 15 (the last) is used by the driver as a "page written" flag.
*/
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobavail = 7,
- .oobfree = { {0, 7} }
+ .oobavail = 5,
+ .oobfree = { {.offset = 2, .length = 5} }
};
/*
@@ -611,6 +636,14 @@ static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
dev_dbg(doc->dev,
"docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
sequence_reset(mtd);
+
+ if (unlikely(reliable_mode)) {
+ writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ }
+
writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
@@ -691,6 +724,15 @@ static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
break;
case NAND_CMD_SEQIN:
+ if (unlikely(reliable_mode)) {
+ uint16_t g4_page = g4_addr >> 16;
+
+ /* writes to odd-numbered 2k pages are invalid */
+ if (g4_page & 0x01)
+ dev_warn(doc->dev,
+ "invalid reliable mode address\n");
+ }
+
write_page_prologue(mtd, g4_addr);
/* hack for deferred write of oob bytes */
@@ -979,16 +1021,15 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
struct docg4_priv *doc = nand->priv;
uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
uint8_t *buf;
- int i, block, status;
+ int i, block;
+ __u32 eccfailed_stats = mtd->ecc_stats.failed;
buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
- status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
- if (status)
- goto exit;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
/*
* If no memory-based bbt was created, exit. This will happen if module
@@ -1000,6 +1041,20 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
if (nand->bbt == NULL) /* no memory-based bbt */
goto exit;
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ /*
+ * Whoops, an ecc failure occurred reading the factory bbt.
+ * It is stored redundantly, so we get another chance.
+ */
+ eccfailed_stats = mtd->ecc_stats.failed;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ dev_warn(doc->dev,
+ "The factory bbt could not be read!\n");
+ goto exit;
+ }
+ }
+
/*
* Parse factory bbt and update memory-based bbt. Factory bbt format is
* simple: one bit per block, block numbers increase left to right (msb
@@ -1019,7 +1074,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
}
exit:
kfree(buf);
- return status;
+ return 0;
}
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index cc1480a5e4c..20657209a47 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -109,20 +109,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
};
/*
- * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
- * 1, so we have to adjust bad block pattern. This pattern should be used for
- * x8 chips only. So far hardware does not support x16 chips anyway.
- */
-static u8 scan_ff_pattern[] = { 0xff, };
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
-/*
* ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
* interfere with ECC positions, that's why we implement our own descriptors.
* OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -699,7 +685,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
&fsl_elbc_oob_lp_eccm1 :
&fsl_elbc_oob_lp_eccm0;
- chip->badblock_pattern = &largepage_memorybased;
}
} else {
dev_err(priv->dev,
@@ -814,7 +799,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
static DEFINE_MUTEX(fsl_elbc_nand_mutex);
-static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
{
struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 3551a99076b..ad6222627fe 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -389,7 +389,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
timing = IFC_FIR_OP_RBCD;
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(timing << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -754,7 +754,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
/* READID */
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -922,7 +922,7 @@ static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
static DEFINE_MUTEX(fsl_ifc_nand_mutex);
-static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
+static int fsl_ifc_nand_probe(struct platform_device *dev)
{
struct fsl_ifc_regs __iomem *ifc;
struct fsl_ifc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 45df542b9c6..5a8f5c4ce51 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -152,7 +152,7 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
fun_wait_rnb(fun);
}
-static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
+static int fun_chip_init(struct fsl_upm_nand *fun,
const struct device_node *upm_np,
const struct resource *io_res)
{
@@ -201,7 +201,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev)
+static int fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -318,7 +318,7 @@ err1:
return ret;
}
-static int __devexit fun_remove(struct platform_device *ofdev)
+static int fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
int i;
@@ -350,7 +350,7 @@ static struct platform_driver of_fun_driver = {
.of_match_table = of_fun_match,
},
.probe = fun_probe,
- .remove = __devexit_p(fun_remove),
+ .remove = fun_remove,
};
module_platform_driver(of_fun_driver);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 38d26240d8b..1d7446434b0 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -361,7 +361,7 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
struct nand_chip *this = mtd->priv;
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- void *__iomem *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -383,13 +383,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
pc |= FSMC_ENABLE;
else
pc &= ~FSMC_ENABLE;
- writel(pc, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
}
mb();
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb_relaxed(cmd, this->IO_ADDR_W);
}
/*
@@ -426,14 +426,18 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (busw)
- writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_16,
+ FSMC_NAND_REG(regs, bank, PC));
else
- writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_8,
+ FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
FSMC_NAND_REG(regs, bank, PC));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, COMM));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, ATTRIB));
}
/*
@@ -446,11 +450,11 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
}
@@ -470,7 +474,7 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
- if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+ if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
break;
else
cond_resched();
@@ -481,25 +485,25 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
return -ETIMEDOUT;
}
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
ecc[3] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
ecc[4] = (uint8_t) (ecc_tmp >> 0);
ecc[5] = (uint8_t) (ecc_tmp >> 8);
ecc[6] = (uint8_t) (ecc_tmp >> 16);
ecc[7] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
ecc[8] = (uint8_t) (ecc_tmp >> 0);
ecc[9] = (uint8_t) (ecc_tmp >> 8);
ecc[10] = (uint8_t) (ecc_tmp >> 16);
ecc[11] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
ecc[12] = (uint8_t) (ecc_tmp >> 16);
return 0;
@@ -519,7 +523,7 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
uint32_t bank = host->bank;
uint32_t ecc_tmp;
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -601,7 +605,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_async_issue_pending(chan);
ret =
- wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+ wait_for_completion_timeout(&host->dma_access_complete,
msecs_to_jiffies(3000));
if (ret <= 0) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -628,10 +632,10 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- writel(p[i], chip->IO_ADDR_W);
+ writel_relaxed(p[i], chip->IO_ADDR_W);
} else {
for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
+ writeb_relaxed(buf[i], chip->IO_ADDR_W);
}
}
@@ -651,10 +655,10 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- p[i] = readl(chip->IO_ADDR_R);
+ p[i] = readl_relaxed(chip->IO_ADDR_R);
} else {
for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ buf[i] = readb_relaxed(chip->IO_ADDR_R);
}
}
@@ -783,7 +787,7 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
uint32_t num_err, i;
uint32_t ecc1, ecc2, ecc3, ecc4;
- num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+ num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
/* no bit flipping */
if (likely(num_err == 0))
@@ -826,10 +830,10 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
* uint64_t array and error offset indexes are populated in err_idx
* array
*/
- ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
- ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
- ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
- ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
err_idx[0] = (ecc1 >> 0) & 0x1FFF;
err_idx[1] = (ecc1 >> 13) & 0x1FFF;
@@ -860,7 +864,7 @@ static bool filter(struct dma_chan *chan, void *slave)
}
#ifdef CONFIG_OF
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -876,15 +880,13 @@ static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
return -EINVAL;
}
}
- of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
- of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
if (of_get_property(np, "nand-skip-bbtscan", NULL))
pdata->options = NAND_SKIP_BBTSCAN;
return 0;
}
#else
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
return -ENOSYS;
@@ -935,41 +937,28 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory data resourse\n");
- return -ENOENT;
- }
-
- host->data_pa = (dma_addr_t)res->start;
- host->data_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ host->data_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->data_va) {
dev_err(&pdev->dev, "data ioremap failed\n");
return -ENOMEM;
}
+ host->data_pa = (dma_addr_t)res->start;
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory ale resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ if (!res)
+ return -EINVAL;
- host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res));
+ host->addr_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->addr_va) {
dev_err(&pdev->dev, "ale ioremap failed\n");
return -ENOMEM;
}
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory cle resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ if (!res)
+ return -EINVAL;
- host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res));
+ host->cmd_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->cmd_va) {
dev_err(&pdev->dev, "ale ioremap failed\n");
return -ENOMEM;
@@ -979,14 +968,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
- return -ENOENT;
- }
-
- host->regs_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ host->regs_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->regs_va) {
dev_err(&pdev->dev, "regs ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index bc73bc5f271..e789e3f5171 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -90,14 +90,14 @@ static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- writesb(this->IO_ADDR_W, buf, len);
+ iowrite8_rep(this->IO_ADDR_W, buf, len);
}
static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- readsb(this->IO_ADDR_R, buf, len);
+ ioread8_rep(this->IO_ADDR_R, buf, len);
}
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
@@ -106,7 +106,7 @@ static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- writesw(this->IO_ADDR_W, buf, len>>1);
+ iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -121,7 +121,7 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- readsw(this->IO_ADDR_R, buf, len>>1);
+ ioread16_rep(this->IO_ADDR_R, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -134,7 +134,11 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ return 1;
}
#ifdef CONFIG_OF
@@ -227,7 +231,7 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
-static int __devexit gpio_nand_remove(struct platform_device *dev)
+static int gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
struct resource *res;
@@ -252,7 +256,8 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
gpio_free(gpiomtd->plat.gpio_nce);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_free(gpiomtd->plat.gpio_nwp);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
kfree(gpiomtd);
@@ -277,7 +282,7 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
return ptr;
}
-static int __devinit gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *dev)
{
struct gpiomtd *gpiomtd;
struct nand_chip *this;
@@ -336,10 +341,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
if (ret)
goto err_cle;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
- ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
- if (ret)
- goto err_rdy;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+ ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+ if (ret)
+ goto err_rdy;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ }
this->IO_ADDR_W = this->IO_ADDR_R;
@@ -386,7 +393,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
err_rdy:
gpio_free(gpiomtd->plat.gpio_cle);
err_cle:
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 3502accd4bc..d84699c7968 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -18,7 +18,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/mtd/gpmi-nand.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -166,6 +165,15 @@ int gpmi_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
/* Choose NAND mode. */
writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d79696b2f19..5cd141f7bfc 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
@@ -33,6 +32,12 @@
#include <linux/of_mtd.h>
#include "gpmi-nand.h"
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
+
/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -222,7 +227,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
- pr_err("map failed.\n");
+ pr_err("DMA mapping failed.\n");
this->direct_dma_map_ok = false;
}
@@ -314,7 +319,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
return 0;
}
-static int __devinit
+static int
acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
{
struct platform_device *pdev = this->pdev;
@@ -355,7 +360,7 @@ static void release_register_block(struct gpmi_nand_data *this)
res->bch_regs = NULL;
}
-static int __devinit
+static int
acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
@@ -422,7 +427,7 @@ static void release_dma_channels(struct gpmi_nand_data *this)
}
}
-static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct resource *r_dma;
@@ -456,7 +461,7 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
if (!dma_chan) {
- pr_err("dma_request_channel failed.\n");
+ pr_err("Failed to request DMA channel.\n");
goto acquire_err;
}
@@ -487,7 +492,7 @@ static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
-static int __devinit gpmi_get_clks(struct gpmi_nand_data *this)
+static int gpmi_get_clks(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
char **extra_clks = NULL;
@@ -533,7 +538,7 @@ err_clock:
return -ENOMEM;
}
-static int __devinit acquire_resources(struct gpmi_nand_data *this)
+static int acquire_resources(struct gpmi_nand_data *this)
{
struct pinctrl *pinctrl;
int ret;
@@ -583,7 +588,7 @@ static void release_resources(struct gpmi_nand_data *this)
release_dma_channels(this);
}
-static int __devinit init_hardware(struct gpmi_nand_data *this)
+static int init_hardware(struct gpmi_nand_data *this)
{
int ret;
@@ -625,7 +630,8 @@ static int read_page_prepare(struct gpmi_nand_data *this,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -675,7 +681,8 @@ static int send_page_prepare(struct gpmi_nand_data *this,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, source_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -763,7 +770,7 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
error_alloc:
gpmi_free_dma_buffer(this);
- pr_err("allocate DMA buffer ret!!\n");
+ pr_err("Error allocating DMA buffers!\n");
return -ENOMEM;
}
@@ -1474,7 +1481,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
- pr_err("set geometry ret : %d\n", ret);
+ pr_err("Error setting BCH geometry : %d\n", ret);
return ret;
}
@@ -1535,7 +1542,7 @@ static void gpmi_nfc_exit(struct gpmi_nand_data *this)
gpmi_free_dma_buffer(this);
}
-static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nfc_init(struct gpmi_nand_data *this)
{
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
@@ -1618,7 +1625,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
-static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+static int gpmi_nand_probe(struct platform_device *pdev)
{
struct gpmi_nand_data *this;
const struct of_device_id *of_id;
@@ -1668,7 +1675,7 @@ exit_acquire_resources:
return ret;
}
-static int __devexit gpmi_nand_remove(struct platform_device *pdev)
+static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
@@ -1685,7 +1692,7 @@ static struct platform_driver gpmi_nand_driver = {
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
- .remove = __devexit_p(gpmi_nand_remove),
+ .remove = gpmi_nand_remove,
.id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 7ac25c1e58f..3d93a5e3909 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -130,7 +130,6 @@ struct gpmi_nand_data {
/* System Interface */
struct device *dev;
struct platform_device *pdev;
- struct gpmi_nand_platform_data *pdata;
/* Resources */
struct resources resources;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 100b6775e17..8d415f014e1 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -316,13 +316,17 @@ err:
return ret;
}
-static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base)
+static inline void jz_nand_iounmap_resource(struct resource *res,
+ void __iomem *base)
{
iounmap(base);
release_mem_region(res->start, resource_size(res));
}
-static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) {
+static int jz_nand_detect_bank(struct platform_device *pdev,
+ struct jz_nand *nand, unsigned char bank,
+ size_t chipnr, uint8_t *nand_maf_id,
+ uint8_t *nand_dev_id) {
int ret;
int gpio;
char gpio_name[9];
@@ -400,7 +404,7 @@ notfound_gpio:
return ret;
}
-static int __devinit jz_nand_probe(struct platform_device *pdev)
+static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
struct jz_nand *nand;
@@ -541,7 +545,7 @@ err_free:
return ret;
}
-static int __devexit jz_nand_remove(struct platform_device *pdev)
+static int jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -573,7 +577,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
- .remove = __devexit_p(jz_nand_remove),
+ .remove = jz_nand_remove,
.driver = {
.name = "jz4740-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c29b7ac1f6a..f182befa736 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -655,7 +655,7 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -845,7 +845,7 @@ err_exit1:
/*
* Remove NAND device
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -907,7 +907,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 32409c45d47..030b78c6289 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -755,7 +755,7 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -949,7 +949,7 @@ err_exit1:
/*
* Remove NAND device.
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
@@ -1021,7 +1021,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index f776c8577b8..3c9cdcbc4cb 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op)
+static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -827,7 +827,7 @@ error:
return retval;
}
-static int __devexit mpc5121_nfc_remove(struct platform_device *op)
+static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
@@ -841,14 +841,14 @@ static int __devexit mpc5121_nfc_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+static struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
- .remove = __devexit_p(mpc5121_nfc_remove),
+ .remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 022dcdc256f..45204e41a02 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -266,7 +266,8 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
@@ -1378,7 +1379,7 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
}
#endif
-static int __devinit mxcnd_probe(struct platform_device *pdev)
+static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
@@ -1556,12 +1557,13 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- clk_disable_unprepare(host->clk);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __devexit mxcnd_remove(struct platform_device *pdev)
+static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
@@ -1580,7 +1582,7 @@ static struct platform_driver mxcnd_driver = {
},
.id_table = mxcnd_devtype,
.probe = mxcnd_probe,
- .remove = __devexit_p(mxcnd_remove),
+ .remove = mxcnd_remove,
};
module_platform_driver(mxcnd_driver);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1a03b7f673c..8323ac991ad 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -93,8 +93,7 @@ static struct nand_ecclayout nand_oob_128 = {
.length = 78} }
};
-static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
- int new_state);
+static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
@@ -130,15 +129,12 @@ static int check_offs_len(struct mtd_info *mtd,
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
*
- * Deselect, release chip lock and wake up anyone waiting on the device.
+ * Release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* De-select the NAND device */
- chip->select_chip(mtd, -1);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
@@ -160,7 +156,7 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
}
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* @mtd: MTD device structure
*
@@ -303,7 +299,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
if (getchip) {
chipnr = (int)(ofs >> chip->chip_shift);
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
@@ -333,8 +329,10 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
i++;
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip)
+ if (getchip) {
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
+ }
return res;
}
@@ -383,7 +381,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_oob_ops ops;
loff_t wr_ofs = ofs;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.datbuf = NULL;
ops.oobbuf = buf;
@@ -492,7 +490,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- unsigned long timeo = jiffies + 2;
+ unsigned long timeo = jiffies + msecs_to_jiffies(20);
/* 400ms timeout */
if (in_interrupt() || oops_in_progress)
@@ -750,15 +748,15 @@ static void panic_nand_get_device(struct nand_chip *chip,
/**
* nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
static int
-nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+nand_get_device(struct mtd_info *mtd, int new_state)
{
+ struct nand_chip *chip = mtd->priv;
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
@@ -865,6 +863,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
return status;
}
@@ -899,7 +899,7 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -932,7 +932,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ofs + len == mtd->size)
len -= mtd->erasesize;
- nand_get_device(chip, mtd, FL_UNLOCKING);
+ nand_get_device(mtd, FL_UNLOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -950,6 +950,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -981,7 +982,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
- nand_get_device(chip, mtd, FL_LOCKING);
+ nand_get_device(mtd, FL_LOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -1004,7 +1005,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -1014,6 +1015,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -1550,6 +1552,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -1577,11 +1580,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
ops.len = len;
ops.datbuf = buf;
ops.oobbuf = NULL;
@@ -1804,6 +1806,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->oobretlen = ops->ooblen - readlen;
@@ -1827,7 +1830,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -1839,7 +1841,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2186,8 +2188,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
- return -EIO;
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -2199,8 +2203,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
/* Don't allow multipage oob writes with offset */
- if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
- return -EINVAL;
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
while (1) {
int bytes = mtd->writesize;
@@ -2251,6 +2257,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->retlen = ops->len - writelen;
if (unlikely(oob))
ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
return ret;
}
@@ -2302,11 +2311,10 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
@@ -2377,8 +2385,10 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
return -EROFS;
+ }
/* Invalidate the page cache, if we write to the cached page */
if (page == chip->pagebuf)
@@ -2391,6 +2401,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
else
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+ chip->select_chip(mtd, -1);
+
if (status)
return status;
@@ -2408,7 +2420,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -2420,7 +2431,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2513,7 +2524,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_ERASING);
+ nand_get_device(mtd, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -2623,6 +2634,7 @@ erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
/* Do call back function */
@@ -2658,12 +2670,10 @@ erase_exit:
*/
static void nand_sync(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_SYNCING);
+ nand_get_device(mtd, FL_SYNCING);
/* Release it and go back */
nand_release_device(mtd);
}
@@ -2749,9 +2759,7 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
*/
static int nand_suspend(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
- return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
@@ -2849,6 +2857,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
+ /* ONFI needs to be probed in 8-bit mode */
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
@@ -2913,7 +2923,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
*
* Check if an ID string is repeated within a given sequence of bytes at
* specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 2). This is a helper function for nand_id_len(). Returns non-zero
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
* if the repetition has a period of @period; otherwise, returns zero.
*/
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
@@ -3242,11 +3252,15 @@ ident_done:
break;
}
- /*
- * Check, if buswidth is correct. Hardware drivers should set
- * chip correct!
- */
- if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * chip correct!
+ */
pr_info("NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
@@ -3285,10 +3299,10 @@ ident_done:
chip->cmdfunc = nand_command_lp;
pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " page size: %d, OOB size: %d\n",
+ " %dMiB, page size: %d, OOB size: %d\n",
*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name,
- mtd->writesize, mtd->oobsize);
+ (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
return type;
}
@@ -3327,6 +3341,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ chip->select_chip(mtd, -1);
+
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
chip->select_chip(mtd, i);
@@ -3336,8 +3352,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
break;
+ }
+ chip->select_chip(mtd, -1);
}
if (i > 1)
pr_info("%d NAND chips detected\n", i);
@@ -3596,9 +3615,6 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Initialize state */
chip->state = FL_READY;
- /* De-select the device */
- chip->select_chip(mtd, -1);
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a932c485eb0..818b65c85d1 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -42,6 +42,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -105,7 +107,6 @@ static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
-static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
@@ -130,7 +131,6 @@ module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
-module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
@@ -162,7 +162,6 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
-MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
@@ -286,6 +285,11 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
+struct nandsim_debug_info {
+ struct dentry *dfs_root;
+ struct dentry *dfs_wear_report;
+};
+
/*
* A union to represent flash memory contents and flash buffer.
*/
@@ -365,6 +369,8 @@ struct nandsim {
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
+
+ struct nandsim_debug_info dbg;
};
/*
@@ -442,11 +448,123 @@ static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
-static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+ .open = nandsim_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @dev. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+ struct nandsim_debug_info *dbg = &dev->dbg;
+ struct dentry *dent;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dent = debugfs_create_dir("nandsim", NULL);
+ if (IS_ERR_OR_NULL(dent)) {
+ int err = dent ? -ENODEV : PTR_ERR(dent);
+
+ NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+ err);
+ return err;
+ }
+ dbg->dfs_root = dent;
+
+ dent = debugfs_create_file("wear_report", S_IRUSR,
+ dbg->dfs_root, dev, &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dbg->dfs_wear_report = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dbg->dfs_root);
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ns->dbg.dfs_root);
+}
+
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
@@ -911,8 +1029,6 @@ static int setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
- if (!rptwear)
- return 0;
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
@@ -929,64 +1045,18 @@ static int setup_wear_reporting(struct mtd_info *mtd)
static void update_wear(unsigned int erase_block_no)
{
- unsigned long wmin = -1, wmax = 0, avg;
- unsigned long deciles[10], decile_max[10], tot = 0;
- unsigned int i;
-
if (!erase_block_wear)
return;
total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
- rptwear_cnt += 1;
- if (rptwear_cnt < rptwear)
- return;
- rptwear_cnt = 0;
- /* Calc wear stats */
- for (i = 0; i < wear_eb_count; ++i) {
- unsigned long wear = erase_block_wear[i];
- if (wear < wmin)
- wmin = wear;
- if (wear > wmax)
- wmax = wear;
- tot += wear;
- }
- for (i = 0; i < 9; ++i) {
- deciles[i] = 0;
- decile_max[i] = (wmax * (i + 1) + 5) / 10;
- }
- deciles[9] = 0;
- decile_max[9] = wmax;
- for (i = 0; i < wear_eb_count; ++i) {
- int d;
- unsigned long wear = erase_block_wear[i];
- for (d = 0; d < 10; ++d)
- if (wear <= decile_max[d]) {
- deciles[d] += 1;
- break;
- }
- }
- avg = tot / wear_eb_count;
- /* Output wear report */
- NS_INFO("*** Wear Report ***\n");
- NS_INFO("Total numbers of erases: %lu\n", tot);
- NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
- NS_INFO("Average number of erases: %lu\n", avg);
- NS_INFO("Maximum number of erases: %lu\n", wmax);
- NS_INFO("Minimum number of erases: %lu\n", wmin);
- for (i = 0; i < 10; ++i) {
- unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
- if (from > decile_max[i])
- continue;
- NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
- from,
- decile_max[i],
- deciles[i]);
- }
- NS_INFO("*** End of Wear Report ***\n");
}
/*
@@ -1397,10 +1467,7 @@ int do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
+ prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
@@ -2330,6 +2397,9 @@ static int __init ns_init_module(void)
if ((retval = setup_wear_reporting(nsmtd)) != 0)
goto err_exit;
+ if ((retval = nandsim_debugfs_create(nand)) != 0)
+ goto err_exit;
+
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
@@ -2369,6 +2439,7 @@ static void __exit ns_cleanup_module(void)
struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
+ nandsim_debugfs_remove(ns);
free_nandsim(ns); /* Free nandsim private resources */
nand_release(nsmtd); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 5fd3f010e3a..8e148f1478f 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -197,7 +197,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev)
+static int ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc;
const __be32 *reg;
@@ -256,7 +256,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
return 0;
}
-static int __devexit ndfc_remove(struct platform_device *ofdev)
+static int ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
@@ -279,7 +279,7 @@ static struct platform_driver ndfc_driver = {
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
- .remove = __devexit_p(ndfc_remove),
+ .remove = ndfc_remove,
};
module_platform_driver(ndfc_driver);
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
deleted file mode 100644
index 9ee0c4edfac..00000000000
--- a/drivers/mtd/nand/nomadik_nand.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * drivers/mtd/nand/nomadik_nand.c
- *
- * Overview:
- * Driver for on-board NAND flash on Nomadik Platforms
- *
- * Copyright © 2007 STMicroelectronics Pvt. Ltd.
- * Author: Sachin Verma <sachin.verma@st.com>
- *
- * Copyright © 2009 Alessandro Rubini
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/platform_data/mtd-nomadik-nand.h>
-#include <mach/fsmc.h>
-
-#include <mtd/mtd-abi.h>
-
-struct nomadik_nand_host {
- struct mtd_info mtd;
- struct nand_chip nand;
- void __iomem *data_va;
- void __iomem *cmd_va;
- void __iomem *addr_va;
- struct nand_bbt_descr *bbt_desc;
-};
-
-static struct nand_ecclayout nomadik_ecc_layout = {
- .eccbytes = 3 * 4,
- .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
- 0x02, 0x03, 0x04,
- 0x12, 0x13, 0x14,
- 0x22, 0x23, 0x24,
- 0x32, 0x33, 0x34},
- /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
- .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
-};
-
-static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
-{
- /* No need to enable hw ecc, it's on by default */
-}
-
-static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *nand = mtd->priv;
- struct nomadik_nand_host *host = nand->priv;
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, host->cmd_va);
- else
- writeb(cmd, host->addr_va);
-}
-
-static int nomadik_nand_probe(struct platform_device *pdev)
-{
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nomadik_nand_host *host;
- struct mtd_info *mtd;
- struct nand_chip *nand;
- struct resource *res;
- int ret = 0;
-
- /* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
- return -ENOMEM;
- }
-
- /* Call the client's init function, if any */
- if (pdata->init)
- ret = pdata->init();
- if (ret < 0) {
- dev_err(&pdev->dev, "Init function failed\n");
- goto err;
- }
-
- /* ioremap three regions */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->addr_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->data_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->cmd_va = ioremap(res->start, resource_size(res));
-
- if (!host->addr_va || !host->data_va || !host->cmd_va) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- /* Link all private pointers */
- mtd = &host->mtd;
- nand = &host->nand;
- mtd->priv = nand;
- nand->priv = host;
-
- host->mtd.owner = THIS_MODULE;
- nand->IO_ADDR_R = host->data_va;
- nand->IO_ADDR_W = host->data_va;
- nand->cmd_ctrl = nomadik_cmd_ctrl;
-
- /*
- * This stanza declares ECC_HW but uses soft routines. It's because
- * HW claims to make the calculation but not the correction. However,
- * I haven't managed to get the desired data out of it until now.
- */
- nand->ecc.mode = NAND_ECC_SOFT;
- nand->ecc.layout = &nomadik_ecc_layout;
- nand->ecc.hwctl = nomadik_ecc_control;
- nand->ecc.size = 512;
- nand->ecc.bytes = 3;
-
- nand->options = pdata->options;
-
- /*
- * Scan to find existence of the device
- */
- if (nand_scan(&host->mtd, 1)) {
- ret = -ENXIO;
- goto err_unmap;
- }
-
- mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
-
- platform_set_drvdata(pdev, host);
- return 0;
-
- err_unmap:
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->data_va)
- iounmap(host->data_va);
- if (host->addr_va)
- iounmap(host->addr_va);
- err:
- kfree(host);
- return ret;
-}
-
-/*
- * Clean up routine
- */
-static int nomadik_nand_remove(struct platform_device *pdev)
-{
- struct nomadik_nand_host *host = platform_get_drvdata(pdev);
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
-
- if (pdata->exit)
- pdata->exit();
-
- if (host) {
- nand_release(&host->mtd);
- iounmap(host->cmd_va);
- iounmap(host->data_va);
- iounmap(host->addr_va);
- kfree(host);
- }
- return 0;
-}
-
-static int nomadik_nand_suspend(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- int ret = 0;
- if (host)
- ret = mtd_suspend(&host->mtd);
- return ret;
-}
-
-static int nomadik_nand_resume(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- if (host)
- mtd_resume(&host->mtd);
- return 0;
-}
-
-static const struct dev_pm_ops nomadik_nand_pm_ops = {
- .suspend = nomadik_nand_suspend,
- .resume = nomadik_nand_resume,
-};
-
-static struct platform_driver nomadik_nand_driver = {
- .probe = nomadik_nand_probe,
- .remove = nomadik_nand_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "nomadik_nand",
- .pm = &nomadik_nand_pm_ops,
- },
-};
-
-module_platform_driver(nomadik_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
-MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 94dc46bc118..a6191198d25 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -246,7 +246,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit nuc900_nand_probe(struct platform_device *pdev)
+static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
@@ -317,7 +317,7 @@ fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit nuc900_nand_remove(struct platform_device *pdev)
+static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
@@ -340,7 +340,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
static struct platform_driver nuc900_nand_driver = {
.probe = nuc900_nand_probe,
- .remove = __devexit_p(nuc900_nand_remove),
+ .remove = nuc900_nand_remove,
.driver = {
.name = "nuc900-fmi",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5c8978e9024..0002d5e94f0 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -27,7 +27,6 @@
#include <linux/bch.h>
#endif
-#include <plat-omap/dma-omap.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
@@ -1324,7 +1323,7 @@ static void omap3_free_bch(struct mtd_info *mtd)
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */
-static int __devinit omap_nand_probe(struct platform_device *pdev)
+static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index aefaf8cd31e..cd72b9299f6 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -194,7 +194,7 @@ no_res:
return ret;
}
-static int __devexit orion_nand_remove(struct platform_device *pdev)
+static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
@@ -223,7 +223,7 @@ static struct of_device_id orion_nand_of_match_table[] = {
#endif
static struct platform_driver orion_nand_driver = {
- .remove = __devexit_p(orion_nand_remove),
+ .remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 1440e51cedc..5a67082c07e 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
+static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -184,7 +184,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
+static int pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a47ee68a0cf..c004566a9ad 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -28,7 +28,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit plat_nand_probe(struct platform_device *pdev)
+static int plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct mtd_part_parser_data ppdata;
@@ -134,7 +134,7 @@ out_free:
/*
* Remove a NAND device.
*/
-static int __devexit plat_nand_remove(struct platform_device *pdev)
+static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = pdev->dev.platform_data;
@@ -160,7 +160,7 @@ MODULE_DEVICE_TABLE(of, plat_nand_match);
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
+ .remove = plat_nand_remove,
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 295e4bedad9..df954b4dcba 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -730,11 +730,14 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
- if (set)
+ if (set) {
mtd->mtd.name = set->name;
- return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
set->partitions, set->nr_partitions);
+ }
+
+ return -ENODEV;
}
/**
@@ -879,7 +882,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
if (chip->ecc.mode != NAND_ECC_HW)
return;
- /* change the behaviour depending on wether we are using
+ /* change the behaviour depending on whether we are using
* the large or small page nand device */
if (chip->page_shift > 10) {
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index f48ac5d80bb..57b3971c9c0 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,11 +23,18 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -106,6 +113,84 @@ static void wait_completion(struct sh_flctl *flctl)
writeb(0x0, FLTRCR(flctl));
}
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* Either use DMA for both Tx and Rx, or don't use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = pdata->slave_id_fifo0_tx;
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.slave_id = pdata->slave_id_fifo0_rx;
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -225,7 +310,7 @@ static enum flctl_ecc_res_t wait_recfifo_ready
for (i = 0; i < 3; i++) {
uint8_t org;
- int index;
+ unsigned int index;
data = readl(ecc_reg[i]);
@@ -261,6 +346,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie = -EINVAL;
+ uint32_t reg;
+ int ret;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (dma_addr)
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret =
+ wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret > 0 is success */
+ return ret;
+}
+
static void read_datareg(struct sh_flctl *flctl, int offset)
{
unsigned long data;
@@ -279,11 +428,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
len_4align = (rlen + 3) / 4;
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
buf[i] = readl(FLDTFIFO(flctl));
- buf[i] = be32_to_cpu(buf[i]);
}
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
}
static enum flctl_ecc_res_t read_ecfiforeg
@@ -305,28 +463,39 @@ static enum flctl_ecc_res_t read_ecfiforeg
return res;
}
-static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_wfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), fifo_addr);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
}
}
-static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ return; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_wecfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
+ writel(buf[i], FLECFIFO(flctl));
}
}
@@ -750,41 +919,35 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(&flctl->done_buff[index], buf, len);
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
uint8_t data;
- data = flctl->done_buff[index];
+ data = flctl->done_buff[flctl->index];
flctl->index++;
return data;
}
static uint16_t flctl_read_word(struct mtd_info *mtd)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- uint16_t data;
- uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
- data = *buf;
- flctl->index += 2;
- return data;
+ flctl->index += 2;
+ return *buf;
}
static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(buf, &flctl->done_buff[index], len);
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
flctl->index += len;
}
@@ -858,7 +1021,74 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit flctl_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+ struct device_node *dn = dev->of_node;
+ int ret;
+
+ match = of_match_device(of_flctl_match, dev);
+ if (match)
+ config = (struct flctl_soc_config *)match->data;
+ else {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "%s: failed to allocate config data\n", __func__);
+ return NULL;
+ }
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ /* parse user defined options */
+ ret = of_get_nand_bus_width(dn);
+ if (ret == 16)
+ pdata->flcmncr_val |= SEL_16BIT;
+ else if (ret != 8) {
+ dev_err(dev, "%s: invalid bus width\n", __func__);
+ return NULL;
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+#define of_flctl_match NULL
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static int flctl_probe(struct platform_device *pdev)
{
struct resource *res;
struct sh_flctl *flctl;
@@ -867,12 +1097,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
struct sh_flctl_platform_data *pdata;
int ret = -ENXIO;
int irq;
-
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
+ struct mtd_part_parser_data ppdata = {};
flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
if (!flctl) {
@@ -904,6 +1129,17 @@ static int __devinit flctl_probe(struct platform_device *pdev)
goto err_flste;
}
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
platform_set_drvdata(pdev, flctl);
flctl_mtd = &flctl->mtd;
nand = &flctl->chip;
@@ -932,6 +1168,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ flctl_setup_dma(flctl);
+
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err_chip;
@@ -944,12 +1182,16 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err_chip;
- mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
return 0;
err_chip:
+ flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
+err_pdata:
free_irq(irq, flctl);
err_flste:
iounmap(flctl->reg);
@@ -958,10 +1200,11 @@ err_iomap:
return ret;
}
-static int __devexit flctl_remove(struct platform_device *pdev)
+static int flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ flctl_release_dma(flctl);
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
free_irq(platform_get_irq(pdev, 0), flctl);
@@ -976,6 +1219,7 @@ static struct platform_driver flctl_driver = {
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
+ .of_match_table = of_flctl_match,
},
};
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 3421e3762a5..127bc427182 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -106,7 +106,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
/*
* Main initialization routine
*/
-static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
+static int sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *r;
@@ -205,7 +205,7 @@ err_get_res:
/*
* Clean up routine
*/
-static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
+static int sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
@@ -228,7 +228,7 @@ static struct platform_driver sharpsl_nand_driver = {
.owner = THIS_MODULE,
},
.probe = sharpsl_nand_probe,
- .remove = __devexit_p(sharpsl_nand_remove),
+ .remove = sharpsl_nand_remove,
};
module_platform_driver(sharpsl_nand_driver);
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index f3f28fafbf7..09dde7d2717 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -140,7 +140,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev)
+static int socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
@@ -220,7 +220,7 @@ out:
/*
* Remove a NAND device.
*/
-static int __devexit socrates_nand_remove(struct platform_device *ofdev)
+static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = &host->mtd;
@@ -251,7 +251,7 @@ static struct platform_driver socrates_nand_driver = {
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
- .remove = __devexit_p(socrates_nand_remove),
+ .remove = socrates_nand_remove,
};
module_platform_driver(socrates_nand_driver);
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d9127e2ed80..dbd3aa574ea 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -71,7 +71,10 @@ static int parse_ofpart_partitions(struct mtd_info *master,
(*pparts)[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
- (*pparts)[i].mask_flags = MTD_WRITEABLE;
+ (*pparts)[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
i++;
}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 1c4f97c63e6..9f11562f849 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -35,7 +35,7 @@ struct onenand_info {
struct onenand_chip onenand;
};
-static int __devinit generic_onenand_probe(struct platform_device *pdev)
+static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
struct onenand_platform_data *pdata = pdev->dev.platform_data;
@@ -88,7 +88,7 @@ out_free_info:
return err;
}
-static int __devexit generic_onenand_remove(struct platform_device *pdev)
+static int generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
@@ -112,7 +112,7 @@ static struct platform_driver generic_onenand_driver = {
.owner = THIS_MODULE,
},
.probe = generic_onenand_probe,
- .remove = __devexit_p(generic_onenand_remove),
+ .remove = generic_onenand_remove,
};
module_platform_driver(generic_onenand_driver);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 99f96e19ebe..065f3fe02a2 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -41,7 +41,7 @@
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -630,7 +630,7 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
return ret;
}
-static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+static int omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
@@ -799,7 +799,7 @@ err_kfree:
return r;
}
-static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+static int omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -822,7 +822,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = __devexit_p(omap2_onenand_remove),
+ .remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 8e4b3f2742b..33f2a8fb8df 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1053,7 +1053,7 @@ onenand_fail:
return err;
}
-static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+static int s3c_onenand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
@@ -1130,7 +1130,7 @@ static struct platform_driver s3c_onenand_driver = {
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
- .remove = __devexit_p(s3c_onenand_remove),
+ .remove = s3c_onenand_remove,
};
module_platform_driver(s3c_onenand_driver);
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
index cc8d62cb280..207bf9a9972 100644
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -39,6 +39,9 @@
* this program; see the file COPYING. If not, write to the Free Software
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -47,8 +50,6 @@
#include <linux/mtd/nand.h>
#include <linux/slab.h>
-#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
-
static int dev;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -103,7 +104,7 @@ static int erase_block(void)
struct erase_info ei;
loff_t addr = eraseblock * mtd->erasesize;
- msg("erase_block\n");
+ pr_info("erase_block\n");
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = mtd;
@@ -112,7 +113,7 @@ static int erase_block(void)
err = mtd_erase(mtd, &ei);
if (err || ei.state == MTD_ERASE_FAILED) {
- msg("error %d while erasing\n", err);
+ pr_err("error %d while erasing\n", err);
if (!err)
err = -EIO;
return err;
@@ -128,11 +129,11 @@ static int write_page(int log)
size_t written;
if (log)
- msg("write_page\n");
+ pr_info("write_page\n");
err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
if (err || written != mtd->writesize) {
- msg("error: write failed at %#llx\n", (long long)offset);
+ pr_err("error: write failed at %#llx\n", (long long)offset);
if (!err)
err = -EIO;
}
@@ -147,7 +148,7 @@ static int rewrite_page(int log)
struct mtd_oob_ops ops;
if (log)
- msg("rewrite page\n");
+ pr_info("rewrite page\n");
ops.mode = MTD_OPS_RAW; /* No ECC */
ops.len = mtd->writesize;
@@ -160,7 +161,7 @@ static int rewrite_page(int log)
err = mtd_write_oob(mtd, offset, &ops);
if (err || ops.retlen != mtd->writesize) {
- msg("error: write_oob failed (%d)\n", err);
+ pr_err("error: write_oob failed (%d)\n", err);
if (!err)
err = -EIO;
}
@@ -177,7 +178,7 @@ static int read_page(int log)
struct mtd_ecc_stats oldstats;
if (log)
- msg("read_page\n");
+ pr_info("read_page\n");
/* Saving last mtd stats */
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
@@ -187,7 +188,7 @@ static int read_page(int log)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
- msg("error: read failed at %#llx\n", (long long)offset);
+ pr_err("error: read failed at %#llx\n", (long long)offset);
if (err >= 0)
err = -EIO;
}
@@ -201,11 +202,11 @@ static int verify_page(int log)
unsigned i, errs = 0;
if (log)
- msg("verify_page\n");
+ pr_info("verify_page\n");
for (i = 0; i < mtd->writesize; i++) {
if (rbuffer[i] != hash(i+seed)) {
- msg("Error: page offset %u, expected %02x, got %02x\n",
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
i, hash(i+seed), rbuffer[i]);
errs++;
}
@@ -230,13 +231,13 @@ static int insert_biterror(unsigned byte)
for (bit = 7; bit >= 0; bit--) {
if (CBIT(wbuffer[byte], bit)) {
BCLR(wbuffer[byte], bit);
- msg("Inserted biterror @ %u/%u\n", byte, bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
return 0;
}
}
byte++;
}
- msg("biterror: Failed to find a '1' bit\n");
+ pr_err("biterror: Failed to find a '1' bit\n");
return -EIO;
}
@@ -248,7 +249,7 @@ static int incremental_errors_test(void)
unsigned i;
unsigned errs_per_subpage = 0;
- msg("incremental biterrors test\n");
+ pr_info("incremental biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -265,9 +266,9 @@ static int incremental_errors_test(void)
err = read_page(1);
if (err > 0)
- msg("Read reported %d corrected bit errors\n", err);
+ pr_info("Read reported %d corrected bit errors\n", err);
if (err < 0) {
- msg("After %d biterrors per subpage, read reported error %d\n",
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
errs_per_subpage, err);
err = 0;
goto exit;
@@ -275,11 +276,11 @@ static int incremental_errors_test(void)
err = verify_page(1);
if (err) {
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_err("ECC failure, read data is incorrect despite read success\n");
goto exit;
}
- msg("Successfully corrected %d bit errors per subpage\n",
+ pr_info("Successfully corrected %d bit errors per subpage\n",
errs_per_subpage);
for (i = 0; i < subcount; i++) {
@@ -311,7 +312,7 @@ static int overwrite_test(void)
memset(bitstats, 0, sizeof(bitstats));
- msg("overwrite biterrors test\n");
+ pr_info("overwrite biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -329,18 +330,18 @@ static int overwrite_test(void)
err = read_page(0);
if (err >= 0) {
if (err >= MAXBITS) {
- msg("Implausible number of bit errors corrected\n");
+ pr_info("Implausible number of bit errors corrected\n");
err = -EIO;
break;
}
bitstats[err]++;
if (err > max_corrected) {
max_corrected = err;
- msg("Read reported %d corrected bit errors\n",
+ pr_info("Read reported %d corrected bit errors\n",
err);
}
} else { /* err < 0 */
- msg("Read reported error %d\n", err);
+ pr_info("Read reported error %d\n", err);
err = 0;
break;
}
@@ -348,7 +349,7 @@ static int overwrite_test(void)
err = verify_page(0);
if (err) {
bitstats[max_corrected] = opno;
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_info("ECC failure, read data is incorrect despite read success\n");
break;
}
@@ -357,9 +358,9 @@ static int overwrite_test(void)
/* At this point bitstats[0] contains the number of ops with no bit
* errors, bitstats[1] the number of ops with 1 bit error, etc. */
- msg("Bit error histogram (%d operations total):\n", opno);
+ pr_info("Bit error histogram (%d operations total):\n", opno);
for (i = 0; i < max_corrected; i++)
- msg("Page reads with %3d corrected bit errors: %d\n",
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
i, bitstats[i]);
exit:
@@ -370,36 +371,36 @@ static int __init mtd_nandbiterrs_init(void)
{
int err = 0;
- msg("\n");
- msg("==================================================\n");
- msg("MTD device: %d\n", dev);
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- msg("error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
goto exit_mtddev;
}
if (mtd->type != MTD_NANDFLASH) {
- msg("this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
}
- msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, mtd->oobsize);
subsize = mtd->writesize >> mtd->subpage_sft;
subcount = mtd->writesize / subsize;
- msg("Device uses %d subpages of %d bytes\n", subcount, subsize);
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
offset = page_offset * mtd->writesize;
eraseblock = mtd_div_by_eb(offset, mtd);
- msg("Using page=%u, offset=%llu, eraseblock=%u\n",
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
page_offset, offset, eraseblock);
wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
@@ -432,8 +433,8 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_error;
err = -EIO;
- msg("finished successfully.\n");
- msg("==================================================\n");
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
exit_error:
kfree(rbuffer);
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index b437fa42507..1eee264509a 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -264,13 +266,13 @@ static int nand_ecc_test_run(const size_t size)
correct_data, size);
if (err) {
- pr_err("mtd_nandecctest: not ok - %s-%zd\n",
+ pr_err("not ok - %s-%zd\n",
nand_ecc_test[i].name, size);
dump_data_ecc(error_data, error_ecc,
correct_data, correct_ecc, size);
break;
}
- pr_info("mtd_nandecctest: ok - %s-%zd\n",
+ pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
}
error:
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index ed9b62827f1..e827fa8cd84 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_oobtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -80,13 +80,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
+ pr_err("some erase error occurred at EB %d\n", ebnum);
return -EIO;
}
@@ -98,7 +97,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -107,7 +106,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -141,9 +140,9 @@ static int write_eraseblock(int ebnum)
ops.oobbuf = writebuf;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: writeoob failed at %#llx\n",
+ pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
+ pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
@@ -160,7 +159,7 @@ static int write_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -168,10 +167,10 @@ static int write_whole_device(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
return 0;
}
@@ -194,17 +193,17 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -221,29 +220,28 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
- printk(PRINT_PREF "error: readoob failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf + use_offset, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many "
- "errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -251,12 +249,12 @@ static int verify_eraseblock(int ebnum)
for (k = use_offset + use_len;
k < mtd->ecclayout->oobavail; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -286,17 +284,17 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -309,7 +307,7 @@ static int verify_all_eraseblocks(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -317,10 +315,10 @@ static int verify_all_eraseblocks(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -331,7 +329,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -341,18 +339,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kmalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -368,22 +366,22 @@ static int __init mtd_oobtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -392,7 +390,7 @@ static int __init mtd_oobtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -401,12 +399,12 @@ static int __init mtd_oobtest_init(void)
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -420,7 +418,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
- printk(PRINT_PREF "test 1 of 5\n");
+ pr_info("test 1 of 5\n");
err = erase_whole_device();
if (err)
@@ -440,7 +438,7 @@ static int __init mtd_oobtest_init(void)
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
- printk(PRINT_PREF "test 2 of 5\n");
+ pr_info("test 2 of 5\n");
err = erase_whole_device();
if (err)
@@ -453,7 +451,7 @@ static int __init mtd_oobtest_init(void)
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -461,16 +459,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
- printk(PRINT_PREF "test 3 of 5\n");
+ pr_info("test 3 of 5\n");
err = erase_whole_device();
if (err)
@@ -503,7 +501,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* Fourth test: try to write off end of device */
- printk(PRINT_PREF "test 4 of 5\n");
+ pr_info("test 4 of 5\n");
err = erase_whole_device();
if (err)
@@ -522,14 +520,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to start write past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can write past end of OOB\n");
+ pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
@@ -542,19 +540,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to start read past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can read past end of OOB\n");
+ pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
- printk(PRINT_PREF "skipping end of device tests because last "
+ pr_info("skipping end of device tests because last "
"block is bad\n");
else {
/* Attempt to write off end of device */
@@ -566,14 +564,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -586,14 +584,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
@@ -610,14 +608,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -630,20 +628,20 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
- printk(PRINT_PREF "test 5 of 5\n");
+ pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
err = erase_whole_device();
@@ -652,7 +650,7 @@ static int __init mtd_oobtest_init(void)
/* Write all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
@@ -674,17 +672,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock "
- "%u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
addr += mtd->writesize;
}
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
@@ -702,28 +699,28 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
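The change repeated across each of these mtd test modules is the same two-step: define pr_fmt() before the first include so every pr_*() call automatically carries a "KBUILD_MODNAME: " prefix, then drop the hand-rolled PRINT_PREF macro and express the severity through the function name (pr_info/pr_err/pr_crit) instead of a KERN_* constant glued into the prefix. A minimal sketch of that convention, separate from the patch and using hypothetical prefix_demo_* names (assuming the object is built as prefix_demo.o, so KBUILD_MODNAME expands to "prefix_demo"):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init prefix_demo_init(void)
{
	/* Logged as "prefix_demo: starting" at KERN_INFO. */
	pr_info("starting\n");
	/* Same prefix, but emitted at KERN_ERR: the level lives in the call. */
	pr_err("error: demo failure %d\n", -EIO);
	return 0;
}

static void __exit prefix_demo_exit(void)
{
	pr_info("done\n");
}

module_init(prefix_demo_init);
module_exit(prefix_demo_exit);
MODULE_LICENSE("GPL");

Note that pr_fmt() has to appear before any header that drags in <linux/printk.h>, or the default empty prefix wins; that is why the #define lands above the include block in every file touched here. The bare printk(KERN_INFO "====...") separator lines are left as printk(), presumably so they do not pick up the module prefix.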
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 252ddb092fb..f93a76f8811 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_pagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -79,12 +79,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -102,7 +102,7 @@ static int write_eraseblock(int ebnum)
cond_resched();
err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
return err;
@@ -131,7 +131,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -139,7 +139,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -148,12 +148,12 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
break;
}
if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -166,7 +166,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -174,7 +174,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -183,14 +183,14 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err;
}
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
set_random_data(boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -206,10 +206,10 @@ static int crosstest(void)
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
- printk(PRINT_PREF "crosstest\n");
+ pr_info("crosstest\n");
pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
if (!pp1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
pp2 = pp1 + pgsize;
@@ -231,7 +231,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -243,7 +243,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -251,12 +251,12 @@ static int crosstest(void)
/* Read first page to pp2 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -264,12 +264,12 @@ static int crosstest(void)
/* Read last page to pp3 */
addr = addrn - pgsize;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -277,25 +277,25 @@ static int crosstest(void)
/* Read first page again to pp4 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
}
/* pp2 and pp4 should be the same */
- printk(PRINT_PREF "verifying pages read at %#llx match\n",
+ pr_info("verifying pages read at %#llx match\n",
(long long)addr0);
if (memcmp(pp2, pp4, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
} else if (!err)
- printk(PRINT_PREF "crosstest ok\n");
+ pr_info("crosstest ok\n");
kfree(pp1);
return err;
}
@@ -307,7 +307,7 @@ static int erasecrosstest(void)
loff_t addr0;
char *readbuf = twopages;
- printk(PRINT_PREF "erasecrosstest\n");
+ pr_info("erasecrosstest\n");
ebnum = 0;
addr0 = 0;
@@ -320,79 +320,79 @@ static int erasecrosstest(void)
while (ebnum2 && bbt[ebnum2])
ebnum2 -= 1;
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_info("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum2);
+ pr_info("erasing block %d\n", ebnum2);
err = erase_eraseblock(ebnum2);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
if (!err)
- printk(PRINT_PREF "erasecrosstest ok\n");
+ pr_info("erasecrosstest ok\n");
return err;
}
@@ -402,7 +402,7 @@ static int erasetest(void)
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
- printk(PRINT_PREF "erasetest\n");
+ pr_info("erasetest\n");
ebnum = 0;
addr0 = 0;
@@ -411,40 +411,40 @@ static int erasetest(void)
ebnum += 1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
+ pr_info("verifying 1st page of block %d is all 0xff\n",
ebnum);
for (i = 0; i < pgsize; ++i)
if (twopages[i] != 0xff) {
- printk(PRINT_PREF "verifying all 0xff failed at %d\n",
+ pr_err("verifying all 0xff failed at %d\n",
i);
errcnt += 1;
ok = 0;
@@ -452,7 +452,7 @@ static int erasetest(void)
}
if (ok && !err)
- printk(PRINT_PREF "erasetest ok\n");
+ pr_info("erasetest ok\n");
return err;
}
@@ -464,7 +464,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -474,18 +474,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -499,22 +499,22 @@ static int __init mtd_pagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -524,7 +524,7 @@ static int __init mtd_pagetest_init(void)
pgcnt = mtd->erasesize / mtd->writesize;
pgsize = mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -534,17 +534,17 @@ static int __init mtd_pagetest_init(void)
bufsize = pgsize * 2;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
twopages = kmalloc(bufsize, GFP_KERNEL);
if (!twopages) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
boundary = kmalloc(bufsize, GFP_KERNEL);
if (!boundary) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -553,7 +553,7 @@ static int __init mtd_pagetest_init(void)
goto out;
/* Erase all eraseblocks */
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -562,11 +562,11 @@ static int __init mtd_pagetest_init(void)
goto out;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
/* Write all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -574,14 +574,14 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -589,10 +589,10 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = crosstest();
if (err)
@@ -606,7 +606,7 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -615,7 +615,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 121aba189ce..266de04b6d2 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_readtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,12 +51,12 @@ static int read_eraseblock_by_page(int ebnum)
void *oobbuf = iobuf1;
for (i = 0; i < pgcnt; i++) {
- memset(buf, 0 , pgcnt);
+ memset(buf, 0, pgsize);
ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
if (!err)
err = ret;
@@ -77,7 +77,7 @@ static int read_eraseblock_by_page(int ebnum)
ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
- printk(PRINT_PREF "error: read oob failed at "
+ pr_err("error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
err = ret;
@@ -99,7 +99,7 @@ static void dump_eraseblock(int ebnum)
char line[128];
int pg, oob;
- printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
+ pr_info("dumping eraseblock %d\n", ebnum);
n = mtd->erasesize;
for (i = 0; i < n;) {
char *p = line;
@@ -112,7 +112,7 @@ static void dump_eraseblock(int ebnum)
}
if (!mtd->oobsize)
return;
- printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
n = mtd->oobsize;
for (pg = 0, i = 0; pg < pgcnt; pg++)
for (oob = 0; oob < n;) {
@@ -134,7 +134,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -144,21 +144,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -171,21 +171,21 @@ static int __init mtd_readtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: Cannot get MTD device\n");
+ pr_err("error: Cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -196,7 +196,7 @@ static int __init mtd_readtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -205,12 +205,12 @@ static int __init mtd_readtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -219,7 +219,7 @@ static int __init mtd_readtest_init(void)
goto out;
/* Read all eraseblocks 1 page at a time */
- printk(PRINT_PREF "testing page read\n");
+ pr_info("testing page read\n");
for (i = 0; i < ebcnt; ++i) {
int ret;
@@ -235,9 +235,9 @@ static int __init mtd_readtest_init(void)
}
if (err)
- printk(PRINT_PREF "finished with errors\n");
+ pr_info("finished with errors\n");
else
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
@@ -246,7 +246,7 @@ out:
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 42b0f7456fc..596cbea8df4 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -28,8 +30,6 @@
#include <linux/sched.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_speedtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -70,12 +70,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -96,13 +96,13 @@ static int multiblock_erase(int ebnum, int blocks)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
+ pr_err("error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d,"
+ pr_err("some erase error occurred at EB %d,"
"blocks %d\n", ebnum, blocks);
return -EIO;
}
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
- printk(PRINT_PREF "error: write failed at %#llx\n", addr);
+ pr_err("error: write failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -152,7 +152,7 @@ static int write_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -175,7 +175,7 @@ static int write_eraseblock_by_2pages(int ebnum)
for (i = 0; i < n; i++) {
err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -187,7 +187,7 @@ static int write_eraseblock_by_2pages(int ebnum)
if (pgcnt % 2) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -208,7 +208,7 @@ static int read_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != mtd->erasesize) {
- printk(PRINT_PREF "error: read failed at %#llx\n", addr);
+ pr_err("error: read failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -229,7 +229,7 @@ static int read_eraseblock_by_page(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -255,7 +255,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != sz) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -270,7 +270,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -287,7 +287,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -321,21 +321,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
goto out;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
out:
goodebcnt = ebcnt - bad;
return 0;
@@ -351,25 +351,25 @@ static int __init mtd_speedtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
if (count)
- printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
+ pr_info("MTD device: %d count: %d\n", dev, count);
else
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -380,7 +380,7 @@ static int __init mtd_speedtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -392,7 +392,7 @@ static int __init mtd_speedtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -407,7 +407,7 @@ static int __init mtd_speedtest_init(void)
goto out;
/* Write all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock write speed\n");
+ pr_info("testing eraseblock write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -419,10 +419,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock read speed\n");
+ pr_info("testing eraseblock read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -434,14 +434,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page write speed\n");
+ pr_info("testing page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -453,10 +453,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
+ pr_info("page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page read speed\n");
+ pr_info("testing page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -468,14 +468,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
+ pr_info("page read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page write speed\n");
+ pr_info("testing 2 page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -487,10 +487,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page read speed\n");
+ pr_info("testing 2 page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -502,10 +502,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
/* Erase all eraseblocks */
- printk(PRINT_PREF "Testing erase speed\n");
+ pr_info("Testing erase speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -517,12 +517,12 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+ pr_info("erase speed is %ld KiB/s\n", speed);
/* Multi-block erase all eraseblocks */
for (k = 1; k < 7; k++) {
blocks = 1 << k;
- printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
+ pr_info("Testing %dx multi-block erase speed\n",
blocks);
start_timing();
for (i = 0; i < ebcnt; ) {
@@ -541,16 +541,16 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
blocks, speed);
}
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
kfree(iobuf);
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index cb268cebf01..3729f679ae5 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -29,8 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_stresstest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -94,12 +94,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (unlikely(ei.state == MTD_ERASE_FAILED)) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -114,7 +114,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -137,7 +137,7 @@ static int do_read(void)
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
- printk(PRINT_PREF "error: read failed at 0x%llx\n",
+ pr_err("error: read failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -174,7 +174,7 @@ static int do_write(void)
addr = eb * mtd->erasesize + offs;
err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
- printk(PRINT_PREF "error: write failed at 0x%llx\n",
+ pr_err("error: write failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -203,21 +203,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -231,22 +231,22 @@ static int __init mtd_stresstest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -257,14 +257,14 @@ static int __init mtd_stresstest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (ebcnt < 2) {
- printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ pr_err("error: need at least 2 eraseblocks\n");
err = -ENOSPC;
goto out_put_mtd;
}
@@ -277,7 +277,7 @@ static int __init mtd_stresstest_init(void)
writebuf = vmalloc(bufsize);
offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
for (i = 0; i < ebcnt; i++)
@@ -290,16 +290,16 @@ static int __init mtd_stresstest_init(void)
goto out;
/* Do operations */
- printk(PRINT_PREF "doing operations\n");
+ pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
- printk(PRINT_PREF "%d operations done\n", op);
+ pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
cond_resched();
}
- printk(PRINT_PREF "finished, %d operations done\n", op);
+ pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
@@ -309,7 +309,7 @@ out:
out_put_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 9667bf53528..c880c2229c5 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -82,12 +82,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -100,7 +100,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -109,7 +109,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -122,11 +122,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -136,11 +136,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -160,12 +160,12 @@ static int write_eraseblock2(int ebnum)
set_random_data(writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n",
+ pr_err(" write size: %#x\n",
subpgsize * k);
- printk(PRINT_PREF " written: %#08zx\n",
+ pr_err(" written: %#08zx\n",
written);
}
return err ? err : -1;
@@ -198,23 +198,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -225,23 +225,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_info("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -262,17 +262,17 @@ static int verify_eraseblock2(int ebnum)
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -295,17 +295,17 @@ static int verify_eraseblock_ff(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify 0xff failed at "
+ pr_err("error: verify 0xff failed at "
"%#llx\n", (long long)addr);
errcnt += 1;
}
@@ -320,7 +320,7 @@ static int verify_all_eraseblocks_ff(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
+ pr_info("verifying all eraseblocks for 0xff\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -328,10 +328,10 @@ static int verify_all_eraseblocks_ff(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -342,7 +342,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -352,18 +352,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -377,22 +377,22 @@ static int __init mtd_subpagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -402,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -412,12 +412,12 @@ static int __init mtd_subpagetest_init(void)
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
readbuf = kmalloc(bufsize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
@@ -429,7 +429,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
simple_srand(1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -438,13 +438,13 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -452,10 +452,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -467,7 +467,7 @@ static int __init mtd_subpagetest_init(void)
/* Write all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -475,14 +475,14 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -490,10 +490,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -503,7 +503,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -511,7 +511,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index b65861bc7b8..c4cde1e9edd 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -23,6 +23,8 @@
* damage caused by this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -31,7 +33,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_torturetest: "
#define RETRIES 3
static int eb = 8;
@@ -107,12 +108,12 @@ static inline int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -139,40 +140,40 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
retry:
err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
- printk(PRINT_PREF "single bit flip occurred at EB %d "
+ pr_err("single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
- printk(PRINT_PREF "error %d while reading EB %d, "
+ pr_err("error %d while reading EB %d, "
"read %zd\n", err, ebnum, read);
return err;
}
if (read != len) {
- printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
+ pr_err("failed to read %zd bytes from EB %d, "
"read only %zd, but no error reported\n",
len, ebnum, read);
return -EIO;
}
if (memcmp(buf, check_buf, len)) {
- printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
+ pr_err("read wrong data from EB %d\n", ebnum);
report_corrupt(check_buf, buf);
if (retries++ < RETRIES) {
/* Try read again */
yield();
- printk(PRINT_PREF "re-try reading data from EB %d\n",
+ pr_info("re-try reading data from EB %d\n",
ebnum);
goto retry;
} else {
- printk(PRINT_PREF "retried %d times, still errors, "
+ pr_info("retried %d times, still errors, "
"give-up\n", RETRIES);
return -EINVAL;
}
}
if (retries != 0)
- printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
+ pr_info("only attempt number %d was OK (!!!)\n",
retries);
return 0;
@@ -191,12 +192,12 @@ static inline int write_pattern(int ebnum, void *buf)
}
err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
- printk(PRINT_PREF "error %d while writing EB %d, written %zd"
+ pr_err("error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
return err;
}
if (written != len) {
- printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
+ pr_info("written only %zd bytes of %zd, but no error"
" reported\n", written, len);
return -EIO;
}
@@ -211,64 +212,64 @@ static int __init tort_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "Warning: this program is trying to wear out your "
+ pr_info("Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
- printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
if (pgcnt)
- printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
+ pr_info("torturing just %d pages per eraseblock\n",
pgcnt);
- printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
- printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
goto out_mtd;
}
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_5A5) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_mtd;
}
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_A5A) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_5A5;
}
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_FF) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_A5A;
}
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!check_buf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_FF;
}
@@ -295,13 +296,13 @@ static int __init tort_init(void)
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
- printk(PRINT_PREF "block_isbad() returned %d "
+ pr_info("block_isbad() returned %d "
"for EB %d\n", err, i);
goto out;
}
if (err) {
- printk("EB %d is bad. Skip it.\n", i);
+ pr_err("EB %d is bad. Skip it.\n", i);
bad_ebs[i - eb] = 1;
}
}
@@ -329,7 +330,7 @@ static int __init tort_init(void)
continue;
err = check_eraseblock(i, patt_FF);
if (err) {
- printk(PRINT_PREF "verify failed"
+ pr_info("verify failed"
" for 0xFF... pattern\n");
goto out;
}
@@ -362,7 +363,7 @@ static int __init tort_init(void)
patt = patt_A5A;
err = check_eraseblock(i, patt);
if (err) {
- printk(PRINT_PREF "verify failed for %s"
+ pr_info("verify failed for %s"
" pattern\n",
((eb + erase_cycles) & 1) ?
"0x55AA55..." : "0xAA55AA...");
@@ -380,7 +381,7 @@ static int __init tort_init(void)
stop_timing();
ms = (finish.tv_sec - start.tv_sec) * 1000 +
(finish.tv_usec - start.tv_usec) / 1000;
- printk(PRINT_PREF "%08u erase cycles done, took %lu "
+ pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
start_timing();
@@ -391,7 +392,7 @@ static int __init tort_init(void)
}
out:
- printk(PRINT_PREF "finished after %u erase cycles\n",
+ pr_info("finished after %u erase cycles\n",
erase_cycles);
kfree(check_buf);
out_patt_FF:
@@ -403,7 +404,7 @@ out_patt_5A5:
out_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred during torturing\n", err);
+ pr_info("error %d occurred during torturing\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
@@ -441,9 +442,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
&bits) >= 0)
pages++;
- printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
pages, bytes, bits);
- printk(PRINT_PREF "The following is a list of all differences between"
+ pr_info("The following is a list of all differences between"
" what was read from flash and what was expected\n");
for (i = 0; i < check_len; i += pgsize) {
@@ -457,7 +458,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
printk("-------------------------------------------------------"
"----------------------------------\n");
- printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
" starting at offset 0x%x\n",
(mtd->erasesize - check_len + i) / pgsize,
bytes, bits, first);
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index fec406b4553..c071d410488 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -322,7 +322,6 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
- void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
@@ -393,18 +392,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
- goto out_free_buf;
+ goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
@@ -415,8 +410,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err;
}
+ mutex_unlock(&ubi->buf_mutex);
- vfree(buf);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
@@ -426,8 +421,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
-out_free_buf:
- vfree(buf);
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
return err;
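The hunk above replaces the per-call vmalloc() in ubi_compare_lebs() with the device-wide ubi->peb_buf, serialized by buf_mutex, so the data-CRC check reuses one preallocated PEB-sized buffer instead of allocating and freeing on every comparison. A minimal stand-alone sketch of that trade-off (generic userspace code with a made-up fill_and_process() helper, not UBI code):

#include <pthread.h>
#include <string.h>

#define BUF_SIZE 4096

static unsigned char shared_buf[BUF_SIZE];      /* allocated once, reused */
static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for "read the copy from flash and CRC it". */
static int fill_and_process(size_t len)
{
        if (len > BUF_SIZE)
                return -1;
        pthread_mutex_lock(&buf_mutex);         /* serialize users of the shared buffer */
        memset(shared_buf, 0xff, len);          /* ... use shared_buf here ... */
        pthread_mutex_unlock(&buf_mutex);
        return 0;
}

int main(void)
{
        return fill_and_process(512) ? 1 : 0;
}

The cost is that concurrent callers now wait on buf_mutex, which is why the error path in the hunk unlocks via the new out_unlock label instead of freeing a buffer.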
@@ -1453,7 +1448,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
- if (ubi->fm && ubi->dbg->chk_gen) {
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
@@ -1503,7 +1498,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 344b4cb49d4..a56133585e9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -825,8 +825,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -986,14 +985,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_debugging_init_dev(ubi);
- if (err)
- goto out_free;
-
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
- goto out_debugging;
+ goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
@@ -1060,8 +1055,6 @@ out_detach:
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
-out_debugging:
- ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
@@ -1139,7 +1132,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 26908a59506..63cb1d7236c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -217,32 +217,6 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
pr_err("\t1st 16 characters of name: %s\n", nm);
}
-/**
- * ubi_debugging_init_dev - initialize debugging for an UBI device.
- * @ubi: UBI device description object
- *
- * This function initializes debugging-related data for UBI device @ubi.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-int ubi_debugging_init_dev(struct ubi_device *ubi)
-{
- ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
- if (!ubi->dbg)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ubi_debugging_exit_dev - free debugging data for an UBI device.
- * @ubi: UBI device description object
- */
-void ubi_debugging_exit_dev(struct ubi_device *ubi)
-{
- kfree(ubi->dbg);
-}
-
/*
* Root directory for UBI stuff in debugfs. Contains sub-directories which
* contain the stuff specific to particular UBI devices.
@@ -295,7 +269,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
@@ -341,7 +315,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
@@ -398,7 +372,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
unsigned long ubi_num = ubi->ubi_num;
const char *fname;
struct dentry *dent;
- struct ubi_debug_info *d = ubi->dbg;
+ struct ubi_debug_info *d = &ubi->dbg;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -471,5 +445,5 @@ out:
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3dbc877d966..33f8f3b2c9b 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -60,51 +60,11 @@ void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len);
-int ubi_debugging_init_dev(struct ubi_device *ubi);
-void ubi_debugging_exit_dev(struct ubi_device *ubi);
int ubi_debugfs_init(void);
void ubi_debugfs_exit(void);
int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
-/*
- * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
- * + 2 for the number plus 1 for the trailing zero byte.
- */
-#define UBI_DFS_DIR_NAME "ubi%d"
-#define UBI_DFS_DIR_LEN (3 + 2 + 1)
-
-/**
- * struct ubi_debug_info - debugging information for an UBI device.
- *
- * @chk_gen: if UBI general extra checks are enabled
- * @chk_io: if UBI I/O extra checks are enabled
- * @disable_bgt: disable the background task for testing purposes
- * @emulate_bitflips: emulate bit-flips for testing purposes
- * @emulate_io_failures: emulate write/erase failures for testing purposes
- * @dfs_dir_name: name of debugfs directory containing files of this UBI device
- * @dfs_dir: direntry object of the UBI device debugfs directory
- * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
- * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
- * @dfs_disable_bgt: debugfs knob to disable the background task
- * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
- * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
- */
-struct ubi_debug_info {
- unsigned int chk_gen:1;
- unsigned int chk_io:1;
- unsigned int disable_bgt:1;
- unsigned int emulate_bitflips:1;
- unsigned int emulate_io_failures:1;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
- struct dentry *dfs_dir;
- struct dentry *dfs_chk_gen;
- struct dentry *dfs_chk_io;
- struct dentry *dfs_disable_bgt;
- struct dentry *dfs_emulate_bitflips;
- struct dentry *dfs_emulate_io_failures;
-};
-
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
* @ubi: UBI device description object
@@ -114,7 +74,7 @@ struct ubi_debug_info {
*/
static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi->dbg->disable_bgt;
+ return ubi->dbg.disable_bgt;
}
/**
@@ -125,7 +85,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_bitflips)
+ if (ubi->dbg.emulate_bitflips)
return !(random32() % 200);
return 0;
}
@@ -139,7 +99,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 500);
return 0;
}
@@ -153,9 +113,18 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
return 0;
}
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1a5f53c090d..0648c6996d4 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -814,10 +814,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
- list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- list_del(&tmp_aeb->u.list);
- list_add_tail(&tmp_aeb->u.list, &ai->free);
- }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
/*
* If fastmap is leaking PEBs (must not happen), raise a
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 4bd4db8c84c..b93807b4c45 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -171,17 +171,17 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -189,11 +189,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
@@ -211,7 +211,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
@@ -220,12 +220,12 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
@@ -233,11 +233,11 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 78a1dcbf210..bf79def4012 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1132,7 +1132,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
@@ -1159,7 +1159,7 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1197,7 +1197,7 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1241,7 +1241,7 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1282,7 +1282,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1334,7 +1334,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1398,7 +1398,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7d57469723c..8ea6297a208 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -85,6 +85,13 @@
#define UBI_UNKNOWN -1
/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
@@ -342,6 +349,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
@@ -545,7 +583,7 @@ struct ubi_device {
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct ubi_debug_info *dbg;
+ struct ubi_debug_info dbg;
};
/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 9f2ebd8750e..ec2c2dc1c1c 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 9169e58c262..8330703c098 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -535,7 +535,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -847,7 +847,7 @@ static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 926e3df14fb..d77b1c1d7c7 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -858,7 +858,7 @@ out_free:
*/
static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2144f611196..5df49d3cb5c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,5 +1,4 @@
/*
- * @ubi: UBI device description object
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -2050,7 +2049,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -2090,7 +2089,7 @@ out_free:
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
@@ -2116,7 +2115,7 @@ static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *p;
int i;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ef2cb241853..b7d45f367d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4431,8 +4431,6 @@ static void bond_uninit(struct net_device *bond_dev)
list_del(&bond->bond_list);
- bond_work_cancel_all(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 6dded569b11..21b68e5c14f 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -245,7 +245,7 @@ struct bonding {
struct delayed_work ad_work;
struct delayed_work mcast_work;
#ifdef CONFIG_DEBUG_FS
- /* debugging suport via debugfs */
+ /* debugging support via debugfs */
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
};
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 0f5917000aa..6433b81256c 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -121,7 +121,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
}
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (irq == 0) {
dev_err(&ofdev->dev, "no irq found\n");
err = -ENODEV;
goto exit_unmap_mem;
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 0338352bc03..70dba5d01ad 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -109,7 +109,7 @@ static inline struct ax_device *to_ax_dev(struct net_device *dev)
/*
* ax_initial_check
*
- * do an initial probe for the card to check wether it exists
+ * do an initial probe for the card to check whether it exists
* and is functional
*/
static int ax_initial_check(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a2998bea5d4..01588b66a38 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1832,7 +1832,6 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
- int i;
/* Although RSS is meaningless when there is a single HW queue we
* still need it enabled in order to have HW Rx hash generated.
@@ -1864,9 +1863,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- for (i = 0; i < sizeof(params.rss_key) / 4; i++)
- params.rss_key[i] = random32();
-
+ prandom_bytes(params.rss_key, sizeof(params.rss_key));
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index b8b4b749daa..09b625e0fda 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4318,7 +4318,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
if (o->next_tx_only >= o->max_cos)
/* >= because tx only must always be smaller than cos since the
- * primary connection suports COS 0
+ * primary connection supports COS 0
*/
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index aef45d3113b..3dee68612c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3307,7 +3307,7 @@ static void config_pcie(struct adapter *adap)
G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
log2_width = fls(adap->params.pci.width) - 1;
acklat = ack_lat[log2_width][pldsize];
- if (val & 1) /* check LOsEnable */
+ if (val & PCI_EXP_LNKCTL_ASPM_L0S) /* check LOsEnable */
acklat += fst_trn_tx * 4;
rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 378988b5709..6db997c78a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -35,6 +35,8 @@
#ifndef __CXGB4_H__
#define __CXGB4_H__
+#include "t4_hw.h"
+
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -212,6 +214,8 @@ struct tp_err_stats {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned short tx_modq_map; /* TX modulation scheduler queue to */
+ /* channel map */
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
@@ -526,6 +530,7 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
struct l2t_data *l2t;
@@ -545,6 +550,129 @@ struct adapter {
spinlock_t stats_lock;
};
+/* Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/* Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ *
+ * Most of the following data structures are modeled on T4 capabilities.
+ * Drivers for earlier chips use the subsets which make sense for those chips.
+ * We really need to come up with a hardware-independent mechanism to
+ * represent hardware filter capabilities ...
+ */
+struct ch_filter_tuple {
+ /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /* Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+};
+
+/* A filter ioctl command.
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter.
+ */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /* Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+
+ /* Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t rpttid:1; /* report TID in RSS hash field */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
+ uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
+ /* 1 => TCB contains IQ ID */
+
+ /* Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some Ethernet header rewriting.
+ */
+ uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t newdmac:1; /* rewrite destination MAC address */
+ uint32_t newsmac:1; /* rewrite source MAC address */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
+ uint8_t smac[ETH_ALEN]; /* new source MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /* Filter rule value/mask pairs.
+ */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum {
+ VLAN_NOCHANGE = 0, /* default */
+ VLAN_REMOVE,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
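The (value, mask) convention documented in the comment above reduces to a single bitwise test per field: a field matches when ((field & mask) == value), and a (0, 0) tuple is a wildcard. A minimal stand-alone illustration of that rule (the tuple_matches() helper and the sample values are invented for this sketch; they are not part of cxgb4):

#include <stdint.h>
#include <stdio.h>

/* A field matches a (value, mask) tuple when (field & mask) == value. */
static int tuple_matches(uint32_t field, uint32_t value, uint32_t mask)
{
        return (field & mask) == value;
}

int main(void)
{
        printf("%d\n", tuple_matches(0x0800, 0x0800, 0xffff)); /* exact match: 1 */
        printf("%d\n", tuple_matches(0x86dd, 0, 0));           /* wildcard (0, 0): 1 */
        printf("%d\n", tuple_matches(0x1234, 0x0034, 0x00ff)); /* partial mask: 1 */
        return 0;
}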
@@ -701,6 +829,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+
+struct fw_filter_wr;
+
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +871,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 130dd9d5b49..f0718e1a836 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -175,6 +175,30 @@ enum {
MIN_FL_ENTRIES = 16
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -325,6 +349,9 @@ enum {
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+module_param(tp_vlan_pri_map, uint, 0644);
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -506,8 +533,67 @@ static int link_start(struct net_device *dev)
return ret;
}
-/*
- * Response queue handler for the FW event queue.
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/* Handle a filter write/deletion reply.
+ */
+static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int idx = GET_TID(rpl);
+ unsigned int nidx = idx - adap->tids.ftid_base;
+ unsigned int ret;
+ struct filter_entry *f;
+
+ if (idx >= adap->tids.ftid_base && nidx <
+ (adap->tids.nftids + adap->tids.nsftids)) {
+ idx = nidx;
+ ret = GET_TCB_COOKIE(rpl->cookie);
+ f = &adap->tids.ftid_tab[idx];
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ }
+ }
+}
+
+/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
@@ -542,6 +628,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (void *)rsp;
+
+ filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
@@ -983,6 +1073,148 @@ static void t4_free_mem(void *addr)
kfree(addr);
}
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+static int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int ftid;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ if (f->l2t == NULL)
+ return -EAGAIN;
+ if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
+ f->fs.eport, f->fs.dmac)) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->tid_to_iq =
+ htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
+ V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
+ V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+ V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
+ V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
+ V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
+ V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+ V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
+ V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
+ V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
+ V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Delete the filter at a specified index.
+ */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int len, ftid;
+
+ len = sizeof(*fwr);
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -2195,7 +2427,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data)
if (t->afree) {
union aopen_entry *p = t->afree;
- atid = p - t->atid_tab;
+ atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->data = data;
t->atids_in_use++;
@@ -2210,7 +2442,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid);
*/
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
- union aopen_entry *p = &t->atid_tab[atid];
+ union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
@@ -2249,8 +2481,34 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
-/*
- * Release a server TID.
+/* Allocate a server filter TID and set it to the supplied value.
+ */
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_next_zero_bit(t->stid_bmap,
+ t->nstids + t->nsftids, t->nstids);
+ if (stid < (t->nstids + t->nsftids))
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_sftid);
+
+/* Release a server TID.
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
@@ -2362,18 +2620,26 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
static int tid_init(struct tid_info *t)
{
size_t size;
+ unsigned int stid_bmap_size;
unsigned int natids = t->natids;
- size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
- BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->nsftids * sizeof(*t->stid_tab) +
+ stid_bmap_size * sizeof(long) +
+ t->nftids * sizeof(*t->ftid_tab) +
+ t->nsftids * sizeof(*t->ftid_tab);
+
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
- t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
+ t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
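tid_init() above sizes a single allocation and then carves it into the TID, ATID, STID, STID-bitmap and (new in this patch) filter tables by pointing each table at the end of the previous one. A stripped-down version of that pattern, with made-up entry types and counts rather than the driver's real layout:

#include <stdlib.h>

struct a_ent { void *data; };
struct b_ent { unsigned long bits; };

int main(void)
{
        size_t na = 16, nb = 4;
        size_t size = na * sizeof(struct a_ent) + nb * sizeof(struct b_ent);
        struct a_ent *a_tab = calloc(1, size);  /* one backing allocation */
        struct b_ent *b_tab;

        if (!a_tab)
                return 1;
        b_tab = (struct b_ent *)&a_tab[na];     /* second table starts after the first */
        b_tab[0].bits = 0;
        free(a_tab);                            /* one free releases every table */
        return 0;
}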
@@ -2388,7 +2654,7 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids);
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
return 0;
}
@@ -2404,7 +2670,8 @@ static int tid_init(struct tid_info *t)
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue)
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
@@ -2750,6 +3017,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
+ unsigned short i;
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
@@ -2776,10 +3044,16 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
+ lli.filt_mode = adap->filter_mode;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
+ lli.sge_pktshift = adap->sge.pktshift;
+ lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2999,6 +3273,126 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
+/* Return an error number if the indicated filter isn't writable ...
+ */
+static int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked, currently
+ * pending in another operation, etc.
+ */
+static int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue, unsigned char port, unsigned char mask)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+ int i;
+ u8 *val;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ /* Check to make sure the filter requested is writable ...
+ */
+ f = &adap->tids.ftid_tab[stid];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adap, f);
+
+ /* Clear out filter specifications */
+ memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+ f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.mask.lport = ~0;
+ val = (u8 *)&sip;
+ if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+ for (i = 0; i < 4; i++) {
+ f->fs.val.lip[i] = val[i];
+ f->fs.mask.lip[i] = ~0;
+ }
+ if (adap->filter_mode & F_PORT) {
+ f->fs.val.iport = port;
+ f->fs.mask.iport = mask;
+ }
+ }
+
+ f->fs.dirsteer = 1;
+ f->fs.iq = queue;
+ /* Mark filter as locked */
+ f->locked = 1;
+ f->fs.rpttid = 1;
+
+ ret = set_filter_wr(adap, stid);
+ if (ret) {
+ clear_filter(adap, f);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_create_server_filter);
+
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ f = &adap->tids.ftid_tab[stid];
+ /* Unlock the filter */
+ f->locked = 0;
+
+ ret = delete_filter(adap, stid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *ns)
{
@@ -3203,7 +3597,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- c->retval_len16 = htonl(FW_LEN16(*c));
+ c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3245,6 +3639,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ /* first 4 Tx modulation queues point to consecutive Tx channels */
+ adap->params.tp.tx_modq_map = 0xE4;
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+
+ /* associate each Tx modulation queue with consecutive Tx channels */
+ v = 0x84218421;
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_HDR);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_FIFO);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_PCMD);
+
+#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
+ if (is_offload(adap)) {
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ }
+
/* get basic stuff going */
return t4_early_init(adap, adap->fn);
}
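A note on the tx_modq_map value written above: 0xE4 is 0b11100100, which reads naturally as four 2-bit fields holding 3, 2, 1, 0 from the most significant end, i.e. Tx modulation queue 0 maps to channel 0, queue 1 to channel 1 and so on, matching the "consecutive Tx channels" comment. A quick stand-alone check of that decoding (illustrative only, and it assumes the 2-bit-per-queue packing just described):

#include <stdio.h>

int main(void)
{
        unsigned int map = 0xE4;        /* 0b11100100 */
        int q;

        for (q = 0; q < 4; q++)         /* queue q -> channel (map >> 2*q) & 3 */
                printf("queue %d -> channel %u\n", q, (map >> (2 * q)) & 3);
        return 0;
}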
@@ -3397,7 +3819,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -3422,7 +3844,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -3497,7 +3919,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -3929,7 +4351,7 @@ static int adap_init0(struct adapter *adap)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -4035,6 +4457,10 @@ static int adap_init0(struct adapter *adap)
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->filter_mode, 1,
+ TP_VLAN_PRI_MAP);
+
adap->flags |= FW_OK;
return 0;
@@ -4661,6 +5087,17 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ for (i = 0; i < (adapter->tids.nftids +
+ adapter->tids.nsftids); i++, f++)
+ if (f->valid)
+ clear_filter(adapter, f);
+ }
+
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 39bec73ff87..e2bbc7f3e2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
#include <linux/atomic.h>
/* CPL message priority levels */
@@ -97,7 +98,9 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
+ unsigned int atid_base;
+ struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -129,7 +132,7 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
- return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+ return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
@@ -141,6 +144,7 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
@@ -148,8 +152,14 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue);
-
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue);
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue,
+ unsigned char port, unsigned char mask);
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -221,9 +231,16 @@ struct cxgb4_lld_info {
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned short filt_mode; /* filter optional components */
+ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
+ /* scheduler queue */
void __iomem *gts_reg; /* address of GTS register */
void __iomem *db_reg; /* address of kernel doorbell */
int dbfifo_int_thresh; /* doorbell fifo int threshold */
+ unsigned int sge_pktshift; /* Padding between CPL and */
+ /* packet data */
+ bool enable_fw_ofld_conn; /* Enable connection through fw */
+ /* WR */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6ac77a62f36..29878098101 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
+/* Allocate an L2T entry for use by a switching rule. Such need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ write_lock_bh(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_set(&e->refcnt, 1);
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&d->lock);
+ return e;
+}
+
+/* Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETH_ALEN);
+ return write_l2e(adap, e, 0);
+}
+
struct l2t_data *t4_init_l2t(void)
{
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c641..108c0f1fce1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 3ecc087d732..fe9a2ea3588 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -508,7 +508,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= 8) {
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
q->pend_cred &= 7;
}
@@ -2082,10 +2082,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PADEN(1));
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
c.fl0size = htons(flsz);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 45f2bea2e92..22f3af5166b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
@@ -648,12 +648,12 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, SF_DATA);
return ret;
@@ -676,14 +676,14 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_DATA, val);
t4_write_reg(adapter, SF_OP, lock |
cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
/**
@@ -2252,14 +2252,14 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
}
#undef EPIO_REG
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return 0;
}
+/* t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
@@ -2405,7 +2425,7 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_mbasyncnot = htonl(
+ c.err_to_clearinit = htonl(
FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
@@ -2426,7 +2446,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_mbasyncnot);
+ v = ntohl(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
if (state) {
if (v & FW_HELLO_CMD_ERR)
@@ -2774,7 +2794,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -2797,7 +2817,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}
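/* Illustrative sketch (not part of the patch): building a delete-filter work
 * request with the new t4_mk_filtdelwr() helper. The filter ID and reply
 * queue ID are hypothetical; as documented above, passing a negative qid
 * suppresses the delete notification.
 */
static void example_build_del_filter_wr(struct fw_filter_wr *wr,
					unsigned int ftid, int rx_rpl_iq)
{
	/* Request a reply on ingress queue rx_rpl_iq ... */
	t4_mk_filtdelwr(ftid, wr, rx_rpl_iq);

	/* ... or build a fire-and-forget delete with no notification. */
	t4_mk_filtdelwr(ftid, wr, -1);
}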
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index eb71b8250b9..261d17703ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -193,8 +193,24 @@ struct work_request_hdr {
__be64 wr_lo;
};
+/* wr_hi fields */
+#define S_WR_OP 24
+#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+
#define WR_HDR struct work_request_hdr wr
+/* option 0 fields */
+#define S_MSS_IDX 60
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define M_RSS_QUEUE 0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
@@ -204,12 +220,14 @@ struct cpl_pass_open_req {
__be32 peer_ip;
__be64 opt0;
#define TX_CHAN(x) ((x) << 2)
+#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
+#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
#define WND_SCALE(x) ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
@@ -247,8 +265,10 @@ struct cpl_pass_accept_rpl {
#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
+#define PACE(x) ((x) << 16)
#define TX_QUEUE(x) ((x) << 23)
#define RX_CHANNEL(x) ((x) << 26)
+#define CCTRL_ECN(x) ((x) << 27)
#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
@@ -292,6 +312,9 @@ struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
+#define PASS_OPEN_TID(x) ((x) << 0)
+#define PASS_OPEN_TOS(x) ((x) << 24)
+#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
@@ -332,6 +355,7 @@ struct cpl_set_tcb_field {
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
+#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
@@ -536,6 +560,37 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN 0
+#define M_RX_ETHHDR_LEN 0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_MACIDX 8
+#define M_RX_MACIDX 0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RXF_SYN 21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN V_RXF_SYN(1U)
+
+#define S_RX_CHAN 28
+#define M_RX_CHAN 0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN 0
+#define M_RX_TCPHDR_LEN 0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN 6
+#define M_RX_IPHDR_LEN 0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
@@ -634,6 +689,17 @@ struct cpl_fw6_msg {
/* cpl_fw6_msg.type values */
enum {
FW6_TYPE_CMD_RPL = 0,
+ FW6_TYPE_WR_RPL = 1,
+ FW6_TYPE_CQE = 2,
+ FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+};
+
+struct cpl_fw6_msg_ofld_connection_wr_rpl {
+ __u64 cookie;
+ __be32 tid; /* or atid in case of active failure */
+ __u8 t_state;
+ __u8 retval;
+ __u8 rsvd[2];
};
enum {
@@ -658,6 +724,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
+#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
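/* Illustrative sketch (not part of the patch): the S_/M_/V_/G_ macros added to
 * t4_msg.h follow the usual shift/mask convention -- V_x() packs a field and
 * G_x() extracts it. Assuming cpl_rx_pkt carries the l2info and hdr_len words
 * referenced by the comments above (big-endian on the wire), a hypothetical
 * RX-path helper could pull the Ethernet and IP header lengths like this.
 */
static void example_parse_rx_pkt(const struct cpl_rx_pkt *cpl,
				 unsigned int *eth_hdr_len,
				 unsigned int *ip_hdr_len)
{
	u32 l2info = ntohl(cpl->l2info);
	u16 hdr_len = ntohs(cpl->hdr_len);

	*eth_hdr_len = G_RX_ETHHDR_LEN(l2info);
	*ip_hdr_len = G_RX_IPHDR_LEN(hdr_len);
}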
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1a8b57200f..83ec5f7844a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -67,7 +67,7 @@
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO 0x00004000U
+#define DBPRIO(x) ((x) << 14)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
@@ -193,6 +193,12 @@
#define SGE_FL_BUFFER_SIZE1 0x1048
#define SGE_FL_BUFFER_SIZE2 0x104c
#define SGE_FL_BUFFER_SIZE3 0x1050
+#define SGE_FL_BUFFER_SIZE4 0x1054
+#define SGE_FL_BUFFER_SIZE5 0x1058
+#define SGE_FL_BUFFER_SIZE6 0x105c
+#define SGE_FL_BUFFER_SIZE7 0x1060
+#define SGE_FL_BUFFER_SIZE8 0x1064
+
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
@@ -217,6 +223,17 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define SGE_DBFIFO_STATUS 0x10a4
+#define HP_INT_THRESH_SHIFT 28
+#define HP_INT_THRESH_MASK 0xfU
+#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
+#define LP_INT_THRESH_SHIFT 12
+#define LP_INT_THRESH_MASK 0xfU
+#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
+
+#define SGE_DOORBELL_CONTROL 0x10a8
+#define ENABLE_DROP (1 << 13)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -277,6 +294,10 @@
#define A_SGE_CTXT_CMD 0x11fc
#define A_SGE_DBQ_CTXT_BADDR 0x1084
+#define PCIE_PF_CFG 0x40
+#define AIVEC(x) ((x) << 4)
+#define AIVEC_MASK 0x3ffU
+
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR 0x20000000U
@@ -322,6 +343,13 @@
#define PCIE_MEM_ACCESS_OFFSET 0x306c
#define PCIE_FW 0x30b8
+#define PCIE_FW_ERR 0x80000000U
+#define PCIE_FW_INIT 0x40000000U
+#define PCIE_FW_HALT 0x20000000U
+#define PCIE_FW_MASTER_VLD 0x00008000U
+#define PCIE_FW_MASTER(x) ((x) << 12)
+#define PCIE_FW_MASTER_MASK 0x7
+#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
@@ -432,6 +460,9 @@
#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+#define CIM_PF_HOST_INT_ENABLE 0x288
+#define MBMSGRDYINTEN(x) ((x) << 19)
+
#define CIM_PF_HOST_INT_CAUSE 0x28c
#define MBMSGRDYINT 0x00080000U
@@ -922,7 +953,7 @@
#define SF_DATA 0x193f8
#define SF_OP 0x193fc
-#define BUSY 0x80000000U
+#define SF_BUSY 0x80000000U
#define SF_LOCK 0x00000010U
#define SF_CONT 0x00000008U
#define BYTECNT_MASK 0x00000006U
@@ -981,6 +1012,7 @@
#define I2CM 0x00000002U
#define CIM 0x00000001U
+#define PL_INT_ENABLE 0x19410
#define PL_INT_MAP0 0x19414
#define PL_RST 0x19428
#define PIORST 0x00000002U
@@ -1032,4 +1064,41 @@
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
#define XGMAC_PORT_INT_CAUSE 0x10dc
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+
+#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+
+#define S_TX_MODQ_WEIGHT3 24
+#define M_TX_MODQ_WEIGHT3 0xffU
+#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+
+#define S_TX_MODQ_WEIGHT2 16
+#define M_TX_MODQ_WEIGHT2 0xffU
+#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+
+#define S_TX_MODQ_WEIGHT1 8
+#define M_TX_MODQ_WEIGHT1 0xffU
+#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+
+#define S_TX_MODQ_WEIGHT0 0
+#define M_TX_MODQ_WEIGHT0 0xffU
+#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+
+#define A_TP_TX_SCHED_HDR 0x23
+
+#define A_TP_TX_SCHED_FIFO 0x24
+
+#define A_TP_TX_SCHED_PCMD 0x25
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
#endif /* __T4_REGS_H */
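/* Illustrative sketch (not part of the patch): DBPRIO is now a parameterised
 * macro rather than a bare flag bit, so a doorbell write composes it the same
 * way as QID() and PIDX(), as the sge.c hunks above do. The context ID and
 * credit increment below are hypothetical.
 */
static void example_ring_doorbell(struct adapter *adap, unsigned int cntxt_id,
				  unsigned int inc)
{
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     DBPRIO(1) | QID(cntxt_id) | PIDX(inc));
}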
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a6364632b49..a0dcccd846c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -35,6 +35,45 @@
#ifndef _T4FW_INTERFACE_H_
#define _T4FW_INTERFACE_H_
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+ FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+ FW_SCSI_ABORT_REQUESTED = 128, /* */
+ FW_SCSI_ABORT_TIMEDOUT = 129, /* */
+ FW_SCSI_ABORTED = 130, /* */
+ FW_SCSI_CLOSE_REQUESTED = 131, /* */
+ FW_ERR_LINK_DOWN = 132, /* */
+ FW_RDEV_NOT_READY = 133, /* */
+ FW_ERR_RDEV_LOST = 134, /* */
+ FW_ERR_RDEV_LOGO = 135, /* */
+ FW_FCOE_NO_XCHG = 136, /* */
+ FW_SCSI_RSP_ERR = 137, /* */
+ FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_DDP_ERR = 141, /* DDP error*/
+ FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
+};
+
#define FW_T4VF_SGE_BASE_ADDR 0x0000
#define FW_T4VF_MPS_BASE_ADDR 0x0100
#define FW_T4VF_PL_BASE_ADDR 0x0200
@@ -46,6 +85,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
FW_CMD_WR = 0x10,
@@ -68,6 +108,7 @@ struct fw_wr_hdr {
};
#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_WR_ATOMIC(x) ((x) << 23)
#define FW_WR_FLUSH(x) ((x) << 22)
#define FW_WR_COMPL(x) ((x) << 21)
@@ -80,6 +121,282 @@ struct fw_wr_hdr {
#define FW_WR_LEN16(x) ((x) << 0)
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
+#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define M_FW_FILTER_WR_TID 0xfffff
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+#define G_FW_FILTER_WR_TID(x) \
+ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define M_FW_FILTER_WR_RQTYPE 0x1
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+#define G_FW_FILTER_WR_RQTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
+#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define M_FW_FILTER_WR_NOREPLY 0x1
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+#define G_FW_FILTER_WR_NOREPLY(x) \
+ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
+#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
+
+#define S_FW_FILTER_WR_IQ 0
+#define M_FW_FILTER_WR_IQ 0x3ff
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+#define G_FW_FILTER_WR_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define M_FW_FILTER_WR_DEL_FILTER 0x1
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define G_FW_FILTER_WR_DEL_FILTER(x) \
+ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define M_FW_FILTER_WR_RPTTID 0x1
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+#define G_FW_FILTER_WR_RPTTID(x) \
+ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
+#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
+
+#define S_FW_FILTER_WR_DROP 24
+#define M_FW_FILTER_WR_DROP 0x1
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+#define G_FW_FILTER_WR_DROP(x) \
+ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
+#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define M_FW_FILTER_WR_DIRSTEER 0x1
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+#define G_FW_FILTER_WR_DIRSTEER(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
+#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define M_FW_FILTER_WR_MASKHASH 0x1
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+#define G_FW_FILTER_WR_MASKHASH(x) \
+ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
+#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
+#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define M_FW_FILTER_WR_LPBK 0x1
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+#define G_FW_FILTER_WR_LPBK(x) \
+ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
+#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define M_FW_FILTER_WR_DMAC 0x1
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+#define G_FW_FILTER_WR_DMAC(x) \
+ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
+#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define M_FW_FILTER_WR_SMAC 0x1
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+#define G_FW_FILTER_WR_SMAC(x) \
+ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
+#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define M_FW_FILTER_WR_INSVLAN 0x1
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+#define G_FW_FILTER_WR_INSVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
+#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define M_FW_FILTER_WR_RMVLAN 0x1
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+#define G_FW_FILTER_WR_RMVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
+#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define M_FW_FILTER_WR_HITCNTS 0x1
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+#define G_FW_FILTER_WR_HITCNTS(x) \
+ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
+#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define M_FW_FILTER_WR_TXCHAN 0x3
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+#define G_FW_FILTER_WR_TXCHAN(x) \
+ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define M_FW_FILTER_WR_PRIO 0x1
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+#define G_FW_FILTER_WR_PRIO(x) \
+ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
+#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define M_FW_FILTER_WR_L2TIX 0xfff
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+#define G_FW_FILTER_WR_L2TIX(x) \
+ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define M_FW_FILTER_WR_FRAG 0x1
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+#define G_FW_FILTER_WR_FRAG(x) \
+ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
+#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define M_FW_FILTER_WR_FRAGM 0x1
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+#define G_FW_FILTER_WR_FRAGM(x) \
+ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
+#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define M_FW_FILTER_WR_IVLAN_VLD 0x1
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+#define G_FW_FILTER_WR_IVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
+#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define M_FW_FILTER_WR_OVLAN_VLD 0x1
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+#define G_FW_FILTER_WR_OVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
+#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
+#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
+#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define M_FW_FILTER_WR_RX_CHAN 0x1
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+#define G_FW_FILTER_WR_RX_CHAN(x) \
+ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
+#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define M_FW_FILTER_WR_MACI 0x1ff
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+#define G_FW_FILTER_WR_MACI(x) \
+ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define M_FW_FILTER_WR_MACIM 0x1ff
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+#define G_FW_FILTER_WR_MACIM(x) \
+ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define M_FW_FILTER_WR_FCOE 0x1
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+#define G_FW_FILTER_WR_FCOE(x) \
+ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
+#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define M_FW_FILTER_WR_FCOEM 0x1
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+#define G_FW_FILTER_WR_FCOEM(x) \
+ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
+#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
+
+#define S_FW_FILTER_WR_PORT 9
+#define M_FW_FILTER_WR_PORT 0x7
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+#define G_FW_FILTER_WR_PORT(x) \
+ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define M_FW_FILTER_WR_PORTM 0x7
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+#define G_FW_FILTER_WR_PORTM(x) \
+ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define M_FW_FILTER_WR_MATCHTYPE 0x7
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+#define G_FW_FILTER_WR_MATCHTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define M_FW_FILTER_WR_MATCHTYPEM 0x7
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define G_FW_FILTER_WR_MATCHTYPEM(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -99,6 +416,108 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+struct fw_ofld_connection_wr {
+ __be32 op_compl;
+ __be32 len16_pkd;
+ __u64 cookie;
+ __be64 r2;
+ __be64 r3;
+ struct fw_ofld_connection_le {
+ __be32 version_cpl;
+ __be32 filter;
+ __be32 r1;
+ __be16 lport;
+ __be16 pport;
+ union fw_ofld_connection_leip {
+ struct fw_ofld_connection_le_ipv4 {
+ __be32 pip;
+ __be32 lip;
+ __be64 r0;
+ __be64 r1;
+ __be64 r2;
+ } ipv4;
+ struct fw_ofld_connection_le_ipv6 {
+ __be64 pip_hi;
+ __be64 pip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ } ipv6;
+ } u;
+ } le;
+ struct fw_ofld_connection_tcb {
+ __be32 t_state_to_astid;
+ __be16 cplrxdataack_cplpassacceptrpl;
+ __be16 rcv_adv;
+ __be32 rcv_nxt;
+ __be32 tx_max;
+ __be64 opt0;
+ __be32 opt2;
+ __be32 r1;
+ __be64 r2;
+ __be64 r3;
+ } tcb;
+};
+
+#define S_FW_OFLD_CONNECTION_WR_VERSION 31
+#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
+#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
+#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
+ M_FW_OFLD_CONNECTION_WR_VERSION)
+#define F_FW_OFLD_CONNECTION_WR_VERSION \
+ V_FW_OFLD_CONNECTION_WR_VERSION(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPL 30
+#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
+#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
+#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
+#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
+#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
+#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
+ M_FW_OFLD_CONNECTION_WR_T_STATE)
+
+#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
+#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
+#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
+ M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+
+#define S_FW_OFLD_CONNECTION_WR_ASTID 0
+#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
+#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
+#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
+#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
+ M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
+ V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
+#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
+ M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
+ V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -222,6 +641,7 @@ struct fw_cmd_hdr {
#define FW_CMD_OP(x) ((x) << 24)
#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
#define FW_CMD_READ (1U << 22)
#define FW_CMD_WRITE (1U << 21)
#define FW_CMD_EXEC (1U << 20)
@@ -229,6 +649,7 @@ struct fw_cmd_hdr {
#define FW_CMD_RETVAL(x) ((x) << 8)
#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
#define FW_CMD_LEN16(x) ((x) << 0)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -241,7 +662,8 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_TP_MIB = 0x0012,
FW_LDST_ADDRSPC_MDIO = 0x0018,
FW_LDST_ADDRSPC_MPS = 0x0020,
- FW_LDST_ADDRSPC_FUNC = 0x0028
+ FW_LDST_ADDRSPC_FUNC = 0x0028,
+ FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
};
enum fw_ldst_mps_fid {
@@ -303,6 +725,16 @@ struct fw_ldst_cmd {
__be64 data0;
__be64 data1;
} func;
+ struct fw_ldst_pcie {
+ u8 ctrl_to_fn;
+ u8 bnum;
+ u8 r;
+ u8 ext_r;
+ u8 select_naccess;
+ u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
} u;
};
@@ -312,6 +744,9 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID(x) ((x) << 15)
#define FW_LDST_CMD_CTL(x) ((x) << 0)
#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+#define FW_LDST_CMD_LC (1U << 4)
+#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
+#define FW_LDST_CMD_FN(x) ((x) << 0)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -333,7 +768,7 @@ enum fw_hellow_cmd {
struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
- __be32 err_to_mbasyncnot;
+ __be32 err_to_clearinit;
#define FW_HELLO_CMD_ERR (1U << 31)
#define FW_HELLO_CMD_INIT (1U << 30)
#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
@@ -343,6 +778,7 @@ struct fw_hello_cmd {
#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
#define FW_HELLO_CMD_MBMASTER_GET(x) \
(((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
+#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
#define FW_HELLO_CMD_CLEARINIT (1U << 16)
@@ -428,6 +864,7 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_fcoe {
FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+ FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004,
};
enum fw_memtype_cf {
@@ -440,7 +877,7 @@ enum fw_memtype_cf {
struct fw_caps_config_cmd {
__be32 op_to_write;
- __be32 retval_len16;
+ __be32 cfvalid_to_len16;
__be32 r2;
__be32 hwmbitmap;
__be16 nbmcaps;
@@ -701,8 +1138,8 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN (1U << 2)
-#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
+#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
#define FW_IQ_CMD_FL0CONGEN (1U << 0)
#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
@@ -1190,6 +1627,14 @@ enum fw_port_dcb_cfg_rc {
FW_PORT_DCB_CFG_ERROR = 0x1
};
+enum fw_port_dcb_type {
+ FW_PORT_DCB_TYPE_PGID = 0x00,
+ FW_PORT_DCB_TYPE_PGRATE = 0x01,
+ FW_PORT_DCB_TYPE_PRIORATE = 0x02,
+ FW_PORT_DCB_TYPE_PFC = 0x03,
+ FW_PORT_DCB_TYPE_APP_ID = 0x04,
+};
+
struct fw_port_cmd {
__be32 op_to_portid;
__be32 action_to_len16;
@@ -1257,6 +1702,7 @@ struct fw_port_cmd {
#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
#define FW_PORT_CMD_TXPAUSE (1U << 23)
@@ -1305,6 +1751,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
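/* Illustrative sketch (not part of the patch): the new FW_LEN16() helper turns
 * a command/WR structure size into the 16-byte units the firmware expects,
 * and the V_FW_FILTER_WR_* macros pack the filter fields. Mirroring the
 * pattern of t4_mk_filtdelwr() above, a minimal (hypothetical) filter WR
 * header could be set up like this.
 */
static void example_init_filter_wr(struct fw_filter_wr *wr, unsigned int ftid,
				   unsigned int rpl_iq)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
			      V_FW_FILTER_WR_IQ(rpl_iq));
}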
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f16745f4b36..92170d50d9d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -536,7 +536,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO |
+ DBPRIO(1) |
QID(fl->cntxt_id) |
PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
fl->pend_cred %= FL_PER_EQ_UNIT;
@@ -952,7 +952,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* Warn if we write doorbells with the wrong priority and write
* descriptors before telling HW.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | PIDX(n));
@@ -2126,8 +2126,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqns_to_fl0congen =
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PACKEN(1) |
+ FW_IQ_CMD_FL0PADEN(1));
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index abf26c7c1d1..3bc1912afba 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -616,7 +616,7 @@ static inline bool be_error(struct be_adapter *adapter)
return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}
-static inline bool be_crit_error(struct be_adapter *adapter)
+static inline bool be_hw_error(struct be_adapter *adapter)
{
return adapter->eeh_error || adapter->hw_error;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index f2875aa4766..8a250c38fb8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -298,7 +298,12 @@ void be_async_mcc_enable(struct be_adapter *adapter)
void be_async_mcc_disable(struct be_adapter *adapter)
{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
adapter->mcc_obj.rearm_cq = false;
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
}
int be_process_mcc(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f95612b907a..9dca22be812 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1689,15 +1689,41 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+ int flush_wait = 0;
u16 tail;
- /* First cleanup pending rx completions */
- while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(rxo, rxcp);
- be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
+ /* Consume pending rx completions.
+ * Wait for the flush completion (identified by zero num_rcvd)
+ * to arrive. Notify the CQ even when there are no more CQ entries,
+ * so that HW can flush partially coalesced CQ entries.
+ * On Lancer there is no need to wait for the flush completion.
+ */
+ for (;;) {
+ rxcp = be_rx_compl_get(rxo);
+ if (rxcp == NULL) {
+ if (lancer_chip(adapter))
+ break;
+
+ if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ dev_warn(&adapter->pdev->dev,
+ "did not receive flush compl\n");
+ break;
+ }
+ be_cq_notify(adapter, rx_cq->id, true, 0);
+ mdelay(1);
+ } else {
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, true, 1);
+ if (rxcp->num_rcvd == 0)
+ break;
+ }
}
- /* Then free posted rx buffer that were not used */
+ /* After cleanup, leave the CQ in unarmed state */
+ be_cq_notify(adapter, rx_cq->id, false, 0);
+
+ /* Then free posted rx buffers that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
page_info = get_rx_page_info(rxo, tail);
@@ -2157,7 +2183,7 @@ void be_detect_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (be_crit_error(adapter))
+ if (be_hw_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2398,13 +2424,22 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- be_async_mcc_disable(adapter);
-
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
- for_all_evt_queues(adapter, eqo, i) {
+ for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
+
+ be_async_mcc_disable(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ be_tx_compl_clean(adapter);
+
+ be_rx_qs_destroy(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
synchronize_irq(be_msix_vec_get(adapter, eqo));
else
@@ -2414,12 +2449,6 @@ static int be_close(struct net_device *netdev)
be_irq_unregister(adapter);
- /* Wait for all pending tx completions to arrive so that
- * all tx skbs are freed.
- */
- be_tx_compl_clean(adapter);
-
- be_rx_qs_destroy(adapter);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 5ba6e1cbd34..ec490d741fc 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -94,9 +94,8 @@ config GIANFAR
config FEC_PTP
bool "PTP Hardware Clock (PHC)"
- depends on FEC && ARCH_MXC
+ depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5
select PTP_1588_CLOCK
- default y if SOC_IMX6Q
---help---
Say Y here if you want to use PTP Hardware Clock (PHC) in the
driver. Only the basic clock operations have been implemented.
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 8364815c32f..99b6c2a38db 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -39,26 +39,6 @@
* hcp_* - structures, variables and functions related to Hypervisor Calls
*/
-static inline u32 get_longbusy_msecs(int long_busy_ret_code)
-{
- switch (long_busy_ret_code) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 9089d00f142..14e30515f6a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1671,7 +1671,7 @@ static void e1000_get_wol(struct net_device *netdev,
/* apply any specific unsupported masks here */
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* KSP3 does not suppport UCAST wake-ups */
+ /* KSP3 does not support UCAST wake-ups */
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 0029934748b..edfba937092 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -31,6 +31,30 @@ config MV643XX_ETH
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
+config MVMDIO
+ tristate "Marvell MDIO interface support"
+ ---help---
+ This driver supports the MDIO interface found in the network
+ interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+ Dove, Armada 370 and Armada XP).
+
+ For now, this driver is only needed for the MVNETA driver
+ (used on Armada 370 and XP), but it could be used in the
+ future by the MV643XX_ETH driver.
+
+config MVNETA
+ tristate "Marvell Armada 370/XP network interface support"
+ depends on MACH_ARMADA_370_XP
+ select PHYLIB
+ select MVMDIO
+ ---help---
+ This driver supports the network interface units in the
+ Marvell ARMADA XP and ARMADA 370 SoC family.
+
+ Note that this driver is distinct from the mv643xx_eth
+ driver, which should be used for the older Marvell SoCs
+ (Dove, Orion, Discovery, Kirkwood).
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 57e3234a37b..7f63b4aac43 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -3,6 +3,8 @@
#
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 00000000000..74f1c157a48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,228 @@
+/*
+ * Driver for the MDIO interface of Marvell network interfaces.
+ *
+ * Since the MDIO interface of Marvell network interfaces is shared
+ * between all network interfaces, having a single driver allows
+ * concurrent accesses to be handled properly (you may have four
+ * Ethernet ports, but they in fact share the same SMI interface to
+ * access the MDIO bus). Moreover, this MDIO interface code is
+ * similar between the mv643xx_eth driver and the mvneta driver. For
+ * now, it is only used by the mvneta driver, but it could later be
+ * used by the mv643xx_eth driver as well.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+
+struct orion_mdio_dev {
+ struct mutex lock;
+ void __iomem *smireg;
+};
+
+/* Wait for the SMI unit to be ready for another operation
+ */
+static int orion_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (!(val & MVMDIO_SMI_BUSY))
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ return 0;
+}
+
+static int orion_mdio_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_READ_OPERATION),
+ dev->smireg);
+
+ /* Wait for the value to become available */
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (val & MVMDIO_SMI_READ_VALID)
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout when reading PHY\n");
+ mutex_unlock(&dev->lock);
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ mutex_unlock(&dev->lock);
+
+ return val & 0xFFFF;
+}
+
+static int orion_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_WRITE_OPERATION |
+ (value << MVMDIO_SMI_DATA_SHIFT)),
+ dev->smireg);
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int orion_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static int orion_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct orion_mdio_dev *dev;
+ int i, ret;
+
+ bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
+ if (!bus) {
+ dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ bus->name = "orion_mdio_bus";
+ bus->read = orion_mdio_read;
+ bus->write = orion_mdio_write;
+ bus->reset = orion_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
+ mdiobus_free(bus);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_POLL;
+
+ dev = bus->priv;
+ dev->smireg = of_iomap(pdev->dev.of_node, 0);
+ if (!dev->smireg) {
+ dev_err(&pdev->dev, "No SMI register address given in DT\n");
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return -ENODEV;
+ }
+
+ mutex_init(&dev->lock);
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ iounmap(dev->smireg);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int orion_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+ mdiobus_unregister(bus);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return 0;
+}
+
+static const struct of_device_id orion_mdio_match[] = {
+ { .compatible = "marvell,orion-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orion_mdio_match);
+
+static struct platform_driver orion_mdio_driver = {
+ .probe = orion_mdio_probe,
+ .remove = orion_mdio_remove,
+ .driver = {
+ .name = "orion-mdio",
+ .of_match_table = orion_mdio_match,
+ },
+};
+
+module_platform_driver(orion_mdio_driver);
+
+MODULE_DESCRIPTION("Marvell MDIO interface driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
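/* Illustrative sketch (not part of the patch): the SMI command word written to
 * dev->smireg is simply the PHY address, register number and operation bits
 * OR-ed together at the shifts defined at the top of mvmdio.c, as
 * orion_mdio_read() does. A helper composing the word for a read (with
 * hypothetical mii_id/regnum values supplied by the caller) would look like
 * this.
 */
static u32 example_smi_read_word(int mii_id, int regnum)
{
	return (mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
	       (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
	       MVMDIO_SMI_READ_OPERATION;
}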
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 00000000000..b6025c305e1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,2847 @@
+/*
+ * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Rami Rosen <rosenr@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+/* Registers */
+#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
+#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
+#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
+#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
+#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
+#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
+#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
+#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
+#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
+#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
+#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
+#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
+#define MVNETA_PORT_RX_RESET 0x1cc0
+#define MVNETA_PORT_RX_DMA_RESET BIT(0)
+#define MVNETA_PHY_ADDR 0x2000
+#define MVNETA_PHY_ADDR_MASK 0x1f
+#define MVNETA_MBUS_RETRY 0x2010
+#define MVNETA_UNIT_INTR_CAUSE 0x2080
+#define MVNETA_UNIT_CONTROL 0x20B0
+#define MVNETA_PHY_POLLING_ENABLE BIT(1)
+#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
+#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
+#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
+#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_PORT_CONFIG 0x2400
+#define MVNETA_UNI_PROMISC_MODE BIT(0)
+#define MVNETA_DEF_RXQ(q) ((q) << 1)
+#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
+#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
+#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
+#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
+#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
+#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
+#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
+ MVNETA_DEF_RXQ_ARP(q) | \
+ MVNETA_DEF_RXQ_TCP(q) | \
+ MVNETA_DEF_RXQ_UDP(q) | \
+ MVNETA_DEF_RXQ_BPDU(q) | \
+ MVNETA_TX_UNSET_ERR_SUM | \
+ MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
+#define MVNETA_PORT_CONFIG_EXTEND 0x2404
+#define MVNETA_MAC_ADDR_LOW 0x2414
+#define MVNETA_MAC_ADDR_HIGH 0x2418
+#define MVNETA_SDMA_CONFIG 0x241c
+#define MVNETA_SDMA_BRST_SIZE_16 4
+#define MVNETA_NO_DESC_SWAP 0x0
+#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
+#define MVNETA_RX_NO_DATA_SWAP BIT(4)
+#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_PORT_STATUS 0x2444
+#define MVNETA_TX_IN_PRGRS BIT(1)
+#define MVNETA_TX_FIFO_EMPTY BIT(8)
+#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+#define MVNETA_TYPE_PRIO 0x24bc
+#define MVNETA_FORCE_UNI BIT(21)
+#define MVNETA_TXQ_CMD_1 0x24e4
+#define MVNETA_TXQ_CMD 0x2448
+#define MVNETA_TXQ_DISABLE_SHIFT 8
+#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_ACC_MODE 0x2500
+#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
+#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+#define MVNETA_INTR_NEW_CAUSE 0x25a0
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_INTR_NEW_MASK 0x25a4
+#define MVNETA_INTR_OLD_CAUSE 0x25a8
+#define MVNETA_INTR_OLD_MASK 0x25ac
+#define MVNETA_INTR_MISC_CAUSE 0x25b0
+#define MVNETA_INTR_MISC_MASK 0x25b4
+#define MVNETA_INTR_ENABLE 0x25b8
+#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_CMD 0x2680
+#define MVNETA_RXQ_DISABLE_SHIFT 8
+#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
+#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
+#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
+#define MVNETA_GMAC_CTRL_0 0x2c00
+#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
+#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
+#define MVNETA_GMAC_CTRL_2 0x2c08
+#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PORT_RGMII BIT(4)
+#define MVNETA_GMAC2_PORT_RESET BIT(6)
+#define MVNETA_GMAC_STATUS 0x2c10
+#define MVNETA_GMAC_LINK_UP BIT(0)
+#define MVNETA_GMAC_SPEED_1000 BIT(1)
+#define MVNETA_GMAC_SPEED_100 BIT(2)
+#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
+#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_LATE_COLLISION 0x7c
+#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
+#define MVNETA_DA_FILT_OTH_MCAST 0x3500
+#define MVNETA_DA_FILT_UCAST_BASE 0x3600
+#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
+#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
+#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
+#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
+#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
+#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
+#define MVNETA_TXQ_SENT_DESC_SHIFT 16
+#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
+#define MVNETA_PORT_TX_RESET 0x3cf0
+#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TX_MTU 0x3e0c
+#define MVNETA_TX_TOKEN_SIZE 0x3e14
+#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
+#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+
+#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVNETA_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVNETA_TXDONE_COAL_PKTS 16
+#define MVNETA_RX_COAL_PKTS 32
+#define MVNETA_RX_COAL_USEC 100
+
+/* Timer */
+#define MVNETA_TX_DONE_TIMER_PERIOD 10
+
+/* Napi polling weight */
+#define MVNETA_RX_POLL_WEIGHT 64
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Because these two bytes sit at the front of the Ethernet
+ * header, the IP header is automatically aligned on a 4-byte boundary
+ * (14-byte Ethernet header + 2-byte Marvell header = 16 bytes): the
+ * hardware skips those two bytes on its own.
+ */
+#define MVNETA_MH_SIZE 2
+
+#define MVNETA_VLAN_TAG_LEN 4
+
+#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
+#define MVNETA_TX_CSUM_MAX_SIZE 9800
+#define MVNETA_ACC_MODE_EXT 1
+
+/* Timeout constants */
+#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
+
+#define MVNETA_TX_MTU_MAX 0x3ffff
+
+/* Max number of Rx descriptors */
+#define MVNETA_MAX_RXD 128
+
+/* Max number of Tx descriptors */
+#define MVNETA_MAX_TXD 532
+
+/* descriptor aligned size */
+#define MVNETA_DESC_ALIGNED_SIZE 32
+
+#define MVNETA_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, \
+ MVNETA_CPU_D_CACHE_LINE_SIZE)
+
+#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+
+struct mvneta_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct mvneta_port {
+ int pkt_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+ struct mvneta_tx_queue *txqs;
+ struct timer_list tx_done_timer;
+ struct net_device *dev;
+
+ u32 cause_rx_tx;
+ struct napi_struct napi;
+
+ /* Flags */
+ unsigned long flags;
+#define MVNETA_F_TX_DONE_TIMER_BIT 0
+
+ /* Napi weight */
+ int weight;
+
+ /* Core clock */
+ struct clk *clk;
+ u8 mcast_count[256];
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+ struct mvneta_stats tx_stats;
+ struct mvneta_stats rx_stats;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ phy_interface_t phy_interface;
+ struct device_node *phy_node;
+ unsigned int link;
+ unsigned int duplex;
+ unsigned int speed;
+};
+
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
+ * layout of the transmit and receive DMA descriptors; their layout
+ * is therefore defined by the hardware design.
+ */
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+#define MVNETA_TX_L3_OFF_SHIFT 0
+#define MVNETA_TX_IP_HLEN_SHIFT 8
+#define MVNETA_TX_L4_UDP BIT(16)
+#define MVNETA_TX_L3_IP6 BIT(17)
+#define MVNETA_TXD_IP_CSUM BIT(18)
+#define MVNETA_TXD_Z_PAD BIT(19)
+#define MVNETA_TXD_L_DESC BIT(20)
+#define MVNETA_TXD_F_DESC BIT(21)
+#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
+ MVNETA_TXD_L_DESC | \
+ MVNETA_TXD_F_DESC)
+#define MVNETA_TX_L4_CSUM_FULL BIT(30)
+#define MVNETA_TX_L4_CSUM_NOT BIT(31)
+
+ u16 reserved1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
+#define MVNETA_RXD_ERR_CRC 0x0
+#define MVNETA_RXD_ERR_SUMMARY BIT(16)
+#define MVNETA_RXD_ERR_OVERRUN BIT(17)
+#define MVNETA_RXD_ERR_LEN BIT(18)
+#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
+#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
+#define MVNETA_RXD_L3_IP4 BIT(25)
+#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u16 data_size; /* Size of received packet in bytes */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+
+struct mvneta_tx_queue {
+ /* Number of this TX queue, in the range 0-7 */
+ u8 id;
+
+ /* Number of TX DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used TX DMA descriptor in the
+ * descriptor ring
+ */
+ int count;
+
+ /* Array of transmitted skb */
+ struct sk_buff **tx_skb;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ u32 done_pkts_coal;
+
+ /* Virtual address of the TX DMA descriptors array */
+ struct mvneta_tx_desc *descs;
+
+ /* DMA address of the TX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last TX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next TX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mvneta_rx_queue {
+ /* rx queue number, in the range 0-7 */
+ u8 id;
+
+ /* num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ /* counter of times when mvneta_refill() failed */
+ int missed;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvneta_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+static int rxq_number = 8;
+static int txq_number = 8;
+
+static int rxq_def;
+static int txq_def;
+
+#define MVNETA_DRIVER_NAME "mvneta"
+#define MVNETA_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write helper method */
+static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
+{
+ writel(data, pp->base + offset);
+}
+
+/* Read helper method */
+static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
+{
+ return readl(pp->base + offset);
+}
+
+/* Increment txq get counter */
+static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
+{
+ txq->txq_get_index++;
+ if (txq->txq_get_index == txq->size)
+ txq->txq_get_index = 0;
+}
+
+/* Increment txq put counter */
+static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
+{
+ txq->txq_put_index++;
+ if (txq->txq_put_index == txq->size)
+ txq->txq_put_index = 0;
+}
+
+
+/* Clear all MIB counters */
+static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+{
+ int i;
+ u32 dummy;
+
+ /* Perform dummy reads from the MIB counters; the counters are
+ * clear-on-read, so reading each one resets it
+ */
+ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
+ dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+}
+
+/* Get System Network Statistics */
+struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ unsigned int start;
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
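+ /* The begin/retry loops below provide a consistent 64-bit snapshot
+ * of the counters, even on 32-bit CPUs where the writer updates the
+ * two halves separately.
+ */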
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
+ stats->rx_packets = pp->rx_stats.packets;
+ stats->rx_bytes = pp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+
+
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
+ stats->tx_packets = pp->tx_stats.packets;
+ stats->tx_bytes = pp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+
+ return stats;
+}
+
+/* Rx descriptors helper methods */
+
+/* Checks whether the given RX descriptor is both the first and the
+ * last descriptor for the RX packet. Each RX packet is currently
+ * received through a single RX descriptor, so a descriptor without
+ * both its first and last bits set indicates an error
+ */
+static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+{
+ return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ MVNETA_RXD_FIRST_LAST_DESC;
+}
+
+/* Add number of descriptors ready to receive new packets */
+static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int ndescs)
+{
+ /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
+ * be added at once
+ */
+ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
+ MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+ ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
+ return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
+}
+
+/* Update the number of processed/refilled RX descriptors; called upon
+ * return from the RX path or from mvneta_rxq_drop_pkts().
+ */
+static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int rx_done, int rx_filled)
+{
+ u32 val;
+
+ if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
+ val = rx_done |
+ (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ return;
+ }
+
+ /* Only 255 descriptors can be added at once */
+ while ((rx_done > 0) || (rx_filled > 0)) {
+ if (rx_done <= 0xff) {
+ val = rx_done;
+ rx_done = 0;
+ } else {
+ val = 0xff;
+ rx_done -= 0xff;
+ }
+ if (rx_filled <= 0xff) {
+ val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled = 0;
+ } else {
+ val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled -= 0xff;
+ }
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ }
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static struct mvneta_rx_desc *
+mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ return rxq->descs + rx_desc;
+}
+
+/* Change maximum receive size of the port. */
+static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
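+ /* The max-RX-size field appears to be programmed in 2-byte units
+ * (hence the division by two), and the 2-byte Marvell header is
+ * not counted in it.
+ */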
+ val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
+ MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+
+/* Set rx queue offset */
+static void mvneta_rxq_offset_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int offset)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
+
+ /* The offset is programmed in 8-byte units, hence the >> 3 */
+ val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+/* Tx descriptors helper methods */
+
+/* Update HW with number of TX descriptors to be sent */
+static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int pend_desc)
+{
+ u32 val;
+
+ /* Only 255 descriptors can be added at once; assume the caller
+ * processes TX descriptors in quanta of less than 256
+ */
+ val = pend_desc;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static struct mvneta_tx_desc *
+mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Release the last allocated TX descriptor. Useful to handle DMA
+ * mapping failures in the TX path.
+ */
+static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set rxq buf size */
+static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int buf_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
+
+ val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
+ val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
+}
+
+/* Disable buffer management (BM) */
+static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ if (enable)
+ val |= MVNETA_GMAC2_PORT_RGMII;
+ else
+ val &= ~MVNETA_GMAC2_PORT_RGMII;
+
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Config SGMII port */
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val |= MVNETA_GMAC2_PSC_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Start the Ethernet port RX and TX activity */
+static void mvneta_port_up(struct mvneta_port *pp)
+{
+ int queue;
+ u32 q_map;
+
+ /* Enable all initialized TXs. */
+ mvneta_mib_counters_clear(pp);
+ q_map = 0;
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ if (txq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+
+ /* Enable all initialized RXQs. */
+ q_map = 0;
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ if (rxq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+}
+
+/* Stop the Ethernet port activity */
+static void mvneta_port_down(struct mvneta_port *pp)
+{
+ u32 val;
+ int count;
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
+
+ /* Issue stop command for active channels only */
+ if (val != 0)
+ mvreg_write(pp, MVNETA_RXQ_CMD,
+ val << MVNETA_RXQ_DISABLE_SHIFT);
+
+ /* Wait for all Rx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_RXQ_CMD);
+ } while (val & 0xff);
+
+ /* Stop Tx port activity. Check port Tx activity. Issue stop
+ * command for active channels only
+ */
+ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
+
+ if (val != 0)
+ mvreg_write(pp, MVNETA_TXQ_CMD,
+ (val << MVNETA_TXQ_DISABLE_SHIFT));
+
+ /* Wait for all Tx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for TX stopped status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ /* Check TX Command reg that all Txqs are stopped */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD);
+
+ } while (val & 0xff);
+
+ /* Double check to verify that TX FIFO is empty */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
+ netdev_warn(pp->dev,
+ "TX FIFO empty timeout status=0x08%x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_PORT_STATUS);
+ } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
+ (val & MVNETA_TX_IN_PRGRS));
+
+ udelay(200);
+}
+
+/* Enable the port by setting the port enable bit of the MAC control register */
+static void mvneta_port_enable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Enable port */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val |= MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+/* Disable the port and wait for about 200 usec before returning */
+static void mvneta_port_disable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Reset the Enable bit in the Serial Control Register */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+
+ udelay(200);
+}
+
+/* Multicast tables methods */
+
+/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
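+ /* Each filter table entry is a single byte: bit 0 is the "accept
+ * frame" flag and the following bits select the destination RX
+ * queue. Four entries are packed per 32-bit register, so the same
+ * byte value is replicated into all four byte lanes below.
+ */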
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
+}
+
+/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
+
+}
+
+/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
+static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
+ val = 0;
+ } else {
+ memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
+}
+
+/* This method sets defaults for the NETA port:
+ * Clears interrupt Cause and Mask registers.
+ * Clears all MAC tables.
+ * Sets defaults in all registers.
+ * Resets RX and TX descriptor rings.
+ * Resets PHY.
+ * This method can be called after mvneta_port_down() to return the port
+ * settings to defaults.
+ */
+static void mvneta_defaults_set(struct mvneta_port *pp)
+{
+ int cpu;
+ int queue;
+ u32 val;
+
+ /* Clear all Cause registers */
+ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
+
+ /* Enable MBUS Retry bit16 */
+ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
+
+ /* Set CPU queue access map - all CPUs have access to all RX
+ * queues and to all TX queues
+ */
+ for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+ (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+ MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+
+ /* Reset RX and TX DMAs */
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
+ for (queue = 0; queue < txq_number; queue++) {
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
+ }
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+
+ /* Set Port Acceleration Mode */
+ val = MVNETA_ACC_MODE_EXT;
+ mvreg_write(pp, MVNETA_ACC_MODE, val);
+
+ /* Set the portCfg register to its default value, with rxq_def as the default RX queue */
+ val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+ mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+ val = 0;
+ mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
+ mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
+
+ /* Build PORT_SDMA_CONFIG_REG */
+ val = 0;
+
+ /* Default burst size */
+ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+
+ val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
+ MVNETA_NO_DESC_SWAP);
+
+ /* Assign port SDMA configuration */
+ mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ /* Set port interrupt enable register - default enable all */
+ mvreg_write(pp, MVNETA_INTR_ENABLE,
+ (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
+ | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+}
+
+/* Set max sizes for tx queues */
+static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
+{
+ u32 val, size, mtu;
+ int queue;
+
+ mtu = max_tx_size * 8;
+ if (mtu > MVNETA_TX_MTU_MAX)
+ mtu = MVNETA_TX_MTU_MAX;
+
+ /* Set MTU */
+ val = mvreg_read(pp, MVNETA_TX_MTU);
+ val &= ~MVNETA_TX_MTU_MAX;
+ val |= mtu;
+ mvreg_write(pp, MVNETA_TX_MTU, val);
+
+ /* The TX token size and all TXQ token sizes must be larger than the MTU */
+ val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
+
+ size = val & MVNETA_TX_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
+ }
+ for (queue = 0; queue < txq_number; queue++) {
+ val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
+
+ size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
+ }
+ }
+}
+
+/* Set unicast address */
+static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
+ int queue)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ last_nibble = (0xf & last_nibble);
+
+ /* offset from unicast tbl base */
+ tbl_offset = (last_nibble / 4) * 4;
+
+ /* offset within the above reg */
+ reg_offset = last_nibble % 4;
+
+ unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified unicast DA tbl entry */
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
+}
+
+/* Set mac address */
+static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
+ int queue)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ if (queue != -1) {
+ mac_l = (addr[4] << 8) | (addr[5]);
+ mac_h = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | (addr[3] << 0);
+
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
+ }
+
+ /* Accept frames of this address */
+ mvneta_set_ucast_addr(pp, addr[5], queue);
+}
+
+/* Set the number of packets that will be received before RX interrupt
+ * will be generated by HW.
+ */
+static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
+ value | MVNETA_RXQ_NON_OCCUPIED(0));
+ rxq->pkts_coal = value;
+}
+
+/* Set the time delay in usec before RX interrupt will be generated by
+ * HW.
+ */
+static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ u32 val;
+ unsigned long clk_rate;
+
+ clk_rate = clk_get_rate(pp->clk);
+ val = (clk_rate / 1000000) * value;
+
+ mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
+ rxq->time_coal = value;
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, u32 value)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
+
+ val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
+ val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
+
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
+
+ txq->done_pkts_coal = value;
+}
+
+/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
+static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
+{
+ if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
+ pp->tx_done_timer.expires = jiffies +
+ msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
+ add_timer(&pp->tx_done_timer);
+ }
+}
+
+
+/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
+static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+ u32 phys_addr, u32 cookie)
+{
+ rx_desc->buf_cookie = cookie;
+ rx_desc->buf_phys_addr = phys_addr;
+}
+
+/* Decrement sent descriptors counter */
+static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int sent_desc)
+{
+ u32 val;
+
+ /* Only 255 TX descriptors can be updated at once */
+ while (sent_desc > 0xff) {
+ val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ sent_desc = sent_desc - 0xff;
+ }
+
+ val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ u32 val;
+ int sent_desc;
+
+ val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
+ sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
+ MVNETA_TXQ_SENT_DESC_SHIFT;
+
+ return sent_desc;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ */
+static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int sent_desc;
+
+ /* Get number of sent descriptors */
+ sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
+
+ /* Decrement sent descriptors counter */
+ if (sent_desc)
+ mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
+
+ return sent_desc;
+}
+
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type; required only for checksum
+ * calculation
+ */
+ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
+ command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
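+ /* l3_proto is taken from skb->protocol and is therefore in network
+ * byte order; swab16(ETH_P_IP) is equivalent to htons(ETH_P_IP) on
+ * the little-endian CPUs this driver targets.
+ */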
+ if (l3_proto == swab16(ETH_P_IP))
+ command |= MVNETA_TXD_IP_CSUM;
+ else
+ command |= MVNETA_TX_L3_IP6;
+
+ if (l4_proto == IPPROTO_TCP)
+ command |= MVNETA_TX_L4_CSUM_FULL;
+ else if (l4_proto == IPPROTO_UDP)
+ command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
+ else
+ command |= MVNETA_TX_L4_CSUM_NOT;
+
+ return command;
+}
+
+
+/* Display more error info */
+static void mvneta_rx_error(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ u32 status = rx_desc->status;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ netdev_err(pp->dev,
+ "bad rx status %08x (buffer oversize), size=%d\n",
+ rx_desc->status, rx_desc->data_size);
+ return;
+ }
+
+ switch (status & MVNETA_RXD_ERR_CODE_MASK) {
+ case MVNETA_RXD_ERR_CRC:
+ netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_OVERRUN:
+ netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_LEN:
+ netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_RESOURCE:
+ netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ }
+}
+
+/* Handle RX checksum offload */
+static void mvneta_rx_csum(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
+ (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+}
+
+/* Free tx queue skbuffs */
+static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct mvneta_tx_desc *tx_desc = txq->descs +
+ txq->txq_get_index;
+ struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+
+ mvneta_txq_inc_get(txq);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Handle end of transmission */
+static int mvneta_txq_done(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ int tx_done;
+
+ tx_done = mvneta_txq_sent_desc_proc(pp, txq);
+ if (tx_done == 0)
+ return tx_done;
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ txq->count -= tx_done;
+
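+ /* Wake the queue only once there is room for a maximally
+ * fragmented skb (MAX_SKB_FRAGS fragments plus the linear part),
+ * mirroring the stop condition in mvneta_tx().
+ */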
+ if (netif_tx_queue_stopped(nq)) {
+ if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ netif_tx_wake_queue(nq);
+ }
+
+ return tx_done;
+}
+
+/* Refill processing */
+static int mvneta_rx_refill(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ dma_addr_t phys_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
+ if (!skb)
+ return -ENOMEM;
+
+ phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
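+ /* The skb pointer is stashed in the descriptor's buf_cookie field
+ * so the RX path can recover it later.
+ */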
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else
+ return MVNETA_TX_L4_CSUM_NOT;
+
+ return mvneta_txq_desc_csum(skb_network_offset(skb),
+ skb->protocol, ip_hdr_len, l4_proto);
+ }
+
+ return MVNETA_TX_L4_CSUM_NOT;
+}
+
+/* Returns rx queue pointer (find last set bit) according to causeRxTx
+ * value
+ */
+static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
+ u32 cause)
+{
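+ /* RX cause bits occupy bits 8-15 of the register, hence the shift
+ * before looking for the last set bit.
+ */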
+ int queue = fls(cause >> 8) - 1;
+
+ return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
+}
+
+/* Drop packets received by the RXQ and free buffers */
+static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ int rx_done, i;
+
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ for (i = 0; i < rxq->size; i++) {
+ struct mvneta_rx_desc *rx_desc = rxq->descs + i;
+ struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ dev_kfree_skb_any(skb);
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+ }
+
+ if (rx_done)
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+}
+
+/* Main rx processing */
+static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+{
+ struct net_device *dev = pp->dev;
+ int rx_done, rx_filled;
+
+ /* Get number of received packets */
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ if (rx_todo > rx_done)
+ rx_todo = rx_done;
+
+ rx_done = 0;
+ rx_filled = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct sk_buff *skb;
+ u32 rx_status;
+ int rx_bytes, err;
+
+ prefetch(rx_desc);
+ rx_done++;
+ rx_filled++;
+ rx_status = rx_desc->status;
+ skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ dev->stats.rx_errors++;
+ mvneta_rx_error(pp, rx_desc);
+ mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
+ (u32)skb);
+ continue;
+ }
+
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ u64_stats_update_begin(&pp->rx_stats.syncp);
+ pp->rx_stats.packets++;
+ pp->rx_stats.bytes += rx_bytes;
+ u64_stats_update_end(&pp->rx_stats.syncp);
+
+ /* Linux processing */
+ skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_put(skb, rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ mvneta_rx_csum(pp, rx_desc, skb);
+
+ napi_gro_receive(&pp->napi, skb);
+
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ rx_filled--;
+ }
+ }
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+
+ return rx_done;
+}
+
+/* Handle tx fragmentation processing */
+static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
+ struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = page_address(frag->page.p) + frag->page_offset;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = frag->size;
+
+ tx_desc->buf_phys_addr =
+ dma_map_single(pp->dev->dev.parent, addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto error;
+ }
+
+ if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ /* Last descriptor */
+ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->tx_skb[txq->txq_put_index] = skb;
+
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ tx_desc->command = 0;
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ }
+ }
+
+ return 0;
+
+error:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ for (i = i - 1; i >= 0; i--) {
+ tx_desc = txq->descs + i;
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+
+ return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+ struct mvneta_tx_desc *tx_desc;
+ struct netdev_queue *nq;
+ int frags = 0;
+ u32 tx_cmd;
+
+ if (!netif_running(dev))
+ goto out;
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+ nq = netdev_get_tx_queue(dev, txq_def);
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ tx_cmd = mvneta_skb_tx_csum(pp, skb);
+
+ tx_desc->data_size = skb_headlen(skb);
+
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVNETA_TXD_FLZ_DESC;
+ tx_desc->command = tx_cmd;
+ txq->tx_skb[txq->txq_put_index] = skb;
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVNETA_TXD_F_DESC;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ tx_desc->command = tx_cmd;
+ /* Continue with other skb fragments */
+ if (mvneta_tx_frag_process(pp, skb, txq)) {
+ dma_unmap_single(dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+ }
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+ netif_tx_stop_queue(nq);
+
+out:
+ if (frags > 0) {
+ u64_stats_update_begin(&pp->tx_stats.syncp);
+ pp->tx_stats.packets++;
+ pp->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&pp->tx_stats.syncp);
+
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
+ mvneta_txq_done(pp, txq);
+
+ /* If after calling mvneta_txq_done, count equals
+ * frags, we need to set the timer
+ */
+ if (txq->count == frags && frags > 0)
+ mvneta_add_tx_done_timer(pp);
+
+ return NETDEV_TX_OK;
+}
+
+
+/* Free tx resources, when resetting a port */
+static void mvneta_txq_done_force(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int tx_done = txq->count;
+
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ /* reset txq */
+ txq->count = 0;
+ txq->txq_put_index = 0;
+ txq->txq_get_index = 0;
+}
+
+/* handle tx done - called from tx done timer callback */
+static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
+ int *tx_todo)
+{
+ struct mvneta_tx_queue *txq;
+ u32 tx_done = 0;
+ struct netdev_queue *nq;
+
+ *tx_todo = 0;
+ while (cause_tx_done != 0) {
+ txq = mvneta_tx_done_policy(pp, cause_tx_done);
+ if (!txq)
+ break;
+
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (txq->count) {
+ tx_done += mvneta_txq_done(pp, txq);
+ *tx_todo += txq->count;
+ }
+
+ __netif_tx_unlock(nq);
+ cause_tx_done &= ~((1 << txq->id));
+ }
+
+ return tx_done;
+}
+
+/* Compute the CRC-8 of the specified address, using an algorithm that
+ * is specific to this hardware (per the HW spec) and differs from the
+ * generic CRC-8: the loop below divides by the polynomial 0x107,
+ * i.e. x^8 + x^2 + x + 1.
+ */
+static int mvneta_addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
+/* This method controls the net device special MAC multicast support.
+ * The Special Multicast Table for MAC addresses supports MACs of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table. This method sets the
+ * appropriate Special Multicast Table entry.
+ */
+static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
+ unsigned char last_byte,
+ int queue)
+{
+ unsigned int smc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Register offset from SMC table base */
+ tbl_offset = (last_byte / 4);
+ /* Entry offset within the above reg */
+ reg_offset = last_byte % 4;
+
+ smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
+ + tbl_offset * 4));
+
+ if (queue == -1)
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ else {
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
+ smc_table_reg);
+}
+
+/* This method controls the network device Other MAC multicast support.
+ * The Other Multicast Table is used for multicast addresses of any
+ * other type. A CRC-8 value is used as an index to the Other Multicast
+ * Table entries in the DA-Filter table.
+ * The method gets the CRC-8 value from the calling routine and sets
+ * the appropriate Other Multicast Table entry according to it.
+ */
+static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
+ unsigned char crc8,
+ int queue)
+{
+ unsigned int omc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+ reg_offset = crc8 % 4; /* Entry offset within the above reg */
+
+ omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified Other DA table entry */
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
+}
+
+/* The network device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast of another type. A CRC-8 value
+ * is used as an index to the Other Multicast Table entries in the
+ * DA-Filter table.
+ */
+static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
+ int queue)
+{
+ unsigned char crc_result = 0;
+
+ if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
+ return 0;
+ }
+
+ crc_result = mvneta_addr_crc(p_addr);
+ if (queue == -1) {
+ if (pp->mcast_count[crc_result] == 0) {
+ netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
+ crc_result);
+ return -EINVAL;
+ }
+
+ pp->mcast_count[crc_result]--;
+ if (pp->mcast_count[crc_result] != 0) {
+ netdev_info(pp->dev,
+ "After delete there are %d valid Mcast for crc8=0x%02x\n",
+ pp->mcast_count[crc_result], crc_result);
+ return -EINVAL;
+ }
+ } else
+ pp->mcast_count[crc_result]++;
+
+ mvneta_set_other_mcast_addr(pp, crc_result, queue);
+
+ return 0;
+}
+
+/* Configure the filtering mode of the Ethernet port */
+static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
+ int is_promisc)
+{
+ u32 port_cfg_reg, val;
+
+ port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
+
+ val = mvreg_read(pp, MVNETA_TYPE_PRIO);
+
+ /* Set / Clear UPM bit in port configuration register */
+ if (is_promisc) {
+ /* Accept all Unicast addresses */
+ port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
+ val |= MVNETA_FORCE_UNI;
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
+ } else {
+ /* Reject all Unicast addresses */
+ port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
+ val &= ~MVNETA_FORCE_UNI;
+ }
+
+ mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
+ mvreg_write(pp, MVNETA_TYPE_PRIO, val);
+}
+
+/* register unicast and multicast addresses */
+static void mvneta_set_rx_mode(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Accept all: Multicast + Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 1);
+ mvneta_set_ucast_table(pp, rxq_def);
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept single Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 0);
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast */
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept only initialized multicast */
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ mvneta_mcast_addr_set(pp, ha->addr,
+ rxq_def);
+ }
+ }
+ }
+ }
+}
+
+/* Interrupt handling - the callback for request_irq() */
+static irqreturn_t mvneta_isr(int irq, void *dev_id)
+{
+ struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+
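+ /* RX interrupts are unmasked again from mvneta_poll() once the
+ * NAPI budget allows the poll to complete.
+ */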
+ napi_schedule(&pp->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* NAPI handler
+ * Bits 0 - 7 of the causeRxTx register indicate that packets were
+ * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
+ * Bits 8 - 15 of the causeRxTx register indicate that packets were
+ * received on the corresponding RXQ (Bit 8 is for RX queue 0).
+ * Each CPU has its own causeRxTx register
+ */
+static int mvneta_poll(struct napi_struct *napi, int budget)
+{
+ int rx_done = 0;
+ u32 cause_rx_tx;
+ unsigned long flags;
+ struct mvneta_port *pp = netdev_priv(napi->dev);
+
+ if (!netif_running(pp->dev)) {
+ napi_complete(napi);
+ return rx_done;
+ }
+
+ /* Read cause register */
+ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
+ MVNETA_RX_INTR_MASK(rxq_number);
+
+ /* For the case where the last mvneta_poll did not process all
+ * RX packets
+ */
+ cause_rx_tx |= pp->cause_rx_tx;
+ if (rxq_number > 1) {
+ while ((cause_rx_tx != 0) && (budget > 0)) {
+ int count;
+ struct mvneta_rx_queue *rxq;
+ /* get rx queue number from cause_rx_tx */
+ rxq = mvneta_rx_policy(pp, cause_rx_tx);
+ if (!rxq)
+ break;
+
+ /* process the packet in that rx queue */
+ count = mvneta_rx(pp, budget, rxq);
+ rx_done += count;
+ budget -= count;
+ if (budget > 0) {
+ /* set off the rx bit of the
+ * corresponding bit in the cause rx
+ * tx register, so that next iteration
+ * will find the next rx queue where
+ * packets are received on
+ */
+ cause_rx_tx &= ~((1 << rxq->id) << 8);
+ }
+ }
+ } else {
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+ budget -= rx_done;
+ }
+
+ if (budget > 0) {
+ cause_rx_tx = 0;
+ napi_complete(napi);
+ local_irq_save(flags);
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+ local_irq_restore(flags);
+ }
+
+ pp->cause_rx_tx = cause_rx_tx;
+ return rx_done;
+}
+
+/* tx done timer callback */
+static void mvneta_tx_done_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvneta_port *pp = netdev_priv(dev);
+ int tx_done = 0, tx_todo = 0;
+
+ if (!netif_running(dev))
+ return;
+
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ tx_done = mvneta_tx_done_gbe(pp,
+ (((1 << txq_number) - 1) &
+ MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
+ &tx_todo);
+ if (tx_todo > 0)
+ mvneta_add_tx_done_timer(pp);
+}
+
+/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
+static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ int num)
+{
+ struct net_device *dev = pp->dev;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct sk_buff *skb;
+ struct mvneta_rx_desc *rx_desc;
+ unsigned long phys_addr;
+
+ skb = dev_alloc_skb(pp->pkt_size);
+ if (!skb) {
+ netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
+ break;
+ }
+
+ rx_desc = rxq->descs + i;
+ memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
+ phys_addr = dma_map_single(dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+ }
+
+ /* Add this number of RX descriptors as non occupied (ready to
+ * get packets)
+ */
+ mvneta_rxq_non_occup_desc_add(pp, rxq, i);
+
+ return i;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+static void mvneta_tx_reset(struct mvneta_port *pp)
+{
+ int queue;
+
+ /* Free the skbs in the HAL TX ring */
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_done_force(pp, &pp->txqs[queue]);
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+}
+
+static void mvneta_rx_reset(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ rxq->size = pp->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &rxq->descs_phys, GFP_KERNEL);
+ if (rxq->descs == NULL) {
+ netdev_err(pp->dev,
+ "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
+ rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->size);
+ return -ENOMEM;
+ }
+
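+ /* Make sure descriptor address is cache line size aligned */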
+ BUG_ON(rxq->descs !=
+ PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ rxq->last_desc = rxq->size - 1;
+
+ /* Set Rx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
+
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+
+ /* Set coalescing pkts and time */
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+ /* Fill RXQ with buffers from RX pool */
+ mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+
+ return 0;
+}
+
+/* Cleanup Rx queue */
+static void mvneta_rxq_deinit(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ mvneta_rxq_drop_pkts(pp, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_phys);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_phys = 0;
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ txq->size = pp->tx_ring_size;
+
+ /* Allocate memory for TX descriptors */
+ txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &txq->descs_phys, GFP_KERNEL);
+ if (txq->descs == NULL) {
+ netdev_err(pp->dev,
+ "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
+ txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->size);
+ return -ENOMEM;
+ }
+
+ /* Make sure descriptor address is cache line size aligned */
+ BUG_ON(txq->descs !=
+ PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ txq->last_desc = txq->size - 1;
+
+ /* Set maximum bandwidth for enabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+ /* Set Tx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+ txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
+ if (txq->tx_skb == NULL) {
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+
+ return 0;
+}
+
+/* Free all resources allocated by mvneta_txq_init() */
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ kfree(txq->tx_skb);
+
+ if (txq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_phys = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
+}
+
+/* Cleanup all Tx queues */
+static void mvneta_cleanup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_deinit(pp, &pp->txqs[queue]);
+}
+
+/* Cleanup all Rx queues */
+static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++)
+ mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+}
+
+
+/* Init all Rx queues */
+static int mvneta_setup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_rxqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Init all tx queues */
+static int mvneta_setup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++) {
+ int err = mvneta_txq_init(pp, &pp->txqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create txq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_txqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void mvneta_start_dev(struct mvneta_port *pp)
+{
+ mvneta_max_rx_size_set(pp, pp->pkt_size);
+ mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+
+ /* start the Rx/Tx activity */
+ mvneta_port_enable(pp);
+
+ /* Enable polling on the port */
+ napi_enable(&pp->napi);
+
+ /* Unmask interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+
+ phy_start(pp->phy_dev);
+ netif_tx_start_all_queues(pp->dev);
+}
+
+static void mvneta_stop_dev(struct mvneta_port *pp)
+{
+ phy_stop(pp->phy_dev);
+
+ napi_disable(&pp->napi);
+
+ netif_carrier_off(pp->dev);
+
+ mvneta_port_down(pp);
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* Stop the port activity */
+ mvneta_port_disable(pp);
+
+ /* Clear all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+ mvneta_tx_reset(pp);
+ mvneta_rx_reset(pp);
+}
+
+/* tx timeout callback - display a message and stop/start the network device */
+static void mvneta_tx_timeout(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ netdev_info(dev, "tx timeout\n");
+ mvneta_stop_dev(pp);
+ mvneta_start_dev(pp);
+}
+
+/* Validate the MTU: return the (possibly rounded) MTU, or -EINVAL if it is too small */
+static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
+{
+ if (mtu < 68) {
+ netdev_err(dev, "cannot change mtu to less than 68\n");
+ return -EINVAL;
+ }
+
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ if (mtu > 9676) {
+ netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
+ mtu = 9676;
+ }
+
+ if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+ mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+ }
+
+ return mtu;
+}
+
+/* Change the device mtu */
+static int mvneta_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mtu = mvneta_check_mtu_valid(dev, mtu);
+ if (mtu < 0)
+ return -EINVAL;
+
+ dev->mtu = mtu;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* The interface is running, so we have to force a
+ * reallocation of the RXQs
+ */
+ mvneta_stop_dev(pp);
+
+ mvneta_cleanup_txqs(pp);
+ mvneta_cleanup_rxqs(pp);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret) {
+ netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ return ret;
+ }
+
+ mvneta_setup_txqs(pp);
+
+ mvneta_start_dev(pp);
+ mvneta_port_up(pp);
+
+ return 0;
+}
+
+/* Handle setting mac address */
+static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
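+ /* addr points to a struct sockaddr; skip its 2-byte sa_family
+ * field to reach the MAC bytes in sa_data.
+ */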
+ u8 *mac = addr + 2;
+ int i;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Remove previous address table entry */
+ mvneta_mac_addr_set(pp, dev->dev_addr, -1);
+
+ /* Set new addr in hw */
+ mvneta_mac_addr_set(pp, mac, rxq_def);
+
+ /* Set addr in the device */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = mac[i];
+
+ return 0;
+}
+
+static void mvneta_adjust_link(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ struct phy_device *phydev = pp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((pp->speed != phydev->speed) ||
+ (pp->duplex != phydev->duplex)) {
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+
+ if (phydev->duplex)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (phydev->speed == SPEED_1000)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+
+ pp->duplex = phydev->duplex;
+ pp->speed = phydev->speed;
+ }
+ }
+
+ if (phydev->link != pp->link) {
+ if (!phydev->link) {
+ pp->duplex = -1;
+ pp->speed = 0;
+ }
+
+ pp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val |= (MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ mvneta_port_up(pp);
+ netdev_info(pp->dev, "link up\n");
+ } else {
+ mvneta_port_down(pp);
+ netdev_info(pp->dev, "link down\n");
+ }
+ }
+}
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct phy_device *phy_dev;
+
+ phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
+ pp->phy_interface);
+ if (!phy_dev) {
+ netdev_err(pp->dev, "could not find the PHY\n");
+ return -ENODEV;
+ }
+
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
+
+ pp->phy_dev = phy_dev;
+ pp->link = 0;
+ pp->duplex = 0;
+ pp->speed = 0;
+
+ return 0;
+}
+
+static void mvneta_mdio_remove(struct mvneta_port *pp)
+{
+ phy_disconnect(pp->phy_dev);
+ pp->phy_dev = NULL;
+}
+
+static int mvneta_open(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret)
+ return ret;
+
+ ret = mvneta_setup_txqs(pp);
+ if (ret)
+ goto err_cleanup_rxqs;
+
+ /* Connect to port interrupt line */
+ ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+ MVNETA_DRIVER_NAME, pp);
+ if (ret) {
+ netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
+ goto err_cleanup_txqs;
+ }
+
+ /* By default the link is down */
+ netif_carrier_off(pp->dev);
+
+ ret = mvneta_mdio_probe(pp);
+ if (ret < 0) {
+ netdev_err(dev, "cannot probe MDIO bus\n");
+ goto err_free_irq;
+ }
+
+ mvneta_start_dev(pp);
+
+ return 0;
+
+err_free_irq:
+ free_irq(pp->dev->irq, pp);
+err_cleanup_txqs:
+ mvneta_cleanup_txqs(pp);
+err_cleanup_rxqs:
+ mvneta_cleanup_rxqs(pp);
+ return ret;
+}
+
+/* Stop the port, free port interrupt line */
+static int mvneta_stop(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+ free_irq(dev->irq, pp);
+ mvneta_cleanup_rxqs(pp);
+ mvneta_cleanup_txqs(pp);
+ del_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ return 0;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtool */
+int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(pp->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtool */
+int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(pp->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtool */
+static int mvneta_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int queue;
+
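+ /* Apply the requested coalescing parameters to every RX queue and every TX queue */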
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+ }
+
+ return 0;
+}
+
+/* Get interrupt coalescing settings for ethtool */
+static int mvneta_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
+ c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
+
+ c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
+ return 0;
+}
+
+static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(netdev);
+
+ ring->rx_max_pending = MVNETA_MAX_RXD;
+ ring->tx_max_pending = MVNETA_MAX_TXD;
+ ring->rx_pending = pp->rx_ring_size;
+ ring->tx_pending = pp->tx_ring_size;
+}
+
+static int mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+ return -EINVAL;
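+ /* Clamp the requested ring sizes to the controller's maximum descriptor counts */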
+ pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
+ ring->rx_pending : MVNETA_MAX_RXD;
+ pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
+ ring->tx_pending : MVNETA_MAX_TXD;
+
+ if (netif_running(dev)) {
+ mvneta_stop(dev);
+ if (mvneta_open(dev)) {
+ netdev_err(dev,
+ "error on opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops mvneta_netdev_ops = {
+ .ndo_open = mvneta_open,
+ .ndo_stop = mvneta_stop,
+ .ndo_start_xmit = mvneta_tx,
+ .ndo_set_rx_mode = mvneta_set_rx_mode,
+ .ndo_set_mac_address = mvneta_set_mac_addr,
+ .ndo_change_mtu = mvneta_change_mtu,
+ .ndo_tx_timeout = mvneta_tx_timeout,
+ .ndo_get_stats64 = mvneta_get_stats64,
+};
+
+const struct ethtool_ops mvneta_eth_tool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = mvneta_ethtool_get_settings,
+ .set_settings = mvneta_ethtool_set_settings,
+ .set_coalesce = mvneta_ethtool_set_coalesce,
+ .get_coalesce = mvneta_ethtool_get_coalesce,
+ .get_drvinfo = mvneta_ethtool_get_drvinfo,
+ .get_ringparam = mvneta_ethtool_get_ringparam,
+ .set_ringparam = mvneta_ethtool_set_ringparam,
+};
+
+/* Initialize hw */
+static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+{
+ int queue;
+
+ /* Disable port */
+ mvneta_port_disable(pp);
+
+ /* Set port default values */
+ mvneta_defaults_set(pp);
+
+ pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
+ if (!pp->txqs)
+ return -ENOMEM;
+
+ /* Initialize TX descriptor rings */
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->id = queue;
+ txq->size = pp->tx_ring_size;
+ txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
+ }
+
+ pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs) {
+ kfree(pp->txqs);
+ return -ENOMEM;
+ }
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->id = queue;
+ rxq->size = pp->rx_ring_size;
+ rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
+ rxq->time_coal = MVNETA_RX_COAL_USEC;
+ }
+
+ return 0;
+}
+
+static void mvneta_deinit(struct mvneta_port *pp)
+{
+ kfree(pp->txqs);
+ kfree(pp->rxqs);
+}
+
+/* Platform glue: initialize decoding windows */
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+ const struct mbus_dram_target_info *dram)
+{
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+ }
+
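+ /* Enable and program one decoding window per DRAM chip select; the remaining windows stay disabled */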
+ win_enable = 0x3f;
+ win_protect = 0;
+
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+ mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Power up the port */
+static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+{
+ u32 val;
+
+ /* MAC Cause register should be cleared */
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mvneta_port_sgmii_config(pp);
+
+ mvneta_gmac_rgmii_set(pp, 1);
+
+ /* Cancel Port Reset */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
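+ /* Wait for the port reset bit to clear */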
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
+}
+
+/* Device initialization routine */
+static int mvneta_probe(struct platform_device *pdev)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ struct device_node *dn = pdev->dev.of_node;
+ struct device_node *phy_node;
+ u32 phy_addr;
+ struct mvneta_port *pp;
+ struct net_device *dev;
+ const char *mac_addr;
+ int phy_mode;
+ int err;
+
+ /* Our multiqueue support is not complete, so for now, only
+ * allow the usage of the first RX queue
+ */
+ if (rxq_def != 0) {
+ dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
+ return -EINVAL;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0) {
+ err = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ phy_node = of_parse_phandle(dn, "phy", 0);
+ if (!phy_node) {
+ dev_err(&pdev->dev, "no associated PHY\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ phy_mode = of_get_phy_mode(dn);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "incorrect phy-mode\n");
+ err = -EINVAL;
+ goto err_free_irq;
+ }
+
+ mac_addr = of_get_mac_address(dn);
+
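+ /* Use the DT-provided MAC address if it is valid, otherwise fall back to a random one */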
+ if (!mac_addr || !is_valid_ether_addr(mac_addr))
+ eth_hw_addr_random(dev);
+ else
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+
+ SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+
+ pp = netdev_priv(dev);
+
+ pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+ init_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ pp->weight = MVNETA_RX_POLL_WEIGHT;
+ pp->phy_node = phy_node;
+ pp->phy_interface = phy_mode;
+
+ pp->base = of_iomap(dn, 0);
+ if (pp->base == NULL) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_unmap;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->tx_done_timer.data = (unsigned long)dev;
+
+ pp->tx_ring_size = MVNETA_MAX_TXD;
+ pp->rx_ring_size = MVNETA_MAX_RXD;
+
+ pp->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = mvneta_init(pp, phy_addr);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't init eth hal\n");
+ goto err_clk;
+ }
+ mvneta_port_power_up(pp, phy_mode);
+
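+ /* Configure the MBUS address decoding windows when DRAM topology information is available */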
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvneta_conf_mbus_windows(pp, dram_target_info);
+
+ netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register\n");
+ goto err_deinit;
+ }
+
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ platform_set_drvdata(pdev, pp->dev);
+
+ return 0;
+
+err_deinit:
+ mvneta_deinit(pp);
+err_clk:
+ clk_disable_unprepare(pp->clk);
+err_unmap:
+ iounmap(pp->base);
+err_free_irq:
+ irq_dispose_mapping(dev->irq);
+err_free_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+/* Device removal routine */
+static int mvneta_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ mvneta_deinit(pp);
+ clk_disable_unprepare(pp->clk);
+ iounmap(pp->base);
+ irq_dispose_mapping(dev->irq);
+ free_netdev(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id mvneta_match[] = {
+ { .compatible = "marvell,armada-370-neta" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_match);
+
+static struct platform_driver mvneta_driver = {
+ .probe = mvneta_probe,
+ .remove = mvneta_remove,
+ .driver = {
+ .name = MVNETA_DRIVER_NAME,
+ .of_match_table = mvneta_match,
+ },
+};
+
+module_platform_driver(mvneta_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, S_IRUGO);
+module_param(txq_number, int, S_IRUGO);
+
+module_param(rxq_def, int, S_IRUGO);
+module_param(txq_def, int, S_IRUGO);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 3d1899ff107..fdc5f23d8e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u32 reply;
u8 is_going_down = 0;
int i;
+ unsigned long flags;
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
goto reset_slave;
}
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = cmd;
else
is_going_down = 1;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d)"
" executing from slave:%d\n",
@@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
reset_slave:
/* cleanup any slave resources */
mlx4_delete_all_resources_for_slave(dev, slave);
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
memset(&slave_state[slave].event_eq, 0,
@@ -1755,7 +1756,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
spin_lock_init(&s_state->lock);
}
- memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index aa9c2f6cf3c..b8d0854a7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,7 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
cq->is_tx = mode;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d1287f81a3..75a3f467bb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1604,6 +1604,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
priv->rx_ring_num = prof->rx_ring_num;
+ priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f76c9671f36..fed26d867f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -566,6 +566,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct ethhdr *ethh;
dma_addr_t dma;
u64 s_mac;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return 0;
@@ -574,7 +575,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
- cqe = &cq->buf[index];
+ cqe = &cq->buf[(index << factor) + factor];
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -709,7 +710,7 @@ next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
- cqe = &cq->buf[index];
+ cqe = &cq->buf[(index << factor) + factor];
if (++polled == budget)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1f571d00915..2b799f4f1c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -315,12 +315,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_cqe *buf = cq->buf;
u32 packets = 0;
u32 bytes = 0;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
/* Process all completed CQEs */
@@ -349,7 +350,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
++cons_index;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index b84a88bc44d..251ae2f9311 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,15 +101,21 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
mb();
}
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
{
- unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
- return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+ /* (entry & (eq->nent - 1)) gives us a cyclic array */
+ unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
+ /* CX3 is capable of extending the EQE from 32 to 64 bytes.
+ * When this feature is enabled, the first (in the lower addresses)
+ * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
+ * contain the legacy EQE information.
+ */
+ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
{
- struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
@@ -177,7 +183,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
s_eqe->slave_id = slave;
 /* ensure all information is written before setting the ownership bit */
wmb();
@@ -401,6 +407,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int i;
int err;
+ unsigned long flags;
mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
@@ -412,10 +419,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
slave_state[i].is_slave_going_down = 0;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*notify the FW:*/
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
@@ -440,8 +447,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
u8 update_slave_state;
int i;
enum slave_port_gen_event gen_event;
+ unsigned long flags;
- while ((eqe = next_eqe_sw(eq))) {
+ while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -647,13 +655,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
} else
update_slave_state = 1;
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (update_slave_state) {
priv->mfunc.master.slave_state[flr_slave].active = false;
priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
@@ -864,7 +872,8 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
- npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -966,8 +975,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int err;
- int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
int i;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -1267,7 +1277,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Temporary use polling for command completions */
mlx4_cmd_use_polling(dev);
- /* Map the new eq to handle all asyncronous events */
+ /* Map the new eq to handle all asynchronous events */
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[i].eqn);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 4f30b99324c..8b3d0512a46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -110,6 +110,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[42] = "Multicast VEP steering support",
[48] = "Counters support",
[59] = "Port management change event support",
+ [61] = "64 byte EQE support",
+ [62] = "64 byte CQE support",
};
int i;
@@ -235,7 +237,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- size = 0; /* no PF behaviour is set for now */
+ size = dev->caps.function_caps; /* set PF behaviours */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
field = 0; /* protected FMR support not available as yet */
@@ -1237,6 +1239,24 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1318,7 +1338,9 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u32 dword_field;
int err;
+ u8 byte_field;
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
@@ -1351,10 +1373,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+ if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+ param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ } else {
+ MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
+ if (byte_field & 0x8)
+ param->steering_mode = MLX4_STEERING_MODE_B0;
+ else
+ param->steering_mode = MLX4_STEERING_MODE_A0;
+ }
/* steering attributes */
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
-
+ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
@@ -1370,6 +1400,13 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
}
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
+ if (byte_field & 0x20) /* 64-byte EQE enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
+ if (byte_field & 0x40) /* 64-byte CQE enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 85abe9c11a2..dbf2f69cc59 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -172,6 +172,8 @@ struct mlx4_init_hca_param {
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 fs_hash_enable_bits;
+ u8 steering_mode; /* for QUERY_HCA */
+ u64 dev_cap_enabled;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 200cc0ec805..e1bafffbc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -85,18 +85,24 @@ static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
-int mlx4_log_num_mgm_entry_size = 10;
+int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
- " 10 gives 248.range: 9<="
+ " 10 gives 248.range: 7 <="
" log_num_mgm_entry_size <= 12."
- " Not in use with device managed"
- " flow steering");
+ " To activate device managed"
+ " flow steering when available, set to -1");
+
+static bool enable_64b_cqe_eqe;
+module_param(enable_64b_cqe_eqe, bool, 0444);
+MODULE_PARM_DESC(enable_64b_cqe_eqe,
+ "Enable 64 byte CQEs/EQEs when the FW supports this");
#define HCA_GLOBAL_CAP_MASK 0
-#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
+#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
@@ -275,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
- if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
- dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
- dev->caps.fs_log_max_ucast_qp_range_size =
- dev_cap->fs_log_max_ucast_qp_range_size;
- } else {
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
- } else {
- dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
- }
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
- }
- mlx4_dbg(dev, "Steering mode is: %s\n",
- mlx4_steering_mode_str(dev->caps.steering_mode));
-
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -386,6 +370,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
+
+ if (!enable_64b_cqe_eqe) {
+ if (dev_cap->flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
+ mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
+ }
+ }
+
+ if ((dev_cap->flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
+ mlx4_is_master(dev))
+ dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
+
return 0;
}
/*The function checks if there are live vf, return the num of them*/
@@ -472,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
}
EXPORT_SYMBOL(mlx4_is_slave_active);
+static void slave_adjust_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *hca_param)
+{
+ dev->caps.steering_mode = hca_param->steering_mode;
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else
+ dev->caps.num_qp_per_mgm =
+ 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
+
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+}
+
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
int err;
@@ -599,6 +615,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
goto err_mem;
}
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
+ slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+
return 0;
err_mem:
@@ -1285,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
}
}
+static int choose_log_fs_mgm_entry_size(int qp_per_entry)
+{
+ int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
+
+ for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
+ i++) {
+ if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
+ break;
+ }
+
+ return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
+}
+
+static void choose_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (mlx4_log_num_mgm_entry_size == -1 &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
+ (!mlx4_is_mfunc(dev) ||
+ (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
+ dev->oper_log_mgm_entry_size =
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->oper_log_mgm_entry_size =
+ mlx4_log_num_mgm_entry_size > 0 ?
+ mlx4_log_num_mgm_entry_size :
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
+ "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode),
+ dev->oper_log_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size);
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1324,6 +1410,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ choose_steering_mode(dev, &dev_cap);
+
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -2416,6 +2504,17 @@ static int __init mlx4_verify_params(void)
port_type_array[0] = true;
}
+ if (mlx4_log_num_mgm_entry_size != -1 &&
+ (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+ mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
+ pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
+ "in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ return -1;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index e151c21baf2..1ee4db3c640 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,12 +54,7 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED)
- return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
- else
- return min((1 << mlx4_log_num_mgm_entry_size),
- MLX4_MAX_MGM_ENTRY_SIZE);
+ return 1 << dev->oper_log_mgm_entry_size;
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1cf42036d7b..116c5c29d2d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -94,8 +94,10 @@ enum {
};
enum {
- MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
- MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
+ MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
MLX4_MTT_ENTRY_PER_SEG = 8,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 334ec483480..8d54412ada6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -473,6 +473,7 @@ struct mlx4_en_priv {
int mac_index;
unsigned max_mtu;
int base_qpn;
+ int cqe_factor;
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b05705f50f0..561ed2a22a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
int err;
+ int qpn;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, NULL);
+ if (err) {
+ pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ return err;
+ }
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
- if (validate_eth_header_mac(slave, rule_header, rlist))
- return -EINVAL;
+ if (validate_eth_header_mac(slave, rule_header, rlist)) {
+ err = -EINVAL;
+ goto err_put;
+ }
break;
case MLX4_NET_TRANS_RULE_ID_IB:
break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
- if (add_eth_header(dev, slave, inbox, rlist, header_id))
- return -EINVAL;
+ if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+ err = -EINVAL;
+ goto err_put;
+ }
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
pr_err("Corrupted mailbox.\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put;
}
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- return err;
+ goto err_put;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources.\n ");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
- MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
+err_put:
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 093d594435e..8ebc352bcbe 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4761,7 +4761,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
struct ksz_dma_buf *dma_buf;
struct net_device *dev = NULL;
- spin_lock(&hw_priv->hwlock);
+ spin_lock_irq(&hw_priv->hwlock);
last = info->last;
while (info->avail < info->alloc) {
@@ -4795,7 +4795,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
info->avail++;
}
info->last = last;
- spin_unlock(&hw_priv->hwlock);
+ spin_unlock_irq(&hw_priv->hwlock);
/* Notify the network subsystem that the packet has been sent. */
if (dev)
@@ -5259,11 +5259,15 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
+ spin_lock(&hw_priv->hwlock);
+
hw_read_intr(hw, &int_enable);
/* Not our interrupt! */
- if (!int_enable)
+ if (!int_enable) {
+ spin_unlock(&hw_priv->hwlock);
return IRQ_NONE;
+ }
do {
hw_ack_intr(hw, int_enable);
@@ -5310,6 +5314,8 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
hw_ena_intr(hw);
+ spin_unlock(&hw_priv->hwlock);
+
return IRQ_HANDLED;
}
@@ -6769,7 +6775,7 @@ static int stp;
/*
* This enables fast aging in the KSZ8842 switch. Not sure what situation
* needs that. However, fast aging is used to flush the dynamic MAC table when
- * STP suport is enabled.
+ * STP support is enabled.
*/
static int fast_aging;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
index 7ec4b864a55..75ec5e7cf50 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
@@ -27,7 +27,7 @@ struct mcp_gen_header {
*
* Fields below this comment are extensions added in later versions
* of this struct, drivers should compare the header_length against
- * offsetof(field) to check wether a given MCP implements them.
+ * offsetof(field) to check whether a given MCP implements them.
*
* Never remove any field. Keep everything naturally align.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 53790247968..bc7ec64e9c7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 29
-#define QLCNIC_LINUX_VERSIONID "5.0.29"
+#define _QLCNIC_LINUX_SUBVERSION 30
+#define QLCNIC_LINUX_VERSIONID "5.0.30"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 58f094ca052..b14b8f0787e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -134,7 +134,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
__le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
dma_addr_t tmp_addr_t = 0;
ahw = adapter->ahw;
@@ -150,6 +150,8 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
}
temp_size = cmd.rsp.arg2;
version = cmd.rsp.arg3;
+ dev_info(&adapter->pdev->dev,
+ "minidump template version = 0x%x", version);
if (!temp_size)
return -EIO;
@@ -174,7 +176,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
err = -EIO;
goto error;
}
- tmp_tmpl = tmp_addr;
ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
if (!ahw->fw_dump.tmpl_hdr) {
err = -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index fc48e000f35..7a6d5ebe4e0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -365,7 +365,7 @@ static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct qlcnic_cmd_buffer *pbuf;
struct cmd_desc_type0 *cmd_desc;
struct qlcnic_host_tx_ring *tx_ring;
@@ -379,7 +379,6 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -402,7 +401,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
pbuf->frag_count = 0;
memcpy(&tx_ring->desc_head[producer],
- &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+ cmd_desc, sizeof(struct cmd_desc_type0));
producer = get_next_index(producer, tx_ring->num_desc);
i++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index a7554d9aab0..d833f592789 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -445,13 +445,10 @@ static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
u8 id;
- u32 ref_count;
int i, ret = 1;
u32 data = QLCNIC_MGMT_FUNC;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* If other drivers are not in use set their privilege level */
- ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
ret = qlcnic_api_lock(adapter);
if (ret)
goto err_lock;
@@ -531,11 +528,9 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
{
u32 offset;
void __iomem *mem_ptr0 = NULL;
- resource_size_t mem_base;
unsigned long mem_len, pci_len0 = 0, bar0_len;
/* remap phys address */
- mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
qlcnic_get_bar_length(pdev->device, &bar0_len);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 12ff2927074..0b8d8625834 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -197,7 +197,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
int i, k, timeout = 0;
void __iomem *base = adapter->ahw->pci_base0;
u32 addr, data;
- u8 opcode, no_ops;
+ u8 no_ops;
struct __ctrl *ctr = &entry->region.ctrl;
struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
@@ -206,7 +206,6 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
for (i = 0; i < no_ops; i++) {
k = 0;
- opcode = 0;
for (k = 0; k < 8; k++) {
if (!(ctr->opcode & (1 << k)))
continue;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index cb6fc5a743c..5ac93323a40 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -577,28 +577,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct cp_private *cp;
+ int handled = 0;
u16 status;
if (unlikely(dev == NULL))
return IRQ_NONE;
cp = netdev_priv(dev);
+ spin_lock(&cp->lock);
+
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
- return IRQ_NONE;
+ goto out_unlock;
+
+ handled = 1;
netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
- spin_lock(&cp->lock);
-
 /* close possible races with dev_close */
if (unlikely(!netif_running(dev))) {
cpw16(IntrMask, 0);
- spin_unlock(&cp->lock);
- return IRQ_HANDLED;
+ goto out_unlock;
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
@@ -612,7 +614,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
if (status & LinkChg)
mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
- spin_unlock(&cp->lock);
if (status & PciErr) {
u16 pci_status;
@@ -625,7 +626,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
/* TODO: reset hardware */
}
- return IRQ_HANDLED;
+out_unlock:
+ spin_unlock(&cp->lock);
+
+ return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 022b45bc14f..a670d23d934 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2386,8 +2386,6 @@ static const struct of_device_id smc91x_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, smc91x_match);
-#else
-#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2402,7 +2400,7 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
- .of_match_table = smc91x_match,
+ .of_match_table = of_match_ptr(smc91x_match),
},
};
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4616bf27d51..e112877d15d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2575,11 +2575,13 @@ static const struct dev_pm_ops smsc911x_pm_ops = {
#define SMSC911X_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
static const struct of_device_id smsc911x_dt_ids[] = {
{ .compatible = "smsc,lan9115", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
+#endif
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
@@ -2588,7 +2590,7 @@ static struct platform_driver smsc911x_driver = {
.name = SMSC_CHIPNAME,
.owner = THIS_MODULE,
.pm = SMSC911X_PM_OPS,
- .of_match_table = smsc911x_dt_ids,
+ .of_match_table = of_match_ptr(smsc911x_dt_ids),
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 023a4fb4efa..b05df8983be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -127,14 +127,14 @@ static inline int stmmac_register_platform(void)
}
static inline void stmmac_unregister_platform(void)
{
- platform_driver_register(&stmmac_pltfr_driver);
+ platform_driver_unregister(&stmmac_pltfr_driver);
}
#else
static inline int stmmac_register_platform(void)
{
pr_debug("stmmac: do not register the platf driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_platform(void)
{
@@ -162,7 +162,7 @@ static inline int stmmac_register_pci(void)
{
pr_debug("stmmac: do not register the PCI driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_pci(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 542edbcd92c..f07c0612abf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2194,18 +2194,20 @@ int stmmac_restore(struct net_device *ndev)
*/
static int __init stmmac_init(void)
{
- int err_plt = 0;
- int err_pci = 0;
-
- err_plt = stmmac_register_platform();
- err_pci = stmmac_register_pci();
-
- if ((err_pci) && (err_plt)) {
- pr_err("stmmac: driver registration failed\n");
- return -EINVAL;
- }
+ int ret;
+ ret = stmmac_register_platform();
+ if (ret)
+ goto err;
+ ret = stmmac_register_pci();
+ if (ret)
+ goto err_pci;
return 0;
+err_pci:
+ stmmac_unregister_platform();
+err:
+ pr_err("stmmac: driver registration failed\n");
+ return ret;
}
static void __exit stmmac_exit(void)
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 337766738ec..463597f919f 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -27,8 +27,6 @@
#include <linux/uaccess.h>
#include <linux/workqueue.h>
-#include <plat/clock.h>
-
#include "cpts.h"
#ifdef CONFIG_TI_CPTS
@@ -249,8 +247,7 @@ static void cpts_clk_init(struct cpts *cpts)
cpts->refclk = NULL;
return;
}
- clk_enable(cpts->refclk);
- cpts->freq = cpts->refclk->recalc(cpts->refclk);
+ clk_prepare_enable(cpts->refclk);
}
static void cpts_clk_release(struct cpts *cpts)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index e1bba3a496b..fe993cdd7e2 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -120,7 +120,6 @@ struct cpts {
struct delayed_work overflow_work;
int phc_index;
struct clk *refclk;
- unsigned long freq;
struct list_head events;
struct list_head pool;
struct cpts_event pool_data[CPTS_MAX_EVENTS];
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 58f13adaa54..ae7cd7f3656 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -608,7 +608,7 @@ static int bcm5421_poll_link(struct mii_phy* phy)
if ( mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
- /* try to find out wether we have a link */
+ /* try to find out whether we have a link */
phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -634,7 +634,7 @@ static int bcm5421_read_link(struct mii_phy* phy)
phy->speed = SPEED_1000;
- /* find out wether we are running half- or full duplex */
+ /* find out whether we are running half- or full duplex */
phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -681,7 +681,7 @@ static int bcm5461_poll_link(struct mii_phy* phy)
if ( mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
- /* find out wether we have a link */
+ /* find out whether we have a link */
phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -710,7 +710,7 @@ static int bcm5461_read_link(struct mii_phy* phy)
phy->speed = SPEED_1000;
- /* find out wether we are running half- or full duplex */
+ /* find out whether we are running half- or full duplex */
phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = phy_read(phy, MII_NCONFIG);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2ac2164a1e3..fbd106edbe5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -138,6 +138,8 @@ struct tun_file {
 /* only used for fasync */
unsigned int flags;
u16 queue_index;
+ struct list_head next;
+ struct tun_struct *detached;
};
struct tun_flow_entry {
@@ -178,10 +180,11 @@ struct tun_struct {
int debug;
#endif
spinlock_t lock;
- struct kmem_cache *flow_cache;
struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
struct timer_list flow_gc_timer;
unsigned long ageing_time;
+ unsigned int numdisabled;
+ struct list_head disabled;
};
static inline u32 tun_hashfn(u32 rxhash)
@@ -205,8 +208,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
struct hlist_head *head,
u32 rxhash, u16 queue_index)
{
- struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
- GFP_ATOMIC);
+ struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
+
if (e) {
tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
rxhash, queue_index);
@@ -219,19 +222,12 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
return e;
}
-static void tun_flow_free(struct rcu_head *head)
-{
- struct tun_flow_entry *e
- = container_of(head, struct tun_flow_entry, rcu);
- kmem_cache_free(e->tun->flow_cache, e);
-}
-
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
- call_rcu(&e->rcu, tun_flow_free);
+ kfree_rcu(e, rcu);
}
static void tun_flow_flush(struct tun_struct *tun)
@@ -297,13 +293,12 @@ static void tun_flow_cleanup(unsigned long data)
spin_unlock_bh(&tun->lock);
}
-static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
+static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
u16 queue_index)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
- u32 rxhash = skb_get_rxhash(skb);
if (!rxhash)
return;
@@ -386,6 +381,23 @@ static void tun_set_real_num_queues(struct tun_struct *tun)
netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
+static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
+{
+ tfile->detached = tun;
+ list_add_tail(&tfile->next, &tun->disabled);
+ ++tun->numdisabled;
+}
+
+static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
+{
+ struct tun_struct *tun = tfile->detached;
+
+ tfile->detached = NULL;
+ list_del_init(&tfile->next);
+ --tun->numdisabled;
+ return tun;
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -407,20 +419,25 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
ntfile->queue_index = index;
--tun->numqueues;
- sock_put(&tfile->sk);
+ if (clean)
+ sock_put(&tfile->sk);
+ else
+ tun_disable_queue(tun, tfile);
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
tun_set_real_num_queues(tun);
-
- if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
- }
+ } else if (tfile->detached && clean)
+ tun = tun_enable_queue(tfile);
if (clean) {
+ if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
+ !(tun->flags & TUN_PERSIST))
+ if (tun->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(tun->dev);
+
BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
sk_release_kernel(&tfile->sk);
@@ -437,7 +454,7 @@ static void tun_detach(struct tun_file *tfile, bool clean)
static void tun_detach_all(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- struct tun_file *tfile;
+ struct tun_file *tfile, *tmp;
int i, n = tun->numqueues;
for (i = 0; i < n; i++) {
@@ -458,6 +475,12 @@ static void tun_detach_all(struct net_device *dev)
skb_queue_purge(&tfile->sk.sk_receive_queue);
sock_put(&tfile->sk);
}
+ list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_enable_queue(tfile);
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ sock_put(&tfile->sk);
+ }
+ BUG_ON(tun->numdisabled != 0);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -474,7 +497,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
goto out;
err = -E2BIG;
- if (tun->numqueues == MAX_TAP_QUEUES)
+ if (!tfile->detached &&
+ tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
goto out;
err = 0;
@@ -488,9 +512,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
tfile->queue_index = tun->numqueues;
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
- sock_hold(&tfile->sk);
tun->numqueues++;
+ if (tfile->detached)
+ tun_enable_queue(tfile);
+ else
+ sock_hold(&tfile->sk);
+
tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra
@@ -797,12 +825,6 @@ static int tun_flow_init(struct tun_struct *tun)
{
int i;
- tun->flow_cache = kmem_cache_create("tun_flow_cache",
- sizeof(struct tun_flow_entry), 0, 0,
- NULL);
- if (!tun->flow_cache)
- return -ENOMEM;
-
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
INIT_HLIST_HEAD(&tun->flows[i]);
@@ -818,10 +840,6 @@ static void tun_flow_uninit(struct tun_struct *tun)
{
del_timer_sync(&tun->flow_gc_timer);
tun_flow_flush(tun);
-
- /* Wait for completion of call_rcu()'s */
- rcu_barrier();
- kmem_cache_destroy(tun->flow_cache);
}
/* Initialize net device. */
@@ -1010,6 +1028,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int copylen;
bool zerocopy = false;
int err;
+ u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) > total_len)
@@ -1162,12 +1181,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
}
+ skb_reset_network_header(skb);
+ rxhash = skb_get_rxhash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- tun_flow_update(tun, skb, tfile->queue_index);
+ tun_flow_update(tun, rxhash, tfile->queue_index);
return total_len;
}
@@ -1348,6 +1369,7 @@ static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ BUG_ON(!(list_empty(&tun->disabled)));
tun_flow_uninit(tun);
free_netdev(dev);
}
@@ -1542,6 +1564,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = tun_attach(tun, file);
if (err < 0)
return err;
+
+ if (tun->flags & TUN_TAP_MQ &&
+ (tun->numqueues + tun->numdisabled > 1))
+ return err;
}
else {
char *name;
@@ -1600,6 +1626,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
TUN_USER_FEATURES;
dev->features = dev->hw_features;
+ INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file);
if (err < 0)
goto err_free_dev;
@@ -1754,32 +1781,28 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
- struct net_device *dev;
int ret = 0;
rtnl_lock();
if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
- dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
- if (!dev) {
- ret = -EINVAL;
- goto unlock;
- }
-
- tun = netdev_priv(dev);
- if (dev->netdev_ops != &tap_netdev_ops &&
- dev->netdev_ops != &tun_netdev_ops)
+ tun = tfile->detached;
+ if (!tun)
ret = -EINVAL;
else if (tun_not_capable(tun))
ret = -EPERM;
else
ret = tun_attach(tun, file);
- } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
- __tun_detach(tfile, false);
- else
+ } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+ tun = rcu_dereference_protected(tfile->tun,
+ lockdep_rtnl_is_held());
+ if (!tun || !(tun->flags & TUN_TAP_MQ))
+ ret = -EINVAL;
+ else
+ __tun_detach(tfile, false);
+ } else
ret = -EINVAL;
-unlock:
rtnl_unlock();
return ret;
}
@@ -2091,6 +2114,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
file->private_data = tfile;
set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+ INIT_LIST_HEAD(&tfile->next);
return 0;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d0129827602..3f3d12d766e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -457,12 +457,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
-static int cdc_manage_power(struct usbnet *dev, int on)
-{
- dev->intf->needs_remote_wakeup = on;
- return 0;
-}
-
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -470,7 +464,7 @@ static const struct driver_info cdc_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
static const struct driver_info wwan_info = {
@@ -479,7 +473,7 @@ static const struct driver_info wwan_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
/*-------------------------------------------------------------------------*/
@@ -487,6 +481,7 @@ static const struct driver_info wwan_info = {
#define HUAWEI_VENDOR_ID 0x12D1
#define NOVATEL_VENDOR_ID 0x1410
#define ZTE_VENDOR_ID 0x19D2
+#define DELL_VENDOR_ID 0x413C
static const struct usb_device_id products [] = {
/*
@@ -594,27 +589,29 @@ static const struct usb_device_id products [] = {
/* Novatel USB551L and MC551 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0xB001,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0xB001, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* Novatel E362 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0x9010,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9010, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d38bc20a60e..71b6e92b8e9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1129,19 +1129,13 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
usbnet_disconnect(intf);
}
-static int cdc_ncm_manage_power(struct usbnet *dev, int status)
-{
- dev->intf->needs_remote_wakeup = status;
- return 0;
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
@@ -1155,7 +1149,7 @@ static const struct driver_info wwan_info = {
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1ea91f4237f..91d7cb9728e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -383,6 +383,20 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5800 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* Dell Wireless 5800 V2 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8196,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
@@ -419,6 +433,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
+ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04110ba677..3d4bf01641b 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -719,7 +719,8 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
- if (info->manage_power)
+ if (info->manage_power &&
+ !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
@@ -794,14 +795,14 @@ int usbnet_open (struct net_device *net)
tasklet_schedule (&dev->bh);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
- if (retval < 0)
- goto done_manage_power_error;
- usb_autopm_put_interface(dev->intf);
+ if (retval < 0) {
+ retval = 0;
+ set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+ } else {
+ usb_autopm_put_interface(dev->intf);
+ }
}
return retval;
-
-done_manage_power_error:
- clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -1615,6 +1616,16 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);
+/*
+ * For devices that can do without special commands
+ */
+int usbnet_manage_power(struct usbnet *dev, int on)
+{
+ dev->intf->needs_remote_wakeup = on;
+ return 0;
+}
+EXPORT_SYMBOL(usbnet_manage_power);
+
/*-------------------------------------------------------------------------*/
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, void *data, u16 size)
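The cdc_ether and cdc_ncm hunks above drop their identical manage_power helpers in favour of the usbnet_manage_power() helper exported here. As a rough sketch (not part of the patch; the driver name and vendor/product ID below are hypothetical, and the declaration of usbnet_manage_power() lives in the usbnet header change not shown in this excerpt), a minidriver with no device-specific power commands could now use it directly:

/* Sketch only: "example_info" and the 0x1234/0x5678 ID are hypothetical. */
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>

static const struct driver_info example_info = {
        .description    = "example CDC-like device",
        .flags          = FLAG_ETHER | FLAG_POINTTOPOINT,
        .bind           = usbnet_cdc_bind,
        .unbind         = usbnet_cdc_unbind,
        .status         = usbnet_cdc_status,
        /* no device-specific power commands needed */
        .manage_power   = usbnet_manage_power,
};

static const struct usb_device_id example_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&example_info },
        { },
};
MODULE_DEVICE_TABLE(usb, example_ids);

With the EVENT_NO_RUNTIME_PM handling added to usbnet_open()/usbnet_stop() above, a manage_power() failure no longer tears down the open path; runtime PM is simply skipped for that device.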
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 68d64f0313e..a6fcf15adc4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -130,7 +130,6 @@ struct skb_vnet_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mhdr;
};
- unsigned int num_sg;
};
struct padded_vnet_hdr {
@@ -530,10 +529,10 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
err = add_recvbuf_small(rq, gfp);
oom = err == -ENOMEM;
- if (err < 0)
+ if (err)
break;
++rq->num;
- } while (err > 0);
+ } while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
virtqueue_kick(rq->vq);
@@ -640,10 +639,10 @@ static int virtnet_open(struct net_device *dev)
return 0;
}
-static unsigned int free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
- unsigned int len, tot_sgs = 0;
+ unsigned int len;
struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
@@ -655,10 +654,8 @@ static unsigned int free_old_xmit_skbs(struct send_queue *sq)
stats->tx_packets++;
u64_stats_update_end(&stats->tx_syncp);
- tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
- return tot_sgs;
}
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -666,6 +663,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
+ unsigned num_sg;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
@@ -704,8 +702,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
else
sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
- return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
0, skb, GFP_ATOMIC);
}
@@ -714,28 +712,20 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int qnum = skb_get_queue_mapping(skb);
struct send_queue *sq = &vi->sq[qnum];
- int capacity;
+ int err;
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);
/* Try to transmit */
- capacity = xmit_skb(sq, skb);
-
- /* This can happen with OOM and indirect buffers. */
- if (unlikely(capacity < 0)) {
- if (likely(capacity == -ENOMEM)) {
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "TXQ (%d) failure: out of memory\n",
- qnum);
- } else {
- dev->stats.tx_fifo_errors++;
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "Unexpected TXQ (%d) failure: %d\n",
- qnum, capacity);
- }
+ err = xmit_skb(sq, skb);
+
+ /* This should not happen! */
+ if (unlikely(err)) {
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -748,12 +738,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Apparently nice girls don't return TX_BUSY; stop the queue
* before it gets out of hand. Naturally, this wastes entries. */
- if (capacity < 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- capacity += free_old_xmit_skbs(sq);
- if (capacity >= 2+MAX_SKB_FRAGS) {
+ free_old_xmit_skbs(sq);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
}
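The start_xmit() rework above stops tracking a driver-side capacity count (and the per-skb num_sg bookkeeping that fed it) and instead reads sq->vq->num_free, which the virtio core now maintains. A condensed, illustrative sketch of the resulting pattern (function and parameter names are hypothetical; error paths and callback re-enabling are elided):

/* Sketch only: shows the pattern the hunk above adopts, where the
 * virtqueue core's num_free field replaces driver-side capacity math. */
#include <linux/netdevice.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/virtio.h>

static netdev_tx_t example_try_xmit(struct net_device *dev, int qnum,
                                    struct virtqueue *vq, struct sk_buff *skb,
                                    struct scatterlist *sg, unsigned int num_sg)
{
        /* virtqueue_add_buf() now simply succeeds or fails ... */
        if (virtqueue_add_buf(vq, sg, num_sg, 0, skb, GFP_ATOMIC)) {
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        virtqueue_kick(vq);

        /* ... and vq->num_free is consulted directly for flow control. */
        if (vq->num_free < 2 + MAX_SKB_FRAGS)
                netif_stop_subqueue(dev, qnum);

        return NETDEV_TX_OK;
}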
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b3fdf648ea..40f2cc135a4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -505,7 +505,8 @@ static int vxlan_join_group(struct net_device *dev)
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
int err;
@@ -532,7 +533,8 @@ static int vxlan_leave_group(struct net_device *dev)
int err = 0;
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
/* Only leave group when last vxlan is done. */
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 4b66ab1d0e5..6702da838b0 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -209,6 +209,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
result = i2400m_reset(i2400m, rt);
if (result >= 0)
result = 0;
+ break;
default:
result = -EINVAL;
}
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6650fde99e1..9f1e947f355 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,9 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
USB_DEVICE_ID_I6050_2 = 0x0188,
+ USB_DEVICE_ID_I6150 = 0x07d6,
+ USB_DEVICE_ID_I6150_2 = 0x07d7,
+ USB_DEVICE_ID_I6150_3 = 0x07d9,
USB_DEVICE_ID_I6250 = 0x0187,
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 713d033891e..080f36303a4 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
switch (id->idProduct) {
case USB_DEVICE_ID_I6050:
case USB_DEVICE_ID_I6050_2:
+ case USB_DEVICE_ID_I6150:
+ case USB_DEVICE_ID_I6150_2:
+ case USB_DEVICE_ID_I6150_3:
case USB_DEVICE_ID_I6250:
i2400mu->i6050 = 1;
break;
@@ -759,6 +762,9 @@ static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 6deaae18db5..28aa05f60c2 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -156,11 +156,7 @@ config PRISM54
---help---
This enables support for FullMAC PCI/Cardbus prism54 devices. This
driver is now deprecated in favor for the SoftMAC driver, p54pci.
- p54pci supports FullMAC PCI/Cardbus devices as well. For details on
- the scheduled removal of this driver on the kernel see the feature
- removal schedule:
-
- Documentation/feature-removal-schedule.txt
+ p54pci supports FullMAC PCI/Cardbus devices as well.
For more information refer to the p54 wiki:
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 062dfdff636..67156efe14c 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
-obj-$(CONFIG_ATH_COMMON) += ath/
+obj-$(CONFIG_ATH_CARDS) += ath/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 8e9b826f878..7ae73fbd913 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -114,23 +114,23 @@ static void ath_pci_aspm_init(struct ath_common *common)
if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
(AR_SREV_9285(ah))) {
- /* Bluetooth coexistance requires disabling ASPM. */
+ /* Bluetooth coexistence requires disabling ASPM. */
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
- PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
/*
* Both upstream and downstream PCIe components should
* have the same ASPM settings.
*/
pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
- PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
return;
}
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
- if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
+ if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 4a4e98f7180..4374079dfc2 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2963,7 +2963,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
ssid_el_p[1] = priv->SSID_size;
memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
ssid_el_p[2 + priv->SSID_size] = WLAN_EID_SUPP_RATES;
- ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */
+ ssid_el_p[3 + priv->SSID_size] = 4; /* len of supported rates */
memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 97312524249..5fe17cbab1f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -1045,7 +1045,7 @@ typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW
IPW_ORD_POWER_MGMT_MODE, // Power mode - 0=CAM, 1=PSP
IPW_ORD_POWER_MGMT_INDEX, //NS //
IPW_ORD_COUNTRY_CODE, // IEEE country code as recv'd from beacon
- IPW_ORD_COUNTRY_CHANNELS, // channels suported by country
+ IPW_ORD_COUNTRY_CHANNELS, // channels supported by country
// IPW_ORD_COUNTRY_CHANNELS:
// For 11b the lower 2-byte are used for channels from 1-14
// and the higher 2-byte are not used.
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 2d092f32854..1b15b0b2292 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -917,10 +917,6 @@ struct il4965_scd_bc_tbl {
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
#define IL4965_DEFAULT_TX_RETRY 15
/* EEPROM */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 318ed3c9fe7..7e16d10a7f1 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1183,9 +1183,10 @@ EXPORT_SYMBOL(il_power_update_mode);
void
il_power_initialize(struct il_priv *il)
{
- u16 lctl = il_pcie_link_ctl(il);
+ u16 lctl;
- il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+ pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
il->power_data.debug_sleep_level_override = -1;
@@ -4233,9 +4234,8 @@ il_apm_init(struct il_priv *il)
* power savings, even without L1.
*/
if (il->cfg->set_l0s) {
- lctl = il_pcie_link_ctl(il);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
/* L1-ASPM enabled; disable(!) L0S */
il_set_bit(il, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index e254cba4557..a9a569f432f 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1829,14 +1829,6 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
* PCI *
*****************************************************/
-static inline u16
-il_pcie_link_ctl(struct il_priv *il)
-{
- u16 pci_lnk_ctl;
- pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
void il_bg_watchdog(unsigned long data);
u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
@@ -2434,10 +2426,6 @@ struct il_tfd {
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
struct il_rate_info {
u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index d66cad4a7d6..35708b959ad 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -94,8 +94,6 @@ static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -111,9 +109,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
* power savings, even without L1.
*/
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
@@ -122,7 +118,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
}
- trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 4ffb6a584cd..44f8b3f3cbe 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -685,6 +685,14 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
* to mac80211.
*/
rx_status = IEEE80211_SKB_RXCB(entry->skb);
+
+ /* Ensure that all fields of rx_status are initialized
+ * properly. The skb->cb array was used for driver
+ * specific information, so rx_status might contain
+ * garbage.
+ */
+ memset(rx_status, 0, sizeof(*rx_status));
+
rx_status->mactime = rxdesc.timestamp;
rx_status->band = rt2x00dev->curr_band;
rx_status->freq = rt2x00dev->curr_freq;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 18b0bc51766..bb7cc90bafb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -341,7 +341,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
-static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
+static struct pci_device_id rtl8723ae_pci_ids[] = {
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
{},
};
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index ff306d763e3..71ab320fae8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1155,7 +1155,7 @@ int zd_chip_init_hw(struct zd_chip *chip)
if (r)
goto out;
/* Currently we support IEEE 802.11g for full and high speed USB.
- * It might be discussed, whether we should suppport pure b mode for
+ * It might be discussed, whether we should support pure b mode for
* full speed USB.
*/
r = set_mandatory_rates(chip, 1);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index dfba3e64d59..d37bfcf5a3a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -53,7 +53,7 @@ config OF_DEVICE
config OF_I2C
def_tristate I2C
- depends on I2C && !SPARC
+ depends on I2C
help
OpenFirmware I2C accessors
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 538e3cfad23..2390ddb22d6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -629,7 +629,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from,
read_unlock(&devtree_lock);
return np;
}
-EXPORT_SYMBOL(of_find_matching_node);
+EXPORT_SYMBOL(of_find_matching_node_and_match);
/**
* of_modalias_node - Lookup appropriate modalias for a device node
@@ -1025,7 +1025,7 @@ EXPORT_SYMBOL(of_parse_phandle);
* To get a device_node of the `node2' node you may call this:
* of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
-int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
+int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name, int index,
struct of_phandle_args *out_args)
{
@@ -1114,13 +1114,36 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
+#if defined(CONFIG_OF_DYNAMIC)
+static int of_property_notify(int action, struct device_node *np,
+ struct property *prop)
+{
+ struct of_prop_reconfig pr;
+
+ pr.dn = np;
+ pr.prop = prop;
+ return of_reconfig_notify(action, &pr);
+}
+#else
+static int of_property_notify(int action, struct device_node *np,
+ struct property *prop)
+{
+ return 0;
+}
+#endif
+
/**
- * prom_add_property - Add a property to a node
+ * of_add_property - Add a property to a node
*/
-int prom_add_property(struct device_node *np, struct property *prop)
+int of_add_property(struct device_node *np, struct property *prop)
{
struct property **next;
unsigned long flags;
+ int rc;
+
+ rc = of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop);
+ if (rc)
+ return rc;
prop->next = NULL;
write_lock_irqsave(&devtree_lock, flags);
@@ -1146,18 +1169,23 @@ int prom_add_property(struct device_node *np, struct property *prop)
}
/**
- * prom_remove_property - Remove a property from a node.
+ * of_remove_property - Remove a property from a node.
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
* Instead we just move the property to the "dead properties"
* list, so it won't be found any more.
*/
-int prom_remove_property(struct device_node *np, struct property *prop)
+int of_remove_property(struct device_node *np, struct property *prop)
{
struct property **next;
unsigned long flags;
int found = 0;
+ int rc;
+
+ rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop);
+ if (rc)
+ return rc;
write_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
@@ -1187,7 +1215,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
}
/*
- * prom_update_property - Update a property in a node, if the property does
+ * of_update_property - Update a property in a node, if the property does
* not exist, add it.
*
* Note that we don't actually remove it, since we have given out
@@ -1195,19 +1223,22 @@ int prom_remove_property(struct device_node *np, struct property *prop)
* Instead we just move the property to the "dead properties" list,
* and add the new property to the property list
*/
-int prom_update_property(struct device_node *np,
- struct property *newprop)
+int of_update_property(struct device_node *np, struct property *newprop)
{
struct property **next, *oldprop;
unsigned long flags;
- int found = 0;
+ int rc, found = 0;
+
+ rc = of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop);
+ if (rc)
+ return rc;
if (!newprop->name)
return -EINVAL;
oldprop = of_find_property(np, newprop->name, NULL);
if (!oldprop)
- return prom_add_property(np, newprop);
+ return of_add_property(np, newprop);
write_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
@@ -1246,12 +1277,55 @@ int prom_update_property(struct device_node *np,
* device tree nodes.
*/
+static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
+
+int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
+
+int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
+
+int of_reconfig_notify(unsigned long action, void *p)
+{
+ int rc;
+
+ rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
+ return notifier_to_errno(rc);
+}
+
+#ifdef CONFIG_PROC_DEVICETREE
+static void of_add_proc_dt_entry(struct device_node *dn)
+{
+ struct proc_dir_entry *ent;
+
+ ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
+ if (ent)
+ proc_device_tree_add_node(dn, ent);
+}
+#else
+static void of_add_proc_dt_entry(struct device_node *dn)
+{
+ return;
+}
+#endif
+
/**
* of_attach_node - Plug a device node into the tree and global list.
*/
-void of_attach_node(struct device_node *np)
+int of_attach_node(struct device_node *np)
{
unsigned long flags;
+ int rc;
+
+ rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
+ if (rc)
+ return rc;
write_lock_irqsave(&devtree_lock, flags);
np->sibling = np->parent->child;
@@ -1259,24 +1333,61 @@ void of_attach_node(struct device_node *np)
np->parent->child = np;
of_allnodes = np;
write_unlock_irqrestore(&devtree_lock, flags);
+
+ of_add_proc_dt_entry(np);
+ return 0;
}
+#ifdef CONFIG_PROC_DEVICETREE
+static void of_remove_proc_dt_entry(struct device_node *dn)
+{
+ struct device_node *parent = dn->parent;
+ struct property *prop = dn->properties;
+
+ while (prop) {
+ remove_proc_entry(prop->name, dn->pde);
+ prop = prop->next;
+ }
+
+ if (dn->pde)
+ remove_proc_entry(dn->pde->name, parent->pde);
+}
+#else
+static void of_remove_proc_dt_entry(struct device_node *dn)
+{
+ return;
+}
+#endif
+
/**
* of_detach_node - "Unplug" a node from the device tree.
*
* The caller must hold a reference to the node. The memory associated with
* the node is not freed until its refcount goes to zero.
*/
-void of_detach_node(struct device_node *np)
+int of_detach_node(struct device_node *np)
{
struct device_node *parent;
unsigned long flags;
+ int rc = 0;
+
+ rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
+ if (rc)
+ return rc;
write_lock_irqsave(&devtree_lock, flags);
+ if (of_node_check_flag(np, OF_DETACHED)) {
+ /* someone already detached it */
+ write_unlock_irqrestore(&devtree_lock, flags);
+ return rc;
+ }
+
parent = np->parent;
- if (!parent)
- goto out_unlock;
+ if (!parent) {
+ write_unlock_irqrestore(&devtree_lock, flags);
+ return rc;
+ }
if (of_allnodes == np)
of_allnodes = np->allnext;
@@ -1301,9 +1412,10 @@ void of_detach_node(struct device_node *np)
}
of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
write_unlock_irqrestore(&devtree_lock, flags);
+
+ of_remove_proc_dt_entry(np);
+ return rc;
}
#endif /* defined(CONFIG_OF_DYNAMIC) */
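The of_reconfig notifier chain added above gives interested code a way to observe node attach/detach and property add/remove/update events. A hedged sketch of a client (the callback and notifier_block names are made up; the OF_RECONFIG_* action constants and struct of_prop_reconfig come from the of.h changes that accompany this patch, not shown in this excerpt):

/* Sketch only: "example_of_notify" is a hypothetical client callback. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/of.h>

static int example_of_notify(struct notifier_block *nb,
                             unsigned long action, void *arg)
{
        switch (action) {
        case OF_RECONFIG_ATTACH_NODE:
        case OF_RECONFIG_DETACH_NODE: {
                struct device_node *np = arg;

                pr_info("node %s: action %lu\n", np->full_name, action);
                break;
        }
        case OF_RECONFIG_ADD_PROPERTY:
        case OF_RECONFIG_REMOVE_PROPERTY:
        case OF_RECONFIG_UPDATE_PROPERTY: {
                struct of_prop_reconfig *pr = arg;

                pr_info("node %s: property %s\n",
                        pr->dn->full_name, pr->prop->name);
                break;
        }
        }
        return NOTIFY_OK;
}

static struct notifier_block example_of_nb = {
        .notifier_call = example_of_notify,
};

Such a client would call of_reconfig_notifier_register(&example_of_nb) at init time and of_reconfig_notifier_unregister() on teardown.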
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index a65c39c473b..808be06bb67 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -488,14 +488,8 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
depth++;
pathp = (char *)p;
p = ALIGN(p + strlen(pathp) + 1, 4);
- if ((*pathp) == '/') {
- const char *lp, *np;
- for (lp = NULL, np = pathp; *np; np++)
- if ((*np) == '/')
- lp = np+1;
- if (lp != NULL)
- pathp = lp;
- }
+ if (*pathp == '/')
+ pathp = kbasename(pathp);
rc = it(p, pathp, depth, data);
if (rc != 0)
break;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index a543746fb35..ad6a8b63569 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -170,6 +170,11 @@ int pci_bus_add_device(struct pci_dev *dev)
int retval;
pci_fixup_device(pci_fixup_final, dev);
+
+ retval = pcibios_add_device(dev);
+ if (retval)
+ return retval;
+
retval = device_add(&dev->dev);
if (retval)
return retval;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index b0e46dede1a..13e9e63a726 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -151,4 +151,15 @@ config HOTPLUG_PCI_SGI
When in doubt, say N.
+config HOTPLUG_PCI_S390
+ tristate "System z PCI Hotplug Support"
+ depends on S390 && 64BIT
+ help
+ Say Y here if you want to use the System z PCI Hotplug
+ driver for PCI devices. Without this driver it is not
+ possible to access stand-by PCI functions nor to deconfigure
+ PCI functions.
+
+ When in doubt, say Y.
+
endif # HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index c459cd4e39c..47ec8c80e16 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
new file mode 100644
index 00000000000..dee68e0698e
--- /dev/null
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -0,0 +1,252 @@
+/*
+ * PCI Hot Plug Controller Driver for System z
+ *
+ * Copyright 2012 IBM Corp.
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define COMPONENT "zPCI hpc"
+#define pr_fmt(fmt) COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+
+#define SLOT_NAME_SIZE 10
+static LIST_HEAD(s390_hotplug_slot_list);
+
+MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
+MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
+MODULE_LICENSE("GPL");
+
+static int zpci_fn_configured(enum zpci_state state)
+{
+ return state == ZPCI_FN_STATE_CONFIGURED ||
+ state == ZPCI_FN_STATE_ONLINE;
+}
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct list_head slot_list;
+ struct hotplug_slot *hotplug_slot;
+ struct zpci_dev *zdev;
+};
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ return -EIO;
+
+ rc = sclp_pci_configure(slot->zdev->fid);
+ if (!rc) {
+ slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ /* automatically scan the device after it was configured */
+ zpci_enable_device(slot->zdev);
+ zpci_scan_device(slot->zdev);
+ }
+ return rc;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (!zpci_fn_configured(slot->zdev->state))
+ return -EIO;
+
+ /* TODO: we rely on the user to unbind/remove the device, is that plausible
+ * or do we need to trigger that here?
+ */
+ rc = sclp_pci_deconfigure(slot->zdev->fid);
+ if (!rc) {
+ /* Fixme: better call List-PCI to find the disabled FH
+ for the FID since the FH should be opaque... */
+ slot->zdev->fh &= 0x7fffffff;
+ slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+ }
+ return rc;
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ switch (slot->zdev->state) {
+ case ZPCI_FN_STATE_STANDBY:
+ *value = 0;
+ break;
+ default:
+ *value = 1;
+ break;
+ }
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ /* if the slot exists it always contains a function */
+ *value = 1;
+ return 0;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+static int init_pci_slot(struct zpci_dev *zdev)
+{
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ struct slot *slot;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot)
+ goto error_hp;
+ hotplug_slot->private = slot;
+
+ slot->hotplug_slot = hotplug_slot;
+ slot->zdev = zdev;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto error_info;
+ hotplug_slot->info = info;
+
+ hotplug_slot->ops = &s390_hotplug_slot_ops;
+ hotplug_slot->release = &release_slot;
+
+ get_power_status(hotplug_slot, &info->power_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
+ rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
+ if (rc) {
+ pr_err("pci_hp_register failed with error %d\n", rc);
+ goto error_reg;
+ }
+ list_add(&slot->slot_list, &s390_hotplug_slot_list);
+ return 0;
+
+error_reg:
+ kfree(info);
+error_info:
+ kfree(hotplug_slot);
+error_hp:
+ kfree(slot);
+error:
+ return -ENOMEM;
+}
+
+static int __init init_pci_slots(void)
+{
+ struct zpci_dev *zdev;
+ int device = 0;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ mutex_lock(&zpci_list_lock);
+ list_for_each_entry(zdev, &zpci_list, entry) {
+ init_pci_slot(zdev);
+ device++;
+ }
+
+ mutex_unlock(&zpci_list_lock);
+ return (device) ? 0 : -ENODEV;
+}
+
+static void exit_pci_slot(struct zpci_dev *zdev)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ if (slot->zdev != zdev)
+ continue;
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static void __exit exit_pci_slots(void)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem.
+ * Memory will be freed in release_slot() callback after slot's
+ * lifespan is finished.
+ */
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static int __init pci_hotplug_s390_init(void)
+{
+ /*
+ * Do specific initialization stuff for your driver here
+ * like initializing your controller hardware (if any) and
+ * determining the number of slots you have in the system
+ * right now.
+ */
+
+ if (!pci_probe)
+ return -EOPNOTSUPP;
+
+ /* register callbacks for slot handling from arch code */
+ mutex_lock(&zpci_list_lock);
+ hotplug_ops.create_slot = init_pci_slot;
+ hotplug_ops.remove_slot = exit_pci_slot;
+ mutex_unlock(&zpci_list_lock);
+ pr_info("registered hotplug slot callbacks\n");
+ return init_pci_slots();
+}
+
+static void __exit pci_hotplug_s390_exit(void)
+{
+ exit_pci_slots();
+}
+
+module_init(pci_hotplug_s390_init);
+module_exit(pci_hotplug_s390_exit);
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 2eca902a428..3c6bbdd059a 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -125,3 +125,5 @@ static void __exit ioapic_exit(void)
module_init(ioapic_init);
module_exit(ioapic_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index aeccc911abb..bafd2bbcaf6 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -106,7 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
size = resource_size(res);
- do_div(size, iov->total);
+ do_div(size, iov->total_VFs);
virtfn->resource[i].start = res->start + size * id;
virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
rc = request_resource(res, &virtfn->resource[i]);
@@ -194,7 +194,7 @@ static int sriov_migration(struct pci_dev *dev)
u16 status;
struct pci_sriov *iov = dev->sriov;
- if (!iov->nr_virtfn)
+ if (!iov->num_VFs)
return 0;
if (!(iov->cap & PCI_SRIOV_CAP_VFM))
@@ -216,7 +216,7 @@ static void sriov_migration_task(struct work_struct *work)
u16 status;
struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);
- for (i = iov->initial; i < iov->nr_virtfn; i++) {
+ for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
state = readb(iov->mstate + i);
if (state == PCI_SRIOV_VFM_MI) {
writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
@@ -244,7 +244,7 @@ static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
resource_size_t pa;
struct pci_sriov *iov = dev->sriov;
- if (nr_virtfn <= iov->initial)
+ if (nr_virtfn <= iov->initial_VFs)
return 0;
pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
@@ -294,15 +294,15 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (!nr_virtfn)
return 0;
- if (iov->nr_virtfn)
+ if (iov->num_VFs)
return -EINVAL;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
- if (initial > iov->total ||
- (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
+ if (initial > iov->total_VFs ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
return -EIO;
- if (nr_virtfn < 0 || nr_virtfn > iov->total ||
+ if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
@@ -359,7 +359,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
msleep(100);
pci_cfg_access_unlock(dev);
- iov->initial = initial;
+ iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -376,7 +376,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
}
kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
- iov->nr_virtfn = nr_virtfn;
+ iov->num_VFs = nr_virtfn;
return 0;
@@ -401,13 +401,13 @@ static void sriov_disable(struct pci_dev *dev)
int i;
struct pci_sriov *iov = dev->sriov;
- if (!iov->nr_virtfn)
+ if (!iov->num_VFs)
return;
if (iov->cap & PCI_SRIOV_CAP_VFM)
sriov_disable_migration(dev);
- for (i = 0; i < iov->nr_virtfn; i++)
+ for (i = 0; i < iov->num_VFs; i++)
virtfn_remove(dev, i, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
@@ -419,7 +419,7 @@ static void sriov_disable(struct pci_dev *dev)
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
- iov->nr_virtfn = 0;
+ iov->num_VFs = 0;
}
static int sriov_init(struct pci_dev *dev, int pos)
@@ -496,7 +496,7 @@ found:
iov->pos = pos;
iov->nres = nres;
iov->ctrl = ctrl;
- iov->total = total;
+ iov->total_VFs = total;
iov->offset = offset;
iov->stride = stride;
iov->pgsz = pgsz;
@@ -529,7 +529,7 @@ failed:
static void sriov_release(struct pci_dev *dev)
{
- BUG_ON(dev->sriov->nr_virtfn);
+ BUG_ON(dev->sriov->num_VFs);
if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
@@ -554,7 +554,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_update_resource(dev, i);
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
msleep(100);
@@ -661,7 +661,7 @@ int pci_iov_bus_range(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_physfn)
continue;
- busnr = virtfn_bus(dev, dev->sriov->total - 1);
+ busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1);
if (busnr > max)
max = busnr;
}
@@ -729,9 +729,56 @@ EXPORT_SYMBOL_GPL(pci_sriov_migration);
*/
int pci_num_vf(struct pci_dev *dev)
{
- if (!dev || !dev->is_physfn)
+ if (!dev->is_physfn)
return 0;
- else
- return dev->sriov->nr_virtfn;
+
+ return dev->sriov->num_VFs;
}
EXPORT_SYMBOL_GPL(pci_num_vf);
+
+/**
+ * pci_sriov_set_totalvfs -- reduce the TotalVFs available
+ * @dev: the PCI PF device
+ * @numvfs: number that should be used for TotalVFs supported
+ *
+ * Should be called from PF driver's probe routine with
+ * device's mutex held.
+ *
+ * Returns 0 if PF is an SRIOV-capable device and
+ * the value of numvfs is valid. If not a PF with VFs, return -EINVAL;
+ * if VFs already enabled, return -EBUSY.
+ */
+int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
+{
+ if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs))
+ return -EINVAL;
+
+ /* Shouldn't change if VFs already enabled */
+ if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
+ return -EBUSY;
+ else
+ dev->sriov->driver_max_VFs = numvfs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
+
+/**
+ * pci_sriov_get_totalvfs -- get total VFs supported on this device
+ * @dev: the PCI PF device
+ *
+ * For a PCIe device with SRIOV support, return the PCIe
+ * SRIOV capability value of TotalVFs or the value of driver_max_VFs
+ * if the driver reduced it. Otherwise, -EINVAL.
+ */
+int pci_sriov_get_totalvfs(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return -EINVAL;
+
+ if (dev->sriov->driver_max_VFs)
+ return dev->sriov->driver_max_VFs;
+
+ return dev->sriov->total_VFs;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
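pci_sriov_set_totalvfs() lets a PF driver cap the advertised TotalVFs before any VFs are enabled, and pci_sriov_get_totalvfs() then reports the effective limit (driver_max_VFs if set, otherwise the capability's TotalVFs). A sketch of how a PF driver's probe routine might use the pair (driver name, limit and probe body are hypothetical):

/* Sketch only: the driver, limit and probe body are hypothetical. */
#include <linux/pci.h>

#define MY_DRIVER_MAX_VFS       7

static int example_pf_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /* Tell the core we can only handle MY_DRIVER_MAX_VFS VFs even if
         * the SR-IOV capability advertises more. Must happen before VFs
         * are enabled; pci_sriov_set_totalvfs() returns -EBUSY otherwise. */
        err = pci_sriov_set_totalvfs(pdev, MY_DRIVER_MAX_VFS);
        if (err)
                dev_warn(&pdev->dev, "could not cap TotalVFs: %d\n", err);

        dev_info(&pdev->dev, "up to %d VFs available\n",
                 pci_sriov_get_totalvfs(pdev));
        return 0;
}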
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index e5f69a43b1b..b008cf86b9c 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -14,11 +14,11 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
{
struct pci_dev *parent = to_pci_dev(pdev->dev.parent);
- dev_printk(KERN_ERR, &pdev->dev,
- "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
- dev_name(&parent->dev), parent->vendor, parent->device);
- dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
- dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
+ dev_err(&pdev->dev,
+ "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
+ dev_name(&parent->dev), parent->vendor, parent->device);
+ dev_err(&pdev->dev, "%s\n", reason);
+ dev_err(&pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
WARN_ON(1);
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a825d78fd0a..5099636a6e5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -207,6 +207,8 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
+#ifdef CONFIG_GENERIC_HARDIRQS
+
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
struct msi_desc *desc = irq_data_get_msi(data);
@@ -230,6 +232,8 @@ void unmask_msi_irq(struct irq_data *data)
msi_set_mask_bit(data, 0);
}
+#endif /* CONFIG_GENERIC_HARDIRQS */
+
void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
BUG_ON(entry->dev->current_state != PCI_D0);
@@ -337,8 +341,10 @@ static void free_msi_irqs(struct pci_dev *dev)
if (!entry->irq)
continue;
nvec = 1 << entry->msi_attrib.multiple;
+#ifdef CONFIG_GENERIC_HARDIRQS
for (i = 0; i < nvec; i++)
BUG_ON(irq_has_action(entry->irq + i));
+#endif
}
arch_teardown_msi_irqs(dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1dc78c5cabf..f79cbcd3944 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -248,31 +248,26 @@ struct drv_dev_and_id {
static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
- struct device *dev = &ddi->dev->dev;
- struct device *parent = dev->parent;
+ struct pci_dev *pci_dev = ddi->dev;
+ struct pci_driver *pci_drv = ddi->drv;
+ struct device *dev = &pci_dev->dev;
int rc;
- /* The parent bridge must be in active state when probing */
- if (parent)
- pm_runtime_get_sync(parent);
- /* Unbound PCI devices are always set to disabled and suspended.
- * During probe, the device is set to enabled and active and the
- * usage count is incremented. If the driver supports runtime PM,
- * it should call pm_runtime_put_noidle() in its probe routine and
- * pm_runtime_get_noresume() in its remove routine.
+ /*
+ * Unbound PCI devices are always put in D0, regardless of
+ * runtime PM status. During probe, the device is set to
+ * active and the usage count is incremented. If the driver
+ * supports runtime PM, it should call pm_runtime_put_noidle()
+ * in its probe routine and pm_runtime_get_noresume() in its
+ * remove routine.
*/
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- rc = ddi->drv->probe(ddi->dev, ddi->id);
+ pm_runtime_get_sync(dev);
+ pci_dev->driver = pci_drv;
+ rc = pci_drv->probe(pci_dev, ddi->id);
if (rc) {
- pm_runtime_disable(dev);
- pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
+ pci_dev->driver = NULL;
+ pm_runtime_put_sync(dev);
}
- if (parent)
- pm_runtime_put(parent);
return rc;
}
@@ -322,10 +317,8 @@ __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
id = pci_match_device(drv, pci_dev);
if (id)
error = pci_call_probe(drv, pci_dev, id);
- if (error >= 0) {
- pci_dev->driver = drv;
+ if (error >= 0)
error = 0;
- }
}
return error;
}
@@ -361,9 +354,7 @@ static int pci_device_remove(struct device * dev)
}
/* Undo the runtime PM settings in local_pci_probe() */
- pm_runtime_disable(dev);
- pm_runtime_set_suspended(dev);
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
/*
* If the device is still on, set the power state as "unknown",
@@ -986,6 +977,13 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_power_t prev = pci_dev->current_state;
int error;
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
@@ -1007,10 +1005,10 @@ static int pci_pm_runtime_suspend(struct device *dev)
return 0;
}
- if (!pci_dev->state_saved)
+ if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
-
- pci_finish_runtime_suspend(pci_dev);
+ pci_finish_runtime_suspend(pci_dev);
+ }
return 0;
}
@@ -1021,6 +1019,13 @@ static int pci_pm_runtime_resume(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
if (!pm || !pm->runtime_resume)
return -ENOSYS;
@@ -1038,8 +1043,16 @@ static int pci_pm_runtime_resume(struct device *dev)
static int pci_pm_runtime_idle(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ goto out;
+
if (!pm)
return -ENOSYS;
@@ -1049,8 +1062,8 @@ static int pci_pm_runtime_idle(struct device *dev)
return ret;
}
+out:
pm_runtime_suspend(dev);
-
return 0;
}
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 775e933c222..6e47c519c51 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -28,7 +28,7 @@ MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
+ dev_info(&dev->dev, "claimed by stub\n");
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 68d56f02e72..9c6e9bb674e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -401,6 +401,89 @@ static ssize_t d3cold_allowed_show(struct device *dev,
}
#endif
+#ifdef CONFIG_PCI_IOV
+static ssize_t sriov_totalvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
+}
+
+
+static ssize_t sriov_numvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+}
+
+/*
+ * num_vfs > 0; number of VFs to enable
+ * num_vfs = 0; disable all VFs
+ *
+ * Note: SRIOV spec doesn't allow partial VF
+ * disable, so it's all or none.
+ */
+static ssize_t sriov_numvfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ u16 num_vfs;
+
+ ret = kstrtou16(buf, 0, &num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (num_vfs > pci_sriov_get_totalvfs(pdev))
+ return -ERANGE;
+
+ if (num_vfs == pdev->sriov->num_VFs)
+ return count; /* no change */
+
+ /* is PF driver loaded w/callback */
+ if (!pdev->driver || !pdev->driver->sriov_configure) {
+ dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
+ return -ENOSYS;
+ }
+
+ if (num_vfs == 0) {
+ /* disable VFs */
+ ret = pdev->driver->sriov_configure(pdev, 0);
+ if (ret < 0)
+ return ret;
+ return count;
+ }
+
+ /* enable VFs */
+ if (pdev->sriov->num_VFs) {
+ dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+ pdev->sriov->num_VFs, num_vfs);
+ return -EBUSY;
+ }
+
+ ret = pdev->driver->sriov_configure(pdev, num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (ret != num_vfs)
+ dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+ num_vfs, ret);
+
+ return count;
+}
+
+static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
+static struct device_attribute sriov_numvfs_attr =
+ __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
+ sriov_numvfs_show, sriov_numvfs_store);
+#endif /* CONFIG_PCI_IOV */
+
struct device_attribute pci_dev_attrs[] = {
__ATTR_RO(resource),
__ATTR_RO(vendor),
@@ -1262,29 +1345,20 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
pdev->rom_attr = attr;
}
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
- retval = device_create_file(&pdev->dev, &vga_attr);
- if (retval)
- goto err_rom_file;
- }
-
/* add platform-specific attributes */
retval = pcibios_add_platform_entries(pdev);
if (retval)
- goto err_vga_file;
+ goto err_rom_file;
/* add sysfs entries for various capabilities */
retval = pci_create_capabilities_sysfs(pdev);
if (retval)
- goto err_vga_file;
+ goto err_rom_file;
pci_create_firmware_label_files(pdev);
return 0;
-err_vga_file:
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
- device_remove_file(&pdev->dev, &vga_attr);
err_rom_file:
if (rom_size) {
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
@@ -1370,3 +1444,62 @@ static int __init pci_sysfs_init(void)
}
late_initcall(pci_sysfs_init);
+
+static struct attribute *pci_dev_dev_attrs[] = {
+ &vga_attr.attr,
+ NULL,
+};
+
+static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (a == &vga_attr.attr)
+ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return 0;
+
+ return a->mode;
+}
+
+#ifdef CONFIG_PCI_IOV
+static struct attribute *sriov_dev_attrs[] = {
+ &sriov_totalvfs_attr.attr,
+ &sriov_numvfs_attr.attr,
+ NULL,
+};
+
+static umode_t sriov_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ if (!dev_is_pf(dev))
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group sriov_dev_attr_group = {
+ .attrs = sriov_dev_attrs,
+ .is_visible = sriov_attrs_are_visible,
+};
+#endif /* CONFIG_PCI_IOV */
+
+static struct attribute_group pci_dev_attr_group = {
+ .attrs = pci_dev_dev_attrs,
+ .is_visible = pci_dev_attrs_are_visible,
+};
+
+static const struct attribute_group *pci_dev_attr_groups[] = {
+ &pci_dev_attr_group,
+#ifdef CONFIG_PCI_IOV
+ &sriov_dev_attr_group,
+#endif
+ NULL,
+};
+
+struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
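The new sriov_numvfs attribute forwards enable/disable requests to the PF driver's ->sriov_configure() callback (a pci_driver field introduced by this series); writing 0 disables all VFs, any other value enables that many, and partial disable is not allowed. A hedged sketch of such a callback (names are hypothetical; pci_enable_sriov()/pci_disable_sriov() are the existing SR-IOV core entry points):

/* Sketch only: a hypothetical PF driver's sriov_configure() callback.
 * Returns the number of VFs actually enabled, 0 on disable, or -errno. */
#include <linux/pci.h>

static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        int err;

        if (num_vfs == 0) {
                pci_disable_sriov(pdev);
                return 0;
        }

        err = pci_enable_sriov(pdev, num_vfs);
        if (err)
                return err;

        return num_vfs;
}

static struct pci_driver example_pf_driver = {
        .name                   = "example_pf",
        /* .id_table, .probe, .remove elided */
        .sriov_configure        = example_sriov_configure,
};

From user space the interface is then, for example, echo 4 > .../sriov_numvfs on the PF's sysfs directory to enable four VFs and echo 0 to the same file to disable them all.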
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index bdf66b500f2..5cb5820fae4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1333,6 +1333,19 @@ void pcim_pin_device(struct pci_dev *pdev)
dr->pinned = 1;
}
+/*
+ * pcibios_add_device - provide arch specific hooks when adding device dev
+ * @dev: the PCI device being added
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are added. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __weak pcibios_add_device (struct pci_dev *dev)
+{
+ return 0;
+}
+
/**
* pcibios_disable_device - disable arch specific PCI resources for device dev
* @dev: the PCI device to disable
@@ -1578,15 +1591,25 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- /* PCI (as opposed to PCIe) PME requires that the device have
- its PME# line hooked up correctly. Not all hardware vendors
- do this, so the PME never gets delivered and the device
- remains asleep. The easiest way around this is to
- periodically walk the list of suspended devices and check
- whether any have their PME flag set. The assumption is that
- we'll wake up often enough anyway that this won't be a huge
- hit, and the power savings from the devices will still be a
- win. */
+ /*
+ * PCI (as opposed to PCIe) PME requires that the device have
+ * its PME# line hooked up correctly. Not all hardware vendors
+ * do this, so the PME never gets delivered and the device
+ * remains asleep. The easiest way around this is to
+ * periodically walk the list of suspended devices and check
+ * whether any have their PME flag set. The assumption is that
+ * we'll wake up often enough anyway that this won't be a huge
+ * hit, and the power savings from the devices will still be a
+ * win.
+ *
+ * Although PCIe uses in-band PME message instead of PME# line
+ * to report PME, PME does not work for some PCIe devices in
+ * reality. For example, there are devices that set their PME
+ * status bits, but don't really bother to send a PME message;
+ * there are PCI Express Root Ports that don't bother to
+ * trigger interrupts when they receive PME messages from the
+ * devices below. So PME poll is used for PCIe devices too.
+ */
if (dev->pme_poll) {
struct pci_pme_device *pme_dev;
@@ -1900,6 +1923,8 @@ void pci_pm_init(struct pci_dev *dev)
u16 pmc;
pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3865,14 +3890,13 @@ static void pci_no_domains(void)
}
/**
- * pci_ext_cfg_enabled - can we access extended PCI config space?
- * @dev: The PCI device of the root bridge.
+ * pci_ext_cfg_avail - can we access extended PCI config space?
*
* Returns 1 if we can access PCI extended config space (offsets
* greater than 0xff). This is the default implementation. Architecture
* implementations can override this.
*/
-int __weak pci_ext_cfg_avail(struct pci_dev *dev)
+int __weak pci_ext_cfg_avail(void)
{
return 1;
}
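
Because pcibios_add_device() is defined __weak above, an architecture only needs to provide a strong definition and the linker will prefer it over this default. A sketch of a hypothetical arch-side override:

#include <linux/pci.h>

int pcibios_add_device(struct pci_dev *dev)
{
	/* e.g. attach firmware-described resources or quirks to the new device */
	dev_dbg(&dev->dev, "arch pcibios_add_device hook\n");
	return 0;
}
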
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e253881c427..e8518292826 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -158,6 +158,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern struct device_attribute pci_dev_attrs[];
extern struct device_attribute pcibus_dev_attrs[];
+extern struct device_type pci_dev_type;
extern struct bus_attribute pci_bus_attrs[];
@@ -229,13 +230,14 @@ struct pci_sriov {
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
- u16 total; /* total VFs associated with the PF */
- u16 initial; /* initial VFs associated with the PF */
- u16 nr_virtfn; /* number of VFs available */
+ u16 total_VFs; /* total VFs associated with the PF */
+ u16 initial_VFs; /* initial VFs associated with the PF */
+ u16 num_VFs; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
+ u16 driver_max_VFs; /* max num VFs driver supports */
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for VF bus */
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 94a7598eb26..22f840f4dda 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -87,6 +87,9 @@ struct aer_broadcast_data {
static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
if (new == PCI_ERS_RESULT_NONE)
return orig;
@@ -97,7 +100,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
break;
case PCI_ERS_RESULT_DISCONNECT:
if (new == PCI_ERS_RESULT_NEED_RESET)
- orig = new;
+ orig = PCI_ERS_RESULT_NEED_RESET;
break;
default:
break;
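
To make the new precedence concrete, a short walkthrough of the patched helper (a sketch, not from the patch; the unchanged middle of the switch is not visible in this hunk but leaves the new value untouched):

enum pci_ers_result r = PCI_ERS_RESULT_RECOVERED;

r = merge_result(r, PCI_ERS_RESULT_NEED_RESET);    /* r == PCI_ERS_RESULT_NEED_RESET */
r = merge_result(r, PCI_ERS_RESULT_NO_AER_DRIVER); /* r == PCI_ERS_RESULT_NO_AER_DRIVER */
r = merge_result(r, PCI_ERS_RESULT_CAN_RECOVER);   /* stays NO_AER_DRIVER; recovery is abandoned */
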
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index af4e31cd3a3..421bbc5fee3 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -232,13 +232,27 @@ static int report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- goto out;
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+ * the subsequent mmio_enabled/slot_reset/resume
+ * callbacks of "any" device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
}
- err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
-out:
device_unlock(&dev->dev);
return 0;
}
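
The error_detected callback whose absence now produces the NO_AER_DRIVER vote is the usual struct pci_error_handlers hook. A minimal sketch with a hypothetical "foo" driver, other callbacks elided:

#include <linux/pci.h>

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* stop I/O and ask the core for a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	/* .slot_reset and .resume would normally be provided as well */
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	.err_handler	= &foo_err_handler,
	/* .id_table, .probe and .remove elided */
};
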
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 213753b283a..b52630b8ead 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -242,8 +242,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
return;
/* Training failed. Restore common clock configurations */
- dev_printk(KERN_ERR, &parent->dev,
- "ASPM: Could not configure common clock\n");
+ dev_err(&parent->dev, "ASPM: Could not configure common clock\n");
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_capability_write_word(child, PCI_EXP_LNKCTL,
child_reg[PCI_FUNC(child->devfn)]);
@@ -427,7 +426,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
- pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, 0x3, val);
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, val);
}
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
@@ -442,12 +442,12 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
if (state & ASPM_STATE_L0S_UP)
- dwstream |= PCIE_LINK_STATE_L0S;
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L0S_DW)
- upstream |= PCIE_LINK_STATE_L0S;
+ upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
if (state & ASPM_STATE_L1) {
- upstream |= PCIE_LINK_STATE_L1;
- dwstream |= PCIE_LINK_STATE_L1;
+ upstream |= PCI_EXP_LNKCTL_ASPM_L1;
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
/*
* Spec 2.0 suggests all functions should be configured the
@@ -507,9 +507,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
*/
pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
- " on pre-1.1 PCIe device. You can enable it"
- " with 'pcie_aspm=force'\n");
+ dev_info(&child->dev, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
return -EINVAL;
}
}
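
For reference, pcie_capability_clear_and_set_word(dev, pos, clear, set) is a read-modify-write helper, so the PCI_EXP_LNKCTL_ASPMC update above is roughly equivalent to:

u16 lnkctl;

pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;	/* clear both ASPM enable bits */
lnkctl |= val;				/* set the requested L0s/L1 bits */
pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
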
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index ed129b41462..b42133afca9 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -120,8 +120,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
* the value in this field indicates which MSI-X Table entry is
* used to generate the interrupt message."
*/
- pos = pci_pcie_cap(dev);
- pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
if (entry >= nr_entries)
goto Error;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index d4824cb78b4..08c243ab034 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -134,10 +134,28 @@ static int pcie_port_runtime_resume(struct device *dev)
return 0;
}
+static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
+{
+ bool *pme_poll = data;
+
+ if (pdev->pme_poll)
+ *pme_poll = true;
+ return 0;
+}
+
static int pcie_port_runtime_idle(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool pme_poll = false;
+
+ /*
+ * If any subordinate device needs pme poll, we should keep
+ * the port in D0, because we need port in D0 to poll it.
+ */
+ pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
/* Delay for a short while to prevent too frequent suspend/resume */
- pm_schedule_suspend(dev, 10);
+ if (!pme_poll)
+ pm_schedule_suspend(dev, 10);
return -EBUSY;
}
#else
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3683f6094e3..6186f03d84f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -521,7 +521,7 @@ static unsigned char pcie_link_speed[] = {
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
- bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
+ bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
@@ -579,14 +579,16 @@ static void pci_set_bus_speed(struct pci_bus *bus)
if (pos) {
u16 status;
enum pci_bus_speed max;
- pci_read_config_word(bridge, pos + 2, &status);
- if (status & 0x8000) {
+ pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
+ &status);
+
+ if (status & PCI_X_SSTATUS_533MHZ) {
max = PCI_SPEED_133MHz_PCIX_533;
- } else if (status & 0x4000) {
+ } else if (status & PCI_X_SSTATUS_266MHZ) {
max = PCI_SPEED_133MHz_PCIX_266;
- } else if (status & 0x0002) {
- if (((status >> 12) & 0x3) == 2) {
+ } else if (status & PCI_X_SSTATUS_133MHZ) {
+ if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
max = PCI_SPEED_133MHz_PCIX_ECC;
} else {
max = PCI_SPEED_133MHz_PCIX;
@@ -596,7 +598,8 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
bus->max_bus_speed = max;
- bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
+ bus->cur_bus_speed = pcix_bus_speed[
+ (status & PCI_X_SSTATUS_FREQ) >> 6];
return;
}
@@ -607,7 +610,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
u16 linksta;
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
- bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
+ bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
@@ -975,6 +978,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
+ dev->dev.type = &pci_dev_type;
dev->hdr_type = hdr_type & 0x7f;
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
@@ -1889,6 +1893,28 @@ unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
return max;
}
+/**
+ * pci_rescan_bus - scan a PCI bus for devices.
+ * @bus: PCI bus to scan
+ *
+ * Scan a PCI bus and child buses for new devices, adds them,
+ * and enables them.
+ *
+ * Returns the max number of subordinate bus discovered.
+ */
+unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+
+ max = pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_enable_bridges(bus);
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_rescan_bus);
+
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
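
With pci_rescan_bus() now built and exported unconditionally, a hotplug-style caller can pick up devices that appeared behind an already-registered bus. A sketch; the trigger and naming are hypothetical:

#include <linux/pci.h>

static void foo_handle_hotplug_event(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_rescan_bus(bus);	/* scan, assign resources, add and enable devices */
	pr_info("rescan of %04x:%02x done, highest subordinate bus %02x\n",
		pci_domain_nr(bus), bus->number, max);
}
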
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 22ad3ee0cf0..0369fb6fc1d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1790,6 +1790,45 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
quirk_tc86c001_ide);
+/*
+ * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the
+ * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
+ * being read correctly if bit 7 of the base address is set.
+ * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
+ * Re-allocate the regions to a 256-byte boundary if necessary.
+ */
+static void quirk_plx_pci9050(struct pci_dev *dev)
+{
+ unsigned int bar;
+
+ /* Fixed in revision 2 (PCI 9052). */
+ if (dev->revision >= 2)
+ return;
+ for (bar = 0; bar <= 1; bar++)
+ if (pci_resource_len(dev, bar) == 0x80 &&
+ (pci_resource_start(dev, bar) & 0x80)) {
+ struct resource *r = &dev->resource[bar];
+ dev_info(&dev->dev,
+ "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
+ bar);
+ r->start = 0;
+ r->end = 0xff;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ quirk_plx_pci9050);
+/*
+ * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
+ * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
+ * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
+ * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
+ *
+ * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
+ * driver.
+ */
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
+
static void quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
@@ -2686,7 +2725,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
if (PCI_FUNC(dev->devfn))
return;
/*
- * RICOH 0xe823 SD/MMC card reader fails to recognize
+ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
* certain types of SD/MMC cards. Lowering the SD base
* clock frequency from 200Mhz to 50Mhz fixes this issue.
*
@@ -2697,7 +2736,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
* 0xf9 - Key register for 0x150
* 0xfc - key register for 0xe1
*/
- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
+ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
pci_write_config_byte(dev, 0xf9, 0xfc);
pci_write_config_byte(dev, 0x150, 0x10);
pci_write_config_byte(dev, 0xf9, 0x00);
@@ -2724,6 +2764,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 513972f3ed1..7c0fd9252e6 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -111,3 +111,39 @@ void pci_stop_and_remove_bus_device(struct pci_dev *dev)
pci_remove_bus_device(dev);
}
EXPORT_SYMBOL(pci_stop_and_remove_bus_device);
+
+void pci_stop_root_bus(struct pci_bus *bus)
+{
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
+
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe_reverse(child, tmp,
+ &bus->devices, bus_list)
+ pci_stop_bus_device(child);
+
+ /* stop the host bridge */
+ device_del(&host_bridge->dev);
+}
+
+void pci_remove_root_bus(struct pci_bus *bus)
+{
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
+
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe(child, tmp,
+ &bus->devices, bus_list)
+ pci_remove_bus_device(child);
+ pci_remove_bus(bus);
+ host_bridge->bus = NULL;
+
+ /* remove the host bridge */
+ put_device(&host_bridge->dev);
+}
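
The intended pairing for a host-bridge driver that can be unbound is stop-then-remove. A sketch; the foo_host structure is hypothetical, its bus pointer being whatever pci_scan_root_bus() returned at probe time:

#include <linux/pci.h>

struct foo_host {
	struct pci_bus *bus;	/* root bus created at probe time */
};

static void foo_host_remove(struct foo_host *host)
{
	pci_stop_root_bus(host->bus);	/* unbind drivers, stop devices and the host bridge */
	pci_remove_root_bus(host->bus);	/* free child devices, the bus and the bridge reference */
}
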
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 0b3037ab8b9..ab886b7ee32 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -118,11 +118,17 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
void __iomem *rom;
/*
+ * Some devices may provide ROMs via a source other than the BAR
+ */
+ if (pdev->rom && pdev->romlen) {
+ *size = pdev->romlen;
+ return phys_to_virt(pdev->rom);
+ /*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
- if (res->flags & IORESOURCE_ROM_SHADOW) {
+ } else if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
@@ -181,7 +187,8 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
- iounmap(rom);
+ if (!pdev->rom || !pdev->romlen)
+ iounmap(rom);
/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
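
Callers are unaffected by the non-BAR case: they keep using the existing pci_map_rom()/pci_unmap_rom() pair and transparently receive the platform-provided image when pdev->rom and pdev->romlen are set. A sketch of that consumer side:

void __iomem *rom;
size_t size;

rom = pci_map_rom(pdev, &size);
if (rom) {
	/* ... parse up to 'size' bytes of ROM ... */
	pci_unmap_rom(pdev, rom);	/* skips iounmap() for the non-BAR image, as above */
}
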
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 1e808ca338f..6d3591d57ea 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1550,25 +1550,12 @@ enable_all:
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
-#ifdef CONFIG_HOTPLUG
-/**
- * pci_rescan_bus - scan a PCI bus for devices.
- * @bus: PCI bus to scan
- *
- * Scan a PCI bus and child buses for new devices, adds them,
- * and enables them.
- *
- * Returns the max number of subordinate bus discovered.
- */
-unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
+void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
- unsigned int max;
struct pci_dev *dev;
LIST_HEAD(add_list); /* list of resources that
want additional resources */
- max = pci_scan_child_bus(bus);
-
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list)
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
@@ -1579,11 +1566,4 @@ unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
BUG_ON(!list_empty(&add_list));
-
- pci_enable_bridges(bus);
- pci_bus_add_devices(bus);
-
- return max;
}
-EXPORT_SYMBOL_GPL(pci_rescan_bus);
-#endif
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index db542f4196a..966abc6054d 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1068,13 +1068,16 @@ static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
case XenbusStateInitialising:
case XenbusStateInitWait:
case XenbusStateInitialised:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
pcifront_try_connect(pdev);
break;
+ case XenbusStateClosed:
+ if (xdev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
dev_warn(&xdev->dev, "backend going away!\n");
pcifront_try_disconnect(pdev);
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index ffe74b27d66..40c9c3eecd9 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -22,22 +22,22 @@
#include "pinctrl-mvebu.h"
-#define DOVE_SB_REGS_VIRT_BASE 0xfde00000
-#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0200)
+#define DOVE_SB_REGS_VIRT_BASE IOMEM(0xfde00000)
+#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0200)
#define DOVE_PMU_MPP_GENERAL_CTRL (DOVE_MPP_VIRT_BASE + 0x10)
#define DOVE_AU0_AC97_SEL BIT(16)
-#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE | 0xe802C)
+#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
#define DOVE_TWSI_ENABLE_OPTION1 BIT(7)
-#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE | 0xe8030)
+#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE + 0xe8030)
#define DOVE_TWSI_ENABLE_OPTION2 BIT(20)
#define DOVE_TWSI_ENABLE_OPTION3 BIT(21)
#define DOVE_TWSI_OPTION3_GPIO BIT(22)
-#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE | 0xe8034)
+#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE + 0xe8034)
#define DOVE_SSP_ON_AU1 BIT(0)
-#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c)
+#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xe803c)
#define DOVE_AU1_SPDIFO_GPIO_EN BIT(1)
#define DOVE_NAND_GPIO_EN BIT(0)
-#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0400)
+#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE + 0xd0400)
#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40)
#define DOVE_SPI_GPIO_SEL BIT(5)
#define DOVE_UART1_GPIO_SEL BIT(4)
@@ -234,6 +234,14 @@ static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+ /*
+ * clear all audio1 related bits before configure
+ */
+ gcfg2 &= ~DOVE_TWSI_OPTION3_GPIO;
+ gmpp &= ~DOVE_AU1_SPDIFO_GPIO_EN;
+ sspc1 &= ~DOVE_SSP_ON_AU1;
+ mpp4 &= ~DOVE_AU1_GPIO_SEL;
+
if (config & BIT(0))
gcfg2 |= DOVE_TWSI_OPTION3_GPIO;
if (config & BIT(1))
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 9a74ef674a0..fa6ce31c94d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -21,340 +21,341 @@
#include "pinctrl-mvebu.h"
-#define V(f6180, f6190, f6192, f6281, f6282) \
+#define V(f6180, f6190, f6192, f6281, f6282, dx4122) \
((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
- (f6281 << 3) | (f6282 << 4))
+ (f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
enum kirkwood_variant {
- VARIANT_MV88F6180 = V(1, 0, 0, 0, 0),
- VARIANT_MV88F6190 = V(0, 1, 0, 0, 0),
- VARIANT_MV88F6192 = V(0, 0, 1, 0, 0),
- VARIANT_MV88F6281 = V(0, 0, 0, 1, 0),
- VARIANT_MV88F6282 = V(0, 0, 0, 0, 1),
+ VARIANT_MV88F6180 = V(1, 0, 0, 0, 0, 0),
+ VARIANT_MV88F6190 = V(0, 1, 0, 0, 0, 0),
+ VARIANT_MV88F6192 = V(0, 0, 1, 0, 0, 0),
+ VARIANT_MV88F6281 = V(0, 0, 0, 1, 0, 0),
+ VARIANT_MV88F6282 = V(0, 0, 0, 0, 1, 0),
+ VARIANT_MV98DX4122 = V(0, 0, 0, 0, 0, 1),
};
static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_MODE(0,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io2", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io2", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(1,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io3", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io3", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(2,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io4", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io4", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(3,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io5", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io5", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(4,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io6", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "rxd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io6", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rxd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0, 0))),
MPP_MODE(5,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io7", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "txd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "ptp", "trig", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io7", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "txd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "ptp", "trig", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(6,
- MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0))),
+ MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0, 0))),
MPP_MODE(7,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(8,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "twsi0", "sda", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "rts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "mii-1", "rxerr", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xc, "ptp", "clk", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "twsi0", "sda", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "mii-1", "rxerr", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "clk", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1, 0))),
MPP_MODE(9,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "twsi0", "sck", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "cts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xc, "ptp", "evreq", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "twsi0", "sck", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "cts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "evreq", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1, 0))),
MPP_MODE(10,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0X3, "uart0", "txd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xc, "ptp", "trig", V(1, 1, 1, 1, 0))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0X3, "uart0", "txd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "ptp", "trig", V(1, 1, 1, 1, 0, 0))),
MPP_MODE(11,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart0", "rxd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "ptp-1", "evreq", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0xc, "ptp-2", "trig", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart0", "rxd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "ptp-1", "evreq", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xc, "ptp-2", "trig", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0))),
MPP_MODE(12,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 0, 1)),
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0x1, "sdio", "clk", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xa, "audio", "spdifo", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xb, "spi", "mosi", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xd, "twsi1", "sda", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "clk", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xa, "audio", "spdifo", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "spi", "mosi", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "twsi1", "sda", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(13,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "sdio", "cmd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xa, "audio", "rmclk", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "cmd", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0xa, "audio", "rmclk", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(14,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "sdio", "d0", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xa, "audio", "spdifi", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xb, "audio-1", "sdi", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d0", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xa, "audio", "spdifi", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "audio-1", "sdi", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1, 0))),
MPP_MODE(15,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "sdio", "d1", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "sata0", "act", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "spi", "cs", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d1", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "spi", "cs", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(16,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "sdio", "d2", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "sata1", "act", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "extclk", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d2", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "sata1", "act", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "extclk", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1, 0))),
MPP_MODE(17,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "sdio", "d3", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xa, "sata1", "act", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xd, "twsi1", "sck", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "sdio", "d3", V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xa, "sata1", "act", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xd, "twsi1", "sck", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(18,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io0", V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "pex", "clkreq", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io0", V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "pex", "clkreq", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(19,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(20,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0, 0))),
MPP_MODE(21,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(22,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(23,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(24,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(25,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(26,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(27,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(28,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(29,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(30,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxctl", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d10", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxctl", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d10", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(31,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxclk", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d11", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxclk", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d11", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(32,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txclko", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d12", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txclko", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d12", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(33,
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txctl", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d13", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txctl", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d13", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(34,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "txen", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "txen", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(35,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1, 0))),
MPP_MODE(36,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(37,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(38,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(39,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(40,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(41,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(42,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(43,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(44,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "clk", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "clk", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(45,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "e", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "e", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(46,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(47,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(48,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d16", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d16", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(49,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 0, 1, 1)),
- MPP_VAR_FUNCTION(0x5, "ptp", "clk", V(0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0xa, "pex", "clkreq", V(0, 0, 0, 0, 1)),
- MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1))),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x5, "ptp", "clk", V(0, 0, 0, 1, 0, 0)),
+ MPP_VAR_FUNCTION(0xa, "pex", "clkreq", V(0, 0, 0, 0, 1, 0)),
+ MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1, 0))),
};
static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
@@ -433,12 +434,23 @@ static struct mvebu_pinctrl_soc_info mv88f6282_info = {
.ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
};
+static struct mvebu_pinctrl_soc_info mv98dx4122_info = {
+ .variant = VARIANT_MV98DX4122,
+ .controls = mv88f628x_mpp_controls,
+ .ncontrols = ARRAY_SIZE(mv88f628x_mpp_controls),
+ .modes = mv88f6xxx_mpp_modes,
+ .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes),
+ .gpioranges = mv88f628x_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges),
+};
+
static struct of_device_id kirkwood_pinctrl_of_match[] __devinitdata = {
{ .compatible = "marvell,88f6180-pinctrl", .data = &mv88f6180_info },
{ .compatible = "marvell,88f6190-pinctrl", .data = &mv88f6190_info },
{ .compatible = "marvell,88f6192-pinctrl", .data = &mv88f6192_info },
{ .compatible = "marvell,88f6281-pinctrl", .data = &mv88f6281_info },
{ .compatible = "marvell,88f6282-pinctrl", .data = &mv88f6282_info },
+ { .compatible = "marvell,98dx4122-pinctrl", .data = &mv98dx4122_info },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index b8635f634e9..07db89528dc 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -117,7 +117,7 @@ struct exynos5440_pinctrl_priv_data {
};
/* list of all possible config options supported */
-struct pin_config {
+static struct pin_config {
char *prop_cfg;
unsigned int cfg_type;
} pcfgs[] = {
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 8ef3e85cb01..ef66f98e920 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -31,9 +31,8 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/pinctrl-nomadik.h>
-
#include <asm/mach/irq.h>
-
+#include <mach/irqs.h>
#include "pinctrl-nomadik.h"
/*
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 8f31b656c4e..864fed822f9 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -37,7 +37,7 @@
#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
/* list of all possible config options supported */
-struct pin_config {
+static struct pin_config {
char *prop_cfg;
unsigned int cfg_type;
} pcfgs[] = {
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 5addfd16e3c..e2d4e67f7e8 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -104,7 +104,7 @@ struct samsung_pinctrl_drv_data;
/**
* struct samsung_pin_bank: represent a controller pin-bank.
- * @reg_offset: starting offset of the pin-bank registers.
+ * @pctl_offset: starting offset of the pin-bank registers.
* @pin_base: starting pin number of the bank.
* @nr_pins: number of pins included in this bank.
* @func_width: width of the function selector bit field.
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 6b0ebdeae91..be790402e0f 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -32,7 +32,7 @@
#define ASUS_NB_WMI_FILE "asus-nb-wmi"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
MODULE_DESCRIPTION("Asus Notebooks WMI Hotkey Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c0e9ff489b2..f80ae4d10f6 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -51,7 +51,7 @@
#include "asus-wmi.h"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>, "
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>, "
"Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 5838332ea5b..60cb76a5b51 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -39,7 +39,7 @@
#define EEEPC_WMI_FILE "eeepc-wmi"
-MODULE_AUTHOR("Corentin Chary <corentincj@iksaif.net>");
+MODULE_AUTHOR("Corentin Chary <corentin.chary@gmail.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 49a89397231..9f45e2f77d5 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -245,6 +245,13 @@ config BATTERY_INTEL_MID
Say Y here to enable the battery driver on Intel MID
platforms.
+config BATTERY_RX51
+ tristate "Nokia RX-51 (N900) battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable support for battery information on the Nokia
+ RX-51, also known as the N900 tablet.
+
config CHARGER_ISP1704
tristate "ISP1704 USB Charger Detection"
depends on USB_OTG_UTILS
@@ -315,6 +322,16 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_BQ2415X
+ tristate "TI BQ2415x battery charger driver"
+ depends on I2C
+ help
+ Say Y to enable support for the TI BQ2415x battery charger
+ PMICs.
+
+ You'll need this driver to charge batteries on, for example, the
+ Nokia RX-51/N900.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -329,12 +346,8 @@ config AB8500_BM
help
Say Y to include support for AB8500 battery management.
-config AB8500_BATTERY_THERM_ON_BATCTRL
- bool "Thermistor connected on BATCTRL ADC"
- depends on AB8500_BM
- help
- Say Y to enable battery temperature measurements using
- thermistor connected on BATCTRL ADC.
+source "drivers/power/reset/Kconfig"
+
endif # POWER_SUPPLY
source "drivers/power/avs/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b949cf85590..22c8913382c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -37,7 +37,8 @@ obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
-obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
+obj-$(CONFIG_BATTERY_RX51) += rx51_battery.o
+obj-$(CONFIG_AB8500_BM) += ab8500_bmdata.o ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
@@ -47,5 +48,7 @@ obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
+obj-$(CONFIG_POWER_RESET) += reset/
diff --git a/drivers/power/ab8500_bmdata.c b/drivers/power/ab8500_bmdata.c
new file mode 100644
index 00000000000..03cc528425c
--- /dev/null
+++ b/drivers/power/ab8500_bmdata.c
@@ -0,0 +1,521 @@
+#include <linux/export.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+
+/*
+ * These are the defined batteries that use an NTC and ID resistor placed
+ * inside the battery pack.
+ * Note that the res_to_temp table must be strictly sorted by falling resistance
+ * values to work.
+ */
+static struct abx500_res_to_temp temp_tbl_A_thermistor[] = {
+ {-5, 53407},
+ { 0, 48594},
+ { 5, 43804},
+ {10, 39188},
+ {15, 34870},
+ {20, 30933},
+ {25, 27422},
+ {30, 24347},
+ {35, 21694},
+ {40, 19431},
+ {45, 17517},
+ {50, 15908},
+ {55, 14561},
+ {60, 13437},
+ {65, 12500},
+};
+
+static struct abx500_res_to_temp temp_tbl_B_thermistor[] = {
+ {-5, 200000},
+ { 0, 159024},
+ { 5, 151921},
+ {10, 144300},
+ {15, 136424},
+ {20, 128565},
+ {25, 120978},
+ {30, 113875},
+ {35, 107397},
+ {40, 101629},
+ {45, 96592},
+ {50, 92253},
+ {55, 88569},
+ {60, 85461},
+ {65, 82869},
+};
+
+static struct abx500_v_to_cap cap_tbl_A_thermistor[] = {
+ {4171, 100},
+ {4114, 95},
+ {4009, 83},
+ {3947, 74},
+ {3907, 67},
+ {3863, 59},
+ {3830, 56},
+ {3813, 53},
+ {3791, 46},
+ {3771, 33},
+ {3754, 25},
+ {3735, 20},
+ {3717, 17},
+ {3681, 13},
+ {3664, 8},
+ {3651, 6},
+ {3635, 5},
+ {3560, 3},
+ {3408, 1},
+ {3247, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl_B_thermistor[] = {
+ {4161, 100},
+ {4124, 98},
+ {4044, 90},
+ {4003, 85},
+ {3966, 80},
+ {3933, 75},
+ {3888, 67},
+ {3849, 60},
+ {3813, 55},
+ {3787, 47},
+ {3772, 30},
+ {3751, 25},
+ {3718, 20},
+ {3681, 16},
+ {3660, 14},
+ {3589, 10},
+ {3546, 7},
+ {3495, 4},
+ {3404, 2},
+ {3250, 0},
+};
+
+static struct abx500_v_to_cap cap_tbl[] = {
+ {4186, 100},
+ {4163, 99},
+ {4114, 95},
+ {4068, 90},
+ {3990, 80},
+ {3926, 70},
+ {3898, 65},
+ {3866, 60},
+ {3833, 55},
+ {3812, 50},
+ {3787, 40},
+ {3768, 30},
+ {3747, 25},
+ {3730, 20},
+ {3705, 15},
+ {3699, 14},
+ {3684, 12},
+ {3672, 9},
+ {3657, 7},
+ {3638, 6},
+ {3556, 4},
+ {3424, 2},
+ {3317, 1},
+ {3094, 0},
+};
+
+/*
+ * Note that the res_to_temp table must be strictly sorted by falling
+ * resistance values to work.
+ */
+static struct abx500_res_to_temp temp_tbl[] = {
+ {-5, 214834},
+ { 0, 162943},
+ { 5, 124820},
+ {10, 96520},
+ {15, 75306},
+ {20, 59254},
+ {25, 47000},
+ {30, 37566},
+ {35, 30245},
+ {40, 24520},
+ {45, 20010},
+ {50, 16432},
+ {55, 13576},
+ {60, 11280},
+ {65, 9425},
+};
+
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+static struct batres_vs_temp temp_to_batres_tbl_thermistor[] = {
+ { 40, 120},
+ { 30, 135},
+ { 20, 165},
+ { 10, 230},
+ { 00, 325},
+ {-10, 445},
+ {-20, 595},
+};
+
+/*
+ * Note that the batres_vs_temp table must be strictly sorted by falling
+ * temperature values to work.
+ */
+static struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = {
+ { 60, 300},
+ { 30, 300},
+ { 20, 300},
+ { 10, 300},
+ { 00, 300},
+ {-10, 300},
+ {-20, 300},
+};
+
+/* battery resistance table for LI ION 9100 battery */
+static struct batres_vs_temp temp_to_batres_tbl_9100[] = {
+ { 60, 180},
+ { 30, 180},
+ { 20, 180},
+ { 10, 180},
+ { 00, 180},
+ {-10, 180},
+ {-20, 180},
+};
+
+static struct abx500_battery_type bat_type_thermistor[] = {
+[BATTERY_UNKNOWN] = {
+ /* First element always represents the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+{
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 53407,
+ .resis_low = 12500,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
+ .r_to_t_tbl = temp_tbl_A_thermistor,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
+ .v_to_cap_tbl = cap_tbl_A_thermistor,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+
+},
+{
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 200000,
+ .resis_low = 82869,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3600,
+ .termination_vol = 4150,
+ .termination_curr = 80,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
+ .r_to_t_tbl = temp_tbl_B_thermistor,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
+ .v_to_cap_tbl = cap_tbl_B_thermistor,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+};
+
+static struct abx500_battery_type bat_type_ext_thermistor[] = {
+[BATTERY_UNKNOWN] = {
+ /* First element always represents the UNKNOWN battery */
+ .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+ .resis_high = 0,
+ .resis_low = 0,
+ .battery_resistance = 300,
+ .charge_full_design = 612,
+ .nominal_voltage = 3700,
+ .termination_vol = 4050,
+ .termination_curr = 200,
+ .recharge_vol = 3990,
+ .normal_cur_lvl = 400,
+ .normal_vol_lvl = 4100,
+ .maint_a_cur_lvl = 400,
+ .maint_a_vol_lvl = 4050,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 400,
+ .maint_b_vol_lvl = 4000,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+/*
+ * These are the batteries that don't have an internal NTC resistor to measure
+ * their temperature. The temperature in this case is measured with an NTC
+ * placed near the battery, on the PCB.
+ */
+{
+ .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .resis_high = 76000,
+ .resis_low = 53000,
+ .battery_resistance = 300,
+ .charge_full_design = 900,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+{
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 30000,
+ .resis_low = 10000,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+{
+ .name = POWER_SUPPLY_TECHNOLOGY_LION,
+ .resis_high = 95000,
+ .resis_low = 76001,
+ .battery_resistance = 300,
+ .charge_full_design = 950,
+ .nominal_voltage = 3700,
+ .termination_vol = 4150,
+ .termination_curr = 100,
+ .recharge_vol = 4130,
+ .normal_cur_lvl = 700,
+ .normal_vol_lvl = 4200,
+ .maint_a_cur_lvl = 600,
+ .maint_a_vol_lvl = 4150,
+ .maint_a_chg_timer_h = 60,
+ .maint_b_cur_lvl = 600,
+ .maint_b_vol_lvl = 4100,
+ .maint_b_chg_timer_h = 200,
+ .low_high_cur_lvl = 300,
+ .low_high_vol_lvl = 4000,
+ .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+ .r_to_t_tbl = temp_tbl,
+ .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+ .v_to_cap_tbl = cap_tbl,
+ .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+ .batres_tbl = temp_to_batres_tbl_thermistor,
+},
+};
+
+static const struct abx500_bm_capacity_levels cap_levels = {
+ .critical = 2,
+ .low = 10,
+ .normal = 70,
+ .high = 95,
+ .full = 100,
+};
+
+static const struct abx500_fg_parameters fg = {
+ .recovery_sleep_timer = 10,
+ .recovery_total_time = 100,
+ .init_timer = 1,
+ .init_discard_time = 5,
+ .init_total_time = 40,
+ .high_curr_time = 60,
+ .accu_charging = 30,
+ .accu_high_curr = 30,
+ .high_curr_threshold = 50,
+ .lowbat_threshold = 3100,
+ .battok_falling_th_sel0 = 2860,
+ .battok_raising_th_sel1 = 2860,
+ .user_cap_limit = 15,
+ .maint_thres = 97,
+};
+
+static const struct abx500_maxim_parameters maxi_params = {
+ .ena_maxi = true,
+ .chg_curr = 910,
+ .wait_cycles = 10,
+ .charger_curr_step = 100,
+};
+
+static const struct abx500_bm_charger_parameters chg = {
+ .usb_volt_max = 5500,
+ .usb_curr_max = 1500,
+ .ac_volt_max = 7500,
+ .ac_curr_max = 1500,
+};
+
+struct abx500_bm_data ab8500_bm_data = {
+ .temp_under = 3,
+ .temp_low = 8,
+ .temp_high = 43,
+ .temp_over = 48,
+ .main_safety_tmr_h = 4,
+ .temp_interval_chg = 20,
+ .temp_interval_nochg = 120,
+ .usb_safety_tmr_h = 4,
+ .bkup_bat_v = BUP_VCH_SEL_2P6V,
+ .bkup_bat_i = BUP_ICH_SEL_150UA,
+ .no_maintenance = false,
+ .adc_therm = ABx500_ADC_THERM_BATCTRL,
+ .chg_unknown_bat = false,
+ .enable_overshoot = false,
+ .fg_res = 100,
+ .cap_levels = &cap_levels,
+ .bat_type = bat_type_thermistor,
+ .n_btypes = 3,
+ .batt_id = 0,
+ .interval_charging = 5,
+ .interval_not_charging = 120,
+ .temp_hysteresis = 3,
+ .gnd_lift_resistance = 34,
+ .maxi = &maxi_params,
+ .chg_params = &chg,
+ .fg_params = &fg,
+};
+
+int __devinit
+bmdevs_of_probe(struct device *dev,
+ struct device_node *np,
+ struct abx500_bm_data **battery)
+{
+ struct abx500_battery_type *btype;
+ struct device_node *np_bat_supply;
+ struct abx500_bm_data *bat;
+ const char *btech;
+ char bat_tech[8];
+ int i, thermistor;
+
+ *battery = &ab8500_bm_data;
+
+ /* get phandle to 'battery-info' node */
+ np_bat_supply = of_parse_phandle(np, "battery", 0);
+ if (!np_bat_supply) {
+ dev_err(dev, "missing property battery\n");
+ return -EINVAL;
+ }
+ if (of_property_read_bool(np_bat_supply,
+ "thermistor-on-batctrl"))
+ thermistor = NTC_INTERNAL;
+ else
+ thermistor = NTC_EXTERNAL;
+
+ bat = *battery;
+ if (thermistor == NTC_EXTERNAL) {
+ bat->n_btypes = 4;
+ bat->bat_type = bat_type_ext_thermistor;
+ bat->adc_therm = ABx500_ADC_THERM_BATTEMP;
+ }
+ btech = of_get_property(np_bat_supply,
+ "stericsson,battery-type", NULL);
+ if (!btech) {
+ dev_warn(dev, "missing property battery-name/type\n");
+ strcpy(bat_tech, "UNKNOWN");
+ } else {
+ strcpy(bat_tech, btech);
+ }
+
+ if (strncmp(bat_tech, "LION", 4) == 0) {
+ bat->no_maintenance = true;
+ bat->chg_unknown_bat = true;
+ bat->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
+ bat->bat_type[BATTERY_UNKNOWN].termination_vol = 4150;
+ bat->bat_type[BATTERY_UNKNOWN].recharge_vol = 4130;
+ bat->bat_type[BATTERY_UNKNOWN].normal_cur_lvl = 520;
+ bat->bat_type[BATTERY_UNKNOWN].normal_vol_lvl = 4200;
+ }
+ /* select the battery resistance table */
+ for (i = 0; i < bat->n_btypes; ++i) {
+ btype = (bat->bat_type + i);
+ if (thermistor == NTC_EXTERNAL) {
+ btype->batres_tbl =
+ temp_to_batres_tbl_ext_thermistor;
+ } else if (strncmp(bat_tech, "LION", 4) == 0) {
+ btype->batres_tbl =
+ temp_to_batres_tbl_9100;
+ } else {
+ btype->batres_tbl =
+ temp_to_batres_tbl_thermistor;
+ }
+ }
+ of_node_put(np_bat_supply);
+ return 0;
+}
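The res_to_temp tables in this file must stay strictly sorted by falling resistance because a conversion routine walks them to find the two entries that bracket a measured resistance and interpolates between them. The sketch below only illustrates the kind of lookup that ordering enables; the helper name is hypothetical, the field names temp/resist are assumed, and the real conversion code lives elsewhere in the abx500 battery drivers.

	static int example_res_to_temp(const struct abx500_res_to_temp *tbl,
				       int n, int res)
	{
		int i = 0;

		/* clamp to the table limits */
		if (res >= tbl[0].resist)
			return tbl[0].temp;
		if (res <= tbl[n - 1].resist)
			return tbl[n - 1].temp;

		/* falling order lets a simple scan find the bracketing entries */
		while (i < n - 1 && res < tbl[i + 1].resist)
			i++;

		/* linear interpolation between entry i and entry i + 1 */
		return tbl[i].temp + (tbl[i + 1].temp - tbl[i].temp) *
			(tbl[i].resist - res) /
			(tbl[i].resist - tbl[i + 1].resist);
	}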
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 989b09950af..20e2a7d3ef4 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -20,11 +20,13 @@
#include <linux/power_supply.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
-#include <linux/mfd/abx500/ab8500.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>
-#include <linux/jiffies.h>
#define VTVOUT_V 1800
@@ -76,7 +78,6 @@ struct ab8500_btemp_ranges {
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
* @fg: Pointer to the struct fg
- * @pdata: Pointer to the abx500_btemp platform data
* @bat: Pointer to the abx500_bm platform data
* @btemp_psy: Structure for BTEMP specific battery properties
* @events: Structure for information about events triggered
@@ -93,7 +94,6 @@ struct ab8500_btemp {
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
struct ab8500_fg *fg;
- struct abx500_btemp_platform_data *pdata;
struct abx500_bm_data *bat;
struct power_supply btemp_psy;
struct ab8500_btemp_events events;
@@ -955,56 +955,57 @@ static int ab8500_btemp_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->btemp_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+};
+
static int ab8500_btemp_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct ab8500_btemp *di;
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
- struct ab8500_btemp *di;
-
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
- }
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
return -ENOMEM;
+ }
+ di->bat = pdev->mfd_cell->platform_data;
+ if (!di->bat) {
+ if (np) {
+ ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get battery information\n");
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "missing dt node for ab8500_btemp\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "falling back to legacy platform data\n");
+ }
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- /* get btemp specific platform data */
- di->pdata = plat_data->btemp;
- if (!di->pdata) {
- dev_err(di->dev, "no btemp platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
/* BTEMP supply */
di->btemp_psy.name = "ab8500_btemp";
di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->btemp_psy.properties = ab8500_btemp_props;
di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
di->btemp_psy.get_property = ab8500_btemp_get_property;
- di->btemp_psy.supplied_to = di->pdata->supplied_to;
- di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.supplied_to = supply_interface;
+ di->btemp_psy.num_supplicants = ARRAY_SIZE(supply_interface);
di->btemp_psy.external_power_changed =
ab8500_btemp_external_power_changed;
@@ -1014,8 +1015,7 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
create_singlethread_workqueue("ab8500_btemp_wq");
if (di->btemp_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for measuring temperature periodically */
@@ -1093,12 +1093,14 @@ free_irq:
}
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_btemp_match[] = {
+ { .compatible = "stericsson,ab8500-btemp", },
+ { },
+};
+
static struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = ab8500_btemp_remove,
@@ -1107,6 +1109,7 @@ static struct platform_driver ab8500_btemp_driver = {
.driver = {
.name = "ab8500-btemp",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_btemp_match,
},
};
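The probe rework above (legacy platform data via the MFD cell first, then a device-tree fallback through bmdevs_of_probe(), otherwise -EINVAL) is repeated almost verbatim in ab8500_charger.c, ab8500_fg.c and abx500_chargalg.c below. Condensed into a sketch with a hypothetical helper name, the lookup amounts to:

	static int example_get_bm_data(struct platform_device *pdev,
				       struct abx500_bm_data **bat)
	{
		/* legacy platform data, passed through the MFD cell, wins */
		*bat = pdev->mfd_cell->platform_data;
		if (*bat)
			return 0;

		/* otherwise the battery description must come from the device tree */
		if (!pdev->dev.of_node)
			return -EINVAL;

		return bmdevs_of_probe(&pdev->dev, pdev->dev.of_node, bat);
	}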
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 7ecb8abe20b..3be9c0ee3fc 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -23,6 +23,8 @@
#include <linux/err.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
@@ -181,9 +183,9 @@ struct ab8500_charger_usb_state {
* @vbat Battery voltage
* @old_vbat Previously measured battery voltage
* @autopower Indicate if we should have automatic pwron after pwrloss
+ * @autopower_cfg platform specific power config support for "pwron after pwrloss"
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
- * @pdata: Pointer to the abx500_charger platform data
* @bat: Pointer to the abx500_bm platform data
* @flags: Structure for information about events triggered
* @usb_state: Structure for usb stack information
@@ -218,9 +220,9 @@ struct ab8500_charger {
int vbat;
int old_vbat;
bool autopower;
+ bool autopower_cfg;
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
- struct abx500_charger_platform_data *pdata;
struct abx500_bm_data *bat;
struct ab8500_charger_event_flags flags;
struct ab8500_charger_usb_state usb_state;
@@ -322,7 +324,7 @@ static void ab8500_power_loss_handling(struct ab8500_charger *di)
static void ab8500_power_supply_changed(struct ab8500_charger *di,
struct power_supply *psy)
{
- if (di->pdata->autopower_cfg) {
+ if (di->autopower_cfg) {
if (!di->usb.charger_connected &&
!di->ac.charger_connected &&
di->autopower) {
@@ -2526,25 +2528,45 @@ static int ab8500_charger_remove(struct platform_device *pdev)
power_supply_unregister(&di->usb_chg.psy);
power_supply_unregister(&di->ac_chg.psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_fg",
+ "ab8500_btemp",
+};
+
static int ab8500_charger_probe(struct platform_device *pdev)
{
- int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct ab8500_charger *di;
+ int irq, i, charger_status, ret = 0;
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
- }
-
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
return -ENOMEM;
+ }
+ di->bat = pdev->mfd_cell->platform_data;
+ if (!di->bat) {
+ if (np) {
+ ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get battery information\n");
+ return ret;
+ }
+ di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
+ } else {
+ dev_err(&pdev->dev, "missing dt node for ab8500_charger\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "falling back to legacy platform data\n");
+ di->autopower_cfg = false;
+ }
/* get parent data */
di->dev = &pdev->dev;
@@ -2554,22 +2576,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
- /* get charger specific platform data */
- di->pdata = plat_data->charger;
- if (!di->pdata) {
- dev_err(di->dev, "no charger platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
di->autopower = false;
/* AC supply */
@@ -2579,8 +2585,8 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->ac_chg.psy.properties = ab8500_charger_ac_props;
di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
- di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
- di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ di->ac_chg.psy.supplied_to = supply_interface;
+ di->ac_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface),
/* ux500_charger sub-class */
di->ac_chg.ops.enable = &ab8500_charger_ac_en;
di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
@@ -2597,8 +2603,8 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_chg.psy.properties = ab8500_charger_usb_props;
di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
- di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
- di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ di->usb_chg.psy.supplied_to = supply_interface;
+ di->usb_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface),
/* ux500_charger sub-class */
di->usb_chg.ops.enable = &ab8500_charger_usb_en;
di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
@@ -2614,8 +2620,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
create_singlethread_workqueue("ab8500_charger_wq");
if (di->charger_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for HW failure check */
@@ -2757,12 +2762,14 @@ free_regulator:
regulator_put(di->regu);
free_charger_wq:
destroy_workqueue(di->charger_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_charger_match[] = {
+ { .compatible = "stericsson,ab8500-charger", },
+ { },
+};
+
static struct platform_driver ab8500_charger_driver = {
.probe = ab8500_charger_probe,
.remove = ab8500_charger_remove,
@@ -2771,6 +2778,7 @@ static struct platform_driver ab8500_charger_driver = {
.driver = {
.name = "ab8500-charger",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_charger_match,
},
};
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 331dc43ded4..b3bf178c346 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -22,15 +22,16 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/kobject.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500.h>
#include <linux/slab.h>
-#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/delay.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
-#include <linux/mfd/abx500.h>
#include <linux/time.h>
+#include <linux/of.h>
#include <linux/completion.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
#define MILLI_TO_MICRO 1000
#define FG_LSB_IN_MA 1627
@@ -172,7 +173,6 @@ struct inst_curr_result_list {
* @avg_cap: Average capacity filter
* @parent: Pointer to the struct ab8500
* @gpadc: Pointer to the struct gpadc
- * @pdata: Pointer to the abx500_fg platform data
* @bat: Pointer to the abx500_bm platform data
* @fg_psy: Structure that holds the FG specific battery properties
* @fg_wq: Work queue for running the FG algorithm
@@ -212,7 +212,6 @@ struct ab8500_fg {
struct ab8500_fg_avg_cap avg_cap;
struct ab8500 *parent;
struct ab8500_gpadc *gpadc;
- struct abx500_fg_platform_data *pdata;
struct abx500_bm_data *bat;
struct power_supply fg_psy;
struct workqueue_struct *fg_wq;
@@ -2429,7 +2428,6 @@ static int ab8500_fg_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->fg_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return ret;
}
@@ -2442,21 +2440,39 @@ static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
{"CCEOC", ab8500_fg_cc_data_end_handler},
};
+static char *supply_interface[] = {
+ "ab8500_chargalg",
+ "ab8500_usb",
+};
+
static int ab8500_fg_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct ab8500_fg *di;
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
- struct ab8500_fg *di;
-
- if (!plat_data) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
- }
- di = kzalloc(sizeof(*di), GFP_KERNEL);
- if (!di)
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
return -ENOMEM;
+ }
+ di->bat = pdev->mfd_cell->platform_data;
+ if (!di->bat) {
+ if (np) {
+ ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get battery information\n");
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "missing dt node for ab8500_fg\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "falling back to legacy platform data\n");
+ }
mutex_init(&di->cc_lock);
@@ -2465,29 +2481,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- /* get fg specific platform data */
- di->pdata = plat_data->fg;
- if (!di->pdata) {
- dev_err(di->dev, "no fg platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
- /* get battery specific platform data */
- di->bat = plat_data->battery;
- if (!di->bat) {
- dev_err(di->dev, "no battery platform data supplied\n");
- ret = -EINVAL;
- goto free_device_info;
- }
-
di->fg_psy.name = "ab8500_fg";
di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->fg_psy.properties = ab8500_fg_props;
di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
di->fg_psy.get_property = ab8500_fg_get_property;
- di->fg_psy.supplied_to = di->pdata->supplied_to;
- di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.supplied_to = supply_interface;
+ di->fg_psy.num_supplicants = ARRAY_SIZE(supply_interface),
di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
di->bat_cap.max_mah_design = MILLI_TO_MICRO *
@@ -2506,8 +2506,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
if (di->fg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- ret = -ENOMEM;
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for running the fg algorithm instantly */
@@ -2606,12 +2605,14 @@ free_irq:
}
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_fg_match[] = {
+ { .compatible = "stericsson,ab8500-fg", },
+ { },
+};
+
static struct platform_driver ab8500_fg_driver = {
.probe = ab8500_fg_probe,
.remove = ab8500_fg_remove,
@@ -2620,6 +2621,7 @@ static struct platform_driver ab8500_fg_driver = {
.driver = {
.name = "ab8500-fg",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_fg_match,
},
};
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 19f25419079..29708914606 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -21,6 +21,8 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/mfd/abx500/ab8500-bm.h>
@@ -205,7 +207,6 @@ enum maxim_ret {
* @chg_info: information about connected charger types
* @batt_data: data of the battery
* @susp_status: current charger suspension status
- * @pdata: pointer to the abx500_chargalg platform data
* @bat: pointer to the abx500_bm platform data
* @chargalg_psy: structure that holds the battery properties exposed by
* the charging algorithm
@@ -231,7 +232,6 @@ struct abx500_chargalg {
struct abx500_chargalg_charger_info chg_info;
struct abx500_chargalg_battery_data batt_data;
struct abx500_chargalg_suspension_status susp_status;
- struct abx500_chargalg_platform_data *pdata;
struct abx500_bm_data *bat;
struct power_supply chargalg_psy;
struct ux500_charger *ac_chg;
@@ -1795,36 +1795,53 @@ static int abx500_chargalg_remove(struct platform_device *pdev)
flush_scheduled_work();
power_supply_unregister(&di->chargalg_psy);
platform_set_drvdata(pdev, NULL);
- kfree(di);
return 0;
}
+static char *supply_interface[] = {
+ "ab8500_fg",
+};
+
static int abx500_chargalg_probe(struct platform_device *pdev)
{
- struct abx500_bm_plat_data *plat_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct abx500_chargalg *di;
int ret = 0;
- struct abx500_chargalg *di =
- kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
- if (!di)
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
+ if (!di) {
+ dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__);
return -ENOMEM;
+ }
+ di->bat = pdev->mfd_cell->platform_data;
+ if (!di->bat) {
+ if (np) {
+ ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get battery information\n");
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "missing dt node for ab8500_chargalg\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&pdev->dev, "falling back to legacy platform data\n");
+ }
/* get device struct */
di->dev = &pdev->dev;
- plat_data = pdev->dev.platform_data;
- di->pdata = plat_data->chargalg;
- di->bat = plat_data->battery;
-
/* chargalg supply */
di->chargalg_psy.name = "abx500_chargalg";
di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
di->chargalg_psy.properties = abx500_chargalg_props;
di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
di->chargalg_psy.get_property = abx500_chargalg_get_property;
- di->chargalg_psy.supplied_to = di->pdata->supplied_to;
- di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->chargalg_psy.supplied_to = supply_interface;
+ di->chargalg_psy.num_supplicants = ARRAY_SIZE(supply_interface),
di->chargalg_psy.external_power_changed =
abx500_chargalg_external_power_changed;
@@ -1844,7 +1861,7 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
create_singlethread_workqueue("abx500_chargalg_wq");
if (di->chargalg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
- goto free_device_info;
+ return -ENOMEM;
}
/* Init work for chargalg */
@@ -1885,20 +1902,23 @@ free_psy:
power_supply_unregister(&di->chargalg_psy);
free_chargalg_wq:
destroy_workqueue(di->chargalg_wq);
-free_device_info:
- kfree(di);
-
return ret;
}
+static const struct of_device_id ab8500_chargalg_match[] = {
+ { .compatible = "stericsson,ab8500-chargalg", },
+ { },
+};
+
static struct platform_driver abx500_chargalg_driver = {
.probe = abx500_chargalg_probe,
.remove = abx500_chargalg_remove,
.suspend = abx500_chargalg_suspend,
.resume = abx500_chargalg_resume,
.driver = {
- .name = "abx500-chargalg",
+ .name = "ab8500-chargalg",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_chargalg_match,
},
};
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index d0fed2c5cf2..6b2238bb6a8 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -27,8 +27,6 @@
#include <linux/pm_runtime.h>
#include <linux/power/smartreflex.h>
-#include <plat/cpu.h>
-
#define SMARTREFLEX_NAME_LEN 16
#define NVALUE_NAME_LEN 40
#define SR_DISABLE_TIMEOUT 200
@@ -130,24 +128,21 @@ static irqreturn_t sr_interrupt(int irq, void *data)
static void sr_set_clk_length(struct omap_sr *sr)
{
- struct clk *sys_ck;
- u32 sys_clk_speed;
+ struct clk *fck;
+ u32 fclk_speed;
- if (cpu_is_omap34xx())
- sys_ck = clk_get(NULL, "sys_ck");
- else
- sys_ck = clk_get(NULL, "sys_clkin_ck");
+ fck = clk_get(&sr->pdev->dev, "fck");
- if (IS_ERR(sys_ck)) {
- dev_err(&sr->pdev->dev, "%s: unable to get sys clk\n",
- __func__);
+ if (IS_ERR(fck)) {
+ dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n",
+ __func__, dev_name(&sr->pdev->dev));
return;
}
- sys_clk_speed = clk_get_rate(sys_ck);
- clk_put(sys_ck);
+ fclk_speed = clk_get_rate(fck);
+ clk_put(fck);
- switch (sys_clk_speed) {
+ switch (fclk_speed) {
case 12000000:
sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
break;
@@ -164,34 +159,12 @@ static void sr_set_clk_length(struct omap_sr *sr)
sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
break;
default:
- dev_err(&sr->pdev->dev, "%s: Invalid sysclk value: %d\n",
- __func__, sys_clk_speed);
+ dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
+ __func__, fclk_speed);
break;
}
}
-static void sr_set_regfields(struct omap_sr *sr)
-{
- /*
- * For time being these values are defined in smartreflex.h
- * and populated during init. May be they can be moved to board
- * file or pmic specific data structure. In that case these structure
- * fields will have to be populated using the pdata or pmic structure.
- */
- if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
- sr->err_weight = OMAP3430_SR_ERRWEIGHT;
- sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
- sr->accum_data = OMAP3430_SR_ACCUMDATA;
- if (!(strcmp(sr->name, "smartreflex_mpu_iva"))) {
- sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
- sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
- } else {
- sr->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
- sr->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
- }
- }
-}
-
static void sr_start_vddautocomp(struct omap_sr *sr)
{
if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
@@ -924,8 +897,14 @@ static int __init omap_sr_probe(struct platform_device *pdev)
sr_info->nvalue_count = pdata->nvalue_count;
sr_info->senn_mod = pdata->senn_mod;
sr_info->senp_mod = pdata->senp_mod;
+ sr_info->err_weight = pdata->err_weight;
+ sr_info->err_maxlimit = pdata->err_maxlimit;
+ sr_info->accum_data = pdata->accum_data;
+ sr_info->senn_avgweight = pdata->senn_avgweight;
+ sr_info->senp_avgweight = pdata->senp_avgweight;
sr_info->autocomp_active = false;
sr_info->ip_type = pdata->ip_type;
+
sr_info->base = ioremap(mem->start, resource_size(mem));
if (!sr_info->base) {
dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
@@ -937,7 +916,6 @@ static int __init omap_sr_probe(struct platform_device *pdev)
sr_info->irq = irq->start;
sr_set_clk_length(sr_info);
- sr_set_regfields(sr_info);
list_add(&sr_info->node, &sr_list);
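With sr_set_regfields() removed, the error weights, limits and averaging weights now have to arrive through platform data, as the probe hunk above shows. A board file would populate them roughly as follows (sketch only; sr_data stands for the SmartReflex platform data pointer, and the constants are the OMAP3430 SR1 values the deleted helper used):

	sr_data->err_weight     = OMAP3430_SR_ERRWEIGHT;
	sr_data->err_maxlimit   = OMAP3430_SR_ERRMAXLIMIT;
	sr_data->accum_data     = OMAP3430_SR_ACCUMDATA;
	sr_data->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
	sr_data->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;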
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
new file mode 100644
index 00000000000..ee842b37f46
--- /dev/null
+++ b/drivers/power/bq2415x_charger.c
@@ -0,0 +1,1670 @@
+/*
+ * bq2415x charger driver
+ *
+ * Copyright (C) 2011-2012 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Datasheets:
+ * http://www.ti.com/product/bq24150
+ * http://www.ti.com/product/bq24150a
+ * http://www.ti.com/product/bq24152
+ * http://www.ti.com/product/bq24153
+ * http://www.ti.com/product/bq24153a
+ * http://www.ti.com/product/bq24155
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <linux/power/bq2415x_charger.h>
+
+/* timeout for resetting chip timer */
+#define BQ2415X_TIMER_TIMEOUT 10
+
+#define BQ2415X_REG_STATUS 0x00
+#define BQ2415X_REG_CONTROL 0x01
+#define BQ2415X_REG_VOLTAGE 0x02
+#define BQ2415X_REG_VENDER 0x03
+#define BQ2415X_REG_CURRENT 0x04
+
+/* reset state for all registers */
+#define BQ2415X_RESET_STATUS BIT(6)
+#define BQ2415X_RESET_CONTROL (BIT(4)|BIT(5))
+#define BQ2415X_RESET_VOLTAGE (BIT(1)|BIT(3))
+#define BQ2415X_RESET_CURRENT (BIT(0)|BIT(3)|BIT(7))
+
+/* status register */
+#define BQ2415X_BIT_TMR_RST 7
+#define BQ2415X_BIT_OTG 7
+#define BQ2415X_BIT_EN_STAT 6
+#define BQ2415X_MASK_STAT (BIT(4)|BIT(5))
+#define BQ2415X_SHIFT_STAT 4
+#define BQ2415X_BIT_BOOST 3
+#define BQ2415X_MASK_FAULT (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_FAULT 0
+
+/* control register */
+#define BQ2415X_MASK_LIMIT (BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_LIMIT 6
+#define BQ2415X_MASK_VLOWV (BIT(4)|BIT(5))
+#define BQ2415X_SHIFT_VLOWV 4
+#define BQ2415X_BIT_TE 3
+#define BQ2415X_BIT_CE 2
+#define BQ2415X_BIT_HZ_MODE 1
+#define BQ2415X_BIT_OPA_MODE 0
+
+/* voltage register */
+#define BQ2415X_MASK_VO (BIT(2)|BIT(3)|BIT(4)|BIT(5)|BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_VO 2
+#define BQ2415X_BIT_OTG_PL 1
+#define BQ2415X_BIT_OTG_EN 0
+
+/* vender register */
+#define BQ2415X_MASK_VENDER (BIT(5)|BIT(6)|BIT(7))
+#define BQ2415X_SHIFT_VENDER 5
+#define BQ2415X_MASK_PN (BIT(3)|BIT(4))
+#define BQ2415X_SHIFT_PN 3
+#define BQ2415X_MASK_REVISION (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_REVISION 0
+
+/* current register */
+#define BQ2415X_MASK_RESET BIT(7)
+#define BQ2415X_MASK_VI_CHRG (BIT(4)|BIT(5)|BIT(6))
+#define BQ2415X_SHIFT_VI_CHRG 4
+/* N/A BIT(3) */
+#define BQ2415X_MASK_VI_TERM (BIT(0)|BIT(1)|BIT(2))
+#define BQ2415X_SHIFT_VI_TERM 0
+
+
+enum bq2415x_command {
+ BQ2415X_TIMER_RESET,
+ BQ2415X_OTG_STATUS,
+ BQ2415X_STAT_PIN_STATUS,
+ BQ2415X_STAT_PIN_ENABLE,
+ BQ2415X_STAT_PIN_DISABLE,
+ BQ2415X_CHARGE_STATUS,
+ BQ2415X_BOOST_STATUS,
+ BQ2415X_FAULT_STATUS,
+
+ BQ2415X_CHARGE_TERMINATION_STATUS,
+ BQ2415X_CHARGE_TERMINATION_ENABLE,
+ BQ2415X_CHARGE_TERMINATION_DISABLE,
+ BQ2415X_CHARGER_STATUS,
+ BQ2415X_CHARGER_ENABLE,
+ BQ2415X_CHARGER_DISABLE,
+ BQ2415X_HIGH_IMPEDANCE_STATUS,
+ BQ2415X_HIGH_IMPEDANCE_ENABLE,
+ BQ2415X_HIGH_IMPEDANCE_DISABLE,
+ BQ2415X_BOOST_MODE_STATUS,
+ BQ2415X_BOOST_MODE_ENABLE,
+ BQ2415X_BOOST_MODE_DISABLE,
+
+ BQ2415X_OTG_LEVEL,
+ BQ2415X_OTG_ACTIVATE_HIGH,
+ BQ2415X_OTG_ACTIVATE_LOW,
+ BQ2415X_OTG_PIN_STATUS,
+ BQ2415X_OTG_PIN_ENABLE,
+ BQ2415X_OTG_PIN_DISABLE,
+
+ BQ2415X_VENDER_CODE,
+ BQ2415X_PART_NUMBER,
+ BQ2415X_REVISION,
+};
+
+enum bq2415x_chip {
+ BQUNKNOWN,
+ BQ24150,
+ BQ24150A,
+ BQ24151,
+ BQ24151A,
+ BQ24152,
+ BQ24153,
+ BQ24153A,
+ BQ24155,
+ BQ24156,
+ BQ24156A,
+ BQ24158,
+};
+
+static char *bq2415x_chip_name[] = {
+ "unknown",
+ "bq24150",
+ "bq24150a",
+ "bq24151",
+ "bq24151a",
+ "bq24152",
+ "bq24153",
+ "bq24153a",
+ "bq24155",
+ "bq24156",
+ "bq24156a",
+ "bq24158",
+};
+
+struct bq2415x_device {
+ struct device *dev;
+ struct bq2415x_platform_data init_data;
+ struct power_supply charger;
+ struct delayed_work work;
+ enum bq2415x_mode reported_mode;/* mode reported by hook function */
+ enum bq2415x_mode mode; /* currently configured mode */
+ enum bq2415x_chip chip;
+ const char *timer_error;
+ char *model;
+ char *name;
+ int autotimer; /* 1 - driver automatically resets the chip timer, 0 - it does not */
+ int automode; /* 1 - enabled, 0 - disabled; -1 - not supported */
+ int id;
+};
+
+/* each registered chip must have a unique id */
+static DEFINE_IDR(bq2415x_id);
+
+static DEFINE_MUTEX(bq2415x_id_mutex);
+static DEFINE_MUTEX(bq2415x_timer_mutex);
+static DEFINE_MUTEX(bq2415x_i2c_mutex);
+
+/**** i2c read functions ****/
+
+/* read value from register */
+static int bq2415x_i2c_read(struct bq2415x_device *bq, u8 reg)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ struct i2c_msg msg[2];
+ u8 val;
+ int ret;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = &reg;
+ msg[0].len = sizeof(reg);
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = &val;
+ msg[1].len = sizeof(val);
+
+ mutex_lock(&bq2415x_i2c_mutex);
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ mutex_unlock(&bq2415x_i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+/* read value from register, apply mask and right shift it */
+static int bq2415x_i2c_read_mask(struct bq2415x_device *bq, u8 reg,
+ u8 mask, u8 shift)
+{
+ int ret;
+
+ if (shift > 8)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_read(bq, reg);
+ if (ret < 0)
+ return ret;
+ return (ret & mask) >> shift;
+}
+
+/* read value from register and return one specified bit */
+static int bq2415x_i2c_read_bit(struct bq2415x_device *bq, u8 reg, u8 bit)
+{
+ if (bit > 8)
+ return -EINVAL;
+ return bq2415x_i2c_read_mask(bq, reg, BIT(bit), bit);
+}
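+
+/*
+ * Example: bq2415x_i2c_read_mask(bq, BQ2415X_REG_STATUS, BQ2415X_MASK_STAT,
+ * BQ2415X_SHIFT_STAT) isolates bits 5:4 of the status register and returns
+ * the charge status as a value in the range 0-3.
+ */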
+
+/**** i2c write functions ****/
+
+/* write value to register */
+static int bq2415x_i2c_write(struct bq2415x_device *bq, u8 reg, u8 val)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ struct i2c_msg msg[1];
+ u8 data[2];
+ int ret;
+
+ data[0] = reg;
+ data[1] = val;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = data;
+ msg[0].len = ARRAY_SIZE(data);
+
+ mutex_lock(&bq2415x_i2c_mutex);
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ mutex_unlock(&bq2415x_i2c_mutex);
+
+ /* i2c_transfer returns number of messages transferred */
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
+/* read the register, replace the masked field with the value shifted into place, write it back */
+static int bq2415x_i2c_write_mask(struct bq2415x_device *bq, u8 reg, u8 val,
+ u8 mask, u8 shift)
+{
+ int ret;
+
+ if (shift > 8)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_read(bq, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~mask;
+ ret |= val << shift;
+
+ return bq2415x_i2c_write(bq, reg, ret);
+}
+
+/* change only one bit in register */
+static int bq2415x_i2c_write_bit(struct bq2415x_device *bq, u8 reg,
+ bool val, u8 bit)
+{
+ if (bit > 8)
+ return -EINVAL;
+ return bq2415x_i2c_write_mask(bq, reg, val, BIT(bit), bit);
+}
+
+/**** global functions ****/
+
+/* exec command function */
+static int bq2415x_exec_command(struct bq2415x_device *bq,
+ enum bq2415x_command command)
+{
+ int ret;
+
+ switch (command) {
+ case BQ2415X_TIMER_RESET:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS,
+ 1, BQ2415X_BIT_TMR_RST);
+ case BQ2415X_OTG_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_OTG);
+ case BQ2415X_STAT_PIN_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_STAT_PIN_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS, 1,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_STAT_PIN_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_STATUS, 0,
+ BQ2415X_BIT_EN_STAT);
+ case BQ2415X_CHARGE_STATUS:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_STATUS,
+ BQ2415X_MASK_STAT, BQ2415X_SHIFT_STAT);
+ case BQ2415X_BOOST_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_STATUS,
+ BQ2415X_BIT_BOOST);
+ case BQ2415X_FAULT_STATUS:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_STATUS,
+ BQ2415X_MASK_FAULT, BQ2415X_SHIFT_FAULT);
+
+ case BQ2415X_CHARGE_TERMINATION_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_TE);
+ case BQ2415X_CHARGE_TERMINATION_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_TE);
+ case BQ2415X_CHARGE_TERMINATION_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_TE);
+ case BQ2415X_CHARGER_STATUS:
+ ret = bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_CE);
+ if (ret < 0)
+ return ret;
+ else
+ return ret > 0 ? 0 : 1;
+ case BQ2415X_CHARGER_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_CE);
+ case BQ2415X_CHARGER_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_CE);
+ case BQ2415X_HIGH_IMPEDANCE_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_HIGH_IMPEDANCE_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_HIGH_IMPEDANCE_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_HZ_MODE);
+ case BQ2415X_BOOST_MODE_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_BIT_OPA_MODE);
+ case BQ2415X_BOOST_MODE_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 1, BQ2415X_BIT_OPA_MODE);
+ case BQ2415X_BOOST_MODE_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_CONTROL,
+ 0, BQ2415X_BIT_OPA_MODE);
+
+ case BQ2415X_OTG_LEVEL:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_ACTIVATE_HIGH:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 1, BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_ACTIVATE_LOW:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 0, BQ2415X_BIT_OTG_PL);
+ case BQ2415X_OTG_PIN_STATUS:
+ return bq2415x_i2c_read_bit(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_BIT_OTG_EN);
+ case BQ2415X_OTG_PIN_ENABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 1, BQ2415X_BIT_OTG_EN);
+ case BQ2415X_OTG_PIN_DISABLE:
+ return bq2415x_i2c_write_bit(bq, BQ2415X_REG_VOLTAGE,
+ 0, BQ2415X_BIT_OTG_EN);
+
+ case BQ2415X_VENDER_CODE:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_VENDER, BQ2415X_SHIFT_VENDER);
+ case BQ2415X_PART_NUMBER:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_PN, BQ2415X_SHIFT_PN);
+ case BQ2415X_REVISION:
+ return bq2415x_i2c_read_mask(bq, BQ2415X_REG_VENDER,
+ BQ2415X_MASK_REVISION, BQ2415X_SHIFT_REVISION);
+ }
+ return -EINVAL;
+}
+
+/* detect chip type */
+static enum bq2415x_chip bq2415x_detect_chip(struct bq2415x_device *bq)
+{
+ struct i2c_client *client = to_i2c_client(bq->dev);
+ int ret = bq2415x_exec_command(bq, BQ2415X_PART_NUMBER);
+
+ if (ret < 0)
+ return ret;
+
+ switch (client->addr) {
+ case 0x6b:
+ switch (ret) {
+ case 0:
+ if (bq->chip == BQ24151A)
+ return bq->chip;
+ else
+ return BQ24151;
+ case 1:
+ if (bq->chip == BQ24150A ||
+ bq->chip == BQ24152 ||
+ bq->chip == BQ24155)
+ return bq->chip;
+ else
+ return BQ24150;
+ case 2:
+ if (bq->chip == BQ24153A)
+ return bq->chip;
+ else
+ return BQ24153;
+ default:
+ return BQUNKNOWN;
+ }
+ break;
+
+ case 0x6a:
+ switch (ret) {
+ case 0:
+ if (bq->chip == BQ24156A)
+ return bq->chip;
+ else
+ return BQ24156;
+ case 2:
+ return BQ24158;
+ default:
+ return BQUNKNOWN;
+ }
+ break;
+ }
+
+ return BQUNKNOWN;
+}
+
+/* detect chip revision */
+static int bq2415x_detect_revision(struct bq2415x_device *bq)
+{
+ int ret = bq2415x_exec_command(bq, BQ2415X_REVISION);
+ int chip = bq2415x_detect_chip(bq);
+
+ if (ret < 0 || chip < 0)
+ return -1;
+
+ switch (chip) {
+ case BQ24150:
+ case BQ24150A:
+ case BQ24151:
+ case BQ24151A:
+ case BQ24152:
+ if (ret >= 0 && ret <= 3)
+ return ret;
+ else
+ return -1;
+ case BQ24153:
+ case BQ24153A:
+ case BQ24156:
+ case BQ24156A:
+ case BQ24158:
+ if (ret == 3)
+ return 0;
+ else if (ret == 1)
+ return 1;
+ else
+ return -1;
+ case BQ24155:
+ if (ret == 3)
+ return 3;
+ else
+ return -1;
+ case BQUNKNOWN:
+ return -1;
+ }
+
+ return -1;
+}
+
+/* return chip vender code */
+static int bq2415x_get_vender_code(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_exec_command(bq, BQ2415X_VENDER_CODE);
+ if (ret < 0)
+ return 0;
+
+ /* render the 3-bit code as its binary digits in decimal, e.g. 0b101 -> 101 */
+ return (ret & 0x1) +
+ ((ret >> 1) & 0x1) * 10 +
+ ((ret >> 2) & 0x1) * 100;
+}
+
+/* reset all chip registers to default state */
+static void bq2415x_reset_chip(struct bq2415x_device *bq)
+{
+ bq2415x_i2c_write(bq, BQ2415X_REG_CURRENT, BQ2415X_RESET_CURRENT);
+ bq2415x_i2c_write(bq, BQ2415X_REG_VOLTAGE, BQ2415X_RESET_VOLTAGE);
+ bq2415x_i2c_write(bq, BQ2415X_REG_CONTROL, BQ2415X_RESET_CONTROL);
+ bq2415x_i2c_write(bq, BQ2415X_REG_STATUS, BQ2415X_RESET_STATUS);
+ bq->timer_error = NULL;
+}
+
+/**** properties functions ****/
+
+/* set current limit in mA */
+static int bq2415x_set_current_limit(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (mA <= 100)
+ val = 0;
+ else if (mA <= 500)
+ val = 1;
+ else if (mA <= 800)
+ val = 2;
+ else
+ val = 3;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CONTROL, val,
+ BQ2415X_MASK_LIMIT, BQ2415X_SHIFT_LIMIT);
+}
+
+/* get current limit in mA */
+static int bq2415x_get_current_limit(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_MASK_LIMIT, BQ2415X_SHIFT_LIMIT);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return 100;
+ else if (ret == 1)
+ return 500;
+ else if (ret == 2)
+ return 800;
+ else if (ret == 3)
+ return 1800;
+ return -EINVAL;
+}
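+
+/*
+ * Example: bq2415x_set_current_limit(bq, 650) selects code 2, which a
+ * subsequent bq2415x_get_current_limit() reports as an 800 mA input limit.
+ */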
+
+/* set weak battery voltage in mV */
+static int bq2415x_set_weak_battery_voltage(struct bq2415x_device *bq, int mV)
+{
+ int val;
+
+ /* round to 100mV */
+ if (mV <= 3400 + 50)
+ val = 0;
+ else if (mV <= 3500 + 50)
+ val = 1;
+ else if (mV <= 3600 + 50)
+ val = 2;
+ else
+ val = 3;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CONTROL, val,
+ BQ2415X_MASK_VLOWV, BQ2415X_SHIFT_VLOWV);
+}
+
+/* get weak battery voltage in mV */
+static int bq2415x_get_weak_battery_voltage(struct bq2415x_device *bq)
+{
+ int ret;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CONTROL,
+ BQ2415X_MASK_VLOWV, BQ2415X_SHIFT_VLOWV);
+ if (ret < 0)
+ return ret;
+ return 100 * (34 + ret);
+}
+
+/* set battery regulation voltage in mV */
+static int bq2415x_set_battery_regulation_voltage(struct bq2415x_device *bq,
+ int mV)
+{
+ int val = (mV/10 - 350) / 2;
+
+ if (val < 0)
+ val = 0;
+ else if (val > 94) /* FIXME: Max is 94 or 122 ? Set max value ? */
+ return -EINVAL;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_VOLTAGE, val,
+ BQ2415X_MASK_VO, BQ2415X_SHIFT_VO);
+}
+
+/* get battery regulation voltage in mV */
+static int bq2415x_get_battery_regulation_voltage(struct bq2415x_device *bq)
+{
+ int ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_VOLTAGE,
+ BQ2415X_MASK_VO, BQ2415X_SHIFT_VO);
+
+ if (ret < 0)
+ return ret;
+ return 10 * (350 + 2*ret);
+}
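+
+/*
+ * Example: 4200 mV encodes as (4200/10 - 350) / 2 = 35, which decodes back
+ * to 10 * (350 + 2 * 35) = 4200 mV.
+ */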
+
+/* set charge current in mA (platform data must provide resistor sense) */
+static int bq2415x_set_charge_current(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ val = (mA * bq->init_data.resistor_sense - 37400) / 6800;
+ if (val < 0)
+ val = 0;
+ else if (val > 7)
+ val = 7;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CURRENT, val,
+ BQ2415X_MASK_VI_CHRG | BQ2415X_MASK_RESET,
+ BQ2415X_SHIFT_VI_CHRG);
+}
+
+/* get charge current in mA (platform data must provide resistor sense) */
+static int bq2415x_get_charge_current(struct bq2415x_device *bq)
+{
+ int ret;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CURRENT,
+ BQ2415X_MASK_VI_CHRG, BQ2415X_SHIFT_VI_CHRG);
+ if (ret < 0)
+ return ret;
+ return (37400 + 6800*ret) / bq->init_data.resistor_sense;
+}
+
+/* set termination current in mA (platform data must provide resistor sense) */
+static int bq2415x_set_termination_current(struct bq2415x_device *bq, int mA)
+{
+ int val;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ val = (mA * bq->init_data.resistor_sense - 3400) / 3400;
+ if (val < 0)
+ val = 0;
+ else if (val > 7)
+ val = 7;
+
+ return bq2415x_i2c_write_mask(bq, BQ2415X_REG_CURRENT, val,
+ BQ2415X_MASK_VI_TERM | BQ2415X_MASK_RESET,
+ BQ2415X_SHIFT_VI_TERM);
+}
+
+/* get termination current in mA (platform data must provide resistor sense) */
+static int bq2415x_get_termination_current(struct bq2415x_device *bq)
+{
+ int ret;
+
+ if (bq->init_data.resistor_sense <= 0)
+ return -ENOSYS;
+
+ ret = bq2415x_i2c_read_mask(bq, BQ2415X_REG_CURRENT,
+ BQ2415X_MASK_VI_TERM, BQ2415X_SHIFT_VI_TERM);
+ if (ret < 0)
+ return ret;
+ return (3400 + 3400*ret) / bq->init_data.resistor_sense;
+}
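+
+/*
+ * Worked example, assuming an illustrative 68 mOhm sense resistor and
+ * resistor_sense expressed in milliohms (which is what the 37400/6800 and
+ * 3400 microvolt constants imply): a 950 mA charge current encodes as
+ * (950 * 68 - 37400) / 6800 = 4 and reads back as
+ * (37400 + 6800 * 4) / 68 = 950 mA; a 100 mA termination current encodes
+ * as (100 * 68 - 3400) / 3400 = 1 and reads back as
+ * (3400 + 3400 * 1) / 68 = 100 mA.
+ */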
+
+/* set default value of property */
+#define bq2415x_set_default_value(bq, prop) \
+ do { \
+ int ret = 0; \
+ if (bq->init_data.prop != -1) \
+ ret = bq2415x_set_##prop(bq, bq->init_data.prop); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+/* set default values of all properties */
+static int bq2415x_set_defaults(struct bq2415x_device *bq)
+{
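+ /* disable charging and boost while limits are (re)programmed, then re-enable */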
+ bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_DISABLE);
+ bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
+ bq2415x_exec_command(bq, BQ2415X_CHARGE_TERMINATION_DISABLE);
+
+ bq2415x_set_default_value(bq, current_limit);
+ bq2415x_set_default_value(bq, weak_battery_voltage);
+ bq2415x_set_default_value(bq, battery_regulation_voltage);
+
+ if (bq->init_data.resistor_sense > 0) {
+ bq2415x_set_default_value(bq, charge_current);
+ bq2415x_set_default_value(bq, termination_current);
+ bq2415x_exec_command(bq, BQ2415X_CHARGE_TERMINATION_ENABLE);
+ }
+
+ bq2415x_exec_command(bq, BQ2415X_CHARGER_ENABLE);
+ return 0;
+}
+
+/**** charger mode functions ****/
+
+/* set charger mode */
+static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
+{
+ int ret = 0;
+ int charger = 0;
+ int boost = 0;
+
+ if (mode == BQ2415X_MODE_HOST_CHARGER ||
+ mode == BQ2415X_MODE_DEDICATED_CHARGER)
+ charger = 1;
+
+ if (mode == BQ2415X_MODE_BOOST)
+ boost = 1;
+
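+ /* disable the unused function so charger and boost are never on together */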
+ if (!charger)
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
+
+ if (!boost)
+ ret = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_DISABLE);
+
+ if (ret < 0)
+ return ret;
+
+ switch (mode) {
+ case BQ2415X_MODE_NONE:
+ dev_dbg(bq->dev, "changing mode to: N/A\n");
+ ret = bq2415x_set_current_limit(bq, 100);
+ break;
+ case BQ2415X_MODE_HOST_CHARGER:
+ dev_dbg(bq->dev, "changing mode to: Host/HUB charger\n");
+ ret = bq2415x_set_current_limit(bq, 500);
+ break;
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ dev_dbg(bq->dev, "changing mode to: Dedicated charger\n");
+ ret = bq2415x_set_current_limit(bq, 1800);
+ break;
+ case BQ2415X_MODE_BOOST: /* Boost mode */
+ dev_dbg(bq->dev, "changing mode to: Boost\n");
+ ret = bq2415x_set_current_limit(bq, 100);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ if (charger)
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_ENABLE);
+ else if (boost)
+ ret = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_ENABLE);
+
+ if (ret < 0)
+ return ret;
+
+ bq2415x_set_default_value(bq, weak_battery_voltage);
+ bq2415x_set_default_value(bq, battery_regulation_voltage);
+
+ bq->mode = mode;
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "mode");
+
+ return 0;
+}
+
+/* hook function called by other driver which set reported mode */
+static void bq2415x_hook_function(enum bq2415x_mode mode, void *data)
+{
+ struct bq2415x_device *bq = data;
+
+ if (!bq)
+ return;
+
+ dev_dbg(bq->dev, "hook function was called\n");
+ bq->reported_mode = mode;
+
+ /* if automode is not enabled do not tell about reported_mode */
+ if (bq->automode < 1)
+ return;
+
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
+ bq2415x_set_mode(bq, bq->reported_mode);
+}
+
+/**** timer functions ****/
+
+/* enable/disable auto resetting chip timer */
+static void bq2415x_set_autotimer(struct bq2415x_device *bq, int state)
+{
+ mutex_lock(&bq2415x_timer_mutex);
+
+ if (bq->autotimer == state) {
+ mutex_unlock(&bq2415x_timer_mutex);
+ return;
+ }
+
+ bq->autotimer = state;
+
+ if (state) {
+ schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
+ bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+ bq->timer_error = NULL;
+ } else {
+ cancel_delayed_work_sync(&bq->work);
+ }
+
+ mutex_unlock(&bq2415x_timer_mutex);
+}
+
+/* called by bq2415x_timer_work on timer error */
+static void bq2415x_timer_error(struct bq2415x_device *bq, const char *msg)
+{
+ bq->timer_error = msg;
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "timer");
+ dev_err(bq->dev, "%s\n", msg);
+ if (bq->automode > 0)
+ bq->automode = 0;
+ bq2415x_set_mode(bq, BQ2415X_MODE_NONE);
+ bq2415x_set_autotimer(bq, 0);
+}
+
+/* delayed work function for auto resetting chip timer */
+static void bq2415x_timer_work(struct work_struct *work)
+{
+ struct bq2415x_device *bq = container_of(work, struct bq2415x_device,
+ work.work);
+ int ret;
+ int error;
+ int boost;
+
+ if (!bq->autotimer)
+ return;
+
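+ /* reset the chip safety timer, then inspect boost and fault status */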
+ ret = bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+ if (ret < 0) {
+ bq2415x_timer_error(bq, "Resetting timer failed");
+ return;
+ }
+
+ boost = bq2415x_exec_command(bq, BQ2415X_BOOST_MODE_STATUS);
+ if (boost < 0) {
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+
+ error = bq2415x_exec_command(bq, BQ2415X_FAULT_STATUS);
+ if (error < 0) {
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+
+ if (boost) {
+ switch (error) {
+ /* Non fatal errors, chip is OK */
+ case 0: /* No error */
+ break;
+ case 6: /* Timer expired */
+ dev_err(bq->dev, "Timer expired\n");
+ break;
+ case 3: /* Battery voltage too low */
+ dev_err(bq->dev, "Battery voltage to low\n");
+ break;
+
+ /* Fatal errors, disable and reset chip */
+ case 1: /* Overvoltage protection (chip fried) */
+ bq2415x_timer_error(bq,
+ "Overvoltage protection (chip fried)");
+ return;
+ case 2: /* Overload */
+ bq2415x_timer_error(bq, "Overload");
+ return;
+ case 4: /* Battery overvoltage protection */
+ bq2415x_timer_error(bq,
+ "Battery overvoltage protection");
+ return;
+ case 5: /* Thermal shutdown (too hot) */
+ bq2415x_timer_error(bq,
+ "Thermal shutdown (too hot)");
+ return;
+ case 7: /* N/A */
+ bq2415x_timer_error(bq, "Unknown error");
+ return;
+ }
+ } else {
+ switch (error) {
+ /* Non fatal errors, chip is OK */
+ case 0: /* No error */
+ break;
+ case 2: /* Sleep mode */
+ dev_err(bq->dev, "Sleep mode\n");
+ break;
+ case 3: /* Poor input source */
+ dev_err(bq->dev, "Poor input source\n");
+ break;
+ case 6: /* Timer expired */
+ dev_err(bq->dev, "Timer expired\n");
+ break;
+ case 7: /* No battery */
+ dev_err(bq->dev, "No battery\n");
+ break;
+
+ /* Fatal errors, disable and reset chip */
+ case 1: /* Overvoltage protection (chip fried) */
+ bq2415x_timer_error(bq,
+ "Overvoltage protection (chip fried)");
+ return;
+ case 4: /* Battery overvoltage protection */
+ bq2415x_timer_error(bq,
+ "Battery overvoltage protection");
+ return;
+ case 5: /* Thermal shutdown (too hot) */
+ bq2415x_timer_error(bq,
+ "Thermal shutdown (too hot)");
+ return;
+ }
+ }
+
+ schedule_delayed_work(&bq->work, BQ2415X_TIMER_TIMEOUT * HZ);
+}
+
+/**** power supply interface code ****/
+
+static enum power_supply_property bq2415x_power_supply_props[] = {
+ /* TODO: maybe add more power supply properties */
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int bq2415x_power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq2415x_exec_command(bq, BQ2415X_CHARGE_STATUS);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0) /* Ready */
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret == 1) /* Charge in progress */
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (ret == 2) /* Charge done */
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bq->model;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bq2415x_power_supply_init(struct bq2415x_device *bq)
+{
+ int ret;
+ int chip;
+ char revstr[8];
+
+ bq->charger.name = bq->name;
+ bq->charger.type = POWER_SUPPLY_TYPE_USB;
+ bq->charger.properties = bq2415x_power_supply_props;
+ bq->charger.num_properties = ARRAY_SIZE(bq2415x_power_supply_props);
+ bq->charger.get_property = bq2415x_power_supply_get_property;
+
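+ /* chip and revision detection is best-effort; fall back to "unknown" on failure */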
+ ret = bq2415x_detect_chip(bq);
+ if (ret < 0)
+ chip = BQUNKNOWN;
+ else
+ chip = ret;
+
+ ret = bq2415x_detect_revision(bq);
+ if (ret < 0)
+ strcpy(revstr, "unknown");
+ else
+ sprintf(revstr, "1.%d", ret);
+
+ bq->model = kasprintf(GFP_KERNEL,
+ "chip %s, revision %s, vender code %.3d",
+ bq2415x_chip_name[chip], revstr,
+ bq2415x_get_vender_code(bq));
+ if (!bq->model) {
+ dev_err(bq->dev, "failed to allocate model name\n");
+ return -ENOMEM;
+ }
+
+ ret = power_supply_register(bq->dev, &bq->charger);
+ if (ret) {
+ kfree(bq->model);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bq2415x_power_supply_exit(struct bq2415x_device *bq)
+{
+ bq->autotimer = 0;
+ if (bq->automode > 0)
+ bq->automode = 0;
+ cancel_delayed_work_sync(&bq->work);
+ power_supply_unregister(&bq->charger);
+ kfree(bq->model);
+}
+
+/**** additional sysfs entries for power supply interface ****/
+
+/* show *_status entries */
+static ssize_t bq2415x_sysfs_show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ int ret;
+
+ if (strcmp(attr->attr.name, "otg_status") == 0)
+ command = BQ2415X_OTG_STATUS;
+ else if (strcmp(attr->attr.name, "charge_status") == 0)
+ command = BQ2415X_CHARGE_STATUS;
+ else if (strcmp(attr->attr.name, "boost_status") == 0)
+ command = BQ2415X_BOOST_STATUS;
+ else if (strcmp(attr->attr.name, "fault_status") == 0)
+ command = BQ2415X_FAULT_STATUS;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+/*
+ * set timer entry:
+ * auto - enable auto mode
+ * off - disable auto mode
+ * (other values) - reset chip timer
+ */
+static ssize_t bq2415x_sysfs_set_timer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret = 0;
+
+ if (strncmp(buf, "auto", 4) == 0)
+ bq2415x_set_autotimer(bq, 1);
+ else if (strncmp(buf, "off", 3) == 0)
+ bq2415x_set_autotimer(bq, 0);
+ else
+ ret = bq2415x_exec_command(bq, BQ2415X_TIMER_RESET);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show timer entry (auto or off) */
+static ssize_t bq2415x_sysfs_show_timer(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+
+ if (bq->timer_error)
+ return sprintf(buf, "%s\n", bq->timer_error);
+
+ if (bq->autotimer)
+ return sprintf(buf, "auto\n");
+ return sprintf(buf, "off\n");
+}
+
+/*
+ * set mode entry:
+ * auto - if automode is supported, enable it and set mode to reported
+ * none - disable charger and boost mode
+ * host - charging mode for host/hub chargers (current limit 500mA)
+ * dedicated - charging mode for dedicated chargers (unlimited current limit)
+ * boost - disable charger and enable boost mode
+ */
+static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_mode mode;
+ int ret = 0;
+
+ if (strncmp(buf, "auto", 4) == 0) {
+ if (bq->automode < 0)
+ return -ENOSYS;
+ bq->automode = 1;
+ mode = bq->reported_mode;
+ } else if (strncmp(buf, "none", 4) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_NONE;
+ } else if (strncmp(buf, "host", 4) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_HOST_CHARGER;
+ } else if (strncmp(buf, "dedicated", 9) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_DEDICATED_CHARGER;
+ } else if (strncmp(buf, "boost", 5) == 0) {
+ if (bq->automode > 0)
+ bq->automode = 0;
+ mode = BQ2415X_MODE_BOOST;
+ } else if (strncmp(buf, "reset", 5) == 0) {
+ bq2415x_reset_chip(bq);
+ bq2415x_set_defaults(bq);
+ if (bq->automode <= 0)
+ return count;
+ bq->automode = 1;
+ mode = bq->reported_mode;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = bq2415x_set_mode(bq, mode);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show mode entry (auto, none, host, dedicated or boost) */
+static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+
+ if (bq->automode > 0)
+ ret += sprintf(buf+ret, "auto (");
+
+ switch (bq->mode) {
+ case BQ2415X_MODE_NONE:
+ ret += sprintf(buf+ret, "none");
+ break;
+ case BQ2415X_MODE_HOST_CHARGER:
+ ret += sprintf(buf+ret, "host");
+ break;
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ ret += sprintf(buf+ret, "dedicated");
+ break;
+ case BQ2415X_MODE_BOOST:
+ ret += sprintf(buf+ret, "boost");
+ break;
+ }
+
+ if (bq->automode > 0)
+ ret += sprintf(buf+ret, ")");
+
+ ret += sprintf(buf+ret, "\n");
+ return ret;
+}
+
+/* show reported_mode entry (none, host, dedicated or boost) */
+static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+
+ if (bq->automode < 0)
+ return -EINVAL;
+
+ switch (bq->reported_mode) {
+ case BQ2415X_MODE_NONE:
+ return sprintf(buf, "none\n");
+ case BQ2415X_MODE_HOST_CHARGER:
+ return sprintf(buf, "host\n");
+ case BQ2415X_MODE_DEDICATED_CHARGER:
+ return sprintf(buf, "dedicated\n");
+ case BQ2415X_MODE_BOOST:
+ return sprintf(buf, "boost\n");
+ }
+
+ return -EINVAL;
+}
+
+/* directly set raw value to chip register, format: 'register value' */
+static ssize_t bq2415x_sysfs_set_registers(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+ unsigned int reg;
+ unsigned int val;
+
+ if (sscanf(buf, "%x %x", &reg, &val) != 2)
+ return -EINVAL;
+
+ if (reg > 4 || val > 255)
+ return -EINVAL;
+
+ ret = bq2415x_i2c_write(bq, reg, val);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* print value of chip register, format: 'register=value' */
+static ssize_t bq2415x_sysfs_print_reg(struct bq2415x_device *bq,
+ u8 reg,
+ char *buf)
+{
+ int ret = bq2415x_i2c_read(bq, reg);
+
+ if (ret < 0)
+ return sprintf(buf, "%#.2x=error %d\n", reg, ret);
+ return sprintf(buf, "%#.2x=%#.2x\n", reg, ret);
+}
+
+/* show all raw values of chip register, format per line: 'register=value' */
+static ssize_t bq2415x_sysfs_show_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ ssize_t ret = 0;
+
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_STATUS, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_CONTROL, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_VOLTAGE, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_VENDER, buf+ret);
+ ret += bq2415x_sysfs_print_reg(bq, BQ2415X_REG_CURRENT, buf+ret);
+ return ret;
+}
+
+/* set current and voltage limit entries (in mA or mV) */
+static ssize_t bq2415x_sysfs_set_limit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "current_limit") == 0)
+ ret = bq2415x_set_current_limit(bq, val);
+ else if (strcmp(attr->attr.name, "weak_battery_voltage") == 0)
+ ret = bq2415x_set_weak_battery_voltage(bq, val);
+ else if (strcmp(attr->attr.name, "battery_regulation_voltage") == 0)
+ ret = bq2415x_set_battery_regulation_voltage(bq, val);
+ else if (strcmp(attr->attr.name, "charge_current") == 0)
+ ret = bq2415x_set_charge_current(bq, val);
+ else if (strcmp(attr->attr.name, "termination_current") == 0)
+ ret = bq2415x_set_termination_current(bq, val);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show current and voltage limit entries (in mA or mV) */
+static ssize_t bq2415x_sysfs_show_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ int ret;
+
+ if (strcmp(attr->attr.name, "current_limit") == 0)
+ ret = bq2415x_get_current_limit(bq);
+ else if (strcmp(attr->attr.name, "weak_battery_voltage") == 0)
+ ret = bq2415x_get_weak_battery_voltage(bq);
+ else if (strcmp(attr->attr.name, "battery_regulation_voltage") == 0)
+ ret = bq2415x_get_battery_regulation_voltage(bq);
+ else if (strcmp(attr->attr.name, "charge_current") == 0)
+ ret = bq2415x_get_charge_current(bq);
+ else if (strcmp(attr->attr.name, "termination_current") == 0)
+ ret = bq2415x_get_termination_current(bq);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+/* set *_enable entries */
+static ssize_t bq2415x_sysfs_set_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ long val;
+ int ret;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ if (strcmp(attr->attr.name, "charge_termination_enable") == 0)
+ command = val ? BQ2415X_CHARGE_TERMINATION_ENABLE :
+ BQ2415X_CHARGE_TERMINATION_DISABLE;
+ else if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ command = val ? BQ2415X_HIGH_IMPEDANCE_ENABLE :
+ BQ2415X_HIGH_IMPEDANCE_DISABLE;
+ else if (strcmp(attr->attr.name, "otg_pin_enable") == 0)
+ command = val ? BQ2415X_OTG_PIN_ENABLE :
+ BQ2415X_OTG_PIN_DISABLE;
+ else if (strcmp(attr->attr.name, "stat_pin_enable") == 0)
+ command = val ? BQ2415X_STAT_PIN_ENABLE :
+ BQ2415X_STAT_PIN_DISABLE;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+/* show *_enable entries */
+static ssize_t bq2415x_sysfs_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq2415x_device *bq = container_of(psy, struct bq2415x_device,
+ charger);
+ enum bq2415x_command command;
+ int ret;
+
+ if (strcmp(attr->attr.name, "charge_termination_enable") == 0)
+ command = BQ2415X_CHARGE_TERMINATION_STATUS;
+ else if (strcmp(attr->attr.name, "high_impedance_enable") == 0)
+ command = BQ2415X_HIGH_IMPEDANCE_STATUS;
+ else if (strcmp(attr->attr.name, "otg_pin_enable") == 0)
+ command = BQ2415X_OTG_PIN_STATUS;
+ else if (strcmp(attr->attr.name, "stat_pin_enable") == 0)
+ command = BQ2415X_STAT_PIN_STATUS;
+ else
+ return -EINVAL;
+
+ ret = bq2415x_exec_command(bq, command);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static DEVICE_ATTR(current_limit, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(weak_battery_voltage, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(battery_regulation_voltage, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(charge_current, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+static DEVICE_ATTR(termination_current, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_limit, bq2415x_sysfs_set_limit);
+
+static DEVICE_ATTR(charge_termination_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(high_impedance_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(otg_pin_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+static DEVICE_ATTR(stat_pin_enable, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_enable, bq2415x_sysfs_set_enable);
+
+static DEVICE_ATTR(reported_mode, S_IRUGO,
+ bq2415x_sysfs_show_reported_mode, NULL);
+static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_mode, bq2415x_sysfs_set_mode);
+static DEVICE_ATTR(timer, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_timer, bq2415x_sysfs_set_timer);
+
+static DEVICE_ATTR(registers, S_IWUSR | S_IRUGO,
+ bq2415x_sysfs_show_registers, bq2415x_sysfs_set_registers);
+
+static DEVICE_ATTR(otg_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(charge_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(boost_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+static DEVICE_ATTR(fault_status, S_IRUGO, bq2415x_sysfs_show_status, NULL);
+
+static struct attribute *bq2415x_sysfs_attributes[] = {
+ /*
+ * TODO: some (appropriate) of these attrs should be switched to
+ * use power supply class props.
+ */
+ &dev_attr_current_limit.attr,
+ &dev_attr_weak_battery_voltage.attr,
+ &dev_attr_battery_regulation_voltage.attr,
+ &dev_attr_charge_current.attr,
+ &dev_attr_termination_current.attr,
+
+ &dev_attr_charge_termination_enable.attr,
+ &dev_attr_high_impedance_enable.attr,
+ &dev_attr_otg_pin_enable.attr,
+ &dev_attr_stat_pin_enable.attr,
+
+ &dev_attr_reported_mode.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_timer.attr,
+
+ &dev_attr_registers.attr,
+
+ &dev_attr_otg_status.attr,
+ &dev_attr_charge_status.attr,
+ &dev_attr_boost_status.attr,
+ &dev_attr_fault_status.attr,
+ NULL,
+};
+
+static const struct attribute_group bq2415x_sysfs_attr_group = {
+ .attrs = bq2415x_sysfs_attributes,
+};
+
+static int bq2415x_sysfs_init(struct bq2415x_device *bq)
+{
+ return sysfs_create_group(&bq->charger.dev->kobj,
+ &bq2415x_sysfs_attr_group);
+}
+
+static void bq2415x_sysfs_exit(struct bq2415x_device *bq)
+{
+ sysfs_remove_group(&bq->charger.dev->kobj, &bq2415x_sysfs_attr_group);
+}
+
+/* main bq2415x probe function */
+static int bq2415x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ int num;
+ char *name;
+ struct bq2415x_device *bq;
+
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "platform data not set\n");
+ return -ENODEV;
+ }
+
+ /* Get new ID for the new device */
+ ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
+ if (ret == 0)
+ return -ENOMEM;
+
+ mutex_lock(&bq2415x_id_mutex);
+ ret = idr_get_new(&bq2415x_id, client, &num);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name) {
+ dev_err(&client->dev, "failed to allocate device name\n");
+ ret = -ENOMEM;
+ goto error_1;
+ }
+
+ bq = kzalloc(sizeof(*bq), GFP_KERNEL);
+ if (!bq) {
+ dev_err(&client->dev, "failed to allocate device data\n");
+ ret = -ENOMEM;
+ goto error_2;
+ }
+
+ i2c_set_clientdata(client, bq);
+
+ bq->id = num;
+ bq->dev = &client->dev;
+ bq->chip = id->driver_data;
+ bq->name = name;
+ bq->mode = BQ2415X_MODE_NONE;
+ bq->reported_mode = BQ2415X_MODE_NONE;
+ bq->autotimer = 0;
+ bq->automode = 0;
+
+ memcpy(&bq->init_data, client->dev.platform_data,
+ sizeof(bq->init_data));
+
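+ /* put the chip into a known default state before registering the power supply */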
+ bq2415x_reset_chip(bq);
+
+ ret = bq2415x_power_supply_init(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to register power supply: %d\n", ret);
+ goto error_3;
+ }
+
+ ret = bq2415x_sysfs_init(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
+ goto error_4;
+ }
+
+ ret = bq2415x_set_defaults(bq);
+ if (ret) {
+ dev_err(bq->dev, "failed to set default values: %d\n", ret);
+ goto error_5;
+ }
+
+ if (bq->init_data.set_mode_hook) {
+ if (bq->init_data.set_mode_hook(
+ bq2415x_hook_function, bq)) {
+ bq->automode = 1;
+ bq2415x_set_mode(bq, bq->reported_mode);
+ dev_info(bq->dev, "automode enabled\n");
+ } else {
+ bq->automode = -1;
+ dev_info(bq->dev, "automode failed\n");
+ }
+ } else {
+ bq->automode = -1;
+ dev_info(bq->dev, "automode not supported\n");
+ }
+
+ INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work);
+ bq2415x_set_autotimer(bq, 1);
+
+ dev_info(bq->dev, "driver registered\n");
+ return 0;
+
+error_5:
+ bq2415x_sysfs_exit(bq);
+error_4:
+ bq2415x_power_supply_exit(bq);
+error_3:
+ kfree(bq);
+error_2:
+ kfree(name);
+error_1:
+ mutex_lock(&bq2415x_id_mutex);
+ idr_remove(&bq2415x_id, num);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ return ret;
+}
+
+/* main bq2415x remove function */
+static int bq2415x_remove(struct i2c_client *client)
+{
+ struct bq2415x_device *bq = i2c_get_clientdata(client);
+
+ if (bq->init_data.set_mode_hook)
+ bq->init_data.set_mode_hook(NULL, NULL);
+
+ bq2415x_sysfs_exit(bq);
+ bq2415x_power_supply_exit(bq);
+
+ bq2415x_reset_chip(bq);
+
+ mutex_lock(&bq2415x_id_mutex);
+ idr_remove(&bq2415x_id, bq->id);
+ mutex_unlock(&bq2415x_id_mutex);
+
+ dev_info(bq->dev, "driver unregistered\n");
+
+ kfree(bq->name);
+ kfree(bq);
+
+ return 0;
+}
+
+static const struct i2c_device_id bq2415x_i2c_id_table[] = {
+ { "bq2415x", BQUNKNOWN },
+ { "bq24150", BQ24150 },
+ { "bq24150a", BQ24150A },
+ { "bq24151", BQ24151 },
+ { "bq24151a", BQ24151A },
+ { "bq24152", BQ24152 },
+ { "bq24153", BQ24153 },
+ { "bq24153a", BQ24153A },
+ { "bq24155", BQ24155 },
+ { "bq24156", BQ24156 },
+ { "bq24156a", BQ24156A },
+ { "bq24158", BQ24158 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bq2415x_i2c_id_table);
+
+static struct i2c_driver bq2415x_driver = {
+ .driver = {
+ .name = "bq2415x-charger",
+ },
+ .probe = bq2415x_probe,
+ .remove = bq2415x_remove,
+ .id_table = bq2415x_i2c_id_table,
+};
+
+static int __init bq2415x_init(void)
+{
+ return i2c_add_driver(&bq2415x_driver);
+}
+module_init(bq2415x_init);
+
+static void __exit bq2415x_exit(void)
+{
+ i2c_del_driver(&bq2415x_driver);
+}
+module_exit(bq2415x_exit);
+
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("bq2415x charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index e0edaf7de54..36b34efdafc 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -230,6 +230,14 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
*/
static inline int bq27x00_battery_read_nac(struct bq27x00_device_info *di)
{
+ int flags;
+ bool is_bq27500 = di->chip == BQ27500;
+ bool is_higher = bq27xxx_is_chip_version_higher(di);
+
+ flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
+ if (flags >= 0 && !is_higher && (flags & BQ27000_FLAG_CI))
+ return -ENODATA;
+
return bq27x00_battery_read_charge(di, BQ27x00_REG_NAC);
}
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index adb3a4b59cb..6ba047f5ac2 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -239,44 +239,37 @@ static bool is_full_charged(struct charger_manager *cm)
int uV;
/* If there is no battery, it cannot be charged */
- if (!is_batt_present(cm)) {
- val.intval = 0;
- goto out;
- }
+ if (!is_batt_present(cm))
+ return false;
if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
+ val.intval = 0;
+
/* Not full if capacity of fuel gauge isn't full */
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_FULL, &val);
- if (!ret && val.intval > desc->fullbatt_full_capacity) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && val.intval > desc->fullbatt_full_capacity)
+ return true;
}
/* Full, if it's over the fullbatt voltage */
if (desc->fullbatt_uV > 0) {
ret = get_batt_uV(cm, &uV);
- if (!ret && uV >= desc->fullbatt_uV) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && uV >= desc->fullbatt_uV)
+ return true;
}
/* Full, if the capacity is more than fullbatt_soc */
if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
+ val.intval = 0;
+
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_CAPACITY, &val);
- if (!ret && val.intval >= desc->fullbatt_soc) {
- val.intval = 1;
- goto out;
- }
+ if (!ret && val.intval >= desc->fullbatt_soc)
+ return true;
}
- val.intval = 0;
-
-out:
- return val.intval ? true : false;
+ return false;
}
/**
@@ -489,8 +482,9 @@ static void fullbatt_vchk(struct work_struct *work)
return;
}
- diff = desc->fullbatt_uV;
- diff -= batt_uV;
+ diff = desc->fullbatt_uV - batt_uV;
+ if (diff < 0)
+ return;
dev_info(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index bb0df8917ad..3c5c2e459d7 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -440,8 +440,10 @@ static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
static irqreturn_t da9052_bat_irq(int irq, void *data)
{
struct da9052_battery *bat = data;
+ int virq;
- irq -= bat->da9052->irq_base;
+ virq = regmap_irq_get_virq(bat->da9052->irq_data, irq);
+ irq -= virq;
if (irq == DA9052_IRQ_CHGEND)
bat->status = POWER_SUPPLY_STATUS_FULL;
@@ -567,7 +569,7 @@ static struct power_supply template_battery = {
.get_property = da9052_bat_get_property,
};
-static const char *const da9052_bat_irqs[] = {
+static char *da9052_bat_irqs[] = {
"BATT TEMP",
"DCIN DET",
"DCIN REM",
@@ -576,12 +578,20 @@ static const char *const da9052_bat_irqs[] = {
"CHG END",
};
+static int da9052_bat_irq_bits[] = {
+ DA9052_IRQ_TBAT,
+ DA9052_IRQ_DCIN,
+ DA9052_IRQ_DCINREM,
+ DA9052_IRQ_VBUS,
+ DA9052_IRQ_VBUSREM,
+ DA9052_IRQ_CHGEND,
+};
+
static s32 da9052_bat_probe(struct platform_device *pdev)
{
struct da9052_pdata *pdata;
struct da9052_battery *bat;
int ret;
- int irq;
int i;
bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
@@ -602,15 +612,14 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
bat->psy.use_for_apm = 1;
for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- ret = request_threaded_irq(bat->da9052->irq_base + irq,
- NULL, da9052_bat_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- da9052_bat_irqs[i], bat);
+ ret = da9052_request_irq(bat->da9052,
+ da9052_bat_irq_bits[i], da9052_bat_irqs[i],
+ da9052_bat_irq, bat);
+
if (ret != 0) {
dev_err(bat->da9052->dev,
- "DA9052 failed to request %s IRQ %d: %d\n",
- da9052_bat_irqs[i], irq, ret);
+ "DA9052 failed to request %s IRQ: %d\n",
+ da9052_bat_irqs[i], ret);
goto err;
}
}
@@ -623,23 +632,20 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
return 0;
err:
- while (--i >= 0) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- free_irq(bat->da9052->irq_base + irq, bat);
- }
+ while (--i >= 0)
+ da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
kfree(bat);
return ret;
}
static int da9052_bat_remove(struct platform_device *pdev)
{
int i;
- int irq;
struct da9052_battery *bat = platform_get_drvdata(pdev);
- for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
- free_irq(bat->da9052->irq_base + irq, bat);
- }
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++)
+ da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
power_supply_unregister(&bat->psy);
kfree(bat);
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 6bb6e2f5ea8..2fa9b6bf1f3 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -80,13 +80,13 @@ static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb,
{
int ret;
- ret = swab16(i2c_smbus_read_word_data(info->client, reg_msb));
+ ret = i2c_smbus_read_word_data(info->client, reg_msb);
if (ret < 0) {
dev_err(&info->client->dev, "register read failed\n");
return ret;
}
- *val = ret;
+ *val = swab16(ret);
return 0;
}
diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/generic-adc-battery.c
index e902b088d52..32ce17e235c 100644
--- a/drivers/power/generic-adc-battery.c
+++ b/drivers/power/generic-adc-battery.c
@@ -279,7 +279,8 @@ static int gab_probe(struct platform_device *pdev)
}
memcpy(psy->properties, gab_props, sizeof(gab_props));
- properties = psy->properties + sizeof(gab_props);
+ properties = (enum power_supply_property *)
+ ((char *)psy->properties + sizeof(gab_props));
/*
* getting channel from iio and copying the battery properties
@@ -327,7 +328,7 @@ static int gab_probe(struct platform_device *pdev)
ret = request_any_context_irq(irq, gab_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", adc_bat);
- if (ret)
+ if (ret < 0)
goto err_gpio;
}
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 74ac69e0687..bf914893c6f 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -33,7 +33,6 @@ struct jz_battery {
struct jz_battery_platform_data *pdata;
struct platform_device *pdev;
- struct resource *mem;
void __iomem *base;
int irq;
@@ -244,13 +243,14 @@ static int jz_battery_probe(struct platform_device *pdev)
struct jz_battery_platform_data *pdata = pdev->dev.parent->platform_data;
struct jz_battery *jz_battery;
struct power_supply *battery;
+ struct resource *mem;
if (!pdata) {
dev_err(&pdev->dev, "No platform_data supplied\n");
return -ENXIO;
}
- jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
+ jz_battery = devm_kzalloc(&pdev->dev, sizeof(*jz_battery), GFP_KERNEL);
if (!jz_battery) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
return -ENOMEM;
@@ -260,33 +260,15 @@ static int jz_battery_probe(struct platform_device *pdev)
jz_battery->irq = platform_get_irq(pdev, 0);
if (jz_battery->irq < 0) {
- ret = jz_battery->irq;
dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
- goto err_free;
+ return jz_battery->irq;
}
- jz_battery->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!jz_battery->mem) {
- ret = -ENOENT;
- dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- goto err_free;
- }
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jz_battery->mem = request_mem_region(jz_battery->mem->start,
- resource_size(jz_battery->mem), pdev->name);
- if (!jz_battery->mem) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- goto err_free;
- }
-
- jz_battery->base = ioremap_nocache(jz_battery->mem->start,
- resource_size(jz_battery->mem));
- if (!jz_battery->base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- goto err_release_mem_region;
- }
+ jz_battery->base = devm_request_and_ioremap(&pdev->dev, mem);
+ if (!jz_battery->base)
+ return -EBUSY;
battery = &jz_battery->battery;
battery->name = pdata->info.name;
@@ -309,7 +291,7 @@ static int jz_battery_probe(struct platform_device *pdev)
jz_battery);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %d\n", ret);
- goto err_iounmap;
+ goto err;
}
disable_irq(jz_battery->irq);
@@ -366,13 +348,8 @@ err_free_gpio:
gpio_free(jz_battery->pdata->gpio_charge);
err_free_irq:
free_irq(jz_battery->irq, jz_battery);
-err_iounmap:
+err:
platform_set_drvdata(pdev, NULL);
- iounmap(jz_battery->base);
-err_release_mem_region:
- release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
-err_free:
- kfree(jz_battery);
return ret;
}
@@ -392,10 +369,6 @@ static int jz_battery_remove(struct platform_device *pdev)
free_irq(jz_battery->irq, jz_battery);
- iounmap(jz_battery->base);
- release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem));
- kfree(jz_battery);
-
return 0;
}
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index a1c51ac117f..22b6407c9ca 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -235,25 +235,14 @@ static int lp8788_get_battery_present(struct lp8788_charger *pchg,
return 0;
}
-static int lp8788_get_vbatt_adc(struct lp8788_charger *pchg,
- unsigned int *result)
+static int lp8788_get_vbatt_adc(struct lp8788_charger *pchg, int *result)
{
struct iio_channel *channel = pchg->chan[LP8788_VBATT];
- int scaleint;
- int scalepart;
- int ret;
if (!channel)
return -EINVAL;
- ret = iio_read_channel_scale(channel, &scaleint, &scalepart);
- if (ret != IIO_VAL_INT_PLUS_MICRO)
- return -EINVAL;
-
- /* unit: mV */
- *result = (scaleint + scalepart * 1000000) / 1000;
-
- return 0;
+ return iio_read_channel_processed(channel, result);
}
static int lp8788_get_battery_voltage(struct lp8788_charger *pchg,
@@ -268,7 +257,7 @@ static int lp8788_get_battery_capacity(struct lp8788_charger *pchg,
struct lp8788 *lp = pchg->lp;
struct lp8788_charger_platform_data *pdata = pchg->pdata;
unsigned int max_vbatt;
- unsigned int vbatt;
+ int vbatt;
enum lp8788_charging_state state;
u8 data;
int ret;
@@ -304,19 +293,18 @@ static int lp8788_get_battery_temperature(struct lp8788_charger *pchg,
union power_supply_propval *val)
{
struct iio_channel *channel = pchg->chan[LP8788_BATT_TEMP];
- int scaleint;
- int scalepart;
+ int result;
int ret;
if (!channel)
return -EINVAL;
- ret = iio_read_channel_scale(channel, &scaleint, &scalepart);
- if (ret != IIO_VAL_INT_PLUS_MICRO)
+ ret = iio_read_channel_processed(channel, &result);
+ if (ret < 0)
return -EINVAL;
/* unit: 0.1 'C */
- val->intval = (scaleint + scalepart * 1000000) / 100;
+ val->intval = result * 10;
return 0;
}
@@ -592,53 +580,22 @@ static void lp8788_irq_unregister(struct platform_device *pdev,
}
}
-static void lp8788_setup_adc_channel(struct lp8788_charger *pchg)
+static void lp8788_setup_adc_channel(const char *consumer_name,
+ struct lp8788_charger *pchg)
{
struct lp8788_charger_platform_data *pdata = pchg->pdata;
- struct device *dev = pchg->lp->dev;
struct iio_channel *chan;
- enum lp8788_adc_id id;
- const char *chan_name[LPADC_MAX] = {
- [LPADC_VBATT_5P5] = "vbatt-5p5",
- [LPADC_VBATT_6P0] = "vbatt-6p0",
- [LPADC_VBATT_5P0] = "vbatt-5p0",
- [LPADC_ADC1] = "adc1",
- [LPADC_ADC2] = "adc2",
- [LPADC_ADC3] = "adc3",
- [LPADC_ADC4] = "adc4",
- };
if (!pdata)
return;
- id = pdata->vbatt_adc;
- switch (id) {
- case LPADC_VBATT_5P5:
- case LPADC_VBATT_6P0:
- case LPADC_VBATT_5P0:
- chan = iio_channel_get(NULL, chan_name[id]);
- pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
- break;
- default:
- dev_err(dev, "invalid ADC id for VBATT: %d\n", id);
- pchg->chan[LP8788_VBATT] = NULL;
- break;
- }
+ /* ADC channel for battery voltage */
+ chan = iio_channel_get(consumer_name, pdata->adc_vbatt);
+ pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
- id = pdata->batt_temp_adc;
- switch (id) {
- case LPADC_ADC1:
- case LPADC_ADC2:
- case LPADC_ADC3:
- case LPADC_ADC4:
- chan = iio_channel_get(NULL, chan_name[id]);
- pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
- break;
- default:
- dev_err(dev, "invalid ADC id for BATT_TEMP : %d\n", id);
- pchg->chan[LP8788_BATT_TEMP] = NULL;
- break;
- }
+ /* ADC channel for battery temperature */
+ chan = iio_channel_get(consumer_name, pdata->adc_batt_temp);
+ pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
}
static void lp8788_release_adc_channel(struct lp8788_charger *pchg)
@@ -747,7 +704,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
if (ret)
return ret;
- lp8788_setup_adc_channel(pchg);
+ lp8788_setup_adc_channel(pdev->name, pchg);
ret = lp8788_psy_register(pdev, pchg);
if (ret)
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 5ffe46916f0..d664ef58afa 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -572,7 +572,8 @@ static int max17042_init_chip(struct max17042_chip *chip)
__func__);
return -EIO;
}
- max17042_verify_model_lock(chip);
+
+ ret = max17042_verify_model_lock(chip);
if (ret) {
dev_err(&chip->client->dev, "%s lock verify failed\n",
__func__);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 1a075f1f1b6..665cdc76c26 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -426,6 +427,54 @@ static int max8925_deinit_charger(struct max8925_power_info *info)
return 0;
}
+#ifdef CONFIG_OF
+static struct max8925_power_pdata *
+max8925_power_dt_init(struct platform_device *pdev)
+{
+ struct device_node *nproot = pdev->dev.parent->of_node;
+ struct device_node *np;
+ int batt_detect;
+ int topoff_threshold;
+ int fast_charge;
+ int no_temp_support;
+ int no_insert_detect;
+ struct max8925_power_pdata *pdata;
+
+ if (!nproot)
+ return pdev->dev.platform_data;
+
+ np = of_find_node_by_name(nproot, "charger");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find charger node\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_power_pdata),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ of_property_read_u32(np, "topoff-threshold", &topoff_threshold);
+ of_property_read_u32(np, "batt-detect", &batt_detect);
+ of_property_read_u32(np, "fast-charge", &fast_charge);
+ of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
+ of_property_read_u32(np, "no-temp-support", &no_temp_support);
+
+ pdata->batt_detect = batt_detect;
+ pdata->fast_charge = fast_charge;
+ pdata->topoff_threshold = topoff_threshold;
+ pdata->no_insert_detect = no_insert_detect;
+ pdata->no_temp_support = no_temp_support;
+
+ return pdata;
+}
+#else
+static struct max8925_power_pdata *
+max8925_power_dt_init(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+#endif
+
static int max8925_power_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -433,7 +482,7 @@ static int max8925_power_probe(struct platform_device *pdev)
struct max8925_power_info *info;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = max8925_power_dt_init(pdev);
if (!pdata) {
dev_err(&pdev->dev, "platform data isn't assigned to "
"power supply\n");
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index f77a41272e5..8a7cfb3cc16 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -216,6 +216,86 @@ static void psy_unregister_thermal(struct power_supply *psy)
return;
thermal_zone_device_unregister(psy->tzd);
}
+
+/* thermal cooling device callbacks */
+static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long *state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ ret = psy->get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (!ret)
+ *state = val.intval;
+
+ return ret;
+}
+
+static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long *state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ ret = psy->get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (!ret)
+ *state = val.intval;
+
+ return ret;
+}
+
+static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ unsigned long state)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ psy = tcd->devdata;
+ val.intval = state;
+ ret = psy->set_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+
+ return ret;
+}
+
+static struct thermal_cooling_device_ops psy_tcd_ops = {
+ .get_max_state = ps_get_max_charge_cntl_limit,
+ .get_cur_state = ps_get_cur_charge_cntl_limit,
+ .set_cur_state = ps_set_cur_charge_cntl_limit,
+};
+
+static int psy_register_cooler(struct power_supply *psy)
+{
+ int i;
+
+ /* Register for cooling device if psy can control charging */
+ for (i = 0; i < psy->num_properties; i++) {
+ if (psy->properties[i] ==
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) {
+ psy->tcd = thermal_cooling_device_register(
+ (char *)psy->name,
+ psy, &psy_tcd_ops);
+ if (IS_ERR(psy->tcd))
+ return PTR_ERR(psy->tcd);
+ break;
+ }
+ }
+ return 0;
+}
+
+static void psy_unregister_cooler(struct power_supply *psy)
+{
+ if (IS_ERR_OR_NULL(psy->tcd))
+ return;
+ thermal_cooling_device_unregister(psy->tcd);
+}
#else
static int psy_register_thermal(struct power_supply *psy)
{
@@ -225,6 +305,15 @@ static int psy_register_thermal(struct power_supply *psy)
static void psy_unregister_thermal(struct power_supply *psy)
{
}
+
+static int psy_register_cooler(struct power_supply *psy)
+{
+ return 0;
+}
+
+static void psy_unregister_cooler(struct power_supply *psy)
+{
+}
#endif
int power_supply_register(struct device *parent, struct power_supply *psy)
@@ -259,6 +348,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto register_thermal_failed;
+ rc = psy_register_cooler(psy);
+ if (rc)
+ goto register_cooler_failed;
+
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
@@ -268,6 +361,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
+ psy_unregister_cooler(psy);
+register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
device_del(dev);
@@ -284,6 +379,7 @@ void power_supply_unregister(struct power_supply *psy)
cancel_work_sync(&psy->changed_work);
sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
+ psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
device_unregister(psy->dev);
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 395c2cfa16c..40fa3b7cae5 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -164,6 +164,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_current_max),
POWER_SUPPLY_ATTR(constant_charge_voltage),
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
+ POWER_SUPPLY_ATTR(charge_control_limit),
+ POWER_SUPPLY_ATTR(charge_control_limit_max),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
POWER_SUPPLY_ATTR(energy_full),
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
new file mode 100644
index 00000000000..6461b489fb0
--- /dev/null
+++ b/drivers/power/reset/Kconfig
@@ -0,0 +1,15 @@
+menuconfig POWER_RESET
+ bool "Board level reset or power off"
+ help
+ Provides a number of drivers which either reset a complete board
+ or shut it down, by manipulating the main power supply on the board.
+
+ Say Y here to enable board reset and power off
+
+config POWER_RESET_GPIO
+ bool "GPIO power-off driver"
+ depends on OF_GPIO && POWER_RESET
+ help
+ This driver supports turning off your board via a GPIO line.
+ If your board needs a GPIO high/low to power down, say Y and
+ create a binding in your devicetree.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
new file mode 100644
index 00000000000..751488a4a0c
--- /dev/null
+++ b/drivers/power/reset/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
new file mode 100644
index 00000000000..0491e5335d0
--- /dev/null
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -0,0 +1,129 @@
+/*
+ * Toggles a GPIO pin to power down a device
+ *
+ * Jamie Lentin <jm@lentin.co.uk>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * Copyright (C) 2012 Jamie Lentin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/module.h>
+
+/*
+ * Hold configuration here, cannot be more than one instance of the driver
+ * since pm_power_off itself is global.
+ */
+static int gpio_num = -1;
+static int gpio_active_low;
+
+static void gpio_poweroff_do_poweroff(void)
+{
+ BUG_ON(gpio_num == -1);
+
+ /* drive it active */
+ gpio_direction_output(gpio_num, !gpio_active_low);
+ mdelay(100);
+ /* drive it inactive (active->inactive edge) */
+ gpio_set_value(gpio_num, gpio_active_low);
+ mdelay(100);
+ /* drive it active again (inactive->active edge) */
+ gpio_set_value(gpio_num, !gpio_active_low);
+
+ /* give it some time */
+ mdelay(3000);
+
+ WARN_ON(1);
+}
+
+static int __devinit gpio_poweroff_probe(struct platform_device *pdev)
+{
+ enum of_gpio_flags flags;
+ bool input = false;
+ int ret;
+
+ /* If a pm_power_off function has already been added, leave it alone */
+ if (pm_power_off != NULL) {
+ pr_err("%s: pm_power_off function already registered",
+ __func__);
+ return -EBUSY;
+ }
+
+ gpio_num = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+ if (gpio_num < 0) {
+ pr_err("%s: Could not get GPIO configuration: %d",
+ __func__, gpio_num);
+ return -ENODEV;
+ }
+ gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ if (of_get_property(pdev->dev.of_node, "input", NULL))
+ input = true;
+
+ ret = gpio_request(gpio_num, "poweroff-gpio");
+ if (ret) {
+ pr_err("%s: Could not get GPIO %d", __func__, gpio_num);
+ return ret;
+ }
+ if (input) {
+ if (gpio_direction_input(gpio_num)) {
+ pr_err("Could not set direction of GPIO %d to input",
+ gpio_num);
+ goto err;
+ }
+ } else {
+ if (gpio_direction_output(gpio_num, gpio_active_low)) {
+ pr_err("Could not set direction of GPIO %d", gpio_num);
+ goto err;
+ }
+ }
+
+ pm_power_off = &gpio_poweroff_do_poweroff;
+ return 0;
+
+err:
+ gpio_free(gpio_num);
+ return -ENODEV;
+}
+
+static int __devexit gpio_poweroff_remove(struct platform_device *pdev)
+{
+ if (gpio_num != -1)
+ gpio_free(gpio_num);
+ if (pm_power_off == &gpio_poweroff_do_poweroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id of_gpio_poweroff_match[] = {
+ { .compatible = "gpio-poweroff", },
+ {},
+};
+
+static struct platform_driver gpio_poweroff_driver = {
+ .probe = gpio_poweroff_probe,
+ .remove = __devexit_p(gpio_poweroff_remove),
+ .driver = {
+ .name = "poweroff-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_gpio_poweroff_match,
+ },
+};
+
+module_platform_driver(gpio_poweroff_driver);
+
+MODULE_AUTHOR("Jamie Lentin <jm@lentin.co.uk>");
+MODULE_DESCRIPTION("GPIO poweroff driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:poweroff-gpio");
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
new file mode 100644
index 00000000000..ca49d6c0ee9
--- /dev/null
+++ b/drivers/power/rx51_battery.c
@@ -0,0 +1,251 @@
+/*
+ * Nokia RX-51 battery driver
+ *
+ * Copyright (C) 2012 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/i2c/twl4030-madc.h>
+
+struct rx51_device_info {
+ struct device *dev;
+ struct power_supply bat;
+};
+
+/*
+ * Read ADCIN channel value, code copied from maemo kernel
+ */
+static int rx51_battery_read_adc(int channel)
+{
+ struct twl4030_madc_request req;
+
+ req.channels = 1 << channel;
+ req.do_avg = 1;
+ req.method = TWL4030_MADC_SW1;
+ req.func_cb = NULL;
+ req.type = TWL4030_MADC_WAIT;
+
+ if (twl4030_madc_conversion(&req) <= 0)
+ return -ENODATA;
+
+ return req.rbuf[channel];
+}
+
+/*
+ * Read ADCIN channel 12 (voltage) and convert the RAW value to microvolts
+ * This conversion formula was extracted from maemo program bsi-read
+ */
+static int rx51_battery_read_voltage(struct rx51_device_info *di)
+{
+ int voltage = rx51_battery_read_adc(12);
+
+ if (voltage < 0)
+ return voltage;
+
+ return 1000 * (10000 * voltage / 1705);
+}
+
+/*
+ * Temperature look-up tables
+ * TEMP = (1/(t1 + 1/298) - 273.15)
+ * Where t1 = (1/B) * ln((RAW_ADC_U * 2.5)/(R * I * 255))
+ * Formula is based on experimental data, RX-51 CAL data, maemo program bme
+ * and formula from da9052 driver with values R = 100, B = 3380, I = 0.00671
+ */
+
+/*
+ * Table1 (temperature for first 25 RAW values)
+ * Usage: TEMP = rx51_temp_table1[RAW]
+ * RAW is between 1 and 24
+ * TEMP is between 201 C and 55 C
+ */
+static u8 rx51_temp_table1[] = {
+ 255, 201, 159, 138, 124, 114, 106, 99, 94, 89, 85, 82, 78, 75,
+ 73, 70, 68, 66, 64, 62, 61, 59, 57, 56, 55
+};
+
+/*
+ * Table2 (lowest RAW value for temperature)
+ * Usage: RAW = rx51_temp_table2[TEMP-rx51_temp_table2_first]
+ * TEMP is between 53 C and -32 C
+ * RAW is between 25 and 993
+ */
+#define rx51_temp_table2_first 53
+static u16 rx51_temp_table2[] = {
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39,
+ 40, 41, 43, 44, 46, 48, 49, 51, 53, 55, 57, 59, 61, 64,
+ 66, 69, 71, 74, 77, 80, 83, 86, 90, 94, 97, 101, 106, 110,
+ 115, 119, 125, 130, 136, 141, 148, 154, 161, 168, 176, 184, 202, 211,
+ 221, 231, 242, 254, 266, 279, 293, 308, 323, 340, 357, 375, 395, 415,
+ 437, 460, 485, 511, 539, 568, 600, 633, 669, 706, 747, 790, 836, 885,
+ 937, 993, 1024
+};
+
+/*
+ * Read ADCIN channel 0 (battery temp) and convert value to tenths of Celsius
+ * Use the temperature look-up tables for the conversion
+ */
+static int rx51_battery_read_temperature(struct rx51_device_info *di)
+{
+ int min = 0;
+ int max = ARRAY_SIZE(rx51_temp_table2) - 1;
+ int raw = rx51_battery_read_adc(0);
+
+ /* Zero and negative values are undefined */
+ if (raw <= 0)
+ return INT_MAX;
+
+ /* ADC channels are 10 bit, higher value are undefined */
+ if (raw >= (1 << 10))
+ return INT_MIN;
+
+ /* First check for temperature in first direct table */
+ if (raw < ARRAY_SIZE(rx51_temp_table1))
+ return rx51_temp_table1[raw] * 100;
+
+ /* Binary search RAW value in second inverse table */
+ while (max - min > 1) {
+ int mid = (max + min) / 2;
+ if (rx51_temp_table2[mid] <= raw)
+ min = mid;
+ else if (rx51_temp_table2[mid] > raw)
+ max = mid;
+ if (rx51_temp_table2[mid] == raw)
+ break;
+ }
+
+ return (rx51_temp_table2_first - min) * 100;
+}
+
+/*
+ * Read ADCIN channel 4 (BSI) and convert RAW value to micro Ah
+ * This conversion formula was extracted from maemo program bsi-read
+ */
+static int rx51_battery_read_capacity(struct rx51_device_info *di)
+{
+ int capacity = rx51_battery_read_adc(4);
+
+ if (capacity < 0)
+ return capacity;
+
+ return 1280 * (1200 * capacity) / (1024 - capacity);
+}
+
+/*
+ * Return power_supply property
+ */
+static int rx51_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rx51_device_info *di = container_of((psy),
+ struct rx51_device_info, bat);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = 4200000;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = rx51_battery_read_voltage(di) ? 1 : 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = rx51_battery_read_voltage(di);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = rx51_battery_read_temperature(di);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = rx51_battery_read_capacity(di);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val->intval == INT_MAX || val->intval == INT_MIN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum power_supply_property rx51_battery_props[] = {
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+};
+
+static int __devinit rx51_battery_probe(struct platform_device *pdev)
+{
+ struct rx51_device_info *di;
+ int ret;
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, di);
+
+ di->bat.name = dev_name(&pdev->dev);
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = rx51_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(rx51_battery_props);
+ di->bat.get_property = rx51_battery_get_property;
+
+ ret = power_supply_register(&pdev->dev, &di->bat);
+ if (ret) {
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit rx51_battery_remove(struct platform_device *pdev)
+{
+ struct rx51_device_info *di = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&di->bat);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static struct platform_driver rx51_battery_driver = {
+ .probe = rx51_battery_probe,
+ .remove = __devexit_p(rx51_battery_remove),
+ .driver = {
+ .name = "rx51-battery",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(rx51_battery_driver);
+
+MODULE_ALIAS("platform:rx51-battery");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("Nokia RX-51 battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f9e70cf0819..a69d0d11b54 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -114,12 +114,12 @@ static int twl4030_clear_set(u8 mod_no, u8 clear, u8 set, u8 reg)
static int twl4030_bci_read(u8 reg, u8 *val)
{
- return twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, val, reg);
+ return twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, val, reg);
}
static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
{
- return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, clear,
+ return twl4030_clear_set(TWL_MODULE_PM_MASTER, clear,
TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
TWL4030_PM_MASTER_BOOT_BCI);
}
@@ -152,7 +152,7 @@ static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
int ret;
u8 hwsts;
- ret = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &hwsts,
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &hwsts,
TWL4030_PM_MASTER_STS_HW_CONDITIONS);
if (ret < 0)
return 0;
@@ -199,7 +199,7 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
return ret;
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
- ret = twl4030_clear_set(TWL4030_MODULE_MAIN_CHARGE, 0,
+ ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
} else {
ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
@@ -238,7 +238,7 @@ static int twl4030_charger_enable_backup(int uvolt, int uamp)
if (uvolt < 2500000 ||
uamp < 25) {
/* disable charging of backup battery */
- ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ ret = twl4030_clear_set(TWL_MODULE_PM_RECEIVER,
TWL4030_BBCHEN, 0, TWL4030_BB_CFG);
return ret;
}
@@ -262,7 +262,7 @@ static int twl4030_charger_enable_backup(int uvolt, int uamp)
else
flags |= TWL4030_BBISEL_25uA;
- ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ ret = twl4030_clear_set(TWL_MODULE_PM_RECEIVER,
TWL4030_BBSEL_MASK | TWL4030_BBISEL_MASK,
flags,
TWL4030_BB_CFG);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index ed81720e7b2..e513cd99817 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -112,6 +112,17 @@ config PWM_SAMSUNG
To compile this driver as a module, choose M here: the module
will be called pwm-samsung.
+config PWM_SPEAR
+ tristate "STMicroelectronics SPEAr PWM support"
+ depends on PLAT_SPEAR
+ depends on OF
+ help
+ Generic PWM framework driver for the PWM controller on ST
+ SPEAr SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-spear.
+
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA
@@ -125,6 +136,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
depends on SOC_AM33XX
+ select PWM_TIPWMSS
help
PWM driver support for the ECAP APWM controller found on AM33XX
TI SOC
@@ -135,6 +147,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
depends on SOC_AM33XX
+ select PWM_TIPWMSS
help
PWM driver support for the EHRPWM controller found on AM33XX
TI SOC
@@ -142,14 +155,32 @@ config PWM_TIEHRPWM
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
-config PWM_TWL6030
- tristate "TWL6030 PWM support"
+config PWM_TIPWMSS
+ bool
+ depends on SOC_AM33XX && (PWM_TIEHRPWM || PWM_TIECAP)
+ help
+ PWM Subsystem driver support for AM33xx SOC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
+config PWM_TWL
+ tristate "TWL4030/6030 PWM support"
+ depends on TWL4030_CORE
+ help
+ Generic PWM framework driver for TWL4030/6030.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-twl.
+
+config PWM_TWL_LED
+ tristate "TWL4030/6030 PWM support for LED drivers"
depends on TWL4030_CORE
help
- Generic PWM framework driver for TWL6030.
+ Generic PWM framework driver for TWL4030/6030 LED terminals.
To compile this driver as a module, choose M here: the module
- will be called pwm-twl6030.
+ will be called pwm-twl-led.
config PWM_VT8500
tristate "vt8500 pwm support"
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index acfe4821c58..62a2963cfe5 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,8 +8,11 @@ obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
+obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
-obj-$(CONFIG_PWM_TWL6030) += pwm-twl6030.o
+obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
+obj-$(CONFIG_PWM_TWL) += pwm-twl.o
+obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f5acdaa5270..903138b1884 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -32,6 +32,9 @@
#define MAX_PWMS 1024
+/* flags in the third cell of the DT PWM specifier */
+#define PWM_SPEC_POLARITY (1 << 0)
+
static DEFINE_MUTEX(pwm_lookup_lock);
static LIST_HEAD(pwm_lookup_list);
static DEFINE_MUTEX(pwm_lock);
@@ -129,6 +132,32 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
return 0;
}
+struct pwm_device *
+of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (pc->of_pwm_n_cells < 3)
+ return ERR_PTR(-EINVAL);
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[1]);
+
+ if (args->args[2] & PWM_SPEC_POLARITY)
+ pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
+ else
+ pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
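A minimal sketch of how a PWM chip driver opts into this three-cell <index period flags> translation; the chip pointer here is hypothetical:

/* Hypothetical probe fragment: let the core parse period and polarity flags. */
chip->of_xlate = of_pwm_xlate_with_flags;
chip->of_pwm_n_cells = 3;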
+
static struct pwm_device *
of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
{
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 8f26e9fcea9..65a86bdeabe 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -235,7 +235,7 @@ static int imx_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_pwm_dt_ids, &pdev->dev);
- struct imx_pwm_data *data;
+ const struct imx_pwm_data *data;
struct imx_chip *imx;
struct resource *r;
int ret = 0;
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 015a8223562..14106440294 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -49,9 +49,24 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
c = 0; /* 0 set division by 256 */
period_cycles = c;
+ /* The duty-cycle value is as follows:
+ *
+ * DUTY-CYCLE HIGH LEVEL
+ * 1 99.9%
+ * 25 90.0%
+ * 128 50.0%
+ * 220 10.0%
+ * 255 0.1%
+ * 0 0.0%
+ *
+ * In other words, the register value is duty-cycle % 256 with
+ * duty-cycle in the range 1-256.
+ */
c = 256 * duty_ns;
do_div(c, period_ns);
- duty_cycles = c;
+ if (c > 255)
+ c = 255;
+ duty_cycles = 256 - c;
writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles),
lpc32xx->base + (pwm->hwpwm << 2));
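A worked example of the inverted duty mapping described in the comment above (illustrative only):

/*
 * reg = 256 - clamp(256 * duty_ns / period_ns, 0, 255)
 *
 *  50% duty: c = 128 -> reg = 128 (50.0% high, per the table above)
 * 100% duty: c = 256 -> clamped to 255 -> reg = 1 (99.9% high)
 */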
@@ -106,6 +121,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.dev = &pdev->dev;
lpc32xx->chip.ops = &lpc32xx_pwm_ops;
lpc32xx->chip.npwm = 2;
+ lpc32xx->chip.base = -1;
ret = pwmchip_add(&lpc32xx->chip);
if (ret < 0) {
@@ -121,8 +137,11 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
static int lpc32xx_pwm_remove(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < lpc32xx->chip.npwm; i++)
+ pwm_disable(&lpc32xx->chip.pwms[i]);
- clk_disable(lpc32xx->clk);
return pwmchip_remove(&lpc32xx->chip);
}
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index e9b15d099c0..5207e6cd864 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -222,6 +222,7 @@ static int s3c_pwm_probe(struct platform_device *pdev)
/* calculate base of control bits in TCON */
s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
+ s3c->pwm_id = id;
s3c->chip.dev = &pdev->dev;
s3c->chip.ops = &s3c_pwm_ops;
s3c->chip.base = -1;
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
new file mode 100644
index 00000000000..83b21d9d5cf
--- /dev/null
+++ b/drivers/pwm/pwm-spear.c
@@ -0,0 +1,276 @@
+/*
+ * ST Microelectronics SPEAr Pulse Width Modulator driver
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define NUM_PWM 4
+
+/* PWM registers and bits definitions */
+#define PWMCR 0x00 /* Control Register */
+#define PWMCR_PWM_ENABLE 0x1
+#define PWMCR_PRESCALE_SHIFT 2
+#define PWMCR_MIN_PRESCALE 0x00
+#define PWMCR_MAX_PRESCALE 0x3FFF
+
+#define PWMDCR 0x04 /* Duty Cycle Register */
+#define PWMDCR_MIN_DUTY 0x0001
+#define PWMDCR_MAX_DUTY 0xFFFF
+
+#define PWMPCR 0x08 /* Period Register */
+#define PWMPCR_MIN_PERIOD 0x0001
+#define PWMPCR_MAX_PERIOD 0xFFFF
+
+/* The following is only available on SPEAr13xx SoCs */
+#define PWMMCR 0x3C /* Master Control Register */
+#define PWMMCR_PWM_ENABLE 0x1
+
+/**
+ * struct spear_pwm_chip - struct representing pwm chip
+ *
+ * @mmio_base: base address of pwm chip
+ * @clk: pointer to clk structure of pwm chip
+ * @chip: linux pwm chip representation
+ * @dev: pointer to device structure of pwm chip
+ */
+struct spear_pwm_chip {
+ void __iomem *mmio_base;
+ struct clk *clk;
+ struct pwm_chip chip;
+ struct device *dev;
+};
+
+static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct spear_pwm_chip, chip);
+}
+
+static inline u32 spear_pwm_readl(struct spear_pwm_chip *chip, unsigned int num,
+ unsigned long offset)
+{
+ return readl_relaxed(chip->mmio_base + (num << 4) + offset);
+}
+
+static inline void spear_pwm_writel(struct spear_pwm_chip *chip,
+ unsigned int num, unsigned long offset,
+ unsigned long val)
+{
+ writel_relaxed(val, chip->mmio_base + (num << 4) + offset);
+}
+
+static int spear_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ u64 val, div, clk_rate;
+ unsigned long prescale = PWMCR_MIN_PRESCALE, pv, dc;
+ int ret;
+
+ /*
+ * Find pv, dc and prescale to suit duty_ns and period_ns. This is done
+ * according to formulas described below:
+ *
+ * period_ns = 10^9 * (PRESCALE + 1) * PV / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ *
+ * PV = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
+ * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
+ */
+ clk_rate = clk_get_rate(pc->clk);
+ while (1) {
+ div = 1000000000;
+ div *= 1 + prescale;
+ val = clk_rate * period_ns;
+ pv = div64_u64(val, div);
+ val = clk_rate * duty_ns;
+ dc = div64_u64(val, div);
+
+ /* if duty_ns and period_ns are not achievable then return */
+ if (pv < PWMPCR_MIN_PERIOD || dc < PWMDCR_MIN_DUTY)
+ return -EINVAL;
+
+ /*
+ * if pv and dc have crossed their upper limit, then increase
+ * prescale and recalculate pv and dc.
+ */
+ if (pv > PWMPCR_MAX_PERIOD || dc > PWMDCR_MAX_DUTY) {
+ if (++prescale > PWMCR_MAX_PRESCALE)
+ return -EINVAL;
+ continue;
+ }
+ break;
+ }
+
+ /*
+ * NOTE: the clock to PWM has to be enabled first before writing to the
+ * registers.
+ */
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR,
+ prescale << PWMCR_PRESCALE_SHIFT);
+ spear_pwm_writel(pc, pwm->hwpwm, PWMDCR, dc);
+ spear_pwm_writel(pc, pwm->hwpwm, PWMPCR, pv);
+ clk_disable(pc->clk);
+
+ return 0;
+}
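A worked pass through the search above, assuming a hypothetical 48 MHz PWM clock and a 100 Hz, 50% output (the real rate comes from the SoC clock tree):

/*
 * clk_rate = 48 MHz, period_ns = 10,000,000, duty_ns = 5,000,000:
 *
 *   prescale 0..6: PV = 48e6 * 1e7 / (1e9 * (prescale + 1)) > 65535 -> retry
 *   prescale 7:    PV = 480000 / 8 = 60000, DC = 30000              -> fits
 */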
+
+static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ int rc = 0;
+ u32 val;
+
+ rc = clk_enable(pc->clk);
+ if (rc)
+ return rc;
+
+ val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
+ val |= PWMCR_PWM_ENABLE;
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
+
+ return 0;
+}
+
+static void spear_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct spear_pwm_chip *pc = to_spear_pwm_chip(chip);
+ u32 val;
+
+ val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
+ val &= ~PWMCR_PWM_ENABLE;
+ spear_pwm_writel(pc, pwm->hwpwm, PWMCR, val);
+
+ clk_disable(pc->clk);
+}
+
+static const struct pwm_ops spear_pwm_ops = {
+ .config = spear_pwm_config,
+ .enable = spear_pwm_enable,
+ .disable = spear_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int spear_pwm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_pwm_chip *pc;
+ struct resource *r;
+ int ret;
+ u32 val;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resources defined\n");
+ return -ENODEV;
+ }
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!pc->mmio_base)
+ return -EADDRNOTAVAIL;
+
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk))
+ return PTR_ERR(pc->clk);
+
+ pc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pc);
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &spear_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = NUM_PWM;
+
+ ret = clk_prepare(pc->clk);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(np, "st,spear1340-pwm")) {
+ ret = clk_enable(pc->clk);
+ if (ret) {
+ clk_unprepare(pc->clk);
+ return ret;
+ }
+ /*
+ * The following enables the PWM chip; channels are still
+ * enabled individually through their control registers
+ */
+ val = readl_relaxed(pc->mmio_base + PWMMCR);
+ val |= PWMMCR_PWM_ENABLE;
+ writel_relaxed(val, pc->mmio_base + PWMMCR);
+
+ clk_disable(pc->clk);
+ }
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret) {
+ clk_unprepare(pc->clk);
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int spear_pwm_remove(struct platform_device *pdev)
+{
+ struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < NUM_PWM; i++)
+ pwm_disable(&pc->chip.pwms[i]);
+
+ /* clk was prepared in probe, hence unprepare it here */
+ clk_unprepare(pc->clk);
+ return pwmchip_remove(&pc->chip);
+}
+
+static struct of_device_id spear_pwm_of_match[] = {
+ { .compatible = "st,spear320-pwm" },
+ { .compatible = "st,spear1340-pwm" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, spear_pwm_of_match);
+
+static struct platform_driver spear_pwm_driver = {
+ .driver = {
+ .name = "spear-pwm",
+ .of_match_table = spear_pwm_of_match,
+ },
+ .probe = spear_pwm_probe,
+ .remove = spear_pwm_remove,
+};
+
+module_platform_driver(spear_pwm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>");
+MODULE_ALIAS("platform:spear-pwm");
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 87c091b245c..5cf016dd982 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -25,6 +25,10 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "pwm-tipwmss.h"
/* ECAP registers and bits definitions */
#define CAP1 0x08
@@ -184,12 +188,24 @@ static const struct pwm_ops ecap_pwm_ops = {
.owner = THIS_MODULE,
};
+static const struct of_device_id ecap_of_match[] = {
+ { .compatible = "ti,am33xx-ecap" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ecap_of_match);
+
static int ecap_pwm_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
struct clk *clk;
struct ecap_pwm_chip *pc;
+ u16 status;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
@@ -211,6 +227,8 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ecap_pwm_ops;
+ pc->chip.of_xlate = of_pwm_xlate_with_flags;
+ pc->chip.of_pwm_n_cells = 3;
pc->chip.base = -1;
pc->chip.npwm = 1;
@@ -231,14 +249,40 @@ static int ecap_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ status = pwmss_submodule_state_change(pdev->dev.parent,
+ PWMSS_ECAPCLK_EN);
+ if (!(status & PWMSS_ECAPCLK_EN_ACK)) {
+ dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
+ ret = -EINVAL;
+ goto pwmss_clk_failure;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
platform_set_drvdata(pdev, pc);
return 0;
+
+pwmss_clk_failure:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pwmchip_remove(&pc->chip);
+ return ret;
}
static int ecap_pwm_remove(struct platform_device *pdev)
{
struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ /*
+ * Due to hardware misbehaviour, the acknowledgement of stop_req
+ * is missing. Hence the status-bit check is skipped.
+ */
+ pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
+ pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
@@ -246,7 +290,9 @@ static int ecap_pwm_remove(struct platform_device *pdev)
static struct platform_driver ecap_pwm_driver = {
.driver = {
- .name = "ecap",
+ .name = "ecap",
+ .owner = THIS_MODULE,
+ .of_match_table = ecap_of_match,
},
.probe = ecap_pwm_probe,
.remove = ecap_pwm_remove,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 9ffd389d0c8..72a6dd40c9e 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -25,6 +25,10 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "pwm-tipwmss.h"
/* EHRPWM registers and bits definitions */
@@ -115,6 +119,7 @@ struct ehrpwm_pwm_chip {
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
enum pwm_polarity polarity[NUM_PWM_CHANNEL];
+ struct clk *tbclk;
};
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -335,6 +340,9 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Channels polarity can be configured from action qualifier module */
configure_polarity(pc, pwm->hwpwm);
+ /* Enable TBCLK before enabling PWM device */
+ clk_enable(pc->tbclk);
+
/* Enable time counter for free_run */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
return 0;
@@ -363,6 +371,9 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+ /* Disable TBCLK when disabling the PWM */
+ clk_disable(pc->tbclk);
+
/* Stop Time base counter */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
@@ -392,12 +403,24 @@ static const struct pwm_ops ehrpwm_pwm_ops = {
.owner = THIS_MODULE,
};
+static const struct of_device_id ehrpwm_of_match[] = {
+ { .compatible = "ti,am33xx-ehrpwm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehrpwm_of_match);
+
static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
struct clk *clk;
struct ehrpwm_pwm_chip *pc;
+ u16 status;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev, "unable to select pin group\n");
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
@@ -419,6 +442,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &ehrpwm_pwm_ops;
+ pc->chip.of_xlate = of_pwm_xlate_with_flags;
+ pc->chip.of_pwm_n_cells = 3;
pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
@@ -432,6 +457,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
if (!pc->mmio_base)
return -EADDRNOTAVAIL;
+ /* Acquire tbclk for Time Base EHRPWM submodule */
+ pc->tbclk = devm_clk_get(&pdev->dev, "tbclk");
+ if (IS_ERR(pc->tbclk)) {
+ dev_err(&pdev->dev, "Failed to get tbclk\n");
+ return PTR_ERR(pc->tbclk);
+ }
+
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
@@ -439,14 +471,40 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ status = pwmss_submodule_state_change(pdev->dev.parent,
+ PWMSS_EPWMCLK_EN);
+ if (!(status & PWMSS_EPWMCLK_EN_ACK)) {
+ dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
+ ret = -EINVAL;
+ goto pwmss_clk_failure;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
platform_set_drvdata(pdev, pc);
return 0;
+
+pwmss_clk_failure:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pwmchip_remove(&pc->chip);
+ return ret;
}
static int ehrpwm_pwm_remove(struct platform_device *pdev)
{
struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ /*
+ * Due to hardware misbehaviour, the acknowledgement of stop_req
+ * is missing. Hence the status-bit check is skipped.
+ */
+ pwmss_submodule_state_change(pdev->dev.parent, PWMSS_EPWMCLK_STOP_REQ);
+ pm_runtime_put_sync(&pdev->dev);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
@@ -454,7 +512,9 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
- .name = "ehrpwm",
+ .name = "ehrpwm",
+ .owner = THIS_MODULE,
+ .of_match_table = ehrpwm_of_match,
},
.probe = ehrpwm_pwm_probe,
.remove = ehrpwm_pwm_remove,
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
new file mode 100644
index 00000000000..3448a1c8859
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -0,0 +1,139 @@
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+
+#include "pwm-tipwmss.h"
+
+#define PWMSS_CLKCONFIG 0x8 /* Clock gating reg */
+#define PWMSS_CLKSTATUS 0xc /* Clock gating status reg */
+
+struct pwmss_info {
+ void __iomem *mmio_base;
+ struct mutex pwmss_lock;
+ u16 pwmss_clkconfig;
+};
+
+u16 pwmss_submodule_state_change(struct device *dev, int set)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+ u16 val;
+
+ mutex_lock(&info->pwmss_lock);
+ val = readw(info->mmio_base + PWMSS_CLKCONFIG);
+ val |= set;
+ writew(val, info->mmio_base + PWMSS_CLKCONFIG);
+ mutex_unlock(&info->pwmss_lock);
+
+ return readw(info->mmio_base + PWMSS_CLKSTATUS);
+}
+EXPORT_SYMBOL(pwmss_submodule_state_change);
+
+static const struct of_device_id pwmss_of_match[] = {
+ { .compatible = "ti,am33xx-pwmss" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pwmss_of_match);
+
+static int pwmss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct pwmss_info *info;
+ struct device_node *node = pdev->dev.of_node;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&info->pwmss_lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ info->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!info->mmio_base)
+ return -EADDRNOTAVAIL;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ platform_set_drvdata(pdev, info);
+
+ /* Populate all the child nodes here... */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "no child node found\n");
+
+ return ret;
+}
+
+static int pwmss_remove(struct platform_device *pdev)
+{
+ struct pwmss_info *info = platform_get_drvdata(pdev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ mutex_destroy(&info->pwmss_lock);
+ return 0;
+}
+
+static int pwmss_suspend(struct device *dev)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+
+ info->pwmss_clkconfig = readw(info->mmio_base + PWMSS_CLKCONFIG);
+ pm_runtime_put_sync(dev);
+ return 0;
+}
+
+static int pwmss_resume(struct device *dev)
+{
+ struct pwmss_info *info = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
+
+static struct platform_driver pwmss_driver = {
+ .driver = {
+ .name = "pwmss",
+ .owner = THIS_MODULE,
+ .pm = &pwmss_pm_ops,
+ .of_match_table = pwmss_of_match,
+ },
+ .probe = pwmss_probe,
+ .remove = pwmss_remove,
+};
+
+module_platform_driver(pwmss_driver);
+
+MODULE_DESCRIPTION("PWM Subsystem driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-tipwmss.h b/drivers/pwm/pwm-tipwmss.h
new file mode 100644
index 00000000000..11f76a1e266
--- /dev/null
+++ b/drivers/pwm/pwm-tipwmss.h
@@ -0,0 +1,39 @@
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TIPWMSS_H
+#define __TIPWMSS_H
+
+#ifdef CONFIG_PWM_TIPWMSS
+/* PWM subsystem clock gating */
+#define PWMSS_ECAPCLK_EN BIT(0)
+#define PWMSS_ECAPCLK_STOP_REQ BIT(1)
+#define PWMSS_EPWMCLK_EN BIT(8)
+#define PWMSS_EPWMCLK_STOP_REQ BIT(9)
+
+#define PWMSS_ECAPCLK_EN_ACK BIT(0)
+#define PWMSS_EPWMCLK_EN_ACK BIT(8)
+
+extern u16 pwmss_submodule_state_change(struct device *dev, int set);
+#else
+static inline u16 pwmss_submodule_state_change(struct device *dev, int set)
+{
+ /* return success status value */
+ return 0xFFFF;
+}
+#endif
+#endif /* __TIPWMSS_H */
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
new file mode 100644
index 00000000000..9dfa0f3eca3
--- /dev/null
+++ b/drivers/pwm/pwm-twl-led.c
@@ -0,0 +1,344 @@
+/*
+ * Driver for TWL4030/6030 Pulse Width Modulator used as LED driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This driver is a complete rewrite of the former pwm-twl6030.c authored by:
+ * Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+/*
+ * This driver handles the PWM driven LED terminals of TWL4030 and TWL6030.
+ * To generate the signal on TWL4030:
+ * - LEDA uses PWMA
+ * - LEDB uses PWMB
+ * TWL6030 has one LED pin with dedicated LEDPWM
+ */
+
+#define TWL4030_LED_MAX 0x7f
+#define TWL6030_LED_MAX 0xff
+
+/* Registers, bits and macro for TWL4030 */
+#define TWL4030_LEDEN_REG 0x00
+#define TWL4030_PWMA_REG 0x01
+
+#define TWL4030_LEDXON (1 << 0)
+#define TWL4030_LEDXPWM (1 << 4)
+#define TWL4030_LED_PINS (TWL4030_LEDXON | TWL4030_LEDXPWM)
+#define TWL4030_LED_TOGGLE(led, x) ((x) << (led))
+
+/* Register, bits and macro for TWL6030 */
+#define TWL6030_LED_PWM_CTRL1 0xf4
+#define TWL6030_LED_PWM_CTRL2 0xf5
+
+#define TWL6040_LED_MODE_HW 0x00
+#define TWL6040_LED_MODE_ON 0x01
+#define TWL6040_LED_MODE_OFF 0x02
+#define TWL6040_LED_MODE_MASK 0x03
+
+struct twl_pwmled_chip {
+ struct pwm_chip chip;
+ struct mutex mutex;
+};
+
+static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
+{
+ return container_of(chip, struct twl_pwmled_chip, chip);
+}
+
+static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = DIV_ROUND_UP(duty_ns * TWL4030_LED_MAX, period_ns) + 1;
+ u8 pwm_config[2] = { 1, 0 };
+ int base, ret;
+
+ /*
+ * To configure the duty period:
+ * On-cycle is set to 1 (the minimum allowed value)
+ * The off time of 0 is not configurable, so the mapping is:
+ * 0 -> off cycle = 2,
+ * 1 -> off cycle = 2,
+ * 2 -> off cycle = 3,
+ * 126 -> off cycle = 127,
+ * 127 -> off cycle = 1
+ * When on cycle == off cycle the PWM will always be on
+ */
+ if (duty_cycle == 1)
+ duty_cycle = 2;
+ else if (duty_cycle > TWL4030_LED_MAX)
+ duty_cycle = 1;
+
+ base = pwm->hwpwm * 2 + TWL4030_PWMA_REG;
+
+ pwm_config[1] = duty_cycle;
+
+ ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
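Worked values for the off-cycle mapping described in the comment above (illustrative only):

/*
 * duty_cycle = DIV_ROUND_UP(duty_ns * 127, period_ns) + 1, then remapped:
 *
 *    0% duty -> 0 + 1 = 1     -> written as 2 (minimum off time)
 *   50% duty -> 64 + 1 = 65
 *  100% duty -> 127 + 1 = 128 -> written as 1 (on == off, always on)
 */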
+
+static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwmled_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL4030_LED_TOGGLE(pwm->hwpwm, TWL4030_LED_PINS);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = (duty_ns * TWL6030_LED_MAX) / period_ns;
+ u8 on_time;
+ int ret;
+
+ on_time = duty_cycle & 0xff;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
+ TWL6030_LED_PWM_CTRL1);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
+
+static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_ON;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl6030_pwmled_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_OFF;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_OFF;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwmled_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6040_LED_MODE_MASK;
+ val |= TWL6040_LED_MODE_HW;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static const struct pwm_ops twl4030_pwmled_ops = {
+ .enable = twl4030_pwmled_enable,
+ .disable = twl4030_pwmled_disable,
+ .config = twl4030_pwmled_config,
+};
+
+static const struct pwm_ops twl6030_pwmled_ops = {
+ .enable = twl6030_pwmled_enable,
+ .disable = twl6030_pwmled_disable,
+ .config = twl6030_pwmled_config,
+ .request = twl6030_pwmled_request,
+ .free = twl6030_pwmled_free,
+};
+
+static int twl_pwmled_probe(struct platform_device *pdev)
+{
+ struct twl_pwmled_chip *twl;
+ int ret;
+
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
+ if (!twl)
+ return -ENOMEM;
+
+ if (twl_class_is_4030()) {
+ twl->chip.ops = &twl4030_pwmled_ops;
+ twl->chip.npwm = 2;
+ } else {
+ twl->chip.ops = &twl6030_pwmled_ops;
+ twl->chip.npwm = 1;
+ }
+
+ twl->chip.dev = &pdev->dev;
+ twl->chip.base = -1;
+
+ mutex_init(&twl->mutex);
+
+ ret = pwmchip_add(&twl->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, twl);
+
+ return 0;
+}
+
+static int twl_pwmled_remove(struct platform_device *pdev)
+{
+ struct twl_pwmled_chip *twl = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&twl->chip);
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id twl_pwmled_of_match[] = {
+ { .compatible = "ti,twl4030-pwmled" },
+ { .compatible = "ti,twl6030-pwmled" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_pwmled_of_match);
+#endif
+
+static struct platform_driver twl_pwmled_driver = {
+ .driver = {
+ .name = "twl-pwmled",
+ .of_match_table = of_match_ptr(twl_pwmled_of_match),
+ },
+ .probe = twl_pwmled_probe,
+ .remove = twl_pwmled_remove,
+};
+module_platform_driver(twl_pwmled_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030 LED outputs");
+MODULE_ALIAS("platform:twl-pwmled");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
new file mode 100644
index 00000000000..e65db95d5e5
--- /dev/null
+++ b/drivers/pwm/pwm-twl.c
@@ -0,0 +1,359 @@
+/*
+ * Driver for TWL4030/6030 Generic Pulse Width Modulator
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/i2c/twl.h>
+#include <linux/slab.h>
+
+/*
+ * This driver handles the PWMs of TWL4030 and TWL6030.
+ * The TRM names for the PWMs on TWL4030 are: PWM0, PWM1
+ * TWL6030 also has two PWMs, named in the TRM as PWM1 and PWM2
+ */
+
+#define TWL_PWM_MAX 0x7f
+
+/* Registers, bits and macro for TWL4030 */
+#define TWL4030_GPBR1_REG 0x0c
+#define TWL4030_PMBR1_REG 0x0d
+
+/* GPBR1 register bits */
+#define TWL4030_PWMXCLK_ENABLE (1 << 0)
+#define TWL4030_PWMX_ENABLE (1 << 2)
+#define TWL4030_PWMX_BITS (TWL4030_PWMX_ENABLE | TWL4030_PWMXCLK_ENABLE)
+#define TWL4030_PWM_TOGGLE(pwm, x) ((x) << (pwm))
+
+/* PMBR1 register bits */
+#define TWL4030_GPIO6_PWM0_MUTE_MASK (0x03 << 2)
+#define TWL4030_GPIO6_PWM0_MUTE_PWM0 (0x01 << 2)
+#define TWL4030_GPIO7_VIBRASYNC_PWM1_MASK (0x03 << 4)
+#define TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1 (0x03 << 4)
+
+/* Register, bits and macro for TWL6030 */
+#define TWL6030_TOGGLE3_REG 0x92
+
+#define TWL6030_PWMXR (1 << 0)
+#define TWL6030_PWMXS (1 << 1)
+#define TWL6030_PWMXEN (1 << 2)
+#define TWL6030_PWM_TOGGLE(pwm, x) ((x) << (pwm * 3))
+
+struct twl_pwm_chip {
+ struct pwm_chip chip;
+ struct mutex mutex;
+ u8 twl6030_toggle3;
+ u8 twl4030_pwm_mux;
+};
+
+static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
+{
+ return container_of(chip, struct twl_pwm_chip, chip);
+}
+
+static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
+ u8 pwm_config[2] = { 1, 0 };
+ int base, ret;
+
+ /*
+ * To configure the duty period:
+ * On-cycle is set to 1 (the minimum allowed value)
+ * The off time of 0 is not configurable, so the mapping is:
+ * 0 -> off cycle = 2,
+ * 1 -> off cycle = 2,
+ * 2 -> off cycle = 3,
+ * 126 -> off cycle = 127,
+ * 127 -> off cycle = 1
+ * When on cycle == off cycle the PWM will always be on
+ */
+ if (duty_cycle == 1)
+ duty_cycle = 2;
+ else if (duty_cycle > TWL_PWM_MAX)
+ duty_cycle = 1;
+
+ base = pwm->hwpwm * 3;
+
+ pwm_config[1] = duty_cycle;
+
+ ret = twl_i2c_write(TWL_MODULE_PWM, pwm_config, base, 2);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+
+ return ret;
+}
+
+static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+ val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+ val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = to_twl(chip);
+ int ret;
+ u8 val, mask, bits;
+
+ if (pwm->hwpwm == 1) {
+ mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
+ bits = TWL4030_GPIO7_VIBRASYNC_PWM1_PWM1;
+ } else {
+ mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
+ bits = TWL4030_GPIO6_PWM0_MUTE_PWM0;
+ }
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ goto out;
+ }
+
+ /* Save the current MUX configuration for the PWM */
+ twl->twl4030_pwm_mux &= ~mask;
+ twl->twl4030_pwm_mux |= (val & mask);
+
+ /* Select PWM functionality */
+ val &= ~mask;
+ val |= bits;
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+ return ret;
+}
+
+static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val, mask;
+
+ if (pwm->hwpwm == 1)
+ mask = TWL4030_GPIO7_VIBRASYNC_PWM1_MASK;
+ else
+ mask = TWL4030_GPIO6_PWM0_MUTE_MASK;
+
+ mutex_lock(&twl->mutex);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ goto out;
+ }
+
+ /* Restore the MUX configuration for the PWM */
+ val &= ~mask;
+ val |= (twl->twl4030_pwm_mux & mask);
+
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
+ if (ret < 0)
+ dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ val = twl->twl6030_toggle3;
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+ val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ goto out;
+ }
+
+ twl->twl6030_toggle3 = val;
+out:
+ mutex_unlock(&twl->mutex);
+ return 0;
+}
+
+static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct twl_pwm_chip *twl = container_of(chip, struct twl_pwm_chip,
+ chip);
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl->mutex);
+ val = twl->twl6030_toggle3;
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXR);
+ val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to read TOGGLE3\n", pwm->label);
+ goto out;
+ }
+
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXS | TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ goto out;
+ }
+
+ twl->twl6030_toggle3 = val;
+out:
+ mutex_unlock(&twl->mutex);
+}
+
+static const struct pwm_ops twl4030_pwm_ops = {
+ .config = twl_pwm_config,
+ .enable = twl4030_pwm_enable,
+ .disable = twl4030_pwm_disable,
+ .request = twl4030_pwm_request,
+ .free = twl4030_pwm_free,
+};
+
+static const struct pwm_ops twl6030_pwm_ops = {
+ .config = twl_pwm_config,
+ .enable = twl6030_pwm_enable,
+ .disable = twl6030_pwm_disable,
+};
+
+static int twl_pwm_probe(struct platform_device *pdev)
+{
+ struct twl_pwm_chip *twl;
+ int ret;
+
+ twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
+ if (!twl)
+ return -ENOMEM;
+
+ if (twl_class_is_4030())
+ twl->chip.ops = &twl4030_pwm_ops;
+ else
+ twl->chip.ops = &twl6030_pwm_ops;
+
+ twl->chip.dev = &pdev->dev;
+ twl->chip.base = -1;
+ twl->chip.npwm = 2;
+
+ mutex_init(&twl->mutex);
+
+ ret = pwmchip_add(&twl->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, twl);
+
+ return 0;
+}
+
+static int twl_pwm_remove(struct platform_device *pdev)
+{
+ struct twl_pwm_chip *twl = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&twl->chip);
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id twl_pwm_of_match[] = {
+ { .compatible = "ti,twl4030-pwm" },
+ { .compatible = "ti,twl6030-pwm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_pwm_of_match);
+#endif
+
+static struct platform_driver twl_pwm_driver = {
+ .driver = {
+ .name = "twl-pwm",
+ .of_match_table = of_match_ptr(twl_pwm_of_match),
+ },
+ .probe = twl_pwm_probe,
+ .remove = twl_pwm_remove,
+};
+module_platform_driver(twl_pwm_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("PWM driver for TWL4030 and TWL6030");
+MODULE_ALIAS("platform:twl-pwm");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-twl6030.c b/drivers/pwm/pwm-twl6030.c
deleted file mode 100644
index 378a7e28636..00000000000
--- a/drivers/pwm/pwm-twl6030.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * twl6030_pwm.c
- * Driver for PHOENIX (TWL6030) Pulse Width Modulator
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/i2c/twl.h>
-#include <linux/slab.h>
-
-#define LED_PWM_CTRL1 0xF4
-#define LED_PWM_CTRL2 0xF5
-
-/* Max value for CTRL1 register */
-#define PWM_CTRL1_MAX 255
-
-/* Pull down disable */
-#define PWM_CTRL2_DIS_PD (1 << 6)
-
-/* Current control 2.5 milli Amps */
-#define PWM_CTRL2_CURR_02 (2 << 4)
-
-/* LED supply source */
-#define PWM_CTRL2_SRC_VAC (1 << 2)
-
-/* LED modes */
-#define PWM_CTRL2_MODE_HW (0 << 0)
-#define PWM_CTRL2_MODE_SW (1 << 0)
-#define PWM_CTRL2_MODE_DIS (2 << 0)
-
-#define PWM_CTRL2_MODE_MASK 0x3
-
-struct twl6030_pwm_chip {
- struct pwm_chip chip;
-};
-
-static int twl6030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- /* Configure PWM */
- val = PWM_CTRL2_DIS_PD | PWM_CTRL2_CURR_02 | PWM_CTRL2_SRC_VAC |
- PWM_CTRL2_MODE_HW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to configure PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int twl6030_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
-{
- u8 duty_cycle = (duty_ns * PWM_CTRL1_MAX) / period_ns;
- int ret;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, duty_cycle, LED_PWM_CTRL1);
- if (ret < 0) {
- pr_err("%s: Failed to configure PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- /* Change mode to software control */
- val &= ~PWM_CTRL2_MODE_MASK;
- val |= PWM_CTRL2_MODE_SW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
- pwm->label, ret);
- return ret;
- }
-
- twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- return 0;
-}
-
-static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- int ret;
- u8 val;
-
- ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
- return;
- }
-
- val &= ~PWM_CTRL2_MODE_MASK;
- val |= PWM_CTRL2_MODE_HW;
-
- ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, LED_PWM_CTRL2);
- if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
- pwm->label, ret);
- }
-}
-
-static const struct pwm_ops twl6030_pwm_ops = {
- .request = twl6030_pwm_request,
- .config = twl6030_pwm_config,
- .enable = twl6030_pwm_enable,
- .disable = twl6030_pwm_disable,
-};
-
-static int twl6030_pwm_probe(struct platform_device *pdev)
-{
- struct twl6030_pwm_chip *twl6030;
- int ret;
-
- twl6030 = devm_kzalloc(&pdev->dev, sizeof(*twl6030), GFP_KERNEL);
- if (!twl6030)
- return -ENOMEM;
-
- twl6030->chip.dev = &pdev->dev;
- twl6030->chip.ops = &twl6030_pwm_ops;
- twl6030->chip.base = -1;
- twl6030->chip.npwm = 1;
-
- ret = pwmchip_add(&twl6030->chip);
- if (ret < 0)
- return ret;
-
- platform_set_drvdata(pdev, twl6030);
-
- return 0;
-}
-
-static int twl6030_pwm_remove(struct platform_device *pdev)
-{
- struct twl6030_pwm_chip *twl6030 = platform_get_drvdata(pdev);
-
- return pwmchip_remove(&twl6030->chip);
-}
-
-static struct platform_driver twl6030_pwm_driver = {
- .driver = {
- .name = "twl6030-pwm",
- },
- .probe = twl6030_pwm_probe,
- .remove = twl6030_pwm_remove,
-};
-module_platform_driver(twl6030_pwm_driver);
-
-MODULE_ALIAS("platform:twl6030-pwm");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index ad14389b714..b0ba2d40343 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -1,7 +1,8 @@
/*
* drivers/pwm/pwm-vt8500.c
*
- * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,14 +22,24 @@
#include <linux/io.h>
#include <linux/pwm.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <asm/div64.h>
-#define VT8500_NR_PWMS 4
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+/*
+ * The SoC architecture allocates register space for 4 PWMs, but only
+ * 2 are currently implemented.
+ */
+#define VT8500_NR_PWMS 2
struct vt8500_chip {
struct pwm_chip chip;
void __iomem *base;
+ struct clk *clk;
};
#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
@@ -51,8 +62,15 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
unsigned long long c;
unsigned long period_cycles, prescale, pv, dc;
+ int err;
- c = 25000000/2; /* wild guess --- need to implement clocks */
+ err = clk_enable(vt8500->clk);
+ if (err < 0) {
+ dev_err(chip->dev, "failed to enable clock\n");
+ return err;
+ }
+
+ c = clk_get_rate(vt8500->clk);
c = c * period_ns;
do_div(c, 1000000000);
period_cycles = c;
@@ -64,8 +82,10 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (pv > 4095)
pv = 4095;
- if (prescale > 1023)
+ if (prescale > 1023) {
+ clk_disable(vt8500->clk);
return -EINVAL;
+ }
c = (unsigned long long)pv * duty_ns;
do_div(c, period_ns);
@@ -80,13 +100,21 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
+ clk_disable(vt8500->clk);
return 0;
}
static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ int err;
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ err = clk_enable(vt8500->clk);
+ if (err < 0) {
+ dev_err(chip->dev, "failed to enable clock\n");
+ return err;
+ }
+
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
writel(5, vt8500->base + (pwm->hwpwm << 4));
return 0;
@@ -98,6 +126,8 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
writel(0, vt8500->base + (pwm->hwpwm << 4));
+
+ clk_disable(vt8500->clk);
}
static struct pwm_ops vt8500_pwm_ops = {
@@ -107,12 +137,24 @@ static struct pwm_ops vt8500_pwm_ops = {
.owner = THIS_MODULE,
};
-static int __devinit pwm_probe(struct platform_device *pdev)
+static const struct of_device_id vt8500_pwm_dt_ids[] = {
+ { .compatible = "via,vt8500-pwm", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
+
+static int vt8500_pwm_probe(struct platform_device *pdev)
{
struct vt8500_chip *chip;
struct resource *r;
+ struct device_node *np = pdev->dev.of_node;
int ret;
+ if (!np) {
+ dev_err(&pdev->dev, "invalid devicetree node\n");
+ return -EINVAL;
+ }
+
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
@@ -124,6 +166,12 @@ static int __devinit pwm_probe(struct platform_device *pdev)
chip->chip.base = -1;
chip->chip.npwm = VT8500_NR_PWMS;
+ chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(chip->clk)) {
+ dev_err(&pdev->dev, "clock source not specified\n");
+ return PTR_ERR(chip->clk);
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "no memory resource defined\n");
@@ -131,18 +179,26 @@ static int __devinit pwm_probe(struct platform_device *pdev)
}
chip->base = devm_request_and_ioremap(&pdev->dev, r);
- if (chip->base == NULL)
+ if (!chip->base)
return -EADDRNOTAVAIL;
+ ret = clk_prepare(chip->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to prepare clock\n");
+ return ret;
+ }
+
ret = pwmchip_add(&chip->chip);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip\n");
return ret;
+ }
platform_set_drvdata(pdev, chip);
return ret;
}
-static int __devexit pwm_remove(struct platform_device *pdev)
+static int vt8500_pwm_remove(struct platform_device *pdev)
{
struct vt8500_chip *chip;
@@ -150,28 +206,22 @@ static int __devexit pwm_remove(struct platform_device *pdev)
if (chip == NULL)
return -ENODEV;
+ clk_unprepare(chip->clk);
+
return pwmchip_remove(&chip->chip);
}
-static struct platform_driver pwm_driver = {
+static struct platform_driver vt8500_pwm_driver = {
+ .probe = vt8500_pwm_probe,
+ .remove = vt8500_pwm_remove,
.driver = {
.name = "vt8500-pwm",
.owner = THIS_MODULE,
+ .of_match_table = vt8500_pwm_dt_ids,
},
- .probe = pwm_probe,
- .remove = __devexit_p(pwm_remove),
};
+module_platform_driver(vt8500_pwm_driver);
-static int __init pwm_init(void)
-{
- return platform_driver_register(&pwm_driver);
-}
-arch_initcall(pwm_init);
-
-static void __exit pwm_exit(void)
-{
- platform_driver_unregister(&pwm_driver);
-}
-module_exit(pwm_exit);
-
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VT8500 PWM Driver");
+MODULE_AUTHOR("Tony Prisk <linux@prisktech.co.nz>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 1c5ab0172ea..2b557119ada 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -394,7 +394,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
#define pm8607_regulator_dt_init(x, y, z) (-1)
#endif
-static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
+static int pm8607_regulator_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm8607_regulator_info *info = NULL;
@@ -454,7 +454,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
+static int pm8607_regulator_remove(struct platform_device *pdev)
{
struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
@@ -481,7 +481,7 @@ static struct platform_driver pm8607_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = pm8607_regulator_probe,
- .remove = __devexit_p(pm8607_regulator_remove),
+ .remove = pm8607_regulator_remove,
.id_table = pm8607_regulator_driver_ids,
};
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 67d47b59a66..551a22b0753 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -109,6 +109,16 @@ config REGULATOR_DA9052
This driver supports the voltage regulators of DA9052-BC and
DA9053-AA/Bx PMIC.
+config REGULATOR_DA9055
+ tristate "Dialog Semiconductor DA9055 regulators"
+ depends on MFD_DA9055
+ help
+ Say y here to support the BUCK and LDO regulators found on the
+ Dialog Semiconductor DA9055 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called da9055-regulator.
+
config REGULATOR_FAN53555
tristate "Fairchild FAN53555 Regulator"
depends on I2C
@@ -204,6 +214,16 @@ config REGULATOR_MAX8952
via I2C bus. Maxim 8952 has one voltage output and supports 4 DVS
modes ranging from 0.77V to 1.40V by 0.01V steps.
+config REGULATOR_MAX8973
+ tristate "Maxim MAX8973 voltage regulator "
+ depends on I2C
+ select REGMAP_I2C
+ help
+ The MAXIM MAX8973 high-efficiency, three-phase, DC-DC step-down
+ switching regulator delivers up to 9A of output current. Each
+ phase operates at a 2MHz fixed frequency with a 120 deg shift
+ from the adjacent phase, allowing the use of small magnetic components.
+
config REGULATOR_MAX8997
tristate "Maxim 8997/8966 regulator"
depends on MFD_MAX8997
@@ -335,6 +355,17 @@ config REGULATOR_PALMAS
on the muxing. This is handled automatically in the driver by
reading the mux info from OTP.
+config REGULATOR_TPS51632
+ tristate "TI TPS51632 Power Regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports the TPS51632 voltage regulator chip.
+ The TPS51632 is a 3-2-1 phase D-Cap+ step-down driverless controller
+ with serial VID control and DVFS.
+ The output voltage can be configured through the I2C or PWM
+ interface.
+
config REGULATOR_TPS6105X
tristate "TI TPS6105X Power regulators"
depends on TPS6105X
@@ -415,6 +446,15 @@ config REGULATOR_TPS65912
help
This driver supports TPS65912 voltage regulator chip.
+config REGULATOR_TPS80031
+ tristate "TI TPS80031/TPS80032 power regualtor driver"
+ depends on MFD_TPS80031
+ help
+ The TPS80031/TPS80032 is a fully integrated power management IC
+ with power path and battery charger. It has 5 configurable
+ step-down converters, 11 general-purpose LDOs, a VBUS generator
+ and a digital output to control regulators.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
@@ -422,6 +462,13 @@ config REGULATOR_TWL4030
This driver supports the voltage regulators provided by
this family of companion chips.
+config REGULATOR_VEXPRESS
+ tristate "Versatile Express regulators"
+ depends on VEXPRESS_CONFIG
+ help
+ This driver provides support for the voltage regulators available
+ on ARM Ltd's Versatile Express platform.
+
config REGULATOR_WM831X
tristate "Wolfson Microelectronics WM831x PMIC regulators"
depends on MFD_WM831X
@@ -450,5 +497,12 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
+config REGULATOR_AS3711
+ tristate "AS3711 PMIC"
+ depends on MFD_AS3711
+ help
+ This driver provides support for the voltage regulators on the
+ AS3711 PMIC.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index e431eed8a87..b802b0c7fb0 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -16,8 +16,10 @@ obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
+obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
+obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
+obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
@@ -41,6 +44,7 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
+obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
@@ -56,7 +60,9 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
+obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 167c93f2198..8b5876356db 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -187,7 +187,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
+static int aat2870_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -201,7 +201,7 @@ static struct platform_driver aat2870_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = aat2870_regulator_probe,
- .remove = __devexit_p(aat2870_regulator_remove),
+ .remove = aat2870_regulator_remove,
};
static int __init aat2870_regulator_init(void)
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index df4ad8927f0..111ec69a3e9 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -494,7 +494,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
* for all the different regulators.
*/
-static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
+static int ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
struct regulator_config config = { };
@@ -571,7 +571,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ab3100_regulators_remove(struct platform_device *pdev)
+static int ab3100_regulators_remove(struct platform_device *pdev)
{
int i;
@@ -589,7 +589,7 @@ static struct platform_driver ab3100_regulators_driver = {
.owner = THIS_MODULE,
},
.probe = ab3100_regulators_probe,
- .remove = __devexit_p(ab3100_regulators_remove),
+ .remove = ab3100_regulators_remove,
};
static __init int ab3100_regulators_init(void)
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e3d1d063025..09014f38a94 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -641,7 +641,7 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
};
-static __devinit int
+static int
ab8500_regulator_init_registers(struct platform_device *pdev, int id, int value)
{
int err;
@@ -676,7 +676,7 @@ ab8500_regulator_init_registers(struct platform_device *pdev, int id, int value)
return 0;
}
-static __devinit int ab8500_regulator_register(struct platform_device *pdev,
+static int ab8500_regulator_register(struct platform_device *pdev,
struct regulator_init_data *init_data,
int id,
struct device_node *np)
@@ -735,7 +735,7 @@ static struct of_regulator_match ab8500_regulator_matches[] = {
{ .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
};
-static __devinit int
+static int
ab8500_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
{
int err, i;
@@ -751,7 +751,7 @@ ab8500_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
return 0;
}
-static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
+static int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_platform_data *pdata;
@@ -817,7 +817,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
return 0;
}
-static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
+static int ab8500_regulator_remove(struct platform_device *pdev)
{
int i;
@@ -836,7 +836,7 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
static struct platform_driver ab8500_regulator_driver = {
.probe = ab8500_regulator_probe,
- .remove = __devexit_p(ab8500_regulator_remove),
+ .remove = ab8500_regulator_remove,
.driver = {
.name = "ab8500-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index f123f7e3b75..6b981b5faa7 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -211,7 +211,7 @@ static const struct i2c_device_id ad5398_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad5398_id);
-static int __devinit ad5398_probe(struct i2c_client *client,
+static int ad5398_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regulator_init_data *init_data = client->dev.platform_data;
@@ -256,7 +256,7 @@ err:
return ret;
}
-static int __devexit ad5398_remove(struct i2c_client *client)
+static int ad5398_remove(struct i2c_client *client)
{
struct ad5398_chip_info *chip = i2c_get_clientdata(client);
@@ -266,7 +266,7 @@ static int __devexit ad5398_remove(struct i2c_client *client)
static struct i2c_driver ad5398_driver = {
.probe = ad5398_probe,
- .remove = __devexit_p(ad5398_remove),
+ .remove = ad5398_remove,
.driver = {
.name = "ad5398",
},
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 1af97686f44..0199eeea63b 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -48,36 +48,21 @@ static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
unsigned selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, mask;
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- val = anatop_reg->min_bit_val + selector;
- dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
- mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
- anatop_reg->vol_bit_shift;
- val <<= anatop_reg->vol_bit_shift;
- regmap_update_bits(anatop_reg->anatop, anatop_reg->control_reg,
- mask, val);
-
- return 0;
+ return regulator_set_voltage_sel_regmap(reg, selector);
}
static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, mask;
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- regmap_read(anatop_reg->anatop, anatop_reg->control_reg, &val);
- mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
- anatop_reg->vol_bit_shift;
- val = (val & mask) >> anatop_reg->vol_bit_shift;
-
- return val - anatop_reg->min_bit_val;
+ return regulator_get_voltage_sel_regmap(reg);
}
static struct regulator_ops anatop_rops = {
@@ -87,7 +72,7 @@ static struct regulator_ops anatop_rops = {
.map_voltage = regulator_map_voltage_linear,
};
-static int __devinit anatop_regulator_probe(struct platform_device *pdev)
+static int anatop_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -158,15 +143,20 @@ static int __devinit anatop_regulator_probe(struct platform_device *pdev)
goto anatop_probe_end;
}
- rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage)
- / 25000 + 1;
+ rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
+ + sreg->min_bit_val;
rdesc->min_uV = sreg->min_voltage;
rdesc->uV_step = 25000;
+ rdesc->linear_min_sel = sreg->min_bit_val;
+ rdesc->vsel_reg = sreg->control_reg;
+ rdesc->vsel_mask = ((1 << sreg->vol_bit_width) - 1) <<
+ sreg->vol_bit_shift;
config.dev = &pdev->dev;
config.init_data = initdata;
config.driver_data = sreg;
config.of_node = pdev->dev.of_node;
+ config.regmap = sreg->anatop;
/* register regulator */
rdev = regulator_register(rdesc, &config);
@@ -186,7 +176,7 @@ anatop_probe_end:
return ret;
}
-static int __devexit anatop_regulator_remove(struct platform_device *pdev)
+static int anatop_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
@@ -210,7 +200,7 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = __devexit_p(anatop_regulator_remove),
+ .remove = anatop_regulator_remove,
};
static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index d184aa35abc..ed7beec53af 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -34,6 +34,108 @@ struct arizona_ldo1 {
struct regulator_init_data init_data;
};
+static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ if (selector == rdev->desc->n_voltages - 1)
+ return 1800000;
+ else
+ return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
+}
+
+static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int sel;
+
+ sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
+ if (sel >= rdev->desc->n_voltages)
+ sel = rdev->desc->n_voltages - 1;
+
+ return sel;
+}
+
+static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned sel)
+{
+ struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
+ struct regmap *regmap = ldo->arizona->regmap;
+ unsigned int val;
+ int ret;
+
+ if (sel == rdev->desc->n_voltages - 1)
+ val = ARIZONA_LDO1_HI_PWR;
+ else
+ val = 0;
+
+ ret = regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_2,
+ ARIZONA_LDO1_HI_PWR, val);
+ if (ret != 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, ARIZONA_DYNAMIC_FREQUENCY_SCALING_1,
+ ARIZONA_SUBSYS_MAX_FREQ, val);
+ if (ret != 0)
+ return ret;
+
+ if (val)
+ return 0;
+
+ val = sel << ARIZONA_LDO1_VSEL_SHIFT;
+
+ return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1,
+ ARIZONA_LDO1_VSEL_MASK, val);
+}
+
+static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev);
+ struct regmap *regmap = ldo->arizona->regmap;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_2, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & ARIZONA_LDO1_HI_PWR)
+ return rdev->desc->n_voltages - 1;
+
+ ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val);
+ if (ret != 0)
+ return ret;
+
+ return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT;
+}
+
+static struct regulator_ops arizona_ldo1_hc_ops = {
+ .list_voltage = arizona_ldo1_hc_list_voltage,
+ .map_voltage = arizona_ldo1_hc_map_voltage,
+ .get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
+ .set_voltage_sel = arizona_ldo1_hc_set_voltage_sel,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+};
+
+static const struct regulator_desc arizona_ldo1_hc = {
+ .name = "LDO1",
+ .supply_name = "LDOVDD",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &arizona_ldo1_hc_ops,
+
+ .bypass_reg = ARIZONA_LDO1_CONTROL_1,
+ .bypass_mask = ARIZONA_LDO1_BYPASS,
+ .min_uV = 900000,
+ .uV_step = 50000,
+ .n_voltages = 8,
+ .enable_time = 500,
+
+ .owner = THIS_MODULE,
+};
+
static struct regulator_ops arizona_ldo1_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -55,11 +157,22 @@ static const struct regulator_desc arizona_ldo1 = {
.bypass_mask = ARIZONA_LDO1_BYPASS,
.min_uV = 900000,
.uV_step = 50000,
- .n_voltages = 6,
+ .n_voltages = 7,
+ .enable_time = 500,
.owner = THIS_MODULE,
};
+static const struct regulator_init_data arizona_ldo1_dvfs = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = 1,
+};
+
static const struct regulator_init_data arizona_ldo1_default = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -67,9 +180,10 @@ static const struct regulator_init_data arizona_ldo1_default = {
.num_consumer_supplies = 1,
};
-static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
+static int arizona_ldo1_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ const struct regulator_desc *desc;
struct regulator_config config = { };
struct arizona_ldo1 *ldo1;
int ret;
@@ -87,7 +201,17 @@ static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
* default init_data for it. This will be overridden with
* platform data if provided.
*/
- ldo1->init_data = arizona_ldo1_default;
+ switch (arizona->type) {
+ case WM5102:
+ desc = &arizona_ldo1_hc;
+ ldo1->init_data = arizona_ldo1_dvfs;
+ break;
+ default:
+ desc = &arizona_ldo1;
+ ldo1->init_data = arizona_ldo1_default;
+ break;
+ }
+
ldo1->init_data.consumer_supplies = &ldo1->supply;
ldo1->supply.supply = "DCVDD";
ldo1->supply.dev_name = dev_name(arizona->dev);
@@ -102,7 +226,7 @@ static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
else
config.init_data = &ldo1->init_data;
- ldo1->regulator = regulator_register(&arizona_ldo1, &config);
+ ldo1->regulator = regulator_register(desc, &config);
if (IS_ERR(ldo1->regulator)) {
ret = PTR_ERR(ldo1->regulator);
dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
@@ -115,7 +239,7 @@ static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
return 0;
}
-static __devexit int arizona_ldo1_remove(struct platform_device *pdev)
+static int arizona_ldo1_remove(struct platform_device *pdev)
{
struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
@@ -126,7 +250,7 @@ static __devexit int arizona_ldo1_remove(struct platform_device *pdev)
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
- .remove = __devexit_p(arizona_ldo1_remove),
+ .remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index d9b1f82cc5b..a6d040cbf8a 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -101,6 +101,8 @@ static const struct regulator_desc arizona_micsupp = {
.bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
.bypass_mask = ARIZONA_CPMIC_BYPASS,
+ .enable_time = 3000,
+
.owner = THIS_MODULE,
};
@@ -115,7 +117,7 @@ static const struct regulator_init_data arizona_micsupp_default = {
.num_consumer_supplies = 1,
};
-static __devinit int arizona_micsupp_probe(struct platform_device *pdev)
+static int arizona_micsupp_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
@@ -166,7 +168,7 @@ static __devinit int arizona_micsupp_probe(struct platform_device *pdev)
return 0;
}
-static __devexit int arizona_micsupp_remove(struct platform_device *pdev)
+static int arizona_micsupp_remove(struct platform_device *pdev)
{
struct arizona_micsupp *micsupp = platform_get_drvdata(pdev);
@@ -177,7 +179,7 @@ static __devexit int arizona_micsupp_remove(struct platform_device *pdev)
static struct platform_driver arizona_micsupp_driver = {
.probe = arizona_micsupp_probe,
- .remove = __devexit_p(arizona_micsupp_remove),
+ .remove = arizona_micsupp_remove,
.driver = {
.name = "arizona-micsupp",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
new file mode 100644
index 00000000000..2f1341db38a
--- /dev/null
+++ b/drivers/regulator/as3711-regulator.c
@@ -0,0 +1,369 @@
+/*
+ * AS3711 PMIC regulator driver, using DCDC Step Down and LDO supplies
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/as3711.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/slab.h>
+
+struct as3711_regulator_info {
+ struct regulator_desc desc;
+ unsigned int max_uV;
+};
+
+struct as3711_regulator {
+ struct as3711_regulator_info *reg_info;
+ struct regulator_dev *rdev;
+};
+
+static int as3711_list_voltage_sd(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ if (!selector)
+ return 0;
+ if (selector < 0x41)
+ return 600000 + selector * 12500;
+ if (selector < 0x71)
+ return 1400000 + (selector - 0x40) * 25000;
+ return 2600000 + (selector - 0x70) * 50000;
+}
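/*
 * For illustration (values computed from the ladder above, not defined
 * by the driver): selector 0x01 -> 612500 uV (the SD minimum), 0x40 ->
 * 1400000 uV, 0x41 -> 1425000 uV, 0x70 -> 2600000 uV, 0x71 -> 2650000 uV,
 * and the top selector 0x7f -> 3350000 uV, which matches the SD max_uV
 * in as3711_reg_info below; selector 0 is reported as 0.
 */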
+
+static int as3711_list_voltage_aldo(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ if (selector < 0x10)
+ return 1200000 + selector * 50000;
+ return 1800000 + (selector - 0x10) * 100000;
+}
+
+static int as3711_list_voltage_dldo(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= rdev->desc->n_voltages ||
+ (selector > 0x10 && selector < 0x20))
+ return -EINVAL;
+
+ if (selector < 0x11)
+ return 900000 + selector * 50000;
+ return 1750000 + (selector - 0x20) * 50000;
+}
+
+static int as3711_bound_check(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV)
+{
+ struct as3711_regulator *reg = rdev_get_drvdata(rdev);
+ struct as3711_regulator_info *info = reg->reg_info;
+
+ dev_dbg(&rdev->dev, "%s(), %d, %d, %d\n", __func__,
+ *min_uV, rdev->desc->min_uV, info->max_uV);
+
+ if (*max_uV < *min_uV ||
+ *min_uV > info->max_uV || rdev->desc->min_uV > *max_uV)
+ return -EINVAL;
+
+ if (rdev->desc->n_voltages == 1)
+ return 0;
+
+ if (*max_uV > info->max_uV)
+ *max_uV = info->max_uV;
+
+ if (*min_uV < rdev->desc->min_uV)
+ *min_uV = rdev->desc->min_uV;
+
+ return *min_uV;
+}
+
+static int as3711_sel_check(int min, int max, int bottom, int step)
+{
+ int sel, voltage;
+
+ /* Round up min, when dividing: keeps us within the range */
+ sel = DIV_ROUND_UP(min - bottom, step);
+ voltage = sel * step + bottom;
+ pr_debug("%s(): select %d..%d in %d+N*%d: %d\n", __func__,
+ min, max, bottom, step, sel);
+ if (voltage > max)
+ return -EINVAL;
+
+ return sel;
+}
+
+static int as3711_map_voltage_sd(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int ret;
+
+ ret = as3711_bound_check(rdev, &min_uV, &max_uV);
+ if (ret <= 0)
+ return ret;
+
+ if (min_uV <= 1400000)
+ return as3711_sel_check(min_uV, max_uV, 600000, 12500);
+
+ if (min_uV <= 2600000)
+ return as3711_sel_check(min_uV, max_uV, 1400000, 25000) + 0x40;
+
+ return as3711_sel_check(min_uV, max_uV, 2600000, 50000) + 0x70;
+}
+
+/*
+ * The regulator API supports 4 modes of operation: FAST, NORMAL, IDLE and
+ * STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
+ * FAST: sdX_fast=1
+ * NORMAL: low_noise=1
+ * IDLE: low_noise=0
+ */
+
+static int as3711_set_mode_sd(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int fast_bit = rdev->desc->enable_mask,
+ low_noise_bit = fast_bit << 4;
+ u8 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = fast_bit | low_noise_bit;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = low_noise_bit;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, AS3711_SD_CONTROL_1,
+ low_noise_bit | fast_bit, val);
+}
+
+static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
+{
+ unsigned int fast_bit = rdev->desc->enable_mask,
+ low_noise_bit = fast_bit << 4, mask = fast_bit | low_noise_bit;
+ unsigned int val;
+ int ret = regmap_read(rdev->regmap, AS3711_SD_CONTROL_1, &val);
+
+ if (ret < 0)
+ return ret;
+
+ if ((val & mask) == mask)
+ return REGULATOR_MODE_FAST;
+
+ if ((val & mask) == low_noise_bit)
+ return REGULATOR_MODE_NORMAL;
+
+ if (!(val & mask))
+ return REGULATOR_MODE_IDLE;
+
+ return -EINVAL;
+}
+
+static int as3711_map_voltage_aldo(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int ret;
+
+ ret = as3711_bound_check(rdev, &min_uV, &max_uV);
+ if (ret <= 0)
+ return ret;
+
+ if (min_uV <= 1800000)
+ return as3711_sel_check(min_uV, max_uV, 1200000, 50000);
+
+ return as3711_sel_check(min_uV, max_uV, 1800000, 100000) + 0x10;
+}
+
+static int as3711_map_voltage_dldo(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int ret;
+
+ ret = as3711_bound_check(rdev, &min_uV, &max_uV);
+ if (ret <= 0)
+ return ret;
+
+ if (min_uV <= 1700000)
+ return as3711_sel_check(min_uV, max_uV, 900000, 50000);
+
+ return as3711_sel_check(min_uV, max_uV, 1750000, 50000) + 0x20;
+}
+
+static struct regulator_ops as3711_sd_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = as3711_list_voltage_sd,
+ .map_voltage = as3711_map_voltage_sd,
+ .get_mode = as3711_get_mode_sd,
+ .set_mode = as3711_set_mode_sd,
+};
+
+static struct regulator_ops as3711_aldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = as3711_list_voltage_aldo,
+ .map_voltage = as3711_map_voltage_aldo,
+};
+
+static struct regulator_ops as3711_dldo_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = as3711_list_voltage_dldo,
+ .map_voltage = as3711_map_voltage_dldo,
+};
+
+#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \
+ [AS3711_REGULATOR_ ## _id] = { \
+ .desc = { \
+ .name = "as3711-regulator-" # _id, \
+ .id = AS3711_REGULATOR_ ## _id, \
+ .n_voltages = (_vmask + 1), \
+ .ops = &as3711_ ## _sfx ## _ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = AS3711_ ## _id ## _VOLTAGE, \
+ .vsel_mask = _vmask << _vshift, \
+ .enable_reg = AS3711_ ## _en_reg, \
+ .enable_mask = BIT(_en_bit), \
+ .min_uV = _min_uV, \
+ }, \
+ .max_uV = _max_uV, \
+}
+
+static struct as3711_regulator_info as3711_reg_info[] = {
+ AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, 0, 612500, 3350000, sd),
+ AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, 0, 612500, 3350000, sd),
+ AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, 0, 612500, 3350000, sd),
+ AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, 0, 612500, 3350000, sd),
+ AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo),
+ AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo),
+ AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo),
+ /* StepUp output voltage depends on the supplying regulator */
+};
+
+#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_info)
+
+static int as3711_regulator_probe(struct platform_device *pdev)
+{
+ struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_init_data *reg_data;
+ struct regulator_config config = {.dev = &pdev->dev,};
+ struct as3711_regulator *reg = NULL;
+ struct as3711_regulator *regs;
+ struct regulator_dev *rdev;
+ struct as3711_regulator_info *ri;
+ int ret;
+ int id;
+
+ if (!pdata)
+ dev_dbg(&pdev->dev, "No platform data...\n");
+
+ regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM *
+ sizeof(struct as3711_regulator), GFP_KERNEL);
+ if (!regs) {
+ dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+ return -ENOMEM;
+ }
+
+ for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
+ reg_data = pdata ? pdata->init_data[id] : NULL;
+
+ /* No need to register if there is no regulator data */
+ if (!ri->desc.name)
+ continue;
+
+ reg = &regs[id];
+ reg->reg_info = ri;
+
+ config.init_data = reg_data;
+ config.driver_data = reg;
+ config.regmap = as3711->regmap;
+
+ rdev = regulator_register(&ri->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->desc.name);
+ ret = PTR_ERR(rdev);
+ goto eregreg;
+ }
+ reg->rdev = rdev;
+ }
+ platform_set_drvdata(pdev, regs);
+ return 0;
+
+eregreg:
+ while (--id >= 0)
+ regulator_unregister(regs[id].rdev);
+
+ return ret;
+}
+
+static int as3711_regulator_remove(struct platform_device *pdev)
+{
+ struct as3711_regulator *regs = platform_get_drvdata(pdev);
+ int id;
+
+ for (id = 0; id < AS3711_REGULATOR_NUM; ++id)
+ regulator_unregister(regs[id].rdev);
+ return 0;
+}
+
+static struct platform_driver as3711_regulator_driver = {
+ .driver = {
+ .name = "as3711-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_regulator_probe,
+ .remove = as3711_regulator_remove,
+};
+
+static int __init as3711_regulator_init(void)
+{
+ return platform_driver_register(&as3711_regulator_driver);
+}
+subsys_initcall(as3711_regulator_init);
+
+static void __exit as3711_regulator_exit(void)
+{
+ platform_driver_unregister(&as3711_regulator_driver);
+}
+module_exit(as3711_regulator_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("AS3711 regulator driver");
+MODULE_ALIAS("platform:as3711-regulator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e872c8be080..0f65b246cc0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -199,8 +199,11 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
*min_uV = regulator->min_uV;
}
- if (*min_uV > *max_uV)
+ if (*min_uV > *max_uV) {
+ dev_err(regulator->dev, "Restricting voltage, %u-%uuV\n",
+ regulator->min_uV, regulator->max_uV);
return -EINVAL;
+ }
return 0;
}
@@ -880,7 +883,9 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
/* final: [min_uV..max_uV] valid iff constraints valid */
if (max_uV < min_uV) {
- rdev_err(rdev, "unsupportable voltage constraints\n");
+ rdev_err(rdev,
+ "unsupportable voltage constraints %u-%uuV\n",
+ min_uV, max_uV);
return -EINVAL;
}
@@ -1867,6 +1872,28 @@ int regulator_is_enabled(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_is_enabled);
/**
+ * regulator_can_change_voltage - check if regulator can change voltage
+ * @regulator: regulator source
+ *
+ * Returns positive if the regulator driver backing the source/client
+ * can change its voltage, false otherwise. Useful for detecting fixed
+ * or dummy regulators and disabling voltage change logic in the client
+ * driver.
+ */
+int regulator_can_change_voltage(struct regulator *regulator)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+
+ if (rdev->constraints &&
+ rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE &&
+ (rdev->desc->n_voltages - rdev->desc->linear_min_sel) > 1)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_can_change_voltage);
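/*
 * A minimal usage sketch for the helper above; "vcc" and the consumer
 * code are hypothetical and only illustrate the intended use:
 *
 *	supply = devm_regulator_get(dev, "vcc");
 *	if (!IS_ERR(supply) && regulator_can_change_voltage(supply) > 0)
 *		regulator_set_voltage(supply, 1200000, 1800000);
 *
 * A fixed or dummy supply returns 0 here, letting the client skip its
 * voltage-scaling logic entirely.
 */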
+
+/**
* regulator_count_voltages - count regulator_list_voltage() selectors
* @regulator: regulator source
*
@@ -1897,6 +1924,10 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev,
{
if (selector >= rdev->desc->n_voltages)
return -EINVAL;
+ if (selector < rdev->desc->linear_min_sel)
+ return 0;
+
+ selector -= rdev->desc->linear_min_sel;
return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
}
@@ -1985,6 +2016,11 @@ int regulator_is_supported_voltage(struct regulator *regulator,
return ret;
}
+ /* Any voltage within the constraints range is fine? */
+ if (rdev->desc->continuous_voltage_range)
+ return min_uV >= rdev->constraints->min_uV &&
+ max_uV <= rdev->constraints->max_uV;
+
ret = regulator_count_voltages(regulator);
if (ret < 0)
return ret;
@@ -2120,6 +2156,8 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
if (ret < 0)
return ret;
+ ret += rdev->desc->linear_min_sel;
+
/* Map back into a voltage to verify we're still in bounds */
voltage = rdev->desc->ops->list_voltage(rdev, ret);
if (voltage < min_uV || voltage > max_uV)
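/*
 * Worked example of the linear_min_sel offset with illustrative values
 * (no particular driver): for min_uV = 725000, uV_step = 25000 and
 * linear_min_sel = 4, selectors 0..3 list as 0, selector 4 is 725000 uV
 * and selector 6 is 775000 uV.  regulator_map_voltage_linear(rdev,
 * 750000, 800000) computes DIV_ROUND_UP(750000 - 725000, 25000) = 1,
 * adds the offset to get selector 5, and the list_voltage() check
 * confirms 750000 uV is within bounds.
 */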
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 36c5b92fe0a..2afa5730f32 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -460,7 +460,7 @@ static inline struct da903x_regulator_info *find_regulator_info(int id)
return NULL;
}
-static int __devinit da903x_regulator_probe(struct platform_device *pdev)
+static int da903x_regulator_probe(struct platform_device *pdev)
{
struct da903x_regulator_info *ri = NULL;
struct regulator_dev *rdev;
@@ -499,7 +499,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit da903x_regulator_remove(struct platform_device *pdev)
+static int da903x_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -513,7 +513,7 @@ static struct platform_driver da903x_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = da903x_regulator_probe,
- .remove = __devexit_p(da903x_regulator_remove),
+ .remove = da903x_regulator_remove,
};
static int __init da903x_regulator_init(void)
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 27355b1199e..d0963090442 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -129,17 +129,17 @@ static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA,
else if (offset == 0)
row = 1;
- if (min_uA > da9052_current_limits[row][DA9052_MAX_UA] ||
- max_uA < da9052_current_limits[row][DA9052_MIN_UA])
- return -EINVAL;
-
for (i = DA9052_CURRENT_RANGE - 1; i >= 0; i--) {
- if (da9052_current_limits[row][i] <= max_uA) {
+ if ((min_uA <= da9052_current_limits[row][i]) &&
+ (da9052_current_limits[row][i] <= max_uA)) {
reg_val = i;
break;
}
}
+ if (i < 0)
+ return -EINVAL;
+
/* Determine the even or odd position of the buck current limit
* register field
*/
@@ -365,7 +365,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
return NULL;
}
-static int __devinit da9052_regulator_probe(struct platform_device *pdev)
+static int da9052_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
struct da9052_regulator *regulator;
@@ -430,7 +430,7 @@ static int __devinit da9052_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit da9052_regulator_remove(struct platform_device *pdev)
+static int da9052_regulator_remove(struct platform_device *pdev)
{
struct da9052_regulator *regulator = platform_get_drvdata(pdev);
@@ -440,7 +440,7 @@ static int __devexit da9052_regulator_remove(struct platform_device *pdev)
static struct platform_driver da9052_regulator_driver = {
.probe = da9052_regulator_probe,
- .remove = __devexit_p(da9052_regulator_remove),
+ .remove = da9052_regulator_remove,
.driver = {
.name = "da9052-regulator",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
new file mode 100644
index 00000000000..a4b9cb8c431
--- /dev/null
+++ b/drivers/regulator/da9055-regulator.c
@@ -0,0 +1,641 @@
+/*
+* Regulator driver for DA9055 PMIC
+*
+* Copyright(c) 2012 Dialog Semiconductor Ltd.
+*
+* Author: David Dajun Chen <dchen@diasemi.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+#include <linux/mfd/da9055/pdata.h>
+
+#define DA9055_MIN_UA 0
+#define DA9055_MAX_UA 3
+
+#define DA9055_LDO_MODE_SYNC 0
+#define DA9055_LDO_MODE_SLEEP 1
+
+#define DA9055_BUCK_MODE_SLEEP 1
+#define DA9055_BUCK_MODE_SYNC 2
+#define DA9055_BUCK_MODE_AUTO 3
+
+/* DA9055 REGULATOR IDs */
+#define DA9055_ID_BUCK1 0
+#define DA9055_ID_BUCK2 1
+#define DA9055_ID_LDO1 2
+#define DA9055_ID_LDO2 3
+#define DA9055_ID_LDO3 4
+#define DA9055_ID_LDO4 5
+#define DA9055_ID_LDO5 6
+#define DA9055_ID_LDO6 7
+
+/* DA9055 BUCK current limit */
+static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 };
+
+struct da9055_conf_reg {
+ int reg;
+ int sel_mask;
+ int en_mask;
+};
+
+struct da9055_volt_reg {
+ int reg_a;
+ int reg_b;
+ int sl_shift;
+ int v_mask;
+ int v_shift;
+};
+
+struct da9055_mode_reg {
+ int reg;
+ int mask;
+ int shift;
+};
+
+struct da9055_regulator_info {
+ struct regulator_desc reg_desc;
+ struct da9055_conf_reg conf;
+ struct da9055_volt_reg volt;
+ struct da9055_mode_reg mode;
+};
+
+struct da9055_regulator {
+ struct da9055 *da9055;
+ struct da9055_regulator_info *info;
+ struct regulator_dev *rdev;
+ enum gpio_select reg_rselect;
+};
+
+static unsigned int da9055_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int ret, mode = 0;
+
+ ret = da9055_reg_read(regulator->da9055, info->mode.reg);
+ if (ret < 0)
+ return ret;
+
+ switch ((ret & info->mode.mask) >> info->mode.shift) {
+ case DA9055_BUCK_MODE_SYNC:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case DA9055_BUCK_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case DA9055_BUCK_MODE_SLEEP:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ }
+
+ return mode;
+}
+
+static int da9055_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int val = 0;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = DA9055_BUCK_MODE_SYNC << info->mode.shift;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = DA9055_BUCK_MODE_AUTO << info->mode.shift;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = DA9055_BUCK_MODE_SLEEP << info->mode.shift;
+ break;
+ }
+
+ return da9055_reg_update(regulator->da9055, info->mode.reg,
+ info->mode.mask, val);
+}
+
+static unsigned int da9055_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9055_reg_read(regulator->da9055, info->volt.reg_b);
+ if (ret < 0)
+ return ret;
+
+ if (ret >> info->volt.sl_shift)
+ return REGULATOR_MODE_STANDBY;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ struct da9055_volt_reg volt = info->volt;
+ int val = 0;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ case REGULATOR_MODE_FAST:
+ val = DA9055_LDO_MODE_SYNC;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = DA9055_LDO_MODE_SLEEP;
+ break;
+ }
+
+ return da9055_reg_update(regulator->da9055, volt.reg_b,
+ 1 << volt.sl_shift,
+ val << volt.sl_shift);
+}
+
+static int da9055_buck_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM);
+ if (ret < 0)
+ return ret;
+
+ ret &= info->mode.mask;
+ return da9055_current_limits[ret >> info->mode.shift];
+}
+
+static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int i;
+
+ for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) {
+ if ((min_uA <= da9055_current_limits[i]) &&
+ (da9055_current_limits[i] <= max_uA))
+ return da9055_reg_update(regulator->da9055,
+ DA9055_REG_BUCK_LIM,
+ info->mode.mask,
+ i << info->mode.shift);
+ }
+
+ return -EINVAL;
+}
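/*
 * For illustration: with da9055_current_limits[] = { 500000, 600000,
 * 700000, 800000 }, a request of (550000, 750000) walks the table from
 * the top and programs 700000 uA, the largest limit inside the window,
 * while (850000, 900000) matches no entry and returns -EINVAL.
 */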
+
+static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ struct da9055_volt_reg volt = info->volt;
+ int ret, sel;
+
+ /*
+ * There are two voltage register sets, A & B, for voltage ramping, but
+ * only one of them can be active at a time, therefore we first determine
+ * the active register set.
+ */
+ ret = da9055_reg_read(regulator->da9055, info->conf.reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= info->conf.sel_mask;
+
+ /* Get the voltage for the active register set A/B */
+ if (ret == DA9055_REGUALTOR_SET_A)
+ ret = da9055_reg_read(regulator->da9055, volt.reg_a);
+ else
+ ret = da9055_reg_read(regulator->da9055, volt.reg_b);
+
+ if (ret < 0)
+ return ret;
+
+ sel = (ret & volt.v_mask);
+ return sel;
+}
+
+static int da9055_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int ret;
+
+ /*
+ * The regulator register set A/B is not selected through a GPIO, therefore
+ * we use the default register set A for voltage ramping.
+ */
+ if (regulator->reg_rselect == NO_GPIO) {
+ /* Select register set A */
+ ret = da9055_reg_update(regulator->da9055, info->conf.reg,
+ info->conf.sel_mask, DA9055_SEL_REG_A);
+ if (ret < 0)
+ return ret;
+
+ /* Set the voltage */
+ return da9055_reg_update(regulator->da9055, info->volt.reg_a,
+ info->volt.v_mask, selector);
+ }
+
+ /*
+ * Here the regulator register set A/B is selected through a GPIO.
+ * Therefore we first determine the selected register set and then
+ * set the desired voltage for that register set.
+ */
+ ret = da9055_reg_read(regulator->da9055, info->conf.reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= info->conf.sel_mask;
+
+ /* Set the voltage */
+ if (ret == DA9055_REGUALTOR_SET_A)
+ return da9055_reg_update(regulator->da9055, info->volt.reg_a,
+ info->volt.v_mask, selector);
+ else
+ return da9055_reg_update(regulator->da9055, info->volt.reg_b,
+ info->volt.v_mask, selector);
+}
+
+static int da9055_regulator_set_suspend_voltage(struct regulator_dev *rdev,
+ int uV)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+ int ret;
+
+ /* Select register set B for suspend voltage ramping. */
+ if (regulator->reg_rselect == NO_GPIO) {
+ ret = da9055_reg_update(regulator->da9055, info->conf.reg,
+ info->conf.sel_mask, DA9055_SEL_REG_B);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = regulator_map_voltage_linear(rdev, uV, uV);
+ if (ret < 0)
+ return ret;
+
+ return da9055_reg_update(regulator->da9055, info->volt.reg_b,
+ info->volt.v_mask, ret);
+}
+
+static int da9055_suspend_enable(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+
+ /* Select register set B for voltage ramping. */
+ if (regulator->reg_rselect == NO_GPIO)
+ return da9055_reg_update(regulator->da9055, info->conf.reg,
+ info->conf.sel_mask, DA9055_SEL_REG_B);
+ else
+ return 0;
+}
+
+static int da9055_suspend_disable(struct regulator_dev *rdev)
+{
+ struct da9055_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9055_regulator_info *info = regulator->info;
+
+ /* Deselect register set B. */
+ if (regulator->reg_rselect == NO_GPIO)
+ return da9055_reg_update(regulator->da9055, info->conf.reg,
+ info->conf.sel_mask, DA9055_SEL_REG_A);
+ else
+ return 0;
+}
+
+static struct regulator_ops da9055_buck_ops = {
+ .get_mode = da9055_buck_get_mode,
+ .set_mode = da9055_buck_set_mode,
+
+ .get_current_limit = da9055_buck_get_current_limit,
+ .set_current_limit = da9055_buck_set_current_limit,
+
+ .get_voltage_sel = da9055_regulator_get_voltage_sel,
+ .set_voltage_sel = da9055_regulator_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+
+ .set_suspend_voltage = da9055_regulator_set_suspend_voltage,
+ .set_suspend_enable = da9055_suspend_enable,
+ .set_suspend_disable = da9055_suspend_disable,
+ .set_suspend_mode = da9055_buck_set_mode,
+};
+
+static struct regulator_ops da9055_ldo_ops = {
+ .get_mode = da9055_ldo_get_mode,
+ .set_mode = da9055_ldo_set_mode,
+
+ .get_voltage_sel = da9055_regulator_get_voltage_sel,
+ .set_voltage_sel = da9055_regulator_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+
+ .set_suspend_voltage = da9055_regulator_set_suspend_voltage,
+ .set_suspend_enable = da9055_suspend_enable,
+ .set_suspend_disable = da9055_suspend_disable,
+ .set_suspend_mode = da9055_ldo_set_mode,
+
+};
+
+#define DA9055_LDO(_id, step, min, max, vbits, voffset) \
+{\
+ .reg_desc = {\
+ .name = #_id,\
+ .ops = &da9055_ldo_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = DA9055_ID_##_id,\
+ .n_voltages = (max - min) / step + 1 + (voffset), \
+ .enable_reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
+ .enable_mask = 1, \
+ .min_uV = (min) * 1000,\
+ .uV_step = (step) * 1000,\
+ .linear_min_sel = (voffset),\
+ .owner = THIS_MODULE,\
+ },\
+ .conf = {\
+ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
+ .sel_mask = (1 << 4),\
+ .en_mask = 1,\
+ },\
+ .volt = {\
+ .reg_a = DA9055_REG_VBCORE_A + DA9055_ID_##_id, \
+ .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
+ .sl_shift = 7,\
+ .v_mask = (1 << (vbits)) - 1,\
+ .v_shift = (vbits),\
+ },\
+}
+
+#define DA9055_BUCK(_id, step, min, max, vbits, voffset, mbits, sbits) \
+{\
+ .reg_desc = {\
+ .name = #_id,\
+ .ops = &da9055_buck_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = DA9055_ID_##_id,\
+ .n_voltages = (max - min) / step + 1 + (voffset), \
+ .enable_reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
+ .enable_mask = 1,\
+ .min_uV = (min) * 1000,\
+ .uV_step = (step) * 1000,\
+ .linear_min_sel = (voffset),\
+ .owner = THIS_MODULE,\
+ },\
+ .conf = {\
+ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \
+ .sel_mask = (1 << 4),\
+ .en_mask = 1,\
+ },\
+ .volt = {\
+ .reg_a = DA9055_REG_VBCORE_A + DA9055_ID_##_id, \
+ .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
+ .sl_shift = 7,\
+ .v_mask = (1 << (vbits)) - 1,\
+ .v_shift = (vbits),\
+ },\
+ .mode = {\
+ .reg = DA9055_REG_BCORE_MODE,\
+ .mask = (mbits),\
+ .shift = (sbits),\
+ },\
+}
+
+static struct da9055_regulator_info da9055_regulator_info[] = {
+ DA9055_BUCK(BUCK1, 25, 725, 2075, 6, 9, 0xc, 2),
+ DA9055_BUCK(BUCK2, 25, 925, 2500, 6, 0, 3, 0),
+ DA9055_LDO(LDO1, 50, 900, 3300, 6, 2),
+ DA9055_LDO(LDO2, 50, 900, 3300, 6, 3),
+ DA9055_LDO(LDO3, 50, 900, 3300, 6, 2),
+ DA9055_LDO(LDO4, 50, 900, 3300, 6, 2),
+ DA9055_LDO(LDO5, 50, 900, 2750, 6, 2),
+ DA9055_LDO(LDO6, 20, 900, 3300, 7, 0),
+};
+
+/*
+ * Configures the regulator to be controlled through either GPIO 1 or 2.
+ * GPIO can control regulator state and/or select the regulator register
+ * set A/B for voltage ramping.
+ */
+static __devinit int da9055_gpio_init(struct da9055_regulator *regulator,
+ struct regulator_config *config,
+ struct da9055_pdata *pdata, int id)
+{
+ struct da9055_regulator_info *info = regulator->info;
+ int ret = 0;
+
+ if (pdata->gpio_ren && pdata->gpio_ren[id]) {
+ char name[18];
+ int gpio_mux = pdata->gpio_ren[id];
+
+ config->ena_gpio = pdata->ena_gpio[id];
+ config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+ config->ena_gpio_invert = 1;
+
+ /*
+ * GPI pin is muxed with regulator to control the
+ * regulator state.
+ */
+ sprintf(name, "DA9055 GPI %d", gpio_mux);
+ ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
+ name);
+ if (ret < 0)
+ goto err;
+
+ /*
+ * Let the regulator know that its state is controlled
+ * through GPI.
+ */
+ ret = da9055_reg_update(regulator->da9055, info->conf.reg,
+ DA9055_E_GPI_MASK,
+ pdata->reg_ren[id]
+ << DA9055_E_GPI_SHIFT);
+ if (ret < 0)
+ goto err;
+ }
+
+ if (pdata->gpio_rsel && pdata->gpio_rsel[id]) {
+ char name[18];
+ int gpio_mux = pdata->gpio_rsel[id];
+
+ regulator->reg_rselect = pdata->reg_rsel[id];
+
+ /*
+ * GPI pin is muxed with regulator to select the
+ * regulator register set A/B for voltage ramping.
+ */
+ sprintf(name, "DA9055 GPI %d", gpio_mux);
+ ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
+ name);
+ if (ret < 0)
+ goto err;
+
+ /*
+ * Let the regulator know that its register set A/B
+ * will be selected through GPI for voltage ramping.
+ */
+ ret = da9055_reg_update(regulator->da9055, info->conf.reg,
+ DA9055_V_GPI_MASK,
+ pdata->reg_rsel[id]
+ << DA9055_V_GPI_SHIFT);
+ }
+
+err:
+ return ret;
+}
+
+static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
+{
+ struct da9055_regulator *regulator = data;
+
+ regulator_notifier_call_chain(regulator->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static inline struct da9055_regulator_info *find_regulator_info(int id)
+{
+ struct da9055_regulator_info *info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da9055_regulator_info); i++) {
+ info = &da9055_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+
+ return NULL;
+}
+
+static int __devinit da9055_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config config = { };
+ struct da9055_regulator *regulator;
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_pdata *pdata = da9055->dev->platform_data;
+ int ret, irq;
+
+ if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
+ return -ENODEV;
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator),
+ GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ regulator->info = find_regulator_info(pdev->id);
+ if (regulator->info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+
+ regulator->da9055 = da9055;
+ config.dev = &pdev->dev;
+ config.driver_data = regulator;
+ config.regmap = da9055->regmap;
+
+ if (pdata && pdata->regulators)
+ config.init_data = pdata->regulators[pdev->id];
+
+ ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
+ if (ret < 0)
+ return ret;
+
+ regulator->rdev = regulator_register(&regulator->info->reg_desc,
+ &config);
+ if (IS_ERR(regulator->rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ regulator->info->reg_desc.name);
+ ret = PTR_ERR(regulator->rdev);
+ return ret;
+ }
+
+ /* Only LDO 5 and 6 have the over-current interrupt */
+ if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
+ irq = platform_get_irq_byname(pdev, "REGULATOR");
+ irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ da9055_ldo5_6_oc_irq,
+ IRQF_TRIGGER_HIGH |
+ IRQF_ONESHOT |
+ IRQF_PROBE_SHARED,
+ pdev->name, regulator);
+ if (ret != 0) {
+ if (ret != -EBUSY) {
+ dev_err(&pdev->dev,
+ "Failed to request Regulator IRQ %d: %d\n",
+ irq, ret);
+ goto err_regulator;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, regulator);
+
+ return 0;
+
+err_regulator:
+ regulator_unregister(regulator->rdev);
+ return ret;
+}
+
+static int __devexit da9055_regulator_remove(struct platform_device *pdev)
+{
+ struct da9055_regulator *regulator = platform_get_drvdata(pdev);
+
+ regulator_unregister(regulator->rdev);
+
+ return 0;
+}
+
+static struct platform_driver da9055_regulator_driver = {
+ .probe = da9055_regulator_probe,
+ .remove = __devexit_p(da9055_regulator_remove),
+ .driver = {
+ .name = "da9055-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9055_regulator_init(void)
+{
+ return platform_driver_register(&da9055_regulator_driver);
+}
+subsys_initcall(da9055_regulator_init);
+
+static void __exit da9055_regulator_exit(void)
+{
+ platform_driver_unregister(&da9055_regulator_driver);
+}
+module_exit(da9055_regulator_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Power Regulator driver for Dialog DA9055 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-regulator");
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 359f8d18fc3..219d162b651 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -412,7 +412,7 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
},
};
-static __devinit int db8500_regulator_register(struct platform_device *pdev,
+static int db8500_regulator_register(struct platform_device *pdev,
struct regulator_init_data *init_data,
int id,
struct device_node *np)
@@ -474,7 +474,7 @@ static struct of_regulator_match db8500_regulator_matches[] = {
{ .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
};
-static __devinit int
+static int
db8500_regulator_of_probe(struct platform_device *pdev,
struct device_node *np)
{
@@ -491,7 +491,7 @@ db8500_regulator_of_probe(struct platform_device *pdev,
return 0;
}
-static int __devinit db8500_regulator_probe(struct platform_device *pdev)
+static int db8500_regulator_probe(struct platform_device *pdev)
{
struct regulator_init_data *db8500_init_data =
dev_get_platdata(&pdev->dev);
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index f2e5ecdc586..261f3d2299b 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -173,7 +173,7 @@ int __attribute__((weak)) dbx500_regulator_testcase(
return 0;
}
-int __devinit
+int
ux500_regulator_debug_init(struct platform_device *pdev,
struct dbx500_regulator_info *regulator_info,
int num_regulators)
@@ -230,7 +230,7 @@ exit_no_debugfs:
return -ENOMEM;
}
-int __devexit ux500_regulator_debug_exit(void)
+int ux500_regulator_debug_exit(void)
{
debugfs_remove_recursive(rdebug.dir);
kfree(rdebug.state_after_suspend);
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 03a1d7c11ef..df9f42524ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -37,7 +37,7 @@ static struct regulator_desc dummy_desc = {
.ops = &dummy_ops,
};
-static int __devinit dummy_regulator_probe(struct platform_device *pdev)
+static int dummy_regulator_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
int ret;
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 339f4d732e9..9165b0c40ed 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -230,7 +230,7 @@ static struct regmap_config fan53555_regmap_config = {
.val_bits = 8,
};
-static int __devinit fan53555_regulator_probe(struct i2c_client *client,
+static int fan53555_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct fan53555_device_info *di;
@@ -293,7 +293,7 @@ static int __devinit fan53555_regulator_probe(struct i2c_client *client,
}
-static int __devexit fan53555_regulator_remove(struct i2c_client *client)
+static int fan53555_regulator_remove(struct i2c_client *client)
{
struct fan53555_device_info *di = i2c_get_clientdata(client);
@@ -311,7 +311,7 @@ static struct i2c_driver fan53555_regulator_driver = {
.name = "fan53555-regulator",
},
.probe = fan53555_regulator_probe,
- .remove = __devexit_p(fan53555_regulator_remove),
+ .remove = fan53555_regulator_remove,
.id_table = fan53555_id,
};
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 185468c4d38..48d5b7608b0 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -134,7 +134,7 @@ static struct regulator_ops fixed_voltage_ops = {
.list_voltage = fixed_voltage_list_voltage,
};
-static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
+static int reg_fixed_voltage_probe(struct platform_device *pdev)
{
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
@@ -234,7 +234,7 @@ err:
return ret;
}
-static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
+static int reg_fixed_voltage_remove(struct platform_device *pdev)
{
struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(of, fixed_of_match);
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
- .remove = __devexit_p(reg_fixed_voltage_remove),
+ .remove = reg_fixed_voltage_remove,
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 8b5944f2d7d..8ae288fc150 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -28,9 +28,12 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/gpio-regulator.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
struct gpio_regulator_data {
struct regulator_desc desc;
@@ -79,7 +82,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value(data->gpios[ptr].gpio, state);
+ gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
}
data->state = target;
@@ -116,7 +119,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
for (ptr = 0; ptr < data->nr_gpios; ptr++) {
state = (target & (1 << ptr)) >> ptr;
- gpio_set_value(data->gpios[ptr].gpio, state);
+ gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
}
data->state = target;
@@ -129,18 +132,108 @@ static struct regulator_ops gpio_regulator_voltage_ops = {
.list_voltage = gpio_regulator_list_voltage,
};
+struct gpio_regulator_config *
+of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
+{
+ struct gpio_regulator_config *config;
+ struct property *prop;
+ const char *regtype;
+ int proplen, gpio, i;
+
+ config = devm_kzalloc(dev,
+ sizeof(struct gpio_regulator_config),
+ GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->init_data = of_get_regulator_init_data(dev, np);
+ if (!config->init_data)
+ return ERR_PTR(-EINVAL);
+
+ config->supply_name = config->init_data->constraints.name;
+
+ if (of_property_read_bool(np, "enable-active-high"))
+ config->enable_high = true;
+
+ if (of_property_read_bool(np, "enable-at-boot"))
+ config->enabled_at_boot = true;
+
+ of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
+
+ config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+
+ /* Fetch GPIOs. */
+ for (i = 0; ; i++)
+ if (of_get_named_gpio(np, "gpios", i) < 0)
+ break;
+ config->nr_gpios = i;
+
+ config->gpios = devm_kzalloc(dev,
+ sizeof(struct gpio) * config->nr_gpios,
+ GFP_KERNEL);
+ if (!config->gpios)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < config->nr_gpios; i++) {
+ gpio = of_get_named_gpio(np, "gpios", i);
+ if (gpio < 0)
+ break;
+ config->gpios[i].gpio = gpio;
+ }
+
+ /* Fetch states. */
+ prop = of_find_property(np, "states", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'states' property found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ proplen = prop->length / sizeof(int);
+
+ config->states = devm_kzalloc(dev,
+ sizeof(struct gpio_regulator_state)
+ * (proplen / 2),
+ GFP_KERNEL);
+ if (!config->states)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < proplen / 2; i++) {
+ config->states[i].value =
+ be32_to_cpup((int *)prop->value + (i * 2));
+ config->states[i].gpios =
+ be32_to_cpup((int *)prop->value + (i * 2 + 1));
+ }
+ config->nr_states = i;
+
+ of_property_read_string(np, "regulator-type", &regtype);
+
+ if (!strncmp("voltage", regtype, 7))
+ config->type = REGULATOR_VOLTAGE;
+ else if (!strncmp("current", regtype, 7))
+ config->type = REGULATOR_CURRENT;
+
+ return config;
+}
+
static struct regulator_ops gpio_regulator_current_ops = {
.get_current_limit = gpio_regulator_get_value,
.set_current_limit = gpio_regulator_set_current_limit,
};
-static int __devinit gpio_regulator_probe(struct platform_device *pdev)
+static int gpio_regulator_probe(struct platform_device *pdev)
{
struct gpio_regulator_config *config = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
int ptr, ret, state;
+ if (np) {
+ config = of_get_gpio_regulator_config(&pdev->dev, np);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
+ }
+
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
if (drvdata == NULL) {
@@ -215,6 +308,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
cfg.driver_data = drvdata;
+ cfg.of_node = np;
if (config->enable_gpio >= 0)
cfg.ena_gpio = config->enable_gpio;
@@ -254,7 +348,7 @@ err:
return ret;
}
-static int __devexit gpio_regulator_remove(struct platform_device *pdev)
+static int gpio_regulator_remove(struct platform_device *pdev)
{
struct gpio_regulator_data *drvdata = platform_get_drvdata(pdev);
@@ -270,12 +364,20 @@ static int __devexit gpio_regulator_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id regulator_gpio_of_match[] = {
+ { .compatible = "regulator-gpio", },
+ {},
+};
+#endif
+
static struct platform_driver gpio_regulator_driver = {
.probe = gpio_regulator_probe,
- .remove = __devexit_p(gpio_regulator_remove),
+ .remove = gpio_regulator_remove,
.driver = {
.name = "gpio-regulator",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(regulator_gpio_of_match),
},
};
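A note on the binding parsed by of_get_gpio_regulator_config() above: the "states" property is a list of <value gpio-bitmask> pairs, and the set_voltage/set_current_limit paths simply drive each GPIO in the "gpios" list to the corresponding bit of the chosen bitmask. The standalone sketch below models only that mapping; it is not driver code, and demo_state/demo_apply_state are invented names.

#include <stdio.h>

/*
 * Simplified model of one <value gpio-bitmask> pair from the "states"
 * property parsed above. All names here are illustrative only.
 */
struct demo_state {
        int value;      /* voltage in uV (or current in uA) */
        int gpios;      /* bit n drives the n-th GPIO in the "gpios" list */
};

/*
 * Print the level each GPIO would be driven to for a requested value,
 * mirroring the bit-walk in gpio_regulator_set_voltage().
 */
static int demo_apply_state(const struct demo_state *states, int nr_states,
                            int nr_gpios, int req_value)
{
        int i, ptr;

        for (i = 0; i < nr_states; i++) {
                if (states[i].value != req_value)
                        continue;
                for (ptr = 0; ptr < nr_gpios; ptr++)
                        printf("gpio[%d] -> %d\n", ptr,
                               (states[i].gpios >> ptr) & 1);
                return 0;
        }
        return -1;      /* no state provides the requested value */
}

int main(void)
{
        /* Two GPIOs, four selectable voltages: <value gpios> pairs. */
        const struct demo_state states[] = {
                { 1200000, 0x0 }, { 1500000, 0x1 },
                { 1800000, 0x2 }, { 2800000, 0x3 },
        };

        return demo_apply_state(states, 4, 2, 1800000) ? 1 : 0;
}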
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index d8ecf49a577..d1e5bee2a26 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -106,7 +106,7 @@ static const struct regulator_desc isl_rd[] = {
},
};
-static int __devinit isl6271a_probe(struct i2c_client *i2c,
+static int isl6271a_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct regulator_config config = { };
@@ -151,7 +151,7 @@ error:
return err;
}
-static int __devexit isl6271a_remove(struct i2c_client *i2c)
+static int isl6271a_remove(struct i2c_client *i2c)
{
struct isl_pmic *pmic = i2c_get_clientdata(i2c);
int i;
@@ -174,7 +174,7 @@ static struct i2c_driver isl6271a_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = isl6271a_probe,
- .remove = __devexit_p(isl6271a_remove),
+ .remove = isl6271a_remove,
.id_table = isl6271a_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 7c6e3b8ff48..5f68ff11a29 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -386,7 +386,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
return ret;
}
-static int __devinit setup_regulators(struct lp3971 *lp3971,
+static int setup_regulators(struct lp3971 *lp3971,
struct lp3971_platform_data *pdata)
{
int i, err;
@@ -429,7 +429,7 @@ err_nomem:
return err;
}
-static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
+static int lp3971_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lp3971 *lp3971;
@@ -472,7 +472,7 @@ err_detect:
return ret;
}
-static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
+static int lp3971_i2c_remove(struct i2c_client *i2c)
{
struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
int i;
@@ -498,7 +498,7 @@ static struct i2c_driver lp3971_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3971_i2c_probe,
- .remove = __devexit_p(lp3971_i2c_remove),
+ .remove = lp3971_i2c_remove,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 3cdc755d9b2..69c42c318b8 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -481,7 +481,7 @@ static const struct regulator_desc regulators[] = {
},
};
-static int __devinit setup_regulators(struct lp3972 *lp3972,
+static int setup_regulators(struct lp3972 *lp3972,
struct lp3972_platform_data *pdata)
{
int i, err;
@@ -523,7 +523,7 @@ err_nomem:
return err;
}
-static int __devinit lp3972_i2c_probe(struct i2c_client *i2c,
+static int lp3972_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lp3972 *lp3972;
@@ -569,7 +569,7 @@ err_detect:
return ret;
}
-static int __devexit lp3972_i2c_remove(struct i2c_client *i2c)
+static int lp3972_i2c_remove(struct i2c_client *i2c)
{
struct lp3972 *lp3972 = i2c_get_clientdata(i2c);
int i;
@@ -594,7 +594,7 @@ static struct i2c_driver lp3972_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lp3972_i2c_probe,
- .remove = __devexit_p(lp3972_i2c_remove),
+ .remove = lp3972_i2c_remove,
.id_table = lp3972_i2c_id,
};
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 708f4b6a17d..9289ead715c 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -893,7 +893,7 @@ err_dev:
return ret;
}
-static int __devexit lp872x_remove(struct i2c_client *cl)
+static int lp872x_remove(struct i2c_client *cl)
{
struct lp872x *lp = i2c_get_clientdata(cl);
@@ -914,7 +914,7 @@ static struct i2c_driver lp872x_driver = {
.owner = THIS_MODULE,
},
.probe = lp872x_probe,
- .remove = __devexit_p(lp872x_remove),
+ .remove = lp872x_remove,
.id_table = lp872x_ids,
};
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index ba3e0aa402d..aef3f2b0c5e 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -429,18 +429,6 @@ static struct regulator_desc lp8788_buck_desc[] = {
},
};
-static int _gpio_request(struct lp8788_buck *buck, int gpio, char *name)
-{
- struct device *dev = buck->lp->dev;
-
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio: %d\n", gpio);
- return -EINVAL;
- }
-
- return devm_gpio_request_one(dev, gpio, DVS_LOW, name);
-}
-
static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
enum lp8788_buck_id id)
{
@@ -452,7 +440,8 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
switch (id) {
case BUCK1:
gpio = pdata->buck1_dvs->gpio;
- ret = _gpio_request(buck, gpio, b1_name);
+ ret = devm_gpio_request_one(buck->lp->dev, gpio, DVS_LOW,
+ b1_name);
if (ret)
return ret;
@@ -461,7 +450,8 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
case BUCK2:
for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) {
gpio = pdata->buck2_dvs->gpio[i];
- ret = _gpio_request(buck, gpio, b2_name[i]);
+ ret = devm_gpio_request_one(buck->lp->dev, gpio,
+ DVS_LOW, b2_name[i]);
if (ret)
return ret;
}
@@ -504,7 +494,7 @@ set_default_dvs_mode:
default_dvs_mode[id]);
}
-static __devinit int lp8788_buck_probe(struct platform_device *pdev)
+static int lp8788_buck_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
int id = pdev->id;
@@ -542,7 +532,7 @@ static __devinit int lp8788_buck_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lp8788_buck_remove(struct platform_device *pdev)
+static int lp8788_buck_remove(struct platform_device *pdev)
{
struct lp8788_buck *buck = platform_get_drvdata(pdev);
@@ -554,7 +544,7 @@ static int __devexit lp8788_buck_remove(struct platform_device *pdev)
static struct platform_driver lp8788_buck_driver = {
.probe = lp8788_buck_probe,
- .remove = __devexit_p(lp8788_buck_remove),
+ .remove = lp8788_buck_remove,
.driver = {
.name = LP8788_DEV_BUCK,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 6796eeb47dc..3792741708c 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -126,7 +126,7 @@ struct lp8788_ldo {
};
/* DLDO 1, 2, 3, 9 voltage table */
-const int lp8788_dldo1239_vtbl[] = {
+static const int lp8788_dldo1239_vtbl[] = {
1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
2600000, 2700000, 2800000, 2900000, 3000000, 2850000, 2850000, 2850000,
2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
@@ -662,14 +662,6 @@ static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
[EN_DLDO7] = LP8788_EN_SEL_DLDO7_M,
[EN_DLDO911] = LP8788_EN_SEL_DLDO911_M,
};
- u8 val[] = {
- [EN_ALDO1] = 0 << 5,
- [EN_ALDO234] = 0 << 4,
- [EN_ALDO5] = 0 << 3,
- [EN_ALDO7] = 0 << 2,
- [EN_DLDO7] = 0 << 1,
- [EN_DLDO911] = 0 << 0,
- };
switch (id) {
case DLDO7:
@@ -708,11 +700,10 @@ static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
return ret;
set_default_ldo_enable_mode:
- return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id],
- val[enable_id]);
+ return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id], 0);
}
-static __devinit int lp8788_dldo_probe(struct platform_device *pdev)
+static int lp8788_dldo_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
int id = pdev->id;
@@ -749,7 +740,7 @@ static __devinit int lp8788_dldo_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lp8788_dldo_remove(struct platform_device *pdev)
+static int lp8788_dldo_remove(struct platform_device *pdev)
{
struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
@@ -761,14 +752,14 @@ static int __devexit lp8788_dldo_remove(struct platform_device *pdev)
static struct platform_driver lp8788_dldo_driver = {
.probe = lp8788_dldo_probe,
- .remove = __devexit_p(lp8788_dldo_remove),
+ .remove = lp8788_dldo_remove,
.driver = {
.name = LP8788_DEV_DLDO,
.owner = THIS_MODULE,
},
};
-static __devinit int lp8788_aldo_probe(struct platform_device *pdev)
+static int lp8788_aldo_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
int id = pdev->id;
@@ -805,7 +796,7 @@ static __devinit int lp8788_aldo_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lp8788_aldo_remove(struct platform_device *pdev)
+static int lp8788_aldo_remove(struct platform_device *pdev)
{
struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
@@ -817,7 +808,7 @@ static int __devexit lp8788_aldo_remove(struct platform_device *pdev)
static struct platform_driver lp8788_aldo_driver = {
.probe = lp8788_aldo_probe,
- .remove = __devexit_p(lp8788_aldo_remove),
+ .remove = lp8788_aldo_remove,
.driver = {
.name = LP8788_DEV_ALDO,
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index f67af3c1b96..8c5a54f541b 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -44,6 +44,9 @@ struct max1586_data {
unsigned int min_uV;
unsigned int max_uV;
+ unsigned int v3_curr_sel;
+ unsigned int v6_curr_sel;
+
struct regulator_dev *rdev[0];
};
@@ -63,31 +66,60 @@ static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
* R24 and R25=100kOhm as described in the data sheet.
* The gain is approximately: 1 + R24/R25 + R24/185.5kOhm
*/
+static int max1586_v3_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct max1586_data *max1586 = rdev_get_drvdata(rdev);
+
+ return max1586->v3_curr_sel;
+}
+
static int max1586_v3_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
+ int ret;
u8 v3_prog;
dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
regulator_list_voltage_linear(rdev, selector) / 1000);
v3_prog = I2C_V3_SELECT | (u8) selector;
- return i2c_smbus_write_byte(client, v3_prog);
+ ret = i2c_smbus_write_byte(client, v3_prog);
+ if (ret)
+ return ret;
+
+ max1586->v3_curr_sel = selector;
+
+ return 0;
+}
+
+static int max1586_v6_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct max1586_data *max1586 = rdev_get_drvdata(rdev);
+
+ return max1586->v6_curr_sel;
}
static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
- struct i2c_client *client = rdev_get_drvdata(rdev);
+ struct max1586_data *max1586 = rdev_get_drvdata(rdev);
+ struct i2c_client *client = max1586->client;
u8 v6_prog;
+ int ret;
dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
rdev->desc->volt_table[selector] / 1000);
v6_prog = I2C_V6_SELECT | (u8) selector;
- return i2c_smbus_write_byte(client, v6_prog);
+ ret = i2c_smbus_write_byte(client, v6_prog);
+ if (ret)
+ return ret;
+
+ max1586->v6_curr_sel = selector;
+
+ return 0;
}
/*
@@ -95,12 +127,14 @@ static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev,
* the set up value.
*/
static struct regulator_ops max1586_v3_ops = {
+ .get_voltage_sel = max1586_v3_get_voltage_sel,
.set_voltage_sel = max1586_v3_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
};
static struct regulator_ops max1586_v6_ops = {
+ .get_voltage_sel = max1586_v6_get_voltage_sel,
.set_voltage_sel = max1586_v6_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
};
@@ -125,7 +159,7 @@ static struct regulator_desc max1586_reg[] = {
},
};
-static int __devinit max1586_pmic_probe(struct i2c_client *client,
+static int max1586_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
@@ -148,6 +182,10 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
max1586->min_uV = MAX1586_V3_MIN_UV / 1000 * pdata->v3_gain / 1000;
max1586->max_uV = MAX1586_V3_MAX_UV / 1000 * pdata->v3_gain / 1000;
+ /* Initialize curr_sel with the power-up default selectors */
+ max1586->v3_curr_sel = 24; /* 1.3V */
+ max1586->v6_curr_sel = 0;
+
rdev = max1586->rdev;
for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
id = pdata->subdevs[i].id;
@@ -188,7 +226,7 @@ err:
return ret;
}
-static int __devexit max1586_pmic_remove(struct i2c_client *client)
+static int max1586_pmic_remove(struct i2c_client *client)
{
struct max1586_data *max1586 = i2c_get_clientdata(client);
int i;
@@ -207,7 +245,7 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
- .remove = __devexit_p(max1586_pmic_remove),
+ .remove = max1586_pmic_remove,
.driver = {
.name = "max1586",
.owner = THIS_MODULE,
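The max1586 changes above add v3_curr_sel/v6_curr_sel because the chip is programmed with a bare SMBus byte write and the driver never reads the selector back; get_voltage_sel therefore returns a cached value that is only updated after a successful write. Below is a minimal standalone sketch of that cache-on-success pattern (demo_dev, demo_bus_write and demo_set_voltage_sel are invented names, not driver APIs).

#include <stdio.h>

/* Illustrative only: models the v3_curr_sel bookkeeping above. */
struct demo_dev {
        unsigned int curr_sel;  /* last selector known to be in hardware */
};

static int demo_bus_write(unsigned char byte)
{
        /* stand-in for i2c_smbus_write_byte(); pretend it succeeded */
        printf("wrote 0x%02x\n", byte);
        return 0;
}

static int demo_set_voltage_sel(struct demo_dev *dev, unsigned int sel)
{
        int ret = demo_bus_write((unsigned char)sel);

        if (ret)
                return ret;     /* cache untouched on failure */

        dev->curr_sel = sel;    /* cache only after a successful write */
        return 0;
}

static unsigned int demo_get_voltage_sel(const struct demo_dev *dev)
{
        return dev->curr_sel;   /* no readback path, return the cache */
}

int main(void)
{
        struct demo_dev dev = { .curr_sel = 24 };       /* power-up default */

        demo_set_voltage_sel(&dev, 30);
        printf("current selector: %u\n", demo_get_voltage_sel(&dev));
        return 0;
}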
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 2a67d08658a..b85040caaea 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -67,8 +67,94 @@ enum max77686_ramp_rate {
struct max77686_data {
struct regulator_dev *rdev[MAX77686_REGULATORS];
+ unsigned int opmode[MAX77686_REGULATORS];
};
+/* Some BUCKs support Normal[ON/OFF] mode during suspend */
+static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+
+ if (rdev->desc->id == MAX77686_BUCK1)
+ val = 0x1;
+ else
+ val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
+
+ max77686->opmode[rdev->desc->id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ val);
+}
+
+/* Some LDOs support [LPM/Normal]ON mode during the suspend state */
+static int max77686_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+ unsigned int val;
+
+ /* BUCK5-9 don't support this feature */
+ if (rdev->desc->id >= MAX77686_BUCK5)
+ return 0;
+
+ switch (mode) {
+ case REGULATOR_MODE_IDLE: /* ON in LP Mode */
+ val = 0x2 << MAX77686_OPMODE_SHIFT;
+ break;
+ case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
+ val = 0x3 << MAX77686_OPMODE_SHIFT;
+ break;
+ default:
+ pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
+ rdev->desc->name, mode);
+ return -EINVAL;
+ }
+
+ max77686->opmode[rdev->desc->id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ val);
+}
+
+/* Some LDOs support LPM-ON/OFF/Normal-ON modes during the suspend state */
+static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ unsigned int val;
+ struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY: /* switch off */
+ val = 0x1 << MAX77686_OPMODE_SHIFT;
+ break;
+ case REGULATOR_MODE_IDLE: /* ON in LP Mode */
+ val = 0x2 << MAX77686_OPMODE_SHIFT;
+ break;
+ case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
+ val = 0x3 << MAX77686_OPMODE_SHIFT;
+ break;
+ default:
+ pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
+ rdev->desc->name, mode);
+ return -EINVAL;
+ }
+
+ max77686->opmode[rdev->desc->id] = val;
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ val);
+}
+
+static int max77686_enable(struct regulator_dev *rdev)
+{
+ struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ max77686->opmode[rdev->desc->id]);
+}
+
static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
{
unsigned int ramp_value = RAMP_RATE_NO_CTRL;
@@ -98,23 +184,49 @@ static struct regulator_ops max77686_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = max77686_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_mode = max77686_set_suspend_mode,
+};
+
+static struct regulator_ops max77686_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77686_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_mode = max77686_ldo_set_suspend_mode,
+};
+
+static struct regulator_ops max77686_buck1_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = max77686_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = max77686_buck_set_suspend_disable,
};
static struct regulator_ops max77686_buck_dvs_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
+ .enable = max77686_enable,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_ramp_delay = max77686_set_ramp_delay,
+ .set_suspend_disable = max77686_buck_set_suspend_disable,
};
#define regulator_desc_ldo(num) { \
@@ -133,9 +245,41 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.enable_mask = MAX77686_OPMODE_MASK \
<< MAX77686_OPMODE_SHIFT, \
}
+#define regulator_desc_lpm_ldo(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
+ .ops = &max77686_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_MINUV, \
+ .uV_step = MAX77686_LDO_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
#define regulator_desc_ldo_low(num) { \
.name = "LDO"#num, \
.id = MAX77686_LDO##num, \
+ .ops = &max77686_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_LOW_MINUV, \
+ .uV_step = MAX77686_LDO_LOW_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
+#define regulator_desc_ldo1_low(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
.ops = &max77686_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -167,7 +311,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
#define regulator_desc_buck1(num) { \
.name = "BUCK"#num, \
.id = MAX77686_BUCK##num, \
- .ops = &max77686_ops, \
+ .ops = &max77686_buck1_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.min_uV = MAX77686_BUCK_MINUV, \
@@ -197,7 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
}
static struct regulator_desc regulators[] = {
- regulator_desc_ldo_low(1),
+ regulator_desc_ldo1_low(1),
regulator_desc_ldo_low(2),
regulator_desc_ldo(3),
regulator_desc_ldo(4),
@@ -206,13 +350,13 @@ static struct regulator_desc regulators[] = {
regulator_desc_ldo_low(7),
regulator_desc_ldo_low(8),
regulator_desc_ldo(9),
- regulator_desc_ldo(10),
- regulator_desc_ldo(11),
- regulator_desc_ldo(12),
+ regulator_desc_lpm_ldo(10),
+ regulator_desc_lpm_ldo(11),
+ regulator_desc_lpm_ldo(12),
regulator_desc_ldo(13),
- regulator_desc_ldo(14),
+ regulator_desc_lpm_ldo(14),
regulator_desc_ldo_low(15),
- regulator_desc_ldo(16),
+ regulator_desc_lpm_ldo(16),
regulator_desc_ldo(17),
regulator_desc_ldo(18),
regulator_desc_ldo(19),
@@ -280,7 +424,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
}
#endif /* CONFIG_OF */
-static __devinit int max77686_pmic_probe(struct platform_device *pdev)
+static int max77686_pmic_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
@@ -314,12 +458,14 @@ static __devinit int max77686_pmic_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.regmap = iodev->regmap;
+ config.driver_data = max77686;
platform_set_drvdata(pdev, max77686);
for (i = 0; i < MAX77686_REGULATORS; i++) {
config.init_data = pdata->regulators[i].initdata;
config.of_node = pdata->regulators[i].of_node;
+ max77686->opmode[i] = regulators[i].enable_mask;
max77686->rdev[i] = regulator_register(&regulators[i], &config);
if (IS_ERR(max77686->rdev[i])) {
ret = PTR_ERR(max77686->rdev[i]);
@@ -337,7 +483,7 @@ err:
return ret;
}
-static int __devexit max77686_pmic_remove(struct platform_device *pdev)
+static int max77686_pmic_remove(struct platform_device *pdev)
{
struct max77686_data *max77686 = platform_get_drvdata(pdev);
int i;
@@ -360,7 +506,7 @@ static struct platform_driver max77686_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max77686_pmic_probe,
- .remove = __devexit_p(max77686_pmic_remove),
+ .remove = max77686_pmic_remove,
.id_table = max77686_pmic_id,
};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 9d540cd02da..3ca14380f22 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -176,7 +176,7 @@ static struct regmap_config max8649_regmap_config = {
.val_bits = 8,
};
-static int __devinit max8649_regulator_probe(struct i2c_client *client,
+static int max8649_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8649_platform_data *pdata = client->dev.platform_data;
@@ -271,7 +271,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
return 0;
}
-static int __devexit max8649_regulator_remove(struct i2c_client *client)
+static int max8649_regulator_remove(struct i2c_client *client)
{
struct max8649_regulator_info *info = i2c_get_clientdata(client);
@@ -291,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, max8649_id);
static struct i2c_driver max8649_driver = {
.probe = max8649_regulator_probe,
- .remove = __devexit_p(max8649_regulator_remove),
+ .remove = max8649_regulator_remove,
.driver = {
.name = "max8649",
},
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 8d531742f59..4d7c635c36c 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -305,7 +305,7 @@ static const struct regulator_desc max8660_reg[] = {
},
};
-static int __devinit max8660_probe(struct i2c_client *client,
+static int max8660_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
@@ -420,7 +420,7 @@ err_out:
return ret;
}
-static int __devexit max8660_remove(struct i2c_client *client)
+static int max8660_remove(struct i2c_client *client)
{
struct max8660 *max8660 = i2c_get_clientdata(client);
int i;
@@ -440,7 +440,7 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
- .remove = __devexit_p(max8660_remove),
+ .remove = max8660_remove,
.driver = {
.name = "max8660",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index af7607515ab..d1a77512d83 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -275,7 +275,7 @@ static inline struct device_node *match_of_node(int index)
}
#endif
-static __devinit int max8907_regulator_probe(struct platform_device *pdev)
+static int max8907_regulator_probe(struct platform_device *pdev)
{
struct max8907 *max8907 = dev_get_drvdata(pdev->dev.parent);
struct max8907_platform_data *pdata = dev_get_platdata(max8907->dev);
@@ -368,7 +368,7 @@ err_unregister_regulator:
return ret;
}
-static __devexit int max8907_regulator_remove(struct platform_device *pdev)
+static int max8907_regulator_remove(struct platform_device *pdev)
{
struct max8907_regulator *pmic = platform_get_drvdata(pdev);
int i;
@@ -385,7 +385,7 @@ static struct platform_driver max8907_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = max8907_regulator_probe,
- .remove = __devexit_p(max8907_regulator_remove),
+ .remove = max8907_regulator_remove,
};
static int __init max8907_regulator_init(void)
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 9bb0be37495..446a8544555 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -17,6 +17,8 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max8925.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#define SD1_DVM_VMIN 850000
#define SD1_DVM_VMAX 1000000
@@ -187,6 +189,34 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.enable_reg = MAX8925_LDOCTL##_id, \
}
+#ifdef CONFIG_OF
+static struct of_regulator_match max8925_regulator_matches[] = {
+ { .name = "SDV1",},
+ { .name = "SDV2",},
+ { .name = "SDV3",},
+ { .name = "LDO1",},
+ { .name = "LDO2",},
+ { .name = "LDO3",},
+ { .name = "LDO4",},
+ { .name = "LDO5",},
+ { .name = "LDO6",},
+ { .name = "LDO7",},
+ { .name = "LDO8",},
+ { .name = "LDO9",},
+ { .name = "LDO10",},
+ { .name = "LDO11",},
+ { .name = "LDO12",},
+ { .name = "LDO13",},
+ { .name = "LDO14",},
+ { .name = "LDO15",},
+ { .name = "LDO16",},
+ { .name = "LDO17",},
+ { .name = "LDO18",},
+ { .name = "LDO19",},
+ { .name = "LDO20",},
+};
+#endif
+
static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_SDV(1, 637.5, 1425, 12.5),
MAX8925_SDV(2, 650, 2225, 25),
@@ -214,7 +244,37 @@ static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_LDO(20, 750, 3900, 50),
};
-static int __devinit max8925_regulator_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int max8925_regulator_dt_init(struct platform_device *pdev,
+ struct max8925_regulator_info *info,
+ struct regulator_config *config,
+ int ridx)
+{
+ struct device_node *nproot, *np;
+ int rcount;
+ nproot = pdev->dev.parent->of_node;
+ if (!nproot)
+ return -ENODEV;
+ np = of_find_node_by_name(nproot, "regulators");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find regulators node\n");
+ return -ENODEV;
+ }
+
+ rcount = of_regulator_match(&pdev->dev, np,
+ &max8925_regulator_matches[ridx], 1);
+ if (rcount < 0)
+ return -ENODEV;
+ config->init_data = max8925_regulator_matches[ridx].init_data;
+ config->of_node = max8925_regulator_matches[ridx].of_node;
+
+ return 0;
+}
+#else
+#define max8925_regulator_dt_init(w, x, y, z) (-1)
+#endif
+
+static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct regulator_init_data *pdata = pdev->dev.platform_data;
@@ -222,7 +282,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
struct max8925_regulator_info *ri;
struct resource *res;
struct regulator_dev *rdev;
- int i;
+ int i, regulator_idx;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
@@ -231,9 +291,12 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
ri = &max8925_regulator_info[i];
- if (ri->vol_reg == res->start)
+ if (ri->vol_reg == res->start) {
+ regulator_idx = i;
break;
+ }
}
+
if (i == ARRAY_SIZE(max8925_regulator_info)) {
dev_err(&pdev->dev, "Failed to find regulator %llu\n",
(unsigned long long)res->start);
@@ -243,9 +306,12 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
ri->chip = chip;
config.dev = &pdev->dev;
- config.init_data = pdata;
config.driver_data = ri;
+ if (max8925_regulator_dt_init(pdev, ri, &config, regulator_idx))
+ if (pdata)
+ config.init_data = pdata;
+
rdev = regulator_register(&ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -257,7 +323,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit max8925_regulator_remove(struct platform_device *pdev)
+static int max8925_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -273,7 +339,7 @@ static struct platform_driver max8925_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = max8925_regulator_probe,
- .remove = __devexit_p(max8925_regulator_remove),
+ .remove = max8925_regulator_remove,
};
static int __init max8925_regulator_init(void)
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 355ca7bad9d..fc7935a19e3 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -126,7 +126,7 @@ static const struct regulator_desc regulator = {
.owner = THIS_MODULE,
};
-static int __devinit max8952_pmic_probe(struct i2c_client *client,
+static int max8952_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -247,7 +247,7 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
return 0;
}
-static int __devexit max8952_pmic_remove(struct i2c_client *client)
+static int max8952_pmic_remove(struct i2c_client *client)
{
struct max8952_data *max8952 = i2c_get_clientdata(client);
struct max8952_platform_data *pdata = max8952->pdata;
@@ -268,7 +268,7 @@ MODULE_DEVICE_TABLE(i2c, max8952_ids);
static struct i2c_driver max8952_pmic_driver = {
.probe = max8952_pmic_probe,
- .remove = __devexit_p(max8952_pmic_remove),
+ .remove = max8952_pmic_remove,
.driver = {
.name = "max8952",
},
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
new file mode 100644
index 00000000000..3ee26387b12
--- /dev/null
+++ b/drivers/regulator/max8973-regulator.c
@@ -0,0 +1,505 @@
+/*
+ * max8973-regulator.c -- Maxim max8973
+ *
+ * Regulator driver for MAXIM 8973 DC-DC step-down switching regulator.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/max8973-regulator.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+/* Register definitions */
+#define MAX8973_VOUT 0x0
+#define MAX8973_VOUT_DVS 0x1
+#define MAX8973_CONTROL1 0x2
+#define MAX8973_CONTROL2 0x3
+#define MAX8973_CHIPID1 0x4
+#define MAX8973_CHIPID2 0x5
+
+#define MAX8973_MAX_VOUT_REG 2
+
+/* MAX8973_VOUT */
+#define MAX8973_VOUT_ENABLE BIT(7)
+#define MAX8973_VOUT_MASK 0x7F
+
+/* MAX8973_VOUT_DVS */
+#define MAX8973_DVS_VOUT_MASK 0x7F
+
+/* MAX8973_CONTROL1 */
+#define MAX8973_SNS_ENABLE BIT(7)
+#define MAX8973_FPWM_EN_M BIT(6)
+#define MAX8973_NFSR_ENABLE BIT(5)
+#define MAX8973_AD_ENABLE BIT(4)
+#define MAX8973_BIAS_ENABLE BIT(3)
+#define MAX8973_FREQSHIFT_9PER BIT(2)
+
+#define MAX8973_RAMP_12mV_PER_US 0x0
+#define MAX8973_RAMP_25mV_PER_US 0x1
+#define MAX8973_RAMP_50mV_PER_US 0x2
+#define MAX8973_RAMP_200mV_PER_US 0x3
+
+/* MAX8973_CONTROL2 */
+#define MAX8973_WDTMR_ENABLE BIT(6)
+#define MAX8973_DISCH_ENBABLE BIT(5)
+#define MAX8973_FT_ENABLE BIT(4)
+
+#define MAX8973_CKKADV_TRIP_DISABLE 0xC
+#define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0
+#define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4
+#define MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS 0x8
+#define MAX8973_CONTROL_CLKADV_TRIP_MASK 0x00030000
+
+#define MAX8973_INDUCTOR_MIN_30_PER 0x0
+#define MAX8973_INDUCTOR_NOMINAL 0x1
+#define MAX8973_INDUCTOR_PLUS_30_PER 0x2
+#define MAX8973_INDUCTOR_PLUS_60_PER 0x3
+#define MAX8973_CONTROL_INDUCTOR_VALUE_MASK 0x00300000
+
+#define MAX8973_MIN_VOLATGE 606250
+#define MAX8973_MAX_VOLATGE 1400000
+#define MAX8973_VOLATGE_STEP 6250
+#define MAX8973_BUCK_N_VOLTAGE 0x80
+
+/* Maxim 8973 chip information */
+struct max8973_chip {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ bool enable_external_control;
+ int dvs_gpio;
+ int lru_index[MAX8973_MAX_VOUT_REG];
+ int curr_vout_val[MAX8973_MAX_VOUT_REG];
+ int curr_vout_reg;
+ int curr_gpio_val;
+ bool valid_dvs_gpio;
+};
+
+/*
+ * find_voltage_set_register: find the VOUT register to use for a new
+ * voltage configuration, based on a least-recently-used (LRU) scheme.
+ * Each VOUT register can hold a different voltage selector, so this
+ * function checks whether any VOUT register already holds the requested
+ * selector.
+ * - If one does, that register is marked as most recently used and the
+ * function returns "found"; the caller does not need to rewrite the
+ * VOUT register, only to set the GPIOs that select it.
+ * - If none does, the least recently used register is chosen for the
+ * new configuration and "not found" is returned; the caller must then
+ * program that VOUT register and set the GPIOs.
+ */
+static bool find_voltage_set_register(struct max8973_chip *tps,
+ int req_vsel, int *vout_reg, int *gpio_val)
+{
+ int i;
+ bool found = false;
+ int new_vout_reg = tps->lru_index[MAX8973_MAX_VOUT_REG - 1];
+ int found_index = MAX8973_MAX_VOUT_REG - 1;
+
+ for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i) {
+ if (tps->curr_vout_val[tps->lru_index[i]] == req_vsel) {
+ new_vout_reg = tps->lru_index[i];
+ found_index = i;
+ found = true;
+ goto update_lru_index;
+ }
+ }
+
+update_lru_index:
+ for (i = found_index; i > 0; i--)
+ tps->lru_index[i] = tps->lru_index[i - 1];
+
+ tps->lru_index[0] = new_vout_reg;
+ *gpio_val = new_vout_reg;
+ *vout_reg = MAX8973_VOUT + new_vout_reg;
+ return found;
+}
+
+static int max8973_dcdc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(max->regmap, max->curr_vout_reg, &data);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d read failed, err = %d\n",
+ max->curr_vout_reg, ret);
+ return ret;
+ }
+ return data & MAX8973_VOUT_MASK;
+}
+
+static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned vsel)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ int ret;
+ bool found = false;
+ int vout_reg = max->curr_vout_reg;
+ int gpio_val = max->curr_gpio_val;
+
+ /*
+ * If GPIOs are available to select the VOUT register, pick the least
+ * recently used register for the new configuration.
+ */
+ if (max->valid_dvs_gpio)
+ found = find_voltage_set_register(max, vsel,
+ &vout_reg, &gpio_val);
+
+ if (!found) {
+ ret = regmap_update_bits(max->regmap, vout_reg,
+ MAX8973_VOUT_MASK, vsel);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d update failed, err %d\n",
+ vout_reg, ret);
+ return ret;
+ }
+ max->curr_vout_reg = vout_reg;
+ max->curr_vout_val[gpio_val] = vsel;
+ }
+
+ /* Select the proper VOUT register via the GPIOs */
+ if (max->valid_dvs_gpio) {
+ gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1);
+ max->curr_gpio_val = gpio_val;
+ }
+ return 0;
+}
+
+static int max8973_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ int ret;
+ int pwm;
+
+ /* Enable force PWM mode in FAST mode only. */
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ pwm = MAX8973_FPWM_EN_M;
+ break;
+
+ case REGULATOR_MODE_NORMAL:
+ pwm = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1,
+ MAX8973_FPWM_EN_M, pwm);
+ if (ret < 0)
+ dev_err(max->dev, "register %d update failed, err %d\n",
+ MAX8973_CONTROL1, ret);
+ return ret;
+}
+
+static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev)
+{
+ struct max8973_chip *max = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(max->regmap, MAX8973_CONTROL1, &data);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d read failed, err %d\n",
+ MAX8973_CONTROL1, ret);
+ return ret;
+ }
+ return (data & MAX8973_FPWM_EN_M) ?
+ REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops max8973_dcdc_ops = {
+ .get_voltage_sel = max8973_dcdc_get_voltage_sel,
+ .set_voltage_sel = max8973_dcdc_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_mode = max8973_dcdc_set_mode,
+ .get_mode = max8973_dcdc_get_mode,
+};
+
+static int __devinit max8973_init_dcdc(struct max8973_chip *max,
+ struct max8973_regulator_platform_data *pdata)
+{
+ int ret;
+ uint8_t control1 = 0;
+ uint8_t control2 = 0;
+
+ if (pdata->control_flags & MAX8973_CONTROL_REMOTE_SENSE_ENABLE)
+ control1 |= MAX8973_SNS_ENABLE;
+
+ if (!(pdata->control_flags & MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE))
+ control1 |= MAX8973_NFSR_ENABLE;
+
+ if (pdata->control_flags & MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE)
+ control1 |= MAX8973_AD_ENABLE;
+
+ if (pdata->control_flags & MAX8973_CONTROL_BIAS_ENABLE)
+ control1 |= MAX8973_BIAS_ENABLE;
+
+ if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE)
+ control1 |= MAX8973_FREQSHIFT_9PER;
+
+ /* Set ramp delay */
+ if (pdata->reg_init_data &&
+ pdata->reg_init_data->constraints.ramp_delay) {
+ if (pdata->reg_init_data->constraints.ramp_delay < 25000)
+ control1 = MAX8973_RAMP_12mV_PER_US;
+ else if (pdata->reg_init_data->constraints.ramp_delay < 50000)
+ control1 = MAX8973_RAMP_25mV_PER_US;
+ else if (pdata->reg_init_data->constraints.ramp_delay < 200000)
+ control1 = MAX8973_RAMP_50mV_PER_US;
+ else
+ control1 = MAX8973_RAMP_200mV_PER_US;
+ } else {
+ control1 = MAX8973_RAMP_12mV_PER_US;
+ max->desc.ramp_delay = 12500;
+ }
+
+ if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE))
+ control2 |= MAX8973_DISCH_ENBABLE;
+
+ /* Clock advance trip configuration */
+ switch (pdata->control_flags & MAX8973_CONTROL_CLKADV_TRIP_MASK) {
+ case MAX8973_CONTROL_CLKADV_TRIP_DISABLED:
+ control2 |= MAX8973_CKKADV_TRIP_DISABLE;
+ break;
+
+ case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US:
+ control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US;
+ break;
+
+ case MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US:
+ control2 |= MAX8973_CKKADV_TRIP_150mV_PER_US;
+ break;
+
+ case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS:
+ control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS;
+ break;
+ }
+
+ /* Configure inductor value */
+ switch (pdata->control_flags & MAX8973_CONTROL_INDUCTOR_VALUE_MASK) {
+ case MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL:
+ control2 |= MAX8973_INDUCTOR_NOMINAL;
+ break;
+
+ case MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER:
+ control2 |= MAX8973_INDUCTOR_MIN_30_PER;
+ break;
+
+ case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER:
+ control2 |= MAX8973_INDUCTOR_PLUS_30_PER;
+ break;
+
+ case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER:
+ control2 |= MAX8973_INDUCTOR_PLUS_60_PER;
+ break;
+ }
+
+ ret = regmap_write(max->regmap, MAX8973_CONTROL1, control1);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d write failed, err = %d",
+ MAX8973_CONTROL1, ret);
+ return ret;
+ }
+
+ ret = regmap_write(max->regmap, MAX8973_CONTROL2, control2);
+ if (ret < 0) {
+ dev_err(max->dev, "register %d write failed, err = %d",
+ MAX8973_CONTROL2, ret);
+ return ret;
+ }
+
+ /* If external control is enabled then disable EN bit */
+ if (max->enable_external_control) {
+ ret = regmap_update_bits(max->regmap, MAX8973_VOUT,
+ MAX8973_VOUT_ENABLE, 0);
+ if (ret < 0)
+ dev_err(max->dev, "register %d update failed, err = %d",
+ MAX8973_VOUT, ret);
+ }
+ return ret;
+}
+
+static const struct regmap_config max8973_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX8973_CHIPID2,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int __devinit max8973_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max8973_regulator_platform_data *pdata;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ struct max8973_chip *max;
+ int ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "No Platform data");
+ return -EIO;
+ }
+
+ max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
+ if (!max) {
+ dev_err(&client->dev, "Memory allocation for max failed\n");
+ return -ENOMEM;
+ }
+
+ max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
+ if (IS_ERR(max->regmap)) {
+ ret = PTR_ERR(max->regmap);
+ dev_err(&client->dev, "regmap init failed, err %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, max);
+ max->dev = &client->dev;
+ max->desc.name = id->name;
+ max->desc.id = 0;
+ max->desc.ops = &max8973_dcdc_ops;
+ max->desc.type = REGULATOR_VOLTAGE;
+ max->desc.owner = THIS_MODULE;
+ max->desc.min_uV = MAX8973_MIN_VOLATGE;
+ max->desc.uV_step = MAX8973_VOLATGE_STEP;
+ max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;
+
+ if (!pdata->enable_ext_control) {
+ max->desc.enable_reg = MAX8973_VOUT;
+ max->desc.enable_mask = MAX8973_VOUT_ENABLE;
+ max8973_dcdc_ops.enable = regulator_enable_regmap;
+ max8973_dcdc_ops.disable = regulator_disable_regmap;
+ max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
+ }
+
+ max->enable_external_control = pdata->enable_ext_control;
+ max->dvs_gpio = pdata->dvs_gpio;
+ max->curr_gpio_val = pdata->dvs_def_state;
+ max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
+ max->lru_index[0] = max->curr_vout_reg;
+ max->valid_dvs_gpio = false;
+
+ if (gpio_is_valid(max->dvs_gpio)) {
+ int gpio_flags;
+ int i;
+
+ gpio_flags = (pdata->dvs_def_state) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ ret = devm_gpio_request_one(&client->dev, max->dvs_gpio,
+ gpio_flags, "max8973-dvs");
+ if (ret) {
+ dev_err(&client->dev,
+ "gpio_request for gpio %d failed, err = %d\n",
+ max->dvs_gpio, ret);
+ return ret;
+ }
+ max->valid_dvs_gpio = true;
+
+ /*
+ * Initialize the LRU index with the VOUT register ids.
+ * Index 0 is the most recently used entry and is set to
+ * max->curr_vout_reg.
+ */
+ for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i)
+ max->lru_index[i] = i;
+ max->lru_index[0] = max->curr_vout_reg;
+ max->lru_index[max->curr_vout_reg] = 0;
+ }
+
+ ret = max8973_init_dcdc(max, pdata);
+ if (ret < 0) {
+ dev_err(max->dev, "Max8973 Init failed, err = %d\n", ret);
+ return ret;
+ }
+
+ config.dev = &client->dev;
+ config.init_data = pdata->reg_init_data;
+ config.driver_data = max;
+ config.of_node = client->dev.of_node;
+ config.regmap = max->regmap;
+
+ /* Register the regulators */
+ rdev = regulator_register(&max->desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(max->dev, "regulator register failed, err %d\n", ret);
+ return ret;
+ }
+
+ max->rdev = rdev;
+ return 0;
+}
+
+static int __devexit max8973_remove(struct i2c_client *client)
+{
+ struct max8973_chip *max = i2c_get_clientdata(client);
+
+ regulator_unregister(max->rdev);
+ return 0;
+}
+
+static const struct i2c_device_id max8973_id[] = {
+ {.name = "max8973",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, max8973_id);
+
+static struct i2c_driver max8973_i2c_driver = {
+ .driver = {
+ .name = "max8973",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8973_probe,
+ .remove = __devexit_p(max8973_remove),
+ .id_table = max8973_id,
+};
+
+static int __init max8973_init(void)
+{
+ return i2c_add_driver(&max8973_i2c_driver);
+}
+subsys_initcall(max8973_init);
+
+static void __exit max8973_cleanup(void)
+{
+ i2c_del_driver(&max8973_i2c_driver);
+}
+module_exit(max8973_cleanup);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("MAX8973 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index e39a0c7260d..df0eafb0dc7 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -24,6 +24,7 @@
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -31,6 +32,7 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
+#include <linux/regulator/of_regulator.h>
struct max8997_data {
struct device *dev;
@@ -933,22 +935,163 @@ static struct regulator_desc regulators[] = {
max8997_charger_fixedstate_ops),
};
-static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
+ struct max8997_platform_data *pdata,
+ struct device_node *pmic_np)
+{
+ int i, gpio;
+
+ for (i = 0; i < 3; i++) {
+ gpio = of_get_named_gpio(pmic_np,
+ "max8997,pmic-buck125-dvs-gpios", i);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+ return -EINVAL;
+ }
+ pdata->buck125_gpios[i] = gpio;
+ }
+ return 0;
+}
+
+static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+ struct max8997_platform_data *pdata)
+{
+ struct device_node *pmic_np, *regulators_np, *reg_np;
+ struct max8997_regulator_data *rdata;
+ unsigned int i, dvs_voltage_nr = 1, ret;
+
+ pmic_np = iodev->dev->of_node;
+ if (!pmic_np) {
+ dev_err(iodev->dev, "could not find pmic sub-node\n");
+ return -ENODEV;
+ }
+
+ regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ if (!regulators_np) {
+ dev_err(iodev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ /* count the number of regulators to be supported in pmic */
+ pdata->num_regulators = 0;
+ for_each_child_of_node(regulators_np, reg_np)
+ pdata->num_regulators++;
+
+ rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rdata) {
+ dev_err(iodev->dev, "could not allocate memory for "
+ "regulator data\n");
+ return -ENOMEM;
+ }
+
+ pdata->regulators = rdata;
+ for_each_child_of_node(regulators_np, reg_np) {
+ for (i = 0; i < ARRAY_SIZE(regulators); i++)
+ if (!of_node_cmp(reg_np->name, regulators[i].name))
+ break;
+
+ if (i == ARRAY_SIZE(regulators)) {
+ dev_warn(iodev->dev, "don't know how to configure "
+ "regulator %s\n", reg_np->name);
+ continue;
+ }
+
+ rdata->id = i;
+ rdata->initdata = of_get_regulator_init_data(
+ iodev->dev, reg_np);
+ rdata->reg_node = reg_np;
+ rdata++;
+ }
+
+ if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
+ pdata->buck1_gpiodvs = true;
+
+ if (of_get_property(pmic_np, "max8997,pmic-buck2-uses-gpio-dvs", NULL))
+ pdata->buck2_gpiodvs = true;
+
+ if (of_get_property(pmic_np, "max8997,pmic-buck5-uses-gpio-dvs", NULL))
+ pdata->buck5_gpiodvs = true;
+
+ if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+ pdata->buck5_gpiodvs) {
+ ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+ if (ret)
+ return -EINVAL;
+
+ if (of_property_read_u32(pmic_np,
+ "max8997,pmic-buck125-default-dvs-idx",
+ &pdata->buck125_default_idx)) {
+ pdata->buck125_default_idx = 0;
+ } else {
+ if (pdata->buck125_default_idx >= 8) {
+ pdata->buck125_default_idx = 0;
+ dev_info(iodev->dev, "invalid value for "
+ "default dvs index, using 0 instead\n");
+ }
+ }
+
+ if (of_get_property(pmic_np,
+ "max8997,pmic-ignore-gpiodvs-side-effect", NULL))
+ pdata->ignore_gpiodvs_side_effect = true;
+
+ dvs_voltage_nr = 8;
+ }
+
+ if (of_property_read_u32_array(pmic_np,
+ "max8997,pmic-buck1-dvs-voltage",
+ pdata->buck1_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck1 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_array(pmic_np,
+ "max8997,pmic-buck2-dvs-voltage",
+ pdata->buck2_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck2 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_array(pmic_np,
+ "max8997,pmic-buck5-dvs-voltage",
+ pdata->buck5_voltage, dvs_voltage_nr)) {
+ dev_err(iodev->dev, "buck5 voltages not specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+ struct max8997_platform_data *pdata)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int max8997_pmic_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_platform_data *pdata = iodev->pdata;
struct regulator_config config = { };
struct regulator_dev **rdev;
struct max8997_data *max8997;
struct i2c_client *i2c;
- int i, ret, size;
+ int i, ret, size, nr_dvs;
u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
- if (!pdata) {
+ if (IS_ERR_OR_NULL(pdata)) {
dev_err(pdev->dev.parent, "No platform init data supplied.\n");
return -ENODEV;
}
+ if (iodev->dev->of_node) {
+ ret = max8997_pmic_dt_parse_pdata(iodev, pdata);
+ if (ret)
+ return ret;
+ }
+
max8997 = devm_kzalloc(&pdev->dev, sizeof(struct max8997_data),
GFP_KERNEL);
if (!max8997)
@@ -973,7 +1116,10 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
- for (i = 0; i < 8; i++) {
+ nr_dvs = (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+ pdata->buck5_gpiodvs) ? 8 : 1;
+
+ for (i = 0; i < nr_dvs; i++) {
max8997->buck1_vol[i] = ret =
max8997_get_voltage_proper_val(
&buck1245_voltage_map_desc,
@@ -1019,6 +1165,19 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
max_buck5, 0x3f);
}
+ /* Initialize all the DVS related BUCK registers */
+ for (i = 0; i < nr_dvs; i++) {
+ max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
+ max8997->buck1_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
+ max8997->buck2_vol[i],
+ 0x3f);
+ max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
+ max8997->buck5_vol[i],
+ 0x3f);
+ }
+
/*
* If buck 1, 2, and 5 do not care DVS GPIO settings, ignore them.
* If at least one of them cares, set gpios.
@@ -1068,19 +1227,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
(1 << 1) : (0 << 1), 1 << 1);
- /* Initialize all the DVS related BUCK registers */
- for (i = 0; i < 8; i++) {
- max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
- max8997->buck1_vol[i],
- 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
- max8997->buck2_vol[i],
- 0x3f);
- max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
- max8997->buck5_vol[i],
- 0x3f);
- }
-
/* Misc Settings */
max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
@@ -1101,6 +1247,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
config.dev = max8997->dev;
config.init_data = pdata->regulators[i].initdata;
config.driver_data = max8997;
+ config.of_node = pdata->regulators[i].reg_node;
rdev[i] = regulator_register(&regulators[id], &config);
if (IS_ERR(rdev[i])) {
@@ -1120,7 +1267,7 @@ err_out:
return ret;
}
-static int __devexit max8997_pmic_remove(struct platform_device *pdev)
+static int max8997_pmic_remove(struct platform_device *pdev)
{
struct max8997_data *max8997 = platform_get_drvdata(pdev);
struct regulator_dev **rdev = max8997->rdev;
@@ -1143,7 +1290,7 @@ static struct platform_driver max8997_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8997_pmic_probe,
- .remove = __devexit_p(max8997_pmic_remove),
+ .remove = max8997_pmic_remove,
.id_table = max8997_pmic_id,
};
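
A note on the max8997 hunks above: besides the DT parsing, they change the DVS programming so that only one DVS slot per buck is written when none of buck1/2/5 uses GPIO DVS, instead of all eight. The following is a minimal user-space sketch of just that sizing rule; dvs_entries() is a local stand-in for illustration, not a function in the driver.

/* Sketch only: mirrors the nr_dvs selection in max8997_pmic_probe(). */
#include <stdio.h>
#include <stdbool.h>

static int dvs_entries(bool buck1_gpiodvs, bool buck2_gpiodvs, bool buck5_gpiodvs)
{
	/* Any buck on GPIO DVS -> program all 8 DVS registers, else just one. */
	return (buck1_gpiodvs || buck2_gpiodvs || buck5_gpiodvs) ? 8 : 1;
}

int main(void)
{
	printf("no GPIO DVS  -> %d DVS register(s) per buck\n",
	       dvs_entries(false, false, false));
	printf("buck2 on DVS -> %d DVS register(s) per buck\n",
	       dvs_entries(false, true, false));
	return 0;
}

The same count (1 or 8) is what the "max8997,pmic-buckX-dvs-voltage" properties are expected to supply in the DT path above.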
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5dfa920ff0c..b821d08eb64 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -633,7 +633,7 @@ static struct regulator_desc regulators[] = {
}
};
-static __devinit int max8998_pmic_probe(struct platform_device *pdev)
+static int max8998_pmic_probe(struct platform_device *pdev)
{
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
@@ -818,7 +818,7 @@ err_out:
return ret;
}
-static int __devexit max8998_pmic_remove(struct platform_device *pdev)
+static int max8998_pmic_remove(struct platform_device *pdev)
{
struct max8998_data *max8998 = platform_get_drvdata(pdev);
struct regulator_dev **rdev = max8998->rdev;
@@ -842,7 +842,7 @@ static struct platform_driver max8998_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = max8998_pmic_probe,
- .remove = __devexit_p(max8998_pmic_remove),
+ .remove = max8998_pmic_remove,
.id_table = max8998_pmic_id,
};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 0801a6d0c12..c46c6705cd7 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -392,7 +392,7 @@ static struct regulator_ops mc13783_gpo_regulator_ops = {
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
};
-static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
+static int mc13783_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
@@ -445,7 +445,7 @@ err:
return ret;
}
-static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
+static int mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13xxx_regulator_platform_data *pdata =
@@ -465,7 +465,7 @@ static struct platform_driver mc13783_regulator_driver = {
.name = "mc13783-regulator",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(mc13783_regulator_remove),
+ .remove = mc13783_regulator_remove,
.probe = mc13783_regulator_probe,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 1fa63812f7a..0d84b1f3319 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -486,7 +486,7 @@ static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
}
-static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+static int mc13892_regulator_probe(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv;
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
@@ -588,7 +588,7 @@ err_unlock:
return ret;
}
-static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
+static int mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -606,7 +606,7 @@ static struct platform_driver mc13892_regulator_driver = {
.name = "mc13892-regulator",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(mc13892_regulator_remove),
+ .remove = mc13892_regulator_remove,
.probe = mc13892_regulator_probe,
};
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 88cbb832d55..4ed89c65411 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -162,7 +162,7 @@ struct regulator_ops mc13xxx_fixed_regulator_ops = {
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
#ifdef CONFIG_OF
-int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
struct device_node *parent, *child;
int num = 0;
@@ -179,7 +179,7 @@ int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
-struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
+struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
int num_regulators)
{
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 07aee694ba9..e915629a25c 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -309,68 +309,22 @@ static int palmas_list_voltage_smps(struct regulator_dev *dev,
int id = rdev_get_id(dev);
int mult = 1;
- if (!selector)
- return 0;
-
/* Read the multiplier set in VSEL register to return
* the correct voltage.
*/
if (pmic->range[id])
mult = 2;
- /* Voltage is (0.49V + (selector * 0.01V)) * RANGE
- * as defined in data sheet. RANGE is either x1 or x2
- */
- return (490000 + (selector * 10000)) * mult;
-}
-
-static int palmas_get_voltage_smps_sel(struct regulator_dev *dev)
-{
- struct palmas_pmic *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- int selector;
- unsigned int reg;
- unsigned int addr;
-
- addr = palmas_regs_info[id].vsel_addr;
-
- palmas_smps_read(pmic->palmas, addr, &reg);
-
- selector = reg & PALMAS_SMPS12_VOLTAGE_VSEL_MASK;
-
- /* Adjust selector to match list_voltage ranges */
- if ((selector > 0) && (selector < 6))
- selector = 6;
- if (!selector)
- selector = 5;
- if (selector > 121)
- selector = 121;
- selector -= 5;
-
- return selector;
-}
-
-static int palmas_set_voltage_smps_sel(struct regulator_dev *dev,
- unsigned selector)
-{
- struct palmas_pmic *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- unsigned int reg = 0;
- unsigned int addr;
-
- addr = palmas_regs_info[id].vsel_addr;
-
- /* Make sure we don't change the value of RANGE */
- if (pmic->range[id])
- reg |= PALMAS_SMPS12_VOLTAGE_RANGE;
-
- /* Adjust the linux selector into range used in VSEL register */
- if (selector)
- reg |= selector + 5;
-
- palmas_smps_write(pmic->palmas, addr, reg);
-
- return 0;
+ if (selector == 0)
+ return 0;
+ else if (selector < 6)
+ return 500000 * mult;
+ else
+		/* Voltage is a linear mapping starting from selector 6:
+		 * volt = (0.49V + ((selector - 5) * 0.01V)) * RANGE,
+		 * where RANGE is either x1 or x2.
+ */
+ return (490000 + ((selector - 5) * 10000)) * mult;
}
static int palmas_map_voltage_smps(struct regulator_dev *rdev,
@@ -386,11 +340,11 @@ static int palmas_map_voltage_smps(struct regulator_dev *rdev,
if (pmic->range[id]) { /* RANGE is x2 */
if (min_uV < 1000000)
min_uV = 1000000;
- ret = DIV_ROUND_UP(min_uV - 1000000, 20000) + 1;
+ ret = DIV_ROUND_UP(min_uV - 1000000, 20000) + 6;
} else { /* RANGE is x1 */
if (min_uV < 500000)
min_uV = 500000;
- ret = DIV_ROUND_UP(min_uV - 500000, 10000) + 1;
+ ret = DIV_ROUND_UP(min_uV - 500000, 10000) + 6;
}
/* Map back into a voltage to verify we're still in bounds */
@@ -407,8 +361,8 @@ static struct regulator_ops palmas_ops_smps = {
.disable = palmas_disable_smps,
.set_mode = palmas_set_mode_smps,
.get_mode = palmas_get_mode_smps,
- .get_voltage_sel = palmas_get_voltage_smps_sel,
- .set_voltage_sel = palmas_set_voltage_smps_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = palmas_list_voltage_smps,
.map_voltage = palmas_map_voltage_smps,
};
@@ -436,44 +390,14 @@ static int palmas_is_enabled_ldo(struct regulator_dev *dev)
return !!(reg);
}
-static int palmas_list_voltage_ldo(struct regulator_dev *dev,
- unsigned selector)
-{
- if (!selector)
- return 0;
-
- /* voltage is 0.85V + (selector * 0.05v) */
- return 850000 + (selector * 50000);
-}
-
-static int palmas_map_voltage_ldo(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int ret, voltage;
-
- if (min_uV == 0)
- return 0;
-
- if (min_uV < 900000)
- min_uV = 900000;
- ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;
-
- /* Map back into a voltage to verify we're still in bounds */
- voltage = palmas_list_voltage_ldo(rdev, ret);
- if (voltage < min_uV || voltage > max_uV)
- return -EINVAL;
-
- return ret;
-}
-
static struct regulator_ops palmas_ops_ldo = {
.is_enabled = palmas_is_enabled_ldo,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = palmas_list_voltage_ldo,
- .map_voltage = palmas_map_voltage_ldo,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
/*
@@ -595,7 +519,7 @@ static struct of_regulator_match palmas_matches[] = {
{ .name = "ldousb", },
};
-static void __devinit palmas_dt_to_pdata(struct device *dev,
+static void palmas_dt_to_pdata(struct device *dev,
struct device_node *node,
struct palmas_pmic_platform_data *pdata)
{
@@ -663,7 +587,7 @@ static void __devinit palmas_dt_to_pdata(struct device *dev,
}
-static __devinit int palmas_probe(struct platform_device *pdev)
+static int palmas_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_pmic_platform_data *pdata = pdev->dev.platform_data;
@@ -733,6 +657,14 @@ static __devinit int palmas_probe(struct platform_device *pdev)
continue;
}
+ /* Initialise sleep/init values from platform data */
+ if (pdata && pdata->reg_init[id]) {
+ reg_init = pdata->reg_init[id];
+ ret = palmas_smps_init(palmas, id, reg_init);
+ if (ret)
+ goto err_unregister_regulator;
+ }
+
/* Register the regulators */
pmic->desc[id].name = palmas_regs_info[id].name;
pmic->desc[id].id = id;
@@ -753,29 +685,11 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].uV_step = 1250000;
break;
default:
- pmic->desc[id].ops = &palmas_ops_smps;
- pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
- }
-
- pmic->desc[id].type = REGULATOR_VOLTAGE;
- pmic->desc[id].owner = THIS_MODULE;
-
- /* Initialise sleep/init values from platform data */
- if (pdata) {
- reg_init = pdata->reg_init[id];
- if (reg_init) {
- ret = palmas_smps_init(palmas, id, reg_init);
- if (ret)
- goto err_unregister_regulator;
- }
- }
-
- /*
- * read and store the RANGE bit for later use
- * This must be done before regulator is probed otherwise
- * we error in probe with unsuportable ranges.
- */
- if (id != PALMAS_REG_SMPS10) {
+ /*
+			 * Read and store the RANGE bit for later use.
+			 * This must be done before the regulator is probed,
+			 * otherwise we error out in probe with unsupportable ranges.
+ */
addr = palmas_regs_info[id].vsel_addr;
ret = palmas_smps_read(pmic->palmas, addr, &reg);
@@ -783,8 +697,19 @@ static __devinit int palmas_probe(struct platform_device *pdev)
goto err_unregister_regulator;
if (reg & PALMAS_SMPS12_VOLTAGE_RANGE)
pmic->range[id] = 1;
+
+ pmic->desc[id].ops = &palmas_ops_smps;
+ pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
+ pmic->desc[id].vsel_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ palmas_regs_info[id].vsel_addr);
+ pmic->desc[id].vsel_mask =
+ PALMAS_SMPS12_VOLTAGE_VSEL_MASK;
}
+ pmic->desc[id].type = REGULATOR_VOLTAGE;
+ pmic->desc[id].owner = THIS_MODULE;
+
if (pdata)
config.init_data = pdata->reg_data[id];
else
@@ -821,6 +746,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].type = REGULATOR_VOLTAGE;
pmic->desc[id].owner = THIS_MODULE;
+ pmic->desc[id].min_uV = 900000;
+ pmic->desc[id].uV_step = 50000;
+ pmic->desc[id].linear_min_sel = 1;
pmic->desc[id].vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
palmas_regs_info[id].vsel_addr);
pmic->desc[id].vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK;
@@ -868,7 +796,7 @@ err_unregister_regulator:
return ret;
}
-static int __devexit palmas_remove(struct platform_device *pdev)
+static int palmas_remove(struct platform_device *pdev)
{
struct palmas_pmic *pmic = platform_get_drvdata(pdev);
int id;
@@ -890,7 +818,7 @@ static struct platform_driver palmas_driver = {
.owner = THIS_MODULE,
},
.probe = palmas_probe,
- .remove = __devexit_p(palmas_remove),
+ .remove = palmas_remove,
};
static int __init palmas_init(void)
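
To see why the palmas map_voltage offsets above change from +1 to +6: selectors 1..5 all alias the 0.5 V floor, so the first distinct step sits at selector 6, and map_voltage has to land there to stay consistent with list_voltage. A stand-alone sketch of that arithmetic, reusing the same constants (the helper names here are local to the sketch, not driver symbols):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int smps_list_voltage(unsigned sel, int range_x2)
{
	int mult = range_x2 ? 2 : 1;

	if (sel == 0)
		return 0;			/* off */
	if (sel < 6)
		return 500000 * mult;		/* selectors 1..5 alias 0.5 V */
	return (490000 + (sel - 5) * 10000) * mult;
}

static int smps_map_voltage(int min_uV, int range_x2)
{
	if (range_x2) {
		if (min_uV < 1000000)
			min_uV = 1000000;
		return DIV_ROUND_UP(min_uV - 1000000, 20000) + 6;
	}
	if (min_uV < 500000)
		min_uV = 500000;
	return DIV_ROUND_UP(min_uV - 500000, 10000) + 6;
}

int main(void)
{
	int sel = smps_map_voltage(1200000, 0);	/* RANGE x1 */

	printf("1.2 V maps to selector %d -> %d uV\n",
	       sel, smps_list_voltage(sel, 0));
	return 0;
}

With RANGE x1, 1.2 V maps to selector 76 and lists back as exactly 1200000 µV, which is the round trip the map_voltage bounds check above relies on.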
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 68777acc099..4899342f1fc 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -236,7 +236,7 @@ static const struct regulator_desc pcap_regulators[] = {
VREG(VAUX4), VREG(VSIM), VREG(VSIM2), VREG(VVIB), VREG(SW1), VREG(SW2),
};
-static int __devinit pcap_regulator_probe(struct platform_device *pdev)
+static int pcap_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
void *pcap = dev_get_drvdata(pdev->dev.parent);
@@ -255,7 +255,7 @@ static int __devinit pcap_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcap_regulator_remove(struct platform_device *pdev)
+static int pcap_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -271,7 +271,7 @@ static struct platform_driver pcap_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = pcap_regulator_probe,
- .remove = __devexit_p(pcap_regulator_remove),
+ .remove = pcap_regulator_remove,
};
static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 092e5cb848a..534075e13d6 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -24,12 +24,15 @@
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/pmic.h>
-#define PCF50633_REGULATOR(_name, _id, _n) \
+#define PCF50633_REGULATOR(_name, _id, _min_uV, _uV_step, _min_sel, _n) \
{ \
.name = _name, \
.id = PCF50633_REGULATOR_##_id, \
.ops = &pcf50633_regulator_ops, \
.n_voltages = _n, \
+ .min_uV = _min_uV, \
+ .uV_step = _uV_step, \
+ .linear_min_sel = _min_sel, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = PCF50633_REG_##_id##OUT, \
@@ -38,165 +41,42 @@
.enable_mask = PCF50633_REGULATOR_ON, \
}
-/* Bits from voltage value */
-static u8 auto_voltage_bits(unsigned int millivolts)
-{
- if (millivolts < 1800)
- return 0x2f;
- if (millivolts > 3800)
- return 0xff;
-
- millivolts -= 625;
-
- return millivolts / 25;
-}
-
-static u8 down_voltage_bits(unsigned int millivolts)
-{
- if (millivolts < 625)
- return 0;
- else if (millivolts > 3000)
- return 0xff;
-
- millivolts -= 625;
-
- return millivolts / 25;
-}
-
-static u8 ldo_voltage_bits(unsigned int millivolts)
-{
- if (millivolts < 900)
- return 0;
- else if (millivolts > 3600)
- return 0x1f;
-
- millivolts -= 900;
- return millivolts / 100;
-}
-
-/* Obtain voltage value from bits */
-static unsigned int auto_voltage_value(u8 bits)
-{
- /* AUTOOUT: 00000000 to 00101110 are reserved.
- * Return 0 for bits in reserved range, which means this selector code
- * can't be used on this system */
- if (bits < 0x2f)
- return 0;
-
- return 625 + (bits * 25);
-}
-
-
-static unsigned int down_voltage_value(u8 bits)
-{
- return 625 + (bits * 25);
-}
-
-
-static unsigned int ldo_voltage_value(u8 bits)
-{
- bits &= 0x1f;
-
- return 900 + (bits * 100);
-}
-
-static int pcf50633_regulator_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- struct pcf50633 *pcf;
- int regulator_id, millivolts;
- u8 volt_bits;
-
- pcf = rdev_get_drvdata(rdev);
-
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= PCF50633_NUM_REGULATORS)
- return -EINVAL;
-
- millivolts = min_uV / 1000;
-
- switch (regulator_id) {
- case PCF50633_REGULATOR_AUTO:
- volt_bits = auto_voltage_bits(millivolts);
- break;
- case PCF50633_REGULATOR_DOWN1:
- case PCF50633_REGULATOR_DOWN2:
- volt_bits = down_voltage_bits(millivolts);
- break;
- case PCF50633_REGULATOR_LDO1:
- case PCF50633_REGULATOR_LDO2:
- case PCF50633_REGULATOR_LDO3:
- case PCF50633_REGULATOR_LDO4:
- case PCF50633_REGULATOR_LDO5:
- case PCF50633_REGULATOR_LDO6:
- case PCF50633_REGULATOR_HCLDO:
- case PCF50633_REGULATOR_MEMLDO:
- volt_bits = ldo_voltage_bits(millivolts);
- break;
- default:
- return -EINVAL;
- }
-
- return volt_bits;
-}
-
-static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned int index)
-{
- int regulator_id = rdev_get_id(rdev);
-
- int millivolts;
-
- switch (regulator_id) {
- case PCF50633_REGULATOR_AUTO:
- millivolts = auto_voltage_value(index);
- break;
- case PCF50633_REGULATOR_DOWN1:
- case PCF50633_REGULATOR_DOWN2:
- millivolts = down_voltage_value(index);
- break;
- case PCF50633_REGULATOR_LDO1:
- case PCF50633_REGULATOR_LDO2:
- case PCF50633_REGULATOR_LDO3:
- case PCF50633_REGULATOR_LDO4:
- case PCF50633_REGULATOR_LDO5:
- case PCF50633_REGULATOR_LDO6:
- case PCF50633_REGULATOR_HCLDO:
- case PCF50633_REGULATOR_MEMLDO:
- millivolts = ldo_voltage_value(index);
- break;
- default:
- return -EINVAL;
- }
-
- return millivolts * 1000;
-}
-
static struct regulator_ops pcf50633_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = pcf50633_regulator_list_voltage,
- .map_voltage = pcf50633_regulator_map_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct regulator_desc regulators[] = {
- [PCF50633_REGULATOR_AUTO] = PCF50633_REGULATOR("auto", AUTO, 128),
- [PCF50633_REGULATOR_DOWN1] = PCF50633_REGULATOR("down1", DOWN1, 96),
- [PCF50633_REGULATOR_DOWN2] = PCF50633_REGULATOR("down2", DOWN2, 96),
- [PCF50633_REGULATOR_LDO1] = PCF50633_REGULATOR("ldo1", LDO1, 28),
- [PCF50633_REGULATOR_LDO2] = PCF50633_REGULATOR("ldo2", LDO2, 28),
- [PCF50633_REGULATOR_LDO3] = PCF50633_REGULATOR("ldo3", LDO3, 28),
- [PCF50633_REGULATOR_LDO4] = PCF50633_REGULATOR("ldo4", LDO4, 28),
- [PCF50633_REGULATOR_LDO5] = PCF50633_REGULATOR("ldo5", LDO5, 28),
- [PCF50633_REGULATOR_LDO6] = PCF50633_REGULATOR("ldo6", LDO6, 28),
- [PCF50633_REGULATOR_HCLDO] = PCF50633_REGULATOR("hcldo", HCLDO, 28),
- [PCF50633_REGULATOR_MEMLDO] = PCF50633_REGULATOR("memldo", MEMLDO, 28),
+ [PCF50633_REGULATOR_AUTO] =
+ PCF50633_REGULATOR("auto", AUTO, 1800000, 25000, 0x2f, 128),
+ [PCF50633_REGULATOR_DOWN1] =
+ PCF50633_REGULATOR("down1", DOWN1, 625000, 25000, 0, 96),
+ [PCF50633_REGULATOR_DOWN2] =
+ PCF50633_REGULATOR("down2", DOWN2, 625000, 25000, 0, 96),
+ [PCF50633_REGULATOR_LDO1] =
+ PCF50633_REGULATOR("ldo1", LDO1, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_LDO2] =
+ PCF50633_REGULATOR("ldo2", LDO2, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_LDO3] =
+ PCF50633_REGULATOR("ldo3", LDO3, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_LDO4] =
+ PCF50633_REGULATOR("ldo4", LDO4, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_LDO5] =
+ PCF50633_REGULATOR("ldo5", LDO5, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_LDO6] =
+ PCF50633_REGULATOR("ldo6", LDO6, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_HCLDO] =
+ PCF50633_REGULATOR("hcldo", HCLDO, 900000, 100000, 0, 28),
+ [PCF50633_REGULATOR_MEMLDO] =
+ PCF50633_REGULATOR("memldo", MEMLDO, 900000, 100000, 0, 28),
};
-static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
+static int pcf50633_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
struct pcf50633 *pcf;
@@ -222,7 +102,7 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_regulator_remove(struct platform_device *pdev)
+static int pcf50633_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -237,7 +117,7 @@ static struct platform_driver pcf50633_regulator_driver = {
.name = "pcf50633-regltr",
},
.probe = pcf50633_regulator_probe,
- .remove = __devexit_p(pcf50633_regulator_remove),
+ .remove = pcf50633_regulator_remove,
};
static int __init pcf50633_regulator_init(void)
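
The pcf50633 conversion above drops the per-rail bit/value helpers in favour of the core's linear helpers, encoding each rail as min_uV + uV_step with a linear_min_sel offset. A sketch of that mapping as I read the generic helper's semantics (selectors below linear_min_sel are reserved and report 0), using the AUTO rail's parameters from the new table; list_voltage_linear() below is a local illustration, not the kernel function:

#include <stdio.h>

static int list_voltage_linear(unsigned sel, unsigned n_voltages,
			       int min_uV, int uV_step, unsigned linear_min_sel)
{
	if (sel >= n_voltages)
		return -1;		/* -EINVAL in the kernel */
	if (sel < linear_min_sel)
		return 0;		/* reserved selector code */
	return min_uV + (int)(sel - linear_min_sel) * uV_step;
}

int main(void)
{
	/* AUTO rail: 1.8 V base, 25 mV step, first usable selector 0x2f, 128 codes */
	printf("sel 0x2f -> %d uV\n",
	       list_voltage_linear(0x2f, 128, 1800000, 25000, 0x2f));
	printf("sel 0x7f -> %d uV\n",
	       list_voltage_linear(0x7f, 128, 1800000, 25000, 0x2f));
	return 0;
}

Selector 0x7f evaluates to 3.8 V, matching the cap the removed auto_voltage_bits() helper enforced.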
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 8bf4e8c9de9..9e6f78694bf 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -119,7 +119,7 @@ static struct rc5t583_regulator_info rc5t583_reg_info[RC5T583_REGULATOR_MAX] = {
RC5T583_REG(LDO9, LDOEN1, 1, LDODIS1, 1, 0x7F, 900, 3400, 25000, 133),
};
-static int __devinit rc5t583_regulator_probe(struct platform_device *pdev)
+static int rc5t583_regulator_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
@@ -198,7 +198,7 @@ clean_exit:
return ret;
}
-static int __devexit rc5t583_regulator_remove(struct platform_device *pdev)
+static int rc5t583_regulator_remove(struct platform_device *pdev)
{
struct rc5t583_regulator *regs = platform_get_drvdata(pdev);
int id;
@@ -214,7 +214,7 @@ static struct platform_driver rc5t583_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = rc5t583_regulator_probe,
- .remove = __devexit_p(rc5t583_regulator_remove),
+ .remove = rc5t583_regulator_remove,
};
static int __init rc5t583_regulator_init(void)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 926f9c8f2fa..bd062a2ffbe 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -231,7 +231,7 @@ static struct regulator_desc regulators[] = {
regulator_desc_buck10,
};
-static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
+static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
@@ -269,16 +269,16 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
if (ramp_enable) {
if (s2mps11->buck2_ramp)
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) << 6;
if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) << 4;
sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
}
ramp_reg &= 0x00;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) << 6;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) << 4;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) << 2;
ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
@@ -307,7 +307,7 @@ err:
return ret;
}
-static int __devexit s2mps11_pmic_remove(struct platform_device *pdev)
+static int s2mps11_pmic_remove(struct platform_device *pdev)
{
struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
int i;
@@ -330,7 +330,7 @@ static struct platform_driver s2mps11_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s2mps11_pmic_probe,
- .remove = __devexit_p(s2mps11_pmic_remove),
+ .remove = s2mps11_pmic_remove,
.id_table = s2mps11_pmic_id,
};
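
The s2mps11 hunk above is a plain shift-direction fix: the 2-bit ramp codes belong in the upper bit fields of the ramp registers, so they must be shifted left into position; right-shifting a small code just discards it. A minimal sketch of the corrected packing; get_ramp_delay() here is a stand-in returning a 2-bit code, not the driver's real lookup:

#include <stdio.h>

static unsigned get_ramp_delay(int mV_per_us)
{
	/* Illustrative 2-bit encoding only. */
	return mV_per_us >= 50 ? 3 : mV_per_us >= 25 ? 2 : mV_per_us >= 12 ? 1 : 0;
}

int main(void)
{
	unsigned ramp_reg = 0;

	ramp_reg |= get_ramp_delay(25) << 6;	/* buck5       -> bits 7:6 */
	ramp_reg |= get_ramp_delay(12) << 4;	/* buck1/6     -> bits 5:4 */
	ramp_reg |= get_ramp_delay(50) << 2;	/* buck7/8/10  -> bits 3:2 */
	ramp_reg |= get_ramp_delay(6);		/* buck9       -> bits 1:0 */
	printf("RAMP_BUCK = 0x%02x\n", ramp_reg);
	return 0;
}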
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index abe64a32aed..9f991f2c525 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -168,7 +168,7 @@ static unsigned int s5m8767_opmode_reg[][4] = {
static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
int *enable_ctrl)
{
- int reg_id = rdev_get_id(rdev);
+ int i, reg_id = rdev_get_id(rdev);
unsigned int mode;
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
@@ -195,8 +195,17 @@ static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
return -EINVAL;
}
- mode = s5m8767->opmode[reg_id].mode;
- *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ for (i = 0; i < s5m8767->num_regulators; i++) {
+ if (s5m8767->opmode[i].id == reg_id) {
+ mode = s5m8767->opmode[i].mode;
+ break;
+ }
+ }
+
+ if (i < s5m8767->num_regulators)
+ *enable_ctrl =
+ s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+
return 0;
}
@@ -263,17 +272,17 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
reg = S5M8767_REG_BUCK1CTRL2;
break;
case S5M8767_BUCK2:
- reg = S5M8767_REG_BUCK2DVS2;
+ reg = S5M8767_REG_BUCK2DVS1;
if (s5m8767->buck2_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK3:
- reg = S5M8767_REG_BUCK3DVS2;
+ reg = S5M8767_REG_BUCK3DVS1;
if (s5m8767->buck3_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK4:
- reg = S5M8767_REG_BUCK4DVS2;
+ reg = S5M8767_REG_BUCK4DVS1;
if (s5m8767->buck4_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
@@ -499,7 +508,7 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK9),
};
-static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
+static int s5m8767_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
@@ -547,7 +556,7 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
rdev = s5m8767->rdev;
s5m8767->dev = &pdev->dev;
s5m8767->iodev = iodev;
- s5m8767->num_regulators = S5M8767_REG_MAX - 2;
+ s5m8767->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, s5m8767);
s5m8767->buck_gpioindex = pdata->buck_default_idx;
@@ -617,9 +626,16 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
}
}
- if (gpio_is_valid(pdata->buck_gpios[0]) &&
- gpio_is_valid(pdata->buck_gpios[1]) &&
- gpio_is_valid(pdata->buck_gpios[2])) {
+ if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
+ pdata->buck4_gpiodvs) {
+
+ if (!gpio_is_valid(pdata->buck_gpios[0]) ||
+ !gpio_is_valid(pdata->buck_gpios[1]) ||
+ !gpio_is_valid(pdata->buck_gpios[2])) {
+ dev_err(&pdev->dev, "GPIO NOT VALID\n");
+ return -EINVAL;
+ }
+
ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0],
"S5M8767 SET1");
if (ret)
@@ -644,10 +660,6 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
/* SET3 GPIO */
gpio_direction_output(pdata->buck_gpios[2],
(s5m8767->buck_gpioindex >> 0) & 0x1);
- } else {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- ret = -EINVAL;
- return ret;
}
ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2");
@@ -773,7 +785,7 @@ err:
return ret;
}
-static int __devexit s5m8767_pmic_remove(struct platform_device *pdev)
+static int s5m8767_pmic_remove(struct platform_device *pdev)
{
struct s5m8767_info *s5m8767 = platform_get_drvdata(pdev);
struct regulator_dev **rdev = s5m8767->rdev;
@@ -798,7 +810,7 @@ static struct platform_driver s5m8767_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = s5m8767_pmic_probe,
- .remove = __devexit_p(s5m8767_pmic_remove),
+ .remove = s5m8767_pmic_remove,
.id_table = s5m8767_pmic_id,
};
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
new file mode 100644
index 00000000000..ab21133e678
--- /dev/null
+++ b/drivers/regulator/tps51632-regulator.c
@@ -0,0 +1,342 @@
+/*
+ * tps51632-regulator.c -- TI TPS51632
+ *
+ * Regulator driver for TPS51632 3-2-1 Phase D-Cap Step Down Driverless
+ * Controller with serial VID control and DVFS.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/tps51632-regulator.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define TPS51632_VOLTAGE_SELECT_REG 0x0
+#define TPS51632_VOLTAGE_BASE_REG 0x1
+#define TPS51632_OFFSET_REG 0x2
+#define TPS51632_IMON_REG 0x3
+#define TPS51632_VMAX_REG 0x4
+#define TPS51632_DVFS_CONTROL_REG 0x5
+#define TPS51632_POWER_STATE_REG 0x6
+#define TPS51632_SLEW_REGS 0x7
+#define TPS51632_FAULT_REG 0x14
+
+#define TPS51632_MAX_REG 0x15
+
+#define TPS51632_VOUT_MASK 0x7F
+#define TPS51632_VOUT_OFFSET_MASK 0x1F
+#define TPS51632_VMAX_MASK 0x7F
+#define TPS51632_VMAX_LOCK 0x80
+
+/* TPS51632_DVFS_CONTROL_REG */
+#define TPS51632_DVFS_PWMEN 0x1
+#define TPS51632_DVFS_STEP_20 0x2
+#define TPS51632_DVFS_VMAX_PG 0x4
+#define TPS51632_DVFS_PWMRST 0x8
+#define TPS51632_DVFS_OCA_EN 0x10
+#define TPS51632_DVFS_FCCM 0x20
+
+/* TPS51632_POWER_STATE_REG */
+#define TPS51632_POWER_STATE_MASK 0x03
+#define TPS51632_POWER_STATE_MULTI_PHASE_CCM 0x0
+#define TPS51632_POWER_STATE_SINGLE_PHASE_CCM 0x1
+#define TPS51632_POWER_STATE_SINGLE_PHASE_DCM 0x2
+
+#define TPS51632_MIN_VOLATGE 500000
+#define TPS51632_MAX_VOLATGE 1520000
+#define TPS51632_VOLATGE_STEP_10mV 10000
+#define TPS51632_VOLATGE_STEP_20mV 20000
+#define TPS51632_MAX_VSEL 0x7F
+#define TPS51632_MIN_VSEL 0x19
+#define TPS51632_DEFAULT_RAMP_DELAY 6000
+#define TPS51632_VOLT_VSEL(uV) \
+ (DIV_ROUND_UP(uV - TPS51632_MIN_VOLATGE, \
+ TPS51632_VOLATGE_STEP_10mV) + \
+ TPS51632_MIN_VSEL)
+
+/* TPS51632 chip information */
+struct tps51632_chip {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ bool enable_pwm_dvfs;
+};
+
+static int tps51632_dcdc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct tps51632_chip *tps = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret;
+ unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
+ int vsel;
+
+ if (tps->enable_pwm_dvfs)
+ reg = TPS51632_VOLTAGE_BASE_REG;
+
+ ret = regmap_read(tps->regmap, reg, &data);
+ if (ret < 0) {
+ dev_err(tps->dev, "reg read failed, err %d\n", ret);
+ return ret;
+ }
+
+ vsel = data & TPS51632_VOUT_MASK;
+ return vsel;
+}
+
+static int tps51632_dcdc_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct tps51632_chip *tps = rdev_get_drvdata(rdev);
+ int ret;
+ unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
+
+ if (tps->enable_pwm_dvfs)
+ reg = TPS51632_VOLTAGE_BASE_REG;
+
+ if (selector > TPS51632_MAX_VSEL)
+ return -EINVAL;
+
+ ret = regmap_write(tps->regmap, reg, selector);
+ if (ret < 0)
+ dev_err(tps->dev, "reg write failed, err %d\n", ret);
+ return ret;
+}
+
+static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ struct tps51632_chip *tps = rdev_get_drvdata(rdev);
+ int bit = ramp_delay/6000;
+ int ret;
+
+ if (bit)
+ bit--;
+ ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
+ if (ret < 0)
+ dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
+ return ret;
+}
+
+static struct regulator_ops tps51632_dcdc_ops = {
+ .get_voltage_sel = tps51632_dcdc_get_voltage_sel,
+ .set_voltage_sel = tps51632_dcdc_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = tps51632_dcdc_set_ramp_delay,
+};
+
+static int tps51632_init_dcdc(struct tps51632_chip *tps,
+ struct tps51632_regulator_platform_data *pdata)
+{
+ int ret;
+ uint8_t control = 0;
+ int vsel;
+
+ if (!pdata->enable_pwm_dvfs)
+ goto skip_pwm_config;
+
+ control |= TPS51632_DVFS_PWMEN;
+ tps->enable_pwm_dvfs = pdata->enable_pwm_dvfs;
+ vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV);
+ ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel);
+ if (ret < 0) {
+ dev_err(tps->dev, "BASE reg write failed, err %d\n", ret);
+ return ret;
+ }
+
+ if (pdata->dvfs_step_20mV)
+ control |= TPS51632_DVFS_STEP_20;
+
+ if (pdata->max_voltage_uV) {
+ unsigned int vmax;
+		/*
+		 * TPS51632 hw behavior: the VMAX register can be written only
+		 * once, as it gets locked after the first write. The lock is
+		 * cleared only when the device is power-reset.
+		 * Write the register only when the lock bit is not set.
+		 */
+ ret = regmap_read(tps->regmap, TPS51632_VMAX_REG, &vmax);
+ if (ret < 0) {
+ dev_err(tps->dev, "VMAX read failed, err %d\n", ret);
+ return ret;
+ }
+ if (!(vmax & TPS51632_VMAX_LOCK)) {
+ vsel = TPS51632_VOLT_VSEL(pdata->max_voltage_uV);
+ ret = regmap_write(tps->regmap, TPS51632_VMAX_REG,
+ vsel);
+ if (ret < 0) {
+ dev_err(tps->dev,
+ "VMAX write failed, err %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+skip_pwm_config:
+ ret = regmap_write(tps->regmap, TPS51632_DVFS_CONTROL_REG, control);
+ if (ret < 0)
+ dev_err(tps->dev, "DVFS reg write failed, err %d\n", ret);
+ return ret;
+}
+
+static bool rd_wr_reg(struct device *dev, unsigned int reg)
+{
+ if ((reg >= 0x8) && (reg <= 0x10))
+ return false;
+ return true;
+}
+
+static const struct regmap_config tps51632_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg,
+ .readable_reg = rd_wr_reg,
+ .max_register = TPS51632_MAX_REG - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int tps51632_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps51632_regulator_platform_data *pdata;
+ struct regulator_dev *rdev;
+ struct tps51632_chip *tps;
+ int ret;
+ struct regulator_config config = { };
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "No Platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdata->enable_pwm_dvfs) {
+ if ((pdata->base_voltage_uV < TPS51632_MIN_VOLATGE) ||
+ (pdata->base_voltage_uV > TPS51632_MAX_VOLATGE)) {
+ dev_err(&client->dev, "Invalid base_voltage_uV setting\n");
+ return -EINVAL;
+ }
+
+ if ((pdata->max_voltage_uV) &&
+ ((pdata->max_voltage_uV < TPS51632_MIN_VOLATGE) ||
+ (pdata->max_voltage_uV > TPS51632_MAX_VOLATGE))) {
+ dev_err(&client->dev, "Invalid max_voltage_uV setting\n");
+ return -EINVAL;
+ }
+ }
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ tps->dev = &client->dev;
+ tps->desc.name = id->name;
+ tps->desc.id = 0;
+ tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY;
+ tps->desc.min_uV = TPS51632_MIN_VOLATGE;
+ tps->desc.uV_step = TPS51632_VOLATGE_STEP_10mV;
+ tps->desc.linear_min_sel = TPS51632_MIN_VSEL;
+ tps->desc.n_voltages = TPS51632_MAX_VSEL + 1;
+ tps->desc.ops = &tps51632_dcdc_ops;
+ tps->desc.type = REGULATOR_VOLTAGE;
+ tps->desc.owner = THIS_MODULE;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ ret = PTR_ERR(tps->regmap);
+ dev_err(&client->dev, "regmap init failed, err %d\n", ret);
+ return ret;
+ }
+ i2c_set_clientdata(client, tps);
+
+ ret = tps51632_init_dcdc(tps, pdata);
+ if (ret < 0) {
+ dev_err(tps->dev, "Init failed, err = %d\n", ret);
+ return ret;
+ }
+
+ /* Register the regulators */
+ config.dev = &client->dev;
+ config.init_data = pdata->reg_init_data;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+ config.of_node = client->dev.of_node;
+
+ rdev = regulator_register(&tps->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "regulator register failed\n");
+ return PTR_ERR(rdev);
+ }
+
+ tps->rdev = rdev;
+ return 0;
+}
+
+static int tps51632_remove(struct i2c_client *client)
+{
+ struct tps51632_chip *tps = i2c_get_clientdata(client);
+
+ regulator_unregister(tps->rdev);
+ return 0;
+}
+
+static const struct i2c_device_id tps51632_id[] = {
+ {.name = "tps51632",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, tps51632_id);
+
+static struct i2c_driver tps51632_i2c_driver = {
+ .driver = {
+ .name = "tps51632",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps51632_probe,
+ .remove = tps51632_remove,
+ .id_table = tps51632_id,
+};
+
+static int __init tps51632_init(void)
+{
+ return i2c_add_driver(&tps51632_i2c_driver);
+}
+subsys_initcall(tps51632_init);
+
+static void __exit tps51632_cleanup(void)
+{
+ i2c_del_driver(&tps51632_i2c_driver);
+}
+module_exit(tps51632_cleanup);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("TPS51632 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
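
A worked example of the TPS51632_VOLT_VSEL() conversion defined in the new driver above: the selector is a 10 mV code with a 0x19 offset at the 500 mV floor. This user-space sketch reuses the driver's constants verbatim (including their spelling); only main() is added for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define TPS51632_MIN_VOLATGE		500000
#define TPS51632_VOLATGE_STEP_10mV	10000
#define TPS51632_MIN_VSEL		0x19
#define TPS51632_VOLT_VSEL(uV)					\
	(DIV_ROUND_UP((uV) - TPS51632_MIN_VOLATGE,		\
		      TPS51632_VOLATGE_STEP_10mV) +		\
	 TPS51632_MIN_VSEL)

int main(void)
{
	/* 1.1 V base voltage -> (1100000 - 500000) / 10000 + 0x19 = 60 + 25 = 0x55 */
	printf("1.1 V -> VSEL 0x%02x\n", (unsigned)TPS51632_VOLT_VSEL(1100000));
	return 0;
}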
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 1378409efae..ec9453ffb77 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -127,7 +127,7 @@ static const struct regulator_desc tps6105x_regulator_desc = {
/*
* Registers the chip as a voltage regulator
*/
-static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
+static int tps6105x_regulator_probe(struct platform_device *pdev)
{
struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
struct tps6105x_platform_data *pdata = tps6105x->pdata;
@@ -159,7 +159,7 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tps6105x_regulator_remove(struct platform_device *pdev)
+static int tps6105x_regulator_remove(struct platform_device *pdev)
{
struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
regulator_unregister(tps6105x->regulator);
@@ -172,7 +172,7 @@ static struct platform_driver tps6105x_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = tps6105x_regulator_probe,
- .remove = __devexit_p(tps6105x_regulator_remove),
+ .remove = tps6105x_regulator_remove,
};
static __init int tps6105x_regulator_init(void)
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 68729a7c870..acbd63fde41 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -243,7 +243,7 @@ static struct regulator_ops tps62360_dcdc_ops = {
.get_mode = tps62360_get_mode,
};
-static int __devinit tps62360_init_dcdc(struct tps62360_chip *tps,
+static int tps62360_init_dcdc(struct tps62360_chip *tps,
struct tps62360_regulator_platform_data *pdata)
{
int ret;
@@ -339,7 +339,7 @@ static const struct of_device_id tps62360_of_match[] = {
MODULE_DEVICE_TABLE(of, tps62360_of_match);
#endif
-static int __devinit tps62360_probe(struct i2c_client *client,
+static int tps62360_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regulator_config config = { };
@@ -490,7 +490,7 @@ static int __devinit tps62360_probe(struct i2c_client *client,
*
* Unregister TPS driver as an i2c client device driver
*/
-static int __devexit tps62360_remove(struct i2c_client *client)
+static int tps62360_remove(struct i2c_client *client)
{
struct tps62360_chip *tps = i2c_get_clientdata(client);
@@ -531,7 +531,7 @@ static struct i2c_driver tps62360_i2c_driver = {
.of_match_table = of_match_ptr(tps62360_of_match),
},
.probe = tps62360_probe,
- .remove = __devexit_p(tps62360_remove),
+ .remove = tps62360_remove,
.shutdown = tps62360_shutdown,
.id_table = tps62360_id,
};
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 6998d579d07..9b9af6d889c 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -219,7 +219,7 @@ static struct regmap_config tps65023_regmap_config = {
.val_bits = 8,
};
-static int __devinit tps_65023_probe(struct i2c_client *client,
+static int tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct tps_driver_data *drv_data = (void *)id->driver_data;
@@ -319,7 +319,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
return error;
}
-static int __devexit tps_65023_remove(struct i2c_client *client)
+static int tps_65023_remove(struct i2c_client *client)
{
struct tps_pmic *tps = i2c_get_clientdata(client);
int i;
@@ -446,7 +446,7 @@ static struct i2c_driver tps_65023_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tps_65023_probe,
- .remove = __devexit_p(tps_65023_remove),
+ .remove = tps_65023_remove,
.id_table = tps_65023_id,
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 07d01ccdf30..0233cfb5656 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -356,7 +356,7 @@ static struct regulator_ops tps6507x_pmic_ops = {
.list_voltage = regulator_list_voltage_table,
};
-static __devinit int tps6507x_pmic_probe(struct platform_device *pdev)
+static int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
struct tps_info *info = &tps6507x_pmic_regs[0];
@@ -439,7 +439,7 @@ fail:
return error;
}
-static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
+static int tps6507x_pmic_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
struct tps6507x_pmic *tps = tps6507x_dev->pmic;
@@ -456,7 +456,7 @@ static struct platform_driver tps6507x_pmic_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_pmic_probe,
- .remove = __devexit_p(tps6507x_pmic_remove),
+ .remove = tps6507x_pmic_remove,
};
static int __init tps6507x_pmic_init(void)
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index 001ad554ac6..41c391789c9 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -18,119 +18,240 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65090.h>
-#include <linux/regulator/tps65090-regulator.h>
struct tps65090_regulator {
- int id;
- /* used by regulator core */
- struct regulator_desc desc;
-
- /* Device */
struct device *dev;
+ struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+};
+
+static struct regulator_ops tps65090_ext_control_ops = {
+};
+
+static struct regulator_ops tps65090_reg_contol_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops tps65090_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
+static struct regulator_ops tps65090_ldo_ops = {
};
-#define tps65090_REG(_id) \
+#define tps65090_REG_DESC(_id, _sname, _en_reg, _ops) \
{ \
- .id = TPS65090_ID_##_id, \
- .desc = { \
- .name = tps65090_rails(_id), \
- .id = TPS65090_ID_##_id, \
- .ops = &tps65090_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .enable_reg = (TPS65090_ID_##_id) + 12, \
- .enable_mask = BIT(0), \
- }, \
+ .name = "TPS65090_RAILS"#_id, \
+ .supply_name = _sname, \
+ .id = TPS65090_REGULATOR_##_id, \
+ .ops = &_ops, \
+ .enable_reg = _en_reg, \
+ .enable_mask = BIT(0), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
}
-static struct tps65090_regulator TPS65090_regulator[] = {
- tps65090_REG(DCDC1),
- tps65090_REG(DCDC2),
- tps65090_REG(DCDC3),
- tps65090_REG(FET1),
- tps65090_REG(FET2),
- tps65090_REG(FET3),
- tps65090_REG(FET4),
- tps65090_REG(FET5),
- tps65090_REG(FET6),
- tps65090_REG(FET7),
+static struct regulator_desc tps65090_regulator_desc[] = {
+ tps65090_REG_DESC(DCDC1, "vsys1", 0x0C, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(DCDC2, "vsys2", 0x0D, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(DCDC3, "vsys3", 0x0E, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET1, "infet1", 0x0F, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET2, "infet2", 0x10, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET3, "infet3", 0x11, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET4, "infet4", 0x12, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET5, "infet5", 0x13, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET6, "infet6", 0x14, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(FET7, "infet7", 0x15, tps65090_reg_contol_ops),
+ tps65090_REG_DESC(LDO1, "vsys_l1", 0, tps65090_ldo_ops),
+ tps65090_REG_DESC(LDO2, "vsys_l2", 0, tps65090_ldo_ops),
};
-static inline struct tps65090_regulator *find_regulator_info(int id)
+static inline bool is_dcdc(int id)
{
- struct tps65090_regulator *ri;
- int i;
+ switch (id) {
+ case TPS65090_REGULATOR_DCDC1:
+ case TPS65090_REGULATOR_DCDC2:
+ case TPS65090_REGULATOR_DCDC3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int tps65090_config_ext_control(
+ struct tps65090_regulator *ri, bool enable)
+{
+ int ret;
+ struct device *parent = ri->dev->parent;
+ unsigned int reg_en_reg = ri->desc->enable_reg;
+
+ if (enable)
+ ret = tps65090_set_bits(parent, reg_en_reg, 1);
+ else
+ ret = tps65090_clr_bits(parent, reg_en_reg, 1);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in updating reg 0x%x\n", reg_en_reg);
+ return ret;
+}
+
+static int tps65090_regulator_disable_ext_control(
+ struct tps65090_regulator *ri,
+ struct tps65090_regulator_plat_data *tps_pdata)
+{
+ int ret = 0;
+ struct device *parent = ri->dev->parent;
+ unsigned int reg_en_reg = ri->desc->enable_reg;
+
+ /*
+	 * First enable the output for internal control if required,
+	 * and then disable external control.
+ */
+ if (tps_pdata->reg_init_data->constraints.always_on ||
+ tps_pdata->reg_init_data->constraints.boot_on) {
+ ret = tps65090_set_bits(parent, reg_en_reg, 0);
+ if (ret < 0) {
+ dev_err(ri->dev, "Error in set reg 0x%x\n", reg_en_reg);
+ return ret;
+ }
+ }
+ return tps65090_config_ext_control(ri, false);
+}
+
+static void tps65090_configure_regulator_config(
+ struct tps65090_regulator_plat_data *tps_pdata,
+ struct regulator_config *config)
+{
+ if (gpio_is_valid(tps_pdata->gpio)) {
+ int gpio_flag = GPIOF_OUT_INIT_LOW;
+
+ if (tps_pdata->reg_init_data->constraints.always_on ||
+ tps_pdata->reg_init_data->constraints.boot_on)
+ gpio_flag = GPIOF_OUT_INIT_HIGH;
- for (i = 0; i < ARRAY_SIZE(TPS65090_regulator); i++) {
- ri = &TPS65090_regulator[i];
- if (ri->desc.id == id)
- return ri;
+ config->ena_gpio = tps_pdata->gpio;
+ config->ena_gpio_flags = gpio_flag;
}
- return NULL;
}
-static int __devinit tps65090_regulator_probe(struct platform_device *pdev)
+static int tps65090_regulator_probe(struct platform_device *pdev)
{
struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent);
struct tps65090_regulator *ri = NULL;
struct regulator_config config = { };
struct regulator_dev *rdev;
- struct tps65090_regulator_platform_data *tps_pdata;
- int id = pdev->id;
+ struct tps65090_regulator_plat_data *tps_pdata;
+ struct tps65090_regulator *pmic;
+ struct tps65090_platform_data *tps65090_pdata;
+ int num;
+ int ret;
- dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+ dev_dbg(&pdev->dev, "Probing regulator\n");
- ri = find_regulator_info(id);
- if (ri == NULL) {
- dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ tps65090_pdata = dev_get_platdata(pdev->dev.parent);
+ if (!tps65090_pdata) {
+ dev_err(&pdev->dev, "Platform data missing\n");
return -EINVAL;
}
- tps_pdata = pdev->dev.platform_data;
- ri->dev = &pdev->dev;
-
- config.dev = &pdev->dev;
- config.init_data = &tps_pdata->regulator;
- config.driver_data = ri;
- config.regmap = tps65090_mfd->rmap;
-
- rdev = regulator_register(&ri->desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- ri->desc.name);
- return PTR_ERR(rdev);
+
+ pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
+ GFP_KERNEL);
+ if (!pmic) {
+ dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ return -ENOMEM;
+ }
+
+ for (num = 0; num < TPS65090_REGULATOR_MAX; num++) {
+ tps_pdata = tps65090_pdata->reg_pdata[num];
+
+ ri = &pmic[num];
+ ri->dev = &pdev->dev;
+ ri->desc = &tps65090_regulator_desc[num];
+
+ /*
+		 * TPS65090 DCDCs support control from an external digital input.
+ * Configure it as per platform data.
+ */
+ if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data) {
+ if (tps_pdata->enable_ext_control) {
+ tps65090_configure_regulator_config(
+ tps_pdata, &config);
+ ri->desc->ops = &tps65090_ext_control_ops;
+ } else {
+ ret = tps65090_regulator_disable_ext_control(
+ ri, tps_pdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+					"failed to disable ext control\n");
+ goto scrub;
+ }
+ }
+ }
+
+ config.dev = &pdev->dev;
+ config.driver_data = ri;
+ config.regmap = tps65090_mfd->rmap;
+ if (tps_pdata)
+ config.init_data = tps_pdata->reg_init_data;
+ else
+ config.init_data = NULL;
+
+ rdev = regulator_register(ri->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc->name);
+ ret = PTR_ERR(rdev);
+ goto scrub;
+ }
+ ri->rdev = rdev;
+
+		/* Enable external control if it is required */
+ if (tps_pdata && is_dcdc(num) && tps_pdata->reg_init_data &&
+ tps_pdata->enable_ext_control) {
+ ret = tps65090_config_ext_control(ri, true);
+ if (ret < 0) {
+				/* Increment num so this rdev is unregistered on scrub */
+ num++;
+ goto scrub;
+ }
+ }
}
- platform_set_drvdata(pdev, rdev);
+ platform_set_drvdata(pdev, pmic);
return 0;
+
+scrub:
+ while (--num >= 0) {
+ ri = &pmic[num];
+ regulator_unregister(ri->rdev);
+ }
+ return ret;
}
-static int __devexit tps65090_regulator_remove(struct platform_device *pdev)
+static int tps65090_regulator_remove(struct platform_device *pdev)
{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ struct tps65090_regulator *pmic = platform_get_drvdata(pdev);
+ struct tps65090_regulator *ri;
+ int num;
- regulator_unregister(rdev);
+ for (num = 0; num < TPS65090_REGULATOR_MAX; ++num) {
+ ri = &pmic[num];
+ regulator_unregister(ri->rdev);
+ }
return 0;
}
static struct platform_driver tps65090_regulator_driver = {
.driver = {
- .name = "tps65090-regulator",
+ .name = "tps65090-pmic",
.owner = THIS_MODULE,
},
.probe = tps65090_regulator_probe,
- .remove = __devexit_p(tps65090_regulator_remove),
+ .remove = tps65090_regulator_remove,
};
static int __init tps65090_regulator_init(void)
@@ -148,3 +269,4 @@ module_exit(tps65090_regulator_exit);
MODULE_DESCRIPTION("tps65090 regulator driver");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65090-pmic");
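
One design point in the tps65090 rework above: when a DCDC is under external control, its enable GPIO is requested preset high only for rails marked always-on or boot-on, and low otherwise, so registration does not glitch the rail. A simplified sketch of that flag choice; the enum and helper below are stand-ins for the regulator core types, not real API:

#include <stdio.h>
#include <stdbool.h>

enum gpio_init { OUT_INIT_LOW, OUT_INIT_HIGH };

static enum gpio_init ena_gpio_flag(bool always_on, bool boot_on)
{
	return (always_on || boot_on) ? OUT_INIT_HIGH : OUT_INIT_LOW;
}

int main(void)
{
	printf("always-on rail -> %s\n",
	       ena_gpio_flag(true, false) == OUT_INIT_HIGH ? "init high" : "init low");
	printf("normal rail    -> %s\n",
	       ena_gpio_flag(false, false) == OUT_INIT_HIGH ? "init high" : "init low");
	return 0;
}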
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index ab00cab905b..73dce766412 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -332,7 +332,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
}
#endif
-static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
+static int tps65217_regulator_probe(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_board *pdata = dev_get_platdata(tps->dev);
@@ -397,7 +397,7 @@ err_unregister_regulator:
return ret;
}
-static int __devexit tps65217_regulator_remove(struct platform_device *pdev)
+static int tps65217_regulator_remove(struct platform_device *pdev)
{
struct tps65217 *tps = platform_get_drvdata(pdev);
unsigned int i;
@@ -415,7 +415,7 @@ static struct platform_driver tps65217_regulator_driver = {
.name = "tps65217-pmic",
},
.probe = tps65217_regulator_probe,
- .remove = __devexit_p(tps65217_regulator_remove),
+ .remove = tps65217_regulator_remove,
};
static int __init tps65217_regulator_init(void)
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 058d2f2675e..843ee0a9bb9 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -592,7 +592,7 @@ static int pmic_remove(struct spi_device *spi)
return 0;
}
-static int __devinit pmic_probe(struct spi_device *spi)
+static int pmic_probe(struct spi_device *spi)
{
struct tps6524x *hw;
struct device *dev = &spi->dev;
@@ -649,7 +649,7 @@ fail:
static struct spi_driver pmic_driver = {
.probe = pmic_probe,
- .remove = __devexit_p(pmic_remove),
+ .remove = pmic_remove,
.driver = {
.name = "tps6524x",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index ce1e7cb8d51..f86da672c75 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -17,10 +17,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/tps6586x.h>
/* supply control and voltage setting */
@@ -255,10 +257,10 @@ static inline int tps6586x_regulator_preinit(struct device *parent,
1 << ri->enable_bit[1]);
}
-static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
+static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev,
+ int id, struct regulator_init_data *p)
{
struct device *parent = pdev->dev.parent;
- struct regulator_init_data *p = pdev->dev.platform_data;
struct tps6586x_settings *setting = p->driver_data;
uint8_t reg;
@@ -269,7 +271,7 @@ static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
return 0;
/* only SM0 and SM1 can have the slew rate settings */
- switch (pdev->id) {
+ switch (id) {
case TPS6586X_ID_SM_0:
reg = TPS6586X_SM0SL;
break;
@@ -298,58 +300,185 @@ static inline struct tps6586x_regulator *find_regulator_info(int id)
return NULL;
}
-static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct of_regulator_match tps6586x_matches[] = {
+ { .name = "sys", .driver_data = (void *)TPS6586X_ID_SYS },
+ { .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 },
+ { .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 },
+ { .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 },
+ { .name = "ldo0", .driver_data = (void *)TPS6586X_ID_LDO_0 },
+ { .name = "ldo1", .driver_data = (void *)TPS6586X_ID_LDO_1 },
+ { .name = "ldo2", .driver_data = (void *)TPS6586X_ID_LDO_2 },
+ { .name = "ldo3", .driver_data = (void *)TPS6586X_ID_LDO_3 },
+ { .name = "ldo4", .driver_data = (void *)TPS6586X_ID_LDO_4 },
+ { .name = "ldo5", .driver_data = (void *)TPS6586X_ID_LDO_5 },
+ { .name = "ldo6", .driver_data = (void *)TPS6586X_ID_LDO_6 },
+ { .name = "ldo7", .driver_data = (void *)TPS6586X_ID_LDO_7 },
+ { .name = "ldo8", .driver_data = (void *)TPS6586X_ID_LDO_8 },
+ { .name = "ldo9", .driver_data = (void *)TPS6586X_ID_LDO_9 },
+ { .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC },
+};
+
+static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps6586x_reg_matches)
+{
+ const unsigned int num = ARRAY_SIZE(tps6586x_matches);
+ struct device_node *np = pdev->dev.parent->of_node;
+ struct device_node *regs;
+ const char *sys_rail = NULL;
+ unsigned int i;
+ struct tps6586x_platform_data *pdata;
+ int err;
+
+ regs = of_find_node_by_name(np, "regulators");
+ if (!regs) {
+ dev_err(&pdev->dev, "regulator node not found\n");
+ return NULL;
+ }
+
+ err = of_regulator_match(&pdev->dev, regs, tps6586x_matches, num);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Regulator match failed, e %d\n", err);
+ of_node_put(regs);
+ return NULL;
+ }
+
+ of_node_put(regs);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Memory allocation failed\n");
+ return NULL;
+ }
+
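+ /* SYS is the parent rail; LDO5 and LDO_RTC are supplied from it */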
+ for (i = 0; i < num; i++) {
+ int id;
+ if (!tps6586x_matches[i].init_data)
+ continue;
+
+ pdata->reg_init_data[i] = tps6586x_matches[i].init_data;
+ id = (int)tps6586x_matches[i].driver_data;
+ if (id == TPS6586X_ID_SYS)
+ sys_rail = pdata->reg_init_data[i]->constraints.name;
+
+ if ((id == TPS6586X_ID_LDO_5) || (id == TPS6586X_ID_LDO_RTC))
+ pdata->reg_init_data[i]->supply_regulator = sys_rail;
+ }
+ *tps6586x_reg_matches = tps6586x_matches;
+ return pdata;
+}
+#else
+static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
+ struct platform_device *pdev,
+ struct of_regulator_match **tps6586x_reg_matches)
+{
+ *tps6586x_reg_matches = NULL;
+ return NULL;
+}
+#endif
+
+static int tps6586x_regulator_probe(struct platform_device *pdev)
{
struct tps6586x_regulator *ri = NULL;
struct regulator_config config = { };
- struct regulator_dev *rdev;
- int id = pdev->id;
+ struct regulator_dev **rdev;
+ struct regulator_init_data *reg_data;
+ struct tps6586x_platform_data *pdata;
+ struct of_regulator_match *tps6586x_reg_matches = NULL;
+ int id;
int err;
- dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+ dev_dbg(&pdev->dev, "Probing regulator\n");
- ri = find_regulator_info(id);
- if (ri == NULL) {
- dev_err(&pdev->dev, "invalid regulator ID specified\n");
- return -EINVAL;
- }
+ pdata = dev_get_platdata(pdev->dev.parent);
+ if ((!pdata) && (pdev->dev.parent->of_node))
+ pdata = tps6586x_parse_regulator_dt(pdev,
+ &tps6586x_reg_matches);
- err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
- if (err)
- return err;
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data not available, exiting\n");
+ return -ENODEV;
+ }
- config.dev = pdev->dev.parent;
- config.of_node = pdev->dev.of_node;
- config.init_data = pdev->dev.platform_data;
- config.driver_data = ri;
+ rdev = devm_kzalloc(&pdev->dev, TPS6586X_ID_MAX_REGULATOR *
+ sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ dev_err(&pdev->dev, "Memory alloc failed\n");
+ return -ENOMEM;
+ }
- rdev = regulator_register(&ri->desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- ri->desc.name);
- return PTR_ERR(rdev);
+ for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
+ reg_data = pdata->reg_init_data[id];
+
+ ri = find_regulator_info(id);
+ if (!ri) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
+ if (err) {
+ dev_err(&pdev->dev,
+ "regulator %d preinit failed, e %d\n", id, err);
+ goto fail;
+ }
+
+ config.dev = pdev->dev.parent;
+ config.init_data = reg_data;
+ config.driver_data = ri;
+
+ if (tps6586x_reg_matches)
+ config.of_node = tps6586x_reg_matches[id].of_node;
+
+ rdev[id] = regulator_register(&ri->desc, &config);
+ if (IS_ERR(rdev[id])) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ err = PTR_ERR(rdev[id]);
+ goto fail;
+ }
+
+ if (reg_data) {
+ err = tps6586x_regulator_set_slew_rate(pdev, id,
+ reg_data);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Slew rate config failed, e %d\n", err);
+ regulator_unregister(rdev[id]);
+ goto fail;
+ }
+ }
}
platform_set_drvdata(pdev, rdev);
+ return 0;
- return tps6586x_regulator_set_slew_rate(pdev);
+fail:
+ while (--id >= 0)
+ regulator_unregister(rdev[id]);
+ return err;
}
-static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
+static int tps6586x_regulator_remove(struct platform_device *pdev)
{
- struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = platform_get_drvdata(pdev);
+ int id = TPS6586X_ID_MAX_REGULATOR;
+
+ while (--id >= 0)
+ regulator_unregister(rdev[id]);
- regulator_unregister(rdev);
return 0;
}
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
- .name = "tps6586x-regulator",
+ .name = "tps6586x-pmic",
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
- .remove = __devexit_p(tps6586x_regulator_remove),
+ .remove = tps6586x_regulator_remove,
};
static int __init tps6586x_regulator_init(void)
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 793adda560c..59c3770fa77 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -38,6 +38,11 @@ static const unsigned int VIO_VSEL_table[] = {
/* VSEL tables for TPS65910 specific LDOs and dcdc's */
+/* supported VRTC voltages in microvolts */
+static const unsigned int VRTC_VSEL_table[] = {
+ 1800000,
+};
+
/* supported VDD3 voltages in microvolts */
static const unsigned int VDD3_VSEL_table[] = {
5000000,
@@ -95,6 +100,8 @@ static struct tps_info tps65910_regs[] = {
{
.name = "vrtc",
.vin_name = "vcc7",
+ .n_voltages = ARRAY_SIZE(VRTC_VSEL_table),
+ .voltage_table = VRTC_VSEL_table,
.enable_time_us = 2200,
},
{
@@ -1026,7 +1033,7 @@ static inline struct tps65910_board *tps65910_parse_dt_reg_data(
}
#endif
-static __devinit int tps65910_probe(struct platform_device *pdev)
+static int tps65910_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
@@ -1184,7 +1191,7 @@ err_unregister_regulator:
return err;
}
-static int __devexit tps65910_remove(struct platform_device *pdev)
+static int tps65910_remove(struct platform_device *pdev)
{
struct tps65910_reg *pmic = platform_get_drvdata(pdev);
int i;
@@ -1231,7 +1238,7 @@ static struct platform_driver tps65910_driver = {
.owner = THIS_MODULE,
},
.probe = tps65910_probe,
- .remove = __devexit_p(tps65910_remove),
+ .remove = tps65910_remove,
.shutdown = tps65910_shutdown,
};
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 18b2a1dcb4b..17e994e47dc 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -459,7 +459,7 @@ static struct regulator_ops tps65912_ops_ldo = {
.list_voltage = tps65912_list_voltage,
};
-static __devinit int tps65912_probe(struct platform_device *pdev)
+static int tps65912_probe(struct platform_device *pdev)
{
struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
@@ -525,7 +525,7 @@ err:
return err;
}
-static int __devexit tps65912_remove(struct platform_device *pdev)
+static int tps65912_remove(struct platform_device *pdev)
{
struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
int i;
@@ -541,7 +541,7 @@ static struct platform_driver tps65912_driver = {
.owner = THIS_MODULE,
},
.probe = tps65912_probe,
- .remove = __devexit_p(tps65912_remove),
+ .remove = tps65912_remove,
};
static int __init tps65912_init(void)
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
new file mode 100644
index 00000000000..b15d711bc8c
--- /dev/null
+++ b/drivers/regulator/tps80031-regulator.c
@@ -0,0 +1,788 @@
+/*
+ * tps80031-regulator.c -- TI TPS80031 regulator driver.
+ *
+ * Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
+ * Management with Power Path and Battery Charger.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/slab.h>
+
+/* Flags for DCDC Voltage reading */
+#define DCDC_OFFSET_EN BIT(0)
+#define DCDC_EXTENDED_EN BIT(1)
+#define TRACK_MODE_ENABLE BIT(2)
+
+#define SMPS_MULTOFFSET_VIO BIT(1)
+#define SMPS_MULTOFFSET_SMPS1 BIT(3)
+#define SMPS_MULTOFFSET_SMPS2 BIT(4)
+#define SMPS_MULTOFFSET_SMPS3 BIT(6)
+#define SMPS_MULTOFFSET_SMPS4 BIT(0)
+
+#define SMPS_CMD_MASK 0xC0
+#define SMPS_VSEL_MASK 0x3F
+#define LDO_VSEL_MASK 0x1F
+#define LDO_TRACK_VSEL_MASK 0x3F
+
+#define MISC2_LDOUSB_IN_VSYS BIT(4)
+#define MISC2_LDOUSB_IN_PMID BIT(3)
+#define MISC2_LDOUSB_IN_MASK 0x18
+
+#define MISC2_LDO3_SEL_VIB_VAL BIT(0)
+#define MISC2_LDO3_SEL_VIB_MASK 0x1
+
+#define BOOST_HW_PWR_EN BIT(5)
+#define BOOST_HW_PWR_EN_MASK BIT(5)
+
+#define OPA_MODE_EN BIT(6)
+#define OPA_MODE_EN_MASK BIT(6)
+
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_VBUS_CTRL_CLR 0x05
+#define VBUS_DISCHRG 0x20
+
+struct tps80031_regulator_info {
+ /* Regulator register address. */
+ u8 trans_reg;
+ u8 state_reg;
+ u8 force_reg;
+ u8 volt_reg;
+ u8 volt_id;
+
+ /* Power request bits */
+ int preq_bit;
+
+ /* used by regulator core */
+ struct regulator_desc desc;
+
+};
+
+struct tps80031_regulator {
+ struct device *dev;
+ struct regulator_dev *rdev;
+ struct tps80031_regulator_info *rinfo;
+
+ u8 device_flags;
+ unsigned int config_flags;
+ unsigned int ext_ctrl_flag;
+};
+
+static inline struct device *to_tps80031_dev(struct regulator_dev *rdev)
+{
+ return rdev_get_dev(rdev)->parent->parent;
+}
+
+static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ u8 reg_val;
+ int ret;
+
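+ /* Regulators under external power-request control are reported as enabled */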
+ if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
+ return true;
+
+ ret = tps80031_read(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
+ &reg_val);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Reg 0x%02x read failed, err = %d\n",
+ ri->rinfo->state_reg, ret);
+ return ret;
+ }
+ return ((reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON);
+}
+
+static int tps80031_reg_enable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+
+ if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
+ return 0;
+
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
+ TPS80031_STATE_ON, TPS80031_STATE_MASK);
+ if (ret < 0) {
+ dev_err(&rdev->dev, "Reg 0x%02x update failed, err = %d\n",
+ ri->rinfo->state_reg, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tps80031_reg_disable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+
+ if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
+ return 0;
+
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
+ TPS80031_STATE_OFF, TPS80031_STATE_MASK);
+ if (ret < 0)
+ dev_err(&rdev->dev, "Reg 0x%02x update failed, err = %d\n",
+ ri->rinfo->state_reg, ret);
+ return ret;
+}
+
+/* DCDC voltages for selectors 58 to 63 */
+static int tps80031_dcdc_voltages[4][5] = {
+ { 1350, 1500, 1800, 1900, 2100},
+ { 1350, 1500, 1800, 1900, 2100},
+ { 2084, 2315, 2778, 2932, 3241},
+ { 4167, 2315, 2778, 2932, 3241},
+};
+
+static int tps80031_dcdc_list_voltage(struct regulator_dev *rdev, unsigned sel)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ int volt_index = ri->device_flags & 0x3;
+
+ if (sel == 0)
+ return 0;
+ else if (sel < 58)
+ return regulator_list_voltage_linear(rdev, sel - 1);
+ else
+ return tps80031_dcdc_voltages[volt_index][sel - 58] * 1000;
+}
+
+static int tps80031_dcdc_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned vsel)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+ u8 reg_val;
+
+ if (ri->rinfo->force_reg) {
+ ret = tps80031_read(parent, ri->rinfo->volt_id,
+ ri->rinfo->force_reg, &reg_val);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ ri->rinfo->force_reg, ret);
+ return ret;
+ }
+ if (!(reg_val & SMPS_CMD_MASK)) {
+ ret = tps80031_update(parent, ri->rinfo->volt_id,
+ ri->rinfo->force_reg, vsel, SMPS_VSEL_MASK);
+ if (ret < 0)
+ dev_err(ri->dev,
+ "reg 0x%02x update failed, e = %d\n",
+ ri->rinfo->force_reg, ret);
+ return ret;
+ }
+ }
+ ret = tps80031_update(parent, ri->rinfo->volt_id,
+ ri->rinfo->volt_reg, vsel, SMPS_VSEL_MASK);
+ if (ret < 0)
+ dev_err(ri->dev, "reg 0x%02x update failed, e = %d\n",
+ ri->rinfo->volt_reg, ret);
+ return ret;
+}
+
+static int tps80031_dcdc_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ uint8_t vsel = 0;
+ int ret;
+
+ if (ri->rinfo->force_reg) {
+ ret = tps80031_read(parent, ri->rinfo->volt_id,
+ ri->rinfo->force_reg, &vsel);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ ri->rinfo->force_reg, ret);
+ return ret;
+ }
+
+ if (!(vsel & SMPS_CMD_MASK))
+ return vsel & SMPS_VSEL_MASK;
+ }
+ ret = tps80031_read(parent, ri->rinfo->volt_id,
+ ri->rinfo->volt_reg, &vsel);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ ri->rinfo->volt_reg, ret);
+ return ret;
+ }
+ return vsel & SMPS_VSEL_MASK;
+}
+
+static int tps80031_ldo_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned sel)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+
+ /* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
+ if ((ri->rinfo->desc.id == TPS80031_REGULATOR_LDO2) &&
+ (ri->device_flags & TRACK_MODE_ENABLE)) {
+ unsigned nvsel = (sel) & 0x1F;
+ if (((tps80031_get_chip_info(parent) == TPS80031) ||
+ ((tps80031_get_chip_info(parent) == TPS80032) &&
+ (tps80031_get_pmu_version(parent) == 0x0))) &&
+ ((nvsel == 0x0) || (nvsel >= 0x19 && nvsel <= 0x1F))) {
+ dev_err(ri->dev,
+ "Invalid sel %d in track mode LDO2\n",
+ nvsel);
+ return -EINVAL;
+ }
+ }
+
+ ret = tps80031_write(parent, ri->rinfo->volt_id,
+ ri->rinfo->volt_reg, sel);
+ if (ret < 0)
+ dev_err(ri->dev, "Error in writing reg 0x%02x, e = %d\n",
+ ri->rinfo->volt_reg, ret);
+ return ret;
+}
+
+static int tps80031_ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ uint8_t vsel;
+ int ret;
+
+ ret = tps80031_read(parent, ri->rinfo->volt_id,
+ ri->rinfo->volt_reg, &vsel);
+ if (ret < 0) {
+ dev_err(ri->dev, "Error in writing the Voltage register\n");
+ return ret;
+ }
+ return vsel & rdev->desc->vsel_mask;
+}
+
+static int tps80031_vbus_is_enabled(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret = -EIO;
+ uint8_t ctrl1 = 0;
+ uint8_t ctrl3 = 0;
+
+ ret = tps80031_read(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL1, &ctrl1);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL1, ret);
+ return ret;
+ }
+ ret = tps80031_read(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL3, &ctrl3);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL3, ret);
+ return ret;
+ }
+ if ((ctrl1 & OPA_MODE_EN) && (ctrl3 & BOOST_HW_PWR_EN))
+ return 1;
+ return ret;
+}
+
+static int tps80031_vbus_enable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret;
+
+ ret = tps80031_set_bits(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL1, OPA_MODE_EN);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL1, ret);
+ return ret;
+ }
+
+ ret = tps80031_set_bits(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL3, BOOST_HW_PWR_EN);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL3, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tps80031_vbus_disable(struct regulator_dev *rdev)
+{
+ struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
+ struct device *parent = to_tps80031_dev(rdev);
+ int ret = 0;
+
+ if (ri->config_flags & TPS80031_VBUS_DISCHRG_EN_PDN) {
+ ret = tps80031_write(parent, TPS80031_SLAVE_ID2,
+ USB_VBUS_CTRL_SET, VBUS_DISCHRG);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x write failed, e = %d\n",
+ USB_VBUS_CTRL_SET, ret);
+ return ret;
+ }
+ }
+
+ ret = tps80031_clr_bits(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL1, OPA_MODE_EN);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x clearbit failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL1, ret);
+ return ret;
+ }
+
+ ret = tps80031_clr_bits(parent, TPS80031_SLAVE_ID2,
+ TPS80031_CHARGERUSB_CTRL3, BOOST_HW_PWR_EN);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x clearbit failed, e = %d\n",
+ TPS80031_CHARGERUSB_CTRL3, ret);
+ return ret;
+ }
+
+ mdelay(DIV_ROUND_UP(ri->rinfo->desc.enable_time, 1000));
+ if (ri->config_flags & TPS80031_VBUS_DISCHRG_EN_PDN) {
+ ret = tps80031_write(parent, TPS80031_SLAVE_ID2,
+ USB_VBUS_CTRL_CLR, VBUS_DISCHRG);
+ if (ret < 0) {
+ dev_err(ri->dev, "reg 0x%02x write failed, e = %d\n",
+ USB_VBUS_CTRL_CLR, ret);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static struct regulator_ops tps80031_dcdc_ops = {
+ .list_voltage = tps80031_dcdc_list_voltage,
+ .set_voltage_sel = tps80031_dcdc_set_voltage_sel,
+ .get_voltage_sel = tps80031_dcdc_get_voltage_sel,
+ .enable = tps80031_reg_enable,
+ .disable = tps80031_reg_disable,
+ .is_enabled = tps80031_reg_is_enabled,
+};
+
+static struct regulator_ops tps80031_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = tps80031_ldo_set_voltage_sel,
+ .get_voltage_sel = tps80031_ldo_get_voltage_sel,
+ .enable = tps80031_reg_enable,
+ .disable = tps80031_reg_disable,
+ .is_enabled = tps80031_reg_is_enabled,
+};
+
+static struct regulator_ops tps80031_vbus_sw_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = tps80031_vbus_enable,
+ .disable = tps80031_vbus_disable,
+ .is_enabled = tps80031_vbus_is_enabled,
+};
+
+static struct regulator_ops tps80031_vbus_hw_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static struct regulator_ops tps80031_ext_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = tps80031_reg_enable,
+ .disable = tps80031_reg_disable,
+ .is_enabled = tps80031_reg_is_enabled,
+};
+
+/* Non-existent default definitions for some registers */
+#define TPS80031_SMPS3_CFG_FORCE 0
+#define TPS80031_SMPS4_CFG_FORCE 0
+
+#define TPS80031_VBUS_CFG_TRANS 0
+#define TPS80031_VBUS_CFG_STATE 0
+
+#define TPS80031_REG_SMPS(_id, _volt_id, _pbit) \
+{ \
+ .trans_reg = TPS80031_##_id##_CFG_TRANS, \
+ .state_reg = TPS80031_##_id##_CFG_STATE, \
+ .force_reg = TPS80031_##_id##_CFG_FORCE, \
+ .volt_reg = TPS80031_##_id##_CFG_VOLTAGE, \
+ .volt_id = TPS80031_SLAVE_##_volt_id, \
+ .preq_bit = _pbit, \
+ .desc = { \
+ .name = "tps80031_"#_id, \
+ .id = TPS80031_REGULATOR_##_id, \
+ .n_voltages = 63, \
+ .ops = &tps80031_dcdc_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .enable_time = 500, \
+ }, \
+}
+
+#define TPS80031_REG_LDO(_id, _preq_bit) \
+{ \
+ .trans_reg = TPS80031_##_id##_CFG_TRANS, \
+ .state_reg = TPS80031_##_id##_CFG_STATE, \
+ .volt_reg = TPS80031_##_id##_CFG_VOLTAGE, \
+ .volt_id = TPS80031_SLAVE_ID1, \
+ .preq_bit = _preq_bit, \
+ .desc = { \
+ .owner = THIS_MODULE, \
+ .name = "tps80031_"#_id, \
+ .id = TPS80031_REGULATOR_##_id, \
+ .ops = &tps80031_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .min_uV = 1000000, \
+ .uV_step = 100000, \
+ .linear_min_sel = 1, \
+ .n_voltages = 25, \
+ .vsel_mask = LDO_VSEL_MASK, \
+ .enable_time = 500, \
+ }, \
+}
+
+#define TPS80031_REG_FIXED(_id, max_mV, _ops, _delay, _pbit) \
+{ \
+ .trans_reg = TPS80031_##_id##_CFG_TRANS, \
+ .state_reg = TPS80031_##_id##_CFG_STATE, \
+ .volt_id = TPS80031_SLAVE_ID1, \
+ .preq_bit = _pbit, \
+ .desc = { \
+ .name = "tps80031_"#_id, \
+ .id = TPS80031_REGULATOR_##_id, \
+ .min_uV = max_mV * 1000, \
+ .n_voltages = 1, \
+ .ops = &_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .enable_time = _delay, \
+ }, \
+}
+
+static struct tps80031_regulator_info tps80031_rinfo[TPS80031_REGULATOR_MAX] = {
+ TPS80031_REG_SMPS(VIO, ID0, 4),
+ TPS80031_REG_SMPS(SMPS1, ID0, 0),
+ TPS80031_REG_SMPS(SMPS2, ID0, 1),
+ TPS80031_REG_SMPS(SMPS3, ID1, 2),
+ TPS80031_REG_SMPS(SMPS4, ID1, 3),
+ TPS80031_REG_LDO(VANA, -1),
+ TPS80031_REG_LDO(LDO1, 8),
+ TPS80031_REG_LDO(LDO2, 9),
+ TPS80031_REG_LDO(LDO3, 10),
+ TPS80031_REG_LDO(LDO4, 11),
+ TPS80031_REG_LDO(LDO5, 12),
+ TPS80031_REG_LDO(LDO6, 13),
+ TPS80031_REG_LDO(LDO7, 14),
+ TPS80031_REG_LDO(LDOLN, 15),
+ TPS80031_REG_LDO(LDOUSB, 5),
+ TPS80031_REG_FIXED(VBUS, 5000, tps80031_vbus_hw_ops, 100000, -1),
+ TPS80031_REG_FIXED(REGEN1, 3300, tps80031_ext_reg_ops, 0, 16),
+ TPS80031_REG_FIXED(REGEN2, 3300, tps80031_ext_reg_ops, 0, 17),
+ TPS80031_REG_FIXED(SYSEN, 3300, tps80031_ext_reg_ops, 0, 18),
+};
+
+static int tps80031_power_req_config(struct device *parent,
+ struct tps80031_regulator *ri,
+ struct tps80031_regulator_platform_data *tps80031_pdata)
+{
+ int ret = 0;
+
+ if (ri->rinfo->preq_bit < 0)
+ goto skip_pwr_req_config;
+
+ ret = tps80031_ext_power_req_config(parent, ri->ext_ctrl_flag,
+ ri->rinfo->preq_bit, ri->rinfo->state_reg,
+ ri->rinfo->trans_reg);
+ if (ret < 0) {
+ dev_err(ri->dev, "ext powerreq config failed, err = %d\n", ret);
+ return ret;
+ }
+
+skip_pwr_req_config:
+ if (tps80031_pdata->ext_ctrl_flag & TPS80031_PWR_ON_ON_SLEEP) {
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
+ ri->rinfo->trans_reg, TPS80031_TRANS_SLEEP_ON,
+ TPS80031_TRANS_SLEEP_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev, "Reg 0x%02x update failed, e %d\n",
+ ri->rinfo->trans_reg, ret);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int tps80031_regulator_config(struct device *parent,
+ struct tps80031_regulator *ri,
+ struct tps80031_regulator_platform_data *tps80031_pdata)
+{
+ int ret = 0;
+
+ switch (ri->rinfo->desc.id) {
+ case TPS80031_REGULATOR_LDOUSB:
+ if (ri->config_flags & (TPS80031_USBLDO_INPUT_VSYS |
+ TPS80031_USBLDO_INPUT_PMID)) {
+ unsigned val = 0;
+ if (ri->config_flags & TPS80031_USBLDO_INPUT_VSYS)
+ val = MISC2_LDOUSB_IN_VSYS;
+ else
+ val = MISC2_LDOUSB_IN_PMID;
+
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
+ TPS80031_MISC2, val,
+ MISC2_LDOUSB_IN_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev,
+ "LDOUSB config failed, e= %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
+ case TPS80031_REGULATOR_LDO3:
+ if (ri->config_flags & TPS80031_LDO3_OUTPUT_VIB) {
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
+ TPS80031_MISC2, MISC2_LDO3_SEL_VIB_VAL,
+ MISC2_LDO3_SEL_VIB_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev,
+ "LDO3 config failed, e = %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
+ case TPS80031_REGULATOR_VBUS:
+ /* Provide SW control ops if VBUS is under SW control */
+ if (!(ri->config_flags & TPS80031_VBUS_SW_ONLY))
+ ri->rinfo->desc.ops = &tps80031_vbus_sw_ops;
+ break;
+ default:
+ break;
+ }
+
+ /* Configure Active state to ON, SLEEP to OFF and OFF_state to OFF */
+ ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->trans_reg,
+ TPS80031_TRANS_ACTIVE_ON | TPS80031_TRANS_SLEEP_OFF |
+ TPS80031_TRANS_OFF_OFF, TPS80031_TRANS_ACTIVE_MASK |
+ TPS80031_TRANS_SLEEP_MASK | TPS80031_TRANS_OFF_MASK);
+ if (ret < 0) {
+ dev_err(ri->dev, "trans reg update failed, e %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int check_smps_mode_mult(struct device *parent,
+ struct tps80031_regulator *ri)
+{
+ int mult_offset;
+ int ret;
+ u8 smps_offset;
+ u8 smps_mult;
+
+ ret = tps80031_read(parent, TPS80031_SLAVE_ID1,
+ TPS80031_SMPS_OFFSET, &smps_offset);
+ if (ret < 0) {
+ dev_err(parent, "Error in reading smps offset register\n");
+ return ret;
+ }
+
+ ret = tps80031_read(parent, TPS80031_SLAVE_ID1,
+ TPS80031_SMPS_MULT, &smps_mult);
+ if (ret < 0) {
+ dev_err(parent, "Error in reading smps mult register\n");
+ return ret;
+ }
+
+ switch (ri->rinfo->desc.id) {
+ case TPS80031_REGULATOR_VIO:
+ mult_offset = SMPS_MULTOFFSET_VIO;
+ break;
+ case TPS80031_REGULATOR_SMPS1:
+ mult_offset = SMPS_MULTOFFSET_SMPS1;
+ break;
+ case TPS80031_REGULATOR_SMPS2:
+ mult_offset = SMPS_MULTOFFSET_SMPS2;
+ break;
+ case TPS80031_REGULATOR_SMPS3:
+ mult_offset = SMPS_MULTOFFSET_SMPS3;
+ break;
+ case TPS80031_REGULATOR_SMPS4:
+ mult_offset = SMPS_MULTOFFSET_SMPS4;
+ break;
+ case TPS80031_REGULATOR_LDO2:
+ ri->device_flags = smps_mult & BIT(5) ? TRACK_MODE_ENABLE : 0;
+ /* In TRACK mode, LDO2 varies from 600mV to 1300mV */
+ if (ri->device_flags & TRACK_MODE_ENABLE) {
+ ri->rinfo->desc.min_uV = 600000;
+ ri->rinfo->desc.uV_step = 12500;
+ ri->rinfo->desc.n_voltages = 57;
+ ri->rinfo->desc.vsel_mask = LDO_TRACK_VSEL_MASK;
+ }
+ return 0;
+ default:
+ return 0;
+ }
+
+ ri->device_flags = (smps_offset & mult_offset) ? DCDC_OFFSET_EN : 0;
+ ri->device_flags |= (smps_mult & mult_offset) ? DCDC_EXTENDED_EN : 0;
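+ /* The OFFSET and EXTENDED bits select the DCDC voltage range and step */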
+ switch (ri->device_flags) {
+ case 0:
+ ri->rinfo->desc.min_uV = 607700;
+ ri->rinfo->desc.uV_step = 12660;
+ break;
+ case DCDC_OFFSET_EN:
+ ri->rinfo->desc.min_uV = 700000;
+ ri->rinfo->desc.uV_step = 12500;
+ break;
+ case DCDC_EXTENDED_EN:
+ ri->rinfo->desc.min_uV = 1852000;
+ ri->rinfo->desc.uV_step = 38600;
+ break;
+ case DCDC_OFFSET_EN | DCDC_EXTENDED_EN:
+ ri->rinfo->desc.min_uV = 2161000;
+ ri->rinfo->desc.uV_step = 38600;
+ break;
+ }
+ return 0;
+}
+
+static int tps80031_regulator_probe(struct platform_device *pdev)
+{
+ struct tps80031_platform_data *pdata;
+ struct tps80031_regulator_platform_data *tps_pdata;
+ struct tps80031_regulator *ri;
+ struct tps80031_regulator *pmic;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int ret;
+ int num;
+
+ pdata = dev_get_platdata(pdev->dev.parent);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ pmic = devm_kzalloc(&pdev->dev,
+ TPS80031_REGULATOR_MAX * sizeof(*pmic), GFP_KERNEL);
+ if (!pmic) {
+ dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+ return -ENOMEM;
+ }
+
+ for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
+ tps_pdata = pdata->regulator_pdata[num];
+ ri = &pmic[num];
+ ri->rinfo = &tps80031_rinfo[num];
+ ri->dev = &pdev->dev;
+
+ check_smps_mode_mult(pdev->dev.parent, ri);
+ config.dev = &pdev->dev;
+ config.init_data = NULL;
+ config.driver_data = ri;
+ if (tps_pdata) {
+ config.init_data = tps_pdata->reg_init_data;
+ ri->config_flags = tps_pdata->config_flags;
+ ri->ext_ctrl_flag = tps_pdata->ext_ctrl_flag;
+ ret = tps80031_regulator_config(pdev->dev.parent,
+ ri, tps_pdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "regulator config failed, e %d\n", ret);
+ goto fail;
+ }
+
+ ret = tps80031_power_req_config(pdev->dev.parent,
+ ri, tps_pdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "pwr_req config failed, err %d\n", ret);
+ goto fail;
+ }
+ }
+ rdev = regulator_register(&ri->rinfo->desc, &config);
+ if (IS_ERR_OR_NULL(rdev)) {
+ dev_err(&pdev->dev,
+ "register regulator failed %s\n",
+ ri->rinfo->desc.name);
+ ret = PTR_ERR(rdev);
+ goto fail;
+ }
+ ri->rdev = rdev;
+ }
+
+ platform_set_drvdata(pdev, pmic);
+ return 0;
+fail:
+ while (--num >= 0) {
+ ri = &pmic[num];
+ regulator_unregister(ri->rdev);
+ }
+ return ret;
+}
+
+static int tps80031_regulator_remove(struct platform_device *pdev)
+{
+ struct tps80031_regulator *pmic = platform_get_drvdata(pdev);
+ struct tps80031_regulator *ri = NULL;
+ int num;
+
+ for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
+ ri = &pmic[num];
+ regulator_unregister(ri->rdev);
+ }
+ return 0;
+}
+
+static struct platform_driver tps80031_regulator_driver = {
+ .driver = {
+ .name = "tps80031-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_regulator_probe,
+ .remove = tps80031_regulator_remove,
+};
+
+static int __init tps80031_regulator_init(void)
+{
+ return platform_driver_register(&tps80031_regulator_driver);
+}
+subsys_initcall(tps80031_regulator_init);
+
+static void __exit tps80031_regulator_exit(void)
+{
+ platform_driver_unregister(&tps80031_regulator_driver);
+}
+module_exit(tps80031_regulator_exit);
+
+MODULE_ALIAS("platform:tps80031-regulator");
+MODULE_DESCRIPTION("Regulator Driver for TI TPS80031/TPS80032 PMIC");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7eb986a4074..493c8c6a241 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1116,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = {
};
MODULE_DEVICE_TABLE(of, twl_of_match);
-static int __devinit twlreg_probe(struct platform_device *pdev)
+static int twlreg_probe(struct platform_device *pdev)
{
int i, id;
struct twlreg_info *info;
@@ -1241,7 +1241,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit twlreg_remove(struct platform_device *pdev)
+static int twlreg_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct twlreg_info *info = rdev->reg_data;
@@ -1255,7 +1255,7 @@ MODULE_ALIAS("platform:twl_reg");
static struct platform_driver twlreg_driver = {
.probe = twlreg_probe,
- .remove = __devexit_p(twlreg_remove),
+ .remove = twlreg_remove,
/* NOTE: short name, to work around driver model truncation of
* "twl_regulator.12" (and friends) to "twl_regulator.1".
*/
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
new file mode 100644
index 00000000000..4668c7f8133
--- /dev/null
+++ b/drivers/regulator/vexpress.c
@@ -0,0 +1,147 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#define DRVNAME "vexpress-regulator"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/vexpress.h>
+
+struct vexpress_regulator {
+ struct regulator_desc desc;
+ struct regulator_dev *regdev;
+ struct vexpress_config_func *func;
+};
+
+static int vexpress_regulator_get_voltage(struct regulator_dev *regdev)
+{
+ struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
+ u32 uV;
+ int err = vexpress_config_read(reg->func, 0, &uV);
+
+ return err ? err : uV;
+}
+
+static int vexpress_regulator_set_voltage(struct regulator_dev *regdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct vexpress_regulator *reg = rdev_get_drvdata(regdev);
+
+ return vexpress_config_write(reg->func, 0, min_uV);
+}
+
+static struct regulator_ops vexpress_regulator_ops_ro = {
+ .get_voltage = vexpress_regulator_get_voltage,
+};
+
+static struct regulator_ops vexpress_regulator_ops = {
+ .get_voltage = vexpress_regulator_get_voltage,
+ .set_voltage = vexpress_regulator_set_voltage,
+};
+
+static int vexpress_regulator_probe(struct platform_device *pdev)
+{
+ int err;
+ struct vexpress_regulator *reg;
+ struct regulator_init_data *init_data;
+ struct regulator_config config = { };
+
+ reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ err = -ENOMEM;
+ goto error_kzalloc;
+ }
+
+ reg->func = vexpress_config_func_get_by_dev(&pdev->dev);
+ if (!reg->func) {
+ err = -ENXIO;
+ goto error_get_func;
+ }
+
+ reg->desc.name = dev_name(&pdev->dev);
+ reg->desc.type = REGULATOR_VOLTAGE;
+ reg->desc.owner = THIS_MODULE;
+ reg->desc.continuous_voltage_range = true;
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!init_data) {
+ err = -EINVAL;
+ goto error_get_regulator_init_data;
+ }
+
+ init_data->constraints.apply_uV = 0;
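+ /* Register as read-write only when the DT constraints give a voltage range */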
+ if (init_data->constraints.min_uV && init_data->constraints.max_uV)
+ reg->desc.ops = &vexpress_regulator_ops;
+ else
+ reg->desc.ops = &vexpress_regulator_ops_ro;
+
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = reg;
+ config.of_node = pdev->dev.of_node;
+
+ reg->regdev = regulator_register(&reg->desc, &config);
+ if (IS_ERR(reg->regdev)) {
+ err = PTR_ERR(reg->regdev);
+ goto error_regulator_register;
+ }
+
+ platform_set_drvdata(pdev, reg);
+
+ return 0;
+
+error_regulator_register:
+error_get_regulator_init_data:
+ vexpress_config_func_put(reg->func);
+error_get_func:
+error_kzalloc:
+ return err;
+}
+
+static int vexpress_regulator_remove(struct platform_device *pdev)
+{
+ struct vexpress_regulator *reg = platform_get_drvdata(pdev);
+
+ vexpress_config_func_put(reg->func);
+ regulator_unregister(reg->regdev);
+
+ return 0;
+}
+
+static struct of_device_id vexpress_regulator_of_match[] = {
+ { .compatible = "arm,vexpress-volt", },
+ { }
+};
+
+static struct platform_driver vexpress_regulator_driver = {
+ .probe = vexpress_regulator_probe,
+ .remove = vexpress_regulator_remove,
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vexpress_regulator_of_match,
+ },
+};
+
+module_platform_driver(vexpress_regulator_driver);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Versatile Express regulator");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:vexpress-regulator");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index c038e742253..01c66e9712a 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -285,7 +285,7 @@ static const struct attribute_group regulator_virtual_attr_group = {
.attrs = regulator_virtual_attributes,
};
-static int __devinit regulator_virtual_probe(struct platform_device *pdev)
+static int regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = pdev->dev.platform_data;
struct virtual_consumer_data *drvdata;
@@ -321,7 +321,7 @@ static int __devinit regulator_virtual_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit regulator_virtual_remove(struct platform_device *pdev)
+static int regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
@@ -337,7 +337,7 @@ static int __devexit regulator_virtual_remove(struct platform_device *pdev)
static struct platform_driver regulator_virtual_consumer_driver = {
.probe = regulator_virtual_probe,
- .remove = __devexit_p(regulator_virtual_remove),
+ .remove = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 782c228a19b..0af6898bcd7 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -223,7 +223,7 @@ static int wm831x_buckv_map_voltage(struct regulator_dev *rdev,
if (min_uV < 600000)
vsel = 0;
else if (min_uV <= 1800000)
- vsel = ((min_uV - 600000) / 12500) + 8;
+ vsel = DIV_ROUND_UP(min_uV - 600000, 12500) + 8;
else
return -EINVAL;
@@ -290,7 +290,7 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
if (vsel > dcdc->dvs_vsel) {
ret = wm831x_set_bits(wm831x, dvs_reg,
WM831X_DC1_DVS_VSEL_MASK,
- dcdc->dvs_vsel);
+ vsel);
if (ret == 0)
dcdc->dvs_vsel = vsel;
else
@@ -387,7 +387,7 @@ static struct regulator_ops wm831x_buckv_ops = {
* Set up DVS control. We just log errors since we can still run
* (with reduced performance) if we fail.
*/
-static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
+static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
struct wm831x_buckv_pdata *pdata)
{
struct wm831x *wm831x = dcdc->wm831x;
@@ -448,7 +448,7 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
}
}
-static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
+static int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -562,7 +562,7 @@ err:
return ret;
}
-static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
+static int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
@@ -582,7 +582,7 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
static struct platform_driver wm831x_buckv_driver = {
.probe = wm831x_buckv_probe,
- .remove = __devexit_p(wm831x_buckv_remove),
+ .remove = wm831x_buckv_remove,
.driver = {
.name = "wm831x-buckv",
.owner = THIS_MODULE,
@@ -623,7 +623,7 @@ static struct regulator_ops wm831x_buckp_ops = {
.set_suspend_mode = wm831x_dcdc_set_suspend_mode,
};
-static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
+static int wm831x_buckp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -710,7 +710,7 @@ err:
return ret;
}
-static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
+static int wm831x_buckp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
@@ -725,7 +725,7 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
static struct platform_driver wm831x_buckp_driver = {
.probe = wm831x_buckp_probe,
- .remove = __devexit_p(wm831x_buckp_remove),
+ .remove = wm831x_buckp_remove,
.driver = {
.name = "wm831x-buckp",
.owner = THIS_MODULE,
@@ -771,7 +771,7 @@ static struct regulator_ops wm831x_boostp_ops = {
.disable = regulator_disable_regmap,
};
-static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
+static int wm831x_boostp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -845,7 +845,7 @@ err:
return ret;
}
-static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
+static int wm831x_boostp_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
@@ -860,7 +860,7 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
static struct platform_driver wm831x_boostp_driver = {
.probe = wm831x_boostp_probe,
- .remove = __devexit_p(wm831x_boostp_remove),
+ .remove = wm831x_boostp_remove,
.driver = {
.name = "wm831x-boostp",
.owner = THIS_MODULE,
@@ -883,7 +883,7 @@ static struct regulator_ops wm831x_epe_ops = {
.get_status = wm831x_dcdc_get_status,
};
-static __devinit int wm831x_epe_probe(struct platform_device *pdev)
+static int wm831x_epe_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -936,7 +936,7 @@ err:
return ret;
}
-static __devexit int wm831x_epe_remove(struct platform_device *pdev)
+static int wm831x_epe_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
@@ -948,7 +948,7 @@ static __devexit int wm831x_epe_remove(struct platform_device *pdev)
static struct platform_driver wm831x_epe_driver = {
.probe = wm831x_epe_probe,
- .remove = __devexit_p(wm831x_epe_remove),
+ .remove = wm831x_epe_remove,
.driver = {
.name = "wm831x-epe",
.owner = THIS_MODULE,
@@ -993,4 +993,5 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-buckv");
MODULE_ALIAS("platform:wm831x-buckp");
+MODULE_ALIAS("platform:wm831x-boostp");
MODULE_ALIAS("platform:wm831x-epe");
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 2646a1902b3..68586ee3e1c 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -148,7 +148,7 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
}
-static __devinit int wm831x_isink_probe(struct platform_device *pdev)
+static int wm831x_isink_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -221,7 +221,7 @@ err:
return ret;
}
-static __devexit int wm831x_isink_remove(struct platform_device *pdev)
+static int wm831x_isink_remove(struct platform_device *pdev)
{
struct wm831x_isink *isink = platform_get_drvdata(pdev);
@@ -236,7 +236,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
static struct platform_driver wm831x_isink_driver = {
.probe = wm831x_isink_probe,
- .remove = __devexit_p(wm831x_isink_remove),
+ .remove = wm831x_isink_remove,
.driver = {
.name = "wm831x-isink",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index c2dc03993dc..1ec379a9a95 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -247,7 +247,7 @@ static struct regulator_ops wm831x_gp_ldo_ops = {
.disable = regulator_disable_regmap,
};
-static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
+static int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -334,7 +334,7 @@ err:
return ret;
}
-static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
+static int wm831x_gp_ldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
@@ -349,7 +349,7 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
static struct platform_driver wm831x_gp_ldo_driver = {
.probe = wm831x_gp_ldo_probe,
- .remove = __devexit_p(wm831x_gp_ldo_remove),
+ .remove = wm831x_gp_ldo_remove,
.driver = {
.name = "wm831x-ldo",
.owner = THIS_MODULE,
@@ -504,7 +504,7 @@ static struct regulator_ops wm831x_aldo_ops = {
.disable = regulator_disable_regmap,
};
-static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
+static int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -590,7 +590,7 @@ err:
return ret;
}
-static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
+static int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
@@ -603,7 +603,7 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
static struct platform_driver wm831x_aldo_driver = {
.probe = wm831x_aldo_probe,
- .remove = __devexit_p(wm831x_aldo_remove),
+ .remove = wm831x_aldo_remove,
.driver = {
.name = "wm831x-aldo",
.owner = THIS_MODULE,
@@ -660,7 +660,7 @@ static struct regulator_ops wm831x_alive_ldo_ops = {
.disable = regulator_disable_regmap,
};
-static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
+static int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -737,7 +737,7 @@ err:
return ret;
}
-static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
+static int wm831x_alive_ldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
@@ -748,7 +748,7 @@ static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
static struct platform_driver wm831x_alive_ldo_driver = {
.probe = wm831x_alive_ldo_probe,
- .remove = __devexit_p(wm831x_alive_ldo_remove),
+ .remove = wm831x_alive_ldo_remove,
.driver = {
.name = "wm831x-alive-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 27c746ef063..c6a32ea80b9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -226,7 +226,7 @@ static struct regulator_desc regulators[] = {
},
};
-static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
+static int wm8400_regulator_probe(struct platform_device *pdev)
{
struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
struct regulator_config config = { };
@@ -246,7 +246,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit wm8400_regulator_remove(struct platform_device *pdev)
+static int wm8400_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
@@ -261,7 +261,7 @@ static struct platform_driver wm8400_regulator_driver = {
.name = "wm8400-regulator",
},
.probe = wm8400_regulator_probe,
- .remove = __devexit_p(wm8400_regulator_remove),
+ .remove = wm8400_regulator_remove,
};
/**
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 86bb48db149..6ff87234264 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -99,7 +99,7 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
},
};
-static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
+static int wm8994_ldo_probe(struct platform_device *pdev)
{
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
@@ -142,7 +142,7 @@ err:
return ret;
}
-static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
+static int wm8994_ldo_remove(struct platform_device *pdev)
{
struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
@@ -155,7 +155,7 @@ static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
static struct platform_driver wm8994_ldo_driver = {
.probe = wm8994_ldo_probe,
- .remove = __devexit_p(wm8994_ldo_remove),
+ .remove = wm8994_ldo_remove,
.driver = {
.name = "wm8994-ldo",
.owner = THIS_MODULE,
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index e1f89d64973..0d36f94ab51 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -66,13 +66,13 @@ rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
return -EINVAL;
}
- /* We assume the firmware has the same endianess as the host */
+ /* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
# else /* BIG ENDIAN */
if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
# endif
- dev_err(dev, "Unsupported firmware endianess\n");
+ dev_err(dev, "Unsupported firmware endianness\n");
return -EINVAL;
}
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 1859f71372e..027096fe6a1 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -764,7 +764,7 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
/* add message to the remote processor's virtqueue */
err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
- if (err < 0) {
+ if (err) {
/*
* need to reclaim the buffer here, otherwise it's lost
* (memory won't leak, but rpmsg won't use it again for TX).
@@ -776,8 +776,6 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
/* tell the remote processor it has a pending message to read */
virtqueue_kick(vrp->svq);
-
- err = 0;
out:
mutex_unlock(&vrp->tx_lock);
return err;
@@ -980,7 +978,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
GFP_KERNEL);
- WARN_ON(err < 0); /* sanity check; this can't really happen */
+ WARN_ON(err); /* sanity check; this can't really happen */
}
/* suppress "tx-complete" interrupts */
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 19c03ab2bdc..d0cea02b5df 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -269,6 +269,15 @@ config RTC_DRV_X1205
This driver can also be built as a module. If so, the module
will be called rtc-x1205.
+config RTC_DRV_PCF8523
+ tristate "NXP PCF8523"
+ help
+ If you say yes here you get support for the NXP PCF8523 RTC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf8523.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -600,6 +609,16 @@ config RTC_DRV_DA9052
Say y here to support the RTC driver for Dialog Semiconductor
DA9052-BC and DA9053-AA/Bx PMICs.
+config RTC_DRV_DA9055
+ tristate "Dialog Semiconductor DA9055 RTC"
+ depends on MFD_DA9055
+ help
+ If you say yes here you will get support for the
+ RTC of the Dialog DA9055 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-da9055.
+
config RTC_DRV_EFI
tristate "EFI RTC"
depends on IA64
@@ -768,7 +787,7 @@ config RTC_DRV_DAVINCI
config RTC_DRV_IMXDI
tristate "Freescale IMX DryIce Real Time Clock"
- depends on SOC_IMX25
+ depends on ARCH_MXC
help
Support for Freescale IMX DryIce RTC
@@ -777,11 +796,13 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
+ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX || SOC_AM33XX
help
- Say "yes" here to support the real time clock on TI OMAP1 and
- DA8xx/OMAP-L13x chips. This driver can also be built as a
- module called rtc-omap.
+ Say "yes" here to support the on chip real time clock
+ present on TI OMAP1, AM33xx and DA8xx/OMAP-L13x.
+
+ This driver can also be built as a module, if so, module
+ will be called rtc-omap.
config HAVE_S3C_RTC
bool
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 56297f0fd38..c3f62c80dc0 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
+obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o
obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
+obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index f8a0aab218c..5143629dedb 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -244,7 +244,6 @@ void rtc_device_unregister(struct rtc_device *rtc)
rtc_proc_del_device(rtc);
device_unregister(&rtc->dev);
rtc->ops = NULL;
- ida_simple_remove(&rtc_ida, rtc->id);
mutex_unlock(&rtc->ops_lock);
put_device(&rtc->dev);
}
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
new file mode 100644
index 00000000000..96bafc5c3bf
--- /dev/null
+++ b/drivers/rtc/rtc-da9055.c
@@ -0,0 +1,413 @@
+/*
+ * Real time clock driver for DA9055
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Dajun Dajun Chen <dajun.chen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+#include <linux/mfd/da9055/pdata.h>
+
+struct da9055_rtc {
+ struct rtc_device *rtc;
+ struct da9055 *da9055;
+ int alarm_enable;
+};
+
+static int da9055_rtc_enable_alarm(struct da9055_rtc *rtc, bool enable)
+{
+ int ret;
+ if (enable) {
+ ret = da9055_reg_update(rtc->da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_EN,
+ DA9055_RTC_ALM_EN);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev, "Failed to enable ALM: %d\n",
+ ret);
+ rtc->alarm_enable = 1;
+ } else {
+ ret = da9055_reg_update(rtc->da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_EN, 0);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev,
+ "Failed to disable ALM: %d\n", ret);
+ rtc->alarm_enable = 0;
+ }
+ return ret;
+}
+
+static irqreturn_t da9055_rtc_alm_irq(int irq, void *data)
+{
+ struct da9055_rtc *rtc = data;
+
+ da9055_rtc_enable_alarm(rtc, 0);
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int da9055_read_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
+{
+ int ret;
+ uint8_t v[5];
+
+ ret = da9055_group_read(da9055, DA9055_REG_ALARM_MI, 5, v);
+ if (ret != 0) {
+ dev_err(da9055->dev, "Failed to group read ALM: %d\n", ret);
+ return ret;
+ }
+
+ rtc_tm->tm_year = (v[4] & DA9055_RTC_ALM_YEAR) + 100;
+ rtc_tm->tm_mon = (v[3] & DA9055_RTC_ALM_MONTH) - 1;
+ rtc_tm->tm_mday = v[2] & DA9055_RTC_ALM_DAY;
+ rtc_tm->tm_hour = v[1] & DA9055_RTC_ALM_HOUR;
+ rtc_tm->tm_min = v[0] & DA9055_RTC_ALM_MIN;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int da9055_set_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
+{
+ int ret;
+ uint8_t v[2];
+
+ rtc_tm->tm_year -= 100;
+ rtc_tm->tm_mon += 1;
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MI,
+ DA9055_RTC_ALM_MIN, rtc_tm->tm_min);
+ if (ret != 0) {
+ dev_err(da9055->dev, "Failed to write ALRM MIN: %d\n", ret);
+ return ret;
+ }
+
+ v[0] = rtc_tm->tm_hour;
+ v[1] = rtc_tm->tm_mday;
+
+ ret = da9055_group_write(da9055, DA9055_REG_ALARM_H, 2, v);
+ if (ret < 0)
+ return ret;
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MO,
+ DA9055_RTC_ALM_MONTH, rtc_tm->tm_mon);
+ if (ret < 0)
+ dev_err(da9055->dev, "Failed to write ALM Month:%d\n", ret);
+
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_Y,
+ DA9055_RTC_ALM_YEAR, rtc_tm->tm_year);
+ if (ret < 0)
+ dev_err(da9055->dev, "Failed to write ALM Year:%d\n", ret);
+
+ return ret;
+}
+
+static int da9055_rtc_get_alarm_status(struct da9055 *da9055)
+{
+ int ret;
+
+ ret = da9055_reg_read(da9055, DA9055_REG_ALARM_Y);
+ if (ret < 0) {
+ dev_err(da9055->dev, "Failed to read ALM: %d\n", ret);
+ return ret;
+ }
+ ret &= DA9055_RTC_ALM_EN;
+ return (ret > 0) ? 1 : 0;
+}
+
+static int da9055_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+ uint8_t v[6];
+ int ret;
+
+ ret = da9055_reg_read(rtc->da9055, DA9055_REG_COUNT_S);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Registers are only valid when RTC_READ
+ * status bit is asserted
+ */
+ if (!(ret & DA9055_RTC_READ))
+ return -EBUSY;
+
+ ret = da9055_group_read(rtc->da9055, DA9055_REG_COUNT_S, 6, v);
+ if (ret < 0) {
+ dev_err(rtc->da9055->dev, "Failed to read RTC time : %d\n",
+ ret);
+ return ret;
+ }
+
+ rtc_tm->tm_year = (v[5] & DA9055_RTC_YEAR) + 100;
+ rtc_tm->tm_mon = (v[4] & DA9055_RTC_MONTH) - 1;
+ rtc_tm->tm_mday = v[3] & DA9055_RTC_DAY;
+ rtc_tm->tm_hour = v[2] & DA9055_RTC_HOUR;
+ rtc_tm->tm_min = v[1] & DA9055_RTC_MIN;
+ rtc_tm->tm_sec = v[0] & DA9055_RTC_SEC;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int da9055_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct da9055_rtc *rtc;
+ uint8_t v[6];
+
+ rtc = dev_get_drvdata(dev);
+
+ v[0] = tm->tm_sec;
+ v[1] = tm->tm_min;
+ v[2] = tm->tm_hour;
+ v[3] = tm->tm_mday;
+ v[4] = tm->tm_mon + 1;
+ v[5] = tm->tm_year - 100;
+
+ return da9055_group_write(rtc->da9055, DA9055_REG_COUNT_S, 6, v);
+}
+
+static int da9055_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ struct rtc_time *tm = &alrm->time;
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ ret = da9055_read_alarm(rtc->da9055, tm);
+
+ if (ret)
+ return ret;
+
+ alrm->enabled = da9055_rtc_get_alarm_status(rtc->da9055);
+
+ return 0;
+}
+
+static int da9055_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ int ret;
+ struct rtc_time *tm = &alrm->time;
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = da9055_set_alarm(rtc->da9055, tm);
+ if (ret)
+ return ret;
+
+ ret = da9055_rtc_enable_alarm(rtc, 1);
+
+ return ret;
+}
+
+static int da9055_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct da9055_rtc *rtc = dev_get_drvdata(dev);
+
+ return da9055_rtc_enable_alarm(rtc, enabled);
+}
+
+static const struct rtc_class_ops da9055_rtc_ops = {
+ .read_time = da9055_rtc_read_time,
+ .set_time = da9055_rtc_set_time,
+ .read_alarm = da9055_rtc_read_alarm,
+ .set_alarm = da9055_rtc_set_alarm,
+ .alarm_irq_enable = da9055_rtc_alarm_irq_enable,
+};
+
+static int __init da9055_rtc_device_init(struct da9055 *da9055,
+ struct da9055_pdata *pdata)
+{
+ int ret;
+
+ /* Enable RTC and the internal Crystal */
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_EN, DA9055_RTC_EN);
+ if (ret < 0)
+ return ret;
+ ret = da9055_reg_update(da9055, DA9055_REG_EN_32K,
+ DA9055_CRYSTAL_EN, DA9055_CRYSTAL_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Enable RTC in Power Down mode */
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_MODE_PD, DA9055_RTC_MODE_PD);
+ if (ret < 0)
+ return ret;
+
+ /* Enable RTC in Reset mode */
+ if (pdata && pdata->reset_enable) {
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_RTC_MODE_SD,
+ DA9055_RTC_MODE_SD <<
+ DA9055_RTC_MODE_SD_SHIFT);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Disable the RTC TICK ALM */
+ ret = da9055_reg_update(da9055, DA9055_REG_ALARM_MO,
+ DA9055_RTC_TICK_WAKE_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int da9055_rtc_probe(struct platform_device *pdev)
+{
+ struct da9055_rtc *rtc;
+ struct da9055_pdata *pdata = NULL;
+ int ret, alm_irq;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(struct da9055_rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->da9055 = dev_get_drvdata(pdev->dev.parent);
+ pdata = rtc->da9055->dev->platform_data;
+ platform_set_drvdata(pdev, rtc);
+
+ ret = da9055_rtc_device_init(rtc->da9055, pdata);
+ if (ret < 0)
+ goto err_rtc;
+
+ ret = da9055_reg_read(rtc->da9055, DA9055_REG_ALARM_Y);
+ if (ret < 0)
+ goto err_rtc;
+
+ if (ret & DA9055_RTC_ALM_EN)
+ rtc->alarm_enable = 1;
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &da9055_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ ret = PTR_ERR(rtc->rtc);
+ goto err_rtc;
+ }
+
+ alm_irq = platform_get_irq_byname(pdev, "ALM");
+ alm_irq = regmap_irq_get_virq(rtc->da9055->irq_data, alm_irq);
+ ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL,
+ da9055_rtc_alm_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ALM", rtc);
+ if (ret != 0)
+ dev_err(rtc->da9055->dev, "irq registration failed: %d\n", ret);
+
+err_rtc:
+ return ret;
+
+}
+
+static int da9055_rtc_remove(struct platform_device *pdev)
+{
+ struct da9055_rtc *rtc = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(rtc->rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Turn off the alarm if it should not be a wake source. */
+static int da9055_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ /* Disable the ALM IRQ */
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to disable RTC ALM\n");
+ }
+
+ return 0;
+}
+
+/* Enable the alarm if it should be enabled (in case it was disabled to
+ * prevent use as a wake source).
+ */
+static int da9055_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (!device_may_wakeup(&pdev->dev)) {
+ if (rtc->alarm_enable) {
+ ret = da9055_rtc_enable_alarm(rtc, 1);
+ if (ret < 0)
+ dev_err(&pdev->dev,
+ "Failed to restart RTC ALM\n");
+ }
+ }
+
+ return 0;
+}
+
+/* Unconditionally disable the alarm */
+static int da9055_rtc_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct da9055_rtc *rtc = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ ret = da9055_rtc_enable_alarm(rtc, 0);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to freeze RTC ALMs\n");
+
+ return 0;
+
+}
+#else
+#define da9055_rtc_suspend NULL
+#define da9055_rtc_resume NULL
+#define da9055_rtc_freeze NULL
+#endif
+
+static const struct dev_pm_ops da9055_rtc_pm_ops = {
+ .suspend = da9055_rtc_suspend,
+ .resume = da9055_rtc_resume,
+
+ .freeze = da9055_rtc_freeze,
+ .thaw = da9055_rtc_resume,
+ .restore = da9055_rtc_resume,
+
+ .poweroff = da9055_rtc_suspend,
+};
+
+static struct platform_driver da9055_rtc_driver = {
+ .probe = da9055_rtc_probe,
+ .remove = da9055_rtc_remove,
+ .driver = {
+ .name = "da9055-rtc",
+ .owner = THIS_MODULE,
+ .pm = &da9055_rtc_pm_ops,
+ },
+};
+
+module_platform_driver(da9055_rtc_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("RTC driver for Dialog DA9055 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-rtc");
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 14c2109dbaa..07cd03eae60 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -485,7 +485,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
struct resource *res, *mem;
int ret = 0;
- davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
+ davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
if (!davinci_rtc) {
dev_dbg(dev, "could not allocate memory for private data\n");
return -ENOMEM;
@@ -494,15 +494,13 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
davinci_rtc->irq = platform_get_irq(pdev, 0);
if (davinci_rtc->irq < 0) {
dev_err(dev, "no RTC irq\n");
- ret = davinci_rtc->irq;
- goto fail1;
+ return davinci_rtc->irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "no mem resource\n");
- ret = -EINVAL;
- goto fail1;
+ return -EINVAL;
}
davinci_rtc->pbase = res->start;
@@ -513,8 +511,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
if (!mem) {
dev_err(dev, "RTC registers at %08x are not free\n",
davinci_rtc->pbase);
- ret = -EBUSY;
- goto fail1;
+ return -EBUSY;
}
davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
@@ -529,8 +526,9 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&davinci_rtc_ops, THIS_MODULE);
if (IS_ERR(davinci_rtc->rtc)) {
- dev_err(dev, "unable to register RTC device, err %ld\n",
- PTR_ERR(davinci_rtc->rtc));
+ ret = PTR_ERR(davinci_rtc->rtc);
+ dev_err(dev, "unable to register RTC device, err %d\n",
+ ret);
goto fail3;
}
@@ -566,9 +564,6 @@ fail3:
iounmap(davinci_rtc->base);
fail2:
release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
-fail1:
- kfree(davinci_rtc);
-
return ret;
}
@@ -589,8 +584,6 @@ static int __devexit davinci_rtc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- kfree(davinci_rtc);
-
return 0;
}
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index cace6d3aed9..9a86b4bd869 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -379,25 +379,6 @@ static long rtc_dev_ioctl(struct file *file,
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
break;
-#if 0
- case RTC_EPOCH_SET:
-#ifndef rtc_epoch
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900) {
- err = -EINVAL;
- break;
- }
- rtc_epoch = arg;
- err = 0;
-#endif
- break;
-
- case RTC_EPOCH_READ:
- err = put_user(rtc_epoch, (unsigned long __user *)uarg);
- break;
-#endif
case RTC_WKALM_SET:
mutex_unlock(&rtc->ops_lock);
if (copy_from_user(&alarm, uarg, sizeof(alarm)))
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 4eed51044c5..8da7a5cf83c 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -36,7 +36,9 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
/* DryIce Register Definitions */
@@ -495,10 +497,20 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dryice_dt_ids[] = {
+ { .compatible = "fsl,imx25-rtc" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, dryice_dt_ids);
+#endif
+
static struct platform_driver dryice_rtc_driver = {
.driver = {
.name = "imxdi_rtc",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dryice_dt_ids),
},
.remove = __devexit_p(dryice_rtc_remove),
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 26c81f23360..afb7cfa85cc 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -118,7 +118,7 @@ isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
return ret;
}
-/* simple check to see wether we have a isl1208 */
+/* simple check to see whether we have a isl1208 */
static int
isl1208_i2c_validate_client(struct i2c_client *client)
{
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 0b614e32653..600971407aa 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -20,6 +20,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -38,6 +41,8 @@
* the SoC). See the BOARD-SPECIFIC CUSTOMIZATION comment.
*/
+#define DRIVER_NAME "omap_rtc"
+
#define OMAP_RTC_BASE 0xfffb4800
/* RTC registers */
@@ -64,6 +69,9 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_KICK0_REG 0x6c
+#define OMAP_RTC_KICK1_REG 0x70
+
/* OMAP_RTC_CTRL_REG bit fields: */
#define OMAP_RTC_CTRL_SPLIT (1<<7)
#define OMAP_RTC_CTRL_DISABLE (1<<6)
@@ -88,10 +96,18 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+/* OMAP_RTC_KICKER values */
+#define KICK0_VALUE 0x83e70b13
+#define KICK1_VALUE 0x95a4f1e0
+
+#define OMAP_RTC_HAS_KICKER 0x1
+
static void __iomem *rtc_base;
-#define rtc_read(addr) __raw_readb(rtc_base + (addr))
-#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
+#define rtc_read(addr) readb(rtc_base + (addr))
+#define rtc_write(val, addr) writeb(val, rtc_base + (addr))
+
+#define rtc_writel(val, addr) writel(val, rtc_base + (addr))
/* we rely on the rtc framework to handle locking (rtc->ops_lock),
@@ -285,11 +301,38 @@ static struct rtc_class_ops omap_rtc_ops = {
static int omap_rtc_alarm;
static int omap_rtc_timer;
+#define OMAP_RTC_DATA_DA830_IDX 1
+
+static struct platform_device_id omap_rtc_devtype[] = {
+ {
+ .name = DRIVER_NAME,
+ }, {
+ .name = "da830-rtc",
+ .driver_data = OMAP_RTC_HAS_KICKER,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, omap_rtc_devtype);
+
+static const struct of_device_id omap_rtc_of_match[] = {
+ { .compatible = "ti,da830-rtc",
+ .data = &omap_rtc_devtype[OMAP_RTC_DATA_DA830_IDX],
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
+
static int __init omap_rtc_probe(struct platform_device *pdev)
{
struct resource *res, *mem;
struct rtc_device *rtc;
u8 reg, new_ctrl;
+ const struct platform_device_id *id_entry;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(omap_rtc_of_match, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
omap_rtc_timer = platform_get_irq(pdev, 0);
if (omap_rtc_timer <= 0) {
@@ -322,6 +365,16 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
goto fail;
}
+ /* Enable the clock/module so that we can access the registers */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ id_entry = platform_get_device_id(pdev);
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER)) {
+ rtc_writel(KICK0_VALUE, OMAP_RTC_KICK0_REG);
+ rtc_writel(KICK1_VALUE, OMAP_RTC_KICK1_REG);
+ }
+
rtc = rtc_device_register(pdev->name, &pdev->dev,
&omap_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -398,6 +451,10 @@ fail2:
fail1:
rtc_device_unregister(rtc);
fail0:
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER))
+ rtc_writel(0, OMAP_RTC_KICK0_REG);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
iounmap(rtc_base);
fail:
release_mem_region(mem->start, resource_size(mem));
@@ -408,6 +465,8 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
struct resource *mem = dev_get_drvdata(&rtc->dev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(pdev);
device_init_wakeup(&pdev->dev, 0);
@@ -420,6 +479,13 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
free_irq(omap_rtc_alarm, rtc);
rtc_device_unregister(rtc);
+ if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER))
+ rtc_writel(0, OMAP_RTC_KICK0_REG);
+
+ /* Disable the clock/module */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
iounmap(rtc_base);
release_mem_region(mem->start, resource_size(mem));
return 0;
@@ -442,11 +508,17 @@ static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state)
else
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+ /* Disable the clock/module */
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
static int omap_rtc_resume(struct platform_device *pdev)
{
+ /* Enable the clock/module so that we can access the registers */
+ pm_runtime_get_sync(&pdev->dev);
+
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(omap_rtc_alarm);
else
@@ -471,9 +543,11 @@ static struct platform_driver omap_rtc_driver = {
.resume = omap_rtc_resume,
.shutdown = omap_rtc_shutdown,
.driver = {
- .name = "omap_rtc",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_rtc_of_match),
},
+ .id_table = omap_rtc_devtype,
};
static int __init rtc_init(void)
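The OMAP conversion above brackets register access with runtime PM: pm_runtime_enable()/pm_runtime_get_sync() in probe, balanced by pm_runtime_put_sync()/pm_runtime_disable() on the error path and in remove. A minimal sketch of that pairing, with placeholder names, might look like this:

/* Sketch of the runtime-PM pairing used above; "example" names are
 * placeholders, not part of the patch.
 */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* module clock on: registers accessible */

	ret = 0;	/* ... unlock kick registers, rtc_device_register(), ... */
	if (ret) {
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* ... rtc_device_unregister(), free_irq(), ... */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
/* hooked into a struct platform_driver in the usual way */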
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
new file mode 100644
index 00000000000..be05a645f99
--- /dev/null
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/of.h>
+
+#define DRIVER_NAME "rtc-pcf8523"
+
+#define REG_CONTROL1 0x00
+#define REG_CONTROL1_CAP_SEL (1 << 7)
+#define REG_CONTROL1_STOP (1 << 5)
+
+#define REG_CONTROL3 0x02
+#define REG_CONTROL3_PM_BLD (1 << 7) /* battery low detection disabled */
+#define REG_CONTROL3_PM_VDD (1 << 6) /* switch-over disabled */
+#define REG_CONTROL3_PM_DSM (1 << 5) /* direct switching mode */
+#define REG_CONTROL3_PM_MASK 0xe0
+
+#define REG_SECONDS 0x03
+#define REG_SECONDS_OS (1 << 7)
+
+#define REG_MINUTES 0x04
+#define REG_HOURS 0x05
+#define REG_DAYS 0x06
+#define REG_WEEKDAYS 0x07
+#define REG_MONTHS 0x08
+#define REG_YEARS 0x09
+
+struct pcf8523 {
+ struct rtc_device *rtc;
+};
+
+static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
+{
+ struct i2c_msg msgs[2];
+ u8 value = 0;
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(reg);
+ msgs[0].buf = &reg;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(value);
+ msgs[1].buf = &value;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
+ *valuep = value;
+
+ return 0;
+}
+
+static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ u8 buffer[2] = { reg, value };
+ struct i2c_msg msg;
+ int err;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(buffer);
+ msg.buf = buffer;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ if (!high)
+ value &= ~REG_CONTROL1_CAP_SEL;
+ else
+ value |= REG_CONTROL1_CAP_SEL;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
+static int pcf8523_set_pm(struct i2c_client *client, u8 pm)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ value = (value & ~REG_CONTROL3_PM_MASK) | pm;
+
+ err = pcf8523_write(client, REG_CONTROL3, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_stop_rtc(struct i2c_client *client)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ value |= REG_CONTROL1_STOP;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_start_rtc(struct i2c_client *client)
+{
+ u8 value;
+ int err;
+
+ err = pcf8523_read(client, REG_CONTROL1, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~REG_CONTROL1_STOP;
+
+ err = pcf8523_write(client, REG_CONTROL1, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 start = REG_SECONDS, regs[7];
+ struct i2c_msg msgs[2];
+ int err;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &start;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(regs);
+ msgs[1].buf = regs;
+
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err < 0)
+ return err;
+
+ if (regs[0] & REG_SECONDS_OS) {
+ /*
+ * If the oscillator was stopped, try to clear the flag. Upon
+ * power-up the flag is always set, but if we cannot clear it
+ * the oscillator isn't running properly for some reason. The
+ * sensible thing therefore is to return an error, signalling
+ * that the clock cannot be assumed to be correct.
+ */
+
+ regs[0] &= ~REG_SECONDS_OS;
+
+ err = pcf8523_write(client, REG_SECONDS, regs[0]);
+ if (err < 0)
+ return err;
+
+ err = pcf8523_read(client, REG_SECONDS, &regs[0]);
+ if (err < 0)
+ return err;
+
+ if (regs[0] & REG_SECONDS_OS)
+ return -EAGAIN;
+ }
+
+ tm->tm_sec = bcd2bin(regs[0] & 0x7f);
+ tm->tm_min = bcd2bin(regs[1] & 0x7f);
+ tm->tm_hour = bcd2bin(regs[2] & 0x3f);
+ tm->tm_mday = bcd2bin(regs[3] & 0x3f);
+ tm->tm_wday = regs[4] & 0x7;
+ tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(regs[6]) + 100;
+
+ return rtc_valid_tm(tm);
+}
+
+static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msg;
+ u8 regs[8];
+ int err;
+
+ err = pcf8523_stop_rtc(client);
+ if (err < 0)
+ return err;
+
+ regs[0] = REG_SECONDS;
+ regs[1] = bin2bcd(tm->tm_sec);
+ regs[2] = bin2bcd(tm->tm_min);
+ regs[3] = bin2bcd(tm->tm_hour);
+ regs[4] = bin2bcd(tm->tm_mday);
+ regs[5] = tm->tm_wday;
+ regs[6] = bin2bcd(tm->tm_mon + 1);
+ regs[7] = bin2bcd(tm->tm_year - 100);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = sizeof(regs);
+ msg.buf = regs;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err < 0) {
+ /*
+ * If the time cannot be set, restart the RTC anyway. Note
+ * that errors are ignored if the RTC cannot be started so
+ * that we have a chance to propagate the original error.
+ */
+ pcf8523_start_rtc(client);
+ return err;
+ }
+
+ return pcf8523_start_rtc(client);
+}
+
+static const struct rtc_class_ops pcf8523_rtc_ops = {
+ .read_time = pcf8523_rtc_read_time,
+ .set_time = pcf8523_rtc_set_time,
+};
+
+static int pcf8523_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf8523 *pcf;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
+ if (!pcf)
+ return -ENOMEM;
+
+ err = pcf8523_select_capacitance(client, true);
+ if (err < 0)
+ return err;
+
+ err = pcf8523_set_pm(client, 0);
+ if (err < 0)
+ return err;
+
+ pcf->rtc = rtc_device_register(DRIVER_NAME, &client->dev,
+ &pcf8523_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pcf->rtc))
+ return PTR_ERR(pcf->rtc);
+
+ i2c_set_clientdata(client, pcf);
+
+ return 0;
+}
+
+static int pcf8523_remove(struct i2c_client *client)
+{
+ struct pcf8523 *pcf = i2c_get_clientdata(client);
+
+ rtc_device_unregister(pcf->rtc);
+
+ return 0;
+}
+
+static const struct i2c_device_id pcf8523_id[] = {
+ { "pcf8523", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8523_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcf8523_of_match[] = {
+ { .compatible = "nxp,pcf8523" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcf8523_of_match);
+#endif
+
+static struct i2c_driver pcf8523_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf8523_of_match),
+ },
+ .probe = pcf8523_probe,
+ .remove = pcf8523_remove,
+ .id_table = pcf8523_id,
+};
+module_i2c_driver(pcf8523_driver);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NXP PCF8523 RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 77823d21d31..4bd9414aee6 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -47,8 +47,6 @@ struct s3c_rtc_drv_data {
/* I have yet to find an S3C implementation with more than one
* of these rtc blocks in */
-static struct resource *s3c_rtc_mem;
-
static struct clk *rtc_clk;
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno = NO_IRQ;
@@ -186,7 +184,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
- /* the only way to work out wether the system was mid-update
+ /* the only way to work out whether the system was mid-update
* when we read it is to check the second counter, and if it
* is zero, then we re-try the entire read
*/
@@ -427,21 +425,13 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
- free_irq(s3c_rtc_alarmno, rtc);
- free_irq(s3c_rtc_tickno, rtc);
-
platform_set_drvdata(dev, NULL);
rtc_device_unregister(rtc);
s3c_rtc_setaie(&dev->dev, 0);
- clk_put(rtc_clk);
rtc_clk = NULL;
- iounmap(s3c_rtc_base);
- release_resource(s3c_rtc_mem);
- kfree(s3c_rtc_mem);
-
return 0;
}
@@ -496,28 +486,18 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- s3c_rtc_mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (s3c_rtc_mem == NULL) {
- dev_err(&pdev->dev, "failed to reserve memory region\n");
- ret = -ENOENT;
- goto err_nores;
- }
-
- s3c_rtc_base = ioremap(res->start, resource_size(res));
+ s3c_rtc_base = devm_request_and_ioremap(&pdev->dev, res);
if (s3c_rtc_base == NULL) {
- dev_err(&pdev->dev, "failed ioremap()\n");
- ret = -EINVAL;
- goto err_nomap;
+ dev_err(&pdev->dev, "failed to ioremap memory region\n");
+ return -EINVAL;
}
- rtc_clk = clk_get(&pdev->dev, "rtc");
+ rtc_clk = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(rtc_clk)) {
dev_err(&pdev->dev, "failed to find rtc clock source\n");
ret = PTR_ERR(rtc_clk);
rtc_clk = NULL;
- goto err_clk;
+ return ret;
}
clk_enable(rtc_clk);
@@ -576,28 +556,24 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(&pdev->dev, 1);
- ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
+ ret = devm_request_irq(&pdev->dev, s3c_rtc_alarmno, s3c_rtc_alarmirq,
0, "s3c2410-rtc alarm", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
goto err_alarm_irq;
}
- ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
+ ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq,
0, "s3c2410-rtc tick", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
- free_irq(s3c_rtc_alarmno, rtc);
- goto err_tick_irq;
+ goto err_alarm_irq;
}
clk_disable(rtc_clk);
return 0;
- err_tick_irq:
- free_irq(s3c_rtc_alarmno, rtc);
-
err_alarm_irq:
platform_set_drvdata(pdev, NULL);
rtc_device_unregister(rtc);
@@ -605,15 +581,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
err_nortc:
s3c_rtc_enable(pdev, 0);
clk_disable(rtc_clk);
- clk_put(rtc_clk);
- err_clk:
- iounmap(s3c_rtc_base);
-
- err_nomap:
- release_resource(s3c_rtc_mem);
-
- err_nores:
return ret;
}
@@ -695,8 +663,6 @@ static const struct of_device_id s3c_rtc_dt_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
-#else
-#define s3c_rtc_dt_match NULL
#endif
static struct platform_device_id s3c_rtc_driver_ids[] = {
@@ -727,7 +693,7 @@ static struct platform_driver s3c_rtc_driver = {
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
- .of_match_table = s3c_rtc_dt_match,
+ .of_match_table = of_match_ptr(s3c_rtc_dt_match),
},
};
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index bb507d23f6c..141fc945295 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -363,35 +363,42 @@ static int __devinit spear_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no resource defined\n");
return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "rtc region already claimed\n");
- return -EBUSY;
- }
- config = kzalloc(sizeof(*config), GFP_KERNEL);
+ config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
if (!config) {
dev_err(&pdev->dev, "out of memory\n");
- status = -ENOMEM;
- goto err_release_region;
+ return -ENOMEM;
}
- config->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(config->clk)) {
- status = PTR_ERR(config->clk);
- goto err_kfree;
+ /* alarm irqs */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no update irq?\n");
+ return irq;
}
- status = clk_enable(config->clk);
- if (status < 0)
- goto err_clk_put;
+ status = devm_request_irq(&pdev->dev, irq, spear_rtc_irq, 0, pdev->name,
+ config);
+ if (status) {
+ dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
+ irq);
+ return status;
+ }
- config->ioaddr = ioremap(res->start, resource_size(res));
+ config->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
if (!config->ioaddr) {
- dev_err(&pdev->dev, "ioremap fail\n");
- status = -ENOMEM;
- goto err_disable_clock;
+ dev_err(&pdev->dev, "request-ioremap fail\n");
+ return -ENOMEM;
}
+ config->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(config->clk))
+ return PTR_ERR(config->clk);
+
+ status = clk_prepare_enable(config->clk);
+ if (status < 0)
+ return status;
+
spin_lock_init(&config->lock);
platform_set_drvdata(pdev, config);
@@ -401,42 +408,19 @@ static int __devinit spear_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
PTR_ERR(config->rtc));
status = PTR_ERR(config->rtc);
- goto err_iounmap;
- }
-
- /* alarm irqs */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no update irq?\n");
- status = irq;
- goto err_clear_platdata;
+ goto err_disable_clock;
}
- status = request_irq(irq, spear_rtc_irq, 0, pdev->name, config);
- if (status) {
- dev_err(&pdev->dev, "Alarm interrupt IRQ%d already \
- claimed\n", irq);
- goto err_clear_platdata;
- }
+ config->rtc->uie_unsupported = 1;
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
return 0;
-err_clear_platdata:
- platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(config->rtc);
-err_iounmap:
- iounmap(config->ioaddr);
err_disable_clock:
- clk_disable(config->clk);
-err_clk_put:
- clk_put(config->clk);
-err_kfree:
- kfree(config);
-err_release_region:
- release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ clk_disable_unprepare(config->clk);
return status;
}
@@ -444,24 +428,11 @@ err_release_region:
static int __devexit spear_rtc_remove(struct platform_device *pdev)
{
struct spear_rtc_config *config = platform_get_drvdata(pdev);
- int irq;
- struct resource *res;
- /* leave rtc running, but disable irqs */
+ rtc_device_unregister(config->rtc);
spear_rtc_disable_interrupt(config);
+ clk_disable_unprepare(config->clk);
device_init_wakeup(&pdev->dev, 0);
- irq = platform_get_irq(pdev, 0);
- if (irq)
- free_irq(irq, pdev);
- clk_disable(config->clk);
- clk_put(config->clk);
- iounmap(config->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(config->rtc);
- kfree(config);
return 0;
}
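The rtc-s3c and rtc-spear changes above follow the same managed-resource conversion: devm_kzalloc(), devm_request_and_ioremap(), devm_request_irq() and devm_clk_get() replace their manual counterparts, so the error paths and remove() no longer have to unwind them. A minimal probe skeleton of that pattern, with placeholder names:

/* Sketch of the devm_* probe pattern used by the conversions above;
 * "example" names are placeholders.
 */
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->base)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* no explicit unwinding needed: devm_* resources are released
	 * automatically on probe failure and on driver removal */
	platform_set_drvdata(pdev, priv);
	return 0;
}
/* wired into a struct platform_driver in the usual way */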
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 7e96254bd36..974b9ae252a 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -152,24 +152,24 @@ static int __init test_init(void)
if ((test1 = platform_device_alloc("rtc-test", 1)) == NULL) {
err = -ENOMEM;
- goto exit_free_test0;
+ goto exit_put_test0;
}
if ((err = platform_device_add(test0)))
- goto exit_free_test1;
+ goto exit_put_test1;
if ((err = platform_device_add(test1)))
- goto exit_device_unregister;
+ goto exit_del_test0;
return 0;
-exit_device_unregister:
- platform_device_unregister(test0);
+exit_del_test0:
+ platform_device_del(test0);
-exit_free_test1:
+exit_put_test1:
platform_device_put(test1);
-exit_free_test0:
+exit_put_test0:
platform_device_put(test0);
exit_driver_unregister:
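The rtc-test fix above restores the correct unwind rules for platform devices: a device that has only been allocated is dropped with platform_device_put(), while one that has already been added must be removed with platform_device_del() before the final put. A generic sketch of the pattern, with placeholder names:

/* Sketch of the platform_device alloc/add unwind pattern; "demo" is a
 * placeholder name.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_init(void)
{
	struct platform_device *pdev0, *pdev1;
	int err;

	pdev0 = platform_device_alloc("demo", 0);
	if (!pdev0)
		return -ENOMEM;

	pdev1 = platform_device_alloc("demo", 1);
	if (!pdev1) {
		err = -ENOMEM;
		goto put_pdev0;		/* only allocated: put, don't del */
	}

	err = platform_device_add(pdev0);
	if (err)
		goto put_pdev1;

	err = platform_device_add(pdev1);
	if (err)
		goto del_pdev0;		/* pdev0 was added: del before put */

	return 0;

del_pdev0:
	platform_device_del(pdev0);
put_pdev1:
	platform_device_put(pdev1);
put_pdev0:
	platform_device_put(pdev0);
	return err;
}
module_init(demo_init);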
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 073108dcf9e..22eb4ebfa1a 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -247,6 +247,13 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
return ret;
dev_dbg(&pdev->dev, "Enabling rtc-tps65910.\n");
+
+ /* Enable RTC digital power domain */
+ ret = regmap_update_bits(tps65910->regmap, TPS65910_DEVCTRL,
+ DEVCTRL_RTC_PWDN_MASK, 0 << DEVCTRL_RTC_PWDN_SHIFT);
+ if (ret < 0)
+ return ret;
+
rtc_reg = TPS65910_RTC_CTRL_STOP_RTC;
ret = regmap_write(tps65910->regmap, TPS65910_RTC_CTRL, rtc_reg);
if (ret < 0)
@@ -261,7 +268,7 @@ static int __devinit tps65910_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
- "rtc-tps65910", &pdev->dev);
+ dev_name(&pdev->dev), &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
return ret;
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9277d945bf4..8b7464c8b5c 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -233,7 +233,7 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
*/
static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
u8 save_control;
u8 rtc_control;
@@ -300,15 +300,15 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char save_control;
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
- rtc_data[1] = bin2bcd(tm->tm_sec);
- rtc_data[2] = bin2bcd(tm->tm_min);
- rtc_data[3] = bin2bcd(tm->tm_hour);
- rtc_data[4] = bin2bcd(tm->tm_mday);
- rtc_data[5] = bin2bcd(tm->tm_mon + 1);
- rtc_data[6] = bin2bcd(tm->tm_year - 100);
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
@@ -341,7 +341,7 @@ out:
*/
static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- unsigned char rtc_data[ALL_TIME_REGS + 1];
+ unsigned char rtc_data[ALL_TIME_REGS];
int ret;
ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
@@ -368,19 +368,19 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- unsigned char alarm_data[ALL_TIME_REGS + 1];
+ unsigned char alarm_data[ALL_TIME_REGS];
int ret;
ret = twl_rtc_alarm_irq_enable(dev, 0);
if (ret)
goto out;
- alarm_data[1] = bin2bcd(alm->time.tm_sec);
- alarm_data[2] = bin2bcd(alm->time.tm_min);
- alarm_data[3] = bin2bcd(alm->time.tm_hour);
- alarm_data[4] = bin2bcd(alm->time.tm_mday);
- alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
- alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
+ alarm_data[0] = bin2bcd(alm->time.tm_sec);
+ alarm_data[1] = bin2bcd(alm->time.tm_min);
+ alarm_data[2] = bin2bcd(alm->time.tm_hour);
+ alarm_data[3] = bin2bcd(alm->time.tm_mday);
+ alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
/* update all the alarm registers in one shot */
ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index 07bf19364a7..14e2d8cfcc8 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -210,7 +210,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
struct vt8500_rtc *vt8500_rtc;
int ret;
- vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL);
+ vt8500_rtc = devm_kzalloc(&pdev->dev,
+ sizeof(struct vt8500_rtc), GFP_KERNEL);
if (!vt8500_rtc)
return -ENOMEM;
@@ -220,15 +221,13 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!vt8500_rtc->res) {
dev_err(&pdev->dev, "No I/O memory resource defined\n");
- ret = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
if (vt8500_rtc->irq_alarm < 0) {
dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
- ret = -ENXIO;
- goto err_free;
+ return -ENXIO;
}
vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
@@ -236,8 +235,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
"vt8500-rtc");
if (vt8500_rtc->res == NULL) {
dev_err(&pdev->dev, "failed to request I/O memory\n");
- ret = -EBUSY;
- goto err_free;
+ return -EBUSY;
}
vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
@@ -278,8 +276,6 @@ err_unmap:
err_release:
release_mem_region(vt8500_rtc->res->start,
resource_size(vt8500_rtc->res));
-err_free:
- kfree(vt8500_rtc);
return ret;
}
@@ -297,7 +293,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
release_mem_region(vt8500_rtc->res->start,
resource_size(vt8500_rtc->res));
- kfree(vt8500_rtc);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0595c763daf..29225e1c159 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -349,6 +349,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
return rc;
}
+static inline
+int _wait_for_empty_queues(struct dasd_device *device)
+{
+ if (device->block)
+ return list_empty(&device->ccw_queue) &&
+ list_empty(&device->block->ccw_queue);
+ else
+ return list_empty(&device->ccw_queue);
+}
+
/*
* Remove device from block device layer. Destroy dirty buffers.
* Forget format information. Check if the target level is basic
@@ -1841,6 +1851,13 @@ static void __dasd_device_check_expire(struct dasd_device *device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
(time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * IO in safe offline processing should not
+ * run out of retries
+ */
+ cqr->retries++;
+ }
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
@@ -3024,11 +3041,11 @@ void dasd_generic_remove(struct ccw_device *cdev)
cdev->handler = NULL;
- dasd_remove_sysfs_files(cdev);
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return;
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return;
@@ -3048,6 +3065,8 @@ void dasd_generic_remove(struct ccw_device *cdev)
*/
if (block)
dasd_free_block(block);
+
+ dasd_remove_sysfs_files(cdev);
}
/*
@@ -3126,16 +3145,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
{
struct dasd_device *device;
struct dasd_block *block;
- int max_count, open_count;
+ int max_count, open_count, rc;
+ rc = 0;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
- if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
- /* Already doing offline processing */
- dasd_put_device(device);
- return 0;
- }
+
/*
* We must make sure that this device is currently not in use.
* The open_count is increased for every opener, that includes
@@ -3159,6 +3175,54 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
return -EBUSY;
}
}
+
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * safe offline already running
+ * this can only be a normal offline request, so the safe_offline
+ * flag needs to be cleared to run normal offline and kill all I/O
+ */
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* Already doing normal offline processing */
+ dasd_put_device(device);
+ return -EBUSY;
+ } else
+ clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+
+ } else if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ return -EBUSY;
+ }
+
+ /*
+ * if safe_offline was requested, set the safe_offline_running flag
+ * and clear safe_offline so that a later call to normal offline
+ * can override the safe_offline processing
+ */
+ if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
+ !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * If we want to set the device safe offline all IO operations
+ * should be finished before continuing the offline process
+ * so sync bdev first and then wait for our queues to become
+ * empty
+ */
+ /* sync blockdev and partitions */
+ rc = fsync_bdev(device->block->bdev);
+ if (rc != 0)
+ goto interrupted;
+
+ /* schedule device tasklet and wait for completion */
+ dasd_schedule_device_bh(device);
+ rc = wait_event_interruptible(shutdown_waitq,
+ _wait_for_empty_queues(device));
+ if (rc != 0)
+ goto interrupted;
+ }
+
+ set_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
@@ -3170,6 +3234,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
if (block)
dasd_free_block(block);
return 0;
+
+interrupted:
+ /* interrupted by signal */
+ clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+ clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+ dasd_put_device(device);
+ return rc;
}
int dasd_generic_last_path_gone(struct dasd_device *device)
@@ -3489,15 +3561,6 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
-static inline int _wait_for_empty_queues(struct dasd_device *device)
-{
- if (device->block)
- return list_empty(&device->ccw_queue) &&
- list_empty(&device->block->ccw_queue);
- else
- return list_empty(&device->ccw_queue);
-}
-
void dasd_generic_shutdown(struct ccw_device *cdev)
{
struct dasd_device *device;
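The safe-offline path added above parks the caller on shutdown_waitq until _wait_for_empty_queues() reports that both ccw queues have drained. A stripped-down sketch of that wait/wake pattern, with placeholder names and without the locking a real queue would need:

/* Sketch of the wait/wake pattern: the waiter sleeps until the condition
 * becomes true, the completion side wakes the queue after making it true.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_queue_len;	/* a real queue would protect this with a lock */

static int demo_wait_for_empty_queue(void)
{
	/* returns -ERESTARTSYS if interrupted by a signal, 0 otherwise */
	return wait_event_interruptible(demo_waitq, demo_queue_len == 0);
}

static void demo_request_done(void)
{
	demo_queue_len--;
	if (demo_queue_len == 0)
		wake_up(&demo_waitq);
}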
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index b2b8c18eece..c196827c228 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -952,6 +952,39 @@ static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t
+dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ int rc;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ goto out;
+ }
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ dasd_put_device(device);
+
+ rc = ccw_device_set_offline(cdev);
+
+out:
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
+
+static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1320,6 +1353,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_expires.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
+ &dev_attr_safe_offline.attr,
NULL,
};
@@ -1344,7 +1378,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
/*
* Set / reset given feature.
- * Flag indicates wether to set (!=0) or the reset (=0) the feature.
+ * Flag indicates whether to set (!=0) or reset (=0) the feature.
*/
int
dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
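The new write-only safe_offline attribute is created alongside the other DASD attributes under the device's ccw sysfs directory. The userspace sketch below is hypothetical; the bus id 0.0.4711 is a placeholder.

/* Hypothetical userspace sketch: request a drained ("safe") offline for a
 * DASD by writing to its new sysfs attribute.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/ccw/devices/0.0.4711/safe_offline", O_WRONLY);

	if (fd < 0)
		return 1;
	/* any write triggers dasd_safe_offline_store(); it returns only after
	 * outstanding I/O has been flushed or an error occurred */
	if (write(fd, "1\n", 2) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}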
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 108332b44d9..806fe912d6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1026,7 +1026,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
- int rc;
+ int rc, path_err;
__u8 lpm, opm;
struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
@@ -1037,6 +1037,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
+ path_err = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
if (!(lpm & opm))
@@ -1122,7 +1123,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"the same device, path %02X leads to "
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
- return -EINVAL;
+ path_err = -EINVAL;
+ continue;
}
path_private.conf_data = NULL;
@@ -1142,7 +1144,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
kfree(conf_data);
}
- return 0;
+ return path_err;
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
@@ -3847,7 +3849,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
len = 0;
while (from <= to) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
@@ -3908,23 +3910,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@@ -3937,23 +3939,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@@ -3962,10 +3964,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" Related CP in req: %p\n", req);
dasd_eckd_dump_ccw_range(first, to, page + len);
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
@@ -3975,7 +3977,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
irb->scsw.cmd.cpa; /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page, PRINTK_HEADER "......\n");
}
to = min(fail + 1, last);
len += dasd_eckd_dump_ccw_range(from, to, page + len);
@@ -3984,11 +3986,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
len += dasd_eckd_dump_ccw_range(from, last, page + len);
if (len > 0)
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
}
free_page((unsigned long) page);
}
@@ -4012,10 +4014,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
@@ -4023,7 +4025,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
req ? req->intrc : 0);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.tm.tcw);
@@ -4035,43 +4037,42 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
if (tsb) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->length %d\n", tsb->length);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->flags %x\n", tsb->flags);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->dcw_offset %d\n", tsb->dcw_offset);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
- len += sprintf(page + len,
- KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
@@ -4084,15 +4085,14 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
- " tsb->tsa.intrg.: not supportet yet \n");
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.intrg.: not supportet yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len,
- KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -4104,27 +4104,27 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
- sprintf(page + len, KERN_ERR PRINTK_HEADER
+ sprintf(page + len, PRINTK_HEADER
" SORRY - NO TSB DATA AVAILABLE\n");
}
- printk("%s", page);
+ printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
@@ -4161,9 +4161,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
- rc = dasd_eckd_read_conf(device);
- if (rc)
- goto out_err;
+ dasd_eckd_read_conf(device);
dasd_eckd_get_uid(device, &temp_uid);
/* Generate device unique id */
@@ -4183,9 +4181,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST);
/* RE-Read Configuration Data */
- rc = dasd_eckd_read_conf(device);
- if (rc)
- goto out_err;
+ dasd_eckd_read_conf(device);
/* Read Feature Codes */
dasd_eckd_read_features(device);
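The dump-routine changes above all make the same correction: KERN_ERR expands to a message-level prefix string, so formatting it into the middle of the page buffer with sprintf() scatters literal prefix bytes through the text instead of tagging the message. The fix keeps the buffer plain and passes KERN_ERR exactly once to the final printk(). A condensed sketch of the before/after:

/* Sketch of the printk-level fix applied to the sense-dump functions above. */
#include <linux/kernel.h>

static void demo_dump(char *page)
{
	int len;

	/* old pattern: the level prefix ends up inside the buffer text
	 *   len  = sprintf(page, KERN_ERR "header line\n");
	 *   len += sprintf(page + len, KERN_ERR "detail line\n");
	 */

	/* fixed pattern: build a plain buffer, set the level once */
	len  = sprintf(page, "header line\n");
	len += sprintf(page + len, "detail line\n");
	printk(KERN_ERR "%s", page);
}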
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index fb7f3bdc660..eb748507c7f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -479,19 +479,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"No memory to dump sense data");
return;
}
- len = sprintf(page, KERN_ERR PRINTK_HEADER
+ len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
@@ -502,7 +502,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
@@ -512,10 +512,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
- len = sprintf(page, KERN_ERR PRINTK_HEADER
- " Related CP in req: %p\n", req);
+ len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@@ -533,11 +532,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
@@ -552,10 +551,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
}
while (act <= last) {
- len += sprintf(page + len, KERN_ERR PRINTK_HEADER
+ len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 7ff93eea673..899e3f5a56e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -516,6 +516,8 @@ struct dasd_block {
#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
+#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
+#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
void dasd_put_device_wake(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 8252f37d04e..03c0e044455 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
+#include <asm/schid.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
@@ -308,11 +309,12 @@ static int dasd_ioctl_information(struct dasd_block *block,
unsigned int cmd, void __user *argp)
{
struct dasd_information2_t *dasd_info;
- unsigned long flags;
- int rc;
+ struct subchannel_id sch_id;
+ struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
- struct ccw_dev_id dev_id;
+ unsigned long flags;
+ int rc;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
@@ -330,9 +332,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
+ ccw_device_get_schid(cdev, &sch_id);
dasd_info->devno = dev_id.devno;
- dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
+ dasd_info->schid = sch_id.sch_no;
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index d7e97ae9ef6..25bcd4c0ed8 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999,2012
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -103,6 +103,7 @@ extern u64 sclp_facilities;
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
+#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
struct gds_subvector {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 71ea923c322..c44d13f607b 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007, 2009
+ * Copyright IBM Corp. 2007,2012
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -19,10 +20,11 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <asm/ctl_reg.h>
#include <asm/chpid.h>
-#include <asm/sclp.h>
#include <asm/setup.h>
-#include <asm/ctl_reg.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
#include "sclp.h"
@@ -400,17 +402,15 @@ out:
static int sclp_assign_storage(u16 rn)
{
- unsigned long long start, address;
+ unsigned long long start;
int rc;
rc = do_assign_storage(0x000d0001, rn);
if (rc)
- goto out;
- start = address = rn2addr(rn);
- for (; address < start + rzm; address += PAGE_SIZE)
- page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
-out:
- return rc;
+ return rc;
+ start = rn2addr(rn);
+ storage_key_init_range(start, start + rzm);
+ return 0;
}
static int sclp_unassign_storage(u16 rn)
@@ -702,6 +702,67 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
+ * PCI I/O adapter configuration related functions.
+ */
+#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
+
+#define SCLP_RECONFIG_PCI_ATPYE 2
+
+struct pci_cfg_sccb {
+ struct sccb_header header;
+ u8 atype; /* adapter type */
+ u8 reserved1;
+ u16 reserved2;
+ u32 aid; /* adapter identifier */
+} __packed;
+
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+ struct pci_cfg_sccb *sccb;
+ int rc;
+
+ if (!SCLP_HAS_PCI_RECONFIG)
+ return -EOPNOTSUPP;
+
+ sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+
+ sccb->header.length = PAGE_SIZE;
+ sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
+ sccb->aid = fid;
+ rc = do_sync_request(cmd, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
+ cmd, sccb->header.response_code);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long) sccb);
+ return rc;
+}
+
+int sclp_pci_configure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+int sclp_pci_deconfigure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+/*
* Channel path configuration related functions.
*/
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 731470e6849..84846c2b96d 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -65,10 +65,18 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
}
}
-static int ccwgroup_set_online(struct ccwgroup_device *gdev)
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- int ret = 0;
+ int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@@ -84,11 +92,20 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_online);
-static int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
- int ret = 0;
+ int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
@@ -104,6 +121,7 @@ out:
atomic_set(&gdev->onoff, 0);
return ret;
}
+EXPORT_SYMBOL(ccwgroup_set_offline);
static ssize_t ccwgroup_online_store(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4d51a7c4eb8..68e80e2734a 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
/*
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999, 2010
+ * Copyright IBM Corp. 1999,2012
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/pci.h>
#include <asm/cio.h>
#include <asm/chpid.h>
@@ -260,26 +261,45 @@ __get_chpid_from_lir(void *data)
return (u16) (lir->indesc[0]&0x000000ff);
}
-struct chsc_sei_area {
- struct chsc_header request;
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- struct chsc_header response;
- u32 reserved4;
- u8 flags;
- u8 vf; /* validity flags */
- u8 rs; /* reporting source */
- u8 cc; /* content code */
- u16 fla; /* full link address */
- u16 rsid; /* reporting source id */
- u32 reserved5;
- u32 reserved6;
- u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
/* ccdf has to be big enough for a link-incident record */
-} __attribute__ ((packed));
-
-static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 0ULL
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 reserved2;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed;
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
struct chp_id chpid;
int id;
@@ -298,7 +318,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
struct chp_link link;
struct chp_id chpid;
@@ -330,7 +350,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
s390_process_res_acc(&link);
}
-static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
struct channel_path *chp;
struct chp_id chpid;
@@ -366,7 +386,7 @@ struct chp_config_data {
u8 pc;
};
-static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
struct chp_config_data *data;
struct chp_id chpid;
@@ -398,7 +418,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
}
}
-static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
int ret;
@@ -412,13 +432,26 @@ static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
" failed (rc=%d).\n", ret);
}
-static void chsc_process_sei(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
- /* Check if we might have lost some information. */
- if (sei_area->flags & 0x40) {
- CIO_CRW_EVENT(2, "chsc: event overflow\n");
- css_schedule_eval_all();
+#ifdef CONFIG_PCI
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
+ sei_area->cc);
+ break;
}
+#endif
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
/* which kind of information was stored? */
switch (sei_area->cc) {
case 1: /* link incident*/
@@ -443,9 +476,51 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
}
}
+static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
+{
+ do {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code == 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei successful\n");
+
+ /* Check if we might have lost some information. */
+ if (sei->u.nt0_area.flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+
+ switch (sei->ntsm) {
+ case CHSC_SEI_NT0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ return 1;
+ case CHSC_SEI_NT2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ return 1;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
+ sei->ntsm);
+ return 0;
+ }
+ } else {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
+ sei->response.code);
+ break;
+ }
+ } while (sei->u.nt0_area.flags & 0x80);
+
+ return 0;
+}
+
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
- struct chsc_sei_area *sei_area;
+ struct chsc_sei *sei;
if (overflow) {
css_schedule_eval_all();
@@ -459,25 +534,18 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
return;
/* Access to sei_page is serialized through machine check handler
* thread, so no need for locking. */
- sei_area = sei_page;
+ sei = sei_page;
CIO_TRACE_EVENT(2, "prcss");
- do {
- memset(sei_area, 0, sizeof(*sei_area));
- sei_area->request.length = 0x0010;
- sei_area->request.code = 0x000e;
- if (chsc(sei_area))
- break;
- if (sei_area->response.code == 0x0001) {
- CIO_CRW_EVENT(4, "chsc: sei successful\n");
- chsc_process_sei(sei_area);
- } else {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei_area->response.code);
- break;
- }
- } while (sei_area->flags & 0x80);
+ /*
+ * The ntsm does not allow us to select NT0 and NT2 together. We need to
+ * first check for NT2, then additionally for NT0...
+ */
+#ifdef CONFIG_PCI
+ if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
+#endif
+ __chsc_process_crw(sei, CHSC_SEI_NT0);
}
void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fd3143c291c..6995cff4463 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2036,16 +2036,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
-/* Helper func for qdio. */
-struct subchannel_id
-ccw_device_get_subchannel_id(struct ccw_device *cdev)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- return sch->schid;
-}
-
static void ccw_device_todo(struct work_struct *work)
{
struct ccw_device_private *priv;
@@ -2138,4 +2128,3 @@ EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
-EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 6bace694239..2e575cff984 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -142,9 +142,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
-/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
-extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ec7fb6d3b47..c77b6e06bf6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -755,14 +755,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
-// FIXME: these have to go:
-
-int
-_ccw_device_get_subchannel_number(struct ccw_device *cdev)
+/**
+ * ccw_device_get_schid - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
- return cdev->private->schid.sch_no;
-}
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -777,5 +781,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 368368fe04b..908d287f66c 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
* Determine pathgroup state from PGID data.
*/
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
- int *mismatch, int *reserved, u8 *reset)
+ int *mismatch, u8 *reserved, u8 *reset)
{
struct pgid *pgid = &cdev->private->pgid[0];
struct pgid *first = NULL;
@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
if ((cdev->private->pgid_valid_mask & lpm) == 0)
continue;
if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
- *reserved = 1;
+ *reserved |= lpm;
if (pgid_is_reset(pgid)) {
*reset |= lpm;
continue;
@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int mismatch = 0;
- int reserved = 0;
+ u8 reserved = 0;
u8 reset = 0;
u8 donepm;
if (rc)
goto out;
pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
- if (reserved)
+ if (reserved == cdev->private->pgid_valid_mask)
rc = -EUSERS;
else if (mismatch)
rc = -EOPNOTSUPP;
@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e06fa03ea1e..1671d3461f2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -129,7 +129,6 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
unsigned int ccq = 0;
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, eqbs);
if (!q->is_input_q)
@@ -147,7 +146,6 @@ again:
}
if (rc == 2) {
- BUG_ON(tmp_count == count);
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
tmp_count);
@@ -189,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
if (!count)
return 0;
-
- BUG_ON(!q->irq_ptr->sch_token);
qperf_inc(q, sqbs);
if (!q->is_input_q)
@@ -199,7 +195,7 @@ again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
rc = qdio_check_ccq(q, ccq);
if (!rc) {
- WARN_ON(tmp_count);
+ WARN_ON_ONCE(tmp_count);
return count - tmp_count;
}
@@ -224,9 +220,6 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char __state = 0;
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
@@ -258,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
{
int i;
- BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
- BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
-
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
@@ -345,7 +335,6 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
- WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
retries++;
if (!start_time) {
@@ -559,7 +548,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
return q->first_to_check;
@@ -678,12 +667,10 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
if (aob == NULL)
continue;
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
} else if (state == SLSB_P_OUTPUT_EMPTY) {
- BUG_ON(q->u.out.sbal_state == NULL);
q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
@@ -703,12 +690,11 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- BUG_ON(q->sbal_state == NULL);
q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
- BUG_ON(phys_aob & 0xFF);
+ WARN_ON_ONCE(phys_aob & 0xFF);
}
out:
@@ -809,8 +795,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
goto out;
switch (state) {
- case SLSB_P_OUTPUT_PENDING:
- BUG();
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
@@ -840,7 +824,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_OUTPUT_HALTED:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
out:
@@ -912,7 +896,7 @@ retry:
static void __qdio_outbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_outbound);
- BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
qdio_kick_handler(q);
@@ -1138,16 +1122,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
- switch (PTR_ERR(irb)) {
- case -EIO:
- DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
- wake_up(&cdev->private->wait_q);
- return;
- default:
- WARN_ON(1);
- return;
- }
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
@@ -1173,7 +1151,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
case QDIO_IRQ_STATE_STOPPED:
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
@@ -1227,7 +1205,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- BUG_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled());
DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
@@ -1358,7 +1336,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto out_rel;
- WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
init_data->no_output_qs))
@@ -1597,9 +1574,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
-
used = atomic_add_return(count, &q->nr_buf_used) - count;
- BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
if (need_siga_in(q))
return qdio_siga_input(q);
@@ -1624,7 +1599,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
- BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
@@ -1678,7 +1652,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
{
struct qdio_irq *irq_ptr;
-
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
@@ -1721,8 +1694,6 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
-
clear_nonshared_ind(irq_ptr);
qdio_stop_polling(q);
clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@@ -1769,7 +1740,6 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- WARN_ON(queue_irqs_enabled(q));
/*
* Cannot rely on automatic sync after interrupt since queues may
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6c973db1498..16ecd35b8e5 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
- BUG_ON((unsigned long)q->sbal[j] & 0xff);
- }
/* fill in slib */
if (i > 0) {
@@ -434,9 +432,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
-
- irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
@@ -483,7 +480,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
char s[80];
snprintf(s, 80, "qdio: %s %s on SC %x using "
- "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
dev_name(&cdev->dev),
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2e060088fa8..bdb394b066f 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -73,7 +73,6 @@ static void put_indicator(u32 *addr)
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
mutex_lock(&tiq_list_lock);
- BUG_ON(irq_ptr->nr_input_qs < 1);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
xchg(irq_ptr->dsci, 1 << 7);
@@ -83,7 +82,6 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
- BUG_ON(irq_ptr->nr_input_qs < 1);
q = irq_ptr->input_qs[0];
/* if establish triggered an error */
if (!q || !q->entry.prev || !q->entry.next)
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 035b6dc31b7..7c522f338bd 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -241,84 +241,70 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
- int mod_len, short_len, long_len, long_offset, limit;
+ int mod_len, short_len;
unsigned char *p, *q, *dp, *dq, *u, *inp;
mod_len = crt->inputdatalength;
short_len = mod_len / 2;
- long_len = mod_len / 2 + 8;
/*
- * CEX2A cannot handle p, dp, or U > 128 bytes.
- * If we have one of these, we need to do extra checking.
- * For CEX3A the limit is 256 bytes.
+ * CEX2A and CEX3A w/o FW update can handle requests up to
+ * a 256 byte modulus (2k keys).
+ * CEX3A with FW update and CEX4A cards are able to handle
+ * a 512 byte modulus (4k keys).
*/
- if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
- limit = 256;
- else
- limit = 128;
-
- if (long_len > limit) {
- /*
- * zcrypt_rsa_crt already checked for the leading
- * zeroes of np_prime, bp_key and u_mult_inc.
- */
- long_offset = long_len - limit;
- long_len = limit;
- } else
- long_offset = 0;
-
- /*
- * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
- * the larger message structure.
- */
- if (long_len <= 64) {
+ if (mod_len <= 128) { /* up to 1024 bit key size */
struct type50_crb1_msg *crb1 = ap_msg->message;
memset(crb1, 0, sizeof(*crb1));
ap_msg->length = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
- p = crb1->p + sizeof(crb1->p) - long_len;
+ p = crb1->p + sizeof(crb1->p) - short_len;
q = crb1->q + sizeof(crb1->q) - short_len;
- dp = crb1->dp + sizeof(crb1->dp) - long_len;
+ dp = crb1->dp + sizeof(crb1->dp) - short_len;
dq = crb1->dq + sizeof(crb1->dq) - short_len;
- u = crb1->u + sizeof(crb1->u) - long_len;
+ u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
- } else if (long_len <= 128) {
+ } else if (mod_len <= 256) { /* up to 2048 bit key size */
struct type50_crb2_msg *crb2 = ap_msg->message;
memset(crb2, 0, sizeof(*crb2));
ap_msg->length = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
- p = crb2->p + sizeof(crb2->p) - long_len;
+ p = crb2->p + sizeof(crb2->p) - short_len;
q = crb2->q + sizeof(crb2->q) - short_len;
- dp = crb2->dp + sizeof(crb2->dp) - long_len;
+ dp = crb2->dp + sizeof(crb2->dp) - short_len;
dq = crb2->dq + sizeof(crb2->dq) - short_len;
- u = crb2->u + sizeof(crb2->u) - long_len;
+ u = crb2->u + sizeof(crb2->u) - short_len;
inp = crb2->message + sizeof(crb2->message) - mod_len;
- } else {
- /* long_len >= 256 */
+ } else if ((mod_len <= 512) && /* up to 4096 bit key size */
+ (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
struct type50_crb3_msg *crb3 = ap_msg->message;
memset(crb3, 0, sizeof(*crb3));
ap_msg->length = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
- p = crb3->p + sizeof(crb3->p) - long_len;
+ p = crb3->p + sizeof(crb3->p) - short_len;
q = crb3->q + sizeof(crb3->q) - short_len;
- dp = crb3->dp + sizeof(crb3->dp) - long_len;
+ dp = crb3->dp + sizeof(crb3->dp) - short_len;
dq = crb3->dq + sizeof(crb3->dq) - short_len;
- u = crb3->u + sizeof(crb3->u) - long_len;
+ u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
- }
+ } else
+ return -EINVAL;
- if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
+ /*
+ * correct the offset of p, bp and mult_inv according to zcrypt.h
+ * block size, right aligned (skip the first byte)
+ */
+ if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(q, crt->nq_prime, short_len) ||
- copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
+ copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(dq, crt->bq_key, short_len) ||
- copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
+ copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index e56dc72c773..0a66e4aeeb5 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -33,6 +33,8 @@
#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
+#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
+
int zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 74bf1aa7af4..142f632e2a2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -603,6 +603,7 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
@@ -1812,6 +1813,7 @@ config SCSI_VIRTIO
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
+source "drivers/scsi/csiostor/Kconfig"
endif # SCSI_LOWLEVEL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 888f73a4aae..b607ba4f563 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -106,6 +107,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d79457ac8be..681434e2dfe 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -132,11 +132,13 @@ struct inquiry_data {
* M O D U L E G L O B A L S
*/
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max);
-static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new);
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+ int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
@@ -971,6 +973,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -982,7 +985,10 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd2->byteCount = cpu_to_le32(count<<9);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
- aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, readcmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -996,7 +1002,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
- aac_build_sgraw(cmd, &readcmd->sg);
+ ret = aac_build_sgraw(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
@@ -1019,6 +1027,8 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
u16 fibsize;
struct aac_read64 *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read64 *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -1028,7 +1038,9 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
readcmd->pad = 0;
readcmd->flags = 0;
- aac_build_sg64(cmd, &readcmd->sg);
+ ret = aac_build_sg64(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1050,6 +1062,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
@@ -1057,7 +1071,9 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
- aac_build_sg(cmd, &readcmd->sg);
+ ret = aac_build_sg(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1079,6 +1095,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -1093,7 +1110,10 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
cpu_to_le16(RIO2_IO_TYPE_WRITE);
- aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, writecmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -1110,7 +1130,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
cpu_to_le16(RIO_TYPE_WRITE);
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
- aac_build_sgraw(cmd, &writecmd->sg);
+ ret = aac_build_sgraw(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
@@ -1133,6 +1155,8 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
{
u16 fibsize;
struct aac_write64 *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write64 *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1142,7 +1166,9 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
writecmd->pad = 0;
writecmd->flags = 0;
- aac_build_sg64(cmd, &writecmd->sg);
+ ret = aac_build_sg64(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write64) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1164,6 +1190,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
@@ -1173,7 +1201,9 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
- aac_build_sg(cmd, &writecmd->sg);
+ ret = aac_build_sg(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1235,8 +1265,11 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
+ ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -1263,8 +1296,11 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
+ ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -2870,7 +2906,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
return -1;
}
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2883,7 +2919,8 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2912,7 +2949,7 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
}
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2927,7 +2964,8 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2957,7 +2995,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
return byte_count;
}
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
{
unsigned long byte_count = 0;
int nseg;
@@ -2972,7 +3010,8 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[0].flags = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -3005,13 +3044,15 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
return byte_count;
}
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max)
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max)
{
unsigned long byte_count = 0;
int nseg;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i, conformable = 0;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9e933a88a8b..742f5d7eb0f 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29800
+# define AAC_DRIVER_BUILD 29801
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a50b6a9030e..f1733dfa3ae 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
-
+#define BE_GEN4 4
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -84,9 +84,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_eq_obj {
+ bool todo_mcc_cq;
+ bool todo_cq;
struct be_queue_info q;
struct beiscsi_hba *phba;
struct be_queue_info *cq;
+ struct work_struct work_cqs; /* Work Item */
struct blk_iopoll iopoll;
};
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 07d2cb126d9..5c87768c109 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -56,7 +56,7 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
writel(pconline0, (void *)pci_online0_offset);
writel(pconline1, (void *)pci_online1_offset);
- sreset = BE2_SET_RESET;
+ sreset |= BE2_SET_RESET;
writel(sreset, (void *)pci_reset_offset);
i = 0;
@@ -133,6 +133,87 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
return tag;
}
+/*
+ * beiscsi_mccq_compl()- Wait for completion of MBX
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ *
+ * Waits for MBX completion with the passed TAG.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ void *cmd_hdr)
+{
+ int rc = 0;
+ uint32_t mcc_tag_response;
+ uint16_t status = 0, addl_status = 0, wrb_num = 0;
+ struct be_mcc_wrb *temp_wrb;
+ struct be_cmd_req_hdr *ioctl_hdr;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
+ /* wait for the mccq completion */
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+
+ if (rc <= 0) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Completion timed out\n");
+ rc = -EAGAIN;
+ goto release_mcc_tag;
+ } else
+ rc = 0;
+
+ mcc_tag_response = phba->ctrl.mcc_numtag[tag];
+ status = (mcc_tag_response & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ CQE_STATUS_ADDL_SHIFT);
+
+ if (cmd_hdr) {
+ ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+ } else {
+ wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ CQE_STATUS_WRB_SHIFT;
+ temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+ ioctl_hdr = embedded_payload(temp_wrb);
+
+ if (wrb)
+ *wrb = temp_wrb;
+ }
+
+ if (status || addl_status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Failed for "
+ "Subsys : %d Opcode : %d with "
+ "Status : %d and Extd_Status : %d\n",
+ ioctl_hdr->subsystem,
+ ioctl_hdr->opcode,
+ status, addl_status);
+ rc = -EAGAIN;
+ }
+
+release_mcc_tag:
+ /* Release the MCC entry */
+ free_mcc_tag(&phba->ctrl, tag);
+
+ return rc;
+}
+
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
spin_lock(&ctrl->mbox_lock);
@@ -168,11 +249,24 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
compl->flags = 0;
}
+/*
+ * be_mcc_compl_process()- Check the MBX completion status
+ * @ctrl: Function specific MBX data structure
+ * @compl: Completion status of MBX Command
+ *
+ * Check for the MBX completion status when the BMBX method is used
+ *
+ * return
+ * Success: Zero
+ * Failure: Non-Zero
+ **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
be_dws_le_to_cpu(compl, 4);
@@ -184,7 +278,10 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
+ "BC_%d : error in cmd completion: "
+ "Subsystem : %d Opcode : %d "
+ "status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode,
compl_status, extd_status);
return -EBUSY;
@@ -314,11 +411,24 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
return status;
}
-/* Wait till no more pending mcc requests are present */
+/*
+ * be_mcc_wait_compl()- Wait for MBX completion
+ * @phba: driver private structure
+ *
+ * Wait till no more pending mcc requests are present
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ *
+ **/
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
int i, status;
for (i = 0; i < mcc_timeout; i++) {
+ if (beiscsi_error(phba))
+ return -EIO;
+
status = beiscsi_process_mcc(phba);
if (status)
return status;
@@ -330,51 +440,83 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
if (i == mcc_timeout) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mccq poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
return 0;
}
-/* Notify MCC requests and wait for completion */
+/*
+ * be_mcc_notify_wait()- Notify and wait for Compl
+ * @phba: driver private structure
+ *
+ * Notify MCC requests and wait for completion
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
be_mcc_notify(phba);
return be_mcc_wait_compl(phba);
}
+/*
+ * be_mbox_db_ready_wait()- Check ready status
+ * @ctrl: Function specific MBX data structure
+ *
+ * Check for the ready status of the FW to send BMBX
+ * commands to the adapter.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
-#define long_delay 2000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
- int cnt = 0, wait = 5; /* in usecs */
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int wait = 0;
u32 ready;
do {
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
- if (cnt > 12000000) {
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mbox_db poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
- if (cnt > 50) {
- wait = long_delay;
- mdelay(long_delay / 1000);
- } else
- udelay(wait);
- cnt += wait;
+ mdelay(1);
+ wait++;
} while (true);
return 0;
}
+/*
+ * be_mbox_notify: Notify adapter of new BMBX command
+ * @ctrl: Function specific MBX data structure
+ *
+ * Ring doorbell to inform adapter of a BMBX command
+ * to process
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
int status;
@@ -391,13 +533,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
val = 0;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val &= ~MPU_MAILBOX_DB_HI_MASK;
@@ -405,13 +543,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
@@ -499,7 +633,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
- req_hdr->timeout = 120;
+ req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -649,18 +783,34 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (chip_skh_r(ctrl->pdev)) {
+ req->hdr.version = MBX_CMD_VER2;
+ req->page_size = 1;
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ }
- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
- __ilog2_u32(cq->len / 256));
- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
- PCI_FUNC(ctrl->pdev->devfn));
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 2c8f98df128..23397d51ac5 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -57,6 +57,16 @@ struct be_mcc_wrb {
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_ADDL_MASK 0xFF00
+#define CQE_STATUS_MASK 0xFF
+#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_WRB_MASK 0xFF0000
+#define CQE_STATUS_WRB_SHIFT 16
+#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
+#define BEISCSI_FW_MBX_TIMEOUT 100
+
+/* MBOX Command VER */
+#define MBX_CMD_VER2 0x02
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -183,7 +193,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd0; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd0[3]; /* dword 3 */
};
struct be_cmd_resp_hdr {
@@ -483,10 +494,28 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3 */
} __packed;
+struct amap_cq_context_v2 {
+ u8 rsvd0[12]; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 rsvd1[12]; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd2; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 eqid[16]; /* dword 1 */
+ u8 rsvd3[15]; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 cqecount[16];/* dword 2 */
+ u8 rsvd4[16]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+};
+
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
+ u8 page_size;
+ u8 rsvd0;
u8 context[sizeof(struct amap_cq_context) / 8];
struct phys_addr pages[4];
} __packed;
@@ -663,6 +692,9 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
@@ -804,6 +836,59 @@ struct amap_sol_cqe_ring {
u8 valid; /* dword 3 */
} __packed;
+struct amap_sol_cqe_v2 {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 wrb_index[16]; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cmd_cmpl; /* dword 2 */
+ u8 rsvd0; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 cid[13]; /* dword 2 */
+ u8 u; /* dword 2 */
+ u8 o; /* dword 2 */
+ u8 s; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct common_sol_cqe {
+ u32 exp_cmdsn;
+ u32 res_cnt;
+ u16 wrb_index;
+ u16 cid;
+ u8 hw_sts;
+ u8 cmd_wnd;
+ u8 res_flag; /* the s field of the structure */
+ u8 i_resp; /* for skh if cmd_complete is set then i_sts is response */
+ u8 i_flags; /* for skh or the u and o fields */
+ u8 i_sts; /* for skh if cmd_complete is not set then i_sts is status */
+};
+
+/*** iSCSI ack/driver message completions ***/
+struct amap_it_dmsg_cqe {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 cid[10]; /* DWORD 2 */
+ u8 wrb_idx[8]; /* DWORD 2 */
+ u8 rsvd0[8]; /* DWORD 2*/
+ u8 rsvd1[31]; /* DWORD 3*/
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+struct amap_it_dmsg_cqe_v2 {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 rsvd0[10]; /* DWORD 2 */
+ u8 wrb_idx[16]; /* DWORD 2 */
+ u8 rsvd1[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
/**
@@ -992,8 +1077,6 @@ struct be_cmd_get_all_if_id_req {
#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
* sequence number by driver */
-/* Returns byte size of given field with a structure. */
-
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index aedb0d9a9da..214d691adb5 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -531,9 +531,9 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
if (!if_info.dhcp_state)
- len = sprintf(buf, "static");
+ len = sprintf(buf, "static\n");
else
- len = sprintf(buf, "dhcp");
+ len = sprintf(buf, "dhcp\n");
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
@@ -541,7 +541,7 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
(if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
- ? "Disabled" : "Enabled");
+ ? "Disabled\n" : "Enabled\n");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
@@ -586,7 +586,7 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
len = be2iscsi_get_if_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
- len = sprintf(buf, "enabled");
+ len = sprintf(buf, "enabled\n");
break;
case ISCSI_NET_PARAM_IPV4_GW:
memset(&gateway, 0, sizeof(gateway));
@@ -690,11 +690,9 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
{
int rc;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_hba_name *resp;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_initname(phba);
if (!tag) {
@@ -702,26 +700,16 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
"BS_%d : Getting Initiator Name Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
resp = embedded_payload(wrb);
rc = sprintf(buf, "%s\n", resp->initiator_name);
return rc;
@@ -731,7 +719,6 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
* beiscsi_get_port_state - Get the Port State
* @shost : pointer to scsi_host structure
*
- * returns number of bytes
*/
static void beiscsi_get_port_state(struct Scsi_Host *shost)
{
@@ -750,13 +737,12 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
*/
static int beiscsi_get_port_speed(struct Scsi_Host *shost)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_ntwk_link_status_resp *resp;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_port_speed(phba);
if (!tag) {
@@ -764,26 +750,14 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
"BS_%d : Getting Port Speed Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-
- if (status || extd_status) {
+ }
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Port Speed MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
resp = embedded_payload(wrb);
switch (resp->mac_speed) {
@@ -937,6 +911,14 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->initial_r2t_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
session->imm_data_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ data_seq_inorder, params,
+ session->dataseq_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder, params,
+ session->pdu_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params,
+ session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
}
@@ -1027,12 +1009,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_out *ptcpcnct_out;
- unsigned short status, extd_status;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
+ unsigned int tag;
int ret = -ENOMEM;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1084,35 +1064,26 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
}
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+
+ ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : mgmt_open_connection Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ "BS_%d : mgmt_open_connection Failed");
- free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
goto free_ep;
- } else {
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
-
- ptcpcnct_out = embedded_payload(wrb);
- beiscsi_ep = ep->dd_data;
- beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
- beiscsi_ep->cid_vld = 1;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : mgmt_open_connection Success\n");
}
+
+ ptcpcnct_out = embedded_payload(wrb);
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+ beiscsi_ep->cid_vld = 1;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Success\n");
+
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
@@ -1150,8 +1121,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : The Adapter state is Not UP\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : The Adapter Port state is Down!!!\n");
return ERR_PTR(ret);
}
@@ -1216,11 +1187,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
beiscsi_ep->ep_cid);
ret = -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
return ret;
}
@@ -1281,12 +1250,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8b826fc06bc..38eab723215 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index ff73f9500b0..48d37dded8f 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -47,8 +47,6 @@
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
-static unsigned int gcrashmode = 0;
-static unsigned int num_hba = 0;
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -153,11 +151,54 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tIO Path Events : 0x10\n"
"\t\t\t\tConfiguration Path : 0x20\n");
+DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
+ &dev_attr_beiscsi_drvr_ver,
+ &dev_attr_beiscsi_adapter_family,
NULL,
};
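+/* Human readable names for the CQE completion codes, indexed by the
+ * code value reported in the CQE; used only in the log messages below.
+ */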
+static char const *cqe_desc[] = {
+ "RESERVED_DESC",
+ "SOL_CMD_COMPLETE",
+ "SOL_CMD_KILLED_DATA_DIGEST_ERR",
+ "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
+ "CXN_KILLED_BURST_LEN_MISMATCH",
+ "CXN_KILLED_AHS_RCVD",
+ "CXN_KILLED_HDR_DIGEST_ERR",
+ "CXN_KILLED_UNKNOWN_HDR",
+ "CXN_KILLED_STALE_ITT_TTT_RCVD",
+ "CXN_KILLED_INVALID_ITT_TTT_RCVD",
+ "CXN_KILLED_RST_RCVD",
+ "CXN_KILLED_TIMED_OUT",
+ "CXN_KILLED_RST_SENT",
+ "CXN_KILLED_FIN_RCVD",
+ "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
+ "CXN_KILLED_BAD_WRB_INDEX_ERROR",
+ "CXN_KILLED_OVER_RUN_RESIDUAL",
+ "CXN_KILLED_UNDER_RUN_RESIDUAL",
+ "CMD_KILLED_INVALID_STATSN_RCVD",
+ "CMD_KILLED_INVALID_R2T_RCVD",
+ "CMD_CXN_KILLED_LUN_INVALID",
+ "CMD_CXN_KILLED_ICD_INVALID",
+ "CMD_CXN_KILLED_ITT_INVALID",
+ "CMD_CXN_KILLED_SEQ_OUTOFORDER",
+ "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
+ "CXN_INVALIDATE_NOTIFY",
+ "CXN_INVALIDATE_INDEX_NOTIFY",
+ "CMD_INVALIDATED_NOTIFY",
+ "UNSOL_HDR_NOTIFY",
+ "UNSOL_DATA_NOTIFY",
+ "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
+ "DRIVERMSG_NOTIFY",
+ "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
+ "SOL_CMD_KILLED_DIF_ERR",
+ "CXN_KILLED_SYN_RCVD",
+ "CXN_KILLED_IMM_DATA_RCVD"
+};
+
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
blk_queue_max_segment_size(sdev->request_queue, 65536);
@@ -226,11 +267,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_abort(sc);
@@ -301,11 +340,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
@@ -482,6 +519,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
+ { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -730,7 +768,7 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -738,8 +776,8 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_eq_processed)
hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
@@ -779,29 +817,26 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
-
- return IRQ_HANDLED;
} else {
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_cq)
- queue_work(phba->wq, &phba->work_cqs);
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
-
- return IRQ_HANDLED;
+ if (pbe_eq->todo_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
}
+
+ if (num_eq_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ return IRQ_HANDLED;
}
/**
@@ -849,7 +884,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
num_mcceq_processed++;
} else {
@@ -862,8 +897,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
}
if (num_ioeq_processed || num_mcceq_processed) {
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
@@ -886,11 +921,11 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) != cq->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
} else {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -898,8 +933,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_ioeq_processed++;
}
- if (phba->todo_cq || phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_ioeq_processed) {
hwi_ring_eq_db(phba, eq->id, 0,
@@ -1211,7 +1246,8 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct be_status_bhs *sts_bhs =
@@ -1221,20 +1257,14 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
- exp_cmdsn = (psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- max_cmdsn = ((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
- rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
- & SOL_RESP_MASK) >> 16);
- status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
- & SOL_STS_MASK) >> 8);
- flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
+ exp_cmdsn = csol_cqe->exp_cmdsn;
+ max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+ rsp = csol_cqe->i_resp;
+ status = csol_cqe->i_sts;
+ flags = csol_cqe->i_flags;
+ resid = csol_cqe->res_cnt;
+
if (!task->sc) {
if (io_task->scsi_cmnd)
scsi_dma_unmap(io_task->scsi_cmnd);
@@ -1249,9 +1279,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
/* bidi not initially supported */
if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
- resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
- 32] & SOL_RES_CNT_MASK);
-
if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
task->sc->result = DID_ERROR << 16;
@@ -1273,13 +1300,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
- if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
- if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK)
- conn->rxdata_octets += (psol->
- dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK);
- }
+ if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
+ conn->rxdata_octets += resid;
unmap:
scsi_dma_unmap(io_task->scsi_cmnd);
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
@@ -1287,7 +1309,8 @@ unmap:
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_logout_rsp *hdr;
struct beiscsi_io_task *io_task = task->dd_data;
@@ -1297,18 +1320,11 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->t2wait = 5;
hdr->t2retain = 0;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+
hdr->dlength[0] = 0;
hdr->dlength[1] = 0;
hdr->dlength[2] = 0;
@@ -1319,7 +1335,8 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_tm_rsp *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
@@ -1327,16 +1344,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
hdr = (struct iscsi_tm_rsp *)task->hdr;
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
@@ -1352,15 +1365,24 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_io_task *io_task;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ uint16_t wrb_index, cid;
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->
- dw[offsetof(struct amap_sol_cqe, cid) / 32] &
- SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+ if (chip_skh_r(phba->pcidev)) {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ cid, psol);
+ } else {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ cid, psol);
+ }
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ cid - phba->fw_config.iscsi_cid_start];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
task = pwrb_handle->pio_handle;
io_task = task->dd_data;
@@ -1374,26 +1396,78 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_nopin *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct beiscsi_io_task *io_task = task->dd_data;
hdr = (struct iscsi_nopin *)task->hdr;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->opcode = ISCSI_OP_NOOP_IN;
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
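+/* Normalize the chip specific solicited CQE layouts (amap_sol_cqe for
+ * BE2/BE3, amap_sol_cqe_v2 for Skyhawk-R) into a common_sol_cqe so the
+ * completion handlers above stay chip agnostic.
+ */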
+static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
+ struct sol_cqe *psol,
+ struct common_sol_cqe *csol_cqe)
+{
+ if (chip_skh_r(phba->pcidev)) {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_res_cnt, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ hw_sts, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_cmd_wnd, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cmd_cmpl, psol))
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ else
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ u, psol))
+ csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
+
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ o, psol))
+ csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ } else {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_res_cnt, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_cmd_wnd, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ hw_sts, psol);
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_resp, psol);
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_sts, psol);
+ csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_flags, psol);
+ }
+}
+
+
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol)
{
@@ -1405,19 +1479,22 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
unsigned int type;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ struct common_sol_cqe csol_cqe = {0};
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
- (struct amap_sol_cqe, cid) / 32]
- & SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+
+ /* Copy the elements to a common structure */
+ adapter_get_sol_cqe(phba, psol, &csol_cqe);
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[
+ csol_cqe.wrb_index];
+
task = pwrb_handle->pio_handle;
pwrb = pwrb_handle->pwrb;
- type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
- WRB_TYPE_MASK) >> 28;
+ type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
spin_lock_bh(&session->lock);
switch (type) {
@@ -1425,17 +1502,16 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
case HWH_TYPE_IO_RD:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
ISCSI_OP_NOOP_OUT)
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
else
- be_complete_io(beiscsi_conn, task, psol);
+ be_complete_io(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGOUT:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
- be_complete_logout(beiscsi_conn, task, psol);
+ be_complete_logout(beiscsi_conn, task, &csol_cqe);
else
- be_complete_tmf(beiscsi_conn, task, psol);
-
+ be_complete_tmf(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGIN:
@@ -1446,7 +1522,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
case HWH_TYPE_NOP:
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
break;
default:
@@ -1454,10 +1530,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d : In hwi_complete_cmd, unknown type = %d"
"wrb_index 0x%x CID 0x%x\n", type,
- ((psol->dw[offsetof(struct amap_iscsi_wrb,
- type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
- ((psol->dw[offsetof(struct amap_sol_cqe,
- cid) / 32] & SOL_CID_MASK) >> 6));
+ csol_cqe.wrb_index,
+ csol_cqe.cid);
break;
}
@@ -1485,13 +1559,26 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
struct list_head *pbusy_list;
struct async_pdu_handle *pasync_handle = NULL;
unsigned char is_header = 0;
+ unsigned int index, dpl;
+
+ if (chip_skh_r(phba->pcidev)) {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ index, pdpdu_cqe);
+ } else {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ index, pdpdu_cqe);
+ }
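+ /* dpl and index were read from the chip specific default-PDU CQE
+ * layout above; dpl is used as the received buffer length and
+ * index as the entry in the async handle ring.
+ */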
phys_addr.u.a32.address_lo =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
- ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_lo) / 32] - dpl);
phys_addr.u.a32.address_hi =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_hi) / 32];
phys_addr.u.a64.address =
*((unsigned long long *)(&phys_addr.u.a64.address));
@@ -1501,14 +1588,12 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
case UNSOL_HDR_NOTIFY:
is_header = 1;
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
- (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
case UNSOL_DATA_NOTIFY:
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
default:
pbusy_list = NULL;
@@ -1531,12 +1616,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start;
pasync_handle->is_header = is_header;
- pasync_handle->buffer_len = ((pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ pasync_handle->buffer_len = dpl;
+ *pcq_index = index;
- *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK);
return pasync_handle;
}
@@ -1914,6 +1996,13 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
}
+/**
+ * beiscsi_process_cq()- Process the Completion Queue
+ * @pbe_eq: Event Q on which the Completion has come
+ *
+ * Returns the number of Completion Entries processed.
+ **/
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
struct be_queue_info *cq;
@@ -1935,12 +2024,24 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
CQE_VALID_MASK) {
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
- CQE_CID_MASK) >> 6);
- code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
- CQE_CODE_MASK);
- ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK);
+
+ /* Get the CID */
+ if (chip_skh_r(phba->pcidev)) {
+ if ((code == DRIVERMSG_NOTIFY) ||
+ (code == UNSOL_HDR_NOTIFY) ||
+ (code == UNSOL_DATA_NOTIFY))
+ cid = AMAP_GET_BITS(
+ struct amap_i_t_dpdu_cqe_v2,
+ cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
+ } else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+ ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
@@ -1958,7 +2059,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case DRIVERMSG_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received DRIVERMSG_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
@@ -1966,7 +2068,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_HDR_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1974,7 +2077,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : Received UNSOL_DATA_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1984,8 +2088,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_INVALIDATE_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Ignoring CQ Error notification for"
- " cmd/cxn invalidate\n");
+ "BM_%d : Ignoring %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1997,14 +2101,14 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : CQ Error notification for cmd.. "
- "code %d cid 0x%x\n", code, cid);
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Digest error on def pdu ring,"
- " dropping..\n");
+ "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
break;
@@ -2017,6 +2121,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_INVALID_ITT_TTT_RCVD:
case CXN_KILLED_TIMED_OUT:
case CXN_KILLED_FIN_RCVD:
+ case CXN_KILLED_RST_SENT:
+ case CXN_KILLED_RST_RCVD:
case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
case CXN_KILLED_BAD_WRB_INDEX_ERROR:
case CXN_KILLED_OVER_RUN_RESIDUAL:
@@ -2024,19 +2130,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset CID 0x%x...\n",
- code, cid);
- if (beiscsi_conn)
- iscsi_conn_failure(beiscsi_conn->conn,
- ISCSI_ERR_CONN_FAILED);
- break;
- case CXN_KILLED_RST_SENT:
- case CXN_KILLED_RST_RCVD:
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset"
- "received/sent on CID 0x%x...\n",
- code, cid);
+ "BM_%d : Event %s[%d] received on CID : %d\n",
+ cqe_desc[code], code, cid);
if (beiscsi_conn)
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED);
@@ -2044,8 +2139,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error Invalid code= %d "
- "received on CID 0x%x...\n",
+ "BM_%d : Invalid CQE Event Received Code : %d"
+ "CID 0x%x...\n",
code, cid);
break;
}
@@ -2068,30 +2163,30 @@ void beiscsi_process_all_cqs(struct work_struct *work)
unsigned long flags;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- struct be_eq_obj *pbe_eq;
- struct beiscsi_hba *phba =
- container_of(work, struct beiscsi_hba, work_cqs);
+ struct beiscsi_hba *phba;
+ struct be_eq_obj *pbe_eq =
+ container_of(work, struct be_eq_obj, work_cqs);
+ phba = pbe_eq->phba;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled)
- pbe_eq = &phwi_context->be_eq[phba->num_cpus];
- else
- pbe_eq = &phwi_context->be_eq[0];
- if (phba->todo_mcc_cq) {
+ if (pbe_eq->todo_mcc_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 0;
+ pbe_eq->todo_mcc_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_mcc_isr(phba);
}
- if (phba->todo_cq) {
+ if (pbe_eq->todo_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 0;
+ pbe_eq->todo_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_cq(pbe_eq);
}
+
+ /* rearm EQ for further interrupts */
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
static int be_iopoll(struct blk_iopoll *iop, int budget)
@@ -2115,6 +2210,101 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
}
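+/* Skyhawk-R (v2 WRB) variant of hwi_write_sgl(): the first two SGEs are
+ * programmed inline in the WRB and the full scatterlist is written into
+ * the SGL fragment.
+ */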
static void
+hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned int sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ }
+
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
+
+static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
unsigned int num_sg, struct beiscsi_io_task *io_task)
{
@@ -2202,13 +2392,18 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
+/**
+ * hwi_write_buffer()- Populate the WRB with task info
+ * @pwrb: ptr to the WRB entry
+ * @task: iscsi task which is to be executed
+ **/
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
struct iscsi_sge *psgl;
- unsigned long long addr;
struct beiscsi_io_task *io_task = task->dd_data;
struct beiscsi_conn *beiscsi_conn = io_task->conn;
struct beiscsi_hba *phba = beiscsi_conn->phba;
+ uint8_t dsp_value = 0;
io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
@@ -2217,26 +2412,38 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
io_task->bhs_pa.u.a32.address_hi);
if (task->data) {
- if (task->data_count) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
- addr = (u64) pci_map_single(phba->pcidev,
- task->data,
- task->data_count, 1);
- } else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
- }
+
+ /* Check for the data_count */
+ dsp_value = (task->data_count) ? 1 : 0;
+
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+ pwrb, dsp_value);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+ pwrb, dsp_value);
+
+ /* Map addr only if there is data_count */
+ if (dsp_value) {
+ io_task->mtask_addr = pci_map_single(phba->pcidev,
+ task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_data_count = task->data_count;
+ } else
+ io_task->mtask_addr = 0;
+
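+ /* The mapping is stored in io_task so beiscsi_cleanup_task() can
+ * pci_unmap_single() it once the task completes.
+ */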
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
+ io_task->mtask_addr = 0;
}
psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
@@ -2259,9 +2466,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
psgl++;
if (task->data) {
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
}
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
}
@@ -2843,7 +3050,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
}
return 0;
create_eq_error:
- for (i = 0; i < (phba->num_cpus + 1); i++) {
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
@@ -3268,15 +3475,31 @@ err:
return -ENOMEM;
}
-static int find_num_cpus(void)
+/**
+ * find_num_cpus()- Get the CPU online count
+ * @phba: ptr to priv structure
+ *
+ * The CPU count is used when creating the EQs.
+ **/
+static void find_num_cpus(struct beiscsi_hba *phba)
{
int num_cpus = 0;
num_cpus = num_online_cpus();
- if (num_cpus >= MAX_CPUS)
- num_cpus = MAX_CPUS - 1;
- return num_cpus;
+ switch (phba->generation) {
+ case BE_GEN2:
+ case BE_GEN3:
+ phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
+ BEISCSI_MAX_NUM_CPUS : num_cpus;
+ break;
+ case BE_GEN4:
+ phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
+ OC_SKH_MAX_NUM_CPUS : num_cpus;
+ break;
+ default:
+ phba->num_cpus = 1;
+ }
}
static int hwi_init_port(struct beiscsi_hba *phba)
@@ -3644,12 +3867,9 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
struct be_cmd_get_session_resp *session_resp;
- struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
unsigned int s_handle;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int ret = -ENOMEM;
/* Get the session handle of the boot target */
@@ -3682,25 +3902,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
" Failed\n");
goto boot_freemem;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : beiscsi_get_session_info Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
+ "BM_%d : beiscsi_get_session_info Failed");
goto boot_freemem;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
session_resp = nonemb_cmd.va ;
memcpy(&phba->boot_sess, &session_resp->session_info,
@@ -3853,6 +4064,11 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
kfree(phba->ep_array);
}
+/**
+ * beiscsi_cleanup_task()- Free driver resources of the task
+ * @task: ptr to the iscsi task
+ *
+ **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
@@ -3900,6 +4116,13 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
spin_unlock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = NULL;
}
+ if (io_task->mtask_addr) {
+ pci_unmap_single(phba->pcidev,
+ io_task->mtask_addr,
+ io_task->mtask_data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_addr = 0;
+ }
}
}
}
@@ -3909,8 +4132,6 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params)
{
struct wrb_handle *pwrb_handle;
- struct iscsi_target_context_update_wrb *pwrb = NULL;
- struct be_mem_descriptor *mem_descr;
struct beiscsi_hba *phba = beiscsi_conn->phba;
struct iscsi_task *task = beiscsi_conn->task;
struct iscsi_session *session = task->conn->session;
@@ -3927,67 +4148,16 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start));
- pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_burst_length, pwrb, params->dw[offsetof
- (struct amap_beiscsi_offload_params,
- max_burst_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_send_data_segment_length, pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- max_send_data_segment_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- first_burst_length,
- pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- first_burst_length) / 32]);
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- erl) / 32] & OFFLD_PARAMS_ERL));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
- pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- exp_statsn) / 32] + 1));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
- 0x7);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
- pwrb, pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
- pwrb, pwrb_handle->nxt_wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- session_state, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
- pwrb, 1);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
- 0);
- mem_descr = phba->init_mem;
- mem_descr += ISCSI_MEM_GLOBAL_HEADER;
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_hi, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_hi);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_lo, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+ /* Check for the adapter family */
+ if (chip_skh_r(phba->pcidev))
+ beiscsi_offload_cxn_v2(params, pwrb_handle);
+ else
+ beiscsi_offload_cxn_v0(params, pwrb_handle,
+ phba->init_mem);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
+ be_dws_le_to_cpu(pwrb_handle->pwrb,
+ sizeof(struct iscsi_target_context_update_wrb));
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
@@ -4044,13 +4214,25 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->io_sgl_lock);
io_task->psgl_handle = alloc_io_sgl_handle(phba);
spin_unlock(&phba->io_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of IO_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle = alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
+ }
} else {
io_task->scsi_cmnd = NULL;
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -4059,8 +4241,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->psgl_handle = (struct sgl_handle *)
alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
beiscsi_conn->login_in_progress = 1;
beiscsi_conn->plogin_sgl_handle =
@@ -4069,8 +4259,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
- goto free_io_hndls;
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_mgmt_hndls;
+ }
beiscsi_conn->plogin_wrb_handle =
io_task->pwrb_handle;
@@ -4085,14 +4283,28 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle =
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
+ }
}
}
@@ -4124,11 +4336,64 @@ free_hndls:
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
io_task->cmd_bhs = NULL;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of SGL_ICD Failed\n");
return -ENOMEM;
}
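+/* v2 (Skyhawk-R) I/O path: build the command WRB using the
+ * amap_iscsi_wrb_v2 layout and ring the TX doorbell; selected at probe
+ * time via phba->iotask_fn.
+ */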
+int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
+ }
+
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
+ type, pwrb);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) <<
+ DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ return 0;
+}
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
@@ -4156,6 +4421,9 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
}
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
+ type, pwrb);
+
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
cpu_to_be16(*(unsigned short *)
&io_task->cmd_bhs->iscsi_hdr.lun));
@@ -4191,55 +4459,75 @@ static int beiscsi_mtask(struct iscsi_task *task)
struct iscsi_wrb *pwrb = NULL;
unsigned int doorbell = 0;
unsigned int cid;
+ unsigned int pwrb_typeoffset = 0;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
- be32_to_cpu(task->cmdsn));
- AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
- io_task->pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
- io_task->psgl_handle->sgl_index);
+
+ if (chip_skh_r(phba->pcidev)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+ }
+
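+ /* pwrb_typeoffset records where the WRB type field lives for this
+ * chip family; ADAPTER_SET_WRB_TYPE (definition not shown here)
+ * presumably uses it so the switch below stays layout independent.
+ */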
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 1);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 1);
} else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_RD_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 0);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 0);
}
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_TMF_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- HWH_TYPE_LOGOUT);
+ ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
@@ -4251,11 +4539,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
return -EINVAL;
}
- AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
- task->data_count);
- AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
- io_task->pwrb_handle->nxt_wrb_index);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+ /* Set the task type */
+ io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
+ AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
+ AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
doorbell |= cid & DB_WRB_POST_CID_MASK;
doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4269,10 +4556,13 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct beiscsi_hba *phba = NULL;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
+ phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+
if (!sc)
return beiscsi_mtask(task);
@@ -4295,7 +4585,7 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
else
writedir = 0;
- return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
@@ -4326,20 +4616,24 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
"beiscsi_bsg_request\n");
- return -EIO;
+ return -ENOMEM;
}
tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
&nonemb_cmd);
if (!tag) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed\n");
+ "BM_%d : MBX Tag Allocation Failed\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
+
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
free_mcc_tag(&phba->ctrl, tag);
@@ -4356,11 +4650,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed"
+ "BM_%d : MBX Cmd Failed"
" status = %d extd_status = %d\n",
status, extd_status);
return -EIO;
+ } else {
+ rc = 0;
}
break;
@@ -4380,14 +4676,18 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
+/**
+ * beiscsi_quiesce()- Clean up driver resources
+ * @phba: Instance Priv structure
+ *
+ * Free the OS and HW resources held by the driver
+ **/
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
unsigned int i, msix_vec;
- u8 *real_offset = 0;
- u32 value = 0;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -4411,19 +4711,14 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
beiscsi_unmap_pci_function(phba);
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
+
+ cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
static void beiscsi_remove(struct pci_dev *pcidev)
@@ -4476,6 +4771,25 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
return;
}
+/*
+ * beiscsi_hw_health_check()- Check adapter health
+ * @work: work item to check HW health
+ *
+ * Check whether the adapter is in an unrecoverable state.
+ **/
+static void
+beiscsi_hw_health_check(struct work_struct *work)
+{
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba,
+ beiscsi_hw_check_task.work);
+
+ beiscsi_ue_detect(phba);
+
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+}
+
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
@@ -4483,9 +4797,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret, num_cpus, i;
- u8 *real_offset = 0;
- u32 value = 0;
+ int ret, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -4504,25 +4816,33 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
+ phba->fw_timeout = false;
+
+
switch (pcidev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
case OC_DEVICE_ID2:
phba->generation = BE_GEN2;
+ phba->iotask_fn = beiscsi_iotask;
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
phba->generation = BE_GEN3;
+ phba->iotask_fn = beiscsi_iotask;
break;
+ case OC_SKH_ID1:
+ phba->generation = BE_GEN4;
+ phba->iotask_fn = beiscsi_iotask_v2;
+ break;
default:
phba->generation = 0;
}
if (enable_msix)
- num_cpus = find_num_cpus();
+ find_num_cpus(phba);
else
- num_cpus = 1;
- phba->num_cpus = num_cpus;
+ phba->num_cpus = 1;
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : num_cpus = %d\n",
phba->num_cpus);
@@ -4540,31 +4860,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto hba_free;
}
- if (!num_hba) {
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
- if (value & 0x00010000) {
- gcrashmode++;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Loading Driver in crashdump mode\n");
- ret = beiscsi_cmd_reset_function(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed. Aborting Crashdump\n");
- goto hba_free;
- }
- ret = be_chk_reset_complete(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n");
- goto hba_free;
- }
- } else {
- value |= 0x00010000;
- writel(value, (void *)real_offset);
- num_hba++;
- }
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed. Aborting Crashdump\n");
+ goto hba_free;
+ }
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset."
+ "Aborting Crashdump\n");
+ goto hba_free;
}
spin_lock_init(&phba->io_sgl_lock);
@@ -4596,7 +4903,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
+ snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
phba->shost->host_no);
phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
@@ -4606,10 +4913,12 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_twq;
}
- INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
+ INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
+ beiscsi_hw_health_check);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
+
if (blk_iopoll_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
@@ -4617,7 +4926,25 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
be_iopoll);
blk_iopoll_enable(&pbe_eq->iopoll);
}
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ } else {
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ } else {
+ pbe_eq = &phwi_context->be_eq[0];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
}
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4637,6 +4964,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
"iSCSI boot info.\n");
beiscsi_create_def_ifaces(phba);
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
@@ -4652,15 +4982,6 @@ free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
free_port:
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
-
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b8912263ef4..5946577d79d 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -36,12 +36,13 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "4.4.58.0"
+#define BUILD_STR "10.0.272.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
+#define ELX_VENDOR_ID 0x10DF
/* DEVICE ID's for BE2 */
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
@@ -51,6 +52,9 @@
#define BE_DEVICE_ID2 0x222
#define OC_DEVICE_ID3 0x712
+/* DEVICE ID for SKH */
+#define OC_SKH_ID1 0x722
+
#define BE2_IO_DEPTH 1024
#define BE2_MAX_SESSIONS 256
#define BE2_CMDS_PER_CXN 128
@@ -60,7 +64,11 @@
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
-#define MAX_CPUS 31
+#define MAX_CPUS 64
+#define BEISCSI_MAX_NUM_CPUS 7
+#define OC_SKH_MAX_NUM_CPUS 63
+
+
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
@@ -257,6 +265,7 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
+#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1)
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -270,12 +279,11 @@ struct beiscsi_hba {
struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
- unsigned int state;
unsigned short asic_revision;
unsigned int num_cpus;
unsigned int nxt_cqid;
- struct msix_entry msix_entries[MAX_CPUS + 1];
- char *msi_name[MAX_CPUS + 1];
+ struct msix_entry msix_entries[MAX_CPUS];
+ char *msi_name[MAX_CPUS];
bool msix_enabled;
struct be_mem_descriptor *init_mem;
@@ -325,12 +333,14 @@ struct beiscsi_hba {
spinlock_t cid_lock;
} fw_config;
+ unsigned int state;
+ bool fw_timeout;
+ bool ue_detected;
+ struct delayed_work beiscsi_hw_check_task;
+
u8 mac_address[ETH_ALEN];
- unsigned short todo_cq;
- unsigned short todo_mcc_cq;
char wq_name[20];
struct workqueue_struct *wq; /* The actuak work queue */
- struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int interface_handle;
@@ -338,7 +348,10 @@ struct beiscsi_hba {
struct invalidate_command_table inv_tbl[128];
unsigned int attr_log_enable;
-
+ int (*iotask_fn)(struct iscsi_task *,
+ struct scatterlist *sg,
+ uint32_t num_sg, uint32_t xferlen,
+ uint32_t writedir);
};
struct beiscsi_session {
@@ -410,6 +423,9 @@ struct beiscsi_io_task {
struct be_cmd_bhs *cmd_bhs;
struct be_bus_address bhs_pa;
unsigned short bhs_len;
+ dma_addr_t mtask_addr;
+ uint32_t mtask_data_count;
+ uint8_t wrb_type;
};
struct be_nonio_bhs {
@@ -457,6 +473,9 @@ struct beiscsi_offload_params {
#define OFFLD_PARAMS_HDE 0x00000008
#define OFFLD_PARAMS_IR2T 0x00000010
#define OFFLD_PARAMS_IMD 0x00000020
+#define OFFLD_PARAMS_DATA_SEQ_INORDER 0x00000040
+#define OFFLD_PARAMS_PDU_SEQ_INORDER 0x00000080
+#define OFFLD_PARAMS_MAX_R2T 0x00FFFF00
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -471,7 +490,10 @@ struct amap_beiscsi_offload_params {
u8 hde[1];
u8 ir2t[1];
u8 imd[1];
- u8 pad[26];
+ u8 data_seq_inorder[1];
+ u8 pdu_seq_inorder[1];
+ u8 max_r2t[16];
+ u8 pad[8];
u8 exp_statsn[32];
};
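/*
 * Illustrative sketch (not part of this patch): how the pseudo-amap layout
 * above is consumed. Each field is declared one byte per bit, so
 * offsetof(struct amap_..., field) yields the field's bit position; dividing
 * by 32 selects the containing dword of the real structure, and an
 * OFFLD_PARAMS_* mask plus shift extracts the value. The helper below is
 * hypothetical; the struct, field and mask names come from this file.
 */
static inline u32 offload_get_max_r2t(struct beiscsi_offload_params *params)
{
	u32 dw = params->dw[offsetof(struct amap_beiscsi_offload_params,
				     max_r2t) / 32];

	/* max_r2t occupies bits 8..23 of that dword (OFFLD_PARAMS_MAX_R2T) */
	return (dw & OFFLD_PARAMS_MAX_R2T) >> 8;
}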
@@ -569,6 +591,20 @@ struct amap_i_t_dpdu_cqe {
u8 valid;
} __packed;
+struct amap_i_t_dpdu_cqe_v2 {
+ u8 db_addr_hi[32]; /* DWORD 0 */
+ u8 db_addr_lo[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 num_cons; /* DWORD 2*/
+ u8 rsvd0[8]; /* DWORD 2 */
+ u8 dpl[17]; /* DWORD 2 */
+ u8 index[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd1; /* DWORD 3 */
+ u8 final; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
+
#define CQE_VALID_MASK 0x80000000
#define CQE_CODE_MASK 0x0000003F
#define CQE_CID_MASK 0x0000FFC0
@@ -617,6 +653,11 @@ struct iscsi_wrb {
} __packed;
#define WRB_TYPE_MASK 0xF0000000
+#define SKH_WRB_TYPE_OFFSET 27
+#define BE_WRB_TYPE_OFFSET 28
+
+#define ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset) \
+ (pwrb->dw[0] |= (wrb_type << type_offset))
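/*
 * Illustrative sketch (not part of this patch): the WRB "type" field lives in
 * dword 0, but at bit 28 on BE2/BE3 and bit 27 on Skyhawk, so callers pick the
 * offset per chip before using ADAPTER_SET_WRB_TYPE(), as the pwrb_typeoffset
 * hunks above do. The helper name below is hypothetical.
 */
static inline void beiscsi_set_wrb_type(struct beiscsi_hba *phba,
					struct iscsi_wrb *pwrb, u32 wrb_type)
{
	u32 type_offset = chip_skh_r(phba->pcidev) ?
			  SKH_WRB_TYPE_OFFSET : BE_WRB_TYPE_OFFSET;

	/* OR the type value into dword 0 at the chip-specific bit offset */
	ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset);
}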
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -663,12 +704,57 @@ struct amap_iscsi_wrb {
} __packed;
+struct amap_iscsi_wrb_v2 {
+ u8 r2t_exp_dtl[25]; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0*/
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 lun[16]; /* DWORD 1 */
+ u8 sgl_idx[16]; /* DWORD 2 */
+ u8 ref_sgl_icd_idx[16]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cq_id[16]; /* DWORD 6 */
+ u8 rsvd1[16]; /* DWORD 6 */
+ u8 cmdsn_itt[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[24]; /* DWORD 10 */
+ u8 rsvd2[7]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 rsvd3[7]; /* DWORD 11 */
+ u8 diff_enbl; /* DWORD 11 */
+ u8 u_run; /* DWORD 11 */
+ u8 o_run; /* DWORD 11 */
+ u8 invalid; /* DWORD 11 */
+ u8 dsp; /* DWORD 11 */
+ u8 dmsg; /* DWORD 11 */
+ u8 rsvd4; /* DWORD 11 */
+ u8 lt; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[24]; /* DWORD 14 */
+ u8 rsvd5[7]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 rsvd6[15]; /* DWORD 15 */
+} __packed;
+
+
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
void beiscsi_process_all_cqs(struct work_struct *work);
+static inline bool beiscsi_error(struct beiscsi_hba *phba)
+{
+ return phba->ue_detected || phba->fw_timeout;
+}
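/*
 * Usage sketch (not part of this patch): beiscsi_error() above is a cheap
 * health test combining the UE and FW-timeout flags; a caller would bail out
 * before queueing new mailbox work once either has been recorded. The helper
 * below is hypothetical.
 */
static inline int beiscsi_check_adapter_state(struct beiscsi_hba *phba)
{
	if (beiscsi_error(phba))
		return -EIO;	/* adapter unusable: UE or FW timeout seen */

	return 0;
}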
+
struct pdu_nop_out {
u32 dw[12];
};
@@ -728,6 +814,7 @@ struct iscsi_target_context_update_wrb {
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
+#define BE_TGT_CTX_UPDT_CMD 0x07
struct amap_iscsi_target_context_update_wrb {
u8 lun[14]; /* DWORD 0 */
u8 lt; /* DWORD 0 */
@@ -773,6 +860,47 @@ struct amap_iscsi_target_context_update_wrb {
} __packed;
+#define BEISCSI_MAX_RECV_DATASEG_LEN (64 * 1024)
+#define BEISCSI_MAX_CXNS 1
+struct amap_iscsi_target_context_update_wrb_v2 {
+ u8 max_burst_length[24]; /* DWORD 0 */
+ u8 rsvd0[3]; /* DWORD 0 */
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 rsvd1[16]; /* DWORD 1 */
+ u8 max_send_data_segment_length[24]; /* DWORD 2 */
+ u8 rsvd2[8]; /* DWORD 2 */
+ u8 first_burst_length[24]; /* DWORD 3 */
+ u8 rsvd3[8]; /* DWORD 3 */
+ u8 max_r2t[16]; /* DWORD 4 */
+ u8 rsvd4[10]; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 rsvd5[32]; /* DWORD 6 */
+ u8 rsvd6[32]; /* DWORD 7 */
+ u8 max_recv_dataseg_len[24]; /* DWORD 8 */
+ u8 rsvd7[8]; /* DWORD 8 */
+ u8 rsvd8[32]; /* DWORD 9 */
+ u8 rsvd9[32]; /* DWORD 10 */
+ u8 max_cxns[16]; /* DWORD 11 */
+ u8 rsvd10[11]; /* DWORD 11*/
+ u8 invld; /* DWORD 11 */
+ u8 rsvd11;/* DWORD 11*/
+ u8 dmsg; /* DWORD 11 */
+ u8 data_seq_inorder; /* DWORD 11 */
+ u8 pdu_seq_inorder; /* DWORD 11 */
+ u8 rsvd12[32]; /*DWORD 12 */
+ u8 rsvd13[32]; /* DWORD 13 */
+ u8 rsvd14[32]; /* DWORD 14 */
+ u8 rsvd15[32]; /* DWORD 15 */
+} __packed;
+
+
struct be_ring {
u32 pages; /* queue size in pages */
u32 id; /* queue id assigned by beklib */
@@ -837,7 +965,7 @@ struct hwi_context_memory {
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
struct be_eq_obj be_eq[MAX_CPUS];
- struct be_queue_info be_cq[MAX_CPUS];
+ struct be_queue_info be_cq[MAX_CPUS - 1];
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aab5dd359e2..a6c2fe4b4d6 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -22,6 +22,138 @@
#include <scsi/scsi_bsg_iscsi.h>
#include "be_mgmt.h"
#include "be_iscsi.h"
+#include "be_main.h"
+
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+/*
+ * beiscsi_ue_detect()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+void beiscsi_ue_detect(struct beiscsi_hba *phba)
+{
+ uint32_t ue_hi = 0, ue_lo = 0;
+ uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+ uint8_t i = 0;
+
+ if (phba->ue_detected)
+ return;
+
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_LOW,
+ &ue_mask_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_HIGH,
+ &ue_hi);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_HI,
+ &ue_mask_hi);
+
+ ue_lo = (ue_lo & ~ue_mask_lo);
+ ue_hi = (ue_hi & ~ue_mask_hi);
+
+
+ if (ue_lo || ue_hi) {
+ phba->ue_detected = true;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : Error detected on the adapter\n");
+ }
+
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_LOW %s bit set\n",
+ desc_ue_status_low[i]);
+ }
+ }
+
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_HIGH %s bit set\n",
+ desc_ue_status_hi[i]);
+ }
+ }
+}
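/*
 * Worked example (not part of this patch): with ue_lo = 0x24 and
 * ue_mask_lo = 0x20 the masked value is 0x04, so only bit 2 survives and the
 * loop above logs "UE_LOW DBUF bit set". A minimal stand-alone decoder over
 * the same table would look like this (hypothetical helper):
 */
static void __maybe_unused decode_ue_status_low(uint32_t ue_lo,
						uint32_t ue_mask_lo)
{
	uint32_t bits = ue_lo & ~ue_mask_lo;
	uint8_t i;

	for (i = 0; bits; bits >>= 1, i++)
		if (bits & 1)
			pr_err("UE_LOW %s bit set\n", desc_ue_status_low[i]);
}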
/**
* mgmt_reopen_session()- Reopen a session based on reopen_type
@@ -575,13 +707,20 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
return status;
}
+/*
+ * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
+ * @phba: Driver priv structure
+ * @nonemb_cmd: Address of the MBX command issued
+ * @resp_buf: Buffer to copy the MBX cmd response
+ * @resp_buf_len: response length to be copied
+ *
+ **/
static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *nonemb_cmd, void *resp_buf,
int resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- unsigned short status, extd_status;
struct be_sge *sge;
unsigned int tag;
int rc = 0;
@@ -599,31 +738,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
- "extd_status = %d\n", status, extd_status);
+ "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
rc = -EIO;
- goto free_tag;
+ goto free_cmd;
}
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-free_tag:
- free_mcc_tag(&phba->ctrl, tag);
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
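/*
 * Caller-side sketch (not part of this patch): the usual shape of a
 * non-embedded mailbox command as reworked above -- allocate a DMA buffer for
 * the request, let mgmt_exec_nonemb_cmd() issue it and wait for the MCC
 * completion, then consume the response it copies back. The prep helper name
 * is hypothetical; mgmt_exec_nonemb_cmd() frees the DMA buffer itself.
 */
static int example_nonemb_cmd_caller(struct beiscsi_hba *phba, void *resp,
				     int resp_len)
{
	struct be_dma_mem nonemb_cmd;

	nonemb_cmd.size = resp_len;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd.size,
					     &nonemb_cmd.dma);
	if (!nonemb_cmd.va)
		return -ENOMEM;

	mgmt_prepare_example_cmd(nonemb_cmd.va);	/* fill in the request */

	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp, resp_len);
}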
@@ -1009,10 +1142,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
{
struct be_cmd_get_boot_target_resp *boot_resp;
struct be_mcc_wrb *wrb;
- unsigned int tag, wrb_num;
+ unsigned int tag;
uint8_t boot_retry = 3;
- unsigned short status, extd_status;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int rc;
do {
/* Get the Boot Target Session Handle and Count*/
@@ -1022,24 +1154,16 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BG_%d : Getting Boot Target Info Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_get_boot_target Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
+ "BG_%d : MBX CMD get_boot_target Failed\n");
return -EBUSY;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
boot_resp = embedded_payload(wrb);
/* Check if the there are any Boot targets configured */
@@ -1064,24 +1188,15 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_reopen_session Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_reopen_session Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EBUSY;
+ "BG_%d : mgmt_reopen_session Failed");
+ return rc;
}
- free_mcc_tag(&phba->ctrl, tag);
-
} while (--boot_retry);
/* Couldn't log into the boot target */
@@ -1106,8 +1221,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
int mgmt_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
+ struct be_mcc_wrb *wrb = NULL;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
@@ -1115,24 +1231,208 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
"BG_%d : VLAN Setting Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
- "BS_%d : status : %d extd_status : %d\n",
- status, extd_status);
+ "BS_%d : VLAN MBX Cmd Failed\n");
+ return rc;
+ }
+ return rc;
+}
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+/**
+ * beiscsi_drvr_ver_disp()- Display the driver Name and Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text driver name and version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
+}
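/*
 * Context sketch (not part of this hunk): a show-only routine like the one
 * above is typically exposed through DEVICE_ATTR() and the SCSI host
 * template's shost_attrs list. The wiring below only mirrors that pattern;
 * the real hookup lives in be_main.c and is not shown in this diff, and the
 * array name here is illustrative.
 */
static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);

static struct device_attribute *beiscsi_attrs_example[] = {
	&dev_attr_beiscsi_drvr_ver,	/* hooked up via .shost_attrs */
	NULL,
};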
+
+/**
+ * beiscsi_adap_family_disp()- Display adapter family.
+ * @dev: ptr to device to get priv structure
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text of the adapter family name
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ uint16_t dev_id = 0;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ dev_id = phba->pcidev->device;
+ switch (dev_id) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
+ break;
+ case OC_SKH_ID1:
+ return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
+ break;
+ default:
+ return snprintf(buf, PAGE_SIZE,
+ "Unkown Adapter Family: 0x%x\n", dev_id);
+ break;
}
+}
- free_mcc_tag(&phba->ctrl, tag);
- return 0;
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ first_burst_length,
+ pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ session_state, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
+ pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
+ pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
+ 0);
+
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_hi, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_lo, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+}
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ first_burst_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_cxns, pwrb, BEISCSI_MAX_CXNS);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ data_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ data_seq_inorder) / 32] &
+ OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ pdu_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder) / 32] &
+ OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_r2t) / 32] &
+ OFFLD_PARAMS_MAX_R2T) >> 8);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index c50cef6fec0..2e4968add79 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -30,6 +30,12 @@
#define IP_V6_LEN 16
#define IP_V4_LEN 4
+/* UE Status and Mask register */
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_MASK_LOW 0xA8
+#define PCICFG_UE_STATUS_MASK_HI 0xAC
+
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
@@ -301,4 +307,19 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
+ssize_t beiscsi_drvr_ver_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_adap_family_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr);
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle);
+void beiscsi_ue_detect(struct beiscsi_hba *phba);
+
#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6d6eee42ac7..ef60afa94d0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -296,7 +296,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.flags |= (interface->vlan_enabled <<
FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
- /* C2_VALID and ACK flags are not set as they are not suppported */
+ /* C2_VALID and ACK flags are not set as they are not supported */
/* Initialize offload request 4 structure */
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 3f9e7061258..b44d04e41b0 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -800,7 +800,7 @@ extern struct device_attribute *bnx2i_dev_attributes[];
/*
* Function Prototypes
*/
-extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev);
extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index b17637aab9a..ee009e4ad09 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -79,42 +79,33 @@ static struct notifier_block bnx2i_cpu_notifier = {
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
+ * @cnic: Corresponding cnic device
*
* This function identifies the NX2 device type and sets appropriate
* queue mailbox register access method, 5709 requires driver to
* access MBOX regs using *bin* mode
*/
-void bnx2i_identify_device(struct bnx2i_hba *hba)
+void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev)
{
hba->cnic_dev_type = 0;
- if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
- set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
- set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
- set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
- hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_VF)
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5706S) {
+ set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5708S) {
+ set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5709S) {
+ set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ }
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
- else
+ } else {
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
hba->pci_did);
+ }
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 3b34c13e2f0..0056e47bd56 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -808,7 +808,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
- bnx2i_identify_device(hba);
+ bnx2i_identify_device(hba, cnic);
bnx2i_setup_host_queue_size(hba, shost);
hba->reg_base = pci_resource_start(hba->pcidev, 0);
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
new file mode 100644
index 00000000000..4d03b032aa1
--- /dev/null
+++ b/drivers/scsi/csiostor/Kconfig
@@ -0,0 +1,19 @@
+config SCSI_CHELSIO_FCOE
+ tristate "Chelsio Communications FCoE support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ select FW_LOADER
+ help
+ This driver supports FCoE Offload functionality over
+ Chelsio T4-based 10Gb Converged Network Adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called csiostor.
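# For example (a sketch, not part of the entry above): building this as a
# module and loading it amounts to
#
#	CONFIG_SCSI_CHELSIO_FCOE=m
#
# in the kernel .config, followed by "modprobe csiostor" once the modules
# are installed.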
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
new file mode 100644
index 00000000000..b581966c88f
--- /dev/null
+++ b/drivers/scsi/csiostor/Makefile
@@ -0,0 +1,11 @@
+#
+## Chelsio FCoE driver
+#
+##
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
+
+csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
+ csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
new file mode 100644
index 00000000000..065a87ace62
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -0,0 +1,796 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_init.h"
+
+static void
+csio_vport_set_state(struct csio_lnode *ln);
+
+/*
+ * csio_reg_rnode - Register a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_add() to register this remote port with FC transport.
+ * If remote port is Initiator OR Target OR both, change the role appropriately.
+ *
+ */
+void
+csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct csio_service_parms *sp;
+
+ ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
+ ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
+ ids.port_id = rn->nport_id;
+ ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
+ rport = rn->rport;
+ CSIO_ASSERT(rport != NULL);
+ goto update_role;
+ }
+
+ rn->rport = fc_remote_port_add(shost, 0, &ids);
+ if (!rn->rport) {
+ csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
+ rn->nport_id);
+ return;
+ }
+
+ ln->num_reg_rnodes++;
+ rport = rn->rport;
+ spin_lock_irq(shost->host_lock);
+ *((struct csio_rnode **)rport->dd_data) = rn;
+ spin_unlock_irq(shost->host_lock);
+
+ sp = &rn->rn_sparm;
+ rport->maxframe_size = ntohs(sp->csp.sp_bb_data);
+ if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
+ rport->supported_classes = FC_COS_CLASS3;
+ else
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+update_role:
+ if (rn->role & CSIO_RNFR_INITIATOR)
+ ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (rn->role & CSIO_RNFR_TARGET)
+ ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(rport, ids.roles);
+
+ rn->scsi_id = rport->scsi_target_id;
+
+ csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
+ rn->nport_id, ids.roles);
+}
+
+/*
+ * csio_unreg_rnode - Unregister a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_delete() to unregister this remote port with FC
+ * transport.
+ *
+ */
+void
+csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct fc_rport *rport = rn->rport;
+
+ rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
+ fc_remote_port_delete(rport);
+ ln->num_reg_rnodes--;
+
+ csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
+}
+
+/*
+ * csio_lnode_async_event - Async events from local port.
+ * @ln: lnode representing local port.
+ *
+ * Async events from local node that FC transport/SCSI ML
+ * should be made aware of (Eg: RSCN).
+ */
+void
+csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
+{
+ switch (fc_evt) {
+ case CSIO_LN_FC_RSCN:
+ /* Get payload of rscn from ln */
+ /* For each RSCN entry */
+ /*
+ * fc_host_post_event(shost,
+ * fc_get_event_number(),
+ * FCH_EVT_RSCN,
+ * rscn_entry);
+ */
+ break;
+ case CSIO_LN_FC_LINKUP:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_LINKDOWN:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_ATTRIB_UPDATE:
+ csio_fchost_attr_init(ln);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * csio_fchost_attr_init - Initialize FC transport attributes
+ * @ln: Lnode.
+ *
+ */
+void
+csio_fchost_attr_init(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
+ fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
+
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(shost) =
+ (csio_lnode_to_hw(ln))->fres_info.max_vnps;
+ fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_1GBIT;
+
+ fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data);
+ memset(fc_host_supported_fc4s(shost), 0,
+ sizeof(fc_host_supported_fc4s(shost)));
+ fc_host_supported_fc4s(shost)[7] = 1;
+
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+ fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * csio_get_host_port_id - sysfs entries for nport_id are
+ * populated/cached from this function
+ */
+static void
+csio_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ fc_host_port_id(shost) = ln->nport_id;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_type - Return FC local port type.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ if (csio_is_npiv_ln(ln))
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_state - Return FC local port state.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ char state[16];
+
+ spin_lock_irq(&hw->lock);
+
+ csio_lnode_state_to_str(ln, state);
+ if (!strcmp(state, "READY"))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else if (!strcmp(state, "OFFLINE"))
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_speed - Return link speed to FC transport.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_speed(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ switch (hw->pport[ln->portid].link_speed) {
+ case FW_PORT_CAP_SPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case FW_PORT_CAP_SPEED_10G:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_fabric_name - Return fabric name
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_rnode *rn = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
+ if (rn)
+ fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
+ else
+ fc_host_fabric_name(shost) = 0;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_stats - Return FC transport statistics.
+ * @shost: scsi host.
+ *
+ */
+static struct fc_host_statistics *
+csio_get_stats(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct fc_host_statistics *fhs = &ln->fch_stats;
+ struct fw_fcoe_port_stats fcoe_port_stats;
+ uint64_t seconds;
+
+ memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
+ csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
+
+ fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_frames));
+ fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_frames));
+ fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames);
+ fhs->fcp_input_requests += ln->stats.n_input_requests;
+ fhs->fcp_output_requests += ln->stats.n_output_requests;
+ fhs->fcp_control_requests += ln->stats.n_control_requests;
+ fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
+ fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
+ fhs->link_failure_count = ln->stats.n_link_down;
+ /* Seconds elapsed since the last device reset */
+ seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
+ do_div(seconds, 1000);
+ fhs->seconds_since_last_reset = seconds;
+
+ return fhs;
+}
+
+/*
+ * csio_set_rport_loss_tmo - Set the rport dev loss timeout
+ * @rport: fc rport.
+ * @timeout: new value for dev loss tmo.
+ *
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ */
+static void
+csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void
+csio_vport_set_state(struct csio_lnode *ln)
+{
+ struct fc_vport *fc_vport = ln->fc_vport;
+ struct csio_lnode *pln = ln->pln;
+ char state[16];
+
+ /* Set fc vport state based on physical lnode */
+ csio_lnode_state_to_str(pln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+
+ if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
+ fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
+ return;
+ }
+
+ /* Set fc vport state based on virtual lnode */
+ csio_lnode_state_to_str(ln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+static int
+csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to alloc vport */
+ /* Allocate Mbox request */
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+ ln->fcf_flowid = pln->fcf_flowid;
+ ln->portid = pln->portid;
+
+ csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ pln->fcf_flowid, pln->vnp_flowid, 0,
+ csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ /* FW is expected to complete vnp cmd in immediate mode
+ * without much delay.
+ * Otherwise, there will be an increase in IO latency since the
+ * HW lock is held till completion of the vnp mbox cmd.
+ */
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
+ ntohl(rsp->gen_wwn_to_vnpi));
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
+ csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
+ ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
+ ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
+ ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
+ csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
+ ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
+ ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
+ ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to free vport */
+ /* Allocate Mbox request */
+
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+
+ csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid, ln->vnp_flowid,
+ NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ }
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct csio_lnode *pln = shost_priv(shost);
+ struct csio_lnode *ln = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(pln);
+ uint8_t wwn[8];
+ int ret = -1;
+
+ ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
+ if (!ln)
+ goto error;
+
+ if (fc_vport->node_name != 0) {
+ u64_to_wwn(fc_vport->node_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwnn\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwnn(ln), wwn, 8);
+ }
+
+ if (fc_vport->port_name != 0) {
+ u64_to_wwn(fc_vport->port_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwpn\n");
+ goto error;
+ }
+
+ if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. wwpn already exists\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwpn(ln), wwn, 8);
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+
+ if (csio_fcoe_alloc_vnp(hw, ln))
+ goto error;
+
+ *(struct csio_lnode **)fc_vport->dd_data = ln;
+ ln->fc_vport = fc_vport;
+ if (!fc_vport->node_name)
+ fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
+ if (!fc_vport->port_name)
+ fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
+ csio_fchost_attr_init(ln);
+ return 0;
+error:
+ if (ln)
+ csio_shost_exit(ln);
+
+ return ret;
+}
+
+static int
+csio_vport_delete(struct fc_vport *fc_vport)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ int rmv;
+
+ spin_lock_irq(&hw->lock);
+ rmv = csio_is_hw_removing(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (rmv) {
+ csio_shost_exit(ln);
+ return 0;
+ }
+
+ /* Quiesce ios and send remove event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_close(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ if (fc_vport->vport_state != FC_VPORT_DISABLED)
+ csio_fcoe_free_vnp(hw, ln);
+
+ csio_shost_exit(ln);
+ return 0;
+}
+
+static int
+csio_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* disable vport */
+ if (disable) {
+ /* Quiesce ios and send stop event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_stop(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ csio_fcoe_free_vnp(hw, ln);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ csio_ln_err(ln, "vport disabled\n");
+ return 0;
+ } else {
+ /* enable vport */
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ if (csio_fcoe_alloc_vnp(hw, ln)) {
+ csio_ln_err(ln, "vport enabled failed.\n");
+ return -1;
+ }
+ csio_ln_err(ln, "vport enabled\n");
+ return 0;
+ }
+}
+
+static void
+csio_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct csio_rnode *rn;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rn = *((struct csio_rnode **)rport->dd_data);
+ ln = csio_rnode_to_lnode(rn);
+ hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+
+ /* return if driver is being removed or same rnode comes back online */
+ if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
+ goto out;
+
+ csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
+ rn, rn->nport_id, csio_rn_flowid(rn));
+
+ CSIO_INC_STATS(ln, n_dev_loss_tmo);
+
+ /*
+ * enqueue devloss event to event worker thread to serialize all
+ * rnode events.
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out;
+ }
+
+ if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+
+out:
+ spin_unlock_irq(&hw->lock);
+}
+
+/* FC transport functions template - Physical port */
+struct fc_function_template csio_fc_transport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .show_host_active_fc4s = 1,
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+ .dd_fcvport_size = sizeof(struct csio_lnode *),
+
+ .vport_create = csio_vport_create,
+ .vport_disable = csio_vport_disable,
+ .vport_delete = csio_vport_delete,
+};
+
+/* FC transport functions template - Virtual port */
+struct fc_function_template csio_fc_transport_vport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+
+};
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
new file mode 100644
index 00000000000..c38017b4af9
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_DEFS_H__
+#define __CSIO_DEFS_H__
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+
+#define CSIO_INVALID_IDX 0xFFFFFFFF
+#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
+#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
+#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
+#define CSIO_DID_MASK 0xFFFFFF
+#define CSIO_WORD_TO_BYTE 4
+
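+/*
+ * Fallback readq()/writeq() for platforms that do not provide them:
+ * the 64-bit access is composed of two 32-bit MMIO accesses (low word
+ * first); note that such an access is not atomic.
+ */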
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline int
+csio_list_deleted(struct list_head *list)
+{
+ return ((list->next == list) && (list->prev == list));
+}
+
+#define csio_list_next(elem) (((struct list_head *)(elem))->next)
+#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
+
+/* State machine */
+typedef void (*csio_sm_state_t)(void *, uint32_t);
+
+struct csio_sm {
+ struct list_head sm_list;
+ csio_sm_state_t sm_state;
+};
+
+static inline void
+csio_set_state(void *smp, void *state)
+{
+ ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+}
+
+static inline void
+csio_init_state(struct csio_sm *smp, void *state)
+{
+ csio_set_state(smp, state);
+}
+
+static inline void
+csio_post_event(void *smp, uint32_t evt)
+{
+ ((struct csio_sm *)smp)->sm_state(smp, evt);
+}
+
+static inline csio_sm_state_t
+csio_get_state(void *smp)
+{
+ return ((struct csio_sm *)smp)->sm_state;
+}
+
+static inline bool
+csio_match_state(void *smp, void *state)
+{
+ return (csio_get_state(smp) == (csio_sm_state_t)state);
+}
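+
+/*
+ * These helpers cast the opaque object pointer straight to struct
+ * csio_sm, so the state machine header is expected to sit at the start
+ * of the enclosing object. A state is just a handler function; posting
+ * an event runs the current handler, which may switch states via
+ * csio_set_state().
+ *
+ * Illustrative sketch (my_obj, sm_state_a/sm_state_b and MY_EVT are
+ * hypothetical names, not part of this driver):
+ *
+ *	struct my_obj {
+ *		struct csio_sm sm;	(must be the first member)
+ *		...
+ *	};
+ *
+ *	static void sm_state_a(void *smp, uint32_t evt)
+ *	{
+ *		if (evt == MY_EVT)
+ *			csio_set_state(smp, sm_state_b);
+ *	}
+ *
+ *	csio_init_state(&obj->sm, sm_state_a);
+ *	csio_post_event(obj, MY_EVT);
+ *	if (csio_match_state(obj, sm_state_b))
+ *		...
+ *
+ * csio_hw.c below uses this pattern: see csio_is_hw_ready(), which
+ * checks csio_match_state(hw, csio_hws_ready).
+ */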
+
+#define CSIO_ASSERT(cond) BUG_ON(!(cond))
+
+#ifdef __CSIO_DEBUG__
+#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
+#else
+#define CSIO_DB_ASSERT(__c)
+#endif
+
+#endif /* ifndef __CSIO_DEFS_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
new file mode 100644
index 00000000000..8ecdb94a59f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -0,0 +1,4395 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_force_master;
+int csio_dbg_level = 0xFEFF;
+unsigned int csio_port_mask = 0xf;
+
+/* Default FW event queue entries. */
+static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;
+
+/* Default MSI param level */
+int csio_msi = 2;
+
+/* FCoE function instances */
+static int dev_num;
+
+/* FCoE Adapter types & its description */
+static const struct csio_adap_desc csio_fcoe_adapters[] = {
+ {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
+ {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
+ {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
+ {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
+ {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
+ {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
+ {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
+ {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
+ {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
+ {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
+ {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
+ {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
+ {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
+ {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
+ {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
+ {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+};
+
+static void csio_mgmtm_cleanup(struct csio_mgmtm *);
+static void csio_hw_mbm_cleanup(struct csio_hw *);
+
+/* State machine forward declarations */
+static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
+
+static void csio_hw_initialize(struct csio_hw *hw);
+static void csio_evtq_stop(struct csio_hw *hw);
+static void csio_evtq_start(struct csio_hw *hw);
+
+int csio_is_hw_ready(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_ready);
+}
+
+int csio_is_hw_removing(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_removing);
+}
+
+
+/*
+ * csio_hw_wait_op_done_val - wait until an operation is completed
+ * @hw: the HW module
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
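+ *
+ * Used below by the MC/EDC BIST reads (waiting for START_BIST to clear)
+ * and by the serial flash helpers (waiting for SF_BUSY to clear).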
+ */
+static int
+csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
+ int polarity, int attempts, int delay, uint32_t *valp)
+{
+ uint32_t val;
+ while (1) {
+ val = csio_rd_reg32(hw, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+void
+csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
+ uint32_t value)
+{
+ uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
+
+ csio_wr_reg32(hw, val | value, reg);
+ /* Flush */
+ csio_rd_reg32(hw, reg);
+}
+
+/*
+ * csio_hw_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ MC_BIST_CMD);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_hw_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+ EDC_BIST_CMD + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_mem_win_rw - read/write memory through PCIE memory window
+ * @hw: the adapter
+ * @addr: address of first byte requested
+ * @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ * MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ * address @addr.
+ */
+static int
+csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
+{
+ int i;
+
+ /*
+ * Setup offset into PCIE memory window. Address must be a
+ * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
+ * ensure that changes propagate before we attempt to use the new
+ * values.)
+ */
+ csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
+ PCIE_MEM_ACCESS_OFFSET);
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
+
+ /* Transfer data 4 bytes at a time, up to MEMWIN0_APERTURE */
+ for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
+ if (dir)
+ *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
+ else
+ csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
+ }
+
+ return 0;
+}
+
+/*
+ * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
+ uint32_t *buf, int dir)
+{
+ uint32_t pos, start, end, offset, memoffset;
+ int ret;
+ uint32_t *data;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2
+ */
+ memoffset = (mtype * (5 * 1024 * 1024));
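+ /*
+ * e.g. mtype == MEM_MC (2) with addr == 0x100 yields an adapter
+ * memory offset of 10MB + 0x100 for the window accesses below.
+ */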
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
+ * at a time so we need to round down the start and round up the end.
+ * We'll start copying out of the first line at (addr - start) a word
+ * at a time.
+ */
+ start = addr & ~(MEMWIN0_APERTURE-1);
+ end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+ offset = (addr - start)/sizeof(__be32);
+
+ for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+ /*
+ * If we're writing, copy the data from the caller's memory
+ * buffer
+ */
+ if (!dir) {
+ /*
+ * If we're doing a partial write, then we need to do
+ * a read-modify-write ...
+ */
+ if (offset || len < MEMWIN0_APERTURE) {
+ ret = csio_mem_win_rw(hw, pos, data, 1);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+ }
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ data[offset++] = *buf++;
+ len -= sizeof(__be32);
+ }
+ }
+
+ /*
+ * Transfer a block of memory and bail if there's an error.
+ */
+ ret = csio_mem_win_rw(hw, pos, data, dir);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+
+ /*
+ * If we're reading, copy the data into the caller's memory
+ * buffer.
+ */
+ if (dir)
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ *buf++ = data[offset++];
+ len -= sizeof(__be32);
+ }
+ }
+
+ kfree(data);
+
+ return 0;
+}
+
+static int
+csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
+{
+ return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+}
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 512
+#define VPD_INFO_FLD_HDR_SIZE 3
+
+/*
+ * csio_hw_seeprom_read - read a serial EEPROM location
+ * @hw: hw to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+static int
+csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
+{
+ uint16_t val = 0;
+ int attempts = EEPROM_MAX_RD_POLL;
+ uint32_t base = hw->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
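+ /*
+ * PCI VPD capability read protocol: write the VPD address with the
+ * flag bit (PCI_VPD_ADDR_F) clear, then poll until the device sets
+ * the flag to indicate that the data register holds valid data.
+ */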
+ pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
+
+ do {
+ udelay(10);
+ pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+
+ return 0;
+}
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd_hdr {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+};
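+
+/*
+ * Each information field in the VPD-R section is a 3-byte header (a
+ * 2-character keyword such as "SN" followed by a 1-byte data length)
+ * and then the data itself; VPD_INFO_FLD_HDR_SIZE covers that header.
+ */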
+
+/*
+ * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
+ * the VPD
+ * @v: Pointer to buffered vpd data structure
+ * @kw: The keyword to search for
+ *
+ * Returns the offset within the VPD of the keyword's information
+ * field value, or -EINVAL if the keyword is not found.
+ */
+static int
+csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+ int32_t i;
+ int32_t offset, len;
+ const uint8_t *buf = &v->id_tag;
+ const uint8_t *vpdr_len = &v->vpdr_tag;
+ offset = sizeof(struct t4_vpd_hdr);
+ len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
+
+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
+ return -EINVAL;
+
+ for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
+ if (memcmp(buf + i, kw, 2) == 0) {
+ i += VPD_INFO_FLD_HDR_SIZE;
+ return i;
+ }
+
+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+ }
+
+ return -EINVAL;
+}
+
+static int
+csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
+{
+ *pos = pci_find_capability(pdev, cap);
+ if (*pos)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
+ * @hw: HW module
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int
+csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
+{
+ int i, ret, ec, sn, addr;
+ uint8_t *vpd, csum;
+ const struct t4_vpd_hdr *v;
+ /* To get around compilation warning from strstrip */
+ char *s;
+
+ if (csio_is_valid_vpd(hw))
+ return 0;
+
+ ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
+ &hw->params.pci.vpd_cap_addr);
+ if (ret)
+ return -EINVAL;
+
+ vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
+ if (vpd == NULL)
+ return -ENOMEM;
+
+ /*
+ * Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
+ ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ for (i = 0; i < VPD_LEN; i += 4) {
+ ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
+ if (ret) {
+ kfree(vpd);
+ return ret;
+ }
+ }
+
+ /* Reset the VPD flag! */
+ hw->flags &= (~CSIO_HWF_VPD_VALID);
+
+ v = (const struct t4_vpd_hdr *)vpd;
+
+#define FIND_VPD_KW(var, name) do { \
+ var = csio_hw_get_vpd_keyword_val(v, name); \
+ if (var < 0) { \
+ csio_err(hw, "missing VPD keyword " name "\n"); \
+ kfree(vpd); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
+ kfree(vpd);
+ return -EINVAL;
+ }
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, v->id_data, ID_LEN);
+ s = strstrip(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ s = strstrip(p->ec);
+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ s = strstrip(p->sn);
+
+ csio_valid_vpd_copied(hw);
+
+ kfree(vpd);
+ return 0;
+}
+
+/*
+ * csio_hw_sf1_read - read data from the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
+ int32_t lock, uint32_t *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+ if (!ret)
+ *valp = csio_rd_reg32(hw, SF_DATA);
+ return ret;
+}
+
+/*
+ * csio_hw_sf1_write - write data to the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
+ int32_t lock, uint32_t val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, val, SF_DATA);
+ csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+
+ return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+}
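+
+/*
+ * csio_hw_sf1_write() and csio_hw_sf1_read() are normally chained: a
+ * flash command is written first with @cont set and the reply is then
+ * read back. csio_hw_get_flash_params() below, for instance, issues
+ * SF_RD_ID this way and reads back the 3-byte flash ID.
+ */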
+
+/*
+ * csio_hw_flash_wait_op - wait for a flash operation to complete
+ * @hw: the HW module
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int
+csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
+{
+ int ret;
+ uint32_t status;
+
+ while (1) {
+ ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
+ if (ret != 0)
+ return ret;
+
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/*
+ * csio_hw_read_flash - read words from serial flash
+ * @hw: the HW module
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int
+csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
+ uint32_t *data, int32_t byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_write_flash - write up to a page of data to the serial flash
+ * @hw: the hw
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int
+csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
+ uint32_t n, const uint8_t *data)
+{
+ int ret = -EINVAL;
+ uint32_t buf[64];
+ uint32_t i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto unlock;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
+ if (ret != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = csio_hw_flash_wait_op(hw, 8, 1);
+ if (ret)
+ goto unlock;
+
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
+ csio_err(hw,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EINVAL;
+ }
+
+ return 0;
+
+unlock:
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_erase_sectors - erase a range of flash sectors
+ * @hw: the HW module
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int
+csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_sf1_write(hw, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8));
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_flash_wait_op(hw, 14, 500);
+ if (ret != 0)
+ goto out;
+
+ start++;
+ }
+out:
+ if (ret)
+ csio_err(hw, "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_cfg_addr - return the address of the flash
+ * configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_hw_flash_cfg_addr(struct csio_hw *hw)
+{
+ if (hw->params.sf_size == 0x100000)
+ return FPGA_FLASH_CFG_OFFSET;
+ else
+ return FLASH_CFG_OFFSET;
+}
+
+static void
+csio_hw_print_fw_version(struct csio_hw *hw, char *str)
+{
+ csio_info(hw, "%s: %u.%u.%u.%u\n", str,
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+}
+
+/*
+ * csio_hw_get_fw_version - read the firmware version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int
+csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
+{
+ return csio_hw_read_flash(hw, FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_get_tp_version - read the TP microcode version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int
+csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
+{
+ return csio_hw_read_flash(hw, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_check_fw_version - check if the FW is compatible with
+ * this driver
+ * @hw: HW module
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, or a negative error if the version could
+ * not be read or there's a major/minor version mismatch.
+ */
+static int
+csio_hw_check_fw_version(struct csio_hw *hw)
+{
+ int ret, major, minor, micro;
+
+ ret = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (!ret)
+ ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
+ minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
+ micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ csio_err(hw, "card FW has major version %u, driver wants %u\n",
+ major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch */
+ return -EINVAL;
+}
+
+/*
+ * csio_hw_fw_dload - download firmware.
+ * @hw: HW module
+ * @fw_data: firmware image to write.
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+static int
+csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
+{
+ uint32_t csum;
+ int32_t addr;
+ int ret;
+ uint32_t i;
+ uint8_t first_page[SF_PAGE_SIZE];
+ const __be32 *p = (const __be32 *)fw_data;
+ struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
+ uint32_t sf_sec_size;
+
+ if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
+ csio_err(hw, "Serial Flash data invalid\n");
+ return -EINVAL;
+ }
+
+ if (!size) {
+ csio_err(hw, "FW image has no data\n");
+ return -EINVAL;
+ }
+
+ if (size & 511) {
+ csio_err(hw, "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+
+ if (ntohs(hdr->len512) * 512 != size) {
+ csio_err(hw, "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+
+ if (size > FW_MAX_SIZE) {
+ csio_err(hw, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EINVAL;
+ }
+
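+ /*
+ * A valid FW image is laid out so that the sum of all of its
+ * big-endian 32-bit words is 0xffffffff; verify that before
+ * touching the flash.
+ */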
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+
+ csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
+ FW_START_SEC, FW_START_SEC + i - 1);
+
+ ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
+ FW_START_SEC + i - 1);
+ if (ret) {
+ csio_err(hw, "Flash Erase failed\n");
+ goto out;
+ }
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
+ FW_IMG_START, FW_IMG_START + size);
+
+ addr = FW_IMG_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = csio_hw_write_flash(hw,
+ FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver),
+ (const uint8_t *)&hdr->fw_ver);
+
+out:
+ if (ret)
+ csio_err(hw, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
+static int
+csio_hw_get_flash_params(struct csio_hw *hw)
+{
+ int ret;
+ uint32_t info = 0;
+
+ ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret != 0)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ hw->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ hw->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ hw->params.sf_size = 1 << info;
+
+ return 0;
+}
+
+static void
+csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
+{
+ uint16_t val;
+ uint32_t pcie_cap;
+
+ if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
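+ /*
+ * Bits 3:0 of PCI_EXP_DEVCTL2 hold the Completion Timeout value;
+ * keep the rest of the register and merge in the requested range.
+ */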
+ val &= 0xfff0;
+ val |= range;
+ pci_write_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
+
+/*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static uint32_t
+csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+ u32 val = 0;
+ struct csio_mb *mbp;
+ int rv;
+ struct fw_ldst_cmd *ldst_cmd;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ pci_read_config_dword(hw->pdev, reg, &val);
+ return val;
+ }
+
+ csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+
+ rv = csio_mb_issue(hw, mbp);
+
+ /*
+ * If the LDST Command succeeded, extract the returned register
+ * value. Otherwise read it directly ourselves.
+ */
+ if (rv == 0) {
+ ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ val = ntohl(ldst_cmd->u.pcie.data[0]);
+ } else
+ pci_read_config_dword(hw->pdev, reg, &val);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return val;
+} /* csio_read_pcie_cfg4 */
+
+static int
+csio_hw_set_mem_win(struct csio_hw *hw)
+{
+ u32 bar0;
+
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+ bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
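+ /*
+ * WINDOW() appears to encode the aperture as log2(size in bytes) - 10,
+ * i.e. in power-of-two units starting at 1KB, hence the
+ * ilog2(aperture) - 10 values below.
+ */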
+ csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
+ csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
+ csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ return 0;
+} /* csio_hw_set_mem_win */
+
+
+
+/*****************************************************************************/
+/* HW State machine assists */
+/*****************************************************************************/
+
+static int
+csio_hw_dev_ready(struct csio_hw *hw)
+{
+ uint32_t reg;
+ int cnt = 6;
+
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
+ mdelay(100);
+
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
+ (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+ csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
+ return -EIO;
+ }
+
+ hw->pfn = SOURCEPF_GET(reg);
+
+ return 0;
+}
+
+/*
+ * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
+ * @hw: HW module
+ * @state: Device state
+ *
+ * FW_HELLO_CMD has to be polled for completion.
+ */
+static int
+csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
+{
+ struct csio_mb *mbp;
+ int rv = 0;
+ enum csio_dev_master master;
+ enum fw_retval retval;
+ uint8_t mpfn;
+ char state_str[16];
+ int retries = FW_CMD_HELLO_RETRIES;
+
+ memset(state_str, 0, sizeof(state_str));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ rv = -ENOMEM;
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto out;
+ }
+
+ master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;
+
+retry:
+ csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
+ hw->pfn, master, NULL);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv) {
+ csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
+ goto out_free_mb;
+ }
+
+ csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
+ rv = -EINVAL;
+ goto out_free_mb;
+ }
+
+ /* Firmware has designated us to be master */
+ if (hw->pfn == mpfn) {
+ hw->flags |= CSIO_HWF_MASTER;
+ } else if (*state == CSIO_DEV_STATE_UNINIT) {
+ /*
+ * If we're not the Master PF then we need to wait around for
+ * the Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable
+ * PF and there is no current Master PF; a Master PF may show up
+ * momentarily and we wouldn't want to fail pointlessly. (This
+ * can happen when an OS loads lots of different drivers rapidly
+ * at the same time). In this case, the Master PF returned by
+ * the firmware will be PCIE_FW_MASTER_MASK so the test below
+ * will work ...
+ */
+
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ uint32_t pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialized is indicated
+ * by the firmware, keep waiting until we exhaust
+ * our timeout ... and then retry if we haven't
+ * exhausted our retries ...
+ */
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW);
+ if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ rv = -ETIMEDOUT;
+ break;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or Initialized condition;
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & PCIE_FW_ERR) {
+ *state = CSIO_DEV_STATE_ERR;
+ rv = -ETIMEDOUT;
+ } else if (pcie_fw & PCIE_FW_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * there's now a valid Master PF, grab its identity
+ * for our caller.
+ */
+ if (mpfn == PCIE_FW_MASTER_MASK &&
+ (pcie_fw & PCIE_FW_MASTER_VLD))
+ mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+ break;
+ }
+ hw->flags &= ~CSIO_HWF_MASTER;
+ }
+
+ switch (*state) {
+ case CSIO_DEV_STATE_UNINIT:
+ strcpy(state_str, "Initializing");
+ break;
+ case CSIO_DEV_STATE_INIT:
+ strcpy(state_str, "Initialized");
+ break;
+ case CSIO_DEV_STATE_ERR:
+ strcpy(state_str, "Error");
+ break;
+ default:
+ strcpy(state_str, "Unknown");
+ break;
+ }
+
+ if (hw->pfn == mpfn)
+ csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
+ hw->pfn, state_str);
+ else
+ csio_info(hw,
+ "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
+ hw->pfn, mpfn, state_str);
+
+out_free_mb:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return rv;
+}
+
+/*
+ * csio_do_bye - Perform the BYE FW Mailbox command and process response.
+ * @hw: HW module
+ *
+ */
+static int
+csio_do_bye(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of BYE command failed\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_do_reset- Perform the device reset.
+ * @hw: HW module
+ * @fw_rst: FW reset
+ *
+ * If fw_rst is set, issues the FW RESET mailbox command;
+ * otherwise performs a PIO reset of the function.
+ */
+static int
+csio_do_reset(struct csio_hw *hw, bool fw_rst)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ if (!fw_rst) {
+ /* PIO reset */
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ return 0;
+ }
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed.n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
+ uint16_t caps;
+
+ caps = ntohs(rsp->fcoecaps);
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
+ csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
+ return -EINVAL;
+ }
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
+ csio_err(hw, "No FCoE Control Offload capability\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * PCIE_FW_MASTER_MASK).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+static int
+csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
+{
+ enum fw_retval retval = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (retval == 0 || force) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return retval ? -EINVAL : 0;
+}
+
+/*
+ * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by csio_hw_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+static int
+csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ msleep(100);
+ if (csio_do_reset(hw, true) == 0)
+ return 0;
+ }
+
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ msleep(2000);
+ } else {
+ int ms;
+
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+ return 0;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @fw_data: the firmware image to write
+ * @size: image size
+ * @force: force upgrade even if firmware doesn't cooperate
+ *
+ * Perform all of the steps necessary for upgrading an adapter's
+ * firmware image. Normally this requires the cooperation of the
+ * existing firmware in order to halt all existing activities
+ * but if an invalid mailbox token is passed in we skip that step
+ * (though we'll still put the adapter microprocessor into RESET in
+ * that case).
+ *
+ * On successful return the new firmware will have been loaded and
+ * the adapter will have been fully RESET losing all previous setup
+ * state. On unsuccessful return the adapter may be completely hosed ...
+ * positive errno indicates that the adapter is ~probably~ intact, a
+ * negative errno indicates that things are looking bad ...
+ */
+static int
+csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
+ const u8 *fw_data, uint32_t size, int32_t force)
+{
+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+ int reset, ret;
+
+ ret = csio_hw_fw_halt(hw, mbox, force);
+ if (ret != 0 && !force)
+ return ret;
+
+ ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Older versions of the firmware don't understand the new
+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+ * restart. So for newly loaded older firmware we'll have to do the
+ * RESET for it so it starts up on a clean slate. We can tell if
+ * the newly loaded firmware will handle this right by checking
+ * its header flags to see if it advertises the capability.
+ */
+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ return csio_hw_fw_restart(hw, mbox, reset);
+}
+
+
+/*
+ * csio_hw_fw_config_file - setup an adapter via a Configuration File
+ * @hw: the HW module
+ * @mtype: the memory type where the Configuration File is located
+ * @maddr: the memory address where the Configuration File is located
+ * @finiver: return value for CF [fini] version
+ * @finicsum: return value for CF [fini] checksum
+ * @cfcsum: return value for CF computed checksum
+ *
+ * Issue a command to get the firmware to process the Configuration
+ * File located at the specified mtype/maddress. If the Configuration
+ * File is processed successfully and return value pointers are
+ * provided, the Configuration File "[fini]" section version and
+ * checksum values will be returned along with the computed checksum.
+ * It's up to the caller to decide how it wants to respond to the
+ * checksums not matching, but it is recommended that a prominent warning
+ * be emitted in order to help people rapidly identify changed or
+ * corrupted Configuration Files.
+ *
+ * Also note that it's possible to modify things like "niccaps",
+ * "toecaps",etc. between processing the Configuration File and telling
+ * the firmware to use the new configuration. Callers which want to
+ * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ * Configuration Files if they want to do this.
+ */
+static int
+csio_hw_fw_config_file(struct csio_hw *hw,
+ unsigned int mtype, unsigned int maddr,
+ uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
+{
+ struct csio_mb *mbp;
+ struct fw_caps_config_cmd *caps_cmd;
+ int rv = -EINVAL;
+ enum fw_retval ret;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd->cfvalid_to_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ if (finiver)
+ *finiver = ntohl(caps_cmd->finiver);
+ if (finicsum)
+ *finicsum = ntohl(caps_cmd->finicsum);
+ if (cfcsum)
+ *cfcsum = ntohl(caps_cmd->cfcsum);
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp)) {
+ rv = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+/*
+ * csio_get_device_params - Get device parameters.
+ * @hw: HW module
+ *
+ */
+static int
+csio_get_device_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 param[6];
+ int i, j = 0;
+
+ /* Initialize portids to -1 */
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ hw->pport[i].portid = -1;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get port vec information. */
+ param[0] = FW_PARAM_DEV(PORTVEC);
+
+ /* Get Core clock. */
+ param[1] = FW_PARAM_DEV(CCLK);
+
+ /* Get EQ id start and end. */
+ param[2] = FW_PARAM_PFVF(EQ_START);
+ param[3] = FW_PARAM_PFVF(EQ_END);
+
+ /* Get IQ id start and end. */
+ param[4] = FW_PARAM_PFVF(IQFLINT_START);
+ param[5] = FW_PARAM_PFVF(IQFLINT_END);
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(param), param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(param), param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* cache the information. */
+ hw->port_vec = param[0];
+ hw->vpd.cclk = param[1];
+ wrm->fw_eq_start = param[2];
+ wrm->fw_iq_start = param[4];
+
+ /* Using FW configured max iqs & eqs */
+ if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
+ !csio_is_hw_master(hw)) {
+ hw->cfg_niq = param[5] - param[4] + 1;
+ hw->cfg_neq = param[3] - param[2] + 1;
+ csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
+ hw->cfg_niq, hw->cfg_neq);
+ }
+
+ hw->port_vec &= csio_port_mask;
+
+ hw->num_pports = hweight32(hw->port_vec);
+
+ csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
+ hw->port_vec, hw->num_pports);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ while ((hw->port_vec & (1 << j)) == 0)
+ j++;
+ hw->pport[i].portid = j++;
+ csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
+ }
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+
+/*
+ * csio_config_device_caps - Get and set device capabilities.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_device_caps(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv = -EINVAL;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
+ goto out;
+ }
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp))
+ goto out;
+
+ /* Don't config device capabilities if already configured */
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ rv = 0;
+ goto out;
+ }
+
+ /* Write back desired device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
+ false, true, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+static int
+csio_config_global_rss(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_config_pfvf - Configure Physical/Virtual functions settings.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_pfvf(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * For now, allow all PFs to access all ports using a pmask
+ * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
+ * need to provide access based on some rule.
+ */
+ csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
+ CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
+ CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
+ *
+ */
+static int
+csio_enable_ports(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
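+ /*
+ * For each port found earlier, read its capabilities from the
+ * firmware and then write them back with Rx/Tx pause enabled,
+ * reusing a single mailbox buffer for the whole sequence.
+ */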
+ for (i = 0; i < hw->num_pports; i++) {
+ portid = hw->pport[i].portid;
+
+ /* Read PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+ false, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_port_rsp(hw, mbp, &retval,
+ &hw->pport[i].pcap);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Write back PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
+ (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ } /* For all ports */
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_get_fcoe_resinfo - Read fcoe fw resource info.
+ * @hw: HW module
+ * Issued with lock held.
+ */
+static int
+csio_get_fcoe_resinfo(struct csio_hw *hw)
+{
+ struct csio_fcoe_res_info *res_info = &hw->fres_info;
+ struct fw_fcoe_res_info_cmd *rsp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FW resource information */
+ csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ res_info->e_d_tov = ntohs(rsp->e_d_tov);
+ res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
+ res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
+ res_info->r_r_tov = ntohs(rsp->r_r_tov);
+ res_info->max_xchgs = ntohl(rsp->max_xchgs);
+ res_info->max_ssns = ntohl(rsp->max_ssns);
+ res_info->used_xchgs = ntohl(rsp->used_xchgs);
+ res_info->used_ssns = ntohl(rsp->used_ssns);
+ res_info->max_fcfs = ntohl(rsp->max_fcfs);
+ res_info->max_vnps = ntohl(rsp->max_vnps);
+ res_info->used_fcfs = ntohl(rsp->used_fcfs);
+ res_info->used_vnps = ntohl(rsp->used_vnps);
+
+ csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
+ res_info->max_xchgs);
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 _param[1];
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * Find out whether we're dealing with a version of
+ * the firmware which has configuration file support.
+ */
+ _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(_param), _param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(_param), _param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
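+ /*
+ * The returned parameter encodes the location the firmware
+ * designates for the Configuration File: memory type in the Y
+ * field and the address, in 64KB units, in the Z field (decoded
+ * by the callers).
+ */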
+ *param = _param[0];
+
+ return 0;
+}
+
+static int
+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
+{
+ int ret = 0;
+ const struct firmware *cf;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+ unsigned int mtype = 0, maddr = 0;
+ uint32_t *cfg_data;
+ int value_to_add = 0;
+
+ ret = request_firmware(&cf, CSIO_CF_FNAME, dev);
+ if (ret < 0) {
+ csio_err(hw, "could not find config file " CSIO_CF_FNAME
+ ", err: %d\n", ret);
+ return -ENOENT;
+ }
+
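+ /*
+ * The config file is written to adapter memory as 32-bit words,
+ * so pad the allocation up to a multiple of 4 bytes.
+ */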
+ if (cf->size%4 != 0)
+ value_to_add = 4 - (cf->size % 4);
+
+ cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
+ if (cfg_data == NULL) {
+ release_firmware(cf);
+ return -ENOMEM;
+ }
+
+ memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
+
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ kfree(cfg_data);
+ release_firmware(cf);
+ return -EINVAL;
+ }
+
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+
+ ret = csio_memory_write(hw, mtype, maddr,
+ cf->size + value_to_add, cfg_data);
+ if (ret == 0) {
+ csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
+ strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ }
+
+ kfree(cfg_data);
+ release_firmware(cf);
+
+ return ret;
+}
+
+/*
+ * HW initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration -- either using the configuration
+ * file stored in flash on the adapter or using a filesystem-local file
+ * if available.
+ *
+ * If we don't have configuration file support in the firmware, then we'll
+ * have to set things up the old fashioned way with hard-coded register
+ * writes and firmware commands ...
+ */
+
+/*
+ * Attempt to initialize the HW via a Firmware Configuration File.
+ */
+static int
+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+{
+ unsigned int mtype, maddr;
+ int rv;
+ uint32_t finiver, finicsum, cfcsum;
+ int using_flash;
+ char path[64];
+
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto bye;
+ }
+
+ /*
+ * If we have a configuration file on the host, use that.
+ * Otherwise, use the configuration file stored in the
+ * adapter flash ...
+ */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_config(hw, fw_cfg_param, path);
+ spin_lock_irq(&hw->lock);
+ if (rv != 0) {
+ if (rv == -ENOENT) {
+ /*
+ * config file was not found. Use default
+ * config file from flash.
+ */
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = csio_hw_flash_cfg_addr(hw);
+ using_flash = 1;
+ } else {
+ /*
+ * Flashing the host config file failed; bail out so
+ * the caller can fall back to the hard-wired config.
+ */
+ goto bye;
+ }
+ } else {
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+ using_flash = 0;
+ }
+
+ hw->cfg_store = (uint8_t)mtype;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File.
+ */
+ rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
+ &finicsum, &cfcsum);
+ if (rv != 0)
+ goto bye;
+
+ hw->cfg_finiver = finiver;
+ hw->cfg_finicsum = finicsum;
+ hw->cfg_cfcsum = cfcsum;
+ hw->cfg_csum_status = true;
+
+ if (finicsum != cfcsum) {
+ csio_warn(hw,
+ "Config File checksum mismatch: csum=%#x, computed=%#x\n",
+ finicsum, cfcsum);
+
+ hw->cfg_csum_status = false;
+ }
+
+ /*
+ * Note that from here on we're operating with parameters supplied
+ * by the firmware's Configuration File rather than hard-wired
+ * initialization constants buried in the driver.
+ */
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto bye;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+ csio_info(hw,
+ "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
+ (using_flash ? "in device FLASH" : path), finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ...
+ */
+bye:
+ hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
+ csio_dbg(hw, "Configuration file error %d\n", rv);
+ return rv;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver-supplied
+ * parameters ...
+ */
+static int
+csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
+{
+ int rv;
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto out;
+ }
+
+ /* Get and set device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Config Global RSS command */
+ rv = csio_config_global_rss(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure PF/VF capabilities of device */
+ rv = csio_config_pfvf(hw);
+ if (rv != 0)
+ goto out;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+out:
+ return rv;
+}
+
+/*
+ * Returns -EINVAL if attempts to flash the firmware failed
+ * else returns 0,
+ * if flashing was not attempted because the card had the
+ * latest firmware ECANCELED is returned
+ */
+static int
+csio_hw_flash_fw(struct csio_hw *hw)
+{
+ int ret = -ECANCELED;
+ const struct firmware *fw;
+ const struct fw_hdr *hdr;
+ u32 fw_ver;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev ;
+
+ if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
+ csio_err(hw, "could not find firmware image " CSIO_FW_FNAME "\n");
+ return -EINVAL;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ fw_ver = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) {
+ /* wrong major version, won't do */
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ fw_ver > hw->fwrev) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
+ /*force=*/false);
+ if (!ret)
+ csio_info(hw, "firmware upgraded to version %pI4 from "
+ CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ else
+ csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+
+/*
+ * csio_hw_configure - Configure HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_configure(struct csio_hw *hw)
+{
+ int reset = 1;
+ int rv;
+ u32 param[1];
+
+ rv = csio_hw_dev_ready(hw);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* HW version */
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+
+ /* Needed for FW download */
+ rv = csio_hw_get_flash_params(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Set pci completion timeout value to 4 seconds. */
+ csio_set_pcie_completion_timeout(hw, 0xd);
+
+ csio_hw_set_mem_win(hw);
+
+ rv = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (rv != 0)
+ goto out;
+
+ csio_hw_print_fw_version(hw, "Firmware revision");
+
+ rv = csio_do_hello(hw, &hw->fw_state);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Read vpd */
+ rv = csio_hw_get_vpd_params(hw, &hw->vpd);
+ if (rv != 0)
+ goto out;
+
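+ /*
+ * Only the master function on a device whose firmware is not yet
+ * initialized performs the firmware upgrade and configuration
+ * below; otherwise we simply pick up the device parameters and
+ * capabilities once the firmware reports the initialized state.
+ */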
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_hw_check_fw_version(hw);
+ if (rv == -EINVAL) {
+
+ /* Do firmware update */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv == 0) {
+ reset = 0;
+ /*
+ * Note that the chip was reset as part of the
+ * firmware upgrade so we don't reset it again
+ * below and grab the new firmware version.
+ */
+ rv = csio_hw_check_fw_version(hw);
+ }
+ }
+ /*
+ * If the firmware doesn't support Configuration
+ * Files, use the old Driver-based, hard-wired
+ * initialization. Otherwise, try using the
+ * Configuration File support and fall back to the
+ * Driver-based initialization if there's no
+ * Configuration File found.
+ */
+ if (csio_hw_check_fwconfig(hw, param) == 0) {
+ rv = csio_hw_use_fwconfig(hw, reset, param);
+ if (rv == -ENOENT)
+ goto out;
+ if (rv != 0) {
+ csio_info(hw,
+ "No Configuration File present "
+ "on adapter. Using hard-wired "
+ "configuration parameters.\n");
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+ } else {
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+
+ if (rv != 0)
+ goto out;
+
+ } else {
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Get device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+ goto out;
+ }
+ } /* if not master */
+
+out:
+ return;
+}
+
+/*
+ * csio_hw_initialize - Initialize HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_initialize(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv;
+ int i;
+
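+ /*
+ * The master function on an uninitialized device issues
+ * FW_INITIALIZE_CMD once; everyone else skips straight to
+ * resource discovery and queue configuration.
+ */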
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ goto out;
+
+ csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
+ goto free_and_out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
+ retval);
+ goto free_and_out;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ rv = csio_get_fcoe_resinfo(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
+ goto out;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ rv = csio_config_queues(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0) {
+ csio_err(hw, "Config of queues failed!: %d\n", rv);
+ goto out;
+ }
+
+ for (i = 0; i < hw->num_pports; i++)
+ hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_enable_ports(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to enable ports: %d\n", rv);
+ goto out;
+ }
+ }
+
+ csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
+ return;
+
+free_and_out:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return;
+}
+
+#define PF_INTR_MASK (PFSW | PFCIM)
+
+/*
+ * csio_hw_intr_enable - Enable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Enable interrupts in HW registers.
+ */
+static void
+csio_hw_intr_enable(struct csio_hw *hw)
+{
+ uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+
+ /*
+ * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
+ * by FW, so do nothing for INTX.
+ */
+ if (hw->intr_mode == CSIO_IM_MSIX)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), vec);
+ else if (hw->intr_mode == CSIO_IM_MSI)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), 0);
+
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+
+ /* Turn on MB interrupts - this will internally flush PIO as well */
+ csio_mb_intr_enable(hw);
+
+ /* These are common registers - only a master can modify them */
+ if (csio_is_hw_master(hw)) {
+ /*
+ * Disable the Serial FLASH interrupt, if enabled!
+ */
+ pl &= (~SF);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
+ EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
+ ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
+ ERR_DATA_CPL_ON_HIGH_QID1 |
+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
+ SGE_INT_ENABLE3);
+ csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+ }
+
+ hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
+
+}
+
+/*
+ * csio_hw_intr_disable - Disable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Turn off Mailbox and PCI_PF_CFG interrupts.
+ */
+void
+csio_hw_intr_disable(struct csio_hw *hw)
+{
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+
+ if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
+ return;
+
+ hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
+
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+ if (csio_is_hw_master(hw))
+ csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+
+ /* Turn off MB interrupts */
+ csio_mb_intr_disable(hw);
+
+}
+
+static void
+csio_hw_fatal_err(struct csio_hw *hw)
+{
+ csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+ csio_hw_intr_disable(hw);
+
+ /* Do not reset HW, we may need FW state for debugging */
+ csio_fatal(hw, "HW Fatal error encountered!\n");
+}
+
+/*****************************************************************************/
+/* START: HW SM */
+/*****************************************************************************/
+/*
+ * csio_hws_uninit - Uninit state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_CFG:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_configuring - Configuring state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT:
+ csio_set_state(&hw->sm, csio_hws_initializing);
+ csio_hw_initialize(hw);
+ break;
+
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_initializing - Initializing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+
+ /* Enable interrupts */
+ csio_hw_intr_enable(hw);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_ready - Ready state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ /* Remember the event */
+ hw->evtflag = evt;
+
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ case CSIO_HWE_FW_DLOAD:
+ case CSIO_HWE_SUSPEND:
+ case CSIO_HWE_PCI_REMOVE:
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_quiescing);
+ /* cleanup all outstanding cmds */
+ if (evt == CSIO_HWE_HBA_RESET ||
+ evt == CSIO_HWE_PCIERR_DETECTED)
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
+ else
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
+
+ csio_hw_intr_disable(hw);
+ csio_hw_mbm_cleanup(hw);
+ csio_evtq_stop(hw);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
+ csio_evtq_flush(hw);
+ csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
+ csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiescing - Quiescing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_QUIESCED:
+ switch (hw->evtflag) {
+ case CSIO_HWE_FW_DLOAD:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Download firmware */
+ /* Fall through */
+
+ case CSIO_HWE_HBA_RESET:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Start reset of the HBA */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
+ csio_wr_destroy_queues(hw, false);
+ csio_do_reset(hw, false);
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_set_state(&hw->sm, csio_hws_removing);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
+ csio_wr_destroy_queues(hw, true);
+ /* Now send the bye command */
+ csio_do_bye(hw);
+ break;
+
+ case CSIO_HWE_SUSPEND:
+ csio_set_state(&hw->sm, csio_hws_quiesced);
+ break;
+
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_pcierr);
+ csio_wr_destroy_queues(hw, false);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiesced - Quiesced state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_RESUME:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_resetting - HW Resetting state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET_DONE:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_removing - PCI Hotplug removing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ if (!csio_is_hw_master(hw))
+ break;
+ /*
+ * The BYE should have already been issued, so we can't
+ * use the mailbox interface. Hence we use the PL_RST
+ * register directly.
+ */
+ csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ break;
+
+ /* Should never receive any new events */
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+}
+
+/*
+ * csio_hws_pcierr - PCI Error state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_PCIERR_SLOT_RESET:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: HW SM */
+/*****************************************************************************/
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/*
+ * csio_handle_intr_status - table driven interrupt handler
+ * @hw: HW instance
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table-driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int
+csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = csio_rd_reg32(hw, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ csio_fatal(hw, "Fatal %s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ csio_info(hw, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ csio_wr_reg32(hw, status, reg);
+ return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void csio_tp_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void csio_sge_intr_handler(struct csio_hw *hw)
+{
+ uint64_t v;
+
+ static struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+ if (v) {
+ csio_fatal(hw, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
+ SGE_INT_CAUSE1);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+ }
+
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+ if (v != 0)
+ csio_hw_fatal_err(hw);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
+ OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
+#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
+ IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+
+/*
+ * CIM interrupt handler.
+ */
+static void csio_cim_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cim_intr_info[] = {
+ { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT, "CIM illegal write", -1, 1 },
+ { ILLRDINT, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void csio_ulprx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void csio_ulptx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void csio_pmtx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { 0xffffff0, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ 1 },
+ { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void csio_pmrx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { 0x3ffff0, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ 1 },
+ { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void csio_cplsw_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void csio_le_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info le_intr_info[] = {
+ { LIPMISS, "LE LIP miss", -1, 0 },
+ { LIP0, "LE 0 LIP error", -1, 0 },
+ { PARITYERR, "LE parity error", -1, 1 },
+ { UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void csio_mps_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+ { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+ { BUBBLE, "MPS Tx underflow", -1, 1 },
+ { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM, "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+ { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
+ csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ } else {
+ addr = MC_INT_CAUSE;
+ cnt_addr = MC_ECC_STATUS;
+ }
+
+ v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE)
+ csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
+ if (v & ECC_CE_INT_CAUSE) {
+ uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+
+ csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+ csio_warn(hw, "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE)
+ csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
+
+ csio_wr_reg32(hw, v, addr);
+ if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void csio_ma_intr_handler(struct csio_hw *hw)
+{
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+
+ if (status & MEM_PERR_INT_CAUSE)
+ csio_fatal(hw, "MA parity error, parity status %#x\n",
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
+ if (status & MEM_WRAP_INT_CAUSE) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+ csio_fatal(hw,
+ "MA address wrap-around error by client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+ }
+ csio_wr_reg32(hw, status, MA_INT_CAUSE);
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void csio_smb_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void csio_ncsi_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
+{
+ uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+ v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
+ if (v & RXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
+ csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void csio_pl_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pl_intr_info[] = {
+ { FATALPERR, "T4 fatal parity error", -1, 1 },
+ { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_hw_slow_intr_handler - control path interrupt handler
+ * @hw: HW module
+ *
+ * Interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int
+csio_hw_slow_intr_handler(struct csio_hw *hw)
+{
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+
+ if (!(cause & CSIO_GLBL_INTR_MASK)) {
+ CSIO_INC_STATS(hw, n_plint_unexp);
+ return 0;
+ }
+
+ csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
+
+ CSIO_INC_STATS(hw, n_plint_cnt);
+
+ if (cause & CIM)
+ csio_cim_intr_handler(hw);
+
+ if (cause & MPS)
+ csio_mps_intr_handler(hw);
+
+ if (cause & NCSI)
+ csio_ncsi_intr_handler(hw);
+
+ if (cause & PL)
+ csio_pl_intr_handler(hw);
+
+ if (cause & SMB)
+ csio_smb_intr_handler(hw);
+
+ if (cause & XGMAC0)
+ csio_xgmac_intr_handler(hw, 0);
+
+ if (cause & XGMAC1)
+ csio_xgmac_intr_handler(hw, 1);
+
+ if (cause & XGMAC_KR0)
+ csio_xgmac_intr_handler(hw, 2);
+
+ if (cause & XGMAC_KR1)
+ csio_xgmac_intr_handler(hw, 3);
+
+ if (cause & PCIE)
+ csio_pcie_intr_handler(hw);
+
+ if (cause & MC)
+ csio_mem_intr_handler(hw, MEM_MC);
+
+ if (cause & EDC0)
+ csio_mem_intr_handler(hw, MEM_EDC0);
+
+ if (cause & EDC1)
+ csio_mem_intr_handler(hw, MEM_EDC1);
+
+ if (cause & LE)
+ csio_le_intr_handler(hw);
+
+ if (cause & TP)
+ csio_tp_intr_handler(hw);
+
+ if (cause & MA)
+ csio_ma_intr_handler(hw);
+
+ if (cause & PM_TX)
+ csio_pmtx_intr_handler(hw);
+
+ if (cause & PM_RX)
+ csio_pmrx_intr_handler(hw);
+
+ if (cause & ULP_RX)
+ csio_ulprx_intr_handler(hw);
+
+ if (cause & CPL_SWITCH)
+ csio_cplsw_intr_handler(hw);
+
+ if (cause & SGE)
+ csio_sge_intr_handler(hw);
+
+ if (cause & ULP_TX)
+ csio_ulptx_intr_handler(hw);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
+ csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+
+ return 1;
+}
+
+/*****************************************************************************
+ * HW <--> mailbox interfacing routines.
+ ****************************************************************************/
+/*
+ * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
+ *
+ * @data: Private data pointer.
+ *
+ * Called from worker thread context.
+ */
+static void
+csio_mberr_worker(void *data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mbm *mbm = &hw->mbm;
+ LIST_HEAD(cbfn_q);
+ struct csio_mb *mbp_next;
+ int rv;
+
+ del_timer_sync(&mbm->timer);
+
+ spin_lock_irq(&hw->lock);
+ if (list_empty(&mbm->cbfn_q)) {
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+
+ /* Try to start waiting mailboxes */
+ if (!list_empty(&mbm->req_q)) {
+ mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
+ list_del_init(&mbp_next->list);
+
+ rv = csio_mb_issue(hw, mbp_next);
+ if (rv != 0)
+ list_add_tail(&mbp_next->list, &mbm->req_q);
+ else
+ CSIO_DEC_STATS(mbm, n_activeq);
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Now callback completions */
+ csio_mb_completions(hw, &cbfn_q);
+}
+
+/*
+ * csio_hw_mb_timer - Top-level Mailbox timeout handler.
+ *
+ * @data: private data pointer
+ *
+ */
+static void
+csio_hw_mb_timer(uintptr_t data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mb *mbp = NULL;
+
+ spin_lock_irq(&hw->lock);
+ mbp = csio_mb_tmo_handler(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Call back the function for the timed-out Mailbox */
+ if (mbp)
+ mbp->mb_cbfn(hw, mbp);
+
+}
+
+/*
+ * csio_hw_mbm_cleanup - Cleanup Mailbox module.
+ * @hw: HW module
+ *
+ * Called with lock held, should exit with lock held.
+ * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
+ * into a local queue. Drops lock and calls the completions. Holds
+ * lock and returns.
+ */
+static void
+csio_hw_mbm_cleanup(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+
+ csio_mb_cancel_all(hw, &cbfn_q);
+
+ spin_unlock_irq(&hw->lock);
+ csio_mb_completions(hw, &cbfn_q);
+ spin_lock_irq(&hw->lock);
+}
+
+/*****************************************************************************
+ * Event handling
+ ****************************************************************************/
+int
+csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ return -EINVAL;
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ return -ENOMEM;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+ memcpy((void *)evt_entry->data, evt_msg, len);
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+
+ return 0;
+}
+
+static int
+csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len, bool msg_sg)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+ struct csio_fl_dma_buf *fl_sg;
+ uint32_t off = 0;
+ unsigned long flags;
+ int n, ret = 0;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+
+ /* Payload is in an SG list: gather it into the event buffer */
+ if (msg_sg) {
+ fl_sg = (struct csio_fl_dma_buf *) evt_msg;
+ for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
+ memcpy((void *)((uintptr_t)evt_entry->data + off),
+ fl_sg->flbufs[n].vaddr,
+ fl_sg->flbufs[n].len);
+ off += fl_sg->flbufs[n].len;
+ }
+ } else
+ memcpy((void *)evt_entry->data, evt_msg, len);
+
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+out:
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return ret;
+}
+
+static void
+csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
+{
+ if (evt_entry) {
+ spin_lock_irq(&hw->lock);
+ list_del_init(&evt_entry->list);
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_DEC_STATS(hw, n_evt_activeq);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ spin_unlock_irq(&hw->lock);
+ }
+}
+
+void
+csio_evtq_flush(struct csio_hw *hw)
+{
+ uint32_t count;
+ count = 30;
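+ /*
+ * Poll for up to a minute (30 iterations of 2 seconds) for the
+ * event worker to clear the pending flag, dropping the HW lock
+ * across each sleep.
+ */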
+ while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
+}
+
+static void
+csio_evtq_stop(struct csio_hw *hw)
+{
+ hw->flags |= CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_start(struct csio_hw *hw)
+{
+ hw->flags &= ~CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_cleanup(struct csio_hw *hw)
+{
+ struct list_head *evt_entry, *next_entry;
+
+ /* Release outstanding events from activeq to freeq*/
+ if (!list_empty(&hw->evt_active_q))
+ list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
+
+ hw->stats.n_evt_activeq = 0;
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+
+ /* Freeup event entry */
+ list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
+ kfree(evt_entry);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->stats.n_evt_freeq = 0;
+}
+
+
+static void
+csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ __u8 op;
+ __be64 *data;
+ void *msg = NULL;
+ uint32_t msg_len = 0;
+ bool msg_sg = 0;
+
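+ /*
+ * Firmware event WRs arrive either as CPL_FW6_PLD (payload carried
+ * in freelist buffers) or as CPL_FW6_MSG/CPL_FW4_MSG (payload inline
+ * after the RSS header); anything else is unexpected here.
+ */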
+ op = ((struct rss_header *) wr)->opcode;
+ if (op == CPL_FW6_PLD) {
+ CSIO_INC_STATS(hw, n_cpl_fw6_pld);
+ if (!flb || !flb->totlen) {
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ msg = (void *) flb;
+ msg_len = flb->totlen;
+ msg_sg = 1;
+
+ data = (__be64 *) msg;
+ } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
+
+ CSIO_INC_STATS(hw, n_cpl_fw6_msg);
+ /* skip RSS header */
+ msg = (void *)((uintptr_t)wr + sizeof(__be64));
+ msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
+ sizeof(struct cpl_fw4_msg);
+
+ data = (__be64 *) msg;
+ } else {
+ csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ /*
+ * Enqueue the event on the event queue. Event processing happens
+ * in the event worker thread context.
+ */
+ if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
+ (uint16_t)msg_len, msg_sg))
+ CSIO_INC_STATS(hw, n_evt_drop);
+}
+
+void
+csio_evtq_worker(struct work_struct *work)
+{
+ struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
+ struct list_head *evt_entry, *next_entry;
+ LIST_HEAD(evt_q);
+ struct csio_evt_msg *evt_msg;
+ struct cpl_fw6_msg *msg;
+ struct csio_rnode *rn;
+ int rv = 0;
+ uint8_t evtq_stop = 0;
+
+ csio_dbg(hw, "event worker thread active evts#%d\n",
+ hw->stats.n_evt_activeq);
+
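+ /*
+ * Drain the active queue in batches: splice it onto a local list
+ * under the lock, then process the batch without holding the lock.
+ */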
+ spin_lock_irq(&hw->lock);
+ while (!list_empty(&hw->evt_active_q)) {
+ list_splice_tail_init(&hw->evt_active_q, &evt_q);
+ spin_unlock_irq(&hw->lock);
+
+ list_for_each_safe(evt_entry, next_entry, &evt_q) {
+ evt_msg = (struct csio_evt_msg *) evt_entry;
+
+ /* Drop events if queue is STOPPED */
+ spin_lock_irq(&hw->lock);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ evtq_stop = 1;
+ spin_unlock_irq(&hw->lock);
+ if (evtq_stop) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto free_evt;
+ }
+
+ switch (evt_msg->type) {
+ case CSIO_EVT_FW:
+ msg = (struct cpl_fw6_msg *)(evt_msg->data);
+
+ if ((msg->opcode == CPL_FW6_MSG ||
+ msg->opcode == CPL_FW4_MSG) &&
+ !msg->type) {
+ rv = csio_mb_fwevt_handler(hw,
+ msg->data);
+ if (!rv)
+ break;
+ /* Handle any remaining fw events */
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else if (msg->opcode == CPL_FW6_PLD) {
+
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else {
+ csio_warn(hw,
+ "Unhandled FW msg op %x type %x\n",
+ msg->opcode, msg->type);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+ break;
+
+ case CSIO_EVT_MBX:
+ csio_mberr_worker(hw);
+ break;
+
+ case CSIO_EVT_DEV_LOSS:
+ memcpy(&rn, evt_msg->data, sizeof(rn));
+ csio_rnode_devloss_handler(rn);
+ break;
+
+ default:
+ csio_warn(hw, "Unhandled event %x on evtq\n",
+ evt_msg->type);
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+free_evt:
+ csio_free_evt(hw, evt_msg);
+ }
+
+ spin_lock_irq(&hw->lock);
+ }
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+}
+
+int
+csio_fwevtq_handler(struct csio_hw *hw)
+{
+ int rv;
+
+ if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+
+ rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
+ csio_process_fwevtq_entry, NULL);
+ return rv;
+}
+
+/****************************************************************************
+ * Entry points
+ ****************************************************************************/
+
+/* Management module */
+/*
+ * csio_mgmt_req_lookup - Check whether the given IO request is in the active Q.
+ * @mgmtm - mgmt module
+ * @io_req - io request
+ *
+ * Return - 0 if the given IO request exists in the active Q,
+ * -EINVAL if the lookup fails.
+ */
+int
+csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
+{
+ struct list_head *tmp;
+
+ /* Lookup ioreq in the ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ if (io_req == (struct csio_ioreq *)tmp)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
+
+/*
+ * csio_mgmt_tmo_handler - MGMT IO timeout handler.
+ * @data - Event data.
+ *
+ * Return - none.
+ */
+static void
+csio_mgmt_tmo_handler(uintptr_t data)
+{
+ struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
+ struct list_head *tmp;
+ struct csio_ioreq *io_req;
+
+ csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
+
+ spin_lock_irq(&mgmtm->hw->lock);
+
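+ /*
+ * Each timer tick accounts for ECM_MIN_TMO milliseconds: decrement
+ * every active request's remaining timeout and complete those that
+ * have expired with -ETIMEDOUT.
+ */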
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
+
+ if (!io_req->tmo) {
+ /* Dequeue the request from the active Q. */
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ } else {
+ CSIO_DB_ASSERT(0);
+ }
+ }
+ }
+
+ /* If the active queue is not empty, re-arm the timer */
+ if (!list_empty(&mgmtm->active_q))
+ mod_timer(&mgmtm->mgmt_timer,
+ jiffies + msecs_to_jiffies(ECM_MIN_TMO));
+ spin_unlock_irq(&mgmtm->hw->lock);
+}
+
+static void
+csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
+{
+ struct csio_hw *hw = mgmtm->hw;
+ struct csio_ioreq *io_req;
+ struct list_head *tmp;
+ uint32_t count;
+
+ count = 30;
+ /* Wait for all outstanding req to complete gracefully */
+ while ((!list_empty(&mgmtm->active_q)) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* release outstanding req from ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ }
+ }
+}
+
+/*
+ * csio_mgmtm_init - Mgmt module init entry point
+ * @mgmtm - mgmt module
+ * @hw - HW module
+ *
+ * Initialize mgmt timer, resource wait queue, active queue,
+ * completion q. Allocate Egress and Ingress
+ * WR queues and save off the queue index returned by the WR
+ * module for future use. Allocate and save off mgmt reqs in the
+ * mgmt_req_freelist for future use. Make sure their SM is initialized
+ * to uninit state.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
+{
+ struct timer_list *timer = &mgmtm->mgmt_timer;
+
+ init_timer(timer);
+ timer->function = csio_mgmt_tmo_handler;
+ timer->data = (unsigned long)mgmtm;
+
+ INIT_LIST_HEAD(&mgmtm->active_q);
+ INIT_LIST_HEAD(&mgmtm->cbfn_q);
+
+ mgmtm->hw = hw;
+ /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
+
+ return 0;
+}
+
+/*
+ * csio_mgmtm_exit - MGMT module exit entry point
+ * @mgmtm - mgmt module
+ *
+ * This function is called during MGMT module uninit.
+ * Stop timers, free allocated ioreqs.
+ * Returns: None
+ *
+ */
+static void
+csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
+{
+ del_timer_sync(&mgmtm->mgmt_timer);
+}
+
+
+/**
+ * csio_hw_start - Kicks off the HW State machine
+ * @hw: Pointer to HW module.
+ *
+ * It is assumed that the initialization is a synchronous operation.
+ * So when we return after posting the event, the HW SM should be in
+ * the ready state, if there were no errors during init.
+ */
+int
+csio_hw_start(struct csio_hw *hw)
+{
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_CFG);
+ spin_unlock_irq(&hw->lock);
+
+ if (csio_is_hw_ready(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+int
+csio_hw_stop(struct csio_hw *hw)
+{
+ csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
+
+ if (csio_is_hw_removing(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/* Max reset retries */
+#define CSIO_MAX_RESET_RETRIES 3
+
+/**
+ * csio_hw_reset - Reset the hardware
+ * @hw: HW module.
+ *
+ * Caller should hold lock across this function.
+ */
+int
+csio_hw_reset(struct csio_hw *hw)
+{
+ if (!csio_is_hw_master(hw))
+ return -EPERM;
+
+ if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
+ csio_dbg(hw, "Max hw reset attempts reached..");
+ return -EINVAL;
+ }
+
+ hw->rst_retries++;
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
+
+ if (csio_is_hw_ready(hw)) {
+ hw->rst_retries = 0;
+ hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
+ return 0;
+ } else
+ return -EINVAL;
+}
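+
+/*
+ * Illustrative caller sketch (not part of this patch): csio_hw_reset()
+ * expects hw->lock to be held and tracks its own retry budget of
+ * CSIO_MAX_RESET_RETRIES, so a hypothetical recovery path could be:
+ *
+ *	spin_lock_irq(&hw->lock);
+ *	if (csio_hw_reset(hw))
+ *		csio_err(hw, "HBA reset failed\n");
+ *	spin_unlock_irq(&hw->lock);
+ */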
+
+/*
+ * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
+ * @hw: HW module.
+ */
+static void
+csio_hw_get_device_id(struct csio_hw *hw)
+{
+	/* Is the adapter device id cached already? */
+ if (csio_is_dev_id_cached(hw))
+ return;
+
+ /* Get the PCI vendor & device id */
+ pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
+ &hw->params.pci.vendor_id);
+ pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
+ &hw->params.pci.device_id);
+
+ csio_dev_id_cached(hw);
+
+} /* csio_hw_get_device_id */
+
+/*
+ * csio_hw_set_description - Set the model, description of the hw.
+ * @hw: HW module.
+ * @ven_id: PCI Vendor ID
+ * @dev_id: PCI Device ID
+ */
+static void
+csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
+{
+ uint32_t adap_type, prot_type;
+
+ if (ven_id == CSIO_VENDOR_ID) {
+ prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
+ adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
+
+ if (prot_type == CSIO_FPGA) {
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[13].description, 32);
+ } else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_fcoe_adapters[adap_type].model_no, 16);
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[adap_type].description, 32);
+ } else {
+ char tempName[32] = "Chelsio FCoE Controller";
+ memcpy(hw->model_desc, tempName, 32);
+
+ CSIO_DB_ASSERT(0);
+ }
+ }
+} /* csio_hw_set_description */
+
+/**
+ * csio_hw_init - Initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ * Initialize the members of the HW module.
+ */
+int
+csio_hw_init(struct csio_hw *hw)
+{
+ int rv = -EINVAL;
+ uint32_t i;
+ uint16_t ven_id, dev_id;
+ struct csio_evt_msg *evt_entry;
+
+ INIT_LIST_HEAD(&hw->sm.sm_list);
+ csio_init_state(&hw->sm, csio_hws_uninit);
+ spin_lock_init(&hw->lock);
+ INIT_LIST_HEAD(&hw->sln_head);
+
+ /* Get the PCI vendor & device id */
+ csio_hw_get_device_id(hw);
+
+ strcpy(hw->name, CSIO_HW_NAME);
+
+ /* Set the model & its description */
+
+ ven_id = hw->params.pci.vendor_id;
+ dev_id = hw->params.pci.device_id;
+
+ csio_hw_set_description(hw, ven_id, dev_id);
+
+ /* Initialize default log level */
+ hw->params.log_level = (uint32_t) csio_dbg_level;
+
+ csio_set_fwevt_intr_idx(hw, -1);
+ csio_set_nondata_intr_idx(hw, -1);
+
+ /* Init all the modules: Mailbox, WorkRequest and Transport */
+ if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
+ goto err;
+
+ rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
+ if (rv)
+ goto err_mbm_exit;
+
+ rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
+ if (rv)
+ goto err_wrm_exit;
+
+ rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
+ if (rv)
+ goto err_scsim_exit;
+ /* Pre-allocate evtq and initialize them */
+ INIT_LIST_HEAD(&hw->evt_active_q);
+ INIT_LIST_HEAD(&hw->evt_free_q);
+ for (i = 0; i < csio_evtq_sz; i++) {
+
+ evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
+ if (!evt_entry) {
+ csio_err(hw, "Failed to initialize eventq");
+ goto err_evtq_cleanup;
+ }
+
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->dev_num = dev_num;
+ dev_num++;
+
+ return 0;
+
+err_evtq_cleanup:
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+err_scsim_exit:
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+err_wrm_exit:
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+err_mbm_exit:
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+err:
+ return rv;
+}
+
+/**
+ * csio_hw_exit - Un-initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ */
+void
+csio_hw_exit(struct csio_hw *hw)
+{
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+}
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
new file mode 100644
index 00000000000..9edcca4c71a
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -0,0 +1,665 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_H__
+#define __CSIO_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/compiler.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_scsi.h"
+#include "csio_defs.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+
+/*
+ * An error value used by host. Should not clash with FW defined return values.
+ */
+#define FW_HOSTERROR 255
+
+#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
+#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 2
+#define FW_VERSION_MICRO 8
+
+#define CSIO_HW_NAME "Chelsio FCoE Adapter"
+#define CSIO_MAX_PFN 8
+#define CSIO_MAX_PPORTS 4
+
+#define CSIO_MAX_LUN 0xFFFF
+#define CSIO_MAX_QUEUE 2048
+#define CSIO_MAX_CMD_PER_LUN 32
+#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
+#define CSIO_MAX_SECTOR_SIZE 128
+
+/* Interrupts */
+#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
+ * (Forward intr iq + fw iq) */
+#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
+#define CSIO_MAX_SCSI_CPU 128
+#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
+#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
+
+/* Queues */
+enum {
+ CSIO_INTR_WRSIZE = 128,
+ CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
+ CSIO_FWEVT_WRSIZE = 128,
+ CSIO_FWEVT_IQLEN = 128,
+ CSIO_FWEVT_FLBUFS = 64,
+ CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
+ CSIO_HW_NIQ = 1,
+ CSIO_HW_NFLQ = 1,
+ CSIO_HW_NEQ = 1,
+ CSIO_HW_NINTXQ = 1,
+};
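+
+/*
+ * Worked example (illustrative): with the values above, the FW event
+ * ingress queue is CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN = 128 * 128 =
+ * 16384 bytes, while the forwarded-interrupt queue is sized as
+ * (CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE bytes.
+ */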
+
+struct csio_msix_entries {
+ unsigned short vector; /* Vector assigned by pci_enable_msix */
+	void *dev_id; /* Priv object associated w/ this msix */
+ char desc[24]; /* Description of this vector */
+};
+
+struct csio_scsi_qset {
+ int iq_idx; /* Ingress index */
+ int eq_idx; /* Egress index */
+ uint32_t intr_idx; /* MSIX Vector index */
+};
+
+struct csio_scsi_cpu_info {
+ int16_t max_cpus;
+};
+
+extern int csio_dbg_level;
+extern int csio_force_master;
+extern unsigned int csio_port_mask;
+extern int csio_msi;
+
+#define CSIO_VENDOR_ID 0x1425
+#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
+#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
+#define CSIO_FPGA 0xA000
+#define CSIO_T4_FCOE_ASIC 0x4600
+
+#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+ EDC1 | LE | TP | MA | PM_TX | PM_RX | \
+ ULP_RX | CPL_SWITCH | SGE | \
+ ULP_TX | SF)
+
+/*
+ * Hard parameters used to initialize the card in the absence of a
+ * configuration file.
+ */
+enum {
+ /* General */
+ CSIO_SGE_DBFIFO_INT_THRESH = 10,
+
+ CSIO_SGE_RX_DMA_OFFSET = 2,
+
+ CSIO_SGE_FLBUF_SIZE1 = 65536,
+ CSIO_SGE_FLBUF_SIZE2 = 1536,
+ CSIO_SGE_FLBUF_SIZE3 = 9024,
+ CSIO_SGE_FLBUF_SIZE4 = 9216,
+ CSIO_SGE_FLBUF_SIZE5 = 2048,
+ CSIO_SGE_FLBUF_SIZE6 = 128,
+ CSIO_SGE_FLBUF_SIZE7 = 8192,
+ CSIO_SGE_FLBUF_SIZE8 = 16384,
+
+ CSIO_SGE_TIMER_VAL_0 = 5,
+ CSIO_SGE_TIMER_VAL_1 = 10,
+ CSIO_SGE_TIMER_VAL_2 = 20,
+ CSIO_SGE_TIMER_VAL_3 = 50,
+ CSIO_SGE_TIMER_VAL_4 = 100,
+ CSIO_SGE_TIMER_VAL_5 = 200,
+
+ CSIO_SGE_INT_CNT_VAL_0 = 1,
+ CSIO_SGE_INT_CNT_VAL_1 = 4,
+ CSIO_SGE_INT_CNT_VAL_2 = 8,
+ CSIO_SGE_INT_CNT_VAL_3 = 16,
+
+ /* Storage specific - used by FW_PFVF_CMD */
+ CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */
+ CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */
+ CSIO_NVI = 4,
+ CSIO_NIQ_FLINT = 34,
+ CSIO_NETH_CTRL = 32,
+ CSIO_NEQ = 66,
+ CSIO_NEXACTF = 32,
+ CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,
+ CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,
+};
+
+/* Slowpath events */
+enum csio_evt {
+ CSIO_EVT_FW = 0, /* FW event */
+ CSIO_EVT_MBX, /* MBX event */
+ CSIO_EVT_SCN, /* State change notification */
+ CSIO_EVT_DEV_LOSS, /* Device loss event */
+ CSIO_EVT_MAX, /* Max supported event */
+};
+
+#define CSIO_EVT_MSG_SIZE 512
+#define CSIO_EVTQ_SIZE 512
+
+/* Event msg */
+struct csio_evt_msg {
+ struct list_head list; /* evt queue*/
+ enum csio_evt type;
+ uint8_t data[CSIO_EVT_MSG_SIZE];
+};
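+
+/*
+ * Usage sketch (illustrative only, locking elided): slow-path producers
+ * hand a typed payload to the event worker via csio_enqueue_evt(). For a
+ * device-loss event the payload is the rnode pointer itself, matching the
+ * memcpy()/cast done in csio_evtq_worker():
+ *
+ *	struct csio_rnode *rn = ...;
+ *
+ *	csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn));
+ */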
+
+enum {
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ SERNUM_LEN = 16, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ TRACE_LEN = 112, /* length of trace data and mask */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 65536,
+ MEMWIN2_BASE = 0x30000,
+};
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_START_SEC = 8, /* first flash sector for FW */
+ FW_END_SEC = 15, /* last flash sector for FW */
+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+
+ FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
+ FLASH_CFG_OFFSET = 0x1f0000,
+ FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
+ FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
+ * at 1MB - 64KB */
+ FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
+};
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 8,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
+/* Management module */
+enum {
+ CSIO_MGMT_EQ_WRSIZE = 512,
+ CSIO_MGMT_IQ_WRSIZE = 128,
+ CSIO_MGMT_EQLEN = 64,
+ CSIO_MGMT_IQLEN = 64,
+};
+
+#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
+#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
+
+/* mgmt module stats */
+struct csio_mgmtm_stats {
+ uint32_t n_abort_req; /* Total abort request */
+ uint32_t n_abort_rsp; /* Total abort response */
+ uint32_t n_close_req; /* Total close request */
+ uint32_t n_close_rsp; /* Total close response */
+ uint32_t n_err; /* Total Errors */
+ uint32_t n_drop; /* Total request dropped */
+ uint32_t n_active; /* Count of active_q */
+ uint32_t n_cbfn; /* Count of cbfn_q */
+};
+
+/* MGMT module */
+struct csio_mgmtm {
+	struct csio_hw *hw; /* Pointer to HW module */
+ int eq_idx; /* Egress queue index */
+ int iq_idx; /* Ingress queue index */
+ int msi_vec; /* MSI vector */
+ struct list_head active_q; /* Outstanding ELS/CT */
+ struct list_head abort_q; /* Outstanding abort req */
+ struct list_head cbfn_q; /* Completion queue */
+	struct list_head mgmt_req_freelist; /* Free pool of reqs */
+ /* ELSCT request freelist*/
+ struct timer_list mgmt_timer; /* MGMT timer */
+ struct csio_mgmtm_stats stats; /* ELS/CT stats */
+};
+
+struct csio_adap_desc {
+ char model_no[16];
+ char description[32];
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/* User configurable hw parameters */
+struct csio_hw_params {
+ uint32_t sf_size; /* serial flash
+ * size in bytes
+ */
+ uint32_t sf_nsec; /* # of flash sectors */
+ struct pci_params pci;
+ uint32_t log_level; /* Module-level for
+ * debug log.
+ */
+};
+
+struct csio_vpd {
+ uint32_t cclk;
+ uint8_t ec[EC_LEN + 1];
+ uint8_t sn[SERNUM_LEN + 1];
+ uint8_t id[ID_LEN + 1];
+};
+
+struct csio_pport {
+ uint16_t pcap;
+ uint8_t portid;
+ uint8_t link_status;
+ uint16_t link_speed;
+ uint8_t mac[6];
+ uint8_t mod_type;
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
+};
+
+/* fcoe resource information */
+struct csio_fcoe_res_info {
+ uint16_t e_d_tov;
+ uint16_t r_a_tov_seq;
+ uint16_t r_a_tov_els;
+ uint16_t r_r_tov;
+ uint32_t max_xchgs;
+ uint32_t max_ssns;
+ uint32_t used_xchgs;
+ uint32_t used_ssns;
+ uint32_t max_fcfs;
+ uint32_t max_vnps;
+ uint32_t used_fcfs;
+ uint32_t used_vnps;
+};
+
+/* HW State machine Events */
+enum csio_hw_ev {
+ CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
+ CSIO_HWE_INIT, /* Config done, start Init */
+ CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
+ CSIO_HWE_FATAL, /* Fatal error during initialization */
+	CSIO_HWE_PCIERR_DETECTED,/* PCI error recovery detected */
+	CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
+ CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
+ CSIO_HWE_QUIESCED, /* HBA quiesced */
+ CSIO_HWE_HBA_RESET, /* HBA reset requested */
+ CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
+ CSIO_HWE_FW_DLOAD, /* FW download requested */
+ CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
+ CSIO_HWE_SUSPEND, /* HW suspend for Online(hot) replacement */
+ CSIO_HWE_RESUME, /* HW resume for Online(hot) replacement */
+ CSIO_HWE_MAX, /* Max HW event */
+};
+
+/* hw stats */
+struct csio_hw_stats {
+	uint32_t	n_evt_activeq;	/* Number of events in active Q */
+	uint32_t	n_evt_freeq;	/* Number of events in free Q */
+	uint32_t	n_evt_drop;	/* Number of events dropped */
+ uint32_t n_evt_unexp; /* Number of unexpected events */
+ uint32_t n_pcich_offline;/* Number of pci channel offline */
+ uint32_t n_lnlkup_miss; /* Number of lnode lookup miss */
+ uint32_t n_cpl_fw6_msg; /* Number of cpl fw6 message*/
+ uint32_t n_cpl_fw6_pld; /* Number of cpl fw6 payload*/
+ uint32_t n_cpl_unexp; /* Number of unexpected cpl */
+ uint32_t n_mbint_unexp; /* Number of unexpected mbox */
+ /* interrupt */
+ uint32_t n_plint_unexp; /* Number of unexpected PL */
+ /* interrupt */
+ uint32_t n_plint_cnt; /* Number of PL interrupt */
+ uint32_t n_int_stray; /* Number of stray interrupt */
+ uint32_t n_err; /* Number of hw errors */
+ uint32_t n_err_fatal; /* Number of fatal errors */
+ uint32_t n_err_nomem; /* Number of memory alloc failure */
+ uint32_t n_err_io; /* Number of IO failure */
+ enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
+ uint64_t n_reset_start; /* Start time after the reset */
+ uint32_t rsvd1;
+};
+
+/* Defines for hw->flags */
+#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
+ * function for the
+ * card.
+ */
+#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Are HW Interrupt
+ * enable bit set?
+ */
+#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
+#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
+ * allocated memory.
+ */
+#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
+ * allocated in FW.
+ */
+#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
+#define CSIO_HWF_DEVID_CACHED 0X00000040 /* PCI vendor & device
+ * id cached */
+#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
+ * FW events
+ */
+#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
+ * params
+ */
+#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
+ * enabled?
+ */
+
+#define csio_is_hw_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
+#define csio_is_host_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
+#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
+#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
+#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
+#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
+#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
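+
+/*
+ * Usage sketch (illustrative): the predicate/setter pairs above are meant
+ * to be used together, e.g. csio_hw_get_device_id() only reads the PCI IDs
+ * when they are not already cached:
+ *
+ *	if (!csio_is_dev_id_cached(hw)) {
+ *		pci_read_config_word(hw->pdev, PCI_DEVICE_ID, ...);
+ *		csio_dev_id_cached(hw);
+ *	}
+ */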
+
+/* Defines for intr_mode */
+enum csio_intr_mode {
+ CSIO_IM_NONE = 0,
+ CSIO_IM_INTX = 1,
+ CSIO_IM_MSI = 2,
+ CSIO_IM_MSIX = 3,
+};
+
+/* Master HW structure: One per function */
+struct csio_hw {
+ struct csio_sm sm; /* State machine: should
+ * be the 1st member.
+ */
+ spinlock_t lock; /* Lock for hw */
+
+ struct csio_scsim scsim; /* SCSI module*/
+ struct csio_wrm wrm; /* Work request module*/
+ struct pci_dev *pdev; /* PCI device */
+
+ void __iomem *regstart; /* Virtual address of
+ * register map
+ */
+ /* SCSI queue sets */
+ uint32_t num_sqsets; /* Number of SCSI
+ * queue sets */
+ uint32_t num_scsi_msix_cpus; /* Number of CPUs that
+ * will be used
+ * for ingress
+ * processing.
+ */
+
+ struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
+ struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
+
+ uint32_t evtflag; /* Event flag */
+ uint32_t flags; /* HW flags */
+
+ struct csio_mgmtm mgmtm; /* management module */
+ struct csio_mbm mbm; /* Mailbox module */
+
+ /* Lnodes */
+ uint32_t num_lns; /* Number of lnodes */
+ struct csio_lnode *rln; /* Root lnode */
+	struct list_head	sln_head;	/* Sibling lnode
+						 * list
+						 */
+ int intr_iq_idx; /* Forward interrupt
+ * queue.
+ */
+ int fwevt_iq_idx; /* FW evt queue */
+ struct work_struct evtq_work; /* Worker thread for
+ * HW events.
+ */
+ struct list_head evt_free_q; /* freelist of evt
+ * elements
+ */
+ struct list_head evt_active_q; /* active evt queue*/
+
+ /* board related info */
+ char name[32];
+ char hw_ver[16];
+ char model_desc[32];
+ char drv_version[32];
+ char fwrev_str[32];
+ uint32_t optrom_ver;
+ uint32_t fwrev;
+ uint32_t tp_vers;
+ char chip_ver;
+ uint32_t cfg_finiver;
+ uint32_t cfg_finicsum;
+ uint32_t cfg_cfcsum;
+ uint8_t cfg_csum_status;
+ uint8_t cfg_store;
+ enum csio_dev_state fw_state;
+ struct csio_vpd vpd;
+
+ uint8_t pfn; /* Physical Function
+ * number
+ */
+ uint32_t port_vec; /* Port vector */
+ uint8_t num_pports; /* Number of physical
+ * ports.
+ */
+ uint8_t rst_retries; /* Reset retries */
+ uint8_t cur_evt; /* current s/m evt */
+ uint8_t prev_evt; /* Previous s/m evt */
+ uint32_t dev_num; /* device number */
+ struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
+ struct csio_hw_params params; /* Hw parameters */
+
+ struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
+ mempool_t *mb_mempool; /* Mailbox memory pool*/
+ mempool_t *rnode_mempool; /* rnode memory pool */
+
+ /* Interrupt */
+ enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
+ uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
+ * index
+ */
+ uint32_t nondata_intr_idx; /* nondata MSIX/intr
+ * idx
+ */
+
+ uint8_t cfg_neq; /* FW configured no of
+ * egress queues
+ */
+ uint8_t cfg_niq; /* FW configured no of
+ * iq queues.
+ */
+
+ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+
+ /* MSIX vectors */
+ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
+
+ struct dentry *debugfs_root; /* Debug FS */
+ struct csio_hw_stats stats; /* Hw statistics */
+};
+
+/* Register access macros */
+#define csio_reg(_b, _r) ((_b) + (_r))
+
+#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
+
+#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg16(_h, _v, _r) writew((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg32(_h, _v, _r) writel((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
+ csio_reg((_h)->regstart, (_r)))
+
+void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
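+
+/*
+ * Usage sketch (illustrative; EXAMPLE_REG and EXAMPLE_BIT are placeholder
+ * names, not real register definitions). Note that the write macros take
+ * the value first and the register offset second:
+ *
+ *	uint32_t val = csio_rd_reg32(hw, EXAMPLE_REG);
+ *	csio_wr_reg32(hw, val | EXAMPLE_BIT, EXAMPLE_REG);
+ */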
+
+/* Core clocks <==> uSecs */
+static inline uint32_t
+csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
+}
+
+static inline uint32_t
+csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
+{
+ return (us * hw->vpd.cclk) / 1000;
+}
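+
+/*
+ * Worked example (illustrative): with a core clock of 250000 kHz (250 MHz),
+ * csio_core_ticks_to_us(hw, 1000) = (1000 * 1000 + 125000) / 250000 = 4 us,
+ * and csio_us_to_core_ticks(hw, 4) = (4 * 250000) / 1000 = 1000 ticks.
+ * The "+ cclk/2" term rounds to the nearest microsecond.
+ */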
+
+/* Easy access macros */
+#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
+#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
+#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
+#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
+
+#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
+#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
+#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
+
+#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
+#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
+#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
+#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
+
+/* Printing/logging */
+#define CSIO_DEVID(__dev) ((__dev)->dev_num)
+#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
+#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
+
+#define csio_info(__hw, __fmt, ...) \
+ dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_fatal(__hw, __fmt, ...) \
+ dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_err(__hw, __fmt, ...) \
+ dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_warn(__hw, __fmt, ...) \
+ dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#ifdef __CSIO_DEBUG__
+#define csio_dbg(__hw, __fmt, ...) \
+ csio_info((__hw), __fmt, ##__VA_ARGS__);
+#else
+#define csio_dbg(__hw, __fmt, ...)
+#endif
+
+int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
+void csio_hw_intr_disable(struct csio_hw *);
+int csio_hw_slow_intr_handler(struct csio_hw *hw);
+int csio_hw_start(struct csio_hw *);
+int csio_hw_stop(struct csio_hw *);
+int csio_hw_reset(struct csio_hw *);
+int csio_is_hw_ready(struct csio_hw *);
+int csio_is_hw_removing(struct csio_hw *);
+
+int csio_fwevtq_handler(struct csio_hw *);
+void csio_evtq_worker(struct work_struct *);
+int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
+ void *evt_msg, uint16_t len);
+void csio_evtq_flush(struct csio_hw *hw);
+
+int csio_request_irqs(struct csio_hw *);
+void csio_intr_enable(struct csio_hw *);
+void csio_intr_disable(struct csio_hw *, bool);
+
+struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
+int csio_config_queues(struct csio_hw *);
+
+int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
+int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
+int csio_hw_init(struct csio_hw *);
+void csio_hw_exit(struct csio_hw *);
+#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 00000000000..fdd408ff80a
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1274 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "csio_init.h"
+#include "csio_defs.h"
+
+#define CSIO_MIN_MEMPOOL_SZ 64
+
+static struct dentry *csio_debugfs_root;
+
+static struct scsi_transport_template *csio_fcoe_transport;
+static struct scsi_transport_template *csio_fcoe_transport_vport;
+
+/*
+ * debugfs support
+ */
+static int
+csio_mem_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct csio_hw *hw = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = csio_hw_mc_read(hw, pos, data, NULL);
+ else
+ ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations csio_mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = csio_mem_open,
+ .read = csio_mem_read,
+ .llseek = default_llseek,
+};
+
+static void __devinit
+csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
+ (void *)hw + idx, &csio_mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
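+
+/*
+ * Note (illustrative): the memory type index is packed into the low two
+ * bits of the hw pointer passed as debugfs private data, and is recovered
+ * in csio_mem_read() via "(uintptr_t)file->private_data & 3" before the
+ * base pointer is restored by subtracting it back out.
+ */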
+
+static int __devinit
+csio_setup_debugfs(struct csio_hw *hw)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(hw->debugfs_root))
+ return -1;
+
+ i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ csio_add_debugfs_mem(hw, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+ return 0;
+}
+
+/*
+ * csio_dfs_create - Creates and sets up per-hw debugfs.
+ *
+ */
+static int
+csio_dfs_create(struct csio_hw *hw)
+{
+ if (csio_debugfs_root) {
+ hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
+ csio_debugfs_root);
+ csio_setup_debugfs(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * csio_dfs_destroy - Destroys per-hw debugfs.
+ */
+static int
+csio_dfs_destroy(struct csio_hw *hw)
+{
+ if (hw->debugfs_root)
+ debugfs_remove_recursive(hw->debugfs_root);
+
+ return 0;
+}
+
+/*
+ * csio_dfs_init - Debug filesystem initialization for the module.
+ *
+ */
+static int
+csio_dfs_init(void)
+{
+ csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!csio_debugfs_root)
+ pr_warn("Could not create debugfs entry, continuing\n");
+
+ return 0;
+}
+
+/*
+ * csio_dfs_exit - debugfs cleanup for the module.
+ */
+static void
+csio_dfs_exit(void)
+{
+ debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+ int rv = -ENODEV;
+
+ *bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_enable_device_mem(pdev))
+ goto err;
+
+ if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+ goto err_disable_device;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI uninitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+ pci_release_selected_regions(pdev, *bars);
+ pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+ INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+ cancel_work_sync(&hw->evtq_work);
+ flush_scheduled_work();
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+ int i, j;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+ return 0;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+ 0, hw->pport[0].portid, false, NULL);
+ if (rv != 0) {
+ csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+ return rv;
+ }
+ }
+
+ /* FW event queue */
+ rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+ csio_get_fwevt_intr_idx(hw),
+ hw->pport[0].portid, true, NULL);
+ if (rv != 0) {
+ csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+ return rv;
+ }
+
+ /* Create mgmt queue */
+ rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+ mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+ if (rv != 0) {
+ csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+ goto err;
+ }
+
+ /* Create SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < info->max_cpus; j++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+ rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+ sqset->intr_idx, i, false, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module IQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+ sqset->iq_idx, i, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module EQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+ return 0;
+err:
+ csio_wr_destroy_queues(hw, true);
+ return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for queues and registers them with FW.
+ */
+int
+csio_config_queues(struct csio_hw *hw)
+{
+ int i, j, idx, k = 0;
+ int rv;
+ struct csio_scsi_qset *sqset;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_scsi_qset *orig;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
+ return csio_create_queues(hw);
+
+ /* Calculate number of SCSI queues for MSIX we would like */
+ hw->num_scsi_msix_cpus = num_online_cpus();
+ hw->num_sqsets = num_online_cpus() * hw->num_pports;
+
+ if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
+ hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
+ hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
+ }
+
+ /* Initialize max_cpus, may get reduced during msix allocations */
+ for (i = 0; i < hw->num_pports; i++)
+ hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
+
+ csio_dbg(hw, "nsqsets:%d scpus:%d\n",
+ hw->num_sqsets, hw->num_scsi_msix_cpus);
+
+ csio_intr_enable(hw);
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+
+ /* Allocate Forward interrupt iq. */
+ hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
+ CSIO_INTR_WRSIZE, CSIO_INGRESS,
+ (void *)hw, 0, 0, NULL);
+ if (hw->intr_iq_idx == -1) {
+ csio_err(hw,
+ "Forward interrupt queue creation failed\n");
+ goto intr_disable;
+ }
+ }
+
+ /* Allocate the FW evt queue */
+ hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
+ CSIO_FWEVT_WRSIZE,
+ CSIO_INGRESS, (void *)hw,
+ CSIO_FWEVT_FLBUFS, 0,
+ csio_fwevt_intx_handler);
+ if (hw->fwevt_iq_idx == -1) {
+ csio_err(hw, "FW evt queue creation failed\n");
+ goto intr_disable;
+ }
+
+ /* Allocate the mgmt queue */
+ mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
+ CSIO_MGMT_EQ_WRSIZE,
+ CSIO_EGRESS, (void *)hw, 0, 0, NULL);
+ if (mgmtm->eq_idx == -1) {
+ csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
+ goto intr_disable;
+ }
+
+ /* Use FW IQ for MGMT req completion */
+ mgmtm->iq_idx = hw->fwevt_iq_idx;
+
+ /* Allocate SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ sqset = &hw->sqset[i][j];
+
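+			/* CPUs beyond this port's max_cpus share the queue
+			 * set already allocated for CPU (j % max_cpus). */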
+ if (j >= info->max_cpus) {
+ k = j % info->max_cpus;
+ orig = &hw->sqset[i][k];
+ sqset->eq_idx = orig->eq_idx;
+ sqset->iq_idx = orig->iq_idx;
+ continue;
+ }
+
+ idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
+ CSIO_EGRESS, (void *)hw, 0, 0,
+ NULL);
+ if (idx == -1) {
+ csio_err(hw, "EQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+
+ sqset->eq_idx = idx;
+
+ idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
+ CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
+ (void *)hw, 0, 0,
+ csio_scsi_intx_handler);
+ if (idx == -1) {
+ csio_err(hw, "IQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+ sqset->iq_idx = idx;
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
+
+ rv = csio_create_queues(hw);
+ if (rv != 0)
+ goto intr_disable;
+
+ /*
+ * Now request IRQs for the vectors. In the event of a failure,
+ * cleanup is handled internally by this function.
+ */
+ rv = csio_request_irqs(hw);
+ if (rv != 0)
+ return -EINVAL;
+
+ return 0;
+
+intr_disable:
+ csio_intr_disable(hw, false);
+
+ return -EINVAL;
+}
+
+static int
+csio_resource_alloc(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv = -ENOMEM;
+
+ wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
+ CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
+
+ hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_mb));
+ if (!hw->mb_mempool)
+ goto err;
+
+ hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_rnode));
+ if (!hw->rnode_mempool)
+ goto err_free_mb_mempool;
+
+ hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
+ CSIO_SCSI_RSP_LEN, 8, 0);
+ if (!hw->scsi_pci_pool)
+ goto err_free_rn_pool;
+
+ return 0;
+
+err_free_rn_pool:
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+err_free_mb_mempool:
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+err:
+ return rv;
+}
+
+static void
+csio_resource_free(struct csio_hw *hw)
+{
+ pci_pool_destroy(hw->scsi_pci_pool);
+ hw->scsi_pci_pool = NULL;
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+}
+
+/*
+ * csio_hw_alloc - Allocate and initialize the HW module.
+ * @pdev: PCI device.
+ *
+ * Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ */
+static struct csio_hw * __devinit
+csio_hw_alloc(struct pci_dev *pdev)
+{
+ struct csio_hw *hw;
+
+ hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
+ if (!hw)
+ goto err;
+
+ hw->pdev = pdev;
+ strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+
+ /* memory pool/DMA pool allocation */
+ if (csio_resource_alloc(hw))
+ goto err_free_hw;
+
+ /* Get the start address of registers from BAR 0 */
+ hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->regstart) {
+ csio_err(hw, "Could not map BAR 0, regstart = %p\n",
+ hw->regstart);
+ goto err_resource_free;
+ }
+
+ csio_hw_init_workers(hw);
+
+ if (csio_hw_init(hw))
+ goto err_unmap_bar;
+
+ csio_dfs_create(hw);
+
+ csio_dbg(hw, "hw:%p\n", hw);
+
+ return hw;
+
+err_unmap_bar:
+ csio_hw_exit_workers(hw);
+ iounmap(hw->regstart);
+err_resource_free:
+ csio_resource_free(hw);
+err_free_hw:
+ kfree(hw);
+err:
+ return NULL;
+}
+
+/*
+ * csio_hw_free - Uninitialize and free the HW module.
+ * @hw: The HW module
+ *
+ * Disable interrupts, uninit the HW module, free resources, free hw.
+ */
+static void
+csio_hw_free(struct csio_hw *hw)
+{
+ csio_intr_disable(hw, true);
+ csio_hw_exit_workers(hw);
+ csio_hw_exit(hw);
+ iounmap(hw->regstart);
+ csio_dfs_destroy(hw);
+ csio_resource_free(hw);
+ kfree(hw);
+}
+
+/**
+ * csio_shost_init - Create and initialize the lnode module.
+ * @hw: The HW module.
+ * @dev: The device associated with this invocation.
+ * @probe: Called from probe context or not?
+ * @os_pln: Parent lnode if any.
+ *
+ * Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes lnode module and registers with SCSI ML
+ * via scsi_host_add. This function is shared between physical and
+ * virtual node ports.
+ */
+struct csio_lnode *
+csio_shost_init(struct csio_hw *hw, struct device *dev,
+ bool probe, struct csio_lnode *pln)
+{
+ struct Scsi_Host *shost = NULL;
+ struct csio_lnode *ln;
+
+ csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
+ csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
+
+ /*
+ * hw->pdev is the physical port's PCI dev structure,
+ * which will be different from the NPIV dev structure.
+ */
+ if (dev == &hw->pdev->dev)
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_template,
+ sizeof(struct csio_lnode));
+ else
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_vport_template,
+ sizeof(struct csio_lnode));
+
+ if (!shost)
+ goto err;
+
+ ln = shost_priv(shost);
+ memset(ln, 0, sizeof(struct csio_lnode));
+
+ /* Link common lnode to this lnode */
+ ln->dev_num = (shost->host_no << 16);
+
+ shost->can_queue = CSIO_MAX_QUEUE;
+ shost->this_id = -1;
+ shost->unique_id = shost->host_no;
+ shost->max_cmd_len = 16; /* Max CDB length supported */
+ shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
+ hw->fres_info.max_ssns);
+ shost->max_lun = CSIO_MAX_LUN;
+ if (dev == &hw->pdev->dev)
+ shost->transportt = csio_fcoe_transport;
+ else
+ shost->transportt = csio_fcoe_transport_vport;
+
+ /* root lnode */
+ if (!hw->rln)
+ hw->rln = ln;
+
+ /* Other initialization here: Common, Transport specific */
+ if (csio_lnode_init(ln, hw, pln))
+ goto err_shost_put;
+
+ if (scsi_add_host(shost, dev))
+ goto err_lnode_exit;
+
+ return ln;
+
+err_lnode_exit:
+ csio_lnode_exit(ln);
+err_shost_put:
+ scsi_host_put(shost);
+err:
+ return NULL;
+}
+
+/**
+ * csio_shost_exit - De-instantiate the shost.
+ * @ln: The lnode module corresponding to the shost.
+ *
+ */
+void
+csio_shost_exit(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* Inform transport */
+ fc_remove_host(shost);
+
+ /* Inform SCSI ML */
+ scsi_remove_host(shost);
+
+ /* Flush all the events, so that any rnode removal events
+ * already queued are all handled, before we remove the lnode.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_evtq_flush(hw);
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_exit(ln);
+ scsi_host_put(shost);
+}
+
+struct csio_lnode *
+csio_lnode_alloc(struct csio_hw *hw)
+{
+ return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
+}
+
+void
+csio_lnodes_block_request(struct csio_hw *hw)
+{
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_request(struct csio_hw *hw)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+ return;
+ }
+
+	/* Get all child lnodes (NPIV ports) */
+ spin_lock_irq(&hw->lock);
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete NPIV lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ fc_vport_terminate(ln->fc_vport);
+ }
+
+ /* Delete only npiv lnodes */
+ if (npiv)
+ goto free_lnodes;
+
+ cur_cnt = 0;
+ /* Get all physical lnodes */
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete physical lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+ csio_shost_exit(lnode_list[ii]);
+ }
+
+free_lnodes:
+ kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ csio_fchost_attr_init(ln);
+
+ scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate this PCI function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARS to
+ * host memory and initializes HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes the lnode module and registers with SCSI ML
+ * via scsi_host_add.
+ * - Enables interrupts, and starts the chip by kicking off the
+ * HW state machine.
+ * - Once the hardware is ready, initiates a scan of the host via
+ * scsi_scan_host.
+ */
+static int __devinit
+csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rv;
+ int bars;
+ int i;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rv = csio_pci_init(pdev, &bars);
+ if (rv)
+ goto err;
+
+ hw = csio_hw_alloc(pdev);
+ if (!hw) {
+ rv = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ pci_set_drvdata(pdev, hw);
+
+ if (csio_hw_start(hw) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to start FW, continuing in debug mode.\n");
+ return 0;
+ }
+
+ sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_lnode_exit;
+
+ return 0;
+
+err_lnode_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ pci_set_drvdata(hw->pdev, NULL);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+err_pci_exit:
+ csio_pci_exit(pdev, &bars);
+err:
+ dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
+ return rv;
+}
+
+/*
+ * csio_remove_one - Remove one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ *
+ * Used during hotplug operation.
+ */
+static void __devexit
+csio_remove_one(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+	/* Stop the lnode and rnode state machines.
+	 * Quiesce IOs.
+ * All sessions with remote ports are unregistered.
+ */
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ pci_set_drvdata(pdev, NULL);
+ csio_pci_exit(pdev, &bars);
+}
+
+/*
+ * csio_pci_error_detected - PCI error was detected
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Post PCI error detected evt to HW s/m
+	 * HW s/m handles this evt by quiescing IOs, unregistering rports
+	 * and finally taking the device offline.
+ */
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_intr_disable(hw, true);
+ pci_disable_device(pdev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * csio_pci_slot_reset - PCI slot has been reset.
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int ready;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	/* Bring the HW s/m to the ready state,
+	 * but don't resume IOs.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
+ ready = csio_is_hw_ready(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (ready) {
+ return PCI_ERS_RESULT_RECOVERED;
+ } else {
+ dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+/*
+ * csio_pci_resume - Resume normal operations
+ * @pdev: PCI device
+ *
+ */
+static void
+csio_pci_resume(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ struct csio_lnode *ln;
+ int rv = 0;
+ int i;
+
+ /* Bring the LINK UP and Resume IO */
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_resume_exit;
+
+ return;
+
+err_resume_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
+}
+
+static struct pci_error_handlers csio_err_handler = {
+ .error_detected = csio_pci_error_detected,
+ .slot_reset = csio_pci_slot_reset,
+ .resume = csio_pci_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
+ CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
+ { 0, 0, 0, 0, 0, 0, 0 }
+};
+
+
+static struct pci_driver csio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ .id_table = csio_pci_tbl,
+ .probe = csio_probe_one,
+ .remove = csio_remove_one,
+ .err_handler = &csio_err_handler,
+};
+
+/*
+ * csio_init - Chelsio storage driver initialization function.
+ *
+ */
+static int __init
+csio_init(void)
+{
+ int rv = -ENOMEM;
+
+ pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
+
+ csio_dfs_init();
+
+ csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
+ if (!csio_fcoe_transport)
+ goto err;
+
+ csio_fcoe_transport_vport =
+ fc_attach_transport(&csio_fc_transport_vport_funcs);
+ if (!csio_fcoe_transport_vport)
+ goto err_vport;
+
+ rv = pci_register_driver(&csio_pci_driver);
+ if (rv)
+ goto err_pci;
+
+ return 0;
+
+err_pci:
+ fc_release_transport(csio_fcoe_transport_vport);
+err_vport:
+ fc_release_transport(csio_fcoe_transport);
+err:
+ csio_dfs_exit();
+ return rv;
+}
+
+/*
+ * csio_exit - Chelsio storage driver uninitialization.
+ *
+ * Function that gets called in the unload path.
+ */
+static void __exit
+csio_exit(void)
+{
+ pci_unregister_driver(&csio_pci_driver);
+ csio_dfs_exit();
+ fc_release_transport(csio_fcoe_transport_vport);
+ fc_release_transport(csio_fcoe_transport);
+}
+
+module_init(csio_init);
+module_exit(csio_exit);
+MODULE_AUTHOR(CSIO_DRV_AUTHOR);
+MODULE_DESCRIPTION(CSIO_DRV_DESC);
+MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+MODULE_VERSION(CSIO_DRV_VERSION);
+MODULE_FIRMWARE(CSIO_FW_FNAME);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
new file mode 100644
index 00000000000..0838fd7ec9c
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_INIT_H__
+#define __CSIO_INIT_H__
+
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_scsi.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_hw.h"
+
+#define CSIO_DRV_AUTHOR "Chelsio Communications"
+#define CSIO_DRV_LICENSE "Dual BSD/GPL"
+#define CSIO_DRV_DESC "Chelsio FCoE driver"
+#define CSIO_DRV_VERSION "1.0.0"
+
+#define CSIO_DEVICE(devid, idx) \
+{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
+ ((_dev) == CSIO_DEVID_PE10K_PF1))
+
+/* FCoE device IDs */
+#define CSIO_DEVID_PE10K 0xA000
+#define CSIO_DEVID_PE10K_PF1 0xA001
+#define CSIO_DEVID_T440DBG_FCOE 0x4600
+#define CSIO_DEVID_T420CR_FCOE 0x4601
+#define CSIO_DEVID_T422CR_FCOE 0x4602
+#define CSIO_DEVID_T440CR_FCOE 0x4603
+#define CSIO_DEVID_T420BCH_FCOE 0x4604
+#define CSIO_DEVID_T440BCH_FCOE 0x4605
+#define CSIO_DEVID_T440CH_FCOE 0x4606
+#define CSIO_DEVID_T420SO_FCOE 0x4607
+#define CSIO_DEVID_T420CX_FCOE 0x4608
+#define CSIO_DEVID_T420BT_FCOE 0x4609
+#define CSIO_DEVID_T404BT_FCOE 0x460A
+#define CSIO_DEVID_B420_FCOE 0x460B
+#define CSIO_DEVID_B404_FCOE 0x460C
+#define CSIO_DEVID_T480CR_FCOE 0x460D
+#define CSIO_DEVID_T440LPCR_FCOE 0x460E
+
+extern struct fc_function_template csio_fc_transport_funcs;
+extern struct fc_function_template csio_fc_transport_vport_funcs;
+
+void csio_fchost_attr_init(struct csio_lnode *);
+
+/* INTx handlers */
+void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+/* Common os lnode APIs */
+void csio_lnodes_block_request(struct csio_hw *);
+void csio_lnodes_unblock_request(struct csio_hw *);
+void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
+void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
+
+struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
+ struct csio_lnode *);
+void csio_shost_exit(struct csio_lnode *);
+void csio_lnodes_exit(struct csio_hw *, bool);
+
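+/*
+ * csio_ln_to_shost - Map an lnode back to the Scsi_Host that embeds it.
+ *
+ * The lnode is expected to live at the start of the Scsi_Host's hostdata
+ * area (hostdata[0]), so container_of() recovers the enclosing Scsi_Host.
+ */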
+static inline struct Scsi_Host *
+csio_ln_to_shost(struct csio_lnode *ln)
+{
+ return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
+}
+
+/* SCSI -- locking version of get/put ioreqs */
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
+{
+ struct csio_ioreq *ioreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ ioreq = csio_get_scsi_ioreq(scsim);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+
+ return ioreq;
+}
+
+static inline void
+csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct csio_ioreq *ioreq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq(scsim, ioreq);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_put_scsi_ddp_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+#endif /* ifndef __CSIO_INIT_H__ */
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
new file mode 100644
index 00000000000..7ee9777ae2c
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+
+#include "csio_init.h"
+#include "csio_hw.h"
+
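+/*
+ * csio_nondata_isr - Non-data MSI-X interrupt handler.
+ * @irq: Interrupt number.
+ * @dev_id: Pointer to the HW module.
+ *
+ * Handles slow-path (non-data) interrupts and mailbox completions, and
+ * schedules the FW event worker when firmware event processing is pending.
+ */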
+static irqreturn_t
+csio_nondata_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ int rv;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_hw_slow_intr_handler(hw);
+ rv = csio_mb_isr_handler(hw);
+
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_handler - Common FW event handler routine.
+ * @hw: HW module.
+ *
+ * This is the ISR for FW events. It is shared b/w MSIX
+ * and INTx handlers.
+ */
+static void
+csio_fwevt_handler(struct csio_hw *hw)
+{
+ int rv;
+ unsigned long flags;
+
+ rv = csio_fwevtq_handler(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+} /* csio_fwevt_handler */
+
+/*
+ * csio_fwevt_isr() - FW events MSIX ISR
+ * @irq: Interrupt number.
+ * @dev_id: Pointer to the HW module.
+ *
+ * Process WRs on the FW event queue.
+ *
+ */
+static irqreturn_t
+csio_fwevt_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_fwevt_handler(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
+ * @hw: HW module.
+ * @wr: WR (unused).
+ * @len: WR length (unused).
+ * @flb: Freelist buffer (unused).
+ * @priv: Private data (unused).
+ */
+void
+csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ csio_fwevt_handler(hw);
+} /* csio_fwevt_intx_handler */
+
+/*
+ * csio_process_scsi_cmpl - Process a SCSI WR completion.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @cbfn_q: Queue onto which completed ioreqs are added for their callbacks.
+ *
+ */
+static void
+csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *cbfn_q)
+{
+ struct csio_ioreq *ioreq;
+ uint8_t *scsiwr;
+ uint8_t subop;
+ void *cmnd;
+ unsigned long flags;
+
+ ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
+ if (likely(ioreq)) {
+ if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
+ subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
+ ((struct fw_scsi_abrt_cls_wr *)
+ scsiwr)->sub_opcode_to_chk_all_io);
+
+ csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
+ subop ? "Close" : "Abort",
+ ioreq, ioreq->wr_status);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (subop)
+ csio_scsi_closed(ioreq,
+ (struct list_head *)cbfn_q);
+ else
+ csio_scsi_aborted(ioreq,
+ (struct list_head *)cbfn_q);
+ /*
+ * We call scsi_done for I/Os whose aborts the driver
+ * thinks have timed out. If there is a race caused by FW
+ * completing the abort at the exact same time that the
+ * driver has detected the abort timeout, the following
+ * check prevents calling of scsi_done twice for the
+ * same command: once from the eh_abort_handler, another
+ * from csio_scsi_isr_handler(). This also avoids the
+ * need to check if csio_scsi_cmnd(req) is NULL in the
+ * fast path.
+ */
+ cmnd = csio_scsi_cmnd(ioreq);
+ if (unlikely(cmnd == NULL))
+ list_del_init(&ioreq->sm.sm_list);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (unlikely(cmnd == NULL))
+ csio_put_scsi_ioreq_lock(hw,
+ csio_hw_to_scsim(hw), ioreq);
+ } else {
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ }
+}
+
+/*
+ * csio_scsi_isr_handler() - Common SCSI ISR handler.
+ * @iq: Ingress queue pointer.
+ *
+ * Processes SCSI completions on the given SCSI IQ by calling
+ * csio_wr_process_iq(). If there are completions on the
+ * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
+ * Once done, add these completions onto the freelist.
+ * This routine is shared b/w MSIX and INTx.
+ */
+static inline irqreturn_t
+csio_scsi_isr_handler(struct csio_q *iq)
+{
+ struct csio_hw *hw = (struct csio_hw *)iq->owner;
+ LIST_HEAD(cbfn_q);
+ struct list_head *tmp;
+ struct csio_scsim *scm;
+ struct csio_ioreq *ioreq;
+ int isr_completions = 0;
+
+ scm = csio_hw_to_scsim(hw);
+
+ if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
+ &cbfn_q) != 0))
+ return IRQ_NONE;
+
+ /* Call back the completion routines */
+ list_for_each(tmp, &cbfn_q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ isr_completions++;
+ ioreq->io_cbfn(hw, ioreq);
+ /* Release ddp buffer if used for this req */
+ if (unlikely(ioreq->dcopy))
+ csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
+ ioreq->nsge);
+ }
+
+ if (isr_completions) {
+ /* Return the ioreqs back to ioreq->freelist */
+ csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
+ isr_completions);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_isr() - SCSI MSIX handler
+ * @irq: Interrupt number.
+ * @dev_id: Ingress queue (struct csio_q) pointer.
+ *
+ * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+static irqreturn_t
+csio_scsi_isr(int irq, void *dev_id)
+{
+ struct csio_q *iq = (struct csio_q *) dev_id;
+ struct csio_hw *hw;
+
+ if (unlikely(!iq))
+ return IRQ_NONE;
+
+ hw = (struct csio_hw *)iq->owner;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_scsi_isr_handler(iq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_intx_handler() - SCSI INTx handler
+ * @hw: HW module.
+ * @wr: WR (unused).
+ * @len: WR length (unused).
+ * @flb: Freelist buffer (unused).
+ * @priv: Ingress queue (struct csio_q) on which to process completions.
+ *
+ * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+void
+csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ struct csio_q *iq = priv;
+
+ csio_scsi_isr_handler(iq);
+
+} /* csio_scsi_intx_handler */
+
+/*
+ * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
+ * @irq: Interrupt number.
+ * @dev_id: Pointer to the HW module.
+ *
+ * Handles slow-path interrupts, the INTx forwarded ingress queue and
+ * mailbox completions, and schedules the FW event worker when needed.
+ */
+static irqreturn_t
+csio_fcoe_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ struct csio_q *intx_q = NULL;
+ int rv;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ /* Disable the interrupt for this PCI function. */
+ if (hw->intr_mode == CSIO_IM_INTX)
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+
+ /*
+ * The read in the following function will flush the
+ * above write.
+ */
+ if (csio_hw_slow_intr_handler(hw))
+ ret = IRQ_HANDLED;
+
+ /* Get the INTx Forward interrupt IQ. */
+ intx_q = csio_get_q(hw, hw->intr_iq_idx);
+
+ CSIO_DB_ASSERT(intx_q);
+
+ /* IQ handler is not possible for intx_q, hence pass in NULL */
+ if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
+ ret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ rv = csio_mb_isr_handler(hw);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return ret;
+}
+
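+/*
+ * csio_add_msix_desc - Fill in the MSI-X vector description strings.
+ *
+ * The first CSIO_EXTRA_VECS entries name the non-data (slow path) and FW
+ * event vectors; the remaining hw->num_sqsets entries name the per-port,
+ * per-CPU SCSI vectors.
+ */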
+static void
+csio_add_msix_desc(struct csio_hw *hw)
+{
+ int i;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ int k = CSIO_EXTRA_VECS;
+ int len = sizeof(entryp->desc) - 1;
+ int cnt = hw->num_sqsets + k;
+
+ /* Non-data vector */
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+
+ entryp++;
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+ entryp++;
+
+ /* Name SCSI vecs */
+ for (i = k; i < cnt; i++, entryp++) {
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
+ CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
+ }
+}
+
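+/*
+ * csio_request_irqs - Register interrupt handlers.
+ *
+ * In MSI/INTx mode a single csio_fcoe_isr() handler is registered for the
+ * PCI function. In MSI-X mode separate handlers are registered for the
+ * non-data vector, the FW event vector and each SCSI queue-set vector.
+ */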
+int
+csio_request_irqs(struct csio_hw *hw)
+{
+ int rv, i, j, k = 0;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
+ (hw->intr_mode == CSIO_IM_MSI) ?
+ 0 : IRQF_SHARED,
+ KBUILD_MODNAME, hw);
+ if (rv) {
+ if (hw->intr_mode == CSIO_IM_MSI)
+ pci_disable_msi(hw->pdev);
+ csio_err(hw, "Failed to allocate interrupt line.\n");
+ return -EINVAL;
+ }
+
+ goto out;
+ }
+
+ /* Add the MSIX vector descriptions */
+ csio_add_msix_desc(hw);
+
+ rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ /* Allocate IRQs for SCSI */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ for (j = 0; j < info->max_cpus; j++, k++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+ struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
+
+ rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+ entryp[k].desc, q);
+ if (rv) {
+ csio_err(hw,
+ "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k].dev_id = (void *)q;
+
+ } /* for all scsi cpus */
+ } /* for all ports */
+
+out:
+ hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
+
+ return 0;
+
+err:
+ for (i = 0; i < k; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ pci_disable_msix(hw->pdev);
+
+ return -EINVAL;
+}
+
+static void
+csio_disable_msix(struct csio_hw *hw, bool free)
+{
+ int i;
+ struct csio_msix_entries *entryp;
+ int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
+
+ if (free) {
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ }
+ pci_disable_msix(hw->pdev);
+}
+
+/* Reduce per-port max possible CPUs */
+static void
+csio_reduce_sqsets(struct csio_hw *hw, int cnt)
+{
+ int i;
+ struct csio_scsi_cpu_info *info;
+
+ while (cnt < hw->num_sqsets) {
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ if (info->max_cpus > 1) {
+ info->max_cpus--;
+ hw->num_sqsets--;
+ if (hw->num_sqsets <= cnt)
+ break;
+ }
+ }
+ }
+
+ csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
+}
+
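+/*
+ * csio_enable_msix - Allocate and distribute MSI-X vectors.
+ *
+ * Requests hw->num_sqsets + CSIO_EXTRA_VECS vectors (capped at hw->cfg_niq
+ * where applicable). If pci_enable_msix() can only grant fewer vectors,
+ * retry with the granted count while it stays at or above the minimum of
+ * one vector per port plus the extra vectors; if fewer vectors than
+ * requested are finally granted, the SCSI queue sets are reduced to match.
+ * The granted vectors are then assigned to the non-data/mailbox, FW event
+ * and SCSI ingress queues.
+ */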
+static int
+csio_enable_msix(struct csio_hw *hw)
+{
+ int rv, i, j, k, n, min, cnt;
+ struct csio_msix_entries *entryp;
+ struct msix_entry *entries;
+ int extra = CSIO_EXTRA_VECS;
+ struct csio_scsi_cpu_info *info;
+
+ min = hw->num_pports + extra;
+ cnt = hw->num_sqsets + extra;
+
+ /* Max vectors required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
+ cnt = min_t(uint8_t, hw->cfg_niq, cnt);
+
+ entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++)
+ entries[i].entry = (uint16_t)i;
+
+ csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
+
+ while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
+ cnt = rv;
+ if (!rv) {
+ if (cnt < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+ csio_reduce_sqsets(hw, cnt - extra);
+ }
+ } else {
+ if (rv > 0) {
+ pci_disable_msix(hw->pdev);
+ csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
+ }
+
+ kfree(entries);
+ return -ENOMEM;
+ }
+
+ /* Save off vectors */
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ entryp->vector = entries[i].vector;
+ }
+
+ /* Distribute vectors */
+ k = 0;
+ csio_set_nondata_intr_idx(hw, entries[k].entry);
+ csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
+ csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ n = (j % info->max_cpus) + k;
+ hw->sqset[i][j].intr_idx = entries[n].entry;
+ }
+
+ k += info->max_cpus;
+ }
+
+ kfree(entries);
+ return 0;
+}
+
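+/*
+ * csio_intr_enable - Select and enable the interrupt mode.
+ *
+ * The csio_msi setting selects the mode: 2 tries MSI-X first, 1 tries MSI,
+ * anything else falls back to INTx. When MSI-X is not used, the SCSI queue
+ * sets are trimmed, if necessary, to fit the firmware ingress queue budget
+ * before enabling MSI/INTx.
+ */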
+void
+csio_intr_enable(struct csio_hw *hw)
+{
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+
+ /* Try MSIX, then MSI or fall back to INTx */
+ if ((csio_msi == 2) && !csio_enable_msix(hw))
+ hw->intr_mode = CSIO_IM_MSIX;
+ else {
+ /* Max iqs required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
+ !csio_is_hw_master(hw)) {
+ int extra = CSIO_EXTRA_MSI_IQS;
+
+ if (hw->cfg_niq < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n",
+ hw->cfg_niq - extra);
+ csio_reduce_sqsets(hw, hw->cfg_niq - extra);
+ }
+ }
+
+ if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
+ hw->intr_mode = CSIO_IM_MSI;
+ else
+ hw->intr_mode = CSIO_IM_INTX;
+ }
+
+ csio_dbg(hw, "Using %s interrupt mode.\n",
+ (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
+ ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
+}
+
+void
+csio_intr_disable(struct csio_hw *hw, bool free)
+{
+ csio_hw_intr_disable(hw);
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_MSIX:
+ csio_disable_msix(hw, free);
+ break;
+ case CSIO_IM_MSI:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ pci_disable_msi(hw->pdev);
+ break;
+ case CSIO_IM_INTX:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ break;
+ default:
+ break;
+ }
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
new file mode 100644
index 00000000000..ffe9be04dc3
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -0,0 +1,2135 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ms.h>
+
+#include "csio_hw.h"
+#include "csio_mb.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_fcoe_rnodes = 1024;
+int csio_fdmi_enable = 1;
+
+#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
+
+/* Lnode SM declarations */
+static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
+
+static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
+
+/* LN event mapping */
+static enum csio_ln_ev fwevt_to_lnevt[] = {
+ CSIO_LNE_NONE, /* None */
+ CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RCVD */
+ CSIO_LNE_NONE, /* PLOGO_RCVD */
+ CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RCVD */
+ CSIO_LNE_NONE, /* PRLO_RCVD */
+ CSIO_LNE_NONE, /* NPORT_ID_CHGD */
+ CSIO_LNE_LOGO, /* FLOGO_RCVD */
+ CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
+ CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_LNE_NONE, /* PRLI_TMO */
+ CSIO_LNE_NONE, /* ADISC_TMO */
+ CSIO_LNE_NONE, /* RSCN_DEV_LOST */
+ CSIO_LNE_NONE, /* SCR_ACC_RCVD */
+ CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* LOGO_SNT */
+ CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_LNE_NONE : \
+ fwevt_to_lnevt[_evt])
+
+#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
+#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
+#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
+#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
+
+/*
+ * csio_ln_lookup_by_portid - Lookup lnode using given portid.
+ * @hw: HW module
+ * @portid: port-id.
+ *
+ * If found, returns the lnode matching the given portid, otherwise returns
+ * NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln = hw->rln;
+ struct list_head *tmp;
+
+ /* Match sibling lnodes with portid */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid == portid)
+ return ln;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
+ * @hw - HW module
+ * @vnp_id - VNP flow id.
+ * Returns - If found, returns the lnode matching the given vnp id,
+ * otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (sln->vnp_flowid == vnp_id)
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (cln->vnp_flowid == vnp_id)
+ return cln;
+ }
+ }
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+}
+
+/**
+ * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
+ * @hw: HW module.
+ * @wwpn: WWPN.
+ *
+ * If found, returns lnode matching given wwpn, returns NULL otherwise.
+ */
+struct csio_lnode *
+csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
+ return cln;
+ }
+ }
+ return NULL;
+}
+
+/* FDMI */
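+/*
+ * FDMI registration is driven as a chain of CT requests:
+ * csio_ln_fdmi_start() issues DHBA, whose completion (dhba_cbfn) issues
+ * DPRT, whose completion (dprt_cbfn) issues RHBA, whose completion
+ * (rhba_cbfn) issues RPA; csio_ln_fdmi_done() handles the final RPA
+ * response.
+ */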
+static void
+csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
+{
+ struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
+ cmd->ct_rev = FC_CT_REV;
+ cmd->ct_fs_type = type;
+ cmd->ct_fs_subtype = sub_type;
+ cmd->ct_cmd = htons(op);
+}
+
+static int
+csio_hostname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
+ return 0;
+ return -1;
+}
+
+static int
+csio_osname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version) > 0)
+ return 0;
+
+ return -1;
+}
+
+static inline void
+csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+{
+ struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+ ae->type = htons(type);
+ len += 4; /* includes attribute type and length */
+ len = (len + 3) & ~3; /* should be multiple of 4 bytes */
+ ae->len = htons(len);
+ memcpy(ae->value, val, len);
+ *ptr += len;
+}
+
+/*
+ * csio_ln_fdmi_done - FDMI registration completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ struct csio_lnode *ln = fdmi_req->lnode;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+}
+
+/*
+ * csio_ln_fdmi_rhba_cbfn - RHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ __be32 val;
+ __be16 mfs;
+ uint32_t numattrs = 0;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fs_fdmi_attrs *attrib_blk;
+ struct fc_fdmi_port_name *port_name;
+ uint8_t buf[64];
+ uint8_t *fc4_type;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+
+ /* Prepare CT hdr for RPA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);
+
+ /* Prepare RPA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ port_name = (struct fc_fdmi_port_name *)pld;
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*port_name);
+
+ /* Start appending Port attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ fc4_type = &buf[0];
+ memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ fc4_type[2] = 1;
+ fc4_type[7] = 1;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
+ fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ numattrs++;
+ val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ numattrs++;
+
+ if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
+ val = htonl(FC_PORTSPEED_1GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
+ val = htonl(FC_PORTSPEED_10GBIT);
+ else
+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ numattrs++;
+
+ mfs = ln->ln_sparm.csp.sp_bb_data;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ numattrs++;
+
+ strcpy(buf, "csiostor");
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+
+ if (!csio_hostname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+ attrib_blk->numattrs = htonl(numattrs);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+
+ /* Submit FDMI RPA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dprt_cbfn - DPRT completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ uint32_t numattrs = 0;
+ __be32 maxpayload = htonl(65536);
+ struct fc_fdmi_hba_identifier *hbaid;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fc_fdmi_rpl *reg_pl;
+ struct fs_fdmi_attrs *attrib_blk;
+ uint8_t buf[64];
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Prepare CT hdr for RHBA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
+ len = FC_CT_HDR_LEN;
+
+ /* Prepare RHBA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ hbaid = (struct fc_fdmi_hba_identifier *)pld;
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
+ pld += sizeof(*hbaid);
+
+ /* Register one port per hba */
+ reg_pl = (struct fc_fdmi_rpl *)pld;
+ reg_pl->numport = htonl(1);
+ memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*reg_pl);
+
+ /* Start appending HBA attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ numattrs++;
+
+ memset(buf, 0, sizeof(buf));
+
+ strcpy(buf, "Chelsio Communications");
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
+ (uint16_t)sizeof(hw->vpd.id));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ numattrs++;
+
+ if (!csio_osname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ (uint8_t *)&maxpayload,
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+ numattrs++;
+ attrib_blk->numattrs = htonl(numattrs);
+
+ /* Submit FDMI RHBA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dhba_cbfn - DHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ struct csio_lnode *ln = fdmi_req->lnode;
+ void *cmd;
+ struct fc_fdmi_port_name *port_name;
+ uint32_t len;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Send FDMI cmd to de-register any Port attributes if registered
+ * before
+ */
+
+ /* Prepare FDMI DPRT cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
+ len = FC_CT_HDR_LEN;
+ port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ len += sizeof(*port_name);
+
+ /* Submit FDMI request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/**
+ * csio_ln_fdmi_start - Start an FDMI request.
+ * @ln: lnode
+ * @context: session context
+ *
+ * Issued with lock held.
+ */
+int
+csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
+{
+ struct csio_ioreq *fdmi_req;
+ struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
+ void *cmd;
+ struct fc_fdmi_hba_identifier *hbaid;
+ uint32_t len;
+
+ if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
+ return -EPROTONOSUPPORT;
+
+ if (!csio_is_rnode_ready(fdmi_rn))
+ CSIO_INC_STATS(ln, n_fdmi_err);
+
+ /* Send FDMI cmd to de-register any HBA attributes if registered
+ * before
+ */
+
+ fdmi_req = ln->mgmt_req;
+ fdmi_req->lnode = ln;
+ fdmi_req->rnode = fdmi_rn;
+
+ /* Prepare FDMI DHBA cmd */
+ cmd = fdmi_req->dma_buf.vaddr;
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
+ len = FC_CT_HDR_LEN;
+
+ hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
+ len += sizeof(*hbaid);
+
+ /* Submit FDMI request */
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
+ }
+
+ return 0;
+}
+
+/*
+ * csio_ln_vnp_read_cbfn - vnp read completion handler.
+ * @hw: HW module.
+ * @mbp: Mailbox containing the VNP read response.
+ *
+ * Reads vnp response and updates ln parameters.
+ */
+static void
+csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
+ struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *clsp;
+ enum fw_retval retval;
+ __be32 nport_id;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+
+ memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
+ memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
+ ln->nport_id = ntohl(nport_id);
+ ln->nport_id = ln->nport_id >> 8;
+
+ /* Update WWNs */
+ /*
+ * This may look like a duplication of what csio_fcoe_enable_link()
+ * does, but is absolutely necessary if the vnpi changes between
+ * a FCOE LINK UP and FCOE LINK DOWN.
+ */
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ /* Copy common sparam */
+ csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
+ ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
+ ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
+ ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
+ ln->ln_sparm.csp.sp_features = csp->sp_features;
+ ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
+ ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
+ ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;
+
+ /* Copy word 0 & word 1 of class sparam */
+ clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
+ ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
+ ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
+ ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
+ ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ /* Send an event to update local attribs */
+ csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
+}
+
+/*
+ * csio_ln_vnp_read - Read vnp params.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_vnp_read(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ /* Allocate Mbox request */
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Prepare VNP Command */
+ csio_fcoe_vnp_read_init_mb(ln, mbp,
+ CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid,
+ ln->vnp_flowid,
+ cbfn);
+
+ /* Issue MBOX cmd */
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_fcoe_enable_link - Enable fcoe link.
+ * @ln: lnode
+ * @enable: enable/disable
+ * Issued with lock held.
+ * Issues mbox cmd to bring up FCOE link on port associated with given ln.
+ */
+static int
+csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ uint8_t sub_op;
+ struct fw_fcoe_link_cmd *lcmd;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ portid = ln->portid;
+ sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
+
+ csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
+ sub_op ? "UP" : "DOWN", portid);
+
+ csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ portid, sub_op, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw,
+ "FCOE LINK %s cmd on port[%d] failed with "
+ "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ goto out;
+
+ lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
+
+ memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
+
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ if (hw->pport[i].portid == portid)
+ memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
+
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_read_fcf_cbfn - Read fcf parameters
+ * @hw: HW module.
+ * @mbp: Mailbox containing the FCF read response.
+ *
+ * Reads the FCF response and updates the lnode FCF information.
+ */
+static void
+csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
+ struct csio_fcf_info *fcf_info;
+ struct fw_fcoe_fcf_cmd *rsp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+ enum fw_retval retval;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ fcf_info = ln->fcfinfo;
+ fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
+ ntohs(rsp->priority_pkd));
+ fcf_info->vf_id = ntohs(rsp->vf_id);
+ fcf_info->vlan_id = rsp->vlan_id;
+ fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
+ fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
+ fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
+ fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
+ fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
+ fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
+ fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
+ memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
+ memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
+ memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
+ memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
+ memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+}
+
+/*
+ * csio_ln_read_fcf_entry - Read fcf entry.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_read_fcf_entry(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FCF information */
+ csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->portid, ln->fcf_flowid, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE FCF cmd\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_handle_link_up - Logical Linkup event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none.
+ *
+ * This event is received from FW when a virtual link is established between
+ * the physical port [ENode] and the FCF. If it is a new vnpi, a local node
+ * object is created on this FCF and set to [ONLINE] state.
+ * The lnode then waits for an FW_RDEV_CMD event indicating that fabric
+ * login is complete, and moves to [READY] state.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_lnode *ln = NULL;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ /* Pick lnode based on portid */
+ ln = csio_ln_lookup_by_portid(hw, portid);
+ if (!ln) {
+ csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
+ portid);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+
+ /* Check if lnode has valid vnp flowid */
+ if (ln->vnp_flowid != CSIO_INVALID_IDX) {
+ /* New VN-Port */
+ spin_unlock_irq(&hw->lock);
+ ln = csio_lnode_alloc(hw);
+ spin_lock_irq(&hw->lock);
+ if (!ln) {
+ csio_err(hw,
+ "failed to allocate fcoe lnode"
+ "for port:%d vnpi:x%x\n",
+ portid, vnpi);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+ ln->portid = portid;
+ }
+ ln->vnp_flowid = vnpi;
+ ln->dev_num &= ~0xFFFF;
+ ln->dev_num |= vnpi;
+ }
+
+ /* Initialize fcfi */
+ ln->fcf_flowid = fcfi;
+
+ csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
+
+ CSIO_INC_STATS(ln, n_link_up);
+
+ /* Send LINKUP event to SM */
+ csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
+}
+
+/*
+ * csio_post_event_rns
+ * @ln - FCOE lnode
+ * @evt - Given rnode event
+ * Returns - none
+ *
+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_post_event(&rn->sm, evt);
+ }
+}
+
+/*
+ * csio_cleanup_rns
+ * @ln - FCOE lnode
+ * Returns - none
+ *
+ * Frees all FCOE rnodes connected with given Lnode.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_cleanup_rns(struct csio_lnode *ln)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next_rn;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_put_rnode(ln, rn);
+ }
+
+}
+
+/*
+ * csio_post_event_lns
+ * @ln - FCOE lnode
+ * @evt - Given lnode event
+ * Returns - none
+ *
+ * Posts given lnode event to all FCOE lnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct list_head *tmp;
+ struct csio_lnode *cln, *sln;
+
+ /* If NPIV lnode, send evt only to that and return */
+ if (csio_is_npiv_ln(ln)) {
+ csio_post_event(&ln->sm, evt);
+ return;
+ }
+
+ sln = ln;
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp;
+ csio_post_event(&cln->sm, evt);
+ }
+
+ /* Send evt to parent lnode */
+ csio_post_event(&ln->sm, evt);
+}
+
+/*
+ * csio_ln_down - Local nport is down
+ * @ln - FCOE Lnode
+ * Returns - none
+ *
+ * Sends LINK_DOWN events to the lnode and its associated NPIV lnodes.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_ln_down(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
+}
+
+/*
+ * csio_handle_link_down - Logical Linkdown event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none
+ *
+ * This event is received from FW when the virtual link goes down between
+ * the physical port [ENode] and the FCF. The lnode and its associated NPIV
+ * lnodes hosted on this vnpi [VN-Port] will be de-instantiated.
+ *
+ * This is called with the hw lock held.
+ */
+static void
+csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_fcf_info *fp;
+ struct csio_lnode *ln;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (ln) {
+ fp = ln->fcfinfo;
+ CSIO_INC_STATS(ln, n_link_down);
+
+ /* Warn if linkdown is received when lnode is not in ready state */
+ if (!csio_is_lnode_ready(ln)) {
+ csio_ln_warn(ln,
+ "warn: FCOE link is already in offline "
+ "Ignoring Fcoe linkdown event on portid %d\n",
+ portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* Verify portid */
+ if (fp->portid != portid) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid port %d\n", portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* verify fcfi */
+ if (ln->fcf_flowid != fcfi) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid fcfi x%x\n", fcfi);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
+
+ /* Send LINK_DOWN event to lnode s/m */
+ csio_ln_down(ln);
+
+ return;
+ } else {
+ csio_warn(hw,
+ "warn: FCOE linkdown recv with invalid vnpi x%x\n",
+ vnpi);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+}
+
+/*
+ * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
+ * @ln: Lnode module
+ *
+ * Returns True if FCOE lnode is in ready state.
+ */
+int
+csio_is_lnode_ready(struct csio_lnode *ln)
+{
+ return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+}
+
+/*****************************************************************************/
+/* START: Lnode SM */
+/*****************************************************************************/
+/*
+ * csio_lns_uninit - Lnode SM entry for the uninit state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given event for an lnode currently in the "uninit" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_online - Lnode SM entry for the online state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given event for an lnode currently in the "online" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_ln_warn(ln,
+ "warn: FCOE link is up already "
+ "Ignoring linkup on port:%d\n", ln->portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_set_state(&ln->sm, csio_lns_ready);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
+ spin_lock_irq(&hw->lock);
+
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ /* Fall through */
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_ready - Lnode SM entry for the ready state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given event for an lnode currently in the "ready" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[ready].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ /* Host needs to issue aborts in case FW has not returned
+ * WRs with status "ABORTED"
+ */
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ case CSIO_LNE_LOGO:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_offline - Lnode SM entry for the offline state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Process the given event for an lnode currently in the "offline" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ case CSIO_LNE_DOWN_LINK:
+ case CSIO_LNE_LOGO:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[offline].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[offline]\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*****************************************************************************/
+/* END: Lnode SM */
+/*****************************************************************************/
+
+static void
+csio_free_fcfinfo(struct kref *kref)
+{
+ struct csio_fcf_info *fcfinfo = container_of(kref,
+ struct csio_fcf_info, kref);
+ kfree(fcfinfo);
+}
+
+/* Helper routines for attributes */
+/*
+ * csio_lnode_state_to_str - Get the current state of an FCOE lnode as a string.
+ * @ln - lnode
+ * @str - Buffer that receives the state string.
+ *
+ */
+void
+csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+{
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ strcpy(str, "UNINIT");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ strcpy(str, "READY");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ strcpy(str, "OFFLINE");
+ return;
+ }
+ strcpy(str, "UNKNOWN");
+} /* csio_lnode_state_to_str */
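+
+/*
+ * Illustrative use (a hedged sketch, not part of the driver proper): an
+ * attribute handler could report the lnode state as a string, assuming a
+ * caller-provided buffer large enough for the longest state name:
+ *
+ *     int8_t state[16];
+ *
+ *     csio_lnode_state_to_str(ln, state);
+ *     csio_ln_dbg(ln, "lnode state: %s\n", state);
+ */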
+
+
+int
+csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
+ struct fw_fcoe_port_stats *port_stats)
+{
+ struct csio_mb *mbp;
+ struct fw_fcoe_port_cmd_params portparams;
+ enum fw_retval retval;
+ int idx;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
+ return -EINVAL;
+ }
+ portparams.portid = portid;
+
+ /* Fetch the 16 per-port statistics in three mailbox reads of
+ * 6, 6 and 4 counters, starting at index 1.
+ */
+ for (idx = 1; idx <= 3; idx++) {
+ portparams.idx = (idx-1)*6 + 1;
+ portparams.nstats = 6;
+ if (idx == 3)
+ portparams.nstats = 4;
+ csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ &portparams, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FCoE port params failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_mb_process_portparams_rsp(hw, mbp, &retval,
+ &portparams, port_stats);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
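+
+/*
+ * Illustrative caller (a hedged sketch): the FC transport statistics
+ * callback could fill a struct fw_fcoe_port_stats for a physical lnode
+ * along these lines; zeroing the buffer first is an assumption of the
+ * sketch, not a requirement of csio_get_phy_port_stats():
+ *
+ *     struct fw_fcoe_port_stats stats;
+ *
+ *     memset(&stats, 0, sizeof(stats));
+ *     if (csio_get_phy_port_stats(hw, ln->portid, &stats) != 0)
+ *             return;
+ */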
+
+/*
+ * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
+ * @hw - HW module.
+ * @wr - WR.
+ * @len - WR len.
+ * This handler is invoked when an outstanding mgmt WR is completed.
+ * It is invoked in the context of the FW event worker thread for every
+ * mgmt event received.
+ */
+
+static void
+csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
+{
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_ioreq *io_req = NULL;
+ struct fw_fcoe_els_ct_wr *wr_cmd;
+
+
+ wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
+
+ if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
+ csio_err(mgmtm->hw,
+ "Invalid ELS CT WR length recvd, len:%x\n", len);
+ mgmtm->stats.n_err++;
+ return;
+ }
+
+ io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
+ io_req->wr_status = csio_wr_status(wr_cmd);
+
+ /* Check that the ioreq exists in our active Q */
+ spin_lock_irq(&hw->lock);
+ if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
+ csio_err(mgmtm->hw,
+ "Error- Invalid IO handle recv in WR. handle: %p\n",
+ io_req);
+ mgmtm->stats.n_err++;
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ mgmtm = csio_hw_to_mgmtm(hw);
+
+ /* Dequeue from active queue */
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ spin_unlock_irq(&hw->lock);
+
+ /* io_req will be freed by completion handler */
+ if (io_req->io_cbfn)
+ io_req->io_cbfn(hw, io_req);
+}
+
+/**
+ * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
+ * @hw: HW module
+ * @cpl_op: CPL opcode
+ * @cmd: FW cmd/WR.
+ *
+ * Process received FCoE cmd/WR event from FW.
+ */
+void
+csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
+{
+ struct csio_lnode *ln;
+ struct csio_rnode *rn;
+ uint8_t portid, opcode = *(uint8_t *)cmd;
+ struct fw_fcoe_link_cmd *lcmd;
+ struct fw_wr_hdr *wr;
+ struct fw_rdev_wr *rdev_wr;
+ enum fw_fcoe_link_status lstatus;
+ uint32_t fcfi, rdev_flowid, vnpi;
+ enum csio_ln_ev evt;
+
+ if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
+
+ lcmd = (struct fw_fcoe_link_cmd *)cmd;
+ lstatus = lcmd->lstatus;
+ portid = FW_FCOE_LINK_CMD_PORTID_GET(
+ ntohl(lcmd->op_to_portid));
+ fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
+ vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
+
+ if (lstatus == FCOE_LINKUP) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_up(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+
+ } else if (lstatus == FCOE_LINKDOWN) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_down(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+ /* HW un lock here */
+ } else {
+ csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
+ lcmd->lstatus);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_PLD) {
+ wr = (struct fw_wr_hdr *) (cmd + 4);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
+ == FW_RDEV_WR) {
+
+ rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
+
+ rdev_flowid = FW_RDEV_WR_FLOWID_GET(
+ ntohl(rdev_wr->alloc_to_len16));
+ vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
+ ntohl(rdev_wr->flags_to_assoc_flowid));
+
+ csio_dbg(hw,
+ "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
+ "vnpi:0x%x\n", rdev_flowid,
+ rdev_wr->event_cause, vnpi);
+
+ if (rdev_wr->protocol != PROT_FCOE) {
+ csio_err(hw,
+ "FW_RDEV_WR: invalid proto:x%x "
+ "received with flowid:x%x\n",
+ rdev_wr->protocol,
+ rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ return;
+ }
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ csio_err(hw,
+ "FW_DEV_WR: invalid vnpi:x%x received "
+ "with flowid:x%x\n", vnpi, rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ rn = csio_confirm_rnode(ln, rdev_flowid,
+ &rdev_wr->u.fcoe_rdev);
+ if (!rn) {
+ csio_ln_dbg(ln,
+ "Failed to confirm rnode "
+ "for flowid:x%x\n", rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ /* save previous event for debugging */
+ ln->prev_evt = ln->cur_evt;
+ ln->cur_evt = rdev_wr->event_cause;
+ CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
+
+ /* Translate all the fabric events to lnode SM events */
+ evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
+ if (evt) {
+ csio_ln_dbg(ln,
+ "Posting event to lnode event:%d "
+ "cause:%d flowid:x%x\n", evt,
+ rdev_wr->event_cause, rdev_flowid);
+ csio_post_event(&ln->sm, evt);
+ }
+
+ /* Handover event to rn SM here. */
+ csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
+out_pld:
+ spin_unlock_irq(&hw->lock);
+ return;
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_MSG) {
+ wr = (struct fw_wr_hdr *) (cmd);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+ csio_ln_mgmt_wr_handler(hw, wr,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else {
+ csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+}
+
+/**
+ * csio_lnode_start - Kickstart lnode discovery.
+ * @ln: lnode
+ *
+ * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
+ */
+int
+csio_lnode_start(struct csio_lnode *ln)
+{
+ int rv = 0;
+ if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ rv = csio_fcoe_enable_link(ln, 1);
+ ln->flags |= CSIO_LNF_LINK_ENABLE;
+ }
+
+ return rv;
+}
+
+/**
+ * csio_lnode_stop - Stop the lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by the HW module to stop the lnode and its
+ * associated NPIV lnodes.
+ */
+void
+csio_lnode_stop(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
+ if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ csio_fcoe_enable_link(ln, 0);
+ ln->flags &= ~CSIO_LNF_LINK_ENABLE;
+ }
+ csio_ln_dbg(ln, "stopping ln :%p\n", ln);
+}
+
+/**
+ * csio_lnode_close - Close an lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by the HW module to close an lnode and its
+ * associated NPIV lnodes. The lnode and its associated NPIV lnodes are
+ * set to the uninitialized state.
+ */
+void
+csio_lnode_close(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_CLOSE);
+ if (csio_is_phys_ln(ln))
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+
+ csio_ln_dbg(ln, "closed ln :%p\n", ln);
+}
+
+/*
+ * csio_ln_prep_ecwr - Prepare ELS/CT WR.
+ * @io_req - IO request.
+ * @wr_len - WR len
+ * @immd_len - WR immediate data length.
+ * @sub_op - Sub opcode
+ * @sid - source portid.
+ * @did - destination portid
+ * @flow_id - flowid
+ * @fw_wr - ELS/CT WR to be prepared.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
+ uint32_t immd_len, uint8_t sub_op, uint32_t sid,
+ uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
+{
+ struct fw_fcoe_els_ct_wr *wr;
+ __be32 port_id;
+
+ wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
+ FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
+
+ wr_len = DIV_ROUND_UP(wr_len, 16);
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
+ FW_WR_LEN16(wr_len));
+ wr->els_ct_type = sub_op;
+ wr->ctl_pri = 0;
+ wr->cp_en_class = 0;
+ wr->cookie = io_req->fw_handle;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(
+ io_req->lnode->hwp, io_req->iq_idx));
+ wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
+ wr->tmo_val = (uint8_t) io_req->tmo;
+ port_id = htonl(sid);
+ memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
+ port_id = htonl(did);
+ memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
+ wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
+ return 0;
+}
+
+/*
+ * csio_ln_mgmt_submit_wr - Post elsct work request.
+ * @mgmtm - mgmtm
+ * @io_req - io request.
+ * @sub_op - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ * Prepares an ELS/CT work request and sends it to the FW.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
+ uint8_t sub_op, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_wr_pair wrp;
+ struct csio_lnode *ln = io_req->lnode;
+ struct csio_rnode *rn = io_req->rnode;
+ struct csio_hw *hw = mgmtm->hw;
+ uint8_t fw_wr[64];
+ struct ulptx_sgl dsgl;
+ uint32_t wr_size = 0;
+ uint8_t im_len = 0;
+ uint32_t wr_off = 0;
+
+ int ret = 0;
+
+ /* Calculate WR Size for this ELS REQ */
+ wr_size = sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Send as immediate data if pld < 256 */
+ if (pld_len < 256) {
+ wr_size += ALIGN(pld_len, 8);
+ im_len = (uint8_t)pld_len;
+ } else
+ wr_size += sizeof(struct ulptx_sgl);
+
+ /* Roundup WR size in units of 16 bytes */
+ wr_size = ALIGN(wr_size, 16);
+
+ /* Get WR to send ELS REQ */
+ ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
+ if (ret != 0) {
+ csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
+ io_req, ret);
+ return ret;
+ }
+
+ /* Prepare Generic WR used by all ELS/CT cmd */
+ csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
+ ln->nport_id, rn->nport_id,
+ csio_rn_flowid(rn),
+ &fw_wr[0]);
+
+ /* Copy ELS/CT WR CMD */
+ csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ wr_off += sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Copy payload to Immediate section of WR */
+ if (im_len)
+ csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
+ else {
+ /* Program DSGL to dma payload */
+ dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_MORE | ULPTX_NSGE(1));
+ dsgl.len0 = cpu_to_be32(pld_len);
+ dsgl.addr0 = cpu_to_be64(pld->paddr);
+ csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
+ sizeof(struct ulptx_sgl));
+ }
+
+ /* Issue work request to xmit ELS/CT req to FW */
+ csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
+ return ret;
+}
+
+/*
+ * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
+ * @io_req - IO Request
+ * @io_cbfn - Completion handler.
+ * @req_type - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ *
+ *
+ * This API is used to submit a management ELS/CT request.
+ * It is called with the HW lock held.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+
+ io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
+ io_req->fw_handle = (uintptr_t) (io_req);
+ io_req->eq_idx = mgmtm->eq_idx;
+ io_req->iq_idx = mgmtm->iq_idx;
+
+ rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
+ if (rv == 0) {
+ list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
+ mgmtm->stats.n_active++;
+ }
+ return rv;
+}
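+
+/*
+ * Illustrative call sequence (a hedged sketch): a management request such
+ * as an FDMI CT command would be submitted with the HW lock held, e.g.
+ *
+ *     spin_lock_irq(&hw->lock);
+ *     rv = csio_ln_mgmt_submit_req(ln->mgmt_req, done_cbfn, req_type,
+ *                                  &ln->mgmt_req->dma_buf, pld_len);
+ *     spin_unlock_irq(&hw->lock);
+ *
+ * done_cbfn, req_type and pld_len are placeholder names for the caller's
+ * completion handler, enum fcoe_cmn_type request type and payload length.
+ */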
+
+/*
+ * csio_ln_fdmi_init - FDMI Init entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_init(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_dma_buf *dma_buf;
+
+ /* Allocate MGMT request required for FDMI */
+ ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ln->mgmt_req) {
+ csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Allocate Dma buffers for FDMI response Payload */
+ dma_buf = &ln->mgmt_req->dma_buf;
+ dma_buf->len = 2048;
+ dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
+ kfree(ln->mgmt_req);
+ ln->mgmt_req = NULL;
+ return -ENOMEM;
+ }
+
+ ln->flags |= CSIO_LNF_FDMI_ENABLE;
+ return 0;
+}
+
+/*
+ * csio_ln_fdmi_exit - FDMI exit entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_exit(struct csio_lnode *ln)
+{
+ struct csio_dma_buf *dma_buf;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (!ln->mgmt_req)
+ return 0;
+
+ dma_buf = &ln->mgmt_req->dma_buf;
+ if (dma_buf->vaddr)
+ pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ln->mgmt_req);
+ return 0;
+}
+
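+/*
+ * csio_scan_done - Decide whether SCSI target scanning can stop.
+ * @ln: lnode
+ * @ticks: Current tick count.
+ * @time: Time elapsed since the scan started.
+ * @max_scan_ticks: Absolute upper bound on the scan duration.
+ * @delta_scan_ticks: Interval at which the target count is re-sampled.
+ *
+ * Returns 1 when scanning may stop: either the maximum scan time has
+ * elapsed, or the number of discovered SCSI targets has not changed
+ * since the previous sampling interval. Returns 0 otherwise.
+ */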
+int
+csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
+ unsigned long time, unsigned long max_scan_ticks,
+ unsigned long delta_scan_ticks)
+{
+ int rv = 0;
+
+ if (time >= max_scan_ticks)
+ return 1;
+
+ if (!ln->tgt_scan_tick)
+ ln->tgt_scan_tick = ticks;
+
+ if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
+ if (!ln->last_scan_ntgts)
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ else {
+ if (ln->last_scan_ntgts == ln->n_scsi_tgts)
+ return 1;
+
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ }
+ ln->tgt_scan_tick = ticks;
+ }
+ return rv;
+}
+
+/*
+ * csio_notify_lnodes:
+ * @hw: HW module
+ * @note: Notification
+ *
+ * Called from the HW SM to fan out notifications to the
+ * Lnode SM. Since the HW SM is entered with lock held,
+ * there is no need to hold locks here.
+ *
+ */
+void
+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying all nodes of event %d\n", note);
+
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+
+ switch (note) {
+ case CSIO_LN_NOTIFY_HWREADY:
+ csio_lnode_start(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWRESET:
+ case CSIO_LN_NOTIFY_HWREMOVE:
+ csio_lnode_close(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWSTOP:
+ csio_lnode_stop(ln);
+ break;
+
+ default:
+ break;
+
+ }
+ }
+}
+
+/*
+ * csio_disable_lnodes:
+ * @hw: HW module
+ * @portid: port id
+ * @disable: disable/enable flag.
+ * If disable=1, disables all lnodes hosted on the given physical port;
+ * otherwise enables all the lnodes on the given physical port.
+ * This routine needs to be called with the HW lock held.
+ */
+void
+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
+
+ /* Traverse sibling lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid != portid)
+ continue;
+
+ if (disable)
+ csio_lnode_stop(ln);
+ else
+ csio_lnode_start(ln);
+ }
+}
+
+/*
+ * csio_ln_init - Initialize an lnode.
+ * @ln: lnode
+ *
+ */
+static int
+csio_ln_init(struct csio_lnode *ln)
+{
+ int rv = -EINVAL;
+ struct csio_lnode *rln, *pln;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_init_state(&ln->sm, csio_lns_uninit);
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+ ln->fcf_flowid = CSIO_INVALID_IDX;
+
+ if (csio_is_root_ln(ln)) {
+
+ /* This is the lnode used during initialization */
+
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF record\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&ln->fcf_lsthead);
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+
+ } else { /* Either a non-root physical or a virtual lnode */
+
+ /*
+ * The rest is common for non-root physical and NPIV lnodes.
+ * Just get references to all other modules.
+ */
+ rln = csio_root_lnode(ln);
+
+ if (csio_is_npiv_ln(ln)) {
+ /* NPIV */
+ pln = csio_parent_lnode(ln);
+ kref_get(&pln->fcfinfo->kref);
+ ln->fcfinfo = pln->fcfinfo;
+ } else {
+ /* Another non-root physical lnode (FCF) */
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
+ GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF info\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+ }
+
+ } /* if (!csio_is_root_ln(ln)) */
+
+ return 0;
+err:
+ return rv;
+}
+
+static void
+csio_ln_exit(struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+
+ csio_cleanup_rns(ln);
+ if (csio_is_npiv_ln(ln)) {
+ pln = csio_parent_lnode(ln);
+ kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
+ } else {
+ kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
+ if (csio_fdmi_enable)
+ csio_ln_fdmi_exit(ln);
+ }
+ ln->fcfinfo = NULL;
+}
+
+/**
+ * csio_lnode_init - Initialize the members of an lnode.
+ * @ln: lnode
+ * @hw: HW module
+ * @pln: Parent lnode, or NULL for a physical lnode.
+ *
+ */
+int
+csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
+ struct csio_lnode *pln)
+{
+ int rv = -EINVAL;
+
+ /* Link this lnode to hw */
+ csio_lnode_to_hw(ln) = hw;
+
+ /* Link child to parent if child lnode */
+ if (pln)
+ ln->pln = pln;
+ else
+ ln->pln = NULL;
+
+ /* Initialize scsi_tgt and timers to zero */
+ ln->n_scsi_tgts = 0;
+ ln->last_scan_ntgts = 0;
+ ln->tgt_scan_tick = 0;
+
+ /* Initialize rnode list */
+ INIT_LIST_HEAD(&ln->rnhead);
+ INIT_LIST_HEAD(&ln->cln_head);
+
+ /* Initialize log level for debug */
+ ln->params.log_level = hw->params.log_level;
+
+ if (csio_ln_init(ln))
+ goto err;
+
+ /* Add lnode to list of sibling or children lnodes */
+ spin_lock_irq(&hw->lock);
+ list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
+ if (pln)
+ pln->num_vports++;
+ spin_unlock_irq(&hw->lock);
+
+ hw->num_lns++;
+
+ return 0;
+err:
+ csio_lnode_to_hw(ln) = NULL;
+ return rv;
+}
+
+/**
+ * csio_lnode_exit - De-instantiate an lnode.
+ * @ln: lnode
+ *
+ */
+void
+csio_lnode_exit(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_ln_exit(ln);
+
+ /* Remove this lnode from hw->sln_head */
+ spin_lock_irq(&hw->lock);
+
+ list_del_init(&ln->sm.sm_list);
+
+ /* If it is a child lnode, decrement the
+ * vport counter in its parent lnode
+ */
+ if (ln->pln)
+ ln->pln->num_vports--;
+
+ /* Update root lnode pointer */
+ if (list_empty(&hw->sln_head))
+ hw->rln = NULL;
+ else
+ hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
+
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_to_hw(ln) = NULL;
+ hw->num_lns--;
+}
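+
+/*
+ * Typical lnode lifetime (an illustrative sketch, not a prescribed API
+ * contract): the HW/SCSI glue code is expected to pair the entry points
+ * above roughly as
+ *
+ *     csio_lnode_init(ln, hw, pln);   attach the lnode to the HW module
+ *     csio_lnode_start(ln);           enable the FCoE link, start discovery
+ *     ...
+ *     csio_lnode_stop(ln);            post link-down, disable the link
+ *     csio_lnode_exit(ln);            drop the FCF reference, unlink from HW
+ */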
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
new file mode 100644
index 00000000000..8d84988ab06
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -0,0 +1,255 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_LNODE_H__
+#define __CSIO_LNODE_H__
+
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+
+
+#include "csio_defs.h"
+#include "csio_hw.h"
+
+#define CSIO_FCOE_MAX_NPIV 128
+#define CSIO_FCOE_MAX_RNODES 2048
+
+/* FDMI port attribute unknown speed */
+#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
+
+extern int csio_fcoe_rnodes;
+extern int csio_fdmi_enable;
+
+/* State machine events */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+
+struct csio_fcf_info {
+ struct list_head list;
+ uint8_t priority;
+ uint8_t mac[6];
+ uint8_t name_id[8];
+ uint8_t fabric[8];
+ uint16_t vf_id;
+ uint8_t vlan_id;
+ uint16_t max_fcoe_size;
+ uint8_t fc_map[3];
+ uint32_t fka_adv;
+ uint32_t fcfi;
+ uint8_t get_next:1;
+ uint8_t link_aff:1;
+ uint8_t fpma:1;
+ uint8_t spma:1;
+ uint8_t login:1;
+ uint8_t portid;
+ uint8_t spma_mac[6];
+ struct kref kref;
+};
+
+/* Defines for flags */
+#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
+#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
+#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
+#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
+
+/* Transport events */
+enum csio_ln_fc_evt {
+ CSIO_LN_FC_LINKUP = 1,
+ CSIO_LN_FC_LINKDOWN,
+ CSIO_LN_FC_RSCN,
+ CSIO_LN_FC_ATTRIB_UPDATE,
+};
+
+/* Lnode stats */
+struct csio_lnode_stats {
+ uint32_t n_link_up; /* Link up */
+ uint32_t n_link_down; /* Link down */
+ uint32_t n_err; /* error */
+ uint32_t n_err_nomem; /* memory not available */
+ uint32_t n_inval_parm; /* Invalid parameters */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_rnode_match; /* matched rnode */
+ uint32_t n_dev_loss_tmo; /* Device loss timeout */
+ uint32_t n_fdmi_err; /* fdmi err */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_rnode_alloc; /* rnode allocated */
+ uint32_t n_rnode_free; /* rnode freed */
+ uint32_t n_rnode_nomem; /* rnode alloc failure */
+ uint32_t n_input_requests; /* Input Requests */
+ uint32_t n_output_requests; /* Output Requests */
+ uint32_t n_control_requests; /* Control Requests */
+ uint32_t n_input_bytes; /* Input Bytes */
+ uint32_t n_output_bytes; /* Output Bytes */
+ uint32_t rsvd1;
+};
+
+/* Common Lnode params */
+struct csio_lnode_params {
+ uint32_t ra_tov;
+ uint32_t fcfi;
+ uint32_t log_level; /* Module level for debugging */
+};
+
+struct csio_service_parms {
+ struct fc_els_csp csp; /* Common service parms */
+ uint8_t wwpn[8]; /* WWPN */
+ uint8_t wwnn[8]; /* WWNN */
+ struct fc_els_cssp clsp[4]; /* Class service params */
+ uint8_t vvl[16]; /* Vendor version level */
+};
+
+/* Lnode */
+struct csio_lnode {
+ struct csio_sm sm; /* State machine + sibling
+ * lnode list.
+ */
+ struct csio_hw *hwp; /* Pointer to the HW module */
+ uint8_t portid; /* Port ID */
+ uint8_t rsvd1;
+ uint16_t rsvd2;
+ uint32_t dev_num; /* Device number */
+ uint32_t flags; /* Flags */
+ struct list_head fcf_lsthead; /* FCF entries */
+ struct csio_fcf_info *fcfinfo; /* FCF in use */
+ struct csio_ioreq *mgmt_req; /* MGMT request */
+
+ /* FCoE identifiers */
+ uint8_t mac[6];
+ uint32_t nport_id;
+ struct csio_service_parms ln_sparm; /* Service parms */
+
+ /* Firmware identifiers */
+ uint32_t fcf_flowid; /* fcf flowid */
+ uint32_t vnp_flowid;
+ uint16_t ssn_cnt; /* Registered Session */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+
+ /* Children */
+ struct list_head cln_head; /* Head of the children lnode
+ * list.
+ */
+ uint32_t num_vports; /* Total NPIV/children LNodes*/
+ struct csio_lnode *pln; /* Parent lnode of child
+ * lnodes.
+ */
+ struct list_head cmpl_q; /* Pending I/Os on this lnode */
+
+ /* Remote node information */
+ struct list_head rnhead; /* Head of rnode list */
+ uint32_t num_reg_rnodes; /* Number of rnodes registered
+ * with the host.
+ */
+ uint32_t n_scsi_tgts; /* Number of scsi targets
+ * found
+ */
+ uint32_t last_scan_ntgts;/* Number of scsi targets
+ * found per last scan.
+ */
+ uint32_t tgt_scan_tick; /* timer started after
+ * new tgt found
+ */
+ /* FC transport data */
+ struct fc_vport *fc_vport;
+ struct fc_host_statistics fch_stats;
+
+ struct csio_lnode_stats stats; /* Common lnode stats */
+ struct csio_lnode_params params; /* Common lnode params */
+};
+
+#define csio_lnode_to_hw(ln) ((ln)->hwp)
+#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
+#define csio_parent_lnode(ln) ((ln)->pln)
+#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
+#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
+#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
+
+#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
+#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
+#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
+
+
+#define csio_ln_dbg(_ln, _fmt, ...) \
+ csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_err(_ln, _fmt, ...) \
+ csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_warn(_ln, _fmt, ...) \
+ csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+/* HW->Lnode notifications */
+enum csio_ln_notify {
+ CSIO_LN_NOTIFY_HWREADY = 1,
+ CSIO_LN_NOTIFY_HWSTOP,
+ CSIO_LN_NOTIFY_HWREMOVE,
+ CSIO_LN_NOTIFY_HWRESET,
+};
+
+void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
+int csio_is_lnode_ready(struct csio_lnode *);
+void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
+struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
+int csio_get_phy_port_stats(struct csio_hw *, uint8_t,
+ struct fw_fcoe_port_stats *);
+int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
+void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
+void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
+int csio_ln_fdmi_start(struct csio_lnode *, void *);
+int csio_lnode_start(struct csio_lnode *);
+void csio_lnode_stop(struct csio_lnode *);
+void csio_lnode_close(struct csio_lnode *);
+int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
+ struct csio_lnode *);
+void csio_lnode_exit(struct csio_lnode *);
+
+#endif /* ifndef __CSIO_LNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
new file mode 100644
index 00000000000..5b27c48f683
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -0,0 +1,1750 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_mb.h"
+#include "csio_wr.h"
+
+#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
+
+/* MB Command/Response Helpers */
+/*
+ * csio_mb_fw_retval - FW return value from a mailbox response.
+ * @mbp: Mailbox structure
+ *
+ */
+enum fw_retval
+csio_mb_fw_retval(struct csio_mb *mbp)
+{
+ struct fw_cmd_hdr *hdr;
+
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
+}
+
+/*
+ * csio_mb_hello - FW HELLO command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @m_mbox: Master mailbox number, if any.
+ * @a_mbox: Mailbox number for async notifications.
+ * @master: Device mastership.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->err_to_clearinit = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
+ FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
+ FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT);
+
+}
+
+/*
+ * csio_mb_process_hello_rsp - FW HELLO response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @state: Device state returned by the firmware.
+ * @mpfn: Master pfn
+ *
+ */
+void
+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, enum csio_dev_state *state,
+ uint8_t *mpfn)
+{
+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
+ uint32_t value;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS) {
+ hw->fwrev = ntohl(rsp->fwrev);
+
+ value = ntohl(rsp->err_to_clearinit);
+ *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
+
+ if (value & FW_HELLO_CMD_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ else if (value & FW_HELLO_CMD_ERR)
+ *state = CSIO_DEV_STATE_ERR;
+ else
+ *state = CSIO_DEV_STATE_UNINIT;
+ }
+}
+
+/*
+ * csio_mb_bye - FW BYE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_reset - FW RESET command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @reset: Type of reset.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reset, int halt,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->val = htonl(reset);
+ cmdp->halt_pkd = htonl(halt);
+
+}
+
+/*
+ * csio_mb_params - FW PARAMS command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number.
+ * @nparams: Number of parameters
+ * @params: Parameter mnemonic array.
+ * @val: Parameter value array.
+ * @wr: Write/Read PARAMS.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int nparams,
+ const u32 *params, u32 *val, bool wr,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ uint32_t i;
+ uint32_t temp_params = 0, temp_val = 0;
+ struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
+ __be32 *p = &cmdp->param[0].mnem;
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ) |
+ FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Write Params */
+ if (wr) {
+ while (nparams--) {
+ temp_params = *params++;
+ temp_val = *val++;
+
+ *p++ = htonl(temp_params);
+ *p++ = htonl(temp_val);
+ }
+ } else {
+ for (i = 0; i < nparams; i++, p += 2) {
+ temp_params = *params++;
+ *p = htonl(temp_params);
+ }
+ }
+
+}
+
+/*
+ * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @nparams: Number of parameters
+ * @val: Parameter value array.
+ *
+ */
+void
+csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, unsigned int nparams,
+ u32 *val)
+{
+ struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
+ uint32_t i;
+ __be32 *p = &rsp->param[0].val;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS)
+ for (i = 0; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+}
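+
+/*
+ * Illustrative read flow (a hedged sketch): a caller reads firmware
+ * parameters by building the PARAMS command, issuing the mailbox and then
+ * decoding the reply; param[] and val[] are caller-provided arrays of
+ * nparams entries:
+ *
+ *     csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ *                    nparams, param, val, false, NULL);
+ *     if (csio_mb_issue(hw, mbp))
+ *             goto out;
+ *     csio_mb_process_read_params_rsp(hw, mbp, &retval, nparams, val);
+ */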
+
+/*
+ * csio_mb_ldst - FW LDST command
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: timeout
+ * @reg: register
+ *
+ */
+void
+csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
+{
+ struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
+
+ /*
+ * Construct and send the Firmware LDST Command to retrieve the
+ * specified PCI-E Configuration Space register.
+ */
+ ldst_cmd->op_to_addrspace =
+ htonl(FW_CMD_OP(FW_LDST_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd->u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
+ ldst_cmd->u.pcie.r = (uint8_t)reg;
+}
+
+/*
+ *
+ * csio_mb_caps_config - FW Read/Write Capabilities command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @wr: Write if 1, Read if 0
+ * @init: Turn on initiator mode.
+ * @tgt: Turn on target mode.
+ * @cofld: If 1, Control Offload for FCoE
+ * @cbfn: Callback, if any.
+ *
+ * This helper assumes that cmdp has MB payload from a previous CAPS
+ * read command.
+ */
+void
+csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ bool wr, bool init, bool tgt, bool cofld,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_caps_config_cmd *cmdp =
+ (struct fw_caps_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Read config */
+ if (!wr)
+ return;
+
+ /* Write config */
+ cmdp->fcoecaps = 0;
+
+ if (cofld)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
+ if (init)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
+ if (tgt)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+}
+
+void
+csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t tmo, uint8_t mode, unsigned int flags,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ struct fw_rss_glb_config_cmd *cmdp =
+ (struct fw_rss_glb_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ cmdp->u.manual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ cmdp->u.basicvirtual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ }
+}
+
+
+/*
+ * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @pf:
+ * @vf:
+ * @txq:
+ * @txq_eht_ctrl:
+ * @rxqi:
+ * @rxq:
+ * @tc:
+ * @vi:
+ * @pmask:
+ * @rcaps:
+ * @wxcaps:
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int txq,
+ unsigned int txq_eth_ctrl, unsigned int rxqi,
+ unsigned int rxq, unsigned int tc, unsigned int vi,
+ unsigned int cmask, unsigned int pmask, unsigned int nexactf,
+ unsigned int rcaps, unsigned int wxcaps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+
+ cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
+ FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
+ FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexactf));
+ cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+}
+
+#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/*
+ * csio_mb_port- FW PORT command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @portid: Port ID to get/set info
+ * @wr: Write/Read PORT information.
+ * @fc: Flow control
+ * @caps: Port capabilities to set.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+ unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_EXEC : FW_CMD_READ) |
+ FW_PORT_CMD_PORTID(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (fc & PAUSE_RX)
+ lfc |= FW_PORT_CAP_FC_RX;
+ if (fc & PAUSE_TX)
+ lfc |= FW_PORT_CAP_FC_TX;
+
+ if (!(caps & FW_PORT_CAP_ANEG))
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
+ else
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
+ lfc | mdi);
+}
+
+/*
+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @caps: port capabilities
+ *
+ */
+void
+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, uint16_t *caps)
+{
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
+
+ if (*retval == FW_SUCCESS)
+ *caps = ntohs(rsp->u.info.pcap);
+}
+
+/*
+ * csio_mb_initialize - FW INITIALIZE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an
+ * Ingress DMA queue in the firmware.
+ *
+ * @hw: The hw structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->type_to_iqandstindex = htonl(
+ FW_IQ_CMD_VIID(iq_params->viid) |
+ FW_IQ_CMD_TYPE(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
+
+ cmdp->fl0size = htons(iq_params->fl0size);
+ cmdp->fl1size = htons(iq_params->fl1size);
+
+} /* csio_mb_iq_alloc */
+
+/*
+ * csio_mb_iq_write - Initializes the mailbox for writing into an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an iq-alloc request.
+ * @iq_params: Ingress queue params needed for writing.
+ * @cbfn: The call-back function
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this IQ write request can be cascaded with a previous
+ * IQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ uint32_t iq_start_stop = (iq_params->iq_start) ?
+ FW_IQ_CMD_IQSTART(1) :
+ FW_IQ_CMD_IQSTOP(1);
+
+ /*
+ * If this IQ write is cascaded with IQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(iq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->iqid |= htons(iq_params->iqid);
+ cmdp->fl0id |= htons(iq_params->fl0id);
+ cmdp->fl1id |= htons(iq_params->fl1id);
+ cmdp->type_to_iqandstindex |= htonl(
+ FW_IQ_CMD_IQANDST(iq_params->iqandst) |
+ FW_IQ_CMD_IQANUS(iq_params->iqanus) |
+ FW_IQ_CMD_IQANUD(iq_params->iqanud) |
+ FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
+ cmdp->iqdroprss_to_iqesize |= htons(
+ FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
+ FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
+ FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
+ FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
+ FW_IQ_CMD_IQESIZE(iq_params->iqesize));
+
+ cmdp->iqsize |= htons(iq_params->iqsize);
+ cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
+
+ if (iq_params->type == 0) {
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
+ FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
+ }
+
+ if (iq_params->fl0size && iq_params->fl0addr &&
+ (iq_params->fl0id != 0xFFFF)) {
+
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
+ FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
+ FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
+ cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
+ FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
+ FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
+ FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
+ FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
+ FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
+ cmdp->fl0size |= htons(iq_params->fl0size);
+ cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
+ }
+} /* csio_mb_iq_write */
+
+/*
+ * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation & writing.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
+ csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
+} /* csio_mb_iq_alloc_write */
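+
+/*
+ * Illustrative call sequence (a hedged sketch): because the alloc and
+ * write helpers OR their bits into the same mailbox payload, an ingress
+ * queue can be allocated and started with a single mailbox issue:
+ *
+ *     csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ *                            &iq_params, cbfn);
+ *     if (csio_mb_issue(hw, mbp))
+ *             goto out;
+ *     csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iq_params);
+ */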
+
+/*
+ * csio_mb_iq_alloc_write_rsp - Process the response of the ingress DMA
+ * queue allocation & write mailbox.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @iq_params: Ingress queue parameters, after allocation and write.
+ *
+ */
+void
+csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *ret_val,
+ struct csio_iq_params *iq_params)
+{
+ struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (*ret_val == FW_SUCCESS) {
+ iq_params->physiqid = ntohs(rsp->physiqid);
+ iq_params->iqid = ntohs(rsp->iqid);
+ iq_params->fl0id = ntohs(rsp->fl0id);
+ iq_params->fl1id = ntohs(rsp->fl1id);
+ } else {
+ iq_params->physiqid = iq_params->iqid =
+ iq_params->fl0id = iq_params->fl1id = 0;
+ }
+} /* csio_mb_iq_alloc_write_rsp */
+
+/*
+ * csio_mb_iq_free - Initializes the mailbox for freeing a
+ * specified Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Parameters of ingress queue, that is to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
+
+ cmdp->iqid = htons(iq_params->iqid);
+ cmdp->fl0id = htons(iq_params->fl0id);
+ cmdp->fl1id = htons(iq_params->fl1id);
+
+} /* csio_mb_iq_free */
+
+/*
+ * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
+ * an offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_mb_eq_ofld_alloc */
+
+/*
+ * csio_mb_eq_ofld_write - Initializes the mailbox for writing
+ * an allocated offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an EQ-alloc request.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this EQ write request can be cascaded with a previous
+ * EQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
+ FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
+
+ /*
+ * If this EQ write is cascaded with EQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(eq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+ cmdp->fetchszm_to_iqid |= htonl(
+ FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
+
+ cmdp->dcaen_to_eqsize |= htonl(
+ FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
+
+ cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
+
+} /* csio_mb_eq_ofld_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
+ * writing into an Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
+ void *priv, uint32_t mb_tmo,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
+ csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
+ eq_ofld_params, cbfn);
+} /* csio_mb_eq_ofld_alloc_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write_rsp - Process the response of the egress
+ * DMA queue allocation & write mailbox.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp, enum fw_retval *ret_val,
+ struct csio_eq_params *eq_ofld_params)
+{
+ struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+
+ if (*ret_val == FW_SUCCESS) {
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
+ ntohl(rsp->eqid_pkd));
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
+ ntohl(rsp->physeqid_pkd));
+ } else
+ eq_ofld_params->eqid = 0;
+
+} /* csio_mb_eq_ofld_alloc_write_rsp */
+
+/*
+ * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
+ * specified Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data area.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Parameters of the egress queue to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+} /* csio_mb_eq_ofld_free */
+
+/*
+ * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
+ * condition.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call back function.
+ *
+ *
+ */
+void
+csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
+ uint8_t cos, bool link_status, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_link_cmd *cmdp =
+ (struct fw_fcoe_link_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_portid = htonl((
+ FW_CMD_OP(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_FCOE_LINK_CMD_PORTID(port_id)));
+ cmdp->sub_opcode_fcfi = htonl(
+ FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
+ FW_FCOE_LINK_CMD_FCFI(fcfi));
+ cmdp->lstatus = link_status;
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_write_fcoe_link_cond_init_mb */
+
+/*
+ * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
+ * resource information (FW_FCOE_RES_INFO_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_res_info_cmd *cmdp =
+ (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+
+ cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ));
+
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_res_info_init_mb */
+
+/*
+ * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
+ * in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: VNP flow id.
+ * @iqid: Ingress queue id.
+ * @vnport_wwnn: vnport WWNN
+ * @vnport_wwpn: vnport WWPN
+ * @cbfn: The call-back function.
+ *
+ *
+ */
+void
+csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
+ uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi)));
+
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+
+ cmdp->iqid = htons(iqid);
+
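+ /* If neither WWNN nor WWPN is supplied, ask the firmware to generate them */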
+ if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
+ cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
+
+ if (vnport_wwnn)
+ memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
+ if (vnport_wwpn)
+ memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
+
+} /* csio_fcoe_vnp_alloc_init_mb */
+
+/*
+ * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: VNP flow id.
+ * @cbfn: The call-back handler.
+ */
+void
+csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
+ * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF flow id
+ * @vnpi: VNP flow id
+ * @cbfn: The call-back function.
+ * Return: None
+ */
+void
+csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
+ * FCF records.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @portid: Port ID.
+ * @fcfi: FCF flow id.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_fcf_cmd *cmdp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_FCF_CMD_FCFI(fcfi));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_fcf_init_mb */
+
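+/*
+ * csio_fcoe_read_portparams_init_mb - Initializes the mailbox for reading
+ * FCoE per-port statistics (FW_FCOE_STATS_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @portparams: Port parameters: port id, starting stats index and count.
+ * @cbfn: The call-back function
+ *
+ */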
+void
+csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *,
+ struct csio_mb *))
+{
+ struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+ mbp->mb_size = 64;
+
+ cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
+
+ cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
+ FW_FCOE_STATS_CMD_PORT(portparams->portid);
+
+ cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
+ FW_FCOE_STATS_CMD_PORT_VALID;
+
+} /* csio_fcoe_read_portparams_init_mb */
+
+void
+csio_mb_process_portparams_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats)
+{
+ struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+ struct fw_fcoe_port_stats stats;
+ uint8_t *src;
+ uint8_t *dst;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));
+
+ memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
+
+ if (*retval == FW_SUCCESS) {
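+ /*
+ * Each stats flit is 8 bytes. portparams->idx is the 1-based index
+ * of the first flit requested, and the response carries the flits
+ * starting at flit CSIO_STATS_OFFSET.
+ */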
+ dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
+ src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
+ memcpy(dst, src, (portparams->nstats * 8));
+ if (portparams->idx == 1) {
+ /* Get the first 6 flits from the Mailbox */
+ portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
+ portstats->tx_bcast_frames = stats.tx_bcast_frames;
+ portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
+ portstats->tx_mcast_frames = stats.tx_mcast_frames;
+ portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
+ portstats->tx_ucast_frames = stats.tx_ucast_frames;
+ }
+ if (portparams->idx == 7) {
+ /* Get the second 6 flits from the Mailbox */
+ portstats->tx_drop_frames = stats.tx_drop_frames;
+ portstats->tx_offload_bytes = stats.tx_offload_bytes;
+ portstats->tx_offload_frames = stats.tx_offload_frames;
+#if 0
+ portstats->rx_pf_bytes = stats.rx_pf_bytes;
+ portstats->rx_pf_frames = stats.rx_pf_frames;
+#endif
+ portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
+ portstats->rx_bcast_frames = stats.rx_bcast_frames;
+ portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
+ }
+ if (portparams->idx == 13) {
+ /* Get the last 4 flits from the Mailbox */
+ portstats->rx_mcast_frames = stats.rx_mcast_frames;
+ portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
+ portstats->rx_ucast_frames = stats.rx_ucast_frames;
+ portstats->rx_err_frames = stats.rx_err_frames;
+ }
+ }
+}
+
+/* Entry points/APIs for MB module */
+/*
+ * csio_mb_intr_enable - Enable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
+ */
+void
+csio_mb_intr_enable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+/*
+ * csio_mb_intr_disable - Disable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Clears the MBMSGRDYINTEN bit in the CIM HostInterruptEnable register.
+ */
+void
+csio_mb_intr_disable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+static void
+csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
+{
+ struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
+
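+ /* Debug type 1 carries a formatted FW print message; anything else
+ * is treated as a FW assertion.
+ */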
+ if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
+ csio_info(hw, "FW print message:\n");
+ csio_info(hw, "\tdebug->dprtstridx = %d\n",
+ ntohs(dbg->u.prt.dprtstridx));
+ csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam0));
+ csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam1));
+ csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam2));
+ csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam3));
+ } else {
+ /* This is a FW assertion */
+ csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ dbg->u.assert.filename_0_7,
+ ntohl(dbg->u.assert.line),
+ ntohl(dbg->u.assert.x),
+ ntohl(dbg->u.assert.y));
+ }
+}
+
+static void
+csio_mb_debug_cmd_handler(struct csio_hw *hw)
+{
+ int i;
+ __be64 cmd[CSIO_MB_MAX_REGS];
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = sizeof(struct fw_debug_cmd);
+
+ /* Copy mailbox data */
+ for (i = 0; i < size; i += 8)
+ cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
+
+ csio_mb_dump_fw_dbg(hw, cmd);
+
+ /* Notify FW of the mailbox by handing ownership back to the firmware */
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+}
+
+/*
+ * csio_mb_issue - generic routine for issuing Mailbox commands.
+ * @hw: The HW structure
+ * @mbp: Mailbox command to issue
+ *
+ * Caller should hold hw lock across this call.
+ */
+int
+csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ uint32_t owner, ctl;
+ int i;
+ uint32_t ii;
+ __be64 *cmd = mbp->mb;
+ __be64 hdr;
+ struct csio_mbm *mbm = &hw->mbm;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = mbp->mb_size;
+ int rv = -EINVAL;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /* Determine mode */
+ if (mbp->mb_cbfn == NULL) {
+ /* Need to issue/get results in the same context */
+ if (mbp->tmo < CSIO_MB_POLL_FREQ) {
+ csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
+ goto error_out;
+ }
+ } else if (!csio_is_host_intr_enabled(hw) ||
+ !csio_is_hw_intr_enabled(hw)) {
+ csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
+ *((uint8_t *)mbp->mb));
+ goto error_out;
+ }
+
+ if (mbm->mcurrent != NULL) {
+ /* Queue mbox cmd, if another mbox cmd is active */
+ if (mbp->mb_cbfn == NULL) {
+ rv = -EBUSY;
+ csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb));
+
+ goto error_out;
+ } else {
+ list_add_tail(&mbp->list, &mbm->req_q);
+ CSIO_INC_STATS(mbm, n_activeq);
+
+ return 0;
+ }
+ }
+
+ /* Now get ownership of mailbox */
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+
+ if (!csio_mb_is_host_owner(owner)) {
+
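+ /*
+ * Re-read ownership a few times while it is still unassigned
+ * (CSIO_MBOWNER_NONE), in case the firmware is about to hand
+ * the mailbox over to the host.
+ */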
+ for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ /*
+ * Mailbox unavailable. In immediate mode, fail the command.
+ * In other modes, enqueue the request.
+ */
+ if (!csio_mb_is_host_owner(owner)) {
+ if (mbp->mb_cbfn == NULL) {
+ rv = owner ? -EBUSY : -ETIMEDOUT;
+
+ csio_dbg(hw,
+ "Couldnt own Mailbox %x op:0x%x "
+ "owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb), owner);
+ goto error_out;
+ } else {
+ if (mbm->mcurrent == NULL) {
+ csio_err(hw,
+ "Couldnt own Mailbox %x "
+ "op:0x%x owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb),
+ owner);
+ csio_err(hw,
+ "No outstanding driver"
+ " mailbox as well\n");
+ goto error_out;
+ }
+ }
+ }
+ }
+
+ /* Mailbox is available, copy mailbox data into it */
+ for (i = 0; i < size; i += 8) {
+ csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
+ cmd++;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ /* Start completion timers in non-immediate modes and notify FW */
+ if (mbp->mb_cbfn != NULL) {
+ mbm->mcurrent = mbp;
+ mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
+ MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+ } else
+ csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ /* Flush posted writes */
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+
+ CSIO_INC_STATS(mbm, n_req);
+
+ if (mbp->mb_cbfn)
+ return 0;
+
+ /* Poll for completion in immediate mode */
+ cmd = mbp->mb;
+
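+ /*
+ * Busy-wait in CSIO_MB_POLL_FREQ millisecond steps until the
+ * caller-specified timeout expires.
+ */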
+ for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
+ mdelay(CSIO_MB_POLL_FREQ);
+
+ /* Check for response */
+ ctl = csio_rd_reg32(hw, ctl_reg);
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_wr_reg32(hw, 0, ctl_reg);
+ continue;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ continue;
+ }
+
+ /* Copy response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+ csio_wr_reg32(hw, 0, ctl_reg);
+
+ if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ CSIO_INC_STATS(mbm, n_err);
+
+ CSIO_INC_STATS(mbm, n_rsp);
+ return 0;
+ }
+ }
+
+ CSIO_INC_STATS(mbm, n_tmo);
+
+ csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
+ hw->pfn, *((uint8_t *)cmd));
+
+ return -ETIMEDOUT;
+
+error_out:
+ CSIO_INC_STATS(mbm, n_err);
+ return rv;
+}
+
+/*
+ * csio_mb_completions - Completion handler for Mailbox commands
+ * @hw: The HW structure
+ * @cbfn_q: Completion queue.
+ *
+ */
+void
+csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ enum fw_retval rv;
+
+ while (!list_empty(cbfn_q)) {
+ mbp = list_first_entry(cbfn_q, struct csio_mb, list);
+ list_del_init(&mbp->list);
+
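+ /*
+ * Cancelled mailboxes carry FW_HOSTERROR (see csio_mb_cancel_all)
+ * and are counted neither as errors nor as responses.
+ */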
+ rv = csio_mb_fw_retval(mbp);
+ if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
+ CSIO_INC_STATS(mbm, n_err);
+ else if (rv != FW_HOSTERROR)
+ CSIO_INC_STATS(mbm, n_rsp);
+
+ if (mbp->mb_cbfn)
+ mbp->mb_cbfn(hw, mbp);
+ }
+}
+
+static void
+csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
+{
+ static char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
+ };
+
+ struct csio_pport *port = &hw->pport[port_id];
+
+ if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
+ csio_info(hw, "Port:%d - port module unplugged\n", port_id);
+ else if (port->mod_type < ARRAY_SIZE(mod_str))
+ csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
+ mod_str[port->mod_type]);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ csio_info(hw,
+ "Port:%d - unsupported optical port module "
+ "inserted\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ csio_info(hw,
+ "Port:%d - unknown port module inserted, forcing "
+ "TWINAX\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ csio_info(hw, "Port:%d - transceiver module error\n", port_id);
+ else
+ csio_info(hw, "Port:%d - unknown module type %d inserted\n",
+ port_id, port->mod_type);
+}
+
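+/*
+ * csio_mb_fwevt_handler - Handles mailbox-related firmware events.
+ * @hw: The HW structure
+ * @cmd: Firmware command received on the event queue.
+ *
+ * Handles FW_PORT_CMD link/module change notifications and FW_DEBUG_CMD
+ * messages received on the firmware event queue.
+ */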
+int
+csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
+{
+ uint8_t opcode = *(uint8_t *)cmd;
+ struct fw_port_cmd *pcmd;
+ uint8_t port_id;
+ uint32_t link_status;
+ uint16_t action;
+ uint8_t mod_type;
+
+ if (opcode == FW_PORT_CMD) {
+ pcmd = (struct fw_port_cmd *)cmd;
+ port_id = FW_PORT_CMD_PORTID_GET(
+ ntohl(pcmd->op_to_portid));
+ action = FW_PORT_CMD_ACTION_GET(
+ ntohl(pcmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
+ action);
+ return -EINVAL;
+ }
+
+ link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
+ mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
+
+ hw->pport[port_id].link_status =
+ FW_PORT_CMD_LSTATUS_GET(link_status);
+ hw->pport[port_id].link_speed =
+ FW_PORT_CMD_LSPEED_GET(link_status);
+
+ csio_info(hw, "Port:%x - LINK %s\n", port_id,
+ FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
+
+ if (mod_type != hw->pport[port_id].mod_type) {
+ hw->pport[port_id].mod_type = mod_type;
+ csio_mb_portmod_changed(hw, port_id);
+ }
+ } else if (opcode == FW_DEBUG_CMD) {
+ csio_mb_dump_fw_dbg(hw, cmd);
+ } else {
+ csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_mb_isr_handler - Handle mailboxes related interrupts.
+ * @hw: The HW structure
+ *
+ * Called from the ISR to handle Mailbox related interrupts.
+ * HW Lock should be held across this call.
+ */
+int
+csio_mb_isr_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ __be64 *cmd;
+ uint32_t ctl, cim_cause, pl_cause;
+ int i;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size;
+ __be64 hdr;
+ struct fw_cmd_hdr *fw_hdr;
+
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+
+ if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+ CSIO_INC_STATS(hw, n_mbint_unexp);
+ return -EINVAL;
+ }
+
+ /*
+ * The cause registers below HAVE to be cleared in this order:
+ * the low-level cause register first, then the upper-level one.
+ * In other words, CIM cause first, followed by PL cause.
+ */
+ csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+ csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+
+ ctl = csio_rd_reg32(hw, ctl_reg);
+
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_warn(hw,
+ "Stray mailbox interrupt recvd,"
+ " mailbox data not valid\n");
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+ return -EINVAL;
+ }
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ return -EINVAL;
+#if 0
+ case FW_ERROR_CMD:
+ case FW_INITIALIZE_CMD: /* When we are not master */
+#endif
+ }
+
+ CSIO_ASSERT(mbp != NULL);
+
+ cmd = mbp->mb;
+ size = mbp->mb_size;
+ /* Get response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+
+ mbm->mcurrent = NULL;
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, &mbm->cbfn_q);
+ CSIO_INC_STATS(mbm, n_cbfnq);
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
+ CSIO_INC_STATS(hw, n_evt_drop);
+
+ return 0;
+
+ } else {
+ /*
+ * We can get here if mailbox MSIX vector is shared,
+ * or in INTx case. Or a stray interrupt.
+ */
+ csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+}
+
+/*
+ * csio_mb_tmo_handler - Timeout handler
+ * @hw: The HW structure
+ *
+ */
+struct csio_mb *
+csio_mb_tmo_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /*
+ * Could be a race b/w the completion handler and the timer
+ * and the completion handler won that race.
+ */
+ if (mbp == NULL) {
+ CSIO_DB_ASSERT(0);
+ return NULL;
+ }
+
+ fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
+ FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
+
+ mbm->mcurrent = NULL;
+ CSIO_INC_STATS(mbm, n_tmo);
+ fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
+
+ return mbp;
+}
+
+/*
+ * csio_mb_cancel_all - Cancel all waiting commands.
+ * @hw: The HW structure
+ * @cbfn_q: The callback queue.
+ *
+ * Caller should hold hw lock across this call.
+ */
+void
+csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ struct fw_cmd_hdr *hdr;
+ struct list_head *tmp;
+
+ if (mbm->mcurrent) {
+ mbp = mbm->mcurrent;
+
+ /* Stop mailbox completion timer */
+ del_timer_sync(&mbm->timer);
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, cbfn_q);
+ mbm->mcurrent = NULL;
+ }
+
+ if (!list_empty(&mbm->req_q)) {
+ list_splice_tail_init(&mbm->req_q, cbfn_q);
+ mbm->stats.n_activeq = 0;
+ }
+
+ if (!list_empty(&mbm->cbfn_q)) {
+ list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+ }
+
+ if (list_empty(cbfn_q))
+ return;
+
+ list_for_each(tmp, cbfn_q) {
+ mbp = (struct csio_mb *)tmp;
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
+ hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
+
+ CSIO_INC_STATS(mbm, n_cancel);
+ hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
+ }
+}
+
+/*
+ * csio_mbm_init - Initialize Mailbox module
+ * @mbm: Mailbox module
+ * @hw: The HW structure
+ * @timer_fn: Timer callback invoked on mailbox time-outs.
+ *
+ * Initialize timer and the request/response queues.
+ */
+int
+csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
+ void (*timer_fn)(uintptr_t))
+{
+ struct timer_list *timer = &mbm->timer;
+
+ init_timer(timer);
+ timer->function = timer_fn;
+ timer->data = (unsigned long)hw;
+
+ INIT_LIST_HEAD(&mbm->req_q);
+ INIT_LIST_HEAD(&mbm->cbfn_q);
+ csio_set_mb_intr_idx(mbm, -1);
+
+ return 0;
+}
+
+/*
+ * csio_mbm_exit - Uninitialize mailbox module
+ * @mbm: Mailbox module
+ *
+ * Stop timer.
+ */
+void
+csio_mbm_exit(struct csio_mbm *mbm)
+{
+ del_timer_sync(&mbm->timer);
+
+ CSIO_DB_ASSERT(mbm->mcurrent == NULL);
+ CSIO_DB_ASSERT(list_empty(&mbm->req_q));
+ CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
+}
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
new file mode 100644
index 00000000000..1788ea506f3
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -0,0 +1,278 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_MB_H__
+#define __CSIO_MB_H__
+
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+#include "csio_defs.h"
+
+#define CSIO_STATS_OFFSET (2)
+#define CSIO_NUM_STATS_PER_MB (6)
+
+struct fw_fcoe_port_cmd_params {
+ uint8_t portid;
+ uint8_t idx;
+ uint8_t nstats;
+};
+
+#define CSIO_DUMP_MB(__hw, __num, __mb) \
+ csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
+ (unsigned long long)csio_rd_reg64(__hw, __mb), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 56))
+
+#define CSIO_MB_MAX_REGS 8
+#define CSIO_MAX_MB_SIZE 64
+#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
+#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
+
+/* Device master in HELLO command */
+enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
+
+enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
+
+enum csio_dev_state {
+ CSIO_DEV_STATE_UNINIT,
+ CSIO_DEV_STATE_INIT,
+ CSIO_DEV_STATE_ERR
+};
+
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y(0) | \
+ FW_PARAMS_PARAM_Z(0))
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
+do { \
+ if (__clear) \
+ memset((__cp), 0, \
+ CSIO_MB_MAX_REGS * sizeof(__be64)); \
+ INIT_LIST_HEAD(&(__mbp)->list); \
+ (__mbp)->tmo = (__tmo); \
+ (__mbp)->priv = (void *)(__priv); \
+ (__mbp)->mb_cbfn = (__fn); \
+ (__mbp)->mb_size = sizeof(*(__cp)); \
+} while (0)
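+
+/*
+ * Illustrative usage of CSIO_INIT_MBP (fw_foo_cmd is a placeholder for the
+ * actual firmware command type): the command structure is overlaid on
+ * mbp->mb, so mb_size is derived from sizeof(*__cp):
+ *
+ *	struct fw_foo_cmd *cmdp = (struct fw_foo_cmd *)(mbp->mb);
+ *	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+ */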
+
+struct csio_mbm_stats {
+ uint32_t n_req; /* number of mbox req */
+ uint32_t n_rsp; /* number of mbox rsp */
+ uint32_t n_activeq; /* number of mbox req active Q */
+ uint32_t n_cbfnq; /* number of mbox req cbfn Q */
+ uint32_t n_tmo; /* number of mbox timeout */
+ uint32_t n_cancel; /* number of mbox cancel */
+ uint32_t n_err; /* number of mbox error */
+};
+
+/* Driver version of Mailbox */
+struct csio_mb {
+ struct list_head list; /* for req/resp */
+ /* queue in driver */
+ __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
+ int mb_size; /* Size of this
+ * mailbox.
+ */
+ uint32_t tmo; /* Timeout */
+ struct completion cmplobj; /* MB Completion
+ * object
+ */
+ void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
+ /* Callback fn */
+ void *priv; /* Owner private ptr */
+};
+
+struct csio_mbm {
+ uint32_t a_mbox; /* Async mbox num */
+ uint32_t intr_idx; /* Interrupt index */
+ struct timer_list timer; /* Mbox timer */
+ struct list_head req_q; /* Mbox request queue */
+ struct list_head cbfn_q; /* Mbox completion q */
+ struct csio_mb *mcurrent; /* Current mailbox */
+ uint32_t req_q_cnt; /* Outstanding mbox
+ * cmds
+ */
+ struct csio_mbm_stats stats; /* Statistics */
+};
+
+#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
+#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
+
+struct csio_iq_params;
+struct csio_eq_params;
+
+enum fw_retval csio_mb_fw_retval(struct csio_mb *);
+
+/* MB helpers */
+void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint32_t, uint32_t, enum csio_dev_master,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, enum csio_dev_state *,
+ uint8_t *);
+
+void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
+ unsigned int, unsigned int, const u32 *, u32 *, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, unsigned int , u32 *);
+
+void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reg);
+
+void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
+ bool, bool, bool, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
+ uint32_t, uint8_t, unsigned int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint8_t, bool, uint32_t, uint16_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, uint16_t *);
+
+void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_iq_params *);
+
+void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_eq_params *);
+
+void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t , struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
+ uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t , uint16_t,
+ uint8_t [8], uint8_t [8],
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t , uint32_t, uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t, uint32_t,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
+ struct csio_mb *mbp, uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats);
+
+/* MB module functions */
+int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
+ void (*)(uintptr_t));
+void csio_mbm_exit(struct csio_mbm *);
+void csio_mb_intr_enable(struct csio_hw *);
+void csio_mb_intr_disable(struct csio_hw *);
+
+int csio_mb_issue(struct csio_hw *, struct csio_mb *);
+void csio_mb_completions(struct csio_hw *, struct list_head *);
+int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
+int csio_mb_isr_handler(struct csio_hw *);
+struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
+void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
+
+#endif /* ifndef __CSIO_MB_H__ */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
new file mode 100644
index 00000000000..51c6a388de2
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -0,0 +1,913 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
+static void csio_rnode_exit(struct csio_rnode *);
+
+/* State machine forward declarations */
+static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
+
+/* RNF event mapping */
+static enum csio_rn_ev fwevt_to_rnevt[] = {
+ CSIO_RNFE_NONE, /* None */
+ CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
+ CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
+ CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
+ CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
+ CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
+ CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
+ CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_RNFE_NONE, /* PRLI_TMO */
+ CSIO_RNFE_NONE, /* ADISC_TMO */
+ CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
+ CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
+ CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* LOGO_SNT */
+ CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_RNFE_NONE : \
+ fwevt_to_rnevt[_evt])
+int
+csio_is_rnode_ready(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_ready);
+}
+
+static int
+csio_is_rnode_uninit(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_uninit);
+}
+
+static int
+csio_is_rnode_wka(uint8_t rport_type)
+{
+ if ((rport_type == FLOGI_VFPORT) ||
+ (rport_type == FDISC_VFPORT) ||
+ (rport_type == NS_VNPORT) ||
+ (rport_type == FDMI_VNPORT))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * csio_rn_lookup - Finds the rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flowid.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching entry
+ * is found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->flowid == flowid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
+ * @ln: lnode
+ * @wwpn: wwpn
+ *
+ * Does the rnode lookup on the given lnode and wwpn. If no matching entry
+ * is found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
+ return rn;
+ }
+
+ return NULL;
+}
+
+/**
+ * csio_rnode_lookup_portid - Finds the rnode with the given portid
+ * @ln: lnode
+ * @portid: port id
+ *
+ * Looks up the rnode list for a given portid. If no matching entry
+ * is found, NULL is returned.
+ */
+struct csio_rnode *
+csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->nport_id == portid)
+ return rn;
+ }
+
+ return NULL;
+}
+
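+/*
+ * csio_rn_dup_flowid - Checks whether a flowid is already in use.
+ * @ln: lnode
+ * @rdev_flowid: remote device flowid
+ * @vnp_flowid: returns the VNP flowid of the lnode already using it.
+ *
+ * Returns 1 if an rnode with the same flowid is already active on another
+ * lnode (and sets *vnp_flowid accordingly), 0 otherwise.
+ */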
+static int
+csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
+ uint32_t *vnp_flowid)
+{
+ struct csio_rnode *rnhead;
+ struct list_head *tmp, *tmp1;
+ struct csio_rnode *rn;
+ struct csio_lnode *ln_tmp;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ list_for_each(tmp1, &hw->sln_head) {
+ ln_tmp = (struct csio_lnode *) tmp1;
+ if (ln_tmp == ln)
+ continue;
+
+ rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+
+ rn = (struct csio_rnode *) tmp;
+ if (csio_is_rnode_ready(rn)) {
+ if (rn->flowid == rdev_flowid) {
+ *vnp_flowid = csio_ln_flowid(ln_tmp);
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct csio_rnode *
+csio_alloc_rnode(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
+ if (!rn)
+ goto err;
+
+ memset(rn, 0, sizeof(struct csio_rnode));
+ if (csio_rnode_init(rn, ln))
+ goto err_free;
+
+ CSIO_INC_STATS(ln, n_rnode_alloc);
+
+ return rn;
+
+err_free:
+ mempool_free(rn, hw->rnode_mempool);
+err:
+ CSIO_INC_STATS(ln, n_rnode_nomem);
+ return NULL;
+}
+
+static void
+csio_free_rnode(struct csio_rnode *rn)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
+
+ csio_rnode_exit(rn);
+ CSIO_INC_STATS(rn->lnp, n_rnode_free);
+ mempool_free(rn, hw->rnode_mempool);
+}
+
+/*
+ * csio_get_rnode - Gets rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flow id.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching
+ * rnode is found, a new rnode is allocated, assigned the given flowid
+ * and returned.
+ */
+static struct csio_rnode *
+csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rn;
+
+ rn = csio_rn_lookup(ln, flowid);
+ if (!rn) {
+ rn = csio_alloc_rnode(ln);
+ if (!rn)
+ return NULL;
+
+ rn->flowid = flowid;
+ }
+
+ return rn;
+}
+
+/*
+ * csio_put_rnode - Frees the given rnode
+ * @ln - lnode
+ * @rn - rnode to be freed.
+ *
+ * Frees the given rnode, which must be in the uninit state.
+ */
+void
+csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
+{
+ CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
+ csio_free_rnode(rn);
+}
+
+/*
+ * csio_confirm_rnode - confirms rnode based on wwpn.
+ * @ln: lnode
+ * @rdev_flowid: remote device flowid
+ * @rdevp: remote device params
+ * This routine searches the rnode list for an entry whose wwpn matches
+ * that of the new rnode. If a match is found, the matched rnode is
+ * returned; otherwise a new rnode is allocated and returned.
+ * Returns the rnode, or NULL on failure.
+ */
+struct csio_rnode *
+csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t rport_type;
+ struct csio_rnode *rn, *match_rn;
+ uint32_t vnp_flowid;
+ __be32 *port_id;
+
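+ /*
+ * The 24-bit FC N_Port ID lives in the first three bytes of r_id;
+ * it is extracted below as (ntohl(*port_id) >> 8) & CSIO_DID_MASK.
+ */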
+ port_id = (__be32 *)&rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+
+ /* Drop rdev event for cntrl port */
+ if (rport_type == FAB_CTLR_VNPORT) {
+ csio_ln_dbg(ln,
+ "Unhandled rport_type:%d recv in rdev evt "
+ "ssni:x%x\n", rport_type, rdev_flowid);
+ return NULL;
+ }
+
+ /* Lookup on flowid */
+ rn = csio_rn_lookup(ln, rdev_flowid);
+ if (!rn) {
+
+ /* Drop events with duplicate flowid */
+ if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
+ csio_ln_warn(ln,
+ "ssni:%x already active on vnpi:%x",
+ rdev_flowid, vnp_flowid);
+ return NULL;
+ }
+
+ /* Lookup on wwpn for NPORTs */
+ rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (!rn)
+ goto alloc_rnode;
+
+ } else {
+ /* Lookup well-known ports with nport id */
+ if (csio_is_rnode_wka(rport_type)) {
+ match_rn = csio_rnode_lookup_portid(ln,
+ ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
+ if (match_rn == NULL) {
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /*
+ * Now compare the wwpn to confirm that
+ * same port relogged in. If so update the matched rn.
+ * Else, go ahead and alloc a new rnode.
+ */
+ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already"
+ "active ssni:x%x\n",
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+
+ /* Update rn */
+ goto found_rnode;
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /* wwpn match */
+ if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
+ goto found_rnode;
+
+ /* Search for rnode that have same wwpn */
+ match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (match_rn != NULL) {
+ csio_ln_dbg(ln,
+ "ssni:x%x changed for rport name(wwpn):%llx "
+ "did:x%x\n", rdev_flowid,
+ wwn_to_u64(rdevp->wwpn),
+ match_rn->nport_id);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+ } else {
+ csio_ln_dbg(ln,
+ "rnode wwpn mismatch found ssni:x%x "
+ "name(wwpn):%llx\n",
+ rdev_flowid,
+ wwn_to_u64(csio_rn_wwpn(rn)));
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already active "
+ "wwpn:%llx ssni:x%x\n",
+ wwn_to_u64(csio_rn_wwpn(rn)),
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+ }
+
+found_rnode:
+ csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* Update flowid */
+ csio_rn_flowid(rn) = rdev_flowid;
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ CSIO_INC_STATS(ln, n_rnode_match);
+ return rn;
+
+alloc_rnode:
+ rn = csio_get_rnode(ln, rdev_flowid);
+ if (!rn)
+ return NULL;
+
+ csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ return rn;
+}
+
+/*
+ * csio_rn_verify_rparams - Verify remote device parameters.
+ * @ln: lnode
+ * @rn: rnode
+ * @rdevp: remote device params
+ *
+ * Returns 0 if the parameters are valid, -EINVAL otherwise.
+ */
+static int
+csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t null[8];
+ uint8_t rport_type;
+ uint8_t fc_class;
+ __be32 *did;
+
+ did = (__be32 *) &rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+ switch (rport_type) {
+ case FLOGI_VFPORT:
+ rn->role = CSIO_RNFR_FABRIC;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ /* NPIV support */
+ if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
+ ln->flags |= CSIO_LNF_NPIVSUPP;
+
+ break;
+
+ case NS_VNPORT:
+ rn->role = CSIO_RNFR_NS;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ break;
+
+ case REG_FC4_VNPORT:
+ case REG_VNPORT:
+ rn->role = CSIO_RNFR_NPORT;
+ if (rdevp->event_cause == PRLI_ACC_RCVD ||
+ rdevp->event_cause == PRLI_RCVD) {
+ if (FW_RDEV_WR_TASK_RETRY_ID_GET(
+ rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
+
+ if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_RETRY;
+
+ if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
+
+ if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_TARGET;
+
+ if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_INITIATOR;
+ }
+
+ break;
+
+ case FDMI_VNPORT:
+ case FAB_CTLR_VNPORT:
+ rn->role = 0;
+ break;
+
+ default:
+ csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
+ csio_rn_flowid(rn), rport_type);
+ return -EINVAL;
+ }
+
+ /* validate wwpn/wwnn for Name server/remote port */
+ if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
+ memset(null, 0, 8);
+ if (!memcmp(rdevp->wwnn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwnn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ if (!memcmp(rdevp->wwpn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwpn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ }
+
+ /* Copy wwnn, wwpn and nport id */
+ rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
+ memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
+ memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
+ rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
+ fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
+ rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
+
+ return 0;
+}
+
+static void
+__csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
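+ /*
+ * Registration calls into the FC transport, so the HW lock is
+ * dropped across csio_reg_rnode().
+ */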
+ spin_unlock_irq(&hw->lock);
+ csio_reg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ if (rn->role & CSIO_RNFR_TARGET)
+ ln->n_scsi_tgts++;
+
+ if (rn->nport_id == FC_FID_MGMT_SERV)
+ csio_ln_fdmi_start(ln, (void *) rn);
+}
+
+static void
+__csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ LIST_HEAD(tmp_q);
+ int cmpl = 0;
+
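+ /*
+ * Move I/Os still waiting for completion on this rnode to a
+ * temporary list; they are cleaned up after the rnode has been
+ * unregistered below.
+ */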
+ if (!list_empty(&rn->host_cmpl_q)) {
+ csio_dbg(hw, "Returning completion queue I/Os\n");
+ list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
+ cmpl = 1;
+ }
+
+ if (rn->role & CSIO_RNFR_TARGET) {
+ ln->n_scsi_tgts--;
+ ln->last_scan_ntgts--;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ csio_unreg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ /* Cleanup I/Os that were waiting for rnode to unregister */
+ if (cmpl)
+ csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
+
+}
+
+/*****************************************************************************/
+/* START: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rns_uninit - SM handler for the rnode 'uninit' state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ }
+ break;
+ case CSIO_RNFE_LOGO_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_ready - SM handler for the rnode 'ready' state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_PRLI_DONE:
+ case CSIO_RNFE_PRLI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret)
+ __csio_reg_rnode(rn);
+ else
+ CSIO_INC_STATS(rn, n_err_inval);
+
+ break;
+ case CSIO_RNFE_DOWN:
+ csio_set_state(&rn->sm, csio_rns_offline);
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return them to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_LOGO_RECV:
+ csio_set_state(&rn->sm, csio_rns_offline);
+
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return them to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /*
+ * Each rnode receives the CLOSE event when the driver is
+ * removed or the device is reset.
+ * Note: all outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ __csio_unreg_rnode(rn);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ __csio_unreg_rnode(rn);
+
+ /*
+ * FW is expected to internally abort outstanding SCSI WRs
+ * and return them to the host with status "ABORTED".
+ */
+
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_offline - SM handler for the rnode 'offline' state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_DOWN:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives the CLOSE event when the driver is
+ * removed or the device is reset.
+ * Note: all outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_disappeared - SM handler for the rnode 'disappeared' state.
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives the CLOSE event when the driver is
+ * removed or the device is reset.
+ * Note: all outstanding I/Os on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_DOWN:
+ case CSIO_RNFE_NAME_MISSING:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rnode_devloss_handler - Device loss event handler
+ * @rn: rnode
+ *
+ * Post event to close rnode SM and free rnode.
+ */
+void
+csio_rnode_devloss_handler(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+
+ /* ignore if same rnode came back as online */
+ if (csio_is_rnode_ready(rn))
+ return;
+
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/**
+ * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
+ * @rn: rnode
+ * @fwevt: Firmware rdev event.
+ *
+ */
+void
+csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ enum csio_rn_ev evt;
+
+ evt = CSIO_FWE_TO_RNFE(fwevt);
+ if (!evt) {
+ csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
+ csio_rn_flowid(rn), fwevt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ return;
+ }
+ CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
+
+ /* Track previous & current events for debugging */
+ rn->prev_evt = rn->cur_evt;
+ rn->cur_evt = fwevt;
+
+ /* Post event to rnode SM */
+ csio_post_event(&rn->sm, evt);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/*
+ * csio_rnode_init - Initialize rnode.
+ * @rn: RNode
+ * @ln: Associated lnode
+ *
+ * Caller is responsible for holding the lock. The lock is required
+ * to be held for inserting the rnode in ln->rnhead list.
+ */
+static int
+csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
+{
+ csio_rnode_to_lnode(rn) = ln;
+ csio_init_state(&rn->sm, csio_rns_uninit);
+ INIT_LIST_HEAD(&rn->host_cmpl_q);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+
+ /* Add rnode to list of lnodes->rnhead */
+ list_add_tail(&rn->sm.sm_list, &ln->rnhead);
+
+ return 0;
+}
+
+static void
+csio_rnode_exit(struct csio_rnode *rn)
+{
+ list_del_init(&rn->sm.sm_list);
+ CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
+}
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
new file mode 100644
index 00000000000..a3b434c801d
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -0,0 +1,141 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_RNODE_H__
+#define __CSIO_RNODE_H__
+
+#include "csio_defs.h"
+
+/* State machine events */
+enum csio_rn_ev {
+ CSIO_RNFE_NONE = (uint32_t)0, /* None */
+ CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
+ * complete.
+ */
+ CSIO_RNFE_PRLI_DONE, /* PRLI completed */
+ CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
+ CSIO_RNFE_PRLI_RECV, /* Received PRLI */
+ CSIO_RNFE_LOGO_RECV, /* Received LOGO */
+ CSIO_RNFE_PRLO_RECV, /* Received PRLO */
+ CSIO_RNFE_DOWN, /* Rnode is down */
+ CSIO_RNFE_CLOSE, /* Close rnode */
+ CSIO_RNFE_NAME_MISSING, /* Rnode name missing
+ * in name server.
+ */
+ CSIO_RNFE_MAX_EVENT,
+};
+
+/* rnode stats */
+struct csio_rnode_stats {
+ uint32_t n_err; /* error */
+ uint32_t n_err_inval; /* invalid parameter */
+ uint32_t n_err_nomem; /* error nomem */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_lun_rst; /* Number of resets of
+ * LUNs under this
+ * target
+ */
+ uint32_t n_lun_rst_fail; /* Number of LUN reset
+ * failures.
+ */
+ uint32_t n_tgt_rst; /* Number of target resets */
+ uint32_t n_tgt_rst_fail; /* Number of target reset
+ * failures.
+ */
+};
+
+/* Defines for rnode role */
+#define CSIO_RNFR_INITIATOR 0x1
+#define CSIO_RNFR_TARGET 0x2
+#define CSIO_RNFR_FABRIC 0x4
+#define CSIO_RNFR_NS 0x8
+#define CSIO_RNFR_NPORT 0x10
+
+struct csio_rnode {
+ struct csio_sm sm; /* State machine -
+ * should be the
+ * 1st member
+ */
+ struct csio_lnode *lnp; /* Pointer to owning
+ * Lnode */
+ uint32_t flowid; /* Firmware ID */
+ struct list_head host_cmpl_q; /* SCSI I/Os
+ * pending completion
+ * to the mid-layer.
+ */
+ /* FC identifiers for remote node */
+ uint32_t nport_id;
+ uint16_t fcp_flags; /* FCP Flags */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+ uint32_t role; /* Fabric/Target/
+ * Initiator/NS
+ */
+ struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
+ struct csio_service_parms rn_sparm;
+
+ /* FC transport attributes */
+ struct fc_rport *rport; /* FC transport rport */
+ uint32_t supp_classes; /* Supported FC classes */
+ uint32_t maxframe_size; /* Max Frame size */
+ uint32_t scsi_id; /* Transport given SCSI id */
+
+ struct csio_rnode_stats stats; /* Common rnode stats */
+};
+
+#define csio_rn_flowid(rn) ((rn)->flowid)
+#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
+#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
+#define csio_rnode_to_lnode(rn) ((rn)->lnp)
+
+int csio_is_rnode_ready(struct csio_rnode *rn);
+void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
+
+struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
+struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
+ uint32_t, struct fcoe_rdev_entry *);
+
+void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
+
+void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
+
+void csio_reg_rnode(struct csio_rnode *);
+void csio_unreg_rnode(struct csio_rnode *);
+
+void csio_rnode_devloss_handler(struct csio_rnode *);
+
+#endif /* ifndef __CSIO_RNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
new file mode 100644
index 00000000000..ddd38e5eb0e
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -0,0 +1,2555 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_scsi.h"
+#include "csio_init.h"
+
+int csio_scsi_eqsize = 65536;
+int csio_scsi_iqlen = 128;
+int csio_scsi_ioreqs = 2048;
+uint32_t csio_max_scan_tmo;
+uint32_t csio_delta_scan_tmo = 5;
+int csio_lun_qdepth = 32;
+
+static int csio_ddp_descs = 128;
+
+static int csio_do_abrt_cls(struct csio_hw *,
+ struct csio_ioreq *, bool);
+
+static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
+
+/*
+ * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
+ * @ioreq: The I/O request
+ * @sld: Level information
+ *
+ * Should be called with lock held.
+ *
+ */
+static bool
+csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
+{
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
+
+ switch (sld->level) {
+ case CSIO_LEV_LUN:
+ if (scmnd == NULL)
+ return false;
+
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode) &&
+ ((uint64_t)scmnd->device->lun == sld->oslun));
+
+ case CSIO_LEV_RNODE:
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode));
+ case CSIO_LEV_LNODE:
+ return (ioreq->lnode == sld->lnode);
+ case CSIO_LEV_ALL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * csio_scsi_gather_active_ios - Gather active I/Os based on level
+ * @scm: SCSI module
+ * @sld: Level information
+ * @dest: The queue where these I/Os have to be gathered.
+ *
+ * Should be called with lock held.
+ */
+static void
+csio_scsi_gather_active_ios(struct csio_scsim *scm,
+ struct csio_scsi_level_data *sld,
+ struct list_head *dest)
+{
+ struct list_head *tmp, *next;
+
+ if (list_empty(&scm->active_q))
+ return;
+
+ /* Just splice the entire active_q into dest */
+ if (sld->level == CSIO_LEV_ALL) {
+ list_splice_tail_init(&scm->active_q, dest);
+ return;
+ }
+
+ list_for_each_safe(tmp, next, &scm->active_q) {
+ if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
+ list_del_init(tmp);
+ list_add_tail(tmp, dest);
+ }
+ }
+}
+
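+/*
+ * csio_scsi_itnexus_loss_error - Check for an I-T nexus loss error.
+ * @error: FW returned error code.
+ *
+ * Returns 1 if the FW error indicates that the I-T nexus to the remote
+ * device has been lost (link down, remote device not ready, logged out
+ * etc.), 0 otherwise.
+ */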
+static inline bool
+csio_scsi_itnexus_loss_error(uint16_t error)
+{
+ switch (error) {
+ case FW_ERR_LINK_DOWN:
+ case FW_RDEV_NOT_READY:
+ case FW_ERR_RDEV_LOST:
+ case FW_ERR_RDEV_LOGO:
+ case FW_ERR_RDEV_IMPL_LOGO:
+ return 1;
+ }
+ return 0;
+}
+
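+/*
+ * csio_scsi_tag - Map the mid-layer queue tag message to an FCP task attribute.
+ * @scmnd: SCSI command.
+ * @tag: Returned task attribute.
+ * @hq: Value to return for a head-of-queue tag.
+ * @oq: Value to return for an ordered tag.
+ * @sq: Value to return for a simple tag.
+ *
+ * If the command carries no tag message, *tag is set to 0.
+ */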
+static inline void
+csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
+ uint8_t oq, uint8_t sq)
+{
+ char stag[2];
+
+ if (scsi_populate_tag_msg(scmnd, stag)) {
+ switch (stag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ *tag = hq;
+ break;
+ case ORDERED_QUEUE_TAG:
+ *tag = oq;
+ break;
+ default:
+ *tag = sq;
+ break;
+ }
+ } else
+ *tag = 0;
+}
+
+/*
+ * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ *
+ * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
+ */
+static inline void
+csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
+{
+ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ /* Check for Task Management */
+ if (likely(scmnd->SCp.Message == 0)) {
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = 0;
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+
+ memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
+ csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
+ FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
+ fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
+ else
+ fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
+ } else {
+ fcp_cmnd->fc_flags = 0;
+ }
+ } else {
+ memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ }
+}
+
+/*
+ * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry)
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
+ FW_SCSI_CMD_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ wr->r3 = 0;
+ memset(&wr->r5, 0, 8);
+
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r6 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r4_lo[0] = 0;
+ wr->u.fcoe.r4_lo[1] = 0;
+
+ /* Frame a FCP command */
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
+ sizeof(struct fw_scsi_cmd_wr)));
+}
+
+#define CSIO_SCSI_CMD_WR_SZ(_imm) \
+ (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \
+ ALIGN((_imm), 16)) /* Immed data */
+
+#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \
+ (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
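+
+/*
+ * Example: with a 32-byte immediate FCP payload, CSIO_SCSI_CMD_WR_SZ_16
+ * expands to ALIGN(sizeof(struct fw_scsi_cmd_wr) + 32, 16), i.e. the WR
+ * plus immediate data rounded up to a 16-byte multiple.
+ */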
+
+/*
+ * csio_scsi_cmd - Create a SCSI CMD WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with SCSI CMD WR.
+ *
+ */
+static inline void
+csio_scsi_cmd(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (unlikely(req->drv_status != 0))
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_cmd_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*
+ * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
+ * @hw: HW module
+ * @req: IO request
+ * @sgl: ULP TX SGL pointer.
+ *
+ */
+static inline void
+csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
+ struct ulptx_sgl *sgl)
+{
+ struct ulptx_sge_pair *sge_pair = NULL;
+ struct scatterlist *sgel;
+ uint32_t i = 0;
+ uint32_t xfer_len;
+ struct list_head *tmp;
+ struct csio_dma_buf *dma_buf;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
+ ULPTX_NSGE(req->nsge));
+ /* Now add the data SGLs */
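+ /*
+ * The first SGE goes into addr0/len0 of the ulptx_sgl itself;
+ * subsequent SGEs are packed two per ulptx_sge_pair immediately
+ * following it.
+ */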
+ if (likely(!req->dcopy)) {
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
+ sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ continue;
+ }
+ if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[1] = cpu_to_be32(
+ sg_dma_len(sgel));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[0] = cpu_to_be32(
+ sg_dma_len(sgel));
+ }
+ }
+ } else {
+ /* Program sg elements with driver's DDP buffer */
+ xfer_len = scsi_bufflen(scmnd);
+ list_for_each(tmp, &req->gen_list) {
+ dma_buf = (struct csio_dma_buf *)tmp;
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(dma_buf->paddr);
+ sgl->len0 = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ } else if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[1] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[0] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ }
+ xfer_len -= min(xfer_len, dma_buf->len);
+ i++;
+ }
+ }
+}
+
+/*
+ * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
+ *
+ * Wrapper for populating fw_scsi_read_wr.
+ */
+static inline void
+csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
+ FW_SCSI_READ_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/*
+ * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
+ *
+ * Wrapper for populating fw_scsi_write_wr.
+ */
+static inline void
+csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
+ FW_SCSI_WRITE_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
+#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
+do { \
+ (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \
+ ALIGN((imm), 16) + /* Immed data */ \
+ sizeof(struct ulptx_sgl); /* ulptx_sgl */ \
+ \
+ if (unlikely((req)->nsge > 1)) \
+ (sz) += (sizeof(struct ulptx_sge_pair) * \
+ (ALIGN(((req)->nsge - 1), 2) / 2)); \
+ /* Data SGE */ \
+} while (0)
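+
+/*
+ * Example: a read with 3 SGEs and a 32-byte immediate FCP payload needs
+ * sizeof(struct fw_scsi_read_wr) + 32 + sizeof(struct ulptx_sgl) +
+ * 1 * sizeof(struct ulptx_sge_pair); the caller then aligns the total
+ * to 16 bytes.
+ */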
+
+/*
+ * csio_scsi_read - Create a SCSI READ WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with
+ * SCSI READ WR.
+ *
+ */
+static inline void
+csio_scsi_read(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_read_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_read_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_scsi_write - Create a SCSI WRITE WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with
+ * SCSI WRITE WR.
+ *
+ */
+static inline void
+csio_scsi_write(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_write_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_write_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_setup_ddp - Setup DDP buffers for Read request.
+ * @req: IO req structure.
+ *
+ * Checks whether the SGLs/data buffers are virtually contiguous, as
+ * required for DDP. If they are, the driver posts the SGLs directly in
+ * the WR; otherwise it posts internal DDP buffers for the request and
+ * copies the data back to the SGLs on completion.
+ */
+static inline void
+csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
+{
+#ifdef __CSIO_DEBUG__
+ struct csio_hw *hw = req->lnode->hwp;
+#endif
+ struct scatterlist *sgel = NULL;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+ uint64_t sg_addr = 0;
+ uint32_t ddp_pagesz = 4096;
+ uint32_t buf_off;
+ struct csio_dma_buf *dma_buf = NULL;
+ uint32_t alloc_len = 0;
+ uint32_t xfer_len = 0;
+ uint32_t sg_len = 0;
+ uint32_t i;
+
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ sg_addr = sg_dma_address(sgel);
+ sg_len = sg_dma_len(sgel);
+
+ buf_off = sg_addr & (ddp_pagesz - 1);
+
+ /* Except the 1st buffer, all buffer addresses have to be page aligned */
+ if (i != 0 && buf_off) {
+ csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
+ sg_addr, sg_len);
+ goto unaligned;
+ }
+
+ /* Except the last buffer, all buffers must end on a page boundary */
+ if ((i != (req->nsge - 1)) &&
+ ((buf_off + sg_len) & (ddp_pagesz - 1))) {
+ csio_dbg(hw,
+ "SGL addr not ending on page boundary"
+ " (%llx:%d)\n", sg_addr, sg_len);
+ goto unaligned;
+ }
+ }
+
+ /* SGL's are virtually contiguous. HW will DDP to SGLs */
+ req->dcopy = 0;
+ csio_scsi_read(req);
+
+ return;
+
+unaligned:
+ CSIO_INC_STATS(scsim, n_unaligned);
+ /*
+ * For unaligned SGLs, driver will allocate internal DDP buffer.
+ * Once command is completed data from DDP buffer copied to SGLs
+ */
+ req->dcopy = 1;
+
+ /* Use gen_list to store the DDP buffers */
+ INIT_LIST_HEAD(&req->gen_list);
+ xfer_len = scsi_bufflen(scmnd);
+
+ i = 0;
+ /* Allocate ddp buffers for this request */
+ while (alloc_len < xfer_len) {
+ dma_buf = csio_get_scsi_ddp(scsim);
+ if (dma_buf == NULL || i > scsim->max_sge) {
+ req->drv_status = -EBUSY;
+ break;
+ }
+ alloc_len += dma_buf->len;
+ /* Added to IO req */
+ list_add_tail(&dma_buf->list, &req->gen_list);
+ i++;
+ }
+
+ if (!req->drv_status) {
+ /* set number of ddp bufs used */
+ req->nsge = i;
+ csio_scsi_read(req);
+ return;
+ }
+
+ /* release dma descs */
+ if (i > 0)
+ csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
+}
+
+/*
+ * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR
+ * @abort: abort OR close
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
+ bool abort)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
+ wr->sub_opcode_to_chk_all_io =
+ (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
+ FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
+ wr->r3[0] = 0;
+ wr->r3[1] = 0;
+ wr->r3[2] = 0;
+ wr->r3[3] = 0;
+ /* Since we re-use the same ioreq for abort as well */
+ wr->t_cookie = (uintptr_t) req;
+}
+
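+/*
+ * csio_scsi_abrt_cls - Get a WR slot and build an ABORT/CLOSE WR.
+ * @req: I/O request to be aborted/closed.
+ * @abort: SCSI_ABORT or SCSI_CLOSE.
+ */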
+static inline void
+csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (req->drv_status != 0)
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*****************************************************************************/
+/* START: SCSI SM */
+/*****************************************************************************/
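+
+/*
+ * SCSI I/O state machine summary:
+ *
+ * uninit --START_IO/START_TM--> io_active/tm_active
+ * io_active/tm_active --COMPLETED--> uninit
+ * io_active --COMPLETED w/ I-T nexus loss error--> shost_cmpl_await
+ * io_active/tm_active --ABORT/CLOSE--> aborting/closing
+ * aborting --ABORTED--> uninit, closing --CLOSED--> uninit
+ * any state --DRVCLEANUP--> uninit
+ */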
+static void
+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_START_IO:
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE) {
+ req->dcopy = 0;
+ csio_scsi_write(req);
+ } else
+ csio_setup_ddp(scsim, req);
+ } else {
+ csio_scsi_cmd(req);
+ }
+
+ if (likely(req->drv_status == 0)) {
+ /* change state and enqueue on active_q */
+ csio_set_state(&req->sm, csio_scsis_io_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_active);
+
+ return;
+ }
+ break;
+
+ case CSIO_SCSIE_START_TM:
+ csio_scsi_cmd(req);
+ if (req->drv_status == 0) {
+ /*
+ * NOTE: We collect the affected I/Os prior to issuing
+ * LUN reset, and not after it. This is to prevent
+ * aborting I/Os that get issued after the LUN reset,
+ * but prior to LUN reset completion (in the event that
+ * the host stack has not blocked I/Os to a LUN that is
+ * being reset).
+ */
+ csio_set_state(&req->sm, csio_scsis_tm_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_tm_active);
+ }
+ return;
+
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * NOTE:
+ * We could get here due to :
+ * - a window in the cleanup path of the SCSI module
+ * (csio_scsi_abort_io()). Please see NOTE in this function.
+ * - a window in the time we tried to issue an abort/close
+ * of a request to FW, and the FW completed the request
+ * itself.
+ * Print a message for now, and return INVAL either way.
+ */
+ req->drv_status = -EINVAL;
+ csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn;
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ /*
+ * In MSIX mode, with multiple queues, the SCSI completions
+ * could reach us sooner than the FW events sent to indicate
+ * I-T nexus loss (link down, remote device logo etc.). We
+ * don't want to be returning such I/Os to the upper layer
+ * immediately, since we wouldn't have reported the I-T nexus
+ * loss itself. This forces us to serialize such completions
+ * with the reporting of the I-T nexus loss. Therefore, we
+ * internally queue up such completions in the rnode.
+ * The reporting of I-T nexus loss to the upper layer is then
+ * followed by the returning of I/Os in this internal queue.
+ * Having another state along with another queue helps us take
+ * actions for events such as an ABORT received while we are
+ * in this rnode queue.
+ */
+ if (unlikely(req->wr_status != FW_SUCCESS)) {
+ rn = req->rnode;
+ /*
+ * FW says remote device is lost, but rnode
+ * doesn't reflect it.
+ */
+ if (csio_scsi_itnexus_loss_error(req->wr_status) &&
+ csio_is_rnode_ready(rn)) {
+ csio_set_state(&req->sm,
+ csio_scsis_shost_cmpl_await);
+ list_add_tail(&req->sm.sm_list,
+ &rn->host_cmpl_q);
+ }
+ }
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_tm_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_tm_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in aborting st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the ABORTED event that
+ * the original I/O was returned to the driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the ABORT and completion of the I/O crossed each
+ * other), or any other return value. Once we are in aborting
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ CSIO_INC_STATS(scm, n_abrt_dups);
+ break;
+
+ case CSIO_SCSIE_ABORTED:
+
+ csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
+ req, req->wr_status, req->drv_status);
+ /*
+ * Check if original I/O WR completed before the Abort
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_warn(hw,
+ "Abort completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * There are the following possible scenarios:
+ * 1. The abort completed successfully, FW returned FW_SUCCESS.
+ * 2. The completion of an I/O and the receipt of
+ * abort for that I/O by the FW crossed each other.
+ * The FW returned FW_EINVAL. The original I/O would have
+ * returned with FW_SUCCESS or any other SCSI error.
+ * 3. The FW couldn't send the abort out on the wire, as there
+ * was an I-T nexus loss (link down, remote device logged
+ * out etc.). FW sent back an appropriate I-T nexus loss status
+ * for the abort.
+ * 4. FW sent an abort, but the abort timed out (remote device
+ * didn't respond). FW replied back with
+ * FW_SCSI_ABORT_TIMEDOUT.
+ * 5. FW couldn't genuinely abort the request for some reason,
+ * and sent us an error.
+ *
+ * The first 3 scenarios are treated as successful abort
+ * operations by the host, while the last 2 are failed attempts
+ * to abort. Manipulate the return value of the request
+ * appropriately, so that host can convey these results
+ * back to the upper layer.
+ */
+ if ((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL) ||
+ csio_scsi_itnexus_loss_error(req->wr_status))
+ req->wr_status = FW_SCSI_ABORT_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * We can receive this event from the module
+ * cleanup paths, if the FW forgot to reply to the ABORT WR
+ * and left this ioreq in this state. For now, just ignore
+ * the event. The CLOSE event is sent to this state, as
+ * the LINK may have already gone down.
+ */
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in closing st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the CLOSED event that
+ * the original I/O was returned to the driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the CLOSE and completion of the I/O crossed each
+ * other), or any other return value. Once we are in the closing
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_CLOSED:
+ /*
+ * Check if original I/O WR completed before the Close
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_fatal(hw,
+ "Close completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * Either close succeeded, or we issued close to FW at the
+ * same time FW completed it to us. Either way, the I/O
+ * is closed.
+ */
+ CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL));
+ req->wr_status = FW_SCSI_CLOSE_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ switch (evt) {
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * Just succeed the abort request, and hope that
+ * the remote device unregister path will clean up
+ * this I/O to the upper layer within a sane
+ * amount of time.
+ */
+ /*
+ * A close can come in during a LINK DOWN. The FW would have
+ * returned us the I/O back, but not the remote device lost
+ * FW event. In this interval, if the I/O times out at the upper
+ * layer, a close can come in. Take the same action as abort:
+ * return success, and hope that the remote device unregister
+ * path will clean up this I/O. If the FW still doesn't send
+ * the msg, the close times out, and the upper layer resorts
+ * to the next level of error recovery.
+ */
+ req->drv_status = 0;
+ break;
+ case CSIO_SCSIE_DRVCLEANUP:
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+ default:
+ csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
+ evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+/*
+ * csio_scsi_cmpl_handler - WR completion handler for SCSI.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @priv: Private object
+ * @scsiwr: Pointer to SCSI WR.
+ *
+ * This is the WR completion handler called per completion from the
+ * ISR. It is called with lock held. It walks past the RSS and CPL message
+ * header where the actual WR is present.
+ * It then gets the status, WR handle (ioreq pointer) and the len of
+ * the WR, based on WR opcode. Only on a non-good status is the entire
+ * WR copied into the WR cache (ioreq->fw_wr).
+ * The ioreq corresponding to the WR is returned to the caller.
+ * NOTE: The SCSI queue doesn't allocate a freelist today, hence
+ * no freelist buffer is expected.
+ */
+struct csio_ioreq *
+csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
+{
+ struct csio_ioreq *ioreq = NULL;
+ struct cpl_fw6_msg *cpl;
+ uint8_t *tempwr;
+ uint8_t status;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ /* skip RSS header */
+ cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
+
+ if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
+ csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
+ cpl->opcode);
+ CSIO_INC_STATS(scm, n_inval_cplop);
+ return NULL;
+ }
+
+ tempwr = (uint8_t *)(cpl->data);
+ status = csio_wr_status(tempwr);
+ *scsiwr = tempwr;
+
+ if (likely((*tempwr == FW_SCSI_READ_WR) ||
+ (*tempwr == FW_SCSI_WRITE_WR) ||
+ (*tempwr == FW_SCSI_CMD_WR))) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_read_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+
+ return ioreq;
+ }
+
+ if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+ return ioreq;
+ }
+
+ csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
+ CSIO_INC_STATS(scm, n_inval_scsiop);
+ return NULL;
+}
+
+/*
+ * csio_scsi_cleanup_io_q - Clean up the given queue.
+ * @scm: SCSI module.
+ * @q: Queue to be cleaned up.
+ *
+ * Called with lock held. Has to exit with lock held.
+ */
+void
+csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_ioreq *ioreq;
+ struct list_head *tmp, *next;
+ struct scsi_cmnd *scmnd;
+
+ /* Call back the completion routines of the I/Os on the given queue */
+ list_for_each_safe(tmp, next, q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ scmnd = csio_scsi_cmnd(ioreq);
+ spin_unlock_irq(&hw->lock);
+
+ /*
+ * Upper layers may have cleared this command, hence this
+ * check to avoid accessing stale references.
+ */
+ if (scmnd != NULL)
+ ioreq->io_cbfn(hw, ioreq);
+
+ spin_lock_irq(&scm->freelist_lock);
+ csio_put_scsi_ioreq(scm, ioreq);
+ spin_unlock_irq(&scm->freelist_lock);
+
+ spin_lock_irq(&hw->lock);
+ }
+}
+
+#define CSIO_SCSI_ABORT_Q_POLL_MS 2000
+
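+/*
+ * csio_abrt_cls - Abort or close a single active I/O.
+ * @ioreq: I/O request.
+ * @scmnd: SCSI command the I/O was issued for.
+ *
+ * If the I/O has already completed (the command no longer matches the
+ * ioreq), nothing is done. Otherwise an ABORT is issued if the lnode is
+ * ready, else a CLOSE.
+ */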
+static void
+csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
+{
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_hw *hw = ln->hwp;
+ int ready = 0;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int rv;
+
+ if (csio_scsi_cmnd(ioreq) != scmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ if (rv != 0) {
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+ }
+}
+
+/*
+ * csio_scsi_abort_io_q - Abort all I/Os on given queue
+ * @scm: SCSI module.
+ * @q: Queue to abort.
+ * @tmo: Timeout in ms
+ *
+ * Attempt to abort all I/Os on given queue, and wait for a max
+ * of tmo milliseconds for them to complete. Returns success
+ * if all I/Os are aborted. Else returns -ETIMEDOUT.
+ * Should be entered with lock held. Exits with lock held.
+ * NOTE:
+ * Lock has to be held across the loop that aborts I/Os, since dropping the lock
+ * in between can cause the list to be corrupted. As a result, the caller
+ * of this function has to ensure that the number of I/Os to be aborted
+ * is small enough to not cause lock-held-for-too-long issues.
+ */
+static int
+csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
+{
+ struct csio_hw *hw = scm->hw;
+ struct list_head *tmp, *next;
+ int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
+ struct scsi_cmnd *scmnd;
+
+ if (list_empty(q))
+ return 0;
+
+ csio_dbg(hw, "Aborting SCSI I/Os\n");
+
+ /* Now abort/close I/Os in the queue passed */
+ list_for_each_safe(tmp, next, q) {
+ scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
+ csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
+ }
+
+ /* Wait till all active I/Os are completed/aborted/closed */
+ while (!list_empty(q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all aborts completed */
+ if (list_empty(q))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
+ * @scm: SCSI module.
+ * @abort: abort required.
+ * Called with lock held, should exit with lock held.
+ * Can sleep when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
+{
+ struct csio_hw *hw = scm->hw;
+ int rv = 0;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ /* No I/Os pending */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Wait until all active I/Os are completed */
+ while (!list_empty(&scm->active_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Else abort */
+ if (abort) {
+ rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
+ if (rv == 0)
+ return rv;
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ }
+
+ csio_scsi_cleanup_io_q(scm, &scm->active_q);
+
+ CSIO_DB_ASSERT(list_empty(&scm->active_q));
+
+ return rv;
+}
+
+/*
+ * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
+ * @scm: SCSI module.
+ * @lnode: lnode
+ *
+ * Called with lock held, should exit with lock held.
+ * Can sleep (with dropped lock) when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_scsi_level_data sld;
+ int rv;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
+
+ sld.level = CSIO_LEV_LNODE;
+ sld.lnode = ln;
+ INIT_LIST_HEAD(&ln->cmpl_q);
+ csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
+
+ /* No I/Os pending on this lnode */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ /* Wait until all active I/Os on this lnode are completed */
+ while (!list_empty(&ln->cmpl_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
+
+ /* I/Os are pending, abort them */
+ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
+ if (rv != 0) {
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
+ }
+
+ CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
+
+ return rv;
+}
+
+static ssize_t
+csio_show_hw_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (csio_is_hw_ready(hw))
+ return snprintf(buf, PAGE_SIZE, "ready\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "not ready\n");
+}
+
+/* Device reset */
+static ssize_t
+csio_device_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (*buf != '1')
+ return -EINVAL;
+
+ /* Delete NPIV lnodes */
+ csio_lnodes_exit(hw, 1);
+
+ /* Block upper IOs */
+ csio_lnodes_block_request(hw);
+
+ spin_lock_irq(&hw->lock);
+ csio_hw_reset(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_request(hw);
+ return count;
+}
+
+/* disable port */
+static ssize_t
+csio_disable_port(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ bool disable;
+
+ if (*buf == '1' || *buf == '0')
+ disable = (*buf == '1') ? true : false;
+ else
+ return -EINVAL;
+
+ /* Block upper IOs */
+ csio_lnodes_block_by_port(hw, ln->portid);
+
+ spin_lock_irq(&hw->lock);
+ csio_disable_lnodes(hw, ln->portid, disable);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_by_port(hw, ln->portid);
+ return count;
+}
+
+/* Show debug level */
+static ssize_t
+csio_show_dbg_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+}
+
+/* Store debug level */
+static ssize_t
+csio_store_dbg_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ uint32_t dbg_level = 0;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &dbg_level) != 1)
+ return -EINVAL;
+
+ ln->params.log_level = dbg_level;
+ hw->params.log_level = dbg_level;
+
+ return count;
+}
+
+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
+static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
+ csio_store_dbg_level);
+
+static struct device_attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state,
+ &dev_attr_device_reset,
+ &dev_attr_disable_port,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static ssize_t
+csio_show_num_reg_rnodes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+}
+
+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
+
+static struct device_attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
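+/*
+ * csio_scsi_copy_to_sgl - Copy data from driver DDP buffers to the SCSI SGL.
+ * @hw: HW module.
+ * @req: I/O request whose gen_list holds the internal DDP buffers.
+ *
+ * Used on the dcopy path (unaligned SGLs). Walks the DDP buffers and the
+ * command's SG list in lockstep, mapping each SG page with kmap_atomic()
+ * for the copy. Returns DID_OK if all bytes were copied, DID_ERROR
+ * otherwise.
+ */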
+static inline uint32_t
+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct scatterlist *sg;
+ uint32_t bytes_left;
+ uint32_t bytes_copy;
+ uint32_t buf_off = 0;
+ uint32_t start_off = 0;
+ uint32_t sg_off = 0;
+ void *sg_addr;
+ void *buf_addr;
+ struct csio_dma_buf *dma_buf;
+
+ bytes_left = scsi_bufflen(scmnd);
+ sg = scsi_sglist(scmnd);
+ dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
+
+ /* Copy data from driver buffer to SGs of SCSI CMD */
+ while (bytes_left > 0 && sg && dma_buf) {
+ if (buf_off >= dma_buf->len) {
+ buf_off = 0;
+ dma_buf = (struct csio_dma_buf *)
+ csio_list_next(dma_buf);
+ continue;
+ }
+
+ if (start_off >= sg->length) {
+ start_off -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+
+ buf_addr = dma_buf->vaddr + buf_off;
+ sg_off = sg->offset + start_off;
+ bytes_copy = min((dma_buf->len - buf_off),
+ sg->length - start_off);
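+ /* Clamp further so the copy stays within the single page mapped below */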
+ bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
+ bytes_copy);
+
+ sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
+ if (!sg_addr) {
+ csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
+ sg, req);
+ break;
+ }
+
+ csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
+ sg_addr, sg_off, buf_addr, bytes_copy);
+ memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
+ kunmap_atomic(sg_addr);
+
+ start_off += bytes_copy;
+ buf_off += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+
+ if (bytes_left > 0)
+ return DID_ERROR;
+ else
+ return DID_OK;
+}
+
+/*
+ * csio_scsi_err_handler - SCSI error handler.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static inline void
+csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags, scsi_status = 0;
+ uint32_t host_status = DID_OK;
+ uint32_t rsp_len = 0, sns_len = 0;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+
+ switch (req->wr_status) {
+ case FW_HOSTERROR:
+ if (unlikely(!csio_is_hw_ready(hw)))
+ return;
+
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_hosterror);
+
+ break;
+ case FW_SCSI_RSP_ERR:
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+ flags = fcp_resp->resp.fr_flags;
+ scsi_status = fcp_resp->resp.fr_status;
+
+ if (flags & FCP_RSP_LEN_VAL) {
+ rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
+ if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
+ (rsp_info->rsp_code != FCP_TMF_CMPL)) {
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
+ sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
+ if (sns_len > SCSI_SENSE_BUFFERSIZE)
+ sns_len = SCSI_SENSE_BUFFERSIZE;
+
+ memcpy(cmnd->sense_buffer,
+ &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
+ CSIO_INC_STATS(scm, n_autosense);
+ }
+
+ scsi_set_resid(cmnd, 0);
+
+ /* Under run */
+ if (flags & FCP_RESID_UNDER) {
+ scsi_set_resid(cmnd,
+ be32_to_cpu(fcp_resp->ext.fr_resid));
+
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
+ < cmnd->underflow))
+ host_status = DID_ERROR;
+ } else if (flags & FCP_RESID_OVER)
+ host_status = DID_ERROR;
+
+ CSIO_INC_STATS(scm, n_rsperror);
+ break;
+
+ case FW_SCSI_OVER_FLOW_ERR:
+ csio_warn(hw,
+ "Over-flow error,cmnd:0x%x expected len:0x%x"
+ " resid:0x%x\n", cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_ovflerror);
+ break;
+
+ case FW_SCSI_UNDER_FLOW_ERR:
+ csio_warn(hw,
+ "Under-flow error,cmnd:0x%x expected"
+ " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
+ cmnd->cmnd[0], scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->device->lun,
+ rn->flowid);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_unflerror);
+ break;
+
+ case FW_SCSI_ABORT_REQUESTED:
+ case FW_SCSI_ABORTED:
+ case FW_SCSI_CLOSE_REQUESTED:
+ csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
+ cmnd->cmnd[0],
+ (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
+ "closed" : "aborted");
+ /*
+ * csio_eh_abort_handler checks this value to
+ * succeed or fail the abort request.
+ */
+ host_status = DID_REQUEUE;
+ if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
+ CSIO_INC_STATS(scm, n_closed);
+ else
+ CSIO_INC_STATS(scm, n_aborted);
+ break;
+
+ case FW_SCSI_ABORT_TIMEDOUT:
+ /* FW timed out the abort itself */
+ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
+ req, cmnd, req->wr_status);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_abrt_timedout);
+ break;
+
+ case FW_RDEV_NOT_READY:
+ /*
+ * In firmware, an RDEV can get into this state
+ * temporarily, before moving into the disappeared/lost
+ * state. So, the driver should complete the request as if
+ * the device had disappeared.
+ */
+ CSIO_INC_STATS(scm, n_rdev_nr_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOST:
+ CSIO_INC_STATS(scm, n_rdev_lost_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOGO:
+ CSIO_INC_STATS(scm, n_rdev_logo_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_IMPL_LOGO:
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_LINK_DOWN:
+ CSIO_INC_STATS(scm, n_link_down_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_FCOE_NO_XCHG:
+ CSIO_INC_STATS(scm, n_no_xchg_error);
+ host_status = DID_ERROR;
+ break;
+
+ default:
+ csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
+ req->wr_status, req, cmnd);
+ CSIO_DB_ASSERT(0);
+
+ CSIO_INC_STATS(scm, n_unknown_error);
+ host_status = DID_ERROR;
+ break;
+ }
+
+out:
+ if (req->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+
+ /* Wake up waiting threads */
+ csio_scsi_cmnd(req) = NULL;
+ complete_all(&req->cmplobj);
+}
+
+/*
+ * csio_scsi_cbfn - SCSI callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static void
+csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ uint8_t scsi_status = SAM_STAT_GOOD;
+ uint32_t host_status = DID_OK;
+
+ if (likely(req->wr_status == FW_SUCCESS)) {
+ if (req->nsge > 0) {
+ scsi_dma_unmap(cmnd);
+ if (req->dcopy)
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+ csio_scsi_cmnd(req) = NULL;
+ CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
+ } else {
+ /* Error handling */
+ csio_scsi_err_handler(hw, req);
+ }
+}
+
+/**
+ * csio_queuecommand - Entry point to kickstart an I/O request.
+ * @host: The scsi_host pointer.
+ * @cmnd: The I/O request from ML.
+ *
+ * This routine does the following:
+ * - Checks for HW and Rnode module readiness.
+ * - Gets a free ioreq structure (which is already initialized
+ * to uninit during its allocation).
+ * - Maps SG elements.
+ * - Initializes ioreq members.
+ * - Kicks off the SCSI state machine for this IO.
+ * - Returns busy status on error.
+ */
+static int
+csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ unsigned long flags;
+ int nsge = 0;
+ int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
+ int retval;
+ int cpu;
+ struct csio_scsi_qset *sqset;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
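+ /*
+ * Pick the per-CPU SCSI queue set: use the CPU recorded in the
+ * block request if it is valid, else the current CPU.
+ */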
+ if (!blk_rq_cpu_valid(cmnd->request))
+ cpu = smp_processor_id();
+ else
+ cpu = cmnd->request->cpu;
+
+ sqset = &hw->sqset[ln->portid][cpu];
+
+ nr = fc_remote_port_chkready(rport);
+ if (nr) {
+ cmnd->result = nr;
+ CSIO_INC_STATS(scsim, n_rn_nr_error);
+ goto err_done;
+ }
+
+ if (unlikely(!csio_is_hw_ready(hw))) {
+ cmnd->result = (DID_REQUEUE << 16);
+ CSIO_INC_STATS(scsim, n_hw_nr_error);
+ goto err_done;
+ }
+
+ /* Get req->nsge, if there are SG elements to be mapped */
+ nsge = scsi_dma_map(cmnd);
+ if (unlikely(nsge < 0)) {
+ CSIO_INC_STATS(scsim, n_dmamap_error);
+ goto err;
+ }
+
+ /* Do we support so many mappings? */
+ if (unlikely(nsge > scsim->max_sge)) {
+ csio_warn(hw,
+ "More SGEs than can be supported."
+ " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
+ CSIO_INC_STATS(scsim, n_unsupp_sge_error);
+ goto err_dma_unmap;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+ if (!ioreq) {
+ csio_err(hw, "Out of I/O request elements. Active #:%d\n",
+ scsim->stats.n_active);
+ CSIO_INC_STATS(scsim, n_no_req_error);
+ goto err_dma_unmap;
+ }
+
+ ioreq->nsge = nsge;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+ ioreq->wr_status = 0;
+ ioreq->drv_status = 0;
+ csio_scsi_cmnd(ioreq) = (void *)cmnd;
+ ioreq->tmo = 0;
+ ioreq->datadir = cmnd->sc_data_direction;
+
+ if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ CSIO_INC_STATS(ln, n_output_requests);
+ ln->stats.n_output_bytes += scsi_bufflen(cmnd);
+ } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
+ CSIO_INC_STATS(ln, n_input_requests);
+ ln->stats.n_input_bytes += scsi_bufflen(cmnd);
+ } else
+ CSIO_INC_STATS(ln, n_control_requests);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_scsi_cbfn;
+
+ /* Needed during abort */
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Message = 0;
+
+ /* Kick off SCSI IO SM on the ioreq */
+ spin_lock_irqsave(&hw->lock, flags);
+ retval = csio_scsi_start_io(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ ioreq, retval);
+ CSIO_INC_STATS(scsim, n_busy_error);
+ goto err_put_req;
+ }
+
+ return 0;
+
+err_put_req:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+err_dma_unmap:
+ if (nsge > 0)
+ scsi_dma_unmap(cmnd);
+err:
+ return rv;
+
+err_done:
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+static int
+csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
+{
+ int rv;
+ int cpu = smp_processor_id();
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
+
+ ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
+ /*
+ * Use current processor queue for posting the abort/close, but retain
+ * the ingress queue ID of the original I/O being aborted/closed - we
+ * need the abort/close completion to be received on the same queue
+ * as the original I/O.
+ */
+ ioreq->eq_idx = sqset->eq_idx;
+
+ if (abort == SCSI_ABORT)
+ rv = csio_scsi_abort(ioreq);
+ else
+ rv = csio_scsi_close(ioreq);
+
+ return rv;
+}
+
+static int
+csio_eh_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int ready = 0, ret;
+ unsigned long tmo = 0;
+ int rv;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ ioreq = (struct csio_ioreq *)cmnd->host_scribble;
+ if (!ioreq)
+ return SUCCESS;
+
+ if (!rn)
+ return FAILED;
+
+ csio_dbg(hw,
+ "Request to abort ioreq:%p cmd:%p cdb:%08llx"
+ " ssni:0x%x lun:%d iq:0x%x\n",
+ ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
+ cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
+
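+ /*
+ * If the ioreq no longer refers to this command, the I/O completed
+ * before the abort got here (abort raced with completion) - there is
+ * nothing left to abort.
+ */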
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return SUCCESS;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+ tmo = CSIO_SCSI_ABRT_TMO_MS;
+
+ spin_lock_irq(&hw->lock);
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ spin_unlock_irq(&hw->lock);
+
+ if (rv != 0) {
+ if (rv == -EINVAL) {
+ /* Return success if the abort/close request was issued
+ * against an already completed I/O.
+ */
+ return SUCCESS;
+ }
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+
+ goto inval_scmnd;
+ }
+
+ /* Wait for completion */
+ init_completion(&ioreq->cmplobj);
+ wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
+
+ /* FW didn't respond to abort within our timeout */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+
+ csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
+ CSIO_INC_STATS(scsim, n_abrt_timedout);
+
+inval_scmnd:
+ if (ioreq->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_cmnd(ioreq) = NULL;
+ spin_unlock_irq(&hw->lock);
+
+ cmnd->result = (DID_ERROR << 16);
+ cmnd->scsi_done(cmnd);
+
+ return FAILED;
+ }
+
+ /* FW successfully aborted the request */
+ if (host_byte(cmnd->result) == DID_REQUEUE) {
+ csio_info(hw,
+ "Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return SUCCESS;
+ } else {
+ csio_info(hw,
+ "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return FAILED;
+ }
+}
+
+/*
+ * csio_tm_cbfn - TM callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ * Cache the result in 'cmnd', since ioreq will be freed soon
+ * after we return from here, and the waiting thread shouldn't trust
+ * the ioreq contents.
+ */
+static void
+csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags = 0;
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
+ req, req->wr_status);
+
+ /* Cache FW return status */
+ cmnd->SCp.Status = req->wr_status;
+
+ /* Special handling based on FCP response */
+
+ /*
+ * FW returns us this error if flags were set. FCP4 says
+ * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
+ * So if a target were to set this bit, we expect that the
+ * rsp_code is set to FCP_TMF_CMPL for a successful TM
+ * completion. Any other rsp_code means TM operation failed.
+ * If a target were to just ignore setting flags, we treat
+ * the TM operation as success, and FW returns FW_SUCCESS.
+ */
+ if (req->wr_status == FW_SCSI_RSP_ERR) {
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+
+ flags = fcp_resp->resp.fr_flags;
+
+ /* Modify return status if flags indicate success */
+ if (flags & FCP_RSP_LEN_VAL)
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ cmnd->SCp.Status = FW_SUCCESS;
+
+ csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
+ }
+
+ /* Wake up the TM handler thread */
+ csio_scsi_cmnd(req) = NULL;
+}
+
+static int
+csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ struct csio_scsi_qset *sqset;
+ unsigned long flags;
+ int retval;
+ int count, ret;
+ LIST_HEAD(local_q);
+ struct csio_scsi_level_data sld;
+
+ if (!rn)
+ goto fail;
+
+ csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
+ cmnd->device->lun, rn->flowid, rn->scsi_id);
+
+ if (!csio_is_lnode_ready(ln)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " local node vnpi:0x%x (LUN:%d)\n",
+ ln->vnp_flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Lnode is ready, now wait on rport node readiness */
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ /*
+ * If we have blocked in the previous call, at this point, either the
+ * remote node has come back online, or device loss timer has fired
+ * and the remote node is destroyed. Allow the LUN reset only for
+ * the former case, since LUN reset is a TMF I/O on the wire, and we
+ * need a valid session to issue it.
+ */
+ if (fc_remote_port_chkready(rn->rport)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " remote node ssni:0x%x (LUN:%d)\n",
+ rn->flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+
+ if (!ioreq) {
+ csio_err(hw, "Out of IO request elements. Active # :%d\n",
+ scsim->stats.n_active);
+ goto fail;
+ }
+
+ sqset = &hw->sqset[ln->portid][smp_processor_id()];
+ ioreq->nsge = 0;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+
+ csio_scsi_cmnd(ioreq) = cmnd;
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Status = 0;
+
+ cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
+
+ /*
+ * FW times the LUN reset for ioreq->tmo, so we have to wait a little
+ * longer (10s for now) than that to allow FW to return the timed
+ * out command.
+ */
+ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_tm_cbfn;
+
+ /* Save off the ioreq info for later use */
+ sld.level = CSIO_LEV_LUN;
+ sld.lnode = ioreq->lnode;
+ sld.rnode = ioreq->rnode;
+ sld.oslun = (uint64_t)cmnd->device->lun;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ /* Kick off TM SM on the ioreq */
+ retval = csio_scsi_start_tm(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
+ ioreq, retval);
+ goto fail_ret_ioreq;
+ }
+
+ csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
+ count * (CSIO_SCSI_TM_POLL_MS / 1000));
+ /* Wait for completion */
+ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
+ && count--)
+ msleep(CSIO_SCSI_TM_POLL_MS);
+
+ /* LUN reset timed-out */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+ csio_err(hw, "LUN reset (%d:%d) timed out\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ spin_unlock_irq(&hw->lock);
+
+ goto fail_ret_ioreq;
+ }
+
+ /* LUN reset returned, check cached status */
+ if (cmnd->SCp.Status != FW_SUCCESS) {
+ csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
+ cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ goto fail;
+ }
+
+ /* LUN reset succeeded, start aborting affected I/Os */
+ /*
+ * Since the host guarantees that there will not be any more I/Os
+ * to that LUN until the LUN reset completes, we gather the pending
+ * I/Os only after the LUN reset.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_scsi_gather_active_ios(scsim, &sld, &local_q);
+
+ retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
+ spin_unlock_irq(&hw->lock);
+
+ /* Aborts may have timed out */
+ if (retval != 0) {
+ csio_err(hw,
+ "Attempt to abort I/Os during LUN reset of %d"
+ " returned %d\n", cmnd->device->lun, retval);
+ /* Return I/Os back to active_q */
+ spin_lock_irq(&hw->lock);
+ list_splice_tail_init(&local_q, &scsim->active_q);
+ spin_unlock_irq(&hw->lock);
+ goto fail;
+ }
+
+ CSIO_INC_STATS(rn, n_lun_rst);
+
+ csio_info(hw, "LUN reset occurred (%d:%d)\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ return SUCCESS;
+
+fail_ret_ioreq:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+fail:
+ CSIO_INC_STATS(rn, n_lun_rst_fail);
+ return FAILED;
+}
+
+static int
+csio_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
+
+ return 0;
+}
+
+static int
+csio_slave_configure(struct scsi_device *sdev)
+{
+ if (sdev->tagged_supported)
+ scsi_activate_tcq(sdev, csio_lun_qdepth);
+ else
+ scsi_deactivate_tcq(sdev, csio_lun_qdepth);
+
+ return 0;
+}
+
+static void
+csio_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
+static int
+csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ int rv = 1;
+
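+ /* Report the scan as finished if the lnode is gone or already deleted */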
+ spin_lock_irq(shost->host_lock);
+ if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
+ goto out;
+
+ rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
+ csio_delta_scan_tmo * HZ);
+out:
+ spin_unlock_irq(shost->host_lock);
+
+ return rv;
+}
+
+struct scsi_host_template csio_fcoe_shost_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_lport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+struct scsi_host_template csio_fcoe_shost_vport_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_vport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+/*
+ * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ * @buf_size: buffer size
+ * @num_buf : Number of buffers.
+ *
+ * This routine allocates the DMA buffers required for SCSI data transfer
+ * (DDP) when the SGL buffers of a SCSI read request posted by the SCSI
+ * midlayer are not virtually contiguous.
+ */
+static int
+csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
+ int buf_size, int num_buf)
+{
+ int n = 0;
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc = NULL;
+ uint32_t unit_size = 0;
+
+ if (!num_buf)
+ return 0;
+
+ if (!buf_size)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&scm->ddp_freelist);
+
+ /* Align buf size to page size */
+ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
+ /* Initialize dma descriptors */
+ for (n = 0; n < num_buf; n++) {
+ /* Set unit size to request size */
+ unit_size = buf_size;
+ ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
+ if (!ddp_desc) {
+ csio_err(hw,
+ "Failed to allocate ddp descriptors,"
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ddp);
+ goto no_mem;
+ }
+
+ /* Allocate Dma buffers for DDP */
+ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
+ &ddp_desc->paddr);
+ if (!ddp_desc->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer (ddp) allocation"
+ " failed!\n");
+ kfree(ddp_desc);
+ goto no_mem;
+ }
+
+ ddp_desc->len = unit_size;
+
+ /* Added it to scsi ddp freelist */
+ list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+ }
+
+ return 0;
+no_mem:
+ /* release dma descs back to freelist and free dma memory */
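+ /*
+ * Step to the previous node before freeing the current one so that
+ * list_for_each() still advances to the correct next entry.
+ */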
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+
+ return -ENOMEM;
+}
+
+/*
+ * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ *
+ * This routine frees ddp buffers.
+ */
+static void
+csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc;
+
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+}
+
+/**
+ * csio_scsim_init - Initialize SCSI Module
+ * @scm: SCSI Module
+ * @hw: HW module
+ *
+ */
+int
+csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ int i;
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ INIT_LIST_HEAD(&scm->active_q);
+ scm->hw = hw;
+
+ scm->proto_cmd_len = sizeof(struct fcp_cmnd);
+ scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
+ scm->max_sge = CSIO_SCSI_MAX_SGE;
+
+ spin_lock_init(&scm->freelist_lock);
+
+ /* Pre-allocate ioreqs and initialize them */
+ INIT_LIST_HEAD(&scm->ioreq_freelist);
+ for (i = 0; i < csio_scsi_ioreqs; i++) {
+
+ ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ioreq) {
+ csio_err(hw,
+ "I/O request element allocation failed, "
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ioreq);
+
+ goto free_ioreq;
+ }
+
+ /* Allocate Dma buffers for Response Payload */
+ dma_buf = &ioreq->dma_buf;
+ dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer allocation"
+ " failed!\n");
+ kfree(ioreq);
+ goto free_ioreq;
+ }
+
+ dma_buf->len = scm->proto_rsp_len;
+
+ /* Set state to uninit */
+ csio_init_state(&ioreq->sm, csio_scsis_uninit);
+ INIT_LIST_HEAD(&ioreq->gen_list);
+ init_completion(&ioreq->cmplobj);
+
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+ }
+
+ if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
+ goto free_ioreq;
+
+ return 0;
+
+free_ioreq:
+ /*
+ * Free up existing allocations, since an error
+ * from here means we are returning for good
+ */
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * csio_scsim_exit - Uninitialize SCSI Module
+ * @scm: SCSI Module
+ *
+ */
+void
+csio_scsim_exit(struct csio_scsim *scm)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ csio_scsi_free_ddp_bufs(scm, scm->hw);
+}
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
new file mode 100644
index 00000000000..2257c3dcf72
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -0,0 +1,342 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_SCSI_H__
+#define __CSIO_SCSI_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/completion.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "csio_defs.h"
+#include "csio_wr.h"
+
+extern struct scsi_host_template csio_fcoe_shost_template;
+extern struct scsi_host_template csio_fcoe_shost_vport_template;
+
+extern int csio_scsi_eqsize;
+extern int csio_scsi_iqlen;
+extern int csio_scsi_ioreqs;
+extern uint32_t csio_max_scan_tmo;
+extern uint32_t csio_delta_scan_tmo;
+extern int csio_lun_qdepth;
+
+/*
+ **************************** NOTE *******************************
+ * How do we calculate MAX FCoE SCSI SGEs? Here is the math:
+ * Max Egress WR size = 512 bytes
+ * One SCSI egress WR has the following fixed no of bytes:
+ * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
+ * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
+ * ------
+ * 80
+ * ------
+ * That leaves us with 512 - 80 = 432 bytes for the data SGE. Using
+ * a struct ulptx_sgl header for the SGE consumes:
+ * - 4 bytes for cmnd_sge.
+ * - 12 bytes for the first SGL.
+ * That leaves us with 432 - 16 = 416 bytes for the remaining SGE pairs,
+ * which is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
+ * or 34 SGEs. Adding the first SGE gives us 35 SGEs.
+ */
+#define CSIO_SCSI_MAX_SGE 35
+#define CSIO_SCSI_ABRT_TMO_MS 60000
+#define CSIO_SCSI_LUNRST_TMO_MS 60000
+#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
+ * all TM timeouts.
+ */
+#define CSIO_SCSI_IQ_WRSZ 128
+#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
+
+#define CSIO_MAX_SNS_LEN 128
+#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
+
+/* Reference to scsi_cmnd */
+#define csio_scsi_cmnd(req) ((req)->scratch1)
+
+struct csio_scsi_stats {
+ uint64_t n_tot_success; /* Total number of good I/Os */
+ uint32_t n_rn_nr_error; /* No. of remote-node-not-
+ * ready errors
+ */
+ uint32_t n_hw_nr_error; /* No. of hw-module-not-
+ * ready errors
+ */
+ uint32_t n_dmamap_error; /* No. of DMA map errors */
+ uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
+ * errors.
+ */
+ uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
+ uint32_t n_busy_error; /* No. of -EBUSY errors */
+ uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
+ uint32_t n_rsperror; /* No. of response errors */
+ uint32_t n_autosense; /* No. of auto sense replies */
+ uint32_t n_ovflerror; /* No. of overflow errors */
+ uint32_t n_unflerror; /* No. of underflow errors */
+ uint32_t n_rdev_nr_error;/* No. of rdev not
+ * ready errors
+ */
+ uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
+ uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
+ uint32_t n_link_down_error;/* No. of link down errors */
+ uint32_t n_no_xchg_error; /* No. of no-exchange errors */
+ uint32_t n_unknown_error;/* No. of unhandled errors */
+ uint32_t n_aborted; /* No. of aborted I/Os */
+ uint32_t n_abrt_timedout; /* No. of abort timedouts */
+ uint32_t n_abrt_fail; /* No. of abort failures */
+ uint32_t n_abrt_dups; /* No. of duplicate aborts */
+ uint32_t n_abrt_race_comp; /* No. of aborts that raced
+ * with completions.
+ */
+ uint32_t n_abrt_busy_error;/* No. of abort failures
+ * due to -EBUSY.
+ */
+ uint32_t n_closed; /* No. of closed I/Os */
+ uint32_t n_cls_busy_error; /* No. of close failures
+ * due to -EBUSY.
+ */
+ uint32_t n_active; /* No. of IOs in active_q */
+ uint32_t n_tm_active; /* No. of TMs in active_q */
+ uint32_t n_wcbfn; /* No. of I/Os in worker
+ * cbfn q
+ */
+ uint32_t n_free_ioreq; /* No. of freelist entries */
+ uint32_t n_free_ddp; /* No. of DDP freelist */
+ uint32_t n_unaligned; /* No. of Unaligned SGls */
+ uint32_t n_inval_cplop; /* No. of invalid CPL ops in IQ */
+ uint32_t n_inval_scsiop; /* No. of invalid SCSI ops in IQ */
+};
+
+struct csio_scsim {
+ struct csio_hw *hw; /* Pointer to HW module */
+ uint8_t max_sge; /* Max SGE */
+ uint8_t proto_cmd_len; /* Proto specific SCSI
+ * cmd length
+ */
+ uint16_t proto_rsp_len; /* Proto specific SCSI
+ * response length
+ */
+ spinlock_t freelist_lock; /* Lock for ioreq freelist */
+ struct list_head active_q; /* Outstanding SCSI I/Os */
+ struct list_head ioreq_freelist; /* Free list of ioreq's */
+ struct list_head ddp_freelist; /* DDP descriptor freelist */
+ struct csio_scsi_stats stats; /* This module's statistics */
+};
+
+/* State machine defines */
+enum csio_scsi_ev {
+ CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
+ CSIO_SCSIE_START_TM, /* Start a TM IO */
+ CSIO_SCSIE_COMPLETED, /* IO Completed */
+ CSIO_SCSIE_ABORT, /* Abort IO */
+ CSIO_SCSIE_ABORTED, /* IO Aborted */
+ CSIO_SCSIE_CLOSE, /* Close exchange */
+ CSIO_SCSIE_CLOSED, /* Exchange closed */
+ CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
+ * cleanup this I/O.
+ */
+};
+
+enum csio_scsi_lev {
+ CSIO_LEV_ALL = 1,
+ CSIO_LEV_LNODE,
+ CSIO_LEV_RNODE,
+ CSIO_LEV_LUN,
+};
+
+struct csio_scsi_level_data {
+ enum csio_scsi_lev level;
+ struct csio_rnode *rnode;
+ struct csio_lnode *lnode;
+ uint64_t oslun;
+};
+
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq(struct csio_scsim *scm)
+{
+ struct csio_sm *req;
+
+ if (likely(!list_empty(&scm->ioreq_freelist))) {
+ req = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&req->sm_list);
+ CSIO_DEC_STATS(scm, n_free_ioreq);
+ return (struct csio_ioreq *)req;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
+{
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+}
+
+static inline void
+csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_init(reqlist, &scm->ioreq_freelist);
+ scm->stats.n_free_ioreq += n;
+}
+
+static inline struct csio_dma_buf *
+csio_get_scsi_ddp(struct csio_scsim *scm)
+{
+ struct csio_dma_buf *ddp;
+
+ if (likely(!list_empty(&scm->ddp_freelist))) {
+ ddp = list_first_entry(&scm->ddp_freelist,
+ struct csio_dma_buf, list);
+ list_del_init(&ddp->list);
+ CSIO_DEC_STATS(scm, n_free_ddp);
+ return ddp;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
+{
+ list_add_tail(&ddp->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+}
+
+static inline void
+csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_tail_init(reqlist, &scm->ddp_freelist);
+ scm->stats.n_free_ddp += n;
+}
+
+static inline void
+csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
+ if (csio_list_deleted(&ioreq->sm.sm_list))
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
+}
+
+/*
+ * csio_scsi_start_io - Kick starts the IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_io(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_start_tm - Kicks off the Task management IO SM.
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_start_tm(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_abort - Abort an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_abort(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_close - Close an IO request
+ * @req: io request SM.
+ *
+ * needs to be called with lock held.
+ */
+static inline int
+csio_scsi_close(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
+ return ioreq->drv_status;
+}
+
+void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
+int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
+int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
+ struct csio_lnode *);
+struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *,
+ void *, uint8_t **);
+int csio_scsi_qconfig(struct csio_hw *);
+int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
+void csio_scsim_exit(struct csio_scsim *);
+
+#endif /* __CSIO_SCSI_H__ */
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
new file mode 100644
index 00000000000..c32df1bdaa9
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -0,0 +1,1632 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+ reg * sizeof(uint32_t));
+}
+
+/* Free list buffer size */
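+/*
+ * The low 4 bits of the buffer's bus address select the FL buffer-size
+ * register (cf. the sreg encoding in csio_wr_fill_fl()).
+ */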
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+ return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+ /*
+ * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
+ * bytes in the freelist queue. This translates to at least
+ * 8 freelist buffer pointers (since each pointer is 8 bytes).
+ */
+ if (flq->inc_idx >= 8) {
+ csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+ PIDX(flq->inc_idx / 8),
+ MYPF_REG(SGE_PF_KDOORBELL));
+ flq->inc_idx &= 7;
+ }
+}
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+ csio_wr_reg32(hw, CIDXINC(0) |
+ INGRESSQID(iqid) |
+ TIMERREG(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ *
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ __be64 *d = (__be64 *)(flq->vstart);
+ struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+ uint64_t paddr;
+ int sreg = flq->un.fl.sreg;
+ int n = flq->credits;
+
+ while (n--) {
+ buf->len = sge->sge_fl_buf_size[sreg];
+ buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
+ &buf->paddr);
+ if (!buf->vaddr) {
+ csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+ return -ENOMEM;
+ }
+
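+ /* Encode the buffer-size register index in the low 4 bits of the
+ * DMA address written to the freelist descriptor.
+ */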
+ paddr = buf->paddr | (sreg & 0xF);
+
+ *d++ = cpu_to_be64(paddr);
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_wr_update_fl - Update freelist indices after adding buffers.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ * @n: Number of freelist buffers added.
+ *
+ * Advance the freelist producer index and pending doorbell count by @n,
+ * wrapping the producer index at the queue size.
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+
+ flq->inc_idx += n;
+ flq->pidx += n;
+ if (unlikely(flq->pidx >= flq->credits))
+ flq->pidx -= (uint16_t)flq->credits;
+
+ CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Size of the WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: What is the FL buffer size register?
+ * @iq_int_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle is returned to the caller in the form of the queue's index
+ * into the q_arr array.
+ * If the caller has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for the DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * index in the ingress queue's flq_idx. This is how a freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+ uint16_t type, void *owner, uint32_t nflb, int sreg,
+ iq_handler_t iq_intx_handler)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q, *flq;
+ int free_idx = wrm->free_qidx;
+ int ret_idx = free_idx;
+ uint32_t qsz;
+ int flq_idx;
+
+ if (free_idx >= wrm->num_q) {
+ csio_err(hw, "No more free queues.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case CSIO_EGRESS:
+ qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+ break;
+ case CSIO_INGRESS:
+ switch (wrsize) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+ wrsize);
+ return -1;
+ }
+
+ /*
+ * The number of elements must be a multiple of 16, so this
+ * includes the status page size.
+ */
+ qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+ break;
+ case CSIO_FREELIST:
+ qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+ break;
+ default:
+ csio_err(hw, "Invalid queue type: 0x%x\n", type);
+ return -1;
+ }
+
+ q = wrm->q_arr[free_idx];
+
+ q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
+ if (!q->vstart) {
+ csio_err(hw,
+ "Failed to allocate DMA memory for "
+ "queue at id: %d size: %d\n", free_idx, qsize);
+ return -1;
+ }
+
+ /*
+ * We need to zero out the contents, importantly for ingress,
+ * since we start with a generation bit of 1 for ingress.
+ */
+ memset(q->vstart, 0, qsz);
+
+ q->type = type;
+ q->owner = owner;
+ q->pidx = q->cidx = q->inc_idx = 0;
+ q->size = qsz;
+ q->wr_sz = wrsize; /* If using fixed size WRs */
+
+ wrm->free_qidx++;
+
+ if (type == CSIO_INGRESS) {
+ /* Since queue area is set to zero */
+ q->un.iq.genbit = 1;
+
+ /*
+ * Ingress queue status page size is always the size of
+ * the ingress queue entry.
+ */
+ q->credits = (qsz - q->wr_sz) / q->wr_sz;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - q->wr_sz);
+
+ /* Allocate memory for FL if requested */
+ if (nflb > 0) {
+ flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+ sizeof(__be64), CSIO_FREELIST,
+ owner, 0, sreg, NULL);
+ if (flq_idx == -1) {
+ csio_err(hw,
+ "Failed to allocate FL queue"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ /* Associate the new FL with the Ingress queue */
+ q->un.iq.flq_idx = flq_idx;
+
+ flq = wrm->q_arr[q->un.iq.flq_idx];
+ flq->un.fl.bufs = kzalloc(flq->credits *
+ sizeof(struct csio_dma_buf),
+ GFP_KERNEL);
+ if (!flq->un.fl.bufs) {
+ csio_err(hw,
+ "Failed to allocate FL queue bufs"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ flq->un.fl.packen = 0;
+ flq->un.fl.offset = 0;
+ flq->un.fl.sreg = sreg;
+
+ /* Fill up the free list buffers */
+ if (csio_wr_fill_fl(hw, flq))
+ return -1;
+
+ /*
+ * Make sure at least 1 credit (8 FL buffers) in a FLQ
+ * remains unpopulated, otherwise HW thinks the
+ * FLQ is empty.
+ */
+ flq->pidx = flq->inc_idx = flq->credits - 8;
+ } else {
+ q->un.iq.flq_idx = -1;
+ }
+
+ /* Associate the IQ INTx handler. */
+ q->un.iq.iq_intx_handler = iq_intx_handler;
+
+ csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
+
+ } else if (type == CSIO_EGRESS) {
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
+ } else { /* Freelist */
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
+ }
+
+ return ret_idx;
+}
+
+/*
+ * csio_wr_iq_create_rsp - Response handler for IQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that got created.
+ *
+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
+ */
+static int
+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ struct csio_iq_params iqp;
+ enum fw_retval retval;
+ uint32_t iq_id;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_iqid(hw, iq_idx) = iqp.iqid;
+ csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
+ csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
+ csio_q_inc_idx(hw, iq_idx) = 0;
+
+ /* Actual iq-id. */
+ iq_id = iqp.iqid - hw->wrm.fw_iq_start;
+
+ /* Set the iq-id to iq map table. */
+ if (iq_id >= CSIO_MAX_IQ) {
+ csio_err(hw,
+ "Exceeding MAX_IQ(%d) supported!"
+ " iqid:%d rel_iqid:%d FW iq_start:%d\n",
+ CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_q_set_intr_map(hw, iq_idx, iq_id);
+
+ /*
+ * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
+ * ingress context of this queue. This will block interrupts to
+ * this queue until the next GTS write. Therefore, we do a
+ * 0-cidx increment GTS write for this queue just to clear the
+ * interrupt_sent bit. This will re-enable interrupts to this
+ * queue.
+ */
+ csio_wr_sge_intr_enable(hw, iqp.physiqid);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ csio_q_flid(hw, flq_idx) = iqp.fl0id;
+ csio_q_cidx(hw, flq_idx) = 0;
+ csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+ csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+
+ /* Now update SGE about the buffers allocated during init */
+ csio_wr_ring_fldb(hw, flq);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_iq_create - Configure an Ingress queue with FW.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index in the WR module.
+ * @vec: MSIX vector.
+ * @portid: PCIE Channel to be associated with this queue.
+ * @async: Is this a FW asynchronous message handling queue?
+ * @cbfn: Completion callback.
+ *
+ * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
+ * with alloc/write bits set.
+ */
+int
+csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
+ uint32_t vec, uint8_t portid, bool async,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+ csio_q_portid(hw, iq_idx) = portid;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "IQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_INTX:
+ case CSIO_IM_MSI:
+ /* For interrupt forwarding queue only */
+ if (hw->intr_iq_idx == iq_idx)
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ else
+ iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
+ iqp.iqandstindex =
+ csio_q_physiqid(hw, hw->intr_iq_idx);
+ break;
+ case CSIO_IM_MSIX:
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ iqp.iqandstindex = (uint16_t)vec;
+ break;
+ case CSIO_IM_NONE:
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Pass in the ingress queue cmd parameters */
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iq_start = 1;
+ iqp.viid = 0;
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+ iqp.iqasynch = async;
+ if (csio_intr_coalesce_cnt)
+ iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
+ else
+ iqp.iqanus = X_UPDATESCHEDULING_TIMER;
+ iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
+ iqp.iqpciech = portid;
+ iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
+
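+ /* Encode the IQ entry size: 0, 1, 2 and 3 select 16, 32, 64 and 128 byte entries */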
+ switch (csio_q_wr_sz(hw, iq_idx)) {
+ case 16:
+ iqp.iqesize = 0; break;
+ case 32:
+ iqp.iqesize = 1; break;
+ case 64:
+ iqp.iqesize = 2; break;
+ case 128:
+ iqp.iqesize = 3; break;
+ }
+
+ iqp.iqsize = csio_q_size(hw, iq_idx) /
+ csio_q_wr_sz(hw, iq_idx);
+ iqp.iqaddr = csio_q_pstart(hw, iq_idx);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ iqp.fl0paden = 1;
+ iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
+ iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
+ iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
+ iqp.fl0addr = csio_q_pstart(hw, flq_idx);
+ }
+
+ csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of IQ cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that got created.
+ *
+ * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
+ */
+static int
+csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ struct csio_eq_params eqp;
+ enum fw_retval retval;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
+ csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
+ csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
+ csio_q_inc_idx(hw, eq_idx) = 0;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_eq_create - Configure an Egress queue with FW.
+ * @hw: HW module.
+ * @priv: Private data.
+ * @eq_idx: Egress queue index in the WR module.
+ * @iq_idx: Associated ingress queue index.
+ * @cbfn: Completion callback.
+ *
+ * This API configures an offload egress queue with FW by issuing a
+ * FW_EQ_OFLD_CMD (with alloc + write ) mailbox.
+ */
+int
+csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
+ int iq_idx, uint8_t portid,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "EQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqstart = 1;
+ eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
+ eqp.iqid = csio_q_iqid(hw, iq_idx);
+ eqp.fbmin = X_FETCHBURSTMIN_64B;
+ eqp.fbmax = X_FETCHBURSTMAX_512B;
+ eqp.cidxfthresh = 0;
+ eqp.pciechn = portid;
+ eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
+ eqp.eqaddr = csio_q_pstart(hw, eq_idx);
+
+ csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ &eqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that was freed.
+ *
+ * Handle FW_IQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_iq_destroy - Free an ingress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an ingress queue by issuing the FW_IQ_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iqid = csio_q_iqid(hw, iq_idx);
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1)
+ iqp.fl0id = csio_q_flid(hw, flq_idx);
+ else
+ iqp.fl0id = 0xFFFF;
+
+ iqp.fl1id = 0xFFFF;
+
+ csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that was freed.
+ *
+ * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_eq_destroy - Free an Egress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @eq_idx: Egress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqid = csio_q_eqid(hw, eq_idx);
+
+ csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
+ * @hw: HW module
+ * @qidx: Egress queue index
+ *
+ * Cleanup the Egress queue status page.
+ */
+static void
+csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
+{
+ struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+
+ memset(stp, 0, sizeof(*stp));
+}
+
+/*
+ * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
+ * @hw: HW module
+ * @qidx: Ingress queue index
+ *
+ * Clean up the footer entries in the given ingress queue and
+ * set the internal copy of the generation bit to 1.
+ */
+static void
+csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *wr;
+ struct csio_iqwr_footer *ftr;
+ uint32_t i = 0;
+
+ /* set to 1 since we are just about to zero out the genbit */
+ q->un.iq.genbit = 1;
+
+ for (i = 0; i < q->credits; i++) {
+ /* Get the WR */
+ wr = (void *)((uintptr_t)q->vstart +
+ (i * q->wr_sz));
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ /* Zero out footer */
+ memset(ftr, 0, sizeof(*ftr));
+ }
+}
+
+int
+csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
+{
+ int i, flq_idx;
+ struct csio_q *q;
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv;
+
+ for (i = 0; i < wrm->free_qidx; i++) {
+ q = wrm->q_arr[i];
+
+ switch (q->type) {
+ case CSIO_EGRESS:
+ if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_eq_stpg(hw, i);
+ if (!cmd) {
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ }
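+ /* Note: no break here - the CSIO_EGRESS case falls through to CSIO_INGRESS */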
+ case CSIO_INGRESS:
+ if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_iq_ftr(hw, i);
+ if (!cmd) {
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) =
+ CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
+ }
+ default:
+ break;
+ }
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
+
+ return 0;
+}
+
+/*
+ * csio_wr_get - Get requested size of WR entry/entries from queue.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @size: Cumulative size of Work request(s).
+ * @wrp: Work request pair.
+ *
+ * If requested credits are available, return the start address of the
+ * work request in the work request pair. Set pidx accordingly and
+ * return.
+ *
+ * NOTE about WR pair:
+ * ==================
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/size to be passed back to the caller -
+ * hence Work request pair format.
+ */
+int
+csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
+ struct csio_wr_pair *wrp)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *cwr = (void *)((uintptr_t)(q->vstart) +
+ (q->pidx * CSIO_QCREDIT_SZ));
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+ uint16_t cidx = q->cidx = ntohs(stp->cidx);
+ uint16_t pidx = q->pidx;
+ uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
+ int req_credits = req_sz / CSIO_QCREDIT_SZ;
+ int credits;
+
+ CSIO_DB_ASSERT(q->owner != NULL);
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+ CSIO_DB_ASSERT(cidx <= q->credits);
+
+ /* Calculate credits */
+ if (pidx > cidx) {
+ credits = q->credits - (pidx - cidx) - 1;
+ } else if (cidx > pidx) {
+ credits = cidx - pidx - 1;
+ } else {
+ /* cidx == pidx, empty queue */
+ credits = q->credits;
+ CSIO_INC_STATS(q, n_qempty);
+ }
+
+ /*
+ * Check if we have enough credits.
+ * credits = 1 implies queue is full.
+ */
+ if (!credits || (req_credits > credits)) {
+ CSIO_INC_STATS(q, n_qfull);
+ return -EBUSY;
+ }
+
+ /*
+ * If we are here, we have enough credits to satisfy the
+ * request. Check if we are near the end of q, and if WR spills over.
+ * If it does, use the first addr/size to cover the queue until
+ * the end. Fit the remainder portion of the request at the top
+ * of queue and return it in the second addr/len. Set pidx
+ * accordingly.
+ */
+ if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
+ wrp->addr1 = cwr;
+ wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
+ wrp->addr2 = q->vstart;
+ wrp->size2 = req_sz - wrp->size1;
+ q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
+ CSIO_QCREDIT_SZ);
+ CSIO_INC_STATS(q, n_qwrap);
+ CSIO_INC_STATS(q, n_eq_wr_split);
+ } else {
+ wrp->addr1 = cwr;
+ wrp->size1 = req_sz;
+ wrp->addr2 = NULL;
+ wrp->size2 = 0;
+ q->pidx += (uint16_t)req_credits;
+
+ /* We are at the end of the queue; roll pidx back to the top of the queue */
+ if (unlikely(q->pidx == q->credits)) {
+ q->pidx = 0;
+ CSIO_INC_STATS(q, n_qwrap);
+ }
+ }
+
+ q->inc_idx = (uint16_t)req_credits;
+
+ CSIO_INC_STATS(q, n_tot_reqs);
+
+ return 0;
+}
+
+/*
+ * csio_wr_copy_to_wrp - Copies given data into WR.
+ * @data_buf: Data buffer.
+ * @wrp: Work request pair.
+ * @wr_off: Work request offset.
+ * @data_len: Data length.
+ *
+ * Copies the given data into the Work Request. The work request pair (wrp)
+ * specifies the address information of the Work Request.
+ * Returns: none
+ */
+void
+csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
+ uint32_t wr_off, uint32_t data_len)
+{
+ uint32_t nbytes;
+
+ /* Number of bytes available in buffer addr1 of the WRP */
+ nbytes = ((wrp->size1 - wr_off) >= data_len) ?
+ data_len : (wrp->size1 - wr_off);
+
+ memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
+ data_len -= nbytes;
+
+ /* Write the remaining data from the beginning of the circular buffer */
+ if (data_len) {
+ CSIO_DB_ASSERT(data_len <= wrp->size2);
+ CSIO_DB_ASSERT(wrp->addr2 != NULL);
+ memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
+ }
+}
+
+/*
+ * csio_wr_issue - Notify chip of Work request.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @prio: 0: Low priority, 1: High priority
+ *
+ * Rings the SGE doorbell, posting the number of new credits (inc_idx) added
+ * to the passed-in queue since the last doorbell ring.
+ *
+ */
+int
+csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+
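+ /* Make sure the WR contents hit memory before ringing the doorbell */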
+ wmb();
+ /* Ring the SGE doorbell, posting q->inc_idx new credits */
+ csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+ PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+ q->inc_idx = 0;
+
+ return 0;
+}
+
+static inline uint32_t
+csio_wr_avail_qcredits(struct csio_q *q)
+{
+ if (q->pidx > q->cidx)
+ return q->pidx - q->cidx;
+ else if (q->cidx > q->pidx)
+ return q->credits - (q->cidx - q->pidx);
+ else
+ return 0; /* cidx == pidx, empty queue */
+}
+
+/*
+ * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
+ * @hw: HW module.
+ * @flq: The freelist queue.
+ *
+ * Invalidate the driver's version of a freelist buffer entry,
+ * without freeing the associated DMA memory. The entry
+ * to be invalidated is picked up from the current Free list
+ * queue cidx.
+ *
+ */
+static inline void
+csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
+{
+ flq->cidx++;
+ if (flq->cidx == flq->credits) {
+ flq->cidx = 0;
+ CSIO_INC_STATS(flq, n_qwrap);
+ }
+}
+
+/*
+ * csio_wr_process_fl - Process a freelist completion.
+ * @hw: HW module.
+ * @q: The ingress queue attached to the Freelist.
+ * @wr: The freelist completion WR in the ingress queue.
+ * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
+ * @iq_handler: Caller's handler for this completion.
+ * @priv: Private pointer of caller
+ *
+ */
+static inline void
+csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
+ void *wr, uint32_t len_to_qid,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ struct csio_fl_dma_buf flb;
+ struct csio_dma_buf *buf, *fbuf;
+ uint32_t bufsz, len, lastlen = 0;
+ int n;
+ struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
+
+ CSIO_DB_ASSERT(flq != NULL);
+
+ len = len_to_qid;
+
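+ /*
+ * A "new buffer" flag means this payload starts in a fresh FL buffer;
+ * retire any partially consumed (packed) buffer first.
+ */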
+ if (len & IQWRF_NEWBUF) {
+ if (flq->un.fl.offset > 0) {
+ csio_wr_inval_flq_buf(hw, flq);
+ flq->un.fl.offset = 0;
+ }
+ len = IQWRF_LEN_GET(len);
+ }
+
+ CSIO_DB_ASSERT(len != 0);
+
+ flb.totlen = len;
+
+ /* Consume all freelist buffers used for len bytes */
+ for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
+ buf = &flq->un.fl.bufs[flq->cidx];
+ bufsz = csio_wr_fl_bufsz(sge, buf);
+
+ fbuf->paddr = buf->paddr;
+ fbuf->vaddr = buf->vaddr;
+
+ flb.offset = flq->un.fl.offset;
+ lastlen = min(bufsz, len);
+ fbuf->len = lastlen;
+
+ len -= lastlen;
+ if (!len)
+ break;
+ csio_wr_inval_flq_buf(hw, flq);
+ }
+
+ flb.defer_free = flq->un.fl.packen ? 0 : 1;
+
+ iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+ &flb, priv);
+
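+ /*
+ * With buffer packing, the last buffer may be only partially consumed,
+ * so keep it and advance the packing offset; without packing, retire
+ * the buffer now.
+ */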
+ if (flq->un.fl.packen)
+ flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+ else
+ csio_wr_inval_flq_buf(hw, flq);
+
+}
+
+/*
+ * csio_is_new_iqwr - Is this a new ingress queue entry?
+ * @q: Ingress queue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+ return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw: HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+ struct csio_iqwr_footer *ftr;
+ uint32_t wr_type, fw_qid, qid;
+ struct csio_q *q_completed;
+ struct csio_q *flq = csio_iq_has_fl(q) ?
+ wrm->q_arr[q->un.iq.flq_idx] : NULL;
+ int rv = 0;
+
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+
+ /*
+	 * When q wrapped around last time, the driver should have inverted
+	 * q->un.iq.genbit as well.
+ */
+ while (csio_is_new_iqwr(q, ftr)) {
+
+ CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+ (uintptr_t)q->vwrap);
+ rmb();
+ wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+ switch (wr_type) {
+ case X_RSPD_TYPE_CPL:
+ /* Subtract footer from WR len */
+ iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+ break;
+ case X_RSPD_TYPE_FLBUF:
+ csio_wr_process_fl(hw, q, wr,
+ ntohl(ftr->pldbuflen_qid),
+ iq_handler, priv);
+ break;
+ case X_RSPD_TYPE_INTR:
+ fw_qid = ntohl(ftr->pldbuflen_qid);
+ qid = fw_qid - wrm->fw_iq_start;
+ q_completed = hw->wrm.intr_map[qid];
+
+ if (unlikely(qid ==
+ csio_q_physiqid(hw, hw->intr_iq_idx))) {
+				/*
+				 * We are already servicing the forward
+				 * interrupt ingress queue; do not service
+				 * it again.
+				 */
+ } else {
+ CSIO_DB_ASSERT(q_completed);
+ CSIO_DB_ASSERT(
+ q_completed->un.iq.iq_intx_handler);
+
+ /* Call the queue handler. */
+ q_completed->un.iq.iq_intx_handler(hw, NULL,
+ 0, NULL, (void *)q_completed);
+ }
+ break;
+ default:
+ csio_warn(hw, "Unknown resp type 0x%x received\n",
+ wr_type);
+ CSIO_INC_STATS(q, n_rsp_unknown);
+ break;
+ }
+
+ /*
+		 * Ingress *always* has fixed-size WR entries. Therefore,
+		 * there should always be complete WRs towards the end of
+		 * the queue.
+ */
+ if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+ /* Roll over to start of queue */
+ q->cidx = 0;
+ wr = q->vstart;
+
+ /* Toggle genbit */
+ q->un.iq.genbit ^= 0x1;
+
+ CSIO_INC_STATS(q, n_qwrap);
+ } else {
+ q->cidx++;
+ wr = (void *)((uintptr_t)(q->vstart) +
+ (q->cidx * q->wr_sz));
+ }
+
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ q->inc_idx++;
+
+	} /* while (csio_is_new_iqwr(q, ftr)) */
+
+ /*
+ * We need to re-arm SGE interrupts in case we got a stray interrupt,
+	 * especially in MSI-X mode. With INTx, this may be a common occurrence.
+ */
+ if (unlikely(!q->inc_idx)) {
+ CSIO_INC_STATS(q, n_stray_comp);
+ rv = -EINVAL;
+ goto restart;
+ }
+
+ /* Replenish free list buffers if pending falls below low water mark */
+ if (flq) {
+ uint32_t avail = csio_wr_avail_qcredits(flq);
+ if (avail <= 16) {
+			/* Make sure at least 1 credit (8 FL buffers) in the
+			 * FLQ remains unpopulated, otherwise HW thinks the
+			 * FLQ is empty.
+ */
+ csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
+ csio_wr_ring_fldb(hw, flq);
+ }
+ }
+
+restart:
+ /* Now inform SGE about our incremental index value */
+ csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
+ INGRESSQID(q->un.iq.physiqid) |
+ TIMERREG(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS));
+ q->stats.n_tot_rsps += q->inc_idx;
+
+ q->inc_idx = 0;
+
+ return rv;
+}
+
+int
+csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *iq = wrm->q_arr[qidx];
+
+ return csio_wr_process_iq(hw, iq, iq_handler, priv);
+}
+
+static int
+csio_closest_timer(struct csio_sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int
+csio_closest_thresh(struct csio_sge *s, int cnt)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = cnt - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
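
Both helpers are nearest-match searches over the small timer and threshold tables that the SGE supports. A standalone illustration of the selection, using a made-up table (the real values come from the SGE timer/threshold registers read in csio_wr_get_sge()):

#include <limits.h>
#include <stdio.h>

/* Pick the index of the table entry closest to the requested value,
 * the same nearest-match search csio_closest_timer() and
 * csio_closest_thresh() perform.
 */
static int closest_idx(const int *tbl, int n, int want)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < n; i++) {
		delta = want - tbl[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

int main(void)
{
	/* Made-up coalescing timer table, in microseconds. */
	int timers[] = { 1, 5, 10, 50, 100, 200 };

	/* A request of 40us maps to index 3 (50us is the nearest entry). */
	printf("index = %d\n", closest_idx(timers, 6, 40));
	return 0;
}
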
+
+static void
+csio_wr_fixup_host_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t clsz = L1_CACHE_BYTES;
+ uint32_t s_hps = PAGE_SHIFT - 10;
+ uint32_t ingpad = 0;
+ uint32_t stat_len = clsz > 64 ? 128 : 64;
+
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
+ HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
+ HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
+ HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
+ SGE_HOST_PAGE_SIZE);
+
+ sge->csio_fl_align = clsz < 32 ? 32 : clsz;
+ ingpad = ilog2(sge->csio_fl_align) - 5;
+
+ csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE(1),
+ INGPADBOUNDARY(ingpad) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+
+	/* FL BUFFER SIZE#0 is page size, i.e. already aligned to the cache line */
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3);
+
+ csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+
+ /* default value of rx_dma_offset of the NIC driver */
+ csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
+ PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+}
+
+static void
+csio_init_intr_coalesce_parms(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+
+ csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
+ if (csio_intr_coalesce_cnt) {
+ csio_sge_thresh_reg = 0;
+ csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
+ return;
+ }
+
+ csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
+}
+
+/*
+ * csio_wr_get_sge - Get SGE register values.
+ * @hw: HW module.
+ *
+ * Used by non-master functions, and by master functions relying on the
+ * config file.
+ */
+static void
+csio_wr_get_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t ingpad;
+ int i;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+
+ switch (ingpad) {
+ case X_INGPCIEBOUNDARY_32B:
+ sge->csio_fl_align = 32; break;
+ case X_INGPCIEBOUNDARY_64B:
+ sge->csio_fl_align = 64; break;
+ case X_INGPCIEBOUNDARY_128B:
+ sge->csio_fl_align = 128; break;
+ case X_INGPCIEBOUNDARY_256B:
+ sge->csio_fl_align = 256; break;
+ case X_INGPCIEBOUNDARY_512B:
+ sge->csio_fl_align = 512; break;
+ case X_INGPCIEBOUNDARY_1024B:
+ sge->csio_fl_align = 1024; break;
+ case X_INGPCIEBOUNDARY_2048B:
+ sge->csio_fl_align = 2048; break;
+ case X_INGPCIEBOUNDARY_4096B:
+ sge->csio_fl_align = 4096; break;
+ }
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+
+ sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE0_GET(timer_value_0_and_1));
+ sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE1_GET(timer_value_0_and_1));
+ sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE2_GET(timer_value_2_and_3));
+ sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE3_GET(timer_value_2_and_3));
+ sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE4_GET(timer_value_4_and_5));
+ sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE5_GET(timer_value_4_and_5));
+
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
+ sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+/*
+ * csio_wr_set_sge - Initialize SGE registers
+ * @hw: HW module.
+ *
+ * Used by Master function to initialize SGE registers in the absence
+ * of a config file.
+ */
+static void
+csio_wr_set_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ int i;
+
+ /*
+ * Set up our basic SGE mode to deliver CPL messages to our Ingress
+	 * Queue and Packet Data to the Free List.
+ */
+ csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
+
+ /*
+ * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+ * and generate an interrupt when this occurs so we can recover.
+ */
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
+ HP_INT_THRESH(HP_INT_THRESH_MASK) |
+ LP_INT_THRESH(LP_INT_THRESH_MASK),
+ HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+ LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
+ ENABLE_DROP);
+
+ /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
+
+ CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+ CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
+ CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+ CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+ CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+ CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+ CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+ CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ /* Initialize interrupt coalescing attributes */
+ sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+ sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+ sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+ sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+ sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+ sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+ sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+ sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+ sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+ sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+ csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
+ THRESHOLD_1(sge->counter_val[1]) |
+ THRESHOLD_2(sge->counter_val[2]) |
+ THRESHOLD_3(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+ /*
+ * If we are master:
+	 *	- If we plan to use the config file, we need to fix up some
+	 *	  host-specific registers, and read the rest of the SGE
+	 *	  configuration.
+	 *	- If we don't plan to use the config file, we need to initialize
+	 *	  the SGE entirely, including fixing the host-specific registers.
+	 * If we aren't the master, we are only allowed to read and work off of
+	 * the already initialized SGE values.
+	 *
+	 * Therefore, before calling this function, we assume that mastership
+	 * of the card, and whether to use the config file or not, have
+	 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
+	 * CSIO_HWF_MASTER should already be set or cleared accordingly.
+ */
+ if (csio_is_hw_master(hw)) {
+ csio_wr_fixup_host_params(hw);
+
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+ csio_wr_get_sge(hw);
+ else
+ csio_wr_set_sge(hw);
+ } else
+ csio_wr_get_sge(hw);
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
+ */
+int
+csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+
+ if (!wrm->num_q) {
+ csio_err(hw, "Num queues is not set\n");
+ return -EINVAL;
+ }
+
+ wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
+ if (!wrm->q_arr)
+ goto err;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
+ if (!wrm->q_arr[i]) {
+ while (--i >= 0)
+ kfree(wrm->q_arr[i]);
+ goto err_free_arr;
+ }
+ }
+ wrm->free_qidx = 0;
+
+ return 0;
+
+err_free_arr:
+ kfree(wrm->q_arr);
+err:
+ return -ENOMEM;
+}
+
+/*
+ * csio_wrm_exit - Uninitialize Work request module.
+ * @wrm: WR module
+ * @hw: HW module
+ *
+ * Uninitialize WR module. Free q_arr and pointers in it.
+ * We have the additional job of freeing the DMA memory associated
+ * with the queues.
+ */
+void
+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+ uint32_t j;
+ struct csio_q *q;
+ struct csio_dma_buf *buf;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ q = wrm->q_arr[i];
+
+ if (wrm->free_qidx && (i < wrm->free_qidx)) {
+ if (q->type == CSIO_FREELIST) {
+ if (!q->un.fl.bufs)
+ continue;
+ for (j = 0; j < q->credits; j++) {
+ buf = &q->un.fl.bufs[j];
+ if (!buf->vaddr)
+ continue;
+ pci_free_consistent(hw->pdev, buf->len,
+ buf->vaddr,
+ buf->paddr);
+ }
+ kfree(q->un.fl.bufs);
+ }
+ pci_free_consistent(hw->pdev, q->size,
+ q->vstart, q->pstart);
+ }
+ kfree(q);
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
+
+ kfree(wrm->q_arr);
+}
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
new file mode 100644
index 00000000000..8d30e7ac1f5
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -0,0 +1,512 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_WR_H__
+#define __CSIO_WR_H__
+
+#include <linux/cache.h>
+
+#include "csio_defs.h"
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+
+/*
+ * SGE register field values.
+ */
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPCIEBOUNDARY_64B 1
+#define X_INGPCIEBOUNDARY_128B 2
+#define X_INGPCIEBOUNDARY_256B 3
+#define X_INGPCIEBOUNDARY_512B 4
+#define X_INGPCIEBOUNDARY_1024B 5
+#define X_INGPCIEBOUNDARY_2048B 6
+#define X_INGPCIEBOUNDARY_4096B 7
+
+/* GTS register */
+#define X_TIMERREG_COUNTER0 0
+#define X_TIMERREG_COUNTER1 1
+#define X_TIMERREG_COUNTER2 2
+#define X_TIMERREG_COUNTER3 3
+#define X_TIMERREG_COUNTER4 4
+#define X_TIMERREG_COUNTER5 5
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_16B 0
+#define X_FETCHBURSTMIN_32B 1
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+
+#define X_FETCHBURSTMAX_64B 0
+#define X_FETCHBURSTMAX_128B 1
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+#define X_HOSTFCMODE_INGRESS_QUEUE 1
+#define X_HOSTFCMODE_STATUS_PAGE 2
+#define X_HOSTFCMODE_BOTH 3
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATESCHEDULING_TIMER 0
+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
+
+#define X_UPDATEDELIVERY_NONE 0
+#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+#define X_UPDATEDELIVERY_BOTH 3
+
+#define X_INTERRUPTDESTINATION_PCIE 0
+#define X_INTERRUPTDESTINATION_IQ 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+#define X_RSPD_TYPE_INTR 2
+
+/* WR status is at the same position as retval in a CMD header */
+#define csio_wr_status(_wr) \
+ (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
+
+struct csio_hw;
+
+extern int csio_intr_coalesce_cnt;
+extern int csio_intr_coalesce_time;
+
+/* Ingress queue params */
+struct csio_iq_params {
+
+ uint8_t iq_start:1;
+ uint8_t iq_stop:1;
+ uint8_t pfn:3;
+
+ uint8_t vfn;
+
+ uint16_t physiqid;
+ uint16_t iqid;
+
+ uint16_t fl0id;
+ uint16_t fl1id;
+
+ uint8_t viid;
+
+ uint8_t type;
+ uint8_t iqasynch;
+ uint8_t reserved4;
+
+ uint8_t iqandst;
+ uint8_t iqanus;
+ uint8_t iqanud;
+
+ uint16_t iqandstindex;
+
+ uint8_t iqdroprss;
+ uint8_t iqpciech;
+ uint8_t iqdcaen;
+
+ uint8_t iqdcacpu;
+ uint8_t iqintcntthresh;
+ uint8_t iqo;
+
+ uint8_t iqcprio;
+ uint8_t iqesize;
+
+ uint16_t iqsize;
+
+ uint64_t iqaddr;
+
+ uint8_t iqflintiqhsen;
+ uint8_t reserved5;
+ uint8_t iqflintcongen;
+ uint8_t iqflintcngchmap;
+
+ uint32_t reserved6;
+
+ uint8_t fl0hostfcmode;
+ uint8_t fl0cprio;
+ uint8_t fl0paden;
+ uint8_t fl0packen;
+ uint8_t fl0congen;
+ uint8_t fl0dcaen;
+
+ uint8_t fl0dcacpu;
+ uint8_t fl0fbmin;
+
+ uint8_t fl0fbmax;
+ uint8_t fl0cidxfthresho;
+ uint8_t fl0cidxfthresh;
+
+ uint16_t fl0size;
+
+ uint64_t fl0addr;
+
+ uint64_t reserved7;
+
+ uint8_t fl1hostfcmode;
+ uint8_t fl1cprio;
+ uint8_t fl1paden;
+ uint8_t fl1packen;
+ uint8_t fl1congen;
+ uint8_t fl1dcaen;
+
+ uint8_t fl1dcacpu;
+ uint8_t fl1fbmin;
+
+ uint8_t fl1fbmax;
+ uint8_t fl1cidxfthresho;
+ uint8_t fl1cidxfthresh;
+
+ uint16_t fl1size;
+
+ uint64_t fl1addr;
+};
+
+/* Egress queue params */
+struct csio_eq_params {
+
+ uint8_t pfn;
+ uint8_t vfn;
+
+ uint8_t eqstart:1;
+ uint8_t eqstop:1;
+
+ uint16_t physeqid;
+ uint32_t eqid;
+
+ uint8_t hostfcmode:2;
+ uint8_t cprio:1;
+ uint8_t pciechn:3;
+
+ uint16_t iqid;
+
+ uint8_t dcaen:1;
+ uint8_t dcacpu:5;
+
+ uint8_t fbmin:3;
+ uint8_t fbmax:3;
+
+ uint8_t cidxfthresho:1;
+ uint8_t cidxfthresh:3;
+
+ uint16_t eqsize;
+
+ uint64_t eqaddr;
+};
+
+struct csio_dma_buf {
+ struct list_head list;
+ void *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ uint32_t len; /* Buffer size */
+};
+
+/* Generic I/O request structure */
+struct csio_ioreq {
+ struct csio_sm sm; /* SM, List
+ * should be the first member
+ */
+ int iq_idx; /* Ingress queue index */
+ int eq_idx; /* Egress queue index */
+ uint32_t nsge; /* Number of SG elements */
+ uint32_t tmo; /* Driver timeout */
+ uint32_t datadir; /* Data direction */
+ struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
+ uint16_t wr_status; /* WR completion status */
+ int16_t drv_status; /* Driver internal status */
+ struct csio_lnode *lnode; /* Owner lnode */
+ struct csio_rnode *rnode; /* Src/destination rnode */
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
+ /* completion callback */
+ void *scratch1; /* Scratch area 1.
+ */
+ void *scratch2; /* Scratch area 2. */
+ struct list_head gen_list; /* Any list associated with
+ * this ioreq.
+ */
+ uint64_t fw_handle; /* Unique handle passed
+ * to FW
+ */
+ uint8_t dcopy; /* Data copy required */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ struct completion cmplobj; /* ioreq completion object */
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Egress status page for egress cidx updates
+ */
+struct csio_qstatus_page {
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+
+enum {
+ CSIO_MAX_FLBUF_PER_IQWR = 4,
+ CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
+ * in bytes
+ */
+ CSIO_MAX_QID = 0xFFFF,
+ CSIO_MAX_IQ = 128,
+
+ CSIO_SGE_NTIMERS = 6,
+ CSIO_SGE_NCOUNTERS = 4,
+ CSIO_SGE_FL_SIZE_REGS = 16,
+};
+
+/* Defines for type */
+enum {
+ CSIO_EGRESS = 1,
+ CSIO_INGRESS = 2,
+ CSIO_FREELIST = 3,
+};
+
+/*
+ * Structure for footer (last 2 flits) of Ingress Queue Entry.
+ */
+struct csio_iqwr_footer {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define IQWRF_NEWBUF (1 << 31)
+#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
+#define IQWRF_GEN_SHIFT 7
+#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
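
The footer packs the response type and the generation bit into the single type_gen byte of the last flit, and the macros above extract them. A small standalone decode with a made-up sample byte, assuming the same bit positions as the macros:

#include <stdint.h>
#include <stdio.h>

/* Same field positions as IQWRF_GEN_SHIFT / IQWRF_TYPE_GET above. */
#define GEN_SHIFT	7			/* bit 7: generation bit */
#define TYPE_GET(x)	(((x) >> 4) & 0x3U)	/* bits 5:4: response type */

int main(void)
{
	uint8_t type_gen = 0x90;	/* made-up sample byte */
	unsigned int gen  = type_gen >> GEN_SHIFT;
	unsigned int type = TYPE_GET(type_gen);

	/* 0x90 = 1001 0000b -> gen = 1, type = 1 (X_RSPD_TYPE_CPL) */
	printf("gen=%u type=%u\n", gen, type);
	return 0;
}
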
+
+
+/*
+ * WR pair:
+ * ========
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require an address/length pair to be passed back to the caller -
+ * hence the Work request pair structure.
+ */
+struct csio_wr_pair {
+ void *addr1;
+ uint32_t size1;
+ void *addr2;
+ uint32_t size2;
+};
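
Since a WR can straddle the wrap point, a caller receives the two segments above and has to copy its payload across both. A minimal sketch of such a copy, under the assumption that csio_wr_copy_to_wrp() behaves roughly this way (the real helper also takes a destination offset, which this sketch omits; wrp_copy is a hypothetical name):

#include <stdint.h>
#include <string.h>

/* Two-segment destination, mirroring struct csio_wr_pair above. */
struct wr_pair {
	void	*addr1;
	uint32_t size1;
	void	*addr2;
	uint32_t size2;
};

/* Copy 'len' bytes of payload into the pair: fill the first segment,
 * then wrap into the second segment for whatever is left over.
 */
void wrp_copy(const void *src, struct wr_pair *wrp, uint32_t len)
{
	uint32_t first = len < wrp->size1 ? len : wrp->size1;

	memcpy(wrp->addr1, src, first);
	if (len > first)
		memcpy(wrp->addr2, (const uint8_t *)src + first, len - first);
}
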
+
+/*
+ * The following structure is used by ingress processing to return the
+ * free list buffers to consumers.
+ */
+struct csio_fl_dma_buf {
+ struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
+ /* Freelist DMA buffers */
+ int offset; /* Offset within the
+ * first FL buf.
+ */
+ uint32_t totlen; /* Total length */
+	uint8_t defer_free;	/* Freeing of buffer can
+				 * be deferred
+ */
+};
+
+/* Data-types */
+typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+struct csio_iq {
+ uint16_t iqid; /* Queue ID */
+ uint16_t physiqid; /* Physical Queue ID */
+ uint16_t genbit; /* Generation bit,
+ * initially set to 1
+ */
+ int flq_idx; /* Freelist queue index */
+ iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
+};
+
+struct csio_eq {
+ uint16_t eqid; /* Qid */
+ uint16_t physeqid; /* Physical Queue ID */
+	uint8_t		wrap[512];	/* Temp area for q-wrap around */
+};
+
+struct csio_fl {
+ uint16_t flid; /* Qid */
+ uint16_t packen; /* Packing enabled? */
+ int offset; /* Offset within FL buf */
+ int sreg; /* Size register */
+ struct csio_dma_buf *bufs; /* Free list buffer ptr array
+ * indexed using flq->cidx/pidx
+ */
+};
+
+struct csio_qstats {
+ uint32_t n_tot_reqs; /* Total no. of Requests */
+ uint32_t n_tot_rsps; /* Total no. of responses */
+ uint32_t n_qwrap; /* Queue wraps */
+ uint32_t n_eq_wr_split; /* Number of split EQ WRs */
+ uint32_t n_qentry; /* Queue entry */
+ uint32_t n_qempty; /* Queue empty */
+ uint32_t n_qfull; /* Queue fulls */
+ uint32_t n_rsp_unknown; /* Unknown response type */
+ uint32_t n_stray_comp; /* Stray completion intr */
+ uint32_t n_flq_refill; /* Number of FL refills */
+};
+
+/* Queue metadata */
+struct csio_q {
+ uint16_t type; /* Type: Ingress/Egress/FL */
+ uint16_t pidx; /* producer index */
+ uint16_t cidx; /* consumer index */
+ uint16_t inc_idx; /* Incremental index */
+ uint32_t wr_sz; /* Size of all WRs in this q
+ * if fixed
+ */
+ void *vstart; /* Base virtual address
+ * of queue
+ */
+ void *vwrap; /* Virtual end address to
+ * wrap around at
+ */
+ uint32_t credits; /* Size of queue in credits */
+ void *owner; /* Owner */
+ union { /* Queue contexts */
+ struct csio_iq iq;
+ struct csio_eq eq;
+ struct csio_fl fl;
+ } un;
+
+ dma_addr_t pstart; /* Base physical address of
+ * queue
+ */
+ uint32_t portid; /* PCIE Channel */
+ uint32_t size; /* Size of queue in bytes */
+ struct csio_qstats stats; /* Statistics */
+} ____cacheline_aligned_in_smp;
+
+struct csio_sge {
+ uint32_t csio_fl_align; /* Calculated and cached
+ * for fast path
+ */
+ uint32_t sge_control; /* padding, boundaries,
+ * lengths, etc.
+ */
+ uint32_t sge_host_page_size; /* Host page size */
+ uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
+ /* free list buffer sizes */
+ uint16_t timer_val[CSIO_SGE_NTIMERS];
+ uint8_t counter_val[CSIO_SGE_NCOUNTERS];
+};
+
+/* Work request module */
+struct csio_wrm {
+ int num_q; /* Number of queues */
+ struct csio_q **q_arr; /* Array of queue pointers
+ * allocated dynamically
+ * based on configured values
+ */
+ uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
+ uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
+ struct csio_q *intr_map[CSIO_MAX_IQ];
+ /* IQ-id to IQ map table. */
+ int free_qidx; /* queue idx of free queue */
+ struct csio_sge sge; /* SGE params */
+};
+
+#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
+#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
+#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
+#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
+#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
+#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
+#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
+#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
+#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
+#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
+#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
+#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
+#define csio_q_physiqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
+#define csio_q_iq_flq_idx(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
+#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
+#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
+
+#define csio_q_physeqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
+#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
+
+#define csio_q_iq_to_flid(__hw, __iq_idx) \
+ csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
+#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
+ (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
+#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
+
+struct csio_mb;
+
+int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
+ uint16_t, void *, uint32_t, int, iq_handler_t);
+int csio_wr_iq_create(struct csio_hw *, void *, int,
+ uint32_t, uint8_t, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
+
+
+int csio_wr_get(struct csio_hw *, int, uint32_t,
+ struct csio_wr_pair *);
+void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
+int csio_wr_issue(struct csio_hw *, int, bool);
+int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+int csio_wr_process_iq_idx(struct csio_hw *, int,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+
+void csio_wr_sge_init(struct csio_hw *);
+int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
+void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
+
+#endif /* ifndef __CSIO_WR_H__ */
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
new file mode 100644
index 00000000000..097e52c0f8e
--- /dev/null
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -0,0 +1,539 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_API_STOR_H_
+#define _T4FW_API_STOR_H_
+
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_fcoe_link_sub_op {
+ FCOE_LINK_DOWN = 0x0,
+ FCOE_LINK_UP = 0x1,
+ FCOE_LINK_COND = 0x2,
+};
+
+enum fw_fcoe_link_status {
+ FCOE_LINKDOWN = 0x0,
+ FCOE_LINKUP = 0x1,
+};
+
+enum fw_ofld_prot {
+ PROT_FCOE = 0x1,
+ PROT_ISCSI = 0x2,
+};
+
+enum rport_type_fcoe {
+ FLOGI_VFPORT = 0x1, /* 0xfffffe */
+ FDISC_VFPORT = 0x2, /* 0xfffffe */
+ NS_VNPORT = 0x3, /* 0xfffffc */
+ REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
+ REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
+ FDMI_VNPORT = 0x6, /* 0xfffffa */
+ FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
+};
+
+enum event_cause_fcoe {
+ PLOGI_ACC_RCVD = 0x01,
+ PLOGI_RJT_RCVD = 0x02,
+ PLOGI_RCVD = 0x03,
+ PLOGO_RCVD = 0x04,
+ PRLI_ACC_RCVD = 0x05,
+ PRLI_RJT_RCVD = 0x06,
+ PRLI_RCVD = 0x07,
+ PRLO_RCVD = 0x08,
+ NPORT_ID_CHGD = 0x09,
+ FLOGO_RCVD = 0x0a,
+ CLR_VIRT_LNK_RCVD = 0x0b,
+ FLOGI_ACC_RCVD = 0x0c,
+ FLOGI_RJT_RCVD = 0x0d,
+ FDISC_ACC_RCVD = 0x0e,
+ FDISC_RJT_RCVD = 0x0f,
+ FLOGI_TMO_MAX_RETRY = 0x10,
+ IMPL_LOGO_ADISC_ACC = 0x11,
+ IMPL_LOGO_ADISC_RJT = 0x12,
+ IMPL_LOGO_ADISC_CNFLT = 0x13,
+ PRLI_TMO = 0x14,
+ ADISC_TMO = 0x15,
+ RSCN_DEV_LOST = 0x16,
+ SCR_ACC_RCVD = 0x17,
+ ADISC_RJT_RCVD = 0x18,
+ LOGO_SNT = 0x19,
+ PROTO_ERR_IMPL_LOGO = 0x1a,
+};
+
+enum fcoe_cmn_type {
+ FCOE_ELS,
+ FCOE_CT,
+ FCOE_SCSI_CMD,
+ FCOE_UNSOL_ELS,
+};
+
+enum fw_wr_stor_opcodes {
+ FW_RDEV_WR = 0x38,
+ FW_FCOE_ELS_CT_WR = 0x30,
+ FW_SCSI_WRITE_WR = 0x31,
+ FW_SCSI_READ_WR = 0x32,
+ FW_SCSI_CMD_WR = 0x33,
+ FW_SCSI_ABRT_CLS_WR = 0x34,
+};
+
+struct fw_rdev_wr {
+ __be32 op_to_immdlen;
+ __be32 alloc_to_len16;
+ __be64 cookie;
+ u8 protocol;
+ u8 event_cause;
+ u8 cur_state;
+ u8 prev_state;
+ __be32 flags_to_assoc_flowid;
+ union rdev_entry {
+ struct fcoe_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 rjt_reason;
+ u8 cur_login_st;
+ u8 prev_login_st;
+ __be16 rcv_fr_sz;
+ u8 rd_xfer_rdy_to_rport_type;
+ u8 vft_to_qos;
+ u8 org_proc_assoc_to_acc_rsp_code;
+ u8 enh_disc_to_tgt;
+ u8 wwnn[8];
+ u8 wwpn[8];
+ __be16 iqid;
+ u8 fc_oui[3];
+ u8 r_id[3];
+ } fcoe_rdev;
+ struct iscsi_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 r3;
+ __be16 iscsi_opts;
+ __be16 tcp_opts;
+ __be16 ip_opts;
+ __be16 max_rcv_len;
+ __be16 max_snd_len;
+ __be16 first_brst_len;
+ __be16 max_brst_len;
+ __be16 r4;
+ __be16 def_time2wait;
+ __be16 def_time2ret;
+ __be16 nop_out_intrvl;
+ __be16 non_scsi_to;
+ __be16 isid;
+ __be16 tsid;
+ __be16 port;
+ __be16 tpgt;
+ u8 r5[6];
+ __be16 iqid;
+ } iscsi_rdev;
+ } u;
+};
+
+#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
+#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
+#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
+#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
+#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
+#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
+#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
+#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
+#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
+
+struct fw_fcoe_els_ct_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 els_ct_type;
+ u8 ctl_pri;
+ u8 cp_en_class;
+ __be16 xfer_cnt;
+ u8 fl_to_sp;
+ u8 l_id[3];
+ u8 r5;
+ u8 r_id[3];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
+#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
+
+struct fw_scsi_write_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_write_priv {
+ struct fcoe_write_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_write_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_read_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_read_priv {
+ struct fcoe_read_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_read_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_cmd_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 r3;
+ union fw_scsi_cmd_priv {
+ struct fcoe_cmd_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r4_lo[2];
+ } fcoe;
+ struct iscsi_cmd_priv {
+ u8 r4[4];
+ } iscsi;
+ } u;
+ u8 r5[8];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
+
+#define SCSI_ABORT 0
+#define SCSI_CLOSE 1
+
+struct fw_scsi_abrt_cls_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 sub_opcode_to_chk_all_io;
+ u8 r3[4];
+ u64 t_cookie;
+};
+
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
+#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
+
+enum fw_cmd_stor_opcodes {
+ FW_FCOE_RES_INFO_CMD = 0x31,
+ FW_FCOE_LINK_CMD = 0x32,
+ FW_FCOE_VNP_CMD = 0x33,
+ FW_FCOE_SPARAMS_CMD = 0x35,
+ FW_FCOE_STATS_CMD = 0x37,
+ FW_FCOE_FCF_CMD = 0x38,
+};
+
+struct fw_fcoe_res_info_cmd {
+ __be32 op_to_read;
+ __be32 retval_len16;
+ __be16 e_d_tov;
+ __be16 r_a_tov_seq;
+ __be16 r_a_tov_els;
+ __be16 r_r_tov;
+ __be32 max_xchgs;
+ __be32 max_ssns;
+ __be32 used_xchgs;
+ __be32 used_ssns;
+ __be32 max_fcfs;
+ __be32 max_vnps;
+ __be32 used_fcfs;
+ __be32 used_vnps;
+};
+
+struct fw_fcoe_link_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be32 sub_opcode_fcfi;
+ u8 r3;
+ u8 lstatus;
+ __be16 flags;
+ u8 r4;
+ u8 set_vlan;
+ __be16 vlan_id;
+ __be32 vnpi_pkd;
+ __be16 r6;
+ u8 phy_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+};
+
+#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
+#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
+#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_vnp_cmd {
+ __be32 op_to_fcfi;
+ __be32 alloc_to_len16;
+ __be32 gen_wwn_to_vnpi;
+ __be32 vf_id;
+ __be16 iqid;
+ u8 vnport_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 clsp_word_0_1[8];
+};
+
+#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
+#define FW_FCOE_VNP_CMD_FREE (1U << 30)
+#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
+#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
+#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
+#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_sparams_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ u8 r3[7];
+ u8 cos;
+ u8 lport_wwnn[8];
+ u8 lport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 cls_srv_parms[16];
+};
+
+#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
+
+struct fw_fcoe_stats_cmd {
+ __be32 op_to_flowid;
+ __be32 free_to_len16;
+ union fw_fcoe_stats {
+ struct fw_fcoe_stats_ctl {
+ u8 nstats_port;
+ u8 port_valid_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_fcoe_port_stats {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } port_stats;
+ struct fw_fcoe_fcf_stats {
+ __be32 fip_tx_bytes;
+ __be32 fip_tx_fr;
+ __be64 fcf_ka;
+ __be64 mcast_adv_rcvd;
+ __be16 ucast_adv_rcvd;
+ __be16 sol_sent;
+ __be16 vlan_req;
+ __be16 vlan_rpl;
+ __be16 clr_vlink;
+ __be16 link_down;
+ __be16 link_up;
+ __be16 logo;
+ __be16 flogi_req;
+ __be16 flogi_rpl;
+ __be16 fdisc_req;
+ __be16 fdisc_rpl;
+ __be16 fka_prd_chg;
+ __be16 fc_map_chg;
+ __be16 vfid_chg;
+ u8 no_fka_req;
+ u8 no_vnp;
+ } fcf_stats;
+ struct fw_fcoe_pcb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 vnp_ka;
+ __be32 unsol_els_rcvd;
+ __be64 unsol_cmd_rcvd;
+ __be16 implicit_logo;
+ __be16 flogi_inv_sparm;
+ __be16 fdisc_inv_sparm;
+ __be16 flogi_rjt;
+ __be16 fdisc_rjt;
+ __be16 no_ssn;
+ __be16 mac_flt_fail;
+ __be16 inv_fr_rcvd;
+ } pcb_stats;
+ struct fw_fcoe_scb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 host_abrt_req;
+ __be32 adap_auto_abrt;
+ __be32 adap_abrt_rsp;
+ __be32 host_ios_req;
+ __be16 ssn_offl_ios;
+ __be16 ssn_not_rdy_ios;
+ u8 rx_data_ddp_err;
+ u8 ddp_flt_set_err;
+ __be16 rx_data_fr_err;
+ u8 bad_st_abrt_req;
+ u8 no_io_abrt_req;
+ u8 abort_tmo;
+ u8 abort_tmo_2;
+ __be32 abort_req;
+ u8 no_ppod_res_tmo;
+ u8 bp_tmo;
+ u8 adap_auto_cls;
+ u8 no_io_cls_req;
+ __be32 host_cls_req;
+ __be64 unsol_cmd_rcvd;
+ __be32 plogi_req_rcvd;
+ __be32 prli_req_rcvd;
+ __be16 logo_req_rcvd;
+ __be16 prlo_req_rcvd;
+ __be16 plogi_rjt_rcvd;
+ __be16 prli_rjt_rcvd;
+ __be32 adisc_req_rcvd;
+ __be32 rscn_rcvd;
+ __be32 rrq_req_rcvd;
+ __be32 unsol_els_rcvd;
+ u8 adisc_rjt_rcvd;
+ u8 scr_rjt;
+ u8 ct_rjt;
+ u8 inval_bls_rcvd;
+ __be32 ba_rjt_rcvd;
+ } scb_stats;
+ } u;
+};
+
+#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_FREE (1U << 30)
+#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
+#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_fcoe_fcf_cmd {
+ __be32 op_to_fcfi;
+ __be32 retval_len16;
+ __be16 priority_pkd;
+ u8 mac[6];
+ u8 name_id[8];
+ u8 fabric[8];
+ __be16 vf_id;
+ __be16 max_fcoe_size;
+ u8 vlan_id;
+ u8 fc_map[3];
+ __be32 fka_adv;
+ __be32 r6;
+ u8 r7_hi;
+ u8 fpma_to_portid;
+ u8 spma_mac[6];
+ __be64 r8;
+};
+
+#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
+#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
+#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
+#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#endif /* _T4FW_API_STOR_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 2ebe03a4b51..4a909d7cfde 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2144,7 +2144,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
*/
port_id = fip->port_id;
if (fip->probe_tries)
- port_id = prandom32(&fip->rnd_state) & 0xffff;
+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
else if (!port_id)
port_id = fip->lp->wwpn & 0xffff;
if (!port_id || port_id == 0xffff)
@@ -2169,7 +2169,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
{
fip->probe_tries = 0;
- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
fcoe_ctlr_vn_restart(fip);
}
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 192724ed7a3..138e573f37e 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.6 (091225)";
+static const char driver_ver[] = "v1.8";
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -77,6 +77,11 @@ static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}
+static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
@@ -230,6 +235,74 @@ static int iop_intr_mv(struct hptiop_hba *hba)
return ret;
}
+static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
+{
+ u32 req_type = _tag & 0xf;
+ struct hpt_iop_request_scsi_command *req;
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = IOP_RESULT_SUCCESS;
+ hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mvfrey(struct hptiop_hba *hba)
+{
+ u32 _tag, status, cptr, cur_rptr;
+ int ret = 0;
+
+ if (hba->initialized)
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
+ if (status & CPU_TO_F0_DRBL_MSG_BIT) {
+ u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ }
+ ret = 1;
+ }
+
+ status = readl(&(hba->u.mvfrey.mu->isr_cause));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->isr_cause));
+ do {
+ cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+ cur_rptr = hba->u.mvfrey.outlist_rptr;
+ while (cur_rptr != cptr) {
+ cur_rptr++;
+ if (cur_rptr == hba->u.mvfrey.list_count)
+ cur_rptr = 0;
+
+ _tag = hba->u.mvfrey.outlist[cur_rptr].val;
+ BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
+ hptiop_request_callback_mvfrey(hba, _tag);
+ ret = 1;
+ }
+ hba->u.mvfrey.outlist_rptr = cur_rptr;
+ } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+ }
+
+ if (hba->initialized)
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ return ret;
+}
+
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
void __iomem *_req, u32 millisec)
{
@@ -272,6 +345,26 @@ static int iop_send_sync_request_mv(struct hptiop_hba *hba,
return -1;
}
+static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
+{
+ struct hpt_iop_request_header *reqhdr =
+ hba->u.mvfrey.internal_req.req_virt;
+ u32 i;
+
+ hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mvfrey(hba);
+ if (hba->msg_done)
+ break;
+ msleep(1);
+ }
+ return hba->msg_done ? 0 : -1;
+}
+
static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
@@ -285,11 +378,18 @@ static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
readl(&hba->u.mv.regs->inbound_doorbell);
}
+static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+ readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+}
+
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
u32 i;
hba->msg_done = 0;
+ hba->ops->disable_intr(hba);
hba->ops->post_msg(hba, msg);
for (i = 0; i < millisec; i++) {
@@ -301,6 +401,7 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
msleep(1);
}
+ hba->ops->enable_intr(hba);
return hba->msg_done? 0 : -1;
}
@@ -354,6 +455,28 @@ static int iop_get_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_get_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+ if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+ info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
+ return -1;
+
+ config->interface_version = info->interface_version;
+ config->firmware_version = info->firmware_version;
+ config->max_requests = info->max_requests;
+ config->request_size = info->request_size;
+ config->max_sg_count = info->max_sg_count;
+ config->data_transfer_length = info->data_transfer_length;
+ config->alignment_mask = info->alignment_mask;
+ config->max_devices = info->max_devices;
+ config->sdram_size = info->sdram_size;
+
+ return 0;
+}
+
static int iop_set_config_itl(struct hptiop_hba *hba,
struct hpt_iop_request_set_config *config)
{
@@ -408,6 +531,29 @@ static int iop_set_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_set_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req =
+ hba->u.mvfrey.internal_req.req_virt;
+
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
@@ -420,6 +566,13 @@ static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
/* enable interrupts */
@@ -502,17 +655,39 @@ static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
return 0;
}
+static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mvfrey.config == NULL)
+ return -1;
+
+ hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mvfrey.mu == NULL) {
+ iounmap(hba->u.mvfrey.config);
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
iounmap(hba->u.mv.regs);
iounmap(hba->u.mv.mu);
}
+static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mvfrey.config);
+ iounmap(hba->u.mvfrey.mu);
+}
+
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
dprintk("iop message 0x%x\n", msg);
- if (msg == IOPMU_INBOUND_MSG0_NOP)
+ if (msg == IOPMU_INBOUND_MSG0_NOP ||
+ msg == IOPMU_INBOUND_MSG0_RESET_COMM)
hba->msg_done = 1;
if (!hba->initialized)
@@ -592,6 +767,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
memcpy(scp->sense_buffer, &req->sg_list,
min_t(size_t, SCSI_SENSE_BUFFERSIZE,
le32_to_cpu(req->dataxfer_length)));
+ goto skip_resid;
break;
default:
@@ -599,6 +775,10 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
break;
}
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+
+skip_resid:
dprintk("scsi_done(%p)\n", scp);
scp->scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
@@ -692,7 +872,8 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
- psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
+ hba->ops->host_phy_flag;
psg[idx].size = cpu_to_le32(sg_dma_len(sg));
psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
cpu_to_le32(1) : 0;
@@ -751,6 +932,78 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba,
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
+static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 index;
+
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
+ IOP_REQUEST_FLAG_ADDR_BITS |
+ ((_req->req_shifted_phy >> 11) & 0xffff0000));
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (_req->index << 4) | reqhdr->type);
+ reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
+ 0xffffffff);
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr =
+ (dma_addr_t)_req->req_shifted_phy << 5;
+ hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+ writel(hba->u.mvfrey.inlist_wptr,
+ &(hba->u.mvfrey.mu->inbound_write_ptr));
+ readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
+}
+
+static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = hba->u.mvfrey.list_count;
+
+ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+ return -1;
+
+ /* wait 100ms for MCU ready */
+ msleep(100);
+
+ writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->inbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->inbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_shadow_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_shadow_base_high));
+
+ hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ hba->u.mvfrey.outlist_rptr = list_count - 1;
+ return 0;
+}
+
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
@@ -771,14 +1024,15 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
_req->scp = scp;
- dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
+ dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) "
"req_index=%d, req=%p\n",
scp,
host->host_no, scp->device->channel,
scp->device->id, scp->device->lun,
- ((u32 *)scp->cmnd)[0],
- ((u32 *)scp->cmnd)[1],
- ((u32 *)scp->cmnd)[2],
+ cpu_to_be32(((u32 *)scp->cmnd)[0]),
+ cpu_to_be32(((u32 *)scp->cmnd)[1]),
+ cpu_to_be32(((u32 *)scp->cmnd)[2]),
+ cpu_to_be32(((u32 *)scp->cmnd)[3]),
_req->index, _req->req_virt);
scp->result = 0;
@@ -933,6 +1187,11 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
+static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
@@ -943,6 +1202,63 @@ static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
return -1;
}
+static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
+ char *p;
+ dma_addr_t phy;
+
+ BUG_ON(hba->max_request_size == 0);
+
+ if (list_count == 0) {
+ BUG_ON(1);
+ return -1;
+ }
+
+ list_count >>= 16;
+
+ hba->u.mvfrey.list_count = list_count;
+ hba->u.mvfrey.internal_mem_size = 0x800 +
+ list_count * sizeof(struct mvfrey_inlist_entry) +
+ list_count * sizeof(struct mvfrey_outlist_entry) +
+ sizeof(int);
+
+ p = dma_alloc_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ hba->u.mvfrey.internal_req.req_virt = p;
+ hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
+ hba->u.mvfrey.internal_req.scp = NULL;
+ hba->u.mvfrey.internal_req.next = NULL;
+
+ p += 0x800;
+ phy += 0x800;
+
+ hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
+ hba->u.mvfrey.inlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_inlist_entry);
+ phy += list_count * sizeof(struct mvfrey_inlist_entry);
+
+ hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
+ hba->u.mvfrey.outlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_outlist_entry);
+ phy += list_count * sizeof(struct mvfrey_outlist_entry);
+
+ hba->u.mvfrey.outlist_cptr = (__le32 *)p;
+ hba->u.mvfrey.outlist_cptr_phy = phy;
+
+ return 0;
+}
+
+static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
if (hba->u.mv.internal_req) {
@@ -953,6 +1269,19 @@ static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
return -1;
}
+static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
+{
+ if (hba->u.mvfrey.internal_req.req_virt) {
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size,
+ hba->u.mvfrey.internal_req.req_virt,
+ (dma_addr_t)
+ hba->u.mvfrey.internal_req.req_shifted_phy << 5);
+ return 0;
+ } else
+ return -1;
+}
+
static int __devinit hptiop_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
@@ -1027,7 +1356,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto unmap_pci_bar;
}
- if (hba->ops->internal_memalloc) {
+ if (hba->ops->family == MV_BASED_IOP) {
if (hba->ops->internal_memalloc(hba)) {
printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
hba->host->host_no);
@@ -1050,6 +1379,19 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->interface_version = le32_to_cpu(iop_config.interface_version);
hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
+ if (hba->ops->family == MVFREY_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ if (hba->ops->reset_comm(hba)) {
+ printk(KERN_ERR "scsi%d: reset comm failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
if (hba->firmware_version > 0x01020000 ||
hba->interface_version > 0x01020000)
hba->iopintf_v2 = 1;
@@ -1104,14 +1446,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->dma_coherent = start_virt;
hba->dma_coherent_handle = start_phy;
- if ((start_phy & 0x1f) != 0)
- {
+ if ((start_phy & 0x1f) != 0) {
offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
start_phy += offset;
start_virt += offset;
}
- hba->req_list = start_virt;
+ hba->req_list = NULL;
for (i = 0; i < hba->max_requests; i++) {
hba->reqs[i].next = NULL;
hba->reqs[i].req_virt = start_virt;
@@ -1132,7 +1473,6 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto free_request_mem;
}
-
scsi_scan_host(host);
dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
@@ -1147,8 +1487,7 @@ free_request_irq:
free_irq(hba->pcidev->irq, hba);
unmap_pci_bar:
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1198,6 +1537,16 @@ static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
readl(&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0, &(hba->u.mvfrey.mu->isr_enable));
+ readl(&(hba->u.mvfrey.mu->isr_enable));
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+ readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static void hptiop_remove(struct pci_dev *pcidev)
{
struct Scsi_Host *host = pci_get_drvdata(pcidev);
@@ -1216,8 +1565,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
hba->dma_coherent,
hba->dma_coherent_handle);
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1229,9 +1577,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
}
static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .family = INTEL_BASED_IOP,
.iop_wait_ready = iop_wait_ready_itl,
- .internal_memalloc = NULL,
- .internal_memfree = NULL,
+ .internal_memalloc = hptiop_internal_memalloc_itl,
+ .internal_memfree = hptiop_internal_memfree_itl,
.map_pci_bar = hptiop_map_pci_bar_itl,
.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
.enable_intr = hptiop_enable_intr_itl,
@@ -1242,9 +1591,12 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
.post_msg = hptiop_post_msg_itl,
.post_req = hptiop_post_req_itl,
.hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_itl,
+ .host_phy_flag = cpu_to_le64(0),
};
static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .family = MV_BASED_IOP,
.iop_wait_ready = iop_wait_ready_mv,
.internal_memalloc = hptiop_internal_memalloc_mv,
.internal_memfree = hptiop_internal_memfree_mv,
@@ -1258,6 +1610,27 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
.post_msg = hptiop_post_msg_mv,
.post_req = hptiop_post_req_mv,
.hw_dma_bit_mask = 33,
+ .reset_comm = hptiop_reset_comm_mv,
+ .host_phy_flag = cpu_to_le64(0),
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+ .family = MVFREY_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_mvfrey,
+ .internal_memalloc = hptiop_internal_memalloc_mvfrey,
+ .internal_memfree = hptiop_internal_memfree_mvfrey,
+ .map_pci_bar = hptiop_map_pci_bar_mvfrey,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
+ .enable_intr = hptiop_enable_intr_mvfrey,
+ .disable_intr = hptiop_disable_intr_mvfrey,
+ .get_config = iop_get_config_mvfrey,
+ .set_config = iop_set_config_mvfrey,
+ .iop_intr = iop_intr_mvfrey,
+ .post_msg = hptiop_post_msg_mvfrey,
+ .post_req = hptiop_post_req_mvfrey,
+ .hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_mvfrey,
+ .host_phy_flag = cpu_to_le64(1),
};
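/*
 * With every family now providing internal_memalloc/internal_memfree
 * (no-op stubs for the Intel-based IOPs), the NULL checks around these
 * hooks in hptiop_probe()/hptiop_remove() could be dropped above; the
 * .family field is what now decides when the allocation happens in probe.
 */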
static struct pci_device_id hptiop_id_table[] = {
@@ -1283,6 +1656,8 @@ static struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
{},
};
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index baa648d87fd..020619d60b0 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,6 +75,45 @@ struct hpt_iopmv_regs {
__le32 outbound_intmask;
};
+#pragma pack(1)
+struct hpt_iopmu_mvfrey {
+ __le32 reserved0[(0x4000 - 0) / 4];
+ __le32 inbound_base;
+ __le32 inbound_base_high;
+ __le32 reserved1[(0x4018 - 0x4008) / 4];
+ __le32 inbound_write_ptr;
+ __le32 reserved2[(0x402c - 0x401c) / 4];
+ __le32 inbound_conf_ctl;
+ __le32 reserved3[(0x4050 - 0x4030) / 4];
+ __le32 outbound_base;
+ __le32 outbound_base_high;
+ __le32 outbound_shadow_base;
+ __le32 outbound_shadow_base_high;
+ __le32 reserved4[(0x4088 - 0x4060) / 4];
+ __le32 isr_cause;
+ __le32 isr_enable;
+ __le32 reserved5[(0x1020c - 0x4090) / 4];
+ __le32 pcie_f0_int_enable;
+ __le32 reserved6[(0x10400 - 0x10210) / 4];
+ __le32 f0_to_cpu_msg_a;
+ __le32 reserved7[(0x10420 - 0x10404) / 4];
+ __le32 cpu_to_f0_msg_a;
+ __le32 reserved8[(0x10480 - 0x10424) / 4];
+ __le32 f0_doorbell;
+ __le32 f0_doorbell_enable;
+};
+
+struct mvfrey_inlist_entry {
+ dma_addr_t addr;
+ __le32 intrfc_len;
+ __le32 reserved;
+};
+
+struct mvfrey_outlist_entry {
+ __le32 val;
+};
+#pragma pack()
+
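+/*
+ * The reservedN arrays above only exist to place each register at its
+ * fixed MMIO offset (inbound_base at 0x4000, isr_cause at 0x4088,
+ * pcie_f0_int_enable at 0x1020c, f0_doorbell at 0x10480, ...);
+ * #pragma pack(1) keeps the compiler from inserting any padding.
+ */
+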
#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
@@ -87,6 +126,9 @@ struct hpt_iopmv_regs {
#define MVIOP_MU_OUTBOUND_INT_MSG 1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+#define CL_POINTER_TOGGLE 0x00004000
+#define CPU_TO_F0_DRBL_MSG_BIT 0x02000000
+
enum hpt_iopmu_message {
/* host-to-iop messages */
IOPMU_INBOUND_MSG0_NOP = 0,
@@ -95,6 +137,7 @@ enum hpt_iopmu_message {
IOPMU_INBOUND_MSG0_SHUTDOWN,
IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_RESET_COMM,
IOPMU_INBOUND_MSG0_MAX = 0xff,
/* iop-to-host messages */
IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
@@ -118,6 +161,7 @@ struct hpt_iop_request_header {
#define IOP_REQUEST_FLAG_BIST_REQUEST 2
#define IOP_REQUEST_FLAG_REMAPPED 4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */
enum hpt_iop_request_type {
IOP_REQUEST_TYPE_GET_CONFIG = 0,
@@ -223,6 +267,13 @@ struct hpt_scsi_pointer {
#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+enum hptiop_family {
+ UNKNOWN_BASED_IOP,
+ INTEL_BASED_IOP,
+ MV_BASED_IOP,
+ MVFREY_BASED_IOP
+};
+
struct hptiop_hba {
struct hptiop_adapter_ops *ops;
union {
@@ -236,6 +287,22 @@ struct hptiop_hba {
void *internal_req;
dma_addr_t internal_req_phy;
} mv;
+ struct {
+ struct hpt_iop_request_get_config __iomem *config;
+ struct hpt_iopmu_mvfrey __iomem *mu;
+
+ int internal_mem_size;
+ struct hptiop_request internal_req;
+ int list_count;
+ struct mvfrey_inlist_entry *inlist;
+ dma_addr_t inlist_phy;
+ __le32 inlist_wptr;
+ struct mvfrey_outlist_entry *outlist;
+ dma_addr_t outlist_phy;
+ __le32 *outlist_cptr; /* copy pointer shadow */
+ dma_addr_t outlist_cptr_phy;
+ __le32 outlist_rptr;
+ } mvfrey;
} u;
struct Scsi_Host *host;
@@ -283,6 +350,7 @@ struct hpt_ioctl_k {
};
struct hptiop_adapter_ops {
+ enum hptiop_family family;
int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
int (*internal_memalloc)(struct hptiop_hba *hba);
int (*internal_memfree)(struct hptiop_hba *hba);
@@ -298,6 +366,8 @@ struct hptiop_adapter_ops {
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
int hw_dma_bit_mask;
+ int (*reset_comm)(struct hptiop_hba *hba);
+ __le64 host_phy_flag;
};
#define HPT_IOCTL_RESULT_OK 0
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index efc6e72f09f..aec2e0da501 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1800,7 +1800,7 @@ out:
* @dev:domain device to be detect.
* @src_dev: the device which originated BROADCAST(CHANGE).
*
- * Add self-configuration expander suport. Suppose two expander cascading,
+ * Add self-configuration expander support. Suppose two expander cascading,
* when the first level expander is self-configuring, hotplug the disks in
* second level expander, BROADCAST(CHANGE) will not only be originated
* in the second level expander, but also be originated in the first level
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 69b59935b53..df4c13a5534 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -689,6 +689,7 @@ struct lpfc_hba {
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
uint32_t cfg_fcp_io_sched;
+ uint32_t cfg_fcp2_no_tgt_reset;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -714,6 +715,7 @@ struct lpfc_hba {
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
+ uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ad16e54ac38..a364cae9e98 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3618,6 +3618,77 @@ static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
/**
+ * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer containing the value written; must be the string "1".
+ * @count: unused variable.
+ *
+ * Description:
+ * Writing "1" to this attribute requests an immediate firmware upgrade
+ * through the Linux generic firmware interface.
+ *
+ * Returns:
+ * length of the buf on success if the upgrade request was accepted.
+ * -EINVAL if the value written is not "1".
+ * -EPERM if the firmware upgrade request failed.
+ **/
+static ssize_t
+lpfc_request_firmware_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val != 1)
+ return -EINVAL;
+
+ rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+ if (rc)
+ rc = -EPERM;
+ else
+ rc = strlen(buf);
+ return rc;
+}
+
+static int lpfc_req_fw_upgrade;
+module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
+lpfc_param_show(request_firmware_upgrade)
+
+/**
+ * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
+ * @phba: lpfc_hba pointer.
+ * @val: 0 or 1.
+ *
+ * Description:
+ * Set the initial Linux generic firmware upgrade enable or disable flag.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= 1) {
+ phba->cfg_request_firmware_upgrade = val;
+ return 0;
+ }
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
+ lpfc_request_firmware_upgrade_show,
+ lpfc_request_firmware_upgrade_store);
+
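+/*
+ * Usage sketch (sysfs path assumed from the standard scsi_host layout,
+ * not spelled out in this patch): writing "1" to
+ * /sys/class/scsi_host/hostN/lpfc_req_fw_upgrade triggers an immediate
+ * upgrade via lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE),
+ * while the lpfc_req_fw_upgrade module parameter only arms the one-shot,
+ * asynchronous upgrade attempted at probe time (INT_FW_UPGRADE).
+ */
+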
+/**
* lpfc_fcp_imax_store
*
* @dev: class device that is converted into a Scsi_host.
@@ -3788,6 +3859,16 @@ LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+ "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -4029,6 +4110,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
+ &dev_attr_lpfc_fcp2_no_tgt_reset,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
@@ -4069,6 +4151,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_sriov_nr_virtfn,
+ &dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -5019,6 +5102,7 @@ void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+ lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -5051,6 +5135,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+ lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4380a44000b..69d66e3662c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -468,3 +468,4 @@ void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
+int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7ffabb7e3af..65f9fb6862e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -634,7 +634,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] && IOERR_PARAM_MASK) !=
+ (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f19e9b6f9f1..b9440deaad4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1182,8 +1182,6 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cmn.w2.r_a_tov = 0;
sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
- sp->cls2.seqDelivery = 1;
- sp->cls3.seqDelivery = 1;
if (sp->cmn.fcphLow < FC_PH3)
sp->cmn.fcphLow = FC_PH3;
if (sp->cmn.fcphHigh < FC_PH3)
@@ -1198,7 +1196,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
}
+ /* Can't do SLI4 Class 2 without sequence coalescing support */
+ sp->cls2.classValid = 0;
+ sp->cls2.seqDelivery = 0;
} else {
+ /* Historical, setting sequential-delivery bit for SLI3 */
+ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+ sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2cdeb5434fb..a47cfbdd05f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3219,6 +3219,9 @@ struct wqe_common {
#define wqe_dif_SHIFT 0
#define wqe_dif_MASK 0x00000003
#define wqe_dif_WORD word7
+#define LPFC_WQE_DIF_PASSTHRU 1
+#define LPFC_WQE_DIF_STRIP 2
+#define LPFC_WQE_DIF_INSERT 3
#define wqe_ct_SHIFT 2
#define wqe_ct_MASK 0x00000003
#define wqe_ct_WORD word7
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dc4218d9c4..c20eec78adc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3854,7 +3854,7 @@ static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
char port_name;
- char message[80];
+ char message[128];
uint8_t status;
struct lpfc_acqe_misconfigured_event *misconfigured;
@@ -9450,7 +9450,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
- /* It can be null, sanity check */
+ /* It can be null in no-wait mode, sanity check */
if (!fw) {
rc = -ENXIO;
goto out;
@@ -9528,11 +9528,48 @@ release_out:
release_firmware(fw);
out:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.", rc);
+ "3024 Firmware update done: %d.\n", rc);
return;
}
/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: INT_FW_UPGRADE (from probe) or RUN_FW_UPGRADE (from sysfs).
+ *
+ * This routine is called to perform Linux generic firmware upgrade on device
+ * that supports such feature.
+ **/
+int
+lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+{
+ uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ int ret;
+ const struct firmware *fw;
+
+ /* Only supported on SLI4 interface type 2 for now */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -EPERM;
+
+ snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+
+ if (fw_upgrade == INT_FW_UPGRADE) {
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ file_name, &phba->pcidev->dev,
+ GFP_KERNEL, (void *)phba,
+ lpfc_write_firmware);
+ } else if (fw_upgrade == RUN_FW_UPGRADE) {
+ ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!ret)
+ lpfc_write_firmware(fw, (void *)phba);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -9560,7 +9597,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
uint32_t cfg_mode, intr_mode;
int mcnt;
int adjusted_fcp_io_channel;
- uint8_t file_name[ELX_MODEL_NAME_SIZE];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -9703,16 +9739,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade (if_type 2 only) */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_2) {
- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
- phba->ModelName);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- file_name, &phba->pcidev->dev,
- GFP_KERNEL, (void *)phba,
- lpfc_write_firmware);
- }
+ /* check for firmware upgrade or downgrade */
+ if (phba->cfg_request_firmware_upgrade)
+ ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7f45ac9964a..60e5a177644 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3227,6 +3227,21 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
}
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_STRIP:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_INSERT:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_READ_PASS:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ break;
+ }
+
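+ /*
+ * These per-I/O flags are consumed in lpfc_sli4_iocb2wqe(), which maps
+ * them onto the WQE DIF field (LPFC_WQE_DIF_PASSTHRU/_STRIP/_INSERT),
+ * replacing the old single LPFC_IO_DIF flag.
+ */
+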
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
@@ -3236,7 +3251,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
return 0;
err:
@@ -4914,6 +4928,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
+ if (vport->phba->cfg_fcp2_no_tgt_reset &&
+ (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+ continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
ndlp->nlp_sid == i &&
ndlp->rport) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d7f3313ef88..624eab37039 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8068,10 +8068,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
break;
case CMD_FCP_IREAD64_CR:
@@ -8091,10 +8087,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
@@ -8304,6 +8296,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
}
+ if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+ iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
+ LPFC_IO_DIF_INSERT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2f48d000a3b..9d2e0c6fe33 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,7 +69,9 @@ struct lpfc_iocbq {
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
-#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
+#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
+#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
+#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index f44a06a4c6e..44c427a45d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -82,6 +82,9 @@
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+#define INT_FW_UPGRADE 0
+#define RUN_FW_UPGRADE 1
+
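+/*
+ * INT_FW_UPGRADE: upgrade requested internally at probe time (asynchronous
+ * request_firmware_nowait()); RUN_FW_UPGRADE: upgrade requested from the
+ * lpfc_req_fw_upgrade sysfs attribute (synchronous request_firmware()).
+ */
+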
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0c2149189dd..ba596e854bb 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.35"
+#define LPFC_DRIVER_VERSION "8.3.36"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
new file mode 100644
index 00000000000..81471bf415d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -0,0 +1,67 @@
+#
+# Kernel configuration file for the MPT3SAS
+#
+# This code is based on drivers/scsi/mpt2sas/Kconfig
+# Copyright (C) 2012 LSI Corporation
+# (mailto:DL-MPTFusionLinux@lsi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_MPT3SAS
+ tristate "LSI MPT Fusion SAS 3.0 Device Driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports PCI-Express SAS 12Gb/s Host Adapters.
+
+config SCSI_MPT3SAS_MAX_SGE
+ int "LSI MPT Fusion Max number of SG Entries (16 - 256)"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ default "128"
+ range 16 256
+ ---help---
+ This option allows you to specify the maximum number of scatter-
+ gather entries per I/O. The driver default is 128, which matches
+ MAX_PHYS_SEGMENTS in most kernels. However, in SuSE kernels this
+ can be 256. It may also be decreased down to 16. Decreasing this
+ parameter reduces the memory requirements of each controller instance.
+
+config SCSI_MPT3SAS_LOGGING
+ bool "LSI MPT Fusion logging facility"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ ---help---
+ This turns on a logging facility.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
new file mode 100644
index 00000000000..4c1d2e7a117
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -0,0 +1,8 @@
+# mpt3sas makefile
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
+mpt3sas-y += mpt3sas_base.o \
+ mpt3sas_config.o \
+ mpt3sas_scsih.o \
+ mpt3sas_transport.o \
+ mpt3sas_ctl.o \
+ mpt3sas_trigger_diag.o
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
new file mode 100644
index 00000000000..03317ffea62
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -0,0 +1,1164 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.26
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+
+/*major version for all MPI v2.x */
+#define MPI2_VERSION_MAJOR (0x02)
+
+/*minor version for MPI v2.0 compatible products */
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_VERSION_02_00 (0x0200)
+
+/*minor version for MPI v2.5 compatible products */
+#define MPI25_VERSION_MINOR (0x05)
+#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI25_VERSION_MINOR)
+#define MPI2_VERSION_02_05 (0x0205)
+
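+/*
+ * e.g. MPI2_VERSION  = (0x02 << 8) | 0x00 = 0x0200 (== MPI2_VERSION_02_00)
+ *      MPI25_VERSION = (0x02 << 8) | 0x05 = 0x0205 (== MPI2_VERSION_02_05)
+ */
+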
+/*Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x1A)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
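+/*
+ * Illustrative read of the IOC state (a sketch of how a driver uses these
+ * masks, not text from the original header; regs is assumed to be a
+ * MPI2_SYSTEM_INTERFACE_REGS pointer):
+ *
+ *	u32 s = readl(&regs->Doorbell) & MPI2_IOC_STATE_MASK;
+ *	if (s == MPI2_IOC_STATE_FAULT)
+ *		fault = readl(&regs->Doorbell) & MPI2_DOORBELL_FAULT_CODE_MASK;
+ */
+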
+/*Fault state range for product specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+ U32 Doorbell; /*0x00 */
+ U32 WriteSequence; /*0x04 */
+ U32 HostDiagnostic; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DiagRWData; /*0x10 */
+ U32 DiagRWAddressLow; /*0x14 */
+ U32 DiagRWAddressHigh; /*0x18 */
+ U32 Reserved2[5]; /*0x1C */
+ U32 HostInterruptStatus; /*0x30 */
+ U32 HostInterruptMask; /*0x34 */
+ U32 DCRData; /*0x38 */
+ U32 DCRAddress; /*0x3C */
+ U32 Reserved3[2]; /*0x40 */
+ U32 ReplyFreeHostIndex; /*0x48 */
+ U32 Reserved4[8]; /*0x4C */
+ U32 ReplyPostHostIndex; /*0x6C */
+ U32 Reserved5; /*0x70 */
+ U32 HCBSize; /*0x74 */
+ U32 HCBAddressLow; /*0x78 */
+ U32 HCBAddressHigh; /*0x7C */
+ U32 Reserved6[16]; /*0x80 */
+ U32 RequestDescriptorPostLow; /*0xC0 */
+ U32 RequestDescriptorPostHigh; /*0xC4 */
+ U32 Reserved7[14]; /*0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS,
+ *PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t,
+ *pMpi2SystemInterfaceRegs_t;
+
+/*
+ *Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/*IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/*System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
+
+/*
+ *Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
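+/*
+ * Driver-side unlock sequence (an assumption of this note, based on how the
+ * key values are typically used rather than on this header): write the flush
+ * value followed by the 1st..6th key values to WriteSequence, which sets
+ * MPI2_DIAG_DIAG_WRITE_ENABLE in HostDiagnostic and permits a diagnostic
+ * reset via MPI2_DIAG_RESET_ADAPTER.
+ */
+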
+/*
+ *Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
+
+/*
+ *Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ *Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ *Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
+
+/*
+ *Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ *Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ *Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
+
+/*
+ *Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ *Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+/*Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/*Request Descriptors */
+
+/*Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DescriptorTypeDependent; /*0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t,
+ *pMpi2DefaultRequestDescriptor_t;
+
+/*defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+/*High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ *pMpi2HighPriorityRequestDescriptor_t;
+
+/*SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DevHandle; /*0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t,
+ *pMpi2SCSIIORequestDescriptor_t;
+
+/*SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ *pMpi2SCSITargetRequestDescriptor_t;
+
+/*RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved; /*0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ *pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/*Fast Path SCSI IO Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi25FastPathSCSIIORequestDescriptor_t,
+ *pMpi25FastPathSCSIIORequestDescriptor_t;
+
+/*union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION,
+ *PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t,
+ *pMpi2RequestDescriptorUnion_t;
+
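+/*
+ * A request descriptor is posted to the IOC by writing its 64-bit Words
+ * value to RequestDescriptorPostLow/High (low dword first); this is driver
+ * behaviour inferred from the register layout above, not stated here.
+ */
+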
+/*Reply Descriptors */
+
+/*Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 DescriptorTypeDependent1; /*0x02 */
+ U32 DescriptorTypeDependent2; /*0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t,
+ *pMpi2DefaultReplyDescriptor_t;
+
+/*defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/*values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/*Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 ReplyFrameAddress; /*0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t,
+ *pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+/*SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 TaskTag; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ *pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/*TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U8 SequenceNumber; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ *pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/*Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U8 VP_ID; /*0x02 */
+ U8 Flags; /*0x03 */
+ U16 InitiatorDevHandle; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ *pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/*defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+/*RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 Reserved; /*0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/*Fast Path SCSI IO Success Reply Descriptor */
+typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
+ *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+
+/*union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION,
+ *PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t,
+ *pMpi2ReplyDescriptorsUnion_t;
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_FUNCTION_IOC_INIT (0x02)
+#define MPI2_FUNCTION_IOC_FACTS (0x03)
+#define MPI2_FUNCTION_CONFIG (0x04)
+#define MPI2_FUNCTION_PORT_FACTS (0x05)
+#define MPI2_FUNCTION_PORT_ENABLE (0x06)
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07)
+#define MPI2_FUNCTION_EVENT_ACK (0x08)
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09)
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B)
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C)
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D)
+#define MPI2_FUNCTION_FW_UPLOAD (0x12)
+#define MPI2_FUNCTION_RAID_ACTION (0x15)
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
+#define MPI2_FUNCTION_TOOLBOX (0x17)
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C)
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/*mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+*Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER {
+ U16 FunctionDependent1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, *pMPI2RequestHeader_t;
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY {
+ U16 FunctionDependent1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 FunctionDependent5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, *pMPI2DefaultReply_t;
+
+/*common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT {
+ U8 Dev; /*0x00 */
+ U8 Unit; /*0x01 */
+ U8 Minor; /*0x02 */
+ U8 Major; /*0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION {
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+/*LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32 {
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, *pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64 {
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, *pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION {
+ U32 FlagsLength;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION,
+ *PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t,
+ *pMpi2SGESimpleUnion_t;
+
+/****************************************************************************
+* MPI Chain Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, *pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, *pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION,
+ *PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t,
+ *pMpi2SGEChainUnion_t;
+
+/****************************************************************************
+* MPI Transaction Context Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32,
+ *PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t,
+ *pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64,
+ *PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t,
+ *pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION,
+ *PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t,
+ *pMpi2SGETransactionUnion_t;
+
+/****************************************************************************
+* MPI SGE union for IO SGLs - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t;
+
+/****************************************************************************
+* MPI SGE union for SGLs with Simple and Transaction elements - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION,
+ *PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t,
+ *pMpi2SGETransSimpleUnion_t;
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t;
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/*Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/*Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/*Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/*Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/*Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \
+ MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_SGE_SET_FLAGS(f))
+#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \
+ MPI2_SGE_CHAIN_OFFSET_SHIFT)
+
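As a minimal usage sketch of how the FlagsLength macros above compose (illustrative only, not part of the header; it assumes mpi2.h is on the include path and supplies the U32/U64 typedefs), a host driver could describe one outbound data buffer with a single 64-bit simple element like this. A real driver would additionally convert the fields to little-endian before handing them to the IOC:

#include "mpi2.h"	/* SGE structures, flag defines, U32/U64 types */

static void example_build_simple_sge64(MPI2_SGE_SIMPLE64 *sge,
				       U64 dma_addr, U32 length)
{
	/* Simple element, 64-bit system address, host-to-IOC data,
	 * and this is the only (therefore last) element of the list. */
	U32 flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST;

	/* Flags occupy the top byte of FlagsLength, the length the
	 * low 24 bits. */
	sge->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(flags, length);
	sge->Address = dma_addr;
}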
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_SIMPLE32 {
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION {
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION,
+ *PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t,
+ *pMpi2IeeeSgeSimpleUnion_t;
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION,
+ *PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t,
+ *pMpi2IeeeSgeChainUnion_t;
+
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+typedef struct _MPI25_IEEE_SGE_CHAIN64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 NextChainOffset;
+ U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64,
+ *PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t,
+ *pMpi25IeeeSgeChain64_t;
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_UNION {
+ union {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t;
+
+/****************************************************************************
+* IEEE SGE union for IO SGLs
+****************************************************************************/
+
+typedef union _MPI25_SGE_IO_UNION {
+ MPI2_IEEE_SGE_SIMPLE64 IeeeSimple;
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain;
+} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION,
+ Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t;
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/*Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/*Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \
+ >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \
+ MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \
+ MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_SET_FLAGS(f))
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_LENGTH(l))
+
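For comparison, a brief sketch of the IEEE-format equivalents (again illustrative, not part of the header): the 64-bit IEEE simple element keeps Flags and Length in separate fields, while the 32-bit form packs them into FlagsLength via the IEEE32 macros above.

#include "mpi2.h"	/* IEEE SGE structures, flag defines, U32/U64 types */

static void example_build_ieee_sge64(MPI2_IEEE_SGE_SIMPLE64 *sge,
				     U64 dma_addr, U32 length)
{
	sge->Address = dma_addr;
	sge->Length = length;
	sge->Reserved1 = 0;
	sge->Reserved2 = 0;
	/* Simple element addressing host (system) memory; on MPI v2.5
	 * parts the final element would also set
	 * MPI25_IEEE_SGE_FLAGS_END_OF_LIST. */
	sge->Flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
}

static void example_build_ieee_sge32(MPI2_IEEE_SGE_SIMPLE32 *sge,
				     U32 dma_addr, U32 length)
{
	sge->Address = dma_addr;
	sge->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(
		MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR, length);
}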
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t;
+
+typedef union _MPI2_SGE_IO_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t;
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/*values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/*values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
new file mode 100644
index 00000000000..d8b2c3eedb5
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,3323 @@
+/*
+ * Copyright (c) 2000-2011 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.22
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Added generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+* Configuration Page Header and defines
+*****************************************************************************/
+
+/*Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 PageLength; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER,
+ Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t;
+
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+ Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion;
+
+/*Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 Reserved2; /*0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ Mpi2ConfigExtendedPageHeader_t,
+ *pMpi2ConfigExtendedPageHeader_t;
+
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+ U8 Bytes[8];
+ U16 Word16[4];
+ U32 Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ Mpi2ConfigPageExtendedHeaderUnion,
+ *pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/*PageType field values */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/*ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+
+
+/*****************************************************************************
+* PageAddress defines
+*****************************************************************************/
+
+/*RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
+
+
+/*SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+
+
+/*SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
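As a small illustration of how these PageAddress forms are meant to be combined (illustrative only, not part of the header), the PageAddress used to read SAS Device Page 0 for one specific device ORs a FORM value with the device handle masked to its field:

#include "mpi2_cnfg.h"	/* assumed to pull in mpi2.h and the U16/U32 types */

static U32 example_sas_device_pageaddress(U16 dev_handle)
{
	/* FORM_HANDLE selects exactly this handle; FORM_GET_NEXT_HANDLE
	 * would instead iterate, starting from the handle given here. */
	return MPI2_SAS_DEVICE_PGAD_FORM_HANDLE |
	       ((U32)dev_handle & MPI2_SAS_DEVICE_PGAD_HANDLE_MASK);
}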
+
+
+/*SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+
+
+/*SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
+
+
+/*SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
+
+
+/*Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
+
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
+
+
+/*Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+
+
+
+/****************************************************************************
+* Configuration messages
+****************************************************************************/
+
+/*Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 ProxyVF_ID; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ U32 Reserved3; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+ U32 PageAddress; /*0x18 */
+ MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */
+} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST,
+ Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t;
+
+/*values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/*Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 Reserved2; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY,
+ Mpi2ConfigReply_t, *pMpi2ConfigReply_t;
+
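A hedged sketch of how the request and reply above are typically used together (not part of the header; issue_config_request() is a hypothetical helper standing in for the driver's own request/reply queue handling): the host first asks for the page header with MPI2_CONFIG_ACTION_PAGE_HEADER, then re-issues the request with a read action using the header the IOC returned.

#include <string.h>	/* memset() */
#include "mpi2.h"	/* MPI2_FUNCTION_CONFIG and the basic types */
#include "mpi2_cnfg.h"

/* Hypothetical helper, assumed for illustration only: submits one config
 * request to the IOC and copies back its reply. */
int issue_config_request(Mpi2ConfigRequest_t *req, Mpi2ConfigReply_t *reply);

static int example_read_current_page(U8 page_type, U8 page_number,
				     U32 page_address)
{
	Mpi2ConfigRequest_t req;
	Mpi2ConfigReply_t reply;
	int rc;

	/* Step 1: fetch only the page header, to learn the page version
	 * and length the IOC actually implements. */
	memset(&req, 0, sizeof(req));
	req.Function = MPI2_FUNCTION_CONFIG;
	req.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	req.Header.PageType = page_type;
	req.Header.PageNumber = page_number;
	req.PageAddress = page_address;
	rc = issue_config_request(&req, &reply);
	if (rc)
		return rc;

	/* Step 2: re-issue with the returned header and a read action.
	 * Building the PageBufferSGE that receives the data, and checking
	 * reply.IOCStatus, are omitted here for brevity. */
	req.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	req.Header = reply.Header;
	return issue_config_request(&req, &reply);
}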
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+
+#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+
+/*MPI v2.0 SAS products */
+#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+/*MPI v2.5 SAS products */
+#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096)
+#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097)
+#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090)
+#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091)
+#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
+#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
+
+
+
+
+/*Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 ChipName[16]; /*0x04 */
+ U8 ChipRevision[8]; /*0x14 */
+ U8 BoardName[16]; /*0x1C */
+ U8 BoardAssembly[16]; /*0x2C */
+ U8 BoardTracerNumber[16]; /*0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+ *PTR_MPI2_CONFIG_PAGE_MAN_0,
+ Mpi2ManufacturingPage0_t,
+ *pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 VPD[256]; /*0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+ *PTR_MPI2_CONFIG_PAGE_MAN_1,
+ Mpi2ManufacturingPage1_t,
+ *pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI2_CHIP_REVISION_ID {
+ U16 DeviceID; /*0x00 */
+ U8 PCIRevisionID; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID,
+ Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t;
+
+
+/*Manufacturing Page 2 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+ *PTR_MPI2_CONFIG_PAGE_MAN_2,
+ Mpi2ManufacturingPage2_t,
+ *pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+ *PTR_MPI2_CONFIG_PAGE_MAN_3,
+ Mpi2ManufacturingPage3_t,
+ *pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS {
+ U8 PowerSaveFlags; /*0x00 */
+ U8 InternalOperationsSleepTime; /*0x01 */
+ U8 InternalOperationsRunTime; /*0x02 */
+ U8 HostIdleTime; /*0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ Mpi2ManPage4PwrSaveSettings_t,
+ *pMpi2ManPage4PwrSaveSettings_t;
+
+/*defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Flags; /*0x08 */
+ U8 InquirySize; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ U8 InquiryData[56]; /*0x10 */
+ U32 RAID0VolumeSettings; /*0x48 */
+ U32 RAID1EVolumeSettings; /*0x4C */
+ U32 RAID1VolumeSettings; /*0x50 */
+ U32 RAID10VolumeSettings; /*0x54 */
+ U32 Reserved4; /*0x58 */
+ U32 Reserved5; /*0x5C */
+ MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */
+ U8 MaxOCEDisks; /*0x64 */
+ U8 ResyncRate; /*0x65 */
+ U16 DataScrubDuration; /*0x66 */
+ U8 MaxHotSpares; /*0x68 */
+ U8 MaxPhysDisksPerVol; /*0x69 */
+ U8 MaxPhysDisks; /*0x6A */
+ U8 MaxVolumes; /*0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+ *PTR_MPI2_CONFIG_PAGE_MAN_4,
+ Mpi2ManufacturingPage4_t,
+ *pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
+
+/*Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
+
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
+
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/*Manufacturing Page 5 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY {
+ U64 WWID; /*0x00 */
+ U64 DeviceName; /*0x08 */
+} MPI2_MANUFACTURING5_ENTRY,
+ *PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t,
+ *pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_MANUFACTURING5_ENTRY
+ Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ *PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t,
+ *pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
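To make the runtime-sizing convention above concrete (illustrative only, not part of the header), host code would size its Manufacturing Page 5 buffer from the NumPhys value reported by the IOC rather than from the one-entry placeholder array the structure is declared with:

#include <stddef.h>	/* offsetof(), size_t */
#include "mpi2_cnfg.h"	/* assumed to pull in mpi2.h and the U8 type */

static size_t example_man_page_5_bytes(U8 num_phys)
{
	/* Phy[] is declared with a single entry purely so the structure
	 * compiles; the IOC reports the real entry count in NumPhys. */
	return offsetof(MPI2_CONFIG_PAGE_MAN_5, Phy) +
	       (size_t)num_phys * sizeof(MPI2_MANUFACTURING5_ENTRY);
}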
+
+
+/*Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ *PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t,
+ *pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
+ U32 Pinout; /*0x00 */
+ U8 Connector[16]; /*0x04 */
+ U8 Location; /*0x14 */
+ U8 ReceptacleID; /*0x15 */
+ U16 Slot; /*0x16 */
+ U32 Reserved2; /*0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO,
+ *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t,
+ *pMpi2ManPage7ConnectorInfo_t;
+
+/*defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+
+/*defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Flags; /*0x0C */
+ U8 EnclosureName[16]; /*0x10 */
+ U8 NumPhys; /*0x20 */
+ U8 Reserved3; /*0x21 */
+ U16 Reserved4; /*0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO
+ ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ *PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t,
+ *pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+/*
+ *Generic structure to use for product-specific manufacturing pages
+ *(currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t,
+ *pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/*IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64 UniqueValue; /*0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /*0x0C */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/*IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/*IO Unit Page 1 Flags defines */
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
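As a small sketch of working with the two-bit SATA write cache field above (illustrative only, not part of the header), a caller would clear the field with its mask and then OR in one of the already-positioned setting values:

#include "mpi2_cnfg.h"	/* assumed to pull in mpi2.h and the U32 type */

static U32 example_set_sata_write_cache(U32 flags, U32 setting)
{
	/* 'setting' is one of MPI2_IOUNITPAGE1_ENABLE/DISABLE/UNCHANGED_
	 * SATA_WRITE_CACHE, which already sit within the masked field. */
	flags &= ~MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE;
	flags |= setting & MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE;
	return flags;
}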
+
+
+/*IO Unit Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 GPIOCount; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16
+ GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/*defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/*IO Unit Page 5 */
+
+/*
+ *Upper layer code (drivers, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64
+ RaidAcceleratorBufferBaseAddress; /*0x04 */
+ U64
+ RaidAcceleratorBufferSize; /*0x0C */
+ U64
+ RaidAcceleratorControlBaseAddress; /*0x14 */
+ U8 RAControlSize; /*0x1C */
+ U8 NumDmaEngines; /*0x1D */
+ U8 RAMinControlSize; /*0x1E */
+ U8 RAMaxControlSize; /*0x1F */
+ U32 Reserved1; /*0x20 */
+ U32 Reserved2; /*0x24 */
+ U32 Reserved3; /*0x28 */
+ U32
+ DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+ Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
+
+
+/*IO Unit Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 Flags; /*0x04 */
+ U8 RAHostControlSize; /*0x06 */
+ U8 Reserved0; /*0x07 */
+ U64
+ RaidAcceleratorHostControlBaseAddress; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ U32 Reserved3; /*0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+ Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
+
+
+/*IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 CurrentPowerMode; /*0x04 */
+ U8 PreviousPowerMode; /*0x05 */
+ U8 PCIeWidth; /*0x06 */
+ U8 PCIeSpeed; /*0x07 */
+ U32 ProcessorState; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U16 IOCTemperature; /*0x10 */
+ U8
+ IOCTemperatureUnits; /*0x12 */
+ U8 IOCSpeed; /*0x13 */
+ U16 BoardTemperature; /*0x14 */
+ U8
+ BoardTemperatureUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+ Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+
+/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
+#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
+#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40)
+#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80)
+#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01)
+#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04)
+#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05)
+#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06)
+
+
+/*defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+
+/*defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+
+/*defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+
+/*defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001)
+
+/*obsolete names for the PowerManagementCapabilities bits (above) */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */
+
+
+/*defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
+
+/*defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+
+/*defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
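As an illustrative sketch (not part of the header), a monitoring path consuming IO Unit Page 7 would check the units field before trusting BoardTemperature, since the sensor may simply not be present:

#include "mpi2_cnfg.h"	/* assumed to pull in mpi2.h and the basic types */

static int example_board_temp_celsius(const MPI2_CONFIG_PAGE_IO_UNIT_7 *p,
				      int *celsius)
{
	switch (p->BoardTemperatureUnits) {
	case MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS:
		*celsius = p->BoardTemperature;
		return 0;
	case MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT:
		*celsius = (p->BoardTemperature - 32) * 5 / 9;
		return 0;
	default:	/* MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT */
		return -1;
	}
}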
+
+
+/*IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR,
+ Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t;
+
+/*defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 PollingInterval; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+ Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 Flags; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U16 Reserved3; /*0x06 */
+ U32 Reserved4; /*0x08 */
+ U32 Reserved5; /*0x0C */
+} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR,
+ Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t;
+
+/*defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 Reserved4; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+ Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_IOUNIT10_FUNCTION,
+ *PTR_MPI2_IOUNIT10_FUNCTION,
+ Mpi2IOUnit10Function_t,
+ *pMpi2IOUnit10Function_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumFunctions; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+ Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/*IOC Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U16 VendorID; /*0x0C */
+ U16 DeviceID; /*0x0E */
+ U8 RevisionID; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ U32 ClassCode; /*0x14 */
+ U16 SubsystemVendorID; /*0x18 */
+ U16 SubsystemID; /*0x1A */
+} MPI2_CONFIG_PAGE_IOC_0,
+ *PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, *pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/*IOC Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+ U32 CoalescingTimeout; /*0x08 */
+ U8 CoalescingDepth; /*0x0C */
+ U8 PCISlotNum; /*0x0D */
+ U8 PCIBusNum; /*0x0E */
+ U8 PCIDomainSegment; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_1,
+ *PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/*defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/*IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32
+ CapabilitiesFlags; /*0x04 */
+ U8 MaxDrivesRAID0; /*0x08 */
+ U8 MaxDrivesRAID1; /*0x09 */
+ U8
+ MaxDrivesRAID1E; /*0x0A */
+ U8
+ MaxDrivesRAID10; /*0x0B */
+ U8 MinDrivesRAID0; /*0x0C */
+ U8 MinDrivesRAID1; /*0x0D */
+ U8
+ MinDrivesRAID1E; /*0x0E */
+ U8
+ MinDrivesRAID10; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U8
+ MaxGlobalHotSpares; /*0x14 */
+ U8 MaxPhysDisks; /*0x15 */
+ U8 MaxVolumes; /*0x16 */
+ U8 MaxConfigs; /*0x17 */
+ U8 MaxOCEDisks; /*0x18 */
+ U8 Reserved2; /*0x19 */
+ U16 Reserved3; /*0x1A */
+ U32
+ SupportedStripeSizeMapRAID0; /*0x1C */
+ U32
+ SupportedStripeSizeMapRAID1E; /*0x20 */
+ U32
+ SupportedStripeSizeMapRAID10; /*0x24 */
+ U32 Reserved4; /*0x28 */
+ U32 Reserved5; /*0x2C */
+ U16
+ DefaultMetadataSize; /*0x30 */
+ U16 Reserved6; /*0x32 */
+ U16
+ MaxBadBlockTableEntries; /*0x34 */
+ U16 Reserved7; /*0x36 */
+ U32
+ IRNvsramVersion; /*0x38 */
+} MPI2_CONFIG_PAGE_IOC_6,
+ *PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, *pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/*defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/*IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32
+ EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */
+ U16 SASBroadcastPrimitiveMasks; /*0x18 */
+ U16 SASNotifyPrimitiveMasks; /*0x1A */
+ U32 Reserved3; /*0x1C */
+} MPI2_CONFIG_PAGE_IOC_7,
+ *PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, *pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/*IOC Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumDevsPerEnclosure; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16 MaxPersistentEntries; /*0x08 */
+ U16 MaxNumPhysicalMappedIDs; /*0x0A */
+ U16 Flags; /*0x0C */
+ U16 Reserved3; /*0x0E */
+ U16 IRVolumeMappingFlags; /*0x10 */
+ U16 Reserved4; /*0x12 */
+ U32 Reserved5; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_8,
+ *PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, *pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/*defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/*defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
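+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): extracting the device mapping mode from the IOC Page 8 Flags
+ *field so it can be compared against the _MAPPING defines above.  Host
+ *byte order assumed.
+ */
+static inline U16
+mpi2_iocpage8_mapping_mode(const MPI2_CONFIG_PAGE_IOC_8 *page8)
+{
+	return page8->Flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE;
+}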
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/*BIOS Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 BiosOptions; /*0x04 */
+ U32 IOCSettings; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DeviceSettings; /*0x10 */
+ U16 NumberOfDevices; /*0x14 */
+ U16 UEFIVersion; /*0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /*0x18 */
+ U16 IOTimeoutSequential; /*0x1A */
+ U16 IOTimeoutOther; /*0x1C */
+ U16 IOTimeoutBlockDevicesRM; /*0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+
+/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/*values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/*values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/*defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
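+
+/*
+ *Illustrative sketch (hypothetical helpers, not part of the MPI 2.0
+ *headers): splitting the BIOS Page 1 UEFIVersion field into its major and
+ *minor components with the mask/shift defines above.
+ */
+static inline U8
+mpi2_biospage1_uefi_major(U16 uefi_version)
+{
+	return (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK) >>
+		MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT;
+}
+
+static inline U8
+mpi2_biospage1_uefi_minor(U16 uefi_version)
+{
+	return (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK) >>
+		MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT;
+}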
+
+
+
+/*BIOS Page 2 */
+
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER {
+ U32 Reserved1; /*0x00 */
+ U32 Reserved2; /*0x04 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t,
+ *pMpi2BootDeviceAdapterOrder_t;
+
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID {
+ U64 SASAddress; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID,
+ *PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t,
+ *pMpi2BootDeviceSasWwid_t;
+
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT {
+ U64 EnclosureLogicalID; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 SlotNumber; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t,
+ *pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME {
+ U64 DeviceName; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME,
+ *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t,
+ *pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE {
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE,
+ *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t,
+ *pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+ U32 Reserved5; /*0x14 */
+ U32 Reserved6; /*0x18 */
+ U8 ReqBootDeviceForm; /*0x1C */
+ U8 Reserved7; /*0x1D */
+ U16 Reserved8; /*0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */
+ U8 ReqAltBootDeviceForm; /*0x38 */
+ U8 Reserved9; /*0x39 */
+ U16 Reserved10; /*0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */
+ U8 CurrentBootDeviceForm; /*0x54 */
+ U8 Reserved11; /*0x55 */
+ U16 Reserved12; /*0x56 */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, *pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/*values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
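+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): selecting the identifier out of the RequestedBootDevice union
+ *according to the ReqBootDeviceForm field.  Returns zero when no device is
+ *specified or the form is adapter-order based; host byte order assumed.
+ */
+static inline U64
+mpi2_biospage2_requested_boot_id(const MPI2_CONFIG_PAGE_BIOS_2 *page2)
+{
+	switch (page2->ReqBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK) {
+	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+		return page2->RequestedBootDevice.SasWwid.SASAddress;
+	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+		return page2->RequestedBootDevice.EnclosureSlot.EnclosureLogicalID;
+	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+		return page2->RequestedBootDevice.DeviceName.DeviceName;
+	default:
+		return 0;
+	}
+}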
+
+
+/*BIOS Page 3 */
+
+typedef struct _MPI2_ADAPTER_INFO {
+ U8 PciBusNumber; /*0x00 */
+ U8 PciDeviceAndFunctionNumber; /*0x01 */
+ U16 AdapterFlags; /*0x02 */
+} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 GlobalFlags; /*0x04 */
+ U32 BiosVersion; /*0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ U32 Reserved1; /*0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/*values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*BIOS Page 4 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY {
+ U64 ReassignmentWWID; /*0x00 */
+ U64 ReassignmentDeviceName; /*0x08 */
+} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY,
+ Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ MPI2_BIOS4_ENTRY
+ Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
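+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): computing the buffer size needed for a BIOS Page 4 read when
+ *the IOC reports more phys than the single-entry placeholder above.  The
+ *same pattern applies to the other variable-length pages in this file.
+ *Assumes offsetof() and size_t are available (<stddef.h> or the kernel
+ *equivalent).
+ */
+static inline size_t
+mpi2_biospage4_size(U8 num_phys)
+{
+	return offsetof(MPI2_CONFIG_PAGE_BIOS_4, Phy) +
+		(size_t)num_phys * sizeof(MPI2_BIOS4_ENTRY);
+}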
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/*RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U8 PhysDiskNum; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS {
+ U16 Settings; /*0x00 */
+ U8 HotSparePool; /*0x01 */
+ U8 Reserved; /*0x02 */
+} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t,
+ *pMpi2RaidVol0Settings_t;
+
+/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 VolumeState; /*0x06 */
+ U8 VolumeType; /*0x07 */
+ U32 VolumeStatusFlags; /*0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */
+ U64 MaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U16 BlockSize; /*0x1C */
+ U16 Reserved1; /*0x1E */
+ U8 SupportedPhysDisks;/*0x20 */
+ U8 ResyncRate; /*0x21 */
+ U16 DataScrubDuration; /*0x22 */
+ U8 NumPhysDisks; /*0x24 */
+ U8 Reserved2; /*0x25 */
+ U8 Reserved3; /*0x26 */
+ U8 InactiveStatus; /*0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK
+ PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/*values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/*values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/*values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/*values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/*values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
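+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): locating a physical disk number within the variable-length
+ *PhysDisk array of RAID Volume Page 0.  Assumes the page buffer was
+ *allocated large enough for NumPhysDisks entries, as the comment above
+ *MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX requires.
+ */
+static inline int
+mpi2_raidvol0_find_phys_disk(const MPI2_CONFIG_PAGE_RAID_VOL_0 *vol0,
+	U8 phys_disk_num)
+{
+	U8 i;
+
+	for (i = 0; i < vol0->NumPhysDisks; i++)
+		if (vol0->PhysDisk[i].PhysDiskNum == phys_disk_num)
+			return i;
+	return -1;
+}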
+
+
+/*RAID Volume Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U16 Reserved0; /*0x06 */
+ U8 GUID[24]; /*0x08 */
+ U8 Name[16]; /*0x20 */
+ U64 WWID; /*0x30 */
+ U32 Reserved1; /*0x38 */
+ U32 Reserved2; /*0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/*RAID Physical Disk Page 0 */
+
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS {
+ U16 Reserved1; /*0x00 */
+ U8 HotSparePool; /*0x02 */
+ U8 Reserved2; /*0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS,
+ *PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t,
+ *pMpi2RaidPhysDisk0Settings_t;
+
+/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA {
+ U8 VendorID[8]; /*0x00 */
+ U8 ProductID[16]; /*0x08 */
+ U8 ProductRevLevel[4]; /*0x18 */
+ U8 SerialNum[32]; /*0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t,
+ *pMpi2RaidPhysDisk0InquiryData_t;
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 PhysDiskNum; /*0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */
+ U32 Reserved3; /*0x4C */
+ U8 PhysDiskState; /*0x50 */
+ U8 OfflineReason; /*0x51 */
+ U8 IncompatibleReason; /*0x52 */
+ U8 PhysDiskAttributes; /*0x53 */
+ U32 PhysDiskStatusFlags;/*0x54 */
+ U64 DeviceMaxLBA; /*0x58 */
+ U64 HostMaxLBA; /*0x60 */
+ U64 CoercedMaxLBA; /*0x68 */
+ U16 BlockSize; /*0x70 */
+ U16 Reserved5; /*0x72 */
+ U32 Reserved6; /*0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t,
+ *pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/*PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/*OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/*IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/*PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/*PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
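+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): classifying a physical disk from the PhysDiskAttributes media
+ *bits of RAID Physical Disk Page 0.
+ */
+static inline int
+mpi2_physdisk0_is_ssd(const MPI2_CONFIG_PAGE_RD_PDISK_0 *pd0)
+{
+	return (pd0->PhysDiskAttributes & MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK) ==
+		MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE;
+}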
+
+
+/*RAID Physical Disk Page 1 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _MPI2_RAIDPHYSDISK1_PATH {
+ U16 DevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U64 WWID; /*0x04 */
+ U64 OwnerWWID; /*0x0C */
+ U8 OwnerIdentifier; /*0x14 */
+ U8 Reserved2; /*0x15 */
+ U16 Flags; /*0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t,
+ *pMpi2RaidPhysDisk1Path_t;
+
+/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhysDiskPaths; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 Reserved1; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ MPI2_RAIDPHYSDISK1_PATH
+ PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t,
+ *pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
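+
+/*
+ *Illustrative sketch (hypothetical helpers, not part of the MPI 2.0
+ *headers): separating a NegotiatedLinkRate byte into its logical and
+ *physical rate nibbles so each can be compared against the rate codes
+ *above.
+ */
+static inline U8
+mpi2_sas_neg_link_rate_logical(U8 negotiated_link_rate)
+{
+	return (negotiated_link_rate & MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL) >>
+		MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL;
+}
+
+static inline U8
+mpi2_sas_neg_link_rate_physical(U8 negotiated_link_rate)
+{
+	return negotiated_link_rate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL;
+}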
+
+
+/*values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/*values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/*values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+
+
+/*values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/*SAS IO Unit Page 0 */
+
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 DiscoveryStatus; /*0x0C */
+ U32 Reserved; /*0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t,
+ *pMpi2SasIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1;/*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 Reserved2;/*0x0D */
+ U16 Reserved3;/*0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/*values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/*values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
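+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): OR-ing together the per-phy DiscoveryStatus words of SAS IO
+ *Unit Page 0.  Assumes the page buffer was allocated large enough for
+ *NumPhys entries and is in host byte order.
+ */
+static inline U32
+mpi2_sasiounit0_discovery_status(const MPI2_CONFIG_PAGE_SASIOUNIT_0 *page0)
+{
+	U32 status = 0;
+	U8 i;
+
+	for (i = 0; i < page0->NumPhys; i++)
+		status |= page0->PhyData[i].DiscoveryStatus;
+	return status;
+}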
+
+
+/*SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U16 MaxTargetPortConnectTime; /*0x08 */
+ U16 Reserved1; /*0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t,
+ *pMpi2SasIOUnit1PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 ControlFlags; /*0x08 */
+ U16 SASNarrowMaxQueueDepth; /*0x0A */
+ U16 AdditionalControlFlags; /*0x0C */
+ U16 SASWideMaxQueueDepth; /*0x0E */
+ U8 NumPhys; /*0x10 */
+ U8 SATAMaxQDepth; /*0x11 */
+ U8 ReportDeviceMissingDelay; /*0x12 */
+ U8 IODeviceMissingDelay; /*0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/*values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
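+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): converting ReportDeviceMissingDelay to seconds on the
+ *assumption, suggested by the defines above, that the UNIT_16 bit scales
+ *the 7-bit count by 16.
+ */
+static inline unsigned int
+mpi2_sasiounit1_report_missing_seconds(U8 report_device_missing_delay)
+{
+	unsigned int seconds =
+		report_device_missing_delay &
+		MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+	if (report_device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+		seconds *= 16;
+	return seconds;
+}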
+
+/*values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/*values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
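+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): composing a MaxMinLinkRate byte for SAS IO Unit Page 1 phy
+ *data from one of the _MAX_RATE_ codes and one of the _MIN_RATE_ codes
+ *above, e.g. mpi2_sasiounit1_max_min_rate(MPI2_SASIOUNIT1_MAX_RATE_6_0,
+ *MPI2_SASIOUNIT1_MIN_RATE_1_5).
+ */
+static inline U8
+mpi2_sasiounit1_max_min_rate(U8 max_rate, U8 min_rate)
+{
+	return (max_rate & MPI2_SASIOUNIT1_MAX_RATE_MASK) |
+		(min_rate & MPI2_SASIOUNIT1_MIN_RATE_MASK);
+}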
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/*SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /*0x00 */
+ U8 SpinupDelay; /*0x01 */
+ U8 SpinupFlags; /*0x02 */
+ U8 Reserved1; /*0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t,
+ *pMpi2SasIOUnit4SpinupGroup_t;
+/*defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */
+ U32 Reserved1; /*0x18 */
+ U32 Reserved2; /*0x1C */
+ U32 Reserved3; /*0x20 */
+ U8 BootDeviceWaitTime; /*0x24 */
+ U8 Reserved4; /*0x25 */
+ U16 Reserved5; /*0x26 */
+ U8 NumPhys; /*0x28 */
+ U8 PEInitialSpinupDelay; /*0x29 */
+ U8 PEReplyDelay; /*0x2A */
+ U8 Flags; /*0x2B */
+ U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/*defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/*SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+ U8 ControlFlags; /*0x00 */
+ U8 PortWidthModGroup; /*0x01 */
+ U16 InactivityTimerExponent; /*0x02 */
+ U8 SATAPartialTimeout; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 SATASlumberTimeout; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U8 SASPartialTimeout; /*0x08 */
+ U8 Reserved4; /*0x09 */
+ U8 SASSlumberTimeout; /*0x0A */
+ U8 Reserved5; /*0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t,
+ *pMpi2SasIOUnit5PhyPmSettings_t;
+
+/*defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/*defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/*defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x08 */
+ U8 Reserved1;/*0x09 */
+ U16 Reserved2;/*0x0A */
+ U32 Reserved3;/*0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+ SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/*SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
+ U8 CurrentStatus; /*0x00 */
+ U8 CurrentModulation; /*0x01 */
+ U8 CurrentUtilization; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 Reserved2; /*0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ *pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/*defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/*defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U8 NumGroups; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
+ U8 Flags; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 Threshold75Pct; /*0x04 */
+ U8 Threshold50Pct; /*0x05 */
+ U8 Threshold25Pct; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ *pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 SamplingInterval; /*0x08 */
+ U8 WindowLength; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U8 NumGroups; /*0x14 */
+ U8 Reserved4; /*0x15 */
+ U16 Reserved5; /*0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 PowerManagementCapabilities; /*0x0C */
+ U8 TxRxSleepStatus; /*0x10 */
+ U8 Reserved2; /*0x11 */
+ U16 Reserved3; /*0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/*defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+/*defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
+
+
+
+/*SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U64 TimeStamp; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ U32 FastPathPendedRequests; /*0x18 */
+ U32 FastPathUnPendedRequests; /*0x1C */
+ U32 FastPathHostRequestStarts; /*0x20 */
+ U32 FastPathFirmwareRequestStarts; /*0x24 */
+ U32 FastPathHostCompletions; /*0x28 */
+ U32 FastPathFirmwareCompletions; /*0x2C */
+ U32 NonFastPathRequestStarts; /*0x30 */
+ U32 NonFastPathHostCompletions; /*0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+ Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/*SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 ReportGenLength; /*0x09 */
+ U16 EnclosureHandle; /*0x0A */
+ U64 SASAddress; /*0x0C */
+ U32 DiscoveryStatus; /*0x14 */
+ U16 DevHandle; /*0x18 */
+ U16 ParentDevHandle; /*0x1A */
+ U16 ExpanderChangeCount; /*0x1C */
+ U16 ExpanderRouteIndexes; /*0x1E */
+ U8 NumPhys; /*0x20 */
+ U8 SASLevel; /*0x21 */
+ U16 Flags; /*0x22 */
+ U16 STPBusInactivityTimeLimit; /*0x24 */
+ U16 STPMaxConnectTimeLimit; /*0x26 */
+ U16 STP_SMP_NexusLossTime; /*0x28 */
+ U16 MaxNumRoutedSasAddresses; /*0x2A */
+ U64 ActiveZoneManagerSASAddress; /*0x2C */
+ U16 ZoneLockInactivityLimit; /*0x34 */
+ U16 Reserved1; /*0x36 */
+ U8 TimeToReducedFunc; /*0x38 */
+ U8 InitialTimeToReducedFunc; /*0x39 */
+ U8 MaxReducedFuncTime; /*0x3A */
+ U8 Reserved2; /*0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/*values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/*values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
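+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): checking from SAS Expander Page 0 whether an expander both
+ *supports zoning and currently has it enabled.  Host byte order assumed.
+ */
+static inline int
+mpi2_sasexpander0_zoning_enabled(const MPI2_CONFIG_PAGE_EXPANDER_0 *exp0)
+{
+	return (exp0->Flags & MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT) &&
+		(exp0->Flags & MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING);
+}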
+
+
+/*SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumPhys; /*0x0C */
+ U8 Phy; /*0x0D */
+ U16 NumTableEntriesProgrammed; /*0x0E */
+ U8 ProgrammedLinkRate; /*0x10 */
+ U8 HwLinkRate; /*0x11 */
+ U16 AttachedDevHandle; /*0x12 */
+ U32 PhyInfo; /*0x14 */
+ U32 AttachedDeviceInfo; /*0x18 */
+ U16 ExpanderDevHandle; /*0x1C */
+ U8 ChangeCount; /*0x1E */
+ U8 NegotiatedLinkRate; /*0x1F */
+ U8 PhyIdentifier; /*0x20 */
+ U8 AttachedPhyIdentifier; /*0x21 */
+ U8 Reserved3; /*0x22 */
+ U8 DiscoveryInfo; /*0x23 */
+ U32 AttachedPhyInfo; /*0x24 */
+ U8 ZoneGroup; /*0x28 */
+ U8 SelfConfigStatus; /*0x29 */
+ U16 Reserved4; /*0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines
+ *used for the AttachedDeviceInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/*SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U64 SASAddress; /*0x0C */
+ U16 ParentDevHandle; /*0x14 */
+ U8 PhyNum; /*0x16 */
+ U8 AccessStatus; /*0x17 */
+ U16 DevHandle; /*0x18 */
+ U8 AttachedPhyIdentifier; /*0x1A */
+ U8 ZoneGroup; /*0x1B */
+ U32 DeviceInfo; /*0x1C */
+ U16 Flags; /*0x20 */
+ U8 PhysicalPort; /*0x22 */
+ U8 MaxPortConnections; /*0x23 */
+ U64 DeviceName; /*0x24 */
+ U8 PortGroups; /*0x2C */
+ U8 DmaGroup; /*0x2D */
+ U8 ControlGroup; /*0x2E */
+ U8 Reserved1; /*0x2F */
+ U32 Reserved2; /*0x30 */
+ U32 Reserved3; /*0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t,
+ *pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+
+/*values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/*specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/*values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
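+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): checking from SAS Device Page 0 whether a present device is
+ *fast-path capable (MPI 2.5 flag).  Host byte order assumed.
+ */
+static inline int
+mpi25_sasdevice0_fast_path_capable(const MPI2_CONFIG_PAGE_SAS_DEV_0 *dev0)
+{
+	return (dev0->Flags & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT) &&
+		(dev0->Flags & MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE);
+}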
+
+
+/*SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U64 SASAddress; /*0x0C */
+ U32 Reserved2; /*0x14 */
+ U16 DevHandle; /*0x18 */
+ U16 Reserved3; /*0x1A */
+ U8 InitialRegDeviceFIS[20]; /*0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t,
+ *pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/*SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 OwnerDevHandle; /*0x08 */
+ U16 Reserved1; /*0x0A */
+ U16 AttachedDevHandle; /*0x0C */
+ U8 AttachedPhyIdentifier; /*0x0E */
+ U8 Reserved2; /*0x0F */
+ U32 AttachedPhyInfo; /*0x10 */
+ U8 ProgrammedLinkRate; /*0x14 */
+ U8 HwLinkRate; /*0x15 */
+ U8 ChangeCount; /*0x16 */
+ U8 Flags; /*0x17 */
+ U32 PhyInfo; /*0x18 */
+ U8 NegotiatedLinkRate; /*0x1C */
+ U8 Reserved3; /*0x1D */
+ U16 Reserved4; /*0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 InvalidDwordCount; /*0x0C */
+ U32 RunningDisparityErrorCount; /*0x10 */
+ U32 LossDwordSynchCount; /*0x14 */
+ U32 PhyResetProblemCount; /*0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/*SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 PhyEventInfo; /*0x04 */
+} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 NumPhyEvents; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_SASPHY2_PHY_EVENT
+ PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t,
+ *pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t,
+ *pMpi2SasPhy3PhyEventConfig_t;
+
+/*values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/*values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
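+
+/*
+ *Illustrative sketch (hypothetical helper, not part of the MPI 2.0
+ *headers): filling one SAS PHY Page 3 entry so the invalid-dword counter
+ *wraps and raises an event notification at the given threshold.  The
+ *ThresholdWindow value and the exact firmware interpretation of these
+ *fields are assumptions; only the field and define names come from this
+ *file.
+ */
+static inline void
+mpi2_sasphy3_setup_invalid_dword_event(MPI2_SASPHY3_PHY_EVENT_CONFIG *cfg,
+	U32 threshold)
+{
+	cfg->PhyEventCode = MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD;
+	cfg->CounterType = MPI2_SASPHY3_COUNTER_TYPE_WRAPPING;
+	cfg->TimeUnits = MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS;
+	cfg->ThresholdWindow = 1; /*assumed: one time unit per window */
+	cfg->EventThreshold = threshold;
+	cfg->ThresholdFlags = MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY;
+	cfg->Reserved1 = 0;
+	cfg->Reserved2 = 0;
+	cfg->Reserved3 = 0;
+	cfg->Reserved4 = 0;
+}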
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 NumPhyEvents; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG
+ PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 Reserved1; /*0x08 */
+ U8 Reserved2; /*0x0A */
+ U8 Flags; /*0x0B */
+ U8 InitialFrame[28]; /*0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/*values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/*SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PortNumber; /*0x08 */
+ U8
+ PhysicalPort; /*0x09 */
+ U8
+ PortWidth; /*0x0A */
+ U8
+ PhysicalPortWidth; /*0x0B */
+ U8
+ ZoneGroup; /*0x0C */
+ U8
+ Reserved1; /*0x0D */
+ U16
+ Reserved2; /*0x0E */
+ U64
+ SASAddress; /*0x10 */
+ U32
+ DeviceInfo; /*0x18 */
+ U32
+ Reserved3; /*0x1C */
+ U32
+ Reserved4; /*0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/*SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ EnclosureLogicalID; /*0x0C */
+ U16
+ Flags; /*0x14 */
+ U16
+ EnclosureHandle; /*0x16 */
+ U16
+ NumSlots; /*0x18 */
+ U16
+ StartSlot; /*0x1A */
+ U16
+ Reserved2; /*0x1C */
+ U16
+ SEPDevHandle; /*0x1E */
+ U32
+ Reserved3; /*0x20 */
+ U32
+ Reserved4; /*0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+
+/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/*Log Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8
+ LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */
+} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, *pMpi2Log0Entry_t;
+
+/*values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 NumLogEntries;/*0x10 */
+ U16 Reserved3; /*0x12 */
+ MPI2_LOG_0_ENTRY
+ LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, *pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/*RAID Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 HotSparePool; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t,
+ *pMpi2RaidConfig0ConfigElement_t;
+
+/*values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumHotSpares; /*0x08 */
+ U8 NumPhysDisks; /*0x09 */
+ U8 NumVolumes; /*0x0A */
+ U8 ConfigNum; /*0x0B */
+ U32 Flags; /*0x0C */
+ U8 ConfigGUID[24]; /*0x10 */
+ U32 Reserved1; /*0x28 */
+ U8 NumElements; /*0x2C */
+ U8 Reserved2; /*0x2D */
+ U16 Reserved3; /*0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+ ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t,
+ *pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/*values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/*Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY {
+ U64 PhysicalIdentifier; /*0x00 */
+ U16 MappingInformation; /*0x08 */
+ U16 DeviceIndex; /*0x0A */
+ U32 PhysicalBitsMapping; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/*values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/*Ethernet Page 0 */
+
+/*IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumInterfaces; /*0x08 */
+ U8 Reserved0; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Status; /*0x0C */
+ U8 MediaState; /*0x10 */
+ U8 Reserved2; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U8 MacAddress[6]; /*0x14 */
+ U8 Reserved4; /*0x1A */
+ U8 Reserved5; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/*values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/*Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved0; /*0x08 */
+ U32
+ Flags; /*0x0C */
+ U8
+ MediaState; /*0x10 */
+ U8
+ Reserved1; /*0x11 */
+ U16
+ Reserved2; /*0x12 */
+ U8
+ MacAddress[6]; /*0x14 */
+ U8
+ Reserved3; /*0x1A */
+ U8
+ Reserved4; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR
+ StaticIpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticSubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticGatewayIpAddress; /*0x3C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS2IpAddress; /*0x5C */
+ U32
+ Reserved5; /*0x6C */
+ U32
+ Reserved6; /*0x70 */
+ U32
+ Reserved7; /*0x74 */
+ U32
+ Reserved8; /*0x78 */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/*values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ *Generic structure to use for product-specific extended manufacturing pages
+ *(currently Extended Manufacturing Page 40 through Extended Manufacturing
+ *Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ ProductSpecificInfo; /*0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ *pMpi2ExtManufacturingPagePS_t;
+
+/*PageVersion should be provided by product-specific code */
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
new file mode 100644
index 00000000000..a079e524247
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.14
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
+ U8 CDB[20]; /*0x00 */
+ U32 PrimaryReferenceTag; /*0x14 */
+ U16 PrimaryApplicationTag; /*0x18 */
+ U16 PrimaryApplicationTagMask; /*0x1A */
+ U32 TransferLength; /*0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t;
+
+/*MPI v2.0 CDB field */
+typedef union _MPI2_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t;
+
+/*MPI v2.0 SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U16 SGLFlags; /*0x10 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U32 EEDPBlockSize; /*0x28 */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t;
+
+/*SCSI IO MsgFlags bits */
+
+/*MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/*SCSI IO SGLFlags bits */
+
+/*base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/*base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/*shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/*number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
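The SGLFlags word packs a 4-bit descriptor per SGL (address space in the upper two bits of each nibble, SGL type in the lower two), positioned with the SGL0..SGL3 shifts above. A hedged sketch of composing it when every SGL is an MPI-format SGE in host system memory; the helper is illustrative, not a driver API, and wire-format byte-swapping is omitted.

/* Sketch: SGLFlags for a SCSI IO where SGL0..SGL3 are all MPI-format SGEs
 * located in host system memory. */
static U16 scsiio_sglflags_all_system_mpi(void)
{
	U16 one = MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR |
		  MPI2_SCSIIO_SGLFLAGS_TYPE_MPI;

	return (U16)((one << MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT) |
		     (one << MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT) |
		     (one << MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT) |
		     (one << MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT));
}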
+
+/*SCSI IO IoFlags bits */
+
+/*Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
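An EEDPFlags value is built by OR-ing exactly one operation from the MASK_OP group with whichever check/increment bits apply. The sketch below is an illustration only: the right combination depends on the device's DIF type, and this is not presented as the driver's actual policy.

/* Sketch: verify guard, application tag and reference tag on protected data,
 * have the IOC strip the protection information before it reaches host
 * memory, and increment the primary reference tag per block. Illustrative
 * combination only. */
static U16 scsiio_eedpflags_check_and_strip(void)
{
	return MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
	       MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD |
	       MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
	       MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
	       MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
}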
+
+/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/*SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/*alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
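The Control word therefore carries the additional CDB length, data direction, task attribute and TLR setting OR'd together. A hedged sketch for an ordinary simple-queue write; the helper name is made up, add_cdb_len is shifted in as-is (its units are defined by the MPI specification and not restated here), and endian conversion is omitted.

/* Sketch: Control value for a write with the SIMPLE task attribute and TLR
 * disabled; add_cdb_len is only non-zero when a large CDB is used. */
static U32 scsiio_control_simple_write(U32 add_cdb_len)
{
	return MPI2_SCSIIO_CONTROL_WRITE |
	       MPI2_SCSIIO_CONTROL_SIMPLEQ |
	       MPI2_SCSIIO_CONTROL_NO_TLR |
	       (add_cdb_len << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT);
}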
+
+/*MPI v2.5 CDB field */
+typedef union _MPI25_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_IEEE_SGE_SIMPLE64 SGE;
+} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
+ Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
+
+/*MPI v2.5 SCSI IO Request Message */
+typedef struct _MPI25_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U8 DMAFlags; /*0x10 */
+ U8 Reserved5; /*0x11 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U16 EEDPBlockSize; /*0x28 */
+ U16 Reserved6; /*0x2A */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI25_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST,
+ Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t;
+
+/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */
+
+/*Defines for the DMAFlags field
+ * Each setting affects 4 SGLs, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/*number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/*defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*MPI v2.5 defines for the EEDPFlags bits */
+/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/*use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
+
+/*SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 SCSIStatus; /*0x0C */
+ U8 SCSIState; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferCount; /*0x14 */
+ U32 SenseCount; /*0x18 */
+ U32 ResponseInfo; /*0x1C */
+ U16 TaskTag; /*0x20 */
+ U16 Reserved4; /*0x22 */
+ U32 BidirectionalTransferCount; /*0x24 */
+ U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
+ U32 Reserved6; /*0x2C */
+} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+
+/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/*SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
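Tying the reply fields together: the ResponseInfo reason code is only meaningful when the SCSIState flags say so. A hedged sketch of that check; field values are used as stored in the structure (a real driver would apply le16_to_cpu/le32_to_cpu first on big-endian hosts), and the -1 return convention is purely illustrative.

/* Sketch: return the SAM-4 reason code from a SCSI IO reply, or -1 when the
 * SCSIState flags indicate ResponseInfo is not valid. */
static int scsiio_reply_reason_code(const MPI2_SCSI_IO_REPLY *reply)
{
	if (!(reply->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID))
		return -1;

	return (int)((reply->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) >>
		     MPI2_SCSI_RI_SHIFT_REASONCODE);
}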
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved1; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 LUN[8]; /*0x0C */
+ U32 Reserved4[7]; /*0x14 */
+ U16 TaskMID; /*0x30 */
+ U16 Reserved5; /*0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ *pMpi2SCSITaskManagementRequest_t;
+
+/*TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/*obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
+ (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/*MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
+/*SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 ResponseCode; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TerminationCount; /*0x14 */
+ U32 ResponseInfo; /*0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t;
+
+/*ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/*SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 SlotStatus; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, *pMpi2SepRequest_t;
+
+/*Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/*Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+
+/*SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 SlotStatus; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, *pMpi2SepReply_t;
+
+/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
new file mode 100644
index 00000000000..0de425d8fd7
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1665 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.21
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocolFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ * Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/*IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 MsgVersion; /*0x0C */
+ U16 HeaderVersion; /*0x0E */
+ U32 Reserved5; /*0x10 */
+ U16 Reserved6; /*0x14 */
+ U8 Reserved7; /*0x16 */
+ U8 HostMSIxVectors; /*0x17 */
+ U16 Reserved8; /*0x18 */
+ U16 SystemRequestFrameSize; /*0x1A */
+ U16 ReplyDescriptorPostQueueDepth; /*0x1C */
+ U16 ReplyFreeQueueDepth; /*0x1E */
+ U32 SenseBufferAddressHigh; /*0x20 */
+ U32 SystemReplyAddressHigh; /*0x24 */
+ U64 SystemRequestFrameBaseAddress; /*0x28 */
+ U64 ReplyDescriptorPostQueueAddress; /*0x30 */
+ U64 ReplyFreeQueueAddress; /*0x38 */
+ U64 TimeStamp; /*0x40 */
+} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t;
+
+/*WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+/*MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+
+/*minimum depth for the Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN (16)
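MsgVersion and HeaderVersion are packed from major/minor and unit/dev components with the masks and shifts above, and ReplyDescriptorPostQueueDepth must not drop below MPI2_RDPQ_DEPTH_MIN. A hedged sketch of filling those fields; the numeric version components are placeholders (real code would take the MPI2_VERSION_* / MPI2_HEADER_VERSION_* constants from mpi2.h, which is not part of this hunk), and the little-endian byte-swapping a driver would do is omitted.

/* Sketch: pack the version words of an IOCInit request and clamp the reply
 * descriptor post queue depth to the documented minimum. The 0x02/0x05 and
 * 0x00 components below are placeholders, not authoritative values. */
static void iocinit_fill_versions(MPI2_IOC_INIT_REQUEST *req, U16 rdpq_depth)
{
	req->MsgVersion =
	    (U16)((0x02 << MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT) |
		  (0x05 << MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT));
	req->HeaderVersion =
	    (U16)((0x00 << MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT) |
		  (0x00 << MPI2_IOCINIT_HDRVERSION_DEV_SHIFT));

	if (rdpq_depth < MPI2_RDPQ_DEPTH_MIN)
		rdpq_depth = MPI2_RDPQ_DEPTH_MIN;
	req->ReplyDescriptorPostQueueDepth = rdpq_depth;
}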
+
+/*IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY,
+ Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t;
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/*IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST,
+ Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t;
+
+/*IOCFacts Reply message */
+typedef struct _MPI2_IOC_FACTS_REPLY {
+ U16 MsgVersion; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 HeaderVersion; /*0x04 */
+ U8 IOCNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 IOCExceptions; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 MaxChainDepth; /*0x14 */
+ U8 WhoInit; /*0x15 */
+ U8 NumberOfPorts; /*0x16 */
+ U8 MaxMSIxVectors; /*0x17 */
+ U16 RequestCredit; /*0x18 */
+ U16 ProductID; /*0x1A */
+ U32 IOCCapabilities; /*0x1C */
+ MPI2_VERSION_UNION FWVersion; /*0x20 */
+ U16 IOCRequestFrameSize; /*0x24 */
+ U16 IOCMaxChainSegmentSize; /*0x26 */
+ U16 MaxInitiators; /*0x28 */
+ U16 MaxTargets; /*0x2A */
+ U16 MaxSasExpanders; /*0x2C */
+ U16 MaxEnclosures; /*0x2E */
+ U16 ProtocolFlags; /*0x30 */
+ U16 HighPriorityCredit; /*0x32 */
+ U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */
+ U8 ReplyFrameSize; /*0x36 */
+ U8 MaxVolumes; /*0x37 */
+ U16 MaxDevHandle; /*0x38 */
+ U16 MaxPersistentEntries; /*0x3A */
+ U16 MinDevHandle; /*0x3C */
+ U16 Reserved4; /*0x3E */
+} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
+ Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
+
+/*MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
+/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+
+/*defines for WhoInit field are after the IOCInit Request */
+
+/*ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/*IOCCapabilities */
+#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/*PortFacts Request message */
+typedef struct _MPI2_PORT_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST,
+ Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t;
+
+/*PortFacts Reply message */
+typedef struct _MPI2_PORT_FACTS_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 Reserved5; /*0x14 */
+ U8 PortType; /*0x15 */
+ U16 Reserved6; /*0x16 */
+ U16 MaxPostedCmdBuffers; /*0x18 */
+ U16 Reserved7; /*0x1A */
+} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY,
+ Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t;
+
+/*PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/*PortEnable Request message */
+typedef struct _MPI2_PORT_ENABLE_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST,
+ Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t;
+
+/*PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY,
+ Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t;
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/*EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */
+ U16 SASBroadcastPrimitiveMasks; /*0x24 */
+ U16 SASNotifyPrimitiveMasks; /*0x26 */
+ U32 Reserved8; /*0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+ *PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+ Mpi2EventNotificationRequest_t,
+ *pMpi2EventNotificationRequest_t;
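Per the 12-18-07 history entry, EventMasks are masks rather than enables: a set bit suppresses delivery of the corresponding event code, so host code typically starts from all-ones words and clears the bits for the events it wants. That convention matches how the Linux mpt2sas/mpt3sas drivers treat the field, but the sketch below is illustrative, not the driver's code; the helper name is hypothetical and endian conversion is omitted.

/* Sketch: clear (unmask) one event code in the EventMasks array, assuming
 * the convention that a set bit masks the event off. Event codes beyond
 * 32 * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS are ignored. */
static void event_notification_unmask(MPI2_EVENT_NOTIFICATION_REQUEST *req,
				      U16 event)
{
	U16 word = event / 32;

	if (word >= MPI2_EVENT_NOTIFY_EVENTMASK_WORDS)
		return;
	req->EventMasks[word] &= ~(1U << (event % 32));
}

/* Typical use: memset(req->EventMasks, 0xFF, sizeof(req->EventMasks)); then
 * event_notification_unmask(req, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); etc. */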
+
+/*EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
+ U16 EventDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 AckRequired; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 Event; /*0x14 */
+ U16 Reserved4; /*0x16 */
+ U32 EventContext; /*0x18 */
+ U32 EventData[1]; /*0x1C */
+} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+ Mpi2EventNotificationReply_t,
+ *pMpi2EventNotificationReply_t;
+
+/*AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/*Event */
+#define MPI2_EVENT_LOG_DATA (0x0001)
+#define MPI2_EVENT_STATE_CHANGE (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_IR_VOLUME (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
+#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
+
+/*Log Entry Added Event data */
+
+/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ Mpi2EventDataLogEntryAdded_t,
+ *pMpi2EventDataLogEntryAdded_t;
+
+/*GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+ U8 GPIONum; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ Mpi2EventDataGpioInterrupt_t,
+ *pMpi2EventDataGpioInterrupt_t;
+
+/*Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /*0x00 */
+ U8 SensorNum; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U16 CurrentTemperature; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+ *PTR_MPI2_EVENT_DATA_TEMPERATURE,
+ Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t;
+
+/*Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+/*Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ U32 HostData[1]; /*0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+ Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
+
+/*Power Performance Change Event */
+
+typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
+ U8 CurrentPowerMode; /*0x00 */
+ U8 PreviousPowerMode; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ Mpi2EventDataPowerPerfChange_t,
+ *pMpi2EventDataPowerPerfChange_t;
+
+/*defines for CurrentPowerMode and PreviousPowerMode fields */
+#define MPI2_EVENT_PM_INIT_MASK (0xC0)
+#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_INIT_HOST (0x40)
+#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80)
+#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI2_EVENT_PM_MODE_MASK (0x07)
+#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01)
+#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04)
+#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05)
+#define MPI2_EVENT_PM_MODE_STANDBY (0x06)
+
+/*Hard Reset Received Event data */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
+ U8 Reserved1; /*0x00 */
+ U8 Port; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ Mpi2EventDataHardResetReceived_t,
+ *pMpi2EventDataHardResetReceived_t;
+
+/*Task Set Full Event data */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL {
+ U16 DevHandle; /*0x00 */
+ U16 CurrentDepth; /*0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+ Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t;
+
+/*SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 SASAddress; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ *pMpi2EventDataSasDeviceStatusChange_t;
+
+/*SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+/*Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS {
+ U16 VolDevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 RAIDOperation; /*0x04 */
+ U8 PercentComplete; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ *pMpi2EventDataIrOperationStatus_t;
+
+/*Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+/*Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME {
+ U16 VolDevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 NewValue; /*0x04 */
+ U32 PreviousValue; /*0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t;
+
+/*Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK {
+ U16 Reserved1; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysDiskNum; /*0x03 */
+ U16 PhysDiskDevHandle; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U32 NewValue; /*0x0C */
+ U32 PreviousValue; /*0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t,
+ *pMpi2EventDataIrPhysicalDisk_t;
+
+/*Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Configuration Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 ReasonCode; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t;
+
+/*IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/*IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST {
+ U8 NumElements; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 Reserved2; /*0x02 */
+ U8 ConfigNum; /*0x03 */
+ U32 Flags; /*0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT
+ ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ *pMpi2EventDataIrConfigChangeList_t;
+
+/*IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
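+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *because ConfigElement[] is declared with a build-time count of one, the
+ *list must be walked using the NumElements field reported by the IOC.
+ *The function name is hypothetical.
+ */
+static inline void mpi2_walk_ir_config_change_list(
+	PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST event_data)
+{
+	U8 i;
+
+	for (i = 0; i < event_data->NumElements; i++) {
+		PTR_MPI2_EVENT_IR_CONFIG_ELEMENT element =
+			&event_data->ConfigElement[i];
+		/*e.g. compare element->ReasonCode against
+		 *MPI2_EVENT_IR_CHANGE_RC_ADDED or _RC_REMOVED here
+		 */
+		(void)element;
+	}
+}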
+
+/*SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 DiscoveryStatus; /*0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t;
+
+/*SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/*SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/*SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+/*SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 PortWidth; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ *pMpi2EventDataSasBroadcastPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/*SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 Reserved1; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ Mpi2EventDataSasNotifyPrimitive_t,
+ *pMpi2EventDataSasNotifyPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+/*SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE {
+ U8 ReasonCode; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U16 DevHandle; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ *pMpi2EventDataSasInitDevStatusChange_t;
+
+/*SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+/*SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW {
+ U16 MaxInit; /*0x00 */
+ U16 CurrentInit; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ *pMpi2EventDataSasInitTableOverflow_t;
+
+/*SAS Topology Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 LinkRate; /*0x02 */
+ U8 PhyStatus; /*0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 ExpanderDevHandle; /*0x02 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPhyNum; /*0x09 */
+ U8 ExpStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+ PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ *pMpi2EventDataSasTopologyChangeList_t;
+
+/*values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/*defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+
+/*values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/*values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
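+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *PHY[] is sized by the build-time count above, so NumEntries must be used
+ *at runtime, and LinkRate/PhyStatus are decoded with the masks and shifts
+ *just defined. The function name is hypothetical; multi-byte fields are
+ *little-endian on the wire (endian conversion omitted for brevity).
+ */
+static inline void mpi2_walk_sas_topo_change_list(
+	PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST event_data)
+{
+	U8 i;
+
+	for (i = 0; i < event_data->NumEntries; i++) {
+		U8 link_rate = event_data->PHY[i].LinkRate;
+		U8 current_rate = (link_rate &
+		    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
+		    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
+		U8 previous_rate = (link_rate &
+		    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
+		    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
+		U8 reason_code = event_data->PHY[i].PhyStatus &
+		    MPI2_EVENT_SAS_TOPO_RC_MASK;
+
+		/*e.g. reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED
+		 *with current_rate >= MPI2_EVENT_SAS_TOPO_LR_RATE_1_5
+		 */
+		(void)current_rate;
+		(void)previous_rate;
+		(void)reason_code;
+	}
+}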
+
+/*SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
+ U16 EnclosureHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U64 EnclosureLogicalID; /*0x04 */
+ U16 NumSlots; /*0x0C */
+ U16 StartSlot; /*0x0E */
+ U32 PhyBits; /*0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ *pMpi2EventDataSasEnclDevStatusChange_t;
+
+/*SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+/*SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 PhyEventCode; /*0x0C */
+ U8 PhyNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 PhyEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t,
+ *pMpi2EventDataSasPhyCounter_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h
+ *for the PhyEventCode field */
+
+/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h
+ *for the CounterType field */
+
+/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h
+ *for the TimeUnits field */
+
+/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h
+ *for the ThresholdFlags field */
+
+/*SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
+ U8 ReasonCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ *PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t;
+
+/*SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+/*Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /*0x00 */
+ U8 NegotiatedLinkRate; /*0x01 */
+ U8 PhyNum; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1; /*0x04 */
+ U8 InitialFrame[28]; /*0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t;
+
+/*values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h
+ *for the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t,
+ *pMpi2EventDataHbdPhy_t;
+
+/*values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/*EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Event; /*0x0C */
+ U16 Reserved5; /*0x0E */
+ U32 EventContext; /*0x10 */
+} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t;
+
+/*EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, *pMpi2EventAckReply_t;
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/*SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 Reserved4; /*0x0C */
+ U8 DestVF_ID; /*0x0D */
+ U16 Reserved5; /*0x0E */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 Reserved8; /*0x18 */
+ U32 Reserved9; /*0x1C */
+ U32 Reserved10; /*0x20 */
+ U32 HostData[1]; /*0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+ *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+ Mpi2SendHostMessageRequest_t,
+ *pMpi2SendHostMessageRequest_t;
+
+/*SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+ Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t;
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/*MPI v2.0 FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/*MPI v2.0 FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t;
+
+/*MPI v2.5 FWDownload Request message */
+typedef struct _MPI25_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST,
+ Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest;
+
+/*FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t;
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/*MPI v2.0 FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+/*MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t;
+
+/*MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t;
+
+/*FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ActualImageSize; /*0x14 */
+} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, *pMpi2FWUploadReply_t;
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 Reserved60; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+ U32 PackageNameWhat; /*0xAC */
+ U8 PackageName[32]; /*0xB0 */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
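+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *a candidate firmware image is typically sanity-checked against the four
+ *signature fields before anything else in it is trusted. The function
+ *name is hypothetical; the header is stored little-endian, so the direct
+ *comparisons assume a little-endian host (or prior byte swapping).
+ */
+static inline int mpi2_fw_header_signature_valid(
+	PTR_MPI2_FW_IMAGE_HEADER header)
+{
+	if ((header->Signature & MPI2_FW_HEADER_SIGNATURE_MASK) !=
+	    MPI2_FW_HEADER_SIGNATURE)
+		return 0;
+	return header->Signature0 == MPI2_FW_HEADER_SIGNATURE0 &&
+	    header->Signature1 == MPI2_FW_HEADER_SIGNATURE1 &&
+	    header->Signature2 == MPI2_FW_HEADER_SIGNATURE2;
+}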
+
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Checksum; /*0x04 */
+ U32 ImageSize; /*0x08 */
+ U32 NextImageHeaderOffset; /*0x0C */
+ U32 PackageVersion; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U32 Reserved5; /*0x1C */
+ U8 IdentifyString[32]; /*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
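+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *extended images are chained behind the FW image header through the
+ *NextImageHeaderOffset fields (offsets are measured from the start of the
+ *complete image; a value of zero ends the chain). The function name is
+ *hypothetical and endian conversion is omitted for brevity.
+ */
+static inline PTR_MPI2_EXT_IMAGE_HEADER mpi2_first_ext_image(U8 *image,
+	U32 image_bytes)
+{
+	PTR_MPI2_FW_IMAGE_HEADER fw_header = (PTR_MPI2_FW_IMAGE_HEADER)image;
+	U32 offset = fw_header->NextImageHeaderOffset;
+
+	if (offset == 0 || offset + MPI2_EXT_IMAGE_HEADER_SIZE > image_bytes)
+		return (PTR_MPI2_EXT_IMAGE_HEADER)0;
+	return (PTR_MPI2_EXT_IMAGE_HEADER)(image + offset);
+}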
+
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+ U8 RegionType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 RegionOffset; /*0x04 */
+ U32 RegionSize; /*0x08 */
+ U32 Reserved3; /*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+ U32 FlashSize; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 SizeOfRegion; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U16 NumberOfLayouts; /*0x04 */
+ U16 RegionsPerLayout; /*0x06 */
+ U16 MinimumSectorAlignment; /*0x08 */
+ U16 Reserved3; /*0x0A */
+ U32 Reserved4; /*0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
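+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *because both build-time counts above default to one, the regions of a
+ *layout must be walked with the runtime RegionsPerLayout count (and
+ *stepping between layouts likewise depends on the runtime counts, not on
+ *sizeof()). The function name is hypothetical.
+ */
+static inline void mpi2_walk_first_flash_layout(
+	PTR_MPI2_FLASH_LAYOUT_DATA layout_data)
+{
+	PTR_MPI2_FLASH_LAYOUT layout = &layout_data->Layout[0];
+	U16 i;
+
+	for (i = 0; i < layout_data->RegionsPerLayout; i++) {
+		PTR_MPI2_FLASH_REGION region = &layout->Region[i];
+		/*e.g. region->RegionType == MPI2_FLASH_REGION_FIRMWARE */
+		(void)region;
+	}
+}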
+
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+ U16 DeviceID; /*0x00 */
+ U16 VendorID; /*0x02 */
+ U16 DeviceIDMask; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U8 LowPCIRev; /*0x08 */
+ U8 HighPCIRev; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 NumberOfDevices; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U32 Reserved3; /*0x04 */
+ MPI2_SUPPORTED_DEVICE
+ SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
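+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *one plausible reading of a supported-device entry is a masked PCI
+ *device/vendor ID match bounded by the Low/HighPCIRev revision range;
+ *treat the exact matching rules as an assumption, not specification text.
+ *The function name is hypothetical; endian conversion is omitted.
+ */
+static inline int mpi2_device_listed_as_supported(
+	PTR_MPI2_SUPPORTED_DEVICES_DATA data,
+	U16 vendor_id, U16 device_id, U8 pci_rev)
+{
+	U8 i;
+
+	for (i = 0; i < data->NumberOfDevices; i++) {
+		PTR_MPI2_SUPPORTED_DEVICE dev = &data->SupportedDevice[i];
+
+		if (dev->VendorID == vendor_id &&
+		    (device_id & dev->DeviceIDMask) ==
+		    (dev->DeviceID & dev->DeviceIDMask) &&
+		    pci_rev >= dev->LowPCIRev && pci_rev <= dev->HighPCIRev)
+			return 1;
+	}
+	return 0;
+}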
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+ U32 BootFlags; /*0x00 */
+ U32 ImageSize; /*0x04 */
+ U32 Signature0; /*0x08 */
+ U32 Signature1; /*0x0C */
+ U32 Signature2; /*0x10 */
+ U32 ResetVector; /*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/*PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Parameter1; /*0x0C */
+ U8 Parameter2; /*0x0D */
+ U8 Parameter3; /*0x0E */
+ U8 Parameter4; /*0x0F */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t;
+
+/*defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/*Parameter1 contains a PHY number */
+/*Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
+ * Feature */
+/*Parameter1 contains SAS port width modulation group number */
+/*Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/*Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/*Parameter4 is reserved */
+
+/*this next set (_PCIE_LINK) is obsolete */
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/*Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */
+/*Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/*Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/*Parameter2, Parameter3, and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/
+/*Parameter1 indicates host action regarding global power management mode */
+#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01)
+#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02)
+#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03)
+/*Parameter2 indicates the requested global power management mode */
+#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01)
+#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08)
+#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40)
+/*Parameter3 and Parameter4 are reserved */
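+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *filling a PowerManagementControl request for the direct-attached PHY
+ *power-condition feature described above. The function name and the
+ *memset()-based zeroing are the editor's choices; the function code
+ *MPI2_FUNCTION_PWR_MGMT_CONTROL is assumed to come from mpi2.h and
+ *memset() from the usual string header.
+ */
+static inline void mpi2_build_da_phy_power_cond_request(
+	PTR_MPI2_PWR_MGMT_CONTROL_REQUEST request, U8 phy_num)
+{
+	memset(request, 0, sizeof(*request));
+	request->Function = MPI2_FUNCTION_PWR_MGMT_CONTROL;
+	request->Feature = MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND;
+	request->Parameter1 = phy_num; /*PHY number */
+	request->Parameter2 = MPI2_PM_CONTROL_PARAM2_SLUMBER; /*action */
+	/*Parameter3 and Parameter4 are reserved and stay zero */
+}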
+
+/*PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
new file mode 100644
index 00000000000..d1d9866cf30
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.08
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA {
+ U8 RateToChange; /*0x00 */
+ U8 RateOrMode; /*0x01 */
+ U16 DataScrubDuration; /*0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ *pMpi2RaidActionStartRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ *pMpi2RaidActionStopRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE {
+ U8 HotSparePool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 DevHandle; /*0x02 */
+} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t;
+
+/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE {
+ U8 Flags; /*0x00 */
+ U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t,
+ *pMpi2RaidActionFwUpdateMode_t;
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA {
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, *pMpi2RaidActionData_t;
+
+/*RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */
+} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t;
+
+/*RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*RAID Volume Creation Structure */
+
+/*
+ *The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U16 PhysDiskDevHandle; /*0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT {
+ U8 NumPhysDisks; /*0x00 */
+ U8 VolumeType; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 VolumeCreationFlags; /*0x04 */
+ U32 VolumeSettings; /*0x08 */
+ U8 Reserved2; /*0x0C */
+ U8 ResyncRate; /*0x0D */
+ U16 DataScrubDuration; /*0x0E */
+ U64 VolumeMaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U8 Name[16]; /*0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK
+ PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t,
+ *pMpi2RaidVolumeCreationStruct_t;
+
+/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/*defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/*The following is an obsolete define.
+ *It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
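+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *sizing the creation structure for a volume with num_disks members
+ *(assuming num_disks >= MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS). Note that
+ *the obsolete 8-bit define above matches the current one once shifted:
+ *(0x80 << 24) == 0x80000000 == MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS.
+ *The helper name is hypothetical.
+ */
+static inline U32 mpi2_raid_vol_creation_size(U8 num_disks)
+{
+	return sizeof(MPI2_RAID_VOLUME_CREATION_STRUCT) +
+	    ((U32)num_disks - MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS) *
+	    sizeof(MPI2_RAID_VOLUME_PHYSDISK);
+}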
+
+/*RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION {
+ U32 Flags; /*0x00 */
+ U16 DevHandle0; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U16 DevHandle1; /*0x08 */
+ U16 Reserved2; /*0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ *pMpi2RaidOnlineCapacityExpansion_t;
+
+/*RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /*0x00 */
+ U16 CandidateDevHandle; /*0x02 */
+ U32 Flags; /*0x04 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ Mpi2RaidCompatibilityInputStruct_t,
+ *pMpi2RaidCompatibilityInputStruct_t;
+
+/*defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+/*RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR {
+ U64 TotalBlocks; /*0x00 */
+ U64 BlocksRemaining; /*0x08 */
+ U32 Flags; /*0x10 */
+} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t;
+
+/*defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+
+/*RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 GenericAttributes; /*0x04 */
+ U32 OEMSpecificAttributes; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ Mpi2RaidCompatibilityResultStruct_t,
+ *pMpi2RaidCompatibilityResultStruct_t;
+
+/*defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/*defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+/*RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA {
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t;
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */
+} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
new file mode 100644
index 00000000000..b4e7084aba3
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.07
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ *Values for SASStatus.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+/*
+ *Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ *data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
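+
+/*
+ *Editor's illustrative sketch, not part of the MPI specification headers:
+ *DeviceInfo packs a device-type code in its low three bits and capability
+ *flags above it. The helper names are hypothetical.
+ */
+static inline U32 mpi2_sas_device_type(U32 device_info)
+{
+	/*e.g. MPI2_SAS_DEVICE_INFO_END_DEVICE */
+	return device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE;
+}
+
+static inline int mpi2_sas_is_ssp_target(U32 device_info)
+{
+	return (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) != 0;
+}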
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/*SMP Passthrough Request Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 RequestDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U64 SASAddress; /*0x10 */
+ U32 Reserved3; /*0x18 */
+ U32 Reserved4; /*0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */
+} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SMP Passthrough Reply Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ResponseDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U8 ResponseData[4]; /*0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+typedef union _MPI2_SATA_PT_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */
+ MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */
+} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION,
+ Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t;
+
+/*SATA Passthrough Request Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 DataLength; /*0x18 */
+ U8 CommandFIS[20]; /*0x1C */
+ MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/
+} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t,
+ *pMpi2SataPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SATA Passthrough Reply Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 StatusFIS[20]; /*0x14 */
+ U32 StatusControlRegisters; /*0x28 */
+ U32 TransferCount; /*0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t;
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/*SAS IO Unit Control Request Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U8 PhyNum; /*0x0E */
+ U8 PrimFlags; /*0x0F */
+ U32 Primitive; /*0x10 */
+ U8 LookupMethod; /*0x14 */
+ U8 Reserved5; /*0x15 */
+ U16 SlotNumber; /*0x16 */
+ U64 LookupAddress; /*0x18 */
+ U32 IOCParameterValue; /*0x20 */
+ U32 Reserved7; /*0x24 */
+ U32 Reserved8; /*0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t,
+ *pMpi2SasIoUnitControlRequest_t;
+
+/*values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12)
+#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/*values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/*values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+/*SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
new file mode 100644
index 00000000000..71453d11c1c
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.09
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/*defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t;
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Flags; /*0x0C */
+} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t;
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 Flags; /*0x10 */
+ U32 DataLength; /*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ *pMpi2ToolboxDiagDataUploadRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+ U32 DiagDataLength; /*00h */
+ U8 FormatCode; /*04h */
+ U8 Reserved1; /*05h */
+ U16 Reserved2; /*06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t;
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/*Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 SGLFlags; /*0x16 */
+ U8 Flags; /*0x17 */
+ U16 TxDataLength; /*0x18 */
+ U16 RxDataLength; /*0x1A */
+ U32 Reserved8; /*0x1C */
+ U32 Reserved9; /*0x20 */
+ U32 Reserved10; /*0x24 */
+ U32 Reserved11; /*0x28 */
+ U32 Reserved12; /*0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ *pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/*values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+/*Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 IstwiStatus; /*0x16 */
+ U8 Reserved6; /*0x17 */
+ U16 TxDataCount; /*0x18 */
+ U16 RxDataCount; /*0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t;
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Reserved5; /*0x0C */
+ U8 PhysicalPort; /*0x0D */
+ U8 Reserved6; /*0x0E */
+ U8 Flags; /*0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
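+
+/*
+ * Illustrative sketch (not part of the original header): a caller would
+ * typically zero a Mpi2ToolboxBeaconRequest_t, set Tool to
+ * MPI2_TOOLBOX_BEACON_TOOL, Function to MPI2_FUNCTION_TOOLBOX, select the
+ * PhysicalPort of interest, and set Flags to
+ * MPI2_TOOLBOX_FLAGS_BEACONMODE_ON to light the enclosure beacon.
+ */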
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ *pMpi2ToolboxDiagnosticCliRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI25_SGE_IO_UNION SGL; /*0x70 */
+} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi25ToolboxDiagnosticCliRequest_t,
+ *pMpi25ToolboxDiagnosticCliRequest_t;
+
+/*Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ReturnedDataLength; /*0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ *pMpi2ToolboxDiagnosticCliReply_t;
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U64 BufferAddress; /*0x0C */
+ U32 BufferLength; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+ U32 Flags; /*0x20 */
+ U32 ProductSpecific[23]; /*0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t;
+
+/*values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/*values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/*count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/*values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferLength; /*0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t;
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
new file mode 100644
index 00000000000..516f959573f
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2007 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+/*******************************************************************************
+ * The MPI2_POINTER near/far pointer abstraction used by the original MPI
+ * headers is not used in this copy; the pointer typedefs below are declared
+ * with plain C pointers.
+ */
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
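+/*
+ * Note: U64 is forced to 4-byte alignment because MPI message frames place
+ * 64-bit fields at 4-byte offsets (e.g. BufferAddress at 0x0C in the
+ * diagnostic buffer post request); natural 8-byte alignment would insert
+ * padding and break the wire layout.
+ */
+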
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__ ((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
+
+#endif
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
new file mode 100644
index 00000000000..04f8010f077
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -0,0 +1,4840 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/kthread.h>
+#include <linux/aer.h>
+
+
+#include "mpt3sas_base.h"
+
+static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+
+#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+ /* maximum controller queue depth */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
+static int max_queue_depth = -1;
+module_param(max_queue_depth, int, 0);
+MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+
+static int max_sgl_entries = -1;
+module_param(max_sgl_entries, int, 0);
+MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
+
+static int msix_disable = -1;
+module_param(msix_disable, int, 0);
+MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+
+static int mpt3sas_fwfault_debug;
+MODULE_PARM_DESC(mpt3sas_fwfault_debug,
+ " enable detection of firmware fault and halt firmware - (default=0)");
+
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ *
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ return 0;
+}
+module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt3sas_fwfault_debug, 0644);
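+
+/*
+ * Writing mpt3sas_fwfault_debug through the module parameter interface at
+ * runtime propagates the new value to every registered adapter via the
+ * setter above.
+ */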
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 otherwise.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if ((ioc == NULL))
+ return -1;
+
+ pdev = ioc->pdev;
+ if ((pdev == NULL))
+ return -1;
+ pci_stop_and_remove_bus_device(pdev);
+ return 0;
+}
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
+ ioc->name);
+
+ /*
+ * Call the _scsih_flush_pending_cmds callback so that all pending
+ * commands are flushed back to the OS. This is required to avoid a
+ * deadlock at the block layer. A dead IOC will fail the diag reset,
+ * and this call is safe since a dead ioc never returns any command
+ * back from the hardware.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
+ "mpt3sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p))
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ else
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ return; /* don't rearm timer */
+ }
+
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_OPERATIONAL)
+ return; /* don't rearm timer */
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ rearm_timer:
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+{
+ pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
+ ioc->name, fault_code);
+}
+
+/**
+ * mpt3sas_halt_firmware - halts mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout related issues. Writing 0xC0FFEE00 to the
+ * doorbell register halts the controller firmware. The intent is to
+ * stop both driver and firmware so the end user can obtain the ring
+ * buffer from the controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc , doorbell);
+ else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
+ ioc->name);
+ }
+
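+ /* fwfault_debug == 2: spin forever so firmware state can still be pulled
+ * over the controller UART; any other setting panics the host instead
+ */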
+ if (ioc->fwfault_debug == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
+ switch (ioc_status) {
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_BUSY:
+ desc = "busy";
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ desc = "invalid sgl";
+ break;
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ desc = "internal error";
+ break;
+ case MPI2_IOCSTATUS_INVALID_VPID:
+ desc = "invalid vpid";
+ break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ desc = "insufficient resources";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ desc = "invalid field";
+ break;
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ desc = "invalid state";
+ break;
+ case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+ desc = "op state not supported";
+ break;
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
+ desc = "config invalid action";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
+ desc = "config invalid type";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
+ desc = "config invalid page";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
+ desc = "config invalid data";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
+ desc = "config no defaults";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
+ desc = "config cant commit";
+ break;
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ break;
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc = "eedp app tag error";
+ break;
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
+ desc = "target invalid io index";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ABORTED:
+ desc = "target aborted";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
+ desc = "target no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ ioc->name, desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware async events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * Return nothing.
+ */
+static void
+_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ desc = "IR Operation Status";
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "SAS Enclosure Device Status Change";
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ desc = "IR Volume";
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ desc = "IR Physical Disk";
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ desc = "IR Configuration Change List";
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ desc = "Log Entry Added";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+}
+#endif
+
+/**
+ * _base_sas_log_info - verbose translation of firmware log info
+ * @ioc: per adapter object
+ * @log_info: log info
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+{
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
+ union loginfo_type sas_loginfo;
+ char *originator_str = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+ return;
+
+ switch (sas_loginfo.dw.originator) {
+ case 0:
+ originator_str = "IOP";
+ break;
+ case 1:
+ originator_str = "PL";
+ break;
+ case 2:
+ originator_str = "IR";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ ioc->name, log_info,
+ originator_str, sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
+
+/**
+ * _base_display_reply_info - display IOCStatus and IOCLogInfo from a reply
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return nothing.
+ */
+static void
+_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+ u32 loginfo = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
+ (ioc->logging_level & MPT_DEBUG_REPLY)) {
+ _base_sas_ioc_info(ioc , mpi_reply,
+ mpt3sas_base_get_msg_frame(ioc, smid));
+ }
+#endif
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
+ _base_sas_log_info(ioc, loginfo);
+ }
+
+ if (ioc_status || loginfo) {
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
+ }
+}
+
+/**
+ * mpt3sas_base_done - base internal command completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware async events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return 1;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _base_display_event_data(ioc, mpi_reply);
+#endif
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt3sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt3sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return 1;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return callback index.
+ */
+static u8
+_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ int i;
+ u8 cb_idx;
+
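+ /*
+ * The smid space is split into three consecutive pools: scsi I/O
+ * requests, high-priority requests, then internal requests; anything
+ * past hba_queue_depth is treated as invalid (0xFF).
+ */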
+ if (smid < ioc->hi_priority_smid) {
+ i = smid - 1;
+ cb_idx = ioc->scsi_lookup[i].cb_idx;
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
+ return cb_idx;
+}
+
+/**
+ * _base_mask_interrupts - disable interrupts
+ * @ioc: per adapter object
+ *
+ * Disabling ResetIRQ, Reply and Doorbell Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ ioc->mask_interrupts = 1;
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
+ writel(him_register, &ioc->chip->HostInterruptMask);
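+ /* read the mask back to flush the posted write before returning */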
+ readl(&ioc->chip->HostInterruptMask);
+}
+
+/**
+ * _base_unmask_interrupts - enable interrupts
+ * @ioc: per adapter object
+ *
+ * Enabling only Reply Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register &= ~MPI2_HIM_RIM;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ ioc->mask_interrupts = 0;
+}
+
+union reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
+ *
+ * Return IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ union reply_descriptor rd;
+ u32 completed_cmds;
+ u8 request_desript_type;
+ u16 smid;
+ u8 cb_idx;
+ u32 reply;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+ Mpi2ReplyDescriptorsUnion_t *rpf;
+ u8 rc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
+ request_desript_type = rpf->Default.ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ completed_cmds = 0;
+ cb_idx = 0xFF;
+ do {
+ rd.word = le64_to_cpu(rpf->Words);
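+ /* a descriptor of all ones marks an unused entry; nothing left to reap */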
+ if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ goto out;
+ reply = 0;
+ smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+ if (request_desript_type ==
+ MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
+ request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, 0);
+ if (rc)
+ mpt3sas_base_free_smid(ioc, smid);
+ }
+ } else if (request_desript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
+ reply = le32_to_cpu(
+ rpf->AddressReply.ReplyFrameAddress);
+ if (reply > ioc->reply_dma_max_address ||
+ reply < ioc->reply_dma_min_address)
+ reply = 0;
+ if (smid) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
+ if (reply)
+ _base_display_reply_info(ioc,
+ smid, msix_index, reply);
+ if (rc)
+ mpt3sas_base_free_smid(ioc,
+ smid);
+ }
+ } else {
+ _base_async_event(ioc, msix_index, reply);
+ }
+
+ /* reply free queue handling */
+ if (reply) {
+ ioc->reply_free_host_index =
+ (ioc->reply_free_host_index ==
+ (ioc->reply_free_queue_depth - 1)) ?
+ 0 : ioc->reply_free_host_index + 1;
+ ioc->reply_free[ioc->reply_free_host_index] =
+ cpu_to_le32(reply);
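+ /* order the reply_free[] update before the host index write below,
+ * which hands the entry back to the IOC
+ */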
+ wmb();
+ writel(ioc->reply_free_host_index,
+ &ioc->chip->ReplyFreeHostIndex);
+ }
+ }
+
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
+ (ioc->reply_post_queue_depth - 1)) ? 0 :
+ reply_q->reply_post_host_index + 1;
+ request_desript_type =
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ completed_cmds++;
+ if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ goto out;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
+ else
+ rpf++;
+ } while (1);
+
+ out:
+
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ wmb();
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - does the controller support multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to the OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
+ * mpt3sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_release_callback_handler(u8 cb_idx)
+{
+ mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns the assigned callback index (cb_idx).
+ */
+u8
+mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+ u8 cb_idx;
+
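+ /* scan downward from the highest index for a free slot (index 0 is
+ * skipped by the search)
+ */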
+ for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+ if (mpt_callbacks[cb_idx] == NULL)
+ break;
+
+ mpt_callbacks[cb_idx] = cb_func;
+ return cb_idx;
+}
+
+/**
+ * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_initialize_callback_handler(void)
+{
+ u8 cb_idx;
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+ mpt3sas_base_release_callback_handler(cb_idx);
+}
+
+
+/**
+ * _base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple32_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le32(dma_addr);
+}
+
+
+/**
+ * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple64_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_get_chain_buffer_tracker - obtain chain tracker
+ * @ioc: per adapter object
+ * @smid: smid associated to an IO request
+ *
+ * Returns chain tracker(from ioc->free_chain_list)
+ */
+static struct chain_tracker *
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct chain_tracker *chain_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "chain buffers not available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+ struct chain_tracker, tracker_list);
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->scsi_lookup[smid - 1].chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return chain_req;
+}
+
+
+/**
+ * _base_build_sg - build generic sg
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u32 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ }
+}
+
+/* IEEE format sgls */
+
+/**
+ * _base_add_sg_single_ieee - add sg element for IEEE format
+ * @paddr: virtual address for SGE
+ * @flags: SGE flags
+ * @chain_offset: number of 128 byte elements from start of segment
+ * @length: data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
+ dma_addr_t dma_addr)
+{
+ Mpi25IeeeSgeChain64_t *sgel = paddr;
+
+ sgel->Flags = flags;
+ sgel->NextChainOffset = chain_offset;
+ sgel->Length = cpu_to_le32(length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+ _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+}
+
+/**
+ * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
+ * @ioc: per adapter object
+ * @scmd: scsi command
+ * @smid: system request message index
+ * Context: none.
+ *
+ * The main routine that builds scatter gather table from a given
+ * scsi request sent via the .queuecommand main handler.
+ *
+ * Returns 0 success, anything else error
+ */
+static int
+_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_offset;
+ u32 chain_length;
+ u32 chain_flags;
+ int sges_left;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 chain_sgl_flags;
+ struct chain_tracker *chain_req;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /* init scatter gather flags */
+ simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (!sges_left) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "pci_map_sg failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+
+ sg_local = &mpi_request->SGL;
+ sges_in_segment = (ioc->request_sz -
+ offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
+ (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ /* initializing the chain flags and pointers */
+ chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ do {
+ sges_in_segment = (sges_left <=
+ ioc->max_sges_in_chain_message) ? sges_left :
+ ioc->max_sges_in_chain_message;
+ chain_offset = (sges_left == sges_in_segment) ?
+ 0 : sges_in_segment;
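+ /* a chain_offset of zero marks the last chain segment */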
+ chain_length = sges_in_segment * ioc->sge_size_ieee;
+ if (chain_offset)
+ chain_length += ioc->sge_size_ieee;
+ _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
+ chain_offset, chain_length, chain_dma);
+
+ sg_local = chain;
+ if (!chain_offset)
+ goto fill_in_last_segment;
+
+ /* fill in chain segments */
+ while (sges_in_segment) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ } while (1);
+
+
+ fill_in_last_segment:
+
+ /* fill the last segment */
+ while (sges_left) {
+ if (sges_left == 1)
+ _base_add_sg_single_ieee(sg_local,
+ simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * _base_build_sg_ieee - build generic sg for IEEE format
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u8 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge_ieee(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size_ieee;
+
+ /* READ sgel last */
+ sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ }
+}
+
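+/* convert a page count into kilobytes (1 kB = 2^10 bytes) */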
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
+
+/**
+ * _base_config_dma_addressing - set dma addressing
+ * @ioc: per adapter object
+ * @pdev: PCI device struct
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct sysinfo s;
+ char *desc = NULL;
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pdev->dev);
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ desc = "64";
+ goto out;
+ }
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ desc = "32";
+ } else
+ return -ENODEV;
+
+ out:
+ si_meminfo(&s);
+ pr_info(MPT3SAS_FMT
+ "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, desc, convert_to_kb(s.totalram));
+
+ return 0;
+}
+
+/**
+ * _base_check_enable_msix - checks MSI-X capability
+ * @ioc: per adapter object
+ *
+ * Check to see if the card is capable of MSI-X, and set the number
+ * of available msix vectors.
+ */
+static int
+_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ int base;
+ u16 message_control;
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
+ ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
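+ /* the msix Message Control register encodes the table size as N-1 */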
+ if (ioc->msix_vector_count > 8)
+ ioc->msix_vector_count = 8;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "msix is supported, vector_count(%d)\n",
+ ioc->name, ioc->msix_vector_count));
+ return 0;
+}
+
+/**
+ * _base_free_irq - free irq
+ * @ioc: per adapter object
+ *
+ * Free each reply_queue in the list and release its IRQ.
+ */
+static void
+_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
+}
+
+/**
+ * _base_request_irq - request irq
+ * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
+ *
+ * Allocate a reply_queue, request its IRQ and insert it into the list.
+ */
+static int
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+ struct adapter_reply_queue *reply_q;
+ int r;
+
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT3SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT3SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assign an msix index to each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity, however it is not
+ * an exported symbol.
+ */
+static void
+_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+ int cpu_id;
+ int cpu_grouping, loop, grouping, grouping_mod;
+ int reply_queue;
+
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+ /* NUMA hardware bug workaround - drop to fewer reply queues */
+ if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
+ ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
+ reply_queue = 0;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->msix_index = reply_queue;
+ if (++reply_queue == ioc->reply_queue_count)
+ reply_queue = 0;
+ }
+ }
+
+ /* when there are more cpus than available msix vectors,
+ * then group cpus together on the same irq
+ */
+ if (ioc->cpu_count > ioc->msix_vector_count) {
+ grouping = ioc->cpu_count / ioc->msix_vector_count;
+ grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+ if (grouping < 2 || (grouping == 2 && !grouping_mod))
+ cpu_grouping = 2;
+ else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+ cpu_grouping = 4;
+ else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+ cpu_grouping = 8;
+ else
+ cpu_grouping = 16;
+ } else
+ cpu_grouping = 0;
+
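+ /* cpu_grouping is the number of consecutive cpus that share one reply
+ * queue; zero means each cpu simply takes the next queue in the list
+ */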
+ loop = 0;
+ reply_q = list_entry(ioc->reply_queue_list.next,
+ struct adapter_reply_queue, list);
+ for_each_online_cpu(cpu_id) {
+ if (!cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ } else {
+ if (loop < cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop++;
+ } else {
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop = 1;
+ }
+ }
+ }
+}
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (!ioc->msix_enable)
+ return;
+ pci_disable_msix(ioc->pdev);
+ ioc->msix_enable = 0;
+}
+
+/**
+ * _base_enable_msix - enables msix, falls back to io_apic on failure
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct msix_entry *entries, *a;
+ int r;
+ int i;
+ u8 try_msix = 0;
+
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
+ if (msix_disable == -1 || msix_disable == 0)
+ try_msix = 1;
+
+ if (!try_msix)
+ goto try_ioapic;
+
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "kcalloc failed @ at %s:%d/%s() !!!\n",
+ ioc->name, __FILE__, __LINE__, __func__));
+ goto try_ioapic;
+ }
+
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+ if (r) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "pci_enable_msix failed (r=%d) !!!\n",
+ ioc->name, r));
+ kfree(entries);
+ goto try_ioapic;
+ }
+
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
+ }
+
+ kfree(entries);
+ return 0;
+
+/* fall back to io_apic interrupt routing */
+ try_ioapic:
+
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+ u32 memap_sz;
+ u32 pio_sz;
+ int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
+ ioc->name, __func__));
+
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+
+ if (pci_request_selected_regions(pdev, ioc->bars,
+ MPT3SAS_DRIVER_NAME)) {
+ pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
+ ioc->name);
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+/* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+
+ if (_base_config_dma_addressing(ioc, pdev) != 0) {
+ pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pio_sz)
+ continue;
+ pio_chip = (u64)pci_resource_start(pdev, i);
+ pio_sz = pci_resource_len(pdev, i);
+ } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ if (memap_sz)
+ continue;
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
+ ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+ }
+ }
+
+ _base_mask_interrupts(ioc);
+ r = _base_enable_msix(ioc);
+ if (r)
+ goto out_fail;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
+ pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
+
+ /* Save PCI configuration state for recovery from PCI AER/EEH errors */
+ pci_save_state(pdev);
+ return 0;
+
+ out_fail:
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return r;
+}
+
+/**
+ * mpt3sas_base_get_msg_frame - obtain request mf pointer
+ * @ioc: per adapter object
+ * @smid: system request message index (smid zero is invalid)
+ *
+ * Returns virt pointer to message frame.
+ */
+void *
+mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->request + (smid * ioc->request_sz));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to sense buffer.
+ */
+void *
+mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the low 32bit address of the sense buffer.
+ */
+__le32
+mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
+ SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32-bit physical addr of the reply
+ *
+ * Converts the lower 32-bit physical addr into a virt address.
+ */
+void *
+mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * mpt3sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->internal_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->internal_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ * @scmd: pointer to scsi command object
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct scsiio_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->free_list.next,
+ struct scsiio_tracker, tracker_list);
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->hpr_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return 0;
+ }
+
+ request = list_entry(ioc->hpr_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ int i;
+ struct chain_tracker *chain_req, *next;
+
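+ /* smid ranges: 1 through scsiio_depth are scsiio requests,
+ * hi_priority_smid through internal_smid - 1 are hi-priority requests,
+ * and internal_smid through hba_queue_depth are internal requests
+ */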
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
+ }
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
+ }
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @ioc: per adapter object
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
+ * care of 32 bit environments where it is not guaranteed that the entire
+ * word is sent in one transfer.
+ */
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ writeq(cpu_to_le64(b), addr);
+}
+#else
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+#endif
+
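+/**
+ * _base_get_msix_index - get the msix index assigned to the current cpu
+ * @ioc: per adapter object
+ *
+ * Returns the msix index from cpu_msix_table for the cpu issuing the request.
+ */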
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSIIO.RequestFlags =
+ MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+
+
+/**
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ char desc[16];
+ u32 iounit_pg1_flags;
+ u32 bios_version;
+
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
+ "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc->name, desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
+
+ pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+ pr_info("Initiator");
+ i++;
+ }
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
+ pr_info("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ i = 0;
+ pr_info("), ");
+ pr_info("Capabilities=(");
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
+ pr_info("Raid");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
+ pr_info("%sTLR", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
+ pr_info("%sMulticast", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
+ pr_info("%sBIDI Target", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
+ pr_info("%sEEDP", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
+ pr_info("%sSnapshot Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
+ pr_info("%sDiag Trace Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+ pr_info("%sDiag Extended Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
+ pr_info("%sTask Set Full", i ? "," : "");
+ i++;
+ }
+
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
+ pr_info("%sNCQ", i ? "," : "");
+ i++;
+ }
+
+ pr_info(")\n");
+}
+
+/**
+ * mpt3sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+ *
+ * Return nothing.
+ *
+ * Using the values passed on the command line, this function modifies the
+ * device missing delay, as well as the io missing delay. It should be called
+ * at driver load time.
+ */
+void
+mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+{
+ u16 dmd, dmd_new, dmd_orignal;
+ u8 io_missing_delay_original;
+ u16 sz;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 num_phys = 0;
+ u16 ioc_status;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys)
+ return;
+
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* device missing delay */
+ dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ dmd_orignal = dmd;
+ if (device_missing_delay > 0x7F) {
+ dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+ device_missing_delay;
+ dmd = dmd / 16;
+ dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+ } else
+ dmd = device_missing_delay;
+ sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+ /* io missing delay */
+ io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+ sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+ if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd_new = (dmd &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd_new =
+ dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
+ ioc->name, dmd_orignal, dmd_new);
+ pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
+ ioc->name, io_missing_delay_original,
+ io_missing_delay);
+ ioc->device_missing_delay = dmd_new;
+ ioc->io_missing_delay = io_missing_delay;
+ }
+
+out:
+ kfree(sas_iounit_pg1);
+}
+
+/**
+ * _base_static_config_pages - static start of day config pages
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ u32 iounit_pg1_flags;
+
+ mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+ if (ioc->ir_firmware)
+ mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+
+ /*
+ * Ensure correct T10 PI operation if vendor left EEDPTagMode
+ * flag unset in NVDATA.
+ */
+ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ if (ioc->manu_pg11.EEDPTagMode == 0) {
+ pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ ioc->name);
+ ioc->manu_pg11.EEDPTagMode &= ~0x3;
+ ioc->manu_pg11.EEDPTagMode |= 0x1;
+ mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ }
+
+ mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ _base_display_ioc_capabilities(ioc);
+
+ /*
+ * Enable task_set_full handling in iounit_pg1 when the
+ * facts capabilities indicate that it is supported.
+ */
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+ iounit_pg1_flags &=
+ ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ else
+ iounit_pg1_flags |=
+ MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
+ mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+}
+
+/**
+ * _base_release_memory_pools - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
+ */
+static void
+_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->request) {
+ pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ ioc->request, ioc->request_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request_pool(0x%p): free\n",
+ ioc->name, ioc->request));
+ ioc->request = NULL;
+ }
+
+ if (ioc->sense) {
+ pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ if (ioc->sense_dma_pool)
+ pci_pool_destroy(ioc->sense_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense_pool(0x%p): free\n",
+ ioc->name, ioc->sense));
+ ioc->sense = NULL;
+ }
+
+ if (ioc->reply) {
+ pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+ if (ioc->reply_dma_pool)
+ pci_pool_destroy(ioc->reply_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_pool(0x%p): free\n",
+ ioc->name, ioc->reply));
+ ioc->reply = NULL;
+ }
+
+ if (ioc->reply_free) {
+ pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+ ioc->reply_free_dma);
+ if (ioc->reply_free_dma_pool)
+ pci_pool_destroy(ioc->reply_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_pool(0x%p): free\n",
+ ioc->name, ioc->reply_free));
+ ioc->reply_free = NULL;
+ }
+
+ if (ioc->reply_post_free) {
+ pci_pool_free(ioc->reply_post_free_dma_pool,
+ ioc->reply_post_free, ioc->reply_post_free_dma);
+ if (ioc->reply_post_free_dma_pool)
+ pci_pool_destroy(ioc->reply_post_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_pool(0x%p): free\n", ioc->name,
+ ioc->reply_post_free));
+ ioc->reply_post_free = NULL;
+ }
+
+ if (ioc->config_page) {
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config_page(0x%p): free\n", ioc->name,
+ ioc->config_page));
+ pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ ioc->config_page, ioc->config_page_dma);
+ }
+
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
+ kfree(ioc->hpr_lookup);
+ kfree(ioc->internal_lookup);
+ if (ioc->chain_lookup) {
+ for (i = 0; i < ioc->chain_depth; i++) {
+ if (ioc->chain_lookup[i].chain_buffer)
+ pci_pool_free(ioc->chain_dma_pool,
+ ioc->chain_lookup[i].chain_buffer,
+ ioc->chain_lookup[i].chain_buffer_dma);
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+}
+
+/**
+ * _base_allocate_memory_pools - allocate start of day memory pools
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 on success, anything else on error
+ */
+static int
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ struct mpt3sas_facts *facts;
+ u16 max_sge_elements;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+ u16 max_request_credit;
+ unsigned short sg_tablesize;
+ u16 sge_size;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+
+ retry_sz = 0;
+ facts = &ioc->facts;
+
+ /* command line tunables for max sgl entries */
+ if (max_sgl_entries != -1)
+ sg_tablesize = max_sgl_entries;
+ else
+ sg_tablesize = MPT3SAS_SG_DEPTH;
+
+ if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
+ ioc->shost->sg_tablesize = sg_tablesize;
+
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+ ioc->internal_depth = ioc->hi_priority_depth + (5);
+ /* command line tunables for max controller queue depth */
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+
+ /* request frame size */
+ ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+ /* reply frame size */
+ ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+ /* calculate the max scatter element size */
+ sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
+
+ retry_allocation:
+ total_sz = 0;
+ /* calculate number of sg elements left over in the 1st frame */
+ max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+ sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+ ioc->max_sges_in_main_message = max_sge_elements/sge_size;
+
+ /* now do the same for a chain buffer */
+ max_sge_elements = ioc->request_sz - sge_size;
+ ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
+
+ /*
+ * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+ */
+ chains_needed_per_io = ((ioc->shost->sg_tablesize -
+ ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+ + 1;
+ if (chains_needed_per_io > facts->MaxChainDepth) {
+ chains_needed_per_io = facts->MaxChainDepth;
+ ioc->shost->sg_tablesize = min_t(u16,
+ ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+ * chains_needed_per_io), ioc->shost->sg_tablesize);
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
+
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+ ioc->reply_free_queue_depth + 1;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
+
+
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth =
+ facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16);
+ ioc->hba_queue_depth =
+ ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+ "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
+
+ ioc->scsiio_depth = ioc->hba_queue_depth -
+ ioc->hi_priority_depth - ioc->internal_depth;
+
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "scsi host: can_queue depth (%d)\n",
+ ioc->name, ioc->shost->can_queue));
+
+
+ /* contiguous pool for request and chains, 16 byte align, one extra
+ * frame for smid=0
+ */
+ ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
+ sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
+
+ /* hi-priority queue */
+ sz += (ioc->hi_priority_depth * ioc->request_sz);
+
+ /* internal queue */
+ sz += (ioc->internal_depth * ioc->request_sz);
+
+ ioc->request_dma_sz = sz;
+ ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ if (!ioc->request) {
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
+ goto out;
+ retry_sz += 64;
+ ioc->hba_queue_depth = max_request_credit - retry_sz;
+ goto retry_allocation;
+ }
+
+ if (retry_sz)
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+
+ /* hi-priority queue */
+ ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+ ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+
+ /* internal queue */
+ ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
+ ioc->request_sz);
+ ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
+ ioc->request_sz);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz)/1024));
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
+ ioc->name, (unsigned long long) ioc->request_dma));
+ total_sz += sz;
+
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
+ if (!ioc->scsi_lookup) {
+ pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
+ ioc->name, (int)sz);
+ goto out;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
+ ioc->name, ioc->request, ioc->scsiio_depth));
+
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->chain_depth; i++) {
+ ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+ ioc->chain_dma_pool, GFP_KERNEL,
+ &ioc->chain_lookup[i].chain_buffer_dma);
+ if (!ioc->chain_lookup[i].chain_buffer) {
+ ioc->chain_depth = i;
+ goto chain_done;
+ }
+ total_sz += ioc->request_sz;
+ }
+ chain_done:
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->chain_depth, ioc->request_sz,
+ ((ioc->chain_depth * ioc->request_sz))/1024));
+
+ /* initialize hi-priority queue smid's */
+ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->hpr_lookup) {
+ pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->hi_priority_smid = ioc->scsiio_depth + 1;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
+
+ /* initialize internal queue smid's */
+ ioc->internal_lookup = kcalloc(ioc->internal_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->internal_lookup) {
+ pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
+
+ /* sense buffers, 4 byte align */
+ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->sense_dma_pool) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->sense = pci_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+ "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->sense_dma));
+ total_sz += sz;
+
+ /* reply pool, 4 byte align */
+ sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->reply_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply = pci_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_dma));
+ total_sz += sz;
+
+ /* reply free queue, 16 byte align */
+ sz = ioc->reply_free_queue_depth * 4;
+ ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+ &ioc->reply_free_dma);
+ if (!ioc->reply_free) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
+ "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_dma (0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_free_dma));
+ total_sz += sz;
+
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ if (_base_is_controller_msix_enabled(ioc))
+ sz = reply_post_free_sz * ioc->reply_queue_count;
+ else
+ sz = reply_post_free_sz;
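+ /* with msix enabled there is one reply descriptor post queue per reply queue */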
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL, &ioc->reply_post_free_dma);
+ if (!ioc->reply_post_free) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
+ "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
+ sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->name, (unsigned long long)
+ ioc->reply_post_free_dma));
+ total_sz += sz;
+
+ ioc->config_page_sz = 512;
+ ioc->config_page = pci_alloc_consistent(ioc->pdev,
+ ioc->config_page_sz, &ioc->config_page_dma);
+ if (!ioc->config_page) {
+ pr_err(MPT3SAS_FMT
+ "config page: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config page(0x%p): size(%d)\n",
+ ioc->name, ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->config_page_dma));
+ total_sz += ioc->config_page_sz;
+
+ pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
+ ioc->name, total_sz/1024);
+ pr_info(MPT3SAS_FMT
+ "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+ pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
+ ioc->name, ioc->shost->sg_tablesize);
+ return 0;
+
+ out:
+ return -ENOMEM;
+}
+
+/**
+ * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ s = readl(&ioc->chip->Doorbell);
+ sc = s & MPI2_IOC_STATE_MASK;
+ return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: per adapter object
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
+ int sleep_flag)
+{
+ u32 count, cntdn;
+ u32 current_state;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
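+ /* convert the timeout in seconds into a poll count: roughly 1 ms per
+ * iteration when sleeping, 0.5 ms when busy waiting
+ */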
+ do {
+ current_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (current_state == ioc_state)
+ return 0;
+ if (count && current_state == MPI2_IOC_STATE_FAULT)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ return current_state;
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
+ * a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
+ */
+static int
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+ u32 doorbell;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, doorbell);
+ return -EFAULT;
+ }
+ } else if (int_status == 0xFFFFFFFF)
+ goto out;
+
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ out:
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 doorbell_reg;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ doorbell_reg = readl(&ioc->chip->Doorbell);
+ if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ ioc->name, __func__, count, doorbell_reg);
+ return -EFAULT;
+}
+
+/**
+ * _base_send_ioc_reset - send doorbell reset
+ * @ioc: per adapter object
+ * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int r = 0;
+
+ if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
+ pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if (!(ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
+ return -EFAULT;
+
+ pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+
+ writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
+ &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ r = -EFAULT;
+ goto out;
+ }
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ pr_info(MPT3SAS_FMT "message unit reset: %s\n",
+ ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * _base_handshake_req_reply_wait - send request through doorbell interface
+ * @ioc: per adapter object
+ * @request_bytes: request length
+ * @request: pointer having request payload
+ * @reply_bytes: reply length
+ * @reply: pointer to reply payload
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+{
+ MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
+ int i;
+ u8 failed;
+ u16 dummy;
+ __le32 *mfp;
+
+ /* make sure doorbell is not in use */
+ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ pr_err(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* clear pending doorbell interrupts from previous state changes */
+ if (readl(&ioc->chip->HostInterruptStatus) &
+ MPI2_HIS_IOC2SYS_DB_STATUS)
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ /* send message to ioc */
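+ /* the write encodes the handshake function and the request length in dwords */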
+ writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
+ ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
+ &ioc->chip->Doorbell);
+
+ if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake ack failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* send message 32-bits at a time */
+ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ failed = 1;
+ }
+
+ if (failed) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake sending request failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* now wait for the reply */
+ if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* read the first two 16-bit words; they give the total length of the reply */
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ for (i = 2; i < default_reply->MsgLength * 2; i++) {
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ if (i >= reply_bytes/2) /* overflow case */
+ dummy = readl(&ioc->chip->Doorbell);
+ else
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ }
+
+ _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
+ if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ mfp = (__le32 *)reply;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < reply_bytes/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SAS IO Unit Control Request message allows the host to perform
+ * low-level operations such as resets on the PHYs of the IO Unit. It also
+ * allows the host to obtain the IOC-assigned device handle for a device,
+ * given other identifying information about the device, and to remove IOC
+ * resources associated with the device.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
+ if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
+ ioc->ioc_link_reset_in_progress = 1;
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
+ ioc->ioc_link_reset_in_progress)
+ ioc->ioc_link_reset_in_progress = 0;
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SasIoUnitControlReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
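
For illustration only, a minimal sketch of a caller that uses this interface to hard-reset a single PHY. The wrapper name is hypothetical, and the request fields shown (Operation, PhyNum) are assumed from the MPI2 SAS IO Unit Control request layout; a real caller would also validate the PHY number and check the reply's IOCStatus.

static int example_phy_hard_reset(struct MPT3SAS_ADAPTER *ioc, u8 phy_number)
{
	Mpi2SasIoUnitControlRequest_t mpi_request;
	Mpi2SasIoUnitControlReply_t mpi_reply;

	/* build a minimal PHY hard reset request */
	memset(&mpi_request, 0, sizeof(mpi_request));
	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
	mpi_request.PhyNum = phy_number;

	/* blocks until the firmware replies or the command times out */
	return mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request);
}
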
+
+/**
+ * mpt3sas_base_scsi_enclosure_processor - send a request to the SEP device
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SepReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
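
As a hedged usage sketch (the helper name is hypothetical; the action, addressing flag, and slot-status values are assumed from the MPI2 SEP request definitions in the MPI headers), a caller could ask the enclosure to light the IDENTIFY LED for a device handle roughly like this:

static int example_sep_identify_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepRequest_t mpi_request;
	Mpi2SepReply_t mpi_reply;

	/* address the slot by device handle and request the IDENTIFY LED */
	memset(&mpi_request, 0, sizeof(mpi_request));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST);
	mpi_request.DevHandle = cpu_to_le16(handle);

	return mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request);
}
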
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @port: port number
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+{
+ Mpi2PortFactsRequest_t mpi_request;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt3sas_port_facts *pfacts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ mpi_request.PortNumber = port;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ pfacts = &ioc->pfacts[port];
+ memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+ pfacts->MaxPostedCmdBuffers =
+ le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
+
+ return 0;
+}
+
+/**
+ * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCFactsRequest_t mpi_request;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt3sas_facts *facts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ facts = &ioc->facts;
+ memset(facts, 0, sizeof(struct mpt3sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+ facts->VF_ID = mpi_reply.VF_ID;
+ facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
+ facts->MaxChainDepth = mpi_reply.MaxChainDepth;
+ facts->WhoInit = mpi_reply.WhoInit;
+ facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
+ facts->MaxReplyDescriptorPostQueueDepth =
+ le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
+ facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
+ facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
+ if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
+ ioc->ir_firmware = 1;
+ facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
+ facts->IOCRequestFrameSize =
+ le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
+ facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
+ ioc->shost->max_id = -1;
+ facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
+ facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
+ facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
+ facts->HighPriorityCredit =
+ le16_to_cpu(mpi_reply.HighPriorityCredit);
+ facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
+ facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hba queue depth(%d), max chains per io(%d)\n",
+ ioc->name, facts->RequestCredit,
+ facts->MaxChainDepth));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request frame size(%d), reply frame size(%d)\n", ioc->name,
+ facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ return 0;
+}
+
+/**
+ * _base_send_ioc_init - send ioc_init to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCInitRequest_t mpi_request;
+ Mpi2IOCInitReply_t mpi_reply;
+ int r;
+ struct timeval current_time;
+ u16 ioc_status;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
+ mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ mpi_request.VF_ID = 0; /* TODO */
+ mpi_request.VP_ID = 0;
+ mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
+ mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
+ mpi_request.ReplyDescriptorPostQueueDepth =
+ cpu_to_le16(ioc->reply_post_queue_depth);
+ mpi_request.ReplyFreeQueueDepth =
+ cpu_to_le16(ioc->reply_free_queue_depth);
+
+ mpi_request.SenseBufferAddressHigh =
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
+ mpi_request.SystemReplyAddressHigh =
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
+ mpi_request.SystemRequestFrameBaseAddress =
+ cpu_to_le64((u64)ioc->request_dma);
+ mpi_request.ReplyFreeQueueAddress =
+ cpu_to_le64((u64)ioc->reply_free_dma);
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post_free_dma);
+
+
+ /* This time stamp specifies number of milliseconds
+ * since epoch ~ midnight January 1, 1970.
+ */
+ do_gettimeofday(&current_time);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ __le32 *mfp;
+ int i;
+
+ mfp = (__le32 *)&mpi_request;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+
+ r = _base_handshake_req_reply_wait(ioc,
+ sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+ sleep_flag);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+ mpi_reply.IOCLogInfo) {
+ pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ r = -EIO;
+ }
+
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32-bit addr)
+ *
+ * Return 1 when the message frame should be freed from _base_interrupt,
+ * or 0 when the message frame is freed from this function.
+ */
+u8
+mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+
+ if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
+ return 1;
+
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
+ ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt3sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_send_port_enable - send port_enable (start device discovery) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ Mpi2PortEnableReply_t *mpi_reply;
+ unsigned long timeleft;
+ int r = 0;
+ u16 smid;
+ u16 ioc_status;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ init_completion(&ioc->port_enable_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
+ 300*HZ);
+ if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2PortEnableRequest_t)/4);
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ goto out;
+ }
+
+ mpi_reply = ioc->port_enable_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
+ r = -EFAULT;
+ goto out;
+ }
+
+ out:
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
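
A sketch of how a SCSI host scan path might drive this non-blocking variant, assuming the scan_start/scan_finished split used by the scsih module; the two callback names are illustrative, and the real scan_start performs additional setup before calling mpt3sas_port_enable().

static void example_scan_start(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* fire the port enable and return; discovery runs in firmware */
	ioc->start_scan = 1;
	if (mpt3sas_port_enable(ioc))
		ioc->start_scan = 0;	/* request could not be queued */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* start_scan is cleared once the port enable reply is processed */
	return ioc->start_scan ? 0 : 1;	/* non-zero ends the scan wait */
}
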
+
+/**
+ * _base_determine_wait_on_discovery - discovery-wait disposition
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait for discovery to complete. Used either to
+ * locate the boot device, or to report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+ * turn on the bit in ioc->pd_handles to indicate a PD.
+ * Also, it may be required to report volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+ /* if there is no BIOS, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+ /* The BIOS is present, so we drop down here.
+ *
+ * If there are any entries in BIOS Page 2, then we wait
+ * for discovery to complete.
+ */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u32 desired_event;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
+
+ if (event < 32)
+ ioc->event_masks[0] &= ~desired_event;
+ else if (event < 64)
+ ioc->event_masks[1] &= ~desired_event;
+ else if (event < 96)
+ ioc->event_masks[2] &= ~desired_event;
+ else if (event < 128)
+ ioc->event_masks[3] &= ~desired_event;
+}
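
The if/else chain above selects one of the four 32-bit mask words; the same selection can be written with an index computed from the event number. A minimal, functionally equivalent sketch (the helper name is illustrative):

static void example_unmask_event(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	/* 4 mask words * 32 bits = 128 supported event codes */
	if (event >= MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * 32)
		return;

	/* clearing the bit unmasks (enables) the event */
	ioc->event_masks[event / 32] &= ~(1U << (event % 32));
}
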
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2EventNotificationRequest_t *mpi_request;
+ unsigned long timeleft;
+ u16 smid;
+ int r = 0;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mpi_request->EventMasks[i] =
+ cpu_to_le32(ioc->event_masks[i]);
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2EventNotificationRequest_t)/4);
+ if (ioc->base_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ } else
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
+ ioc->name, __func__));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ return r;
+}
+
+/**
+ * mpt3sas_base_validate_event_type - validating event types
+ * @ioc: per adapter object
+ * @event_type: firmware event type mask requested by the application
+ *
+ * This turns on firmware event notification when an application
+ * asks for an event. Events that are already enabled are not masked.
+ */
+void
+mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
+{
+ int i, j;
+ u32 event_mask, desired_event;
+ u8 send_update_to_fw;
+
+ for (i = 0, send_update_to_fw = 0; i <
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
+ event_mask = ~event_type[i];
+ desired_event = 1;
+ for (j = 0; j < 32; j++) {
+ if (!(event_mask & desired_event) &&
+ (ioc->event_masks[i] & desired_event)) {
+ ioc->event_masks[i] &= ~desired_event;
+ send_update_to_fw = 1;
+ }
+ desired_event = (desired_event << 1);
+ }
+ }
+
+ if (!send_update_to_fw)
+ return;
+
+ mutex_lock(&ioc->base_cmds.mutex);
+ _base_event_notification(ioc, CAN_SLEEP);
+ mutex_unlock(&ioc->base_cmds.mutex);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
+ ioc->name));
+
+ count = 0;
+ do {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "write magic sequence\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ /* wait 100 msec */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+
+ if (count++ > 20)
+ goto out;
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ ioc->name, count, host_diagnostic));
+
+ } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+
+ hcb_size = readl(&ioc->chip->HCBSize);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
+ ioc->name));
+ writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
+ &ioc->chip->HostDiagnostic);
+
+ /* don't access any registers for 50 milliseconds */
+ msleep(50);
+
+ /* 300 second max wait */
+ for (count = 0; count < 3000000 ; count++) {
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic == 0xFFFFFFFF)
+ goto out;
+ if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
+ break;
+
+ /* wait 1 msec */
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ mdelay(1);
+ }
+
+ if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "restart the adapter assuming the HCB Address points to good F/W\n",
+ ioc->name));
+ host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
+ host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "re-enable the HCDW\n", ioc->name));
+ writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
+ &ioc->chip->HCBSize);
+ }
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
+ ioc->name));
+ writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
+ &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "disable writes to the diagnostic register\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "Wait for FW to go to the READY state\n", ioc->name));
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
+ sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ goto out;
+ }
+
+ pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+ out:
+ pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ return -EFAULT;
+}
+
+/**
+ * _base_make_ioc_ready - put controller in READY state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ u32 ioc_state;
+ int rc;
+ int count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ /* if in RESET state, it should move to READY state shortly */
+ count = 0;
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
+ while ((ioc_state & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_READY) {
+ if (count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ ssleep(1);
+ else
+ mdelay(1000);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ }
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n",
+ ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ if (type == FORCE_BIG_HAMMER)
+ goto issue_diag_reset;
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ if (!(_base_send_ioc_reset(ioc,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ return 0;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, CAN_SLEEP);
+ return rc;
+}
+
+/**
+ * _base_make_ioc_operational - put controller in OPERATIONAL state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ int r, i;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* clean the delayed target reset list */
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_volume_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ /* initialize the scsi lookup free list */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ INIT_LIST_HEAD(&ioc->free_list);
+ smid = 1;
+ for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+ INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].smid = smid;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ }
+
+ /* hi-priority queue */
+ INIT_LIST_HEAD(&ioc->hpr_free_list);
+ smid = ioc->hi_priority_smid;
+ for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ ioc->hpr_lookup[i].smid = smid;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ }
+
+ /* internal queue */
+ INIT_LIST_HEAD(&ioc->internal_free_list);
+ smid = ioc->internal_smid;
+ for (i = 0; i < ioc->internal_depth; i++, smid++) {
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ ioc->internal_lookup[i].smid = smid;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+
+ /* chain pool */
+ INIT_LIST_HEAD(&ioc->free_chain_list);
+ for (i = 0; i < ioc->chain_depth; i++)
+ list_add_tail(&ioc->chain_lookup[i].tracker_list,
+ &ioc->free_chain_list);
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /* initialize Reply Free Queue */
+ for (i = 0, reply_address = (u32)ioc->reply_dma ;
+ i < ioc->reply_free_queue_depth ; i++, reply_address +=
+ ioc->reply_sz)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free = (long)ioc->reply_post_free;
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
+
+ r = _base_send_ioc_init(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ /* initialize reply free host index */
+ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+ writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
+
+ _base_unmask_interrupts(ioc);
+ r = _base_event_notification(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ if (sleep_flag == CAN_SLEEP)
+ _base_static_config_pages(ioc);
+
+
+ if (ioc->is_driver_loading) {
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+
+ return r; /* scan_start and scan_finished support */
+ }
+
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_free_resources - free controller resources
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return;
+}
+
+/**
+ * mpt3sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+{
+ int r, i;
+ int cpu_id, last_cpu_id = 0;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "allocation for cpu_msix_table failed!!!\n",
+ ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ goto out_free_resources;
+
+
+ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ /*
+ * In SAS 3.0,
+ * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
+ * Target Status all require the IEEE formatted scatter gather
+ * elements.
+ */
+
+ ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
+ ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
+ ioc->mpi25 = 1;
+ ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
+ /*
+ * These are the function pointers for other requests that do not
+ * require IEEE scatter gather elements.
+ *
+ * For example, Configuration Pages and SAS IO Unit Control do not.
+ */
+ ioc->build_sg_mpi = &_base_build_sg;
+ ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
+
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+ sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
+ r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+ }
+
+ r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ init_waitqueue_head(&ioc->reset_wq);
+
+ /* allocate memory pd handle bitmask list */
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+
+ /* base internal command bits */
+ mutex_init(&ioc->base_cmds.mutex);
+ ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* transport internal command bits */
+ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->transport_cmds.mutex);
+
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
+ /* task management internal command bits */
+ ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->tm_cmds.mutex);
+
+ /* config page internal command bits */
+ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->config_cmds.mutex);
+
+ /* ctl module internal command bits */
+ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->ctl_cmds.mutex);
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
+ !ioc->ctl_cmds.sense) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ ioc->event_masks[i] = -1;
+
+ /* here we enable the events we care about */
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
+ _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+
+ r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ return 0;
+
+ out_free_resources:
+
+ ioc->remove_host = 1;
+
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->pfacts);
+ ioc->ctl_cmds.reply = NULL;
+ ioc->base_cmds.reply = NULL;
+ ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
+ ioc->transport_cmds.reply = NULL;
+ ioc->config_cmds.reply = NULL;
+ ioc->pfacts = NULL;
+ return r;
+}
+
+
+/**
+ * mpt3sas_base_detach - remove controller instance
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
+{
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+}
+
+/**
+ * _base_reset_handler - reset callback handler (for base)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * or MPT3_IOC_DONE_RESET.
+ *
+ * Return nothing.
+ */
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ mpt3sas_scsih_reset_handler(ioc, reset_phase);
+ mpt3sas_ctl_reset_handler(ioc, reset_phase);
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+ }
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ break;
+ }
+}
+
+/**
+ * _wait_for_commands_to_complete - wait for pending commands to complete
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waits (up to 10 seconds) for all pending commands to
+ * complete prior to putting the controller into reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 ioc_state;
+ unsigned long flags;
+ u16 i;
+
+ ioc->pending_io_count = 0;
+ if (sleep_flag != CAN_SLEEP)
+ return;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+ return;
+
+ /* pending command count */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->scsiio_depth; i++)
+ if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+ ioc->pending_io_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!ioc->pending_io_count)
+ return;
+
+ /* wait for pending commands to complete */
+ wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt3sas_base_hard_reset_handler - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ int r;
+ unsigned long flags;
+ u32 ioc_state;
+ u8 is_fault = 0, is_trigger = 0;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
+ ioc->name, __func__);
+ r = 0;
+ goto out_unlocked;
+ }
+
+ if (mpt3sas_fwfault_debug)
+ mpt3sas_halt_firmware(ioc);
+
+ /* TODO - What we really should be doing is pulling
+ * out all the code associated with NO_SLEEP; it's never used.
+ * That is legacy code from the mpt fusion driver, ported over.
+ * I will leave this BUG_ON here for now until it's been resolved.
+ */
+ BUG_ON(sleep_flag == NO_SLEEP);
+
+ /* wait for an active reset in progress to complete */
+ if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
+ do {
+ ssleep(1);
+ } while (ioc->shost_recovery == 1);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return ioc->ioc_reset_in_progress_status;
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))) {
+ is_trigger = 1;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ is_fault = 1;
+ }
+ _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _wait_for_commands_to_complete(ioc, sleep_flag);
+ _base_mask_interrupts(ioc);
+ r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ if (r)
+ goto out;
+ _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
+ r = -EFAULT;
+ goto out;
+ }
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+ r = _base_make_ioc_operational(ioc, sleep_flag);
+ if (!r)
+ _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+
+ out:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_in_progress_status = r;
+ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_count++;
+ mutex_unlock(&ioc->reset_in_progress_mutex);
+
+ out_unlocked:
+ if ((r == 0) && is_trigger) {
+ if (is_fault)
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
+ else
+ mpt3sas_trigger_master(ioc,
+ MASTER_TRIGGER_ADAPTER_RESET);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return r;
+}
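
For reference, a short hedged sketch of how another module would request a controller reset through this exported entry point, using the CAN_SLEEP and FORCE_BIG_HAMMER flags defined in mpt3sas_base.h; the wrapper name is illustrative only.

static int example_force_host_reset(struct MPT3SAS_ADAPTER *ioc)
{
	/* sleepable context: ask for the diagnostic ("big hammer") reset */
	return mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
	    FORCE_BIG_HAMMER);
}
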
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
new file mode 100644
index 00000000000..994656cbfac
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -0,0 +1,1139 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME "mpt3sas"
+#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION "01.100.01.00"
+#define MPT3SAS_MAJOR_VERSION 1
+#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_BUILD_VERSION 1
+#define MPT3SAS_RELEASE_VERSION 00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
+ */
+#define MPT3SAS_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#define MPT3SAS_MIN_PHYS_SEGMENTS 16
+#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
+#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
+#else
+#define MPT3SAS_SG_DEPTH MPT3SAS_MAX_PHYS_SEGMENTS
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT3SAS_SATA_QUEUE_DEPTH 32
+#define MPT3SAS_SAS_QUEUE_DEPTH 254
+#define MPT3SAS_RAID_QUEUE_DEPTH 128
+
+#define MPT_NAME_LENGTH 32 /* generic length of strings */
+#define MPT_STRING_LENGTH 64
+
+#define MPT_MAX_CALLBACKS 32
+
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+
+#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/
+
+#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+
+/*
+ * reset phases
+ */
+#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
+#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
+#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT3SAS_FMT "%s: "
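
MPT3SAS_FMT only supplies the leading "%s: " adapter-name prefix, so every log call passes ioc->name as the first argument after the format string. A minimal sketch (the function name is illustrative):

static void example_log_ready(struct MPT3SAS_ADAPTER *ioc)
{
	/* expands to "<ioc->name>: controller is ready" */
	pr_info(MPT3SAS_FMT "controller is ready\n", ioc->name);
}
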
+
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
+#define MPT_TARGET_FLAGS_VOLUME 0x02
+#define MPT_TARGET_FLAGS_DELETED 0x04
+#define MPT_TARGET_FASTPATH_IO 0x08
+
+
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID (0x00000000)
+#define MFG10_OEM_ID_DELL (0x00000001)
+#define MFG10_OEM_ID_FSC (0x00000002)
+#define MFG10_OEM_ID_SUN (0x00000003)
+#define MFG10_OEM_ID_IBM (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
+#define MFG10_GF0_R10_DISPLAY (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+struct Mpi2ManufacturingPage10_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 OEMIdentifier; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U32 GenericFlags0; /* 10h */
+ U32 GenericFlags1; /* 14h */
+ U32 Reserved4; /* 18h */
+ U32 OEMSpecificFlags0; /* 1Ch */
+ U32 OEMSpecificFlags1; /* 20h */
+ U32 Reserved5[18]; /* 24h - 60h*/
+};
+
+
+/* Miscellaneous options */
+struct Mpi2ManufacturingPage11_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ __le32 Reserved1; /* 04h */
+ u8 Reserved2; /* 08h */
+ u8 EEDPTagMode; /* 09h */
+ u8 Reserved3; /* 0Ah */
+ u8 Reserved4; /* 0Bh */
+ __le32 Reserved5[23]; /* 0Ch-60h*/
+};
+
+/**
+ * struct MPT3SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @handle: device handle
+ * @num_luns: number of luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flagged for deletion
+ * @tm_busy: target is busy with TM request.
+ */
+struct MPT3SAS_TARGET {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 handle;
+ int num_luns;
+ u32 flags;
+ u8 deleted;
+ u8 tm_busy;
+};
+
+
+/*
+ * per device private data
+ */
+#define MPT_DEVICE_FLAGS_INIT 0x01
+#define MPT_DEVICE_TLR_ON 0x02
+
+/**
+ * struct MPT3SAS_DEVICE - sdev private hostdata
+ * @sas_target: starget private hostdata
+ * @lun: lun number
+ * @flags: MPT_DEVICE_XXX flags
+ * @configured_lun: lun is configured
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ */
+struct MPT3SAS_DEVICE {
+ struct MPT3SAS_TARGET *sas_target;
+ unsigned int lun;
+ u32 flags;
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
+};
+
+#define MPT3_CMD_NOT_USED 0x8000 /* free */
+#define MPT3_CMD_COMPLETE 0x0001 /* completed */
+#define MPT3_CMD_PENDING 0x0002 /* pending */
+#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
+#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+
+/**
+ * struct _internal_cmd - internal commands struct
+ * @mutex: mutex
+ * @done: completion
+ * @reply: reply message pointer
+ * @sense: sense data
+ * @status: MPT3_CMD_XXX status
+ * @smid: system message id
+ */
+struct _internal_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ void *sense;
+ u16 status;
+ u16 smid;
+};
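
The MPT3_CMD_* status bits and struct _internal_cmd are always used together in the same sequence in mpt3sas_base.c: mark the command PENDING, post the frame, wait on the completion, then test COMPLETE/REPLY_VALID. A condensed, hedged sketch of that lifecycle (the function name is illustrative), assuming the caller already holds cmd->mutex, has built the request frame, and obtained a valid smid:

static int example_issue_internal_cmd(struct MPT3SAS_ADAPTER *ioc,
	struct _internal_cmd *cmd, u16 smid)
{
	int rc = 0;

	cmd->status = MPT3_CMD_PENDING;
	cmd->smid = smid;
	init_completion(&cmd->done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&cmd->done, msecs_to_jiffies(10000));

	if (!(cmd->status & MPT3_CMD_COMPLETE))
		rc = -ETIME;	/* timed out, or dropped by a host reset */

	/* when MPT3_CMD_REPLY_VALID is set, the completion callback has
	 * already copied the firmware reply into cmd->reply
	 */
	cmd->status = MPT3_CMD_NOT_USED;
	return rc;
}
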
+
+
+
+/**
+ * struct _sas_device - attached device information
+ * @list: sas device list
+ * @starget: starget object
+ * @sas_address: device sas address
+ * @device_name: retrieved from the SAS IDENTIFY frame.
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @phy: phy identifier provided in sas device page 0
+ * @fast_path: fast path feature enable bit
+ * @responding: used in _scsih_sas_device_mark_responding
+ */
+struct _sas_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 sas_address;
+ u64 device_name;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u16 volume_handle;
+ u64 volume_wwid;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 phy;
+ u8 responding;
+ u8 fast_path;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ */
+#define MPT_MAX_WARPDRIVE_PDS 8
+struct _raid_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u16 handle;
+ int id;
+ int channel;
+ u8 volume_type;
+ u8 num_pds;
+ u8 responding;
+ u8 percent_complete;
+ u32 device_info;
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is a volume
+ * @device: holds pointer for either struct _sas_device or
+ * struct _raid_device
+ */
+struct _boot_device {
+ u8 is_raid;
+ void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle, when this node is a member of an enclosure
+ * @enclosure_logical_id: enclosure logical identifier
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset; if that fails, escalate to the big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
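+
+/*
+ * Editor's note: illustrative usage, not part of this patch. A caller that
+ * needs to force a diagnostic reset goes through
+ * mpt3sas_base_hard_reset_handler(), prototyped later in this header, e.g.:
+ *
+ *	mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+ *
+ * This is exactly what _config_request() in mpt3sas_config.c does when a
+ * config page request times out.
+ */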
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free chain trackers (ioc->free_chain_list)
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @chain_list: list of chain buffers associated with this IO
+ * @tracker_list: list of free requests (ioc->free_list)
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ struct list_head chain_list;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free requests (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: link within ioc->reply_queue_list
+ */
+struct adapter_reply_queue {
+ struct MPT3SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ struct list_head list;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz,
+ dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+ void *paddr);
+
+
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt3sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi3_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt3sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
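+
+/*
+ * Editor's note: illustrative usage, not part of this patch. TM_MUTEX_ON is
+ * passed when the caller is not already holding the task management mutex
+ * (per the TM_MUTEX_OFF description above). Assuming the task type constant
+ * MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET from the MPI headers, a target
+ * reset through mpt3sas_scsih_issue_tm(), prototyped below, could look like:
+ *
+ *	mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, 0,
+ *	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 0, TM_MUTEX_ON);
+ */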
+
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number of online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @chip: memory mapped register space
+ * @chip_phys: physical address prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed, returns the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number of msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: ctl internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: config internal commands
+ * @tm_tr_cb_idx: device removal target reset handshake
+ * @tm_tr_volume_cb_idx: volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id: used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles: bitmask for PD handles
+ * @pd_handles_sz: size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number of sg elements in main message
+ * @max_sges_in_chain_message: number of sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth
+ * @reply_sz: per reply frame size
+ * @reply: pool of replies
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replies
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_free: pool for reply post (64bit descriptor)
+ * @reply_post_free_dma:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset link list
+ * @delayed_tr_volume_list: volume target reset link list
+ */
+struct MPT3SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ resource_size_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+ u8 ioc_reset_in_progress_status;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ u16 cpu_msix_table_sz;
+ u32 ioc_reset_count;
+ MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* function ptr for either IEEE or MPI sg elements */
+ MPT_BUILD_SG_SCMD build_sg_scmd;
+ MPT_BUILD_SG build_sg;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
+ u8 mpi25;
+ u16 sge_size_ieee;
+
+ /* function ptr for MPI sg elements only */
+ MPT_BUILD_SG build_sg_mpi;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ /* static config pages */
+ struct mpt3sas_facts facts;
+ struct mpt3sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ struct Mpi2ManufacturingPage10_t manu_pg10;
+ struct Mpi2ManufacturingPage11_t manu_pg11;
+ Mpi2BiosPage2_t bios_pg2;
+ Mpi2BiosPage3_t bios_pg3;
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage0_t iounit_pg0;
+ Mpi2IOUnitPage1_t iounit_pg1;
+
+ struct _boot_device req_boot_device;
+ struct _boot_device req_alt_boot_device;
+ struct _boot_device current_boot_device;
+
+ /* sas hba, expander, and device list */
+ struct _sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head sas_device_list;
+ struct list_head sas_device_init_list;
+ spinlock_t sas_device_lock;
+ struct list_head raid_device_list;
+ spinlock_t raid_device_lock;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ int sas_id;
+
+ void *blocking_handles;
+ void *pd_handles;
+ u16 pd_handles_sz;
+
+ /* config page */
+ u16 config_page_sz;
+ void *config_page;
+ dma_addr_t config_page_dma;
+
+ /* scsiio request */
+ u16 hba_queue_depth;
+ u16 sge_size;
+ u16 scsiio_depth;
+ u16 request_sz;
+ u8 *request;
+ dma_addr_t request_dma;
+ u32 request_dma_sz;
+ struct scsiio_tracker *scsi_lookup;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
+ struct list_head free_list;
+ int pending_io_count;
+ wait_queue_head_t reset_wq;
+
+ /* chain */
+ struct chain_tracker *chain_lookup;
+ struct list_head free_chain_list;
+ struct dma_pool *chain_dma_pool;
+ ulong chain_pages;
+ u16 max_sges_in_main_message;
+ u16 max_sges_in_chain_message;
+ u16 chains_needed_per_io;
+ u32 chain_depth;
+
+ /* hi-priority queue */
+ u16 hi_priority_smid;
+ u8 *hi_priority;
+ dma_addr_t hi_priority_dma;
+ u16 hi_priority_depth;
+ struct request_tracker *hpr_lookup;
+ struct list_head hpr_free_list;
+
+ /* internal queue */
+ u16 internal_smid;
+ u8 *internal;
+ dma_addr_t internal_dma;
+ u16 internal_depth;
+ struct request_tracker *internal_lookup;
+ struct list_head internal_free_list;
+
+ /* sense */
+ u8 *sense;
+ dma_addr_t sense_dma;
+ struct dma_pool *sense_dma_pool;
+
+ /* reply */
+ u16 reply_sz;
+ u8 *reply;
+ dma_addr_t reply_dma;
+ u32 reply_dma_max_address;
+ u32 reply_dma_min_address;
+ struct dma_pool *reply_dma_pool;
+
+ /* reply free queue */
+ u16 reply_free_queue_depth;
+ __le32 *reply_free;
+ dma_addr_t reply_free_dma;
+ struct dma_pool *reply_free_dma_pool;
+ u32 reply_free_host_index;
+
+ /* reply post queue */
+ u16 reply_post_queue_depth;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+ struct dma_pool *reply_post_free_dma_pool;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
+
+ struct list_head delayed_tr_list;
+ struct list_head delayed_tr_volume_list;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ spinlock_t diag_trigger_lock;
+ u8 diag_trigger_active;
+ struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+ struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+};
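+
+/*
+ * Editor's note: illustrative sketch, not part of this patch. Every adapter
+ * is linked into the global mpt3sas_ioc_list (declared just below) through
+ * its @list member, so code that must visit all adapters can do, assuming it
+ * serializes against driver load/unload:
+ *
+ *	struct MPT3SAS_ADAPTER *ioc;
+ *
+ *	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ *		pr_info(MPT3SAS_FMT "msix vectors(%d)\n", ioc->name,
+ *		    ioc->msix_vector_count);
+ */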
+
+typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+
+/* base shared API */
+extern struct list_head mpt3sas_ioc_list;
+void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc);
+
+int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type);
+
+void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
+void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+/* hi-priority queue */
+u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+
+u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_initialize_callback_handler(void);
+u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt3sas_base_release_callback_handler(u8 cb_idx);
+
+u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
+ u32 phys_addr);
+
+u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
+
+void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request);
+int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+
+void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc,
+ u32 *event_type);
+
+void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay);
+
+int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
+
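+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of this
+ * patch. It ties together the base API prototyped above to show the usual
+ * life cycle of an internal request: reserve a smid against a registered
+ * callback index, obtain and fill the message frame, then post it to the IOC.
+ * Completion waiting and the internal command status flags are omitted.
+ */
+static inline int
+_example_send_internal_request(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ u16 smid;
+ void *mf;
+
+ /* reserve a system message id tied to cb_idx's completion routine */
+ smid = mpt3sas_base_get_smid(ioc, cb_idx);
+ if (!smid)
+ return -EAGAIN;
+
+ /* the request frame lives in the ioc->request pool, indexed by smid */
+ mf = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mf, 0, ioc->request_sz);
+ /* ... populate the MPI request here ... */
+
+ /* hand the frame to the firmware via the default request descriptor */
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+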
+
+/* scsih shared API */
+u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address);
+
+struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
+ struct MPT3SAS_ADAPTER *ioc, u16 handle);
+struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_scsih_sas_device_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+
+void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
+
+/* config shared API */
+u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
+ u8 *num_phys);
+int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page);
+
+int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+
+int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage0_t *config_page);
+int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage8_t *config_page);
+int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page,
+ u32 phy_number, u16 handle);
+int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
+int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
+int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle);
+int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds);
+int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz);
+int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 form, u32 form_specific);
+int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
+ u16 volume_handle, u64 *wwid);
+
+/* ctl shared API */
+extern struct device_attribute *mpt3sas_host_attrs[];
+extern struct device_attribute *mpt3sas_dev_attrs[];
+void mpt3sas_ctl_init(void);
+void mpt3sas_ctl_exit(void);
+u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
+ u8 bits_to_register);
+int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/* transport shared API */
+u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address);
+void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent);
+int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev);
+void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+extern struct sas_function_template mpt3sas_transport_functions;
+extern struct scsi_transport_template *mpt3sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+/* trigger data externs */
+void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
+ u32 trigger_bitmask);
+void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier);
+void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
+ u8 asc, u8 ascq);
+void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
+ u32 loginfo);
+#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
new file mode 100644
index 00000000000..ce7e59b2fc0
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -0,0 +1,1650 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+ << MPI2_SGE_FLAGS_SHIFT)
+
+/**
+ * struct config_request - dma memory used for a config page request
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
+ *
+ */
+struct config_request {
+ u16 sz;
+ void *page;
+ dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+ case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+ desc = "io_unit";
+ break;
+ case MPI2_CONFIG_PAGETYPE_IOC:
+ desc = "ioc";
+ break;
+ case MPI2_CONFIG_PAGETYPE_BIOS:
+ desc = "bios";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+ desc = "raid_volume";
+ break;
+ case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+ desc = "manufaucturing";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+ desc = "physdisk";
+ break;
+ case MPI2_CONFIG_PAGETYPE_EXTENDED:
+ switch (mpi_request->ExtPageType) {
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+ desc = "sas_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+ desc = "sas_expander";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+ desc = "sas_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+ desc = "sas_phy";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_LOG:
+ desc = "log";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+ desc = "enclosure";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+ desc = "raid_config";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+ desc = "driver_mappping";
+ break;
+ }
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT
+ "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ ioc->name, calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ pr_err(MPT3SAS_FMT
+ "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ ioc->name, __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* use tmp buffer if less than 512 bytes */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
+ *
+ * Returns nothing.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+#endif
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
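+
+/*
+ * Editor's note: illustrative usage, not part of this patch. The callback
+ * index stored in ioc->config_cb_idx is obtained by registering this routine
+ * through the base callback API from mpt3sas_base.h, roughly:
+ *
+ *	cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_config_done);
+ *
+ * The interrupt path then invokes it with the smid and the reply descriptor
+ * address; returning 1 tells _base_interrupt to free the message frame.
+ */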
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside ioc->config_cb_idx.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ u16 wait_state_count;
+ struct config_request mem;
+ u32 ioc_status = UINT_MAX;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT3_CMD_PENDING;
+ config_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+#endif
+ init_completion(&ioc->config_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
+ timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt3sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+ MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto free_mem;
+ }
+
+ if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) {
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+ sizeof(Mpi2ConfigReply_t));
+
+ /* Reply Frame Sanity Checks to workaround FW issues */
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (mpi_reply->Header.PageType & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested PageType(0x%02x)" \
+ " Reply PageType(0x%02x)\n", \
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (mpi_reply->Header.PageType & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
+ " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__, mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ }
+
+ if (retry_count)
+ pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
+ ioc->name, __func__, retry_count);
+
+ if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
+ config_page && mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) {
+ u8 *p = (u8 *)mem.page;
+
+ /* Config Page Sanity Checks to workaround FW issues */
+ if (p) {
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (p[3] & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested PageType(0x%02x)"
+ " Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (p[3] & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ (mpi_request->ExtPageType != p[6])) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG:" \
+ " config page mismatch:"
+ " Requested ExtPageType(0x%02x)"
+ " Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
+ }
+ }
+ memcpy(config_page, mem.page, min_t(u16, mem.sz,
+ config_page_sz));
+ }
+
+ free_mem:
+ if (config_page)
+ _config_free_config_dma_memory(ioc, &mem);
+ out:
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->config_cmds.mutex);
+
+ if (issue_host_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
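+
+/*
+ * Editor's note: the function below is an illustrative sketch, not part of
+ * this patch. It shows the expected calling convention for the config page
+ * readers above: check the return code, then check the IOCStatus carried in
+ * the reply frame before trusting the page contents. BoardName is assumed to
+ * be the NUL-padded field of that name in Mpi2ManufacturingPage0_t.
+ */
+static void
+_example_print_board_name(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ u16 ioc_status;
+
+ if (mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &manu_pg0))
+ return;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+ pr_info(MPT3SAS_FMT "board name: %s\n", ioc->name,
+ (char *)manu_pg0.BoardName);
+}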
+
+/**
+ * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 7;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 10;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg3 - obtain bios page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_iounit_pg1 - set iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
+ * @ioc: per adapter object
+ * @num_phys: pointer returned with the number of phys
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ u16 ioc_status;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+ *num_phys = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2SasIOUnitPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_phys = config_page.NumPhys;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys prior
+ * to this function, so enough memory is allocated for config_page (see the
+ * usage sketch following this function).
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
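+
+/*
+ * Illustrative caller sketch (not part of this module): the buffer for
+ * SAS IO unit page 0 is sized from the phy count obtained via
+ * mpt3sas_config_get_number_hba_phys.  Type names follow the MPI2
+ * headers; error handling is omitted.
+ *
+ *	u8 num_phys;
+ *	u16 sz;
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2SasIOUnitPage0_t *sas_iounit_pg0;
+ *
+ *	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ *	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *	    (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ *	if (sas_iounit_pg0)
+ *		mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ *		    sas_iounit_pg0, sz);
+ *	kfree(sas_iounit_pg0);
+ */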
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys prior
+ * to this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys prior
+ * to this function, so enough memory is allocated for config_page (see the
+ * read-modify-write sketch following this function).
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
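+
+/*
+ * Illustrative read-modify-write sketch (not part of this module): a caller
+ * fetches SAS IO unit page 1, updates a field such as the device missing
+ * delay, and writes the page back (the helper above posts it to both the
+ * current and NVRAM copies).  Sizing follows the same pattern as for
+ * page 0; "num_phys" and "delay" are assumed caller variables.
+ *
+ *	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) +
+ *	    (num_phys * sizeof(Mpi2SasIOUnit1PhyData_t));
+ *	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ *	if (sas_iounit_pg1 &&
+ *	    !mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ *	    sas_iounit_pg1, sz)) {
+ *		sas_iounit_pg1->ReportDeviceMissingDelay = delay;
+ *		mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply,
+ *		    sas_iounit_pg1, sz);
+ *	}
+ *	kfree(sas_iounit_pg1);
+ */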
+
+/**
+ * mpt3sas_config_get_expander_pg0 - obtain expander page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg1 - obtain expander page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+ u16 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+ (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: enclosure handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg1 - obtain phy page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_pds - obtain number of phys disks assigned to volume
+ * @ioc: per adapter object
+ * @handle: volume handle
+ * @num_pds: pointer returned with the number of physical disks
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2RaidVolPage0_t config_page;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2RaidVolPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_pds = config_page.NumPhysDisks;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
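+
+/*
+ * Illustrative caller sketch (not part of this module): the buffer for raid
+ * volume page 0 is sized from the physical disk count obtained via
+ * mpt3sas_config_get_number_pds.  Type names follow the MPI2 headers;
+ * error handling is omitted.
+ *
+ *	u8 num_pds;
+ *	u16 sz;
+ *	Mpi2ConfigReply_t mpi_reply;
+ *	Mpi2RaidVolPage0_t *vol_pg0;
+ *
+ *	mpt3sas_config_get_number_pds(ioc, handle, &num_pds);
+ *	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *	    (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));
+ *	vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ *	if (vol_pg0)
+ *		mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ *		    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sz);
+ *	kfree(vol_pg0);
+ */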
+
+/**
+ * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
+ * @form_specific: specific to the form
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for a given hidden
+ * raid component
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: pointer returned with the volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle)
+{
+ Mpi2RaidConfigurationPage0_t *config_page = NULL;
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2ConfigReply_t mpi_reply;
+ int r, i, config_page_sz;
+ u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
+
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+ mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+ config_page = kmalloc(config_page_sz, GFP_KERNEL);
+ if (!config_page) {
+ r = -1;
+ goto out;
+ }
+
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
+ }
+ config_num = config_page->ConfigNum;
+ }
+ out:
+ kfree(config_page);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle
+ * @ioc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: pointer returned with the volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ volume_handle))) {
+ *wwid = le64_to_cpu(raid_vol_pg1.WWID);
+ return 0;
+ } else
+ return -1;
+}
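+
+/*
+ * Illustrative caller sketch (not part of this module): mapping a hidden
+ * raid component to the WWID of its owning volume chains the two helpers
+ * above; a volume_handle of zero indicates a hot spare.
+ *
+ *	u16 volume_handle = 0;
+ *	u64 volume_wwid = 0;
+ *
+ *	if (!mpt3sas_config_get_volume_handle(ioc, pd_handle,
+ *	    &volume_handle) && volume_handle)
+ *		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ *		    &volume_wwid);
+ */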
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
new file mode 100644
index 00000000000..8af944d7d13
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -0,0 +1,3297 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+#include "mpt3sas_ctl.h"
+
+
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+ NON_BLOCKING,
+ BLOCKING,
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then returns the sas_device
+ * object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->handle != handle)
+ continue;
+ r = sas_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string passed from the calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "scsi_io, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ desc = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ desc = "ioc_init";
+ break;
+ case MPI2_FUNCTION_IOC_FACTS:
+ desc = "ioc_facts";
+ break;
+ case MPI2_FUNCTION_CONFIG:
+ {
+ Mpi2ConfigRequest_t *config_request =
+ (Mpi2ConfigRequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+ (config_request->Header.PageType &
+ MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+ config_request->Header.PageNumber);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_PORT_FACTS:
+ desc = "port_facts";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ desc = "port_enable";
+ break;
+ case MPI2_FUNCTION_EVENT_NOTIFICATION:
+ desc = "event_notification";
+ break;
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ desc = "fw_download";
+ break;
+ case MPI2_FUNCTION_FW_UPLOAD:
+ desc = "fw_upload";
+ break;
+ case MPI2_FUNCTION_RAID_ACTION:
+ desc = "raid_action";
+ break;
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "raid_pass, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ desc = "sas_iounit_cntl";
+ break;
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ desc = "sata_pass";
+ break;
+ case MPI2_FUNCTION_DIAG_BUFFER_POST:
+ desc = "diag_buffer_post";
+ break;
+ case MPI2_FUNCTION_DIAG_RELEASE:
+ desc = "diag_release";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ desc = "smp_passthrough";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
+ ioc->name, calling_function_name, desc, smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ Mpi2SCSIIOReply_t *scsi_reply =
+ (Mpi2SCSIIOReply_t *)mpi_reply;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _ctl_sas_device_find_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ pr_info(MPT3SAS_FMT
+ "\tscsi_state(0x%02x), scsi_status"
+ "(0x%02x)\n", ioc->name,
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+
+#endif
+
+/**
+ * mpt3sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Returns 1 meaning the mf should be freed from _base_interrupt,
+ * or 0 meaning the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+ ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should be
+ * saved in the driver event_log.  This bitmask is set by the application
+ * (see the worked example following this function).
+ *
+ * Returns nonzero when the event should be captured, or zero when there
+ * is no match.
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
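+
+/*
+ * Worked example for the lookup above (illustrative only): for firmware
+ * event 0x21 (decimal 33), i = 33 / 32 = 1 and desired_event =
+ * 1 << (33 % 32) = 0x2, so the event is logged only when bit 1 of
+ * ioc->event_type[1] has been set by the application (via MPT3EVENTENABLE).
+ */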
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT3_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* This aen_event_read_flag flag is set until the
+ * application has read the event log.
+ * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely copies the event into the driver event log via
+ * mpt3sas_ctl_add_to_event_log().
+ *
+ * Returns 1 meaning the mf should be freed from _base_interrupt,
+ * or 0 meaning the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc_number: ioc number passed from the application
+ * @iocpp: The ioc pointer is returned in this.
+ *
+ * Returns -1 on error, else the ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * or MPT3_IOC_DONE_RESET.
+ */
+void
+mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ int i;
+ u8 issue_reset;
+
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+ break;
+ }
+}
+
+/**
+ * _ctl_fasync - fasync handler
+ * @fd - file descriptor
+ * @filep - file pointer
+ * @mode - on/off flag
+ *
+ * Called when the application requests the fasync callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_release - release handler
+ * @inode - inode of the device node
+ * @filep - file pointer
+ *
+ * Called when the application releases the fasync callback handler.
+ */
+static int
+_ctl_release(struct inode *inode, struct file *filep)
+{
+ return fasync_helper(-1, filep, 0, &async_queue);
+}
+
+/**
+ * _ctl_poll - poll handler
+ * @filep - file pointer
+ * @wait - poll table
+ *
+ * Reports POLLIN | POLLRDNORM when any adapter has an unread firmware event.
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag)
+ return POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
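+
+/*
+ * Illustrative user-space sketch (assumes the control node is /dev/mpt3ctl;
+ * event_enable and event_report are caller-filled mpt3_ioctl_eventenable and
+ * mpt3_ioctl_eventreport structures whose header setup is omitted): an
+ * application arms event logging with MPT3EVENTENABLE, sleeps in poll()
+ * until the driver reports POLLIN, then fetches the log with MPT3EVENTREPORT.
+ *
+ *	struct pollfd pfd = { .fd = open("/dev/mpt3ctl", O_RDWR),
+ *			      .events = POLLIN };
+ *
+ *	ioctl(pfd.fd, MPT3EVENTENABLE, &event_enable);
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		ioctl(pfd.fd, MPT3EVENTREPORT, &event_report);
+ */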
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid is found, else non-zero on failure.
+ * During failure, the reply frame is filled.
+ */
+static int
+_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ u8 found = 0;
+ u16 i;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT3SAS_DEVICE *priv_data;
+ unsigned long flags;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = ioc->scsiio_depth; i && !found; i--) {
+ scmd = ioc->scsi_lookup[i - 1].scmd;
+ if (scmd == NULL || scmd->device == NULL ||
+ scmd->device->hostdata == NULL)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!found) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @mf - pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ u32 ioc_state;
+ u16 ioc_status;
+ u16 smid;
+ unsigned long timeout, timeleft;
+ u8 issue_reset;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+ u16 wait_state_count;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed obtaining a memory for mpi_request\n",
+ ioc->name, __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if (karg.data_sge_offset * 4 > ioc->request_sz ||
+ karg.data_sge_offset > (UINT_MAX / 4)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy in request message frame from user */
+ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memcpy(request, mpi_request, karg.data_sge_offset*4);
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = karg.data_out_size;
+ data_in_sz = karg.data_in_size;
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
+ ret = -EINVAL;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
+ &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ if (copy_from_user(data_out, karg.data_out_buf_ptr,
+ data_out_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -EFAULT;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
+ &data_in_dma);
+ if (!data_in) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = (void *)request + (karg.data_sge_offset*4);
+
+ /* send command to firmware */
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+#endif
+
+ init_completion(&ioc->ctl_cmds.done);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ case MPI2_FUNCTION_FW_UPLOAD:
+ {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_TOOLBOX:
+ {
+ Mpi2ToolboxCleanRequest_t *toolbox_request =
+ (Mpi2ToolboxCleanRequest_t *)mpi_request;
+
+ if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ } else {
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ }
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ /* drop to default case for posting the request */
+ }
+ default:
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request, karg.data_sge_offset);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
+ (ioc->logging_level & MPT_DEBUG_TM)) {
+ Mpi2SCSITaskManagementReply_t *tm_reply =
+ (Mpi2SCSITaskManagementReply_t *)mpi_reply;
+
+ pr_info(MPT3SAS_FMT "TASK_MGMT: " \
+ "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
+ "TerminationCount(0x%08x)\n", ioc->name,
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
+ }
+#endif
+ /* copy out the data_in buffer to user */
+ if (data_in_sz) {
+ if (copy_to_user(karg.data_in_buf_ptr, data_in,
+ data_in_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out reply message frame to user */
+ if (karg.max_reply_bytes) {
+ sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out sense to user */
+ if (karg.max_sense_bytes && (mpi_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
+ pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_halt_firmware(ioc);
+ mpt3sas_scsih_issue_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
+ 0, TM_MUTEX_ON);
+ } else
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ }
+
+ out:
+
+ /* free memory associated with sg buffers */
+ if (data_in)
+ pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ data_out_dma);
+
+ kfree(mpi_request);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return ret;
+}
+
+/**
+ * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_iocinfo karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memset(&karg, 0 , sizeof(karg));
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->pfacts)
+ karg.port_number = ioc->pfacts[0].PortNumber;
+ karg.hw_rev = ioc->pdev->revision;
+ karg.pci_id = ioc->pdev->device;
+ karg.subsystem_device = ioc->pdev->subsystem_device;
+ karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
+ karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
+ karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
+ karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
+ karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
+ karg.firmware_version = ioc->facts.FWVersion.Word;
+ strcpy(karg.driver_version, MPT3SAS_DRIVER_NAME);
+ strcat(karg.driver_version, "-");
+ strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
+ karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventquery karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
+ memcpy(karg.event_types, ioc->event_type,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventenable karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memcpy(ioc->event_type, karg.event_types,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+ mpt3sas_base_validate_event_type(ioc, ioc->event_type);
+
+ if (ioc->event_log)
+ return 0;
+ /* initialize event_log */
+ ioc->event_context = 0;
+ ioc->aen_event_read_flag = 0;
+ ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
+ sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
+ if (!ioc->event_log) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventreport karg;
+ u32 number_bytes, max_events, max;
+ struct mpt3_ioctl_eventreport __user *uarg = arg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ number_bytes = karg.hdr.max_data_size -
+ sizeof(struct mpt3_ioctl_header);
+ max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
+ max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
+
+ /* If fewer than 1 event is requested, there must have
+ * been some type of error.
+ */
+ if (!max || !ioc->event_log)
+ return -ENODATA;
+
+ number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
+ if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ /* reset flag so SIGIO can restart */
+ ioc->aen_event_read_flag = 0;
+ return 0;
+}
+
+/**
+ * _ctl_do_reset - main handler for MPT3HARDRESET opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_diag_reset karg;
+ int retval;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_info(MPT3SAS_FMT "host reset: %s\n",
+ ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ return 0;
+}
+
+/**
+ * _ctl_btdh_search_sas_device - searching for sas device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->sas_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == sas_device->handle) {
+ btdh->bus = sas_device->channel;
+ btdh->id = sas_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == sas_device->channel && btdh->id ==
+ sas_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = sas_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_search_raid_device - searching for raid device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->raid_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == raid_device->handle) {
+ btdh->bus = raid_device->channel;
+ btdh->id = raid_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == raid_device->channel && btdh->id ==
+ raid_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = raid_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_btdh_mapping karg;
+ int rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = _ctl_btdh_search_sas_device(ioc, &karg);
+ if (!rc)
+ _ctl_btdh_search_raid_device(ioc, &karg);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_diag_capability - return diag buffer capability
+ * @ioc: per adapter object
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ *
+ * returns 1 when diag buffer support is enabled in firmware
+ */
+static u8
+_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
+{
+ u8 rc = 0;
+
+ switch (buffer_type) {
+ case MPI2_DIAG_BUF_TYPE_TRACE:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_EXTENDED:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ rc = 1;
+ }
+
+ return rc;
+}
+
+
+/**
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
+ *
+ */
+static long
+_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_diag_register *diag_register)
+{
+ int rc, i;
+ void *request_data = NULL;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz = 0;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ u8 buffer_type;
+ unsigned long timeleft;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ u8 issue_reset = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ buffer_type = diag_register->buffer_type;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ pr_err(MPT3SAS_FMT
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__,
+ buffer_type);
+ return -EINVAL;
+ }
+
+ if (diag_register->requested_buffer_size % 4) {
+ pr_err(MPT3SAS_FMT
+ "%s: the requested_buffer_size is not 4 byte aligned\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ request_data = ioc->diag_buffer[buffer_type];
+ request_data_sz = diag_register->requested_buffer_size;
+ ioc->unique_id[buffer_type] = diag_register->unique_id;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ memcpy(ioc->product_specific[buffer_type],
+ diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
+ ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
+
+ if (request_data) {
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
+ request_data = NULL;
+ }
+ }
+
+ if (request_data == NULL) {
+ ioc->diag_buffer_sz[buffer_type] = 0;
+ ioc->diag_buffer_dma[buffer_type] = 0;
+ request_data = pci_alloc_consistent(
+ ioc->pdev, request_data_sz, &request_data_dma);
+ if (request_data == NULL) {
+ pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
+ " for diag buffers, requested size(%d)\n",
+ ioc->name, __func__, request_data_sz);
+ mpt3sas_base_free_smid(ioc, smid);
+ return -ENOMEM;
+ }
+ ioc->diag_buffer[buffer_type] = request_data;
+ ioc->diag_buffer_sz[buffer_type] = request_data_sz;
+ ioc->diag_buffer_dma[buffer_type] = request_data_dma;
+ }
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = diag_register->buffer_type;
+ mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
+ mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
+ mpi_request->BufferLength = cpu_to_le32(request_data_sz);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ ioc->name, __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ if (rc && request_data)
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * mpt3sas_enable_diag_buffer - enabling diag_buffers support at driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
+ *
+ * This is called when the command line option diag_buffer_enable is enabled
+ * at driver load time.
+ */
+void
+mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+ struct mpt3_diag_register diag_register;
+
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+
+ if (bits_to_register & 1) {
+ pr_info(MPT3SAS_FMT "registering trace buffer support\n",
+ ioc->name);
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 2) {
+ pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 4) {
+ pr_info(MPT3SAS_FMT "registering extended buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+}
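+
+/*
+ * Illustrative usage sketch (assuming the diag_buffer_enable module
+ * parameter referenced above):
+ *
+ *	modprobe mpt3sas diag_buffer_enable=1	(bit 0: trace)
+ *	modprobe mpt3sas diag_buffer_enable=3	(bits 0-1: trace + snapshot)
+ */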
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_register karg;
+ long rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ rc = _ctl_diag_register_2(ioc, &karg);
+ return rc;
+}
+
+/**
+ * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+static long
+_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_unregister karg;
+ void *request_data;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) has not been released\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ return 0;
+}
+
+/**
+ * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * The application will send only buffer_type and unique_id. Driver will
+ * inspect unique_id first, if valid, fill in all the info. If unique_id is
+ * 0x00, the driver will return info specified by Buffer Type.
+ */
+static long
+_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_query karg;
+ void *request_data;
+ int i;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ karg.application_flags = 0;
+ buffer_type = karg.buffer_type;
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID);
+ else
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID |
+ MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ karg.product_specific[i] =
+ ioc->product_specific[buffer_type][i];
+
+ karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ karg.driver_added_buffer_size = 0;
+ karg.unique_id = ioc->unique_id[buffer_type];
+ karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
+
+ if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
+ pr_err(MPT3SAS_FMT
+ "%s: unable to write mpt3_diag_query data @ %p\n",
+ ioc->name, __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_send_diag_release - Diag Release Message
+ * @ioc: per adapter object
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset - set to 1 when the caller should issue a host reset
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ unsigned long timeleft;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED)
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: skipping due to FAULT state\n", ioc->name,
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ *issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is already released\n",
+ ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+	/* buffers were released due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) was released due to host reset\n",
+ ioc->name, __func__, buffer_type);
+ return 0;
+ }
+
+ rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_read_buffer karg;
+ struct mpt3_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long timeleft, request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_size = ioc->diag_buffer_sz[buffer_type];
+
+ if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
+		pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
+		    "or bytes_to_read is not 4 byte aligned\n", ioc->name,
+		    __func__);
+ return -EINVAL;
+ }
+
+ if (karg.starting_offset > request_size)
+ return -EINVAL;
+
+ diag_data = (void *)(request_data + karg.starting_offset);
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ ioc->name, __func__,
+ diag_data, karg.starting_offset, karg.bytes_to_read));
+
+ /* Truncate data on requests that are too large */
+ if ((diag_data + karg.bytes_to_read < diag_data) ||
+ (diag_data + karg.bytes_to_read > request_data + request_size))
+ copy_size = request_size - karg.starting_offset;
+ else
+ copy_size = karg.bytes_to_read;
+
+ if (copy_to_user((void __user *)uarg->diagnostic_data,
+ diag_data, copy_size)) {
+ pr_err(MPT3SAS_FMT
+ "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ ioc->name, __func__, diag_data);
+ return -EFAULT;
+ }
+
+ if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
+ return 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: Reregister buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is still registered\n",
+ ioc->name, __func__, buffer_type));
+ return 0;
+ }
+ /* Get a free request frame and save the message context.
+ */
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->BufferLength =
+ cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
+ mpi_request->BufferAddress =
+ cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt3_ioctl_command32)
+ *
+ * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt3_ioctl_command32 karg32;
+ struct mpt3_ioctl_command32 __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt3_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data pointer
+ * @compat - handles 32 bit applications in 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct mpt3_ioctl_header ioctl_header;
+ enum block_state state;
+ long ret = -EINVAL;
+
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt3_ioctl_header))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ return -ERESTARTSYS;
+
+
+ switch (cmd) {
+ case MPT3IOCINFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
+ ret = _ctl_getiocinfo(ioc, arg);
+ break;
+#ifdef CONFIG_COMPAT
+ case MPT3COMMAND32:
+#endif
+ case MPT3COMMAND:
+ {
+ struct mpt3_ioctl_command __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+ }
+ break;
+ }
+ case MPT3EVENTQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
+ ret = _ctl_eventquery(ioc, arg);
+ break;
+ case MPT3EVENTENABLE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
+ ret = _ctl_eventenable(ioc, arg);
+ break;
+ case MPT3EVENTREPORT:
+ ret = _ctl_eventreport(ioc, arg);
+ break;
+ case MPT3HARDRESET:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
+ ret = _ctl_do_reset(ioc, arg);
+ break;
+ case MPT3BTDHMAPPING:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
+ ret = _ctl_btdh_mapping(ioc, arg);
+ break;
+ case MPT3DIAGREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
+ ret = _ctl_diag_register(ioc, arg);
+ break;
+ case MPT3DIAGUNREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
+ ret = _ctl_diag_unregister(ioc, arg);
+ break;
+ case MPT3DIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
+ ret = _ctl_diag_query(ioc, arg);
+ break;
+ case MPT3DIAGRELEASE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
+ ret = _ctl_diag_release(ioc, arg);
+ break;
+ case MPT3DIAGREADBUFFER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
+ ret = _ctl_diag_read_buffer(ioc, arg);
+ break;
+ default:
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ break;
+ }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+ return ret;
+}
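+
+/*
+ * Illustrative user-space sketch for the MPT3IOCINFO opcode (a minimal
+ * example, assuming the control node is exposed as /dev/mpt3ctl and that
+ * the mpt3sas ioctl definitions are available to the application):
+ *
+ *	struct mpt3_ioctl_iocinfo karg;
+ *	int fd;
+ *
+ *	memset(&karg, 0, sizeof(karg));
+ *	karg.hdr.ioc_number = 0;
+ *	fd = open("/dev/mpt3ctl", O_RDWR);
+ *	if (fd >= 0 && ioctl(fd, MPT3IOCINFO, &karg) == 0)
+ *		printf("driver version: %s\n", karg.driver_version);
+ */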
+
+/**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data pointer
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data pointer
+ *
+ * This routine handles 32 bit applications running on a 64 bit OS.
+ */
+static long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+ return ret;
+}
+#endif
+
+/* scsi host attributes */
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (version & 0xFF000000) >> 24,
+ (version & 0x00FF0000) >> 16,
+ (version & 0x0000FF00) >> 8,
+ version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - nvdata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ _ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ _ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for the firmware implementation of debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for the firmware implementation of debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_host_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ _ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->logging_level = val;
+ pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+ ioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+ _ctl_logging_level_store);
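+
+/*
+ * Illustrative sysfs usage (assuming the standard scsi_host sysfs path);
+ * the value written is parsed as hex:
+ *
+ *	echo 3f8 > /sys/class/scsi_host/host0/logging_level
+ *	cat /sys/class/scsi_host/host0/logging_level
+ */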
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is a command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->fwfault_debug = val;
+ pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+ ioc->fwfault_debug);
+ return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+ _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
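+
+/*
+ * Illustrative sysfs usage (assuming the standard scsi_host sysfs path):
+ *
+ *	echo 1 > /sys/class/scsi_host/host0/fwfault_debug
+ */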
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of ioc resets that have occurred
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
+ NULL);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
+
+/**
+ * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_trace_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 size = 0;
+ struct DIAG_BUFFER_START *request_data;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request_data = (struct DIAG_BUFFER_START *)
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
+ le32_to_cpu(request_data->Reserved3) == 0x4742444c)
+ size = le32_to_cpu(request_data->Size);
+
+ ioc->ring_buffer_sz = size;
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
+ _ctl_host_trace_buffer_size_show, NULL);
+
+/**
+ * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * You will only be able to read 4k bytes of the ring buffer at a time.
+ * In order to read beyond 4k bytes, you will have to write out the
+ * offset to the same attribute; this moves the read pointer.
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ void *request_data;
+ u32 size;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+ return 0;
+
+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+	request_data = ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] +
+	    ioc->ring_buffer_offset;
+ memcpy(buf, request_data, size);
+ return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->ring_buffer_offset = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
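+
+/*
+ * Illustrative sysfs usage (assuming the standard scsi_host sysfs path):
+ * a read returns at most a page of data starting at the stored offset,
+ * and writing a decimal byte offset moves the read pointer:
+ *
+ *	cat /sys/class/scsi_host/host0/host_trace_buffer > chunk0.bin
+ *	echo 4096 > /sys/class/scsi_host/host0/host_trace_buffer
+ *	cat /sys/class/scsi_host/host0/host_trace_buffer > chunk1.bin
+ */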
+
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechanism to post/release host_trace_buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt3_diag_register diag_register;
+ u8 issue_reset = 0;
+
+	/* don't allow post/release to occur while recovery is active */
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery || ioc->is_driver_loading)
+ return -EBUSY;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+ pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+ _ctl_diag_register_2(ioc, &diag_register);
+ } else if (!strcmp(str, "release")) {
+ /* exit out if host buffers are already released */
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ goto out;
+ pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
+ ioc->name);
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ out:
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_enable_show,
+ _ctl_host_trace_buffer_enable_store);
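+
+/*
+ * Illustrative sysfs usage (assuming the standard scsi_host sysfs path):
+ *
+ *	echo post > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ *	... reproduce the problem ...
+ *	echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ *	cat /sys/class/scsi_host/host0/host_trace_buffer_size
+ */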
+
+/*********** diagnostic trigger support **********************************/
+
+/**
+ * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
+ memcpy(buf, &ioc->diag_trigger_master, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+ memset(&ioc->diag_trigger_master, 0,
+ sizeof(struct SL_WH_MASTER_TRIGGER_T));
+ memcpy(&ioc->diag_trigger_master, buf, rc);
+ ioc->diag_trigger_master.MasterData |=
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+
+
+/**
+ * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_event, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_event, 0,
+ sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_event, buf, sz);
+ if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_scsi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+	memset(&ioc->diag_trigger_scsi, 0,
+	    sizeof(struct SL_WH_SCSI_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_scsi, buf, sz);
+ if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_mpi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+	memset(&ioc->diag_trigger_mpi, 0,
+	    sizeof(struct SL_WH_MPI_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_mpi, buf, sz);
+ if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+
+static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+
+/*********** diagnostic trigger support *** END *****************************/
+
+
+
+/*****************************************/
+
+struct device_attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_logging_level,
+ &dev_attr_fwfault_debug,
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ &dev_attr_ioc_reset_count,
+ &dev_attr_host_trace_buffer_size,
+ &dev_attr_host_trace_buffer,
+ &dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
+ &dev_attr_diag_trigger_master,
+ &dev_attr_diag_trigger_event,
+ &dev_attr_diag_trigger_scsi,
+ &dev_attr_diag_trigger_mpi,
+ NULL,
+};
+
+/* device attributes */
+
+/**
+ * _ctl_device_sas_address_show - sas address
+ * @dev - pointer to embedded device
+ * @buf - the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+
+/**
+ * _ctl_device_handle_show - device handle
+ * @dev - pointer to embedded device
+ * @buf - the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ NULL,
+};
+
+static const struct file_operations ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = _ctl_ioctl,
+ .release = _ctl_release,
+ .poll = _ctl_poll,
+ .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+ .minor = MPT3SAS_MINOR,
+ .name = MPT3SAS_DEV_NAME,
+ .fops = &ctl_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt3sas_ctl_init(void)
+{
+ async_queue = NULL;
+ if (misc_register(&ctl_dev) < 0)
+ pr_err("%s can't register misc device [minor=%d]\n",
+ MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+ init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ *
+ */
+void
+mpt3sas_ctl_exit(void)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ int i;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
+
+ kfree(ioc->event_log);
+ }
+ misc_deregister(&ctl_dev);
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
new file mode 100644
index 00000000000..bd89f4f0055
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -0,0 +1,418 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_CTL_H_INCLUDED
+#define MPT3SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+
+#ifndef MPT3SAS_MINOR
+#define MPT3SAS_MINOR (MPT_MINOR + 2)
+#endif
+#define MPT3SAS_DEV_NAME "mpt3ctl"
+#define MPT3_MAGIC_NUMBER 'L'
+#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \
+ struct mpt3_ioctl_iocinfo)
+#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command32)
+#endif
+#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \
+ struct mpt3_ioctl_eventquery)
+#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \
+ struct mpt3_ioctl_eventenable)
+#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \
+ struct mpt3_ioctl_eventreport)
+#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \
+ struct mpt3_ioctl_diag_reset)
+#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \
+ struct mpt3_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \
+ struct mpt3_diag_register)
+#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \
+ struct mpt3_diag_release)
+#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \
+ struct mpt3_diag_unregister)
+#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \
+ struct mpt3_diag_query)
+#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
+ struct mpt3_diag_read_buffer)
+
+/**
+ * struct mpt3_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number of bytes to transfer on read
+ */
+struct mpt3_ioctl_header {
+ uint32_t ioc_number;
+ uint32_t port_number;
+ uint32_t max_data_size;
+};
+
+/**
+ * struct mpt3_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt3_ioctl_diag_reset {
+ struct mpt3_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt3_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt3_ioctl_pci_info {
+ union {
+ struct {
+ uint32_t device:5;
+ uint32_t function:3;
+ uint32_t bus:24;
+ } bits;
+ uint32_t word;
+ } u;
+ uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT2_IOCTL_INTERFACE_FC (0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT2_IOCTL_INTERFACE_SAS (0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
+#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT2_IOCTL_VERSION_LENGTH (32)
+
+/**
+ * struct mpt3_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @subsystem_device - PCI subsystem Device ID
+ * @subsystem_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt3_ioctl_iocinfo {
+ struct mpt3_ioctl_header hdr;
+ uint32_t adapter_type;
+ uint32_t port_number;
+ uint32_t pci_id;
+ uint32_t hw_rev;
+ uint32_t subsystem_device;
+ uint32_t subsystem_vendor;
+ uint32_t rsvd0;
+ uint32_t firmware_version;
+ uint32_t bios_version;
+ uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+ uint8_t rsvd1;
+ uint8_t scsi_id;
+ uint16_t rsvd2;
+ struct mpt3_ioctl_pci_info pci_information;
+};
+
+
+/* number of event log entries */
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
+
+/**
+ * struct mpt3_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt3_ioctl_eventquery {
+ struct mpt3_ioctl_header hdr;
+ uint16_t event_entries;
+ uint16_t rsvd;
+ uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt3_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt3_ioctl_eventenable {
+ struct mpt3_ioctl_header hdr;
+ uint32_t event_types[4];
+};
+
+#define MPT3_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT3_IOCTL_EVENTS -
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT3_IOCTL_EVENTS {
+ uint32_t event;
+ uint32_t context;
+ uint8_t data[MPT3_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt3_ioctl_eventreport - returning event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT3_IOCTL_EVENTS)
+ */
+struct mpt3_ioctl_eventreport {
+ struct mpt3_ioctl_header hdr;
+ struct MPT3_IOCTL_EVENTS event_data[1];
+};
+
+/**
+ * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ * the first SGL
+ * @mf - message frame
+ */
+struct mpt3_ioctl_command {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ void __user *reply_frame_buf_ptr;
+ void __user *data_in_buf_ptr;
+ void __user *data_out_buf_ptr;
+ void __user *sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
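+
+/*
+ * Illustrative sketch (not part of this header): an application builds the
+ * passthrough request by appending the raw message frame after the fixed
+ * portion of struct mpt3_ioctl_command, then issues MPT3COMMAND on the
+ * /dev/mpt3ctl misc device. Hypothetical outline (request_sz, request_frame
+ * and sge_offset are placeholders; error handling omitted):
+ *
+ *    unsigned char *buf = calloc(1, sizeof(struct mpt3_ioctl_command) +
+ *        request_sz);
+ *    struct mpt3_ioctl_command *cmd = (struct mpt3_ioctl_command *)buf;
+ *
+ *    cmd->hdr.ioc_number = 0;
+ *    cmd->timeout = 0;                  /* 0 = driver default timeout */
+ *    cmd->data_sge_offset = sge_offset; /* in words, see field doc above */
+ *    memcpy(cmd->mf, request_frame, request_sz);
+ *    ioctl(open("/dev/mpt3ctl", O_RDWR), MPT3COMMAND, cmd);
+ */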
+
+#ifdef CONFIG_COMPAT
+struct mpt3_ioctl_command32 {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ uint32_t reply_frame_buf_ptr;
+ uint32_t data_in_buf_ptr;
+ uint32_t data_out_buf_ptr;
+ uint32_t sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt3_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain the bus/id, the application sets
+ * handle to a valid device handle, and bus/id to 0xFFFF.
+ *
+ * To obtain the device handle, the application sets
+ * bus/id to valid values, and the handle to 0xFFFF.
+ */
+struct mpt3_ioctl_btdh_mapping {
+ struct mpt3_ioctl_header hdr;
+ uint32_t id;
+ uint32_t bus;
+ uint16_t handle;
+ uint16_t rsvd;
+};
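+
+/*
+ * Illustrative sketch (not part of this header): looking up the firmware
+ * device handle for a known bus/id via MPT3BTDHMAPPING, following the rules
+ * in the comment above (fd, known_bus and known_id are placeholders; error
+ * handling omitted):
+ *
+ *    struct mpt3_ioctl_btdh_mapping map = {
+ *        .hdr.ioc_number = 0,
+ *        .bus = known_bus,
+ *        .id = known_id,
+ *        .handle = 0xFFFF,    /* wildcard: ask driver to fill this in */
+ *    };
+ *
+ *    if (!ioctl(fd, MPT3BTDHMAPPING, &map))
+ *        printf("handle 0x%04x\n", map.handle);
+ */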
+
+
+
+/* application flags for mpt3_diag_register, mpt3_diag_query */
+#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
+#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
+#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+
+/* flags for mpt3_diag_read_buffer */
+#define MPT3_FLAGS_REREGISTER (0x0001)
+
+#define MPT3_PRODUCT_SPECIFIC_DWORDS 23
+
+/**
+ * struct mpt3_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffer size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ * of the buffer.
+ *
+ * This will allow the driver to set up any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt3_diag_register {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t requested_buffer_size;
+ uint32_t unique_id;
+};
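+
+/*
+ * Illustrative sketch (not part of this header): registering a trace buffer
+ * with MPT3DIAGREGISTER. MPI2_DIAG_BUF_TYPE_TRACE is assumed to come from the
+ * MPI headers; the size, unique_id and fd are placeholders, error handling
+ * omitted:
+ *
+ *    struct mpt3_diag_register reg = {
+ *        .hdr.ioc_number = 0,
+ *        .buffer_type = MPI2_DIAG_BUF_TYPE_TRACE,
+ *        .requested_buffer_size = 1024 * 1024,
+ *        .unique_id = 0x12345678,    /* application-chosen ownership tag */
+ *    };
+ *
+ *    ioctl(fd, MPT3DIAGREGISTER, &reg);
+ */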
+
+/**
+ * struct mpt3_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to clean up any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt3_diag_unregister {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. The driver will
+ * inspect unique_id first and, if valid, fill in all the info. If unique_id
+ * is 0x00, the driver will return the info for the specified buffer_type.
+ */
+struct mpt3_diag_query {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t total_buffer_size;
+ uint32_t driver_added_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+struct mpt3_diag_release {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - offset within the driver's buffer at which to start
+ * copying data into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the driver's buffer into the
+ * application buffer, starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt3_diag_read_buffer {
+ struct mpt3_ioctl_header hdr;
+ uint8_t status;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t starting_offset;
+ uint32_t bytes_to_read;
+ uint32_t unique_id;
+ uint32_t diagnostic_data[1];
+};
+
+#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
new file mode 100644
index 00000000000..35405e7044f
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -0,0 +1,219 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_DEBUG_H_INCLUDED
+#define MPT3SAS_DEBUG_H_INCLUDED
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_REPLY 0x00000200
+#define MPT_DEBUG_HANDSHAKE 0x00000400
+#define MPT_DEBUG_CONFIG 0x00000800
+#define MPT_DEBUG_DL 0x00001000
+#define MPT_DEBUG_RESET 0x00002000
+#define MPT_DEBUG_SCSI 0x00004000
+#define MPT_DEBUG_IOCTL 0x00008000
+#define MPT_DEBUG_SAS 0x00020000
+#define MPT_DEBUG_TRANSPORT 0x00040000
+#define MPT_DEBUG_TASK_SET_FULL 0x00080000
+
+#define MPT_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * CONFIG_SCSI_MPT3SAS_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->logging_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define dewtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsastransport(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#define dtsfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)
+
+#define dtransportprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)
+
+#define dTriggerDiagPrintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG)
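+
+/*
+ * Usage note (illustrative): the CMD argument is an arbitrary statement that
+ * is executed only when the corresponding bit is set in ioc->logging_level,
+ * for example:
+ *
+ *    dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ *        ioc->name, __func__));
+ */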
+
+
+
+/* inline functions for dumping debug data*/
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _debug_dump_mf - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_mf(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("mf:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_reply - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_reply(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("reply:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_config - print config page contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_config(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("config:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+#else
+#define _debug_dump_mf(mpi_request, sz)
+#define _debug_dump_reply(mpi_request, sz)
+#define _debug_dump_config(mpi_request, sz)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+#endif /* MPT3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
new file mode 100644
index 00000000000..05f80450ac7
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -0,0 +1,8167 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+
+#include "mpt3sas_base.h"
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+
+#define RAID_CHANNEL 1
+/* forward proto's */
+static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device);
+static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 retry_count, u8 is_pd);
+
+static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
+/* global parameters */
+LIST_HEAD(mpt3sas_ioc_list);
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt_ids;
+
+static u8 tm_tr_cb_idx = -1 ;
+static u8 tm_tr_volume_cb_idx = -1 ;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
+
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
+#define MPT3SAS_MAX_LUN (16895)
+static int max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+
+
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Any combination of bits can be set
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable,
+ " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+
+/* raid transport support */
+
+static struct raid_template *mpt3sas_raid_template;
+
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+ u8 skey;
+ u8 asc;
+ u8 ascq;
+};
+
+#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
+#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @event_data: reply event data payload follows
+ *
+ * This object is stored on ioc->fw_event_list.
+ */
+struct fw_event_work {
+ struct list_head list;
+ struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+
+ struct MPT3SAS_ADAPTER *ioc;
+ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+ u16 event;
+ void *event_data;
+};
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi status
+ * @log_info: log information
+ * @transfer_length: data length transfer when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+ u16 handle;
+ u8 is_raid;
+ enum dma_data_direction dir;
+ u32 data_length;
+ dma_addr_t data_dma;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u32 lun;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 timeout;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 valid_reply;
+ /* the following bits are only valid when 'valid_reply = 1' */
+ u32 sense_length;
+ u16 ioc_status;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ u32 transfer_length;
+};
+
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = {
+ /* Fury ~ 3004 and 3008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Invader ~ 3108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, scsih_pci_table);
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ *
+ * Note: The logging levels are defined in mpt3sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting logging_level(0x%08x)\n", logging_level);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->logging_level = logging_level;
+ return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+ &logging_level, 0644);
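+
+/*
+ * Usage note (illustrative): since the parameter is registered with mode 0644
+ * and a set callback, the logging level can be changed at runtime for every
+ * registered adapter, e.g.:
+ *
+ *    echo 0x8000 > /sys/module/mpt3sas/parameters/logging_level
+ *
+ * (0x8000 is MPT_DEBUG_IOCTL from mpt3sas_debug.h.)
+ */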
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+ Mpi2BootDeviceSasWwid_t *boot_device)
+{
+ return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in the IDENTIFY frame
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+ Mpi2BootDeviceDeviceName_t *boot_device)
+{
+ return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+ Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+ return (enclosure_logical_id == le64_to_cpu(boot_device->
+ EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+ SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in the IDENTIFY frame
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+ u64 enclosure_logical_id, u16 slot, u8 form,
+ Mpi2BiosPage2BootDevice_t *boot_device)
+{
+ int rc = 0;
+
+ switch (form) {
+ case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+ if (!sas_address)
+ break;
+ rc = _scsih_srch_boot_sas_address(
+ sas_address, &boot_device->SasWwid);
+ break;
+ case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+ if (!enclosure_logical_id)
+ break;
+ rc = _scsih_srch_boot_encl_slot(
+ enclosure_logical_id,
+ slot, &boot_device->EnclosureSlot);
+ break;
+ case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+ if (!device_name)
+ break;
+ rc = _scsih_srch_boot_device_name(
+ device_name, &boot_device->DeviceName);
+ break;
+ case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 success, non-zero when failure
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 *sas_address)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 ioc_status;
+
+ *sas_address = 0;
+
+ if (handle <= ioc->sas_hba.num_phys) {
+ *sas_address = ioc->sas_hba.sas_address;
+ return 0;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
+ }
+
+ /* we hit this because the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+
+ /* else error case */
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be the first reported device to
+ * scsi-ml or the sas transport; the purpose is persistent boot device support.
+ * There are primary, alternate, and current entries in bios page 2. The order
+ * of priority is primary, alternate, then current. This routine saves
+ * the corresponding device object and is_raid flag in the ioc object.
+ * The saved data is used later in _scsih_probe_boot_devices().
+ */
+static void
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
+ void *device, u8 is_raid)
+{
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u64 sas_address;
+ u64 device_name;
+ u64 enclosure_logical_id;
+ u16 slot;
+
+ /* only process this function when driver loads */
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ if (!is_raid) {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
+ } else {
+ raid_device = device;
+ sas_address = raid_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ }
+
+ if (!ioc->req_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_boot_device.device = device;
+ ioc->req_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->req_alt_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedAltBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_alt_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_alt_boot_device.device = device;
+ ioc->req_alt_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->current_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.CurrentBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: current_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->current_boot_device.device = device;
+ ioc->current_boot_device.is_raid = is_raid;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then return sas_device
+ * object.
+ */
+struct _sas_device *
+mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then returns the sas_device
+ * object.
+ */
+static struct _sas_device *
+_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Removing object and freeing associated memory from the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ if (!sas_device)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+ _scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->id == id && raid_device->channel == channel) {
+ r = raid_device;
+ goto out;
+ }
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->handle != handle)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @wwid: world wide identifier of the raid volume
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then return raid_device
+ * object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid != wwid)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list link list.
+ */
+static void
+_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ raid_device->handle, (unsigned long long)raid_device->wwid));
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_add_tail(&raid_device->list, &ioc->raid_device_list);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ */
+static void
+_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+ if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+ ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then it clears the stored scmd pointer (sets it to NULL).
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the relevant smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
+ *scmd)
+{
+ u16 smid;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ smid = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd == scmd) {
+ smid = ioc->scsi_lookup[i].smid;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0 ; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+
+static void
+_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ max_depth = shost->can_queue;
+
+ /* limit max device queue for SATA to 32 */
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ goto not_sata;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ goto not_sata;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+ goto not_sata;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (sas_device && sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ not_sata:
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
+ * (see include/scsi/scsi_host.h for definition)
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
+ _scsih_adjust_queue_depth(sdev, qdepth);
+ else if (reason == SCSI_QDEPTH_QFULL)
+ scsi_track_queue_full(sdev, qdepth);
+ else
+ return -EOPNOTSUPP;
+
+ if (sdev->inquiry_len > 7)
+ sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
+ "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags, sdev->scsi_level,
+ (sdev->inquiry[7] & 2) >> 1);
+
+ return sdev->queue_depth;
+}
+
+/**
+ * _scsih_change_queue_type - changing device queue tag type
+ * @sdev: scsi device struct
+ * @tag_type: requested tag type
+ *
+ * Returns queue tag type.
+ */
+static int
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+
+/**
+ * _scsih_target_alloc - target add routine
+ * @starget: scsi target struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), GFP_KERNEL);
+ if (!sas_target_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = sas_target_priv_data;
+ sas_target_priv_data->starget = starget;
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+
+ /* RAID volumes */
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+ }
+
+ /* sas/sata devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+
+ if (sas_device) {
+ sas_target_priv_data->handle = sas_device->handle;
+ sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_device->starget = starget;
+ sas_device->id = starget->id;
+ sas_device->channel = starget->channel;
+ if (test_bit(sas_device->handle, ioc->pd_handles))
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ if (sas_device->fast_path)
+ sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return 0;
+}
+
+/**
+ * _scsih_target_destroy - target destroy routine
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = starget->hostdata;
+ if (!sas_target_priv_data)
+ return;
+
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ raid_device->starget = NULL;
+ raid_device->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device && (sas_device->starget == starget) &&
+ (sas_device->id == starget->id) &&
+ (sas_device->channel == starget->channel))
+ sas_device->starget = NULL;
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ out:
+ kfree(sas_target_priv_data);
+ starget->hostdata = NULL;
+}
+
+/**
+ * _scsih_slave_alloc - device add routine
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL);
+ if (!sas_device_priv_data)
+ return -ENOMEM;
+
+ sas_device_priv_data->lun = sdev->lun;
+ sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns++;
+ sas_device_priv_data->sas_target = sas_target_priv_data;
+ sdev->hostdata = sas_device_priv_data;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
+ sdev->no_uld_attach = 1;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc,
+ starget->id, starget->channel);
+ if (raid_device)
+ raid_device->sdev = sdev; /* raid is single lun */
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_slave_destroy - device destroy routine
+ * @sdev: scsi device struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && !sas_target_priv_data->num_luns)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, struct scsi_device *sdev)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u16 flags;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ flags = le16_to_cpu(sas_device_pg0.Flags);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ sdev_printk(KERN_INFO, sdev,
+ "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+ "sw_preserve(%s)\n",
+ (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+ "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/*
+ * raid transport support -
+ * Enabled for SLES11 and newer; in older kernels the driver will panic when
+ * the driver is unloaded and then reloaded - I believe the subroutine
+ * raid_class_release() is not cleaning up properly.
+ */
+
+/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!handle)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
+ out:
+ raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt3sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds;
+
+ if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ raid_device->num_pds = num_pds;
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ kfree(vol_pg0);
+ return 1;
+ }
+
+ raid_device->volume_type = vol_pg0->VolumeType;
+
+ /* figure out what the underlying devices are by
+ * obtaining the device_info bits for the 1st device
+ */
+ if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[0].PhysDiskNum))) {
+ if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16_to_cpu(pd_pg0.DevHandle)))) {
+ raid_device->device_info =
+ le32_to_cpu(sas_device_pg0.DeviceInfo);
+ }
+ }
+
+ kfree(vol_pg0);
+ return 0;
+}
+
+
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+ return;
+
+}
+
+/**
+ * _scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int qdepth;
+ u8 ssp_target = 0;
+ char *ds = "";
+ char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ qdepth = 1;
+ sas_device_priv_data = sdev->hostdata;
+ sas_device_priv_data->configured_lun = 1;
+ sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
+
+ /* raid volume handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (!raid_device) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+
+ /* RAID Queue Depth Support
+ * IS volume = underlying qdepth of drive type, either
+ * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
+ * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
+ */
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ else
+ ds = "STP";
+ }
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ r_level = "RAID0";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ if (ioc->manu_pg10.OEMIdentifier &&
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
+ MFG10_GF0_R10_DISPLAY) &&
+ !(raid_device->num_pds % 2))
+ r_level = "RAID10";
+ else
+ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID1";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID10";
+ break;
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAIDX";
+ break;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+/* raid transport support */
+ _scsih_set_level(sdev, raid_device->volume_type);
+ return 0;
+ }
+
+ /* non-raid handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt3sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, handle, (unsigned long long)sas_device->sas_address,
+ sas_device->phy, (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+ ds, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+ if (ssp_target) {
+ sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: scsi device struct
+ * @bdev: pointer to block device context
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Returns 0 always.
+ */
+static int
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
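+ /* default geometry: 64 heads x 32 sectors = 2048 sectors per cylinder */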
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1 GB
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
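+ /* e.g. a 4194304-sector (2 GB) disk: 255 * 63 = 16065 sectors per
+ * cylinder, so 4194304 / 16065 = 261 cylinders are reported
+ */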
+
+ /* return result */
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+
+ return 0;
+}
+
+/**
+ * _scsih_response_code - translation of device response code
+ * @ioc: per adapter object
+ * @response_code: response code returned by the device
+ *
+ * Return nothing.
+ */
+static void
+_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
+ ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->tm_cmds.smid != smid)
+ return 1;
+ mpt3sas_base_flush_reply_queues(ioc);
+ ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
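+ /* one matching sdev is enough: tm_busy lives in the shared target private data */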
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 1;
+ skip = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * After the task management request completes, we need to unfreeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 0;
+ skip = 1;
+ ioc->ignore_loginfos = 0;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @timeout: timeout in seconds
+ * @serial_number: the serial_number from scmd
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside `ioc->tm_cb_idx`.
+ *
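+ * Example (illustrative; mirrors the abort handler below):
+ *
+ *	mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ *	    scmd->device->id, scmd->device->lun,
+ *	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ *	    scmd->serial_number, TM_MUTEX_ON);
+ *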
+ * Return SUCCESS or FAILED.
+ */
+int
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ unsigned long serial_number, enum mutex_type m_type)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ u16 smid = 0;
+ u32 ioc_state;
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+ if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
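+ /* for task aborts, remember the tracker so we can check below whether
+ * the command actually completed
+ */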
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
+ ioc->name, handle, type, smid_task));
+ ioc->tm_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->tm_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt3sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
+ }
+
+ if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ mpi_reply = ioc->tm_cmds.reply;
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
+ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ _scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
+ */
+static void
+_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
+{
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ scsi_print_command(scmd);
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ starget_printk(KERN_INFO, starget,
+ "%s handle(0x%04x), %s wwid(0x%016llx)\n",
+ device_str, priv_target->handle,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ if (priv_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ starget_printk(KERN_INFO, starget,
+ "volume handle(0x%04x), "
+ "volume wwid(0x%016llx)\n",
+ sas_device->volume_handle,
+ (unsigned long long)sas_device->volume_wwid);
+ }
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy);
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_abort - eh threads main abort routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_abort(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u16 smid;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* search for the command */
+ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
+ if (!smid) {
+ scmd->result = DID_RESET << 16;
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components and volumes this is not supported */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mpt3sas_halt_firmware(ioc);
+
+ handle = sas_device_priv_data->sas_target->handle;
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the device was reset else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting device reset! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
+ TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_target_reset - eh threads main target reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the target was reset else FAILED
+ */
+static int
+_scsih_target_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
+ scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
+ scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, 0, TM_MUTEX_ON);
+
+ out:
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+
+/**
+ * _scsih_host_reset - eh threads main host reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the host was reset else FAILED
+ */
+static int
+_scsih_host_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ int r, retval;
+
+ pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ r = (retval < 0) ? FAILED : SUCCESS;
+ pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return r;
+}
+
+/**
+ * _scsih_fw_event_add - insert and queue up fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This adds the firmware event object into link list, then queues it up to
+ * be processed from user context.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ if (ioc->firmware_event_thread == NULL)
+ return;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ INIT_LIST_HEAD(&fw_event->list);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_WORK(&fw_event->work, _firmware_event_work);
+ queue_work(ioc->firmware_event_thread, &fw_event->work);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_fw_event_free - delete fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This removes firmware event object from link list, frees associated memory.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+ *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event->event_data);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+
+/**
+ * mpt3sas_send_trigger_data_event - send event for processing trigger data
+ * @ioc: per adapter object
+ * @event_data: trigger event data
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ kfree(fw_event);
+ return;
+ }
+ fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
+ fw_event->ioc = ioc;
+ memcpy(fw_event->event_data, event_data, sizeof(*event_data));
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_error_recovery_delete_devices - remove devices not responding
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt3sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
+ * @ioc: per adapter object
+ *
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
+
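+ /* if the work has not started yet, cancel it and free the event outright;
+ * otherwise flag it so the running handler can bail out early
+ */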
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
+}
+
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * change the device state from block to running
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ "device_running, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+}
+
+
+/**
+ * _scsih_ublock_io_device - unblock the matching device
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ *
+ * change the device state from SDEV_BLOCK back to SDEV_RUNNING
+ */
+static void
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->sas_address
+ != sas_address)
+ continue;
+ if (sas_device_priv_data->block) {
+ sas_device_priv_data->block = 0;
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_all_device - set every device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ }
+}
+
+/**
+ * _scsih_block_io_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle != handle)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev,
+ "device_blocked, handle(0x%04x)\n", handle);
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex
+ * @ioc: per adapter object
+ * @sas_expander: the sas_node object
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. It is called when the expander is
+ * pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+ return;
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device =
+ mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+ }
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ expander_sibling =
+ mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, mpt3sas_port->remote_identify.sas_address);
+ _scsih_block_io_to_children_attached_to_ex(ioc,
+ expander_sibling);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all directly
+ * attached devices during a device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code is to initiate the device removal handshake protocol
+ * with controller firmware. This function will issue target reset
+ * using high priority request queue. It will send a sas iounit
+ * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
+ *
+ * This is designed to send multiple task management requests at the
+ * same time to the fifo. If the fifo is full, we will append the request
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed: handle(0x%04x)\n",
+ __func__, ioc->name, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational: handle(0x%04x)\n",
+ __func__, ioc->name,
+ handle));
+ return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+ sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = sas_device->sas_address;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, sas_address);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
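+ /* high priority smids are a limited resource; if none are free, park the
+ * target reset on the delayed list and resend it from a later completion
+ * (see _scsih_check_for_pending_tm)
+ */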
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+}
+
+/**
+ * _scsih_tm_tr_complete - target reset completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u16 smid_sas_ctrl;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n", __func__,
+ ioc->name));
+ return 1;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
+ if (!smid_sas_ctrl) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return 1;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
+ mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_sas_control_complete - completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the sas iounit control completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ return 1;
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send multiple task management requests at the
+ * same time to the fifo. If the fifo is full, we will append the request
+ * and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_check_for_pending_tm - check for pending task management
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * This will check the delayed target reset lists, and feed the
+ * next request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _tr_list *delayed_tr;
+
+ if (!list_empty(&ioc->delayed_tr_volume_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * _scsih_check_topo_delete_events - sanity check on topo events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This routine was added to better handle cable breaker events.
+ *
+ * This handles the case where the driver receives multiple expander
+ * add and delete events in a single shot. When there is a delete event,
+ * the routine will void any pending add events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventDataSasTopologyChangeList_t *local_event_data;
+ u16 expander_handle;
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle;
+
+ for (i = 0 ; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ if (expander_handle < ioc->sas_hba.num_phys) {
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+ return;
+ }
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ expander_handle);
+ _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
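+ /* walk the blocking_handles bitmap and block IO to each device found */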
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
+ } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+
+ if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data = fw_event->event_data;
+ if (local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
+ expander_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag\n", ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_delete_flag - setting volume delete flag
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * This returns nothing.
+ */
+static void
+_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device && raid_device->starget &&
+ raid_device->starget->hostdata) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), "
+ "wwid(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long) raid_device->wwid));
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
+ * @handle: input handle
+ * @a: handle for volume a
+ * @b: handle for volume b
+ *
+ * IR firmware only supports two raid volumes. The purpose of this
+ * routine is to store the volume handle in either a or b, provided the
+ * given input handle is non-zero and has not been recorded already.
+ */
+static void
+_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
+{
+ if (!handle || handle == *a || handle == *b)
+ return;
+ if (!*a)
+ *a = handle;
+ else if (!*b)
+ *b = handle;
+}
+
+/**
+ * _scsih_check_ir_config_unhide_events - check for UNHIDE events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This routine will send a target reset to the volume, followed by target
+ * resets to the PDs. This is called when a PD has been removed, or a
+ * volume has been deleted or removed. When the target reset is sent
+ * to the volume, the PD target resets need to be queued to start upon
+ * completion of the volume target reset.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u16 handle, volume_handle, a, b;
+ struct _tr_list *delayed_tr;
+
+ a = 0;
+ b = 0;
+
+ /* Volume Resets for Deleted or Removed */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_delete_flag(ioc, volume_handle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ /* Volume Resets for UNHIDE events */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ if (a)
+ _scsih_tm_tr_volume_send(ioc, a);
+ if (b)
+ _scsih_tm_tr_volume_send(ioc, b);
+
+ /* PD target resets */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
+ continue;
+ handle = le16_to_cpu(element->PhysDiskDevHandle);
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ clear_bit(handle, ioc->pd_handles);
+ if (!volume_handle)
+ _scsih_tm_tr_send(ioc, handle);
+ else if (volume_handle == a || volume_handle == b) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ BUG_ON(!delayed_tr);
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+ handle));
+ } else
+ _scsih_tm_tr_send(ioc, handle);
+ }
+}
+
+
+/**
+ * _scsih_check_volume_delete_events - set delete flag for volumes
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This handles the case when the cable connected to the entire volume is
+ * pulled. We take care of setting the deleted flag so normal IO will
+ * not be sent.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrVolume_t *event_data)
+{
+ u32 state;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+ state = le32_to_cpu(event_data->NewValue);
+ if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
+ MPI2_RAID_VOL_STATE_FAILED)
+ _scsih_set_volume_delete_flag(ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+}
+
+/**
+ * _scsih_flush_running_cmds - completing outstanding commands.
+ * @ioc: per adapter object
+ *
+ * Flushes out all pending scmd commands following a host reset,
+ * where all outstanding IO is dropped to the floor.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct scsi_cmnd *scmd;
+ u16 smid;
+ u16 count = 0;
+
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (!scmd)
+ continue;
+ count++;
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
+ ioc->name, count));
+}
+
+/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ *
+ * Supports protection types 1, 2 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+ Mpi25SCSIIORequest_t *mpi_request_3v =
+ (Mpi25SCSIIORequest_t *)mpi_request;
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+ break;
+ }
+
+ mpi_request_3v->EEDPBlockSize =
+ cpu_to_le16(scmd->device->sector_size);
+ mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
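+ /* asc 0x10: guard / app tag / ref tag check failed; ascq selects which */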
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
+ ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
+
+
+/**
+ * _scsih_qcmd_lck - main scsi request entry point
+ * @scmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * The callback index is set inside `ioc->scsi_io_cb_idx`.
+ *
+ * Returns 0 on success. If there's a failure, return either:
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
+ */
+static int
+_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_SCSI)
+ scsi_print_command(scmd);
+#endif
+
+ scmd->scsi_done = done;
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+
+ /* invalid device handle */
+ handle = sas_target_priv_data->handle;
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ /* device busy with task management */
+ } else if (sas_target_priv_data->tm_busy ||
+ sas_device_priv_data->block)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+
+ /* set tags */
+ if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
+ if (scmd->device->tagged_supported) {
+ if (scmd->device->ordered_tags)
+ mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(ioc, scmd, mpi_request);
+
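+ /* 32-byte CDBs need 4 additional CDB dwords beyond the first 16 bytes */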
+ if (scmd->cmd_len == 32)
+ mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ mpi_request->Control = cpu_to_le32(mpi_control);
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
+ mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+ mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ mpi_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
+ mpi_request->LUN);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+
+ if (mpi_request->DataLength) {
+ if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ } else
+ ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
+ MPI25_SCSIIO_IOFLAGS_FAST_PATH);
+ mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+
+ out:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+static DEF_SCSI_QCMD(_scsih_qcmd)
+
+
+/**
+ * _scsih_normalize_sense - normalize descriptor and fixed format sense data
+ * @sense_buffer: sense data returned by target
+ * @data: normalized skey/asc/ascq
+ *
+ * Return nothing.
+ */
+static void
+_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
+{
+ if ((sense_buffer[0] & 0x7F) >= 0x72) {
+ /* descriptor format */
+ data->skey = sense_buffer[1] & 0x0F;
+ data->asc = sense_buffer[2];
+ data->ascq = sense_buffer[3];
+ } else {
+ /* fixed format */
+ data->skey = sense_buffer[2] & 0x0F;
+ data->asc = sense_buffer[12];
+ data->ascq = sense_buffer[13];
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @smid: system request message index
+ *
+ * scsi_status - SCSI Status code returned from target device
+ * scsi_state - state info associated with SCSI_IO determined by ioc
+ * ioc_status - ioc supplied status info
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
+{
+ u32 response_info;
+ u8 *response_bytes;
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ u8 scsi_state = mpi_reply->SCSIState;
+ u8 scsi_status = mpi_reply->SCSIStatus;
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ if (log_info == 0x31170000)
+ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+ desc_ioc_state = "success";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc_ioc_state = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ desc_ioc_state = "scsi recovered error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ desc_ioc_state = "scsi invalid dev handle";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc_ioc_state = "scsi device not there";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc_ioc_state = "scsi data overrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc_ioc_state = "scsi data underrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc_ioc_state = "scsi io data error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc_ioc_state = "scsi protocol error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc_ioc_state = "scsi task terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc_ioc_state = "scsi residual mismatch";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc_ioc_state = "scsi task mgmt failed";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc_ioc_state = "scsi ioc terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc_ioc_state = "scsi ext terminated";
+ break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
+ default:
+ desc_ioc_state = "unknown";
+ break;
+ }
+
+ switch (scsi_status) {
+ case MPI2_SCSI_STATUS_GOOD:
+ desc_scsi_status = "good";
+ break;
+ case MPI2_SCSI_STATUS_CHECK_CONDITION:
+ desc_scsi_status = "check condition";
+ break;
+ case MPI2_SCSI_STATUS_CONDITION_MET:
+ desc_scsi_status = "condition met";
+ break;
+ case MPI2_SCSI_STATUS_BUSY:
+ desc_scsi_status = "busy";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE:
+ desc_scsi_status = "intermediate";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc_scsi_status = "intermediate condmet";
+ break;
+ case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc_scsi_status = "reservation conflict";
+ break;
+ case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+ desc_scsi_status = "command terminated";
+ break;
+ case MPI2_SCSI_STATUS_TASK_SET_FULL:
+ desc_scsi_status = "task set full";
+ break;
+ case MPI2_SCSI_STATUS_ACA_ACTIVE:
+ desc_scsi_status = "aca active";
+ break;
+ case MPI2_SCSI_STATUS_TASK_ABORTED:
+ desc_scsi_status = "task aborted";
+ break;
+ default:
+ desc_scsi_status = "unknown";
+ break;
+ }
+
+ desc_scsi_state[0] = '\0';
+ if (!scsi_state)
+ desc_scsi_state = " ";
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ strcat(desc_scsi_state, "response info ");
+ if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ strcat(desc_scsi_state, "state terminated ");
+ if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+ strcat(desc_scsi_state, "no status ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+ strcat(desc_scsi_state, "autosense failed ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+ strcat(desc_scsi_state, "autosense valid ");
+
+ scsi_print_command(scmd);
+
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ pr_warn(MPT3SAS_FMT
+ "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ ioc->name, scsi_bufflen(scmd), scmd->underflow,
+ scsi_get_resid(scmd));
+ pr_warn(MPT3SAS_FMT
+ "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ pr_warn(MPT3SAS_FMT
+ "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ ioc->name, desc_scsi_status,
+ scsi_status, desc_scsi_state, scsi_state);
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ pr_warn(MPT3SAS_FMT
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+ response_info = le32_to_cpu(mpi_reply->ResponseInfo);
+ response_bytes = (u8 *)&response_info;
+ _scsih_response_code(ioc, response_bytes[0]);
+ }
+}
+#endif
+
+/**
+ * _scsih_turn_on_fault_led - illuminate Fault LED
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data;
+ struct _sas_device *sas_device;
+ ssize_t sz;
+ unsigned long flags;
+
+ /* only handle non-raid devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+ event_reply = kzalloc(sz, GFP_KERNEL);
+ if (!event_reply) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ event_reply->Event =
+ cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ event_reply->MsgLength = sz/4;
+ event_reply->EventDataLength =
+ cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
+ event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
+ event_reply->EventData;
+ event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
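+ /* asc 0x5D: failure prediction threshold exceeded */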
+ event_data->ASC = 0x5D;
+ event_data->DevHandle = cpu_to_le16(handle);
+ event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
+ mpt3sas_ctl_add_to_event_log(ioc, event_reply);
+ kfree(event_reply);
+}
+
+/**
+ * _scsih_io_done - scsi request callback
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when using _scsih_qcmd.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ Mpi2SCSIIOReply_t *mpi_reply;
+ struct scsi_cmnd *scmd;
+ u16 ioc_status;
+ u32 xfer_cnt;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (scmd == NULL)
+ return 1;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+ scmd->result = DID_OK << 16;
+ goto out;
+ }
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ sas_device_priv_data->sas_target->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+
+ /* turning off TLR */
+ scsi_state = mpi_reply->SCSIState;
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code =
+ le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
+ if (!sas_device_priv_data->tlr_snoop_check) {
+ sas_device_priv_data->tlr_snoop_check++;
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
+ sas_device_priv_data->flags &=
+ ~MPT_DEVICE_TLR_ON;
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ scsi_status = mpi_reply->SCSIStatus;
+
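+ /* a zero-byte underrun carrying busy, reservation conflict or task set
+ * full status is treated as success so the SCSI status reaches the midlayer
+ */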
+ if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI2_SCSI_STATUS_BUSY ||
+ scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
+ ioc_status = MPI2_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(mpi_reply->SenseCount));
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ /* failure prediction threshold exceeded */
+ if (data.asc == 0x5D)
+ _scsih_smart_predicted_fault(ioc,
+ le16_to_cpu(mpi_reply->DevHandle));
+ mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
+ }
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_BUSY:
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (sas_device_priv_data->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ goto out;
+ }
+ if (log_info == 0x31110630) {
+ if (scmd->retries > 2) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_device_set_state(scmd->device,
+ SDEV_OFFLINE);
+ } else {
+ scmd->result = DID_SOFT_ERROR << 16;
+ scmd->device->expecting_cc_ua = 1;
+ }
+ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+
+ if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
+ break;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
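+ /* fake a check condition: ILLEGAL REQUEST, invalid command operation code */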
+ mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+ mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->sense_buffer[0] = 0x70;
+ scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ scmd->sense_buffer[12] = 0x20;
+ scmd->sense_buffer[13] = 0;
+ }
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
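+ /* fall through */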
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (response_code ==
+ MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+ (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+
+ }
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+ _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+#endif
+
+ out:
+
+ scsi_dma_unmap(scmd);
+
+ scmd->scsi_done(scmd);
+ return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, the firmware will send topology events for every
+ * device. It's possible that the handles may change from the previous
+ * setting, so this code keeps the handles updated whenever they change.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "updating handles for sas_host(0x%016llx)\n",
+ ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle);
+ if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+ attached_handle, i, link_rate);
+ }
+ out:
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_sas_host_add - create sas host object
+ * @ioc: per adapter object
+ *
+ * Creating host side data object, stored in ioc->sas_hba
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 ioc_status;
+ u16 sz;
+ u8 device_missing_delay;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
+ if (!ioc->sas_hba.num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ /* sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ ioc->io_missing_delay =
+ sas_iounit_pg1->IODeviceMissingDelay;
+ device_missing_delay =
+ sas_iounit_pg1->ReportDeviceMissingDelay;
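+ /* the report delay is in units of 16 seconds when the UNIT_16 flag is set */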
+ if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ ioc->device_missing_delay = (device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ ioc->device_missing_delay = device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ phy_pg0, ioc->sas_hba.parent_dev);
+ }
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ pr_info(MPT3SAS_FMT
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->name, ioc->sas_hba.handle,
+ (unsigned long long) ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
+
+ if (ioc->sas_hba.enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ ioc->sas_hba.enclosure_handle)))
+ ioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_expander_add - creating expander object
+ * @ioc: per adapter object
+ * @handle: expander handle
+ *
+ * Creating expander object, stored in ioc->sas_expander_list.
+ *
+ * Return 0 for success, else error.
+ */
+static int
+_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u32 ioc_status;
+ u16 parent_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port = NULL;
+
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return -1;
+
+ if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* handle out of order topology events */
+ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
+ if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+ != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if (sas_address_parent != ioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address_parent);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = _scsih_expander_add(ioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct _sas_node),
+ GFP_KERNEL);
+ if (!sas_expander) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.NumPhys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+
+ pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
+ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys)
+ goto out_fail;
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+
+ if ((mpt3sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_expander->enclosure_handle)))
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ _scsih_expander_node_add(ioc, sas_expander);
+ return 0;
+
+ out_fail:
+
+ if (mpt3sas_port)
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_address_parent);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpt3sas_expander_remove - removing expander object
+ * @ioc: per adapter object
+ * @sas_address: expander sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_expander)
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
+}
+
+/**
+ * _scsih_done - internal SCSI_IO callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internal generated SCSI_IO.
+ * The callback index passed is `ioc->scsih_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+
+
+
+#define MPT3_MAX_LUNS (255)
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ pr_err(MPT3SAS_FMT
+ "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc, (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* wide port handling ~ we need to handle the device only once, for the
+ * phy that is matched in sas device page zero
+ */
+ if (phy_number != sas_device_pg0.PhyNum)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
+
+}
+
+/**
+ * _scsih_add_device - creating sas device object
+ * @ioc: per adapter object
+ * @handle: sas device handle
+ * @phy_num: phy number end device attached to
+ * @is_pd: is this hidden raid component
+ *
+ * Creating end device object, stored in ioc->sas_device_list.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+ u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ u64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return -1;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT "device is not present handle(0x%04x)!!!\n",
+ ioc->name, handle);
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return -1;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+ sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ sas_target_priv_data->handle =
+ MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ kfree(sas_device);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_topology_change_event_debug - debug for topology event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->ExpStatus) {
+ case MPI2_EVENT_SAS_TOPO_ES_ADDED:
+ status_str = "add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
+ "start_phy(%02d), count(%d)\n",
+ le16_to_cpu(event_data->ExpanderDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPhyNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ status_str = "target add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
+ " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+ handle, status_str, link_rate, prev_link_rate);
+
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_topology_change_event - handle topology changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return 0;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, sas_address, handle,
+ phy_number, link_rate);
+
+
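+ /* no break: control falls through to MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED */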
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt3sas_expander_remove(ioc, sas_address);
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
+ reason_str = "sata init failure";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality complete";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "device status change: (%s)\n"
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+#endif
+
+/**
+ * _scsih_sas_device_status_change_event - handle device status change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _sas_device *sas_device;
+ u64 sas_address;
+ unsigned long flags;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data =
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ event_data);
+#endif
+
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
+ if (event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(event_data->SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ reason_str = "enclosure add";
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ reason_str = "enclosure remove";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx)"
+ " number slots(%d)\n", ioc->name, reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
+}
+#endif
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ fw_event->event_data);
+#endif
+}
+
+/**
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ u16 smid, handle;
+ u32 lun;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 termination_count;
+ u32 query_count;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ u16 ioc_status;
+ unsigned long flags;
+ int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ pr_info(MPT3SAS_FMT
+ "%s: enter: phy number(%d), width(%d)\n",
+ ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth);
+
+ _scsih_block_io_all_device(ioc);
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ mpi_reply = ioc->tm_cmds.reply;
+ broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
+ termination_count = 0;
+ query_count = 0;
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->shost_recovery)
+ goto out;
+ scmd = _scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+ continue;
+ /* skip hidden raid components */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ /* skip volumes */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_VOLUME)
+ continue;
+
+ handle = sas_device_priv_data->sas_target->handle;
+ lun = sas_device_priv_data->lun;
+ query_count++;
+
+ if (ioc->shost_recovery)
+ goto out;
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev,
+ "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
+ ioc_status, scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ continue;
+ }
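+ /* IO is still outstanding at the target, so issue ABORT_TASK */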
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->shost_recovery)
+ goto out_no_lock;
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
+ "scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
+ termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: loop back due to pending AEN\n",
+ ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->shost_recovery)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+}
+
+/**
+ * _scsih_sas_discovery_event - handle discovery events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+ pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ }
+#endif
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
+ _scsih_sas_host_add(ioc);
+ }
+}
+
+/**
+ * _scsih_ir_fastpath - turn on fastpath for IR physdisk
+ * @ioc: per adapter object
+ * @handle: device handle for physical disk
+ * @phys_disk_num: physical disk number
+ *
+ * Return 0 for success, else failure.
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc = 0;
+ u16 ioc_status;
+ u32 log_info;
+
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+ mpi_request->PhysDiskNum = phys_disk_num;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+ handle, phys_disk_num));
+
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+ "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+ log_info));
+ rc = -EFAULT;
+ } else
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: completed successfully\n",
+ ioc->name));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ */
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hidding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u64 wwid;
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ return;
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ if (!ioc->wait_for_discovery_to_complete) {
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ _scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_sas_volume_delete - delete volume
+ * @ioc: per adapter object
+ * @handle: volume device handle
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
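+ /* scsi_remove_target() can sleep, so call it after dropping the lock */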
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+/**
+ * _scsih_sas_pd_expose - expose pd component to /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u64 sas_address;
+ u16 parent_handle;
+
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device) {
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ return;
+ }
+
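+ /* device not known yet: read SAS Device Page 0 and add it */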
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ _scsih_add_device(ioc, handle, 0, 1);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ u8 element_type;
+ int i;
+ char *reason_str = NULL, *element_str = NULL;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+
+ pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
+ ioc->name, (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
+ "foreign" : "native", event_data->NumElements);
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ reason_str = "add";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ reason_str = "remove";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
+ reason_str = "no change";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ reason_str = "hide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ reason_str = "unhide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ reason_str = "volume_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ reason_str = "volume_deleted";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ reason_str = "pd_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ reason_str = "pd_deleted";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ element_type = le16_to_cpu(element->ElementFlags) &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+ switch (element_type) {
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
+ element_str = "volume";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
+ element_str = "phys disk";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
+ element_str = "hot spare";
+ break;
+ default:
+ element_str = "unknown element";
+ break;
+ }
+ pr_info("\t(%s:%s), vol handle(0x%04x), " \
+ "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
+ reason_str, le16_to_cpu(element->VolDevHandle),
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_ir_config_change_event - handle ir configuration change events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+#endif
+
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ if (ioc->shost_recovery) {
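+ /* during host reset, only re-enable fast path on hidden phys disks */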
+
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
+ _scsih_ir_fastpath(ioc,
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+ return;
+ }
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc,
+ le16_to_cpu(element->VolDevHandle));
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ _scsih_sas_pd_expose(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ _scsih_sas_pd_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ _scsih_sas_pd_delete(ioc, element);
+ break;
+ }
+ }
+}
+
+/**
+ * _scsih_sas_ir_volume_event - IR volume event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u64 wwid;
+ unsigned long flags;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u32 state;
+ int rc;
+ Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_VOL_STATE_MISSING:
+ case MPI2_RAID_VOL_STATE_FAILED:
+ _scsih_sas_volume_delete(ioc, handle);
+ break;
+
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+
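+ /* volume became active: add it if the driver does not know it yet */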
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ break;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ break;
+
+ case MPI2_RAID_VOL_STATE_INITIALIZING:
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_sas_ir_physical_disk_event - PD event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u16 handle, parent_handle;
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ u64 sas_address;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->PhysDiskDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+
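+ /* mark as hidden raid component and add the device if not present */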
+ set_bit(handle, ioc->pd_handles);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
+ break;
+
+ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrOperationStatus_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->RAIDOperation) {
+ case MPI2_EVENT_IR_RAIDOP_RESYNC:
+ reason_str = "resync";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
+ reason_str = "online capacity expansion";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
+ reason_str = "consistency check";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+ reason_str = "background init";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+ reason_str = "make data consistent";
+ break;
+ }
+
+ if (!reason_str)
+ return;
+
+ pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
+ "\thandle(0x%04x), percent complete(%d)\n",
+ ioc->name, reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
+}
+#endif
+
+/**
+ * _scsih_sas_ir_operation_status_event - handle RAID operation events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_device as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices - search for responding sas devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ continue;
+ _scsih_mark_responding_sas_device(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ le16_to_cpu(sas_device_pg0.Slot), handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
+ return;
+ }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices - search for responding raid volumes
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ out:
+ pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark an expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: expander device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ pr_info("\texpander(0x%016llx): handle changed" \
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders - search for responding expanders
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u64 sas_address;
+ u16 handle;
+
+ pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_expander_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (unsigned long long)sas_address);
+ _scsih_mark_responding_expander(ioc, sas_address, handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+}
+
+/**
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *sas_device_next;
+ struct _sas_node *sas_expander, *sas_expander_next;
+ struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
+ ioc->name);
+
+ /* removing unresponding end devices */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+ if (!sas_device->responding)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
+ sas_device->responding = 0;
+ }
+
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
+ ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
+ }
+ }
+
+ /* removing unresponding expanders */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
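+ /* move dead expanders to a local list under the lock, then remove them after it is dropped */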
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
+ sas_expander->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
+ }
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
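+/**
+ * _scsih_refresh_expander_links - refresh expander phy link changes
+ * @ioc: per adapter object
+ * @sas_expander: sas expander node object
+ * @handle: expander device handle
+ *
+ * Return nothing.
+ */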
+static void
+_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
+}
+
+/**
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
+ u8 retry_count;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ expander_device = mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
+ pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
+ ioc->name);
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 1)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (raid_device)
+ continue;
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ pr_info(MPT3SAS_FMT
+ "\tBEFORE adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
+ pr_info(MPT3SAS_FMT
+ "\tAFTER adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
+ ioc->name);
+
+ skip_to_sas:
+
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
+ ioc->name);
+
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
+ " ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 0)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+}
+/**
+ * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
+
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
+ break;
+ }
+}
+
+/**
+ * _mpt3sas_fw_work - delayed task for processing firmware events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ /* the queue is being flushed so ignore this event */
+ if (ioc->remove_host || fw_event->cancel_pending_work ||
+ ioc->pci_error_recovery) {
+ _scsih_fw_event_free(ioc, fw_event);
+ return;
+ }
+
+ switch (fw_event->event) {
+ case MPT3SAS_PROCESS_TRIGGER_DIAG:
+ mpt3sas_process_trigger_data(ioc, fw_event->event_data);
+ break;
+ case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
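+ /* wait out any host reset before pruning and rescanning devices */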
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ ssleep(1);
+ _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT3SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
+ missing_delay[1]);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "port enable: complete from worker thread\n",
+ ioc->name));
+ break;
+ case MPT3SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ _scsih_sas_discovery_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ _scsih_sas_broadcast_primitive_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ _scsih_sas_enclosure_dev_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_sas_ir_config_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_sas_ir_volume_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ _scsih_sas_ir_physical_disk_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ _scsih_sas_ir_operation_status_event(ioc, fw_event);
+ break;
+ }
+ _scsih_fw_event_free(ioc, fw_event);
+}
+
+/**
+ * _firmware_event_work - work queue wrapper for firmware event handling
+ * @work: the work_struct embedded in a fw_event_work object
+ * Context: user.
+ *
+ * Wrapper for the work thread handling firmware events; hands each event
+ * off to _mpt3sas_fw_work().
+ *
+ * Return nothing.
+ */
+
+static void
+_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event = container_of(work,
+ struct fw_event_work, work);
+
+ _mpt3sas_fw_work(fw_event->ioc, fw_event);
+}
+
+/**
+ * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are processed by _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 event;
+ u16 sz;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
+ mpt3sas_trigger_event(ioc, event, 0);
+
+ switch (event) {
+ /* handle these */
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ mpi_reply->EventData;
+
+ if (baen_data->Primitive !=
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 1;
+
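+ /*
+ * Only one broadcast primitive AEN is serviced at a time; any
+ * that arrive while one is busy are counted in
+ * broadcast_aen_pending.
+ */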
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return 1;
+ } else
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_topo_delete_events(ioc,
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_check_ir_config_unhide_events(ioc,
+ (Mpi2EventDataIrConfigChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_check_volume_delete_events(ioc,
+ (Mpi2EventDataIrVolume_t *)
+ mpi_reply->EventData);
+ break;
+
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ break;
+
+ default: /* ignore the rest */
+ return 1;
+ }
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
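+ /* EventDataLength is in 32-bit words; convert to a byte count */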
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(fw_event);
+ return 1;
+ }
+
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+ fw_event->ioc = ioc;
+ fw_event->VF_ID = mpi_reply->VF_ID;
+ fw_event->VP_ID = mpi_reply->VP_ID;
+ fw_event->event = event;
+ _scsih_fw_event_add(ioc, fw_event);
+ return 1;
+}
+
+/* shost template */
+static struct scsi_host_template scsih_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT3SAS_DRIVER_NAME,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .change_queue_type = _scsih_change_queue_type,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT3SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mpt3sas_host_attrs,
+ .sdev_attrs = mpt3sas_dev_attrs,
+};
+
+/**
+ * _scsih_expander_node_remove - removing expander device from list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port, *next;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mpt3sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (ioc->shost_recovery)
+ return;
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name,
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+
+ /* is IR firmware build loaded ? */
+ if (!ioc->ir_firmware)
+ return;
+
+ /* are there any volumes ? */
+ if (list_empty(&ioc->raid_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+ pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ pr_info(MPT3SAS_FMT
+ "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_remove - detach and remove the scsi host
+ * @pdev: PCI device struct
+ *
+ * Routine called when unloading the driver.
+ * Return nothing.
+ */
+static void __devexit
+_scsih_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct _sas_port *mpt3sas_port, *next_port;
+ struct _raid_device *raid_device, *next;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
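+ /*
+ * remove_host makes the event callback and the worker ignore any
+ * new firmware events, so the event queue can be flushed and the
+ * worker thread torn down safely.
+ */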
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+ /* free ports attached to the sas_host */
+ list_for_each_entry_safe(mpt3sas_port, next_port,
+ &ioc->sas_hba.sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ /* free phys attached to the sas_host */
+ if (ioc->sas_hba.num_phys) {
+ kfree(ioc->sas_hba.phy);
+ ioc->sas_hba.phy = NULL;
+ ioc->sas_hba.num_phys = 0;
+ }
+
+ sas_remove_host(shost);
+ mpt3sas_base_detach(ioc);
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+}
+
+/**
+ * _scsih_shutdown - routine called during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ _scsih_ir_shutdown(ioc);
+ mpt3sas_base_detach(ioc);
+}
+
+
+/**
+ * _scsih_probe_boot_devices - reports 1st device
+ * @ioc: per adapter object
+ *
+ * If specified in bios page 2, this routine reports the 1st device to
+ * scsi-ml or the sas transport for persistent boot device purposes.
+ * Please refer to function _scsih_determine_boot_device().
+ */
+static void
+_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u8 is_raid;
+ void *device;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u64 sas_address_parent;
+ u64 sas_address;
+ unsigned long flags;
+ int rc;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
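+ /*
+ * Boot device priority: the requested boot device first, then the
+ * requested alternate, then the current boot device.
+ */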
+ device = NULL;
+ is_raid = 0;
+ if (ioc->req_boot_device.device) {
+ device = ioc->req_boot_device.device;
+ is_raid = ioc->req_boot_device.is_raid;
+ } else if (ioc->req_alt_boot_device.device) {
+ device = ioc->req_alt_boot_device.device;
+ is_raid = ioc->req_alt_boot_device.is_raid;
+ } else if (ioc->current_boot_device.device) {
+ device = ioc->current_boot_device.device;
+ is_raid = ioc->current_boot_device.is_raid;
+ }
+
+ if (!device)
+ return;
+
+ if (is_raid) {
+ raid_device = device;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = device;
+ handle = sas_device->handle;
+ sas_address_parent = sas_device->sas_address_parent;
+ sas_address = sas_device->sas_address;
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc, sas_address,
+ sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+}
+
+/**
+ * _scsih_probe_raid - reporting raid volumes to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _raid_device *raid_device, *raid_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_device, raid_next,
+ &ioc->raid_device_list, list) {
+ if (raid_device->starget)
+ continue;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+}
+
+/**
+ * _scsih_probe_sas - reporting sas devices to sas transport
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *next;
+ unsigned long flags;
+
+ /* SAS Device List */
+ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
+ list) {
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ } else if (!sas_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_probe_devices - probing for devices
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 volume_mapping_flags;
+
+ if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+ return; /* return when IOC doesn't support initiator mode */
+
+ _scsih_probe_boot_devices(ioc);
+
+ if (ioc->ir_firmware) {
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
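+ /*
+ * The IR volume mapping mode from IOC Page 8 determines whether
+ * RAID volumes or bare SAS devices are reported to scsi-ml first.
+ */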
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ _scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
+ } else {
+ _scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
+ }
+ } else
+ _scsih_probe_sas(ioc);
+}
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implementation, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+ if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ if (disable_discovery > 0)
+ return;
+
+ ioc->start_scan = 1;
+ rc = mpt3sas_port_enable(ioc);
+
+ if (rc != 0)
+ pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with timeout (timeout=300s)\n",
+ ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
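+ /*
+ * start_scan stays set while firmware port enable is in progress;
+ * returning 0 tells the midlayer to poll again later.
+ */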
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+/**
+ * _scsih_probe - attach and add scsi host
+ * @pdev: PCI device struct
+ * @id: pci device id
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct Scsi_Host *shost;
+
+ shost = scsi_host_alloc(&scsih_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+
+ /* init local params */
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ INIT_LIST_HEAD(&ioc->list);
+ list_add_tail(&ioc->list, &mpt3sas_ioc_list);
+ ioc->shost = shost;
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id);
+ ioc->pdev = pdev;
+ ioc->scsi_io_cb_idx = scsi_io_cb_idx;
+ ioc->tm_cb_idx = tm_cb_idx;
+ ioc->ctl_cb_idx = ctl_cb_idx;
+ ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
+ ioc->transport_cb_idx = transport_cb_idx;
+ ioc->scsih_cb_idx = scsih_cb_idx;
+ ioc->config_cb_idx = config_cb_idx;
+ ioc->tm_tr_cb_idx = tm_tr_cb_idx;
+ ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
+ ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
+ ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* misc semaphores and spin locks */
+ mutex_init(&ioc->reset_in_progress_mutex);
+ spin_lock_init(&ioc->ioc_reset_in_progress_lock);
+ spin_lock_init(&ioc->scsi_lookup_lock);
+ spin_lock_init(&ioc->sas_device_lock);
+ spin_lock_init(&ioc->sas_node_lock);
+ spin_lock_init(&ioc->fw_event_lock);
+ spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->diag_trigger_lock);
+
+ INIT_LIST_HEAD(&ioc->sas_device_list);
+ INIT_LIST_HEAD(&ioc->sas_device_init_list);
+ INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ INIT_LIST_HEAD(&ioc->raid_device_list);
+ INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+
+ /* init shost parameters */
+ shost->max_cmd_len = 32;
+ shost->max_lun = max_lun;
+ shost->transportt = mpt3sas_transport_template;
+ shost->unique_id = ioc->id;
+
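+ /*
+ * Honor the max_sectors module parameter, clamped to the 64..32767
+ * range and rounded down to an even value.
+ */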
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "default value of 32767.\n", ioc->name,
+ max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ pr_info(MPT3SAS_FMT
+ "The max_sectors value is set to %d\n",
+ ioc->name, shost->max_sectors);
+ }
+ }
+
+ if ((scsi_add_host(shost, &pdev->dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ list_del(&ioc->list);
+ goto out_add_shost_fail;
+ }
+
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask > 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ /* event thread */
+ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
+ "fw_event%d", ioc->id);
+ ioc->firmware_event_thread = create_singlethread_workqueue(
+ ioc->firmware_event_name);
+ if (!ioc->firmware_event_thread) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_thread_fail;
+ }
+
+ ioc->is_driver_loading = 1;
+ if ((mpt3sas_base_attach(ioc))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_attach_fail;
+ }
+ scsi_scan_host(shost);
+ return 0;
+
+ out_attach_fail:
+ destroy_workqueue(ioc->firmware_event_thread);
+ out_thread_fail:
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ out_add_shost_fail:
+ scsi_host_put(shost);
+ return -ENODEV;
+}
+
+#ifdef CONFIG_PM
+/**
+ * _scsih_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state;
+
+ mpt3sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_save_state(pdev);
+ mpt3sas_base_free_resources(ioc);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * _scsih_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pdev = pdev;
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ return r;
+
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
+ mpt3sas_base_start_watchdog(ioc);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ ioc->pci_error_recovery = 1;
+ scsi_block_requests(ioc->shost);
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ ioc->pci_error_recovery = 1;
+ mpt3sas_base_stop_watchdog(ioc);
+ _scsih_flush_running_cmds(ioc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pci_error_recovery = 0;
+ ioc->pdev = pdev;
+ pci_restore_state(pdev);
+ rc = mpt3sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt3sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* raid transport support */
+static struct raid_function_template mpt3sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
+
+static struct pci_driver scsih_driver = {
+ .name = MPT3SAS_DRIVER_NAME,
+ .id_table = scsih_pci_table,
+ .probe = _scsih_probe,
+ .remove = __devexit_p(_scsih_remove),
+ .shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
+#ifdef CONFIG_PM
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
+#endif
+};
+
+/**
+ * _scsih_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+static int __init
+_scsih_init(void)
+{
+ int error;
+
+ mpt_ids = 0;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+/* raid transport support */
+ mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+
+ mpt3sas_base_initialize_callback_handler();
+
+ /* queuecommand callback handler */
+ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task management callback handler */
+ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ port_enable_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
+
+ tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ mpt3sas_ctl_init();
+
+ error = pci_register_driver(&scsih_driver);
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * _scsih_exit - exit point for this driver (when it is a module).
+ *
+ * Return nothing.
+ */
+static void __exit
+_scsih_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_ctl_exit();
+
+ pci_unregister_driver(&scsih_driver);
+
+ mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_cb_idx);
+ mpt3sas_base_release_callback_handler(base_cb_idx);
+ mpt3sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt3sas_base_release_callback_handler(transport_cb_idx);
+ mpt3sas_base_release_callback_handler(scsih_cb_idx);
+ mpt3sas_base_release_callback_handler(config_cb_idx);
+ mpt3sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+/* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+}
+
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
new file mode 100644
index 00000000000..87ca2b7287c
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -0,0 +1,2128 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either the hba or an expander device based on sas address,
+ * then return the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate - convert firmware link rate to sas_transport form
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI25_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ struct sas_identify *identify)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 device_info;
+ u32 ioc_status;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
+ /* device_type */
+ switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI2_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /* target_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_done - internal transport layer callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ *
+ * Callback handler when sending internal generated transport cmds.
+ * The callback index passed is `ioc->transport_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->transport_cmds.smid != smid)
+ return 1;
+ ioc->transport_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->transport_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->transport_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->transport_cmds.done);
+ return 1;
+}
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ &data_out_dma);
+
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
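+ /*
+ * A single coherent DMA buffer holds both the SMP request and the
+ * expected reply; the reply area starts right after the request.
+ */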
+ data_in_dma = data_out_dma + sizeof(struct rep_manu_request);
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - send to sas_addr(0x%016llx)\n",
+ ioc->name, (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
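+ /*
+ * component_id is carried big-endian in the SMP reply;
+ * assemble it byte-by-byte into host order.
+ */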
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+
+/**
+ * _transport_delete_port - helper function for removing a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt3sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function for removing a single phy from a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt3sas_phy->phy_id);
+
+ list_del(&mpt3sas_phy->port_siblings);
+ mpt3sas_port->num_phys--;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function for adding a single phy to a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
+ struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt3sas_phy->phy_id);
+
+ list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @sas_address: sas address of the device/expander where the phy needs to be added
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt3sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
+ return;
+ }
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
+{
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch != mpt3sas_phy)
+ continue;
+
+ if (mpt3sas_port->num_phys == 1)
+ _transport_delete_port(ioc, mpt3sas_port);
+ else
+ _transport_delete_phy(ioc, mpt3sas_port,
+ mpt3sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * If any phy for this sas address still belongs to an existing port,
+ * detach it before the new port is created.
+ */
+static void
+_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+ continue;
+ if (sas_node->phy[i].phy_belongs_to_port == 1)
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ &sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpt3sas_transport_port_add - insert port to the list
+ * @ioc: per adapter object
+ * @handle: handle of attached device
+ * @sas_address: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new port object to the sas_node->sas_port_list.
+ *
+ * Returns mpt3sas_port.
+ */
+struct _sas_port *
+mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 sas_address)
+{
+ struct _sas_phy *mpt3sas_phy, *next;
+ struct _sas_port *mpt3sas_port;
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct sas_rphy *rphy;
+ int i;
+ struct sas_port *port;
+
+ mpt3sas_port = kzalloc(sizeof(struct _sas_port),
+ GFP_KERNEL);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mpt3sas_port->port_list);
+ INIT_LIST_HEAD(&mpt3sas_port->phy_list);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!sas_node) {
+ pr_err(MPT3SAS_FMT
+ "%s: Could not find parent sas_address(0x%016llx)!\n",
+ ioc->name, __func__, (unsigned long long)sas_address);
+ goto out_fail;
+ }
+
+ if ((_transport_set_identify(ioc, handle,
+ &mpt3sas_port->remote_identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ _transport_sanity_check(ioc, sas_node,
+ mpt3sas_port->remote_identify.sas_address);
+
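+ /*
+ * Collect every parent phy attached to this sas address into the
+ * port, forming a wide port when more than one phy matches.
+ */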
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address !=
+ mpt3sas_port->remote_identify.sas_address)
+ continue;
+ list_add_tail(&sas_node->phy[i].port_siblings,
+ &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ }
+
+ if (!mpt3sas_port->num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &port->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ sas_port_add_phy(port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+ }
+
+ mpt3sas_port->port = port;
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ rphy = sas_end_device_alloc(port);
+ else
+ rphy = sas_expander_alloc(port,
+ mpt3sas_port->remote_identify.device_type);
+
+ rphy->identify = mpt3sas_port->remote_identify;
+ if ((sas_rphy_add(rphy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->rphy = rphy;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* fill in report manufacture */
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ _transport_expander_report_manufacture(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy));
+ return mpt3sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt3sas_phy->port_siblings);
+ kfree(mpt3sas_port);
+ return NULL;
+}
+
+/**
+ * mpt3sas_transport_port_remove - remove port from the list
+ * @ioc: per adapter object
+ * @sas_address: sas address of attached device
+ * @sas_address_parent: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Removes the port object from the parent sas_node's sas_port_list and
+ * frees the associated memory.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent)
+{
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_node *sas_node;
+ u8 found = 0;
+ struct _sas_phy *mpt3sas_phy, *next_phy;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address_parent);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ found = 1;
+ list_del(&mpt3sas_port->port_list);
+ goto out;
+ }
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ memset(&sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ list_for_each_entry_safe(mpt3sas_phy, next_phy,
+ &mpt3sas_port->phy_list, port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ list_del(&mpt3sas_phy->port_siblings);
+ }
+ sas_port_delete(mpt3sas_port->port);
+ kfree(mpt3sas_port);
+}
+
+/**
+ * mpt3sas_transport_add_host_phy - report sas_host phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @phy_pg0: sas phy page 0
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+
+/**
+ * mpt3sas_transport_add_expander_phy - report expander phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @expander_pg1: expander page 1
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_update_links - refresh phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct _sas_phy *mpt3sas_phy;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ mpt3sas_phy = &sas_node->phy[phy_number];
+ mpt3sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ _transport_set_identify(ioc, handle,
+ &mpt3sas_phy->remote_identify);
+ _transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ } else
+ memset(&mpt3sas_phy->remote_identify, 0,
+ sizeof(struct sas_identify));
+
+ if (mpt3sas_phy->phy)
+ mpt3sas_phy->phy->negotiated_linkrate =
+ _transport_convert_phy_link_rate(link_rate);
+
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "refresh: parent sas_addr(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ (unsigned long long)sas_address,
+ link_rate, phy_number, handle, (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+}
+
+static inline void *
+phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return shost_priv(shost);
+}
+
+static inline void *
+rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return shost_priv(shost);
+}
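+
+/*
+ * Both helpers above recover the per adapter MPT3SAS_ADAPTER object from a
+ * transport object by walking up to the owning Scsi_Host and returning its
+ * private data (shost_priv).
+ */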
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
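+
+/*
+ * The SMP REPORT PHY ERROR LOG request and reply above are laid out
+ * back-to-back in a single DMA-coherent buffer: the request is placed at
+ * offset 0 and the reply is read back starting at
+ * sizeof(struct phy_error_log_request); see
+ * _transport_get_expander_phy_error_log() below.
+ */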
+
+/**
+ * _transport_get_expander_phy_error_log - return expander counters
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_error_log_request) +
+ sizeof(struct phy_error_log_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_error_log_request),
+ data_out_dma + sizeof(struct phy_error_log_request),
+ sizeof(struct phy_error_log_reply));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ phy_error_log_reply = data_out +
+ sizeof(struct phy_error_log_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - function_result(%d)\n",
+ ioc->name, phy_error_log_reply->function_result));
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_get_linkerrors - return phy counters for both hba and expanders
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasPhyPage1_t phy_pg1;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_get_expander_phy_error_log(ioc, phy);
+
+ /* get hba phy error logs */
+ if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
+ phy->number))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.PhyResetProblemCount);
+ return 0;
+}
+
+/**
+ * _transport_get_enclosure_identifier - return enclosure logical id
+ * @rphy: The sas transport rphy object
+ * @identifier: pointer filled in with the enclosure logical id
+ *
+ * Obtain the enclosure logical id for the attached device.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _transport_get_bay_identifier - return bay identifier
+ * @rphy: The sas transport rphy object
+ *
+ * Returns the slot id for a device that resides inside an enclosure.
+ */
+static int
+_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
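+
+/*
+ * As with the error log frames above, the PHY CONTROL request and reply are
+ * carried in one DMA-coherent buffer, request first and reply immediately
+ * after it.  The programmed min/max physical link rates live in the upper
+ * nibble of their respective bytes, hence the "<< 4" when they are filled
+ * in from phy->minimum_linkrate/phy->maximum_linkrate.
+ */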
+
+/**
+ * _transport_expander_phy_control - expander phy control
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ * @phy_operation: SMP_PHY_CONTROL_LINK_RESET, SMP_PHY_CONTROL_HARD_RESET or
+ *   SMP_PHY_CONTROL_DISABLE
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_control_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset - reset a phy
+ * @phy: The sas phy object
+ * @hard_reset: when nonzero, issue a hard reset instead of a link reset
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only supports sas_host direct-attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ pr_err(MPT3SAS_FMT "discovery is active on " \
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
+
+ if (enable)
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ _transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only supports sas_host direct-attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return _transport_expander_phy_control(ioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (phy->number != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ phy->number)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req: bsg request carrying the smp frame (response in req->next_rq)
+ *
+ * This is used primarily by smp_utils.
+ * Example:
+ * smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ int rc, i;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
+ u16 wait_state_count;
+ struct request *rsp = req->next_rq;
+ struct bio_vec *bvec = NULL;
+
+ if (!rsp) {
+ pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+ if (rc)
+ return rc;
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
+ __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ /* Check if the request is split across multiple segments */
+ if (req->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, i) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec->bv_page) + bvec->bv_offset,
+ bvec->bv_len);
+ offset += bvec->bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (rsp->bio->bi_vcnt > 1) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto unmap;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto unmap;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(ioc->sas_hba.sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ psge = &mpi_request->SGL;
+
+ if (req->bio->bi_vcnt > 1)
+ ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
+ pci_dma_in, (blk_rq_bytes(rsp) + 4));
+ else
+ ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
+ dma_addr_in, (blk_rq_bytes(rsp) + 4));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - sending smp request\n", ioc->name, __func__));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - complete\n", ioc->name, __func__));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - reply data transfer size(%d)\n",
+ ioc->name, __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+ req->sense_len = sizeof(*mpi_reply);
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (rsp->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, i) {
+ if (bytes_to_copy <= bvec->bv_len) {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bvec->bv_len);
+ bytes_to_copy -= bvec->bv_len;
+ }
+ offset += bvec->bv_len;
+ }
+ }
+ } else {
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+struct sas_function_template mpt3sas_transport_functions = {
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
+ .smp_handler = _transport_smp_handler,
+};
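+
+/*
+ * The callbacks above are invoked by the SAS transport class, typically in
+ * response to sysfs accesses (link error counters, phy reset/enable, link
+ * rate changes) and bsg SMP passthrough requests.
+ */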
+
+struct scsi_transport_template *mpt3sas_transport_template;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
new file mode 100644
index 00000000000..da6c5f25749
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -0,0 +1,434 @@
+/*
+ * This module provides common API to set Diagnostic trigger for MPT
+ * (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notify app
+ * @ioc: per adapter object
+ * @event_data: event data for the trigger that fired
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 sz, event_data_sz;
+ unsigned long flags;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+ mpi_reply = kzalloc(sz, GFP_KERNEL);
+ if (!mpi_reply)
+ goto out;
+ mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+ event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+ mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+ memcpy(&mpi_reply->EventData, event_data,
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: add to driver event log\n",
+ ioc->name, __func__));
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ kfree(mpi_reply);
+ out:
+
+ /* clearing the diag_trigger_active flag */
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: clearing diag_trigger_active flag\n",
+ ioc->name, __func__));
+ ioc->diag_trigger_active = 0;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data: event data for the trigger that fired
+ */
+void
+mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ u8 issue_reset = 0;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ /* release the diag buffer trace */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: release trace diag buffer\n", ioc->name, __func__));
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ _mpt3sas_raise_sigio(ioc, event_data);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_master - Master trigger handler
+ * @ioc: per adapter object
+ * @trigger_bitmask: master trigger bitmask that fired
+ *
+ */
+void
+mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ unsigned long flags;
+ u8 found_match = 0;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ goto by_pass_checks;
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ by_pass_checks:
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - trigger_bitmask = 0x%08x\n",
+ ioc->name, __func__, trigger_bitmask));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MASTER;
+ event_data.u.master.MasterData = trigger_bitmask;
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ _mpt3sas_raise_sigio(ioc, &event_data);
+ else
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_event - Event trigger handler
+ * @ioc: per adapter object
+ * @event: firmware event code
+ * @log_entry_qualifier: log entry qualifier (Log Entry Added Event only)
+ *
+ */
+void
+mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_EVENT_TRIGGER_T *event_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ ioc->name, __func__, event, log_entry_qualifier));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ event_trigger = ioc->diag_trigger_event.EventTriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_event.ValidEntries
+ && !found_match; i++, event_trigger++) {
+ if (event_trigger->EventValue != event)
+ continue;
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ if (event_trigger->LogEntryQualifier ==
+ log_entry_qualifier)
+ found_match = 1;
+ continue;
+ }
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
+ event_data.u.event.EventValue = event;
+ event_data.u.event.LogEntryQualifier = log_entry_qualifier;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_scsi - SCSI trigger handler
+ * @ioc: per adapter object
+ * @sense_key: SCSI sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ *
+ */
+void
+mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
+ u8 ascq)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ ioc->name, __func__, sense_key, asc, ascq));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries
+ && !found_match; i++, scsi_trigger++) {
+ if (scsi_trigger->SenseKey != sense_key)
+ continue;
+ if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc))
+ continue;
+ if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
+ event_data.u.scsi.SenseKey = sense_key;
+ event_data.u.scsi.ASC = asc;
+ event_data.u.scsi.ASCQ = ascq;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_mpi - MPI trigger handler
+ * @ioc: per adapter object
+ * @ioc_status: MPI IOCStatus
+ * @loginfo: MPI IocLogInfo
+ *
+ */
+void
+mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_MPI_TRIGGER_T *mpi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ ioc->name, __func__, ioc_status, loginfo));
+
+ /* don't send trigger if a trigger is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries
+ && !found_match; i++, mpi_trigger++) {
+ if (mpi_trigger->IOCStatus != ioc_status)
+ continue;
+ if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF ||
+ mpi_trigger->IocLogInfo == loginfo))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
+ event_data.u.mpi.IOCStatus = ioc_status;
+ event_data.u.mpi.IocLogInfo = loginfo;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
new file mode 100644
index 00000000000..a10c3090739
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -0,0 +1,193 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to set Diagnostic triggers for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+ /* Diagnostic Trigger Configuration Data Structures */
+
+#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+
+/* limitation on number of entries */
+#define NUM_VALID_ENTRIES (20)
+
+/* trigger types */
+#define MPT3SAS_TRIGGER_MASTER (1)
+#define MPT3SAS_TRIGGER_EVENT (2)
+#define MPT3SAS_TRIGGER_SCSI (3)
+#define MPT3SAS_TRIGGER_MPI (4)
+
+/* trigger names */
+#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master"
+#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event"
+#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi"
+#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi"
+
+/* master trigger bitmask */
+#define MASTER_TRIGGER_FW_FAULT (0x00000001)
+#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002)
+#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
+#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
+
+/* fake firmware event for trigger */
+#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
+
+/**
+ * MasterTrigger is a single U32 passed to/from sysfs.
+ *
+ * Bit Flags (enables) include:
+ * 1. FW Faults
+ * 2. Adapter Reset issued by driver
+ * 3. TMs
+ * 4. Device Remove Event sent by FW
+ */
+
+struct SL_WH_MASTER_TRIGGER_T {
+ uint32_t MasterData;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element
+ * @EventValue: Event Code to trigger on
+ * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added Event only)
+ *
+ * Defines an event that should induce a DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_EVENT_TRIGGER_T {
+ uint16_t EventValue;
+ uint16_t LogEntryQualifier;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of Event Triggers to be monitored for.
+ * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this
+ * structure.
+ * @EventTriggerEntry: List of Event trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set Event Triggers
+ * in the Linux Driver.
+ */
+
+struct SL_WH_EVENT_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element
+ * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for
+ * wildcard.
+ * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard
+ * @SenseKey: SCSI Sense Key
+ *
+ * Defines a sense key (single or many variants) that should induce a
+ * DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_SCSI_TRIGGER_T {
+ U8 ASCQ;
+ U8 ASC;
+ U8 SenseKey;
+ U8 Reserved;
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of SCSI sense codes that should trigger a DIAG_SERVICE event when
+ * observed.
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this
+ * structure.
+ * @SCSITriggerEntry: List of SCSI Sense Code trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set SCSI Sense Code
+ * Triggers in the Linux Driver.
+ */
+struct SL_WH_SCSI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element
+ * @IOCStatus: MPI IOCStatus
+ * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard
+ *
+ * Defines an MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER
+ * driver event if observed.
+ */
+struct SL_WH_MPI_TRIGGER_T {
+ uint16_t IOCStatus;
+ uint16_t Reserved;
+ uint32_t IocLogInfo;
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE
+ * event when observed.
+ * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this
+ * structure.
+ * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set MPI Error Triggers
+ * in the Linux Driver.
+ */
+struct SL_WH_MPI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger
+ * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX)
+ * @u: trigger condition that caused trigger to be sent
+ */
+struct SL_WH_TRIGGERS_EVENT_DATA_T {
+ uint32_t trigger_type;
+ union {
+ struct SL_WH_MASTER_TRIGGER_T master;
+ struct SL_WH_EVENT_TRIGGER_T event;
+ struct SL_WH_SCSI_TRIGGER_T scsi;
+ struct SL_WH_MPI_TRIGGER_T mpi;
+ } u;
+};
+#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */
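The kernel-doc comments above describe binary structures exchanged with the driver through per-host sysfs attributes named by the *_FILE_NAME defines. The sketch below is illustrative only and not part of the patch: the attribute location under /sys/class/scsi_host/hostN/ and the example IOCStatus value are assumptions, and the local struct definitions simply mirror SL_WH_MPI_TRIGGER_T / SL_WH_MPI_TRIGGERS_T from this header.

/* Illustrative userspace sketch (not part of the patch). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct sl_wh_mpi_trigger {		/* mirrors SL_WH_MPI_TRIGGER_T */
	uint16_t IOCStatus;
	uint16_t Reserved;
	uint32_t IocLogInfo;
};

struct sl_wh_mpi_triggers {		/* mirrors SL_WH_MPI_TRIGGERS_T */
	uint32_t ValidEntries;
	struct sl_wh_mpi_trigger MPITriggerEntry[20];
};

int main(void)
{
	struct sl_wh_mpi_triggers t;
	int fd;

	memset(&t, 0, sizeof(t));
	t.ValidEntries = 1;
	t.MPITriggerEntry[0].IOCStatus = 0x004b;	/* example value */
	t.MPITriggerEntry[0].IocLogInfo = 0xFFFFFFFF;	/* wildcard loginfo */

	/* hypothetical host number; adjust to the real SCSI host */
	fd = open("/sys/class/scsi_host/host0/diag_trigger_mpi", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &t, sizeof(t)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Passing a fixed-size binary blob like this keeps the sysfs get/set handlers to a straight copy in both directions, at the cost of requiring userspace to know the structure layout defined in this header.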
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8f7eb4f2114..487aa6f9741 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
#define SPI_ADDR_VLD_94XX (1U << 1)
#define SPI_CTRL_SpiStart_94XX (1U << 0)
-#define mv_ffc(x) ffz(x)
-
static inline int
mv_ffc64(u64 v)
{
- int i;
- i = mv_ffc((u32)v);
- if (i >= 0)
- return i;
- i = mv_ffc((u32)(v>>32));
-
- if (i != 0)
- return 32 + i;
-
- return -1;
+ u64 x = ~v;
+ return x ? __ffs64(x) : -1;
}
#define r_reg_set_enable(i) \
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index c04a4f5b597..da249553858 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
#define DEV_IS_EXPANDER(type) \
((type == EDGE_DEV) || (type == FANOUT_DEV))
-#define bit(n) ((u32)1 << n)
+#define bit(n) ((u64)1 << n)
#define for_each_phy(__lseq_mask, __mc, __lseq) \
for ((__mc) = (__lseq_mask), (__lseq) = 0; \
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index d4ed9eb5265..43754176a7b 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -97,9 +97,37 @@ struct osd_dev_handle {
static DEFINE_IDA(osd_minor_ida);
+/*
+ * scsi sysfs attribute operations
+ */
+static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+ return sprintf(buf, "%s\n", ould->odi.osdname);
+}
+
+static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+
+ memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
+ return ould->odi.systemid_len;
+}
+
+static struct device_attribute osd_uld_attrs[] = {
+ __ATTR(osdname, S_IRUGO, osdname_show, NULL),
+ __ATTR(systemid, S_IRUGO, systemid_show, NULL),
+ __ATTR_NULL,
+};
+
static struct class osd_uld_class = {
.owner = THIS_MODULE,
.name = "scsi_osd",
+ .dev_attrs = osd_uld_attrs,
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c28215f8be..83d798428c1 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1615,8 +1615,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
*/
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
if (IS_FWI2_CAPABLE(fcport->vha->hw))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2f9bddd3c61..9f34dedcdad 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -219,7 +219,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
break;
}
exit_fcp_prio_cfg:
- bsg_job->job_done(bsg_job);
+ if (!ret)
+ bsg_job->job_done(bsg_job);
return ret;
}
@@ -741,9 +742,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
ql_dbg(ql_dbg_user, vha, 0x70c0,
@@ -761,9 +761,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
new_config, elreq.options);
if (rval) {
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -795,9 +794,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"MPI reset failed.\n");
}
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -812,34 +810,27 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x702c,
"Vendor request %s failed.\n", type);
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
-
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
-
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
- sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len =
- bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- bsg_job->reply->result = DID_OK;
+ bsg_job->reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
}
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+
+done_free_dma_rsp:
dma_free_coherent(&ha->pdev->dev, rsp_data_len,
rsp_data, rsp_data_dma);
done_free_dma_req:
@@ -853,6 +844,8 @@ done_unmap_req_sg:
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -877,16 +870,15 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -976,8 +968,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -986,7 +977,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
bsg_job->reply->result = DID_OK;
}
- bsg_job->job_done(bsg_job);
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
done_free_fw_buf:
@@ -996,6 +986,8 @@ done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1163,8 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7044,
@@ -1184,8 +1175,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
}
}
- bsg_job->job_done(bsg_job);
-
done_unmap_sg:
if (mgmt_b)
dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
@@ -1200,6 +1189,8 @@ done_unmap_sg:
exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1276,9 +1267,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
fcport->port_name[3], fcport->port_name[4],
fcport->port_name[5], fcport->port_name[6],
fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
-
+ rval = (DID_ERROR << 16);
} else {
if (!port_param->mode) {
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
@@ -1292,9 +1281,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -1887,8 +1876,6 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
return qla24xx_process_bidir_cmd(bsg_job);
default:
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -ENOSYS;
}
}
@@ -1919,8 +1906,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_job->request->msgcode);
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -EBUSY;
}
@@ -1943,7 +1928,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
- bsg_job->reply->result = ret;
break;
}
return ret;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 44efe3cc79e..53f9e492f9d 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
+ * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x114f | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
@@ -526,8 +526,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
- reg = (struct device_reg_25xxmq *) ((void *)
- ha->mqiobase + cnt * QLA_QUE_PAGE);
+ reg = (struct device_reg_25xxmq __iomem *)
+ (ha->mqiobase + cnt * QLA_QUE_PAGE);
que_idx = cnt * 4;
mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
@@ -2268,7 +2268,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (!cnt) {
nxt = fw->code_ram;
- nxt += sizeof(fw->code_ram),
+ nxt += sizeof(fw->code_ram);
nxt += (ha->fw_memory_size - 0x100000 + 1);
goto copy_queue;
} else
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9725bf5527..6e7727f46d4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2486,9 +2486,9 @@ struct bidi_statistics {
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable || IS_QLA83XX(ha)) ? \
- ((void *)(ha->mqiobase) +\
+ ((device_reg_t __iomem *)(ha->mqiobase) +\
(QLA_QUE_PAGE * id)) :\
- ((void *)(ha->iobase)))
+ ((device_reg_t __iomem *)(ha->iobase)))
#define QLA_REQ_QUE_ID(tag) \
((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
#define QLA_DEFAULT_QUE_QOS 5
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 59524aa0ab3..be6d61a89ed 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1092,6 +1092,27 @@ struct device_reg_24xx {
uint32_t unused_6[2]; /* Gap. */
uint32_t iobase_sdata;
};
+/* RISC-RISC semaphore register PCI offset */
+#define RISC_REGISTER_BASE_OFFSET 0x7010
+#define RISC_REGISTER_WINDOW_OFFET 0x6
+
+/* RISC-RISC semaphore/flag register (risc address 0x7016) */
+
+#define RISC_SEMAPHORE 0x1UL
+#define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16)
+#define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL)
+#define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE)
+
+#define RISC_SEMAPHORE_FORCE 0x8000UL
+#define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16)
+#define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL)
+#define RISC_SEMAPHORE_FORCE_SET \
+ (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE)
+
+/* RISC semaphore timeouts (ms) */
+#define TIMEOUT_SEMAPHORE 2500
+#define TIMEOUT_SEMAPHORE_FORCE 2000
+#define TIMEOUT_TOTAL_ELAPSED 4500
/* Trace Control *************************************************************/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6acb39785a4..2411d1a12b2 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -416,7 +416,7 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
-extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -598,7 +598,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
/* ISP 8021 hardware related */
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
-extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index f4e4bd7c3f4..01efc0e9cc3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -218,6 +218,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
WWN_SIZE);
+ fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
+ FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
@@ -1930,6 +1933,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_11:
list[i].fp_speed = PORT_SPEED_8GB;
break;
+ case BIT_10:
+ list[i].fp_speed = PORT_SPEED_16GB;
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 48fca47384b..563eee3fa92 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -429,7 +429,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
-int
+static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -997,7 +997,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-int
+static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
uint16_t mb[4] = {0x1010, 0, 1, 0};
@@ -1095,6 +1095,83 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ha->isp_ops->enable_intrs(ha);
}
+static void
+qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+
+}
+
+static void
+qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+}
+
+static void
+qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t wd32 = 0;
+ uint delta_msec = 100;
+ uint elapsed_msec = 0;
+ uint timeout_msec;
+ ulong n;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+ return;
+
+attempt:
+ timeout_msec = TIMEOUT_SEMAPHORE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (wd32 & RISC_SEMAPHORE)
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (!(wd32 & RISC_SEMAPHORE))
+ goto force;
+
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ goto acquired;
+
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
+ timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (wd32 & RISC_SEMAPHORE_FORCE)
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
+
+ goto attempt;
+
+force:
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
+
+acquired:
+ return;
+}
+
/**
* qla24xx_reset_chip() - Reset ISP24xx chip.
* @ha: HA context
@@ -1113,6 +1190,8 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
+ qla25xx_manipulate_risc_semaphore(vha);
+
/* Perform RISC reset. */
qla24xx_reset_risc(vha);
}
@@ -1888,10 +1967,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
qla2x00_init_response_q_entries(rsp);
}
- spin_lock(&ha->vport_slock);
-
- spin_unlock(&ha->vport_slock);
-
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
ha->tgt.atio_ring_index = 0;
/* Initialize ATIO queue entries */
@@ -1971,6 +2046,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
"Waiting for LIP to complete.\n");
do {
+ memset(state, -1, sizeof(state));
rval = qla2x00_get_firmware_state(vha, state);
if (rval == QLA_SUCCESS) {
if (state[0] < FSTATE_LOSS_OF_SYNC) {
@@ -2907,7 +2983,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- char *link_speed;
int rval;
uint16_t mb[4];
struct qla_hw_data *ha = vha->hw;
@@ -2934,10 +3009,10 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
- "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
+ "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ qla2x00_get_link_speed_str(ha, fcport->fp_speed),
fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
@@ -3007,10 +3082,10 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
qla2x00_reg_remote_port(vha, fcport);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -3868,7 +3943,7 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
}
}
-int
+static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3884,19 +3959,7 @@ __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_set_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3912,19 +3975,7 @@ __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_clear_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-const char *
+static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
switch (dev_state) {
@@ -3978,7 +4029,7 @@ qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
}
/* Assumes idc_lock always held on entry */
-int
+static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4026,36 +4077,12 @@ __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
}
int
-qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
-int
-qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_get_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
uint32_t drv_presence = 0;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 03b75263283..a481684479c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -520,7 +520,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
req = ha->req_q_map[0];
- mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
+ mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
"Failed to allocate Marker IOCB.\n");
@@ -2551,7 +2551,7 @@ sufficient_dsds:
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
@@ -2748,7 +2748,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
struct rsp_que *rsp;
struct req_que *req;
int rval = EXT_STATUS_OK;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
rval = QLA_SUCCESS;
@@ -2786,15 +2785,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable)
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
- else if (IS_QLA82XX(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
- else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
- else
- cnt = qla2x00_debounce_register(
- ISP_REQ_Q_OUT(ha, &reg->isp));
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5733811ce8e..873c82014b1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -251,7 +251,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
/* Read all mbox registers? */
mboxes = (1 << ha->mbx_count) - 1;
if (!ha->mcp)
- ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERRROR.\n");
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
else
mboxes = ha->mcp->in_mb;
@@ -316,28 +316,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
}
#define LS_UNKNOWN 2
-char *
-qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+const char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
- static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
- char *link_speed;
- int fw_speed = ha->link_data_rate;
+ static const char * const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "10"
+ };
if (IS_QLA2100(ha) || IS_QLA2200(ha))
- link_speed = link_speeds[0];
- else if (fw_speed == 0x13)
- link_speed = link_speeds[6];
- else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fw_speed < 6)
- link_speed =
- link_speeds[fw_speed];
- }
-
- return link_speed;
+ return link_speeds[0];
+ else if (speed == 0x13)
+ return link_speeds[6];
+ else if (speed < 6)
+ return link_speeds[speed];
+ else
+ return link_speeds[LS_UNKNOWN];
}
-void
+static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
struct qla_hw_data *ha = vha->hw;
@@ -671,7 +667,7 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -860,7 +856,7 @@ skip_rio:
mb[1], mb[2], mb[3]);
ql_log(ql_log_warn, vha, 0x505f,
"Link is operational (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
/*
* Mark all devices as missing so we will login again.
@@ -2318,7 +2314,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
/* Read all mbox registers? */
mboxes = (1 << ha->mbx_count) - 1;
if (!ha->mcp)
- ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERRROR.\n");
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
else
mboxes = ha->mcp->in_mb;
@@ -2944,7 +2940,9 @@ skip_msi:
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
- }
+ } else if (!ha->flags.msi_enabled)
+ ql_dbg(ql_dbg_init, vha, 0x0125,
+ "INTa mode: Enabled.\n");
clear_risc_ints:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 18c509fae55..68c55eaa318 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3122,7 +3122,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) != 0) {
+ if (MSB(stat) != 0 && MSB(stat) != 2) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
@@ -3536,7 +3536,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * req->id);
mcp->mb[4] = req->id;
@@ -3605,7 +3605,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * rsp->id);
mcp->mb[4] = rsp->id;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 14cd361742f..3e3f593bada 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -36,7 +36,7 @@
#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
-int qla82xx_crb_table_initialized;
+static int qla82xx_crb_table_initialized;
#define qla82xx_crb_addr_transform(name) \
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
@@ -102,7 +102,7 @@ static void qla82xx_crb_addr_transform_setup(void)
qla82xx_crb_table_initialized = 1;
}
-struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
{{{0, 0, 0, 0} } },
{{{1, 0x0100000, 0x0102000, 0x120000},
{1, 0x0110000, 0x0120000, 0x130000},
@@ -262,7 +262,7 @@ struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
/*
* top 12 bits of crb internal address (hub, agent)
*/
-unsigned qla82xx_crb_hub_agt[64] = {
+static unsigned qla82xx_crb_hub_agt[64] = {
0,
QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -330,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-char *q_dev_state[] = {
+static char *q_dev_state[] = {
"Unknown",
"Cold",
"Initializing",
@@ -359,12 +359,13 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ha->crb_win = CRB_HI(*off);
writel(ha->crb_win,
- (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ win_read = RD_REG_DWORD((void __iomem *)
+ (CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -567,7 +568,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
return 1;
}
-int qla82xx_pci_set_window_warning_count;
+static int qla82xx_pci_set_window_warning_count;
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
@@ -677,10 +678,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -712,7 +713,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL) {
+ if (mem_ptr == NULL) {
*(u8 *)data = 0;
return -1;
}
@@ -749,10 +750,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -784,7 +785,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL)
+ if (mem_ptr == NULL)
return -1;
addr = mem_ptr;
@@ -908,24 +909,24 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
return 0;
}
-int
+static int
qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
(off & 0xFFFF0000));
/* Read back value to make sure write has gone through */
- RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD((void *)
+ WRT_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
data);
else
- rval = RD_REG_DWORD((void *)
+ rval = RD_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
return rval;
@@ -955,7 +956,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
}
if (loops >= 50000) {
ql_log(ql_log_fatal, vha, 0x00b9,
- "Failed to aquire SEM2 lock.\n");
+ "Failed to acquire SEM2 lock.\n");
return -1;
}
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1122,7 +1123,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
long data;
};
- /* Halt all the indiviual PEGs and other blocks of the ISP */
+ /* Halt all the individual PEGs and other blocks of the ISP */
qla82xx_rom_lock(ha);
/* disable all I2Q */
@@ -1654,7 +1655,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nx_pcibase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
"Cannot remap pcibase MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1669,7 +1669,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nxdb_wr_ptr) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
"Cannot remap MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1764,14 +1763,6 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}
-void qla82xx_reset_adapter(struct scsi_qla_host *vha)
-{
- struct qla_hw_data *ha = vha->hw;
- vha->flags.online = 0;
- qla2x00_try_to_stop_firmware(vha);
- ha->isp_ops->disable_intrs(ha);
-}
-
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
@@ -1856,7 +1847,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
return -1;
}
-int
+static int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
__le32 val;
@@ -1961,20 +1952,6 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
}
/* ISR related functions */
-uint32_t qla82xx_isr_int_target_mask_enable[8] = {
- ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
- ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
- ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
- ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
-};
-
-uint32_t qla82xx_isr_int_target_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
-};
-
static struct qla82xx_legacy_intr_set legacy_intr[] = \
QLA82XX_LEGACY_INTR_CONFIG;
@@ -2813,7 +2790,7 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
else {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
@@ -2821,7 +2798,8 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
}
}
-void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
+static void
+qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -3177,7 +3155,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
}
-int
+static int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
uint32_t fw_heartbeat_counter;
@@ -3817,7 +3795,8 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
+ r_value = RD_REG_DWORD((void __iomem *)
+ (r_addr + ha->nx_pcibase));
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -4376,7 +4355,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_tmplt_hdr, ha->md_template_size / 1024);
dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
/* Release the template data buffer allocated */
@@ -4386,7 +4365,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_dump, ha->md_dump_size / 1024);
vfree(ha->md_dump);
ha->md_dump_size = 0;
- ha->md_dump = 0;
+ ha->md_dump = NULL;
}
}
@@ -4423,7 +4402,7 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
dma_free_coherent(&ha->pdev->dev,
ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d501bf5f806..3a1661cf8c1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -41,7 +41,7 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
-int ql2xenableclass2;
+static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
@@ -89,6 +89,8 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
+ "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
"\t\t0x1e400000 - Preferred value for capturing essential "
"debug information (equivalent to old "
@@ -494,12 +496,20 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
(BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
strcpy(str, "PCIe (");
- if (lspeed == 1)
+ switch (lspeed) {
+ case 1:
strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
+ break;
+ case 2:
strcat(str, "5.0GT/s ");
- else
+ break;
+ case 3:
+ strcat(str, "8.0GT/s ");
+ break;
+ default:
strcat(str, "<unknown> ");
+ break;
+ }
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
strcat(str, lwstr);
@@ -719,7 +729,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_io, vha, 0x3013,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
}
@@ -2357,7 +2367,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Configure PCI I/O space */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
- goto probe_hw_failed;
+ goto iospace_config_failed;
ql_log_pci(ql_log_info, pdev, 0x001d,
"Found an ISP%04X irq %d iobase 0x%p.\n",
@@ -2668,7 +2678,11 @@ probe_hw_failed:
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ }
+iospace_config_failed:
+ if (IS_QLA82XX(ha)) {
+ if (!ha->nx_pcibase)
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
} else {
@@ -2755,6 +2769,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->flags.host_shutting_down = 1;
+ set_bit(UNLOADING, &base_vha->dpc_flags);
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2784,8 +2799,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
"Error while clearing DRV-Presence.\n");
}
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
qla2x00_dfs_remove(base_vha);
@@ -3721,10 +3734,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (fcport->flags &
FCF_FCP2_DEVICE)
opts |= BIT_1;
- status2 =
- qla2x00_get_port_database(
- vha, fcport,
- opts);
+ status2 =
+ qla2x00_get_port_database(
+ vha, fcport, opts);
if (status2 != QLA_SUCCESS)
status = 1;
}
@@ -3836,7 +3848,7 @@ qla83xx_idc_state_handler_work(struct work_struct *work)
qla83xx_idc_unlock(base_vha, 0);
}
-int
+static int
qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -3954,7 +3966,7 @@ qla83xx_wait_logic(void)
}
}
-int
+static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval;
@@ -4013,7 +4025,7 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
return rval;
}
-int
+static int
qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -4212,7 +4224,7 @@ qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
return rval;
}
-void
+static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4224,7 +4236,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
while (1) {
qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
- if (drv_ack == drv_presence)
+ if ((drv_ack & drv_presence) == drv_presence)
break;
if (time_after_eq(jiffies, ack_timeout)) {
@@ -4251,7 +4263,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}
-int
+static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -4505,9 +4517,9 @@ qla2x00_do_dpc(void *data)
"ISP abort end.\n");
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
+ &base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
@@ -4987,7 +4999,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
+static uint32_t
+qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
uint32_t rval = QLA_FUNCTION_FAILED;
uint32_t drv_active = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 62aa5584f64..80f4b849e2b 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -73,7 +73,7 @@ enum fcp_resp_rsp_codes {
#define FCP_PTA_SIMPLE 0 /* simple task attribute */
#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
#define FCP_PTA_ORDERED 2 /* ordered task attribute */
-#define FCP_PTA_ACA 4 /* auto. contigent allegiance */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
#define FCP_PTA_MASK 7 /* mask for task attribute field */
#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
@@ -1029,7 +1029,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
-void qlt_release(struct qla_tgt *tgt)
+static void qlt_release(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
struct qla_hw_data *ha = vha->hw;
+ struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct se_cmd *se_cmd;
+ u32 lun = 0;
int rc;
+ bool found_lun = false;
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+ struct qla_tgt_cmd *cmd =
+ container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ if (cmd->tag == abts->exchange_addr_to_abort) {
+ lun = cmd->unpacked_lun;
+ found_lun = true;
+ break;
+ }
+ }
+ spin_unlock(&se_sess->sess_cmd_lock);
+
+ if (!found_lun)
+ return -ENOENT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -3980,7 +3999,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
"qla_target(%d): System error async event %#x "
- "occured", vha->vp_idx, code);
+ "occurred", vha->vp_idx, code);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -3989,7 +4008,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_LOOP_UP:
{
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
- "qla_target(%d): Async LOOP_UP occured "
+ "qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4006,7 +4025,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_LIP_RESET:
case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
- "qla_target(%d): Async event %#x occured "
+ "qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4015,7 +4034,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_PORT_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
"qla_target(%d): Port update async event %#x "
- "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+ "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4031,7 +4050,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
- "qla_target(%d): Async event %#x occured: "
+ "qla_target(%d): Async event %#x occurred: "
"ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cfe934e1af4..49697ca41e7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.07-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3d74f2f39ae..d182c96e17e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -367,7 +367,7 @@ static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
if (!nacl) {
- pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
+ pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
return NULL;
}
@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
- cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
- transport_generic_request_failure(&cmd->se_cmd);
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9032e910bca..f1bf5aff68e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1418,7 +1418,7 @@ static int scsi_lld_busy(struct request_queue *q)
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (blk_queue_dead(q))
+ if (blk_queue_dying(q))
return 0;
shost = sdev->host;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index dc0ad85853e..8f6b12cbd22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,16 +16,14 @@
#include "scsi_priv.h"
-static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
+static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err;
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
- drv = dev->driver;
- if (drv && drv->suspend) {
- err = drv->suspend(dev, msg);
+ if (cb) {
+ err = cb(dev);
if (err)
scsi_device_resume(to_scsi_device(dev));
}
@@ -34,14 +32,12 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
return err;
}
-static int scsi_dev_type_resume(struct device *dev)
+static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err = 0;
- drv = dev->driver;
- if (drv && drv->resume)
- err = drv->resume(dev);
+ if (cb)
+ err = cb(dev);
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
return err;
@@ -49,51 +45,39 @@ static int scsi_dev_type_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
-static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
+static int
+scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
if (scsi_is_sdev_device(dev)) {
/*
- * sd is the only high-level SCSI driver to implement runtime
- * PM, and sd treats runtime suspend, system suspend, and
- * system hibernate identically (but not system freeze).
+ * All the high-level SCSI drivers that implement runtime
+ * PM treat runtime suspend, system suspend, and system
+ * hibernate identically.
*/
- if (pm_runtime_suspended(dev)) {
- if (msg.event == PM_EVENT_SUSPEND ||
- msg.event == PM_EVENT_HIBERNATE)
- return 0; /* already suspended */
+ if (pm_runtime_suspended(dev))
+ return 0;
- /* wake up device so that FREEZE will succeed */
- pm_runtime_resume(dev);
- }
- err = scsi_dev_type_suspend(dev, msg);
+ err = scsi_dev_type_suspend(dev, cb);
}
+
return err;
}
-static int scsi_bus_resume_common(struct device *dev)
+static int
+scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
- /*
- * Parent device may have runtime suspended as soon as
- * it is woken up during the system resume.
- *
- * Resume it on behalf of child.
- */
- pm_runtime_get_sync(dev->parent);
-
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, cb);
+
if (err == 0) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
-
- pm_runtime_put_sync(dev->parent);
-
return err;
}
@@ -112,26 +96,49 @@ static int scsi_bus_prepare(struct device *dev)
static int scsi_bus_suspend(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+}
+
+static int scsi_bus_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
}
static int scsi_bus_freeze(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_FREEZE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+}
+
+static int scsi_bus_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
}
static int scsi_bus_poweroff(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_HIBERNATE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+}
+
+static int scsi_bus_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
}
#else /* CONFIG_PM_SLEEP */
-#define scsi_bus_resume_common NULL
#define scsi_bus_prepare NULL
#define scsi_bus_suspend NULL
+#define scsi_bus_resume NULL
#define scsi_bus_freeze NULL
+#define scsi_bus_thaw NULL
#define scsi_bus_poweroff NULL
+#define scsi_bus_restore NULL
#endif /* CONFIG_PM_SLEEP */
@@ -140,10 +147,12 @@ static int scsi_bus_poweroff(struct device *dev)
static int scsi_runtime_suspend(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_suspend\n");
if (scsi_is_sdev_device(dev)) {
- err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND);
+ err = scsi_dev_type_suspend(dev,
+ pm ? pm->runtime_suspend : NULL);
if (err == -EAGAIN)
pm_schedule_suspend(dev, jiffies_to_msecs(
round_jiffies_up_relative(HZ/10)));
@@ -157,10 +166,11 @@ static int scsi_runtime_suspend(struct device *dev)
static int scsi_runtime_resume(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_resume\n");
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
/* Insert hooks here for targets, hosts, and transport classes */
@@ -239,11 +249,11 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
const struct dev_pm_ops scsi_bus_pm_ops = {
.prepare = scsi_bus_prepare,
.suspend = scsi_bus_suspend,
- .resume = scsi_bus_resume_common,
+ .resume = scsi_bus_resume,
.freeze = scsi_bus_freeze,
- .thaw = scsi_bus_resume_common,
+ .thaw = scsi_bus_thaw,
.poweroff = scsi_bus_poweroff,
- .restore = scsi_bus_resume_common,
+ .restore = scsi_bus_restore,
.runtime_suspend = scsi_runtime_suspend,
.runtime_resume = scsi_runtime_resume,
.runtime_idle = scsi_runtime_idle,
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ce5224c92ed..931a7d95420 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
-static int check_reset_type(char *str)
+static int check_reset_type(const char *str)
{
- if (strncmp(str, "adapter", 10) == 0)
+ if (sysfs_streq(str, "adapter"))
return SCSI_ADAPTER_RESET;
- else if (strncmp(str, "firmware", 10) == 0)
+ else if (sysfs_streq(str, "firmware"))
return SCSI_FIRMWARE_RESET;
else
return 0;
@@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct scsi_host_template *sht = shost->hostt;
int ret = -EINVAL;
- char str[10];
int type;
- sscanf(buf, "%s", str);
- type = check_reset_type(str);
-
+ type = check_reset_type(buf);
if (!type)
goto exit_store_host_reset;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f7565fc4f0e..1b681427dde 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -151,6 +151,7 @@ static struct {
{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 21a045e0559..f379c7f3034 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -38,7 +38,7 @@ struct srp_host_attrs {
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
-#define SRP_RPORT_ATTRS 2
+#define SRP_RPORT_ATTRS 3
struct srp_internal {
struct scsi_transport_template t;
@@ -47,7 +47,6 @@ struct srp_internal {
struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];
struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
- struct device_attribute private_rport_attrs[SRP_RPORT_ATTRS];
struct transport_container rport_attr_cont;
};
@@ -72,24 +71,6 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SETUP_TEMPLATE(attrb, field, perm, test, ro_test, ro_perm) \
- i->private_##attrb[count] = dev_attr_##field; \
- i->private_##attrb[count].attr.mode = perm; \
- if (ro_test) { \
- i->private_##attrb[count].attr.mode = ro_perm; \
- i->private_##attrb[count].store = NULL; \
- } \
- i->attrb[count] = &i->private_##attrb[count]; \
- if (test) \
- count++
-
-#define SETUP_RPORT_ATTRIBUTE_RD(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO, 1, 0, 0)
-
-#define SETUP_RPORT_ATTRIBUTE_RW(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO | S_IWUSR, \
- 1, 1, S_IRUGO)
-
#define SRP_PID(p) \
(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
@@ -135,6 +116,24 @@ show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
+static ssize_t store_srp_rport_delete(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ if (i->f->rport_delete) {
+ i->f->rport_delete(rport);
+ return count;
+ } else {
+ return -ENOSYS;
+ }
+}
+
+static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
@@ -324,12 +323,16 @@ srp_attach_transport(struct srp_function_template *ft)
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &srp_rport_class.class;
i->rport_attr_cont.ac.match = srp_rport_match;
- transport_container_register(&i->rport_attr_cont);
count = 0;
- SETUP_RPORT_ATTRIBUTE_RD(port_id);
- SETUP_RPORT_ATTRIBUTE_RD(roles);
- i->rport_attrs[count] = NULL;
+ i->rport_attrs[count++] = &dev_attr_port_id;
+ i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->rport_delete)
+ i->rport_attrs[count++] = &dev_attr_delete;
+ i->rport_attrs[count++] = NULL;
+ BUG_ON(count > ARRAY_SIZE(i->rport_attrs));
+
+ transport_container_register(&i->rport_attr_cont);
i->f = ft;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 352bc77b7c8..7992635d405 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,7 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *, pm_message_t state);
+static int sd_suspend(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
@@ -465,15 +465,23 @@ static struct class sd_disk_class = {
.dev_attrs = sd_disk_attrs,
};
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend,
+ .resume = sd_resume,
+ .poweroff = sd_suspend,
+ .restore = sd_resume,
+ .runtime_suspend = sd_suspend,
+ .runtime_resume = sd_resume,
+};
+
static struct scsi_driver sd_template = {
.owner = THIS_MODULE,
.gendrv = {
.name = "sd",
.probe = sd_probe,
.remove = sd_remove,
- .suspend = sd_suspend,
- .resume = sd_resume,
.shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
},
.rescan = sd_rescan,
.done = sd_done,
@@ -1011,7 +1019,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
- } else if (block > 0xffffffff) {
+ } else if (sdp->use_16_for_rw) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -2203,6 +2211,8 @@ got_data:
}
}
+ sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
+
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
sdkp->capacity <<= 3;
@@ -3052,7 +3062,7 @@ exit:
scsi_disk_put(sdkp);
}
-static int sd_suspend(struct device *dev, pm_message_t mesg)
+static int sd_suspend(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
@@ -3067,7 +3077,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
goto done;
}
- if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
+ if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
ret = sd_start_stop_device(sdkp, 0);
}
@@ -3116,10 +3126,6 @@ static int __init init_sd(void)
if (err)
goto err_out;
- err = scsi_register_driver(&sd_template.gendrv);
- if (err)
- goto err_out_class;
-
sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
0, 0, NULL);
if (!sd_cdb_cache) {
@@ -3133,8 +3139,15 @@ static int __init init_sd(void)
goto err_out_cache;
}
+ err = scsi_register_driver(&sd_template.gendrv);
+ if (err)
+ goto err_out_driver;
+
return 0;
+err_out_driver:
+ mempool_destroy(sd_cdb_pool);
+
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
@@ -3157,10 +3170,10 @@ static void __exit exit_sd(void)
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+ scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_cdb_pool);
kmem_cache_destroy(sd_cdb_cache);
- scsi_unregister_driver(&sd_template.gendrv);
class_unregister(&sd_disk_class);
for (i = 0; i < SD_MAJORS; i++)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 595af1ae442..74ab67a169e 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
- int ret;
+ int err;
struct scatterlist sg;
unsigned long flags;
@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
- ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
- if (ret >= 0)
+ err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+ GFP_ATOMIC);
+ if (!err)
virtqueue_kick(vscsi->event_vq.vq);
spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
- return ret;
+ return err;
}
static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
@@ -410,22 +411,23 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
{
unsigned int out_num, in_num;
unsigned long flags;
- int ret;
+ int err;
+ bool needs_kick = false;
spin_lock_irqsave(&tgt->tgt_lock, flags);
virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
spin_lock(&vq->vq_lock);
- ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+ err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
spin_unlock(&tgt->tgt_lock);
- if (ret >= 0)
- ret = virtqueue_kick_prepare(vq->vq);
+ if (!err)
+ needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
- if (ret > 0)
+ if (needs_kick)
virtqueue_notify(vq->vq);
- return ret;
+ return err;
}
static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
@@ -467,8 +469,10 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) >= 0)
+ GFP_ATOMIC) == 0)
ret = 0;
+ else
+ mempool_free(cmd, virtscsi_cmd_pool);
out:
return ret;
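The virtio_scsi hunks track the change in virtqueue_add_buf()'s return convention: success is now a zero return rather than the remaining descriptor capacity, so callers test !err, remember under the queue lock whether a kick is needed, and notify the host only after dropping it. A hedged sketch of that pattern with invented names (example_queue_buf, the lock and the single-entry scatterlist are placeholders):

    #include <linux/virtio.h>
    #include <linux/scatterlist.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>

    static int example_queue_buf(struct virtqueue *vq, spinlock_t *lock,
                                 struct scatterlist *sg, void *data)
    {
            unsigned long flags;
            bool needs_kick = false;
            int err;

            spin_lock_irqsave(lock, flags);
            /* one outgoing sg entry, nothing host-writable */
            err = virtqueue_add_buf(vq, sg, 1, 0, data, GFP_ATOMIC);
            if (!err)
                    needs_kick = virtqueue_kick_prepare(vq);
            spin_unlock_irqrestore(lock, flags);

            if (needs_kick)
                    virtqueue_notify(vq);   /* host notification outside the lock */

            return err;
    }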
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index b3dc44146ca..5aedcdf4ac5 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -401,7 +401,6 @@ static int fsidiv_enable(struct clk *clk)
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
- u32 val;
int idx;
idx = (clk->parent->rate / rate) & 0xffff;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae359cab..2e188e1127e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -123,6 +123,13 @@ config SPI_BUTTERFLY
inexpensive battery powered microcontroller evaluation board.
This same cable can be used to flash new firmware.
+config SPI_CLPS711X
+ tristate "CLPS711X host SPI controller"
+ depends on ARCH_CLPS711X
+ help
+ This enables the dedicated general-purpose SPI/Microwire1-compatible
+ master mode interface (SSI1) for CLPS711X-based CPUs.
+
config SPI_COLDFIRE_QSPI
tristate "Freescale Coldfire QSPI controller"
depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
@@ -341,10 +348,10 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
- depends on SUPERH && HAVE_CLK
+ depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
select SPI_BITBANG
help
- SPI driver for SuperH MSIOF blocks.
+ SPI driver for SuperH and SH Mobile MSIOF blocks.
config SPI_SH
tristate "SuperH SPI controller"
@@ -372,12 +379,6 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
-config SPI_STMP3XXX
- tristate "Freescale STMP37xx/378x SPI/SSP controller"
- depends on ARCH_STMP3XXX
- help
- SPI driver for Freescale STMP37xx/378x SoC SSP interface
-
config SPI_MXS
tristate "Freescale MXS SPI controller"
depends on ARCH_MXS
@@ -385,6 +386,20 @@ config SPI_MXS
help
SPI driver for Freescale MXS devices.
+config SPI_TEGRA20_SFLASH
+ tristate "Nvidia Tegra20 Serial flash Controller"
+ depends on ARCH_TEGRA
+ help
+ SPI driver for Nvidia Tegra20 Serial flash Controller interface.
+ The main use case of this controller is to use SPI flash as the
+ boot device.
+
+config SPI_TEGRA20_SLINK
+ tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+ depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ help
+ SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+
config SPI_TI_SSP
tristate "TI Sequencer Serial Port - SPI Support"
depends on MFD_TI_SSP
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47e4b0..64e970ba261 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
@@ -59,11 +60,11 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
-obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
-
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index f1fec2a19d1..5e7314ac51e 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -215,7 +215,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit altera_spi_probe(struct platform_device *pdev)
+static int altera_spi_probe(struct platform_device *pdev)
{
struct altera_spi_platform_data *platp = pdev->dev.platform_data;
struct altera_spi *hw;
@@ -290,7 +290,7 @@ exit:
return err;
}
-static int __devexit altera_spi_remove(struct platform_device *dev)
+static int altera_spi_remove(struct platform_device *dev)
{
struct altera_spi *hw = platform_get_drvdata(dev);
struct spi_master *master = hw->bitbang.master;
@@ -311,7 +311,7 @@ MODULE_DEVICE_TABLE(of, altera_spi_match);
static struct platform_driver altera_spi_driver = {
.probe = altera_spi_probe,
- .remove = __devexit_p(altera_spi_remove),
+ .remove = altera_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 249077e5cc4..9a5d7791c5f 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -192,7 +192,7 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
}
-static __devinit int ath79_spi_probe(struct platform_device *pdev)
+static int ath79_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ath79_spi *sp;
@@ -251,7 +251,7 @@ err_put_master:
return ret;
}
-static __devexit int ath79_spi_remove(struct platform_device *pdev)
+static int ath79_spi_remove(struct platform_device *pdev)
{
struct ath79_spi *sp = platform_get_drvdata(pdev);
@@ -265,7 +265,7 @@ static __devexit int ath79_spi_remove(struct platform_device *pdev)
static struct platform_driver ath79_spi_driver = {
.probe = ath79_spi_probe,
- .remove = __devexit_p(ath79_spi_remove),
+ .remove = ath79_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 61fb0ec26f0..ab34497bcfe 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/gpio.h>
@@ -768,6 +769,10 @@ static int atmel_spi_setup(struct spi_device *spi)
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned int)spi->controller_data;
+
+ if (gpio_is_valid(spi->cs_gpio))
+ npcs_pin = spi->cs_gpio;
+
asd = spi->controller_state;
if (!asd) {
asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
@@ -907,7 +912,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
/*-------------------------------------------------------------------------*/
-static int __devinit atmel_spi_probe(struct platform_device *pdev)
+static int atmel_spi_probe(struct platform_device *pdev)
{
struct resource *regs;
int irq;
@@ -937,8 +942,9 @@ static int __devinit atmel_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
- master->num_chipselect = 4;
+ master->num_chipselect = master->dev.of_node ? 0 : 4;
master->setup = atmel_spi_setup;
master->transfer = atmel_spi_transfer;
master->cleanup = atmel_spi_cleanup;
@@ -1003,7 +1009,7 @@ out_free:
return ret;
}
-static int __devexit atmel_spi_remove(struct platform_device *pdev)
+static int atmel_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -1064,11 +1070,20 @@ static int atmel_spi_resume(struct platform_device *pdev)
#define atmel_spi_resume NULL
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_spi_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
+#endif
static struct platform_driver atmel_spi_driver = {
.driver = {
.name = "atmel_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_spi_dt_ids),
},
.suspend = atmel_spi_suspend,
.resume = atmel_spi_resume,
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index a9f4049c676..f44ab550853 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -36,7 +36,6 @@
#include <bcm63xx_dev_spi.h>
#define PFX KBUILD_MODNAME
-#define DRV_VER "0.1.2"
struct bcm63xx_spi {
struct completion done;
@@ -170,13 +169,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- ret = bcm63xx_spi_check_transfer(spi, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return ret;
- }
-
dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
__func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
@@ -337,7 +329,7 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
}
-static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
+static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct device *dev = &pdev->dev;
@@ -441,8 +433,8 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
goto out_clk_disable;
}
- dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n",
- r->start, irq, bs->fifo_size, DRV_VER);
+ dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n",
+ r->start, irq, bs->fifo_size);
return 0;
@@ -457,7 +449,7 @@ out:
return ret;
}
-static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
+static int bcm63xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
@@ -485,6 +477,8 @@ static int bcm63xx_spi_suspend(struct device *dev)
platform_get_drvdata(to_platform_device(dev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ spi_master_suspend(master);
+
clk_disable(bs->clk);
return 0;
@@ -498,6 +492,8 @@ static int bcm63xx_spi_resume(struct device *dev)
clk_enable(bs->clk);
+ spi_master_resume(master);
+
return 0;
}
@@ -518,7 +514,7 @@ static struct platform_driver bcm63xx_spi_driver = {
.pm = BCM63XX_SPI_PM_OPS,
},
.probe = bcm63xx_spi_probe,
- .remove = __devexit_p(bcm63xx_spi_remove),
+ .remove = bcm63xx_spi_remove,
};
module_platform_driver(bcm63xx_spi_driver);
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 6555ecd0730..ac7ffca7ba4 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -755,8 +755,7 @@ bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
return 0;
}
-static int __devinit
-bfin_sport_spi_probe(struct platform_device *pdev)
+static int bfin_sport_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bfin5xx_spi_master *platform_info;
@@ -863,8 +862,7 @@ bfin_sport_spi_probe(struct platform_device *pdev)
}
/* stop hardware and remove the driver */
-static int __devexit
-bfin_sport_spi_remove(struct platform_device *pdev)
+static int bfin_sport_spi_remove(struct platform_device *pdev)
{
struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -935,7 +933,7 @@ static struct platform_driver bfin_sport_spi_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_sport_spi_probe,
- .remove = __devexit_p(bfin_sport_spi_remove),
+ .remove = bfin_sport_spi_remove,
.suspend = bfin_sport_spi_suspend,
.resume = bfin_sport_spi_resume,
};
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 9bb4d4af854..0429d833f75 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -1387,7 +1387,7 @@ out_error_get_res:
}
/* stop hardware and remove the driver */
-static int __devexit bfin_spi_remove(struct platform_device *pdev)
+static int bfin_spi_remove(struct platform_device *pdev)
{
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
@@ -1477,7 +1477,7 @@ static struct platform_driver bfin_spi_driver = {
},
.suspend = bfin_spi_suspend,
.resume = bfin_spi_resume,
- .remove = __devexit_p(bfin_spi_remove),
+ .remove = bfin_spi_remove,
};
static int __init bfin_spi_init(void)
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index aef59b1a15f..8b3d8efafd3 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -260,11 +260,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ struct spi_message *m, *_m;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
- while (!list_empty(&bitbang->queue)) {
- struct spi_message *m;
+ list_for_each_entry_safe(m, _m, &bitbang->queue, queue) {
struct spi_device *spi;
unsigned nsecs;
struct spi_transfer *t = NULL;
@@ -273,9 +273,7 @@ static void bitbang_work(struct work_struct *work)
int status;
int do_setup = -1;
- m = container_of(bitbang->queue.next, struct spi_message,
- queue);
- list_del_init(&m->queue);
+ list_del(&m->queue);
spin_unlock_irqrestore(&bitbang->lock, flags);
/* FIXME this is made-up ... the correct value is known to
@@ -346,17 +344,14 @@ static void bitbang_work(struct work_struct *work)
if (t->delay_usecs)
udelay(t->delay_usecs);
- if (!cs_change)
- continue;
- if (t->transfer_list.next == &m->transfers)
- break;
-
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
+ if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
}
m->status = status;
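The spi-bitbang rework swaps the open-coded while/container_of loop for list_for_each_entry_safe(), which tolerates removing the current entry while walking, and uses list_is_last() to decide whether a mid-message chip deselect is still required. A self-contained sketch of the safe-iteration idiom (struct item and drain_queue are invented names, not part of the driver):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head node;
            int payload;
    };

    static void drain_queue(struct list_head *queue)
    {
            struct item *it, *tmp;

            list_for_each_entry_safe(it, tmp, queue, node) {
                    list_del(&it->node);    /* safe: 'tmp' already points past 'it' */
                    /* process it->payload here, then release the entry */
                    kfree(it);
            }
    }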
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
new file mode 100644
index 00000000000..1366c4620d5
--- /dev/null
+++ b/drivers/spi/spi-clps711x.c
@@ -0,0 +1,296 @@
+/*
+ * CLPS711X SPI bus driver
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/spi-clps711x.h>
+
+#include <mach/hardware.h>
+
+#define DRIVER_NAME "spi-clps711x"
+
+struct spi_clps711x_data {
+ struct completion done;
+
+ struct clk *spi_clk;
+ u32 max_speed_hz;
+
+ u8 *tx_buf;
+ u8 *rx_buf;
+ int count;
+ int len;
+
+ int chipselect[0];
+};
+
+static int spi_clps711x_setup(struct spi_device *spi)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
+
+ if (spi->bits_per_word != 8) {
+ dev_err(&spi->dev, "Unsupported master bus width %i\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ /* We expect that the SPI device is not selected */
+ gpio_direction_output(hw->chipselect[spi->chip_select],
+ !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
+static void spi_clps711x_setup_mode(struct spi_device *spi)
+{
+ /* Setup edge for transfer */
+ if (spi->mode & SPI_CPHA)
+ clps_writew(clps_readw(SYSCON3) | SYSCON3_ADCCKNSEN, SYSCON3);
+ else
+ clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCKNSEN, SYSCON3);
+}
+
+static int spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
+ u8 bpw = xfer->bits_per_word ? : spi->bits_per_word;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
+
+ if (bpw != 8) {
+ dev_err(&spi->dev, "Unsupported master bus width %i\n", bpw);
+ return -EINVAL;
+ }
+
+ /* Setup SPI frequency divider */
+ if (!speed || (speed >= hw->max_speed_hz))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(3), SYSCON1);
+ else if (speed >= (hw->max_speed_hz / 2))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(2), SYSCON1);
+ else if (speed >= (hw->max_speed_hz / 8))
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(1), SYSCON1);
+ else
+ clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
+ SYSCON1_ADCKSEL(0), SYSCON1);
+
+ return 0;
+}
+
+static int spi_clps711x_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ int status = 0, cs = hw->chipselect[msg->spi->chip_select];
+ u32 data;
+
+ spi_clps711x_setup_mode(msg->spi);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (spi_clps711x_setup_xfer(msg->spi, xfer)) {
+ status = -EINVAL;
+ goto out_xfr;
+ }
+
+ gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
+
+ INIT_COMPLETION(hw->done);
+
+ hw->count = 0;
+ hw->len = xfer->len;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
+
+ /* Initiate transfer */
+ data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
+ clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+
+ wait_for_completion(&hw->done);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change ||
+ list_is_last(&xfer->transfer_list, &msg->transfers))
+ gpio_set_value(cs, !(msg->spi->mode & SPI_CS_HIGH));
+
+ msg->actual_length += xfer->len;
+ }
+
+out_xfr:
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
+{
+ struct spi_clps711x_data *hw = (struct spi_clps711x_data *)dev_id;
+ u32 data;
+
+ /* Handle RX */
+ data = clps_readb(SYNCIO);
+ if (hw->rx_buf)
+ hw->rx_buf[hw->count] = (u8)data;
+
+ hw->count++;
+
+ /* Handle TX */
+ if (hw->count < hw->len) {
+ data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
+ clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+ } else
+ complete(&hw->done);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_clps711x_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct spi_master *master;
+ struct spi_clps711x_data *hw;
+ struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ if (pdata->num_chipselect < 1) {
+ dev_err(&pdev->dev, "At least one CS must be defined\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_clps711x_data) +
+ sizeof(int) * pdata->num_chipselect);
+ if (!master) {
+ dev_err(&pdev->dev, "SPI allocating memory error\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = spi_clps711x_setup;
+ master->transfer_one_message = spi_clps711x_transfer_one_message;
+
+ hw = spi_master_get_devdata(master);
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ hw->chipselect[i] = pdata->chipselect[i];
+ if (!gpio_is_valid(hw->chipselect[i])) {
+ dev_err(&pdev->dev, "Invalid CS GPIO %i\n", i);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ if (gpio_request(hw->chipselect[i], DRIVER_NAME)) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ hw->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->spi_clk)) {
+ dev_err(&pdev->dev, "Can't get clocks\n");
+ ret = PTR_ERR(hw->spi_clk);
+ goto err_out;
+ }
+ hw->max_speed_hz = clk_get_rate(hw->spi_clk);
+
+ init_completion(&hw->done);
+ platform_set_drvdata(pdev, master);
+
+ /* Disable extended mode due to hardware problems */
+ clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCON, SYSCON3);
+
+ /* Clear possible pending interrupt */
+ clps_readl(SYNCIO);
+
+ ret = devm_request_irq(&pdev->dev, IRQ_SSEOTI, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ clk_put(hw->spi_clk);
+ goto clk_out;
+ }
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ dev_info(&pdev->dev,
+ "SPI bus driver initialized. Master clock %u Hz\n",
+ hw->max_speed_hz);
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to register master\n");
+ devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
+
+clk_out:
+ devm_clk_put(&pdev->dev, hw->spi_clk);
+
+err_out:
+ while (--i >= 0)
+ if (gpio_is_valid(hw->chipselect[i]))
+ gpio_free(hw->chipselect[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ kfree(master);
+
+ return ret;
+}
+
+static int spi_clps711x_remove(struct platform_device *pdev)
+{
+ int i;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+
+ devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
+
+ for (i = 0; i < master->num_chipselect; i++)
+ if (gpio_is_valid(hw->chipselect[i]))
+ gpio_free(hw->chipselect[i]);
+
+ devm_clk_put(&pdev->dev, hw->spi_clk);
+ platform_set_drvdata(pdev, NULL);
+ spi_unregister_master(master);
+ kfree(master);
+
+ return 0;
+}
+
+static struct platform_driver clps711x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = spi_clps711x_probe,
+ .remove = spi_clps711x_remove,
+};
+module_platform_driver(clps711x_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X SPI bus driver");
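Because the new spi-clps711x driver takes its chip-select GPIOs from platform data, board code has to register a matching platform device. The snippet below is only a sketch, assuming struct spi_clps711x_pdata exposes the .chipselect array and .num_chipselect count that the probe routine above reads; the GPIO numbers and device id are invented:

    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/platform_data/spi-clps711x.h>

    static int board_spi_cs[] = { 4, 5 };           /* board-specific CS GPIOs */

    static struct spi_clps711x_pdata board_spi_pdata = {
            .chipselect     = board_spi_cs,
            .num_chipselect = ARRAY_SIZE(board_spi_cs),
    };

    static struct platform_device board_spi_device = {
            .name                   = "spi-clps711x",
            .id                     = 0,
            .dev.platform_data      = &board_spi_pdata,
    };

Registering board_spi_device with platform_device_register() during board init then lets the probe above claim the CS GPIOs, the "spi" clock and IRQ_SSEOTI.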
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 764bfee7592..58466b810da 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -401,7 +401,7 @@ static int mcfqspi_setup(struct spi_device *spi)
return 0;
}
-static int __devinit mcfqspi_probe(struct platform_device *pdev)
+static int mcfqspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct mcfqspi *mcfqspi;
@@ -515,7 +515,7 @@ fail0:
return status;
}
-static int __devexit mcfqspi_remove(struct platform_device *pdev)
+static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
@@ -594,7 +594,7 @@ static struct platform_driver mcfqspi_driver = {
.driver.owner = THIS_MODULE,
.driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
- .remove = __devexit_p(mcfqspi_remove),
+ .remove = mcfqspi_remove,
};
module_platform_driver(mcfqspi_driver);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 147dfa87a64..13661e129d9 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -769,7 +769,7 @@ rx_dma_failed:
* It will invoke spi_bitbang_start to create work queue so that client driver
* can register transfer method to work queue.
*/
-static int __devinit davinci_spi_probe(struct platform_device *pdev)
+static int davinci_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct davinci_spi *dspi;
@@ -952,7 +952,7 @@ err:
* It will also call spi_bitbang_stop to destroy the work queue which was
* created by spi_bitbang_start.
*/
-static int __devexit davinci_spi_remove(struct platform_device *pdev)
+static int davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
struct spi_master *master;
@@ -980,7 +980,7 @@ static struct platform_driver davinci_spi_driver = {
.owner = THIS_MODULE,
},
.probe = davinci_spi_probe,
- .remove = __devexit_p(davinci_spi_remove),
+ .remove = davinci_spi_remove,
};
module_platform_driver(davinci_spi_driver);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index db2f1ba06ea..4a6d5c9057a 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -26,7 +26,7 @@ struct dw_spi_mmio {
struct clk *clk;
};
-static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
+static int dw_spi_mmio_probe(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
@@ -106,7 +106,7 @@ err_end:
return ret;
}
-static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
+static int dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
struct resource *mem;
@@ -129,7 +129,7 @@ static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
- .remove = __devexit_p(dw_spi_mmio_remove),
+ .remove = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ff81abbb306..6055c8d9fdd 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -32,7 +32,7 @@ struct dw_spi_pci {
struct dw_spi dws;
};
-static int __devinit spi_pci_probe(struct pci_dev *pdev,
+static int spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dw_spi_pci *dwpci;
@@ -105,7 +105,7 @@ err_disable:
return ret;
}
-static void __devexit spi_pci_remove(struct pci_dev *pdev)
+static void spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
@@ -159,7 +159,7 @@ static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = spi_pci_probe,
- .remove = __devexit_p(spi_pci_remove),
+ .remove = spi_pci_remove,
.suspend = spi_suspend,
.resume = spi_resume,
};
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d1a495f64e2..c1abc06899e 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -696,7 +696,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __devinit init_queue(struct dw_spi *dws)
+static int init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
spin_lock_init(&dws->lock);
@@ -795,7 +795,7 @@ static void spi_hw_init(struct dw_spi *dws)
}
}
-int __devinit dw_spi_add_host(struct dw_spi *dws)
+int dw_spi_add_host(struct dw_spi *dws)
{
struct spi_master *master;
int ret;
@@ -877,7 +877,7 @@ exit:
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
-void __devexit dw_spi_remove_host(struct dw_spi *dws)
+void dw_spi_remove_host(struct dw_spi *dws)
{
int status = 0;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 3a219599612..acb1e1935c5 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1023,7 +1023,7 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
free_page((unsigned long)espi->zeropage);
}
-static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
+static int ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct ep93xx_spi_info *info;
@@ -1138,7 +1138,7 @@ fail_release_master:
return error;
}
-static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
+static int ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
@@ -1180,7 +1180,7 @@ static struct platform_driver ep93xx_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_spi_probe,
- .remove = __devexit_p(ep93xx_spi_remove),
+ .remove = ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 8f6aa735a24..6a6f62ec284 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -403,7 +403,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
return 0;
}
-static int __devinit falcon_sflash_probe(struct platform_device *pdev)
+static int falcon_sflash_probe(struct platform_device *pdev)
{
struct falcon_sflash *priv;
struct spi_master *master;
@@ -438,7 +438,7 @@ static int __devinit falcon_sflash_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit falcon_sflash_remove(struct platform_device *pdev)
+static int falcon_sflash_remove(struct platform_device *pdev)
{
struct falcon_sflash *priv = platform_get_drvdata(pdev);
@@ -455,7 +455,7 @@ MODULE_DEVICE_TABLE(of, falcon_sflash_match);
static struct platform_driver falcon_sflash_driver = {
.probe = falcon_sflash_probe,
- .remove = __devexit_p(falcon_sflash_remove),
+ .remove = falcon_sflash_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 27bdc47b525..24610ca8955 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -587,7 +587,7 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
iounmap(mspi->reg_base);
}
-static struct spi_master * __devinit fsl_espi_probe(struct device *dev,
+static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -686,7 +686,7 @@ static int of_fsl_espi_get_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
+static int of_fsl_espi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -725,7 +725,7 @@ err:
return ret;
}
-static int __devexit of_fsl_espi_remove(struct platform_device *dev)
+static int of_fsl_espi_remove(struct platform_device *dev)
{
return mpc8xxx_spi_remove(&dev->dev);
}
@@ -743,7 +743,7 @@ static struct platform_driver fsl_espi_driver = {
.of_match_table = of_fsl_espi_match,
},
.probe = of_fsl_espi_probe,
- .remove = __devexit_p(of_fsl_espi_remove),
+ .remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 1503574b215..8ade675a04f 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -169,7 +169,7 @@ err:
return ret;
}
-int __devexit mpc8xxx_spi_remove(struct device *dev)
+int mpc8xxx_spi_remove(struct device *dev)
{
struct mpc8xxx_spi *mpc8xxx_spi;
struct spi_master *master;
@@ -189,7 +189,7 @@ int __devexit mpc8xxx_spi_remove(struct device *dev)
return 0;
}
-int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev)
+int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 6a62934ca74..1a7f6359d99 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -843,7 +843,7 @@ static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
fsl_spi_cpm_free(mspi);
}
-static struct spi_master * __devinit fsl_spi_probe(struct device *dev,
+static struct spi_master * fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -1041,7 +1041,7 @@ static int of_fsl_spi_free_chipselects(struct device *dev)
return 0;
}
-static int __devinit of_fsl_spi_probe(struct platform_device *ofdev)
+static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -1081,7 +1081,7 @@ err:
return ret;
}
-static int __devexit of_fsl_spi_remove(struct platform_device *ofdev)
+static int of_fsl_spi_remove(struct platform_device *ofdev)
{
int ret;
@@ -1105,7 +1105,7 @@ static struct platform_driver of_fsl_spi_driver = {
.of_match_table = of_fsl_spi_match,
},
.probe = of_fsl_spi_probe,
- .remove = __devexit_p(of_fsl_spi_remove),
+ .remove = of_fsl_spi_remove,
};
#ifdef CONFIG_MPC832x_RDB
@@ -1116,7 +1116,7 @@ static struct platform_driver of_fsl_spi_driver = {
* tree can work with OpenFirmware driver. But for now we support old trees
* as well.
*/
-static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
@@ -1139,7 +1139,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
+static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
return mpc8xxx_spi_remove(&pdev->dev);
}
@@ -1147,7 +1147,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
- .remove = __devexit_p(plat_mpc8xxx_spi_remove),
+ .remove = plat_mpc8xxx_spi_remove,
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a2b50c516b3..c7cf0b7a069 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -287,7 +287,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -301,9 +301,8 @@ static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __devinit
-spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
- u16 *res_flags)
+static int spi_gpio_request(struct spi_gpio_platform_data *pdata,
+ const char *label, u16 *res_flags)
{
int value;
@@ -392,7 +391,7 @@ static inline int spi_gpio_probe_dt(struct platform_device *pdev)
}
#endif
-static int __devinit spi_gpio_probe(struct platform_device *pdev)
+static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
struct spi_master *master;
@@ -485,7 +484,7 @@ gpio_free:
return status;
}
-static int __devexit spi_gpio_remove(struct platform_device *pdev)
+static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
@@ -518,7 +517,7 @@ static struct platform_driver spi_gpio_driver = {
.of_match_table = of_match_ptr(spi_gpio_dt_ids),
},
.probe = spi_gpio_probe,
- .remove = __devexit_p(spi_gpio_remove),
+ .remove = spi_gpio_remove,
};
module_platform_driver(spi_gpio_driver);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c9a0d8467de..904913290aa 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -750,7 +750,7 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static int __devinit spi_imx_probe(struct platform_device *pdev)
+static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
@@ -906,7 +906,7 @@ out_gpio_free:
return ret;
}
-static int __devexit spi_imx_remove(struct platform_device *pdev)
+static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -942,7 +942,7 @@ static struct platform_driver spi_imx_driver = {
},
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
- .remove = __devexit_p(spi_imx_remove),
+ .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 0a1e39e94d0..cb3a3106bd4 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -406,7 +406,7 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq,
s16 bus_num)
{
@@ -492,7 +492,7 @@ free_master:
return ret;
}
-static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
+static int mpc512x_psc_spi_do_remove(struct device *dev)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
@@ -508,7 +508,7 @@ static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
return 0;
}
-static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
+static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -539,7 +539,7 @@ static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op)
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __devexit mpc512x_psc_spi_of_remove(struct platform_device *op)
+static int mpc512x_psc_spi_of_remove(struct platform_device *op)
{
return mpc512x_psc_spi_do_remove(&op->dev);
}
@@ -553,7 +553,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
static struct platform_driver mpc512x_psc_spi_of_driver = {
.probe = mpc512x_psc_spi_of_probe,
- .remove = __devexit_p(mpc512x_psc_spi_of_remove),
+ .remove = mpc512x_psc_spi_of_remove,
.driver = {
.name = "mpc512x-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index bd47d262d53..291120b37db 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -363,7 +363,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
}
/* bus_num is used only for the case dev->platform_data == NULL */
-static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq, s16 bus_num)
{
struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -450,7 +450,7 @@ free_master:
return ret;
}
-static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
+static int mpc52xx_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
@@ -479,7 +479,7 @@ static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
irq_of_parse_and_map(op->dev.of_node, 0), id);
}
-static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
+static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
@@ -505,7 +505,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
static struct platform_driver mpc52xx_psc_spi_of_driver = {
.probe = mpc52xx_psc_spi_of_probe,
- .remove = __devexit_p(mpc52xx_psc_spi_of_remove),
+ .remove = mpc52xx_psc_spi_of_remove,
.driver = {
.name = "mpc52xx-psc-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 04541065021..29f77056eed 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -390,7 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
/*
* OF Platform Bus Binding
*/
-static int __devinit mpc52xx_spi_probe(struct platform_device *op)
+static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
@@ -527,7 +527,7 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
return rc;
}
-static int __devexit mpc52xx_spi_remove(struct platform_device *op)
+static int mpc52xx_spi_remove(struct platform_device *op)
{
struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
struct mpc52xx_spi *ms = spi_master_get_devdata(master);
@@ -547,7 +547,7 @@ static int __devexit mpc52xx_spi_remove(struct platform_device *op)
return 0;
}
-static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
+static const struct of_device_id mpc52xx_spi_match[] = {
{ .compatible = "fsl,mpc5200-spi", },
{}
};
@@ -560,6 +560,6 @@ static struct platform_driver mpc52xx_spi_of_driver = {
.of_match_table = mpc52xx_spi_match,
},
.probe = mpc52xx_spi_probe,
- .remove = __devexit_p(mpc52xx_spi_remove),
+ .remove = mpc52xx_spi_remove,
};
module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 86dd04d6bc8..a3ede249d05 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -509,7 +509,7 @@ static const struct of_device_id mxs_spi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
-static int __devinit mxs_spi_probe(struct platform_device *pdev)
+static int mxs_spi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_spi_dt_ids, &pdev->dev);
@@ -636,7 +636,7 @@ out_master_free:
return ret;
}
-static int __devexit mxs_spi_remove(struct platform_device *pdev)
+static int mxs_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct mxs_spi *spi;
@@ -659,7 +659,7 @@ static int __devexit mxs_spi_remove(struct platform_device *pdev)
static struct platform_driver mxs_spi_driver = {
.probe = mxs_spi_probe,
- .remove = __devexit_p(mxs_spi_remove),
+ .remove = mxs_spi_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index a6eca6ffdab..b3f9ec83ef7 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -346,7 +346,7 @@ static void nuc900_init_spi(struct nuc900_spi *hw)
nuc900_enable_int(hw);
}
-static int __devinit nuc900_spi_probe(struct platform_device *pdev)
+static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
@@ -453,7 +453,7 @@ err_nomem:
return err;
}
-static int __devexit nuc900_spi_remove(struct platform_device *dev)
+static int nuc900_spi_remove(struct platform_device *dev)
{
struct nuc900_spi *hw = platform_get_drvdata(dev);
@@ -477,7 +477,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev)
static struct platform_driver nuc900_spi_driver = {
.probe = nuc900_spi_probe,
- .remove = __devexit_p(nuc900_spi_remove),
+ .remove = nuc900_spi_remove,
.driver = {
.name = "nuc900-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 9d9071b730b..432e66ec308 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -243,7 +243,7 @@ static irqreturn_t tiny_spi_irq(int irq, void *dev)
#ifdef CONFIG_OF
#include <linux/of_gpio.h>
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
@@ -277,13 +277,13 @@ static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
return 0;
}
#else /* !CONFIG_OF */
-static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
+static int tiny_spi_of_probe(struct platform_device *pdev)
{
return 0;
}
#endif /* CONFIG_OF */
-static int __devinit tiny_spi_probe(struct platform_device *pdev)
+static int tiny_spi_probe(struct platform_device *pdev)
{
struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
struct tiny_spi *hw;
@@ -373,7 +373,7 @@ exit:
return err;
}
-static int __devexit tiny_spi_remove(struct platform_device *pdev)
+static int tiny_spi_remove(struct platform_device *pdev)
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
@@ -399,7 +399,7 @@ MODULE_DEVICE_TABLE(of, tiny_spi_match);
static struct platform_driver tiny_spi_driver = {
.probe = tiny_spi_probe,
- .remove = __devexit_p(tiny_spi_remove),
+ .remove = tiny_spi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index ea8fb2efb0f..24daf964a40 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -266,7 +266,7 @@ static int octeon_spi_nop_transfer_hardware(struct spi_master *master)
return 0;
}
-static int __devinit octeon_spi_probe(struct platform_device *pdev)
+static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
@@ -326,7 +326,7 @@ fail:
return err;
}
-static int __devexit octeon_spi_remove(struct platform_device *pdev)
+static int octeon_spi_remove(struct platform_device *pdev)
{
struct octeon_spi *p = platform_get_drvdata(pdev);
u64 register_base = p->register_base;
@@ -352,7 +352,7 @@ static struct platform_driver octeon_spi_driver = {
.of_match_table = octeon_spi_match,
},
.probe = octeon_spi_probe,
- .remove = __devexit_p(octeon_spi_remove),
+ .remove = octeon_spi_remove,
};
module_platform_driver(octeon_spi_driver);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index dfb4b7f448c..3aef7fa7d5b 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -486,7 +486,7 @@ static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
return 0;
}
-static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
+static int omap1_spi100k_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3542fdc664b..b610f522ca4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -39,7 +39,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/err.h>
#include <linux/spi/spi.h>
@@ -130,6 +129,7 @@ struct omap2_mcspi {
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
struct omap2_mcspi_regs ctx;
+ unsigned int pin_dir:1;
};
struct omap2_mcspi_cs {
@@ -323,19 +323,11 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
unsigned int count;
- u8 * rx;
- const u8 * tx;
- void __iomem *chstat_reg;
- struct omap2_mcspi_cs *cs = spi->controller_state;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
-
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
@@ -359,19 +351,6 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
- wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
- DMA_TO_DEVICE);
-
- /* for TX_ONLY mode, be sure all words have shifted out */
- if (rx == NULL) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXS) < 0)
- dev_err(&spi->dev, "TXS timed out\n");
- else if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0)
- dev_err(&spi->dev, "EOT timed out\n");
- }
}
static unsigned
@@ -492,6 +471,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
+ void __iomem *chstat_reg;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -526,8 +506,24 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
omap2_mcspi_tx_dma(spi, xfer, cfg);
if (rx != NULL)
- return omap2_mcspi_rx_dma(spi, xfer, cfg, es);
-
+ count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
+ if (tx != NULL) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (rx == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
+ else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+ }
return count;
}
@@ -765,8 +761,15 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
*/
- l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
- l |= OMAP2_MCSPI_CHCONF_DPE0;
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ l &= ~OMAP2_MCSPI_CHCONF_IS;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ } else {
+ l |= OMAP2_MCSPI_CHCONF_IS;
+ l |= OMAP2_MCSPI_CHCONF_DPE1;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ }
/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
@@ -1085,7 +1088,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
return 0;
}
-static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
+static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1138,7 +1141,7 @@ static const struct of_device_id omap_mcspi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
+static int omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
const struct omap2_mcspi_platform_config *pdata;
@@ -1167,6 +1170,11 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ dev_set_drvdata(&pdev->dev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
match = of_match_device(omap_mcspi_of_match, &pdev->dev);
if (match) {
u32 num_cs = 1; /* default number of chipselect */
@@ -1175,19 +1183,17 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
master->num_chipselect = num_cs;
master->bus_num = bus_num++;
+ if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
} else {
pdata = pdev->dev.platform_data;
master->num_chipselect = pdata->num_cs;
if (pdev->id != -1)
master->bus_num = pdev->id;
+ mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
- dev_set_drvdata(&pdev->dev, master);
-
- mcspi = spi_master_get_devdata(master);
- mcspi->master = master;
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
@@ -1272,7 +1278,7 @@ free_master:
return status;
}
-static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
+static int omap2_mcspi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi *mcspi;
@@ -1341,7 +1347,7 @@ static struct platform_driver omap2_mcspi_driver = {
.of_match_table = omap_mcspi_of_match,
},
.probe = omap2_mcspi_probe,
- .remove = __devexit_p(omap2_mcspi_remove),
+ .remove = omap2_mcspi_remove,
};
module_platform_driver(omap2_mcspi_driver);
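
The omap2-mcspi hunks above move the DMA TX completion wait and the TX-only TXS/EOT drain out of omap2_mcspi_tx_dma() and into omap2_mcspi_txrx_dma(), and make the D0/D1 pin direction configurable. The drain step is the common "poll a status register bit until it sets or a timeout expires" idiom; what follows is a minimal, self-contained sketch of that idiom only (the register, bit positions and timeout here are illustrative stand-ins, not the driver's):

/*
 * Stand-alone sketch of polling a status bit with a timeout, as the
 * TX-only TXS/EOT drain above does.  The "register" is a plain
 * variable; bit names and the 1 ms timeout are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define CHSTAT_TXS	(1u << 1)	/* TX register empty (illustrative) */
#define CHSTAT_EOT	(1u << 2)	/* end of transfer   (illustrative) */

static volatile uint32_t fake_chstat;	/* stands in for an ioremapped register */

static int wait_for_bit(volatile uint32_t *reg, uint32_t bit, long timeout_ns)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (*reg & bit)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000000L +
		    (now.tv_nsec - start.tv_nsec) > timeout_ns)
			return -1;	/* timed out */
	}
}

int main(void)
{
	fake_chstat = CHSTAT_TXS | CHSTAT_EOT;	/* pretend the FIFO drained */

	if (wait_for_bit(&fake_chstat, CHSTAT_TXS, 1000000) < 0)
		puts("TXS timed out");
	else if (wait_for_bit(&fake_chstat, CHSTAT_EOT, 1000000) < 0)
		puts("EOT timed out");
	else
		puts("TX-only transfer fully shifted out");
	return 0;
}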
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b17c09cf0a0..b7e718254b1 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -32,8 +32,12 @@
#define ORION_SPI_DATA_IN_REG 0x0c
#define ORION_SPI_INT_CAUSE_REG 0x10
+#define ORION_SPI_MODE_CPOL (1 << 11)
+#define ORION_SPI_MODE_CPHA (1 << 12)
#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
+ ORION_SPI_MODE_CPHA)
struct orion_spi {
struct spi_master *master;
@@ -123,6 +127,23 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
return 0;
}
+static void
+orion_spi_mode_set(struct spi_device *spi)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg &= ~ORION_SPI_MODE_MASK;
+ if (spi->mode & SPI_CPOL)
+ reg |= ORION_SPI_MODE_CPOL;
+ if (spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_MODE_CPHA;
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+}
+
/*
* called only when no transfer is active on the bus
*/
@@ -142,6 +163,8 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if ((t != NULL) && t->bits_per_word)
bits_per_word = t->bits_per_word;
+ orion_spi_mode_set(spi);
+
rc = orion_spi_baudrate_set(spi, speed);
if (rc)
return rc;
@@ -399,7 +422,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
}
/* we support only mode 0, and no options */
- master->mode_bits = 0;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
@@ -478,7 +501,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:" DRIVER_NAME);
-static const struct of_device_id orion_spi_of_match_table[] __devinitdata = {
+static const struct of_device_id orion_spi_of_match_table[] = {
{ .compatible = "marvell,orion-spi", },
{}
};
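
The spi-orion change above adds SPI modes 1-3 by translating the core's SPI_CPOL/SPI_CPHA flags into the two new IF_CONFIG register bits before each transfer and advertising them in master->mode_bits. Below is a small stand-alone model of that read-modify-write step; the SPI_CPOL/SPI_CPHA values match include/linux/spi/spi.h, the register bit positions are copied from the #defines in the hunk, and everything else is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SPI_CPHA	0x01	/* clock phase (as in include/linux/spi/spi.h) */
#define SPI_CPOL	0x02	/* clock polarity */

#define IF_CFG_CPOL	(1u << 11)	/* bit positions from the hunk above */
#define IF_CFG_CPHA	(1u << 12)
#define IF_CFG_MODE_MASK (IF_CFG_CPOL | IF_CFG_CPHA)

static uint32_t apply_spi_mode(uint32_t if_cfg, unsigned int mode)
{
	if_cfg &= ~IF_CFG_MODE_MASK;	/* clear both mode bits first */
	if (mode & SPI_CPOL)
		if_cfg |= IF_CFG_CPOL;
	if (mode & SPI_CPHA)
		if_cfg |= IF_CFG_CPHA;
	return if_cfg;
}

int main(void)
{
	uint32_t reg = 0xdeadbe00u;	/* pretend current IF_CONFIG value */

	printf("mode 0: 0x%08x\n", (unsigned)apply_spi_mode(reg, 0));
	printf("mode 3: 0x%08x\n",
	       (unsigned)apply_spi_mode(reg, SPI_CPOL | SPI_CPHA));
	return 0;
}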
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index a1db91a99b8..b0fe393c882 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -371,6 +371,7 @@ struct pl022 {
/* Two optional pin states - default & sleep */
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_idle;
struct pinctrl_state *pins_sleep;
struct spi_master *master;
struct pl022_ssp_controller *master_info;
@@ -1088,7 +1089,7 @@ err_alloc_rx_sg:
return -ENOMEM;
}
-static int __devinit pl022_dma_probe(struct pl022 *pl022)
+static int pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
@@ -2057,8 +2058,7 @@ pl022_platform_data_dt_get(struct device *dev)
return pd;
}
-static int __devinit
-pl022_probe(struct amba_device *adev, const struct amba_id *id)
+static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
@@ -2116,6 +2116,11 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
} else
dev_err(dev, "could not get default pinstate\n");
+ pl022->pins_idle = pinctrl_lookup_state(pl022->pinctrl,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(pl022->pins_idle))
+ dev_dbg(dev, "could not get idle pinstate\n");
+
pl022->pins_sleep = pinctrl_lookup_state(pl022->pinctrl,
PINCTRL_STATE_SLEEP);
if (IS_ERR(pl022->pins_sleep))
@@ -2246,10 +2251,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_set_autosuspend_delay(dev,
platform_info->autosuspend_delay);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put_autosuspend(dev);
- } else {
- pm_runtime_put(dev);
}
+ pm_runtime_put(dev);
+
return 0;
err_spi_register:
@@ -2270,7 +2274,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
return status;
}
-static int __devexit
+static int
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
@@ -2303,35 +2307,47 @@ pl022_remove(struct amba_device *adev)
* the runtime counterparts to handle external resources like
* clocks, pins and regulators when going to sleep.
*/
-static void pl022_suspend_resources(struct pl022 *pl022)
+static void pl022_suspend_resources(struct pl022 *pl022, bool runtime)
{
int ret;
+ struct pinctrl_state *pins_state;
clk_disable(pl022->clk);
+ pins_state = runtime ? pl022->pins_idle : pl022->pins_sleep;
/* Optionally let pins go into sleep states */
- if (!IS_ERR(pl022->pins_sleep)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_sleep);
+ if (!IS_ERR(pins_state)) {
+ ret = pinctrl_select_state(pl022->pinctrl, pins_state);
if (ret)
- dev_err(&pl022->adev->dev,
- "could not set pins to sleep state\n");
+ dev_err(&pl022->adev->dev, "could not set %s pins\n",
+ runtime ? "idle" : "sleep");
}
}
-static void pl022_resume_resources(struct pl022 *pl022)
+static void pl022_resume_resources(struct pl022 *pl022, bool runtime)
{
int ret;
/* Optionally enable pins to be muxed in and configured */
+ /* First go to the default state */
if (!IS_ERR(pl022->pins_default)) {
- ret = pinctrl_select_state(pl022->pinctrl,
- pl022->pins_default);
+ ret = pinctrl_select_state(pl022->pinctrl, pl022->pins_default);
if (ret)
dev_err(&pl022->adev->dev,
"could not set default pins\n");
}
+ if (!runtime) {
+ /* Then let's idle the pins until the next transfer happens */
+ if (!IS_ERR(pl022->pins_idle)) {
+ ret = pinctrl_select_state(pl022->pinctrl,
+ pl022->pins_idle);
+ if (ret)
+ dev_err(&pl022->adev->dev,
+ "could not set idle pins\n");
+ }
+ }
+
clk_enable(pl022->clk);
}
#endif
@@ -2347,7 +2363,9 @@ static int pl022_suspend(struct device *dev)
dev_warn(dev, "cannot suspend master\n");
return ret;
}
- pl022_suspend_resources(pl022);
+
+ pm_runtime_get_sync(dev);
+ pl022_suspend_resources(pl022, false);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2358,7 +2376,8 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022);
+ pl022_resume_resources(pl022, false);
+ pm_runtime_put(dev);
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2376,7 +2395,7 @@ static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022);
+ pl022_suspend_resources(pl022, true);
return 0;
}
@@ -2384,7 +2403,7 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022);
+ pl022_resume_resources(pl022, true);
return 0;
}
#endif
@@ -2464,7 +2483,7 @@ static struct amba_driver pl022_driver = {
},
.id_table = pl022_ids,
.probe = pl022_probe,
- .remove = __devexit_p(pl022_remove),
+ .remove = pl022_remove,
};
static int __init pl022_init(void)
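
The pl022 changes above add an optional "idle" pinctrl state next to the existing "default" and "sleep" states: runtime suspend now parks the pins in the idle state, system suspend uses the sleep state, and an optional state that was not provided is simply skipped. The decision logic, reduced to a user-space model with no pinctrl API (state names mirror the PINCTRL_STATE_* convention; the struct is invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct pins {
	bool have_idle;		/* models a successful PINCTRL_STATE_IDLE lookup */
	bool have_sleep;	/* models a successful PINCTRL_STATE_SLEEP lookup */
};

/* Pick the low-power pin state for runtime vs. system suspend. */
static const char *pick_low_power_state(const struct pins *p, bool runtime)
{
	if (runtime)
		return p->have_idle ? "idle" : NULL;
	return p->have_sleep ? "sleep" : NULL;
}

int main(void)
{
	struct pins p = { .have_idle = true, .have_sleep = false };
	const char *s;

	s = pick_low_power_state(&p, true);	/* runtime PM path */
	printf("runtime suspend -> %s\n", s ? s : "keep default pins");

	s = pick_low_power_state(&p, false);	/* system suspend path */
	printf("system suspend  -> %s\n", s ? s : "keep default pins");
	return 0;
}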
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 9f6ba34b172..cf95587eefd 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -51,7 +51,7 @@ void pxa_ssp_free(struct ssp_device *ssp)
}
EXPORT_SYMBOL_GPL(pxa_ssp_free);
-static int __devinit ce4100_spi_probe(struct pci_dev *dev,
+static int ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
@@ -129,7 +129,7 @@ err_nomem:
return ret;
}
-static void __devexit ce4100_spi_remove(struct pci_dev *dev)
+static void ce4100_spi_remove(struct pci_dev *dev)
{
struct ce4100_info *spi_info;
struct ssp_device *ssp;
@@ -161,7 +161,7 @@ static struct pci_driver ce4100_spi_driver = {
.name = "ce4100_spi",
.id_table = ce4100_spi_devices,
.probe = ce4100_spi_probe,
- .remove = __devexit_p(ce4100_spi_remove),
+ .remove = ce4100_spi_remove,
};
module_pci_driver(ce4100_spi_driver);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33..5c8c4f5883c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1438,7 +1438,7 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-static int __devinit init_queue(struct driver_data *drv_data)
+static int init_queue(struct driver_data *drv_data)
{
INIT_LIST_HEAD(&drv_data->queue);
spin_lock_init(&drv_data->lock);
@@ -1526,7 +1526,7 @@ static int destroy_queue(struct driver_data *drv_data)
return 0;
}
-static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
+static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa2xx_spi_master *platform_info;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 30faf6d4ab9..902f2fb902d 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -661,7 +661,7 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
-static int __devinit rspi_request_dma(struct rspi_data *rspi,
+static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
@@ -709,7 +709,7 @@ static int __devinit rspi_request_dma(struct rspi_data *rspi,
return 0;
}
-static void __devexit rspi_release_dma(struct rspi_data *rspi)
+static void rspi_release_dma(struct rspi_data *rspi)
{
if (rspi->chan_tx)
dma_release_channel(rspi->chan_tx);
@@ -717,7 +717,7 @@ static void __devexit rspi_release_dma(struct rspi_data *rspi)
dma_release_channel(rspi->chan_rx);
}
-static int __devexit rspi_remove(struct platform_device *pdev)
+static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
@@ -731,7 +731,7 @@ static int __devexit rspi_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit rspi_probe(struct platform_device *pdev)
+static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -827,7 +827,7 @@ error1:
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
- .remove = __devexit_p(rspi_remove),
+ .remove = rspi_remove,
.driver = {
.name = "rspi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index a2a080b7f42..02d64603fcc 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -506,7 +506,7 @@ static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
}
}
-static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
+static int s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c2410_spi_info *pdata;
struct s3c24xx_spi *hw;
@@ -663,7 +663,7 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
return err;
}
-static int __devexit s3c24xx_spi_remove(struct platform_device *dev)
+static int s3c24xx_spi_remove(struct platform_device *dev)
{
struct s3c24xx_spi *hw = platform_get_drvdata(dev);
@@ -722,7 +722,7 @@ static const struct dev_pm_ops s3c24xx_spi_pmops = {
MODULE_ALIAS("platform:s3c2410-spi");
static struct platform_driver s3c24xx_spi_driver = {
.probe = s3c24xx_spi_probe,
- .remove = __devexit_p(s3c24xx_spi_remove),
+ .remove = s3c24xx_spi_remove,
.driver = {
.name = "s3c2410-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 6e7a805d324..ad93231a803 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -215,6 +215,10 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -248,10 +252,6 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
-
- val = readl(regs + S3C64XX_SPI_CH_CFG);
- val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
- writel(val, regs + S3C64XX_SPI_CH_CFG);
}
static void s3c64xx_spi_dmacb(void *data)
@@ -516,7 +516,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* Disable Clock */
if (sdd->port_conf->clk_from_cmu) {
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
@@ -564,7 +564,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
/* There is a half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
/* Enable Clock */
- clk_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -771,8 +771,6 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
if (list_is_last(&xfer->transfer_list,
&msg->transfers))
cs_toggle = 1;
- else
- disable_cs(sdd, spi);
}
msg->actual_length += xfer->len;
@@ -1056,7 +1054,7 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
flush_fifo(sdd);
}
-static int __devinit s3c64xx_spi_get_dmares(
+static int s3c64xx_spi_get_dmares(
struct s3c64xx_spi_driver_data *sdd, bool tx)
{
struct platform_device *pdev = sdd->pdev;
@@ -1112,7 +1110,7 @@ static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
-
+ sdd->gpios[idx] = gpio;
ret = gpio_request(gpio, "spi-bus");
if (ret) {
dev_err(dev, "gpio [%d] request failed: %d\n",
@@ -1135,7 +1133,7 @@ static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
gpio_free(sdd->gpios[idx]);
}
-static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
+static struct s3c64xx_spi_info * s3c64xx_spi_parse_dt(
struct device *dev)
{
struct s3c64xx_spi_info *sci;
@@ -1302,7 +1300,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err3;
}
- if (clk_enable(sdd->clk)) {
+ if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
goto err4;
@@ -1317,7 +1315,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err5;
}
- if (clk_enable(sdd->src_clk)) {
+ if (clk_prepare_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err6;
@@ -1361,11 +1359,11 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
err8:
free_irq(irq, sdd);
err7:
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
err6:
clk_put(sdd->src_clk);
err5:
- clk_disable(sdd->clk);
+ clk_disable_unprepare(sdd->clk);
err4:
clk_put(sdd->clk);
err3:
@@ -1393,10 +1391,10 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
free_irq(platform_get_irq(pdev, 0), sdd);
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->src_clk);
clk_put(sdd->src_clk);
- clk_disable(sdd->clk);
+ clk_disable_unprepare(sdd->clk);
clk_put(sdd->clk);
if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
@@ -1417,8 +1415,8 @@ static int s3c64xx_spi_suspend(struct device *dev)
spi_master_suspend(master);
/* Disable the clock */
- clk_disable(sdd->src_clk);
- clk_disable(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
s3c64xx_spi_dt_gpio_free(sdd);
@@ -1440,8 +1438,8 @@ static int s3c64xx_spi_resume(struct device *dev)
sci->cfg_gpio();
/* Enable the clock */
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
s3c64xx_spi_hwinit(sdd, sdd->port_id);
@@ -1457,8 +1455,8 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- clk_disable(sdd->clk);
- clk_disable(sdd->src_clk);
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
return 0;
}
@@ -1468,8 +1466,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- clk_enable(sdd->src_clk);
- clk_enable(sdd->clk);
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
return 0;
}
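
The s3c64xx hunks above convert bare clk_enable()/clk_disable() calls to clk_prepare_enable()/clk_disable_unprepare(), which the common clock framework requires: a clock must be prepared (a step that may sleep) before it can be enabled. A short kernel-context sketch of that pattern, not the s3c64xx driver itself; my_hw_clock_on/off and the "spi_clk" con_id are placeholder names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_hw_clock_on(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "spi_clk");	/* placeholder con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* clk_prepare() + clk_enable() */
	if (ret)
		return ret;

	*out = clk;
	return 0;
}

static void my_hw_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);		/* clk_disable() + clk_unprepare() */
}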
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 796c077ef43..60cfae51c71 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -68,6 +68,16 @@ static u32 hspi_read(struct hspi_priv *hspi, int reg)
return ioread32(hspi->addr + reg);
}
+static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
+{
+ u32 val = hspi_read(hspi, reg);
+
+ val &= ~mask;
+ val |= set & mask;
+
+ hspi_write(hspi, reg, val);
+}
+
/*
* transfer function
*/
@@ -105,6 +115,13 @@ static int hspi_unprepare_transfer(struct spi_master *master)
return 0;
}
+#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
+#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
+static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
+{
+ hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
+}
+
static void hspi_hw_setup(struct hspi_priv *hspi,
struct spi_message *msg,
struct spi_transfer *t)
@@ -155,7 +172,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
- hspi_write(hspi, SPSCR, 0x1); /* master mode */
+ hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
static int hspi_transfer_one_message(struct spi_master *master,
@@ -166,12 +183,21 @@ static int hspi_transfer_one_message(struct spi_master *master,
u32 tx;
u32 rx;
int ret, i;
+ unsigned int cs_change;
+ const int nsecs = 50;
dev_dbg(hspi->dev, "%s\n", __func__);
+ cs_change = 1;
ret = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
- hspi_hw_setup(hspi, msg, t);
+
+ if (cs_change) {
+ hspi_hw_setup(hspi, msg, t);
+ hspi_hw_cs_enable(hspi);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
for (i = 0; i < t->len; i++) {
@@ -198,9 +224,22 @@ static int hspi_transfer_one_message(struct spi_master *master,
}
msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ ndelay(nsecs);
+ }
}
msg->status = ret;
+ if (!cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ }
spi_finalize_current_message(master);
return ret;
@@ -229,7 +268,7 @@ static void hspi_cleanup(struct spi_device *spi)
dev_dbg(dev, "%s cleanup\n", spi->modalias);
}
-static int __devinit hspi_probe(struct platform_device *pdev)
+static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -251,7 +290,7 @@ static int __devinit hspi_probe(struct platform_device *pdev)
}
clk = clk_get(NULL, "shyway_clk");
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "shyway_clk is required\n");
ret = -EINVAL;
goto error0;
@@ -300,7 +339,7 @@ static int __devinit hspi_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit hspi_remove(struct platform_device *pdev)
+static int hspi_remove(struct platform_device *pdev)
{
struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
@@ -314,7 +353,7 @@ static int __devexit hspi_remove(struct platform_device *pdev)
static struct platform_driver hspi_driver = {
.probe = hspi_probe,
- .remove = __devexit_p(hspi_remove),
+ .remove = hspi_remove,
.driver = {
.name = "sh-hspi",
.owner = THIS_MODULE,
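
The sh-hspi changes above add manual chip-select control to the transfer loop: CS is asserted before the first transfer, released between transfers only when a transfer sets cs_change, and released at the end of the message unless the final transfer already toggled it. A stand-alone model of just that bookkeeping (transfers are reduced to the single cs_change flag; everything else is invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct xfer { bool cs_change; };

static void run_message(const struct xfer *x, int n)
{
	bool cs_change = true;	/* forces CS assert before the first transfer */
	int i;

	for (i = 0; i < n; i++) {
		if (cs_change)
			puts("  assert CS");
		cs_change = x[i].cs_change;

		printf("  transfer %d\n", i);

		if (cs_change)
			puts("  release CS");
	}
	if (!cs_change)
		puts("  release CS (end of message)");
}

int main(void)
{
	struct xfer msg[] = { { false }, { true }, { false } };

	puts("message:");
	run_message(msg, 3);
	return 0;
}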
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1f466bc66d9..96358d0eabb 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -597,7 +597,6 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct resource *r;
struct spi_master *master;
struct sh_msiof_spi_priv *p;
- char clk_name[16];
int i;
int ret;
@@ -614,10 +613,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->info = pdev->dev.platform_data;
init_completion(&p->done);
- snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
- p->clk = clk_get(&pdev->dev, clk_name);
+ p->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 79442c31bcd..3c3600a994b 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -432,7 +432,7 @@ static irqreturn_t spi_sh_irq(int irq, void *_ss)
return IRQ_HANDLED;
}
-static int __devexit spi_sh_remove(struct platform_device *pdev)
+static int spi_sh_remove(struct platform_device *pdev)
{
struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
@@ -444,7 +444,7 @@ static int __devexit spi_sh_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit spi_sh_probe(struct platform_device *pdev)
+static int spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
@@ -539,7 +539,7 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
static struct platform_driver spi_sh_driver = {
.probe = spi_sh_probe,
- .remove = __devexit_p(spi_sh_remove),
+ .remove = spi_sh_remove,
.driver = {
.name = "sh_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index ecc3d9763d1..e0f43a512e8 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -479,7 +479,7 @@ static int spi_sirfsoc_setup(struct spi_device *spi)
return spi_sirfsoc_setup_transfer(spi, NULL);
}
-static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
+static int spi_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_spi *sspi;
struct spi_master *master;
@@ -604,7 +604,7 @@ err_cs:
return ret;
}
-static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
+static int spi_sirfsoc_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct sirfsoc_spi *sspi;
@@ -673,7 +673,7 @@ static struct platform_driver spi_sirfsoc_driver = {
.of_match_table = spi_sirfsoc_of_match,
},
.probe = spi_sirfsoc_probe,
- .remove = __devexit_p(spi_sirfsoc_remove),
+ .remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c
deleted file mode 100644
index 911e904b3c8..00000000000
--- a/drivers/spi/spi-stmp.c
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Freescale STMP378X SPI master driver
- *
- * Author: dmitry pervushin <dimka@embeddedalley.com>
- *
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-/*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-
-#include <mach/platform.h>
-#include <mach/stmp3xxx.h>
-#include <mach/dma.h>
-#include <mach/regs-ssp.h>
-#include <mach/regs-apbh.h>
-
-
-/* 0 means DMA mode(recommended, default), !0 - PIO mode */
-static int pio;
-static int clock;
-
-/* default timeout for busy waits is 2 seconds */
-#define STMP_SPI_TIMEOUT (2 * HZ)
-
-struct stmp_spi {
- int id;
-
- void * __iomem regs; /* vaddr of the control registers */
-
- int irq, err_irq;
- u32 dma;
- struct stmp3xxx_dma_descriptor d;
-
- u32 speed_khz;
- u32 saved_timings;
- u32 divider;
-
- struct clk *clk;
- struct device *master_dev;
-
- struct work_struct work;
- struct workqueue_struct *workqueue;
-
- /* lock protects queue access */
- spinlock_t lock;
- struct list_head queue;
-
- struct completion done;
-};
-
-#define busy_wait(cond) \
- ({ \
- unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \
- bool succeeded = false; \
- do { \
- if (cond) { \
- succeeded = true; \
- break; \
- } \
- cpu_relax(); \
- } while (time_before(jiffies, end_jiffies)); \
- succeeded; \
- })
-
-/**
- * stmp_spi_init_hw
- * Initialize the SSP port
- */
-static int stmp_spi_init_hw(struct stmp_spi *ss)
-{
- int err = 0;
- void *pins = ss->master_dev->platform_data;
-
- err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
- if (err)
- goto out;
-
- ss->clk = clk_get(NULL, "ssp");
- if (IS_ERR(ss->clk)) {
- err = PTR_ERR(ss->clk);
- goto out_free_pins;
- }
- clk_enable(ss->clk);
-
- stmp3xxx_reset_block(ss->regs, false);
- stmp3xxx_dma_reset_channel(ss->dma);
-
- return 0;
-
-out_free_pins:
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-out:
- return err;
-}
-
-static void stmp_spi_release_hw(struct stmp_spi *ss)
-{
- void *pins = ss->master_dev->platform_data;
-
- if (ss->clk && !IS_ERR(ss->clk)) {
- clk_disable(ss->clk);
- clk_put(ss->clk);
- }
- stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
-}
-
-static int stmp_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- u8 bits_per_word;
- u32 hz;
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- u16 rate;
-
- bits_per_word = spi->bits_per_word;
- if (t && t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
- /*
- * Calculate speed:
- * - by default, use maximum speed from ssp clk
- * - if device overrides it, use it
- * - if transfer specifies other speed, use transfer's one
- */
- hz = 1000 * ss->speed_khz / ss->divider;
- if (spi->max_speed_hz)
- hz = min(hz, spi->max_speed_hz);
- if (t && t->speed_hz)
- hz = min(hz, t->speed_hz);
-
- if (hz == 0) {
- dev_err(&spi->dev, "Cannot continue with zero clock\n");
- return -EINVAL;
- }
-
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n",
- hz, ss->speed_khz, ss->divider,
- ss->speed_khz * 1000 / ss->divider);
-
- if (ss->speed_khz * 1000 / ss->divider < hz) {
- dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
- __func__, hz);
- return -EINVAL;
- }
-
- rate = 1000 * ss->speed_khz/ss->divider/hz;
-
- writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
- BF(rate - 1, SSP_TIMING_CLOCK_RATE),
- HW_SSP_TIMING + ss->regs);
-
- writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
- BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
- ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
- ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
- (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
- ss->regs + HW_SSP_CTRL1);
-
- return 0;
-}
-
-static int stmp_spi_setup(struct spi_device *spi)
-{
- /* spi_setup() does basic checks,
- * stmp_spi_setup_transfer() does more later
- */
- if (spi->bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, spi->bits_per_word);
- return -EINVAL;
- }
- return 0;
-}
-
-static inline u32 stmp_spi_cs(unsigned cs)
-{
- return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
- ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
-}
-
-static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
- unsigned char *buf, dma_addr_t dma_buf, int len,
- int first, int last, bool write)
-{
- u32 c0 = 0;
- dma_addr_t spi_buf_dma = dma_buf;
- int status = 0;
- enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
- c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
- c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
- c0 |= BM_SSP_CTRL0_DATA_XFER;
-
- c0 |= stmp_spi_cs(cs);
-
- c0 |= BF(len, SSP_CTRL0_XFER_COUNT);
-
- if (!dma_buf)
- spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);
-
- ss->d.command->cmd =
- BF(len, APBH_CHn_CMD_XFER_COUNT) |
- BF(1, APBH_CHn_CMD_CMDWORDS) |
- BM_APBH_CHn_CMD_WAIT4ENDCMD |
- BM_APBH_CHn_CMD_IRQONCMPLT |
- BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
- BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
- APBH_CHn_CMD_COMMAND);
- ss->d.command->pio_words[0] = c0;
- ss->d.command->buf_ptr = spi_buf_dma;
-
- stmp3xxx_dma_reset_channel(ss->dma);
- stmp3xxx_dma_clear_interrupt(ss->dma);
- stmp3xxx_dma_enable_interrupt(ss->dma);
- init_completion(&ss->done);
- stmp3xxx_dma_go(ss->dma, &ss->d, 1);
- wait_for_completion(&ss->done);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
- status = -ETIMEDOUT;
-
- if (!dma_buf)
- dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
-
- return status;
-}
-
-static inline void stmp_spi_enable(struct stmp_spi *ss)
-{
- stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static inline void stmp_spi_disable(struct stmp_spi *ss)
-{
- stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
-}
-
-static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
- unsigned char *buf, int len,
- bool first, bool last, bool write)
-{
- if (first)
- stmp_spi_enable(ss);
-
- stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);
-
- while (len--) {
- if (last && len <= 0)
- stmp_spi_disable(ss);
-
- stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
- ss->regs + HW_SSP_CTRL0);
- stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);
-
- if (write)
- stmp3xxx_clearl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
- else
- stmp3xxx_setl(BM_SSP_CTRL0_READ,
- ss->regs + HW_SSP_CTRL0);
-
- /* Run! */
- stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- if (write)
- writel(*buf, ss->regs + HW_SSP_DATA);
-
- /* Set TRANSFER */
- stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);
-
- if (!write) {
- if (busy_wait((readl(ss->regs + HW_SSP_STATUS) &
- BM_SSP_STATUS_FIFO_EMPTY)))
- break;
- *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
- }
-
- if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
- BM_SSP_CTRL0_RUN))
- break;
-
- /* advance to the next byte */
- buf++;
- }
-
- return len < 0 ? 0 : -ETIMEDOUT;
-}
-
-static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
-{
- bool first, last;
- struct spi_transfer *t, *tmp_t;
- int status = 0;
- int cs;
-
- cs = m->spi->chip_select;
-
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
-
- first = (&t->transfer_list == m->transfers.next);
- last = (&t->transfer_list == m->transfers.prev);
-
- if (first || t->speed_hz || t->bits_per_word)
- stmp_spi_setup_transfer(m->spi, t);
-
- /* reject "not last" transfers which request to change cs */
- if (t->cs_change && !last) {
- dev_err(&m->spi->dev,
- "Message with t->cs_change has been skipped\n");
- continue;
- }
-
- if (t->tx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
- t->len, first, last, true) :
- stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
- t->tx_dma, t->len, first, last, true);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Tx ",
- DUMP_PREFIX_OFFSET,
- t->tx_buf, t->len);
- else
- pr_debug("Tx: %d bytes\n", t->len);
-#endif
- }
- if (t->rx_buf) {
- status = pio ?
- stmp_spi_txrx_pio(ss, cs, t->rx_buf,
- t->len, first, last, false) :
- stmp_spi_txrx_dma(ss, cs, t->rx_buf,
- t->rx_dma, t->len, first, last, false);
-#ifdef DEBUG
- if (t->len < 0x10)
- print_hex_dump_bytes("Rx ",
- DUMP_PREFIX_OFFSET,
- t->rx_buf, t->len);
- else
- pr_debug("Rx: %d bytes\n", t->len);
-#endif
- }
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (status)
- break;
-
- }
- return status;
-}
-
-/**
- * stmp_spi_handle - handle messages from the queue
- */
-static void stmp_spi_handle(struct work_struct *w)
-{
- struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
- unsigned long flags;
- struct spi_message *m;
-
- spin_lock_irqsave(&ss->lock, flags);
- while (!list_empty(&ss->queue)) {
- m = list_entry(ss->queue.next, struct spi_message, queue);
- list_del_init(&m->queue);
- spin_unlock_irqrestore(&ss->lock, flags);
-
- m->status = stmp_spi_handle_message(ss, m);
- m->complete(m->context);
-
- spin_lock_irqsave(&ss->lock, flags);
- }
- spin_unlock_irqrestore(&ss->lock, flags);
-
- return;
-}
-
-/**
- * stmp_spi_transfer - perform message transfer.
- * Called indirectly from spi_async, queues all the messages to
- * spi_handle_message.
- * @spi: spi device
- * @m: message to be queued
- */
-static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
- struct stmp_spi *ss = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- m->status = -EINPROGRESS;
- spin_lock_irqsave(&ss->lock, flags);
- list_add_tail(&m->queue, &ss->queue);
- queue_work(ss->workqueue, &ss->work);
- spin_unlock_irqrestore(&ss->lock, flags);
- return 0;
-}
-
-static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
-
- stmp3xxx_dma_clear_interrupt(ss->dma);
- complete(&ss->done);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
-{
- struct stmp_spi *ss = dev_id;
- u32 c1, st;
-
- c1 = readl(ss->regs + HW_SSP_CTRL1);
- st = readl(ss->regs + HW_SSP_STATUS);
- dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
- __func__, st, c1);
- stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit stmp_spi_probe(struct platform_device *dev)
-{
- int err = 0;
- struct spi_master *master;
- struct stmp_spi *ss;
- struct resource *r;
-
- master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
- if (master == NULL) {
- err = -ENOMEM;
- goto out0;
- }
- master->flags = SPI_MASTER_HALF_DUPLEX;
-
- ss = spi_master_get_devdata(master);
- platform_set_drvdata(dev, master);
-
- /* Get resources(memory, IRQ) associated with the device */
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
- ss->regs = ioremap(r->start, resource_size(r));
- if (!ss->regs) {
- err = -EINVAL;
- goto out_put_master;
- }
-
- ss->master_dev = &dev->dev;
- ss->id = dev->id;
-
- INIT_WORK(&ss->work, stmp_spi_handle);
- INIT_LIST_HEAD(&ss->queue);
- spin_lock_init(&ss->lock);
-
- ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
- if (!ss->workqueue) {
- err = -ENXIO;
- goto out_put_master;
- }
- master->transfer = stmp_spi_transfer;
- master->setup = stmp_spi_setup;
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
-
- ss->irq = platform_get_irq(dev, 0);
- if (ss->irq < 0) {
- err = ss->irq;
- goto out_put_master;
- }
- ss->err_irq = platform_get_irq(dev, 1);
- if (ss->err_irq < 0) {
- err = ss->err_irq;
- goto out_put_master;
- }
-
- r = platform_get_resource(dev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- err = -ENODEV;
- goto out_put_master;
- }
-
- ss->dma = r->start;
- err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
- if (err)
- goto out_put_master;
-
- err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
- if (err)
- goto out_free_dma;
-
- master->bus_num = dev->id;
- master->num_chipselect = 1;
-
- /* SPI controller initializations */
- err = stmp_spi_init_hw(ss);
- if (err) {
- dev_dbg(&dev->dev, "cannot initialize hardware\n");
- goto out_free_dma_desc;
- }
-
- if (clock) {
- dev_info(&dev->dev, "clock rate forced to %d\n", clock);
- clk_set_rate(ss->clk, clock);
- }
- ss->speed_khz = clk_get_rate(ss->clk);
- ss->divider = 2;
- dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n",
- ss->speed_khz, clk_get_rate(ss->clk), ss->divider);
-
- /* Register for SPI interrupt */
- err = request_irq(ss->irq, stmp_spi_irq, 0,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
- goto out_release_hw;
- }
-
- /* ..and shared interrupt for all SSP controllers */
- err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
- dev_name(&dev->dev), ss);
- if (err) {
- dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
- goto out_free_irq;
- }
-
- err = spi_register_master(master);
- if (err) {
- dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
- goto out_free_irq_2;
- }
- dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
- (u32)ss->regs, ss->irq, master->bus_num,
- pio ? "PIO" : "DMA");
- return 0;
-
-out_free_irq_2:
- free_irq(ss->err_irq, ss);
-out_free_irq:
- free_irq(ss->irq, ss);
-out_free_dma_desc:
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
-out_free_dma:
- stmp3xxx_dma_release(ss->dma);
-out_release_hw:
- stmp_spi_release_hw(ss);
-out_put_master:
- if (ss->workqueue)
- destroy_workqueue(ss->workqueue);
- if (ss->regs)
- iounmap(ss->regs);
- platform_set_drvdata(dev, NULL);
- spi_master_put(master);
-out0:
- return err;
-}
-
-static int __devexit stmp_spi_remove(struct platform_device *dev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = spi_master_get(platform_get_drvdata(dev));
- ss = spi_master_get_devdata(master);
-
- spi_unregister_master(master);
-
- free_irq(ss->err_irq, ss);
- free_irq(ss->irq, ss);
- stmp3xxx_dma_free_command(ss->dma, &ss->d);
- stmp3xxx_dma_release(ss->dma);
- stmp_spi_release_hw(ss);
- destroy_workqueue(ss->workqueue);
- iounmap(ss->regs);
- spi_master_put(master);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- ss->saved_timings = readl(HW_SSP_TIMING + ss->regs);
- clk_disable(ss->clk);
-
- return 0;
-}
-
-static int stmp_spi_resume(struct platform_device *pdev)
-{
- struct stmp_spi *ss;
- struct spi_master *master;
-
- master = platform_get_drvdata(pdev);
- ss = spi_master_get_devdata(master);
-
- clk_enable(ss->clk);
- stmp3xxx_reset_block(ss->regs, false);
- writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);
-
- return 0;
-}
-
-#else
-#define stmp_spi_suspend NULL
-#define stmp_spi_resume NULL
-#endif
-
-static struct platform_driver stmp_spi_driver = {
- .probe = stmp_spi_probe,
- .remove = __devexit_p(stmp_spi_remove),
- .driver = {
- .name = "stmp3xxx_ssp",
- .owner = THIS_MODULE,
- },
- .suspend = stmp_spi_suspend,
- .resume = stmp_spi_resume,
-};
-module_platform_driver(stmp_spi_driver);
-
-module_param(pio, int, S_IRUGO);
-module_param(clock, int, S_IRUGO);
-MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
-MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
new file mode 100644
index 00000000000..448a8cc71df
--- /dev/null
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -0,0 +1,665 @@
+/*
+ * SPI driver for Nvidia's Tegra20 Serial Flash Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-tegra.h>
+#include <mach/clk.h>
+
+#define SPI_COMMAND 0x000
+#define SPI_GO BIT(30)
+#define SPI_M_S BIT(28)
+#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
+#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
+#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
+
+#define SPI_CK_SDA_FALLING (1 << 21)
+#define SPI_CK_SDA_RISING (0 << 21)
+#define SPI_CK_SDA_MASK (1 << 21)
+#define SPI_ACTIVE_SDA (0x3 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
+#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
+
+#define SPI_CS_POL_INVERT BIT(16)
+#define SPI_TX_EN BIT(15)
+#define SPI_RX_EN BIT(14)
+#define SPI_CS_VAL_HIGH BIT(13)
+#define SPI_CS_VAL_LOW 0x0
+#define SPI_CS_SW BIT(12)
+#define SPI_CS_HW 0x0
+#define SPI_CS_DELAY_MASK (7 << 9)
+#define SPI_CS3_EN BIT(8)
+#define SPI_CS2_EN BIT(7)
+#define SPI_CS1_EN BIT(6)
+#define SPI_CS0_EN BIT(5)
+
+#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
+ SPI_CS1_EN | SPI_CS0_EN)
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+
+#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
+
+#define SPI_STATUS 0x004
+#define SPI_BSY BIT(31)
+#define SPI_RDY BIT(30)
+#define SPI_TXF_FLUSH BIT(29)
+#define SPI_RXF_FLUSH BIT(28)
+#define SPI_RX_UNF BIT(27)
+#define SPI_TX_OVF BIT(26)
+#define SPI_RXF_EMPTY BIT(25)
+#define SPI_RXF_FULL BIT(24)
+#define SPI_TXF_EMPTY BIT(23)
+#define SPI_TXF_FULL BIT(22)
+#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
+
+#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
+#define SPI_FIFO_EMPTY (SPI_TXF_EMPTY | SPI_RXF_EMPTY)
+
+#define SPI_RX_CMP 0x8
+#define SPI_DMA_CTL 0x0C
+#define SPI_DMA_EN BIT(31)
+#define SPI_IE_RXC BIT(27)
+#define SPI_IE_TXC BIT(26)
+#define SPI_PACKED BIT(20)
+#define SPI_RX_TRIG_MASK (0x3 << 18)
+#define SPI_RX_TRIG_1W (0x0 << 18)
+#define SPI_RX_TRIG_4W (0x1 << 18)
+#define SPI_TX_TRIG_MASK (0x3 << 16)
+#define SPI_TX_TRIG_1W (0x0 << 16)
+#define SPI_TX_TRIG_4W (0x1 << 16)
+#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
+
+#define SPI_TX_FIFO 0x10
+#define SPI_RX_FIFO 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 4
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct tegra_sflash_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned irq;
+ u32 spi_max_frequency;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned bytes_per_word;
+ unsigned cur_direction;
+ unsigned curr_xfer_words;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+
+ u32 def_command_reg;
+ u32 command_reg;
+ u32 dma_control_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+};
+
+static int tegra_sflash_runtime_suspend(struct device *dev);
+static int tegra_sflash_runtime_resume(struct device *dev);
+
+static inline unsigned long tegra_sflash_readl(struct tegra_sflash_data *tsd,
+ unsigned long reg)
+{
+ return readl(tsd->base + reg);
+}
+
+static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tsd->base + reg);
+}
+
+static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
+{
+ /* Write 1 to clear status register */
+ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
+}
+
+static unsigned tegra_sflash_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_sflash_data *tsd,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tsd->cur_pos;
+ unsigned max_word;
+
+ tsd->bytes_per_word = (t->bits_per_word - 1) / 8 + 1;
+ max_word = remain_len / tsd->bytes_per_word;
+ if (max_word > SPI_FIFO_DEPTH)
+ max_word = SPI_FIFO_DEPTH;
+ tsd->curr_xfer_words = max_word;
+ return max_word;
+}
+
+static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned long status;
+ unsigned max_n_32bit = tsd->curr_xfer_words;
+ u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
+
+ if (max_n_32bit > SPI_FIFO_DEPTH)
+ max_n_32bit = SPI_FIFO_DEPTH;
+ nbytes = max_n_32bit * tsd->bytes_per_word;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_TXF_FULL)) {
+ int i;
+ unsigned int x = 0;
+
+ for (i = 0; nbytes && (i < tsd->bytes_per_word);
+ i++, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
+ if (!nbytes)
+ break;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
+ return max_n_32bit;
+}
+
+static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned long status;
+ unsigned int read_words = 0;
+ u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_RXF_EMPTY)) {
+ int i;
+ unsigned long x;
+
+ x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+ for (i = 0; (i < tsd->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ read_words++;
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
+ return 0;
+}
+
+static int tegra_sflash_start_cpu_based_transfer(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned long val = 0;
+ unsigned cur_words;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TXC;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RXC;
+
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
+ else
+ cur_words = tsd->curr_xfer_words;
+ val |= SPI_DMA_BLK_COUNT(cur_words);
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+ val |= SPI_DMA_EN;
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_sflash_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ u32 speed;
+ unsigned long command;
+
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (!speed)
+ speed = tsd->spi_max_frequency;
+ if (speed != tsd->cur_speed) {
+ clk_set_rate(tsd->clk, speed);
+ tsd->cur_speed = speed;
+ }
+
+ tsd->cur_spi = spi;
+ tsd->cur_pos = 0;
+ tsd->cur_rx_pos = 0;
+ tsd->cur_tx_pos = 0;
+ tsd->curr_xfer = t;
+ tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
+ if (is_first_of_msg) {
+ command = tsd->def_command_reg;
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command |= SPI_CS_VAL_HIGH;
+
+ command &= ~SPI_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SPI_CK_SDA_FALLING;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
+ else
+ command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
+ command |= SPI_CS0_EN << spi->chip_select;
+ } else {
+ command = tsd->command_reg;
+ command &= ~SPI_BIT_LENGTH(~0);
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command &= ~(SPI_RX_EN | SPI_TX_EN);
+ }
+
+ tsd->cur_direction = 0;
+ if (t->rx_buf) {
+ command |= SPI_RX_EN;
+ tsd->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command |= SPI_TX_EN;
+ tsd->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_sflash_writel(tsd, command, SPI_COMMAND);
+ tsd->command_reg = command;
+
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
+}
+
+static int tegra_sflash_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ ret = pm_runtime_get_sync(tsd->dev);
+ if (ret < 0) {
+ dev_err(tsd->dev, "pm_runtime_get() failed, err = %d\n", ret);
+ return ret;
+ }
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ INIT_COMPLETION(tsd->xfer_completion);
+ ret = tegra_sflash_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tsd->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tsd->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tsd->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tsd->tx_status || tsd->rx_status) {
+ dev_err(tsd->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_sflash_writel(tsd, tsd->def_command_reg,
+ SPI_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ pm_runtime_put(tsd->dev);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
+{
+ struct spi_transfer *t = tsd->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsd->lock, flags);
+ if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
+ dev_err(tsd->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
+ dev_err(tsd->dev,
+ "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
+ tsd->dma_control_reg);
+ tegra_periph_reset_assert(tsd->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tsd->clk);
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->cur_pos = tsd->cur_tx_pos;
+ else
+ tsd->cur_pos = tsd->cur_rx_pos;
+
+ if (tsd->cur_pos == t->len) {
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
+ tegra_sflash_start_cpu_based_transfer(tsd, t);
+exit:
+ spin_unlock_irqrestore(&tsd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
+{
+ struct tegra_sflash_data *tsd = context_data;
+
+ tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
+ tegra_sflash_clear_status(tsd);
+
+ return handle_cpu_based_xfer(tsd);
+}
+
+static struct tegra_spi_platform_data *tegra_sflash_parse_dt(
+ struct platform_device *pdev)
+{
+ struct tegra_spi_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
+ return NULL;
+ }
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ pdata->spi_max_frequency = max_freq;
+
+ return pdata;
+}
+
+static struct of_device_id tegra_sflash_of_match[] = {
+ { .compatible = "nvidia,tegra20-sflash", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
+
+static int tegra_sflash_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_sflash_data *tsd;
+ struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(tegra_sflash_of_match),
+ &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ if (!pdata && pdev->dev.of_node)
+ pdata = tegra_sflash_parse_dt(pdev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data, exiting\n");
+ return -ENODEV;
+ }
+
+ if (!pdata->spi_max_frequency)
+ pdata->spi_max_frequency = 25000000; /* 25MHz */
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->bus_num = -1;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tsd = spi_master_get_devdata(master);
+ tsd->master = master;
+ tsd->dev = &pdev->dev;
+ spin_lock_init(&tsd->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tsd->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!tsd->base) {
+ dev_err(&pdev->dev,
+ "Cannot request memregion/iomap dma address\n");
+ ret = -EADDRNOTAVAIL;
+ goto exit_free_master;
+ }
+
+ tsd->irq = platform_get_irq(pdev, 0);
+ ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ dev_name(&pdev->dev), tsd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tsd->irq);
+ goto exit_free_master;
+ }
+
+ tsd->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tsd->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tsd->clk);
+ goto exit_free_irq;
+ }
+
+ tsd->spi_max_frequency = pdata->spi_max_frequency;
+ init_completion(&tsd->xfer_completion);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_sflash_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ /* Reset controller */
+ tegra_periph_reset_assert(tsd->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tsd->clk);
+
+ tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+exit_free_irq:
+ free_irq(tsd->irq, tsd);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_sflash_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ free_irq(tsd->irq, tsd);
+ spi_unregister_master(master);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_sflash_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_sflash_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_sflash_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ /* Flush all writes which are in the PPSB queue by reading back */
+ tegra_sflash_readl(tsd, SPI_COMMAND);
+
+ clk_disable_unprepare(tsd->clk);
+ return 0;
+}
+
+static int tegra_sflash_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tsd->clk);
+ if (ret < 0) {
+ dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
+ tegra_sflash_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
+};
+static struct platform_driver tegra_sflash_driver = {
+ .driver = {
+ .name = "spi-tegra-sflash",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = of_match_ptr(tegra_sflash_of_match),
+ },
+ .probe = tegra_sflash_probe,
+ .remove = tegra_sflash_remove,
+};
+module_platform_driver(tegra_sflash_driver);
+
+MODULE_ALIAS("platform:spi-tegra-sflash");
+MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 00000000000..651167f2e0a
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1358 @@
+/*
+ * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-tegra.h>
+#include <mach/clk.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
+ SLINK_TX_UNF | SLINK_RX_OVF)
+
+#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 32
+
+struct tegra_slink_chip_data {
+ bool cs_hold_time;
+};
+
+struct tegra_slink_data {
+ struct device *dev;
+ struct spi_master *master;
+ const struct tegra_slink_chip_data *chip_data;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ int dma_req_sel;
+ u32 spi_max_frequency;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+ bool is_hw_based_cs;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ unsigned long packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_slink_runtime_suspend(struct device *dev);
+static int tegra_slink_runtime_resume(struct device *dev);
+
+static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SLINK_TX_FIFO)
+ readl(tspi->base + SLINK_MAS_DATA);
+}
+
+static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
+{
+ unsigned long val;
+ unsigned long val_write = 0;
+
+ val = tegra_slink_readl(tspi, SLINK_STATUS);
+
+ /* Write 1 to clear status register */
+ val_write = SLINK_RDY | SLINK_FIFO_ERROR;
+ tegra_slink_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned long val;
+
+ switch (tspi->bytes_per_word) {
+ case 0:
+ val = SLINK_PACK_SIZE_4;
+ break;
+ case 1:
+ val = SLINK_PACK_SIZE_8;
+ break;
+ case 2:
+ val = SLINK_PACK_SIZE_16;
+ break;
+ case 4:
+ val = SLINK_PACK_SIZE_32;
+ break;
+ default:
+ val = 0;
+ }
+ return val;
+}
+
+static unsigned tegra_slink_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
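+ /* Only 8- and 16-bit words pack evenly into the 32-bit FIFO entries */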
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = max_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ unsigned int bits_per_word;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ tspi->cur_spi->bits_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Give the CPU ownership of the DMA buffer */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= ((*tx_buf++) << i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Give the device ownership of the DMA buffer again */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Give the CPU ownership of the DMA buffer */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ unsigned int rx_mask, bits_per_word;
+
+ bits_per_word = t->bits_per_word ? t->bits_per_word :
+ tspi->cur_spi->bits_per_word;
+ rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = tspi->rx_dma_buf[count];
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Give the device ownership of the DMA buffer again */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_slink_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_dma_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned long test_val;
+ unsigned int len;
+ int ret = 0;
+ unsigned long status;
+
+ /* Make sure that the Rx and Tx FIFOs are empty */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
+ dev_err(tspi->dev,
+ "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ return -EIO;
+ }
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ ret = tegra_slink_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for the Tx FIFO to fill before starting the SLINK */
+ test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(test_val & SLINK_TX_FULL))
+ test_val = tegra_slink_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Give the device ownership of the DMA buffer */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_slink_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ /* HW needs a small delay after setting packed mode */
+ udelay(1);
+ }
+ tspi->dma_control_reg = val;
+
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
+static int tegra_slink_start_cpu_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned cur_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ tspi->dma_control_reg = val;
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
+static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dma_chan) {
+ dev_err(tspi->dev,
+ "Dma channel is not available, will try later\n");
+ return -EPROBE_DEFER;
+ }
+
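+ /* Allocate a coherent bounce buffer for this DMA direction */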
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ dma_sconfig.slave_id = tspi->dma_req_sel;
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_slink_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ struct tegra_spi_device_controller_data *cdata = spi->controller_data;
+ unsigned long command;
+ unsigned long command2;
+
+ bits_per_word = t->bits_per_word;
+ speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+ if (!speed)
+ speed = tspi->spi_max_frequency;
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (is_first_of_msg) {
+ tegra_slink_clear_status(tspi);
+
+ command = tspi->def_command_reg;
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->def_command2_reg;
+ command2 |= SLINK_SS_EN_CS(spi->chip_select);
+
+ /* possibly use the hw based chip select */
+ tspi->is_hw_based_cs = false;
+ if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
+ ((tspi->curr_dma_words * tspi->bytes_per_word) ==
+ (t->len - tspi->cur_pos))) {
+ int setup_count;
+ int sts2;
+
+ setup_count = cdata->cs_setup_clk_count >> 1;
+ setup_count = max(setup_count, 3);
+ command2 |= SLINK_SS_SETUP(setup_count);
+ if (tspi->chip_data->cs_hold_time) {
+ int hold_count;
+
+ hold_count = cdata->cs_hold_clk_count;
+ hold_count = max(hold_count, 0xF);
+ sts2 = tegra_slink_readl(tspi, SLINK_STATUS2);
+ sts2 &= ~SLINK_SS_HOLD_TIME(0xF);
+ sts2 |= SLINK_SS_HOLD_TIME(hold_count);
+ tegra_slink_writel(tspi, sts2, SLINK_STATUS2);
+ }
+ tspi->is_hw_based_cs = true;
+ }
+
+ if (tspi->is_hw_based_cs)
+ command &= ~SLINK_CS_SW;
+ else
+ command |= SLINK_CS_SW | SLINK_CS_VALUE;
+
+ command &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ command |= SLINK_IDLE_SCLK_DRIVE_LOW;
+ } else {
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
+ }
+
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
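+ /* Transfers larger than the FIFO depth use DMA; smaller ones are done by the CPU */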
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ ret = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_slink_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_slink_setup(struct spi_device *spi)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long val;
+ unsigned long flags;
+ int ret;
+ unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_pol_bit[spi->chip_select];
+ else
+ val &= ~cs_pol_bit[spi->chip_select];
+ tspi->def_command_reg = val;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_prepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ return pm_runtime_get_sync(tspi->dev);
+}
+
+static int tegra_slink_unprepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ INIT_COMPLETION(tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_slink_writel(tspi, tspi->def_command_reg,
+ SLINK_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(tspi->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_slink_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort DMAs on any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev,
+ "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ err = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_slink_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_slink_isr(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
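+ /* Latch and clear the status bits here; the threaded handler does the FIFO/DMA work */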
+ tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ tegra_slink_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct tegra_spi_platform_data *tegra_slink_parse_dt(
+ struct platform_device *pdev)
+{
+ struct tegra_spi_platform_data *pdata;
+ const unsigned int *prop;
+ struct device_node *np = pdev->dev.of_node;
+ u32 of_dma[2];
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
+ of_dma, 2) >= 0)
+ pdata->dma_req_sel = of_dma[1];
+
+ prop = of_get_property(np, "spi-max-frequency", NULL);
+ if (prop)
+ pdata->spi_max_frequency = be32_to_cpup(prop);
+
+ return pdata;
+}
+
+const struct tegra_slink_chip_data tegra30_spi_cdata = {
+ .cs_hold_time = true,
+};
+
+const struct tegra_slink_chip_data tegra20_spi_cdata = {
+ .cs_hold_time = false,
+};
+
+static struct of_device_id tegra_slink_of_match[] = {
+ { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
+ { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
+
+static int tegra_slink_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_slink_data *tspi;
+ struct resource *r;
+ struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret, spi_irq;
+ const struct tegra_slink_chip_data *cdata = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+ if (!pdata && pdev->dev.of_node)
+ pdata = tegra_slink_parse_dt(pdev);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data, exiting\n");
+ return -ENODEV;
+ }
+
+ if (!pdata->spi_max_frequency)
+ pdata->spi_max_frequency = 25000000; /* 25MHz */
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_slink_setup;
+ master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
+ master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->bus_num = -1;
+
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->dma_req_sel = pdata->dma_req_sel;
+ tspi->dev = &pdev->dev;
+ tspi->chip_data = cdata;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!tspi->base) {
+ dev_err(&pdev->dev,
+ "Cannot request memregion/iomap dma address\n");
+ ret = -EADDRNOTAVAIL;
+ goto exit_free_master;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, "slink");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+ tspi->spi_max_frequency = pdata->spi_max_frequency;
+
+ if (pdata->dma_req_sel) {
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
+ goto exit_rx_dma_free;
+ }
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+ }
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
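+ /* If runtime PM is not enabled, bring the controller clock up manually */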
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_slink_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command_reg = SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+ tegra_slink_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_slink_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_slink_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+ spi_unregister_master(master);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_slink_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_slink_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_slink_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes which are in the PPSB queue by reading back */
+ tegra_slink_readl(tspi, SLINK_MAS_DATA);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_slink_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+ tegra_slink_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
+};
+static struct platform_driver tegra_slink_driver = {
+ .driver = {
+ .name = "spi-tegra-slink",
+ .owner = THIS_MODULE,
+ .pm = &slink_pm_ops,
+ .of_match_table = of_match_ptr(tegra_slink_of_match),
+ },
+ .probe = tegra_slink_probe,
+ .remove = tegra_slink_remove,
+};
+module_platform_driver(tegra_slink_driver);
+
+MODULE_ALIAS("platform:spi-tegra-slink");
+MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
index 3f6f6e81c65..46992cab65f 100644
--- a/drivers/spi/spi-ti-ssp.c
+++ b/drivers/spi/spi-ti-ssp.c
@@ -289,7 +289,7 @@ error_unlock:
return error;
}
-static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
+static int ti_ssp_spi_probe(struct platform_device *pdev)
{
const struct ti_ssp_spi_data *pdata;
struct ti_ssp_spi *hw;
@@ -357,7 +357,7 @@ error_wq:
return error;
}
-static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
+static int ti_ssp_spi_remove(struct platform_device *pdev)
{
struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
int error;
@@ -378,7 +378,7 @@ static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
static struct platform_driver ti_ssp_spi_driver = {
.probe = ti_ssp_spi_probe,
- .remove = __devexit_p(ti_ssp_spi_remove),
+ .remove = ti_ssp_spi_remove,
.driver = {
.name = "ti-ssp-spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 24421024dea..6b0874d782e 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -240,7 +240,7 @@ static int to_gpio_num(struct device_attribute *attr)
return -1;
}
-static int __devinit tle62x0_probe(struct spi_device *spi)
+static int tle62x0_probe(struct spi_device *spi)
{
struct tle62x0_state *st;
struct tle62x0_pdata *pdata;
@@ -294,7 +294,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return ret;
}
-static int __devexit tle62x0_remove(struct spi_device *spi)
+static int tle62x0_remove(struct spi_device *spi)
{
struct tle62x0_state *st = spi_get_drvdata(spi);
int ptr;
@@ -313,7 +313,7 @@ static struct spi_driver tle62x0_driver = {
.owner = THIS_MODULE,
},
.probe = tle62x0_probe,
- .remove = __devexit_p(tle62x0_remove),
+ .remove = tle62x0_remove,
};
module_spi_driver(tle62x0_driver);
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 135f7406f4b..f756481b0fe 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1401,7 +1401,7 @@ static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
}
-static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
struct spi_master *master;
@@ -1498,7 +1498,7 @@ err_pci_iomap:
return ret;
}
-static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
+static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data = platform_get_drvdata(plat_dev);
@@ -1619,12 +1619,12 @@ static struct platform_driver pch_spi_pd_driver = {
.owner = THIS_MODULE,
},
.probe = pch_spi_pd_probe,
- .remove = __devexit_p(pch_spi_pd_remove),
+ .remove = pch_spi_pd_remove,
.suspend = pch_spi_pd_suspend,
.resume = pch_spi_pd_resume
};
-static int __devinit pch_spi_probe(struct pci_dev *pdev,
+static int pch_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pch_spi_board_data *board_dat;
@@ -1705,7 +1705,7 @@ err_no_mem:
return retval;
}
-static void __devexit pch_spi_remove(struct pci_dev *pdev)
+static void pch_spi_remove(struct pci_dev *pdev)
{
int i;
struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
@@ -1776,7 +1776,7 @@ static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
- .remove = __devexit_p(pch_spi_remove),
+ .remove = pch_spi_remove,
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 266a847e299..4d3ec8b9f47 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -217,7 +217,7 @@ static int spi_xcomm_setup(struct spi_device *spi)
return 0;
}
-static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
+static int spi_xcomm_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct spi_xcomm *spi_xcomm;
@@ -246,7 +246,7 @@ static int __devinit spi_xcomm_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit spi_xcomm_remove(struct i2c_client *i2c)
+static int spi_xcomm_remove(struct i2c_client *i2c)
{
struct spi_master *master = i2c_get_clientdata(i2c);
@@ -267,7 +267,7 @@ static struct i2c_driver spi_xcomm_driver = {
},
.id_table = spi_xcomm_ids,
.probe = spi_xcomm_probe,
- .remove = __devexit_p(spi_xcomm_remove),
+ .remove = spi_xcomm_remove,
};
module_i2c_driver(spi_xcomm_driver);
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 4c5a663b9fa..e1d76960742 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -462,7 +462,7 @@ void xilinx_spi_deinit(struct spi_master *master)
}
EXPORT_SYMBOL(xilinx_spi_deinit);
-static int __devinit xilinx_spi_probe(struct platform_device *dev)
+static int xilinx_spi_probe(struct platform_device *dev)
{
struct xspi_platform_data *pdata;
struct resource *r;
@@ -518,7 +518,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
return 0;
}
-static int __devexit xilinx_spi_remove(struct platform_device *dev)
+static int xilinx_spi_remove(struct platform_device *dev)
{
xilinx_spi_deinit(platform_get_drvdata(dev));
platform_set_drvdata(dev, 0);
@@ -531,7 +531,7 @@ MODULE_ALIAS("platform:" XILINX_SPI_NAME);
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
- .remove = __devexit_p(xilinx_spi_remove),
+ .remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 718cc1f4923..19ee901577d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched.h>
@@ -333,6 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
+ spi->cs_gpio = -EINVAL;
device_initialize(&spi->dev);
return spi;
}
@@ -350,15 +352,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
int spi_add_device(struct spi_device *spi)
{
static DEFINE_MUTEX(spi_add_lock);
- struct device *dev = spi->master->dev.parent;
+ struct spi_master *master = spi->master;
+ struct device *dev = master->dev.parent;
struct device *d;
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= spi->master->num_chipselect) {
+ if (spi->chip_select >= master->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n",
spi->chip_select,
- spi->master->num_chipselect);
+ master->num_chipselect);
return -EINVAL;
}
@@ -382,6 +385,9 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
+ if (master->cs_gpios)
+ spi->cs_gpio = master->cs_gpios[spi->chip_select];
+
/* Drivers may modify this initial i/o setup, but will
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
@@ -492,8 +498,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
*/
-int __devinit
-spi_register_board_info(struct spi_board_info const *info, unsigned n)
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
int i;
@@ -806,7 +811,7 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
+#if defined(CONFIG_OF)
/**
* of_register_spi_devices() - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
@@ -819,6 +824,7 @@ static void of_register_spi_devices(struct spi_master *master)
struct spi_device *spi;
struct device_node *nc;
const __be32 *prop;
+ char modalias[SPI_NAME_SIZE + 4];
int rc;
int len;
@@ -861,6 +867,8 @@ static void of_register_spi_devices(struct spi_master *master)
spi->mode |= SPI_CPOL;
if (of_find_property(nc, "spi-cs-high", NULL))
spi->mode |= SPI_CS_HIGH;
+ if (of_find_property(nc, "spi-3wire", NULL))
+ spi->mode |= SPI_3WIRE;
/* Device speed */
prop = of_get_property(nc, "spi-max-frequency", &len);
@@ -880,7 +888,9 @@ static void of_register_spi_devices(struct spi_master *master)
spi->dev.of_node = nc;
/* Register the new device */
- request_module(spi->modalias);
+ snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
+ spi->modalias);
+ request_module(modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
@@ -1046,6 +1056,44 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
+#ifdef CONFIG_OF
+static int of_spi_register_master(struct spi_master *master)
+{
+ u16 nb;
+ int i, *cs;
+ struct device_node *np = master->dev.of_node;
+
+ if (!np)
+ return 0;
+
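+ /* Size num_chipselect to cover every "cs-gpios" entry from the device tree */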
+ nb = of_gpio_named_count(np, "cs-gpios");
+ master->num_chipselect = max(nb, master->num_chipselect);
+
+ if (nb < 1)
+ return 0;
+
+ cs = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect,
+ GFP_KERNEL);
+ master->cs_gpios = cs;
+
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ memset(cs, -EINVAL, master->num_chipselect);
+
+ for (i = 0; i < nb; i++)
+ cs[i] = of_get_named_gpio(np, "cs-gpios", i);
+
+ return 0;
+}
+#else
+static int of_spi_register_master(struct spi_master *master)
+{
+ return 0;
+}
+#endif
+
/**
* spi_register_master - register SPI master controller
* @master: initialized master, originally from spi_alloc_master()
@@ -1077,6 +1125,10 @@ int spi_register_master(struct spi_master *master)
if (!dev)
return -ENODEV;
+ status = of_spi_register_master(master);
+ if (status)
+ return status;
+
/* even if it's just one always-selected device, there must
* be at least one chipselect
*/
@@ -1257,7 +1309,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits;
- int status;
+ int status = 0;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current master
@@ -1272,7 +1324,8 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- status = spi->master->setup(spi);
+ if (spi->master->setup)
+ status = spi->master->setup(spi);
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
"%u bits/w, %u Hz max --> %d\n",
@@ -1291,6 +1344,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
+ struct spi_transfer *xfer;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1299,7 +1353,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
*/
if ((master->flags & SPI_MASTER_HALF_DUPLEX)
|| (spi->mode & SPI_3WIRE)) {
- struct spi_transfer *xfer;
unsigned flags = master->flags;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -1312,6 +1365,15 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
}
}
+ /**
+ * Set transfer bits_per_word as spi device default if it is not
+ * set for this transfer.
+ */
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (!xfer->bits_per_word)
+ xfer->bits_per_word = spi->bits_per_word;
+ }
+
message->spi = spi;
message->status = -EINPROGRESS;
return master->transfer(spi, message);
@@ -1588,12 +1650,18 @@ int spi_write_then_read(struct spi_device *spi,
struct spi_transfer x[2];
u8 *local_buf;
- /* Use preallocated DMA-safe buffer. We can't avoid copying here,
- * (as a pure convenience thing), but we can keep heap costs
- * out of the hot path ...
+ /* Use preallocated DMA-safe buffer if we can. We can't avoid
+ * copying here, (as a pure convenience thing), but we can
+ * keep heap costs out of the hot path unless someone else is
+ * using the pre-allocated buffer or the transfer is too large.
*/
- if ((n_tx + n_rx) > SPI_BUFSIZ)
- return -EINVAL;
+ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+ local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), GFP_KERNEL);
+ if (!local_buf)
+ return -ENOMEM;
+ } else {
+ local_buf = buf;
+ }
spi_message_init(&message);
memset(x, 0, sizeof x);
@@ -1606,14 +1674,6 @@ int spi_write_then_read(struct spi_device *spi,
spi_message_add_tail(&x[1], &message);
}
- /* ... unless someone else is using the pre-allocated buffer */
- if (!mutex_trylock(&lock)) {
- local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
- if (!local_buf)
- return -ENOMEM;
- } else
- local_buf = buf;
-
memcpy(local_buf, txbuf, n_tx);
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 830adbed1d7..2e0655dbe07 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -31,6 +31,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -571,7 +573,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
-static int __devinit spidev_probe(struct spi_device *spi)
+static int spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
@@ -620,7 +622,7 @@ static int __devinit spidev_probe(struct spi_device *spi)
return status;
}
-static int __devexit spidev_remove(struct spi_device *spi)
+static int spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -642,13 +644,21 @@ static int __devexit spidev_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spidev_dt_ids),
},
.probe = spidev_probe,
- .remove = __devexit_p(spidev_remove),
+ .remove = spidev_remove,
/* NOTE: suspend/resume methods are not necessary here.
* We don't do anything except pass the requests to/from
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 42cdaa9a4d8..ff3c8a21f10 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -160,4 +160,13 @@ config SSB_DRIVER_GIGE
If unsure, say N
+config SSB_DRIVER_GPIO
+ bool "SSB GPIO driver"
+ depends on SSB
+ select GPIOLIB
+ help
+ Driver to provide access to the GPIO pins on the bus.
+
+ If unsure, say N
+
endmenu
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 656e58b9261..9159ba77c38 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -15,6 +15,7 @@ ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o
ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o
ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o
ssb-$(CONFIG_SSB_DRIVER_GIGE) += driver_gige.o
+ssb-$(CONFIG_SSB_DRIVER_GPIO) += driver_gpio.o
# b43 pci-ssb-bridge driver
# Not strictly a part of SSB, but kept here for convenience
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 95c33a05f43..71098a7b5fe 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -349,6 +349,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+
+ spin_lock_init(&cc->gpio_lock);
+
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
@@ -505,28 +508,93 @@ u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
- return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res = 0;
+
+ if (cc->dev->id.revision < 20)
+ return 0xffffffff;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLUP, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res = 0;
+
+ if (cc->dev->id.revision < 20)
+ return 0xffffffff;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLDOWN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
#ifdef CONFIG_SSB_SERIAL
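Each helper above is a read-modify-write of a shared GPIO register, so two concurrent callers could silently undo each other's bit updates; the new gpio_lock makes the read and the write one atomic step, and the irqsave variant keeps that safe from interrupt context. The pattern in isolation (a generic sketch, not the actual ssb accessor):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Update only the bits selected by mask and return the new register value. */
static u32 masked_write32(void __iomem *reg, u32 mask, u32 value)
{
	u32 v = readl(reg);

	v &= ~mask;		/* clear the bits we own */
	v |= (value & mask);	/* set the requested bits */
	writel(v, reg);
	return v;
}

/*
 * Callers serialize the sequence so no other writer can slip in between
 * the read and the write, even from IRQ context:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	res = masked_write32(reg, mask, value);
 *	spin_unlock_irqrestore(&lock, flags);
 */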
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index 553227a3062..59385fdab5b 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -138,6 +138,13 @@ u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks)
return ticks;
}
+void ssb_extif_init(struct ssb_extif *extif)
+{
+ if (!extif->dev)
+ return; /* We don't have an Extif core */
+ spin_lock_init(&extif->gpio_lock);
+}
+
u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
{
return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask;
@@ -145,22 +152,50 @@ u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value)
{
- return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
+ unsigned long flags;
+ u32 res = 0;
+
+ spin_lock_irqsave(&extif->gpio_lock, flags);
+ res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
+ spin_unlock_irqrestore(&extif->gpio_lock, flags);
+
+ return res;
}
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
new file mode 100644
index 00000000000..97ac0a38e3d
--- /dev/null
+++ b/drivers/ssb/driver_gpio.c
@@ -0,0 +1,176 @@
+/*
+ * Sonics Silicon Backplane
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/ssb/ssb.h>
+
+#include "ssb_private.h"
+
+static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
+{
+ return container_of(chip, struct ssb_bus, gpio);
+}
+
+static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
+}
+
+static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
+ unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0);
+ return 0;
+}
+
+static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio);
+ ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0);
+ /* clear pulldown */
+ ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0);
+ /* Set pullup */
+ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio);
+
+ return 0;
+}
+
+static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ /* clear pullup */
+ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
+}
+
+static int ssb_gpio_chipco_init(struct ssb_bus *bus)
+{
+ struct gpio_chip *chip = &bus->gpio;
+
+ chip->label = "ssb_chipco_gpio";
+ chip->owner = THIS_MODULE;
+ chip->request = ssb_gpio_chipco_request;
+ chip->free = ssb_gpio_chipco_free;
+ chip->get = ssb_gpio_chipco_get_value;
+ chip->set = ssb_gpio_chipco_set_value;
+ chip->direction_input = ssb_gpio_chipco_direction_input;
+ chip->direction_output = ssb_gpio_chipco_direction_output;
+ chip->ngpio = 16;
+ /* There is just one SoC in one device and its GPIO addresses should be
+ * deterministic to address them more easily. The other buses could get
+ * a random base number. */
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+#ifdef CONFIG_SSB_DRIVER_EXTIF
+
+static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
+}
+
+static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
+ unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0);
+ return 0;
+}
+
+static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio);
+ ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int ssb_gpio_extif_init(struct ssb_bus *bus)
+{
+ struct gpio_chip *chip = &bus->gpio;
+
+ chip->label = "ssb_extif_gpio";
+ chip->owner = THIS_MODULE;
+ chip->get = ssb_gpio_extif_get_value;
+ chip->set = ssb_gpio_extif_set_value;
+ chip->direction_input = ssb_gpio_extif_direction_input;
+ chip->direction_output = ssb_gpio_extif_direction_output;
+ chip->ngpio = 5;
+ /* There is just one SoC in one device and its GPIO addresses should be
+ * deterministic to address them more easily. The other buses could get
+ * a random base number. */
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+#else
+static int ssb_gpio_extif_init(struct ssb_bus *bus)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+int ssb_gpio_init(struct ssb_bus *bus)
+{
+ if (ssb_chipco_available(&bus->chipco))
+ return ssb_gpio_chipco_init(bus);
+ else if (ssb_extif_available(&bus->extif))
+ return ssb_gpio_extif_init(bus);
+ else
+ SSB_WARN_ON(1);
+
+ return -1;
+}
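Once ssb_gpio_init() has called gpiochip_add(), any gpiolib consumer can drive the SSB pins through the integer-based API of this kernel generation; a rough usage sketch (the pin number and label are arbitrary examples, valid on an SSB SoC where the chip registers at base 0):

#include <linux/gpio.h>

static int example_toggle_ssb_gpio(void)
{
	int err;

	err = gpio_request(3, "example-led");	/* pin 3 of the SSB chip */
	if (err)
		return err;

	err = gpio_direction_output(3, 1);	/* drive it high */
	if (err) {
		gpio_free(3);
		return err;
	}

	gpio_set_value(3, 0);			/* ...and low again */
	gpio_free(3);
	return 0;
}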
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 6e0daaa0e04..c82c5c95fe8 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -804,7 +804,14 @@ static int __devinit ssb_bus_register(struct ssb_bus *bus,
if (err)
goto err_pcmcia_exit;
ssb_chipcommon_init(&bus->chipco);
+ ssb_extif_init(&bus->extif);
ssb_mipscore_init(&bus->mipscore);
+ err = ssb_gpio_init(bus);
+ if (err == -ENOTSUPP)
+ ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n");
+ else if (err)
+ ssb_dprintk(KERN_ERR PFX
+ "Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 8942db1d855..6c10b66c796 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -242,4 +242,21 @@ static inline int ssb_watchdog_register(struct ssb_bus *bus)
}
#endif /* CONFIG_SSB_EMBEDDED */
+#ifdef CONFIG_SSB_DRIVER_EXTIF
+extern void ssb_extif_init(struct ssb_extif *extif);
+#else
+static inline void ssb_extif_init(struct ssb_extif *extif)
+{
+}
+#endif
+
+#ifdef CONFIG_SSB_DRIVER_GPIO
+extern int ssb_gpio_init(struct ssb_bus *bus);
+#else /* CONFIG_SSB_DRIVER_GPIO */
+static inline int ssb_gpio_init(struct ssb_bus *bus)
+{
+ return -ENOTSUPP;
+}
+#endif /* CONFIG_SSB_DRIVER_GPIO */
+
#endif /* LINUX_SSB_PRIVATE_H_ */
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 4a36e9ab8cf..2d12e8a1f82 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/pid_namespace.h>
#include "binder.h"
#include "binder_trace.h"
@@ -2320,7 +2321,7 @@ retry:
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
- current->nsproxy->pid_ns);
+ task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 78f3a2e013c..17b45ebb055 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -73,7 +73,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.
* And loads of cleaning up, in particular streamlining the
* bulk transfers.
* 1.1: moved EP4 transfers to EP1 to make space for a PWM output on EP4
- * 1.2: added PWM suport via EP4
+ * 1.2: added PWM support via EP4
* 2.0: PWM seems to be stable and is not interfering with the other functions
* 2.1: changed PWM API
* 2.2: added firmware kernel request to fix an udev problem
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 238910373f5..479c643da2f 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -396,7 +396,9 @@ dt3155_open(struct file *filp)
pd->q->drv_priv = pd;
pd->curr_buf = NULL;
pd->field_count = 0;
- vb2_queue_init(pd->q); /* cannot fail */
+ ret = vb2_queue_init(pd->q);
+ if (ret < 0)
+ return ret;
INIT_LIST_HEAD(&pd->dmaq);
spin_lock_init(&pd->lock);
/* disable all irqs, clear all irq flags */
diff --git a/drivers/staging/media/go7007/go7007-fw.c b/drivers/staging/media/go7007/go7007-fw.c
index c9a6409edfe..f99c05b454b 100644
--- a/drivers/staging/media/go7007/go7007-fw.c
+++ b/drivers/staging/media/go7007/go7007-fw.c
@@ -382,8 +382,8 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
buf = kzalloc(4096, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 4096 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 4096 bytes for firmware construction\n");
return -1;
}
@@ -652,8 +652,8 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 5120 bytes for firmware construction\n");
return -1;
}
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
@@ -839,8 +839,8 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
- printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
- "firmware construction\n");
+ dev_err(go->dev,
+ "unable to allocate 5120 bytes for firmware construction\n");
return -1;
}
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
@@ -1545,9 +1545,8 @@ static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
case SPECIAL_MODET:
return modet_to_package(go, code, space);
}
- printk(KERN_ERR
- "go7007: firmware file contains unsupported feature %04x\n",
- type);
+ dev_err(go->dev,
+ "firmware file contains unsupported feature %04x\n", type);
return -1;
}
@@ -1577,15 +1576,16 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
return -1;
}
if (request_firmware(&fw_entry, go->board_info->firmware, go->dev)) {
- printk(KERN_ERR
- "go7007: unable to load firmware from file \"%s\"\n",
+ dev_err(go->dev,
+ "unable to load firmware from file \"%s\"\n",
go->board_info->firmware);
return -1;
}
code = kzalloc(codespace * 2, GFP_KERNEL);
if (code == NULL) {
- printk(KERN_ERR "go7007: unable to allocate %d bytes for "
- "firmware construction\n", codespace * 2);
+ dev_err(go->dev,
+ "unable to allocate %d bytes for firmware construction\n",
+ codespace * 2);
goto fw_failed;
}
src = (__le16 *)fw_entry->data;
@@ -1594,9 +1594,9 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
chunk_flags = __le16_to_cpu(src[0]);
chunk_len = __le16_to_cpu(src[1]);
if (chunk_len + 2 > srclen) {
- printk(KERN_ERR "go7007: firmware file \"%s\" "
- "appears to be corrupted\n",
- go->board_info->firmware);
+ dev_err(go->dev,
+ "firmware file \"%s\" appears to be corrupted\n",
+ go->board_info->firmware);
goto fw_failed;
}
if (chunk_flags & mode_flag) {
@@ -1604,17 +1604,15 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
ret = do_special(go, __le16_to_cpu(src[2]),
&code[i], codespace - i, framelen);
if (ret < 0) {
- printk(KERN_ERR "go7007: insufficient "
- "memory for firmware "
- "construction\n");
+ dev_err(go->dev,
+ "insufficient memory for firmware construction\n");
goto fw_failed;
}
i += ret;
} else {
if (codespace - i < chunk_len) {
- printk(KERN_ERR "go7007: insufficient "
- "memory for firmware "
- "construction\n");
+ dev_err(go->dev,
+ "insufficient memory for firmware construction\n");
goto fw_failed;
}
memcpy(&code[i], &src[2], chunk_len * 2);
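The printk(KERN_ERR "go7007: ...") to dev_err() conversions above let the driver core prepend the driver and device name, so the hand-written prefix disappears and every message is tied to a concrete struct device. The difference in miniature (hypothetical helper):

#include <linux/device.h>
#include <linux/printk.h>

static void report_alloc_failure(struct device *dev, size_t len)
{
	/* old style: hand-written prefix, no device association */
	printk(KERN_ERR "go7007: unable to allocate %zu bytes\n", len);

	/* new style: logged as "<driver> <device>: unable to allocate ..." */
	dev_err(dev, "unable to allocate %zu bytes\n", len);
}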
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 980371b0274..a78133b67de 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -812,7 +812,7 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return retval;
mutex_lock(&gofh->lock);
- if (buf->index < 0 || buf->index >= gofh->buf_count)
+ if (buf->index >= gofh->buf_count)
goto unlock_and_return;
gobuf = &gofh->bufs[buf->index];
diff --git a/drivers/staging/media/go7007/s2250-board.c b/drivers/staging/media/go7007/s2250-board.c
index 014d38410c9..b3974100c6c 100644
--- a/drivers/staging/media/go7007/s2250-board.c
+++ b/drivers/staging/media/go7007/s2250-board.c
@@ -688,15 +688,4 @@ static struct i2c_driver s2250_driver = {
.id_table = s2250_id,
};
-static __init int init_s2250(void)
-{
- return i2c_add_driver(&s2250_driver);
-}
-
-static __exit void exit_s2250(void)
-{
- i2c_del_driver(&s2250_driver);
-}
-
-module_init(init_s2250);
-module_exit(exit_s2250);
+module_i2c_driver(s2250_driver);
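This and the following wis-* conversions rely on module_i2c_driver(), which generates the init/exit boilerplate each driver used to spell out by hand. Conceptually the macro stands for roughly this pair (simplified sketch, not the literal expansion):

static int __init s2250_driver_init(void)
{
	return i2c_add_driver(&s2250_driver);
}
module_init(s2250_driver_init);

static void __exit s2250_driver_exit(void)
{
	i2c_del_driver(&s2250_driver);
}
module_exit(s2250_driver_exit);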
diff --git a/drivers/staging/media/go7007/wis-ov7640.c b/drivers/staging/media/go7007/wis-ov7640.c
index 6bc9470fecb..9f01657f884 100644
--- a/drivers/staging/media/go7007/wis-ov7640.c
+++ b/drivers/staging/media/go7007/wis-ov7640.c
@@ -29,8 +29,7 @@ struct wis_ov7640 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x12, 0x80,
0x12, 0x54,
0x14, 0x24,
@@ -60,12 +59,12 @@ static int wis_ov7640_probe(struct i2c_client *client,
client->flags = I2C_CLIENT_SCCB;
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-ov7640: initializing OV7640 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR "wis-ov7640: error initializing OV7640\n");
+ dev_err(&client->dev, "wis-ov7640: error initializing OV7640\n");
return -ENODEV;
}
@@ -92,17 +91,6 @@ static struct i2c_driver wis_ov7640_driver = {
.id_table = wis_ov7640_id,
};
-static int __init wis_ov7640_init(void)
-{
- return i2c_add_driver(&wis_ov7640_driver);
-}
-
-static void __exit wis_ov7640_cleanup(void)
-{
- i2c_del_driver(&wis_ov7640_driver);
-}
-
-module_init(wis_ov7640_init);
-module_exit(wis_ov7640_cleanup);
+module_i2c_driver(wis_ov7640_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-saa7113.c b/drivers/staging/media/go7007/wis-saa7113.c
index 05e0e108386..8810c1e6e1e 100644
--- a/drivers/staging/media/go7007/wis-saa7113.c
+++ b/drivers/staging/media/go7007/wis-saa7113.c
@@ -32,8 +32,7 @@ struct wis_saa7113 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x33,
@@ -282,12 +281,12 @@ static int wis_saa7113_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-saa7113: initializing SAA7113 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR
+ dev_err(&client->dev,
"wis-saa7113: error initializing SAA7113\n");
kfree(dec);
return -ENODEV;
@@ -320,17 +319,6 @@ static struct i2c_driver wis_saa7113_driver = {
.id_table = wis_saa7113_id,
};
-static int __init wis_saa7113_init(void)
-{
- return i2c_add_driver(&wis_saa7113_driver);
-}
-
-static void __exit wis_saa7113_cleanup(void)
-{
- i2c_del_driver(&wis_saa7113_driver);
-}
-
-module_init(wis_saa7113_init);
-module_exit(wis_saa7113_cleanup);
+module_i2c_driver(wis_saa7113_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-saa7115.c b/drivers/staging/media/go7007/wis-saa7115.c
index 46cff59e28b..fa86acd3fdf 100644
--- a/drivers/staging/media/go7007/wis-saa7115.c
+++ b/drivers/staging/media/go7007/wis-saa7115.c
@@ -32,8 +32,7 @@ struct wis_saa7115 {
int hue;
};
-static u8 initial_registers[] =
-{
+static u8 initial_registers[] = {
0x01, 0x08,
0x02, 0xc0,
0x03, 0x20,
@@ -415,12 +414,12 @@ static int wis_saa7115_probe(struct i2c_client *client,
dec->hue = 0;
i2c_set_clientdata(client, dec);
- printk(KERN_DEBUG
+ dev_dbg(&client->dev,
"wis-saa7115: initializing SAA7115 at address %d on %s\n",
client->addr, adapter->name);
if (write_regs(client, initial_registers) < 0) {
- printk(KERN_ERR
+ dev_err(&client->dev,
"wis-saa7115: error initializing SAA7115\n");
kfree(dec);
return -ENODEV;
@@ -453,17 +452,6 @@ static struct i2c_driver wis_saa7115_driver = {
.id_table = wis_saa7115_id,
};
-static int __init wis_saa7115_init(void)
-{
- return i2c_add_driver(&wis_saa7115_driver);
-}
-
-static void __exit wis_saa7115_cleanup(void)
-{
- i2c_del_driver(&wis_saa7115_driver);
-}
-
-module_init(wis_saa7115_init);
-module_exit(wis_saa7115_cleanup);
+module_i2c_driver(wis_saa7115_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-sony-tuner.c b/drivers/staging/media/go7007/wis-sony-tuner.c
index 8f1b7d4f6a2..1291ab79d2a 100644
--- a/drivers/staging/media/go7007/wis-sony-tuner.c
+++ b/drivers/staging/media/go7007/wis-sony-tuner.c
@@ -704,17 +704,6 @@ static struct i2c_driver wis_sony_tuner_driver = {
.id_table = wis_sony_tuner_id,
};
-static int __init wis_sony_tuner_init(void)
-{
- return i2c_add_driver(&wis_sony_tuner_driver);
-}
-
-static void __exit wis_sony_tuner_cleanup(void)
-{
- i2c_del_driver(&wis_sony_tuner_driver);
-}
-
-module_init(wis_sony_tuner_init);
-module_exit(wis_sony_tuner_cleanup);
+module_i2c_driver(wis_sony_tuner_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-tw2804.c b/drivers/staging/media/go7007/wis-tw2804.c
index 9134f03e3cf..d6410ee01be 100644
--- a/drivers/staging/media/go7007/wis-tw2804.c
+++ b/drivers/staging/media/go7007/wis-tw2804.c
@@ -341,17 +341,6 @@ static struct i2c_driver wis_tw2804_driver = {
.id_table = wis_tw2804_id,
};
-static int __init wis_tw2804_init(void)
-{
- return i2c_add_driver(&wis_tw2804_driver);
-}
-
-static void __exit wis_tw2804_cleanup(void)
-{
- i2c_del_driver(&wis_tw2804_driver);
-}
-
-module_init(wis_tw2804_init);
-module_exit(wis_tw2804_cleanup);
+module_i2c_driver(wis_tw2804_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-tw9903.c b/drivers/staging/media/go7007/wis-tw9903.c
index 9230f4a8052..94071def3bb 100644
--- a/drivers/staging/media/go7007/wis-tw9903.c
+++ b/drivers/staging/media/go7007/wis-tw9903.c
@@ -325,17 +325,6 @@ static struct i2c_driver wis_tw9903_driver = {
.id_table = wis_tw9903_id,
};
-static int __init wis_tw9903_init(void)
-{
- return i2c_add_driver(&wis_tw9903_driver);
-}
-
-static void __exit wis_tw9903_cleanup(void)
-{
- i2c_del_driver(&wis_tw9903_driver);
-}
-
-module_init(wis_tw9903_init);
-module_exit(wis_tw9903_cleanup);
+module_i2c_driver(wis_tw9903_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/go7007/wis-uda1342.c b/drivers/staging/media/go7007/wis-uda1342.c
index 0127be2f3be..05ac798f35f 100644
--- a/drivers/staging/media/go7007/wis-uda1342.c
+++ b/drivers/staging/media/go7007/wis-uda1342.c
@@ -98,17 +98,6 @@ static struct i2c_driver wis_uda1342_driver = {
.id_table = wis_uda1342_id,
};
-static int __init wis_uda1342_init(void)
-{
- return i2c_add_driver(&wis_uda1342_driver);
-}
-
-static void __exit wis_uda1342_cleanup(void)
-{
- i2c_del_driver(&wis_uda1342_driver);
-}
-
-module_init(wis_uda1342_init);
-module_exit(wis_uda1342_cleanup);
+module_i2c_driver(wis_uda1342_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 71e3bf2937f..b5d0088f310 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -1239,6 +1239,10 @@ static int __init lirc_serial_init_module(void)
}
}
+ /* make sure sense is either -1, 0, or 1 */
+ if (sense != -1)
+ sense = !!sense;
+
result = lirc_serial_init();
if (result)
return result;
@@ -1298,7 +1302,7 @@ MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
module_param(share_irq, bool, S_IRUGO);
MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-module_param(sense, bool, S_IRUGO);
+module_param(sense, int, S_IRUGO);
MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
" (0 = active high, 1 = active low )");
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index d4823fd6776..84943e5ba1d 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -565,6 +565,14 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(dev->dev, "could not init omapdss\n");
+ dev->dev_private = NULL;
+ kfree(priv);
+ return ret;
+ }
+
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
INIT_LIST_HEAD(&priv->obj_list);
@@ -576,6 +584,7 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
dev->dev_private = NULL;
kfree(priv);
+ omapdss_compat_uninit();
return ret;
}
@@ -610,6 +619,8 @@ static int dev_unload(struct drm_device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
+ omapdss_compat_uninit();
+
kfree(dev->dev_private);
dev->dev_private = NULL;
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 294e9b40f51..737f4a9d86a 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -736,7 +736,7 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
case 4:
break;
default:
- /* alignment value not suportted */
+ /* alignment value not supported */
status = -EPERM;
break;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 035c2c76253..339f97f7085 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
- (cmd->stat_sn < exp_statsn)) {
+ iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
iscsit_add_cmd_to_immediate_queue(cmd, conn,
@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
struct iscsi_conn *conn,
unsigned char *buf)
{
- int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
- int dump_immediate_data = 0, send_check_condition = 0, payload_length;
- struct iscsi_cmd *cmd = NULL;
+ int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
+ struct iscsi_cmd *cmd = NULL;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
int sam_task_attr;
@@ -956,38 +955,26 @@ done:
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
- /*
- * The CDB is going to an se_device_t.
- */
- ret = transport_lookup_cmd_lun(&cmd->se_cmd,
- scsilun_to_int(&hdr->lun));
- if (ret < 0) {
- if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
- pr_debug("Responding to non-acl'ed,"
- " non-existent or non-exported iSCSI LUN:"
- " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+ if (cmd->sense_reason)
+ goto attach_cmd;
+
+ cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
}
- send_check_condition = 1;
+
goto attach_cmd;
}
- transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
- if (transport_ret == -ENOMEM) {
+ if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
- } else if (transport_ret < 0) {
- /*
- * Unsupported SAM Opcode. CHECK_CONDITION will be sent
- * in iscsit_execute_cmd() during the CmdSN OOO Execution
- * Mechinism.
- */
- send_check_condition = 1;
- } else {
- if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
}
attach_cmd:
@@ -1000,11 +987,12 @@ attach_cmd:
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
- ret = iscsit_allocate_iovecs(cmd);
- if (ret < 0)
+ if (iscsit_allocate_iovecs(cmd) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 0, buf, cmd);
+ }
+
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
@@ -1031,10 +1019,7 @@ attach_cmd:
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
- if (send_check_condition)
- return 0;
-
- if (cmd->unsolicited_data) {
+ if (!cmd->sense_reason && cmd->unsolicited_data) {
iscsit_set_dataout_sequence_values(cmd);
spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1050,19 +1035,17 @@ attach_cmd:
* thread. They are processed in CmdSN order by
* iscsit_check_received_cmdsn() below.
*/
- if (send_check_condition) {
+ if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
goto after_immediate_data;
}
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
- ret = transport_generic_new_cmd(&cmd->se_cmd);
- if (ret < 0) {
+ cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+ if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
- dump_immediate_data = 1;
goto after_immediate_data;
}
@@ -1079,7 +1062,7 @@ after_immediate_data:
* Special case for Unsupported SAM WRITE Opcodes
* and ImmediateData=Yes.
*/
- if (dump_immediate_data) {
+ if (cmd->sense_reason) {
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return -1;
} else if (cmd->unsolicited_data) {
@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
ret = transport_lookup_tmr_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (se_tmr->response)
goto attach;
- }
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
- if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
- cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}
@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ff6fd4fb624..78d75c8567d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -235,7 +235,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
* iSER/SCTP (TODO, software emulation with osc-iwarp)
* iSER/IB (TODO, hardware available)
*
- * can be enabled with atributes under
+ * can be enabled with attributes under
* sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_nacl_show_tag(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ int ret;
+
+ ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_info.attr,
&lio_target_nacl_cmdsn_depth.attr,
+ &lio_target_nacl_tag.attr,
NULL,
};
@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
stats_cg = &se_nacl->acl_fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
*/
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
- stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 21048dbf7d1..7a333d28d9a 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -474,7 +474,7 @@ struct iscsi_cmd {
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
-
+ sense_reason_t sense_reason;
} ____cacheline_aligned;
struct iscsi_tmr_req {
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 8aacf611b86..8e6298cc883 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -410,11 +410,11 @@ static int iscsit_dataout_pre_datapduinorder_yes(
/*
* For DataSequenceInOrder=Yes: If the offset is greater than the global
* DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has
- * occured and fail the connection.
+ * occurred and fail the connection.
*
* For DataSequenceInOrder=No: If the offset is greater than the per
* sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
- * error has occured and fail the connection.
+ * error has occurred and fail the connection.
*/
if (conn->sess->sess_ops->DataSequenceInOrder) {
if (be32_to_cpu(hdr->offset) != cmd->write_data_done) {
@@ -801,7 +801,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
{
int tpg_active;
/*
- * Only start Time2Retain timer when the assoicated TPG is still in
+ * Only start Time2Retain timer when the associated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 21f29d91a8c..0b52a237130 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
- * any SCSI CDB exceptions that may have occurred, also
- * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+ * any SCSI CDB exceptions that may have occurred.
*/
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+ if (cmd->sense_reason) {
+ if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* exception
*/
return transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
- if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 17d8c20094f..9ac4c151eae 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
- (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+ iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index f8dbec05d5e..fdb632f0ab8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param = iscsi_find_param_from_key(
INITIATORNAME, conn->param_list);
- if (!initiatorname_param)
- return -1;
-
sessiontype_param = iscsi_find_param_from_key(
SESSIONTYPE, conn->param_list);
- if (!sessiontype_param)
+ if (!initiatorname_param || !sessiontype_param) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
+ }
sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
- spin_lock(&sess_idr_lock);
+ spin_lock_bh(&sess_idr_lock);
ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
- spin_unlock(&sess_idr_lock);
+ spin_unlock_bh(&sess_idr_lock);
if (ret < 0) {
pr_err("idr_get_new() for sess_idr failed\n");
@@ -1118,10 +1118,8 @@ new_sess_out:
idr_remove(&sess_idr, conn->sess->session_index);
spin_unlock_bh(&sess_idr_lock);
}
- if (conn->sess->sess_ops)
- kfree(conn->sess->sess_ops);
- if (conn->sess)
- kfree(conn->sess);
+ kfree(conn->sess->sess_ops);
+ kfree(conn->sess);
old_sess_out:
iscsi_stop_login_thread_timer(np);
/*
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index e9053a04f24..9d902aefe01 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->req_buf,
payload_length,
conn);
- if (ret < 0)
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->rsp_buf,
&login->rsp_length,
conn->param_list);
- if (ret < 0)
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
+ }
if (!login->auth_complete &&
ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 90b740048f2..d89164287d0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
}
INIT_LIST_HEAD(&param->p_list);
- param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ param->name = kstrdup(name, GFP_KERNEL);
if (!param->name) {
pr_err("Unable to allocate memory for parameter name.\n");
goto out;
}
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for parameter value.\n");
goto out;
}
- memcpy(param->name, name, strlen(name));
- param->name[strlen(name)] = '\0';
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
param->phase = phase;
param->scope = scope;
param->sender = sender;
@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
list_del(&param->p_list);
kfree(param->name);
- param->name = NULL;
kfree(param->value);
- param->value = NULL;
kfree(param);
- param = NULL;
}
iscsi_release_extra_responses(param_list);
@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
{
kfree(param->value);
- param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
return -ENOMEM;
}
- memcpy(param->value, value, strlen(value));
- param->value[strlen(value)] = '\0';
-
pr_debug("iSCSI Parameter updated to %s=%s\n",
param->name, param->value);
return 0;
@@ -1432,6 +1422,7 @@ static struct iscsi_param *iscsi_check_key(
break;
case PHASE_OPERATIONAL:
pr_debug("Operational phase.\n");
+ break;
default:
pr_debug("Unknown phase.\n");
}
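The parameter code above swaps the open-coded kzalloc + memcpy + manual NUL termination for kstrdup(), which measures, allocates and copies in one call and returns NULL on failure just like the old path. Before/after in one hypothetical helper:

#include <linux/slab.h>
#include <linux/string.h>

static char *dup_value(const char *value, gfp_t gfp)
{
	/*
	 * Equivalent to:
	 *	p = kzalloc(strlen(value) + 1, gfp);
	 *	memcpy(p, value, strlen(value));
	 *	p[strlen(value)] = '\0';
	 * but in a single call, with a single strlen() and no forgotten
	 * terminator.
	 */
	return kstrdup(value, gfp);
}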
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 4a99820d063..9d4417aae92 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
- return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
- be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
+ return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+ iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
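Plain < and >= comparisons on StatSN/CmdSN stop being meaningful once the 32-bit counters wrap, which is why these hunks switch to the iscsi_sna_*() helpers. A generic RFC 1982-style serial-number comparison looks roughly like this (illustrative sketch, not the exact kernel helpers):

#include <linux/types.h>

/* "a < b" iff b is ahead of a by less than half the 32-bit space,
 * so ordering stays correct across wraparound. */
static inline bool sna32_lt(u32 a, u32 b)
{
	return a != b &&
	       ((a < b && (b - a) < 0x80000000U) ||
		(a > b && (a - b) > 0x80000000U));
}

static inline bool sna32_gte(u32 a, u32 b)
{
	return a == b ||
	       ((a < b && (b - a) > 0x80000000U) ||
		(a > b && (a - b) < 0x80000000U));
}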
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 9d881a000e4..81289520f96 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
return NULL;
}
- list_for_each_entry(ts, &inactive_ts_list, ts_list)
- break;
+ ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
list_del(&ts->ts_list);
iscsit_global->inactive_ts--;
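The loop-and-break idiom removed here only ever looked at the first list entry; list_first_entry() states that intent directly. The call sites still check list_empty() under the queue lock before popping. The pattern with a hypothetical queue type:

#include <linux/list.h>

struct work_item {
	struct list_head node;
	int payload;
};

/* Pop the head of a non-empty list; the caller holds the lock protecting
 * the list and has already verified !list_empty(head). */
static struct work_item *pop_first(struct list_head *head)
{
	struct work_item *item;

	item = list_first_entry(head, struct work_item, node);
	list_del(&item->node);
	return item;
}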
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1a91195ab61..7ce350578c8 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
- list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->immed_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
return NULL;
}
- list_for_each_entry(qr, &conn->response_queue_list, qr_list)
- break;
+ qr = list_first_entry(&conn->response_queue_list,
+ struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
@@ -684,7 +684,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
void iscsit_free_cmd(struct iscsi_cmd *cmd)
{
/*
- * Determine if a struct se_cmd is assoicated with
+ * Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7b54893db66..dd7a84ee78e 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -53,7 +53,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct se_device_s *se_dev_hba_ptr;
struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
index 132da544eaf..1614bc710d4 100644
--- a/drivers/target/sbp/Kconfig
+++ b/drivers/target/sbp/Kconfig
@@ -1,6 +1,6 @@
config SBP_TARGET
tristate "FireWire SBP-2 fabric module"
- depends on FIREWIRE && EXPERIMENTAL
+ depends on FIREWIRE
help
Say Y or M here to enable SCSI target functionality over FireWire.
This enables you to expose SCSI devices to other nodes on the FireWire
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 0d6d7c1f025..2e8d06f198a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
- __be32 state;
+ int state;
switch (tcode) {
case TCODE_READ_QUADLET_REQUEST:
pr_debug("tgt_agent AGENT_STATE READ\n");
spin_lock_bh(&agent->lock);
- state = cpu_to_be32(agent->state);
+ state = agent->state;
spin_unlock_bh(&agent->lock);
- memcpy(data, &state, sizeof(state));
+
+ *(__be32 *)data = cpu_to_be32(state);
return RCODE_COMPLETE;
@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
tport->mgt_agt = sbp_management_agent_register(tport);
if (IS_ERR(tport->mgt_agt)) {
ret = PTR_ERR(tport->mgt_agt);
- kfree(tpg);
- return ERR_PTR(ret);
+ goto out_free_tpg;
}
ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, (void *)tpg,
TRANSPORT_TPG_TYPE_NORMAL);
- if (ret < 0) {
- sbp_management_agent_unregister(tport->mgt_agt);
- kfree(tpg);
- return ERR_PTR(ret);
- }
+ if (ret < 0)
+ goto out_unreg_mgt_agt;
return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+ sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+ tport->tpg = NULL;
+ kfree(tpg);
+ return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
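The reworked sbp_make_tpg() error path above is the usual kernel goto-unwind idiom: each failure jumps to a label that releases only what was already acquired, so the cleanup is written once, in reverse order of setup. The generic shape (setup_a/setup_b/teardown_a are hypothetical):

static int setup_a(void);
static int setup_b(void);
static void teardown_a(void);

static int example_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;

	ret = setup_b();
	if (ret)
		goto out_undo_a;

	return 0;

out_undo_a:
	teardown_a();
out:
	return ret;
}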
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 9a5f9a7aecd..85140f7dde1 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
*
- * Copyright (c) 2009-2010 Rising Tide Systems
- * Copyright (c) 2009-2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +40,7 @@
#include "target_core_alua.h"
#include "target_core_ua.h"
-static int core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
@@ -59,15 +58,17 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
-int target_emulate_report_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
/*
* Skip over RESERVED area to first Target port group descriptor
* depending on the PARAMETER DATA FORMAT type..
@@ -81,13 +82,14 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
" small for %s header\n", cmd->data_length,
(ext_hdr) ? "extended" : "normal");
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* Check if the Target port group and Target port descriptor list
@@ -160,7 +162,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
@@ -200,32 +202,33 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
*
* See spc4r17 section 6.35
*/
-int target_emulate_set_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
+ sense_reason_t rc;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0, rc;
+ int alua_access_state, primary = 0;
u16 tg_pt_id, rtpi;
- if (!l_port) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!l_port)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -234,8 +237,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -243,24 +245,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
- rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!rc) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- rc = -EINVAL;
+ rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
ptr = &buf[4]; /* Skip over RESERVED area in header */
while (len < cmd->data_length) {
+ bool found = false;
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
@@ -268,7 +268,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* access state.
*/
rc = core_alua_check_transition(alua_access_state, &primary);
- if (rc != 0) {
+ if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
@@ -279,11 +279,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
goto out;
}
- rc = -1;
+
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
@@ -303,9 +301,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
@@ -315,27 +313,20 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
- rc = core_alua_do_port_transition(tg_pt_gp,
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+ if (!core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
- alua_access_state, 1);
+ alua_access_state, 1))
+ found = true;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
- /*
- * If not matching target port group ID can be located
- * throw an exception with ASCQ: INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
* Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -354,25 +345,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
continue;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+
spin_unlock(&dev->se_port_lock);
- rc = core_alua_set_tg_pt_secondary_state(
- tg_pt_gp_mem, port, 1, 1);
+ if (!core_alua_set_tg_pt_secondary_state(
+ tg_pt_gp_mem, port, 1, 1))
+ found = true;
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
- /*
- * If not matching relative target port identifier can
- * be located, throw an exception with ASCQ:
- * INVALID_PARAMETER_LIST
- */
- if (rc != 0) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- rc = -EINVAL;
- goto out;
- }
+ }
+
+ if (!found) {
+ rc = TCM_INVALID_PARAMETER_LIST;
+ goto out;
}
ptr += 4;
@@ -523,40 +511,27 @@ static inline int core_alua_state_transition(
}
/*
- * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
- * in transport_cmd_sequencer(). This function is assigned to
- * struct t10_alua *->state_check() in core_setup_alua()
- */
-static int core_alua_state_check_nop(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
-{
- return 0;
-}
-
-/*
- * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
- * This function is assigned to struct t10_alua *->state_check() in
- * core_setup_alua()
- *
- * Also, this function can return three different return codes to
- * signal transport_generic_cmd_sequencer()
- *
* return 1: Is used to signal LUN not accecsable, and check condition/not ready
* return 0: Used to signal success
* reutrn -1: Used to signal failure, and invalid cdb field
*/
-static int core_alua_state_check(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
+ u8 alua_ascq;
+ int ret;
+
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
if (!port)
return 0;
@@ -565,11 +540,11 @@ static int core_alua_state_check(
* access state: OFFLINE
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- return 1;
+ alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+ ret = 1;
+ goto out;
}
/*
* Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -594,14 +569,18 @@ static int core_alua_state_check(
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
- return core_alua_state_nonoptimized(cmd, cdb,
- nonop_delay_msecs, alua_ascq);
+ ret = core_alua_state_nonoptimized(cmd, cdb,
+ nonop_delay_msecs, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_STANDBY:
- return core_alua_state_standby(cmd, cdb, alua_ascq);
+ ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+ ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+ break;
case ALUA_ACCESS_STATE_TRANSITION:
- return core_alua_state_transition(cmd, cdb, alua_ascq);
+ ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+ break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -610,7 +589,24 @@ static int core_alua_state_check(
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+out:
+ if (ret > 0) {
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state..
+ */
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ cmd->scsi_asc = 0x04;
+ cmd->scsi_ascq = alua_ascq;
+ return TCM_CHECK_CONDITION_NOT_READY;
}
return 0;
@@ -619,7 +615,8 @@ static int core_alua_state_check(
/*
 * Check implicit and explicit ALUA state change request.
*/
-static int core_alua_check_transition(int state, int *primary)
+static sense_reason_t
+core_alua_check_transition(int state, int *primary)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
@@ -641,7 +638,7 @@ static int core_alua_check_transition(int state, int *primary)
break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
return 0;
@@ -758,8 +755,7 @@ static int core_alua_update_tpg_primary_metadata(
int primary_state,
unsigned char *md_buf)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- struct t10_wwn *wwn = &su_dev->t10_wwn;
+ struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
int len;
@@ -899,7 +895,6 @@ int core_alua_do_port_transition(
{
struct se_device *dev;
struct se_port *port;
- struct se_subsystem_dev *su_dev;
struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +944,13 @@ int core_alua_do_port_transition(
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &su_dev->t10_alua.tg_pt_gps_list,
+ &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +975,7 @@ int core_alua_do_port_transition(
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -989,11 +983,11 @@ int core_alua_do_port_transition(
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,14 +1262,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
return;
@@ -1358,10 +1347,8 @@ void __core_alua_drop_lu_gp_mem(
spin_unlock(&lu_gp->lu_gp_lock);
}
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *su_dev,
- const char *name,
- int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+ const char *name, int def_group)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1375,7 +1362,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+ tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1379,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
if (def_group) {
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- su_dev->t10_alua.alua_tg_pt_gps_count++;
+ dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1409,9 +1396,10 @@ int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp;
+
/*
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
@@ -1421,19 +1409,19 @@ int core_alua_set_tg_pt_gp_id(
return -EINVAL;
}
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- su_dev->t10_alua.alua_tg_pt_gps_counter++;
+ dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!tg_pt_gp_id)
@@ -1441,7 +1429,7 @@ again:
pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return -EINVAL;
}
}
@@ -1449,9 +1437,9 @@ again:
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &su_dev->t10_alua.tg_pt_gps_list);
- su_dev->t10_alua.alua_tg_pt_gps_count++;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ &dev->t10_alua.tg_pt_gps_list);
+ dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1480,8 +1468,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
@@ -1490,10 +1479,11 @@ void core_alua_free_tg_pt_gp(
 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- su_dev->t10_alua.alua_tg_pt_gps_counter--;
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1502,6 +1492,7 @@ void core_alua_free_tg_pt_gp(
*/
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax();
+
/*
* Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port.
@@ -1525,9 +1516,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+ if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1541,14 +1532,9 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return;
@@ -1574,25 +1560,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
}
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
- struct se_subsystem_dev *su_dev,
- const char *name)
+ struct se_device *dev, const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1600,11 +1585,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
/*
@@ -1640,16 +1625,11 @@ static void __core_alua_drop_tg_pt_gp_mem(
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
- if (alua->alua_type != SPC3_ALUA_EMULATED)
- return len;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return len;
@@ -1683,7 +1663,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
{
struct se_portal_group *tpg;
struct se_lun *lun;
- struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+ struct se_device *dev = port->sep_lun->lun_se_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1692,13 +1672,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
- tpg->se_tpg_tfo->tpg_get_tag(tpg),
- config_item_name(&lun->lun_group.cg_item));
- return -EINVAL;
- }
+ tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+ if (!tg_pt_gp_mem)
+ return 0;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
@@ -1716,18 +1692,11 @@ ssize_t core_alua_store_tg_pt_gp_info(
* struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below.
*/
- tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
strstrip(buf));
if (!tg_pt_gp_new)
return -ENODEV;
}
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem) {
- if (tg_pt_gp_new)
- core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
- return -EINVAL;
- }
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
@@ -1750,7 +1719,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -2054,32 +2023,12 @@ ssize_t core_alua_store_secondary_write_metadata(
return count;
}
-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = &su_dev->t10_alua;
- struct t10_alua_lu_gp_member *lu_gp_mem;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but emulate SCSI logic themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
- alua->alua_type = SPC_ALUA_PASSTHROUGH;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", dev->transport->name);
- return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated ALUA.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- pr_debug("%s: Enabling ALUA Emulation for SPC-3"
- " device\n", dev->transport->name);
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
+ struct t10_alua_lu_gp_member *lu_gp_mem;
+
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
@@ -2088,8 +2037,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
if (IS_ERR(lu_gp_mem))
return PTR_ERR(lu_gp_mem);
- alua->alua_type = SPC3_ALUA_EMULATED;
- alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
default_lu_gp);
@@ -2098,11 +2045,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
dev->transport->name);
- } else {
- alua->alua_type = SPC2_ALUA_DISABLED;
- alua->alua_state_check = &core_alua_state_check_nop;
- pr_debug("%s: Disabling ALUA Emulation for SPC-2"
- " device\n", dev->transport->name);
}
return 0;
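
The recurring pattern in the target_core_alua.c changes above is that the state-check path stops stashing an errno plus cmd->scsi_sense_reason and instead returns a sense_reason_t directly, with 0 meaning the command may proceed. The stand-alone C sketch below (not kernel code; the types, helper name, and ASCQ value are simplified, hypothetical stand-ins) illustrates how such a return value propagates and is translated into an ASC/ASCQ pair for a CHECK CONDITION response:

    #include <stdio.h>

    /* Simplified, hypothetical stand-ins for the kernel's sense_reason_t values. */
    typedef enum {
            TCM_NO_SENSE = 0,
            TCM_CHECK_CONDITION_NOT_READY,
            TCM_INVALID_CDB_FIELD,
    } sense_reason_t;

    struct fake_cmd {
            int port_offline;          /* pretend secondary ALUA offline state */
            unsigned char scsi_asc;    /* additional sense code */
            unsigned char scsi_ascq;   /* additional sense code qualifier */
    };

    /* Mirrors the shape of the reworked state check: return 0 when the
     * command may proceed, or a sense_reason_t describing the failure. */
    static sense_reason_t fake_alua_state_check(struct fake_cmd *cmd)
    {
            if (cmd->port_offline) {
                    cmd->scsi_asc = 0x04;   /* LUN not accessible */
                    cmd->scsi_ascq = 0x12;  /* hypothetical ASCQ for offline */
                    return TCM_CHECK_CONDITION_NOT_READY;
            }
            return TCM_NO_SENSE;
    }

    int main(void)
    {
            struct fake_cmd cmd = { .port_offline = 1 };
            sense_reason_t rc = fake_alua_state_check(&cmd);

            if (rc)
                    printf("CHECK CONDITION, ASC/ASCQ 0x%02x/0x%02x\n",
                           cmd.scsi_asc, cmd.scsi_ascq);
            else
                    printf("command may proceed\n");
            return 0;
    }
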
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index f920c170d47..e539c3e7f4a 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-extern int target_emulate_report_target_port_groups(struct se_cmd *);
-extern int target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
- struct se_subsystem_dev *, const char *, int);
+ struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c123327499a..4efb61b8d00 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3,8 +3,7 @@
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * Copyright (c) 2008-2011 Rising Tide Systems
- * Copyright (c) 2008-2011 Linux-iSCSI.org
+ * (c) Copyright 2008-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -565,21 +564,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
- ssize_t rb; \
- \
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
- rb = snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)dev->se_sub_dev->se_dev_attrib._name); \
- spin_unlock(&se_dev->se_dev_lock); \
- \
- return rb; \
+ return snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)da->da_dev->dev_attrib._name); \
}
#define DEF_DEV_ATTRIB_STORE(_name) \
@@ -588,26 +574,16 @@ static ssize_t target_core_dev_store_attr_##_name( \
const char *page, \
size_t count) \
{ \
- struct se_device *dev; \
- struct se_subsystem_dev *se_dev = da->da_sub_dev; \
unsigned long val; \
int ret; \
\
- spin_lock(&se_dev->se_dev_lock); \
- dev = se_dev->se_dev_ptr; \
- if (!dev) { \
- spin_unlock(&se_dev->se_dev_lock); \
- return -ENODEV; \
- } \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
- spin_unlock(&se_dev->se_dev_lock); \
pr_err("strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
- ret = se_dev_set_##_name(dev, (u32)val); \
- spin_unlock(&se_dev->se_dev_lock); \
+ ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}
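
With the attribute container now pointing straight at the live se_device, the DEF_DEV_ATTRIB show/store macros above reduce to one-line bodies. A minimal userspace sketch of the same macro-generation technique (hypothetical struct and field names, not the kernel macros themselves):

    #include <stdio.h>

    struct dev_attrib {
            unsigned int block_size;
            unsigned int queue_depth;
    };

    /* Generate one formatting helper per attribute field, in the spirit of
     * the DEF_DEV_ATTRIB show macro: each helper prints the named field. */
    #define DEF_ATTRIB_SHOW(_name)                                        \
    static int show_##_name(const struct dev_attrib *da, char *buf,       \
                            size_t len)                                   \
    {                                                                     \
            return snprintf(buf, len, "%u\n", da->_name);                 \
    }

    DEF_ATTRIB_SHOW(block_size)
    DEF_ATTRIB_SHOW(queue_depth)

    int main(void)
    {
            struct dev_attrib da = { .block_size = 512, .queue_depth = 32 };
            char buf[32];

            show_block_size(&da, buf, sizeof(buf));
            printf("%s", buf);
            show_queue_depth(&da, buf, sizeof(buf));
            printf("%s", buf);
            return 0;
    }
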
@@ -699,6 +675,9 @@ SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(max_write_same_len);
+SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
+
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
@@ -724,6 +703,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
&target_core_dev_attrib_unmap_granularity_alignment.attr,
+ &target_core_dev_attrib_max_write_same_len.attr,
NULL,
};
@@ -764,13 +744,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
-
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&t10_wwn->unit_serial[0]);
}
@@ -780,8 +753,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
+ struct se_device *dev = t10_wwn->t10_dev;
unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
/*
@@ -794,7 +766,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* it is doing 'the right thing' wrt a world wide unique
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
- if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+ if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
@@ -811,15 +783,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* (underneath the initiator side OS dependent multipath code)
* could cause negative effects.
*/
- dev = su_dev->se_dev_ptr;
- if (dev) {
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- pr_err("Unable to set VPD Unit Serial while"
- " active %d $FABRIC_MOD exports exist\n",
- atomic_read(&dev->dev_export_obj.obj_access_count));
- return -EINVAL;
- }
+ if (dev->export_count) {
+ pr_err("Unable to set VPD Unit Serial while"
+ " active %d $FABRIC_MOD exports exist\n",
+ dev->export_count);
+ return -EINVAL;
}
+
/*
* This currently assumes ASCII encoding for emulated VPD Unit Serial.
*
@@ -828,12 +798,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
*/
memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
- snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+ snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
- su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+ dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
- " %s\n", su_dev->t10_wwn.unit_serial);
+ " %s\n", dev->t10_wwn.unit_serial);
return count;
}
@@ -847,16 +817,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
struct t10_wwn *t10_wwn,
char *page)
{
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
- struct se_device *dev;
struct t10_vpd *vpd;
unsigned char buf[VPD_TMP_BUF_SIZE];
ssize_t len = 0;
- dev = se_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +858,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
struct t10_wwn *t10_wwn, \
char *page) \
{ \
- struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
- struct se_device *dev; \
struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
- dev = se_dev->se_dev_ptr; \
- if (!dev) \
- return -ENODEV; \
- \
spin_lock(&t10_wwn->t10_vpd_lock); \
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
if (vpd->association != _assoc) \
@@ -1003,7 +961,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
/* Start functions for struct config_item_type target_core_dev_pr_cit */
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode) \
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
@@ -1015,13 +973,8 @@ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_pr_show_attr_##_name);
-/*
- * res_holder
- */
-static ssize_t target_core_dev_pr_show_spc3_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
@@ -1030,134 +983,82 @@ static ssize_t target_core_dev_pr_show_spc3_res(
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
- *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
- }
+ if (!pr_reg)
+ return sprintf(page, "No SPC-3 Reservation holder\n");
+
se_nacl = pr_reg->pr_reg_nacl;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+ return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
}
-static ssize_t target_core_dev_pr_show_spc2_res(
- struct se_device *dev,
- char *page,
- ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
+ char *page)
{
struct se_node_acl *se_nacl;
+ ssize_t len;
- spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!se_nacl) {
- *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return *len;
+ if (se_nacl) {
+ len = sprintf(page,
+ "SPC-2 Reservation: %s Initiator: %s\n",
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->initiatorname);
+ } else {
+ len = sprintf(page, "No SPC-2 Reservation holder\n");
}
- *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- se_nacl->initiatorname);
- spin_unlock(&dev->dev_reservation_lock);
-
- return *len;
+ return len;
}
-static ssize_t target_core_dev_pr_show_attr_res_holder(
- struct se_subsystem_dev *su_dev,
- char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+ char *page)
{
- ssize_t len = 0;
+ int ret;
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC2_RESERVATIONS:
- target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
- page, &len);
- break;
- case SPC_PASSTHROUGH:
- len += sprintf(page+len, "Passthrough\n");
- break;
- default:
- len += sprintf(page+len, "Unknown\n");
- break;
- }
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "Passthrough\n");
- return len;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_core_dev_pr_show_spc2_res(dev, page);
+ else
+ ret = target_core_dev_pr_show_spc3_res(dev, page);
+ spin_unlock(&dev->dev_reservation_lock);
+ return ret;
}
SE_DEV_PR_ATTR_RO(res_holder);
-/*
- * res_pr_all_tgt_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
- struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
- pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (!dev->dev_pr_res_holder) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
- }
- /*
- * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3
- * Basic PERSISTENT RESERVER OUT parameter list, page 290
- */
- if (pr_reg->pr_reg_all_tg_pt)
+ } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
len = sprintf(page, "SPC-3 Reservation: All Target"
" Ports registration\n");
- else
+ } else {
len = sprintf(page, "SPC-3 Reservation: Single"
" Target Port registration\n");
- spin_unlock(&dev->dev_reservation_lock);
+ }
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
-/*
- * res_pr_generation
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return 0;
-
- return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+ return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1067,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
* res_pr_holder_tg_port
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct se_node_acl *se_nacl;
struct se_lun *lun;
struct se_portal_group *se_tpg;
@@ -1177,20 +1076,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct target_core_fabric_ops *tfo;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
+ goto out_unlock;
}
+
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
@@ -1204,19 +1096,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
tfo->get_fabric_name(), lun->unpacked_lun);
- spin_unlock(&dev->dev_reservation_lock);
+out_unlock:
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
-/*
- * res_pr_registered_i_pts
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
@@ -1225,16 +1114,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
ssize_t len = 0;
int reg_count = 0, prf_isid;
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
@@ -1254,7 +1137,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(page+len, "None\n");
@@ -1264,88 +1147,48 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
-/*
- * res_pr_type
- */
static ssize_t target_core_dev_pr_show_attr_res_pr_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- struct se_device *dev;
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
- return len;
-
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!pr_reg) {
+ if (pr_reg) {
+ len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+ core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+ } else {
len = sprintf(page, "No SPC-3 Reservation holder\n");
- spin_unlock(&dev->dev_reservation_lock);
- return len;
}
- len = sprintf(page, "SPC-3 Reservation Type: %s\n",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type));
- spin_unlock(&dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_type);
-/*
- * res_type
- */
static ssize_t target_core_dev_pr_show_attr_res_type(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- ssize_t len = 0;
-
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- switch (su_dev->t10_pr.res_type) {
- case SPC3_PERSISTENT_RESERVATIONS:
- len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
- break;
- case SPC2_RESERVATIONS:
- len = sprintf(page, "SPC2_RESERVATIONS\n");
- break;
- case SPC_PASSTHROUGH:
- len = sprintf(page, "SPC_PASSTHROUGH\n");
- break;
- default:
- len = sprintf(page, "UNKNOWN\n");
- break;
- }
-
- return len;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return sprintf(page, "SPC_PASSTHROUGH\n");
+ else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ return sprintf(page, "SPC2_RESERVATIONS\n");
+ else
+ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
SE_DEV_PR_ATTR_RO(res_type);
-/*
- * res_aptpl_active
- */
-
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+ (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1197,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
* res_aptpl_metadata
*/
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
- char *page)
+ struct se_device *dev, char *page)
{
- if (!su_dev->se_dev_ptr)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1231,10 @@ static match_table_t tokens = {
};
static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
- struct se_subsystem_dev *su_dev,
+ struct se_device *dev,
const char *page,
size_t count)
{
- struct se_device *dev;
unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
unsigned char *t_fabric = NULL, *t_port = NULL;
char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1246,12 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u16 port_rpti = 0, tpgt = 0;
u8 type = 0, scope;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return 0;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
@@ -1558,7 +1394,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1573,7 +1409,7 @@ out:
SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1441,14 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
int bl = 0;
ssize_t read_bytes = 0;
- if (!se_dev->se_dev_ptr)
- return -ENODEV;
-
- transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+ transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
- read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+ read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
return read_bytes;
}
@@ -1633,17 +1465,10 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
+ struct se_subsystem_api *t = dev->transport;
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate struct se_subsystem_dev>se"
- "_dev_su_ptr\n");
- return -EINVAL;
- }
-
- return t->set_configfs_dev_params(hba, se_dev, page, count);
+ return t->set_configfs_dev_params(dev, page, count);
}
static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1481,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+ if (!(dev->dev_flags & DF_USING_ALIAS))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1494,8 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1505,18 @@ static ssize_t target_core_store_dev_alias(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
- "%s", page);
+ read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
- se_dev->se_dev_alias[read_bytes - 1] = '\0';
+ if (dev->dev_alias[read_bytes - 1] == '\n')
+ dev->dev_alias[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_ALIAS;
+ dev->dev_flags |= DF_USING_ALIAS;
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_alias);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->dev_alias);
return read_bytes;
}
@@ -1707,12 +1531,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = p;
+ struct se_device *dev = p;
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH))
return 0;
- return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+ return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1544,8 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_hba *hba = se_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1555,19 @@ static ssize_t target_core_store_dev_udev_path(
return -EINVAL;
}
- read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+ read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
if (!read_bytes)
return -EINVAL;
- if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
- se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+ if (dev->udev_path[read_bytes - 1] == '\n')
+ dev->udev_path[read_bytes - 1] = '\0';
- se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+ dev->dev_flags |= DF_USING_UDEV_PATH;
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&se_dev->se_dev_group.cg_item),
- se_dev->se_dev_udev_path);
+ config_item_name(&dev->dev_group.cg_item),
+ dev->udev_path);
return read_bytes;
}
@@ -1761,11 +1585,9 @@ static ssize_t target_core_store_dev_enable(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = p;
- struct se_device *dev;
- struct se_hba *hba = se_dev->se_dev_hba;
- struct se_subsystem_api *t = hba->transport;
+ struct se_device *dev = p;
char *ptr;
+ int ret;
ptr = strstr(page, "1");
if (!ptr) {
@@ -1773,25 +1595,10 @@ static ssize_t target_core_store_dev_enable(
" is \"1\"\n");
return -EINVAL;
}
- if (se_dev->se_dev_ptr) {
- pr_err("se_dev->se_dev_ptr already set for storage"
- " object\n");
- return -EEXIST;
- }
-
- if (t->check_configfs_dev_params(hba, se_dev) < 0)
- return -EINVAL;
-
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- else if (!dev)
- return -EINVAL;
-
- se_dev->se_dev_ptr = dev;
- pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
- " %p\n", se_dev->se_dev_ptr);
+ ret = target_configure_device(dev);
+ if (ret)
+ return ret;
return count;
}
@@ -1805,26 +1612,15 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
+ struct se_device *dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
-
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
- return len;
-
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
+ if (!lu_gp_mem)
+ return 0;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1843,24 +1639,17 @@ static ssize_t target_core_store_alua_lu_gp(
const char *page,
size_t count)
{
- struct se_device *dev;
- struct se_subsystem_dev *su_dev = p;
- struct se_hba *hba = su_dev->se_dev_hba;
+ struct se_device *dev = p;
+ struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF];
int move = 0;
- dev = su_dev->se_dev_ptr;
- if (!dev)
- return -ENODEV;
+ lu_gp_mem = dev->dev_alua_lu_gp_mem;
+ if (!lu_gp_mem)
+ return 0;
- if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
- pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
- config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
- return -EINVAL;
- }
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
@@ -1881,14 +1670,6 @@ static ssize_t target_core_store_alua_lu_gp(
if (!lu_gp_new)
return -ENODEV;
}
- lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!lu_gp_mem) {
- if (lu_gp_new)
- core_alua_put_lu_gp_from_name(lu_gp_new);
- pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
- " pointer\n");
- return -EINVAL;
- }
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
@@ -1902,7 +1683,7 @@ static ssize_t target_core_store_alua_lu_gp(
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
@@ -1927,7 +1708,7 @@ static ssize_t target_core_store_alua_lu_gp(
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item),
+ config_item_name(&dev->dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
@@ -1955,69 +1736,44 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
static void target_core_dev_release(struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
- struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
- struct se_subsystem_api *t = hba->transport;
- struct config_group *dev_cg = &se_dev->se_dev_group;
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
kfree(dev_cg->default_groups);
- /*
- * This pointer will set when the storage is enabled with:
- *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
- */
- if (se_dev->se_dev_ptr) {
- pr_debug("Target_Core_ConfigFS: Calling se_free_"
- "virtual_device() for se_dev_ptr: %p\n",
- se_dev->se_dev_ptr);
-
- se_free_virtual_device(se_dev->se_dev_ptr, hba);
- } else {
- /*
- * Release struct se_subsystem_dev->se_dev_su_ptr..
- */
- pr_debug("Target_Core_ConfigFS: Calling t->free_"
- "device() for se_dev_su_ptr: %p\n",
- se_dev->se_dev_su_ptr);
-
- t->free_device(se_dev->se_dev_su_ptr);
- }
-
- pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
- "_dev_t: %p\n", se_dev);
- kfree(se_dev);
+ target_free_device(dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show(se_dev, page);
+ return tc_attr->show(dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
struct configfs_attribute *attr,
const char *page, size_t count)
{
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(item), struct se_subsystem_dev,
- se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store(se_dev, page, count);
+ return tc_attr->store(dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1863,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
{
struct se_device *dev;
struct se_hba *hba;
- struct se_subsystem_dev *su_dev;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1872,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- su_dev = dev->se_sub_dev;
- hba = su_dev->se_dev_hba;
+ hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
- config_item_name(&su_dev->se_dev_group.cg_item));
+ config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2014,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
const char *page,
size_t count)
{
- struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+ struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
@@ -2284,7 +2038,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
return -EINVAL;
}
- ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+ ret = core_alua_do_port_transition(tg_pt_gp, dev,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
@@ -2620,11 +2374,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct t10_alua *alua = container_of(group, struct t10_alua,
alua_tg_pt_gps_group);
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
struct config_group *alua_tg_pt_gp_cg = NULL;
struct config_item *alua_tg_pt_gp_ci = NULL;
- tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
if (!tg_pt_gp)
return NULL;
@@ -2721,10 +2474,10 @@ static struct config_group *target_core_make_subdev(
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_dev *se_dev;
struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
+ struct se_device *dev;
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
struct config_group *dev_stat_grp = NULL;
int errno = -ENOMEM, ret;
@@ -2737,120 +2490,80 @@ static struct config_group *target_core_make_subdev(
*/
t = hba->transport;
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
- goto unlock;
- }
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
-
- se_dev->se_dev_hba = hba;
- dev_cg = &se_dev->se_dev_group;
-
- dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
+ dev = target_alloc_device(hba, name);
+ if (!dev)
+ goto out_unlock;
+
+ dev_cg = &dev->dev_group;
+
+ dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!dev_cg->default_groups)
- goto out;
- /*
- * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
- * for ->allocate_virtdevice()
- *
- * se_dev->se_dev_ptr will be set after ->create_virtdev()
- * has been called successfully in the next level up in the
- * configfs tree for device object's struct config_group.
- */
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- goto out;
- }
+ goto out_free_device;
- config_group_init_type_name(&se_dev->se_dev_group, name,
- &target_core_dev_cit);
- config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+ config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+ config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&target_core_dev_attrib_cit);
- config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+ config_group_init_type_name(&dev->dev_pr_group, "pr",
&target_core_dev_pr_cit);
- config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+ config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
&target_core_dev_wwn_cit);
- config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+ config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
"alua", &target_core_alua_tg_pt_gps_cit);
- config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+ config_group_init_type_name(&dev->dev_stat_grps.stat_group,
"statistics", &target_core_stat_cit);
- dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
- dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
- dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
- dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+ dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+ dev_cg->default_groups[1] = &dev->dev_pr_group;
+ dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+ dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+ dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
dev_cg->default_groups[5] = NULL;
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
- tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+ tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
- goto out;
+ goto out_free_dev_cg_default_groups;
+ dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
- tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!tg_pt_gp_cg->default_groups) {
pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp;
}
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
- dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
+ dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
pr_err("Unable to allocate dev_stat_grp->default_groups\n");
- goto out;
+ goto out_free_tg_pt_gp_cg_default_groups;
}
- target_stat_setup_dev_default_groups(se_dev);
-
- pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
- " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+ target_stat_setup_dev_default_groups(dev);
mutex_unlock(&hba->hba_access_mutex);
- return &se_dev->se_dev_group;
-out:
- if (se_dev->t10_alua.default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
- se_dev->t10_alua.default_tg_pt_gp = NULL;
- }
- if (dev_stat_grp)
- kfree(dev_stat_grp->default_groups);
- if (tg_pt_gp_cg)
- kfree(tg_pt_gp_cg->default_groups);
- if (dev_cg)
- kfree(dev_cg->default_groups);
- if (se_dev->se_dev_su_ptr)
- t->free_device(se_dev->se_dev_su_ptr);
- kfree(se_dev);
-unlock:
+ return dev_cg;
+
+out_free_tg_pt_gp_cg_default_groups:
+ kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+ kfree(dev_cg->default_groups);
+out_free_device:
+ target_free_device(dev);
+out_unlock:
mutex_unlock(&hba->hba_access_mutex);
return ERR_PTR(errno);
}
@@ -2859,18 +2572,19 @@ static void target_core_drop_subdev(
struct config_group *group,
struct config_item *item)
{
- struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
- struct se_subsystem_dev, se_dev_group);
+ struct config_group *dev_cg = to_config_group(item);
+ struct se_device *dev =
+ container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
struct config_item *df_item;
- struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+ struct config_group *tg_pt_gp_cg, *dev_stat_grp;
int i;
- hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
- dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+ dev_stat_grp = &dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2592,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2603,15 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- se_dev->t10_alua.default_tg_pt_gp = NULL;
+ dev->t10_alua.default_tg_pt_gp = NULL;
- dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
df_item = &dev_cg->default_groups[i]->cg_item;
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
/*
- * The releasing of se_dev and associated se_dev->se_dev_ptr is done
- * from target_core_dev_item_ops->release() ->target_core_dev_release().
+ * se_dev is released from target_core_dev_item_ops->release()
*/
config_item_put(item);
mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2674,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
return -EINVAL;
}
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("Unable to set hba_mode with active devices\n");
- spin_unlock(&hba->device_lock);
return -EINVAL;
}
- spin_unlock(&hba->device_lock);
ret = transport->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
@@ -3120,7 +2829,7 @@ static int __init target_core_init_configfs(void)
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
- target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!target_cg->default_groups) {
pr_err("Unable to allocate target_cg->default_groups\n");
@@ -3136,7 +2845,7 @@ static int __init target_core_init_configfs(void)
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
hba_cg = &target_core_hbagroup;
- hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!hba_cg->default_groups) {
pr_err("Unable to allocate hba_cg->default_groups\n");
@@ -3152,7 +2861,7 @@ static int __init target_core_init_configfs(void)
* groups under /sys/kernel/config/target/core/alua/
*/
alua_cg = &alua_group;
- alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!alua_cg->default_groups) {
pr_err("Unable to allocate alua_cg->default_groups\n");
@@ -3174,7 +2883,7 @@ static int __init target_core_init_configfs(void)
}
lu_gp_cg = &alua_lu_gps_group;
- lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lu_gp_cg->default_groups) {
pr_err("Unable to allocate lu_gp_cg->default_groups\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9abef9f8eb7..e2695101bb9 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -4,10 +4,7 @@
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,26 +47,20 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-static void se_dev_start(struct se_device *dev);
-static void se_dev_stop(struct se_device *dev);
-
static struct se_hba *lun0_hba;
-static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
-int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+sense_reason_t
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
struct se_device *dev;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+ return TCM_NON_EXISTENT_LUN;
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -81,14 +72,12 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
- return -EACCES;
+ return TCM_WRITE_PROTECTED;
}
if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -113,38 +102,24 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
* MappedLUN=0 exists for this Initiator Port.
*/
if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -ENODEV;
+ return TCM_NON_EXISTENT_LUN;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -EACCES;
- }
+ (se_cmd->data_direction != DMA_NONE))
+ return TCM_WRITE_PROTECTED;
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;
@@ -175,11 +150,8 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
return -ENODEV;
- }
spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -199,15 +171,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -ENODEV;
- }
- /*
- * Determine if the struct se_lun is online.
- * FIXME: Check for LUN_RESET + UNIT Attention
- */
- if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -ENODEV;
}
@@ -565,7 +528,6 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -578,7 +540,8 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
pr_err("Unable to allocate t10_alua_tg_pt"
@@ -587,7 +550,7 @@ static void core_export_port(
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- su_dev->t10_alua.default_tg_pt_gp);
+ dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
@@ -625,6 +588,7 @@ int core_dev_export(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port;
port = core_alloc_port(dev);
@@ -632,9 +596,11 @@ int core_dev_export(
return PTR_ERR(port);
lun->lun_se_dev = dev;
- se_dev_start(dev);
- atomic_inc(&dev->dev_export_obj.obj_access_count);
+ spin_lock(&hba->device_lock);
+ dev->export_count++;
+ spin_unlock(&hba->device_lock);
+
core_export_port(dev, tpg, port, lun);
return 0;
}
@@ -644,6 +610,7 @@ void core_dev_unexport(
struct se_portal_group *tpg,
struct se_lun *lun)
{
+ struct se_hba *hba = dev->se_hba;
struct se_port *port = lun->lun_sep;
spin_lock(&lun->lun_sep_lock);
@@ -654,198 +621,27 @@ void core_dev_unexport(
spin_unlock(&lun->lun_sep_lock);
spin_lock(&dev->se_port_lock);
- atomic_dec(&dev->dev_export_obj.obj_access_count);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);
- se_dev_stop(dev);
- lun->lun_se_dev = NULL;
-}
-
-int target_report_luns(struct se_cmd *se_cmd)
-{
- struct se_dev_entry *deve;
- struct se_session *se_sess = se_cmd->se_sess;
- unsigned char *buf;
- u32 lun_count = 0, offset = 8, i;
-
- if (se_cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- se_cmd->data_length);
- se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- buf = transport_kmap_data_sg(se_cmd);
- if (!buf)
- return -ENOMEM;
-
- /*
- * If no struct se_session pointer is present, this struct se_cmd is
- * coming via a target_core_mod PASSTHROUGH op, and not through
- * a $FABRIC_MOD. In that case, report LUN=0 only.
- */
- if (!se_sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
- goto done;
- }
-
- spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = se_sess->se_node_acl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
- /*
- * We determine the correct LUN LIST LENGTH even once we
- * have reached the initial allocation length.
- * See SPC2-R20 7.19.
- */
- lun_count++;
- if ((offset + 8) > se_cmd->data_length)
- continue;
-
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
- offset += 8;
- }
- spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
-
- /*
- * See SPC3 r07, page 159.
- */
-done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(se_cmd);
-
- target_complete_cmd(se_cmd, GOOD);
- return 0;
-}
-
-/* se_release_device_for_hba():
- *
- *
- */
-void se_release_device_for_hba(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
- se_dev_stop(dev);
-
- if (dev->dev_ptr) {
- destroy_workqueue(dev->tmr_wq);
- if (dev->transport->free_device)
- dev->transport->free_device(dev->dev_ptr);
- }
-
spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
+ dev->export_count--;
spin_unlock(&hba->device_lock);
- core_scsi3_free_all_registrations(dev);
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
+ lun->lun_se_dev = NULL;
}
-void se_release_vpd_for_dev(struct se_device *dev)
+static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+ spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+ &dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
-}
-
-/* se_free_virtual_device():
- *
- * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
- */
-int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
-{
- if (!list_empty(&dev->dev_sep_list))
- dump_stack();
-
- core_alua_free_lu_gp_mem(dev);
- se_release_device_for_hba(dev);
-
- return 0;
-}
-
-static void se_dev_start(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_inc(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
- if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
- dev->dev_status &=
- ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-static void se_dev_stop(struct se_device *dev)
-{
- struct se_hba *hba = dev->se_hba;
-
- spin_lock(&hba->device_lock);
- atomic_dec(&dev->dev_obj.obj_access_count);
- if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
- if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- } else if (dev->dev_status &
- TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
- dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
- dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
- }
- }
- spin_unlock(&hba->device_lock);
-}
-
-int se_dev_check_online(struct se_device *dev)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev->dev_status_lock, flags);
- ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
- (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irqrestore(&dev->dev_status_lock, flags);
-
- return ret;
-}
-
-int se_dev_check_shutdown(struct se_device *dev)
-{
- int ret;
-
- spin_lock_irq(&dev->dev_status_lock);
- ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
- spin_unlock_irq(&dev->dev_status_lock);
-
- return ret;
+ spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
@@ -866,72 +662,13 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors;
}
-void se_dev_set_default_attribs(
- struct se_device *dev,
- struct se_dev_limits *dev_limits)
-{
- struct queue_limits *limits = &dev_limits->limits;
-
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
- dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
- dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
- dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
- /*
- * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
- * iblock_create_virtdevice() from struct queue_limits values
- * if blk_queue_discard()==1
- */
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
- DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
- /*
- * block_size is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
- dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
- /*
- * Align max_hw_sectors down to PAGE_SIZE I/O transfers
- */
- limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
- limits->logical_block_size);
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-
- /*
- * Set fabric_max_sectors, which is reported in block limits
- * VPD page (B0h).
- */
- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * Set optimal_sectors from fabric_max_sectors, which can be
- * lowered via configfs.
- */
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
- /*
- * queue_depth is based on subsystem plugin dependent requirements.
- */
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
-}
-
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+ dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -939,10 +676,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ dev->dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+ dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -950,9 +687,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ dev->dev_attrib.unmap_granularity = unmap_granularity;
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+ dev, dev->dev_attrib.unmap_granularity);
return 0;
}
@@ -960,9 +697,19 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+ dev, dev->dev_attrib.unmap_granularity_alignment);
+ return 0;
+}
+
+int se_dev_set_max_write_same_len(
+ struct se_device *dev,
+ u32 max_write_same_len)
+{
+ dev->dev_attrib.max_write_same_len = max_write_same_len;
+ pr_debug("dev[%p]: Set max_write_same_len: %u\n",
+ dev, dev->dev_attrib.max_write_same_len);
return 0;
}
@@ -993,9 +740,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
pr_err("emulate_fua_write not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+ dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
@@ -1025,9 +772,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+ dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
@@ -1038,16 +785,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while dev_export_obj: %d count"
- " exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+ dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1059,15 +805,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
return -EINVAL;
}
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ dev->dev_attrib.emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+ dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1082,12 +828,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ dev->dev_attrib.emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
@@ -1103,12 +849,12 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ dev->dev_attrib.emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
@@ -1120,9 +866,9 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ dev->dev_attrib.enforce_pr_isids = flag;
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
@@ -1132,7 +878,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ dev->dev_attrib.is_nonrot = flag;
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
dev, flag);
return 0;
@@ -1145,7 +891,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
" reordering not implemented\n", dev);
return -ENOSYS;
}
- dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ dev->dev_attrib.emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1155,10 +901,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!queue_depth) {
@@ -1168,26 +914,26 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
} else {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
- if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ if (queue_depth > dev->dev_attrib.queue_depth) {
+ if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
}
- dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+ dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
@@ -1195,10 +941,10 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " fabric_max_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (!fabric_max_sectors) {
@@ -1213,11 +959,11 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
@@ -1233,9 +979,9 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
+ dev->dev_attrib.block_size);
- dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, fabric_max_sectors);
return 0;
@@ -1243,10 +989,10 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ " optimal_sectors while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1254,14 +1000,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
+ optimal_sectors, dev->dev_attrib.fabric_max_sectors);
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ dev->dev_attrib.optimal_sectors = optimal_sectors;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
@@ -1269,10 +1015,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
- " while dev_export_obj: %d count exists\n", dev,
- atomic_read(&dev->dev_export_obj.obj_access_count));
+ " while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
@@ -1293,7 +1039,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
return -EINVAL;
}
- dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
@@ -1307,12 +1053,6 @@ struct se_lun *core_dev_add_lun(
struct se_lun *lun_p;
int rc;
- if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
- atomic_read(&dev->dev_access_obj.obj_access_count));
- return ERR_PTR(-EACCES);
- }
-
lun_p = core_tpg_pre_addlun(tpg, lun);
if (IS_ERR(lun_p))
return lun_p;
@@ -1568,12 +1308,211 @@ void core_dev_free_initiator_node_lun_acl(
kfree(lacl);
}
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+ struct t10_wwn *wwn = &dev->t10_wwn;
+ char buf[17];
+ int i, device_type;
+ /*
+ * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+ */
+ for (i = 0; i < 8; i++)
+ if (wwn->vendor[i] >= 0x20)
+ buf[i] = wwn->vendor[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Vendor: %s\n", buf);
+
+ for (i = 0; i < 16; i++)
+ if (wwn->model[i] >= 0x20)
+ buf[i] = wwn->model[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Model: %s\n", buf);
+
+ for (i = 0; i < 4; i++)
+ if (wwn->revision[i] >= 0x20)
+ buf[i] = wwn->revision[i];
+ else
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Revision: %s\n", buf);
+
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+}
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+{
+ struct se_device *dev;
+
+ dev = hba->transport->alloc_device(hba, name);
+ if (!dev)
+ return NULL;
+
+ dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ dev->se_hba = hba;
+ dev->transport = hba->transport;
+
+ INIT_LIST_HEAD(&dev->dev_list);
+ INIT_LIST_HEAD(&dev->dev_sep_list);
+ INIT_LIST_HEAD(&dev->dev_tmr_list);
+ INIT_LIST_HEAD(&dev->delayed_cmd_list);
+ INIT_LIST_HEAD(&dev->state_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
+ spin_lock_init(&dev->stats_lock);
+ spin_lock_init(&dev->execute_task_lock);
+ spin_lock_init(&dev->delayed_cmd_lock);
+ spin_lock_init(&dev->dev_reservation_lock);
+ spin_lock_init(&dev->se_port_lock);
+ spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
+ atomic_set(&dev->dev_ordered_id, 0);
+ INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+ spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+ INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&dev->t10_pr.registration_lock);
+ spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+ spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+
+ dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ dev->t10_wwn.t10_dev = dev;
+ dev->t10_alua.t10_dev = dev;
+
+ dev->dev_attrib.da_dev = dev;
+ dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+ dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->dev_attrib.unmap_granularity_alignment =
+ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+ dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+ return dev;
+}
+
+int target_configure_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+ int ret;
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
+ " object\n");
+ return -EEXIST;
+ }
+
+ ret = dev->transport->configure_device(dev);
+ if (ret)
+ goto out;
+ dev->dev_flags |= DF_CONFIGURED;
+
+ /*
+ * XXX: there is not much point to have two different values here..
+ */
+ dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+ dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+ /*
+ * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+ */
+ dev->dev_attrib.hw_max_sectors =
+ se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size);
+
+ dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ dev->creation_time = get_jiffies_64();
+
+ ret = core_setup_alua(dev);
+ if (ret)
+ goto out;
+
+ /*
+ * Startup the struct se_device processing thread
+ */
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
+ dev->transport->name);
+ ret = -ENOMEM;
+ goto out_free_alua;
+ }
+
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
+ /*
+ * Preload the initial INQUIRY const values if we are doing
+ * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+ * passthrough because this is being provided by the backend LLD.
+ */
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->t10_wwn.model[0],
+ dev->transport->inquiry_prod, 16);
+ strncpy(&dev->t10_wwn.revision[0],
+ dev->transport->inquiry_rev, 4);
+ }
+
+ scsi_dump_inquiry(dev);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count++;
+ spin_unlock(&hba->device_lock);
+ return 0;
+
+out_free_alua:
+ core_alua_free_lu_gp_mem(dev);
+out:
+ se_release_vpd_for_dev(dev);
+ return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+ struct se_hba *hba = dev->se_hba;
+
+ WARN_ON(!list_empty(&dev->dev_sep_list));
+
+ if (dev->dev_flags & DF_CONFIGURED) {
+ destroy_workqueue(dev->tmr_wq);
+
+ spin_lock(&hba->device_lock);
+ hba->dev_count--;
+ spin_unlock(&hba->device_lock);
+ }
+
+ core_alua_free_lu_gp_mem(dev);
+ core_scsi3_free_all_registrations(dev);
+ se_release_vpd_for_dev(dev);
+
+ dev->transport->free_device(dev);
+}
+
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- struct se_subsystem_dev *se_dev = NULL;
- struct se_subsystem_api *t;
char buf[16];
int ret;
@@ -1581,60 +1520,28 @@ int core_dev_setup_virtual_lun0(void)
if (IS_ERR(hba))
return PTR_ERR(hba);
- lun0_hba = hba;
- t = hba->transport;
-
- se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!se_dev) {
- pr_err("Unable to allocate memory for"
- " struct se_subsystem_dev\n");
+ dev = target_alloc_device(hba, "virt_lun0");
+ if (!dev) {
ret = -ENOMEM;
- goto out;
+ goto out_free_hba;
}
- INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
- spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_pr.registration_lock);
- spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
- INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
- spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
- spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
- se_dev->t10_wwn.t10_sub_dev = se_dev;
- se_dev->t10_alua.t10_sub_dev = se_dev;
- se_dev->se_dev_attrib.da_sub_dev = se_dev;
- se_dev->se_dev_hba = hba;
-
- se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!se_dev->se_dev_su_ptr) {
- pr_err("Unable to locate subsystem dependent pointer"
- " from allocate_virtdevice()\n");
- ret = -ENOMEM;
- goto out;
- }
- lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
- t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+ hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
- dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto out;
- }
- se_dev->se_dev_ptr = dev;
- g_lun0_dev = dev;
+ ret = target_configure_device(dev);
+ if (ret)
+ goto out_free_se_dev;
+ lun0_hba = hba;
+ g_lun0_dev = dev;
return 0;
-out:
- lun0_su_dev = NULL;
- kfree(se_dev);
- if (lun0_hba) {
- core_delete_hba(lun0_hba);
- lun0_hba = NULL;
- }
+
+out_free_se_dev:
+ target_free_device(dev);
+out_free_hba:
+ core_delete_hba(hba);
return ret;
}
@@ -1642,14 +1549,11 @@ out:
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
- struct se_subsystem_dev *su_dev = lun0_su_dev;
if (!hba)
return;
if (g_lun0_dev)
- se_free_virtual_device(g_lun0_dev, hba);
-
- kfree(su_dev);
+ target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
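The target_core_device.c hunks above replace the old se_subsystem_dev allocate/create split with a three-step backend lifecycle. As an illustration only (not part of the patch), the sketch below shows how a caller is expected to drive the new helpers, following the core_dev_setup_virtual_lun0() hunk; the name example_setup_dev and the "rd_pages=8" parameter string are placeholders for whatever the backend accepts.

static int example_setup_dev(struct se_hba *hba)
{
	struct se_device *dev;
	char buf[16];
	int ret;

	/* backend ->alloc_device() via target_alloc_device() */
	dev = target_alloc_device(hba, "example_dev");
	if (!dev)
		return -ENOMEM;

	/* backend-specific parameters, normally written through configfs */
	snprintf(buf, sizeof(buf), "rd_pages=8");
	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	/* backend ->configure_device() plus generic attrib/ALUA setup */
	ret = target_configure_device(dev);
	if (ret) {
		/* backend ->free_device() plus generic teardown */
		target_free_device(dev);
		return ret;
	}
	return 0;
}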
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index bca737bb813..810263dfa4a 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -4,10 +4,9 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
*
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
int ret = 0, lun_access;
+
+ if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+ pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+ " %p to struct lun: %p\n", lun_ci, lun);
+ return -EFAULT;
+ }
/*
* Ensure that the source port exists
*/
@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
}
lacl_cg = &lacl->se_lun_group;
- lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[1] = NULL;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+ ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -734,17 +739,21 @@ static int target_fabric_port_link(
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
- struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun *lun_p;
struct se_portal_group *se_tpg;
- struct se_subsystem_dev *se_dev = container_of(
- to_config_group(se_dev_ci), struct se_subsystem_dev,
- se_dev_group);
+ struct se_device *dev =
+ container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
struct target_fabric_configfs *tf;
int ret;
+ if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+ pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+ " %p to struct se_device: %p\n", se_dev_ci, dev);
+ return -EFAULT;
+ }
+
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
@@ -755,14 +764,6 @@ static int target_fabric_port_link(
return -EEXIST;
}
- dev = se_dev->se_dev_ptr;
- if (!dev) {
- pr_err("Unable to locate struct se_device pointer from"
- " %s\n", config_item_name(se_dev_ci));
- ret = -ENODEV;
- goto out;
- }
-
lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
@@ -869,7 +870,7 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
- lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
pr_err("Unable to allocate lun_cg->default_groups\n");
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index e460d6233a0..687b0b0a4aa 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -4,8 +4,7 @@
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0360383dfb9..b9c88497e8f 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,10 +3,7 @@
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +38,10 @@
#include "target_core_file.h"
-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct fd_dev, dev);
+}
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
- return fd_dev;
+ return &fd_dev->dev;
}
-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int fd_configure_device(struct se_device *dev)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct queue_limits *limits;
- struct fd_dev *fd_dev = p;
- struct fd_host *fd_host = hba->hba_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct file *file;
struct inode *inode = NULL;
- int dev_flags = 0, flags, ret = -EINVAL;
+ int flags, ret = -EINVAL;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
+ }
/*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates.
*/
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
/*
* Optionally allow fd_buffered_io=1 to be enabled for people
* who want to use the fs buffer cache as a WriteCache mechanism.
@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q;
+ struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size;
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(inode->i_bdev);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
- limits->max_hw_sectors = queue_max_hw_sectors(q);
- limits->max_sectors = queue_max_sectors(q);
+
+ dev->dev_attrib.hw_block_size =
+ bdev_logical_block_size(inode->i_bdev);
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
- fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
goto fail;
}
- limits = &dev_limits.limits;
- limits->logical_block_size = FD_BLOCKSIZE;
- limits->max_hw_sectors = FD_MAX_SECTORS;
- limits->max_sectors = FD_MAX_SECTORS;
- fd_dev->fd_block_size = FD_BLOCKSIZE;
+ dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
}
- dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+ fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
- dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, fd_dev,
- &dev_limits, "FILEIO", FD_VERSION);
- if (!dev)
- goto fail;
+ dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
" with FDBD_HAS_BUFFERED_IO_WCE\n");
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+ dev->dev_attrib.emulate_write_cache = 1;
}
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
- return dev;
+ return 0;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
- return ERR_PTR(ret);
+ return ret;
}
-/* fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
{
- struct fd_dev *fd_dev = p;
+ struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
kfree(fd_dev);
}
-static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_nents, int is_write)
{
struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
+ struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (cmd->t_task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
+ loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
+ if (is_write)
+ ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ else
+ ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
set_fs(old_fs);
+ for_each_sg(sgl, sg, sgl_nents, i)
+ kunmap(sg_page(sg));
+
kfree(iov);
- /*
- * Return zeros and GOOD status even if the READ did not return
- * the expected virt_size for struct file w/o a backing struct
- * block_device.
- */
- if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+
+ if (is_write) {
if (ret < 0 || ret != cmd->data_length) {
- pr_err("vfs_readv() returned %d,"
- " expecting %d for S_ISBLK\n", ret,
- (int)cmd->data_length);
+ pr_err("%s() write returned %d\n", __func__, ret);
return (ret < 0 ? ret : -EINVAL);
}
} else {
- if (ret < 0) {
- pr_err("vfs_readv() returned %d for non"
- " S_ISBLK\n", ret);
- return ret;
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+ * the expected virt_size for struct file w/o a backing struct
+ * block_device.
+ */
+ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+ if (ret < 0 || ret != cmd->data_length) {
+ pr_err("%s() returned %d, expecting %u for "
+ "S_ISBLK\n", __func__, ret,
+ cmd->data_length);
+ return (ret < 0 ? ret : -EINVAL);
+ }
+ } else {
+ if (ret < 0) {
+ pr_err("%s() returned %d for non S_ISBLK\n",
+ __func__, ret);
+ return ret;
+ }
}
}
-
- return 1;
-}
-
-static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents)
-{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = se_dev->dev_ptr;
- struct file *fd = dev->fd_file;
- struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- loff_t pos = (cmd->t_task_lba *
- se_dev->se_sub_dev->se_dev_attrib.block_size);
- int ret, i = 0;
-
- iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
- if (!iov) {
- pr_err("Unable to allocate fd_do_writev iov[]\n");
- return -ENOMEM;
- }
-
- for_each_sg(sgl, sg, sgl_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = sg_virt(sg);
- }
-
- old_fs = get_fs();
- set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
- set_fs(old_fs);
-
- kfree(iov);
-
- if (ret < 0 || ret != cmd->data_length) {
- pr_err("vfs_writev() returned %d\n", ret);
- return (ret < 0 ? ret : -EINVAL);
- }
-
return 1;
}
-static int fd_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
if (immed)
return 0;
- if (ret) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
- } else {
+ else
target_complete_cmd(cmd, SAM_STAT_GOOD);
- }
return 0;
}
-static int fd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
- ret = fd_do_readv(cmd, sgl, sgl_nents);
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
} else {
- ret = fd_do_writev(cmd, sgl, sgl_nents);
+ ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
* Perform implicit vfs_fsync_range() for fd_do_writev() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
loff_t start = cmd->t_task_lba *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ dev->dev_attrib.block_size;
loff_t end = start + cmd->data_length;
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
}
- if (ret < 0) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return ret;
- }
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
@@ -430,12 +381,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t fd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -502,24 +451,9 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
- if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- pr_err("Missing fd_dev_name=\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
return bl;
}
-/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-/* fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
-}
-
static sector_t fd_get_blocks(struct se_device *dev)
{
- struct fd_dev *fd_dev = dev->dev_ptr;
+ struct fd_dev *fd_dev = FD_DEV(dev);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
else
dev_size = fd_dev->fd_dev_size;
- return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+ return div_u64(dev_size, dev->dev_attrib.block_size);
}
-static struct spc_ops fd_spc_ops = {
+static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
};
-static int fd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &fd_spc_ops);
+ return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
static struct se_subsystem_api fileio_template = {
.name = "fileio",
+ .inquiry_prod = "FILEIO",
+ .inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
- .allocate_virtdevice = fd_allocate_virtdevice,
- .create_virtdevice = fd_create_virtdevice,
+ .alloc_device = fd_alloc_device,
+ .configure_device = fd_configure_device,
.free_device = fd_free_device,
.parse_cdb = fd_parse_cdb,
- .check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
- .get_device_rev = fd_get_device_rev,
- .get_device_type = fd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
};
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 876ae53ef5b..bc02b018ae4 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -17,6 +17,8 @@
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
struct fd_dev {
+ struct se_device dev;
+
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */
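The fd_dev change above is the general pattern this series applies to every backend: the generic struct se_device is embedded directly in the backend-private structure, and a container_of() accessor recovers the backend struct from the pointer returned by ->alloc_device(). A minimal sketch of that pattern follows; struct example_dev and EXAMPLE_DEV() are illustrative names, not part of the patch.

struct example_dev {
	struct se_device dev;	/* embedded generic device handed out by ->alloc_device() */
	u32 example_flags;	/* backend-private state */
};

static inline struct example_dev *EXAMPLE_DEV(struct se_device *dev)
{
	/* recover the backend structure from the generic se_device pointer */
	return container_of(dev, struct example_dev, dev);
}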
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 3dd1bd4b6f7..d2616cd48f1 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -3,10 +3,7 @@
*
* This file contains the TCM HBA Transport related functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
@@ -152,8 +148,7 @@ out_free_hba:
int
core_delete_hba(struct se_hba *hba)
{
- if (!list_empty(&hba->hba_dev_list))
- dump_stack();
+ WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 57d7674c501..b526d23dcd4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -50,9 +47,13 @@
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct iblock_dev, dev);
+}
+
-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
}
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
- return ib_dev;
+ return &ib_dev->dev;
}
-static struct se_device *iblock_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int iblock_configure_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct block_device *bd = NULL;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct queue_limits *limits;
- u32 dev_flags = 0;
+ struct block_device *bd = NULL;
fmode_t mode;
- int ret = -EINVAL;
+ int ret = -ENOMEM;
- if (!ib_dev) {
- pr_err("Unable to locate struct iblock_dev parameter\n");
- return ERR_PTR(ret);
+ if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
if (!ib_dev->ibd_bio_set) {
- pr_err("IBLOCK: Unable to create bioset()\n");
- return ERR_PTR(-ENOMEM);
+ pr_err("IBLOCK: Unable to create bioset\n");
+ goto out;
}
- pr_debug("IBLOCK: Created bio_set()\n");
- /*
- * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
- * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
- */
+
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- goto failed;
+ goto out_free_bioset;
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = bdev_get_queue(bd);
- limits = &dev_limits.limits;
- limits->logical_block_size = bdev_logical_block_size(bd);
- limits->max_hw_sectors = UINT_MAX;
- limits->max_sectors = UINT_MAX;
- dev_limits.hw_queue_depth = q->nr_requests;
- dev_limits.queue_depth = q->nr_requests;
-
ib_dev->ibd_bd = bd;
- dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, ib_dev,
- &dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!dev)
- goto failed;
+ q = bdev_get_queue(bd);
+
+ dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
* Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+ dev->dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
+
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity =
q->limits.discard_granularity >> 9;
- dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+ dev->dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ /*
+ * Enable write same emulation for IBLOCK, capping max_write_same_len at
+ * 0xFFFF since the smaller WRITE_SAME(10) carries only a two-byte block count.
+ */
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
if (blk_queue_nonrot(q))
- dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
- return dev;
+ dev->dev_attrib.is_nonrot = 1;
+ return 0;
-failed:
- if (ib_dev->ibd_bio_set) {
- bioset_free(ib_dev->ibd_bio_set);
- ib_dev->ibd_bio_set = NULL;
- }
- ib_dev->ibd_bd = NULL;
- return ERR_PTR(ret);
+out_free_bioset:
+ bioset_free(ib_dev->ibd_bio_set);
+ ib_dev->ibd_bio_set = NULL;
+out:
+ return ret;
}
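iblock_configure_device() above now unwinds failures through staged out_free_bioset/out labels instead of a single catch-all branch that had to test which resources had been set up. A generic sketch of that goto-cleanup idiom; the heap buffer and FILE * below are invented stand-ins for the bioset and the claimed block device:

#include <stdio.h>
#include <stdlib.h>

static int configure(void)
{
    char *pool = NULL;
    FILE *backing;
    int ret = -1;

    pool = malloc(128);                 /* stands in for bioset_create() */
    if (!pool)
        goto out;

    backing = fopen("/dev/null", "r");  /* stands in for blkdev_get_by_path() */
    if (!backing)
        goto out_free_pool;

    /* ...apply queue limits, discard support, rotational flag, etc... */

    fclose(backing);
    free(pool);
    return 0;

out_free_pool:
    free(pool);                         /* release only what was acquired */
out:
    return ret;
}

int main(void)
{
    return configure() ? 1 : 0;
}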
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
{
- struct iblock_dev *ib_dev = p;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+ if (block_size == dev->dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+ switch (dev->dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr = cmd->priv;
+ u8 status;
+
+ if (!atomic_dec_and_test(&ibr->pending))
+ return;
+
+ if (atomic_read(&ibr->ib_bio_err_cnt))
+ status = SAM_STAT_CHECK_CONDITION;
+ else
+ status = SAM_STAT_GOOD;
+
+ target_complete_cmd(cmd, status);
+ kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+ struct se_cmd *cmd = bio->bi_private;
+ struct iblock_req *ibr = cmd->priv;
+
+ /*
+ * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+ */
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+ err = -EIO;
+
+ if (err != 0) {
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+ " err: %d\n", bio, err);
+ /*
+ * Bump the ib_bio_err_cnt and release bio.
+ */
+ atomic_inc(&ibr->ib_bio_err_cnt);
+ smp_mb__after_atomic_inc();
+ }
+
+ bio_put(bio);
+
+ iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+ struct bio *bio;
+
+ /*
+ * Only allocate as many vector entries as the bio code allows us to,
+ * we'll loop later on until we have handled the whole request.
+ */
+ if (sg_num > BIO_MAX_PAGES)
+ sg_num = BIO_MAX_PAGES;
+
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
+ return NULL;
+ }
+
+ bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_private = cmd;
+ bio->bi_end_io = &iblock_bio_done;
+ bio->bi_sector = lba;
+
+ return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+ struct blk_plug plug;
+ struct bio *bio;
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(list)))
+ submit_bio(rw, bio);
+ blk_finish_plug(&plug);
+}
+
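The iblock_complete_cmd()/iblock_bio_done() pair above finishes the SCSI command only when the last outstanding bio drops the shared pending count to zero, with a separate error counter deciding between GOOD and CHECK CONDITION. A standalone sketch of that split-completion bookkeeping using C11 atomics; struct req and the function names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

struct req {
    atomic_int pending;                /* bios still outstanding */
    atomic_int err_cnt;                /* bios that completed with an error */
};

static void complete_if_last(struct req *r)
{
    /* Only the completion that takes pending from 1 to 0 reports status. */
    if (atomic_fetch_sub(&r->pending, 1) != 1)
        return;
    printf("command done, status: %s\n",
           atomic_load(&r->err_cnt) ? "CHECK CONDITION" : "GOOD");
}

static void bio_done(struct req *r, int err)
{
    if (err)
        atomic_fetch_add(&r->err_cnt, 1);
    complete_if_last(r);
}

int main(void)
{
    struct req r = { .pending = 3 };   /* e.g. three bios were submitted */

    bio_done(&r, 0);
    bio_done(&r, -5);                  /* one bio fails with -EIO */
    bio_done(&r, 0);                   /* the last completion reports status */
    return 0;
}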
static void iblock_end_io_flush(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd) {
- if (err) {
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (err)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
- } else {
+ else
target_complete_cmd(cmd, SAM_STAT_GOOD);
- }
}
bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
{
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct iblock_dev *ibd = dev->dev_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
unsigned char *buf, *ptr = NULL;
sector_t lba;
int size;
u32 range;
- int ret = 0;
- int dl, bd_dl;
+ sense_reason_t ret = 0;
+ int dl, bd_dl, err;
if (cmd->data_length < 8) {
pr_warn("UNMAP parameter list length %u too small\n",
cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
dl = get_unaligned_be16(&buf[0]);
bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
else
size = bd_dl;
- if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
- if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ if (range > dev->dev_attrib.max_unmap_lba_count) {
+ ret = TCM_INVALID_PARAMETER_LIST;
goto err;
}
if (lba + range > dev->transport->get_blocks(dev) + 1) {
- cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
- ret = -EINVAL;
+ ret = TCM_ADDRESS_OUT_OF_RANGE;
goto err;
}
- ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+ err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
GFP_KERNEL, 0);
- if (ret < 0) {
+ if (err < 0) {
pr_err("blkdev_issue_discard() failed: %d\n",
- ret);
+ err);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto err;
}
@@ -397,23 +456,86 @@ err:
return ret;
}
-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
- struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
- int ret;
-
- ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
- spc_get_write_same_sectors(cmd), GFP_KERNEL,
- 0);
- if (ret < 0) {
- pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
- return ret;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+ int rc;
+
+ rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+ spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+ if (rc < 0) {
+ pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
target_complete_cmd(cmd, GOOD);
return 0;
}
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+ struct iblock_req *ibr;
+ struct scatterlist *sg;
+ struct bio *bio;
+ struct bio_list list;
+ sector_t block_lba = cmd->t_task_lba;
+ sector_t sectors = spc_get_write_same_sectors(cmd);
+
+ sg = &cmd->t_data_sg[0];
+
+ if (cmd->t_data_nents > 1 ||
+ sg->length != cmd->se_dev->dev_attrib.block_size) {
+ pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+ " block_size: %u\n", cmd->t_data_nents, sg->length,
+ cmd->se_dev->dev_attrib.block_size);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+ if (!ibr)
+ goto fail;
+ cmd->priv = ibr;
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_free_ibr;
+
+ bio_list_init(&list);
+ bio_list_add(&list, bio);
+
+ atomic_set(&ibr->pending, 1);
+
+ while (sectors) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+
+ bio = iblock_get_bio(cmd, block_lba, 1);
+ if (!bio)
+ goto fail_put_bios;
+
+ atomic_inc(&ibr->pending);
+ bio_list_add(&list, bio);
+ }
+
+ /* Always in 512 byte units for Linux/Block */
+ block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ sectors -= 1;
+ }
+
+ iblock_submit_bios(&list, WRITE);
+ return 0;
+
+fail_put_bios:
+ while ((bio = bio_list_pop(&list)))
+ bio_put(bio);
+fail_free_ibr:
+ kfree(ibr);
+fail:
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
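iblock_execute_write_same() above fans one block-sized scatterlist entry out across the whole LBA range, re-adding the same page to successive bios until the sector count is consumed. The trivial userspace analogue below writes one source block repeatedly into a plain file; BLOCK_SIZE, write_same() and the use of stdio are illustrative simplifications, not the bio-based path:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 512

/* Write the same block 'count' times starting at block address 'lba'. */
static int write_same(FILE *f, const unsigned char *block,
                      unsigned long lba, unsigned long count)
{
    if (fseek(f, (long)(lba * BLOCK_SIZE), SEEK_SET))
        return -1;
    while (count--) {
        if (fwrite(block, 1, BLOCK_SIZE, f) != BLOCK_SIZE)
            return -1;
    }
    return 0;
}

int main(void)
{
    unsigned char block[BLOCK_SIZE];
    FILE *f = tmpfile();

    if (!f)
        return 1;
    memset(block, 0xAB, sizeof(block));
    if (write_same(f, block, 8, 16)) {  /* 16 copies of one block at LBA 8 */
        fclose(f);
        return 1;
    }
    fclose(f);
    return 0;
}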
enum {
Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
@@ -425,11 +547,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
@@ -491,43 +612,26 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t iblock_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
- if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- pr_err("Missing udev_path= parameters for IBLOCK\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
- struct block_device *bd = ibd->ibd_bd;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
- if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+ if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s",
- ibd->ibd_udev_path);
- bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);
+ ib_dev->ibd_udev_path);
+ bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == ibd) ?
+ "" : (bd->bd_holder == ib_dev) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
return bl;
}
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
- struct iblock_req *ibr = cmd->priv;
- u8 status;
-
- if (!atomic_dec_and_test(&ibr->pending))
- return;
-
- if (atomic_read(&ibr->ib_bio_err_cnt))
- status = SAM_STAT_CHECK_CONDITION;
- else
- status = SAM_STAT_GOOD;
-
- target_complete_cmd(cmd, status);
- kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
- struct bio *bio;
-
- /*
- * Only allocate as many vector entries as the bio code allows us to,
- * we'll loop later on until we have handled the whole request.
- */
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
-
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!bio) {
- pr_err("Unable to allocate memory for bio\n");
- return NULL;
- }
-
- bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = cmd;
- bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
- return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
- struct blk_plug plug;
- struct bio *bio;
-
- blk_start_plug(&plug);
- while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
- blk_finish_plug(&plug);
-}
-
-static int iblock_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
@@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ if (dev->dev_attrib.emulate_write_cache == 0 ||
+ (dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
@@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
* Convert the blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
- if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+ if (dev->dev_attrib.block_size == 4096)
block_lba = (cmd->t_task_lba << 3);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+ else if (dev->dev_attrib.block_size == 2048)
block_lba = (cmd->t_task_lba << 2);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+ else if (dev->dev_attrib.block_size == 1024)
block_lba = (cmd->t_task_lba << 1);
- else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+ else if (dev->dev_attrib.block_size == 512)
block_lba = cmd->t_task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOSYS;
+ " %u\n", dev->dev_attrib.block_size);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -697,83 +747,48 @@ fail_put_bios:
bio_put(bio);
fail_free_ibr:
kfree(ibr);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
- return -ENOMEM;
-}
-
-static u32 iblock_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 iblock_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
- struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
-static void iblock_bio_done(struct bio *bio, int err)
-{
- struct se_cmd *cmd = bio->bi_private;
- struct iblock_req *ibr = cmd->priv;
-
- /*
- * Set -EIO if !BIO_UPTODATE and the passed is still err=0
- */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
- err = -EIO;
-
- if (err != 0) {
- pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
- " err: %d\n", bio, err);
- /*
- * Bump the ib_bio_err_cnt and release bio.
- */
- atomic_inc(&ibr->ib_bio_err_cnt);
- smp_mb__after_atomic_inc();
- }
-
- bio_put(bio);
-
- iblock_complete_cmd(cmd);
-}
-
-static struct spc_ops iblock_spc_ops = {
+static struct sbc_ops iblock_sbc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
+ .execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap,
};
-static int iblock_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &iblock_spc_ops);
+ return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
+ .inquiry_prod = "IBLOCK",
+ .inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
- .allocate_virtdevice = iblock_allocate_virtdevice,
- .create_virtdevice = iblock_create_virtdevice,
+ .alloc_device = iblock_alloc_device,
+ .configure_device = iblock_configure_device,
.free_device = iblock_free_device,
.parse_cdb = iblock_parse_cdb,
- .check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
- .get_device_rev = iblock_get_device_rev,
- .get_device_type = iblock_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
};
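The template above now registers its handlers through struct sbc_ops (renamed from spc_ops, with a separate execute_write_same_unmap hook) and lets sbc_parse_cdb() choose the callback per CDB opcode, while sbc_get_device_type() replaces the per-backend get_device_type/get_device_rev copies. A toy dispatch sketch in the same spirit; the structure, sense values and opcode coverage below are illustrative, not the kernel's sbc_parse_cdb():

#include <stdio.h>

typedef int sense_t;                   /* 0 == good, non-zero == sense code */
#define TCM_UNSUPPORTED_OPCODE 1

struct cmd {
    unsigned char cdb0;                /* first CDB byte: the opcode */
};

struct sbc_like_ops {
    sense_t (*execute_rw)(struct cmd *);
    sense_t (*execute_sync_cache)(struct cmd *);
};

static sense_t my_rw(struct cmd *c)
{
    (void)c;
    printf("READ/WRITE path\n");
    return 0;
}

static sense_t my_sync_cache(struct cmd *c)
{
    (void)c;
    printf("SYNCHRONIZE CACHE path\n");
    return 0;
}

static const struct sbc_like_ops ops = {
    .execute_rw         = my_rw,
    .execute_sync_cache = my_sync_cache,
};

/* Very rough stand-in for sbc_parse_cdb(): map opcode -> backend callback. */
static sense_t parse_cdb(struct cmd *c, const struct sbc_like_ops *o)
{
    switch (c->cdb0) {
    case 0x28:                         /* READ(10)  */
    case 0x2a:                         /* WRITE(10) */
        return o->execute_rw(c);
    case 0x35:                         /* SYNCHRONIZE CACHE(10) */
        return o->execute_sync_cache(c);
    default:
        return TCM_UNSUPPORTED_OPCODE;
    }
}

int main(void)
{
    struct cmd c = { .cdb0 = 0x28 };

    return parse_cdb(&c, &ops);
}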
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 533627ae79e..01c2afd8150 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -14,6 +14,7 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev {
+ struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
struct bio_set *ibd_bio_set;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0fd428225d1..93e9c1f580b 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
-int target_report_luns(struct se_cmd *);
-void se_release_device_for_hba(struct se_device *);
-void se_release_vpd_for_dev(struct se_device *);
-int se_free_virtual_device(struct se_device *, struct se_hba *);
-int se_dev_check_online(struct se_device *);
-int se_dev_check_shutdown(struct se_device *);
-void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int target_configure_device(struct se_device *dev);
+void target_free_device(struct se_device *);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -105,10 +102,11 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
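The prototype changes in target_core_internal.h track the wider switch from "return -EINVAL and set cmd->scsi_sense_reason on the side" to returning a sense_reason_t directly, where 0 means success and any non-zero value names the sense to report. A hedged sketch of that convention; the enum values below are placeholders and do not match the kernel's numeric codes:

#include <stdio.h>

typedef enum {
    TCM_NO_SENSE = 0,                            /* success */
    TCM_INVALID_PARAMETER_LIST,
    TCM_RESERVATION_CONFLICT,
    TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
} sense_reason_t;

/* Callers no longer combine an errno-style return with a side channel;
 * the single return value carries the whole outcome. */
static sense_reason_t check_size(unsigned int data_length)
{
    if (data_length < 8)
        return TCM_INVALID_PARAMETER_LIST;
    return TCM_NO_SENSE;
}

int main(void)
{
    sense_reason_t rc = check_size(4);

    if (rc)
        fprintf(stderr, "failing command with sense %d\n", rc);
    return rc ? 1 : 0;
}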
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8c323a98c4a..e35dbf85841 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4,8 +4,7 @@
* This file contains SPC-3 compliant persistent reservations and
* legacy SPC-2 reservations with compatible reservation handling (CRH=1)
*
- * Copyright (c) 2009, 2010 Rising Tide Systems
- * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -68,49 +67,33 @@ int core_pr_dump_initiator_port(
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int);
-static int core_scsi2_reservation_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+static sense_reason_t
+target_scsi2_reservation_check(struct se_cmd *cmd)
{
- switch (cdb[0]) {
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case RELEASE:
case RELEASE_10:
return 0;
default:
- return 1;
+ break;
}
- return 1;
-}
-
-static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
+ if (!dev->dev_reserved_node_acl || !sess)
return 0;
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- if (dev->dev_reserved_node_acl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
+ if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ return TCM_RESERVATION_CONFLICT;
+
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+ if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+ return TCM_RESERVATION_CONFLICT;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
- spin_unlock(&dev->dev_reservation_lock);
- return ret;
+ return 0;
}
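target_scsi2_reservation_check() above first lets a short allow-list of CDBs (INQUIRY and the RELEASE variants) through regardless of any reservation, then compares the reserving node ACL and, when an ISID was recorded with the reservation, the session's ISID. A compact standalone sketch of that ordering; the opcode values are the standard SCSI ones, everything else is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define INQUIRY    0x12
#define RELEASE_6  0x17
#define RELEASE_10 0x57

struct reservation { bool held; int holder_id; bool has_isid; long isid; };
struct session     { int node_id; long isid; };

/* Returns 0 when the command may proceed, -1 for RESERVATION CONFLICT. */
static int spc2_check(unsigned char opcode, const struct reservation *res,
                      const struct session *sess)
{
    switch (opcode) {
    case INQUIRY:
    case RELEASE_6:
    case RELEASE_10:
        return 0;                      /* always allowed, reserved or not */
    default:
        break;
    }
    if (!res->held || !sess)
        return 0;
    if (res->holder_id != sess->node_id)
        return -1;
    if (res->has_isid && res->isid != sess->isid)
        return -1;
    return 0;
}

int main(void)
{
    struct reservation res = { .held = true, .holder_id = 1, .has_isid = false };
    struct session sess    = { .node_id = 2 };

    printf("INQUIRY: %d, WRITE(10): %d\n",
           spc2_check(INQUIRY, &res, &sess),
           spc2_check(0x2a, &res, &sess));
    return 0;
}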
static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
@@ -120,15 +103,11 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
{
struct se_session *se_sess = cmd->se_sess;
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
- int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
int conflict = 0;
- if (!crh)
- return -EINVAL;
-
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
if (pr_reg) {
@@ -186,32 +165,28 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
return 0;
}
-int target_scsi2_reservation_release(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_release(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg;
- int ret = 0, rc;
+ int rc;
if (!sess || !sess->se_tpg)
goto out;
rc = target_check_scsi2_reservation_conflict(cmd);
if (rc == 1)
goto out;
- else if (rc < 0) {
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
- }
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
- ret = 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_reserved_node_acl || !sess)
goto out_unlock;
@@ -223,10 +198,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
goto out_unlock;
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
- if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
dev->dev_res_bin_isid = 0;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
}
tpg = sess->se_tpg;
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
@@ -237,25 +212,24 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- if (!ret)
- target_complete_cmd(cmd, GOOD);
- return ret;
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
-int target_scsi2_reservation_reserve(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_reserve(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg;
- int ret = 0, rc;
+ sense_reason_t ret = 0;
+ int rc;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = -EINVAL;
- goto out;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
* This is currently the case for target_core_mod passthrough struct se_cmd
@@ -266,13 +240,10 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
rc = target_check_scsi2_reservation_conflict(cmd);
if (rc == 1)
goto out;
- else if (rc < 0) {
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
+ if (rc < 0)
+ return TCM_RESERVATION_CONFLICT;
+
tpg = sess->se_tpg;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
@@ -286,16 +257,15 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out_unlock;
}
dev->dev_reserved_node_acl = sess->se_node_acl;
- dev->dev_flags |= DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
- dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+ dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -319,9 +289,9 @@ out:
*/
static int core_scsi3_pr_seq_non_holder(
struct se_cmd *cmd,
- unsigned char *cdb,
u32 pr_reg_type)
{
+ unsigned char *cdb = cmd->t_task_cdb;
struct se_dev_entry *se_deve;
struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
@@ -330,17 +300,11 @@ static int core_scsi3_pr_seq_non_holder(
int we = 0; /* Write Exclusive */
int legacy = 0; /* Act like a legacy device and return
* RESERVATION CONFLICT on some CDBs */
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_seq_non_holder(cmd,
- cdb, pr_reg_type);
se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Determine if the registration should be ignored due to
- * non-matching ISIDs in core_scsi3_pr_reservation_check().
+ * non-matching ISIDs in target_scsi3_pr_reservation_check().
*/
ignore_reg = (pr_reg_type & 0x80000000);
if (ignore_reg)
@@ -563,10 +527,41 @@ static int core_scsi3_pr_seq_non_holder(
return 1; /* Conflict by default */
}
+static sense_reason_t
+target_scsi3_pr_reservation_check(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ u32 pr_reg_type;
+
+ if (!dev->dev_pr_res_holder)
+ return 0;
+
+ pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+ cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+ if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl)
+ goto check_nonholder;
+
+ if (dev->dev_pr_res_holder->isid_present_at_reg) {
+ if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
+ sess->sess_bin_isid) {
+ pr_reg_type |= 0x80000000;
+ goto check_nonholder;
+ }
+ }
+
+ return 0;
+
+check_nonholder:
+ if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+ return TCM_RESERVATION_CONFLICT;
+ return 0;
+}
+
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
+
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
* counter maintained by the device server.
@@ -577,56 +572,12 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = su_dev->t10_pr.pr_generation++;
+ prg = dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
}
-static int core_scsi3_pr_reservation_check(
- struct se_cmd *cmd,
- u32 *pr_reg_type)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_session *sess = cmd->se_sess;
- int ret;
-
- if (!sess)
- return 0;
- /*
- * A legacy SPC-2 reservation is being held.
- */
- if (dev->dev_flags & DF_SPC2_RESERVATIONS)
- return core_scsi2_reservation_check(cmd, pr_reg_type);
-
- spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_pr_res_holder) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
- cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
- if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
- spin_unlock(&dev->dev_reservation_lock);
- return -EINVAL;
- }
- if (!dev->dev_pr_res_holder->isid_present_at_reg) {
- spin_unlock(&dev->dev_reservation_lock);
- return 0;
- }
- ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -EINVAL;
- /*
- * Use bit in *pr_reg_type to notify ISID mismatch in
- * core_scsi3_pr_seq_non_holder().
- */
- if (ret != 0)
- *pr_reg_type |= 0x80000000;
- spin_unlock(&dev->dev_reservation_lock);
-
- return ret;
-}
-
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
@@ -636,7 +587,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -645,7 +595,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
+ pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
if (!pr_reg->pr_aptpl_buf) {
pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
@@ -929,7 +879,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -996,11 +946,10 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
- if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1051,10 +1000,9 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1066,7 +1014,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- su_dev->t10_pr.pr_generation++ :
+ dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1135,7 +1083,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1160,7 +1108,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* for fabric modules (iSCSI) requiring them.
*/
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
+ if (dev->dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1274,7 +1222,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
@@ -1335,7 +1283,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1365,7 +1313,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1479,7 +1427,8 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
smp_mb__after_atomic_dec();
}
-static int core_scsi3_decode_spec_i_port(
+static sense_reason_t
+core_scsi3_decode_spec_i_port(
struct se_cmd *cmd,
struct se_portal_group *tpg,
unsigned char *l_isid,
@@ -1501,8 +1450,9 @@ static int core_scsi3_decode_spec_i_port(
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ sense_reason_t ret;
u32 tpdl, tid_len = 0;
- int ret, dest_local_nexus, prf_isid;
+ int dest_local_nexus, prf_isid;
u32 dest_rtpi = 0;
memset(dest_iport, 0, 64);
@@ -1517,8 +1467,7 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1530,8 +1479,7 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1545,12 +1493,16 @@ static int core_scsi3_decode_spec_i_port(
if (cmd->data_length < 28) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1565,9 +1517,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
* Start processing the received transport IDs using the
@@ -1610,16 +1561,13 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_inc();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(tmp_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
/*
* Locate the destination initiator ACL to be registered
@@ -1641,17 +1589,14 @@ static int core_scsi3_decode_spec_i_port(
continue;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
dest_tpg = tmp_tpg;
@@ -1668,9 +1613,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
@@ -1683,9 +1627,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
/*
* Locate the destination struct se_dev_entry pointer for matching
@@ -1702,23 +1645,19 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
@@ -1754,10 +1693,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -ENOMEM;
- goto out;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_unmap;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = dest_tpg;
@@ -1788,9 +1725,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
@@ -1848,8 +1784,9 @@ static int core_scsi3_decode_spec_i_port(
}
return 0;
-out:
+out_unmap:
transport_kunmap_data_sg(cmd);
+out:
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependencies..
@@ -1899,7 +1836,6 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1917,8 +1853,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1963,7 +1899,7 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1981,13 +1917,13 @@ static int __core_scsi3_update_aptpl_buf(
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
@@ -2019,7 +1955,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+ struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -2065,14 +2001,15 @@ static int __core_scsi3_write_aptpl_to_file(
return 0;
}
-static int core_scsi3_update_and_write_aptpl(
- struct se_device *dev,
- unsigned char *in_buf,
- u32 in_pr_aptpl_buf_len)
+static int
+core_scsi3_update_and_write_aptpl(struct se_device *dev, unsigned char *in_buf,
+ u32 in_pr_aptpl_buf_len)
{
unsigned char null_buf[64], *buf;
u32 pr_aptpl_buf_len;
- int ret, clear_aptpl_metadata = 0;
+ int clear_aptpl_metadata = 0;
+ int ret;
+
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
@@ -2094,25 +2031,17 @@ static int core_scsi3_update_and_write_aptpl(
clear_aptpl_metadata);
if (ret != 0)
return ret;
+
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
- ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
- if (ret != 0)
- return ret;
-
- return ret;
+ return __core_scsi3_write_aptpl_to_file(dev, buf, 0);
}
-static int core_scsi3_emulate_pro_register(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int all_tg_pt,
- int spec_i_pt,
- int ignore_key)
+static sense_reason_t
+core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+ int aptpl, int all_tg_pt, int spec_i_pt, int ignore_key)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
@@ -2120,16 +2049,16 @@ static int core_scsi3_emulate_pro_register(
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- int pr_holder = 0, ret = 0, type;
+ sense_reason_t ret;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
se_tpg = se_sess->se_tpg;
se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2148,8 +2077,7 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
@@ -2163,15 +2091,13 @@ static int core_scsi3_emulate_pro_register(
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
- ignore_key, 0);
- if (ret != 0) {
+ ignore_key, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
} else {
/*
@@ -2205,201 +2131,192 @@ static int core_scsi3_emulate_pro_register(
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_tmpl->pr_aptpl_active = 1;
pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
- core_scsi3_put_pr_reg(pr_reg);
- return ret;
- } else {
- /*
- * Locate the existing *pr_reg via struct se_node_acl pointers
- */
- pr_reg = pr_reg_e;
- type = pr_reg->pr_res_type;
-
- if (!ignore_key) {
- if (res_key != pr_reg->pr_res_key) {
- pr_err("SPC-3 PR REGISTER: Received"
- " res_key: 0x%016Lx does not match"
- " existing SA REGISTER res_key:"
- " 0x%016Lx\n", res_key,
- pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
- }
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * Locate the existing *pr_reg via struct se_node_acl pointers
+ */
+ pr_reg = pr_reg_e;
+ type = pr_reg->pr_res_type;
+
+ if (!ignore_key) {
+ if (res_key != pr_reg->pr_res_key) {
+ pr_err("SPC-3 PR REGISTER: Received"
+ " res_key: 0x%016Lx does not match"
+ " existing SA REGISTER res_key:"
+ " 0x%016Lx\n", res_key,
+ pr_reg->pr_res_key);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
- if (spec_i_pt) {
- pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
- " set while sa_res_key=0\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ }
+
+ if (spec_i_pt) {
+ pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
+ " set while sa_res_key=0\n");
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * An existing ALL_TG_PT=1 registration being released
+ * must also set ALL_TG_PT=1 in the incoming PROUT.
+ */
+ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+ pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ " registration exists, but ALL_TG_PT=1 bit not"
+ " present in received PROUT\n");
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out_put_pr_reg;
+ }
+
+ /*
+ * Allocate APTPL metadata buffer used for UNREGISTER ops
+ */
+ if (aptpl) {
+ pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+ GFP_KERNEL);
+ if (!pr_aptpl_buf) {
+ pr_err("Unable to allocate"
+ " pr_aptpl_buf\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
}
- /*
- * An existing ALL_TG_PT=1 registration being released
- * must also set ALL_TG_PT=1 in the incoming PROUT.
- */
- if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
- " registration exists, but ALL_TG_PT=1 bit not"
- " present in received PROUT\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ }
+
+ /*
+ * sa_res_key=0: Unregister Reservation Key for registered I_T Nexus.
+ * sa_res_key=1: Change Reservation Key for registered I_T Nexus.
+ */
+ if (!sa_res_key) {
+ pr_holder = core_scsi3_check_implict_release(
+ cmd->se_dev, pr_reg);
+ if (pr_holder < 0) {
+ kfree(pr_aptpl_buf);
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
+
+ spin_lock(&pr_tmpl->registration_lock);
/*
- * Allocate APTPL metadata buffer used for UNREGISTER ops
+ * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+ * and matching pr_res_key.
*/
- if (aptpl) {
- pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
- GFP_KERNEL);
- if (!pr_aptpl_buf) {
- pr_err("Unable to allocate"
- " pr_aptpl_buf\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ if (pr_reg->pr_reg_all_tg_pt) {
+ list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ if (!pr_reg_p->pr_reg_all_tg_pt)
+ continue;
+ if (pr_reg_p->pr_res_key != res_key)
+ continue;
+ if (pr_reg == pr_reg_p)
+ continue;
+ if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg_p->pr_reg_nacl->initiatorname))
+ continue;
+
+ __core_scsi3_free_registration(dev,
+ pr_reg_p, NULL, 0);
}
}
+
/*
- * sa_res_key=0 Unregister Reservation Key for registered I_T
- * Nexus sa_res_key=1 Change Reservation Key for registered I_T
- * Nexus.
+ * Release the calling I_T Nexus registration now..
*/
- if (!sa_res_key) {
- pr_holder = core_scsi3_check_implict_release(
- cmd->se_dev, pr_reg);
- if (pr_holder < 0) {
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
- }
-
- spin_lock(&pr_tmpl->registration_lock);
- /*
- * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
- * and matching pr_res_key.
- */
- if (pr_reg->pr_reg_all_tg_pt) {
- list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- if (!pr_reg_p->pr_reg_all_tg_pt)
- continue;
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
- if (pr_reg_p->pr_res_key != res_key)
- continue;
-
- if (pr_reg == pr_reg_p)
- continue;
-
- if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
- pr_reg_p->pr_reg_nacl->initiatorname))
- continue;
-
- __core_scsi3_free_registration(dev,
- pr_reg_p, NULL, 0);
- }
- }
- /*
- * Release the calling I_T Nexus registration now..
- */
- __core_scsi3_free_registration(cmd->se_dev, pr_reg,
- NULL, 1);
- /*
- * From spc4r17, section 5.7.11.3 Unregistering
- *
- * If the persistent reservation is a registrants only
- * type, the device server shall establish a unit
- * attention condition for the initiator port associated
- * with every registered I_T nexus except for the I_T
- * nexus on which the PERSISTENT RESERVE OUT command was
- * received, with the additional sense code set to
- * RESERVATIONS RELEASED.
- */
- if (pr_holder &&
- ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
- (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
- list_for_each_entry(pr_reg_p,
- &pr_tmpl->registration_list,
- pr_reg_list) {
-
- core_scsi3_ua_allocate(
- pr_reg_p->pr_reg_nacl,
- pr_reg_p->pr_res_mapped_lun,
- 0x2A,
- ASCQ_2AH_RESERVATIONS_RELEASED);
- }
+ /*
+ * From spc4r17, section 5.7.11.3 Unregistering
+ *
+ * If the persistent reservation is a registrants only
+ * type, the device server shall establish a unit
+ * attention condition for the initiator port associated
+ * with every registered I_T nexus except for the I_T
+ * nexus on which the PERSISTENT RESERVE OUT command was
+ * received, with the additional sense code set to
+ * RESERVATIONS RELEASED.
+ */
+ if (pr_holder &&
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ list_for_each_entry(pr_reg_p,
+ &pr_tmpl->registration_list,
+ pr_reg_list) {
+
+ core_scsi3_ua_allocate(
+ pr_reg_p->pr_reg_nacl,
+ pr_reg_p->pr_res_mapped_lun,
+ 0x2A,
+ ASCQ_2AH_RESERVATIONS_RELEASED);
}
- spin_unlock(&pr_tmpl->registration_lock);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for UNREGISTER\n");
- return 0;
- }
+ if (!aptpl) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for UNREGISTER\n");
+ return 0;
+ }
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for UNREGISTER\n");
- }
+ if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+ " for UNREGISTER\n");
+ }
- kfree(pr_aptpl_buf);
- return ret;
- } else {
- /*
- * Increment PRgeneration counter for struct se_device"
- * upon a successful REGISTER, see spc4r17 section 6.3.2
- * READ_KEYS service action.
- */
- pr_reg->pr_res_generation = core_scsi3_pr_generation(
- cmd->se_dev);
- pr_reg->pr_res_key = sa_res_key;
- pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
- " Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
- (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
- pr_reg->pr_reg_nacl->initiatorname,
- pr_reg->pr_res_key, pr_reg->pr_res_generation);
-
- if (!aptpl) {
- pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- core_scsi3_put_pr_reg(pr_reg);
- pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
- " for REGISTER\n");
- return 0;
- }
+ goto out_free_aptpl_buf;
+ }
- ret = core_scsi3_update_and_write_aptpl(dev,
- &pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret) {
- pr_tmpl->pr_aptpl_active = 1;
- pr_debug("SPC-3 PR: Set APTPL Bit Activated"
- " for REGISTER\n");
- }
+ /*
+ * Increment PRgeneration counter for struct se_device
+ * upon a successful REGISTER, see spc4r17 section 6.3.2
+ * READ_KEYS service action.
+ */
+ pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+ pr_reg->pr_res_key = sa_res_key;
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ " Key for %s to: 0x%016Lx PRgeneration:"
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+ (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+ pr_reg->pr_reg_nacl->initiatorname,
+ pr_reg->pr_res_key, pr_reg->pr_res_generation);
- kfree(pr_aptpl_buf);
- core_scsi3_put_pr_reg(pr_reg);
- }
+ if (!aptpl) {
+ pr_tmpl->pr_aptpl_active = 0;
+ core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+ " for REGISTER\n");
+ ret = 0;
+ goto out_put_pr_reg;
}
- return 0;
+
+ if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+ pr_tmpl->pr_aptpl_buf_len)) {
+ pr_tmpl->pr_aptpl_active = 1;
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+ " for REGISTER\n");
+ }
+
+out_free_aptpl_buf:
+ kfree(pr_aptpl_buf);
+ ret = 0;
+out_put_pr_reg:
+ core_scsi3_put_pr_reg(pr_reg);
+ return ret;
}
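The rewritten REGISTER path above funnels every exit through the out_free_aptpl_buf and out_put_pr_reg labels instead of repeating kfree()/put calls before each early return. As a minimal, self-contained sketch of that goto-based unwind pattern (user-space C with invented resource names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-resource operation: acquire A, then B, and release
 * whatever was acquired on every exit path via fall-through labels. */
static int do_operation(int fail_step)
{
	char *aptpl_buf;
	FILE *reg;              /* stands in for the pr_reg reference */
	int ret = 0;

	aptpl_buf = malloc(256);
	if (!aptpl_buf)
		return -1;

	reg = tmpfile();
	if (!reg) {
		ret = -1;
		goto out_free_buf;      /* only the buffer needs releasing */
	}

	if (fail_step) {
		ret = -1;
		goto out_put_reg;       /* both resources need releasing */
	}

	/* ... main work would go here; success also falls through ... */

out_put_reg:
	fclose(reg);
out_free_buf:
	free(aptpl_buf);
	return ret;
}

int main(void)
{
	printf("ok: %d, failure: %d\n", do_operation(0), do_operation(1));
	return 0;
}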
unsigned char *core_scsi3_pr_dump_type(int type)
@@ -2424,26 +2341,23 @@ unsigned char *core_scsi3_pr_dump_type(int type)
return "Unknown SPC-3 PR Type";
}
-static int core_scsi3_pro_reserve(
- struct se_cmd *cmd,
- struct se_device *dev,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
{
+ struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
- int ret, prf_isid;
+ sense_reason_t ret;
+ int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2453,8 +2367,7 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2469,9 +2382,8 @@ static int core_scsi3_pro_reserve(
pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2485,9 +2397,8 @@ static int core_scsi3_pro_reserve(
*/
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2518,9 +2429,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2542,9 +2452,8 @@ static int core_scsi3_pro_reserve(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2557,8 +2466,8 @@ static int core_scsi3_pro_reserve(
* shall complete the command with GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ ret = 0;
+ goto out_put_pr_reg;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2582,27 +2491,24 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
+ }
}
+ ret = 0;
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_reserve(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
- struct se_device *dev = cmd->se_dev;
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -2610,16 +2516,12 @@ static int core_scsi3_emulate_pro_reserve(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
- break;
+ return core_scsi3_pro_reserve(cmd, type, scope, res_key);
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
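core_scsi3_emulate_pro_reserve() now returns a sense_reason_t directly out of the switch, rather than accumulating an int and stashing the detailed reason in cmd->scsi_sense_reason. A rough standalone illustration of the two error-reporting styles, using made-up enum values instead of the real TCM_* codes:

#include <stdio.h>

/* Invented stand-in for the kernel's sense_reason_t values. */
typedef enum {
	SENSE_NONE = 0,
	SENSE_INVALID_CDB_FIELD,
	SENSE_RESERVATION_CONFLICT,
} sense_reason_t;

/* Old style: success/failure via an int, with the detailed reason
 * passed back through an out-parameter (like cmd->scsi_sense_reason). */
static int handle_old(int type, sense_reason_t *reason)
{
	switch (type) {
	case 1:
	case 2:
		return 0;
	default:
		*reason = SENSE_INVALID_CDB_FIELD;
		return -1;
	}
}

/* New style: the return value is the reason; 0 means success. */
static sense_reason_t handle_new(int type)
{
	switch (type) {
	case 1:
	case 2:
		return SENSE_NONE;
	default:
		return SENSE_INVALID_CDB_FIELD;
	}
}

int main(void)
{
	sense_reason_t reason = SENSE_NONE;

	printf("old: %d (reason %d)\n", handle_old(9, &reason), reason);
	printf("new: %d\n", handle_new(9));
	return 0;
}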
/*
@@ -2657,23 +2559,21 @@ static void __core_scsi3_complete_pro_release(
pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
}
-static int core_scsi3_emulate_pro_release(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+ u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_session *se_sess = cmd->se_sess;
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
- int ret, all_reg = 0;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ int all_reg = 0;
+ sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2682,8 +2582,7 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2704,8 +2603,7 @@ static int core_scsi3_emulate_pro_release(
* No persistent reservation, return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2718,9 +2616,9 @@ static int core_scsi3_emulate_pro_release(
* persistent reservation holder. return GOOD status.
*/
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ goto out_put_pr_reg;
}
+
/*
* From spc4r17 Section 5.7.11.2 Releasing:
*
@@ -2740,9 +2638,8 @@ static int core_scsi3_emulate_pro_release(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2763,9 +2660,8 @@ static int core_scsi3_emulate_pro_release(
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* In response to a persistent reservation release request from the
@@ -2818,25 +2714,23 @@ static int core_scsi3_emulate_pro_release(
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
- &pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ }
}
-
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
- return 0;
+ return ret;
}
-static int core_scsi3_emulate_pro_clear(
- struct se_cmd *cmd,
- u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
@@ -2848,8 +2742,7 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2868,8 +2761,7 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* a) Release the persistent reservation, if any;
@@ -2993,28 +2885,22 @@ static void core_scsi3_release_preempt_and_abort(
}
}
-static int core_scsi3_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+ u64 sa_res_key, int abort)
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
struct se_session *se_sess = cmd->se_sess;
LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
- int prh_type = 0, prh_scope = 0, ret;
+ int prh_type = 0, prh_scope = 0;
- if (!se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
+ if (!se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
@@ -3022,19 +2908,16 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3047,8 +2930,7 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ return TCM_INVALID_PARAMETER_LIST;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3142,8 +3024,7 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* For an existing all registrants type reservation
@@ -3162,13 +3043,13 @@ static int core_scsi3_pro_preempt(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
+ }
}
core_scsi3_put_pr_reg(pr_reg_n);
@@ -3298,12 +3179,12 @@ static int core_scsi3_pro_preempt(
}
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
- "%s\n", (abort) ? "_AND_ABORT" : "");
+ "%s\n", abort ? "_AND_ABORT" : "");
+ }
}
core_scsi3_put_pr_reg(pr_reg_n);
@@ -3311,16 +3192,10 @@ static int core_scsi3_pro_preempt(
return 0;
}
-static int core_scsi3_emulate_pro_preempt(
- struct se_cmd *cmd,
- int type,
- int scope,
- u64 res_key,
- u64 sa_res_key,
- int abort)
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+ u64 res_key, u64 sa_res_key, int abort)
{
- int ret = 0;
-
switch (type) {
case PR_TYPE_WRITE_EXCLUSIVE:
case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -3328,26 +3203,19 @@ static int core_scsi3_emulate_pro_preempt(
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
- ret = core_scsi3_pro_preempt(cmd, type, scope,
- res_key, sa_res_key, abort);
- break;
+ return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+ sa_res_key, abort);
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
-
- return ret;
}
-static int core_scsi3_emulate_pro_register_and_move(
- struct se_cmd *cmd,
- u64 res_key,
- u64 sa_res_key,
- int aptpl,
- int unreg)
+static sense_reason_t
+core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+ u64 sa_res_key, int aptpl, int unreg)
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
@@ -3358,20 +3226,21 @@ static int core_scsi3_emulate_pro_register_and_move(
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
- int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+ int new_reg = 0, type, scope, matching_iname, prf_isid;
+ sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
@@ -3387,8 +3256,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* The provided reservation key must match the existing reservation key
@@ -3398,9 +3266,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
+ goto out_put_pr_reg;
}
/*
* The service action reservation key needs to be non-zero
@@ -3408,9 +3275,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!sa_res_key) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
/*
@@ -3419,6 +3285,11 @@ static int core_scsi3_emulate_pro_register_and_move(
* information.
*/
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
+
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
@@ -3432,9 +3303,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
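The REGISTER_AND_MOVE parameter data above is parsed by assembling big-endian fields byte by byte (rtpi from buf[18..19], tid_len from buf[20..23]). A self-contained sketch of the same extraction technique, using an invented buffer layout rather than the real SPC-4 one:

#include <stdint.h>
#include <stdio.h>

/* Assemble big-endian 16- and 32-bit fields from a raw byte buffer,
 * mirroring how rtpi and tid_len are pulled out of the payload. */
static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* Invented example payload: 2-byte port id followed by a 4-byte length. */
	unsigned char buf[6] = { 0x00, 0x2a, 0x00, 0x00, 0x01, 0x18 };

	printf("rtpi=%u tid_len=%u\n", get_be16(&buf[0]), get_be32(&buf[2]));
	return 0;
}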
spin_lock(&dev->se_port_lock);
@@ -3452,15 +3322,13 @@ static int core_scsi3_emulate_pro_register_and_move(
smp_mb__after_atomic_inc();
spin_unlock(&dev->se_port_lock);
- ret = core_scsi3_tpg_depend_item(dest_se_tpg);
- if (ret != 0) {
+ if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
}
spin_lock(&dev->se_port_lock);
@@ -3472,12 +3340,15 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
- core_scsi3_put_pr_reg(pr_reg);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- return -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
+ goto out_put_pr_reg;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out_put_pr_reg;
+ }
proto_ident = (buf[24] & 0x0f);
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3489,16 +3360,14 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3506,8 +3375,7 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3536,8 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3545,8 +3412,7 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
after_iport_check:
@@ -3566,19 +3432,17 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
- if (ret != 0) {
+
+ if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
@@ -3594,19 +3458,16 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- ret = core_scsi3_lunacl_depend_item(dest_se_deve);
- if (ret < 0) {
+ if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
@@ -3625,8 +3486,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
goto out;
}
/*
@@ -3639,8 +3499,7 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
/*
@@ -3658,8 +3517,7 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
+ ret = TCM_RESERVATION_CONFLICT;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3691,13 +3549,11 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- ret = core_scsi3_alloc_registration(cmd->se_dev,
+ if (core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, 0, aptpl, 2, 1);
- if (ret != 0) {
+ sa_res_key, 0, aptpl, 2, 1)) {
spin_unlock(&dev->dev_reservation_lock);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
+ ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3768,12 +3624,12 @@ after_iport_check:
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+ if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
- pr_tmpl->pr_aptpl_buf_len);
- if (!ret)
+ pr_tmpl->pr_aptpl_buf_len)) {
pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
+ }
}
transport_kunmap_data_sg(cmd);
@@ -3788,6 +3644,8 @@ out:
if (dest_node_acl)
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
core_scsi3_put_pr_reg(pr_reg);
return ret;
}
@@ -3805,14 +3663,15 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
-int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
{
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -3823,32 +3682,26 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = -EINVAL;
- goto out;
+ return TCM_RESERVATION_CONFLICT;
}
/*
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = -EINVAL;
- goto out;
- }
+ if (!cmd->se_sess)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
*/
@@ -3857,6 +3710,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
type = (cdb[2] & 0x0f);
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3880,11 +3736,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
- }
+ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ return TCM_INVALID_PARAMETER_LIST;
/*
* From spc4r17 section 6.14:
@@ -3899,10 +3752,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- ret = -EINVAL;
- goto out;
+ return TCM_INVALID_PARAMETER_LIST;
}
+
/*
* (core_scsi3_emulate_pro_* function parameters
* are defined by spc4r17 Table 174:
@@ -3941,12 +3793,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
-out:
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
@@ -3957,10 +3806,10 @@ out:
*
* See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
*/
-static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u32 add_len = 0, off = 8;
@@ -3968,18 +3817,20 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&su_dev->t10_pr.registration_lock);
- list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3999,7 +3850,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&su_dev->t10_pr.registration_lock);
+ spin_unlock(&dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
@@ -4016,10 +3867,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*
* See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
*/
-static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
@@ -4028,18 +3879,20 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- spin_lock(&se_dev->dev_reservation_lock);
- pr_reg = se_dev->dev_pr_res_holder;
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+ spin_lock(&dev->dev_reservation_lock);
+ pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
/*
* Set the hardcoded Additional Length
@@ -4090,7 +3943,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
}
err:
- spin_unlock(&se_dev->dev_reservation_lock);
+ spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);
return 0;
@@ -4101,21 +3954,23 @@ err:
*
* See spc4r17 section 6.13.4 Table 165
*/
-static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
buf[0] = ((add_len << 8) & 0xff);
buf[1] = (add_len & 0xff);
@@ -4157,14 +4012,14 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*
* See spc4r17 section 6.13.5 Table 168 and 169
*/
-static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = cmd->se_dev;
+ struct se_device *dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
@@ -4173,16 +4028,17 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+ buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4303,9 +4159,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
-int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
- int ret;
+ sense_reason_t ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4316,12 +4173,11 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
- if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+ if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EINVAL;
+ return TCM_RESERVATION_CONFLICT;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4340,9 +4196,7 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
- break;
+ return TCM_INVALID_CDB_FIELD;
}
if (!ret)
@@ -4350,56 +4204,25 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return ret;
}
-static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
-{
- return 0;
-}
-
-static int core_pt_seq_non_holder(
- struct se_cmd *cmd,
- unsigned char *cdb,
- u32 pr_reg_type)
+sense_reason_t
+target_check_reservation(struct se_cmd *cmd)
{
- return 0;
-}
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
-int core_setup_reservations(struct se_device *dev, int force_pt)
-{
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation *rest = &su_dev->t10_pr;
- /*
- * If this device is from Target_Core_Mod/pSCSI, use the reservations
- * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
- * cause a problem because libata and some SATA RAID HBAs appear
- * under Linux/SCSI, but to emulate reservations themselves.
- */
- if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
- rest->res_type = SPC_PASSTHROUGH;
- rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", dev->transport->name);
+ if (!cmd->se_sess)
+ return 0;
+ if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+ return 0;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
- }
- /*
- * If SPC-3 or above is reported by real or emulated struct se_device,
- * use emulated Persistent Reservations.
- */
- if (dev->transport->get_device_rev(dev) >= SCSI_3) {
- rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
- rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", dev->transport->name);
- } else {
- rest->res_type = SPC2_RESERVATIONS;
- rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
- rest->pr_ops.t10_seq_non_holder =
- &core_scsi2_reservation_seq_non_holder;
- pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
- dev->transport->name);
- }
- return 0;
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ ret = target_scsi2_reservation_check(cmd);
+ else
+ ret = target_scsi3_pr_reservation_check(cmd);
+ spin_unlock(&dev->dev_reservation_lock);
+
+ return ret;
}
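The new target_check_reservation() above picks the SPC-2 or SPC-3 checker by testing a flag while holding dev_reservation_lock, replacing the old per-device pr_ops function-pointer indirection. A rough user-space analogue of that lock-then-dispatch shape (pthread mutex and invented names; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

#define FLAG_SPC2_RESERVATIONS 0x1

/* Invented stand-in for struct se_device's reservation state. */
struct device_state {
	pthread_mutex_t lock;
	unsigned int flags;
};

static int check_spc2(struct device_state *dev) { (void)dev; return 0; }
static int check_spc3(struct device_state *dev) { (void)dev; return 0; }

/* Choose the checker under the lock so the flag and the reservation
 * state it guards are read consistently. */
static int check_reservation(struct device_state *dev)
{
	int ret;

	pthread_mutex_lock(&dev->lock);
	if (dev->flags & FLAG_SPC2_RESERVATIONS)
		ret = check_spc2(dev);
	else
		ret = check_spc3(dev);
	pthread_mutex_unlock(&dev->lock);

	return ret;
}

int main(void)
{
	struct device_state dev = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("check: %d\n", check_reservation(&dev));
	return 0;
}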
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index af6c460d886..b4a004247ab 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
-extern int target_scsi2_reservation_release(struct se_cmd *);
-extern int target_scsi2_reservation_reserve(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int target_scsi3_emulate_pr_in(struct se_cmd *);
-extern int target_scsi3_emulate_pr_out(struct se_cmd *);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 617c086a8a0..2bcfd79cf59 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -3,10 +3,7 @@
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -53,9 +50,14 @@
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct pscsi_dev_virt, dev);
+}
+
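+The PSCSI_DEV() helper recovers the driver-private pscsi_dev_virt from an embedded struct se_device via container_of(). A standalone sketch of that embedding trick, with invented structures in place of the kernel types:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of: map a pointer to an embedded member back to
 * the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_device {           /* plays the role of struct se_device */
	int id;
};

struct driver_private {           /* plays the role of pscsi_dev_virt */
	int driver_data;
	struct generic_device dev;    /* embedded, not pointed-to */
};

static struct driver_private *to_private(struct generic_device *dev)
{
	return container_of(dev, struct driver_private, dev);
}

int main(void)
{
	struct driver_private priv = { .driver_data = 42, .dev = { .id = 7 } };
	struct generic_device *dev = &priv.dev;   /* what the core hands back */

	printf("driver_data=%d id=%d\n", to_private(dev)->driver_data, dev->id);
	return 0;
}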
static struct se_subsystem_api pscsi_template;
-static int pscsi_execute_cmd(struct se_cmd *cmd);
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@@ -219,7 +221,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
- wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+ wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
kfree(buf);
return 0;
@@ -299,23 +301,13 @@ out:
kfree(buf);
}
-/* pscsi_add_device_to_list():
- *
- *
- */
-static struct se_device *pscsi_add_device_to_list(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- struct pscsi_dev_virt *pdv,
- struct scsi_device *sd,
- int dev_flags)
+static int pscsi_add_device_to_list(struct se_device *dev,
+ struct scsi_device *sd)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct request_queue *q;
- struct queue_limits *limits;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct request_queue *q = sd->request_queue;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ pdv->pdv_sd = sd;
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -324,54 +316,27 @@ static struct se_device *pscsi_add_device_to_list(
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
- /*
- * Setup the local scope queue_limits from struct request_queue->limits
- * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
- */
- q = sd->request_queue;
- limits = &dev_limits.limits;
- limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
- limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
- dev_limits.hw_queue_depth = sd->queue_depth;
- dev_limits.queue_depth = sd->queue_depth;
- /*
- * Setup our standard INQUIRY info into se_dev->t10_wwn
- */
- pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_max_sectors =
+ min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
- * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
- * which has already been referenced with Linux SCSI code with
- * scsi_device_get() in this file's pscsi_create_virtdevice().
- *
- * The passthrough operations called by the transport_add_device_*
- * function below will require this pointer to be set for passthroug
- * ops.
- *
- * For the shutdown case in pscsi_free_device(), this struct
- * scsi_device reference is released with Linux SCSI code
- * scsi_device_put() and the pdv->pdv_sd cleared.
+ * Setup our standard INQUIRY info into se_dev->t10_wwn
*/
- pdv->pdv_sd = sd;
- dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, pdv,
- &dev_limits, NULL, NULL);
- if (!dev) {
- pdv->pdv_sd = NULL;
- return NULL;
- }
+ pscsi_set_inquiry_info(sd, &dev->t10_wwn);
/*
* Locate VPD WWN Information used for various purposes within
* the Storage Engine.
*/
- if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+ if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
/*
* If VPD Unit Serial returned GOOD status, try
* VPD Device Identification page (0x83).
*/
- pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+ pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
}
/*
@@ -379,10 +344,11 @@ static struct se_device *pscsi_add_device_to_list(
*/
if (sd->type == TYPE_TAPE)
pscsi_tape_read_blocksize(dev, sd);
- return dev;
+ return 0;
}
-static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+ const char *name)
{
struct pscsi_dev_virt *pdv;
@@ -391,139 +357,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
- pdv->pdv_se_hba = hba;
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return pdv;
+ return &pdv->dev;
}
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_disk(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK using supplied udev_path
*/
- bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+ bd = blkdev_get_by_path(dev->udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
- return NULL;
+ return PTR_ERR(bd);
}
pdv->pdv_bd = bd;
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
- return NULL;
+ return ret;
}
+
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_rom(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
if (scsi_device_get(sd)) {
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
- return NULL;
+ return -EIO;
}
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev) {
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret) {
scsi_device_put(sd);
- return NULL;
+ return ret;
}
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
- return dev;
+ return 0;
}
/*
- *Called with struct Scsi_Host->host_lock called.
+ * Called with struct Scsi_Host->host_lock called.
*/
-static struct se_device *pscsi_create_type_other(
- struct scsi_device *sd,
- struct pscsi_dev_virt *pdv,
- struct se_subsystem_dev *se_dev,
- struct se_hba *hba)
+static int pscsi_create_type_other(struct se_device *dev,
+ struct scsi_device *sd)
__releases(sh->host_lock)
{
- struct se_device *dev;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
- u32 dev_flags = 0;
+ int ret;
spin_unlock_irq(sh->host_lock);
- dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!dev)
- return NULL;
+ ret = pscsi_add_device_to_list(dev, sd);
+ if (ret)
+ return ret;
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
-
- return dev;
+ return 0;
}
-static struct se_device *pscsi_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
+static int pscsi_configure_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct se_device *dev;
+ struct se_hba *hba = dev->se_hba;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
+ int ret;
- if (!pdv) {
- pr_err("Unable to locate struct pscsi_dev_virt"
- " parameter\n");
- return ERR_PTR(-EINVAL);
+ if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+ !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+ " scsi_lun_id= parameters\n");
+ return -EINVAL;
}
+
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -532,16 +484,16 @@ static struct se_device *pscsi_create_virtdevice(
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
/*
* For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
* reference, we enforce that udev_path has been set
*/
- if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+ if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
* If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
@@ -549,17 +501,14 @@ static struct se_device *pscsi_create_virtdevice(
* and enable for PHV_LLD_SCSI_HOST_NO mode.
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
- spin_lock(&hba->device_lock);
- if (!list_empty(&hba->hba_dev_list)) {
+ if (hba->dev_count) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
- spin_unlock(&hba->device_lock);
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
- spin_unlock(&hba->device_lock);
if (pscsi_pmode_enable_hba(hba, 1) != 1)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -569,14 +518,14 @@ static struct se_device *pscsi_create_virtdevice(
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_CAST(sh);
+ return PTR_ERR(sh);
}
}
} else {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
" struct Scsi_Host exists\n");
- return ERR_PTR(-EEXIST);
+ return -EEXIST;
}
}
@@ -593,17 +542,17 @@ static struct se_device *pscsi_create_virtdevice(
*/
switch (sd->type) {
case TYPE_DISK:
- dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_disk(dev, sd);
break;
case TYPE_ROM:
- dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_rom(dev, sd);
break;
default:
- dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+ ret = pscsi_create_type_other(dev, sd);
break;
}
- if (!dev) {
+ if (ret) {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@@ -611,9 +560,9 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
- return ERR_PTR(-ENODEV);
+ return ret;
}
- return dev;
+ return 0;
}
spin_unlock_irq(sh->host_lock);
@@ -627,17 +576,13 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-/* pscsi_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void pscsi_free_device(void *p)
+static void pscsi_free_device(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = p;
- struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
if (sd) {
@@ -670,7 +615,7 @@ static void pscsi_free_device(void *p)
static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
unsigned char *sense_buffer)
{
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = cmd->priv;
@@ -694,7 +639,11 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = transport_kmap_data_sg(cmd);
+ unsigned char *buf;
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -770,13 +719,11 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
- struct pscsi_hba_virt *phv = hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -841,29 +788,10 @@ out:
return (!ret) ? count : ret;
}
-static ssize_t pscsi_check_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev)
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-
- if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
- !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
- !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- pr_err("Missing scsi_channel_id=, scsi_target_id= and"
- " scsi_lun_id= parameters\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct pscsi_hba_virt *phv = hba->hba_ptr;
- struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
@@ -929,11 +857,11 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
-static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction,
- struct bio **hbio)
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction, struct bio **hbio)
{
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
@@ -1019,7 +947,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
}
}
- return sgl_nents;
+ return 0;
fail:
while (*hbio) {
bio = *hbio;
@@ -1027,8 +955,7 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
@@ -1055,17 +982,13 @@ static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
}
}
-static int pscsi_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
- unsigned int dummy_size;
- int ret;
- if (cmd->se_cmd_flags & SCF_BIDI) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
- }
+ if (cmd->se_cmd_flags & SCF_BIDI)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
pscsi_clear_cdb_lun(cdb);
@@ -1076,10 +999,8 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
*/
switch (cdb[0]) {
case REPORT_LUNS:
- ret = spc_parse_cdb(cmd, &dummy_size);
- if (ret)
- return ret;
- break;
+ cmd->execute_cmd = spc_emulate_report_luns;
+ return 0;
case READ_6:
case READ_10:
case READ_12:
@@ -1093,22 +1014,21 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
/* FALLTHROUGH*/
default:
cmd->execute_cmd = pscsi_execute_cmd;
- break;
+ return 0;
}
-
- return 0;
}
-static int pscsi_execute_cmd(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
- struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct pscsi_plugin_task *pt;
struct request *req;
struct bio *hbio;
- int ret;
+ sense_reason_t ret;
/*
* Dynamically alloc cdb space, since it may be larger than
@@ -1116,8 +1036,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
*/
pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
if (!pt) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
cmd->priv = pt;
@@ -1131,24 +1050,21 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
} else {
BUG_ON(!cmd->data_length);
ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
- if (ret < 0) {
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret)
goto fail;
- }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail_free_bio;
}
}
@@ -1179,22 +1095,10 @@ fail_free_bio:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
kfree(pt);
- return -ENOMEM;
-}
-
-/* pscsi_get_device_rev():
- *
- *
- */
-static u32 pscsi_get_device_rev(struct se_device *dev)
-{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
- struct scsi_device *sd = pdv->pdv_sd;
-
- return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+ return ret;
}
/* pscsi_get_device_type():
@@ -1203,7 +1107,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
*/
static u32 pscsi_get_device_type(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
return sd->type;
@@ -1211,7 +1115,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
static sector_t pscsi_get_blocks(struct se_device *dev)
{
- struct pscsi_dev_virt *pdv = dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
@@ -1243,7 +1147,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
@@ -1259,15 +1162,13 @@ static struct se_subsystem_api pscsi_template = {
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
- .allocate_virtdevice = pscsi_allocate_virtdevice,
- .create_virtdevice = pscsi_create_virtdevice,
+ .alloc_device = pscsi_alloc_device,
+ .configure_device = pscsi_configure_device,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
.parse_cdb = pscsi_parse_cdb,
- .check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
- .get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
};
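
The pscsi conversion above illustrates the series' new error convention: backend callbacks return a sense_reason_t directly instead of stashing a code in cmd->scsi_sense_reason and returning -EINVAL/-ENOMEM. A minimal userspace sketch of that pattern follows; the enum values and the handle_cdb() helper are simplified stand-ins, not the kernel definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's sense_reason_t values. */
typedef enum {
	TCM_NO_SENSE = 0,
	TCM_UNSUPPORTED_SCSI_OPCODE,
	TCM_INVALID_CDB_FIELD,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
} sense_reason_t;

/* A backend-style handler: 0 on success, otherwise the sense reason
 * that should drive the CHECK CONDITION, with no side channel. */
static sense_reason_t handle_cdb(unsigned char opcode)
{
	if (opcode == 0xff)
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	return TCM_NO_SENSE;
}

int main(void)
{
	sense_reason_t ret = handle_cdb(0xff);

	if (ret)
		printf("fail command, sense reason %d\n", (int)ret);
	return 0;
}
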
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index bc1e5e11eca..1bd757dff8e 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
+ struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
- struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
typedef enum phv_modes {
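
The one-line addition of struct se_device to struct pscsi_dev_virt is what makes the PSCSI_DEV() container_of() accessor above work: the core now hands backends a struct se_device pointer embedded inside their own private structure, replacing the old dev->dev_ptr indirection. A self-contained sketch of that accessor pattern, with hypothetical *_stub names standing in for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_device_stub {			/* stand-in for struct se_device */
	int dev_index;
};

struct pscsi_dev_stub {			/* stand-in for struct pscsi_dev_virt */
	struct se_device_stub dev;	/* embedded, as added above */
	int pdv_host_id;
};

static struct pscsi_dev_stub *PSCSI_DEV_STUB(struct se_device_stub *dev)
{
	/* Recover the backend structure from the embedded se_device. */
	return container_of(dev, struct pscsi_dev_stub, dev);
}

int main(void)
{
	struct pscsi_dev_stub pdv = { .dev = { .dev_index = 3 }, .pdv_host_id = 7 };
	struct se_device_stub *dev = &pdv.dev;	/* what the core passes around */

	printf("host_id=%d\n", PSCSI_DEV_STUB(dev)->pdv_host_id);
	return 0;
}
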
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d00bbe33ff8..0457de362e6 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -41,7 +38,10 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+ return container_of(dev, struct rd_dev, dev);
+}
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0;
}
-static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
rd_dev->rd_host = rd_host;
- return rd_dev;
+ return &rd_dev->dev;
}
-static struct se_device *rd_create_virtdevice(struct se_hba *hba,
- struct se_subsystem_dev *se_dev, void *p)
+static int rd_configure_device(struct se_device *dev)
{
- struct se_device *dev;
- struct se_dev_limits dev_limits;
- struct rd_dev *rd_dev = p;
- struct rd_host *rd_host = hba->hba_ptr;
- int dev_flags = 0, ret;
- char prod[16], rev[4];
+ struct rd_dev *rd_dev = RD_DEV(dev);
+ struct rd_host *rd_host = dev->se_hba->hba_ptr;
+ int ret;
- memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+ if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
+ }
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
- snprintf(prod, 16, "RAMDISK-MCP");
- snprintf(rev, 4, "%s", RD_MCP_VERSION);
-
- dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
- dev_limits.limits.max_hw_sectors = UINT_MAX;
- dev_limits.limits.max_sectors = UINT_MAX;
- dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-
- dev = transport_add_device_to_core_hba(hba,
- &rd_mcp_template, se_dev, dev_flags, rd_dev,
- &dev_limits, prod, rev);
- if (!dev)
- goto fail;
+ dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+ dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
- return dev;
+ return 0;
fail:
rd_release_device_space(rd_dev);
- return ERR_PTR(ret);
+ return ret;
}
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
{
- struct rd_dev *rd_dev = p;
+ struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
kfree(rd_dev);
@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-static int rd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
- struct rd_dev *dev = se_dev->dev_ptr;
+ struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 src_len;
u64 tmp;
- tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
@@ -378,13 +367,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
-static ssize_t rd_set_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- const char *page,
- ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret;
}
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
- if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- pr_debug("Missing rd_pages= parameter\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t rd_show_configfs_dev_params(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- char *b)
-{
- struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
return bl;
}
-static u32 rd_get_device_rev(struct se_device *dev)
-{
- return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 rd_get_device_type(struct se_device *dev)
-{
- return TYPE_DISK;
-}
-
static sector_t rd_get_blocks(struct se_device *dev)
{
- struct rd_dev *rd_dev = dev->dev_ptr;
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+ dev->dev_attrib.block_size) - 1;
return blocks_long;
}
-static struct spc_ops rd_spc_ops = {
+static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
-static int rd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &rd_spc_ops);
+ return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
+ .inquiry_prod = "RAMDISK-MCP",
+ .inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_allocate_virtdevice,
- .create_virtdevice = rd_create_virtdevice,
+ .alloc_device = rd_alloc_device,
+ .configure_device = rd_configure_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
+ .get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
};
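
rd_configure_device() above shows the new two-phase setup that replaces allocate_virtdevice/create_virtdevice: alloc_device() only creates the backend object and returns its embedded se_device, while configure_device() validates the configfs parameters (the old check_configfs_dev_params step) and writes the hardware limits straight into dev->dev_attrib. A rough userspace sketch under stand-in types (se_device_stub and rd_dev_stub are hypothetical, not the kernel structures):

#include <errno.h>
#include <stdlib.h>

struct dev_attrib_stub { unsigned int hw_block_size, hw_max_sectors; };
struct se_device_stub  { struct dev_attrib_stub dev_attrib; };

struct rd_dev_stub {
	struct se_device_stub dev;	/* embedded se_device, first member */
	unsigned int rd_page_count;	/* filled in by set_configfs_dev_params */
	int has_page_count;
};

/* Phase 1: allocate the backend object only. */
static struct se_device_stub *rd_alloc_device_stub(void)
{
	struct rd_dev_stub *rd_dev = calloc(1, sizeof(*rd_dev));

	return rd_dev ? &rd_dev->dev : NULL;
}

/* Phase 2: validate parameters and publish the hardware limits. */
static int rd_configure_device_stub(struct se_device_stub *dev)
{
	struct rd_dev_stub *rd_dev = (struct rd_dev_stub *)dev; /* dev is first member */

	if (!rd_dev->has_page_count)	/* former check_configfs_dev_params */
		return -EINVAL;

	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = ~0u;
	return 0;
}

int main(void)
{
	struct se_device_stub *dev = rd_alloc_device_stub();
	struct rd_dev_stub *rd_dev = (struct rd_dev_stub *)dev;

	if (!dev)
		return 1;
	rd_dev->has_page_count = 1;	/* pretend "rd_pages=" was supplied */
	rd_dev->rd_page_count = 1024;

	if (rd_configure_device_stub(dev))
		return 1;
	free(rd_dev);
	return 0;
}
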
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 21458125fe5..933b38b6e56 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev {
+ struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a6e27d967c7..26a6d183ccb 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1,10 +1,7 @@
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -37,7 +34,8 @@
#include "target_core_ua.h"
-static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -54,22 +52,24 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
- buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->dev_attrib.block_size & 0xff;
rbuf = transport_kmap_data_sg(cmd);
- if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
- transport_kunmap_data_sg(cmd);
- }
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
-static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *rbuf;
@@ -85,28 +85,29 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
- buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
buf[14] = 0x80;
rbuf = transport_kmap_data_sg(cmd);
- if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
- transport_kunmap_data_sg(cmd);
- }
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
-int spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
@@ -129,13 +130,8 @@ int spc_get_write_same_sectors(struct se_cmd *cmd)
}
EXPORT_SYMBOL(spc_get_write_same_sectors);
-static int sbc_emulate_verify(struct se_cmd *cmd)
-{
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static int sbc_emulate_noop(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
@@ -143,7 +139,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
- return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+ return cmd->se_dev->dev_attrib.block_size * sectors;
}
static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +148,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
unsigned long long end_lba;
u32 sectors;
- sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
@@ -236,26 +232,37 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
-static int sbc_write_same_supported(struct se_device *dev,
- unsigned char *flags)
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
+ unsigned int sectors = spc_get_write_same_sectors(cmd);
+
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
- return -ENOSYS;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+ pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+ sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ return TCM_INVALID_CDB_FIELD;
}
-
/*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
+ * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+ * translated into block discard requests within backend code.
*/
- if (!(flags[0] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
- return -ENOSYS;
+ if (flags[0] & 0x08) {
+ if (!ops->execute_write_same_unmap)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ cmd->execute_cmd = ops->execute_write_same_unmap;
+ return 0;
}
+ if (!ops->execute_write_same)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->execute_cmd = ops->execute_write_same;
return 0;
}
@@ -313,14 +320,14 @@ out:
kfree(buf);
}
-int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
u32 sectors = 0;
- int ret;
+ sense_reason_t ret;
switch (cdb[0]) {
case READ_6:
@@ -379,9 +386,9 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
cmd->execute_cmd = ops->execute_rw;
break;
case XDWRITEREAD_10:
- if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ if (cmd->data_direction != DMA_TO_DEVICE ||
!(cmd->se_cmd_flags & SCF_BIDI))
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
@@ -419,27 +426,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- if (sbc_write_same_supported(dev, &cdb[10]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ if (ret)
+ return ret;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
break;
}
@@ -455,7 +459,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
@@ -463,7 +467,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (!ops->execute_sync_cache)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
@@ -484,42 +488,36 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
case UNMAP:
if (!ops->execute_unmap)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = ops->execute_unmap;
break;
case WRITE_SAME_16:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- if (sbc_write_same_supported(dev, &cdb[1]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
break;
case WRITE_SAME:
- if (!ops->execute_write_same)
- goto out_unsupported_cdb;
-
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, 1);
@@ -529,13 +527,13 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
- if (sbc_write_same_supported(dev, &cdb[1]) < 0)
- goto out_unsupported_cdb;
- cmd->execute_cmd = ops->execute_write_same;
+ ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ if (ret)
+ return ret;
break;
case VERIFY:
size = 0;
- cmd->execute_cmd = sbc_emulate_verify;
+ cmd->execute_cmd = sbc_emulate_noop;
break;
case REZERO_UNIT:
case SEEK_6:
@@ -557,24 +555,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
/* reject any command that we don't have a handler for */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
- goto out_unsupported_cdb;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
- if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+ if (sectors > dev->dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.fabric_max_sectors);
- goto out_invalid_cdb_field;
+ dev->dev_attrib.fabric_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
}
- if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+ if (sectors > dev->dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.hw_max_sectors);
- goto out_invalid_cdb_field;
+ dev->dev_attrib.hw_max_sectors);
+ return TCM_INVALID_CDB_FIELD;
}
end_lba = dev->transport->get_blocks(dev) + 1;
@@ -582,25 +580,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
size = sbc_get_size(cmd, sectors);
}
- ret = target_cmd_size_check(cmd, size);
- if (ret < 0)
- return ret;
-
- return 0;
-
-out_unsupported_cdb:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+ return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
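
sbc_setup_write_same() above folds the old sbc_write_same_supported() check and the execute_cmd assignment into one helper: the UNMAP bit in the flags byte selects execute_write_same_unmap or execute_write_same, and the command is rejected when the backend does not provide that callback. A small sketch of that dispatch, using hypothetical *_stub types rather than the real sbc_ops:

#include <stdio.h>

typedef int sense_reason_stub_t;	/* 0 == success */
#define STUB_UNSUPPORTED_OPCODE 1

struct sbc_ops_stub {			/* stand-in for struct sbc_ops */
	int (*execute_write_same)(void);
	int (*execute_write_same_unmap)(void);
};

struct se_cmd_stub {			/* stand-in for struct se_cmd */
	int (*execute_cmd)(void);
};

static sense_reason_stub_t
setup_write_same_stub(struct se_cmd_stub *cmd, unsigned char flags,
		      const struct sbc_ops_stub *ops)
{
	if (flags & 0x08) {		/* UNMAP=1: route to the discard path */
		if (!ops->execute_write_same_unmap)
			return STUB_UNSUPPORTED_OPCODE;
		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)	/* plain WRITE SAME */
		return STUB_UNSUPPORTED_OPCODE;
	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static int do_write_same_stub(void)       { return 0; }
static int do_write_same_unmap_stub(void) { return 0; }

int main(void)
{
	const struct sbc_ops_stub ops = {
		.execute_write_same = do_write_same_stub,
		.execute_write_same_unmap = do_write_same_unmap_stub,
	};
	struct se_cmd_stub cmd = { 0 };

	if (setup_write_same_stub(&cmd, 0x08, &ops))
		return 1;
	printf("dispatched to %s path\n",
	       cmd.execute_cmd == do_write_same_unmap_stub ? "unmap" : "plain");
	return cmd.execute_cmd();
}
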
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6fd434d3d7e..84f9e96e8ac 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1,10 +1,7 @@
/*
* SCSI Primary Commands (SPC) parsing and emulation.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -69,7 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -78,7 +76,7 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
- buf[2] = dev->transport->get_device_rev(dev);
+ buf[2] = 0x05; /* SPC-3 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -95,34 +93,32 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
- spc_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun->lun_sep, buf);
buf[7] = 0x2; /* CmdQue=1 */
snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+ snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
+ snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */
return 0;
}
/* unit serial number */
-static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len = 0;
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
- unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+ unit_serial_len = strlen(dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */
- len += sprintf(&buf[4], "%s",
- dev->se_sub_dev->t10_wwn.unit_serial);
+ len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -132,7 +128,7 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
unsigned char *buf)
{
- unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+ unsigned char *p = &dev->t10_wwn.unit_serial[0];
int cnt;
bool next = true;
@@ -164,7 +160,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
@@ -173,7 +170,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+ unsigned char *prod = &dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
u16 len = 0, id_len;
@@ -188,7 +185,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
* value in order to return the NAA id.
*/
- if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+ if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
goto check_t10_vend_desc;
/* CODE SET == Binary */
@@ -236,14 +233,12 @@ check_t10_vend_desc:
prod_len += strlen(prod);
prod_len++; /* For : */
- if (dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- unit_serial_len =
- strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+ unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
id_len += sprintf(&buf[off+12], "%s:%s", prod,
- &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ &dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,10 +293,6 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- if (dev->se_sub_dev->t10_alua.alua_type !=
- SPC3_ALUA_EMULATED)
- goto check_scsi_name;
-
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
goto check_lu_gp;
@@ -415,20 +406,22 @@ check_scsi_name:
}
/* Extended INQUIRY Data VPD Page */
-static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+ if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
/* Block Limits VPD page */
-static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
@@ -439,7 +432,7 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 * emulate_tpu=1 or emulate_tpws=1 we will expect a
* different page length for Thin Provisioning.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
@@ -456,62 +449,70 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+ dev->dev_attrib.hw_max_sectors);
put_unaligned_be32(max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
*/
if (!have_tp)
- return 0;
+ goto max_write_same;
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+ put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+ put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+ put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
&buf[32]);
- if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+ if (dev->dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
+ /*
+ * MAXIMUM WRITE SAME LENGTH
+ */
+max_write_same:
+ put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
return 0;
}
/* Block Device Characteristics VPD page */
-static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = 0x3c;
- buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+ buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
return 0;
}
/* Thin Provisioning VPD */
-static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -546,7 +547,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+ if (dev->dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
@@ -555,17 +556,18 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+ if (dev->dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
- int (*emulate)(struct se_cmd *, unsigned char *);
+ sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
@@ -577,7 +579,8 @@ static struct {
};
/* supported vital product data pages */
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
@@ -586,8 +589,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
- if (cmd->se_dev->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
+ if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = ARRAY_SIZE(evpd_handlers);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
buf[p + 4] = evpd_handlers[p].page;
@@ -596,14 +598,16 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static int spc_emulate_inquiry(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char buf[SE_INQUIRY_BUF];
- int p, ret;
+ sense_reason_t ret;
+ int p;
memset(buf, 0, SE_INQUIRY_BUF);
@@ -616,8 +620,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
if (cdb[2]) {
pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
goto out;
}
@@ -634,33 +637,43 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- ret = -EINVAL;
+ ret = TCM_INVALID_CDB_FIELD;
out:
rbuf = transport_kmap_data_sg(cmd);
- if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
- transport_kunmap_data_sg(cmd);
- }
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
-static int spc_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x01;
p[1] = 0x0a;
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
return 12;
}
-static int spc_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x0a;
p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
p[2] = 2;
/*
* From spc4r23, 7.4.7 Control mode page
@@ -690,7 +703,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* command sequence order shall be explicitly handled by the application client
 * through the selection of appropriate commands and task attributes.
*/
- p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -720,8 +733,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -734,25 +747,56 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
+out:
return 12;
}
-static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
{
p[0] = 0x08;
p[1] = 0x12;
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+ if (dev->dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
+out:
return 20;
}
+static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+{
+ p[0] = 0x1c;
+ p[1] = 0x0a;
+
+ /* No changeable values for now */
+ if (pc == 1)
+ goto out;
+
+out:
+ return 12;
+}
+
+static struct {
+ uint8_t page;
+ uint8_t subpage;
+ int (*emulate)(struct se_device *, u8, unsigned char *);
+} modesense_handlers[] = {
+ { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+ { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+ { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+ { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
+
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
/*
@@ -779,82 +823,224 @@ static void spc_modesense_dpofua(unsigned char *buf, int type)
}
}
-static int spc_emulate_modesense(struct se_cmd *cmd)
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ *buf++ = 8;
+ put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+ buf += 4;
+ put_unaligned_be32(block_size, buf);
+ return 9;
+}
+
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+ if (blocks <= 0xffffffff)
+ return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+ *buf++ = 1; /* LONGLBA */
+ buf += 2;
+ *buf++ = 16;
+ put_unaligned_be64(blocks, buf);
+ buf += 12;
+ put_unaligned_be32(block_size, buf);
+
+ return 17;
+}
+
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
- unsigned char *rbuf;
+ unsigned char *buf, *map_buf;
int type = dev->transport->get_device_type(dev);
int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
- u32 offset = ten ? 8 : 4;
+ bool dbd = !!(cdb[1] & 0x08);
+ bool llba = ten ? !!(cdb[1] & 0x10) : false;
+ u8 pc = cdb[2] >> 6;
+ u8 page = cdb[2] & 0x3f;
+ u8 subpage = cdb[3];
int length = 0;
- unsigned char buf[SE_MODE_PAGE_BUF];
-
- memset(buf, 0, SE_MODE_PAGE_BUF);
+ int ret;
+ int i;
- switch (cdb[2] & 0x3f) {
- case 0x01:
- length = spc_modesense_rwrecovery(&buf[offset]);
- break;
- case 0x08:
- length = spc_modesense_caching(dev, &buf[offset]);
- break;
- case 0x0a:
- length = spc_modesense_control(dev, &buf[offset]);
- break;
- case 0x3f:
- length = spc_modesense_rwrecovery(&buf[offset]);
- length += spc_modesense_caching(dev, &buf[offset+length]);
- length += spc_modesense_control(dev, &buf[offset+length]);
- break;
- default:
- pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
- cdb[2] & 0x3f, cdb[3]);
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- return -EINVAL;
+ map_buf = transport_kmap_data_sg(cmd);
+ if (!map_buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ /*
+ * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+ * know we actually allocated a full page. Otherwise, if the
+ * data buffer is too small, allocate a temporary buffer so we
+ * don't have to worry about overruns in all our INQUIRY
+ * emulation handling.
+ */
+ if (cmd->data_length < SE_MODE_PAGE_BUF &&
+ (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+ buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
+ if (!buf) {
+ transport_kunmap_data_sg(cmd);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ } else {
+ buf = map_buf;
}
- offset += length;
-
- if (ten) {
- offset -= 2;
- buf[0] = (offset >> 8) & 0xff;
- buf[1] = offset & 0xff;
- offset += 2;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- spc_modesense_write_protect(&buf[3], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- spc_modesense_dpofua(&buf[3], type);
+ /*
+ * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+ * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+ */
+ length = ten ? 3 : 2;
+
+ /* DEVICE-SPECIFIC PARAMETER */
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+ (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ spc_modesense_write_protect(&buf[length], type);
+
+ if ((dev->dev_attrib.emulate_write_cache > 0) &&
+ (dev->dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[length], type);
+
+ ++length;
+
+ /* BLOCK DESCRIPTOR */
+
+ /*
+ * For now we only include a block descriptor for disk (SBC)
+ * devices; other command sets use a slightly different format.
+ */
+ if (!dbd && type == TYPE_DISK) {
+ u64 blocks = dev->transport->get_blocks(dev);
+ u32 block_size = dev->dev_attrib.block_size;
+
+ if (ten) {
+ if (llba) {
+ length += spc_modesense_long_blockdesc(&buf[length],
+ blocks, block_size);
+ } else {
+ length += 3;
+ length += spc_modesense_blockdesc(&buf[length],
+ blocks, block_size);
+ }
+ } else {
+ length += spc_modesense_blockdesc(&buf[length], blocks,
+ block_size);
+ }
} else {
- offset -= 1;
- buf[0] = offset & 0xff;
- offset += 1;
-
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- spc_modesense_write_protect(&buf[2], type);
-
- if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- spc_modesense_dpofua(&buf[2], type);
+ if (ten)
+ length += 4;
+ else
+ length += 1;
}
- rbuf = transport_kmap_data_sg(cmd);
- if (rbuf) {
- memcpy(rbuf, buf, min(offset, cmd->data_length));
- transport_kunmap_data_sg(cmd);
+ if (page == 0x3f) {
+ if (subpage != 0x00 && subpage != 0xff) {
+ pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+ kfree(buf);
+ transport_kunmap_data_sg(cmd);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+ /*
+ * Tricky way to say all subpage 00h for
+ * subpage==0, all subpages for subpage==0xff
+ * (and we just checked above that those are
+ * the only two possibilities).
+ */
+ if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+ ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ if (!ten && length + ret >= 255)
+ break;
+ length += ret;
+ }
+ }
+
+ goto set_length;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ goto set_length;
+ }
+
+ /*
+ * We don't intend to implement:
+ * - obsolete page 03h "format parameters" (checked by Solaris)
+ */
+ if (page != 0x03)
+ pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+ page, subpage);
+
+ transport_kunmap_data_sg(cmd);
+ return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+ if (ten)
+ put_unaligned_be16(length - 2, buf);
+ else
+ buf[0] = length - 1;
+
+ if (buf != map_buf) {
+ memcpy(map_buf, buf, cmd->data_length);
+ kfree(buf);
}
+ transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
-static int spc_emulate_request_sense(struct se_cmd *cmd)
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ bool ten = cdb[0] == MODE_SELECT_10;
+ int off = ten ? 8 : 4;
+ bool pf = !!(cdb[1] & 0x10);
+ u8 page, subpage;
+ unsigned char *buf;
+ unsigned char tbuf[SE_MODE_PAGE_BUF];
+ int length;
+ int ret = 0;
+ int i;
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!pf) {
+ ret = TCM_INVALID_CDB_FIELD;
+ goto out;
+ }
+
+ page = buf[off] & 0x3f;
+ subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+ for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+ if (modesense_handlers[i].page == page &&
+ modesense_handlers[i].subpage == subpage) {
+ memset(tbuf, 0, SE_MODE_PAGE_BUF);
+ length = modesense_handlers[i].emulate(dev, 0, tbuf);
+ goto check_contents;
+ }
+
+ ret = TCM_UNKNOWN_MODE_PAGE;
+ goto out;
+
+check_contents:
+ if (memcmp(buf + off, tbuf, length))
+ ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+ transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
@@ -866,19 +1052,14 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -ENOSYS;
+ return TCM_INVALID_CDB_FIELD;
}
rbuf = transport_kmap_data_sg(cmd);
- if (cmd->scsi_sense_reason != 0) {
- /*
- * Out of memory. We will fail with CHECK CONDITION, so
- * we must not clear the unit attention condition.
- */
- target_complete_cmd(cmd, CHECK_CONDITION);
- return 0;
- } else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+ if (!rbuf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@@ -905,33 +1086,97 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
- if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
- transport_kunmap_data_sg(cmd);
+ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+ struct se_dev_entry *deve;
+ struct se_session *sess = cmd->se_sess;
+ unsigned char *buf;
+ u32 lun_count = 0, offset = 8, i;
+
+ if (cmd->data_length < 16) {
+ pr_warn("REPORT LUNS allocation length %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+ * coming via a target_core_mod PASSTHROUGH op, and not through
+ * a $FABRIC_MOD. In that case, report LUN=0 only.
+ */
+ if (!sess) {
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+ lun_count = 1;
+ goto done;
+ }
+
+ spin_lock_irq(&sess->se_node_acl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ deve = sess->se_node_acl->device_list[i];
+ if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+ continue;
+ /*
+ * We determine the correct LUN LIST LENGTH even once we
+ * have reached the initial allocation length.
+ * See SPC2-R20 7.19.
+ */
+ lun_count++;
+ if ((offset + 8) > cmd->data_length)
+ continue;
+
+ int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ offset += 8;
}
+ spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+
+ /*
+ * See SPC3 r07, page 159.
+ */
+done:
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
+ transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
+EXPORT_SYMBOL(spc_emulate_report_luns);
-static int spc_emulate_testunitready(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
}
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
case MODE_SELECT:
*size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SELECT_10:
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SENSE:
*size = cdb[4];
@@ -946,14 +1191,12 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_IN:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_in;
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
break;
case PERSISTENT_RESERVE_OUT:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_out;
*size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
case RELEASE:
case RELEASE_10:
@@ -962,8 +1205,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
else
*size = cmd->data_length;
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_release;
+ cmd->execute_cmd = target_scsi2_reservation_release;
break;
case RESERVE:
case RESERVE_10:
@@ -976,15 +1218,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
else
*size = cmd->data_length;
- /*
- * Setup the legacy emulated handler for SPC-2 and
- * >= SPC-3 compatible reservation handling (CRH=1)
- * Otherwise, we assume the underlying SCSI logic is
- * is running in SPC_PASSTHROUGH, and wants reservations
- * emulation disabled.
- */
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_reserve;
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
break;
case REQUEST_SENSE:
*size = cdb[4];
@@ -997,8 +1231,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
@@ -1020,14 +1253,13 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
break;
case REPORT_LUNS:
- cmd->execute_cmd = target_report_luns;
+ cmd->execute_cmd = spc_emulate_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = MSG_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
@@ -1039,8 +1271,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* MAINTENANCE_IN from SCC-2
* Check for emulated MI_REPORT_TARGET_PGS
*/
- if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
@@ -1058,8 +1289,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* MAINTENANCE_OUT from SCC-2
* Check for emulated MO_SET_TARGET_PGS.
*/
- if (cdb[1] == MO_SET_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
@@ -1075,9 +1305,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
}
return 0;
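
The reworked spc_emulate_modesense() above replaces the old switch on the page code with a table of {page, subpage, emulate} handlers, so page 0x3f ("return all pages") is just a walk over the table and the new MODE SELECT emulation can reuse the same entries for its read-back comparison. A compact userspace sketch of that lookup, with a single stand-in caching-page handler (names are hypothetical):

#include <stdio.h>
#include <string.h>

struct mode_page_stub {			/* stand-in for the handler table entries */
	unsigned char page;
	unsigned char subpage;
	int (*emulate)(unsigned char pc, unsigned char *p);
};

/* Stand-in for spc_modesense_caching(): report nothing changeable for PC=1. */
static int caching_stub(unsigned char pc, unsigned char *p)
{
	p[0] = 0x08;
	p[1] = 0x12;
	if (pc == 1)
		return 20;
	p[2] = 0x04;			/* Write Cache Enable */
	return 20;
}

static const struct mode_page_stub handlers[] = {
	{ .page = 0x08, .subpage = 0x00, .emulate = caching_stub },
};

int main(void)
{
	unsigned char buf[64];
	unsigned char page = 0x08, subpage = 0x00, pc = 0;
	size_t i;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].page != page || handlers[i].subpage != subpage)
			continue;
		printf("page 0x%02x: %d bytes\n", (unsigned)page,
		       handlers[i].emulate(pc, buf));
	}
	return 0;
}
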
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index cb6b0036ae9..d154ce79718 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -1,13 +1,10 @@
/*******************************************************************************
* Filename: target_core_stat.c
*
- * Copyright (c) 2011 Rising Tide Systems
- * Copyright (c) 2011 Linux-iSCSI.org
- *
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
- * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
+ * (c) Copyright 2006-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
@@ -80,13 +77,9 @@ static struct target_stat_scsi_dev_attribute \
static ssize_t target_stat_scsi_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -95,12 +88,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -109,13 +98,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_dev_show_attr_role(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "Target\n");
}
DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -123,12 +105,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
static ssize_t target_stat_scsi_dev_show_attr_ports(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
}
@@ -176,13 +154,9 @@ static struct target_stat_scsi_tgt_dev_attribute \
static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -191,12 +165,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -205,13 +175,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -219,60 +182,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
- char status[16];
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
- strcpy(status, "activated");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- strcpy(status, "deactivated");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- strcpy(status, "shutdown");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- strcpy(status, "offline");
- break;
- default:
- sprintf(status, "unknown(%d)", dev->dev_status);
- break;
- }
-
- return snprintf(page, PAGE_SIZE, "%s\n", status);
+ if (dev->export_count)
+ return snprintf(page, PAGE_SIZE, "activated");
+ else
+ return snprintf(page, PAGE_SIZE, "deactivated");
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int non_accessible_lus;
- if (!dev)
- return -ENODEV;
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
non_accessible_lus = 0;
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- case TRANSPORT_DEVICE_SHUTDOWN:
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- default:
+ else
non_accessible_lus = 1;
- break;
- }
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
}
@@ -281,12 +211,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
}
@@ -335,13 +261,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
static ssize_t target_stat_scsi_lu_show_attr_inst(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_hba *hba = se_subdev->se_dev_hba;
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_hba *hba = dev->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
@@ -350,12 +272,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
static ssize_t target_stat_scsi_lu_show_attr_dev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
}
@@ -364,13 +282,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
static ssize_t target_stat_scsi_lu_show_attr_indx(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
}
DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -378,12 +289,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
static ssize_t target_stat_scsi_lu_show_attr_lun(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
/* FIXME: scsiLuDefaultLun */
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
}
@@ -392,35 +297,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
static ssize_t target_stat_scsi_lu_show_attr_lu_name(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
- if (!dev)
- return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
- dev->se_sub_dev->t10_wwn.unit_serial : "None");
+ (strlen(dev->t10_wwn.unit_serial)) ?
+ dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.vendor)+1];
/* scsiLuVendorId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
- dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+ dev->t10_wwn.vendor[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -429,19 +327,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.model)+1];
/* scsiLuProductId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
- dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+ dev->t10_wwn.model[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -450,19 +344,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
int i;
- char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
-
- if (!dev)
- return -ENODEV;
+ char str[sizeof(dev->t10_wwn.revision)+1];
/* scsiLuRevisionId */
- for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
- str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
- dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+ dev->t10_wwn.revision[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
@@ -471,12 +361,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
static ssize_t target_stat_scsi_lu_show_attr_dev_type(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
@@ -487,30 +373,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
static ssize_t target_stat_scsi_lu_show_attr_status(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n",
- (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
- "available" : "notavailable");
+ (dev->export_count) ? "available" : "notavailable");
}
DEV_STAT_SCSI_LU_ATTR_RO(status);
static ssize_t target_stat_scsi_lu_show_attr_state_bit(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n");
}
@@ -519,12 +393,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%llu\n",
@@ -535,12 +405,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
@@ -550,12 +416,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
@@ -565,12 +427,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
static ssize_t target_stat_scsi_lu_show_attr_resets(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuInResets */
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
@@ -580,13 +438,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(resets);
static ssize_t target_stat_scsi_lu_show_attr_full_stat(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -595,13 +446,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
-
/* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
@@ -610,12 +454,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
static ssize_t target_stat_scsi_lu_show_attr_creation_time(
struct se_dev_stat_grps *sgrps, char *page)
{
- struct se_subsystem_dev *se_subdev = container_of(sgrps,
- struct se_subsystem_dev, dev_stat_grps);
- struct se_device *dev = se_subdev->se_dev_ptr;
-
- if (!dev)
- return -ENODEV;
+ struct se_device *dev =
+ container_of(sgrps, struct se_device, dev_stat_grps);
/* scsiLuCreationTime */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@@ -662,20 +502,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
* Called from target_core_configfs.c:target_core_make_subdev() to setup
* the target statistics groups + configfs CITs located in target_core_stat.c
*/
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+void target_stat_setup_dev_default_groups(struct se_device *dev)
{
- struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+ struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+ config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
- dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
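/*
 * A minimal sketch of the container_of pattern the converted stat handlers
 * above now rely on: struct se_dev_stat_grps is assumed to be embedded
 * directly in struct se_device (as the usage above implies), so the backing
 * device is recovered without the old se_dev_ptr NULL checks.  The helper
 * name below is hypothetical.
 */
static inline struct se_device *stat_grps_to_dev(struct se_dev_stat_grps *sgrps)
{
	return container_of(sgrps, struct se_device, dev_stat_grps);
}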
@@ -1161,7 +1001,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = &dev->se_sub_dev->t10_wwn;
+ wwn = &dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index be75c4331a9..c6e0293ffdb 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -3,8 +3,7 @@
*
* This file contains SPC-3 task management infrastructure
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+ tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
- (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+ (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
- dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index a531fe282b1..5192ac0337f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -3,10 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+/* core_tpg_set_initiator_node_tag():
+ *
+ * Initiator nodeacl tags are not used internally, but may be used by
+ * userspace to emulate aliases or groups.
+ * Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+ struct se_portal_group *tpg,
+ struct se_node_acl *acl,
+ const char *new_tag)
+{
+ if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+ return -EINVAL;
+
+ if (!strncmp("NULL", new_tag, 4)) {
+ acl->acl_tag[0] = '\0';
+ return 0;
+ }
+
+ return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -672,6 +692,7 @@ int core_tpg_register(
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
+ lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
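/*
 * A minimal usage sketch for the core_tpg_set_initiator_node_tag() export
 * added above: a fabric configfs ->store handler could set a nodeacl tag
 * from userspace.  The handler name and its parameters are hypothetical;
 * only the core_tpg_set_initiator_node_tag() call and its return convention
 * (length of the new tag, 0 for "NULL", or -EINVAL) come from the code above.
 */
static ssize_t example_store_acl_tag(struct se_portal_group *tpg,
				     struct se_node_acl *acl,
				     const char *page, size_t count)
{
	int ret;

	/* "NULL" clears the tag; strings >= MAX_ACL_TAG_SIZE are rejected */
	ret = core_tpg_set_initiator_node_tag(tpg, acl, page);
	if (ret < 0)
		return ret;
	return count;
}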
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index dcecbfb1724..c23c76ccef6 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3,10 +3,7 @@
*
* This file contains the Generic Target Engine Core.
*
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -70,7 +67,6 @@ static void transport_handle_queue_full(struct se_cmd *cmd,
static int transport_generic_get_mem(struct se_cmd *cmd);
static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
-static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -297,7 +293,7 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-void target_release_session(struct kref *kref)
+static void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
@@ -558,7 +554,8 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd);
+ transport_generic_request_failure(cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}
/*
@@ -626,7 +623,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
complete(&cmd->t_transport_stop_comp);
return;
} else if (cmd->transport_state & CMD_T_FAILED) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -659,7 +655,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
@@ -712,29 +708,15 @@ void transport_dump_dev_state(
int *bl)
{
*bl += sprintf(b + *bl, "Status: ");
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
+ if (dev->export_count)
*bl += sprintf(b + *bl, "ACTIVATED");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
+ else
*bl += sprintf(b + *bl, "DEACTIVATED");
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- *bl += sprintf(b + *bl, "SHUTDOWN");
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- *bl += sprintf(b + *bl, "OFFLINE");
- break;
- default:
- *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
- break;
- }
*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
- dev->se_sub_dev->se_dev_attrib.block_size,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ dev->dev_attrib.block_size,
+ dev->dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -991,186 +973,8 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
- /*
- * If this device is from Target_Core_Mod/pSCSI, disable the
- * SAM Task Attribute emulation.
- *
- * This is currently not available in upsream Linux/SCSI Target
- * mode code, and is assumed to be disabled while using TCM/pSCSI.
- */
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
- return;
- }
-
- dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", dev->transport->name,
- dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
- struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
- char buf[17];
- int i, device_type;
- /*
- * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
- */
- for (i = 0; i < 8; i++)
- if (wwn->vendor[i] >= 0x20)
- buf[i] = wwn->vendor[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Vendor: %s\n", buf);
-
- for (i = 0; i < 16; i++)
- if (wwn->model[i] >= 0x20)
- buf[i] = wwn->model[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Model: %s\n", buf);
-
- for (i = 0; i < 4; i++)
- if (wwn->revision[i] >= 0x20)
- buf[i] = wwn->revision[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Revision: %s\n", buf);
-
- device_type = dev->transport->get_device_type(dev);
- pr_debug(" Type: %s ", scsi_device_type(device_type));
- pr_debug(" ANSI SCSI revision: %02x\n",
- dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
- struct se_hba *hba,
- struct se_subsystem_api *transport,
- struct se_subsystem_dev *se_dev,
- u32 device_flags,
- void *transport_dev,
- struct se_dev_limits *dev_limits,
- const char *inquiry_prod,
- const char *inquiry_rev)
-{
- int force_pt;
- struct se_device *dev;
-
- dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!dev) {
- pr_err("Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
-
- dev->dev_flags = device_flags;
- dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = transport_dev;
- dev->se_hba = hba;
- dev->se_sub_dev = se_dev;
- dev->transport = transport;
- INIT_LIST_HEAD(&dev->dev_list);
- INIT_LIST_HEAD(&dev->dev_sep_list);
- INIT_LIST_HEAD(&dev->dev_tmr_list);
- INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->state_list);
- INIT_LIST_HEAD(&dev->qf_cmd_list);
- spin_lock_init(&dev->execute_task_lock);
- spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->dev_reservation_lock);
- spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->se_port_lock);
- spin_lock_init(&dev->se_tmr_lock);
- spin_lock_init(&dev->qf_cmd_lock);
- atomic_set(&dev->dev_ordered_id, 0);
-
- se_dev_set_default_attribs(dev, dev_limits);
-
- dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
- dev->creation_time = get_jiffies_64();
- spin_lock_init(&dev->stats_lock);
-
- spin_lock(&hba->device_lock);
- list_add_tail(&dev->dev_list, &hba->hba_dev_list);
- hba->dev_count++;
- spin_unlock(&hba->device_lock);
- /*
- * Setup the SAM Task Attribute emulation for struct se_device
- */
- core_setup_task_attr_emulation(dev);
- /*
- * Force PR and ALUA passthrough emulation with internal object use.
- */
- force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
- /*
- * Setup the Reservations infrastructure for struct se_device
- */
- core_setup_reservations(dev, force_pt);
- /*
- * Setup the Asymmetric Logical Unit Assignment for struct se_device
- */
- if (core_setup_alua(dev, force_pt) < 0)
- goto err_dev_list;
-
- /*
- * Startup the struct se_device processing thread
- */
- dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
- dev->transport->name);
- if (!dev->tmr_wq) {
- pr_err("Unable to create tmr workqueue for %s\n",
- dev->transport->name);
- goto err_dev_list;
- }
- /*
- * Setup work_queue for QUEUE_FULL
- */
- INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- /*
- * Preload the initial INQUIRY const values if we are doing
- * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
- * passthrough because this is being provided by the backend LLD.
- * This is required so that transport_get_inquiry() copies these
- * originals once back into DEV_T10_WWN(dev) for the virtual device
- * setup.
- */
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!inquiry_prod || !inquiry_rev) {
- pr_err("All non TCM/pSCSI plugins require"
- " INQUIRY consts\n");
- goto err_wq;
- }
-
- strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
- strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
- strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
- }
- scsi_dump_inquiry(dev);
-
- return dev;
-
-err_wq:
- destroy_workqueue(dev->tmr_wq);
-err_dev_list:
- spin_lock(&hba->device_lock);
- list_del(&dev->dev_list);
- hba->dev_count--;
- spin_unlock(&hba->device_lock);
-
- se_release_vpd_for_dev(dev);
-
- kfree(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;
@@ -1185,18 +989,18 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_CDB.
*/
- if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
+ if (dev->dev_attrib.block_size != 512) {
pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
" plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- goto out_invalid_cdb_field;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* For the overflow case keep the existing fabric provided
@@ -1216,10 +1020,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
return 0;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
/*
@@ -1259,45 +1059,41 @@ void transport_init_se_cmd(
}
EXPORT_SYMBOL(transport_init_se_cmd);
-static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+
/*
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
+ cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
smp_mb__after_atomic_inc();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- cmd->se_dev->transport->name);
+ dev->transport->name);
return 0;
}
-/* target_setup_cmd_from_cdb():
- *
- * Called from fabric RX Thread.
- */
-int target_setup_cmd_from_cdb(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
- struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
- u32 pr_reg_type = 0;
- u8 alua_ascq = 0;
+ struct se_device *dev = cmd->se_dev;
unsigned long flags;
- int ret;
+ sense_reason_t ret;
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
@@ -1307,9 +1103,7 @@ int target_setup_cmd_from_cdb(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
+ return TCM_INVALID_CDB_FIELD;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1324,10 +1118,7 @@ int target_setup_cmd_from_cdb(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -ENOMEM;
+ return TCM_OUT_OF_RESOURCES;
}
} else
cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@@ -1339,70 +1130,30 @@ int target_setup_cmd_from_cdb(
/*
* Check for an existing UNIT ATTENTION condition
*/
- if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -EINVAL;
- }
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ return ret;
- ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
- if (ret != 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- if (ret > 0) {
- pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-
- transport_set_sense_codes(cmd, 0x04, alua_ascq);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -EINVAL;
- }
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ return ret;
- /*
- * Check status for SPC-3 Persistent Reservations
- */
- if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
- if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EBUSY;
- }
- /*
- * This means the CDB is allowed for the SCSI Initiator port
- * when said port is *NOT* holding the legacy SPC-2 or
- * SPC-3 Persistent Reservation.
- */
- }
+ ret = target_check_reservation(cmd);
+ if (ret)
+ return ret;
- ret = cmd->se_dev->transport->parse_cdb(cmd);
- if (ret < 0)
+ ret = dev->transport->parse_cdb(cmd);
+ if (ret)
+ return ret;
+
+ ret = transport_check_alloc_task_attr(cmd);
+ if (ret)
return ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- /*
- * Check for SAM Task Attribute Emulation
- */
- if (transport_check_alloc_task_attr(cmd) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
@@ -1418,7 +1169,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
- int ret;
+ sense_reason_t ret;
if (!cmd->se_lun) {
dump_stack();
@@ -1448,13 +1199,41 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0)
- transport_generic_request_failure(cmd);
-
+ if (ret)
+ transport_generic_request_failure(cmd, ret);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
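/*
 * A minimal sketch of the error-handling convention introduced in this file:
 * command setup paths now return a sense_reason_t instead of stashing
 * cmd->scsi_sense_reason as a side effect, and callers pass that value to
 * transport_generic_request_failure().  The wrapper below is hypothetical;
 * the calls it makes mirror transport_handle_cdb_direct() above.
 */
static void example_submit_cmd(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	ret = target_setup_cmd_from_cdb(cmd, cdb);
	if (ret) {
		transport_generic_request_failure(cmd, ret);
		return;
	}

	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
}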
+static sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+ u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+ if (!sgl || !sgl_count)
+ return 0;
+
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
+
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
+ }
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ return 0;
+}
+
/*
* target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
* se_cmd + use pre-allocated SGL memory.
@@ -1487,7 +1266,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
struct se_portal_group *se_tpg;
- int rc;
+ sense_reason_t rc;
+ int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
@@ -1508,9 +1288,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
- if (rc)
- return rc;
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret)
+ return ret;
/*
* Signal bidirectional data payloads to target-core
*/
@@ -1519,16 +1299,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
/*
* Locate se_lun pointer and attach it to struct se_cmd
*/
- if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
+ rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+ if (rc) {
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
target_put_sess_cmd(se_sess, se_cmd);
return 0;
}
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
- transport_generic_request_failure(se_cmd);
+ transport_generic_request_failure(se_cmd, rc);
return 0;
}
/*
@@ -1563,7 +1343,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
sgl_bidi, sgl_bidi_count);
if (rc != 0) {
- transport_generic_request_failure(se_cmd);
+ transport_generic_request_failure(se_cmd, rc);
return 0;
}
}
@@ -1709,16 +1489,17 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd,
+ sense_reason_t sense_reason)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->scsi_sense_reason);
+ cmd->t_state, sense_reason);
pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
@@ -1727,10 +1508,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
- switch (cmd->scsi_sense_reason) {
+ switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
@@ -1743,6 +1523,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
break;
+ case TCM_OUT_OF_RESOURCES:
+ sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ break;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1759,7 +1542,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1770,13 +1553,12 @@ void transport_generic_request_failure(struct se_cmd *cmd)
goto check_stop;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0], cmd->scsi_sense_reason);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->t_task_cdb[0], sense_reason);
+ sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
@@ -1794,69 +1576,30 @@ EXPORT_SYMBOL(transport_generic_request_failure);
static void __target_execute_cmd(struct se_cmd *cmd)
{
- int error = 0;
+ sense_reason_t ret;
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
spin_unlock_irq(&cmd->t_state_lock);
- if (cmd->execute_cmd)
- error = cmd->execute_cmd(cmd);
+ if (cmd->execute_cmd) {
+ ret = cmd->execute_cmd(cmd);
+ if (ret) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
- if (error) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
- spin_unlock_irq(&cmd->t_state_lock);
-
- transport_generic_request_failure(cmd);
+ transport_generic_request_failure(cmd, ret);
+ }
}
}
-void target_execute_cmd(struct se_cmd *cmd)
+static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- /*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1)) {
- complete(&cmd->t_transport_stop_comp);
- return;
- }
-
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
- /*
- * Determine if frontend context caller is requesting the stopping of
- * this command for frontend exceptions.
- */
- if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->t_transport_stop_comp);
- return;
- }
-
- cmd->t_state = TRANSPORT_PROCESSING;
- spin_unlock_irq(&cmd->t_state_lock);
-
- if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
- goto execute;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return false;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
@@ -1867,7 +1610,7 @@ void target_execute_cmd(struct se_cmd *cmd)
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
"se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
- goto execute;
+ return false;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -1881,7 +1624,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* exist that need to be completed first.
*/
if (!atomic_read(&dev->simple_cmds))
- goto execute;
+ return false;
break;
default:
/*
@@ -1892,23 +1635,63 @@ void target_execute_cmd(struct se_cmd *cmd)
break;
}
- if (atomic_read(&dev->dev_ordered_sync) != 0) {
- spin_lock(&dev->delayed_cmd_lock);
- list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
- spin_unlock(&dev->delayed_cmd_lock);
+ if (atomic_read(&dev->dev_ordered_sync) == 0)
+ return false;
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
+
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
+ return true;
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+ /*
+	 * If the received CDB has already been aborted, stop processing it here.
+ */
+ if (transport_check_aborted_status(cmd, 1)) {
+ complete(&cmd->transport_lun_stop_comp);
return;
}
-execute:
/*
- * Otherwise, no ORDERED task attributes exist..
+	 * Determine if IOCTL context caller is requesting the stopping of this
+ * command for LUN shutdown purposes.
*/
- __target_execute_cmd(cmd);
+ spin_lock_irq(&cmd->t_state_lock);
+ if (cmd->transport_state & CMD_T_LUN_STOP) {
+ pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->transport_lun_stop_comp);
+ return;
+ }
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ */
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+ cmd->se_tfo->get_task_tag(cmd));
+
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->t_transport_stop_comp);
+ return;
+ }
+
+ cmd->t_state = TRANSPORT_PROCESSING;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ if (!target_handle_task_attr(cmd))
+ __target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1947,6 +1730,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ return;
+
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
@@ -1975,8 +1761,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
ret = cmd->se_tfo->queue_status(cmd);
@@ -2034,8 +1819,8 @@ static void target_complete_ok_work(struct work_struct *work)
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- transport_complete_task_attr(cmd);
+ transport_complete_task_attr(cmd);
+
/*
* Check to schedule QUEUE_FULL work, or execute an existing
* cmd->transport_qf_callback()
@@ -2183,9 +1968,10 @@ static void transport_put_cmd(struct se_cmd *cmd)
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_fe_count)) {
- if (!atomic_dec_and_test(&cmd->t_fe_count))
- goto out_busy;
+ if (atomic_read(&cmd->t_fe_count) &&
+ !atomic_dec_and_test(&cmd->t_fe_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
}
if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -2197,56 +1983,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
transport_free_pages(cmd);
transport_release_cmd(cmd);
return;
-out_busy:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-/*
- * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
- * allocating in the core.
- * @cmd: Associated se_cmd descriptor
- * @mem: SGL style memory for TCM WRITE / READ
- * @sg_mem_num: Number of SGL elements
- * @mem_bidi_in: SGL style memory for TCM BIDI READ
- * @sg_mem_bidi_num: Number of BIDI READ SGL elements
- *
- * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
- * of parameters.
- */
-int transport_generic_map_mem_to_cmd(
- struct se_cmd *cmd,
- struct scatterlist *sgl,
- u32 sgl_count,
- struct scatterlist *sgl_bidi,
- u32 sgl_bidi_count)
-{
- if (!sgl || !sgl_count)
- return 0;
-
- /*
- * Reject SCSI data overflow with map_mem_to_cmd() as incoming
- * scatterlists already have been set to follow what the fabric
- * passes for the original expected data transfer length.
- */
- if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- pr_warn("Rejecting SCSI DATA overflow for fabric using"
- " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
-
- cmd->t_data_sg = sgl;
- cmd->t_data_nents = sgl_count;
-
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- return 0;
}
-EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
@@ -2268,10 +2005,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
/* >1 page. use vmap */
pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
- if (!pages) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (!pages)
return NULL;
- }
/* convert sg[] to pages[] */
for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@@ -2280,10 +2015,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
kfree(pages);
- if (!cmd->t_data_vmap) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (!cmd->t_data_vmap)
return NULL;
- }
return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
@@ -2349,7 +2082,8 @@ out:
* might not have the payload yet, so notify the fabric via a call to
* ->write_pending instead. Otherwise place it on the execution queue.
*/
-int transport_generic_new_cmd(struct se_cmd *cmd)
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
@@ -2362,7 +2096,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- goto out_fail;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
atomic_inc(&cmd->t_fe_count);
@@ -2388,14 +2122,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
- if (ret < 0)
- return ret;
- return 1;
+ /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+ WARN_ON(ret);
+
+ return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-out_fail:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
@@ -2839,21 +2570,9 @@ static int transport_get_sense_codes(
return 0;
}
-static int transport_set_sense_codes(
- struct se_cmd *cmd,
- u8 asc,
- u8 ascq)
-{
- cmd->scsi_asc = asc;
- cmd->scsi_ascq = ascq;
-
- return 0;
-}
-
-int transport_send_check_condition_and_sense(
- struct se_cmd *cmd,
- u8 reason,
- int from_transport)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ sense_reason_t reason, int from_transport)
{
unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;
@@ -3044,23 +2763,19 @@ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
- int ret = 0;
+ if (!(cmd->transport_state & CMD_T_ABORTED))
+ return 0;
- if (cmd->transport_state & CMD_T_ABORTED) {
- if (!send_status ||
- (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
+ if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
- " status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0],
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+ cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- cmd->se_tfo->queue_status(cmd);
- ret = 1;
- }
- return ret;
+ cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+ cmd->se_tfo->queue_status(cmd);
+
+ return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 6666a0c74f6..bf0e390ce2d 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -3,8 +3,7 @@
*
* This file contains logic for SPC-3 Unit Attention emulation
*
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
@@ -38,9 +37,8 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
-int core_scsi3_ua_check(
- struct se_cmd *cmd,
- unsigned char *cdb)
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
@@ -71,16 +69,14 @@ int core_scsi3_ua_check(
* was received, then the device server shall process the command
* and either:
*/
- switch (cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
- return -EINVAL;
+ return TCM_CHECK_CONDITION_UNIT_ATTENTION;
}
-
- return -EINVAL;
}
int core_scsi3_ua_allocate(
@@ -237,7 +233,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -265,8 +261,8 @@ void core_scsi3_ua_for_check_condition(
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 6e6b03460a1..0204952fe4d 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -26,7 +26,7 @@
extern struct kmem_cache *se_ua_cache;
-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 9585010964e..12d6fa21e5e 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -430,7 +430,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
{
struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
- transport_deregister_session(sess->se_sess);
kfree(sess);
}
@@ -438,6 +437,7 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+ transport_deregister_session(sess->se_sess);
call_rcu(&sess->rcu, ft_sess_rcu_free);
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8636fae1f7e..c2c77d1ac49 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -98,7 +98,7 @@ config EXYNOS_THERMAL
depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
depends on CPU_THERMAL
help
- If you say yes here you get support for TMU (Thermal Managment
+ If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
config DB8500_THERMAL
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index d8e05eeab23..0ecf22b6a38 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -357,6 +357,7 @@ config TRACE_SINK
config PPC_EPAPR_HV_BYTECHAN
tristate "ePAPR hypervisor byte channel driver"
depends on PPC
+ select EPAPR_PARAVIRT
help
This driver creates /dev entries for each ePAPR hypervisor byte
channel, thereby allowing applications to communicate with byte
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index be1a9a1e749..cd69b48f6df 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -178,7 +178,7 @@ static int hvc_opal_probe(struct platform_device *dev)
proto = HV_PROTOCOL_HVSI;
ops = &hvc_opal_hvsi_ops;
} else {
- pr_err("hvc_opal: Unkown protocol for %s\n",
+ pr_err("hvc_opal: Unknown protocol for %s\n",
dev->dev.of_node->full_name);
return -ENXIO;
}
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index ed6f5f1f5a5..0c629807610 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -313,7 +313,7 @@ static int hvc_vio_probe(struct vio_dev *vdev,
proto = HV_PROTOCOL_HVSI;
ops = &hvterm_hvsi_ops;
} else {
- pr_err("hvc_vio: Unkown protocol for %s\n", vdev->dev.of_node->full_name);
+ pr_err("hvc_vio: Unknown protocol for %s\n", vdev->dev.of_node->full_name);
return -ENXIO;
}
diff --git a/drivers/tty/ipwireless/setup_protocol.h b/drivers/tty/ipwireless/setup_protocol.h
index 9d6bcc77c73..002c34e7252 100644
--- a/drivers/tty/ipwireless/setup_protocol.h
+++ b/drivers/tty/ipwireless/setup_protocol.h
@@ -59,7 +59,7 @@ struct tl_setup_config_done_msg {
unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_DONE_MSG */
} __attribute__ ((__packed__));
-/* Asyncronous messages */
+/* Asynchronous messages */
struct tl_setup_open_msg {
unsigned char sig_no; /* TL_SETUP_SIGNO_OPEN_MSG */
unsigned char port_no;
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index f3d283f2e3a..c31133a6ea8 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -271,7 +271,7 @@ config SERIAL_8250_DW
present in the Synopsys DesignWare APB UART.
config SERIAL_8250_EM
- tristate "Support for Emma Mobile intergrated serial port"
+ tristate "Support for Emma Mobile integrated serial port"
depends on SERIAL_8250 && ARM && HAVE_CLK
help
Selecting this option will add support for the integrated serial
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index e6a008f4939..2e2b2c1cb72 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -815,7 +815,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
lcr = WLS(5);
break;
default:
- printk(KERN_ERR "%s: word lengh not supported\n",
+ printk(KERN_ERR "%s: word length not supported\n",
__func__);
}
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 6197a69adb4..72b6334bcf1 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -505,7 +505,7 @@ static void load_code(struct icom_port *icom_port)
/* Stop processor */
stop_processor(icom_port);
- dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n");
+ dev_err(&icom_port->adapter->pci_dev->dev,"Port not operational\n");
}
if (new_page != NULL)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 23f797eb7a2..57d6b29c039 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -41,8 +41,7 @@
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
-
-#include <plat/omap-serial.h>
+#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 6
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 88dde95b679..d938b2b99e3 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -222,7 +222,7 @@ static int usb_probe_device(struct device *dev)
/* TODO: Add real matching code */
/* The device should always appear to be in use
- * unless the driver suports autosuspend.
+ * unless the driver supports autosuspend.
*/
if (!udriver->supports_autosuspend)
error = usb_autoresume_device(udev);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 59dcea2f695..f4a21f6f081 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -51,7 +51,7 @@
* full speed USB controllers, including the at91rm9200 (arm920T, with MMU),
* at91sam926x (arm926ejs, with MMU), and several no-mmu versions.
*
- * This driver expects the board has been wired with two GPIOs suppporting
+ * This driver expects the board has been wired with two GPIOs supporting
* a VBUS sensing IRQ, and a D+ pullup. (They may be omitted, but the
* testing hasn't covered such cases.)
*
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 09699f6e87f..8bfe990caf1 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -44,7 +44,7 @@
#include <asm/unaligned.h>
#include <asm/mach-types.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <mach/usb.h>
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index f74794c9315..a7d1f5b4c4e 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -14,6 +14,9 @@
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
@@ -167,6 +170,8 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
+static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);
+
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
@@ -177,13 +182,17 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
struct clk *clk;
void __iomem *regs;
int irq, err;
+ enum orion_ehci_phy_ver phy_version;
if (usb_disabled())
return -ENODEV;
pr_debug("Initializing Orion-SoC USB Host Controller\n");
- irq = platform_get_irq(pdev, 0);
+ if (pdev->dev.of_node)
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ else
+ irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev,
"Found HC with no IRQ. Check %s setup!\n",
@@ -201,6 +210,14 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
goto err1;
}
+ /*
+ * Right now device-tree probed devices don't get dma_mask
+ * set. Since shared usb code relies on it, set it here for
+ * now. Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &ehci_orion_dma_mask;
+
if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
@@ -248,7 +265,12 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
/*
* setup Orion USB controller.
*/
- switch (pd->phy_version) {
+ if (pdev->dev.of_node)
+ phy_version = EHCI_PHY_NA;
+ else
+ phy_version = pd->phy_version;
+
+ switch (phy_version) {
case EHCI_PHY_NA: /* dont change USB phy settings */
break;
case EHCI_PHY_ORION:
@@ -303,9 +325,19 @@ static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:orion-ehci");
+static const struct of_device_id ehci_orion_dt_ids[] __devinitdata = {
+ { .compatible = "marvell,orion-ehci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
+
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
.remove = __exit_p(ehci_orion_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
- .driver.name = "orion-ehci",
+ .driver = {
+ .name = "orion-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ehci_orion_dt_ids),
+ },
};
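A minimal sketch of the DT-matching pattern the ehci-orion conversion above relies on (an of_device_id table wired up through of_match_ptr() in the platform driver); the "acme,example" compatible string and all names here are purely illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* pdev->dev.of_node is non-NULL when the device came from DT */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "acme,example", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_dt_ids);

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name		= "acme-example",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(example_dt_ids),
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");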
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 766dbda1981..fd3486745e6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -251,7 +251,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
/* best case is 32bit-aligned source address */
if ((0x02 & (unsigned long) src) == 0) {
if (len >= 4) {
- writesl(fifo, src + index, len >> 2);
+ iowrite32_rep(fifo, src + index, len >> 2);
index += len & ~0x03;
}
if (len & 0x02) {
@@ -260,7 +260,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
}
} else {
if (len >= 2) {
- writesw(fifo, src + index, len >> 1);
+ iowrite16_rep(fifo, src + index, len >> 1);
index += len & ~0x01;
}
}
@@ -268,7 +268,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
musb_writeb(fifo, 0, src[index]);
} else {
/* byte aligned */
- writesb(fifo, src, len);
+ iowrite8_rep(fifo, src, len);
}
}
@@ -294,7 +294,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
/* best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) dst) == 0) {
if (len >= 4) {
- readsl(fifo, dst, len >> 2);
+ ioread32_rep(fifo, dst, len >> 2);
index = len & ~0x03;
}
if (len & 0x02) {
@@ -303,7 +303,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
}
} else {
if (len >= 2) {
- readsw(fifo, dst, len >> 1);
+ ioread16_rep(fifo, dst, len >> 1);
index = len & ~0x01;
}
}
@@ -311,7 +311,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
dst[index] = musb_readb(fifo, 0);
} else {
/* byte aligned */
- readsb(fifo, dst, len);
+ ioread8_rep(fifo, dst, len);
}
}
#endif
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 565ad161783..eebeed78edd 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -37,27 +37,6 @@
#include <linux/io.h>
-#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
- && !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
- && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
- && !defined(CONFIG_MIPS) && !defined(CONFIG_M68K) \
- && !defined(CONFIG_XTENSA)
-static inline void readsl(const void __iomem *addr, void *buf, int len)
- { insl((unsigned long)addr, buf, len); }
-static inline void readsw(const void __iomem *addr, void *buf, int len)
- { insw((unsigned long)addr, buf, len); }
-static inline void readsb(const void __iomem *addr, void *buf, int len)
- { insb((unsigned long)addr, buf, len); }
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
- { outsl((unsigned long)addr, buf, len); }
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
- { outsw((unsigned long)addr, buf, len); }
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
- { outsb((unsigned long)addr, buf, len); }
-
-#endif
-
#ifndef CONFIG_BLACKFIN
/* NOTE: these offsets are all in bytes */
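For reference, a small sketch of the generic <linux/io.h> string accessors that replace the driver-local readsl()/writesl() fallbacks deleted above; function and parameter names are illustrative:

#include <linux/io.h>
#include <linux/types.h>

static void example_drain_fifo(void __iomem *fifo, u32 *buf, unsigned int words)
{
	/* read 'words' 32-bit values from the same MMIO address */
	ioread32_rep(fifo, buf, words);
}

static void example_fill_fifo(void __iomem *fifo, const u32 *buf,
			      unsigned int words)
{
	/* write 'words' 32-bit values to the same MMIO address */
	iowrite32_rep(fifo, buf, words);
}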
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 8bde6fc5eb7..3969813c217 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -22,6 +22,7 @@
#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/nop-usb-xceiv.h>
@@ -198,7 +199,7 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- writesl(fifo, buf, len >> 2);
+ iowrite32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
@@ -245,7 +246,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
/* Best case is 32bit-aligned destination address */
if ((0x02 & (unsigned long) buf) == 0) {
if (len >= 4) {
- readsl(fifo, buf, len >> 2);
+ ioread32_rep(fifo, buf, len >> 2);
buf += (len & ~0x03);
len &= 0x03;
}
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 9716850a430..98df17c984a 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "musb_core.h"
#include "tusb6010.h"
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7eb73c561bd..5de6e7f39f9 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -6,6 +6,7 @@ comment "USB Physical Layer drivers"
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
+ depends on ARCH_OMAP2PLUS
select USB_OTG_UTILS
help
Enable this to support the transceiver that is part of SOC. This
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index ea5f2586fbd..6c3586a4c95 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -883,7 +883,7 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
US_DEBUGP("%s: NOT working scsi, not SS\n", __func__);
chip->proto_handler_backup(srb, us);
- /* Check wether card is plugged in */
+ /* Check whether card is plugged in */
if (srb->cmnd[0] == TEST_UNIT_READY) {
if (srb->result == SAM_STAT_GOOD) {
SET_LUN_READY(chip, srb->device->lun);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6c119944bbb..b28e66c4376 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -43,6 +43,10 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
u16 cmd;
u8 msix_pos;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
vdev->reset_works = (pci_reset_function(pdev) == 0);
pci_save_state(pdev);
vdev->pci_saved_state = pci_store_saved_state(pdev);
@@ -51,8 +55,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
__func__, dev_name(&pdev->dev));
ret = vfio_config_init(vdev);
- if (ret)
- goto out;
+ if (ret) {
+ pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state);
+ pci_disable_device(pdev);
+ return ret;
+ }
if (likely(!nointxmask))
vdev->pci_2_3 = pci_intx_mask_supported(pdev);
@@ -77,24 +84,15 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
} else
vdev->msix_bar = 0xFF;
- ret = pci_enable_device(pdev);
- if (ret)
- goto out;
-
- return ret;
-
-out:
- kfree(vdev->pci_saved_state);
- vdev->pci_saved_state = NULL;
- vfio_config_free(vdev);
- return ret;
+ return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
int bar;
- pci_disable_device(vdev->pdev);
+ pci_disable_device(pdev);
vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
VFIO_IRQ_SET_ACTION_TRIGGER,
@@ -104,22 +102,40 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vfio_config_free(vdev);
- pci_reset_function(vdev->pdev);
-
- if (pci_load_and_free_saved_state(vdev->pdev,
- &vdev->pci_saved_state) == 0)
- pci_restore_state(vdev->pdev);
- else
- pr_info("%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&vdev->pdev->dev));
-
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
if (!vdev->barmap[bar])
continue;
- pci_iounmap(vdev->pdev, vdev->barmap[bar]);
- pci_release_selected_regions(vdev->pdev, 1 << bar);
+ pci_iounmap(pdev, vdev->barmap[bar]);
+ pci_release_selected_regions(pdev, 1 << bar);
vdev->barmap[bar] = NULL;
}
+
+ /*
+ * If we have saved state, restore it. If we can reset the device,
+ * even better. Resetting with current state seems better than
+ * nothing, but saving and restoring current state without reset
+ * is just busy work.
+ */
+ if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
+ pr_info("%s: Couldn't reload %s saved state\n",
+ __func__, dev_name(&pdev->dev));
+
+ if (!vdev->reset_works)
+ return;
+
+ pci_save_state(pdev);
+ }
+
+ /*
+ * Disable INTx and MSI, presumably to avoid spurious interrupts
+ * during reset. Stolen from pci_reset_function()
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ if (vdev->reset_works)
+ __pci_reset_function(pdev);
+
+ pci_restore_state(pdev);
}
static void vfio_pci_release(void *device_data)
@@ -327,15 +343,10 @@ static long vfio_pci_ioctl(void *device_data,
hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
return -EINVAL;
- data = kmalloc(hdr.count * size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, (void __user *)(arg + minsz),
- hdr.count * size)) {
- kfree(data);
- return -EFAULT;
- }
+ data = memdup_user((void __user *)(arg + minsz),
+ hdr.count * size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
}
mutex_lock(&vdev->igate);
@@ -562,9 +573,9 @@ static int __init vfio_pci_init(void)
return 0;
-out_virqfd:
- vfio_pci_virqfd_exit();
out_driver:
+ vfio_pci_virqfd_exit();
+out_virqfd:
vfio_pci_uninit_perm_bits();
return ret;
}
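A short sketch, with illustrative names, of the memdup_user() pattern the vfio ioctl path switches to above: one call allocates and copies, and failure comes back as an ERR_PTR for the caller to unwrap:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long example_copy_args(void __user *uptr, size_t len)
{
	char *data;

	data = memdup_user(uptr, len);
	if (IS_ERR(data))
		return PTR_ERR(data);	/* -EFAULT or -ENOMEM */

	/* ... use data ... */

	kfree(data);
	return 0;
}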
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 56097c6d072..12c264d3b05 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -191,6 +191,17 @@ static void vfio_container_put(struct vfio_container *container)
kref_put(&container->kref, vfio_container_release);
}
+static void vfio_group_unlock_and_free(struct vfio_group *group)
+{
+ mutex_unlock(&vfio.group_lock);
+ /*
+ * Unregister outside of lock. A spurious callback is harmless now
+ * that the group is no longer in vfio.group_list.
+ */
+ iommu_group_unregister_notifier(group->iommu_group, &group->nb);
+ kfree(group);
+}
+
/**
* Group objects - create, release, get, put, search
*/
@@ -229,8 +240,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
minor = vfio_alloc_group_minor(group);
if (minor < 0) {
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return ERR_PTR(minor);
}
@@ -239,8 +249,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
if (tmp->iommu_group == iommu_group) {
vfio_group_get(tmp);
vfio_free_group_minor(minor);
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return tmp;
}
}
@@ -249,8 +258,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
group, "%d", iommu_group_id(iommu_group));
if (IS_ERR(dev)) {
vfio_free_group_minor(minor);
- mutex_unlock(&vfio.group_lock);
- kfree(group);
+ vfio_group_unlock_and_free(group);
return (struct vfio_group *)dev; /* ERR_PTR */
}
@@ -274,16 +282,7 @@ static void vfio_group_release(struct kref *kref)
device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
list_del(&group->vfio_next);
vfio_free_group_minor(group->minor);
-
- mutex_unlock(&vfio.group_lock);
-
- /*
- * Unregister outside of lock. A spurious callback is harmless now
- * that the group is no longer in vfio.group_list.
- */
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
-
- kfree(group);
+ vfio_group_unlock_and_free(group);
}
static void vfio_group_put(struct vfio_group *group)
@@ -466,8 +465,9 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
+ struct device_driver *drv = ACCESS_ONCE(dev->driver);
- if (!dev->driver || vfio_whitelisted_driver(dev->driver))
+ if (!drv || vfio_whitelisted_driver(drv))
return 0;
device = vfio_group_get_device(group, dev);
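A sketch of the ACCESS_ONCE() idiom used in vfio_dev_viable() above: it forces a single read of a pointer (here dev->driver) that another thread may clear concurrently during unbind, so the test and any later use see the same value. The helper name below is illustrative:

#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/types.h>

static bool example_has_driver(struct device *dev)
{
	struct device_driver *drv = ACCESS_ONCE(dev->driver);

	return drv != NULL;
}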
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 79e7e4d45eb..b20df5c829f 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -230,7 +230,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
if (!nacl) {
- pr_err("Unable to alocate struct tcm_vhost_nacl\n");
+ pr_err("Unable to allocate struct tcm_vhost_nacl\n");
return NULL;
}
@@ -538,10 +538,6 @@ static void tcm_vhost_submission_work(struct work_struct *work)
if (tv_cmd->tvc_sgl_count) {
sg_ptr = tv_cmd->tvc_sgl;
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
if (se_cmd->se_cmd_flags & SCF_BIDI) {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d08d7998a4a..e7068c50880 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2045,7 +2045,7 @@ config FB_S3C_DEBUG_REGWRITE
bool "Debug register writes"
depends on FB_S3C
---help---
- Show all register writes via printk(KERN_DEBUG)
+ Show all register writes via pr_debug()
config FB_S3C2410
tristate "S3C2410 LCD framebuffer support"
@@ -2140,14 +2140,16 @@ config FB_UDL
To compile as a module, choose M here: the module name is udlfb.
config FB_IBM_GXT4500
- tristate "Framebuffer support for IBM GXT4500P adaptor"
+ tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB && PPC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
---help---
- Say Y here to enable support for the IBM GXT4500P display
- adaptor, found on some IBM System P (pSeries) machines.
+ Say Y here to enable support for the IBM GXT4000P/6000P and
+ GXT4500P/6500P display adaptor based on Raster Engine RC1000,
+ found on some IBM System P (pSeries) machines. This driver
+ doesn't use Geometry Engine GT1000.
config FB_PS3
tristate "PS3 GPU framebuffer driver"
@@ -2442,4 +2444,19 @@ config FB_SH_MOBILE_MERAM
Up to 4 memory channels can be configured, allowing 4 RGB or
2 YCbCr framebuffers to be configured.
+config FB_SSD1307
+ tristate "Solomon SSD1307 framebuffer support"
+ depends on FB && I2C
+ depends on OF
+ depends on GENERIC_GPIO
+ select FB_SYS_FOPS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
+ select PWM
+ help
+ This driver implements support for the Solomon SSD1307
+ OLED controller over I2C.
+
endmenu
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 23e948ebfab..768a137a1ba 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -161,6 +161,7 @@ obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_MXS) += mxsfb.o
+obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b7ec34c57f4..c072ed9aea3 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -117,8 +117,8 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
data->current_brightness = value;
return 0;
out:
- dev_dbg(chip->dev, "set brightness %d failure with return "
- "value:%d\n", value, ret);
+ dev_dbg(chip->dev, "set brightness %d failure with return value: %d\n",
+ value, ret);
return ret;
}
@@ -208,22 +208,19 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "duty cycle");
if (!res) {
dev_err(&pdev->dev, "No REG resource for duty cycle\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_duty_cycle = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "always on");
if (!res) {
dev_err(&pdev->dev, "No REG resorce for always on\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_always_on = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "current");
if (!res) {
dev_err(&pdev->dev, "No REG resource for current\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_current = res->start;
@@ -231,8 +228,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
sprintf(name, "backlight-%d", pdev->id);
data->port = pdev->id;
data->chip = chip;
- data->i2c = (chip->id == CHIP_PM8606) ? chip->client \
- : chip->companion;
+ data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->current_brightness = MAX_BRIGHTNESS;
if (pm860x_backlight_dt_init(pdev, data, name)) {
if (pdata) {
@@ -263,8 +259,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
return 0;
out_brt:
backlight_device_unregister(bl);
-out:
- devm_kfree(&pdev->dev, data);
return ret;
}
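The error-path simplification above works because devm-managed allocations are released automatically when probe fails, so there is nothing to unwind by hand; a minimal sketch with made-up names:

#include <linux/device.h>
#include <linux/slab.h>

static int example_probe_alloc(struct device *dev)
{
	void *data = devm_kzalloc(dev, 64, GFP_KERNEL);

	if (!data)
		return -ENOMEM;	/* freed automatically if probe fails */

	/* ... on any later error, a plain return is enough ... */
	return 0;
}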
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index df1cbb7ef6c..de5e5e74e2a 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -106,10 +106,9 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
pwmbl->pdata->pwm_compare_max);
- dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
- "(%lu Hz)\n", pwmbl->pwmc.mck /
- pwmbl->pdata->pwm_compare_max /
- (1 << prescale));
+ dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n",
+ pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max /
+ (1 << prescale));
return pwm_channel_enable(&pwmbl->pwmc);
}
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 297db2fa91f..345f6660d4b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -370,6 +370,35 @@ void backlight_device_unregister(struct backlight_device *bd)
}
EXPORT_SYMBOL(backlight_device_unregister);
+#ifdef CONFIG_OF
+static int of_parent_match(struct device *dev, void *data)
+{
+ return dev->parent && dev->parent->of_node == data;
+}
+
+/**
+ * of_find_backlight_by_node() - find backlight device by device-tree node
+ * @node: device-tree node of the backlight device
+ *
+ * Returns a pointer to the backlight device corresponding to the given DT
+ * node or NULL if no such backlight device exists or if the device hasn't
+ * been probed yet.
+ *
+ * This function obtains a reference on the backlight device and it is the
+ * caller's responsibility to drop the reference by calling put_device() on
+ * the backlight device's .dev field.
+ */
+struct backlight_device *of_find_backlight_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(backlight_class, NULL, node, of_parent_match);
+
+ return dev ? to_backlight_device(dev) : NULL;
+}
+EXPORT_SYMBOL(of_find_backlight_by_node);
+#endif
+
static void __exit backlight_class_exit(void)
{
class_destroy(backlight_class);
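A sketch of how a display driver might consume the new helper through a "backlight" phandle in its own DT node; the property name and function below are assumptions for illustration, and the declaration is taken to live in <linux/backlight.h> as this series arranges:

#include <linux/backlight.h>
#include <linux/of.h>

static struct backlight_device *example_get_backlight(struct device_node *np)
{
	struct device_node *bl_node;
	struct backlight_device *bl;

	bl_node = of_parse_phandle(np, "backlight", 0);
	if (!bl_node)
		return NULL;

	bl = of_find_backlight_by_node(bl_node);
	of_node_put(bl_node);

	/* caller must eventually drop the reference: put_device(&bl->dev) */
	return bl;
}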
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index eaaebf21993..e323fcbe884 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -6,8 +6,8 @@
* Based on Sharp's 2.4 Backlight Driver
*
* Copyright (c) 2008 Marvell International Ltd.
- * Converted to SPI device based LCD/Backlight device driver
- * by Eric Miao <eric.miao@marvell.com>
+ * Converted to SPI device based LCD/Backlight device driver
+ * by Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -192,7 +192,7 @@ static void lcdtg_set_phadadj(struct corgi_lcd *lcd, int mode)
{
int adj;
- switch(mode) {
+ switch (mode) {
case CORGI_LCD_MODE_VGA:
/* Setting for VGA */
adj = sharpsl_param.phadadj;
@@ -409,10 +409,10 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted;
if (gpio_is_valid(lcd->gpio_backlight_cont))
- gpio_set_value(lcd->gpio_backlight_cont, cont);
+ gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont);
if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_set_value(lcd->gpio_backlight_on, intensity);
+ gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity);
if (lcd->kick_battery)
lcd->kick_battery();
@@ -495,8 +495,9 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on,
"BL_ON");
if (err) {
- dev_err(&spi->dev, "failed to request GPIO%d for "
- "backlight_on\n", pdata->gpio_backlight_on);
+ dev_err(&spi->dev,
+ "failed to request GPIO%d for backlight_on\n",
+ pdata->gpio_backlight_on);
return err;
}
@@ -508,8 +509,9 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont,
"BL_CONT");
if (err) {
- dev_err(&spi->dev, "failed to request GPIO%d for "
- "backlight_cont\n", pdata->gpio_backlight_cont);
+ dev_err(&spi->dev,
+ "failed to request GPIO%d for backlight_cont\n",
+ pdata->gpio_backlight_cont);
return err;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 573c7ece0fd..8179cef0730 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -2,10 +2,10 @@
* Backlight driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -164,15 +164,14 @@ static int da903x_backlight_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int da903x_backlight_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
+
return da903x_backlight_set(bl, 0);
}
static int da903x_backlight_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
return 0;
@@ -199,7 +198,7 @@ static struct platform_driver da903x_backlight_driver = {
module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
-MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
- "Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-backlight");
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index ac196181fe4..842da5a3ac4 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -34,7 +34,7 @@ enum {
DA9052_TYPE_WLED3,
};
-static unsigned char wled_bank[] = {
+static const unsigned char wled_bank[] = {
DA9052_LED1_CONF_REG,
DA9052_LED2_CONF_REG,
DA9052_LED3_CONF_REG,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 8c660fcd250..0ae155be9c8 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -97,8 +97,8 @@ static int genericbl_probe(struct platform_device *pdev)
props.max_brightness = machinfo->max_intensity;
bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
&props);
- if (IS_ERR (bd))
- return PTR_ERR (bd);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index c9996634244..5cefd73526f 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -26,7 +26,7 @@
#define HP680_DEFAULT_INTENSITY 10
static int hp680bl_suspended;
-static int current_intensity = 0;
+static int current_intensity;
static DEFINE_SPINLOCK(bl_lock);
static void hp680bl_send_intensity(struct backlight_device *bd)
@@ -168,7 +168,7 @@ static int __init hp680bl_init(void)
static void __exit hp680bl_exit(void)
{
platform_device_unregister(hp680bl_device);
- platform_driver_unregister(&hp680bl_driver);
+ platform_driver_unregister(&hp680bl_driver);
}
module_init(hp680bl_init);
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 66cc313185a..1235bf9defc 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -45,7 +45,7 @@ static inline int ili9320_write_spi(struct ili9320 *ili,
/* second message is the data to transfer */
data[0] = spi->id | ILI9320_SPI_DATA | ILI9320_SPI_WRITE;
- data[1] = value >> 8;
+ data[1] = value >> 8;
data[2] = value;
return spi_sync(spi->dev, &spi->message);
@@ -56,11 +56,10 @@ int ili9320_write(struct ili9320 *ili, unsigned int reg, unsigned int value)
dev_dbg(ili->dev, "write: reg=%02x, val=%04x\n", reg, value);
return ili->write(ili, reg, value);
}
-
EXPORT_SYMBOL_GPL(ili9320_write);
int ili9320_write_regs(struct ili9320 *ili,
- struct ili9320_reg *values,
+ const struct ili9320_reg *values,
int nr_values)
{
int index;
@@ -74,7 +73,6 @@ int ili9320_write_regs(struct ili9320 *ili,
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_write_regs);
static void ili9320_reset(struct ili9320 *lcd)
@@ -260,7 +258,6 @@ int ili9320_probe_spi(struct spi_device *spi,
return ret;
}
-
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
int ili9320_remove(struct ili9320 *ili)
@@ -271,7 +268,6 @@ int ili9320_remove(struct ili9320 *ili)
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_remove);
#ifdef CONFIG_PM
@@ -296,20 +292,17 @@ int ili9320_suspend(struct ili9320 *lcd, pm_message_t state)
return 0;
}
-
EXPORT_SYMBOL_GPL(ili9320_suspend);
int ili9320_resume(struct ili9320 *lcd)
{
dev_info(lcd->dev, "resuming from power state %d\n", lcd->power);
- if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
+ if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP)
ili9320_write(lcd, ILI9320_POWER1, 0x00);
- }
return ili9320_power(lcd, FB_BLANK_UNBLANK);
}
-
EXPORT_SYMBOL_GPL(ili9320_resume);
#endif
@@ -318,7 +311,6 @@ void ili9320_shutdown(struct ili9320 *lcd)
{
ili9320_power(lcd, FB_BLANK_POWERDOWN);
}
-
EXPORT_SYMBOL_GPL(ili9320_shutdown);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
diff --git a/drivers/video/backlight/ili9320.h b/drivers/video/backlight/ili9320.h
index e388eca7cac..e0db738f7bb 100644
--- a/drivers/video/backlight/ili9320.h
+++ b/drivers/video/backlight/ili9320.h
@@ -63,7 +63,7 @@ extern int ili9320_write(struct ili9320 *ili,
unsigned int reg, unsigned int value);
extern int ili9320_write_regs(struct ili9320 *ili,
- struct ili9320_reg *values,
+ const struct ili9320_reg *values,
int nr_values);
/* Device probe */
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 16f593b6442..fef6ce4fad7 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -48,7 +48,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
jornada_ssp_end();
- return (BL_MAX_BRIGHT - ret);
+ return BL_MAX_BRIGHT - ret;
}
static int jornada_bl_update_status(struct backlight_device *bd)
@@ -77,18 +77,23 @@ static int jornada_bl_update_status(struct backlight_device *bd)
goto out;
}
- /* at this point we expect that the mcu has accepted
- our command and is waiting for our new value
- please note that maximum brightness is 255,
- but due to physical layout it is equal to 0, so we simply
- invert the value (MAX VALUE - NEW VALUE). */
- if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
+ /*
+ * at this point we expect that the mcu has accepted
+ * our command and is waiting for our new value
+ * please note that maximum brightness is 255,
+ * but due to physical layout it is equal to 0, so we simply
+ * invert the value (MAX VALUE - NEW VALUE).
+ */
+ if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness)
+ != TXDUMMY) {
pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
- /* If infact we get an TXDUMMY as output we are happy and dont
- make any further comments about it */
+ /*
+ * If infact we get an TXDUMMY as output we are happy and dont
+ * make any further comments about it
+ */
out:
jornada_ssp_end();
@@ -121,9 +126,11 @@ static int jornada_bl_probe(struct platform_device *pdev)
bd->props.power = FB_BLANK_UNBLANK;
bd->props.brightness = BL_DEF_BRIGHT;
- /* note. make sure max brightness is set otherwise
- you will get seemingly non-related errors when
- trying to change brightness */
+ /*
+ * note. make sure max brightness is set otherwise
+ * you will get seemingly non-related errors when
+ * trying to change brightness
+ */
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index f5aa0a5961d..9a35196d12d 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -4,7 +4,7 @@
* Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- * Inspired by Marek Vasut work in l4f00242t03.c
+ * Inspired by Marek Vasut work in l4f00242t03.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +33,6 @@ struct l4f00242t03_priv {
struct regulator *core_reg;
};
-
static void l4f00242t03_reset(unsigned int gpio)
{
pr_debug("l4f00242t03_reset.\n");
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index a5d0d024bb9..34fb6bd798c 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -108,7 +108,7 @@ static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr,
static ssize_t lcd_store_power(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int rc = -ENXIO;
+ int rc;
struct lcd_device *ld = to_lcd_device(dev);
unsigned long power;
@@ -116,6 +116,8 @@ static ssize_t lcd_store_power(struct device *dev,
if (rc)
return rc;
+ rc = -ENXIO;
+
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
pr_debug("set power to %lu\n", power);
@@ -144,7 +146,7 @@ static ssize_t lcd_show_contrast(struct device *dev,
static ssize_t lcd_store_contrast(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int rc = -ENXIO;
+ int rc;
struct lcd_device *ld = to_lcd_device(dev);
unsigned long contrast;
@@ -152,6 +154,8 @@ static ssize_t lcd_store_contrast(struct device *dev,
if (rc)
return rc;
+ rc = -ENXIO;
+
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
pr_debug("set contrast to %lu\n", contrast);
diff --git a/drivers/video/backlight/lm3630_bl.c b/drivers/video/backlight/lm3630_bl.c
index 0207bc0a440..a6d637b5c68 100644
--- a/drivers/video/backlight/lm3630_bl.c
+++ b/drivers/video/backlight/lm3630_bl.c
@@ -37,7 +37,7 @@ enum lm3630_leds {
BLED_2
};
-static const char *bled_name[] = {
+static const char * const bled_name[] = {
[BLED_ALL] = "lm3630_bled", /*Bank1 controls all string */
[BLED_1] = "lm3630_bled1", /*Bank1 controls bled1 */
[BLED_2] = "lm3630_bled2", /*Bank1 or 2 controls bled2 */
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index b0e1e8ba4d9..7ab2d2a04e4 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -214,7 +214,7 @@ out_input:
}
-static DEVICE_ATTR(bled_mode, 0666, NULL, lm3639_bled_mode_store);
+static DEVICE_ATTR(bled_mode, S_IWUSR, NULL, lm3639_bled_mode_store);
/* torch */
static void lm3639_torch_brightness_set(struct led_classdev *cdev,
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index b29c7071c9d..55819b38470 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -31,7 +31,7 @@ struct lms283gf05_seq {
};
/* Magic sequences supplied by manufacturer, for details refer to datasheet */
-static struct lms283gf05_seq disp_initseq[] = {
+static const struct lms283gf05_seq disp_initseq[] = {
/* REG, VALUE, DELAY */
{ 0x07, 0x0000, 0 },
{ 0x13, 0x0000, 10 },
@@ -78,7 +78,7 @@ static struct lms283gf05_seq disp_initseq[] = {
{ 0x22, 0x0000, 0 }
};
-static struct lms283gf05_seq disp_pdwnseq[] = {
+static const struct lms283gf05_seq disp_pdwnseq[] = {
{ 0x07, 0x0016, 30 },
{ 0x07, 0x0004, 0 },
@@ -104,7 +104,7 @@ static void lms283gf05_reset(unsigned long gpio, bool inverted)
}
static void lms283gf05_toggle(struct spi_device *spi,
- struct lms283gf05_seq *seq, int sz)
+ const struct lms283gf05_seq *seq, int sz)
{
char buf[3];
int i;
@@ -158,13 +158,10 @@ static int lms283gf05_probe(struct spi_device *spi)
int ret = 0;
if (pdata != NULL) {
- ret = devm_gpio_request(&spi->dev, pdata->reset_gpio,
- "LMS285GF05 RESET");
- if (ret)
- return ret;
-
- ret = gpio_direction_output(pdata->reset_gpio,
- !pdata->reset_inverted);
+ ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
+ GPIOF_DIR_OUT | (!pdata->reset_inverted ?
+ GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
+ "LMS285GF05 RESET");
if (ret)
return ret;
}
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 3a6d5419e3e..146fea8aa43 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -107,7 +107,6 @@ void locomolcd_power(int on)
}
EXPORT_SYMBOL(locomolcd_power);
-
static int current_intensity;
static int locomolcd_set_intensity(struct backlight_device *bd)
@@ -122,13 +121,25 @@ static int locomolcd_set_intensity(struct backlight_device *bd)
intensity = 0;
switch (intensity) {
- /* AC and non-AC are handled differently, but produce same results in sharp code? */
- case 0: locomo_frontlight_set(locomolcd_dev, 0, 0, 161); break;
- case 1: locomo_frontlight_set(locomolcd_dev, 117, 0, 161); break;
- case 2: locomo_frontlight_set(locomolcd_dev, 163, 0, 148); break;
- case 3: locomo_frontlight_set(locomolcd_dev, 194, 0, 161); break;
- case 4: locomo_frontlight_set(locomolcd_dev, 194, 1, 161); break;
-
+ /*
+ * AC and non-AC are handled differently,
+ * but produce same results in sharp code?
+ */
+ case 0:
+ locomo_frontlight_set(locomolcd_dev, 0, 0, 161);
+ break;
+ case 1:
+ locomo_frontlight_set(locomolcd_dev, 117, 0, 161);
+ break;
+ case 2:
+ locomo_frontlight_set(locomolcd_dev, 163, 0, 148);
+ break;
+ case 3:
+ locomo_frontlight_set(locomolcd_dev, 194, 0, 161);
+ break;
+ case 4:
+ locomo_frontlight_set(locomolcd_dev, 194, 1, 161);
+ break;
default:
return -ENODEV;
}
@@ -175,9 +186,11 @@ static int locomolcd_probe(struct locomo_dev *ldev)
locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
- /* the poodle_lcd_power function is called for the first time
+ /*
+ * the poodle_lcd_power function is called for the first time
* from fs_initcall, which is before locomo is activated.
- * We need to recall poodle_lcd_power here*/
+ * We need to recall poodle_lcd_power here
+ */
if (machine_is_poodle())
locomolcd_power(1);
@@ -190,8 +203,8 @@ static int locomolcd_probe(struct locomo_dev *ldev)
&ldev->dev, NULL,
&locomobl_data, &props);
- if (IS_ERR (locomolcd_bl_device))
- return PTR_ERR (locomolcd_bl_device);
+ if (IS_ERR(locomolcd_bl_device))
+ return PTR_ERR(locomolcd_bl_device);
/* Set up frontlight so that screen is readable */
locomolcd_bl_device->props.brightness = 2;
@@ -226,7 +239,6 @@ static struct locomo_driver poodle_lcd_driver = {
.resume = locomolcd_resume,
};
-
static int __init locomolcd_init(void)
{
return locomo_driver_register(&poodle_lcd_driver);
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index fd985e0681e..6e4db0c874c 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -15,6 +15,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_data/lp855x.h>
+#include <linux/pwm.h>
/* Registers */
#define BRIGHTNESS_CTRL 0x00
@@ -34,22 +35,19 @@ struct lp855x {
struct i2c_client *client;
struct backlight_device *bl;
struct device *dev;
- struct mutex xfer_lock;
struct lp855x_platform_data *pdata;
+ struct pwm_device *pwm;
};
static int lp855x_read_byte(struct lp855x *lp, u8 reg, u8 *data)
{
int ret;
- mutex_lock(&lp->xfer_lock);
ret = i2c_smbus_read_byte_data(lp->client, reg);
if (ret < 0) {
- mutex_unlock(&lp->xfer_lock);
dev_err(lp->dev, "failed to read 0x%.2x\n", reg);
return ret;
}
- mutex_unlock(&lp->xfer_lock);
*data = (u8)ret;
return 0;
@@ -57,13 +55,7 @@ static int lp855x_read_byte(struct lp855x *lp, u8 reg, u8 *data)
static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
{
- int ret;
-
- mutex_lock(&lp->xfer_lock);
- ret = i2c_smbus_write_byte_data(lp->client, reg, data);
- mutex_unlock(&lp->xfer_lock);
-
- return ret;
+ return i2c_smbus_write_byte_data(lp->client, reg, data);
}
static bool lp855x_is_valid_rom_area(struct lp855x *lp, u8 addr)
@@ -121,6 +113,28 @@ static int lp855x_init_registers(struct lp855x *lp)
return ret;
}
+static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+{
+ unsigned int period = lp->pdata->period_ns;
+ unsigned int duty = br * period / max_br;
+ struct pwm_device *pwm;
+
+ /* request pwm device with the consumer name */
+ if (!lp->pwm) {
+ pwm = devm_pwm_get(lp->dev, lp->chipname);
+ if (IS_ERR(pwm))
+ return;
+
+ lp->pwm = pwm;
+ }
+
+ pwm_config(lp->pwm, duty, period);
+ if (duty)
+ pwm_enable(lp->pwm);
+ else
+ pwm_disable(lp->pwm);
+}
+
static int lp855x_bl_update_status(struct backlight_device *bl)
{
struct lp855x *lp = bl_get_data(bl);
@@ -130,12 +144,10 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
bl->props.brightness = 0;
if (mode == PWM_BASED) {
- struct lp855x_pwm_data *pd = &lp->pdata->pwm_data;
int br = bl->props.brightness;
int max_br = bl->props.max_brightness;
- if (pd->pwm_set_intensity)
- pd->pwm_set_intensity(br, max_br);
+ lp855x_pwm_ctrl(lp, br, max_br);
} else if (mode == REGISTER_BASED) {
u8 val = bl->props.brightness;
@@ -150,14 +162,7 @@ static int lp855x_bl_get_brightness(struct backlight_device *bl)
struct lp855x *lp = bl_get_data(bl);
enum lp855x_brightness_ctrl_mode mode = lp->pdata->mode;
- if (mode == PWM_BASED) {
- struct lp855x_pwm_data *pd = &lp->pdata->pwm_data;
- int max_br = bl->props.max_brightness;
-
- if (pd->pwm_get_intensity)
- bl->props.brightness = pd->pwm_get_intensity(max_br);
-
- } else if (mode == REGISTER_BASED) {
+ if (mode == REGISTER_BASED) {
u8 val = 0;
lp855x_read_byte(lp, BRIGHTNESS_CTRL, &val);
@@ -266,8 +271,6 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->chip_id = id->driver_data;
i2c_set_clientdata(cl, lp);
- mutex_init(&lp->xfer_lock);
-
ret = lp855x_init_registers(lp);
if (ret) {
dev_err(lp->dev, "i2c communication err: %d", ret);
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index c6bec7aab87..2c9bce050aa 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -120,15 +120,13 @@ static int max8925_backlight_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
dev_err(&pdev->dev, "No REG resource for mode control!\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_mode_cntl = res->start;
res = platform_get_resource(pdev, IORESOURCE_REG, 1);
if (!res) {
dev_err(&pdev->dev, "No REG resource for control!\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_cntl = res->start;
@@ -142,8 +140,7 @@ static int max8925_backlight_probe(struct platform_device *pdev)
&max8925_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out;
+ return PTR_ERR(bl);
}
bl->props.brightness = MAX_BRIGHTNESS;
@@ -166,8 +163,6 @@ static int max8925_backlight_probe(struct platform_device *pdev)
return 0;
out_brt:
backlight_device_unregister(bl);
-out:
- devm_kfree(&pdev->dev, data);
return ret;
}
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 9a046a4c98f..af31c269baa 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -42,12 +42,12 @@ struct omap_backlight {
struct omap_backlight_config *pdata;
};
-static void inline omapbl_send_intensity(int intensity)
+static inline void omapbl_send_intensity(int intensity)
{
omap_writeb(intensity, OMAP_PWL_ENABLE);
}
-static void inline omapbl_send_enable(int enable)
+static inline void omapbl_send_enable(int enable)
{
omap_writeb(enable, OMAP_PWL_CLK_ENABLE);
}
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 4ec30748b44..633b0a22fd6 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -71,8 +71,7 @@ static int pandora_backlight_update_status(struct backlight_device *bl)
* set PWM duty cycle to max. TPS61161 seems to use this
* to calibrate it's PWM sensitivity when it starts.
*/
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, MAX_VALUE,
- TWL_PWM0_OFF);
+ twl_i2c_write_u8(TWL_MODULE_PWM, MAX_VALUE, TWL_PWM0_OFF);
/* first enable clock, then PWM0 out */
twl_i2c_read_u8(TWL4030_MODULE_INTBR, &r, TWL_INTBR_GPBR1);
@@ -90,8 +89,7 @@ static int pandora_backlight_update_status(struct backlight_device *bl)
usleep_range(2000, 10000);
}
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, MIN_VALUE + brightness,
- TWL_PWM0_OFF);
+ twl_i2c_write_u8(TWL_MODULE_PWM, MIN_VALUE + brightness, TWL_PWM0_OFF);
done:
if (brightness != 0)
@@ -132,7 +130,7 @@ static int pandora_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
/* 64 cycle period, ON position 0 */
- twl_i2c_write_u8(TWL4030_MODULE_PWM0, 0x80, TWL_PWM0_ON);
+ twl_i2c_write_u8(TWL_MODULE_PWM, 0x80, TWL_PWM0_ON);
bl->props.state |= PANDORABL_WAS_OFF;
bl->props.brightness = MAX_USER_VALUE;
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 0087396007e..e87c7a3394f 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -52,7 +52,7 @@ int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit)
pcf_bl->brightness_limit = limit & 0x3f;
backlight_update_status(pcf_bl->bl);
- return 0;
+ return 0;
}
static int pcf50633_bl_update_status(struct backlight_device *bl)
@@ -136,8 +136,10 @@ static int pcf50633_bl_probe(struct platform_device *pdev)
pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time);
- /* Should be different from bl_props.brightness, so we do not exit
- * update_status early the first time it's called */
+ /*
+ * Should be different from bl_props.brightness, so we do not exit
+ * update_status early the first time it's called
+ */
pcf_bl->brightness = pcf_bl->bl->props.brightness + 1;
backlight_update_status(pcf_bl->bl);
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 894bfc5ce42..17a6b83f97a 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -27,7 +27,7 @@ struct platform_lcd {
struct plat_lcd_data *pdata;
unsigned int power;
- unsigned int suspended : 1;
+ unsigned int suspended:1;
};
static inline struct platform_lcd *to_our_lcd(struct lcd_device *lcd)
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 484e10dd1a8..3e1c1135f6d 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -757,7 +757,7 @@ static int s6e63m0_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->dev = &spi->dev;
- lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
+ lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
return -EFAULT;
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 146ffb9404d..ad2325f3d65 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -2,7 +2,7 @@
* tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels
*
* Copyright (C) 2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -47,7 +47,7 @@ struct tdo24m {
((x1) << 9) | 0x100 | (x2))
#define CMD_NULL (-1)
-static uint32_t lcd_panel_reset[] = {
+static const uint32_t lcd_panel_reset[] = {
CMD0(0x1), /* reset */
CMD0(0x0), /* nop */
CMD0(0x0), /* nop */
@@ -55,7 +55,7 @@ static uint32_t lcd_panel_reset[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_on[] = {
+static const uint32_t lcd_panel_on[] = {
CMD0(0x29), /* Display ON */
CMD2(0xB8, 0xFF, 0xF9), /* Output Control */
CMD0(0x11), /* Sleep out */
@@ -63,7 +63,7 @@ static uint32_t lcd_panel_on[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_off[] = {
+static const uint32_t lcd_panel_off[] = {
CMD0(0x28), /* Display OFF */
CMD2(0xB8, 0x80, 0x02), /* Output Control */
CMD0(0x10), /* Sleep in */
@@ -71,7 +71,7 @@ static uint32_t lcd_panel_off[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_pass_through_tdo24m[] = {
+static const uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
@@ -80,7 +80,7 @@ static uint32_t lcd_vga_pass_through_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_pass_through_tdo24m[] = {
+static const uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
@@ -89,8 +89,8 @@ static uint32_t lcd_qvga_pass_through_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_transfer_tdo24m[] = {
- CMD1(0xcf, 0x02), /* Blanking period control (1) */
+static const uint32_t lcd_vga_transfer_tdo24m[] = {
+ CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x14, 0x00), /* CKV 1,2 timing control */
@@ -102,7 +102,7 @@ static uint32_t lcd_vga_transfer_tdo24m[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_transfer[] = {
+static const uint32_t lcd_qvga_transfer[] = {
CMD1(0xd6, 0x02), /* Blanking period control (1) */
CMD2(0xd7, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd8, 0x01), /* CKV timing control on/off */
@@ -115,7 +115,7 @@ static uint32_t lcd_qvga_transfer[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_pass_through_tdo35s[] = {
+static const uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x80),
CMD1(0xE1, 0x00),
@@ -123,7 +123,7 @@ static uint32_t lcd_vga_pass_through_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_qvga_pass_through_tdo35s[] = {
+static const uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD1(0xB0, 0x16),
CMD1(0xBC, 0x81),
CMD1(0xE1, 0x00),
@@ -131,8 +131,8 @@ static uint32_t lcd_qvga_pass_through_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_vga_transfer_tdo35s[] = {
- CMD1(0xcf, 0x02), /* Blanking period control (1) */
+static const uint32_t lcd_vga_transfer_tdo35s[] = {
+ CMD1(0xcf, 0x02), /* Blanking period control (1) */
CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */
CMD1(0xd1, 0x01), /* CKV timing control on/off */
CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */
@@ -144,7 +144,7 @@ static uint32_t lcd_vga_transfer_tdo35s[] = {
CMD_NULL,
};
-static uint32_t lcd_panel_config[] = {
+static const uint32_t lcd_panel_config[] = {
CMD2(0xb8, 0xff, 0xf9), /* Output control */
CMD0(0x11), /* sleep out */
CMD1(0xba, 0x01), /* Display mode (1) */
@@ -175,10 +175,11 @@ static uint32_t lcd_panel_config[] = {
CMD_NULL,
};
-static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array)
+static int tdo24m_writes(struct tdo24m *lcd, const uint32_t *array)
{
struct spi_transfer *x = &lcd->xfer;
- uint32_t data, *p = array;
+ const uint32_t *p = array;
+ uint32_t data;
int nparams, err = 0;
for (; *p != CMD_NULL; p++) {
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index a0521abdcd8..588682cc161 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -92,14 +92,12 @@ static int tosa_bl_probe(struct i2c_client *client,
data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
- ret = devm_gpio_request(&client->dev, TOSA_GPIO_BL_C20MA, "backlight");
+ ret = devm_gpio_request_one(&client->dev, TOSA_GPIO_BL_C20MA,
+ GPIOF_OUT_INIT_LOW, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
return ret;
}
- ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
- if (ret)
- return ret;
i2c_set_clientdata(client, data);
data->i2c = client;
@@ -163,7 +161,6 @@ static const struct i2c_device_id tosa_bl_id[] = {
{ },
};
-
static struct i2c_driver tosa_bl_driver = {
.driver = {
.name = "tosa-bl",
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 86fff88c2e4..96bae941585 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -63,7 +63,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
int tosa_bl_enable(struct spi_device *spi, int enable)
{
/* bl_enable GP04=1 otherwise GP04=0*/
- return tosa_tg_send(spi, TG_GPODR2, enable? 0x01 : 0x00);
+ return tosa_tg_send(spi, TG_GPODR2, enable ? 0x01 : 0x00);
}
EXPORT_SYMBOL(tosa_bl_enable);
@@ -91,15 +91,17 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
tosa_tg_send(spi, TG_PNLCTL, value);
/* TG LCD pannel power up */
- tosa_tg_send(spi, TG_PINICTL,0x4);
+ tosa_tg_send(spi, TG_PINICTL, 0x4);
mdelay(50);
/* TG LCD GVSS */
- tosa_tg_send(spi, TG_PINICTL,0x0);
+ tosa_tg_send(spi, TG_PINICTL, 0x0);
if (!data->i2c) {
- /* after the pannel is powered up the first time, we can access the i2c bus */
- /* so probe for the DAC */
+ /*
+ * after the pannel is powered up the first time,
+ * we can access the i2c bus so probe for the DAC
+ */
struct i2c_adapter *adap = i2c_get_adapter(0);
struct i2c_board_info info = {
.type = "tosa-bl",
@@ -115,11 +117,11 @@ static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
struct spi_device *spi = data->spi;
/* TG LCD VHSA off */
- tosa_tg_send(spi, TG_PINICTL,0x4);
+ tosa_tg_send(spi, TG_PINICTL, 0x4);
mdelay(50);
/* TG LCD signal off */
- tosa_tg_send(spi, TG_PINICTL,0x6);
+ tosa_tg_send(spi, TG_PINICTL, 0x6);
mdelay(50);
/* TG Off */
@@ -193,17 +195,13 @@ static int tosa_lcd_probe(struct spi_device *spi)
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
- ret = devm_gpio_request(&spi->dev, TOSA_GPIO_TG_ON, "tg #pwr");
+ ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
+ GPIOF_OUT_INIT_LOW, "tg #pwr");
if (ret < 0)
goto err_gpio_tg;
mdelay(60);
- ret = gpio_direction_output(TOSA_GPIO_TG_ON, 0);
- if (ret < 0)
- goto err_gpio_tg;
-
- mdelay(60);
tosa_lcd_tg_init(data);
tosa_lcd_tg_on(data);
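devm_gpio_request_one(), as used in the tosa conversions above, folds the request and the initial direction/level into a single call, which is why the separate gpio_direction_output() steps can go away; a minimal sketch with an illustrative GPIO label:

#include <linux/device.h>
#include <linux/gpio.h>

static int example_claim_gpio(struct device *dev, unsigned int gpio)
{
	/* request 'gpio', configure it as an output, drive it low */
	return devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_LOW,
				     "example-enable");
}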
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 712b0acfd33..45e81b4cf8b 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -26,7 +26,7 @@
/* Device initialisation sequences */
-static struct ili9320_reg vgg_init1[] = {
+static const struct ili9320_reg vgg_init1[] = {
{
.address = ILI9320_POWER1,
.value = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
@@ -43,7 +43,7 @@ static struct ili9320_reg vgg_init1[] = {
},
};
-static struct ili9320_reg vgg_init2[] = {
+static const struct ili9320_reg vgg_init2[] = {
{
.address = ILI9320_POWER1,
.value = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
@@ -54,7 +54,7 @@ static struct ili9320_reg vgg_init2[] = {
}
};
-static struct ili9320_reg vgg_gamma[] = {
+static const struct ili9320_reg vgg_gamma[] = {
{
.address = ILI9320_GAMMA1,
.value = 0x0000,
@@ -89,7 +89,7 @@ static struct ili9320_reg vgg_gamma[] = {
};
-static struct ili9320_reg vgg_init0[] = {
+static const struct ili9320_reg vgg_init0[] = {
[0] = {
/* set direction and scan mode gate */
.address = ILI9320_DRIVER,
@@ -217,7 +217,7 @@ static int vgg2432a4_resume(struct spi_device *spi)
}
#else
#define vgg2432a4_suspend NULL
-#define vgg2432a4_resume NULL
+#define vgg2432a4_resume NULL
#endif
static struct ili9320_client vgg2432a4_client = {
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 6d159662904..b05afd03729 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -327,9 +327,16 @@ out_unmap:
static void newport_init(struct vc_data *vc, int init)
{
- vc->vc_cols = newport_xsize / 8;
- vc->vc_rows = newport_ysize / 16;
+ int cols, rows;
+
+ cols = newport_xsize / 8;
+ rows = newport_ysize / 16;
vc->vc_can_do_color = 1;
+ if (init) {
+ vc->vc_cols = cols;
+ vc->vc_rows = rows;
+ } else
+ vc_resize(vc, cols, rows);
}
static void newport_deinit(struct vc_data *c)
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index 25f835bf3d7..46dd8f5d2e9 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -35,8 +35,7 @@ int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
dsize = s_pitch * cursor->image.height;
if (dsize + sizeof(struct fb_image) != ops->cursor_size) {
- if (ops->cursor_src != NULL)
- kfree(ops->cursor_src);
+ kfree(ops->cursor_src);
ops->cursor_size = dsize + sizeof(struct fb_image);
ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC);
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 80665f66ac1..46534e00fe0 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -213,62 +213,51 @@ static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
.accel = FB_ACCEL_NONE
};
-struct da8xx_panel {
- const char name[25]; /* Full name <vendor>_<model> */
- unsigned short width;
- unsigned short height;
- int hfp; /* Horizontal front porch */
- int hbp; /* Horizontal back porch */
- int hsw; /* Horizontal Sync Pulse Width */
- int vfp; /* Vertical front porch */
- int vbp; /* Vertical back porch */
- int vsw; /* Vertical Sync Pulse Width */
- unsigned int pxl_clk; /* Pixel clock */
- unsigned char invert_pxl_clk; /* Invert Pixel clock */
-};
-
-static struct da8xx_panel known_lcd_panels[] = {
+static struct fb_videomode known_lcd_panels[] = {
/* Sharp LCD035Q3DG01 */
[0] = {
- .name = "Sharp_LCD035Q3DG01",
- .width = 320,
- .height = 240,
- .hfp = 8,
- .hbp = 6,
- .hsw = 0,
- .vfp = 2,
- .vbp = 2,
- .vsw = 0,
- .pxl_clk = 4608000,
- .invert_pxl_clk = 1,
+ .name = "Sharp_LCD035Q3DG01",
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 4608000,
+ .left_margin = 6,
+ .right_margin = 8,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 0,
+ .vsync_len = 0,
+ .sync = FB_SYNC_CLK_INVERT |
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
/* Sharp LK043T1DG01 */
[1] = {
- .name = "Sharp_LK043T1DG01",
- .width = 480,
- .height = 272,
- .hfp = 2,
- .hbp = 2,
- .hsw = 41,
- .vfp = 2,
- .vbp = 2,
- .vsw = 10,
- .pxl_clk = 7833600,
- .invert_pxl_clk = 0,
+ .name = "Sharp_LK043T1DG01",
+ .xres = 480,
+ .yres = 272,
+ .pixclock = 7833600,
+ .left_margin = 2,
+ .right_margin = 2,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 41,
+ .vsync_len = 10,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = 0,
},
[2] = {
/* Hitachi SP10Q010 */
- .name = "SP10Q010",
- .width = 320,
- .height = 240,
- .hfp = 10,
- .hbp = 10,
- .hsw = 10,
- .vfp = 10,
- .vbp = 10,
- .vsw = 10,
- .pxl_clk = 7833600,
- .invert_pxl_clk = 0,
+ .name = "SP10Q010",
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 7833600,
+ .left_margin = 10,
+ .right_margin = 10,
+ .upper_margin = 10,
+ .lower_margin = 10,
+ .hsync_len = 10,
+ .vsync_len = 10,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = 0,
},
};
@@ -399,10 +388,9 @@ static int lcd_cfg_dma(int burst_size, int fifo_th)
reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_8);
break;
case 16:
+ default:
reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_16);
break;
- default:
- return -EINVAL;
}
reg |= (fifo_th << 8);
@@ -447,7 +435,8 @@ static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
}
-static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
+static int lcd_cfg_display(const struct lcd_ctrl_config *cfg,
+ struct fb_videomode *panel)
{
u32 reg;
u32 reg_int;
@@ -456,7 +445,7 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
LCD_MONO_8BIT_MODE |
LCD_MONOCHROME_MODE);
- switch (cfg->p_disp_panel->panel_shade) {
+ switch (cfg->panel_shade) {
case MONOCHROME:
reg |= LCD_MONOCHROME_MODE;
if (cfg->mono_8bit_mode)
@@ -469,7 +458,9 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
break;
case COLOR_PASSIVE:
- if (cfg->stn_565_mode)
+ /* AC bias applicable only for Passive panels */
+ lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
+ if (cfg->bpp == 12 && cfg->stn_565_mode)
reg |= LCD_STN_565_ENABLE;
break;
@@ -490,22 +481,19 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
- if (cfg->sync_ctrl)
- reg |= LCD_SYNC_CTRL;
- else
- reg &= ~LCD_SYNC_CTRL;
+ reg |= LCD_SYNC_CTRL;
if (cfg->sync_edge)
reg |= LCD_SYNC_EDGE;
else
reg &= ~LCD_SYNC_EDGE;
- if (cfg->invert_line_clock)
+ if (panel->sync & FB_SYNC_HOR_HIGH_ACT)
reg |= LCD_INVERT_LINE_CLOCK;
else
reg &= ~LCD_INVERT_LINE_CLOCK;
- if (cfg->invert_frm_clock)
+ if (panel->sync & FB_SYNC_VERT_HIGH_ACT)
reg |= LCD_INVERT_FRAME_CLOCK;
else
reg &= ~LCD_INVERT_FRAME_CLOCK;
@@ -728,7 +716,7 @@ static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
}
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
- struct da8xx_panel *panel)
+ struct fb_videomode *panel)
{
u32 bpp;
int ret = 0;
@@ -738,7 +726,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
/* Calculate the divider */
lcd_calc_clk_divider(par);
- if (panel->invert_pxl_clk)
+ if (panel->sync & FB_SYNC_CLK_INVERT)
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
else
@@ -750,30 +738,23 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
if (ret < 0)
return ret;
- /* Configure the AC bias properties. */
- lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
-
/* Configure the vertical and horizontal sync properties. */
- lcd_cfg_vertical_sync(panel->vbp, panel->vsw, panel->vfp);
- lcd_cfg_horizontal_sync(panel->hbp, panel->hsw, panel->hfp);
+ lcd_cfg_vertical_sync(panel->lower_margin, panel->vsync_len,
+ panel->upper_margin);
+ lcd_cfg_horizontal_sync(panel->right_margin, panel->hsync_len,
+ panel->left_margin);
/* Configure for disply */
- ret = lcd_cfg_display(cfg);
+ ret = lcd_cfg_display(cfg, panel);
if (ret < 0)
return ret;
- if (QVGA != cfg->p_disp_panel->panel_type)
- return -EINVAL;
+ bpp = cfg->bpp;
- if (cfg->bpp <= cfg->p_disp_panel->max_bpp &&
- cfg->bpp >= cfg->p_disp_panel->min_bpp)
- bpp = cfg->bpp;
- else
- bpp = cfg->p_disp_panel->max_bpp;
if (bpp == 12)
bpp = 16;
- ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->width,
- (unsigned int)panel->height, bpp,
+ ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->xres,
+ (unsigned int)panel->yres, bpp,
cfg->raster_order);
if (ret < 0)
return ret;
@@ -1235,7 +1216,7 @@ static int __devinit fb_probe(struct platform_device *device)
struct da8xx_lcdc_platform_data *fb_pdata =
device->dev.platform_data;
struct lcd_ctrl_config *lcd_cfg;
- struct da8xx_panel *lcdc_info;
+ struct fb_videomode *lcdc_info;
struct fb_info *da8xx_fb_info;
struct clk *fb_clk = NULL;
struct da8xx_fb_par *par;
@@ -1267,7 +1248,7 @@ static int __devinit fb_probe(struct platform_device *device)
goto err_request_mem;
}
- fb_clk = clk_get(&device->dev, NULL);
+ fb_clk = clk_get(&device->dev, "fck");
if (IS_ERR(fb_clk)) {
dev_err(&device->dev, "Can not get device clock\n");
ret = -ENODEV;
@@ -1283,6 +1264,7 @@ static int __devinit fb_probe(struct platform_device *device)
lcd_revision = LCD_VERSION_1;
break;
case 0x4F200800:
+ case 0x4F201000:
lcd_revision = LCD_VERSION_2;
break;
default:
@@ -1323,7 +1305,7 @@ static int __devinit fb_probe(struct platform_device *device)
#ifdef CONFIG_CPU_FREQ
par->lcd_fck_rate = clk_get_rate(fb_clk);
#endif
- par->pxl_clk = lcdc_info->pxl_clk;
+ par->pxl_clk = lcdc_info->pixclock;
if (fb_pdata->panel_power_ctrl) {
par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
par->panel_power_ctrl(1);
@@ -1336,8 +1318,8 @@ static int __devinit fb_probe(struct platform_device *device)
}
/* allocate frame buffer */
- par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
- ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE);
+ par->vram_size = lcdc_info->xres * lcdc_info->yres * lcd_cfg->bpp;
+ ulcm = lcm((lcdc_info->xres * lcd_cfg->bpp)/8, PAGE_SIZE);
par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
@@ -1355,10 +1337,10 @@ static int __devinit fb_probe(struct platform_device *device)
da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
da8xx_fb_fix.smem_start = par->vram_phys;
da8xx_fb_fix.smem_len = par->vram_size;
- da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+ da8xx_fb_fix.line_length = (lcdc_info->xres * lcd_cfg->bpp) / 8;
par->dma_start = par->vram_phys;
- par->dma_end = par->dma_start + lcdc_info->height *
+ par->dma_end = par->dma_start + lcdc_info->yres *
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
@@ -1384,22 +1366,22 @@ static int __devinit fb_probe(struct platform_device *device)
/* Initialize par */
da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
- da8xx_fb_var.xres = lcdc_info->width;
- da8xx_fb_var.xres_virtual = lcdc_info->width;
+ da8xx_fb_var.xres = lcdc_info->xres;
+ da8xx_fb_var.xres_virtual = lcdc_info->xres;
- da8xx_fb_var.yres = lcdc_info->height;
- da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS;
+ da8xx_fb_var.yres = lcdc_info->yres;
+ da8xx_fb_var.yres_virtual = lcdc_info->yres * LCD_NUM_BUFFERS;
da8xx_fb_var.grayscale =
- lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
+ lcd_cfg->panel_shade == MONOCHROME ? 1 : 0;
da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
- da8xx_fb_var.hsync_len = lcdc_info->hsw;
- da8xx_fb_var.vsync_len = lcdc_info->vsw;
- da8xx_fb_var.right_margin = lcdc_info->hfp;
- da8xx_fb_var.left_margin = lcdc_info->hbp;
- da8xx_fb_var.lower_margin = lcdc_info->vfp;
- da8xx_fb_var.upper_margin = lcdc_info->vbp;
+ da8xx_fb_var.hsync_len = lcdc_info->hsync_len;
+ da8xx_fb_var.vsync_len = lcdc_info->vsync_len;
+ da8xx_fb_var.right_margin = lcdc_info->right_margin;
+ da8xx_fb_var.left_margin = lcdc_info->left_margin;
+ da8xx_fb_var.lower_margin = lcdc_info->lower_margin;
+ da8xx_fb_var.upper_margin = lcdc_info->upper_margin;
da8xx_fb_var.pixclock = da8xxfb_pixel_clk_period(par);
/* Initialize fbinfo */
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index d55470e7541..28fd686c6b8 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <video/exynos_dp.h>
@@ -48,10 +49,6 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
{
int timeout_loop = 0;
- exynos_dp_init_hpd(dp);
-
- usleep_range(200, 210);
-
while (exynos_dp_get_plug_in_status(dp) != 0) {
timeout_loop++;
if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
@@ -90,9 +87,11 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
*/
/* Read Extension Flag, Number of 128-byte EDID extension blocks */
- exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+ retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
EDID_EXTENSION_FLAG,
&extend_block);
+ if (retval)
+ return retval;
if (extend_block > 0) {
dev_dbg(dp->dev, "EDID data includes a single extension!\n");
@@ -181,14 +180,15 @@ static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
int retval;
/* Read DPCD DPCD_ADDR_DPCD_REV~RECEIVE_PORT1_CAP_1 */
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_DPCD_REV,
- 12, buf);
+ retval = exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_DPCD_REV,
+ 12, buf);
+ if (retval)
+ return retval;
/* Read EDID */
for (i = 0; i < 3; i++) {
retval = exynos_dp_read_edid(dp);
- if (retval == 0)
+ if (!retval)
break;
}
@@ -261,11 +261,10 @@ static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
}
}
-static void exynos_dp_link_start(struct exynos_dp_device *dp)
+static int exynos_dp_link_start(struct exynos_dp_device *dp)
{
u8 buf[4];
- int lane;
- int lane_count;
+ int lane, lane_count, pll_tries, retval;
lane_count = dp->link_train.lane_count;
@@ -275,10 +274,6 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
for (lane = 0; lane < lane_count; lane++)
dp->link_train.cr_loop[lane] = 0;
- /* Set sink to D0 (Sink Not Ready) mode. */
- exynos_dp_write_byte_to_dpcd(dp, DPCD_ADDR_SINK_POWER_STATE,
- DPCD_SET_POWER_STATE_D0);
-
/* Set link rate and count as you want to establish*/
exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
exynos_dp_set_lane_count(dp, dp->link_train.lane_count);
@@ -286,29 +281,46 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
/* Setup RX configuration */
buf[0] = dp->link_train.link_rate;
buf[1] = dp->link_train.lane_count;
- exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_LINK_BW_SET,
2, buf);
+ if (retval)
+ return retval;
/* Set TX pre-emphasis to minimum */
for (lane = 0; lane < lane_count; lane++)
exynos_dp_set_lane_lane_pre_emphasis(dp,
PRE_EMPHASIS_LEVEL_0, lane);
+ /* Wait for PLL lock */
+ pll_tries = 0;
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Wait for PLL lock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ pll_tries++;
+ usleep_range(90, 120);
+ }
+
/* Set training pattern 1 */
exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
/* Set RX training pattern */
- exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED |
- DPCD_TRAINING_PATTERN_1);
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_PATTERN_SET,
+ DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_1);
+ if (retval)
+ return retval;
for (lane = 0; lane < lane_count; lane++)
buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count, buf);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ lane_count, buf);
+
+ return retval;
}
static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
@@ -332,18 +344,17 @@ static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
return 0;
}
-static int exynos_dp_channel_eq_ok(u8 link_align[3], int lane_count)
+static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
+ int lane_count)
{
int lane;
- u8 lane_align;
u8 lane_status;
- lane_align = link_align[2];
- if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
+ if ((link_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
return -EINVAL;
for (lane = 0; lane < lane_count; lane++) {
- lane_status = exynos_dp_get_lane_status(link_align, lane);
+ lane_status = exynos_dp_get_lane_status(link_status, lane);
lane_status &= DPCD_CHANNEL_EQ_BITS;
if (lane_status != DPCD_CHANNEL_EQ_BITS)
return -EINVAL;
@@ -427,60 +438,60 @@ static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
dp->link_train.lt_state = FAILED;
}
-static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
+static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
+ u8 adjust_request[2])
{
- u8 link_status[2];
- int lane;
- int lane_count;
+ int lane, lane_count;
+ u8 voltage_swing, pre_emphasis, training_lane;
- u8 adjust_request[2];
- u8 voltage_swing;
- u8 pre_emphasis;
- u8 training_lane;
+ lane_count = dp->link_train.lane_count;
+ for (lane = 0; lane < lane_count; lane++) {
+ voltage_swing = exynos_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+ training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
+ DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+
+ if (voltage_swing == VOLTAGE_LEVEL_3)
+ training_lane |= DPCD_MAX_SWING_REACHED;
+ if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
+ training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+
+ dp->link_train.training_lane[lane] = training_lane;
+ }
+}
+
+static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u8 voltage_swing, pre_emphasis, training_lane;
+ u8 link_status[2], adjust_request[2];
usleep_range(100, 101);
lane_count = dp->link_train.lane_count;
- exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
- 2, link_status);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
/* set training pattern 2 for EQ */
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
- for (lane = 0; lane < lane_count; lane++) {
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
-
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
-
- dp->link_train.training_lane[lane] = training_lane;
-
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane],
- lane);
- }
-
- exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
- DPCD_SCRAMBLING_DISABLED |
- DPCD_TRAINING_PATTERN_2);
-
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
- dp->link_train.training_lane);
+ retval = exynos_dp_write_byte_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_PATTERN_SET,
+ DPCD_SCRAMBLING_DISABLED |
+ DPCD_TRAINING_PATTERN_2);
+ if (retval)
+ return retval;
dev_info(dp->dev, "Link Training Clock Recovery success\n");
dp->link_train.lt_state = EQUALIZER_TRAINING;
@@ -488,152 +499,116 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
for (lane = 0; lane < lane_count; lane++) {
training_lane = exynos_dp_get_lane_link_training(
dp, lane);
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
voltage_swing = exynos_dp_get_adjust_request_voltage(
adjust_request, lane);
pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
adjust_request, lane);
- if (voltage_swing == VOLTAGE_LEVEL_3 ||
- pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
- dev_err(dp->dev, "voltage or pre emphasis reached max level\n");
- goto reduce_link_rate;
- }
-
- if ((DPCD_VOLTAGE_SWING_GET(training_lane) ==
- voltage_swing) &&
- (DPCD_PRE_EMPHASIS_GET(training_lane) ==
- pre_emphasis)) {
+ if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
+ voltage_swing &&
+ DPCD_PRE_EMPHASIS_GET(training_lane) ==
+ pre_emphasis)
dp->link_train.cr_loop[lane]++;
- if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP) {
- dev_err(dp->dev, "CR Max loop\n");
- goto reduce_link_rate;
- }
- }
-
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+ if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
+ voltage_swing == VOLTAGE_LEVEL_3 ||
+ pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
+ dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
+ dp->link_train.cr_loop[lane],
+ voltage_swing, pre_emphasis);
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+ }
- dp->link_train.training_lane[lane] = training_lane;
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane], lane);
- }
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
+ retval = exynos_dp_write_bytes_to_dpcd(dp,
+ DPCD_ADDR_TRAINING_LANE0_SET, lane_count,
dp->link_train.training_lane);
- }
-
- return 0;
+ if (retval)
+ return retval;
-reduce_link_rate:
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
+ return retval;
}
static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
{
- u8 link_status[2];
- u8 link_align[3];
- int lane;
- int lane_count;
+ int lane, lane_count, retval;
u32 reg;
-
- u8 adjust_request[2];
- u8 voltage_swing;
- u8 pre_emphasis;
- u8 training_lane;
+ u8 link_align, link_status[2], adjust_request[2];
usleep_range(400, 401);
lane_count = dp->link_train.lane_count;
- exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
- 2, link_status);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
- if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
- link_align[0] = link_status[0];
- link_align[1] = link_status[1];
+ if (exynos_dp_clock_recovery_ok(link_status, lane_count)) {
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
- exynos_dp_read_byte_from_dpcd(dp,
- DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED,
- &link_align[2]);
+ retval = exynos_dp_read_bytes_from_dpcd(dp,
+ DPCD_ADDR_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
- for (lane = 0; lane < lane_count; lane++) {
- exynos_dp_read_bytes_from_dpcd(dp,
- DPCD_ADDR_ADJUST_REQUEST_LANE0_1,
- 2, adjust_request);
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+ retval = exynos_dp_read_byte_from_dpcd(dp,
+ DPCD_ADDR_LANE_ALIGN_STATUS_UPDATED, &link_align);
+ if (retval)
+ return retval;
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DPCD_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DPCD_MAX_PRE_EMPHASIS_REACHED;
+ exynos_dp_get_adjust_training_lane(dp, adjust_request);
- dp->link_train.training_lane[lane] = training_lane;
- }
+ if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) {
+ /* training pattern set to Normal */
+ exynos_dp_training_pattern_dis(dp);
- if (exynos_dp_channel_eq_ok(link_align, lane_count) == 0) {
- /* traing pattern Set to Normal */
- exynos_dp_training_pattern_dis(dp);
+ dev_info(dp->dev, "Link Training success!\n");
- dev_info(dp->dev, "Link Training success!\n");
-
- exynos_dp_get_link_bandwidth(dp, &reg);
- dp->link_train.link_rate = reg;
- dev_dbg(dp->dev, "final bandwidth = %.2x\n",
- dp->link_train.link_rate);
+ exynos_dp_get_link_bandwidth(dp, &reg);
+ dp->link_train.link_rate = reg;
+ dev_dbg(dp->dev, "final bandwidth = %.2x\n",
+ dp->link_train.link_rate);
- exynos_dp_get_lane_count(dp, &reg);
- dp->link_train.lane_count = reg;
- dev_dbg(dp->dev, "final lane count = %.2x\n",
- dp->link_train.lane_count);
+ exynos_dp_get_lane_count(dp, &reg);
+ dp->link_train.lane_count = reg;
+ dev_dbg(dp->dev, "final lane count = %.2x\n",
+ dp->link_train.lane_count);
- /* set enhanced mode if available */
- exynos_dp_set_enhanced_mode(dp);
- dp->link_train.lt_state = FINISHED;
- } else {
- /* not all locked */
- dp->link_train.eq_loop++;
+ /* set enhanced mode if available */
+ exynos_dp_set_enhanced_mode(dp);
+ dp->link_train.lt_state = FINISHED;
- if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
- dev_err(dp->dev, "EQ Max loop\n");
- goto reduce_link_rate;
- }
+ return 0;
+ }
- for (lane = 0; lane < lane_count; lane++)
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane],
- lane);
+ /* not all locked */
+ dp->link_train.eq_loop++;
- exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
- lane_count,
- dp->link_train.training_lane);
- }
- } else {
- goto reduce_link_rate;
+ if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
+ dev_err(dp->dev, "EQ Max loop\n");
+ exynos_dp_reduce_link_rate(dp);
+ return -EIO;
}
- return 0;
+ for (lane = 0; lane < lane_count; lane++)
+ exynos_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = exynos_dp_write_bytes_to_dpcd(dp, DPCD_ADDR_TRAINING_LANE0_SET,
+ lane_count, dp->link_train.training_lane);
-reduce_link_rate:
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
+ return retval;
}
static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
@@ -701,16 +676,17 @@ static void exynos_dp_init_training(struct exynos_dp_device *dp,
static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
{
- int retval = 0;
- int training_finished = 0;
+ int retval = 0, training_finished = 0;
dp->link_train.lt_state = START;
/* Process here */
- while (!training_finished) {
+ while (!retval && !training_finished) {
switch (dp->link_train.lt_state) {
case START:
- exynos_dp_link_start(dp);
+ retval = exynos_dp_link_start(dp);
+ if (retval)
+ dev_err(dp->dev, "LT link start failed!\n");
break;
case CLOCK_RECOVERY:
retval = exynos_dp_process_clock_recovery(dp);
@@ -729,6 +705,8 @@ static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
return -EREMOTEIO;
}
}
+ if (retval)
+ dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
return retval;
}
@@ -752,19 +730,15 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
return retval;
}
-static int exynos_dp_config_video(struct exynos_dp_device *dp,
- struct video_info *video_info)
+static int exynos_dp_config_video(struct exynos_dp_device *dp)
{
int retval = 0;
int timeout_loop = 0;
int done_count = 0;
- exynos_dp_config_video_slave_mode(dp, video_info);
+ exynos_dp_config_video_slave_mode(dp);
- exynos_dp_set_video_color_format(dp, video_info->color_depth,
- video_info->color_space,
- video_info->dynamic_range,
- video_info->ycbcr_coeff);
+ exynos_dp_set_video_color_format(dp);
if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
dev_err(dp->dev, "PLL is not locked yet.\n");
@@ -852,10 +826,213 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
{
struct exynos_dp_device *dp = arg;
- dev_err(dp->dev, "exynos_dp_irq_handler\n");
+ enum dp_irq_type irq_type;
+
+ irq_type = exynos_dp_get_irq_type(dp);
+ switch (irq_type) {
+ case DP_IRQ_TYPE_HP_CABLE_IN:
+ dev_dbg(dp->dev, "Received irq - cable in\n");
+ schedule_work(&dp->hotplug_work);
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CABLE_OUT:
+ dev_dbg(dp->dev, "Received irq - cable out\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ case DP_IRQ_TYPE_HP_CHANGE:
+ /*
+ * We get these change notifications once in a while, but there
+ * is nothing we can do with them. Just ignore it for now and
+ * only handle cable changes.
+ */
+ dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n");
+ exynos_dp_clear_hotplug_interrupts(dp);
+ break;
+ default:
+ dev_err(dp->dev, "Received irq - unknown type!\n");
+ break;
+ }
return IRQ_HANDLED;
}
+static void exynos_dp_hotplug(struct work_struct *work)
+{
+ struct exynos_dp_device *dp;
+ int ret;
+
+ dp = container_of(work, struct exynos_dp_device, hotplug_work);
+
+ ret = exynos_dp_detect_hpd(dp);
+ if (ret) {
+ /* Cable has been disconnected, we're done */
+ return;
+ }
+
+ ret = exynos_dp_handle_edid(dp);
+ if (ret) {
+ dev_err(dp->dev, "unable to handle edid\n");
+ return;
+ }
+
+ ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
+ dp->video_info->link_rate);
+ if (ret) {
+ dev_err(dp->dev, "unable to do link train\n");
+ return;
+ }
+
+ exynos_dp_enable_scramble(dp, 1);
+ exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
+ exynos_dp_enable_enhanced_mode(dp, 1);
+
+ exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
+ exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
+
+ exynos_dp_init_video(dp);
+ ret = exynos_dp_config_video(dp);
+ if (ret)
+ dev_err(dp->dev, "unable to config video\n");
+}
+
+#ifdef CONFIG_OF
+static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
+{
+ struct device_node *dp_node = dev->of_node;
+ struct exynos_dp_platdata *pd;
+ struct video_info *dp_video_config;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "memory allocation for pdata failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ dp_video_config = devm_kzalloc(dev,
+ sizeof(*dp_video_config), GFP_KERNEL);
+
+ if (!dp_video_config) {
+ dev_err(dev, "memory allocation for video config failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ pd->video_info = dp_video_config;
+
+ dp_video_config->h_sync_polarity =
+ of_property_read_bool(dp_node, "hsync-active-high");
+
+ dp_video_config->v_sync_polarity =
+ of_property_read_bool(dp_node, "vsync-active-high");
+
+ dp_video_config->interlaced =
+ of_property_read_bool(dp_node, "interlaced");
+
+ if (of_property_read_u32(dp_node, "samsung,color-space",
+ &dp_video_config->color_space)) {
+ dev_err(dev, "failed to get color-space\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,dynamic-range",
+ &dp_video_config->dynamic_range)) {
+ dev_err(dev, "failed to get dynamic-range\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+ &dp_video_config->ycbcr_coeff)) {
+ dev_err(dev, "failed to get ycbcr-coeff\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,color-depth",
+ &dp_video_config->color_depth)) {
+ dev_err(dev, "failed to get color-depth\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,link-rate",
+ &dp_video_config->link_rate)) {
+ dev_err(dev, "failed to get link-rate\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dp_node, "samsung,lane-count",
+ &dp_video_config->lane_count)) {
+ dev_err(dev, "failed to get lane-count\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return pd;
+}
+
+static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
+{
+ struct device_node *dp_phy_node;
+ u32 phy_base;
+
+ dp_phy_node = of_find_node_by_name(dp->dev->of_node, "dptx-phy");
+ if (!dp_phy_node) {
+ dev_err(dp->dev, "could not find dptx-phy node\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
+ dev_err(dp->dev, "faild to get reg for dptx-phy\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
+ &dp->enable_mask)) {
+ dev_err(dp->dev, "faild to get enable-mask for dptx-phy\n");
+ return -EINVAL;
+ }
+
+ dp->phy_addr = ioremap(phy_base, SZ_4);
+ if (!dp->phy_addr) {
+ dev_err(dp->dev, "failed to ioremap dp-phy\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg |= dp->enable_mask;
+ __raw_writel(reg, dp->phy_addr);
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = __raw_readl(dp->phy_addr);
+ reg &= ~(dp->enable_mask);
+ __raw_writel(reg, dp->phy_addr);
+}
+#else
+static struct exynos_dp_platdata *exynos_dp_dt_parse_pdata(struct device *dev)
+{
+ return NULL;
+}
+
+static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
+{
+ return -EINVAL;
+}
+
+static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+{
+ return;
+}
+
+static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+{
+ return;
+}
+#endif /* CONFIG_OF */
+
static int __devinit exynos_dp_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -864,12 +1041,6 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
int ret = 0;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- return -EINVAL;
- }
-
dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
GFP_KERNEL);
if (!dp) {
@@ -879,6 +1050,22 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->dev = &pdev->dev;
+ if (pdev->dev.of_node) {
+ pdata = exynos_dp_dt_parse_pdata(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ ret = exynos_dp_dt_parse_phydata(dp);
+ if (ret)
+ return ret;
+ } else {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ }
+
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
@@ -896,50 +1083,29 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
}
dp->irq = platform_get_irq(pdev, 0);
- if (!dp->irq) {
+ if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
- "exynos-dp", dp);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
+ INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
dp->video_info = pdata->video_info;
- if (pdata->phy_init)
- pdata->phy_init();
-
- exynos_dp_init_dp(dp);
-
- ret = exynos_dp_detect_hpd(dp);
- if (ret) {
- dev_err(&pdev->dev, "unable to detect hpd\n");
- return ret;
- }
- exynos_dp_handle_edid(dp);
-
- ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
- dp->video_info->link_rate);
- if (ret) {
- dev_err(&pdev->dev, "unable to do link train\n");
- return ret;
+ if (pdev->dev.of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_init(dp);
+ } else {
+ if (pdata->phy_init)
+ pdata->phy_init();
}
- exynos_dp_enable_scramble(dp, 1);
- exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
- exynos_dp_enable_enhanced_mode(dp, 1);
-
- exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
- exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
+ exynos_dp_init_dp(dp);
- exynos_dp_init_video(dp);
- ret = exynos_dp_config_video(dp, dp->video_info);
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+ "exynos-dp", dp);
if (ret) {
- dev_err(&pdev->dev, "unable to config video\n");
+ dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
@@ -953,23 +1119,41 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit();
+ disable_irq(dp->irq);
+
+ if (work_pending(&dp->hotplug_work))
+ flush_work(&dp->hotplug_work);
+
+ if (pdev->dev.of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_exit(dp);
+ } else {
+ if (pdata->phy_exit)
+ pdata->phy_exit();
+ }
clk_disable_unprepare(dp->clock);
+
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int exynos_dp_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+ struct exynos_dp_platdata *pdata = dev->platform_data;
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- if (pdata && pdata->phy_exit)
- pdata->phy_exit();
+ if (work_pending(&dp->hotplug_work))
+ flush_work(&dp->hotplug_work);
+
+ if (dev->of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_exit(dp);
+ } else {
+ if (pdata->phy_exit)
+ pdata->phy_exit();
+ }
clk_disable_unprepare(dp->clock);
@@ -978,32 +1162,22 @@ static int exynos_dp_suspend(struct device *dev)
static int exynos_dp_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+ struct exynos_dp_platdata *pdata = dev->platform_data;
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
- if (pdata && pdata->phy_init)
- pdata->phy_init();
+ if (dev->of_node) {
+ if (dp->phy_addr)
+ exynos_dp_phy_init(dp);
+ } else {
+ if (pdata->phy_init)
+ pdata->phy_init();
+ }
clk_prepare_enable(dp->clock);
exynos_dp_init_dp(dp);
- exynos_dp_detect_hpd(dp);
- exynos_dp_handle_edid(dp);
-
- exynos_dp_set_link_train(dp, dp->video_info->lane_count,
- dp->video_info->link_rate);
-
- exynos_dp_enable_scramble(dp, 1);
- exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
- exynos_dp_enable_enhanced_mode(dp, 1);
-
- exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
- exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
-
- exynos_dp_init_video(dp);
- exynos_dp_config_video(dp, dp->video_info);
+ enable_irq(dp->irq);
return 0;
}
@@ -1013,6 +1187,12 @@ static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
};
+static const struct of_device_id exynos_dp_match[] = {
+ { .compatible = "samsung,exynos5-dp" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
+
static struct platform_driver exynos_dp_driver = {
.probe = exynos_dp_probe,
.remove = __devexit_p(exynos_dp_remove),
@@ -1020,6 +1200,7 @@ static struct platform_driver exynos_dp_driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
.pm = &exynos_dp_pm_ops,
+ .of_match_table = of_match_ptr(exynos_dp_match),
},
};
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 57b8a6531c0..6c567bbf2fb 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -13,6 +13,13 @@
#ifndef _EXYNOS_DP_CORE_H
#define _EXYNOS_DP_CORE_H
+enum dp_irq_type {
+ DP_IRQ_TYPE_HP_CABLE_IN,
+ DP_IRQ_TYPE_HP_CABLE_OUT,
+ DP_IRQ_TYPE_HP_CHANGE,
+ DP_IRQ_TYPE_UNKNOWN,
+};
+
struct link_train {
int eq_loop;
int cr_loop[4];
@@ -29,9 +36,12 @@ struct exynos_dp_device {
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
+ void __iomem *phy_addr;
+ unsigned int enable_mask;
struct video_info *video_info;
struct link_train link_train;
+ struct work_struct hotplug_work;
};
/* exynos_dp_reg.c */
@@ -50,6 +60,8 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
bool enable);
void exynos_dp_init_analog_func(struct exynos_dp_device *dp);
void exynos_dp_init_hpd(struct exynos_dp_device *dp);
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp);
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp);
void exynos_dp_reset_aux(struct exynos_dp_device *dp);
void exynos_dp_init_aux(struct exynos_dp_device *dp);
int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp);
@@ -107,11 +119,7 @@ u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
void exynos_dp_reset_macro(struct exynos_dp_device *dp);
void exynos_dp_init_video(struct exynos_dp_device *dp);
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
- u32 color_depth,
- u32 color_space,
- u32 dynamic_range,
- u32 ycbcr_coeff);
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp);
int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp);
void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
enum clock_recovery_m_value_type type,
@@ -121,8 +129,7 @@ void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type);
void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable);
void exynos_dp_start_video(struct exynos_dp_device *dp);
int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp);
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
- struct video_info *video_info);
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp);
void exynos_dp_enable_scrambling(struct exynos_dp_device *dp);
void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 3f5ca8a0d5e..29d9d035c73 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -19,11 +19,11 @@
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
-#define COMMON_INT_MASK_1 (0)
-#define COMMON_INT_MASK_2 (0)
-#define COMMON_INT_MASK_3 (0)
-#define COMMON_INT_MASK_4 (0)
-#define INT_STA_MASK (0)
+#define COMMON_INT_MASK_1 0
+#define COMMON_INT_MASK_2 0
+#define COMMON_INT_MASK_3 0
+#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
+#define INT_STA_MASK INT_HPD
void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
{
@@ -88,7 +88,7 @@ void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
{
/* Set interrupt pin assertion polarity as high */
- writel(INT_POL, dp->reg_base + EXYNOS_DP_INT_CTL);
+ writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);
/* Clear pending regisers */
writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
@@ -324,7 +324,7 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
}
-void exynos_dp_init_hpd(struct exynos_dp_device *dp)
+void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
{
u32 reg;
@@ -333,12 +333,38 @@ void exynos_dp_init_hpd(struct exynos_dp_device *dp)
reg = INT_HPD;
writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
+}
+
+void exynos_dp_init_hpd(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ exynos_dp_clear_hotplug_interrupts(dp);
reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
reg &= ~(F_HPD | HPD_CTRL);
writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
}
+enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ /* Parse hotplug interrupt status register */
+ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
+
+ if (reg & PLUG)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+
+ if (reg & HPD_LOST)
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+ if (reg & HOTPLUG_CHG)
+ return DP_IRQ_TYPE_HP_CHANGE;
+
+ return DP_IRQ_TYPE_UNKNOWN;
+}
+
void exynos_dp_reset_aux(struct exynos_dp_device *dp)
{
u32 reg;
@@ -491,7 +517,7 @@ int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
int i;
int retval;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
@@ -552,7 +578,7 @@ int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
else
cur_data_count = count - start_offset;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Select DPCD device address */
reg = AUX_ADDR_7_0(reg_addr + start_offset);
writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
@@ -617,7 +643,7 @@ int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
cur_data_count = count - start_offset;
/* AUX CH Request Transaction process */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Select DPCD device address */
reg = AUX_ADDR_7_0(reg_addr + start_offset);
writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
@@ -700,17 +726,15 @@ int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
int i;
int retval;
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
/* Select EDID device */
retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
- if (retval != 0) {
- dev_err(dp->dev, "Select EDID device fail!\n");
+ if (retval != 0)
continue;
- }
/*
* Set I2C transaction and read data
@@ -750,7 +774,7 @@ int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
int retval = 0;
for (i = 0; i < count; i += 16) {
- for (j = 0; j < 100; j++) {
+ for (j = 0; j < 3; j++) {
/* Clear AUX CH data buffer */
reg = BUF_CLR;
writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
@@ -1034,24 +1058,20 @@ void exynos_dp_init_video(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
}
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp,
- u32 color_depth,
- u32 color_space,
- u32 dynamic_range,
- u32 ycbcr_coeff)
+void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
{
u32 reg;
/* Configure the input color depth, color space, dynamic range */
- reg = (dynamic_range << IN_D_RANGE_SHIFT) |
- (color_depth << IN_BPC_SHIFT) |
- (color_space << IN_COLOR_F_SHIFT);
+ reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
+ (dp->video_info->color_depth << IN_BPC_SHIFT) |
+ (dp->video_info->color_space << IN_COLOR_F_SHIFT);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);
/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
reg &= ~IN_YC_COEFFI_MASK;
- if (ycbcr_coeff)
+ if (dp->video_info->ycbcr_coeff)
reg |= IN_YC_COEFFI_ITU709;
else
reg |= IN_YC_COEFFI_ITU601;
@@ -1178,8 +1198,7 @@ int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
return 0;
}
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
- struct video_info *video_info)
+void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
{
u32 reg;
@@ -1190,17 +1209,17 @@ void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp,
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~INTERACE_SCAN_CFG;
- reg |= (video_info->interlaced << 2);
+ reg |= (dp->video_info->interlaced << 2);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~VSYNC_POLARITY_CFG;
- reg |= (video_info->v_sync_polarity << 1);
+ reg |= (dp->video_info->v_sync_polarity << 1);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg &= ~HSYNC_POLARITY_CFG;
- reg |= (video_info->h_sync_polarity << 0);
+ reg |= (dp->video_info->h_sync_polarity << 0);
writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 1f2f014cfe8..2e9bd0e0b9f 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -242,7 +242,8 @@
/* EXYNOS_DP_INT_CTL */
#define SOFT_INT_CTRL (0x1 << 2)
-#define INT_POL (0x1 << 0)
+#define INT_POL1 (0x1 << 1)
+#define INT_POL0 (0x1 << 0)
/* EXYNOS_DP_SYS_CTL_1 */
#define DET_STA (0x1 << 2)
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index ede9e55413f..d3fc92eaee8 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -337,13 +337,11 @@ struct mfb_info {
int registered;
unsigned long pseudo_palette[16];
struct diu_ad *ad;
- int cursor_reset;
unsigned char g_alpha;
unsigned int count;
int x_aoi_d; /* aoi display x offset to physical screen */
int y_aoi_d; /* aoi display y offset to physical screen */
struct fsl_diu_data *parent;
- u8 *edid_data;
};
/**
@@ -378,6 +376,8 @@ struct fsl_diu_data {
struct diu_ad ad[NUM_AOIS] __aligned(8);
u8 gamma[256 * 3] __aligned(32);
u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
+ uint8_t edid_data[EDID_LENGTH];
+ bool has_edid;
} __aligned(32);
/* Determine the DMA address of a member of the fsl_diu_data structure */
@@ -430,6 +430,22 @@ static struct mfb_info mfb_template[] = {
},
};
+#ifdef DEBUG
+static void __attribute__ ((unused)) fsl_diu_dump(struct diu __iomem *hw)
+{
+ mb();
+ pr_debug("DIU: desc=%08x,%08x,%08x, gamma=%08x pallete=%08x "
+ "cursor=%08x curs_pos=%08x diu_mode=%08x bgnd=%08x "
+ "disp_size=%08x hsyn_para=%08x vsyn_para=%08x syn_pol=%08x "
+ "thresholds=%08x int_mask=%08x plut=%08x\n",
+ hw->desc[0], hw->desc[1], hw->desc[2], hw->gamma,
+ hw->pallete, hw->cursor, hw->curs_pos, hw->diu_mode,
+ hw->bgnd, hw->disp_size, hw->hsyn_para, hw->vsyn_para,
+ hw->syn_pol, hw->thresholds, hw->int_mask, hw->plut);
+ rmb();
+}
+#endif
+
/**
* fsl_diu_name_to_port - convert a port name to a monitor port enum
*
@@ -481,8 +497,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != ad->paddr)
- wr_reg_wa(&hw->desc[0], ad->paddr);
+ wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case PLANE1_AOI0:
cmfbi = &data->mfb[2];
@@ -534,8 +549,7 @@ static void fsl_diu_disable_panel(struct fb_info *info)
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != data->dummy_ad.paddr)
- wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
+ wr_reg_wa(&hw->desc[0], 0);
break;
case PLANE1_AOI0:
cmfbi = &data->mfb[2];
@@ -792,7 +806,8 @@ static void update_lcdc(struct fb_info *info)
hw = data->diu_reg;
- diu_ops.set_monitor_port(data->monitor_port);
+ if (diu_ops.set_monitor_port)
+ diu_ops.set_monitor_port(data->monitor_port);
gamma_table_base = data->gamma;
/* Prep for DIU init - gamma table, cursor table */
@@ -811,12 +826,8 @@ static void update_lcdc(struct fb_info *info)
out_be32(&hw->gamma, DMA_ADDR(data, gamma));
out_be32(&hw->cursor, DMA_ADDR(data, cursor));
- out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
- out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
- out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
- /* DISP SIZE */
- out_be32(&hw->wb_size, 0); /* WB SIZE */
- out_be32(&hw->wb_mem_addr, 0); /* WB MEM ADDR */
+ out_be32(&hw->bgnd, 0x007F7F7F); /* Set background to grey */
+ out_be32(&hw->disp_size, (var->yres << 16) | var->xres);
/* Horizontal and vertical configuration register */
temp = var->left_margin << 22 | /* BP_H */
@@ -833,9 +844,20 @@ static void update_lcdc(struct fb_info *info)
diu_ops.set_pixel_clock(var->pixclock);
- out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
- out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
+#ifndef CONFIG_PPC_MPC512x
+ /*
+ * The PLUT register is defined differently on the MPC5121 than it
+ * is on other SOCs. Unfortunately, there's no documentation that
+ * explains how it's supposed to be programmed, so for now, we leave
+ * it at the default value on the MPC5121.
+ *
+ * For other SOCs, program it for the highest priority, which will
+ * reduce the chance of underrun. Technically, we should scale the
+ * priority to match the screen resolution, but doing that properly
+ * requires delicate fine-tuning for each use-case.
+ */
out_be32(&hw->plut, 0x01F5F666);
+#endif
/* Enable the DIU */
enable_lcdc(info);
@@ -965,7 +987,6 @@ static int fsl_diu_set_par(struct fb_info *info)
hw = data->diu_reg;
set_fix(info);
- mfbi->cursor_reset = 1;
len = info->var.yres_virtual * info->fix.line_length;
/* Alloc & dealloc each time resolution/bpp change */
@@ -1107,6 +1128,12 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
if (!arg)
return -EINVAL;
+
+ dev_dbg(info->dev, "ioctl %08x (dir=%s%s type=%u nr=%u size=%u)\n", cmd,
+ _IOC_DIR(cmd) & _IOC_READ ? "R" : "",
+ _IOC_DIR(cmd) & _IOC_WRITE ? "W" : "",
+ _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
+
switch (cmd) {
case MFB_SET_PIXFMT_OLD:
dev_warn(info->dev,
@@ -1180,6 +1207,23 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
ad->ckmin_b = ck.blue_min;
}
break;
+#ifdef CONFIG_PPC_MPC512x
+ case MFB_SET_GAMMA: {
+ struct fsl_diu_data *data = mfbi->parent;
+
+ if (copy_from_user(data->gamma, buf, sizeof(data->gamma)))
+ return -EFAULT;
+ setbits32(&data->diu_reg->gamma, 0); /* Force table reload */
+ break;
+ }
+ case MFB_GET_GAMMA: {
+ struct fsl_diu_data *data = mfbi->parent;
+
+ if (copy_to_user(buf, data->gamma, sizeof(data->gamma)))
+ return -EFAULT;
+ break;
+ }
+#endif
default:
dev_err(info->dev, "unknown ioctl command (0x%08X)\n", cmd);
return -ENOIOCTLCMD;
@@ -1206,8 +1250,22 @@ static int fsl_diu_open(struct fb_info *info, int user)
res = fsl_diu_set_par(info);
if (res < 0)
mfbi->count--;
- else
+ else {
+ struct fsl_diu_data *data = mfbi->parent;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ /*
+ * Enable underrun detection and vertical sync
+ * interrupts.
+ */
+ clrbits32(&data->diu_reg->int_mask,
+ INT_UNDRUN | INT_VSYNC);
+#else
+ /* Enable underrun detection */
+ clrbits32(&data->diu_reg->int_mask, INT_UNDRUN);
+#endif
fsl_diu_enable_panel(info);
+ }
}
spin_unlock(&diu_lock);
@@ -1223,8 +1281,13 @@ static int fsl_diu_release(struct fb_info *info, int user)
spin_lock(&diu_lock);
mfbi->count--;
- if (mfbi->count == 0)
+ if (mfbi->count == 0) {
+ struct fsl_diu_data *data = mfbi->parent;
+
+ /* Disable interrupts */
+ out_be32(&data->diu_reg->int_mask, 0xffffffff);
fsl_diu_disable_panel(info);
+ }
spin_unlock(&diu_lock);
return res;
@@ -1248,6 +1311,7 @@ static int __devinit install_fb(struct fb_info *info)
{
int rc;
struct mfb_info *mfbi = info->par;
+ struct fsl_diu_data *data = mfbi->parent;
const char *aoi_mode, *init_aoi_mode = "320x240";
struct fb_videomode *db = fsl_diu_mode_db;
unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
@@ -1264,9 +1328,9 @@ static int __devinit install_fb(struct fb_info *info)
return rc;
if (mfbi->index == PLANE0) {
- if (mfbi->edid_data) {
+ if (data->has_edid) {
/* Now build modedb from EDID */
- fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs);
+ fb_edid_to_monspecs(data->edid_data, &info->monspecs);
fb_videomode_to_modelist(info->monspecs.modedb,
info->monspecs.modedb_len,
&info->modelist);
@@ -1284,7 +1348,7 @@ static int __devinit install_fb(struct fb_info *info)
* For plane 0 we continue and look into
* driver's internal modedb.
*/
- if ((mfbi->index == PLANE0) && mfbi->edid_data)
+ if ((mfbi->index == PLANE0) && data->has_edid)
has_default_mode = 0;
else
return -EINVAL;
@@ -1348,9 +1412,6 @@ static void uninstall_fb(struct fb_info *info)
if (!mfbi->registered)
return;
- if (mfbi->index == PLANE0)
- kfree(mfbi->edid_data);
-
unregister_framebuffer(info);
unmap_video_memory(info);
if (&info->cmap)
@@ -1362,7 +1423,7 @@ static void uninstall_fb(struct fb_info *info)
static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
{
struct diu __iomem *hw = dev_id;
- unsigned int status = in_be32(&hw->int_status);
+ uint32_t status = in_be32(&hw->int_status);
if (status) {
/* This is the workaround for underrun */
@@ -1387,40 +1448,6 @@ static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static int request_irq_local(struct fsl_diu_data *data)
-{
- struct diu __iomem *hw = data->diu_reg;
- u32 ints;
- int ret;
-
- /* Read to clear the status */
- in_be32(&hw->int_status);
-
- ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
- if (!ret) {
- ints = INT_PARERR | INT_LS_BF_VS;
-#if !defined(CONFIG_NOT_COHERENT_CACHE)
- ints |= INT_VSYNC;
-#endif
-
- /* Read to clear the status */
- in_be32(&hw->int_status);
- out_be32(&hw->int_mask, ints);
- }
-
- return ret;
-}
-
-static void free_irq_local(struct fsl_diu_data *data)
-{
- struct diu __iomem *hw = data->diu_reg;
-
- /* Disable all LCDC interrupt */
- out_be32(&hw->int_mask, 0x1f);
-
- free_irq(data->irq, NULL);
-}
-
#ifdef CONFIG_PM
/*
* Power management hooks. Note that we won't be called from IRQ context,
@@ -1496,8 +1523,8 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
struct fsl_diu_data *data;
- int diu_mode;
dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
+ const void *prop;
unsigned int i;
int ret;
@@ -1541,17 +1568,13 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
mfbi->parent = data;
mfbi->ad = &data->ad[i];
+ }
- if (mfbi->index == PLANE0) {
- const u8 *prop;
- int len;
-
- /* Get EDID */
- prop = of_get_property(np, "edid", &len);
- if (prop && len == EDID_LENGTH)
- mfbi->edid_data = kmemdup(prop, EDID_LENGTH,
- GFP_KERNEL);
- }
+ /* Get the EDID data from the device tree, if present */
+ prop = of_get_property(np, "edid", &ret);
+ if (prop && ret == EDID_LENGTH) {
+ memcpy(data->edid_data, prop, EDID_LENGTH);
+ data->has_edid = true;
}
data->diu_reg = of_iomap(np, 0);
@@ -1561,10 +1584,6 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
goto error;
}
- diu_mode = in_be32(&data->diu_reg->diu_mode);
- if (diu_mode == MFB_MODE0)
- out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */
-
/* Get the IRQ of the DIU */
data->irq = irq_of_parse_and_map(np, 0);
@@ -1586,11 +1605,11 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);
/*
- * Let DIU display splash screen if it was pre-initialized
- * by the bootloader, set dummy area descriptor otherwise.
+ * Let DIU continue to display splash screen if it was pre-initialized
+ * by the bootloader; otherwise, clear the display.
*/
- if (diu_mode == MFB_MODE0)
- out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);
+ if (in_be32(&data->diu_reg->diu_mode) == MFB_MODE0)
+ out_be32(&data->diu_reg->desc[0], 0);
out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
@@ -1603,7 +1622,16 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
}
}
- if (request_irq_local(data)) {
+ /*
+ * Older versions of U-Boot leave interrupts enabled, so disable
+ * all of them and clear the status register.
+ */
+ out_be32(&data->diu_reg->int_mask, 0xffffffff);
+ in_be32(&data->diu_reg->int_status);
+
+ ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb",
+ &data->diu_reg);
+ if (ret) {
dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
@@ -1638,7 +1666,8 @@ static int fsl_diu_remove(struct platform_device *pdev)
data = dev_get_drvdata(&pdev->dev);
disable_lcdc(&data->fsl_diu_info[0]);
- free_irq_local(data);
+
+ free_irq(data->irq, &data->diu_reg);
for (i = 0; i < NUM_AOIS; i++)
uninstall_fb(&data->fsl_diu_info[i]);
@@ -1741,6 +1770,9 @@ static int __init fsl_diu_init(void)
coherence_data_size = be32_to_cpup(prop) * 13;
coherence_data_size /= 8;
+ pr_debug("fsl-diu-fb: coherence data size is %zu bytes\n",
+ coherence_data_size);
+
prop = of_get_property(np, "d-cache-line-size", NULL);
if (prop == NULL) {
pr_err("fsl-diu-fb: missing 'd-cache-line-size' property' "
@@ -1750,10 +1782,17 @@ static int __init fsl_diu_init(void)
}
d_cache_line_size = be32_to_cpup(prop);
+ pr_debug("fsl-diu-fb: cache lines size is %u bytes\n",
+ d_cache_line_size);
+
of_node_put(np);
coherence_data = vmalloc(coherence_data_size);
- if (!coherence_data)
+ if (!coherence_data) {
+ pr_err("fsl-diu-fb: could not allocate coherence data "
+ "(size=%zu)\n", coherence_data_size);
return -ENOMEM;
+ }
+
#endif
ret = platform_driver_register(&fsl_diu_driver);
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
index 0e9afa41d16..4bdea6e9bd5 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/gxt4500.c
@@ -1,5 +1,6 @@
/*
- * Frame buffer device for IBM GXT4500P and GXT6000P display adaptors
+ * Frame buffer device for IBM GXT4500P/6500P and GXT4000P/6000P
+ * display adaptors
*
* Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
*/
@@ -14,6 +15,8 @@
#include <linux/string.h>
#define PCI_DEVICE_ID_IBM_GXT4500P 0x21c
+#define PCI_DEVICE_ID_IBM_GXT6500P 0x21b
+#define PCI_DEVICE_ID_IBM_GXT4000P 0x16e
#define PCI_DEVICE_ID_IBM_GXT6000P 0x170
/* GXT4500P registers */
@@ -173,6 +176,8 @@ static const struct fb_videomode defaultmode __devinitconst = {
/* List of supported cards */
enum gxt_cards {
GXT4500P,
+ GXT6500P,
+ GXT4000P,
GXT6000P
};
@@ -182,6 +187,8 @@ static const struct cardinfo {
const char *cardname;
} cardinfo[] = {
[GXT4500P] = { .refclk_ps = 9259, .cardname = "IBM GXT4500P" },
+ [GXT6500P] = { .refclk_ps = 9259, .cardname = "IBM GXT6500P" },
+ [GXT4000P] = { .refclk_ps = 40000, .cardname = "IBM GXT4000P" },
[GXT6000P] = { .refclk_ps = 40000, .cardname = "IBM GXT6000P" },
};
@@ -736,6 +743,10 @@ static void __devexit gxt4500_remove(struct pci_dev *pdev)
static const struct pci_device_id gxt4500_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4500P),
.driver_data = GXT4500P },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT6500P),
+ .driver_data = GXT6500P },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT4000P),
+ .driver_data = GXT4000P },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_GXT6000P),
.driver_data = GXT6000P },
{ 0 }
@@ -768,7 +779,7 @@ static void __exit gxt4500_exit(void)
module_exit(gxt4500_exit);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
-MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P/6000P");
+MODULE_DESCRIPTION("FBDev driver for IBM GXT4500P/6500P and GXT4000P/6000P");
MODULE_LICENSE("GPL");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
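The gxt4500 additions use the standard PCI ID-table idiom: each pci_device_id entry stores an enum index in .driver_data, and probe() uses that index to pick the matching per-card info (here, the reference-clock period that differs between the GXT4500P/6500P and GXT4000P/6000P families). A reduced sketch of that lookup, with hypothetical names rather than the driver's own:

#include <linux/pci.h>

/* Illustrative only; the real cardinfo[] follows the same idea */
struct card_info {
	int refclk_ps;		/* reference clock period, picoseconds */
	const char *cardname;
};

static const struct card_info cards[] = {
	{ 9259,  "IBM GXT4500P" },
	{ 40000, "IBM GXT4000P" },
};

static int gxt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* ent->driver_data is the enum value from the matching table entry */
	const struct card_info *ci = &cards[ent->driver_data];

	dev_info(&pdev->dev, "found %s, refclk period %d ps\n",
		 ci->cardname, ci->refclk_ps);
	return 0;
}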
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index c39d6e46f8c..b52f62595f6 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -31,7 +31,7 @@
#include <linux/gfp.h>
#include <mach/lcdc.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include <asm/mach-types.h>
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 1b5ee8ec192..e31f5b33b50 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -30,7 +30,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "omapfb.h"
#include "lcdc.h"
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index c510a445739..d4e7684e704 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <plat-omap/dma-omap.h>
+#include <linux/omap-dma.h>
#include "omapfb.h"
#include "lcdc.h"
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
index d877c361abd..b07b2b042e7 100644
--- a/drivers/video/omap2/Kconfig
+++ b/drivers/video/omap2/Kconfig
@@ -1,9 +1,10 @@
-config OMAP2_VRAM
- bool
-
config OMAP2_VRFB
bool
+if ARCH_OMAP2PLUS
+
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
source "drivers/video/omap2/displays/Kconfig"
+
+endif
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index 5ddef129f79..5ea7cb9aed1 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_OMAP2_VRAM) += vram.o
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-$(CONFIG_OMAP2_DSS) += dss/
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index c835aa70f96..65eb76c840a 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -710,27 +710,6 @@ static void acx_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int acx_panel_suspend(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "%s\n", __func__);
- acx_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int acx_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- dev_dbg(&dssdev->dev, "%s\n", __func__);
- r = acx_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return 0;
-}
-
static void acx_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -752,8 +731,6 @@ static struct omap_dss_driver acx_panel_driver = {
.enable = acx_panel_enable,
.disable = acx_panel_disable,
- .suspend = acx_panel_suspend,
- .resume = acx_panel_resume,
.set_timings = acx_panel_set_timings,
.check_timings = acx_panel_check_timings,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 88295c52681..54ca8ae2107 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -688,40 +688,6 @@ static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&drv_data->lock);
}
-static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&drv_data->lock);
-
- generic_dpi_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&drv_data->lock);
-
- return 0;
-}
-
-static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&drv_data->lock);
-
- r = generic_dpi_panel_power_on(dssdev);
- if (r)
- goto err;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&drv_data->lock);
-
- return r;
-}
-
static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -769,8 +735,6 @@ static struct omap_dss_driver dpi_driver = {
.enable = generic_dpi_panel_enable,
.disable = generic_dpi_panel_disable,
- .suspend = generic_dpi_panel_suspend,
- .resume = generic_dpi_panel_resume,
.set_timings = generic_dpi_panel_set_timings,
.get_timings = generic_dpi_panel_get_timings,
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 90c1cabf244..ace419b801e 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -143,46 +143,12 @@ static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ld->lock);
}
-static int lb035q02_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&ld->lock);
-
- lb035q02_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ld->lock);
- return 0;
-}
-
-static int lb035q02_panel_resume(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&ld->lock);
-
- r = lb035q02_panel_power_on(dssdev);
- if (r)
- goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ld->lock);
- return 0;
-err:
- mutex_unlock(&ld->lock);
- return r;
-}
-
static struct omap_dss_driver lb035q02_driver = {
.probe = lb035q02_panel_probe,
.remove = lb035q02_panel_remove,
.enable = lb035q02_panel_enable,
.disable = lb035q02_panel_disable,
- .suspend = lb035q02_panel_suspend,
- .resume = lb035q02_panel_resume,
.driver = {
.name = "lgphilips_lb035q02_panel",
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index 3fc5ad081a2..d1cb722fcdb 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -574,54 +574,6 @@ static void n8x0_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
}
-static int n8x0_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
-
- dev_dbg(&dssdev->dev, "suspend\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- n8x0_panel_power_off(dssdev);
-
- rfbi_bus_unlock();
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static int n8x0_panel_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- int r;
-
- dev_dbg(&dssdev->dev, "resume\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- r = n8x0_panel_power_on(dssdev);
-
- rfbi_bus_unlock();
-
- if (r) {
- mutex_unlock(&ddata->lock);
- return r;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -683,8 +635,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
.enable = n8x0_panel_enable,
.disable = n8x0_panel_disable,
- .suspend = n8x0_panel_suspend,
- .resume = n8x0_panel_resume,
.update = n8x0_panel_update,
.sync = n8x0_panel_sync,
@@ -702,18 +652,25 @@ static struct omap_dss_driver n8x0_panel_driver = {
static int mipid_spi_probe(struct spi_device *spi)
{
+ int r;
+
dev_dbg(&spi->dev, "mipid_spi_probe\n");
spi->mode = SPI_MODE_0;
s_drv_data.spidev = spi;
- return 0;
+ r = omap_dss_register_driver(&n8x0_panel_driver);
+ if (r)
+ pr_err("n8x0_panel: dss driver registration failed\n");
+
+ return r;
}
static int mipid_spi_remove(struct spi_device *spi)
{
dev_dbg(&spi->dev, "mipid_spi_remove\n");
+ omap_dss_unregister_driver(&n8x0_panel_driver);
return 0;
}
@@ -725,34 +682,6 @@ static struct spi_driver mipid_spi_driver = {
.probe = mipid_spi_probe,
.remove = __devexit_p(mipid_spi_remove),
};
+module_spi_driver(mipid_spi_driver);
-static int __init n8x0_panel_drv_init(void)
-{
- int r;
-
- r = spi_register_driver(&mipid_spi_driver);
- if (r) {
- pr_err("n8x0_panel: spi driver registration failed\n");
- return r;
- }
-
- r = omap_dss_register_driver(&n8x0_panel_driver);
- if (r) {
- pr_err("n8x0_panel: dss driver registration failed\n");
- spi_unregister_driver(&mipid_spi_driver);
- return r;
- }
-
- return 0;
-}
-
-static void __exit n8x0_panel_drv_exit(void)
-{
- spi_unregister_driver(&mipid_spi_driver);
-
- omap_dss_unregister_driver(&n8x0_panel_driver);
-}
-
-module_init(n8x0_panel_drv_init);
-module_exit(n8x0_panel_drv_exit);
MODULE_LICENSE("GPL");
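module_spi_driver() above replaces the hand-rolled module_init()/module_exit() pair; it is the standard helper from <linux/spi/spi.h>, expanding roughly to module_driver() with the SPI register/unregister calls:

/* roughly what <linux/spi/spi.h> provides */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)

The omap_dss driver registration that used to live in the init function now happens from mipid_spi_probe(), so it only runs once the SPI device is actually bound, and it is undone in mipid_spi_remove().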
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 908fd268f3d..2a79c283beb 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -236,28 +236,6 @@ static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
-{
- nec_8048_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- r = nec_8048_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
{
return 16;
@@ -268,8 +246,6 @@ static struct omap_dss_driver nec_8048_driver = {
.remove = nec_8048_panel_remove,
.enable = nec_8048_panel_enable,
.disable = nec_8048_panel_disable,
- .suspend = nec_8048_panel_suspend,
- .resume = nec_8048_panel_resume,
.get_recommended_bpp = nec_8048_recommended_bpp,
.driver = {
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
index 9df87640ddd..1b94018aac3 100644
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -50,6 +50,7 @@ struct picodlp_i2c_data {
static struct i2c_device_id picodlp_i2c_id[] = {
{ "picodlp_i2c_driver", 0 },
+ { }
};
struct picodlp_i2c_command {
@@ -503,47 +504,6 @@ static void picodlp_panel_disable(struct omap_dss_device *dssdev)
dev_dbg(&dssdev->dev, "disabling picodlp panel\n");
}
-static int picodlp_panel_suspend(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&picod->lock);
- /* Turn off DLP Power */
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- mutex_unlock(&picod->lock);
- dev_err(&dssdev->dev, "unable to suspend picodlp panel,"
- " panel is not ACTIVE\n");
- return -EINVAL;
- }
-
- picodlp_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- mutex_unlock(&picod->lock);
-
- dev_dbg(&dssdev->dev, "suspending picodlp panel\n");
- return 0;
-}
-
-static int picodlp_panel_resume(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&picod->lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- mutex_unlock(&picod->lock);
- dev_err(&dssdev->dev, "unable to resume picodlp panel,"
- " panel is not ACTIVE\n");
- return -EINVAL;
- }
-
- r = picodlp_panel_power_on(dssdev);
- mutex_unlock(&picod->lock);
- dev_dbg(&dssdev->dev, "resuming picodlp panel\n");
- return r;
-}
-
static void picodlp_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -560,9 +520,6 @@ static struct omap_dss_driver picodlp_driver = {
.get_resolution = picodlp_get_resolution,
- .suspend = picodlp_panel_suspend,
- .resume = picodlp_panel_resume,
-
.driver = {
.name = "picodlp_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 1ec3b277ff1..cada8c621e0 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -194,29 +194,12 @@ static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
-{
- sharp_ls_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- return 0;
-}
-
-static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
-{
- int r;
- r = sharp_ls_power_on(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return r;
-}
-
static struct omap_dss_driver sharp_ls_driver = {
.probe = sharp_ls_panel_probe,
.remove = __exit_p(sharp_ls_panel_remove),
.enable = sharp_ls_panel_enable,
.disable = sharp_ls_panel_disable,
- .suspend = sharp_ls_panel_suspend,
- .resume = sharp_ls_panel_resume,
.driver = {
.name = "sharp_ls_panel",
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index f2f644680ca..a32407a5735 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -1245,76 +1245,6 @@ static void taal_disable(struct omap_dss_device *dssdev)
mutex_unlock(&td->lock);
}
-static int taal_suspend(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- dev_dbg(&dssdev->dev, "suspend\n");
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- taal_cancel_ulps_work(dssdev);
- taal_cancel_esd_work(dssdev);
-
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (!r)
- taal_power_off(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&td->lock);
-
- return 0;
-err:
- mutex_unlock(&td->lock);
- return r;
-}
-
-static int taal_resume(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- int r;
-
- dev_dbg(&dssdev->dev, "resume\n");
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- dsi_bus_lock(dssdev);
-
- r = taal_power_on(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- if (r) {
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- } else {
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- taal_queue_esd_work(dssdev);
- }
-
- mutex_unlock(&td->lock);
-
- return r;
-err:
- mutex_unlock(&td->lock);
- return r;
-}
-
static void taal_framedone_cb(int err, void *data)
{
struct omap_dss_device *dssdev = data;
@@ -1818,8 +1748,6 @@ static struct omap_dss_driver taal_driver = {
.enable = taal_enable,
.disable = taal_disable,
- .suspend = taal_suspend,
- .resume = taal_resume,
.update = taal_update,
.sync = taal_sync,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 383811cf864..8281baafe1e 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -189,37 +189,6 @@ static void tfp410_disable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
}
-static int tfp410_suspend(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
-
- mutex_lock(&ddata->lock);
-
- tfp410_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static int tfp410_resume(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- int r;
-
- mutex_lock(&ddata->lock);
-
- r = tfp410_power_on(dssdev);
- if (r == 0)
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
static void tfp410_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -355,8 +324,6 @@ static struct omap_dss_driver tfp410_driver = {
.enable = tfp410_enable,
.disable = tfp410_disable,
- .suspend = tfp410_suspend,
- .resume = tfp410_resume,
.set_timings = tfp410_set_timings,
.get_timings = tfp410_get_timings,
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index b5e6dbc59f0..316b3da6d2c 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -401,24 +401,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static int tpo_td043_suspend(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "suspend\n");
-
- tpo_td043_disable_dss(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int tpo_td043_resume(struct omap_dss_device *dssdev)
-{
- dev_dbg(&dssdev->dev, "resume\n");
-
- return tpo_td043_enable_dss(dssdev);
-}
-
static int tpo_td043_probe(struct omap_dss_device *dssdev)
{
struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
@@ -500,8 +482,6 @@ static struct omap_dss_driver tpo_td043_driver = {
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .suspend = tpo_td043_suspend,
- .resume = tpo_td043_resume,
.set_mirror = tpo_td043_set_hmirror,
.get_mirror = tpo_td043_get_hmirror,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 80f5390aa13..cb0f145c707 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -1,33 +1,30 @@
menuconfig OMAP2_DSS
tristate "OMAP2+ Display Subsystem support"
- depends on ARCH_OMAP2PLUS
help
OMAP2+ Display Subsystem support.
if OMAP2_DSS
-config OMAP2_VRAM_SIZE
- int "VRAM size (MB)"
- range 0 32
- default 0
+config OMAP2_DSS_DEBUG
+ bool "Debug support"
+ default n
help
- The amount of SDRAM to reserve at boot time for video RAM use.
- This VRAM will be used by omapfb and other drivers that need
- large continuous RAM area for video use.
+ This enables printing of debug messages. Alternatively, debug messages
+ can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting
+ appropriate flags in <debugfs>/dynamic_debug/control.
- You can also set this with "vram=<bytes>" kernel argument, or
- in the board file.
-
-config OMAP2_DSS_DEBUG_SUPPORT
- bool "Debug support"
- default y
+config OMAP2_DSS_DEBUGFS
+ bool "Debugfs filesystem support"
+ depends on DEBUG_FS
+ default n
help
- This enables debug messages. You need to enable printing
- with 'debug' module parameter.
+ This enables debugfs for OMAPDSS at <debugfs>/omapdss. It allows
+ querying the clock and register configuration of dss, dispc, dsi,
+ hdmi and rfbi.
config OMAP2_DSS_COLLECT_IRQ_STATS
bool "Collect DSS IRQ statistics"
- depends on OMAP2_DSS_DEBUG_SUPPORT
+ depends on OMAP2_DSS_DEBUGFS
default n
help
Collect DSS IRQ statistics, printable via debugfs.
@@ -62,7 +59,6 @@ config OMAP2_DSS_VENC
config OMAP4_DSS_HDMI
bool "HDMI support"
- depends on ARCH_OMAP4
default y
help
HDMI Interface. This adds the High Definition Multimedia Interface.
@@ -70,11 +66,9 @@ config OMAP4_DSS_HDMI
config OMAP4_DSS_HDMI_AUDIO
bool
- depends on OMAP4_DSS_HDMI
config OMAP2_DSS_SDI
bool "SDI support"
- depends on ARCH_OMAP3
default n
help
SDI (Serial Display Interface) support.
@@ -84,7 +78,6 @@ config OMAP2_DSS_SDI
config OMAP2_DSS_DSI
bool "DSI support"
- depends on ARCH_OMAP3 || ARCH_OMAP4 || ARCH_OMAP5
default n
help
MIPI DSI (Display Serial Interface) support.
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 4549869bfe1..61949ff7940 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,6 +1,10 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
+# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
- manager.o manager-sysfs.o overlay.o overlay-sysfs.o output.o apply.o
+ output.o
+# DSS compat layer files
+omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
+ dispc-compat.o display-sysfs.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o venc_panel.o
@@ -8,3 +12,4 @@ omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
hdmi_panel.o ti_hdmi_4xxx_ip.o
+ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index 19d66f471b4..d446bdfc4c8 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -18,6 +18,7 @@
#define DSS_SUBSYS_NAME "APPLY"
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
@@ -26,6 +27,7 @@
#include "dss.h"
#include "dss_features.h"
+#include "dispc-compat.h"
/*
* We have 4 levels of cache for the dispc settings. First two are in SW and
@@ -70,7 +72,6 @@ struct ovl_priv_data {
bool shadow_extra_info_dirty;
bool enabled;
- enum omap_channel channel;
u32 fifo_low, fifo_high;
/*
@@ -105,6 +106,9 @@ struct mgr_priv_data {
struct omap_video_timings timings;
struct dss_lcd_mgr_config lcd_config;
+
+ void (*framedone_handler)(void *);
+ void *framedone_handler_data;
};
static struct {
@@ -132,7 +136,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
return &dss_data.mgr_priv_data_array[mgr->id];
}
-void dss_apply_init(void)
+static void apply_init_priv(void)
{
const int num_ovls = dss_feat_get_num_ovls();
struct mgr_priv_data *mp;
@@ -414,11 +418,46 @@ static void wait_pending_extra_info_updates(void)
r = wait_for_completion_timeout(&extra_updated_completion, t);
if (r == 0)
DSSWARN("timeout in wait_pending_extra_info_updates\n");
- else if (r < 0)
- DSSERR("wait_pending_extra_info_updates failed: %d\n", r);
}
-int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
+{
+ return ovl->manager ?
+ (ovl->manager->output ? ovl->manager->output->device : NULL) :
+ NULL;
+}
+
+static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
+{
+ return mgr->output ? mgr->output->device : NULL;
+}
+
+static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct omap_dss_device *dssdev = mgr->get_device(mgr);
+ u32 irq;
+ int r;
+
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+ irq = DISPC_IRQ_EVSYNC_ODD;
+ else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
+ irq = DISPC_IRQ_EVSYNC_EVEN;
+ else
+ irq = dispc_mgr_get_vsync_irq(mgr->id);
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+
+ dispc_runtime_put();
+
+ return r;
+}
+
+static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -488,7 +527,7 @@ int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
return r;
}
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
unsigned long timeout = msecs_to_jiffies(500);
struct ovl_priv_data *op;
@@ -573,7 +612,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
struct mgr_priv_data *mp;
int r;
- DSSDBGF("%d", ovl->id);
+ DSSDBG("writing ovl %d regs", ovl->id);
if (!op->enabled || !op->info_dirty)
return;
@@ -608,7 +647,7 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct mgr_priv_data *mp;
- DSSDBGF("%d", ovl->id);
+ DSSDBG("writing ovl %d regs extra", ovl->id);
if (!op->extra_info_dirty)
return;
@@ -617,7 +656,6 @@ static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
* disabled */
dispc_ovl_enable(ovl->id, op->enabled);
- dispc_ovl_set_channel_out(ovl->id, op->channel);
dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
mp = get_mgr_priv(ovl->manager);
@@ -632,7 +670,7 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
struct mgr_priv_data *mp = get_mgr_priv(mgr);
struct omap_overlay *ovl;
- DSSDBGF("%d", mgr->id);
+ DSSDBG("writing mgr %d regs", mgr->id);
if (!mp->enabled)
return;
@@ -658,7 +696,7 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
- DSSDBGF("%d", mgr->id);
+ DSSDBG("writing mgr %d regs extra", mgr->id);
if (!mp->extra_info_dirty)
return;
@@ -666,22 +704,8 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
dispc_mgr_set_timings(mgr->id, &mp->timings);
/* lcd_config parameters */
- if (dss_mgr_is_lcd(mgr->id)) {
- dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode);
-
- dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode);
- dispc_mgr_enable_fifohandcheck(mgr->id,
- mp->lcd_config.fifohandcheck);
-
- dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info);
-
- dispc_mgr_set_tft_data_lines(mgr->id,
- mp->lcd_config.video_port_width);
-
- dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity);
-
- dispc_mgr_set_lcd_type_tft(mgr->id);
- }
+ if (dss_mgr_is_lcd(mgr->id))
+ dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);
mp->extra_info_dirty = false;
if (mp->updating)
@@ -761,7 +785,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
}
}
-void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -786,9 +810,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
if (!dss_data.irq_enabled && need_isr())
dss_register_vsync_isr();
- dispc_mgr_enable(mgr->id, true);
-
- mgr_clear_shadow_dirty(mgr);
+ dispc_mgr_enable_sync(mgr->id);
spin_unlock_irqrestore(&data_lock, flags);
}
@@ -845,7 +867,6 @@ static void dss_apply_irq_handler(void *data, u32 mask)
for (i = 0; i < num_mgrs; i++) {
struct omap_overlay_manager *mgr;
struct mgr_priv_data *mp;
- bool was_updating;
mgr = omap_dss_get_overlay_manager(i);
mp = get_mgr_priv(mgr);
@@ -853,7 +874,6 @@ static void dss_apply_irq_handler(void *data, u32 mask)
if (!mp->enabled)
continue;
- was_updating = mp->updating;
mp->updating = dispc_mgr_is_enabled(i);
if (!mgr_manual_update(mgr)) {
@@ -872,6 +892,21 @@ static void dss_apply_irq_handler(void *data, u32 mask)
if (!extra_updating)
complete_all(&extra_updated_completion);
+ /* call framedone handlers for manual update displays */
+ for (i = 0; i < num_mgrs; i++) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mgr_manual_update(mgr) || !mp->framedone_handler)
+ continue;
+
+ if (mask & dispc_mgr_get_framedone_irq(i))
+ mp->framedone_handler(mp->framedone_handler_data);
+ }
+
if (!need_isr())
dss_unregister_vsync_isr();
@@ -906,7 +941,7 @@ static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
mp->info = mp->user_info;
}
-int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
unsigned long flags;
struct omap_overlay *ovl;
@@ -1005,7 +1040,7 @@ static void dss_setup_fifos(void)
}
}
-int dss_mgr_enable(struct omap_overlay_manager *mgr)
+static int dss_mgr_enable_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -1035,10 +1070,13 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
if (!mgr_manual_update(mgr))
mp->updating = true;
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
spin_unlock_irqrestore(&data_lock, flags);
if (!mgr_manual_update(mgr))
- dispc_mgr_enable(mgr->id, true);
+ dispc_mgr_enable_sync(mgr->id);
out:
mutex_unlock(&apply_lock);
@@ -1052,7 +1090,7 @@ err:
return r;
}
-void dss_mgr_disable(struct omap_overlay_manager *mgr)
+static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
unsigned long flags;
@@ -1063,7 +1101,7 @@ void dss_mgr_disable(struct omap_overlay_manager *mgr)
goto out;
if (!mgr_manual_update(mgr))
- dispc_mgr_enable(mgr->id, false);
+ dispc_mgr_disable_sync(mgr->id);
spin_lock_irqsave(&data_lock, flags);
@@ -1076,7 +1114,7 @@ out:
mutex_unlock(&apply_lock);
}
-int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+static int dss_mgr_set_info(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1097,7 +1135,7 @@ int dss_mgr_set_info(struct omap_overlay_manager *mgr,
return 0;
}
-void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+static void dss_mgr_get_info(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info)
{
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1110,7 +1148,7 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_mgr_set_output(struct omap_overlay_manager *mgr,
+static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
struct omap_dss_output *output)
{
int r;
@@ -1142,7 +1180,7 @@ err:
return r;
}
-int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
+static int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
{
int r;
struct mgr_priv_data *mp = get_mgr_priv(mgr);
@@ -1189,7 +1227,7 @@ static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
mp->extra_info_dirty = true;
}
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+static void dss_mgr_set_timings_compat(struct omap_overlay_manager *mgr,
const struct omap_video_timings *timings)
{
unsigned long flags;
@@ -1217,7 +1255,7 @@ static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
mp->extra_info_dirty = true;
}
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+static void dss_mgr_set_lcd_config_compat(struct omap_overlay_manager *mgr,
const struct dss_lcd_mgr_config *config)
{
unsigned long flags;
@@ -1236,7 +1274,7 @@ out:
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_ovl_set_info(struct omap_overlay *ovl,
+static int dss_ovl_set_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1257,7 +1295,7 @@ int dss_ovl_set_info(struct omap_overlay *ovl,
return 0;
}
-void dss_ovl_get_info(struct omap_overlay *ovl,
+static void dss_ovl_get_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1270,7 +1308,7 @@ void dss_ovl_get_info(struct omap_overlay *ovl,
spin_unlock_irqrestore(&data_lock, flags);
}
-int dss_ovl_set_manager(struct omap_overlay *ovl,
+static int dss_ovl_set_manager(struct omap_overlay *ovl,
struct omap_overlay_manager *mgr)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
@@ -1289,45 +1327,40 @@ int dss_ovl_set_manager(struct omap_overlay *ovl,
goto err;
}
+ r = dispc_runtime_get();
+ if (r)
+ goto err;
+
spin_lock_irqsave(&data_lock, flags);
if (op->enabled) {
spin_unlock_irqrestore(&data_lock, flags);
DSSERR("overlay has to be disabled to change the manager\n");
r = -EINVAL;
- goto err;
+ goto err1;
}
- op->channel = mgr->id;
- op->extra_info_dirty = true;
+ dispc_ovl_set_channel_out(ovl->id, mgr->id);
ovl->manager = mgr;
list_add_tail(&ovl->list, &mgr->overlays);
spin_unlock_irqrestore(&data_lock, flags);
- /* XXX: When there is an overlay on a DSI manual update display, and
- * the overlay is first disabled, then moved to tv, and enabled, we
- * seem to get SYNC_LOST_DIGIT error.
- *
- * Waiting doesn't seem to help, but updating the manual update display
- * after disabling the overlay seems to fix this. This hints that the
- * overlay is perhaps somehow tied to the LCD output until the output
- * is updated.
- *
- * Userspace workaround for this is to update the LCD after disabling
- * the overlay, but before moving the overlay to TV.
- */
+ dispc_runtime_put();
mutex_unlock(&apply_lock);
return 0;
+
+err1:
+ dispc_runtime_put();
err:
mutex_unlock(&apply_lock);
return r;
}
-int dss_ovl_unset_manager(struct omap_overlay *ovl)
+static int dss_ovl_unset_manager(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1355,9 +1388,24 @@ int dss_ovl_unset_manager(struct omap_overlay *ovl)
/* wait for pending extra_info updates to ensure the ovl is disabled */
wait_pending_extra_info_updates();
+ /*
+ * For a manual update display, there is no guarantee that the overlay
+ * is really disabled in HW; we may need an extra update from this
+ * manager before the configurations can go in. Return an error if the
+ * overlay needed an update from the manager.
+ *
+ * TODO: Instead of returning an error, try to do a dummy manager update
+ * here to disable the overlay in hardware. Use the *GATED fields in
+ * the DISPC_CONFIG registers to do a dummy update.
+ */
spin_lock_irqsave(&data_lock, flags);
- op->channel = -1;
+ if (ovl_manual_update(ovl) && op->extra_info_dirty) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("need an update to change the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
ovl->manager = NULL;
list_del(&ovl->list);
@@ -1372,7 +1420,7 @@ err:
return r;
}
-bool dss_ovl_is_enabled(struct omap_overlay *ovl)
+static bool dss_ovl_is_enabled(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1387,7 +1435,7 @@ bool dss_ovl_is_enabled(struct omap_overlay *ovl)
return e;
}
-int dss_ovl_enable(struct omap_overlay *ovl)
+static int dss_ovl_enable(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1437,7 +1485,7 @@ err1:
return r;
}
-int dss_ovl_disable(struct omap_overlay *ovl)
+static int dss_ovl_disable(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
unsigned long flags;
@@ -1472,3 +1520,152 @@ err:
return r;
}
+static int dss_mgr_register_framedone_handler_compat(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ if (mp->framedone_handler)
+ return -EBUSY;
+
+ mp->framedone_handler = handler;
+ mp->framedone_handler_data = data;
+
+ return 0;
+}
+
+static void dss_mgr_unregister_framedone_handler_compat(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ WARN_ON(mp->framedone_handler != handler ||
+ mp->framedone_handler_data != data);
+
+ mp->framedone_handler = NULL;
+ mp->framedone_handler_data = NULL;
+}
+
+static const struct dss_mgr_ops apply_mgr_ops = {
+ .start_update = dss_mgr_start_update_compat,
+ .enable = dss_mgr_enable_compat,
+ .disable = dss_mgr_disable_compat,
+ .set_timings = dss_mgr_set_timings_compat,
+ .set_lcd_config = dss_mgr_set_lcd_config_compat,
+ .register_framedone_handler = dss_mgr_register_framedone_handler_compat,
+ .unregister_framedone_handler = dss_mgr_unregister_framedone_handler_compat,
+};
+
+static int compat_refcnt;
+static DEFINE_MUTEX(compat_init_lock);
+
+int omapdss_compat_init(void)
+{
+ struct platform_device *pdev = dss_get_core_pdev();
+ struct omap_dss_device *dssdev = NULL;
+ int i, r;
+
+ mutex_lock(&compat_init_lock);
+
+ if (compat_refcnt++ > 0)
+ goto out;
+
+ apply_init_priv();
+
+ dss_init_overlay_managers(pdev);
+ dss_init_overlays(pdev);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ struct omap_overlay_manager *mgr;
+
+ mgr = omap_dss_get_overlay_manager(i);
+
+ mgr->set_output = &dss_mgr_set_output;
+ mgr->unset_output = &dss_mgr_unset_output;
+ mgr->apply = &omap_dss_mgr_apply;
+ mgr->set_manager_info = &dss_mgr_set_info;
+ mgr->get_manager_info = &dss_mgr_get_info;
+ mgr->wait_for_go = &dss_mgr_wait_for_go;
+ mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
+ mgr->get_device = &dss_mgr_get_device;
+ }
+
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+
+ ovl->is_enabled = &dss_ovl_is_enabled;
+ ovl->enable = &dss_ovl_enable;
+ ovl->disable = &dss_ovl_disable;
+ ovl->set_manager = &dss_ovl_set_manager;
+ ovl->unset_manager = &dss_ovl_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_info;
+ ovl->get_overlay_info = &dss_ovl_get_info;
+ ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
+ ovl->get_device = &dss_ovl_get_device;
+ }
+
+ r = dss_install_mgr_ops(&apply_mgr_ops);
+ if (r)
+ goto err_mgr_ops;
+
+ for_each_dss_dev(dssdev) {
+ r = display_init_sysfs(pdev, dssdev);
+ /* XXX uninit sysfs files on error */
+ if (r)
+ goto err_disp_sysfs;
+ }
+
+ dispc_runtime_get();
+
+ r = dss_dispc_initialize_irq();
+ if (r)
+ goto err_init_irq;
+
+ dispc_runtime_put();
+
+out:
+ mutex_unlock(&compat_init_lock);
+
+ return 0;
+
+err_init_irq:
+ dispc_runtime_put();
+
+err_disp_sysfs:
+ dss_uninstall_mgr_ops();
+
+err_mgr_ops:
+ dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlays(pdev);
+
+ compat_refcnt--;
+
+ mutex_unlock(&compat_init_lock);
+
+ return r;
+}
+EXPORT_SYMBOL(omapdss_compat_init);
+
+void omapdss_compat_uninit(void)
+{
+ struct platform_device *pdev = dss_get_core_pdev();
+ struct omap_dss_device *dssdev = NULL;
+
+ mutex_lock(&compat_init_lock);
+
+ if (--compat_refcnt > 0)
+ goto out;
+
+ dss_dispc_uninitialize_irq();
+
+ for_each_dss_dev(dssdev)
+ display_uninit_sysfs(pdev, dssdev);
+
+ dss_uninstall_mgr_ops();
+
+ dss_uninit_overlay_managers(pdev);
+ dss_uninit_overlays(pdev);
+out:
+ mutex_unlock(&compat_init_lock);
+}
+EXPORT_SYMBOL(omapdss_compat_uninit);
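omapdss_compat_init() and omapdss_compat_uninit() above follow the usual mutex-protected refcount idiom: only the first caller performs the setup, only the last caller tears it down, and a failed first init rolls the count back so a later caller can retry. A stripped-down sketch of the same idiom, with hypothetical do_setup()/do_teardown() helpers standing in for the real work:

#include <linux/mutex.h>

static int do_setup(void);	/* hypothetical setup helper */
static void do_teardown(void);	/* hypothetical teardown helper */

static int refcnt;
static DEFINE_MUTEX(init_lock);

int subsys_compat_init(void)
{
	int r = 0;

	mutex_lock(&init_lock);
	if (refcnt++ > 0)
		goto out;	/* already initialised by an earlier caller */

	r = do_setup();
	if (r)
		refcnt--;	/* roll back so the next caller retries */
out:
	mutex_unlock(&init_lock);
	return r;
}

void subsys_compat_uninit(void)
{
	mutex_lock(&init_lock);
	if (--refcnt == 0)	/* last user gone, tear everything down */
		do_teardown();
	mutex_unlock(&init_lock);
}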
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index d94ef9e31a3..f8779d4750b 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -53,15 +53,23 @@ static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");
-#ifdef DEBUG
-bool dss_debug;
-module_param_named(debug, dss_debug, bool, 0644);
-#endif
-
-const char *dss_get_default_display_name(void)
+const char *omapdss_get_default_display_name(void)
{
return core.default_display_name;
}
+EXPORT_SYMBOL(omapdss_get_default_display_name);
+
+enum omapdss_version omapdss_get_version(void)
+{
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+ return pdata->version;
+}
+EXPORT_SYMBOL(omapdss_get_version);
+
+struct platform_device *dss_get_core_pdev(void)
+{
+ return core.pdev;
+}
/* REGULATORS */
@@ -93,21 +101,6 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
-int dss_get_ctx_loss_count(struct device *dev)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
- int cnt;
-
- if (!board_data->get_context_loss_count)
- return -ENOENT;
-
- cnt = board_data->get_context_loss_count(dev);
-
- WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
- return cnt;
-}
-
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
@@ -122,7 +115,7 @@ void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
- if (!board_data->dsi_enable_pads)
+ if (!board_data->dsi_disable_pads)
return;
return board_data->dsi_disable_pads(dsi_id, lane_mask);
@@ -138,7 +131,7 @@ int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
return 0;
}
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
static int dss_debug_show(struct seq_file *s, void *unused)
{
void (*func)(struct seq_file *) = s->private;
@@ -193,7 +186,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
return 0;
}
-#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+#else /* CONFIG_OMAP2_DSS_DEBUGFS */
static inline int dss_initialize_debugfs(void)
{
return 0;
@@ -205,7 +198,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
return 0;
}
-#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
/* PLATFORM DEVICE */
static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
@@ -237,12 +230,7 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.pdev = pdev;
- dss_features_init(pdata->version);
-
- dss_apply_init();
-
- dss_init_overlay_managers(pdev);
- dss_init_overlays(pdev);
+ dss_features_init(omapdss_get_version());
r = dss_initialize_debugfs();
if (r)
@@ -268,9 +256,6 @@ static int omap_dss_remove(struct platform_device *pdev)
dss_uninitialize_debugfs();
- dss_uninit_overlays(pdev);
- dss_uninit_overlay_managers(pdev);
-
return 0;
}
@@ -358,15 +343,10 @@ static int dss_driver_probe(struct device *dev)
dev_name(dev), dssdev->driver_name,
dssdrv->driver.name);
- r = dss_init_device(core.pdev, dssdev);
- if (r)
- return r;
-
r = dssdrv->probe(dssdev);
if (r) {
DSSERR("driver probe failed: %d\n", r);
- dss_uninit_device(core.pdev, dssdev);
return r;
}
@@ -387,8 +367,6 @@ static int dss_driver_remove(struct device *dev)
dssdrv->remove(dssdev);
- dss_uninit_device(core.pdev, dssdev);
-
dssdev->driver = NULL;
return 0;
@@ -507,6 +485,9 @@ static int __init omap_dss_bus_register(void)
/* INIT */
static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_init_platform_driver,
+#endif
#ifdef CONFIG_OMAP2_DSS_DPI
dpi_init_platform_driver,
#endif
@@ -519,15 +500,15 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
#ifdef CONFIG_OMAP2_DSS_VENC
venc_init_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_init_platform_driver,
-#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
hdmi_init_platform_driver,
#endif
};
static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_uninit_platform_driver,
+#endif
#ifdef CONFIG_OMAP2_DSS_DPI
dpi_uninit_platform_driver,
#endif
@@ -540,9 +521,6 @@ static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
#ifdef CONFIG_OMAP2_DSS_VENC
venc_uninit_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_uninit_platform_driver,
-#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
hdmi_uninit_platform_driver,
#endif
diff --git a/drivers/video/omap2/dss/dispc-compat.c b/drivers/video/omap2/dss/dispc-compat.c
new file mode 100644
index 00000000000..928884c9a0a
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc-compat.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "APPLY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "dss_features.h"
+#include "dispc-compat.h"
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_OCP_ERR | \
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+ DISPC_IRQ_SYNC_LOST | \
+ DISPC_IRQ_SYNC_LOST_DIGIT)
+
+#define DISPC_MAX_NR_ISRS 8
+
+struct omap_dispc_isr_data {
+ omap_dispc_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+struct dispc_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned irqs[32];
+};
+
+static struct {
+ spinlock_t irq_lock;
+ u32 irq_error_mask;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+ u32 error_irqs;
+ struct work_struct error_work;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dispc_irq_stats irq_stats;
+#endif
+} dispc_compat;
+
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static void dispc_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dispc_irq_stats stats;
+
+ spin_lock_irqsave(&dispc_compat.irq_stats_lock, flags);
+
+ stats = dispc_compat.irq_stats;
+ memset(&dispc_compat.irq_stats, 0, sizeof(dispc_compat.irq_stats));
+ dispc_compat.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dispc_compat.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+
+ PIS(FRAMEDONE);
+ PIS(VSYNC);
+ PIS(EVSYNC_EVEN);
+ PIS(EVSYNC_ODD);
+ PIS(ACBIAS_COUNT_STAT);
+ PIS(PROG_LINE_NUM);
+ PIS(GFX_FIFO_UNDERFLOW);
+ PIS(GFX_END_WIN);
+ PIS(PAL_GAMMA_MASK);
+ PIS(OCP_ERR);
+ PIS(VID1_FIFO_UNDERFLOW);
+ PIS(VID1_END_WIN);
+ PIS(VID2_FIFO_UNDERFLOW);
+ PIS(VID2_END_WIN);
+ if (dss_feat_get_num_ovls() > 3) {
+ PIS(VID3_FIFO_UNDERFLOW);
+ PIS(VID3_END_WIN);
+ }
+ PIS(SYNC_LOST);
+ PIS(SYNC_LOST_DIGIT);
+ PIS(WAKEUP);
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ PIS(FRAMEDONE2);
+ PIS(VSYNC2);
+ PIS(ACBIAS_COUNT_STAT2);
+ PIS(SYNC_LOST2);
+ }
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ PIS(FRAMEDONE3);
+ PIS(VSYNC3);
+ PIS(ACBIAS_COUNT_STAT3);
+ PIS(SYNC_LOST3);
+ }
+#undef PIS
+}
+#endif
+
+/* dispc.irq_lock has to be locked by the caller */
+static void _omap_dispc_set_irqs(void)
+{
+ u32 mask;
+ int i;
+ struct omap_dispc_isr_data *isr_data;
+
+ mask = dispc_compat.irq_error_mask;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+
+ if (isr_data->isr == NULL)
+ continue;
+
+ mask |= isr_data->mask;
+ }
+
+ dispc_write_irqenable(mask);
+}
+
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct omap_dispc_isr_data *isr_data;
+
+ if (isr == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+
+ /* check for duplicate entry */
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+ if (isr_data->isr == isr && isr_data->arg == arg &&
+ isr_data->mask == mask) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ isr_data = NULL;
+ ret = -EBUSY;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+
+ if (isr_data->isr != NULL)
+ continue;
+
+ isr_data->isr = isr;
+ isr_data->arg = arg;
+ isr_data->mask = mask;
+ ret = 0;
+
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return 0;
+err:
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_register_isr);
+
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ unsigned long flags;
+ int ret = -EINVAL;
+ struct omap_dispc_isr_data *isr_data;
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc_compat.registered_isr[i];
+ if (isr_data->isr != isr || isr_data->arg != arg ||
+ isr_data->mask != mask)
+ continue;
+
+ /* found the correct isr */
+
+ isr_data->isr = NULL;
+ isr_data->arg = NULL;
+ isr_data->mask = 0;
+
+ ret = 0;
+ break;
+ }
+
+ if (ret == 0)
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_unregister_isr);
+
+static void print_irq_status(u32 status)
+{
+ if ((status & dispc_compat.irq_error_mask) == 0)
+ return;
+
+#define PIS(x) (status & DISPC_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DISPC IRQ: 0x%x: %s%s%s%s%s%s%s%s%s\n",
+ status,
+ PIS(OCP_ERR),
+ PIS(GFX_FIFO_UNDERFLOW),
+ PIS(VID1_FIFO_UNDERFLOW),
+ PIS(VID2_FIFO_UNDERFLOW),
+ dss_feat_get_num_ovls() > 3 ? PIS(VID3_FIFO_UNDERFLOW) : "",
+ PIS(SYNC_LOST),
+ PIS(SYNC_LOST_DIGIT),
+ dss_has_feature(FEAT_MGR_LCD2) ? PIS(SYNC_LOST2) : "",
+ dss_has_feature(FEAT_MGR_LCD3) ? PIS(SYNC_LOST3) : "");
+#undef PIS
+}
+
+/* Called from dss.c. Note that we don't touch clocks here,
+ * but we presume they are on because we got an IRQ. However,
+ * an irq handler may turn the clocks off, so we may not have
+ * clocks later in the function. */
+static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
+{
+ int i;
+ u32 irqstatus, irqenable;
+ u32 handledirqs = 0;
+ u32 unhandled_errors;
+ struct omap_dispc_isr_data *isr_data;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+
+ spin_lock(&dispc_compat.irq_lock);
+
+ irqstatus = dispc_read_irqstatus();
+ irqenable = dispc_read_irqenable();
+
+ /* IRQ is not for us */
+ if (!(irqstatus & irqenable)) {
+ spin_unlock(&dispc_compat.irq_lock);
+ return IRQ_NONE;
+ }
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dispc_compat.irq_stats_lock);
+ dispc_compat.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs);
+ spin_unlock(&dispc_compat.irq_stats_lock);
+#endif
+
+ print_irq_status(irqstatus);
+
+ /* Ack the interrupt. Do it here before clocks are possibly turned
+ * off */
+ dispc_clear_irqstatus(irqstatus);
+ /* flush posted write */
+ dispc_read_irqstatus();
+
+ /* make a copy and unlock, so that isrs can unregister
+ * themselves */
+ memcpy(registered_isr, dispc_compat.registered_isr,
+ sizeof(registered_isr));
+
+ spin_unlock(&dispc_compat.irq_lock);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &registered_isr[i];
+
+ if (!isr_data->isr)
+ continue;
+
+ if (isr_data->mask & irqstatus) {
+ isr_data->isr(isr_data->arg, irqstatus);
+ handledirqs |= isr_data->mask;
+ }
+ }
+
+ spin_lock(&dispc_compat.irq_lock);
+
+ unhandled_errors = irqstatus & ~handledirqs & dispc_compat.irq_error_mask;
+
+ if (unhandled_errors) {
+ dispc_compat.error_irqs |= unhandled_errors;
+
+ dispc_compat.irq_error_mask &= ~unhandled_errors;
+ _omap_dispc_set_irqs();
+
+ schedule_work(&dispc_compat.error_work);
+ }
+
+ spin_unlock(&dispc_compat.irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void dispc_error_worker(struct work_struct *work)
+{
+ int i;
+ u32 errors;
+ unsigned long flags;
+ static const unsigned fifo_underflow_bits[] = {
+ DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+ DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+ };
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+ errors = dispc_compat.error_irqs;
+ dispc_compat.error_irqs = 0;
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ dispc_runtime_get();
+
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ unsigned bit;
+
+ ovl = omap_dss_get_overlay(i);
+ bit = fifo_underflow_bits[i];
+
+ if (bit & errors) {
+ DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
+ ovl->name);
+ dispc_ovl_enable(ovl->id, false);
+ dispc_mgr_go(ovl->manager->id);
+ msleep(50);
+ }
+ }
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ unsigned bit;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ bit = dispc_mgr_get_sync_lost_irq(i);
+
+ if (bit & errors) {
+ int j;
+
+ DSSERR("SYNC_LOST on channel %s, restarting the output "
+ "with video overlays disabled\n",
+ mgr->name);
+
+ dss_mgr_disable(mgr);
+
+ for (j = 0; j < omap_dss_get_num_overlays(); ++j) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(j);
+
+ if (ovl->id != OMAP_DSS_GFX &&
+ ovl->manager == mgr)
+ ovl->disable(ovl);
+ }
+
+ dss_mgr_enable(mgr);
+ }
+ }
+
+ if (errors & DISPC_IRQ_OCP_ERR) {
+ DSSERR("OCP_ERR\n");
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ dss_mgr_disable(mgr);
+ }
+ }
+
+ spin_lock_irqsave(&dispc_compat.irq_lock, flags);
+ dispc_compat.irq_error_mask |= errors;
+ _omap_dispc_set_irqs();
+ spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
+
+ dispc_runtime_put();
+}
+
+int dss_dispc_initialize_irq(void)
+{
+ int r;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc_compat.irq_stats_lock);
+ dispc_compat.irq_stats.last_reset = jiffies;
+ dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
+
+ spin_lock_init(&dispc_compat.irq_lock);
+
+ memset(dispc_compat.registered_isr, 0,
+ sizeof(dispc_compat.registered_isr));
+
+ dispc_compat.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
+ if (dss_feat_get_num_ovls() > 3)
+ dispc_compat.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
+
+ /*
+ * there's SYNC_LOST_DIGIT waiting after enabling the DSS,
+ * so clear it
+ */
+ dispc_clear_irqstatus(dispc_read_irqstatus());
+
+ INIT_WORK(&dispc_compat.error_work, dispc_error_worker);
+
+ _omap_dispc_set_irqs();
+
+ r = dispc_request_irq(omap_dispc_irq_handler, &dispc_compat);
+ if (r) {
+ DSSERR("dispc_request_irq failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+void dss_dispc_uninitialize_irq(void)
+{
+ dispc_free_irq(&dispc_compat);
+}
+
+static void dispc_mgr_disable_isr(void *data, u32 mask)
+{
+ struct completion *compl = data;
+ complete(compl);
+}
+
+static void dispc_mgr_enable_lcd_out(enum omap_channel channel)
+{
+ dispc_mgr_enable(channel, true);
+}
+
+static void dispc_mgr_disable_lcd_out(enum omap_channel channel)
+{
+ DECLARE_COMPLETION_ONSTACK(framedone_compl);
+ int r;
+ u32 irq;
+
+ if (dispc_mgr_is_enabled(channel) == false)
+ return;
+
+ /*
+ * When we disable LCD output, we need to wait for FRAMEDONE to know
+ * that DISPC has finished with the LCD output.
+ */
+
+ irq = dispc_mgr_get_framedone_irq(channel);
+
+ r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq);
+ if (r)
+ DSSERR("failed to register FRAMEDONE isr\n");
+
+ dispc_mgr_enable(channel, false);
+
+ /* if we couldn't register for framedone, just sleep and exit */
+ if (r) {
+ msleep(100);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&framedone_compl,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for FRAME DONE\n");
+
+ r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq);
+ if (r)
+ DSSERR("failed to unregister FRAMEDONE isr\n");
+}
+
+static void dispc_digit_out_enable_isr(void *data, u32 mask)
+{
+ struct completion *compl = data;
+
+ /* ignore any sync lost interrupts */
+ if (mask & (DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD))
+ complete(compl);
+}
+
+static void dispc_mgr_enable_digit_out(void)
+{
+ DECLARE_COMPLETION_ONSTACK(vsync_compl);
+ int r;
+ u32 irq_mask;
+
+ if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == true)
+ return;
+
+ /*
+ * Digit output produces some sync lost interrupts during the first
+ * frame when enabling. Those need to be ignored, so we register for the
+ * sync lost irq to prevent the error handler from triggering.
+ */
+
+ irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT) |
+ dispc_mgr_get_sync_lost_irq(OMAP_DSS_CHANNEL_DIGIT);
+
+ r = omap_dispc_register_isr(dispc_digit_out_enable_isr, &vsync_compl,
+ irq_mask);
+ if (r) {
+ DSSERR("failed to register %x isr\n", irq_mask);
+ return;
+ }
+
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, true);
+
+ /* wait for the first evsync */
+ if (!wait_for_completion_timeout(&vsync_compl, msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for digit out to start\n");
+
+ r = omap_dispc_unregister_isr(dispc_digit_out_enable_isr, &vsync_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to unregister %x isr\n", irq_mask);
+}
+
+static void dispc_mgr_disable_digit_out(void)
+{
+ DECLARE_COMPLETION_ONSTACK(framedone_compl);
+ int r, i;
+ u32 irq_mask;
+ int num_irqs;
+
+ if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == false)
+ return;
+
+ /*
+ * When we disable the digit output, we need to wait for FRAMEDONE to
+ * know that DISPC has finished with the output.
+ */
+
+ irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
+ num_irqs = 1;
+
+ if (!irq_mask) {
+ /*
+ * OMAP2/3 don't have a FRAMEDONE irq for TV, so we need to use
+ * vsyncs instead.
+ */
+
+ irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
+ /*
+ * We need to wait for both even and odd vsyncs. Note that this
+ * is not totally reliable, as we could get a vsync interrupt
+ * before we disable the output, which leads to a timeout in
+ * the wait_for_completion.
+ */
+ num_irqs = 2;
+ }
+
+ r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to register %x isr\n", irq_mask);
+
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);
+
+ /* if we couldn't register the irq, just sleep and exit */
+ if (r) {
+ msleep(100);
+ return;
+ }
+
+ for (i = 0; i < num_irqs; ++i) {
+ if (!wait_for_completion_timeout(&framedone_compl,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for digit out to stop\n");
+ }
+
+ r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
+ irq_mask);
+ if (r)
+ DSSERR("failed to unregister %x isr\n", irq_mask);
+}
+
+void dispc_mgr_enable_sync(enum omap_channel channel)
+{
+ if (dss_mgr_is_lcd(channel))
+ dispc_mgr_enable_lcd_out(channel);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ dispc_mgr_enable_digit_out();
+ else
+ WARN_ON(1);
+}
+
+void dispc_mgr_disable_sync(enum omap_channel channel)
+{
+ if (dss_mgr_is_lcd(channel))
+ dispc_mgr_disable_lcd_out(channel);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ dispc_mgr_disable_digit_out();
+ else
+ WARN_ON(1);
+}
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout)
+{
+ void dispc_irq_wait_handler(void *data, u32 mask)
+ {
+ complete((struct completion *)data);
+ }
+
+ int r;
+ DECLARE_COMPLETION_ONSTACK(completion);
+
+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+ irqmask);
+
+ if (r)
+ return r;
+
+ timeout = wait_for_completion_interruptible_timeout(&completion,
+ timeout);
+
+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ if (timeout == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ return 0;
+}
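As a usage sketch of the helper above: a caller that needs to block until the next VSYNC on a channel can combine it with dispc_mgr_get_vsync_irq(); the wrapper function and the 100 ms timeout below are illustrative, not taken from the patch.

/* Hypothetical caller: block until the next VSYNC on a channel. */
static int example_wait_for_vsync(enum omap_channel channel)
{
        u32 irq = dispc_mgr_get_vsync_irq(channel);

        /* 0 on success, or a negative error (-ETIMEDOUT, -ERESTARTSYS, ...). */
        return omap_dispc_wait_for_irq_interruptible_timeout(irq,
                        msecs_to_jiffies(100));
}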
diff --git a/drivers/video/omap2/dss/dispc-compat.h b/drivers/video/omap2/dss/dispc-compat.h
new file mode 100644
index 00000000000..14a69b3d4fb
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc-compat.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DSS_DISPC_COMPAT_H
+#define __OMAP2_DSS_DISPC_COMPAT_H
+
+void dispc_mgr_enable_sync(enum omap_channel channel);
+void dispc_mgr_disable_sync(enum omap_channel channel);
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout);
+
+int dss_dispc_initialize_irq(void);
+void dss_dispc_uninitialize_irq(void);
+
+#endif
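A minimal sketch of how a compat-layer caller is expected to pair these entry points; the example_* functions are hypothetical and only show the intended ordering.

/* Hypothetical user of this header: set up the IRQ machinery once,
 * then use the synchronous enable/disable helpers per output. */
static int example_compat_start(enum omap_channel channel)
{
        int r;

        r = dss_dispc_initialize_irq();
        if (r)
                return r;

        dispc_mgr_enable_sync(channel);

        return 0;
}

static void example_compat_stop(enum omap_channel channel)
{
        dispc_mgr_disable_sync(channel);

        dss_dispc_uninitialize_irq();
}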
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index a5ab354f267..05ff2b91d9e 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,9 +33,9 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/hardirq.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
@@ -46,21 +46,6 @@
/* DISPC */
#define DISPC_SZ_REGS SZ_4K
-#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
- DISPC_IRQ_OCP_ERR | \
- DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
- DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
- DISPC_IRQ_SYNC_LOST | \
- DISPC_IRQ_SYNC_LOST_DIGIT)
-
-#define DISPC_MAX_NR_ISRS 8
-
-struct omap_dispc_isr_data {
- omap_dispc_isr_t isr;
- void *arg;
- u32 mask;
-};
-
enum omap_burst_size {
BURST_SIZE_X2 = 0,
BURST_SIZE_X4 = 1,
@@ -73,12 +58,6 @@ enum omap_burst_size {
#define REG_FLD_MOD(idx, val, start, end) \
dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
-struct dispc_irq_stats {
- unsigned long last_reset;
- unsigned irq_count;
- unsigned irqs[32];
-};
-
struct dispc_features {
u8 sw_start;
u8 fp_start;
@@ -86,19 +65,26 @@ struct dispc_features {
u16 sw_max;
u16 vp_max;
u16 hp_max;
- int (*calc_scaling) (enum omap_plane plane,
+ u8 mgr_width_start;
+ u8 mgr_height_start;
+ u16 mgr_width_max;
+ u16 mgr_height_max;
+ int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem);
- unsigned long (*calc_core_clk) (enum omap_plane plane,
+ unsigned long (*calc_core_clk) (unsigned long pclk,
u16 width, u16 height, u16 out_width, u16 out_height,
bool mem_to_mem);
u8 num_fifos;
/* swap GFX & WB fifos */
bool gfx_fifo_workaround:1;
+
+ /* no DISPC_IRQ_FRAMEDONETV on this SoC */
+ bool no_framedone_tv:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -110,27 +96,15 @@ static struct {
int ctx_loss_cnt;
int irq;
- struct clk *dss_clk;
u32 fifo_size[DISPC_MAX_NR_FIFOS];
/* maps which plane is using a fifo. fifo-id -> plane-id */
int fifo_assignment[DISPC_MAX_NR_FIFOS];
- spinlock_t irq_lock;
- u32 irq_error_mask;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
- u32 error_irqs;
- struct work_struct error_work;
-
bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
const struct dispc_features *feat;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dispc_irq_stats irq_stats;
-#endif
} dispc;
enum omap_color_component {
@@ -186,7 +160,7 @@ static const struct {
[OMAP_DSS_CHANNEL_DIGIT] = {
.name = "DIGIT",
.vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
- .framedone_irq = 0,
+ .framedone_irq = DISPC_IRQ_FRAMEDONETV,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
@@ -241,7 +215,6 @@ struct color_conv_coef {
int full_range;
};
-static void _omap_dispc_set_irqs(void);
static unsigned long dispc_plane_pclk_rate(enum omap_plane plane);
static unsigned long dispc_plane_lclk_rate(enum omap_plane plane);
@@ -374,7 +347,7 @@ static void dispc_save_context(void)
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
- dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
+ dispc.ctx_loss_cnt = dss_get_ctx_loss_count();
dispc.ctx_valid = true;
DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -389,7 +362,7 @@ static void dispc_restore_context(void)
if (!dispc.ctx_valid)
return;
- ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
+ ctx = dss_get_ctx_loss_count();
if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
return;
@@ -496,7 +469,7 @@ static void dispc_restore_context(void)
if (dss_has_feature(FEAT_MGR_LCD3))
RR(CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
- dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
+ dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
/*
* enable last so IRQs won't trigger before
@@ -520,6 +493,7 @@ int dispc_runtime_get(void)
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
+EXPORT_SYMBOL(dispc_runtime_get);
void dispc_runtime_put(void)
{
@@ -530,16 +504,28 @@ void dispc_runtime_put(void)
r = pm_runtime_put_sync(&dispc.pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
+EXPORT_SYMBOL(dispc_runtime_put);
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
+EXPORT_SYMBOL(dispc_mgr_get_vsync_irq);
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
{
+ if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc.feat->no_framedone_tv)
+ return 0;
+
return mgr_desc[channel].framedone_irq;
}
+EXPORT_SYMBOL(dispc_mgr_get_framedone_irq);
+
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
+{
+ return mgr_desc[channel].sync_lost_irq;
+}
+EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq);
u32 dispc_wb_get_framedone_irq(void)
{
@@ -550,28 +536,18 @@ bool dispc_mgr_go_busy(enum omap_channel channel)
{
return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
}
+EXPORT_SYMBOL(dispc_mgr_go_busy);
void dispc_mgr_go(enum omap_channel channel)
{
- bool enable_bit, go_bit;
-
- /* if the channel is not enabled, we don't need GO */
- enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1;
-
- if (!enable_bit)
- return;
-
- go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
-
- if (go_bit) {
- DSSERR("GO bit not down for channel %d\n", channel);
- return;
- }
+ WARN_ON(dispc_mgr_is_enabled(channel) == false);
+ WARN_ON(dispc_mgr_go_busy(channel));
DSSDBG("GO %s\n", mgr_desc[channel].name);
mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
}
+EXPORT_SYMBOL(dispc_mgr_go);
bool dispc_wb_go_busy(void)
{
@@ -975,6 +951,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
}
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
+EXPORT_SYMBOL(dispc_ovl_set_channel_out);
static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
{
@@ -1040,7 +1017,7 @@ static void dispc_configure_burst_sizes(void)
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i)
dispc_ovl_set_burst_size(i, burst_size);
}
@@ -1074,7 +1051,7 @@ static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
}
static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- struct omap_dss_cpr_coefs *coefs)
+ const struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1122,7 +1099,9 @@ static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
{
u32 val;
- val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ val = FLD_VAL(height - 1, dispc.feat->mgr_height_start, 16) |
+ FLD_VAL(width - 1, dispc.feat->mgr_width_start, 0);
+
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
}
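The change above makes the DISPC_SIZE_MGR field layout come from the per-SoC feature table added later in this patch (width in bits 10:0 and height in bits 26:16 on OMAP2-4, 11:0 and 27:16 on OMAP5). A standalone sketch of the packing, assuming the driver's FLD_VAL(val, start, end) semantics of start = MSB and end = LSB:

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the driver's helpers: start is the MSB, end the LSB. */
#define FLD_MASK(start, end)     (((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((uint32_t)(val) << (end)) & FLD_MASK(start, end))

static uint32_t pack_mgr_size(uint16_t width, uint16_t height,
                              int width_start, int height_start)
{
        return FLD_VAL(height - 1, height_start, 16) |
               FLD_VAL(width - 1, width_start, 0);
}

int main(void)
{
        /* OMAP2-4 layout: width in bits 10:0, height in bits 26:16. */
        printf("omap4 1920x1080: 0x%08x\n",
               (unsigned)pack_mgr_size(1920, 1080, 10, 26));
        /* OMAP5 layout: width in bits 11:0, height in bits 27:16. */
        printf("omap5 4096x2160: 0x%08x\n",
               (unsigned)pack_mgr_size(4096, 2160, 11, 27));
        return 0;
}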
@@ -1244,7 +1223,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ for (i = 0; i < dss_feat_get_num_ovls(); ++i)
total_fifo_size += dispc_ovl_get_fifo_size(i);
} else {
total_fifo_size = ovl_fifo_size;
@@ -1989,16 +1968,14 @@ static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
* This function is used to avoid synclosts in OMAP3, because of some
* undocumented horizontal position and timing related limitations.
*/
-static int check_horiz_timing_omap3(enum omap_plane plane,
+static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *t, u16 pos_x,
u16 width, u16 height, u16 out_width, u16 out_height)
{
- int DS = DIV_ROUND_UP(height, out_height);
+ const int ds = DIV_ROUND_UP(height, out_height);
unsigned long nonactive;
static const u8 limits[3] = { 8, 10, 20 };
u64 val, blank;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
- unsigned long lclk = dispc_plane_lclk_rate(plane);
int i;
nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
@@ -2020,8 +1997,8 @@ static int check_horiz_timing_omap3(enum omap_plane plane,
*/
val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
- val, max(0, DS - 2) * width);
- if (val < max(0, DS - 2) * width)
+ val, max(0, ds - 2) * width);
+ if (val < max(0, ds - 2) * width)
return -EINVAL;
/*
@@ -2031,21 +2008,20 @@ static int check_horiz_timing_omap3(enum omap_plane plane,
*/
val = div_u64((u64)nonactive * lclk, pclk);
DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
- val, max(0, DS - 1) * width);
- if (val < max(0, DS - 1) * width)
+ val, max(0, ds - 1) * width);
+ if (val < max(0, ds - 1) * width)
return -EINVAL;
return 0;
}
-static unsigned long calc_core_clk_five_taps(enum omap_plane plane,
+static unsigned long calc_core_clk_five_taps(unsigned long pclk,
const struct omap_video_timings *mgr_timings, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
u32 core_clk = 0;
u64 tmp;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
if (height <= out_height && width <= out_width)
return (unsigned long) pclk;
@@ -2079,22 +2055,19 @@ static unsigned long calc_core_clk_five_taps(enum omap_plane plane,
return core_clk;
}
-static unsigned long calc_core_clk_24xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_24xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
- unsigned long pclk = dispc_plane_pclk_rate(plane);
-
if (height > out_height && width > out_width)
return pclk * 4;
else
return pclk * 2;
}
-static unsigned long calc_core_clk_34xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_34xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
unsigned int hf, vf;
- unsigned long pclk = dispc_plane_pclk_rate(plane);
/*
* FIXME how to determine the 'A' factor
@@ -2117,11 +2090,9 @@ static unsigned long calc_core_clk_34xx(enum omap_plane plane, u16 width,
return pclk * vf * hf;
}
-static unsigned long calc_core_clk_44xx(enum omap_plane plane, u16 width,
+static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
u16 height, u16 out_width, u16 out_height, bool mem_to_mem)
{
- unsigned long pclk;
-
/*
* If the overlay/writeback is in mem to mem mode, there are no
* downscaling limitations with respect to pixel clock, return 1 as
@@ -2131,15 +2102,13 @@ static unsigned long calc_core_clk_44xx(enum omap_plane plane, u16 width,
if (mem_to_mem)
return 1;
- pclk = dispc_plane_pclk_rate(plane);
-
if (width > out_width)
return DIV_ROUND_UP(pclk, out_width) * width;
else
return pclk;
}
-static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2157,7 +2126,7 @@ static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
do {
in_height = DIV_ROUND_UP(height, *decim_y);
in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = dispc.feat->calc_core_clk(plane, in_width,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
*core_clk > dispc_core_clk_rate());
@@ -2180,7 +2149,7 @@ static int dispc_ovl_calc_scaling_24xx(enum omap_plane plane,
return 0;
}
-static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2196,10 +2165,10 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
do {
in_height = DIV_ROUND_UP(height, *decim_y);
in_width = DIV_ROUND_UP(width, *decim_x);
- *core_clk = calc_core_clk_five_taps(plane, mgr_timings,
+ *core_clk = calc_core_clk_five_taps(pclk, mgr_timings,
in_width, in_height, out_width, out_height, color_mode);
- error = check_horiz_timing_omap3(plane, mgr_timings,
+ error = check_horiz_timing_omap3(pclk, lclk, mgr_timings,
pos_x, in_width, in_height, out_width,
out_height);
@@ -2208,7 +2177,7 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
in_height < out_height * 2)
*five_taps = false;
if (!*five_taps)
- *core_clk = dispc.feat->calc_core_clk(plane, in_width,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
@@ -2227,8 +2196,8 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
}
} while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error);
- if (check_horiz_timing_omap3(plane, mgr_timings, pos_x, width, height,
- out_width, out_height)){
+ if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, width,
+ height, out_width, out_height)){
DSSERR("horizontal timing too tight\n");
return -EINVAL;
}
@@ -2246,7 +2215,7 @@ static int dispc_ovl_calc_scaling_34xx(enum omap_plane plane,
return 0;
}
-static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
+static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode, bool *five_taps,
@@ -2258,14 +2227,14 @@ static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
u16 in_height = DIV_ROUND_UP(height, *decim_y);
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- unsigned long pclk = dispc_plane_pclk_rate(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
- if (mem_to_mem)
- in_width_max = DIV_ROUND_UP(out_width, maxdownscale);
- else
+ if (mem_to_mem) {
+ in_width_max = out_width * maxdownscale;
+ } else {
in_width_max = dispc_core_clk_rate() /
DIV_ROUND_UP(pclk, out_width);
+ }
*decim_x = DIV_ROUND_UP(width, in_width_max);
@@ -2283,12 +2252,12 @@ static int dispc_ovl_calc_scaling_44xx(enum omap_plane plane,
return -EINVAL;
}
- *core_clk = dispc.feat->calc_core_clk(plane, in_width, in_height,
+ *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
}
-static int dispc_ovl_calc_scaling(enum omap_plane plane,
+static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
const struct omap_video_timings *mgr_timings,
u16 width, u16 height, u16 out_width, u16 out_height,
@@ -2307,9 +2276,14 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
- *x_predecim = max_decim_limit;
- *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dss_has_feature(FEAT_BURST_2D)) ? 2 : max_decim_limit;
+ if (mem_to_mem) {
+ *x_predecim = *y_predecim = 1;
+ } else {
+ *x_predecim = max_decim_limit;
+ *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
+ dss_has_feature(FEAT_BURST_2D)) ?
+ 2 : max_decim_limit;
+ }
if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
color_mode == OMAP_DSS_COLOR_CLUT2 ||
@@ -2330,7 +2304,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(plane, mgr_timings, width, height,
+ ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height,
out_width, out_height, color_mode, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
@@ -2353,6 +2327,47 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
return 0;
}
+int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
+ const struct omap_overlay_info *oi,
+ const struct omap_video_timings *timings,
+ int *x_predecim, int *y_predecim)
+{
+ enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
+ bool five_taps = true;
+ bool fieldmode = 0;
+ u16 in_height = oi->height;
+ u16 in_width = oi->width;
+ bool ilace = timings->interlace;
+ u16 out_width, out_height;
+ int pos_x = oi->pos_x;
+ unsigned long pclk = dispc_mgr_pclk_rate(channel);
+ unsigned long lclk = dispc_mgr_lclk_rate(channel);
+
+ out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+ out_height = oi->out_height == 0 ? oi->height : oi->out_height;
+
+ if (ilace && oi->height == out_height)
+ fieldmode = 1;
+
+ if (ilace) {
+ if (fieldmode)
+ in_height /= 2;
+ out_height /= 2;
+
+ DSSDBG("adjusting for ilace: height %d, out_height %d\n",
+ in_height, out_height);
+ }
+
+ if (!dss_feat_color_mode_supported(plane, oi->color_mode))
+ return -EINVAL;
+
+ return dispc_ovl_calc_scaling(pclk, lclk, caps, timings, in_width,
+ in_height, out_width, out_height, oi->color_mode,
+ &five_taps, x_predecim, y_predecim, pos_x,
+ oi->rotation_type, false);
+}
+EXPORT_SYMBOL(dispc_ovl_check);
+
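dispc_ovl_check() lets callers validate overlay parameters against the manager's timings before programming anything. The sketch below is a hypothetical caller, not code from the patch, and only shows how the chosen predecimation factors come back to the caller.

/* Hypothetical caller: validate an overlay config before applying it. */
static int example_check_overlay(enum omap_plane plane,
                enum omap_channel channel,
                const struct omap_overlay_info *oi,
                const struct omap_video_timings *timings)
{
        int x_predecim, y_predecim;
        int r;

        r = dispc_ovl_check(plane, channel, oi, timings,
                        &x_predecim, &y_predecim);
        if (r)
                return r;

        DSSDBG("plane %d ok, predecimation %dx%d\n",
                        plane, x_predecim, y_predecim);

        return 0;
}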
static int dispc_ovl_setup_common(enum omap_plane plane,
enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
@@ -2368,12 +2383,14 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
unsigned offset0, offset1;
s32 row_inc;
s32 pix_inc;
- u16 frame_height = height;
+ u16 frame_width, frame_height;
unsigned int field_offset = 0;
u16 in_height = height;
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
bool ilace = mgr_timings->interlace;
+ unsigned long pclk = dispc_plane_pclk_rate(plane);
+ unsigned long lclk = dispc_plane_lclk_rate(plane);
if (paddr == 0)
return -EINVAL;
@@ -2398,7 +2415,7 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (!dss_feat_color_mode_supported(plane, color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(plane, caps, mgr_timings, in_width,
+ r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width,
in_height, out_width, out_height, color_mode,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
@@ -2436,20 +2453,28 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
row_inc = 0;
pix_inc = 0;
+ if (plane == OMAP_DSS_WB) {
+ frame_width = out_width;
+ frame_height = out_height;
+ } else {
+ frame_width = in_width;
+ frame_height = height;
+ }
+
if (rotation_type == OMAP_DSS_ROT_TILER)
- calc_tiler_rotation_offset(screen_width, in_width,
+ calc_tiler_rotation_offset(screen_width, frame_width,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
else if (rotation_type == OMAP_DSS_ROT_DMA)
- calc_dma_rotation_offset(rotation, mirror,
- screen_width, in_width, frame_height,
+ calc_dma_rotation_offset(rotation, mirror, screen_width,
+ frame_width, frame_height,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
else
calc_vrfb_rotation_offset(rotation, mirror,
- screen_width, in_width, frame_height,
+ screen_width, frame_width, frame_height,
color_mode, fieldmode, field_offset,
&offset0, &offset1, &row_inc, &pix_inc,
x_predecim, y_predecim);
@@ -2503,7 +2528,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
bool mem_to_mem)
{
int r;
- struct omap_overlay *ovl = omap_dss_get_overlay(plane);
+ enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
enum omap_channel channel;
channel = dispc_ovl_get_channel_out(plane);
@@ -2514,7 +2539,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
oi->color_mode, oi->rotation, oi->mirror, channel, replication);
- r = dispc_ovl_setup_common(plane, ovl->caps, oi->paddr, oi->p_uv_addr,
+ r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
@@ -2522,6 +2547,7 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
return r;
}
+EXPORT_SYMBOL(dispc_ovl_setup);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct omap_video_timings *mgr_timings)
@@ -2582,192 +2608,39 @@ int dispc_ovl_enable(enum omap_plane plane, bool enable)
return 0;
}
+EXPORT_SYMBOL(dispc_ovl_enable);
-static void dispc_disable_isr(void *data, u32 mask)
+bool dispc_ovl_enabled(enum omap_plane plane)
{
- struct completion *compl = data;
- complete(compl);
+ return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
}
+EXPORT_SYMBOL(dispc_ovl_enabled);
-static void _enable_lcd_out(enum omap_channel channel, bool enable)
+void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
/* flush posted write */
mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
-
-static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
-{
- struct completion frame_done_completion;
- bool is_on;
- int r;
- u32 irq;
-
- /* When we disable LCD output, we need to wait until frame is done.
- * Otherwise the DSS is still working, and turning off the clocks
- * prevents DSS from going to OFF mode */
- is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-
- irq = mgr_desc[channel].framedone_irq;
-
- if (!enable && is_on) {
- init_completion(&frame_done_completion);
-
- r = omap_dispc_register_isr(dispc_disable_isr,
- &frame_done_completion, irq);
-
- if (r)
- DSSERR("failed to register FRAMEDONE isr\n");
- }
-
- _enable_lcd_out(channel, enable);
-
- if (!enable && is_on) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for FRAME DONE\n");
-
- r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion, irq);
-
- if (r)
- DSSERR("failed to unregister FRAMEDONE isr\n");
- }
-}
-
-static void _enable_digit_out(bool enable)
-{
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
- /* flush posted write */
- dispc_read_reg(DISPC_CONTROL);
-}
-
-static void dispc_mgr_enable_digit_out(bool enable)
-{
- struct completion frame_done_completion;
- enum dss_hdmi_venc_clk_source_select src;
- int r, i;
- u32 irq_mask;
- int num_irqs;
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
- return;
-
- src = dss_get_hdmi_venc_clk_source();
-
- if (enable) {
- unsigned long flags;
- /* When we enable digit output, we'll get an extra digit
- * sync lost interrupt, that we need to ignore */
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
- }
-
- /* When we disable digit output, we need to wait until fields are done.
- * Otherwise the DSS is still working, and turning off the clocks
- * prevents DSS from going to OFF mode. And when enabling, we need to
- * wait for the extra sync losts */
- init_completion(&frame_done_completion);
-
- if (src == DSS_HDMI_M_PCLK && enable == false) {
- irq_mask = DISPC_IRQ_FRAMEDONETV;
- num_irqs = 1;
- } else {
- irq_mask = DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
- /* XXX I understand from TRM that we should only wait for the
- * current field to complete. But it seems we have to wait for
- * both fields */
- num_irqs = 2;
- }
-
- r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
- irq_mask);
- if (r)
- DSSERR("failed to register %x isr\n", irq_mask);
-
- _enable_digit_out(enable);
-
- for (i = 0; i < num_irqs; ++i) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for digit out to %s\n",
- enable ? "start" : "stop");
- }
-
- r = omap_dispc_unregister_isr(dispc_disable_isr, &frame_done_completion,
- irq_mask);
- if (r)
- DSSERR("failed to unregister %x isr\n", irq_mask);
-
- if (enable) {
- unsigned long flags;
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST_DIGIT;
- dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
- }
-}
+EXPORT_SYMBOL(dispc_mgr_enable);
bool dispc_mgr_is_enabled(enum omap_channel channel)
{
return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
-
-void dispc_mgr_enable(enum omap_channel channel, bool enable)
-{
- if (dss_mgr_is_lcd(channel))
- dispc_mgr_enable_lcd_out(channel, enable);
- else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_mgr_enable_digit_out(enable);
- else
- BUG();
-}
+EXPORT_SYMBOL(dispc_mgr_is_enabled);
void dispc_wb_enable(bool enable)
{
- enum omap_plane plane = OMAP_DSS_WB;
- struct completion frame_done_completion;
- bool is_on;
- int r;
- u32 irq;
-
- is_on = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
- irq = DISPC_IRQ_FRAMEDONEWB;
-
- if (!enable && is_on) {
- init_completion(&frame_done_completion);
-
- r = omap_dispc_register_isr(dispc_disable_isr,
- &frame_done_completion, irq);
- if (r)
- DSSERR("failed to register FRAMEDONEWB isr\n");
- }
-
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
-
- if (!enable && is_on) {
- if (!wait_for_completion_timeout(&frame_done_completion,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for FRAMEDONEWB\n");
-
- r = omap_dispc_unregister_isr(dispc_disable_isr,
- &frame_done_completion, irq);
- if (r)
- DSSERR("failed to unregister FRAMEDONEWB isr\n");
- }
+ dispc_ovl_enable(OMAP_DSS_WB, enable);
}
bool dispc_wb_is_enabled(void)
{
- enum omap_plane plane = OMAP_DSS_WB;
-
- return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
+ return dispc_ovl_enabled(OMAP_DSS_WB);
}
-void dispc_lcd_enable_signal_polarity(bool act_high)
+static void dispc_lcd_enable_signal_polarity(bool act_high)
{
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
@@ -2791,13 +2664,13 @@ void dispc_pck_free_enable(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
-void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
+static void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
{
mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1);
}
@@ -2840,7 +2713,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
}
void dispc_mgr_setup(enum omap_channel channel,
- struct omap_overlay_manager_info *info)
+ const struct omap_overlay_manager_info *info)
{
dispc_mgr_set_default_color(channel, info->default_color);
dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
@@ -2852,8 +2725,9 @@ void dispc_mgr_setup(enum omap_channel channel,
dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
}
}
+EXPORT_SYMBOL(dispc_mgr_setup);
-void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
+static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -2878,7 +2752,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
-void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
+static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
{
u32 l;
int gpout0, gpout1;
@@ -2907,15 +2781,33 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
dispc_write_reg(DISPC_CONTROL, l);
}
-void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable);
}
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config)
+{
+ dispc_mgr_set_io_pad_mode(config->io_pad_mode);
+
+ dispc_mgr_enable_stallmode(channel, config->stallmode);
+ dispc_mgr_enable_fifohandcheck(channel, config->fifohandcheck);
+
+ dispc_mgr_set_clock_div(channel, &config->clock_info);
+
+ dispc_mgr_set_tft_data_lines(channel, config->video_port_width);
+
+ dispc_lcd_enable_signal_polarity(config->lcden_sig_polarity);
+
+ dispc_mgr_set_lcd_type_tft(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_set_lcd_config);
+
static bool _dispc_mgr_size_ok(u16 width, u16 height)
{
- return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
- height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+ return width <= dispc.feat->mgr_width_max &&
+ height <= dispc.feat->mgr_height_max;
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -3010,7 +2902,7 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
/* change name to mode? */
void dispc_mgr_set_timings(enum omap_channel channel,
- struct omap_video_timings *timings)
+ const struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
@@ -3049,6 +2941,7 @@ void dispc_mgr_set_timings(enum omap_channel channel,
dispc_mgr_set_size(channel, t.x_res, t.y_res);
}
+EXPORT_SYMBOL(dispc_mgr_set_timings);
static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
u16 pck_div)
@@ -3076,7 +2969,7 @@ unsigned long dispc_fclk_rate(void)
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = clk_get_rate(dispc.dss_clk);
+ r = dss_get_dispc_clk_rate();
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -3101,28 +2994,32 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
unsigned long r;
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ if (dss_mgr_is_lcd(channel)) {
+ l = dispc_read_reg(DISPC_DIVISORo(channel));
- lcd = FLD_GET(l, 23, 16);
+ lcd = FLD_GET(l, 23, 16);
- switch (dss_get_lcd_clk_source(channel)) {
- case OMAP_DSS_CLK_SRC_FCK:
- r = clk_get_rate(dispc.dss_clk);
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(0);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- dsidev = dsi_get_dsidev_from_id(1);
- r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
- break;
- default:
- BUG();
- return 0;
- }
+ switch (dss_get_lcd_clk_source(channel)) {
+ case OMAP_DSS_CLK_SRC_FCK:
+ r = dss_get_dispc_clk_rate();
+ break;
+ case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ dsidev = dsi_get_dsidev_from_id(0);
+ r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ break;
+ case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
+ dsidev = dsi_get_dsidev_from_id(1);
+ r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ break;
+ default:
+ BUG();
+ return 0;
+ }
- return r / lcd;
+ return r / lcd;
+ } else {
+ return dispc_fclk_rate();
+ }
}
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3172,21 +3069,28 @@ unsigned long dispc_core_clk_rate(void)
static unsigned long dispc_plane_pclk_rate(enum omap_plane plane)
{
- enum omap_channel channel = dispc_ovl_get_channel_out(plane);
+ enum omap_channel channel;
+
+ if (plane == OMAP_DSS_WB)
+ return 0;
+
+ channel = dispc_ovl_get_channel_out(plane);
return dispc_mgr_pclk_rate(channel);
}
static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
{
- enum omap_channel channel = dispc_ovl_get_channel_out(plane);
+ enum omap_channel channel;
- if (dss_mgr_is_lcd(channel))
- return dispc_mgr_lclk_rate(channel);
- else
- return dispc_fclk_rate();
+ if (plane == OMAP_DSS_WB)
+ return 0;
+
+ channel = dispc_ovl_get_channel_out(plane);
+ return dispc_mgr_lclk_rate(channel);
}
+
static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
{
int lcd, pcd;
@@ -3244,64 +3148,6 @@ void dispc_dump_clocks(struct seq_file *s)
dispc_runtime_put();
}
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-void dispc_dump_irqs(struct seq_file *s)
-{
- unsigned long flags;
- struct dispc_irq_stats stats;
-
- spin_lock_irqsave(&dispc.irq_stats_lock, flags);
-
- stats = dispc.irq_stats;
- memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
- dispc.irq_stats.last_reset = jiffies;
-
- spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);
-
- seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
-
- seq_printf(s, "irqs %d\n", stats.irq_count);
-#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
-
- PIS(FRAMEDONE);
- PIS(VSYNC);
- PIS(EVSYNC_EVEN);
- PIS(EVSYNC_ODD);
- PIS(ACBIAS_COUNT_STAT);
- PIS(PROG_LINE_NUM);
- PIS(GFX_FIFO_UNDERFLOW);
- PIS(GFX_END_WIN);
- PIS(PAL_GAMMA_MASK);
- PIS(OCP_ERR);
- PIS(VID1_FIFO_UNDERFLOW);
- PIS(VID1_END_WIN);
- PIS(VID2_FIFO_UNDERFLOW);
- PIS(VID2_END_WIN);
- if (dss_feat_get_num_ovls() > 3) {
- PIS(VID3_FIFO_UNDERFLOW);
- PIS(VID3_END_WIN);
- }
- PIS(SYNC_LOST);
- PIS(SYNC_LOST_DIGIT);
- PIS(WAKEUP);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- PIS(FRAMEDONE2);
- PIS(VSYNC2);
- PIS(ACBIAS_COUNT_STAT2);
- PIS(SYNC_LOST2);
- }
- if (dss_has_feature(FEAT_MGR_LCD3)) {
- PIS(FRAMEDONE3);
- PIS(VSYNC3);
- PIS(ACBIAS_COUNT_STAT3);
- PIS(SYNC_LOST3);
- }
-#undef PIS
-}
-#endif
-
static void dispc_dump_regs(struct seq_file *s)
{
int i, j;
@@ -3351,7 +3197,7 @@ static void dispc_dump_regs(struct seq_file *s)
#define DISPC_REG(i, name) name(i)
#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
- 48 - strlen(#r) - strlen(p_names[i]), " ", \
+ (int)(48 - strlen(#r) - strlen(p_names[i])), " ", \
dispc_read_reg(DISPC_REG(i, r)))
p_names = mgr_names;
@@ -3428,7 +3274,7 @@ static void dispc_dump_regs(struct seq_file *s)
#define DISPC_REG(plane, name, i) name(plane, i)
#define DUMPREG(plane, name, i) \
seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
- 46 - strlen(#name) - strlen(p_names[plane]), " ", \
+ (int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \
dispc_read_reg(DISPC_REG(plane, name, i)))
/* Video pipeline coefficient registers */
@@ -3531,7 +3377,7 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
}
void dispc_mgr_set_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo)
+ const struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
@@ -3555,403 +3401,34 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
return 0;
}
-/* dispc.irq_lock has to be locked by the caller */
-static void _omap_dispc_set_irqs(void)
+u32 dispc_read_irqstatus(void)
{
- u32 mask;
- u32 old_mask;
- int i;
- struct omap_dispc_isr_data *isr_data;
-
- mask = dispc.irq_error_mask;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
-
- if (isr_data->isr == NULL)
- continue;
-
- mask |= isr_data->mask;
- }
-
- old_mask = dispc_read_reg(DISPC_IRQENABLE);
- /* clear the irqstatus for newly enabled irqs */
- dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
-
- dispc_write_reg(DISPC_IRQENABLE, mask);
-}
-
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- int ret;
- unsigned long flags;
- struct omap_dispc_isr_data *isr_data;
-
- if (isr == NULL)
- return -EINVAL;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- /* check for duplicate entry */
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
- if (isr_data->isr == isr && isr_data->arg == arg &&
- isr_data->mask == mask) {
- ret = -EINVAL;
- goto err;
- }
- }
-
- isr_data = NULL;
- ret = -EBUSY;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
-
- if (isr_data->isr != NULL)
- continue;
-
- isr_data->isr = isr;
- isr_data->arg = arg;
- isr_data->mask = mask;
- ret = 0;
-
- break;
- }
-
- if (ret)
- goto err;
-
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return 0;
-err:
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_register_isr);
-
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- unsigned long flags;
- int ret = -EINVAL;
- struct omap_dispc_isr_data *isr_data;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc.registered_isr[i];
- if (isr_data->isr != isr || isr_data->arg != arg ||
- isr_data->mask != mask)
- continue;
-
- /* found the correct isr */
-
- isr_data->isr = NULL;
- isr_data->arg = NULL;
- isr_data->mask = 0;
-
- ret = 0;
- break;
- }
-
- if (ret == 0)
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_unregister_isr);
-
-#ifdef DEBUG
-static void print_irq_status(u32 status)
-{
- if ((status & dispc.irq_error_mask) == 0)
- return;
-
- printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);
-
-#define PIS(x) \
- if (status & DISPC_IRQ_##x) \
- printk(#x " ");
- PIS(GFX_FIFO_UNDERFLOW);
- PIS(OCP_ERR);
- PIS(VID1_FIFO_UNDERFLOW);
- PIS(VID2_FIFO_UNDERFLOW);
- if (dss_feat_get_num_ovls() > 3)
- PIS(VID3_FIFO_UNDERFLOW);
- PIS(SYNC_LOST);
- PIS(SYNC_LOST_DIGIT);
- if (dss_has_feature(FEAT_MGR_LCD2))
- PIS(SYNC_LOST2);
- if (dss_has_feature(FEAT_MGR_LCD3))
- PIS(SYNC_LOST3);
-#undef PIS
-
- printk("\n");
-}
-#endif
-
-/* Called from dss.c. Note that we don't touch clocks here,
- * but we presume they are on because we got an IRQ. However,
- * an irq handler may turn the clocks off, so we may not have
- * clock later in the function. */
-static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
-{
- int i;
- u32 irqstatus, irqenable;
- u32 handledirqs = 0;
- u32 unhandled_errors;
- struct omap_dispc_isr_data *isr_data;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
-
- spin_lock(&dispc.irq_lock);
-
- irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
- irqenable = dispc_read_reg(DISPC_IRQENABLE);
-
- /* IRQ is not for us */
- if (!(irqstatus & irqenable)) {
- spin_unlock(&dispc.irq_lock);
- return IRQ_NONE;
- }
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock(&dispc.irq_stats_lock);
- dispc.irq_stats.irq_count++;
- dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
- spin_unlock(&dispc.irq_stats_lock);
-#endif
-
-#ifdef DEBUG
- if (dss_debug)
- print_irq_status(irqstatus);
-#endif
- /* Ack the interrupt. Do it here before clocks are possibly turned
- * off */
- dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
- /* flush posted write */
- dispc_read_reg(DISPC_IRQSTATUS);
-
- /* make a copy and unlock, so that isrs can unregister
- * themselves */
- memcpy(registered_isr, dispc.registered_isr,
- sizeof(registered_isr));
-
- spin_unlock(&dispc.irq_lock);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &registered_isr[i];
-
- if (!isr_data->isr)
- continue;
-
- if (isr_data->mask & irqstatus) {
- isr_data->isr(isr_data->arg, irqstatus);
- handledirqs |= isr_data->mask;
- }
- }
-
- spin_lock(&dispc.irq_lock);
-
- unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;
-
- if (unhandled_errors) {
- dispc.error_irqs |= unhandled_errors;
-
- dispc.irq_error_mask &= ~unhandled_errors;
- _omap_dispc_set_irqs();
-
- schedule_work(&dispc.error_work);
- }
-
- spin_unlock(&dispc.irq_lock);
-
- return IRQ_HANDLED;
-}
-
-static void dispc_error_worker(struct work_struct *work)
-{
- int i;
- u32 errors;
- unsigned long flags;
- static const unsigned fifo_underflow_bits[] = {
- DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- DISPC_IRQ_VID3_FIFO_UNDERFLOW,
- };
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
- errors = dispc.error_irqs;
- dispc.error_irqs = 0;
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- dispc_runtime_get();
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- unsigned bit;
-
- ovl = omap_dss_get_overlay(i);
- bit = fifo_underflow_bits[i];
-
- if (bit & errors) {
- DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
- ovl->name);
- dispc_ovl_enable(ovl->id, false);
- dispc_mgr_go(ovl->manager->id);
- msleep(50);
- }
- }
-
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- unsigned bit;
-
- mgr = omap_dss_get_overlay_manager(i);
- bit = mgr_desc[i].sync_lost_irq;
-
- if (bit & errors) {
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
- bool enable;
-
- DSSERR("SYNC_LOST on channel %s, restarting the output "
- "with video overlays disabled\n",
- mgr->name);
-
- enable = dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
- dssdev->driver->disable(dssdev);
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
-
- if (ovl->id != OMAP_DSS_GFX &&
- ovl->manager == mgr)
- dispc_ovl_enable(ovl->id, false);
- }
-
- dispc_mgr_go(mgr->id);
- msleep(50);
-
- if (enable)
- dssdev->driver->enable(dssdev);
- }
- }
-
- if (errors & DISPC_IRQ_OCP_ERR) {
- DSSERR("OCP_ERR\n");
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- struct omap_dss_device *dssdev;
-
- mgr = omap_dss_get_overlay_manager(i);
- dssdev = mgr->get_device(mgr);
-
- if (dssdev && dssdev->driver)
- dssdev->driver->disable(dssdev);
- }
- }
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
- dispc.irq_error_mask |= errors;
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
-
- dispc_runtime_put();
+ return dispc_read_reg(DISPC_IRQSTATUS);
}
+EXPORT_SYMBOL(dispc_read_irqstatus);
-int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+void dispc_clear_irqstatus(u32 mask)
{
- void dispc_irq_wait_handler(void *data, u32 mask)
- {
- complete((struct completion *)data);
- }
-
- int r;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
- irqmask);
-
- if (r)
- return r;
-
- timeout = wait_for_completion_timeout(&completion, timeout);
-
- omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
-
- return 0;
+ dispc_write_reg(DISPC_IRQSTATUS, mask);
}
+EXPORT_SYMBOL(dispc_clear_irqstatus);
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
- unsigned long timeout)
+u32 dispc_read_irqenable(void)
{
- void dispc_irq_wait_handler(void *data, u32 mask)
- {
- complete((struct completion *)data);
- }
-
- int r;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
- irqmask);
-
- if (r)
- return r;
-
- timeout = wait_for_completion_interruptible_timeout(&completion,
- timeout);
-
- omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
-
- return 0;
+ return dispc_read_reg(DISPC_IRQENABLE);
}
+EXPORT_SYMBOL(dispc_read_irqenable);
-static void _omap_dispc_initialize_irq(void)
+void dispc_write_irqenable(u32 mask)
{
- unsigned long flags;
-
- spin_lock_irqsave(&dispc.irq_lock, flags);
-
- memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
-
- dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
- if (dss_has_feature(FEAT_MGR_LCD2))
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
- if (dss_has_feature(FEAT_MGR_LCD3))
- dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
- if (dss_feat_get_num_ovls() > 3)
- dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
-
- /* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
- * so clear it */
- dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
+ u32 old_mask = dispc_read_reg(DISPC_IRQENABLE);
- _omap_dispc_set_irqs();
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_clear_irqstatus((mask ^ old_mask) & mask);
- spin_unlock_irqrestore(&dispc.irq_lock, flags);
+ dispc_write_reg(DISPC_IRQENABLE, mask);
}
+EXPORT_SYMBOL(dispc_write_irqenable);
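The (mask ^ old_mask) & mask expression selects only the bits being newly enabled, so stale IRQSTATUS bits are cleared just for those interrupts. A standalone illustration with arbitrary bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t old_mask = 0x00000005; /* bits 0 and 2 currently enabled */
        uint32_t mask     = 0x00000006; /* new request: bits 1 and 2 */

        /* Set in mask but not in old_mask: only bit 1 here. */
        uint32_t newly_enabled = (mask ^ old_mask) & mask;

        printf("newly enabled: 0x%08x\n", (unsigned)newly_enabled);

        return 0;
}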
void dispc_enable_sidle(void)
{
@@ -3998,9 +3475,14 @@ static const struct dispc_features omap24xx_dispc_feats __initconst = {
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
@@ -4010,9 +3492,14 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats __initconst = {
.sw_max = 64,
.vp_max = 255,
.hp_max = 256,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
@@ -4022,9 +3509,14 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats __initconst = {
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .no_framedone_tv = true,
};
static const struct dispc_features omap44xx_dispc_feats __initconst = {
@@ -4034,6 +3526,27 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
.sw_max = 256,
.vp_max = 4095,
.hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .calc_scaling = dispc_ovl_calc_scaling_44xx,
+ .calc_core_clk = calc_core_clk_44xx,
+ .num_fifos = 5,
+ .gfx_fifo_workaround = true,
+};
+
+static const struct dispc_features omap54xx_dispc_feats __initconst = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
+ .mgr_width_start = 11,
+ .mgr_height_start = 27,
+ .mgr_width_max = 4096,
+ .mgr_height_max = 4096,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
@@ -4042,7 +3555,6 @@ static const struct dispc_features omap44xx_dispc_feats __initconst = {
static int __init dispc_init_features(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const struct dispc_features *src;
struct dispc_features *dst;
@@ -4052,7 +3564,7 @@ static int __init dispc_init_features(struct platform_device *pdev)
return -ENOMEM;
}
- switch (pdata->version) {
+ switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
src = &omap24xx_dispc_feats;
break;
@@ -4074,7 +3586,7 @@ static int __init dispc_init_features(struct platform_device *pdev)
break;
case OMAPDSS_VER_OMAP5:
- src = &omap44xx_dispc_feats;
+ src = &omap54xx_dispc_feats;
break;
default:
@@ -4087,13 +3599,25 @@ static int __init dispc_init_features(struct platform_device *pdev)
return 0;
}
+int dispc_request_irq(irq_handler_t handler, void *dev_id)
+{
+ return devm_request_irq(&dispc.pdev->dev, dispc.irq, handler,
+ IRQF_SHARED, "OMAP DISPC", dev_id);
+}
+EXPORT_SYMBOL(dispc_request_irq);
+
+void dispc_free_irq(void *dev_id)
+{
+ devm_free_irq(&dispc.pdev->dev, dispc.irq, dev_id);
+}
+EXPORT_SYMBOL(dispc_free_irq);
+
/* DISPC HW IP initialisation */
static int __init omap_dispchw_probe(struct platform_device *pdev)
{
u32 rev;
int r = 0;
struct resource *dispc_mem;
- struct clk *clk;
dispc.pdev = pdev;
@@ -4101,15 +3625,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
if (r)
return r;
- spin_lock_init(&dispc.irq_lock);
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc.irq_stats_lock);
- dispc.irq_stats.last_reset = jiffies;
-#endif
-
- INIT_WORK(&dispc.error_work, dispc_error_worker);
-
dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4129,22 +3644,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
return -ENODEV;
}
- r = devm_request_irq(&pdev->dev, dispc.irq, omap_dispc_irq_handler,
- IRQF_SHARED, "OMAP DISPC", dispc.pdev);
- if (r < 0) {
- DSSERR("request_irq failed\n");
- return r;
- }
-
- clk = clk_get(&pdev->dev, "fck");
- if (IS_ERR(clk)) {
- DSSERR("can't get fck\n");
- r = PTR_ERR(clk);
- return r;
- }
-
- dispc.dss_clk = clk;
-
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get();
@@ -4153,8 +3652,6 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
_omap_dispc_initial_config();
- _omap_dispc_initialize_irq();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
@@ -4163,14 +3660,10 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
dss_debugfs_create_file("dispc", dispc_dump_regs);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
-#endif
return 0;
err_runtime_get:
pm_runtime_disable(&pdev->dev);
- clk_put(dispc.dss_clk);
return r;
}
@@ -4178,8 +3671,6 @@ static int __exit omap_dispchw_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- clk_put(dispc.dss_clk);
-
return 0;
}
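With the ISR bookkeeping moved out of dispc.c, whoever calls dispc_request_irq() owns the (shared) interrupt line and uses the new read/clear accessors itself. The handler below is only an illustrative shape, not the real compat handler:

/* Illustrative only: the minimal shape of a handler built on the new API. */
static irqreturn_t example_dispc_irq_handler(int irq, void *arg)
{
        u32 irqstatus = dispc_read_irqstatus();
        u32 irqenable = dispc_read_irqenable();

        /* Shared line: the interrupt may not be ours. */
        if ((irqstatus & irqenable) == 0)
                return IRQ_NONE;

        /* Ack early, as the old in-dispc.c handler did. */
        dispc_clear_irqstatus(irqstatus);

        /* ... dispatch irqstatus bits to the interested parties ... */

        return IRQ_HANDLED;
}

/* Pairs with dispc_free_irq(dev_id) on teardown. */
static int example_dispc_irq_init(void *dev_id)
{
        return dispc_request_irq(example_dispc_irq_handler, dev_id);
}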
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/omap2/dss/display-sysfs.c
new file mode 100644
index 00000000000..18211a9ab35
--- /dev/null
+++ b/drivers/video/omap2/dss/display-sysfs.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+
+#include <video/omapdss.h>
+#include "dss.h"
+#include "dss_features.h"
+
+static ssize_t display_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+}
+
+static ssize_t display_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool enabled;
+
+ r = strtobool(buf, &enabled);
+ if (r)
+ return r;
+
+ if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
+ if (enabled) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ return r;
+ } else {
+ dssdev->driver->disable(dssdev);
+ }
+ }
+
+ return size;
+}
+
+static ssize_t display_tear_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ dssdev->driver->get_te ?
+ dssdev->driver->get_te(dssdev) : 0);
+}
+
+static ssize_t display_tear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool te;
+
+ if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
+ return -ENOENT;
+
+ r = strtobool(buf, &te);
+ if (r)
+ return r;
+
+ r = dssdev->driver->enable_te(dssdev, te);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_timings_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t;
+
+ if (!dssdev->driver->get_timings)
+ return -ENOENT;
+
+ dssdev->driver->get_timings(dssdev, &t);
+
+ return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
+ t.pixel_clock,
+ t.x_res, t.hfp, t.hbp, t.hsw,
+ t.y_res, t.vfp, t.vbp, t.vsw);
+}
+
+static ssize_t display_timings_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t = dssdev->panel.timings;
+ int r, found;
+
+ if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
+ return -ENOENT;
+
+ found = 0;
+#ifdef CONFIG_OMAP2_DSS_VENC
+ if (strncmp("pal", buf, 3) == 0) {
+ t = omap_dss_pal_timings;
+ found = 1;
+ } else if (strncmp("ntsc", buf, 4) == 0) {
+ t = omap_dss_ntsc_timings;
+ found = 1;
+ }
+#endif
+ if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
+ &t.pixel_clock,
+ &t.x_res, &t.hfp, &t.hbp, &t.hsw,
+ &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
+ return -EINVAL;
+
+ r = dssdev->driver->check_timings(dssdev, &t);
+ if (r)
+ return r;
+
+ dssdev->driver->disable(dssdev);
+ dssdev->driver->set_timings(dssdev, &t);
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_rotate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int rotate;
+ if (!dssdev->driver->get_rotate)
+ return -ENOENT;
+ rotate = dssdev->driver->get_rotate(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
+}
+
+static ssize_t display_rotate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int rot, r;
+
+ if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+ return -ENOENT;
+
+ r = kstrtoint(buf, 0, &rot);
+ if (r)
+ return r;
+
+ r = dssdev->driver->set_rotate(dssdev, rot);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_mirror_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int mirror;
+ if (!dssdev->driver->get_mirror)
+ return -ENOENT;
+ mirror = dssdev->driver->get_mirror(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
+}
+
+static ssize_t display_mirror_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int r;
+ bool mirror;
+
+ if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+ return -ENOENT;
+
+ r = strtobool(buf, &mirror);
+ if (r)
+ return r;
+
+ r = dssdev->driver->set_mirror(dssdev, mirror);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_wss_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned int wss;
+
+ if (!dssdev->driver->get_wss)
+ return -ENOENT;
+
+ wss = dssdev->driver->get_wss(dssdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
+}
+
+static ssize_t display_wss_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ u32 wss;
+ int r;
+
+ if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
+ return -ENOENT;
+
+ r = kstrtou32(buf, 0, &wss);
+ if (r)
+ return r;
+
+ if (wss > 0xfffff)
+ return -EINVAL;
+
+ r = dssdev->driver->set_wss(dssdev, wss);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+ display_enabled_show, display_enabled_store);
+static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+ display_tear_show, display_tear_store);
+static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+ display_timings_show, display_timings_store);
+static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+ display_rotate_show, display_rotate_store);
+static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+ display_mirror_show, display_mirror_store);
+static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+ display_wss_show, display_wss_store);
+
+static struct device_attribute *display_sysfs_attrs[] = {
+ &dev_attr_enabled,
+ &dev_attr_tear_elim,
+ &dev_attr_timings,
+ &dev_attr_rotate,
+ &dev_attr_mirror,
+ &dev_attr_wss,
+ NULL
+};
+
+int display_init_sysfs(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i, r;
+
+ /* create device sysfs files */
+ i = 0;
+ while ((attr = display_sysfs_attrs[i++]) != NULL) {
+ r = device_create_file(&dssdev->dev, attr);
+ if (r) {
+ for (i = i - 2; i >= 0; i--) {
+ attr = display_sysfs_attrs[i];
+ device_remove_file(&dssdev->dev, attr);
+ }
+
+ DSSERR("failed to create sysfs file\n");
+ return r;
+ }
+ }
+
+ /* create the sysfs link for this display */
+ r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
+ dev_name(&dssdev->dev));
+ if (r) {
+ while ((attr = display_sysfs_attrs[i++]) != NULL)
+ device_remove_file(&dssdev->dev, attr);
+
+ DSSERR("failed to create sysfs display link\n");
+ return r;
+ }
+
+ return 0;
+}
+
+void display_uninit_sysfs(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i = 0;
+
+ sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
+
+ while ((attr = display_sysfs_attrs[i++]) != NULL)
+ device_remove_file(&dssdev->dev, attr);
+}
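
For reference, the sysfs handling in the new display-sysfs.c above follows a common driver-core pattern: a NULL-terminated array of struct device_attribute pointers is registered with device_create_file(), and anything already created is removed again if a later step fails. Below is a minimal standalone sketch of that create-then-roll-back pattern; the my_* names and the trivial show() routine are illustrative only and are not part of the patch.

    #include <linux/kernel.h>
    #include <linux/device.h>

    /* illustrative read-only attribute backed by a trivial show() routine */
    static ssize_t my_enabled_show(struct device *dev,
            struct device_attribute *attr, char *buf)
    {
        return snprintf(buf, PAGE_SIZE, "%d\n", 1);
    }
    static DEVICE_ATTR(enabled, S_IRUGO, my_enabled_show, NULL);

    /* NULL-terminated list, like display_sysfs_attrs above */
    static struct device_attribute *my_attrs[] = {
        &dev_attr_enabled,
        NULL
    };

    static int my_create_files(struct device *dev)
    {
        int i, r;

        for (i = 0; my_attrs[i]; i++) {
            r = device_create_file(dev, my_attrs[i]);
            if (r)
                goto undo;
        }
        return 0;

    undo:
        /* remove the files that were already created, newest first */
        while (--i >= 0)
            device_remove_file(dev, my_attrs[i]);
        return r;
    }

The newest-first rollback mirrors the "for (i = i - 2; i >= 0; i--)" cleanup loop in display_init_sysfs() above.
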
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index ccf8550fafd..0aa8ad8f966 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -31,250 +31,6 @@
#include "dss.h"
#include "dss_features.h"
-static ssize_t display_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
-}
-
-static ssize_t display_enabled_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool enabled;
-
- r = strtobool(buf, &enabled);
- if (r)
- return r;
-
- if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
- if (enabled) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
- } else {
- dssdev->driver->disable(dssdev);
- }
- }
-
- return size;
-}
-
-static ssize_t display_tear_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- dssdev->driver->get_te ?
- dssdev->driver->get_te(dssdev) : 0);
-}
-
-static ssize_t display_tear_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool te;
-
- if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
- return -ENOENT;
-
- r = strtobool(buf, &te);
- if (r)
- return r;
-
- r = dssdev->driver->enable_te(dssdev, te);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_timings_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_video_timings t;
-
- if (!dssdev->driver->get_timings)
- return -ENOENT;
-
- dssdev->driver->get_timings(dssdev, &t);
-
- return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
- t.pixel_clock,
- t.x_res, t.hfp, t.hbp, t.hsw,
- t.y_res, t.vfp, t.vbp, t.vsw);
-}
-
-static ssize_t display_timings_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_video_timings t = dssdev->panel.timings;
- int r, found;
-
- if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
- return -ENOENT;
-
- found = 0;
-#ifdef CONFIG_OMAP2_DSS_VENC
- if (strncmp("pal", buf, 3) == 0) {
- t = omap_dss_pal_timings;
- found = 1;
- } else if (strncmp("ntsc", buf, 4) == 0) {
- t = omap_dss_ntsc_timings;
- found = 1;
- }
-#endif
- if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
- &t.pixel_clock,
- &t.x_res, &t.hfp, &t.hbp, &t.hsw,
- &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
- return -EINVAL;
-
- r = dssdev->driver->check_timings(dssdev, &t);
- if (r)
- return r;
-
- dssdev->driver->disable(dssdev);
- dssdev->driver->set_timings(dssdev, &t);
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_rotate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int rotate;
- if (!dssdev->driver->get_rotate)
- return -ENOENT;
- rotate = dssdev->driver->get_rotate(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
-}
-
-static ssize_t display_rotate_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int rot, r;
-
- if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
- return -ENOENT;
-
- r = kstrtoint(buf, 0, &rot);
- if (r)
- return r;
-
- r = dssdev->driver->set_rotate(dssdev, rot);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_mirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int mirror;
- if (!dssdev->driver->get_mirror)
- return -ENOENT;
- mirror = dssdev->driver->get_mirror(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
-}
-
-static ssize_t display_mirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int r;
- bool mirror;
-
- if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
- return -ENOENT;
-
- r = strtobool(buf, &mirror);
- if (r)
- return r;
-
- r = dssdev->driver->set_mirror(dssdev, mirror);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_wss_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- unsigned int wss;
-
- if (!dssdev->driver->get_wss)
- return -ENOENT;
-
- wss = dssdev->driver->get_wss(dssdev);
-
- return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
-}
-
-static ssize_t display_wss_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- u32 wss;
- int r;
-
- if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
- return -ENOENT;
-
- r = kstrtou32(buf, 0, &wss);
- if (r)
- return r;
-
- if (wss > 0xfffff)
- return -EINVAL;
-
- r = dssdev->driver->set_wss(dssdev, wss);
- if (r)
- return r;
-
- return size;
-}
-
-static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
- display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
- display_tear_show, display_tear_store);
-static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
- display_timings_show, display_timings_store);
-static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
- display_rotate_show, display_rotate_store);
-static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
- display_mirror_show, display_mirror_store);
-static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
- display_wss_show, display_wss_store);
-
-static struct device_attribute *display_sysfs_attrs[] = {
- &dev_attr_enabled,
- &dev_attr_tear_elim,
- &dev_attr_timings,
- &dev_attr_rotate,
- &dev_attr_mirror,
- &dev_attr_wss,
- NULL
-};
-
void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -320,136 +76,8 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_default_get_timings);
-/*
- * Connect dssdev to a manager if the manager is free or if force is specified.
- * Connect all overlays to that manager if they are free or if force is
- * specified.
- */
-static int dss_init_connections(struct omap_dss_device *dssdev, bool force)
-{
- struct omap_dss_output *out;
- struct omap_overlay_manager *mgr;
- int i, r;
-
- out = omapdss_get_output_from_dssdev(dssdev);
-
- WARN_ON(dssdev->output);
- WARN_ON(out->device);
-
- r = omapdss_output_set_device(out, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device\n");
- return r;
- }
-
- mgr = omap_dss_get_overlay_manager(dssdev->channel);
-
- if (mgr->output && !force)
- return 0;
-
- if (mgr->output)
- mgr->unset_output(mgr);
-
- r = mgr->set_output(mgr, out);
- if (r) {
- DSSERR("failed to connect manager to output of new device\n");
-
- /* remove the output-device connection we just made */
- omapdss_output_unset_device(out);
- return r;
- }
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
-
- if (!ovl->manager || force) {
- if (ovl->manager)
- ovl->unset_manager(ovl);
-
- r = ovl->set_manager(ovl, mgr);
- if (r) {
- DSSERR("failed to set initial overlay\n");
- return r;
- }
- }
- }
-
- return 0;
-}
-
-static void dss_uninit_connections(struct omap_dss_device *dssdev)
-{
- if (dssdev->output) {
- struct omap_overlay_manager *mgr = dssdev->output->manager;
-
- if (mgr)
- mgr->unset_output(mgr);
-
- omapdss_output_unset_device(dssdev->output);
- }
-}
-
-int dss_init_device(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
-{
- struct device_attribute *attr;
- int i, r;
- const char *def_disp_name = dss_get_default_display_name();
- bool force;
-
- force = def_disp_name && strcmp(def_disp_name, dssdev->name) == 0;
- dss_init_connections(dssdev, force);
-
- /* create device sysfs files */
- i = 0;
- while ((attr = display_sysfs_attrs[i++]) != NULL) {
- r = device_create_file(&dssdev->dev, attr);
- if (r) {
- for (i = i - 2; i >= 0; i--) {
- attr = display_sysfs_attrs[i];
- device_remove_file(&dssdev->dev, attr);
- }
-
- dss_uninit_connections(dssdev);
-
- DSSERR("failed to create sysfs file\n");
- return r;
- }
- }
-
- /* create display? sysfs links */
- r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
- dev_name(&dssdev->dev));
- if (r) {
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
-
- dss_uninit_connections(dssdev);
-
- DSSERR("failed to create sysfs display link\n");
- return r;
- }
-
- return 0;
-}
-
-void dss_uninit_device(struct platform_device *pdev,
- struct omap_dss_device *dssdev)
-{
- struct device_attribute *attr;
- int i = 0;
-
- sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
-
- while ((attr = display_sysfs_attrs[i++]) != NULL)
- device_remove_file(&dssdev->dev, attr);
-
- dss_uninit_connections(dssdev);
-}
-
static int dss_suspend_device(struct device *dev, void *data)
{
- int r;
struct omap_dss_device *dssdev = to_dss_device(dev);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
@@ -457,15 +85,7 @@ static int dss_suspend_device(struct device *dev, void *data)
return 0;
}
- if (!dssdev->driver->suspend) {
- DSSERR("display '%s' doesn't implement suspend\n",
- dssdev->name);
- return -ENOSYS;
- }
-
- r = dssdev->driver->suspend(dssdev);
- if (r)
- return r;
+ dssdev->driver->disable(dssdev);
dssdev->activate_after_resume = true;
@@ -492,8 +112,8 @@ static int dss_resume_device(struct device *dev, void *data)
int r;
struct omap_dss_device *dssdev = to_dss_device(dev);
- if (dssdev->activate_after_resume && dssdev->driver->resume) {
- r = dssdev->driver->resume(dssdev);
+ if (dssdev->activate_after_resume) {
+ r = dssdev->driver->enable(dssdev);
if (r)
return r;
}
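
The suspend/resume change above drops the per-driver suspend()/resume() hooks: suspending an active display is now just driver->disable() plus the activate_after_resume flag, and resume re-runs driver->enable(). A hedged sketch of that flag-based idea follows; struct my_display and its helpers are invented for illustration and stand in for the dssdev driver ops.

    #include <linux/types.h>

    struct my_display {
        bool active;
        bool activate_after_resume;
    };

    /* stand-ins for dssdev->driver->enable()/disable() */
    static int my_enable(struct my_display *d)
    {
        d->active = true;
        return 0;
    }

    static void my_disable(struct my_display *d)
    {
        d->active = false;
    }

    static void my_suspend(struct my_display *d)
    {
        if (!d->active)
            return;

        my_disable(d);
        d->activate_after_resume = true;    /* remember to re-enable on resume */
    }

    static int my_resume(struct my_display *d)
    {
        int r = 0;

        if (d->activate_after_resume)
            r = my_enable(d);

        d->activate_after_resume = false;
        return r;
    }
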
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 56748cf8760..4af136a04e5 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -49,34 +49,53 @@ static struct {
struct omap_dss_output output;
} dpi;
-static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
+static struct platform_device *dpi_get_dsidev(enum omap_channel channel)
{
- int dsi_module;
-
- dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 0 : 1;
+ /*
+ * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
+ * would also be used for DISPC fclk. Meaning, when the DPI output is
+ * disabled, DISPC clock will be disabled, and TV out will stop.
+ */
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP24xx:
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return NULL;
+ default:
+ break;
+ }
- return dsi_get_dsidev_from_id(dsi_module);
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return dsi_get_dsidev_from_id(0);
+ case OMAP_DSS_CHANNEL_LCD2:
+ return dsi_get_dsidev_from_id(1);
+ default:
+ return NULL;
+ }
}
-static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev)
+static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
{
- if (dssdev->clocks.dispc.dispc_fclk_src ==
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.dispc_fclk_src ==
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.channel.lcd_clk_src ==
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
- dssdev->clocks.dispc.channel.lcd_clk_src ==
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC)
- return true;
- else
- return false;
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
+ default:
+ /* this shouldn't happen */
+ WARN_ON(1);
+ return OMAP_DSS_CLK_SRC_FCK;
+ }
}
static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
+ struct omap_overlay_manager *mgr = dssdev->output->manager;
struct dsi_clock_info dsi_cinfo;
struct dispc_clock_info dispc_cinfo;
int r;
@@ -90,7 +109,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
if (r)
return r;
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
+ dss_select_lcd_clk_source(mgr->id,
+ dpi_get_alt_clk_src(mgr->id));
dpi.mgr_config.clock_info = dispc_cinfo;
@@ -135,7 +155,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
unsigned long pck;
int r = 0;
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
&lck_div, &pck_div);
else
@@ -214,7 +234,7 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_src_sel;
- if (dpi_use_dsi_pll(dssdev)) {
+ if (dpi.dsidev) {
r = dsi_runtime_get(dpi.dsidev);
if (r)
goto err_get_dsi;
@@ -242,10 +262,10 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
err_mgr_enable:
err_set_mode:
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
dsi_pll_uninit(dpi.dsidev, true);
err_dsi_pll_init:
- if (dpi_use_dsi_pll(dssdev))
+ if (dpi.dsidev)
dsi_runtime_put(dpi.dsidev);
err_get_dsi:
err_src_sel:
@@ -271,8 +291,8 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
dss_mgr_disable(mgr);
- if (dpi_use_dsi_pll(dssdev)) {
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+ if (dpi.dsidev) {
+ dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
dsi_runtime_put(dpi.dsidev);
}
@@ -311,13 +331,13 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
unsigned long pck;
struct dispc_clock_info dispc_cinfo;
- if (dss_mgr_check_timings(mgr, timings))
+ if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
return -EINVAL;
if (timings->pixel_clock == 0)
return -EINVAL;
- if (dpi_use_dsi_pll(dssdev)) {
+ if (dpi.dsidev) {
struct dsi_clock_info dsi_cinfo;
r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
timings->pixel_clock * 1000,
@@ -359,8 +379,32 @@ void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
}
EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
+static int __init dpi_verify_dsi_pll(struct platform_device *dsidev)
+{
+ int r;
+
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ return r;
+
+ r = dsi_pll_init(dsidev, 0, 1);
+ if (r) {
+ dsi_runtime_put(dsidev);
+ return r;
+ }
+
+ dsi_pll_uninit(dsidev, true);
+ dsi_runtime_put(dsidev);
+
+ return 0;
+}
+
static int __init dpi_init_display(struct omap_dss_device *dssdev)
{
+ struct platform_device *dsidev;
+
DSSDBG("init_display\n");
if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) &&
@@ -377,19 +421,30 @@ static int __init dpi_init_display(struct omap_dss_device *dssdev)
dpi.vdds_dsi_reg = vdds_dsi;
}
- if (dpi_use_dsi_pll(dssdev)) {
- enum omap_dss_clk_source dispc_fclk_src =
- dssdev->clocks.dispc.dispc_fclk_src;
- dpi.dsidev = dpi_get_dsidev(dispc_fclk_src);
+ /*
+ * XXX We shouldn't need dssdev->channel for this. The dsi pll clock
+ * source for DPI is a SoC integration detail, not something that should
+ * be configured in the dssdev
+ */
+ dsidev = dpi_get_dsidev(dssdev->channel);
+
+ if (dsidev && dpi_verify_dsi_pll(dsidev)) {
+ dsidev = NULL;
+ DSSWARN("DSI PLL not operational\n");
}
+ if (dsidev)
+ DSSDBG("using DSI PLL for DPI clock\n");
+
+ dpi.dsidev = dsidev;
+
return 0;
}
static struct omap_dss_device * __init dpi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -438,9 +493,18 @@ static void __init dpi_probe_pdata(struct platform_device *dpidev)
return;
}
+ r = omapdss_output_set_device(&dpi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&dpi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index bee92846cfa..28d41d16b7b 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -45,7 +45,6 @@
#include "dss.h"
#include "dss_features.h"
-/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE
struct dsi_reg { u16 idx; };
@@ -535,42 +534,38 @@ static inline void dsi_perf_show(struct platform_device *dsidev,
}
#endif
+static int verbose_irq;
+
static void print_irq_status(u32 status)
{
if (status == 0)
return;
-#ifndef VERBOSE_IRQ
- if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
+ if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
return;
-#endif
- printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
-#define PIS(x) \
- if (status & DSI_IRQ_##x) \
- printk(#x " ");
-#ifdef VERBOSE_IRQ
- PIS(VC0);
- PIS(VC1);
- PIS(VC2);
- PIS(VC3);
-#endif
- PIS(WAKEUP);
- PIS(RESYNC);
- PIS(PLL_LOCK);
- PIS(PLL_UNLOCK);
- PIS(PLL_RECALL);
- PIS(COMPLEXIO_ERR);
- PIS(HS_TX_TIMEOUT);
- PIS(LP_RX_TIMEOUT);
- PIS(TE_TRIGGER);
- PIS(ACK_TRIGGER);
- PIS(SYNC_LOST);
- PIS(LDO_POWER_GOOD);
- PIS(TA_TIMEOUT);
+#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ status,
+ verbose_irq ? PIS(VC0) : "",
+ verbose_irq ? PIS(VC1) : "",
+ verbose_irq ? PIS(VC2) : "",
+ verbose_irq ? PIS(VC3) : "",
+ PIS(WAKEUP),
+ PIS(RESYNC),
+ PIS(PLL_LOCK),
+ PIS(PLL_UNLOCK),
+ PIS(PLL_RECALL),
+ PIS(COMPLEXIO_ERR),
+ PIS(HS_TX_TIMEOUT),
+ PIS(LP_RX_TIMEOUT),
+ PIS(TE_TRIGGER),
+ PIS(ACK_TRIGGER),
+ PIS(SYNC_LOST),
+ PIS(LDO_POWER_GOOD),
+ PIS(TA_TIMEOUT));
#undef PIS
-
- printk("\n");
}
static void print_irq_status_vc(int channel, u32 status)
@@ -578,28 +573,24 @@ static void print_irq_status_vc(int channel, u32 status)
if (status == 0)
return;
-#ifndef VERBOSE_IRQ
- if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
+ if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
return;
-#endif
- printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
-#define PIS(x) \
- if (status & DSI_VC_IRQ_##x) \
- printk(#x " ");
- PIS(CS);
- PIS(ECC_CORR);
-#ifdef VERBOSE_IRQ
- PIS(PACKET_SENT);
-#endif
- PIS(FIFO_TX_OVF);
- PIS(FIFO_RX_OVF);
- PIS(BTA);
- PIS(ECC_NO_CORR);
- PIS(FIFO_TX_UDF);
- PIS(PP_BUSY_CHANGE);
+#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
+ channel,
+ status,
+ PIS(CS),
+ PIS(ECC_CORR),
+ PIS(ECC_NO_CORR),
+ verbose_irq ? PIS(PACKET_SENT) : "",
+ PIS(BTA),
+ PIS(FIFO_TX_OVF),
+ PIS(FIFO_RX_OVF),
+ PIS(FIFO_TX_UDF),
+ PIS(PP_BUSY_CHANGE));
#undef PIS
- printk("\n");
}
static void print_irq_status_cio(u32 status)
@@ -607,34 +598,31 @@ static void print_irq_status_cio(u32 status)
if (status == 0)
return;
- printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
-
-#define PIS(x) \
- if (status & DSI_CIO_IRQ_##x) \
- printk(#x " ");
- PIS(ERRSYNCESC1);
- PIS(ERRSYNCESC2);
- PIS(ERRSYNCESC3);
- PIS(ERRESC1);
- PIS(ERRESC2);
- PIS(ERRESC3);
- PIS(ERRCONTROL1);
- PIS(ERRCONTROL2);
- PIS(ERRCONTROL3);
- PIS(STATEULPS1);
- PIS(STATEULPS2);
- PIS(STATEULPS3);
- PIS(ERRCONTENTIONLP0_1);
- PIS(ERRCONTENTIONLP1_1);
- PIS(ERRCONTENTIONLP0_2);
- PIS(ERRCONTENTIONLP1_2);
- PIS(ERRCONTENTIONLP0_3);
- PIS(ERRCONTENTIONLP1_3);
- PIS(ULPSACTIVENOT_ALL0);
- PIS(ULPSACTIVENOT_ALL1);
+#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""
+
+ pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ status,
+ PIS(ERRSYNCESC1),
+ PIS(ERRSYNCESC2),
+ PIS(ERRSYNCESC3),
+ PIS(ERRESC1),
+ PIS(ERRESC2),
+ PIS(ERRESC3),
+ PIS(ERRCONTROL1),
+ PIS(ERRCONTROL2),
+ PIS(ERRCONTROL3),
+ PIS(STATEULPS1),
+ PIS(STATEULPS2),
+ PIS(STATEULPS3),
+ PIS(ERRCONTENTIONLP0_1),
+ PIS(ERRCONTENTIONLP1_1),
+ PIS(ERRCONTENTIONLP0_2),
+ PIS(ERRCONTENTIONLP1_2),
+ PIS(ERRCONTENTIONLP0_3),
+ PIS(ERRCONTENTIONLP1_3),
+ PIS(ULPSACTIVENOT_ALL0),
+ PIS(ULPSACTIVENOT_ALL1));
#undef PIS
-
- printk("\n");
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -1116,28 +1104,16 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
}
}
-#ifdef DEBUG
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
u32 l;
int b0, b1, b2;
- if (!dss_debug)
- return;
-
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- printk(KERN_DEBUG "DSI resets: ");
-
- l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
- printk("PLL (%d) ", FLD_GET(l, 0, 0));
-
- l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
- printk("CIO (%d) ", FLD_GET(l, 29, 29));
-
if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
b0 = 28;
b1 = 27;
@@ -1148,18 +1124,21 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
b2 = 26;
}
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- printk("PHY (%x%x%x, %d, %d, %d)\n",
- FLD_GET(l, b0, b0),
- FLD_GET(l, b1, b1),
- FLD_GET(l, b2, b2),
- FLD_GET(l, 29, 29),
- FLD_GET(l, 30, 30),
- FLD_GET(l, 31, 31));
+#define DSI_FLD_GET(fld, start, end)\
+ FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
+
+ pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
+ DSI_FLD_GET(PLL_STATUS, 0, 0),
+ DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
+ DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
+ DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
+ DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
+ DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
+ DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
+ DSI_FLD_GET(DSIPHY_CFG5, 31, 31));
+
+#undef DSI_FLD_GET
}
-#else
-#define _dsi_print_reset_status(x)
-#endif
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
@@ -1407,6 +1386,11 @@ retry:
cur.dsi_pll_hsdiv_dispc_clk =
cur.clkin4ddr / cur.regm_dispc;
+ if (cur.regm_dispc > 1 &&
+ cur.regm_dispc % 2 != 0 &&
+ req_pck >= 1000000)
+ continue;
+
/* this will narrow down the search a bit,
* but still give pixclocks below what was
* requested */
@@ -1621,7 +1605,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
u8 regn_start, regn_end, regm_start, regm_end;
u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
- DSSDBGF();
+ DSSDBG("DSI PLL clock config starts");
dsi->current_cinfo.clkin = cinfo->clkin;
dsi->current_cinfo.fint = cinfo->fint;
@@ -1757,11 +1741,21 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
DSSDBG("PLL init\n");
+ /*
+ * It seems that on many OMAPs we need to enable both to have a
+ * functional HSDivider.
+ */
+ enable_hsclk = enable_hsdiv = true;
+
if (dsi->vdds_dsi_reg == NULL) {
struct regulator *vdds_dsi;
vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
+ /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(vdds_dsi))
+ vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
+
if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
@@ -2440,7 +2434,7 @@ static int dsi_cio_init(struct platform_device *dsidev)
int r;
u32 l;
- DSSDBGF();
+ DSSDBG("DSI CIO init starts");
r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
if (r)
@@ -2791,7 +2785,7 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
u32 r;
- DSSDBGF("%d", channel);
+ DSSDBG("Initial config of virtual channel %d", channel);
r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
@@ -2823,7 +2817,7 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
if (dsi->vc[channel].source == source)
return 0;
- DSSDBGF("%d", channel);
+ DSSDBG("Source config of virtual channel %d", channel);
dsi_sync_vc(dsidev, channel);
@@ -3581,7 +3575,7 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
int r, i;
unsigned mask;
- DSSDBGF();
+ DSSDBG("Entering ULPS");
WARN_ON(!dsi_bus_is_locked(dsidev));
@@ -4285,7 +4279,7 @@ int omapdss_dsi_set_clocks(struct omap_dss_device *dssdev,
unsigned long pck;
int r;
- DSSDBGF("ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
+ DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);
mutex_lock(&dsi->lock);
@@ -4541,7 +4535,7 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}
-static void dsi_framedone_irq_callback(void *data, u32 mask)
+static void dsi_framedone_irq_callback(void *data)
{
struct platform_device *dsidev = (struct platform_device *) data;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4615,7 +4609,6 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_overlay_manager *mgr = dssdev->output->manager;
int r;
- u32 irq = 0;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
dsi->timings.hsw = 1;
@@ -4625,12 +4618,10 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
dsi->timings.vfp = 0;
dsi->timings.vbp = 0;
- irq = dispc_mgr_get_framedone_irq(mgr->id);
-
- r = omap_dispc_register_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
+ r = dss_mgr_register_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
+ DSSERR("can't register FRAMEDONE handler\n");
goto err;
}
@@ -4668,8 +4659,8 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- omap_dispc_unregister_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
+ dss_mgr_unregister_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
err:
return r;
}
@@ -4680,14 +4671,9 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_overlay_manager *mgr = dssdev->output->manager;
- if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- u32 irq;
-
- irq = dispc_mgr_get_framedone_irq(mgr->id);
-
- omap_dispc_unregister_isr(dsi_framedone_irq_callback,
- (void *) dsidev, irq);
- }
+ if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
+ dss_mgr_unregister_framedone_handler(mgr,
+ dsi_framedone_irq_callback, dsidev);
}
static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
@@ -4730,7 +4716,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
dss_select_lcd_clk_source(mgr->id,
dssdev->clocks.dispc.channel.lcd_clk_src);
@@ -4765,7 +4750,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
err3:
dsi_cio_uninit(dsidev);
err2:
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
@@ -4792,7 +4776,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_vc_enable(dsidev, 2, 0);
dsi_vc_enable(dsidev, 3, 0);
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
@@ -4981,6 +4964,10 @@ static int __init dsi_init_display(struct omap_dss_device *dssdev)
vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
+ /* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(vdds_dsi))
+ vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");
+
if (IS_ERR(vdds_dsi)) {
DSSERR("can't get VDDS_DSI regulator\n");
return PTR_ERR(vdds_dsi);
@@ -5121,7 +5108,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -5151,6 +5138,7 @@ static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *p
static void __init dsi_probe_pdata(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *plat_dssdev;
struct omap_dss_device *dssdev;
int r;
@@ -5173,9 +5161,18 @@ static void __init dsi_probe_pdata(struct platform_device *dsidev)
return;
}
+ r = omapdss_output_set_device(&dsi->output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&dsi->output);
dss_put_device(dssdev);
return;
}
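
The print_irq_status() rework above replaces a chain of conditional printk() calls with one pr_debug() whose arguments come from a stringifying PIS() macro, so each status line is emitted atomically. A small sketch of the same technique with made-up flag names (these are not the real DSI IRQ bits):

    #include <linux/types.h>
    #include <linux/printk.h>

    #define MY_IRQ_DONE     (1 << 0)
    #define MY_IRQ_ERROR    (1 << 1)
    #define MY_IRQ_SYNC     (1 << 2)

    static void my_print_irq_status(u32 status)
    {
        if (status == 0)
            return;

    /* expands to the flag name plus a space, or to "" when the bit is clear */
    #define PIS(x) ((status & MY_IRQ_##x) ? #x " " : "")

        /* one complete line per call instead of several partial printk()s */
        pr_debug("IRQ 0x%x: %s%s%s\n", status,
                 PIS(DONE), PIS(ERROR), PIS(SYNC));

    #undef PIS
    }
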
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 602102cebcb..054c2a22b3f 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
@@ -76,6 +77,7 @@ static struct {
struct clk *dpll4_m4_ck;
struct clk *dss_clk;
+ unsigned long dss_clk_rate;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -96,6 +98,8 @@ static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
[OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
+ [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC",
+ [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI",
};
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
@@ -151,6 +155,21 @@ static void dss_restore_context(void)
#undef SR
#undef RR
+int dss_get_ctx_loss_count(void)
+{
+ struct omap_dss_board_info *board_data = dss.pdev->dev.platform_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(&dss.pdev->dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
void dss_sdi_init(int datapairs)
{
u32 l;
@@ -301,7 +320,7 @@ static void dss_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
+static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
int b;
@@ -372,8 +391,10 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
struct platform_device *dsidev;
int b, ix, pos;
- if (!dss_has_feature(FEAT_LCD_CLK_SRC))
+ if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ dss_select_dispc_clk_source(clk_src);
return;
+ }
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
@@ -429,6 +450,29 @@ enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
}
}
+/* calculate clock rates using dividers in cinfo */
+int dss_calc_clock_rates(struct dss_clock_info *cinfo)
+{
+ if (dss.dpll4_m4_ck) {
+ unsigned long prate;
+
+ if (cinfo->fck_div > dss.feat->fck_div_max ||
+ cinfo->fck_div == 0)
+ return -EINVAL;
+
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+
+ cinfo->fck = prate / cinfo->fck_div *
+ dss.feat->dss_fck_multiplier;
+ } else {
+ if (cinfo->fck_div != 0)
+ return -EINVAL;
+ cinfo->fck = clk_get_rate(dss.dss_clk);
+ }
+
+ return 0;
+}
+
int dss_set_clock_div(struct dss_clock_info *cinfo)
{
if (dss.dpll4_m4_ck) {
@@ -446,6 +490,10 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
return -EINVAL;
}
+ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+
+ WARN_ONCE(dss.dss_clk_rate != cinfo->fck, "clk rate mismatch");
+
DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
return 0;
@@ -459,6 +507,41 @@ unsigned long dss_get_dpll4_rate(void)
return 0;
}
+unsigned long dss_get_dispc_clk_rate(void)
+{
+ return dss.dss_clk_rate;
+}
+
+static int dss_setup_default_clock(void)
+{
+ unsigned long max_dss_fck, prate;
+ unsigned fck_div;
+ struct dss_clock_info dss_cinfo = { 0 };
+ int r;
+
+ if (dss.dpll4_m4_ck == NULL)
+ return 0;
+
+ max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+
+ prate = dss_get_dpll4_rate();
+
+ fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ max_dss_fck);
+
+ dss_cinfo.fck_div = fck_div;
+
+ r = dss_calc_clock_rates(&dss_cinfo);
+ if (r)
+ return r;
+
+ r = dss_set_clock_div(&dss_cinfo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
@@ -748,7 +831,7 @@ static void dss_runtime_put(void)
}
/* DEBUGFS */
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
void dss_debug_dump_clocks(struct seq_file *s)
{
dss_dump_clocks(s);
@@ -796,7 +879,6 @@ static const struct dss_features omap54xx_dss_feats __initconst = {
static int __init dss_init_features(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
const struct dss_features *src;
struct dss_features *dst;
@@ -806,7 +888,7 @@ static int __init dss_init_features(struct platform_device *pdev)
return -ENOMEM;
}
- switch (pdata->version) {
+ switch (omapdss_get_version()) {
case OMAPDSS_VER_OMAP24xx:
src = &omap24xx_dss_feats;
break;
@@ -871,15 +953,23 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
if (r)
return r;
+ r = dss_setup_default_clock();
+ if (r)
+ goto err_setup_clocks;
+
pm_runtime_enable(&pdev->dev);
r = dss_runtime_get();
if (r)
goto err_runtime_get;
+ dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+ dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
@@ -903,6 +993,7 @@ static int __init omap_dsshw_probe(struct platform_device *pdev)
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_setup_clocks:
dss_put_clocks();
return r;
}
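
dss_setup_default_clock() above chooses the smallest divider that keeps the DSS functional clock within its limit: with fck = prate * multiplier / fck_div, requiring fck <= max_dss_fck gives fck_div >= prate * multiplier / max_dss_fck, hence the DIV_ROUND_UP(). A standalone illustration of that arithmetic with invented example numbers (not taken from OMAP data sheets):

    #include <linux/kernel.h>    /* DIV_ROUND_UP() */

    /* smallest integer divider such that prate * mult / div <= max_fck */
    static unsigned long pick_fck_div(unsigned long prate, unsigned int mult,
            unsigned long max_fck)
    {
        return DIV_ROUND_UP(prate * mult, max_fck);
    }

    /*
     * e.g. prate = 864000000, mult = 1, max_fck = 173000000 -> div = 5,
     * giving fck = 172.8 MHz, just under the limit.
     */
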
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 6728892f9da..610c8e563da 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -23,44 +23,20 @@
#ifndef __OMAP2_DSS_H
#define __OMAP2_DSS_H
-#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT
-#define DEBUG
-#endif
+#include <linux/interrupt.h>
-#ifdef DEBUG
-extern bool dss_debug;
-#ifdef DSS_SUBSYS_NAME
-#define DSSDBG(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \
- ## __VA_ARGS__)
-#else
-#define DSSDBG(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__)
+#ifdef pr_fmt
+#undef pr_fmt
#endif
#ifdef DSS_SUBSYS_NAME
-#define DSSDBGF(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \
- ": %s(" format ")\n", \
- __func__, \
- ## __VA_ARGS__)
+#define pr_fmt(fmt) DSS_SUBSYS_NAME ": " fmt
#else
-#define DSSDBGF(format, ...) \
- if (dss_debug) \
- printk(KERN_DEBUG "omapdss: " \
- ": %s(" format ")\n", \
- __func__, \
- ## __VA_ARGS__)
-#endif
-
-#else /* DEBUG */
-#define DSSDBG(format, ...)
-#define DSSDBGF(format, ...)
+#define pr_fmt(fmt) fmt
#endif
+#define DSSDBG(format, ...) \
+ pr_debug(format, ## __VA_ARGS__)
#ifdef DSS_SUBSYS_NAME
#define DSSERR(format, ...) \
@@ -186,11 +162,10 @@ struct seq_file;
struct platform_device;
/* core */
-const char *dss_get_default_display_name(void);
+struct platform_device *dss_get_core_pdev(void);
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
-int dss_get_ctx_loss_count(struct device *dev);
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
@@ -204,55 +179,18 @@ void dss_put_device(struct omap_dss_device *dssdev);
void dss_copy_device_pdata(struct omap_dss_device *dst,
const struct omap_dss_device *src);
-/* apply */
-void dss_apply_init(void);
-int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr);
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
-void dss_mgr_start_update(struct omap_overlay_manager *mgr);
-int omap_dss_mgr_apply(struct omap_overlay_manager *mgr);
-
-int dss_mgr_enable(struct omap_overlay_manager *mgr);
-void dss_mgr_disable(struct omap_overlay_manager *mgr);
-int dss_mgr_set_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
-void dss_mgr_get_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
-int dss_mgr_set_device(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev);
-int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
-int dss_mgr_set_output(struct omap_overlay_manager *mgr,
- struct omap_dss_output *output);
-int dss_mgr_unset_output(struct omap_overlay_manager *mgr);
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings);
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config);
-const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
-
-bool dss_ovl_is_enabled(struct omap_overlay *ovl);
-int dss_ovl_enable(struct omap_overlay *ovl);
-int dss_ovl_disable(struct omap_overlay *ovl);
-int dss_ovl_set_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
-void dss_ovl_get_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
-int dss_ovl_set_manager(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr);
-int dss_ovl_unset_manager(struct omap_overlay *ovl);
-
/* output */
void dss_register_output(struct omap_dss_output *out);
void dss_unregister_output(struct omap_dss_output *out);
-struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev);
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
void dss_disable_all_devices(void);
-int dss_init_device(struct platform_device *pdev,
+int display_init_sysfs(struct platform_device *pdev,
struct omap_dss_device *dssdev);
-void dss_uninit_device(struct platform_device *pdev,
+void display_uninit_sysfs(struct platform_device *pdev,
struct omap_dss_device *dssdev);
/* manager */
@@ -299,21 +237,23 @@ void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
+unsigned long dss_get_dispc_clk_rate(void);
int dss_dpi_select_source(enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
void dss_debug_dump_clocks(struct seq_file *s);
#endif
+int dss_get_ctx_loss_count(void);
+
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
-void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src);
void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src);
void dss_select_lcd_clk_source(enum omap_channel channel,
@@ -326,6 +266,7 @@ void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
unsigned long dss_get_dpll4_rate(void);
+int dss_calc_clock_rates(struct dss_clock_info *cinfo);
int dss_set_clock_div(struct dss_clock_info *cinfo);
int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo);
@@ -413,8 +354,6 @@ static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
}
static inline struct platform_device *dsi_get_dsidev_from_id(int module)
{
- WARN("%s: DSI not compiled in, returning platform device as NULL\n",
- __func__);
return NULL;
}
#endif
@@ -427,15 +366,10 @@ void dpi_uninit_platform_driver(void) __exit;
int dispc_init_platform_driver(void) __init;
void dispc_uninit_platform_driver(void) __exit;
void dispc_dump_clocks(struct seq_file *s);
-void dispc_irq_handler(void);
-
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
-void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
void dispc_enable_fifomerge(bool enable);
@@ -455,36 +389,14 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
bool manual_update);
-int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
- bool replication, const struct omap_video_timings *mgr_timings,
- bool mem_to_mem);
-int dispc_ovl_enable(enum omap_plane plane, bool enable);
-void dispc_ovl_set_channel_out(enum omap_plane plane,
- enum omap_channel channel);
-
-void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
-u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
-bool dispc_mgr_go_busy(enum omap_channel channel);
-void dispc_mgr_go(enum omap_channel channel);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
-void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
-void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
-void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
-void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
-void dispc_mgr_set_lcd_type_tft(enum omap_channel channel);
-void dispc_mgr_set_timings(enum omap_channel channel,
- struct omap_video_timings *timings);
+
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
unsigned long dispc_core_clk_rate(void);
void dispc_mgr_set_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo);
+ const struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
-void dispc_mgr_setup(enum omap_channel channel,
- struct omap_overlay_manager_info *info);
u32 dispc_wb_get_framedone_irq(void);
bool dispc_wb_go_busy(void);
@@ -536,6 +448,8 @@ static inline unsigned long hdmi_get_pixel_clock(void)
#endif
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
+int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev);
+void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev);
void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings);
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
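
The DSSDBG() rework in dss.h above relies on the kernel's pr_fmt() convention: pr_debug() expands pr_fmt(fmt) at each call site, so a header can #undef and redefine it to prefix every later message with the subsystem name. A minimal sketch of the same idea (MY_SUBSYS_NAME and the function are illustrative):

    #include <linux/printk.h>

    #define MY_SUBSYS_NAME "EXAMPLE"

    /* same trick as dss.h: redefine pr_fmt so later pr_*() calls get a prefix */
    #ifdef pr_fmt
    #undef pr_fmt
    #endif
    #define pr_fmt(fmt) MY_SUBSYS_NAME ": " fmt

    static void my_debug_example(void)
    {
        /* logs "EXAMPLE: probed" because pr_debug() expands pr_fmt() here */
        pr_debug("probed\n");
    }
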
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 3e8287c8709..18688c12e30 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -429,8 +430,6 @@ static const struct dss_param_range omap2_dss_param_range[] = {
* scaler cannot scale a image with width more than 768.
*/
[FEAT_PARAM_LINEWIDTH] = { 1, 768 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -445,8 +444,6 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -461,8 +458,6 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap5_dss_param_range[] = {
@@ -477,8 +472,6 @@ static const struct dss_param_range omap5_dss_param_range[] = {
[FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
- [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -820,6 +813,7 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.audio_start = ti_hdmi_4xxx_audio_start,
.audio_stop = ti_hdmi_4xxx_audio_stop,
.audio_config = ti_hdmi_4xxx_audio_config,
+ .audio_get_dma_port = ti_hdmi_4xxx_audio_get_dma_port,
#endif
};
@@ -846,11 +840,13 @@ int dss_feat_get_num_mgrs(void)
{
return omap_current_dss_features->num_mgrs;
}
+EXPORT_SYMBOL(dss_feat_get_num_mgrs);
int dss_feat_get_num_ovls(void)
{
return omap_current_dss_features->num_ovls;
}
+EXPORT_SYMBOL(dss_feat_get_num_ovls);
int dss_feat_get_num_wbs(void)
{
@@ -871,16 +867,19 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel
{
return omap_current_dss_features->supported_displays[channel];
}
+EXPORT_SYMBOL(dss_feat_get_supported_displays);
enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel)
{
return omap_current_dss_features->supported_outputs[channel];
}
+EXPORT_SYMBOL(dss_feat_get_supported_outputs);
enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane)
{
return omap_current_dss_features->supported_color_modes[plane];
}
+EXPORT_SYMBOL(dss_feat_get_supported_color_modes);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index fc492ef72a5..489b9bec4a6 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -98,19 +98,12 @@ enum dss_range_param {
FEAT_PARAM_DSI_FCK,
FEAT_PARAM_DOWNSCALE,
FEAT_PARAM_LINEWIDTH,
- FEAT_PARAM_MGR_WIDTH,
- FEAT_PARAM_MGR_HEIGHT,
};
/* DSS Feature Functions */
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
int dss_feat_get_num_wbs(void);
unsigned long dss_feat_get_param_min(enum dss_range_param param);
unsigned long dss_feat_get_param_max(enum dss_range_param param);
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 0d6d7213a85..769d0828581 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -60,6 +60,7 @@
static struct {
struct mutex lock;
struct platform_device *pdev;
+
struct hdmi_ip_data ip_data;
struct clk *sys_clk;
@@ -295,6 +296,12 @@ static const struct hdmi_config vesa_timings[] = {
false, },
{ 0x55, HDMI_DVI },
},
+ {
+ { 1920, 1200, 154000, 32, 48, 80, 6, 3, 26,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x44, HDMI_DVI },
+ },
};
static int hdmi_runtime_get(void)
@@ -323,7 +330,6 @@ static void hdmi_runtime_put(void)
static int __init hdmi_init_display(struct omap_dss_device *dssdev)
{
- struct omap_dss_board_info *pdata = hdmi.pdev->dev.platform_data;
int r;
struct gpio gpios[] = {
@@ -334,13 +340,17 @@ static int __init hdmi_init_display(struct omap_dss_device *dssdev)
DSSDBG("init_display\n");
- dss_init_hdmi_ip_ops(&hdmi.ip_data, pdata->version);
+ dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
if (hdmi.vdda_hdmi_dac_reg == NULL) {
struct regulator *reg;
reg = devm_regulator_get(&hdmi.pdev->dev, "vdda_hdmi_dac");
+ /* DT HACK: try VDAC to make omapdss work for o4 sdp/panda */
+ if (IS_ERR(reg))
+ reg = devm_regulator_get(&hdmi.pdev->dev, "VDAC");
+
if (IS_ERR(reg)) {
DSSERR("can't get VDDA_HDMI_DAC regulator\n");
return PTR_ERR(reg);
@@ -356,7 +366,7 @@ static int __init hdmi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-static void __exit hdmi_uninit_display(struct omap_dss_device *dssdev)
+static void hdmi_uninit_display(struct omap_dss_device *dssdev)
{
DSSDBG("uninit_display\n");
@@ -399,7 +409,8 @@ static bool hdmi_timings_compare(struct omap_video_timings *timing1,
{
int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
- if ((timing2->pixel_clock == timing1->pixel_clock) &&
+ if ((DIV_ROUND_CLOSEST(timing2->pixel_clock, 1000) ==
+ DIV_ROUND_CLOSEST(timing1->pixel_clock, 1000)) &&
(timing2->x_res == timing1->x_res) &&
(timing2->y_res == timing1->y_res)) {
@@ -501,12 +512,9 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static int hdmi_power_on(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
- struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = dssdev->output->manager;
- unsigned long phy;
gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
gpio_set_value(hdmi.ls_oe_gpio, 1);
@@ -522,6 +530,38 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
if (r)
goto err_runtime_get;
+ /* Make selection of HDMI in DSS */
+ dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+
+ return 0;
+
+err_runtime_get:
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+err_vdac_enable:
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
+ return r;
+}
+
+static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+{
+ hdmi_runtime_put();
+ regulator_disable(hdmi.vdda_hdmi_dac_reg);
+ gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
+ gpio_set_value(hdmi.ls_oe_gpio, 0);
+}
+
+static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct omap_video_timings *p;
+ struct omap_overlay_manager *mgr = dssdev->output->manager;
+ unsigned long phy;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r)
+ return r;
+
dss_mgr_disable(mgr);
p = &hdmi.ip_data.cfg.timings;
@@ -549,17 +589,6 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_configure(&hdmi.ip_data);
- /* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
-
- /* Select the dispc clock source as PRCM clock, to ensure that it is not
- * DSI PLL source as the clock selected by DSI PLL might not be
- * sufficient for the resolution selected / that can be changed
- * dynamically by user. This can be moved to single location , say
- * Boardfile.
- */
- dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
-
/* bypass TV gamma table */
dispc_enable_gamma_table(0);
@@ -583,16 +612,11 @@ err_vid_enable:
err_phy_enable:
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err_pll_enable:
- hdmi_runtime_put();
-err_runtime_get:
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
-err_vdac_enable:
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ hdmi_power_off_core(dssdev);
return -EIO;
}
-static void hdmi_power_off(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
struct omap_overlay_manager *mgr = dssdev->output->manager;
@@ -601,12 +625,8 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
- hdmi_runtime_put();
-
- regulator_disable(hdmi.vdda_hdmi_dac_reg);
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+ hdmi_power_off_core(dssdev);
}
int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
@@ -716,7 +736,7 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- r = hdmi_power_on(dssdev);
+ r = hdmi_power_on_full(dssdev);
if (r) {
DSSERR("failed to power on device\n");
goto err1;
@@ -738,13 +758,48 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- hdmi_power_off(dssdev);
+ hdmi_power_off_full(dssdev);
omap_dss_stop_device(dssdev);
mutex_unlock(&hdmi.lock);
}
+int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi.ip_data.hpd_gpio = hdmi.hpd_gpio;
+
+ r = hdmi_power_on_core(dssdev);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
+
+ mutex_unlock(&hdmi.lock);
+ return 0;
+
+err0:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi.lock);
+
+ hdmi_power_off_core(dssdev);
+
+ mutex_unlock(&hdmi.lock);
+}
+
static int hdmi_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
@@ -913,7 +968,7 @@ int hdmi_audio_config(struct omap_dss_audio *audio)
static struct omap_dss_device * __init hdmi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -971,9 +1026,19 @@ static void __init hdmi_probe_pdata(struct platform_device *pdev)
return;
}
+ r = omapdss_output_set_device(&hdmi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&hdmi.output);
+ hdmi_uninit_display(dssdev);
dss_put_device(dssdev);
return;
}
@@ -1000,22 +1065,22 @@ static void __exit hdmi_uninit_output(struct platform_device *pdev)
/* HDMI HW IP initialisation */
static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
{
- struct resource *hdmi_mem;
+ struct resource *res;
int r;
hdmi.pdev = pdev;
mutex_init(&hdmi.lock);
+ mutex_init(&hdmi.ip_data.lock);
- hdmi_mem = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
- if (!hdmi_mem) {
+ res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
+ if (!res) {
DSSERR("can't get IORESOURCE_MEM HDMI\n");
return -EINVAL;
}
/* Base address taken from platform */
- hdmi.ip_data.base_wp = ioremap(hdmi_mem->start,
- resource_size(hdmi_mem));
+ hdmi.ip_data.base_wp = devm_request_and_ioremap(&pdev->dev, res);
if (!hdmi.ip_data.base_wp) {
DSSERR("can't ioremap WP\n");
return -ENOMEM;
@@ -1023,7 +1088,7 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
r = hdmi_get_clocks(pdev);
if (r) {
- iounmap(hdmi.ip_data.base_wp);
+ DSSERR("can't get clocks\n");
return r;
}
@@ -1034,9 +1099,11 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
- mutex_init(&hdmi.ip_data.lock);
-
- hdmi_panel_init();
+ r = hdmi_panel_init();
+ if (r) {
+ DSSERR("can't init panel\n");
+ goto err_panel_init;
+ }
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
@@ -1045,6 +1112,10 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi_probe_pdata(pdev);
return 0;
+
+err_panel_init:
+ hdmi_put_clocks();
+ return r;
}
static int __exit hdmi_remove_child(struct device *dev, void *data)
@@ -1068,8 +1139,6 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
hdmi_put_clocks();
- iounmap(hdmi.ip_data.base_wp);
-
return 0;
}
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 69fb115bab3..dfb8eda81b6 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -280,58 +280,6 @@ static void hdmi_panel_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- /*
- * TODO: notify audio users that the display was suspended. For now,
- * disable audio locally to not break our audio state machine.
- */
- hdmi_panel_audio_disable(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
- omapdss_hdmi_display_disable(dssdev);
-
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static int hdmi_panel_resume(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- r = omapdss_hdmi_display_enable(dssdev);
- if (r) {
- DSSERR("failed to power on\n");
- goto err;
- }
- /* TODO: notify audio users that the panel resumed. */
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
static void hdmi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -379,20 +327,22 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
{
int r;
+ bool need_enable;
mutex_lock(&hdmi.lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = omapdss_hdmi_display_enable(dssdev);
+ need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
+
+ if (need_enable) {
+ r = omapdss_hdmi_core_enable(dssdev);
if (r)
goto err;
}
r = omapdss_hdmi_read_edid(buf, len);
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- omapdss_hdmi_display_disable(dssdev);
+ if (need_enable)
+ omapdss_hdmi_core_disable(dssdev);
err:
mutex_unlock(&hdmi.lock);
@@ -402,20 +352,22 @@ err:
static bool hdmi_detect(struct omap_dss_device *dssdev)
{
int r;
+ bool need_enable;
mutex_lock(&hdmi.lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = omapdss_hdmi_display_enable(dssdev);
+ need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
+
+ if (need_enable) {
+ r = omapdss_hdmi_core_enable(dssdev);
if (r)
goto err;
}
r = omapdss_hdmi_detect();
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- omapdss_hdmi_display_disable(dssdev);
+ if (need_enable)
+ omapdss_hdmi_core_disable(dssdev);
err:
mutex_unlock(&hdmi.lock);
@@ -427,8 +379,6 @@ static struct omap_dss_driver hdmi_driver = {
.remove = hdmi_panel_remove,
.enable = hdmi_panel_enable,
.disable = hdmi_panel_disable,
- .suspend = hdmi_panel_suspend,
- .resume = hdmi_panel_resume,
.get_timings = hdmi_get_timings,
.set_timings = hdmi_set_timings,
.check_timings = hdmi_check_timings,
@@ -454,9 +404,7 @@ int hdmi_panel_init(void)
spin_lock_init(&hdmi.audio_lock);
#endif
- omap_dss_register_driver(&hdmi_driver);
-
- return 0;
+ return omap_dss_register_driver(&hdmi_driver);
}
void hdmi_panel_exit(void)
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index c54d2f620ce..2551eaa14c4 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -36,36 +36,6 @@
static int num_managers;
static struct omap_overlay_manager *managers;
-static inline struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
-{
- return mgr->output ? mgr->output->device : NULL;
-}
-
-static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
- u32 irq;
- int r;
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
- irq = DISPC_IRQ_EVSYNC_ODD;
- else if (dssdev->type == OMAP_DISPLAY_TYPE_HDMI)
- irq = DISPC_IRQ_EVSYNC_EVEN;
- else
- irq = dispc_mgr_get_vsync_irq(mgr->id);
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-
- dispc_runtime_put();
-
- return r;
-}
-
int dss_init_overlay_managers(struct platform_device *pdev)
{
int i, r;
@@ -99,15 +69,6 @@ int dss_init_overlay_managers(struct platform_device *pdev)
break;
}
- mgr->set_output = &dss_mgr_set_output;
- mgr->unset_output = &dss_mgr_unset_output;
- mgr->apply = &omap_dss_mgr_apply;
- mgr->set_manager_info = &dss_mgr_set_info;
- mgr->get_manager_info = &dss_mgr_get_info;
- mgr->wait_for_go = &dss_mgr_wait_for_go;
- mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
- mgr->get_device = &dss_mgr_get_device;
-
mgr->caps = 0;
mgr->supported_displays =
dss_feat_get_supported_displays(mgr->id);
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/omap2/dss/output.c
index 813f26682b7..79dea1a1a73 100644
--- a/drivers/video/omap2/dss/output.c
+++ b/drivers/video/omap2/dss/output.c
@@ -114,35 +114,67 @@ struct omap_dss_output *omap_dss_get_output(enum omap_dss_output_id id)
return NULL;
}
-struct omap_dss_output *omapdss_get_output_from_dssdev(struct omap_dss_device *dssdev)
+static const struct dss_mgr_ops *dss_mgr_ops;
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
{
- struct omap_dss_output *out = NULL;
- enum omap_dss_output_id id;
-
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_DPI);
- break;
- case OMAP_DISPLAY_TYPE_DBI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_DBI);
- break;
- case OMAP_DISPLAY_TYPE_SDI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_SDI);
- break;
- case OMAP_DISPLAY_TYPE_VENC:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_VENC);
- break;
- case OMAP_DISPLAY_TYPE_HDMI:
- out = omap_dss_get_output(OMAP_DSS_OUTPUT_HDMI);
- break;
- case OMAP_DISPLAY_TYPE_DSI:
- id = dssdev->phy.dsi.module == 0 ? OMAP_DSS_OUTPUT_DSI1 :
- OMAP_DSS_OUTPUT_DSI2;
- out = omap_dss_get_output(id);
- break;
- default:
- break;
- }
+ if (dss_mgr_ops)
+ return -EBUSY;
+
+ dss_mgr_ops = mgr_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL(dss_install_mgr_ops);
+
+void dss_uninstall_mgr_ops(void)
+{
+ dss_mgr_ops = NULL;
+}
+EXPORT_SYMBOL(dss_uninstall_mgr_ops);
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ dss_mgr_ops->set_timings(mgr, timings);
+}
+EXPORT_SYMBOL(dss_mgr_set_timings);
+
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ dss_mgr_ops->set_lcd_config(mgr, config);
+}
+EXPORT_SYMBOL(dss_mgr_set_lcd_config);
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr)
+{
+ return dss_mgr_ops->enable(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_enable);
+
+void dss_mgr_disable(struct omap_overlay_manager *mgr)
+{
+ dss_mgr_ops->disable(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_disable);
- return out;
+void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+{
+ dss_mgr_ops->start_update(mgr);
+}
+EXPORT_SYMBOL(dss_mgr_start_update);
+
+int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ return dss_mgr_ops->register_framedone_handler(mgr, handler, data);
+}
+EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
+
+void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data)
+{
+ dss_mgr_ops->unregister_framedone_handler(mgr, handler, data);
}
+EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
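The dss_mgr_ops table introduced above replaces the per-manager function pointers removed from manager.c and overlay.c in this series. A minimal sketch of the consumer side follows; only the member names exercised by the wrappers above are taken from the patch, and the compat_* callbacks are hypothetical placeholders.

/*
 * Sketch only: an apply/compat layer installing its manager ops through
 * the new dss_install_mgr_ops() hook. The compat_* callbacks are
 * placeholders, not part of this patch.
 */
static const struct dss_mgr_ops compat_mgr_ops = {
	.start_update			= compat_mgr_start_update,
	.enable				= compat_mgr_enable,
	.disable			= compat_mgr_disable,
	.set_timings			= compat_mgr_set_timings,
	.set_lcd_config			= compat_mgr_set_lcd_config,
	.register_framedone_handler	= compat_mgr_register_framedone,
	.unregister_framedone_handler	= compat_mgr_unregister_framedone,
};

static int __init compat_mgr_ops_init(void)
{
	/* dss_install_mgr_ops() returns -EBUSY if ops are already installed */
	return dss_install_mgr_ops(&compat_mgr_ops);
}

static void __exit compat_mgr_ops_exit(void)
{
	dss_uninstall_mgr_ops();
}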
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 45f4994bc6b..eccde322c28 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -38,13 +38,6 @@
static int num_overlays;
static struct omap_overlay *overlays;
-static inline struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
-{
- return ovl->manager ?
- (ovl->manager->output ? ovl->manager->output->device : NULL) :
- NULL;
-}
-
int omap_dss_get_num_overlays(void)
{
return num_overlays;
@@ -93,16 +86,6 @@ void dss_init_overlays(struct platform_device *pdev)
break;
}
- ovl->is_enabled = &dss_ovl_is_enabled;
- ovl->enable = &dss_ovl_enable;
- ovl->disable = &dss_ovl_disable;
- ovl->set_manager = &dss_ovl_set_manager;
- ovl->unset_manager = &dss_ovl_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_info;
- ovl->get_overlay_info = &dss_ovl_get_info;
- ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
- ovl->get_device = &dss_ovl_get_device;
-
ovl->caps = dss_feat_get_overlay_caps(ovl->id);
ovl->supported_modes =
dss_feat_get_supported_color_modes(ovl->id);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 7282e5af3e1..e903dd3f54d 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -342,7 +342,7 @@ static int rfbi_transfer_area(struct omap_dss_device *dssdev,
return 0;
}
-static void framedone_callback(void *data, u32 mask)
+static void framedone_callback(void *data)
{
void (*callback)(void *data);
@@ -908,8 +908,8 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- r = omap_dispc_register_isr(framedone_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
+ r = dss_mgr_register_framedone_handler(out->manager,
+ framedone_callback, NULL);
if (r) {
DSSERR("can't get FRAMEDONE irq\n");
goto err1;
@@ -933,8 +933,10 @@ EXPORT_SYMBOL(omapdss_rfbi_display_enable);
void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
{
- omap_dispc_unregister_isr(framedone_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
+ struct omap_dss_output *out = dssdev->output;
+
+ dss_mgr_unregister_framedone_handler(out->manager,
+ framedone_callback, NULL);
omap_dss_stop_device(dssdev);
rfbi_runtime_put();
@@ -950,7 +952,7 @@ static int __init rfbi_init_display(struct omap_dss_device *dssdev)
static struct omap_dss_device * __init rfbi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -999,9 +1001,18 @@ static void __init rfbi_probe_pdata(struct platform_device *rfbidev)
return;
}
+ r = omapdss_output_set_device(&rfbi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&rfbi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 7760851f6e5..62b5374ce43 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -205,7 +205,7 @@ static int __init sdi_init_display(struct omap_dss_device *dssdev)
static struct omap_dss_device * __init sdi_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -254,9 +254,18 @@ static void __init sdi_probe_pdata(struct platform_device *sdidev)
return;
}
+ r = omapdss_output_set_device(&sdi.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&sdi.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index b046c208cb9..216aa704f9d 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -102,6 +102,8 @@ struct ti_hdmi_ip_ops {
int (*audio_config)(struct hdmi_ip_data *ip_data,
struct omap_dss_audio *audio);
+
+ int (*audio_get_dma_port)(u32 *offset, u32 *size);
#endif
};
@@ -183,5 +185,6 @@ int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
struct omap_dss_audio *audio);
+int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size);
#endif
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index c23b85a20cd..e18b222ed73 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -899,7 +899,7 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
hdmi_read_reg(hdmi_av_base(ip_data), r))
#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
- (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
+ (i < 10) ? 32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \
hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
@@ -1418,4 +1418,13 @@ void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
REG_FLD_MOD(hdmi_wp_base(ip_data),
HDMI_WP_AUDIO_CTRL, false, 30, 30);
}
+
+int ti_hdmi_4xxx_audio_get_dma_port(u32 *offset, u32 *size)
+{
+ if (!offset || !size)
+ return -EINVAL;
+ *offset = HDMI_WP_AUDIO_DATA;
+ *size = 4;
+ return 0;
+}
#endif
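The new audio_get_dma_port hook reports the offset and width of the HDMI wrapper audio FIFO register (HDMI_WP_AUDIO_DATA, 4 bytes). A sketch of a caller is shown below; the helper name and the wp_phys_base parameter are assumptions for illustration, while ip_data->ops follows the hdmi_ip_data usage elsewhere in this patch.

/*
 * Sketch only: querying the audio DMA port through the new hook.
 * hdmi_audio_dma_setup() and wp_phys_base are illustrative, not part
 * of this patch.
 */
static int hdmi_audio_dma_setup(struct hdmi_ip_data *ip_data,
				u32 wp_phys_base, u32 *port, u32 *width)
{
	u32 offset, size;
	int r;

	if (!ip_data->ops->audio_get_dma_port)
		return -ENODEV;

	r = ip_data->ops->audio_get_dma_port(&offset, &size);
	if (r)
		return r;

	*port = wp_phys_base + offset;	/* physical address of the audio FIFO */
	*width = size;			/* bytes per FIFO access */

	return 0;
}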
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 56efa3bb465..006caf3cb50 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -744,7 +744,7 @@ static void venc_put_clocks(void)
static struct omap_dss_device * __init venc_find_dssdev(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = dss_get_default_display_name();
+ const char *def_disp_name = omapdss_get_default_display_name();
struct omap_dss_device *def_dssdev;
int i;
@@ -795,9 +795,18 @@ static void __init venc_probe_pdata(struct platform_device *vencdev)
return;
}
+ r = omapdss_output_set_device(&venc.output, dssdev);
+ if (r) {
+ DSSERR("failed to connect output to new device: %s\n",
+ dssdev->name);
+ dss_put_device(dssdev);
+ return;
+ }
+
r = dss_add_device(dssdev);
if (r) {
DSSERR("device %s register failed: %d\n", dssdev->name, r);
+ omapdss_output_unset_device(&venc.output);
dss_put_device(dssdev);
return;
}
diff --git a/drivers/video/omap2/dss/venc_panel.c b/drivers/video/omap2/dss/venc_panel.c
index d55b8784ecf..0d2b1a0834a 100644
--- a/drivers/video/omap2/dss/venc_panel.c
+++ b/drivers/video/omap2/dss/venc_panel.c
@@ -157,12 +157,6 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
goto end;
- if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
- /* suspended is the same as disabled with venc */
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- goto end;
- }
-
omapdss_venc_display_disable(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -170,17 +164,6 @@ end:
mutex_unlock(&venc_panel.lock);
}
-static int venc_panel_suspend(struct omap_dss_device *dssdev)
-{
- venc_panel_disable(dssdev);
- return 0;
-}
-
-static int venc_panel_resume(struct omap_dss_device *dssdev)
-{
- return venc_panel_enable(dssdev);
-}
-
static void venc_panel_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -222,8 +205,6 @@ static struct omap_dss_driver venc_driver = {
.enable = venc_panel_enable,
.disable = venc_panel_disable,
- .suspend = venc_panel_suspend,
- .resume = venc_panel_resume,
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 4ea17dc3258..4cb12ce6885 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -2,7 +2,6 @@ menuconfig FB_OMAP2
tristate "OMAP2+ frame buffer support"
depends on FB && OMAP2_DSS && !DRM_OMAP
- select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 532a31b3d96..d30b45d7264 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -28,10 +28,10 @@
#include <linux/omapfb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/sizes.h>
#include <video/omapdss.h>
#include <video/omapvrfb.h>
-#include <plat/vram.h>
#include "omapfb.h"
@@ -211,6 +211,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg;
int r = 0, i;
size_t size;
@@ -220,6 +221,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
size = PAGE_ALIGN(mi->size);
+ if (display && display->driver->sync)
+ display->driver->sync(display);
+
rg = ofbi->region;
down_write_nested(&rg->lock, rg->id);
@@ -279,7 +283,7 @@ static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
return 0;
}
-static int omapfb_update_window_nolock(struct fb_info *fbi,
+static int omapfb_update_window(struct fb_info *fbi,
u32 x, u32 y, u32 w, u32 h)
{
struct omap_dss_device *display = fb2display(fbi);
@@ -299,27 +303,6 @@ static int omapfb_update_window_nolock(struct fb_info *fbi,
return display->driver->update(display, x, y, w, h);
}
-/* This function is exported for SGX driver use */
-int omapfb_update_window(struct fb_info *fbi,
- u32 x, u32 y, u32 w, u32 h)
-{
- struct omapfb_info *ofbi = FB2OFB(fbi);
- struct omapfb2_device *fbdev = ofbi->fbdev;
- int r;
-
- if (!lock_fb_info(fbi))
- return -ENODEV;
- omapfb_lock(fbdev);
-
- r = omapfb_update_window_nolock(fbi, x, y, w, h);
-
- omapfb_unlock(fbdev);
- unlock_fb_info(fbi);
-
- return r;
-}
-EXPORT_SYMBOL(omapfb_update_window);
-
int omapfb_set_update_mode(struct fb_info *fbi,
enum omapfb_update_mode mode)
{
@@ -646,7 +629,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y,
+ r = omapfb_update_window(fbi, p.uwnd_o.x, p.uwnd_o.y,
p.uwnd_o.width, p.uwnd_o.height);
break;
@@ -663,7 +646,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y,
+ r = omapfb_update_window(fbi, p.uwnd.x, p.uwnd.y,
p.uwnd.width, p.uwnd.height);
break;
@@ -853,14 +836,15 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
case OMAPFB_GET_VRAM_INFO: {
- unsigned long vram, free, largest;
-
DBG("ioctl GET_VRAM_INFO\n");
- omap_vram_get_info(&vram, &free, &largest);
- p.vram_info.total = vram;
- p.vram_info.free = free;
- p.vram_info.largest_free_block = largest;
+ /*
+ * We don't have the ability to get this vram info anymore.
+ * Fill in something that should keep the applications working.
+ */
+ p.vram_info.total = SZ_1M * 64;
+ p.vram_info.free = SZ_1M * 64;
+ p.vram_info.largest_free_block = SZ_1M * 64;
if (copy_to_user((void __user *)arg, &p.vram_info,
sizeof(p.vram_info)))
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index bc225e46fdd..ca585ef37f2 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -31,7 +31,6 @@
#include <linux/omapfb.h>
#include <video/omapdss.h>
-#include <plat/vram.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
@@ -1258,11 +1257,10 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
switch (blank) {
case FB_BLANK_UNBLANK:
- if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
- if (display->driver->resume)
- r = display->driver->resume(display);
+ r = display->driver->enable(display);
if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
d->update_mode == OMAPFB_AUTO_UPDATE &&
@@ -1283,8 +1281,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (d->auto_update_work_enabled)
omapfb_stop_auto_update(fbdev, display);
- if (display->driver->suspend)
- r = display->driver->suspend(display);
+ display->driver->disable(display);
break;
@@ -1335,24 +1332,25 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
rg = ofbi->region;
- WARN_ON(atomic_read(&rg->map_count));
-
- if (rg->paddr)
- if (omap_vram_free(rg->paddr, rg->size))
- dev_err(fbdev->dev, "VRAM FREE failed\n");
+ if (rg->token == NULL)
+ return;
- if (rg->vaddr)
- iounmap(rg->vaddr);
+ WARN_ON(atomic_read(&rg->map_count));
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
/* unmap the 0 angle rotation */
if (rg->vrfb.vaddr[0]) {
iounmap(rg->vrfb.vaddr[0]);
- omap_vrfb_release_ctx(&rg->vrfb);
rg->vrfb.vaddr[0] = NULL;
}
+
+ omap_vrfb_release_ctx(&rg->vrfb);
}
+ dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
+ &rg->attrs);
+
+ rg->token = NULL;
rg->vaddr = NULL;
rg->paddr = 0;
rg->alloc = 0;
@@ -1387,7 +1385,9 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
- void __iomem *vaddr;
+ void *token;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_addr_t dma_handle;
int r;
rg = ofbi->region;
@@ -1402,42 +1402,40 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
size = PAGE_ALIGN(size);
- if (!paddr) {
- DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
- r = omap_vram_alloc(size, &paddr);
- } else {
- DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
- ofbi->id);
- r = omap_vram_reserve(paddr, size);
- }
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- if (r) {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+ DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
+
+ token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
+ GFP_KERNEL, &attrs);
+
+ if (token == NULL) {
dev_err(fbdev->dev, "failed to allocate framebuffer\n");
return -ENOMEM;
}
- if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
- vaddr = ioremap_wc(paddr, size);
-
- if (!vaddr) {
- dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
- omap_vram_free(paddr, size);
- return -ENOMEM;
- }
+ DBG("allocated VRAM paddr %lx, vaddr %p\n",
+ (unsigned long)dma_handle, token);
- DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
- } else {
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
r = omap_vrfb_request_ctx(&rg->vrfb);
if (r) {
+ dma_free_attrs(fbdev->dev, size, token, dma_handle,
+ &attrs);
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
}
-
- vaddr = NULL;
}
- rg->paddr = paddr;
- rg->vaddr = vaddr;
+ rg->attrs = attrs;
+ rg->token = token;
+ rg->dma_handle = dma_handle;
+
+ rg->paddr = (unsigned long)dma_handle;
+ rg->vaddr = (void __iomem *)token;
rg->size = size;
rg->alloc = 1;
@@ -1531,6 +1529,9 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
}
+ WARN_ONCE(paddr,
+ "reserving memory at predefined address not supported\n");
+
paddrs[fbnum] = paddr;
sizes[fbnum] = size;
@@ -1610,7 +1611,6 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
- struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg = ofbi->region;
unsigned long old_size = rg->size;
unsigned long old_paddr = rg->paddr;
@@ -1625,9 +1625,6 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
if (old_size == size && old_type == type)
return 0;
- if (display && display->driver->sync)
- display->driver->sync(display);
-
omapfb_free_fbmem(fbi);
if (size == 0) {
@@ -1882,7 +1879,6 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
dev_set_drvdata(fbdev->dev, NULL);
- kfree(fbdev);
}
static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
@@ -2258,26 +2254,28 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
{
struct fb_monspecs *specs;
u8 *edid;
- int r, i, best_xres, best_idx, len;
+ int r, i, best_idx, len;
if (!display->driver->read_edid)
return -ENODEV;
len = 0x80 * 2;
edid = kmalloc(len, GFP_KERNEL);
+ if (edid == NULL)
+ return -ENOMEM;
r = display->driver->read_edid(display, edid, len);
if (r < 0)
goto err1;
specs = kzalloc(sizeof(*specs), GFP_KERNEL);
+ if (specs == NULL) {
+ r = -ENOMEM;
+ goto err1;
+ }
fb_edid_to_monspecs(edid, specs);
- if (edid[126] > 0)
- fb_edid_add_monspecs(edid + 0x80, specs);
-
- best_xres = 0;
best_idx = -1;
for (i = 0; i < specs->modedb_len; ++i) {
@@ -2293,16 +2291,20 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
if (m->xres == 2880 || m->xres == 1440)
continue;
+ if (m->vmode & FB_VMODE_INTERLACED ||
+ m->vmode & FB_VMODE_DOUBLE)
+ continue;
+
fb_videomode_to_omap_timings(m, display, &t);
r = display->driver->check_timings(display, &t);
- if (r == 0 && best_xres < m->xres) {
- best_xres = m->xres;
+ if (r == 0) {
best_idx = i;
+ break;
}
}
- if (best_xres == 0) {
+ if (best_idx == -1) {
r = -ENOENT;
goto err2;
}
@@ -2371,15 +2373,62 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return 0;
}
+static int omapfb_init_connections(struct omapfb2_device *fbdev,
+ struct omap_dss_device *def_dssdev)
+{
+ int i, r;
+ struct omap_overlay_manager *mgr;
+
+ if (!def_dssdev->output) {
+ dev_err(fbdev->dev, "no output for the default display\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
+ struct omap_dss_output *out = dssdev->output;
+
+ mgr = omap_dss_get_overlay_manager(dssdev->channel);
+
+ if (!mgr || !out)
+ continue;
+
+ if (mgr->output)
+ mgr->unset_output(mgr);
+
+ mgr->set_output(mgr, out);
+ }
+
+ mgr = def_dssdev->output->manager;
+
+ if (!mgr) {
+ dev_err(fbdev->dev, "no ovl manager for the default display\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fbdev->num_overlays; i++) {
+ struct omap_overlay *ovl = fbdev->overlays[i];
+
+ if (ovl->manager)
+ ovl->unset_manager(ovl);
+
+ r = ovl->set_manager(ovl, mgr);
+ if (r)
+ dev_warn(fbdev->dev,
+ "failed to connect overlay %s to manager %s\n",
+ ovl->name, mgr->name);
+ }
+
+ return 0;
+}
+
static int __init omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
int r = 0;
int i;
- struct omap_overlay *ovl;
struct omap_dss_device *def_display;
struct omap_dss_device *dssdev;
- struct omap_dss_device *ovl_device;
DBG("omapfb_probe\n");
@@ -2389,7 +2438,8 @@ static int __init omapfb_probe(struct platform_device *pdev)
goto err0;
}
- fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL);
+ fbdev = devm_kzalloc(&pdev->dev, sizeof(struct omapfb2_device),
+ GFP_KERNEL);
if (fbdev == NULL) {
r = -ENOMEM;
goto err0;
@@ -2401,13 +2451,15 @@ static int __init omapfb_probe(struct platform_device *pdev)
"ignoring the module parameter vrfb=y\n");
}
+ r = omapdss_compat_init();
+ if (r)
+ goto err0;
mutex_init(&fbdev->mtx);
fbdev->dev = &pdev->dev;
platform_set_drvdata(pdev, fbdev);
- r = 0;
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
@@ -2430,9 +2482,6 @@ static int __init omapfb_probe(struct platform_device *pdev)
d->update_mode = OMAPFB_AUTO_UPDATE;
}
- if (r)
- goto cleanup;
-
if (fbdev->num_displays == 0) {
dev_err(&pdev->dev, "no displays\n");
r = -EINVAL;
@@ -2447,15 +2496,33 @@ static int __init omapfb_probe(struct platform_device *pdev)
for (i = 0; i < fbdev->num_managers; i++)
fbdev->managers[i] = omap_dss_get_overlay_manager(i);
- /* gfx overlay should be the default one. find a display
- * connected to that, and use it as default display */
- ovl = omap_dss_get_overlay(0);
- ovl_device = ovl->get_device(ovl);
- if (ovl_device) {
- def_display = ovl_device;
- } else {
- dev_warn(&pdev->dev, "cannot find default display\n");
- def_display = NULL;
+ def_display = NULL;
+
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ struct omap_dss_device *dssdev;
+ const char *def_name;
+
+ def_name = omapdss_get_default_display_name();
+
+ dssdev = fbdev->displays[i].dssdev;
+
+ if (def_name == NULL ||
+ (dssdev->name && strcmp(def_name, dssdev->name) == 0)) {
+ def_display = dssdev;
+ break;
+ }
+ }
+
+ if (def_display == NULL) {
+ dev_err(fbdev->dev, "failed to find default display\n");
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ r = omapfb_init_connections(fbdev, def_display);
+ if (r) {
+ dev_err(fbdev->dev, "failed to init overlay connections\n");
+ goto cleanup;
}
if (def_mode && strlen(def_mode) > 0) {
@@ -2506,6 +2573,7 @@ static int __init omapfb_probe(struct platform_device *pdev)
cleanup:
omapfb_free_resources(fbdev);
+ omapdss_compat_uninit();
err0:
dev_err(&pdev->dev, "failed to setup omapfb\n");
return r;
@@ -2521,6 +2589,8 @@ static int __exit omapfb_remove(struct platform_device *pdev)
omapfb_free_resources(fbdev);
+ omapdss_compat_uninit();
+
return 0;
}
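The allocation rework above drops the custom OMAP VRAM allocator in favour of the generic DMA API with attributes: write-combined memory, and no kernel mapping when the buffer is only reached through VRFB. A condensed sketch of that pattern follows; the helper name and parameters are placeholders, and size is assumed to be page-aligned by the caller as in omapfb_alloc_fbmem() above.

/*
 * Sketch only: write-combined framebuffer allocation as done above.
 * The caller must keep the initialised attrs (cf. rg->attrs) and pass
 * them back to dma_free_attrs() together with the returned token.
 */
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *fb_alloc_wc(struct device *dev, size_t size, bool use_vrfb,
			 dma_addr_t *dma_handle, struct dma_attrs *attrs)
{
	init_dma_attrs(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
	if (use_vrfb)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	/* returns an opaque token; NULL on allocation failure */
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, attrs);
}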
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 17aa174e187..18fa9e1d003 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -441,6 +441,7 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg;
unsigned long size;
int r;
@@ -455,6 +456,9 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
if (!lock_fb_info(fbi))
return -ENODEV;
+ if (display && display->driver->sync)
+ display->driver->sync(display);
+
rg = ofbi->region;
down_write_nested(&rg->lock, rg->id);
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index 5ced9b334d3..623cd872a36 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -28,6 +28,8 @@
#endif
#include <linux/rwsem.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
#include <video/omapdss.h>
@@ -49,6 +51,9 @@ extern bool omapfb_debug;
struct omapfb2_mem_region {
int id;
+ struct dma_attrs attrs;
+ void *token;
+ dma_addr_t dma_handle;
u32 paddr;
void __iomem *vaddr;
struct vrfb vrfb;
@@ -124,9 +129,6 @@ void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
-int omapfb_update_window(struct fb_info *fbi,
- u32 x, u32 y, u32 w, u32 h);
-
int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
struct fb_var_screeninfo *var);
@@ -144,16 +146,16 @@ int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
- int i;
+ struct omap_overlay *ovl;
/* XXX: returns the display connected to first attached overlay */
- for (i = 0; i < ofbi->num_overlays; i++) {
- struct omap_overlay *ovl = ofbi->overlays[i];
- return ovl->get_device(ovl);
- }
+ if (ofbi->num_overlays == 0)
+ return NULL;
- return NULL;
+ ovl = ofbi->overlays[0];
+
+ return ovl->get_device(ovl);
}
static inline struct omapfb_display_data *get_display_data(
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
deleted file mode 100644
index f2b15c4a75b..00000000000
--- a/drivers/video/omap2/vram.c
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * VRAM manager for OMAP
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*#define DEBUG*/
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/memblock.h>
-#include <linux/completion.h>
-#include <linux/debugfs.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-
-#include <asm/setup.h>
-
-#include <plat/vram.h>
-
-#ifdef DEBUG
-#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
-#else
-#define DBG(format, ...)
-#endif
-
-/* postponed regions are used to temporarily store region information at boot
- * time when we cannot yet allocate the region list */
-#define MAX_POSTPONED_REGIONS 10
-
-static bool vram_initialized;
-static int postponed_cnt;
-static struct {
- unsigned long paddr;
- size_t size;
-} postponed_regions[MAX_POSTPONED_REGIONS];
-
-struct vram_alloc {
- struct list_head list;
- unsigned long paddr;
- unsigned pages;
-};
-
-struct vram_region {
- struct list_head list;
- struct list_head alloc_list;
- unsigned long paddr;
- unsigned pages;
-};
-
-static DEFINE_MUTEX(region_mutex);
-static LIST_HEAD(region_list);
-
-static struct vram_region *omap_vram_create_region(unsigned long paddr,
- unsigned pages)
-{
- struct vram_region *rm;
-
- rm = kzalloc(sizeof(*rm), GFP_KERNEL);
-
- if (rm) {
- INIT_LIST_HEAD(&rm->alloc_list);
- rm->paddr = paddr;
- rm->pages = pages;
- }
-
- return rm;
-}
-
-#if 0
-static void omap_vram_free_region(struct vram_region *vr)
-{
- list_del(&vr->list);
- kfree(vr);
-}
-#endif
-
-static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
- unsigned long paddr, unsigned pages)
-{
- struct vram_alloc *va;
- struct vram_alloc *new;
-
- new = kzalloc(sizeof(*va), GFP_KERNEL);
-
- if (!new)
- return NULL;
-
- new->paddr = paddr;
- new->pages = pages;
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- if (va->paddr > new->paddr)
- break;
- }
-
- list_add_tail(&new->list, &va->list);
-
- return new;
-}
-
-static void omap_vram_free_allocation(struct vram_alloc *va)
-{
- list_del(&va->list);
- kfree(va);
-}
-
-int omap_vram_add_region(unsigned long paddr, size_t size)
-{
- struct vram_region *rm;
- unsigned pages;
-
- if (vram_initialized) {
- DBG("adding region paddr %08lx size %d\n",
- paddr, size);
-
- size &= PAGE_MASK;
- pages = size >> PAGE_SHIFT;
-
- rm = omap_vram_create_region(paddr, pages);
- if (rm == NULL)
- return -ENOMEM;
-
- list_add(&rm->list, &region_list);
- } else {
- if (postponed_cnt == MAX_POSTPONED_REGIONS)
- return -ENOMEM;
-
- postponed_regions[postponed_cnt].paddr = paddr;
- postponed_regions[postponed_cnt].size = size;
-
- ++postponed_cnt;
- }
- return 0;
-}
-
-int omap_vram_free(unsigned long paddr, size_t size)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
- unsigned start, end;
-
- DBG("free mem paddr %08lx size %d\n", paddr, size);
-
- size = PAGE_ALIGN(size);
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(rm, &region_list, list) {
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- start = alloc->paddr;
- end = alloc->paddr + (alloc->pages >> PAGE_SHIFT);
-
- if (start >= paddr && end < paddr + size)
- goto found;
- }
- }
-
- mutex_unlock(&region_mutex);
- return -EINVAL;
-
-found:
- omap_vram_free_allocation(alloc);
-
- mutex_unlock(&region_mutex);
- return 0;
-}
-EXPORT_SYMBOL(omap_vram_free);
-
-static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
- size_t size;
-
- size = pages << PAGE_SHIFT;
-
- list_for_each_entry(rm, &region_list, list) {
- unsigned long start, end;
-
- DBG("checking region %lx %d\n", rm->paddr, rm->pages);
-
- start = rm->paddr;
- end = start + (rm->pages << PAGE_SHIFT) - 1;
- if (start > paddr || end < paddr + size - 1)
- continue;
-
- DBG("block ok, checking allocs\n");
-
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- end = alloc->paddr - 1;
-
- if (start <= paddr && end >= paddr + size - 1)
- goto found;
-
- start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
- }
-
- end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
-
- if (!(start <= paddr && end >= paddr + size - 1))
- continue;
-found:
- DBG("found area start %lx, end %lx\n", start, end);
-
- if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
- return -ENOMEM;
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-int omap_vram_reserve(unsigned long paddr, size_t size)
-{
- unsigned pages;
- int r;
-
- DBG("reserve mem paddr %08lx size %d\n", paddr, size);
-
- size = PAGE_ALIGN(size);
- pages = size >> PAGE_SHIFT;
-
- mutex_lock(&region_mutex);
-
- r = _omap_vram_reserve(paddr, pages);
-
- mutex_unlock(&region_mutex);
-
- return r;
-}
-EXPORT_SYMBOL(omap_vram_reserve);
-
-static int _omap_vram_alloc(unsigned pages, unsigned long *paddr)
-{
- struct vram_region *rm;
- struct vram_alloc *alloc;
-
- list_for_each_entry(rm, &region_list, list) {
- unsigned long start, end;
-
- DBG("checking region %lx %d\n", rm->paddr, rm->pages);
-
- start = rm->paddr;
-
- list_for_each_entry(alloc, &rm->alloc_list, list) {
- end = alloc->paddr;
-
- if (end - start >= pages << PAGE_SHIFT)
- goto found;
-
- start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
- }
-
- end = rm->paddr + (rm->pages << PAGE_SHIFT);
-found:
- if (end - start < pages << PAGE_SHIFT)
- continue;
-
- DBG("found %lx, end %lx\n", start, end);
-
- alloc = omap_vram_create_allocation(rm, start, pages);
- if (alloc == NULL)
- return -ENOMEM;
-
- *paddr = start;
-
- return 0;
- }
-
- return -ENOMEM;
-}
-
-int omap_vram_alloc(size_t size, unsigned long *paddr)
-{
- unsigned pages;
- int r;
-
- BUG_ON(!size);
-
- DBG("alloc mem size %d\n", size);
-
- size = PAGE_ALIGN(size);
- pages = size >> PAGE_SHIFT;
-
- mutex_lock(&region_mutex);
-
- r = _omap_vram_alloc(pages, paddr);
-
- mutex_unlock(&region_mutex);
-
- return r;
-}
-EXPORT_SYMBOL(omap_vram_alloc);
-
-void omap_vram_get_info(unsigned long *vram,
- unsigned long *free_vram,
- unsigned long *largest_free_block)
-{
- struct vram_region *vr;
- struct vram_alloc *va;
-
- *vram = 0;
- *free_vram = 0;
- *largest_free_block = 0;
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(vr, &region_list, list) {
- unsigned free;
- unsigned long pa;
-
- pa = vr->paddr;
- *vram += vr->pages << PAGE_SHIFT;
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- free = va->paddr - pa;
- *free_vram += free;
- if (free > *largest_free_block)
- *largest_free_block = free;
- pa = va->paddr + (va->pages << PAGE_SHIFT);
- }
-
- free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
- *free_vram += free;
- if (free > *largest_free_block)
- *largest_free_block = free;
- }
-
- mutex_unlock(&region_mutex);
-}
-EXPORT_SYMBOL(omap_vram_get_info);
-
-#if defined(CONFIG_DEBUG_FS)
-static int vram_debug_show(struct seq_file *s, void *unused)
-{
- struct vram_region *vr;
- struct vram_alloc *va;
- unsigned size;
-
- mutex_lock(&region_mutex);
-
- list_for_each_entry(vr, &region_list, list) {
- size = vr->pages << PAGE_SHIFT;
- seq_printf(s, "%08lx-%08lx (%d bytes)\n",
- vr->paddr, vr->paddr + size - 1,
- size);
-
- list_for_each_entry(va, &vr->alloc_list, list) {
- size = va->pages << PAGE_SHIFT;
- seq_printf(s, " %08lx-%08lx (%d bytes)\n",
- va->paddr, va->paddr + size - 1,
- size);
- }
- }
-
- mutex_unlock(&region_mutex);
-
- return 0;
-}
-
-static int vram_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, vram_debug_show, inode->i_private);
-}
-
-static const struct file_operations vram_debug_fops = {
- .open = vram_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init omap_vram_create_debugfs(void)
-{
- struct dentry *d;
-
- d = debugfs_create_file("vram", S_IRUGO, NULL,
- NULL, &vram_debug_fops);
- if (IS_ERR(d))
- return PTR_ERR(d);
-
- return 0;
-}
-#endif
-
-static __init int omap_vram_init(void)
-{
- int i;
-
- vram_initialized = 1;
-
- for (i = 0; i < postponed_cnt; i++)
- omap_vram_add_region(postponed_regions[i].paddr,
- postponed_regions[i].size);
-
-#ifdef CONFIG_DEBUG_FS
- if (omap_vram_create_debugfs())
- pr_err("VRAM: Failed to create debugfs file\n");
-#endif
-
- return 0;
-}
-
-arch_initcall(omap_vram_init);
-
-/* boottime vram alloc stuff */
-
-/* set from board file */
-static u32 omap_vram_sdram_start __initdata;
-static u32 omap_vram_sdram_size __initdata;
-
-/* set from kernel cmdline */
-static u32 omap_vram_def_sdram_size __initdata;
-static u32 omap_vram_def_sdram_start __initdata;
-
-static int __init omap_vram_early_vram(char *p)
-{
- omap_vram_def_sdram_size = memparse(p, &p);
- if (*p == ',')
- omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
- return 0;
-}
-early_param("vram", omap_vram_early_vram);
-
-/*
- * Called from map_io. We need to call to this early enough so that we
- * can reserve the fixed SDRAM regions before VM could get hold of them.
- */
-void __init omap_vram_reserve_sdram_memblock(void)
-{
- u32 paddr;
- u32 size = 0;
-
- /* cmdline arg overrides the board file definition */
- if (omap_vram_def_sdram_size) {
- size = omap_vram_def_sdram_size;
- paddr = omap_vram_def_sdram_start;
- }
-
- if (!size) {
- size = omap_vram_sdram_size;
- paddr = omap_vram_sdram_start;
- }
-
-#ifdef CONFIG_OMAP2_VRAM_SIZE
- if (!size) {
- size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
- paddr = 0;
- }
-#endif
-
- if (!size)
- return;
-
- size = ALIGN(size, SZ_2M);
-
- if (paddr) {
- if (paddr & ~PAGE_MASK) {
- pr_err("VRAM start address 0x%08x not page aligned\n",
- paddr);
- return;
- }
-
- if (!memblock_is_region_memory(paddr, size)) {
- pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
- paddr, paddr + size - 1);
- return;
- }
-
- if (memblock_is_region_reserved(paddr, size)) {
- pr_err("FB: failed to reserve VRAM - busy\n");
- return;
- }
-
- if (memblock_reserve(paddr, size) < 0) {
- pr_err("FB: failed to reserve VRAM - no memory\n");
- return;
- }
- } else {
- paddr = memblock_alloc(size, SZ_2M);
- }
-
- memblock_free(paddr, size);
- memblock_remove(paddr, size);
-
- omap_vram_add_region(paddr, size);
-
- pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
-}
-
-void __init omap_vram_set_sdram_vram(u32 size, u32 start)
-{
- omap_vram_sdram_start = start;
- omap_vram_sdram_size = size;
-}
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 2ed7b633bbd..1a00ad241ed 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -189,7 +189,7 @@ struct s3c_fb_vsync {
/**
* struct s3c_fb - overall hardware state of the hardware
- * @slock: The spinlock protection for this data sturucture.
+ * @slock: The spinlock protection for this data structure.
* @dev: The device that we bound to, for printing, etc.
* @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
* @lcd_clk: The clk (sclk) feeding pixclk.
@@ -268,10 +268,10 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
case 8:
if (sfb->variant.palette[win->index] != 0) {
/* non palletised, A:1,R:2,G:3,B:2 mode */
- var->red.offset = 4;
+ var->red.offset = 5;
var->green.offset = 2;
var->blue.offset = 0;
- var->red.length = 5;
+ var->red.length = 2;
var->green.length = 3;
var->blue.length = 2;
var->transp.offset = 7;
@@ -288,6 +288,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/* 666 with one bit alpha/transparency */
var->transp.offset = 18;
var->transp.length = 1;
+ /* drop through */
case 18:
var->bits_per_pixel = 32;
@@ -329,6 +330,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
default:
dev_err(sfb->dev, "invalid bpp\n");
+ return -EINVAL;
}
dev_dbg(sfb->dev, "%s: verified parameters\n", __func__);
@@ -1544,8 +1546,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int s3c_fb_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_win *win;
int win_no;
@@ -1572,8 +1573,7 @@ static int s3c_fb_suspend(struct device *dev)
static int s3c_fb_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_platdata *pd = sfb->pdata;
struct s3c_fb_win *win;
int win_no;
@@ -1623,7 +1623,7 @@ static int s3c_fb_resume(struct device *dev)
if (!win)
continue;
- dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
+ dev_dbg(dev, "resuming window %d\n", win_no);
s3c_fb_set_par(win->fbinfo);
}
@@ -1636,8 +1636,7 @@ static int s3c_fb_resume(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int s3c_fb_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
if (!sfb->variant.has_clksel)
clk_disable_unprepare(sfb->lcd_clk);
@@ -1649,8 +1648,7 @@ static int s3c_fb_runtime_suspend(struct device *dev)
static int s3c_fb_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c_fb *sfb = platform_get_drvdata(pdev);
+ struct s3c_fb *sfb = dev_get_drvdata(dev);
struct s3c_fb_platdata *pd = sfb->pdata;
clk_prepare_enable(sfb->bus_clk);
@@ -1910,7 +1908,7 @@ static struct s3c_fb_driverdata s3c_fb_data_exynos4 = {
static struct s3c_fb_driverdata s3c_fb_data_exynos5 = {
.variant = {
.nr_windows = 5,
- .vidtcon = VIDTCON0,
+ .vidtcon = FIMD_V8_VIDTCON0,
.wincon = WINCON(0),
.winmap = WINxMAP(0),
.keycon = WKEYCON,
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 3951fdae5f6..f4962292792 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -127,13 +127,12 @@ static void sh_mipi_shutdown(struct platform_device *pdev)
sh_mipi_dsi_enable(mipi, false);
}
-static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
+static int sh_mipi_setup(struct sh_mipi *mipi, const struct fb_videomode *mode)
{
void __iomem *base = mipi->base;
- struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
u32 pctype, datatype, pixfmt, linelength, vmctr2;
u32 tmp, top, bottom, delay, div;
- bool yuv;
int bpp;
/*
@@ -146,95 +145,79 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
pctype = 0;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_RGB565:
pctype = 1;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = false;
+ linelength = mode->xres * 2;
break;
case MIPI_RGB666_LP:
pctype = 2;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_RGB666:
pctype = 3;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
- yuv = false;
+ linelength = (mode->xres * 18 + 7) / 8;
break;
case MIPI_BGR888:
pctype = 8;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_BGR565:
pctype = 9;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = false;
+ linelength = mode->xres * 2;
break;
case MIPI_BGR666_LP:
pctype = 0xa;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = ch->lcd_modes[0].xres * 3;
- yuv = false;
+ linelength = mode->xres * 3;
break;
case MIPI_BGR666:
pctype = 0xb;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
- yuv = false;
+ linelength = (mode->xres * 18 + 7) / 8;
break;
case MIPI_YUYV:
pctype = 4;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = true;
+ linelength = mode->xres * 2;
break;
case MIPI_UYVY:
pctype = 5;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = ch->lcd_modes[0].xres * 2;
- yuv = true;
+ linelength = mode->xres * 2;
break;
case MIPI_YUV420_L:
pctype = 6;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
- linelength = (ch->lcd_modes[0].xres * 12 + 7) / 8;
- yuv = true;
+ linelength = (mode->xres * 12 + 7) / 8;
break;
case MIPI_YUV420:
pctype = 7;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
/* Length of U/V line */
- linelength = (ch->lcd_modes[0].xres + 1) / 2;
- yuv = true;
+ linelength = (mode->xres + 1) / 2;
break;
default:
return -EINVAL;
}
- if ((yuv && ch->interface_type != YUV422) ||
- (!yuv && ch->interface_type != RGB24))
- return -EINVAL;
-
if (!pdata->lane)
return -EINVAL;
@@ -293,7 +276,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
*/
iowrite32(0x00000006, mipi->linkbase + DTCTR);
/* VSYNC width = 2 (<< 17) */
- iowrite32((ch->lcd_modes[0].vsync_len << pdata->vsynw_offset) |
+ iowrite32((mode->vsync_len << pdata->vsynw_offset) |
(pdata->clksrc << 16) | (pctype << 12) | datatype,
mipi->linkbase + VMCTR1);
@@ -327,7 +310,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
top = linelength << 16; /* RGBLEN */
bottom = 0x00000001;
if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
- bottom = (pdata->lane * ch->lcd_modes[0].hsync_len) - 10;
+ bottom = (pdata->lane * mode->hsync_len) - 10;
iowrite32(top | bottom , mipi->linkbase + VMLEN1);
/*
@@ -347,18 +330,18 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
div = 2;
if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
- top = ch->lcd_modes[0].hsync_len + ch->lcd_modes[0].left_margin;
+ top = mode->hsync_len + mode->left_margin;
top = ((pdata->lane * top / div) - 10) << 16;
}
if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
- bottom = ch->lcd_modes[0].right_margin;
+ bottom = mode->right_margin;
bottom = (pdata->lane * bottom / div) - 12;
}
- bpp = linelength / ch->lcd_modes[0].xres; /* byte / pixel */
+ bpp = linelength / mode->xres; /* byte / pixel */
if ((pdata->lane / div) > bpp) {
- tmp = ch->lcd_modes[0].xres / bpp; /* output cycle */
- tmp = ch->lcd_modes[0].xres - tmp; /* (input - output) cycle */
+ tmp = mode->xres / bpp; /* output cycle */
+ tmp = mode->xres - tmp; /* (input - output) cycle */
delay = (pdata->lane * tmp);
}
@@ -369,7 +352,7 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
/* setup LCD panel */
/* cf. drivers/video/omap/lcd_mipid.c */
- sh_mipi_dcs(ch->chan, MIPI_DCS_EXIT_SLEEP_MODE);
+ sh_mipi_dcs(pdata->channel, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
/*
* [7] - Page Address Mode
@@ -381,11 +364,11 @@ static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
* [1] - Flip Horizontal
* [0] - Flip Vertical
*/
- sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
/* cf. set_data_lines() */
- sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_PIXEL_FORMAT,
+ sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_PIXEL_FORMAT,
pixfmt << 4);
- sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+ sh_mipi_dcs(pdata->channel, MIPI_DCS_SET_DISPLAY_ON);
/* Enable timeout counters */
iowrite32(0x00000f00, base + DSICTRL);
@@ -405,7 +388,7 @@ static int mipi_display_on(struct sh_mobile_lcdc_entity *entity)
if (ret < 0)
goto mipi_display_on_fail1;
- ret = sh_mipi_setup(mipi, pdata);
+ ret = sh_mipi_setup(mipi, &entity->def_mode);
if (ret < 0)
goto mipi_display_on_fail2;
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 699487c287b..e78fe4bc152 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -438,7 +438,7 @@ static unsigned long lcdc_sys_read_data(void *handle)
return lcdc_read(ch->lcdc, _LDDRDR) & LDDRDR_DRD_MASK;
}
-struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
+static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
lcdc_sys_write_index,
lcdc_sys_write_data,
lcdc_sys_read_data,
@@ -586,8 +586,8 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
* Just turn on, if we run a resume here, the
* logo disappears.
*/
- info->var.width = monspec->max_x * 10;
- info->var.height = monspec->max_y * 10;
+ info->var.width = ch->display.width;
+ info->var.height = ch->display.height;
sh_mobile_lcdc_display_on(ch);
} else {
/* New monitor or have to wake up */
@@ -1614,6 +1614,15 @@ static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info)
return 1;
}
+static int
+sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
+ ovl->dma_handle, ovl->fb_size);
+}
+
static struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
@@ -1626,6 +1635,7 @@ static struct fb_ops sh_mobile_lcdc_overlay_ops = {
.fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
.fb_check_var = sh_mobile_lcdc_overlay_check_var,
.fb_set_par = sh_mobile_lcdc_overlay_set_par,
+ .fb_mmap = sh_mobile_lcdc_overlay_mmap,
};
static void
@@ -2093,6 +2103,15 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info)
return 0;
}
+static int
+sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+
+ return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
+ ch->dma_handle, ch->fb_size);
+}
+
static struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
@@ -2108,6 +2127,7 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_release = sh_mobile_lcdc_release,
.fb_check_var = sh_mobile_lcdc_check_var,
.fb_set_par = sh_mobile_lcdc_set_par,
+ .fb_mmap = sh_mobile_lcdc_mmap,
};
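
The two fb_mmap handlers added above let userspace map the coherently allocated frame buffer straight through dma_mmap_coherent() instead of copying via fb_read()/fb_write(). A minimal sketch of a client that relies on this, assuming the main LCDC channel appears as /dev/fb0 (the device path is an assumption, not something the patch specifies):

/* Sketch only: map the LCDC frame buffer and blank it.
 * /dev/fb0 is an assumed path; error handling is minimal. */
#include <fcntl.h>
#include <linux/fb.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        struct fb_fix_screeninfo fix;
        int fd = open("/dev/fb0", O_RDWR);

        if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
                return 1;

        /* Served by sh_mobile_lcdc_mmap()/dma_mmap_coherent() above. */
        void *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
        if (fb == MAP_FAILED)
                return 1;

        memset(fb, 0, fix.smem_len);    /* clear the visible buffer */
        munmap(fb, fix.smem_len);
        close(fd);
        return 0;
}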
static void
@@ -2167,7 +2187,7 @@ sh_mobile_lcdc_channel_fb_cleanup(struct sh_mobile_lcdc_chan *ch)
static int __devinit
sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
- const struct fb_videomode *mode,
+ const struct fb_videomode *modes,
unsigned int num_modes)
{
struct sh_mobile_lcdc_priv *priv = ch->lcdc;
@@ -2193,7 +2213,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
- fb_videomode_to_modelist(mode, num_modes, &info->modelist);
+ fb_videomode_to_modelist(modes, num_modes, &info->modelist);
ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
if (ret < 0) {
@@ -2227,9 +2247,9 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
* default.
*/
var = &info->var;
- fb_videomode_to_var(var, mode);
- var->width = ch->cfg->panel_cfg.width;
- var->height = ch->cfg->panel_cfg.height;
+ fb_videomode_to_var(var, modes);
+ var->width = ch->display.width;
+ var->height = ch->display.height;
var->xres_virtual = ch->xres_virtual;
var->yres_virtual = ch->yres_virtual;
var->activate = FB_ACTIVATE_NOW;
@@ -2262,6 +2282,7 @@ static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
brightness = 0;
+ ch->bl_brightness = brightness;
return ch->cfg->bl_info.set_brightness(brightness);
}
@@ -2269,7 +2290,7 @@ static int sh_mobile_lcdc_get_brightness(struct backlight_device *bdev)
{
struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
- return ch->cfg->bl_info.get_brightness();
+ return ch->bl_brightness;
}
static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
@@ -2516,10 +2537,10 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *
}
static int __devinit
-sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
- struct sh_mobile_lcdc_overlay *ovl)
+sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_overlay *ovl)
{
const struct sh_mobile_lcdc_format_info *format;
+ struct device *dev = ovl->channel->lcdc->dev;
int ret;
if (ovl->cfg->fourcc == 0)
@@ -2528,7 +2549,7 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
/* Validate the format. */
format = sh_mobile_format_info(ovl->cfg->fourcc);
if (format == NULL) {
- dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc);
+ dev_err(dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc);
return -EINVAL;
}
@@ -2556,10 +2577,10 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
/* Allocate frame buffer memory. */
ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres
* format->bpp / 8 * 2;
- ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size,
- &ovl->dma_handle, GFP_KERNEL);
+ ovl->fb_mem = dma_alloc_coherent(dev, ovl->fb_size, &ovl->dma_handle,
+ GFP_KERNEL);
if (!ovl->fb_mem) {
- dev_err(priv->dev, "unable to allocate buffer\n");
+ dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
}
@@ -2571,11 +2592,11 @@ sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
}
static int __devinit
-sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
- struct sh_mobile_lcdc_chan *ch)
+sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch)
{
const struct sh_mobile_lcdc_format_info *format;
const struct sh_mobile_lcdc_chan_cfg *cfg = ch->cfg;
+ struct device *dev = ch->lcdc->dev;
const struct fb_videomode *max_mode;
const struct fb_videomode *mode;
unsigned int num_modes;
@@ -2588,7 +2609,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* Validate the format. */
format = sh_mobile_format_info(cfg->fourcc);
if (format == NULL) {
- dev_err(priv->dev, "Invalid FOURCC %08x.\n", cfg->fourcc);
+ dev_err(dev, "Invalid FOURCC %08x.\n", cfg->fourcc);
return -EINVAL;
}
@@ -2604,7 +2625,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* NV12/NV21 buffers must have even number of lines */
if ((cfg->fourcc == V4L2_PIX_FMT_NV12 ||
cfg->fourcc == V4L2_PIX_FMT_NV21) && (mode->yres & 0x1)) {
- dev_err(priv->dev, "yres must be multiple of 2 for "
+ dev_err(dev, "yres must be multiple of 2 for "
"YCbCr420 mode.\n");
return -EINVAL;
}
@@ -2618,7 +2639,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
if (!max_size)
max_size = MAX_XRES * MAX_YRES;
else
- dev_dbg(priv->dev, "Found largest videomode %ux%u\n",
+ dev_dbg(dev, "Found largest videomode %ux%u\n",
max_mode->xres, max_mode->yres);
if (cfg->lcd_modes == NULL) {
@@ -2652,10 +2673,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
/* Allocate frame buffer memory. */
ch->fb_size = max_size * format->bpp / 8 * 2;
- ch->fb_mem = dma_alloc_coherent(priv->dev, ch->fb_size, &ch->dma_handle,
+ ch->fb_mem = dma_alloc_coherent(dev, ch->fb_size, &ch->dma_handle,
GFP_KERNEL);
if (ch->fb_mem == NULL) {
- dev_err(priv->dev, "unable to allocate buffer\n");
+ dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
}
@@ -2663,8 +2684,7 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
if (cfg->tx_dev) {
if (!cfg->tx_dev->dev.driver ||
!try_module_get(cfg->tx_dev->dev.driver->owner)) {
- dev_warn(priv->dev,
- "unable to get transmitter device\n");
+ dev_warn(dev, "unable to get transmitter device\n");
return -EINVAL;
}
ch->tx_dev = platform_get_drvdata(cfg->tx_dev);
@@ -2772,9 +2792,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
for (i = 0; i < num_channels; i++) {
- struct sh_mobile_lcdc_chan *ch = priv->ch + i;
+ struct sh_mobile_lcdc_chan *ch = &priv->ch[i];
- error = sh_mobile_lcdc_channel_init(priv, ch);
+ error = sh_mobile_lcdc_channel_init(ch);
if (error)
goto err1;
}
@@ -2785,7 +2805,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
ovl->cfg = &pdata->overlays[i];
ovl->channel = &priv->ch[0];
- error = sh_mobile_lcdc_overlay_init(priv, ovl);
+ error = sh_mobile_lcdc_overlay_init(ovl);
if (error)
goto err1;
}
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index 0f92f6544b9..f839adef1d9 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -94,6 +94,7 @@ struct sh_mobile_lcdc_chan {
/* Backlight */
struct backlight_device *bl;
+ unsigned int bl_brightness;
/* FB */
struct fb_info *info;
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/ssd1307fb.c
new file mode 100644
index 00000000000..6101f5c2f62
--- /dev/null
+++ b/drivers/video/ssd1307fb.c
@@ -0,0 +1,396 @@
+/*
+ * Driver for the Solomon SSD1307 OLED controller
+ *
+ * Copyright 2012 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pwm.h>
+#include <linux/delay.h>
+
+#define SSD1307FB_WIDTH 96
+#define SSD1307FB_HEIGHT 16
+
+#define SSD1307FB_DATA 0x40
+#define SSD1307FB_COMMAND 0x80
+
+#define SSD1307FB_CONTRAST 0x81
+#define SSD1307FB_SEG_REMAP_ON 0xa1
+#define SSD1307FB_DISPLAY_OFF 0xae
+#define SSD1307FB_DISPLAY_ON 0xaf
+#define SSD1307FB_START_PAGE_ADDRESS 0xb0
+
+struct ssd1307fb_par {
+ struct i2c_client *client;
+ struct fb_info *info;
+ struct pwm_device *pwm;
+ u32 pwm_period;
+ int reset;
+};
+
+static struct fb_fix_screeninfo ssd1307fb_fix __devinitdata = {
+ .id = "Solomon SSD1307",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = SSD1307FB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ssd1307fb_var __devinitdata = {
+ .xres = SSD1307FB_WIDTH,
+ .yres = SSD1307FB_HEIGHT,
+ .xres_virtual = SSD1307FB_WIDTH,
+ .yres_virtual = SSD1307FB_HEIGHT,
+ .bits_per_pixel = 1,
+};
+
+static int ssd1307fb_write_array(struct i2c_client *client, u8 type, u8 *cmd, u32 len)
+{
+ u8 *buf;
+ int ret = 0;
+
+ buf = kzalloc(len + 1, GFP_KERNEL);
+ if (!buf) {
+ dev_err(&client->dev, "Couldn't allocate sending buffer.\n");
+ return -ENOMEM;
+ }
+
+ buf[0] = type;
+ memcpy(buf + 1, cmd, len);
+
+ ret = i2c_master_send(client, buf, len + 1);
+ if (ret != len + 1) {
+ dev_err(&client->dev, "Couldn't send I2C command.\n");
+ goto error;
+ }
+
+error:
+ kfree(buf);
+ return ret;
+}
+
+static inline int ssd1307fb_write_cmd_array(struct i2c_client *client, u8 *cmd, u32 len)
+{
+ return ssd1307fb_write_array(client, SSD1307FB_COMMAND, cmd, len);
+}
+
+static inline int ssd1307fb_write_cmd(struct i2c_client *client, u8 cmd)
+{
+ return ssd1307fb_write_cmd_array(client, &cmd, 1);
+}
+
+static inline int ssd1307fb_write_data_array(struct i2c_client *client, u8 *cmd, u32 len)
+{
+ return ssd1307fb_write_array(client, SSD1307FB_DATA, cmd, len);
+}
+
+static inline int ssd1307fb_write_data(struct i2c_client *client, u8 data)
+{
+ return ssd1307fb_write_data_array(client, &data, 1);
+}
+
+static void ssd1307fb_update_display(struct ssd1307fb_par *par)
+{
+ u8 *vmem = par->info->screen_base;
+ int i, j, k;
+
+ /*
+ * The screen is divided into pages, each having a height of 8
+ * pixels and the width of the screen. When sending a byte of
+ * data to the controller, it gives the 8 bits for the current
+ * column. I.e., the first byte is the 8 bits of the first
+ * column, then the 8 bits of the second column, etc.
+ *
+ *
+ * Representation of the screen, assuming it is 5 pixels
+ * wide. Each letter-number combination is a bit that controls
+ * one pixel.
+ *
+ * A0 A1 A2 A3 A4
+ * B0 B1 B2 B3 B4
+ * C0 C1 C2 C3 C4
+ * D0 D1 D2 D3 D4
+ * E0 E1 E2 E3 E4
+ * F0 F1 F2 F3 F4
+ * G0 G1 G2 G3 G4
+ * H0 H1 H2 H3 H4
+ *
+ * If you want to update this screen, you need to send 5 bytes:
+ * (1) A0 B0 C0 D0 E0 F0 G0 H0
+ * (2) A1 B1 C1 D1 E1 F1 G1 H1
+ * (3) A2 B2 C2 D2 E2 F2 G2 H2
+ * (4) A3 B3 C3 D3 E3 F3 G3 H3
+ * (5) A4 B4 C4 D4 E4 F4 G4 H4
+ */
+
+ for (i = 0; i < (SSD1307FB_HEIGHT / 8); i++) {
+ ssd1307fb_write_cmd(par->client, SSD1307FB_START_PAGE_ADDRESS + (i + 1));
+ ssd1307fb_write_cmd(par->client, 0x00);
+ ssd1307fb_write_cmd(par->client, 0x10);
+
+ for (j = 0; j < SSD1307FB_WIDTH; j++) {
+ u8 buf = 0;
+ for (k = 0; k < 8; k++) {
+ u32 page_length = SSD1307FB_WIDTH * i;
+ u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
+ u8 byte = *(vmem + index);
+ u8 bit = byte & (1 << (7 - (j % 8)));
+ bit = bit >> (7 - (j % 8));
+ buf |= bit << k;
+ }
+ ssd1307fb_write_data(par->client, buf);
+ }
+ }
+}
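
As a side note, the indexing in the loop above can be exercised in isolation. The sketch below is not driver code; it packs one page of a row-major, MSB-first 1bpp bitmap into the column-oriented bytes described in the comment, and mirrors the driver's arithmetic:

/* Standalone illustration of the page packing (not part of the driver). */
#include <stdint.h>

static void ssd1307_pack_page(const uint8_t *vmem, unsigned int width,
                              unsigned int page, uint8_t *out)
{
        for (unsigned int col = 0; col < width; col++) {
                uint8_t packed = 0;

                for (unsigned int row = 0; row < 8; row++) {
                        unsigned int y = page * 8 + row;
                        unsigned int idx = (y * width + col) / 8;
                        unsigned int bit = 7 - (col % 8);

                        packed |= ((vmem[idx] >> bit) & 1) << row;
                }
                /* One byte per column; bit 0 is the top row of the page. */
                out[col] = packed;
        }
}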
+
+
+static ssize_t ssd1307fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ssd1307fb_par *par = info->par;
+ unsigned long total_size;
+ unsigned long p = *ppos;
+ u8 __iomem *dst;
+
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EINVAL;
+
+ if (count + p > total_size)
+ count = total_size - p;
+
+ if (!count)
+ return -EINVAL;
+
+ dst = (void __force *) (info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ return -EFAULT;
+
+ ssd1307fb_update_display(par);
+
+ *ppos += count;
+
+ return count;
+}
+
+static void ssd1307fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_fillrect(info, rect);
+ ssd1307fb_update_display(par);
+}
+
+static void ssd1307fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_copyarea(info, area);
+ ssd1307fb_update_display(par);
+}
+
+static void ssd1307fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct ssd1307fb_par *par = info->par;
+ sys_imageblit(info, image);
+ ssd1307fb_update_display(par);
+}
+
+static struct fb_ops ssd1307fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = ssd1307fb_write,
+ .fb_fillrect = ssd1307fb_fillrect,
+ .fb_copyarea = ssd1307fb_copyarea,
+ .fb_imageblit = ssd1307fb_imageblit,
+};
+
+static void ssd1307fb_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ ssd1307fb_update_display(info->par);
+}
+
+static struct fb_deferred_io ssd1307fb_defio = {
+ .delay = HZ,
+ .deferred_io = ssd1307fb_deferred_io,
+};
+
+static int __devinit ssd1307fb_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct fb_info *info;
+ u32 vmem_size = SSD1307FB_WIDTH * SSD1307FB_HEIGHT / 8;
+ struct ssd1307fb_par *par;
+ u8 *vmem;
+ int ret;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev, "No device tree data found!\n");
+ return -EINVAL;
+ }
+
+ info = framebuffer_alloc(sizeof(struct ssd1307fb_par), &client->dev);
+ if (!info) {
+ dev_err(&client->dev, "Couldn't allocate framebuffer.\n");
+ return -ENOMEM;
+ }
+
+ vmem = devm_kzalloc(&client->dev, vmem_size, GFP_KERNEL);
+ if (!vmem) {
+ dev_err(&client->dev, "Couldn't allocate graphical memory.\n");
+ ret = -ENOMEM;
+ goto fb_alloc_error;
+ }
+
+ info->fbops = &ssd1307fb_ops;
+ info->fix = ssd1307fb_fix;
+ info->fbdefio = &ssd1307fb_defio;
+
+ info->var = ssd1307fb_var;
+ info->var.red.length = 1;
+ info->var.red.offset = 0;
+ info->var.green.length = 1;
+ info->var.green.offset = 0;
+ info->var.blue.length = 1;
+ info->var.blue.offset = 0;
+
+ info->screen_base = (u8 __force __iomem *)vmem;
+ info->fix.smem_start = (unsigned long)vmem;
+ info->fix.smem_len = vmem_size;
+
+ fb_deferred_io_init(info);
+
+ par = info->par;
+ par->info = info;
+ par->client = client;
+
+ par->reset = of_get_named_gpio(client->dev.of_node,
+ "reset-gpios", 0);
+ if (!gpio_is_valid(par->reset)) {
+ ret = -EINVAL;
+ goto reset_oled_error;
+ }
+
+ ret = devm_gpio_request_one(&client->dev, par->reset,
+ GPIOF_OUT_INIT_HIGH,
+ "oled-reset");
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to request gpio %d: %d\n",
+ par->reset, ret);
+ goto reset_oled_error;
+ }
+
+ par->pwm = pwm_get(&client->dev, NULL);
+ if (IS_ERR(par->pwm)) {
+ dev_err(&client->dev, "Could not get PWM from device tree!\n");
+ ret = PTR_ERR(par->pwm);
+ goto pwm_error;
+ }
+
+ par->pwm_period = pwm_get_period(par->pwm);
+
+ dev_dbg(&client->dev, "Using PWM%d with a %dns period.\n", par->pwm->pwm, par->pwm_period);
+
+ ret = register_framebuffer(info);
+ if (ret) {
+ dev_err(&client->dev, "Couldn't register the framebuffer\n");
+ goto fbreg_error;
+ }
+
+ i2c_set_clientdata(client, info);
+
+ /* Reset the screen */
+ gpio_set_value(par->reset, 0);
+ udelay(4);
+ gpio_set_value(par->reset, 1);
+ udelay(4);
+
+ /* Enable the PWM */
+ pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
+ pwm_enable(par->pwm);
+
+ /* Map column 127 of the OLED to segment 0 */
+ ret = ssd1307fb_write_cmd(client, SSD1307FB_SEG_REMAP_ON);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't remap the screen.\n");
+ goto remap_error;
+ }
+
+ /* Turn on the display */
+ ret = ssd1307fb_write_cmd(client, SSD1307FB_DISPLAY_ON);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't turn the display on.\n");
+ goto remap_error;
+ }
+
+ dev_info(&client->dev, "fb%d: %s framebuffer device registered, using %d bytes of video memory\n", info->node, info->fix.id, vmem_size);
+
+ return 0;
+
+remap_error:
+ unregister_framebuffer(info);
+ pwm_disable(par->pwm);
+fbreg_error:
+ pwm_put(par->pwm);
+pwm_error:
+reset_oled_error:
+ fb_deferred_io_cleanup(info);
+fb_alloc_error:
+ framebuffer_release(info);
+ return ret;
+}
+
+static int __devexit ssd1307fb_remove(struct i2c_client *client)
+{
+ struct fb_info *info = i2c_get_clientdata(client);
+ struct ssd1307fb_par *par = info->par;
+
+ unregister_framebuffer(info);
+ pwm_disable(par->pwm);
+ pwm_put(par->pwm);
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+static const struct i2c_device_id ssd1307fb_i2c_id[] = {
+ { "ssd1307fb", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
+
+static const struct of_device_id ssd1307fb_of_match[] = {
+ { .compatible = "solomon,ssd1307fb-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ssd1307fb_of_match);
+
+static struct i2c_driver ssd1307fb_driver = {
+ .probe = ssd1307fb_probe,
+ .remove = __devexit_p(ssd1307fb_remove),
+ .id_table = ssd1307fb_i2c_id,
+ .driver = {
+ .name = "ssd1307fb",
+ .of_match_table = of_match_ptr(ssd1307fb_of_match),
+ .owner = THIS_MODULE,
+ },
+};
+
+module_i2c_driver(ssd1307fb_driver);
+
+MODULE_DESCRIPTION("FB driver for the Solomon SSD1307 OLED controler");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 2dcdbc9364d..99ebdde590f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -15,6 +15,7 @@ if VIRT_DRIVERS
config FSL_HV_MANAGER
tristate "Freescale hypervisor management driver"
depends on FSL_SOC
+ select EPAPR_PARAVIRT
help
The Freescale hypervisor management driver provides several services
to drivers and applications related to the Freescale hypervisor:
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 4939e0ccc4e..d294f67d6f8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -796,9 +796,6 @@ static int has_fsl_hypervisor(void)
struct device_node *node;
int ret;
- if (!(mfmsr() & MSR_GS))
- return 0;
-
node = of_find_node_by_path("/hypervisor");
if (!node)
return 0;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 809b0de59c0..ee59b74768d 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
static ssize_t device_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+ struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
unsigned int i;
ssize_t len = 0;
@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
{
unsigned int i;
- struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_dv);
const struct virtio_device_id *ids;
- ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+ ids = drv_to_virtio(_dr)->id_table;
for (i = 0; ids[i].device; i++)
if (virtio_id_match(dev, &ids[i]))
return 1;
@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
{
- struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+ struct virtio_device *dev = dev_to_virtio(_dv);
return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
dev->id.device, dev->id.vendor);
@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
unsigned int i;
- struct virtio_driver *drv = container_of(vdev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
for (i = 0; i < drv->feature_table_size; i++)
if (drv->feature_table[i] == fbit)
@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
static int virtio_dev_probe(struct device *_d)
{
int err, i;
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
- struct virtio_driver *drv = container_of(dev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
u32 device_features;
/* We have a driver! */
@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)
static int virtio_dev_remove(struct device *_d)
{
- struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
- struct virtio_driver *drv = container_of(dev->dev.driver,
- struct virtio_driver, driver);
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
drv->remove(dev);
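
The dev_to_virtio()/drv_to_virtio() conversions above replace open-coded container_of() calls; the helpers themselves live in include/linux/virtio.h, which is not part of this hunk. They are presumably simple wrappers along these lines (a sketch, not the actual header change):

/* Presumed helpers backing the conversions above (sketch only). */
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
        return container_of(_dev, struct virtio_device, dev);
}

static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
{
        return container_of(drv, struct virtio_driver, driver);
}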
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2a70558b36e..d19fe3e323b 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -139,10 +139,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
struct page *page = balloon_page_enqueue(vb_dev_info);
if (!page) {
- if (printk_ratelimit())
- dev_printk(KERN_INFO, &vb->vdev->dev,
- "Out of puff! Can't get %u pages\n",
- VIRTIO_BALLOON_PAGES_PER_PAGE);
+ dev_info_ratelimited(&vb->vdev->dev,
+ "Out of puff! Can't get %u pages\n",
+ VIRTIO_BALLOON_PAGES_PER_PAGE);
/* Sleep for at least 1/5 of a second before retry. */
msleep(200);
break;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6b1b7e18493..634f80bcdbd 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
/* We write the queue's selector into the notification register to
* signal the other end */
- writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+ writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}
/* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
- unsigned int index = virtqueue_get_queue_index(vq);
+ unsigned int index = vq->index;
spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
int err;
struct resource resources[2] = {};
char *str;
- long long int base;
+ long long int base, size;
+ unsigned int irq;
int processed, consumed = 0;
struct platform_device *pdev;
- resources[0].flags = IORESOURCE_MEM;
- resources[1].flags = IORESOURCE_IRQ;
-
- resources[0].end = memparse(device, &str) - 1;
+ /* Consume "size" part of the command line parameter */
+ size = memparse(device, &str);
+ /* Get "@<base>:<irq>[:<id>]" chunks */
processed = sscanf(str, "@%lli:%u%n:%d%n",
- &base, &resources[1].start, &consumed,
+ &base, &irq, &consumed,
&vm_cmdline_id, &consumed);
- if (processed < 2 || processed > 3 || str[consumed])
+ /*
+ * sscanf() must process at least 2 chunks; also there
+ * must be no extra characters after the last chunk, so
+ * str[consumed] must be '\0'
+ */
+ if (processed < 2 || str[consumed])
return -EINVAL;
+ resources[0].flags = IORESOURCE_MEM;
resources[0].start = base;
- resources[0].end += base;
- resources[1].end = resources[1].start;
+ resources[0].end = base + size - 1;
+
+ resources[1].flags = IORESOURCE_IRQ;
+ resources[1].start = resources[1].end = irq;
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c33aea36598..e3ecc94591a 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
/* we write the queue's selector into the notification register to
* signal the other end */
- iowrite16(virtqueue_get_queue_index(vq),
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}
/* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);
- iowrite16(virtqueue_get_queue_index(vq),
- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -830,16 +828,4 @@ static struct pci_driver virtio_pci_driver = {
#endif
};
-static int __init virtio_pci_init(void)
-{
- return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
- pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e639584b2db..ffd7e7da5d3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -93,8 +93,6 @@ struct vring_virtqueue
/* Host publishes avail event idx */
bool event;
- /* Number of free buffers */
- unsigned int num_free;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);
- /* Index of the queue */
- int queue_index;
-
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
unsigned head;
int i;
+ /*
+ * We require lowmem mappings for the descriptors because
+ * otherwise virt_to_phys will give us bogus addresses in the
+ * virtqueue.
+ */
+ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+
desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;
@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
desc[i-1].next = 0;
/* We're about to use a buffer */
- vq->num_free--;
+ vq->vq.num_free--;
/* Use a single buffer which doesn't continue */
head = vq->free_head;
@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
- struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
/**
* virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
- * Returns remaining capacity of queue or a negative error
- * (ie. ENOSPC). Note that it only really makes sense to treat all
- * positive return values as "available": indirect buffers mean that
- * we can put an entire sg[] array inside a single queue entry.
+ * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
*/
int virtqueue_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
- if (vq->indirect && (out + in) > 1 && vq->num_free) {
+ if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0))
goto add_head;
@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);
- if (vq->num_free < out + in) {
+ if (vq->vq.num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
- out + in, vq->num_free);
+ out + in, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}
/* We're about to use some buffers from the free list. */
- vq->num_free -= out + in;
+ vq->vq.num_free -= out + in;
head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -296,7 +288,7 @@ add_head:
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
- return vq->num_free;
+ return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
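
With the return value reduced to zero-or-error, callers that relied on the old "remaining capacity" result are expected to read the now-public vq->num_free field instead. A hedged sketch of such a caller; struct my_dev, my_stop_queue() and the scatterlist split are illustrative, not taken from this patch:

/* Illustrative caller of the new API; names prefixed my_ are made up. */
struct my_dev {
        struct virtqueue *vq;
};

static void my_stop_queue(struct my_dev *d)
{
        /* e.g. netif_stop_queue() in a network driver */
}

static int my_xmit(struct my_dev *d, struct scatterlist *sg,
                   unsigned int out, unsigned int in, void *token)
{
        int err = virtqueue_add_buf(d->vq, sg, out, in, token, GFP_ATOMIC);

        if (err < 0)                    /* -ENOSPC, -ENOMEM, ... */
                return err;

        if (d->vq->num_free == 0)       /* capacity now read from the vq */
                my_stop_queue(d);

        virtqueue_kick(d->vq);
        return 0;
}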
@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
- vq->num_free++;
+ vq->vq.num_free++;
}
vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
- vq->num_free++;
+ vq->vq.num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
return buf;
}
/* That should have freed everything. */
- BUG_ON(vq->num_free != vq->vring.num);
+ BUG_ON(vq->vq.num_free != vq->vring.num);
END_USE(vq);
return NULL;
@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
+ vq->vq.num_free = num;
+ vq->vq.index = index;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
- vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
/* Put everything in free lists. */
- vq->num_free = num;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index c433a746e3f..e8ca63a82b9 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,6 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
+ depends on ARCH_OMAP
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ad1bb9382a9..7f809fd4a57 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -76,6 +76,16 @@ config DA9052_WATCHDOG
Alternatively say M to compile the driver as a module,
which will be called da9052_wdt.
+config DA9055_WATCHDOG
+ tristate "Dialog Semiconductor DA9055 Watchdog"
+ depends on MFD_DA9055
+ help
+ If you say yes here you get support for the watchdog on the
+ Dialog Semiconductor DA9055 PMIC.
+
+ This driver can also be built as a module. If so, the module
+ will be called da9055_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -232,6 +242,7 @@ config EP93XX_WATCHDOG
config OMAP_WATCHDOG
tristate "OMAP Watchdog"
depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+ select WATCHDOG_CORE
help
Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y'
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
@@ -300,6 +311,7 @@ config COH901327_WATCHDOG
config TWL4030_WATCHDOG
tristate "TWL4030 Watchdog"
depends on TWL4030_CORE
+ select WATCHDOG_CORE
help
Support for TI TWL4030 watchdog. Say 'Y' here to enable the
watchdog timer support for TWL4030 chips.
@@ -342,7 +354,7 @@ config MAX63XX_WATCHDOG
config IMX2_WDT
tristate "IMX2+ Watchdog"
- depends on IMX_HAVE_PLATFORM_IMX2_WDT
+ depends on ARCH_MXC
help
This is the driver for the hardware watchdog
on the Freescale IMX2 and later processors.
@@ -431,7 +443,7 @@ config ALIM7101_WDT
config F71808E_WDT
tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
- depends on X86 && EXPERIMENTAL
+ depends on X86
help
This is the driver for the hardware watchdog on the Fintek
F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
@@ -622,7 +634,7 @@ config IT8712F_WDT
config IT87_WDT
tristate "IT87 Watchdog Timer"
- depends on X86 && EXPERIMENTAL
+ depends on X86
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 572b39bed06..97bbdb3a464 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -164,6 +164,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
+obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 05e1be85fde..dc42e44b6bc 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -32,6 +32,7 @@
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include "at91sam9_wdt.h"
@@ -302,11 +303,21 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
return res;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_wdt_dt_ids[] __initconst = {
+ { .compatible = "atmel,at91sam9260-wdt" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_wdt_dt_ids);
+#endif
+
static struct platform_driver at91wdt_driver = {
.remove = __exit_p(at91wdt_remove),
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_wdt_dt_ids),
},
};
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 7c8ede7816b..38a999e60c0 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -284,6 +284,7 @@ static void ath97_wdt_shutdown(struct platform_device *pdev)
}
static struct platform_driver ath79_wdt_driver = {
+ .probe = ath79_wdt_probe,
.remove = ath79_wdt_remove,
.shutdown = ath97_wdt_shutdown,
.driver = {
@@ -292,17 +293,7 @@ static struct platform_driver ath79_wdt_driver = {
},
};
-static int __init ath79_wdt_init(void)
-{
- return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
-}
-module_init(ath79_wdt_init);
-
-static void __exit ath79_wdt_exit(void)
-{
- platform_driver_unregister(&ath79_wdt_driver);
-}
-module_exit(ath79_wdt_exit);
+module_platform_driver(ath79_wdt_driver);
MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 5b06d31ab6a..c0bc92d8e43 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -212,7 +212,7 @@ static long booke_wdt_ioctl(struct file *file,
return 0;
}
-/* wdt_is_active stores wether or not the /dev/watchdog device is opened */
+/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
static unsigned long wdt_is_active;
static int booke_wdt_open(struct inode *inode, struct file *file)
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index cd87758abac..f270bb7bc45 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -266,6 +266,7 @@ static void cpu5wdt_exit(void)
if (cpu5wdt_device.queue) {
cpu5wdt_device.queue = 0;
wait_for_completion(&cpu5wdt_device.stop);
+ del_timer(&cpu5wdt_device.timer);
}
misc_deregister(&cpu5wdt_misc);
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 8be70d8f268..367445009c6 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -53,10 +53,6 @@ static const struct {
static void da9052_wdt_release_resources(struct kref *r)
{
- struct da9052_wdt_data *driver_data =
- container_of(r, struct da9052_wdt_data, kref);
-
- kfree(driver_data);
}
static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
new file mode 100644
index 00000000000..f5ad10546fc
--- /dev/null
+++ b/drivers/watchdog/da9055_wdt.c
@@ -0,0 +1,211 @@
+/*
+ * System monitoring driver for DA9055 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define DA9055_DEF_TIMEOUT 4
+#define DA9055_TWDMIN 256
+
+struct da9055_wdt_data {
+ struct watchdog_device wdt;
+ struct da9055 *da9055;
+ struct kref kref;
+};
+
+static const struct {
+ u8 reg_val;
+ int user_time; /* In seconds */
+} da9055_wdt_maps[] = {
+ { 0, 0 },
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+ { 5, 33 }, /* Actual time 32.768s so include both 32s and 33s */
+ { 6, 65 },
+ { 6, 66 }, /* Actual time 65.536s so include both 65s and 66s */
+ { 7, 131 },
+};
+
+static int da9055_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9055 *da9055 = driver_data->da9055;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(da9055_wdt_maps); i++)
+ if (da9055_wdt_maps[i].user_time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9055_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ DA9055_TWDSCALE_MASK,
+ da9055_wdt_maps[i].reg_val <<
+ DA9055_TWDSCALE_SHIFT);
+ if (ret < 0) {
+ dev_err(da9055->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+
+ return 0;
+}
+
+static int da9055_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9055 *da9055 = driver_data->da9055;
+
+ /*
+ * The watchdog has a minimum window time called TWDMIN. A write to
+ * the watchdog before this time has elapsed will cause an error.
+ */
+ mdelay(DA9055_TWDMIN);
+
+ /* Reset the watchdog timer */
+ return da9055_reg_update(da9055, DA9055_REG_CONTROL_E,
+ DA9055_WATCHDOG_MASK, 1);
+}
+
+static void da9055_wdt_release_resources(struct kref *r)
+{
+}
+
+static void da9055_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9055_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9055_wdt_release_resources);
+}
+
+static int da9055_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9055_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9055_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9055_wdt_set_timeout(wdt_dev, 0);
+}
+
+static struct watchdog_info da9055_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9055 Watchdog",
+};
+
+static const struct watchdog_ops da9055_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9055_wdt_start,
+ .stop = da9055_wdt_stop,
+ .ping = da9055_wdt_ping,
+ .set_timeout = da9055_wdt_set_timeout,
+ .ref = da9055_wdt_ref,
+ .unref = da9055_wdt_unref,
+};
+
+static int da9055_wdt_probe(struct platform_device *pdev)
+{
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_wdt_data *driver_data;
+ struct watchdog_device *da9055_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(da9055->dev, "Failed to allocate watchdog device\n");
+ return -ENOMEM;
+ }
+
+ driver_data->da9055 = da9055;
+
+ da9055_wdt = &driver_data->wdt;
+
+ da9055_wdt->timeout = DA9055_DEF_TIMEOUT;
+ da9055_wdt->info = &da9055_wdt_info;
+ da9055_wdt->ops = &da9055_wdt_ops;
+ watchdog_set_nowayout(da9055_wdt, nowayout);
+ watchdog_set_drvdata(da9055_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9055_wdt_stop(da9055_wdt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to stop watchdog, %d\n", ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0)
+ dev_err(da9055->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+
+err:
+ return ret;
+}
+
+static int da9055_wdt_remove(struct platform_device *pdev)
+{
+ struct da9055_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9055_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9055_wdt_driver = {
+ .probe = da9055_wdt_probe,
+ .remove = da9055_wdt_remove,
+ .driver = {
+ .name = "da9055-watchdog",
+ },
+};
+
+module_platform_driver(da9055_wdt_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9055 watchdog");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-watchdog");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 8791879e518..e8e87246ea6 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -208,7 +208,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
if (WARN_ON(IS_ERR(wdt_clk)))
return PTR_ERR(wdt_clk);
- clk_enable(wdt_clk);
+ clk_prepare_enable(wdt_clk);
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
@@ -256,16 +256,23 @@ static int davinci_wdt_remove(struct platform_device *pdev)
wdt_mem = NULL;
}
- clk_disable(wdt_clk);
+ clk_disable_unprepare(wdt_clk);
clk_put(wdt_clk);
return 0;
}
+static const struct of_device_id davinci_wdt_of_match[] = {
+ { .compatible = "ti,davinci-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_wdt_of_match);
+
static struct platform_driver platform_wdt_driver = {
.driver = {
.name = "watchdog",
.owner = THIS_MODULE,
+ .of_match_table = davinci_wdt_of_match,
},
.probe = davinci_wdt_probe,
.remove = davinci_wdt_remove,
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8717255ec7b..11796b9b864 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.3.0"
+#define HPWDT_VERSION "1.3.1"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index a84eb551ea2..233cfadcb21 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -80,8 +80,7 @@ static irqreturn_t mpcore_wdt_fire(int irq, void *arg)
/* Check it really was our interrupt */
if (readl(wdt->base + TWD_WDOG_INTSTAT)) {
- dev_printk(KERN_CRIT, wdt->dev,
- "Triggered - Reboot ignored.\n");
+ dev_crit(wdt->dev, "Triggered - Reboot ignored\n");
/* Clear the interrupt on the watchdog */
writel(1, wdt->base + TWD_WDOG_INTSTAT);
return IRQ_HANDLED;
@@ -123,7 +122,7 @@ static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
static void mpcore_wdt_start(struct mpcore_wdt *wdt)
{
- dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
+ dev_info(wdt->dev, "enabling watchdog\n");
/* This loads the count register but does NOT start the count yet */
mpcore_wdt_keepalive(wdt);
@@ -180,8 +179,8 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
else {
- dev_printk(KERN_CRIT, wdt->dev,
- "unexpected close, not stopping watchdog!\n");
+ dev_crit(wdt->dev,
+ "unexpected close, not stopping watchdog!\n");
mpcore_wdt_keepalive(wdt);
}
clear_bit(0, &wdt->timer_alive);
@@ -351,9 +350,9 @@ static int mpcore_wdt_probe(struct platform_device *pdev)
ret = devm_request_irq(wdt->dev, wdt->irq, mpcore_wdt_fire, 0,
"mpcore_wdt", wdt);
if (ret) {
- dev_printk(KERN_ERR, wdt->dev,
- "cannot register IRQ%d for watchdog\n",
- wdt->irq);
+ dev_err(wdt->dev,
+ "cannot register IRQ%d for watchdog\n",
+ wdt->irq);
return ret;
}
}
@@ -365,9 +364,9 @@ static int mpcore_wdt_probe(struct platform_device *pdev)
mpcore_wdt_miscdev.parent = &pdev->dev;
ret = misc_register(&mpcore_wdt_miscdev);
if (ret) {
- dev_printk(KERN_ERR, wdt->dev,
+ dev_err(wdt->dev,
"cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ WATCHDOG_MINOR, ret);
return ret;
}
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 43cc1a1e25d..b0e541d022e 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -31,44 +31,34 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
-#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include <mach/hardware.h>
-
#include <linux/platform_data/omap-wd-timer.h>
#include "omap_wdt.h"
-static struct platform_device *omap_wdt_dev;
-
static unsigned timer_margin;
module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
-static unsigned int wdt_trgr_pattern = 0x1234;
-static DEFINE_SPINLOCK(wdt_lock);
-
struct omap_wdt_dev {
void __iomem *base; /* physical */
struct device *dev;
- int omap_wdt_users;
+ bool omap_wdt_users;
struct resource *mem;
- struct miscdevice omap_wdt_miscdev;
+ int wdt_trgr_pattern;
+ struct mutex lock; /* to avoid races with PM */
};
-static void omap_wdt_ping(struct omap_wdt_dev *wdev)
+static void omap_wdt_reload(struct omap_wdt_dev *wdev)
{
void __iomem *base = wdev->base;
@@ -76,8 +66,8 @@ static void omap_wdt_ping(struct omap_wdt_dev *wdev)
while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
cpu_relax();
- wdt_trgr_pattern = ~wdt_trgr_pattern;
- __raw_writel(wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
+ wdev->wdt_trgr_pattern = ~wdev->wdt_trgr_pattern;
+ __raw_writel(wdev->wdt_trgr_pattern, (base + OMAP_WATCHDOG_TGR));
/* wait for posted write to complete */
while ((__raw_readl(base + OMAP_WATCHDOG_WPS)) & 0x08)
@@ -113,18 +103,10 @@ static void omap_wdt_disable(struct omap_wdt_dev *wdev)
cpu_relax();
}
-static void omap_wdt_adjust_timeout(unsigned new_timeout)
-{
- if (new_timeout < TIMER_MARGIN_MIN)
- new_timeout = TIMER_MARGIN_DEFAULT;
- if (new_timeout > TIMER_MARGIN_MAX)
- new_timeout = TIMER_MARGIN_MAX;
- timer_margin = new_timeout;
-}
-
-static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
+static void omap_wdt_set_timer(struct omap_wdt_dev *wdev,
+ unsigned int timeout)
{
- u32 pre_margin = GET_WLDR_VAL(timer_margin);
+ u32 pre_margin = GET_WLDR_VAL(timeout);
void __iomem *base = wdev->base;
/* just count up at 32 KHz */
@@ -136,16 +118,14 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
cpu_relax();
}
-/*
- * Allow only one task to hold it open
- */
-static int omap_wdt_open(struct inode *inode, struct file *file)
+static int omap_wdt_start(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(omap_wdt_dev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
void __iomem *base = wdev->base;
- if (test_and_set_bit(1, (unsigned long *)&(wdev->omap_wdt_users)))
- return -EBUSY;
+ mutex_lock(&wdev->lock);
+
+ wdev->omap_wdt_users = true;
pm_runtime_get_sync(wdev->dev);
@@ -157,223 +137,168 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
cpu_relax();
- file->private_data = (void *) wdev;
-
- omap_wdt_set_timeout(wdev);
- omap_wdt_ping(wdev); /* trigger loading of new timeout value */
+ omap_wdt_set_timer(wdev, wdog->timeout);
+ omap_wdt_reload(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
- return nonseekable_open(inode, file);
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static int omap_wdt_release(struct inode *inode, struct file *file)
+static int omap_wdt_stop(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = file->private_data;
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- /*
- * Shut off the timer unless NOWAYOUT is defined.
- */
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ mutex_lock(&wdev->lock);
omap_wdt_disable(wdev);
-
pm_runtime_put_sync(wdev->dev);
-#else
- pr_crit("Unexpected close, not stopping!\n");
-#endif
- wdev->omap_wdt_users = 0;
-
+ wdev->omap_wdt_users = false;
+ mutex_unlock(&wdev->lock);
return 0;
}
-static ssize_t omap_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
+static int omap_wdt_ping(struct watchdog_device *wdog)
{
- struct omap_wdt_dev *wdev = file->private_data;
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- /* Refresh LOAD_TIME. */
- if (len) {
- spin_lock(&wdt_lock);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- }
- return len;
+ mutex_lock(&wdev->lock);
+ omap_wdt_reload(wdev);
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int omap_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int timeout)
{
- struct omap_wd_timer_platform_data *pdata;
- struct omap_wdt_dev *wdev;
- u32 rs;
- int new_margin, bs;
- static const struct watchdog_info ident = {
- .identity = "OMAP Watchdog",
- .options = WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- };
-
- wdev = file->private_data;
- pdata = wdev->dev->platform_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info __user *)arg, &ident,
- sizeof(ident));
- case WDIOC_GETSTATUS:
- return put_user(0, (int __user *)arg);
- case WDIOC_GETBOOTSTATUS:
- if (!pdata || !pdata->read_reset_sources)
- return put_user(0, (int __user *)arg);
- rs = pdata->read_reset_sources();
- bs = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ?
- WDIOF_CARDRESET : 0;
- return put_user(bs, (int __user *)arg);
- case WDIOC_KEEPALIVE:
- spin_lock(&wdt_lock);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, (int __user *)arg))
- return -EFAULT;
- omap_wdt_adjust_timeout(new_margin);
-
- spin_lock(&wdt_lock);
- omap_wdt_disable(wdev);
- omap_wdt_set_timeout(wdev);
- omap_wdt_enable(wdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
- omap_wdt_ping(wdev);
- spin_unlock(&wdt_lock);
- /* Fall */
- case WDIOC_GETTIMEOUT:
- return put_user(timer_margin, (int __user *)arg);
- default:
- return -ENOTTY;
- }
+ mutex_lock(&wdev->lock);
+ omap_wdt_disable(wdev);
+ omap_wdt_set_timer(wdev, timeout);
+ omap_wdt_enable(wdev);
+ omap_wdt_reload(wdev);
+ wdog->timeout = timeout;
+ mutex_unlock(&wdev->lock);
+
+ return 0;
}
-static const struct file_operations omap_wdt_fops = {
- .owner = THIS_MODULE,
- .write = omap_wdt_write,
- .unlocked_ioctl = omap_wdt_ioctl,
- .open = omap_wdt_open,
- .release = omap_wdt_release,
- .llseek = no_llseek,
+static const struct watchdog_info omap_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "OMAP Watchdog",
+};
+
+static const struct watchdog_ops omap_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = omap_wdt_start,
+ .stop = omap_wdt_stop,
+ .ping = omap_wdt_ping,
+ .set_timeout = omap_wdt_set_timeout,
};
static int omap_wdt_probe(struct platform_device *pdev)
{
+ struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
+ bool nowayout = WATCHDOG_NOWAYOUT;
+ struct watchdog_device *omap_wdt;
struct resource *res, *mem;
struct omap_wdt_dev *wdev;
+ u32 rs;
int ret;
+ omap_wdt = devm_kzalloc(&pdev->dev, sizeof(*omap_wdt), GFP_KERNEL);
+ if (!omap_wdt)
+ return -ENOMEM;
+
/* reserve static register mappings */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto err_get_resource;
- }
+ if (!res)
+ return -ENOENT;
- if (omap_wdt_dev) {
- ret = -EBUSY;
- goto err_busy;
- }
+ mem = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name);
+ if (!mem)
+ return -EBUSY;
- mem = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!mem) {
- ret = -EBUSY;
- goto err_busy;
- }
+ wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
- wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
- if (!wdev) {
- ret = -ENOMEM;
- goto err_kzalloc;
- }
+ wdev->omap_wdt_users = false;
+ wdev->mem = mem;
+ wdev->dev = &pdev->dev;
+ wdev->wdt_trgr_pattern = 0x1234;
+ mutex_init(&wdev->lock);
- wdev->omap_wdt_users = 0;
- wdev->mem = mem;
- wdev->dev = &pdev->dev;
+ wdev->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wdev->base)
+ return -ENOMEM;
- wdev->base = ioremap(res->start, resource_size(res));
- if (!wdev->base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ omap_wdt->info = &omap_wdt_info;
+ omap_wdt->ops = &omap_wdt_ops;
+ omap_wdt->min_timeout = TIMER_MARGIN_MIN;
+ omap_wdt->max_timeout = TIMER_MARGIN_MAX;
- platform_set_drvdata(pdev, wdev);
+ if (timer_margin >= TIMER_MARGIN_MIN &&
+ timer_margin <= TIMER_MARGIN_MAX)
+ omap_wdt->timeout = timer_margin;
+ else
+ omap_wdt->timeout = TIMER_MARGIN_DEFAULT;
+
+ watchdog_set_drvdata(omap_wdt, wdev);
+ watchdog_set_nowayout(omap_wdt, nowayout);
+
+ platform_set_drvdata(pdev, omap_wdt);
pm_runtime_enable(wdev->dev);
pm_runtime_get_sync(wdev->dev);
- omap_wdt_disable(wdev);
- omap_wdt_adjust_timeout(timer_margin);
+ if (pdata && pdata->read_reset_sources)
+ rs = pdata->read_reset_sources();
+ else
+ rs = 0;
+ omap_wdt->bootstatus = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ?
+ WDIOF_CARDRESET : 0;
- wdev->omap_wdt_miscdev.parent = &pdev->dev;
- wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
- wdev->omap_wdt_miscdev.name = "watchdog";
- wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;
+ omap_wdt_disable(wdev);
- ret = misc_register(&(wdev->omap_wdt_miscdev));
- if (ret)
- goto err_misc;
+ ret = watchdog_register_device(omap_wdt);
+ if (ret) {
+ pm_runtime_disable(wdev->dev);
+ return ret;
+ }
pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
- timer_margin);
+ omap_wdt->timeout);
pm_runtime_put_sync(wdev->dev);
- omap_wdt_dev = pdev;
-
return 0;
-
-err_misc:
- pm_runtime_disable(wdev->dev);
- platform_set_drvdata(pdev, NULL);
- iounmap(wdev->base);
-
-err_ioremap:
- wdev->base = NULL;
- kfree(wdev);
-
-err_kzalloc:
- release_mem_region(res->start, resource_size(res));
-
-err_busy:
-err_get_resource:
-
- return ret;
}
static void omap_wdt_shutdown(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
+ mutex_unlock(&wdev->lock);
}
static int omap_wdt_remove(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
pm_runtime_disable(wdev->dev);
- if (!res)
- return -ENOENT;
-
- misc_deregister(&(wdev->omap_wdt_miscdev));
- release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
-
- iounmap(wdev->base);
-
- kfree(wdev);
- omap_wdt_dev = NULL;
+ watchdog_unregister_device(wdog);
return 0;
}
@@ -388,25 +313,31 @@ static int omap_wdt_remove(struct platform_device *pdev)
static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
+ mutex_unlock(&wdev->lock);
return 0;
}
static int omap_wdt_resume(struct platform_device *pdev)
{
- struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
+ struct watchdog_device *wdog = platform_get_drvdata(pdev);
+ struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+ mutex_lock(&wdev->lock);
if (wdev->omap_wdt_users) {
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
- omap_wdt_ping(wdev);
+ omap_wdt_reload(wdev);
}
+ mutex_unlock(&wdev->lock);
return 0;
}
@@ -439,5 +370,4 @@ module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:omap_wdt");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 0478b001b1e..7c18b3bffcf 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -156,6 +156,8 @@ static int orion_wdt_probe(struct platform_device *pdev)
wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!wdt_reg)
return -ENOMEM;
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index b0dab10fc6a..27bcd4e2c4a 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -354,7 +354,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
goto err_map;
}
- clk_enable(wdt_clock);
+ clk_prepare_enable(wdt_clock);
ret = s3c2410wdt_cpufreq_register();
if (ret < 0) {
@@ -421,7 +421,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
s3c2410wdt_cpufreq_deregister();
err_clk:
- clk_disable(wdt_clock);
+ clk_disable_unprepare(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
@@ -445,7 +445,7 @@ static int s3c2410wdt_remove(struct platform_device *dev)
s3c2410wdt_cpufreq_deregister();
- clk_disable(wdt_clock);
+ clk_disable_unprepare(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
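The s3c2410_wdt hunks above (and the sp805_wdt hunks further down) replace separate clk_prepare()/clk_enable() steps with the combined helpers expected by the common clock framework. A small hedged sketch of the usual probe/remove pairing, with foo_* names standing in for a real driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *foo_clk;	/* illustrative driver-private clock handle */

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	foo_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(foo_clk))
		return PTR_ERR(foo_clk);

	/* prepare + enable in one call; may sleep, so not for atomic context */
	ret = clk_prepare_enable(foo_clk);
	if (ret)
		return ret;

	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* exact reverse of clk_prepare_enable() */
	clk_disable_unprepare(foo_clk);
	return 0;
}

clk_disable_unprepare() must balance clk_prepare_enable(), which is what the one-for-one substitutions in the diff preserve.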
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index b3876812ff0..2b0e000d437 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -13,7 +13,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide",
+ * AMD Publication 45482 "AMD SB800-Series Southbridges Register
+ * Reference Guide"
*/
/*
@@ -38,18 +40,24 @@
#include "sp5100_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.01"
+#define TCO_VERSION "0.03"
#define TCO_MODULE_NAME "SP5100 TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
/* internal variables */
static u32 tcobase_phys;
+static u32 resbase_phys;
+static u32 tco_wdt_fired;
static void __iomem *tcobase;
static unsigned int pm_iobase;
static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
static unsigned long timer_alive;
static char tco_expect_close;
static struct pci_dev *sp5100_tco_pci;
+static struct resource wdt_res = {
+ .name = "Watchdog Timer",
+ .flags = IORESOURCE_MEM,
+};
/* the watchdog platform device */
static struct platform_device *sp5100_tco_platform_device;
@@ -64,9 +72,15 @@ MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static unsigned int force_addr;
+module_param(force_addr, uint, 0);
+MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
+ " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
+ " WHAT YOU ARE DOING (default=none)");
+
/*
* Some TCO specific functions
*/
@@ -122,6 +136,79 @@ static int tco_timer_set_heartbeat(int t)
return 0;
}
+static void tco_timer_enable(void)
+{
+ int val;
+
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* For SB800 or later */
+ /* Set the Watchdog timer resolution to 1 sec */
+ outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PM_WATCHDOG_SECOND_RES;
+ outb(val, SB800_IO_PM_DATA_REG);
+
+ /* Enable watchdog decode bit and watchdog timer */
+ outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PCI_WATCHDOG_DECODE_EN;
+ val &= ~SB800_PM_WATCHDOG_DISABLE;
+ outb(val, SB800_IO_PM_DATA_REG);
+ } else {
+ /* For SP5100 or SB7x0 */
+ /* Enable watchdog decode bit */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ &val);
+
+ val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+ pci_write_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ val);
+
+ /* Enable Watchdog timer and set the resolution to 1 sec */
+ outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ val |= SP5100_PM_WATCHDOG_SECOND_RES;
+ val &= ~SP5100_PM_WATCHDOG_DISABLE;
+ outb(val, SP5100_IO_PM_DATA_REG);
+ }
+}
+
+static void tco_timer_disable(void)
+{
+ int val;
+
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* For SB800 or later */
+ /* Enable watchdog decode bit and Disable watchdog timer */
+ outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ val |= SB800_PCI_WATCHDOG_DECODE_EN;
+ val |= SB800_PM_WATCHDOG_DISABLE;
+ outb(val, SB800_IO_PM_DATA_REG);
+ } else {
+ /* For SP5100 or SB7x0 */
+ /* Enable watchdog decode bit */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ &val);
+
+ val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+ pci_write_config_dword(sp5100_tco_pci,
+ SP5100_PCI_WATCHDOG_MISC_REG,
+ val);
+
+ /* Disable Watchdog timer */
+ outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+ val = inb(SP5100_IO_PM_DATA_REG);
+ val |= SP5100_PM_WATCHDOG_DISABLE;
+ outb(val, SP5100_IO_PM_DATA_REG);
+ }
+}
+
/*
* /dev/watchdog handling
*/
@@ -270,11 +357,12 @@ MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
/*
* Init & exit routines
*/
-
static unsigned char sp5100_tco_setupdevice(void)
{
struct pci_dev *dev = NULL;
+ const char *dev_name = NULL;
u32 val;
+ u32 index_reg, data_reg, base_addr;
/* Match the PCI device */
for_each_pci_dev(dev) {
@@ -287,29 +375,160 @@ static unsigned char sp5100_tco_setupdevice(void)
if (!sp5100_tco_pci)
return 0;
+ pr_info("PCI Revision ID: 0x%x\n", sp5100_tco_pci->revision);
+
+ /*
+ * Determine type of southbridge chipset.
+ */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ dev_name = SB800_DEVNAME;
+ index_reg = SB800_IO_PM_INDEX_REG;
+ data_reg = SB800_IO_PM_DATA_REG;
+ base_addr = SB800_PM_WATCHDOG_BASE;
+ } else {
+ dev_name = SP5100_DEVNAME;
+ index_reg = SP5100_IO_PM_INDEX_REG;
+ data_reg = SP5100_IO_PM_DATA_REG;
+ base_addr = SP5100_PM_WATCHDOG_BASE;
+ }
+
/* Request the IO ports used by this driver */
pm_iobase = SP5100_IO_PM_INDEX_REG;
- if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+ if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, dev_name)) {
pr_err("I/O address 0x%04x already in use\n", pm_iobase);
goto exit;
}
- /* Find the watchdog base address. */
- outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
- val = inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
- val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
- val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
- outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
- /* Low three bits of BASE0 are reserved. */
- val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+ /*
+ * First, find the watchdog timer MMIO address from indirect I/O.
+ */
+ outb(base_addr+3, index_reg);
+ val = inb(data_reg);
+ outb(base_addr+2, index_reg);
+ val = val << 8 | inb(data_reg);
+ outb(base_addr+1, index_reg);
+ val = val << 8 | inb(data_reg);
+ outb(base_addr+0, index_reg);
+ /* Low three bits of BASE are reserved */
+ val = val << 8 | (inb(data_reg) & 0xf8);
+
+ pr_debug("Got 0x%04x from indirect I/O\n", val);
+
+ /* Check MMIO address conflict */
+ if (request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+ dev_name))
+ goto setup_wdt;
+ else
+ pr_debug("MMIO address 0x%04x already in use\n", val);
+
+ /*
+ * Second, find the watchdog timer MMIO address
+ * from the SBResource_MMIO register.
+ */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ /* Read SBResource_MMIO from AcpiMmioEn(PM_Reg: 24h) */
+ outb(SB800_PM_ACPI_MMIO_EN+3, SB800_IO_PM_INDEX_REG);
+ val = inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+2, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+1, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ outb(SB800_PM_ACPI_MMIO_EN+0, SB800_IO_PM_INDEX_REG);
+ val = val << 8 | inb(SB800_IO_PM_DATA_REG);
+ } else {
+ /* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+ pci_read_config_dword(sp5100_tco_pci,
+ SP5100_SB_RESOURCE_MMIO_BASE, &val);
+ }
+
+ /* Is the SBResource_MMIO decode enabled and mapped to memory space? */
+ if ((val & (SB800_ACPI_MMIO_DECODE_EN | SB800_ACPI_MMIO_SEL)) ==
+ SB800_ACPI_MMIO_DECODE_EN) {
+ /* Clear the unneeded low twelve bits */
+ val &= ~0xFFF;
+ /* Add the Watchdog Timer offset to base address. */
+ val += SB800_PM_WDT_MMIO_OFFSET;
+ /* Check MMIO address conflict */
+ if (request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+ dev_name)) {
+ pr_debug("Got 0x%04x from SBResource_MMIO register\n",
+ val);
+ goto setup_wdt;
+ } else
+ pr_debug("MMIO address 0x%04x already in use\n", val);
+ } else
+ pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
+
+ /*
+ * Lastly, re-program the watchdog timer MMIO address.
+ * This method is a last resort...
+ *
+ * Disable the watchdog timer before re-programming, so that it
+ * is guaranteed not to be running while the base address changes.
+ */
+ tco_timer_disable();
+
+ if (force_addr) {
+ /*
+ * Force the use of the specified watchdog timer MMIO address,
+ * aligned to an 8-byte boundary.
+ */
+ force_addr &= ~0x7;
+ val = force_addr;
+
+ pr_info("Force the use of 0x%04x as MMIO address\n", val);
+ } else {
+ /*
+ * Get an empty slot in the resource tree for the watchdog timer.
+ */
+ if (allocate_resource(&iomem_resource,
+ &wdt_res,
+ SP5100_WDT_MEM_MAP_SIZE,
+ 0xf0000000,
+ 0xfffffff8,
+ 0x8,
+ NULL,
+ NULL)) {
+ pr_err("MMIO allocation failed\n");
+ goto unreg_region;
+ }
+
+ val = resbase_phys = wdt_res.start;
+ pr_debug("Got 0x%04x from resource tree\n", val);
+ }
+
+ /* Restore the low three bits if the chipset is SB8x0 (or later) */
+ if (sp5100_tco_pci->revision >= 0x40) {
+ u8 reserved_bit;
+ reserved_bit = inb(base_addr) & 0x7;
+ val |= (u32)reserved_bit;
+ }
+
+ /* Re-programming the watchdog timer base address */
+ outb(base_addr+0, index_reg);
+ /* Low three bits of BASE are reserved */
+ outb((val >> 0) & 0xf8, data_reg);
+ outb(base_addr+1, index_reg);
+ outb((val >> 8) & 0xff, data_reg);
+ outb(base_addr+2, index_reg);
+ outb((val >> 16) & 0xff, data_reg);
+ outb(base_addr+3, index_reg);
+ outb((val >> 24) & 0xff, data_reg);
+
+ /*
+ * Clear the unneeded low three bits
+ * if the chipset is SB8x0 (or later).
+ */
+ if (sp5100_tco_pci->revision >= 0x40)
+ val &= ~0x7;
if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
- "SP5100 TCO")) {
- pr_err("mmio address 0x%04x already in use\n", val);
- goto unreg_region;
+ dev_name)) {
+ pr_err("MMIO address 0x%04x already in use\n", val);
+ goto unreg_resource;
}
+
+setup_wdt:
tcobase_phys = val;
tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
@@ -318,26 +537,18 @@ static unsigned char sp5100_tco_setupdevice(void)
goto unreg_mem_region;
}
- /* Enable watchdog decode bit */
- pci_read_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- &val);
-
- val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+ pr_info("Using 0x%04x for watchdog MMIO address\n", val);
- pci_write_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- val);
+ /* Setup the watchdog timer */
+ tco_timer_enable();
- /* Enable Watchdog timer and set the resolution to 1 sec. */
- outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
- val = inb(SP5100_IO_PM_DATA_REG);
- val |= SP5100_PM_WATCHDOG_SECOND_RES;
- val &= ~SP5100_PM_WATCHDOG_DISABLE;
- outb(val, SP5100_IO_PM_DATA_REG);
-
- /* Check that the watchdog action is set to reset the system. */
+ /* Check that the watchdog action is set to reset the system */
val = readl(SP5100_WDT_CONTROL(tcobase));
+ /*
+ * Save the WatchDogFired status, because the WatchDogFired flag
+ * is cleared here.
+ */
+ tco_wdt_fired = val & SP5100_PM_WATCHDOG_FIRED;
val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
writel(val, SP5100_WDT_CONTROL(tcobase));
@@ -355,6 +566,9 @@ static unsigned char sp5100_tco_setupdevice(void)
unreg_mem_region:
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+unreg_resource:
+ if (resbase_phys)
+ release_resource(&wdt_res);
unreg_region:
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
exit:
@@ -364,23 +578,18 @@ exit:
static int sp5100_tco_init(struct platform_device *dev)
{
int ret;
- u32 val;
+ char addr_str[16];
- /* Check whether or not the hardware watchdog is there. If found, then
+ /*
+ * Check whether or not the hardware watchdog is there. If found, then
* set it up.
*/
if (!sp5100_tco_setupdevice())
return -ENODEV;
/* Check to see if last reboot was due to watchdog timeout */
- pr_info("Watchdog reboot %sdetected\n",
- readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
- "" : "not ");
-
- /* Clear out the old status */
- val = readl(SP5100_WDT_CONTROL(tcobase));
- val &= ~SP5100_PM_WATCHDOG_FIRED;
- writel(val, SP5100_WDT_CONTROL(tcobase));
+ pr_info("Last reboot was %striggered by watchdog.\n",
+ tco_wdt_fired ? "" : "not ");
/*
* Check that the heartbeat value is within its range.
@@ -400,14 +609,24 @@ static int sp5100_tco_init(struct platform_device *dev)
clear_bit(0, &timer_alive);
- pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
- tcobase, heartbeat, nowayout);
+ /* Show module parameters */
+ if (force_addr == tcobase_phys)
+ /* The force_addr is valid */
+ sprintf(addr_str, "0x%04x", force_addr);
+ else
+ strcpy(addr_str, "none");
+
+ pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
+ "force_addr=%s)\n",
+ tcobase, heartbeat, nowayout, addr_str);
return 0;
exit:
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+ if (resbase_phys)
+ release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
return ret;
}
@@ -422,6 +641,8 @@ static void sp5100_tco_cleanup(void)
misc_deregister(&sp5100_tco_miscdev);
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+ if (resbase_phys)
+ release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
}
@@ -451,7 +672,7 @@ static int __init sp5100_tco_init_module(void)
{
int err;
- pr_info("SP5100 TCO WatchDog Timer Driver v%s\n", TCO_VERSION);
+ pr_info("SP5100/SB800 TCO WatchDog Timer Driver v%s\n", TCO_VERSION);
err = platform_driver_register(&sp5100_tco_driver);
if (err)
@@ -475,13 +696,13 @@ static void __exit sp5100_tco_cleanup_module(void)
{
platform_device_unregister(sp5100_tco_platform_device);
platform_driver_unregister(&sp5100_tco_driver);
- pr_info("SP5100 TCO Watchdog Module Unloaded\n");
+ pr_info("SP5100/SB800 TCO Watchdog Module Unloaded\n");
}
module_init(sp5100_tco_init_module);
module_exit(sp5100_tco_cleanup_module);
MODULE_AUTHOR("Priyanka Gupta");
-MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_DESCRIPTION("TCO timer driver for SP5100/SB800 chipset");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
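The sp5100_tco probe code above reads the watchdog base address one byte at a time through the southbridge's PM index/data port pair (0xCD6/0xCD7). A hedged sketch of that generic indirect-I/O read, assuming the caller has already reserved the I/O region with request_region(); the helper name is illustrative:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Read a 32-bit value exposed byte-wise behind an index/data register
 * pair.  The most significant byte lives at base_index + 3, as in the
 * watchdog base-address lookup above.
 */
static u32 indirect_read32(unsigned int index_reg, unsigned int data_reg,
			   u8 base_index)
{
	u32 val = 0;
	int i;

	for (i = 3; i >= 0; i--) {
		outb(base_index + i, index_reg);	/* select the byte lane */
		val = (val << 8) | inb(data_reg);	/* then read it back */
	}
	return val;
}

The driver additionally masks the low bits of the assembled value because they are reserved in the BASE registers.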
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index a5a16cc90a3..71594a0c14b 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -9,33 +9,57 @@
/*
* Some address definitions for the Watchdog
*/
-
#define SP5100_WDT_MEM_MAP_SIZE 0x08
#define SP5100_WDT_CONTROL(base) ((base) + 0x00) /* Watchdog Control */
#define SP5100_WDT_COUNT(base) ((base) + 0x04) /* Watchdog Count */
-#define SP5100_WDT_START_STOP_BIT 1
+#define SP5100_WDT_START_STOP_BIT (1 << 0)
#define SP5100_WDT_TRIGGER_BIT (1 << 7)
-#define SP5100_PCI_WATCHDOG_MISC_REG 0x41
-#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3)
-
#define SP5100_PM_IOPORTS_SIZE 0x02
-/* These two IO registers are hardcoded and there doesn't seem to be a way to
+/*
+ * These two IO registers are hardcoded and there doesn't seem to be a way to
* read them from a register.
*/
+
+/* For SP5100/SB7x0 chipset */
#define SP5100_IO_PM_INDEX_REG 0xCD6
#define SP5100_IO_PM_DATA_REG 0xCD7
+#define SP5100_SB_RESOURCE_MMIO_BASE 0x9C
+
#define SP5100_PM_WATCHDOG_CONTROL 0x69
-#define SP5100_PM_WATCHDOG_BASE0 0x6C
-#define SP5100_PM_WATCHDOG_BASE1 0x6D
-#define SP5100_PM_WATCHDOG_BASE2 0x6E
-#define SP5100_PM_WATCHDOG_BASE3 0x6F
+#define SP5100_PM_WATCHDOG_BASE 0x6C
#define SP5100_PM_WATCHDOG_FIRED (1 << 1)
#define SP5100_PM_WATCHDOG_ACTION_RESET (1 << 2)
-#define SP5100_PM_WATCHDOG_DISABLE 1
+#define SP5100_PCI_WATCHDOG_MISC_REG 0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN (1 << 3)
+
+#define SP5100_PM_WATCHDOG_DISABLE (1 << 0)
#define SP5100_PM_WATCHDOG_SECOND_RES (3 << 1)
+
+#define SP5100_DEVNAME "SP5100 TCO"
+
+
+/* For SB8x0(or later) chipset */
+#define SB800_IO_PM_INDEX_REG 0xCD6
+#define SB800_IO_PM_DATA_REG 0xCD7
+
+#define SB800_PM_ACPI_MMIO_EN 0x24
+#define SB800_PM_WATCHDOG_CONTROL 0x48
+#define SB800_PM_WATCHDOG_BASE 0x48
+#define SB800_PM_WATCHDOG_CONFIG 0x4C
+
+#define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
+#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
+#define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
+#define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
+#define SB800_ACPI_MMIO_SEL (1 << 2)
+
+
+#define SB800_PM_WDT_MMIO_OFFSET 0xB00
+
+#define SB800_DEVNAME "SB800 TCO"
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 76c73cbf004..8872642505c 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -130,16 +130,10 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
int ret;
if (!ping) {
- ret = clk_prepare(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock prepare fail");
- return ret;
- }
- ret = clk_enable(wdt->clk);
+ ret = clk_prepare_enable(wdt->clk);
if (ret) {
dev_err(&wdt->adev->dev, "clock enable fail");
- clk_unprepare(wdt->clk);
return ret;
}
}
@@ -190,8 +184,7 @@ static int wdt_disable(struct watchdog_device *wdd)
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
- clk_disable(wdt->clk);
- clk_unprepare(wdt->clk);
+ clk_disable_unprepare(wdt->clk);
return 0;
}
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index 9f54b1da718..0f03106f751 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -22,26 +22,12 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
#include <linux/i2c/twl.h>
#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
-#define TWL4030_WDT_STATE_OPEN 0x1
-#define TWL4030_WDT_STATE_ACTIVE 0x8
-
-static struct platform_device *twl4030_wdt_dev;
-
-struct twl4030_wdt {
- struct miscdevice miscdev;
- int timer_margin;
- unsigned long state;
-};
-
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
@@ -49,175 +35,75 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static int twl4030_wdt_write(unsigned char val)
{
- return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ return twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, val,
TWL4030_WATCHDOG_CFG_REG_OFFS);
}
-static int twl4030_wdt_enable(struct twl4030_wdt *wdt)
+static int twl4030_wdt_start(struct watchdog_device *wdt)
{
- return twl4030_wdt_write(wdt->timer_margin + 1);
+ return twl4030_wdt_write(wdt->timeout + 1);
}
-static int twl4030_wdt_disable(struct twl4030_wdt *wdt)
+static int twl4030_wdt_stop(struct watchdog_device *wdt)
{
return twl4030_wdt_write(0);
}
-static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout)
-{
- if (timeout < 0 || timeout > 30) {
- dev_warn(wdt->miscdev.parent,
- "Timeout can only be in the range [0-30] seconds");
- return -EINVAL;
- }
- wdt->timer_margin = timeout;
- return twl4030_wdt_enable(wdt);
-}
-
-static ssize_t twl4030_wdt_write_fop(struct file *file,
- const char __user *data, size_t len, loff_t *ppos)
-{
- struct twl4030_wdt *wdt = file->private_data;
-
- if (len)
- twl4030_wdt_enable(wdt);
-
- return len;
-}
-
-static long twl4030_wdt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static int twl4030_wdt_set_timeout(struct watchdog_device *wdt,
+ unsigned int timeout)
{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_margin;
- struct twl4030_wdt *wdt = file->private_data;
-
- static const struct watchdog_info twl4030_wd_ident = {
- .identity = "TWL4030 Watchdog",
- .options = WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &twl4030_wd_ident,
- sizeof(twl4030_wd_ident)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- twl4030_wdt_enable(wdt);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_margin, p))
- return -EFAULT;
- if (twl4030_wdt_set_timeout(wdt, new_margin))
- return -EINVAL;
- return put_user(wdt->timer_margin, p);
-
- case WDIOC_GETTIMEOUT:
- return put_user(wdt->timer_margin, p);
-
- default:
- return -ENOTTY;
- }
-
+ wdt->timeout = timeout;
return 0;
}
-static int twl4030_wdt_open(struct inode *inode, struct file *file)
-{
- struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev);
-
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &wdt->state))
- return -EBUSY;
-
- wdt->state |= TWL4030_WDT_STATE_ACTIVE;
- file->private_data = (void *) wdt;
-
- twl4030_wdt_enable(wdt);
- return nonseekable_open(inode, file);
-}
-
-static int twl4030_wdt_release(struct inode *inode, struct file *file)
-{
- struct twl4030_wdt *wdt = file->private_data;
- if (nowayout) {
- dev_alert(wdt->miscdev.parent,
- "Unexpected close, watchdog still running!\n");
- twl4030_wdt_enable(wdt);
- } else {
- if (twl4030_wdt_disable(wdt))
- return -EFAULT;
- wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
- }
-
- clear_bit(0, &wdt->state);
- return 0;
-}
+static const struct watchdog_info twl4030_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "TWL4030 Watchdog",
+};
-static const struct file_operations twl4030_wdt_fops = {
+static const struct watchdog_ops twl4030_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = twl4030_wdt_open,
- .release = twl4030_wdt_release,
- .unlocked_ioctl = twl4030_wdt_ioctl,
- .write = twl4030_wdt_write_fop,
+ .start = twl4030_wdt_start,
+ .stop = twl4030_wdt_stop,
+ .set_timeout = twl4030_wdt_set_timeout,
};
static int twl4030_wdt_probe(struct platform_device *pdev)
{
int ret = 0;
- struct twl4030_wdt *wdt;
+ struct watchdog_device *wdt;
- wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL);
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
- wdt->state = 0;
- wdt->timer_margin = 30;
- wdt->miscdev.parent = &pdev->dev;
- wdt->miscdev.fops = &twl4030_wdt_fops;
- wdt->miscdev.minor = WATCHDOG_MINOR;
- wdt->miscdev.name = "watchdog";
+ wdt->info = &twl4030_wdt_info;
+ wdt->ops = &twl4030_wdt_ops;
+ wdt->status = 0;
+ wdt->timeout = 30;
+ wdt->min_timeout = 1;
+ wdt->max_timeout = 30;
+ watchdog_set_nowayout(wdt, nowayout);
platform_set_drvdata(pdev, wdt);
- twl4030_wdt_dev = pdev;
-
- twl4030_wdt_disable(wdt);
+ twl4030_wdt_stop(wdt);
- ret = misc_register(&wdt->miscdev);
+ ret = watchdog_register_device(wdt);
if (ret) {
- dev_err(wdt->miscdev.parent,
- "Failed to register misc device\n");
platform_set_drvdata(pdev, NULL);
- kfree(wdt);
- twl4030_wdt_dev = NULL;
return ret;
}
+
return 0;
}
static int twl4030_wdt_remove(struct platform_device *pdev)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
-
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- if (twl4030_wdt_disable(wdt))
- return -EFAULT;
-
- wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
- misc_deregister(&wdt->miscdev);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ watchdog_unregister_device(wdt);
platform_set_drvdata(pdev, NULL);
- kfree(wdt);
- twl4030_wdt_dev = NULL;
return 0;
}
@@ -225,18 +111,18 @@ static int twl4030_wdt_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- return twl4030_wdt_disable(wdt);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ if (watchdog_active(wdt))
+ return twl4030_wdt_stop(wdt);
return 0;
}
static int twl4030_wdt_resume(struct platform_device *pdev)
{
- struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
- if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
- return twl4030_wdt_enable(wdt);
+ struct watchdog_device *wdt = platform_get_drvdata(pdev);
+ if (watchdog_active(wdt))
+ return twl4030_wdt_start(wdt);
return 0;
}
@@ -245,14 +131,21 @@ static int twl4030_wdt_resume(struct platform_device *pdev)
#define twl4030_wdt_resume NULL
#endif
+static const struct of_device_id twl_wdt_of_match[] = {
+ { .compatible = "ti,twl4030-wdt", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_wdt_of_match);
+
static struct platform_driver twl4030_wdt_driver = {
.probe = twl4030_wdt_probe,
.remove = twl4030_wdt_remove,
.suspend = twl4030_wdt_suspend,
.resume = twl4030_wdt_resume,
.driver = {
- .owner = THIS_MODULE,
- .name = "twl4030_wdt",
+ .owner = THIS_MODULE,
+ .name = "twl4030_wdt",
+ .of_match_table = twl_wdt_of_match,
},
};
@@ -260,6 +153,5 @@ module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:twl4030_wdt");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 126d8ce591c..cabfa97f467 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -206,4 +206,7 @@ config XEN_MCE_LOG
Allow kernel fetching MCE error from Xen platform and
converting it into Linux mcelog format for mcelog tools
+config XEN_HAVE_PVMMU
+ bool
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 74354708c6c..fb213cf81a7 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,9 +1,9 @@
ifneq ($(CONFIG_ARM),y)
-obj-y += manage.o balloon.o
+obj-y += manage.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o events.o
+obj-y += grant-table.o features.o events.o balloon.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -11,7 +11,8 @@ CFLAGS_features.o := $(nostackp)
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
-dom0-$(CONFIG_ACPI) += acpi.o
+dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y)
+xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d6886d90ccf..a56776dbe09 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -359,6 +359,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
set_phys_to_machine(pfn, frame_list[i]);
+#ifdef CONFIG_XEN_HAVE_PVMMU
/* Link back into the page tables if not highmem. */
if (xen_pv_domain() && !PageHighMem(page)) {
int ret;
@@ -368,6 +369,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
0);
BUG_ON(ret);
}
+#endif
/* Relinquish the page back to the allocator. */
ClearPageReserved(page);
@@ -416,13 +418,14 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
scrub_page(page);
+#ifdef CONFIG_XEN_HAVE_PVMMU
if (xen_pv_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
-
+#endif
}
/* Ensure that ballooned highmem pages don't have kmaps. */
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 71f5c459b08..0bbbccbb1f1 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,11 +33,14 @@
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include <xen/balloon.h>
#include "privcmd.h"
MODULE_LICENSE("GPL");
+#define PRIV_VMA_LOCKED ((void *)1)
+
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
@@ -178,7 +181,7 @@ static int mmap_mfn_range(void *data, void *state)
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
- st->domain);
+ st->domain, NULL);
if (rc < 0)
return rc;
@@ -199,6 +202,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
if (!xen_initial_domain())
return -EPERM;
+ /* We only support privcmd_ioctl_mmap_batch for auto-translated domains. */
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -ENOSYS;
+
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
@@ -246,6 +253,7 @@ struct mmap_batch_state {
domid_t domain;
unsigned long va;
struct vm_area_struct *vma;
+ int index;
/* A tristate:
* 0 for no errors
* 1 if at least one error has happened (and no
@@ -260,14 +268,24 @@ struct mmap_batch_state {
xen_pfn_t __user *user_mfn;
};
+/* Auto-translated dom0 note: if the domU being created is PV, then the mfn is
+ * a real mfn (an address on the bus). If it is auto-translated, then the mfn
+ * is a pfn (the input to HAP).
+ */
static int mmap_batch_fn(void *data, void *state)
{
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state;
+ struct vm_area_struct *vma = st->vma;
+ struct page **pages = vma->vm_private_data;
+ struct page *cur_page = NULL;
int ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ cur_page = pages[st->index++];
+
ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
- st->vma->vm_page_prot, st->domain);
+ st->vma->vm_page_prot, st->domain,
+ &cur_page);
/* Store error code for second pass. */
*(st->err++) = ret;
@@ -303,6 +321,32 @@ static int mmap_return_errors_v1(void *data, void *state)
return __put_user(*mfnp, st->user_mfn++);
}
+/* Allocate pfns that are then mapped with gmfns from the foreign domid.
+ * Update the vma with the page info to use later.
+ * Returns 0 on success, otherwise -errno.
+ */
+static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
+{
+ int rc;
+ struct page **pages;
+
+ pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+ if (pages == NULL)
+ return -ENOMEM;
+
+ rc = alloc_xenballooned_pages(numpgs, pages, 0);
+ if (rc != 0) {
+ pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
+ numpgs, rc);
+ kfree(pages);
+ return -ENOMEM;
+ }
+ BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+ vma->vm_private_data = pages;
+
+ return 0;
+}
+
static struct vm_operations_struct privcmd_vm_ops;
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
@@ -370,10 +414,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
ret = -EINVAL;
goto out;
}
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = alloc_empty_pages(vma, m.num);
+ if (ret < 0) {
+ up_write(&mm->mmap_sem);
+ goto out;
+ }
+ }
state.domain = m.dom;
state.vma = vma;
state.va = m.addr;
+ state.index = 0;
state.global_error = 0;
state.err = err_array;
@@ -442,6 +494,19 @@ static long privcmd_ioctl(struct file *file,
return ret;
}
+static void privcmd_close(struct vm_area_struct *vma)
+{
+ struct page **pages = vma->vm_private_data;
+ int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+ return;
+
+ xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ free_xenballooned_pages(numpgs, pages);
+ kfree(pages);
+}
+
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -452,6 +517,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
static struct vm_operations_struct privcmd_vm_ops = {
+ .close = privcmd_close,
.fault = privcmd_fault
};
@@ -469,7 +535,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
- return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+ return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}
const struct file_operations xen_privcmd_fops = {
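The privcmd change above swaps xchg() for cmpxchg() when claiming a VMA for a single-shot mapping: the sentinel is installed only while vm_private_data is still NULL, so it can no longer overwrite the page array that alloc_empty_pages() stores there for auto-translated guests. A short hedged restatement of that claim pattern (FOO_VMA_LOCKED mirrors the patch's PRIV_VMA_LOCKED):

#include <linux/atomic.h>
#include <linux/mm.h>

#define FOO_VMA_LOCKED ((void *)1)	/* sentinel, analogous to PRIV_VMA_LOCKED */

/* Returns true exactly once per VMA; later callers (and VMAs that already
 * carry a page array) see a non-NULL value and fail the claim. */
static int foo_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return cmpxchg(&vma->vm_private_data, NULL, FOO_VMA_LOCKED) == NULL;
}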
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 58db6df866e..af47e759446 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -338,9 +338,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
+ phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys);
- void *map;
BUG_ON(dir == DMA_NONE);
/*
@@ -356,10 +355,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* Oh well, have to allocate and map a bounce buffer.
*/
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
- if (!map)
+ if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
- dev_addr = xen_virt_to_bus(map);
+ dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -389,7 +388,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
return;
}
@@ -434,8 +433,7 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
- swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
- target);
+ swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
return;
}
@@ -494,11 +492,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
if (swiotlb_force ||
!dma_capable(hwdev, dev_addr, sg->length) ||
range_straddles_page_boundary(paddr, sg->length)) {
- void *map = swiotlb_tbl_map_single(hwdev,
- start_dma_addr,
- sg_phys(sg),
- sg->length, dir);
- if (!map) {
+ phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+ start_dma_addr,
+ sg_phys(sg),
+ sg->length,
+ dir);
+ if (map == SWIOTLB_MAP_ERROR) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +505,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sgl[0].dma_length = 0;
return DMA_ERROR_CODE;
}
- sg->dma_address = xen_virt_to_bus(map);
+ sg->dma_address = xen_phys_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
new file mode 100644
index 00000000000..da39191e727
--- /dev/null
+++ b/drivers/xen/xen-acpi-pad.c
@@ -0,0 +1,182 @@
+/*
+ * xen-acpi-pad.c - Xen pad interface
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/version.h>
+#include <xen/xen-ops.h>
+
+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
+#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+static DEFINE_MUTEX(xen_cpu_lock);
+
+static int xen_acpi_pad_idle_cpus(unsigned int idle_nums)
+{
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_core_parking;
+ op.u.core_parking.type = XEN_CORE_PARKING_SET;
+ op.u.core_parking.idle_nums = idle_nums;
+
+ return HYPERVISOR_dom0_op(&op);
+}
+
+static int xen_acpi_pad_idle_cpus_num(void)
+{
+ struct xen_platform_op op;
+
+ op.cmd = XENPF_core_parking;
+ op.u.core_parking.type = XEN_CORE_PARKING_GET;
+
+ return HYPERVISOR_dom0_op(&op)
+ ?: op.u.core_parking.idle_nums;
+}
+
+/*
+ * Query the firmware for how many CPUs should be idle.
+ * Return -1 on failure.
+ */
+static int acpi_pad_pur(acpi_handle handle)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *package;
+ int num = -1;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+ return num;
+
+ if (!buffer.length || !buffer.pointer)
+ return num;
+
+ package = buffer.pointer;
+
+ if (package->type == ACPI_TYPE_PACKAGE &&
+ package->package.count == 2 &&
+ package->package.elements[0].integer.value == 1) /* rev 1 */
+ num = package->package.elements[1].integer.value;
+
+ kfree(buffer.pointer);
+ return num;
+}
+
+/* Notify firmware how many CPUs are idle */
+static void acpi_pad_ost(acpi_handle handle, int stat,
+ uint32_t idle_nums)
+{
+ union acpi_object params[3] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_BUFFER,},
+ };
+ struct acpi_object_list arg_list = {3, params};
+
+ params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
+ params[1].integer.value = stat;
+ params[2].buffer.length = 4;
+ params[2].buffer.pointer = (void *)&idle_nums;
+ acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+}
+
+static void acpi_pad_handle_notify(acpi_handle handle)
+{
+ int idle_nums;
+
+ mutex_lock(&xen_cpu_lock);
+ idle_nums = acpi_pad_pur(handle);
+ if (idle_nums < 0) {
+ mutex_unlock(&xen_cpu_lock);
+ return;
+ }
+
+ idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
+ ?: xen_acpi_pad_idle_cpus_num();
+ if (idle_nums >= 0)
+ acpi_pad_ost(handle, 0, idle_nums);
+ mutex_unlock(&xen_cpu_lock);
+}
+
+static void acpi_pad_notify(acpi_handle handle, u32 event,
+ void *data)
+{
+ switch (event) {
+ case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
+ acpi_pad_handle_notify(handle);
+ break;
+ default:
+ pr_warn("Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+static int acpi_pad_add(struct acpi_device *device)
+{
+ acpi_status status;
+
+ strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int acpi_pad_remove(struct acpi_device *device,
+ int type)
+{
+ mutex_lock(&xen_cpu_lock);
+ xen_acpi_pad_idle_cpus(0);
+ mutex_unlock(&xen_cpu_lock);
+
+ acpi_remove_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY, acpi_pad_notify);
+ return 0;
+}
+
+static const struct acpi_device_id pad_device_ids[] = {
+ {"ACPI000C", 0},
+ {"", 0},
+};
+
+static struct acpi_driver acpi_pad_driver = {
+ .name = "processor_aggregator",
+ .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
+ .ids = pad_device_ids,
+ .ops = {
+ .add = acpi_pad_add,
+ .remove = acpi_pad_remove,
+ },
+};
+
+static int __init xen_acpi_pad_init(void)
+{
+ /* Only DOM0 is responsible for Xen acpi pad */
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ /* Only Xen 4.2 or later supports Xen acpi pad */
+ if (!xen_running_on_version_or_later(4, 2))
+ return -ENODEV;
+
+ return acpi_bus_register_driver(&acpi_pad_driver);
+}
+subsys_initcall(xen_acpi_pad_init);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 961d664e2d2..cd50d251998 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -142,7 +142,8 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
if (psdev->dev != NULL
&& domain == pci_domain_nr(psdev->dev->bus)
&& bus == psdev->dev->bus->number
- && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ && slot == PCI_SLOT(psdev->dev->devfn)
+ && func == PCI_FUNC(psdev->dev->devfn)) {
pcistub_device_get(psdev);
goto out;
}
@@ -191,7 +192,8 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
if (psdev->dev != NULL
&& domain == pci_domain_nr(psdev->dev->bus)
&& bus == psdev->dev->bus->number
- && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ && slot == PCI_SLOT(psdev->dev->devfn)
+ && func == PCI_FUNC(psdev->dev->devfn)) {
found_dev = pcistub_device_get_pci_dev(pdev, psdev);
break;
}
@@ -360,7 +362,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
if (!dev_data->pci_saved_state)
dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
else {
- dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
+ dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
__pci_reset_function_locked(dev);
pci_restore_state(dev);
}
@@ -897,42 +899,35 @@ static struct pci_driver xen_pcibk_pci_driver = {
static inline int str_to_slot(const char *buf, int *domain, int *bus,
int *slot, int *func)
{
- int err;
- char wc = '*';
+ int parsed = 0;
- err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
- switch (err) {
+ switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
+ &parsed)) {
case 3:
*func = -1;
- err = sscanf(buf, " %x:%x:%x.%c", domain, bus, slot, &wc);
+ sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
break;
case 2:
*slot = *func = -1;
- err = sscanf(buf, " %x:%x:*.%c", domain, bus, &wc);
- if (err >= 2)
- ++err;
+ sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
break;
}
- if (err == 4 && wc == '*')
+ if (parsed && !buf[parsed])
return 0;
- else if (err < 0)
- return -EINVAL;
/* try again without domain */
*domain = 0;
- wc = '*';
- err = sscanf(buf, " %x:%x.%x", bus, slot, func);
- switch (err) {
+ switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
case 2:
*func = -1;
- err = sscanf(buf, " %x:%x.%c", bus, slot, &wc);
+ sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
break;
case 1:
*slot = *func = -1;
- err = sscanf(buf, " %x:*.%c", bus, &wc) + 1;
+ sscanf(buf, " %x:*.* %n", bus, &parsed);
break;
}
- if (err == 3 && wc == '*')
+ if (parsed && !buf[parsed])
return 0;
return -EINVAL;
@@ -941,13 +936,20 @@ static inline int str_to_slot(const char *buf, int *domain, int *bus,
static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
*slot, int *func, int *reg, int *size, int *mask)
{
- int err;
+ int parsed = 0;
- err =
- sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot,
- func, reg, size, mask);
- if (err == 7)
+ sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
+ reg, size, mask, &parsed);
+ if (parsed && !buf[parsed])
return 0;
+
+ /* try again without domain */
+ *domain = 0;
+ sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
+ mask, &parsed);
+ if (parsed && !buf[parsed])
+ return 0;
+
return -EINVAL;
}
@@ -955,7 +957,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
struct pcistub_device_id *pci_dev_id;
unsigned long flags;
- int rc = 0;
+ int rc = 0, devfn = PCI_DEVFN(slot, func);
if (slot < 0) {
for (slot = 0; !rc && slot < 32; ++slot)
@@ -969,13 +971,24 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
return rc;
}
+ if ((
+#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
+ || !defined(CONFIG_PCI_DOMAINS)
+ !pci_domains_supported ? domain :
+#endif
+ domain < 0 || domain > 0xffff)
+ || bus < 0 || bus > 0xff
+ || PCI_SLOT(devfn) != slot
+ || PCI_FUNC(devfn) != func)
+ return -EINVAL;
+
pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
if (!pci_dev_id)
return -ENOMEM;
pci_dev_id->domain = domain;
pci_dev_id->bus = bus;
- pci_dev_id->devfn = PCI_DEVFN(slot, func);
+ pci_dev_id->devfn = devfn;
pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
domain, bus, slot, func);
@@ -1016,14 +1029,18 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
return err;
}
-static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
- int size, int mask)
+static int pcistub_reg_add(int domain, int bus, int slot, int func,
+ unsigned int reg, unsigned int size,
+ unsigned int mask)
{
int err = 0;
struct pcistub_device *psdev;
struct pci_dev *dev;
struct config_field *field;
+ if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
+ return -EINVAL;
+
psdev = pcistub_device_find(domain, bus, slot, func);
if (!psdev) {
err = -ENODEV;
@@ -1254,13 +1271,11 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
int err;
struct pcistub_device *psdev;
struct xen_pcibk_dev_data *dev_data;
+
err = str_to_slot(buf, &domain, &bus, &slot, &func);
if (err)
goto out;
- if (slot < 0 || func < 0) {
- err = -EINVAL;
- goto out;
- }
+
psdev = pcistub_device_find(domain, bus, slot, func);
if (!psdev) {
err = -ENODEV;
@@ -1339,8 +1354,6 @@ static int __init pcistub_init(void)
if (pci_devs_to_hide && *pci_devs_to_hide) {
do {
- char wc = '*';
-
parsed = 0;
err = sscanf(pci_devs_to_hide + pos,
@@ -1349,51 +1362,48 @@ static int __init pcistub_init(void)
switch (err) {
case 3:
func = -1;
- err = sscanf(pci_devs_to_hide + pos,
- " (%x:%x:%x.%c) %n",
- &domain, &bus, &slot, &wc,
- &parsed);
+ sscanf(pci_devs_to_hide + pos,
+ " (%x:%x:%x.*) %n",
+ &domain, &bus, &slot, &parsed);
break;
case 2:
slot = func = -1;
- err = sscanf(pci_devs_to_hide + pos,
- " (%x:%x:*.%c) %n",
- &domain, &bus, &wc, &parsed) + 1;
+ sscanf(pci_devs_to_hide + pos,
+ " (%x:%x:*.*) %n",
+ &domain, &bus, &parsed);
break;
}
- if (err != 4 || wc != '*') {
+ if (!parsed) {
domain = 0;
- wc = '*';
err = sscanf(pci_devs_to_hide + pos,
" (%x:%x.%x) %n",
&bus, &slot, &func, &parsed);
switch (err) {
case 2:
func = -1;
- err = sscanf(pci_devs_to_hide + pos,
- " (%x:%x.%c) %n",
- &bus, &slot, &wc,
- &parsed);
+ sscanf(pci_devs_to_hide + pos,
+ " (%x:%x.*) %n",
+ &bus, &slot, &parsed);
break;
case 1:
slot = func = -1;
- err = sscanf(pci_devs_to_hide + pos,
- " (%x:*.%c) %n",
- &bus, &wc, &parsed) + 1;
+ sscanf(pci_devs_to_hide + pos,
+ " (%x:*.*) %n",
+ &bus, &parsed);
break;
}
- if (err != 3 || wc != '*')
- goto parse_error;
}
+ if (parsed <= 0)
+ goto parse_error;
+
err = pcistub_device_id_add(domain, bus, slot, func);
if (err)
goto out;
- /* if parsed<=0, we've reached the end of the string */
pos += parsed;
- } while (parsed > 0 && pci_devs_to_hide[pos]);
+ } while (pci_devs_to_hide[pos]);
}
/* If we're the first PCI Device Driver to register, we're the
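The str_to_slot()/str_to_quirk() rework above stops counting sscanf() conversions and wildcard characters and instead records the number of consumed characters with %n, accepting the input only when the whole string was parsed. A self-contained userspace sketch of the same validation technique (illustrative, not from the kernel source):

#include <stdio.h>

/* Return 1 if buf is exactly a hex "bus:slot.func" triple, else 0. */
static int parse_bdf(const char *buf, unsigned int *bus, unsigned int *slot,
		     unsigned int *func)
{
	int parsed = 0;

	/* %n stores how many characters have been consumed so far */
	sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed);

	/* accept only if parsing reached the end of the string */
	return parsed && !buf[parsed];
}

int main(void)
{
	unsigned int bus, slot, func;

	printf("%d\n", parse_bdf("03:00.1", &bus, &slot, &func));      /* 1 */
	printf("%d\n", parse_bdf("03:00.1 junk", &bus, &slot, &func)); /* 0 */
	return 0;
}

If an early conversion fails, %n is never reached and parsed stays 0, which is the same property the kernel code relies on.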
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index acedeabe589..88e677b0de7 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -48,7 +48,6 @@
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus_comms.h"
-#include <asm/xen/hypervisor.h>
struct xs_stored_msg {
struct list_head list;
diff --git a/fs/Kconfig b/fs/Kconfig
index f95ae3a027f..cfe512fd1ca 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -28,8 +28,8 @@ config FS_MBCACHE
tristate
default y if EXT2_FS=y && EXT2_FS_XATTR
default y if EXT3_FS=y && EXT3_FS_XATTR
- default y if EXT4_FS=y && EXT4_FS_XATTR
- default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
+ default y if EXT4_FS=y
+ default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS
source "fs/reiserfs/Kconfig"
source "fs/jfs/Kconfig"
@@ -220,6 +220,7 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
+source "fs/f2fs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index 1d7af79288a..9d53192236f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
+obj-$(CONFIG_F2FS_FS) += f2fs/
obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index e9bad5093a3..5f95d1ed9c6 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -45,6 +45,14 @@ static int adfs_readpage(struct file *file, struct page *page)
return block_read_full_page(page, adfs_get_block);
}
+static void adfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size)
+ truncate_pagecache(inode, to, inode->i_size);
+}
+
static int adfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -55,11 +63,8 @@ static int adfs_write_begin(struct file *file, struct address_space *mapping,
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ adfs_write_failed(mapping, pos + len);
return ret;
}
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 2f4c935cb32..af3261b7810 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -39,7 +39,6 @@ const struct file_operations affs_file_operations = {
};
const struct inode_operations affs_file_inode_operations = {
- .truncate = affs_truncate,
.setattr = affs_notify_change,
};
@@ -402,6 +401,16 @@ static int affs_readpage(struct file *file, struct page *page)
return block_read_full_page(page, affs_get_block);
}
+static void affs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ affs_truncate(inode);
+ }
+}
+
static int affs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -412,11 +421,8 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ affs_write_failed(mapping, pos + len);
return ret;
}
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 15c48426822..0e092d08680 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -237,9 +237,12 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
+
+ truncate_setsize(inode, attr->ia_size);
+ affs_truncate(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/attr.c b/fs/attr.c
index cce7df53b69..1449adb14ef 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -49,14 +49,15 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Make sure a caller can chown. */
if ((ia_valid & ATTR_UID) &&
(!uid_eq(current_fsuid(), inode->i_uid) ||
- !uid_eq(attr->ia_uid, inode->i_uid)) && !capable(CAP_CHOWN))
+ !uid_eq(attr->ia_uid, inode->i_uid)) &&
+ !inode_capable(inode, CAP_CHOWN))
return -EPERM;
/* Make sure caller can chgrp. */
if ((ia_valid & ATTR_GID) &&
(!uid_eq(current_fsuid(), inode->i_uid) ||
(!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) &&
- !capable(CAP_CHOWN))
+ !inode_capable(inode, CAP_CHOWN))
return -EPERM;
/* Make sure a caller can chmod. */
@@ -65,7 +66,8 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
- inode->i_gid) && !capable(CAP_FSETID))
+ inode->i_gid) &&
+ !inode_capable(inode, CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
@@ -157,7 +159,8 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ if (!in_group_p(inode->i_gid) &&
+ !inode_capable(inode, CAP_FSETID))
mode &= ~S_ISGID;
inode->i_mode = mode;
}
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 908e1845541..b785e770795 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -74,8 +74,8 @@ struct autofs_info {
unsigned long last_used;
atomic_t count;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
};
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
@@ -89,8 +89,8 @@ struct autofs_wait_queue {
struct qstr name;
u32 dev;
u64 ino;
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
pid_t pid;
pid_t tgid;
/* This is for status reporting upon return */
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index a16214109d3..9f68a37bb2b 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -437,8 +437,8 @@ static int autofs_dev_ioctl_requester(struct file *fp,
err = 0;
autofs4_expire_wait(path.dentry);
spin_lock(&sbi->fs_lock);
- param->requester.uid = ino->uid;
- param->requester.gid = ino->gid;
+ param->requester.uid = from_kuid_munged(current_user_ns(), ino->uid);
+ param->requester.gid = from_kgid_munged(current_user_ns(), ino->gid);
spin_unlock(&sbi->fs_lock);
}
path_put(&path);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 842d00048a6..01443ce43ee 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -548,15 +548,6 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_EXPIRING;
- spin_lock(&dentry->d_lock);
- if (!ret) {
- if ((IS_ROOT(dentry) ||
- (autofs_type_indirect(sbi->type) &&
- IS_ROOT(dentry->d_parent))) &&
- !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
- __managed_dentry_set_automount(dentry);
- }
- spin_unlock(&dentry->d_lock);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 8a4fed8ead3..b104726e2d0 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -36,8 +36,8 @@ struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi)
void autofs4_clean_ino(struct autofs_info *ino)
{
- ino->uid = 0;
- ino->gid = 0;
+ ino->uid = GLOBAL_ROOT_UID;
+ ino->gid = GLOBAL_ROOT_GID;
ino->last_used = jiffies;
}
@@ -79,10 +79,12 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
return 0;
seq_printf(m, ",fd=%d", sbi->pipefd);
- if (root_inode->i_uid != 0)
- seq_printf(m, ",uid=%u", root_inode->i_uid);
- if (root_inode->i_gid != 0)
- seq_printf(m, ",gid=%u", root_inode->i_gid);
+ if (!uid_eq(root_inode->i_uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, root_inode->i_uid));
+ if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, root_inode->i_gid));
seq_printf(m, ",pgrp=%d", sbi->oz_pgrp);
seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ);
seq_printf(m, ",minproto=%d", sbi->min_proto);
@@ -126,7 +128,7 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
-static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid,
+static int parse_options(char *options, int *pipefd, kuid_t *uid, kgid_t *gid,
pid_t *pgrp, unsigned int *type, int *minproto, int *maxproto)
{
char *p;
@@ -159,12 +161,16 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid,
case Opt_uid:
if (match_int(args, &option))
return 1;
- *uid = option;
+ *uid = make_kuid(current_user_ns(), option);
+ if (!uid_valid(*uid))
+ return 1;
break;
case Opt_gid:
if (match_int(args, &option))
return 1;
- *gid = option;
+ *gid = make_kgid(current_user_ns(), option);
+ if (!gid_valid(*gid))
+ return 1;
break;
case Opt_pgrp:
if (match_int(args, &option))
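The autofs4 changes in this series all follow the same user-namespace conversion pattern: numeric ids parsed from mount options are turned into kernel-internal kuid_t/kgid_t values with make_kuid()/make_kgid() and rejected if they have no mapping, while ids reported back to user space go through from_kuid_munged()/from_kgid_munged(). A minimal sketch of that round trip, assuming the uidgid API of this kernel era; the helper names option_to_kuid() and kuid_for_report() are made up for illustration:

        #include <linux/uidgid.h>
        #include <linux/cred.h>

        /* Parse side: map a user-supplied id into the caller's namespace. */
        static int option_to_kuid(int option, kuid_t *out)
        {
                kuid_t uid = make_kuid(current_user_ns(), option);

                if (!uid_valid(uid))
                        return -EINVAL; /* no mapping in this namespace */
                *out = uid;
                return 0;
        }

        /* Report side: unmapped ids are munged to the overflow uid. */
        static uid_t kuid_for_report(kuid_t uid)
        {
                return from_kuid_munged(current_user_ns(), uid);
        }
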
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 91b11650722..c93447604da 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -124,13 +124,10 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
* it.
*/
spin_lock(&sbi->lookup_lock);
- spin_lock(&dentry->d_lock);
- if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ if (!d_mountpoint(dentry) && simple_empty(dentry)) {
spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
- spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
out:
@@ -355,7 +352,6 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
status = autofs4_mount_wait(dentry);
if (status)
return ERR_PTR(status);
- spin_lock(&sbi->fs_lock);
goto done;
}
@@ -364,8 +360,11 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
* having d_mountpoint() true, so there's no need to call back
* to the daemon.
*/
- if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+ if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+ spin_unlock(&sbi->fs_lock);
goto done;
+ }
+
if (!d_mountpoint(dentry)) {
/*
* It's possible that user space hasn't removed directories
@@ -379,15 +378,13 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
* require user space behave.
*/
if (sbi->version > 4) {
- if (have_submounts(dentry))
+ if (have_submounts(dentry)) {
+ spin_unlock(&sbi->fs_lock);
goto done;
+ }
} else {
- spin_lock(&dentry->d_lock);
- if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ if (!simple_empty(dentry))
goto done;
- }
- spin_unlock(&dentry->d_lock);
}
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
@@ -399,28 +396,8 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
return ERR_PTR(status);
}
}
-done:
- if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
- /*
- * Any needed mounting has been completed and the path
- * updated so clear DCACHE_NEED_AUTOMOUNT so we don't
- * call ->d_automount() on rootless multi-mounts since
- * it can lead to an incorrect ELOOP error return.
- *
- * Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and
- * symlinks as in all other cases the dentry will be covered by
- * an actual mount so ->d_automount() won't be called during
- * the follow.
- */
- spin_lock(&dentry->d_lock);
- if ((!d_mountpoint(dentry) &&
- !list_empty(&dentry->d_subdirs)) ||
- (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
- __managed_dentry_clear_automount(dentry);
- spin_unlock(&dentry->d_lock);
- }
spin_unlock(&sbi->fs_lock);
-
+done:
/* Mount succeeded, check if we ended up with a new dentry */
dentry = autofs4_mountpoint_changed(path);
if (!dentry)
@@ -432,6 +409,8 @@ done:
int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ int status;
DPRINTK("dentry=%p %.*s",
dentry, dentry->d_name.len, dentry->d_name.name);
@@ -456,7 +435,32 @@ int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
* This dentry may be under construction so wait on mount
* completion.
*/
- return autofs4_mount_wait(dentry);
+ status = autofs4_mount_wait(dentry);
+ if (status)
+ return status;
+
+ spin_lock(&sbi->fs_lock);
+ /*
+ * If the dentry has been selected for expire while we slept
+ * on the lock then it might go away. We'll deal with that in
+ * ->d_automount() and wait on a new mount if the expire
+ * succeeds or return here if it doesn't (since there's no
+ * mount to follow with a rootless multi-mount).
+ */
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+ /*
+ * Any needed mounting has been completed and the path
+ * updated so check if this is a rootless multi-mount so
+	 * we can avoid needless calls to ->d_automount() and avoid
+ * an incorrect ELOOP error return.
+ */
+ if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
+ (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+ status = -EISDIR;
+ }
+ spin_unlock(&sbi->fs_lock);
+
+ return status;
}
/* Lookups in the root directory */
@@ -599,9 +603,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
spin_lock(&sbi->lookup_lock);
__autofs4_add_expiring(dentry);
- spin_lock(&dentry->d_lock);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ d_drop(dentry);
spin_unlock(&sbi->lookup_lock);
return 0;
@@ -672,15 +674,12 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
return -EACCES;
spin_lock(&sbi->lookup_lock);
- spin_lock(&dentry->d_lock);
- if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ if (!simple_empty(dentry)) {
spin_unlock(&sbi->lookup_lock);
return -ENOTEMPTY;
}
__autofs4_add_expiring(dentry);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ d_drop(dentry);
spin_unlock(&sbi->lookup_lock);
if (sbi->version < 5)
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index dce436e595c..03bc1d347d8 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -154,6 +154,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
case autofs_ptype_expire_direct:
{
struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
+ struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;
pktsz = sizeof(*packet);
@@ -163,8 +164,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
packet->name[wq->name.len] = '\0';
packet->dev = wq->dev;
packet->ino = wq->ino;
- packet->uid = wq->uid;
- packet->gid = wq->gid;
+ packet->uid = from_kuid_munged(user_ns, wq->uid);
+ packet->gid = from_kgid_munged(user_ns, wq->gid);
packet->pid = wq->pid;
packet->tgid = wq->tgid;
break;
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index b1342ffb3cf..922ad460bff 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -16,7 +16,7 @@
#include <linux/poll.h>
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int origin)
+static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
{
return -EIO;
}
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index f20e8a71062..ad3ea1497cc 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -161,6 +161,14 @@ static int bfs_readpage(struct file *file, struct page *page)
return block_read_full_page(page, bfs_get_block);
}
+static void bfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size)
+ truncate_pagecache(inode, to, inode->i_size);
+}
+
static int bfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -169,11 +177,8 @@ static int bfs_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep,
bfs_get_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ bfs_write_failed(mapping, pos + len);
return ret;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6d7d1647a68..0c42cdbabec 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1601,8 +1601,10 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
- if (psinfo == NULL)
+ if (psinfo == NULL) {
+ info->psinfo.data = NULL; /* So we don't free this wrongly */
return 0;
+ }
fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
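The binfmt_elf hunk above fixes an error path: when the psinfo allocation fails, fill_note_info() used to return with info->psinfo.data still uninitialized, and the shared cleanup in free_note_info() would then free a wild pointer. The fix NULLs the field before bailing out. A simplified, self-contained userspace analogue of the idiom (all names here are hypothetical, not the kernel structures):

        #include <stdlib.h>

        struct note_info { void *psinfo_data; };

        static int fill_note_info(struct note_info *info)
        {
                void *psinfo = malloc(64);

                if (!psinfo) {
                        info->psinfo_data = NULL; /* so cleanup below stays safe */
                        return 0;
                }
                info->psinfo_data = psinfo;
                return 1;
        }

        static void free_note_info(struct note_info *info)
        {
                free(info->psinfo_data); /* free(NULL) is a no-op */
        }

        int main(void)
        {
                struct note_info info;

                fill_note_info(&info);
                free_note_info(&info);
                return 0;
        }
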
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 4e6cce57d11..037a3e2b045 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -42,7 +42,6 @@ static int load_em86(struct linux_binprm *bprm)
return -ENOEXEC;
}
- bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index b0b70fbea06..0c8869fdd14 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -117,10 +117,6 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (!enabled)
goto _ret;
- retval = -ENOEXEC;
- if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
- goto _ret;
-
/* to keep locking time low, we copy the interpreter string */
read_lock(&entries_lock);
fmt = check_file(bprm);
@@ -176,7 +172,10 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto _error;
bprm->argc ++;
- bprm->interp = iname; /* for binfmt_script */
+ /* Update interp in case binfmt_script needs it. */
+ retval = bprm_change_interp(iname, bprm);
+ if (retval < 0)
+ goto _error;
interp_file = open_exec (iname);
retval = PTR_ERR (interp_file);
@@ -197,8 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (retval < 0)
goto _error;
- bprm->recursion_depth++;
-
retval = search_binary_handler(bprm);
if (retval < 0)
goto _error;
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 8c954997e7f..5027a3e1492 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm)
char interp[BINPRM_BUF_SIZE];
int retval;
- if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
- (bprm->recursion_depth > BINPRM_MAX_RECURSION))
+ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
- bprm->recursion_depth++;
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
@@ -82,7 +80,9 @@ static int load_script(struct linux_binprm *bprm)
retval = copy_strings_kernel(1, &i_name, bprm);
if (retval) return retval;
bprm->argc++;
- bprm->interp = interp;
+ retval = bprm_change_interp(interp, bprm);
+ if (retval < 0)
+ return retval;
/*
* OK, now restart the process with the interpreter's dentry.
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ab3a456f665..172f8491a2b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -321,7 +321,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
* for a block special file file->f_path.dentry->d_inode->i_size is zero
* so we compute the size by hand (just as in block_read/write above)
*/
-static loff_t block_llseek(struct file *file, loff_t offset, int origin)
+static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *bd_inode = file->f_mapping->host;
loff_t size;
@@ -331,7 +331,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
size = i_size_read(bd_inode);
retval = -EINVAL;
- switch (origin) {
+ switch (whence) {
case SEEK_END:
offset += size;
break;
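block_llseek() is part of a tree-wide rename of the llseek parameter from "origin" to "whence", matching the lseek(2) man page; the accepted values and their meaning do not change. A short userspace refresher on what the three classic whence values do (the path used here is only an example):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/etc/hostname", O_RDONLY);

                if (fd < 0)
                        return 1;
                printf("SEEK_SET 0  -> %lld\n", (long long)lseek(fd, 0, SEEK_SET));
                printf("SEEK_CUR +4 -> %lld\n", (long long)lseek(fd, 4, SEEK_CUR));
                printf("SEEK_END 0  -> %lld (the file size)\n",
                       (long long)lseek(fd, 0, SEEK_END));
                close(fd);
                return 0;
        }
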
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index d7fcdba141a..7df3e0f0ee5 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o qgroup.o send.o
+ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 0c16e3dbfd5..e15d2b0d8d3 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -121,6 +121,8 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
ret = posix_acl_equiv_mode(acl, &inode->i_mode);
if (ret < 0)
return ret;
+ if (ret == 0)
+ acl = NULL;
}
ret = 0;
break;
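posix_acl_equiv_mode() returns a negative errno for a malformed ACL, 0 when the ACL is fully representable by the ordinary mode bits, and a positive value when an extended attribute is still needed. The one-line btrfs fix handles the 0 case by dropping the ACL so no redundant xattr gets written. A sketch of the resulting pattern as it appears in several set-ACL helpers of this era (not a complete function):

        if (type == ACL_TYPE_ACCESS && acl) {
                ret = posix_acl_equiv_mode(acl, &inode->i_mode);
                if (ret < 0)
                        return ret;     /* malformed ACL */
                if (ret == 0)
                        acl = NULL;     /* mode bits say it all: skip the xattr */
        }
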
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 208d8aa5b07..04edf69be87 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -461,6 +461,7 @@ static int __merge_refs(struct list_head *head, int mode)
pos2 = n2, n2 = pos2->next) {
struct __prelim_ref *ref2;
struct __prelim_ref *xchg;
+ struct extent_inode_elem *eie;
ref2 = list_entry(pos2, struct __prelim_ref, list);
@@ -472,12 +473,20 @@ static int __merge_refs(struct list_head *head, int mode)
ref1 = ref2;
ref2 = xchg;
}
- ref1->count += ref2->count;
} else {
if (ref1->parent != ref2->parent)
continue;
- ref1->count += ref2->count;
}
+
+ eie = ref1->inode_list;
+ while (eie && eie->next)
+ eie = eie->next;
+ if (eie)
+ eie->next = ref2->inode_list;
+ else
+ ref1->inode_list = ref2->inode_list;
+ ref1->count += ref2->count;
+
list_del(&ref2->list);
kfree(ref2);
}
@@ -890,8 +899,7 @@ again:
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
list_del(&ref->list);
- if (ref->count < 0)
- WARN_ON(1);
+ WARN_ON(ref->count < 0);
if (ref->count && ref->root_id && ref->parent == 0) {
/* no parent == root of tree */
ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ed8ca7ca5ef..2a8c242bc4f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -39,6 +39,7 @@
#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
+#define BTRFS_INODE_COPY_EVERYTHING 8
/* in memory btrfs inode */
struct btrfs_inode {
@@ -90,6 +91,9 @@ struct btrfs_inode {
unsigned long runtime_flags;
+	/* Keep track of who's O_SYNC/fsyncing currently */
+ atomic_t sync_writers;
+
/* full 64 bit generation number, struct vfs_inode doesn't have a big
* enough field for this.
*/
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 5a3e45db642..11d47bfb62b 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -137,7 +137,7 @@ struct btrfsic_block {
unsigned int never_written:1; /* block was added because it was
* referenced, not because it was
* written */
- unsigned int mirror_num:2; /* large enough to hold
+ unsigned int mirror_num; /* large enough to hold
* BTRFS_SUPER_MIRROR_MAX */
struct btrfsic_dev_state *dev_state;
u64 dev_bytenr; /* key, physical byte num on disk */
@@ -723,7 +723,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
}
num_copies =
- btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
@@ -903,7 +903,7 @@ static int btrfsic_process_superblock_dev_mirror(
}
num_copies =
- btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
@@ -1287,7 +1287,7 @@ static int btrfsic_create_link_to_next_block(
*next_blockp = NULL;
if (0 == *num_copiesp) {
*num_copiesp =
- btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
@@ -1489,7 +1489,7 @@ static int btrfsic_handle_extent_data(
chunk_len = num_bytes;
num_copies =
- btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ btrfs_num_copies(state->root->fs_info,
next_bytenr, state->datablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
@@ -1582,9 +1582,21 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfs_device *device;
length = len;
- ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
+ ret = btrfs_map_block(state->root->fs_info, READ,
bytenr, &length, &multi, mirror_num);
+ if (ret) {
+ block_ctx_out->start = 0;
+ block_ctx_out->dev_bytenr = 0;
+ block_ctx_out->len = 0;
+ block_ctx_out->dev = NULL;
+ block_ctx_out->datav = NULL;
+ block_ctx_out->pagev = NULL;
+ block_ctx_out->mem_to_free = NULL;
+
+ return ret;
+ }
+
device = multi->stripes[0].dev;
block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
block_ctx_out->dev_bytenr = multi->stripes[0].physical;
@@ -1594,8 +1606,7 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
block_ctx_out->pagev = NULL;
block_ctx_out->mem_to_free = NULL;
- if (0 == ret)
- kfree(multi);
+ kfree(multi);
if (NULL == block_ctx_out->dev) {
ret = -ENXIO;
printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
@@ -2463,7 +2474,7 @@ static int btrfsic_process_written_superblock(
}
num_copies =
- btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ btrfs_num_copies(state->root->fs_info,
next_bytenr, BTRFS_SUPER_INFO_SIZE);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
@@ -2960,7 +2971,7 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
struct btrfsic_block_data_ctx block_ctx;
int match = 0;
- num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
+ num_copies = btrfs_num_copies(state->root->fs_info,
bytenr, state->metablock_size);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c6467aa88be..94ab2f80e7e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -687,7 +687,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ bio_endio(comp_bio, ret);
bio_put(comp_bio);
@@ -712,7 +713,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ bio_endio(comp_bio, ret);
bio_put(comp_bio);
return 0;
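The compression.c hunks replace BUG_ON(ret) with error propagation: btrfs_map_bio() can fail for transient reasons such as -ENOMEM, and crashing the machine is unnecessary when the failure can simply be reported through the bio's completion. Roughly, with the two-argument bio_endio() of this kernel era:

        ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
        if (ret)
                bio_endio(comp_bio, ret);       /* complete the bio with the error */
        bio_put(comp_bio);                      /* drop our reference either way */
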
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index cdfb4c49a80..eea5da7a2b9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -38,8 +38,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot,
- int tree_mod_log);
+ struct btrfs_path *path, int level, int slot);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -776,8 +775,7 @@ tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_disk_key *disk_key, int slot, int atomic)
+ struct extent_buffer *eb, int slot, int atomic)
{
int ret;
@@ -1361,19 +1359,16 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
u64 search_start;
int ret;
- if (trans->transaction != root->fs_info->running_transaction) {
- printk(KERN_CRIT "trans %llu running %llu\n",
+ if (trans->transaction != root->fs_info->running_transaction)
+ WARN(1, KERN_CRIT "trans %llu running %llu\n",
(unsigned long long)trans->transid,
(unsigned long long)
root->fs_info->running_transaction->transid);
- WARN_ON(1);
- }
- if (trans->transid != root->fs_info->generation) {
- printk(KERN_CRIT "trans %llu running %llu\n",
+
+ if (trans->transid != root->fs_info->generation)
+ WARN(1, KERN_CRIT "trans %llu running %llu\n",
(unsigned long long)trans->transid,
(unsigned long long)root->fs_info->generation);
- WARN_ON(1);
- }
if (!should_cow_block(trans, root, buf)) {
*cow_ret = buf;
@@ -1469,10 +1464,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
if (cache_only && parent_level != 1)
return 0;
- if (trans->transaction != root->fs_info->running_transaction)
- WARN_ON(1);
- if (trans->transid != root->fs_info->generation)
- WARN_ON(1);
+ WARN_ON(trans->transaction != root->fs_info->running_transaction);
+ WARN_ON(trans->transid != root->fs_info->generation);
parent_nritems = btrfs_header_nritems(parent);
blocksize = btrfs_level_size(root, parent_level - 1);
@@ -1827,7 +1820,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (btrfs_header_nritems(right) == 0) {
clean_tree_block(trans, root, right);
btrfs_tree_unlock(right);
- del_ptr(trans, root, path, level + 1, pslot + 1, 1);
+ del_ptr(trans, root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
btrfs_free_tree_block(trans, root, right, 0, 1);
free_extent_buffer_stale(right);
@@ -1836,7 +1829,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
struct btrfs_disk_key right_key;
btrfs_node_key(right, &right_key, 0);
tree_mod_log_set_node_key(root->fs_info, parent,
- &right_key, pslot + 1, 0);
+ pslot + 1, 0);
btrfs_set_node_key(parent, &right_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
}
@@ -1871,7 +1864,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
if (btrfs_header_nritems(mid) == 0) {
clean_tree_block(trans, root, mid);
btrfs_tree_unlock(mid);
- del_ptr(trans, root, path, level + 1, pslot, 1);
+ del_ptr(trans, root, path, level + 1, pslot);
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1);
free_extent_buffer_stale(mid);
@@ -1880,7 +1873,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* update the parent key to reflect our changes */
struct btrfs_disk_key mid_key;
btrfs_node_key(mid, &mid_key, 0);
- tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
+ tree_mod_log_set_node_key(root->fs_info, parent,
pslot, 0);
btrfs_set_node_key(parent, &mid_key, pslot);
btrfs_mark_buffer_dirty(parent);
@@ -1980,7 +1973,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
orig_slot += left_nr;
btrfs_node_key(mid, &disk_key, 0);
tree_mod_log_set_node_key(root->fs_info, parent,
- &disk_key, pslot, 0);
+ pslot, 0);
btrfs_set_node_key(parent, &disk_key, pslot);
btrfs_mark_buffer_dirty(parent);
if (btrfs_header_nritems(left) > orig_slot) {
@@ -2033,7 +2026,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_node_key(right, &disk_key, 0);
tree_mod_log_set_node_key(root->fs_info, parent,
- &disk_key, pslot + 1, 0);
+ pslot + 1, 0);
btrfs_set_node_key(parent, &disk_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
@@ -2219,6 +2212,9 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
int no_skips = 0;
struct extent_buffer *t;
+ if (path->really_keep_locks)
+ return;
+
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
if (!path->nodes[i])
break;
@@ -2266,7 +2262,7 @@ noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
int i;
- if (path->keep_locks)
+ if (path->keep_locks || path->really_keep_locks)
return;
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -2499,7 +2495,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (!cow)
write_lock_level = -1;
- if (cow && (p->keep_locks || p->lowest_level))
+ if (cow && (p->really_keep_locks || p->keep_locks || p->lowest_level))
write_lock_level = BTRFS_MAX_LEVEL;
min_write_lock_level = write_lock_level;
@@ -2568,7 +2564,10 @@ again:
* must have write locks on this node and the
* parent
*/
- if (level + 1 > write_lock_level) {
+ if (level > write_lock_level ||
+ (level + 1 > write_lock_level &&
+ level + 1 < BTRFS_MAX_LEVEL &&
+ p->nodes[level + 1])) {
write_lock_level = level + 1;
btrfs_release_path(p);
goto again;
@@ -2917,7 +2916,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans,
if (!path->nodes[i])
break;
t = path->nodes[i];
- tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
+ tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
btrfs_set_node_key(t, key, tslot);
btrfs_mark_buffer_dirty(path->nodes[i]);
if (tslot != 0)
@@ -3302,14 +3301,21 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
*/
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
+ struct btrfs_item *start_item;
+ struct btrfs_item *end_item;
+ struct btrfs_map_token token;
int data_len;
int nritems = btrfs_header_nritems(l);
int end = min(nritems, start + nr) - 1;
if (!nr)
return 0;
- data_len = btrfs_item_end_nr(l, start);
- data_len = data_len - btrfs_item_offset_nr(l, end);
+ btrfs_init_map_token(&token);
+ start_item = btrfs_item_nr(l, start);
+ end_item = btrfs_item_nr(l, end);
+ data_len = btrfs_token_item_offset(l, start_item, &token) +
+ btrfs_token_item_size(l, start_item, &token);
+ data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
data_len += sizeof(struct btrfs_item) * nr;
WARN_ON(data_len < 0);
return data_len;
@@ -3403,8 +3409,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (push_items == 0)
goto out_unlock;
- if (!empty && push_items == left_nritems)
- WARN_ON(1);
+ WARN_ON(!empty && push_items == left_nritems);
/* push left to right */
right_nritems = btrfs_header_nritems(right);
@@ -3642,11 +3647,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(left, old_left_nritems + push_items);
/* fixup right node */
- if (push_items > right_nritems) {
- printk(KERN_CRIT "push items %d nr %u\n", push_items,
+ if (push_items > right_nritems)
+ WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
right_nritems);
- WARN_ON(1);
- }
if (push_items < right_nritems) {
push_space = btrfs_item_offset_nr(right, push_items - 1) -
@@ -4602,8 +4605,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
* empty a node.
*/
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot,
- int tree_mod_log)
+ struct btrfs_path *path, int level, int slot)
{
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
@@ -4611,7 +4613,7 @@ static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
nritems = btrfs_header_nritems(parent);
if (slot != nritems - 1) {
- if (tree_mod_log && level)
+ if (level)
tree_mod_log_eb_move(root->fs_info, parent, slot,
slot + 1, nritems - slot - 1);
memmove_extent_buffer(parent,
@@ -4619,7 +4621,7 @@ static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_node_key_ptr_offset(slot + 1),
sizeof(struct btrfs_key_ptr) *
(nritems - slot - 1));
- } else if (tree_mod_log && level) {
+ } else if (level) {
ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
MOD_LOG_KEY_REMOVE);
BUG_ON(ret < 0);
@@ -4656,7 +4658,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf)
{
WARN_ON(btrfs_header_generation(leaf) != trans->transid);
- del_ptr(trans, root, path, 1, path->slots[1], 1);
+ del_ptr(trans, root, path, 1, path->slots[1]);
/*
* btrfs_free_extent is expensive, we want to make sure we
@@ -5123,13 +5125,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
right_path->search_commit_root = 1;
right_path->skip_locking = 1;
- spin_lock(&left_root->root_times_lock);
+ spin_lock(&left_root->root_item_lock);
left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
- spin_unlock(&left_root->root_times_lock);
+ spin_unlock(&left_root->root_item_lock);
- spin_lock(&right_root->root_times_lock);
+ spin_lock(&right_root->root_item_lock);
right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
- spin_unlock(&right_root->root_times_lock);
+ spin_unlock(&right_root->root_item_lock);
trans = btrfs_join_transaction(left_root);
if (IS_ERR(trans)) {
@@ -5224,15 +5226,15 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- spin_lock(&left_root->root_times_lock);
+ spin_lock(&left_root->root_item_lock);
ctransid = btrfs_root_ctransid(&left_root->root_item);
- spin_unlock(&left_root->root_times_lock);
+ spin_unlock(&left_root->root_item_lock);
if (ctransid != left_start_ctransid)
left_start_ctransid = 0;
- spin_lock(&right_root->root_times_lock);
+ spin_lock(&right_root->root_item_lock);
ctransid = btrfs_root_ctransid(&right_root->root_item);
- spin_unlock(&right_root->root_times_lock);
+ spin_unlock(&right_root->root_item_lock);
if (ctransid != right_start_ctransid)
right_start_ctransid = 0;
@@ -5496,6 +5498,139 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
return btrfs_next_old_leaf(root, path, 0);
}
+/* Release the path up to but not including the given level */
+static void btrfs_release_level(struct btrfs_path *path, int level)
+{
+ int i;
+
+ for (i = 0; i < level; i++) {
+ path->slots[i] = 0;
+ if (!path->nodes[i])
+ continue;
+ if (path->locks[i]) {
+ btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
+ path->locks[i] = 0;
+ }
+ free_extent_buffer(path->nodes[i]);
+ path->nodes[i] = NULL;
+ }
+}
+
+/*
+ * This function assumes 2 things
+ *
+ * 1) You are using path->keep_locks
+ * 2) You are not inserting items.
+ *
+ * If either of these are not true do not use this function. If you need a next
+ * leaf with either of these not being true then this function can be easily
+ * adapted to do that, but at the moment these are the limitations.
+ */
+int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ int del)
+{
+ struct extent_buffer *b;
+ struct btrfs_key key;
+ u32 nritems;
+ int level = 1;
+ int slot;
+ int ret = 1;
+ int write_lock_level = BTRFS_MAX_LEVEL;
+ int ins_len = del ? -1 : 0;
+
+ WARN_ON(!(path->keep_locks || path->really_keep_locks));
+
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
+
+ while (path->nodes[level]) {
+ nritems = btrfs_header_nritems(path->nodes[level]);
+ if (!(path->locks[level] & BTRFS_WRITE_LOCK)) {
+search:
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(trans, root, &key, path,
+ ins_len, 1);
+ if (ret < 0)
+ goto out;
+ level = 1;
+ continue;
+ }
+
+ if (path->slots[level] >= nritems - 1) {
+ level++;
+ continue;
+ }
+
+ btrfs_release_level(path, level);
+ break;
+ }
+
+ if (!path->nodes[level]) {
+ ret = 1;
+ goto out;
+ }
+
+ path->slots[level]++;
+ b = path->nodes[level];
+
+ while (b) {
+ level = btrfs_header_level(b);
+
+ if (!should_cow_block(trans, root, b))
+ goto cow_done;
+
+ btrfs_set_path_blocking(path);
+ ret = btrfs_cow_block(trans, root, b,
+ path->nodes[level + 1],
+ path->slots[level + 1], &b);
+ if (ret)
+ goto out;
+cow_done:
+ path->nodes[level] = b;
+ btrfs_clear_path_blocking(path, NULL, 0);
+ if (level != 0) {
+ ret = setup_nodes_for_search(trans, root, path, b,
+ level, ins_len,
+ &write_lock_level);
+ if (ret == -EAGAIN)
+ goto search;
+ if (ret)
+ goto out;
+
+ b = path->nodes[level];
+ slot = path->slots[level];
+
+ ret = read_block_for_search(trans, root, path,
+ &b, level, slot, &key, 0);
+ if (ret == -EAGAIN)
+ goto search;
+ if (ret)
+ goto out;
+ level = btrfs_header_level(b);
+ if (!btrfs_try_tree_write_lock(b)) {
+ btrfs_set_path_blocking(path);
+ btrfs_tree_lock(b);
+ btrfs_clear_path_blocking(path, b,
+ BTRFS_WRITE_LOCK);
+ }
+ path->locks[level] = BTRFS_WRITE_LOCK;
+ path->nodes[level] = b;
+ path->slots[level] = 0;
+ } else {
+ path->slots[level] = 0;
+ ret = 0;
+ break;
+ }
+ }
+
+out:
+ if (ret)
+ btrfs_release_path(path);
+
+ return ret;
+}
+
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
u64 time_seq)
{
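Several ctree.c hunks above collapse a printk(KERN_CRIT ...) followed by WARN_ON(1) into a single WARN() call: WARN(condition, fmt, ...) evaluates the condition and, when it is true, prints the formatted message together with a stack trace. A before/after sketch of the transid check as converted here:

        /* before: two statements, message and backtrace printed separately */
        if (trans->transid != root->fs_info->generation) {
                printk(KERN_CRIT "trans %llu running %llu\n",
                       (unsigned long long)trans->transid,
                       (unsigned long long)root->fs_info->generation);
                WARN_ON(1);
        }

        /* after: one statement, message attached to the backtrace */
        WARN(trans->transid != root->fs_info->generation,
             KERN_CRIT "trans %llu running %llu\n",
             (unsigned long long)trans->transid,
             (unsigned long long)root->fs_info->generation);
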
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c72ead86950..547b7b05727 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -48,7 +48,7 @@ struct btrfs_ordered_sum;
#define BTRFS_MAGIC "_BHRfS_M"
-#define BTRFS_MAX_MIRRORS 2
+#define BTRFS_MAX_MIRRORS 3
#define BTRFS_MAX_LEVEL 8
@@ -142,6 +142,8 @@ struct btrfs_ordered_sum;
#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
+#define BTRFS_DEV_REPLACE_DEVID 0
+
/*
* the max metadata block size. This limit is somewhat artificial,
* but the memmove costs go through the roof for larger blocks.
@@ -172,6 +174,9 @@ static int btrfs_csum_sizes[] = { 4, 0 };
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+#define REQ_GET_READ_MIRRORS (1 << 30)
+
#define BTRFS_FT_UNKNOWN 0
#define BTRFS_FT_REG_FILE 1
#define BTRFS_FT_DIR 2
@@ -413,7 +418,7 @@ struct btrfs_root_backup {
__le64 bytes_used;
__le64 num_devices;
/* future */
- __le64 unsed_64[4];
+ __le64 unused_64[4];
u8 tree_root_level;
u8 chunk_root_level;
@@ -571,6 +576,7 @@ struct btrfs_path {
unsigned int skip_locking:1;
unsigned int leave_spinning:1;
unsigned int search_commit_root:1;
+ unsigned int really_keep_locks:1;
};
/*
@@ -885,6 +891,59 @@ struct btrfs_dev_stats_item {
__le64 values[BTRFS_DEV_STAT_VALUES_MAX];
} __attribute__ ((__packed__));
+#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0
+#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1
+#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0
+#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1
+#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2
+#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3
+#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4
+
+struct btrfs_dev_replace {
+ u64 replace_state; /* see #define above */
+ u64 time_started; /* seconds since 1-Jan-1970 */
+ u64 time_stopped; /* seconds since 1-Jan-1970 */
+ atomic64_t num_write_errors;
+ atomic64_t num_uncorrectable_read_errors;
+
+ u64 cursor_left;
+ u64 committed_cursor_left;
+ u64 cursor_left_last_write_of_item;
+ u64 cursor_right;
+
+ u64 cont_reading_from_srcdev_mode; /* see #define above */
+
+ int is_valid;
+ int item_needs_writeback;
+ struct btrfs_device *srcdev;
+ struct btrfs_device *tgtdev;
+
+ pid_t lock_owner;
+ atomic_t nesting_level;
+ struct mutex lock_finishing_cancel_unmount;
+ struct mutex lock_management_lock;
+ struct mutex lock;
+
+ struct btrfs_scrub_progress scrub_progress;
+};
+
+struct btrfs_dev_replace_item {
+ /*
+ * grow this item struct at the end for future enhancements and keep
+ * the existing values unchanged
+ */
+ __le64 src_devid;
+ __le64 cursor_left;
+ __le64 cursor_right;
+ __le64 cont_reading_from_srcdev_mode;
+
+ __le64 replace_state;
+ __le64 time_started;
+ __le64 time_stopped;
+ __le64 num_write_errors;
+ __le64 num_uncorrectable_read_errors;
+} __attribute__ ((__packed__));
+
/* different types of block groups (and chunks) */
#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0)
#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1)
@@ -1333,6 +1392,7 @@ struct btrfs_fs_info {
struct btrfs_workers generic_worker;
struct btrfs_workers workers;
struct btrfs_workers delalloc_workers;
+ struct btrfs_workers flush_workers;
struct btrfs_workers endio_workers;
struct btrfs_workers endio_meta_workers;
struct btrfs_workers endio_meta_write_workers;
@@ -1429,6 +1489,8 @@ struct btrfs_fs_info {
struct rw_semaphore scrub_super_lock;
int scrub_workers_refcnt;
struct btrfs_workers scrub_workers;
+ struct btrfs_workers scrub_wr_completion_workers;
+ struct btrfs_workers scrub_nocow_workers;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
@@ -1470,6 +1532,11 @@ struct btrfs_fs_info {
int backup_root_index;
int num_tolerated_disk_barrier_failures;
+
+ /* device replace state */
+ struct btrfs_dev_replace dev_replace;
+
+ atomic_t mutually_exclusive_operation_running;
};
/*
@@ -1579,7 +1646,7 @@ struct btrfs_root {
int force_cow;
- spinlock_t root_times_lock;
+ spinlock_t root_item_lock;
};
struct btrfs_ioctl_defrag_range_args {
@@ -1723,6 +1790,12 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_DEV_STATS_KEY 249
/*
+ * Persistently stores the device replace state in the device tree.
+ * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
+ */
+#define BTRFS_DEV_REPLACE_KEY 250
+
+/*
* string items are for debugging. They just store a short string of
* data in the FS
*/
@@ -1787,7 +1860,7 @@ struct btrfs_map_token {
static inline void btrfs_init_map_token (struct btrfs_map_token *token)
{
- memset(token, 0, sizeof(*token));
+ token->kaddr = NULL;
}
/* some macros to generate set/get funcs for the struct fields. This
@@ -2755,6 +2828,49 @@ BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item,
BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item,
rsv_excl, 64);
+/* btrfs_dev_replace_item */
+BTRFS_SETGET_FUNCS(dev_replace_src_devid,
+ struct btrfs_dev_replace_item, src_devid, 64);
+BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode,
+ struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode,
+ 64);
+BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item,
+ replace_state, 64);
+BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item,
+ time_started, 64);
+BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item,
+ time_stopped, 64);
+BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item,
+ num_write_errors, 64);
+BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors,
+ struct btrfs_dev_replace_item, num_uncorrectable_read_errors,
+ 64);
+BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item,
+ cursor_left, 64);
+BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item,
+ cursor_right, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid,
+ struct btrfs_dev_replace_item, src_devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode,
+ struct btrfs_dev_replace_item,
+ cont_reading_from_srcdev_mode, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state,
+ struct btrfs_dev_replace_item, replace_state, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started,
+ struct btrfs_dev_replace_item, time_started, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped,
+ struct btrfs_dev_replace_item, time_stopped, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors,
+ struct btrfs_dev_replace_item, num_write_errors, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors,
+ struct btrfs_dev_replace_item,
+ num_uncorrectable_read_errors, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
+ struct btrfs_dev_replace_item, cursor_left, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
+ struct btrfs_dev_replace_item, cursor_right, 64);
+
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
return sb->s_fs_info;
@@ -2900,6 +3016,18 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
+
+enum btrfs_reserve_flush_enum {
+ /* If we are in the transaction, we can't flush anything.*/
+ BTRFS_RESERVE_NO_FLUSH,
+ /*
+ * Flushing delalloc may cause deadlock somewhere, in this
+ * case, use FLUSH LIMIT
+ */
+ BTRFS_RESERVE_FLUSH_LIMIT,
+ BTRFS_RESERVE_FLUSH_ALL,
+};
+
int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
@@ -2919,19 +3047,13 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
void btrfs_free_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes);
-int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes);
+ struct btrfs_block_rsv *block_rsv, u64 num_bytes,
+ enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved);
-int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved);
+ struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+ enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes);
@@ -2955,6 +3077,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+int __get_raid_index(u64 flags);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -3065,6 +3188,9 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
}
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ int del);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
u64 time_seq);
static inline int btrfs_next_old_item(struct btrfs_root *root,
@@ -3157,6 +3283,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
/* dir-item.c */
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+ const char *name, int name_len);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
@@ -3256,6 +3384,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid,
u64 bytenr, int mod);
+u64 btrfs_file_extent_length(struct btrfs_path *path);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
@@ -3271,6 +3400,19 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
/* inode.c */
+struct btrfs_delalloc_work {
+ struct inode *inode;
+ int wait;
+ int delay_iput;
+ struct completion completion;
+ struct list_head list;
+ struct btrfs_work work;
+};
+
+struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
+ int wait, int delay_iput);
+void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
+
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create);
@@ -3370,9 +3512,12 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space);
/* file.c */
+int btrfs_auto_defrag_init(void);
+void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
+void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
@@ -3519,15 +3664,16 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
/* scrub.c */
-int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
- struct btrfs_scrub_progress *progress, int readonly);
+int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
+ u64 end, struct btrfs_scrub_progress *progress,
+ int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_pause_super(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
void btrfs_scrub_continue_super(struct btrfs_root *root);
-int __btrfs_scrub_cancel(struct btrfs_fs_info *info);
-int btrfs_scrub_cancel(struct btrfs_root *root);
-int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
+int btrfs_scrub_cancel(struct btrfs_fs_info *info);
+int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
+ struct btrfs_device *dev);
int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
struct btrfs_scrub_progress *progress);
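The ctree.h hunk above folds the _noflush variants of the block-reservation helpers into the main functions, which now take an enum btrfs_reserve_flush_enum so callers state how aggressively space may be flushed to satisfy a reservation. The migration is mechanical, as the delayed-inode.c hunk below shows in context; roughly:

        /* before */
        ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);

        /* after: flushing behaviour is an explicit argument */
        ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
                                  BTRFS_RESERVE_NO_FLUSH);
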
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 478f66bdc57..34836036f01 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -651,7 +651,8 @@ static int btrfs_delayed_inode_reserve_metadata(
*/
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
- ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+ BTRFS_RESERVE_NO_FLUSH);
/*
* Since we're under a transaction reserve_metadata_bytes could
* try to commit the transaction which will make it return
@@ -686,7 +687,8 @@ static int btrfs_delayed_inode_reserve_metadata(
* reserve something strictly for us. If not be a pain and try
* to steal from the delalloc block rsv.
*/
- ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+ BTRFS_RESERVE_NO_FLUSH);
if (!ret)
goto out;
@@ -1255,7 +1257,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
struct btrfs_delayed_node *delayed_node = NULL;
struct btrfs_root *root;
struct btrfs_block_rsv *block_rsv;
- unsigned long nr = 0;
int need_requeue = 0;
int ret;
@@ -1316,11 +1317,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
delayed_node);
mutex_unlock(&delayed_node->mutex);
- nr = trans->blocks_used;
-
trans->block_rsv = block_rsv;
btrfs_end_transaction_dmeta(trans, root);
- __btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty_nodelay(root);
free_path:
btrfs_free_path(path);
out:
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
new file mode 100644
index 00000000000..66dbc8dbddf
--- /dev/null
+++ b/fs/btrfs/dev-replace.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (C) STRATO AG 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/iocontext.h>
+#include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <asm/div64.h>
+#include "compat.h"
+#include "ctree.h"
+#include "extent_map.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "async-thread.h"
+#include "check-integrity.h"
+#include "rcu-string.h"
+#include "dev-replace.h"
+
+static u64 btrfs_get_seconds_since_1970(void);
+static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ int scrub_ret);
+static void btrfs_dev_replace_update_device_in_mapping_tree(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev,
+ struct btrfs_device *tgtdev);
+static int btrfs_dev_replace_find_srcdev(struct btrfs_root *root, u64 srcdevid,
+ char *srcdev_name,
+ struct btrfs_device **device);
+static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
+static int btrfs_dev_replace_kthread(void *data);
+static int btrfs_dev_replace_continue_on_mount(struct btrfs_fs_info *fs_info);
+
+
+int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_key key;
+ struct btrfs_root *dev_root = fs_info->dev_root;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct extent_buffer *eb;
+ int slot;
+ int ret = 0;
+ struct btrfs_path *path = NULL;
+ int item_size;
+ struct btrfs_dev_replace_item *ptr;
+ u64 src_devid;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = 0;
+ key.type = BTRFS_DEV_REPLACE_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+ if (ret) {
+no_valid_dev_replace_entry_found:
+ ret = 0;
+ dev_replace->replace_state =
+ BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
+ dev_replace->cont_reading_from_srcdev_mode =
+ BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
+ dev_replace->replace_state = 0;
+ dev_replace->time_started = 0;
+ dev_replace->time_stopped = 0;
+ atomic64_set(&dev_replace->num_write_errors, 0);
+ atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
+ dev_replace->cursor_left = 0;
+ dev_replace->committed_cursor_left = 0;
+ dev_replace->cursor_left_last_write_of_item = 0;
+ dev_replace->cursor_right = 0;
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
+ dev_replace->is_valid = 0;
+ dev_replace->item_needs_writeback = 0;
+ goto out;
+ }
+ slot = path->slots[0];
+ eb = path->nodes[0];
+ item_size = btrfs_item_size_nr(eb, slot);
+ ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);
+
+ if (item_size != sizeof(struct btrfs_dev_replace_item)) {
+ pr_warn("btrfs: dev_replace entry found has unexpected size, ignore entry\n");
+ goto no_valid_dev_replace_entry_found;
+ }
+
+ src_devid = btrfs_dev_replace_src_devid(eb, ptr);
+ dev_replace->cont_reading_from_srcdev_mode =
+ btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
+ dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
+ dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
+ dev_replace->time_stopped =
+ btrfs_dev_replace_time_stopped(eb, ptr);
+ atomic64_set(&dev_replace->num_write_errors,
+ btrfs_dev_replace_num_write_errors(eb, ptr));
+ atomic64_set(&dev_replace->num_uncorrectable_read_errors,
+ btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
+ dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
+ dev_replace->committed_cursor_left = dev_replace->cursor_left;
+ dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
+ dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
+ dev_replace->is_valid = 1;
+
+ dev_replace->item_needs_writeback = 0;
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ dev_replace->srcdev = btrfs_find_device(fs_info, src_devid,
+ NULL, NULL);
+ dev_replace->tgtdev = btrfs_find_device(fs_info,
+ BTRFS_DEV_REPLACE_DEVID,
+ NULL, NULL);
+ /*
+ * allow 'btrfs dev replace_cancel' if src/tgt device is
+ * missing
+ */
+ if (!dev_replace->srcdev &&
+ !btrfs_test_opt(dev_root, DEGRADED)) {
+ ret = -EIO;
+ pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?\n",
+ (unsigned long long)src_devid);
+ }
+ if (!dev_replace->tgtdev &&
+ !btrfs_test_opt(dev_root, DEGRADED)) {
+ ret = -EIO;
+ pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "tgtdev (devid %llu) is missing, need to run btrfs dev scan?\n",
+ (unsigned long long)BTRFS_DEV_REPLACE_DEVID);
+ }
+ if (dev_replace->tgtdev) {
+ if (dev_replace->srcdev) {
+ dev_replace->tgtdev->total_bytes =
+ dev_replace->srcdev->total_bytes;
+ dev_replace->tgtdev->disk_total_bytes =
+ dev_replace->srcdev->disk_total_bytes;
+ dev_replace->tgtdev->bytes_used =
+ dev_replace->srcdev->bytes_used;
+ }
+ dev_replace->tgtdev->is_tgtdev_for_dev_replace = 1;
+ btrfs_init_dev_replace_tgtdev_for_resume(fs_info,
+ dev_replace->tgtdev);
+ }
+ break;
+ }
+
+out:
+ if (path)
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * called from commit_transaction. Writes changed device replace state to
+ * disk.
+ */
+int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ int ret;
+ struct btrfs_root *dev_root = fs_info->dev_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *eb;
+ struct btrfs_dev_replace_item *ptr;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+
+ btrfs_dev_replace_lock(dev_replace);
+ if (!dev_replace->is_valid ||
+ !dev_replace->item_needs_writeback) {
+ btrfs_dev_replace_unlock(dev_replace);
+ return 0;
+ }
+ btrfs_dev_replace_unlock(dev_replace);
+
+ key.objectid = 0;
+ key.type = BTRFS_DEV_REPLACE_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
+ if (ret < 0) {
+ pr_warn("btrfs: error %d while searching for dev_replace item!\n",
+ ret);
+ goto out;
+ }
+
+ if (ret == 0 &&
+ btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
+ /*
+ * need to delete old one and insert a new one.
+ * Since no attempt is made to recover any old state, if the
+ * dev_replace state is 'running', the data on the target
+ * drive is lost.
+ * It would be possible to recover the state: just make sure
+ * that the beginning of the item is never changed and always
+ * contains all the essential information. Then read this
+ * minimal set of information and use it as a base for the
+ * new state.
+ */
+ ret = btrfs_del_item(trans, dev_root, path);
+ if (ret != 0) {
+ pr_warn("btrfs: delete too small dev_replace item failed %d!\n",
+ ret);
+ goto out;
+ }
+ ret = 1;
+ }
+
+ if (ret == 1) {
+ /* need to insert a new item */
+ btrfs_release_path(path);
+ ret = btrfs_insert_empty_item(trans, dev_root, path,
+ &key, sizeof(*ptr));
+ if (ret < 0) {
+ pr_warn("btrfs: insert dev_replace item failed %d!\n",
+ ret);
+ goto out;
+ }
+ }
+
+ eb = path->nodes[0];
+ ptr = btrfs_item_ptr(eb, path->slots[0],
+ struct btrfs_dev_replace_item);
+
+ btrfs_dev_replace_lock(dev_replace);
+ if (dev_replace->srcdev)
+ btrfs_set_dev_replace_src_devid(eb, ptr,
+ dev_replace->srcdev->devid);
+ else
+ btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
+ btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
+ dev_replace->cont_reading_from_srcdev_mode);
+ btrfs_set_dev_replace_replace_state(eb, ptr,
+ dev_replace->replace_state);
+ btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
+ btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
+ btrfs_set_dev_replace_num_write_errors(eb, ptr,
+ atomic64_read(&dev_replace->num_write_errors));
+ btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
+ atomic64_read(&dev_replace->num_uncorrectable_read_errors));
+ dev_replace->cursor_left_last_write_of_item =
+ dev_replace->cursor_left;
+ btrfs_set_dev_replace_cursor_left(eb, ptr,
+ dev_replace->cursor_left_last_write_of_item);
+ btrfs_set_dev_replace_cursor_right(eb, ptr,
+ dev_replace->cursor_right);
+ dev_replace->item_needs_writeback = 0;
+ btrfs_dev_replace_unlock(dev_replace);
+
+ btrfs_mark_buffer_dirty(eb);
+
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+
+ dev_replace->committed_cursor_left =
+ dev_replace->cursor_left_last_write_of_item;
+}
+
+static u64 btrfs_get_seconds_since_1970(void)
+{
+ struct timespec t = CURRENT_TIME_SEC;
+
+ return t.tv_sec;
+}
+
+int btrfs_dev_replace_start(struct btrfs_root *root,
+ struct btrfs_ioctl_dev_replace_args *args)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ int ret;
+ struct btrfs_device *tgt_device = NULL;
+ struct btrfs_device *src_device = NULL;
+
+ switch (args->start.cont_reading_from_srcdev_mode) {
+ case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
+ case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
+ args->start.tgtdev_name[0] == '\0')
+ return -EINVAL;
+
+ mutex_lock(&fs_info->volume_mutex);
+ ret = btrfs_init_dev_replace_tgtdev(root, args->start.tgtdev_name,
+ &tgt_device);
+ if (ret) {
+ pr_err("btrfs: target device %s is invalid!\n",
+ args->start.tgtdev_name);
+ mutex_unlock(&fs_info->volume_mutex);
+ return -EINVAL;
+ }
+
+ ret = btrfs_dev_replace_find_srcdev(root, args->start.srcdevid,
+ args->start.srcdev_name,
+ &src_device);
+ mutex_unlock(&fs_info->volume_mutex);
+ if (ret) {
+ ret = -EINVAL;
+ goto leave_no_lock;
+ }
+
+ if (tgt_device->total_bytes < src_device->total_bytes) {
+ pr_err("btrfs: target device is smaller than source device!\n");
+ ret = -EINVAL;
+ goto leave_no_lock;
+ }
+
+ btrfs_dev_replace_lock(dev_replace);
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
+ goto leave;
+ }
+
+ dev_replace->cont_reading_from_srcdev_mode =
+ args->start.cont_reading_from_srcdev_mode;
+ WARN_ON(!src_device);
+ dev_replace->srcdev = src_device;
+ WARN_ON(!tgt_device);
+ dev_replace->tgtdev = tgt_device;
+
+ printk_in_rcu(KERN_INFO
+ "btrfs: dev_replace from %s (devid %llu) to %s) started\n",
+ src_device->missing ? "<missing disk>" :
+ rcu_str_deref(src_device->name),
+ src_device->devid,
+ rcu_str_deref(tgt_device->name));
+
+ tgt_device->total_bytes = src_device->total_bytes;
+ tgt_device->disk_total_bytes = src_device->disk_total_bytes;
+ tgt_device->bytes_used = src_device->bytes_used;
+
+ /*
+ * from now on, the writes to the srcdev are all duplicated to
+ * go to the tgtdev as well (refer to btrfs_map_block()).
+ */
+ dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
+ dev_replace->time_started = btrfs_get_seconds_since_1970();
+ dev_replace->cursor_left = 0;
+ dev_replace->committed_cursor_left = 0;
+ dev_replace->cursor_left_last_write_of_item = 0;
+ dev_replace->cursor_right = 0;
+ dev_replace->is_valid = 1;
+ dev_replace->item_needs_writeback = 1;
+ args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ btrfs_dev_replace_unlock(dev_replace);
+
+ btrfs_wait_ordered_extents(root, 0);
+
+ /* force writing the updated state information to disk */
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_dev_replace_lock(dev_replace);
+ goto leave;
+ }
+
+ ret = btrfs_commit_transaction(trans, root);
+ WARN_ON(ret);
+
+ /* the disk copy procedure reuses the scrub code */
+ ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
+ src_device->total_bytes,
+ &dev_replace->scrub_progress, 0, 1);
+
+ ret = btrfs_dev_replace_finishing(root->fs_info, ret);
+ WARN_ON(ret);
+
+ return 0;
+
+leave:
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
+ btrfs_dev_replace_unlock(dev_replace);
+leave_no_lock:
+ if (tgt_device)
+ btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ return ret;
+}
+
+static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ int scrub_ret)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_device *tgt_device;
+ struct btrfs_device *src_device;
+ struct btrfs_root *root = fs_info->tree_root;
+ u8 uuid_tmp[BTRFS_UUID_SIZE];
+ struct btrfs_trans_handle *trans;
+ int ret = 0;
+
+ /* don't allow cancel or unmount to disturb the finishing procedure */
+ mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
+
+ btrfs_dev_replace_lock(dev_replace);
+ /* was the operation canceled, or is it finished? */
+ if (dev_replace->replace_state !=
+ BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
+ btrfs_dev_replace_unlock(dev_replace);
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return 0;
+ }
+
+ tgt_device = dev_replace->tgtdev;
+ src_device = dev_replace->srcdev;
+ btrfs_dev_replace_unlock(dev_replace);
+
+ /* replace old device with new one in mapping tree */
+ if (!scrub_ret)
+ btrfs_dev_replace_update_device_in_mapping_tree(fs_info,
+ src_device,
+ tgt_device);
+
+ /*
+ * flush all outstanding I/O and inode extent mappings before the
+ * copy operation is declared as being finished
+ */
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0);
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans, root);
+ WARN_ON(ret);
+
+ /* keep write_all_supers() away during the finishing procedure */
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ btrfs_dev_replace_lock(dev_replace);
+ dev_replace->replace_state =
+ scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
+ : BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
+ dev_replace->tgtdev = NULL;
+ dev_replace->srcdev = NULL;
+ dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+ dev_replace->item_needs_writeback = 1;
+
+ if (scrub_ret) {
+ printk_in_rcu(KERN_ERR
+ "btrfs: btrfs_scrub_dev(%s, %llu, %s) failed %d\n",
+ src_device->missing ? "<missing disk>" :
+ rcu_str_deref(src_device->name),
+ src_device->devid,
+ rcu_str_deref(tgt_device->name), scrub_ret);
+ btrfs_dev_replace_unlock(dev_replace);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ if (tgt_device)
+ btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+
+ return 0;
+ }
+
+ printk_in_rcu(KERN_INFO
+ "btrfs: dev_replace from %s (devid %llu) to %s) finished\n",
+ src_device->missing ? "<missing disk>" :
+ rcu_str_deref(src_device->name),
+ src_device->devid,
+ rcu_str_deref(tgt_device->name));
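+ /*
+ * the target device takes over the identity of the source device:
+ * devid, UUID and the size fields are copied over, and the
+ * superblock and latest_bdev pointers are switched if they still
+ * point to the source device
+ */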
+ tgt_device->is_tgtdev_for_dev_replace = 0;
+ tgt_device->devid = src_device->devid;
+ src_device->devid = BTRFS_DEV_REPLACE_DEVID;
+ tgt_device->bytes_used = src_device->bytes_used;
+ memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
+ memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
+ memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
+ tgt_device->total_bytes = src_device->total_bytes;
+ tgt_device->disk_total_bytes = src_device->disk_total_bytes;
+ tgt_device->bytes_used = src_device->bytes_used;
+ if (fs_info->sb->s_bdev == src_device->bdev)
+ fs_info->sb->s_bdev = tgt_device->bdev;
+ if (fs_info->fs_devices->latest_bdev == src_device->bdev)
+ fs_info->fs_devices->latest_bdev = tgt_device->bdev;
+ list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
+
+ btrfs_rm_dev_replace_srcdev(fs_info, src_device);
+ if (src_device->bdev) {
+ /* zero out the old super */
+ btrfs_scratch_superblock(src_device);
+ }
+ /*
+ * this is again a consistent state: no dev_replace procedure is
+ * running, the target device is part of the filesystem, the source
+ * device is no longer part of the filesystem, and its first
+ * superblock is scratched out so that it is no longer marked as
+ * belonging to this filesystem.
+ */
+ btrfs_dev_replace_unlock(dev_replace);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+ /* write back the superblocks */
+ trans = btrfs_start_transaction(root, 0);
+ if (!IS_ERR(trans))
+ btrfs_commit_transaction(trans, root);
+
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+
+ return 0;
+}
+
+static void btrfs_dev_replace_update_device_in_mapping_tree(
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev,
+ struct btrfs_device *tgtdev)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+ u64 start = 0;
+ int i;
+
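+ /*
+ * walk all chunk mappings and redirect every stripe that still
+ * points to the source device to the target device
+ */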
+ write_lock(&em_tree->lock);
+ do {
+ em = lookup_extent_mapping(em_tree, start, (u64)-1);
+ if (!em)
+ break;
+ map = (struct map_lookup *)em->bdev;
+ for (i = 0; i < map->num_stripes; i++)
+ if (srcdev == map->stripes[i].dev)
+ map->stripes[i].dev = tgtdev;
+ start = em->start + em->len;
+ free_extent_map(em);
+ } while (start);
+ write_unlock(&em_tree->lock);
+}
+
+static int btrfs_dev_replace_find_srcdev(struct btrfs_root *root, u64 srcdevid,
+ char *srcdev_name,
+ struct btrfs_device **device)
+{
+ int ret;
+
+ if (srcdevid) {
+ ret = 0;
+ *device = btrfs_find_device(root->fs_info, srcdevid, NULL,
+ NULL);
+ if (!*device)
+ ret = -ENOENT;
+ } else {
+ ret = btrfs_find_device_missing_or_by_path(root, srcdev_name,
+ device);
+ }
+ return ret;
+}
+
+void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_dev_replace_args *args)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+
+ btrfs_dev_replace_lock(dev_replace);
+ /* even if !dev_replace_is_valid, the values are good enough for
+ * the replace_status ioctl */
+ args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ args->status.replace_state = dev_replace->replace_state;
+ args->status.time_started = dev_replace->time_started;
+ args->status.time_stopped = dev_replace->time_stopped;
+ args->status.num_write_errors =
+ atomic64_read(&dev_replace->num_write_errors);
+ args->status.num_uncorrectable_read_errors =
+ atomic64_read(&dev_replace->num_uncorrectable_read_errors);
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ args->status.progress_1000 = 0;
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ args->status.progress_1000 = 1000;
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
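+ /*
+ * report progress in tenths of a percent: the current cursor
+ * position divided by 1/1000th of the source device size
+ */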
+ args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
+ div64_u64(dev_replace->srcdev->total_bytes, 1000));
+ break;
+ }
+ btrfs_dev_replace_unlock(dev_replace);
+}
+
+int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_dev_replace_args *args)
+{
+ args->result = __btrfs_dev_replace_cancel(fs_info);
+ return 0;
+}
+
+static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_device *tgt_device = NULL;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = fs_info->tree_root;
+ u64 result;
+ int ret;
+
+ mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
+ btrfs_dev_replace_lock(dev_replace);
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
+ btrfs_dev_replace_unlock(dev_replace);
+ goto leave;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ tgt_device = dev_replace->tgtdev;
+ dev_replace->tgtdev = NULL;
+ dev_replace->srcdev = NULL;
+ break;
+ }
+ dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
+ dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+ dev_replace->item_needs_writeback = 1;
+ btrfs_dev_replace_unlock(dev_replace);
+ btrfs_scrub_cancel(fs_info);
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans, root);
+ WARN_ON(ret);
+ if (tgt_device)
+ btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+
+leave:
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return result;
+}
+
+void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+
+ mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
+ btrfs_dev_replace_lock(dev_replace);
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+ dev_replace->time_stopped = btrfs_get_seconds_since_1970();
+ dev_replace->item_needs_writeback = 1;
+ pr_info("btrfs: suspending dev_replace for unmount\n");
+ break;
+ }
+
+ btrfs_dev_replace_unlock(dev_replace);
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+}
+
+/* resume dev_replace procedure that was interrupted by unmount */
+int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
+{
+ struct task_struct *task;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+
+ btrfs_dev_replace_lock(dev_replace);
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ btrfs_dev_replace_unlock(dev_replace);
+ return 0;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
+ break;
+ }
+ if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
+ pr_info("btrfs: cannot continue dev_replace, tgtdev is missing\n"
+ "btrfs: you may cancel the operation after 'mount -o degraded'\n");
+ btrfs_dev_replace_unlock(dev_replace);
+ return 0;
+ }
+ btrfs_dev_replace_unlock(dev_replace);
+
+ WARN_ON(atomic_xchg(
+ &fs_info->mutually_exclusive_operation_running, 1));
+ task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
+ return PTR_RET(task);
+}
+
+static int btrfs_dev_replace_kthread(void *data)
+{
+ struct btrfs_fs_info *fs_info = data;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_ioctl_dev_replace_args *status_args;
+ u64 progress;
+
+ status_args = kzalloc(sizeof(*status_args), GFP_NOFS);
+ if (status_args) {
+ btrfs_dev_replace_status(fs_info, status_args);
+ progress = status_args->status.progress_1000;
+ kfree(status_args);
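+ /* progress_1000 is in tenths of a percent, convert to percent */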
+ do_div(progress, 10);
+ printk_in_rcu(KERN_INFO
+ "btrfs: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
+ dev_replace->srcdev->missing ? "<missing disk>" :
+ rcu_str_deref(dev_replace->srcdev->name),
+ dev_replace->srcdev->devid,
+ dev_replace->tgtdev ?
+ rcu_str_deref(dev_replace->tgtdev->name) :
+ "<missing target disk>",
+ (unsigned int)progress);
+ }
+ btrfs_dev_replace_continue_on_mount(fs_info);
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+
+ return 0;
+}
+
+static int btrfs_dev_replace_continue_on_mount(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ int ret;
+
+ ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
+ dev_replace->committed_cursor_left,
+ dev_replace->srcdev->total_bytes,
+ &dev_replace->scrub_progress, 0, 1);
+ ret = btrfs_dev_replace_finishing(fs_info, ret);
+ WARN_ON(ret);
+ return 0;
+}
+
+int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+{
+ if (!dev_replace->is_valid)
+ return 0;
+
+ switch (dev_replace->replace_state) {
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+ return 0;
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ /*
+ * return true even if tgtdev is missing (this can happen if the
+ * dev_replace procedure is suspended by an umount and the tgtdev
+ * then goes missing, e.g. because "btrfs dev scan" was not called
+ * before the filesystem was remounted in degraded state). This
+ * does not stop the dev_replace procedure. It needs to be
+ * canceled manually if cancellation is wanted.
+ */
+ break;
+ }
+ return 1;
+}
+
+void btrfs_dev_replace_lock(struct btrfs_dev_replace *dev_replace)
+{
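+ /*
+ * nesting_level and lock_owner emulate a recursive mutex: the task
+ * that already holds the lock may take it again, and the lock is
+ * only released once the nesting level drops back to zero
+ */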
+ /* the beginning is just an optimization for the typical case */
+ if (atomic_read(&dev_replace->nesting_level) == 0) {
+acquire_lock:
+ /* this is not a nested case where the same thread
+ * is trying to acquire the same lock twice */
+ mutex_lock(&dev_replace->lock);
+ mutex_lock(&dev_replace->lock_management_lock);
+ dev_replace->lock_owner = current->pid;
+ atomic_inc(&dev_replace->nesting_level);
+ mutex_unlock(&dev_replace->lock_management_lock);
+ return;
+ }
+
+ mutex_lock(&dev_replace->lock_management_lock);
+ if (atomic_read(&dev_replace->nesting_level) > 0 &&
+ dev_replace->lock_owner == current->pid) {
+ WARN_ON(!mutex_is_locked(&dev_replace->lock));
+ atomic_inc(&dev_replace->nesting_level);
+ mutex_unlock(&dev_replace->lock_management_lock);
+ return;
+ }
+
+ mutex_unlock(&dev_replace->lock_management_lock);
+ goto acquire_lock;
+}
+
+void btrfs_dev_replace_unlock(struct btrfs_dev_replace *dev_replace)
+{
+ WARN_ON(!mutex_is_locked(&dev_replace->lock));
+ mutex_lock(&dev_replace->lock_management_lock);
+ WARN_ON(atomic_read(&dev_replace->nesting_level) < 1);
+ WARN_ON(dev_replace->lock_owner != current->pid);
+ atomic_dec(&dev_replace->nesting_level);
+ if (atomic_read(&dev_replace->nesting_level) == 0) {
+ dev_replace->lock_owner = 0;
+ mutex_unlock(&dev_replace->lock_management_lock);
+ mutex_unlock(&dev_replace->lock);
+ } else {
+ mutex_unlock(&dev_replace->lock_management_lock);
+ }
+}
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
new file mode 100644
index 00000000000..20035cbbf02
--- /dev/null
+++ b/fs/btrfs/dev-replace.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) STRATO AG 2012. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#if !defined(__BTRFS_DEV_REPLACE__)
+#define __BTRFS_DEV_REPLACE__
+
+struct btrfs_ioctl_dev_replace_args;
+
+int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
+int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
+int btrfs_dev_replace_start(struct btrfs_root *root,
+ struct btrfs_ioctl_dev_replace_args *args);
+void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_dev_replace_args *args);
+int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_dev_replace_args *args);
+void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
+int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
+int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+void btrfs_dev_replace_lock(struct btrfs_dev_replace *dev_replace);
+void btrfs_dev_replace_unlock(struct btrfs_dev_replace *dev_replace);
+
+static inline void btrfs_dev_replace_stats_inc(atomic64_t *stat_value)
+{
+ atomic64_inc(stat_value);
+}
+#endif
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c1a074d0696..502c2158167 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -213,6 +213,65 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
return btrfs_match_dir_item_name(root, path, name, name_len);
}
+int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
+ const char *name, int name_len)
+{
+ int ret;
+ struct btrfs_key key;
+ struct btrfs_dir_item *di;
+ int data_size;
+ struct extent_buffer *leaf;
+ int slot;
+ struct btrfs_path *path;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = dir;
+ btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+ key.offset = btrfs_name_hash(name, name_len);
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+
+ /* return any errors */
+ if (ret < 0)
+ goto out;
+
+ /* nothing found, we're safe */
+ if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+
+ /* we found an item, look for our name in the item */
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
+ if (di) {
+ /* our exact name was found */
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /*
+ * see if there is room in the item to insert this
+ * name
+ */
+ data_size = sizeof(*di) + name_len + sizeof(struct btrfs_item);
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ if (data_size + btrfs_item_size_nr(leaf, slot) +
+ sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) {
+ ret = -EOVERFLOW;
+ } else {
+ /* plenty of insertion room */
+ ret = 0;
+ }
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
/*
* lookup a directory item based on index. 'dir' is the objectid
* we're searching in, and 'mod' tells us if you plan on deleting the
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 22a0439e5a8..a8f652dc940 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -45,6 +45,7 @@
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
+#include "dev-replace.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -387,7 +388,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
break;
- num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
+ num_copies = btrfs_num_copies(root->fs_info,
eb->start, eb->len);
if (num_copies == 1)
break;
@@ -852,11 +853,16 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
+ int ret;
+
/*
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+ if (ret)
+ bio_endio(bio, ret);
+ return ret;
}
static int check_async_write(struct inode *inode, unsigned long bio_flags)
@@ -878,7 +884,6 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int ret;
if (!(rw & REQ_WRITE)) {
-
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
@@ -886,26 +891,32 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
bio, 1);
if (ret)
- return ret;
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 0);
+ goto out_w_error;
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+ mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
- return ret;
- return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 0);
+ goto out_w_error;
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+ mirror_num, 0);
+ } else {
+ /*
+ * kthread helpers are used to submit writes so that
+ * checksumming can happen in parallel across all CPUs
+ */
+ ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+ inode, rw, bio, mirror_num, 0,
+ bio_offset,
+ __btree_submit_bio_start,
+ __btree_submit_bio_done);
}
- /*
- * kthread helpers are used to submit writes so that checksumming
- * can happen in parallel across all CPUs
- */
- return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, mirror_num, 0,
- bio_offset,
- __btree_submit_bio_start,
- __btree_submit_bio_done);
+ if (ret) {
+out_w_error:
+ bio_endio(bio, ret);
+ }
+ return ret;
}
#ifdef CONFIG_MIGRATION
@@ -990,6 +1001,7 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
static int btree_set_page_dirty(struct page *page)
{
+#ifdef DEBUG
struct extent_buffer *eb;
BUG_ON(!PagePrivate(page));
@@ -998,6 +1010,7 @@ static int btree_set_page_dirty(struct page *page)
BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(!atomic_read(&eb->refs));
btrfs_assert_tree_locked(eb);
+#endif
return __set_page_dirty_nobuffers(page);
}
@@ -1129,11 +1142,11 @@ void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
root->fs_info->dirty_metadata_bytes);
}
spin_unlock(&root->fs_info->delalloc_lock);
- }
- /* ugh, clear_extent_buffer_dirty needs to lock the page */
- btrfs_set_lock_blocking(buf);
- clear_extent_buffer_dirty(buf);
+ /* ugh, clear_extent_buffer_dirty needs to lock the page */
+ btrfs_set_lock_blocking(buf);
+ clear_extent_buffer_dirty(buf);
+ }
}
}
@@ -1193,7 +1206,7 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->root_key.objectid = objectid;
root->anon_dev = 0;
- spin_lock_init(&root->root_times_lock);
+ spin_lock_init(&root->root_item_lock);
}
static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
@@ -2131,6 +2144,11 @@ int open_ctree(struct super_block *sb,
init_rwsem(&fs_info->extent_commit_sem);
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
+ fs_info->dev_replace.lock_owner = 0;
+ atomic_set(&fs_info->dev_replace.nesting_level, 0);
+ mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
+ mutex_init(&fs_info->dev_replace.lock_management_lock);
+ mutex_init(&fs_info->dev_replace.lock);
spin_lock_init(&fs_info->qgroup_lock);
fs_info->qgroup_tree = RB_ROOT;
@@ -2279,6 +2297,10 @@ int open_ctree(struct super_block *sb,
fs_info->thread_pool_size,
&fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
+
btrfs_init_workers(&fs_info->submit_workers, "submit",
min_t(u64, fs_devices->num_devices,
fs_info->thread_pool_size),
@@ -2350,6 +2372,7 @@ int open_ctree(struct super_block *sb,
ret |= btrfs_start_workers(&fs_info->delayed_workers);
ret |= btrfs_start_workers(&fs_info->caching_workers);
ret |= btrfs_start_workers(&fs_info->readahead_workers);
+ ret |= btrfs_start_workers(&fs_info->flush_workers);
if (ret) {
err = -ENOMEM;
goto fail_sb_buffer;
@@ -2418,7 +2441,11 @@ int open_ctree(struct super_block *sb,
goto fail_tree_roots;
}
- btrfs_close_extra_devices(fs_devices);
+ /*
+ * keep the device that is marked to be the target device for the
+ * dev_replace procedure
+ */
+ btrfs_close_extra_devices(fs_info, fs_devices, 0);
if (!fs_devices->latest_bdev) {
printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
@@ -2490,6 +2517,14 @@ retry_root_backup:
goto fail_block_groups;
}
+ ret = btrfs_init_dev_replace(fs_info);
+ if (ret) {
+ pr_err("btrfs: failed to init dev_replace: %d\n", ret);
+ goto fail_block_groups;
+ }
+
+ btrfs_close_extra_devices(fs_info, fs_devices, 1);
+
ret = btrfs_init_space_info(fs_info);
if (ret) {
printk(KERN_ERR "Failed to initial space info: %d\n", ret);
@@ -2503,6 +2538,13 @@ retry_root_backup:
}
fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
+ if (fs_info->fs_devices->missing_devices >
+ fs_info->num_tolerated_disk_barrier_failures &&
+ !(sb->s_flags & MS_RDONLY)) {
+ printk(KERN_WARNING
+ "Btrfs: too many missing devices, writeable mount is not allowed\n");
+ goto fail_block_groups;
+ }
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
@@ -2631,6 +2673,13 @@ retry_root_backup:
return ret;
}
+ ret = btrfs_resume_dev_replace_async(fs_info);
+ if (ret) {
+ pr_warn("btrfs: failed to resume dev_replace\n");
+ close_ctree(tree_root);
+ return ret;
+ }
+
return 0;
fail_qgroup:
@@ -2667,6 +2716,7 @@ fail_sb_buffer:
btrfs_stop_workers(&fs_info->submit_workers);
btrfs_stop_workers(&fs_info->delayed_workers);
btrfs_stop_workers(&fs_info->caching_workers);
+ btrfs_stop_workers(&fs_info->flush_workers);
fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -3270,16 +3320,18 @@ int close_ctree(struct btrfs_root *root)
smp_mb();
/* pause restriper - we want to resume on mount */
- btrfs_pause_balance(root->fs_info);
+ btrfs_pause_balance(fs_info);
- btrfs_scrub_cancel(root);
+ btrfs_dev_replace_suspend_for_unmount(fs_info);
+
+ btrfs_scrub_cancel(fs_info);
/* wait for any defraggers to finish */
wait_event(fs_info->transaction_wait,
(atomic_read(&fs_info->defrag_running) == 0));
/* clear out the rbtree of defraggable inodes */
- btrfs_run_defrag_inodes(fs_info);
+ btrfs_cleanup_defrag_inodes(fs_info);
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_commit_super(root);
@@ -3339,6 +3391,7 @@ int close_ctree(struct btrfs_root *root)
btrfs_stop_workers(&fs_info->delayed_workers);
btrfs_stop_workers(&fs_info->caching_workers);
btrfs_stop_workers(&fs_info->readahead_workers);
+ btrfs_stop_workers(&fs_info->flush_workers);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(root, CHECK_INTEGRITY))
@@ -3383,14 +3436,12 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
int was_dirty;
btrfs_assert_tree_locked(buf);
- if (transid != root->fs_info->generation) {
- printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
+ if (transid != root->fs_info->generation)
+ WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
"found %llu running %llu\n",
(unsigned long long)buf->start,
(unsigned long long)transid,
(unsigned long long)root->fs_info->generation);
- WARN_ON(1);
- }
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty) {
spin_lock(&root->fs_info->delalloc_lock);
@@ -3399,7 +3450,8 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
}
}
-void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+ int flush_delayed)
{
/*
* looks as though older kernels can get into trouble with
@@ -3411,7 +3463,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
if (current->flags & PF_MEMALLOC)
return;
- btrfs_balance_delayed_items(root);
+ if (flush_delayed)
+ btrfs_balance_delayed_items(root);
num_dirty = root->fs_info->dirty_metadata_bytes;
@@ -3422,25 +3475,14 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
return;
}
-void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
- /*
- * looks as though older kernels can get into trouble with
- * this code, they end up stuck in balance_dirty_pages forever
- */
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
-
- if (current->flags & PF_MEMALLOC)
- return;
-
- num_dirty = root->fs_info->dirty_metadata_bytes;
+ __btrfs_btree_balance_dirty(root, 1);
+}
- if (num_dirty > thresh) {
- balance_dirty_pages_ratelimited(
- root->fs_info->btree_inode->i_mapping);
- }
- return;
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+{
+ __btrfs_btree_balance_dirty(root, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 2025a9132c1..305c33efb0e 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -62,8 +62,8 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
-void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void btrfs_btree_balance_dirty(struct btrfs_root *root);
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d3e2c17d8d..521e9d4424f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,6 +33,7 @@
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
+#include "math.h"
#undef SCRAMBLE_DELAYED_REFS
@@ -649,24 +650,6 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
rcu_read_unlock();
}
-static u64 div_factor(u64 num, int factor)
-{
- if (factor == 10)
- return num;
- num *= factor;
- do_div(num, 10);
- return num;
-}
-
-static u64 div_factor_fine(u64 num, int factor)
-{
- if (factor == 100)
- return num;
- num *= factor;
- do_div(num, 100);
- return num;
-}
-
u64 btrfs_find_block_group(struct btrfs_root *root,
u64 search_start, u64 search_hint, int owner)
{
@@ -1835,7 +1818,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
/* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
+ ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
bytenr, &num_bytes, &bbio, 0);
/* Error condition is -ENOMEM */
if (!ret) {
@@ -2314,6 +2297,9 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
kfree(extent_op);
if (ret) {
+ list_del_init(&locked_ref->cluster);
+ mutex_unlock(&locked_ref->mutex);
+
printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
spin_lock(&delayed_refs->lock);
return ret;
@@ -2356,6 +2342,10 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
count++;
if (ret) {
+ if (locked_ref) {
+ list_del_init(&locked_ref->cluster);
+ mutex_unlock(&locked_ref->mutex);
+ }
printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
spin_lock(&delayed_refs->lock);
return ret;
@@ -3661,7 +3651,7 @@ out:
static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
- int flush)
+ enum btrfs_reserve_flush_enum flush)
{
u64 profile = btrfs_get_alloc_profile(root, 0);
u64 avail;
@@ -3685,11 +3675,11 @@ static int can_overcommit(struct btrfs_root *root,
avail >>= 1;
/*
- * If we aren't flushing don't let us overcommit too much, say
- * 1/8th of the space. If we can flush, let it overcommit up to
- * 1/2 of the space.
+ * If we aren't flushing all things, let us overcommit up to
+ * 1/2 of the space. If we can flush all things, don't let us
+ * overcommit too much; let it overcommit up to 1/8 of the space.
*/
- if (flush)
+ if (flush == BTRFS_RESERVE_FLUSH_ALL)
avail >>= 3;
else
avail >>= 1;
@@ -3699,6 +3689,20 @@ static int can_overcommit(struct btrfs_root *root,
return 0;
}
+static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
+ unsigned long nr_pages,
+ enum wb_reason reason)
+{
+ if (!writeback_in_progress(sb->s_bdi) &&
+ down_read_trylock(&sb->s_umount)) {
+ writeback_inodes_sb_nr(sb, nr_pages, reason);
+ up_read(&sb->s_umount);
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* shrink metadata reservation for delalloc
*/
@@ -3713,6 +3717,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
long time_left;
unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
int loops = 0;
+ enum btrfs_reserve_flush_enum flush;
trans = (struct btrfs_trans_handle *)current->journal_info;
block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -3730,8 +3735,9 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
- writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
- WB_REASON_FS_FREE_SPACE);
+ writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
+ nr_pages,
+ WB_REASON_FS_FREE_SPACE);
/*
* We need to wait for the async pages to actually start before
@@ -3740,8 +3746,12 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
wait_event(root->fs_info->async_submit_wait,
!atomic_read(&root->fs_info->async_delalloc_pages));
+ if (!trans)
+ flush = BTRFS_RESERVE_FLUSH_ALL;
+ else
+ flush = BTRFS_RESERVE_NO_FLUSH;
spin_lock(&space_info->lock);
- if (can_overcommit(root, space_info, orig, !trans)) {
+ if (can_overcommit(root, space_info, orig, flush)) {
spin_unlock(&space_info->lock);
break;
}
@@ -3888,7 +3898,7 @@ static int flush_space(struct btrfs_root *root,
* @root - the root we're allocating for
* @block_rsv - the block_rsv we're allocating for
* @orig_bytes - the number of bytes we want
- * @flush - wether or not we can flush to make our reservation
+ * @flush - whether or not we can flush to make our reservation
*
* This will reserve orgi_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
@@ -3899,7 +3909,8 @@ static int flush_space(struct btrfs_root *root,
*/
static int reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
- u64 orig_bytes, int flush)
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 used;
@@ -3912,10 +3923,11 @@ again:
ret = 0;
spin_lock(&space_info->lock);
/*
- * We only want to wait if somebody other than us is flushing and we are
- * actually alloed to flush.
+ * We only want to wait if somebody other than us is flushing and we
+ * are actually allowed to flush all things.
*/
- while (flush && !flushing && space_info->flush) {
+ while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
+ space_info->flush) {
spin_unlock(&space_info->lock);
/*
* If we have a trans handle we can't wait because the flusher
@@ -3981,23 +3993,40 @@ again:
* Couldn't make our reservation, save our place so while we're trying
* to reclaim space we can actually use it instead of somebody else
* stealing it from us.
+ *
+ * We make the other tasks wait for the flush only when we can flush
+ * all things.
*/
- if (ret && flush) {
+ if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
flushing = true;
space_info->flush = 1;
}
spin_unlock(&space_info->lock);
- if (!ret || !flush)
+ if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
goto out;
ret = flush_space(root, space_info, num_bytes, orig_bytes,
flush_state);
flush_state++;
+
+ /*
+ * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
+ * would happen. So skip delalloc flush.
+ */
+ if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
+ (flush_state == FLUSH_DELALLOC ||
+ flush_state == FLUSH_DELALLOC_WAIT))
+ flush_state = ALLOC_CHUNK;
+
if (!ret)
goto again;
- else if (flush_state <= COMMIT_TRANS)
+ else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
+ flush_state < COMMIT_TRANS)
+ goto again;
+ else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
+ flush_state <= COMMIT_TRANS)
goto again;
out:
@@ -4148,9 +4177,9 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
kfree(rsv);
}
-static inline int __block_rsv_add(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, int flush)
+int btrfs_block_rsv_add(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv, u64 num_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
int ret;
@@ -4166,20 +4195,6 @@ static inline int __block_rsv_add(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_add(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes)
-{
- return __block_rsv_add(root, block_rsv, num_bytes, 1);
-}
-
-int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes)
-{
- return __block_rsv_add(root, block_rsv, num_bytes, 0);
-}
-
int btrfs_block_rsv_check(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int min_factor)
{
@@ -4198,9 +4213,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
return ret;
}
-static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved, int flush)
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv, u64 min_reserved,
+ enum btrfs_reserve_flush_enum flush)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
@@ -4228,20 +4243,6 @@ static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved)
-{
- return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
-}
-
-int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved)
-{
- return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
-}
-
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes)
@@ -4532,17 +4533,27 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
u64 csum_bytes;
unsigned nr_extents = 0;
int extra_reserve = 0;
- int flush = 1;
+ enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret;
+ bool delalloc_lock = true;
- /* Need to be holding the i_mutex here if we aren't free space cache */
- if (btrfs_is_free_space_inode(inode))
- flush = 0;
+ /* If we are a free space inode we must not flush, since we will be in
+ * the middle of a transaction commit. We also don't need the delalloc
+ * mutex since we won't race with anybody. We need this mostly to make
+ * lockdep shut its filthy mouth.
+ */
+ if (btrfs_is_free_space_inode(inode)) {
+ flush = BTRFS_RESERVE_NO_FLUSH;
+ delalloc_lock = false;
+ }
- if (flush && btrfs_transaction_in_commit(root->fs_info))
+ if (flush != BTRFS_RESERVE_NO_FLUSH &&
+ btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);
- mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
+ if (delalloc_lock)
+ mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
+
num_bytes = ALIGN(num_bytes, root->sectorsize);
spin_lock(&BTRFS_I(inode)->lock);
@@ -4572,7 +4583,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
ret = btrfs_qgroup_reserve(root, num_bytes +
nr_extents * root->leafsize);
if (ret) {
- mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+ spin_lock(&BTRFS_I(inode)->lock);
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (delalloc_lock)
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
return ret;
}
}
@@ -4607,7 +4622,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
btrfs_ino(inode),
to_free, 0);
}
- mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+ if (root->fs_info->quota_enabled) {
+ btrfs_qgroup_free(root, num_bytes +
+ nr_extents * root->leafsize);
+ }
+ if (delalloc_lock)
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
return ret;
}
@@ -4619,7 +4639,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
}
BTRFS_I(inode)->reserved_extents += nr_extents;
spin_unlock(&BTRFS_I(inode)->lock);
- mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+
+ if (delalloc_lock)
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
if (to_reserve)
trace_btrfs_space_reservation(root->fs_info,"delalloc",
@@ -4969,9 +4991,13 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache = NULL;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
u64 len;
+ bool readonly;
while (start <= end) {
+ readonly = false;
if (!cache ||
start >= cache->key.objectid + cache->key.offset) {
if (cache)
@@ -4989,15 +5015,30 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
}
start += len;
+ space_info = cache->space_info;
- spin_lock(&cache->space_info->lock);
+ spin_lock(&space_info->lock);
spin_lock(&cache->lock);
cache->pinned -= len;
- cache->space_info->bytes_pinned -= len;
- if (cache->ro)
- cache->space_info->bytes_readonly += len;
+ space_info->bytes_pinned -= len;
+ if (cache->ro) {
+ space_info->bytes_readonly += len;
+ readonly = true;
+ }
spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ if (!readonly && global_rsv->space_info == space_info) {
+ spin_lock(&global_rsv->lock);
+ if (!global_rsv->full) {
+ len = min(len, global_rsv->size -
+ global_rsv->reserved);
+ global_rsv->reserved += len;
+ space_info->bytes_may_use += len;
+ if (global_rsv->reserved >= global_rsv->size)
+ global_rsv->full = 1;
+ }
+ spin_unlock(&global_rsv->lock);
+ }
+ spin_unlock(&space_info->lock);
}
if (cache)
@@ -5466,7 +5507,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
return 0;
}
-static int __get_block_group_index(u64 flags)
+int __get_raid_index(u64 flags)
{
int index;
@@ -5486,7 +5527,7 @@ static int __get_block_group_index(u64 flags)
static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
- return __get_block_group_index(cache->flags);
+ return __get_raid_index(cache->flags);
}
enum btrfs_loop_type {
@@ -6269,7 +6310,8 @@ use_block_rsv(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
if (block_rsv->size == 0) {
- ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
+ ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+ BTRFS_RESERVE_NO_FLUSH);
/*
* If we couldn't reserve metadata bytes try and use some from
* the global reserve.
@@ -6292,11 +6334,11 @@ use_block_rsv(struct btrfs_trans_handle *trans,
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
/*DEFAULT_RATELIMIT_BURST*/ 2);
- if (__ratelimit(&_rs)) {
- printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
- WARN_ON(1);
- }
- ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
+ if (__ratelimit(&_rs))
+ WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
+ ret);
+ ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+ BTRFS_RESERVE_NO_FLUSH);
if (!ret) {
return block_rsv;
} else if (ret && block_rsv != global_rsv) {
@@ -7427,7 +7469,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
*/
target = get_restripe_target(root->fs_info, block_group->flags);
if (target) {
- index = __get_block_group_index(extended_to_chunk(target));
+ index = __get_raid_index(extended_to_chunk(target));
} else {
/*
* this is just a balance, so if we were marked as full
@@ -7461,7 +7503,8 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
* check to make sure we can actually find a chunk with enough
* space to fit our block group in.
*/
- if (device->total_bytes > device->bytes_used + min_free) {
+ if (device->total_bytes > device->bytes_used + min_free &&
+ !device->is_tgtdev_for_dev_replace) {
ret = find_free_dev_extent(device, min_free,
&dev_offset, NULL);
if (!ret)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 472873a94d9..1b319df29ee 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -341,12 +341,10 @@ static int insert_state(struct extent_io_tree *tree,
{
struct rb_node *node;
- if (end < start) {
- printk(KERN_ERR "btrfs end < start %llu %llu\n",
+ if (end < start)
+ WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
(unsigned long long)end,
(unsigned long long)start);
- WARN_ON(1);
- }
state->start = start;
state->end = end;
@@ -1919,12 +1917,12 @@ static void repair_io_failure_callback(struct bio *bio, int err)
* the standard behavior is to write all copies in a raid setup. here we only
* want to write the one bad copy. so we do the mapping for ourselves and issue
* submit_bio directly.
- * to avoid any synchonization issues, wait for the data after writing, which
+ * to avoid any synchronization issues, wait for the data after writing, which
* actually prevents the read that triggered the error from finishing.
* currently, there can be no more than two copies of every data bit. thus,
* exactly one rewrite is required.
*/
-int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
u64 length, u64 logical, struct page *page,
int mirror_num)
{
@@ -1946,7 +1944,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
bio->bi_size = 0;
map_length = length;
- ret = btrfs_map_block(map_tree, WRITE, logical,
+ ret = btrfs_map_block(fs_info, WRITE, logical,
&map_length, &bbio, mirror_num);
if (ret) {
bio_put(bio);
@@ -1984,14 +1982,13 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num)
{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
- ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
+ ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
start, p, mirror_num);
if (ret)
break;
@@ -2010,7 +2007,7 @@ static int clean_io_failure(u64 start, struct page *page)
u64 private;
u64 private_failure;
struct io_failure_record *failrec;
- struct btrfs_mapping_tree *map_tree;
+ struct btrfs_fs_info *fs_info;
struct extent_state *state;
int num_copies;
int did_repair = 0;
@@ -2046,11 +2043,11 @@ static int clean_io_failure(u64 start, struct page *page)
spin_unlock(&BTRFS_I(inode)->io_tree.lock);
if (state && state->start == failrec->start) {
- map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
- num_copies = btrfs_num_copies(map_tree, failrec->logical,
- failrec->len);
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ num_copies = btrfs_num_copies(fs_info, failrec->logical,
+ failrec->len);
if (num_copies > 1) {
- ret = repair_io_failure(map_tree, start, failrec->len,
+ ret = repair_io_failure(fs_info, start, failrec->len,
failrec->logical, page,
failrec->failed_mirror);
did_repair = !ret;
@@ -2159,9 +2156,8 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
* clean_io_failure() clean all those errors at once.
*/
}
- num_copies = btrfs_num_copies(
- &BTRFS_I(inode)->root->fs_info->mapping_tree,
- failrec->logical, failrec->len);
+ num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
+ failrec->logical, failrec->len);
if (num_copies == 1) {
/*
* we only have a single copy of the data, so don't bother with
@@ -2466,10 +2462,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
return bio;
}
-/*
- * Since writes are async, they will only return -ENOMEM.
- * Reads can return the full range of I/O error conditions.
- */
static int __must_check submit_one_bio(int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
@@ -4721,10 +4713,9 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
}
if (start + min_len > eb->len) {
- printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
+ WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
"wanted %lu %lu\n", (unsigned long long)eb->start,
eb->len, start, min_len);
- WARN_ON(1);
return -EINVAL;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 711d12b8002..2eacfabd326 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -337,9 +337,9 @@ struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
gfp_t gfp_flags);
-struct btrfs_mapping_tree;
+struct btrfs_fs_info;
-int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
u64 length, u64 logical, struct page *page,
int mirror_num);
int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index b8cbc8d5c7f..f169d6b11d7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -49,7 +49,7 @@ void extent_map_tree_init(struct extent_map_tree *tree)
struct extent_map *alloc_extent_map(void)
{
struct extent_map *em;
- em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
+ em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
em->in_tree = 0;
@@ -198,16 +198,15 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(merge, em)) {
em->start = merge->start;
+ em->orig_start = merge->orig_start;
em->len += merge->len;
em->block_len += merge->block_len;
em->block_start = merge->block_start;
merge->in_tree = 0;
- if (merge->generation > em->generation) {
- em->mod_start = em->start;
- em->mod_len = em->len;
- em->generation = merge->generation;
- list_move(&em->list, &tree->modified_extents);
- }
+ em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
+ em->mod_start = merge->mod_start;
+ em->generation = max(em->generation, merge->generation);
+ list_move(&em->list, &tree->modified_extents);
list_del_init(&merge->list);
rb_erase(&merge->rb_node, &tree->map);
@@ -223,23 +222,19 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
- if (merge->generation > em->generation) {
- em->mod_len = em->len;
- em->generation = merge->generation;
- list_move(&em->list, &tree->modified_extents);
- }
+ em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
+ em->generation = max(em->generation, merge->generation);
list_del_init(&merge->list);
free_extent_map(merge);
}
}
/**
- * unpint_extent_cache - unpin an extent from the cache
+ * unpin_extent_cache - unpin an extent from the cache
* @tree: tree to unpin the extent in
* @start: logical offset in the file
* @len: length of the extent
* @gen: generation that this extent has been modified in
- * @prealloc: if this is set we need to clear the prealloc flag
*
* Called after an extent has been written to disk properly. Set the generation
* to the generation that actually added the file item to the inode so we know
@@ -266,9 +261,9 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
em->mod_start = em->start;
em->mod_len = em->len;
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
prealloc = true;
- clear_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ clear_bit(EXTENT_FLAG_FILLING, &em->flags);
}
try_merge_map(tree, em);
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 679225555f7..922943ce29e 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -14,6 +14,7 @@
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
+#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
struct extent_map {
struct rb_node rb_node;
@@ -24,6 +25,7 @@ struct extent_map {
u64 mod_start;
u64 mod_len;
u64 orig_start;
+ u64 orig_block_len;
u64 block_start;
u64 block_len;
u64 generation;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 1ad08e4e4a1..bd38cef4235 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -133,7 +133,6 @@ fail:
return ERR_PTR(ret);
}
-
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid,
@@ -151,6 +150,26 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
return ret;
}
+u64 btrfs_file_extent_length(struct btrfs_path *path)
+{
+ int extent_type;
+ struct btrfs_file_extent_item *fi;
+ u64 len;
+
+ fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(path->nodes[0], fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC)
+ len = btrfs_file_extent_num_bytes(path->nodes[0], fi);
+ else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ len = btrfs_file_extent_inline_len(path->nodes[0], fi);
+ else
+ BUG();
+
+ return len;
+}
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct inode *inode, struct bio *bio,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a8ee75cb96e..77061bf43ed 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -41,6 +41,7 @@
#include "compat.h"
#include "volumes.h"
+static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
* when auto defrag is enabled we
* queue up these defrag structs to remember which
@@ -90,7 +91,7 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
* If an existing record is found the defrag item you
* pass in is freed
*/
-static void __btrfs_add_inode_defrag(struct inode *inode,
+static int __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -118,18 +119,24 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
entry->transid = defrag->transid;
if (defrag->last_offset > entry->last_offset)
entry->last_offset = defrag->last_offset;
- goto exists;
+ return -EEXIST;
}
}
set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
- return;
+ return 0;
+}
-exists:
- kfree(defrag);
- return;
+static inline int __need_auto_defrag(struct btrfs_root *root)
+{
+ if (!btrfs_test_opt(root, AUTO_DEFRAG))
+ return 0;
+
+ if (btrfs_fs_closing(root->fs_info))
+ return 0;
+ return 1;
}
/*
@@ -142,11 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
u64 transid;
+ int ret;
- if (!btrfs_test_opt(root, AUTO_DEFRAG))
- return 0;
-
- if (btrfs_fs_closing(root->fs_info))
+ if (!__need_auto_defrag(root))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
@@ -157,7 +162,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
else
transid = BTRFS_I(inode)->root->last_trans;
- defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
+ defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
if (!defrag)
return -ENOMEM;
@@ -166,20 +171,56 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->root = root->root_key.objectid;
spin_lock(&root->fs_info->defrag_inodes_lock);
- if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
- __btrfs_add_inode_defrag(inode, defrag);
- else
- kfree(defrag);
+ if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
+ /*
+ * If we set the IN_DEFRAG flag and the inode is later evicted and
+ * re-read, the new in-memory inode won't have IN_DEFRAG set, so we
+ * may find an existing defrag record here.
+ */
+ ret = __btrfs_add_inode_defrag(inode, defrag);
+ if (ret)
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ } else {
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ }
spin_unlock(&root->fs_info->defrag_inodes_lock);
return 0;
}
/*
- * must be called with the defrag_inodes lock held
+ * Requeue the defrag object. If there is a defrag object that points to
+ * the same inode in the tree, we will merge them together (by
+ * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
*/
-struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
- u64 root, u64 ino,
- struct rb_node **next)
+void btrfs_requeue_inode_defrag(struct inode *inode,
+ struct inode_defrag *defrag)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ if (!__need_auto_defrag(root))
+ goto out;
+
+ /*
+ * Here we don't check the IN_DEFRAG flag, because we need to merge
+ * them together.
+ */
+ spin_lock(&root->fs_info->defrag_inodes_lock);
+ ret = __btrfs_add_inode_defrag(inode, defrag);
+ spin_unlock(&root->fs_info->defrag_inodes_lock);
+ if (ret)
+ goto out;
+ return;
+out:
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+}
+
+/*
+ * Pick the defraggable inode that we want; if it doesn't exist, we will
+ * get the next one.
+ */
+static struct inode_defrag *
+btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
struct inode_defrag *entry = NULL;
struct inode_defrag tmp;
@@ -190,7 +231,8 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
tmp.ino = ino;
tmp.root = root;
- p = info->defrag_inodes.rb_node;
+ spin_lock(&fs_info->defrag_inodes_lock);
+ p = fs_info->defrag_inodes.rb_node;
while (p) {
parent = p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -201,52 +243,131 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
else if (ret > 0)
p = parent->rb_right;
else
- return entry;
+ goto out;
}
- if (next) {
- while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
- parent = rb_next(parent);
+ if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
+ parent = rb_next(parent);
+ if (parent)
entry = rb_entry(parent, struct inode_defrag, rb_node);
- }
- *next = parent;
+ else
+ entry = NULL;
}
- return NULL;
+out:
+ if (entry)
+ rb_erase(parent, &fs_info->defrag_inodes);
+ spin_unlock(&fs_info->defrag_inodes_lock);
+ return entry;
}
-/*
- * run through the list of inodes in the FS that need
- * defragging
- */
-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
struct inode_defrag *defrag;
+ struct rb_node *node;
+
+ spin_lock(&fs_info->defrag_inodes_lock);
+ node = rb_first(&fs_info->defrag_inodes);
+ while (node) {
+ rb_erase(node, &fs_info->defrag_inodes);
+ defrag = rb_entry(node, struct inode_defrag, rb_node);
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+
+ if (need_resched()) {
+ spin_unlock(&fs_info->defrag_inodes_lock);
+ cond_resched();
+ spin_lock(&fs_info->defrag_inodes_lock);
+ }
+
+ node = rb_first(&fs_info->defrag_inodes);
+ }
+ spin_unlock(&fs_info->defrag_inodes_lock);
+}
+
+#define BTRFS_DEFRAG_BATCH 1024
+
+static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+ struct inode_defrag *defrag)
+{
struct btrfs_root *inode_root;
struct inode *inode;
- struct rb_node *n;
struct btrfs_key key;
struct btrfs_ioctl_defrag_range_args range;
- u64 first_ino = 0;
- u64 root_objectid = 0;
int num_defrag;
- int defrag_batch = 1024;
+ /* get the inode */
+ key.objectid = defrag->root;
+ btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.offset = (u64)-1;
+ inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(inode_root)) {
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ return PTR_ERR(inode_root);
+ }
+
+ key.objectid = defrag->ino;
+ btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.offset = 0;
+ inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+ if (IS_ERR(inode)) {
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ return PTR_ERR(inode);
+ }
+
+ /* do a chunk of defrag */
+ clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
+ range.start = defrag->last_offset;
+
+ sb_start_write(fs_info->sb);
+ num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+ BTRFS_DEFRAG_BATCH);
+ sb_end_write(fs_info->sb);
+ /*
+ * if we filled the whole defrag batch, there
+ * must be more work to do. Queue this defrag
+ * again
+ */
+ if (num_defrag == BTRFS_DEFRAG_BATCH) {
+ defrag->last_offset = range.start;
+ btrfs_requeue_inode_defrag(inode, defrag);
+ } else if (defrag->last_offset && !defrag->cycled) {
+ /*
+ * we didn't fill our defrag batch, but
+ * we didn't start at zero. Make sure we loop
+ * around to the start of the file.
+ */
+ defrag->last_offset = 0;
+ defrag->cycled = 1;
+ btrfs_requeue_inode_defrag(inode, defrag);
+ } else {
+ kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+ }
+
+ iput(inode);
+ return 0;
+}
+
+/*
+ * run through the list of inodes in the FS that need
+ * defragging
+ */
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+ struct inode_defrag *defrag;
+ u64 first_ino = 0;
+ u64 root_objectid = 0;
atomic_inc(&fs_info->defrag_running);
- spin_lock(&fs_info->defrag_inodes_lock);
while(1) {
- n = NULL;
+ if (!__need_auto_defrag(fs_info->tree_root))
+ break;
/* find an inode to defrag */
- defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
- first_ino, &n);
+ defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
+ first_ino);
if (!defrag) {
- if (n) {
- defrag = rb_entry(n, struct inode_defrag,
- rb_node);
- } else if (root_objectid || first_ino) {
+ if (root_objectid || first_ino) {
root_objectid = 0;
first_ino = 0;
continue;
@@ -255,70 +376,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
}
}
- /* remove it from the rbtree */
first_ino = defrag->ino + 1;
root_objectid = defrag->root;
- rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
-
- if (btrfs_fs_closing(fs_info))
- goto next_free;
-
- spin_unlock(&fs_info->defrag_inodes_lock);
-
- /* get the inode */
- key.objectid = defrag->root;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
- key.offset = (u64)-1;
- inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(inode_root))
- goto next;
-
- key.objectid = defrag->ino;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
- key.offset = 0;
-
- inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
- if (IS_ERR(inode))
- goto next;
- /* do a chunk of defrag */
- clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
- range.start = defrag->last_offset;
- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
- defrag_batch);
- /*
- * if we filled the whole defrag batch, there
- * must be more work to do. Queue this defrag
- * again
- */
- if (num_defrag == defrag_batch) {
- defrag->last_offset = range.start;
- __btrfs_add_inode_defrag(inode, defrag);
- /*
- * we don't want to kfree defrag, we added it back to
- * the rbtree
- */
- defrag = NULL;
- } else if (defrag->last_offset && !defrag->cycled) {
- /*
- * we didn't fill our defrag batch, but
- * we didn't start at zero. Make sure we loop
- * around to the start of the file.
- */
- defrag->last_offset = 0;
- defrag->cycled = 1;
- __btrfs_add_inode_defrag(inode, defrag);
- defrag = NULL;
- }
-
- iput(inode);
-next:
- spin_lock(&fs_info->defrag_inodes_lock);
-next_free:
- kfree(defrag);
+ __btrfs_run_defrag_inode(fs_info, defrag);
}
- spin_unlock(&fs_info->defrag_inodes_lock);
-
atomic_dec(&fs_info->defrag_running);
/*
@@ -526,6 +588,8 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->block_len = em->block_len;
else
split->block_len = split->len;
+ split->orig_block_len = max(split->block_len,
+ em->orig_block_len);
split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
@@ -547,6 +611,8 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
+ split->orig_block_len = max(em->block_len,
+ em->orig_block_len);
if (compressed) {
split->block_len = em->block_len;
@@ -555,7 +621,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
} else {
split->block_len = split->len;
split->block_start = em->block_start + diff;
- split->orig_start = split->start;
+ split->orig_start = em->orig_start;
}
ret = add_extent_mapping(em_tree, split);
@@ -1348,7 +1414,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
balance_dirty_pages_ratelimited(inode->i_mapping);
if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root, 1);
+ btrfs_btree_balance_dirty(root);
pos += copied;
num_written += copied;
@@ -1397,6 +1463,24 @@ out:
return written ? written : err;
}
+static void update_time_for_write(struct inode *inode)
+{
+ struct timespec now;
+
+ if (IS_NOCMTIME(inode))
+ return;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
+ inode->i_mtime = now;
+
+ if (!timespec_equal(&inode->i_ctime, &now))
+ inode->i_ctime = now;
+
+ if (IS_I_VERSION(inode))
+ inode_inc_iversion(inode);
+}
+
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
@@ -1409,6 +1493,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
ssize_t num_written = 0;
ssize_t err = 0;
size_t count, ocount;
+ bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
sb_start_write(inode->i_sb);
@@ -1451,11 +1536,13 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
goto out;
}
- err = file_update_time(file);
- if (err) {
- mutex_unlock(&inode->i_mutex);
- goto out;
- }
+ /*
+ * We reserve space for updating the inode when we reserve space for the
+ * extent we are going to write, so any ENOSPC is returned there. We don't
+ * need to start yet another transaction to update the inode, as it will be
+ * updated when we finish writing whatever data we write.
+ */
+ update_time_for_write(inode);
start_pos = round_down(pos, root->sectorsize);
if (start_pos > i_size_read(inode)) {
@@ -1466,6 +1553,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
}
}
+ if (sync)
+ atomic_inc(&BTRFS_I(inode)->sync_writers);
+
if (unlikely(file->f_flags & O_DIRECT)) {
num_written = __btrfs_direct_write(iocb, iov, nr_segs,
pos, ppos, count, ocount);
@@ -1492,14 +1582,21 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
* this will either be one more than the running transaction
* or the generation used for the next transaction if there isn't
* one running right now.
+ *
+ * We also have to set last_sub_trans to the current log transid,
+ * otherwise subsequent syncs to a file that's been synced in this
+ * transaction will appear to have already occurred.
*/
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+ BTRFS_I(inode)->last_sub_trans = root->log_transid;
if (num_written > 0 || num_written == -EIOCBQUEUED) {
err = generic_write_sync(file, pos, num_written);
if (err < 0 && num_written > 0)
num_written = err;
}
out:
+ if (sync)
+ atomic_dec(&BTRFS_I(inode)->sync_writers);
sb_end_write(inode->i_sb);
current->backing_dev_info = NULL;
return num_written ? num_written : err;
@@ -1550,7 +1647,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* out of the ->i_mutex. If so, we can flush the dirty pages by
* multiple tasks and improve performance.
*/
+ atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ atomic_dec(&BTRFS_I(inode)->sync_writers);
if (ret)
return ret;
@@ -1561,7 +1660,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* range being left.
*/
atomic_inc(&root->log_batch);
- btrfs_wait_ordered_range(inode, start, end);
+ btrfs_wait_ordered_range(inode, start, end - start + 1);
atomic_inc(&root->log_batch);
/*
@@ -1767,6 +1866,7 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
+ hole_em->orig_block_len = 0;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
@@ -1796,48 +1896,51 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
struct btrfs_path *path;
struct btrfs_block_rsv *rsv;
struct btrfs_trans_handle *trans;
- u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
- u64 lockstart = (offset + mask) & ~mask;
- u64 lockend = ((offset + len) & ~mask) - 1;
+ u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
+ u64 lockend = round_down(offset + len,
+ BTRFS_I(inode)->root->sectorsize) - 1;
u64 cur_offset = lockstart;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
u64 drop_end;
- unsigned long nr;
int ret = 0;
int err = 0;
- bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
- ((offset + len) >> PAGE_CACHE_SHIFT);
+ bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
+ ((offset + len - 1) >> PAGE_CACHE_SHIFT));
btrfs_wait_ordered_range(inode, offset, len);
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
- mutex_unlock(&inode->i_mutex);
- return 0;
- }
-
+ /*
+ * We needn't truncate any page which is beyond the end of the file
+ * because we are sure there is no data there.
+ */
/*
* Only do this if we are in the same page and we aren't doing the
* entire page.
*/
if (same_page && len < PAGE_CACHE_SIZE) {
- ret = btrfs_truncate_page(inode, offset, len, 0);
+ if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
+ ret = btrfs_truncate_page(inode, offset, len, 0);
mutex_unlock(&inode->i_mutex);
return ret;
}
/* zero back part of the first page */
- ret = btrfs_truncate_page(inode, offset, 0, 0);
- if (ret) {
- mutex_unlock(&inode->i_mutex);
- return ret;
+ if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
+ ret = btrfs_truncate_page(inode, offset, 0, 0);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
}
/* zero the front end of the last page */
- ret = btrfs_truncate_page(inode, offset + len, 0, 1);
- if (ret) {
- mutex_unlock(&inode->i_mutex);
- return ret;
+ if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
+ ret = btrfs_truncate_page(inode, offset + len, 0, 1);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
}
if (lockend < lockstart) {
@@ -1930,9 +2033,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break;
}
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
@@ -1963,11 +2065,13 @@ out_trans:
if (!trans)
goto out_free;
+ inode_inc_iversion(inode);
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
out_free:
btrfs_free_path(path);
btrfs_free_block_rsv(root, rsv);
@@ -1991,12 +2095,12 @@ static long btrfs_fallocate(struct file *file, int mode,
u64 alloc_end;
u64 alloc_hint = 0;
u64 locked_end;
- u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
+ int blocksize = BTRFS_I(inode)->root->sectorsize;
int ret;
- alloc_start = offset & ~mask;
- alloc_end = (offset + len + mask) & ~mask;
+ alloc_start = round_down(offset, blocksize);
+ alloc_end = round_up(offset + len, blocksize);
/* Make sure we aren't being given some crap mode */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -2009,7 +2113,7 @@ static long btrfs_fallocate(struct file *file, int mode,
* Make sure we have enough space before we do the
* allocation.
*/
- ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
+ ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
if (ret)
return ret;
@@ -2077,7 +2181,7 @@ static long btrfs_fallocate(struct file *file, int mode,
}
last_byte = min(extent_map_end(em), alloc_end);
actual_end = min_t(u64, extent_map_end(em), offset + len);
- last_byte = (last_byte + mask) & ~mask;
+ last_byte = ALIGN(last_byte, blocksize);
if (em->block_start == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
@@ -2116,11 +2220,11 @@ static long btrfs_fallocate(struct file *file, int mode,
out:
mutex_unlock(&inode->i_mutex);
/* Let go of our reservation. */
- btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
+ btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
return ret;
}
-static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
+static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map *em;
@@ -2154,7 +2258,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
* before the position we want in case there is outstanding delalloc
* going on here.
*/
- if (origin == SEEK_HOLE && start != 0) {
+ if (whence == SEEK_HOLE && start != 0) {
if (start <= root->sectorsize)
em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
root->sectorsize, 0);
@@ -2188,13 +2292,13 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
}
}
- if (origin == SEEK_HOLE) {
+ if (whence == SEEK_HOLE) {
*offset = start;
free_extent_map(em);
break;
}
} else {
- if (origin == SEEK_DATA) {
+ if (whence == SEEK_DATA) {
if (em->block_start == EXTENT_MAP_DELALLOC) {
if (start >= inode->i_size) {
free_extent_map(em);
@@ -2231,16 +2335,16 @@ out:
return ret;
}
-static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
+static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret;
mutex_lock(&inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case SEEK_END:
case SEEK_CUR:
- offset = generic_file_llseek(file, offset, origin);
+ offset = generic_file_llseek(file, offset, whence);
goto out;
case SEEK_DATA:
case SEEK_HOLE:
@@ -2249,7 +2353,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
return -ENXIO;
}
- ret = find_desired_extent(inode, &offset, origin);
+ ret = find_desired_extent(inode, &offset, whence);
if (ret) {
mutex_unlock(&inode->i_mutex);
return ret;
@@ -2292,3 +2396,21 @@ const struct file_operations btrfs_file_operations = {
.compat_ioctl = btrfs_ioctl,
#endif
};
+
+void btrfs_auto_defrag_exit(void)
+{
+ if (btrfs_inode_defrag_cachep)
+ kmem_cache_destroy(btrfs_inode_defrag_cachep);
+}
+
+int btrfs_auto_defrag_init(void)
+{
+ btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
+ sizeof(struct inode_defrag), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!btrfs_inode_defrag_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
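
btrfs_auto_defrag_init() and btrfs_auto_defrag_exit() above only create and destroy the inode_defrag slab cache; wiring them up is left to the caller. A minimal sketch of how an init/exit pair might use them — the function names and surrounding sequence are illustrative assumptions, not part of this diff:

	static int __init example_init(void)		/* illustrative name */
	{
		int err = btrfs_auto_defrag_init();

		if (err)
			return err;
		/* ... the rest of initialization (assumed) ... */
		return 0;
	}

	static void __exit example_exit(void)		/* illustrative name */
	{
		btrfs_auto_defrag_exit();
	}
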
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 1027b854b90..59ea2e4349c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -307,7 +307,6 @@ static void io_ctl_unmap_page(struct io_ctl *io_ctl)
static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
- WARN_ON(io_ctl->cur);
BUG_ON(io_ctl->index >= io_ctl->num_pages);
io_ctl->page = io_ctl->pages[io_ctl->index++];
io_ctl->cur = kmap(io_ctl->page);
@@ -1250,18 +1249,13 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
* if previous extent entry covers the offset,
* we should return it instead of the bitmap entry
*/
- n = &entry->offset_index;
- while (1) {
- n = rb_prev(n);
- if (!n)
- break;
+ n = rb_prev(&entry->offset_index);
+ if (n) {
prev = rb_entry(n, struct btrfs_free_space,
offset_index);
- if (!prev->bitmap) {
- if (prev->offset + prev->bytes > offset)
- entry = prev;
- break;
- }
+ if (!prev->bitmap &&
+ prev->offset + prev->bytes > offset)
+ entry = prev;
}
}
return entry;
@@ -1287,18 +1281,13 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
}
if (entry->bitmap) {
- n = &entry->offset_index;
- while (1) {
- n = rb_prev(n);
- if (!n)
- break;
+ n = rb_prev(&entry->offset_index);
+ if (n) {
prev = rb_entry(n, struct btrfs_free_space,
offset_index);
- if (!prev->bitmap) {
- if (prev->offset + prev->bytes > offset)
- return prev;
- break;
- }
+ if (!prev->bitmap &&
+ prev->offset + prev->bytes > offset)
+ return prev;
}
if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
return entry;
@@ -1364,7 +1353,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
- u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+ u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
BUG_ON(ctl->total_bitmaps > max_bitmaps);
@@ -1650,8 +1639,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* some block groups are so tiny they can't be enveloped by a bitmap, so
* don't even bother to create a bitmap for this
*/
- if (BITS_PER_BITMAP * block_group->sectorsize >
- block_group->key.offset)
+ if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
return false;
return true;
@@ -2298,10 +2286,10 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
unsigned long total_found = 0;
int ret;
- i = offset_to_bit(entry->offset, block_group->sectorsize,
+ i = offset_to_bit(entry->offset, ctl->unit,
max_t(u64, offset, entry->offset));
- want_bits = bytes_to_bits(bytes, block_group->sectorsize);
- min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+ want_bits = bytes_to_bits(bytes, ctl->unit);
+ min_bits = bytes_to_bits(min_bytes, ctl->unit);
again:
found_bits = 0;
@@ -2325,23 +2313,22 @@ again:
total_found += found_bits;
- if (cluster->max_size < found_bits * block_group->sectorsize)
- cluster->max_size = found_bits * block_group->sectorsize;
+ if (cluster->max_size < found_bits * ctl->unit)
+ cluster->max_size = found_bits * ctl->unit;
if (total_found < want_bits || cluster->max_size < cont1_bytes) {
i = next_zero + 1;
goto again;
}
- cluster->window_start = start * block_group->sectorsize +
- entry->offset;
+ cluster->window_start = start * ctl->unit + entry->offset;
rb_erase(&entry->offset_index, &ctl->free_space_offset);
ret = tree_insert_offset(&cluster->root, entry->offset,
&entry->offset_index, 1);
BUG_ON(ret); /* -EEXIST; Logic error */
trace_btrfs_setup_cluster(block_group, cluster,
- total_found * block_group->sectorsize, 1);
+ total_found * ctl->unit, 1);
return 0;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index b1a1c929ba8..d26f67a59e3 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -434,8 +434,9 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
* 3 items for pre-allocation
*/
trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
- ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
- trans->bytes_reserved);
+ ret = btrfs_block_rsv_add(root, trans->block_rsv,
+ trans->bytes_reserved,
+ BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
trace_btrfs_space_reservation(root->fs_info, "ino_cache",
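
The hunk above is part of a wider conversion in this diff: the dedicated *_noflush reservation helpers are replaced by the generic calls taking an explicit flush mode. A before/after sketch using only the forms that appear in this diff (BTRFS_RESERVE_FLUSH_LIMIT appears in a later inode.c hunk):

	/* old */
	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
					  trans->bytes_reserved);

	/* new: flushing behaviour is selected by the last argument */
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
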
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 95542a1b3df..16d9e8e191e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -71,6 +71,7 @@ static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
+static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
@@ -94,6 +95,10 @@ static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
+static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
+ u64 len, u64 orig_start,
+ u64 block_start, u64 block_len,
+ u64 orig_block_len, int type);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
@@ -698,14 +703,19 @@ retry:
em->block_start = ins.objectid;
em->block_len = ins.offset;
+ em->orig_block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
+ if (!ret)
+ list_move(&em->list,
+ &em_tree->modified_extents);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
@@ -803,14 +813,14 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
* required to start IO on it. It may be clean and already done with
* IO when we return.
*/
-static noinline int cow_file_range(struct inode *inode,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- int unlock)
+static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_root *root,
+ struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written,
+ int unlock)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
u64 alloc_hint = 0;
u64 num_bytes;
unsigned long ram_size;
@@ -823,25 +833,10 @@ static noinline int cow_file_range(struct inode *inode,
int ret = 0;
BUG_ON(btrfs_is_free_space_inode(inode));
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- extent_clear_unlock_delalloc(inode,
- &BTRFS_I(inode)->io_tree,
- start, end, locked_page,
- EXTENT_CLEAR_UNLOCK_PAGE |
- EXTENT_CLEAR_UNLOCK |
- EXTENT_CLEAR_DELALLOC |
- EXTENT_CLEAR_DIRTY |
- EXTENT_SET_WRITEBACK |
- EXTENT_END_WRITEBACK);
- return PTR_ERR(trans);
- }
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
num_bytes = (end - start + blocksize) & ~(blocksize - 1);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
- ret = 0;
/* if this is a small write inside eof, kick off defrag */
if (num_bytes < 64 * 1024 &&
@@ -900,12 +895,17 @@ static noinline int cow_file_range(struct inode *inode,
em->block_start = ins.objectid;
em->block_len = ins.offset;
+ em->orig_block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
+ if (!ret)
+ list_move(&em->list,
+ &em_tree->modified_extents);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
@@ -952,11 +952,9 @@ static noinline int cow_file_range(struct inode *inode,
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
}
- ret = 0;
out:
- btrfs_end_transaction(trans, root);
-
return ret;
+
out_unlock:
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
@@ -971,6 +969,39 @@ out_unlock:
goto out;
}
+static noinline int cow_file_range(struct inode *inode,
+ struct page *locked_page,
+ u64 start, u64 end, int *page_started,
+ unsigned long *nr_written,
+ int unlock)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ extent_clear_unlock_delalloc(inode,
+ &BTRFS_I(inode)->io_tree,
+ start, end, locked_page,
+ EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_UNLOCK |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
+ return PTR_ERR(trans);
+ }
+ trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+
+ ret = __cow_file_range(trans, inode, root, locked_page, start, end,
+ page_started, nr_written, unlock);
+
+ btrfs_end_transaction(trans, root);
+
+ return ret;
+}
+
/*
* work queue call back to started compression on a file and pages
*/
@@ -1126,6 +1157,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 extent_offset;
u64 disk_bytenr;
u64 num_bytes;
+ u64 disk_num_bytes;
int extent_type;
int ret, err;
int type;
@@ -1228,6 +1260,8 @@ next_slot:
extent_offset = btrfs_file_extent_offset(leaf, fi);
extent_end = found_key.offset +
btrfs_file_extent_num_bytes(leaf, fi);
+ disk_num_bytes =
+ btrfs_file_extent_disk_num_bytes(leaf, fi);
if (extent_end <= start) {
path->slots[0]++;
goto next_slot;
@@ -1281,9 +1315,9 @@ out_check:
btrfs_release_path(path);
if (cow_start != (u64)-1) {
- ret = cow_file_range(inode, locked_page, cow_start,
- found_key.offset - 1, page_started,
- nr_written, 1);
+ ret = __cow_file_range(trans, inode, root, locked_page,
+ cow_start, found_key.offset - 1,
+ page_started, nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
@@ -1298,16 +1332,21 @@ out_check:
em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */
em->start = cur_offset;
- em->orig_start = em->start;
+ em->orig_start = found_key.offset - extent_offset;
em->len = num_bytes;
em->block_len = num_bytes;
em->block_start = disk_bytenr;
+ em->orig_block_len = disk_num_bytes;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ set_bit(EXTENT_FLAG_FILLING, &em->flags);
+ em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
+ if (!ret)
+ list_move(&em->list,
+ &em_tree->modified_extents);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
@@ -1352,8 +1391,9 @@ out_check:
}
if (cow_start != (u64)-1) {
- ret = cow_file_range(inode, locked_page, cow_start, end,
- page_started, nr_written, 1);
+ ret = __cow_file_range(trans, inode, root, locked_page,
+ cow_start, end,
+ page_started, nr_written, 1);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error;
@@ -1531,7 +1571,6 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
unsigned long bio_flags)
{
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
- struct btrfs_mapping_tree *map_tree;
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
@@ -1541,11 +1580,10 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
return 0;
length = bio->bi_size;
- map_tree = &root->fs_info->mapping_tree;
map_length = length;
- ret = btrfs_map_block(map_tree, READ, logical,
+ ret = btrfs_map_block(root->fs_info, READ, logical,
&map_length, NULL, 0);
- /* Will always return 0 or 1 with map_multi == NULL */
+ /* Will always return 0 with map_multi == NULL */
BUG_ON(ret < 0);
if (map_length < length + size)
return 1;
@@ -1586,7 +1624,12 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- return btrfs_map_bio(root, rw, bio, mirror_num, 1);
+ int ret;
+
+ ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
+ if (ret)
+ bio_endio(bio, ret);
+ return ret;
}
/*
@@ -1601,6 +1644,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int ret = 0;
int skip_sum;
int metadata = 0;
+ int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -1610,31 +1654,43 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (!(rw & REQ_WRITE)) {
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret)
- return ret;
+ goto out;
if (bio_flags & EXTENT_BIO_COMPRESSED) {
- return btrfs_submit_compressed_read(inode, bio,
- mirror_num, bio_flags);
+ ret = btrfs_submit_compressed_read(inode, bio,
+ mirror_num,
+ bio_flags);
+ goto out;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
if (ret)
- return ret;
+ goto out;
}
goto mapit;
- } else if (!skip_sum) {
+ } else if (async && !skip_sum) {
/* csum items have already been cloned */
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
goto mapit;
/* we're doing a write, do the async checksumming */
- return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+ ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
inode, rw, bio, mirror_num,
bio_flags, bio_offset,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
+ goto out;
+ } else if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+ if (ret)
+ goto out;
}
mapit:
- return btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+
+out:
+ if (ret < 0)
+ bio_endio(bio, ret);
+ return ret;
}
/*
@@ -1657,8 +1713,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state)
{
- if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
- WARN_ON(1);
+ WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
cached_state, GFP_NOFS);
}
@@ -1867,22 +1922,20 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
- ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- if (!ret) {
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
- else
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- goto out;
- }
- trans->block_rsv = &root->fs_info->delalloc_block_rsv;
- ret = btrfs_update_inode_fallback(trans, root, inode);
- if (ret) /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (nolock)
+ trans = btrfs_join_transaction_nolock(root);
+ else
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
}
+ trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+ ret = btrfs_update_inode_fallback(trans, root, inode);
+ if (ret) /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, root, ret);
goto out;
}
@@ -1931,15 +1984,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
- ret = btrfs_update_inode_fallback(trans, root, inode);
- if (ret) { /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
- goto out_unlock;
- }
- } else {
- btrfs_set_inode_last_trans(trans, inode);
+ btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ ret = btrfs_update_inode_fallback(trans, root, inode);
+ if (ret) { /* -ENOMEM or corruption */
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_unlock;
}
ret = 0;
out_unlock:
@@ -3074,7 +3123,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
struct inode *inode = dentry->d_inode;
int ret;
- unsigned long nr = 0;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
@@ -3094,9 +3142,8 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
}
out:
- nr = trans->blocks_used;
__unlink_end_trans(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return ret;
}
@@ -3186,7 +3233,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
- unsigned long nr = 0;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
@@ -3215,9 +3261,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!err)
btrfs_i_size_write(inode, 0);
out:
- nr = trans->blocks_used;
__unlink_end_trans(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return err;
}
@@ -3497,11 +3542,11 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
if (ret)
goto out;
- ret = -ENOMEM;
again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
+ ret = -ENOMEM;
goto out;
}
@@ -3550,7 +3595,6 @@ again:
goto out_unlock;
}
- ret = 0;
if (offset != PAGE_CACHE_SIZE) {
if (!len)
len = PAGE_CACHE_SIZE - offset;
@@ -3668,6 +3712,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
+ hole_em->orig_block_len = 0;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
@@ -3783,7 +3828,6 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
- unsigned long nr;
int ret;
trace_btrfs_inode_evict(inode);
@@ -3829,7 +3873,8 @@ void btrfs_evict_inode(struct inode *inode)
* inode item when doing the truncate.
*/
while (1) {
- ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
+ ret = btrfs_block_rsv_refill(root, rsv, min_size,
+ BTRFS_RESERVE_FLUSH_LIMIT);
/*
* Try and steal from the global reserve since we will
@@ -3847,7 +3892,7 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- trans = btrfs_start_transaction_noflush(root, 1);
+ trans = btrfs_start_transaction_lflush(root, 1);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
@@ -3864,10 +3909,9 @@ void btrfs_evict_inode(struct inode *inode)
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
}
btrfs_free_block_rsv(root, rsv);
@@ -3883,9 +3927,8 @@ void btrfs_evict_inode(struct inode *inode)
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
btrfs_return_ino(root, btrfs_ino(inode));
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
no_delete:
clear_inode(inode);
return;
@@ -4219,16 +4262,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- if (unlikely(d_need_lookup(dentry))) {
- memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
- kfree(dentry->d_fsdata);
- dentry->d_fsdata = NULL;
- /* This thing is hashed, drop it for now */
- d_drop(dentry);
- } else {
- ret = btrfs_inode_by_name(dir, dentry, &location);
- }
-
+ ret = btrfs_inode_by_name(dir, dentry, &location);
if (ret < 0)
return ERR_PTR(ret);
@@ -4298,11 +4332,6 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *ret;
ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
- if (unlikely(d_need_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
- spin_unlock(&dentry->d_lock);
- }
return ret;
}
@@ -4775,8 +4804,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (S_ISREG(mode)) {
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW) ||
- (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
+ if (btrfs_test_opt(root, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
}
@@ -4842,7 +4870,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
ret = btrfs_insert_dir_item(trans, root, name, name_len,
parent_inode, &key,
btrfs_inode_type(inode), index);
- if (ret == -EEXIST)
+ if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
else if (ret) {
btrfs_abort_transaction(trans, root, ret);
@@ -4897,7 +4925,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
int err;
int drop_inode = 0;
u64 objectid;
- unsigned long nr = 0;
u64 index = 0;
if (!new_valid_dev(rdev))
@@ -4930,6 +4957,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ err = btrfs_update_inode(trans, root, inode);
+ if (err) {
+ drop_inode = 1;
+ goto out_unlock;
+ }
+
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -4947,9 +4980,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
}
out_unlock:
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
@@ -4963,9 +4995,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
- int drop_inode = 0;
+ int drop_inode_on_err = 0;
int err;
- unsigned long nr = 0;
u64 objectid;
u64 index = 0;
@@ -4989,12 +5020,15 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
err = PTR_ERR(inode);
goto out_unlock;
}
+ drop_inode_on_err = 1;
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
- if (err) {
- drop_inode = 1;
+ if (err)
+ goto out_unlock;
+
+ err = btrfs_update_inode(trans, root, inode);
+ if (err)
goto out_unlock;
- }
/*
* If the active LSM wants to access the inode during
@@ -5007,21 +5041,20 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
- drop_inode = 1;
- else {
- inode->i_mapping->a_ops = &btrfs_aops;
- inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
- d_instantiate(dentry, inode);
- }
+ goto out_unlock;
+
+ inode->i_mapping->a_ops = &btrfs_aops;
+ inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+ BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ d_instantiate(dentry, inode);
+
out_unlock:
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- if (drop_inode) {
+ if (err && drop_inode_on_err) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return err;
}
@@ -5032,7 +5065,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = old_dentry->d_inode;
u64 index;
- unsigned long nr = 0;
int err;
int drop_inode = 0;
@@ -5062,6 +5094,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
inode_inc_iversion(inode);
inode->i_ctime = CURRENT_TIME;
ihold(inode);
+ set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -5076,14 +5109,13 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, inode, NULL, parent);
}
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
fail:
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return err;
}
@@ -5096,7 +5128,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
- unsigned long nr = 1;
/*
* 2 items for inode and ref
@@ -5142,11 +5173,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
drop_on_err = 0;
out_fail:
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_on_err)
iput(inode);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return err;
}
@@ -5340,6 +5370,7 @@ again:
if (start + len <= found_key.offset)
goto not_found;
em->start = start;
+ em->orig_start = start;
em->len = found_key.offset - start;
goto not_found_em;
}
@@ -5350,6 +5381,8 @@ again:
em->len = extent_end - extent_start;
em->orig_start = extent_start -
btrfs_file_extent_offset(leaf, item);
+ em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
+ item);
bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
if (bytenr == 0) {
em->block_start = EXTENT_MAP_HOLE;
@@ -5359,8 +5392,7 @@ again:
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
em->block_start = bytenr;
- em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
- item);
+ em->block_len = em->orig_block_len;
} else {
bytenr += btrfs_file_extent_offset(leaf, item);
em->block_start = bytenr;
@@ -5390,7 +5422,8 @@ again:
em->start = extent_start + extent_offset;
em->len = (copy_size + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
- em->orig_start = EXTENT_MAP_INLINE;
+ em->orig_block_len = em->len;
+ em->orig_start = em->start;
if (compress_type) {
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
em->compress_type = compress_type;
@@ -5439,11 +5472,11 @@ again:
extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
- printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
- WARN_ON(1);
+ WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
}
not_found:
em->start = start;
+ em->orig_start = start;
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
@@ -5645,38 +5678,19 @@ out:
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
- struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
- bool insert = false;
-
- /*
- * Ok if the extent map we looked up is a hole and is for the exact
- * range we want, there is no reason to allocate a new one, however if
- * it is not right then we need to free this one and drop the cache for
- * our range.
- */
- if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
- em->len != len) {
- free_extent_map(em);
- em = NULL;
- insert = true;
- btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
- }
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return ERR_CAST(trans);
- if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
- btrfs_add_inode_defrag(trans, inode);
-
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
alloc_hint = get_extent_allocation_hint(inode, start, len);
@@ -5687,37 +5701,10 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
goto out;
}
- if (!em) {
- em = alloc_extent_map();
- if (!em) {
- em = ERR_PTR(-ENOMEM);
- goto out;
- }
- }
-
- em->start = start;
- em->orig_start = em->start;
- em->len = ins.offset;
-
- em->block_start = ins.objectid;
- em->block_len = ins.offset;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
-
- /*
- * We need to do this because if we're using the original em we searched
- * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
- */
- em->flags = 0;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
-
- while (insert) {
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- if (ret != -EEXIST)
- break;
- btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
- }
+ em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
+ ins.offset, ins.offset, 0);
+ if (IS_ERR(em))
+ goto out;
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
@@ -5894,7 +5881,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
- int type)
+ u64 orig_block_len, int type)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
@@ -5912,15 +5899,20 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
em->block_len = block_len;
em->block_start = block_start;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->orig_block_len = orig_block_len;
+ em->generation = -1;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
if (type == BTRFS_ORDERED_PREALLOC)
- set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ set_bit(EXTENT_FLAG_FILLING, &em->flags);
do {
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
+ if (!ret)
+ list_move(&em->list,
+ &em_tree->modified_extents);
write_unlock(&em_tree->lock);
} while (ret == -EEXIST);
@@ -6047,13 +6039,15 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
goto must_cow;
if (can_nocow_odirect(trans, inode, start, len) == 1) {
- u64 orig_start = em->start;
+ u64 orig_start = em->orig_start;
+ u64 orig_block_len = em->orig_block_len;
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,
orig_start,
- block_start, len, type);
+ block_start, len,
+ orig_block_len, type);
if (IS_ERR(em)) {
btrfs_end_transaction(trans, root);
goto unlock_err;
@@ -6077,7 +6071,8 @@ must_cow:
* it above
*/
len = bh_result->b_size;
- em = btrfs_new_extent_direct(inode, em, start, len);
+ free_extent_map(em);
+ em = btrfs_new_extent_direct(inode, start, len);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto unlock_err;
@@ -6318,6 +6313,9 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
+ if (async_submit)
+ async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
+
bio_get(bio);
if (!write) {
@@ -6362,7 +6360,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
{
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
struct bio *bio;
struct bio *orig_bio = dip->orig_bio;
struct bio_vec *bvec = orig_bio->bi_io_vec;
@@ -6375,7 +6372,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int async_submit = 0;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+ ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(orig_bio);
@@ -6429,7 +6426,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio->bi_end_io = btrfs_end_dio_bio;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+ ret = btrfs_map_block(root->fs_info, READ,
+ start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(bio);
@@ -6582,9 +6580,17 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
btrfs_submit_direct, 0);
}
+#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
+
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
+ int ret;
+
+ ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
+ if (ret)
+ return ret;
+
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
@@ -6855,7 +6861,6 @@ static int btrfs_truncate(struct inode *inode)
int ret;
int err = 0;
struct btrfs_trans_handle *trans;
- unsigned long nr;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
@@ -6978,9 +6983,8 @@ static int btrfs_truncate(struct inode *inode)
break;
}
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
@@ -7014,9 +7018,8 @@ static int btrfs_truncate(struct inode *inode)
if (ret && !err)
err = ret;
- nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
}
out:
@@ -7093,6 +7096,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
ei->io_tree.track_uptodate = 1;
ei->io_failure_tree.track_uptodate = 1;
+ atomic_set(&ei->sync_writers, 0);
mutex_init(&ei->log_mutex);
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
@@ -7203,6 +7207,8 @@ void btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_path_cachep);
if (btrfs_free_space_cachep)
kmem_cache_destroy(btrfs_free_space_cachep);
+ if (btrfs_delalloc_work_cachep)
+ kmem_cache_destroy(btrfs_delalloc_work_cachep);
}
int btrfs_init_cachep(void)
@@ -7237,6 +7243,13 @@ int btrfs_init_cachep(void)
if (!btrfs_free_space_cachep)
goto fail;
+ btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
+ sizeof(struct btrfs_delalloc_work), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!btrfs_delalloc_work_cachep)
+ goto fail;
+
return 0;
fail:
btrfs_destroy_cachep();
@@ -7308,6 +7321,28 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(old_inode->i_mode) && new_inode &&
new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
+
+
+ /* check for collisions, even if the name isn't there */
+ ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len);
+
+ if (ret) {
+ if (ret == -EEXIST) {
+ /* we shouldn't get
+ * eexist without a new_inode */
+ if (!new_inode) {
+ WARN_ON(1);
+ return ret;
+ }
+ } else {
+ /* maybe -EOVERFLOW */
+ return ret;
+ }
+ }
+ ret = 0;
+
/*
* we're using rename to replace one file with another.
* and the replacement file is large. Start IO on it now so
@@ -7447,6 +7482,49 @@ out_notrans:
return ret;
}
+static void btrfs_run_delalloc_work(struct btrfs_work *work)
+{
+ struct btrfs_delalloc_work *delalloc_work;
+
+ delalloc_work = container_of(work, struct btrfs_delalloc_work,
+ work);
+ if (delalloc_work->wait)
+ btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
+ else
+ filemap_flush(delalloc_work->inode->i_mapping);
+
+ if (delalloc_work->delay_iput)
+ btrfs_add_delayed_iput(delalloc_work->inode);
+ else
+ iput(delalloc_work->inode);
+ complete(&delalloc_work->completion);
+}
+
+struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
+ int wait, int delay_iput)
+{
+ struct btrfs_delalloc_work *work;
+
+ work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
+ if (!work)
+ return NULL;
+
+ init_completion(&work->completion);
+ INIT_LIST_HEAD(&work->list);
+ work->inode = inode;
+ work->wait = wait;
+ work->delay_iput = delay_iput;
+ work->work.func = btrfs_run_delalloc_work;
+
+ return work;
+}
+
+void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
+{
+ wait_for_completion(&work->completion);
+ kmem_cache_free(btrfs_delalloc_work_cachep, work);
+}
+
/*
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
@@ -7456,10 +7534,15 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
struct inode *inode;
+ struct btrfs_delalloc_work *work, *next;
+ struct list_head works;
+ int ret = 0;
if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
+ INIT_LIST_HEAD(&works);
+
spin_lock(&root->fs_info->delalloc_lock);
while (!list_empty(head)) {
binode = list_entry(head->next, struct btrfs_inode,
@@ -7469,11 +7552,14 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
list_del_init(&binode->delalloc_inodes);
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
- filemap_flush(inode->i_mapping);
- if (delay_iput)
- btrfs_add_delayed_iput(inode);
- else
- iput(inode);
+ work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
+ if (!work) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&work->list, &works);
+ btrfs_queue_worker(&root->fs_info->flush_workers,
+ &work->work);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
@@ -7492,7 +7578,12 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
- return 0;
+out:
+ list_for_each_entry_safe(work, next, &works, list) {
+ list_del_init(&work->list);
+ btrfs_wait_and_free_delalloc_work(work);
+ }
+ return ret;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
@@ -7512,7 +7603,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
unsigned long ptr;
struct btrfs_file_extent_item *ei;
struct extent_buffer *leaf;
- unsigned long nr = 0;
name_len = strlen(symname) + 1;
if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
@@ -7610,13 +7700,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
out_unlock:
if (!err)
d_instantiate(dentry, inode);
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
return err;
}
@@ -7679,6 +7768,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
em->len = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
+ em->orig_block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 5b3429ab8ec..4b4516770f0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -55,6 +55,7 @@
#include "backref.h"
#include "rcu-string.h"
#include "send.h"
+#include "dev-replace.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -140,8 +141,11 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
}
- if (flags & BTRFS_INODE_NODATACOW)
+ if (flags & BTRFS_INODE_NODATACOW) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ if (S_ISREG(inode->i_mode))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ }
btrfs_update_iflags(inode);
}
@@ -571,8 +575,12 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
ret = btrfs_commit_transaction(trans,
root->fs_info->extent_root);
}
- if (ret)
+ if (ret) {
+ /* cleanup_transaction has freed this for us */
+ if (trans->aborted)
+ pending_snapshot = NULL;
goto fail;
+ }
ret = pending_snapshot->error;
if (ret)
@@ -705,6 +713,16 @@ static noinline int btrfs_mksubvol(struct path *parent,
if (error)
goto out_dput;
+ /*
+ * even if this name doesn't exist, we may get hash collisions.
+ * check for them now when we can safely fail
+ */
+ error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
+ dir->i_ino, name,
+ namelen);
+ if (error)
+ goto out_dput;
+
down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
@@ -1293,12 +1311,13 @@ out_ra:
return ret;
}
-static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
+static noinline int btrfs_ioctl_resize(struct file *file,
void __user *arg)
{
u64 new_size;
u64 old_size;
u64 devid = 1;
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
struct btrfs_device *device = NULL;
@@ -1313,13 +1332,17 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- mutex_lock(&root->fs_info->volume_mutex);
- if (root->fs_info->balance_ctl) {
- printk(KERN_INFO "btrfs: balance in progress\n");
- ret = -EINVAL;
- goto out;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+ return -EINPROGRESS;
}
+ mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
@@ -1339,7 +1362,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
- device = btrfs_find_device(root, devid, NULL, NULL);
+ device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
if (!device) {
printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
@@ -1371,6 +1394,11 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
}
}
+ if (device->is_tgtdev_for_dev_replace) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
old_size = device->total_bytes;
if (mod < 0) {
@@ -1409,12 +1437,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
btrfs_commit_transaction(trans, root);
} else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
- }
+ } /* equal, nothing needs to be done */
out_free:
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
+ mnt_drop_write_file(file);
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
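This hunk introduces the exclusion scheme that the rest of the patch also applies to defrag, device add/remove, balance and dev replace: a single fs_info->mutually_exclusive_operation_running flag is claimed with atomic_xchg() up front and cleared on every exit path. A stripped-down sketch of the guard, with the actual ioctl body elided:

static long btrfs_ioctl_some_exclusive_op(struct btrfs_fs_info *fs_info)
{
	long ret = 0;

	/* atomic_xchg() returns the previous value: non-zero means another
	 * exclusive operation already owns the slot */
	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
		return -EINPROGRESS;
	}

	/* ... perform the exclusive operation here ... */

	/* clear the flag on every exit path */
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	return ret;
}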
@@ -2156,9 +2186,17 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
if (btrfs_root_readonly(root))
return -EROFS;
+ if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+ return -EINPROGRESS;
+ }
ret = mnt_want_write_file(file);
- if (ret)
+ if (ret) {
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running,
+ 0);
return ret;
+ }
switch (inode->i_mode & S_IFMT) {
case S_IFDIR:
@@ -2210,6 +2248,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
}
out:
mnt_drop_write_file(file);
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
@@ -2221,13 +2260,13 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- mutex_lock(&root->fs_info->volume_mutex);
- if (root->fs_info->balance_ctl) {
- printk(KERN_INFO "btrfs: balance in progress\n");
- ret = -EINVAL;
- goto out;
+ if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+ return -EINPROGRESS;
}
+ mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
@@ -2240,27 +2279,31 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
-static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
- mutex_lock(&root->fs_info->volume_mutex);
- if (root->fs_info->balance_ctl) {
- printk(KERN_INFO "btrfs: balance in progress\n");
- ret = -EINVAL;
- goto out;
+ if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+ mnt_drop_write_file(file);
+ return -EINPROGRESS;
}
+ mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);
@@ -2273,6 +2316,8 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
+ mnt_drop_write_file(file);
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
return ret;
}
@@ -2328,7 +2373,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
s_uuid = di_args->uuid;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL);
+ dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
@@ -2821,12 +2866,19 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
struct btrfs_disk_key disk_key;
u64 objectid = 0;
u64 dir_id;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(&objectid, argp, sizeof(objectid)))
- return -EFAULT;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ if (copy_from_user(&objectid, argp, sizeof(objectid))) {
+ ret = -EFAULT;
+ goto out;
+ }
if (!objectid)
objectid = root->root_key.objectid;
@@ -2836,21 +2888,28 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
location.offset = (u64)-1;
new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
- if (IS_ERR(new_root))
- return PTR_ERR(new_root);
+ if (IS_ERR(new_root)) {
+ ret = PTR_ERR(new_root);
+ goto out;
+ }
- if (btrfs_root_refs(&new_root->root_item) == 0)
- return -ENOENT;
+ if (btrfs_root_refs(&new_root->root_item) == 0) {
+ ret = -ENOENT;
+ goto out;
+ }
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
path->leave_spinning = 1;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_free_path(path);
- return PTR_ERR(trans);
+ ret = PTR_ERR(trans);
+ goto out;
}
dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
@@ -2861,7 +2920,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_end_transaction(trans, root);
printk(KERN_ERR "Umm, you don't have the default dir item, "
"this isn't going to work\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
@@ -2871,8 +2931,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
btrfs_end_transaction(trans, root);
-
- return 0;
+out:
+ mnt_drop_write_file(file);
+ return ret;
}
void btrfs_get_block_group_info(struct list_head *groups_list,
@@ -3036,32 +3097,38 @@ long btrfs_ioctl_trans_end(struct file *file)
return 0;
}
-static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp)
+static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
+ void __user *argp)
{
- struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root;
struct btrfs_trans_handle *trans;
u64 transid;
int ret;
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) != -ENOENT)
+ return PTR_ERR(trans);
+
+ /* No running transaction, don't bother */
+ transid = root->fs_info->last_trans_committed;
+ goto out;
+ }
transid = trans->transid;
ret = btrfs_commit_transaction_async(trans, root, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
}
-
+out:
if (argp)
if (copy_to_user(argp, &transid, sizeof(transid)))
return -EFAULT;
return 0;
}
-static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp)
+static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
+ void __user *argp)
{
- struct btrfs_root *root = BTRFS_I(file->f_dentry->d_inode)->root;
u64 transid;
if (argp) {
@@ -3073,10 +3140,11 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp)
return btrfs_wait_for_commit(root, transid);
}
-static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
- int ret;
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_scrub_args *sa;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3085,12 +3153,22 @@ static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg)
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end,
- &sa->progress, sa->flags & BTRFS_SCRUB_READONLY);
+ if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
+ }
+
+ ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
+ &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
+ 0);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
+ if (!(sa->flags & BTRFS_SCRUB_READONLY))
+ mnt_drop_write_file(file);
+out:
kfree(sa);
return ret;
}
@@ -3100,7 +3178,7 @@ static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_scrub_cancel(root);
+ return btrfs_scrub_cancel(root->fs_info);
}
static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
@@ -3149,6 +3227,51 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
return ret;
}
+static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_dev_replace_args *p;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ p = memdup_user(arg, sizeof(*p));
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ switch (p->cmd) {
+ case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
+ if (atomic_xchg(
+ &root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+ ret = -EINPROGRESS;
+ } else {
+ ret = btrfs_dev_replace_start(root, p);
+ atomic_set(
+ &root->fs_info->mutually_exclusive_operation_running,
+ 0);
+ }
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
+ btrfs_dev_replace_status(root->fs_info, p);
+ ret = 0;
+ break;
+ case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
+ ret = btrfs_dev_replace_cancel(root->fs_info, p);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user(arg, p, sizeof(*p)))
+ ret = -EFAULT;
+
+ kfree(p);
+ return ret;
+}
+
static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
{
int ret = 0;
@@ -3315,6 +3438,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
struct btrfs_ioctl_balance_args *bargs;
struct btrfs_balance_control *bctl;
int ret;
+ int need_to_clear_lock = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3350,10 +3474,13 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
bargs = NULL;
}
- if (fs_info->balance_ctl) {
+ if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
+ 1)) {
+ pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
ret = -EINPROGRESS;
goto out_bargs;
}
+ need_to_clear_lock = 1;
bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
if (!bctl) {
@@ -3387,6 +3514,9 @@ do_balance:
out_bargs:
kfree(bargs);
out:
+ if (need_to_clear_lock)
+ atomic_set(&root->fs_info->mutually_exclusive_operation_running,
+ 0);
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
mnt_drop_write_file(file);
@@ -3441,8 +3571,9 @@ out:
return ret;
}
-static long btrfs_ioctl_quota_ctl(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_quota_ctl_args *sa;
struct btrfs_trans_handle *trans = NULL;
int ret;
@@ -3451,12 +3582,15 @@ static long btrfs_ioctl_quota_ctl(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
sa = memdup_user(arg, sizeof(*sa));
- if (IS_ERR(sa))
- return PTR_ERR(sa);
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ goto drop_write;
+ }
if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
trans = btrfs_start_transaction(root, 2);
@@ -3489,14 +3623,16 @@ static long btrfs_ioctl_quota_ctl(struct btrfs_root *root, void __user *arg)
if (err && !ret)
ret = err;
}
-
out:
kfree(sa);
+drop_write:
+ mnt_drop_write_file(file);
return ret;
}
-static long btrfs_ioctl_qgroup_assign(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_assign_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3505,12 +3641,15 @@ static long btrfs_ioctl_qgroup_assign(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
sa = memdup_user(arg, sizeof(*sa));
- if (IS_ERR(sa))
- return PTR_ERR(sa);
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ goto drop_write;
+ }
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3533,11 +3672,14 @@ static long btrfs_ioctl_qgroup_assign(struct btrfs_root *root, void __user *arg)
out:
kfree(sa);
+drop_write:
+ mnt_drop_write_file(file);
return ret;
}
-static long btrfs_ioctl_qgroup_create(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3546,12 +3688,15 @@ static long btrfs_ioctl_qgroup_create(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
sa = memdup_user(arg, sizeof(*sa));
- if (IS_ERR(sa))
- return PTR_ERR(sa);
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ goto drop_write;
+ }
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3573,11 +3718,14 @@ static long btrfs_ioctl_qgroup_create(struct btrfs_root *root, void __user *arg)
out:
kfree(sa);
+drop_write:
+ mnt_drop_write_file(file);
return ret;
}
-static long btrfs_ioctl_qgroup_limit(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
int ret;
@@ -3587,12 +3735,15 @@ static long btrfs_ioctl_qgroup_limit(struct btrfs_root *root, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
sa = memdup_user(arg, sizeof(*sa));
- if (IS_ERR(sa))
- return PTR_ERR(sa);
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ goto drop_write;
+ }
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -3615,6 +3766,8 @@ static long btrfs_ioctl_qgroup_limit(struct btrfs_root *root, void __user *arg)
out:
kfree(sa);
+drop_write:
+ mnt_drop_write_file(file);
return ret;
}
@@ -3735,11 +3888,11 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_DEFRAG_RANGE:
return btrfs_ioctl_defrag(file, argp);
case BTRFS_IOC_RESIZE:
- return btrfs_ioctl_resize(root, argp);
+ return btrfs_ioctl_resize(file, argp);
case BTRFS_IOC_ADD_DEV:
return btrfs_ioctl_add_dev(root, argp);
case BTRFS_IOC_RM_DEV:
- return btrfs_ioctl_rm_dev(root, argp);
+ return btrfs_ioctl_rm_dev(file, argp);
case BTRFS_IOC_FS_INFO:
return btrfs_ioctl_fs_info(root, argp);
case BTRFS_IOC_DEV_INFO:
@@ -3768,11 +3921,11 @@ long btrfs_ioctl(struct file *file, unsigned int
btrfs_sync_fs(file->f_dentry->d_sb, 1);
return 0;
case BTRFS_IOC_START_SYNC:
- return btrfs_ioctl_start_sync(file, argp);
+ return btrfs_ioctl_start_sync(root, argp);
case BTRFS_IOC_WAIT_SYNC:
- return btrfs_ioctl_wait_sync(file, argp);
+ return btrfs_ioctl_wait_sync(root, argp);
case BTRFS_IOC_SCRUB:
- return btrfs_ioctl_scrub(root, argp);
+ return btrfs_ioctl_scrub(file, argp);
case BTRFS_IOC_SCRUB_CANCEL:
return btrfs_ioctl_scrub_cancel(root, argp);
case BTRFS_IOC_SCRUB_PROGRESS:
@@ -3790,13 +3943,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(root, argp);
case BTRFS_IOC_QUOTA_CTL:
- return btrfs_ioctl_quota_ctl(root, argp);
+ return btrfs_ioctl_quota_ctl(file, argp);
case BTRFS_IOC_QGROUP_ASSIGN:
- return btrfs_ioctl_qgroup_assign(root, argp);
+ return btrfs_ioctl_qgroup_assign(file, argp);
case BTRFS_IOC_QGROUP_CREATE:
- return btrfs_ioctl_qgroup_create(root, argp);
+ return btrfs_ioctl_qgroup_create(file, argp);
case BTRFS_IOC_QGROUP_LIMIT:
- return btrfs_ioctl_qgroup_limit(root, argp);
+ return btrfs_ioctl_qgroup_limit(file, argp);
+ case BTRFS_IOC_DEV_REPLACE:
+ return btrfs_ioctl_dev_replace(root, argp);
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 731e2875ab9..dabca9cc8c2 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -30,6 +30,8 @@ struct btrfs_ioctl_vol_args {
char name[BTRFS_PATH_NAME_MAX + 1];
};
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+
#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
@@ -123,7 +125,48 @@ struct btrfs_ioctl_scrub_args {
__u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
};
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0
+#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID 1
+struct btrfs_ioctl_dev_replace_start_params {
+ __u64 srcdevid; /* in, if 0, use srcdev_name instead */
+ __u64 cont_reading_from_srcdev_mode; /* in, see #define
+ * above */
+ __u8 srcdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1]; /* in */
+ __u8 tgtdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1]; /* in */
+};
+
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED 0
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED 1
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED 2
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED 3
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED 4
+struct btrfs_ioctl_dev_replace_status_params {
+ __u64 replace_state; /* out, see #define above */
+ __u64 progress_1000; /* out, 0 <= x <= 1000 */
+ __u64 time_started; /* out, seconds since 1-Jan-1970 */
+ __u64 time_stopped; /* out, seconds since 1-Jan-1970 */
+ __u64 num_write_errors; /* out */
+ __u64 num_uncorrectable_read_errors; /* out */
+};
+
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_START 0
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS 1
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL 2
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR 0
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED 1
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED 2
+struct btrfs_ioctl_dev_replace_args {
+ __u64 cmd; /* in */
+ __u64 result; /* out */
+
+ union {
+ struct btrfs_ioctl_dev_replace_start_params start;
+ struct btrfs_ioctl_dev_replace_status_params status;
+ }; /* in/out */
+
+ __u64 spare[64];
+};
+
struct btrfs_ioctl_dev_info_args {
__u64 devid; /* in/out */
__u8 uuid[BTRFS_UUID_SIZE]; /* in/out */
@@ -453,4 +496,7 @@ struct btrfs_ioctl_send_args {
struct btrfs_ioctl_qgroup_limit_args)
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
struct btrfs_ioctl_get_dev_stats)
+#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
+ struct btrfs_ioctl_dev_replace_args)
+
#endif
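From user space the new ioctl is driven through btrfs_ioctl_dev_replace_args defined above. A hedged sketch of starting a replace by device path, assuming this header is visible to the program; the device paths and the helper name are made up for illustration, while the struct fields, command/result constants and BTRFS_IOC_DEV_REPLACE come from this patch:

#include <string.h>
#include <sys/ioctl.h>
/* plus this ioctl.h for the struct, command and ioctl number definitions */

static int start_dev_replace(int fs_fd)	/* fd of any file/dir on the filesystem */
{
	struct btrfs_ioctl_dev_replace_args args;

	memset(&args, 0, sizeof(args));
	args.cmd = BTRFS_IOCTL_DEV_REPLACE_CMD_START;
	args.start.srcdevid = 0;	/* 0: identify the source by name */
	strcpy((char *)args.start.srcdev_name, "/dev/sdb");	/* hypothetical */
	strcpy((char *)args.start.tgtdev_name, "/dev/sdc");	/* hypothetical */
	args.start.cont_reading_from_srcdev_mode =
		BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;

	if (ioctl(fs_fd, BTRFS_IOC_DEV_REPLACE, &args) < 0)
		return -1;

	return (int)args.result;	/* BTRFS_IOCTL_DEV_REPLACE_RESULT_* */
}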
diff --git a/fs/btrfs/math.h b/fs/btrfs/math.h
new file mode 100644
index 00000000000..b7816cefbd1
--- /dev/null
+++ b/fs/btrfs/math.h
@@ -0,0 +1,44 @@
+
+/*
+ * Copyright (C) 2012 Fujitsu. All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_MATH_H
+#define __BTRFS_MATH_H
+
+#include <asm/div64.h>
+
+static inline u64 div_factor(u64 num, int factor)
+{
+ if (factor == 10)
+ return num;
+ num *= factor;
+ do_div(num, 10);
+ return num;
+}
+
+static inline u64 div_factor_fine(u64 num, int factor)
+{
+ if (factor == 100)
+ return num;
+ num *= factor;
+ do_div(num, 100);
+ return num;
+}
+
+#endif
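div_factor() scales a value by tenths and div_factor_fine() by hundredths, using do_div() so the 64-bit division also works on 32-bit builds. Two illustrative wrappers (not part of the patch) showing the intended use for threshold calculations:

/* illustrative wrappers, not part of the patch */
static inline u64 seventy_percent_of(u64 num_bytes)
{
	return div_factor(num_bytes, 7);	/* num_bytes * 7 / 10 */
}

static inline u64 ninety_five_percent_of(u64 num_bytes)
{
	return div_factor_fine(num_bytes, 95);	/* num_bytes * 95 / 100 */
}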
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 7772f02ba28..f1073129704 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -211,6 +211,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
init_waitqueue_head(&entry->wait);
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->root_extent_list);
+ INIT_LIST_HEAD(&entry->work_list);
+ init_completion(&entry->completion);
trace_btrfs_ordered_extent_add(inode, entry);
@@ -464,18 +466,28 @@ void btrfs_remove_ordered_extent(struct inode *inode,
wake_up(&entry->wait);
}
+static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
+{
+ struct btrfs_ordered_extent *ordered;
+
+ ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
+ btrfs_start_ordered_extent(ordered->inode, ordered, 1);
+ complete(&ordered->completion);
+}
+
/*
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
- struct list_head splice;
+ struct list_head splice, works;
struct list_head *cur;
- struct btrfs_ordered_extent *ordered;
+ struct btrfs_ordered_extent *ordered, *next;
struct inode *inode;
INIT_LIST_HEAD(&splice);
+ INIT_LIST_HEAD(&works);
spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_init(&root->fs_info->ordered_extents, &splice);
@@ -494,19 +506,32 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
spin_unlock(&root->fs_info->ordered_extent_lock);
if (inode) {
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- if (delay_iput)
- btrfs_add_delayed_iput(inode);
- else
- iput(inode);
+ ordered->flush_work.func = btrfs_run_ordered_extent_work;
+ list_add_tail(&ordered->work_list, &works);
+ btrfs_queue_worker(&root->fs_info->flush_workers,
+ &ordered->flush_work);
} else {
btrfs_put_ordered_extent(ordered);
}
+ cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
+
+ list_for_each_entry_safe(ordered, next, &works, work_list) {
+ list_del_init(&ordered->work_list);
+ wait_for_completion(&ordered->completion);
+
+ inode = ordered->inode;
+ btrfs_put_ordered_extent(ordered);
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
+
+ cond_resched();
+ }
}
/*
@@ -519,13 +544,17 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
* extra check to make sure the ordered operation list really is empty
* before we return
*/
-void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
struct btrfs_inode *btrfs_inode;
struct inode *inode;
struct list_head splice;
+ struct list_head works;
+ struct btrfs_delalloc_work *work, *next;
+ int ret = 0;
INIT_LIST_HEAD(&splice);
+ INIT_LIST_HEAD(&works);
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
@@ -533,6 +562,7 @@ again:
list_splice_init(&root->fs_info->ordered_operations, &splice);
while (!list_empty(&splice)) {
+
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
@@ -549,15 +579,26 @@ again:
list_add_tail(&BTRFS_I(inode)->ordered_operations,
&root->fs_info->ordered_operations);
}
+
+ if (!inode)
+ continue;
spin_unlock(&root->fs_info->ordered_extent_lock);
- if (inode) {
- if (wait)
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
- else
- filemap_flush(inode->i_mapping);
- btrfs_add_delayed_iput(inode);
+ work = btrfs_alloc_delalloc_work(inode, wait, 1);
+ if (!work) {
+ if (list_empty(&BTRFS_I(inode)->ordered_operations))
+ list_add_tail(&btrfs_inode->ordered_operations,
+ &splice);
+ spin_lock(&root->fs_info->ordered_extent_lock);
+ list_splice_tail(&splice,
+ &root->fs_info->ordered_operations);
+ spin_unlock(&root->fs_info->ordered_extent_lock);
+ ret = -ENOMEM;
+ goto out;
}
+ list_add_tail(&work->list, &works);
+ btrfs_queue_worker(&root->fs_info->flush_workers,
+ &work->work);
cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
@@ -566,7 +607,13 @@ again:
goto again;
spin_unlock(&root->fs_info->ordered_extent_lock);
+out:
+ list_for_each_entry_safe(work, next, &works, list) {
+ list_del_init(&work->list);
+ btrfs_wait_and_free_delalloc_work(work);
+ }
mutex_unlock(&root->fs_info->ordered_operations_mutex);
+ return ret;
}
/*
@@ -606,7 +653,6 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
u64 end;
u64 orig_end;
struct btrfs_ordered_extent *ordered;
- int found;
if (start + len < start) {
orig_end = INT_LIMIT(loff_t);
@@ -642,7 +688,6 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
filemap_fdatawait_range(inode->i_mapping, start, orig_end);
end = orig_end;
- found = 0;
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered)
@@ -655,7 +700,6 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
btrfs_put_ordered_extent(ordered);
break;
}
- found++;
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
btrfs_put_ordered_extent(ordered);
@@ -934,15 +978,6 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
if (last_mod < root->fs_info->last_trans_committed)
return;
- /*
- * the transaction is already committing. Just start the IO and
- * don't bother with all of this list nonsense
- */
- if (trans && root->fs_info->running_transaction->blocked) {
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
- return;
- }
-
spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
@@ -959,6 +994,7 @@ int __init ordered_data_init(void)
NULL);
if (!btrfs_ordered_extent_cache)
return -ENOMEM;
+
return 0;
}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index dd27a0b46a3..f29d4bf5fbe 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -76,7 +76,7 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
-#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates wether this ordered extent
+#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
* has done its due diligence in updating
* the isize. */
@@ -128,8 +128,11 @@ struct btrfs_ordered_extent {
struct list_head root_extent_list;
struct btrfs_work work;
-};
+ struct completion completion;
+ struct btrfs_work flush_work;
+ struct list_head work_list;
+};
/*
* calculates the total size you need to allocate for an ordered sum
@@ -186,7 +189,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 5e23684887e..50d95fd190a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -297,6 +297,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
case BTRFS_DEV_STATS_KEY:
printk(KERN_INFO "\t\tdevice stats\n");
break;
+ case BTRFS_DEV_REPLACE_KEY:
+ printk(KERN_INFO "\t\tdev replace\n");
+ break;
};
}
}
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index a955669519a..96b93daa0bb 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -27,6 +27,7 @@
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
+#include "dev-replace.h"
#undef DEBUG
@@ -323,7 +324,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
@@ -332,6 +332,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
int nzones = 0;
int i;
unsigned long index = logical >> PAGE_CACHE_SHIFT;
+ int dev_replace_is_ongoing;
spin_lock(&fs_info->reada_lock);
re = radix_tree_lookup(&fs_info->reada_tree, index);
@@ -358,7 +359,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
* map block
*/
length = blocksize;
- ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
+ ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
+ &bbio, 0);
if (ret || !bbio || length < blocksize)
goto error;
@@ -393,6 +395,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
}
/* insert extent in reada_tree + all per-device trees, all or nothing */
+ btrfs_dev_replace_lock(&fs_info->dev_replace);
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&fs_info->reada_tree, index, re);
if (ret == -EEXIST) {
@@ -400,13 +403,17 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
BUG_ON(!re_exist);
re_exist->refcnt++;
spin_unlock(&fs_info->reada_lock);
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
goto error;
}
if (ret) {
spin_unlock(&fs_info->reada_lock);
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
goto error;
}
prev_dev = NULL;
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
+ &fs_info->dev_replace);
for (i = 0; i < nzones; ++i) {
dev = bbio->stripes[i].dev;
if (dev == prev_dev) {
@@ -419,21 +426,36 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
*/
continue;
}
+ if (!dev->bdev) {
+ /* cannot read ahead on missing device */
+ continue;
+ }
+ if (dev_replace_is_ongoing &&
+ dev == fs_info->dev_replace.tgtdev) {
+ /*
+ * as this device is selected for reading only as
+ * a last resort, skip it for read ahead.
+ */
+ continue;
+ }
prev_dev = dev;
ret = radix_tree_insert(&dev->reada_extents, index, re);
if (ret) {
while (--i >= 0) {
dev = bbio->stripes[i].dev;
BUG_ON(dev == NULL);
+ /* ignore whether the entry was inserted */
radix_tree_delete(&dev->reada_extents, index);
}
BUG_ON(fs_info == NULL);
radix_tree_delete(&fs_info->reada_tree, index);
spin_unlock(&fs_info->reada_lock);
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
goto error;
}
}
spin_unlock(&fs_info->reada_lock);
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
kfree(bbio);
return re;
@@ -915,7 +937,10 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
generation = btrfs_header_generation(node);
free_extent_buffer(node);
- reada_add_block(rc, start, &max_key, level, generation);
+ if (reada_add_block(rc, start, &max_key, level, generation)) {
+ kfree(rc);
+ return ERR_PTR(-ENOMEM);
+ }
reada_start_machine(root->fs_info);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 776f0aa128f..300e09ac365 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2025,7 +2025,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
struct btrfs_root_item *root_item;
struct btrfs_path *path;
struct extent_buffer *leaf;
- unsigned long nr;
int level;
int max_level;
int replaced = 0;
@@ -2074,7 +2073,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
BUG_ON(IS_ERR(trans));
trans->block_rsv = rc->block_rsv;
- ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved);
+ ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
+ BTRFS_RESERVE_FLUSH_ALL);
if (ret) {
BUG_ON(ret != -EAGAIN);
ret = btrfs_commit_transaction(trans, root);
@@ -2125,10 +2125,9 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
path->slots[level]);
root_item->drop_level = level;
- nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -2155,10 +2154,9 @@ out:
btrfs_update_reloc_root(trans, root);
}
- nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -2184,7 +2182,8 @@ int prepare_to_merge(struct reloc_control *rc, int err)
again:
if (!err) {
num_bytes = rc->merging_rsv_size;
- ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
+ ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
+ BTRFS_RESERVE_FLUSH_ALL);
if (ret)
err = ret;
}
@@ -2459,7 +2458,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
num_bytes = calcu_metadata_size(rc, node, 1) * 2;
trans->block_rsv = rc->block_rsv;
- ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
+ ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
+ BTRFS_RESERVE_FLUSH_ALL);
if (ret) {
if (ret == -EAGAIN)
rc->commit_transaction = 1;
@@ -3259,7 +3259,6 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
struct btrfs_path *path;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
- unsigned long nr;
int ret = 0;
if (inode)
@@ -3293,9 +3292,8 @@ truncate:
ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
btrfs_free_path(path);
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
out:
iput(inode);
return ret;
@@ -3685,7 +3683,8 @@ int prepare_to_relocate(struct reloc_control *rc)
* is no reservation in transaction handle.
*/
ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
- rc->extent_root->nodesize * 256);
+ rc->extent_root->nodesize * 256,
+ BTRFS_RESERVE_FLUSH_ALL);
if (ret)
return ret;
@@ -3711,7 +3710,6 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
struct btrfs_trans_handle *trans = NULL;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
- unsigned long nr;
u64 flags;
u32 item_size;
int ret;
@@ -3828,9 +3826,8 @@ restart:
ret = btrfs_commit_transaction(trans, rc->extent_root);
BUG_ON(ret);
} else {
- nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root, nr);
+ btrfs_btree_balance_dirty(rc->extent_root);
}
trans = NULL;
@@ -3860,9 +3857,8 @@ restart:
GFP_NOFS);
if (trans) {
- nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, rc->extent_root);
- btrfs_btree_balance_dirty(rc->extent_root, nr);
+ btrfs_btree_balance_dirty(rc->extent_root);
}
if (!err) {
@@ -3941,7 +3937,6 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans;
struct btrfs_root *root;
struct btrfs_key key;
- unsigned long nr;
u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
int err = 0;
@@ -3969,9 +3964,8 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
err = btrfs_orphan_add(trans, inode);
out:
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
+ btrfs_btree_balance_dirty(root);
if (err) {
if (inode)
iput(inode);
@@ -4057,7 +4051,11 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
(unsigned long long)rc->block_group->key.objectid,
(unsigned long long)rc->block_group->flags);
- btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+ ret = btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ }
btrfs_wait_ordered_extents(fs_info->tree_root, 0);
while (1) {
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index eb923d087da..668af537a3e 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -548,9 +548,9 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
struct btrfs_root_item *item = &root->root_item;
struct timespec ct = CURRENT_TIME;
- spin_lock(&root->root_times_lock);
+ spin_lock(&root->root_item_lock);
item->ctransid = cpu_to_le64(trans->transid);
item->ctime.sec = cpu_to_le64(ct.tv_sec);
item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
- spin_unlock(&root->root_times_lock);
+ spin_unlock(&root->root_item_lock);
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 27892f67e69..bdbb94f245c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 STRATO. All rights reserved.
+ * Copyright (C) 2011, 2012 STRATO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
@@ -25,6 +25,7 @@
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
+#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
@@ -42,10 +43,23 @@
*/
struct scrub_block;
-struct scrub_dev;
+struct scrub_ctx;
-#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
-#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
+/*
+ * the following three values only influence the performance.
+ * The last one configures the number of parallel and outstanding I/O
+ * operations. The first two values configure an upper limit for the number
+ * of (dynamically allocated) pages that are added to a bio.
+ */
+#define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
+#define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
+#define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
+
+/*
+ * the following value times PAGE_SIZE needs to be large enough to match the
+ * largest node/leaf/sector size that shall be supported.
+ * Values larger than BTRFS_STRIPE_LEN are not supported.
+ */
#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
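The byte figures in the comments assume 4 KiB pages; the arithmetic behind them:

SCRUB_PAGES_PER_RD_BIO * PAGE_SIZE    = 32 * 4 KiB   = 128 KiB per read bio
SCRUB_PAGES_PER_WR_BIO * PAGE_SIZE    = 32 * 4 KiB   = 128 KiB per write bio
SCRUB_BIOS_PER_SCTX * 128 KiB         = 64 * 128 KiB = 8 MiB in flight per device
SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 4 KiB   = 64 KiB largest supported node/leaf/sector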
struct scrub_page {
@@ -56,6 +70,8 @@ struct scrub_page {
u64 generation;
u64 logical;
u64 physical;
+ u64 physical_for_dev_replace;
+ atomic_t ref_count;
struct {
unsigned int mirror_num:8;
unsigned int have_csum:1;
@@ -66,23 +82,28 @@ struct scrub_page {
struct scrub_bio {
int index;
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
+ struct btrfs_device *dev;
struct bio *bio;
int err;
u64 logical;
u64 physical;
- struct scrub_page *pagev[SCRUB_PAGES_PER_BIO];
+#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
+ struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
+#else
+ struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
+#endif
int page_count;
int next_free;
struct btrfs_work work;
};
struct scrub_block {
- struct scrub_page pagev[SCRUB_MAX_PAGES_PER_BLOCK];
+ struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
int page_count;
atomic_t outstanding_pages;
atomic_t ref_count; /* free mem on transition to zero */
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
struct {
unsigned int header_error:1;
unsigned int checksum_error:1;
@@ -91,23 +112,35 @@ struct scrub_block {
};
};
-struct scrub_dev {
- struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
- struct btrfs_device *dev;
+struct scrub_wr_ctx {
+ struct scrub_bio *wr_curr_bio;
+ struct btrfs_device *tgtdev;
+ int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
+ atomic_t flush_all_writes;
+ struct mutex wr_lock;
+};
+
+struct scrub_ctx {
+ struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
+ struct btrfs_root *dev_root;
int first_free;
int curr;
- atomic_t in_flight;
- atomic_t fixup_cnt;
+ atomic_t bios_in_flight;
+ atomic_t workers_pending;
spinlock_t list_lock;
wait_queue_head_t list_wait;
u16 csum_size;
struct list_head csum_list;
atomic_t cancel_req;
int readonly;
- int pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
+ int pages_per_rd_bio;
u32 sectorsize;
u32 nodesize;
u32 leafsize;
+
+ int is_dev_replace;
+ struct scrub_wr_ctx wr_ctx;
+
/*
* statistics
*/
@@ -116,13 +149,23 @@ struct scrub_dev {
};
struct scrub_fixup_nodatasum {
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
+ struct btrfs_device *dev;
u64 logical;
struct btrfs_root *root;
struct btrfs_work work;
int mirror_num;
};
+struct scrub_copy_nocow_ctx {
+ struct scrub_ctx *sctx;
+ u64 logical;
+ u64 len;
+ int mirror_num;
+ u64 physical_for_dev_replace;
+ struct btrfs_work work;
+};
+
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
@@ -137,15 +180,20 @@ struct scrub_warning {
};
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
-static int scrub_setup_recheck_block(struct scrub_dev *sdev,
- struct btrfs_mapping_tree *map_tree,
+static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
+ struct btrfs_fs_info *fs_info,
+ struct scrub_block *original_sblock,
u64 length, u64 logical,
- struct scrub_block *sblock);
-static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
- struct scrub_block *sblock, int is_metadata,
- int have_csum, u8 *csum, u64 generation,
- u16 csum_size);
+ struct scrub_block *sblocks_for_recheck);
+static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock, int is_metadata,
+ int have_csum, u8 *csum, u64 generation,
+ u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock,
int is_metadata, int have_csum,
@@ -158,118 +206,221 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good,
int page_num, int force_write);
+static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
+static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
+ int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
-static int scrub_add_page_to_bio(struct scrub_dev *sdev,
- struct scrub_page *spage);
-static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
- u64 physical, u64 flags, u64 gen, int mirror_num,
- u8 *csum, int force);
+static void scrub_page_get(struct scrub_page *spage);
+static void scrub_page_put(struct scrub_page *spage);
+static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
+ struct scrub_page *spage);
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+ u64 physical, struct btrfs_device *dev, u64 flags,
+ u64 gen, int mirror_num, u8 *csum, int force,
+ u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
+static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
+ u64 extent_logical, u64 extent_len,
+ u64 *extent_physical,
+ struct btrfs_device **extent_dev,
+ int *extent_mirror_num);
+static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
+ struct scrub_wr_ctx *wr_ctx,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_device *dev,
+ int is_dev_replace);
+static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
+static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
+ struct scrub_page *spage);
+static void scrub_wr_submit(struct scrub_ctx *sctx);
+static void scrub_wr_bio_end_io(struct bio *bio, int err);
+static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
+static int write_page_nocow(struct scrub_ctx *sctx,
+ u64 physical_for_dev_replace, struct page *page);
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
+ void *ctx);
+static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+ int mirror_num, u64 physical_for_dev_replace);
+static void copy_nocow_pages_worker(struct btrfs_work *work);
+
+
+static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
+{
+ atomic_inc(&sctx->bios_in_flight);
+}
+
+static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
+{
+ atomic_dec(&sctx->bios_in_flight);
+ wake_up(&sctx->list_wait);
+}
+
+/*
+ * used for workers that require transaction commits (i.e., for the
+ * NOCOW case)
+ */
+static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
+{
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+ /*
+ * increment scrubs_running to prevent cancel requests from
+ * completing as long as a worker is running. we must also
+ * increment scrubs_paused to prevent deadlocking on pause
+ * requests used for transaction commits (as the worker uses a
+ * transaction context). it is safe to regard the worker
+ * as paused for all practical matters. effectively, we only
+ * avoid cancellation requests from completing.
+ */
+ mutex_lock(&fs_info->scrub_lock);
+ atomic_inc(&fs_info->scrubs_running);
+ atomic_inc(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
+ atomic_inc(&sctx->workers_pending);
+}
+/* used for workers that require transaction commits */
+static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
+{
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
-static void scrub_free_csums(struct scrub_dev *sdev)
+ /*
+ * see scrub_pending_trans_workers_inc() for why we're pretending
+ * to be paused in the scrub counters
+ */
+ mutex_lock(&fs_info->scrub_lock);
+ atomic_dec(&fs_info->scrubs_running);
+ atomic_dec(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
+ atomic_dec(&sctx->workers_pending);
+ wake_up(&fs_info->scrub_pause_wait);
+ wake_up(&sctx->list_wait);
+}
+
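Workers that may need to commit a transaction (the nodatasum fixup above, and the NOCOW copy worker added later in this patch) are expected to bracket their lifetime with these two helpers. A minimal sketch of the intended pairing; the queueing site is simplified and assumes the fixup work is run on fs_info->scrub_workers:

static void queue_nodatasum_fixup(struct scrub_ctx *sctx,
				  struct scrub_fixup_nodatasum *fixup)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	fixup->work.func = scrub_fixup_nodatasum;

	/* account for the worker before it is queued ... */
	scrub_pending_trans_workers_inc(sctx);
	btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
}

/* ... and undo the accounting as the very last step of the work function,
 * as scrub_fixup_nodatasum() does with scrub_pending_trans_workers_dec() */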
+static void scrub_free_csums(struct scrub_ctx *sctx)
{
- while (!list_empty(&sdev->csum_list)) {
+ while (!list_empty(&sctx->csum_list)) {
struct btrfs_ordered_sum *sum;
- sum = list_first_entry(&sdev->csum_list,
+ sum = list_first_entry(&sctx->csum_list,
struct btrfs_ordered_sum, list);
list_del(&sum->list);
kfree(sum);
}
}
-static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
+static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
int i;
- if (!sdev)
+ if (!sctx)
return;
+ scrub_free_wr_ctx(&sctx->wr_ctx);
+
/* this can happen when scrub is cancelled */
- if (sdev->curr != -1) {
- struct scrub_bio *sbio = sdev->bios[sdev->curr];
+ if (sctx->curr != -1) {
+ struct scrub_bio *sbio = sctx->bios[sctx->curr];
for (i = 0; i < sbio->page_count; i++) {
- BUG_ON(!sbio->pagev[i]);
- BUG_ON(!sbio->pagev[i]->page);
+ WARN_ON(!sbio->pagev[i]->page);
scrub_block_put(sbio->pagev[i]->sblock);
}
bio_put(sbio->bio);
}
- for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
- struct scrub_bio *sbio = sdev->bios[i];
+ for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
+ struct scrub_bio *sbio = sctx->bios[i];
if (!sbio)
break;
kfree(sbio);
}
- scrub_free_csums(sdev);
- kfree(sdev);
+ scrub_free_csums(sctx);
+ kfree(sctx);
}
static noinline_for_stack
-struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
+struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
int i;
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
- int pages_per_bio;
+ int pages_per_rd_bio;
+ int ret;
- pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
- bio_get_nr_vecs(dev->bdev));
- sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
- if (!sdev)
+ /*
+ * the setting of pages_per_rd_bio is correct for scrub but might
+ * be wrong for the dev_replace code where we might read from
+ * different devices in the initial huge bios. However, that
+ * code is able to correctly handle the case when adding a page
+ * to a bio fails.
+ */
+ if (dev->bdev)
+ pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
+ bio_get_nr_vecs(dev->bdev));
+ else
+ pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
+ sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+ if (!sctx)
goto nomem;
- sdev->dev = dev;
- sdev->pages_per_bio = pages_per_bio;
- sdev->curr = -1;
- for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+ sctx->is_dev_replace = is_dev_replace;
+ sctx->pages_per_rd_bio = pages_per_rd_bio;
+ sctx->curr = -1;
+ sctx->dev_root = dev->dev_root;
+ for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio;
sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
if (!sbio)
goto nomem;
- sdev->bios[i] = sbio;
+ sctx->bios[i] = sbio;
sbio->index = i;
- sbio->sdev = sdev;
+ sbio->sctx = sctx;
sbio->page_count = 0;
sbio->work.func = scrub_bio_end_io_worker;
- if (i != SCRUB_BIOS_PER_DEV-1)
- sdev->bios[i]->next_free = i + 1;
+ if (i != SCRUB_BIOS_PER_SCTX - 1)
+ sctx->bios[i]->next_free = i + 1;
else
- sdev->bios[i]->next_free = -1;
- }
- sdev->first_free = 0;
- sdev->nodesize = dev->dev_root->nodesize;
- sdev->leafsize = dev->dev_root->leafsize;
- sdev->sectorsize = dev->dev_root->sectorsize;
- atomic_set(&sdev->in_flight, 0);
- atomic_set(&sdev->fixup_cnt, 0);
- atomic_set(&sdev->cancel_req, 0);
- sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
- INIT_LIST_HEAD(&sdev->csum_list);
-
- spin_lock_init(&sdev->list_lock);
- spin_lock_init(&sdev->stat_lock);
- init_waitqueue_head(&sdev->list_wait);
- return sdev;
+ sctx->bios[i]->next_free = -1;
+ }
+ sctx->first_free = 0;
+ sctx->nodesize = dev->dev_root->nodesize;
+ sctx->leafsize = dev->dev_root->leafsize;
+ sctx->sectorsize = dev->dev_root->sectorsize;
+ atomic_set(&sctx->bios_in_flight, 0);
+ atomic_set(&sctx->workers_pending, 0);
+ atomic_set(&sctx->cancel_req, 0);
+ sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ INIT_LIST_HEAD(&sctx->csum_list);
+
+ spin_lock_init(&sctx->list_lock);
+ spin_lock_init(&sctx->stat_lock);
+ init_waitqueue_head(&sctx->list_wait);
+
+ ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
+ fs_info->dev_replace.tgtdev, is_dev_replace);
+ if (ret) {
+ scrub_free_ctx(sctx);
+ return ERR_PTR(ret);
+ }
+ return sctx;
nomem:
- scrub_free_dev(sdev);
+ scrub_free_ctx(sctx);
return ERR_PTR(-ENOMEM);
}
-static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
+static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
+ void *warn_ctx)
{
u64 isize;
u32 nlink;
@@ -277,7 +428,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
int i;
struct extent_buffer *eb;
struct btrfs_inode_item *inode_item;
- struct scrub_warning *swarn = ctx;
+ struct scrub_warning *swarn = warn_ctx;
struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
struct inode_fs_paths *ipath = NULL;
struct btrfs_root *local_root;
@@ -345,8 +496,8 @@ err:
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
- struct btrfs_device *dev = sblock->sdev->dev;
- struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+ struct btrfs_device *dev;
+ struct btrfs_fs_info *fs_info;
struct btrfs_path *path;
struct btrfs_key found_key;
struct extent_buffer *eb;
@@ -361,15 +512,18 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
const int bufsize = 4096;
int ret;
+ WARN_ON(sblock->page_count < 1);
+ dev = sblock->pagev[0]->dev;
+ fs_info = sblock->sctx->dev_root->fs_info;
+
path = btrfs_alloc_path();
swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
- BUG_ON(sblock->page_count < 1);
- swarn.sector = (sblock->pagev[0].physical) >> 9;
- swarn.logical = sblock->pagev[0].logical;
+ swarn.sector = (sblock->pagev[0]->physical) >> 9;
+ swarn.logical = sblock->pagev[0]->logical;
swarn.errstr = errstr;
- swarn.dev = dev;
+ swarn.dev = NULL;
swarn.msg_bufsize = bufsize;
swarn.scratch_bufsize = bufsize;
@@ -405,6 +559,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
} while (ret != 1);
} else {
swarn.path = path;
+ swarn.dev = dev;
iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, 1,
scrub_print_warning_inode, &swarn);
@@ -416,11 +571,11 @@ out:
kfree(swarn.msg_buf);
}
-static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
+static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
struct page *page = NULL;
unsigned long index;
- struct scrub_fixup_nodatasum *fixup = ctx;
+ struct scrub_fixup_nodatasum *fixup = fixup_ctx;
int ret;
int corrected = 0;
struct btrfs_key key;
@@ -451,7 +606,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
}
if (PageUptodate(page)) {
- struct btrfs_mapping_tree *map_tree;
+ struct btrfs_fs_info *fs_info;
if (PageDirty(page)) {
/*
* we need to write the data to the defect sector. the
@@ -472,8 +627,8 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
ret = -EIO;
goto out;
}
- map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
- ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
fixup->logical, page,
fixup->mirror_num);
unlock_page(page);
@@ -530,21 +685,21 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
int ret;
struct scrub_fixup_nodatasum *fixup;
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
struct btrfs_trans_handle *trans = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_path *path;
int uncorrectable = 0;
fixup = container_of(work, struct scrub_fixup_nodatasum, work);
- sdev = fixup->sdev;
+ sctx = fixup->sctx;
fs_info = fixup->root->fs_info;
path = btrfs_alloc_path();
if (!path) {
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.malloc_errors;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ ++sctx->stat.malloc_errors;
+ spin_unlock(&sctx->stat_lock);
uncorrectable = 1;
goto out;
}
@@ -573,35 +728,30 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
}
WARN_ON(ret != 1);
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.corrected_errors;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ ++sctx->stat.corrected_errors;
+ spin_unlock(&sctx->stat_lock);
out:
if (trans && !IS_ERR(trans))
btrfs_end_transaction(trans, fixup->root);
if (uncorrectable) {
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.uncorrectable_errors;
- spin_unlock(&sdev->stat_lock);
-
+ spin_lock(&sctx->stat_lock);
+ ++sctx->stat.uncorrectable_errors;
+ spin_unlock(&sctx->stat_lock);
+ btrfs_dev_replace_stats_inc(
+ &sctx->dev_root->fs_info->dev_replace.
+ num_uncorrectable_read_errors);
printk_ratelimited_in_rcu(KERN_ERR
"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
(unsigned long long)fixup->logical,
- rcu_str_deref(sdev->dev->name));
+ rcu_str_deref(fixup->dev->name));
}
btrfs_free_path(path);
kfree(fixup);
- /* see caller why we're pretending to be paused in the scrub counters */
- mutex_lock(&fs_info->scrub_lock);
- atomic_dec(&fs_info->scrubs_running);
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- atomic_dec(&sdev->fixup_cnt);
- wake_up(&fs_info->scrub_pause_wait);
- wake_up(&sdev->list_wait);
+ scrub_pending_trans_workers_dec(sctx);
}
/*
@@ -614,7 +764,8 @@ out:
*/
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
- struct scrub_dev *sdev = sblock_to_check->sdev;
+ struct scrub_ctx *sctx = sblock_to_check->sctx;
+ struct btrfs_device *dev;
struct btrfs_fs_info *fs_info;
u64 length;
u64 logical;
@@ -633,16 +784,33 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
DEFAULT_RATELIMIT_BURST);
BUG_ON(sblock_to_check->page_count < 1);
- fs_info = sdev->dev->dev_root->fs_info;
+ fs_info = sctx->dev_root->fs_info;
+ if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
+ /*
+ * if we find an error in a super block, we just report it.
+ * Super blocks are rewritten with the next transaction
+ * commit anyway.
+ */
+ spin_lock(&sctx->stat_lock);
+ ++sctx->stat.super_errors;
+ spin_unlock(&sctx->stat_lock);
+ return 0;
+ }
length = sblock_to_check->page_count * PAGE_SIZE;
- logical = sblock_to_check->pagev[0].logical;
- generation = sblock_to_check->pagev[0].generation;
- BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
- failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
- is_metadata = !(sblock_to_check->pagev[0].flags &
+ logical = sblock_to_check->pagev[0]->logical;
+ generation = sblock_to_check->pagev[0]->generation;
+ BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
+ failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
+ is_metadata = !(sblock_to_check->pagev[0]->flags &
BTRFS_EXTENT_FLAG_DATA);
- have_csum = sblock_to_check->pagev[0].have_csum;
- csum = sblock_to_check->pagev[0].csum;
+ have_csum = sblock_to_check->pagev[0]->have_csum;
+ csum = sblock_to_check->pagev[0]->csum;
+ dev = sblock_to_check->pagev[0]->dev;
+
+ if (sctx->is_dev_replace && !is_metadata && !have_csum) {
+ sblocks_for_recheck = NULL;
+ goto nodatasum_case;
+ }
/*
* read all mirrors one after the other. This includes to
@@ -677,43 +845,32 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
sizeof(*sblocks_for_recheck),
GFP_NOFS);
if (!sblocks_for_recheck) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.malloc_errors++;
- sdev->stat.read_errors++;
- sdev->stat.uncorrectable_errors++;
- spin_unlock(&sdev->stat_lock);
- btrfs_dev_stat_inc_and_print(sdev->dev,
- BTRFS_DEV_STAT_READ_ERRS);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ sctx->stat.read_errors++;
+ sctx->stat.uncorrectable_errors++;
+ spin_unlock(&sctx->stat_lock);
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
goto out;
}
/* setup the context, map the logical blocks and alloc the pages */
- ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
+ ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
logical, sblocks_for_recheck);
if (ret) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.read_errors++;
- sdev->stat.uncorrectable_errors++;
- spin_unlock(&sdev->stat_lock);
- btrfs_dev_stat_inc_and_print(sdev->dev,
- BTRFS_DEV_STAT_READ_ERRS);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.read_errors++;
+ sctx->stat.uncorrectable_errors++;
+ spin_unlock(&sctx->stat_lock);
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
goto out;
}
BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
sblock_bad = sblocks_for_recheck + failed_mirror_index;
/* build and submit the bios for the failed mirror, check checksums */
- ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
- csum, generation, sdev->csum_size);
- if (ret) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.read_errors++;
- sdev->stat.uncorrectable_errors++;
- spin_unlock(&sdev->stat_lock);
- btrfs_dev_stat_inc_and_print(sdev->dev,
- BTRFS_DEV_STAT_READ_ERRS);
- goto out;
- }
+ scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
+ csum, generation, sctx->csum_size);
if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
sblock_bad->no_io_error_seen) {
@@ -725,50 +882,54 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
* different bio (usually one of the two latter cases is
* the cause)
*/
- spin_lock(&sdev->stat_lock);
- sdev->stat.unverified_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.unverified_errors++;
+ spin_unlock(&sctx->stat_lock);
+ if (sctx->is_dev_replace)
+ scrub_write_block_to_dev_replace(sblock_bad);
goto out;
}
if (!sblock_bad->no_io_error_seen) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.read_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.read_errors++;
+ spin_unlock(&sctx->stat_lock);
if (__ratelimit(&_rs))
scrub_print_warning("i/o error", sblock_to_check);
- btrfs_dev_stat_inc_and_print(sdev->dev,
- BTRFS_DEV_STAT_READ_ERRS);
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
} else if (sblock_bad->checksum_error) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.csum_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.csum_errors++;
+ spin_unlock(&sctx->stat_lock);
if (__ratelimit(&_rs))
scrub_print_warning("checksum error", sblock_to_check);
- btrfs_dev_stat_inc_and_print(sdev->dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
} else if (sblock_bad->header_error) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.verify_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.verify_errors++;
+ spin_unlock(&sctx->stat_lock);
if (__ratelimit(&_rs))
scrub_print_warning("checksum/header error",
sblock_to_check);
if (sblock_bad->generation_error)
- btrfs_dev_stat_inc_and_print(sdev->dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_GENERATION_ERRS);
else
- btrfs_dev_stat_inc_and_print(sdev->dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
}
- if (sdev->readonly)
+ if (sctx->readonly && !sctx->is_dev_replace)
goto did_not_correct_error;
if (!is_metadata && !have_csum) {
struct scrub_fixup_nodatasum *fixup_nodatasum;
+nodatasum_case:
+ WARN_ON(sctx->is_dev_replace);
+
/*
* !is_metadata and !have_csum, this means that the data
* might not be COW'ed, that it might be modified
@@ -779,24 +940,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
if (!fixup_nodatasum)
goto did_not_correct_error;
- fixup_nodatasum->sdev = sdev;
+ fixup_nodatasum->sctx = sctx;
+ fixup_nodatasum->dev = dev;
fixup_nodatasum->logical = logical;
fixup_nodatasum->root = fs_info->extent_root;
fixup_nodatasum->mirror_num = failed_mirror_index + 1;
- /*
- * increment scrubs_running to prevent cancel requests from
- * completing as long as a fixup worker is running. we must also
- * increment scrubs_paused to prevent deadlocking on pause
- * requests used for transactions commits (as the worker uses a
- * transaction context). it is safe to regard the fixup worker
- * as paused for all matters practical. effectively, we only
- * avoid cancellation requests from completing.
- */
- mutex_lock(&fs_info->scrub_lock);
- atomic_inc(&fs_info->scrubs_running);
- atomic_inc(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- atomic_inc(&sdev->fixup_cnt);
+ scrub_pending_trans_workers_inc(sctx);
fixup_nodatasum->work.func = scrub_fixup_nodatasum;
btrfs_queue_worker(&fs_info->scrub_workers,
&fixup_nodatasum->work);
@@ -805,26 +954,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
/*
* now build and submit the bios for the other mirrors, check
- * checksums
- */
- for (mirror_index = 0;
- mirror_index < BTRFS_MAX_MIRRORS &&
- sblocks_for_recheck[mirror_index].page_count > 0;
- mirror_index++) {
- if (mirror_index == failed_mirror_index)
- continue;
-
- /* build and submit the bios, check checksums */
- ret = scrub_recheck_block(fs_info,
- sblocks_for_recheck + mirror_index,
- is_metadata, have_csum, csum,
- generation, sdev->csum_size);
- if (ret)
- goto did_not_correct_error;
- }
-
- /*
- * first try to pick the mirror which is completely without I/O
+ * checksums.
+ * First try to pick the mirror which is completely without I/O
* errors and also does not have a checksum error.
* If one is found, and if a checksum is present, the full block
* that is known to contain an error is rewritten. Afterwards
@@ -840,24 +971,93 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
mirror_index < BTRFS_MAX_MIRRORS &&
sblocks_for_recheck[mirror_index].page_count > 0;
mirror_index++) {
- struct scrub_block *sblock_other = sblocks_for_recheck +
- mirror_index;
+ struct scrub_block *sblock_other;
+
+ if (mirror_index == failed_mirror_index)
+ continue;
+ sblock_other = sblocks_for_recheck + mirror_index;
+
+ /* build and submit the bios, check checksums */
+ scrub_recheck_block(fs_info, sblock_other, is_metadata,
+ have_csum, csum, generation,
+ sctx->csum_size);
if (!sblock_other->header_error &&
!sblock_other->checksum_error &&
sblock_other->no_io_error_seen) {
- int force_write = is_metadata || have_csum;
-
- ret = scrub_repair_block_from_good_copy(sblock_bad,
- sblock_other,
- force_write);
+ if (sctx->is_dev_replace) {
+ scrub_write_block_to_dev_replace(sblock_other);
+ } else {
+ int force_write = is_metadata || have_csum;
+
+ ret = scrub_repair_block_from_good_copy(
+ sblock_bad, sblock_other,
+ force_write);
+ }
if (0 == ret)
goto corrected_error;
}
}
/*
- * in case of I/O errors in the area that is supposed to be
+ * for dev_replace, pick good pages and write to the target device.
+ */
+ if (sctx->is_dev_replace) {
+ success = 1;
+ for (page_num = 0; page_num < sblock_bad->page_count;
+ page_num++) {
+ int sub_success;
+
+ sub_success = 0;
+ for (mirror_index = 0;
+ mirror_index < BTRFS_MAX_MIRRORS &&
+ sblocks_for_recheck[mirror_index].page_count > 0;
+ mirror_index++) {
+ struct scrub_block *sblock_other =
+ sblocks_for_recheck + mirror_index;
+ struct scrub_page *page_other =
+ sblock_other->pagev[page_num];
+
+ if (!page_other->io_error) {
+ ret = scrub_write_page_to_dev_replace(
+ sblock_other, page_num);
+ if (ret == 0) {
+ /* succeeded for this page */
+ sub_success = 1;
+ break;
+ } else {
+ btrfs_dev_replace_stats_inc(
+ &sctx->dev_root->
+ fs_info->dev_replace.
+ num_write_errors);
+ }
+ }
+ }
+
+ if (!sub_success) {
+ /*
+ * did not find a mirror from which to fetch the
+ * page. scrub_write_page_to_dev_replace()
+ * handles this case (page->io_error) by
+ * filling the block with zeros before
+ * submitting the write request
+ */
+ success = 0;
+ ret = scrub_write_page_to_dev_replace(
+ sblock_bad, page_num);
+ if (ret)
+ btrfs_dev_replace_stats_inc(
+ &sctx->dev_root->fs_info->
+ dev_replace.num_write_errors);
+ }
+ }
+
+ goto out;
+ }
+
+ /*
+ * for regular scrub, repair those pages that are errored.
+ * In case of I/O errors in the area that is supposed to be
* repaired, continue by picking good copies of those pages.
* Select the good pages from mirrors to rewrite bad pages from
* the area to fix. Afterwards verify the checksum of the block
@@ -887,7 +1087,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
success = 1;
for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
- struct scrub_page *page_bad = sblock_bad->pagev + page_num;
+ struct scrub_page *page_bad = sblock_bad->pagev[page_num];
if (!page_bad->io_error)
continue;
@@ -898,8 +1098,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
mirror_index++) {
struct scrub_block *sblock_other = sblocks_for_recheck +
mirror_index;
- struct scrub_page *page_other = sblock_other->pagev +
- page_num;
+ struct scrub_page *page_other = sblock_other->pagev[
+ page_num];
if (!page_other->io_error) {
ret = scrub_repair_page_from_good_copy(
@@ -928,10 +1128,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
* is verified, but most likely the data comes out
* of the page cache.
*/
- ret = scrub_recheck_block(fs_info, sblock_bad,
- is_metadata, have_csum, csum,
- generation, sdev->csum_size);
- if (!ret && !sblock_bad->header_error &&
+ scrub_recheck_block(fs_info, sblock_bad,
+ is_metadata, have_csum, csum,
+ generation, sctx->csum_size);
+ if (!sblock_bad->header_error &&
!sblock_bad->checksum_error &&
sblock_bad->no_io_error_seen)
goto corrected_error;
@@ -939,23 +1139,23 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
goto did_not_correct_error;
} else {
corrected_error:
- spin_lock(&sdev->stat_lock);
- sdev->stat.corrected_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.corrected_errors++;
+ spin_unlock(&sctx->stat_lock);
printk_ratelimited_in_rcu(KERN_ERR
"btrfs: fixed up error at logical %llu on dev %s\n",
(unsigned long long)logical,
- rcu_str_deref(sdev->dev->name));
+ rcu_str_deref(dev->name));
}
} else {
did_not_correct_error:
- spin_lock(&sdev->stat_lock);
- sdev->stat.uncorrectable_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.uncorrectable_errors++;
+ spin_unlock(&sctx->stat_lock);
printk_ratelimited_in_rcu(KERN_ERR
"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
(unsigned long long)logical,
- rcu_str_deref(sdev->dev->name));
+ rcu_str_deref(dev->name));
}
out:
@@ -966,11 +1166,11 @@ out:
mirror_index;
int page_index;
- for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
- page_index++)
- if (sblock->pagev[page_index].page)
- __free_page(
- sblock->pagev[page_index].page);
+ for (page_index = 0; page_index < sblock->page_count;
+ page_index++) {
+ sblock->pagev[page_index]->sblock = NULL;
+ scrub_page_put(sblock->pagev[page_index]);
+ }
}
kfree(sblocks_for_recheck);
}
@@ -978,8 +1178,9 @@ out:
return 0;
}
-static int scrub_setup_recheck_block(struct scrub_dev *sdev,
- struct btrfs_mapping_tree *map_tree,
+static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
+ struct btrfs_fs_info *fs_info,
+ struct scrub_block *original_sblock,
u64 length, u64 logical,
struct scrub_block *sblocks_for_recheck)
{
@@ -988,7 +1189,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
int ret;
/*
- * note: the three members sdev, ref_count and outstanding_pages
+ * note: the two members ref_count and outstanding_pages
* are not used (and not set) in the blocks that are used for
* the recheck procedure
*/
@@ -1003,14 +1204,14 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
- ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
- &bbio, 0);
+ ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical,
+ &mapped_length, &bbio, 0);
if (ret || !bbio || mapped_length < sublen) {
kfree(bbio);
return -EIO;
}
- BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
+ BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
mirror_index++) {
struct scrub_block *sblock;
@@ -1020,21 +1221,31 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
continue;
sblock = sblocks_for_recheck + mirror_index;
- page = sblock->pagev + page_index;
+ sblock->sctx = sctx;
+ page = kzalloc(sizeof(*page), GFP_NOFS);
+ if (!page) {
+leave_nomem:
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ kfree(bbio);
+ return -ENOMEM;
+ }
+ scrub_page_get(page);
+ sblock->pagev[page_index] = page;
page->logical = logical;
page->physical = bbio->stripes[mirror_index].physical;
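+ /* dev-replace writes use the offset recorded in the original block */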
+ BUG_ON(page_index >= original_sblock->page_count);
+ page->physical_for_dev_replace =
+ original_sblock->pagev[page_index]->
+ physical_for_dev_replace;
/* for missing devices, dev->bdev is NULL */
page->dev = bbio->stripes[mirror_index].dev;
page->mirror_num = mirror_index + 1;
- page->page = alloc_page(GFP_NOFS);
- if (!page->page) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.malloc_errors++;
- spin_unlock(&sdev->stat_lock);
- kfree(bbio);
- return -ENOMEM;
- }
sblock->page_count++;
+ page->page = alloc_page(GFP_NOFS);
+ if (!page->page)
+ goto leave_nomem;
}
kfree(bbio);
length -= sublen;
@@ -1052,10 +1263,10 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
* to take those pages that are not errored from all the mirrors so that
* the pages that are errored in the just handled mirror can be repaired.
*/
-static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
- struct scrub_block *sblock, int is_metadata,
- int have_csum, u8 *csum, u64 generation,
- u16 csum_size)
+static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
+ struct scrub_block *sblock, int is_metadata,
+ int have_csum, u8 *csum, u64 generation,
+ u16 csum_size)
{
int page_num;
@@ -1065,8 +1276,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
for (page_num = 0; page_num < sblock->page_count; page_num++) {
struct bio *bio;
- int ret;
- struct scrub_page *page = sblock->pagev + page_num;
+ struct scrub_page *page = sblock->pagev[page_num];
DECLARE_COMPLETION_ONSTACK(complete);
if (page->dev->bdev == NULL) {
@@ -1075,20 +1285,19 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
continue;
}
- BUG_ON(!page->page);
+ WARN_ON(!page->page);
bio = bio_alloc(GFP_NOFS, 1);
- if (!bio)
- return -EIO;
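+ /* treat a failed bio allocation like an I/O error on this
+ * page and continue with the remaining pages */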
+ if (!bio) {
+ page->io_error = 1;
+ sblock->no_io_error_seen = 0;
+ continue;
+ }
bio->bi_bdev = page->dev->bdev;
bio->bi_sector = page->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_private = &complete;
- ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
- if (PAGE_SIZE != ret) {
- bio_put(bio);
- return -EIO;
- }
+ bio_add_page(bio, page->page, PAGE_SIZE, 0);
btrfsic_submit_bio(READ, bio);
/* this will also unplug the queue */
@@ -1105,7 +1314,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
have_csum, csum, generation,
csum_size);
- return 0;
+ return;
}
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
@@ -1120,14 +1329,14 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
struct btrfs_root *root = fs_info->extent_root;
void *mapped_buffer;
- BUG_ON(!sblock->pagev[0].page);
+ WARN_ON(!sblock->pagev[0]->page);
if (is_metadata) {
struct btrfs_header *h;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+ mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
h = (struct btrfs_header *)mapped_buffer;
- if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
+ if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr) ||
memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE)) {
@@ -1141,7 +1350,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
if (!have_csum)
return;
- mapped_buffer = kmap_atomic(sblock->pagev[0].page);
+ mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
}
for (page_num = 0;;) {
@@ -1157,9 +1366,9 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
page_num++;
if (page_num >= sblock->page_count)
break;
- BUG_ON(!sblock->pagev[page_num].page);
+ WARN_ON(!sblock->pagev[page_num]->page);
- mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
+ mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
}
btrfs_csum_final(crc, calculated_csum);
@@ -1197,17 +1406,23 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good,
int page_num, int force_write)
{
- struct scrub_page *page_bad = sblock_bad->pagev + page_num;
- struct scrub_page *page_good = sblock_good->pagev + page_num;
+ struct scrub_page *page_bad = sblock_bad->pagev[page_num];
+ struct scrub_page *page_good = sblock_good->pagev[page_num];
- BUG_ON(sblock_bad->pagev[page_num].page == NULL);
- BUG_ON(sblock_good->pagev[page_num].page == NULL);
+ BUG_ON(page_bad->page == NULL);
+ BUG_ON(page_good->page == NULL);
if (force_write || sblock_bad->header_error ||
sblock_bad->checksum_error || page_bad->io_error) {
struct bio *bio;
int ret;
DECLARE_COMPLETION_ONSTACK(complete);
+ if (!page_bad->dev->bdev) {
+ printk_ratelimited(KERN_WARNING
+ "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
+ return -EIO;
+ }
+
bio = bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
@@ -1228,6 +1443,9 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (!bio_flagged(bio, BIO_UPTODATE)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
+ btrfs_dev_replace_stats_inc(
+ &sblock_bad->sctx->dev_root->fs_info->
+ dev_replace.num_write_errors);
bio_put(bio);
return -EIO;
}
@@ -1237,13 +1455,174 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
return 0;
}
-static void scrub_checksum(struct scrub_block *sblock)
+static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
+{
+ int page_num;
+
+ for (page_num = 0; page_num < sblock->page_count; page_num++) {
+ int ret;
+
+ ret = scrub_write_page_to_dev_replace(sblock, page_num);
+ if (ret)
+ btrfs_dev_replace_stats_inc(
+ &sblock->sctx->dev_root->fs_info->dev_replace.
+ num_write_errors);
+ }
+}
+
+static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
+ int page_num)
+{
+ struct scrub_page *spage = sblock->pagev[page_num];
+
+ BUG_ON(spage->page == NULL);
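+ /* pages that could not be read from any mirror are written as zeros */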
+ if (spage->io_error) {
+ void *mapped_buffer = kmap_atomic(spage->page);
+
+ memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
+ flush_dcache_page(spage->page);
+ kunmap_atomic(mapped_buffer);
+ }
+ return scrub_add_page_to_wr_bio(sblock->sctx, spage);
+}
+
+static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
+ struct scrub_page *spage)
+{
+ struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
+ struct scrub_bio *sbio;
+ int ret;
+
+ mutex_lock(&wr_ctx->wr_lock);
+again:
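+ /* allocate the current write bio on demand; pages that are not
+ * contiguous with it force a submit and a retry with a fresh bio */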
+ if (!wr_ctx->wr_curr_bio) {
+ wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
+ GFP_NOFS);
+ if (!wr_ctx->wr_curr_bio) {
+ mutex_unlock(&wr_ctx->wr_lock);
+ return -ENOMEM;
+ }
+ wr_ctx->wr_curr_bio->sctx = sctx;
+ wr_ctx->wr_curr_bio->page_count = 0;
+ }
+ sbio = wr_ctx->wr_curr_bio;
+ if (sbio->page_count == 0) {
+ struct bio *bio;
+
+ sbio->physical = spage->physical_for_dev_replace;
+ sbio->logical = spage->logical;
+ sbio->dev = wr_ctx->tgtdev;
+ bio = sbio->bio;
+ if (!bio) {
+ bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+ if (!bio) {
+ mutex_unlock(&wr_ctx->wr_lock);
+ return -ENOMEM;
+ }
+ sbio->bio = bio;
+ }
+
+ bio->bi_private = sbio;
+ bio->bi_end_io = scrub_wr_bio_end_io;
+ bio->bi_bdev = sbio->dev->bdev;
+ bio->bi_sector = sbio->physical >> 9;
+ sbio->err = 0;
+ } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
+ spage->physical_for_dev_replace ||
+ sbio->logical + sbio->page_count * PAGE_SIZE !=
+ spage->logical) {
+ scrub_wr_submit(sctx);
+ goto again;
+ }
+
+ ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
+ if (ret != PAGE_SIZE) {
+ if (sbio->page_count < 1) {
+ bio_put(sbio->bio);
+ sbio->bio = NULL;
+ mutex_unlock(&wr_ctx->wr_lock);
+ return -EIO;
+ }
+ scrub_wr_submit(sctx);
+ goto again;
+ }
+
+ sbio->pagev[sbio->page_count] = spage;
+ scrub_page_get(spage);
+ sbio->page_count++;
+ if (sbio->page_count == wr_ctx->pages_per_wr_bio)
+ scrub_wr_submit(sctx);
+ mutex_unlock(&wr_ctx->wr_lock);
+
+ return 0;
+}
+
+static void scrub_wr_submit(struct scrub_ctx *sctx)
+{
+ struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
+ struct scrub_bio *sbio;
+
+ if (!wr_ctx->wr_curr_bio)
+ return;
+
+ sbio = wr_ctx->wr_curr_bio;
+ wr_ctx->wr_curr_bio = NULL;
+ WARN_ON(!sbio->bio->bi_bdev);
+ scrub_pending_bio_inc(sctx);
+ /* process all writes in a single worker thread, so that the
+ * block layer can order the requests before sending them to
+ * the driver; this doubled the write performance on spinning
+ * disks when measured with Linux 3.5 */
+ btrfsic_submit_bio(WRITE, sbio->bio);
+}
+
+static void scrub_wr_bio_end_io(struct bio *bio, int err)
+{
+ struct scrub_bio *sbio = bio->bi_private;
+ struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
+
+ sbio->err = err;
+ sbio->bio = bio;
+
+ sbio->work.func = scrub_wr_bio_end_io_worker;
+ btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
+}
+
+static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
+{
+ struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
+ struct scrub_ctx *sctx = sbio->sctx;
+ int i;
+
+ WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
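+ /* on a write error, mark all pages of this bio as errored and
+ * account them in the dev-replace statistics */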
+ if (sbio->err) {
+ struct btrfs_dev_replace *dev_replace =
+ &sbio->sctx->dev_root->fs_info->dev_replace;
+
+ for (i = 0; i < sbio->page_count; i++) {
+ struct scrub_page *spage = sbio->pagev[i];
+
+ spage->io_error = 1;
+ btrfs_dev_replace_stats_inc(&dev_replace->
+ num_write_errors);
+ }
+ }
+
+ for (i = 0; i < sbio->page_count; i++)
+ scrub_page_put(sbio->pagev[i]);
+
+ bio_put(sbio->bio);
+ kfree(sbio);
+ scrub_pending_bio_dec(sctx);
+}
+
+static int scrub_checksum(struct scrub_block *sblock)
{
u64 flags;
int ret;
- BUG_ON(sblock->page_count < 1);
- flags = sblock->pagev[0].flags;
+ WARN_ON(sblock->page_count < 1);
+ flags = sblock->pagev[0]->flags;
ret = 0;
if (flags & BTRFS_EXTENT_FLAG_DATA)
ret = scrub_checksum_data(sblock);
@@ -1255,30 +1634,32 @@ static void scrub_checksum(struct scrub_block *sblock)
WARN_ON(1);
if (ret)
scrub_handle_errored_block(sblock);
+
+ return ret;
}
static int scrub_checksum_data(struct scrub_block *sblock)
{
- struct scrub_dev *sdev = sblock->sdev;
+ struct scrub_ctx *sctx = sblock->sctx;
u8 csum[BTRFS_CSUM_SIZE];
u8 *on_disk_csum;
struct page *page;
void *buffer;
u32 crc = ~(u32)0;
int fail = 0;
- struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_root *root = sctx->dev_root;
u64 len;
int index;
BUG_ON(sblock->page_count < 1);
- if (!sblock->pagev[0].have_csum)
+ if (!sblock->pagev[0]->have_csum)
return 0;
- on_disk_csum = sblock->pagev[0].csum;
- page = sblock->pagev[0].page;
+ on_disk_csum = sblock->pagev[0]->csum;
+ page = sblock->pagev[0]->page;
buffer = kmap_atomic(page);
- len = sdev->sectorsize;
+ len = sctx->sectorsize;
index = 0;
for (;;) {
u64 l = min_t(u64, len, PAGE_SIZE);
@@ -1290,13 +1671,13 @@ static int scrub_checksum_data(struct scrub_block *sblock)
break;
index++;
BUG_ON(index >= sblock->page_count);
- BUG_ON(!sblock->pagev[index].page);
- page = sblock->pagev[index].page;
+ BUG_ON(!sblock->pagev[index]->page);
+ page = sblock->pagev[index]->page;
buffer = kmap_atomic(page);
}
btrfs_csum_final(crc, csum);
- if (memcmp(csum, on_disk_csum, sdev->csum_size))
+ if (memcmp(csum, on_disk_csum, sctx->csum_size))
fail = 1;
return fail;
@@ -1304,9 +1685,9 @@ static int scrub_checksum_data(struct scrub_block *sblock)
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
- struct scrub_dev *sdev = sblock->sdev;
+ struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_header *h;
- struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1321,10 +1702,10 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
int index;
BUG_ON(sblock->page_count < 1);
- page = sblock->pagev[0].page;
+ page = sblock->pagev[0]->page;
mapped_buffer = kmap_atomic(page);
h = (struct btrfs_header *)mapped_buffer;
- memcpy(on_disk_csum, h->csum, sdev->csum_size);
+ memcpy(on_disk_csum, h->csum, sctx->csum_size);
/*
* we don't use the getter functions here, as we
@@ -1332,10 +1713,10 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
* b) the page is already kmapped
*/
- if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
+ if (sblock->pagev[0]->logical != le64_to_cpu(h->bytenr))
++fail;
- if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
+ if (sblock->pagev[0]->generation != le64_to_cpu(h->generation))
++fail;
if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1345,8 +1726,8 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
BTRFS_UUID_SIZE))
++fail;
- BUG_ON(sdev->nodesize != sdev->leafsize);
- len = sdev->nodesize - BTRFS_CSUM_SIZE;
+ WARN_ON(sctx->nodesize != sctx->leafsize);
+ len = sctx->nodesize - BTRFS_CSUM_SIZE;
mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
index = 0;
@@ -1360,15 +1741,15 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
break;
index++;
BUG_ON(index >= sblock->page_count);
- BUG_ON(!sblock->pagev[index].page);
- page = sblock->pagev[index].page;
+ BUG_ON(!sblock->pagev[index]->page);
+ page = sblock->pagev[index]->page;
mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
btrfs_csum_final(crc, calculated_csum);
- if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
+ if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
++crc_fail;
return fail || crc_fail;
@@ -1377,8 +1758,8 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
static int scrub_checksum_super(struct scrub_block *sblock)
{
struct btrfs_super_block *s;
- struct scrub_dev *sdev = sblock->sdev;
- struct btrfs_root *root = sdev->dev->dev_root;
+ struct scrub_ctx *sctx = sblock->sctx;
+ struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
@@ -1393,15 +1774,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
int index;
BUG_ON(sblock->page_count < 1);
- page = sblock->pagev[0].page;
+ page = sblock->pagev[0]->page;
mapped_buffer = kmap_atomic(page);
s = (struct btrfs_super_block *)mapped_buffer;
- memcpy(on_disk_csum, s->csum, sdev->csum_size);
+ memcpy(on_disk_csum, s->csum, sctx->csum_size);
- if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
+ if (sblock->pagev[0]->logical != le64_to_cpu(s->bytenr))
++fail_cor;
- if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
+ if (sblock->pagev[0]->generation != le64_to_cpu(s->generation))
++fail_gen;
if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1421,15 +1802,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
break;
index++;
BUG_ON(index >= sblock->page_count);
- BUG_ON(!sblock->pagev[index].page);
- page = sblock->pagev[index].page;
+ BUG_ON(!sblock->pagev[index]->page);
+ page = sblock->pagev[index]->page;
mapped_buffer = kmap_atomic(page);
mapped_size = PAGE_SIZE;
p = mapped_buffer;
}
btrfs_csum_final(crc, calculated_csum);
- if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
+ if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
++fail_cor;
if (fail_cor + fail_gen) {
@@ -1438,14 +1819,14 @@ static int scrub_checksum_super(struct scrub_block *sblock)
* They will get written with the next transaction commit
* anyway
*/
- spin_lock(&sdev->stat_lock);
- ++sdev->stat.super_errors;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ ++sctx->stat.super_errors;
+ spin_unlock(&sctx->stat_lock);
if (fail_cor)
- btrfs_dev_stat_inc_and_print(sdev->dev,
+ btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
BTRFS_DEV_STAT_CORRUPTION_ERRS);
else
- btrfs_dev_stat_inc_and_print(sdev->dev,
+ btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
BTRFS_DEV_STAT_GENERATION_ERRS);
}
@@ -1463,28 +1844,54 @@ static void scrub_block_put(struct scrub_block *sblock)
int i;
for (i = 0; i < sblock->page_count; i++)
- if (sblock->pagev[i].page)
- __free_page(sblock->pagev[i].page);
+ scrub_page_put(sblock->pagev[i]);
kfree(sblock);
}
}
-static void scrub_submit(struct scrub_dev *sdev)
+static void scrub_page_get(struct scrub_page *spage)
+{
+ atomic_inc(&spage->ref_count);
+}
+
+static void scrub_page_put(struct scrub_page *spage)
+{
+ if (atomic_dec_and_test(&spage->ref_count)) {
+ if (spage->page)
+ __free_page(spage->page);
+ kfree(spage);
+ }
+}
+
+static void scrub_submit(struct scrub_ctx *sctx)
{
struct scrub_bio *sbio;
- if (sdev->curr == -1)
+ if (sctx->curr == -1)
return;
- sbio = sdev->bios[sdev->curr];
- sdev->curr = -1;
- atomic_inc(&sdev->in_flight);
+ sbio = sctx->bios[sctx->curr];
+ sctx->curr = -1;
+ scrub_pending_bio_inc(sctx);
- btrfsic_submit_bio(READ, sbio->bio);
+ if (!sbio->bio->bi_bdev) {
+ /*
+ * this case should not happen. If btrfs_map_block() is
+ * wrong, it could happen for dev-replace operations on
+ * missing devices when no mirrors are available, but in
+ * that case the mount should already have failed.
+ * The error is handled correctly here (but _very_ slowly).
+ */
+ printk_ratelimited(KERN_WARNING
+ "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
+ bio_endio(sbio->bio, -EIO);
+ } else {
+ btrfsic_submit_bio(READ, sbio->bio);
+ }
}
-static int scrub_add_page_to_bio(struct scrub_dev *sdev,
- struct scrub_page *spage)
+static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
+ struct scrub_page *spage)
{
struct scrub_block *sblock = spage->sblock;
struct scrub_bio *sbio;
@@ -1494,28 +1901,29 @@ again:
/*
* grab a fresh bio or wait for one to become available
*/
- while (sdev->curr == -1) {
- spin_lock(&sdev->list_lock);
- sdev->curr = sdev->first_free;
- if (sdev->curr != -1) {
- sdev->first_free = sdev->bios[sdev->curr]->next_free;
- sdev->bios[sdev->curr]->next_free = -1;
- sdev->bios[sdev->curr]->page_count = 0;
- spin_unlock(&sdev->list_lock);
+ while (sctx->curr == -1) {
+ spin_lock(&sctx->list_lock);
+ sctx->curr = sctx->first_free;
+ if (sctx->curr != -1) {
+ sctx->first_free = sctx->bios[sctx->curr]->next_free;
+ sctx->bios[sctx->curr]->next_free = -1;
+ sctx->bios[sctx->curr]->page_count = 0;
+ spin_unlock(&sctx->list_lock);
} else {
- spin_unlock(&sdev->list_lock);
- wait_event(sdev->list_wait, sdev->first_free != -1);
+ spin_unlock(&sctx->list_lock);
+ wait_event(sctx->list_wait, sctx->first_free != -1);
}
}
- sbio = sdev->bios[sdev->curr];
+ sbio = sctx->bios[sctx->curr];
if (sbio->page_count == 0) {
struct bio *bio;
sbio->physical = spage->physical;
sbio->logical = spage->logical;
+ sbio->dev = spage->dev;
bio = sbio->bio;
if (!bio) {
- bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
+ bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
if (!bio)
return -ENOMEM;
sbio->bio = bio;
@@ -1523,14 +1931,15 @@ again:
bio->bi_private = sbio;
bio->bi_end_io = scrub_bio_end_io;
- bio->bi_bdev = sdev->dev->bdev;
- bio->bi_sector = spage->physical >> 9;
+ bio->bi_bdev = sbio->dev->bdev;
+ bio->bi_sector = sbio->physical >> 9;
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
sbio->logical + sbio->page_count * PAGE_SIZE !=
- spage->logical) {
- scrub_submit(sdev);
+ spage->logical ||
+ sbio->dev != spage->dev) {
+ scrub_submit(sctx);
goto again;
}
@@ -1542,81 +1951,87 @@ again:
sbio->bio = NULL;
return -EIO;
}
- scrub_submit(sdev);
+ scrub_submit(sctx);
goto again;
}
- scrub_block_get(sblock); /* one for the added page */
+ scrub_block_get(sblock); /* one for the page added to the bio */
atomic_inc(&sblock->outstanding_pages);
sbio->page_count++;
- if (sbio->page_count == sdev->pages_per_bio)
- scrub_submit(sdev);
+ if (sbio->page_count == sctx->pages_per_rd_bio)
+ scrub_submit(sctx);
return 0;
}
-static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
- u64 physical, u64 flags, u64 gen, int mirror_num,
- u8 *csum, int force)
+static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+ u64 physical, struct btrfs_device *dev, u64 flags,
+ u64 gen, int mirror_num, u8 *csum, int force,
+ u64 physical_for_dev_replace)
{
struct scrub_block *sblock;
int index;
sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
if (!sblock) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.malloc_errors++;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
return -ENOMEM;
}
- /* one ref inside this function, plus one for each page later on */
+ /* one ref inside this function, plus one for each page added to
+ * a bio later on */
atomic_set(&sblock->ref_count, 1);
- sblock->sdev = sdev;
+ sblock->sctx = sctx;
sblock->no_io_error_seen = 1;
for (index = 0; len > 0; index++) {
- struct scrub_page *spage = sblock->pagev + index;
+ struct scrub_page *spage;
u64 l = min_t(u64, len, PAGE_SIZE);
- BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
- spage->page = alloc_page(GFP_NOFS);
- if (!spage->page) {
- spin_lock(&sdev->stat_lock);
- sdev->stat.malloc_errors++;
- spin_unlock(&sdev->stat_lock);
- while (index > 0) {
- index--;
- __free_page(sblock->pagev[index].page);
- }
- kfree(sblock);
+ spage = kzalloc(sizeof(*spage), GFP_NOFS);
+ if (!spage) {
+leave_nomem:
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ scrub_block_put(sblock);
return -ENOMEM;
}
+ BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
+ scrub_page_get(spage);
+ sblock->pagev[index] = spage;
spage->sblock = sblock;
- spage->dev = sdev->dev;
+ spage->dev = dev;
spage->flags = flags;
spage->generation = gen;
spage->logical = logical;
spage->physical = physical;
+ spage->physical_for_dev_replace = physical_for_dev_replace;
spage->mirror_num = mirror_num;
if (csum) {
spage->have_csum = 1;
- memcpy(spage->csum, csum, sdev->csum_size);
+ memcpy(spage->csum, csum, sctx->csum_size);
} else {
spage->have_csum = 0;
}
sblock->page_count++;
+ spage->page = alloc_page(GFP_NOFS);
+ if (!spage->page)
+ goto leave_nomem;
len -= l;
logical += l;
physical += l;
+ physical_for_dev_replace += l;
}
- BUG_ON(sblock->page_count == 0);
+ WARN_ON(sblock->page_count == 0);
for (index = 0; index < sblock->page_count; index++) {
- struct scrub_page *spage = sblock->pagev + index;
+ struct scrub_page *spage = sblock->pagev[index];
int ret;
- ret = scrub_add_page_to_bio(sdev, spage);
+ ret = scrub_add_page_to_rd_bio(sctx, spage);
if (ret) {
scrub_block_put(sblock);
return ret;
@@ -1624,7 +2039,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
}
if (force)
- scrub_submit(sdev);
+ scrub_submit(sctx);
/* last one frees, either here or in bio completion for last page */
scrub_block_put(sblock);
@@ -1634,8 +2049,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
static void scrub_bio_end_io(struct bio *bio, int err)
{
struct scrub_bio *sbio = bio->bi_private;
- struct scrub_dev *sdev = sbio->sdev;
- struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
sbio->err = err;
sbio->bio = bio;
@@ -1646,10 +2060,10 @@ static void scrub_bio_end_io(struct bio *bio, int err)
static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
- struct scrub_dev *sdev = sbio->sdev;
+ struct scrub_ctx *sctx = sbio->sctx;
int i;
- BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
+ BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
if (sbio->err) {
for (i = 0; i < sbio->page_count; i++) {
struct scrub_page *spage = sbio->pagev[i];
@@ -1671,23 +2085,37 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
bio_put(sbio->bio);
sbio->bio = NULL;
- spin_lock(&sdev->list_lock);
- sbio->next_free = sdev->first_free;
- sdev->first_free = sbio->index;
- spin_unlock(&sdev->list_lock);
- atomic_dec(&sdev->in_flight);
- wake_up(&sdev->list_wait);
+ spin_lock(&sctx->list_lock);
+ sbio->next_free = sctx->first_free;
+ sctx->first_free = sbio->index;
+ spin_unlock(&sctx->list_lock);
+
+ if (sctx->is_dev_replace &&
+ atomic_read(&sctx->wr_ctx.flush_all_writes)) {
+ mutex_lock(&sctx->wr_ctx.wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_ctx.wr_lock);
+ }
+
+ scrub_pending_bio_dec(sctx);
}
static void scrub_block_complete(struct scrub_block *sblock)
{
- if (!sblock->no_io_error_seen)
+ if (!sblock->no_io_error_seen) {
scrub_handle_errored_block(sblock);
- else
- scrub_checksum(sblock);
+ } else {
+ /*
+ * if the block has a checksum error, it is written to the
+ * dev-replace target by the repair code path; otherwise,
+ * in the dev-replace case, write it to the target here.
+ */
+ if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace)
+ scrub_write_block_to_dev_replace(sblock);
+ }
}
-static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
+static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
u8 *csum)
{
struct btrfs_ordered_sum *sum = NULL;
@@ -1695,15 +2123,15 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
unsigned long i;
unsigned long num_sectors;
- while (!list_empty(&sdev->csum_list)) {
- sum = list_first_entry(&sdev->csum_list,
+ while (!list_empty(&sctx->csum_list)) {
+ sum = list_first_entry(&sctx->csum_list,
struct btrfs_ordered_sum, list);
if (sum->bytenr > logical)
return 0;
if (sum->bytenr + sum->len > logical)
break;
- ++sdev->stat.csum_discards;
+ ++sctx->stat.csum_discards;
list_del(&sum->list);
kfree(sum);
sum = NULL;
@@ -1711,10 +2139,10 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
if (!sum)
return 0;
- num_sectors = sum->len / sdev->sectorsize;
+ num_sectors = sum->len / sctx->sectorsize;
for (i = 0; i < num_sectors; ++i) {
if (sum->sums[i].bytenr == logical) {
- memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
+ memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
ret = 1;
break;
}
@@ -1727,29 +2155,30 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
}
/* scrub extent tries to collect up to 64 kB for each bio */
-static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
- u64 physical, u64 flags, u64 gen, int mirror_num)
+static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
+ u64 physical, struct btrfs_device *dev, u64 flags,
+ u64 gen, int mirror_num, u64 physical_for_dev_replace)
{
int ret;
u8 csum[BTRFS_CSUM_SIZE];
u32 blocksize;
if (flags & BTRFS_EXTENT_FLAG_DATA) {
- blocksize = sdev->sectorsize;
- spin_lock(&sdev->stat_lock);
- sdev->stat.data_extents_scrubbed++;
- sdev->stat.data_bytes_scrubbed += len;
- spin_unlock(&sdev->stat_lock);
+ blocksize = sctx->sectorsize;
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.data_extents_scrubbed++;
+ sctx->stat.data_bytes_scrubbed += len;
+ spin_unlock(&sctx->stat_lock);
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- BUG_ON(sdev->nodesize != sdev->leafsize);
- blocksize = sdev->nodesize;
- spin_lock(&sdev->stat_lock);
- sdev->stat.tree_extents_scrubbed++;
- sdev->stat.tree_bytes_scrubbed += len;
- spin_unlock(&sdev->stat_lock);
+ WARN_ON(sctx->nodesize != sctx->leafsize);
+ blocksize = sctx->nodesize;
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.tree_extents_scrubbed++;
+ sctx->stat.tree_bytes_scrubbed += len;
+ spin_unlock(&sctx->stat_lock);
} else {
- blocksize = sdev->sectorsize;
- BUG_ON(1);
+ blocksize = sctx->sectorsize;
+ WARN_ON(1);
}
while (len) {
@@ -1758,26 +2187,38 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
if (flags & BTRFS_EXTENT_FLAG_DATA) {
/* push csums to sbio */
- have_csum = scrub_find_csum(sdev, logical, l, csum);
+ have_csum = scrub_find_csum(sctx, logical, l, csum);
if (have_csum == 0)
- ++sdev->stat.no_csum;
+ ++sctx->stat.no_csum;
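+ /* data without checksum might not be COWed; copy it
+ * for dev-replace via the nocow path */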
+ if (sctx->is_dev_replace && !have_csum) {
+ ret = copy_nocow_pages(sctx, logical, l,
+ mirror_num,
+ physical_for_dev_replace);
+ goto behind_scrub_pages;
+ }
}
- ret = scrub_pages(sdev, logical, l, physical, flags, gen,
- mirror_num, have_csum ? csum : NULL, 0);
+ ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
+ mirror_num, have_csum ? csum : NULL, 0,
+ physical_for_dev_replace);
+behind_scrub_pages:
if (ret)
return ret;
len -= l;
logical += l;
physical += l;
+ physical_for_dev_replace += l;
}
return 0;
}
-static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
- struct map_lookup *map, int num, u64 base, u64 length)
+static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
+ struct map_lookup *map,
+ struct btrfs_device *scrub_dev,
+ int num, u64 base, u64 length,
+ int is_dev_replace)
{
struct btrfs_path *path;
- struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
@@ -1797,9 +2238,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
struct reada_control *reada2;
struct btrfs_key key_start;
struct btrfs_key key_end;
-
u64 increment = map->stripe_len;
u64 offset;
+ u64 extent_logical;
+ u64 extent_physical;
+ u64 extent_len;
+ struct btrfs_device *extent_dev;
+ int extent_mirror_num;
nstripes = length;
offset = 0;
@@ -1843,8 +2288,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
*/
logical = base + offset;
- wait_event(sdev->list_wait,
- atomic_read(&sdev->in_flight) == 0);
+ wait_event(sctx->list_wait,
+ atomic_read(&sctx->bios_in_flight) == 0);
atomic_inc(&fs_info->scrubs_paused);
wake_up(&fs_info->scrub_pause_wait);
@@ -1898,7 +2343,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
* canceled?
*/
if (atomic_read(&fs_info->scrub_cancel_req) ||
- atomic_read(&sdev->cancel_req)) {
+ atomic_read(&sctx->cancel_req)) {
ret = -ECANCELED;
goto out;
}
@@ -1907,9 +2352,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
*/
if (atomic_read(&fs_info->scrub_pause_req)) {
/* push queued extents */
- scrub_submit(sdev);
- wait_event(sdev->list_wait,
- atomic_read(&sdev->in_flight) == 0);
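+ /* let the read-completion workers flush their pending
+ * write bios while this scrub waits to pause */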
+ atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_ctx.wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_ctx.wr_lock);
+ wait_event(sctx->list_wait,
+ atomic_read(&sctx->bios_in_flight) == 0);
+ atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
atomic_inc(&fs_info->scrubs_paused);
wake_up(&fs_info->scrub_pause_wait);
mutex_lock(&fs_info->scrub_lock);
@@ -1926,7 +2376,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
ret = btrfs_lookup_csums_range(csum_root, logical,
logical + map->stripe_len - 1,
- &sdev->csum_list, 1);
+ &sctx->csum_list, 1);
if (ret)
goto out;
@@ -2004,9 +2454,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
key.objectid;
}
- ret = scrub_extent(sdev, key.objectid, key.offset,
- key.objectid - logical + physical,
- flags, generation, mirror_num);
+ extent_logical = key.objectid;
+ extent_physical = key.objectid - logical + physical;
+ extent_len = key.offset;
+ extent_dev = scrub_dev;
+ extent_mirror_num = mirror_num;
+ if (is_dev_replace)
+ scrub_remap_extent(fs_info, extent_logical,
+ extent_len, &extent_physical,
+ &extent_dev,
+ &extent_mirror_num);
+ ret = scrub_extent(sctx, extent_logical, extent_len,
+ extent_physical, extent_dev, flags,
+ generation, extent_mirror_num,
+ key.objectid - logical + physical);
if (ret)
goto out;
@@ -2016,29 +2477,34 @@ next:
btrfs_release_path(path);
logical += increment;
physical += map->stripe_len;
- spin_lock(&sdev->stat_lock);
- sdev->stat.last_physical = physical;
- spin_unlock(&sdev->stat_lock);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = physical;
+ spin_unlock(&sctx->stat_lock);
}
+out:
/* push queued extents */
- scrub_submit(sdev);
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_ctx.wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_ctx.wr_lock);
-out:
blk_finish_plug(&plug);
btrfs_free_path(path);
return ret < 0 ? ret : 0;
}
-static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
- u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
- u64 dev_offset)
+static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev,
+ u64 chunk_tree, u64 chunk_objectid,
+ u64 chunk_offset, u64 length,
+ u64 dev_offset, int is_dev_replace)
{
struct btrfs_mapping_tree *map_tree =
- &sdev->dev->dev_root->fs_info->mapping_tree;
+ &sctx->dev_root->fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
int i;
- int ret = -EINVAL;
+ int ret = 0;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
@@ -2055,9 +2521,11 @@ static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
goto out;
for (i = 0; i < map->num_stripes; ++i) {
- if (map->stripes[i].dev == sdev->dev &&
+ if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
- ret = scrub_stripe(sdev, map, i, chunk_offset, length);
+ ret = scrub_stripe(sctx, map, scrub_dev, i,
+ chunk_offset, length,
+ is_dev_replace);
if (ret)
goto out;
}
@@ -2069,11 +2537,13 @@ out:
}
static noinline_for_stack
-int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
+int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev, u64 start, u64 end,
+ int is_dev_replace)
{
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
- struct btrfs_root *root = sdev->dev->dev_root;
+ struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 length;
u64 chunk_tree;
@@ -2085,6 +2555,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_block_group_cache *cache;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
path = btrfs_alloc_path();
if (!path)
@@ -2094,11 +2565,10 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
path->search_commit_root = 1;
path->skip_locking = 1;
- key.objectid = sdev->dev->devid;
+ key.objectid = scrub_dev->devid;
key.offset = 0ull;
key.type = BTRFS_DEV_EXTENT_KEY;
-
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -2117,7 +2587,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
btrfs_item_key_to_cpu(l, &found_key, slot);
- if (found_key.objectid != sdev->dev->devid)
+ if (found_key.objectid != scrub_dev->devid)
break;
if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
@@ -2151,11 +2621,62 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
ret = -ENOENT;
break;
}
- ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
- chunk_offset, length, found_key.offset);
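+ /* note the device extent currently being processed in the
+ * dev-replace item */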
+ dev_replace->cursor_right = found_key.offset + length;
+ dev_replace->cursor_left = found_key.offset;
+ dev_replace->item_needs_writeback = 1;
+ ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
+ chunk_offset, length, found_key.offset,
+ is_dev_replace);
+
+ /*
+ * flush and submit all pending read and write bios, then
+ * wait for them.
+ * Note that in the dev-replace case, a read request causes
+ * write requests that are submitted in the read completion
+ * worker. Therefore all write requests must be flushed here
+ * so that all read and write requests have really completed
+ * when bios_in_flight drops to 0.
+ */
+ atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_ctx.wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_ctx.wr_lock);
+
+ wait_event(sctx->list_wait,
+ atomic_read(&sctx->bios_in_flight) == 0);
+ atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+ atomic_inc(&fs_info->scrubs_paused);
+ wake_up(&fs_info->scrub_pause_wait);
+ wait_event(sctx->list_wait,
+ atomic_read(&sctx->workers_pending) == 0);
+
+ mutex_lock(&fs_info->scrub_lock);
+ while (atomic_read(&fs_info->scrub_pause_req)) {
+ mutex_unlock(&fs_info->scrub_lock);
+ wait_event(fs_info->scrub_pause_wait,
+ atomic_read(&fs_info->scrub_pause_req) == 0);
+ mutex_lock(&fs_info->scrub_lock);
+ }
+ atomic_dec(&fs_info->scrubs_paused);
+ mutex_unlock(&fs_info->scrub_lock);
+ wake_up(&fs_info->scrub_pause_wait);
+
+ dev_replace->cursor_left = dev_replace->cursor_right;
+ dev_replace->item_needs_writeback = 1;
btrfs_put_block_group(cache);
if (ret)
break;
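+ /* abort if the dev-replace target saw write errors or if
+ * memory allocations failed */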
+ if (is_dev_replace &&
+ atomic64_read(&dev_replace->num_write_errors) > 0) {
+ ret = -EIO;
+ break;
+ }
+ if (sctx->stat.malloc_errors > 0) {
+ ret = -ENOMEM;
+ break;
+ }
key.offset = found_key.offset + length;
btrfs_release_path(path);
@@ -2170,14 +2691,14 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
return ret < 0 ? ret : 0;
}
-static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
+static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
+ struct btrfs_device *scrub_dev)
{
int i;
u64 bytenr;
u64 gen;
int ret;
- struct btrfs_device *device = sdev->dev;
- struct btrfs_root *root = device->dev_root;
+ struct btrfs_root *root = sctx->dev_root;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
return -EIO;
@@ -2186,15 +2707,16 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
break;
- ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
- BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
+ ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
+ scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
+ NULL, 1, bytenr);
if (ret)
return ret;
}
- wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
return 0;
}
@@ -2202,19 +2724,38 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
/*
* get a reference count on fs_info->scrub_workers. start worker if necessary
*/
-static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
+static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
+ int is_dev_replace)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
- btrfs_init_workers(&fs_info->scrub_workers, "scrub",
- fs_info->thread_pool_size, &fs_info->generic_worker);
+ if (is_dev_replace)
+ btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
+ &fs_info->generic_worker);
+ else
+ btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
ret = btrfs_start_workers(&fs_info->scrub_workers);
if (ret)
goto out;
+ btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
+ "scrubwrc",
+ fs_info->thread_pool_size,
+ &fs_info->generic_worker);
+ fs_info->scrub_wr_completion_workers.idle_thresh = 2;
+ ret = btrfs_start_workers(
+ &fs_info->scrub_wr_completion_workers);
+ if (ret)
+ goto out;
+ btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
+ &fs_info->generic_worker);
+ ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
+ if (ret)
+ goto out;
}
++fs_info->scrub_workers_refcnt;
out:
@@ -2223,40 +2764,41 @@ out:
return ret;
}
-static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
+static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
-
mutex_lock(&fs_info->scrub_lock);
- if (--fs_info->scrub_workers_refcnt == 0)
+ if (--fs_info->scrub_workers_refcnt == 0) {
btrfs_stop_workers(&fs_info->scrub_workers);
+ btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
+ btrfs_stop_workers(&fs_info->scrub_nocow_workers);
+ }
WARN_ON(fs_info->scrub_workers_refcnt < 0);
mutex_unlock(&fs_info->scrub_lock);
}
-
-int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
- struct btrfs_scrub_progress *progress, int readonly)
+int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
+ u64 end, struct btrfs_scrub_progress *progress,
+ int readonly, int is_dev_replace)
{
- struct scrub_dev *sdev;
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
- if (btrfs_fs_closing(root->fs_info))
+ if (btrfs_fs_closing(fs_info))
return -EINVAL;
/*
* check some assumptions
*/
- if (root->nodesize != root->leafsize) {
+ if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
printk(KERN_ERR
"btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
- root->nodesize, root->leafsize);
+ fs_info->chunk_root->nodesize,
+ fs_info->chunk_root->leafsize);
return -EINVAL;
}
- if (root->nodesize > BTRFS_STRIPE_LEN) {
+ if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
/*
* in this case scrub is unable to calculate the checksum
* the way scrub is implemented. Do not handle this
@@ -2264,80 +2806,105 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
*/
printk(KERN_ERR
"btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
- root->nodesize, BTRFS_STRIPE_LEN);
+ fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
return -EINVAL;
}
- if (root->sectorsize != PAGE_SIZE) {
+ if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
printk(KERN_ERR
"btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
- root->sectorsize, (unsigned long long)PAGE_SIZE);
+ fs_info->chunk_root->sectorsize,
+ (unsigned long long)PAGE_SIZE);
return -EINVAL;
}
- ret = scrub_workers_get(root);
+ if (fs_info->chunk_root->nodesize >
+ PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
+ fs_info->chunk_root->sectorsize >
+ PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
+ /*
+ * would exhaust the array bounds of pagev member in
+ * struct scrub_block
+ */
+ pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
+ fs_info->chunk_root->nodesize,
+ SCRUB_MAX_PAGES_PER_BLOCK,
+ fs_info->chunk_root->sectorsize,
+ SCRUB_MAX_PAGES_PER_BLOCK);
+ return -EINVAL;
+ }
+
+ ret = scrub_workers_get(fs_info, is_dev_replace);
if (ret)
return ret;
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(root, devid, NULL, NULL);
- if (!dev || dev->missing) {
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
- scrub_workers_put(root);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ if (!dev || (dev->missing && !is_dev_replace)) {
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(fs_info);
return -ENODEV;
}
mutex_lock(&fs_info->scrub_lock);
- if (!dev->in_fs_metadata) {
+ if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
- scrub_workers_put(root);
- return -ENODEV;
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(fs_info);
+ return -EIO;
}
- if (dev->scrub_device) {
+ btrfs_dev_replace_lock(&fs_info->dev_replace);
+ if (dev->scrub_device ||
+ (!is_dev_replace &&
+ btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
- scrub_workers_put(root);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(fs_info);
return -EINPROGRESS;
}
- sdev = scrub_setup_dev(dev);
- if (IS_ERR(sdev)) {
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
+ sctx = scrub_setup_ctx(dev, is_dev_replace);
+ if (IS_ERR(sctx)) {
mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
- scrub_workers_put(root);
- return PTR_ERR(sdev);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ scrub_workers_put(fs_info);
+ return PTR_ERR(sctx);
}
- sdev->readonly = readonly;
- dev->scrub_device = sdev;
+ sctx->readonly = readonly;
+ dev->scrub_device = sctx;
atomic_inc(&fs_info->scrubs_running);
mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- down_read(&fs_info->scrub_super_lock);
- ret = scrub_supers(sdev);
- up_read(&fs_info->scrub_super_lock);
+ if (!is_dev_replace) {
+ down_read(&fs_info->scrub_super_lock);
+ ret = scrub_supers(sctx, dev);
+ up_read(&fs_info->scrub_super_lock);
+ }
if (!ret)
- ret = scrub_enumerate_chunks(sdev, start, end);
+ ret = scrub_enumerate_chunks(sctx, dev, start, end,
+ is_dev_replace);
- wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
wake_up(&fs_info->scrub_pause_wait);
- wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
+ wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
if (progress)
- memcpy(progress, &sdev->stat, sizeof(*progress));
+ memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_lock(&fs_info->scrub_lock);
dev->scrub_device = NULL;
mutex_unlock(&fs_info->scrub_lock);
- scrub_free_dev(sdev);
- scrub_workers_put(root);
+ scrub_free_ctx(sctx);
+ scrub_workers_put(fs_info);
return ret;
}
@@ -2377,9 +2944,8 @@ void btrfs_scrub_continue_super(struct btrfs_root *root)
up_write(&root->fs_info->scrub_super_lock);
}
-int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
+int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
-
mutex_lock(&fs_info->scrub_lock);
if (!atomic_read(&fs_info->scrubs_running)) {
mutex_unlock(&fs_info->scrub_lock);
@@ -2399,23 +2965,18 @@ int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_scrub_cancel(struct btrfs_root *root)
+int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *dev)
{
- return __btrfs_scrub_cancel(root->fs_info);
-}
-
-int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct scrub_dev *sdev;
+ struct scrub_ctx *sctx;
mutex_lock(&fs_info->scrub_lock);
- sdev = dev->scrub_device;
- if (!sdev) {
+ sctx = dev->scrub_device;
+ if (!sctx) {
mutex_unlock(&fs_info->scrub_lock);
return -ENOTCONN;
}
- atomic_inc(&sdev->cancel_req);
+ atomic_inc(&sctx->cancel_req);
while (dev->scrub_device) {
mutex_unlock(&fs_info->scrub_lock);
wait_event(fs_info->scrub_pause_wait,
@@ -2438,12 +2999,12 @@ int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
* does not go away in cancel_dev. FIXME: find a better solution
*/
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(root, devid, NULL, NULL);
+ dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (!dev) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return -ENODEV;
}
- ret = btrfs_scrub_cancel_dev(root, dev);
+ ret = btrfs_scrub_cancel_dev(fs_info, dev);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return ret;
@@ -2453,15 +3014,284 @@ int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
struct btrfs_scrub_progress *progress)
{
struct btrfs_device *dev;
- struct scrub_dev *sdev = NULL;
+ struct scrub_ctx *sctx = NULL;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(root, devid, NULL, NULL);
+ dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
if (dev)
- sdev = dev->scrub_device;
- if (sdev)
- memcpy(progress, &sdev->stat, sizeof(*progress));
+ sctx = dev->scrub_device;
+ if (sctx)
+ memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
- return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
+ return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
+}
+
+static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
+ u64 extent_logical, u64 extent_len,
+ u64 *extent_physical,
+ struct btrfs_device **extent_dev,
+ int *extent_mirror_num)
+{
+ u64 mapped_length;
+ struct btrfs_bio *bbio = NULL;
+ int ret;
+
+ mapped_length = extent_len;
+ ret = btrfs_map_block(fs_info, READ, extent_logical,
+ &mapped_length, &bbio, 0);
+ if (ret || !bbio || mapped_length < extent_len ||
+ !bbio->stripes[0].dev->bdev) {
+ kfree(bbio);
+ return;
+ }
+
+ *extent_physical = bbio->stripes[0].physical;
+ *extent_mirror_num = bbio->mirror_num;
+ *extent_dev = bbio->stripes[0].dev;
+ kfree(bbio);
+}
+
+static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
+ struct scrub_wr_ctx *wr_ctx,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_device *dev,
+ int is_dev_replace)
+{
+ WARN_ON(wr_ctx->wr_curr_bio != NULL);
+
+ mutex_init(&wr_ctx->wr_lock);
+ wr_ctx->wr_curr_bio = NULL;
+ if (!is_dev_replace)
+ return 0;
+
+ WARN_ON(!dev->bdev);
+ wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
+ bio_get_nr_vecs(dev->bdev));
+ wr_ctx->tgtdev = dev;
+ atomic_set(&wr_ctx->flush_all_writes, 0);
+ return 0;
+}
+
+static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
+{
+ mutex_lock(&wr_ctx->wr_lock);
+ kfree(wr_ctx->wr_curr_bio);
+ wr_ctx->wr_curr_bio = NULL;
+ mutex_unlock(&wr_ctx->wr_lock);
+}
+
+static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+ int mirror_num, u64 physical_for_dev_replace)
+{
+ struct scrub_copy_nocow_ctx *nocow_ctx;
+ struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+
+ nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
+ if (!nocow_ctx) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return -ENOMEM;
+ }
+
+ scrub_pending_trans_workers_inc(sctx);
+
+ nocow_ctx->sctx = sctx;
+ nocow_ctx->logical = logical;
+ nocow_ctx->len = len;
+ nocow_ctx->mirror_num = mirror_num;
+ nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
+ nocow_ctx->work.func = copy_nocow_pages_worker;
+ btrfs_queue_worker(&fs_info->scrub_nocow_workers,
+ &nocow_ctx->work);
+
+ return 0;
+}
+
+static void copy_nocow_pages_worker(struct btrfs_work *work)
+{
+ struct scrub_copy_nocow_ctx *nocow_ctx =
+ container_of(work, struct scrub_copy_nocow_ctx, work);
+ struct scrub_ctx *sctx = nocow_ctx->sctx;
+ u64 logical = nocow_ctx->logical;
+ u64 len = nocow_ctx->len;
+ int mirror_num = nocow_ctx->mirror_num;
+ u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
+ int ret;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_path *path;
+ struct btrfs_root *root;
+ int not_written = 0;
+
+ fs_info = sctx->dev_root->fs_info;
+ root = fs_info->extent_root;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ not_written = 1;
+ goto out;
+ }
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ not_written = 1;
+ goto out;
+ }
+
+ ret = iterate_inodes_from_logical(logical, fs_info, path,
+ copy_nocow_pages_for_inode,
+ nocow_ctx);
+ if (ret != 0 && ret != -ENOENT) {
+ pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %llu, ret %d\n",
+ (unsigned long long)logical,
+ (unsigned long long)physical_for_dev_replace,
+ (unsigned long long)len,
+ (unsigned long long)mirror_num, ret);
+ not_written = 1;
+ goto out;
+ }
+
+out:
+ if (trans && !IS_ERR(trans))
+ btrfs_end_transaction(trans, root);
+ if (not_written)
+ btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
+ num_uncorrectable_read_errors);
+
+ btrfs_free_path(path);
+ kfree(nocow_ctx);
+
+ scrub_pending_trans_workers_dec(sctx);
+}
+
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
+{
+ unsigned long index;
+ struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
+ int ret = 0;
+ struct btrfs_key key;
+ struct inode *inode = NULL;
+ struct btrfs_root *local_root;
+ u64 physical_for_dev_replace;
+ u64 len;
+ struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+
+ key.objectid = root;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ local_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(local_root))
+ return PTR_ERR(local_root);
+
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.objectid = inum;
+ key.offset = 0;
+ inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
+ len = nocow_ctx->len;
+ while (len >= PAGE_CACHE_SIZE) {
+ struct page *page = NULL;
+ int ret_sub;
+
+ index = offset >> PAGE_CACHE_SHIFT;
+
+ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+ if (!page) {
+ pr_err("find_or_create_page() failed\n");
+ ret = -ENOMEM;
+ goto next_page;
+ }
+
+ if (PageUptodate(page)) {
+ if (PageDirty(page))
+ goto next_page;
+ } else {
+ ClearPageError(page);
+ ret_sub = extent_read_full_page(&BTRFS_I(inode)->
+ io_tree,
+ page, btrfs_get_extent,
+ nocow_ctx->mirror_num);
+ if (ret_sub) {
+ ret = ret_sub;
+ goto next_page;
+ }
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ ret = -EIO;
+ goto next_page;
+ }
+ }
+ ret_sub = write_page_nocow(nocow_ctx->sctx,
+ physical_for_dev_replace, page);
+ if (ret_sub) {
+ ret = ret_sub;
+ goto next_page;
+ }
+
+next_page:
+ if (page) {
+ unlock_page(page);
+ put_page(page);
+ }
+ offset += PAGE_CACHE_SIZE;
+ physical_for_dev_replace += PAGE_CACHE_SIZE;
+ len -= PAGE_CACHE_SIZE;
+ }
+
+ if (inode)
+ iput(inode);
+ return ret;
+}
+
+static int write_page_nocow(struct scrub_ctx *sctx,
+ u64 physical_for_dev_replace, struct page *page)
+{
+ struct bio *bio;
+ struct btrfs_device *dev;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(compl);
+
+ dev = sctx->wr_ctx.tgtdev;
+ if (!dev)
+ return -EIO;
+ if (!dev->bdev) {
+ printk_ratelimited(KERN_WARNING
+ "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
+ return -EIO;
+ }
+ bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.malloc_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return -ENOMEM;
+ }
+ bio->bi_private = &compl;
+ bio->bi_end_io = scrub_complete_bio_end_io;
+ bio->bi_size = 0;
+ bio->bi_sector = physical_for_dev_replace >> 9;
+ bio->bi_bdev = dev->bdev;
+ ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+ if (ret != PAGE_CACHE_SIZE) {
+leave_with_eio:
+ bio_put(bio);
+ btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
+ return -EIO;
+ }
+ btrfsic_submit_bio(WRITE_SYNC, bio);
+ wait_for_completion(&compl);
+
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ goto leave_with_eio;
+
+ bio_put(bio);
+ return 0;
}
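
The scrub.c changes above turn the per-device scrub state into a scrub_ctx and let the same machinery feed dev-replace writes, with scrub_workers_get()/scrub_workers_put() lazily starting and stopping the shared worker pools under fs_info->scrub_lock. Below is a rough userspace sketch of that get/put pattern only; the names, the pthread mutex and the printf stand-ins are invented here, while the kernel code uses btrfs_init_workers()/btrfs_start_workers()/btrfs_stop_workers().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_refcnt;    /* how many scrub-like users currently hold the pool */
static int pool_running;   /* stand-in for the started worker threads */

/* first caller starts the shared workers, every caller takes a reference */
static void workers_get(void)
{
    pthread_mutex_lock(&pool_lock);
    if (pool_refcnt == 0) {
        pool_running = 1;
        printf("workers started\n");
    }
    pool_refcnt++;
    pthread_mutex_unlock(&pool_lock);
}

/* last caller to drop its reference stops the shared workers */
static void workers_put(void)
{
    pthread_mutex_lock(&pool_lock);
    if (--pool_refcnt == 0) {
        pool_running = 0;
        printf("workers stopped\n");
    }
    pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
    workers_get();          /* e.g. a scrub */
    workers_get();          /* e.g. a concurrent dev-replace */
    workers_put();          /* pool keeps running, refcount is still 1 */
    workers_put();          /* pool stops here */
    return pool_running;    /* 0 on success */
}

Keeping the refcount and the start/stop transition under one lock is what lets a scrub and a device replace share the pools without racing their construction or teardown.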
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index e78b297b0b0..54454542ad4 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4397,9 +4397,9 @@ static int full_send_tree(struct send_ctx *sctx)
if (!path)
return -ENOMEM;
- spin_lock(&send_root->root_times_lock);
+ spin_lock(&send_root->root_item_lock);
start_ctransid = btrfs_root_ctransid(&send_root->root_item);
- spin_unlock(&send_root->root_times_lock);
+ spin_unlock(&send_root->root_item_lock);
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_INODE_ITEM_KEY;
@@ -4422,9 +4422,9 @@ join_trans:
* Make sure the tree has not changed after re-joining. We detect this
* by comparing start_ctransid and ctransid. They should always match.
*/
- spin_lock(&send_root->root_times_lock);
+ spin_lock(&send_root->root_item_lock);
ctransid = btrfs_root_ctransid(&send_root->root_item);
- spin_unlock(&send_root->root_times_lock);
+ spin_unlock(&send_root->root_item_lock);
if (ctransid != start_ctransid) {
WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
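
The send.c hunk above only renames root_times_lock to root_item_lock, but the surrounding code shows the idiom the lock protects: snapshot ctransid under the lock before the walk, re-read it after re-joining the transaction, and bail if it moved. The standalone sketch below models that detect-change idiom only; the struct and field names are invented and a pthread mutex stands in for the btrfs spinlock.

#include <pthread.h>
#include <stdio.h>

struct root {
    pthread_mutex_t item_lock;
    unsigned long long ctransid;   /* bumped whenever the tree changes */
};

static unsigned long long read_ctransid(struct root *r)
{
    unsigned long long v;

    pthread_mutex_lock(&r->item_lock);
    v = r->ctransid;
    pthread_mutex_unlock(&r->item_lock);
    return v;
}

static int send_tree(struct root *r)
{
    unsigned long long start = read_ctransid(r);

    /* ... join the transaction and walk the tree here ... */

    if (read_ctransid(r) != start) {
        fprintf(stderr, "root changed while sending, aborting\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    struct root r = { PTHREAD_MUTEX_INITIALIZER, 42 };
    return send_tree(&r) ? 1 : 0;
}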
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 915ac14c206..99545df1b86 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -55,6 +55,7 @@
#include "export.h"
#include "compression.h"
#include "rcu-string.h"
+#include "dev-replace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
@@ -116,7 +117,16 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
sb->s_flags |= MS_RDONLY;
printk(KERN_INFO "btrfs is forced readonly\n");
- __btrfs_scrub_cancel(fs_info);
+ /*
+ * Note that a running device replace operation is not
+ * canceled here although there is no way to update
+ * the progress. It would add the risk of a deadlock,
+ * therefore the canceling is omitted. The only penalty
+ * is that some I/O remains active until the procedure
+ * completes. The next time the filesystem is
+ * mounted writeable again, the device replace
+ * operation continues.
+ */
// WARN_ON(1);
}
}
@@ -1186,7 +1196,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
btrfs_set_max_workers(&fs_info->endio_freespace_worker, new_pool_size);
btrfs_set_max_workers(&fs_info->delayed_workers, new_pool_size);
btrfs_set_max_workers(&fs_info->readahead_workers, new_pool_size);
- btrfs_set_max_workers(&fs_info->scrub_workers, new_pool_size);
+ btrfs_set_max_workers(&fs_info->scrub_wr_completion_workers,
+ new_pool_size);
}
static int btrfs_remount(struct super_block *sb, int *flags, char *data)
@@ -1215,8 +1226,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
return 0;
if (*flags & MS_RDONLY) {
+ /*
+ * this also happens on 'umount -rf' or on shutdown, when
+ * the filesystem is busy.
+ */
sb->s_flags |= MS_RDONLY;
+ btrfs_dev_replace_suspend_for_unmount(fs_info);
+ btrfs_scrub_cancel(fs_info);
+
ret = btrfs_commit_super(root);
if (ret)
goto restore;
@@ -1226,6 +1244,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
+ if (fs_info->fs_devices->missing_devices >
+ fs_info->num_tolerated_disk_barrier_failures &&
+ !(*flags & MS_RDONLY)) {
+ printk(KERN_WARNING
+ "Btrfs: too many missing devices, writeable remount is not allowed\n");
+ ret = -EACCES;
+ goto restore;
+ }
+
if (btrfs_super_log_root(fs_info->super_copy) != 0) {
ret = -EINVAL;
goto restore;
@@ -1244,6 +1271,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto restore;
+ ret = btrfs_resume_dev_replace_async(fs_info);
+ if (ret) {
+ pr_warn("btrfs: failed to resume dev_replace\n");
+ goto restore;
+ }
sb->s_flags &= ~MS_RDONLY;
}
@@ -1336,7 +1368,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
min_stripe_size = BTRFS_STRIPE_LEN;
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (!device->in_fs_metadata || !device->bdev)
+ if (!device->in_fs_metadata || !device->bdev ||
+ device->is_tgtdev_for_dev_replace)
continue;
avail_space = device->total_bytes - device->bytes_used;
@@ -1647,10 +1680,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_ordered_data;
- err = btrfs_interface_init();
+ err = btrfs_auto_defrag_init();
if (err)
goto free_delayed_inode;
+ err = btrfs_interface_init();
+ if (err)
+ goto free_auto_defrag;
+
err = register_filesystem(&btrfs_fs_type);
if (err)
goto unregister_ioctl;
@@ -1662,6 +1699,8 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
+free_auto_defrag:
+ btrfs_auto_defrag_exit();
free_delayed_inode:
btrfs_delayed_inode_exit();
free_ordered_data:
@@ -1681,6 +1720,7 @@ free_compress:
static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
+ btrfs_auto_defrag_exit();
btrfs_delayed_inode_exit();
ordered_data_exit();
extent_map_exit();
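
The init_btrfs_fs() hunk above slots btrfs_auto_defrag_init() into the existing goto-based unwind chain and adds the matching free_auto_defrag label. The compilable toy version below shows only that ordering rule, with placeholder step names rather than the real btrfs initializers.

#include <stdio.h>

static int  step_a_init(void) { puts("A up");   return 0; }
static void step_a_exit(void) { puts("A down"); }
static int  step_b_init(void) { puts("B up");   return 0; }
static void step_b_exit(void) { puts("B down"); }
static int  step_c_init(void) { puts("C up");   return -1; /* simulate failure */ }

static int module_init_like(void)
{
    int err;

    err = step_a_init();
    if (err)
        return err;

    err = step_b_init();
    if (err)
        goto free_a;

    err = step_c_init();
    if (err)
        goto free_b;

    return 0;

free_b:
    step_b_exit();          /* undo only what already succeeded, in reverse */
free_a:
    step_a_exit();
    return err;
}

int main(void)
{
    return module_init_like() ? 1 : 0;
}

Each failure label undoes exactly the steps completed so far, which is why the new free_auto_defrag label lands between unregister_ioctl and free_delayed_inode in the patch.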
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 04bbfb1052e..87fac9a21ea 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -30,6 +30,7 @@
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
+#include "dev-replace.h"
#define BTRFS_ROOT_TRANS_TAG 0
@@ -145,16 +146,12 @@ loop:
* the log must never go across transaction boundaries.
*/
smp_mb();
- if (!list_empty(&fs_info->tree_mod_seq_list)) {
- printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
+ if (!list_empty(&fs_info->tree_mod_seq_list))
+ WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
"creating a fresh transaction\n");
- WARN_ON(1);
- }
- if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
- printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
+ if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
+ WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
"creating a fresh transaction\n");
- WARN_ON(1);
- }
atomic_set(&fs_info->tree_mod_seq, 0);
spin_lock_init(&cur_trans->commit_lock);
@@ -295,9 +292,9 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
return 0;
}
-static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
- u64 num_items, int type,
- int noflush)
+static struct btrfs_trans_handle *
+start_transaction(struct btrfs_root *root, u64 num_items, int type,
+ enum btrfs_reserve_flush_enum flush)
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
@@ -312,6 +309,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
h = current->journal_info;
h->use_count++;
+ WARN_ON(h->use_count > 2);
h->orig_rsv = h->block_rsv;
h->block_rsv = NULL;
goto got_it;
@@ -331,14 +329,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
}
num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
- if (noflush)
- ret = btrfs_block_rsv_add_noflush(root,
- &root->fs_info->trans_block_rsv,
- num_bytes);
- else
- ret = btrfs_block_rsv_add(root,
- &root->fs_info->trans_block_rsv,
- num_bytes);
+ ret = btrfs_block_rsv_add(root,
+ &root->fs_info->trans_block_rsv,
+ num_bytes, flush);
if (ret)
return ERR_PTR(ret);
}
@@ -422,13 +415,15 @@ got_it:
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items)
{
- return start_transaction(root, num_items, TRANS_START, 0);
+ return start_transaction(root, num_items, TRANS_START,
+ BTRFS_RESERVE_FLUSH_ALL);
}
-struct btrfs_trans_handle *btrfs_start_transaction_noflush(
+struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_root *root, int num_items)
{
- return start_transaction(root, num_items, TRANS_START, 1);
+ return start_transaction(root, num_items, TRANS_START,
+ BTRFS_RESERVE_FLUSH_LIMIT);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
@@ -461,28 +456,31 @@ static noinline void wait_for_commit(struct btrfs_root *root,
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
struct btrfs_transaction *cur_trans = NULL, *t;
- int ret;
+ int ret = 0;
- ret = 0;
if (transid) {
if (transid <= root->fs_info->last_trans_committed)
goto out;
+ ret = -EINVAL;
/* find specified transaction */
spin_lock(&root->fs_info->trans_lock);
list_for_each_entry(t, &root->fs_info->trans_list, list) {
if (t->transid == transid) {
cur_trans = t;
atomic_inc(&cur_trans->use_count);
+ ret = 0;
break;
}
- if (t->transid > transid)
+ if (t->transid > transid) {
+ ret = 0;
break;
+ }
}
spin_unlock(&root->fs_info->trans_lock);
- ret = -EINVAL;
+ /* The specified transaction doesn't exist */
if (!cur_trans)
- goto out; /* bad transid */
+ goto out;
} else {
/* find newest transaction that is committing | committed */
spin_lock(&root->fs_info->trans_lock);
@@ -502,9 +500,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
}
wait_for_commit(root, cur_trans);
-
put_transaction(cur_trans);
- ret = 0;
out:
return ret;
}
@@ -851,7 +847,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
return ret;
ret = btrfs_run_dev_stats(trans, root->fs_info);
- BUG_ON(ret);
+ WARN_ON(ret);
+ ret = btrfs_run_dev_replace(trans, root->fs_info);
+ WARN_ON(ret);
ret = btrfs_run_qgroups(trans, root->fs_info);
BUG_ON(ret);
@@ -874,6 +872,8 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
switch_commit_root(fs_info->extent_root);
up_write(&fs_info->extent_commit_sem);
+ btrfs_after_dev_replace_commit(fs_info);
+
return 0;
}
@@ -958,7 +958,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret;
- unsigned long nr;
if (xchg(&root->defrag_running, 1))
return 0;
@@ -970,9 +969,8 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
ret = btrfs_defrag_leaves(trans, root, cacheonly);
- nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(info->tree_root, nr);
+ btrfs_btree_balance_dirty(info->tree_root);
cond_resched();
if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
@@ -1032,8 +1030,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
if (to_reserve > 0) {
- ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
- to_reserve);
+ ret = btrfs_block_rsv_add(root, &pending->block_rsv,
+ to_reserve,
+ BTRFS_RESERVE_NO_FLUSH);
if (ret) {
pending->error = ret;
goto no_free_objectid;
@@ -1191,7 +1190,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent_inode, &key,
BTRFS_FT_DIR, index);
/* We have checked the name at the beginning, so it is impossible. */
- BUG_ON(ret == -EEXIST);
+ BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto fail;
@@ -1309,9 +1308,10 @@ static void do_async_commit(struct work_struct *work)
* We've got freeze protection passed with the transaction.
* Tell lockdep about it.
*/
- rwsem_acquire_read(
- &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
- 0, 1, _THIS_IP_);
+ if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
+ rwsem_acquire_read(
+ &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
current->journal_info = ac->newtrans;
@@ -1349,8 +1349,10 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
* Tell lockdep we've released the freeze rwsem, since the
* async commit thread will be the one to unlock it.
*/
- rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
- 1, _THIS_IP_);
+ if (trans->type < TRANS_JOIN_NOLOCK)
+ rwsem_release(
+ &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 1, _THIS_IP_);
schedule_delayed_work(&ac->work, 0);
@@ -1400,6 +1402,48 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
+static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
+ int snap_pending = 0;
+ int ret;
+
+ if (!flush_on_commit) {
+ spin_lock(&root->fs_info->trans_lock);
+ if (!list_empty(&trans->transaction->pending_snapshots))
+ snap_pending = 1;
+ spin_unlock(&root->fs_info->trans_lock);
+ }
+
+ if (flush_on_commit || snap_pending) {
+ btrfs_start_delalloc_inodes(root, 1);
+ btrfs_wait_ordered_extents(root, 1);
+ }
+
+ ret = btrfs_run_delayed_items(trans, root);
+ if (ret)
+ return ret;
+
+ /*
+ * running the delayed items may have added new refs. account
+ * them now so that they hinder processing of more delayed refs
+ * as little as possible.
+ */
+ btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
+ /*
+ * rename doesn't use btrfs_join_transaction, so, once we
+ * set the transaction to blocked above, we aren't going
+ * to get any new ordered operations. We can safely run
+ * it here and know for sure that nothing new will be added
+ * to the list
+ */
+ btrfs_run_ordered_operations(root, 1);
+
+ return 0;
+}
+
/*
* btrfs_transaction state sequence:
* in_commit = 0, blocked = 0 (initial)
@@ -1414,15 +1458,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
DEFINE_WAIT(wait);
- int ret = -EIO;
+ int ret;
int should_grow = 0;
unsigned long now = get_seconds();
- int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
- btrfs_run_ordered_operations(root, 0);
+ ret = btrfs_run_ordered_operations(root, 0);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto cleanup_transaction;
+ }
- if (cur_trans->aborted)
+ if (cur_trans->aborted) {
+ ret = cur_trans->aborted;
goto cleanup_transaction;
+ }
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
@@ -1490,39 +1539,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
should_grow = 1;
do {
- int snap_pending = 0;
-
joined = cur_trans->num_joined;
- if (!list_empty(&trans->transaction->pending_snapshots))
- snap_pending = 1;
WARN_ON(cur_trans != trans->transaction);
- if (flush_on_commit || snap_pending) {
- btrfs_start_delalloc_inodes(root, 1);
- btrfs_wait_ordered_extents(root, 1);
- }
-
- ret = btrfs_run_delayed_items(trans, root);
+ ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;
- /*
- * running the delayed items may have added new refs. account
- * them now so that they hinder processing of more delayed refs
- * as little as possible.
- */
- btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
-
- /*
- * rename don't use btrfs_join_transaction, so, once we
- * set the transaction to blocked above, we aren't going
- * to get any new ordered operations. We can safely run
- * it here and no for sure that nothing new will be added
- * to the list
- */
- btrfs_run_ordered_operations(root, 1);
-
prepare_to_wait(&cur_trans->writer_wait, &wait,
TASK_UNINTERRUPTIBLE);
@@ -1535,6 +1559,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
} while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
+ ret = btrfs_flush_all_pending_stuffs(trans, root);
+ if (ret)
+ goto cleanup_transaction;
+
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
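
start_transaction() above stops taking a noflush boolean and instead threads an enum btrfs_reserve_flush_enum down to a single btrfs_block_rsv_add() call, so the separate _noflush reservation variant can go away. The small self-contained sketch below shows only that flag-to-enum shape; the enum spellings follow the patch, everything else is made up.

#include <stdio.h>

enum reserve_flush {
    RESERVE_NO_FLUSH,      /* fail fast, never flush */
    RESERVE_FLUSH_LIMIT,   /* flush a bounded amount (the lflush starts) */
    RESERVE_FLUSH_ALL,     /* flush as much as needed */
};

/* one reserve helper takes a policy instead of pairing a *_noflush variant
 * with a boolean argument */
static int block_rsv_add(unsigned long long bytes, enum reserve_flush flush)
{
    switch (flush) {
    case RESERVE_NO_FLUSH:
        printf("reserve %llu bytes, no flushing\n", bytes);
        break;
    case RESERVE_FLUSH_LIMIT:
        printf("reserve %llu bytes, limited flushing\n", bytes);
        break;
    case RESERVE_FLUSH_ALL:
        printf("reserve %llu bytes, flush until satisfied\n", bytes);
        break;
    }
    return 0;
}

int main(void)
{
    block_rsv_add(4096, RESERVE_FLUSH_ALL);    /* btrfs_start_transaction() */
    block_rsv_add(4096, RESERVE_FLUSH_LIMIT);  /* btrfs_start_transaction_lflush() */
    block_rsv_add(4096, RESERVE_NO_FLUSH);     /* snapshot reservation path */
    return 0;
}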
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 80961947a6b..0e8aa1e6c28 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -105,7 +105,7 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items);
-struct btrfs_trans_handle *btrfs_start_transaction_noflush(
+struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_root *root, int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 81e407d9677..83186c7e45d 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2952,33 +2952,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode, int log_inode_only)
{
- btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
- btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
- btrfs_set_inode_mode(leaf, item, inode->i_mode);
- btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
-
- btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_nsec);
-
- btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_nsec);
-
- btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_nsec);
-
- btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
-
- btrfs_set_inode_sequence(leaf, item, inode->i_version);
- btrfs_set_inode_transid(leaf, item, trans->transid);
- btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
- btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
- btrfs_set_inode_block_group(leaf, item, 0);
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
if (log_inode_only) {
/* set the generation to zero so the recover code
@@ -2986,14 +2962,63 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* just to say 'this inode exists' and a logging
* to say 'update this inode with these values'
*/
- btrfs_set_inode_generation(leaf, item, 0);
- btrfs_set_inode_size(leaf, item, 0);
+ btrfs_set_token_inode_generation(leaf, item, 0, &token);
+ btrfs_set_token_inode_size(leaf, item, 0, &token);
} else {
- btrfs_set_inode_generation(leaf, item,
- BTRFS_I(inode)->generation);
- btrfs_set_inode_size(leaf, item, inode->i_size);
- }
+ btrfs_set_token_inode_generation(leaf, item,
+ BTRFS_I(inode)->generation,
+ &token);
+ btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
+ }
+
+ btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
+ btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+ btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
+ btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_nsec, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_nsec, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_nsec, &token);
+
+ btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
+ &token);
+
+ btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
+ btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
+ btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
+ btrfs_set_token_inode_block_group(leaf, item, 0, &token);
+}
+static int log_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log, struct btrfs_path *path,
+ struct inode *inode)
+{
+ struct btrfs_inode_item *inode_item;
+ struct btrfs_key key;
+ int ret;
+
+ memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
+ ret = btrfs_insert_empty_item(trans, log, path, &key,
+ sizeof(*inode_item));
+ if (ret && ret != -EEXIST)
+ return ret;
+ inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+ btrfs_release_path(path);
+ return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
@@ -3130,151 +3155,234 @@ static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
-struct log_args {
- struct extent_buffer *src;
- u64 next_offset;
- int start_slot;
- int nr;
-};
+static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ struct extent_map *em,
+ struct btrfs_path *path)
+{
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ struct btrfs_key key, new_key;
+ struct btrfs_map_token token;
+ u64 extent_end;
+ u64 extent_offset = 0;
+ int extent_type;
+ int del_slot = 0;
+ int del_nr = 0;
+ int ret = 0;
+
+ while (1) {
+ btrfs_init_map_token(&token);
+ leaf = path->nodes[0];
+ path->slots[0]++;
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ if (del_nr) {
+ ret = btrfs_del_items(trans, root, path,
+ del_slot, del_nr);
+ if (ret)
+ return ret;
+ del_nr = 0;
+ }
+
+ ret = btrfs_next_leaf_write(trans, root, path, 1);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+ leaf = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != btrfs_ino(inode) ||
+ key.type != BTRFS_EXTENT_DATA_KEY ||
+ key.offset >= em->start + em->len)
+ break;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ extent_offset = btrfs_token_file_extent_offset(leaf,
+ fi, &token);
+ extent_end = key.offset +
+ btrfs_token_file_extent_num_bytes(leaf, fi,
+ &token);
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ extent_end = key.offset +
+ btrfs_file_extent_inline_len(leaf, fi);
+ } else {
+ BUG();
+ }
+
+ if (extent_end <= em->len + em->start) {
+ if (!del_nr) {
+ del_slot = path->slots[0];
+ }
+ del_nr++;
+ continue;
+ }
+
+ /*
+ * Ok so we'll ignore previous items if we log a new extent,
+ * which can lead to overlapping extents, so if we have an
+ * existing extent we want to adjust we _have_ to check the next
+ * guy to make sure we even need this extent anymore, this keeps
+ * us from panicking in set_item_key_safe.
+ */
+ if (path->slots[0] < btrfs_header_nritems(leaf) - 1) {
+ struct btrfs_key tmp_key;
+
+ btrfs_item_key_to_cpu(leaf, &tmp_key,
+ path->slots[0] + 1);
+ if (tmp_key.objectid == btrfs_ino(inode) &&
+ tmp_key.type == BTRFS_EXTENT_DATA_KEY &&
+ tmp_key.offset <= em->start + em->len) {
+ if (!del_nr)
+ del_slot = path->slots[0];
+ del_nr++;
+ continue;
+ }
+ }
+
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = em->start + em->len;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+ extent_offset += em->start + em->len - key.offset;
+ btrfs_set_token_file_extent_offset(leaf, fi, extent_offset,
+ &token);
+ btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end -
+ (em->start + em->len),
+ &token);
+ btrfs_mark_buffer_dirty(leaf);
+ }
+
+ if (del_nr)
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+
+ return ret;
+}
static int log_one_extent(struct btrfs_trans_handle *trans,
struct inode *inode, struct btrfs_root *root,
- struct extent_map *em, struct btrfs_path *path,
- struct btrfs_path *dst_path, struct log_args *args)
+ struct extent_map *em, struct btrfs_path *path)
{
struct btrfs_root *log = root->log_root;
struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ struct list_head ordered_sums;
+ struct btrfs_map_token token;
struct btrfs_key key;
- u64 start = em->mod_start;
- u64 search_start = start;
- u64 len = em->mod_len;
- u64 num_bytes;
- int nritems;
+ u64 csum_offset = em->mod_start - em->start;
+ u64 csum_len = em->mod_len;
+ u64 extent_offset = em->start - em->orig_start;
+ u64 block_len;
int ret;
+ bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (BTRFS_I(inode)->logged_trans == trans->transid) {
- ret = __btrfs_drop_extents(trans, log, inode, dst_path, start,
- start + len, NULL, 0);
- if (ret)
- return ret;
+ INIT_LIST_HEAD(&ordered_sums);
+ btrfs_init_map_token(&token);
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = em->start;
+ path->really_keep_locks = 1;
+
+ ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
+ if (ret && ret != -EEXIST) {
+ path->really_keep_locks = 0;
+ return ret;
}
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
+ &token);
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ skip_csum = true;
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_PREALLOC,
+ &token);
+ } else {
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG,
+ &token);
+ if (em->block_start == 0)
+ skip_csum = true;
+ }
+
+ block_len = max(em->block_len, em->orig_block_len);
+ if (em->compress_type != BTRFS_COMPRESS_NONE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start,
+ &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start -
+ extent_offset, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
+ &token);
+ }
+
+ btrfs_set_token_file_extent_offset(leaf, fi,
+ em->start - em->orig_start,
+ &token);
+ btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
+ btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token);
+ btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
+ &token);
+ btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
+ btrfs_mark_buffer_dirty(leaf);
- while (len) {
- if (args->nr)
- goto next_slot;
-again:
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = search_start;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- return ret;
-
- if (ret) {
- /*
- * A rare case were we can have an em for a section of a
- * larger extent so we need to make sure that this em
- * falls within the extent we've found. If not we just
- * bail and go back to ye-olde way of doing things but
- * it happens often enough in testing that we need to do
- * this dance to make sure.
- */
- do {
- if (path->slots[0] == 0) {
- btrfs_release_path(path);
- if (search_start == 0)
- return -ENOENT;
- search_start--;
- goto again;
- }
-
- path->slots[0]--;
- btrfs_item_key_to_cpu(path->nodes[0], &key,
- path->slots[0]);
- if (key.objectid != btrfs_ino(inode) ||
- key.type != BTRFS_EXTENT_DATA_KEY) {
- btrfs_release_path(path);
- return -ENOENT;
- }
- } while (key.offset > start);
+ /*
+ * Have to check the extent to the right of us to make sure it doesn't
+ * fall in our current range. We're ok if the previous extent is in our
+ * range since the recovery stuff will run us in key order and thus just
+ * drop the part we overwrote.
+ */
+ ret = drop_adjacent_extents(trans, log, inode, em, path);
+ btrfs_release_path(path);
+ path->really_keep_locks = 0;
+ if (ret) {
+ return ret;
+ }
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- num_bytes = btrfs_file_extent_num_bytes(path->nodes[0],
- fi);
- if (key.offset + num_bytes <= start) {
- btrfs_release_path(path);
- return -ENOENT;
- }
- }
- args->src = path->nodes[0];
-next_slot:
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- fi = btrfs_item_ptr(args->src, path->slots[0],
- struct btrfs_file_extent_item);
- if (args->nr &&
- args->start_slot + args->nr == path->slots[0]) {
- args->nr++;
- } else if (args->nr) {
- ret = copy_items(trans, inode, dst_path, args->src,
- args->start_slot, args->nr,
- LOG_INODE_ALL);
- if (ret)
- return ret;
- args->nr = 1;
- args->start_slot = path->slots[0];
- } else if (!args->nr) {
- args->nr = 1;
- args->start_slot = path->slots[0];
- }
- nritems = btrfs_header_nritems(path->nodes[0]);
- path->slots[0]++;
- num_bytes = btrfs_file_extent_num_bytes(args->src, fi);
- if (len < num_bytes) {
- /* I _think_ this is ok, envision we write to a
- * preallocated space that is adjacent to a previously
- * written preallocated space that gets merged when we
- * mark this preallocated space written. If we do not
- * have the adjacent extent in cache then when we copy
- * this extent it could end up being larger than our EM
- * thinks it is, which is a-ok, so just set len to 0.
- */
- len = 0;
- } else {
- len -= num_bytes;
- }
- start = key.offset + num_bytes;
- args->next_offset = start;
- search_start = start;
+ if (skip_csum)
+ return 0;
- if (path->slots[0] < nritems) {
- if (len)
- goto next_slot;
- break;
- }
+ /* block start is already adjusted for the file extent offset. */
+ ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
+ em->block_start + csum_offset,
+ em->block_start + csum_offset +
+ csum_len - 1, &ordered_sums, 0);
+ if (ret)
+ return ret;
- if (args->nr) {
- ret = copy_items(trans, inode, dst_path, args->src,
- args->start_slot, args->nr,
- LOG_INODE_ALL);
- if (ret)
- return ret;
- args->nr = 0;
- btrfs_release_path(path);
- }
+ while (!list_empty(&ordered_sums)) {
+ struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
+ struct btrfs_ordered_sum,
+ list);
+ if (!ret)
+ ret = btrfs_csum_file_blocks(trans, log, sums);
+ list_del(&sums->list);
+ kfree(sums);
}
- return 0;
+ return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
- struct btrfs_path *path,
- struct btrfs_path *dst_path)
+ struct btrfs_path *path)
{
- struct log_args args;
struct extent_map *em, *n;
struct list_head extents;
struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
@@ -3283,8 +3391,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&extents);
- memset(&args, 0, sizeof(args));
-
write_lock(&tree->lock);
test_gen = root->fs_info->last_trans_committed;
@@ -3317,34 +3423,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
write_unlock(&tree->lock);
- /*
- * If the previous EM and the last extent we left off on aren't
- * sequential then we need to copy the items we have and redo
- * our search
- */
- if (args.nr && em->mod_start != args.next_offset) {
- ret = copy_items(trans, inode, dst_path, args.src,
- args.start_slot, args.nr,
- LOG_INODE_ALL);
- if (ret) {
- free_extent_map(em);
- write_lock(&tree->lock);
- continue;
- }
- btrfs_release_path(path);
- args.nr = 0;
- }
-
- ret = log_one_extent(trans, inode, root, em, path, dst_path, &args);
+ ret = log_one_extent(trans, inode, root, em, path);
free_extent_map(em);
write_lock(&tree->lock);
}
WARN_ON(!list_empty(&extents));
write_unlock(&tree->lock);
- if (!ret && args.nr)
- ret = copy_items(trans, inode, dst_path, args.src,
- args.start_slot, args.nr, LOG_INODE_ALL);
btrfs_release_path(path);
return ret;
}
@@ -3400,7 +3485,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
/* today the code can only do partial logging of directories */
- if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode) ||
+ (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags) &&
+ inode_only == LOG_INODE_EXISTS))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
@@ -3432,14 +3520,28 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
} else {
if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags)) {
+ clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+ &BTRFS_I(inode)->runtime_flags);
ret = btrfs_truncate_inode_items(trans, log,
inode, 0, 0);
- } else {
- fast_search = true;
+ } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+ &BTRFS_I(inode)->runtime_flags)) {
+ if (inode_only == LOG_INODE_ALL)
+ fast_search = true;
max_key.type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino,
- BTRFS_XATTR_ITEM_KEY);
+ max_key.type);
+ } else {
+ if (inode_only == LOG_INODE_ALL)
+ fast_search = true;
+ ret = log_inode_item(trans, log, dst_path, inode);
+ if (ret) {
+ err = ret;
+ goto out_unlock;
+ }
+ goto log_extents;
}
+
}
if (ret) {
err = ret;
@@ -3518,11 +3620,10 @@ next_slot:
ins_nr = 0;
}
+log_extents:
if (fast_search) {
- btrfs_release_path(path);
btrfs_release_path(dst_path);
- ret = btrfs_log_changed_extents(trans, root, inode, path,
- dst_path);
+ ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
if (ret) {
err = ret;
goto out_unlock;
@@ -3531,8 +3632,10 @@ next_slot:
struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em, *n;
+ write_lock(&tree->lock);
list_for_each_entry_safe(em, n, &tree->modified_extents, list)
list_del_init(&em->list);
+ write_unlock(&tree->lock);
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
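
fill_inode_item() above is rewritten to go through a btrfs_map_token, so the burst of consecutive field setters can reuse one cached mapping of the extent-buffer page instead of re-deriving it for every field. The standalone model below only imitates that caching idea; the buffer layout, page size handling and setter names are invented and far simpler than the kernel's (offsets are assumed not to straddle a page boundary).

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

struct ebuf {
    unsigned char data[2 * PAGE_SZ];   /* pretend two-page extent buffer */
};

struct map_token {
    struct ebuf *eb;
    long cached_page;          /* -1: nothing mapped yet */
    unsigned char *kaddr;      /* start of the cached page */
};

static void init_map_token(struct map_token *tok)
{
    tok->eb = NULL;
    tok->cached_page = -1;
    tok->kaddr = NULL;
}

/* return a pointer for 'off', reusing the page cached in the token if possible */
static unsigned char *map_offset(struct ebuf *eb, size_t off, struct map_token *tok)
{
    long page = (long)(off / PAGE_SZ);

    if (tok->eb != eb || tok->cached_page != page) {
        tok->eb = eb;                        /* cache miss: (re)map the page */
        tok->cached_page = page;
        tok->kaddr = eb->data + page * PAGE_SZ;
    }
    return tok->kaddr + off % PAGE_SZ;       /* cache hit costs no remapping */
}

static void set_u64_token(struct ebuf *eb, size_t off, unsigned long long val,
                          struct map_token *tok)
{
    memcpy(map_offset(eb, off, tok), &val, sizeof(val));
}

int main(void)
{
    struct ebuf eb = { { 0 } };
    struct map_token tok;
    unsigned long long first;

    init_map_token(&tok);
    set_u64_token(&eb, 0, 1, &tok);     /* maps page 0 */
    set_u64_token(&eb, 8, 2, &tok);     /* reuses the cached mapping */
    set_u64_token(&eb, 16, 3, &tok);    /* reuses it again */
    memcpy(&first, eb.data, sizeof(first));
    printf("first field: %llu\n", first);
    return 0;
}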
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0f5ebb72a5e..5cce6aa7401 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -25,7 +25,6 @@
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
-#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
@@ -36,6 +35,8 @@
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
+#include "math.h"
+#include "dev-replace.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -71,6 +72,19 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
kfree(fs_devices);
}
+static void btrfs_kobject_uevent(struct block_device *bdev,
+ enum kobject_action action)
+{
+ int ret;
+
+ ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
+ if (ret)
+ pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
+ action,
+ kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
+ &disk_to_dev(bdev->bd_disk)->kobj);
+}
+
void btrfs_cleanup_fs_uuids(void)
{
struct btrfs_fs_devices *fs_devices;
@@ -108,6 +122,44 @@ static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
return NULL;
}
+static int
+btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
+ int flush, struct block_device **bdev,
+ struct buffer_head **bh)
+{
+ int ret;
+
+ *bdev = blkdev_get_by_path(device_path, flags, holder);
+
+ if (IS_ERR(*bdev)) {
+ ret = PTR_ERR(*bdev);
+ printk(KERN_INFO "btrfs: open %s failed\n", device_path);
+ goto error;
+ }
+
+ if (flush)
+ filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
+ ret = set_blocksize(*bdev, 4096);
+ if (ret) {
+ blkdev_put(*bdev, flags);
+ goto error;
+ }
+ invalidate_bdev(*bdev);
+ *bh = btrfs_read_dev_super(*bdev);
+ if (!*bh) {
+ ret = -EINVAL;
+ blkdev_put(*bdev, flags);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ *bdev = NULL;
+ *bh = NULL;
+ return ret;
+}
+
static void requeue_list(struct btrfs_pending_bios *pending_bios,
struct bio *head, struct bio *tail)
{
@@ -467,7 +519,8 @@ error:
return ERR_PTR(-ENOMEM);
}
-void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
+ struct btrfs_fs_devices *fs_devices, int step)
{
struct btrfs_device *device, *next;
@@ -480,8 +533,9 @@ again:
/* This is the initialized path, it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->in_fs_metadata) {
- if (!latest_transid ||
- device->generation > latest_transid) {
+ if (!device->is_tgtdev_for_dev_replace &&
+ (!latest_transid ||
+ device->generation > latest_transid)) {
latest_devid = device->devid;
latest_transid = device->generation;
latest_bdev = device->bdev;
@@ -489,6 +543,21 @@ again:
continue;
}
+ if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
+ /*
+ * In the first step, keep the device which has
+ * the correct fsid and the devid that is used
+ * for the dev_replace procedure.
+ * In the second step, the dev_replace state is
+ * read from the device tree and it is known
+ * whether the procedure is really active or
+ * not, which means whether this device is
+ * used or whether it should be removed.
+ */
+ if (step == 0 || device->is_tgtdev_for_dev_replace) {
+ continue;
+ }
+ }
if (device->bdev) {
blkdev_put(device->bdev, device->mode);
device->bdev = NULL;
@@ -497,7 +566,8 @@ again:
if (device->writeable) {
list_del_init(&device->dev_alloc_list);
device->writeable = 0;
- fs_devices->rw_devices--;
+ if (!device->is_tgtdev_for_dev_replace)
+ fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
@@ -555,7 +625,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
if (device->bdev)
fs_devices->open_devices--;
- if (device->writeable) {
+ if (device->writeable && !device->is_tgtdev_for_dev_replace) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
@@ -637,18 +707,10 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
if (!device->name)
continue;
- bdev = blkdev_get_by_path(device->name->str, flags, holder);
- if (IS_ERR(bdev)) {
- printk(KERN_INFO "btrfs: open %s failed\n", device->name->str);
- goto error;
- }
- filemap_write_and_wait(bdev->bd_inode->i_mapping);
- invalidate_bdev(bdev);
- set_blocksize(bdev, 4096);
-
- bh = btrfs_read_dev_super(bdev);
- if (!bh)
- goto error_close;
+ ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+ &bdev, &bh);
+ if (ret)
+ continue;
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
@@ -687,7 +749,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fs_devices->rotating = 1;
fs_devices->open_devices++;
- if (device->writeable) {
+ if (device->writeable && !device->is_tgtdev_for_dev_replace) {
fs_devices->rw_devices++;
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
@@ -697,9 +759,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
error_brelse:
brelse(bh);
-error_close:
blkdev_put(bdev, flags);
-error:
continue;
}
if (fs_devices->open_devices == 0) {
@@ -744,40 +804,30 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
u64 total_devices;
flags |= FMODE_EXCL;
- bdev = blkdev_get_by_path(path, flags, holder);
-
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
- goto error;
- }
-
mutex_lock(&uuid_mutex);
- ret = set_blocksize(bdev, 4096);
+ ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
if (ret)
- goto error_close;
- bh = btrfs_read_dev_super(bdev);
- if (!bh) {
- ret = -EINVAL;
- goto error_close;
- }
+ goto error;
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
total_devices = btrfs_super_num_devices(disk_super);
- if (disk_super->label[0])
+ if (disk_super->label[0]) {
+ if (disk_super->label[BTRFS_LABEL_SIZE - 1])
+ disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
printk(KERN_INFO "device label %s ", disk_super->label);
- else
+ } else {
printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
+ }
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
if (!ret && fs_devices_ret)
(*fs_devices_ret)->total_devices = total_devices;
brelse(bh);
-error_close:
- mutex_unlock(&uuid_mutex);
blkdev_put(bdev, flags);
error:
+ mutex_unlock(&uuid_mutex);
return ret;
}
@@ -796,7 +846,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
*length = 0;
- if (start >= device->total_bytes)
+ if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
return 0;
path = btrfs_alloc_path();
@@ -913,7 +963,7 @@ int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
max_hole_size = 0;
hole_size = 0;
- if (search_start >= search_end) {
+ if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
ret = -ENOSPC;
goto error;
}
@@ -1096,6 +1146,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_key key;
WARN_ON(!device->in_fs_metadata);
+ WARN_ON(device->is_tgtdev_for_dev_replace);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1330,16 +1381,22 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
root->fs_info->avail_system_alloc_bits |
root->fs_info->avail_metadata_alloc_bits;
- if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
- root->fs_info->fs_devices->num_devices <= 4) {
+ num_devices = root->fs_info->fs_devices->num_devices;
+ btrfs_dev_replace_lock(&root->fs_info->dev_replace);
+ if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
+ WARN_ON(num_devices < 1);
+ num_devices--;
+ }
+ btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
+
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
goto out;
}
- if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
- root->fs_info->fs_devices->num_devices <= 2) {
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
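The num_devices adjustment introduced above keeps the dev-replace target disk from loosening the RAID constraints. A short worked example (figures are illustrative only; assumes a two-device raid1 with a replace in progress):

/*
 * Illustrative only, not part of the patch:
 *
 *   fs_devices->num_devices == 3   (two originals plus the tgtdev that
 *                                   btrfs_init_dev_replace_tgtdev() adds)
 *   replace ongoing              -> num_devices-- leaves 2
 *   raid1 check (num_devices <= 2) still triggers, so removing a device
 *   is refused, exactly as it would be without the replace running.
 */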
@@ -1357,7 +1414,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
* is held.
*/
list_for_each_entry(tmp, devices, dev_list) {
- if (tmp->in_fs_metadata && !tmp->bdev) {
+ if (tmp->in_fs_metadata &&
+ !tmp->is_tgtdev_for_dev_replace &&
+ !tmp->bdev) {
device = tmp;
break;
}
@@ -1371,24 +1430,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
goto out;
}
} else {
- bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
- root->fs_info->bdev_holder);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ ret = btrfs_get_bdev_and_sb(device_path,
+ FMODE_READ | FMODE_EXCL,
+ root->fs_info->bdev_holder, 0,
+ &bdev, &bh);
+ if (ret)
goto out;
- }
-
- set_blocksize(bdev, 4096);
- invalidate_bdev(bdev);
- bh = btrfs_read_dev_super(bdev);
- if (!bh) {
- ret = -EINVAL;
- goto error_close;
- }
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- device = btrfs_find_device(root, devid, dev_uuid,
+ device = btrfs_find_device(root->fs_info, devid, dev_uuid,
disk_super->fsid);
if (!device) {
ret = -ENOENT;
@@ -1396,6 +1447,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
}
+ if (device->is_tgtdev_for_dev_replace) {
+ pr_err("btrfs: unable to remove the dev_replace target dev\n");
+ ret = -EINVAL;
+ goto error_brelse;
+ }
+
if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
printk(KERN_ERR "btrfs: unable to remove the only writeable "
"device\n");
@@ -1415,6 +1472,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
if (ret)
goto error_undo;
+ /*
+ * TODO: the superblock still includes this device in its num_devices
+ * counter although write_all_supers() is not locked out. This
+ * could give a filesystem state which requires a degraded mount.
+ */
ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
if (ret)
goto error_undo;
@@ -1425,7 +1487,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
spin_unlock(&root->fs_info->free_chunk_lock);
device->in_fs_metadata = 0;
- btrfs_scrub_cancel_dev(root, device);
+ btrfs_scrub_cancel_dev(root->fs_info, device);
/*
* the device list mutex makes sure that we don't change
@@ -1482,7 +1544,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
* at this point, the device is zero sized. We want to
* remove it from the devices list and zero out the old super
*/
- if (clear_super) {
+ if (clear_super && disk_super) {
/* make sure this device isn't detected as part of
* the FS anymore
*/
@@ -1493,9 +1555,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
ret = 0;
+ /* Notify udev that device has changed */
+ btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+
error_brelse:
brelse(bh);
-error_close:
if (bdev)
blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
@@ -1512,6 +1576,112 @@ error_undo:
goto error_brelse;
}
+void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev)
+{
+ WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
+ list_del_rcu(&srcdev->dev_list);
+ list_del_rcu(&srcdev->dev_alloc_list);
+ fs_info->fs_devices->num_devices--;
+ if (srcdev->missing) {
+ fs_info->fs_devices->missing_devices--;
+ fs_info->fs_devices->rw_devices++;
+ }
+ if (srcdev->can_discard)
+ fs_info->fs_devices->num_can_discard--;
+ if (srcdev->bdev)
+ fs_info->fs_devices->open_devices--;
+
+ call_rcu(&srcdev->rcu, free_device);
+}
+
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *tgtdev)
+{
+ struct btrfs_device *next_device;
+
+ WARN_ON(!tgtdev);
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ if (tgtdev->bdev) {
+ btrfs_scratch_superblock(tgtdev);
+ fs_info->fs_devices->open_devices--;
+ }
+ fs_info->fs_devices->num_devices--;
+ if (tgtdev->can_discard)
+ fs_info->fs_devices->num_can_discard++;
+
+ next_device = list_entry(fs_info->fs_devices->devices.next,
+ struct btrfs_device, dev_list);
+ if (tgtdev->bdev == fs_info->sb->s_bdev)
+ fs_info->sb->s_bdev = next_device->bdev;
+ if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
+ fs_info->fs_devices->latest_bdev = next_device->bdev;
+ list_del_rcu(&tgtdev->dev_list);
+
+ call_rcu(&tgtdev->rcu, free_device);
+
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+}
+
+int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+ struct btrfs_device **device)
+{
+ int ret = 0;
+ struct btrfs_super_block *disk_super;
+ u64 devid;
+ u8 *dev_uuid;
+ struct block_device *bdev;
+ struct buffer_head *bh;
+
+ *device = NULL;
+ ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
+ root->fs_info->bdev_holder, 0, &bdev, &bh);
+ if (ret)
+ return ret;
+ disk_super = (struct btrfs_super_block *)bh->b_data;
+ devid = btrfs_stack_device_id(&disk_super->dev_item);
+ dev_uuid = disk_super->dev_item.uuid;
+ *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
+ disk_super->fsid);
+ brelse(bh);
+ if (!*device)
+ ret = -ENOENT;
+ blkdev_put(bdev, FMODE_READ);
+ return ret;
+}
+
+int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+ char *device_path,
+ struct btrfs_device **device)
+{
+ *device = NULL;
+ if (strcmp(device_path, "missing") == 0) {
+ struct list_head *devices;
+ struct btrfs_device *tmp;
+
+ devices = &root->fs_info->fs_devices->devices;
+ /*
+ * It is safe to read the devices since the volume_mutex
+ * is held by the caller.
+ */
+ list_for_each_entry(tmp, devices, dev_list) {
+ if (tmp->in_fs_metadata && !tmp->bdev) {
+ *device = tmp;
+ break;
+ }
+ }
+
+ if (!*device) {
+ pr_err("btrfs: no missing device found\n");
+ return -ENOENT;
+ }
+
+ return 0;
+ } else {
+ return btrfs_find_device_by_path(root, device_path, device);
+ }
+}
+
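A rough usage sketch for btrfs_find_device_missing_or_by_path() above; the wrapper, its name, and arg_string are hypothetical, and volume_mutex is assumed to be held as the helper's comment requires:

static int resolve_replace_source(struct btrfs_root *root, char *arg_string,
				  struct btrfs_device **src_dev)
{
	int ret;

	/*
	 * "missing" selects a device recorded in the metadata but lacking
	 * a backing bdev; any other string is probed as a device path.
	 */
	ret = btrfs_find_device_missing_or_by_path(root, arg_string, src_dev);
	if (ret == -ENOENT)
		pr_err("btrfs: '%s' matched no device\n", arg_string);
	return ret;
}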
/*
* does all the dirty work required for changing file system's UUID.
*/
@@ -1630,7 +1800,8 @@ next_slot:
read_extent_buffer(leaf, fs_uuid,
(unsigned long)btrfs_device_fsid(dev_item),
BTRFS_UUID_SIZE);
- device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(root->fs_info, devid, dev_uuid,
+ fs_uuid);
BUG_ON(!device); /* Logic error */
if (device->fs_devices->seeding) {
@@ -1678,16 +1849,17 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
filemap_write_and_wait(bdev->bd_inode->i_mapping);
devices = &root->fs_info->fs_devices->devices;
- /*
- * we have the volume lock, so we don't need the extra
- * device list mutex while reading the list here.
- */
+
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
+ mutex_unlock(
+ &root->fs_info->fs_devices->device_list_mutex);
goto error;
}
}
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
device = kzalloc(sizeof(*device), GFP_NOFS);
if (!device) {
@@ -1737,6 +1909,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
+ device->is_tgtdev_for_dev_replace = 0;
device->mode = FMODE_EXCL;
set_blocksize(device->bdev, 4096);
@@ -1844,6 +2017,98 @@ error:
return ret;
}
+int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+ struct btrfs_device **device_out)
+{
+ struct request_queue *q;
+ struct btrfs_device *device;
+ struct block_device *bdev;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct list_head *devices;
+ struct rcu_string *name;
+ int ret = 0;
+
+ *device_out = NULL;
+ if (fs_info->fs_devices->seeding)
+ return -EINVAL;
+
+ bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
+ fs_info->bdev_holder);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+
+ filemap_write_and_wait(bdev->bd_inode->i_mapping);
+
+ devices = &fs_info->fs_devices->devices;
+ list_for_each_entry(device, devices, dev_list) {
+ if (device->bdev == bdev) {
+ ret = -EEXIST;
+ goto error;
+ }
+ }
+
+ device = kzalloc(sizeof(*device), GFP_NOFS);
+ if (!device) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ name = rcu_string_strdup(device_path, GFP_NOFS);
+ if (!name) {
+ kfree(device);
+ ret = -ENOMEM;
+ goto error;
+ }
+ rcu_assign_pointer(device->name, name);
+
+ q = bdev_get_queue(bdev);
+ if (blk_queue_discard(q))
+ device->can_discard = 1;
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ device->writeable = 1;
+ device->work.func = pending_bios_fn;
+ generate_random_uuid(device->uuid);
+ device->devid = BTRFS_DEV_REPLACE_DEVID;
+ spin_lock_init(&device->io_lock);
+ device->generation = 0;
+ device->io_width = root->sectorsize;
+ device->io_align = root->sectorsize;
+ device->sector_size = root->sectorsize;
+ device->total_bytes = i_size_read(bdev->bd_inode);
+ device->disk_total_bytes = device->total_bytes;
+ device->dev_root = fs_info->dev_root;
+ device->bdev = bdev;
+ device->in_fs_metadata = 1;
+ device->is_tgtdev_for_dev_replace = 1;
+ device->mode = FMODE_EXCL;
+ set_blocksize(device->bdev, 4096);
+ device->fs_devices = fs_info->fs_devices;
+ list_add(&device->dev_list, &fs_info->fs_devices->devices);
+ fs_info->fs_devices->num_devices++;
+ fs_info->fs_devices->open_devices++;
+ if (device->can_discard)
+ fs_info->fs_devices->num_can_discard++;
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+ *device_out = device;
+ return ret;
+
+error:
+ blkdev_put(bdev, FMODE_EXCL);
+ return ret;
+}
+
+void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *tgtdev)
+{
+ WARN_ON(fs_info->fs_devices->rw_devices == 0);
+ tgtdev->io_width = fs_info->dev_root->sectorsize;
+ tgtdev->io_align = fs_info->dev_root->sectorsize;
+ tgtdev->sector_size = fs_info->dev_root->sectorsize;
+ tgtdev->dev_root = fs_info->dev_root;
+ tgtdev->in_fs_metadata = 1;
+}
+
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
@@ -1900,7 +2165,8 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
if (!device->writeable)
return -EACCES;
- if (new_size <= device->total_bytes)
+ if (new_size <= device->total_bytes ||
+ device->is_tgtdev_for_dev_replace)
return -EINVAL;
btrfs_set_super_total_bytes(super_copy, old_total + diff);
@@ -2338,18 +2604,6 @@ static int chunk_profiles_filter(u64 chunk_type,
return 1;
}
-static u64 div_factor_fine(u64 num, int factor)
-{
- if (factor <= 0)
- return 0;
- if (factor >= 100)
- return num;
-
- num *= factor;
- do_div(num, 100);
- return num;
-}
-
static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
struct btrfs_balance_args *bargs)
{
@@ -2514,15 +2768,6 @@ static int should_balance_chunk(struct btrfs_root *root,
return 1;
}
-static u64 div_factor(u64 num, int factor)
-{
- if (factor == 10)
- return num;
- num *= factor;
- do_div(num, 10);
- return num;
-}
-
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
@@ -2550,7 +2795,8 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
size_to_free = div_factor(old_size, 1);
size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
if (!device->writeable ||
- device->total_bytes - device->bytes_used > size_to_free)
+ device->total_bytes - device->bytes_used > size_to_free ||
+ device->is_tgtdev_for_dev_replace)
continue;
ret = btrfs_shrink_device(device, old_size - size_to_free);
@@ -2728,6 +2974,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
u64 allowed;
int mixed = 0;
int ret;
+ u64 num_devices;
if (btrfs_fs_closing(fs_info) ||
atomic_read(&fs_info->balance_pause_req) ||
@@ -2756,10 +3003,17 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
}
+ num_devices = fs_info->fs_devices->num_devices;
+ btrfs_dev_replace_lock(&fs_info->dev_replace);
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
+ BUG_ON(num_devices < 1);
+ num_devices--;
+ }
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
- if (fs_info->fs_devices->num_devices == 1)
+ if (num_devices == 1)
allowed |= BTRFS_BLOCK_GROUP_DUP;
- else if (fs_info->fs_devices->num_devices < 4)
+ else if (num_devices < 4)
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
else
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
@@ -2902,6 +3156,7 @@ static int balance_kthread(void *data)
ret = btrfs_balance(fs_info->balance_ctl, NULL);
}
+ atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
@@ -2924,6 +3179,7 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
return 0;
}
+ WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
if (IS_ERR(tsk))
return PTR_ERR(tsk);
@@ -3080,7 +3336,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
u64 old_size = device->total_bytes;
u64 diff = device->total_bytes - new_size;
- if (new_size >= device->total_bytes)
+ if (device->is_tgtdev_for_dev_replace)
return -EINVAL;
path = btrfs_alloc_path();
@@ -3235,6 +3491,14 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
return 0;
}
+struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+ { 2, 1, 0, 4, 2, 2 /* raid10 */ },
+ { 1, 1, 2, 2, 2, 2 /* raid1 */ },
+ { 1, 2, 1, 1, 1, 2 /* dup */ },
+ { 1, 1, 0, 2, 1, 1 /* raid0 */ },
+ { 1, 1, 0, 1, 1, 1 /* single */ },
+};
+
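For readability, here is the raid10 row of the table above rewritten with designated initializers (illustrative only; the field order is taken from the struct btrfs_raid_attr declaration added to volumes.h later in this patch):

static const struct btrfs_raid_attr raid10_example = {
	.sub_stripes	= 2,	/* stripes are mirrored in pairs */
	.dev_stripes	= 1,	/* one stripe per device */
	.devs_max	= 0,	/* 0 == use as many devices as possible */
	.devs_min	= 4,	/* needs at least four devices */
	.devs_increment	= 2,	/* ndevs must be a multiple of two */
	.ncopies	= 2,	/* every block is stored twice */
};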
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup **map_ret,
@@ -3264,43 +3528,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
int ndevs;
int i;
int j;
+ int index;
BUG_ON(!alloc_profile_is_valid(type, 0));
if (list_empty(&fs_devices->alloc_list))
return -ENOSPC;
- sub_stripes = 1;
- dev_stripes = 1;
- devs_increment = 1;
- ncopies = 1;
- devs_max = 0; /* 0 == as many as possible */
- devs_min = 1;
+ index = __get_raid_index(type);
- /*
- * define the properties of each RAID type.
- * FIXME: move this to a global table and use it in all RAID
- * calculation code
- */
- if (type & (BTRFS_BLOCK_GROUP_DUP)) {
- dev_stripes = 2;
- ncopies = 2;
- devs_max = 1;
- } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
- devs_min = 2;
- } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
- devs_increment = 2;
- ncopies = 2;
- devs_max = 2;
- devs_min = 2;
- } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
- sub_stripes = 2;
- devs_increment = 2;
- ncopies = 2;
- devs_min = 4;
- } else {
- devs_max = 1;
- }
+ sub_stripes = btrfs_raid_array[index].sub_stripes;
+ dev_stripes = btrfs_raid_array[index].dev_stripes;
+ devs_max = btrfs_raid_array[index].devs_max;
+ devs_min = btrfs_raid_array[index].devs_min;
+ devs_increment = btrfs_raid_array[index].devs_increment;
+ ncopies = btrfs_raid_array[index].ncopies;
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = 1024 * 1024 * 1024;
@@ -3347,13 +3589,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
cur = cur->next;
if (!device->writeable) {
- printk(KERN_ERR
+ WARN(1, KERN_ERR
"btrfs: read-only device in alloc_list\n");
- WARN_ON(1);
continue;
}
- if (!device->in_fs_metadata)
+ if (!device->in_fs_metadata ||
+ device->is_tgtdev_for_dev_replace)
continue;
if (device->total_bytes > device->bytes_used)
@@ -3382,6 +3624,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
devices_info[ndevs].total_avail = total_avail;
devices_info[ndevs].dev = device;
++ndevs;
+ WARN_ON(ndevs > fs_devices->rw_devices);
}
/*
@@ -3740,8 +3983,9 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
}
}
-int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
+int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
@@ -3761,32 +4005,60 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
else
ret = 1;
free_extent_map(em);
+
+ btrfs_dev_replace_lock(&fs_info->dev_replace);
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
+ ret++;
+ btrfs_dev_replace_unlock(&fs_info->dev_replace);
+
return ret;
}
-static int find_live_mirror(struct map_lookup *map, int first, int num,
- int optimal)
+static int find_live_mirror(struct btrfs_fs_info *fs_info,
+ struct map_lookup *map, int first, int num,
+ int optimal, int dev_replace_is_ongoing)
{
int i;
- if (map->stripes[optimal].dev->bdev)
- return optimal;
- for (i = first; i < first + num; i++) {
- if (map->stripes[i].dev->bdev)
- return i;
+ int tolerance;
+ struct btrfs_device *srcdev;
+
+ if (dev_replace_is_ongoing &&
+ fs_info->dev_replace.cont_reading_from_srcdev_mode ==
+ BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
+ srcdev = fs_info->dev_replace.srcdev;
+ else
+ srcdev = NULL;
+
+ /*
+ * try to avoid the drive that is the source drive for a
+ * dev-replace procedure; only choose it if no other non-missing
+ * mirror is available
+ */
+ for (tolerance = 0; tolerance < 2; tolerance++) {
+ if (map->stripes[optimal].dev->bdev &&
+ (tolerance || map->stripes[optimal].dev != srcdev))
+ return optimal;
+ for (i = first; i < first + num; i++) {
+ if (map->stripes[i].dev->bdev &&
+ (tolerance || map->stripes[i].dev != srcdev))
+ return i;
+ }
}
+
/* we couldn't find one that doesn't fail. Just return something
* and the io error handling code will clean up eventually
*/
return optimal;
}
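A quick trace of the two-pass tolerance loop above (illustrative; assumes a two-way raid1 whose stripe 0 belongs to the dev-replace source and the AVOID read policy is in effect):

/*
 * Illustrative only, not part of the patch:
 *
 *   tolerance == 0: stripe 0 is rejected because it is srcdev; stripe 1
 *                   is returned as long as it has a bdev.
 *   tolerance == 1: reached only when every other mirror is missing; the
 *                   source drive itself is then accepted rather than
 *                   failing the read outright.
 */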
-static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map_tree *em_tree = &map_tree->map_tree;
u64 offset;
u64 stripe_offset;
@@ -3800,6 +4072,11 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
int num_stripes;
int max_errors = 0;
struct btrfs_bio *bbio = NULL;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ int dev_replace_is_ongoing = 0;
+ int num_alloc_stripes;
+ int patch_the_first_stripe_for_dev_replace = 0;
+ u64 physical_to_patch_in_first_stripe = 0;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
@@ -3816,9 +4093,6 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;
- if (mirror_num > map->num_stripes)
- mirror_num = 0;
-
stripe_nr = offset;
/*
* stripe_nr counts the total number of stripes we have to stride
@@ -3845,6 +4119,93 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
if (!bbio_ret)
goto out;
+ btrfs_dev_replace_lock(dev_replace);
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ if (!dev_replace_is_ongoing)
+ btrfs_dev_replace_unlock(dev_replace);
+
+ if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
+ !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
+ dev_replace->tgtdev != NULL) {
+ /*
+ * in the dev-replace case, for the repair case (that's the only
+ * case where the mirror is selected explicitly when
+ * calling btrfs_map_block), blocks left of the left cursor
+ * can also be read from the target drive.
+ * For REQ_GET_READ_MIRRORS, the target drive is added as
+ * the last one to the array of stripes. For READ, it also
+ * needs to be supported using the same mirror number.
+ * If the requested block is not left of the left cursor,
+ * EIO is returned. This can happen because btrfs_num_copies()
+ * returns one more in the dev-replace case.
+ */
+ u64 tmp_length = *length;
+ struct btrfs_bio *tmp_bbio = NULL;
+ int tmp_num_stripes;
+ u64 srcdev_devid = dev_replace->srcdev->devid;
+ int index_srcdev = 0;
+ int found = 0;
+ u64 physical_of_found = 0;
+
+ ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
+ logical, &tmp_length, &tmp_bbio, 0);
+ if (ret) {
+ WARN_ON(tmp_bbio != NULL);
+ goto out;
+ }
+
+ tmp_num_stripes = tmp_bbio->num_stripes;
+ if (mirror_num > tmp_num_stripes) {
+ /*
+ * REQ_GET_READ_MIRRORS does not contain this
+ * mirror, which means that the requested area
+ * is not left of the left cursor
+ */
+ ret = -EIO;
+ kfree(tmp_bbio);
+ goto out;
+ }
+
+ /*
+ * process the rest of the function using the mirror_num
+ * of the source drive. Therefore look it up first.
+ * At the end, patch the device pointer to the one of the
+ * target drive.
+ */
+ for (i = 0; i < tmp_num_stripes; i++) {
+ if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
+ /*
+ * In case of DUP, in order to keep it
+ * simple, only add the mirror with the
+ * lowest physical address
+ */
+ if (found &&
+ physical_of_found <=
+ tmp_bbio->stripes[i].physical)
+ continue;
+ index_srcdev = i;
+ found = 1;
+ physical_of_found =
+ tmp_bbio->stripes[i].physical;
+ }
+ }
+
+ if (found) {
+ mirror_num = index_srcdev + 1;
+ patch_the_first_stripe_for_dev_replace = 1;
+ physical_to_patch_in_first_stripe = physical_of_found;
+ } else {
+ WARN_ON(1);
+ ret = -EIO;
+ kfree(tmp_bbio);
+ goto out;
+ }
+
+ kfree(tmp_bbio);
+ } else if (mirror_num > map->num_stripes) {
+ mirror_num = 0;
+ }
+
num_stripes = 1;
stripe_index = 0;
stripe_nr_orig = stripe_nr;
@@ -3859,19 +4220,20 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
stripe_nr_end - stripe_nr_orig);
stripe_index = do_div(stripe_nr, map->num_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (rw & (REQ_WRITE | REQ_DISCARD))
+ if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
else {
- stripe_index = find_live_mirror(map, 0,
+ stripe_index = find_live_mirror(fs_info, map, 0,
map->num_stripes,
- current->pid % map->num_stripes);
+ current->pid % map->num_stripes,
+ dev_replace_is_ongoing);
mirror_num = stripe_index + 1;
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (rw & (REQ_WRITE | REQ_DISCARD)) {
+ if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
num_stripes = map->num_stripes;
} else if (mirror_num) {
stripe_index = mirror_num - 1;
@@ -3885,7 +4247,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
- if (rw & REQ_WRITE)
+ if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
num_stripes = map->sub_stripes;
else if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->sub_stripes *
@@ -3895,9 +4257,11 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
stripe_index += mirror_num - 1;
else {
int old_stripe_index = stripe_index;
- stripe_index = find_live_mirror(map, stripe_index,
+ stripe_index = find_live_mirror(fs_info, map,
+ stripe_index,
map->sub_stripes, stripe_index +
- current->pid % map->sub_stripes);
+ current->pid % map->sub_stripes,
+ dev_replace_is_ongoing);
mirror_num = stripe_index - old_stripe_index + 1;
}
} else {
@@ -3911,7 +4275,14 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
}
BUG_ON(stripe_index >= map->num_stripes);
- bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
+ num_alloc_stripes = num_stripes;
+ if (dev_replace_is_ongoing) {
+ if (rw & (REQ_WRITE | REQ_DISCARD))
+ num_alloc_stripes <<= 1;
+ if (rw & REQ_GET_READ_MIRRORS)
+ num_alloc_stripes++;
+ }
+ bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
if (!bbio) {
ret = -ENOMEM;
goto out;
@@ -3998,7 +4369,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
}
}
- if (rw & REQ_WRITE) {
+ if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_DUP)) {
@@ -4006,20 +4377,115 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
}
}
+ if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
+ dev_replace->tgtdev != NULL) {
+ int index_where_to_add;
+ u64 srcdev_devid = dev_replace->srcdev->devid;
+
+ /*
+ * duplicate the write operations while the dev replace
+ * procedure is running. Since the copying of the old disk
+ * to the new disk takes place at run time while the
+ * filesystem is mounted writable, the regular write
+ * operations to the old disk have to be duplicated to go
+ * to the new disk as well.
+ * Note that device->missing is handled by the caller, and
+ * that the write to the old disk is already set up in the
+ * stripes array.
+ */
+ index_where_to_add = num_stripes;
+ for (i = 0; i < num_stripes; i++) {
+ if (bbio->stripes[i].dev->devid == srcdev_devid) {
+ /* write to new disk, too */
+ struct btrfs_bio_stripe *new =
+ bbio->stripes + index_where_to_add;
+ struct btrfs_bio_stripe *old =
+ bbio->stripes + i;
+
+ new->physical = old->physical;
+ new->length = old->length;
+ new->dev = dev_replace->tgtdev;
+ index_where_to_add++;
+ max_errors++;
+ }
+ }
+ num_stripes = index_where_to_add;
+ } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
+ dev_replace->tgtdev != NULL) {
+ u64 srcdev_devid = dev_replace->srcdev->devid;
+ int index_srcdev = 0;
+ int found = 0;
+ u64 physical_of_found = 0;
+
+ /*
+ * During the dev-replace procedure, the target drive can
+ * also be used to read data in case it is needed to repair
+ * a corrupt block elsewhere. This is possible if the
+ * requested area is left of the left cursor. In this area,
+ * the target drive is a full copy of the source drive.
+ */
+ for (i = 0; i < num_stripes; i++) {
+ if (bbio->stripes[i].dev->devid == srcdev_devid) {
+ /*
+ * In case of DUP, in order to keep it
+ * simple, only add the mirror with the
+ * lowest physical address
+ */
+ if (found &&
+ physical_of_found <=
+ bbio->stripes[i].physical)
+ continue;
+ index_srcdev = i;
+ found = 1;
+ physical_of_found = bbio->stripes[i].physical;
+ }
+ }
+ if (found) {
+ u64 length = map->stripe_len;
+
+ if (physical_of_found + length <=
+ dev_replace->cursor_left) {
+ struct btrfs_bio_stripe *tgtdev_stripe =
+ bbio->stripes + num_stripes;
+
+ tgtdev_stripe->physical = physical_of_found;
+ tgtdev_stripe->length =
+ bbio->stripes[index_srcdev].length;
+ tgtdev_stripe->dev = dev_replace->tgtdev;
+
+ num_stripes++;
+ }
+ }
+ }
+
*bbio_ret = bbio;
bbio->num_stripes = num_stripes;
bbio->max_errors = max_errors;
bbio->mirror_num = mirror_num;
+
+ /*
+ * this is the case where the request is a READ, dev_replace_is_ongoing,
+ * mirror_num == num_stripes + 1, and the dev_replace target drive is
+ * available as a mirror
+ */
+ if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
+ WARN_ON(num_stripes > 1);
+ bbio->stripes[0].dev = dev_replace->tgtdev;
+ bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
+ bbio->mirror_num = map->num_stripes + 1;
+ }
out:
+ if (dev_replace_is_ongoing)
+ btrfs_dev_replace_unlock(dev_replace);
free_extent_map(em);
return ret;
}
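To make the stripe accounting in __btrfs_map_block() concrete, a worked example (illustrative figures; a two-device raid1, a replace in progress, and rw containing REQ_WRITE):

/*
 * Illustrative only, not part of the patch:
 *
 *   num_stripes       = map->num_stripes  = 2
 *   num_alloc_stripes = num_stripes << 1  = 4   (slots reserved up front)
 *   the duplication loop appends one stripe, the copy of the srcdev
 *   stripe redirected to dev_replace->tgtdev, and bumps max_errors once,
 *   so the resulting bbio carries 3 stripes.
 */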
-int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num)
{
- return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
+ return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
mirror_num);
}
@@ -4238,10 +4704,116 @@ static noinline void schedule_bio(struct btrfs_root *root,
&device->work);
}
+static int bio_size_ok(struct block_device *bdev, struct bio *bio,
+ sector_t sector)
+{
+ struct bio_vec *prev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned short max_sectors = queue_max_sectors(q);
+ struct bvec_merge_data bvm = {
+ .bi_bdev = bdev,
+ .bi_sector = sector,
+ .bi_rw = bio->bi_rw,
+ };
+
+ if (bio->bi_vcnt == 0) {
+ WARN_ON(1);
+ return 1;
+ }
+
+ prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ if ((bio->bi_size >> 9) > max_sectors)
+ return 0;
+
+ if (!q->merge_bvec_fn)
+ return 1;
+
+ bvm.bi_size = bio->bi_size - prev->bv_len;
+ if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
+ return 0;
+ return 1;
+}
+
+static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
+ struct bio *bio, u64 physical, int dev_nr,
+ int rw, int async)
+{
+ struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
+
+ bio->bi_private = bbio;
+ bio->bi_private = merge_stripe_index_into_bio_private(
+ bio->bi_private, (unsigned int)dev_nr);
+ bio->bi_end_io = btrfs_end_bio;
+ bio->bi_sector = physical >> 9;
+#ifdef DEBUG
+ {
+ struct rcu_string *name;
+
+ rcu_read_lock();
+ name = rcu_dereference(dev->name);
+ pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
+ "(%s id %llu), size=%u\n", rw,
+ (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
+ name->str, dev->devid, bio->bi_size);
+ rcu_read_unlock();
+ }
+#endif
+ bio->bi_bdev = dev->bdev;
+ if (async)
+ schedule_bio(root, dev, rw, bio);
+ else
+ btrfsic_submit_bio(rw, bio);
+}
+
+static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
+ struct bio *first_bio, struct btrfs_device *dev,
+ int dev_nr, int rw, int async)
+{
+ struct bio_vec *bvec = first_bio->bi_io_vec;
+ struct bio *bio;
+ int nr_vecs = bio_get_nr_vecs(dev->bdev);
+ u64 physical = bbio->stripes[dev_nr].physical;
+
+again:
+ bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
+ if (!bio)
+ return -ENOMEM;
+
+ while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
+ if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset) < bvec->bv_len) {
+ u64 len = bio->bi_size;
+
+ atomic_inc(&bbio->stripes_pending);
+ submit_stripe_bio(root, bbio, bio, physical, dev_nr,
+ rw, async);
+ physical += len;
+ goto again;
+ }
+ bvec++;
+ }
+
+ submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
+ return 0;
+}
+
+static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
+{
+ atomic_inc(&bbio->error);
+ if (atomic_dec_and_test(&bbio->stripes_pending)) {
+ bio->bi_private = bbio->private;
+ bio->bi_end_io = bbio->end_io;
+ bio->bi_bdev = (struct block_device *)
+ (unsigned long)bbio->mirror_num;
+ bio->bi_sector = logical >> 9;
+ kfree(bbio);
+ bio_endio(bio, -EIO);
+ }
+}
+
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
int mirror_num, int async_submit)
{
- struct btrfs_mapping_tree *map_tree;
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = (u64)bio->bi_sector << 9;
@@ -4253,12 +4825,11 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
struct btrfs_bio *bbio = NULL;
length = bio->bi_size;
- map_tree = &root->fs_info->mapping_tree;
map_length = length;
- ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
+ ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
mirror_num);
- if (ret) /* -ENOMEM */
+ if (ret)
return ret;
total_devs = bbio->num_stripes;
@@ -4276,52 +4847,48 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
atomic_set(&bbio->stripes_pending, bbio->num_stripes);
while (dev_nr < total_devs) {
+ dev = bbio->stripes[dev_nr].dev;
+ if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
+ bbio_error(bbio, first_bio, logical);
+ dev_nr++;
+ continue;
+ }
+
+ /*
+ * Check and see if we're ok with this bio based on its size
+ * and offset with the given device.
+ */
+ if (!bio_size_ok(dev->bdev, first_bio,
+ bbio->stripes[dev_nr].physical >> 9)) {
+ ret = breakup_stripe_bio(root, bbio, first_bio, dev,
+ dev_nr, rw, async_submit);
+ BUG_ON(ret);
+ dev_nr++;
+ continue;
+ }
+
if (dev_nr < total_devs - 1) {
bio = bio_clone(first_bio, GFP_NOFS);
BUG_ON(!bio); /* -ENOMEM */
} else {
bio = first_bio;
}
- bio->bi_private = bbio;
- bio->bi_private = merge_stripe_index_into_bio_private(
- bio->bi_private, (unsigned int)dev_nr);
- bio->bi_end_io = btrfs_end_bio;
- bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
- dev = bbio->stripes[dev_nr].dev;
- if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
-#ifdef DEBUG
- struct rcu_string *name;
-
- rcu_read_lock();
- name = rcu_dereference(dev->name);
- pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
- "(%s id %llu), size=%u\n", rw,
- (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
- name->str, dev->devid, bio->bi_size);
- rcu_read_unlock();
-#endif
- bio->bi_bdev = dev->bdev;
- if (async_submit)
- schedule_bio(root, dev, rw, bio);
- else
- btrfsic_submit_bio(rw, bio);
- } else {
- bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
- bio->bi_sector = logical >> 9;
- bio_endio(bio, -EIO);
- }
+
+ submit_stripe_bio(root, bbio, bio,
+ bbio->stripes[dev_nr].physical, dev_nr, rw,
+ async_submit);
dev_nr++;
}
return 0;
}
-struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
u8 *uuid, u8 *fsid)
{
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
- cur_devices = root->fs_info->fs_devices;
+ cur_devices = fs_info->fs_devices;
while (cur_devices) {
if (!fsid ||
!memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
@@ -4402,6 +4969,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
em->bdev = (struct block_device *)map;
em->start = logical;
em->len = length;
+ em->orig_start = 0;
em->block_start = 0;
em->block_len = em->len;
@@ -4419,8 +4987,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
read_extent_buffer(leaf, uuid, (unsigned long)
btrfs_stripe_dev_uuid_nr(chunk, i),
BTRFS_UUID_SIZE);
- map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
- NULL);
+ map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
+ uuid, NULL);
if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
kfree(map);
free_extent_map(em);
@@ -4461,6 +5029,8 @@ static void fill_device_from_item(struct extent_buffer *leaf,
device->io_align = btrfs_device_io_align(leaf, dev_item);
device->io_width = btrfs_device_io_width(leaf, dev_item);
device->sector_size = btrfs_device_sector_size(leaf, dev_item);
+ WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
+ device->is_tgtdev_for_dev_replace = 0;
ptr = (unsigned long)btrfs_device_uuid(dev_item);
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
@@ -4538,7 +5108,7 @@ static int read_one_dev(struct btrfs_root *root,
return ret;
}
- device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+ device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
if (!device || !device->bdev) {
if (!btrfs_test_opt(root, DEGRADED))
return -EIO;
@@ -4571,7 +5141,7 @@ static int read_one_dev(struct btrfs_root *root,
fill_device_from_item(leaf, dev_item, device);
device->dev_root = root->fs_info->dev_root;
device->in_fs_metadata = 1;
- if (device->writeable) {
+ if (device->writeable && !device->is_tgtdev_for_dev_replace) {
device->fs_devices->total_rw_bytes += device->total_bytes;
spin_lock(&root->fs_info->free_chunk_lock);
root->fs_info->free_chunk_space += device->total_bytes -
@@ -4930,7 +5500,7 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
int i;
mutex_lock(&fs_devices->device_list_mutex);
- dev = btrfs_find_device(root, stats->devid, NULL, NULL);
+ dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
@@ -4958,3 +5528,21 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
return 0;
}
+
+int btrfs_scratch_superblock(struct btrfs_device *device)
+{
+ struct buffer_head *bh;
+ struct btrfs_super_block *disk_super;
+
+ bh = btrfs_read_dev_super(device->bdev);
+ if (!bh)
+ return -EINVAL;
+ disk_super = (struct btrfs_super_block *)bh->b_data;
+
+ memset(&disk_super->magic, 0, sizeof(disk_super->magic));
+ set_buffer_dirty(bh);
+ sync_dirty_buffer(bh);
+ brelse(bh);
+
+ return 0;
+}
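A minimal sketch of what scratching the superblock achieves (hypothetical helper; it assumes, as the error path above implies, that btrfs_read_dev_super() returns NULL once the magic bytes are zeroed):

static int device_still_detected(struct btrfs_device *device)
{
	struct buffer_head *bh = btrfs_read_dev_super(device->bdev);

	if (!bh)
		return 0;	/* magic wiped: device scans skip this disk */
	brelse(bh);
	return 1;
}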
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 53c06af92e8..d3c3939ac75 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -50,6 +50,7 @@ struct btrfs_device {
int in_fs_metadata;
int missing;
int can_discard;
+ int is_tgtdev_for_dev_replace;
spinlock_t io_lock;
@@ -88,7 +89,7 @@ struct btrfs_device {
u8 uuid[BTRFS_UUID_SIZE];
/* per-device scrub information */
- struct scrub_dev *scrub_device;
+ struct scrub_ctx *scrub_device;
struct btrfs_work work;
struct rcu_head rcu;
@@ -179,6 +180,15 @@ struct btrfs_device_info {
u64 total_avail;
};
+struct btrfs_raid_attr {
+ int sub_stripes; /* sub_stripes info for map */
+ int dev_stripes; /* stripes per dev */
+ int devs_max; /* max devs to use */
+ int devs_min; /* min devs needed */
+ int devs_increment; /* ndevs has to be a multiple of this */
+ int ncopies; /* how many copies the data has */
+};
+
struct map_lookup {
u64 type;
int io_align;
@@ -248,7 +258,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 chunk_tree, u64 chunk_objectid,
u64 chunk_offset, u64 start, u64 num_bytes);
-int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -267,19 +277,27 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
-void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
+void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
+ struct btrfs_fs_devices *fs_devices, int step);
+int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+ char *device_path,
+ struct btrfs_device **device);
+int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+ struct btrfs_device **device);
int btrfs_add_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_root *root, char *device_path);
void btrfs_cleanup_fs_uuids(void);
-int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
+int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
-struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
+struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
+int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+ struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
@@ -296,6 +314,13 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
+void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *srcdev);
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *tgtdev);
+void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *tgtdev);
+int btrfs_scratch_superblock(struct btrfs_device *device);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 3f4e2d69e83..446a6848c55 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -122,6 +122,16 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
*/
if (!value)
goto out;
+ } else {
+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
+ name, name_len, 0);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ if (!di && !value)
+ goto out;
+ btrfs_release_path(path);
}
again:
@@ -198,6 +208,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
inode_inc_iversion(inode);
inode->i_ctime = CURRENT_TIME;
+ set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
out:
@@ -265,7 +276,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
if (verify_dir_item(root, leaf, di))
- continue;
+ goto next;
name_len = btrfs_dir_name_len(leaf, di);
total_size += name_len + 1;
diff --git a/fs/buffer.c b/fs/buffer.c
index 6e9ed48064f..c017a2dfb90 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -46,8 +46,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-inline void
-init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
+void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
bh->b_end_io = handler;
bh->b_private = private;
@@ -850,13 +849,10 @@ try_again:
if (!bh)
goto no_grow;
- bh->b_bdev = NULL;
bh->b_this_page = head;
bh->b_blocknr = -1;
head = bh;
- bh->b_state = 0;
- atomic_set(&bh->b_count, 0);
bh->b_size = size;
/* Link the buffer to its page */
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 67bef6d0148..746ce532e13 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -41,12 +41,12 @@ static struct fscache_object *cachefiles_alloc_object(
_enter("{%s},%p,", cache->cache.identifier, cookie);
- lookup_data = kmalloc(sizeof(*lookup_data), GFP_KERNEL);
+ lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
if (!lookup_data)
goto nomem_lookup_data;
/* create a new object record and a temporary leaf image */
- object = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
+ object = kmem_cache_alloc(cachefiles_object_jar, cachefiles_gfp);
if (!object)
goto nomem_object;
@@ -63,7 +63,7 @@ static struct fscache_object *cachefiles_alloc_object(
* - stick the length on the front and leave space on the back for the
* encoder
*/
- buffer = kmalloc((2 + 512) + 3, GFP_KERNEL);
+ buffer = kmalloc((2 + 512) + 3, cachefiles_gfp);
if (!buffer)
goto nomem_buffer;
@@ -219,7 +219,7 @@ static void cachefiles_update_object(struct fscache_object *_object)
return;
}
- auxdata = kmalloc(2 + 512 + 3, GFP_KERNEL);
+ auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
if (!auxdata) {
_leave(" [nomem]");
return;
@@ -441,6 +441,54 @@ truncate_failed:
}
/*
+ * Invalidate an object
+ */
+static void cachefiles_invalidate_object(struct fscache_operation *op)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
+ struct path path;
+ uint64_t ni_size;
+ int ret;
+
+ object = container_of(op->object, struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ op->object->cookie->def->get_attr(op->object->cookie->netfs_data,
+ &ni_size);
+
+ _enter("{OBJ%x},[%llu]",
+ op->object->debug_id, (unsigned long long)ni_size);
+
+ if (object->backer) {
+ ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+
+ fscache_set_store_limit(&object->fscache, ni_size);
+
+ path.dentry = object->backer;
+ path.mnt = cache->mnt;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = vfs_truncate(&path, 0);
+ if (ret == 0)
+ ret = vfs_truncate(&path, ni_size);
+ cachefiles_end_secure(cache, saved_cred);
+
+ if (ret != 0) {
+ fscache_set_store_limit(&object->fscache, 0);
+ if (ret == -EIO)
+ cachefiles_io_error_obj(object,
+ "Invalidate failed");
+ }
+ }
+
+ fscache_op_complete(op, true);
+ _leave("");
+}
+
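Why cachefiles_invalidate_object() above truncates twice, sketched with made-up sizes:

/*
 * Illustrative only, not part of the patch:
 *
 *   before invalidation          : backing file holds stale cached data
 *   vfs_truncate(&path, 0)       : old blocks are discarded, i_size = 0
 *   vfs_truncate(&path, ni_size) : i_size grows back to the netfs
 *                                  attribute size; the file is now sparse
 *                                  and ready to be refilled.
 */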
+/*
* dissociate a cache from all the pages it was backing
*/
static void cachefiles_dissociate_pages(struct fscache_cache *cache)
@@ -455,6 +503,7 @@ const struct fscache_cache_ops cachefiles_cache_ops = {
.lookup_complete = cachefiles_lookup_complete,
.grab_object = cachefiles_grab_object,
.update_object = cachefiles_update_object,
+ .invalidate_object = cachefiles_invalidate_object,
.drop_object = cachefiles_drop_object,
.put_object = cachefiles_put_object,
.sync_cache = cachefiles_sync_cache,
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index bd6bc1bde2d..49382519907 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -23,6 +23,8 @@ extern unsigned cachefiles_debug;
#define CACHEFILES_DEBUG_KLEAVE 2
#define CACHEFILES_DEBUG_KDEBUG 4
+#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+
/*
* node records
*/
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index 81b8b2b3a67..33b58c60f2d 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -78,7 +78,7 @@ char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
_debug("max: %d", max);
- key = kmalloc(max, GFP_KERNEL);
+ key = kmalloc(max, cachefiles_gfp);
if (!key)
return NULL;
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index b0b5f7cdfff..8c01c5fcdf7 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -40,8 +40,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
prefix, fscache_object_states[object->fscache.state],
object->fscache.flags, work_busy(&object->fscache.work),
- object->fscache.events,
- object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+ object->fscache.events, object->fscache.event_mask);
printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
prefix, object->fscache.n_ops, object->fscache.n_in_progress,
object->fscache.n_exclusive);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c994691d944..48099225970 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -77,25 +77,25 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
struct page *backpage = monitor->back_page, *backpage2;
int ret;
- kenter("{ino=%lx},{%lx,%lx}",
+ _enter("{ino=%lx},{%lx,%lx}",
object->backer->d_inode->i_ino,
backpage->index, backpage->flags);
/* skip if the page was truncated away completely */
if (backpage->mapping != bmapping) {
- kleave(" = -ENODATA [mapping]");
+ _leave(" = -ENODATA [mapping]");
return -ENODATA;
}
backpage2 = find_get_page(bmapping, backpage->index);
if (!backpage2) {
- kleave(" = -ENODATA [gone]");
+ _leave(" = -ENODATA [gone]");
return -ENODATA;
}
if (backpage != backpage2) {
put_page(backpage2);
- kleave(" = -ENODATA [different]");
+ _leave(" = -ENODATA [different]");
return -ENODATA;
}
@@ -114,7 +114,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
if (PageUptodate(backpage))
goto unlock_discard;
- kdebug("reissue read");
+ _debug("reissue read");
ret = bmapping->a_ops->readpage(NULL, backpage);
if (ret < 0)
goto unlock_discard;
@@ -129,7 +129,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
}
/* it'll reappear on the todo list */
- kleave(" = -EINPROGRESS");
+ _leave(" = -EINPROGRESS");
return -EINPROGRESS;
unlock_discard:
@@ -137,7 +137,7 @@ unlock_discard:
spin_lock_irq(&object->work_lock);
list_del(&monitor->op_link);
spin_unlock_irq(&object->work_lock);
- kleave(" = %d", ret);
+ _leave(" = %d", ret);
return ret;
}
@@ -174,11 +174,13 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
_debug("- copy {%lu}", monitor->back_page->index);
recheck:
- if (PageUptodate(monitor->back_page)) {
+ if (test_bit(FSCACHE_COOKIE_INVALIDATING,
+ &object->fscache.cookie->flags)) {
+ error = -ESTALE;
+ } else if (PageUptodate(monitor->back_page)) {
copy_highpage(monitor->netfs_page, monitor->back_page);
-
- pagevec_add(&pagevec, monitor->netfs_page);
- fscache_mark_pages_cached(monitor->op, &pagevec);
+ fscache_mark_page_cached(monitor->op,
+ monitor->netfs_page);
error = 0;
} else if (!PageError(monitor->back_page)) {
/* the page has probably been truncated */
@@ -198,6 +200,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
fscache_end_io(op, monitor->netfs_page, error);
page_cache_release(monitor->netfs_page);
+ fscache_retrieval_complete(op, 1);
fscache_put_retrieval(op);
kfree(monitor);
@@ -239,7 +242,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
_debug("read back %p{%lu,%d}",
netpage, netpage->index, page_count(netpage));
- monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+ monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
if (!monitor)
goto nomem;
@@ -258,13 +261,14 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
goto backing_page_already_present;
if (!newpage) {
- newpage = page_cache_alloc_cold(bmapping);
+ newpage = __page_cache_alloc(cachefiles_gfp |
+ __GFP_COLD);
if (!newpage)
goto nomem_monitor;
}
ret = add_to_page_cache(newpage, bmapping,
- netpage->index, GFP_KERNEL);
+ netpage->index, cachefiles_gfp);
if (ret == 0)
goto installed_new_backing_page;
if (ret != -EEXIST)
@@ -335,11 +339,11 @@ backing_page_already_present:
backing_page_already_uptodate:
_debug("- uptodate");
- pagevec_add(pagevec, netpage);
- fscache_mark_pages_cached(op, pagevec);
+ fscache_mark_page_cached(op, netpage);
copy_highpage(netpage, backpage);
fscache_end_io(op, netpage, 0);
+ fscache_retrieval_complete(op, 1);
success:
_debug("success");
@@ -357,10 +361,13 @@ out:
read_error:
_debug("read error %d", ret);
- if (ret == -ENOMEM)
+ if (ret == -ENOMEM) {
+ fscache_retrieval_complete(op, 1);
goto out;
+ }
io_error:
cachefiles_io_error_obj(object, "Page read error on backing file");
+ fscache_retrieval_complete(op, 1);
ret = -ENOBUFS;
goto out;
@@ -370,6 +377,7 @@ nomem_monitor:
fscache_put_retrieval(monitor->op);
kfree(monitor);
nomem:
+ fscache_retrieval_complete(op, 1);
_leave(" = -ENOMEM");
return -ENOMEM;
}
@@ -408,7 +416,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
_enter("{%p},{%lx},,,", object, page->index);
if (!object->backer)
- return -ENOBUFS;
+ goto enobufs;
inode = object->backer->d_inode;
ASSERT(S_ISREG(inode->i_mode));
@@ -417,7 +425,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
/* calculate the shift required to use bmap */
if (inode->i_sb->s_blocksize > PAGE_SIZE)
- return -ENOBUFS;
+ goto enobufs;
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -448,15 +456,20 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
&pagevec);
} else if (cachefiles_has_space(cache, 0, 1) == 0) {
/* there's space in the cache we can use */
- pagevec_add(&pagevec, page);
- fscache_mark_pages_cached(op, &pagevec);
+ fscache_mark_page_cached(op, page);
+ fscache_retrieval_complete(op, 1);
ret = -ENODATA;
} else {
- ret = -ENOBUFS;
+ goto enobufs;
}
_leave(" = %d", ret);
return ret;
+
+enobufs:
+ fscache_retrieval_complete(op, 1);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
}
/*
@@ -465,8 +478,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
*/
static int cachefiles_read_backing_file(struct cachefiles_object *object,
struct fscache_retrieval *op,
- struct list_head *list,
- struct pagevec *mark_pvec)
+ struct list_head *list)
{
struct cachefiles_one_read *monitor = NULL;
struct address_space *bmapping = object->backer->d_inode->i_mapping;
@@ -485,7 +497,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
netpage, netpage->index, page_count(netpage));
if (!monitor) {
- monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+ monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
if (!monitor)
goto nomem;
@@ -500,13 +512,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
goto backing_page_already_present;
if (!newpage) {
- newpage = page_cache_alloc_cold(bmapping);
+ newpage = __page_cache_alloc(cachefiles_gfp |
+ __GFP_COLD);
if (!newpage)
goto nomem;
}
ret = add_to_page_cache(newpage, bmapping,
- netpage->index, GFP_KERNEL);
+ netpage->index, cachefiles_gfp);
if (ret == 0)
goto installed_new_backing_page;
if (ret != -EEXIST)
@@ -536,10 +549,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
_debug("- monitor add");
ret = add_to_page_cache(netpage, op->mapping, netpage->index,
- GFP_KERNEL);
+ cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
page_cache_release(netpage);
+ fscache_retrieval_complete(op, 1);
continue;
}
goto nomem;
@@ -612,10 +626,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
_debug("- uptodate");
ret = add_to_page_cache(netpage, op->mapping, netpage->index,
- GFP_KERNEL);
+ cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
page_cache_release(netpage);
+ fscache_retrieval_complete(op, 1);
continue;
}
goto nomem;
@@ -626,16 +641,17 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
page_cache_release(backpage);
backpage = NULL;
- if (!pagevec_add(mark_pvec, netpage))
- fscache_mark_pages_cached(op, mark_pvec);
+ fscache_mark_page_cached(op, netpage);
page_cache_get(netpage);
if (!pagevec_add(&lru_pvec, netpage))
__pagevec_lru_add_file(&lru_pvec);
+ /* the netpage is unlocked and marked up to date here */
fscache_end_io(op, netpage, 0);
page_cache_release(netpage);
netpage = NULL;
+ fscache_retrieval_complete(op, 1);
continue;
}
@@ -661,6 +677,7 @@ out:
list_for_each_entry_safe(netpage, _n, list, lru) {
list_del(&netpage->lru);
page_cache_release(netpage);
+ fscache_retrieval_complete(op, 1);
}
_leave(" = %d", ret);
@@ -669,15 +686,17 @@ out:
nomem:
_debug("nomem");
ret = -ENOMEM;
- goto out;
+ goto record_page_complete;
read_error:
_debug("read error %d", ret);
if (ret == -ENOMEM)
- goto out;
+ goto record_page_complete;
io_error:
cachefiles_io_error_obj(object, "Page read error on backing file");
ret = -ENOBUFS;
+record_page_complete:
+ fscache_retrieval_complete(op, 1);
goto out;
}
@@ -709,7 +728,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
*nr_pages);
if (!object->backer)
- return -ENOBUFS;
+ goto all_enobufs;
space = 1;
if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
@@ -722,7 +741,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
/* calculate the shift required to use bmap */
if (inode->i_sb->s_blocksize > PAGE_SIZE)
- return -ENOBUFS;
+ goto all_enobufs;
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -762,7 +781,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
nrbackpages++;
} else if (space && pagevec_add(&pagevec, page) == 0) {
fscache_mark_pages_cached(op, &pagevec);
+ fscache_retrieval_complete(op, 1);
ret = -ENODATA;
+ } else {
+ fscache_retrieval_complete(op, 1);
}
}
@@ -775,18 +797,18 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
/* submit the apparently valid pages to the backing fs to be read from
* disk */
if (nrbackpages > 0) {
- ret2 = cachefiles_read_backing_file(object, op, &backpages,
- &pagevec);
+ ret2 = cachefiles_read_backing_file(object, op, &backpages);
if (ret2 == -ENOMEM || ret2 == -EINTR)
ret = ret2;
}
- if (pagevec_count(&pagevec) > 0)
- fscache_mark_pages_cached(op, &pagevec);
-
_leave(" = %d [nr=%u%s]",
ret, *nr_pages, list_empty(pages) ? " empty" : "");
return ret;
+
+all_enobufs:
+ fscache_retrieval_complete(op, *nr_pages);
+ return -ENOBUFS;
}
/*
@@ -806,7 +828,6 @@ int cachefiles_allocate_page(struct fscache_retrieval *op,
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
- struct pagevec pagevec;
int ret;
object = container_of(op->op.object,
@@ -817,14 +838,12 @@ int cachefiles_allocate_page(struct fscache_retrieval *op,
_enter("%p,{%lx},", object, page->index);
ret = cachefiles_has_space(cache, 0, 1);
- if (ret == 0) {
- pagevec_init(&pagevec, 0);
- pagevec_add(&pagevec, page);
- fscache_mark_pages_cached(op, &pagevec);
- } else {
+ if (ret == 0)
+ fscache_mark_page_cached(op, page);
+ else
ret = -ENOBUFS;
- }
+ fscache_retrieval_complete(op, 1);
_leave(" = %d", ret);
return ret;
}
@@ -874,6 +893,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
ret = -ENOBUFS;
}
+ fscache_retrieval_complete(op, *nr_pages);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index e18b183b47e..73b46288b54 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -174,7 +174,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
ASSERT(dentry);
ASSERT(dentry->d_inode);
- auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
+ auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp);
if (!auxbuf) {
_leave(" = -ENOMEM");
return -ENOMEM;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6690269f5dd..064d1a68d2c 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -267,6 +267,14 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
kfree(req->r_pages);
}
+static void ceph_unlock_page_vector(struct page **pages, int num_pages)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ unlock_page(pages[i]);
+}
+
/*
* start an async read(ahead) operation. return nr_pages we submitted
* a read for on success, or negative error code.
@@ -347,6 +355,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
return nr_pages;
out_pages:
+ ceph_unlock_page_vector(pages, nr_pages);
ceph_release_page_vector(pages, nr_pages);
out:
ceph_osdc_put_request(req);
@@ -1078,23 +1087,51 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_file_info *fi = file->private_data;
struct page *page;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- int r;
+ int r, want, got = 0;
+
+ if (fi->fmode & CEPH_FILE_MODE_LAZY)
+ want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
+ else
+ want = CEPH_CAP_FILE_BUFFER;
+
+ dout("write_begin %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
+ inode, ceph_vinop(inode), pos, len, inode->i_size);
+ r = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos+len);
+ if (r < 0)
+ return r;
+ dout("write_begin %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, len, ceph_cap_string(got));
+ if (!(got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO))) {
+ ceph_put_cap_refs(ci, got);
+ return -EAGAIN;
+ }
do {
/* get a page */
page = grab_cache_page_write_begin(mapping, index, 0);
- if (!page)
- return -ENOMEM;
- *pagep = page;
+ if (!page) {
+ r = -ENOMEM;
+ break;
+ }
dout("write_begin file %p inode %p page %p %d~%d\n", file,
inode, page, (int)pos, (int)len);
r = ceph_update_writeable_page(file, pos, len, page);
+ if (r)
+ page_cache_release(page);
} while (r == -EAGAIN);
+ if (r) {
+ ceph_put_cap_refs(ci, got);
+ } else {
+ *pagep = page;
+ *(int *)fsdata = got;
+ }
return r;
}
@@ -1108,10 +1145,12 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
int check_cap = 0;
+ int got = (unsigned long)fsdata;
dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
inode, page, (int)pos, (int)copied, (int)len);
@@ -1134,6 +1173,19 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
up_read(&mdsc->snap_rwsem);
page_cache_release(page);
+ if (copied > 0) {
+ int dirty;
+ spin_lock(&ci->i_ceph_lock);
+ dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+ spin_unlock(&ci->i_ceph_lock);
+ if (dirty)
+ __mark_inode_dirty(inode, dirty);
+ }
+
+ dout("write_end %p %llx.%llx %llu~%u dropping cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, len, ceph_cap_string(got));
+ ceph_put_cap_refs(ci, got);
+
if (check_cap)
ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
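
In the ceph hunks above, write_begin now acquires the cap references itself and hands the "got" bitmask to write_end through the opaque fsdata cookie. The sketch below illustrates the general idea of passing a small integer through a void * cookie between a begin/end pair; the function names are made up and the casts are the conventional pointer-as-integer form, not necessarily the exact ones used in the patch.

#include <stdio.h>

/* "begin" stores a small integer into the caller-provided cookie slot. */
static int demo_write_begin(void **fsdata)
{
        int got = 0x30;                         /* pretend cap bits we acquired */

        *fsdata = (void *)(unsigned long)got;   /* stash it in the opaque slot */
        return 0;
}

/* "end" recovers the integer from the cookie and releases what was taken. */
static void demo_write_end(void *fsdata)
{
        int got = (int)(unsigned long)fsdata;

        printf("releasing refs for 0x%x\n", got);
}

int main(void)
{
        void *cookie = NULL;

        if (demo_write_begin(&cookie) == 0)
                demo_write_end(cookie);
        return 0;
}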
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 3251e9cc640..a1d9bb30c1b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -236,8 +236,10 @@ static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
if (!ctx) {
cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
if (cap) {
+ spin_lock(&mdsc->caps_list_lock);
mdsc->caps_use_count++;
mdsc->caps_total_count++;
+ spin_unlock(&mdsc->caps_list_lock);
}
return cap;
}
@@ -1349,11 +1351,15 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
if (!ci->i_head_snapc)
ci->i_head_snapc = ceph_get_snap_context(
ci->i_snap_realm->cached_context);
- dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
- ci->i_head_snapc);
+ dout(" inode %p now dirty snapc %p auth cap %p\n",
+ &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ if (ci->i_auth_cap)
+ list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ else
+ list_add(&ci->i_dirty_item,
+ &mdsc->cap_dirty_migrating);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
ihold(inode);
@@ -2388,7 +2394,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
&atime);
/* max size increase? */
- if (max_size != ci->i_max_size) {
+ if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
ci->i_max_size = max_size;
if (max_size >= ci->i_wanted_max_size) {
@@ -2745,6 +2751,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
/* make sure we re-request max_size, if necessary */
spin_lock(&ci->i_ceph_lock);
+ ci->i_wanted_max_size = 0; /* reset */
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
}
@@ -2840,8 +2847,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_IMPORT:
handle_cap_import(mdsc, inode, h, session,
snaptrace, snaptrace_len);
- ceph_check_caps(ceph_inode(inode), 0, session);
- goto done_unlocked;
}
/* the rest require a cap */
@@ -2858,6 +2863,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
+ case CEPH_CAP_OP_IMPORT:
handle_cap_grant(inode, h, session, cap, msg->middle);
goto done_unlocked;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index e5b77319c97..8c1aabe93b6 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -454,7 +454,7 @@ static void reset_readdir(struct ceph_file_info *fi)
fi->flags &= ~CEPH_F_ATEND;
}
-static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
+static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
@@ -463,7 +463,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&inode->i_mutex);
retval = -EINVAL;
- switch (origin) {
+ switch (whence) {
case SEEK_END:
offset += inode->i_size + 2; /* FIXME */
break;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 9349bb37a2f..ca3ab3f9ca7 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -56,13 +56,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct ceph_nfs_confh *cfh = (void *)rawfh;
int connected_handle_length = sizeof(*cfh)/4;
int handle_length = sizeof(*fh)/4;
- struct dentry *dentry = d_find_alias(inode);
+ struct dentry *dentry;
struct dentry *parent;
/* don't re-export snaps */
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EINVAL;
+ dentry = d_find_alias(inode);
+
/* if we found an alias, generate a connectable fh */
if (*max_len >= connected_handle_length && dentry) {
dout("encode_fh %p connectable\n", dentry);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 5840d2aaed1..e51558fca3a 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -712,63 +712,53 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
loff_t endoff = pos + iov->iov_len;
- int want, got = 0;
- int ret, err;
+ int got = 0;
+ int ret, err, written;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
retry_snap:
+ written = 0;
if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
return -ENOSPC;
__ceph_do_pending_vmtruncate(inode);
- dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
- inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
- inode->i_size);
- if (fi->fmode & CEPH_FILE_MODE_LAZY)
- want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
- else
- want = CEPH_CAP_FILE_BUFFER;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
- if (ret < 0)
- goto out_put;
-
- dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
- inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
- ceph_cap_string(got));
-
- if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
- (iocb->ki_filp->f_flags & O_DIRECT) ||
- (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
- (fi->flags & CEPH_F_SYNC)) {
- ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
- &iocb->ki_pos);
- } else {
- /*
- * buffered write; drop Fw early to avoid slow
- * revocation if we get stuck on balance_dirty_pages
- */
- int dirty;
-
- spin_lock(&ci->i_ceph_lock);
- dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&ci->i_ceph_lock);
- ceph_put_cap_refs(ci, got);
+ /*
+ * try to do a buffered write. if we don't have sufficient
+ * caps, we'll get -EAGAIN from generic_file_aio_write, or a
+ * short write if we only get caps for some pages.
+ */
+ if (!(iocb->ki_filp->f_flags & O_DIRECT) &&
+ !(inode->i_sb->s_flags & MS_SYNCHRONOUS) &&
+ !(fi->flags & CEPH_F_SYNC)) {
ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+ if (ret >= 0)
+ written = ret;
+
if ((ret >= 0 || ret == -EIOCBQUEUED) &&
((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
|| ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
- err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
+ err = vfs_fsync_range(file, pos, pos + written - 1, 1);
if (err < 0)
ret = err;
}
+ if ((ret < 0 && ret != -EAGAIN) || pos + written >= endoff)
+ goto out;
+ }
- if (dirty)
- __mark_inode_dirty(inode, dirty);
+ dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
+ inode, ceph_vinop(inode), pos + written,
+ (unsigned)iov->iov_len - written, inode->i_size);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, 0, &got, endoff);
+ if (ret < 0)
goto out;
- }
+ dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos + written,
+ (unsigned)iov->iov_len - written, ceph_cap_string(got));
+ ret = ceph_sync_write(file, iov->iov_base + written,
+ iov->iov_len - written, &iocb->ki_pos);
if (ret >= 0) {
int dirty;
spin_lock(&ci->i_ceph_lock);
@@ -777,13 +767,10 @@ retry_snap:
if (dirty)
__mark_inode_dirty(inode, dirty);
}
-
-out_put:
dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
- inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
- ceph_cap_string(got));
+ inode, ceph_vinop(inode), pos + written,
+ (unsigned)iov->iov_len - written, ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
-
out:
if (ret == -EOLDSNAPC) {
dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
@@ -797,7 +784,7 @@ out:
/*
* llseek. be sure to verify file size on SEEK_END.
*/
-static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
+static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret;
@@ -805,7 +792,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&inode->i_mutex);
__ceph_do_pending_vmtruncate(inode);
- if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
+ if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
if (ret < 0) {
offset = ret;
@@ -813,7 +800,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
}
}
- switch (origin) {
+ switch (whence) {
case SEEK_END:
offset += inode->i_size;
break;
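
The origin -> whence renames in ceph (and later in cifs, configfs, ext3 and ext4) only change the parameter name of the ->llseek implementations; the meaning of the third lseek argument is unchanged. For reference, a small userspace program exercising the common whence values against a temporary file (SEEK_DATA/SEEK_HOLE are left to a comment, since they need filesystem support):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char tmpl[] = "/tmp/whence-demo-XXXXXX";
        int fd = mkstemp(tmpl);
        if (fd < 0) {
                perror("mkstemp");
                return 1;
        }
        unlink(tmpl);

        const char msg[] = "hello, llseek";
        if (write(fd, msg, sizeof(msg) - 1) < 0)
                perror("write");

        /* whence selects the reference point for the offset. */
        printf("SEEK_SET 0  -> %lld\n", (long long)lseek(fd, 0, SEEK_SET));
        printf("SEEK_CUR +5 -> %lld\n", (long long)lseek(fd, 5, SEEK_CUR));
        printf("SEEK_END 0  -> %lld\n", (long long)lseek(fd, 0, SEEK_END));
        /* SEEK_DATA / SEEK_HOLE additionally consult the file's extent map,
         * which is why ceph_llseek revalidates the size first. */

        close(fd);
        return 0;
}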
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ba95eea201b..2971eaa65cd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1466,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
u64 to;
- int wrbuffer_refs, wake = 0;
+ int wrbuffer_refs, finish = 0;
retry:
spin_lock(&ci->i_ceph_lock);
@@ -1498,15 +1498,18 @@ retry:
truncate_inode_pages(inode->i_mapping, to);
spin_lock(&ci->i_ceph_lock);
- ci->i_truncate_pending--;
- if (ci->i_truncate_pending == 0)
- wake = 1;
+ if (to == ci->i_truncate_size) {
+ ci->i_truncate_pending = 0;
+ finish = 1;
+ }
spin_unlock(&ci->i_ceph_lock);
+ if (!finish)
+ goto retry;
if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
- if (wake)
- wake_up_all(&ci->i_cap_wq);
+
+ wake_up_all(&ci->i_cap_wq);
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1bcf712655d..9165eb8309e 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1590,7 +1590,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
} else if (rpath || rino) {
*ino = rino;
*ppath = rpath;
- *pathlen = strlen(rpath);
+ *pathlen = rpath ? strlen(rpath) : 0;
dout(" path %.*s\n", *pathlen, rpath);
}
@@ -1876,9 +1876,14 @@ finish:
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head)
{
- struct ceph_mds_request *req, *nreq;
+ struct ceph_mds_request *req;
+ LIST_HEAD(tmp_list);
+
+ list_splice_init(head, &tmp_list);
- list_for_each_entry_safe(req, nreq, head, r_wait) {
+ while (!list_empty(&tmp_list)) {
+ req = list_entry(tmp_list.next,
+ struct ceph_mds_request, r_wait);
list_del_init(&req->r_wait);
__do_request(mdsc, req);
}
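
__wake_requests() now detaches the whole waiting list onto a private tmp_list before calling __do_request(), so a handler that re-queues a request onto the original head cannot corrupt or prolong the walk. A bare-bones userspace sketch of the same "splice, then drain" idiom, using a plain singly linked list and a mutex in place of the kernel list_head machinery (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
        int id;
        struct req *next;
};

static struct req *pending;                     /* shared wait list */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_req(int id)
{
        struct req *r = malloc(sizeof(*r));
        if (!r)
                return;
        r->id = id;
        pthread_mutex_lock(&pending_lock);
        r->next = pending;
        pending = r;
        pthread_mutex_unlock(&pending_lock);
}

static void wake_requests(void)
{
        /* Detach the whole list in one step... */
        pthread_mutex_lock(&pending_lock);
        struct req *tmp = pending;
        pending = NULL;
        pthread_mutex_unlock(&pending_lock);

        /* ...then walk the private copy; a handler may call queue_req()
         * again without disturbing this traversal. */
        while (tmp) {
                struct req *r = tmp;
                tmp = r->next;
                printf("handling request %d\n", r->id);
                free(r);
        }
}

int main(void)
{
        queue_req(1);
        queue_req(2);
        queue_req(3);
        wake_requests();
        return 0;
}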
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2eb43f21132..e86aa994812 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -403,8 +403,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
- if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
- seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
seq_printf(m, ",osdkeepalivetimeout=%d",
opt->osd_keepalive_timeout);
@@ -849,7 +847,7 @@ static int ceph_register_bdi(struct super_block *sb,
fsc->backing_dev_info.ra_pages =
default_backing_dev_info.ra_pages;
- err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
+ err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
atomic_long_inc_return(&bdi_seq));
if (!err)
sb->s_bdi = &fsc->backing_dev_info;
diff --git a/fs/cifs/README b/fs/cifs/README
index 22ab7b5b8da..2d5622f60e1 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -480,7 +480,7 @@ A partial list of the supported mount options follows:
Unicode on the wire.
nomapchars Do not translate any of these seven characters (default).
nocase Request case insensitive path name matching (case
- sensitive is the default if the server suports it).
+ sensitive is the default if the server supports it).
(mount option "ignorecase" is identical to "nocase")
posixpaths If CIFS Unix extensions are supported, attempt to
negotiate posix path name support which allows certain
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 86e92ef2abc..69ae3d3c3b3 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -37,7 +37,6 @@ void dump_smb(void *, int);
#define CIFS_TIMER 0x04
extern int cifsFYI;
-extern int cifsERROR;
/*
* debug ON
@@ -64,10 +63,7 @@ do { \
/* error event message: e.g., i/o error */
#define cifserror(fmt, ...) \
-do { \
- if (cifsERROR) \
- printk(KERN_ERR "CIFS VFS: " fmt "\n", ##__VA_ARGS__); \
-} while (0)
+ printk(KERN_ERR "CIFS VFS: " fmt "\n", ##__VA_ARGS__); \
#define cERROR(set, fmt, ...) \
do { \
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 75c1ee69914..5cbd00e7406 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -346,19 +346,15 @@ init_cifs_idmap(void)
if (!cred)
return -ENOMEM;
- keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ,
- KEY_ALLOC_NOT_IN_QUOTA);
+ keyring = keyring_alloc(".cifs_idmap", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto failed_put_cred;
}
- ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
- if (ret < 0)
- goto failed_put_key;
-
ret = register_key_type(&cifs_idmap_key_type);
if (ret < 0)
goto failed_put_key;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 210f0af83fc..f653835d067 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -54,7 +54,6 @@
#endif
int cifsFYI = 0;
-int cifsERROR = 1;
int traceSMB = 0;
bool enable_oplocks = true;
unsigned int linuxExtEnabled = 1;
@@ -695,13 +694,13 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return written;
}
-static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
+static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
/*
- * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+ * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
* the cached file length
*/
- if (origin != SEEK_SET && origin != SEEK_CUR) {
+ if (whence != SEEK_SET && whence != SEEK_CUR) {
int rc;
struct inode *inode = file->f_path.dentry->d_inode;
@@ -728,7 +727,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
if (rc < 0)
return (loff_t)rc;
}
- return generic_file_llseek(file, offset, origin);
+ return generic_file_llseek(file, offset, whence);
}
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7635b5db26a..17c3643e595 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1624,14 +1624,11 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_unc:
string = vol->UNC;
vol->UNC = match_strdup(args);
- if (vol->UNC == NULL) {
- kfree(string);
+ if (vol->UNC == NULL)
goto out_nomem;
- }
convert_delimiter(vol->UNC, '\\');
if (vol->UNC[0] != '\\' || vol->UNC[1] != '\\') {
- kfree(string);
printk(KERN_ERR "CIFS: UNC Path does not "
"begin with // or \\\\\n");
goto cifs_parse_mount_err;
@@ -1687,10 +1684,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
string = vol->prepath;
vol->prepath = match_strdup(args);
- if (vol->prepath == NULL) {
- kfree(string);
+ if (vol->prepath == NULL)
goto out_nomem;
- }
/* Compare old prefixpath= option to new one */
if (!string || strcmp(string, vol->prepath))
printk(KERN_WARNING "CIFS: the value of the "
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 6002fdc920a..cdd6ff48246 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -78,6 +78,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
struct dentry *dentry, *alias;
struct inode *inode;
struct super_block *sb = parent->d_inode->i_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
cFYI(1, "%s: for %s", __func__, name->name);
@@ -91,10 +92,20 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
int err;
inode = dentry->d_inode;
- /* update inode in place if i_ino didn't change */
- if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
- cifs_fattr_to_inode(inode, fattr);
- goto out;
+ if (inode) {
+ /*
+ * If we're generating inode numbers, then we don't
+ * want to clobber the existing one with the one that
+ * the readdir code created.
+ */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
+ fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
+
+ /* update inode in place if i_ino didn't change */
+ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ cifs_fattr_to_inode(inode, fattr);
+ goto out;
+ }
}
err = d_invalidate(dentry);
dput(dentry);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 7414ae24a79..712b10f64c7 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1613,12 +1613,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
return 0;
}
-static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
+static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry * dentry = file->f_path.dentry;
mutex_lock(&dentry->d_inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case 1:
offset += file->f_pos;
case 0:
diff --git a/fs/dcache.c b/fs/dcache.c
index 3a463d0c4fe..19153a0a810 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -455,24 +455,6 @@ void d_drop(struct dentry *dentry)
EXPORT_SYMBOL(d_drop);
/*
- * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
- * @dentry: dentry to drop
- *
- * This is called when we do a lookup on a placeholder dentry that needed to be
- * looked up. The dentry should have been hashed in order for it to be found by
- * the lookup code, but now needs to be unhashed while we do the actual lookup
- * and clear the DCACHE_NEED_LOOKUP flag.
- */
-void d_clear_need_lookup(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- __d_drop(dentry);
- dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL(d_clear_need_lookup);
-
-/*
* Finish off a dentry we've decided to kill.
* dentry->d_lock must be held, returns with it unlocked.
* If ref is non-zero, then decrement the refcount too.
@@ -565,13 +547,7 @@ repeat:
if (d_unhashed(dentry))
goto kill_it;
- /*
- * If this dentry needs lookup, don't set the referenced flag so that it
- * is more likely to be cleaned up by the dcache shrinker in case of
- * memory pressure.
- */
- if (!d_need_lookup(dentry))
- dentry->d_flags |= DCACHE_REFERENCED;
+ dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
dentry->d_count--;
@@ -1583,7 +1559,7 @@ EXPORT_SYMBOL(d_find_any_alias);
*/
struct dentry *d_obtain_alias(struct inode *inode)
{
- static const struct qstr anonstring = { .name = "" };
+ static const struct qstr anonstring = QSTR_INIT("/", 1);
struct dentry *tmp;
struct dentry *res;
@@ -1737,13 +1713,6 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
/*
- * We are going to instantiate this dentry, unhash it and clear the
- * lookup flag so we can do that.
- */
- if (unlikely(d_need_lookup(found)))
- d_clear_need_lookup(found);
-
- /*
* Negative dentry: instantiate it unless the inode is a directory and
* already has a dentry.
*/
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index ea993128155..a7b0c2dfb3d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1935,7 +1935,7 @@ static const unsigned char filename_rev_map[256] = {
* @src: Source location for the filename to encode
* @src_size: Size of the source in bytes
*/
-void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
+static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
unsigned char *src, size_t src_size)
{
size_t num_blocks;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 809e67d05ca..f1ea610362c 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -102,12 +102,12 @@ int __init ecryptfs_init_kthread(void)
void ecryptfs_destroy_kthread(void)
{
- struct ecryptfs_open_req *req;
+ struct ecryptfs_open_req *req, *tmp;
mutex_lock(&ecryptfs_kthread_ctl.mux);
ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
- list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list,
- kthread_ctl_list) {
+ list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,
+ kthread_ctl_list) {
list_del(&req->kthread_ctl_list);
*req->lower_file = ERR_PTR(-EIO);
complete(&req->done);
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index bd1d57f98f7..564a1fa34b9 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -338,7 +338,8 @@ static int ecryptfs_write_begin(struct file *file,
if (prev_page_end_size
>= i_size_read(page->mapping->host)) {
zero_user(page, 0, PAGE_CACHE_SIZE);
- } else {
+ SetPageUptodate(page);
+ } else if (len < PAGE_CACHE_SIZE) {
rc = ecryptfs_decrypt_page(page);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
@@ -348,8 +349,8 @@ static int ecryptfs_write_begin(struct file *file,
ClearPageUptodate(page);
goto out;
}
+ SetPageUptodate(page);
}
- SetPageUptodate(page);
}
}
/* If creating a page or more of holes, zero them out via truncate.
@@ -499,6 +500,13 @@ static int ecryptfs_write_end(struct file *file,
}
goto out;
}
+ if (!PageUptodate(page)) {
+ if (copied < PAGE_CACHE_SIZE) {
+ rc = 0;
+ goto out;
+ }
+ SetPageUptodate(page);
+ }
/* Fills in zeros if 'to' goes beyond inode size */
rc = fill_zeros_to_end_of_page(page, to);
if (rc) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index d81b9f65408..35470d9b96e 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -19,6 +19,8 @@
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
struct eventfd_ctx {
struct kref kref;
@@ -284,7 +286,25 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
return res;
}
+#ifdef CONFIG_PROC_FS
+static int eventfd_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct eventfd_ctx *ctx = f->private_data;
+ int ret;
+
+ spin_lock_irq(&ctx->wqh.lock);
+ ret = seq_printf(m, "eventfd-count: %16llx\n",
+ (unsigned long long)ctx->count);
+ spin_unlock_irq(&ctx->wqh.lock);
+
+ return ret;
+}
+#endif
+
static const struct file_operations eventfd_fops = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = eventfd_show_fdinfo,
+#endif
.release = eventfd_release,
.poll = eventfd_poll,
.read = eventfd_read,
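
The new eventfd_show_fdinfo() exposes the pending counter through /proc/<pid>/fdinfo/<fd>. Assuming a kernel that carries this patch with CONFIG_PROC_FS enabled, a quick userspace check could look like the following; the "eventfd-count:" line simply will not appear on older kernels.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
        int efd = eventfd(0, 0);
        if (efd < 0) {
                perror("eventfd");
                return 1;
        }

        uint64_t v = 0x2a;                      /* bump the internal counter */
        if (write(efd, &v, sizeof(v)) != sizeof(v))
                perror("write");

        char path[64], line[256];
        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", efd);
        FILE *f = fopen(path, "r");
        if (f) {
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);    /* includes eventfd-count on new kernels */
                fclose(f);
        }
        close(efd);
        return 0;
}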
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cd96649bfe6..9fec1836057 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -38,6 +38,8 @@
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
/*
* LOCKING:
@@ -783,8 +785,34 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
return pollflags != -1 ? pollflags : 0;
}
+#ifdef CONFIG_PROC_FS
+static int ep_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct eventpoll *ep = f->private_data;
+ struct rb_node *rbp;
+ int ret = 0;
+
+ mutex_lock(&ep->mtx);
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
+
+ ret = seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
+ epi->ffd.fd, epi->event.events,
+ (long long)epi->event.data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&ep->mtx);
+
+ return ret;
+}
+#endif
+
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = ep_show_fdinfo,
+#endif
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
@@ -1285,7 +1313,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
- epi->event.events = event->events;
+ epi->event.events = event->events; /* need barrier below */
pt._key = event->events;
epi->event.data = event->data; /* protected by mtx */
if (epi->event.events & EPOLLWAKEUP) {
@@ -1296,6 +1324,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
}
/*
+ * The following barrier has two effects:
+ *
+ * 1) Flush epi changes above to other CPUs. This ensures
+ * we do not miss events from ep_poll_callback if an
+ * event occurs immediately after we call f_op->poll().
+ * We need this because we did not take ep->lock while
+ * changing epi above (but ep_poll_callback does take
+ * ep->lock).
+ *
+ * 2) We also need to ensure we do not miss _past_ events
+ * when calling f_op->poll(). This barrier also
+ * pairs with the barrier in wq_has_sleeper (see
+ * comments for wq_has_sleeper).
+ *
+ * This barrier will now guarantee ep_poll_callback or f_op->poll
+ * (or both) will notice the readiness of an item.
+ */
+ smp_mb();
+
+ /*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
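
Similarly, ep_show_fdinfo() prints one "tfd: ... events: ... data: ..." line per watched descriptor, and the smp_mb() added in ep_modify() is there, per the new comment, to guarantee that either ep_poll_callback() or the following f_op->poll() observes the updated event mask. A short demo that adds a pipe to an epoll set, modifies its mask (which goes through ep_modify()), and dumps the fdinfo after each step; it assumes the same kernel prerequisites as the eventfd example above.

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

static void dump_fdinfo(int fd)
{
        char path[64], line[256];
        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
        FILE *f = fopen(path, "r");
        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);            /* one "tfd: ..." line per watched fd */
        fclose(f);
}

int main(void)
{
        int pipefd[2];
        if (pipe(pipefd) < 0) {
                perror("pipe");
                return 1;
        }

        int epfd = epoll_create1(0);
        if (epfd < 0) {
                perror("epoll_create1");
                return 1;
        }

        struct epoll_event ev = { .events = EPOLLIN, .data.fd = pipefd[0] };
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
                perror("EPOLL_CTL_ADD");
                return 1;
        }
        dump_fdinfo(epfd);

        ev.events = EPOLLIN | EPOLLET;          /* exercised by the ep_modify() path */
        if (epoll_ctl(epfd, EPOLL_CTL_MOD, pipefd[0], &ev) < 0) {
                perror("EPOLL_CTL_MOD");
                return 1;
        }
        dump_fdinfo(epfd);

        close(epfd);
        close(pipefd[0]);
        close(pipefd[1]);
        return 0;
}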
diff --git a/fs/exec.c b/fs/exec.c
index 721a2992951..18c45cac368 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1175,9 +1175,24 @@ void free_bprm(struct linux_binprm *bprm)
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
+ /* If a binfmt changed the interp, free it. */
+ if (bprm->interp != bprm->filename)
+ kfree(bprm->interp);
kfree(bprm);
}
+int bprm_change_interp(char *interp, struct linux_binprm *bprm)
+{
+ /* If a binfmt changed the interp, free it first. */
+ if (bprm->interp != bprm->filename)
+ kfree(bprm->interp);
+ bprm->interp = kstrdup(interp, GFP_KERNEL);
+ if (!bprm->interp)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(bprm_change_interp);
+
/*
* install the new credentials for this executable
*/
@@ -1266,14 +1281,13 @@ int prepare_binprm(struct linux_binprm *bprm)
bprm->cred->egid = current_egid();
if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
- !current->no_new_privs) {
+ !current->no_new_privs &&
+ kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+ kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
/* Set-uid? */
if (mode & S_ISUID) {
- if (!kuid_has_mapping(bprm->cred->user_ns, inode->i_uid))
- return -EPERM;
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->euid = inode->i_uid;
-
}
/* Set-gid? */
@@ -1283,8 +1297,6 @@ int prepare_binprm(struct linux_binprm *bprm)
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
- if (!kgid_has_mapping(bprm->cred->user_ns, inode->i_gid))
- return -EPERM;
bprm->per_clear |= PER_CLEAR_ON_SETID;
bprm->cred->egid = inode->i_gid;
}
@@ -1356,6 +1368,10 @@ int search_binary_handler(struct linux_binprm *bprm)
struct linux_binfmt *fmt;
pid_t old_pid, old_vpid;
+ /* This allows 4 levels of binfmt rewrites before failing hard. */
+ if (depth > 5)
+ return -ELOOP;
+
retval = security_bprm_check(bprm);
if (retval)
return retval;
@@ -1380,12 +1396,8 @@ int search_binary_handler(struct linux_binprm *bprm)
if (!try_module_get(fmt->module))
continue;
read_unlock(&binfmt_lock);
+ bprm->recursion_depth = depth + 1;
retval = fn(bprm);
- /*
- * Restore the depth counter to its starting value
- * in this call, so we don't have to rely on every
- * load_binary function to restore it on return.
- */
bprm->recursion_depth = depth;
if (retval >= 0) {
if (depth == 0) {
@@ -1657,7 +1669,6 @@ int get_dumpable(struct mm_struct *mm)
return __get_dumpable(mm->flags);
}
-#ifdef __ARCH_WANT_SYS_EXECVE
SYSCALL_DEFINE3(execve,
const char __user *, filename,
const char __user *const __user *, argv,
@@ -1685,23 +1696,3 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return error;
}
#endif
-#endif
-
-#ifdef __ARCH_WANT_KERNEL_EXECVE
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
-{
- int ret = do_execve(filename,
- (const char __user *const __user *)argv,
- (const char __user *const __user *)envp);
- if (ret < 0)
- return ret;
-
- /*
- * We were successful. We won't be returning to our caller, but
- * instead to user space by manipulating the kernel stack.
- */
- ret_from_kernel_execve(current_pt_regs());
-}
-#endif
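
bprm_change_interp() centralizes the "replace the interpreter string" bookkeeping: the old value is freed only if it no longer aliases bprm->filename, and the new one is kstrdup()'d so free_bprm() can release it unconditionally later. The userspace analogue below follows the same ownership rule, with strdup()/free() standing in for kstrdup()/kfree() and -1 for -ENOMEM; struct binprm here is a toy, not the kernel structure.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct binprm {
        const char *filename;   /* never owned by us */
        char *interp;           /* owned only once it stops aliasing filename */
};

static int change_interp(struct binprm *b, const char *interp)
{
        /* Free the previous interpreter, but only if it was already a
         * private copy; initially interp aliases filename. */
        if (b->interp != b->filename)
                free(b->interp);
        b->interp = strdup(interp);
        return b->interp ? 0 : -1;
}

int main(void)
{
        const char *path = "/usr/local/bin/script";
        struct binprm b = { .filename = path, .interp = (char *)path };

        if (change_interp(&b, "/bin/sh") == 0)
                printf("interp -> %s\n", b.interp);
        if (change_interp(&b, "/usr/bin/env") == 0)     /* frees the old copy */
                printf("interp -> %s\n", b.interp);

        if (b.interp != b.filename)                     /* mirrors free_bprm() */
                free(b.interp);
        return 0;
}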
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index b5618104775..d1f80abd882 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -361,12 +361,12 @@ static int read_exec(struct page_collect *pcol)
return 0;
err:
- if (!pcol->read_4_write)
- _unlock_pcol_pages(pcol, ret, READ);
-
- pcol_free(pcol);
-
+ if (!pcol_copy) /* Failed before ownership transfer */
+ pcol_copy = pcol;
+ _unlock_pcol_pages(pcol_copy, ret, READ);
+ pcol_free(pcol_copy);
kfree(pcol_copy);
+
return ret;
}
@@ -676,8 +676,10 @@ static int write_exec(struct page_collect *pcol)
return 0;
err:
- _unlock_pcol_pages(pcol, ret, WRITE);
- pcol_free(pcol);
+ if (!pcol_copy) /* Failed before ownership transfer */
+ pcol_copy = pcol;
+ _unlock_pcol_pages(pcol_copy, ret, WRITE);
+ pcol_free(pcol_copy);
kfree(pcol_copy);
return ret;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 29ab099e3e0..5df4bb4aab1 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -322,10 +322,10 @@ static int export_encode_fh(struct inode *inode, struct fid *fid,
if (parent && (len < 4)) {
*max_len = 4;
- return 255;
+ return FILEID_INVALID;
} else if (len < 2) {
*max_len = 2;
- return 255;
+ return FILEID_INVALID;
}
len = 2;
@@ -341,10 +341,21 @@ static int export_encode_fh(struct inode *inode, struct fid *fid,
return type;
}
+int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
+ int *max_len, struct inode *parent)
+{
+ const struct export_operations *nop = inode->i_sb->s_export_op;
+
+ if (nop && nop->encode_fh)
+ return nop->encode_fh(inode, fid->raw, max_len, parent);
+
+ return export_encode_fh(inode, fid, max_len, parent);
+}
+EXPORT_SYMBOL_GPL(exportfs_encode_inode_fh);
+
int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
int connectable)
{
- const struct export_operations *nop = dentry->d_sb->s_export_op;
int error;
struct dentry *p = NULL;
struct inode *inode = dentry->d_inode, *parent = NULL;
@@ -357,10 +368,8 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
*/
parent = p->d_inode;
}
- if (nop->encode_fh)
- error = nop->encode_fh(inode, fid->raw, max_len, parent);
- else
- error = export_encode_fh(inode, fid, max_len, parent);
+
+ error = exportfs_encode_inode_fh(inode, fid, max_len, parent);
dput(p);
return error;
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index c8fff930790..dd91264ba94 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -296,17 +296,17 @@ static inline loff_t ext3_get_htree_eof(struct file *filp)
* NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
* will be invalid once the directory was converted into a dx directory
*/
-loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
+loff_t ext3_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
loff_t htree_max = ext3_get_htree_eof(file);
if (likely(dx_dir))
- return generic_file_llseek_size(file, offset, origin,
+ return generic_file_llseek_size(file, offset, whence,
htree_max, htree_max);
else
- return generic_file_llseek(file, offset, origin);
+ return generic_file_llseek(file, offset, whence);
}
/*
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 7e87e37a372..b176d425354 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1071,8 +1071,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
* mapped. 0 in case of a HOLE.
*/
if (err > 0) {
- if (err > 1)
- WARN_ON(1);
+ WARN_ON(err > 1);
err = 0;
}
*errp = err;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 5366393528d..6e50223b329 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1661,9 +1661,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
return -ENOMEM;
}
sb->s_fs_info = sbi;
- sbi->s_mount_opt = 0;
- sbi->s_resuid = make_kuid(&init_user_ns, EXT3_DEF_RESUID);
- sbi->s_resgid = make_kgid(&init_user_ns, EXT3_DEF_RESGID);
sbi->s_sb_block = sb_block;
blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index c22f17021b6..0a475c88185 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -39,22 +39,8 @@ config EXT4_USE_FOR_EXT23
compiled kernel size by using one file system driver for
ext2, ext3, and ext4 file systems.
-config EXT4_FS_XATTR
- bool "Ext4 extended attributes"
- depends on EXT4_FS
- default y
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
-
- If unsure, say N.
-
- You need this for POSIX ACL support on ext4.
-
config EXT4_FS_POSIX_ACL
bool "Ext4 POSIX Access Control Lists"
- depends on EXT4_FS_XATTR
select FS_POSIX_ACL
help
POSIX Access Control Lists (ACLs) support permissions for users and
@@ -67,7 +53,6 @@ config EXT4_FS_POSIX_ACL
config EXT4_FS_SECURITY
bool "Ext4 Security Labels"
- depends on EXT4_FS_XATTR
help
Security labels support alternative access control models
implemented by security modules like SELinux. This option
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 56fd8f86593..0310fec2ee3 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o indirect.o
+ mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
+ xattr_trusted.o inline.o
-ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index d3c5b88fd89..e6e0d988439 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -423,8 +423,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
retry:
handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ error = PTR_ERR(handle);
+ goto release_and_out;
+ }
error = ext4_set_acl(handle, inode, type, acl);
ext4_journal_stop(handle);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 8e07d2a5a13..80a28b29727 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -27,23 +27,11 @@
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ext4.h"
-
-static unsigned char ext4_filetype_table[] = {
- DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
+#include "xattr.h"
static int ext4_dx_readdir(struct file *filp,
void *dirent, filldir_t filldir);
-static unsigned char get_dtype(struct super_block *sb, int filetype)
-{
- if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
- (filetype >= EXT4_FT_MAX))
- return DT_UNKNOWN;
-
- return (ext4_filetype_table[filetype]);
-}
-
/**
* Check if the given dir-inode refers to an htree-indexed directory
* (or a directory which could potentially get converted to use htree
@@ -68,11 +56,14 @@ static int is_dx_dir(struct inode *inode)
* Return 0 if the directory entry is OK, and 1 if there is a problem
*
* Note: this is the opposite of what ext2 and ext3 historically returned...
+ *
+ * bh passed here can be an inode block or a dir data block, depending
+ * on the inode inline data flag.
*/
int __ext4_check_dir_entry(const char *function, unsigned int line,
struct inode *dir, struct file *filp,
struct ext4_dir_entry_2 *de,
- struct buffer_head *bh,
+ struct buffer_head *bh, char *buf, int size,
unsigned int offset)
{
const char *error_msg = NULL;
@@ -85,9 +76,8 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
error_msg = "rec_len % 4 != 0";
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
- else if (unlikely(((char *) de - bh->b_data) + rlen >
- dir->i_sb->s_blocksize))
- error_msg = "directory entry across blocks";
+ else if (unlikely(((char *) de - buf) + rlen > size))
+ error_msg = "directory entry across range";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -98,14 +88,14 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
ext4_error_file(filp, function, line, bh->b_blocknr,
"bad entry in directory: %s - offset=%u(%u), "
"inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % bh->b_size),
+ error_msg, (unsigned) (offset % size),
offset, le32_to_cpu(de->inode),
rlen, de->name_len);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
"bad entry in directory: %s - offset=%u(%u), "
"inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % bh->b_size),
+ error_msg, (unsigned) (offset % size),
offset, le32_to_cpu(de->inode),
rlen, de->name_len);
@@ -125,6 +115,14 @@ static int ext4_readdir(struct file *filp,
int ret = 0;
int dir_has_error = 0;
+ if (ext4_has_inline_data(inode)) {
+ int has_inline_data = 1;
+ ret = ext4_read_inline_dir(filp, dirent, filldir,
+ &has_inline_data);
+ if (has_inline_data)
+ return ret;
+ }
+
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
@@ -221,8 +219,9 @@ revalidate:
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
- if (ext4_check_dir_entry(inode, filp, de,
- bh, offset)) {
+ if (ext4_check_dir_entry(inode, filp, de, bh,
+ bh->b_data, bh->b_size,
+ offset)) {
/*
* On error, skip the f_pos to the next block
*/
@@ -334,17 +333,17 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
*
* For non-htree, ext4_llseek already chooses the proper max offset.
*/
-loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
+loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
loff_t htree_max = ext4_get_htree_eof(file);
if (likely(dx_dir))
- return generic_file_llseek_size(file, offset, origin,
+ return generic_file_llseek_size(file, offset, whence,
htree_max, htree_max);
else
- return ext4_llseek(file, offset, origin);
+ return ext4_llseek(file, offset, whence);
}
/*
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3c20de1d59d..8462eb3c33a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -57,6 +57,16 @@
#define ext4_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
+/*
+ * Turn on EXT_DEBUG to get lots of info about extents operations.
+ */
+#define EXT_DEBUG__
+#ifdef EXT_DEBUG
+#define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#else
+#define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#endif
+
#define EXT4_ERROR_INODE(inode, fmt, a...) \
ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)
@@ -392,6 +402,7 @@ struct flex_groups {
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
+#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */
@@ -448,28 +459,26 @@ enum {
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
+ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
};
-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
-#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
- printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
- EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
-
-/*
- * Since it's pretty easy to mix up bit numbers and hex values, and we
- * can't do a compile-time test for ENUM values, we use a run-time
- * test to make sure that EXT4_XXX_FL is consistent with respect to
- * EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop
- * out so it won't cost any extra space in the compiled kernel image.
- * But it's important that these values are the same, since we are
- * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
- * must be consistent with the values of FS_XXX_FL defined in
- * include/linux/fs.h and the on-disk values found in ext2, ext3, and
- * ext4 filesystems, and of course the values defined in e2fsprogs.
+/*
+ * Since it's pretty easy to mix up bit numbers and hex values, we use a
+ * build-time check to make sure that EXT4_XXX_FL is consistent with respect to
+ * EXT4_INODE_XXX. If all is well, the macros will be dropped, so they won't cost
+ * any extra space in the compiled kernel image; otherwise, the build will fail.
+ * It's important that these values are the same, since we are using
+ * EXT4_INODE_XXX to test for flag values, but EXT4_XXX_FL must be consistent
+ * with the values of FS_XXX_FL defined in include/linux/fs.h and the on-disk
+ * values found in ext2, ext3 and ext4 filesystems, and of course the values
+ * defined in e2fsprogs.
*
* It's not paranoia if the Murphy's Law really *is* out to get you. :-)
*/
+#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
+#define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
+
static inline void ext4_check_flag_values(void)
{
CHECK_FLAG_VALUE(SECRM);
@@ -494,6 +503,7 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(EXTENTS);
CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(EOFBLOCKS);
+ CHECK_FLAG_VALUE(INLINE_DATA);
CHECK_FLAG_VALUE(RESERVED);
}
@@ -811,6 +821,8 @@ struct ext4_ext_cache {
__u32 ec_len; /* must be 32bit to return holes */
};
+#include "extents_status.h"
+
/*
* fourth extended file system inode data in memory
*/
@@ -833,7 +845,6 @@ struct ext4_inode_info {
#endif
unsigned long i_flags;
-#ifdef CONFIG_EXT4_FS_XATTR
/*
* Extended attributes can be read independently of the main file
* data. Taking i_mutex even when reading would cause contention
@@ -842,7 +853,6 @@ struct ext4_inode_info {
* EAs.
*/
struct rw_semaphore xattr_sem;
-#endif
struct list_head i_orphan; /* unlinked but open inodes */
@@ -888,6 +898,10 @@ struct ext4_inode_info {
struct list_head i_prealloc_list;
spinlock_t i_prealloc_lock;
+ /* extents status tree */
+ struct ext4_es_tree i_es_tree;
+ rwlock_t i_es_lock;
+
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -902,6 +916,10 @@ struct ext4_inode_info {
/* on-disk additional length */
__u16 i_extra_isize;
+ /* Indicate the inline data space. */
+ u16 i_inline_off;
+ u16 i_inline_size;
+
#ifdef CONFIG_QUOTA
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
@@ -1360,6 +1378,7 @@ enum {
EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */
EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read
nolocking */
+ EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
};
#define EXT4_INODE_BIT_FNS(name, field, offset) \
@@ -1481,7 +1500,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
#define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */
-#define EXT4_FEATURE_INCOMPAT_INLINEDATA 0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1505,7 +1524,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
- EXT4_FEATURE_INCOMPAT_MMP)
+ EXT4_FEATURE_INCOMPAT_MMP | \
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -1592,6 +1612,11 @@ struct ext4_dir_entry_tail {
__le32 det_checksum; /* crc32c(uuid+inum+dirblock) */
};
+#define EXT4_DIRENT_TAIL(block, blocksize) \
+ ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
+ ((blocksize) - \
+ sizeof(struct ext4_dir_entry_tail))))
+
/*
* Ext4 directory file types. Only the low 3 bits are used. The
* other bits are reserved for now.
@@ -1936,14 +1961,42 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
struct file *,
struct ext4_dir_entry_2 *,
- struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, filp, de, bh, offset) \
+ struct buffer_head *, char *, int,
+ unsigned int);
+#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset) \
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
- (de), (bh), (offset)))
+ (de), (bh), (buf), (size), (offset)))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
+extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+ struct buffer_head *bh,
+ void *buf, int buf_size,
+ const char *name, int namelen,
+ struct ext4_dir_entry_2 **dest_de);
+void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ const char *name, int namelen);
+static inline void ext4_update_dx_flag(struct inode *inode)
+{
+ if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_COMPAT_DIR_INDEX))
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+}
+static unsigned char ext4_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static inline unsigned char get_dtype(struct super_block *sb, int filetype)
+{
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
+ (filetype >= EXT4_FT_MAX))
+ return DT_UNKNOWN;
+
+ return ext4_filetype_table[filetype];
+}
/* fsync.c */
extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
@@ -1994,8 +2047,23 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *,
ext4_lblk_t, int, int *);
struct buffer_head *ext4_bread(handle_t *, struct inode *,
ext4_lblk_t, int, int *);
+int ext4_get_block_write(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
+int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create);
+int ext4_walk_page_buffers(handle_t *handle,
+ struct buffer_head *head,
+ unsigned from,
+ unsigned to,
+ int *partial,
+ int (*fn)(handle_t *handle,
+ struct buffer_head *bh));
+int do_journal_get_write_access(handle_t *handle,
+ struct buffer_head *bh);
+#define FALL_BACK_TO_NONDELALLOC 1
+#define CONVERT_INLINE_DATA 2
extern struct inode *ext4_iget(struct super_block *, unsigned long);
extern int ext4_write_inode(struct inode *, struct writeback_control *);
@@ -2050,6 +2118,20 @@ extern int ext4_orphan_add(handle_t *, struct inode *);
extern int ext4_orphan_del(handle_t *, struct inode *);
extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
__u32 start_minor_hash, __u32 *next_hash);
+extern int search_dir(struct buffer_head *bh,
+ char *search_buf,
+ int buf_size,
+ struct inode *dir,
+ const struct qstr *d_name,
+ unsigned int offset,
+ struct ext4_dir_entry_2 **res_dir);
+extern int ext4_generic_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh,
+ void *entry_buf,
+ int buf_size,
+ int csum_size);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
@@ -2376,6 +2458,15 @@ extern void ext4_unwritten_wait(struct inode *inode);
extern const struct inode_operations ext4_dir_inode_operations;
extern const struct inode_operations ext4_special_inode_operations;
extern struct dentry *ext4_get_parent(struct dentry *child);
+extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int blocksize, int csum_size,
+ unsigned int parent_ino, int dotdot_real_len);
+extern void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+ unsigned int blocksize);
+extern int ext4_handle_dirty_dirent_node(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *bh);
/* symlink.c */
extern const struct inode_operations ext4_symlink_inode_operations;
@@ -2393,6 +2484,9 @@ extern int ext4_check_blockref(const char *, unsigned int,
struct inode *, __le32 *, unsigned int);
/* extents.c */
+struct ext4_ext_path;
+struct ext4_extent;
+
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
@@ -2410,8 +2504,27 @@ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode,
+ ext4_lblk_t lblocks);
+extern int ext4_extent_tree_init(handle_t *, struct inode *);
+extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+ int num,
+ struct ext4_ext_path *path);
+extern int ext4_can_extents_be_merged(struct inode *inode,
+ struct ext4_extent *ex1,
+ struct ext4_extent *ex2);
+extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+ struct ext4_ext_path *,
+ struct ext4_extent *, int);
+extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
+ struct ext4_ext_path *);
+extern void ext4_ext_drop_refs(struct ext4_ext_path *);
+extern int ext4_ext_check_inode(struct inode *inode);
+extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
+
+
/* move_extent.c */
extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
__u64 start_orig, __u64 start_donor,
@@ -2445,17 +2558,13 @@ enum ext4_state_bits {
* never, ever appear in a buffer_head's state
* flag. See EXT4_MAP_FROM_CLUSTER to see where
* this is used. */
- BH_Da_Mapped, /* Delayed allocated block that now has a mapping. This
- * flag is set when ext4_map_blocks is called on a
- * delayed allocated block to get its real mapping. */
};
BUFFER_FNS(Uninit, uninit)
TAS_BUFFER_FNS(Uninit, uninit)
-BUFFER_FNS(Da_Mapped, da_mapped)
/*
- * Add new method to test wether block and inode bitmaps are properly
+ * Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
* to mark the bitmap uptodate. We need to also zero-out the bitmap
*/
@@ -2503,6 +2612,4 @@ extern void ext4_resize_end(struct super_block *sb);
#endif /* __KERNEL__ */
-#include "ext4_extents.h"
-
#endif /* _EXT4_H */
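
The CHECK_FLAG_VALUE() rework above turns the old runtime printk-plus-BUG_ON() check into a build failure via BUILD_BUG_ON(). Plain C11 gets the same effect with _Static_assert, as in the toy example below; the flag names are invented, and the kernel's BUILD_BUG_ON uses its own compile-time trick rather than _Static_assert, so treat this only as an illustration of the idea.

#include <stdio.h>

#define DEMO_EXTENTS_FL   0x00080000u   /* hypothetical on-disk flag value */
enum { DEMO_INODE_EXTENTS = 19 };       /* hypothetical in-memory bit number */

/* Mismatches are now caught when the file is compiled, not when the
 * filesystem module happens to run its init-time sanity check. */
#define CHECK_FLAG_VALUE(hex, bit) \
        _Static_assert((hex) == (1u << (bit)), #hex " and " #bit " disagree")

CHECK_FLAG_VALUE(DEMO_EXTENTS_FL, DEMO_INODE_EXTENTS);

int main(void)
{
        printf("0x%08x == 1 << %d\n", DEMO_EXTENTS_FL, DEMO_INODE_EXTENTS);
        return 0;
}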
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index cb1b2c91996..487fda12bc0 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -43,16 +43,6 @@
#define CHECK_BINSEARCH__
/*
- * Turn on EXT_DEBUG to get lots of info about extents operations.
- */
-#define EXT_DEBUG__
-#ifdef EXT_DEBUG
-#define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__)
-#else
-#define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-/*
* If EXT_STATS is defined then stats numbers are collected.
* These number will be displayed at umount time.
*/
@@ -144,20 +134,6 @@ struct ext4_ext_path {
*/
/*
- * to be called by ext4_ext_walk_space()
- * negative retcode - error
- * positive retcode - signal for ext4_ext_walk_space(), see below
- * callback must return valid extent (passed or newly created)
- */
-typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
- struct ext4_ext_cache *,
- struct ext4_extent *, void *);
-
-#define EXT_CONTINUE 0
-#define EXT_BREAK 1
-#define EXT_REPEAT 2
-
-/*
* Maximum number of logical blocks in a file; ext4_extent's ee_block is
* __le32.
*/
@@ -300,21 +276,5 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
0xffff);
}
-extern int ext4_ext_calc_metadata_amount(struct inode *inode,
- ext4_lblk_t lblocks);
-extern int ext4_extent_tree_init(handle_t *, struct inode *);
-extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
- int num,
- struct ext4_ext_path *path);
-extern int ext4_can_extents_be_merged(struct inode *inode,
- struct ext4_extent *ex1,
- struct ext4_extent *ex2);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
-extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
- struct ext4_ext_path *);
-extern void ext4_ext_drop_refs(struct ext4_ext_path *);
-extern int ext4_ext_check_inode(struct inode *inode);
-extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
- int search_hint_reverse);
#endif /* _EXT4_EXTENTS */
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 56d258c1830..7177f9b21cb 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -254,13 +254,6 @@ static inline void ext4_handle_sync(handle_t *handle)
handle->h_sync = 1;
}
-static inline void ext4_handle_release_buffer(handle_t *handle,
- struct buffer_head *bh)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_release_buffer(handle, bh);
-}
-
static inline int ext4_handle_is_aborted(handle_t *handle)
{
if (ext4_handle_valid(handle))
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7011ac96720..5ae1674ec12 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -41,6 +41,8 @@
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
+#include "ext4_extents.h"
+#include "xattr.h"
#include <trace/events/ext4.h>
@@ -109,6 +111,9 @@ static int ext4_split_extent_at(handle_t *handle,
int split_flag,
int flags);
+static int ext4_find_delayed_extent(struct inode *inode,
+ struct ext4_ext_cache *newex);
+
static int ext4_ext_truncate_extend_restart(handle_t *handle,
struct inode *inode,
int needed)
@@ -1959,27 +1964,33 @@ cleanup:
return err;
}
-static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
- ext4_lblk_t num, ext_prepare_callback func,
- void *cbdata)
+static int ext4_fill_fiemap_extents(struct inode *inode,
+ ext4_lblk_t block, ext4_lblk_t num,
+ struct fiemap_extent_info *fieinfo)
{
struct ext4_ext_path *path = NULL;
- struct ext4_ext_cache cbex;
+ struct ext4_ext_cache newex;
struct ext4_extent *ex;
- ext4_lblk_t next, start = 0, end = 0;
+ ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
- int depth, exists, err = 0;
-
- BUG_ON(func == NULL);
- BUG_ON(inode == NULL);
+ int exists, depth = 0, err = 0;
+ unsigned int flags = 0;
+ unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
while (block < last && block != EXT_MAX_BLOCKS) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
+
+ if (path && ext_depth(inode) != depth) {
+ /* depth was changed. we have to realloc path */
+ kfree(path);
+ path = NULL;
+ }
+
path = ext4_ext_find_extent(inode, block, path);
- up_read(&EXT4_I(inode)->i_data_sem);
if (IS_ERR(path)) {
+ up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
path = NULL;
break;
@@ -1987,13 +1998,16 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
+ up_read(&EXT4_I(inode)->i_data_sem);
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
err = -EIO;
break;
}
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
+ ext4_ext_drop_refs(path);
+ flags = 0;
exists = 0;
if (!ex) {
/* there is no extent yet, so try to allocate
@@ -2030,40 +2044,64 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
BUG_ON(end <= start);
if (!exists) {
- cbex.ec_block = start;
- cbex.ec_len = end - start;
- cbex.ec_start = 0;
+ newex.ec_block = start;
+ newex.ec_len = end - start;
+ newex.ec_start = 0;
} else {
- cbex.ec_block = le32_to_cpu(ex->ee_block);
- cbex.ec_len = ext4_ext_get_actual_len(ex);
- cbex.ec_start = ext4_ext_pblock(ex);
+ newex.ec_block = le32_to_cpu(ex->ee_block);
+ newex.ec_len = ext4_ext_get_actual_len(ex);
+ newex.ec_start = ext4_ext_pblock(ex);
+ if (ext4_ext_is_uninitialized(ex))
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
}
- if (unlikely(cbex.ec_len == 0)) {
- EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
- err = -EIO;
- break;
+ /*
+ * Find delayed extent and update newex accordingly. We call
+ * it even in !exists case to find out whether newex is the
+ * last existing extent or not.
+ */
+ next_del = ext4_find_delayed_extent(inode, &newex);
+ if (!exists && next_del) {
+ exists = 1;
+ flags |= FIEMAP_EXTENT_DELALLOC;
}
- err = func(inode, next, &cbex, ex, cbdata);
- ext4_ext_drop_refs(path);
+ up_read(&EXT4_I(inode)->i_data_sem);
- if (err < 0)
+ if (unlikely(newex.ec_len == 0)) {
+ EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
+ err = -EIO;
break;
+ }
- if (err == EXT_REPEAT)
- continue;
- else if (err == EXT_BREAK) {
- err = 0;
- break;
+ /* This is possible iff next == next_del == EXT_MAX_BLOCKS */
+ if (next == next_del) {
+ flags |= FIEMAP_EXTENT_LAST;
+ if (unlikely(next_del != EXT_MAX_BLOCKS ||
+ next != EXT_MAX_BLOCKS)) {
+ EXT4_ERROR_INODE(inode,
+ "next extent == %u, next "
+ "delalloc extent = %u",
+ next, next_del);
+ err = -EIO;
+ break;
+ }
}
- if (ext_depth(inode) != depth) {
- /* depth was changed. we have to realloc path */
- kfree(path);
- path = NULL;
+ if (exists) {
+ err = fiemap_fill_next_extent(fieinfo,
+ (__u64)newex.ec_block << blksize_bits,
+ (__u64)newex.ec_start << blksize_bits,
+ (__u64)newex.ec_len << blksize_bits,
+ flags);
+ if (err < 0)
+ break;
+ if (err == 1) {
+ err = 0;
+ break;
+ }
}
- block = cbex.ec_block + cbex.ec_len;
+ block = newex.ec_block + newex.ec_len;
}
if (path) {
@@ -2156,7 +2194,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
struct ext4_extent *ex)
{
struct ext4_ext_cache *cex;
- struct ext4_sb_info *sbi;
int ret = 0;
/*
@@ -2164,7 +2201,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
*/
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
cex = &EXT4_I(inode)->i_cached_extent;
- sbi = EXT4_SB(inode->i_sb);
/* has cache valid data? */
if (cex->ec_len == 0)
@@ -2190,13 +2226,14 @@ errout:
* removes index from the index block.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *path)
+ struct ext4_ext_path *path, int depth)
{
int err;
ext4_fsblk_t leaf;
/* free index block */
- path--;
+ depth--;
+ path = path + depth;
leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
@@ -2221,6 +2258,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+
+ while (--depth >= 0) {
+ if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+ break;
+ path--;
+ err = ext4_ext_get_access(handle, inode, path);
+ if (err)
+ break;
+ path->p_idx->ei_block = (path+1)->p_idx->ei_block;
+ err = ext4_ext_dirty(handle, inode, path);
+ if (err)
+ break;
+ }
return err;
}
@@ -2273,7 +2323,13 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
int index;
- int depth = ext_depth(inode);
+ int depth;
+
+ /* If we are converting the inline data, only one is needed here. */
+ if (ext4_has_inline_data(inode))
+ return 1;
+
+ depth = ext_depth(inode);
if (chunk)
index = depth * 2;
@@ -2557,7 +2613,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
/* if this leaf is free, then we should
* remove it from index block above */
if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
- err = ext4_ext_rm_idx(handle, inode, path + depth);
+ err = ext4_ext_rm_idx(handle, inode, path, depth);
out:
return err;
@@ -2760,7 +2816,7 @@ again:
/* index is empty, remove it;
* handle must be already prepared by the
* truncatei_leaf() */
- err = ext4_ext_rm_idx(handle, inode, path + i);
+ err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
@@ -3461,115 +3517,34 @@ out:
/**
* ext4_find_delalloc_range: find delayed allocated block in the given range.
*
- * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
- * whether there are any buffers marked for delayed allocation. It returns '1'
- * on the first delalloc'ed buffer head found. If no buffer head in the given
- * range is marked for delalloc, it returns 0.
- * lblk_start should always be <= lblk_end.
- * search_hint_reverse is to indicate that searching in reverse from lblk_end to
- * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
- * block sooner). This is useful when blocks are truncated sequentially from
- * lblk_start towards lblk_end.
+ * Return 1 if there is a delalloc block in the range, otherwise 0.
*/
static int ext4_find_delalloc_range(struct inode *inode,
ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end,
- int search_hint_reverse)
+ ext4_lblk_t lblk_end)
{
- struct address_space *mapping = inode->i_mapping;
- struct buffer_head *head, *bh = NULL;
- struct page *page;
- ext4_lblk_t i, pg_lblk;
- pgoff_t index;
+ struct extent_status es;
- if (!test_opt(inode->i_sb, DELALLOC))
- return 0;
-
- /* reverse search wont work if fs block size is less than page size */
- if (inode->i_blkbits < PAGE_CACHE_SHIFT)
- search_hint_reverse = 0;
-
- if (search_hint_reverse)
- i = lblk_end;
+ es.start = lblk_start;
+ ext4_es_find_extent(inode, &es);
+ if (es.len == 0)
+ return 0; /* there is no delay extent in this tree */
+ else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+ return 1;
+ else if (lblk_start <= es.start && es.start <= lblk_end)
+ return 1;
else
- i = lblk_start;
-
- index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- while ((i >= lblk_start) && (i <= lblk_end)) {
- page = find_get_page(mapping, index);
- if (!page)
- goto nextpage;
-
- if (!page_has_buffers(page))
- goto nextpage;
-
- head = page_buffers(page);
- if (!head)
- goto nextpage;
-
- bh = head;
- pg_lblk = index << (PAGE_CACHE_SHIFT -
- inode->i_blkbits);
- do {
- if (unlikely(pg_lblk < lblk_start)) {
- /*
- * This is possible when fs block size is less
- * than page size and our cluster starts/ends in
- * middle of the page. So we need to skip the
- * initial few blocks till we reach the 'lblk'
- */
- pg_lblk++;
- continue;
- }
-
- /* Check if the buffer is delayed allocated and that it
- * is not yet mapped. (when da-buffers are mapped during
- * their writeout, their da_mapped bit is set.)
- */
- if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
- page_cache_release(page);
- trace_ext4_find_delalloc_range(inode,
- lblk_start, lblk_end,
- search_hint_reverse,
- 1, i);
- return 1;
- }
- if (search_hint_reverse)
- i--;
- else
- i++;
- } while ((i >= lblk_start) && (i <= lblk_end) &&
- ((bh = bh->b_this_page) != head));
-nextpage:
- if (page)
- page_cache_release(page);
- /*
- * Move to next page. 'i' will be the first lblk in the next
- * page.
- */
- if (search_hint_reverse)
- index--;
- else
- index++;
- i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- }
-
- trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
- search_hint_reverse, 0, 0);
- return 0;
+ return 0;
}
-int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
- int search_hint_reverse)
+int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
- return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
- search_hint_reverse);
+ return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
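As a worked example of the cluster rounding above (numbers are illustrative): with a bigalloc cluster ratio of 16, a query for lblk 37 is widened to the whole cluster, lblk_start = 37 & ~15 = 32 and lblk_end = 47, and ext4_find_delalloc_range() now answers it with a single extent status tree lookup instead of the page-cache scan that was just removed.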
/**
@@ -3630,7 +3605,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
lblk_to = lblk_from + c_offset - 1;
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
@@ -3640,7 +3615,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
allocated_clusters--;
}
@@ -3663,8 +3638,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
flags, allocated);
ext4_ext_show_leaf(inode, path);
- trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
- newblock);
+ trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
+ allocated, newblock);
/* get_block() before submit the IO, split the extent */
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
@@ -3911,7 +3886,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0;
- int free_on_err = 0, err = 0, depth, ret;
+ int free_on_err = 0, err = 0, depth;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
@@ -3927,7 +3902,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
if (!newex.ee_start_lo && !newex.ee_start_hi) {
if ((sbi->s_cluster_ratio > 1) &&
- ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+ ext4_find_delalloc_cluster(inode, map->m_lblk))
map->m_flags |= EXT4_MAP_FROM_CLUSTER;
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -4007,15 +3982,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ee_len, ee_start);
goto out;
}
- ret = ext4_ext_handle_uninitialized_extents(
+ allocated = ext4_ext_handle_uninitialized_extents(
handle, inode, map, path, flags,
allocated, newblock);
- return ret;
+ goto out3;
}
}
if ((sbi->s_cluster_ratio > 1) &&
- ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+ ext4_find_delalloc_cluster(inode, map->m_lblk))
map->m_flags |= EXT4_MAP_FROM_CLUSTER;
/*
@@ -4284,8 +4259,8 @@ out2:
kfree(path);
}
- trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
- newblock, map->m_len, err ? err : allocated);
+out3:
+ trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
return err ? err : allocated;
}
@@ -4344,6 +4319,8 @@ void ext4_ext_truncate(struct inode *inode)
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
+ err = ext4_es_remove_extent(inode, last_block,
+ EXT_MAX_BLOCKS - last_block);
err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
/* In a multi-transaction truncate, we only make the final
@@ -4434,6 +4411,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (mode & FALLOC_FL_PUNCH_HOLE)
return ext4_punch_hole(file, offset, len);
+ ret = ext4_convert_inline_data(inode);
+ if (ret)
+ return ret;
+
trace_ext4_fallocate_enter(inode, offset, len, mode);
map.m_lblk = offset >> blkbits;
/*
@@ -4572,206 +4553,43 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
}
/*
- * Callback function called for each extent to gather FIEMAP information.
+ * If newex is not an existing extent (newex->ec_start equals zero), find
+ * the delayed extent at the start of newex, update newex accordingly and
+ * return the start of the next delayed extent.
+ *
+ * If newex is an existing extent (newex->ec_start is not equal to zero),
+ * return the start of the next delayed extent, or EXT_MAX_BLOCKS if no
+ * delayed extent is found. Leave newex unmodified.
*/
-static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
- struct ext4_ext_cache *newex, struct ext4_extent *ex,
- void *data)
+static int ext4_find_delayed_extent(struct inode *inode,
+ struct ext4_ext_cache *newex)
{
- __u64 logical;
- __u64 physical;
- __u64 length;
- __u32 flags = 0;
- int ret = 0;
- struct fiemap_extent_info *fieinfo = data;
- unsigned char blksize_bits;
+ struct extent_status es;
+ ext4_lblk_t next_del;
- blksize_bits = inode->i_sb->s_blocksize_bits;
- logical = (__u64)newex->ec_block << blksize_bits;
+ es.start = newex->ec_block;
+ next_del = ext4_es_find_extent(inode, &es);
if (newex->ec_start == 0) {
/*
* No extent in extent-tree contains block @newex->ec_start,
* then the block may stay in 1)a hole or 2)delayed-extent.
- *
- * Holes or delayed-extents are processed as follows.
- * 1. lookup dirty pages with specified range in pagecache.
- * If no page is got, then there is no delayed-extent and
- * return with EXT_CONTINUE.
- * 2. find the 1st mapped buffer,
- * 3. check if the mapped buffer is both in the request range
- * and a delayed buffer. If not, there is no delayed-extent,
- * then return.
- * 4. a delayed-extent is found, the extent will be collected.
*/
- ext4_lblk_t end = 0;
- pgoff_t last_offset;
- pgoff_t offset;
- pgoff_t index;
- pgoff_t start_index = 0;
- struct page **pages = NULL;
- struct buffer_head *bh = NULL;
- struct buffer_head *head = NULL;
- unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
-
- pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (pages == NULL)
- return -ENOMEM;
-
- offset = logical >> PAGE_SHIFT;
-repeat:
- last_offset = offset;
- head = NULL;
- ret = find_get_pages_tag(inode->i_mapping, &offset,
- PAGECACHE_TAG_DIRTY, nr_pages, pages);
-
- if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
- /* First time, try to find a mapped buffer. */
- if (ret == 0) {
-out:
- for (index = 0; index < ret; index++)
- page_cache_release(pages[index]);
- /* just a hole. */
- kfree(pages);
- return EXT_CONTINUE;
- }
- index = 0;
-
-next_page:
- /* Try to find the 1st mapped buffer. */
- end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
- blksize_bits;
- if (!page_has_buffers(pages[index]))
- goto out;
- head = page_buffers(pages[index]);
- if (!head)
- goto out;
-
- index++;
- bh = head;
- do {
- if (end >= newex->ec_block +
- newex->ec_len)
- /* The buffer is out of
- * the request range.
- */
- goto out;
-
- if (buffer_mapped(bh) &&
- end >= newex->ec_block) {
- start_index = index - 1;
- /* get the 1st mapped buffer. */
- goto found_mapped_buffer;
- }
-
- bh = bh->b_this_page;
- end++;
- } while (bh != head);
-
- /* No mapped buffer in the range found in this page,
- * We need to look up next page.
- */
- if (index >= ret) {
- /* There is no page left, but we need to limit
- * newex->ec_len.
- */
- newex->ec_len = end - newex->ec_block;
- goto out;
- }
- goto next_page;
- } else {
- /*Find contiguous delayed buffers. */
- if (ret > 0 && pages[0]->index == last_offset)
- head = page_buffers(pages[0]);
- bh = head;
- index = 1;
- start_index = 0;
- }
-
-found_mapped_buffer:
- if (bh != NULL && buffer_delay(bh)) {
- /* 1st or contiguous delayed buffer found. */
- if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
- /*
- * 1st delayed buffer found, record
- * the start of extent.
- */
- flags |= FIEMAP_EXTENT_DELALLOC;
- newex->ec_block = end;
- logical = (__u64)end << blksize_bits;
- }
- /* Find contiguous delayed buffers. */
- do {
- if (!buffer_delay(bh))
- goto found_delayed_extent;
- bh = bh->b_this_page;
- end++;
- } while (bh != head);
-
- for (; index < ret; index++) {
- if (!page_has_buffers(pages[index])) {
- bh = NULL;
- break;
- }
- head = page_buffers(pages[index]);
- if (!head) {
- bh = NULL;
- break;
- }
-
- if (pages[index]->index !=
- pages[start_index]->index + index
- - start_index) {
- /* Blocks are not contiguous. */
- bh = NULL;
- break;
- }
- bh = head;
- do {
- if (!buffer_delay(bh))
- /* Delayed-extent ends. */
- goto found_delayed_extent;
- bh = bh->b_this_page;
- end++;
- } while (bh != head);
- }
- } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
- /* a hole found. */
- goto out;
+ if (es.len == 0)
+ /* A hole found. */
+ return 0;
-found_delayed_extent:
- newex->ec_len = min(end - newex->ec_block,
- (ext4_lblk_t)EXT_INIT_MAX_LEN);
- if (ret == nr_pages && bh != NULL &&
- newex->ec_len < EXT_INIT_MAX_LEN &&
- buffer_delay(bh)) {
- /* Have not collected an extent and continue. */
- for (index = 0; index < ret; index++)
- page_cache_release(pages[index]);
- goto repeat;
+ if (es.start > newex->ec_block) {
+ /* A hole found. */
+ newex->ec_len = min(es.start - newex->ec_block,
+ newex->ec_len);
+ return 0;
}
- for (index = 0; index < ret; index++)
- page_cache_release(pages[index]);
- kfree(pages);
+ newex->ec_len = es.start + es.len - newex->ec_block;
}
- physical = (__u64)newex->ec_start << blksize_bits;
- length = (__u64)newex->ec_len << blksize_bits;
-
- if (ex && ext4_ext_is_uninitialized(ex))
- flags |= FIEMAP_EXTENT_UNWRITTEN;
-
- if (next == EXT_MAX_BLOCKS)
- flags |= FIEMAP_EXTENT_LAST;
-
- ret = fiemap_fill_next_extent(fieinfo, logical, physical,
- length, flags);
- if (ret < 0)
- return ret;
- if (ret == 1)
- return EXT_BREAK;
- return EXT_CONTINUE;
+ return next_del;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
@@ -4971,6 +4789,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
ext4_ext_invalidate_cache(inode);
ext4_discard_preallocations(inode);
+ err = ext4_es_remove_extent(inode, first_block,
+ stop_block - first_block);
err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
ext4_ext_invalidate_cache(inode);
@@ -4991,12 +4811,22 @@ out_mutex:
mutex_unlock(&inode->i_mutex);
return err;
}
+
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
ext4_lblk_t start_blk;
int error = 0;
+ if (ext4_has_inline_data(inode)) {
+ int has_inline = 1;
+
+ error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
+
+ if (has_inline)
+ return error;
+ }
+
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return generic_block_fiemap(inode, fieinfo, start, len,
@@ -5018,11 +4848,11 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
/*
- * Walk the extent tree gathering extent information.
- * ext4_ext_fiemap_cb will push extents back to user.
+ * Walk the extent tree gathering extent information
+ * and pushing extents back to the user.
*/
- error = ext4_ext_walk_space(inode, start_blk, len_blks,
- ext4_ext_fiemap_cb, fieinfo);
+ error = ext4_fill_fiemap_extents(inode, start_blk,
+ len_blks, fieinfo);
}
return error;
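The rewritten path above only changes how ext4 gathers extents internally; the user-visible interface is still the FIEMAP ioctl. For reference, a minimal userspace sketch (illustrative only, not part of the patch) that dumps a file's extents, including the FIEMAP_EXTENT_DELALLOC and FIEMAP_EXTENT_UNWRITTEN flags filled in by ext4_fill_fiemap_extents():

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_FIEMAP */
#include <linux/fiemap.h>	/* struct fiemap, struct fiemap_extent */

int main(int argc, char **argv)
{
	unsigned int count = 32;	/* room for 32 extents per call; loop for larger files */
	struct fiemap *fm;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = 0;			/* FIEMAP_FLAG_SYNC would flush delalloc first */
	fm->fm_extent_count = count;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (unsigned int i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical %llu physical %llu len %llu%s%s%s\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ? " delalloc" : "",
		       (fe->fe_flags & FIEMAP_EXTENT_UNWRITTEN) ? " unwritten" : "",
		       (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " last" : "");
	}

	free(fm);
	close(fd);
	return 0;
}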
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
new file mode 100644
index 00000000000..564d981a2fc
--- /dev/null
+++ b/fs/ext4/extents_status.c
@@ -0,0 +1,500 @@
+/*
+ * fs/ext4/extents_status.c
+ *
+ * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
+ * Modified by
+ * Allison Henderson <achender@linux.vnet.ibm.com>
+ * Hugh Dickins <hughd@google.com>
+ * Zheng Liu <wenqing.lz@taobao.com>
+ *
+ * Ext4 extents status tree core functions.
+ */
+#include <linux/rbtree.h>
+#include "ext4.h"
+#include "extents_status.h"
+#include "ext4_extents.h"
+
+#include <trace/events/ext4.h>
+
+/*
+ * According to a previous discussion at the Ext4 Developer Workshop, we
+ * will introduce a new structure called the io tree to track all extent
+ * status in order to solve some problems that we have met
+ * (e.g. the reservation space warning), and to provide extent-level locking.
+ * The delay extent tree is the first step towards this goal. It was
+ * originally built by Yongqiang Yang. At that time it was called the delay
+ * extent tree, and its only goal was to track delayed extents in memory to
+ * simplify the implementation of fiemap and bigalloc, and to introduce
+ * lseek SEEK_DATA/SEEK_HOLE support. That is why it is still called the
+ * delay extent tree in the comments below. But to better describe
+ * what it does, it has been renamed to the extent status tree.
+ *
+ * Currently the first step has been done. All delayed extents are
+ * tracked in the tree. The tree is updated when a delayed allocation is
+ * issued and when the delayed extent is written out or invalidated.
+ * Therefore the implementations of fiemap and bigalloc
+ * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
+ *
+ * The following comment describes the implementation of the extent
+ * status tree and future work.
+ */
+
+/*
+ * extents status tree implementation for ext4.
+ *
+ *
+ * ==========================================================================
+ * Extent status encompasses delayed extents and extent locks
+ *
+ * 1. Why a delayed extent implementation?
+ *
+ * Without delayed extents, ext4 identifies a delayed extent by looking
+ * up the page cache; this has several deficiencies - complicated,
+ * buggy, and inefficient code.
+ *
+ * FIEMAP, SEEK_HOLE/DATA, bigalloc, punch hole and writeout all need
+ * to know whether a block or a range of blocks belongs to a delayed
+ * extent.
+ *
+ * Let us have a look at how they work without the delayed extent implementation.
+ * -- FIEMAP
+ * FIEMAP looks up the page cache to distinguish delayed allocations from holes.
+ *
+ * -- SEEK_HOLE/DATA
+ * SEEK_HOLE/DATA has the same problem as FIEMAP.
+ *
+ * -- bigalloc
+ * bigalloc looks up the page cache to figure out whether a block is
+ * already under delayed allocation, in order to determine whether a
+ * quota reservation is needed for the cluster.
+ *
+ * -- punch hole
+ * punch hole looks up the page cache to identify a delayed extent.
+ *
+ * -- writeout
+ * Writeout looks up the whole page cache to see whether a buffer is
+ * mapped. If there are not very many delayed buffers, this is
+ * time consuming.
+ *
+ * With the delayed extent implementation, FIEMAP, SEEK_HOLE/DATA,
+ * bigalloc and writeout can figure out whether a block or a range of
+ * blocks is under delayed allocation (i.e. belongs to a delayed
+ * extent) by searching the delayed extent tree.
+ *
+ *
+ * ==========================================================================
+ * 2. ext4 delayed extent implementation
+ *
+ * -- delayed extent
+ * A delayed extent is a range of blocks which are logically
+ * contiguous and under delayed allocation. Unlike an on-disk extent
+ * in ext4, a delayed extent is an in-memory structure with no
+ * corresponding on-disk data. There is no limit on the length of a
+ * delayed extent, so a delayed extent can contain as many blocks
+ * as are logically contiguous.
+ *
+ * -- delayed extent tree
+ * Every inode has a delayed extent tree, and all blocks under
+ * delayed allocation are added to the tree as delayed extents.
+ * Delayed extents in the tree are ordered by logical block number.
+ *
+ * -- operations on a delayed extent tree
+ * There are three operations on a delayed extent tree: finding the
+ * next delayed extent, adding a space (a range of blocks) and
+ * removing a space.
+ *
+ * -- race on a delayed extent tree
+ * The delayed extent tree is protected by inode->i_es_lock.
+ *
+ *
+ * ==========================================================================
+ * 3. performance analysis
+ * -- overhead
+ * 1. There is a cached extent for write access, so if writes are
+ * not very random, add-space operations run in O(1) time.
+ *
+ * -- gain
+ * 2. Code is much simpler, more readable, more maintainable and
+ * more efficient.
+ *
+ *
+ * ==========================================================================
+ * 4. TODO list
+ * -- Track all extent status
+ *
+ * -- Improve get block process
+ *
+ * -- Extent-level locking
+ */
+
+static struct kmem_cache *ext4_es_cachep;
+
+int __init ext4_init_es(void)
+{
+ ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+ if (ext4_es_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void ext4_exit_es(void)
+{
+ if (ext4_es_cachep)
+ kmem_cache_destroy(ext4_es_cachep);
+}
+
+void ext4_es_init_tree(struct ext4_es_tree *tree)
+{
+ tree->root = RB_ROOT;
+ tree->cache_es = NULL;
+}
+
+#ifdef ES_DEBUG__
+static void ext4_es_print_tree(struct inode *inode)
+{
+ struct ext4_es_tree *tree;
+ struct rb_node *node;
+
+ printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
+ tree = &EXT4_I(inode)->i_es_tree;
+ node = rb_first(&tree->root);
+ while (node) {
+ struct extent_status *es;
+ es = rb_entry(node, struct extent_status, rb_node);
+ printk(KERN_DEBUG " [%u/%u)", es->start, es->len);
+ node = rb_next(node);
+ }
+ printk(KERN_DEBUG "\n");
+}
+#else
+#define ext4_es_print_tree(inode)
+#endif
+
+static inline ext4_lblk_t extent_status_end(struct extent_status *es)
+{
+ BUG_ON(es->start + es->len < es->start);
+ return es->start + es->len - 1;
+}
+
+/*
+ * Search through the tree for a delayed extent with a given offset. If
+ * it can't be found, try to find the next extent.
+ */
+static struct extent_status *__es_tree_search(struct rb_root *root,
+ ext4_lblk_t offset)
+{
+ struct rb_node *node = root->rb_node;
+ struct extent_status *es = NULL;
+
+ while (node) {
+ es = rb_entry(node, struct extent_status, rb_node);
+ if (offset < es->start)
+ node = node->rb_left;
+ else if (offset > extent_status_end(es))
+ node = node->rb_right;
+ else
+ return es;
+ }
+
+ if (es && offset < es->start)
+ return es;
+
+ if (es && offset > extent_status_end(es)) {
+ node = rb_next(&es->rb_node);
+ return node ? rb_entry(node, struct extent_status, rb_node) :
+ NULL;
+ }
+
+ return NULL;
+}
+
+/*
+ * ext4_es_find_extent: find the 1st delayed extent covering @es->start
+ * if it exists, otherwise, the next extent after @es->start.
+ *
+ * @inode: the inode which owns delayed extents
+ * @es: delayed extent that we found
+ *
+ * Returns the first block of the next delayed extent after the one
+ * found, or EXT_MAX_BLOCKS if there is no further delayed extent.
+ * The delayed extent found is returned via @es.
+ */
+ext4_lblk_t ext4_es_find_extent(struct inode *inode, struct extent_status *es)
+{
+ struct ext4_es_tree *tree = NULL;
+ struct extent_status *es1 = NULL;
+ struct rb_node *node;
+ ext4_lblk_t ret = EXT_MAX_BLOCKS;
+
+ trace_ext4_es_find_extent_enter(inode, es->start);
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ tree = &EXT4_I(inode)->i_es_tree;
+
+ /* first, check whether the cached extent covers es->start */
+ if (tree->cache_es) {
+ es1 = tree->cache_es;
+ if (in_range(es->start, es1->start, es1->len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ es->start, es1->start, es1->len);
+ goto out;
+ }
+ }
+
+ es->len = 0;
+ es1 = __es_tree_search(&tree->root, es->start);
+
+out:
+ if (es1) {
+ tree->cache_es = es1;
+ es->start = es1->start;
+ es->len = es1->len;
+ node = rb_next(&es1->rb_node);
+ if (node) {
+ es1 = rb_entry(node, struct extent_status, rb_node);
+ ret = es1->start;
+ }
+ }
+
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ trace_ext4_es_find_extent_exit(inode, es, ret);
+ return ret;
+}
+
+static struct extent_status *
+ext4_es_alloc_extent(ext4_lblk_t start, ext4_lblk_t len)
+{
+ struct extent_status *es;
+ es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+ if (es == NULL)
+ return NULL;
+ es->start = start;
+ es->len = len;
+ return es;
+}
+
+static void ext4_es_free_extent(struct extent_status *es)
+{
+ kmem_cache_free(ext4_es_cachep, es);
+}
+
+static struct extent_status *
+ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
+{
+ struct extent_status *es1;
+ struct rb_node *node;
+
+ node = rb_prev(&es->rb_node);
+ if (!node)
+ return es;
+
+ es1 = rb_entry(node, struct extent_status, rb_node);
+ if (es->start == extent_status_end(es1) + 1) {
+ es1->len += es->len;
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(es);
+ es = es1;
+ }
+
+ return es;
+}
+
+static struct extent_status *
+ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
+{
+ struct extent_status *es1;
+ struct rb_node *node;
+
+ node = rb_next(&es->rb_node);
+ if (!node)
+ return es;
+
+ es1 = rb_entry(node, struct extent_status, rb_node);
+ if (es1->start == extent_status_end(es) + 1) {
+ es->len += es1->len;
+ rb_erase(node, &tree->root);
+ ext4_es_free_extent(es1);
+ }
+
+ return es;
+}
+
+static int __es_insert_extent(struct ext4_es_tree *tree, ext4_lblk_t offset,
+ ext4_lblk_t len)
+{
+ struct rb_node **p = &tree->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct extent_status *es;
+ ext4_lblk_t end = offset + len - 1;
+
+ BUG_ON(end < offset);
+ es = tree->cache_es;
+ if (es && offset == (extent_status_end(es) + 1)) {
+ es_debug("cached by [%u/%u)\n", es->start, es->len);
+ es->len += len;
+ es = ext4_es_try_to_merge_right(tree, es);
+ goto out;
+ } else if (es && es->start == end + 1) {
+ es_debug("cached by [%u/%u)\n", es->start, es->len);
+ es->start = offset;
+ es->len += len;
+ es = ext4_es_try_to_merge_left(tree, es);
+ goto out;
+ } else if (es && es->start <= offset &&
+ end <= extent_status_end(es)) {
+ es_debug("cached by [%u/%u)\n", es->start, es->len);
+ goto out;
+ }
+
+ while (*p) {
+ parent = *p;
+ es = rb_entry(parent, struct extent_status, rb_node);
+
+ if (offset < es->start) {
+ if (es->start == end + 1) {
+ es->start = offset;
+ es->len += len;
+ es = ext4_es_try_to_merge_left(tree, es);
+ goto out;
+ }
+ p = &(*p)->rb_left;
+ } else if (offset > extent_status_end(es)) {
+ if (offset == extent_status_end(es) + 1) {
+ es->len += len;
+ es = ext4_es_try_to_merge_right(tree, es);
+ goto out;
+ }
+ p = &(*p)->rb_right;
+ } else {
+ if (extent_status_end(es) <= end)
+ es->len = offset - es->start + len;
+ goto out;
+ }
+ }
+
+ es = ext4_es_alloc_extent(offset, len);
+ if (!es)
+ return -ENOMEM;
+ rb_link_node(&es->rb_node, parent, p);
+ rb_insert_color(&es->rb_node, &tree->root);
+
+out:
+ tree->cache_es = es;
+ return 0;
+}
+
+/*
+ * ext4_es_insert_extent() adds a space to a delayed extent tree.
+ * Caller holds inode->i_es_lock.
+ *
+ * ext4_es_insert_extent is called by ext4_da_write_begin and
+ * ext4_es_remove_extent.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t offset,
+ ext4_lblk_t len)
+{
+ struct ext4_es_tree *tree;
+ int err = 0;
+
+ trace_ext4_es_insert_extent(inode, offset, len);
+ es_debug("add [%u/%u) to extent status tree of inode %lu\n",
+ offset, len, inode->i_ino);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+ tree = &EXT4_I(inode)->i_es_tree;
+ err = __es_insert_extent(tree, offset, len);
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_es_print_tree(inode);
+
+ return err;
+}
+
+/*
+ * ext4_es_remove_extent() removes a space from a delayed extent tree.
+ * Caller holds inode->i_es_lock.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
+ ext4_lblk_t len)
+{
+ struct rb_node *node;
+ struct ext4_es_tree *tree;
+ struct extent_status *es;
+ struct extent_status orig_es;
+ ext4_lblk_t len1, len2, end;
+ int err = 0;
+
+ trace_ext4_es_remove_extent(inode, offset, len);
+ es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+ offset, len, inode->i_ino);
+
+ end = offset + len - 1;
+ BUG_ON(end < offset);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+ tree = &EXT4_I(inode)->i_es_tree;
+ es = __es_tree_search(&tree->root, offset);
+ if (!es)
+ goto out;
+ if (es->start > end)
+ goto out;
+
+ /* Simply invalidate cache_es. */
+ tree->cache_es = NULL;
+
+ orig_es.start = es->start;
+ orig_es.len = es->len;
+ len1 = offset > es->start ? offset - es->start : 0;
+ len2 = extent_status_end(es) > end ?
+ extent_status_end(es) - end : 0;
+ if (len1 > 0)
+ es->len = len1;
+ if (len2 > 0) {
+ if (len1 > 0) {
+ err = __es_insert_extent(tree, end + 1, len2);
+ if (err) {
+ es->start = orig_es.start;
+ es->len = orig_es.len;
+ goto out;
+ }
+ } else {
+ es->start = end + 1;
+ es->len = len2;
+ }
+ goto out;
+ }
+
+ if (len1 > 0) {
+ node = rb_next(&es->rb_node);
+ if (node)
+ es = rb_entry(node, struct extent_status, rb_node);
+ else
+ es = NULL;
+ }
+
+ while (es && extent_status_end(es) <= end) {
+ node = rb_next(&es->rb_node);
+ rb_erase(&es->rb_node, &tree->root);
+ ext4_es_free_extent(es);
+ if (!node) {
+ es = NULL;
+ break;
+ }
+ es = rb_entry(node, struct extent_status, rb_node);
+ }
+
+ if (es && es->start < end + 1) {
+ len1 = extent_status_end(es) - end;
+ es->start = end + 1;
+ es->len = len1;
+ }
+
+out:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+ ext4_es_print_tree(inode);
+ return err;
+}
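To make the merge and split behaviour above concrete, here is a hypothetical call sequence on an initially empty tree (block numbers are invented; [start/len) is the notation used by es_debug() in this file):

/*
 *	ext4_es_insert_extent(inode, 10, 5);	tree: [10/5)
 *	ext4_es_insert_extent(inode, 15, 3);	adjacent, merged: [10/8)
 *	ext4_es_remove_extent(inode, 12, 2);	middle removed, split: [10/2) [14/4)
 *
 *	es.start = 13;
 *	ext4_es_find_extent(inode, &es);	es becomes [14/4); the return
 *						value is EXT_MAX_BLOCKS because
 *						no delayed extent follows it.
 */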
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
new file mode 100644
index 00000000000..077f82db092
--- /dev/null
+++ b/fs/ext4/extents_status.h
@@ -0,0 +1,45 @@
+/*
+ * fs/ext4/extents_status.h
+ *
+ * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
+ * Modified by
+ * Allison Henderson <achender@linux.vnet.ibm.com>
+ * Zheng Liu <wenqing.lz@taobao.com>
+ *
+ */
+
+#ifndef _EXT4_EXTENTS_STATUS_H
+#define _EXT4_EXTENTS_STATUS_H
+
+/*
+ * Turn on ES_DEBUG__ to get lots of info about extent status operations.
+ */
+#ifdef ES_DEBUG__
+#define es_debug(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#else
+#define es_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+struct extent_status {
+ struct rb_node rb_node;
+ ext4_lblk_t start; /* first block extent covers */
+ ext4_lblk_t len; /* length of the extent in blocks */
+};
+
+struct ext4_es_tree {
+ struct rb_root root;
+ struct extent_status *cache_es; /* recently accessed extent */
+};
+
+extern int __init ext4_init_es(void);
+extern void ext4_exit_es(void);
+extern void ext4_es_init_tree(struct ext4_es_tree *tree);
+
+extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t len);
+extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t len);
+extern ext4_lblk_t ext4_es_find_extent(struct inode *inode,
+ struct extent_status *es);
+
+#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index bf3966bccd3..405565a6227 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -24,6 +24,7 @@
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
+#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -107,14 +108,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
/* Unaligned direct AIO must be serialized; see comment above */
if (unaligned_aio) {
- static unsigned long unaligned_warn_time;
-
- /* Warn about this once per day */
- if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
- ext4_msg(inode->i_sb, KERN_WARNING,
- "Unaligned AIO/DIO on inode %ld by %s; "
- "performance will be poor.",
- inode->i_ino, current->comm);
mutex_lock(ext4_aio_mutex(inode));
ext4_unwritten_wait(inode);
}
@@ -286,11 +279,329 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
}
/*
+ * Here we use ext4_map_blocks() to get a block mapping for an extent-based
+ * file rather than ext4_ext_walk_space(), because this way we can handle
+ * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
+ * function. When the extent status tree has been fully implemented, it will
+ * track all extent status for a file and we can use it directly to
+ * retrieve the offset for SEEK_DATA/SEEK_HOLE.
+ */
+
+/*
+ * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to
+ * look up the page cache to check whether there is any data in
+ * [startoff, endoff], because if this range contains an unwritten extent,
+ * we treat the extent as data or as a hole according to whether the
+ * page cache has data or not.
+ */
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+ int whence,
+ struct ext4_map_blocks *map,
+ loff_t *offset)
+{
+ struct pagevec pvec;
+ unsigned int blkbits;
+ pgoff_t index;
+ pgoff_t end;
+ loff_t endoff;
+ loff_t startoff;
+ loff_t lastoff;
+ int found = 0;
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ startoff = *offset;
+ lastoff = startoff;
+ endoff = (map->m_lblk + map->m_len) << blkbits;
+
+ index = startoff >> PAGE_CACHE_SHIFT;
+ end = endoff >> PAGE_CACHE_SHIFT;
+
+ pagevec_init(&pvec, 0);
+ do {
+ int i, num;
+ unsigned long nr_pages;
+
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+ (pgoff_t)num);
+ if (nr_pages == 0) {
+ if (whence == SEEK_DATA)
+ break;
+
+ BUG_ON(whence != SEEK_HOLE);
+ /*
+ * If this is the first pass through the loop and the
+ * offset is not beyond the end offset, there is a
+ * hole at this offset.
+ */
+ if (lastoff == startoff || lastoff < endoff)
+ found = 1;
+ break;
+ }
+
+ /*
+ * If this is the first pass through the loop and the
+ * offset is smaller than the first page offset, there is a
+ * hole at this offset.
+ */
+ if (lastoff == startoff && whence == SEEK_HOLE &&
+ lastoff < page_offset(pvec.pages[0])) {
+ found = 1;
+ break;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ struct buffer_head *bh, *head;
+
+ /*
+ * If the current offset is not beyond the end of the given
+ * range, it lies within a hole.
+ */
+ if (lastoff < endoff && whence == SEEK_HOLE &&
+ page->index > end) {
+ found = 1;
+ *offset = lastoff;
+ goto out;
+ }
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != inode->i_mapping)) {
+ unlock_page(page);
+ continue;
+ }
+
+ if (!page_has_buffers(page)) {
+ unlock_page(page);
+ continue;
+ }
+
+ if (page_has_buffers(page)) {
+ lastoff = page_offset(page);
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_uptodate(bh) ||
+ buffer_unwritten(bh)) {
+ if (whence == SEEK_DATA)
+ found = 1;
+ } else {
+ if (whence == SEEK_HOLE)
+ found = 1;
+ }
+ if (found) {
+ *offset = max_t(loff_t,
+ startoff, lastoff);
+ unlock_page(page);
+ goto out;
+ }
+ lastoff += bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+ }
+
+ lastoff = page_offset(page) + PAGE_SIZE;
+ unlock_page(page);
+ }
+
+ /*
+ * Fewer pages were found than requested, so there must be a
+ * hole in the remaining range.
+ */
+ if (nr_pages < num && whence == SEEK_HOLE) {
+ found = 1;
+ *offset = lastoff;
+ break;
+ }
+
+ index = pvec.pages[i - 1]->index + 1;
+ pagevec_release(&pvec);
+ } while (index <= end);
+
+out:
+ pagevec_release(&pvec);
+ return found;
+}
+
+/*
+ * ext4_seek_data() retrieves the offset for SEEK_DATA.
+ */
+static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t dataoff, isize;
+ int blkbits;
+ int ret = 0;
+
+ mutex_lock(&inode->i_mutex);
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
+ mutex_unlock(&inode->i_mutex);
+ return -ENXIO;
+ }
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ dataoff = offset;
+
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ if (last != start)
+ dataoff = last << blkbits;
+ break;
+ }
+
+ /*
+ * If there is a delayed extent at this offset,
+ * it is treated as data.
+ */
+ es.start = last;
+ (void)ext4_es_find_extent(inode, &es);
+ if (last >= es.start &&
+ last < es.start + es.len) {
+ if (last != start)
+ dataoff = last << blkbits;
+ break;
+ }
+
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or as a hole according to
+ * whether the page cache has data or not.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+ &map, &dataoff);
+ if (unwritten)
+ break;
+ }
+
+ last++;
+ dataoff = last << blkbits;
+ } while (last <= end);
+
+ mutex_unlock(&inode->i_mutex);
+
+ if (dataoff > isize)
+ return -ENXIO;
+
+ if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ return -EINVAL;
+ if (dataoff > maxsize)
+ return -EINVAL;
+
+ if (dataoff != file->f_pos) {
+ file->f_pos = dataoff;
+ file->f_version = 0;
+ }
+
+ return dataoff;
+}
+
+/*
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
+ */
+static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t holeoff, isize;
+ int blkbits;
+ int ret = 0;
+
+ mutex_lock(&inode->i_mutex);
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
+ mutex_unlock(&inode->i_mutex);
+ return -ENXIO;
+ }
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ holeoff = offset;
+
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ last += ret;
+ holeoff = last << blkbits;
+ continue;
+ }
+
+ /*
+ * If there is a delayed extent at this offset,
+ * we will skip this extent.
+ */
+ es.start = last;
+ (void)ext4_es_find_extent(inode, &es);
+ if (last >= es.start &&
+ last < es.start + es.len) {
+ last = es.start + es.len;
+ holeoff = last << blkbits;
+ continue;
+ }
+
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or as a hole according to
+ * whether the page cache has data or not.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+ &map, &holeoff);
+ if (!unwritten) {
+ last += ret;
+ holeoff = last << blkbits;
+ continue;
+ }
+ }
+
+ /* find a hole */
+ break;
+ } while (last <= end);
+
+ mutex_unlock(&inode->i_mutex);
+
+ if (holeoff > isize)
+ holeoff = isize;
+
+ if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ return -EINVAL;
+ if (holeoff > maxsize)
+ return -EINVAL;
+
+ if (holeoff != file->f_pos) {
+ file->f_pos = holeoff;
+ file->f_version = 0;
+ }
+
+ return holeoff;
+}
+
+/*
* ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
* by calling generic_file_llseek_size() with the appropriate maxbytes
* value for each.
*/
-loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
+loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
loff_t maxbytes;
@@ -300,8 +611,19 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
else
maxbytes = inode->i_sb->s_maxbytes;
- return generic_file_llseek_size(file, offset, origin,
- maxbytes, i_size_read(inode));
+ switch (whence) {
+ case SEEK_SET:
+ case SEEK_CUR:
+ case SEEK_END:
+ return generic_file_llseek_size(file, offset, whence,
+ maxbytes, i_size_read(inode));
+ case SEEK_DATA:
+ return ext4_seek_data(file, offset, maxbytes);
+ case SEEK_HOLE:
+ return ext4_seek_hole(file, offset, maxbytes);
+ }
+
+ return -EINVAL;
}
const struct file_operations ext4_file_operations = {
@@ -326,12 +648,10 @@ const struct file_operations ext4_file_operations = {
const struct inode_operations ext4_file_inode_operations = {
.setattr = ext4_setattr,
.getattr = ext4_getattr,
-#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
-#endif
.get_acl = ext4_get_acl,
.fiemap = ext4_fiemap,
};
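The SEEK_DATA/SEEK_HOLE support added to ext4_llseek() above is reached from userspace through ordinary lseek(2). A minimal sketch (illustrative, not part of the patch; _GNU_SOURCE is needed for the SEEK_DATA/SEEK_HOLE definitions) that walks the data segments of a sparse file and relies on the ENXIO convention used by ext4_seek_data():

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

int main(int argc, char **argv)
{
	off_t data = 0;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		/* Find the next offset that is (or may be) data... */
		data = lseek(fd, data, SEEK_DATA);
		if (data < 0) {
			if (errno != ENXIO)	/* ENXIO: no more data before EOF */
				perror("SEEK_DATA");
			break;
		}
		/* ...and the hole that terminates this data segment. */
		off_t hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0) {
			perror("SEEK_HOLE");
			break;
		}

		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}

	close(fd);
	return 0;
}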
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index be1d89f385b..3278e64e57b 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -44,7 +44,6 @@
*/
static int ext4_sync_parent(struct inode *inode)
{
- struct writeback_control wbc;
struct dentry *dentry = NULL;
struct inode *next;
int ret = 0;
@@ -66,10 +65,7 @@ static int ext4_sync_parent(struct inode *inode)
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
- memset(&wbc, 0, sizeof(wbc));
- wbc.sync_mode = WB_SYNC_ALL;
- wbc.nr_to_write = 0; /* only write out the inode */
- ret = sync_inode(inode, &wbc);
+ ret = sync_inode_metadata(inode, 1);
if (ret)
break;
}
@@ -113,8 +109,6 @@ static int __sync_inode(struct inode *inode, int datasync)
*
* What we do is just kick off a commit and wait on it. This will snapshot the
* inode to disk.
- *
- * i_mutex lock is held when entering and exiting this function
*/
int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3a100e7a62a..3f32c801244 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -762,7 +762,6 @@ got:
BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
- brelse(block_bitmap_bh);
/* recheck and clear flag under lock if we still need to */
ext4_lock_group(sb, group);
@@ -775,6 +774,7 @@ got:
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
+ brelse(block_bitmap_bh);
if (err)
goto fail;
@@ -902,6 +902,10 @@ got:
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
+ ei->i_inline_off = 0;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+
ret = inode;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 792e388e7b4..20862f96e8a 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -22,6 +22,7 @@
#include "ext4_jbd2.h"
#include "truncate.h"
+#include "ext4_extents.h" /* Needed for EXT_MAX_BLOCKS */
#include <trace/events/ext4.h>
@@ -755,8 +756,7 @@ cleanup:
partial--;
}
out:
- trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
- map->m_pblk, map->m_len, err);
+ trace_ext4_ind_map_blocks_exit(inode, map, err);
return err;
}
@@ -1412,6 +1412,7 @@ void ext4_ind_truncate(struct inode *inode)
down_write(&ei->i_data_sem);
ext4_discard_preallocations(inode);
+ ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
/*
* The orphan list entry will now protect us from any crash which
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
new file mode 100644
index 00000000000..387c47c6cda
--- /dev/null
+++ b/fs/ext4/inline.c
@@ -0,0 +1,1884 @@
+/*
+ * Copyright (c) 2012 Taobao.
+ * Written by Tao Ma <boyu.mt@taobao.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ext4_jbd2.h"
+#include "ext4.h"
+#include "xattr.h"
+#include "truncate.h"
+#include <linux/fiemap.h>
+
+#define EXT4_XATTR_SYSTEM_DATA "data"
+#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
+#define EXT4_INLINE_DOTDOT_SIZE 4
+
+int ext4_get_inline_size(struct inode *inode)
+{
+ if (EXT4_I(inode)->i_inline_off)
+ return EXT4_I(inode)->i_inline_size;
+
+ return 0;
+}
+
+static int get_max_inline_xattr_value_size(struct inode *inode,
+ struct ext4_iloc *iloc)
+{
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *entry;
+ struct ext4_inode *raw_inode;
+ int free, min_offs;
+
+ min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
+ EXT4_GOOD_OLD_INODE_SIZE -
+ EXT4_I(inode)->i_extra_isize -
+ sizeof(struct ext4_xattr_ibody_header);
+
+ /*
+ * We need to subtract another sizeof(__u32) since an in-inode xattr
+ * needs an empty 4 bytes to indicate the gap between the xattr entry
+ * and the name/value pair.
+ */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
+ return EXT4_XATTR_SIZE(min_offs -
+ EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA)) -
+ EXT4_XATTR_ROUND - sizeof(__u32));
+
+ raw_inode = ext4_raw_inode(iloc);
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+
+ /* Compute min_offs. */
+ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+ if (!entry->e_value_block && entry->e_value_size) {
+ size_t offs = le16_to_cpu(entry->e_value_offs);
+ if (offs < min_offs)
+ min_offs = offs;
+ }
+ }
+ free = min_offs -
+ ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
+
+ if (EXT4_I(inode)->i_inline_off) {
+ entry = (struct ext4_xattr_entry *)
+ ((void *)raw_inode + EXT4_I(inode)->i_inline_off);
+
+ free += le32_to_cpu(entry->e_value_size);
+ goto out;
+ }
+
+ free -= EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA));
+
+ if (free > EXT4_XATTR_ROUND)
+ free = EXT4_XATTR_SIZE(free - EXT4_XATTR_ROUND);
+ else
+ free = 0;
+
+out:
+ return free;
+}
+
+/*
+ * Get the maximum size we can now store in an inode.
+ * If we can't find the space for an xattr entry, don't use the space
+ * of the extents since we have no space to indicate the inline data.
+ */
+int ext4_get_max_inline_size(struct inode *inode)
+{
+ int error, max_inline_size;
+ struct ext4_iloc iloc;
+
+ if (EXT4_I(inode)->i_extra_isize == 0)
+ return 0;
+
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error) {
+ ext4_error_inode(inode, __func__, __LINE__, 0,
+ "can't get inode location %lu",
+ inode->i_ino);
+ return 0;
+ }
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ max_inline_size = get_max_inline_xattr_value_size(inode, &iloc);
+ up_read(&EXT4_I(inode)->xattr_sem);
+
+ brelse(iloc.bh);
+
+ if (!max_inline_size)
+ return 0;
+
+ return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
+}
+
+int ext4_has_inline_data(struct inode *inode)
+{
+ return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+ EXT4_I(inode)->i_inline_off;
+}
+
+/*
+ * This function does not take xattr_sem, which is OK because it is
+ * currently only used in a code path coming from ext4_iget, before
+ * the new inode has been unlocked.
+ */
+int ext4_find_inline_data_nolock(struct inode *inode)
+{
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
+ int error;
+
+ if (EXT4_I(inode)->i_extra_isize == 0)
+ return 0;
+
+ error = ext4_get_inode_loc(inode, &is.iloc);
+ if (error)
+ return error;
+
+ error = ext4_xattr_ibody_find(inode, &i, &is);
+ if (error)
+ goto out;
+
+ if (!is.s.not_found) {
+ EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ (void *)ext4_raw_inode(&is.iloc));
+ EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+ le32_to_cpu(is.s.here->e_value_size);
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ }
+out:
+ brelse(is.iloc.bh);
+ return error;
+}
+
+static int ext4_read_inline_data(struct inode *inode, void *buffer,
+ unsigned int len,
+ struct ext4_iloc *iloc)
+{
+ struct ext4_xattr_entry *entry;
+ struct ext4_xattr_ibody_header *header;
+ int cp_len = 0;
+ struct ext4_inode *raw_inode;
+
+ if (!len)
+ return 0;
+
+ BUG_ON(len > EXT4_I(inode)->i_inline_size);
+
+ cp_len = len < EXT4_MIN_INLINE_DATA_SIZE ?
+ len : EXT4_MIN_INLINE_DATA_SIZE;
+
+ raw_inode = ext4_raw_inode(iloc);
+ memcpy(buffer, (void *)(raw_inode->i_block), cp_len);
+
+ len -= cp_len;
+ buffer += cp_len;
+
+ if (!len)
+ goto out;
+
+ header = IHDR(inode, raw_inode);
+ entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+ EXT4_I(inode)->i_inline_off);
+ len = min_t(unsigned int, len,
+ (unsigned int)le32_to_cpu(entry->e_value_size));
+
+ memcpy(buffer,
+ (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), len);
+ cp_len += len;
+
+out:
+ return cp_len;
+}
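For a sense of the sizes involved in the copy above (a worked example, assuming the usual EXT4_N_BLOCKS of 15): EXT4_MIN_INLINE_DATA_SIZE is sizeof(__le32) * 15 = 60 bytes, so reading a 100-byte inline file copies the first 60 bytes straight out of i_block and the remaining 40 bytes out of the value of the "data" xattr in the system namespace that i_inline_off points at.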
+
+/*
+ * write the buffer to the inline inode.
+ * If 'create' is set, we don't need to do the extra copy in the xattr
+ * value since it is already handled by ext4_xattr_ibody_inline_set.
+ * That saves us one memcpy.
+ */
+void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
+ void *buffer, loff_t pos, unsigned int len)
+{
+ struct ext4_xattr_entry *entry;
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_inode *raw_inode;
+ int cp_len = 0;
+
+ BUG_ON(!EXT4_I(inode)->i_inline_off);
+ BUG_ON(pos + len > EXT4_I(inode)->i_inline_size);
+
+ raw_inode = ext4_raw_inode(iloc);
+ buffer += pos;
+
+ if (pos < EXT4_MIN_INLINE_DATA_SIZE) {
+ cp_len = pos + len > EXT4_MIN_INLINE_DATA_SIZE ?
+ EXT4_MIN_INLINE_DATA_SIZE - pos : len;
+ memcpy((void *)raw_inode->i_block + pos, buffer, cp_len);
+
+ len -= cp_len;
+ buffer += cp_len;
+ pos += cp_len;
+ }
+
+ if (!len)
+ return;
+
+ pos -= EXT4_MIN_INLINE_DATA_SIZE;
+ header = IHDR(inode, raw_inode);
+ entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+ EXT4_I(inode)->i_inline_off);
+
+ memcpy((void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs) + pos,
+ buffer, len);
+}
+
+static int ext4_create_inline_data(handle_t *handle,
+ struct inode *inode, unsigned len)
+{
+ int error;
+ void *value = NULL;
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
+
+ error = ext4_get_inode_loc(inode, &is.iloc);
+ if (error)
+ return error;
+
+ error = ext4_journal_get_write_access(handle, is.iloc.bh);
+ if (error)
+ goto out;
+
+ if (len > EXT4_MIN_INLINE_DATA_SIZE) {
+ value = EXT4_ZERO_XATTR_VALUE;
+ len -= EXT4_MIN_INLINE_DATA_SIZE;
+ } else {
+ value = "";
+ len = 0;
+ }
+
+ /* Insert the xattr entry. */
+ i.value = value;
+ i.value_len = len;
+
+ error = ext4_xattr_ibody_find(inode, &i, &is);
+ if (error)
+ goto out;
+
+ BUG_ON(!is.s.not_found);
+
+ error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ if (error) {
+ if (error == -ENOSPC)
+ ext4_clear_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ goto out;
+ }
+
+ memset((void *)ext4_raw_inode(&is.iloc)->i_block,
+ 0, EXT4_MIN_INLINE_DATA_SIZE);
+
+ EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ (void *)ext4_raw_inode(&is.iloc));
+ EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
+ ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
+ ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+ get_bh(is.iloc.bh);
+ error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+out:
+ brelse(is.iloc.bh);
+ return error;
+}
+
+static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len)
+{
+ int error;
+ void *value = NULL;
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
+
+ /* If the old space is ok, write the data directly. */
+ if (len <= EXT4_I(inode)->i_inline_size)
+ return 0;
+
+ error = ext4_get_inode_loc(inode, &is.iloc);
+ if (error)
+ return error;
+
+ error = ext4_xattr_ibody_find(inode, &i, &is);
+ if (error)
+ goto out;
+
+ BUG_ON(is.s.not_found);
+
+ len -= EXT4_MIN_INLINE_DATA_SIZE;
+ value = kzalloc(len, GFP_NOFS);
+ if (!value)
+ goto out;
+
+ error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ value, len);
+ if (error == -ENODATA)
+ goto out;
+
+ error = ext4_journal_get_write_access(handle, is.iloc.bh);
+ if (error)
+ goto out;
+
+ /* Update the xattr entry. */
+ i.value = value;
+ i.value_len = len;
+
+ error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ if (error)
+ goto out;
+
+ EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ (void *)ext4_raw_inode(&is.iloc));
+ EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+ le32_to_cpu(is.s.here->e_value_size);
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ get_bh(is.iloc.bh);
+ error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+out:
+ kfree(value);
+ brelse(is.iloc.bh);
+ return error;
+}
+
+int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len)
+{
+ int ret, size;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+ return -ENOSPC;
+
+ size = ext4_get_max_inline_size(inode);
+ if (size < len)
+ return -ENOSPC;
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+
+ if (ei->i_inline_off)
+ ret = ext4_update_inline_data(handle, inode, len);
+ else
+ ret = ext4_create_inline_data(handle, inode, len);
+
+ up_write(&EXT4_I(inode)->xattr_sem);
+
+ return ret;
+}
+
+static int ext4_destroy_inline_data_nolock(handle_t *handle,
+ struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = 0, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ .value = NULL,
+ .value_len = 0,
+ };
+ int error;
+
+ if (!ei->i_inline_off)
+ return 0;
+
+ error = ext4_get_inode_loc(inode, &is.iloc);
+ if (error)
+ return error;
+
+ error = ext4_xattr_ibody_find(inode, &i, &is);
+ if (error)
+ goto out;
+
+ error = ext4_journal_get_write_access(handle, is.iloc.bh);
+ if (error)
+ goto out;
+
+ error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+ if (error)
+ goto out;
+
+ memset((void *)ext4_raw_inode(&is.iloc)->i_block,
+ 0, EXT4_MIN_INLINE_DATA_SIZE);
+
+ if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+ if (S_ISDIR(inode->i_mode) ||
+ S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
+ ext4_ext_tree_init(handle, inode);
+ }
+ }
+ ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+
+ get_bh(is.iloc.bh);
+ error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+ EXT4_I(inode)->i_inline_off = 0;
+ EXT4_I(inode)->i_inline_size = 0;
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+out:
+ brelse(is.iloc.bh);
+ if (error == -ENODATA)
+ error = 0;
+ return error;
+}
+
+static int ext4_read_inline_page(struct inode *inode, struct page *page)
+{
+ void *kaddr;
+ int ret = 0;
+ size_t len;
+ struct ext4_iloc iloc;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!ext4_has_inline_data(inode));
+ BUG_ON(page->index);
+
+ if (!EXT4_I(inode)->i_inline_off) {
+ ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
+ inode->i_ino);
+ goto out;
+ }
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ goto out;
+
+ len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
+ kaddr = kmap_atomic(page);
+ ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr);
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ brelse(iloc.bh);
+
+out:
+ return ret;
+}
+
+int ext4_readpage_inline(struct inode *inode, struct page *page)
+{
+ int ret = 0;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ up_read(&EXT4_I(inode)->xattr_sem);
+ return -EAGAIN;
+ }
+
+ /*
+ * Inline data can currently only exist in the first page,
+ * so for all the other pages, just set them uptodate.
+ */
+ if (!page->index)
+ ret = ext4_read_inline_page(inode, page);
+ else if (!PageUptodate(page)) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ }
+
+ up_read(&EXT4_I(inode)->xattr_sem);
+
+ unlock_page(page);
+ return ret >= 0 ? 0 : ret;
+}
+
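ext4_read_inline_page() copies at most min(inline size, i_size) bytes into the page and zero-fills the rest, and ext4_readpage_inline() only does that for page index 0; every later page of an inline file is all zeros. A user-space model of that fill, where the page-size constant and helper names are assumptions for illustration:

#include <assert.h>
#include <string.h>

#define PAGE_SIZE_MODEL 4096	/* stand-in for PAGE_CACHE_SIZE */

/* Model of filling page 'index' of an inline file: only page 0 carries
 * data, and only min(inline_size, i_size) bytes of it; the tail of page 0
 * and every later page are zero-filled. */
static void fill_page(unsigned char *page, unsigned long index,
		      const unsigned char *inline_buf, size_t inline_size,
		      size_t i_size)
{
	size_t len = inline_size < i_size ? inline_size : i_size;

	memset(page, 0, PAGE_SIZE_MODEL);
	if (index == 0)
		memcpy(page, inline_buf, len);
}

int main(void)
{
	unsigned char page[PAGE_SIZE_MODEL], data[128];

	memset(data, 'x', sizeof(data));
	fill_page(page, 0, data, sizeof(data), 100);	/* len = min(128, 100) */
	assert(page[99] == 'x' && page[100] == 0);
	fill_page(page, 1, data, sizeof(data), 100);	/* later pages stay zero */
	assert(page[0] == 0);
	return 0;
}
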
+static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
+ struct inode *inode,
+ unsigned flags)
+{
+ int ret, needed_blocks;
+ handle_t *handle = NULL;
+ int retries = 0, sem_held = 0;
+ struct page *page = NULL;
+ unsigned from, to;
+ struct ext4_iloc iloc;
+
+ if (!ext4_has_inline_data(inode)) {
+ /*
+ * clear the flag so that no new write
+ * will trap here again.
+ */
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ return 0;
+ }
+
+ needed_blocks = ext4_writepage_trans_blocks(inode);
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+retry:
+ handle = ext4_journal_start(inode, needed_blocks);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ goto out;
+ }
+
+ /* We cannot recurse into the filesystem as the transaction is already
+ * started */
+ flags |= AOP_FLAG_NOFS;
+
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ sem_held = 1;
+ /* If someone has already done this for us, just exit. */
+ if (!ext4_has_inline_data(inode)) {
+ ret = 0;
+ goto out;
+ }
+
+ from = 0;
+ to = ext4_get_inline_size(inode);
+ if (!PageUptodate(page)) {
+ ret = ext4_read_inline_page(inode, page);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = ext4_destroy_inline_data_nolock(handle, inode);
+ if (ret)
+ goto out;
+
+ if (ext4_should_dioread_nolock(inode))
+ ret = __block_write_begin(page, from, to, ext4_get_block_write);
+ else
+ ret = __block_write_begin(page, from, to, ext4_get_block);
+
+ if (!ret && ext4_should_journal_data(inode)) {
+ ret = ext4_walk_page_buffers(handle, page_buffers(page),
+ from, to, NULL,
+ do_journal_get_write_access);
+ }
+
+ if (ret) {
+ unlock_page(page);
+ page_cache_release(page);
+ ext4_orphan_add(handle, inode);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ sem_held = 0;
+ ext4_journal_stop(handle);
+ handle = NULL;
+ ext4_truncate_failed_write(inode);
+ /*
+ * If truncate failed early the inode might
+ * still be on the orphan list; we need to
+ * make sure the inode is removed from the
+ * orphan list in that case.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ block_commit_write(page, from, to);
+out:
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ if (sem_held)
+ up_write(&EXT4_I(inode)->xattr_sem);
+ if (handle)
+ ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return ret;
+}
+
+/*
+ * Try to write data in the inode.
+ * If the inode has inline data, check whether the new write can also
+ * fit in the inode. If not, grab the page and the handle, move the data
+ * to the page, make it uptodate, and let the later code create an extent
+ * for it.
+ */
+int ext4_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep)
+{
+ int ret;
+ handle_t *handle;
+ struct page *page;
+ struct ext4_iloc iloc;
+
+ if (pos + len > ext4_get_max_inline_size(inode))
+ goto convert;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+ /*
+ * The write may fit inside the inode,
+ * so try to reserve space in the inode first.
+ */
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ goto out;
+ }
+
+ ret = ext4_prepare_inline_data(handle, inode, pos + len);
+ if (ret && ret != -ENOSPC)
+ goto out;
+
+ /* We don't have space in the inline inode, so convert it to extents. */
+ if (ret == -ENOSPC) {
+ ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ goto convert;
+ }
+
+ flags |= AOP_FLAG_NOFS;
+
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ *pagep = page;
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ ret = 0;
+ unlock_page(page);
+ page_cache_release(page);
+ goto out_up_read;
+ }
+
+ if (!PageUptodate(page)) {
+ ret = ext4_read_inline_page(inode, page);
+ if (ret < 0)
+ goto out_up_read;
+ }
+
+ ret = 1;
+ handle = NULL;
+out_up_read:
+ up_read(&EXT4_I(inode)->xattr_sem);
+out:
+ if (handle)
+ ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return ret;
+convert:
+ return ext4_convert_inline_data_to_extent(mapping,
+ inode, flags);
+}
+
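The return convention here is what the ext4_write_begin() hunk later in this patch relies on: 1 means the write was prepared inline and *pagep is ready, 0 means fall through to the normal block-allocation path, and a negative value is an error. A compact user-space rendering of that contract; every name and constant below is a placeholder, not the real ext4 API:

#include <assert.h>

/* Placeholder for ext4_try_to_write_inline_data(): 1 = prepared inline,
 * 0 = caller must use the block path, <0 = error. */
static int try_inline_write(long pos, long len, long max_inline)
{
	if (pos + len > max_inline)
		return 0;		/* converted: fall back to blocks */
	return 1;			/* page prepared for an inline write */
}

/* Placeholder for the write_begin caller. */
static const char *write_begin(long pos, long len, long max_inline)
{
	int ret = try_inline_write(pos, len, max_inline);

	if (ret < 0)
		return "error";
	if (ret == 1)
		return "inline";	/* done: skip block allocation */
	return "blocks";		/* continue with the normal path */
}

int main(void)
{
	assert(write_begin(0, 50, 60)[0] == 'i');
	assert(write_begin(0, 500, 60)[0] == 'b');
	return 0;
}
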
+int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page)
+{
+ int ret;
+ void *kaddr;
+ struct ext4_iloc iloc;
+
+ if (unlikely(copied < len)) {
+ if (!PageUptodate(page)) {
+ copied = 0;
+ goto out;
+ }
+ }
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ ext4_std_error(inode->i_sb, ret);
+ copied = 0;
+ goto out;
+ }
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ BUG_ON(!ext4_has_inline_data(inode));
+
+ kaddr = kmap_atomic(page);
+ ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
+ kunmap_atomic(kaddr);
+ SetPageUptodate(page);
+ /* Clear the page dirty flag so that writepages won't write it out for us. */
+ ClearPageDirty(page);
+
+ up_write(&EXT4_I(inode)->xattr_sem);
+ brelse(iloc.bh);
+out:
+ return copied;
+}
+
+struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+ unsigned len,
+ struct page *page)
+{
+ int ret;
+ void *kaddr;
+ struct ext4_iloc iloc;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ ext4_std_error(inode->i_sb, ret);
+ return NULL;
+ }
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ kaddr = kmap_atomic(page);
+ ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
+ kunmap_atomic(kaddr);
+ up_write(&EXT4_I(inode)->xattr_sem);
+
+ return iloc.bh;
+}
+
+/*
+ * Try to make the page cache and handle ready for the inline data case.
+ * We can call this function in 2 cases:
+ * 1. The inode is created and the first write exceeds inline size. We can
+ * clear the inode state safely.
+ * 2. The inode has inline data, so we need to read the data, make it
+ * uptodate and dirty so that ext4_da_writepages can handle it. We don't
+ * need to start the journal since the file's metadata isn't changed now.
+ */
+static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
+ struct inode *inode,
+ unsigned flags,
+ void **fsdata)
+{
+ int ret = 0, inline_size;
+ struct page *page;
+
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page)
+ return -ENOMEM;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ goto out;
+ }
+
+ inline_size = ext4_get_inline_size(inode);
+
+ if (!PageUptodate(page)) {
+ ret = ext4_read_inline_page(inode, page);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = __block_write_begin(page, 0, inline_size,
+ ext4_da_get_block_prep);
+ if (ret) {
+ ext4_truncate_failed_write(inode);
+ goto out;
+ }
+
+ SetPageDirty(page);
+ SetPageUptodate(page);
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ *fsdata = (void *)CONVERT_INLINE_DATA;
+
+out:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ return ret;
+}
+
+/*
+ * Prepare the write for the inline data.
+ * If the data can be written into the inode, we just read
+ * the page, make it uptodate, and start the journal.
+ * Otherwise read the page and make it dirty so that it can be
+ * handled in writepages (the i_disksize update is left to the
+ * normal ext4_da_write_end).
+ */
+int ext4_da_write_inline_data_begin(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep,
+ void **fsdata)
+{
+ int ret, inline_size;
+ handle_t *handle;
+ struct page *page;
+ struct ext4_iloc iloc;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ goto out;
+ }
+
+ inline_size = ext4_get_max_inline_size(inode);
+
+ ret = -ENOSPC;
+ if (inline_size >= pos + len) {
+ ret = ext4_prepare_inline_data(handle, inode, pos + len);
+ if (ret && ret != -ENOSPC)
+ goto out;
+ }
+
+ if (ret == -ENOSPC) {
+ ret = ext4_da_convert_inline_data_to_extent(mapping,
+ inode,
+ flags,
+ fsdata);
+ goto out;
+ }
+
+ /*
+ * We cannot recurse into the filesystem as the transaction
+ * is already started.
+ */
+ flags |= AOP_FLAG_NOFS;
+
+ page = grab_cache_page_write_begin(mapping, 0, flags);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ ret = 0;
+ goto out_release_page;
+ }
+
+ if (!PageUptodate(page)) {
+ ret = ext4_read_inline_page(inode, page);
+ if (ret < 0)
+ goto out_release_page;
+ }
+
+ up_read(&EXT4_I(inode)->xattr_sem);
+ *pagep = page;
+ handle = NULL;
+ brelse(iloc.bh);
+ return 1;
+out_release_page:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ if (handle)
+ ext4_journal_stop(handle);
+ brelse(iloc.bh);
+ return ret;
+}
+
+int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+ unsigned len, unsigned copied,
+ struct page *page)
+{
+ int i_size_changed = 0;
+
+ copied = ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+ /*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold i_mutex.
+ *
+ * But it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+ if (pos+copied > inode->i_size) {
+ i_size_write(inode, pos+copied);
+ i_size_changed = 1;
+ }
+ unlock_page(page);
+ page_cache_release(page);
+
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+ if (i_size_changed)
+ mark_inode_dirty(inode);
+
+ return copied;
+}
+
+#ifdef INLINE_DIR_DEBUG
+void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
+ void *inline_start, int inline_size)
+{
+ int offset;
+ unsigned short de_len;
+ struct ext4_dir_entry_2 *de = inline_start;
+ void *dlimit = inline_start + inline_size;
+
+ trace_printk("inode %lu\n", dir->i_ino);
+ offset = 0;
+ while ((void *)de < dlimit) {
+ de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
+ trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n",
+ offset, de_len, de->name_len, de->name,
+ de->name_len, le32_to_cpu(de->inode));
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ inline_start, inline_size, offset))
+ BUG();
+
+ offset += de_len;
+ de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
+ }
+}
+#else
+#define ext4_show_inline_dir(dir, bh, inline_start, inline_size)
+#endif
+
+/*
+ * Add a new entry into an inline dir.
+ * It will return -ENOSPC if no space is available, -EEXIST if the
+ * directory entry already exists, and -EIO on other failures.
+ */
+static int ext4_add_dirent_to_inline(handle_t *handle,
+ struct dentry *dentry,
+ struct inode *inode,
+ struct ext4_iloc *iloc,
+ void *inline_start, int inline_size)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ unsigned short reclen;
+ int err;
+ struct ext4_dir_entry_2 *de;
+
+ reclen = EXT4_DIR_REC_LEN(namelen);
+ err = ext4_find_dest_de(dir, inode, iloc->bh,
+ inline_start, inline_size,
+ name, namelen, &de);
+ if (err)
+ return err;
+
+ err = ext4_journal_get_write_access(handle, iloc->bh);
+ if (err)
+ return err;
+ ext4_insert_dentry(inode, de, inline_size, name, namelen);
+
+ ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
+
+ /*
+ * XXX shouldn't update any times until successful
+ * completion of syscall, but too many callers depend
+ * on this.
+ *
+ * XXX similarly, too many callers depend on
+ * ext4_new_inode() setting the times, but error
+ * recovery deletes the inode, so the worst that can
+ * happen is that the times are slightly out of date
+ * and/or different from the directory change time.
+ */
+ dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+ ext4_update_dx_flag(dir);
+ dir->i_version++;
+ ext4_mark_inode_dirty(handle, dir);
+ return 1;
+}
+
+static void *ext4_get_inline_xattr_pos(struct inode *inode,
+ struct ext4_iloc *iloc)
+{
+ struct ext4_xattr_entry *entry;
+ struct ext4_xattr_ibody_header *header;
+
+ BUG_ON(!EXT4_I(inode)->i_inline_off);
+
+ header = IHDR(inode, ext4_raw_inode(iloc));
+ entry = (struct ext4_xattr_entry *)((void *)ext4_raw_inode(iloc) +
+ EXT4_I(inode)->i_inline_off);
+
+ return (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs);
+}
+
+/* Set the final de to cover the whole block. */
+static void ext4_update_final_de(void *de_buf, int old_size, int new_size)
+{
+ struct ext4_dir_entry_2 *de, *prev_de;
+ void *limit;
+ int de_len;
+
+ de = (struct ext4_dir_entry_2 *)de_buf;
+ if (old_size) {
+ limit = de_buf + old_size;
+ do {
+ prev_de = de;
+ de_len = ext4_rec_len_from_disk(de->rec_len, old_size);
+ de_buf += de_len;
+ de = (struct ext4_dir_entry_2 *)de_buf;
+ } while (de_buf < limit);
+
+ prev_de->rec_len = ext4_rec_len_to_disk(de_len + new_size -
+ old_size, new_size);
+ } else {
+ /* The dir was just created, so make one empty entry covering the whole space. */
+ de->inode = 0;
+ de->rec_len = ext4_rec_len_to_disk(new_size, new_size);
+ }
+}
+
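ext4_update_final_de() leans on the usual ext2/3/4 invariant that the rec_len fields chain exactly to the end of the directory area, so after the inline area grows only the last entry's rec_len has to be stretched. A user-space model of that stretch over a toy dirent buffer; the struct mirrors only the header of ext4_dir_entry_2 (names omitted) and the kernel's ext4_rec_len_to_disk/from_disk encoding is skipped:

#include <assert.h>
#include <stdint.h>

struct dirent2 {		/* header-only shape of ext4_dir_entry_2 */
	uint32_t inode;
	uint16_t rec_len;
	uint8_t  name_len;
	uint8_t  file_type;
};

/* Stretch the final entry so the rec_len chain again covers new_size. */
static void update_final_de(void *buf, int old_size, int new_size)
{
	unsigned char *p = buf, *limit = (unsigned char *)buf + old_size;
	struct dirent2 *de = buf, *prev = buf;

	while (p < limit) {
		prev = de;
		p += de->rec_len;
		de = (struct dirent2 *)p;
	}
	prev->rec_len += new_size - old_size;
}

int main(void)
{
	uint32_t words[24] = {0};	/* 96-byte, 4-byte-aligned area */
	unsigned char *area = (unsigned char *)words;
	struct dirent2 *a = (struct dirent2 *)area;
	struct dirent2 *b = (struct dirent2 *)(area + 16);

	a->inode = 11; a->rec_len = 16; a->name_len = 1;
	b->inode = 12; b->rec_len = 48; b->name_len = 1;	/* covers up to 64 */

	update_final_de(area, 64, 96);	/* the inline area grew from 64 to 96 */
	assert(b->rec_len == 80);	/* last entry now covers up to byte 96 */
	return 0;
}
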
+static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
+ struct ext4_iloc *iloc)
+{
+ int ret;
+ int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
+ int new_size = get_max_inline_xattr_value_size(dir, iloc);
+
+ if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
+ return -ENOSPC;
+
+ ret = ext4_update_inline_data(handle, dir,
+ new_size + EXT4_MIN_INLINE_DATA_SIZE);
+ if (ret)
+ return ret;
+
+ ext4_update_final_de(ext4_get_inline_xattr_pos(dir, iloc), old_size,
+ EXT4_I(dir)->i_inline_size -
+ EXT4_MIN_INLINE_DATA_SIZE);
+ dir->i_size = EXT4_I(dir)->i_disksize = EXT4_I(dir)->i_inline_size;
+ return 0;
+}
+
+static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
+ struct ext4_iloc *iloc,
+ void *buf, int inline_size)
+{
+ ext4_create_inline_data(handle, inode, inline_size);
+ ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
+ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+}
+
+static int ext4_finish_convert_inline_dir(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *dir_block,
+ void *buf,
+ int inline_size)
+{
+ int err, csum_size = 0, header_size = 0;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ void *target = dir_block->b_data;
+
+ /*
+ * First create "." and ".." and then copy the dir information
+ * back to the block.
+ */
+ de = (struct ext4_dir_entry_2 *)target;
+ de = ext4_init_dot_dotdot(inode, de,
+ inode->i_sb->s_blocksize, csum_size,
+ le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
+ header_size = (void *)de - target;
+
+ memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE);
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ inode->i_size = inode->i_sb->s_blocksize;
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+ ext4_update_final_de(dir_block->b_data,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
+ inode->i_sb->s_blocksize - csum_size);
+
+ if (csum_size) {
+ t = EXT4_DIRENT_TAIL(dir_block->b_data,
+ inode->i_sb->s_blocksize);
+ initialize_dirent_tail(t, inode->i_sb->s_blocksize);
+ }
+ set_buffer_uptodate(dir_block);
+ err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+ if (err)
+ goto out;
+ set_buffer_verified(dir_block);
+out:
+ return err;
+}
+
+static int ext4_convert_inline_data_nolock(handle_t *handle,
+ struct inode *inode,
+ struct ext4_iloc *iloc)
+{
+ int error;
+ void *buf = NULL;
+ struct buffer_head *data_bh = NULL;
+ struct ext4_map_blocks map;
+ int inline_size;
+
+ inline_size = ext4_get_inline_size(inode);
+ buf = kmalloc(inline_size, GFP_NOFS);
+ if (!buf) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = ext4_read_inline_data(inode, buf, inline_size, iloc);
+ if (error < 0)
+ goto out;
+
+ error = ext4_destroy_inline_data_nolock(handle, inode);
+ if (error)
+ goto out;
+
+ map.m_lblk = 0;
+ map.m_len = 1;
+ map.m_flags = 0;
+ error = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
+ if (error < 0)
+ goto out_restore;
+ if (!(map.m_flags & EXT4_MAP_MAPPED)) {
+ error = -EIO;
+ goto out_restore;
+ }
+
+ data_bh = sb_getblk(inode->i_sb, map.m_pblk);
+ if (!data_bh) {
+ error = -EIO;
+ goto out_restore;
+ }
+
+ lock_buffer(data_bh);
+ error = ext4_journal_get_create_access(handle, data_bh);
+ if (error) {
+ unlock_buffer(data_bh);
+ error = -EIO;
+ goto out_restore;
+ }
+ memset(data_bh->b_data, 0, inode->i_sb->s_blocksize);
+
+ if (!S_ISDIR(inode->i_mode)) {
+ memcpy(data_bh->b_data, buf, inline_size);
+ set_buffer_uptodate(data_bh);
+ error = ext4_handle_dirty_metadata(handle,
+ inode, data_bh);
+ } else {
+ error = ext4_finish_convert_inline_dir(handle, inode, data_bh,
+ buf, inline_size);
+ }
+
+ unlock_buffer(data_bh);
+out_restore:
+ if (error)
+ ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
+
+out:
+ brelse(data_bh);
+ kfree(buf);
+ return error;
+}
+
+/*
+ * Try to add the new entry to the inline data.
+ * If it succeeds, return 0. If not, extend the inline dir and copy the
+ * data to the newly created block.
+ */
+int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+{
+ int ret, inline_size;
+ void *inline_start;
+ struct ext4_iloc iloc;
+ struct inode *dir = dentry->d_parent->d_inode;
+
+ ret = ext4_get_inode_loc(dir, &iloc);
+ if (ret)
+ return ret;
+
+ down_write(&EXT4_I(dir)->xattr_sem);
+ if (!ext4_has_inline_data(dir))
+ goto out;
+
+ inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ EXT4_INLINE_DOTDOT_SIZE;
+ inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
+
+ ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
+ inline_start, inline_size);
+ if (ret != -ENOSPC)
+ goto out;
+
+ /* Check whether it can be inserted into the inline xattr space. */
+ inline_size = EXT4_I(dir)->i_inline_size -
+ EXT4_MIN_INLINE_DATA_SIZE;
+ if (!inline_size) {
+ /* Try to use the xattr space. */
+ ret = ext4_update_inline_dir(handle, dir, &iloc);
+ if (ret && ret != -ENOSPC)
+ goto out;
+
+ inline_size = EXT4_I(dir)->i_inline_size -
+ EXT4_MIN_INLINE_DATA_SIZE;
+ }
+
+ if (inline_size) {
+ inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+
+ ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
+ inline_start, inline_size);
+
+ if (ret != -ENOSPC)
+ goto out;
+ }
+
+ /*
+ * The inline space is filled up, so create a new block for it.
+ * As the extent tree will be created, we have to save the inline
+ * dir first.
+ */
+ ret = ext4_convert_inline_data_nolock(handle, dir, &iloc);
+
+out:
+ ext4_mark_inode_dirty(handle, dir);
+ up_write(&EXT4_I(dir)->xattr_sem);
+ brelse(iloc.bh);
+ return ret;
+}
+
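Reading the function as a whole, the new dentry is offered three homes in order: the i_block area after the 4-byte ".." stub, then the in-inode xattr area (grown via ext4_update_inline_dir() if it is still empty), and finally a real directory block via conversion. A deliberately stripped-down decision-flow sketch; the helper names and the -ENOSPC constant are stand-ins, not ext4 functions:

#include <stdio.h>

#define ENOSPC_ERR (-28)	/* assumed value of -ENOSPC */

/* Stand-ins for the three insertion attempts made above. */
static int add_to_iblock_area(int fits)	{ return fits ? 0 : ENOSPC_ERR; }
static int add_to_xattr_area(int fits)	{ return fits ? 0 : ENOSPC_ERR; }
static int convert_to_dir_block(void)	{ return 0; }

static const char *try_add_inline_entry(int fits_iblock, int fits_xattr)
{
	if (add_to_iblock_area(fits_iblock) != ENOSPC_ERR)
		return "stored in i_block area";
	if (add_to_xattr_area(fits_xattr) != ENOSPC_ERR)
		return "stored in xattr area";
	convert_to_dir_block();
	return "converted to a directory block";
}

int main(void)
{
	puts(try_add_inline_entry(1, 0));
	puts(try_add_inline_entry(0, 1));
	puts(try_add_inline_entry(0, 0));
	return 0;
}
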
+int ext4_read_inline_dir(struct file *filp,
+ void *dirent, filldir_t filldir,
+ int *has_inline_data)
+{
+ int error = 0;
+ unsigned int offset, parent_ino;
+ int i, stored;
+ struct ext4_dir_entry_2 *de;
+ struct super_block *sb;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int ret, inline_size = 0;
+ struct ext4_iloc iloc;
+ void *dir_buf = NULL;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ up_read(&EXT4_I(inode)->xattr_sem);
+ *has_inline_data = 0;
+ goto out;
+ }
+
+ inline_size = ext4_get_inline_size(inode);
+ dir_buf = kmalloc(inline_size, GFP_NOFS);
+ if (!dir_buf) {
+ ret = -ENOMEM;
+ up_read(&EXT4_I(inode)->xattr_sem);
+ goto out;
+ }
+
+ ret = ext4_read_inline_data(inode, dir_buf, inline_size, &iloc);
+ up_read(&EXT4_I(inode)->xattr_sem);
+ if (ret < 0)
+ goto out;
+
+ sb = inode->i_sb;
+ stored = 0;
+ parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
+
+ while (!error && !stored && filp->f_pos < inode->i_size) {
+revalidate:
+ /*
+ * If the version has changed since the last call to
+ * readdir(2), then we might be pointing to an invalid
+ * dirent right now. Scan from the start of the inline
+ * dir to make sure.
+ */
+ if (filp->f_version != inode->i_version) {
+ for (i = 0;
+ i < inode->i_size && i < offset;) {
+ if (!i) {
+ /* skip "." and ".." if needed. */
+ i += EXT4_INLINE_DOTDOT_SIZE;
+ continue;
+ }
+ de = (struct ext4_dir_entry_2 *)
+ (dir_buf + i);
+ /* It's too expensive to do a full
+ * dirent test each time round this
+ * loop, but we do have to test at
+ * least that it is non-zero. A
+ * failure will be detected in the
+ * dirent test below. */
+ if (ext4_rec_len_from_disk(de->rec_len,
+ inline_size) < EXT4_DIR_REC_LEN(1))
+ break;
+ i += ext4_rec_len_from_disk(de->rec_len,
+ inline_size);
+ }
+ offset = i;
+ filp->f_pos = offset;
+ filp->f_version = inode->i_version;
+ }
+
+ while (!error && filp->f_pos < inode->i_size) {
+ if (filp->f_pos == 0) {
+ error = filldir(dirent, ".", 1, 0, inode->i_ino,
+ DT_DIR);
+ if (error)
+ break;
+ stored++;
+
+ error = filldir(dirent, "..", 2, 0, parent_ino,
+ DT_DIR);
+ if (error)
+ break;
+ stored++;
+
+ filp->f_pos = offset = EXT4_INLINE_DOTDOT_SIZE;
+ continue;
+ }
+
+ de = (struct ext4_dir_entry_2 *)(dir_buf + offset);
+ if (ext4_check_dir_entry(inode, filp, de,
+ iloc.bh, dir_buf,
+ inline_size, offset)) {
+ ret = stored;
+ goto out;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len,
+ inline_size);
+ if (le32_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ u64 version = filp->f_version;
+
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type));
+ if (error)
+ break;
+ if (version != filp->f_version)
+ goto revalidate;
+ stored++;
+ }
+ filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
+ inline_size);
+ }
+ offset = 0;
+ }
+out:
+ kfree(dir_buf);
+ brelse(iloc.bh);
+ return ret;
+}
+
+struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+ struct ext4_dir_entry_2 **parent_de,
+ int *retval)
+{
+ struct ext4_iloc iloc;
+
+ *retval = ext4_get_inode_loc(inode, &iloc);
+ if (*retval)
+ return NULL;
+
+ *parent_de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+
+ return iloc.bh;
+}
+
+/*
+ * Try to create the inline data for the new dir.
+ * If it succeeds, return 0, otherwise return the error.
+ * In case of ENOSPC, the caller should create the normal disk layout dir.
+ */
+int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent,
+ struct inode *inode)
+{
+ int ret, inline_size = EXT4_MIN_INLINE_DATA_SIZE;
+ struct ext4_iloc iloc;
+ struct ext4_dir_entry_2 *de;
+
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret)
+ return ret;
+
+ ret = ext4_prepare_inline_data(handle, inode, inline_size);
+ if (ret)
+ goto out;
+
+ /*
+ * For an inline dir, we only save the inode number for ".."
+ * and create a fake dentry to cover the remaining space.
+ */
+ de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+ de->inode = cpu_to_le32(parent->i_ino);
+ de = (struct ext4_dir_entry_2 *)((void *)de + EXT4_INLINE_DOTDOT_SIZE);
+ de->inode = 0;
+ de->rec_len = ext4_rec_len_to_disk(
+ inline_size - EXT4_INLINE_DOTDOT_SIZE,
+ inline_size);
+ set_nlink(inode, 2);
+ inode->i_size = EXT4_I(inode)->i_disksize = inline_size;
+out:
+ brelse(iloc.bh);
+ return ret;
+}
+
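A freshly created inline dir therefore has a tiny fixed layout: the first EXT4_INLINE_DOTDOT_SIZE bytes carry only the parent inode number standing in for "..", followed by one empty dirent whose rec_len covers the rest of i_block; "." and ".." proper are synthesized at readdir time. A user-space mock-up of that image, assuming a 60-byte i_block and a 4-byte dot-dot stub, and using native byte order instead of the on-disk little-endian fields:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MIN_INLINE   60		/* assumed i_block size */
#define DOTDOT_SIZE  4		/* assumed EXT4_INLINE_DOTDOT_SIZE */

struct dirent2 {		/* header-only shape of ext4_dir_entry_2 */
	uint32_t inode;
	uint16_t rec_len;
	uint8_t  name_len;
	uint8_t  file_type;
};

int main(void)
{
	uint32_t words[MIN_INLINE / 4] = {0};
	unsigned char *iblock = (unsigned char *)words;
	uint32_t parent_ino = 2;	/* e.g. the root directory */
	struct dirent2 *de;

	/* Bytes 0..3: parent inode number, the implicit "..". */
	memcpy(iblock, &parent_ino, sizeof(parent_ino));

	/* One empty dirent covering the remaining 56 bytes. */
	de = (struct dirent2 *)(iblock + DOTDOT_SIZE);
	de->inode = 0;
	de->rec_len = MIN_INLINE - DOTDOT_SIZE;

	assert(DOTDOT_SIZE + de->rec_len == MIN_INLINE);
	return 0;
}
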
+struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+ int *has_inline_data)
+{
+ int ret;
+ struct ext4_iloc iloc;
+ void *inline_start;
+ int inline_size;
+
+ if (ext4_get_inode_loc(dir, &iloc))
+ return NULL;
+
+ down_read(&EXT4_I(dir)->xattr_sem);
+ if (!ext4_has_inline_data(dir)) {
+ *has_inline_data = 0;
+ goto out;
+ }
+
+ inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ EXT4_INLINE_DOTDOT_SIZE;
+ inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
+ ret = search_dir(iloc.bh, inline_start, inline_size,
+ dir, d_name, 0, res_dir);
+ if (ret == 1)
+ goto out_find;
+ if (ret < 0)
+ goto out;
+
+ if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
+ goto out;
+
+ inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+ inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
+
+ ret = search_dir(iloc.bh, inline_start, inline_size,
+ dir, d_name, 0, res_dir);
+ if (ret == 1)
+ goto out_find;
+
+out:
+ brelse(iloc.bh);
+ iloc.bh = NULL;
+out_find:
+ up_read(&EXT4_I(dir)->xattr_sem);
+ return iloc.bh;
+}
+
+int ext4_delete_inline_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh,
+ int *has_inline_data)
+{
+ int err, inline_size;
+ struct ext4_iloc iloc;
+ void *inline_start;
+
+ err = ext4_get_inode_loc(dir, &iloc);
+ if (err)
+ return err;
+
+ down_write(&EXT4_I(dir)->xattr_sem);
+ if (!ext4_has_inline_data(dir)) {
+ *has_inline_data = 0;
+ goto out;
+ }
+
+ if ((void *)de_del - ((void *)ext4_raw_inode(&iloc)->i_block) <
+ EXT4_MIN_INLINE_DATA_SIZE) {
+ inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+ EXT4_INLINE_DOTDOT_SIZE;
+ inline_size = EXT4_MIN_INLINE_DATA_SIZE -
+ EXT4_INLINE_DOTDOT_SIZE;
+ } else {
+ inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+ inline_size = ext4_get_inline_size(dir) -
+ EXT4_MIN_INLINE_DATA_SIZE;
+ }
+
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err)
+ goto out;
+
+ err = ext4_generic_delete_entry(handle, dir, de_del, bh,
+ inline_start, inline_size, 0);
+ if (err)
+ goto out;
+
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_mark_inode_dirty(handle, dir);
+ if (unlikely(err))
+ goto out;
+
+ ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
+out:
+ up_write(&EXT4_I(dir)->xattr_sem);
+ brelse(iloc.bh);
+ if (err != -ENOENT)
+ ext4_std_error(dir->i_sb, err);
+ return err;
+}
+
+/*
+ * Get the inline dentry at offset.
+ */
+static inline struct ext4_dir_entry_2 *
+ext4_get_inline_entry(struct inode *inode,
+ struct ext4_iloc *iloc,
+ unsigned int offset,
+ void **inline_start,
+ int *inline_size)
+{
+ void *inline_pos;
+
+ BUG_ON(offset > ext4_get_inline_size(inode));
+
+ if (offset < EXT4_MIN_INLINE_DATA_SIZE) {
+ inline_pos = (void *)ext4_raw_inode(iloc)->i_block;
+ *inline_size = EXT4_MIN_INLINE_DATA_SIZE;
+ } else {
+ inline_pos = ext4_get_inline_xattr_pos(inode, iloc);
+ offset -= EXT4_MIN_INLINE_DATA_SIZE;
+ *inline_size = ext4_get_inline_size(inode) -
+ EXT4_MIN_INLINE_DATA_SIZE;
+ }
+
+ if (inline_start)
+ *inline_start = inline_pos;
+ return (struct ext4_dir_entry_2 *)(inline_pos + offset);
+}
+
+int empty_inline_dir(struct inode *dir, int *has_inline_data)
+{
+ int err, inline_size;
+ struct ext4_iloc iloc;
+ void *inline_pos;
+ unsigned int offset;
+ struct ext4_dir_entry_2 *de;
+ int ret = 1;
+
+ err = ext4_get_inode_loc(dir, &iloc);
+ if (err) {
+ EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
+ err, dir->i_ino);
+ return 1;
+ }
+
+ down_read(&EXT4_I(dir)->xattr_sem);
+ if (!ext4_has_inline_data(dir)) {
+ *has_inline_data = 0;
+ goto out;
+ }
+
+ de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+ if (!le32_to_cpu(de->inode)) {
+ ext4_warning(dir->i_sb,
+ "bad inline directory (dir #%lu) - no `..'",
+ dir->i_ino);
+ ret = 1;
+ goto out;
+ }
+
+ offset = EXT4_INLINE_DOTDOT_SIZE;
+ while (offset < dir->i_size) {
+ de = ext4_get_inline_entry(dir, &iloc, offset,
+ &inline_pos, &inline_size);
+ if (ext4_check_dir_entry(dir, NULL, de,
+ iloc.bh, inline_pos,
+ inline_size, offset)) {
+ ext4_warning(dir->i_sb,
+ "bad inline directory (dir #%lu) - "
+ "inode %u, rec_len %u, name_len %d"
+ "inline size %d\n",
+ dir->i_ino, le32_to_cpu(de->inode),
+ le16_to_cpu(de->rec_len), de->name_len,
+ inline_size);
+ ret = 1;
+ goto out;
+ }
+ if (le32_to_cpu(de->inode)) {
+ ret = 0;
+ goto out;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
+ }
+
+out:
+ up_read(&EXT4_I(dir)->xattr_sem);
+ brelse(iloc.bh);
+ return ret;
+}
+
+int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
+{
+ int ret;
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ ret = ext4_destroy_inline_data_nolock(handle, inode);
+ up_write(&EXT4_I(inode)->xattr_sem);
+
+ return ret;
+}
+
+int ext4_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ int *has_inline)
+{
+ __u64 physical = 0;
+ __u64 length;
+ __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST;
+ int error = 0;
+ struct ext4_iloc iloc;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ *has_inline = 0;
+ goto out;
+ }
+
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ goto out;
+
+ physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
+ physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
+ physical += offsetof(struct ext4_inode, i_block);
+ length = i_size_read(inode);
+
+ if (physical)
+ error = fiemap_fill_next_extent(fieinfo, 0, physical,
+ length, flags);
+ brelse(iloc.bh);
+out:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ return (error < 0 ? error : 0);
+}
+
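The physical offset reported here is a plain byte address built from three parts: the inode-table buffer's block number shifted into bytes, the raw inode's offset inside that buffer, and the offset of i_block inside struct ext4_inode. A small worked calculation; every number below, including the 40-byte i_block offset, is chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned blocksize_bits  = 12;		/* 4 KiB blocks, illustrative */
	uint64_t bh_blocknr      = 1059;	/* block holding this inode-table chunk */
	unsigned raw_inode_off   = 512;		/* offset of the raw inode in that block */
	unsigned i_block_off     = 40;		/* assumed offsetof(struct ext4_inode, i_block) */

	uint64_t physical = ((uint64_t)bh_blocknr << blocksize_bits)
			    + raw_inode_off + i_block_off;

	/* 1059 * 4096 + 512 + 40 = 4338216 */
	printf("inline data lives at byte %llu on disk\n",
	       (unsigned long long)physical);
	return 0;
}
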
+/*
+ * Called during an xattr set. If evicting the inline data would free
+ * enough space for 'needed', create the extent tree and move the data
+ * out to an external block; otherwise return -ENOSPC.
+ *
+ * We use jbd2 instead of the page cache to move the data to the 1st
+ * block so that the whole transaction can be committed atomically and
+ * the data isn't lost because of a delayed page cache write.
+ */
+int ext4_try_to_evict_inline_data(handle_t *handle,
+ struct inode *inode,
+ int needed)
+{
+ int error;
+ struct ext4_xattr_entry *entry;
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_inode *raw_inode;
+ struct ext4_iloc iloc;
+
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ return error;
+
+ raw_inode = ext4_raw_inode(&iloc);
+ header = IHDR(inode, raw_inode);
+ entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+ EXT4_I(inode)->i_inline_off);
+ if (EXT4_XATTR_LEN(entry->e_name_len) +
+ EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
+ error = -ENOSPC;
+ goto out;
+ }
+
+ error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+out:
+ brelse(iloc.bh);
+ return error;
+}
+
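The check above compares 'needed' with the space that deleting the system.data entry would actually free: the rounded-up entry header plus the rounded-up value. A worked computation using the usual 4-byte xattr rounding; the 16-byte entry header and the rounding macros are assumptions about the xattr format, not copied from the ext4 headers:

#include <stdio.h>

#define XATTR_PAD        4
#define XATTR_ROUND      (XATTR_PAD - 1)
#define XATTR_ENTRY_HDR  16	/* assumed sizeof(struct ext4_xattr_entry) */

#define XATTR_LEN(name_len) \
	(((name_len) + XATTR_ENTRY_HDR + XATTR_ROUND) & ~XATTR_ROUND)
#define XATTR_SIZE(size)  (((size) + XATTR_ROUND) & ~XATTR_ROUND)

int main(void)
{
	int name_len = 4;	/* "data" */
	int value_size = 70;	/* inline bytes stored beyond i_block */
	int freed = XATTR_LEN(name_len) + XATTR_SIZE(value_size);
	int needed = 96;

	/* XATTR_LEN(4) = 20, XATTR_SIZE(70) = 72, freed = 92 < 96 -> -ENOSPC */
	printf("freed=%d needed=%d -> %s\n", freed, needed,
	       freed < needed ? "-ENOSPC" : "evict inline data");
	return 0;
}
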
+void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+{
+ handle_t *handle;
+ int inline_size, value_len, needed_blocks;
+ size_t i_size;
+ void *value = NULL;
+ struct ext4_xattr_ibody_find is = {
+ .s = { .not_found = -ENODATA, },
+ };
+ struct ext4_xattr_info i = {
+ .name_index = EXT4_XATTR_INDEX_SYSTEM,
+ .name = EXT4_XATTR_SYSTEM_DATA,
+ };
+
+
+ needed_blocks = ext4_writepage_trans_blocks(inode);
+ handle = ext4_journal_start(inode, needed_blocks);
+ if (IS_ERR(handle))
+ return;
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ *has_inline = 0;
+ ext4_journal_stop(handle);
+ return;
+ }
+
+ if (ext4_orphan_add(handle, inode))
+ goto out;
+
+ if (ext4_get_inode_loc(inode, &is.iloc))
+ goto out;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ i_size = inode->i_size;
+ inline_size = ext4_get_inline_size(inode);
+ EXT4_I(inode)->i_disksize = i_size;
+
+ if (i_size < inline_size) {
+ /* Clear the content in the xattr space. */
+ if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
+ if (ext4_xattr_ibody_find(inode, &i, &is))
+ goto out_error;
+
+ BUG_ON(is.s.not_found);
+
+ value_len = le32_to_cpu(is.s.here->e_value_size);
+ value = kmalloc(value_len, GFP_NOFS);
+ if (!value)
+ goto out_error;
+
+ if (ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ value, value_len))
+ goto out_error;
+
+ i.value = value;
+ i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ?
+ i_size - EXT4_MIN_INLINE_DATA_SIZE : 0;
+ if (ext4_xattr_ibody_inline_set(handle, inode, &i, &is))
+ goto out_error;
+ }
+
+ /* Clear the content within i_blocks. */
+ if (i_size < EXT4_MIN_INLINE_DATA_SIZE)
+ memset(ext4_raw_inode(&is.iloc)->i_block + i_size, 0,
+ EXT4_MIN_INLINE_DATA_SIZE - i_size);
+
+ EXT4_I(inode)->i_inline_size = i_size <
+ EXT4_MIN_INLINE_DATA_SIZE ?
+ EXT4_MIN_INLINE_DATA_SIZE : i_size;
+ }
+
+out_error:
+ up_write(&EXT4_I(inode)->i_data_sem);
+out:
+ brelse(is.iloc.bh);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ kfree(value);
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+ ext4_journal_stop(handle);
+ return;
+}
+
+int ext4_convert_inline_data(struct inode *inode)
+{
+ int error, needed_blocks;
+ handle_t *handle;
+ struct ext4_iloc iloc;
+
+ if (!ext4_has_inline_data(inode)) {
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ return 0;
+ }
+
+ needed_blocks = ext4_writepage_trans_blocks(inode);
+
+ iloc.bh = NULL;
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ return error;
+
+ handle = ext4_journal_start(inode, needed_blocks);
+ if (IS_ERR(handle)) {
+ error = PTR_ERR(handle);
+ goto out_free;
+ }
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ up_write(&EXT4_I(inode)->xattr_sem);
+ goto out;
+ }
+
+ error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+ up_write(&EXT4_I(inode)->xattr_sem);
+out:
+ ext4_journal_stop(handle);
+out_free:
+ brelse(iloc.bh);
+ return error;
+}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b3c243b9afa..cbfe13bf5b2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -484,49 +484,6 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
}
/*
- * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
- */
-static void set_buffers_da_mapped(struct inode *inode,
- struct ext4_map_blocks *map)
-{
- struct address_space *mapping = inode->i_mapping;
- struct pagevec pvec;
- int i, nr_pages;
- pgoff_t index, end;
-
- index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
- end = (map->m_lblk + map->m_len - 1) >>
- (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
- pagevec_init(&pvec, 0);
- while (index <= end) {
- nr_pages = pagevec_lookup(&pvec, mapping, index,
- min(end - index + 1,
- (pgoff_t)PAGEVEC_SIZE));
- if (nr_pages == 0)
- break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- struct buffer_head *bh, *head;
-
- if (unlikely(page->mapping != mapping) ||
- !PageDirty(page))
- break;
-
- if (page_has_buffers(page)) {
- bh = head = page_buffers(page);
- do {
- set_buffer_da_mapped(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
- index++;
- }
- pagevec_release(&pvec);
- }
-}
-
-/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
*
@@ -574,7 +531,16 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
up_read((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, map);
+ int ret;
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+ /* Delayed allocation blocks may be allocated by fallocate and
+ * converted to initialized by direct I/O;
+ * we need to handle the delayed extent here.
+ */
+ down_write((&EXT4_I(inode)->i_data_sem));
+ goto delayed_mapped;
+ }
+ ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
@@ -652,12 +618,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
- /* If we have successfully mapped the delayed allocated blocks,
- * set the BH_Da_Mapped bit on them. Its important to do this
- * under the protection of i_data_sem.
- */
- if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
- set_buffers_da_mapped(inode, map);
+ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ int ret;
+delayed_mapped:
+ /* delayed allocation blocks have been allocated */
+ ret = ext4_es_remove_extent(inode, map->m_lblk,
+ map->m_len);
+ if (ret < 0)
+ retval = ret;
+ }
}
up_write((&EXT4_I(inode)->i_data_sem));
@@ -680,10 +649,13 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
int ret = 0, started = 0;
int dio_credits;
+ if (ext4_has_inline_data(inode))
+ return -ERANGE;
+
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
- if (flags && !handle) {
+ if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
/* Direct IO write... */
if (map.m_len > DIO_MAX_BLOCKS)
map.m_len = DIO_MAX_BLOCKS;
@@ -798,13 +770,13 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
return NULL;
}
-static int walk_page_buffers(handle_t *handle,
- struct buffer_head *head,
- unsigned from,
- unsigned to,
- int *partial,
- int (*fn)(handle_t *handle,
- struct buffer_head *bh))
+int ext4_walk_page_buffers(handle_t *handle,
+ struct buffer_head *head,
+ unsigned from,
+ unsigned to,
+ int *partial,
+ int (*fn)(handle_t *handle,
+ struct buffer_head *bh))
{
struct buffer_head *bh;
unsigned block_start, block_end;
@@ -854,8 +826,8 @@ static int walk_page_buffers(handle_t *handle,
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
-static int do_journal_get_write_access(handle_t *handle,
- struct buffer_head *bh)
+int do_journal_get_write_access(handle_t *handle,
+ struct buffer_head *bh)
{
int dirty = buffer_dirty(bh);
int ret;
@@ -878,7 +850,7 @@ static int do_journal_get_write_access(handle_t *handle,
return ret;
}
-static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -902,6 +874,17 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
from = pos & (PAGE_CACHE_SIZE - 1);
to = from + len;
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+ ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
+ flags, pagep);
+ if (ret < 0)
+ goto out;
+ if (ret == 1) {
+ ret = 0;
+ goto out;
+ }
+ }
+
retry:
handle = ext4_journal_start(inode, needed_blocks);
if (IS_ERR(handle)) {
@@ -919,6 +902,7 @@ retry:
ret = -ENOMEM;
goto out;
}
+
*pagep = page;
if (ext4_should_dioread_nolock(inode))
@@ -927,8 +911,9 @@ retry:
ret = __block_write_begin(page, pos, len, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
- ret = walk_page_buffers(handle, page_buffers(page),
- from, to, NULL, do_journal_get_write_access);
+ ret = ext4_walk_page_buffers(handle, page_buffers(page),
+ from, to, NULL,
+ do_journal_get_write_access);
}
if (ret) {
@@ -983,7 +968,12 @@ static int ext4_generic_write_end(struct file *file,
struct inode *inode = mapping->host;
handle_t *handle = ext4_journal_current_handle();
- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+ if (ext4_has_inline_data(inode))
+ copied = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ else
+ copied = block_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
/*
* No need to use i_size_read() here, the i_size
@@ -1134,16 +1124,21 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
- if (copied < len) {
- if (!PageUptodate(page))
- copied = 0;
- page_zero_new_buffers(page, from+copied, to);
- }
+ if (ext4_has_inline_data(inode))
+ copied = ext4_write_inline_data_end(inode, pos, len,
+ copied, page);
+ else {
+ if (copied < len) {
+ if (!PageUptodate(page))
+ copied = 0;
+ page_zero_new_buffers(page, from+copied, to);
+ }
- ret = walk_page_buffers(handle, page_buffers(page), from,
- to, &partial, write_end_fn);
- if (!partial)
- SetPageUptodate(page);
+ ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
+ to, &partial, write_end_fn);
+ if (!partial)
+ SetPageUptodate(page);
+ }
new_i_size = pos + copied;
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
@@ -1301,6 +1296,7 @@ static void ext4_da_page_release_reservation(struct page *page,
struct inode *inode = page->mapping->host;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int num_clusters;
+ ext4_fsblk_t lblk;
head = page_buffers(page);
bh = head;
@@ -1310,20 +1306,23 @@ static void ext4_da_page_release_reservation(struct page *page,
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
clear_buffer_delay(bh);
- clear_buffer_da_mapped(bh);
}
curr_off = next_off;
} while ((bh = bh->b_this_page) != head);
+ if (to_release) {
+ lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ ext4_es_remove_extent(inode, lblk, to_release);
+ }
+
/* If we have released all the blocks belonging to a cluster, then we
* need to release the reserved space for that cluster. */
num_clusters = EXT4_NUM_B2C(sbi, to_release);
while (num_clusters > 0) {
- ext4_fsblk_t lblk;
lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
((num_clusters - 1) << sbi->s_cluster_bits);
if (sbi->s_cluster_ratio == 1 ||
- !ext4_find_delalloc_cluster(inode, lblk, 1))
+ !ext4_find_delalloc_cluster(inode, lblk))
ext4_da_release_space(inode, 1);
num_clusters--;
@@ -1429,8 +1428,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_delay(bh);
bh->b_blocknr = pblock;
}
- if (buffer_da_mapped(bh))
- clear_buffer_da_mapped(bh);
if (buffer_unwritten(bh) ||
buffer_mapped(bh))
BUG_ON(bh->b_blocknr != pblock);
@@ -1500,9 +1497,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
struct pagevec pvec;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
+ ext4_lblk_t start, last;
index = mpd->first_page;
end = mpd->next_page - 1;
+
+ start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ ext4_es_remove_extent(inode, start, last - start + 1);
+
+ pagevec_init(&pvec, 0);
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
@@ -1656,15 +1660,6 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
for (i = 0; i < map.m_len; i++)
unmap_underlying_metadata(bdev, map.m_pblk + i);
-
- if (ext4_should_order_data(mpd->inode)) {
- err = ext4_jbd2_file_inode(handle, mpd->inode);
- if (err) {
- /* Only if the journal is aborted */
- mpd->retval = err;
- goto submit_io;
- }
- }
}
/*
@@ -1795,7 +1790,19 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
* file system block.
*/
down_read((&EXT4_I(inode)->i_data_sem));
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ if (ext4_has_inline_data(inode)) {
+ /*
+ * We will soon create blocks for this page, so
+ * pretend that the blocks aren't allocated yet.
+ * In case of clusters, we have to handle the work
+ * of mapping from cluster so that the reserved space
+ * is calculated properly.
+ */
+ if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
+ ext4_find_delalloc_cluster(inode, map->m_lblk))
+ map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+ retval = 0;
+ } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
@@ -1814,6 +1821,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
goto out_unlock;
}
+ retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
+ if (retval)
+ goto out_unlock;
+
/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
* and it should not appear on the bh->b_state.
*/
@@ -1842,8 +1853,8 @@ out_unlock:
* We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
* initialized properly.
*/
-static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
+int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
int ret = 0;
@@ -1917,15 +1928,29 @@ static int __ext4_journalled_writepage(struct page *page,
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
- struct buffer_head *page_bufs;
+ struct buffer_head *page_bufs = NULL;
handle_t *handle = NULL;
- int ret = 0;
- int err;
+ int ret = 0, err = 0;
+ int inline_data = ext4_has_inline_data(inode);
+ struct buffer_head *inode_bh = NULL;
ClearPageChecked(page);
- page_bufs = page_buffers(page);
- BUG_ON(!page_bufs);
- walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+
+ if (inline_data) {
+ BUG_ON(page->index != 0);
+ BUG_ON(len > ext4_get_max_inline_size(inode));
+ inode_bh = ext4_journalled_write_inline_data(inode, len, page);
+ if (inode_bh == NULL)
+ goto out;
+ } else {
+ page_bufs = page_buffers(page);
+ if (!page_bufs) {
+ BUG();
+ goto out;
+ }
+ ext4_walk_page_buffers(handle, page_bufs, 0, len,
+ NULL, bget_one);
+ }
/* As soon as we unlock the page, it can go away, but we have
* references to buffers so we are safe */
unlock_page(page);
@@ -1938,11 +1963,18 @@ static int __ext4_journalled_writepage(struct page *page,
BUG_ON(!ext4_handle_valid(handle));
- ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
- do_journal_get_write_access);
+ if (inline_data) {
+ ret = ext4_journal_get_write_access(handle, inode_bh);
- err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
- write_end_fn);
+ err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
+
+ } else {
+ ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ do_journal_get_write_access);
+
+ err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ write_end_fn);
+ }
if (ret == 0)
ret = err;
EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
@@ -1950,9 +1982,12 @@ static int __ext4_journalled_writepage(struct page *page,
if (!ret)
ret = err;
- walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+ if (!ext4_has_inline_data(inode))
+ ext4_walk_page_buffers(handle, page_bufs, 0, len,
+ NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
+ brelse(inode_bh);
return ret;
}
@@ -2029,8 +2064,8 @@ static int ext4_writepage(struct page *page,
commit_write = 1;
}
page_bufs = page_buffers(page);
- if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
- ext4_bh_delay_or_unwritten)) {
+ if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+ ext4_bh_delay_or_unwritten)) {
/*
* We don't want to do block allocation, so redirty
* the page and return. We may reach here when we do
@@ -2096,7 +2131,8 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
* mpage_da_map_and_submit to map a single contiguous memory region
* and then write them.
*/
-static int write_cache_pages_da(struct address_space *mapping,
+static int write_cache_pages_da(handle_t *handle,
+ struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd,
pgoff_t *done_index)
@@ -2175,6 +2211,17 @@ static int write_cache_pages_da(struct address_space *mapping,
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
+ /*
+ * If we have inline data and arrive here, it means that
+ * we will soon create the block for the 1st page, so
+ * we'd better clear the inline data here.
+ */
+ if (ext4_has_inline_data(inode)) {
+ BUG_ON(ext4_test_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA));
+ ext4_destroy_inline_data(handle, inode);
+ }
+
if (mpd->next_page != page->index)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
@@ -2381,7 +2428,8 @@ retry:
* contiguous region of logical blocks that need
* blocks to be allocated by ext4 and submit them.
*/
- ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
+ ret = write_cache_pages_da(handle, mapping,
+ wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
* haven't done the I/O yet, map the blocks and submit
@@ -2445,7 +2493,6 @@ out_writepages:
return ret;
}
-#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
s64 free_blocks, dirty_blocks;
@@ -2502,6 +2549,19 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+ ret = ext4_da_write_inline_data_begin(mapping, inode,
+ pos, len, flags,
+ pagep, fsdata);
+ if (ret < 0)
+ goto out;
+ if (ret == 1) {
+ ret = 0;
+ goto out;
+ }
+ }
+
retry:
/*
* With delayed allocation, we don't log the i_disksize update
@@ -2603,22 +2663,13 @@ static int ext4_da_write_end(struct file *file,
* changes. So let's piggyback the i_disksize mark_inode_dirty
* into that.
*/
-
new_i_size = pos + copied;
if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
- if (ext4_da_should_update_i_disksize(page, end)) {
+ if (ext4_has_inline_data(inode) ||
+ ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem);
- if (new_i_size > EXT4_I(inode)->i_disksize) {
- /*
- * Updating i_disksize when extending file
- * without needing block allocation
- */
- if (ext4_should_order_data(inode))
- ret = ext4_jbd2_file_inode(handle,
- inode);
-
+ if (new_i_size > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = new_i_size;
- }
up_write(&EXT4_I(inode)->i_data_sem);
/* We need to mark inode dirty even if
* new_i_size is less that inode->i_size
@@ -2627,8 +2678,16 @@ static int ext4_da_write_end(struct file *file,
ext4_mark_inode_dirty(handle, inode);
}
}
- ret2 = generic_write_end(file, mapping, pos, len, copied,
+
+ if (write_mode != CONVERT_INLINE_DATA &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+ ext4_has_inline_data(inode))
+ ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
+ page);
+ else
+ ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
+
copied = ret2;
if (ret2 < 0)
ret = ret2;
@@ -2721,6 +2780,12 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
journal_t *journal;
int err;
+ /*
+ * We can get here for an inline file via the FIBMAP ioctl
+ */
+ if (ext4_has_inline_data(inode))
+ return 0;
+
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
test_opt(inode->i_sb, DELALLOC)) {
/*
@@ -2766,14 +2831,30 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
static int ext4_readpage(struct file *file, struct page *page)
{
+ int ret = -EAGAIN;
+ struct inode *inode = page->mapping->host;
+
trace_ext4_readpage(page);
- return mpage_readpage(page, ext4_get_block);
+
+ if (ext4_has_inline_data(inode))
+ ret = ext4_readpage_inline(inode, page);
+
+ if (ret == -EAGAIN)
+ return mpage_readpage(page, ext4_get_block);
+
+ return ret;
}
static int
ext4_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
+ struct inode *inode = mapping->host;
+
+ /* If the file has inline data, no need to do readpages. */
+ if (ext4_has_inline_data(inode))
+ return 0;
+
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
@@ -2799,8 +2880,6 @@ static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offs
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
- journal_t *journal = EXT4_JOURNAL(page->mapping->host);
-
trace_ext4_invalidatepage(page, offset);
/*
@@ -2808,16 +2887,34 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
*/
if (ext4_should_dioread_nolock(page->mapping->host))
ext4_invalidatepage_free_endio(page, offset);
+
+ /* No journalling happens on data buffers when this function is used */
+ WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
+
+ block_invalidatepage(page, offset);
+}
+
+static int __ext4_journalled_invalidatepage(struct page *page,
+ unsigned long offset)
+{
+ journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+
+ trace_ext4_journalled_invalidatepage(page, offset);
+
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0)
ClearPageChecked(page);
- if (journal)
- jbd2_journal_invalidatepage(journal, page, offset);
- else
- block_invalidatepage(page, offset);
+ return jbd2_journal_invalidatepage(journal, page, offset);
+}
+
+/* Wrapper for aops... */
+static void ext4_journalled_invalidatepage(struct page *page,
+ unsigned long offset)
+{
+ WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -2840,7 +2937,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
* We allocate an uinitialized extent if blocks haven't been allocated.
* The extent will be converted to initialized after the IO is complete.
*/
-static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
@@ -2850,29 +2947,12 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
}
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int flags)
+ struct buffer_head *bh_result, int create)
{
- handle_t *handle = ext4_journal_current_handle();
- struct ext4_map_blocks map;
- int ret = 0;
-
- ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
- inode->i_ino, flags);
-
- flags = EXT4_GET_BLOCKS_NO_LOCK;
-
- map.m_lblk = iblock;
- map.m_len = bh_result->b_size >> inode->i_blkbits;
-
- ret = ext4_map_blocks(handle, inode, &map, flags);
- if (ret > 0) {
- map_bh(bh_result, inode->i_sb, map.m_pblk);
- bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
- map.m_flags;
- bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
- ret = 0;
- }
- return ret;
+ ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
+ inode->i_ino, create);
+ return _ext4_get_block(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_NO_LOCK);
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
@@ -2978,10 +3058,10 @@ retry:
* fall back to buffered IO.
*
* For holes, we fallocate those blocks, mark them as uninitialized
- * If those blocks were preallocated, we mark sure they are splited, but
+ * If those blocks were preallocated, we make sure they are split, but
* still keep the range to write as uninitialized.
*
- * The unwrritten extents will be converted to written when DIO is completed.
+ * The unwritten extents will be converted to written when DIO is completed.
* For async direct IO, since the IO may still be pending when this returns, we
* set up an end_io call back function, which will do the conversion
* when the async direct IO is completed.
@@ -2999,125 +3079,120 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode = file->f_mapping->host;
ssize_t ret;
size_t count = iov_length(iov, nr_segs);
-
+ int overwrite = 0;
+ get_block_t *get_block_func = NULL;
+ int dio_flags = 0;
loff_t final_size = offset + count;
- if (rw == WRITE && final_size <= inode->i_size) {
- int overwrite = 0;
- BUG_ON(iocb->private == NULL);
+ /* Use the old path for reads and writes beyond i_size. */
+ if (rw != WRITE || final_size > inode->i_size)
+ return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
- /* If we do a overwrite dio, i_mutex locking can be released */
- overwrite = *((int *)iocb->private);
+ BUG_ON(iocb->private == NULL);
- if (overwrite) {
- atomic_inc(&inode->i_dio_count);
- down_read(&EXT4_I(inode)->i_data_sem);
- mutex_unlock(&inode->i_mutex);
- }
+ /* If we do an overwrite dio, i_mutex locking can be released */
+ overwrite = *((int *)iocb->private);
- /*
- * We could direct write to holes and fallocate.
- *
- * Allocated blocks to fill the hole are marked as uninitialized
- * to prevent parallel buffered read to expose the stale data
- * before DIO complete the data IO.
- *
- * As to previously fallocated extents, ext4 get_block
- * will just simply mark the buffer mapped but still
- * keep the extents uninitialized.
- *
- * for non AIO case, we will convert those unwritten extents
- * to written after return back from blockdev_direct_IO.
- *
- * for async DIO, the conversion needs to be defered when
- * the IO is completed. The ext4 end_io callback function
- * will be called to take care of the conversion work.
- * Here for async case, we allocate an io_end structure to
- * hook to the iocb.
- */
- iocb->private = NULL;
- ext4_inode_aio_set(inode, NULL);
- if (!is_sync_kiocb(iocb)) {
- ext4_io_end_t *io_end =
- ext4_init_io_end(inode, GFP_NOFS);
- if (!io_end) {
- ret = -ENOMEM;
- goto retake_lock;
- }
- io_end->flag |= EXT4_IO_END_DIRECT;
- iocb->private = io_end;
- /*
- * we save the io structure for current async
- * direct IO, so that later ext4_map_blocks()
- * could flag the io structure whether there
- * is a unwritten extents needs to be converted
- * when IO is completed.
- */
- ext4_inode_aio_set(inode, io_end);
- }
+ if (overwrite) {
+ atomic_inc(&inode->i_dio_count);
+ down_read(&EXT4_I(inode)->i_data_sem);
+ mutex_unlock(&inode->i_mutex);
+ }
- if (overwrite)
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block_write_nolock,
- ext4_end_io_dio,
- NULL,
- 0);
- else
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block_write,
- ext4_end_io_dio,
- NULL,
- DIO_LOCKING);
- if (iocb->private)
- ext4_inode_aio_set(inode, NULL);
+ /*
+ * We could direct write to holes and fallocate.
+ *
+ * Allocated blocks to fill the hole are marked as
+ * uninitialized to prevent a parallel buffered read from exposing
+ * the stale data before the DIO completes the data IO.
+ *
+ * As for previously fallocated extents, ext4 get_block will
+ * simply mark the buffer mapped but still keep the
+ * extents uninitialized.
+ *
+ * For the non AIO case, we will convert those unwritten extents
+ * to written after we return from blockdev_direct_IO.
+ *
+ * For async DIO, the conversion needs to be deferred until the
+ * IO is completed. The ext4 end_io callback function will be
+ * called to take care of the conversion work. Here for async
+ * case, we allocate an io_end structure to hook to the iocb.
+ */
+ iocb->private = NULL;
+ ext4_inode_aio_set(inode, NULL);
+ if (!is_sync_kiocb(iocb)) {
+ ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end) {
+ ret = -ENOMEM;
+ goto retake_lock;
+ }
+ io_end->flag |= EXT4_IO_END_DIRECT;
+ iocb->private = io_end;
/*
- * The io_end structure takes a reference to the inode,
- * that structure needs to be destroyed and the
- * reference to the inode need to be dropped, when IO is
- * complete, even with 0 byte write, or failed.
- *
- * In the successful AIO DIO case, the io_end structure will be
- * desctroyed and the reference to the inode will be dropped
- * after the end_io call back function is called.
- *
- * In the case there is 0 byte write, or error case, since
- * VFS direct IO won't invoke the end_io call back function,
- * we need to free the end_io structure here.
+ * We save the io structure for the current async direct
+ * IO, so that later ext4_map_blocks() can flag the
+ * io structure if there are unwritten extents that need
+ * to be converted when the IO is completed.
*/
- if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
- ext4_free_io_end(iocb->private);
- iocb->private = NULL;
- } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
- EXT4_STATE_DIO_UNWRITTEN)) {
- int err;
- /*
- * for non AIO case, since the IO is already
- * completed, we could do the conversion right here
- */
- err = ext4_convert_unwritten_extents(inode,
- offset, ret);
- if (err < 0)
- ret = err;
- ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
- }
+ ext4_inode_aio_set(inode, io_end);
+ }
- retake_lock:
- /* take i_mutex locking again if we do a ovewrite dio */
- if (overwrite) {
- inode_dio_done(inode);
- up_read(&EXT4_I(inode)->i_data_sem);
- mutex_lock(&inode->i_mutex);
- }
+ if (overwrite) {
+ get_block_func = ext4_get_block_write_nolock;
+ } else {
+ get_block_func = ext4_get_block_write;
+ dio_flags = DIO_LOCKING;
+ }
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ get_block_func,
+ ext4_end_io_dio,
+ NULL,
+ dio_flags);
+
+ if (iocb->private)
+ ext4_inode_aio_set(inode, NULL);
+ /*
+ * The io_end structure takes a reference to the inode; that
+ * structure needs to be destroyed and the reference to the
+ * inode needs to be dropped when the IO is complete, even for
+ * a 0-byte write or a failed write.
+ *
+ * In the successful AIO DIO case, the io_end structure will
+ * be destroyed and the reference to the inode will be dropped
+ * after the end_io call back function is called.
+ *
+ * In the case of a 0-byte write or an error, since the VFS
+ * direct IO won't invoke the end_io call back function, we
+ * need to free the end_io structure here.
+ */
+ if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+ ext4_free_io_end(iocb->private);
+ iocb->private = NULL;
+ } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN)) {
+ int err;
+ /*
+ * For the non AIO case, since the IO is already
+ * completed, we can do the conversion right here.
+ */
+ err = ext4_convert_unwritten_extents(inode,
+ offset, ret);
+ if (err < 0)
+ ret = err;
+ ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+ }
- return ret;
+retake_lock:
+ /* take i_mutex locking again if we did an overwrite dio */
+ if (overwrite) {
+ inode_dio_done(inode);
+ up_read(&EXT4_I(inode)->i_data_sem);
+ mutex_lock(&inode->i_mutex);
}
- /* for write the the end of file case, we fall back to old way */
- return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+ return ret;
}
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
@@ -3134,6 +3209,10 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
if (ext4_should_journal_data(inode))
return 0;
+ /* Let buffer I/O handle the inline data case. */
+ if (ext4_has_inline_data(inode))
+ return 0;
+
trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
@@ -3201,7 +3280,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.bmap = ext4_bmap,
- .invalidatepage = ext4_invalidatepage,
+ .invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -3531,6 +3610,14 @@ void ext4_truncate(struct inode *inode)
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
+ if (ext4_has_inline_data(inode)) {
+ int has_inline = 1;
+
+ ext4_inline_data_truncate(inode, &has_inline);
+ if (has_inline)
+ return;
+ }
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ext4_ext_truncate(inode);
else
@@ -3756,6 +3843,19 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
}
}
+static inline void ext4_iget_extra_inode(struct inode *inode,
+ struct ext4_inode *raw_inode,
+ struct ext4_inode_info *ei)
+{
+ __le32 *magic = (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
+ if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+ ext4_find_inline_data_nolock(inode);
+ } else
+ EXT4_I(inode)->i_inline_off = 0;
+}
+
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
struct ext4_iloc iloc;
@@ -3826,6 +3926,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
+ ei->i_inline_off = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
@@ -3898,11 +3999,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ei->i_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
} else {
- __le32 *magic = (void *)raw_inode +
- EXT4_GOOD_OLD_INODE_SIZE +
- ei->i_extra_isize;
- if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
- ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+ ext4_iget_extra_inode(inode, raw_inode, ei);
}
}
@@ -3925,17 +4022,19 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ei->i_file_acl);
ret = -EIO;
goto bad_inode;
- } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode)))
- /* Validate extent which is part of inode */
- ret = ext4_ext_check_inode(inode);
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))) {
- /* Validate block references which are part of inode */
- ret = ext4_ind_check_inode(inode);
+ } else if (!ext4_has_inline_data(inode)) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ (S_ISLNK(inode->i_mode) &&
+ !ext4_inode_is_fast_symlink(inode))))
+ /* Validate extent which is part of inode */
+ ret = ext4_ext_check_inode(inode);
+ } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ (S_ISLNK(inode->i_mode) &&
+ !ext4_inode_is_fast_symlink(inode))) {
+ /* Validate block references which are part of inode */
+ ret = ext4_ind_check_inode(inode);
+ }
}
if (ret)
goto bad_inode;
@@ -4122,9 +4221,10 @@ static int ext4_do_update_inode(handle_t *handle,
cpu_to_le32(new_encode_dev(inode->i_rdev));
raw_inode->i_block[2] = 0;
}
- } else
+ } else if (!ext4_has_inline_data(inode)) {
for (block = 0; block < EXT4_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
+ }
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
if (ei->i_extra_isize) {
@@ -4221,6 +4321,47 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
+ * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
+ * buffers that are attached to a page straddling i_size and are undergoing
+ * commit. In that case we have to wait for commit to finish and try again.
+ */
+static void ext4_wait_for_tail_page_commit(struct inode *inode)
+{
+ struct page *page;
+ unsigned offset;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = 0;
+ int ret;
+
+ offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ /*
+ * All buffers in the last page remain valid? Then there's nothing to
+ * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+ * blocksize case
+ */
+ if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+ return;
+ while (1) {
+ page = find_lock_page(inode->i_mapping,
+ inode->i_size >> PAGE_CACHE_SHIFT);
+ if (!page)
+ return;
+ ret = __ext4_journalled_invalidatepage(page, offset);
+ unlock_page(page);
+ page_cache_release(page);
+ if (ret != -EBUSY)
+ return;
+ commit_tid = 0;
+ read_lock(&journal->j_state_lock);
+ if (journal->j_committing_transaction)
+ commit_tid = journal->j_committing_transaction->t_tid;
+ read_unlock(&journal->j_state_lock);
+ if (commit_tid)
+ jbd2_log_wait_commit(journal, commit_tid);
+ }
+}
+
+/*
* ext4_setattr()
*
* Called from notify_change.
@@ -4333,16 +4474,28 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size != i_size_read(inode)) {
- truncate_setsize(inode, attr->ia_size);
- /* Inode size will be reduced, wait for dio in flight.
- * Temporarily disable dioread_nolock to prevent
- * livelock. */
+ if (attr->ia_size != inode->i_size) {
+ loff_t oldsize = inode->i_size;
+
+ i_size_write(inode, attr->ia_size);
+ /*
+ * Blocks are going to be removed from the inode. Wait
+ * for dio in flight. Temporarily disable
+ * dioread_nolock to prevent livelock.
+ */
if (orphan) {
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
- ext4_inode_resume_unlocked_dio(inode);
+ if (!ext4_should_journal_data(inode)) {
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ ext4_inode_resume_unlocked_dio(inode);
+ } else
+ ext4_wait_for_tail_page_commit(inode);
}
+ /*
+ * Truncate pagecache after we've waited for commit
+ * in data=journal mode to make pages freeable.
+ */
+ truncate_pagecache(inode, oldsize, inode->i_size);
}
ext4_truncate(inode);
}
@@ -4811,8 +4964,9 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
* journal_start/journal_stop which can block and take a long time
*/
if (page_has_buffers(page)) {
- if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
- ext4_bh_unmapped)) {
+ if (!ext4_walk_page_buffers(NULL, page_buffers(page),
+ 0, len, NULL,
+ ext4_bh_unmapped)) {
/* Wait so that we don't change page under IO */
wait_on_page_writeback(page);
ret = VM_FAULT_LOCKED;
@@ -4833,7 +4987,7 @@ retry_alloc:
}
ret = __block_page_mkwrite(vma, vmf, get_block);
if (!ret && ext4_should_journal_data(inode)) {
- if (walk_page_buffers(handle, page_buffers(page), 0,
+ if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
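
The new ext4_wait_for_tail_page_commit() above guards its work with a pure arithmetic check on i_size and the block size. As a standalone illustration only (not part of the patch), here is a userspace sketch of that check; the names PG_SIZE and tail_page_needs_invalidate are invented, and a 4096-byte page stands in for PAGE_CACHE_SIZE.

/* Sketch of the tail-page check: given i_size and the filesystem block size,
 * decide whether the page straddling i_size may still hold buffers that lie
 * entirely beyond EOF and therefore may need invalidating. */
#include <stdio.h>

#define PG_SIZE 4096UL

static int tail_page_needs_invalidate(unsigned long long i_size,
                                      unsigned long blocksize)
{
        unsigned long offset = i_size & (PG_SIZE - 1);

        /* i_size falls inside the last block of the page: every buffer in
         * that page starts below i_size, so there is nothing to do. */
        if (offset > PG_SIZE - blocksize)
                return 0;
        return 1;
}

int main(void)
{
        /* 1k blocks, i_size 8100: offset 4004 > 3072 -> tail page untouched */
        printf("%d\n", tail_page_needs_invalidate(8100, 1024));
        /* 1k blocks, i_size 6000: offset 1904 <= 3072 -> buffers past EOF */
        printf("%d\n", tail_page_needs_invalidate(6000, 1024));
        return 0;
}
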
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 526e5535860..1bf6fe785c4 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1373,7 +1373,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
ex->fe_start += next;
while (needed > ex->fe_len &&
- (buddy = mb_find_buddy(e4b, order, &max))) {
+ mb_find_buddy(e4b, order, &max)) {
if (block + 1 >= max)
break;
@@ -2607,9 +2607,17 @@ static void ext4_free_data_callback(struct super_block *sb,
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
- if (test_opt(sb, DISCARD))
- ext4_issue_discard(sb, entry->efd_group,
- entry->efd_start_cluster, entry->efd_count);
+ if (test_opt(sb, DISCARD)) {
+ err = ext4_issue_discard(sb, entry->efd_group,
+ entry->efd_start_cluster,
+ entry->efd_count);
+ if (err && err != -EOPNOTSUPP)
+ ext4_msg(sb, KERN_WARNING, "discard request in"
+ " group:%d block:%d count:%d failed"
+ " with %d", entry->efd_group,
+ entry->efd_start_cluster,
+ entry->efd_count, err);
+ }
err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
/* we expect to find existing buddy because it's pinned */
@@ -4310,8 +4318,10 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
repeat:
/* allocate space in core */
*errp = ext4_mb_regular_allocator(ac);
- if (*errp)
+ if (*errp) {
+ ext4_discard_allocated_blocks(ac);
goto errout;
+ }
/* as we've just preallocated more space than
* user requested originally, we store allocated
@@ -4333,10 +4343,10 @@ repeat:
ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
goto repeat;
- } else if (*errp)
- errout:
+ } else if (*errp) {
ext4_discard_allocated_blocks(ac);
- else {
+ goto errout;
+ } else {
block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
ar->len = ac->ac_b_ex.fe_len;
}
@@ -4347,6 +4357,7 @@ repeat:
*errp = -ENOSPC;
}
+errout:
if (*errp) {
ac->ac_b_ex.fe_len = 0;
ar->len = 0;
@@ -4656,8 +4667,16 @@ do_more:
* with group lock held. generate_buddy look at
* them with group lock_held
*/
- if (test_opt(sb, DISCARD))
- ext4_issue_discard(sb, block_group, bit, count);
+ if (test_opt(sb, DISCARD)) {
+ err = ext4_issue_discard(sb, block_group, bit, count);
+ if (err && err != -EOPNOTSUPP)
+ ext4_msg(sb, KERN_WARNING, "discard request in"
+ " group:%d block:%d count:%lu failed"
+ " with %d", block_group, bit, count,
+ err);
+ }
+
ext4_lock_group(sb, block_group);
mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
mb_free_blocks(inode, &e4b, bit, count_clusters);
@@ -4851,10 +4870,11 @@ error_return:
* one will allocate those blocks, mark it as used in buddy bitmap. This must
* be called under the group lock.
*/
-static void ext4_trim_extent(struct super_block *sb, int start, int count,
+static int ext4_trim_extent(struct super_block *sb, int start, int count,
ext4_group_t group, struct ext4_buddy *e4b)
{
struct ext4_free_extent ex;
+ int ret = 0;
trace_ext4_trim_extent(sb, group, start, count);
@@ -4870,9 +4890,10 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
*/
mb_mark_used(e4b, &ex);
ext4_unlock_group(sb, group);
- ext4_issue_discard(sb, group, start, count);
+ ret = ext4_issue_discard(sb, group, start, count);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
+ return ret;
}
/**
@@ -4901,7 +4922,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
void *bitmap;
ext4_grpblk_t next, count = 0, free_count = 0;
struct ext4_buddy e4b;
- int ret;
+ int ret = 0;
trace_ext4_trim_all_free(sb, group, start, max);
@@ -4928,8 +4949,11 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
next = mb_find_next_bit(bitmap, max + 1, start);
if ((next - start) >= minblocks) {
- ext4_trim_extent(sb, start,
- next - start, group, &e4b);
+ ret = ext4_trim_extent(sb, start,
+ next - start, group, &e4b);
+ if (ret && ret != -EOPNOTSUPP)
+ break;
+ ret = 0;
count += next - start;
}
free_count += next - start;
@@ -4950,8 +4974,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
break;
}
- if (!ret)
+ if (!ret) {
+ ret = count;
EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+ }
out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -4959,7 +4985,7 @@ out:
ext4_debug("trimmed %d blocks in the group %d\n",
count, group);
- return count;
+ return ret;
}
/**
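
The discard hunks above settle on one convention: an -EOPNOTSUPP return from the block layer is ignored (the device simply has no discard support), any other error is only logged and the free/trim path carries on. A userspace sketch of that convention follows, illustration only; issue_discard() and free_blocks() are invented stand-ins, fprintf() stands in for ext4_msg(), and the error values come from <errno.h>.

#include <stdio.h>
#include <errno.h>

static int issue_discard(int simulated_result)
{
        return simulated_result;        /* pretend the block layer said this */
}

static void free_blocks(int group, int block, int count, int simulated_result)
{
        int err = issue_discard(simulated_result);

        if (err && err != -EOPNOTSUPP)
                fprintf(stderr,
                        "discard request in group:%d block:%d count:%d failed with %d\n",
                        group, block, count, err);
        /* freeing the blocks continues regardless of the discard outcome */
}

int main(void)
{
        free_blocks(3, 128, 16, 0);             /* success: silent */
        free_blocks(3, 144, 16, -EOPNOTSUPP);   /* no discard support: silent */
        free_blocks(3, 160, 16, -EIO);          /* real failure: warn, go on */
        return 0;
}
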
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index f1bb32ec016..db8226d595f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include "ext4_jbd2.h"
+#include "ext4_extents.h"
/*
* The contiguous blocks details which can be
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 292daeeed45..d9cc5ee42f5 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4.h"
+#include "ext4_extents.h"
/**
* get_ext_path - Find an extent path for designated logical block number.
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6d600a69fc9..8990165346e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -202,13 +202,8 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode);
/* checksumming functions */
-#define EXT4_DIRENT_TAIL(block, blocksize) \
- ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
- ((blocksize) - \
- sizeof(struct ext4_dir_entry_tail))))
-
-static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
- unsigned int blocksize)
+void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+ unsigned int blocksize)
{
memset(t, 0, sizeof(struct ext4_dir_entry_tail));
t->det_rec_len = ext4_rec_len_to_disk(
@@ -261,6 +256,12 @@ static __le32 ext4_dirent_csum(struct inode *inode,
return cpu_to_le32(csum);
}
+static void warn_no_space_for_csum(struct inode *inode)
+{
+ ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
+ "checksum. Please run e2fsck -D.", inode->i_ino);
+}
+
int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
{
struct ext4_dir_entry_tail *t;
@@ -271,8 +272,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
t = get_dirent_tail(inode, dirent);
if (!t) {
- EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
- "leaf for checksum. Please run e2fsck -D.");
+ warn_no_space_for_csum(inode);
return 0;
}
@@ -294,8 +294,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
t = get_dirent_tail(inode, dirent);
if (!t) {
- EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
- "leaf for checksum. Please run e2fsck -D.");
+ warn_no_space_for_csum(inode);
return;
}
@@ -303,9 +302,9 @@ static void ext4_dirent_csum_set(struct inode *inode,
(void *)t - (void *)dirent);
}
-static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
- struct inode *inode,
- struct buffer_head *bh)
+int ext4_handle_dirty_dirent_node(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *bh)
{
ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
return ext4_handle_dirty_metadata(handle, inode, bh);
@@ -377,8 +376,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
- EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
- "tree checksum found. Run e2fsck -D.");
+ warn_no_space_for_csum(inode);
return 1;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
@@ -408,8 +406,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
- EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
- "tree checksum. Run e2fsck -D.");
+ warn_no_space_for_csum(inode);
return;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
@@ -890,6 +887,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
EXT4_DIR_REC_LEN(0));
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
+ bh->b_data, bh->b_size,
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ ((char *)de - bh->b_data))) {
/* On error, skip the f_pos to the next block. */
@@ -1007,6 +1005,15 @@ errout:
return (err);
}
+static inline int search_dirblock(struct buffer_head *bh,
+ struct inode *dir,
+ const struct qstr *d_name,
+ unsigned int offset,
+ struct ext4_dir_entry_2 **res_dir)
+{
+ return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
+ d_name, offset, res_dir);
+}
/*
* Directory block splitting, compacting
@@ -1081,13 +1088,6 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
dx_set_count(entries, count + 1);
}
-static void ext4_update_dx_flag(struct inode *inode)
-{
- if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT4_FEATURE_COMPAT_DIR_INDEX))
- ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-}
-
/*
* NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
*
@@ -1107,11 +1107,13 @@ static inline int ext4_match (int len, const char * const name,
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
-static inline int search_dirblock(struct buffer_head *bh,
- struct inode *dir,
- const struct qstr *d_name,
- unsigned int offset,
- struct ext4_dir_entry_2 ** res_dir)
+int search_dir(struct buffer_head *bh,
+ char *search_buf,
+ int buf_size,
+ struct inode *dir,
+ const struct qstr *d_name,
+ unsigned int offset,
+ struct ext4_dir_entry_2 **res_dir)
{
struct ext4_dir_entry_2 * de;
char * dlimit;
@@ -1119,8 +1121,8 @@ static inline int search_dirblock(struct buffer_head *bh,
const char *name = d_name->name;
int namelen = d_name->len;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
- dlimit = bh->b_data + dir->i_sb->s_blocksize;
+ de = (struct ext4_dir_entry_2 *)search_buf;
+ dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
@@ -1128,7 +1130,8 @@ static inline int search_dirblock(struct buffer_head *bh,
if ((char *) de + namelen <= dlimit &&
ext4_match (namelen, name, de)) {
/* found a match - just to be sure, do a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
+ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+ bh->b_size, offset))
return -1;
*res_dir = de;
return 1;
@@ -1144,6 +1147,21 @@ static inline int search_dirblock(struct buffer_head *bh,
return 0;
}
+static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
+ struct ext4_dir_entry *de)
+{
+ struct super_block *sb = dir->i_sb;
+
+ if (!is_dx(dir))
+ return 0;
+ if (block == 0)
+ return 1;
+ if (de->inode == 0 &&
+ ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
+ sb->s_blocksize)
+ return 1;
+ return 0;
+}
/*
* ext4_find_entry()
@@ -1158,7 +1176,8 @@ static inline int search_dirblock(struct buffer_head *bh,
*/
static struct buffer_head * ext4_find_entry (struct inode *dir,
const struct qstr *d_name,
- struct ext4_dir_entry_2 ** res_dir)
+ struct ext4_dir_entry_2 **res_dir,
+ int *inlined)
{
struct super_block *sb;
struct buffer_head *bh_use[NAMEI_RA_SIZE];
@@ -1179,6 +1198,18 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
namelen = d_name->len;
if (namelen > EXT4_NAME_LEN)
return NULL;
+
+ if (ext4_has_inline_data(dir)) {
+ int has_inline_data = 1;
+ ret = ext4_find_inline_entry(dir, d_name, res_dir,
+ &has_inline_data);
+ if (has_inline_data) {
+ if (inlined)
+ *inlined = 1;
+ return ret;
+ }
+ }
+
if ((namelen <= 2) && (name[0] == '.') &&
(name[1] == '.' || name[1] == '\0')) {
/*
@@ -1244,6 +1275,8 @@ restart:
goto next;
}
if (!buffer_verified(bh) &&
+ !is_dx_internal_node(dir, block,
+ (struct ext4_dir_entry *)bh->b_data) &&
!ext4_dirent_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data)) {
EXT4_ERROR_INODE(dir, "checksumming directory "
@@ -1361,7 +1394,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
if (dentry->d_name.len > EXT4_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- bh = ext4_find_entry(dir, &dentry->d_name, &de);
+ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
inode = NULL;
if (bh) {
__u32 ino = le32_to_cpu(de->inode);
@@ -1395,7 +1428,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
struct ext4_dir_entry_2 * de;
struct buffer_head *bh;
- bh = ext4_find_entry(child->d_inode, &dotdot, &de);
+ bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
@@ -1593,6 +1626,63 @@ errout:
return NULL;
}
+int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+ struct buffer_head *bh,
+ void *buf, int buf_size,
+ const char *name, int namelen,
+ struct ext4_dir_entry_2 **dest_de)
+{
+ struct ext4_dir_entry_2 *de;
+ unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
+ int nlen, rlen;
+ unsigned int offset = 0;
+ char *top;
+
+ de = (struct ext4_dir_entry_2 *)buf;
+ top = buf + buf_size - reclen;
+ while ((char *) de <= top) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ buf, buf_size, offset))
+ return -EIO;
+ if (ext4_match(namelen, name, de))
+ return -EEXIST;
+ nlen = EXT4_DIR_REC_LEN(de->name_len);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ if ((de->inode ? rlen - nlen : rlen) >= reclen)
+ break;
+ de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+ offset += rlen;
+ }
+ if ((char *) de > top)
+ return -ENOSPC;
+
+ *dest_de = de;
+ return 0;
+}
+
+void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ const char *name, int namelen)
+{
+
+ int nlen, rlen;
+
+ nlen = EXT4_DIR_REC_LEN(de->name_len);
+ rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+ if (de->inode) {
+ struct ext4_dir_entry_2 *de1 =
+ (struct ext4_dir_entry_2 *)((char *)de + nlen);
+ de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
+ de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
+ de = de1;
+ }
+ de->file_type = EXT4_FT_UNKNOWN;
+ de->inode = cpu_to_le32(inode->i_ino);
+ ext4_set_de_type(inode->i_sb, de, inode->i_mode);
+ de->name_len = namelen;
+ memcpy(de->name, name, namelen);
+}
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
* it points to a directory entry which is guaranteed to be large
@@ -1608,12 +1698,10 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
- unsigned int offset = 0;
unsigned int blocksize = dir->i_sb->s_blocksize;
unsigned short reclen;
- int nlen, rlen, err;
- char *top;
int csum_size = 0;
+ int err;
if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
@@ -1621,22 +1709,11 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
reclen = EXT4_DIR_REC_LEN(namelen);
if (!de) {
- de = (struct ext4_dir_entry_2 *)bh->b_data;
- top = bh->b_data + (blocksize - csum_size) - reclen;
- while ((char *) de <= top) {
- if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
- return -EIO;
- if (ext4_match(namelen, name, de))
- return -EEXIST;
- nlen = EXT4_DIR_REC_LEN(de->name_len);
- rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
- if ((de->inode? rlen - nlen: rlen) >= reclen)
- break;
- de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
- offset += rlen;
- }
- if ((char *) de > top)
- return -ENOSPC;
+ err = ext4_find_dest_de(dir, inode,
+ bh, bh->b_data, blocksize - csum_size,
+ name, namelen, &de);
+ if (err)
+ return err;
}
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
@@ -1646,19 +1723,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
}
/* By now the buffer is marked for journaling */
- nlen = EXT4_DIR_REC_LEN(de->name_len);
- rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
- if (de->inode) {
- struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
- de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, blocksize);
- de->rec_len = ext4_rec_len_to_disk(nlen, blocksize);
- de = de1;
- }
- de->file_type = EXT4_FT_UNKNOWN;
- de->inode = cpu_to_le32(inode->i_ino);
- ext4_set_de_type(dir->i_sb, de, inode->i_mode);
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
+ ext4_insert_dentry(inode, de, blocksize, name, namelen);
+
/*
* XXX shouldn't update any times until successful
* completion of syscall, but too many callers depend
@@ -1831,6 +1897,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
blocksize = sb->s_blocksize;
if (!dentry->d_name.len)
return -EINVAL;
+
+ if (ext4_has_inline_data(dir)) {
+ retval = ext4_try_add_inline_entry(handle, dentry, inode);
+ if (retval < 0)
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+ return retval;
+ }
+ }
+
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
@@ -2036,36 +2113,29 @@ cleanup:
}
/*
- * ext4_delete_entry deletes a directory entry by merging it with the
- * previous entry
+ * ext4_generic_delete_entry deletes a directory entry by merging it
+ * with the previous entry
*/
-static int ext4_delete_entry(handle_t *handle,
- struct inode *dir,
- struct ext4_dir_entry_2 *de_del,
- struct buffer_head *bh)
+int ext4_generic_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh,
+ void *entry_buf,
+ int buf_size,
+ int csum_size)
{
struct ext4_dir_entry_2 *de, *pde;
unsigned int blocksize = dir->i_sb->s_blocksize;
- int csum_size = 0;
- int i, err;
-
- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
- csum_size = sizeof(struct ext4_dir_entry_tail);
+ int i;
i = 0;
pde = NULL;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
- while (i < bh->b_size - csum_size) {
- if (ext4_check_dir_entry(dir, NULL, de, bh, i))
+ de = (struct ext4_dir_entry_2 *)entry_buf;
+ while (i < buf_size - csum_size) {
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ bh->b_data, bh->b_size, i))
return -EIO;
if (de == de_del) {
- BUFFER_TRACE(bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, bh);
- if (unlikely(err)) {
- ext4_std_error(dir->i_sb, err);
- return err;
- }
if (pde)
pde->rec_len = ext4_rec_len_to_disk(
ext4_rec_len_from_disk(pde->rec_len,
@@ -2076,12 +2146,6 @@ static int ext4_delete_entry(handle_t *handle,
else
de->inode = 0;
dir->i_version++;
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_dirent_node(handle, dir, bh);
- if (unlikely(err)) {
- ext4_std_error(dir->i_sb, err);
- return err;
- }
return 0;
}
i += ext4_rec_len_from_disk(de->rec_len, blocksize);
@@ -2091,6 +2155,48 @@ static int ext4_delete_entry(handle_t *handle,
return -ENOENT;
}
+static int ext4_delete_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh)
+{
+ int err, csum_size = 0;
+
+ if (ext4_has_inline_data(dir)) {
+ int has_inline_data = 1;
+ err = ext4_delete_inline_entry(handle, dir, de_del, bh,
+ &has_inline_data);
+ if (has_inline_data)
+ return err;
+ }
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ BUFFER_TRACE(bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err))
+ goto out;
+
+ err = ext4_generic_delete_entry(handle, dir, de_del,
+ bh, bh->b_data,
+ dir->i_sb->s_blocksize, csum_size);
+ if (err)
+ goto out;
+
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_dirent_node(handle, dir, bh);
+ if (unlikely(err))
+ goto out;
+
+ return 0;
+out:
+ if (err != -ENOENT)
+ ext4_std_error(dir->i_sb, err);
+ return err;
+}
+
/*
* DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
* since this indicates that nlinks count was previously 1.
@@ -2211,21 +2317,95 @@ retry:
return err;
}
-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int blocksize, int csum_size,
+ unsigned int parent_ino, int dotdot_real_len)
+{
+ de->inode = cpu_to_le32(inode->i_ino);
+ de->name_len = 1;
+ de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+ blocksize);
+ strcpy(de->name, ".");
+ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
+ de = ext4_next_entry(de, blocksize);
+ de->inode = cpu_to_le32(parent_ino);
+ de->name_len = 2;
+ if (!dotdot_real_len)
+ de->rec_len = ext4_rec_len_to_disk(blocksize -
+ (csum_size + EXT4_DIR_REC_LEN(1)),
+ blocksize);
+ else
+ de->rec_len = ext4_rec_len_to_disk(
+ EXT4_DIR_REC_LEN(de->name_len), blocksize);
+ strcpy(de->name, "..");
+ ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
+ return ext4_next_entry(de, blocksize);
+}
+
+static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+ struct inode *inode)
{
- handle_t *handle;
- struct inode *inode;
struct buffer_head *dir_block = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
- int err, retries = 0;
+ int err;
if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
csum_size = sizeof(struct ext4_dir_entry_tail);
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+ err = ext4_try_create_inline_dir(handle, dir, inode);
+ if (err < 0 && err != -ENOSPC)
+ goto out;
+ if (!err)
+ goto out;
+ }
+
+ inode->i_size = EXT4_I(inode)->i_disksize = blocksize;
+ if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
+ if (!err) {
+ err = -EIO;
+ ext4_error(inode->i_sb,
+ "Directory hole detected on inode %lu\n",
+ inode->i_ino);
+ }
+ goto out;
+ }
+ BUFFER_TRACE(dir_block, "get_write_access");
+ err = ext4_journal_get_write_access(handle, dir_block);
+ if (err)
+ goto out;
+ de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+ ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
+ set_nlink(inode, 2);
+ if (csum_size) {
+ t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+ initialize_dirent_tail(t, blocksize);
+ }
+
+ BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+ if (err)
+ goto out;
+ set_buffer_verified(dir_block);
+out:
+ brelse(dir_block);
+ return err;
+}
+
+static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ handle_t *handle;
+ struct inode *inode;
+ int err, retries = 0;
+
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
@@ -2249,47 +2429,9 @@ retry:
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
- inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
- if (!err) {
- err = -EIO;
- ext4_error(inode->i_sb,
- "Directory hole detected on inode %lu\n",
- inode->i_ino);
- }
- goto out_clear_inode;
- }
- BUFFER_TRACE(dir_block, "get_write_access");
- err = ext4_journal_get_write_access(handle, dir_block);
- if (err)
- goto out_clear_inode;
- de = (struct ext4_dir_entry_2 *) dir_block->b_data;
- de->inode = cpu_to_le32(inode->i_ino);
- de->name_len = 1;
- de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
- blocksize);
- strcpy(de->name, ".");
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
- de = ext4_next_entry(de, blocksize);
- de->inode = cpu_to_le32(dir->i_ino);
- de->rec_len = ext4_rec_len_to_disk(blocksize -
- (csum_size + EXT4_DIR_REC_LEN(1)),
- blocksize);
- de->name_len = 2;
- strcpy(de->name, "..");
- ext4_set_de_type(dir->i_sb, de, S_IFDIR);
- set_nlink(inode, 2);
-
- if (csum_size) {
- t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
- initialize_dirent_tail(t, blocksize);
- }
-
- BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+ err = ext4_init_new_dir(handle, dir, inode);
if (err)
goto out_clear_inode;
- set_buffer_verified(dir_block);
err = ext4_mark_inode_dirty(handle, inode);
if (!err)
err = ext4_add_entry(handle, dentry, inode);
@@ -2309,7 +2451,6 @@ out_clear_inode:
unlock_new_inode(inode);
d_instantiate(dentry, inode);
out_stop:
- brelse(dir_block);
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
@@ -2327,6 +2468,14 @@ static int empty_dir(struct inode *inode)
struct super_block *sb;
int err = 0;
+ if (ext4_has_inline_data(inode)) {
+ int has_inline_data = 1;
+
+ err = empty_inline_dir(inode, &has_inline_data);
+ if (has_inline_data)
+ return err;
+ }
+
sb = inode->i_sb;
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
@@ -2393,7 +2542,8 @@ static int empty_dir(struct inode *inode)
set_buffer_verified(bh);
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
- if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
+ if (ext4_check_dir_entry(inode, NULL, de, bh,
+ bh->b_data, bh->b_size, offset)) {
de = (struct ext4_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2498,7 +2648,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
struct ext4_iloc iloc;
int err = 0;
- if (!EXT4_SB(inode->i_sb)->s_journal)
+ if ((!EXT4_SB(inode->i_sb)->s_journal) &&
+ !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
@@ -2579,7 +2730,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
return PTR_ERR(handle);
retval = -ENOENT;
- bh = ext4_find_entry(dir, &dentry->d_name, &de);
+ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (!bh)
goto end_rmdir;
@@ -2644,7 +2795,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
ext4_handle_sync(handle);
retval = -ENOENT;
- bh = ext4_find_entry(dir, &dentry->d_name, &de);
+ bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (!bh)
goto end_unlink;
@@ -2826,8 +2977,39 @@ retry:
return err;
}
-#define PARENT_INO(buffer, size) \
- (ext4_next_entry((struct ext4_dir_entry_2 *)(buffer), size)->inode)
+
+/*
+ * Try to find the buffer head which contains the parent block.
+ * It should be the inode block if the directory is inlined, or the
+ * first block if it is a regular directory.
+ */
+static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+ struct inode *inode,
+ int *retval,
+ struct ext4_dir_entry_2 **parent_de,
+ int *inlined)
+{
+ struct buffer_head *bh;
+
+ if (!ext4_has_inline_data(inode)) {
+ if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) {
+ if (!*retval) {
+ *retval = -EIO;
+ ext4_error(inode->i_sb,
+ "Directory hole detected on inode %lu\n",
+ inode->i_ino);
+ }
+ return NULL;
+ }
+ *parent_de = ext4_next_entry(
+ (struct ext4_dir_entry_2 *)bh->b_data,
+ inode->i_sb->s_blocksize);
+ return bh;
+ }
+
+ *inlined = 1;
+ return ext4_get_first_inline_block(inode, parent_de, retval);
+}
/*
* Anybody can rename anything with this: the permission checks are left to the
@@ -2841,6 +3023,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct buffer_head *old_bh, *new_bh, *dir_bh;
struct ext4_dir_entry_2 *old_de, *new_de;
int retval, force_da_alloc = 0;
+ int inlined = 0, new_inlined = 0;
+ struct ext4_dir_entry_2 *parent_de;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
@@ -2860,7 +3044,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
ext4_handle_sync(handle);
- old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
+ old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
/*
* Check for inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
@@ -2873,7 +3057,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
new_inode = new_dentry->d_inode;
- new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
+ new_bh = ext4_find_entry(new_dir, &new_dentry->d_name,
+ &new_de, &new_inlined);
if (new_bh) {
if (!new_inode) {
brelse(new_bh);
@@ -2887,22 +3072,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
}
retval = -EIO;
- if (!(dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval))) {
- if (!retval) {
- retval = -EIO;
- ext4_error(old_inode->i_sb,
- "Directory hole detected on inode %lu\n",
- old_inode->i_ino);
- }
+ dir_bh = ext4_get_first_dir_block(handle, old_inode,
+ &retval, &parent_de,
+ &inlined);
+ if (!dir_bh)
goto end_rename;
- }
- if (!buffer_verified(dir_bh) &&
+ if (!inlined && !buffer_verified(dir_bh) &&
!ext4_dirent_csum_verify(old_inode,
(struct ext4_dir_entry *)dir_bh->b_data))
goto end_rename;
set_buffer_verified(dir_bh);
- if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
- old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
+ if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
goto end_rename;
retval = -EMLINK;
if (!new_inode && new_dir != old_dir &&
@@ -2931,10 +3111,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_current_time(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
- retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
- if (unlikely(retval)) {
- ext4_std_error(new_dir->i_sb, retval);
- goto end_rename;
+ if (!new_inlined) {
+ retval = ext4_handle_dirty_dirent_node(handle,
+ new_dir, new_bh);
+ if (unlikely(retval)) {
+ ext4_std_error(new_dir->i_sb, retval);
+ goto end_rename;
+ }
}
brelse(new_bh);
new_bh = NULL;
@@ -2962,7 +3145,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct buffer_head *old_bh2;
struct ext4_dir_entry_2 *old_de2;
- old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
+ old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
+ &old_de2, NULL);
if (old_bh2) {
retval = ext4_delete_entry(handle, old_dir,
old_de2, old_bh2);
@@ -2982,17 +3166,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
- PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
- cpu_to_le32(new_dir->i_ino);
+ parent_de->inode = cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
- if (is_dx(old_inode)) {
- retval = ext4_handle_dirty_dx_node(handle,
- old_inode,
- dir_bh);
+ if (!inlined) {
+ if (is_dx(old_inode)) {
+ retval = ext4_handle_dirty_dx_node(handle,
+ old_inode,
+ dir_bh);
+ } else {
+ retval = ext4_handle_dirty_dirent_node(handle,
+ old_inode, dir_bh);
+ }
} else {
- retval = ext4_handle_dirty_dirent_node(handle,
- old_inode,
- dir_bh);
+ retval = ext4_mark_inode_dirty(handle, old_inode);
}
if (retval) {
ext4_std_error(old_dir->i_sb, retval);
@@ -3043,23 +3229,19 @@ const struct inode_operations ext4_dir_inode_operations = {
.mknod = ext4_mknod,
.rename = ext4_rename,
.setattr = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
-#endif
.get_acl = ext4_get_acl,
.fiemap = ext4_fiemap,
};
const struct inode_operations ext4_special_inode_operations = {
.setattr = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
-#endif
.get_acl = ext4_get_acl,
};
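
ext4_find_dest_de() and ext4_insert_dentry() above carry the long-standing dirent packing rule out of add_dirent_to_buf() so the inline-data code can reuse it: a live entry only needs EXT4_DIR_REC_LEN(name_len) bytes, and the surplus of its rec_len is handed to the new name. A userspace sketch of that split follows, illustration only; fake_dirent, split_for_new_entry and DIR_REC_LEN are invented, the 8-byte header and 4-byte rounding mirror the classic layout, and checksum tails plus the large-block rec_len encoding are ignored.

#include <stdio.h>

#define DIR_REC_LEN(name_len)  (((name_len) + 8 + 3) & ~3U)

struct fake_dirent {
        unsigned int inode;     /* 0 means "unused entry" */
        unsigned int rec_len;   /* space this entry currently owns */
        unsigned int name_len;
};

/* Returns the rec_len handed to the new entry, or 0 if it does not fit. */
static unsigned int split_for_new_entry(struct fake_dirent *de,
                                        unsigned int new_name_len)
{
        unsigned int nlen = DIR_REC_LEN(de->name_len);
        unsigned int rlen = de->rec_len;
        unsigned int need = DIR_REC_LEN(new_name_len);

        /* Same test as ext4_find_dest_de(): surplus must cover the new entry. */
        if ((de->inode ? rlen - nlen : rlen) < need)
                return 0;
        if (de->inode) {
                de->rec_len = nlen;     /* live entry shrinks to what it needs */
                return rlen - nlen;     /* surplus becomes the new rec_len */
        }
        return rlen;                    /* unused entry is simply reused */
}

int main(void)
{
        struct fake_dirent de = { .inode = 12, .rec_len = 100, .name_len = 5 };
        unsigned int got = split_for_new_entry(&de, 9);

        printf("old entry keeps %u bytes, new entry gets %u (needs %u)\n",
               de.rec_len, got, DIR_REC_LEN(9));
        return 0;
}
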
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 68e896e12a6..0016fbca2a4 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -27,7 +27,6 @@
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
-#include "ext4_extents.h"
static struct kmem_cache *io_page_cachep, *io_end_cachep;
@@ -111,7 +110,7 @@ static int ext4_end_io(ext4_io_end_t *io)
inode_dio_done(inode);
/* Wake up anyone waiting on unwritten extent conversion */
if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
- wake_up_all(ext4_ioend_wq(io->inode));
+ wake_up_all(ext4_ioend_wq(inode));
return ret;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 47bf06a2765..d99387b89ed 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -783,7 +783,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
- goto exit_sbh;
+ goto exit_dind;
err = ext4_journal_get_write_access(handle, dind);
if (unlikely(err))
@@ -792,7 +792,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
/* ext4_reserve_inode_write() gets a reference on the iloc */
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (unlikely(err))
- goto exit_dindj;
+ goto exit_dind;
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
sizeof(struct buffer_head *),
@@ -846,12 +846,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
exit_inode:
ext4_kvfree(n_group_desc);
- /* ext4_handle_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
-exit_dindj:
- /* ext4_handle_release_buffer(handle, dind); */
-exit_sbh:
- /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
exit_dind:
brelse(dind);
exit_bh:
@@ -969,14 +964,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
}
for (i = 0; i < reserved_gdb; i++) {
- if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
- /*
- int j;
- for (j = 0; j < i; j++)
- ext4_handle_release_buffer(handle, primary[j]);
- */
+ if ((err = ext4_journal_get_write_access(handle, primary[i])))
goto exit_bh;
- }
}
if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 80928f71685..3d4fb81bacd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -45,7 +45,7 @@
#include <linux/freezer.h>
#include "ext4.h"
-#include "ext4_extents.h"
+#include "ext4_extents.h" /* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
@@ -939,10 +939,11 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
return NULL;
ei->vfs_inode.i_version = 1;
- ei->vfs_inode.i_data.writeback_index = 0;
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
+ ext4_es_init_tree(&ei->i_es_tree);
+ rwlock_init(&ei->i_es_lock);
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
@@ -996,9 +997,7 @@ static void init_once(void *foo)
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
INIT_LIST_HEAD(&ei->i_orphan);
-#ifdef CONFIG_EXT4_FS_XATTR
init_rwsem(&ei->xattr_sem);
-#endif
init_rwsem(&ei->i_data_sem);
inode_init_once(&ei->vfs_inode);
}
@@ -1031,6 +1030,7 @@ void ext4_clear_inode(struct inode *inode)
clear_inode(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
+ ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1447,13 +1447,8 @@ static const struct mount_opts {
{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_DATAJ},
{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_DATAJ},
{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_DATAJ},
-#ifdef CONFIG_EXT4_FS_XATTR
{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
-#else
- {Opt_user_xattr, 0, MOPT_NOSUPPORT},
- {Opt_nouser_xattr, 0, MOPT_NOSUPPORT},
-#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
@@ -1650,9 +1645,7 @@ static int parse_options(char *options, struct super_block *sb,
unsigned int *journal_ioprio,
int is_remount)
{
-#ifdef CONFIG_QUOTA
struct ext4_sb_info *sbi = EXT4_SB(sb);
-#endif
char *p;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -1701,6 +1694,16 @@ static int parse_options(char *options, struct super_block *sb,
}
}
#endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ int blocksize =
+ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+
+ if (blocksize < PAGE_CACHE_SIZE) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "dioread_nolock if block size != PAGE_SIZE");
+ return 0;
+ }
+ }
return 1;
}
@@ -2217,7 +2220,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
__func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %lld bytes\n",
inode->i_ino, inode->i_size);
+ mutex_lock(&inode->i_mutex);
ext4_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
nr_truncates++;
} else {
ext4_msg(sb, KERN_DEBUG,
@@ -3202,7 +3207,6 @@ int ext4_calculate_overhead(struct super_block *sb)
ext4_fsblk_t overhead = 0;
char *buf = (char *) get_zeroed_page(GFP_KERNEL);
- memset(buf, 0, PAGE_SIZE);
if (!buf)
return -ENOMEM;
@@ -3229,6 +3233,10 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
+ /* Add the journal blocks as well */
+ if (sbi->s_journal)
+ overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+
sbi->s_overhead = overhead;
smp_wmb();
free_page((unsigned long) buf);
@@ -3256,7 +3264,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
unsigned int i;
int needs_recovery, has_huge_files, has_bigalloc;
__u64 blocks_count;
- int err;
+ int err = 0;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
@@ -3272,9 +3280,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_fs_info = sbi;
sbi->s_sb = sb;
- sbi->s_mount_opt = 0;
- sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
- sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
if (sb->s_bdev->bd_part)
@@ -3285,6 +3290,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
+ /* -EINVAL is default */
ret = -EINVAL;
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
@@ -3369,9 +3375,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
-#ifdef CONFIG_EXT4_FS_XATTR
set_opt(sb, XATTR_USER);
-#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
@@ -3446,15 +3450,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
clear_opt(sb, DELALLOC);
}
- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- if (blocksize < PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "dioread_nolock if block size != PAGE_SIZE");
- goto failed_mount;
- }
- }
-
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -3496,6 +3491,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
goto failed_mount;
+ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
@@ -3662,7 +3658,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
" too large to mount safely on this system");
if (sizeof(sector_t) < 8)
ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
- ret = err;
goto failed_mount;
}
@@ -3770,7 +3765,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
- ret = err;
goto failed_mount3;
}
@@ -3801,7 +3795,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
- sbi->s_resize_flags = 0;
sb->s_root = NULL;
@@ -3897,8 +3890,8 @@ no_journal:
if (es->s_overhead_clusters)
sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
else {
- ret = ext4_calculate_overhead(sb);
- if (ret)
+ err = ext4_calculate_overhead(sb);
+ if (err)
goto failed_mount_wq;
}
@@ -3910,6 +3903,7 @@ no_journal:
alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
+ ret = -ENOMEM;
goto failed_mount_wq;
}
@@ -4012,12 +4006,20 @@ no_journal:
/* Enable quota usage during mount. */
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
!(sb->s_flags & MS_RDONLY)) {
- ret = ext4_enable_quotas(sb);
- if (ret)
+ err = ext4_enable_quotas(sb);
+ if (err)
goto failed_mount7;
}
#endif /* CONFIG_QUOTA */
+ if (test_opt(sb, DISCARD)) {
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
+ if (!blk_queue_discard(q))
+ ext4_msg(sb, KERN_WARNING,
+ "mounting with \"discard\" option, but "
+ "the device does not support discard");
+ }
+
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
@@ -4084,7 +4086,7 @@ out_fail:
kfree(sbi);
out_free_orig:
kfree(orig_data);
- return ret;
+ return err ? err : ret;
}
/*
@@ -4729,7 +4731,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
ext4_setup_system_zone(sb);
- if (sbi->s_journal == NULL)
+ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
ext4_commit_super(sb, 1);
#ifdef CONFIG_QUOTA
@@ -4790,7 +4792,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
+ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
/* prevent underflow in case that few free space is available */
@@ -5282,6 +5284,7 @@ static int __init ext4_init_fs(void)
ext4_li_info = NULL;
mutex_init(&ext4_li_mtx);
+ /* Build-time check for flags consistency */
ext4_check_flag_values();
for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
@@ -5289,9 +5292,14 @@ static int __init ext4_init_fs(void)
init_waitqueue_head(&ext4__ioend_wq[i]);
}
- err = ext4_init_pageio();
+ err = ext4_init_es();
if (err)
return err;
+
+ err = ext4_init_pageio();
+ if (err)
+ goto out7;
+
err = ext4_init_system_zone();
if (err)
goto out6;
@@ -5341,6 +5349,9 @@ out5:
ext4_exit_system_zone();
out6:
ext4_exit_pageio();
+out7:
+ ext4_exit_es();
+
return err;
}
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index ed9354aff27..ff371193201 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -35,22 +35,18 @@ const struct inode_operations ext4_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.setattr = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
-#endif
};
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext4_follow_link,
.setattr = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
-#endif
};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 2cdb98d6298..3a91ebc2b66 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -61,11 +61,6 @@
#include "xattr.h"
#include "acl.h"
-#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
-#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
-#define BFIRST(bh) ENTRY(BHDR(bh)+1)
-#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-
#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
printk(KERN_DEBUG "inode %s:%lu: ", \
@@ -312,7 +307,7 @@ cleanup:
return error;
}
-static int
+int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
void *buffer, size_t buffer_size)
{
@@ -581,21 +576,6 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
return (*min_offs - ((void *)last - base) - sizeof(__u32));
}
-struct ext4_xattr_info {
- int name_index;
- const char *name;
- const void *value;
- size_t value_len;
-};
-
-struct ext4_xattr_search {
- struct ext4_xattr_entry *first;
- void *base;
- void *end;
- struct ext4_xattr_entry *here;
- int not_found;
-};
-
static int
ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
{
@@ -648,9 +628,14 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
size. Just replace. */
s->here->e_value_size =
cpu_to_le32(i->value_len);
- memset(val + size - EXT4_XATTR_PAD, 0,
- EXT4_XATTR_PAD); /* Clear pad bytes. */
- memcpy(val, i->value, i->value_len);
+ if (i->value == EXT4_ZERO_XATTR_VALUE) {
+ memset(val, 0, size);
+ } else {
+ /* Clear pad bytes first. */
+ memset(val + size - EXT4_XATTR_PAD, 0,
+ EXT4_XATTR_PAD);
+ memcpy(val, i->value, i->value_len);
+ }
return 0;
}
@@ -689,9 +674,14 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
size_t size = EXT4_XATTR_SIZE(i->value_len);
void *val = s->base + min_offs - size;
s->here->e_value_offs = cpu_to_le16(min_offs - size);
- memset(val + size - EXT4_XATTR_PAD, 0,
- EXT4_XATTR_PAD); /* Clear the pad bytes. */
- memcpy(val, i->value, i->value_len);
+ if (i->value == EXT4_ZERO_XATTR_VALUE) {
+ memset(val, 0, size);
+ } else {
+ /* Clear the pad bytes first. */
+ memset(val + size - EXT4_XATTR_PAD, 0,
+ EXT4_XATTR_PAD);
+ memcpy(val, i->value, i->value_len);
+ }
}
}
return 0;
@@ -794,7 +784,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
int offset = (char *)s->here - bs->bh->b_data;
unlock_buffer(bs->bh);
- ext4_handle_release_buffer(handle, bs->bh);
if (ce) {
mb_cache_entry_release(ce);
ce = NULL;
@@ -950,14 +939,8 @@ bad_block:
#undef header
}
-struct ext4_xattr_ibody_find {
- struct ext4_xattr_search s;
- struct ext4_iloc iloc;
-};
-
-static int
-ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
- struct ext4_xattr_ibody_find *is)
+int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is)
{
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
@@ -985,10 +968,47 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
return 0;
}
-static int
-ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
- struct ext4_xattr_info *i,
- struct ext4_xattr_ibody_find *is)
+int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+ struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is)
+{
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_search *s = &is->s;
+ int error;
+
+ if (EXT4_I(inode)->i_extra_isize == 0)
+ return -ENOSPC;
+ error = ext4_xattr_set_entry(i, s);
+ if (error) {
+ if (error == -ENOSPC &&
+ ext4_has_inline_data(inode)) {
+ error = ext4_try_to_evict_inline_data(handle, inode,
+ EXT4_XATTR_LEN(strlen(i->name) +
+ EXT4_XATTR_SIZE(i->value_len)));
+ if (error)
+ return error;
+ error = ext4_xattr_ibody_find(inode, i, is);
+ if (error)
+ return error;
+ error = ext4_xattr_set_entry(i, s);
+ }
+ if (error)
+ return error;
+ }
+ header = IHDR(inode, ext4_raw_inode(&is->iloc));
+ if (!IS_LAST_ENTRY(s->first)) {
+ header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+ } else {
+ header->h_magic = cpu_to_le32(0);
+ ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+ }
+ return 0;
+}
+
+static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is)
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_search *s = &is->s;
@@ -1144,9 +1164,17 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
{
handle_t *handle;
int error, retries = 0;
+ int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
retry:
- handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+ /*
+ * In case of inline data, we may push out the data to a block,
+ * So reserve the journal space first.
+ */
+ if (ext4_has_inline_data(inode))
+ credits += ext4_writepage_trans_blocks(inode) + 1;
+
+ handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
} else {
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 91f31ca7d9a..69eda787a96 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -21,6 +21,7 @@
#define EXT4_XATTR_INDEX_TRUSTED 4
#define EXT4_XATTR_INDEX_LUSTRE 5
#define EXT4_XATTR_INDEX_SECURITY 6
+#define EXT4_XATTR_INDEX_SYSTEM 7
struct ext4_xattr_header {
__le32 h_magic; /* magic number for identification */
@@ -65,7 +66,32 @@ struct ext4_xattr_entry {
EXT4_I(inode)->i_extra_isize))
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-# ifdef CONFIG_EXT4_FS_XATTR
+#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
+#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
+#define BFIRST(bh) ENTRY(BHDR(bh)+1)
+#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
+
+#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+
+struct ext4_xattr_info {
+ int name_index;
+ const char *name;
+ const void *value;
+ size_t value_len;
+};
+
+struct ext4_xattr_search {
+ struct ext4_xattr_entry *first;
+ void *base;
+ void *end;
+ struct ext4_xattr_entry *here;
+ int not_found;
+};
+
+struct ext4_xattr_ibody_find {
+ struct ext4_xattr_search s;
+ struct ext4_iloc iloc;
+};
extern const struct xattr_handler ext4_xattr_user_handler;
extern const struct xattr_handler ext4_xattr_trusted_handler;
@@ -90,60 +116,82 @@ extern void ext4_exit_xattr(void);
extern const struct xattr_handler *ext4_xattr_handlers[];
-# else /* CONFIG_EXT4_FS_XATTR */
-
-static inline int
-ext4_xattr_get(struct inode *inode, int name_index, const char *name,
- void *buffer, size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ext4_xattr_set(struct inode *inode, int name_index, const char *name,
- const void *value, size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
- const char *name, const void *value, size_t size, int flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void
-ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-}
-
-static inline void
-ext4_xattr_put_super(struct super_block *sb)
-{
-}
-
-static __init inline int
-ext4_init_xattr(void)
-{
- return 0;
-}
-
-static inline void
-ext4_exit_xattr(void)
-{
-}
-
-static inline int
-ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
- struct ext4_inode *raw_inode, handle_t *handle)
-{
- return -EOPNOTSUPP;
-}
-
-#define ext4_xattr_handlers NULL
-
-# endif /* CONFIG_EXT4_FS_XATTR */
+extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is);
+extern int ext4_xattr_ibody_get(struct inode *inode, int name_index,
+ const char *name,
+ void *buffer, size_t buffer_size);
+extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+ struct ext4_xattr_info *i,
+ struct ext4_xattr_ibody_find *is);
+
+extern int ext4_has_inline_data(struct inode *inode);
+extern int ext4_get_inline_size(struct inode *inode);
+extern int ext4_get_max_inline_size(struct inode *inode);
+extern int ext4_find_inline_data_nolock(struct inode *inode);
+extern void ext4_write_inline_data(struct inode *inode,
+ struct ext4_iloc *iloc,
+ void *buffer, loff_t pos,
+ unsigned int len);
+extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len);
+extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
+ unsigned int len);
+extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+
+extern int ext4_readpage_inline(struct inode *inode, struct page *page);
+extern int ext4_try_to_write_inline_data(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep);
+extern int ext4_write_inline_data_end(struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned copied,
+ struct page *page);
+extern struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+ unsigned len,
+ struct page *page);
+extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
+ struct inode *inode,
+ loff_t pos, unsigned len,
+ unsigned flags,
+ struct page **pagep,
+ void **fsdata);
+extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+ unsigned len, unsigned copied,
+ struct page *page);
+extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode);
+extern int ext4_try_create_inline_dir(handle_t *handle,
+ struct inode *parent,
+ struct inode *inode);
+extern int ext4_read_inline_dir(struct file *filp,
+ void *dirent, filldir_t filldir,
+ int *has_inline_data);
+extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+ const struct qstr *d_name,
+ struct ext4_dir_entry_2 **res_dir,
+ int *has_inline_data);
+extern int ext4_delete_inline_entry(handle_t *handle,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de_del,
+ struct buffer_head *bh,
+ int *has_inline_data);
+extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
+extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+ struct ext4_dir_entry_2 **parent_de,
+ int *retval);
+extern int ext4_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ int *has_inline);
+extern int ext4_try_to_evict_inline_data(handle_t *handle,
+ struct inode *inode,
+ int needed);
+extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
+
+extern int ext4_convert_inline_data(struct inode *inode);
#ifdef CONFIG_EXT4_FS_SECURITY
extern int ext4_init_security(handle_t *handle, struct inode *inode,
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
new file mode 100644
index 00000000000..fd27e7e6326
--- /dev/null
+++ b/fs/f2fs/Kconfig
@@ -0,0 +1,53 @@
+config F2FS_FS
+ tristate "F2FS filesystem support (EXPERIMENTAL)"
+ depends on BLOCK
+ help
+ F2FS is based on Log-structured File System (LFS), which supports
+ versatile "flash-friendly" features. The design has been focused on
+ addressing the fundamental issues in LFS, which are the snowball effect
+ of the wandering tree and the high cleaning overhead.
+
+ Since flash-based storage devices show different characteristics according
+ to their internal geometry or flash memory management schemes (aka FTL), F2FS
+ and its tools support various parameters not only for configuring the on-disk
+ layout, but also for selecting allocation and cleaning algorithms.
+
+ If unsure, say N.
+
+config F2FS_STAT_FS
+ bool "F2FS Status Information"
+ depends on F2FS_FS && DEBUG_FS
+ default y
+ help
+ /sys/kernel/debug/f2fs/ contains information about all the partitions
+ mounted as f2fs. Each file shows the whole f2fs information.
+
+ /sys/kernel/debug/f2fs/status includes:
+ - major file system information managed by f2fs currently
+ - average SIT information about whole segments
+ - current memory footprint consumed by f2fs.
+
+config F2FS_FS_XATTR
+ bool "F2FS extended attributes"
+ depends on F2FS_FS
+ default y
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config F2FS_FS_POSIX_ACL
+ bool "F2FS Access Control Lists"
+ depends on F2FS_FS_XATTR
+ select FS_POSIX_ACL
+ default y
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
new file mode 100644
index 00000000000..27a0820340b
--- /dev/null
+++ b/fs/f2fs/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_F2FS_FS) += f2fs.o
+
+f2fs-y := dir.o file.o inode.o namei.o hash.o super.o
+f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
+f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
+f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
+f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
new file mode 100644
index 00000000000..e95b94945d5
--- /dev/null
+++ b/fs/f2fs/acl.c
@@ -0,0 +1,413 @@
+/*
+ * fs/f2fs/acl.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Portions of this code from linux/fs/ext2/acl.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/f2fs_fs.h>
+#include "f2fs.h"
+#include "xattr.h"
+#include "acl.h"
+
+#define get_inode_mode(i) ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
+ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+
+static inline size_t f2fs_acl_size(int count)
+{
+ if (count <= 4) {
+ return sizeof(struct f2fs_acl_header) +
+ count * sizeof(struct f2fs_acl_entry_short);
+ } else {
+ return sizeof(struct f2fs_acl_header) +
+ 4 * sizeof(struct f2fs_acl_entry_short) +
+ (count - 4) * sizeof(struct f2fs_acl_entry);
+ }
+}
+
+static inline int f2fs_acl_count(size_t size)
+{
+ ssize_t s;
+ size -= sizeof(struct f2fs_acl_header);
+ s = size - 4 * sizeof(struct f2fs_acl_entry_short);
+ if (s < 0) {
+ if (size % sizeof(struct f2fs_acl_entry_short))
+ return -1;
+ return size / sizeof(struct f2fs_acl_entry_short);
+ } else {
+ if (s % sizeof(struct f2fs_acl_entry))
+ return -1;
+ return s / sizeof(struct f2fs_acl_entry) + 4;
+ }
+}
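
The helpers above encode the on-disk ACL layout: the four mandatory entry classes (USER_OBJ, GROUP_OBJ, MASK, OTHER) are stored as short tag+perm pairs, while USER/GROUP entries also carry an id. Below is a minimal userspace sketch of the same size arithmetic, assuming the natural sizes of the structures declared in acl.h (4-byte header, 4-byte short entry, 8-byte full entry); the names and the program itself are illustrative only, not part of the patch.

    /*
     * Hypothetical, self-contained rework of the size arithmetic above.
     * HDR_SZ/SHORT_SZ/FULL_SZ stand in for the sizeof() of the structures
     * declared in acl.h (assumed 4, 4 and 8 bytes respectively).
     */
    #include <stdio.h>
    #include <stddef.h>

    #define HDR_SZ   4   /* struct f2fs_acl_header */
    #define SHORT_SZ 4   /* struct f2fs_acl_entry_short: tag + perm */
    #define FULL_SZ  8   /* struct f2fs_acl_entry: tag + perm + id */

    static size_t acl_size(int count)
    {
            /* the first four entries are stored short, the rest full */
            if (count <= 4)
                    return HDR_SZ + count * SHORT_SZ;
            return HDR_SZ + 4 * SHORT_SZ + (count - 4) * FULL_SZ;
    }

    int main(void)
    {
            /* 3 entries -> 4 + 12 = 16 bytes; 6 entries -> 4 + 16 + 16 = 36 */
            printf("%zu %zu\n", acl_size(3), acl_size(6));
            return 0;
    }
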
+
+static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
+{
+ int i, count;
+ struct posix_acl *acl;
+ struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
+ struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
+ const char *end = value + size;
+
+ if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION))
+ return ERR_PTR(-EINVAL);
+
+ count = f2fs_acl_count(size);
+ if (count < 0)
+ return ERR_PTR(-EINVAL);
+ if (count == 0)
+ return NULL;
+
+ acl = posix_acl_alloc(count, GFP_KERNEL);
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < count; i++) {
+
+ if ((char *)entry > end)
+ goto fail;
+
+ acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag);
+ acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm);
+
+ switch (acl->a_entries[i].e_tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry_short));
+ break;
+
+ case ACL_USER:
+ acl->a_entries[i].e_uid =
+ make_kuid(&init_user_ns,
+ le32_to_cpu(entry->e_id));
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry));
+ break;
+ case ACL_GROUP:
+ acl->a_entries[i].e_gid =
+ make_kgid(&init_user_ns,
+ le32_to_cpu(entry->e_id));
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry));
+ break;
+ default:
+ goto fail;
+ }
+ }
+ if ((char *)entry != end)
+ goto fail;
+ return acl;
+fail:
+ posix_acl_release(acl);
+ return ERR_PTR(-EINVAL);
+}
+
+static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+ struct f2fs_acl_header *f2fs_acl;
+ struct f2fs_acl_entry *entry;
+ int i;
+
+ f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
+ sizeof(struct f2fs_acl_entry), GFP_KERNEL);
+ if (!f2fs_acl)
+ return ERR_PTR(-ENOMEM);
+
+ f2fs_acl->a_version = cpu_to_le32(F2FS_ACL_VERSION);
+ entry = (struct f2fs_acl_entry *)(f2fs_acl + 1);
+
+ for (i = 0; i < acl->a_count; i++) {
+
+ entry->e_tag = cpu_to_le16(acl->a_entries[i].e_tag);
+ entry->e_perm = cpu_to_le16(acl->a_entries[i].e_perm);
+
+ switch (acl->a_entries[i].e_tag) {
+ case ACL_USER:
+ entry->e_id = cpu_to_le32(
+ from_kuid(&init_user_ns,
+ acl->a_entries[i].e_uid));
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry));
+ break;
+ case ACL_GROUP:
+ entry->e_id = cpu_to_le32(
+ from_kgid(&init_user_ns,
+ acl->a_entries[i].e_gid));
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry));
+ break;
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ entry = (struct f2fs_acl_entry *)((char *)entry +
+ sizeof(struct f2fs_acl_entry_short));
+ break;
+ default:
+ goto fail;
+ }
+ }
+ *size = f2fs_acl_size(acl->a_count);
+ return (void *)f2fs_acl;
+
+fail:
+ kfree(f2fs_acl);
+ return ERR_PTR(-EINVAL);
+}
+
+struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ void *value = NULL;
+ struct posix_acl *acl;
+ int retval;
+
+ if (!test_opt(sbi, POSIX_ACL))
+ return NULL;
+
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
+ if (type == ACL_TYPE_ACCESS)
+ name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+
+ retval = f2fs_getxattr(inode, name_index, "", NULL, 0);
+ if (retval > 0) {
+ value = kmalloc(retval, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ retval = f2fs_getxattr(inode, name_index, "", value, retval);
+ }
+
+ if (retval < 0) {
+ if (retval == -ENODATA)
+ acl = NULL;
+ else
+ acl = ERR_PTR(retval);
+ } else {
+ acl = f2fs_acl_from_disk(value, retval);
+ }
+ kfree(value);
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
+
+ return acl;
+}
+
+static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ int name_index;
+ void *value = NULL;
+ size_t size = 0;
+ int error;
+
+ if (!test_opt(sbi, POSIX_ACL))
+ return 0;
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ if (acl) {
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
+ if (error < 0)
+ return error;
+ set_acl_inode(fi, inode->i_mode);
+ if (error == 0)
+ acl = NULL;
+ }
+ break;
+
+ case ACL_TYPE_DEFAULT:
+ name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ if (!S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (acl) {
+ value = f2fs_acl_to_disk(acl, &size);
+ if (IS_ERR(value)) {
+ cond_clear_inode_flag(fi, FI_ACL_MODE);
+ return (int)PTR_ERR(value);
+ }
+ }
+
+ error = f2fs_setxattr(inode, name_index, "", value, size);
+
+ kfree(value);
+ if (!error)
+ set_cached_acl(inode, type, acl);
+
+ cond_clear_inode_flag(fi, FI_ACL_MODE);
+ return error;
+}
+
+int f2fs_init_acl(struct inode *inode, struct inode *dir)
+{
+ struct posix_acl *acl = NULL;
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ int error = 0;
+
+ if (!S_ISLNK(inode->i_mode)) {
+ if (test_opt(sbi, POSIX_ACL)) {
+ acl = f2fs_get_acl(dir, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ }
+ if (!acl)
+ inode->i_mode &= ~current_umask();
+ }
+
+ if (test_opt(sbi, POSIX_ACL) && acl) {
+
+ if (S_ISDIR(inode->i_mode)) {
+ error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
+ if (error)
+ goto cleanup;
+ }
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
+ if (error < 0)
+ return error;
+ if (error > 0)
+ error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+ }
+cleanup:
+ posix_acl_release(acl);
+ return error;
+}
+
+int f2fs_acl_chmod(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct posix_acl *acl;
+ int error;
+ mode_t mode = get_inode_mode(inode);
+
+ if (!test_opt(sbi, POSIX_ACL))
+ return 0;
+ if (S_ISLNK(mode))
+ return -EOPNOTSUPP;
+
+ acl = f2fs_get_acl(inode, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl) || !acl)
+ return PTR_ERR(acl);
+
+ error = posix_acl_chmod(&acl, GFP_KERNEL, mode);
+ if (error)
+ return error;
+ error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+ posix_acl_release(acl);
+ return error;
+}
+
+static size_t f2fs_xattr_list_acl(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ const char *xname = POSIX_ACL_XATTR_DEFAULT;
+ size_t size;
+
+ if (!test_opt(sbi, POSIX_ACL))
+ return 0;
+
+ if (type == ACL_TYPE_ACCESS)
+ xname = POSIX_ACL_XATTR_ACCESS;
+
+ size = strlen(xname) + 1;
+ if (list && size <= list_size)
+ memcpy(list, xname, size);
+ return size;
+}
+
+static int f2fs_xattr_get_acl(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ struct posix_acl *acl;
+ int error;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(sbi, POSIX_ACL))
+ return -EOPNOTSUPP;
+
+ acl = f2fs_get_acl(dentry->d_inode, type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (!acl)
+ return -ENODATA;
+ error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ posix_acl_release(acl);
+
+ return error;
+}
+
+static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ struct posix_acl *acl = NULL;
+ int error;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(sbi, POSIX_ACL))
+ return -EOPNOTSUPP;
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+
+ if (value) {
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ error = posix_acl_valid(acl);
+ if (error)
+ goto release_and_out;
+ }
+ } else {
+ acl = NULL;
+ }
+
+ error = f2fs_set_acl(inode, type, acl);
+
+release_and_out:
+ posix_acl_release(acl);
+ return error;
+}
+
+const struct xattr_handler f2fs_xattr_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .list = f2fs_xattr_list_acl,
+ .get = f2fs_xattr_get_acl,
+ .set = f2fs_xattr_set_acl,
+};
+
+const struct xattr_handler f2fs_xattr_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .list = f2fs_xattr_list_acl,
+ .get = f2fs_xattr_get_acl,
+ .set = f2fs_xattr_set_acl,
+};
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
new file mode 100644
index 00000000000..80f43067441
--- /dev/null
+++ b/fs/f2fs/acl.h
@@ -0,0 +1,57 @@
+/*
+ * fs/f2fs/acl.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Portions of this code from linux/fs/ext2/acl.h
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __F2FS_ACL_H__
+#define __F2FS_ACL_H__
+
+#include <linux/posix_acl_xattr.h>
+
+#define F2FS_ACL_VERSION 0x0001
+
+struct f2fs_acl_entry {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+};
+
+struct f2fs_acl_entry_short {
+ __le16 e_tag;
+ __le16 e_perm;
+};
+
+struct f2fs_acl_header {
+ __le32 a_version;
+};
+
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+
+extern struct posix_acl *f2fs_get_acl(struct inode *inode, int type);
+extern int f2fs_acl_chmod(struct inode *inode);
+extern int f2fs_init_acl(struct inode *inode, struct inode *dir);
+#else
+#define f2fs_check_acl NULL
+#define f2fs_get_acl NULL
+#define f2fs_set_acl NULL
+
+static inline int f2fs_acl_chmod(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int f2fs_init_acl(struct inode *inode, struct inode *dir)
+{
+ return 0;
+}
+#endif
+#endif /* __F2FS_ACL_H__ */
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
new file mode 100644
index 00000000000..6ef36c37e2b
--- /dev/null
+++ b/fs/f2fs/checkpoint.c
@@ -0,0 +1,794 @@
+/*
+ * fs/f2fs/checkpoint.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/f2fs_fs.h>
+#include <linux/pagevec.h>
+#include <linux/swap.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+
+static struct kmem_cache *orphan_entry_slab;
+static struct kmem_cache *inode_entry_slab;
+
+/*
+ * We guarantee no failure on the returned page.
+ */
+struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ struct address_space *mapping = sbi->meta_inode->i_mapping;
+ struct page *page = NULL;
+repeat:
+ page = grab_cache_page(mapping, index);
+ if (!page) {
+ cond_resched();
+ goto repeat;
+ }
+
+ /* We wait for writeback only inside grab_meta_page() */
+ wait_on_page_writeback(page);
+ SetPageUptodate(page);
+ return page;
+}
+
+/*
+ * We guarantee no failure on the returned page.
+ */
+struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ struct address_space *mapping = sbi->meta_inode->i_mapping;
+ struct page *page;
+repeat:
+ page = grab_cache_page(mapping, index);
+ if (!page) {
+ cond_resched();
+ goto repeat;
+ }
+ if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
+ f2fs_put_page(page, 1);
+ goto repeat;
+ }
+ mark_page_accessed(page);
+
+ /* We do not allow returning an erroneous page */
+ return page;
+}
+
+static int f2fs_write_meta_page(struct page *page,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ int err;
+
+ wait_on_page_writeback(page);
+
+ err = write_meta_page(sbi, page, wbc);
+ if (err) {
+ wbc->pages_skipped++;
+ set_page_dirty(page);
+ }
+
+ dec_page_count(sbi, F2FS_DIRTY_META);
+
+ /* In this case, we should not unlock this page */
+ if (err != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
+ return err;
+}
+
+static int f2fs_write_meta_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ struct block_device *bdev = sbi->sb->s_bdev;
+ long written;
+
+ if (wbc->for_kupdate)
+ return 0;
+
+ if (get_pages(sbi, F2FS_DIRTY_META) == 0)
+ return 0;
+
+ /* if mounting failed, skip writing node pages */
+ mutex_lock(&sbi->cp_mutex);
+ written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev));
+ mutex_unlock(&sbi->cp_mutex);
+ wbc->nr_to_write -= written;
+ return 0;
+}
+
+long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
+ long nr_to_write)
+{
+ struct address_space *mapping = sbi->meta_inode->i_mapping;
+ pgoff_t index = 0, end = LONG_MAX;
+ struct pagevec pvec;
+ long nwritten = 0;
+ struct writeback_control wbc = {
+ .for_reclaim = 0,
+ };
+
+ pagevec_init(&pvec, 0);
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ lock_page(page);
+ BUG_ON(page->mapping != mapping);
+ BUG_ON(!PageDirty(page));
+ clear_page_dirty_for_io(page);
+ f2fs_write_meta_page(page, &wbc);
+ if (nwritten++ >= nr_to_write)
+ break;
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ if (nwritten)
+ f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);
+
+ return nwritten;
+}
+
+static int f2fs_set_meta_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+
+ SetPageUptodate(page);
+ if (!PageDirty(page)) {
+ __set_page_dirty_nobuffers(page);
+ inc_page_count(sbi, F2FS_DIRTY_META);
+ F2FS_SET_SB_DIRT(sbi);
+ return 1;
+ }
+ return 0;
+}
+
+const struct address_space_operations f2fs_meta_aops = {
+ .writepage = f2fs_write_meta_page,
+ .writepages = f2fs_write_meta_pages,
+ .set_page_dirty = f2fs_set_meta_page_dirty,
+};
+
+int check_orphan_space(struct f2fs_sb_info *sbi)
+{
+ unsigned int max_orphans;
+ int err = 0;
+
+ /*
+ * considering 512 blocks in a segment, 5 blocks are needed for the cp
+ * pack and the log segment summaries. The remaining blocks are used to
+ * keep orphan entries. With one segment reserved for the cp pack, we
+ * can hold at most 1020 * 507 orphan entries.
+ */
+ max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
+ mutex_lock(&sbi->orphan_inode_mutex);
+ if (sbi->n_orphans >= max_orphans)
+ err = -ENOSPC;
+ mutex_unlock(&sbi->orphan_inode_mutex);
+ return err;
+}
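
The capacity check above follows the comment directly: one segment is reserved for the cp pack, five of its blocks go to the checkpoint blocks and log segment summaries, and each remaining block holds F2FS_ORPHANS_PER_BLOCK entries. A small worked example, assuming the default 512 blocks per segment and 1020 orphan entries per block (both values taken from the comment, not recomputed from the headers):

    #include <stdio.h>

    int main(void)
    {
            unsigned int blocks_per_seg  = 512;   /* default segment size */
            unsigned int orphans_per_blk = 1020;  /* F2FS_ORPHANS_PER_BLOCK */

            /* 5 blocks of the reserved segment hold cp blocks and summaries */
            unsigned int max_orphans = (blocks_per_seg - 5) * orphans_per_blk;

            /* 507 * 1020 = 517140 */
            printf("max orphan entries: %u\n", max_orphans);
            return 0;
    }
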
+
+void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct list_head *head, *this;
+ struct orphan_inode_entry *new = NULL, *orphan = NULL;
+
+ mutex_lock(&sbi->orphan_inode_mutex);
+ head = &sbi->orphan_inode_list;
+ list_for_each(this, head) {
+ orphan = list_entry(this, struct orphan_inode_entry, list);
+ if (orphan->ino == ino)
+ goto out;
+ if (orphan->ino > ino)
+ break;
+ orphan = NULL;
+ }
+retry:
+ new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
+ if (!new) {
+ cond_resched();
+ goto retry;
+ }
+ new->ino = ino;
+ INIT_LIST_HEAD(&new->list);
+
+ /* add the new entry into the list, which is sorted by inode number */
+ if (orphan) {
+ struct orphan_inode_entry *prev;
+
+ /* get previous entry */
+ prev = list_entry(orphan->list.prev, typeof(*prev), list);
+ if (&prev->list != head)
+ /* insert new orphan inode entry */
+ list_add(&new->list, &prev->list);
+ else
+ list_add(&new->list, head);
+ } else {
+ list_add_tail(&new->list, head);
+ }
+ sbi->n_orphans++;
+out:
+ mutex_unlock(&sbi->orphan_inode_mutex);
+}
+
+void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct list_head *this, *next, *head;
+ struct orphan_inode_entry *orphan;
+
+ mutex_lock(&sbi->orphan_inode_mutex);
+ head = &sbi->orphan_inode_list;
+ list_for_each_safe(this, next, head) {
+ orphan = list_entry(this, struct orphan_inode_entry, list);
+ if (orphan->ino == ino) {
+ list_del(&orphan->list);
+ kmem_cache_free(orphan_entry_slab, orphan);
+ sbi->n_orphans--;
+ break;
+ }
+ }
+ mutex_unlock(&sbi->orphan_inode_mutex);
+}
+
+static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct inode *inode = f2fs_iget(sbi->sb, ino);
+ BUG_ON(IS_ERR(inode));
+ clear_nlink(inode);
+
+ /* truncate all the data during iput */
+ iput(inode);
+}
+
+int recover_orphan_inodes(struct f2fs_sb_info *sbi)
+{
+ block_t start_blk, orphan_blkaddr, i, j;
+
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+ return 0;
+
+ sbi->por_doing = 1;
+ start_blk = __start_cp_addr(sbi) + 1;
+ orphan_blkaddr = __start_sum_addr(sbi) - 1;
+
+ for (i = 0; i < orphan_blkaddr; i++) {
+ struct page *page = get_meta_page(sbi, start_blk + i);
+ struct f2fs_orphan_block *orphan_blk;
+
+ orphan_blk = (struct f2fs_orphan_block *)page_address(page);
+ for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
+ nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
+ recover_orphan_inode(sbi, ino);
+ }
+ f2fs_put_page(page, 1);
+ }
+ /* clear Orphan Flag */
+ clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
+ sbi->por_doing = 0;
+ return 0;
+}
+
+static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
+{
+ struct list_head *head, *this, *next;
+ struct f2fs_orphan_block *orphan_blk = NULL;
+ struct page *page = NULL;
+ unsigned int nentries = 0;
+ unsigned short index = 1;
+ unsigned short orphan_blocks;
+
+ orphan_blocks = (unsigned short)((sbi->n_orphans +
+ (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+
+ mutex_lock(&sbi->orphan_inode_mutex);
+ head = &sbi->orphan_inode_list;
+
+ /* loop for each orphan inode entry and write them into the journal block */
+ list_for_each_safe(this, next, head) {
+ struct orphan_inode_entry *orphan;
+
+ orphan = list_entry(this, struct orphan_inode_entry, list);
+
+ if (nentries == F2FS_ORPHANS_PER_BLOCK) {
+ /*
+ * when an orphan block is full of 1020 entries,
+ * we need to flush the current orphan block
+ * and bring another one into memory
+ */
+ orphan_blk->blk_addr = cpu_to_le16(index);
+ orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
+ orphan_blk->entry_count = cpu_to_le32(nentries);
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+ index++;
+ start_blk++;
+ nentries = 0;
+ page = NULL;
+ }
+ if (page)
+ goto page_exist;
+
+ page = grab_meta_page(sbi, start_blk);
+ orphan_blk = (struct f2fs_orphan_block *)page_address(page);
+ memset(orphan_blk, 0, sizeof(*orphan_blk));
+page_exist:
+ orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
+ }
+ if (!page)
+ goto end;
+
+ orphan_blk->blk_addr = cpu_to_le16(index);
+ orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
+ orphan_blk->entry_count = cpu_to_le32(nentries);
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+end:
+ mutex_unlock(&sbi->orphan_inode_mutex);
+}
+
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1, *cp_page_2 = NULL;
+ unsigned long blk_size = sbi->blocksize;
+ struct f2fs_checkpoint *cp_block;
+ unsigned long long cur_version = 0, pre_version = 0;
+ unsigned int crc = 0;
+ size_t crc_offset;
+
+ /* Read the 1st cp block in this CP pack */
+ cp_page_1 = get_meta_page(sbi, cp_addr);
+
+ /* get the version number */
+ cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
+ crc_offset = le32_to_cpu(cp_block->checksum_offset);
+ if (crc_offset >= blk_size)
+ goto invalid_cp1;
+
+ crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
+ if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+ goto invalid_cp1;
+
+ pre_version = le64_to_cpu(cp_block->checkpoint_ver);
+
+ /* Read the 2nd cp block in this CP pack */
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ cp_page_2 = get_meta_page(sbi, cp_addr);
+
+ cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
+ crc_offset = le32_to_cpu(cp_block->checksum_offset);
+ if (crc_offset >= blk_size)
+ goto invalid_cp2;
+
+ crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
+ if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+ goto invalid_cp2;
+
+ cur_version = le64_to_cpu(cp_block->checkpoint_ver);
+
+ if (cur_version == pre_version) {
+ *version = cur_version;
+ f2fs_put_page(cp_page_2, 1);
+ return cp_page_1;
+ }
+invalid_cp2:
+ f2fs_put_page(cp_page_2, 1);
+invalid_cp1:
+ f2fs_put_page(cp_page_1, 1);
+ return NULL;
+}
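
validate_checkpoint() accepts a cp pack only when both its first and last block carry a valid checksum and the same version number. The sketch below shows only the checksum convention: the CRC is stored inside the block at checksum_offset and covers everything before it. toy_crc() is a placeholder for illustration, not the kernel's f2fs_crc32().

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* placeholder checksum, only to illustrate the layout */
    static uint32_t toy_crc(const void *buf, size_t len)
    {
            const uint8_t *p = buf;
            uint32_t crc = 0;

            while (len--)
                    crc = (crc << 1) ^ *p++;
            return crc;
    }

    int main(void)
    {
            uint8_t block[64] = "checkpoint payload";
            size_t crc_offset = sizeof(block) - sizeof(uint32_t);
            uint32_t crc;

            /* writer: checksum everything before crc_offset, store in place */
            crc = toy_crc(block, crc_offset);
            memcpy(block + crc_offset, &crc, sizeof(crc));

            /* reader: recompute and compare, as validate_checkpoint() does */
            memcpy(&crc, block + crc_offset, sizeof(crc));
            printf("valid = %d\n", crc == toy_crc(block, crc_offset));
            return 0;
    }
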
+
+int get_valid_checkpoint(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *cp_block;
+ struct f2fs_super_block *fsb = sbi->raw_super;
+ struct page *cp1, *cp2, *cur_page;
+ unsigned long blk_size = sbi->blocksize;
+ unsigned long long cp1_version = 0, cp2_version = 0;
+ unsigned long long cp_start_blk_no;
+
+ sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
+ if (!sbi->ckpt)
+ return -ENOMEM;
+ /*
+ * Finding the valid cp block involves reading both
+ * sets (cp pack 1 and cp pack 2)
+ */
+ cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
+ cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
+
+ /* The second checkpoint pack should start at the next segment */
+ cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
+ cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
+
+ if (cp1 && cp2) {
+ if (ver_after(cp2_version, cp1_version))
+ cur_page = cp2;
+ else
+ cur_page = cp1;
+ } else if (cp1) {
+ cur_page = cp1;
+ } else if (cp2) {
+ cur_page = cp2;
+ } else {
+ goto fail_no_cp;
+ }
+
+ cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
+ memcpy(sbi->ckpt, cp_block, blk_size);
+
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
+ return 0;
+
+fail_no_cp:
+ kfree(sbi->ckpt);
+ return -EINVAL;
+}
+
+void set_dirty_dir_page(struct inode *inode, struct page *page)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct list_head *head = &sbi->dir_inode_list;
+ struct dir_inode_entry *new;
+ struct list_head *this;
+
+ if (!S_ISDIR(inode->i_mode))
+ return;
+retry:
+ new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+ if (!new) {
+ cond_resched();
+ goto retry;
+ }
+ new->inode = inode;
+ INIT_LIST_HEAD(&new->list);
+
+ spin_lock(&sbi->dir_inode_lock);
+ list_for_each(this, head) {
+ struct dir_inode_entry *entry;
+ entry = list_entry(this, struct dir_inode_entry, list);
+ if (entry->inode == inode) {
+ kmem_cache_free(inode_entry_slab, new);
+ goto out;
+ }
+ }
+ list_add_tail(&new->list, head);
+ sbi->n_dirty_dirs++;
+
+ BUG_ON(!S_ISDIR(inode->i_mode));
+out:
+ inc_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_inc_dirty_dents(inode);
+ SetPagePrivate(page);
+
+ spin_unlock(&sbi->dir_inode_lock);
+}
+
+void remove_dirty_dir_inode(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct list_head *head = &sbi->dir_inode_list;
+ struct list_head *this;
+
+ if (!S_ISDIR(inode->i_mode))
+ return;
+
+ spin_lock(&sbi->dir_inode_lock);
+ if (atomic_read(&F2FS_I(inode)->dirty_dents))
+ goto out;
+
+ list_for_each(this, head) {
+ struct dir_inode_entry *entry;
+ entry = list_entry(this, struct dir_inode_entry, list);
+ if (entry->inode == inode) {
+ list_del(&entry->list);
+ kmem_cache_free(inode_entry_slab, entry);
+ sbi->n_dirty_dirs--;
+ break;
+ }
+ }
+out:
+ spin_unlock(&sbi->dir_inode_lock);
+}
+
+void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->dir_inode_list;
+ struct dir_inode_entry *entry;
+ struct inode *inode;
+retry:
+ spin_lock(&sbi->dir_inode_lock);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->dir_inode_lock);
+ return;
+ }
+ entry = list_entry(head->next, struct dir_inode_entry, list);
+ inode = igrab(entry->inode);
+ spin_unlock(&sbi->dir_inode_lock);
+ if (inode) {
+ filemap_flush(inode->i_mapping);
+ iput(inode);
+ } else {
+ /*
+ * We should submit the bio, since several dentry pages
+ * in the freeing inode may still be under writeback.
+ */
+ f2fs_submit_bio(sbi, DATA, true);
+ }
+ goto retry;
+}
+
+/*
+ * Freeze all the FS-operations for checkpoint.
+ */
+void block_operations(struct f2fs_sb_info *sbi)
+{
+ int t;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .for_reclaim = 0,
+ };
+
+ /* Stop renaming operation */
+ mutex_lock_op(sbi, RENAME);
+ mutex_lock_op(sbi, DENTRY_OPS);
+
+retry_dents:
+ /* write all the dirty dentry pages */
+ sync_dirty_dir_inodes(sbi);
+
+ mutex_lock_op(sbi, DATA_WRITE);
+ if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
+ mutex_unlock_op(sbi, DATA_WRITE);
+ goto retry_dents;
+ }
+
+ /* block all the operations */
+ for (t = DATA_NEW; t <= NODE_TRUNC; t++)
+ mutex_lock_op(sbi, t);
+
+ mutex_lock(&sbi->write_inode);
+
+ /*
+ * POR: we should ensure that there are no dirty node pages
+ * until finishing the nat/sit flush.
+ */
+retry:
+ sync_node_pages(sbi, 0, &wbc);
+
+ mutex_lock_op(sbi, NODE_WRITE);
+
+ if (get_pages(sbi, F2FS_DIRTY_NODES)) {
+ mutex_unlock_op(sbi, NODE_WRITE);
+ goto retry;
+ }
+ mutex_unlock(&sbi->write_inode);
+}
+
+static void unblock_operations(struct f2fs_sb_info *sbi)
+{
+ int t;
+ for (t = NODE_WRITE; t >= RENAME; t--)
+ mutex_unlock_op(sbi, t);
+}
+
+static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ nid_t last_nid = 0;
+ block_t start_blk;
+ struct page *cp_page;
+ unsigned int data_sum_blocks, orphan_blocks;
+ unsigned int crc32 = 0;
+ void *kaddr;
+ int i;
+
+ /* Flush all the NAT/SIT pages */
+ while (get_pages(sbi, F2FS_DIRTY_META))
+ sync_meta_pages(sbi, META, LONG_MAX);
+
+ next_free_nid(sbi, &last_nid);
+
+ /*
+ * modify checkpoint
+ * version number is already updated
+ */
+ ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
+ ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
+ ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
+ for (i = 0; i < 3; i++) {
+ ckpt->cur_node_segno[i] =
+ cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
+ ckpt->cur_node_blkoff[i] =
+ cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
+ ckpt->alloc_type[i + CURSEG_HOT_NODE] =
+ curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
+ }
+ for (i = 0; i < 3; i++) {
+ ckpt->cur_data_segno[i] =
+ cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
+ ckpt->cur_data_blkoff[i] =
+ cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
+ ckpt->alloc_type[i + CURSEG_HOT_DATA] =
+ curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
+ }
+
+ ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+ ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+ ckpt->next_free_nid = cpu_to_le32(last_nid);
+
+ /* 2 cp + n data seg summary + orphan inode blocks */
+ data_sum_blocks = npages_for_summary_flush(sbi);
+ if (data_sum_blocks < 3)
+ set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ else
+ clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+
+ orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
+ / F2FS_ORPHANS_PER_BLOCK;
+ ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);
+
+ if (is_umount) {
+ set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
+ } else {
+ clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ data_sum_blocks + orphan_blocks);
+ }
+
+ if (sbi->n_orphans)
+ set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+ else
+ clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+ /* update SIT/NAT bitmap */
+ get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
+ get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
+
+ crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
+ *(__le32 *)((unsigned char *)ckpt +
+ le32_to_cpu(ckpt->checksum_offset))
+ = cpu_to_le32(crc32);
+
+ start_blk = __start_cp_addr(sbi);
+
+ /* write out checkpoint buffer at block 0 */
+ cp_page = grab_meta_page(sbi, start_blk++);
+ kaddr = page_address(cp_page);
+ memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
+ set_page_dirty(cp_page);
+ f2fs_put_page(cp_page, 1);
+
+ if (sbi->n_orphans) {
+ write_orphan_inodes(sbi, start_blk);
+ start_blk += orphan_blocks;
+ }
+
+ write_data_summaries(sbi, start_blk);
+ start_blk += data_sum_blocks;
+ if (is_umount) {
+ write_node_summaries(sbi, start_blk);
+ start_blk += NR_CURSEG_NODE_TYPE;
+ }
+
+ /* writeout checkpoint block */
+ cp_page = grab_meta_page(sbi, start_blk);
+ kaddr = page_address(cp_page);
+ memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
+ set_page_dirty(cp_page);
+ f2fs_put_page(cp_page, 1);
+
+ /* wait for previous submitted node/meta pages writeback */
+ while (get_pages(sbi, F2FS_WRITEBACK))
+ congestion_wait(BLK_RW_ASYNC, HZ / 50);
+
+ filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
+ filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
+
+ /* update user_block_counts */
+ sbi->last_valid_block_count = sbi->total_valid_block_count;
+ sbi->alloc_valid_block_count = 0;
+
+ /* Here, we only have one bio having CP pack */
+ if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
+ sbi->sb->s_flags |= MS_RDONLY;
+ else
+ sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
+
+ clear_prefree_segments(sbi);
+ F2FS_RESET_SB_DIRT(sbi);
+}
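
The pack written above is bracketed by two copies of the checkpoint block, with the orphan blocks, the data segment summaries and (on umount) the node segment summaries in between; cp_pack_total_block_count records the whole span. A hypothetical worked example of that count, using three data summary blocks and one orphan block (values chosen purely for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int orphan_blocks   = 1;  /* e.g. up to 1020 orphans */
            unsigned int data_sum_blocks = 3;  /* hot/warm/cold data logs */
            unsigned int node_sum_blocks = 3;  /* NR_CURSEG_NODE_TYPE, umount only */
            int is_umount = 1;

            /* two cp blocks bracket the payload */
            unsigned int total = 2 + orphan_blocks + data_sum_blocks +
                                 (is_umount ? node_sum_blocks : 0);

            printf("cp_pack_total_block_count = %u\n", total);   /* 9 here */
            return 0;
    }
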
+
+/*
+ * We guarantee that this checkpoint procedure should not fail.
+ */
+void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned long long ckpt_ver;
+
+ if (!blocked) {
+ mutex_lock(&sbi->cp_mutex);
+ block_operations(sbi);
+ }
+
+ f2fs_submit_bio(sbi, DATA, true);
+ f2fs_submit_bio(sbi, NODE, true);
+ f2fs_submit_bio(sbi, META, true);
+
+ /*
+ * update the checkpoint pack index:
+ * increase the version number so that
+ * SIT entries and seg summaries are written at the correct place
+ */
+ ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
+ ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
+
+ /* write cached NAT/SIT entries to NAT/SIT area */
+ flush_nat_entries(sbi);
+ flush_sit_entries(sbi);
+
+ reset_victim_segmap(sbi);
+
+ /* unlock all the fs_lock[] in do_checkpoint() */
+ do_checkpoint(sbi, is_umount);
+
+ unblock_operations(sbi);
+ mutex_unlock(&sbi->cp_mutex);
+}
+
+void init_orphan_info(struct f2fs_sb_info *sbi)
+{
+ mutex_init(&sbi->orphan_inode_mutex);
+ INIT_LIST_HEAD(&sbi->orphan_inode_list);
+ sbi->n_orphans = 0;
+}
+
+int create_checkpoint_caches(void)
+{
+ orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
+ sizeof(struct orphan_inode_entry), NULL);
+ if (unlikely(!orphan_entry_slab))
+ return -ENOMEM;
+ inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
+ sizeof(struct dir_inode_entry), NULL);
+ if (unlikely(!inode_entry_slab)) {
+ kmem_cache_destroy(orphan_entry_slab);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void destroy_checkpoint_caches(void)
+{
+ kmem_cache_destroy(orphan_entry_slab);
+ kmem_cache_destroy(inode_entry_slab);
+}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
new file mode 100644
index 00000000000..655aeabc1dd
--- /dev/null
+++ b/fs/f2fs/data.c
@@ -0,0 +1,702 @@
+/*
+ * fs/f2fs/data.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+
+/*
+ * Lock ordering for the change of data block address:
+ * ->data_page
+ * ->node_page
+ * update block addresses in the node page
+ */
+static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
+{
+ struct f2fs_node *rn;
+ __le32 *addr_array;
+ struct page *node_page = dn->node_page;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+
+ wait_on_page_writeback(node_page);
+
+ rn = (struct f2fs_node *)page_address(node_page);
+
+ /* Get physical address of data block */
+ addr_array = blkaddr_in_node(rn);
+ addr_array[ofs_in_node] = cpu_to_le32(new_addr);
+ set_page_dirty(node_page);
+}
+
+int reserve_new_block(struct dnode_of_data *dn)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+
+ if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
+ return -EPERM;
+ if (!inc_valid_block_count(sbi, dn->inode, 1))
+ return -ENOSPC;
+
+ __set_data_blkaddr(dn, NEW_ADDR);
+ dn->data_blkaddr = NEW_ADDR;
+ sync_inode_page(dn);
+ return 0;
+}
+
+static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct buffer_head *bh_result)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ pgoff_t start_fofs, end_fofs;
+ block_t start_blkaddr;
+
+ read_lock(&fi->ext.ext_lock);
+ if (fi->ext.len == 0) {
+ read_unlock(&fi->ext.ext_lock);
+ return 0;
+ }
+
+ sbi->total_hit_ext++;
+ start_fofs = fi->ext.fofs;
+ end_fofs = fi->ext.fofs + fi->ext.len - 1;
+ start_blkaddr = fi->ext.blk_addr;
+
+ if (pgofs >= start_fofs && pgofs <= end_fofs) {
+ unsigned int blkbits = inode->i_sb->s_blocksize_bits;
+ size_t count;
+
+ clear_buffer_new(bh_result);
+ map_bh(bh_result, inode->i_sb,
+ start_blkaddr + pgofs - start_fofs);
+ count = end_fofs - pgofs + 1;
+ if (count < (UINT_MAX >> blkbits))
+ bh_result->b_size = (count << blkbits);
+ else
+ bh_result->b_size = UINT_MAX;
+
+ sbi->read_hit_ext++;
+ read_unlock(&fi->ext.ext_lock);
+ return 1;
+ }
+ read_unlock(&fi->ext.ext_lock);
+ return 0;
+}
+
+void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
+{
+ struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+ pgoff_t fofs, start_fofs, end_fofs;
+ block_t start_blkaddr, end_blkaddr;
+
+ BUG_ON(blk_addr == NEW_ADDR);
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;
+
+ /* Update the page address in the parent node */
+ __set_data_blkaddr(dn, blk_addr);
+
+ write_lock(&fi->ext.ext_lock);
+
+ start_fofs = fi->ext.fofs;
+ end_fofs = fi->ext.fofs + fi->ext.len - 1;
+ start_blkaddr = fi->ext.blk_addr;
+ end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
+
+ /* Drop and initialize the matched extent */
+ if (fi->ext.len == 1 && fofs == start_fofs)
+ fi->ext.len = 0;
+
+ /* Initial extent */
+ if (fi->ext.len == 0) {
+ if (blk_addr != NULL_ADDR) {
+ fi->ext.fofs = fofs;
+ fi->ext.blk_addr = blk_addr;
+ fi->ext.len = 1;
+ }
+ goto end_update;
+ }
+
+ /* Front merge */
+ if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
+ fi->ext.fofs--;
+ fi->ext.blk_addr--;
+ fi->ext.len++;
+ goto end_update;
+ }
+
+ /* Back merge */
+ if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
+ fi->ext.len++;
+ goto end_update;
+ }
+
+ /* Split the existing extent */
+ if (fi->ext.len > 1 &&
+ fofs >= start_fofs && fofs <= end_fofs) {
+ if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
+ fi->ext.len = fofs - start_fofs;
+ } else {
+ fi->ext.fofs = fofs + 1;
+ fi->ext.blk_addr = start_blkaddr +
+ fofs - start_fofs + 1;
+ fi->ext.len -= fofs - start_fofs + 1;
+ }
+ goto end_update;
+ }
+ write_unlock(&fi->ext.ext_lock);
+ return;
+
+end_update:
+ write_unlock(&fi->ext.ext_lock);
+ sync_inode_page(dn);
+ return;
+}
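
update_extent_cache() maintains a single cached extent [fofs, blk_addr, len] per inode: a new mapping that is contiguous in both file offset and block address extends it at the front or the back, otherwise the extent is split or dropped. The standalone sketch below reproduces only the two merge rules (non-adjacent updates are simplified to restarting the extent); all names are illustrative, not the patch's own API.

    #include <stdio.h>

    struct extent { unsigned long fofs, blk_addr, len; };

    static void merge(struct extent *e, unsigned long fofs, unsigned long blk)
    {
            if (e->len && fofs == e->fofs - 1 && blk == e->blk_addr - 1) {
                    /* front merge */
                    e->fofs--; e->blk_addr--; e->len++;
            } else if (e->len && fofs == e->fofs + e->len &&
                       blk == e->blk_addr + e->len) {
                    /* back merge */
                    e->len++;
            } else {
                    /* simplified: restart the extent */
                    e->fofs = fofs; e->blk_addr = blk; e->len = 1;
            }
    }

    int main(void)
    {
            struct extent e = { .fofs = 10, .blk_addr = 100, .len = 4 };

            merge(&e, 14, 104);  /* back merge  -> [10, 100, 5] */
            merge(&e, 9, 99);    /* front merge -> [9, 99, 6]   */
            printf("fofs=%lu blk=%lu len=%lu\n", e.fofs, e.blk_addr, e.len);
            return 0;
    }
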
+
+struct page *find_data_page(struct inode *inode, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ int err;
+
+ page = find_get_page(mapping, index);
+ if (page && PageUptodate(page))
+ return page;
+ f2fs_put_page(page, 0);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+ if (err)
+ return ERR_PTR(err);
+ f2fs_put_dnode(&dn);
+
+ if (dn.data_blkaddr == NULL_ADDR)
+ return ERR_PTR(-ENOENT);
+
+ /* A block preallocated by fallocate() has NEW_ADDR but no cached page */
+ if (dn.data_blkaddr == NEW_ADDR)
+ return ERR_PTR(-EINVAL);
+
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
+ unlock_page(page);
+ return page;
+}
+
+/*
+ * If it tries to access a hole, return an error, because the callers
+ * (functions in dir.c and GC) should be able to know whether this page
+ * exists or not.
+ */
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ int err;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+ if (err)
+ return ERR_PTR(err);
+ f2fs_put_dnode(&dn);
+
+ if (dn.data_blkaddr == NULL_ADDR)
+ return ERR_PTR(-ENOENT);
+
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ if (PageUptodate(page))
+ return page;
+
+ BUG_ON(dn.data_blkaddr == NEW_ADDR);
+ BUG_ON(dn.data_blkaddr == NULL_ADDR);
+
+ err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
+ return page;
+}
+
+/*
+ * Caller ensures that this data page is never allocated.
+ * A new zero-filled data page is allocated in the page cache.
+ */
+struct page *get_new_data_page(struct inode *inode, pgoff_t index,
+ bool new_i_size)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ struct dnode_of_data dn;
+ int err;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, index, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ if (dn.data_blkaddr == NULL_ADDR) {
+ if (reserve_new_block(&dn)) {
+ f2fs_put_dnode(&dn);
+ return ERR_PTR(-ENOSPC);
+ }
+ }
+ f2fs_put_dnode(&dn);
+
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ if (PageUptodate(page))
+ return page;
+
+ if (dn.data_blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ } else {
+ err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
+ }
+ SetPageUptodate(page);
+
+ if (new_i_size &&
+ i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
+ i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+ mark_inode_dirty_sync(inode);
+ }
+ return page;
+}
+
+static void read_end_io(struct bio *bio, int err)
+{
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+
+ if (uptodate) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ unlock_page(page);
+ } while (bvec >= bio->bi_io_vec);
+ kfree(bio->bi_private);
+ bio_put(bio);
+}
+
+/*
+ * Fill the locked page with data located in the block address.
+ * Read operation is synchronous, and caller must unlock the page.
+ */
+int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
+ block_t blk_addr, int type)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ bool sync = (type == READ_SYNC);
+ struct bio *bio;
+
+ /* This page can be already read by other threads */
+ if (PageUptodate(page)) {
+ if (!sync)
+ unlock_page(page);
+ return 0;
+ }
+
+ down_read(&sbi->bio_sem);
+
+ /* Allocate a new bio */
+ bio = f2fs_bio_alloc(bdev, 1);
+
+ /* Initialize the bio */
+ bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+ bio->bi_end_io = read_end_io;
+
+ if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ kfree(bio->bi_private);
+ bio_put(bio);
+ up_read(&sbi->bio_sem);
+ return -EFAULT;
+ }
+
+ submit_bio(type, bio);
+ up_read(&sbi->bio_sem);
+
+ /* wait for read completion if sync */
+ if (sync) {
+ lock_page(page);
+ if (PageError(page))
+ return -EIO;
+ }
+ return 0;
+}
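+
+/*
+ * For example, find_data_page() and get_lock_data_page() above pass
+ * READ_SYNC, so f2fs_readpage() re-locks the page once the bio completes
+ * and returns it either up to date or with PageError set; for an
+ * asynchronous type it is read_end_io() that eventually unlocks the page.
+ */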
+
+/*
+ * This function should be used by the data read flow only, since it
+ * does not check the "create" flag that indicates block allocation.
+ * The reason for this special behavior is to exploit the VFS readahead
+ * mechanism.
+ */
+static int get_data_block_ro(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ unsigned int blkbits = inode->i_sb->s_blocksize_bits;
+ unsigned maxblocks = bh_result->b_size >> blkbits;
+ struct dnode_of_data dn;
+ pgoff_t pgofs;
+ int err;
+
+ /* Get the page offset from the block offset (iblock) */
+ pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
+
+ if (check_extent_cache(inode, pgofs, bh_result))
+ return 0;
+
+ /* When reading holes, we need its node page */
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, pgofs, RDONLY_NODE);
+ if (err)
+ return (err == -ENOENT) ? 0 : err;
+
+ /* It does not support data allocation */
+ BUG_ON(create);
+
+ if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
+ int i;
+ unsigned int end_offset;
+
+ end_offset = IS_INODE(dn.node_page) ?
+ ADDRS_PER_INODE :
+ ADDRS_PER_BLOCK;
+
+ clear_buffer_new(bh_result);
+
+ /* Give more consecutive addresses for the read ahead */
+ for (i = 0; i < end_offset - dn.ofs_in_node; i++)
+ if (((datablock_addr(dn.node_page,
+ dn.ofs_in_node + i))
+ != (dn.data_blkaddr + i)) || maxblocks == i)
+ break;
+ map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
+ bh_result->b_size = (i << blkbits);
+ }
+ f2fs_put_dnode(&dn);
+ return 0;
+}
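+
+/*
+ * For instance, if the loop above sees that file blocks pgofs..pgofs+3
+ * sit at consecutive on-disk addresses, bh_result is mapped with
+ * b_size = 4 << blkbits, so mpage_readpages() below can issue one larger
+ * bio instead of four single-block reads.
+ */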
+
+static int f2fs_read_data_page(struct file *file, struct page *page)
+{
+ return mpage_readpage(page, get_data_block_ro);
+}
+
+static int f2fs_read_data_pages(struct file *file,
+ struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
+}
+
+int do_write_data_page(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ block_t old_blk_addr, new_blk_addr;
+ struct dnode_of_data dn;
+ int err = 0;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, page->index, RDONLY_NODE);
+ if (err)
+ return err;
+
+ old_blk_addr = dn.data_blkaddr;
+
+ /* This page is already truncated */
+ if (old_blk_addr == NULL_ADDR)
+ goto out_writepage;
+
+ set_page_writeback(page);
+
+ /*
+ * If the current allocation needs SSR, prefer in-place
+ * writes for the updated data.
+ */
+ if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
+ need_inplace_update(inode)) {
+ rewrite_data_page(F2FS_SB(inode->i_sb), page,
+ old_blk_addr);
+ } else {
+ write_data_page(inode, page, &dn,
+ old_blk_addr, &new_blk_addr);
+ update_extent_cache(new_blk_addr, &dn);
+ F2FS_I(inode)->data_version =
+ le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
+ }
+out_writepage:
+ f2fs_put_dnode(&dn);
+ return err;
+}
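+
+/*
+ * In short: when SSR is needed and the page is neither newly allocated
+ * nor cold, the data is rewritten in place at its old block address;
+ * otherwise write_data_page() allocates a new block, the extent cache is
+ * refreshed, and data_version is stamped with the current checkpoint
+ * version (the per-inode fsync hint kept in f2fs_inode_info).
+ */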
+
+static int f2fs_write_data_page(struct page *page,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ loff_t i_size = i_size_read(inode);
+ const pgoff_t end_index = ((unsigned long long) i_size)
+ >> PAGE_CACHE_SHIFT;
+ unsigned offset;
+ int err = 0;
+
+ if (page->index < end_index)
+ goto out;
+
+ /*
+ * If the offset is beyond the file size,
+ * this page does not need to be written to disk.
+ */
+ offset = i_size & (PAGE_CACHE_SIZE - 1);
+ if ((page->index >= end_index + 1) || !offset) {
+ if (S_ISDIR(inode->i_mode)) {
+ dec_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_dec_dirty_dents(inode);
+ }
+ goto unlock_out;
+ }
+
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+out:
+ if (sbi->por_doing)
+ goto redirty_out;
+
+ if (wbc->for_reclaim && !S_ISDIR(inode->i_mode) && !is_cold_data(page))
+ goto redirty_out;
+
+ mutex_lock_op(sbi, DATA_WRITE);
+ if (S_ISDIR(inode->i_mode)) {
+ dec_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_dec_dirty_dents(inode);
+ }
+ err = do_write_data_page(page);
+ if (err && err != -ENOENT) {
+ wbc->pages_skipped++;
+ set_page_dirty(page);
+ }
+ mutex_unlock_op(sbi, DATA_WRITE);
+
+ if (wbc->for_reclaim)
+ f2fs_submit_bio(sbi, DATA, true);
+
+ if (err == -ENOENT)
+ goto unlock_out;
+
+ clear_cold_data(page);
+ unlock_page(page);
+
+ if (!wbc->for_reclaim && !S_ISDIR(inode->i_mode))
+ f2fs_balance_fs(sbi);
+ return 0;
+
+unlock_out:
+ unlock_page(page);
+ return (err == -ENOENT) ? 0 : err;
+
+redirty_out:
+ wbc->pages_skipped++;
+ set_page_dirty(page);
+ return AOP_WRITEPAGE_ACTIVATE;
+}
+
+#define MAX_DESIRED_PAGES_WP 4096
+
+static int f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ int ret;
+ long excess_nrtw = 0, desired_nrtw;
+
+ if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
+ desired_nrtw = MAX_DESIRED_PAGES_WP;
+ excess_nrtw = desired_nrtw - wbc->nr_to_write;
+ wbc->nr_to_write = desired_nrtw;
+ }
+
+ if (!S_ISDIR(inode->i_mode))
+ mutex_lock(&sbi->writepages);
+ ret = generic_writepages(mapping, wbc);
+ if (!S_ISDIR(inode->i_mode))
+ mutex_unlock(&sbi->writepages);
+ f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
+
+ remove_dirty_dir_inode(inode);
+
+ wbc->nr_to_write -= excess_nrtw;
+ return ret;
+}
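+
+/*
+ * Note on MAX_DESIRED_PAGES_WP: when the caller asks for fewer than 4096
+ * pages, nr_to_write is temporarily raised to that value so that
+ * generic_writepages() can batch more dirty pages in one pass, and the
+ * excess is subtracted back before returning so the caller's writeback
+ * budget is charged roughly as it originally requested.
+ */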
+
+static int f2fs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct page *page;
+ pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+ struct dnode_of_data dn;
+ int err = 0;
+
+ /* for nobh_write_end */
+ *fsdata = NULL;
+
+ f2fs_balance_fs(sbi);
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
+ mutex_lock_op(sbi, DATA_NEW);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, index, 0);
+ if (err) {
+ mutex_unlock_op(sbi, DATA_NEW);
+ f2fs_put_page(page, 1);
+ return err;
+ }
+
+ if (dn.data_blkaddr == NULL_ADDR) {
+ err = reserve_new_block(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ mutex_unlock_op(sbi, DATA_NEW);
+ f2fs_put_page(page, 1);
+ return err;
+ }
+ }
+ f2fs_put_dnode(&dn);
+
+ mutex_unlock_op(sbi, DATA_NEW);
+
+ if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+ return 0;
+
+ if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+ unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned end = start + len;
+
+ /* Reading beyond i_size is simple: memset to zero */
+ zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+ return 0;
+ }
+
+ if (dn.data_blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ } else {
+ err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
+ }
+ SetPageUptodate(page);
+ clear_cold_data(page);
+ return 0;
+}
+
+static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+
+ if (rw == WRITE)
+ return 0;
+
+ /* Needs synchronization with the cleaner */
+ return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+ get_data_block_ro);
+}
+
+static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
+{
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
+ dec_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_dec_dirty_dents(inode);
+ }
+ ClearPagePrivate(page);
+}
+
+static int f2fs_release_data_page(struct page *page, gfp_t wait)
+{
+ ClearPagePrivate(page);
+ return 0;
+}
+
+static int f2fs_set_data_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+
+ SetPageUptodate(page);
+ if (!PageDirty(page)) {
+ __set_page_dirty_nobuffers(page);
+ set_dirty_dir_page(inode, page);
+ return 1;
+ }
+ return 0;
+}
+
+const struct address_space_operations f2fs_dblock_aops = {
+ .readpage = f2fs_read_data_page,
+ .readpages = f2fs_read_data_pages,
+ .writepage = f2fs_write_data_page,
+ .writepages = f2fs_write_data_pages,
+ .write_begin = f2fs_write_begin,
+ .write_end = nobh_write_end,
+ .set_page_dirty = f2fs_set_data_page_dirty,
+ .invalidatepage = f2fs_invalidate_data_page,
+ .releasepage = f2fs_release_data_page,
+ .direct_IO = f2fs_direct_IO,
+};
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
new file mode 100644
index 00000000000..0e0380a588a
--- /dev/null
+++ b/fs/f2fs/debug.c
@@ -0,0 +1,361 @@
+/*
+ * f2fs debugging statistics
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Copyright (c) 2012 Linux Foundation
+ * Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/backing-dev.h>
+#include <linux/proc_fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+#include "gc.h"
+
+static LIST_HEAD(f2fs_stat_list);
+static struct dentry *debugfs_root;
+
+static void update_general_status(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = sbi->stat_info;
+ int i;
+
+ /* validity check of the segment numbers */
+ si->hit_ext = sbi->read_hit_ext;
+ si->total_ext = sbi->total_hit_ext;
+ si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
+ si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
+ si->ndirty_dirs = sbi->n_dirty_dirs;
+ si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+ si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
+ si->rsvd_segs = reserved_segments(sbi);
+ si->overp_segs = overprovision_segments(sbi);
+ si->valid_count = valid_user_blocks(sbi);
+ si->valid_node_count = valid_node_count(sbi);
+ si->valid_inode_count = valid_inode_count(sbi);
+ si->utilization = utilization(sbi);
+
+ si->free_segs = free_segments(sbi);
+ si->free_secs = free_sections(sbi);
+ si->prefree_count = prefree_segments(sbi);
+ si->dirty_count = dirty_segments(sbi);
+ si->node_pages = sbi->node_inode->i_mapping->nrpages;
+ si->meta_pages = sbi->meta_inode->i_mapping->nrpages;
+ si->nats = NM_I(sbi)->nat_cnt;
+ si->sits = SIT_I(sbi)->dirty_sentries;
+ si->fnids = NM_I(sbi)->fcnt;
+ si->bg_gc = sbi->bg_gc;
+ si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
+ * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
+ / 2;
+ si->util_valid = (int)(written_block_count(sbi) >>
+ sbi->log_blocks_per_seg)
+ * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
+ / 2;
+ si->util_invalid = 50 - si->util_free - si->util_valid;
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
+ struct curseg_info *curseg = CURSEG_I(sbi, i);
+ si->curseg[i] = curseg->segno;
+ si->cursec[i] = curseg->segno / sbi->segs_per_sec;
+ si->curzone[i] = si->cursec[i] / sbi->secs_per_zone;
+ }
+
+ for (i = 0; i < 2; i++) {
+ si->segment_count[i] = sbi->segment_count[i];
+ si->block_count[i] = sbi->block_count[i];
+ }
+}
+
+/*
+ * This function calculates the BDF of every segment.
+ */
+static void update_sit_info(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = sbi->stat_info;
+ unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist;
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int segno, vblocks;
+ int ndirty = 0;
+
+ bimodal = 0;
+ total_vblocks = 0;
+ blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg);
+ hblks_per_sec = blks_per_sec / 2;
+ mutex_lock(&sit_i->sentry_lock);
+ for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
+ vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ dist = abs(vblocks - hblks_per_sec);
+ bimodal += dist * dist;
+
+ if (vblocks > 0 && vblocks < blks_per_sec) {
+ total_vblocks += vblocks;
+ ndirty++;
+ }
+ }
+ mutex_unlock(&sit_i->sentry_lock);
+ dist = sbi->total_sections * hblks_per_sec * hblks_per_sec / 100;
+ si->bimodal = bimodal / dist;
+ if (si->dirty_count)
+ si->avg_vblocks = total_vblocks / ndirty;
+ else
+ si->avg_vblocks = 0;
+}
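+
+/*
+ * In the loop above, dist is each section's distance from the half-full
+ * point, so the resulting si->bimodal behaves as a 0..100-style score:
+ * largest when sections are either nearly full or nearly empty, and
+ * smallest when valid blocks sit evenly around 50% per section.
+ */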
+
+/*
+ * This function calculates memory footprint.
+ */
+static void update_mem_info(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = sbi->stat_info;
+ unsigned npages;
+
+ if (si->base_mem)
+ goto get_cache;
+
+ si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
+ si->base_mem += 2 * sizeof(struct f2fs_inode_info);
+ si->base_mem += sizeof(*sbi->ckpt);
+
+ /* build sm */
+ si->base_mem += sizeof(struct f2fs_sm_info);
+
+ /* build sit */
+ si->base_mem += sizeof(struct sit_info);
+ si->base_mem += TOTAL_SEGS(sbi) * sizeof(struct seg_entry);
+ si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * TOTAL_SEGS(sbi);
+ if (sbi->segs_per_sec > 1)
+ si->base_mem += sbi->total_sections *
+ sizeof(struct sec_entry);
+ si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
+
+ /* build free segmap */
+ si->base_mem += sizeof(struct free_segmap_info);
+ si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ si->base_mem += f2fs_bitmap_size(sbi->total_sections);
+
+ /* build curseg */
+ si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
+ si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+
+ /* build dirty segmap */
+ si->base_mem += sizeof(struct dirty_seglist_info);
+ si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ si->base_mem += 2 * f2fs_bitmap_size(TOTAL_SEGS(sbi));
+
+ /* build nm */
+ si->base_mem += sizeof(struct f2fs_nm_info);
+ si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
+
+ /* build gc */
+ si->base_mem += sizeof(struct f2fs_gc_kthread);
+
+get_cache:
+ /* free nids */
+ si->cache_mem = NM_I(sbi)->fcnt;
+ si->cache_mem += NM_I(sbi)->nat_cnt;
+ npages = sbi->node_inode->i_mapping->nrpages;
+ si->cache_mem += npages << PAGE_CACHE_SHIFT;
+ npages = sbi->meta_inode->i_mapping->nrpages;
+ si->cache_mem += npages << PAGE_CACHE_SHIFT;
+ si->cache_mem += sbi->n_orphans * sizeof(struct orphan_inode_entry);
+ si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry);
+}
+
+static int stat_show(struct seq_file *s, void *v)
+{
+ struct f2fs_stat_info *si, *next;
+ int i = 0;
+ int j;
+
+ list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
+
+ mutex_lock(&si->stat_lock);
+ if (!si->sbi) {
+ mutex_unlock(&si->stat_lock);
+ continue;
+ }
+ update_general_status(si->sbi);
+
+ seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
+ seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ",
+ si->nat_area_segs, si->sit_area_segs);
+ seq_printf(s, "[SSA: %d] [MAIN: %d",
+ si->ssa_area_segs, si->main_area_segs);
+ seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
+ si->overp_segs, si->rsvd_segs);
+ seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
+ si->utilization, si->valid_count);
+ seq_printf(s, " - Node: %u (Inode: %u, ",
+ si->valid_node_count, si->valid_inode_count);
+ seq_printf(s, "Other: %u)\n - Data: %u\n",
+ si->valid_node_count - si->valid_inode_count,
+ si->valid_count - si->valid_node_count);
+ seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
+ si->main_area_segs, si->main_area_sections,
+ si->main_area_zones);
+ seq_printf(s, " - COLD data: %d, %d, %d\n",
+ si->curseg[CURSEG_COLD_DATA],
+ si->cursec[CURSEG_COLD_DATA],
+ si->curzone[CURSEG_COLD_DATA]);
+ seq_printf(s, " - WARM data: %d, %d, %d\n",
+ si->curseg[CURSEG_WARM_DATA],
+ si->cursec[CURSEG_WARM_DATA],
+ si->curzone[CURSEG_WARM_DATA]);
+ seq_printf(s, " - HOT data: %d, %d, %d\n",
+ si->curseg[CURSEG_HOT_DATA],
+ si->cursec[CURSEG_HOT_DATA],
+ si->curzone[CURSEG_HOT_DATA]);
+ seq_printf(s, " - Dir dnode: %d, %d, %d\n",
+ si->curseg[CURSEG_HOT_NODE],
+ si->cursec[CURSEG_HOT_NODE],
+ si->curzone[CURSEG_HOT_NODE]);
+ seq_printf(s, " - File dnode: %d, %d, %d\n",
+ si->curseg[CURSEG_WARM_NODE],
+ si->cursec[CURSEG_WARM_NODE],
+ si->curzone[CURSEG_WARM_NODE]);
+ seq_printf(s, " - Indir nodes: %d, %d, %d\n",
+ si->curseg[CURSEG_COLD_NODE],
+ si->cursec[CURSEG_COLD_NODE],
+ si->curzone[CURSEG_COLD_NODE]);
+ seq_printf(s, "\n - Valid: %d\n - Dirty: %d\n",
+ si->main_area_segs - si->dirty_count -
+ si->prefree_count - si->free_segs,
+ si->dirty_count);
+ seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
+ si->prefree_count, si->free_segs, si->free_secs);
+ seq_printf(s, "GC calls: %d (BG: %d)\n",
+ si->call_count, si->bg_gc);
+ seq_printf(s, " - data segments : %d\n", si->data_segs);
+ seq_printf(s, " - node segments : %d\n", si->node_segs);
+ seq_printf(s, "Try to move %d blocks\n", si->tot_blks);
+ seq_printf(s, " - data blocks : %d\n", si->data_blks);
+ seq_printf(s, " - node blocks : %d\n", si->node_blks);
+ seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
+ si->hit_ext, si->total_ext);
+ seq_printf(s, "\nBalancing F2FS Async:\n");
+ seq_printf(s, " - nodes %4d in %4d\n",
+ si->ndirty_node, si->node_pages);
+ seq_printf(s, " - dents %4d in dirs:%4d\n",
+ si->ndirty_dent, si->ndirty_dirs);
+ seq_printf(s, " - meta %4d in %4d\n",
+ si->ndirty_meta, si->meta_pages);
+ seq_printf(s, " - NATs %5d > %lu\n",
+ si->nats, NM_WOUT_THRESHOLD);
+ seq_printf(s, " - SITs: %5d\n - free_nids: %5d\n",
+ si->sits, si->fnids);
+ seq_printf(s, "\nDistribution of User Blocks:");
+ seq_printf(s, " [ valid | invalid | free ]\n");
+ seq_printf(s, " [");
+
+ for (j = 0; j < si->util_valid; j++)
+ seq_printf(s, "-");
+ seq_printf(s, "|");
+
+ for (j = 0; j < si->util_invalid; j++)
+ seq_printf(s, "-");
+ seq_printf(s, "|");
+
+ for (j = 0; j < si->util_free; j++)
+ seq_printf(s, "-");
+ seq_printf(s, "]\n\n");
+ seq_printf(s, "SSR: %u blocks in %u segments\n",
+ si->block_count[SSR], si->segment_count[SSR]);
+ seq_printf(s, "LFS: %u blocks in %u segments\n",
+ si->block_count[LFS], si->segment_count[LFS]);
+
+ /* segment usage info */
+ update_sit_info(si->sbi);
+ seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
+ si->bimodal, si->avg_vblocks);
+
+ /* memory footprint */
+ update_mem_info(si->sbi);
+ seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
+ (si->base_mem + si->cache_mem) >> 10,
+ si->base_mem >> 10, si->cache_mem >> 10);
+ mutex_unlock(&si->stat_lock);
+ }
+ return 0;
+}
+
+static int stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stat_show, inode->i_private);
+}
+
+static const struct file_operations stat_fops = {
+ .open = stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int init_stats(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ struct f2fs_stat_info *si;
+
+ sbi->stat_info = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+ if (!sbi->stat_info)
+ return -ENOMEM;
+
+ si = sbi->stat_info;
+ mutex_init(&si->stat_lock);
+ list_add_tail(&si->stat_list, &f2fs_stat_list);
+
+ si->all_area_segs = le32_to_cpu(raw_super->segment_count);
+ si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
+ si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
+ si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa);
+ si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
+ si->main_area_sections = le32_to_cpu(raw_super->section_count);
+ si->main_area_zones = si->main_area_sections /
+ le32_to_cpu(raw_super->secs_per_zone);
+ si->sbi = sbi;
+ return 0;
+}
+
+int f2fs_build_stats(struct f2fs_sb_info *sbi)
+{
+ int retval;
+
+ retval = init_stats(sbi);
+ if (retval)
+ return retval;
+
+ if (!debugfs_root)
+ debugfs_root = debugfs_create_dir("f2fs", NULL);
+
+ debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);
+ return 0;
+}
+
+void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = sbi->stat_info;
+
+ list_del(&si->stat_list);
+ mutex_lock(&si->stat_lock);
+ si->sbi = NULL;
+ mutex_unlock(&si->stat_lock);
+ kfree(sbi->stat_info);
+}
+
+void destroy_root_stats(void)
+{
+ debugfs_remove_recursive(debugfs_root);
+ debugfs_root = NULL;
+}
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
new file mode 100644
index 00000000000..b4e24f32b54
--- /dev/null
+++ b/fs/f2fs/dir.c
@@ -0,0 +1,672 @@
+/*
+ * fs/f2fs/dir.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include "f2fs.h"
+#include "acl.h"
+
+static unsigned long dir_blocks(struct inode *inode)
+{
+ return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
+ >> PAGE_CACHE_SHIFT;
+}
+
+static unsigned int dir_buckets(unsigned int level)
+{
+ if (level < MAX_DIR_HASH_DEPTH / 2)
+ return 1 << level;
+ else
+ return 1 << ((MAX_DIR_HASH_DEPTH / 2) - 1);
+}
+
+static unsigned int bucket_blocks(unsigned int level)
+{
+ if (level < MAX_DIR_HASH_DEPTH / 2)
+ return 2;
+ else
+ return 4;
+}
+
+static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+ [F2FS_FT_UNKNOWN] = DT_UNKNOWN,
+ [F2FS_FT_REG_FILE] = DT_REG,
+ [F2FS_FT_DIR] = DT_DIR,
+ [F2FS_FT_CHRDEV] = DT_CHR,
+ [F2FS_FT_BLKDEV] = DT_BLK,
+ [F2FS_FT_FIFO] = DT_FIFO,
+ [F2FS_FT_SOCK] = DT_SOCK,
+ [F2FS_FT_SYMLINK] = DT_LNK,
+};
+
+#define S_SHIFT 12
+static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
+ [S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
+ [S_IFCHR >> S_SHIFT] = F2FS_FT_CHRDEV,
+ [S_IFBLK >> S_SHIFT] = F2FS_FT_BLKDEV,
+ [S_IFIFO >> S_SHIFT] = F2FS_FT_FIFO,
+ [S_IFSOCK >> S_SHIFT] = F2FS_FT_SOCK,
+ [S_IFLNK >> S_SHIFT] = F2FS_FT_SYMLINK,
+};
+
+static void set_de_type(struct f2fs_dir_entry *de, struct inode *inode)
+{
+ mode_t mode = inode->i_mode;
+ de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
+}
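+
+/*
+ * For example, a regular file (S_IFREG) is recorded on disk as
+ * F2FS_FT_REG_FILE by set_de_type() and reported back to userspace as
+ * DT_REG via f2fs_filetype_table[] in f2fs_readdir() below.
+ */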
+
+static unsigned long dir_block_index(unsigned int level, unsigned int idx)
+{
+ unsigned long i;
+ unsigned long bidx = 0;
+
+ for (i = 0; i < level; i++)
+ bidx += dir_buckets(i) * bucket_blocks(i);
+ bidx += idx * bucket_blocks(level);
+ return bidx;
+}
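+
+/*
+ * A worked example of the layout implied by dir_buckets(), bucket_blocks()
+ * and dir_block_index(): level 0 holds 1 bucket of 2 blocks (blocks 0-1),
+ * level 1 holds 2 buckets of 2 blocks (blocks 2-5), so the second bucket
+ * of level 2 (idx == 1) starts at block 1*2 + 2*2 + 1*2 = 8.
+ */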
+
+static bool early_match_name(const char *name, int namelen,
+ f2fs_hash_t namehash, struct f2fs_dir_entry *de)
+{
+ if (le16_to_cpu(de->name_len) != namelen)
+ return false;
+
+ if (de->hash_code != namehash)
+ return false;
+
+ return true;
+}
+
+static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
+ const char *name, int namelen, int *max_slots,
+ f2fs_hash_t namehash, struct page **res_page)
+{
+ struct f2fs_dir_entry *de;
+ unsigned long bit_pos, end_pos, next_pos;
+ struct f2fs_dentry_block *dentry_blk = kmap(dentry_page);
+ int slots;
+
+ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK, 0);
+ while (bit_pos < NR_DENTRY_IN_BLOCK) {
+ de = &dentry_blk->dentry[bit_pos];
+ slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+
+ if (early_match_name(name, namelen, namehash, de)) {
+ if (!memcmp(dentry_blk->filename[bit_pos],
+ name, namelen)) {
+ *res_page = dentry_page;
+ goto found;
+ }
+ }
+ next_pos = bit_pos + slots;
+ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK, next_pos);
+ if (bit_pos >= NR_DENTRY_IN_BLOCK)
+ end_pos = NR_DENTRY_IN_BLOCK;
+ else
+ end_pos = bit_pos;
+ if (*max_slots < end_pos - next_pos)
+ *max_slots = end_pos - next_pos;
+ }
+
+ de = NULL;
+ kunmap(dentry_page);
+found:
+ return de;
+}
+
+static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ unsigned int level, const char *name, int namelen,
+ f2fs_hash_t namehash, struct page **res_page)
+{
+ int s = GET_DENTRY_SLOTS(namelen);
+ unsigned int nbucket, nblock;
+ unsigned int bidx, end_block;
+ struct page *dentry_page;
+ struct f2fs_dir_entry *de = NULL;
+ bool room = false;
+ int max_slots = 0;
+
+ BUG_ON(level > MAX_DIR_HASH_DEPTH);
+
+ nbucket = dir_buckets(level);
+ nblock = bucket_blocks(level);
+
+ bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket);
+ end_block = bidx + nblock;
+
+ for (; bidx < end_block; bidx++) {
+ /* no need to allocate new dentry pages to all the indices */
+ dentry_page = find_data_page(dir, bidx);
+ if (IS_ERR(dentry_page)) {
+ room = true;
+ continue;
+ }
+
+ de = find_in_block(dentry_page, name, namelen,
+ &max_slots, namehash, res_page);
+ if (de)
+ break;
+
+ if (max_slots >= s)
+ room = true;
+ f2fs_put_page(dentry_page, 0);
+ }
+
+ if (!de && room && F2FS_I(dir)->chash != namehash) {
+ F2FS_I(dir)->chash = namehash;
+ F2FS_I(dir)->clevel = level;
+ }
+
+ return de;
+}
+
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (via the res_page
+ * parameter) and the entry itself. The page is returned mapped and
+ * unlocked. The entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ struct qstr *child, struct page **res_page)
+{
+ const char *name = child->name;
+ int namelen = child->len;
+ unsigned long npages = dir_blocks(dir);
+ struct f2fs_dir_entry *de = NULL;
+ f2fs_hash_t name_hash;
+ unsigned int max_depth;
+ unsigned int level;
+
+ if (npages == 0)
+ return NULL;
+
+ *res_page = NULL;
+
+ name_hash = f2fs_dentry_hash(name, namelen);
+ max_depth = F2FS_I(dir)->i_current_depth;
+
+ for (level = 0; level < max_depth; level++) {
+ de = find_in_level(dir, level, name,
+ namelen, name_hash, res_page);
+ if (de)
+ break;
+ }
+ if (!de && F2FS_I(dir)->chash != name_hash) {
+ F2FS_I(dir)->chash = name_hash;
+ F2FS_I(dir)->clevel = level - 1;
+ }
+ return de;
+}
+
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+{
+ struct page *page = NULL;
+ struct f2fs_dir_entry *de = NULL;
+ struct f2fs_dentry_block *dentry_blk = NULL;
+
+ page = get_lock_data_page(dir, 0);
+ if (IS_ERR(page))
+ return NULL;
+
+ dentry_blk = kmap(page);
+ de = &dentry_blk->dentry[1];
+ *p = page;
+ unlock_page(page);
+ return de;
+}
+
+ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+{
+ ino_t res = 0;
+ struct f2fs_dir_entry *de;
+ struct page *page;
+
+ de = f2fs_find_entry(dir, qstr, &page);
+ if (de) {
+ res = le32_to_cpu(de->ino);
+ kunmap(page);
+ f2fs_put_page(page, 0);
+ }
+
+ return res;
+}
+
+void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
+ struct page *page, struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+
+ mutex_lock_op(sbi, DENTRY_OPS);
+ lock_page(page);
+ wait_on_page_writeback(page);
+ de->ino = cpu_to_le32(inode->i_ino);
+ set_de_type(de, inode);
+ kunmap(page);
+ set_page_dirty(page);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(dir);
+
+ /* update parent inode number before releasing dentry page */
+ F2FS_I(inode)->i_pino = dir->i_ino;
+
+ f2fs_put_page(page, 1);
+ mutex_unlock_op(sbi, DENTRY_OPS);
+}
+
+void init_dent_inode(struct dentry *dentry, struct page *ipage)
+{
+ struct f2fs_node *rn;
+
+ if (IS_ERR(ipage))
+ return;
+
+ wait_on_page_writeback(ipage);
+
+ /* copy dentry info. to this inode page */
+ rn = (struct f2fs_node *)page_address(ipage);
+ rn->i.i_namelen = cpu_to_le32(dentry->d_name.len);
+ memcpy(rn->i.i_name, dentry->d_name.name, dentry->d_name.len);
+ set_page_dirty(ipage);
+}
+
+static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+
+ if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ int err;
+ err = new_inode_page(inode, dentry);
+ if (err)
+ return err;
+
+ if (S_ISDIR(inode->i_mode)) {
+ err = f2fs_make_empty(inode, dir);
+ if (err) {
+ remove_inode_page(inode);
+ return err;
+ }
+ }
+
+ err = f2fs_init_acl(inode, dir);
+ if (err) {
+ remove_inode_page(inode);
+ return err;
+ }
+ } else {
+ struct page *ipage;
+ ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
+ init_dent_inode(dentry, ipage);
+ f2fs_put_page(ipage, 1);
+ }
+ if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
+ inc_nlink(inode);
+ f2fs_write_inode(inode, NULL);
+ }
+ return 0;
+}
+
+static void update_parent_metadata(struct inode *dir, struct inode *inode,
+ unsigned int current_depth)
+{
+ bool need_dir_update = false;
+
+ if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (S_ISDIR(inode->i_mode)) {
+ inc_nlink(dir);
+ need_dir_update = true;
+ }
+ clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ }
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ if (F2FS_I(dir)->i_current_depth != current_depth) {
+ F2FS_I(dir)->i_current_depth = current_depth;
+ need_dir_update = true;
+ }
+
+ if (need_dir_update)
+ f2fs_write_inode(dir, NULL);
+ else
+ mark_inode_dirty(dir);
+
+ if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
+ clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+}
+
+static int room_for_filename(struct f2fs_dentry_block *dentry_blk, int slots)
+{
+ int bit_start = 0;
+ int zero_start, zero_end;
+next:
+ zero_start = find_next_zero_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK,
+ bit_start);
+ if (zero_start >= NR_DENTRY_IN_BLOCK)
+ return NR_DENTRY_IN_BLOCK;
+
+ zero_end = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK,
+ zero_start);
+ if (zero_end - zero_start >= slots)
+ return zero_start;
+
+ bit_start = zero_end + 1;
+
+ if (zero_end + 1 >= NR_DENTRY_IN_BLOCK)
+ return NR_DENTRY_IN_BLOCK;
+ goto next;
+}
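+
+/*
+ * Slot arithmetic sketch, assuming the 8-byte dentry slots defined by
+ * F2FS_SLOT_LEN: an 11-character name needs GET_DENTRY_SLOTS(11) == 2
+ * consecutive slots, so room_for_filename() looks for a run of two clear
+ * bits in the dentry bitmap and f2fs_add_link() below sets both of them.
+ */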
+
+int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+{
+ unsigned int bit_pos;
+ unsigned int level;
+ unsigned int current_depth;
+ unsigned long bidx, block;
+ f2fs_hash_t dentry_hash;
+ struct f2fs_dir_entry *de;
+ unsigned int nbucket, nblock;
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ struct page *dentry_page = NULL;
+ struct f2fs_dentry_block *dentry_blk = NULL;
+ int slots = GET_DENTRY_SLOTS(namelen);
+ int err = 0;
+ int i;
+
+ dentry_hash = f2fs_dentry_hash(name, dentry->d_name.len);
+ level = 0;
+ current_depth = F2FS_I(dir)->i_current_depth;
+ if (F2FS_I(dir)->chash == dentry_hash) {
+ level = F2FS_I(dir)->clevel;
+ F2FS_I(dir)->chash = 0;
+ }
+
+start:
+ if (current_depth == MAX_DIR_HASH_DEPTH)
+ return -ENOSPC;
+
+ /* Increase the depth, if required */
+ if (level == current_depth)
+ ++current_depth;
+
+ nbucket = dir_buckets(level);
+ nblock = bucket_blocks(level);
+
+ bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
+
+ for (block = bidx; block <= (bidx + nblock - 1); block++) {
+ mutex_lock_op(sbi, DENTRY_OPS);
+ dentry_page = get_new_data_page(dir, block, true);
+ if (IS_ERR(dentry_page)) {
+ mutex_unlock_op(sbi, DENTRY_OPS);
+ return PTR_ERR(dentry_page);
+ }
+
+ dentry_blk = kmap(dentry_page);
+ bit_pos = room_for_filename(dentry_blk, slots);
+ if (bit_pos < NR_DENTRY_IN_BLOCK)
+ goto add_dentry;
+
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ mutex_unlock_op(sbi, DENTRY_OPS);
+ }
+
+ /* Move to next level to find the empty slot for new dentry */
+ ++level;
+ goto start;
+add_dentry:
+ err = init_inode_metadata(inode, dentry);
+ if (err)
+ goto fail;
+
+ wait_on_page_writeback(dentry_page);
+
+ de = &dentry_blk->dentry[bit_pos];
+ de->hash_code = dentry_hash;
+ de->name_len = cpu_to_le16(namelen);
+ memcpy(dentry_blk->filename[bit_pos], name, namelen);
+ de->ino = cpu_to_le32(inode->i_ino);
+ set_de_type(de, inode);
+ for (i = 0; i < slots; i++)
+ test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+ set_page_dirty(dentry_page);
+
+ update_parent_metadata(dir, inode, current_depth);
+
+ /* update parent inode number before releasing dentry page */
+ F2FS_I(inode)->i_pino = dir->i_ino;
+fail:
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ mutex_unlock_op(sbi, DENTRY_OPS);
+ return err;
+}
+
+/*
+ * It only removes the dentry from the dentry page; the corresponding name
+ * entry in the name page does not need to be touched during deletion.
+ */
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *inode)
+{
+ struct f2fs_dentry_block *dentry_blk;
+ unsigned int bit_pos;
+ struct address_space *mapping = page->mapping;
+ struct inode *dir = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ void *kaddr = page_address(page);
+ int i;
+
+ mutex_lock_op(sbi, DENTRY_OPS);
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+
+ dentry_blk = (struct f2fs_dentry_block *)kaddr;
+ bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
+ for (i = 0; i < slots; i++)
+ test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+
+ /* Let's check and deallocate this dentry page */
+ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK,
+ 0);
+ kunmap(page); /* pairs with the kmap() done in f2fs_find_entry */
+ set_page_dirty(page);
+
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+
+ if (inode && S_ISDIR(inode->i_mode)) {
+ drop_nlink(dir);
+ f2fs_write_inode(dir, NULL);
+ } else {
+ mark_inode_dirty(dir);
+ }
+
+ if (inode) {
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ drop_nlink(inode);
+ if (S_ISDIR(inode->i_mode)) {
+ drop_nlink(inode);
+ i_size_write(inode, 0);
+ }
+ f2fs_write_inode(inode, NULL);
+ if (inode->i_nlink == 0)
+ add_orphan_inode(sbi, inode->i_ino);
+ }
+
+ if (bit_pos == NR_DENTRY_IN_BLOCK) {
+ truncate_hole(dir, page->index, page->index + 1);
+ clear_page_dirty_for_io(page);
+ ClearPageUptodate(page);
+ dec_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_dec_dirty_dents(dir);
+ }
+ f2fs_put_page(page, 1);
+
+ mutex_unlock_op(sbi, DENTRY_OPS);
+}
+
+int f2fs_make_empty(struct inode *inode, struct inode *parent)
+{
+ struct page *dentry_page;
+ struct f2fs_dentry_block *dentry_blk;
+ struct f2fs_dir_entry *de;
+ void *kaddr;
+
+ dentry_page = get_new_data_page(inode, 0, true);
+ if (IS_ERR(dentry_page))
+ return PTR_ERR(dentry_page);
+
+ kaddr = kmap_atomic(dentry_page);
+ dentry_blk = (struct f2fs_dentry_block *)kaddr;
+
+ de = &dentry_blk->dentry[0];
+ de->name_len = cpu_to_le16(1);
+ de->hash_code = 0;
+ de->ino = cpu_to_le32(inode->i_ino);
+ memcpy(dentry_blk->filename[0], ".", 1);
+ set_de_type(de, inode);
+
+ de = &dentry_blk->dentry[1];
+ de->hash_code = 0;
+ de->name_len = cpu_to_le16(2);
+ de->ino = cpu_to_le32(parent->i_ino);
+ memcpy(dentry_blk->filename[1], "..", 2);
+ set_de_type(de, inode);
+
+ test_and_set_bit_le(0, &dentry_blk->dentry_bitmap);
+ test_and_set_bit_le(1, &dentry_blk->dentry_bitmap);
+ kunmap_atomic(kaddr);
+
+ set_page_dirty(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ return 0;
+}
+
+bool f2fs_empty_dir(struct inode *dir)
+{
+ unsigned long bidx;
+ struct page *dentry_page;
+ unsigned int bit_pos;
+ struct f2fs_dentry_block *dentry_blk;
+ unsigned long nblock = dir_blocks(dir);
+
+ for (bidx = 0; bidx < nblock; bidx++) {
+ void *kaddr;
+ dentry_page = get_lock_data_page(dir, bidx);
+ if (IS_ERR(dentry_page)) {
+ if (PTR_ERR(dentry_page) == -ENOENT)
+ continue;
+ else
+ return false;
+ }
+
+ kaddr = kmap_atomic(dentry_page);
+ dentry_blk = (struct f2fs_dentry_block *)kaddr;
+ if (bidx == 0)
+ bit_pos = 2;
+ else
+ bit_pos = 0;
+ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK,
+ bit_pos);
+ kunmap_atomic(kaddr);
+
+ f2fs_put_page(dentry_page, 1);
+
+ if (bit_pos < NR_DENTRY_IN_BLOCK)
+ return false;
+ }
+ return true;
+}
+
+static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+ unsigned long pos = file->f_pos;
+ struct inode *inode = file->f_dentry->d_inode;
+ unsigned long npages = dir_blocks(inode);
+ unsigned char *types = NULL;
+ unsigned int bit_pos = 0, start_bit_pos = 0;
+ int over = 0;
+ struct f2fs_dentry_block *dentry_blk = NULL;
+ struct f2fs_dir_entry *de = NULL;
+ struct page *dentry_page = NULL;
+ unsigned int n = 0;
+ unsigned char d_type = DT_UNKNOWN;
+ int slots;
+
+ types = f2fs_filetype_table;
+ bit_pos = (pos % NR_DENTRY_IN_BLOCK);
+ n = (pos / NR_DENTRY_IN_BLOCK);
+
+ for ( ; n < npages; n++) {
+ dentry_page = get_lock_data_page(inode, n);
+ if (IS_ERR(dentry_page))
+ continue;
+
+ start_bit_pos = bit_pos;
+ dentry_blk = kmap(dentry_page);
+ while (bit_pos < NR_DENTRY_IN_BLOCK) {
+ d_type = DT_UNKNOWN;
+ bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+ NR_DENTRY_IN_BLOCK,
+ bit_pos);
+ if (bit_pos >= NR_DENTRY_IN_BLOCK)
+ break;
+
+ de = &dentry_blk->dentry[bit_pos];
+ if (types && de->file_type < F2FS_FT_MAX)
+ d_type = types[de->file_type];
+
+ over = filldir(dirent,
+ dentry_blk->filename[bit_pos],
+ le16_to_cpu(de->name_len),
+ (n * NR_DENTRY_IN_BLOCK) + bit_pos,
+ le32_to_cpu(de->ino), d_type);
+ if (over) {
+ file->f_pos += bit_pos - start_bit_pos;
+ goto success;
+ }
+ slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ bit_pos += slots;
+ }
+ bit_pos = 0;
+ file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ dentry_page = NULL;
+ }
+success:
+ if (dentry_page && !IS_ERR(dentry_page)) {
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ }
+
+ return 0;
+}
+
+const struct file_operations f2fs_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .readdir = f2fs_readdir,
+ .fsync = f2fs_sync_file,
+ .unlocked_ioctl = f2fs_ioctl,
+};
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
new file mode 100644
index 00000000000..a18d63db2fb
--- /dev/null
+++ b/fs/f2fs/f2fs.h
@@ -0,0 +1,1083 @@
+/*
+ * fs/f2fs/f2fs.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_F2FS_H
+#define _LINUX_F2FS_H
+
+#include <linux/types.h>
+#include <linux/page-flags.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/magic.h>
+
+/*
+ * For mount options
+ */
+#define F2FS_MOUNT_BG_GC 0x00000001
+#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
+#define F2FS_MOUNT_DISCARD 0x00000004
+#define F2FS_MOUNT_NOHEAP 0x00000008
+#define F2FS_MOUNT_XATTR_USER 0x00000010
+#define F2FS_MOUNT_POSIX_ACL 0x00000020
+#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
+
+#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
+#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
+#define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
+
+#define ver_after(a, b) (typecheck(unsigned long long, a) && \
+ typecheck(unsigned long long, b) && \
+ ((long long)((a) - (b)) > 0))
+
+typedef u64 block_t;
+typedef u32 nid_t;
+
+struct f2fs_mount_info {
+ unsigned int opt;
+};
+
+static inline __u32 f2fs_crc32(void *buff, size_t len)
+{
+ return crc32_le(F2FS_SUPER_MAGIC, buff, len);
+}
+
+static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
+{
+ return f2fs_crc32(buff, buff_size) == blk_crc;
+}
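+
+/*
+ * Usage sketch (illustrative names only): a block that stores a checksum
+ * produced by f2fs_crc32(buf, len) can later be verified with
+ *	if (!f2fs_crc_valid(stored_crc, buf, len))
+ *		return -EINVAL;
+ */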
+
+/*
+ * For checkpoint manager
+ */
+enum {
+ NAT_BITMAP,
+ SIT_BITMAP
+};
+
+/* for the list of orphan inodes */
+struct orphan_inode_entry {
+ struct list_head list; /* list head */
+ nid_t ino; /* inode number */
+};
+
+/* for the list of directory inodes */
+struct dir_inode_entry {
+ struct list_head list; /* list head */
+ struct inode *inode; /* vfs inode pointer */
+};
+
+/* for the list of fsync inodes, used only during recovery */
+struct fsync_inode_entry {
+ struct list_head list; /* list head */
+ struct inode *inode; /* vfs inode pointer */
+ block_t blkaddr; /* block address locating the last inode */
+};
+
+#define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
+#define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
+
+#define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
+#define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
+#define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
+#define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
+
+static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
+{
+ int before = nats_in_cursum(rs);
+ rs->n_nats = cpu_to_le16(before + i);
+ return before;
+}
+
+static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
+{
+ int before = sits_in_cursum(rs);
+ rs->n_sits = cpu_to_le16(before + i);
+ return before;
+}
+
+/*
+ * For INODE and NODE manager
+ */
+#define XATTR_NODE_OFFSET (-1) /*
+ * store xattrs to one node block per
+ * file keeping -1 as its node offset to
+ * distinguish from index node blocks.
+ */
+#define RDONLY_NODE 1 /*
+ * specify a read-only mode when getting
+ * a node block. 0 is read-write mode.
+ * used by get_dnode_of_data().
+ */
+#define F2FS_LINK_MAX 32000 /* maximum link count per file */
+
+/* for in-memory extent cache entry */
+struct extent_info {
+ rwlock_t ext_lock; /* rwlock for consistency */
+ unsigned int fofs; /* start offset in a file */
+ u32 blk_addr; /* start block address of the extent */
+ unsigned int len; /* length of the extent */
+};
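+
+/*
+ * As a sketch of what one cached extent means: ext = { fofs = 100,
+ * blk_addr = 5000, len = 8 } says that file blocks 100..107 live at the
+ * consecutive on-disk blocks 5000..5007; check_extent_cache() and
+ * update_extent_cache() in data.c consult and maintain this entry.
+ */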
+
+/*
+ * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
+ */
+#define FADVISE_COLD_BIT 0x01
+
+struct f2fs_inode_info {
+ struct inode vfs_inode; /* serve a vfs inode */
+ unsigned long i_flags; /* keep an inode flags for ioctl */
+ unsigned char i_advise; /* use to give file attribute hints */
+ unsigned int i_current_depth; /* use only in directory structure */
+ unsigned int i_pino; /* parent inode number */
+ umode_t i_acl_mode; /* keep file acl mode temporarily */
+
+ /* Used below internally in f2fs */
+ unsigned long flags; /* use to pass per-file flags */
+ unsigned long long data_version; /* latest version of data for fsync */
+ atomic_t dirty_dents; /* # of dirty dentry pages */
+ f2fs_hash_t chash; /* hash value of given file name */
+ unsigned int clevel; /* maximum level of given file name */
+ nid_t i_xattr_nid; /* node id that contains xattrs */
+ struct extent_info ext; /* in-memory extent cache entry */
+};
+
+static inline void get_extent_info(struct extent_info *ext,
+ struct f2fs_extent i_ext)
+{
+ write_lock(&ext->ext_lock);
+ ext->fofs = le32_to_cpu(i_ext.fofs);
+ ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
+ ext->len = le32_to_cpu(i_ext.len);
+ write_unlock(&ext->ext_lock);
+}
+
+static inline void set_raw_extent(struct extent_info *ext,
+ struct f2fs_extent *i_ext)
+{
+ read_lock(&ext->ext_lock);
+ i_ext->fofs = cpu_to_le32(ext->fofs);
+ i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
+ i_ext->len = cpu_to_le32(ext->len);
+ read_unlock(&ext->ext_lock);
+}
+
+struct f2fs_nm_info {
+ block_t nat_blkaddr; /* base disk address of NAT */
+ nid_t max_nid; /* maximum possible node ids */
+ nid_t init_scan_nid; /* the first nid to be scanned */
+ nid_t next_scan_nid; /* the next nid to be scanned */
+
+ /* NAT cache management */
+ struct radix_tree_root nat_root;/* root of the nat entry cache */
+ rwlock_t nat_tree_lock; /* protect the nat entry cache */
+ unsigned int nat_cnt; /* the # of cached nat entries */
+ struct list_head nat_entries; /* cached nat entry list (clean) */
+ struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */
+
+ /* free node ids management */
+ struct list_head free_nid_list; /* a list for free nids */
+ spinlock_t free_nid_list_lock; /* protect free nid list */
+ unsigned int fcnt; /* the number of free node id */
+ struct mutex build_lock; /* lock for build free nids */
+
+ /* for checkpoint */
+ char *nat_bitmap; /* NAT bitmap pointer */
+ int bitmap_size; /* bitmap size */
+};
+
+/*
+ * This structure is used as a function parameter.
+ * All the information is dedicated to a given direct node block determined
+ * by the data offset in a file.
+ */
+struct dnode_of_data {
+ struct inode *inode; /* vfs inode pointer */
+ struct page *inode_page; /* its inode page, NULL is possible */
+ struct page *node_page; /* cached direct node page */
+ nid_t nid; /* node id of the direct node block */
+ unsigned int ofs_in_node; /* data offset in the node page */
+ bool inode_page_locked; /* inode page is locked or not */
+ block_t data_blkaddr; /* block address of the data block */
+};
+
+static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
+ struct page *ipage, struct page *npage, nid_t nid)
+{
+ dn->inode = inode;
+ dn->inode_page = ipage;
+ dn->node_page = npage;
+ dn->nid = nid;
+ dn->inode_page_locked = 0;
+}
+
+/*
+ * For SIT manager
+ *
+ * By default, there are 6 active log areas across the whole main area.
+ * When considering hot and cold data separation to reduce cleaning overhead,
+ * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
+ * respectively.
+ * In the current design, you should not change these numbers intentionally.
+ * Instead, the active_logs=x mount option lets 2, 4, or 6 logs be used
+ * individually according to the underlying devices (default: 6).
+ * Just in case, the on-disk layout covers a maximum of 16 logs, consisting
+ * of 8 for data and 8 for node logs.
+ */
+#define NR_CURSEG_DATA_TYPE (3)
+#define NR_CURSEG_NODE_TYPE (3)
+#define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
+
+enum {
+ CURSEG_HOT_DATA = 0, /* directory entry blocks */
+ CURSEG_WARM_DATA, /* data blocks */
+ CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
+ CURSEG_HOT_NODE, /* direct node blocks of directory files */
+ CURSEG_WARM_NODE, /* direct node blocks of normal files */
+ CURSEG_COLD_NODE, /* indirect node blocks */
+ NO_CHECK_TYPE
+};
+
+struct f2fs_sm_info {
+ struct sit_info *sit_info; /* whole segment information */
+ struct free_segmap_info *free_info; /* free segment information */
+ struct dirty_seglist_info *dirty_info; /* dirty segment information */
+ struct curseg_info *curseg_array; /* active segment information */
+
+ struct list_head wblist_head; /* list of under-writeback pages */
+ spinlock_t wblist_lock; /* lock for checkpoint */
+
+ block_t seg0_blkaddr; /* block address of 0'th segment */
+ block_t main_blkaddr; /* start block address of main area */
+ block_t ssa_blkaddr; /* start block address of SSA area */
+
+ unsigned int segment_count; /* total # of segments */
+ unsigned int main_segments; /* # of segments in main area */
+ unsigned int reserved_segments; /* # of reserved segments */
+ unsigned int ovp_segments; /* # of overprovision segments */
+};
+
+/*
+ * For directory operation
+ */
+#define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1)
+#define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2)
+#define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3)
+#define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4)
+#define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5)
+
+/*
+ * For superblock
+ */
+/*
+ * COUNT_TYPE for monitoring
+ *
+ * f2fs monitors the number of several block types, such as blocks under
+ * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
+ */
+enum count_type {
+ F2FS_WRITEBACK,
+ F2FS_DIRTY_DENTS,
+ F2FS_DIRTY_NODES,
+ F2FS_DIRTY_META,
+ NR_COUNT_TYPE,
+};
+
+/*
+ * FS_LOCK nesting subclasses for the lock validator:
+ *
+ * The locking order between these classes is
+ * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
+ * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
+ */
+enum lock_type {
+ RENAME, /* for renaming operations */
+ DENTRY_OPS, /* for directory operations */
+ DATA_WRITE, /* for data write */
+ DATA_NEW, /* for data allocation */
+ DATA_TRUNC, /* for data truncate */
+ NODE_NEW, /* for node allocation */
+ NODE_TRUNC, /* for node truncate */
+ NODE_WRITE, /* for node write */
+ NR_LOCK_TYPE,
+};
+
+/*
+ * The below are the page types of bios used in submit_bio().
+ * The available types are:
+ * DATA User data pages. It operates in async mode.
+ * NODE Node pages. It operates in async mode.
+ * META FS metadata pages such as SIT, NAT, CP.
+ * NR_PAGE_TYPE The number of page types.
+ * META_FLUSH Make sure the previous pages are written
+ * while waiting for the bio's completion.
+ * ... Can only be used with META.
+ */
+enum page_type {
+ DATA,
+ NODE,
+ META,
+ NR_PAGE_TYPE,
+ META_FLUSH,
+};
+
+struct f2fs_sb_info {
+ struct super_block *sb; /* pointer to VFS super block */
+ struct buffer_head *raw_super_buf; /* buffer head of raw sb */
+ struct f2fs_super_block *raw_super; /* raw super block pointer */
+ int s_dirty; /* dirty flag for checkpoint */
+
+ /* for node-related operations */
+ struct f2fs_nm_info *nm_info; /* node manager */
+ struct inode *node_inode; /* cache node blocks */
+
+ /* for segment-related operations */
+ struct f2fs_sm_info *sm_info; /* segment manager */
+ struct bio *bio[NR_PAGE_TYPE]; /* bios to merge */
+ sector_t last_block_in_bio[NR_PAGE_TYPE]; /* last block number */
+ struct rw_semaphore bio_sem; /* IO semaphore */
+
+ /* for checkpoint */
+ struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ struct inode *meta_inode; /* cache meta blocks */
+ struct mutex cp_mutex; /* for checkpoint procedure */
+ struct mutex fs_lock[NR_LOCK_TYPE]; /* for blocking FS operations */
+ struct mutex write_inode; /* mutex for write inode */
+ struct mutex writepages; /* mutex for writepages() */
+ int por_doing; /* whether recovery is in progress */
+
+ /* for orphan inode management */
+ struct list_head orphan_inode_list; /* orphan inode list */
+ struct mutex orphan_inode_mutex; /* for orphan inode list */
+ unsigned int n_orphans; /* # of orphan inodes */
+
+ /* for directory inode management */
+ struct list_head dir_inode_list; /* dir inode list */
+ spinlock_t dir_inode_lock; /* for dir inode list lock */
+ unsigned int n_dirty_dirs; /* # of dir inodes */
+
+ /* basic file system units */
+ unsigned int log_sectors_per_block; /* log2 sectors per block */
+ unsigned int log_blocksize; /* log2 block size */
+ unsigned int blocksize; /* block size */
+ unsigned int root_ino_num; /* root inode number*/
+ unsigned int node_ino_num; /* node inode number*/
+ unsigned int meta_ino_num; /* meta inode number*/
+ unsigned int log_blocks_per_seg; /* log2 blocks per segment */
+ unsigned int blocks_per_seg; /* blocks per segment */
+ unsigned int segs_per_sec; /* segments per section */
+ unsigned int secs_per_zone; /* sections per zone */
+ unsigned int total_sections; /* total section count */
+ unsigned int total_node_count; /* total node block count */
+ unsigned int total_valid_node_count; /* valid node block count */
+ unsigned int total_valid_inode_count; /* valid inode count */
+ int active_logs; /* # of active logs */
+
+ block_t user_block_count; /* # of user blocks */
+ block_t total_valid_block_count; /* # of valid blocks */
+ block_t alloc_valid_block_count; /* # of allocated blocks */
+ block_t last_valid_block_count; /* for recovery */
+ u32 s_next_generation; /* for NFS support */
+ atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */
+
+ struct f2fs_mount_info mount_opt; /* mount options */
+
+ /* for cleaning operations */
+ struct mutex gc_mutex; /* mutex for GC */
+ struct f2fs_gc_kthread *gc_thread; /* GC thread */
+
+ /*
+ * for stat information.
+ * In each two-element array below, one entry is for the LFS mode
+ * and the other is for the SSR mode.
+ */
+ struct f2fs_stat_info *stat_info; /* FS status information */
+ unsigned int segment_count[2]; /* # of allocated segments */
+ unsigned int block_count[2]; /* # of allocated blocks */
+ unsigned int last_victim[2]; /* last victim segment # */
+ int total_hit_ext, read_hit_ext; /* extent cache hit ratio */
+ int bg_gc; /* background gc calls */
+ spinlock_t stat_lock; /* lock for stat operations */
+};
+
+/*
+ * Inline functions
+ */
+static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
+{
+ return container_of(inode, struct f2fs_inode_info, vfs_inode);
+}
+
+static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
+{
+ return (struct f2fs_super_block *)(sbi->raw_super);
+}
+
+static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
+{
+ return (struct f2fs_checkpoint *)(sbi->ckpt);
+}
+
+static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
+{
+ return (struct f2fs_nm_info *)(sbi->nm_info);
+}
+
+static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
+{
+ return (struct f2fs_sm_info *)(sbi->sm_info);
+}
+
+static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
+{
+ return (struct sit_info *)(SM_I(sbi)->sit_info);
+}
+
+static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
+{
+ return (struct free_segmap_info *)(SM_I(sbi)->free_info);
+}
+
+static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
+{
+ return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
+}
+
+static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
+{
+ sbi->s_dirty = 1;
+}
+
+static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
+{
+ sbi->s_dirty = 0;
+}
+
+static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return ckpt_flags & f;
+}
+
+static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ ckpt_flags |= f;
+ cp->ckpt_flags = cpu_to_le32(ckpt_flags);
+}
+
+static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ ckpt_flags &= (~f);
+ cp->ckpt_flags = cpu_to_le32(ckpt_flags);
+}
+
+static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+{
+ mutex_lock_nested(&sbi->fs_lock[t], t);
+}
+
+static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+{
+ mutex_unlock(&sbi->fs_lock[t]);
+}
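+
+/*
+ * For example, f2fs_write_begin() in data.c brackets its block reservation
+ * with mutex_lock_op(sbi, DATA_NEW) / mutex_unlock_op(sbi, DATA_NEW), so
+ * the lock validator can check that it nests below DENTRY_OPS and
+ * DATA_WRITE as documented above enum lock_type.
+ */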
+
+/*
+ * Check whether the given nid is within node id range.
+ */
+static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ BUG_ON((nid >= NM_I(sbi)->max_nid));
+}
+
+#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
+
+/*
+ * Check whether the inode has blocks or not
+ */
+static inline int F2FS_HAS_BLOCKS(struct inode *inode)
+{
+ if (F2FS_I(inode)->i_xattr_nid)
+ return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
+ else
+ return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
+}
+
+static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, blkcnt_t count)
+{
+ block_t valid_block_count;
+
+ spin_lock(&sbi->stat_lock);
+ valid_block_count =
+ sbi->total_valid_block_count + (block_t)count;
+ if (valid_block_count > sbi->user_block_count) {
+ spin_unlock(&sbi->stat_lock);
+ return false;
+ }
+ inode->i_blocks += count;
+ sbi->total_valid_block_count = valid_block_count;
+ sbi->alloc_valid_block_count += (block_t)count;
+ spin_unlock(&sbi->stat_lock);
+ return true;
+}
+
+static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
+ struct inode *inode,
+ blkcnt_t count)
+{
+ spin_lock(&sbi->stat_lock);
+ BUG_ON(sbi->total_valid_block_count < (block_t) count);
+ BUG_ON(inode->i_blocks < count);
+ inode->i_blocks -= count;
+ sbi->total_valid_block_count -= (block_t)count;
+ spin_unlock(&sbi->stat_lock);
+ return 0;
+}
+
+static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
+{
+ atomic_inc(&sbi->nr_pages[count_type]);
+ F2FS_SET_SB_DIRT(sbi);
+}
+
+static inline void inode_inc_dirty_dents(struct inode *inode)
+{
+ atomic_inc(&F2FS_I(inode)->dirty_dents);
+}
+
+static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
+{
+ atomic_dec(&sbi->nr_pages[count_type]);
+}
+
+static inline void inode_dec_dirty_dents(struct inode *inode)
+{
+ atomic_dec(&F2FS_I(inode)->dirty_dents);
+}
+
+static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
+{
+ return atomic_read(&sbi->nr_pages[count_type]);
+}
+
+static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
+{
+ block_t ret;
+ spin_lock(&sbi->stat_lock);
+ ret = sbi->total_valid_block_count;
+ spin_unlock(&sbi->stat_lock);
+ return ret;
+}
+
+static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+
+ /* return NAT or SIT bitmap */
+ if (flag == NAT_BITMAP)
+ return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+ else if (flag == SIT_BITMAP)
+ return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+
+ return 0;
+}
+
+static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ int offset = (flag == NAT_BITMAP) ?
+ le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
+ return &ckpt->sit_nat_version_bitmap + offset;
+}
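+
+/*
+ * Layout sketch for __bitmap_ptr(), with a made-up size: if
+ * sit_ver_bitmap_bytesize were 64, the SIT version bitmap would be served
+ * from offset 0 of sit_nat_version_bitmap and the NAT version bitmap from
+ * offset 64, i.e. the two bitmaps are stored back to back inside the
+ * checkpoint block.
+ */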
+
+static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr;
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);
+
+ start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ /*
+ * An odd-numbered checkpoint should be placed in cp segment 0,
+ * and an even-numbered one in cp segment 1.
+ */
+ if (!(ckpt_version & 1))
+ start_addr += sbi->blocks_per_seg;
+
+ return start_addr;
+}
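+
+/*
+ * Example for __start_cp_addr(), with made-up numbers: if cp_blkaddr is
+ * 512 and blocks_per_seg is 512, a checkpoint whose version is odd
+ * (e.g. 5) starts at block 512 (cp segment 0), while an even version
+ * (e.g. 6) starts at 512 + 512 = 1024 (cp segment 1). The two cp segments
+ * are used alternately so the previous stable checkpoint is never
+ * overwritten.
+ */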
+
+static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
+{
+ return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
+}
+
+static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode,
+ unsigned int count)
+{
+ block_t valid_block_count;
+ unsigned int valid_node_count;
+
+ spin_lock(&sbi->stat_lock);
+
+ valid_block_count = sbi->total_valid_block_count + (block_t)count;
+ sbi->alloc_valid_block_count += (block_t)count;
+ valid_node_count = sbi->total_valid_node_count + count;
+
+ if (valid_block_count > sbi->user_block_count) {
+ spin_unlock(&sbi->stat_lock);
+ return false;
+ }
+
+ if (valid_node_count > sbi->total_node_count) {
+ spin_unlock(&sbi->stat_lock);
+ return false;
+ }
+
+ if (inode)
+ inode->i_blocks += count;
+ sbi->total_valid_node_count = valid_node_count;
+ sbi->total_valid_block_count = valid_block_count;
+ spin_unlock(&sbi->stat_lock);
+
+ return true;
+}
+
+static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode,
+ unsigned int count)
+{
+ spin_lock(&sbi->stat_lock);
+
+ BUG_ON(sbi->total_valid_block_count < count);
+ BUG_ON(sbi->total_valid_node_count < count);
+ BUG_ON(inode->i_blocks < count);
+
+ inode->i_blocks -= count;
+ sbi->total_valid_node_count -= count;
+ sbi->total_valid_block_count -= (block_t)count;
+
+ spin_unlock(&sbi->stat_lock);
+}
+
+static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
+{
+ unsigned int ret;
+ spin_lock(&sbi->stat_lock);
+ ret = sbi->total_valid_node_count;
+ spin_unlock(&sbi->stat_lock);
+ return ret;
+}
+
+static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
+{
+ spin_lock(&sbi->stat_lock);
+ BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
+ sbi->total_valid_inode_count++;
+ spin_unlock(&sbi->stat_lock);
+}
+
+static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
+{
+ spin_lock(&sbi->stat_lock);
+ BUG_ON(!sbi->total_valid_inode_count);
+ sbi->total_valid_inode_count--;
+ spin_unlock(&sbi->stat_lock);
+ return 0;
+}
+
+static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
+{
+ unsigned int ret;
+ spin_lock(&sbi->stat_lock);
+ ret = sbi->total_valid_inode_count;
+ spin_unlock(&sbi->stat_lock);
+ return ret;
+}
+
+static inline void f2fs_put_page(struct page *page, int unlock)
+{
+ if (!page || IS_ERR(page))
+ return;
+
+ if (unlock) {
+ BUG_ON(!PageLocked(page));
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
+static inline void f2fs_put_dnode(struct dnode_of_data *dn)
+{
+ if (dn->node_page)
+ f2fs_put_page(dn->node_page, 1);
+ if (dn->inode_page && dn->node_page != dn->inode_page)
+ f2fs_put_page(dn->inode_page, 0);
+ dn->node_page = NULL;
+ dn->inode_page = NULL;
+}
+
+static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
+ size_t size, void (*ctor)(void *))
+{
+ return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
+}
+
+#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
+
+static inline bool IS_INODE(struct page *page)
+{
+ struct f2fs_node *p = (struct f2fs_node *)page_address(page);
+ return RAW_IS_INODE(p);
+}
+
+static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
+{
+ return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
+}
+
+static inline block_t datablock_addr(struct page *node_page,
+ unsigned int offset)
+{
+ struct f2fs_node *raw_node;
+ __le32 *addr_array;
+ raw_node = (struct f2fs_node *)page_address(node_page);
+ addr_array = blkaddr_in_node(raw_node);
+ return le32_to_cpu(addr_array[offset]);
+}
+
+static inline int f2fs_test_bit(unsigned int nr, char *addr)
+{
+ int mask;
+
+ addr += (nr >> 3);
+ mask = 1 << (7 - (nr & 0x07));
+ return mask & *addr;
+}
+
+static inline int f2fs_set_bit(unsigned int nr, char *addr)
+{
+ int mask;
+ int ret;
+
+ addr += (nr >> 3);
+ mask = 1 << (7 - (nr & 0x07));
+ ret = mask & *addr;
+ *addr |= mask;
+ return ret;
+}
+
+static inline int f2fs_clear_bit(unsigned int nr, char *addr)
+{
+ int mask;
+ int ret;
+
+ addr += (nr >> 3);
+ mask = 1 << (7 - (nr & 0x07));
+ ret = mask & *addr;
+ *addr &= ~mask;
+ return ret;
+}
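+
+/*
+ * Worked example for the byte-oriented bit helpers above, which use an
+ * MSB-first bit order within each byte (presumably to match the on-disk
+ * bitmap format): for nr = 10 the byte index is 10 >> 3 = 1 and the mask
+ * is 1 << (7 - (10 & 7)) = 0x20, so f2fs_set_bit(10, addr) ORs 0x20 into
+ * addr[1], unlike the generic set_bit()/test_bit(), which operate on
+ * unsigned longs in CPU bit order.
+ */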
+
+/* used for f2fs_inode_info->flags */
+enum {
+ FI_NEW_INODE, /* indicate newly allocated inode */
+ FI_NEED_CP, /* need to do checkpoint during fsync */
+ FI_INC_LINK, /* need to increment i_nlink */
+ FI_ACL_MODE, /* indicate acl mode */
+ FI_NO_ALLOC, /* should not allocate any blocks */
+};
+
+static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+{
+ set_bit(flag, &fi->flags);
+}
+
+static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+{
+ return test_bit(flag, &fi->flags);
+}
+
+static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+{
+ clear_bit(flag, &fi->flags);
+}
+
+static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+{
+ fi->i_acl_mode = mode;
+ set_inode_flag(fi, FI_ACL_MODE);
+}
+
+static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+{
+ if (is_inode_flag_set(fi, FI_ACL_MODE)) {
+ clear_inode_flag(fi, FI_ACL_MODE);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * file.c
+ */
+int f2fs_sync_file(struct file *, loff_t, loff_t, int);
+void truncate_data_blocks(struct dnode_of_data *);
+void f2fs_truncate(struct inode *);
+int f2fs_setattr(struct dentry *, struct iattr *);
+int truncate_hole(struct inode *, pgoff_t, pgoff_t);
+long f2fs_ioctl(struct file *, unsigned int, unsigned long);
+
+/*
+ * inode.c
+ */
+void f2fs_set_inode_flags(struct inode *);
+struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
+struct inode *f2fs_iget(struct super_block *, unsigned long);
+void update_inode(struct inode *, struct page *);
+int f2fs_write_inode(struct inode *, struct writeback_control *);
+void f2fs_evict_inode(struct inode *);
+
+/*
+ * namei.c
+ */
+struct dentry *f2fs_get_parent(struct dentry *child);
+
+/*
+ * dir.c
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+ struct page **);
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
+ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
+void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
+ struct page *, struct inode *);
+void init_dent_inode(struct dentry *, struct page *);
+int f2fs_add_link(struct dentry *, struct inode *);
+void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
+int f2fs_make_empty(struct inode *, struct inode *);
+bool f2fs_empty_dir(struct inode *);
+
+/*
+ * super.c
+ */
+int f2fs_sync_fs(struct super_block *, int);
+
+/*
+ * hash.c
+ */
+f2fs_hash_t f2fs_dentry_hash(const char *, int);
+
+/*
+ * node.c
+ */
+struct dnode_of_data;
+struct node_info;
+
+int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
+void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
+int truncate_inode_blocks(struct inode *, pgoff_t);
+int remove_inode_page(struct inode *);
+int new_inode_page(struct inode *, struct dentry *);
+struct page *new_node_page(struct dnode_of_data *, unsigned int);
+void ra_node_page(struct f2fs_sb_info *, nid_t);
+struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
+struct page *get_node_page_ra(struct page *, int);
+void sync_inode_page(struct dnode_of_data *);
+int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+bool alloc_nid(struct f2fs_sb_info *, nid_t *);
+void alloc_nid_done(struct f2fs_sb_info *, nid_t);
+void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+void recover_node_page(struct f2fs_sb_info *, struct page *,
+ struct f2fs_summary *, struct node_info *, block_t);
+int recover_inode_page(struct f2fs_sb_info *, struct page *);
+int restore_node_summary(struct f2fs_sb_info *, unsigned int,
+ struct f2fs_summary_block *);
+void flush_nat_entries(struct f2fs_sb_info *);
+int build_node_manager(struct f2fs_sb_info *);
+void destroy_node_manager(struct f2fs_sb_info *);
+int create_node_manager_caches(void);
+void destroy_node_manager_caches(void);
+
+/*
+ * segment.c
+ */
+void f2fs_balance_fs(struct f2fs_sb_info *);
+void invalidate_blocks(struct f2fs_sb_info *, block_t);
+void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
+void clear_prefree_segments(struct f2fs_sb_info *);
+int npages_for_summary_flush(struct f2fs_sb_info *);
+void allocate_new_segments(struct f2fs_sb_info *);
+struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
+struct bio *f2fs_bio_alloc(struct block_device *, int);
+void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
+int write_meta_page(struct f2fs_sb_info *, struct page *,
+ struct writeback_control *);
+void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
+ block_t, block_t *);
+void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
+ block_t, block_t *);
+void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
+void recover_data_page(struct f2fs_sb_info *, struct page *,
+ struct f2fs_summary *, block_t, block_t);
+void rewrite_node_page(struct f2fs_sb_info *, struct page *,
+ struct f2fs_summary *, block_t, block_t);
+void write_data_summaries(struct f2fs_sb_info *, block_t);
+void write_node_summaries(struct f2fs_sb_info *, block_t);
+int lookup_journal_in_cursum(struct f2fs_summary_block *,
+ int, unsigned int, int);
+void flush_sit_entries(struct f2fs_sb_info *);
+int build_segment_manager(struct f2fs_sb_info *);
+void reset_victim_segmap(struct f2fs_sb_info *);
+void destroy_segment_manager(struct f2fs_sb_info *);
+
+/*
+ * checkpoint.c
+ */
+struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
+struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
+long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
+int check_orphan_space(struct f2fs_sb_info *);
+void add_orphan_inode(struct f2fs_sb_info *, nid_t);
+void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
+int recover_orphan_inodes(struct f2fs_sb_info *);
+int get_valid_checkpoint(struct f2fs_sb_info *);
+void set_dirty_dir_page(struct inode *, struct page *);
+void remove_dirty_dir_inode(struct inode *);
+void sync_dirty_dir_inodes(struct f2fs_sb_info *);
+void block_operations(struct f2fs_sb_info *);
+void write_checkpoint(struct f2fs_sb_info *, bool, bool);
+void init_orphan_info(struct f2fs_sb_info *);
+int create_checkpoint_caches(void);
+void destroy_checkpoint_caches(void);
+
+/*
+ * data.c
+ */
+int reserve_new_block(struct dnode_of_data *);
+void update_extent_cache(block_t, struct dnode_of_data *);
+struct page *find_data_page(struct inode *, pgoff_t);
+struct page *get_lock_data_page(struct inode *, pgoff_t);
+struct page *get_new_data_page(struct inode *, pgoff_t, bool);
+int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
+int do_write_data_page(struct page *);
+
+/*
+ * gc.c
+ */
+int start_gc_thread(struct f2fs_sb_info *);
+void stop_gc_thread(struct f2fs_sb_info *);
+block_t start_bidx_of_node(unsigned int);
+int f2fs_gc(struct f2fs_sb_info *, int);
+void build_gc_manager(struct f2fs_sb_info *);
+int create_gc_caches(void);
+void destroy_gc_caches(void);
+
+/*
+ * recovery.c
+ */
+void recover_fsync_data(struct f2fs_sb_info *);
+bool space_for_roll_forward(struct f2fs_sb_info *);
+
+/*
+ * debug.c
+ */
+#ifdef CONFIG_F2FS_STAT_FS
+struct f2fs_stat_info {
+ struct list_head stat_list;
+ struct f2fs_sb_info *sbi;
+ struct mutex stat_lock;
+ int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
+ int main_area_segs, main_area_sections, main_area_zones;
+ int hit_ext, total_ext;
+ int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
+ int nats, sits, fnids;
+ int total_count, utilization;
+ int bg_gc;
+ unsigned int valid_count, valid_node_count, valid_inode_count;
+ unsigned int bimodal, avg_vblocks;
+ int util_free, util_valid, util_invalid;
+ int rsvd_segs, overp_segs;
+ int dirty_count, node_pages, meta_pages;
+ int prefree_count, call_count;
+ int tot_segs, node_segs, data_segs, free_segs, free_secs;
+ int tot_blks, data_blks, node_blks;
+ int curseg[NR_CURSEG_TYPE];
+ int cursec[NR_CURSEG_TYPE];
+ int curzone[NR_CURSEG_TYPE];
+
+ unsigned int segment_count[2];
+ unsigned int block_count[2];
+ unsigned base_mem, cache_mem;
+};
+
+#define stat_inc_call_count(si) ((si)->call_count++)
+
+#define stat_inc_seg_count(sbi, type) \
+ do { \
+ struct f2fs_stat_info *si = sbi->stat_info; \
+ (si)->tot_segs++; \
+ if (type == SUM_TYPE_DATA) \
+ si->data_segs++; \
+ else \
+ si->node_segs++; \
+ } while (0)
+
+#define stat_inc_tot_blk_count(si, blks) \
+ (si->tot_blks += (blks))
+
+#define stat_inc_data_blk_count(sbi, blks) \
+ do { \
+ struct f2fs_stat_info *si = sbi->stat_info; \
+ stat_inc_tot_blk_count(si, blks); \
+ si->data_blks += (blks); \
+ } while (0)
+
+#define stat_inc_node_blk_count(sbi, blks) \
+ do { \
+ struct f2fs_stat_info *si = sbi->stat_info; \
+ stat_inc_tot_blk_count(si, blks); \
+ si->node_blks += (blks); \
+ } while (0)
+
+int f2fs_build_stats(struct f2fs_sb_info *);
+void f2fs_destroy_stats(struct f2fs_sb_info *);
+void destroy_root_stats(void);
+#else
+#define stat_inc_call_count(si)
+#define stat_inc_seg_count(si, type)
+#define stat_inc_tot_blk_count(si, blks)
+#define stat_inc_data_blk_count(si, blks)
+#define stat_inc_node_blk_count(sbi, blks)
+
+static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
+static inline void destroy_root_stats(void) { }
+#endif
+
+extern const struct file_operations f2fs_dir_operations;
+extern const struct file_operations f2fs_file_operations;
+extern const struct inode_operations f2fs_file_inode_operations;
+extern const struct address_space_operations f2fs_dblock_aops;
+extern const struct address_space_operations f2fs_node_aops;
+extern const struct address_space_operations f2fs_meta_aops;
+extern const struct inode_operations f2fs_dir_inode_operations;
+extern const struct inode_operations f2fs_symlink_inode_operations;
+extern const struct inode_operations f2fs_special_inode_operations;
+#endif
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
new file mode 100644
index 00000000000..f9e085dfb1f
--- /dev/null
+++ b/fs/f2fs/file.c
@@ -0,0 +1,636 @@
+/*
+ * fs/f2fs/file.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/stat.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/falloc.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/mount.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+#include "xattr.h"
+#include "acl.h"
+
+static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ block_t old_blk_addr;
+ struct dnode_of_data dn;
+ int err;
+
+ f2fs_balance_fs(sbi);
+
+ sb_start_pagefault(inode->i_sb);
+
+ mutex_lock_op(sbi, DATA_NEW);
+
+ /* block allocation */
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, page->index, 0);
+ if (err) {
+ mutex_unlock_op(sbi, DATA_NEW);
+ goto out;
+ }
+
+ old_blk_addr = dn.data_blkaddr;
+
+ if (old_blk_addr == NULL_ADDR) {
+ err = reserve_new_block(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ mutex_unlock_op(sbi, DATA_NEW);
+ goto out;
+ }
+ }
+ f2fs_put_dnode(&dn);
+
+ mutex_unlock_op(sbi, DATA_NEW);
+
+ lock_page(page);
+ if (page->mapping != inode->i_mapping ||
+ page_offset(page) >= i_size_read(inode) ||
+ !PageUptodate(page)) {
+ unlock_page(page);
+ err = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * check to see if the page is mapped already (no holes)
+ */
+ if (PageMappedToDisk(page))
+ goto out;
+
+ /* fill the page */
+ wait_on_page_writeback(page);
+
+ /* page is wholly or partially inside EOF */
+ if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
+ unsigned offset;
+ offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ }
+ set_page_dirty(page);
+ SetPageUptodate(page);
+
+ file_update_time(vma->vm_file);
+out:
+ sb_end_pagefault(inode->i_sb);
+ return block_page_mkwrite_return(err);
+}
+
+static const struct vm_operations_struct f2fs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = f2fs_vm_page_mkwrite,
+};
+
+static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
+{
+ struct dentry *dentry;
+ nid_t pino;
+
+ inode = igrab(inode);
+ dentry = d_find_any_alias(inode);
+ if (!dentry) {
+ iput(inode);
+ return 0;
+ }
+ pino = dentry->d_parent->d_inode->i_ino;
+ dput(dentry);
+ iput(inode);
+ return !is_checkpointed_node(sbi, pino);
+}
+
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ unsigned long long cur_version;
+ int ret = 0;
+ bool need_cp = false;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .for_reclaim = 0,
+ };
+
+ if (inode->i_sb->s_flags & MS_RDONLY)
+ return 0;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inode->i_mutex);
+
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ goto out;
+
+ mutex_lock(&sbi->cp_mutex);
+ cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
+ mutex_unlock(&sbi->cp_mutex);
+
+ if (F2FS_I(inode)->data_version != cur_version &&
+ !(inode->i_state & I_DIRTY))
+ goto out;
+ F2FS_I(inode)->data_version--;
+
+ if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
+ need_cp = true;
+ if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
+ need_cp = true;
+ if (!space_for_roll_forward(sbi))
+ need_cp = true;
+ if (need_to_sync_dir(sbi, inode))
+ need_cp = true;
+
+ f2fs_write_inode(inode, NULL);
+
+ if (need_cp) {
+ /* all the dirty node pages should be flushed for POR */
+ ret = f2fs_sync_fs(inode->i_sb, 1);
+ clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
+ } else {
+ while (sync_node_pages(sbi, inode->i_ino, &wbc) == 0)
+ f2fs_write_inode(inode, NULL);
+ filemap_fdatawait_range(sbi->node_inode->i_mapping,
+ 0, LONG_MAX);
+ }
+out:
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
+static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &f2fs_file_vm_ops;
+ return 0;
+}
+
+static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+{
+ int nr_free = 0, ofs = dn->ofs_in_node;
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct f2fs_node *raw_node;
+ __le32 *addr;
+
+ raw_node = page_address(dn->node_page);
+ addr = blkaddr_in_node(raw_node) + ofs;
+
+ for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
+ block_t blkaddr = le32_to_cpu(*addr);
+ if (blkaddr == NULL_ADDR)
+ continue;
+
+ update_extent_cache(NULL_ADDR, dn);
+ invalidate_blocks(sbi, blkaddr);
+ dec_valid_block_count(sbi, dn->inode, 1);
+ nr_free++;
+ }
+ if (nr_free) {
+ set_page_dirty(dn->node_page);
+ sync_inode_page(dn);
+ }
+ dn->ofs_in_node = ofs;
+ return nr_free;
+}
+
+void truncate_data_blocks(struct dnode_of_data *dn)
+{
+ truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
+}
+
+static void truncate_partial_data_page(struct inode *inode, u64 from)
+{
+ unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+ struct page *page;
+
+ if (!offset)
+ return;
+
+ page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
+ if (IS_ERR(page))
+ return;
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+ zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+}
+
+static int truncate_blocks(struct inode *inode, u64 from)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ unsigned int blocksize = inode->i_sb->s_blocksize;
+ struct dnode_of_data dn;
+ pgoff_t free_from;
+ int count = 0;
+ int err;
+
+ free_from = (pgoff_t)
+ ((from + blocksize - 1) >> (sbi->log_blocksize));
+
+ mutex_lock_op(sbi, DATA_TRUNC);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
+ if (err) {
+ if (err == -ENOENT)
+ goto free_next;
+ mutex_unlock_op(sbi, DATA_TRUNC);
+ return err;
+ }
+
+ if (IS_INODE(dn.node_page))
+ count = ADDRS_PER_INODE;
+ else
+ count = ADDRS_PER_BLOCK;
+
+ count -= dn.ofs_in_node;
+ BUG_ON(count < 0);
+ if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
+ truncate_data_blocks_range(&dn, count);
+ free_from += count;
+ }
+
+ f2fs_put_dnode(&dn);
+free_next:
+ err = truncate_inode_blocks(inode, free_from);
+ mutex_unlock_op(sbi, DATA_TRUNC);
+
+ /* lastly zero out the first data page */
+ truncate_partial_data_page(inode, from);
+
+ return err;
+}
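+
+/*
+ * Worked example for truncate_blocks(), assuming a 4 KB block size
+ * (log_blocksize == 12): truncating to from == 5000 gives
+ * free_from = (5000 + 4095) >> 12 = 2, so block index 2 and beyond are
+ * freed, while block 1 (which still holds bytes 4096..4999) is kept and
+ * its tail is zeroed by truncate_partial_data_page().
+ */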
+
+void f2fs_truncate(struct inode *inode)
+{
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)))
+ return;
+
+ if (!truncate_blocks(inode, i_size_read(inode))) {
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ }
+
+ f2fs_balance_fs(F2FS_SB(inode->i_sb));
+}
+
+static int f2fs_getattr(struct vfsmount *mnt,
+ struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ generic_fillattr(inode, stat);
+ stat->blocks <<= 3;
+ return 0;
+}
+
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+static void __setattr_copy(struct inode *inode, const struct iattr *attr)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned int ia_valid = attr->ia_valid;
+
+ if (ia_valid & ATTR_UID)
+ inode->i_uid = attr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ inode->i_atime = timespec_trunc(attr->ia_atime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MTIME)
+ inode->i_mtime = timespec_trunc(attr->ia_mtime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_CTIME)
+ inode->i_ctime = timespec_trunc(attr->ia_ctime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+ set_acl_inode(fi, mode);
+ }
+}
+#else
+#define __setattr_copy setattr_copy
+#endif
+
+int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ int err;
+
+ err = inode_change_ok(inode, attr);
+ if (err)
+ return err;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ truncate_setsize(inode, attr->ia_size);
+ f2fs_truncate(inode);
+ }
+
+ __setattr_copy(inode, attr);
+
+ if (attr->ia_valid & ATTR_MODE) {
+ err = f2fs_acl_chmod(inode);
+ if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
+ inode->i_mode = fi->i_acl_mode;
+ clear_inode_flag(fi, FI_ACL_MODE);
+ }
+ }
+
+ mark_inode_dirty(inode);
+ return err;
+}
+
+const struct inode_operations f2fs_file_inode_operations = {
+ .getattr = f2fs_getattr,
+ .setattr = f2fs_setattr,
+ .get_acl = f2fs_get_acl,
+#ifdef CONFIG_F2FS_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = f2fs_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+};
+
+static void fill_zero(struct inode *inode, pgoff_t index,
+ loff_t start, loff_t len)
+{
+ struct page *page;
+
+ if (!len)
+ return;
+
+ page = get_new_data_page(inode, index, false);
+
+ if (!IS_ERR(page)) {
+ wait_on_page_writeback(page);
+ zero_user(page, start, len);
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+ }
+}
+
+int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
+{
+ pgoff_t index;
+ int err;
+
+ for (index = pg_start; index < pg_end; index++) {
+ struct dnode_of_data dn;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+
+ mutex_lock_op(sbi, DATA_TRUNC);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+ if (err) {
+ mutex_unlock_op(sbi, DATA_TRUNC);
+ if (err == -ENOENT)
+ continue;
+ return err;
+ }
+
+ if (dn.data_blkaddr != NULL_ADDR)
+ truncate_data_blocks_range(&dn, 1);
+ f2fs_put_dnode(&dn);
+ mutex_unlock_op(sbi, DATA_TRUNC);
+ }
+ return 0;
+}
+
+static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
+{
+ pgoff_t pg_start, pg_end;
+ loff_t off_start, off_end;
+ int ret = 0;
+
+ pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+
+ off_start = offset & (PAGE_CACHE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+
+ if (pg_start == pg_end) {
+ fill_zero(inode, pg_start, off_start,
+ off_end - off_start);
+ } else {
+ if (off_start)
+ fill_zero(inode, pg_start++, off_start,
+ PAGE_CACHE_SIZE - off_start);
+ if (off_end)
+ fill_zero(inode, pg_end, 0, off_end);
+
+ if (pg_start < pg_end) {
+ struct address_space *mapping = inode->i_mapping;
+ loff_t blk_start, blk_end;
+
+ blk_start = pg_start << PAGE_CACHE_SHIFT;
+ blk_end = pg_end << PAGE_CACHE_SHIFT;
+ truncate_inode_pages_range(mapping, blk_start,
+ blk_end - 1);
+ ret = truncate_hole(inode, pg_start, pg_end);
+ }
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ i_size_read(inode) <= (offset + len)) {
+ i_size_write(inode, offset);
+ mark_inode_dirty(inode);
+ }
+
+ return ret;
+}
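+
+/*
+ * Worked example for punch_hole(), assuming 4 KB pages: with offset = 1000
+ * and len = 10000, pg_start = 0, pg_end = 2, off_start = 1000 and
+ * off_end = 2808. Page 0 is zeroed from byte 1000 to the end, page 2 is
+ * zeroed from byte 0 to 2808, and only the fully covered page 1 has its
+ * blocks dropped via truncate_hole().
+ */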
+
+static int expand_inode_data(struct inode *inode, loff_t offset,
+ loff_t len, int mode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ pgoff_t index, pg_start, pg_end;
+ loff_t new_size = i_size_read(inode);
+ loff_t off_start, off_end;
+ int ret = 0;
+
+ ret = inode_newsize_ok(inode, (len + offset));
+ if (ret)
+ return ret;
+
+ pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+
+ off_start = offset & (PAGE_CACHE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+
+ for (index = pg_start; index <= pg_end; index++) {
+ struct dnode_of_data dn;
+
+ mutex_lock_op(sbi, DATA_NEW);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, index, 0);
+ if (ret) {
+ mutex_unlock_op(sbi, DATA_NEW);
+ break;
+ }
+
+ if (dn.data_blkaddr == NULL_ADDR) {
+ ret = reserve_new_block(&dn);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ mutex_unlock_op(sbi, DATA_NEW);
+ break;
+ }
+ }
+ f2fs_put_dnode(&dn);
+
+ mutex_unlock_op(sbi, DATA_NEW);
+
+ if (pg_start == pg_end)
+ new_size = offset + len;
+ else if (index == pg_start && off_start)
+ new_size = (index + 1) << PAGE_CACHE_SHIFT;
+ else if (index == pg_end)
+ new_size = (index << PAGE_CACHE_SHIFT) + off_end;
+ else
+ new_size += PAGE_CACHE_SIZE;
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ i_size_read(inode) < new_size) {
+ i_size_write(inode, new_size);
+ mark_inode_dirty(inode);
+ }
+
+ return ret;
+}
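+
+/*
+ * Worked example for expand_inode_data(), again assuming 4 KB pages and
+ * offset = 1000, len = 10000: blocks are reserved for pages 0..2, and
+ * new_size ends up as (2 << PAGE_CACHE_SHIFT) + 2808 = 11000, i.e. exactly
+ * offset + len, which becomes the new i_size unless FALLOC_FL_KEEP_SIZE
+ * is set or the file is already larger.
+ */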
+
+static long f2fs_fallocate(struct file *file, int mode,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ long ret;
+
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ return -EOPNOTSUPP;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ ret = punch_hole(inode, offset, len, mode);
+ else
+ ret = expand_inode_data(inode, offset, len, mode);
+
+ f2fs_balance_fs(sbi);
+ return ret;
+}
+
+#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
+#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+
+static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & F2FS_REG_FLMASK;
+ else
+ return flags & F2FS_OTHER_FLMASK;
+}
+
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned int flags;
+ int ret;
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ return put_user(flags, (int __user *) arg);
+ case FS_IOC_SETFLAGS:
+ {
+ unsigned int oldflags;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ if (!inode_owner_or_capable(inode)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ if (get_user(flags, (int __user *) arg)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ flags = f2fs_mask_flags(inode->i_mode, flags);
+
+ mutex_lock(&inode->i_mutex);
+
+ oldflags = fi->i_flags;
+
+ if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ mutex_unlock(&inode->i_mutex);
+ ret = -EPERM;
+ goto out;
+ }
+ }
+
+ flags = flags & FS_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
+ fi->i_flags = flags;
+ mutex_unlock(&inode->i_mutex);
+
+ f2fs_set_inode_flags(inode);
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+out:
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+ }
+ default:
+ return -ENOTTY;
+ }
+}
+
+const struct file_operations f2fs_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .open = generic_file_open,
+ .mmap = f2fs_file_mmap,
+ .fsync = f2fs_sync_file,
+ .fallocate = f2fs_fallocate,
+ .unlocked_ioctl = f2fs_ioctl,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+};
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
new file mode 100644
index 00000000000..644aa380827
--- /dev/null
+++ b/fs/f2fs/gc.c
@@ -0,0 +1,742 @@
+/*
+ * fs/f2fs/gc.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/f2fs_fs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/blkdev.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+#include "gc.h"
+
+static struct kmem_cache *winode_slab;
+
+static int gc_thread_func(void *data)
+{
+ struct f2fs_sb_info *sbi = data;
+ wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
+ long wait_ms;
+
+ wait_ms = GC_THREAD_MIN_SLEEP_TIME;
+
+ do {
+ if (try_to_freeze())
+ continue;
+ else
+ wait_event_interruptible_timeout(*wq,
+ kthread_should_stop(),
+ msecs_to_jiffies(wait_ms));
+ if (kthread_should_stop())
+ break;
+
+ f2fs_balance_fs(sbi);
+
+ if (!test_opt(sbi, BG_GC))
+ continue;
+
+ /*
+ * [GC triggering condition]
+ * 0. GC is not conducted currently.
+ * 1. There are enough dirty segments.
+ * 2. IO subsystem is idle by checking the # of writeback pages.
+ * 3. IO subsystem is idle by checking the # of requests in
+ * bdev's request list.
+ *
+ * Note) We have to avoid triggering GCs too frequently, because
+ * some segments can be invalidated soon afterwards by user updates
+ * or deletions. So we wait for a while to let dirty segments
+ * accumulate before collecting them.
+ */
+ if (!mutex_trylock(&sbi->gc_mutex))
+ continue;
+
+ if (!is_idle(sbi)) {
+ wait_ms = increase_sleep_time(wait_ms);
+ mutex_unlock(&sbi->gc_mutex);
+ continue;
+ }
+
+ if (has_enough_invalid_blocks(sbi))
+ wait_ms = decrease_sleep_time(wait_ms);
+ else
+ wait_ms = increase_sleep_time(wait_ms);
+
+ sbi->bg_gc++;
+
+ if (f2fs_gc(sbi, 1) == GC_NONE)
+ wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
+ else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
+ wait_ms = GC_THREAD_MAX_SLEEP_TIME;
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+int start_gc_thread(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_gc_kthread *gc_th;
+
+ gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+ if (!gc_th)
+ return -ENOMEM;
+
+ sbi->gc_thread = gc_th;
+ init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
+ sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
+ GC_THREAD_NAME);
+ if (IS_ERR(gc_th->f2fs_gc_task)) {
+ kfree(gc_th);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void stop_gc_thread(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
+ if (!gc_th)
+ return;
+ kthread_stop(gc_th->f2fs_gc_task);
+ kfree(gc_th);
+ sbi->gc_thread = NULL;
+}
+
+static int select_gc_type(int gc_type)
+{
+ return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+}
+
+static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
+ int type, struct victim_sel_policy *p)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+
+ if (p->alloc_mode) {
+ p->gc_mode = GC_GREEDY;
+ p->dirty_segmap = dirty_i->dirty_segmap[type];
+ p->ofs_unit = 1;
+ } else {
+ p->gc_mode = select_gc_type(gc_type);
+ p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
+ p->ofs_unit = sbi->segs_per_sec;
+ }
+ p->offset = sbi->last_victim[p->gc_mode];
+}
+
+static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
+ struct victim_sel_policy *p)
+{
+ if (p->gc_mode == GC_GREEDY)
+ return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
+ else if (p->gc_mode == GC_CB)
+ return UINT_MAX;
+ else /* No other gc_mode */
+ return 0;
+}
+
+static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno;
+
+ /*
+ * If the gc_type is FG_GC, we can reuse victim segments that were
+ * previously selected by background GC; those segments are
+ * guaranteed to have only a few valid blocks.
+ */
+ segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
+ TOTAL_SEGS(sbi), 0);
+ if (segno < TOTAL_SEGS(sbi)) {
+ clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
+ return segno;
+ }
+ return NULL_SEGNO;
+}
+
+static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int secno = GET_SECNO(sbi, segno);
+ unsigned int start = secno * sbi->segs_per_sec;
+ unsigned long long mtime = 0;
+ unsigned int vblocks;
+ unsigned char age = 0;
+ unsigned char u;
+ unsigned int i;
+
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ mtime += get_seg_entry(sbi, start + i)->mtime;
+ vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+
+ mtime = div_u64(mtime, sbi->segs_per_sec);
+ vblocks = div_u64(vblocks, sbi->segs_per_sec);
+
+ u = (vblocks * 100) >> sbi->log_blocks_per_seg;
+
+ /* Handle the case where the system time was changed by the user */
+ if (mtime < sit_i->min_mtime)
+ sit_i->min_mtime = mtime;
+ if (mtime > sit_i->max_mtime)
+ sit_i->max_mtime = mtime;
+ if (sit_i->max_mtime != sit_i->min_mtime)
+ age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
+ sit_i->max_mtime - sit_i->min_mtime);
+
+ return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
+}
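+
+/*
+ * Cost-benefit illustration for get_cb_cost(), with made-up numbers and
+ * 512 blocks per segment (log_blocks_per_seg == 9): a fully aged section
+ * (age == 100) with 128 valid blocks per segment gives u == 25 and a cost
+ * of UINT_MAX - (100 * 75 * 100) / 125 = UINT_MAX - 6000, while one with
+ * u == 10 gives UINT_MAX - 8181. The smaller value wins in
+ * get_victim_by_default(), so older sections with fewer valid blocks are
+ * preferred.
+ */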
+
+static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
+ struct victim_sel_policy *p)
+{
+ if (p->alloc_mode == SSR)
+ return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+
+ /* alloc_mode == LFS */
+ if (p->gc_mode == GC_GREEDY)
+ return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ else
+ return get_cb_cost(sbi, segno);
+}
+
+/*
+ * This function is called from two paths.
+ * One is garbage collection and the other is SSR segment selection.
+ * When it is called during GC, it just gets a victim segment
+ * and does not remove it from the dirty seglist.
+ * When it is called from SSR segment selection, it finds a segment
+ * with the minimum number of valid blocks and removes it from the dirty seglist.
+ */
+static int get_victim_by_default(struct f2fs_sb_info *sbi,
+ unsigned int *result, int gc_type, int type, char alloc_mode)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct victim_sel_policy p;
+ unsigned int segno;
+ int nsearched = 0;
+
+ p.alloc_mode = alloc_mode;
+ select_policy(sbi, gc_type, type, &p);
+
+ p.min_segno = NULL_SEGNO;
+ p.min_cost = get_max_cost(sbi, &p);
+
+ mutex_lock(&dirty_i->seglist_lock);
+
+ if (p.alloc_mode == LFS && gc_type == FG_GC) {
+ p.min_segno = check_bg_victims(sbi);
+ if (p.min_segno != NULL_SEGNO)
+ goto got_it;
+ }
+
+ while (1) {
+ unsigned long cost;
+
+ segno = find_next_bit(p.dirty_segmap,
+ TOTAL_SEGS(sbi), p.offset);
+ if (segno >= TOTAL_SEGS(sbi)) {
+ if (sbi->last_victim[p.gc_mode]) {
+ sbi->last_victim[p.gc_mode] = 0;
+ p.offset = 0;
+ continue;
+ }
+ break;
+ }
+ p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+ if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
+ continue;
+ if (gc_type == BG_GC &&
+ test_bit(segno, dirty_i->victim_segmap[BG_GC]))
+ continue;
+ if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
+ continue;
+
+ cost = get_gc_cost(sbi, segno, &p);
+
+ if (p.min_cost > cost) {
+ p.min_segno = segno;
+ p.min_cost = cost;
+ }
+
+ if (cost == get_max_cost(sbi, &p))
+ continue;
+
+ if (nsearched++ >= MAX_VICTIM_SEARCH) {
+ sbi->last_victim[p.gc_mode] = segno;
+ break;
+ }
+ }
+got_it:
+ if (p.min_segno != NULL_SEGNO) {
+ *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+ if (p.alloc_mode == LFS) {
+ int i;
+ for (i = 0; i < p.ofs_unit; i++)
+ set_bit(*result + i,
+ dirty_i->victim_segmap[gc_type]);
+ }
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+
+ return (p.min_segno == NULL_SEGNO) ? 0 : 1;
+}
+
+static const struct victim_selection default_v_ops = {
+ .get_victim = get_victim_by_default,
+};
+
+static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
+{
+ struct list_head *this;
+ struct inode_entry *ie;
+
+ list_for_each(this, ilist) {
+ ie = list_entry(this, struct inode_entry, list);
+ if (ie->inode->i_ino == ino)
+ return ie->inode;
+ }
+ return NULL;
+}
+
+static void add_gc_inode(struct inode *inode, struct list_head *ilist)
+{
+ struct list_head *this;
+ struct inode_entry *new_ie, *ie;
+
+ list_for_each(this, ilist) {
+ ie = list_entry(this, struct inode_entry, list);
+ if (ie->inode == inode) {
+ iput(inode);
+ return;
+ }
+ }
+repeat:
+ new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
+ if (!new_ie) {
+ cond_resched();
+ goto repeat;
+ }
+ new_ie->inode = inode;
+ list_add_tail(&new_ie->list, ilist);
+}
+
+static void put_gc_inode(struct list_head *ilist)
+{
+ struct inode_entry *ie, *next_ie;
+ list_for_each_entry_safe(ie, next_ie, ilist, list) {
+ iput(ie->inode);
+ list_del(&ie->list);
+ kmem_cache_free(winode_slab, ie);
+ }
+}
+
+static int check_valid_map(struct f2fs_sb_info *sbi,
+ unsigned int segno, int offset)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct seg_entry *sentry;
+ int ret;
+
+ mutex_lock(&sit_i->sentry_lock);
+ sentry = get_seg_entry(sbi, segno);
+ ret = f2fs_test_bit(offset, sentry->cur_valid_map);
+ mutex_unlock(&sit_i->sentry_lock);
+ return ret ? GC_OK : GC_NEXT;
+}
+
+/*
+ * This function compares the node address recorded in the summary with
+ * the one in the NAT. If they match, the node is valid and is migrated
+ * with cold status; otherwise the invalid node is ignored.
+ */
+static int gc_node_segment(struct f2fs_sb_info *sbi,
+ struct f2fs_summary *sum, unsigned int segno, int gc_type)
+{
+ bool initial = true;
+ struct f2fs_summary *entry;
+ int off;
+
+next_step:
+ entry = sum;
+ for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
+ nid_t nid = le32_to_cpu(entry->nid);
+ struct page *node_page;
+ int err;
+
+ /*
+ * Make sure there are enough free segments to write out all the
+ * dirty node pages between this checkpoint and the next one, so
+ * check the space required for the dirty node pages.
+ */
+ if (should_do_checkpoint(sbi)) {
+ mutex_lock(&sbi->cp_mutex);
+ block_operations(sbi);
+ return GC_BLOCKED;
+ }
+
+ err = check_valid_map(sbi, segno, off);
+ if (err == GC_ERROR)
+ return err;
+ else if (err == GC_NEXT)
+ continue;
+
+ if (initial) {
+ ra_node_page(sbi, nid);
+ continue;
+ }
+ node_page = get_node_page(sbi, nid);
+ if (IS_ERR(node_page))
+ continue;
+
+ /* set page dirty and write it */
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ f2fs_put_page(node_page, 1);
+ stat_inc_node_blk_count(sbi, 1);
+ }
+ if (initial) {
+ initial = false;
+ goto next_step;
+ }
+
+ if (gc_type == FG_GC) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .for_reclaim = 0,
+ };
+ sync_node_pages(sbi, 0, &wbc);
+ }
+ return GC_DONE;
+}
+
+/*
+ * Calculate the start block index of the data addressed by this node page
+ */
+block_t start_bidx_of_node(unsigned int node_ofs)
+{
+ block_t start_bidx;
+ unsigned int bidx, indirect_blks;
+ int dec;
+
+ indirect_blks = 2 * NIDS_PER_BLOCK + 4;
+
+ start_bidx = 1;
+ if (node_ofs == 0) {
+ start_bidx = 0;
+ } else if (node_ofs <= 2) {
+ bidx = node_ofs - 1;
+ } else if (node_ofs <= indirect_blks) {
+ dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
+ bidx = node_ofs - 2 - dec;
+ } else {
+ dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
+ bidx = node_ofs - 5 - dec;
+ }
+
+ if (start_bidx)
+ start_bidx = bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
+ return start_bidx;
+}
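+
+/*
+ * Example for start_bidx_of_node(), assuming the usual constants
+ * ADDRS_PER_INODE == 923 and ADDRS_PER_BLOCK == 1018: node_ofs 0 is the
+ * inode itself, so it covers data block indexes starting at 0; the first
+ * direct node (node_ofs 1) starts at block index 923 and the second
+ * (node_ofs 2) at 923 + 1018 = 1941.
+ */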
+
+static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ struct node_info *dni, block_t blkaddr, unsigned int *nofs)
+{
+ struct page *node_page;
+ nid_t nid;
+ unsigned int ofs_in_node;
+ block_t source_blkaddr;
+
+ nid = le32_to_cpu(sum->nid);
+ ofs_in_node = le16_to_cpu(sum->ofs_in_node);
+
+ node_page = get_node_page(sbi, nid);
+ if (IS_ERR(node_page))
+ return GC_NEXT;
+
+ get_node_info(sbi, nid, dni);
+
+ if (sum->version != dni->version) {
+ f2fs_put_page(node_page, 1);
+ return GC_NEXT;
+ }
+
+ *nofs = ofs_of_node(node_page);
+ source_blkaddr = datablock_addr(node_page, ofs_in_node);
+ f2fs_put_page(node_page, 1);
+
+ if (source_blkaddr != blkaddr)
+ return GC_NEXT;
+ return GC_OK;
+}
+
+static void move_data_page(struct inode *inode, struct page *page, int gc_type)
+{
+ if (page->mapping != inode->i_mapping)
+ goto out;
+
+ if (inode != page->mapping->host)
+ goto out;
+
+ if (PageWriteback(page))
+ goto out;
+
+ if (gc_type == BG_GC) {
+ set_page_dirty(page);
+ set_cold_data(page);
+ } else {
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ mutex_lock_op(sbi, DATA_WRITE);
+ if (clear_page_dirty_for_io(page) &&
+ S_ISDIR(inode->i_mode)) {
+ dec_page_count(sbi, F2FS_DIRTY_DENTS);
+ inode_dec_dirty_dents(inode);
+ }
+ set_cold_data(page);
+ do_write_data_page(page);
+ mutex_unlock_op(sbi, DATA_WRITE);
+ clear_cold_data(page);
+ }
+out:
+ f2fs_put_page(page, 1);
+}
+
+/*
+ * This function tries to get the parent node of a victim data block and
+ * checks the block's validity. If the block is valid, it is copied with
+ * cold status and the parent node is updated.
+ * If the parent node is not valid or the data block address differs,
+ * the victim data block is ignored.
+ */
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ struct list_head *ilist, unsigned int segno, int gc_type)
+{
+ struct super_block *sb = sbi->sb;
+ struct f2fs_summary *entry;
+ block_t start_addr;
+ int err, off;
+ int phase = 0;
+
+ start_addr = START_BLOCK(sbi, segno);
+
+next_step:
+ entry = sum;
+ for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
+ struct page *data_page;
+ struct inode *inode;
+ struct node_info dni; /* dnode info for the data */
+ unsigned int ofs_in_node, nofs;
+ block_t start_bidx;
+
+ /*
+ * Make sure there are enough free segments to write out all the
+ * dirty node pages between this checkpoint and the next one, so
+ * check the space required for the dirty node pages.
+ */
+ if (should_do_checkpoint(sbi)) {
+ mutex_lock(&sbi->cp_mutex);
+ block_operations(sbi);
+ err = GC_BLOCKED;
+ goto stop;
+ }
+
+ err = check_valid_map(sbi, segno, off);
+ if (err == GC_ERROR)
+ goto stop;
+ else if (err == GC_NEXT)
+ continue;
+
+ if (phase == 0) {
+ ra_node_page(sbi, le32_to_cpu(entry->nid));
+ continue;
+ }
+
+ /* Get an inode by ino with checking validity */
+ err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
+ if (err == GC_ERROR)
+ goto stop;
+ else if (err == GC_NEXT)
+ continue;
+
+ if (phase == 1) {
+ ra_node_page(sbi, dni.ino);
+ continue;
+ }
+
+ start_bidx = start_bidx_of_node(nofs);
+ ofs_in_node = le16_to_cpu(entry->ofs_in_node);
+
+ if (phase == 2) {
+ inode = f2fs_iget_nowait(sb, dni.ino);
+ if (IS_ERR(inode))
+ continue;
+
+ data_page = find_data_page(inode,
+ start_bidx + ofs_in_node);
+ if (IS_ERR(data_page))
+ goto next_iput;
+
+ f2fs_put_page(data_page, 0);
+ add_gc_inode(inode, ilist);
+ } else {
+ inode = find_gc_inode(dni.ino, ilist);
+ if (inode) {
+ data_page = get_lock_data_page(inode,
+ start_bidx + ofs_in_node);
+ if (IS_ERR(data_page))
+ continue;
+ move_data_page(inode, data_page, gc_type);
+ stat_inc_data_blk_count(sbi, 1);
+ }
+ }
+ continue;
+next_iput:
+ iput(inode);
+ }
+ if (++phase < 4)
+ goto next_step;
+ err = GC_DONE;
+stop:
+ if (gc_type == FG_GC)
+ f2fs_submit_bio(sbi, DATA, true);
+ return err;
+}
+
+static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
+ int gc_type, int type)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ int ret;
+ mutex_lock(&sit_i->sentry_lock);
+ ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
+ mutex_unlock(&sit_i->sentry_lock);
+ return ret;
+}
+
+static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+ struct list_head *ilist, int gc_type)
+{
+ struct page *sum_page;
+ struct f2fs_summary_block *sum;
+ int ret = GC_DONE;
+
+ /* read segment summary of victim */
+ sum_page = get_sum_page(sbi, segno);
+ if (IS_ERR(sum_page))
+ return GC_ERROR;
+
+ /*
+ * CP needs to lock sum_page. Here we do not need to keep this page
+ * locked, because the summary page is not going anywhere and will
+ * not be updated before GC is done.
+ */
+ unlock_page(sum_page);
+ sum = page_address(sum_page);
+
+ switch (GET_SUM_TYPE((&sum->footer))) {
+ case SUM_TYPE_NODE:
+ ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+ break;
+ case SUM_TYPE_DATA:
+ ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+ break;
+ }
+ stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
+ stat_inc_call_count(sbi->stat_info);
+
+ f2fs_put_page(sum_page, 0);
+ return ret;
+}
+
+int f2fs_gc(struct f2fs_sb_info *sbi, int nGC)
+{
+ unsigned int segno;
+ int old_free_secs, cur_free_secs;
+ int gc_status, nfree;
+ struct list_head ilist;
+ int gc_type = BG_GC;
+
+ INIT_LIST_HEAD(&ilist);
+gc_more:
+ nfree = 0;
+ gc_status = GC_NONE;
+
+ if (has_not_enough_free_secs(sbi))
+ old_free_secs = reserved_sections(sbi);
+ else
+ old_free_secs = free_sections(sbi);
+
+ while (sbi->sb->s_flags & MS_ACTIVE) {
+ int i;
+ if (has_not_enough_free_secs(sbi))
+ gc_type = FG_GC;
+
+ cur_free_secs = free_sections(sbi) + nfree;
+
+ /* We got free space successfully. */
+ if (nGC < cur_free_secs - old_free_secs)
+ break;
+
+ if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+ break;
+
+ for (i = 0; i < sbi->segs_per_sec; i++) {
+ /*
+ * do_garbage_collect will give us three gc_status:
+ * GC_ERROR, GC_DONE, and GC_BLOCKED.
+ * If GC finishes uncleanly, we have to return
+ * the victim to the dirty segment list.
+ */
+ gc_status = do_garbage_collect(sbi, segno + i,
+ &ilist, gc_type);
+ if (gc_status != GC_DONE)
+ goto stop;
+ nfree++;
+ }
+ }
+stop:
+ if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
+ write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
+ if (nfree)
+ goto gc_more;
+ }
+ mutex_unlock(&sbi->gc_mutex);
+
+ put_gc_inode(&ilist);
+ BUG_ON(!list_empty(&ilist));
+ return gc_status;
+}
+
+void build_gc_manager(struct f2fs_sb_info *sbi)
+{
+ DIRTY_I(sbi)->v_ops = &default_v_ops;
+}
+
+int create_gc_caches(void)
+{
+ winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
+ sizeof(struct inode_entry), NULL);
+ if (!winode_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void destroy_gc_caches(void)
+{
+ kmem_cache_destroy(winode_slab);
+}
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
new file mode 100644
index 00000000000..b026d9354cc
--- /dev/null
+++ b/fs/f2fs/gc.h
@@ -0,0 +1,117 @@
+/*
+ * fs/f2fs/gc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define GC_THREAD_NAME "f2fs_gc_task"
+#define GC_THREAD_MIN_WB_PAGES 1 /*
+ * a threshold to determine
+ * whether IO subsystem is idle
+ * or not
+ */
+#define GC_THREAD_MIN_SLEEP_TIME 10000 /* milliseconds */
+#define GC_THREAD_MAX_SLEEP_TIME 30000
+#define GC_THREAD_NOGC_SLEEP_TIME 10000
+#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
+#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+
+/* Maximum number of dirty segments to search when selecting a victim segment */
+#define MAX_VICTIM_SEARCH 20
+
+enum {
+ GC_NONE = 0,
+ GC_ERROR,
+ GC_OK,
+ GC_NEXT,
+ GC_BLOCKED,
+ GC_DONE,
+};
+
+struct f2fs_gc_kthread {
+ struct task_struct *f2fs_gc_task;
+ wait_queue_head_t gc_wait_queue_head;
+};
+
+struct inode_entry {
+ struct list_head list;
+ struct inode *inode;
+};
+
+/*
+ * inline functions
+ */
+static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
+{
+ if (free_segments(sbi) < overprovision_segments(sbi))
+ return 0;
+ else
+ return (free_segments(sbi) - overprovision_segments(sbi))
+ << sbi->log_blocks_per_seg;
+}
+
+static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
+{
+ return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
+}
+
+static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
+{
+ block_t reclaimable_user_blocks = sbi->user_block_count -
+ written_block_count(sbi);
+ return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
+}
+
+static inline long increase_sleep_time(long wait)
+{
+ wait += GC_THREAD_MIN_SLEEP_TIME;
+ if (wait > GC_THREAD_MAX_SLEEP_TIME)
+ wait = GC_THREAD_MAX_SLEEP_TIME;
+ return wait;
+}
+
+static inline long decrease_sleep_time(long wait)
+{
+ wait -= GC_THREAD_MIN_SLEEP_TIME;
+ if (wait <= GC_THREAD_MIN_SLEEP_TIME)
+ wait = GC_THREAD_MIN_SLEEP_TIME;
+ return wait;
+}
+
+static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
+{
+ block_t invalid_user_blocks = sbi->user_block_count -
+ written_block_count(sbi);
+ /*
+ * Background GC is triggered when both of the following hold:
+ * 1. There is a sufficient number of invalid blocks.
+ * 2. There is not enough free space.
+ */
+ if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
+ free_user_blocks(sbi) < limit_free_user_blocks(sbi))
+ return true;
+ return false;
+}
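+
+/*
+ * Numeric illustration for has_enough_invalid_blocks(), with made-up
+ * values and the 40% limits above: for user_block_count == 100000 and
+ * written_block_count() == 50000, the 50000 blocks without valid data
+ * exceed limit_invalid_user_blocks() == 40000; if free_user_blocks() is
+ * 15000, which is below limit_free_user_blocks() == 20000, background GC
+ * is triggered.
+ */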
+
+static inline int is_idle(struct f2fs_sb_info *sbi)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct request_list *rl = &q->root_rl;
+ return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
+}
+
+static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
+{
+ unsigned int pages_per_sec = sbi->segs_per_sec *
+ (1 << sbi->log_blocks_per_seg);
+ int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
+}
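+
+/*
+ * Example for should_do_checkpoint(), assuming one segment per section and
+ * 512 blocks per segment: with 600 dirty node pages and 100 dirty dentry
+ * pages, node_secs == 2 and dent_secs == 1, so a checkpoint is forced as
+ * soon as free_sections(sbi) drops to 2 + 2 * 1 + 2 == 6 or below.
+ */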
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
new file mode 100644
index 00000000000..a60f04200f8
--- /dev/null
+++ b/fs/f2fs/hash.c
@@ -0,0 +1,97 @@
+/*
+ * fs/f2fs/hash.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Portions of this code from linux/fs/ext3/hash.c
+ *
+ * Copyright (C) 2002 by Theodore Ts'o
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/cryptohash.h>
+#include <linux/pagemap.h>
+
+#include "f2fs.h"
+
+/*
+ * Hashing code copied from ext3
+ */
+#define DELTA 0x9E3779B9
+
+static void TEA_transform(unsigned int buf[4], unsigned int const in[])
+{
+ __u32 sum = 0;
+ __u32 b0 = buf[0], b1 = buf[1];
+ __u32 a = in[0], b = in[1], c = in[2], d = in[3];
+ int n = 16;
+
+ do {
+ sum += DELTA;
+ b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
+ b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
+ } while (--n);
+
+ buf[0] += b0;
+ buf[1] += b1;
+}
+
+static void str2hashbuf(const char *msg, int len, unsigned int *buf, int num)
+{
+ unsigned pad, val;
+ int i;
+
+ pad = (__u32)len | ((__u32)len << 8);
+ pad |= pad << 16;
+
+ val = pad;
+ if (len > num * 4)
+ len = num * 4;
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0)
+ val = pad;
+ val = msg[i] + (val << 8);
+ if ((i % 4) == 3) {
+ *buf++ = val;
+ val = pad;
+ num--;
+ }
+ }
+ if (--num >= 0)
+ *buf++ = val;
+ while (--num >= 0)
+ *buf++ = pad;
+}
+
+f2fs_hash_t f2fs_dentry_hash(const char *name, int len)
+{
+ __u32 hash, minor_hash;
+ f2fs_hash_t f2fs_hash;
+ const char *p;
+ __u32 in[8], buf[4];
+
+ /* Initialize the default seed for the hash checksum functions */
+ buf[0] = 0x67452301;
+ buf[1] = 0xefcdab89;
+ buf[2] = 0x98badcfe;
+ buf[3] = 0x10325476;
+
+ p = name;
+ while (len > 0) {
+ str2hashbuf(p, len, in, 4);
+ TEA_transform(buf, in);
+ len -= 16;
+ p += 16;
+ }
+ hash = buf[0];
+ minor_hash = buf[1];
+
+ f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
+ return f2fs_hash;
+}
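+
+/*
+ * Illustration for f2fs_dentry_hash(): the name is consumed in 16-byte
+ * chunks; each chunk is packed into four 32-bit words by str2hashbuf()
+ * (short chunks are padded with a value derived from the length) and fed
+ * through TEA_transform(). A 20-byte name therefore takes two rounds, and
+ * the final hash is buf[0] with the reserved F2FS_HASH_COL_BIT masked off.
+ */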
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
new file mode 100644
index 00000000000..df5fb381ebf
--- /dev/null
+++ b/fs/f2fs/inode.c
@@ -0,0 +1,268 @@
+/*
+ * fs/f2fs/inode.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+
+#include "f2fs.h"
+#include "node.h"
+
+struct f2fs_iget_args {
+ u64 ino;
+ int on_free;
+};
+
+void f2fs_set_inode_flags(struct inode *inode)
+{
+ unsigned int flags = F2FS_I(inode)->i_flags;
+
+ inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE |
+ S_NOATIME | S_DIRSYNC);
+
+ if (flags & FS_SYNC_FL)
+ inode->i_flags |= S_SYNC;
+ if (flags & FS_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+ if (flags & FS_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ if (flags & FS_NOATIME_FL)
+ inode->i_flags |= S_NOATIME;
+ if (flags & FS_DIRSYNC_FL)
+ inode->i_flags |= S_DIRSYNC;
+}
+
+static int f2fs_iget_test(struct inode *inode, void *data)
+{
+ struct f2fs_iget_args *args = data;
+
+ if (inode->i_ino != args->ino)
+ return 0;
+ if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
+ args->on_free = 1;
+ return 0;
+ }
+ return 1;
+}
+
+struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
+{
+ struct f2fs_iget_args args = {
+ .ino = ino,
+ .on_free = 0
+ };
+ struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);
+
+ if (inode)
+ return inode;
+ if (!args.on_free)
+ return f2fs_iget(sb, ino);
+ return ERR_PTR(-ENOENT);
+}
+
+static int do_read_inode(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct page *node_page;
+ struct f2fs_node *rn;
+ struct f2fs_inode *ri;
+
+ /* Check if ino is within scope */
+ check_nid_range(sbi, inode->i_ino);
+
+ node_page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(node_page))
+ return PTR_ERR(node_page);
+
+ rn = page_address(node_page);
+ ri = &(rn->i);
+
+ inode->i_mode = le16_to_cpu(ri->i_mode);
+ i_uid_write(inode, le32_to_cpu(ri->i_uid));
+ i_gid_write(inode, le32_to_cpu(ri->i_gid));
+ set_nlink(inode, le32_to_cpu(ri->i_links));
+ inode->i_size = le64_to_cpu(ri->i_size);
+ inode->i_blocks = le64_to_cpu(ri->i_blocks);
+
+ inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
+ inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
+ inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
+ inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
+ inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
+ inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
+ inode->i_generation = le32_to_cpu(ri->i_generation);
+
+ fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
+ fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
+ fi->i_flags = le32_to_cpu(ri->i_flags);
+ fi->flags = 0;
+ fi->data_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver) - 1;
+ fi->i_advise = ri->i_advise;
+ fi->i_pino = le32_to_cpu(ri->i_pino);
+ get_extent_info(&fi->ext, ri->i_ext);
+ f2fs_put_page(node_page, 1);
+ return 0;
+}
+
+struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ int ret;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+ if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
+ goto make_now;
+
+ ret = do_read_inode(inode);
+ if (ret)
+ goto bad_inode;
+
+ if (!sbi->por_doing && inode->i_nlink == 0) {
+ ret = -ENOENT;
+ goto bad_inode;
+ }
+
+make_now:
+ if (ino == F2FS_NODE_INO(sbi)) {
+ inode->i_mapping->a_ops = &f2fs_node_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+ } else if (ino == F2FS_META_INO(sbi)) {
+ inode->i_mapping->a_ops = &f2fs_meta_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+ } else if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &f2fs_file_inode_operations;
+ inode->i_fop = &f2fs_file_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = &f2fs_dir_inode_operations;
+ inode->i_fop = &f2fs_dir_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
+ __GFP_ZERO);
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = &f2fs_symlink_inode_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ inode->i_op = &f2fs_special_inode_operations;
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ ret = -EIO;
+ goto bad_inode;
+ }
+ unlock_new_inode(inode);
+
+ return inode;
+
+bad_inode:
+ iget_failed(inode);
+ return ERR_PTR(ret);
+}
+
+void update_inode(struct inode *inode, struct page *node_page)
+{
+ struct f2fs_node *rn;
+ struct f2fs_inode *ri;
+
+ wait_on_page_writeback(node_page);
+
+ rn = page_address(node_page);
+ ri = &(rn->i);
+
+ ri->i_mode = cpu_to_le16(inode->i_mode);
+ ri->i_advise = F2FS_I(inode)->i_advise;
+ ri->i_uid = cpu_to_le32(i_uid_read(inode));
+ ri->i_gid = cpu_to_le32(i_gid_read(inode));
+ ri->i_links = cpu_to_le32(inode->i_nlink);
+ ri->i_size = cpu_to_le64(i_size_read(inode));
+ ri->i_blocks = cpu_to_le64(inode->i_blocks);
+ set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
+
+ ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
+ ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
+ ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+ ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
+ ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
+ ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
+ ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
+ ri->i_generation = cpu_to_le32(inode->i_generation);
+ set_page_dirty(node_page);
+}
+
+int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct page *node_page;
+ bool need_lock = false;
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ return 0;
+
+ node_page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(node_page))
+ return PTR_ERR(node_page);
+
+ if (!PageDirty(node_page)) {
+ need_lock = true;
+ f2fs_put_page(node_page, 1);
+ mutex_lock(&sbi->write_inode);
+ node_page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(node_page)) {
+ mutex_unlock(&sbi->write_inode);
+ return PTR_ERR(node_page);
+ }
+ }
+ update_inode(inode, node_page);
+ f2fs_put_page(node_page, 1);
+ if (need_lock)
+ mutex_unlock(&sbi->write_inode);
+ return 0;
+}
+
+/*
+ * Called at the last iput() if i_nlink is zero
+ */
+void f2fs_evict_inode(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ goto no_delete;
+
+ BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
+ remove_dirty_dir_inode(inode);
+
+ if (inode->i_nlink || is_bad_inode(inode))
+ goto no_delete;
+
+ set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
+ i_size_write(inode, 0);
+
+ if (F2FS_HAS_BLOCKS(inode))
+ f2fs_truncate(inode);
+
+ remove_inode_page(inode);
+no_delete:
+ clear_inode(inode);
+}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
new file mode 100644
index 00000000000..89b7675dc37
--- /dev/null
+++ b/fs/f2fs/namei.c
@@ -0,0 +1,503 @@
+/*
+ * fs/f2fs/namei.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+
+#include "f2fs.h"
+#include "xattr.h"
+#include "acl.h"
+
+static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
+{
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ nid_t ino;
+ struct inode *inode;
+ bool nid_free = false;
+ int err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock_op(sbi, NODE_NEW);
+ if (!alloc_nid(sbi, &ino)) {
+ mutex_unlock_op(sbi, NODE_NEW);
+ err = -ENOSPC;
+ goto fail;
+ }
+ mutex_unlock_op(sbi, NODE_NEW);
+
+ inode->i_uid = current_fsuid();
+
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else {
+ inode->i_gid = current_fsgid();
+ }
+
+ inode->i_ino = ino;
+ inode->i_mode = mode;
+ inode->i_blocks = 0;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_generation = sbi->s_next_generation++;
+
+ err = insert_inode_locked(inode);
+ if (err) {
+ err = -EINVAL;
+ nid_free = true;
+ goto out;
+ }
+
+ mark_inode_dirty(inode);
+ return inode;
+
+out:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+fail:
+ iput(inode);
+ if (nid_free)
+ alloc_nid_failed(sbi, ino);
+ return ERR_PTR(err);
+}
+
+static int is_multimedia_file(const unsigned char *s, const char *sub)
+{
+ int slen = strlen(s);
+ int sublen = strlen(sub);
+ int ret;
+
+ if (sublen > slen)
+ return 1;
+
+ ret = memcmp(s + slen - sublen, sub, sublen);
+ if (ret) { /* compare upper case */
+ int i;
+ char upper_sub[8];
+ for (i = 0; i < sublen && i < sizeof(upper_sub); i++)
+ upper_sub[i] = toupper(sub[i]);
+ return memcmp(s + slen - sublen, upper_sub, sublen);
+ }
+
+ return ret;
+}
+
+/*
+ * Set multimedia files as cold files for hot/cold data separation
+ */
+static inline void set_cold_file(struct f2fs_sb_info *sbi, struct inode *inode,
+ const unsigned char *name)
+{
+ int i;
+ __u8 (*extlist)[8] = sbi->raw_super->extension_list;
+
+ int count = le32_to_cpu(sbi->raw_super->extension_count);
+ for (i = 0; i < count; i++) {
+ if (!is_multimedia_file(name, extlist[i])) {
+ F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
+ break;
+ }
+ }
+}
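
set_cold_file() marks a new file as cold when its name ends with one of the extensions stored in the superblock, comparing the suffix verbatim and, failing that, against an upper-cased copy of the extension. A self-contained user-space sketch of the same suffix test is below; the extensions[] table is a made-up stand-in for raw_super->extension_list.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Stand-in for the extension list kept in the f2fs superblock. */
static const char *extensions[] = { "mp4", "jpg", "mp3", "avi" };

/* Returns 0 when name ends with ext (exactly, or matching an upper-cased
 * copy of ext), mirroring the memcmp-based is_multimedia_file() above. */
static int is_multimedia_file(const char *name, const char *ext)
{
	size_t nlen = strlen(name), elen = strlen(ext);
	char upper[8];
	size_t i;

	if (elen > nlen || elen >= sizeof(upper))
		return 1;
	if (!memcmp(name + nlen - elen, ext, elen))
		return 0;
	for (i = 0; i < elen; i++)
		upper[i] = toupper((unsigned char)ext[i]);
	return memcmp(name + nlen - elen, upper, elen);
}

static int should_be_cold(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(extensions) / sizeof(extensions[0]); i++)
		if (!is_multimedia_file(name, extensions[i]))
			return 1;       /* would set FADVISE_COLD_BIT */
	return 0;
}

int main(void)
{
	printf("movie.MP4 cold? %d\n", should_be_cold("movie.MP4"));
	printf("notes.txt cold? %d\n", should_be_cold("notes.txt"));
	return 0;
}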
+
+static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+{
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ nid_t ino = 0;
+ int err;
+
+ inode = f2fs_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
+ set_cold_file(sbi, inode, dentry->d_name.name);
+
+ inode->i_op = &f2fs_file_inode_operations;
+ inode->i_fop = &f2fs_file_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ ino = inode->i_ino;
+
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+ goto out;
+
+ alloc_nid_done(sbi, ino);
+
+ if (!sbi->por_doing)
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi);
+ return 0;
+out:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ alloc_nid_failed(sbi, ino);
+ return err;
+}
+
+static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err;
+
+ inode->i_ctime = CURRENT_TIME;
+ atomic_inc(&inode->i_count);
+
+ set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+ goto out;
+
+ d_instantiate(dentry, inode);
+
+ f2fs_balance_fs(sbi);
+ return 0;
+out:
+ clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ iput(inode);
+ return err;
+}
+
+struct dentry *f2fs_get_parent(struct dentry *child)
+{
+ struct qstr dotdot = QSTR_INIT("..", 2);
+ unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
+ if (!ino)
+ return ERR_PTR(-ENOENT);
+ return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
+}
+
+static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct inode *inode = NULL;
+ struct f2fs_dir_entry *de;
+ struct page *page;
+
+ if (dentry->d_name.len > F2FS_MAX_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ de = f2fs_find_entry(dir, &dentry->d_name, &page);
+ if (de) {
+ nid_t ino = le32_to_cpu(de->ino);
+ kunmap(page);
+ f2fs_put_page(page, 0);
+
+ inode = f2fs_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ }
+
+ return d_splice_alias(inode, dentry);
+}
+
+static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode = dentry->d_inode;
+ struct f2fs_dir_entry *de;
+ struct page *page;
+ int err = -ENOENT;
+
+ de = f2fs_find_entry(dir, &dentry->d_name, &page);
+ if (!de)
+ goto fail;
+
+ err = check_orphan_space(sbi);
+ if (err) {
+ kunmap(page);
+ f2fs_put_page(page, 0);
+ goto fail;
+ }
+
+ f2fs_delete_entry(de, page, inode);
+
+ /* In order to evict this inode, we set it dirty */
+ mark_inode_dirty(inode);
+ f2fs_balance_fs(sbi);
+fail:
+ return err;
+}
+
+static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ unsigned symlen = strlen(symname) + 1;
+ int err;
+
+ inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ inode->i_op = &f2fs_symlink_inode_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+ goto out;
+
+ err = page_symlink(inode, symname, symlen);
+ alloc_nid_done(sbi, inode->i_ino);
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi);
+
+ return err;
+out:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ alloc_nid_failed(sbi, inode->i_ino);
+ return err;
+}
+
+static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ struct inode *inode;
+ int err;
+
+ inode = f2fs_new_inode(dir, S_IFDIR | mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ inode->i_op = &f2fs_dir_inode_operations;
+ inode->i_fop = &f2fs_dir_operations;
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+
+ set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+ goto out_fail;
+
+ alloc_nid_done(sbi, inode->i_ino);
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi);
+ return 0;
+
+out_fail:
+ clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ alloc_nid_failed(sbi, inode->i_ino);
+ return err;
+}
+
+static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ if (f2fs_empty_dir(inode))
+ return f2fs_unlink(dir, dentry);
+ return -ENOTEMPTY;
+}
+
+static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t rdev)
+{
+ struct super_block *sb = dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+ int err = 0;
+
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+
+ inode = f2fs_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ init_special_inode(inode, inode->i_mode, rdev);
+ inode->i_op = &f2fs_special_inode_operations;
+
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+ goto out;
+
+ alloc_nid_done(sbi, inode->i_ino);
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi);
+
+ return 0;
+out:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ alloc_nid_failed(sbi, inode->i_ino);
+ return err;
+}
+
+static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct super_block *sb = old_dir->i_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
+ struct page *old_dir_page;
+ struct page *old_page;
+ struct f2fs_dir_entry *old_dir_entry = NULL;
+ struct f2fs_dir_entry *old_entry;
+ struct f2fs_dir_entry *new_entry;
+ int err = -ENOENT;
+
+ old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ if (!old_entry)
+ goto out;
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ err = -EIO;
+ old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
+ if (!old_dir_entry)
+ goto out_old;
+ }
+
+ mutex_lock_op(sbi, RENAME);
+
+ if (new_inode) {
+ struct page *new_page;
+
+ err = -ENOTEMPTY;
+ if (old_dir_entry && !f2fs_empty_dir(new_inode))
+ goto out_dir;
+
+ err = -ENOENT;
+ new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
+ &new_page);
+ if (!new_entry)
+ goto out_dir;
+
+ f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+
+ new_inode->i_ctime = CURRENT_TIME;
+ if (old_dir_entry)
+ drop_nlink(new_inode);
+ drop_nlink(new_inode);
+ if (!new_inode->i_nlink)
+ add_orphan_inode(sbi, new_inode->i_ino);
+ f2fs_write_inode(new_inode, NULL);
+ } else {
+ err = f2fs_add_link(new_dentry, old_inode);
+ if (err)
+ goto out_dir;
+
+ if (old_dir_entry) {
+ inc_nlink(new_dir);
+ f2fs_write_inode(new_dir, NULL);
+ }
+ }
+
+ old_inode->i_ctime = CURRENT_TIME;
+ set_inode_flag(F2FS_I(old_inode), FI_NEED_CP);
+ mark_inode_dirty(old_inode);
+
+ f2fs_delete_entry(old_entry, old_page, NULL);
+
+ if (old_dir_entry) {
+ if (old_dir != new_dir) {
+ f2fs_set_link(old_inode, old_dir_entry,
+ old_dir_page, new_dir);
+ } else {
+ kunmap(old_dir_page);
+ f2fs_put_page(old_dir_page, 0);
+ }
+ drop_nlink(old_dir);
+ f2fs_write_inode(old_dir, NULL);
+ }
+
+ mutex_unlock_op(sbi, RENAME);
+
+ f2fs_balance_fs(sbi);
+ return 0;
+
+out_dir:
+ if (old_dir_entry) {
+ kunmap(old_dir_page);
+ f2fs_put_page(old_dir_page, 0);
+ }
+ mutex_unlock_op(sbi, RENAME);
+out_old:
+ kunmap(old_page);
+ f2fs_put_page(old_page, 0);
+out:
+ return err;
+}
+
+const struct inode_operations f2fs_dir_inode_operations = {
+ .create = f2fs_create,
+ .lookup = f2fs_lookup,
+ .link = f2fs_link,
+ .unlink = f2fs_unlink,
+ .symlink = f2fs_symlink,
+ .mkdir = f2fs_mkdir,
+ .rmdir = f2fs_rmdir,
+ .mknod = f2fs_mknod,
+ .rename = f2fs_rename,
+ .setattr = f2fs_setattr,
+ .get_acl = f2fs_get_acl,
+#ifdef CONFIG_F2FS_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = f2fs_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+};
+
+const struct inode_operations f2fs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = f2fs_setattr,
+#ifdef CONFIG_F2FS_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = f2fs_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+};
+
+const struct inode_operations f2fs_special_inode_operations = {
+ .setattr = f2fs_setattr,
+ .get_acl = f2fs_get_acl,
+#ifdef CONFIG_F2FS_FS_XATTR
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .listxattr = f2fs_listxattr,
+ .removexattr = generic_removexattr,
+#endif
+};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
new file mode 100644
index 00000000000..19870361497
--- /dev/null
+++ b/fs/f2fs/node.c
@@ -0,0 +1,1764 @@
+/*
+ * fs/f2fs/node.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/mpage.h>
+#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
+#include <linux/pagevec.h>
+#include <linux/swap.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+
+static struct kmem_cache *nat_entry_slab;
+static struct kmem_cache *free_nid_slab;
+
+static void clear_node_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ unsigned long flags;
+
+ if (PageDirty(page)) {
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ radix_tree_tag_clear(&mapping->page_tree,
+ page_index(page),
+ PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+ clear_page_dirty_for_io(page);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ }
+ ClearPageUptodate(page);
+}
+
+static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ pgoff_t index = current_nat_addr(sbi, nid);
+ return get_meta_page(sbi, index);
+}
+
+static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct page *src_page;
+ struct page *dst_page;
+ pgoff_t src_off;
+ pgoff_t dst_off;
+ void *src_addr;
+ void *dst_addr;
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ src_off = current_nat_addr(sbi, nid);
+ dst_off = next_nat_addr(sbi, src_off);
+
+ /* get current nat block page with lock */
+ src_page = get_meta_page(sbi, src_off);
+
+ /* Dirty src_page means that it is already the new target NAT page. */
+ if (PageDirty(src_page))
+ return src_page;
+
+ dst_page = grab_meta_page(sbi, dst_off);
+
+ src_addr = page_address(src_page);
+ dst_addr = page_address(dst_page);
+ memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ set_page_dirty(dst_page);
+ f2fs_put_page(src_page, 1);
+
+ set_to_next_nat(nm_i, nid);
+
+ return dst_page;
+}
+
+/*
+ * Readahead NAT pages
+ */
+static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
+{
+ struct address_space *mapping = sbi->meta_inode->i_mapping;
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct page *page;
+ pgoff_t index;
+ int i;
+
+ for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
+ if (nid >= nm_i->max_nid)
+ nid = 0;
+ index = current_nat_addr(sbi, nid);
+
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ continue;
+ if (f2fs_readpage(sbi, page, index, READ)) {
+ f2fs_put_page(page, 1);
+ continue;
+ }
+ page_cache_release(page);
+ }
+}
+
+static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
+{
+ return radix_tree_lookup(&nm_i->nat_root, n);
+}
+
+static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
+ nid_t start, unsigned int nr, struct nat_entry **ep)
+{
+ return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
+}
+
+static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
+{
+ list_del(&e->list);
+ radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
+ nm_i->nat_cnt--;
+ kmem_cache_free(nat_entry_slab, e);
+}
+
+int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *e;
+ int is_cp = 1;
+
+ read_lock(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid);
+ if (e && !e->checkpointed)
+ is_cp = 0;
+ read_unlock(&nm_i->nat_tree_lock);
+ return is_cp;
+}
+
+static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
+{
+ struct nat_entry *new;
+
+ new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+ if (!new)
+ return NULL;
+ if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
+ kmem_cache_free(nat_entry_slab, new);
+ return NULL;
+ }
+ memset(new, 0, sizeof(struct nat_entry));
+ nat_set_nid(new, nid);
+ list_add_tail(&new->list, &nm_i->nat_entries);
+ nm_i->nat_cnt++;
+ return new;
+}
+
+static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+ struct f2fs_nat_entry *ne)
+{
+ struct nat_entry *e;
+retry:
+ write_lock(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid);
+ if (!e) {
+ e = grab_nat_entry(nm_i, nid);
+ if (!e) {
+ write_unlock(&nm_i->nat_tree_lock);
+ goto retry;
+ }
+ nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
+ nat_set_ino(e, le32_to_cpu(ne->ino));
+ nat_set_version(e, ne->version);
+ e->checkpointed = true;
+ }
+ write_unlock(&nm_i->nat_tree_lock);
+}
+
+static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
+ block_t new_blkaddr)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct nat_entry *e;
+retry:
+ write_lock(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, ni->nid);
+ if (!e) {
+ e = grab_nat_entry(nm_i, ni->nid);
+ if (!e) {
+ write_unlock(&nm_i->nat_tree_lock);
+ goto retry;
+ }
+ e->ni = *ni;
+ e->checkpointed = true;
+ BUG_ON(ni->blk_addr == NEW_ADDR);
+ } else if (new_blkaddr == NEW_ADDR) {
+ /*
+ * when a nid is reallocated, the stale nat entry may still
+ * remain in the nat cache, so reinitialize it with the new
+ * information.
+ */
+ e->ni = *ni;
+ BUG_ON(ni->blk_addr != NULL_ADDR);
+ }
+
+ if (new_blkaddr == NEW_ADDR)
+ e->checkpointed = false;
+
+ /* sanity check */
+ BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
+ BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
+ new_blkaddr == NULL_ADDR);
+ BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
+ new_blkaddr == NEW_ADDR);
+ BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
+ nat_get_blkaddr(e) != NULL_ADDR &&
+ new_blkaddr == NEW_ADDR);
+
+ /* increment the version number, as the node is removed */
+ if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
+ unsigned char version = nat_get_version(e);
+ nat_set_version(e, inc_node_version(version));
+ }
+
+ /* change address */
+ nat_set_blkaddr(e, new_blkaddr);
+ __set_nat_cache_dirty(nm_i, e);
+ write_unlock(&nm_i->nat_tree_lock);
+}
+
+static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
+ return 0;
+
+ write_lock(&nm_i->nat_tree_lock);
+ while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+ struct nat_entry *ne;
+ ne = list_first_entry(&nm_i->nat_entries,
+ struct nat_entry, list);
+ __del_from_nat_cache(nm_i, ne);
+ nr_shrink--;
+ }
+ write_unlock(&nm_i->nat_tree_lock);
+ return nr_shrink;
+}
+
+/*
+ * This function always succeeds.
+ */
+void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ nid_t start_nid = START_NID(nid);
+ struct f2fs_nat_block *nat_blk;
+ struct page *page = NULL;
+ struct f2fs_nat_entry ne;
+ struct nat_entry *e;
+ int i;
+
+ memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+ ni->nid = nid;
+
+ /* Check nat cache */
+ read_lock(&nm_i->nat_tree_lock);
+ e = __lookup_nat_cache(nm_i, nid);
+ if (e) {
+ ni->ino = nat_get_ino(e);
+ ni->blk_addr = nat_get_blkaddr(e);
+ ni->version = nat_get_version(e);
+ }
+ read_unlock(&nm_i->nat_tree_lock);
+ if (e)
+ return;
+
+ /* Check current segment summary */
+ mutex_lock(&curseg->curseg_mutex);
+ i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+ if (i >= 0) {
+ ne = nat_in_journal(sum, i);
+ node_info_from_raw_nat(ni, &ne);
+ }
+ mutex_unlock(&curseg->curseg_mutex);
+ if (i >= 0)
+ goto cache;
+
+ /* Fill node_info from nat page */
+ page = get_current_nat_page(sbi, start_nid);
+ nat_blk = (struct f2fs_nat_block *)page_address(page);
+ ne = nat_blk->entries[nid - start_nid];
+ node_info_from_raw_nat(ni, &ne);
+ f2fs_put_page(page, 1);
+cache:
+ /* cache nat entry */
+ cache_nat_entry(NM_I(sbi), nid, &ne);
+}
+
+/*
+ * The maximum depth is four.
+ * offset[0] holds the offset within the raw inode.
+ */
+static int get_node_path(long block, int offset[4], unsigned int noffset[4])
+{
+ const long direct_index = ADDRS_PER_INODE;
+ const long direct_blks = ADDRS_PER_BLOCK;
+ const long dptrs_per_blk = NIDS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
+ int n = 0;
+ int level = 0;
+
+ noffset[0] = 0;
+
+ if (block < direct_index) {
+ offset[n++] = block;
+ level = 0;
+ goto got;
+ }
+ block -= direct_index;
+ if (block < direct_blks) {
+ offset[n++] = NODE_DIR1_BLOCK;
+ noffset[n] = 1;
+ offset[n++] = block;
+ level = 1;
+ goto got;
+ }
+ block -= direct_blks;
+ if (block < direct_blks) {
+ offset[n++] = NODE_DIR2_BLOCK;
+ noffset[n] = 2;
+ offset[n++] = block;
+ level = 1;
+ goto got;
+ }
+ block -= direct_blks;
+ if (block < indirect_blks) {
+ offset[n++] = NODE_IND1_BLOCK;
+ noffset[n] = 3;
+ offset[n++] = block / direct_blks;
+ noffset[n] = 4 + offset[n - 1];
+ offset[n++] = block % direct_blks;
+ level = 2;
+ goto got;
+ }
+ block -= indirect_blks;
+ if (block < indirect_blks) {
+ offset[n++] = NODE_IND2_BLOCK;
+ noffset[n] = 4 + dptrs_per_blk;
+ offset[n++] = block / direct_blks;
+ noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
+ offset[n++] = block % direct_blks;
+ level = 2;
+ goto got;
+ }
+ block -= indirect_blks;
+ if (block < dindirect_blks) {
+ offset[n++] = NODE_DIND_BLOCK;
+ noffset[n] = 5 + (dptrs_per_blk * 2);
+ offset[n++] = block / indirect_blks;
+ noffset[n] = 6 + (dptrs_per_blk * 2) +
+ offset[n - 1] * (dptrs_per_blk + 1);
+ offset[n++] = (block / direct_blks) % dptrs_per_blk;
+ noffset[n] = 7 + (dptrs_per_blk * 2) +
+ offset[n - 2] * (dptrs_per_blk + 1) +
+ offset[n - 1];
+ offset[n++] = block % direct_blks;
+ level = 3;
+ goto got;
+ } else {
+ BUG();
+ }
+got:
+ return level;
+}
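
get_node_path() places a file block at one of four depths: direct pointers inside the inode, one of two direct node blocks, one of two indirect node blocks, or the double-indirect block. The sketch below reproduces just the depth calculation so the boundaries can be checked by hand; the pointer counts (923 addresses in the inode, 1018 per direct or indirect node block) are the usual 4 KiB-block values and are assumptions here, the authoritative constants come from f2fs_fs.h.

#include <stdio.h>

/* Assumed geometry for 4 KiB blocks; see f2fs_fs.h for the real values. */
#define ADDRS_PER_INODE 923
#define ADDRS_PER_BLOCK 1018
#define NIDS_PER_BLOCK  1018

/* Returns the node-tree depth needed to reach file block 'block':
 * 0 = inside the inode, 1 = direct node, 2 = indirect, 3 = double indirect. */
static int node_level(long block)
{
	const long direct = ADDRS_PER_BLOCK;
	const long indirect = (long)ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect = indirect * NIDS_PER_BLOCK;

	if (block < ADDRS_PER_INODE)
		return 0;
	block -= ADDRS_PER_INODE;
	if (block < 2 * direct)                 /* NODE_DIR1 / NODE_DIR2 */
		return 1;
	block -= 2 * direct;
	if (block < 2 * indirect)               /* NODE_IND1 / NODE_IND2 */
		return 2;
	block -= 2 * indirect;
	if (block < dindirect)                  /* NODE_DIND */
		return 3;
	return -1;                              /* beyond the maximum file size */
}

int main(void)
{
	long samples[] = { 0, 922, 923, 2958, 2959, 2000000, 1000000000L };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("block %10ld -> level %d\n", samples[i], node_level(samples[i]));
	return 0;
}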
+
+/*
+ * Caller should call f2fs_put_dnode(dn).
+ */
+int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct page *npage[4];
+ struct page *parent;
+ int offset[4];
+ unsigned int noffset[4];
+ nid_t nids[4];
+ int level, i;
+ int err = 0;
+
+ level = get_node_path(index, offset, noffset);
+
+ nids[0] = dn->inode->i_ino;
+ npage[0] = get_node_page(sbi, nids[0]);
+ if (IS_ERR(npage[0]))
+ return PTR_ERR(npage[0]);
+
+ parent = npage[0];
+ nids[1] = get_nid(parent, offset[0], true);
+ dn->inode_page = npage[0];
+ dn->inode_page_locked = true;
+
+ /* get indirect or direct nodes */
+ for (i = 1; i <= level; i++) {
+ bool done = false;
+
+ if (!nids[i] && !ro) {
+ mutex_lock_op(sbi, NODE_NEW);
+
+ /* alloc new node */
+ if (!alloc_nid(sbi, &(nids[i]))) {
+ mutex_unlock_op(sbi, NODE_NEW);
+ err = -ENOSPC;
+ goto release_pages;
+ }
+
+ dn->nid = nids[i];
+ npage[i] = new_node_page(dn, noffset[i]);
+ if (IS_ERR(npage[i])) {
+ alloc_nid_failed(sbi, nids[i]);
+ mutex_unlock_op(sbi, NODE_NEW);
+ err = PTR_ERR(npage[i]);
+ goto release_pages;
+ }
+
+ set_nid(parent, offset[i - 1], nids[i], i == 1);
+ alloc_nid_done(sbi, nids[i]);
+ mutex_unlock_op(sbi, NODE_NEW);
+ done = true;
+ } else if (ro && i == level && level > 1) {
+ npage[i] = get_node_page_ra(parent, offset[i - 1]);
+ if (IS_ERR(npage[i])) {
+ err = PTR_ERR(npage[i]);
+ goto release_pages;
+ }
+ done = true;
+ }
+ if (i == 1) {
+ dn->inode_page_locked = false;
+ unlock_page(parent);
+ } else {
+ f2fs_put_page(parent, 1);
+ }
+
+ if (!done) {
+ npage[i] = get_node_page(sbi, nids[i]);
+ if (IS_ERR(npage[i])) {
+ err = PTR_ERR(npage[i]);
+ f2fs_put_page(npage[0], 0);
+ goto release_out;
+ }
+ }
+ if (i < level) {
+ parent = npage[i];
+ nids[i + 1] = get_nid(parent, offset[i], false);
+ }
+ }
+ dn->nid = nids[level];
+ dn->ofs_in_node = offset[level];
+ dn->node_page = npage[level];
+ dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ return 0;
+
+release_pages:
+ f2fs_put_page(parent, 1);
+ if (i > 1)
+ f2fs_put_page(npage[0], 0);
+release_out:
+ dn->inode_page = NULL;
+ dn->node_page = NULL;
+ return err;
+}
+
+static void truncate_node(struct dnode_of_data *dn)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct node_info ni;
+
+ get_node_info(sbi, dn->nid, &ni);
+ BUG_ON(ni.blk_addr == NULL_ADDR);
+
+ if (ni.blk_addr != NULL_ADDR)
+ invalidate_blocks(sbi, ni.blk_addr);
+
+ /* Deallocate node address */
+ dec_valid_node_count(sbi, dn->inode, 1);
+ set_node_addr(sbi, &ni, NULL_ADDR);
+
+ if (dn->nid == dn->inode->i_ino) {
+ remove_orphan_inode(sbi, dn->nid);
+ dec_valid_inode_count(sbi);
+ } else {
+ sync_inode_page(dn);
+ }
+
+ clear_node_page_dirty(dn->node_page);
+ F2FS_SET_SB_DIRT(sbi);
+
+ f2fs_put_page(dn->node_page, 1);
+ dn->node_page = NULL;
+}
+
+static int truncate_dnode(struct dnode_of_data *dn)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct page *page;
+
+ if (dn->nid == 0)
+ return 1;
+
+ /* get direct node */
+ page = get_node_page(sbi, dn->nid);
+ if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
+ return 1;
+ else if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ /* Make dnode_of_data for parameter */
+ dn->node_page = page;
+ dn->ofs_in_node = 0;
+ truncate_data_blocks(dn);
+ truncate_node(dn);
+ return 1;
+}
+
+static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
+ int ofs, int depth)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct dnode_of_data rdn = *dn;
+ struct page *page;
+ struct f2fs_node *rn;
+ nid_t child_nid;
+ unsigned int child_nofs;
+ int freed = 0;
+ int i, ret;
+
+ if (dn->nid == 0)
+ return NIDS_PER_BLOCK + 1;
+
+ page = get_node_page(sbi, dn->nid);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ rn = (struct f2fs_node *)page_address(page);
+ if (depth < 3) {
+ for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
+ child_nid = le32_to_cpu(rn->in.nid[i]);
+ if (child_nid == 0)
+ continue;
+ rdn.nid = child_nid;
+ ret = truncate_dnode(&rdn);
+ if (ret < 0)
+ goto out_err;
+ set_nid(page, i, 0, false);
+ }
+ } else {
+ child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
+ for (i = ofs; i < NIDS_PER_BLOCK; i++) {
+ child_nid = le32_to_cpu(rn->in.nid[i]);
+ if (child_nid == 0) {
+ child_nofs += NIDS_PER_BLOCK + 1;
+ continue;
+ }
+ rdn.nid = child_nid;
+ ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
+ if (ret == (NIDS_PER_BLOCK + 1)) {
+ set_nid(page, i, 0, false);
+ child_nofs += ret;
+ } else if (ret < 0 && ret != -ENOENT) {
+ goto out_err;
+ }
+ }
+ freed = child_nofs;
+ }
+
+ if (!ofs) {
+ /* remove current indirect node */
+ dn->node_page = page;
+ truncate_node(dn);
+ freed++;
+ } else {
+ f2fs_put_page(page, 1);
+ }
+ return freed;
+
+out_err:
+ f2fs_put_page(page, 1);
+ return ret;
+}
+
+static int truncate_partial_nodes(struct dnode_of_data *dn,
+ struct f2fs_inode *ri, int *offset, int depth)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct page *pages[2];
+ nid_t nid[3];
+ nid_t child_nid;
+ int err = 0;
+ int i;
+ int idx = depth - 2;
+
+ nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ if (!nid[0])
+ return 0;
+
+ /* get indirect nodes in the path */
+ for (i = 0; i < depth - 1; i++) {
+ /* the reference count will be increased */
+ pages[i] = get_node_page(sbi, nid[i]);
+ if (IS_ERR(pages[i])) {
+ depth = i + 1;
+ err = PTR_ERR(pages[i]);
+ goto fail;
+ }
+ nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
+ }
+
+ /* free direct nodes linked to a partial indirect node */
+ for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
+ child_nid = get_nid(pages[idx], i, false);
+ if (!child_nid)
+ continue;
+ dn->nid = child_nid;
+ err = truncate_dnode(dn);
+ if (err < 0)
+ goto fail;
+ set_nid(pages[idx], i, 0, false);
+ }
+
+ if (offset[depth - 1] == 0) {
+ dn->node_page = pages[idx];
+ dn->nid = nid[idx];
+ truncate_node(dn);
+ } else {
+ f2fs_put_page(pages[idx], 1);
+ }
+ offset[idx]++;
+ offset[depth - 1] = 0;
+fail:
+ for (i = depth - 3; i >= 0; i--)
+ f2fs_put_page(pages[i], 1);
+ return err;
+}
+
+/*
+ * All the block addresses of data and nodes should be nullified.
+ */
+int truncate_inode_blocks(struct inode *inode, pgoff_t from)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ int err = 0, cont = 1;
+ int level, offset[4], noffset[4];
+ unsigned int nofs;
+ struct f2fs_node *rn;
+ struct dnode_of_data dn;
+ struct page *page;
+
+ level = get_node_path(from, offset, noffset);
+
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ set_new_dnode(&dn, inode, page, NULL, 0);
+ unlock_page(page);
+
+ rn = page_address(page);
+ switch (level) {
+ case 0:
+ case 1:
+ nofs = noffset[1];
+ break;
+ case 2:
+ nofs = noffset[1];
+ if (!offset[level - 1])
+ goto skip_partial;
+ err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+ if (err < 0 && err != -ENOENT)
+ goto fail;
+ nofs += 1 + NIDS_PER_BLOCK;
+ break;
+ case 3:
+ nofs = 5 + 2 * NIDS_PER_BLOCK;
+ if (!offset[level - 1])
+ goto skip_partial;
+ err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+ if (err < 0 && err != -ENOENT)
+ goto fail;
+ break;
+ default:
+ BUG();
+ }
+
+skip_partial:
+ while (cont) {
+ dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
+ switch (offset[0]) {
+ case NODE_DIR1_BLOCK:
+ case NODE_DIR2_BLOCK:
+ err = truncate_dnode(&dn);
+ break;
+
+ case NODE_IND1_BLOCK:
+ case NODE_IND2_BLOCK:
+ err = truncate_nodes(&dn, nofs, offset[1], 2);
+ break;
+
+ case NODE_DIND_BLOCK:
+ err = truncate_nodes(&dn, nofs, offset[1], 3);
+ cont = 0;
+ break;
+
+ default:
+ BUG();
+ }
+ if (err < 0 && err != -ENOENT)
+ goto fail;
+ if (offset[1] == 0 &&
+ rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
+ lock_page(page);
+ wait_on_page_writeback(page);
+ rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
+ set_page_dirty(page);
+ unlock_page(page);
+ }
+ offset[1] = 0;
+ offset[0]++;
+ nofs += err;
+ }
+fail:
+ f2fs_put_page(page, 0);
+ return err > 0 ? 0 : err;
+}
+
+int remove_inode_page(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct page *page;
+ nid_t ino = inode->i_ino;
+ struct dnode_of_data dn;
+
+ mutex_lock_op(sbi, NODE_TRUNC);
+ page = get_node_page(sbi, ino);
+ if (IS_ERR(page)) {
+ mutex_unlock_op(sbi, NODE_TRUNC);
+ return PTR_ERR(page);
+ }
+
+ if (F2FS_I(inode)->i_xattr_nid) {
+ nid_t nid = F2FS_I(inode)->i_xattr_nid;
+ struct page *npage = get_node_page(sbi, nid);
+
+ if (IS_ERR(npage)) {
+ mutex_unlock_op(sbi, NODE_TRUNC);
+ return PTR_ERR(npage);
+ }
+
+ F2FS_I(inode)->i_xattr_nid = 0;
+ set_new_dnode(&dn, inode, page, npage, nid);
+ dn.inode_page_locked = 1;
+ truncate_node(&dn);
+ }
+ if (inode->i_blocks == 1) {
+ /* internally calls f2fs_put_page() */
+ set_new_dnode(&dn, inode, page, page, ino);
+ truncate_node(&dn);
+ } else if (inode->i_blocks == 0) {
+ struct node_info ni;
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ /* called after f2fs_new_inode() is failed */
+ BUG_ON(ni.blk_addr != NULL_ADDR);
+ f2fs_put_page(page, 1);
+ } else {
+ BUG();
+ }
+ mutex_unlock_op(sbi, NODE_TRUNC);
+ return 0;
+}
+
+int new_inode_page(struct inode *inode, struct dentry *dentry)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct page *page;
+ struct dnode_of_data dn;
+
+ /* allocate inode page for new inode */
+ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+ mutex_lock_op(sbi, NODE_NEW);
+ page = new_node_page(&dn, 0);
+ init_dent_inode(dentry, page);
+ mutex_unlock_op(sbi, NODE_NEW);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ f2fs_put_page(page, 1);
+ return 0;
+}
+
+struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct node_info old_ni, new_ni;
+ struct page *page;
+ int err;
+
+ if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
+ return ERR_PTR(-EPERM);
+
+ page = grab_cache_page(mapping, dn->nid);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ get_node_info(sbi, dn->nid, &old_ni);
+
+ SetPageUptodate(page);
+ fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
+
+ /* Reinitialize old_ni with new node page */
+ BUG_ON(old_ni.blk_addr != NULL_ADDR);
+ new_ni = old_ni;
+ new_ni.ino = dn->inode->i_ino;
+
+ if (!inc_valid_node_count(sbi, dn->inode, 1)) {
+ err = -ENOSPC;
+ goto fail;
+ }
+ set_node_addr(sbi, &new_ni, NEW_ADDR);
+
+ dn->node_page = page;
+ sync_inode_page(dn);
+ set_page_dirty(page);
+ set_cold_node(dn->inode, page);
+ if (ofs == 0)
+ inc_valid_inode_count(sbi);
+
+ return page;
+
+fail:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+}
+
+static int read_node_page(struct page *page, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ struct node_info ni;
+
+ get_node_info(sbi, page->index, &ni);
+
+ if (ni.blk_addr == NULL_ADDR)
+ return -ENOENT;
+ return f2fs_readpage(sbi, page, ni.blk_addr, type);
+}
+
+/*
+ * Readahead a node page
+ */
+void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct page *apage;
+
+ apage = find_get_page(mapping, nid);
+ if (apage && PageUptodate(apage))
+ goto release_out;
+ f2fs_put_page(apage, 0);
+
+ apage = grab_cache_page(mapping, nid);
+ if (!apage)
+ return;
+
+ if (read_node_page(apage, READA))
+ goto unlock_out;
+
+ page_cache_release(apage);
+ return;
+
+unlock_out:
+ unlock_page(apage);
+release_out:
+ page_cache_release(apage);
+}
+
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+ int err;
+ struct page *page;
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+
+ page = grab_cache_page(mapping, nid);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ err = read_node_page(page, READ_SYNC);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
+
+ BUG_ON(nid != nid_of_node(page));
+ mark_page_accessed(page);
+ return page;
+}
+
+/*
+ * Return a locked page for the desired node page.
+ * Also readahead MAX_RA_NODE node pages.
+ */
+struct page *get_node_page_ra(struct page *parent, int start)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ int i, end;
+ int err = 0;
+ nid_t nid;
+ struct page *page;
+
+ /* First, try getting the desired direct node. */
+ nid = get_nid(parent, start, false);
+ if (!nid)
+ return ERR_PTR(-ENOENT);
+
+ page = find_get_page(mapping, nid);
+ if (page && PageUptodate(page))
+ goto page_hit;
+ f2fs_put_page(page, 0);
+
+repeat:
+ page = grab_cache_page(mapping, nid);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ err = read_node_page(page, READA);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
+
+ /* Then, try readahead for siblings of the desired node */
+ end = start + MAX_RA_NODE;
+ end = min(end, NIDS_PER_BLOCK);
+ for (i = start + 1; i < end; i++) {
+ nid = get_nid(parent, i, false);
+ if (!nid)
+ continue;
+ ra_node_page(sbi, nid);
+ }
+
+page_hit:
+ lock_page(page);
+ if (PageError(page)) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
+
+ /* Has the page been truncated? */
+ if (page->mapping != mapping) {
+ f2fs_put_page(page, 1);
+ goto repeat;
+ }
+ return page;
+}
+
+void sync_inode_page(struct dnode_of_data *dn)
+{
+ if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
+ update_inode(dn->inode, dn->node_page);
+ } else if (dn->inode_page) {
+ if (!dn->inode_page_locked)
+ lock_page(dn->inode_page);
+ update_inode(dn->inode, dn->inode_page);
+ if (!dn->inode_page_locked)
+ unlock_page(dn->inode_page);
+ } else {
+ f2fs_write_inode(dn->inode, NULL);
+ }
+}
+
+int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
+ struct writeback_control *wbc)
+{
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ pgoff_t index, end;
+ struct pagevec pvec;
+ int step = ino ? 2 : 0;
+ int nwritten = 0, wrote = 0;
+
+ pagevec_init(&pvec, 0);
+
+next_step:
+ index = 0;
+ end = LONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ /*
+ * flushing sequence with step:
+ * 0. indirect nodes
+ * 1. dentry dnodes
+ * 2. file dnodes
+ */
+ if (step == 0 && IS_DNODE(page))
+ continue;
+ if (step == 1 && (!IS_DNODE(page) ||
+ is_cold_node(page)))
+ continue;
+ if (step == 2 && (!IS_DNODE(page) ||
+ !is_cold_node(page)))
+ continue;
+
+ /*
+ * In fsync mode, we should not skip
+ * writing node pages.
+ */
+ if (ino && ino_of_node(page) == ino)
+ lock_page(page);
+ else if (!trylock_page(page))
+ continue;
+
+ if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino && ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ /* called by fsync() */
+ if (ino && IS_DNODE(page)) {
+ int mark = !is_checkpointed_node(sbi, ino);
+ set_fsync_mark(page, 1);
+ if (IS_INODE(page))
+ set_dentry_mark(page, mark);
+ nwritten++;
+ } else {
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
+ }
+ mapping->a_ops->writepage(page, wbc);
+ wrote++;
+
+ if (--wbc->nr_to_write == 0)
+ break;
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+
+ if (wbc->nr_to_write == 0) {
+ step = 2;
+ break;
+ }
+ }
+
+ if (step < 2) {
+ step++;
+ goto next_step;
+ }
+
+ if (wrote)
+ f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
+
+ return nwritten;
+}
+
+static int f2fs_write_node_page(struct page *page,
+ struct writeback_control *wbc)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ nid_t nid;
+ unsigned int nofs;
+ block_t new_addr;
+ struct node_info ni;
+
+ if (wbc->for_reclaim) {
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ wbc->pages_skipped++;
+ set_page_dirty(page);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+
+ wait_on_page_writeback(page);
+
+ mutex_lock_op(sbi, NODE_WRITE);
+
+ /* get old block addr of this node page */
+ nid = nid_of_node(page);
+ nofs = ofs_of_node(page);
+ BUG_ON(page->index != nid);
+
+ get_node_info(sbi, nid, &ni);
+
+ /* This page is already truncated */
+ if (ni.blk_addr == NULL_ADDR) {
+ mutex_unlock_op(sbi, NODE_WRITE);
+ unlock_page(page);
+ return 0;
+ }
+
+ set_page_writeback(page);
+
+ /* insert node offset */
+ write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
+ set_node_addr(sbi, &ni, new_addr);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+
+ mutex_unlock_op(sbi, NODE_WRITE);
+ unlock_page(page);
+ return 0;
+}
+
+static int f2fs_write_node_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+ struct block_device *bdev = sbi->sb->s_bdev;
+ long nr_to_write = wbc->nr_to_write;
+
+ if (wbc->for_kupdate)
+ return 0;
+
+ if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
+ return 0;
+
+ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
+ write_checkpoint(sbi, false, false);
+ return 0;
+ }
+
+ /* if mounting failed, skip writing node pages */
+ wbc->nr_to_write = bio_get_nr_vecs(bdev);
+ sync_node_pages(sbi, 0, wbc);
+ wbc->nr_to_write = nr_to_write -
+ (bio_get_nr_vecs(bdev) - wbc->nr_to_write);
+ return 0;
+}
+
+static int f2fs_set_node_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+
+ SetPageUptodate(page);
+ if (!PageDirty(page)) {
+ __set_page_dirty_nobuffers(page);
+ inc_page_count(sbi, F2FS_DIRTY_NODES);
+ SetPagePrivate(page);
+ return 1;
+ }
+ return 0;
+}
+
+static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
+{
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ if (PageDirty(page))
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ ClearPagePrivate(page);
+}
+
+static int f2fs_release_node_page(struct page *page, gfp_t wait)
+{
+ ClearPagePrivate(page);
+ return 0;
+}
+
+/*
+ * Structure of the f2fs node operations
+ */
+const struct address_space_operations f2fs_node_aops = {
+ .writepage = f2fs_write_node_page,
+ .writepages = f2fs_write_node_pages,
+ .set_page_dirty = f2fs_set_node_page_dirty,
+ .invalidatepage = f2fs_invalidate_node_page,
+ .releasepage = f2fs_release_node_page,
+};
+
+static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
+{
+ struct list_head *this;
+ struct free_nid *i = NULL;
+ list_for_each(this, head) {
+ i = list_entry(this, struct free_nid, list);
+ if (i->nid == n)
+ break;
+ i = NULL;
+ }
+ return i;
+}
+
+static void __del_from_free_nid_list(struct free_nid *i)
+{
+ list_del(&i->list);
+ kmem_cache_free(free_nid_slab, i);
+}
+
+static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+{
+ struct free_nid *i;
+
+ if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
+ return 0;
+retry:
+ i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
+ if (!i) {
+ cond_resched();
+ goto retry;
+ }
+ i->nid = nid;
+ i->state = NID_NEW;
+
+ spin_lock(&nm_i->free_nid_list_lock);
+ if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
+ spin_unlock(&nm_i->free_nid_list_lock);
+ kmem_cache_free(free_nid_slab, i);
+ return 0;
+ }
+ list_add_tail(&i->list, &nm_i->free_nid_list);
+ nm_i->fcnt++;
+ spin_unlock(&nm_i->free_nid_list_lock);
+ return 1;
+}
+
+static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+{
+ struct free_nid *i;
+ spin_lock(&nm_i->free_nid_list_lock);
+ i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+ if (i && i->state == NID_NEW) {
+ __del_from_free_nid_list(i);
+ nm_i->fcnt--;
+ }
+ spin_unlock(&nm_i->free_nid_list_lock);
+}
+
+static int scan_nat_page(struct f2fs_nm_info *nm_i,
+ struct page *nat_page, nid_t start_nid)
+{
+ struct f2fs_nat_block *nat_blk = page_address(nat_page);
+ block_t blk_addr;
+ int fcnt = 0;
+ int i;
+
+ /* 0 nid should not be used */
+ if (start_nid == 0)
+ ++start_nid;
+
+ i = start_nid % NAT_ENTRY_PER_BLOCK;
+
+ for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
+ blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
+ BUG_ON(blk_addr == NEW_ADDR);
+ if (blk_addr == NULL_ADDR)
+ fcnt += add_free_nid(nm_i, start_nid);
+ }
+ return fcnt;
+}
+
+static void build_free_nids(struct f2fs_sb_info *sbi)
+{
+ struct free_nid *fnid, *next_fnid;
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ nid_t nid = 0;
+ bool is_cycled = false;
+ int fcnt = 0;
+ int i;
+
+ nid = nm_i->next_scan_nid;
+ nm_i->init_scan_nid = nid;
+
+ ra_nat_pages(sbi, nid);
+
+ while (1) {
+ struct page *page = get_current_nat_page(sbi, nid);
+
+ fcnt += scan_nat_page(nm_i, page, nid);
+ f2fs_put_page(page, 1);
+
+ nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
+
+ if (nid >= nm_i->max_nid) {
+ nid = 0;
+ is_cycled = true;
+ }
+ if (fcnt > MAX_FREE_NIDS)
+ break;
+ if (is_cycled && nm_i->init_scan_nid <= nid)
+ break;
+ }
+
+ nm_i->next_scan_nid = nid;
+
+ /* find free nids from current sum_pages */
+ mutex_lock(&curseg->curseg_mutex);
+ for (i = 0; i < nats_in_cursum(sum); i++) {
+ block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(sum, i));
+ if (addr == NULL_ADDR)
+ add_free_nid(nm_i, nid);
+ else
+ remove_free_nid(nm_i, nid);
+ }
+ mutex_unlock(&curseg->curseg_mutex);
+
+ /* drop free nids that the nat cache shows as already allocated */
+ list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
+ struct nat_entry *ne;
+
+ read_lock(&nm_i->nat_tree_lock);
+ ne = __lookup_nat_cache(nm_i, fnid->nid);
+ if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+ remove_free_nid(nm_i, fnid->nid);
+ read_unlock(&nm_i->nat_tree_lock);
+ }
+}
+
+/*
+ * If this function returns success, the caller can obtain a new nid
+ * from the second parameter of this function.
+ * The returned nid can be used as an ino as well as a nid when an inode is created.
+ */
+bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *i = NULL;
+ struct list_head *this;
+retry:
+ mutex_lock(&nm_i->build_lock);
+ if (!nm_i->fcnt) {
+ /* scan NAT in order to build free nid list */
+ build_free_nids(sbi);
+ if (!nm_i->fcnt) {
+ mutex_unlock(&nm_i->build_lock);
+ return false;
+ }
+ }
+ mutex_unlock(&nm_i->build_lock);
+
+ /*
+ * We check fcnt again because the previous check was racy:
+ * free_nid_list_lock was not held, so another thread could
+ * have consumed all of the free nids in the meantime.
+ */
+ spin_lock(&nm_i->free_nid_list_lock);
+ if (!nm_i->fcnt) {
+ spin_unlock(&nm_i->free_nid_list_lock);
+ goto retry;
+ }
+
+ BUG_ON(list_empty(&nm_i->free_nid_list));
+ list_for_each(this, &nm_i->free_nid_list) {
+ i = list_entry(this, struct free_nid, list);
+ if (i->state == NID_NEW)
+ break;
+ }
+
+ BUG_ON(i->state != NID_NEW);
+ *nid = i->nid;
+ i->state = NID_ALLOC;
+ nm_i->fcnt--;
+ spin_unlock(&nm_i->free_nid_list_lock);
+ return true;
+}
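
alloc_nid() uses a check, rebuild, re-check pattern: the free-nid pool is rebuilt under build_lock when it looks empty, but since the list spinlock is not held across that step the count is re-checked (and the whole sequence retried) once the spinlock is taken. A user-space sketch of the same pattern follows; every name here is a stand-in, plain pthread mutexes replace the kernel's mutex/spinlock pair, and fcnt is atomic so the quick check outside list_lock is well-defined C (the kernel simply tolerates that race and re-checks under the lock).

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t build_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock  = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int fcnt;                /* cached free ids, like nm_i->fcnt */
static int next_id = 100;               /* fake id source */

/* Stand-in for build_free_nids(): pretend a NAT scan found 8 free ids. */
static void rebuild_pool(void)
{
	pthread_mutex_lock(&list_lock);
	fcnt += 8;
	pthread_mutex_unlock(&list_lock);
}

static bool alloc_id(int *out)
{
retry:
	pthread_mutex_lock(&build_lock);
	if (!fcnt) {
		rebuild_pool();
		if (!fcnt) {
			pthread_mutex_unlock(&build_lock);
			return false;           /* genuinely exhausted */
		}
	}
	pthread_mutex_unlock(&build_lock);

	/* Another thread may have drained the pool between dropping
	 * build_lock and taking list_lock, so re-check and retry. */
	pthread_mutex_lock(&list_lock);
	if (!fcnt) {
		pthread_mutex_unlock(&list_lock);
		goto retry;
	}
	fcnt--;
	*out = next_id++;                       /* stand-in for picking a NID_NEW entry */
	pthread_mutex_unlock(&list_lock);
	return true;
}

int main(void)
{
	int id;

	if (alloc_id(&id))
		printf("allocated id %d\n", id);
	return 0;
}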
+
+/*
+ * alloc_nid() should be called prior to this function.
+ */
+void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *i;
+
+ spin_lock(&nm_i->free_nid_list_lock);
+ i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+ if (i) {
+ BUG_ON(i->state != NID_ALLOC);
+ __del_from_free_nid_list(i);
+ }
+ spin_unlock(&nm_i->free_nid_list_lock);
+}
+
+/*
+ * alloc_nid() should be called prior to this function.
+ */
+void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
+{
+ alloc_nid_done(sbi, nid);
+ add_free_nid(NM_I(sbi), nid);
+}
+
+void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
+ struct f2fs_summary *sum, struct node_info *ni,
+ block_t new_blkaddr)
+{
+ rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
+ set_node_addr(sbi, ni, new_blkaddr);
+ clear_node_page_dirty(page);
+}
+
+int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct f2fs_node *src, *dst;
+ nid_t ino = ino_of_node(page);
+ struct node_info old_ni, new_ni;
+ struct page *ipage;
+
+ ipage = grab_cache_page(mapping, ino);
+ if (!ipage)
+ return -ENOMEM;
+
+ /* this inode should not be reused from the free nid list */
+ remove_free_nid(NM_I(sbi), ino);
+
+ get_node_info(sbi, ino, &old_ni);
+ SetPageUptodate(ipage);
+ fill_node_footer(ipage, ino, ino, 0, true);
+
+ src = (struct f2fs_node *)page_address(page);
+ dst = (struct f2fs_node *)page_address(ipage);
+
+ memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
+ dst->i.i_size = 0;
+ dst->i.i_blocks = cpu_to_le64(1);
+ dst->i.i_links = cpu_to_le32(1);
+ dst->i.i_xattr_nid = 0;
+
+ new_ni = old_ni;
+ new_ni.ino = ino;
+
+ set_node_addr(sbi, &new_ni, NEW_ADDR);
+ inc_valid_inode_count(sbi);
+
+ f2fs_put_page(ipage, 1);
+ return 0;
+}
+
+int restore_node_summary(struct f2fs_sb_info *sbi,
+ unsigned int segno, struct f2fs_summary_block *sum)
+{
+ struct f2fs_node *rn;
+ struct f2fs_summary *sum_entry;
+ struct page *page;
+ block_t addr;
+ int i, last_offset;
+
+ /* allocate a temporary page for reading node blocks */
+ page = alloc_page(GFP_NOFS | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+ lock_page(page);
+
+ /* scan the node segment */
+ last_offset = sbi->blocks_per_seg;
+ addr = START_BLOCK(sbi, segno);
+ sum_entry = &sum->entries[0];
+
+ for (i = 0; i < last_offset; i++, sum_entry++) {
+ if (f2fs_readpage(sbi, page, addr, READ_SYNC))
+ goto out;
+
+ rn = (struct f2fs_node *)page_address(page);
+ sum_entry->nid = rn->footer.nid;
+ sum_entry->version = 0;
+ sum_entry->ofs_in_node = 0;
+ addr++;
+
+ /*
+ * In order to read next node page,
+ * we must clear PageUptodate flag.
+ */
+ ClearPageUptodate(page);
+ }
+out:
+ unlock_page(page);
+ __free_pages(page, 0);
+ return 0;
+}
+
+static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ int i;
+
+ mutex_lock(&curseg->curseg_mutex);
+
+ if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
+ mutex_unlock(&curseg->curseg_mutex);
+ return false;
+ }
+
+ for (i = 0; i < nats_in_cursum(sum); i++) {
+ struct nat_entry *ne;
+ struct f2fs_nat_entry raw_ne;
+ nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+
+ raw_ne = nat_in_journal(sum, i);
+retry:
+ write_lock(&nm_i->nat_tree_lock);
+ ne = __lookup_nat_cache(nm_i, nid);
+ if (ne) {
+ __set_nat_cache_dirty(nm_i, ne);
+ write_unlock(&nm_i->nat_tree_lock);
+ continue;
+ }
+ ne = grab_nat_entry(nm_i, nid);
+ if (!ne) {
+ write_unlock(&nm_i->nat_tree_lock);
+ goto retry;
+ }
+ nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
+ nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
+ nat_set_version(ne, raw_ne.version);
+ __set_nat_cache_dirty(nm_i, ne);
+ write_unlock(&nm_i->nat_tree_lock);
+ }
+ update_nats_in_cursum(sum, -i);
+ mutex_unlock(&curseg->curseg_mutex);
+ return true;
+}
+
+/*
+ * This function is called during the checkpointing process.
+ */
+void flush_nat_entries(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct list_head *cur, *n;
+ struct page *page = NULL;
+ struct f2fs_nat_block *nat_blk = NULL;
+ nid_t start_nid = 0, end_nid = 0;
+ bool flushed;
+
+ flushed = flush_nats_in_journal(sbi);
+
+ if (!flushed)
+ mutex_lock(&curseg->curseg_mutex);
+
+ /* 1) flush dirty nat caches */
+ list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
+ struct nat_entry *ne;
+ nid_t nid;
+ struct f2fs_nat_entry raw_ne;
+ int offset = -1;
+ block_t old_blkaddr, new_blkaddr;
+
+ ne = list_entry(cur, struct nat_entry, list);
+ nid = nat_get_nid(ne);
+
+ if (nat_get_blkaddr(ne) == NEW_ADDR)
+ continue;
+ if (flushed)
+ goto to_nat_page;
+
+ /* if there is room for nat entries in curseg->sum_blk */
+ offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
+ if (offset >= 0) {
+ raw_ne = nat_in_journal(sum, offset);
+ old_blkaddr = le32_to_cpu(raw_ne.block_addr);
+ goto flush_now;
+ }
+to_nat_page:
+ if (!page || (start_nid > nid || nid > end_nid)) {
+ if (page) {
+ f2fs_put_page(page, 1);
+ page = NULL;
+ }
+ start_nid = START_NID(nid);
+ end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
+
+ /*
+ * get the nat block page: dirtied, with an elevated
+ * reference count, mapped and locked
+ */
+ page = get_next_nat_page(sbi, start_nid);
+ nat_blk = page_address(page);
+ }
+
+ BUG_ON(!nat_blk);
+ raw_ne = nat_blk->entries[nid - start_nid];
+ old_blkaddr = le32_to_cpu(raw_ne.block_addr);
+flush_now:
+ new_blkaddr = nat_get_blkaddr(ne);
+
+ raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
+ raw_ne.block_addr = cpu_to_le32(new_blkaddr);
+ raw_ne.version = nat_get_version(ne);
+
+ if (offset < 0) {
+ nat_blk->entries[nid - start_nid] = raw_ne;
+ } else {
+ nat_in_journal(sum, offset) = raw_ne;
+ nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ }
+
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
+ write_lock(&nm_i->nat_tree_lock);
+ __del_from_nat_cache(nm_i, ne);
+ write_unlock(&nm_i->nat_tree_lock);
+
+ /* We can reuse this freed nid at this point */
+ add_free_nid(NM_I(sbi), nid);
+ } else {
+ write_lock(&nm_i->nat_tree_lock);
+ __clear_nat_cache_dirty(nm_i, ne);
+ ne->checkpointed = true;
+ write_unlock(&nm_i->nat_tree_lock);
+ }
+ }
+ if (!flushed)
+ mutex_unlock(&curseg->curseg_mutex);
+ f2fs_put_page(page, 1);
+
+ /* 2) shrink nat caches if necessary */
+ try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
+}
+
+static int init_node_manager(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned char *version_bitmap;
+ unsigned int nat_segs, nat_blocks;
+
+ nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
+
+ /* segment_count_nat includes the paired segments, so divide by 2 */
+ nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
+ nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
+ nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
+ nm_i->fcnt = 0;
+ nm_i->nat_cnt = 0;
+
+ INIT_LIST_HEAD(&nm_i->free_nid_list);
+ INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
+ INIT_LIST_HEAD(&nm_i->nat_entries);
+ INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
+
+ mutex_init(&nm_i->build_lock);
+ spin_lock_init(&nm_i->free_nid_list_lock);
+ rwlock_init(&nm_i->nat_tree_lock);
+
+ nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
+ nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
+ nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
+
+ nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
+ if (!nm_i->nat_bitmap)
+ return -ENOMEM;
+ version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
+ if (!version_bitmap)
+ return -EFAULT;
+
+ /* copy version bitmap */
+ memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
+ return 0;
+}
+
+int build_node_manager(struct f2fs_sb_info *sbi)
+{
+ int err;
+
+ sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
+ if (!sbi->nm_info)
+ return -ENOMEM;
+
+ err = init_node_manager(sbi);
+ if (err)
+ return err;
+
+ build_free_nids(sbi);
+ return 0;
+}
+
+void destroy_node_manager(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *i, *next_i;
+ struct nat_entry *natvec[NATVEC_SIZE];
+ nid_t nid = 0;
+ unsigned int found;
+
+ if (!nm_i)
+ return;
+
+ /* destroy free nid list */
+ spin_lock(&nm_i->free_nid_list_lock);
+ list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
+ BUG_ON(i->state == NID_ALLOC);
+ __del_from_free_nid_list(i);
+ nm_i->fcnt--;
+ }
+ BUG_ON(nm_i->fcnt);
+ spin_unlock(&nm_i->free_nid_list_lock);
+
+ /* destroy nat cache */
+ write_lock(&nm_i->nat_tree_lock);
+ while ((found = __gang_lookup_nat_cache(nm_i,
+ nid, NATVEC_SIZE, natvec))) {
+ unsigned idx;
+ for (idx = 0; idx < found; idx++) {
+ struct nat_entry *e = natvec[idx];
+ nid = nat_get_nid(e) + 1;
+ __del_from_nat_cache(nm_i, e);
+ }
+ }
+ BUG_ON(nm_i->nat_cnt);
+ write_unlock(&nm_i->nat_tree_lock);
+
+ kfree(nm_i->nat_bitmap);
+ sbi->nm_info = NULL;
+ kfree(nm_i);
+}
+
+int create_node_manager_caches(void)
+{
+ nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
+ sizeof(struct nat_entry), NULL);
+ if (!nat_entry_slab)
+ return -ENOMEM;
+
+ free_nid_slab = f2fs_kmem_cache_create("free_nid",
+ sizeof(struct free_nid), NULL);
+ if (!free_nid_slab) {
+ kmem_cache_destroy(nat_entry_slab);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void destroy_node_manager_caches(void)
+{
+ kmem_cache_destroy(free_nid_slab);
+ kmem_cache_destroy(nat_entry_slab);
+}
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
new file mode 100644
index 00000000000..afdb130f782
--- /dev/null
+++ b/fs/f2fs/node.h
@@ -0,0 +1,353 @@
+/*
+ * fs/f2fs/node.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* start node id of a node block dedicated to the given node id */
+#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
+
+/* node block offset on the NAT area dedicated to the given start node id */
+#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
+
+/* # of pages to perform readahead before building free nids */
+#define FREE_NID_PAGES 4
+
+/* maximum # of free node ids to produce during build_free_nids */
+#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
+
+/* maximum readahead size for node pages while getting data blocks */
+#define MAX_RA_NODE 128
+
+/* maximum cached nat entries to manage memory footprint */
+#define NM_WOUT_THRESHOLD (64 * NAT_ENTRY_PER_BLOCK)
+
+/* vector size for gang look-up in the nat cache, which is a radix tree */
+#define NATVEC_SIZE 64
+
+/*
+ * For node information
+ */
+struct node_info {
+ nid_t nid; /* node id */
+ nid_t ino; /* inode number of the node's owner */
+ block_t blk_addr; /* block address of the node */
+ unsigned char version; /* version of the node */
+};
+
+struct nat_entry {
+ struct list_head list; /* for clean or dirty nat list */
+ bool checkpointed; /* whether it is checkpointed or not */
+ struct node_info ni; /* in-memory node information */
+};
+
+#define nat_get_nid(nat) (nat->ni.nid)
+#define nat_set_nid(nat, n) (nat->ni.nid = n)
+#define nat_get_blkaddr(nat) (nat->ni.blk_addr)
+#define nat_set_blkaddr(nat, b) (nat->ni.blk_addr = b)
+#define nat_get_ino(nat) (nat->ni.ino)
+#define nat_set_ino(nat, i) (nat->ni.ino = i)
+#define nat_get_version(nat) (nat->ni.version)
+#define nat_set_version(nat, v) (nat->ni.version = v)
+
+#define __set_nat_cache_dirty(nm_i, ne) \
+ list_move_tail(&ne->list, &nm_i->dirty_nat_entries);
+#define __clear_nat_cache_dirty(nm_i, ne) \
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+#define inc_node_version(version) (++version)
+
+static inline void node_info_from_raw_nat(struct node_info *ni,
+ struct f2fs_nat_entry *raw_ne)
+{
+ ni->ino = le32_to_cpu(raw_ne->ino);
+ ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
+ ni->version = raw_ne->version;
+}
+
+/*
+ * For free nid management
+ */
+enum nid_state {
+ NID_NEW, /* newly added to free nid list */
+ NID_ALLOC /* it is allocated */
+};
+
+struct free_nid {
+ struct list_head list; /* for free node id list */
+ nid_t nid; /* node id */
+ int state; /* in use or not: NID_NEW or NID_ALLOC */
+};
+
+static inline int next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct free_nid *fnid;
+
+ if (nm_i->fcnt <= 0)
+ return -1;
+ spin_lock(&nm_i->free_nid_list_lock);
+ fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+ *nid = fnid->nid;
+ spin_unlock(&nm_i->free_nid_list_lock);
+ return 0;
+}
+
+/*
+ * inline functions
+ */
+static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
+}
+
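+/*
+ * Each NAT block has two on-disk copies, laid out in a pair of NAT segments.
+ * current_nat_addr() returns the copy that is valid as of the last
+ * checkpoint, selected by the per-block bit in nat_bitmap; next_nat_addr()
+ * returns the other copy, which the next checkpoint writes to, and
+ * set_to_next_nat() flips the bit accordingly.
+ */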
+static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ pgoff_t block_off;
+ pgoff_t block_addr;
+ int seg_off;
+
+ block_off = NAT_BLOCK_OFFSET(start);
+ seg_off = block_off >> sbi->log_blocks_per_seg;
+
+ block_addr = (pgoff_t)(nm_i->nat_blkaddr +
+ (seg_off << sbi->log_blocks_per_seg << 1) +
+ (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
+
+ if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
+ block_addr += sbi->blocks_per_seg;
+
+ return block_addr;
+}
+
+static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
+ pgoff_t block_addr)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ block_addr -= nm_i->nat_blkaddr;
+ if ((block_addr >> sbi->log_blocks_per_seg) % 2)
+ block_addr -= sbi->blocks_per_seg;
+ else
+ block_addr += sbi->blocks_per_seg;
+
+ return block_addr + nm_i->nat_blkaddr;
+}
+
+static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
+{
+ unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);
+
+ if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
+ f2fs_clear_bit(block_off, nm_i->nat_bitmap);
+ else
+ f2fs_set_bit(block_off, nm_i->nat_bitmap);
+}
+
+static inline void fill_node_footer(struct page *page, nid_t nid,
+ nid_t ino, unsigned int ofs, bool reset)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ if (reset)
+ memset(rn, 0, sizeof(*rn));
+ rn->footer.nid = cpu_to_le32(nid);
+ rn->footer.ino = cpu_to_le32(ino);
+ rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT);
+}
+
+static inline void copy_node_footer(struct page *dst, struct page *src)
+{
+ void *src_addr = page_address(src);
+ void *dst_addr = page_address(dst);
+ struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
+ struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
+ memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
+}
+
+static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ rn->footer.cp_ver = ckpt->checkpoint_ver;
+ rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
+}
+
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline unsigned long long cpver_of_node(struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ return le32_to_cpu(rn->footer.next_blkaddr);
+}
+
+/*
+ * f2fs assigns the following node offsets, shown as (num).
+ * N = NIDS_PER_BLOCK
+ *
+ * Inode block (0)
+ * |- direct node (1)
+ * |- direct node (2)
+ * |- indirect node (3)
+ * | `- direct node (4 => 4 + N - 1)
+ * |- indirect node (4 + N)
+ * | `- direct node (5 + N => 5 + 2N - 1)
+ * `- double indirect node (5 + 2N)
+ * `- indirect node (6 + 2N)
+ * `- direct node (x(N + 1))
+ */
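+/*
+ * For example, assuming N = 1018 (the usual NIDS_PER_BLOCK with 4KB blocks),
+ * offsets 1 and 2 are direct nodes, offsets 3 and 1022 (4 + N) are indirect
+ * nodes covering direct nodes 4..1021 and 1023..2040, offset 2041 (5 + 2N)
+ * is the double indirect node, and offsets 2042, 3061, ... are the indirect
+ * nodes below it, each followed by N direct nodes.
+ */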
+static inline bool IS_DNODE(struct page *node_page)
+{
+ unsigned int ofs = ofs_of_node(node_page);
+ if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
+ ofs == 5 + 2 * NIDS_PER_BLOCK)
+ return false;
+ if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
+ ofs -= 6 + 2 * NIDS_PER_BLOCK;
+ if ((long int)ofs % (NIDS_PER_BLOCK + 1))
+ return false;
+ }
+ return true;
+}
+
+static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
+{
+ struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+
+ wait_on_page_writeback(p);
+
+ if (i)
+ rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
+ else
+ rn->in.nid[off] = cpu_to_le32(nid);
+ set_page_dirty(p);
+}
+
+static inline nid_t get_nid(struct page *p, int off, bool i)
+{
+ struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+ if (i)
+ return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
+ return le32_to_cpu(rn->in.nid[off]);
+}
+
+/*
+ * Coldness identification:
+ * - Mark cold files in f2fs_inode_info
+ * - Mark cold node blocks in their node footer
+ * - Mark cold data pages in page cache
+ */
+static inline int is_cold_file(struct inode *inode)
+{
+ return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
+}
+
+static inline int is_cold_data(struct page *page)
+{
+ return PageChecked(page);
+}
+
+static inline void set_cold_data(struct page *page)
+{
+ SetPageChecked(page);
+}
+
+static inline void clear_cold_data(struct page *page)
+{
+ ClearPageChecked(page);
+}
+
+static inline int is_cold_node(struct page *page)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << COLD_BIT_SHIFT);
+}
+
+static inline unsigned char is_fsync_dnode(struct page *page)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << FSYNC_BIT_SHIFT);
+}
+
+static inline unsigned char is_dent_dnode(struct page *page)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ return flag & (0x1 << DENT_BIT_SHIFT);
+}
+
+static inline void set_cold_node(struct inode *inode, struct page *page)
+{
+ struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+
+ if (S_ISDIR(inode->i_mode))
+ flag &= ~(0x1 << COLD_BIT_SHIFT);
+ else
+ flag |= (0x1 << COLD_BIT_SHIFT);
+ rn->footer.flag = cpu_to_le32(flag);
+}
+
+static inline void set_fsync_mark(struct page *page, int mark)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ if (mark)
+ flag |= (0x1 << FSYNC_BIT_SHIFT);
+ else
+ flag &= ~(0x1 << FSYNC_BIT_SHIFT);
+ rn->footer.flag = cpu_to_le32(flag);
+}
+
+static inline void set_dentry_mark(struct page *page, int mark)
+{
+ void *kaddr = page_address(page);
+ struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ unsigned int flag = le32_to_cpu(rn->footer.flag);
+ if (mark)
+ flag |= (0x1 << DENT_BIT_SHIFT);
+ else
+ flag &= ~(0x1 << DENT_BIT_SHIFT);
+ rn->footer.flag = cpu_to_le32(flag);
+}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
new file mode 100644
index 00000000000..b07e9b6ef37
--- /dev/null
+++ b/fs/f2fs/recovery.c
@@ -0,0 +1,375 @@
+/*
+ * fs/f2fs/recovery.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include "f2fs.h"
+#include "node.h"
+#include "segment.h"
+
+static struct kmem_cache *fsync_entry_slab;
+
+bool space_for_roll_forward(struct f2fs_sb_info *sbi)
+{
+ if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
+ > sbi->user_block_count)
+ return false;
+ return true;
+}
+
+static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
+ nid_t ino)
+{
+ struct list_head *this;
+ struct fsync_inode_entry *entry;
+
+ list_for_each(this, head) {
+ entry = list_entry(this, struct fsync_inode_entry, list);
+ if (entry->inode->i_ino == ino)
+ return entry;
+ }
+ return NULL;
+}
+
+static int recover_dentry(struct page *ipage, struct inode *inode)
+{
+ struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
+ struct f2fs_inode *raw_inode = &(raw_node->i);
+ struct dentry dent, parent;
+ struct f2fs_dir_entry *de;
+ struct page *page;
+ struct inode *dir;
+ int err = 0;
+
+ if (!is_dent_dnode(ipage))
+ goto out;
+
+ dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
+ if (IS_ERR(dir)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ parent.d_inode = dir;
+ dent.d_parent = &parent;
+ dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
+ dent.d_name.name = raw_inode->i_name;
+
+ de = f2fs_find_entry(dir, &dent.d_name, &page);
+ if (de) {
+ kunmap(page);
+ f2fs_put_page(page, 0);
+ } else {
+ f2fs_add_link(&dent, inode);
+ }
+ iput(dir);
+out:
+ kunmap(ipage);
+ return err;
+}
+
+static int recover_inode(struct inode *inode, struct page *node_page)
+{
+ void *kaddr = page_address(node_page);
+ struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+ struct f2fs_inode *raw_inode = &(raw_node->i);
+
+ inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+ i_size_write(inode, le64_to_cpu(raw_inode->i_size));
+ inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
+ inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
+ inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
+ inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+ inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
+ inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+
+ return recover_dentry(node_page, inode);
+}
+
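+/*
+ * Node blocks written by fsync are chained through footer.next_blkaddr,
+ * starting right after the last block of the warm node log recorded at
+ * checkpoint time.  The chain is followed until a block whose cp_ver no
+ * longer matches the current checkpoint version is found.
+ */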
+static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+{
+ unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+ struct curseg_info *curseg;
+ struct page *page;
+ block_t blkaddr;
+ int err = 0;
+
+ /* get node pages in the current segment */
+ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
+ blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
+
+ /* read node page */
+ page = alloc_page(GFP_F2FS_ZERO);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ lock_page(page);
+
+ while (1) {
+ struct fsync_inode_entry *entry;
+
+ if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+ goto out;
+
+ if (cp_ver != cpver_of_node(page))
+ goto out;
+
+ if (!is_fsync_dnode(page))
+ goto next;
+
+ entry = get_fsync_inode(head, ino_of_node(page));
+ if (entry) {
+ entry->blkaddr = blkaddr;
+ if (IS_INODE(page) && is_dent_dnode(page))
+ set_inode_flag(F2FS_I(entry->inode),
+ FI_INC_LINK);
+ } else {
+ if (IS_INODE(page) && is_dent_dnode(page)) {
+ if (recover_inode_page(sbi, page)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* add this fsync inode to the list */
+ entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
+ if (!entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, head);
+
+ entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
+ if (IS_ERR(entry->inode)) {
+ err = PTR_ERR(entry->inode);
+ goto out;
+ }
+ entry->blkaddr = blkaddr;
+ }
+ if (IS_INODE(page)) {
+ err = recover_inode(entry->inode, page);
+ if (err)
+ goto out;
+ }
+next:
+ /* move to the next block in the node chain */
+ blkaddr = next_blkaddr_of_node(page);
+ ClearPageUptodate(page);
+ }
+out:
+ unlock_page(page);
+ __free_pages(page, 0);
+ return err;
+}
+
+static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
+ struct list_head *head)
+{
+ struct list_head *this;
+ struct fsync_inode_entry *entry;
+ list_for_each(this, head) {
+ entry = list_entry(this, struct fsync_inode_entry, list);
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+ }
+}
+
+static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ struct seg_entry *sentry;
+ unsigned int segno = GET_SEGNO(sbi, blkaddr);
+ unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
+ (sbi->blocks_per_seg - 1);
+ struct f2fs_summary sum;
+ nid_t ino;
+ void *kaddr;
+ struct inode *inode;
+ struct page *node_page;
+ block_t bidx;
+ int i;
+
+ sentry = get_seg_entry(sbi, segno);
+ if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
+ return;
+
+ /* Get the previous summary */
+ for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+ struct curseg_info *curseg = CURSEG_I(sbi, i);
+ if (curseg->segno == segno) {
+ sum = curseg->sum_blk->entries[blkoff];
+ break;
+ }
+ }
+ if (i > CURSEG_COLD_DATA) {
+ struct page *sum_page = get_sum_page(sbi, segno);
+ struct f2fs_summary_block *sum_node;
+ kaddr = page_address(sum_page);
+ sum_node = (struct f2fs_summary_block *)kaddr;
+ sum = sum_node->entries[blkoff];
+ f2fs_put_page(sum_page, 1);
+ }
+
+ /* Get the node page */
+ node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
+ bidx = start_bidx_of_node(ofs_of_node(node_page)) +
+ le16_to_cpu(sum.ofs_in_node);
+ ino = ino_of_node(node_page);
+ f2fs_put_page(node_page, 1);
+
+ /* Deallocate previous index in the node page */
+ inode = f2fs_iget_nowait(sbi->sb, ino);
+ truncate_hole(inode, bidx, bidx + 1);
+ iput(inode);
+}
+
+static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct page *page, block_t blkaddr)
+{
+ unsigned int start, end;
+ struct dnode_of_data dn;
+ struct f2fs_summary sum;
+ struct node_info ni;
+
+ start = start_bidx_of_node(ofs_of_node(page));
+ if (IS_INODE(page))
+ end = start + ADDRS_PER_INODE;
+ else
+ end = start + ADDRS_PER_BLOCK;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (get_dnode_of_data(&dn, start, 0))
+ return;
+
+ wait_on_page_writeback(dn.node_page);
+
+ get_node_info(sbi, dn.nid, &ni);
+ BUG_ON(ni.ino != ino_of_node(page));
+ BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));
+
+ for (; start < end; start++) {
+ block_t src, dest;
+
+ src = datablock_addr(dn.node_page, dn.ofs_in_node);
+ dest = datablock_addr(page, dn.ofs_in_node);
+
+ if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
+ if (src == NULL_ADDR) {
+ int err = reserve_new_block(&dn);
+ /* We should not get -ENOSPC */
+ BUG_ON(err);
+ }
+
+ /* Check the previous node page having this index */
+ check_index_in_prev_nodes(sbi, dest);
+
+ set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
+
+ /* write dummy data page */
+ recover_data_page(sbi, NULL, &sum, src, dest);
+ update_extent_cache(dest, &dn);
+ }
+ dn.ofs_in_node++;
+ }
+
+ /* write node page in place */
+ set_summary(&sum, dn.nid, 0, 0);
+ if (IS_INODE(dn.node_page))
+ sync_inode_page(&dn);
+
+ copy_node_footer(dn.node_page, page);
+ fill_node_footer(dn.node_page, dn.nid, ni.ino,
+ ofs_of_node(page), false);
+ set_page_dirty(dn.node_page);
+
+ recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
+ f2fs_put_dnode(&dn);
+}
+
+static void recover_data(struct f2fs_sb_info *sbi,
+ struct list_head *head, int type)
+{
+ unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+ struct curseg_info *curseg;
+ struct page *page;
+ block_t blkaddr;
+
+ /* get node pages in the current segment */
+ curseg = CURSEG_I(sbi, type);
+ blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+
+ /* read node page */
+ page = alloc_page(GFP_NOFS | __GFP_ZERO);
+ if (IS_ERR(page))
+ return;
+ lock_page(page);
+
+ while (1) {
+ struct fsync_inode_entry *entry;
+
+ if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+ goto out;
+
+ if (cp_ver != cpver_of_node(page))
+ goto out;
+
+ entry = get_fsync_inode(head, ino_of_node(page));
+ if (!entry)
+ goto next;
+
+ do_recover_data(sbi, entry->inode, page, blkaddr);
+
+ if (entry->blkaddr == blkaddr) {
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+ }
+next:
+ /* move to the next block in the node chain */
+ blkaddr = next_blkaddr_of_node(page);
+ ClearPageUptodate(page);
+ }
+out:
+ unlock_page(page);
+ __free_pages(page, 0);
+
+ allocate_new_segments(sbi);
+}
+
+void recover_fsync_data(struct f2fs_sb_info *sbi)
+{
+ struct list_head inode_list;
+
+ fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+ sizeof(struct fsync_inode_entry), NULL);
+ if (unlikely(!fsync_entry_slab))
+ return;
+
+ INIT_LIST_HEAD(&inode_list);
+
+ /* step #1: find fsynced inode numbers */
+ if (find_fsync_dnodes(sbi, &inode_list))
+ goto out;
+
+ if (list_empty(&inode_list))
+ goto out;
+
+ /* step #2: recover data */
+ sbi->por_doing = 1;
+ recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ sbi->por_doing = 0;
+ BUG_ON(!list_empty(&inode_list));
+out:
+ destroy_fsync_dnodes(sbi, &inode_list);
+ kmem_cache_destroy(fsync_entry_slab);
+ write_checkpoint(sbi, false, false);
+}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
new file mode 100644
index 00000000000..1b26e4ea101
--- /dev/null
+++ b/fs/f2fs/segment.c
@@ -0,0 +1,1791 @@
+/*
+ * fs/f2fs/segment.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+#include "f2fs.h"
+#include "segment.h"
+#include "node.h"
+
+static int need_to_flush(struct f2fs_sb_info *sbi)
+{
+ unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
+ sbi->segs_per_sec;
+ int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+
+ if (sbi->por_doing)
+ return 0;
+
+ if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ reserved_sections(sbi)))
+ return 1;
+ return 0;
+}
+
+/*
+ * This function balances dirty node and dentry pages.
+ * In addition, it controls garbage collection.
+ */
+void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .for_reclaim = 0,
+ };
+
+ if (sbi->por_doing)
+ return;
+
+ /*
+ * We should write back dirty node and dentry pages when there are too many
+ * of them while enough free segments remain. After that, do GC if the
+ * number of free sections runs short.
+ */
+ if (need_to_flush(sbi)) {
+ sync_dirty_dir_inodes(sbi);
+ sync_node_pages(sbi, 0, &wbc);
+ }
+
+ if (has_not_enough_free_secs(sbi)) {
+ mutex_lock(&sbi->gc_mutex);
+ f2fs_gc(sbi, 1);
+ }
+}
+
+static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
+ enum dirty_type dirty_type)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+
+ /* need not be added */
+ if (IS_CURSEG(sbi, segno))
+ return;
+
+ if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
+ dirty_i->nr_dirty[dirty_type]++;
+
+ if (dirty_type == DIRTY) {
+ struct seg_entry *sentry = get_seg_entry(sbi, segno);
+ dirty_type = sentry->type;
+ if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
+ dirty_i->nr_dirty[dirty_type]++;
+ }
+}
+
+static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
+ enum dirty_type dirty_type)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+
+ if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
+ dirty_i->nr_dirty[dirty_type]--;
+
+ if (dirty_type == DIRTY) {
+ struct seg_entry *sentry = get_seg_entry(sbi, segno);
+ dirty_type = sentry->type;
+ if (test_and_clear_bit(segno,
+ dirty_i->dirty_segmap[dirty_type]))
+ dirty_i->nr_dirty[dirty_type]--;
+ clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
+ clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
+ }
+}
+
+/*
+ * Errors such as -ENOMEM should not occur here.
+ * Adding a dirty entry to the seglist is not a critical operation.
+ * If a given segment is one of the current working segments, it won't be added.
+ */
+void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned short valid_blocks;
+
+ if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
+ return;
+
+ mutex_lock(&dirty_i->seglist_lock);
+
+ valid_blocks = get_valid_blocks(sbi, segno, 0);
+
+ if (valid_blocks == 0) {
+ __locate_dirty_segment(sbi, segno, PRE);
+ __remove_dirty_segment(sbi, segno, DIRTY);
+ } else if (valid_blocks < sbi->blocks_per_seg) {
+ __locate_dirty_segment(sbi, segno, DIRTY);
+ } else {
+ /* Recovery routine with SSR needs this */
+ __remove_dirty_segment(sbi, segno, DIRTY);
+ }
+
+ mutex_unlock(&dirty_i->seglist_lock);
+ return;
+}
+
+/*
+ * clear_prefree_segments should be called after the checkpoint is done.
+ */
+static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno, offset = 0;
+ unsigned int total_segs = TOTAL_SEGS(sbi);
+
+ mutex_lock(&dirty_i->seglist_lock);
+ while (1) {
+ segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
+ offset);
+ if (segno >= total_segs)
+ break;
+ __set_test_and_free(sbi, segno);
+ offset = segno + 1;
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+void clear_prefree_segments(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno, offset = 0;
+ unsigned int total_segs = TOTAL_SEGS(sbi);
+
+ mutex_lock(&dirty_i->seglist_lock);
+ while (1) {
+ segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
+ offset);
+ if (segno >= total_segs)
+ break;
+
+ offset = segno + 1;
+ if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
+ dirty_i->nr_dirty[PRE]--;
+
+ /* Let's use trim */
+ if (test_opt(sbi, DISCARD))
+ blkdev_issue_discard(sbi->sb->s_bdev,
+ START_BLOCK(sbi, segno) <<
+ sbi->log_sectors_per_block,
+ 1 << (sbi->log_sectors_per_block +
+ sbi->log_blocks_per_seg),
+ GFP_NOFS, 0);
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
+ sit_i->dirty_sentries++;
+}
+
+static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
+ unsigned int segno, int modified)
+{
+ struct seg_entry *se = get_seg_entry(sbi, segno);
+ se->type = type;
+ if (modified)
+ __mark_sit_entry_dirty(sbi, segno);
+}
+
+static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+{
+ struct seg_entry *se;
+ unsigned int segno, offset;
+ long int new_vblocks;
+
+ segno = GET_SEGNO(sbi, blkaddr);
+
+ se = get_seg_entry(sbi, segno);
+ new_vblocks = se->valid_blocks + del;
+ offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
+
+ BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
+ (new_vblocks > sbi->blocks_per_seg)));
+
+ se->valid_blocks = new_vblocks;
+ se->mtime = get_mtime(sbi);
+ SIT_I(sbi)->max_mtime = se->mtime;
+
+ /* Update valid block bitmap */
+ if (del > 0) {
+ if (f2fs_set_bit(offset, se->cur_valid_map))
+ BUG();
+ } else {
+ if (!f2fs_clear_bit(offset, se->cur_valid_map))
+ BUG();
+ }
+ if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+ se->ckpt_valid_blocks += del;
+
+ __mark_sit_entry_dirty(sbi, segno);
+
+ /* update total number of valid blocks to be written in ckpt area */
+ SIT_I(sbi)->written_valid_blocks += del;
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, segno)->valid_blocks += del;
+}
+
+static void refresh_sit_entry(struct f2fs_sb_info *sbi,
+ block_t old_blkaddr, block_t new_blkaddr)
+{
+ update_sit_entry(sbi, new_blkaddr, 1);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ update_sit_entry(sbi, old_blkaddr, -1);
+}
+
+void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
+{
+ unsigned int segno = GET_SEGNO(sbi, addr);
+ struct sit_info *sit_i = SIT_I(sbi);
+
+ BUG_ON(addr == NULL_ADDR);
+ if (addr == NEW_ADDR)
+ return;
+
+ /* add it into sit main buffer */
+ mutex_lock(&sit_i->sentry_lock);
+
+ update_sit_entry(sbi, addr, -1);
+
+ /* add it into dirty seglist */
+ locate_dirty_segment(sbi, segno);
+
+ mutex_unlock(&sit_i->sentry_lock);
+}
+
+/*
+ * This function must be called with curseg_mutex held
+ */
+static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
+ struct f2fs_summary *sum, unsigned short offset)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ void *addr = curseg->sum_blk;
+ addr += offset * sizeof(struct f2fs_summary);
+ memcpy(addr, sum, sizeof(struct f2fs_summary));
+ return;
+}
+
+/*
+ * Calculate the number of current summary pages for writing
+ */
+int npages_for_summary_flush(struct f2fs_sb_info *sbi)
+{
+ int total_size_bytes = 0;
+ int valid_sum_count = 0;
+ int i, sum_space;
+
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ if (sbi->ckpt->alloc_type[i] == SSR)
+ valid_sum_count += sbi->blocks_per_seg;
+ else
+ valid_sum_count += curseg_blkoff(sbi, i);
+ }
+
+ total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
+ + sizeof(struct nat_journal) + 2
+ + sizeof(struct sit_journal) + 2;
+ sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
+ if (total_size_bytes < sum_space)
+ return 1;
+ else if (total_size_bytes < 2 * sum_space)
+ return 2;
+ return 3;
+}
+
+/*
+ * Caller should put this summary page
+ */
+struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+}
+
+static void write_sum_page(struct f2fs_sb_info *sbi,
+ struct f2fs_summary_block *sum_blk, block_t blk_addr)
+{
+ struct page *page = grab_meta_page(sbi, blk_addr);
+ void *kaddr = page_address(page);
+ memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+}
+
+static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
+ int ofs_unit, int type)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
+ unsigned int segno, next_segno, i;
+ int ofs = 0;
+
+ /*
+ * If there are not enough reserved sections,
+ * we should not reuse prefree segments.
+ */
+ if (has_not_enough_free_secs(sbi))
+ return NULL_SEGNO;
+
+ /*
+ * NODE pages should not reuse prefree segments,
+ * since that information is needed for SPOR (sudden power-off recovery).
+ */
+ if (IS_NODESEG(type))
+ return NULL_SEGNO;
+next:
+ segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++);
+ ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
+ if (segno < TOTAL_SEGS(sbi)) {
+ /* skip intermediate segments in a section */
+ if (segno % ofs_unit)
+ goto next;
+
+ /* skip if whole section is not prefree */
+ next_segno = find_next_zero_bit(prefree_segmap,
+ TOTAL_SEGS(sbi), segno + 1);
+ if (next_segno - segno < ofs_unit)
+ goto next;
+
+ /* skip if whole section was not free at the last checkpoint */
+ for (i = 0; i < ofs_unit; i++)
+ if (get_seg_entry(sbi, segno)->ckpt_valid_blocks)
+ goto next;
+ return segno;
+ }
+ return NULL_SEGNO;
+}
+
+/*
+ * Find a new segment in the free segment bitmap, in the requested
+ * allocation direction. This function must succeed; otherwise it BUGs.
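+ * It also tries to keep each active log in its own zone: if the candidate
+ * zone already hosts another curseg, the search is retried from a
+ * neighboring zone before giving up.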
+ */
+static void get_new_segment(struct f2fs_sb_info *sbi,
+ unsigned int *newseg, bool new_sec, int dir)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int total_secs = sbi->total_sections;
+ unsigned int segno, secno, zoneno;
+ unsigned int total_zones = sbi->total_sections / sbi->secs_per_zone;
+ unsigned int hint = *newseg / sbi->segs_per_sec;
+ unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
+ unsigned int left_start = hint;
+ bool init = true;
+ int go_left = 0;
+ int i;
+
+ write_lock(&free_i->segmap_lock);
+
+ if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
+ segno = find_next_zero_bit(free_i->free_segmap,
+ TOTAL_SEGS(sbi), *newseg + 1);
+ if (segno < TOTAL_SEGS(sbi))
+ goto got_it;
+ }
+find_other_zone:
+ secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
+ if (secno >= total_secs) {
+ if (dir == ALLOC_RIGHT) {
+ secno = find_next_zero_bit(free_i->free_secmap,
+ total_secs, 0);
+ BUG_ON(secno >= total_secs);
+ } else {
+ go_left = 1;
+ left_start = hint - 1;
+ }
+ }
+ if (go_left == 0)
+ goto skip_left;
+
+ while (test_bit(left_start, free_i->free_secmap)) {
+ if (left_start > 0) {
+ left_start--;
+ continue;
+ }
+ left_start = find_next_zero_bit(free_i->free_secmap,
+ total_secs, 0);
+ BUG_ON(left_start >= total_secs);
+ break;
+ }
+ secno = left_start;
+skip_left:
+ hint = secno;
+ segno = secno * sbi->segs_per_sec;
+ zoneno = secno / sbi->secs_per_zone;
+
+ /* give up on finding another zone */
+ if (!init)
+ goto got_it;
+ if (sbi->secs_per_zone == 1)
+ goto got_it;
+ if (zoneno == old_zoneno)
+ goto got_it;
+ if (dir == ALLOC_LEFT) {
+ if (!go_left && zoneno + 1 >= total_zones)
+ goto got_it;
+ if (go_left && zoneno == 0)
+ goto got_it;
+ }
+ for (i = 0; i < NR_CURSEG_TYPE; i++)
+ if (CURSEG_I(sbi, i)->zone == zoneno)
+ break;
+
+ if (i < NR_CURSEG_TYPE) {
+ /* zone is in use, try another */
+ if (go_left)
+ hint = zoneno * sbi->secs_per_zone - 1;
+ else if (zoneno + 1 >= total_zones)
+ hint = 0;
+ else
+ hint = (zoneno + 1) * sbi->secs_per_zone;
+ init = false;
+ goto find_other_zone;
+ }
+got_it:
+ /* set it as dirty segment in free segmap */
+ BUG_ON(test_bit(segno, free_i->free_segmap));
+ __set_inuse(sbi, segno);
+ *newseg = segno;
+ write_unlock(&free_i->segmap_lock);
+}
+
+static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ struct summary_footer *sum_footer;
+
+ curseg->segno = curseg->next_segno;
+ curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
+ curseg->next_blkoff = 0;
+ curseg->next_segno = NULL_SEGNO;
+
+ sum_footer = &(curseg->sum_blk->footer);
+ memset(sum_footer, 0, sizeof(struct summary_footer));
+ if (IS_DATASEG(type))
+ SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
+ if (IS_NODESEG(type))
+ SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
+ __set_sit_entry_type(sbi, type, curseg->segno, modified);
+}
+
+/*
+ * Allocate a current working segment.
+ * This function always allocates a free segment in LFS manner.
+ */
+static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int segno = curseg->segno;
+ int dir = ALLOC_LEFT;
+
+ write_sum_page(sbi, curseg->sum_blk,
+ GET_SUM_BLOCK(sbi, curseg->segno));
+ if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
+ dir = ALLOC_RIGHT;
+
+ if (test_opt(sbi, NOHEAP))
+ dir = ALLOC_RIGHT;
+
+ get_new_segment(sbi, &segno, new_sec, dir);
+ curseg->next_segno = segno;
+ reset_curseg(sbi, type, 1);
+ curseg->alloc_type = LFS;
+}
+
+static void __next_free_blkoff(struct f2fs_sb_info *sbi,
+ struct curseg_info *seg, block_t start)
+{
+ struct seg_entry *se = get_seg_entry(sbi, seg->segno);
+ block_t ofs;
+ for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
+ if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
+ && !f2fs_test_bit(ofs, se->cur_valid_map))
+ break;
+ }
+ seg->next_blkoff = ofs;
+}
+
+/*
+ * If a segment is written in the LFS manner, the next block offset is simply
+ * obtained by increasing the current block offset. However, if a segment is
+ * written in the SSR manner, the next block offset is obtained by calling
+ * __next_free_blkoff.
+ */
+static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
+ struct curseg_info *seg)
+{
+ if (seg->alloc_type == SSR)
+ __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
+ else
+ seg->next_blkoff++;
+}
+
+/*
+ * This function always allocates a used segment (from the dirty seglist) in
+ * the SSR manner, so it has to recover the segment's existing valid-block
+ * information.
+ */
+static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int new_segno = curseg->next_segno;
+ struct f2fs_summary_block *sum_node;
+ struct page *sum_page;
+
+ write_sum_page(sbi, curseg->sum_blk,
+ GET_SUM_BLOCK(sbi, curseg->segno));
+ __set_test_and_inuse(sbi, new_segno);
+
+ mutex_lock(&dirty_i->seglist_lock);
+ __remove_dirty_segment(sbi, new_segno, PRE);
+ __remove_dirty_segment(sbi, new_segno, DIRTY);
+ mutex_unlock(&dirty_i->seglist_lock);
+
+ reset_curseg(sbi, type, 1);
+ curseg->alloc_type = SSR;
+ __next_free_blkoff(sbi, curseg, 0);
+
+ if (reuse) {
+ sum_page = get_sum_page(sbi, new_segno);
+ sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
+ f2fs_put_page(sum_page, 1);
+ }
+}
+
+/*
+ * Flush out the current segment and replace it with a new one.
+ * This function must succeed; otherwise it BUGs.
+ */
+static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
+ int type, bool force)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int ofs_unit;
+
+ if (force) {
+ new_curseg(sbi, type, true);
+ goto out;
+ }
+
+ ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec;
+ curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type);
+
+ if (curseg->next_segno != NULL_SEGNO)
+ change_curseg(sbi, type, false);
+ else if (type == CURSEG_WARM_NODE)
+ new_curseg(sbi, type, false);
+ else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
+ change_curseg(sbi, type, true);
+ else
+ new_curseg(sbi, type, false);
+out:
+ sbi->segment_count[curseg->alloc_type]++;
+}
+
+void allocate_new_segments(struct f2fs_sb_info *sbi)
+{
+ struct curseg_info *curseg;
+ unsigned int old_curseg;
+ int i;
+
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ curseg = CURSEG_I(sbi, i);
+ old_curseg = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_curseg);
+ }
+}
+
+static const struct segment_allocation default_salloc_ops = {
+ .allocate_segment = allocate_segment_by_default,
+};
+
+static void f2fs_end_io_write(struct bio *bio, int err)
+{
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct bio_private *p = bio->bi_private;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+ if (!uptodate) {
+ SetPageError(page);
+ if (page->mapping)
+ set_bit(AS_EIO, &page->mapping->flags);
+ set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
+ set_page_dirty(page);
+ }
+ end_page_writeback(page);
+ dec_page_count(p->sbi, F2FS_WRITEBACK);
+ } while (bvec >= bio->bi_io_vec);
+
+ if (p->is_sync)
+ complete(p->wait);
+ kfree(p);
+ bio_put(bio);
+}
+
+struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
+{
+ struct bio *bio;
+ struct bio_private *priv;
+retry:
+ priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
+ if (!priv) {
+ cond_resched();
+ goto retry;
+ }
+
+ /* No failure on bio allocation */
+ bio = bio_alloc(GFP_NOIO, npages);
+ bio->bi_bdev = bdev;
+ bio->bi_private = priv;
+ return bio;
+}
+
+static void do_submit_bio(struct f2fs_sb_info *sbi,
+ enum page_type type, bool sync)
+{
+ int rw = sync ? WRITE_SYNC : WRITE;
+ enum page_type btype = type > META ? META : type;
+
+ if (type >= META_FLUSH)
+ rw = WRITE_FLUSH_FUA;
+
+ if (sbi->bio[btype]) {
+ struct bio_private *p = sbi->bio[btype]->bi_private;
+ p->sbi = sbi;
+ sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
+ if (type == META_FLUSH) {
+ DECLARE_COMPLETION_ONSTACK(wait);
+ p->is_sync = true;
+ p->wait = &wait;
+ submit_bio(rw, sbi->bio[btype]);
+ wait_for_completion(&wait);
+ } else {
+ p->is_sync = false;
+ submit_bio(rw, sbi->bio[btype]);
+ }
+ sbi->bio[btype] = NULL;
+ }
+}
+
+void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
+{
+ down_write(&sbi->bio_sem);
+ do_submit_bio(sbi, type, sync);
+ up_write(&sbi->bio_sem);
+}
+
+static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
+ block_t blk_addr, enum page_type type)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+
+ verify_block_addr(sbi, blk_addr);
+
+ down_write(&sbi->bio_sem);
+
+ inc_page_count(sbi, F2FS_WRITEBACK);
+
+ if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
+ do_submit_bio(sbi, type, false);
+alloc_new:
+ if (sbi->bio[type] == NULL) {
+ sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
+ sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+ /*
+ * The end_io will be assigned at the submission phase.
+ * Until then, let bio_add_page() merge consecutive IOs as much
+ * as possible.
+ */
+ }
+
+ if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
+ PAGE_CACHE_SIZE) {
+ do_submit_bio(sbi, type, false);
+ goto alloc_new;
+ }
+
+ sbi->last_block_in_bio[type] = blk_addr;
+
+ up_write(&sbi->bio_sem);
+}
+
+static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ if (curseg->next_blkoff < sbi->blocks_per_seg)
+ return true;
+ return false;
+}
+
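+/*
+ * Log separation policy: with 2 active logs, all data goes to the hot data
+ * log and all nodes to the hot node log.  With 4 logs, directory data and
+ * non-cold direct nodes stay hot while everything else is cold.  With 6
+ * logs, data is split into hot (dentries), cold (cold-marked pages or cold
+ * files) and warm (the rest), and nodes into hot (non-cold direct), warm
+ * (cold direct) and cold (indirect).
+ */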
+static int __get_segment_type_2(struct page *page, enum page_type p_type)
+{
+ if (p_type == DATA)
+ return CURSEG_HOT_DATA;
+ else
+ return CURSEG_HOT_NODE;
+}
+
+static int __get_segment_type_4(struct page *page, enum page_type p_type)
+{
+ if (p_type == DATA) {
+ struct inode *inode = page->mapping->host;
+
+ if (S_ISDIR(inode->i_mode))
+ return CURSEG_HOT_DATA;
+ else
+ return CURSEG_COLD_DATA;
+ } else {
+ if (IS_DNODE(page) && !is_cold_node(page))
+ return CURSEG_HOT_NODE;
+ else
+ return CURSEG_COLD_NODE;
+ }
+}
+
+static int __get_segment_type_6(struct page *page, enum page_type p_type)
+{
+ if (p_type == DATA) {
+ struct inode *inode = page->mapping->host;
+
+ if (S_ISDIR(inode->i_mode))
+ return CURSEG_HOT_DATA;
+ else if (is_cold_data(page) || is_cold_file(inode))
+ return CURSEG_COLD_DATA;
+ else
+ return CURSEG_WARM_DATA;
+ } else {
+ if (IS_DNODE(page))
+ return is_cold_node(page) ? CURSEG_WARM_NODE :
+ CURSEG_HOT_NODE;
+ else
+ return CURSEG_COLD_NODE;
+ }
+}
+
+static int __get_segment_type(struct page *page, enum page_type p_type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ switch (sbi->active_logs) {
+ case 2:
+ return __get_segment_type_2(page, p_type);
+ case 4:
+ return __get_segment_type_4(page, p_type);
+ case 6:
+ return __get_segment_type_6(page, p_type);
+ default:
+ BUG();
+ }
+}
+
+static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
+ block_t old_blkaddr, block_t *new_blkaddr,
+ struct f2fs_summary *sum, enum page_type p_type)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct curseg_info *curseg;
+ unsigned int old_cursegno;
+ int type;
+
+ type = __get_segment_type(page, p_type);
+ curseg = CURSEG_I(sbi, type);
+
+ mutex_lock(&curseg->curseg_mutex);
+
+ *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+ old_cursegno = curseg->segno;
+
+ /*
+ * __add_sum_entry must be called with curseg_mutex held,
+ * because this function updates a summary entry in the
+ * current summary block.
+ */
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+
+ mutex_lock(&sit_i->sentry_lock);
+ __refresh_next_blkoff(sbi, curseg);
+ sbi->block_count[curseg->alloc_type]++;
+
+ /*
+ * SIT information should be updated before segment allocation,
+ * since SSR needs the latest valid block information.
+ */
+ refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
+
+ if (!__has_curseg_space(sbi, type))
+ sit_i->s_ops->allocate_segment(sbi, type, false);
+
+ locate_dirty_segment(sbi, old_cursegno);
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+ mutex_unlock(&sit_i->sentry_lock);
+
+ if (p_type == NODE)
+ fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+
+ /* writeout dirty page into bdev */
+ submit_write_page(sbi, page, *new_blkaddr, p_type);
+
+ mutex_unlock(&curseg->curseg_mutex);
+}
+
+int write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+ struct writeback_control *wbc)
+{
+ if (wbc->for_reclaim)
+ return AOP_WRITEPAGE_ACTIVATE;
+
+ set_page_writeback(page);
+ submit_write_page(sbi, page, page->index, META);
+ return 0;
+}
+
+void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
+ unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
+{
+ struct f2fs_summary sum;
+ set_summary(&sum, nid, 0, 0);
+ do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
+}
+
+void write_data_page(struct inode *inode, struct page *page,
+ struct dnode_of_data *dn, block_t old_blkaddr,
+ block_t *new_blkaddr)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_summary sum;
+ struct node_info ni;
+
+ BUG_ON(old_blkaddr == NULL_ADDR);
+ get_node_info(sbi, dn->nid, &ni);
+ set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+
+ do_write_page(sbi, page, old_blkaddr,
+ new_blkaddr, &sum, DATA);
+}
+
+void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
+ block_t old_blk_addr)
+{
+ submit_write_page(sbi, page, old_blk_addr, DATA);
+}
+
+void recover_data_page(struct f2fs_sb_info *sbi,
+ struct page *page, struct f2fs_summary *sum,
+ block_t old_blkaddr, block_t new_blkaddr)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct curseg_info *curseg;
+ unsigned int segno, old_cursegno;
+ struct seg_entry *se;
+ int type;
+
+ segno = GET_SEGNO(sbi, new_blkaddr);
+ se = get_seg_entry(sbi, segno);
+ type = se->type;
+
+ if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
+ if (old_blkaddr == NULL_ADDR)
+ type = CURSEG_COLD_DATA;
+ else
+ type = CURSEG_WARM_DATA;
+ }
+ curseg = CURSEG_I(sbi, type);
+
+ mutex_lock(&curseg->curseg_mutex);
+ mutex_lock(&sit_i->sentry_lock);
+
+ old_cursegno = curseg->segno;
+
+ /* change the current segment */
+ if (segno != curseg->segno) {
+ curseg->next_segno = segno;
+ change_curseg(sbi, type, true);
+ }
+
+ curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
+ (sbi->blocks_per_seg - 1);
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+
+ refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
+
+ locate_dirty_segment(sbi, old_cursegno);
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+
+ mutex_unlock(&sit_i->sentry_lock);
+ mutex_unlock(&curseg->curseg_mutex);
+}
+
+void rewrite_node_page(struct f2fs_sb_info *sbi,
+ struct page *page, struct f2fs_summary *sum,
+ block_t old_blkaddr, block_t new_blkaddr)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ int type = CURSEG_WARM_NODE;
+ struct curseg_info *curseg;
+ unsigned int segno, old_cursegno;
+ block_t next_blkaddr = next_blkaddr_of_node(page);
+ unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
+
+ curseg = CURSEG_I(sbi, type);
+
+ mutex_lock(&curseg->curseg_mutex);
+ mutex_lock(&sit_i->sentry_lock);
+
+ segno = GET_SEGNO(sbi, new_blkaddr);
+ old_cursegno = curseg->segno;
+
+ /* change the current segment */
+ if (segno != curseg->segno) {
+ curseg->next_segno = segno;
+ change_curseg(sbi, type, true);
+ }
+ curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
+ (sbi->blocks_per_seg - 1);
+ __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+
+ /* change the current log to the next block addr in advance */
+ if (next_segno != segno) {
+ curseg->next_segno = next_segno;
+ change_curseg(sbi, type, true);
+ }
+ curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
+ (sbi->blocks_per_seg - 1);
+
+ /* rewrite node page */
+ set_page_writeback(page);
+ submit_write_page(sbi, page, new_blkaddr, NODE);
+ f2fs_submit_bio(sbi, NODE, true);
+ refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
+
+ locate_dirty_segment(sbi, old_cursegno);
+ locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+
+ mutex_unlock(&sit_i->sentry_lock);
+ mutex_unlock(&curseg->curseg_mutex);
+}
+
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct curseg_info *seg_i;
+ unsigned char *kaddr;
+ struct page *page;
+ block_t start;
+ int i, j, offset;
+
+ start = start_sum_block(sbi);
+
+ page = get_meta_page(sbi, start++);
+ kaddr = (unsigned char *)page_address(page);
+
+ /* Step 1: restore nat cache */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
+
+ /* Step 2: restore sit cache */
+ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
+ memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
+ SUM_JOURNAL_SIZE);
+ offset = 2 * SUM_JOURNAL_SIZE;
+
+ /* Step 3: restore summary entries */
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ unsigned short blk_off;
+ unsigned int segno;
+
+ seg_i = CURSEG_I(sbi, i);
+ segno = le32_to_cpu(ckpt->cur_data_segno[i]);
+ blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
+ seg_i->next_segno = segno;
+ reset_curseg(sbi, i, 0);
+ seg_i->alloc_type = ckpt->alloc_type[i];
+ seg_i->next_blkoff = blk_off;
+
+ if (seg_i->alloc_type == SSR)
+ blk_off = sbi->blocks_per_seg;
+
+ for (j = 0; j < blk_off; j++) {
+ struct f2fs_summary *s;
+ s = (struct f2fs_summary *)(kaddr + offset);
+ seg_i->sum_blk->entries[j] = *s;
+ offset += SUMMARY_SIZE;
+ if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ SUM_FOOTER_SIZE)
+ continue;
+
+ f2fs_put_page(page, 1);
+ page = NULL;
+
+ page = get_meta_page(sbi, start++);
+ kaddr = (unsigned char *)page_address(page);
+ offset = 0;
+ }
+ }
+ f2fs_put_page(page, 1);
+ return 0;
+}
+
+static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_summary_block *sum;
+ struct curseg_info *curseg;
+ struct page *new;
+ unsigned short blk_off;
+ unsigned int segno = 0;
+ block_t blk_addr = 0;
+
+ /* get segment number and block addr */
+ if (IS_DATASEG(type)) {
+ segno = le32_to_cpu(ckpt->cur_data_segno[type]);
+ blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
+ CURSEG_HOT_DATA]);
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+ blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
+ else
+ blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
+ } else {
+ segno = le32_to_cpu(ckpt->cur_node_segno[type -
+ CURSEG_HOT_NODE]);
+ blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
+ CURSEG_HOT_NODE]);
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+ blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
+ type - CURSEG_HOT_NODE);
+ else
+ blk_addr = GET_SUM_BLOCK(sbi, segno);
+ }
+
+ new = get_meta_page(sbi, blk_addr);
+ sum = (struct f2fs_summary_block *)page_address(new);
+
+ if (IS_NODESEG(type)) {
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
+ struct f2fs_summary *ns = &sum->entries[0];
+ int i;
+ for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
+ ns->version = 0;
+ ns->ofs_in_node = 0;
+ }
+ } else {
+ if (restore_node_summary(sbi, segno, sum)) {
+ f2fs_put_page(new, 1);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* set uncompleted segment to curseg */
+ curseg = CURSEG_I(sbi, type);
+ mutex_lock(&curseg->curseg_mutex);
+ memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
+ curseg->next_segno = segno;
+ reset_curseg(sbi, type, 0);
+ curseg->alloc_type = ckpt->alloc_type[type];
+ curseg->next_blkoff = blk_off;
+ mutex_unlock(&curseg->curseg_mutex);
+ f2fs_put_page(new, 1);
+ return 0;
+}
+
+static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
+{
+ int type = CURSEG_HOT_DATA;
+
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ /* restore for compacted data summary */
+ if (read_compacted_summaries(sbi))
+ return -EINVAL;
+ type = CURSEG_HOT_NODE;
+ }
+
+ for (; type <= CURSEG_COLD_NODE; type++)
+ if (read_normal_summaries(sbi, type))
+ return -EINVAL;
+ return 0;
+}
+
+static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ struct page *page;
+ unsigned char *kaddr;
+ struct f2fs_summary *summary;
+ struct curseg_info *seg_i;
+ int written_size = 0;
+ int i, j;
+
+ page = grab_meta_page(sbi, blkaddr++);
+ kaddr = (unsigned char *)page_address(page);
+
+ /* Step 1: write nat cache */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
+ written_size += SUM_JOURNAL_SIZE;
+
+ /* Step 2: write sit cache */
+ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
+ memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
+ SUM_JOURNAL_SIZE);
+ written_size += SUM_JOURNAL_SIZE;
+
+ set_page_dirty(page);
+
+ /* Step 3: write summary entries */
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ unsigned short blkoff;
+ seg_i = CURSEG_I(sbi, i);
+ if (sbi->ckpt->alloc_type[i] == SSR)
+ blkoff = sbi->blocks_per_seg;
+ else
+ blkoff = curseg_blkoff(sbi, i);
+
+ for (j = 0; j < blkoff; j++) {
+ if (!page) {
+ page = grab_meta_page(sbi, blkaddr++);
+ kaddr = (unsigned char *)page_address(page);
+ written_size = 0;
+ }
+ summary = (struct f2fs_summary *)(kaddr + written_size);
+ *summary = seg_i->sum_blk->entries[j];
+ written_size += SUMMARY_SIZE;
+ set_page_dirty(page);
+
+ if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ SUM_FOOTER_SIZE)
+ continue;
+
+ f2fs_put_page(page, 1);
+ page = NULL;
+ }
+ }
+ if (page)
+ f2fs_put_page(page, 1);
+}
+
+static void write_normal_summaries(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ int i, end;
+ if (IS_DATASEG(type))
+ end = type + NR_CURSEG_DATA_TYPE;
+ else
+ end = type + NR_CURSEG_NODE_TYPE;
+
+ for (i = type; i < end; i++) {
+ struct curseg_info *sum = CURSEG_I(sbi, i);
+ mutex_lock(&sum->curseg_mutex);
+ write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
+ mutex_unlock(&sum->curseg_mutex);
+ }
+}
+
+void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+{
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+ write_compacted_summaries(sbi, start_blk);
+ else
+ write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
+}
+
+void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
+{
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
+ write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
+ return;
+}
+
+int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
+ unsigned int val, int alloc)
+{
+ int i;
+
+ if (type == NAT_JOURNAL) {
+ for (i = 0; i < nats_in_cursum(sum); i++) {
+ if (le32_to_cpu(nid_in_journal(sum, i)) == val)
+ return i;
+ }
+ if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
+ return update_nats_in_cursum(sum, 1);
+ } else if (type == SIT_JOURNAL) {
+ for (i = 0; i < sits_in_cursum(sum); i++)
+ if (le32_to_cpu(segno_in_journal(sum, i)) == val)
+ return i;
+ if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
+ return update_sits_in_cursum(sum, 1);
+ }
+ return -1;
+}
+
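+/*
+ * Like NAT blocks, each SIT block has two on-disk copies.  sit_bitmap
+ * selects the copy that was valid at the last checkpoint;
+ * get_next_sit_page() copies its contents into the other slot, which the
+ * next checkpoint will write, and flips the bit via set_to_next_sit().
+ */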
+static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
+ block_t blk_addr = sit_i->sit_base_addr + offset;
+
+ check_seg_range(sbi, segno);
+
+ /* calculate sit block address */
+ if (f2fs_test_bit(offset, sit_i->sit_bitmap))
+ blk_addr += sit_i->sit_blocks;
+
+ return get_meta_page(sbi, blk_addr);
+}
+
+static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
+ unsigned int start)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct page *src_page, *dst_page;
+ pgoff_t src_off, dst_off;
+ void *src_addr, *dst_addr;
+
+ src_off = current_sit_addr(sbi, start);
+ dst_off = next_sit_addr(sbi, src_off);
+
+ /* get current sit block page without lock */
+ src_page = get_meta_page(sbi, src_off);
+ dst_page = grab_meta_page(sbi, dst_off);
+ BUG_ON(PageDirty(src_page));
+
+ src_addr = page_address(src_page);
+ dst_addr = page_address(dst_page);
+ memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+
+ set_page_dirty(dst_page);
+ f2fs_put_page(src_page, 1);
+
+ set_to_next_sit(sit_i, start);
+
+ return dst_page;
+}
+
+static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ int i;
+
+ /*
+ * If the journal area in the current summary is full of SIT entries,
+ * flush them all. Otherwise, newly dirtied SIT entries cannot replace
+ * the ones already cached in the journal.
+ */
+ if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
+ for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
+ unsigned int segno;
+ segno = le32_to_cpu(segno_in_journal(sum, i));
+ __mark_sit_entry_dirty(sbi, segno);
+ }
+ update_sits_in_cursum(sum, -sits_in_cursum(sum));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * CP calls this function, which flushes SIT entries including sit_journal,
+ * and moves prefree segs to free segs.
+ */
+void flush_sit_entries(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ unsigned long nsegs = TOTAL_SEGS(sbi);
+ struct page *page = NULL;
+ struct f2fs_sit_block *raw_sit = NULL;
+ unsigned int start = 0, end = 0;
+ unsigned int segno = -1;
+ bool flushed;
+
+ mutex_lock(&curseg->curseg_mutex);
+ mutex_lock(&sit_i->sentry_lock);
+
+ /*
+ * "flushed" indicates whether sit entries in journal are flushed
+ * to the SIT area or not.
+ */
+ flushed = flush_sits_in_journal(sbi);
+
+ while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
+ struct seg_entry *se = get_seg_entry(sbi, segno);
+ int sit_offset, offset;
+
+ sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
+
+ if (flushed)
+ goto to_sit_page;
+
+ offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
+ if (offset >= 0) {
+ segno_in_journal(sum, offset) = cpu_to_le32(segno);
+ seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
+ goto flush_done;
+ }
+to_sit_page:
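+ /* switch pages when the segment falls outside the cached SIT block */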
+ if (!page || (start > segno) || (segno > end)) {
+ if (page) {
+ f2fs_put_page(page, 1);
+ page = NULL;
+ }
+
+ start = START_SEGNO(sit_i, segno);
+ end = start + SIT_ENTRY_PER_BLOCK - 1;
+
+ /* read sit block that will be updated */
+ page = get_next_sit_page(sbi, start);
+ raw_sit = page_address(page);
+ }
+
+ /* update entry in SIT block */
+ seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
+flush_done:
+ __clear_bit(segno, bitmap);
+ sit_i->dirty_sentries--;
+ }
+ mutex_unlock(&sit_i->sentry_lock);
+ mutex_unlock(&curseg->curseg_mutex);
+
+ /* writeout last modified SIT block */
+ f2fs_put_page(page, 1);
+
+ set_prefree_as_free_segments(sbi);
+}
+
+static int build_sit_info(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct sit_info *sit_i;
+ unsigned int sit_segs, start;
+ char *src_bitmap, *dst_bitmap;
+ unsigned int bitmap_size;
+
+ /* allocate memory for SIT information */
+ sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
+ if (!sit_i)
+ return -ENOMEM;
+
+ SM_I(sbi)->sit_info = sit_i;
+
+ sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
+ if (!sit_i->sentries)
+ return -ENOMEM;
+
+ bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!sit_i->dirty_sentries_bitmap)
+ return -ENOMEM;
+
+ for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+ sit_i->sentries[start].cur_valid_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ sit_i->sentries[start].ckpt_valid_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].cur_valid_map
+ || !sit_i->sentries[start].ckpt_valid_map)
+ return -ENOMEM;
+ }
+
+ if (sbi->segs_per_sec > 1) {
+ sit_i->sec_entries = vzalloc(sbi->total_sections *
+ sizeof(struct sec_entry));
+ if (!sit_i->sec_entries)
+ return -ENOMEM;
+ }
+
+ /* get information related to SIT */
+ sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
+
+ /* setup SIT bitmap from checkpoint pack */
+ bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
+ src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
+
+ dst_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dst_bitmap)
+ return -ENOMEM;
+ memcpy(dst_bitmap, src_bitmap, bitmap_size);
+
+ /* init SIT information */
+ sit_i->s_ops = &default_salloc_ops;
+
+ sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
+ sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
+ sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+ sit_i->sit_bitmap = dst_bitmap;
+ sit_i->bitmap_size = bitmap_size;
+ sit_i->dirty_sentries = 0;
+ sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
+ sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
+ sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
+ mutex_init(&sit_i->sentry_lock);
+ return 0;
+}
+
+static int build_free_segmap(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_sm_info *sm_info = SM_I(sbi);
+ struct free_segmap_info *free_i;
+ unsigned int bitmap_size, sec_bitmap_size;
+
+ /* allocate memory for free segmap information */
+ free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
+ if (!free_i)
+ return -ENOMEM;
+
+ SM_I(sbi)->free_info = free_i;
+
+ bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
+ if (!free_i->free_segmap)
+ return -ENOMEM;
+
+ sec_bitmap_size = f2fs_bitmap_size(sbi->total_sections);
+ free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
+ if (!free_i->free_secmap)
+ return -ENOMEM;
+
+ /* temporarily mark all segments as in use (not free) */
+ memset(free_i->free_segmap, 0xff, bitmap_size);
+ memset(free_i->free_secmap, 0xff, sec_bitmap_size);
+
+ /* init free segmap information */
+ free_i->start_segno =
+ (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
+ free_i->free_segments = 0;
+ free_i->free_sections = 0;
+ rwlock_init(&free_i->segmap_lock);
+ return 0;
+}
+
+static int build_curseg(struct f2fs_sb_info *sbi)
+{
+ struct curseg_info *array;
+ int i;
+
+ array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ SM_I(sbi)->curseg_array = array;
+
+ for (i = 0; i < NR_CURSEG_TYPE; i++) {
+ mutex_init(&array[i].curseg_mutex);
+ array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ if (!array[i].sum_blk)
+ return -ENOMEM;
+ array[i].segno = NULL_SEGNO;
+ array[i].next_blkoff = 0;
+ }
+ return restore_curseg_summaries(sbi);
+}
+
+static void build_sit_entries(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
+ struct f2fs_summary_block *sum = curseg->sum_blk;
+ unsigned int start;
+
+ for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+ struct seg_entry *se = &sit_i->sentries[start];
+ struct f2fs_sit_block *sit_blk;
+ struct f2fs_sit_entry sit;
+ struct page *page;
+ int i;
+
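+ /* prefer the copy cached in the SIT journal over the on-disk block */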
+ mutex_lock(&curseg->curseg_mutex);
+ for (i = 0; i < sits_in_cursum(sum); i++) {
+ if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
+ sit = sit_in_journal(sum, i);
+ mutex_unlock(&curseg->curseg_mutex);
+ goto got_it;
+ }
+ }
+ mutex_unlock(&curseg->curseg_mutex);
+ page = get_current_sit_page(sbi, start);
+ sit_blk = (struct f2fs_sit_block *)page_address(page);
+ sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
+ f2fs_put_page(page, 1);
+got_it:
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+ if (sbi->segs_per_sec > 1) {
+ struct sec_entry *e = get_sec_entry(sbi, start);
+ e->valid_blocks += se->valid_blocks;
+ }
+ }
+}
+
+static void init_free_segmap(struct f2fs_sb_info *sbi)
+{
+ unsigned int start;
+ int type;
+
+ for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+ struct seg_entry *sentry = get_seg_entry(sbi, start);
+ if (!sentry->valid_blocks)
+ __set_free(sbi, start);
+ }
+
+ /* mark the current segments as in use */
+ for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
+ struct curseg_info *curseg_t = CURSEG_I(sbi, type);
+ __set_test_and_inuse(sbi, curseg_t->segno);
+ }
+}
+
+static void init_dirty_segmap(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int segno = 0, offset = 0;
+ unsigned short valid_blocks;
+
+ while (segno < TOTAL_SEGS(sbi)) {
+ /* find dirty segment based on free segmap */
+ segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
+ if (segno >= TOTAL_SEGS(sbi))
+ break;
+ offset = segno + 1;
+ valid_blocks = get_valid_blocks(sbi, segno, 0);
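+ /* fully valid or completely empty segments are not dirty */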
+ if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
+ continue;
+ mutex_lock(&dirty_i->seglist_lock);
+ __locate_dirty_segment(sbi, segno, DIRTY);
+ mutex_unlock(&dirty_i->seglist_lock);
+ }
+}
+
+static int init_victim_segmap(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+
+ dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
+ return -ENOMEM;
+ return 0;
+}
+
+static int build_dirty_segmap(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i;
+ unsigned int bitmap_size, i;
+
+ /* allocate memory for dirty segments list information */
+ dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
+ if (!dirty_i)
+ return -ENOMEM;
+
+ SM_I(sbi)->dirty_info = dirty_i;
+ mutex_init(&dirty_i->seglist_lock);
+
+ bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+
+ for (i = 0; i < NR_DIRTY_TYPE; i++) {
+ dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->nr_dirty[i] = 0;
+ if (!dirty_i->dirty_segmap[i])
+ return -ENOMEM;
+ }
+
+ init_dirty_segmap(sbi);
+ return init_victim_segmap(sbi);
+}
+
+/*
+ * Update min, max modified time for cost-benefit GC algorithm
+ */
+static void init_min_max_mtime(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int segno;
+
+ mutex_lock(&sit_i->sentry_lock);
+
+ sit_i->min_mtime = LLONG_MAX;
+
+ for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
+ unsigned int i;
+ unsigned long long mtime = 0;
+
+ for (i = 0; i < sbi->segs_per_sec; i++)
+ mtime += get_seg_entry(sbi, segno + i)->mtime;
+
+ mtime = div_u64(mtime, sbi->segs_per_sec);
+
+ if (sit_i->min_mtime > mtime)
+ sit_i->min_mtime = mtime;
+ }
+ sit_i->max_mtime = get_mtime(sbi);
+ mutex_unlock(&sit_i->sentry_lock);
+}
+
+int build_segment_manager(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_sm_info *sm_info;
+ int err;
+
+ sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
+ if (!sm_info)
+ return -ENOMEM;
+
+ /* init sm info */
+ sbi->sm_info = sm_info;
+ INIT_LIST_HEAD(&sm_info->wblist_head);
+ spin_lock_init(&sm_info->wblist_lock);
+ sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+ sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
+ sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
+ sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+ sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
+ sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+
+ err = build_sit_info(sbi);
+ if (err)
+ return err;
+ err = build_free_segmap(sbi);
+ if (err)
+ return err;
+ err = build_curseg(sbi);
+ if (err)
+ return err;
+
+ /* reinit free segmap based on SIT */
+ build_sit_entries(sbi);
+
+ init_free_segmap(sbi);
+ err = build_dirty_segmap(sbi);
+ if (err)
+ return err;
+
+ init_min_max_mtime(sbi);
+ return 0;
+}
+
+static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
+ enum dirty_type dirty_type)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+
+ mutex_lock(&dirty_i->seglist_lock);
+ kfree(dirty_i->dirty_segmap[dirty_type]);
+ dirty_i->nr_dirty[dirty_type] = 0;
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+void reset_victim_segmap(struct f2fs_sb_info *sbi)
+{
+ unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+ memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size);
+}
+
+static void destroy_victim_segmap(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+
+ kfree(dirty_i->victim_segmap[FG_GC]);
+ kfree(dirty_i->victim_segmap[BG_GC]);
+}
+
+static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ int i;
+
+ if (!dirty_i)
+ return;
+
+ /* discard pre-free/dirty segments list */
+ for (i = 0; i < NR_DIRTY_TYPE; i++)
+ discard_dirty_segmap(sbi, i);
+
+ destroy_victim_segmap(sbi);
+ SM_I(sbi)->dirty_info = NULL;
+ kfree(dirty_i);
+}
+
+static void destroy_curseg(struct f2fs_sb_info *sbi)
+{
+ struct curseg_info *array = SM_I(sbi)->curseg_array;
+ int i;
+
+ if (!array)
+ return;
+ SM_I(sbi)->curseg_array = NULL;
+ for (i = 0; i < NR_CURSEG_TYPE; i++)
+ kfree(array[i].sum_blk);
+ kfree(array);
+}
+
+static void destroy_free_segmap(struct f2fs_sb_info *sbi)
+{
+ struct free_segmap_info *free_i = SM_I(sbi)->free_info;
+ if (!free_i)
+ return;
+ SM_I(sbi)->free_info = NULL;
+ kfree(free_i->free_segmap);
+ kfree(free_i->free_secmap);
+ kfree(free_i);
+}
+
+static void destroy_sit_info(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int start;
+
+ if (!sit_i)
+ return;
+
+ if (sit_i->sentries) {
+ for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+ kfree(sit_i->sentries[start].cur_valid_map);
+ kfree(sit_i->sentries[start].ckpt_valid_map);
+ }
+ }
+ vfree(sit_i->sentries);
+ vfree(sit_i->sec_entries);
+ kfree(sit_i->dirty_sentries_bitmap);
+
+ SM_I(sbi)->sit_info = NULL;
+ kfree(sit_i->sit_bitmap);
+ kfree(sit_i);
+}
+
+void destroy_segment_manager(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_sm_info *sm_info = SM_I(sbi);
+ destroy_dirty_segmap(sbi);
+ destroy_curseg(sbi);
+ destroy_free_segmap(sbi);
+ destroy_sit_info(sbi);
+ sbi->sm_info = NULL;
+ kfree(sm_info);
+}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
new file mode 100644
index 00000000000..0948405af6f
--- /dev/null
+++ b/fs/f2fs/segment.h
@@ -0,0 +1,618 @@
+/*
+ * fs/f2fs/segment.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* constant macro */
+#define NULL_SEGNO ((unsigned int)(~0))
+
+/* V: Logical segment # in volume, R: Relative segment # in main area */
+#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
+#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
+
+#define IS_DATASEG(t) \
+ ((t == CURSEG_HOT_DATA) || (t == CURSEG_COLD_DATA) || \
+ (t == CURSEG_WARM_DATA))
+
+#define IS_NODESEG(t) \
+ ((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) || \
+ (t == CURSEG_WARM_NODE))
+
+#define IS_CURSEG(sbi, segno) \
+ ((segno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
+ (segno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
+ (segno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
+ (segno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
+ (segno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
+ (segno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+
+#define IS_CURSEC(sbi, secno) \
+ ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
+ sbi->segs_per_sec) || \
+ (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
+ sbi->segs_per_sec) || \
+ (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
+ sbi->segs_per_sec) || \
+ (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
+ sbi->segs_per_sec) || \
+ (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
+ sbi->segs_per_sec) || \
+ (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
+ sbi->segs_per_sec)) \
+
+#define START_BLOCK(sbi, segno) \
+ (SM_I(sbi)->seg0_blkaddr + \
+ (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+#define NEXT_FREE_BLKADDR(sbi, curseg) \
+ (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
+
+#define MAIN_BASE_BLOCK(sbi) (SM_I(sbi)->main_blkaddr)
+
+#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) \
+ ((blk_addr) - SM_I(sbi)->seg0_blkaddr)
+#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
+#define GET_SEGNO(sbi, blk_addr) \
+ (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
+ NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
+ GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
+#define GET_SECNO(sbi, segno) \
+ ((segno) / sbi->segs_per_sec)
+#define GET_ZONENO_FROM_SEGNO(sbi, segno) \
+ ((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
+
+#define GET_SUM_BLOCK(sbi, segno) \
+ ((sbi->sm_info->ssa_blkaddr) + segno)
+
+#define GET_SUM_TYPE(footer) ((footer)->entry_type)
+#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
+
+#define SIT_ENTRY_OFFSET(sit_i, segno) \
+ (segno % sit_i->sents_per_block)
+#define SIT_BLOCK_OFFSET(sit_i, segno) \
+ (segno / SIT_ENTRY_PER_BLOCK)
+#define START_SEGNO(sit_i, segno) \
+ (SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define f2fs_bitmap_size(nr) \
+ (BITS_TO_LONGS(nr) * sizeof(unsigned long))
+#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments)
+
+#define SECTOR_FROM_BLOCK(sbi, blk_addr) \
+ (blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+
+/* during checkpoint, bio_private is used to synchronize the last bio */
+struct bio_private {
+ struct f2fs_sb_info *sbi;
+ bool is_sync;
+ void *wait;
+};
+
+/*
+ * indicate a block allocation direction: RIGHT and LEFT.
+ * RIGHT means allocating new sections towards the end of the volume.
+ * LEFT means the opposite direction.
+ */
+enum {
+ ALLOC_RIGHT = 0,
+ ALLOC_LEFT
+};
+
+/*
+ * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
+ * LFS writes data sequentially with cleaning operations.
+ * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
+ */
+enum {
+ LFS = 0,
+ SSR
+};
+
+/*
+ * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
+ * GC_CB is based on cost-benefit algorithm.
+ * GC_GREEDY is based on greedy algorithm.
+ */
+enum {
+ GC_CB = 0,
+ GC_GREEDY
+};
+
+/*
+ * BG_GC means the background cleaning job.
+ * FG_GC means the on-demand cleaning job.
+ */
+enum {
+ BG_GC = 0,
+ FG_GC
+};
+
+/* for a function parameter to select a victim segment */
+struct victim_sel_policy {
+ int alloc_mode; /* LFS or SSR */
+ int gc_mode; /* GC_CB or GC_GREEDY */
+ unsigned long *dirty_segmap; /* dirty segment bitmap */
+ unsigned int offset; /* last scanned bitmap offset */
+ unsigned int ofs_unit; /* bitmap search unit */
+ unsigned int min_cost; /* minimum cost */
+ unsigned int min_segno; /* segment # having min. cost */
+};
+
+struct seg_entry {
+ unsigned short valid_blocks; /* # of valid blocks */
+ unsigned char *cur_valid_map; /* validity bitmap of blocks */
+ /*
+ * # of valid blocks and the validity bitmap stored in the last
+ * checkpoint pack. This information is used by the SSR mode.
+ */
+ unsigned short ckpt_valid_blocks;
+ unsigned char *ckpt_valid_map;
+ unsigned char type; /* segment type like CURSEG_XXX_TYPE */
+ unsigned long long mtime; /* modification time of the segment */
+};
+
+struct sec_entry {
+ unsigned int valid_blocks; /* # of valid blocks in a section */
+};
+
+struct segment_allocation {
+ void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
+};
+
+struct sit_info {
+ const struct segment_allocation *s_ops;
+
+ block_t sit_base_addr; /* start block address of SIT area */
+ block_t sit_blocks; /* # of blocks used by SIT area */
+ block_t written_valid_blocks; /* # of valid blocks in main area */
+ char *sit_bitmap; /* SIT bitmap pointer */
+ unsigned int bitmap_size; /* SIT bitmap size */
+
+ unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
+ unsigned int dirty_sentries; /* # of dirty sentries */
+ unsigned int sents_per_block; /* # of SIT entries per block */
+ struct mutex sentry_lock; /* to protect SIT cache */
+ struct seg_entry *sentries; /* SIT segment-level cache */
+ struct sec_entry *sec_entries; /* SIT section-level cache */
+
+ /* for cost-benefit algorithm in cleaning procedure */
+ unsigned long long elapsed_time; /* elapsed time after mount */
+ unsigned long long mounted_time; /* mount time */
+ unsigned long long min_mtime; /* min. modification time */
+ unsigned long long max_mtime; /* max. modification time */
+};
+
+struct free_segmap_info {
+ unsigned int start_segno; /* start segment number logically */
+ unsigned int free_segments; /* # of free segments */
+ unsigned int free_sections; /* # of free sections */
+ rwlock_t segmap_lock; /* free segmap lock */
+ unsigned long *free_segmap; /* free segment bitmap */
+ unsigned long *free_secmap; /* free section bitmap */
+};
+
+/* Notice: The order of dirty types is the same as CURSEG_XXX in f2fs.h */
+enum dirty_type {
+ DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
+ DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
+ DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
+ DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
+ DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
+ DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
+ DIRTY, /* to count # of dirty segments */
+ PRE, /* to count # of entirely obsolete segments */
+ NR_DIRTY_TYPE
+};
+
+struct dirty_seglist_info {
+ const struct victim_selection *v_ops; /* victim selection operation */
+ unsigned long *dirty_segmap[NR_DIRTY_TYPE];
+ struct mutex seglist_lock; /* lock for segment bitmaps */
+ int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
+ unsigned long *victim_segmap[2]; /* BG_GC, FG_GC */
+};
+
+/* victim selection function for cleaning and SSR */
+struct victim_selection {
+ int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
+ int, int, char);
+};
+
+/* for active log information */
+struct curseg_info {
+ struct mutex curseg_mutex; /* lock for consistency */
+ struct f2fs_summary_block *sum_blk; /* cached summary block */
+ unsigned char alloc_type; /* current allocation type */
+ unsigned int segno; /* current segment number */
+ unsigned short next_blkoff; /* next block offset to write */
+ unsigned int zone; /* current zone number */
+ unsigned int next_segno; /* preallocated segment */
+};
+
+/*
+ * inline functions
+ */
+static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
+{
+ return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
+}
+
+static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ return &sit_i->sentries[segno];
+}
+
+static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
+}
+
+static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno, int section)
+{
+ /*
+ * In order to get # of valid blocks in a section instantly from many
+ * segments, f2fs manages two counting structures separately.
+ */
+ if (section > 1)
+ return get_sec_entry(sbi, segno)->valid_blocks;
+ else
+ return get_seg_entry(sbi, segno)->valid_blocks;
+}
+
+static inline void seg_info_from_raw_sit(struct seg_entry *se,
+ struct f2fs_sit_entry *rs)
+{
+ se->valid_blocks = GET_SIT_VBLOCKS(rs);
+ se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
+ memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
+ memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
+ se->type = GET_SIT_TYPE(rs);
+ se->mtime = le64_to_cpu(rs->mtime);
+}
+
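+/*
+ * Pack the segment type into the high bits of vblocks and the valid
+ * block count into the low bits; the checkpointed copy kept in the
+ * in-memory entry is refreshed at the same time.
+ */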
+static inline void seg_info_to_raw_sit(struct seg_entry *se,
+ struct f2fs_sit_entry *rs)
+{
+ unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
+ se->valid_blocks;
+ rs->vblocks = cpu_to_le16(raw_vblocks);
+ memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+ memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
+ se->ckpt_valid_blocks = se->valid_blocks;
+ rs->mtime = cpu_to_le64(se->mtime);
+}
+
+static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
+ unsigned int max, unsigned int segno)
+{
+ unsigned int ret;
+ read_lock(&free_i->segmap_lock);
+ ret = find_next_bit(free_i->free_segmap, max, segno);
+ read_unlock(&free_i->segmap_lock);
+ return ret;
+}
+
+static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int next;
+
+ write_lock(&free_i->segmap_lock);
+ clear_bit(segno, free_i->free_segmap);
+ free_i->free_segments++;
+
+ next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
+ if (next >= start_segno + sbi->segs_per_sec) {
+ clear_bit(secno, free_i->free_secmap);
+ free_i->free_sections++;
+ }
+ write_unlock(&free_i->segmap_lock);
+}
+
+static inline void __set_inuse(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int secno = segno / sbi->segs_per_sec;
+ set_bit(segno, free_i->free_segmap);
+ free_i->free_segments--;
+ if (!test_and_set_bit(secno, free_i->free_secmap))
+ free_i->free_sections--;
+}
+
+static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int next;
+
+ write_lock(&free_i->segmap_lock);
+ if (test_and_clear_bit(segno, free_i->free_segmap)) {
+ free_i->free_segments++;
+
+ next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi),
+ start_segno);
+ if (next >= start_segno + sbi->segs_per_sec) {
+ if (test_and_clear_bit(secno, free_i->free_secmap))
+ free_i->free_sections++;
+ }
+ }
+ write_unlock(&free_i->segmap_lock);
+}
+
+static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int secno = segno / sbi->segs_per_sec;
+ write_lock(&free_i->segmap_lock);
+ if (!test_and_set_bit(segno, free_i->free_segmap)) {
+ free_i->free_segments--;
+ if (!test_and_set_bit(secno, free_i->free_secmap))
+ free_i->free_sections--;
+ }
+ write_unlock(&free_i->segmap_lock);
+}
+
+static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
+ void *dst_addr)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
+}
+
+static inline block_t written_block_count(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ block_t vblocks;
+
+ mutex_lock(&sit_i->sentry_lock);
+ vblocks = sit_i->written_valid_blocks;
+ mutex_unlock(&sit_i->sentry_lock);
+
+ return vblocks;
+}
+
+static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int free_segs;
+
+ read_lock(&free_i->segmap_lock);
+ free_segs = free_i->free_segments;
+ read_unlock(&free_i->segmap_lock);
+
+ return free_segs;
+}
+
+static inline int reserved_segments(struct f2fs_sb_info *sbi)
+{
+ return SM_I(sbi)->reserved_segments;
+}
+
+static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
+{
+ struct free_segmap_info *free_i = FREE_I(sbi);
+ unsigned int free_secs;
+
+ read_lock(&free_i->segmap_lock);
+ free_secs = free_i->free_sections;
+ read_unlock(&free_i->segmap_lock);
+
+ return free_secs;
+}
+
+static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
+{
+ return DIRTY_I(sbi)->nr_dirty[PRE];
+}
+
+static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
+{
+ return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
+ DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
+ DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
+ DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
+ DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
+ DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
+}
+
+static inline int overprovision_segments(struct f2fs_sb_info *sbi)
+{
+ return SM_I(sbi)->ovp_segments;
+}
+
+static inline int overprovision_sections(struct f2fs_sb_info *sbi)
+{
+ return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
+}
+
+static inline int reserved_sections(struct f2fs_sb_info *sbi)
+{
+ return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
+}
+
+static inline bool need_SSR(struct f2fs_sb_info *sbi)
+{
+ return (free_sections(sbi) < overprovision_sections(sbi));
+}
+
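+/* ask the victim selector for a segment to reuse (SSR) for this log type */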
+static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ return DIRTY_I(sbi)->v_ops->get_victim(sbi,
+ &(curseg)->next_segno, BG_GC, type, SSR);
+}
+
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
+{
+ return free_sections(sbi) <= reserved_sections(sbi);
+}
+
+static inline int utilization(struct f2fs_sb_info *sbi)
+{
+ return (long int)valid_user_blocks(sbi) * 100 /
+ (long int)sbi->user_block_count;
+}
+
+/*
+ * Sometimes it is better for f2fs to drop its out-of-place update policy.
+ * If fs utilization is over MIN_IPU_UTIL, f2fs tries to write data in
+ * the original place, as other traditional file systems do.
+ * MIN_IPU_UTIL is currently set to 100 percent, which disables this.
+ * See need_inplace_update() below.
+ */
+#define MIN_IPU_UTIL 100
+static inline bool need_inplace_update(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ if (S_ISDIR(inode->i_mode))
+ return false;
+ if (need_SSR(sbi) && utilization(sbi) > MIN_IPU_UTIL)
+ return true;
+ return false;
+}
+
+static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
+ int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ return curseg->segno;
+}
+
+static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
+ int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ return curseg->alloc_type;
+}
+
+static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ return curseg->next_blkoff;
+}
+
+static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
+{
+ unsigned int end_segno = SM_I(sbi)->segment_count - 1;
+ BUG_ON(segno > end_segno);
+}
+
+/*
+ * This function is used only for debugging.
+ * NOTE: this function should be removed in the future.
+ */
+static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+{
+ struct f2fs_sm_info *sm_info = SM_I(sbi);
+ block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
+ block_t start_addr = sm_info->seg0_blkaddr;
+ block_t end_addr = start_addr + total_blks - 1;
+ BUG_ON(blk_addr < start_addr);
+ BUG_ON(blk_addr > end_addr);
+}
+
+/*
+ * A summary block is always treated as an invalid block.
+ */
+static inline void check_block_count(struct f2fs_sb_info *sbi,
+ int segno, struct f2fs_sit_entry *raw_sit)
+{
+ struct f2fs_sm_info *sm_info = SM_I(sbi);
+ unsigned int end_segno = sm_info->segment_count - 1;
+ int valid_blocks = 0;
+ int i;
+
+ /* check segment usage */
+ BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
+
+ /* check boundary of a given segment number */
+ BUG_ON(segno > end_segno);
+
+ /* check bitmap with valid block count */
+ for (i = 0; i < sbi->blocks_per_seg; i++)
+ if (f2fs_test_bit(i, raw_sit->valid_map))
+ valid_blocks++;
+ BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
+}
+
+static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
+ unsigned int start)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
+ block_t blk_addr = sit_i->sit_base_addr + offset;
+
+ check_seg_range(sbi, start);
+
+ /* calculate sit block address */
+ if (f2fs_test_bit(offset, sit_i->sit_bitmap))
+ blk_addr += sit_i->sit_blocks;
+
+ return blk_addr;
+}
+
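+/* return the block address of the same SIT entry in the other copy */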
+static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
+ pgoff_t block_addr)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ block_addr -= sit_i->sit_base_addr;
+ if (block_addr < sit_i->sit_blocks)
+ block_addr += sit_i->sit_blocks;
+ else
+ block_addr -= sit_i->sit_blocks;
+
+ return block_addr + sit_i->sit_base_addr;
+}
+
+static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
+{
+ unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);
+
+ if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
+ f2fs_clear_bit(block_off, sit_i->sit_bitmap);
+ else
+ f2fs_set_bit(block_off, sit_i->sit_bitmap);
+}
+
+static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
+{
+ struct sit_info *sit_i = SIT_I(sbi);
+ return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
+ sit_i->mounted_time;
+}
+
+static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
+ unsigned int ofs_in_node, unsigned char version)
+{
+ sum->nid = cpu_to_le32(nid);
+ sum->ofs_in_node = cpu_to_le16(ofs_in_node);
+ sum->version = version;
+}
+
+static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
+{
+ return __start_cp_addr(sbi) +
+ le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
+}
+
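+/*
+ * Summary blocks sit at the tail of the checkpoint pack: count back
+ * (base + 1) blocks from the end of the pack and step forward by type.
+ */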
+static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
+{
+ return __start_cp_addr(sbi) +
+ le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
+ - (base + 1) + type;
+}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
new file mode 100644
index 00000000000..13867322cf5
--- /dev/null
+++ b/fs/f2fs/super.c
@@ -0,0 +1,657 @@
+/*
+ * fs/f2fs/super.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/statfs.h>
+#include <linux/proc_fs.h>
+#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
+#include <linux/kthread.h>
+#include <linux/parser.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include <linux/random.h>
+#include <linux/exportfs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include "xattr.h"
+
+static struct kmem_cache *f2fs_inode_cachep;
+
+enum {
+ Opt_gc_background_off,
+ Opt_disable_roll_forward,
+ Opt_discard,
+ Opt_noheap,
+ Opt_nouser_xattr,
+ Opt_noacl,
+ Opt_active_logs,
+ Opt_disable_ext_identify,
+ Opt_err,
+};
+
+static match_table_t f2fs_tokens = {
+ {Opt_gc_background_off, "background_gc_off"},
+ {Opt_disable_roll_forward, "disable_roll_forward"},
+ {Opt_discard, "discard"},
+ {Opt_noheap, "no_heap"},
+ {Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_noacl, "noacl"},
+ {Opt_active_logs, "active_logs=%u"},
+ {Opt_disable_ext_identify, "disable_ext_identify"},
+ {Opt_err, NULL},
+};
+
+static void init_once(void *foo)
+{
+ struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
+
+ inode_init_once(&fi->vfs_inode);
+}
+
+static struct inode *f2fs_alloc_inode(struct super_block *sb)
+{
+ struct f2fs_inode_info *fi;
+
+ fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
+ if (!fi)
+ return NULL;
+
+ init_once((void *) fi);
+
+ /* Initialize f2fs-specific inode info */
+ fi->vfs_inode.i_version = 1;
+ atomic_set(&fi->dirty_dents, 0);
+ fi->i_current_depth = 1;
+ fi->i_advise = 0;
+ rwlock_init(&fi->ext.ext_lock);
+
+ set_inode_flag(fi, FI_NEW_INODE);
+
+ return &fi->vfs_inode;
+}
+
+static void f2fs_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
+}
+
+static void f2fs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, f2fs_i_callback);
+}
+
+static void f2fs_put_super(struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ f2fs_destroy_stats(sbi);
+ stop_gc_thread(sbi);
+
+ write_checkpoint(sbi, false, true);
+
+ iput(sbi->node_inode);
+ iput(sbi->meta_inode);
+
+ /* destroy f2fs internal modules */
+ destroy_node_manager(sbi);
+ destroy_segment_manager(sbi);
+
+ kfree(sbi->ckpt);
+
+ sb->s_fs_info = NULL;
+ brelse(sbi->raw_super_buf);
+ kfree(sbi);
+}
+
+int f2fs_sync_fs(struct super_block *sb, int sync)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int ret = 0;
+
+ if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
+ return 0;
+
+ if (sync)
+ write_checkpoint(sbi, false, false);
+
+ return ret;
+}
+
+static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ block_t total_count, user_block_count, start_count, ovp_count;
+
+ total_count = le64_to_cpu(sbi->raw_super->block_count);
+ user_block_count = sbi->user_block_count;
+ start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
+ ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
+ buf->f_type = F2FS_SUPER_MAGIC;
+ buf->f_bsize = sbi->blocksize;
+
+ buf->f_blocks = total_count - start_count;
+ buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
+ buf->f_bavail = user_block_count - valid_user_blocks(sbi);
+
+ buf->f_files = valid_inode_count(sbi);
+ buf->f_ffree = sbi->total_node_count - valid_node_count(sbi);
+
+ buf->f_namelen = F2FS_MAX_NAME_LEN;
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+
+ return 0;
+}
+
+static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
+
+ if (test_opt(sbi, BG_GC))
+ seq_puts(seq, ",background_gc_on");
+ else
+ seq_puts(seq, ",background_gc_off");
+ if (test_opt(sbi, DISABLE_ROLL_FORWARD))
+ seq_puts(seq, ",disable_roll_forward");
+ if (test_opt(sbi, DISCARD))
+ seq_puts(seq, ",discard");
+ if (test_opt(sbi, NOHEAP))
+ seq_puts(seq, ",no_heap_alloc");
+#ifdef CONFIG_F2FS_FS_XATTR
+ if (test_opt(sbi, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+ else
+ seq_puts(seq, ",nouser_xattr");
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ if (test_opt(sbi, POSIX_ACL))
+ seq_puts(seq, ",acl");
+ else
+ seq_puts(seq, ",noacl");
+#endif
+ if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
+ seq_puts(seq, ",disable_ext_indentify");
+
+ seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+
+ return 0;
+}
+
+static struct super_operations f2fs_sops = {
+ .alloc_inode = f2fs_alloc_inode,
+ .destroy_inode = f2fs_destroy_inode,
+ .write_inode = f2fs_write_inode,
+ .show_options = f2fs_show_options,
+ .evict_inode = f2fs_evict_inode,
+ .put_super = f2fs_put_super,
+ .sync_fs = f2fs_sync_fs,
+ .statfs = f2fs_statfs,
+};
+
+static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
+ u64 ino, u32 generation)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct inode *inode;
+
+ if (ino < F2FS_ROOT_INO(sbi))
+ return ERR_PTR(-ESTALE);
+
+ /*
+ * f2fs_iget isn't quite right if the inode is currently unallocated!
+ * However f2fs_iget currently does appropriate checks to handle stale
+ * inodes so everything is OK.
+ */
+ inode = f2fs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+ /* we didn't find the right inode.. */
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ return inode;
+}
+
+static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ f2fs_nfs_get_inode);
+}
+
+static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ f2fs_nfs_get_inode);
+}
+
+static const struct export_operations f2fs_export_ops = {
+ .fh_to_dentry = f2fs_fh_to_dentry,
+ .fh_to_parent = f2fs_fh_to_parent,
+ .get_parent = f2fs_get_parent,
+};
+
+static int parse_options(struct f2fs_sb_info *sbi, char *options)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+ int arg = 0;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+ if (!*p)
+ continue;
+ /*
+ * Initialize args struct so we know whether arg was
+ * found; some options take optional arguments.
+ */
+ args[0].to = args[0].from = NULL;
+ token = match_token(p, f2fs_tokens, args);
+
+ switch (token) {
+ case Opt_gc_background_off:
+ clear_opt(sbi, BG_GC);
+ break;
+ case Opt_disable_roll_forward:
+ set_opt(sbi, DISABLE_ROLL_FORWARD);
+ break;
+ case Opt_discard:
+ set_opt(sbi, DISCARD);
+ break;
+ case Opt_noheap:
+ set_opt(sbi, NOHEAP);
+ break;
+#ifdef CONFIG_F2FS_FS_XATTR
+ case Opt_nouser_xattr:
+ clear_opt(sbi, XATTR_USER);
+ break;
+#else
+ case Opt_nouser_xattr:
+ pr_info("nouser_xattr options not supported\n");
+ break;
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ case Opt_noacl:
+ clear_opt(sbi, POSIX_ACL);
+ break;
+#else
+ case Opt_noacl:
+ pr_info("noacl options not supported\n");
+ break;
+#endif
+ case Opt_active_logs:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg != 2 && arg != 4 && arg != 6)
+ return -EINVAL;
+ sbi->active_logs = arg;
+ break;
+ case Opt_disable_ext_identify:
+ set_opt(sbi, DISABLE_EXT_IDENTIFY);
+ break;
+ default:
+ pr_err("Unrecognized mount option \"%s\" or missing value\n",
+ p);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
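+/*
+ * Maximum file size covered by the block mapping: direct pointers in
+ * the inode, two direct node blocks, two indirect node blocks and one
+ * double indirect node block, converted to bytes by the block size shift.
+ */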
+static loff_t max_file_size(unsigned bits)
+{
+ loff_t result = ADDRS_PER_INODE;
+ loff_t leaf_count = ADDRS_PER_BLOCK;
+
+ /* two direct node blocks */
+ result += (leaf_count * 2);
+
+ /* two indirect node blocks */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += (leaf_count * 2);
+
+ /* one double indirect node block */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += leaf_count;
+
+ result <<= bits;
+ return result;
+}
+
+static int sanity_check_raw_super(struct f2fs_super_block *raw_super)
+{
+ unsigned int blocksize;
+
+ if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic))
+ return 1;
+
+ /* Currently, support only 4KB block size */
+ blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
+ if (blocksize != PAGE_CACHE_SIZE)
+ return 1;
+ if (le32_to_cpu(raw_super->log_sectorsize) !=
+ F2FS_LOG_SECTOR_SIZE)
+ return 1;
+ if (le32_to_cpu(raw_super->log_sectors_per_block) !=
+ F2FS_LOG_SECTORS_PER_BLOCK)
+ return 1;
+ return 0;
+}
+
+static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
+ struct f2fs_checkpoint *ckpt)
+{
+ unsigned int total, fsmeta;
+
+ total = le32_to_cpu(raw_super->segment_count);
+ fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
+ fsmeta += le32_to_cpu(raw_super->segment_count_sit);
+ fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+ fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
+ fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
+
+ if (fsmeta >= total)
+ return 1;
+ return 0;
+}
+
+static void init_sb_info(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = sbi->raw_super;
+ int i;
+
+ sbi->log_sectors_per_block =
+ le32_to_cpu(raw_super->log_sectors_per_block);
+ sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
+ sbi->blocksize = 1 << sbi->log_blocksize;
+ sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
+ sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
+ sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
+ sbi->total_sections = le32_to_cpu(raw_super->section_count);
+ sbi->total_node_count =
+ (le32_to_cpu(raw_super->segment_count_nat) / 2)
+ * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
+ sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
+ sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
+ sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
+
+ for (i = 0; i < NR_COUNT_TYPE; i++)
+ atomic_set(&sbi->nr_pages[i], 0);
+}
+
+static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct f2fs_sb_info *sbi;
+ struct f2fs_super_block *raw_super;
+ struct buffer_head *raw_super_buf;
+ struct inode *root;
+ long err = -EINVAL;
+ int i;
+
+ /* allocate memory for f2fs-specific super block info */
+ sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ /* set a temporary block size */
+ if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
+ goto free_sbi;
+
+ /* read f2fs raw super block */
+ raw_super_buf = sb_bread(sb, 0);
+ if (!raw_super_buf) {
+ err = -EIO;
+ goto free_sbi;
+ }
+ raw_super = (struct f2fs_super_block *)
+ ((char *)raw_super_buf->b_data + F2FS_SUPER_OFFSET);
+
+ /* init some FS parameters */
+ sbi->active_logs = NR_CURSEG_TYPE;
+
+ set_opt(sbi, BG_GC);
+
+#ifdef CONFIG_F2FS_FS_XATTR
+ set_opt(sbi, XATTR_USER);
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ set_opt(sbi, POSIX_ACL);
+#endif
+ /* parse mount options */
+ if (parse_options(sbi, (char *)data))
+ goto free_sb_buf;
+
+ /* sanity checking of raw super */
+ if (sanity_check_raw_super(raw_super))
+ goto free_sb_buf;
+
+ sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
+ sb->s_max_links = F2FS_LINK_MAX;
+ get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+
+ sb->s_op = &f2fs_sops;
+ sb->s_xattr = f2fs_xattr_handlers;
+ sb->s_export_op = &f2fs_export_ops;
+ sb->s_magic = F2FS_SUPER_MAGIC;
+ sb->s_fs_info = sbi;
+ sb->s_time_gran = 1;
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+
+ /* init f2fs-specific super block info */
+ sbi->sb = sb;
+ sbi->raw_super = raw_super;
+ sbi->raw_super_buf = raw_super_buf;
+ mutex_init(&sbi->gc_mutex);
+ mutex_init(&sbi->write_inode);
+ mutex_init(&sbi->writepages);
+ mutex_init(&sbi->cp_mutex);
+ for (i = 0; i < NR_LOCK_TYPE; i++)
+ mutex_init(&sbi->fs_lock[i]);
+ sbi->por_doing = 0;
+ spin_lock_init(&sbi->stat_lock);
+ init_rwsem(&sbi->bio_sem);
+ init_sb_info(sbi);
+
+ /* get an inode for meta space */
+ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
+ if (IS_ERR(sbi->meta_inode)) {
+ err = PTR_ERR(sbi->meta_inode);
+ goto free_sb_buf;
+ }
+
+ err = get_valid_checkpoint(sbi);
+ if (err)
+ goto free_meta_inode;
+
+ /* sanity checking of checkpoint */
+ err = -EINVAL;
+ if (sanity_check_ckpt(raw_super, sbi->ckpt))
+ goto free_cp;
+
+ sbi->total_valid_node_count =
+ le32_to_cpu(sbi->ckpt->valid_node_count);
+ sbi->total_valid_inode_count =
+ le32_to_cpu(sbi->ckpt->valid_inode_count);
+ sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
+ sbi->total_valid_block_count =
+ le64_to_cpu(sbi->ckpt->valid_block_count);
+ sbi->last_valid_block_count = sbi->total_valid_block_count;
+ sbi->alloc_valid_block_count = 0;
+ INIT_LIST_HEAD(&sbi->dir_inode_list);
+ spin_lock_init(&sbi->dir_inode_lock);
+
+ /* init super block */
+ if (!sb_set_blocksize(sb, sbi->blocksize))
+ goto free_cp;
+
+ init_orphan_info(sbi);
+
+ /* setup f2fs internal modules */
+ err = build_segment_manager(sbi);
+ if (err)
+ goto free_sm;
+ err = build_node_manager(sbi);
+ if (err)
+ goto free_nm;
+
+ build_gc_manager(sbi);
+
+ /* get an inode for node space */
+ sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
+ if (IS_ERR(sbi->node_inode)) {
+ err = PTR_ERR(sbi->node_inode);
+ goto free_nm;
+ }
+
+ /* if there are any orphan nodes, free them */
+ err = -EINVAL;
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
+ recover_orphan_inodes(sbi))
+ goto free_node_inode;
+
+ /* read root inode and dentry */
+ root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto free_node_inode;
+ }
+ if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
+ goto free_root_inode;
+
+ sb->s_root = d_make_root(root); /* allocate root dentry */
+ if (!sb->s_root) {
+ err = -ENOMEM;
+ goto free_root_inode;
+ }
+
+ /* recover fsynced data */
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
+ !test_opt(sbi, DISABLE_ROLL_FORWARD))
+ recover_fsync_data(sbi);
+
+ /* After POR, we can run background GC thread */
+ err = start_gc_thread(sbi);
+ if (err)
+ goto fail;
+
+ err = f2fs_build_stats(sbi);
+ if (err)
+ goto fail;
+
+ return 0;
+fail:
+ stop_gc_thread(sbi);
+free_root_inode:
+ dput(sb->s_root);
+ sb->s_root = NULL;
+free_node_inode:
+ iput(sbi->node_inode);
+free_nm:
+ destroy_node_manager(sbi);
+free_sm:
+ destroy_segment_manager(sbi);
+free_cp:
+ kfree(sbi->ckpt);
+free_meta_inode:
+ make_bad_inode(sbi->meta_inode);
+ iput(sbi->meta_inode);
+free_sb_buf:
+ brelse(raw_super_buf);
+free_sbi:
+ kfree(sbi);
+ return err;
+}
+
+static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
+}
+
+static struct file_system_type f2fs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "f2fs",
+ .mount = f2fs_mount,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static int init_inodecache(void)
+{
+ f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
+ sizeof(struct f2fs_inode_info), NULL);
+ if (f2fs_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(f2fs_inode_cachep);
+}
+
+static int __init init_f2fs_fs(void)
+{
+ int err;
+
+ err = init_inodecache();
+ if (err)
+ goto fail;
+ err = create_node_manager_caches();
+ if (err)
+ goto fail;
+ err = create_gc_caches();
+ if (err)
+ goto fail;
+ err = create_checkpoint_caches();
+ if (err)
+ goto fail;
+ return register_filesystem(&f2fs_fs_type);
+fail:
+ return err;
+}
+
+static void __exit exit_f2fs_fs(void)
+{
+ destroy_root_stats();
+ unregister_filesystem(&f2fs_fs_type);
+ destroy_checkpoint_caches();
+ destroy_gc_caches();
+ destroy_node_manager_caches();
+ destroy_inodecache();
+}
+
+module_init(init_f2fs_fs)
+module_exit(exit_f2fs_fs)
+
+MODULE_AUTHOR("Samsung Electronics's Praesto Team");
+MODULE_DESCRIPTION("Flash Friendly File System");
+MODULE_LICENSE("GPL");
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
new file mode 100644
index 00000000000..7d52e8dc0c5
--- /dev/null
+++ b/fs/f2fs/xattr.c
@@ -0,0 +1,440 @@
+/*
+ * fs/f2fs/xattr.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Portions of this code from linux/fs/ext2/xattr.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
+ *
+ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
+ * Extended attributes for symlinks and special files added per
+ * suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ * Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/rwsem.h>
+#include <linux/f2fs_fs.h>
+#include "f2fs.h"
+#include "xattr.h"
+
+static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ int total_len, prefix_len = 0;
+ const char *prefix = NULL;
+
+ switch (type) {
+ case F2FS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ prefix = XATTR_USER_PREFIX;
+ prefix_len = XATTR_USER_PREFIX_LEN;
+ break;
+ case F2FS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ prefix = XATTR_TRUSTED_PREFIX;
+ prefix_len = XATTR_TRUSTED_PREFIX_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ total_len = prefix_len + name_len + 1;
+ if (list && total_len <= list_size) {
+ memcpy(list, prefix, prefix_len);
+ memcpy(list+prefix_len, name, name_len);
+ list[prefix_len + name_len] = '\0';
+ }
+ return total_len;
+}
+
+static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+
+ switch (type) {
+ case F2FS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ break;
+ case F2FS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+ return f2fs_getxattr(dentry->d_inode, type, name,
+ buffer, size);
+}
+
+static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+
+ switch (type) {
+ case F2FS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ break;
+ case F2FS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+
+ return f2fs_setxattr(dentry->d_inode, type, name, value, size);
+}
+
+static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
+{
+ const char *xname = F2FS_SYSTEM_ADVISE_PREFIX;
+ size_t size;
+
+ if (type != F2FS_XATTR_INDEX_ADVISE)
+ return 0;
+
+ size = strlen(xname) + 1;
+ if (list && size <= list_size)
+ memcpy(list, xname, size);
+ return size;
+}
+
+static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ *((char *)buffer) = F2FS_I(inode)->i_advise;
+ return sizeof(char);
+}
+
+static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+ if (value == NULL)
+ return -EINVAL;
+
+ F2FS_I(inode)->i_advise |= *(char *)value;
+ return 0;
+}
+
+const struct xattr_handler f2fs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = F2FS_XATTR_INDEX_USER,
+ .list = f2fs_xattr_generic_list,
+ .get = f2fs_xattr_generic_get,
+ .set = f2fs_xattr_generic_set,
+};
+
+const struct xattr_handler f2fs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = F2FS_XATTR_INDEX_TRUSTED,
+ .list = f2fs_xattr_generic_list,
+ .get = f2fs_xattr_generic_get,
+ .set = f2fs_xattr_generic_set,
+};
+
+const struct xattr_handler f2fs_xattr_advise_handler = {
+ .prefix = F2FS_SYSTEM_ADVISE_PREFIX,
+ .flags = F2FS_XATTR_INDEX_ADVISE,
+ .list = f2fs_xattr_advise_list,
+ .get = f2fs_xattr_advise_get,
+ .set = f2fs_xattr_advise_set,
+};
+
+static const struct xattr_handler *f2fs_xattr_handler_map[] = {
+ [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ [F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &f2fs_xattr_acl_access_handler,
+ [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler,
+#endif
+ [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
+ [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
+};
+
+const struct xattr_handler *f2fs_xattr_handlers[] = {
+ &f2fs_xattr_user_handler,
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+ &f2fs_xattr_acl_access_handler,
+ &f2fs_xattr_acl_default_handler,
+#endif
+ &f2fs_xattr_trusted_handler,
+ &f2fs_xattr_advise_handler,
+ NULL,
+};
+
+static inline const struct xattr_handler *f2fs_xattr_handler(int name_index)
+{
+ const struct xattr_handler *handler = NULL;
+
+ if (name_index > 0 && name_index < ARRAY_SIZE(f2fs_xattr_handler_map))
+ handler = f2fs_xattr_handler_map[name_index];
+ return handler;
+}
+
+int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_xattr_entry *entry;
+ struct page *page;
+ void *base_addr;
+ int error = 0, found = 0;
+ int value_len, name_len;
+
+ if (name == NULL)
+ return -EINVAL;
+ name_len = strlen(name);
+
+ if (!fi->i_xattr_nid)
+ return -ENODATA;
+
+ page = get_node_page(sbi, fi->i_xattr_nid);
+ base_addr = page_address(page);
+
+ list_for_each_xattr(entry, base_addr) {
+ if (entry->e_name_index != name_index)
+ continue;
+ if (entry->e_name_len != name_len)
+ continue;
+ if (!memcmp(entry->e_name, name, name_len)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ error = -ENODATA;
+ goto cleanup;
+ }
+
+ value_len = le16_to_cpu(entry->e_value_size);
+
+ if (buffer && value_len > buffer_size) {
+ error = -ERANGE;
+ goto cleanup;
+ }
+
+ if (buffer) {
+ char *pval = entry->e_name + entry->e_name_len;
+ memcpy(buffer, pval, value_len);
+ }
+ error = value_len;
+
+cleanup:
+ f2fs_put_page(page, 1);
+ return error;
+}
+
+ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_xattr_entry *entry;
+ struct page *page;
+ void *base_addr;
+ int error = 0;
+ size_t rest = buffer_size;
+
+ if (!fi->i_xattr_nid)
+ return 0;
+
+ page = get_node_page(sbi, fi->i_xattr_nid);
+ base_addr = page_address(page);
+
+ list_for_each_xattr(entry, base_addr) {
+ const struct xattr_handler *handler =
+ f2fs_xattr_handler(entry->e_name_index);
+ size_t size;
+
+ if (!handler)
+ continue;
+
+ size = handler->list(dentry, buffer, rest, entry->e_name,
+ entry->e_name_len, handler->flags);
+ if (buffer && size > rest) {
+ error = -ERANGE;
+ goto cleanup;
+ }
+
+ if (buffer)
+ buffer += size;
+ rest -= size;
+ }
+ error = buffer_size - rest;
+cleanup:
+ f2fs_put_page(page, 1);
+ return error;
+}
+
+int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+ const void *value, size_t value_len)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_xattr_header *header = NULL;
+ struct f2fs_xattr_entry *here, *last;
+ struct page *page;
+ void *base_addr;
+ int error, found, free, name_len, newsize;
+ char *pval;
+
+ if (name == NULL)
+ return -EINVAL;
+ name_len = strlen(name);
+
+ if (value == NULL)
+ value_len = 0;
+
+ if (name_len > 255 || value_len > MAX_VALUE_LEN)
+ return -ERANGE;
+
+ mutex_lock_op(sbi, NODE_NEW);
+ if (!fi->i_xattr_nid) {
+ /* Allocate new attribute block */
+ struct dnode_of_data dn;
+
+ if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
+ mutex_unlock_op(sbi, NODE_NEW);
+ return -ENOSPC;
+ }
+ set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
+ mark_inode_dirty(inode);
+
+ page = new_node_page(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(page)) {
+ alloc_nid_failed(sbi, fi->i_xattr_nid);
+ fi->i_xattr_nid = 0;
+ mutex_unlock_op(sbi, NODE_NEW);
+ return PTR_ERR(page);
+ }
+
+ alloc_nid_done(sbi, fi->i_xattr_nid);
+ base_addr = page_address(page);
+ header = XATTR_HDR(base_addr);
+ header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
+ header->h_refcount = cpu_to_le32(1);
+ } else {
+ /* The inode already has an extended attribute block. */
+ page = get_node_page(sbi, fi->i_xattr_nid);
+ if (IS_ERR(page)) {
+ mutex_unlock_op(sbi, NODE_NEW);
+ return PTR_ERR(page);
+ }
+
+ base_addr = page_address(page);
+ header = XATTR_HDR(base_addr);
+ }
+
+ if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
+ error = -EIO;
+ goto cleanup;
+ }
+
+ /* find entry with wanted name. */
+ found = 0;
+ list_for_each_xattr(here, base_addr) {
+ if (here->e_name_index != name_index)
+ continue;
+ if (here->e_name_len != name_len)
+ continue;
+ if (!memcmp(here->e_name, name, name_len)) {
+ found = 1;
+ break;
+ }
+ }
+
+ last = here;
+
+ while (!IS_XATTR_LAST_ENTRY(last))
+ last = XATTR_NEXT_ENTRY(last);
+
+ newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
+ name_len + value_len);
+
+ /* 1. Check space */
+ if (value) {
+ /* If value is NULL, this is a remove operation.
+ * For an update operation, calculate the free space.
+ */
+ free = MIN_OFFSET - ((char *)last - (char *)header);
+ if (found)
+ free = free - ENTRY_SIZE(here);
+
+ if (free < newsize) {
+ error = -ENOSPC;
+ goto cleanup;
+ }
+ }
+
+ /* 2. Remove old entry */
+ if (found) {
+ /* If the entry was found, remove the old entry.
+ * If it was not found, no remove operation is needed.
+ */
+ struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
+ int oldsize = ENTRY_SIZE(here);
+
+ memmove(here, next, (char *)last - (char *)next);
+ last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
+ memset(last, 0, oldsize);
+ }
+
+ /* 3. Write new entry */
+ if (value) {
+ /* By the time we get here, the old entry has been removed.
+ * We just write the new entry. */
+ memset(last, 0, newsize);
+ last->e_name_index = name_index;
+ last->e_name_len = name_len;
+ memcpy(last->e_name, name, name_len);
+ pval = last->e_name + name_len;
+ memcpy(pval, value, value_len);
+ last->e_value_size = cpu_to_le16(value_len);
+ }
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+
+ if (is_inode_flag_set(fi, FI_ACL_MODE)) {
+ inode->i_mode = fi->i_acl_mode;
+ inode->i_ctime = CURRENT_TIME;
+ clear_inode_flag(fi, FI_ACL_MODE);
+ }
+ f2fs_write_inode(inode, NULL);
+ mutex_unlock_op(sbi, NODE_NEW);
+
+ return 0;
+cleanup:
+ f2fs_put_page(page, 1);
+ mutex_unlock_op(sbi, NODE_NEW);
+ return error;
+}
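
(Aside, not part of the patch: once f2fs is mounted, the handlers above are reached through the ordinary xattr syscalls. A minimal userspace sketch; the mount point /mnt/f2fs, the file name, and the user_xattr mount option are illustrative assumptions.)

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char advise = 0;

		/* stored via f2fs_xattr_generic_set() under F2FS_XATTR_INDEX_USER */
		if (setxattr("/mnt/f2fs/file", "user.comment", "hello",
			     strlen("hello"), 0) < 0)
			perror("setxattr");

		/* served by f2fs_xattr_advise_get(), which returns one byte */
		if (getxattr("/mnt/f2fs/file", "system.advise", &advise, 1) == 1)
			printf("i_advise = 0x%02x\n", (unsigned char)advise);

		return 0;
	}
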
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
new file mode 100644
index 00000000000..49c9558305e
--- /dev/null
+++ b/fs/f2fs/xattr.h
@@ -0,0 +1,145 @@
+/*
+ * fs/f2fs/xattr.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Portions of this code from linux/fs/ext2/xattr.h
+ *
+ * On-disk format of extended attributes for the ext2 filesystem.
+ *
+ * (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __F2FS_XATTR_H__
+#define __F2FS_XATTR_H__
+
+#include <linux/init.h>
+#include <linux/xattr.h>
+
+/* Magic value in attribute blocks */
+#define F2FS_XATTR_MAGIC 0xF2F52011
+
+/* Maximum number of references to one attribute block */
+#define F2FS_XATTR_REFCOUNT_MAX 1024
+
+/* Name indexes */
+#define F2FS_SYSTEM_ADVISE_PREFIX "system.advise"
+#define F2FS_XATTR_INDEX_USER 1
+#define F2FS_XATTR_INDEX_POSIX_ACL_ACCESS 2
+#define F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
+#define F2FS_XATTR_INDEX_TRUSTED 4
+#define F2FS_XATTR_INDEX_LUSTRE 5
+#define F2FS_XATTR_INDEX_SECURITY 6
+#define F2FS_XATTR_INDEX_ADVISE 7
+
+struct f2fs_xattr_header {
+ __le32 h_magic; /* magic number for identification */
+ __le32 h_refcount; /* reference count */
+ __u32 h_reserved[4]; /* zero right now */
+};
+
+struct f2fs_xattr_entry {
+ __u8 e_name_index;
+ __u8 e_name_len;
+ __le16 e_value_size; /* size of attribute value */
+ char e_name[0]; /* attribute name */
+};
+
+#define XATTR_HDR(ptr) ((struct f2fs_xattr_header *)(ptr))
+#define XATTR_ENTRY(ptr) ((struct f2fs_xattr_entry *)(ptr))
+#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr)+1))
+#define XATTR_ROUND (3)
+
+#define XATTR_ALIGN(size) ((size + XATTR_ROUND) & ~XATTR_ROUND)
+
+#define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \
+ entry->e_name_len + le16_to_cpu(entry->e_value_size)))
+
+#define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\
+ ENTRY_SIZE(entry)))
+
+#define IS_XATTR_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
+
+#define list_for_each_xattr(entry, addr) \
+ for (entry = XATTR_FIRST_ENTRY(addr);\
+ !IS_XATTR_LAST_ENTRY(entry);\
+ entry = XATTR_NEXT_ENTRY(entry))
+
+
+#define MIN_OFFSET XATTR_ALIGN(PAGE_SIZE - \
+ sizeof(struct node_footer) - \
+ sizeof(__u32))
+
+#define MAX_VALUE_LEN (MIN_OFFSET - sizeof(struct f2fs_xattr_header) - \
+ sizeof(struct f2fs_xattr_entry))
+
+/*
+ * On-disk structure of f2fs_xattr
+ * We use only 1 block for xattr.
+ *
+ * +--------------------+
+ * | f2fs_xattr_header |
+ * | |
+ * +--------------------+
+ * | f2fs_xattr_entry |
+ * | .e_name_index = 1 |
+ * | .e_name_len = 3 |
+ * | .e_value_size = 14 |
+ * | .e_name = "foo" |
+ * | "value_of_xattr" |<- value_offs = e_name + e_name_len
+ * +--------------------+
+ * | f2fs_xattr_entry |
+ * | .e_name_index = 4 |
+ * | .e_name = "bar" |
+ * +--------------------+
+ * | |
+ * | Free |
+ * | |
+ * +--------------------+<- MIN_OFFSET
+ * | node_footer |
+ * | (nid, ino, offset) |
+ * +--------------------+
+ *
+ **/
+
+#ifdef CONFIG_F2FS_FS_XATTR
+extern const struct xattr_handler f2fs_xattr_user_handler;
+extern const struct xattr_handler f2fs_xattr_trusted_handler;
+extern const struct xattr_handler f2fs_xattr_acl_access_handler;
+extern const struct xattr_handler f2fs_xattr_acl_default_handler;
+extern const struct xattr_handler f2fs_xattr_advise_handler;
+
+extern const struct xattr_handler *f2fs_xattr_handlers[];
+
+extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+ const void *value, size_t value_len);
+extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
+ void *buffer, size_t buffer_size);
+extern ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
+ size_t buffer_size);
+
+#else
+
+#define f2fs_xattr_handlers NULL
+static inline int f2fs_setxattr(struct inode *inode, int name_index,
+ const char *name, const void *value, size_t value_len)
+{
+ return -EOPNOTSUPP;
+}
+static inline int f2fs_getxattr(struct inode *inode, int name_index,
+ const char *name, void *buffer, size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
+ size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __F2FS_XATTR_H__ */
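
(Aside, not part of the patch: a quick worked example of the macros above. The "foo" entry from the layout diagram, a 3-byte name with a 14-byte value, takes up:)

	size_t raw     = sizeof(struct f2fs_xattr_entry) + 3 + 14;	/* 4 + 3 + 14 = 21 */
	size_t on_disk = XATTR_ALIGN(raw);				/* (21 + 3) & ~3 = 24 */

(so ENTRY_SIZE() reports 24 bytes and XATTR_NEXT_ENTRY() steps 24 bytes forward to the "bar" entry. Everything must fit below MIN_OFFSET inside the single xattr node, which is why a value is capped at MAX_VALUE_LEN.)
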
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 2a182342442..58bf744dbf3 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -461,8 +461,7 @@ static int fat_parse_short(struct super_block *sb,
}
/*
- * Return values: negative -> error, 0 -> not found, positive -> found,
- * value is the total amount of slots, including the shortname entry.
+ * Return values: negative -> error/not found, 0 -> found.
*/
int fat_search_long(struct inode *inode, const unsigned char *name,
int name_len, struct fat_slot_info *sinfo)
@@ -1255,7 +1254,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
sinfo->nr_slots = nr_slots;
- /* First stage: search free direcotry entries */
+ /* First stage: search free directory entries */
free_slots = nr_bhs = 0;
bh = prev = NULL;
pos = 0;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 623f36f0423..12701a56775 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -29,6 +29,7 @@ struct fat_mount_options {
unsigned short fs_fmask;
unsigned short fs_dmask;
unsigned short codepage; /* Codepage for shortname conversions */
+ int time_offset; /* Offset of timestamps from UTC (in minutes) */
char *iocharset; /* Charset used for filename input/display */
unsigned short shortname; /* flags for shortname display/create rule */
unsigned char name_check; /* r = relaxed, n = normal, s = strict */
@@ -45,7 +46,7 @@ struct fat_mount_options {
flush:1, /* write things quickly */
nocase:1, /* Does this need case conversion? 0=need case conversion*/
usefree:1, /* Use free_clusters for FAT32 */
- tz_utc:1, /* Filesystem timestamps are in UTC */
+ tz_set:1, /* Filesystem timestamps' offset set */
rodir:1, /* allow ATTR_RO for directory */
discard:1, /* Issue discard requests on deletions */
nfs:1; /* Do extra work needed for NFS export */
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5bafaad0053..f8f491677a4 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -26,6 +26,7 @@
#include <linux/writeback.h>
#include <linux/log2.h>
#include <linux/hash.h>
+#include <linux/blkdev.h>
#include <asm/unaligned.h>
#include "fat.h"
@@ -725,7 +726,8 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
if (opts->allow_utime)
seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
if (sbi->nls_disk)
- seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
+ /* strip "cp" prefix from displayed option */
+ seq_printf(m, ",codepage=%s", &sbi->nls_disk->charset[2]);
if (isvfat) {
if (sbi->nls_io)
seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
@@ -777,8 +779,12 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
}
if (opts->flush)
seq_puts(m, ",flush");
- if (opts->tz_utc)
- seq_puts(m, ",tz=UTC");
+ if (opts->tz_set) {
+ if (opts->time_offset)
+ seq_printf(m, ",time_offset=%d", opts->time_offset);
+ else
+ seq_puts(m, ",tz=UTC");
+ }
if (opts->errors == FAT_ERRORS_CONT)
seq_puts(m, ",errors=continue");
else if (opts->errors == FAT_ERRORS_PANIC)
@@ -800,7 +806,8 @@ enum {
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
- Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_err,
+ Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_time_offset,
+ Opt_err,
};
static const match_table_t fat_tokens = {
@@ -825,6 +832,7 @@ static const match_table_t fat_tokens = {
{Opt_immutable, "sys_immutable"},
{Opt_flush, "flush"},
{Opt_tz_utc, "tz=UTC"},
+ {Opt_time_offset, "time_offset=%d"},
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
@@ -909,7 +917,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
opts->utf8 = opts->unicode_xlate = 0;
opts->numtail = 1;
opts->usefree = opts->nocase = 0;
- opts->tz_utc = 0;
+ opts->tz_set = 0;
opts->nfs = 0;
opts->errors = FAT_ERRORS_RO;
*debug = 0;
@@ -965,48 +973,57 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
break;
case Opt_uid:
if (match_int(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->fs_uid = make_kuid(current_user_ns(), option);
if (!uid_valid(opts->fs_uid))
- return 0;
+ return -EINVAL;
break;
case Opt_gid:
if (match_int(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->fs_gid = make_kgid(current_user_ns(), option);
if (!gid_valid(opts->fs_gid))
- return 0;
+ return -EINVAL;
break;
case Opt_umask:
if (match_octal(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->fs_fmask = opts->fs_dmask = option;
break;
case Opt_dmask:
if (match_octal(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->fs_dmask = option;
break;
case Opt_fmask:
if (match_octal(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->fs_fmask = option;
break;
case Opt_allow_utime:
if (match_octal(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->allow_utime = option & (S_IWGRP | S_IWOTH);
break;
case Opt_codepage:
if (match_int(&args[0], &option))
- return 0;
+ return -EINVAL;
opts->codepage = option;
break;
case Opt_flush:
opts->flush = 1;
break;
+ case Opt_time_offset:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ if (option < -12 * 60 || option > 12 * 60)
+ return -EINVAL;
+ opts->tz_set = 1;
+ opts->time_offset = option;
+ break;
case Opt_tz_utc:
- opts->tz_utc = 1;
+ opts->tz_set = 1;
+ opts->time_offset = 0;
break;
case Opt_err_cont:
opts->errors = FAT_ERRORS_CONT;
@@ -1327,7 +1344,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sbi->dir_entries = get_unaligned_le16(&b->dir_entries);
if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
if (!silent)
- fat_msg(sb, KERN_ERR, "bogus directroy-entries per block"
+ fat_msg(sb, KERN_ERR, "bogus directory-entries per block"
" (%u)", sbi->dir_entries);
brelse(bh);
goto out_invalid;
@@ -1431,6 +1448,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
goto out_fail;
}
+ if (sbi->options.discard) {
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
+ if (!blk_queue_discard(q))
+ fat_msg(sb, KERN_WARNING,
+ "mounting with \"discard\" option, but "
+ "the device does not support discard");
+ }
+
return 0;
out_invalid:
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 6d93360ca0c..359d307b550 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -135,6 +135,10 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
}
if (ret < 0)
return ret;
+ /*
+ * FIXME: Although we could add this cache entry here, fat_cache_add()
+ * assumes it is called after a linear search with fat_cache_id.
+ */
// fat_cache_add(inode, new_fclus, new_dclus);
} else {
MSDOS_I(inode)->i_start = new_dclus;
@@ -212,8 +216,10 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
- if (!sbi->options.tz_utc)
+ if (!sbi->options.tz_set)
second += sys_tz.tz_minuteswest * SECS_PER_MIN;
+ else
+ second -= sbi->options.time_offset * SECS_PER_MIN;
if (time_cs) {
ts->tv_sec = second + (time_cs / 100);
@@ -229,8 +235,9 @@ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
- time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
- -sys_tz.tz_minuteswest * 60, &tm);
+ time_to_tm(ts->tv_sec,
+ (sbi->options.tz_set ? sbi->options.time_offset :
+ -sys_tz.tz_minuteswest) * SECS_PER_MIN, &tm);
/* FAT can only support year between 1980 to 2107 */
if (tm.tm_year < 1980 - 1900) {
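
(Aside, not part of the patch: reading the arithmetic above, time_offset is the number of minutes the on-disk local timestamps run ahead of UTC, so a volume written in UTC+5:30 would be mounted with -o time_offset=330. Stripped of the tm conversion, the two directions reduce to:)

	/* what fat_time_fat2unix() does when tz_set is true */
	static long fat_local_to_utc(long local_secs, int time_offset_minutes)
	{
		return local_secs - time_offset_minutes * 60;
	}

	/* and the inverse applied by fat_time_unix2fat() via time_to_tm() */
	static long fat_utc_to_local(long utc_secs, int time_offset_minutes)
	{
		return utc_secs + time_offset_minutes * 60;
	}
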
diff --git a/fs/fhandle.c b/fs/fhandle.c
index f775bfdd6e4..999ff5c3cab 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -22,7 +22,7 @@ static long do_sys_name_to_handle(struct path *path,
struct file_handle *handle = NULL;
/*
- * We need t make sure wether the file system
+ * We need to make sure whether the file system
* support decoding of the file handle
*/
if (!path->dentry->d_sb->s_export_op ||
@@ -40,7 +40,7 @@ static long do_sys_name_to_handle(struct path *path,
if (!handle)
return -ENOMEM;
- /* convert handle size to multiple of sizeof(u32) */
+ /* convert handle size to multiple of sizeof(u32) */
handle_dwords = f_handle.handle_bytes >> 2;
/* we ask for a non connected handle */
@@ -52,7 +52,7 @@ static long do_sys_name_to_handle(struct path *path,
handle_bytes = handle_dwords * sizeof(u32);
handle->handle_bytes = handle_bytes;
if ((handle->handle_bytes > f_handle.handle_bytes) ||
- (retval == 255) || (retval == -ENOSPC)) {
+ (retval == FILEID_INVALID) || (retval == -ENOSPC)) {
/* As per old exportfs_encode_fh documentation
* we could return ENOSPC to indicate overflow
* But file system returned 255 always. So handle
diff --git a/fs/file_table.c b/fs/file_table.c
index a72bf9ddd0d..de9e9653d61 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -458,8 +458,8 @@ void mark_files_ro(struct super_block *sb)
spin_unlock(&f->f_lock);
if (file_check_writeable(f) != 0)
continue;
+ __mnt_drop_write(f->f_path.mnt);
file_release_write(f);
- mnt_drop_write_file(f);
} while_file_list_for_each_entry;
lg_global_unlock(&files_lglock);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3e3422f7f0a..310972b72a6 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1034,7 +1034,7 @@ int bdi_writeback_thread(void *data)
while (!kthread_freezable_should_stop(NULL)) {
/*
* Remove own delayed wake-up timer, since we are already awake
- * and we'll take care of the preriodic write-back.
+ * and we'll take care of the periodic write-back.
*/
del_timer(&wb->wakeup_timer);
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index 6a3c48abd67..b52aed1dca9 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -314,10 +314,10 @@ EXPORT_SYMBOL(fscache_add_cache);
*/
void fscache_io_error(struct fscache_cache *cache)
{
- set_bit(FSCACHE_IOERROR, &cache->flags);
-
- printk(KERN_ERR "FS-Cache: Cache %s stopped due to I/O error\n",
- cache->ops->name);
+ if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
+ printk(KERN_ERR "FS-Cache:"
+ " Cache '%s' stopped due to I/O error\n",
+ cache->ops->name);
}
EXPORT_SYMBOL(fscache_io_error);
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 990535071a8..8dcb114758e 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -370,6 +370,66 @@ cant_attach_object:
}
/*
+ * Invalidate an object. Callable with spinlocks held.
+ */
+void __fscache_invalidate(struct fscache_cookie *cookie)
+{
+ struct fscache_object *object;
+
+ _enter("{%s}", cookie->def->name);
+
+ fscache_stat(&fscache_n_invalidates);
+
+ /* Only permit invalidation of data files. Invalidating an index will
+ * require the caller to release all its attachments to the tree rooted
+ * there, and if it's doing that, it may as well just retire the
+ * cookie.
+ */
+ ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);
+
+ /* We will be updating the cookie too. */
+ BUG_ON(!cookie->def->get_aux);
+
+ /* If there's an object, we tell the object state machine to handle the
+ * invalidation on our behalf, otherwise there's nothing to do.
+ */
+ if (!hlist_empty(&cookie->backing_objects)) {
+ spin_lock(&cookie->lock);
+
+ if (!hlist_empty(&cookie->backing_objects) &&
+ !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
+ &cookie->flags)) {
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object,
+ cookie_link);
+ if (object->state < FSCACHE_OBJECT_DYING)
+ fscache_raise_event(
+ object, FSCACHE_OBJECT_EV_INVALIDATE);
+ }
+
+ spin_unlock(&cookie->lock);
+ }
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_invalidate);
+
+/*
+ * Wait for object invalidation to complete.
+ */
+void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+{
+ _enter("%p", cookie);
+
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
+ fscache_wait_bit_interruptible,
+ TASK_UNINTERRUPTIBLE);
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_wait_on_invalidate);
+
+/*
* update the index entries backing a cookie
*/
void __fscache_update_cookie(struct fscache_cookie *cookie)
@@ -442,16 +502,34 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
+try_again:
spin_lock(&cookie->lock);
/* break links with all the active objects */
while (!hlist_empty(&cookie->backing_objects)) {
+ int n_reads;
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object,
cookie_link);
_debug("RELEASE OBJ%x", object->debug_id);
+ set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
+ n_reads = atomic_read(&object->n_reads);
+ if (n_reads) {
+ int n_ops = object->n_ops;
+ int n_in_progress = object->n_in_progress;
+ spin_unlock(&cookie->lock);
+ printk(KERN_ERR "FS-Cache:"
+ " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
+ cookie->def->name,
+ n_reads, n_ops, n_in_progress);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ printk("Wait finished\n");
+ goto try_again;
+ }
+
/* detach each cache object from the object cookie */
spin_lock(&object->lock);
hlist_del_init(&object->cookie_link);
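
(Aside, not part of the patch: a netfs is expected to drive this pair through wrappers in linux/fscache.h. The wrapper names fscache_invalidate()/fscache_wait_on_invalidate() and the my_netfs_inode structure are assumptions made for illustration.)

	static void my_netfs_server_data_changed(struct my_netfs_inode *ni)
	{
		if (!ni->fscache_cookie)
			return;

		/* raises FSCACHE_OBJECT_EV_INVALIDATE on the backing object */
		fscache_invalidate(ni->fscache_cookie);

		/* sleeps until FSCACHE_COOKIE_INVALIDATING is cleared again */
		fscache_wait_on_invalidate(ni->fscache_cookie);
	}
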
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index f6aad48d38a..ee38fef4be5 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -121,12 +121,19 @@ extern int fscache_submit_exclusive_op(struct fscache_object *,
struct fscache_operation *);
extern int fscache_submit_op(struct fscache_object *,
struct fscache_operation *);
-extern int fscache_cancel_op(struct fscache_operation *);
+extern int fscache_cancel_op(struct fscache_operation *,
+ void (*)(struct fscache_operation *));
+extern void fscache_cancel_all_ops(struct fscache_object *);
extern void fscache_abort_object(struct fscache_object *);
extern void fscache_start_operations(struct fscache_object *);
extern void fscache_operation_gc(struct work_struct *);
/*
+ * page.c
+ */
+extern void fscache_invalidate_writes(struct fscache_cookie *);
+
+/*
* proc.c
*/
#ifdef CONFIG_PROC_FS
@@ -194,6 +201,7 @@ extern atomic_t fscache_n_store_vmscan_not_storing;
extern atomic_t fscache_n_store_vmscan_gone;
extern atomic_t fscache_n_store_vmscan_busy;
extern atomic_t fscache_n_store_vmscan_cancelled;
+extern atomic_t fscache_n_store_vmscan_wait;
extern atomic_t fscache_n_marks;
extern atomic_t fscache_n_uncaches;
@@ -205,6 +213,9 @@ extern atomic_t fscache_n_acquires_ok;
extern atomic_t fscache_n_acquires_nobufs;
extern atomic_t fscache_n_acquires_oom;
+extern atomic_t fscache_n_invalidates;
+extern atomic_t fscache_n_invalidates_run;
+
extern atomic_t fscache_n_updates;
extern atomic_t fscache_n_updates_null;
extern atomic_t fscache_n_updates_run;
@@ -237,6 +248,7 @@ extern atomic_t fscache_n_cop_alloc_object;
extern atomic_t fscache_n_cop_lookup_object;
extern atomic_t fscache_n_cop_lookup_complete;
extern atomic_t fscache_n_cop_grab_object;
+extern atomic_t fscache_n_cop_invalidate_object;
extern atomic_t fscache_n_cop_update_object;
extern atomic_t fscache_n_cop_drop_object;
extern atomic_t fscache_n_cop_put_object;
@@ -278,6 +290,7 @@ extern const struct file_operations fscache_stats_fops;
static inline void fscache_raise_event(struct fscache_object *object,
unsigned event)
{
+ BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);
if (!test_and_set_bit(event, &object->events) &&
test_bit(event, &object->event_mask))
fscache_enqueue_object(object);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index ebe29c58138..f27c89d1788 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -245,7 +245,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
obj->n_in_progress,
obj->n_exclusive,
atomic_read(&obj->n_reads),
- obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
+ obj->event_mask,
obj->events,
obj->flags,
work_busy(&obj->work));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index b6b897c550a..50d41c18021 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,6 +14,7 @@
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
+#include <linux/slab.h>
#include "internal.h"
const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
@@ -22,6 +23,7 @@ const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
[FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
[FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE",
[FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE",
+ [FSCACHE_OBJECT_INVALIDATING] = "OBJECT_INVALIDATING",
[FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING",
[FSCACHE_OBJECT_DYING] = "OBJECT_DYING",
[FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING",
@@ -39,6 +41,7 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
[FSCACHE_OBJECT_CREATING] = "CRTN",
[FSCACHE_OBJECT_AVAILABLE] = "AVBL",
[FSCACHE_OBJECT_ACTIVE] = "ACTV",
+ [FSCACHE_OBJECT_INVALIDATING] = "INVL",
[FSCACHE_OBJECT_UPDATING] = "UPDT",
[FSCACHE_OBJECT_DYING] = "DYNG",
[FSCACHE_OBJECT_LC_DYING] = "LCDY",
@@ -54,6 +57,7 @@ static void fscache_put_object(struct fscache_object *);
static void fscache_initialise_object(struct fscache_object *);
static void fscache_lookup_object(struct fscache_object *);
static void fscache_object_available(struct fscache_object *);
+static void fscache_invalidate_object(struct fscache_object *);
static void fscache_release_object(struct fscache_object *);
static void fscache_withdraw_object(struct fscache_object *);
static void fscache_enqueue_dependents(struct fscache_object *);
@@ -79,6 +83,15 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
}
/*
+ * Notify netfs of invalidation completion.
+ */
+static inline void fscache_invalidation_complete(struct fscache_cookie *cookie)
+{
+ if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+}
+
+/*
* process events that have been sent to an object's state machine
* - initiates parent lookup
* - does object lookup
@@ -90,6 +103,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
{
enum fscache_object_state new_state;
struct fscache_cookie *cookie;
+ int event;
ASSERT(object != NULL);
@@ -101,7 +115,8 @@ static void fscache_object_state_machine(struct fscache_object *object)
/* wait for the parent object to become ready */
case FSCACHE_OBJECT_INIT:
object->event_mask =
- ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
+ FSCACHE_OBJECT_EVENTS_MASK &
+ ~(1 << FSCACHE_OBJECT_EV_CLEARED);
fscache_initialise_object(object);
goto done;
@@ -125,6 +140,16 @@ static void fscache_object_state_machine(struct fscache_object *object)
case FSCACHE_OBJECT_ACTIVE:
goto active_transit;
+ /* Invalidate an object on disk */
+ case FSCACHE_OBJECT_INVALIDATING:
+ clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
+ fscache_stat(&fscache_n_invalidates_run);
+ fscache_stat(&fscache_n_cop_invalidate_object);
+ fscache_invalidate_object(object);
+ fscache_stat_d(&fscache_n_cop_invalidate_object);
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+ goto active_transit;
+
/* update the object metadata on disk */
case FSCACHE_OBJECT_UPDATING:
clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
@@ -251,13 +276,17 @@ static void fscache_object_state_machine(struct fscache_object *object)
/* determine the transition from a lookup state */
lookup_transit:
- switch (fls(object->events & object->event_mask) - 1) {
+ event = fls(object->events & object->event_mask) - 1;
+ switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
case FSCACHE_OBJECT_EV_RETIRE:
case FSCACHE_OBJECT_EV_RELEASE:
case FSCACHE_OBJECT_EV_ERROR:
new_state = FSCACHE_OBJECT_LC_DYING;
goto change_state;
+ case FSCACHE_OBJECT_EV_INVALIDATE:
+ new_state = FSCACHE_OBJECT_INVALIDATING;
+ goto change_state;
case FSCACHE_OBJECT_EV_REQUEUE:
goto done;
case -1:
@@ -268,13 +297,17 @@ lookup_transit:
/* determine the transition from an active state */
active_transit:
- switch (fls(object->events & object->event_mask) - 1) {
+ event = fls(object->events & object->event_mask) - 1;
+ switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
case FSCACHE_OBJECT_EV_RETIRE:
case FSCACHE_OBJECT_EV_RELEASE:
case FSCACHE_OBJECT_EV_ERROR:
new_state = FSCACHE_OBJECT_DYING;
goto change_state;
+ case FSCACHE_OBJECT_EV_INVALIDATE:
+ new_state = FSCACHE_OBJECT_INVALIDATING;
+ goto change_state;
case FSCACHE_OBJECT_EV_UPDATE:
new_state = FSCACHE_OBJECT_UPDATING;
goto change_state;
@@ -287,7 +320,8 @@ active_transit:
/* determine the transition from a terminal state */
terminal_transit:
- switch (fls(object->events & object->event_mask) - 1) {
+ event = fls(object->events & object->event_mask) - 1;
+ switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
new_state = FSCACHE_OBJECT_WITHDRAWING;
goto change_state;
@@ -320,8 +354,8 @@ done:
unsupported_event:
printk(KERN_ERR "FS-Cache:"
- " Unsupported event %lx [mask %lx] in state %s\n",
- object->events, object->event_mask,
+ " Unsupported event %d [%lx/%lx] in state %s\n",
+ event, object->events, object->event_mask,
fscache_object_states[object->state]);
BUG();
}
@@ -587,8 +621,6 @@ static void fscache_object_available(struct fscache_object *object)
if (object->n_in_progress == 0) {
if (object->n_ops > 0) {
ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
- ASSERTIF(object->n_ops > object->n_obj_ops,
- !list_empty(&object->pending_ops));
fscache_start_operations(object);
} else {
ASSERT(list_empty(&object->pending_ops));
@@ -681,6 +713,7 @@ static void fscache_withdraw_object(struct fscache_object *object)
if (object->cookie == cookie) {
hlist_del_init(&object->cookie_link);
object->cookie = NULL;
+ fscache_invalidation_complete(cookie);
detached = true;
}
spin_unlock(&cookie->lock);
@@ -890,3 +923,55 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
return result;
}
EXPORT_SYMBOL(fscache_check_aux);
+
+/*
+ * Asynchronously invalidate an object.
+ */
+static void fscache_invalidate_object(struct fscache_object *object)
+{
+ struct fscache_operation *op;
+ struct fscache_cookie *cookie = object->cookie;
+
+ _enter("{OBJ%x}", object->debug_id);
+
+ /* Reject any new read/write ops and abort any that are pending. */
+ fscache_invalidate_writes(cookie);
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ fscache_cancel_all_ops(object);
+
+ /* Now we have to wait for in-progress reads and writes */
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
+ _leave(" [ENOMEM]");
+ return;
+ }
+
+ fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
+ op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
+
+ spin_lock(&cookie->lock);
+ if (fscache_submit_exclusive_op(object, op) < 0)
+ goto submit_op_failed;
+ spin_unlock(&cookie->lock);
+ fscache_put_operation(op);
+
+ /* Once we've completed the invalidation, we know there will be no data
+ * stored in the cache and thus we can reinstate the data-check-skip
+ * optimisation.
+ */
+ set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+ /* We can allow read and write requests to come in once again. They'll
+ * queue up behind our exclusive invalidation operation.
+ */
+ fscache_invalidation_complete(cookie);
+ _leave("");
+ return;
+
+submit_op_failed:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
+ _leave(" [EIO]");
+}
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 30afdfa7aec..762a9ec4ffa 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -37,6 +37,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
ASSERT(op->processor != NULL);
ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
ASSERTCMP(atomic_read(&op->usage), >, 0);
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
@@ -64,6 +65,9 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
static void fscache_run_op(struct fscache_object *object,
struct fscache_operation *op)
{
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+
+ op->state = FSCACHE_OP_ST_IN_PROGRESS;
object->n_in_progress++;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -84,18 +88,21 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
spin_lock(&object->lock);
ASSERTCMP(object->n_ops, >=, object->n_in_progress);
ASSERTCMP(object->n_ops, >=, object->n_exclusive);
ASSERT(list_empty(&op->pend_link));
- ret = -ENOBUFS;
+ op->state = FSCACHE_OP_ST_PENDING;
if (fscache_object_is_active(object)) {
op->object = object;
object->n_ops++;
object->n_exclusive++; /* reads and writes must wait */
- if (object->n_ops > 1) {
+ if (object->n_in_progress > 0) {
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
@@ -121,8 +128,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
fscache_stat(&fscache_n_op_pend);
ret = 0;
} else {
- /* not allowed to submit ops in any other state */
- BUG();
+ /* If we're in any other state, there must have been an I/O
+ * error of some nature.
+ */
+ ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags));
+ ret = -EIO;
}
spin_unlock(&object->lock);
@@ -186,6 +196,7 @@ int fscache_submit_op(struct fscache_object *object,
_enter("{OBJ%x OP%x},{%u}",
object->debug_id, op->debug_id, atomic_read(&op->usage));
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
ASSERTCMP(atomic_read(&op->usage), >, 0);
spin_lock(&object->lock);
@@ -196,6 +207,7 @@ int fscache_submit_op(struct fscache_object *object,
ostate = object->state;
smp_rmb();
+ op->state = FSCACHE_OP_ST_PENDING;
if (fscache_object_is_active(object)) {
op->object = object;
object->n_ops++;
@@ -225,12 +237,15 @@ int fscache_submit_op(struct fscache_object *object,
object->state == FSCACHE_OBJECT_LC_DYING ||
object->state == FSCACHE_OBJECT_WITHDRAWING) {
fscache_stat(&fscache_n_op_rejected);
+ op->state = FSCACHE_OP_ST_CANCELLED;
ret = -ENOBUFS;
} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
fscache_report_unexpected_submission(object, op, ostate);
ASSERT(!fscache_object_is_active(object));
+ op->state = FSCACHE_OP_ST_CANCELLED;
ret = -ENOBUFS;
} else {
+ op->state = FSCACHE_OP_ST_CANCELLED;
ret = -ENOBUFS;
}
@@ -283,20 +298,28 @@ void fscache_start_operations(struct fscache_object *object)
/*
* cancel an operation that's pending on an object
*/
-int fscache_cancel_op(struct fscache_operation *op)
+int fscache_cancel_op(struct fscache_operation *op,
+ void (*do_cancel)(struct fscache_operation *))
{
struct fscache_object *object = op->object;
int ret;
_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
+ ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
+ ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
spin_lock(&object->lock);
ret = -EBUSY;
- if (!list_empty(&op->pend_link)) {
+ if (op->state == FSCACHE_OP_ST_PENDING) {
+ ASSERT(!list_empty(&op->pend_link));
fscache_stat(&fscache_n_op_cancelled);
list_del_init(&op->pend_link);
- object->n_ops--;
+ if (do_cancel)
+ do_cancel(op);
+ op->state = FSCACHE_OP_ST_CANCELLED;
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
object->n_exclusive--;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
@@ -311,6 +334,70 @@ int fscache_cancel_op(struct fscache_operation *op)
}
/*
+ * Cancel all pending operations on an object
+ */
+void fscache_cancel_all_ops(struct fscache_object *object)
+{
+ struct fscache_operation *op;
+
+ _enter("OBJ%x", object->debug_id);
+
+ spin_lock(&object->lock);
+
+ while (!list_empty(&object->pending_ops)) {
+ op = list_entry(object->pending_ops.next,
+ struct fscache_operation, pend_link);
+ fscache_stat(&fscache_n_op_cancelled);
+ list_del_init(&op->pend_link);
+
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+ op->state = FSCACHE_OP_ST_CANCELLED;
+
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+ object->n_exclusive--;
+ if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ fscache_put_operation(op);
+ cond_resched_lock(&object->lock);
+ }
+
+ spin_unlock(&object->lock);
+ _leave("");
+}
+
+/*
+ * Record the completion or cancellation of an in-progress operation.
+ */
+void fscache_op_complete(struct fscache_operation *op, bool cancelled)
+{
+ struct fscache_object *object = op->object;
+
+ _enter("OBJ%x", object->debug_id);
+
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+ ASSERTCMP(object->n_in_progress, >, 0);
+ ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
+ object->n_exclusive, >, 0);
+ ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
+ object->n_in_progress, ==, 1);
+
+ spin_lock(&object->lock);
+
+ op->state = cancelled ?
+ FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;
+
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+ object->n_exclusive--;
+ object->n_in_progress--;
+ if (object->n_in_progress == 0)
+ fscache_start_operations(object);
+
+ spin_unlock(&object->lock);
+ _leave("");
+}
+EXPORT_SYMBOL(fscache_op_complete);
+
+/*
* release an operation
* - queues pending ops if this is the last in-progress op
*/
@@ -328,8 +415,9 @@ void fscache_put_operation(struct fscache_operation *op)
return;
_debug("PUT OP");
- if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
- BUG();
+ ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
+ op->state, ==, FSCACHE_OP_ST_CANCELLED);
+ op->state = FSCACHE_OP_ST_DEAD;
fscache_stat(&fscache_n_op_release);
@@ -340,8 +428,14 @@ void fscache_put_operation(struct fscache_operation *op)
object = op->object;
- if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
- atomic_dec(&object->n_reads);
+ if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) {
+ if (atomic_dec_and_test(&object->n_reads)) {
+ clear_bit(FSCACHE_COOKIE_WAITING_ON_READS,
+ &object->cookie->flags);
+ wake_up_bit(&object->cookie->flags,
+ FSCACHE_COOKIE_WAITING_ON_READS);
+ }
+ }
/* now... we may get called with the object spinlock held, so we
* complete the cleanup here only if we can immediately acquire the
@@ -359,16 +453,6 @@ void fscache_put_operation(struct fscache_operation *op)
return;
}
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
- ASSERTCMP(object->n_exclusive, >, 0);
- object->n_exclusive--;
- }
-
- ASSERTCMP(object->n_in_progress, >, 0);
- object->n_in_progress--;
- if (object->n_in_progress == 0)
- fscache_start_operations(object);
-
ASSERTCMP(object->n_ops, >, 0);
object->n_ops--;
if (object->n_ops == 0)
@@ -407,23 +491,14 @@ void fscache_operation_gc(struct work_struct *work)
spin_unlock(&cache->op_gc_list_lock);
object = op->object;
+ spin_lock(&object->lock);
_debug("GC DEFERRED REL OBJ%x OP%x",
object->debug_id, op->debug_id);
fscache_stat(&fscache_n_op_gc);
ASSERTCMP(atomic_read(&op->usage), ==, 0);
-
- spin_lock(&object->lock);
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
- ASSERTCMP(object->n_exclusive, >, 0);
- object->n_exclusive--;
- }
-
- ASSERTCMP(object->n_in_progress, >, 0);
- object->n_in_progress--;
- if (object->n_in_progress == 0)
- fscache_start_operations(object);
+ ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
ASSERTCMP(object->n_ops, >, 0);
object->n_ops--;
@@ -431,6 +506,7 @@ void fscache_operation_gc(struct work_struct *work)
fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
spin_unlock(&object->lock);
+ kfree(op);
} while (count++ < 20);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3f7a59bfa7a..ff000e52072 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -56,6 +56,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
_enter("%p,%p,%x", cookie, page, gfp);
+try_again:
rcu_read_lock();
val = radix_tree_lookup(&cookie->stores, page->index);
if (!val) {
@@ -104,11 +105,19 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
return true;
page_busy:
- /* we might want to wait here, but that could deadlock the allocator as
- * the work threads writing to the cache may all end up sleeping
- * on memory allocation */
- fscache_stat(&fscache_n_store_vmscan_busy);
- return false;
+ /* We will wait here if we're allowed to, but that could deadlock the
+ * allocator as the work threads writing to the cache may all end up
+ * sleeping on memory allocation, so we may need to impose a timeout
+ * too. */
+ if (!(gfp & __GFP_WAIT)) {
+ fscache_stat(&fscache_n_store_vmscan_busy);
+ return false;
+ }
+
+ fscache_stat(&fscache_n_store_vmscan_wait);
+ __fscache_wait_on_page_write(cookie, page);
+ gfp &= ~__GFP_WAIT;
+ goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
@@ -162,6 +171,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
fscache_abort_object(object);
}
+ fscache_op_complete(op, true);
_leave("");
}
@@ -223,6 +233,8 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
_enter("{OP%x}", op->op.debug_id);
+ ASSERTCMP(op->n_pages, ==, 0);
+
fscache_hist(fscache_retrieval_histogram, op->start_time);
if (op->context)
fscache_put_context(op->op.object->cookie, op->context);
@@ -291,6 +303,17 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
}
/*
+ * Handle cancellation of a pending retrieval op
+ */
+static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
+{
+ struct fscache_retrieval *op =
+ container_of(_op, struct fscache_retrieval, op);
+
+ op->n_pages = 0;
+}
+
+/*
* wait for an object to become active (or dead)
*/
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
@@ -307,8 +330,8 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
fscache_stat(stat_op_waits);
if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
fscache_wait_bit_interruptible,
- TASK_INTERRUPTIBLE) < 0) {
- ret = fscache_cancel_op(&op->op);
+ TASK_INTERRUPTIBLE) != 0) {
+ ret = fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
if (ret == 0)
return -ERESTARTSYS;
@@ -320,7 +343,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
_debug("<<< GO");
check_if_dead:
+ if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
+ fscache_stat(stat_object_dead);
+ _leave(" = -ENOBUFS [cancelled]");
+ return -ENOBUFS;
+ }
if (unlikely(fscache_object_is_dead(object))) {
+ pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
+ fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
fscache_stat(stat_object_dead);
return -ENOBUFS;
}
@@ -353,6 +383,11 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
if (hlist_empty(&cookie->backing_objects))
goto nobufs;
+ if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+ _leave(" = -ENOBUFS [invalidating]");
+ return -ENOBUFS;
+ }
+
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
ASSERTCMP(page, !=, NULL);
@@ -364,6 +399,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
_leave(" = -ENOMEM");
return -ENOMEM;
}
+ op->n_pages = 1;
spin_lock(&cookie->lock);
@@ -375,10 +411,10 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
atomic_inc(&object->n_reads);
- set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+ __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock;
+ goto nobufs_unlock_dec;
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_retrieval_ops);
@@ -425,6 +461,8 @@ error:
_leave(" = %d", ret);
return ret;
+nobufs_unlock_dec:
+ atomic_dec(&object->n_reads);
nobufs_unlock:
spin_unlock(&cookie->lock);
kfree(op);
@@ -472,6 +510,11 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
if (hlist_empty(&cookie->backing_objects))
goto nobufs;
+ if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+ _leave(" = -ENOBUFS [invalidating]");
+ return -ENOBUFS;
+ }
+
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
ASSERTCMP(*nr_pages, >, 0);
ASSERT(!list_empty(pages));
@@ -482,6 +525,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
op = fscache_alloc_retrieval(mapping, end_io_func, context);
if (!op)
return -ENOMEM;
+ op->n_pages = *nr_pages;
spin_lock(&cookie->lock);
@@ -491,10 +535,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
struct fscache_object, cookie_link);
atomic_inc(&object->n_reads);
- set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+ __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock;
+ goto nobufs_unlock_dec;
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_retrieval_ops);
@@ -541,6 +585,8 @@ error:
_leave(" = %d", ret);
return ret;
+nobufs_unlock_dec:
+ atomic_dec(&object->n_reads);
nobufs_unlock:
spin_unlock(&cookie->lock);
kfree(op);
@@ -577,12 +623,18 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
ASSERTCMP(page, !=, NULL);
+ if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+ _leave(" = -ENOBUFS [invalidating]");
+ return -ENOBUFS;
+ }
+
if (fscache_wait_for_deferred_lookup(cookie) < 0)
return -ERESTARTSYS;
op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
if (!op)
return -ENOMEM;
+ op->n_pages = 1;
spin_lock(&cookie->lock);
@@ -658,9 +710,27 @@ static void fscache_write_op(struct fscache_operation *_op)
spin_lock(&object->lock);
cookie = object->cookie;
- if (!fscache_object_is_active(object) || !cookie) {
+ if (!fscache_object_is_active(object)) {
+ /* If we get here, then the on-disk cache object most likely no
+ * longer exists, so we should just cancel this write operation.
+ */
+ spin_unlock(&object->lock);
+ fscache_op_complete(&op->op, false);
+ _leave(" [inactive]");
+ return;
+ }
+
+ if (!cookie) {
+ /* If we get here, then the cookie belonging to the object was
+ * detached, probably by the cookie being withdrawn due to
+ * memory pressure, which means that the pages we were going to
+ * write to the cache from no longer exist - therefore, we can just
+ * cancel this write operation.
+ */
spin_unlock(&object->lock);
- _leave("");
+ fscache_op_complete(&op->op, false);
+ _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
+ _op->flags, _op->state, object->state, object->flags);
return;
}
@@ -696,6 +766,7 @@ static void fscache_write_op(struct fscache_operation *_op)
fscache_end_page_write(object, page);
if (ret < 0) {
fscache_abort_object(object);
+ fscache_op_complete(&op->op, true);
} else {
fscache_enqueue_operation(&op->op);
}
@@ -710,6 +781,38 @@ superseded:
spin_unlock(&cookie->stores_lock);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
spin_unlock(&object->lock);
+ fscache_op_complete(&op->op, true);
+ _leave("");
+}
+
+/*
+ * Clear the pages pending writing for invalidation
+ */
+void fscache_invalidate_writes(struct fscache_cookie *cookie)
+{
+ struct page *page;
+ void *results[16];
+ int n, i;
+
+ _enter("");
+
+ while (spin_lock(&cookie->stores_lock),
+ n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
+ ARRAY_SIZE(results),
+ FSCACHE_COOKIE_PENDING_TAG),
+ n > 0) {
+ for (i = n - 1; i >= 0; i--) {
+ page = results[i];
+ radix_tree_delete(&cookie->stores, page->index);
+ }
+
+ spin_unlock(&cookie->stores_lock);
+
+ for (i = n - 1; i >= 0; i--)
+ page_cache_release(results[i]);
+ }
+
+ spin_unlock(&cookie->stores_lock);
_leave("");
}
@@ -759,7 +862,12 @@ int __fscache_write_page(struct fscache_cookie *cookie,
fscache_stat(&fscache_n_stores);
- op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
+ _leave(" = -ENOBUFS [invalidating]");
+ return -ENOBUFS;
+ }
+
+ op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
if (!op)
goto nomem;
@@ -915,6 +1023,40 @@ done:
EXPORT_SYMBOL(__fscache_uncache_page);
/**
+ * fscache_mark_page_cached - Mark a page as being cached
+ * @op: The retrieval op pages are being marked for
+ * @page: The page to be marked
+ *
+ * Mark a netfs page as being cached. After this is called, the netfs
+ * must call fscache_uncache_page() to remove the mark.
+ */
+void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
+{
+ struct fscache_cookie *cookie = op->op.object->cookie;
+
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_marks);
+#endif
+
+ _debug("- mark %p{%lx}", page, page->index);
+ if (TestSetPageFsCache(page)) {
+ static bool once_only;
+ if (!once_only) {
+ once_only = true;
+ printk(KERN_WARNING "FS-Cache:"
+ " Cookie type %s marked page %lx"
+ " multiple times\n",
+ cookie->def->name, page->index);
+ }
+ }
+
+ if (cookie->def->mark_page_cached)
+ cookie->def->mark_page_cached(cookie->netfs_data,
+ op->mapping, page);
+}
+EXPORT_SYMBOL(fscache_mark_page_cached);
+
+/**
* fscache_mark_pages_cached - Mark pages as being cached
* @op: The retrieval op pages are being marked for
* @pagevec: The pages to be marked
@@ -925,32 +1067,11 @@ EXPORT_SYMBOL(__fscache_uncache_page);
void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec)
{
- struct fscache_cookie *cookie = op->op.object->cookie;
unsigned long loop;
-#ifdef CONFIG_FSCACHE_STATS
- atomic_add(pagevec->nr, &fscache_n_marks);
-#endif
-
- for (loop = 0; loop < pagevec->nr; loop++) {
- struct page *page = pagevec->pages[loop];
-
- _debug("- mark %p{%lx}", page, page->index);
- if (TestSetPageFsCache(page)) {
- static bool once_only;
- if (!once_only) {
- once_only = true;
- printk(KERN_WARNING "FS-Cache:"
- " Cookie type %s marked page %lx"
- " multiple times\n",
- cookie->def->name, page->index);
- }
- }
- }
+ for (loop = 0; loop < pagevec->nr; loop++)
+ fscache_mark_page_cached(op, pagevec->pages[loop]);
- if (cookie->def->mark_pages_cached)
- cookie->def->mark_pages_cached(cookie->netfs_data,
- op->mapping, pagevec);
pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
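
(Aside, not part of the patch: with the per-page export, a cache backend that used to batch pages into a pagevec for fscache_mark_pages_cached() can mark each page as its backing read completes. Hypothetical backend loop:)

	/* op is the struct fscache_retrieval being serviced */
	for (i = 0; i < nr_pages; i++)
		fscache_mark_page_cached(op, pages[i]);
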
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 4765190d537..8179e8bc4a3 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -69,6 +69,7 @@ atomic_t fscache_n_store_vmscan_not_storing;
atomic_t fscache_n_store_vmscan_gone;
atomic_t fscache_n_store_vmscan_busy;
atomic_t fscache_n_store_vmscan_cancelled;
+atomic_t fscache_n_store_vmscan_wait;
atomic_t fscache_n_marks;
atomic_t fscache_n_uncaches;
@@ -80,6 +81,9 @@ atomic_t fscache_n_acquires_ok;
atomic_t fscache_n_acquires_nobufs;
atomic_t fscache_n_acquires_oom;
+atomic_t fscache_n_invalidates;
+atomic_t fscache_n_invalidates_run;
+
atomic_t fscache_n_updates;
atomic_t fscache_n_updates_null;
atomic_t fscache_n_updates_run;
@@ -112,6 +116,7 @@ atomic_t fscache_n_cop_alloc_object;
atomic_t fscache_n_cop_lookup_object;
atomic_t fscache_n_cop_lookup_complete;
atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_invalidate_object;
atomic_t fscache_n_cop_update_object;
atomic_t fscache_n_cop_drop_object;
atomic_t fscache_n_cop_put_object;
@@ -168,6 +173,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_object_created),
atomic_read(&fscache_n_object_lookups_timed_out));
+ seq_printf(m, "Invals : n=%u run=%u\n",
+ atomic_read(&fscache_n_invalidates),
+ atomic_read(&fscache_n_invalidates_run));
+
seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
atomic_read(&fscache_n_updates),
atomic_read(&fscache_n_updates_null),
@@ -224,11 +233,12 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_store_radix_deletes),
atomic_read(&fscache_n_store_pages_over_limit));
- seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+ seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
atomic_read(&fscache_n_store_vmscan_not_storing),
atomic_read(&fscache_n_store_vmscan_gone),
atomic_read(&fscache_n_store_vmscan_busy),
- atomic_read(&fscache_n_store_vmscan_cancelled));
+ atomic_read(&fscache_n_store_vmscan_cancelled),
+ atomic_read(&fscache_n_store_vmscan_wait));
seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
atomic_read(&fscache_n_op_pend),
@@ -246,7 +256,8 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_cop_lookup_object),
atomic_read(&fscache_n_cop_lookup_complete),
atomic_read(&fscache_n_cop_grab_object));
- seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+ seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+ atomic_read(&fscache_n_cop_invalidate_object),
atomic_read(&fscache_n_cop_update_object),
atomic_read(&fscache_n_cop_drop_object),
atomic_read(&fscache_n_cop_put_object),
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8c23fa7a91e..c16335315e5 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -92,8 +92,8 @@ static void __fuse_put_request(struct fuse_req *req)
static void fuse_req_init_context(struct fuse_req *req)
{
- req->in.h.uid = current_fsuid();
- req->in.h.gid = current_fsgid();
+ req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
+ req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
req->in.h.pid = current->pid;
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 324bc085053..b7c09f9eb40 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -818,8 +818,8 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
stat->ino = attr->ino;
stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
stat->nlink = attr->nlink;
- stat->uid = attr->uid;
- stat->gid = attr->gid;
+ stat->uid = make_kuid(&init_user_ns, attr->uid);
+ stat->gid = make_kgid(&init_user_ns, attr->gid);
stat->rdev = inode->i_rdev;
stat->atime.tv_sec = attr->atime;
stat->atime.tv_nsec = attr->atimensec;
@@ -1007,12 +1007,12 @@ int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
rcu_read_lock();
ret = 0;
cred = __task_cred(task);
- if (cred->euid == fc->user_id &&
- cred->suid == fc->user_id &&
- cred->uid == fc->user_id &&
- cred->egid == fc->group_id &&
- cred->sgid == fc->group_id &&
- cred->gid == fc->group_id)
+ if (uid_eq(cred->euid, fc->user_id) &&
+ uid_eq(cred->suid, fc->user_id) &&
+ uid_eq(cred->uid, fc->user_id) &&
+ gid_eq(cred->egid, fc->group_id) &&
+ gid_eq(cred->sgid, fc->group_id) &&
+ gid_eq(cred->gid, fc->group_id))
ret = 1;
rcu_read_unlock();
@@ -1306,9 +1306,9 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
if (ivalid & ATTR_MODE)
arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
if (ivalid & ATTR_UID)
- arg->valid |= FATTR_UID, arg->uid = iattr->ia_uid;
+ arg->valid |= FATTR_UID, arg->uid = from_kuid(&init_user_ns, iattr->ia_uid);
if (ivalid & ATTR_GID)
- arg->valid |= FATTR_GID, arg->gid = iattr->ia_gid;
+ arg->valid |= FATTR_GID, arg->gid = from_kgid(&init_user_ns, iattr->ia_gid);
if (ivalid & ATTR_SIZE)
arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
if (ivalid & ATTR_ATIME) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 78d2837bc94..e21d4d8f87e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1599,19 +1599,19 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
return err ? 0 : outarg.block;
}
-static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
+static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
loff_t retval;
struct inode *inode = file->f_path.dentry->d_inode;
/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
- if (origin == SEEK_CUR || origin == SEEK_SET)
- return generic_file_llseek(file, offset, origin);
+ if (whence == SEEK_CUR || whence == SEEK_SET)
+ return generic_file_llseek(file, offset, whence);
mutex_lock(&inode->i_mutex);
retval = fuse_update_attributes(inode, NULL, file, NULL);
if (!retval)
- retval = generic_file_llseek(file, offset, origin);
+ retval = generic_file_llseek(file, offset, whence);
mutex_unlock(&inode->i_mutex);
return retval;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e24dd74e306..e105a53fc72 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -333,10 +333,10 @@ struct fuse_conn {
atomic_t count;
/** The user id for this mount */
- uid_t user_id;
+ kuid_t user_id;
/** The group id for this mount */
- gid_t group_id;
+ kgid_t group_id;
/** The fuse mount flags for this mount */
unsigned flags;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f0eda124cff..73ca6b72bea 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -60,8 +60,8 @@ MODULE_PARM_DESC(max_user_congthresh,
struct fuse_mount_data {
int fd;
unsigned rootmode;
- unsigned user_id;
- unsigned group_id;
+ kuid_t user_id;
+ kgid_t group_id;
unsigned fd_present:1;
unsigned rootmode_present:1;
unsigned user_id_present:1;
@@ -164,8 +164,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
inode->i_ino = fuse_squash_ino(attr->ino);
inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
set_nlink(inode, attr->nlink);
- inode->i_uid = attr->uid;
- inode->i_gid = attr->gid;
+ inode->i_uid = make_kuid(&init_user_ns, attr->uid);
+ inode->i_gid = make_kgid(&init_user_ns, attr->gid);
inode->i_blocks = attr->blocks;
inode->i_atime.tv_sec = attr->atime;
inode->i_atime.tv_nsec = attr->atimensec;
@@ -492,14 +492,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
case OPT_USER_ID:
if (match_int(&args[0], &value))
return 0;
- d->user_id = value;
+ d->user_id = make_kuid(current_user_ns(), value);
+ if (!uid_valid(d->user_id))
+ return 0;
d->user_id_present = 1;
break;
case OPT_GROUP_ID:
if (match_int(&args[0], &value))
return 0;
- d->group_id = value;
+ d->group_id = make_kgid(current_user_ns(), value);
+ if (!gid_valid(d->group_id))
+ return 0;
d->group_id_present = 1;
break;
@@ -540,8 +544,8 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
struct super_block *sb = root->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
- seq_printf(m, ",user_id=%u", fc->user_id);
- seq_printf(m, ",group_id=%u", fc->group_id);
+ seq_printf(m, ",user_id=%u", from_kuid_munged(&init_user_ns, fc->user_id));
+ seq_printf(m, ",group_id=%u", from_kgid_munged(&init_user_ns, fc->group_id));
if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
seq_puts(m, ",default_permissions");
if (fc->flags & FUSE_ALLOW_OTHER)
@@ -989,7 +993,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (!file)
goto err;
- if (file->f_op != &fuse_dev_operations)
+ if ((file->f_op != &fuse_dev_operations) ||
+ (file->f_cred->user_ns != &init_user_ns))
goto err_fput;
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 01c4975da4b..30de4f2a2ea 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -643,7 +643,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
goto out_unlock;
requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, requested);
+ error = gfs2_inplace_reserve(ip, requested, 0);
if (error)
goto out_qunlock;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 1fd3ae237bd..a68e91bcef3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -991,6 +991,41 @@ unlock:
return err;
}
+/**
+ * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
+ * @inode: The inode being truncated
+ * @oldsize: The original (larger) size
+ * @newsize: The new smaller size
+ *
+ * With jdata files, we have to journal a revoke for each block which is
+ * truncated. As a result, we need to split this into separate transactions
+ * if the number of pages being truncated gets too large.
+ */
+
+#define GFS2_JTRUNC_REVOKES 8192
+
+static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
+ u64 chunk;
+ int error;
+
+ while (oldsize != newsize) {
+ chunk = oldsize - newsize;
+ if (chunk > max_chunk)
+ chunk = max_chunk;
+ truncate_pagecache(inode, oldsize, oldsize - chunk);
+ oldsize -= chunk;
+ gfs2_trans_end(sdp);
+ error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
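
For scale (assuming a 4 KiB block size purely for illustration): max_chunk = GFS2_JTRUNC_REVOKES * s_blocksize = 8192 * 4096 bytes = 32 MiB, so truncating 1 GiB of jdata is split over roughly 32 transactions. A small user-space sketch of the same chunking loop, with the page-cache and transaction calls reduced to a placeholder:

#include <stdint.h>
#include <stdio.h>

#define JTRUNC_REVOKES 8192ULL

static void do_chunk(uint64_t from, uint64_t to)
{
	/* stands in for truncate_pagecache() plus gfs2_trans_end()/gfs2_trans_begin() */
	printf("truncate %llu -> %llu\n", (unsigned long long)from, (unsigned long long)to);
}

int main(void)
{
	uint64_t bsize = 4096;				/* assumed block size */
	uint64_t oldsize = 1ULL << 30, newsize = 0;
	uint64_t max_chunk = JTRUNC_REVOKES * bsize;	/* 32 MiB with 4 KiB blocks */

	while (oldsize != newsize) {
		uint64_t chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;
		do_chunk(oldsize, oldsize - chunk);
		oldsize -= chunk;
	}
	return 0;
}
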
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1000,8 +1035,10 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
int journaled = gfs2_is_jdata(ip);
int error;
- error = gfs2_trans_begin(sdp,
- RES_DINODE + (journaled ? RES_JDATA : 0), 0);
+ if (journaled)
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
+ else
+ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (error)
return error;
@@ -1026,7 +1063,16 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
- truncate_pagecache(inode, oldsize, newsize);
+ if (journaled)
+ error = gfs2_journaled_truncate(inode, oldsize, newsize);
+ else
+ truncate_pagecache(inode, oldsize, newsize);
+
+ if (error) {
+ brelse(dibh);
+ return error;
+ }
+
out_brelse:
brelse(dibh);
out:
@@ -1178,7 +1224,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
return error;
- error = gfs2_inplace_reserve(ip, 1);
+ error = gfs2_inplace_reserve(ip, 1, 0);
if (error)
goto do_grow_qunlock;
unstuff = 1;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 259b088cfc4..9a35670fdc3 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1676,16 +1676,11 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
be16_add_cpu(&leaf->lf_entries, 1);
}
brelse(bh);
- error = gfs2_meta_inode_buffer(ip, &bh);
- if (error)
- break;
- gfs2_trans_add_bh(ip->i_gl, bh, 1);
ip->i_entries++;
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
- gfs2_dinode_out(ip, bh->b_data);
- brelse(bh);
+ mark_inode_dirty(inode);
error = 0;
break;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index e056b4ce487..991ab2d484d 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -44,7 +44,7 @@
* gfs2_llseek - seek to a location in a file
* @file: the file
* @offset: the offset
- * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
+ * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
*
* SEEK_END requires the glock for the file because it references the
* file's size.
@@ -52,26 +52,26 @@
* Returns: The new offset, or errno
*/
-static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
+static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_holder i_gh;
loff_t error;
- switch (origin) {
+ switch (whence) {
case SEEK_END: /* These reference inode->i_size */
case SEEK_DATA:
case SEEK_HOLE:
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
&i_gh);
if (!error) {
- error = generic_file_llseek(file, offset, origin);
+ error = generic_file_llseek(file, offset, whence);
gfs2_glock_dq_uninit(&i_gh);
}
break;
case SEEK_CUR:
case SEEK_SET:
- error = generic_file_llseek(file, offset, origin);
+ error = generic_file_llseek(file, offset, whence);
break;
default:
error = -EINVAL;
@@ -432,7 +432,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
+ ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
if (ret)
goto out_quota_unlock;
@@ -825,7 +825,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
+ error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0f22d09f358..992c5c0cb50 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -55,8 +55,6 @@ struct gfs2_glock_iter {
typedef void (*glock_examiner) (struct gfs2_glock * gl);
-static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
-#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static struct dentry *gfs2_root;
@@ -107,10 +105,12 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
- if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- else
+ } else {
+ kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
+ }
}
void gfs2_glock_free(struct gfs2_glock *gl)
@@ -537,8 +537,8 @@ __acquires(&gl->gl_spin)
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
- if (glops->go_xmote_th)
- glops->go_xmote_th(gl);
+ if (glops->go_sync)
+ glops->go_sync(gl);
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -547,7 +547,10 @@ __acquires(&gl->gl_spin)
if (sdp->sd_lockstruct.ls_ops->lm_lock) {
/* lock_dlm */
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
- GLOCK_BUG_ON(gl, ret);
+ if (ret) {
+ printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
+ GLOCK_BUG_ON(gl, 1);
+ }
} else { /* lock_nolock */
finish_xmote(gl, target);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -736,6 +739,16 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!gl)
return -ENOMEM;
+ memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+
+ if (glops->go_flags & GLOF_LVB) {
+ gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+ if (!gl->gl_lksb.sb_lvbptr) {
+ kmem_cache_free(cachep, gl);
+ return -ENOMEM;
+ }
+ }
+
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_sbd = sdp;
gl->gl_flags = 0;
@@ -753,9 +766,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
preempt_enable();
gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
- memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
- memset(gl->gl_lvb, 0, 32 * sizeof(char));
- gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
@@ -777,6 +787,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
tmp = search_bucket(hash, sdp, &name);
if (tmp) {
spin_unlock_bucket(hash);
+ kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(cachep, gl);
atomic_dec(&sdp->sd_glock_disposal);
gl = tmp;
@@ -1013,7 +1024,7 @@ trap_recursive:
printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
printk(KERN_ERR "lock type: %d req lock state : %d\n",
gh->gh_gl->gl_name.ln_type, gh->gh_state);
- __dump_glock(NULL, gl);
+ gfs2_dump_glock(NULL, gl);
BUG();
}
@@ -1508,7 +1519,7 @@ static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
int ret;
spin_lock(&gl->gl_spin);
- ret = __dump_glock(seq, gl);
+ ret = gfs2_dump_glock(seq, gl);
spin_unlock(&gl->gl_spin);
return ret;
}
@@ -1528,6 +1539,7 @@ static void dump_glock_func(struct gfs2_glock *gl)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
+ set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
glock_hash_walk(clear_glock, sdp);
flush_workqueue(glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
@@ -1655,7 +1667,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
}
/**
- * __dump_glock - print information about a glock
+ * gfs2_dump_glock - print information about a glock
* @seq: The seq_file struct
* @gl: the glock
*
@@ -1672,7 +1684,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
* Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned long long dtime;
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 307ac31df78..fd580b7861d 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -178,33 +178,33 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
return NULL;
}
-int gfs2_glock_get(struct gfs2_sbd *sdp,
- u64 number, const struct gfs2_glock_operations *glops,
- int create, struct gfs2_glock **glp);
-void gfs2_glock_hold(struct gfs2_glock *gl);
-void gfs2_glock_put_nolock(struct gfs2_glock *gl);
-void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
- struct gfs2_holder *gh);
-void gfs2_holder_reinit(unsigned int state, unsigned flags,
- struct gfs2_holder *gh);
-void gfs2_holder_uninit(struct gfs2_holder *gh);
-int gfs2_glock_nq(struct gfs2_holder *gh);
-int gfs2_glock_poll(struct gfs2_holder *gh);
-int gfs2_glock_wait(struct gfs2_holder *gh);
-void gfs2_glock_dq(struct gfs2_holder *gh);
-void gfs2_glock_dq_wait(struct gfs2_holder *gh);
-
-void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
-int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
- u64 number, const struct gfs2_glock_operations *glops,
- unsigned int state, int flags, struct gfs2_holder *gh);
-
-int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
-
-__printf(2, 3)
+extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
+extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
+extern void gfs2_glock_put(struct gfs2_glock *gl);
+extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+ unsigned flags, struct gfs2_holder *gh);
+extern void gfs2_holder_reinit(unsigned int state, unsigned flags,
+ struct gfs2_holder *gh);
+extern void gfs2_holder_uninit(struct gfs2_holder *gh);
+extern int gfs2_glock_nq(struct gfs2_holder *gh);
+extern int gfs2_glock_poll(struct gfs2_holder *gh);
+extern int gfs2_glock_wait(struct gfs2_holder *gh);
+extern void gfs2_glock_dq(struct gfs2_holder *gh);
+extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+ const struct gfs2_glock_operations *glops,
+ unsigned int state, int flags,
+ struct gfs2_holder *gh);
+extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
+extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 32cc4fde975..78d4184ffc7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -74,7 +74,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gfs2_trans_add_revoke(sdp, bd);
}
- BUG_ON(!fsync && atomic_read(&gl->gl_ail_count));
+ GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
}
@@ -96,7 +96,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
tr.tr_ip = (unsigned long)__builtin_return_address(0);
sb_start_intwrite(sdp->sd_vfs);
gfs2_log_reserve(sdp, tr.tr_reserved);
- BUG_ON(current->journal_info);
+ WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr;
__gfs2_ail_flush(gl, 0);
@@ -139,7 +139,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return;
- BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+ GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gfs2_log_flush(gl->gl_sbd, gl);
filemap_fdatawrite(metamapping);
@@ -168,7 +168,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
struct address_space *mapping = gfs2_glock2aspace(gl);
- BUG_ON(!(flags & DIO_METADATA));
+ WARN_ON_ONCE(!(flags & DIO_METADATA));
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
truncate_inode_pages(mapping, 0);
@@ -197,7 +197,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return;
- BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+ GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gfs2_log_flush(gl->gl_sbd, gl);
filemap_fdatawrite(metamapping);
@@ -536,7 +536,7 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
};
const struct gfs2_glock_operations gfs2_inode_glops = {
- .go_xmote_th = inode_go_sync,
+ .go_sync = inode_go_sync,
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_lock = inode_go_lock,
@@ -546,17 +546,17 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
- .go_xmote_th = rgrp_go_sync,
+ .go_sync = rgrp_go_sync,
.go_inval = rgrp_go_inval,
.go_lock = gfs2_rgrp_go_lock,
.go_unlock = gfs2_rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
- .go_flags = GLOF_ASPACE,
+ .go_flags = GLOF_ASPACE | GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
- .go_xmote_th = trans_go_sync,
+ .go_sync = trans_go_sync,
.go_xmote_bh = trans_go_xmote_bh,
.go_demote_ok = trans_go_demote_ok,
.go_type = LM_TYPE_NONDISK,
@@ -577,6 +577,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA,
+ .go_flags = GLOF_LVB,
};
const struct gfs2_glock_operations gfs2_journal_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 3d469d37345..c373a24fedd 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -205,7 +205,7 @@ struct lm_lockname {
struct gfs2_glock_operations {
- void (*go_xmote_th) (struct gfs2_glock *gl);
+ void (*go_sync) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl);
@@ -216,6 +216,7 @@ struct gfs2_glock_operations {
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1
+#define GLOF_LVB 2
};
enum {
@@ -321,7 +322,6 @@ struct gfs2_glock {
ktime_t gl_dstamp;
struct gfs2_lkstats gl_stats;
struct dlm_lksb gl_lksb;
- char gl_lvb[32];
unsigned long gl_tchange;
void *gl_object;
@@ -539,6 +539,7 @@ enum {
SDF_DEMOTE = 5,
SDF_NOJOURNALID = 6,
SDF_RORECOVERY = 7, /* read only recovery */
+ SDF_SKIP_DLM_UNLOCK = 8,
};
#define GFS2_FSNAME_LEN 256
@@ -621,6 +622,7 @@ struct gfs2_sbd {
u32 sd_hash_bsize_shift;
u32 sd_hash_ptrs; /* Number of pointers in a hash block */
u32 sd_qc_per_block;
+ u32 sd_blocks_per_bitmap;
u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
u32 sd_max_height; /* Max height of a file's metadata tree */
u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 381893ceefa..2b6f5698ef1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -364,34 +364,34 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
return 0;
}
-static void munge_mode_uid_gid(struct gfs2_inode *dip, umode_t *mode,
- unsigned int *uid, unsigned int *gid)
+static void munge_mode_uid_gid(const struct gfs2_inode *dip,
+ struct inode *inode)
{
if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
(dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
- if (S_ISDIR(*mode))
- *mode |= S_ISUID;
+ if (S_ISDIR(inode->i_mode))
+ inode->i_mode |= S_ISUID;
else if (dip->i_inode.i_uid != current_fsuid())
- *mode &= ~07111;
- *uid = dip->i_inode.i_uid;
+ inode->i_mode &= ~07111;
+ inode->i_uid = dip->i_inode.i_uid;
} else
- *uid = current_fsuid();
+ inode->i_uid = current_fsuid();
if (dip->i_inode.i_mode & S_ISGID) {
- if (S_ISDIR(*mode))
- *mode |= S_ISGID;
- *gid = dip->i_inode.i_gid;
+ if (S_ISDIR(inode->i_mode))
+ inode->i_mode |= S_ISGID;
+ inode->i_gid = dip->i_inode.i_gid;
} else
- *gid = current_fsgid();
+ inode->i_gid = current_fsgid();
}
-static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
+static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
{
- struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
int error;
int dblocks = 1;
- error = gfs2_inplace_reserve(dip, RES_DINODE);
+ error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
if (error)
goto out;
@@ -399,12 +399,15 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
if (error)
goto out_ipreserv;
- error = gfs2_alloc_blocks(dip, no_addr, &dblocks, 1, generation);
+ error = gfs2_alloc_blocks(ip, &ip->i_no_addr, &dblocks, 1, &ip->i_generation);
+ ip->i_no_formal_ino = ip->i_generation;
+ ip->i_inode.i_ino = ip->i_no_addr;
+ ip->i_goal = ip->i_no_addr;
gfs2_trans_end(sdp);
out_ipreserv:
- gfs2_inplace_release(dip);
+ gfs2_inplace_release(ip);
out:
return error;
}
@@ -429,52 +432,42 @@ static void gfs2_init_dir(struct buffer_head *dibh,
/**
* init_dinode - Fill in a new dinode structure
* @dip: The directory this inode is being created in
- * @gl: The glock covering the new inode
- * @inum: The inode number
- * @mode: The file permissions
- * @uid: The uid of the new inode
- * @gid: The gid of the new inode
- * @generation: The generation number of the new inode
- * @dev: The device number (if a device node)
+ * @ip: The inode
* @symname: The symlink destination (if a symlink)
- * @size: The inode size (ignored for directories)
* @bhp: The buffer head (returned to caller)
*
*/
-static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
- const struct gfs2_inum_host *inum, umode_t mode,
- unsigned int uid, unsigned int gid,
- const u64 *generation, dev_t dev, const char *symname,
- unsigned size, struct buffer_head **bhp)
+static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const char *symname, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_dinode *di;
struct buffer_head *dibh;
struct timespec tv = CURRENT_TIME;
- dibh = gfs2_meta_new(gl, inum->no_addr);
- gfs2_trans_add_bh(gl, dibh, 1);
+ dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
di = (struct gfs2_dinode *)dibh->b_data;
- di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
- di->di_num.no_addr = cpu_to_be64(inum->no_addr);
- di->di_mode = cpu_to_be32(mode);
- di->di_uid = cpu_to_be32(uid);
- di->di_gid = cpu_to_be32(gid);
+ di->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
+ di->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
+ di->di_mode = cpu_to_be32(ip->i_inode.i_mode);
+ di->di_uid = cpu_to_be32(ip->i_inode.i_uid);
+ di->di_gid = cpu_to_be32(ip->i_inode.i_gid);
di->di_nlink = 0;
- di->di_size = cpu_to_be64(size);
+ di->di_size = cpu_to_be64(ip->i_inode.i_size);
di->di_blocks = cpu_to_be64(1);
di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
- di->di_major = cpu_to_be32(MAJOR(dev));
- di->di_minor = cpu_to_be32(MINOR(dev));
- di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
- di->di_generation = cpu_to_be64(*generation);
+ di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev));
+ di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev));
+ di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_no_addr);
+ di->di_generation = cpu_to_be64(ip->i_generation);
di->di_flags = 0;
di->__pad1 = 0;
- di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
+ di->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) ? GFS2_FORMAT_DE : 0);
di->di_height = 0;
di->__pad2 = 0;
di->__pad3 = 0;
@@ -487,7 +480,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
memset(&di->di_reserved, 0, sizeof(di->di_reserved));
- switch(mode & S_IFMT) {
+ switch(ip->i_inode.i_mode & S_IFMT) {
case S_IFREG:
if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
gfs2_tune_get(sdp, gt_new_files_jdata))
@@ -502,7 +495,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
gfs2_init_dir(dibh, dip);
break;
case S_IFLNK:
- memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, size);
+ memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size);
break;
}
@@ -511,25 +504,22 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
*bhp = dibh;
}
-static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
- umode_t mode, const struct gfs2_inum_host *inum,
- const u64 *generation, dev_t dev, const char *symname,
- unsigned int size, struct buffer_head **bhp)
+static int make_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const char *symname, struct buffer_head **bhp)
{
+ struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- unsigned int uid, gid;
int error;
- munge_mode_uid_gid(dip, &mode, &uid, &gid);
error = gfs2_rindex_update(sdp);
if (error)
return error;
- error = gfs2_quota_lock(dip, uid, gid);
+ error = gfs2_quota_lock(dip, inode->i_uid, inode->i_gid);
if (error)
return error;
- error = gfs2_quota_check(dip, uid, gid);
+ error = gfs2_quota_check(dip, inode->i_uid, inode->i_gid);
if (error)
goto out_quota;
@@ -537,8 +527,8 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
if (error)
goto out_quota;
- init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, symname, size, bhp);
- gfs2_quota_change(dip, +1, uid, gid);
+ init_dinode(dip, ip, symname, bhp);
+ gfs2_quota_change(dip, +1, inode->i_uid, inode->i_gid);
gfs2_trans_end(sdp);
out_quota:
@@ -570,7 +560,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
if (error)
goto fail_quota_locks;
@@ -657,19 +647,14 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct inode *inode = NULL;
struct gfs2_inode *dip = GFS2_I(dir), *ip;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
+ struct gfs2_glock *io_gl;
int error;
- u64 generation;
struct buffer_head *bh = NULL;
+ u32 aflags = 0;
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
- /* We need a reservation to allocate the new dinode block. The
- directory ip temporarily points to the reservation, but this is
- being done to get a set of contiguous blocks for the new dinode.
- Since this is a create, we don't have a sizehint yet, so it will
- have to use the minimum reservation size. */
error = gfs2_rs_alloc(dip);
if (error)
return error;
@@ -688,45 +673,72 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock;
- error = alloc_dinode(dip, &inum.no_addr, &generation);
+ inode = new_inode(sdp->sd_vfs);
+ if (!inode) {
+ gfs2_glock_dq_uninit(ghs);
+ return -ENOMEM;
+ }
+ ip = GFS2_I(inode);
+ error = gfs2_rs_alloc(ip);
if (error)
- goto fail_gunlock;
- inum.no_formal_ino = generation;
+ goto fail_free_inode;
+
+ set_bit(GIF_INVALID, &ip->i_flags);
+ inode->i_mode = mode;
+ inode->i_rdev = dev;
+ inode->i_size = size;
+ munge_mode_uid_gid(dip, inode);
+ ip->i_goal = dip->i_goal;
- error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
- LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+ if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) ||
+ (dip->i_diskflags & GFS2_DIF_TOPDIR))
+ aflags |= GFS2_AF_ORLOV;
+
+ error = alloc_dinode(ip, aflags);
if (error)
- goto fail_gunlock;
+ goto fail_free_inode;
- error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, symname, size, &bh);
+ error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (error)
- goto fail_gunlock2;
+ goto fail_free_inode;
- inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino, 0);
- if (IS_ERR(inode))
+ ip->i_gl->gl_object = ip;
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+ if (error)
+ goto fail_free_inode;
+
+ error = make_dinode(dip, ip, symname, &bh);
+ if (error)
goto fail_gunlock2;
- ip = GFS2_I(inode);
- error = gfs2_inode_refresh(ip);
+ error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
goto fail_gunlock2;
- error = gfs2_rs_alloc(ip);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (error)
goto fail_gunlock2;
+ ip->i_iopen_gh.gh_gl->gl_object = ip;
+ gfs2_glock_put(io_gl);
+ gfs2_set_iop(inode);
+ insert_inode_hash(inode);
+
+ error = gfs2_inode_refresh(ip);
+ if (error)
+ goto fail_gunlock3;
+
error = gfs2_acl_create(dip, inode);
if (error)
- goto fail_gunlock2;
+ goto fail_gunlock3;
error = gfs2_security_init(dip, ip, name);
if (error)
- goto fail_gunlock2;
+ goto fail_gunlock3;
error = link_dinode(dip, name, ip);
if (error)
- goto fail_gunlock2;
+ goto fail_gunlock3;
if (bh)
brelse(bh);
@@ -739,8 +751,20 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
return 0;
+fail_gunlock3:
+ gfs2_glock_dq_uninit(ghs + 1);
+ if (ip->i_gl)
+ gfs2_glock_put(ip->i_gl);
+ goto fail_gunlock;
+
fail_gunlock2:
gfs2_glock_dq_uninit(ghs + 1);
+fail_free_inode:
+ if (ip->i_gl)
+ gfs2_glock_put(ip->i_gl);
+ gfs2_rs_delete(ip);
+ free_inode_nonrcu(inode);
+ inode = NULL;
fail_gunlock:
gfs2_glock_dq_uninit(ghs);
if (inode && !IS_ERR(inode)) {
@@ -748,7 +772,6 @@ fail_gunlock:
iput(inode);
}
fail:
- gfs2_rs_delete(dip);
if (bh)
brelse(bh);
return error;
@@ -884,7 +907,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
if (error)
goto out_gunlock_q;
@@ -977,7 +1000,6 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
* gfs2_unlink_inode - Removes an inode from its parent dir and unlinks it
* @dip: The parent directory
* @name: The name of the entry in the parent directory
- * @bh: The inode buffer for the inode to be removed
* @inode: The inode to be removed
*
* Called with all the locks and in a transaction. This will only be
@@ -987,8 +1009,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
*/
static int gfs2_unlink_inode(struct gfs2_inode *dip,
- const struct dentry *dentry,
- struct buffer_head *bh)
+ const struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1028,7 +1049,6 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
struct gfs2_sbd *sdp = GFS2_SB(dir);
struct inode *inode = dentry->d_inode;
struct gfs2_inode *ip = GFS2_I(inode);
- struct buffer_head *bh;
struct gfs2_holder ghs[3];
struct gfs2_rgrpd *rgd;
int error;
@@ -1077,14 +1097,9 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
if (error)
- goto out_gunlock;
-
- error = gfs2_meta_inode_buffer(ip, &bh);
- if (error)
goto out_end_trans;
- error = gfs2_unlink_inode(dip, dentry, bh);
- brelse(bh);
+ error = gfs2_unlink_inode(dip, dentry);
out_end_trans:
gfs2_trans_end(sdp);
@@ -1365,7 +1380,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
+ error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
if (error)
goto out_gunlock_q;
@@ -1384,14 +1399,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
/* Remove the target file, if it exists */
- if (nip) {
- struct buffer_head *bh;
- error = gfs2_meta_inode_buffer(nip, &bh);
- if (error)
- goto out_end_trans;
- error = gfs2_unlink_inode(ndip, ndentry, bh);
- brelse(bh);
- }
+ if (nip)
+ error = gfs2_unlink_inode(ndip, ndentry);
if (dir_rename) {
error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0fb6539b0c8..8dad6b09371 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -120,8 +120,8 @@ static void gdlm_ast(void *arg)
gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
- if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
- memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
+ if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
+ memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
@@ -203,8 +203,10 @@ static int make_mode(const unsigned int lmstate)
static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
const int req)
{
- u32 lkf = DLM_LKF_VALBLK;
- u32 lkid = gl->gl_lksb.sb_lkid;
+ u32 lkf = 0;
+
+ if (gl->gl_lksb.sb_lvbptr)
+ lkf |= DLM_LKF_VALBLK;
if (gfs_flags & LM_FLAG_TRY)
lkf |= DLM_LKF_NOQUEUE;
@@ -228,7 +230,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
BUG();
}
- if (lkid != 0) {
+ if (gl->gl_lksb.sb_lkid != 0) {
lkf |= DLM_LKF_CONVERT;
if (test_bit(GLF_BLOCKING, &gl->gl_flags))
lkf |= DLM_LKF_QUECVT;
@@ -289,6 +291,14 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
+
+ /* don't want to skip dlm_unlock writing the lvb when lock is ex */
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+ gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
+ gfs2_glock_free(gl);
+ return;
+ }
+
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
NULL, gl);
if (error) {
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e443966c810..0e3554edb8f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -278,6 +278,9 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_meta_header)) /
sizeof(struct gfs2_quota_change);
+ sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
+ sizeof(struct gfs2_meta_header))
+ * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
/* Compute maximum reservation required to add an entry to a directory */
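
As a rough worked example of the new sd_blocks_per_bitmap value (the 4 KiB block size is an assumption for illustration, and the 24-byte on-disk gfs2_meta_header size is recalled from the on-disk format rather than taken from this patch): each bitmap block after the first describes (4096 - 24) * 4 = 16288 filesystem blocks, since GFS2 uses two bits of bitmap per block and therefore packs GFS2_NBBY = 4 blocks per byte.

#include <stdio.h>

int main(void)
{
	unsigned bsize = 4096;	/* assumed filesystem block size */
	unsigned mh = 24;	/* assumed sizeof(struct gfs2_meta_header) on disk */
	unsigned nbby = 4;	/* GFS2_NBBY: 2 bits per block => 4 blocks per byte */

	printf("blocks per bitmap block: %u\n", (bsize - mh) * nbby);	/* 16288 */
	return 0;
}
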
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c5af8e18f27..ae55e248c3b 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -816,7 +816,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
reserved = 1 + (nalloc * (data_blocks + ind_blocks));
- error = gfs2_inplace_reserve(ip, reserved);
+ error = gfs2_inplace_reserve(ip, reserved, 0);
if (error)
goto out_alloc;
@@ -869,7 +869,7 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
if (error < 0)
return error;
- qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+ qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
qlvb->__pad = 0;
qlvb->qb_limit = q.qu_limit;
@@ -893,7 +893,7 @@ restart:
if (error)
return error;
- qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+ qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
gfs2_glock_dq_uninit(q_gh);
@@ -1506,7 +1506,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
if (error)
goto out;
- qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+ qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
fdq->d_version = FS_DQUOT_VERSION;
fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
fdq->d_id = from_kqid(&init_user_ns, qid);
@@ -1605,7 +1605,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, blocks);
+ error = gfs2_inplace_reserve(ip, blocks, 0);
if (error)
goto out_i;
blocks += gfs2_rg_blocks(ip, blocks);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 38fe18f2f05..37ee061d899 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -16,6 +16,7 @@
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
+#include <linux/random.h>
#include "gfs2.h"
#include "incore.h"
@@ -251,22 +252,25 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
u64 rblock = block - rbm->rgd->rd_data0;
- u32 goal = (u32)rblock;
- int x;
+ u32 x;
if (WARN_ON_ONCE(rblock > UINT_MAX))
return -EINVAL;
if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
return -E2BIG;
- for (x = 0; x < rbm->rgd->rd_length; x++) {
- rbm->bi = rbm->rgd->rd_bits + x;
- if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
- rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
- break;
- }
- }
+ rbm->bi = rbm->rgd->rd_bits;
+ rbm->offset = (u32)(rblock);
+ /* Check if the block falls within the first bitmap */
+ if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+ return 0;
+ /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
+ rbm->offset += (sizeof(struct gfs2_rgrp) -
+ sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
+ x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->bi += x;
return 0;
}
@@ -875,7 +879,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
goto fail;
rgd->rd_gl->gl_object = rgd;
- rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
+ rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
if (rgd->rd_data > sdp->sd_max_rg_data)
sdp->sd_max_rg_data = rgd->rd_data;
@@ -1678,13 +1682,105 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
return;
}
+/**
+ * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
+ * @rgd: The rgrp in question
+ * @loops: An indication of how picky we can be (0=very, 1=less so)
+ *
+ * This function uses the recently added glock statistics in order to
+ * figure out whether a particular resource group is suffering from
+ * contention from multiple nodes. This is done purely on the basis
+ * of timings, since this is the only data we have to work with and
+ * our aim here is to reject a resource group which is highly contended
+ * but (very important) not to do this too often in order to ensure that
+ * we do not land up introducing fragmentation by changing resource
+ * groups when not actually required.
+ *
+ * The calculation is fairly simple, we want to know whether the SRTTB
+ * (i.e. smoothed round trip time for blocking operations) to acquire
+ * the lock for this rgrp's glock is significantly greater than the
+ * time taken for resource groups on average. We introduce a margin in
+ * the form of the variable @var which is computed as the sum of the two
+ * respective variances, and multiplied by a factor depending on @loops
+ * and whether we have a lot of data to base the decision on. This is
+ * then tested against the square difference of the means in order to
+ * decide whether the result is statistically significant or not.
+ *
+ * Returns: A boolean verdict on the congestion status
+ */
+
+static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
+{
+ const struct gfs2_glock *gl = rgd->rd_gl;
+ const struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_lkstats *st;
+ s64 r_dcount, l_dcount;
+ s64 r_srttb, l_srttb;
+ s64 srttb_diff;
+ s64 sqr_diff;
+ s64 var;
+
+ preempt_disable();
+ st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
+ r_srttb = st->stats[GFS2_LKS_SRTTB];
+ r_dcount = st->stats[GFS2_LKS_DCOUNT];
+ var = st->stats[GFS2_LKS_SRTTVARB] +
+ gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
+ preempt_enable();
+
+ l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
+ l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
+
+ if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
+ return false;
+
+ srttb_diff = r_srttb - l_srttb;
+ sqr_diff = srttb_diff * srttb_diff;
+
+ var *= 2;
+ if (l_dcount < 8 || r_dcount < 8)
+ var *= 2;
+ if (loops == 1)
+ var *= 2;
+
+ return ((srttb_diff < 0) && (sqr_diff > var));
+}
+
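
Put another way, the function above declares an rgrp congested only when this node's smoothed blocking round-trip time exceeds the per-CPU average for rgrp glocks by more than the combined variance, with that margin doubled once unconditionally, again for small sample counts, and again on the second pass. A stand-alone sketch of just the decision, with invented numbers in whatever internal units the glock statistics use:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the decision in gfs2_rgrp_congested(); all inputs are illustrative */
static bool congested(int64_t avg_srttb, int64_t local_srttb,
		      int64_t var, bool small_sample, int loops)
{
	int64_t diff = avg_srttb - local_srttb;	/* negative => slower than average */
	int64_t sqr_diff = diff * diff;

	var *= 2;
	if (small_sample)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return diff < 0 && sqr_diff > var;
}

int main(void)
{
	printf("%d\n", congested(300, 900, 10000, false, 0));	/* 1: well outside the margin */
	printf("%d\n", congested(300, 350, 10000, false, 0));	/* 0: within the margin */
	return 0;
}
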
+/**
+ * gfs2_rgrp_used_recently
+ * @rs: The block reservation with the rgrp to test
+ * @msecs: The time limit in milliseconds
+ *
+ * Returns: True if the rgrp glock has been used within the time limit
+ */
+static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
+ u64 msecs)
+{
+ u64 tdiff;
+
+ tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
+ rs->rs_rbm.rgd->rd_gl->gl_dstamp));
+
+ return tdiff > (msecs * 1000 * 1000);
+}
+
+static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
+{
+ const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ u32 skip;
+
+ get_random_bytes(&skip, sizeof(skip));
+ return skip % sdp->sd_rgrps;
+}
+
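
gfs2_orlov_skip() above simply picks a uniformly random number of resource groups to skip before allocating a new top-level directory, spreading such directories across the filesystem in the spirit of the Orlov allocator (hence the GFS2_AF_ORLOV flag). A user-space sketch of the idea; random() stands in for get_random_bytes():

#include <stdint.h>
#include <stdlib.h>

/* how many rgrps to skip before starting the allocation scan */
static uint32_t orlov_skip(uint32_t nr_rgrps)
{
	uint32_t skip = (uint32_t)random();	/* kernel: get_random_bytes(&skip, sizeof(skip)) */
	return nr_rgrps ? skip % nr_rgrps : 0;
}
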
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
struct gfs2_rgrpd *rgd = *pos;
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
rgd = gfs2_rgrpd_get_next(rgd);
if (rgd == NULL)
- rgd = gfs2_rgrpd_get_next(NULL);
+ rgd = gfs2_rgrpd_get_first(sdp);
*pos = rgd;
if (rgd != begin) /* If we didn't wrap */
return true;
@@ -1699,14 +1795,15 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
* Returns: errno
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
struct gfs2_blkreserv *rs = ip->i_res;
- int error = 0, rg_locked, flags = LM_FLAG_TRY;
+ int error = 0, rg_locked, flags = 0;
u64 last_unlinked = NO_BLOCK;
int loops = 0;
+ u32 skip = 0;
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
@@ -1720,6 +1817,8 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
} else {
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
+ if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+ skip = gfs2_orlov_skip(ip);
if (rs->rs_rbm.rgd == NULL)
return -EBADSLT;
@@ -1728,13 +1827,20 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
rg_locked = 0;
+ if (skip && skip--)
+ goto next_rgrp;
+ if (!gfs2_rs_active(rs) && (loops < 2) &&
+ gfs2_rgrp_used_recently(rs, 1000) &&
+ gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+ goto next_rgrp;
error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
LM_ST_EXCLUSIVE, flags,
&rs->rs_rgd_gh);
- if (error == GLR_TRYFAILED)
- goto next_rgrp;
if (unlikely(error))
return error;
+ if (!gfs2_rs_active(rs) && (loops < 2) &&
+ gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+ goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rbm.rgd);
if (unlikely(error)) {
@@ -1781,12 +1887,13 @@ next_rgrp:
/* Find the next rgrp, and continue looking */
if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
continue;
+ if (skip)
+ continue;
/* If we've scanned all the rgrps, but found no free blocks
* then this checks for some less likely conditions before
* trying again.
*/
- flags &= ~LM_FLAG_TRY;
loops++;
/* Check that fs hasn't grown if writing to rindex */
if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 24077958dcf..842185853f6 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -39,7 +39,8 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
+#define GFS2_AF_ORLOV 1
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index bbdc78af60c..2ee13e841e9 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -486,7 +486,7 @@ TRACE_EVENT(gfs2_block_alloc,
),
TP_fast_assign(
- __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
+ __entry->dev = rgd->rd_gl->gl_sbd->sd_vfs->s_dev;
__entry->start = block;
__entry->inum = ip->i_no_addr;
__entry->len = len;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index db330e5518c..76c144b3c9b 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -734,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
return error;
- error = gfs2_inplace_reserve(ip, blks);
+ error = gfs2_inplace_reserve(ip, blks, 0);
if (error)
goto out_gunlock_q;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 0b35903219b..d47f11658c1 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -35,6 +35,16 @@ static int hfs_readpage(struct file *file, struct page *page)
return block_read_full_page(page, hfs_get_block);
}
+static void hfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ hfs_file_truncate(inode);
+ }
+}
+
static int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -45,11 +55,8 @@ static int hfs_write_begin(struct file *file, struct address_space *mapping,
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ hfs_write_failed(mapping, pos + len);
return ret;
}
@@ -120,6 +127,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
ssize_t ret;
@@ -135,7 +143,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
- vmtruncate(inode, isize);
+ hfs_write_failed(mapping, end);
}
return ret;
@@ -617,9 +625,12 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
attr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
+
+ truncate_setsize(inode, attr->ia_size);
+ hfs_file_truncate(inode);
}
setattr_copy(inode, attr);
@@ -668,7 +679,6 @@ static const struct file_operations hfs_file_operations = {
static const struct inode_operations hfs_file_inode_operations = {
.lookup = hfs_file_lookup,
- .truncate = hfs_file_truncate,
.setattr = hfs_inode_setattr,
.setxattr = hfs_setxattr,
.getxattr = hfs_getxattr,
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 4cfbe2edd29..6feefc0cb48 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -176,12 +176,14 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
/* are all of the bits in range? */
if ((offset + count) > sbi->total_blocks)
- return -2;
+ return -ENOENT;
mutex_lock(&sbi->alloc_mutex);
mapping = sbi->alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
page = read_mapping_page(mapping, pnr, NULL);
+ if (IS_ERR(page))
+ goto kaboom;
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -214,6 +216,8 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
set_page_dirty(page);
kunmap(page);
page = read_mapping_page(mapping, ++pnr, NULL);
+ if (IS_ERR(page))
+ goto kaboom;
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -232,4 +236,11 @@ out:
mutex_unlock(&sbi->alloc_mutex);
return 0;
+
+kaboom:
+ printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n",
+ PTR_ERR(page));
+ mutex_unlock(&sbi->alloc_mutex);
+
+ return -EIO;
}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 21023d9f8ff..685d07d0ed1 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -159,7 +159,7 @@ void hfs_btree_close(struct hfs_btree *tree)
kfree(tree);
}
-void hfs_btree_write(struct hfs_btree *tree)
+int hfs_btree_write(struct hfs_btree *tree)
{
struct hfs_btree_header_rec *head;
struct hfs_bnode *node;
@@ -168,7 +168,7 @@ void hfs_btree_write(struct hfs_btree *tree)
node = hfs_bnode_find(tree, 0);
if (IS_ERR(node))
/* panic? */
- return;
+ return -EIO;
/* Load the header */
page = node->page[0];
head = (struct hfs_btree_header_rec *)(kmap(page) +
@@ -186,6 +186,7 @@ void hfs_btree_write(struct hfs_btree *tree)
kunmap(page);
set_page_dirty(page);
hfs_bnode_put(node);
+ return 0;
}
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 5849e3ef35c..eba76eab6d6 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -329,6 +329,7 @@ static int hfsplus_free_extents(struct super_block *sb,
{
u32 count, start;
int i;
+ int err = 0;
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
@@ -345,18 +346,33 @@ found:
for (;;) {
start = be32_to_cpu(extent->start_block);
if (count <= block_nr) {
- hfsplus_block_free(sb, start, count);
+ err = hfsplus_block_free(sb, start, count);
+ if (err) {
+ printk(KERN_ERR "hfs: can't free extent\n");
+ dprint(DBG_EXTENT, " start: %u count: %u\n",
+ start, count);
+ }
extent->block_count = 0;
extent->start_block = 0;
block_nr -= count;
} else {
count -= block_nr;
- hfsplus_block_free(sb, start + count, block_nr);
+ err = hfsplus_block_free(sb, start + count, block_nr);
+ if (err) {
+ printk(KERN_ERR "hfs: can't free extent\n");
+ dprint(DBG_EXTENT, " start: %u count: %u\n",
+ start, count);
+ }
extent->block_count = cpu_to_be32(count);
block_nr = 0;
}
- if (!block_nr || !i)
- return 0;
+ if (!block_nr || !i) {
+ /*
+ * Try to free all extents and
+ * return only last error
+ */
+ return err;
+ }
i--;
extent--;
count = be32_to_cpu(extent->block_count);
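
The hfsplus_free_extents() change above intentionally keeps walking the remaining extents when hfsplus_block_free() fails, logging each failure and returning only the last error so that one bad extent does not strand the rest. A generic sketch of that pattern (the callback and item type are placeholders, not hfsplus API):

/* free every item; log-and-continue on failure, reporting the last error */
static int free_all(int (*free_one)(unsigned item), const unsigned *items, int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		int ret = free_one(items[i]);
		if (ret)
			err = ret;	/* remember it, but keep freeing the rest */
	}
	return err;
}
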
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index c571de224b1..a6da86b1b4c 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -335,7 +335,7 @@ int hfsplus_block_free(struct super_block *, u32, u32);
/* btree.c */
struct hfs_btree *hfs_btree_open(struct super_block *, u32);
void hfs_btree_close(struct hfs_btree *);
-void hfs_btree_write(struct hfs_btree *);
+int hfs_btree_write(struct hfs_btree *);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *);
void hfs_bmap_free(struct hfs_bnode *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 2172aa5976f..799b336b59f 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -28,6 +28,16 @@ static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, hfsplus_get_block, wbc);
}
+static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ hfsplus_file_truncate(inode);
+ }
+}
+
static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -38,11 +48,8 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ hfsplus_write_failed(mapping, pos + len);
return ret;
}
@@ -116,6 +123,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
ssize_t ret;
@@ -131,7 +139,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
- vmtruncate(inode, isize);
+ hfsplus_write_failed(mapping, end);
}
return ret;
@@ -300,10 +308,8 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
-
- error = vmtruncate(inode, attr->ia_size);
- if (error)
- return error;
+ truncate_setsize(inode, attr->ia_size);
+ hfsplus_file_truncate(inode);
}
setattr_copy(inode, attr);
@@ -358,7 +364,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
static const struct inode_operations hfsplus_file_inode_operations = {
.lookup = hfsplus_file_lookup,
- .truncate = hfsplus_file_truncate,
.setattr = hfsplus_setattr,
.setxattr = hfsplus_setxattr,
.getxattr = hfsplus_getxattr,
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 811a84d2d96..796198d2655 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -127,8 +127,14 @@ static int hfsplus_system_write_inode(struct inode *inode)
hfsplus_mark_mdb_dirty(inode->i_sb);
}
hfsplus_inode_write_fork(inode, fork);
- if (tree)
- hfs_btree_write(tree);
+ if (tree) {
+ int err = hfs_btree_write(tree);
+ if (err) {
+ printk(KERN_ERR "hfs: b-tree write err: %d, ino %lu\n",
+ err, inode->i_ino);
+ return err;
+ }
+ }
return 0;
}
@@ -226,6 +232,7 @@ out:
static void delayed_sync_fs(struct work_struct *work)
{
+ int err;
struct hfsplus_sb_info *sbi;
sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
@@ -234,7 +241,9 @@ static void delayed_sync_fs(struct work_struct *work)
sbi->work_queued = 0;
spin_unlock(&sbi->work_lock);
- hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+ err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+ if (err)
+ printk(KERN_ERR "hfs: delayed sync fs err %d\n", err);
}
void hfsplus_mark_mdb_dirty(struct super_block *sb)
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 89d2a5803ae..fbfe2df5624 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -50,7 +50,7 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno)
return disk_secno;
}
-static void hpfs_truncate(struct inode *i)
+void hpfs_truncate(struct inode *i)
{
if (IS_IMMUTABLE(i)) return /*-EPERM*/;
hpfs_lock_assert(i->i_sb);
@@ -105,6 +105,16 @@ static int hpfs_readpage(struct file *file, struct page *page)
return block_read_full_page(page,hpfs_get_block);
}
+static void hpfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ hpfs_truncate(inode);
+ }
+}
+
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -115,11 +125,8 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ hpfs_write_failed(mapping, pos + len);
return ret;
}
@@ -166,6 +173,5 @@ const struct file_operations hpfs_file_ops =
const struct inode_operations hpfs_file_iops =
{
- .truncate = hpfs_truncate,
.setattr = hpfs_setattr,
};
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 7102aaecc24..b7ae286646b 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -252,6 +252,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, const char *,
/* file.c */
int hpfs_file_fsync(struct file *, loff_t, loff_t, int);
+void hpfs_truncate(struct inode *);
extern const struct file_operations hpfs_file_ops;
extern const struct inode_operations hpfs_file_iops;
extern const struct address_space_operations hpfs_aops;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 804a9a842cb..5dc06c83710 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -277,9 +277,12 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
goto out_unlock;
+
+ truncate_setsize(inode, attr->ia_size);
+ hpfs_truncate(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 78f21f8dc2e..43b315f2002 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -710,7 +710,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
struct vfsmount *proc_mnt;
int err = -ENOENT;
- proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt);
+ proc_mnt = mntget(task_active_pid_ns(current)->proc_mnt);
if (IS_ERR(proc_mnt))
goto out;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4a55f35a6ce..78bde32ea95 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1,7 +1,7 @@
/*
* hugetlbpage-backed filesystem. Based on ramfs.
*
- * William Irwin, 2002
+ * Nadia Yvette Chambers, 2002
*
* Copyright (C) 2002 Linus Torvalds.
*/
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 7f5120bf0ec..071d6905f0d 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1259,7 +1259,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
goto not_jbd;
}
- /* keep track of wether or not this transaction modified us */
+ /* keep track of whether or not this transaction modified us */
was_modified = jh->b_modified;
/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 484b8d1c6cb..dbf41f9452d 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(jbd2_journal_get_create_access);
EXPORT_SYMBOL(jbd2_journal_get_undo_access);
EXPORT_SYMBOL(jbd2_journal_set_triggers);
EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
-EXPORT_SYMBOL(jbd2_journal_release_buffer);
EXPORT_SYMBOL(jbd2_journal_forget);
#if 0
EXPORT_SYMBOL(journal_sync_buffer);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a74ba465954..df9f29760ef 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -209,7 +209,8 @@ repeat:
if (!new_transaction)
goto alloc_transaction;
write_lock(&journal->j_state_lock);
- if (!journal->j_running_transaction) {
+ if (!journal->j_running_transaction &&
+ !journal->j_barrier_count) {
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
@@ -1207,17 +1208,6 @@ out:
return ret;
}
-/*
- * jbd2_journal_release_buffer: undo a get_write_access without any buffer
- * updates, if the update decided in the end that it didn't need access.
- *
- */
-void
-jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
- BUFFER_TRACE(bh, "entry");
-}
-
/**
* void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
* @handle: transaction handle
@@ -1261,7 +1251,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
goto not_jbd;
}
- /* keep track of wether or not this transaction modified us */
+ /* keep track of whether or not this transaction modified us */
was_modified = jh->b_modified;
/*
@@ -1850,7 +1840,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
BUFFER_TRACE(bh, "entry");
-retry:
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1945,14 +1934,11 @@ retry:
* for commit and try again.
*/
if (partial_page) {
- tid_t tid = journal->j_committing_transaction->t_tid;
-
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
- jbd2_log_wait_commit(journal, tid);
- goto retry;
+ return -EBUSY;
}
/*
* OK, buffer won't be reachable after truncate. We just set
@@ -2013,21 +1999,23 @@ zap_buffer_unlocked:
* @page: page to flush
* @offset: length of page to invalidate.
*
- * Reap page buffers containing data after offset in page.
- *
+ * Reap page buffers containing data after offset in page. Can return -EBUSY
+ * if buffers are part of the committing transaction and the page is straddling
+ * i_size. Caller then has to wait for current commit and try again.
*/
-void jbd2_journal_invalidatepage(journal_t *journal,
- struct page *page,
- unsigned long offset)
+int jbd2_journal_invalidatepage(journal_t *journal,
+ struct page *page,
+ unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
int may_free = 1;
+ int ret = 0;
if (!PageLocked(page))
BUG();
if (!page_has_buffers(page))
- return;
+ return 0;
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
@@ -2041,9 +2029,11 @@ void jbd2_journal_invalidatepage(journal_t *journal,
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
- may_free &= journal_unmap_buffer(journal, bh,
- offset > 0);
+ ret = journal_unmap_buffer(journal, bh, offset > 0);
unlock_buffer(bh);
+ if (ret < 0)
+ return ret;
+ may_free &= ret;
}
curr_off = next_off;
bh = next;
@@ -2054,6 +2044,7 @@ void jbd2_journal_invalidatepage(journal_t *journal,
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
}
+ return 0;
}
/*
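
jbd2_journal_invalidatepage() now reports, via -EBUSY, the case where buffers on a page straddling i_size still belong to the committing transaction; instead of looping inside jbd2 (the removed "retry" label), the caller is expected to wait for that commit and invalidate again. A rough caller-side sketch under that assumption (this is not the actual ext4 code; the tid lookup and page locking are shown only schematically):

	static void foo_wait_and_invalidate(journal_t *journal, struct page *page,
					    unsigned long offset)
	{
		tid_t commit_tid;
		int ret;

		while (1) {
			lock_page(page);
			ret = jbd2_journal_invalidatepage(journal, page, offset);
			unlock_page(page);
			if (ret != -EBUSY)
				break;

			/* Buffers belong to the committing transaction: wait it out. */
			read_lock(&journal->j_state_lock);
			commit_tid = journal->j_committing_transaction ?
				journal->j_committing_transaction->t_tid : 0;
			read_unlock(&journal->j_state_lock);

			if (commit_tid)
				jbd2_log_wait_commit(journal, commit_tid);
		}
	}
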
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 0c96eb52c79..03310721712 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -417,14 +417,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
spin_unlock(&c->erase_completion_lock);
ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
- if (ret)
- return ret;
+
/* Just lock it again and continue. Nothing much can change because
we hold c->alloc_sem anyway. In fact, it's not entirely clear why
we hold c->erase_completion_lock in the majority of this function...
but that's a question for another (more caffeine-rich) day. */
spin_lock(&c->erase_completion_lock);
+ if (ret)
+ return ret;
+
waste = jeb->free_size;
jffs2_link_node_ref(c, jeb,
(jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 9d3afd157f9..dd7442c5835 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -119,9 +119,12 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
iattr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
- rc = vmtruncate(inode, iattr->ia_size);
+ rc = inode_newsize_ok(inode, iattr->ia_size);
if (rc)
return rc;
+
+ truncate_setsize(inode, iattr->ia_size);
+ jfs_truncate(inode);
}
setattr_copy(inode, iattr);
@@ -133,7 +136,6 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
}
const struct inode_operations jfs_file_inode_operations = {
- .truncate = jfs_truncate,
.setxattr = jfs_setxattr,
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 4692bf3ca8c..b7dc47ba675 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -300,6 +300,16 @@ static int jfs_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
}
+static void jfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ jfs_truncate(inode);
+ }
+}
+
static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -308,11 +318,8 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
jfs_get_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ jfs_write_failed(mapping, pos + len);
return ret;
}
@@ -326,6 +333,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
@@ -341,7 +349,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
- vmtruncate(inode, isize);
+ jfs_write_failed(mapping, end);
}
return ret;
diff --git a/fs/libfs.c b/fs/libfs.c
index 7cc37ca19cd..916da8c4158 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -81,11 +81,11 @@ int dcache_dir_close(struct inode *inode, struct file *file)
return 0;
}
-loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
+loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *dentry = file->f_path.dentry;
mutex_lock(&dentry->d_inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case 1:
offset += file->f_pos;
case 0:
@@ -369,8 +369,6 @@ int simple_setattr(struct dentry *dentry, struct iattr *iattr)
struct inode *inode = dentry->d_inode;
int error;
- WARN_ON_ONCE(inode->i_op->truncate);
-
error = inode_change_ok(inode, iattr);
if (error)
return error;
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 13ad1539fbf..00ec0b9c94d 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -64,10 +64,6 @@ static void nlm4_compute_offsets(const struct nlm_lock *lock,
{
const struct file_lock *fl = &lock->fl;
- BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
- BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
- fl->fl_end != OFFSET_MAX);
-
*l_offset = loff_t_to_s64(fl->fl_start);
if (fl->fl_end == OFFSET_MAX)
*l_len = 0;
@@ -122,7 +118,6 @@ static void encode_netobj(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(length > XDR_MAX_NETOBJ);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, data, length);
}
@@ -156,7 +151,6 @@ out_overflow:
static void encode_cookie(struct xdr_stream *xdr,
const struct nlm_cookie *cookie)
{
- BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
}
@@ -198,7 +192,6 @@ out_overflow:
*/
static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
- BUG_ON(fh->size > NFS3_FHSIZE);
encode_netobj(xdr, (u8 *)&fh->data, fh->size);
}
@@ -336,7 +329,6 @@ static void encode_caller_name(struct xdr_stream *xdr, const char *name)
u32 length = strlen(name);
__be32 *p;
- BUG_ON(length > NLM_MAXSTRLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 05d29124c6a..54f9e6ce043 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -141,7 +141,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
- BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
+ WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}
/**
@@ -465,7 +465,6 @@ static const struct file_lock_operations nlmclnt_lock_ops = {
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
- BUG_ON(fl->fl_ops != NULL);
fl->fl_u.nfs_fl.state = 0;
fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 982d2676e1f..9a55797a1cd 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -60,10 +60,6 @@ static void nlm_compute_offsets(const struct nlm_lock *lock,
{
const struct file_lock *fl = &lock->fl;
- BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
- BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
- fl->fl_end != OFFSET_MAX);
-
*l_offset = loff_t_to_s32(fl->fl_start);
if (fl->fl_end == OFFSET_MAX)
*l_len = 0;
@@ -119,7 +115,6 @@ static void encode_netobj(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(length > XDR_MAX_NETOBJ);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, data, length);
}
@@ -153,7 +148,6 @@ out_overflow:
static void encode_cookie(struct xdr_stream *xdr,
const struct nlm_cookie *cookie)
{
- BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
}
@@ -195,7 +189,6 @@ out_overflow:
*/
static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
- BUG_ON(fh->size != NFS2_FHSIZE);
encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
}
@@ -330,7 +323,6 @@ static void encode_caller_name(struct xdr_stream *xdr, const char *name)
u32 length = strlen(name);
__be32 *p;
- BUG_ON(length > NLM_MAXSTRLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index f9b22e58f78..0e17090c310 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -177,9 +177,6 @@ static void nlm_destroy_host_locked(struct nlm_host *host)
dprintk("lockd: destroy host %s\n", host->h_name);
- BUG_ON(!list_empty(&host->h_lockowners));
- BUG_ON(atomic_read(&host->h_count));
-
hlist_del_init(&host->h_hash);
nsm_unmonitor(host);
@@ -289,13 +286,12 @@ void nlmclnt_release_host(struct nlm_host *host)
dprintk("lockd: release client host %s\n", host->h_name);
- BUG_ON(atomic_read(&host->h_count) < 0);
- BUG_ON(host->h_server);
+ WARN_ON_ONCE(host->h_server);
if (atomic_dec_and_test(&host->h_count)) {
- BUG_ON(!list_empty(&host->h_lockowners));
- BUG_ON(!list_empty(&host->h_granted));
- BUG_ON(!list_empty(&host->h_reclaim));
+ WARN_ON_ONCE(!list_empty(&host->h_lockowners));
+ WARN_ON_ONCE(!list_empty(&host->h_granted));
+ WARN_ON_ONCE(!list_empty(&host->h_reclaim));
mutex_lock(&nlm_host_mutex);
nlm_destroy_host_locked(host);
@@ -412,8 +408,7 @@ void nlmsvc_release_host(struct nlm_host *host)
dprintk("lockd: release server host %s\n", host->h_name);
- BUG_ON(atomic_read(&host->h_count) < 0);
- BUG_ON(!host->h_server);
+ WARN_ON_ONCE(!host->h_server);
atomic_dec(&host->h_count);
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 3d7e09bcc0e..3c2cfc68363 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -154,8 +154,6 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
.rpc_resp = res,
};
- BUG_ON(clnt == NULL);
-
memset(res, 0, sizeof(*res));
msg.rpc_proc = &clnt->cl_procinfo[proc];
@@ -466,7 +464,6 @@ static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
const u32 len = strlen(string);
__be32 *p;
- BUG_ON(len > SM_MAXSTRLEN);
p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
}
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index adb90116d36..af49e2d6941 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -33,7 +33,7 @@
* are being written out - and waiting for GC to make progress, naturally.
*
* So we cannot just call iget() or some variant of it, but first have to check
- * wether the inode in question might be in I_FREEING state. Therefore we
+ * whether the inode in question might be in I_FREEING state. Therefore we
* maintain our own per-sb list of "almost deleted" inodes and check against
* that list first. Normally this should be at most 1-2 entries long.
*
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index e1a3b6bf632..9a59cbade2f 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1887,9 +1887,15 @@ int logfs_truncate(struct inode *inode, u64 target)
logfs_put_wblocks(sb, NULL, 1);
}
- if (!err)
- err = vmtruncate(inode, target);
+ if (!err) {
+ err = inode_newsize_ok(inode, target);
+ if (err)
+ goto out;
+
+ truncate_setsize(inode, target);
+ }
+ out:
/* I don't trust error recovery yet. */
WARN_ON(err);
return err;
diff --git a/fs/minix/file.c b/fs/minix/file.c
index 4493ce695ab..adc6f549423 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -34,9 +34,12 @@ static int minix_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
+
+ truncate_setsize(inode, attr->ia_size);
+ minix_truncate(inode);
}
setattr_copy(inode, attr);
@@ -45,7 +48,6 @@ static int minix_setattr(struct dentry *dentry, struct iattr *attr)
}
const struct inode_operations minix_file_inode_operations = {
- .truncate = minix_truncate,
.setattr = minix_setattr,
.getattr = minix_getattr,
};
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 4fc5f8ab1c4..99541cceb58 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -395,6 +395,16 @@ int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
return __block_write_begin(page, pos, len, minix_get_block);
}
+static void minix_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ minix_truncate(inode);
+ }
+}
+
static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -403,11 +413,8 @@ static int minix_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep,
minix_get_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ minix_write_failed(mapping, pos + len);
return ret;
}
diff --git a/fs/mount.h b/fs/mount.h
index 4f291f9de64..cd500798040 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -4,8 +4,11 @@
struct mnt_namespace {
atomic_t count;
+ unsigned int proc_inum;
struct mount * root;
struct list_head list;
+ struct user_namespace *user_ns;
+ u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
int event;
};
diff --git a/fs/namei.c b/fs/namei.c
index 5f4cdf3ad91..43a97ee1d4c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1275,9 +1275,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
*need_lookup = false;
dentry = d_lookup(dir, name);
if (dentry) {
- if (d_need_lookup(dentry)) {
- *need_lookup = true;
- } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (error < 0) {
@@ -1383,8 +1381,6 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name,
return -ECHILD;
nd->seq = seq;
- if (unlikely(d_need_lookup(dentry)))
- goto unlazy;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
@@ -1410,11 +1406,6 @@ unlazy:
if (unlikely(!dentry))
goto need_lookup;
- if (unlikely(d_need_lookup(dentry))) {
- dput(dentry);
- goto need_lookup;
- }
-
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
@@ -1859,7 +1850,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (flags & LOOKUP_ROOT) {
struct inode *inode = nd->root.dentry->d_inode;
if (*name) {
- if (!inode->i_op->lookup)
+ if (!can_lookup(inode))
return -ENOTDIR;
retval = inode_permission(inode, MAY_EXEC);
if (retval)
@@ -1903,6 +1894,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
get_fs_pwd(current->fs, &nd->path);
}
} else {
+ /* Caller must check execute permissions on the starting path component */
struct fd f = fdget_raw(dfd);
struct dentry *dentry;
@@ -1912,16 +1904,10 @@ static int path_init(int dfd, const char *name, unsigned int flags,
dentry = f.file->f_path.dentry;
if (*name) {
- if (!S_ISDIR(dentry->d_inode->i_mode)) {
+ if (!can_lookup(dentry->d_inode)) {
fdput(f);
return -ENOTDIR;
}
-
- retval = inode_permission(dentry->d_inode, MAY_EXEC);
- if (retval) {
- fdput(f);
- return retval;
- }
}
nd->path = f.file->f_path;
@@ -2189,15 +2175,19 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
* path-walking is complete.
*/
static struct filename *
-user_path_parent(int dfd, const char __user *path, struct nameidata *nd)
+user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
+ unsigned int flags)
{
struct filename *s = getname(path);
int error;
+ /* only LOOKUP_REVAL is allowed in extra flags */
+ flags &= LOOKUP_REVAL;
+
if (IS_ERR(s))
return s;
- error = filename_lookup(dfd, s, LOOKUP_PARENT, nd);
+ error = filename_lookup(dfd, s, flags | LOOKUP_PARENT, nd);
if (error) {
putname(s);
return ERR_PTR(error);
@@ -3044,12 +3034,22 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
return file;
}
-struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
+struct dentry *kern_path_create(int dfd, const char *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct nameidata nd;
int err2;
- int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
+ int error;
+ bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
+
+ /*
+ * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
+ * other flags passed in are ignored!
+ */
+ lookup_flags &= LOOKUP_REVAL;
+
+ error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd);
if (error)
return ERR_PTR(error);
@@ -3113,13 +3113,14 @@ void done_path_create(struct path *path, struct dentry *dentry)
}
EXPORT_SYMBOL(done_path_create);
-struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
+struct dentry *user_path_create(int dfd, const char __user *pathname,
+ struct path *path, unsigned int lookup_flags)
{
struct filename *tmp = getname(pathname);
struct dentry *res;
if (IS_ERR(tmp))
return ERR_CAST(tmp);
- res = kern_path_create(dfd, tmp->name, path, is_dir);
+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
putname(tmp);
return res;
}
@@ -3175,12 +3176,13 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
struct dentry *dentry;
struct path path;
int error;
+ unsigned int lookup_flags = 0;
error = may_mknod(mode);
if (error)
return error;
-
- dentry = user_path_create(dfd, filename, &path, 0);
+retry:
+ dentry = user_path_create(dfd, filename, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -3203,6 +3205,10 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
}
out:
done_path_create(&path, dentry);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -3241,8 +3247,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
struct dentry *dentry;
struct path path;
int error;
+ unsigned int lookup_flags = LOOKUP_DIRECTORY;
- dentry = user_path_create(dfd, pathname, &path, 1);
+retry:
+ dentry = user_path_create(dfd, pathname, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -3252,6 +3260,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
if (!error)
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -3327,8 +3339,9 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
-
- name = user_path_parent(dfd, pathname, &nd);
+ unsigned int lookup_flags = 0;
+retry:
+ name = user_path_parent(dfd, pathname, &nd, lookup_flags);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -3370,6 +3383,10 @@ exit2:
exit1:
path_put(&nd.path);
putname(name);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -3423,8 +3440,9 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
-
- name = user_path_parent(dfd, pathname, &nd);
+ unsigned int lookup_flags = 0;
+retry:
+ name = user_path_parent(dfd, pathname, &nd, lookup_flags);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -3462,6 +3480,11 @@ exit2:
exit1:
path_put(&nd.path);
putname(name);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ inode = NULL;
+ goto retry;
+ }
return error;
slashes:
@@ -3513,12 +3536,13 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
struct filename *from;
struct dentry *dentry;
struct path path;
+ unsigned int lookup_flags = 0;
from = getname(oldname);
if (IS_ERR(from))
return PTR_ERR(from);
-
- dentry = user_path_create(newdfd, newname, &path, 0);
+retry:
+ dentry = user_path_create(newdfd, newname, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
@@ -3527,6 +3551,10 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
if (!error)
error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
done_path_create(&path, dentry);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out_putname:
putname(from);
return error;
@@ -3613,12 +3641,13 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
-
+retry:
error = user_path_at(olddfd, oldname, how, &old_path);
if (error)
return error;
- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
+ new_dentry = user_path_create(newdfd, newname, &new_path,
+ (how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
@@ -3635,6 +3664,10 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
out_dput:
done_path_create(&new_path, new_dentry);
+ if (retry_estale(error, how)) {
+ how |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
path_put(&old_path);
@@ -3807,15 +3840,17 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
struct nameidata oldnd, newnd;
struct filename *from;
struct filename *to;
+ unsigned int lookup_flags = 0;
+ bool should_retry = false;
int error;
-
- from = user_path_parent(olddfd, oldname, &oldnd);
+retry:
+ from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
if (IS_ERR(from)) {
error = PTR_ERR(from);
goto exit;
}
- to = user_path_parent(newdfd, newname, &newnd);
+ to = user_path_parent(newdfd, newname, &newnd, lookup_flags);
if (IS_ERR(to)) {
error = PTR_ERR(to);
goto exit1;
@@ -3887,11 +3922,18 @@ exit3:
unlock_rename(new_dir, old_dir);
mnt_drop_write(oldnd.path.mnt);
exit2:
+ if (retry_estale(error, lookup_flags))
+ should_retry = true;
path_put(&newnd.path);
putname(to);
exit1:
path_put(&oldnd.path);
putname(from);
+ if (should_retry) {
+ should_retry = false;
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
exit:
return error;
}
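
The namei.c changes thread a lookup_flags argument through user_path_create()/user_path_parent() so the *at() syscalls can redo the path walk with LOOKUP_REVAL after a stale file handle; retry_estale() is the new helper that tests for -ESTALE on a walk that has not yet used LOOKUP_REVAL. The recurring shape, condensed from the mknodat hunk above into a sketch (permission and security checks elided; foo_mknodat is illustrative only):

	static long foo_mknodat(int dfd, const char __user *filename,
				umode_t mode, unsigned dev)
	{
		struct dentry *dentry;
		struct path path;
		int error;
		unsigned int lookup_flags = 0;

	retry:
		dentry = user_path_create(dfd, filename, &path, lookup_flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);

		/* may_mknod()/security_path_mknod() checks elided for brevity */
		error = vfs_mknod(path.dentry->d_inode, dentry, mode, dev);

		done_path_create(&path, dentry);
		if (retry_estale(error, lookup_flags)) {
			/* Handle went stale mid-walk: force revalidation, retry. */
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
		return error;
	}
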
diff --git a/fs/namespace.c b/fs/namespace.c
index 24960626bb6..55605c55278 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
+#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
@@ -20,6 +21,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
#include "pnode.h"
#include "internal.h"
@@ -311,7 +313,7 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
@@ -784,7 +786,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
if (!mnt)
return ERR_PTR(-ENOMEM);
- if (flag & (CL_SLAVE | CL_PRIVATE))
+ if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
@@ -805,7 +807,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
br_write_unlock(&vfsmount_lock);
- if (flag & CL_SLAVE) {
+ if ((flag & CL_SLAVE) ||
+ ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
@@ -1266,7 +1269,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
goto dput_and_out;
retval = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
goto dput_and_out;
retval = do_umount(mnt, flags);
@@ -1292,7 +1295,7 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
static int mount_is_safe(struct path *path)
{
- if (capable(CAP_SYS_ADMIN))
+ if (ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
return 0;
return -EPERM;
#ifdef notyet
@@ -1308,6 +1311,26 @@ static int mount_is_safe(struct path *path)
#endif
}
+static bool mnt_ns_loop(struct path *path)
+{
+ /* Could bind mounting the mount namespace inode cause a
+ * mount namespace loop?
+ */
+ struct inode *inode = path->dentry->d_inode;
+ struct proc_inode *ei;
+ struct mnt_namespace *mnt_ns;
+
+ if (!proc_ns_inode(inode))
+ return false;
+
+ ei = PROC_I(inode);
+ if (ei->ns_ops != &mntns_operations)
+ return false;
+
+ mnt_ns = ei->ns;
+ return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
+}
+
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
int flag)
{
@@ -1610,7 +1633,7 @@ static int do_change_type(struct path *path, int flag)
int type;
int err = 0;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (path->dentry != path->mnt->mnt_root)
@@ -1655,6 +1678,10 @@ static int do_loopback(struct path *path, const char *old_name,
if (err)
return err;
+ err = -EINVAL;
+ if (mnt_ns_loop(&old_path))
+ goto out;
+
err = lock_mount(path);
if (err)
goto out;
@@ -1770,7 +1797,7 @@ static int do_move_mount(struct path *path, const char *old_name)
struct mount *p;
struct mount *old;
int err = 0;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (!old_name || !*old_name)
return -EINVAL;
@@ -1857,21 +1884,6 @@ static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
return ERR_PTR(err);
}
-static struct vfsmount *
-do_kern_mount(const char *fstype, int flags, const char *name, void *data)
-{
- struct file_system_type *type = get_fs_type(fstype);
- struct vfsmount *mnt;
- if (!type)
- return ERR_PTR(-ENODEV);
- mnt = vfs_kern_mount(type, flags, name, data);
- if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
- !mnt->mnt_sb->s_subtype)
- mnt = fs_set_subtype(mnt, fstype);
- put_filesystem(type);
- return mnt;
-}
-
/*
* add a mount into a namespace's mount tree
*/
@@ -1917,20 +1929,46 @@ unlock:
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
-static int do_new_mount(struct path *path, const char *type, int flags,
+static int do_new_mount(struct path *path, const char *fstype, int flags,
int mnt_flags, const char *name, void *data)
{
+ struct file_system_type *type;
+ struct user_namespace *user_ns;
struct vfsmount *mnt;
int err;
- if (!type)
+ if (!fstype)
return -EINVAL;
/* we need capabilities... */
- if (!capable(CAP_SYS_ADMIN))
+ user_ns = real_mount(path->mnt)->mnt_ns->user_ns;
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
- mnt = do_kern_mount(type, flags, name, data);
+ type = get_fs_type(fstype);
+ if (!type)
+ return -ENODEV;
+
+ if (user_ns != &init_user_ns) {
+ if (!(type->fs_flags & FS_USERNS_MOUNT)) {
+ put_filesystem(type);
+ return -EPERM;
+ }
+ /* Only in special cases allow devices from mounts
+ * created outside the initial user namespace.
+ */
+ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ flags |= MS_NODEV;
+ mnt_flags |= MNT_NODEV;
+ }
+ }
+
+ mnt = vfs_kern_mount(type, flags, name, data);
+ if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
+ !mnt->mnt_sb->s_subtype)
+ mnt = fs_set_subtype(mnt, fstype);
+
+ put_filesystem(type);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
@@ -2261,18 +2299,42 @@ dput_out:
return retval;
}
-static struct mnt_namespace *alloc_mnt_ns(void)
+static void free_mnt_ns(struct mnt_namespace *ns)
+{
+ proc_free_inum(ns->proc_inum);
+ put_user_ns(ns->user_ns);
+ kfree(ns);
+}
+
+/*
+ * Assign a sequence number so we can detect when we attempt to bind
+ * mount a reference to an older mount namespace into the current
+ * mount namespace, preventing reference counting loops. A 64bit
+ * number incrementing at 10Ghz will take 12,427 years to wrap which
+ * is effectively never, so we can ignore the possibility.
+ */
+static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
+
+static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
struct mnt_namespace *new_ns;
+ int ret;
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns)
return ERR_PTR(-ENOMEM);
+ ret = proc_alloc_inum(&new_ns->proc_inum);
+ if (ret) {
+ kfree(new_ns);
+ return ERR_PTR(ret);
+ }
+ new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
+ new_ns->user_ns = get_user_ns(user_ns);
return new_ns;
}
@@ -2281,24 +2343,28 @@ static struct mnt_namespace *alloc_mnt_ns(void)
* copied from the namespace of the passed in task structure.
*/
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
- struct fs_struct *fs)
+ struct user_namespace *user_ns, struct fs_struct *fs)
{
struct mnt_namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
struct mount *p, *q;
struct mount *old = mnt_ns->root;
struct mount *new;
+ int copy_flags;
- new_ns = alloc_mnt_ns();
+ new_ns = alloc_mnt_ns(user_ns);
if (IS_ERR(new_ns))
return new_ns;
down_write(&namespace_sem);
/* First pass: copy the tree topology */
- new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
+ copy_flags = CL_COPY_ALL | CL_EXPIRE;
+ if (user_ns != mnt_ns->user_ns)
+ copy_flags |= CL_SHARED_TO_SLAVE;
+ new = copy_tree(old, old->mnt.mnt_root, copy_flags);
if (IS_ERR(new)) {
up_write(&namespace_sem);
- kfree(new_ns);
+ free_mnt_ns(new_ns);
return ERR_CAST(new);
}
new_ns->root = new;
@@ -2339,7 +2405,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
- struct fs_struct *new_fs)
+ struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
@@ -2349,7 +2415,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
if (!(flags & CLONE_NEWNS))
return ns;
- new_ns = dup_mnt_ns(ns, new_fs);
+ new_ns = dup_mnt_ns(ns, user_ns, new_fs);
put_mnt_ns(ns);
return new_ns;
@@ -2361,7 +2427,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
*/
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
- struct mnt_namespace *new_ns = alloc_mnt_ns();
+ struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
if (!IS_ERR(new_ns)) {
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
@@ -2501,7 +2567,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
struct mount *new_mnt, *root_mnt;
int error;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
error = user_path_dir(new_root, &new);
@@ -2583,8 +2649,13 @@ static void __init init_mount_tree(void)
struct vfsmount *mnt;
struct mnt_namespace *ns;
struct path root;
+ struct file_system_type *type;
- mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
+ type = get_fs_type("rootfs");
+ if (!type)
+ panic("Can't find rootfs type");
+ mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
+ put_filesystem(type);
if (IS_ERR(mnt))
panic("Can't create rootfs");
@@ -2647,7 +2718,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
br_write_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
- kfree(ns);
+ free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
@@ -2681,3 +2752,72 @@ bool our_mnt(struct vfsmount *mnt)
{
return check_mnt(real_mount(mnt));
}
+
+static void *mntns_get(struct task_struct *task)
+{
+ struct mnt_namespace *ns = NULL;
+ struct nsproxy *nsproxy;
+
+ rcu_read_lock();
+ nsproxy = task_nsproxy(task);
+ if (nsproxy) {
+ ns = nsproxy->mnt_ns;
+ get_mnt_ns(ns);
+ }
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static void mntns_put(void *ns)
+{
+ put_mnt_ns(ns);
+}
+
+static int mntns_install(struct nsproxy *nsproxy, void *ns)
+{
+ struct fs_struct *fs = current->fs;
+ struct mnt_namespace *mnt_ns = ns;
+ struct path root;
+
+ if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_CHROOT) ||
+ !nsown_capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (fs->users != 1)
+ return -EINVAL;
+
+ get_mnt_ns(mnt_ns);
+ put_mnt_ns(nsproxy->mnt_ns);
+ nsproxy->mnt_ns = mnt_ns;
+
+ /* Find the root */
+ root.mnt = &mnt_ns->root->mnt;
+ root.dentry = mnt_ns->root->mnt.mnt_root;
+ path_get(&root);
+ while(d_mountpoint(root.dentry) && follow_down_one(&root))
+ ;
+
+ /* Update the pwd and root */
+ set_fs_pwd(fs, &root);
+ set_fs_root(fs, &root);
+
+ path_put(&root);
+ return 0;
+}
+
+static unsigned int mntns_inum(void *ns)
+{
+ struct mnt_namespace *mnt_ns = ns;
+ return mnt_ns->proc_inum;
+}
+
+const struct proc_ns_operations mntns_operations = {
+ .name = "mnt",
+ .type = CLONE_NEWNS,
+ .get = mntns_get,
+ .put = mntns_put,
+ .install = mntns_install,
+ .inum = mntns_inum,
+};
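
With proc_inum and the mntns_operations table above, each mount namespace is exposed under /proc/<pid>/ns/mnt, and setns(2) can attach a task to another mount namespace (subject to CAP_SYS_ADMIN in the owning user namespace, CAP_SYS_CHROOT, and an unshared fs_struct, per mntns_install()). A small userspace illustration of the interface these hooks back; on kernels without this support, setns() on a mount namespace file simply fails:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		/* e.g. "/proc/1234/ns/mnt"; needs root privileges */
		const char *ns_path = argc > 1 ? argv[1] : "/proc/self/ns/mnt";
		int fd = open(ns_path, O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (setns(fd, CLONE_NEWNS) < 0) {	/* mntns_install() on the kernel side */
			perror("setns");
			return 1;
		}
		close(fd);

		/* Now inside the target mount namespace; run a shell there. */
		execlp("sh", "sh", (char *)NULL);
		perror("execlp");
		return 1;
	}
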
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index d7e9fe77188..1acdad7fcec 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -976,9 +976,7 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
goto out;
if (attr->ia_size != i_size_read(inode)) {
- result = vmtruncate(inode, attr->ia_size);
- if (result)
- goto out;
+ truncate_setsize(inode, attr->ia_size);
mark_inode_dirty(inode);
}
}
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index be20a7e171a..63d14a99483 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -89,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
/*
* If I understand ncp_read_kernel() properly, the above always
* fetches from the network, here the analogue of disk.
- * -- wli
+ * -- nyc
*/
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index b7db60897f9..cce2c057bd2 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -24,7 +24,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o
delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o nfs4getroot.o nfs4client.o
nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
-nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+nfsv4-$(CONFIG_NFS_V4_1) += nfs4session.o pnfs.o pnfs_dev.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index f1027b06a1a..4fa788c93f4 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -40,6 +40,7 @@
#include <linux/pagevec.h>
#include "../pnfs.h"
+#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
index dded2636811..862a2f16db6 100644
--- a/fs/nfs/cache_lib.c
+++ b/fs/nfs/cache_lib.c
@@ -118,7 +118,6 @@ int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd)
struct dentry *dir;
dir = rpc_d_lookup_sb(sb, "cache");
- BUG_ON(dir == NULL);
ret = sunrpc_cache_register_pipefs(dir, cd->name, 0600, cd);
dput(dir);
return ret;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 4251c2ae06a..efd54f0a4c4 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -142,7 +142,7 @@ extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
struct cb_recallslotargs {
struct sockaddr *crsa_addr;
- uint32_t crsa_target_max_slots;
+ uint32_t crsa_target_highest_slotid;
};
extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
void *dummy,
@@ -167,8 +167,6 @@ extern __be32 nfs4_callback_layoutrecall(
struct cb_layoutrecallargs *args,
void *dummy, struct cb_process_state *cps);
-extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
-
struct cb_devicenotifyitem {
uint32_t cbd_notify_type;
uint32_t cbd_layout_type;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 76b4a7a3e55..c89b26bc975 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -14,6 +14,7 @@
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
+#include "nfs4session.h"
#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -216,7 +217,6 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
}
pnfs_get_layout_hdr(lo);
spin_unlock(&ino->i_lock);
- BUG_ON(!list_empty(&lo->plh_bulk_recall));
list_add(&lo->plh_bulk_recall, &recall_list);
}
}
@@ -562,23 +562,16 @@ __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
if (!cps->clp) /* set in cb_sequence */
goto out;
- dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
+ dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %d\n",
rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
- args->crsa_target_max_slots);
+ args->crsa_target_highest_slotid);
fc_tbl = &cps->clp->cl_session->fc_slot_table;
- status = htonl(NFS4ERR_BAD_HIGH_SLOT);
- if (args->crsa_target_max_slots > fc_tbl->max_slots ||
- args->crsa_target_max_slots < 1)
- goto out;
-
status = htonl(NFS4_OK);
- if (args->crsa_target_max_slots == fc_tbl->max_slots)
- goto out;
- fc_tbl->target_max_slots = args->crsa_target_max_slots;
- nfs41_handle_recall_slot(cps->clp);
+ nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
+ nfs41_server_notify_target_slotid_update(cps->clp);
out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 742ff4ffced..59461c957d9 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -16,6 +16,7 @@
#include "nfs4_fs.h"
#include "callback.h"
#include "internal.h"
+#include "nfs4session.h"
#define CB_OP_TAGLEN_MAXSZ (512)
#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ)
@@ -520,7 +521,7 @@ static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
p = read_buf(xdr, 4);
if (unlikely(p == NULL))
return htonl(NFS4ERR_BADXDR);
- args->crsa_target_max_slots = ntohl(*p++);
+ args->crsa_target_highest_slotid = ntohl(*p++);
return 0;
}
@@ -762,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
* A single slot, so highest used slotid is either 0 or -1
*/
tbl->highest_used_slotid = NFS4_NO_SLOT;
- nfs4_check_drain_bc_complete(session);
+ nfs4_session_drain_complete(session, tbl);
spin_unlock(&tbl->slot_tbl_lock);
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8b39a42ac35..9f3c66438d0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -277,7 +277,7 @@ void nfs_put_client(struct nfs_client *clp)
nfs_cb_idr_remove_locked(clp);
spin_unlock(&nn->nfs_client_lock);
- BUG_ON(!list_empty(&clp->cl_superblocks));
+ WARN_ON_ONCE(!list_empty(&clp->cl_superblocks));
clp->rpc_ops->free_client(clp);
}
@@ -615,8 +615,7 @@ EXPORT_SYMBOL_GPL(nfs_create_rpc_client);
*/
static void nfs_destroy_server(struct nfs_server *server)
{
- if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
- !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
+ if (server->nlm_host)
nlmclnt_done(server->nlm_host);
}
@@ -1061,10 +1060,6 @@ struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
if (error < 0)
goto error;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
/* Probe the root fh to retrieve its FSID */
error = nfs_probe_fsinfo(server, mount_info->mntfh, fattr);
if (error < 0)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index b9e66b7e0c1..32e6c53520e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -871,7 +871,7 @@ out:
return res;
}
-static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
@@ -880,10 +880,10 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
dentry->d_parent->d_name.name,
dentry->d_name.name,
- offset, origin);
+ offset, whence);
mutex_lock(&inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case 1:
offset += filp->f_pos;
case 0:
@@ -979,10 +979,11 @@ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
* particular file and the "nocto" mount flag is not set.
*
*/
-static inline
+static
int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
{
struct nfs_server *server = NFS_SERVER(inode);
+ int ret;
if (IS_AUTOMOUNT(inode))
return 0;
@@ -993,9 +994,13 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
if ((flags & LOOKUP_OPEN) && !(server->flags & NFS_MOUNT_NOCTO) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
goto out_force;
- return 0;
+out:
+ return (inode->i_nlink == 0) ? -ENOENT : 0;
out_force:
- return __nfs_revalidate_inode(server, inode);
+ ret = __nfs_revalidate_inode(server, inode);
+ if (ret != 0)
+ return ret;
+ goto out;
}
/*
@@ -1156,11 +1161,14 @@ static int nfs_dentry_delete(const struct dentry *dentry)
}
+/* Ensure that we revalidate inode->i_nlink */
static void nfs_drop_nlink(struct inode *inode)
{
spin_lock(&inode->i_lock);
- if (inode->i_nlink > 0)
- drop_nlink(inode);
+ /* drop the inode if we're reasonably sure this is the last link */
+ if (inode->i_nlink == 1)
+ clear_nlink(inode);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
}
@@ -1175,8 +1183,8 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
- drop_nlink(inode);
nfs_complete_unlink(dentry, inode);
+ nfs_drop_nlink(inode);
}
iput(inode);
}
@@ -1647,10 +1655,8 @@ static int nfs_safe_remove(struct dentry *dentry)
if (inode != NULL) {
NFS_PROTO(inode)->return_delegation(inode);
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
- /* The VFS may want to delete this inode */
if (error == 0)
nfs_drop_nlink(inode);
- nfs_mark_for_revalidate(inode);
} else
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
if (error == -ENOENT)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index cae26cbd59e..0bd7a55a5f0 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -266,21 +266,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
- if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
- if (bytes > hdr->good_bytes)
- zero_user(page, 0, PAGE_SIZE);
- else if (hdr->good_bytes - bytes < PAGE_SIZE)
- zero_user_segment(page,
- hdr->good_bytes & ~PAGE_MASK,
- PAGE_SIZE);
- }
- if (!PageCompound(page)) {
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
- if (bytes < hdr->good_bytes)
- set_page_dirty(page);
- } else
- set_page_dirty(page);
- }
+ if (!PageCompound(page) && bytes < hdr->good_bytes)
+ set_page_dirty(page);
bytes += req->wb_bytes;
nfs_list_remove_request(req);
nfs_direct_readpage_release(req);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 582bb886613..3c2b893665b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -119,18 +119,18 @@ force_reval:
return __nfs_revalidate_inode(server, inode);
}
-loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
filp->f_path.dentry->d_parent->d_name.name,
filp->f_path.dentry->d_name.name,
- offset, origin);
+ offset, whence);
/*
- * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+ * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
* the cached file length
*/
- if (origin != SEEK_SET && origin != SEEK_CUR) {
+ if (whence != SEEK_SET && whence != SEEK_CUR) {
struct inode *inode = filp->f_mapping->host;
int retval = nfs_revalidate_file_size(inode, filp);
@@ -138,7 +138,7 @@ loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
return (loff_t)retval;
}
- return generic_file_llseek(filp, offset, origin);
+ return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index c817787fbdb..24d1d1c5fca 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -307,6 +307,7 @@ void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
nfs_fscache_inode_unlock(inode);
}
}
+EXPORT_SYMBOL_GPL(nfs_fscache_set_inode_cookie);
/*
* Replace a per-inode cookie due to revalidation detecting a file having
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index c5b11b53ff3..4ecb76652eb 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -153,6 +153,22 @@ static inline void nfs_readpage_to_fscache(struct inode *inode,
}
/*
+ * Invalidate the contents of fscache for this inode. This will not sleep.
+ */
+static inline void nfs_fscache_invalidate(struct inode *inode)
+{
+ fscache_invalidate(NFS_I(inode)->fscache);
+}
+
+/*
+ * Wait for an object to finish being invalidated.
+ */
+static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
+{
+ fscache_wait_on_invalidate(NFS_I(inode)->fscache);
+}
+
+/*
* indicate the client caching state as readable text
*/
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
@@ -162,7 +178,6 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
return "no ";
}
-
#else /* CONFIG_NFS_FSCACHE */
static inline int nfs_fscache_register(void) { return 0; }
static inline void nfs_fscache_unregister(void) {}
@@ -205,6 +220,10 @@ static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
static inline void nfs_readpage_to_fscache(struct inode *inode,
struct page *page, int sync) {}
+
+static inline void nfs_fscache_invalidate(struct inode *inode) {}
+static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) {}
+
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
return "no ";
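
The new nfs_fscache_invalidate()/nfs_fscache_wait_on_invalidate() wrappers, with empty stubs when CONFIG_NFS_FSCACHE is off, let the inode code keep the FS-Cache object in step with the page cache without #ifdefs; the fs/nfs/inode.c hunks further down call them wherever NFS_INO_INVALID_DATA is raised. The calling pattern reduced to its core (foo_zap_data_cache is an illustrative name, mirroring nfs_zap_mapping above):

	static void foo_zap_data_cache(struct inode *inode)
	{
		spin_lock(&inode->i_lock);
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
		nfs_fscache_invalidate(inode);	/* non-sleeping; no-op without fscache */
		spin_unlock(&inode->i_lock);
	}
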
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 9cc4a3fbf4b..bc3968fa81e 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -193,19 +193,15 @@ static int nfs_idmap_init_keyring(void)
if (!cred)
return -ENOMEM;
- keyring = key_alloc(&key_type_keyring, ".id_resolver", 0, 0, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ,
- KEY_ALLOC_NOT_IN_QUOTA);
+ keyring = keyring_alloc(".id_resolver", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto failed_put_cred;
}
- ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
- if (ret < 0)
- goto failed_put_key;
-
ret = register_key_type(&key_type_id_resolver);
if (ret < 0)
goto failed_put_key;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6fa01aea248..ebeb94ce1b0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -107,13 +107,19 @@ u64 nfs_compat_user_ino64(u64 fileid)
return ino;
}
+int nfs_drop_inode(struct inode *inode)
+{
+ return NFS_STALE(inode) || generic_drop_inode(inode);
+}
+EXPORT_SYMBOL_GPL(nfs_drop_inode);
+
void nfs_clear_inode(struct inode *inode)
{
/*
* The following should never happen...
*/
- BUG_ON(nfs_have_writebacks(inode));
- BUG_ON(!list_empty(&NFS_I(inode)->open_files));
+ WARN_ON_ONCE(nfs_have_writebacks(inode));
+ WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
nfs_zap_acl_cache(inode);
nfs_access_zap_cache(inode);
nfs_fscache_release_inode_cookie(inode);
@@ -155,10 +161,12 @@ static void nfs_zap_caches_locked(struct inode *inode)
nfsi->attrtimeo_timestamp = jiffies;
memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
- if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
- else
+ nfs_fscache_invalidate(inode);
+ } else {
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ }
}
void nfs_zap_caches(struct inode *inode)
@@ -173,6 +181,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
if (mapping->nrpages != 0) {
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+ nfs_fscache_invalidate(inode);
spin_unlock(&inode->i_lock);
}
}
@@ -875,7 +884,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
spin_unlock(&inode->i_lock);
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
- nfs_fscache_reset_inode_cookie(inode);
+ nfs_fscache_wait_on_invalidate(inode);
dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
inode->i_sb->s_id, (long long)NFS_FILEID(inode));
return 0;
@@ -951,6 +960,10 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
i_size_write(inode, nfs_size_to_loff_t(fattr->size));
ret |= NFS_INO_INVALID_ATTR;
}
+
+ if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+ nfs_fscache_invalidate(inode);
+
return ret;
}
@@ -1199,8 +1212,10 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
struct nfs_inode *nfsi = NFS_I(inode);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode)) {
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ nfs_fscache_invalidate(inode);
+ }
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
return nfs_refresh_inode_locked(inode, fattr);
@@ -1488,6 +1503,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
(save_cache_validity & NFS_INO_REVAL_FORCED))
nfsi->cache_validity |= invalid;
+ if (invalid & NFS_INO_INVALID_DATA)
+ nfs_fscache_invalidate(inode);
+
return 0;
out_err:
/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 05521cadac2..f0e6c7df1a0 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -18,27 +18,6 @@ struct nfs_string;
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
-/*
- * Determine if sessions are in use.
- */
-static inline int nfs4_has_session(const struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4_1
- if (clp->cl_session)
- return 1;
-#endif /* CONFIG_NFS_V4_1 */
- return 0;
-}
-
-static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp))
- return (clp->cl_session->flags & SESSION4_PERSIST);
-#endif /* CONFIG_NFS_V4_1 */
- return 0;
-}
-
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
@@ -276,8 +255,6 @@ extern const u32 nfs41_maxwrite_overhead;
extern struct rpc_procinfo nfs4_procedures[];
#endif
-extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
-
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
@@ -319,6 +296,7 @@ extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
+extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
void nfs_zap_acl_cache(struct inode *inode);
@@ -386,9 +364,6 @@ extern int nfs_initiate_read(struct rpc_clnt *clnt,
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
-extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode,
- const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
@@ -411,9 +386,6 @@ extern struct nfs_write_header *nfs_writehdr_alloc(void);
extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
-extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags,
- const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
extern void nfs_commit_free(struct nfs_commit_data *p);
@@ -474,18 +446,6 @@ extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
const char *ip_addr,
rpc_authflavor_t authflavour);
-extern int _nfs4_call_sync(struct rpc_clnt *clnt,
- struct nfs_server *server,
- struct rpc_message *msg,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int cache_reply);
-extern int _nfs4_call_sync_session(struct rpc_clnt *clnt,
- struct nfs_server *server,
- struct rpc_message *msg,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int cache_reply);
extern int nfs40_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 015f71f8f62..91a6faf811a 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -169,6 +169,9 @@ int nfs_mount(struct nfs_mount_request *info)
(info->hostname ? info->hostname : "server"),
info->dirpath);
+ if (strlen(info->dirpath) > MNTPATHLEN)
+ return -ENAMETOOLONG;
+
if (info->noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
@@ -242,6 +245,9 @@ void nfs_umount(const struct nfs_mount_request *info)
struct rpc_clnt *clnt;
int status;
+ if (strlen(info->dirpath) > MNTPATHLEN)
+ return;
+
if (info->noresvport)
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
@@ -283,7 +289,6 @@ static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
const u32 pathname_len = strlen(pathname);
__be32 *p;
- BUG_ON(pathname_len > MNTPATHLEN);
p = xdr_reserve_space(xdr, 4 + pathname_len);
xdr_encode_opaque(p, pathname, pathname_len);
}
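
The mount_clnt.c hunk above moves path-length enforcement out of the XDR encoder (where it was a BUG_ON) and into nfs_mount()/nfs_umount(), which can fail gracefully, while the nfs2/nfs3 XDR hunks that follow downgrade the remaining assertions to WARN_ON_ONCE(). A minimal sketch of that split, assuming hypothetical names (MY_MAXPATHLEN, my_mount_path_ok, my_encode_path) in place of the real ones:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>

#define MY_MAXPATHLEN 1024		/* stand-in for MNTPATHLEN */

/* Validate at the entry point, where an error can still be returned. */
static int my_mount_path_ok(const char *dirpath)
{
	if (strlen(dirpath) > MY_MAXPATHLEN)
		return -ENAMETOOLONG;
	return 0;
}

/* The encoder trusts its callers; a warning replaces the hard BUG_ON(). */
static void my_encode_path(struct xdr_stream *xdr, const char *pathname)
{
	const u32 len = strlen(pathname);
	__be32 *p;

	WARN_ON_ONCE(len > MY_MAXPATHLEN);
	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, pathname, len);
}

Failing with -ENAMETOOLONG at the entry point keeps an oversized mount path from taking down the whole machine, which is the point of dropping the BUG_ON() calls.
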
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index d04f0df7be5..06b9df49f7f 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -195,7 +195,6 @@ static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
__be32 *p;
- BUG_ON(fh->size != NFS2_FHSIZE);
p = xdr_reserve_space(xdr, NFS2_FHSIZE);
memcpy(p, fh->data, NFS2_FHSIZE);
}
@@ -388,7 +387,7 @@ static void encode_filename(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(length > NFS2_MAXNAMLEN);
+ WARN_ON_ONCE(length > NFS2_MAXNAMLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
@@ -428,7 +427,6 @@ static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
{
__be32 *p;
- BUG_ON(length > NFS2_MAXPATHLEN);
p = xdr_reserve_space(xdr, 4);
*p = cpu_to_be32(length);
xdr_write_pages(xdr, pages, 0, length);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 69322096c32..70efb63b1e4 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -24,14 +24,14 @@
#define NFSDBG_FACILITY NFSDBG_PROC
-/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
+/* A wrapper to handle the EJUKEBOX error messages */
static int
nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
int res;
do {
res = rpc_call_sync(clnt, msg, flags);
- if (res != -EJUKEBOX && res != -EKEYEXPIRED)
+ if (res != -EJUKEBOX)
break;
freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
@@ -44,7 +44,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
static int
nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
{
- if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
+ if (task->tk_status != -EJUKEBOX)
return 0;
if (task->tk_status == -EJUKEBOX)
nfs_inc_stats(inode, NFSIOS_DELAY);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 6cbe89400df..bffc32406fb 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -198,7 +198,7 @@ static void encode_filename3(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(length > NFS3_MAXNAMLEN);
+ WARN_ON_ONCE(length > NFS3_MAXNAMLEN);
p = xdr_reserve_space(xdr, 4 + length);
xdr_encode_opaque(p, name, length);
}
@@ -238,7 +238,6 @@ out_overflow:
static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
const u32 length)
{
- BUG_ON(length > NFS3_MAXPATHLEN);
encode_uint32(xdr, length);
xdr_write_pages(xdr, pages, 0, length);
}
@@ -388,7 +387,6 @@ out_overflow:
*/
static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
{
- BUG_ON(type > NF3FIFO);
encode_uint32(xdr, type);
}
@@ -443,7 +441,7 @@ static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
{
__be32 *p;
- BUG_ON(fh->size > NFS3_FHSIZE);
+ WARN_ON_ONCE(fh->size > NFS3_FHSIZE);
p = xdr_reserve_space(xdr, 4 + fh->size);
xdr_encode_opaque(p, fh->data, fh->size);
}
@@ -1339,6 +1337,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
error = nfsacl_encode(xdr->buf, base, args->inode,
(args->mask & NFS_ACL) ?
args->acl_access : NULL, 1, 0);
+ /* FIXME: this is just broken */
BUG_ON(error < 0);
error = nfsacl_encode(xdr->buf, base + error, args->inode,
(args->mask & NFS_DFACL) ?
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a525fdefccd..a3f488b074a 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -11,6 +11,8 @@
#if IS_ENABLED(CONFIG_NFS_V4)
+#define NFS4_MAX_LOOP_ON_RECOVER (10)
+
struct idmap;
enum nfs4_client_state {
@@ -21,18 +23,12 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
NFS4CLNT_SESSION_RESET,
- NFS4CLNT_RECALL_SLOT,
NFS4CLNT_LEASE_CONFIRM,
NFS4CLNT_SERVER_SCOPE_MISMATCH,
NFS4CLNT_PURGE_STATE,
NFS4CLNT_BIND_CONN_TO_SESSION,
};
-enum nfs4_session_state {
- NFS4_SESSION_INITING,
- NFS4_SESSION_DRAINING,
-};
-
#define NFS4_RENEW_TIMEOUT 0x01
#define NFS4_RENEW_DELEGATION_CB 0x02
@@ -43,8 +39,7 @@ struct nfs4_minor_version_ops {
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int cache_reply);
+ struct nfs4_sequence_res *res);
bool (*match_stateid)(const nfs4_stateid *,
const nfs4_stateid *);
int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
@@ -241,18 +236,14 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
return server->nfs_client->cl_session;
}
-extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
extern int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
struct rpc_task *task);
extern int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
struct rpc_task *task);
-extern void nfs4_destroy_session(struct nfs4_session *session);
-extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
-extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
@@ -280,11 +271,7 @@ static inline int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
struct rpc_task *task)
{
- return 0;
-}
-
-static inline int nfs4_init_session(struct nfs_server *server)
-{
+ rpc_call_start(task);
return 0;
}
@@ -321,17 +308,20 @@ extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
int nfs4_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **);
int nfs40_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **, struct rpc_cred *);
#if defined(CONFIG_NFS_V4_1)
-struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
int nfs41_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **, struct rpc_cred *);
extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
+extern void nfs41_server_notify_target_slotid_update(struct nfs_client *clp);
+extern void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp);
+
#else
static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
@@ -349,11 +339,12 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs_inode_find_state_and_recover(struct inode *inode,
const nfs4_stateid *stateid);
extern void nfs4_schedule_lease_recovery(struct nfs_client *);
+extern int nfs4_wait_clnt_recover(struct nfs_client *clp);
+extern int nfs4_client_recover_expired_lease(struct nfs_client *clp);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
-extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs41_handle_server_scope(struct nfs_client *,
struct nfs41_server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 6bacfde1319..acc34726812 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -12,6 +12,7 @@
#include "internal.h"
#include "callback.h"
#include "delegation.h"
+#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
@@ -713,10 +714,6 @@ static int nfs4_server_common_setup(struct nfs_server *server,
struct nfs_fattr *fattr;
int error;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
/* data servers support only a subset of NFSv4.1 */
if (is_ds_only_client(server->nfs_client))
return -EPROTONOSUPPORT;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index afddd6639af..08ddcccb888 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -5,6 +5,7 @@
*/
#include <linux/nfs_fs.h>
#include "internal.h"
+#include "fscache.h"
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -20,7 +21,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
struct iattr attr;
int err;
- BUG_ON(inode != dentry->d_inode);
/*
* If no cached dentry exists or if it's negative, NFSv4 handled the
* opens in ->lookup() or ->create().
@@ -75,6 +75,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
nfs_file_set_open_context(filp, ctx);
+ nfs_fscache_set_inode_cookie(inode, filp);
err = 0;
out_put_ctx:
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e45fd9c02a..194c4841033 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -35,6 +35,7 @@
#include <linux/sunrpc/metrics.h>
+#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4filelayout.h"
@@ -178,7 +179,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
break;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
- case -EKEYEXPIRED:
rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
@@ -306,12 +306,10 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
}
rdata->read_done_cb = filelayout_read_done_cb;
- if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
- &rdata->args.seq_args, &rdata->res.seq_res,
- task))
- return;
-
- rpc_call_start(task);
+ nfs41_setup_sequence(rdata->ds_clp->cl_session,
+ &rdata->args.seq_args,
+ &rdata->res.seq_res,
+ task);
}
static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -408,12 +406,10 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
rpc_exit(task, 0);
return;
}
- if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
- &wdata->args.seq_args, &wdata->res.seq_res,
- task))
- return;
-
- rpc_call_start(task);
+ nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args,
+ &wdata->res.seq_res,
+ task);
}
static void filelayout_write_call_done(struct rpc_task *task, void *data)
@@ -449,12 +445,10 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
struct nfs_commit_data *wdata = data;
- if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
- &wdata->args.seq_args, &wdata->res.seq_res,
- task))
- return;
-
- rpc_call_start(task);
+ nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args,
+ &wdata->res.seq_res,
+ task);
}
static void filelayout_write_commit_done(struct rpc_task *task, void *data)
@@ -512,7 +506,6 @@ filelayout_read_pagelist(struct nfs_read_data *data)
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
- int status;
dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
__func__, hdr->inode->i_ino,
@@ -538,9 +531,8 @@ filelayout_read_pagelist(struct nfs_read_data *data)
data->mds_offset = offset;
/* Perform an asynchronous read to ds */
- status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
+ nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
&filelayout_read_call_ops, RPC_TASK_SOFTCONN);
- BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -554,7 +546,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
- int status;
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
@@ -579,10 +570,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
- status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
+ nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
&filelayout_write_call_ops, sync,
RPC_TASK_SOFTCONN);
- BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -909,7 +899,7 @@ static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- BUG_ON(pgio->pg_lseg != NULL);
+ WARN_ON_ONCE(pgio->pg_lseg != NULL);
if (req->wb_offset != req->wb_pgbase) {
/*
@@ -939,7 +929,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_commit_info cinfo;
int status;
- BUG_ON(pgio->pg_lseg != NULL);
+ WARN_ON_ONCE(pgio->pg_lseg != NULL);
if (req->wb_offset != req->wb_pgbase)
goto out_mds;
@@ -1187,7 +1177,6 @@ static void filelayout_recover_commit_reqs(struct list_head *dst,
*/
for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
- BUG_ON(!list_empty(&b->written));
pnfs_put_lseg(b->wlseg);
b->wlseg = NULL;
}
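
Each filelayout ->rpc_call_prepare above loses its "if (nfs41_setup_sequence(...)) return; rpc_call_start(task);" dance because nfs41_setup_sequence() now calls rpc_call_start() itself on success and puts the task to sleep on the slot-table waitqueue otherwise. A minimal sketch of a prepare callback under the new convention; struct my_call_data is a hypothetical stand-in for the real read/write/commit data:

#include <linux/nfs_fs.h>
#include "nfs4_fs.h"	/* assumes the NFS-internal headers, as in the hunks above */

/* Hypothetical per-call data; the real callers embed the sequence
 * args/res inside their NFS read/write/commit argument structures. */
struct my_call_data {
	struct nfs_client		*ds_clp;
	struct nfs4_sequence_args	seq_args;
	struct nfs4_sequence_res	seq_res;
};

static void my_prepare(struct rpc_task *task, void *calldata)
{
	struct my_call_data *d = calldata;

	/* Either starts the task or queues it; no return-value check. */
	nfs41_setup_sequence(d->ds_clp->cl_session,
			     &d->seq_args, &d->seq_res, task);
}

Deciding "start now or sleep" in one place is also what lets the privileged/priority handling move into the sequence arguments in the nfs4proc.c hunks below.
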
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index a8eaa9b7bb0..b720064bcd7 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include "internal.h"
+#include "nfs4session.h"
#include "nfs4filelayout.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -162,8 +163,6 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
- BUG_ON(list_empty(&ds->ds_addrs));
-
list_for_each_entry(da, &ds->ds_addrs, da_node) {
dprintk("%s: DS %s: trying address %s\n",
__func__, ds->ds_remotestr, da->da_remotestr);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5eec4429970..5d864fb3657 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -52,7 +52,6 @@
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
-#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
@@ -64,14 +63,14 @@
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
+#include "nfs4session.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PROC
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
-#define NFS4_MAX_LOOP_ON_RECOVER (10)
-
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
@@ -206,7 +205,6 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
{
__be32 *start, *p;
- BUG_ON(readdir->count < 80);
if (cookie > 2) {
readdir->cookie = cookie;
memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
@@ -256,22 +254,6 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
kunmap_atomic(start);
}
-static int nfs4_wait_clnt_recover(struct nfs_client *clp)
-{
- int res;
-
- might_sleep();
-
- res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
- nfs_wait_bit_killable, TASK_KILLABLE);
- if (res)
- return res;
-
- if (clp->cl_cons_state < 0)
- return clp->cl_cons_state;
- return 0;
-}
-
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
int res = 0;
@@ -351,7 +333,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
}
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
@@ -397,144 +378,136 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
#if defined(CONFIG_NFS_V4_1)
-/*
- * nfs4_free_slot - free a slot and efficiently update slot table.
- *
- * freeing a slot is trivially done by clearing its respective bit
- * in the bitmap.
- * If the freed slotid equals highest_used_slotid we want to update it
- * so that the server would be able to size down the slot table if needed,
- * otherwise we know that the highest_used_slotid is still in use.
- * When updating highest_used_slotid there may be "holes" in the bitmap
- * so we need to scan down from highest_used_slotid to 0 looking for the now
- * highest slotid in use.
- * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
- *
- * Must be called while holding tbl->slot_tbl_lock
- */
-static void
-nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
-{
- BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
- /* clear used bit in bitmap */
- __clear_bit(slotid, tbl->used_slots);
-
- /* update highest_used_slotid when it is freed */
- if (slotid == tbl->highest_used_slotid) {
- slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
- if (slotid < tbl->max_slots)
- tbl->highest_used_slotid = slotid;
- else
- tbl->highest_used_slotid = NFS4_NO_SLOT;
- }
- dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
- slotid, tbl->highest_used_slotid);
-}
-
-bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
-{
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- return true;
-}
-
-/*
- * Signal state manager thread if session fore channel is drained
- */
-static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
-{
- if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
- rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
- nfs4_set_task_privileged, NULL);
- return;
- }
-
- if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
- return;
-
- dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
- complete(&ses->fc_slot_table.complete);
-}
-
-/*
- * Signal state manager thread if session back channel is drained
- */
-void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
-{
- if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
- ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
- return;
- dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
- complete(&ses->bc_slot_table.complete);
-}
-
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
+ struct nfs4_session *session;
struct nfs4_slot_table *tbl;
+ bool send_new_highest_used_slotid = false;
- tbl = &res->sr_session->fc_slot_table;
if (!res->sr_slot) {
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
dprintk("%s: No slot\n", __func__);
return;
}
+ tbl = res->sr_slot->table;
+ session = tbl->session;
spin_lock(&tbl->slot_tbl_lock);
- nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
- nfs4_check_drain_fc_complete(res->sr_session);
+ /* Be nice to the server: try to ensure that the last transmitted
+	 * value for highest_used_slotid <= target_highest_slotid
+ */
+ if (tbl->highest_used_slotid > tbl->target_highest_slotid)
+ send_new_highest_used_slotid = true;
+
+ if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
+ send_new_highest_used_slotid = false;
+ goto out_unlock;
+ }
+ nfs4_free_slot(tbl, res->sr_slot);
+
+ if (tbl->highest_used_slotid != NFS4_NO_SLOT)
+ send_new_highest_used_slotid = false;
+out_unlock:
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
+ if (send_new_highest_used_slotid)
+ nfs41_server_notify_highest_slotid_update(session->clp);
}
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
- unsigned long timestamp;
+ struct nfs4_session *session;
+ struct nfs4_slot *slot;
struct nfs_client *clp;
-
- /*
- * sr_status remains 1 if an RPC level error occurred. The server
- * may or may not have processed the sequence operation..
- * Proceed as if the server received and processed the sequence
- * operation.
- */
- if (res->sr_status == 1)
- res->sr_status = NFS_OK;
+ bool interrupted = false;
+ int ret = 1;
/* don't increment the sequence number if the task wasn't sent */
if (!RPC_WAS_SENT(task))
goto out;
+ slot = res->sr_slot;
+ session = slot->table->session;
+
+ if (slot->interrupted) {
+ slot->interrupted = 0;
+ interrupted = true;
+ }
+
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
- ++res->sr_slot->seq_nr;
- timestamp = res->sr_renewal_time;
- clp = res->sr_session->clp;
- do_renew_lease(clp, timestamp);
+ ++slot->seq_nr;
+ clp = session->clp;
+ do_renew_lease(clp, res->sr_timestamp);
/* Check sequence flags */
if (res->sr_status_flags != 0)
nfs4_schedule_lease_recovery(clp);
+ nfs41_update_target_slotid(slot->table, slot, res);
break;
+ case 1:
+ /*
+ * sr_status remains 1 if an RPC level error occurred.
+ * The server may or may not have processed the sequence
+	 * operation.
+ * Mark the slot as having hosted an interrupted RPC call.
+ */
+ slot->interrupted = 1;
+ goto out;
case -NFS4ERR_DELAY:
/* The server detected a resend of the RPC call and
* returned NFS4ERR_DELAY as per Section 2.10.6.2
* of RFC5661.
*/
- dprintk("%s: slot=%td seq=%d: Operation in progress\n",
+ dprintk("%s: slot=%u seq=%u: Operation in progress\n",
__func__,
- res->sr_slot - res->sr_session->fc_slot_table.slots,
- res->sr_slot->seq_nr);
+ slot->slot_nr,
+ slot->seq_nr);
goto out_retry;
+ case -NFS4ERR_BADSLOT:
+ /*
+ * The slot id we used was probably retired. Try again
+ * using a different slot id.
+ */
+ goto retry_nowait;
+ case -NFS4ERR_SEQ_MISORDERED:
+ /*
+ * Was the last operation on this sequence interrupted?
+ * If so, retry after bumping the sequence number.
+ */
+ if (interrupted) {
+ ++slot->seq_nr;
+ goto retry_nowait;
+ }
+ /*
+ * Could this slot have been previously retired?
+ * If so, then the server may be expecting seq_nr = 1!
+ */
+ if (slot->seq_nr != 1) {
+ slot->seq_nr = 1;
+ goto retry_nowait;
+ }
+ break;
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ ++slot->seq_nr;
+ goto retry_nowait;
default:
/* Just update the slot sequence no. */
- ++res->sr_slot->seq_nr;
+ ++slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
nfs41_sequence_free_slot(res);
- return 1;
+ return ret;
+retry_nowait:
+ if (rpc_restart_call_prepare(task)) {
+ task->tk_status = 0;
+ ret = 0;
+ }
+ goto out;
out_retry:
if (!rpc_restart_call(task))
goto out;
@@ -545,55 +518,27 @@ out_retry:
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
- if (res->sr_session == NULL)
+ if (res->sr_slot == NULL)
return 1;
return nfs41_sequence_done(task, res);
}
-/*
- * nfs4_find_slot - efficiently look for a free slot
- *
- * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
- * If found, we mark the slot as used, update the highest_used_slotid,
- * and respectively set up the sequence operation args.
- * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
- *
- * Note: must be called with under the slot_tbl_lock.
- */
-static u32
-nfs4_find_slot(struct nfs4_slot_table *tbl)
-{
- u32 slotid;
- u32 ret_id = NFS4_NO_SLOT;
-
- dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
- __func__, tbl->used_slots[0], tbl->highest_used_slotid,
- tbl->max_slots);
- slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
- if (slotid >= tbl->max_slots)
- goto out;
- __set_bit(slotid, tbl->used_slots);
- if (slotid > tbl->highest_used_slotid ||
- tbl->highest_used_slotid == NFS4_NO_SLOT)
- tbl->highest_used_slotid = slotid;
- ret_id = slotid;
-out:
- dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
- __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
- return ret_id;
-}
-
static void nfs41_init_sequence(struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res, int cache_reply)
{
- args->sa_session = NULL;
+ args->sa_slot = NULL;
args->sa_cache_this = 0;
+ args->sa_privileged = 0;
if (cache_reply)
args->sa_cache_this = 1;
- res->sr_session = NULL;
res->sr_slot = NULL;
}
+static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
+{
+ args->sa_privileged = 1;
+}
+
int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -601,59 +546,59 @@ int nfs41_setup_sequence(struct nfs4_session *session,
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
- u32 slotid;
dprintk("--> %s\n", __func__);
/* slot already allocated? */
if (res->sr_slot != NULL)
- return 0;
+ goto out_success;
tbl = &session->fc_slot_table;
+ task->tk_timeout = 0;
+
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
- !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
+ !args->sa_privileged) {
/* The state manager will wait until the slot table is empty */
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s session is draining\n", __func__);
- return -EAGAIN;
- }
-
- if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
- !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("%s enforce FIFO order\n", __func__);
- return -EAGAIN;
+ goto out_sleep;
}
- slotid = nfs4_find_slot(tbl);
- if (slotid == NFS4_NO_SLOT) {
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
+ slot = nfs4_alloc_slot(tbl);
+ if (IS_ERR(slot)) {
+ /* If out of memory, try again in 1/4 second */
+ if (slot == ERR_PTR(-ENOMEM))
+ task->tk_timeout = HZ >> 2;
dprintk("<-- %s: no free slots\n", __func__);
- return -EAGAIN;
+ goto out_sleep;
}
spin_unlock(&tbl->slot_tbl_lock);
- rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
- slot = tbl->slots + slotid;
- args->sa_session = session;
- args->sa_slotid = slotid;
+ args->sa_slot = slot;
- dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
+ dprintk("<-- %s slotid=%d seqid=%d\n", __func__,
+ slot->slot_nr, slot->seq_nr);
- res->sr_session = session;
res->sr_slot = slot;
- res->sr_renewal_time = jiffies;
+ res->sr_timestamp = jiffies;
res->sr_status_flags = 0;
/*
* sr_status is only set in decode_sequence, and so will remain
* set to 1 if an rpc level failure occurs.
*/
res->sr_status = 1;
+out_success:
+ rpc_call_start(task);
return 0;
+out_sleep:
+ /* Privileged tasks are queued with top priority */
+ if (args->sa_privileged)
+ rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
+ NULL, RPC_PRIORITY_PRIVILEGED);
+ else
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
@@ -665,12 +610,14 @@ int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_session *session = nfs4_get_session(server);
int ret = 0;
- if (session == NULL)
+ if (session == NULL) {
+ rpc_call_start(task);
goto out;
+ }
- dprintk("--> %s clp %p session %p sr_slot %td\n",
+ dprintk("--> %s clp %p session %p sr_slot %d\n",
__func__, session->clp, session, res->sr_slot ?
- res->sr_slot - session->fc_slot_table.slots : -1);
+ res->sr_slot->slot_nr : -1);
ret = nfs41_setup_sequence(session, args, res, task);
out:
@@ -687,19 +634,11 @@ struct nfs41_call_sync_data {
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
+ struct nfs4_session *session = nfs4_get_session(data->seq_server);
dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
- if (nfs4_setup_sequence(data->seq_server, data->seq_args,
- data->seq_res, task))
- return;
- rpc_call_start(task);
-}
-
-static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
-{
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- nfs41_call_sync_prepare(task, calldata);
+ nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
@@ -714,17 +653,11 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
-static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
- .rpc_call_prepare = nfs41_call_priv_sync_prepare,
- .rpc_call_done = nfs41_call_sync_done,
-};
-
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int privileged)
+ struct nfs4_sequence_res *res)
{
int ret;
struct rpc_task *task;
@@ -740,8 +673,6 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
.callback_data = &data
};
- if (privileged)
- task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
@@ -752,24 +683,18 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
return ret;
}
-int _nfs4_call_sync_session(struct rpc_clnt *clnt,
- struct nfs_server *server,
- struct rpc_message *msg,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int cache_reply)
-{
- nfs41_init_sequence(args, res, cache_reply);
- return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
-}
-
#else
-static inline
+static
void nfs41_init_sequence(struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res, int cache_reply)
{
}
+static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
+{
+}
+
+
static int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
@@ -777,18 +702,17 @@ static int nfs4_sequence_done(struct rpc_task *task,
}
#endif /* CONFIG_NFS_V4_1 */
+static
int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res,
- int cache_reply)
+ struct nfs4_sequence_res *res)
{
- nfs41_init_sequence(args, res, cache_reply);
return rpc_call_sync(clnt, msg, 0);
}
-static inline
+static
int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
@@ -796,8 +720,9 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs4_sequence_res *res,
int cache_reply)
{
+ nfs41_init_sequence(args, res, cache_reply);
return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
- args, res, cache_reply);
+ args, res);
}
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
@@ -809,6 +734,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
+ nfs_fscache_invalidate(dir);
spin_unlock(&dir->i_lock);
}
@@ -1445,13 +1371,6 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
nfs_inode_find_state_and_recover(state->inode,
stateid);
nfs4_schedule_stateid_recovery(server, state);
- case -EKEYEXPIRED:
- /*
- * User RPCSEC_GSS context has expired.
- * We cannot recover this stateid now, so
- * skip it and allow recovery thread to
- * proceed.
- */
case -ENOMEM:
err = 0;
goto out;
@@ -1574,20 +1493,12 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
&data->o_res.seq_res,
task) != 0)
nfs_release_seqid(data->o_arg.seqid);
- else
- rpc_call_start(task);
return;
unlock_no_action:
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
-
-}
-
-static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
-{
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- nfs4_open_prepare(task, calldata);
+ nfs4_sequence_done(task, &data->o_res.seq_res);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
@@ -1648,12 +1559,6 @@ static const struct rpc_call_ops nfs4_open_ops = {
.rpc_release = nfs4_open_release,
};
-static const struct rpc_call_ops nfs4_recover_open_ops = {
- .rpc_call_prepare = nfs4_recover_open_prepare,
- .rpc_call_done = nfs4_open_done,
- .rpc_release = nfs4_open_release,
-};
-
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
struct inode *dir = data->dir->d_inode;
@@ -1683,7 +1588,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
data->rpc_status = 0;
data->cancelled = 0;
if (isrecover)
- task_setup_data.callback_ops = &nfs4_recover_open_ops;
+ nfs4_set_sequence_privileged(&o_arg->seq_args);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1789,24 +1694,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return 0;
}
-static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
-{
- unsigned int loop;
- int ret;
-
- for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
- ret = nfs4_wait_clnt_recover(clp);
- if (ret != 0)
- break;
- if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
- !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
- break;
- nfs4_schedule_state_manager(clp);
- ret = -EIO;
- }
- return ret;
-}
-
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
return nfs4_client_recover_expired_lease(server->nfs_client);
@@ -2282,6 +2169,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
+ nfs4_sequence_done(task, &calldata->res.seq_res);
goto out;
}
@@ -2299,8 +2187,6 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
&calldata->res.seq_res,
task) != 0)
nfs_release_seqid(calldata->arg.seqid);
- else
- rpc_call_start(task);
out:
dprintk("%s: done!\n", __func__);
}
@@ -2533,7 +2419,8 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
- BUG_ON(len < 0);
+ if (len < 0)
+ return len;
for (i = 0; i < len; i++) {
/* AUTH_UNIX is the default flavor if none was specified,
@@ -3038,12 +2925,10 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->dir),
- &data->args.seq_args,
- &data->res.seq_res,
- task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(NFS_SERVER(data->dir),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
@@ -3071,12 +2956,10 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
- &data->args.seq_args,
- &data->res.seq_res,
- task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(NFS_SERVER(data->old_dir),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
@@ -3362,9 +3245,6 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
int mode = sattr->ia_mode;
int status = -ENOMEM;
- BUG_ON(!(sattr->ia_valid & ATTR_MODE));
- BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
-
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
if (data == NULL)
goto out;
@@ -3380,10 +3260,13 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
data->arg.ftype = NF4CHR;
data->arg.u.device.specdata1 = MAJOR(rdev);
data->arg.u.device.specdata2 = MINOR(rdev);
+ } else if (!S_ISSOCK(mode)) {
+ status = -EINVAL;
+ goto out_free;
}
status = nfs4_do_create(dir, dentry, data);
-
+out_free:
nfs4_free_createdata(data);
out:
return status;
@@ -3565,12 +3448,10 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message
static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
- &data->args.seq_args,
- &data->res.seq_res,
- task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
@@ -3631,22 +3512,18 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
- &data->args.seq_args,
- &data->res.seq_res,
- task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->inode),
- &data->args.seq_args,
- &data->res.seq_res,
- task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(NFS_SERVER(data->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
@@ -3937,8 +3814,13 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
goto out_free;
}
nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
- if (buf)
+ if (buf) {
+ if (res.acl_len > buflen) {
+ ret = -ERANGE;
+ goto out_free;
+ }
_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
+ }
out_ok:
ret = res.acl_len;
out_free:
@@ -4085,7 +3967,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
- case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
return -EAGAIN;
@@ -4293,11 +4174,10 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
d_data = (struct nfs4_delegreturndata *)data;
- if (nfs4_setup_sequence(d_data->res.server,
- &d_data->args.seq_args,
- &d_data->res.seq_res, task))
- return;
- rpc_call_start(task);
+ nfs4_setup_sequence(d_data->res.server,
+ &d_data->args.seq_args,
+ &d_data->res.seq_res,
+ task);
}
#endif /* CONFIG_NFS_V4_1 */
@@ -4543,6 +4423,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
+ nfs4_sequence_done(task, &calldata->res.seq_res);
return;
}
calldata->timestamp = jiffies;
@@ -4551,8 +4432,6 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
&calldata->res.seq_res,
task) != 0)
nfs_release_seqid(calldata->arg.seqid);
- else
- rpc_call_start(task);
}
static const struct rpc_call_ops nfs4_locku_ops = {
@@ -4696,8 +4575,9 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
return;
/* Do we need to do an open_to_lock_owner? */
if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
- if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
+ if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
goto out_release_lock_seqid;
+ }
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
@@ -4707,20 +4587,12 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
if (nfs4_setup_sequence(data->server,
&data->arg.seq_args,
&data->res.seq_res,
- task) == 0) {
- rpc_call_start(task);
+ task) == 0)
return;
- }
nfs_release_seqid(data->arg.open_seqid);
out_release_lock_seqid:
nfs_release_seqid(data->arg.lock_seqid);
- dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
-}
-
-static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
-{
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- nfs4_lock_prepare(task, calldata);
+ dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
@@ -4775,12 +4647,6 @@ static const struct rpc_call_ops nfs4_lock_ops = {
.rpc_release = nfs4_lock_release,
};
-static const struct rpc_call_ops nfs4_recover_lock_ops = {
- .rpc_call_prepare = nfs4_recover_lock_prepare,
- .rpc_call_done = nfs4_lock_done,
- .rpc_release = nfs4_lock_release,
-};
-
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
switch (error) {
@@ -4823,15 +4689,15 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
return -ENOMEM;
if (IS_SETLKW(cmd))
data->arg.block = 1;
- if (recovery_type > NFS_LOCK_NEW) {
- if (recovery_type == NFS_LOCK_RECLAIM)
- data->arg.reclaim = NFS_LOCK_RECLAIM;
- task_setup_data.callback_ops = &nfs4_recover_lock_ops;
- }
nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
msg.rpc_argp = &data->arg;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
+ if (recovery_type > NFS_LOCK_NEW) {
+ if (recovery_type == NFS_LOCK_RECLAIM)
+ data->arg.reclaim = NFS_LOCK_RECLAIM;
+ nfs4_set_sequence_privileged(&data->arg.seq_args);
+ }
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -5100,15 +4966,6 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
nfs4_schedule_stateid_recovery(server, state);
err = 0;
goto out;
- case -EKEYEXPIRED:
- /*
- * User RPCSEC_GSS context has expired.
- * We cannot recover this stateid now, so
- * skip it and allow recovery thread to
- * proceed.
- */
- err = 0;
- goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
@@ -5357,7 +5214,6 @@ int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred
};
dprintk("--> %s\n", __func__);
- BUG_ON(clp == NULL);
res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
if (unlikely(res.session == NULL)) {
@@ -5569,20 +5425,16 @@ struct nfs4_get_lease_time_data {
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
void *calldata)
{
- int ret;
struct nfs4_get_lease_time_data *data =
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
/* just setup sequence, do not trigger session recovery
since we're invoked within one */
- ret = nfs41_setup_sequence(data->clp->cl_session,
- &data->args->la_seq_args,
- &data->res->lr_seq_res, task);
-
- BUG_ON(ret == -EAGAIN);
- rpc_call_start(task);
+ nfs41_setup_sequence(data->clp->cl_session,
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res,
+ task);
dprintk("<-- %s\n", __func__);
}
@@ -5644,6 +5496,7 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
int status;
nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
+ nfs4_set_sequence_privileged(&args.la_seq_args);
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
@@ -5658,145 +5511,6 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
return status;
}
-static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
-{
- return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
-}
-
-static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
- struct nfs4_slot *new,
- u32 max_slots,
- u32 ivalue)
-{
- struct nfs4_slot *old = NULL;
- u32 i;
-
- spin_lock(&tbl->slot_tbl_lock);
- if (new) {
- old = tbl->slots;
- tbl->slots = new;
- tbl->max_slots = max_slots;
- }
- tbl->highest_used_slotid = NFS4_NO_SLOT;
- for (i = 0; i < tbl->max_slots; i++)
- tbl->slots[i].seq_nr = ivalue;
- spin_unlock(&tbl->slot_tbl_lock);
- kfree(old);
-}
-
-/*
- * (re)Initialise a slot table
- */
-static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
- u32 ivalue)
-{
- struct nfs4_slot *new = NULL;
- int ret = -ENOMEM;
-
- dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
- max_reqs, tbl->max_slots);
-
- /* Does the newly negotiated max_reqs match the existing slot table? */
- if (max_reqs != tbl->max_slots) {
- new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
- if (!new)
- goto out;
- }
- ret = 0;
-
- nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
- dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
- tbl, tbl->slots, tbl->max_slots);
-out:
- dprintk("<-- %s: return %d\n", __func__, ret);
- return ret;
-}
-
-/* Destroy the slot table */
-static void nfs4_destroy_slot_tables(struct nfs4_session *session)
-{
- if (session->fc_slot_table.slots != NULL) {
- kfree(session->fc_slot_table.slots);
- session->fc_slot_table.slots = NULL;
- }
- if (session->bc_slot_table.slots != NULL) {
- kfree(session->bc_slot_table.slots);
- session->bc_slot_table.slots = NULL;
- }
- return;
-}
-
-/*
- * Initialize or reset the forechannel and backchannel tables
- */
-static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
-{
- struct nfs4_slot_table *tbl;
- int status;
-
- dprintk("--> %s\n", __func__);
- /* Fore channel */
- tbl = &ses->fc_slot_table;
- status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
- if (status) /* -ENOMEM */
- return status;
- /* Back channel */
- tbl = &ses->bc_slot_table;
- status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
- if (status && tbl->slots == NULL)
- /* Fore and back channel share a connection so get
- * both slot tables or neither */
- nfs4_destroy_slot_tables(ses);
- return status;
-}
-
-struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
-{
- struct nfs4_session *session;
- struct nfs4_slot_table *tbl;
-
- session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
- if (!session)
- return NULL;
-
- tbl = &session->fc_slot_table;
- tbl->highest_used_slotid = NFS4_NO_SLOT;
- spin_lock_init(&tbl->slot_tbl_lock);
- rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
- init_completion(&tbl->complete);
-
- tbl = &session->bc_slot_table;
- tbl->highest_used_slotid = NFS4_NO_SLOT;
- spin_lock_init(&tbl->slot_tbl_lock);
- rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
- init_completion(&tbl->complete);
-
- session->session_state = 1<<NFS4_SESSION_INITING;
-
- session->clp = clp;
- return session;
-}
-
-void nfs4_destroy_session(struct nfs4_session *session)
-{
- struct rpc_xprt *xprt;
- struct rpc_cred *cred;
-
- cred = nfs4_get_exchange_id_cred(session->clp);
- nfs4_proc_destroy_session(session, cred);
- if (cred)
- put_rpccred(cred);
-
- rcu_read_lock();
- xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
- rcu_read_unlock();
- dprintk("%s Destroy backchannel for xprt %p\n",
- __func__, xprt);
- xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
- nfs4_destroy_slot_tables(session);
- kfree(session);
-}
-
/*
* Initialize the values to be used by the client in CREATE_SESSION
* If nfs4_init_session set the fore channel request and response sizes,
@@ -5809,8 +5523,8 @@ void nfs4_destroy_session(struct nfs4_session *session)
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
struct nfs4_session *session = args->client->cl_session;
- unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
- mxresp_sz = session->fc_attrs.max_resp_sz;
+ unsigned int mxrqst_sz = session->fc_target_max_rqst_sz,
+ mxresp_sz = session->fc_target_max_resp_sz;
if (mxrqst_sz == 0)
mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
@@ -5919,10 +5633,9 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
- if (!status)
+ if (!status) {
/* Verify the session's negotiated channel_attrs values */
status = nfs4_verify_channel_attrs(&args, session);
- if (!status) {
/* Increment the clientid slot sequence id */
clp->cl_seqid++;
}
@@ -5992,83 +5705,6 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
}
/*
- * With sessions, the client is not marked ready until after a
- * successful EXCHANGE_ID and CREATE_SESSION.
- *
- * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
- * other versions of NFS can be tried.
- */
-static int nfs41_check_session_ready(struct nfs_client *clp)
-{
- int ret;
-
- if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
- ret = nfs4_client_recover_expired_lease(clp);
- if (ret)
- return ret;
- }
- if (clp->cl_cons_state < NFS_CS_READY)
- return -EPROTONOSUPPORT;
- smp_rmb();
- return 0;
-}
-
-int nfs4_init_session(struct nfs_server *server)
-{
- struct nfs_client *clp = server->nfs_client;
- struct nfs4_session *session;
- unsigned int rsize, wsize;
-
- if (!nfs4_has_session(clp))
- return 0;
-
- session = clp->cl_session;
- spin_lock(&clp->cl_lock);
- if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
-
- rsize = server->rsize;
- if (rsize == 0)
- rsize = NFS_MAX_FILE_IO_SIZE;
- wsize = server->wsize;
- if (wsize == 0)
- wsize = NFS_MAX_FILE_IO_SIZE;
-
- session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
- session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
- }
- spin_unlock(&clp->cl_lock);
-
- return nfs41_check_session_ready(clp);
-}
-
-int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
-{
- struct nfs4_session *session = clp->cl_session;
- int ret;
-
- spin_lock(&clp->cl_lock);
- if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
- /*
- * Do not set NFS_CS_CHECK_LEASE_TIME instead set the
- * DS lease to be equal to the MDS lease.
- */
- clp->cl_lease_time = lease_time;
- clp->cl_last_renewal = jiffies;
- }
- spin_unlock(&clp->cl_lock);
-
- ret = nfs41_check_session_ready(clp);
- if (ret)
- return ret;
- /* Test for the DS role */
- if (!is_ds_client(clp))
- return -ENODEV;
- return 0;
-}
-EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
-
-
-/*
* Renew the cl_session lease.
*/
struct nfs4_sequence_data {
@@ -6133,9 +5769,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
args = task->tk_msg.rpc_argp;
res = task->tk_msg.rpc_resp;
- if (nfs41_setup_sequence(clp->cl_session, args, res, task))
- return;
- rpc_call_start(task);
+ nfs41_setup_sequence(clp->cl_session, args, res, task);
}
static const struct rpc_call_ops nfs41_sequence_ops = {
@@ -6144,7 +5778,9 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_release = nfs41_sequence_release,
};
-static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
+ struct rpc_cred *cred,
+ bool is_privileged)
{
struct nfs4_sequence_data *calldata;
struct rpc_message msg = {
@@ -6166,6 +5802,8 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
return ERR_PTR(-ENOMEM);
}
nfs41_init_sequence(&calldata->args, &calldata->res, 0);
+ if (is_privileged)
+ nfs4_set_sequence_privileged(&calldata->args);
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
calldata->clp = clp;
@@ -6181,7 +5819,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
return 0;
- task = _nfs41_proc_sequence(clp, cred);
+ task = _nfs41_proc_sequence(clp, cred, false);
if (IS_ERR(task))
ret = PTR_ERR(task);
else
@@ -6195,7 +5833,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
struct rpc_task *task;
int ret;
- task = _nfs41_proc_sequence(clp, cred);
+ task = _nfs41_proc_sequence(clp, cred, true);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
@@ -6224,13 +5862,10 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
- rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- if (nfs41_setup_sequence(calldata->clp->cl_session,
- &calldata->arg.seq_args,
- &calldata->res.seq_res, task))
- return;
-
- rpc_call_start(task);
+ nfs41_setup_sequence(calldata->clp->cl_session,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res,
+ task);
}
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
@@ -6307,6 +5942,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
calldata->arg.one_fs = 0;
nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
+ nfs4_set_sequence_privileged(&calldata->arg.seq_args);
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
@@ -6330,6 +5966,7 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct nfs4_session *session = nfs4_get_session(server);
dprintk("--> %s\n", __func__);
/* Note there is a race here, where a CB_LAYOUTRECALL can come in
@@ -6337,16 +5974,14 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
* However, that is not so catastrophic, and there seems
* to be no way to prevent it completely.
*/
- if (nfs4_setup_sequence(server, &lgp->args.seq_args,
+ if (nfs41_setup_sequence(session, &lgp->args.seq_args,
&lgp->res.seq_res, task))
return;
if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
NFS_I(lgp->args.inode)->layout,
lgp->args.ctx->state)) {
rpc_exit(task, NFS4_OK);
- return;
}
- rpc_call_start(task);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
@@ -6359,7 +5994,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
dprintk("--> %s\n", __func__);
- if (!nfs4_sequence_done(task, &lgp->res.seq_res))
+ if (!nfs41_sequence_done(task, &lgp->res.seq_res))
goto out;
switch (task->tk_status) {
@@ -6510,10 +6145,10 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
- if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
- &lrp->res.seq_res, task))
- return;
- rpc_call_start(task);
+ nfs41_setup_sequence(lrp->clp->cl_session,
+ &lrp->args.seq_args,
+ &lrp->res.seq_res,
+ task);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
@@ -6523,7 +6158,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
dprintk("--> %s\n", __func__);
- if (!nfs4_sequence_done(task, &lrp->res.seq_res))
+ if (!nfs41_sequence_done(task, &lrp->res.seq_res))
return;
server = NFS_SERVER(lrp->args.inode);
@@ -6672,11 +6307,12 @@ static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
+ struct nfs4_session *session = nfs4_get_session(server);
- if (nfs4_setup_sequence(server, &data->args.seq_args,
- &data->res.seq_res, task))
- return;
- rpc_call_start(task);
+ nfs41_setup_sequence(session,
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
}
static void
@@ -6685,7 +6321,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
struct nfs4_layoutcommit_data *data = calldata;
struct nfs_server *server = NFS_SERVER(data->args.inode);
- if (!nfs4_sequence_done(task, &data->res.seq_res))
+ if (!nfs41_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) { /* Just ignore these failures */
@@ -6873,7 +6509,9 @@ static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
dprintk("NFS call test_stateid %p\n", stateid);
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
- status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
+ nfs4_set_sequence_privileged(&args.seq_args);
+ status = nfs4_call_sync_sequence(server->client, server, &msg,
+ &args.seq_args, &res.seq_res);
if (status != NFS_OK) {
dprintk("NFS reply test_stateid: failed, %d\n", status);
return status;
@@ -6920,8 +6558,9 @@ static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
dprintk("NFS call free_stateid %p\n", stateid);
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
+ nfs4_set_sequence_privileged(&args.seq_args);
status = nfs4_call_sync_sequence(server->client, server, &msg,
- &args.seq_args, &res.seq_res, 1);
+ &args.seq_args, &res.seq_res);
dprintk("NFS reply free_stateid: %d\n", status);
return status;
}
@@ -7041,7 +6680,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
.minor_version = 1,
- .call_sync = _nfs4_call_sync_session,
+ .call_sync = nfs4_call_sync_sequence,
.match_stateid = nfs41_match_stateid,
.find_root_sec = nfs41_find_root_sec,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
new file mode 100644
index 00000000000..ebda5f4a031
--- /dev/null
+++ b/fs/nfs/nfs4session.c
@@ -0,0 +1,552 @@
+/*
+ * fs/nfs/nfs4session.c
+ *
+ * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/module.h>
+
+#include "nfs4_fs.h"
+#include "internal.h"
+#include "nfs4session.h"
+#include "callback.h"
+
+#define NFSDBG_FACILITY NFSDBG_STATE
+
+/*
+ * nfs4_shrink_slot_table - free retired slots from the slot table
+ */
+static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
+{
+ struct nfs4_slot **p;
+ if (newsize >= tbl->max_slots)
+ return;
+
+ p = &tbl->slots;
+ while (newsize--)
+ p = &(*p)->next;
+ while (*p) {
+ struct nfs4_slot *slot = *p;
+
+ *p = slot->next;
+ kfree(slot);
+ tbl->max_slots--;
+ }
+}
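For readers unfamiliar with the pointer-to-pointer walk used here, a brief worked trace of nfs4_shrink_slot_table() follows; the slot counts are assumed purely for illustration and are not taken from the patch.

    /*
     * Illustration only (assumed sizes): with slots 0..7 linked in order
     * (tbl->max_slots == 8) and newsize == 4, the first loop advances p
     * past slots 0..3, leaving *p pointing at the link to slot 4; the
     * second loop then unlinks and kfree()s slots 4..7, decrementing
     * tbl->max_slots back to 4.  If newsize >= max_slots there is nothing
     * to retire and the function returns immediately.
     */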
+
+/*
+ * nfs4_free_slot - free a slot and efficiently update slot table.
+ *
+ * Freeing a slot is trivially done by clearing its respective bit
+ * in the bitmap.
+ * If the freed slotid equals highest_used_slotid we want to update it
+ * so that the server can size down the slot table if needed;
+ * otherwise we know that highest_used_slotid is still in use.
+ * When updating highest_used_slotid there may be "holes" in the bitmap,
+ * so we scan down from highest_used_slotid to 0 looking for the new
+ * highest slotid in use.
+ * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
+ *
+ * Must be called while holding tbl->slot_tbl_lock
+ */
+void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
+{
+ u32 slotid = slot->slot_nr;
+
+ /* clear used bit in bitmap */
+ __clear_bit(slotid, tbl->used_slots);
+
+ /* update highest_used_slotid when it is freed */
+ if (slotid == tbl->highest_used_slotid) {
+ u32 new_max = find_last_bit(tbl->used_slots, slotid);
+ if (new_max < slotid)
+ tbl->highest_used_slotid = new_max;
+ else {
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ nfs4_session_drain_complete(tbl->session, tbl);
+ }
+ }
+ dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
+ slotid, tbl->highest_used_slotid);
+}
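A short worked example of the highest_used_slotid update above; the bit positions are assumptions chosen for illustration.

    /*
     * Illustration only (assumed state): used_slots has bits 0, 2 and 5
     * set and highest_used_slotid == 5.  Freeing slot 5 clears bit 5;
     * because 5 was the highest slot in use, find_last_bit(used_slots, 5)
     * scans bits 0..4, finds bit 2, and highest_used_slotid becomes 2.
     * Had slot 5 been the only slot in use, find_last_bit() would return
     * 5 (nothing set below it), highest_used_slotid would become
     * NFS4_NO_SLOT, and a draining session would be signalled through
     * nfs4_session_drain_complete().
     */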
+
+static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
+ u32 slotid, u32 seq_init, gfp_t gfp_mask)
+{
+ struct nfs4_slot *slot;
+
+ slot = kzalloc(sizeof(*slot), gfp_mask);
+ if (slot) {
+ slot->table = tbl;
+ slot->slot_nr = slotid;
+ slot->seq_nr = seq_init;
+ }
+ return slot;
+}
+
+static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
+ u32 slotid, u32 seq_init, gfp_t gfp_mask)
+{
+ struct nfs4_slot **p, *slot;
+
+ p = &tbl->slots;
+ for (;;) {
+ if (*p == NULL) {
+ *p = nfs4_new_slot(tbl, tbl->max_slots,
+ seq_init, gfp_mask);
+ if (*p == NULL)
+ break;
+ tbl->max_slots++;
+ }
+ slot = *p;
+ if (slot->slot_nr == slotid)
+ return slot;
+ p = &slot->next;
+ }
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * nfs4_alloc_slot - efficiently look for a free slot
+ *
+ * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
+ * If found, we mark the slot as used, update highest_used_slotid,
+ * and return the slot so the caller can set up the sequence operation args.
+ *
+ * Note: must be called while holding the slot_tbl_lock.
+ */
+struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
+{
+ struct nfs4_slot *ret = ERR_PTR(-EBUSY);
+ u32 slotid;
+
+ dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ tbl->max_slotid + 1);
+ slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
+ if (slotid > tbl->max_slotid)
+ goto out;
+ ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
+ if (IS_ERR(ret))
+ goto out;
+ __set_bit(slotid, tbl->used_slots);
+ if (slotid > tbl->highest_used_slotid ||
+ tbl->highest_used_slotid == NFS4_NO_SLOT)
+ tbl->highest_used_slotid = slotid;
+ ret->generation = tbl->generation;
+
+out:
+ dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ !IS_ERR(ret) ? ret->slot_nr : -1);
+ return ret;
+}
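To show how the allocator is intended to be consumed, here is a minimal caller sketch. It only uses names visible in this patch (nfs4_alloc_slot, nfs4_free_slot, slot_tbl_lock, sa_slot/sr_slot), but the surrounding function is hypothetical and the error path is simplified: real callers park the task on slot_tbl_waitq instead of returning.

    struct nfs4_slot *slot;

    spin_lock(&tbl->slot_tbl_lock);
    slot = nfs4_alloc_slot(tbl);        /* sets the used bit, may grow the list */
    spin_unlock(&tbl->slot_tbl_lock);
    if (IS_ERR(slot))
            return PTR_ERR(slot);       /* -EBUSY (table full) or -ENOMEM */

    args->sa_slot = slot;               /* encode_sequence() reads slot_nr/seq_nr */
    res->sr_slot = slot;
    /* ... issue the SEQUENCE-bearing RPC; once it completes: ... */

    spin_lock(&tbl->slot_tbl_lock);
    nfs4_free_slot(tbl, slot);          /* may lower highest_used_slotid */
    spin_unlock(&tbl->slot_tbl_lock);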
+
+static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
+ u32 max_reqs, u32 ivalue)
+{
+ if (max_reqs <= tbl->max_slots)
+ return 0;
+ if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
+ return 0;
+ return -ENOMEM;
+}
+
+static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
+ u32 server_highest_slotid,
+ u32 ivalue)
+{
+ struct nfs4_slot **p;
+
+ nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
+ p = &tbl->slots;
+ while (*p) {
+ (*p)->seq_nr = ivalue;
+ (*p)->interrupted = 0;
+ p = &(*p)->next;
+ }
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ tbl->target_highest_slotid = server_highest_slotid;
+ tbl->server_highest_slotid = server_highest_slotid;
+ tbl->d_target_highest_slotid = 0;
+ tbl->d2_target_highest_slotid = 0;
+ tbl->max_slotid = server_highest_slotid;
+}
+
+/*
+ * (re)Initialise a slot table
+ */
+static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
+ u32 max_reqs, u32 ivalue)
+{
+ int ret;
+
+ dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
+ max_reqs, tbl->max_slots);
+
+ if (max_reqs > NFS4_MAX_SLOT_TABLE)
+ max_reqs = NFS4_MAX_SLOT_TABLE;
+
+ ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
+ if (ret)
+ goto out;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
+ spin_unlock(&tbl->slot_tbl_lock);
+
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+}
+
+/* Destroy the slot table */
+static void nfs4_destroy_slot_tables(struct nfs4_session *session)
+{
+ nfs4_shrink_slot_table(&session->fc_slot_table, 0);
+ nfs4_shrink_slot_table(&session->bc_slot_table, 0);
+}
+
+static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
+{
+ struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
+ struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+ struct nfs4_slot *slot = pslot;
+ struct nfs4_slot_table *tbl = slot->table;
+
+ if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+ return false;
+ slot->generation = tbl->generation;
+ args->sa_slot = slot;
+ res->sr_timestamp = jiffies;
+ res->sr_slot = slot;
+ res->sr_status_flags = 0;
+ res->sr_status = 1;
+ return true;
+}
+
+static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot)
+{
+ if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
+ return true;
+ return false;
+}
+
+bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot)
+{
+ if (slot->slot_nr > tbl->max_slotid)
+ return false;
+ return __nfs41_wake_and_assign_slot(tbl, slot);
+}
+
+static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
+{
+ struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
+ if (!IS_ERR(slot)) {
+ bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
+ if (ret)
+ return ret;
+ nfs4_free_slot(tbl, slot);
+ }
+ return false;
+}
+
+void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
+{
+ for (;;) {
+ if (!nfs41_try_wake_next_slot_table_entry(tbl))
+ break;
+ }
+}
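The two wake helpers above cover different situations; the sketch below is a hedged reading of how they fit together (the completion-side pattern is an assumption about callers that are not part of this hunk).

    /*
     * - On request completion a caller can try to hand the finished slot
     *   directly to the next waiter instead of freeing it (sketch only):
     *
     *       spin_lock(&tbl->slot_tbl_lock);
     *       if (!nfs41_wake_and_assign_slot(tbl, slot))
     *               nfs4_free_slot(tbl, slot);
     *       spin_unlock(&tbl->slot_tbl_lock);
     *
     * - When max_slotid grows, nfs41_set_max_slotid_locked() calls
     *   nfs41_wake_slot_table(), which keeps allocating fresh slots and
     *   assigning them to queued tasks until the wait queue or the slot
     *   table runs dry.  In both paths nfs41_assign_slot() refuses
     *   non-privileged tasks while the session is draining.
     */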
+
+static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
+ u32 target_highest_slotid)
+{
+ u32 max_slotid;
+
+ max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
+ if (max_slotid > tbl->server_highest_slotid)
+ max_slotid = tbl->server_highest_slotid;
+ if (max_slotid > tbl->target_highest_slotid)
+ max_slotid = tbl->target_highest_slotid;
+ tbl->max_slotid = max_slotid;
+ nfs41_wake_slot_table(tbl);
+}
+
+/* Update the client's idea of target_highest_slotid */
+static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
+ u32 target_highest_slotid)
+{
+ if (tbl->target_highest_slotid == target_highest_slotid)
+ return;
+ tbl->target_highest_slotid = target_highest_slotid;
+ tbl->generation++;
+}
+
+void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
+ u32 target_highest_slotid)
+{
+ spin_lock(&tbl->slot_tbl_lock);
+ nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
+ tbl->d_target_highest_slotid = 0;
+ tbl->d2_target_highest_slotid = 0;
+ nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
+ spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
+ u32 highest_slotid)
+{
+ if (tbl->server_highest_slotid == highest_slotid)
+ return;
+ if (tbl->highest_used_slotid > highest_slotid)
+ return;
+ /* Deallocate slots */
+ nfs4_shrink_slot_table(tbl, highest_slotid + 1);
+ tbl->server_highest_slotid = highest_slotid;
+}
+
+static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
+{
+ s1 -= s2;
+ if (s1 == 0)
+ return 0;
+ if (s1 < 0)
+ return (s1 - 1) >> 1;
+ return (s1 + 1) >> 1;
+}
+
+static int nfs41_sign_s32(s32 s1)
+{
+ if (s1 > 0)
+ return 1;
+ if (s1 < 0)
+ return -1;
+ return 0;
+}
+
+static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
+{
+ if (!s1 || !s2)
+ return true;
+ return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
+}
+
+/* Try to eliminate outliers by checking for sharp changes in the
+ * first and second derivatives of the target value.
+ */
+static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
+ u32 new_target)
+{
+ s32 d_target, d2_target;
+ bool ret = true;
+
+ d_target = nfs41_derivative_target_slotid(new_target,
+ tbl->target_highest_slotid);
+ d2_target = nfs41_derivative_target_slotid(d_target,
+ tbl->d_target_highest_slotid);
+ /* Is first derivative same sign? */
+ if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
+ ret = false;
+ /* Is second derivative same sign? */
+ if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
+ ret = false;
+ tbl->d_target_highest_slotid = d_target;
+ tbl->d2_target_highest_slotid = d2_target;
+ return ret;
+}
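A worked trace of the outlier filter, using assumed slot-table values, shows the intended behaviour: a one-off spike is ignored while a sustained change takes effect on the next reply.

    /*
     * Illustration only (assumed values), where deriv() stands for
     * nfs41_derivative_target_slotid():
     *
     *   stored: target_highest_slotid = 16, d = 4, d2 = 2
     *   reply:  sr_target_highest_slotid = 4        (sharp drop)
     *     d_target  = deriv(4, 16) = -7
     *     d2_target = deriv(-7, 4) = -6
     *     both derivatives flip sign -> treated as an outlier;
     *     the target stays 16, but d = -7 and d2 = -6 are recorded.
     *   next reply: sr_target_highest_slotid = 4 again
     *     d_target = deriv(4, 16) = -7, same sign as the stored d
     *     -> not an outlier, so target_highest_slotid finally drops to 4.
     */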
+
+void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot,
+ struct nfs4_sequence_res *res)
+{
+ spin_lock(&tbl->slot_tbl_lock);
+ if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid))
+ nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
+ if (tbl->generation == slot->generation)
+ nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
+ nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid);
+ spin_unlock(&tbl->slot_tbl_lock);
+}
+
+/*
+ * Initialize or reset the forechannel and backchannel tables
+ */
+int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
+{
+ struct nfs4_slot_table *tbl;
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ /* Fore channel */
+ tbl = &ses->fc_slot_table;
+ tbl->session = ses;
+ status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
+ if (status) /* -ENOMEM */
+ return status;
+ /* Back channel */
+ tbl = &ses->bc_slot_table;
+ tbl->session = ses;
+ status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
+ if (status && tbl->slots == NULL)
+ /* Fore and back channel share a connection so get
+ * both slot tables or neither */
+ nfs4_destroy_slot_tables(ses);
+ return status;
+}
+
+struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+
+ session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+ if (!session)
+ return NULL;
+
+ tbl = &session->fc_slot_table;
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+ init_completion(&tbl->complete);
+
+ tbl = &session->bc_slot_table;
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+ init_completion(&tbl->complete);
+
+ session->session_state = 1<<NFS4_SESSION_INITING;
+
+ session->clp = clp;
+ return session;
+}
+
+void nfs4_destroy_session(struct nfs4_session *session)
+{
+ struct rpc_xprt *xprt;
+ struct rpc_cred *cred;
+
+ cred = nfs4_get_exchange_id_cred(session->clp);
+ nfs4_proc_destroy_session(session, cred);
+ if (cred)
+ put_rpccred(cred);
+
+ rcu_read_lock();
+ xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
+ rcu_read_unlock();
+ dprintk("%s Destroy backchannel for xprt %p\n",
+ __func__, xprt);
+ xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
+ nfs4_destroy_slot_tables(session);
+ kfree(session);
+}
+
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
+ * other versions of NFS can be tried.
+ */
+static int nfs41_check_session_ready(struct nfs_client *clp)
+{
+ int ret;
+
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ ret = nfs4_client_recover_expired_lease(clp);
+ if (ret)
+ return ret;
+ }
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ smp_rmb();
+ return 0;
+}
+
+int nfs4_init_session(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_session *session;
+ unsigned int target_max_rqst_sz = NFS_MAX_FILE_IO_SIZE;
+ unsigned int target_max_resp_sz = NFS_MAX_FILE_IO_SIZE;
+
+ if (!nfs4_has_session(clp))
+ return 0;
+
+ if (server->rsize != 0)
+ target_max_resp_sz = server->rsize;
+ target_max_resp_sz += nfs41_maxread_overhead;
+
+ if (server->wsize != 0)
+ target_max_rqst_sz = server->wsize;
+ target_max_rqst_sz += nfs41_maxwrite_overhead;
+
+ session = clp->cl_session;
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+ /* Initialise targets and channel attributes */
+ session->fc_target_max_rqst_sz = target_max_rqst_sz;
+ session->fc_attrs.max_rqst_sz = target_max_rqst_sz;
+ session->fc_target_max_resp_sz = target_max_resp_sz;
+ session->fc_attrs.max_resp_sz = target_max_resp_sz;
+ } else {
+ /* Just adjust the targets */
+ if (target_max_rqst_sz > session->fc_target_max_rqst_sz) {
+ session->fc_target_max_rqst_sz = target_max_rqst_sz;
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ }
+ if (target_max_resp_sz > session->fc_target_max_resp_sz) {
+ session->fc_target_max_resp_sz = target_max_resp_sz;
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ }
+ }
+ spin_unlock(&clp->cl_lock);
+
+ if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+ nfs4_schedule_lease_recovery(clp);
+
+ return nfs41_check_session_ready(clp);
+}
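For concreteness, a hedged numeric illustration of the sizing above; the value of nfs41_maxread_overhead is an assumption, since the constant is defined outside this patch.

    /*
     * Illustration only (assumed overhead): with server->rsize == 65536
     * and nfs41_maxread_overhead == 584, target_max_resp_sz becomes
     * 65536 + 584 = 66120 bytes.  On the first pass (NFS4_SESSION_INITING
     * still set) that value seeds both fc_target_max_resp_sz and
     * fc_attrs.max_resp_sz; on later mounts only the target is raised,
     * and raising it sets NFS4CLNT_SESSION_RESET so the session gets
     * renegotiated with the larger value.
     */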
+
+int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
+{
+ struct nfs4_session *session = clp->cl_session;
+ int ret;
+
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+ /*
+ * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
+ * DS lease equal to the MDS lease.
+ */
+ clp->cl_lease_time = lease_time;
+ clp->cl_last_renewal = jiffies;
+ }
+ spin_unlock(&clp->cl_lock);
+
+ ret = nfs41_check_session_ready(clp);
+ if (ret)
+ return ret;
+ /* Test for the DS role */
+ if (!is_ds_client(clp))
+ return -ENODEV;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
+
+
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
new file mode 100644
index 00000000000..6f3cb39386d
--- /dev/null
+++ b/fs/nfs/nfs4session.h
@@ -0,0 +1,142 @@
+/*
+ * fs/nfs/nfs4session.h
+ *
+ * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
+ *
+ */
+#ifndef __LINUX_FS_NFS_NFS4SESSION_H
+#define __LINUX_FS_NFS_NFS4SESSION_H
+
+/* maximum number of slots to use */
+#define NFS4_DEF_SLOT_TABLE_SIZE (16U)
+#define NFS4_MAX_SLOT_TABLE (1024U)
+#define NFS4_NO_SLOT ((u32)-1)
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+
+/* Sessions slot seqid */
+struct nfs4_slot {
+ struct nfs4_slot_table *table;
+ struct nfs4_slot *next;
+ unsigned long generation;
+ u32 slot_nr;
+ u32 seq_nr;
+ unsigned int interrupted : 1;
+};
+
+/* Sessions */
+#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
+struct nfs4_slot_table {
+ struct nfs4_session *session; /* Parent session */
+ struct nfs4_slot *slots; /* seqid per slot */
+ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
+ spinlock_t slot_tbl_lock;
+ struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ u32 max_slots; /* # slots in table */
+ u32 max_slotid; /* Max allowed slotid value */
+ u32 highest_used_slotid; /* sent to server on each SEQ.
+ * op for dynamic resizing */
+ u32 target_highest_slotid; /* Server max_slot target */
+ u32 server_highest_slotid; /* Server highest slotid */
+ s32 d_target_highest_slotid; /* Derivative */
+ s32 d2_target_highest_slotid; /* 2nd derivative */
+ unsigned long generation; /* Generation counter for
+ target_highest_slotid */
+ struct completion complete;
+};
+
+/*
+ * Session related parameters
+ */
+struct nfs4_session {
+ struct nfs4_sessionid sess_id;
+ u32 flags;
+ unsigned long session_state;
+ u32 hash_alg;
+ u32 ssv_len;
+
+ /* The fore and back channel */
+ struct nfs4_channel_attrs fc_attrs;
+ struct nfs4_slot_table fc_slot_table;
+ struct nfs4_channel_attrs bc_attrs;
+ struct nfs4_slot_table bc_slot_table;
+ struct nfs_client *clp;
+ /* Create session arguments */
+ unsigned int fc_target_max_rqst_sz;
+ unsigned int fc_target_max_resp_sz;
+};
+
+enum nfs4_session_state {
+ NFS4_SESSION_INITING,
+ NFS4_SESSION_DRAINING,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
+extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
+
+extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
+ u32 target_highest_slotid);
+extern void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot,
+ struct nfs4_sequence_res *res);
+
+extern int nfs4_setup_session_slot_tables(struct nfs4_session *ses);
+
+extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
+extern void nfs4_destroy_session(struct nfs4_session *session);
+extern int nfs4_init_session(struct nfs_server *server);
+extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
+
+extern void nfs4_session_drain_complete(struct nfs4_session *session,
+ struct nfs4_slot_table *tbl);
+
+static inline bool nfs4_session_draining(struct nfs4_session *session)
+{
+ return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+}
+
+bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot);
+void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
+
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+ if (clp->cl_session)
+ return 1;
+ return 0;
+}
+
+static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp))
+ return (clp->cl_session->flags & SESSION4_PERSIST);
+ return 0;
+}
+
+#else /* defined(CONFIG_NFS_V4_1) */
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+ return 0;
+}
+
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+ return 0;
+}
+
+static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_NFS_V4_1) */
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+#endif /* __LINUX_FS_NFS_NFS4SESSION_H */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c351e6b3983..9448c579d41 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,6 +57,7 @@
#include "callback.h"
#include "delegation.h"
#include "internal.h"
+#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
@@ -66,7 +67,6 @@
const nfs4_stateid zero_stateid;
static DEFINE_MUTEX(nfs_clid_init_mutex);
-static LIST_HEAD(nfs4_clientid_list);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
@@ -254,24 +254,27 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
struct nfs4_slot_table *tbl;
- int max_slots;
if (ses == NULL)
return;
tbl = &ses->fc_slot_table;
if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
spin_lock(&tbl->slot_tbl_lock);
- max_slots = tbl->max_slots;
- while (max_slots--) {
- if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
- nfs4_set_task_privileged,
- NULL) == NULL)
- break;
- }
+ nfs41_wake_slot_table(tbl);
spin_unlock(&tbl->slot_tbl_lock);
}
}
+/*
+ * Signal the state manager thread when the session fore channel is drained
+ */
+void nfs4_session_drain_complete(struct nfs4_session *session,
+ struct nfs4_slot_table *tbl)
+{
+ if (nfs4_session_draining(session))
+ complete(&tbl->complete);
+}
+
static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
{
spin_lock(&tbl->slot_tbl_lock);
@@ -303,7 +306,6 @@ static void nfs41_finish_session_reset(struct nfs_client *clp)
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* create_session negotiated new slot table */
- clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
nfs41_setup_state_renewal(clp);
}
@@ -1086,7 +1088,6 @@ void nfs_free_seqid(struct nfs_seqid *seqid)
*/
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
- BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid);
switch (status) {
case 0:
break;
@@ -1209,6 +1210,40 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
+int nfs4_wait_clnt_recover(struct nfs_client *clp)
+{
+ int res;
+
+ might_sleep();
+
+ res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ if (res)
+ return res;
+
+ if (clp->cl_cons_state < 0)
+ return clp->cl_cons_state;
+ return 0;
+}
+
+int nfs4_client_recover_expired_lease(struct nfs_client *clp)
+{
+ unsigned int loop;
+ int ret;
+
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret != 0)
+ break;
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
+ !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
+ break;
+ nfs4_schedule_state_manager(clp);
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
* @clp: client to process
@@ -1401,14 +1436,6 @@ restart:
/* Mark the file as being 'closed' */
state->state = 0;
break;
- case -EKEYEXPIRED:
- /*
- * User RPCSEC_GSS context has expired.
- * We cannot recover this stateid now, so
- * skip it and allow recovery thread to
- * proceed.
- */
- break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_BAD_STATEID:
@@ -1561,14 +1588,6 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
-static void nfs4_warn_keyexpired(const char *s)
-{
- printk_ratelimited(KERN_WARNING "Error: state manager"
- " encountered RPCSEC_GSS session"
- " expired against NFSv4 server %s.\n",
- s);
-}
-
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
@@ -1602,10 +1621,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
break;
- case -EKEYEXPIRED:
- /* Nothing we can do */
- nfs4_warn_keyexpired(clp->cl_hostname);
- break;
default:
dprintk("%s: failed to handle error %d for server %s\n",
__func__, error, clp->cl_hostname);
@@ -1722,8 +1737,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
dprintk("%s: exit with error %d for server %s\n",
__func__, -EPROTONOSUPPORT, clp->cl_hostname);
return -EPROTONOSUPPORT;
- case -EKEYEXPIRED:
- nfs4_warn_keyexpired(clp->cl_hostname);
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
default:
@@ -1876,7 +1889,6 @@ again:
break;
case -EKEYEXPIRED:
- nfs4_warn_keyexpired(clp->cl_hostname);
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
status = -EKEYEXPIRED;
@@ -1907,14 +1919,23 @@ void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
-void nfs41_handle_recall_slot(struct nfs_client *clp)
+static void nfs41_ping_server(struct nfs_client *clp)
{
- set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
- dprintk("%s: scheduling slot recall for server %s\n", __func__,
- clp->cl_hostname);
+ /* Use CHECK_LEASE to ping the server with a SEQUENCE */
+ set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
nfs4_schedule_state_manager(clp);
}
+void nfs41_server_notify_target_slotid_update(struct nfs_client *clp)
+{
+ nfs41_ping_server(clp);
+}
+
+void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp)
+{
+ nfs41_ping_server(clp);
+}
+
static void nfs4_reset_all_state(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
@@ -2024,35 +2045,6 @@ out:
return status;
}
-static int nfs4_recall_slot(struct nfs_client *clp)
-{
- struct nfs4_slot_table *fc_tbl;
- struct nfs4_slot *new, *old;
- int i;
-
- if (!nfs4_has_session(clp))
- return 0;
- nfs4_begin_drain_session(clp);
- fc_tbl = &clp->cl_session->fc_slot_table;
- new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
- GFP_NOFS);
- if (!new)
- return -ENOMEM;
-
- spin_lock(&fc_tbl->slot_tbl_lock);
- for (i = 0; i < fc_tbl->target_max_slots; i++)
- new[i].seq_nr = fc_tbl->slots[i].seq_nr;
- old = fc_tbl->slots;
- fc_tbl->slots = new;
- fc_tbl->max_slots = fc_tbl->target_max_slots;
- fc_tbl->target_max_slots = 0;
- clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots;
- spin_unlock(&fc_tbl->slot_tbl_lock);
-
- kfree(old);
- return 0;
-}
-
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
struct rpc_cred *cred;
@@ -2083,7 +2075,6 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
-static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
@@ -2115,15 +2106,6 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
}
- if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
- section = "check lease";
- status = nfs4_check_lease(clp);
- if (status < 0)
- goto out_error;
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
- continue;
- }
-
/* Initialize or reset the session */
if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
section = "reset session";
@@ -2144,10 +2126,9 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
}
- /* Recall session slots */
- if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)) {
- section = "recall slot";
- status = nfs4_recall_slot(clp);
+ if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
+ section = "check lease";
+ status = nfs4_check_lease(clp);
if (status < 0)
goto out_error;
continue;
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index bd61221ad2c..84d2e9e2f31 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -51,6 +51,7 @@ static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs4_write_inode,
+ .drop_inode = nfs_drop_inode,
.put_super = nfs_put_super,
.statfs = nfs_statfs,
.evict_inode = nfs4_evict_inode,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 40836ee5dc3..26b14392043 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -56,6 +56,7 @@
#include "nfs4_fs.h"
#include "internal.h"
+#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
@@ -270,6 +271,8 @@ static int nfs4_stat_to_errno(int);
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
+#define IMPL_NAME_LIMIT (sizeof(utsname()->sysname) + sizeof(utsname()->release) + \
+ sizeof(utsname()->version) + sizeof(utsname()->machine) + 8)
#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
encode_verifier_maxsz + \
@@ -282,7 +285,7 @@ static int nfs4_stat_to_errno(int);
1 /* nii_domain */ + \
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
1 /* nii_name */ + \
- XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+ XDR_QUADLEN(IMPL_NAME_LIMIT) + \
3 /* nii_date */)
#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
2 /* eir_clientid */ + \
@@ -936,7 +939,7 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
* but this is not required as a MUST for the server to do so. */
hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
- BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
+ WARN_ON_ONCE(hdr->taglen > NFS4_MAXTAGLEN);
encode_string(xdr, hdr->taglen, hdr->tag);
p = reserve_space(xdr, 8);
*p++ = cpu_to_be32(hdr->minorversion);
@@ -955,7 +958,7 @@ static void encode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 op,
static void encode_nops(struct compound_hdr *hdr)
{
- BUG_ON(hdr->nops > NFS4_MAX_OPS);
+ WARN_ON_ONCE(hdr->nops > NFS4_MAX_OPS);
*hdr->nops_p = htonl(hdr->nops);
}
@@ -1403,7 +1406,6 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a
*p = cpu_to_be32(NFS4_OPEN_NOCREATE);
break;
default:
- BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
*p = cpu_to_be32(NFS4_OPEN_CREATE);
encode_createmode(xdr, arg);
}
@@ -1621,7 +1623,6 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
p = reserve_space(xdr, 2*4);
*p++ = cpu_to_be32(1);
*p = cpu_to_be32(FATTR4_WORD0_ACL);
- BUG_ON(arg->acl_len % 4);
p = reserve_space(xdr, 4);
*p = cpu_to_be32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
@@ -1713,7 +1714,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- char impl_name[NFS4_OPAQUE_LIMIT];
+ char impl_name[IMPL_NAME_LIMIT];
int len = 0;
encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
@@ -1728,7 +1729,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
if (send_implementation_id &&
sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) > 1 &&
sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN)
- <= NFS4_OPAQUE_LIMIT + 1)
+ <= sizeof(impl_name) + 1)
len = snprintf(impl_name, sizeof(impl_name), "%s %s %s %s",
utsname()->sysname, utsname()->release,
utsname()->version, utsname()->machine);
@@ -1835,18 +1836,16 @@ static void encode_sequence(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
#if defined(CONFIG_NFS_V4_1)
- struct nfs4_session *session = args->sa_session;
+ struct nfs4_session *session;
struct nfs4_slot_table *tp;
- struct nfs4_slot *slot;
+ struct nfs4_slot *slot = args->sa_slot;
__be32 *p;
- if (!session)
+ if (slot == NULL)
return;
- tp = &session->fc_slot_table;
-
- WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
- slot = tp->slots + args->sa_slotid;
+ tp = slot->table;
+ session = tp->session;
encode_op_hdr(xdr, OP_SEQUENCE, decode_sequence_maxsz, hdr);
@@ -1860,12 +1859,12 @@ static void encode_sequence(struct xdr_stream *xdr,
((u32 *)session->sess_id.data)[1],
((u32 *)session->sess_id.data)[2],
((u32 *)session->sess_id.data)[3],
- slot->seq_nr, args->sa_slotid,
+ slot->seq_nr, slot->slot_nr,
tp->highest_used_slotid, args->sa_cache_this);
p = reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 16);
p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
*p++ = cpu_to_be32(slot->seq_nr);
- *p++ = cpu_to_be32(args->sa_slotid);
+ *p++ = cpu_to_be32(slot->slot_nr);
*p++ = cpu_to_be32(tp->highest_used_slotid);
*p = cpu_to_be32(args->sa_cache_this);
#endif /* CONFIG_NFS_V4_1 */
@@ -2027,8 +2026,9 @@ static void encode_free_stateid(struct xdr_stream *xdr,
static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
{
#if defined(CONFIG_NFS_V4_1)
- if (args->sa_session)
- return args->sa_session->clp->cl_mvops->minor_version;
+
+ if (args->sa_slot)
+ return args->sa_slot->table->session->clp->cl_mvops->minor_version;
#endif /* CONFIG_NFS_V4_1 */
return 0;
}
@@ -5509,12 +5509,13 @@ static int decode_sequence(struct xdr_stream *xdr,
struct rpc_rqst *rqstp)
{
#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_session *session;
struct nfs4_sessionid id;
u32 dummy;
int status;
__be32 *p;
- if (!res->sr_session)
+ if (res->sr_slot == NULL)
return 0;
status = decode_op_hdr(xdr, OP_SEQUENCE);
@@ -5528,8 +5529,9 @@ static int decode_sequence(struct xdr_stream *xdr,
* sequence number, the server is looney tunes.
*/
status = -EREMOTEIO;
+ session = res->sr_slot->table->session;
- if (memcmp(id.data, res->sr_session->sess_id.data,
+ if (memcmp(id.data, session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out_err;
@@ -5547,14 +5549,14 @@ static int decode_sequence(struct xdr_stream *xdr,
}
/* slot id */
dummy = be32_to_cpup(p++);
- if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) {
+ if (dummy != res->sr_slot->slot_nr) {
dprintk("%s Invalid slot id\n", __func__);
goto out_err;
}
- /* highest slot id - currently not processed */
- dummy = be32_to_cpup(p++);
- /* target highest slot id - currently not processed */
- dummy = be32_to_cpup(p++);
+ /* highest slot id */
+ res->sr_highest_slotid = be32_to_cpup(p++);
+ /* target highest slot id */
+ res->sr_target_highest_slotid = be32_to_cpup(p++);
/* result flags */
res->sr_status_flags = be32_to_cpup(p);
status = 0;
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 87461354530..a9ebd817278 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -148,17 +148,6 @@ end_offset(u64 start, u64 len)
return end >= start ? end : NFS4_MAX_UINT64;
}
-/* last octet in a range */
-static inline u64
-last_byte_offset(u64 start, u64 len)
-{
- u64 end;
-
- BUG_ON(!len);
- end = start + len;
- return end > start ? end - 1 : NFS4_MAX_UINT64;
-}
-
static void _fix_verify_io_params(struct pnfs_layout_segment *lseg,
struct page ***p_pages, unsigned *p_pgbase,
u64 offset, unsigned long count)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2878f97bd78..e7165d91536 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -369,17 +369,6 @@ end_offset(u64 start, u64 len)
return end >= start ? end : NFS4_MAX_UINT64;
}
-/* last octet in a range */
-static inline u64
-last_byte_offset(u64 start, u64 len)
-{
- u64 end;
-
- BUG_ON(!len);
- end = start + len;
- return end > start ? end - 1 : NFS4_MAX_UINT64;
-}
-
/*
* is l2 fully contained in l1?
* start1 end1
@@ -645,7 +634,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
dprintk("--> %s\n", __func__);
- BUG_ON(ctx == NULL);
lgp = kzalloc(sizeof(*lgp), gfp_flags);
if (lgp == NULL)
return NULL;
@@ -1126,7 +1114,6 @@ pnfs_update_layout(struct inode *ino,
* chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
spin_lock(&clp->cl_lock);
- BUG_ON(!list_empty(&lo->plh_layouts));
list_add_tail(&lo->plh_layouts, &server->layouts);
spin_unlock(&clp->cl_lock);
}
@@ -1222,7 +1209,7 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
{
u64 rd_size = req->wb_bytes;
- BUG_ON(pgio->pg_lseg != NULL);
+ WARN_ON_ONCE(pgio->pg_lseg != NULL);
if (req->wb_offset != req->wb_pgbase) {
nfs_pageio_reset_read_mds(pgio);
@@ -1251,7 +1238,7 @@ void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size)
{
- BUG_ON(pgio->pg_lseg != NULL);
+ WARN_ON_ONCE(pgio->pg_lseg != NULL);
if (req->wb_offset != req->wb_pgbase) {
nfs_pageio_reset_write_mds(pgio);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 50a88c3546e..f084dac948e 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -47,39 +47,6 @@
#define NFSDBG_FACILITY NFSDBG_PROC
/*
- * wrapper to handle the -EKEYEXPIRED error message. This should generally
- * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
- * support the NFSERR_JUKEBOX error code, but we handle this situation in the
- * same way that we handle that error with NFSv3.
- */
-static int
-nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
-{
- int res;
- do {
- res = rpc_call_sync(clnt, msg, flags);
- if (res != -EKEYEXPIRED)
- break;
- freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
- res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
- return res;
-}
-
-#define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags)
-
-static int
-nfs_async_handle_expired_key(struct rpc_task *task)
-{
- if (task->tk_status != -EKEYEXPIRED)
- return 0;
- task->tk_status = 0;
- rpc_restart_call(task);
- rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
- return 1;
-}
-
-/*
* Bare-bones access to getattr: this is for nfs_read_super.
*/
static int
@@ -364,8 +331,6 @@ static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlink
static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
- if (nfs_async_handle_expired_key(task))
- return 0;
nfs_mark_for_revalidate(dir);
return 1;
}
@@ -385,8 +350,6 @@ static int
nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
struct inode *new_dir)
{
- if (nfs_async_handle_expired_key(task))
- return 0;
nfs_mark_for_revalidate(old_dir);
nfs_mark_for_revalidate(new_dir);
return 1;
@@ -642,9 +605,6 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
struct inode *inode = data->header->inode;
- if (nfs_async_handle_expired_key(task))
- return -EAGAIN;
-
nfs_invalidate_atime(inode);
if (task->tk_status >= 0) {
nfs_refresh_inode(inode, data->res.fattr);
@@ -671,9 +631,6 @@ static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->header->inode;
- if (nfs_async_handle_expired_key(task))
- return -EAGAIN;
-
if (task->tk_status >= 0)
nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 652d3f7176a..c25cadf8f8c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -64,6 +64,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "nfs4session.h"
#include "pnfs.h"
#include "nfs.h"
@@ -307,6 +308,7 @@ const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .drop_inode = nfs_drop_inode,
.put_super = nfs_put_super,
.statfs = nfs_statfs,
.evict_inode = nfs_evict_inode,
@@ -2373,19 +2375,30 @@ static void nfs_get_cache_cookie(struct super_block *sb,
struct nfs_parsed_mount_data *parsed,
struct nfs_clone_mount *cloned)
{
+ struct nfs_server *nfss = NFS_SB(sb);
char *uniq = NULL;
int ulen = 0;
- if (parsed && parsed->fscache_uniq) {
- uniq = parsed->fscache_uniq;
- ulen = strlen(parsed->fscache_uniq);
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+
+ if (parsed) {
+ if (!(parsed->options & NFS_OPTION_FSCACHE))
+ return;
+ if (parsed->fscache_uniq) {
+ uniq = parsed->fscache_uniq;
+ ulen = strlen(parsed->fscache_uniq);
+ }
} else if (cloned) {
struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+ if (!(mnt_s->options & NFS_OPTION_FSCACHE))
+ return;
if (mnt_s->fscache_key) {
uniq = mnt_s->fscache_key->key.uniquifier;
ulen = mnt_s->fscache_key->key.uniq_len;
};
- }
+ } else
+ return;
nfs_fscache_get_super_cookie(sb, uniq, ulen);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9347ab7c957..b673be31590 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -202,7 +202,6 @@ out:
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
- SetPageError(page);
nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}
@@ -239,21 +238,18 @@ int nfs_congestion_kb;
#define NFS_CONGESTION_OFF_THRESH \
(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
-static int nfs_set_page_writeback(struct page *page)
+static void nfs_set_page_writeback(struct page *page)
{
+ struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
int ret = test_set_page_writeback(page);
- if (!ret) {
- struct inode *inode = page_file_mapping(page)->host;
- struct nfs_server *nfss = NFS_SERVER(inode);
+ WARN_ON_ONCE(ret != 0);
- if (atomic_long_inc_return(&nfss->writeback) >
- NFS_CONGESTION_ON_THRESH) {
- set_bdi_congested(&nfss->backing_dev_info,
- BLK_RW_ASYNC);
- }
+ if (atomic_long_inc_return(&nfss->writeback) >
+ NFS_CONGESTION_ON_THRESH) {
+ set_bdi_congested(&nfss->backing_dev_info,
+ BLK_RW_ASYNC);
}
- return ret;
}
static void nfs_end_page_writeback(struct page *page)
@@ -315,10 +311,10 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
if (IS_ERR(req))
goto out;
- ret = nfs_set_page_writeback(page);
- BUG_ON(ret != 0);
- BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
+ nfs_set_page_writeback(page);
+ WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
+ ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
nfs_redirty_request(req);
ret = pgio->pg_error;
@@ -451,8 +447,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
struct inode *inode = req->wb_context->dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
- BUG_ON (!NFS_WBACK_BUSY(req));
-
spin_lock(&inode->i_lock);
if (likely(!PageSwapCache(req->wb_page))) {
set_page_private(req->wb_page, 0);
@@ -884,7 +878,7 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
if (nfs_have_delegated_attributes(inode))
goto out;
- if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
return false;
out:
return PageUptodate(page) != 0;
@@ -1727,7 +1721,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
struct nfs_page *req;
int ret = 0;
- BUG_ON(!PageLocked(page));
for (;;) {
wait_on_page_writeback(page);
req = nfs_page_find_request(page);
@@ -1801,7 +1794,8 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
if (PagePrivate(page))
return -EBUSY;
- nfs_fscache_release_page(page, GFP_KERNEL);
+ if (!nfs_fscache_release_page(page, GFP_KERNEL))
+ return -EBUSY;
return migrate_page(mapping, newpage, page, mode);
}
@@ -1829,7 +1823,7 @@ int __init nfs_init_writepagecache(void)
goto out_destroy_write_mempool;
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
- nfs_wdata_cachep);
+ nfs_cdata_cachep);
if (nfs_commit_mempool == NULL)
goto out_destroy_commit_cache;
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c
index e6c38159622..e761ee95617 100644
--- a/fs/nfsd/fault_inject.c
+++ b/fs/nfsd/fault_inject.c
@@ -8,61 +8,144 @@
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/nsproxy.h>
+#include <linux/sunrpc/clnt.h>
+#include <asm/uaccess.h>
#include "state.h"
-#include "fault_inject.h"
+#include "netns.h"
struct nfsd_fault_inject_op {
char *file;
- void (*func)(u64);
+ u64 (*forget)(struct nfs4_client *, u64);
+ u64 (*print)(struct nfs4_client *, u64);
};
static struct nfsd_fault_inject_op inject_ops[] = {
{
.file = "forget_clients",
- .func = nfsd_forget_clients,
+ .forget = nfsd_forget_client,
+ .print = nfsd_print_client,
},
{
.file = "forget_locks",
- .func = nfsd_forget_locks,
+ .forget = nfsd_forget_client_locks,
+ .print = nfsd_print_client_locks,
},
{
.file = "forget_openowners",
- .func = nfsd_forget_openowners,
+ .forget = nfsd_forget_client_openowners,
+ .print = nfsd_print_client_openowners,
},
{
.file = "forget_delegations",
- .func = nfsd_forget_delegations,
+ .forget = nfsd_forget_client_delegations,
+ .print = nfsd_print_client_delegations,
},
{
.file = "recall_delegations",
- .func = nfsd_recall_delegations,
+ .forget = nfsd_recall_client_delegations,
+ .print = nfsd_print_client_delegations,
},
};
static long int NUM_INJECT_OPS = sizeof(inject_ops) / sizeof(struct nfsd_fault_inject_op);
static struct dentry *debug_dir;
-static int nfsd_inject_set(void *op_ptr, u64 val)
+static void nfsd_inject_set(struct nfsd_fault_inject_op *op, u64 val)
{
- struct nfsd_fault_inject_op *op = op_ptr;
+ u64 count = 0;
if (val == 0)
printk(KERN_INFO "NFSD Fault Injection: %s (all)", op->file);
else
printk(KERN_INFO "NFSD Fault Injection: %s (n = %llu)", op->file, val);
- op->func(val);
- return 0;
+ nfs4_lock_state();
+ count = nfsd_for_n_state(val, op->forget);
+ nfs4_unlock_state();
+ printk(KERN_INFO "NFSD: %s: found %llu", op->file, count);
}
-static int nfsd_inject_get(void *data, u64 *val)
+static void nfsd_inject_set_client(struct nfsd_fault_inject_op *op,
+ struct sockaddr_storage *addr,
+ size_t addr_size)
{
- *val = 0;
- return 0;
+ char buf[INET6_ADDRSTRLEN];
+ struct nfs4_client *clp;
+ u64 count;
+
+ nfs4_lock_state();
+ clp = nfsd_find_client(addr, addr_size);
+ if (clp) {
+ count = op->forget(clp, 0);
+ rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
+ printk(KERN_INFO "NFSD [%s]: Client %s had %llu state object(s)\n", op->file, buf, count);
+ }
+ nfs4_unlock_state();
+}
+
+static void nfsd_inject_get(struct nfsd_fault_inject_op *op, u64 *val)
+{
+ nfs4_lock_state();
+ *val = nfsd_for_n_state(0, op->print);
+ nfs4_unlock_state();
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_nfsd, nfsd_inject_get, nfsd_inject_set, "%llu\n");
+static ssize_t fault_inject_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ static u64 val;
+ char read_buf[25];
+ size_t size, ret;
+ loff_t pos = *ppos;
+
+ if (!pos)
+ nfsd_inject_get(file->f_dentry->d_inode->i_private, &val);
+ size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= size || !len)
+ return 0;
+ if (len > size - pos)
+ len = size - pos;
+ ret = copy_to_user(buf, read_buf + pos, len);
+ if (ret == len)
+ return -EFAULT;
+ len -= ret;
+ *ppos = pos + len;
+ return len;
+}
+
+static ssize_t fault_inject_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ char write_buf[INET6_ADDRSTRLEN];
+ size_t size = min(sizeof(write_buf) - 1, len);
+ struct net *net = current->nsproxy->net_ns;
+ struct sockaddr_storage sa;
+ u64 val;
+
+ if (copy_from_user(write_buf, buf, size))
+ return -EFAULT;
+ write_buf[size] = '\0';
+
+ size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
+ if (size > 0)
+ nfsd_inject_set_client(file->f_dentry->d_inode->i_private, &sa, size);
+ else {
+ val = simple_strtoll(write_buf, NULL, 0);
+ nfsd_inject_set(file->f_dentry->d_inode->i_private, val);
+ }
+ return len; /* on success, claim we got the whole input */
+}
+
+static const struct file_operations fops_nfsd = {
+ .owner = THIS_MODULE,
+ .read = fault_inject_read,
+ .write = fault_inject_write,
+};
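To make the new interface concrete, a hypothetical usage sketch follows. The /sys/kernel/debug/nfsd path and the addresses are assumptions (the debugfs directory is created elsewhere in this file); the function names are the ones defined above.

    /*
     * Hypothetical usage (paths and addresses assumed):
     *
     *   echo 2 > /sys/kernel/debug/nfsd/forget_locks
     *       -> no address parses, so nfsd_inject_set() runs op->forget
     *          across clients with a limit of 2 via nfsd_for_n_state().
     *
     *   echo 192.0.2.10 > /sys/kernel/debug/nfsd/forget_locks
     *       -> rpc_pton() recognises the address and
     *          nfsd_inject_set_client() applies op->forget to that
     *          single client only.
     *
     *   cat /sys/kernel/debug/nfsd/forget_locks
     *       -> fault_inject_read() reports nfsd_for_n_state(0, op->print).
     */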
void nfsd_fault_inject_cleanup(void)
{
diff --git a/fs/nfsd/fault_inject.h b/fs/nfsd/fault_inject.h
deleted file mode 100644
index 90bd0570956..00000000000
--- a/fs/nfsd/fault_inject.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com>
- *
- * Function definitions for fault injection
- */
-
-#ifndef LINUX_NFSD_FAULT_INJECT_H
-#define LINUX_NFSD_FAULT_INJECT_H
-
-#ifdef CONFIG_NFSD_FAULT_INJECTION
-int nfsd_fault_inject_init(void);
-void nfsd_fault_inject_cleanup(void);
-void nfsd_forget_clients(u64);
-void nfsd_forget_locks(u64);
-void nfsd_forget_openowners(u64);
-void nfsd_forget_delegations(u64);
-void nfsd_recall_delegations(u64);
-#else /* CONFIG_NFSD_FAULT_INJECTION */
-static inline int nfsd_fault_inject_init(void) { return 0; }
-static inline void nfsd_fault_inject_cleanup(void) {}
-static inline void nfsd_forget_clients(u64 num) {}
-static inline void nfsd_forget_locks(u64 num) {}
-static inline void nfsd_forget_openowners(u64 num) {}
-static inline void nfsd_forget_delegations(u64 num) {}
-static inline void nfsd_recall_delegations(u64 num) {}
-#endif /* CONFIG_NFSD_FAULT_INJECTION */
-
-#endif /* LINUX_NFSD_FAULT_INJECT_H */
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 65c2431ea32..1051bebff1b 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -24,7 +24,18 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+/* Hash tables for nfs4_clientid state */
+#define CLIENT_HASH_BITS 4
+#define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
+#define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
+
+#define LOCKOWNER_INO_HASH_BITS 8
+#define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
+
+#define SESSION_HASH_SIZE 512
+
struct cld_net;
+struct nfsd4_client_tracking_ops;
struct nfsd_net {
struct cld_net *cld_net;
@@ -38,7 +49,62 @@ struct nfsd_net {
struct lock_manager nfsd4_manager;
bool grace_ended;
time_t boot_time;
+
+ /*
+ * reclaim_str_hashtbl[] holds known client info from a previous reset/reboot,
+ * used in reboot/reset lease grace period processing.
+ *
+ * conf_id_hashtbl[] and conf_name_tree hold confirmed
+ * setclientid_confirmed info.
+ *
+ * unconf_id_hashtbl[] and unconf_name_tree hold unconfirmed
+ * setclientid info.
+ */
+ struct list_head *reclaim_str_hashtbl;
+ int reclaim_str_hashtbl_size;
+ struct list_head *conf_id_hashtbl;
+ struct rb_root conf_name_tree;
+ struct list_head *unconf_id_hashtbl;
+ struct rb_root unconf_name_tree;
+ struct list_head *ownerstr_hashtbl;
+ struct list_head *lockowner_ino_hashtbl;
+ struct list_head *sessionid_hashtbl;
+ /*
+ * client_lru holds client queue ordered by nfs4_client.cl_time
+ * for lease renewal.
+ *
+ * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
+ * for last close replay.
+ *
+ * All of the above fields are protected by the client_mutex.
+ */
+ struct list_head client_lru;
+ struct list_head close_lru;
+
+ struct delayed_work laundromat_work;
+
+ /* client_lock protects the client lru list and session hash table */
+ spinlock_t client_lock;
+
+ struct file *rec_file;
+ bool in_grace;
+ struct nfsd4_client_tracking_ops *client_tracking_ops;
+
+ time_t nfsd4_lease;
+ time_t nfsd4_grace;
+
+ bool nfsd_net_up;
+
+ /*
+ * Time of server startup
+ */
+ struct timeval nfssvc_boot;
+
+ struct svc_serv *nfsd_serv;
};
+/* Simple check to find out if a given net was properly initialized */
+#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
+
extern int nfsd_net_id;
#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index b314888825d..9170861c804 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -253,7 +253,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
(resp->mask & NFS_ACL) ? resp->acl_access : NULL,
(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
while (w > 0) {
- if (!rqstp->rq_respages[rqstp->rq_resused++])
+ if (!*(rqstp->rq_next_page++))
return 0;
w -= PAGE_SIZE;
}
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index a596e9d987e..9cbc1a841f8 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -184,7 +184,7 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
(resp->mask & NFS_ACL) ? resp->acl_access : NULL,
(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
while (w > 0) {
- if (!rqstp->rq_respages[rqstp->rq_resused++])
+ if (!*(rqstp->rq_next_page++))
return 0;
w -= PAGE_SIZE;
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 97d90d1c860..1fc02dfdc5c 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -460,7 +460,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
__be32 nfserr;
int count = 0;
loff_t offset;
- int i;
+ struct page **p;
caddr_t page_addr = NULL;
dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n",
@@ -484,8 +484,8 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
&resp->common,
nfs3svc_encode_entry_plus);
memcpy(resp->verf, argp->verf, 8);
- for (i=1; i<rqstp->rq_resused ; i++) {
- page_addr = page_address(rqstp->rq_respages[i]);
+ for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
+ page_addr = page_address(*p);
if (((caddr_t)resp->buffer >= page_addr) &&
((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 43f46cd9ede..324c0baf7cd 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -7,8 +7,10 @@
*/
#include <linux/namei.h>
+#include <linux/sunrpc/svc_xprt.h>
#include "xdr3.h"
#include "auth.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -323,7 +325,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_readargs *args)
{
unsigned int len;
- int v,pn;
+ int v;
u32 max_blocksize = svc_max_payload(rqstp);
if (!(p = decode_fh(p, &args->fh)))
@@ -338,8 +340,9 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
/* set up the kvec */
v=0;
while (len > 0) {
- pn = rqstp->rq_resused++;
- rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
+ struct page *p = *(rqstp->rq_next_page++);
+
+ rqstp->rq_vec[v].iov_base = page_address(p);
rqstp->rq_vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE;
len -= rqstp->rq_vec[v].iov_len;
v++;
@@ -461,8 +464,7 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
len = ntohl(*p++);
if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
return 0;
- args->tname = new =
- page_address(rqstp->rq_respages[rqstp->rq_resused++]);
+ args->tname = new = page_address(*(rqstp->rq_next_page++));
args->tlen = len;
/* first copy and check from the first page */
old = (char*)p;
@@ -533,8 +535,7 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
{
if (!(p = decode_fh(p, &args->fh)))
return 0;
- args->buffer =
- page_address(rqstp->rq_respages[rqstp->rq_resused++]);
+ args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
@@ -565,8 +566,7 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
if (args->count > PAGE_SIZE)
args->count = PAGE_SIZE;
- args->buffer =
- page_address(rqstp->rq_respages[rqstp->rq_resused++]);
+ args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
@@ -575,7 +575,7 @@ int
nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_readdirargs *args)
{
- int len, pn;
+ int len;
u32 max_blocksize = svc_max_payload(rqstp);
if (!(p = decode_fh(p, &args->fh)))
@@ -590,9 +590,9 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
args->count = len;
while (len > 0) {
- pn = rqstp->rq_resused++;
+ struct page *p = *(rqstp->rq_next_page++);
if (!args->buffer)
- args->buffer = page_address(rqstp->rq_respages[pn]);
+ args->buffer = page_address(p);
len -= PAGE_SIZE;
}
@@ -720,12 +720,14 @@ int
nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_writeres *resp)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+
p = encode_wcc_data(rqstp, p, &resp->fh);
if (resp->status == 0) {
*p++ = htonl(resp->count);
*p++ = htonl(resp->committed);
- *p++ = htonl(nfssvc_boot.tv_sec);
- *p++ = htonl(nfssvc_boot.tv_usec);
+ *p++ = htonl(nn->nfssvc_boot.tv_sec);
+ *p++ = htonl(nn->nfssvc_boot.tv_usec);
}
return xdr_ressize_check(rqstp, p);
}
@@ -876,7 +878,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
common);
__be32 *p = cd->buffer;
caddr_t curr_page_addr = NULL;
- int pn; /* current page number */
+ struct page ** page;
int slen; /* string (name) length */
int elen; /* estimated entry length in words */
int num_entry_words = 0; /* actual number of words */
@@ -913,8 +915,9 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
}
/* determine which page in rq_respages[] we are currently filling */
- for (pn=1; pn < cd->rqstp->rq_resused; pn++) {
- curr_page_addr = page_address(cd->rqstp->rq_respages[pn]);
+ for (page = cd->rqstp->rq_respages + 1;
+ page < cd->rqstp->rq_next_page; page++) {
+ curr_page_addr = page_address(*page);
if (((caddr_t)cd->buffer >= curr_page_addr) &&
((caddr_t)cd->buffer < curr_page_addr + PAGE_SIZE))
@@ -929,14 +932,14 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
if (plus)
p = encode_entryplus_baggage(cd, p, name, namlen);
num_entry_words = p - cd->buffer;
- } else if (cd->rqstp->rq_respages[pn+1] != NULL) {
+ } else if (*(page+1) != NULL) {
/* temporarily encode entry into next page, then move back to
* current and next page in rq_respages[] */
__be32 *p1, *tmp;
int len1, len2;
/* grab next page for temporary storage of entry */
- p1 = tmp = page_address(cd->rqstp->rq_respages[pn+1]);
+ p1 = tmp = page_address(*(page+1));
p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
@@ -1082,11 +1085,13 @@ int
nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_commitres *resp)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+
p = encode_wcc_data(rqstp, p, &resp->fh);
/* Write verifier */
if (resp->status == 0) {
- *p++ = htonl(nfssvc_boot.tv_sec);
- *p++ = htonl(nfssvc_boot.tv_usec);
+ *p++ = htonl(nn->nfssvc_boot.tv_sec);
+ *p++ = htonl(nn->nfssvc_boot.tv_usec);
}
return xdr_ressize_check(rqstp, p);
}
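
Reviewer note: both the WRITE and COMMIT encoders above switch from the global nfssvc_boot to the per-net nn->nfssvc_boot, so a restart of one namespace's nfsd changes only that namespace's write verifier, but the wire encoding is unchanged: the verifier is just the recorded boot timestamp packed as two 32-bit words, seconds then microseconds. A rough userspace illustration (gettimeofday() here merely stands in for the value nfsd records at startup; this is a sketch, not the kernel code). gen_boot_verifier() in nfs4proc.c, later in this patch, packs the same pair for the NFSv4 verifier.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/time.h>

int main(void)
{
        struct timeval boot;
        unsigned char verifier[8];
        uint32_t verf[2];

        gettimeofday(&boot, NULL);      /* stand-in for the recorded boot time */

        /* Same shape as the XDR encoding above: seconds, then microseconds. */
        verf[0] = htonl((uint32_t)boot.tv_sec);
        verf[1] = htonl((uint32_t)boot.tv_usec);
        memcpy(verifier, verf, sizeof(verifier));

        printf("verifier: ");
        for (size_t i = 0; i < sizeof(verifier); i++)
                printf("%02x", verifier[i]);
        printf("\n");
        return 0;
}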
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index bdf29c96e4c..99bc85ff021 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -625,20 +626,46 @@ static const struct rpc_program cb_program = {
.pipe_dir_name = "nfsd4_cb",
};
-static int max_cb_time(void)
+static int max_cb_time(struct net *net)
{
- return max(nfsd4_lease/10, (time_t)1) * HZ;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
}
+static struct rpc_cred *callback_cred;
+
+int set_callback_cred(void)
+{
+ if (callback_cred)
+ return 0;
+ callback_cred = rpc_lookup_machine_cred("nfs");
+ if (!callback_cred)
+ return -ENOMEM;
+ return 0;
+}
+
+static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
+{
+ if (clp->cl_minorversion == 0) {
+ return get_rpccred(callback_cred);
+ } else {
+ struct rpc_auth *auth = client->cl_auth;
+ struct auth_cred acred = {};
+
+ acred.uid = ses->se_cb_sec.uid;
+ acred.gid = ses->se_cb_sec.gid;
+ return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
+ }
+}
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
struct rpc_timeout timeparms = {
- .to_initval = max_cb_time(),
+ .to_initval = max_cb_time(clp->net),
.to_retries = 0,
};
struct rpc_create_args args = {
- .net = &init_net,
+ .net = clp->net,
.address = (struct sockaddr *) &conn->cb_addr,
.addrsize = conn->cb_addrlen,
.saddress = (struct sockaddr *) &conn->cb_saddr,
@@ -648,6 +675,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
};
struct rpc_clnt *client;
+ struct rpc_cred *cred;
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
@@ -666,7 +694,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP;
- args.authflavor = RPC_AUTH_UNIX;
+ args.authflavor = ses->se_cb_sec.flavor;
}
/* Create RPC client */
client = rpc_create(&args);
@@ -675,9 +703,14 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
PTR_ERR(client));
return PTR_ERR(client);
}
+ cred = get_backchannel_cred(clp, client, ses);
+ if (IS_ERR(cred)) {
+ rpc_shutdown_client(client);
+ return PTR_ERR(cred);
+ }
clp->cl_cb_client = client;
+ clp->cl_cb_cred = cred;
return 0;
-
}
static void warn_no_callback_path(struct nfs4_client *clp, int reason)
@@ -714,18 +747,6 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
.rpc_call_done = nfsd4_cb_probe_done,
};
-static struct rpc_cred *callback_cred;
-
-int set_callback_cred(void)
-{
- if (callback_cred)
- return 0;
- callback_cred = rpc_lookup_machine_cred("nfs");
- if (!callback_cred)
- return -ENOMEM;
- return 0;
-}
-
static struct workqueue_struct *callback_wq;
static void run_nfsd4_cb(struct nfsd4_callback *cb)
@@ -743,7 +764,6 @@ static void do_probe_callback(struct nfs4_client *clp)
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
cb->cb_msg.rpc_argp = NULL;
cb->cb_msg.rpc_resp = NULL;
- cb->cb_msg.rpc_cred = callback_cred;
cb->cb_ops = &nfsd4_cb_probe_ops;
@@ -962,6 +982,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
if (clp->cl_cb_client) {
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
+ put_rpccred(clp->cl_cb_cred);
+ clp->cl_cb_cred = NULL;
}
if (clp->cl_cb_conn.cb_xprt) {
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -995,7 +1017,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
run_nfsd4_cb(cb);
}
-void nfsd4_do_callback_rpc(struct work_struct *w)
+static void nfsd4_do_callback_rpc(struct work_struct *w)
{
struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
@@ -1010,10 +1032,16 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
nfsd4_release_cb(cb);
return;
}
+ cb->cb_msg.rpc_cred = clp->cl_cb_cred;
rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
cb->cb_ops, cb);
}
+void nfsd4_init_callback(struct nfsd4_callback *cb)
+{
+ INIT_WORK(&cb->cb_work, nfsd4_do_callback_rpc);
+}
+
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfsd4_callback *cb = &dp->dl_recall;
@@ -1025,7 +1053,6 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp)
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
- cb->cb_msg.rpc_cred = callback_cred;
cb->cb_ops = &nfsd4_cb_recall_ops;
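
Reviewer note: one small consequence of the max_cb_time() change above is that the callback RPC timeout is now derived from the per-net lease time rather than the global one; the arithmetic itself is unchanged, max(lease/10, 1) seconds expressed in jiffies. A tiny worked sketch (HZ is assumed to be 1000 purely for the printout; in the kernel it is the configured tick rate):

#include <stdio.h>

#define HZ 1000                 /* assumed tick rate, illustration only */

static long max_cb_time(long lease_seconds)
{
        long tenth = lease_seconds / 10;

        return (tenth > 1 ? tenth : 1) * HZ;
}

int main(void)
{
        /* default 90s lease -> 9s worth of jiffies; a 5s lease still gets 1s */
        printf("lease 90s -> to_initval %ld jiffies\n", max_cb_time(90));
        printf("lease  5s -> to_initval %ld jiffies\n", max_cb_time(5));
        return 0;
}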
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 6c9a4b291db..9d1c5dba2bb 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -40,6 +40,7 @@
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -194,6 +195,7 @@ static __be32
do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
struct svc_fh *resfh;
+ int accmode;
__be32 status;
resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
@@ -253,9 +255,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
/* set reply cache */
fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
&resfh->fh_handle);
- if (!open->op_created)
- status = do_open_permission(rqstp, resfh, open,
- NFSD_MAY_NOP);
+ accmode = NFSD_MAY_NOP;
+ if (open->op_created)
+ accmode |= NFSD_MAY_OWNER_OVERRIDE;
+ status = do_open_permission(rqstp, resfh, open, accmode);
set_change_info(&open->op_cinfo, current_fh);
fh_dup2(current_fh, resfh);
out:
@@ -304,6 +307,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
__be32 status;
struct nfsd4_compoundres *resp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
@@ -331,7 +336,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* check seqid for replay. set nfs4_owner */
resp = rqstp->rq_resp;
- status = nfsd4_process_open1(&resp->cstate, open);
+ status = nfsd4_process_open1(&resp->cstate, open, nn);
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
fh_put(&cstate->current_fh);
@@ -354,10 +359,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
- if (locks_in_grace(SVC_NET(rqstp)) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+ if (locks_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
status = nfserr_no_grace;
- if (!locks_in_grace(SVC_NET(rqstp)) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+ if (!locks_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
switch (open->op_claim_type) {
@@ -370,7 +375,9 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
- status = nfs4_check_open_reclaim(&open->op_clientid, cstate->minorversion);
+ status = nfs4_check_open_reclaim(&open->op_clientid,
+ cstate->minorversion,
+ nn);
if (status)
goto out;
case NFS4_OPEN_CLAIM_FH:
@@ -490,12 +497,13 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&access->ac_supported);
}
-static void gen_boot_verifier(nfs4_verifier *verifier)
+static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
{
__be32 verf[2];
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- verf[0] = (__be32)nfssvc_boot.tv_sec;
- verf[1] = (__be32)nfssvc_boot.tv_usec;
+ verf[0] = (__be32)nn->nfssvc_boot.tv_sec;
+ verf[1] = (__be32)nn->nfssvc_boot.tv_usec;
memcpy(verifier->data, verf, sizeof(verifier->data));
}
@@ -503,7 +511,7 @@ static __be32
nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_commit *commit)
{
- gen_boot_verifier(&commit->co_verf);
+ gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
commit->co_count);
}
@@ -684,6 +692,17 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (read->rd_offset >= OFFSET_MAX)
return nfserr_inval;
+ /*
+ * If we do a zero copy read, then a client will see read data
+ * that reflects the state of the file *after* performing the
+ * following compound.
+ *
+ * To ensure proper ordering, we therefore turn off zero copy if
+ * the client wants us to do more in this compound:
+ */
+ if (!nfsd4_last_compound_op(rqstp))
+ rqstp->rq_splice_ok = false;
+
nfs4_lock_state();
/* check stateid */
if ((status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
@@ -876,6 +895,24 @@ out:
return status;
}
+static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
+{
+ int i = 1;
+ int buflen = write->wr_buflen;
+
+ vec[0].iov_base = write->wr_head.iov_base;
+ vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
+ buflen -= vec[0].iov_len;
+
+ while (buflen) {
+ vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
+ vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
+ buflen -= vec[i].iov_len;
+ i++;
+ }
+ return i;
+}
+
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_write *write)
@@ -884,6 +921,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file *filp = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
+ int nvecs;
/* no need to check permission - this will be done in nfsd_write() */
@@ -904,10 +942,13 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
cnt = write->wr_buflen;
write->wr_how_written = write->wr_stable_how;
- gen_boot_verifier(&write->wr_verifier);
+ gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
+
+ nvecs = fill_in_write_vector(rqstp->rq_vec, write);
+ WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
status = nfsd_write(rqstp, &cstate->current_fh, filp,
- write->wr_offset, rqstp->rq_vec, write->wr_vlen,
+ write->wr_offset, rqstp->rq_vec, nvecs,
&cnt, &write->wr_how_written);
if (filp)
fput(filp);
@@ -1666,6 +1707,12 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_EXCHANGE_ID",
.op_rsize_bop = (nfsd4op_rsize)nfsd4_exchange_id_rsize,
},
+ [OP_BACKCHANNEL_CTL] = {
+ .op_func = (nfsd4op_func)nfsd4_backchannel_ctl,
+ .op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
+ .op_name = "OP_BACKCHANNEL_CTL",
+ .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
+ },
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
@@ -1719,6 +1766,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH | OP_MODIFIES_SOMETHING,
.op_name = "OP_FREE_STATEID",
+ .op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid,
.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
},
};
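
Reviewer note: the zero-copy comment added to nfsd4_read() above leans on nfsd4_last_compound_op(), whose one-line body is visible where it is removed from nfs4state.c later in this patch: the current op is the last one when the number of decoded ops equals the number already accounted for in the reply. A hedged userspace sketch of that check and the resulting rq_splice_ok decision (the counts are illustrative only):

#include <stdio.h>
#include <stdbool.h>

struct compoundargs { int opcnt; };     /* ops the compound contains */
struct compoundres  { int opcnt; };     /* ops accounted for so far */

static bool last_compound_op(const struct compoundargs *argp,
                             const struct compoundres *resp)
{
        return argp->opcnt == resp->opcnt;
}

int main(void)
{
        struct compoundargs args = { .opcnt = 3 };
        struct compoundres  res  = { .opcnt = 2 };      /* READ is not the last op */
        bool splice_ok = true;

        /* Mirror of the check in nfsd4_read(): more ops follow, so no splice. */
        if (!last_compound_op(&args, &res))
                splice_ok = false;

        printf("splice_ok = %s\n", splice_ok ? "true" : "false");
        return 0;
}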
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 43295d45cc2..ba6fdd4a045 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -58,13 +58,11 @@ struct nfsd4_client_tracking_ops {
void (*create)(struct nfs4_client *);
void (*remove)(struct nfs4_client *);
int (*check)(struct nfs4_client *);
- void (*grace_done)(struct net *, time_t);
+ void (*grace_done)(struct nfsd_net *, time_t);
};
/* Globals */
-static struct file *rec_file;
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
-static struct nfsd4_client_tracking_ops *client_tracking_ops;
static int
nfs4_save_creds(const struct cred **original_creds)
@@ -102,33 +100,39 @@ md5_to_hex(char *out, char *md5)
*out = '\0';
}
-__be32
-nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
+static int
+nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
{
struct xdr_netobj cksum;
struct hash_desc desc;
struct scatterlist sg;
- __be32 status = nfserr_jukebox;
+ int status;
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(desc.tfm))
+ if (IS_ERR(desc.tfm)) {
+ status = PTR_ERR(desc.tfm);
goto out_no_tfm;
+ }
+
cksum.len = crypto_hash_digestsize(desc.tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL)
+ if (cksum.data == NULL) {
+ status = -ENOMEM;
goto out;
+ }
sg_init_one(&sg, clname->data, clname->len);
- if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data))
+ status = crypto_hash_digest(&desc, &sg, sg.length, cksum.data);
+ if (status)
goto out;
md5_to_hex(dname, cksum.data);
- status = nfs_ok;
+ status = 0;
out:
kfree(cksum.data);
crypto_free_hash(desc.tfm);
@@ -136,29 +140,61 @@ out_no_tfm:
return status;
}
+/*
+ * If we had an error generating the recdir name for the legacy tracker
+ * then warn the admin. If the error doesn't appear to be transient,
+ * then disable recovery tracking.
+ */
+static void
+legacy_recdir_name_error(int error)
+{
+ printk(KERN_ERR "NFSD: unable to generate recoverydir "
+ "name (%d).\n", error);
+
+ /*
+ * if the algorithm just doesn't exist, then disable the recovery
+ * tracker altogether. The crypto libs will generally return this if
+ * FIPS is enabled as well.
+ */
+ if (error == -ENOENT) {
+ printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
+ "Reboot recovery will not function correctly!\n");
+
+ /* the argument is ignored by the legacy exit function */
+ nfsd4_client_tracking_exit(NULL);
+ }
+}
+
static void
nfsd4_create_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
- char *dname = clp->cl_recdir;
+ char dname[HEXDIR_LEN];
struct dentry *dir, *dentry;
+ struct nfs4_client_reclaim *crp;
int status;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- if (!rec_file)
+ if (!nn->rec_file)
return;
+
+ status = nfs4_make_rec_clidname(dname, &clp->cl_name);
+ if (status)
+ return legacy_recdir_name_error(status);
+
status = nfs4_save_creds(&original_cred);
if (status < 0)
return;
- status = mnt_want_write_file(rec_file);
+ status = mnt_want_write_file(nn->rec_file);
if (status)
return;
- dir = rec_file->f_path.dentry;
+ dir = nn->rec_file->f_path.dentry;
/* lock the parent */
mutex_lock(&dir->d_inode->i_mutex);
@@ -182,18 +218,24 @@ out_put:
dput(dentry);
out_unlock:
mutex_unlock(&dir->d_inode->i_mutex);
- if (status == 0)
- vfs_fsync(rec_file, 0);
- else
+ if (status == 0) {
+ if (nn->in_grace) {
+ crp = nfs4_client_to_reclaim(dname, nn);
+ if (crp)
+ crp->cr_clp = clp;
+ }
+ vfs_fsync(nn->rec_file, 0);
+ } else {
printk(KERN_ERR "NFSD: failed to write recovery record"
" (err %d); please check that %s exists"
" and is writeable", status,
user_recovery_dirname);
- mnt_drop_write_file(rec_file);
+ }
+ mnt_drop_write_file(nn->rec_file);
nfs4_reset_creds(original_cred);
}
-typedef int (recdir_func)(struct dentry *, struct dentry *);
+typedef int (recdir_func)(struct dentry *, struct dentry *, struct nfsd_net *);
struct name_list {
char name[HEXDIR_LEN];
@@ -219,10 +261,10 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen,
}
static int
-nfsd4_list_rec_dir(recdir_func *f)
+nfsd4_list_rec_dir(recdir_func *f, struct nfsd_net *nn)
{
const struct cred *original_cred;
- struct dentry *dir = rec_file->f_path.dentry;
+ struct dentry *dir = nn->rec_file->f_path.dentry;
LIST_HEAD(names);
int status;
@@ -230,13 +272,13 @@ nfsd4_list_rec_dir(recdir_func *f)
if (status < 0)
return status;
- status = vfs_llseek(rec_file, 0, SEEK_SET);
+ status = vfs_llseek(nn->rec_file, 0, SEEK_SET);
if (status < 0) {
nfs4_reset_creds(original_cred);
return status;
}
- status = vfs_readdir(rec_file, nfsd4_build_namelist, &names);
+ status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
while (!list_empty(&names)) {
struct name_list *entry;
@@ -248,7 +290,7 @@ nfsd4_list_rec_dir(recdir_func *f)
status = PTR_ERR(dentry);
break;
}
- status = f(dir, dentry);
+ status = f(dir, dentry, nn);
dput(dentry);
}
list_del(&entry->list);
@@ -260,14 +302,14 @@ nfsd4_list_rec_dir(recdir_func *f)
}
static int
-nfsd4_unlink_clid_dir(char *name, int namlen)
+nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
{
struct dentry *dir, *dentry;
int status;
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
- dir = rec_file->f_path.dentry;
+ dir = nn->rec_file->f_path.dentry;
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, dir, namlen);
if (IS_ERR(dentry)) {
@@ -289,37 +331,52 @@ static void
nfsd4_remove_clid_dir(struct nfs4_client *clp)
{
const struct cred *original_cred;
+ struct nfs4_client_reclaim *crp;
+ char dname[HEXDIR_LEN];
int status;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- if (!rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
+ if (!nn->rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return;
- status = mnt_want_write_file(rec_file);
+ status = nfs4_make_rec_clidname(dname, &clp->cl_name);
+ if (status)
+ return legacy_recdir_name_error(status);
+
+ status = mnt_want_write_file(nn->rec_file);
if (status)
goto out;
clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
status = nfs4_save_creds(&original_cred);
if (status < 0)
- goto out;
+ goto out_drop_write;
- status = nfsd4_unlink_clid_dir(clp->cl_recdir, HEXDIR_LEN-1);
+ status = nfsd4_unlink_clid_dir(dname, HEXDIR_LEN-1, nn);
nfs4_reset_creds(original_cred);
- if (status == 0)
- vfs_fsync(rec_file, 0);
- mnt_drop_write_file(rec_file);
+ if (status == 0) {
+ vfs_fsync(nn->rec_file, 0);
+ if (nn->in_grace) {
+ /* remove reclaim record */
+ crp = nfsd4_find_reclaim_client(dname, nn);
+ if (crp)
+ nfs4_remove_reclaim_record(crp, nn);
+ }
+ }
+out_drop_write:
+ mnt_drop_write_file(nn->rec_file);
out:
if (status)
printk("NFSD: Failed to remove expired client state directory"
- " %.*s\n", HEXDIR_LEN, clp->cl_recdir);
+ " %.*s\n", HEXDIR_LEN, dname);
}
static int
-purge_old(struct dentry *parent, struct dentry *child)
+purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
int status;
- if (nfs4_has_reclaimed_state(child->d_name.name, false))
+ if (nfs4_has_reclaimed_state(child->d_name.name, nn))
return 0;
status = vfs_rmdir(parent->d_inode, child);
@@ -331,27 +388,29 @@ purge_old(struct dentry *parent, struct dentry *child)
}
static void
-nfsd4_recdir_purge_old(struct net *net, time_t boot_time)
+nfsd4_recdir_purge_old(struct nfsd_net *nn, time_t boot_time)
{
int status;
- if (!rec_file)
+ nn->in_grace = false;
+ if (!nn->rec_file)
return;
- status = mnt_want_write_file(rec_file);
+ status = mnt_want_write_file(nn->rec_file);
if (status)
goto out;
- status = nfsd4_list_rec_dir(purge_old);
+ status = nfsd4_list_rec_dir(purge_old, nn);
if (status == 0)
- vfs_fsync(rec_file, 0);
- mnt_drop_write_file(rec_file);
+ vfs_fsync(nn->rec_file, 0);
+ mnt_drop_write_file(nn->rec_file);
out:
+ nfs4_release_reclaim(nn);
if (status)
printk("nfsd4: failed to purge old clients from recovery"
- " directory %s\n", rec_file->f_path.dentry->d_name.name);
+ " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
}
static int
-load_recdir(struct dentry *parent, struct dentry *child)
+load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
{
if (child->d_name.len != HEXDIR_LEN - 1) {
printk("nfsd4: illegal name %s in recovery directory\n",
@@ -359,21 +418,22 @@ load_recdir(struct dentry *parent, struct dentry *child)
/* Keep trying; maybe the others are OK: */
return 0;
}
- nfs4_client_to_reclaim(child->d_name.name);
+ nfs4_client_to_reclaim(child->d_name.name, nn);
return 0;
}
static int
-nfsd4_recdir_load(void) {
+nfsd4_recdir_load(struct net *net) {
int status;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- if (!rec_file)
+ if (!nn->rec_file)
return 0;
- status = nfsd4_list_rec_dir(load_recdir);
+ status = nfsd4_list_rec_dir(load_recdir, nn);
if (status)
printk("nfsd4: failed loading clients from recovery"
- " directory %s\n", rec_file->f_path.dentry->d_name.name);
+ " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
return status;
}
@@ -382,15 +442,16 @@ nfsd4_recdir_load(void) {
*/
static int
-nfsd4_init_recdir(void)
+nfsd4_init_recdir(struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
const struct cred *original_cred;
int status;
printk("NFSD: Using %s as the NFSv4 state recovery directory\n",
user_recovery_dirname);
- BUG_ON(rec_file);
+ BUG_ON(nn->rec_file);
status = nfs4_save_creds(&original_cred);
if (status < 0) {
@@ -400,23 +461,65 @@ nfsd4_init_recdir(void)
return status;
}
- rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
- if (IS_ERR(rec_file)) {
+ nn->rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
+ if (IS_ERR(nn->rec_file)) {
printk("NFSD: unable to find recovery directory %s\n",
user_recovery_dirname);
- status = PTR_ERR(rec_file);
- rec_file = NULL;
+ status = PTR_ERR(nn->rec_file);
+ nn->rec_file = NULL;
}
nfs4_reset_creds(original_cred);
+ if (!status)
+ nn->in_grace = true;
return status;
}
+
+static int
+nfs4_legacy_state_init(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int i;
+
+ nn->reclaim_str_hashtbl = kmalloc(sizeof(struct list_head) *
+ CLIENT_HASH_SIZE, GFP_KERNEL);
+ if (!nn->reclaim_str_hashtbl)
+ return -ENOMEM;
+
+ for (i = 0; i < CLIENT_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nn->reclaim_str_hashtbl[i]);
+ nn->reclaim_str_hashtbl_size = 0;
+
+ return 0;
+}
+
+static void
+nfs4_legacy_state_shutdown(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ kfree(nn->reclaim_str_hashtbl);
+}
+
static int
nfsd4_load_reboot_recovery_data(struct net *net)
{
int status;
+ status = nfsd4_init_recdir(net);
+ if (!status)
+ status = nfsd4_recdir_load(net);
+ if (status)
+ printk(KERN_ERR "NFSD: Failure reading reboot recovery data\n");
+ return status;
+}
+
+static int
+nfsd4_legacy_tracking_init(struct net *net)
+{
+ int status;
+
/* XXX: The legacy code won't work in a container */
if (net != &init_net) {
WARN(1, KERN_ERR "NFSD: attempt to initialize legacy client "
@@ -424,30 +527,37 @@ nfsd4_load_reboot_recovery_data(struct net *net)
return -EINVAL;
}
- nfs4_lock_state();
- status = nfsd4_init_recdir();
- if (!status)
- status = nfsd4_recdir_load();
- nfs4_unlock_state();
+ status = nfs4_legacy_state_init(net);
if (status)
- printk(KERN_ERR "NFSD: Failure reading reboot recovery data\n");
+ return status;
+
+ status = nfsd4_load_reboot_recovery_data(net);
+ if (status)
+ goto err;
+ return 0;
+
+err:
+ nfs4_legacy_state_shutdown(net);
return status;
}
static void
-nfsd4_shutdown_recdir(void)
+nfsd4_shutdown_recdir(struct nfsd_net *nn)
{
- if (!rec_file)
+ if (!nn->rec_file)
return;
- fput(rec_file);
- rec_file = NULL;
+ fput(nn->rec_file);
+ nn->rec_file = NULL;
}
static void
nfsd4_legacy_tracking_exit(struct net *net)
{
- nfs4_release_reclaim();
- nfsd4_shutdown_recdir();
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs4_release_reclaim(nn);
+ nfsd4_shutdown_recdir(nn);
+ nfs4_legacy_state_shutdown(net);
}
/*
@@ -480,13 +590,26 @@ nfs4_recoverydir(void)
static int
nfsd4_check_legacy_client(struct nfs4_client *clp)
{
+ int status;
+ char dname[HEXDIR_LEN];
+ struct nfs4_client_reclaim *crp;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
/* did we already find that this client is stable? */
if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
return 0;
+ status = nfs4_make_rec_clidname(dname, &clp->cl_name);
+ if (status) {
+ legacy_recdir_name_error(status);
+ return status;
+ }
+
/* look for it in the reclaim hashtable otherwise */
- if (nfsd4_find_reclaim_client(clp)) {
+ crp = nfsd4_find_reclaim_client(dname, nn);
+ if (crp) {
set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ crp->cr_clp = clp;
return 0;
}
@@ -494,7 +617,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
}
static struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
- .init = nfsd4_load_reboot_recovery_data,
+ .init = nfsd4_legacy_tracking_init,
.exit = nfsd4_legacy_tracking_exit,
.create = nfsd4_create_clid_dir,
.remove = nfsd4_remove_clid_dir,
@@ -785,8 +908,7 @@ nfsd4_cld_create(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
- /* FIXME: determine net from clp */
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if it's already stored */
@@ -823,8 +945,7 @@ nfsd4_cld_remove(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
- /* FIXME: determine net from clp */
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if it's already removed */
@@ -861,8 +982,7 @@ nfsd4_cld_check(struct nfs4_client *clp)
{
int ret;
struct cld_upcall *cup;
- /* FIXME: determine net from clp */
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
/* Don't upcall if one was already stored during this grace pd */
@@ -892,11 +1012,10 @@ nfsd4_cld_check(struct nfs4_client *clp)
}
static void
-nfsd4_cld_grace_done(struct net *net, time_t boot_time)
+nfsd4_cld_grace_done(struct nfsd_net *nn, time_t boot_time)
{
int ret;
struct cld_upcall *cup;
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct cld_net *cn = nn->cld_net;
cup = alloc_cld_upcall(cn);
@@ -926,28 +1045,261 @@ static struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = {
.grace_done = nfsd4_cld_grace_done,
};
+/* upcall via usermodehelper */
+static char cltrack_prog[PATH_MAX] = "/sbin/nfsdcltrack";
+module_param_string(cltrack_prog, cltrack_prog, sizeof(cltrack_prog),
+ S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cltrack_prog, "Path to the nfsdcltrack upcall program");
+
+static bool cltrack_legacy_disable;
+module_param(cltrack_legacy_disable, bool, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cltrack_legacy_disable,
+ "Disable legacy recoverydir conversion. Default: false");
+
+#define LEGACY_TOPDIR_ENV_PREFIX "NFSDCLTRACK_LEGACY_TOPDIR="
+#define LEGACY_RECDIR_ENV_PREFIX "NFSDCLTRACK_LEGACY_RECDIR="
+
+static char *
+nfsd4_cltrack_legacy_topdir(void)
+{
+ int copied;
+ size_t len;
+ char *result;
+
+ if (cltrack_legacy_disable)
+ return NULL;
+
+ len = strlen(LEGACY_TOPDIR_ENV_PREFIX) +
+ strlen(nfs4_recoverydir()) + 1;
+
+ result = kmalloc(len, GFP_KERNEL);
+ if (!result)
+ return result;
+
+ copied = snprintf(result, len, LEGACY_TOPDIR_ENV_PREFIX "%s",
+ nfs4_recoverydir());
+ if (copied >= len) {
+ /* just return nothing if output was truncated */
+ kfree(result);
+ return NULL;
+ }
+
+ return result;
+}
+
+static char *
+nfsd4_cltrack_legacy_recdir(const struct xdr_netobj *name)
+{
+ int copied;
+ size_t len;
+ char *result;
+
+ if (cltrack_legacy_disable)
+ return NULL;
+
+ /* +1 is for '/' between "topdir" and "recdir" */
+ len = strlen(LEGACY_RECDIR_ENV_PREFIX) +
+ strlen(nfs4_recoverydir()) + 1 + HEXDIR_LEN;
+
+ result = kmalloc(len, GFP_KERNEL);
+ if (!result)
+ return result;
+
+ copied = snprintf(result, len, LEGACY_RECDIR_ENV_PREFIX "%s/",
+ nfs4_recoverydir());
+ if (copied > (len - HEXDIR_LEN)) {
+ /* just return nothing if output will be truncated */
+ kfree(result);
+ return NULL;
+ }
+
+ copied = nfs4_make_rec_clidname(result + copied, name);
+ if (copied) {
+ kfree(result);
+ return NULL;
+ }
+
+ return result;
+}
+
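
Reviewer note: nfsd4_cltrack_legacy_recdir() above sizes its buffer for prefix + recoverydir + '/' + HEXDIR_LEN and returns NULL rather than hand a truncated path to the upcall. A small userspace sketch of the same snprintf-with-truncation-guard idiom; the directory and the hex name below are made-up placeholders, not real recovery data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HEXDIR_LEN 33                           /* 32 hex chars + NUL, as in nfsd */
#define PREFIX "NFSDCLTRACK_LEGACY_RECDIR="

static char *legacy_recdir_env(const char *topdir, const char *hexname)
{
        size_t len = strlen(PREFIX) + strlen(topdir) + 1 + HEXDIR_LEN;
        char *result = malloc(len);
        int copied;

        if (!result)
                return NULL;

        copied = snprintf(result, len, PREFIX "%s/", topdir);
        if (copied > (int)(len - HEXDIR_LEN)) { /* no room left for the name */
                free(result);
                return NULL;
        }
        /* the kernel appends the md5-derived directory name here */
        snprintf(result + copied, HEXDIR_LEN, "%s", hexname);
        return result;
}

int main(void)
{
        char *env = legacy_recdir_env("/var/lib/nfs/v4recovery",
                                      "0123456789abcdef0123456789abcdef");

        if (env) {
                printf("%s\n", env);
                free(env);
        }
        return 0;
}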
+static int
+nfsd4_umh_cltrack_upcall(char *cmd, char *arg, char *legacy)
+{
+ char *envp[2];
+ char *argv[4];
+ int ret;
+
+ if (unlikely(!cltrack_prog[0])) {
+ dprintk("%s: cltrack_prog is disabled\n", __func__);
+ return -EACCES;
+ }
+
+ dprintk("%s: cmd: %s\n", __func__, cmd);
+ dprintk("%s: arg: %s\n", __func__, arg ? arg : "(null)");
+ dprintk("%s: legacy: %s\n", __func__, legacy ? legacy : "(null)");
+
+ envp[0] = legacy;
+ envp[1] = NULL;
+
+ argv[0] = (char *)cltrack_prog;
+ argv[1] = cmd;
+ argv[2] = arg;
+ argv[3] = NULL;
+
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ /*
+ * Disable the upcall mechanism if we're getting an ENOENT or EACCES
+ * error. The admin can re-enable it on the fly by using sysfs
+ * once the problem has been fixed.
+ */
+ if (ret == -ENOENT || ret == -EACCES) {
+ dprintk("NFSD: %s was not found or isn't executable (%d). "
+ "Setting cltrack_prog to blank string!",
+ cltrack_prog, ret);
+ cltrack_prog[0] = '\0';
+ }
+ dprintk("%s: %s return value: %d\n", __func__, cltrack_prog, ret);
+
+ return ret;
+}
+
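
Reviewer note: to make the shape of the upcall above concrete, a "check" request ends up invoking roughly "nfsdcltrack check <hex client id>" with an optional NFSDCLTRACK_LEGACY_RECDIR= entry in the helper's environment. A hedged userspace sketch that builds the same argv/envp and execs the program directly; call_usermodehelper() is the in-kernel mechanism, and the program path, client id, and directory below are placeholders:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
        /* placeholder values; the kernel passes the real hex-encoded cl_name */
        char *argv[] = { "/sbin/nfsdcltrack", "check",
                         "4c696e7578204e465376342e31", NULL };
        char *envp[] = { "NFSDCLTRACK_LEGACY_RECDIR=/var/lib/nfs/v4recovery/"
                         "0123456789abcdef0123456789abcdef", NULL };
        pid_t pid = fork();

        if (pid == 0) {
                execve(argv[0], argv, envp);
                _exit(127);             /* helper missing or not executable */
        } else if (pid > 0) {
                int status;

                waitpid(pid, &status, 0);
                printf("helper exit status: %d\n", WEXITSTATUS(status));
        }
        return 0;
}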
+static char *
+bin_to_hex_dup(const unsigned char *src, int srclen)
+{
+ int i;
+ char *buf, *hex;
+
+ /* +1 for terminating NULL */
+ buf = kmalloc((srclen * 2) + 1, GFP_KERNEL);
+ if (!buf)
+ return buf;
+
+ hex = buf;
+ for (i = 0; i < srclen; i++) {
+ sprintf(hex, "%2.2x", *src++);
+ hex += 2;
+ }
+ return buf;
+}
+
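
Reviewer note: bin_to_hex_dup() above is what turns the raw cl_name into the identifier the helper sees, two lowercase hex digits per byte. The same conversion in plain userspace C, for reference (the sample name is arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *bin_to_hex_dup(const unsigned char *src, int srclen)
{
        char *buf, *hex;
        int i;

        buf = malloc(srclen * 2 + 1);   /* +1 for the terminating NUL */
        if (!buf)
                return NULL;

        hex = buf;
        for (i = 0; i < srclen; i++) {
                sprintf(hex, "%2.2x", *src++);
                hex += 2;
        }
        return buf;
}

int main(void)
{
        const unsigned char name[] = "Linux NFSv4.1 client";
        char *hexid = bin_to_hex_dup(name, (int)strlen((const char *)name));

        if (hexid) {
                printf("%s\n", hexid);  /* e.g. 4c696e7578204e4653... */
                free(hexid);
        }
        return 0;
}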
+static int
+nfsd4_umh_cltrack_init(struct net __attribute__((unused)) *net)
+{
+ return nfsd4_umh_cltrack_upcall("init", NULL, NULL);
+}
+
+static void
+nfsd4_umh_cltrack_create(struct nfs4_client *clp)
+{
+ char *hexid;
+
+ hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
+ if (!hexid) {
+ dprintk("%s: can't allocate memory for upcall!\n", __func__);
+ return;
+ }
+ nfsd4_umh_cltrack_upcall("create", hexid, NULL);
+ kfree(hexid);
+}
+
+static void
+nfsd4_umh_cltrack_remove(struct nfs4_client *clp)
+{
+ char *hexid;
+
+ hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
+ if (!hexid) {
+ dprintk("%s: can't allocate memory for upcall!\n", __func__);
+ return;
+ }
+ nfsd4_umh_cltrack_upcall("remove", hexid, NULL);
+ kfree(hexid);
+}
+
+static int
+nfsd4_umh_cltrack_check(struct nfs4_client *clp)
+{
+ int ret;
+ char *hexid, *legacy;
+
+ hexid = bin_to_hex_dup(clp->cl_name.data, clp->cl_name.len);
+ if (!hexid) {
+ dprintk("%s: can't allocate memory for upcall!\n", __func__);
+ return -ENOMEM;
+ }
+ legacy = nfsd4_cltrack_legacy_recdir(&clp->cl_name);
+ ret = nfsd4_umh_cltrack_upcall("check", hexid, legacy);
+ kfree(legacy);
+ kfree(hexid);
+ return ret;
+}
+
+static void
+nfsd4_umh_cltrack_grace_done(struct nfsd_net __attribute__((unused)) *nn,
+ time_t boot_time)
+{
+ char *legacy;
+ char timestr[22]; /* FIXME: better way to determine max size? */
+
+ sprintf(timestr, "%ld", boot_time);
+ legacy = nfsd4_cltrack_legacy_topdir();
+ nfsd4_umh_cltrack_upcall("gracedone", timestr, legacy);
+ kfree(legacy);
+}
+
+static struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = {
+ .init = nfsd4_umh_cltrack_init,
+ .exit = NULL,
+ .create = nfsd4_umh_cltrack_create,
+ .remove = nfsd4_umh_cltrack_remove,
+ .check = nfsd4_umh_cltrack_check,
+ .grace_done = nfsd4_umh_cltrack_grace_done,
+};
+
int
nfsd4_client_tracking_init(struct net *net)
{
int status;
struct path path;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- if (!client_tracking_ops) {
- client_tracking_ops = &nfsd4_cld_tracking_ops;
- status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
- if (!status) {
- if (S_ISDIR(path.dentry->d_inode->i_mode))
- client_tracking_ops =
- &nfsd4_legacy_tracking_ops;
- path_put(&path);
- }
+ /* just run the init if the method is already decided */
+ if (nn->client_tracking_ops)
+ goto do_init;
+
+ /*
+ * First, try a UMH upcall. It should succeed or fail quickly, so
+ * there's little harm in trying that first.
+ */
+ nn->client_tracking_ops = &nfsd4_umh_tracking_ops;
+ status = nn->client_tracking_ops->init(net);
+ if (!status)
+ return status;
+
+ /*
+ * See if the recoverydir exists and is a directory. If it is,
+ * then use the legacy ops.
+ */
+ nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
+ status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
+ if (!status) {
+ status = S_ISDIR(path.dentry->d_inode->i_mode);
+ path_put(&path);
+ if (status)
+ goto do_init;
}
- status = client_tracking_ops->init(net);
+ /* Finally, try to use nfsdcld */
+ nn->client_tracking_ops = &nfsd4_cld_tracking_ops;
+ printk(KERN_WARNING "NFSD: the nfsdcld client tracking upcall will be "
+ "removed in 3.10. Please transition to using "
+ "nfsdcltrack.\n");
+do_init:
+ status = nn->client_tracking_ops->init(net);
if (status) {
printk(KERN_WARNING "NFSD: Unable to initialize client "
"recovery tracking! (%d)\n", status);
- client_tracking_ops = NULL;
+ nn->client_tracking_ops = NULL;
}
return status;
}
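
Reviewer note: the rewritten nfsd4_client_tracking_init() above tries the trackers in a fixed order: the nfsdcltrack usermode helper first, then the legacy recoverydir if it exists and is a directory, and finally the deprecated nfsdcld upcall. A small sketch of that selection order as a plain function, where the two booleans stand in for the UMH init result and the kern_path/S_ISDIR probe:

#include <stdio.h>
#include <stdbool.h>

static const char *pick_tracker(bool umh_init_ok, bool recoverydir_is_dir)
{
        if (umh_init_ok)
                return "umh (nfsdcltrack)";
        if (recoverydir_is_dir)
                return "legacy recoverydir";
        return "nfsdcld (deprecated)";
}

int main(void)
{
        printf("%s\n", pick_tracker(false, true));      /* helper missing, dir present */
        printf("%s\n", pick_tracker(false, false));     /* neither available */
        return 0;
}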
@@ -955,40 +1307,49 @@ nfsd4_client_tracking_init(struct net *net)
void
nfsd4_client_tracking_exit(struct net *net)
{
- if (client_tracking_ops) {
- client_tracking_ops->exit(net);
- client_tracking_ops = NULL;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ if (nn->client_tracking_ops) {
+ if (nn->client_tracking_ops->exit)
+ nn->client_tracking_ops->exit(net);
+ nn->client_tracking_ops = NULL;
}
}
void
nfsd4_client_record_create(struct nfs4_client *clp)
{
- if (client_tracking_ops)
- client_tracking_ops->create(clp);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ if (nn->client_tracking_ops)
+ nn->client_tracking_ops->create(clp);
}
void
nfsd4_client_record_remove(struct nfs4_client *clp)
{
- if (client_tracking_ops)
- client_tracking_ops->remove(clp);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ if (nn->client_tracking_ops)
+ nn->client_tracking_ops->remove(clp);
}
int
nfsd4_client_record_check(struct nfs4_client *clp)
{
- if (client_tracking_ops)
- return client_tracking_ops->check(clp);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ if (nn->client_tracking_ops)
+ return nn->client_tracking_ops->check(clp);
return -EOPNOTSUPP;
}
void
-nfsd4_record_grace_done(struct net *net, time_t boot_time)
+nfsd4_record_grace_done(struct nfsd_net *nn, time_t boot_time)
{
- if (client_tracking_ops)
- client_tracking_ops->grace_done(net, boot_time);
+ if (nn->client_tracking_ops)
+ nn->client_tracking_ops->grace_done(nn, boot_time);
}
static int
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d0237f872cc..ac8ed96c419 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -44,16 +44,11 @@
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
-#include "fault_inject.h"
#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
-/* Globals */
-time_t nfsd4_lease = 90; /* default lease time */
-time_t nfsd4_grace = 90;
-
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
@@ -176,8 +171,6 @@ static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
return ret & OWNER_HASH_MASK;
}
-static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
-
/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
@@ -192,7 +185,7 @@ static struct list_head file_hashtbl[FILE_HASH_SIZE];
static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
- BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
+ WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
atomic_inc(&fp->fi_access[oflag]);
}
@@ -251,7 +244,7 @@ static inline int get_new_stid(struct nfs4_stid *stid)
* preallocations that can exist at a time, but the state lock
* prevents anyone from using ours before we get here:
*/
- BUG_ON(error);
+ WARN_ON_ONCE(error);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
* I don't think it is for 4.1. But with 4.0 I worry that, for
@@ -340,7 +333,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
dp->dl_time = 0;
atomic_set(&dp->dl_count, 1);
- INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
+ nfsd4_init_callback(&dp->dl_recall);
return dp;
}
@@ -390,14 +383,6 @@ unhash_delegation(struct nfs4_delegation *dp)
* SETCLIENTID state
*/
-/* client_lock protects the client lru list and session hash table */
-static DEFINE_SPINLOCK(client_lock);
-
-/* Hash tables for nfs4_clientid state */
-#define CLIENT_HASH_BITS 4
-#define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
-#define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
-
static unsigned int clientid_hashval(u32 id)
{
return id & CLIENT_HASH_MASK;
@@ -409,31 +394,6 @@ static unsigned int clientstr_hashval(const char *name)
}
/*
- * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
- * used in reboot/reset lease grace period processing
- *
- * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
- * setclientid_confirmed info.
- *
- * unconf_str_hastbl[] and unconf_id_hashtbl[] hold unconfirmed
- * setclientid info.
- *
- * client_lru holds client queue ordered by nfs4_client.cl_time
- * for lease renewal.
- *
- * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
- * for last close replay.
- */
-static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
-static int reclaim_str_hashtbl_size = 0;
-static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
-static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
-static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
-static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
-static struct list_head client_lru;
-static struct list_head close_lru;
-
-/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
* only what share bits are currently in force, but also what
@@ -526,7 +486,8 @@ static int nfs4_access_to_omode(u32 access)
case NFS4_SHARE_ACCESS_BOTH:
return O_RDWR;
}
- BUG();
+ WARN_ON_ONCE(1);
+ return O_RDONLY;
}
/* release all access and file references for a given stateid */
@@ -652,9 +613,6 @@ static void release_openowner(struct nfs4_openowner *oo)
nfs4_free_openowner(oo);
}
-#define SESSION_HASH_SIZE 512
-static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];
-
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
@@ -785,9 +743,12 @@ out_free:
return NULL;
}
-static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
+static void init_forechannel_attrs(struct nfsd4_channel_attrs *new,
+ struct nfsd4_channel_attrs *req,
+ int numslots, int slotsize,
+ struct nfsd_net *nn)
{
- u32 maxrpc = nfsd_serv->sv_max_mesg;
+ u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
new->maxreqs = numslots;
new->maxresp_cached = min_t(u32, req->maxresp_cached,
@@ -906,21 +867,27 @@ static void __free_session(struct nfsd4_session *ses)
static void free_session(struct kref *kref)
{
struct nfsd4_session *ses;
+ struct nfsd_net *nn;
- lockdep_assert_held(&client_lock);
ses = container_of(kref, struct nfsd4_session, se_ref);
+ nn = net_generic(ses->se_client->net, nfsd_net_id);
+
+ lockdep_assert_held(&nn->client_lock);
nfsd4_del_conns(ses);
__free_session(ses);
}
void nfsd4_put_session(struct nfsd4_session *ses)
{
- spin_lock(&client_lock);
+ struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
+
+ spin_lock(&nn->client_lock);
nfsd4_put_session_locked(ses);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
}
-static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
+static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan,
+ struct nfsd_net *nn)
{
struct nfsd4_session *new;
int numslots, slotsize;
@@ -941,13 +908,14 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
return NULL;
}
- init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
+ init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn);
return new;
}
-static struct nfsd4_session *init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
+static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
int idx;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
new->se_client = clp;
gen_sessionid(new);
@@ -957,14 +925,15 @@ static struct nfsd4_session *init_session(struct svc_rqst *rqstp, struct nfsd4_s
new->se_cb_seq_nr = 1;
new->se_flags = cses->flags;
new->se_cb_prog = cses->callback_prog;
+ new->se_cb_sec = cses->cb_sec;
kref_init(&new->se_ref);
idx = hash_sessionid(&new->se_sessionid);
- spin_lock(&client_lock);
- list_add(&new->se_hash, &sessionid_hashtbl[idx]);
+ spin_lock(&nn->client_lock);
+ list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
spin_lock(&clp->cl_lock);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&clp->cl_lock);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
if (cses->flags & SESSION4_BACK_CHAN) {
struct sockaddr *sa = svc_addr(rqstp);
@@ -978,20 +947,20 @@ static struct nfsd4_session *init_session(struct svc_rqst *rqstp, struct nfsd4_s
rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
}
- return new;
}
/* caller must hold client_lock */
static struct nfsd4_session *
-find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
+find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
struct nfsd4_session *elem;
int idx;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dump_sessionid(__func__, sessionid);
idx = hash_sessionid(sessionid);
/* Search in the appropriate list */
- list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
+ list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
if (!memcmp(elem->se_sessionid.data, sessionid->data,
NFS4_MAX_SESSIONID_LEN)) {
return elem;
@@ -1016,6 +985,8 @@ unhash_session(struct nfsd4_session *ses)
static inline void
renew_client_locked(struct nfs4_client *clp)
{
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
if (is_client_expired(clp)) {
WARN_ON(1);
printk("%s: client (clientid %08x/%08x) already expired\n",
@@ -1028,16 +999,18 @@ renew_client_locked(struct nfs4_client *clp)
dprintk("renewing client (clientid %08x/%08x)\n",
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
- list_move_tail(&clp->cl_lru, &client_lru);
+ list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = get_seconds();
}
static inline void
renew_client(struct nfs4_client *clp)
{
- spin_lock(&client_lock);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ spin_lock(&nn->client_lock);
renew_client_locked(clp);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
@@ -1075,7 +1048,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static inline void
free_client(struct nfs4_client *clp)
{
- lockdep_assert_held(&client_lock);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ lockdep_assert_held(&nn->client_lock);
while (!list_empty(&clp->cl_sessions)) {
struct nfsd4_session *ses;
ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1092,15 +1067,16 @@ void
release_session_client(struct nfsd4_session *session)
{
struct nfs4_client *clp = session->se_client;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
+ if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
return;
if (is_client_expired(clp)) {
free_client(clp);
session->se_client = NULL;
} else
renew_client_locked(clp);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
}
/* must be called under the client_lock */
@@ -1123,6 +1099,7 @@ destroy_client(struct nfs4_client *clp)
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head reaplist;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
@@ -1144,12 +1121,15 @@ destroy_client(struct nfs4_client *clp)
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
list_del(&clp->cl_idhash);
- list_del(&clp->cl_strhash);
- spin_lock(&client_lock);
+ if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
+ rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
+ else
+ rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
+ spin_lock(&nn->client_lock);
unhash_client_locked(clp);
if (atomic_read(&clp->cl_refcount) == 0)
free_client(clp);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
}
static void expire_client(struct nfs4_client *clp)
@@ -1187,6 +1167,17 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
return 0;
}
+static long long
+compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
+{
+ long long res;
+
+ res = o1->len - o2->len;
+ if (res)
+ return res;
+ return (long long)memcmp(o1->data, o2->data, o1->len);
+}
+
static int same_name(const char *n1, const char *n2)
{
return 0 == memcmp(n1, n2, HEXDIR_LEN);
@@ -1247,10 +1238,9 @@ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
-static void gen_clid(struct nfs4_client *clp)
+static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
static u32 current_clientid = 1;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
clp->cl_clientid.cl_boot = nn->boot_time;
clp->cl_clientid.cl_id = current_clientid++;
@@ -1283,12 +1273,14 @@ static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t
return NULL;
}
-static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
struct nfs4_client *clp;
struct sockaddr *sa = svc_addr(rqstp);
int ret;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
clp = alloc_client(name);
if (clp == NULL)
@@ -1297,23 +1289,21 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
INIT_LIST_HEAD(&clp->cl_sessions);
ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
if (ret) {
- spin_lock(&client_lock);
+ spin_lock(&nn->client_lock);
free_client(clp);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
return NULL;
}
idr_init(&clp->cl_stateids);
- memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_refcount, 0);
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
INIT_LIST_HEAD(&clp->cl_idhash);
- INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_callbacks);
spin_lock_init(&clp->cl_lock);
- INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
+ nfsd4_init_callback(&clp->cl_cb_null);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
@@ -1321,17 +1311,60 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
gen_confirm(clp);
clp->cl_cb_session = NULL;
+ clp->net = net;
return clp;
}
static void
-add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
+add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct nfs4_client *clp;
+
+ while (*new) {
+ clp = rb_entry(*new, struct nfs4_client, cl_namenode);
+ parent = *new;
+
+ if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ rb_link_node(&new_clp->cl_namenode, parent, new);
+ rb_insert_color(&new_clp->cl_namenode, root);
+}
+
+static struct nfs4_client *
+find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
+{
+ long long cmp;
+ struct rb_node *node = root->rb_node;
+ struct nfs4_client *clp;
+
+ while (node) {
+ clp = rb_entry(node, struct nfs4_client, cl_namenode);
+ cmp = compare_blob(&clp->cl_name, name);
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else
+ return clp;
+ }
+ return NULL;
+}
+
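
Reviewer note: the rb-tree added above is keyed directly on the raw client name and ordered by compare_blob(): shorter blobs sort first, equal-length blobs fall back to memcmp(). A userspace sketch of that ordering used with qsort, with the subtraction done in a signed type to keep this example's comparator symmetric; it illustrates the comparison only, not the kernel's rb-tree plumbing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct netobj {
        unsigned int len;
        const unsigned char *data;
};

/* Order by length first, then by content, as compare_blob() does. */
static long long compare_blob(const struct netobj *o1, const struct netobj *o2)
{
        long long res = (long long)o1->len - (long long)o2->len;

        if (res)
                return res;
        return memcmp(o1->data, o2->data, o1->len);
}

static int cmp(const void *a, const void *b)
{
        long long res = compare_blob(a, b);

        return (res > 0) - (res < 0);
}

int main(void)
{
        struct netobj names[] = {
                { 7, (const unsigned char *)"host-42" },
                { 4, (const unsigned char *)"abcd" },
                { 4, (const unsigned char *)"abcc" },
        };

        qsort(names, 3, sizeof(names[0]), cmp);
        for (int i = 0; i < 3; i++)
                printf("%.*s\n", (int)names[i].len, names[i].data);
        return 0;
}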
+static void
+add_to_unconfirmed(struct nfs4_client *clp)
{
unsigned int idhashval;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
+ clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
+ add_clp_to_name_tree(clp, &nn->unconf_name_tree);
idhashval = clientid_hashval(clp->cl_clientid.cl_id);
- list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
+ list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
renew_client(clp);
}
@@ -1339,22 +1372,23 @@ static void
move_to_confirmed(struct nfs4_client *clp)
{
unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
- unsigned int strhashval;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
- list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
- strhashval = clientstr_hashval(clp->cl_recdir);
- list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
+ list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
+ rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
+ add_clp_to_name_tree(clp, &nn->conf_name_tree);
+ set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
renew_client(clp);
}
static struct nfs4_client *
-find_confirmed_client(clientid_t *clid, bool sessions)
+find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
- list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
+ list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid)) {
if ((bool)clp->cl_minorversion != sessions)
return NULL;
@@ -1366,12 +1400,12 @@ find_confirmed_client(clientid_t *clid, bool sessions)
}
static struct nfs4_client *
-find_unconfirmed_client(clientid_t *clid, bool sessions)
+find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
- list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
+ list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid)) {
if ((bool)clp->cl_minorversion != sessions)
return NULL;
@@ -1387,27 +1421,15 @@ static bool clp_used_exchangeid(struct nfs4_client *clp)
}
static struct nfs4_client *
-find_confirmed_client_by_str(const char *dname, unsigned int hashval)
+find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
- struct nfs4_client *clp;
-
- list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
- if (same_name(clp->cl_recdir, dname))
- return clp;
- }
- return NULL;
+ return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
static struct nfs4_client *
-find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
+find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
- struct nfs4_client *clp;
-
- list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
- if (same_name(clp->cl_recdir, dname))
- return clp;
- }
- return NULL;
+ return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
@@ -1428,7 +1450,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
else
goto out_err;
- conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val,
+ conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
se->se_callback_addr_len,
(struct sockaddr *)&conn->cb_addr,
sizeof(conn->cb_addr));
@@ -1572,12 +1594,11 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
{
struct nfs4_client *unconf, *conf, *new;
__be32 status;
- unsigned int strhashval;
- char dname[HEXDIR_LEN];
char addr_str[INET6_ADDRSTRLEN];
nfs4_verifier verf = exid->verifier;
struct sockaddr *sa = svc_addr(rqstp);
bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
rpc_ntop(sa, addr_str, sizeof(addr_str));
dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
@@ -1592,24 +1613,16 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
switch (exid->spa_how) {
case SP4_NONE:
break;
+ default: /* checked by xdr code */
+ WARN_ON_ONCE(1);
case SP4_SSV:
- return nfserr_serverfault;
- default:
- BUG(); /* checked by xdr code */
case SP4_MACH_CRED:
return nfserr_serverfault; /* no excuse :-/ */
}
- status = nfs4_make_rec_clidname(dname, &exid->clname);
-
- if (status)
- return status;
-
- strhashval = clientstr_hashval(dname);
-
/* Cases below refer to rfc 5661 section 18.35.4: */
nfs4_lock_state();
- conf = find_confirmed_client_by_str(dname, strhashval);
+ conf = find_confirmed_client_by_name(&exid->clname, nn);
if (conf) {
bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
bool verfs_match = same_verf(&verf, &conf->cl_verifier);
@@ -1654,21 +1667,21 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
goto out;
}
- unconf = find_unconfirmed_client_by_str(dname, strhashval);
+ unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
if (unconf) /* case 4, possible retry or client restart */
expire_client(unconf);
/* case 1 (normal case) */
out_new:
- new = create_client(exid->clname, dname, rqstp, &verf);
+ new = create_client(exid->clname, rqstp, &verf);
if (new == NULL) {
status = nfserr_jukebox;
goto out;
}
new->cl_minorversion = 1;
- gen_clid(new);
- add_to_unconfirmed(new, strhashval);
+ gen_clid(new, nn);
+ add_to_unconfirmed(new);
out_copy:
exid->clientid.cl_boot = new->cl_clientid.cl_boot;
exid->clientid.cl_id = new->cl_clientid.cl_id;
@@ -1761,12 +1774,13 @@ nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_conn *conn;
struct nfsd4_clid_slot *cs_slot = NULL;
__be32 status = 0;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
return nfserr_inval;
if (check_forechannel_attrs(cr_ses->fore_channel))
return nfserr_toosmall;
- new = alloc_session(&cr_ses->fore_channel);
+ new = alloc_session(&cr_ses->fore_channel, nn);
if (!new)
return nfserr_jukebox;
status = nfserr_jukebox;
@@ -1775,8 +1789,8 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out_free_session;
nfs4_lock_state();
- unconf = find_unconfirmed_client(&cr_ses->clientid, true);
- conf = find_confirmed_client(&cr_ses->clientid, true);
+ unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
+ conf = find_confirmed_client(&cr_ses->clientid, true, nn);
if (conf) {
cs_slot = &conf->cl_cs_slot;
@@ -1789,7 +1803,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out_free_conn;
}
} else if (unconf) {
- unsigned int hash;
struct nfs4_client *old;
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
@@ -1803,8 +1816,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
status = nfserr_seq_misordered;
goto out_free_conn;
}
- hash = clientstr_hashval(unconf->cl_recdir);
- old = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+ old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old)
expire_client(old);
move_to_confirmed(unconf);
@@ -1843,14 +1855,6 @@ out_free_session:
goto out;
}
-static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
-{
- struct nfsd4_compoundres *resp = rqstp->rq_resp;
- struct nfsd4_compoundargs *argp = rqstp->rq_argp;
-
- return argp->opcnt == resp->opcnt;
-}
-
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
switch (*dir) {
@@ -1865,24 +1869,40 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
return nfserr_inval;
}
+__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
+{
+ struct nfsd4_session *session = cstate->session;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+
+ spin_lock(&nn->client_lock);
+ session->se_cb_prog = bc->bc_cb_program;
+ session->se_cb_sec = bc->bc_cb_sec;
+ spin_unlock(&nn->client_lock);
+
+ nfsd4_probe_callback(session->se_client);
+
+ return nfs_ok;
+}
+
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_bind_conn_to_session *bcts)
{
__be32 status;
struct nfsd4_conn *conn;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (!nfsd4_last_compound_op(rqstp))
return nfserr_not_only_op;
- spin_lock(&client_lock);
- cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
+ spin_lock(&nn->client_lock);
+ cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
/* Sorta weird: we only need the refcnt'ing because new_conn acquires
* client_lock itself: */
if (cstate->session) {
nfsd4_get_session(cstate->session);
atomic_inc(&cstate->session->se_client->cl_refcount);
}
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
if (!cstate->session)
return nfserr_badsession;
@@ -1910,6 +1930,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
{
struct nfsd4_session *ses;
__be32 status = nfserr_badsession;
+ struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);
/* Notes:
* - The confirmed nfs4_client->cl_sessionid holds destroyed sessionid
@@ -1923,24 +1944,24 @@ nfsd4_destroy_session(struct svc_rqst *r,
return nfserr_not_only_op;
}
dump_sessionid(__func__, &sessionid->sessionid);
- spin_lock(&client_lock);
- ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
+ spin_lock(&nn->client_lock);
+ ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
if (!ses) {
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
goto out;
}
unhash_session(ses);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
nfs4_lock_state();
nfsd4_probe_callback_sync(ses->se_client);
nfs4_unlock_state();
- spin_lock(&client_lock);
+ spin_lock(&nn->client_lock);
nfsd4_del_conns(ses);
nfsd4_put_session_locked(ses);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
status = nfs_ok;
out:
dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -2006,6 +2027,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_slot *slot;
struct nfsd4_conn *conn;
__be32 status;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (resp->opcnt != 1)
return nfserr_sequence_pos;
@@ -2018,9 +2040,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (!conn)
return nfserr_jukebox;
- spin_lock(&client_lock);
+ spin_lock(&nn->client_lock);
status = nfserr_badsession;
- session = find_in_sessionid_hashtbl(&seq->sessionid);
+ session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
if (!session)
goto out;
@@ -2094,7 +2116,7 @@ out:
}
}
kfree(conn);
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
dprintk("%s: return %d\n", __func__, ntohl(status));
return status;
}
@@ -2104,10 +2126,11 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
{
struct nfs4_client *conf, *unconf, *clp;
__be32 status = 0;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
nfs4_lock_state();
- unconf = find_unconfirmed_client(&dc->clientid, true);
- conf = find_confirmed_client(&dc->clientid, true);
+ unconf = find_unconfirmed_client(&dc->clientid, true, nn);
+ conf = find_confirmed_client(&dc->clientid, true, nn);
if (conf) {
clp = conf;
@@ -2181,20 +2204,13 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
- unsigned int strhashval;
struct nfs4_client *conf, *unconf, *new;
__be32 status;
- char dname[HEXDIR_LEN];
-
- status = nfs4_make_rec_clidname(dname, &clname);
- if (status)
- return status;
-
- strhashval = clientstr_hashval(dname);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
/* Cases below refer to rfc 3530 section 14.2.33: */
nfs4_lock_state();
- conf = find_confirmed_client_by_str(dname, strhashval);
+ conf = find_confirmed_client_by_name(&clname, nn);
if (conf) {
/* case 0: */
status = nfserr_clid_inuse;
@@ -2209,21 +2225,21 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
}
- unconf = find_unconfirmed_client_by_str(dname, strhashval);
+ unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
expire_client(unconf);
status = nfserr_jukebox;
- new = create_client(clname, dname, rqstp, &clverifier);
+ new = create_client(clname, rqstp, &clverifier);
if (new == NULL)
goto out;
if (conf && same_verf(&conf->cl_verifier, &clverifier))
/* case 1: probable callback update */
copy_clid(new, conf);
else /* case 4 (new client) or cases 2, 3 (client reboot): */
- gen_clid(new);
+ gen_clid(new, nn);
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
- add_to_unconfirmed(new, strhashval);
+ add_to_unconfirmed(new);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
@@ -2243,14 +2259,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
clientid_t * clid = &setclientid_confirm->sc_clientid;
__be32 status;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
nfs4_lock_state();
- conf = find_confirmed_client(clid, false);
- unconf = find_unconfirmed_client(clid, false);
+ conf = find_confirmed_client(clid, false, nn);
+ unconf = find_unconfirmed_client(clid, false, nn);
/*
* We try hard to give out unique clientid's, so if we get an
* attempt to confirm the same clientid with a different cred,
@@ -2276,9 +2292,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
nfsd4_probe_callback(conf);
expire_client(unconf);
} else { /* case 3: normal case; new or rebooted client */
- unsigned int hash = clientstr_hashval(unconf->cl_recdir);
-
- conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+ conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (conf)
expire_client(conf);
move_to_confirmed(unconf);
@@ -2340,7 +2354,7 @@ nfsd4_init_slabs(void)
if (openowner_slab == NULL)
goto out_nomem;
lockowner_slab = kmem_cache_create("nfsd4_lockowners",
- sizeof(struct nfs4_openowner), 0, 0, NULL);
+ sizeof(struct nfs4_lockowner), 0, 0, NULL);
if (lockowner_slab == NULL)
goto out_nomem;
file_slab = kmem_cache_create("nfsd4_files",
@@ -2404,7 +2418,9 @@ static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
- list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+
+ list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
list_add(&oo->oo_perclient, &clp->cl_openowners);
}
@@ -2444,11 +2460,13 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
}
static void
-move_to_close_lru(struct nfs4_openowner *oo)
+move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
- list_move_tail(&oo->oo_close_lru, &close_lru);
+ list_move_tail(&oo->oo_close_lru, &nn->close_lru);
oo->oo_time = get_seconds();
}
@@ -2462,13 +2480,14 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
}
static struct nfs4_openowner *
-find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, bool sessions)
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
+ bool sessions, struct nfsd_net *nn)
{
struct nfs4_stateowner *so;
struct nfs4_openowner *oo;
struct nfs4_client *clp;
- list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
+ list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
if (!so->so_is_open_owner)
continue;
if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
@@ -2555,9 +2574,14 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
struct nfs4_delegation *dp;
- BUG_ON(!fp);
- /* We assume break_lease is only called once per lease: */
- BUG_ON(fp->fi_had_conflict);
+ if (!fp) {
+ WARN(1, "(%p)->fl_owner NULL\n", fl);
+ return;
+ }
+ if (fp->fi_had_conflict) {
+ WARN(1, "duplicate break on %p\n", fp);
+ return;
+ }
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a delegation isn't returned
@@ -2599,14 +2623,13 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
- struct nfsd4_open *open)
+ struct nfsd4_open *open, struct nfsd_net *nn)
{
clientid_t *clientid = &open->op_clientid;
struct nfs4_client *clp = NULL;
unsigned int strhashval;
struct nfs4_openowner *oo = NULL;
__be32 status;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
if (STALE_CLIENTID(&open->op_clientid, nn))
return nfserr_stale_clientid;
@@ -2619,10 +2642,11 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
return nfserr_jukebox;
strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
- oo = find_openstateowner_str(strhashval, open, cstate->minorversion);
+ oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
open->op_openowner = oo;
if (!oo) {
- clp = find_confirmed_client(clientid, cstate->minorversion);
+ clp = find_confirmed_client(clientid, cstate->minorversion,
+ nn);
if (clp == NULL)
return nfserr_expired;
goto new_owner;
@@ -2891,7 +2915,7 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
open->op_why_no_deleg = WND4_CANCELLED;
break;
case NFS4_SHARE_WANT_NO_DELEG:
- BUG(); /* not supposed to get here */
+ WARN_ON_ONCE(1);
}
}
}
@@ -2959,6 +2983,7 @@ out:
}
return;
out_free:
+ unhash_stid(&dp->dl_stid);
nfs4_put_delegation(dp);
out_no_deleg:
flag = NFS4_OPEN_DELEGATE_NONE;
@@ -3104,27 +3129,32 @@ void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
free_generic_stateid(open->op_stp);
}
+static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
+{
+ struct nfs4_client *found;
+
+ if (STALE_CLIENTID(clid, nn))
+ return nfserr_stale_clientid;
+ found = find_confirmed_client(clid, session, nn);
+ if (clp)
+ *clp = found;
+ return found ? nfs_ok : nfserr_expired;
+}
+
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
clientid_t *clid)
{
struct nfs4_client *clp;
__be32 status;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
nfs4_lock_state();
dprintk("process_renew(%08x/%08x): starting\n",
clid->cl_boot, clid->cl_id);
- status = nfserr_stale_clientid;
- if (STALE_CLIENTID(clid, nn))
- goto out;
- clp = find_confirmed_client(clid, cstate->minorversion);
- status = nfserr_expired;
- if (clp == NULL) {
- /* We assume the client took too long to RENEW. */
- dprintk("nfsd4_renew: clientid not found!\n");
+ status = lookup_clientid(clid, cstate->minorversion, nn, &clp);
+ if (status)
goto out;
- }
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
@@ -3136,44 +3166,42 @@ out:
}
static void
-nfsd4_end_grace(struct net *net)
+nfsd4_end_grace(struct nfsd_net *nn)
{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
/* do nothing if grace period already ended */
if (nn->grace_ended)
return;
dprintk("NFSD: end of grace period\n");
nn->grace_ended = true;
- nfsd4_record_grace_done(net, nn->boot_time);
+ nfsd4_record_grace_done(nn, nn->boot_time);
locks_end_grace(&nn->nfsd4_manager);
/*
* Now that every NFSv4 client has had the chance to recover and
* to see the (possibly new, possibly shorter) lease time, we
* can safely set the next grace time to the current lease time:
*/
- nfsd4_grace = nfsd4_lease;
+ nn->nfsd4_grace = nn->nfsd4_lease;
}
static time_t
-nfs4_laundromat(void)
+nfs4_laundromat(struct nfsd_net *nn)
{
struct nfs4_client *clp;
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
struct list_head *pos, *next, reaplist;
- time_t cutoff = get_seconds() - nfsd4_lease;
- time_t t, clientid_val = nfsd4_lease;
- time_t u, test_val = nfsd4_lease;
+ time_t cutoff = get_seconds() - nn->nfsd4_lease;
+ time_t t, clientid_val = nn->nfsd4_lease;
+ time_t u, test_val = nn->nfsd4_lease;
nfs4_lock_state();
dprintk("NFSD: laundromat service - starting\n");
- nfsd4_end_grace(&init_net);
+ nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
- spin_lock(&client_lock);
- list_for_each_safe(pos, next, &client_lru) {
+ spin_lock(&nn->client_lock);
+ list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
t = clp->cl_time - cutoff;
@@ -3189,7 +3217,7 @@ nfs4_laundromat(void)
unhash_client_locked(clp);
list_add(&clp->cl_lru, &reaplist);
}
- spin_unlock(&client_lock);
+ spin_unlock(&nn->client_lock);
list_for_each_safe(pos, next, &reaplist) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
dprintk("NFSD: purging unused client (clientid %08x)\n",
@@ -3199,6 +3227,8 @@ nfs4_laundromat(void)
spin_lock(&recall_lock);
list_for_each_safe(pos, next, &del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+ if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
+ continue;
if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
u = dp->dl_time - cutoff;
if (test_val > u)
@@ -3212,8 +3242,8 @@ nfs4_laundromat(void)
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
unhash_delegation(dp);
}
- test_val = nfsd4_lease;
- list_for_each_safe(pos, next, &close_lru) {
+ test_val = nn->nfsd4_lease;
+ list_for_each_safe(pos, next, &nn->close_lru) {
oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
u = oo->oo_time - cutoff;
@@ -3231,16 +3261,19 @@ nfs4_laundromat(void)
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
-static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
static void
-laundromat_main(struct work_struct *not_used)
+laundromat_main(struct work_struct *laundry)
{
time_t t;
+ struct delayed_work *dwork = container_of(laundry, struct delayed_work,
+ work);
+ struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
+ laundromat_work);
- t = nfs4_laundromat();
+ t = nfs4_laundromat(nn);
dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
- queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
+ queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
@@ -3385,16 +3418,17 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
return nfs_ok;
}
-static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, bool sessions)
+static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
+ struct nfs4_stid **s, bool sessions,
+ struct nfsd_net *nn)
{
struct nfs4_client *cl;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
if (STALE_STATEID(stateid, nn))
return nfserr_stale_stateid;
- cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions);
+ cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn);
if (!cl)
return nfserr_expired;
*s = find_stateid_by_type(cl, stateid, typemask);
@@ -3416,6 +3450,7 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
struct nfs4_delegation *dp = NULL;
struct svc_fh *current_fh = &cstate->current_fh;
struct inode *ino = current_fh->fh_dentry->d_inode;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
__be32 status;
if (filpp)
@@ -3427,7 +3462,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return check_special_stateids(net, current_fh, stateid, flags);
- status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, cstate->minorversion);
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
+ &s, cstate->minorversion, nn);
if (status)
return status;
status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
@@ -3441,7 +3477,11 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
goto out;
if (filpp) {
*filpp = dp->dl_file->fi_deleg_file;
- BUG_ON(!*filpp);
+ if (!*filpp) {
+ WARN_ON_ONCE(1);
+ status = nfserr_serverfault;
+ goto out;
+ }
}
break;
case NFS4_OPEN_STID:
@@ -3568,7 +3608,8 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
stateid_t *stateid, char typemask,
- struct nfs4_ol_stateid **stpp)
+ struct nfs4_ol_stateid **stpp,
+ struct nfsd_net *nn)
{
__be32 status;
struct nfs4_stid *s;
@@ -3577,7 +3618,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
seqid, STATEID_VAL(stateid));
*stpp = NULL;
- status = nfsd4_lookup_stateid(stateid, typemask, &s, cstate->minorversion);
+ status = nfsd4_lookup_stateid(stateid, typemask, &s,
+ cstate->minorversion, nn);
if (status)
return status;
*stpp = openlockstateid(s);
@@ -3586,13 +3628,14 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
}
-static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
+static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
+ stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
__be32 status;
struct nfs4_openowner *oo;
status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
- NFS4_OPEN_STID, stpp);
+ NFS4_OPEN_STID, stpp, nn);
if (status)
return status;
oo = openowner((*stpp)->st_stateowner);
@@ -3608,6 +3651,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
@@ -3621,7 +3665,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(cstate,
oc->oc_seqid, &oc->oc_req_stateid,
- NFS4_OPEN_STID, &stp);
+ NFS4_OPEN_STID, &stp, nn);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
@@ -3664,7 +3708,7 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
case NFS4_SHARE_ACCESS_BOTH:
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
}
}
@@ -3685,6 +3729,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
{
__be32 status;
struct nfs4_ol_stateid *stp;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
@@ -3697,7 +3742,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
nfs4_lock_state();
status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
- &od->od_stateid, &stp);
+ &od->od_stateid, &stp, nn);
if (status)
goto out;
status = nfserr_inval;
@@ -3760,6 +3805,8 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_close on file %.*s\n",
(int)cstate->current_fh.fh_dentry->d_name.len,
@@ -3769,7 +3816,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
&close->cl_stateid,
NFS4_OPEN_STID|NFS4_CLOSED_STID,
- &stp);
+ &stp, nn);
if (status)
goto out;
oo = openowner(stp->st_stateowner);
@@ -3791,7 +3838,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* little while to handle CLOSE replay.
*/
if (list_empty(&oo->oo_owner.so_stateids))
- move_to_close_lru(oo);
+ move_to_close_lru(oo, SVC_NET(rqstp));
}
}
out:
@@ -3807,15 +3854,15 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
- struct inode *inode;
__be32 status;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
- inode = cstate->current_fh.fh_dentry->d_inode;
nfs4_lock_state();
- status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, cstate->minorversion);
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s,
+ cstate->minorversion, nn);
if (status)
goto out;
dp = delegstateid(s);
@@ -3833,8 +3880,6 @@ out:
#define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
-#define LOCKOWNER_INO_HASH_BITS 8
-#define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
static inline u64
@@ -3852,7 +3897,7 @@ last_byte_offset(u64 start, u64 len)
{
u64 end;
- BUG_ON(!len);
+ WARN_ON_ONCE(!len);
end = start + len;
return end > start ? end - 1: NFS4_MAX_UINT64;
}
@@ -3864,8 +3909,6 @@ static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct
& LOCKOWNER_INO_HASH_MASK;
}
-static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE];
-
/*
* TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
* we can't properly handle lock requests that go beyond the (2^63 - 1)-th
@@ -3931,12 +3974,12 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
static struct nfs4_lockowner *
find_lockowner_str(struct inode *inode, clientid_t *clid,
- struct xdr_netobj *owner)
+ struct xdr_netobj *owner, struct nfsd_net *nn)
{
unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
struct nfs4_lockowner *lo;
- list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
+ list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
if (same_lockowner_ino(lo, inode, clid, owner))
return lo;
}
@@ -3948,9 +3991,10 @@ static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, s
struct inode *inode = open_stp->st_file->fi_inode;
unsigned int inohash = lockowner_ino_hashval(inode,
clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
- list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]);
+ list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
+ list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]);
list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
@@ -4024,8 +4068,10 @@ static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, s
struct nfs4_client *cl = oo->oo_owner.so_client;
struct nfs4_lockowner *lo;
unsigned int strhashval;
+ struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
- lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner);
+ lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid,
+ &lock->v.new.owner, nn);
if (lo) {
if (!cstate->minorversion)
return nfserr_bad_seqid;
@@ -4065,7 +4111,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
bool new_state = false;
int lkflg;
int err;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct net *net = SVC_NET(rqstp);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
(long long) lock->lk_offset,
@@ -4099,7 +4146,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
&lock->lk_new_open_stateid,
- &open_stp);
+ &open_stp, nn);
if (status)
goto out;
open_sop = openowner(open_stp->st_stateowner);
@@ -4113,7 +4160,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
- NFS4_LOCK_STID, &lock_stp);
+ NFS4_LOCK_STID, &lock_stp, nn);
if (status)
goto out;
lock_sop = lockowner(lock_stp->st_stateowner);
@@ -4124,10 +4171,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
status = nfserr_grace;
- if (locks_in_grace(SVC_NET(rqstp)) && !lock->lk_reclaim)
+ if (locks_in_grace(net) && !lock->lk_reclaim)
goto out;
status = nfserr_no_grace;
- if (!locks_in_grace(SVC_NET(rqstp)) && lock->lk_reclaim)
+ if (!locks_in_grace(net) && lock->lk_reclaim)
goto out;
file_lock = locks_alloc_lock();
@@ -4238,7 +4285,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo;
__be32 status;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (locks_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
@@ -4248,9 +4295,11 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
- status = nfserr_stale_clientid;
- if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid, nn))
- goto out;
+ if (!nfsd4_has_session(cstate)) {
+ status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL);
+ if (status)
+ goto out;
+ }
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
goto out;
@@ -4278,7 +4327,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
- lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
+ lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn);
if (lo)
file_lock->fl_owner = (fl_owner_t)lo;
file_lock->fl_pid = current->tgid;
@@ -4313,7 +4362,8 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file_lock *file_lock = NULL;
__be32 status;
int err;
-
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+
dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
(long long) locku->lu_offset,
(long long) locku->lu_length);
@@ -4324,7 +4374,8 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
- &locku->lu_stateid, NFS4_LOCK_STID, &stp);
+ &locku->lu_stateid, NFS4_LOCK_STID,
+ &stp, nn);
if (status)
goto out;
filp = find_any_file(stp->st_file);
@@ -4414,23 +4465,21 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct list_head matches;
unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
__be32 status;
- struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
- /* XXX check for lease expiration */
-
- status = nfserr_stale_clientid;
- if (STALE_CLIENTID(clid, nn))
- return status;
-
nfs4_lock_state();
+ status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
+ if (status)
+ goto out;
+
status = nfserr_locks_held;
INIT_LIST_HEAD(&matches);
- list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) {
+ list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) {
if (sop->so_is_open_owner)
continue;
if (!same_owner_str(sop, owner, clid))
@@ -4466,73 +4515,74 @@ alloc_reclaim(void)
return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}
-int
-nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
+bool
+nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
- unsigned int strhashval = clientstr_hashval(name);
- struct nfs4_client *clp;
+ struct nfs4_client_reclaim *crp;
- clp = find_confirmed_client_by_str(name, strhashval);
- if (!clp)
- return 0;
- return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
+ crp = nfsd4_find_reclaim_client(name, nn);
+ return (crp && crp->cr_clp);
}
/*
* failure => all reset bets are off, nfserr_no_grace...
*/
-int
-nfs4_client_to_reclaim(const char *name)
+struct nfs4_client_reclaim *
+nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
unsigned int strhashval;
- struct nfs4_client_reclaim *crp = NULL;
+ struct nfs4_client_reclaim *crp;
dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
crp = alloc_reclaim();
- if (!crp)
- return 0;
- strhashval = clientstr_hashval(name);
- INIT_LIST_HEAD(&crp->cr_strhash);
- list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
- memcpy(crp->cr_recdir, name, HEXDIR_LEN);
- reclaim_str_hashtbl_size++;
- return 1;
+ if (crp) {
+ strhashval = clientstr_hashval(name);
+ INIT_LIST_HEAD(&crp->cr_strhash);
+ list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
+ memcpy(crp->cr_recdir, name, HEXDIR_LEN);
+ crp->cr_clp = NULL;
+ nn->reclaim_str_hashtbl_size++;
+ }
+ return crp;
+}
+
+void
+nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
+{
+ list_del(&crp->cr_strhash);
+ kfree(crp);
+ nn->reclaim_str_hashtbl_size--;
}
void
-nfs4_release_reclaim(void)
+nfs4_release_reclaim(struct nfsd_net *nn)
{
struct nfs4_client_reclaim *crp = NULL;
int i;
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
- while (!list_empty(&reclaim_str_hashtbl[i])) {
- crp = list_entry(reclaim_str_hashtbl[i].next,
+ while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
+ crp = list_entry(nn->reclaim_str_hashtbl[i].next,
struct nfs4_client_reclaim, cr_strhash);
- list_del(&crp->cr_strhash);
- kfree(crp);
- reclaim_str_hashtbl_size--;
+ nfs4_remove_reclaim_record(crp, nn);
}
}
- BUG_ON(reclaim_str_hashtbl_size);
+ WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
* called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
-nfsd4_find_reclaim_client(struct nfs4_client *clp)
+nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
- dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
- clp->cl_name.len, clp->cl_name.data,
- clp->cl_recdir);
+ dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
- /* find clp->cl_name in reclaim_str_hashtbl */
- strhashval = clientstr_hashval(clp->cl_recdir);
- list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
- if (same_name(crp->cr_recdir, clp->cl_recdir)) {
+ strhashval = clientstr_hashval(recdir);
+ list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
+ if (same_name(crp->cr_recdir, recdir)) {
return crp;
}
}
@@ -4543,12 +4593,12 @@ nfsd4_find_reclaim_client(struct nfs4_client *clp)
* Called from OPEN. Look for clientid in reclaim list.
*/
__be32
-nfs4_check_open_reclaim(clientid_t *clid, bool sessions)
+nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
struct nfs4_client *clp;
/* find clientid in conf_id_hashtbl */
- clp = find_confirmed_client(clid, sessions);
+ clp = find_confirmed_client(clid, sessions, nn);
if (clp == NULL)
return nfserr_reclaim_bad;
@@ -4557,124 +4607,177 @@ nfs4_check_open_reclaim(clientid_t *clid, bool sessions)
#ifdef CONFIG_NFSD_FAULT_INJECTION
-void nfsd_forget_clients(u64 num)
+u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
{
- struct nfs4_client *clp, *next;
- int count = 0;
-
- nfs4_lock_state();
- list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
- expire_client(clp);
- if (++count == num)
- break;
- }
- nfs4_unlock_state();
-
- printk(KERN_INFO "NFSD: Forgot %d clients", count);
+ expire_client(clp);
+ return 1;
}
-static void release_lockowner_sop(struct nfs4_stateowner *sop)
+u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
{
- release_lockowner(lockowner(sop));
+ char buf[INET6_ADDRSTRLEN];
+ rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
+ printk(KERN_INFO "NFS Client: %s\n", buf);
+ return 1;
}
-static void release_openowner_sop(struct nfs4_stateowner *sop)
+static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
+ const char *type)
{
- release_openowner(openowner(sop));
+ char buf[INET6_ADDRSTRLEN];
+ rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
+ printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}
-static int nfsd_release_n_owners(u64 num, bool is_open_owner,
- void (*release_sop)(struct nfs4_stateowner *))
+static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *))
{
- int i, count = 0;
- struct nfs4_stateowner *sop, *next;
+ struct nfs4_openowner *oop;
+ struct nfs4_lockowner *lop, *lo_next;
+ struct nfs4_ol_stateid *stp, *st_next;
+ u64 count = 0;
- for (i = 0; i < OWNER_HASH_SIZE; i++) {
- list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) {
- if (sop->so_is_open_owner != is_open_owner)
- continue;
- release_sop(sop);
- if (++count == num)
- return count;
+ list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
+ list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) {
+ list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) {
+ if (func)
+ func(lop);
+ if (++count == max)
+ return count;
+ }
}
}
+
return count;
}
-void nfsd_forget_locks(u64 num)
+u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
{
- int count;
-
- nfs4_lock_state();
- count = nfsd_release_n_owners(num, false, release_lockowner_sop);
- nfs4_unlock_state();
+ return nfsd_foreach_client_lock(clp, max, release_lockowner);
+}
- printk(KERN_INFO "NFSD: Forgot %d locks", count);
+u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
+{
+ u64 count = nfsd_foreach_client_lock(clp, max, NULL);
+ nfsd_print_count(clp, count, "locked files");
+ return count;
}
-void nfsd_forget_openowners(u64 num)
+static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
{
- int count;
+ struct nfs4_openowner *oop, *next;
+ u64 count = 0;
- nfs4_lock_state();
- count = nfsd_release_n_owners(num, true, release_openowner_sop);
- nfs4_unlock_state();
+ list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
+ if (func)
+ func(oop);
+ if (++count == max)
+ break;
+ }
- printk(KERN_INFO "NFSD: Forgot %d open owners", count);
+ return count;
}
-static int nfsd_process_n_delegations(u64 num, struct list_head *list)
+u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
{
- int i, count = 0;
- struct nfs4_file *fp, *fnext;
- struct nfs4_delegation *dp, *dnext;
+ return nfsd_foreach_client_open(clp, max, release_openowner);
+}
- for (i = 0; i < FILE_HASH_SIZE; i++) {
- list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) {
- list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) {
- list_move(&dp->dl_recall_lru, list);
- if (++count == num)
- return count;
- }
- }
- }
+u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
+{
+ u64 count = nfsd_foreach_client_open(clp, max, NULL);
+ nfsd_print_count(clp, count, "open files");
+ return count;
+}
+
+static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
+ struct list_head *victims)
+{
+ struct nfs4_delegation *dp, *next;
+ u64 count = 0;
+ list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
+ if (victims)
+ list_move(&dp->dl_recall_lru, victims);
+ if (++count == max)
+ break;
+ }
return count;
}
-void nfsd_forget_delegations(u64 num)
+u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
{
- unsigned int count;
+ struct nfs4_delegation *dp, *next;
LIST_HEAD(victims);
- struct nfs4_delegation *dp, *dnext;
+ u64 count;
spin_lock(&recall_lock);
- count = nfsd_process_n_delegations(num, &victims);
+ count = nfsd_find_all_delegations(clp, max, &victims);
spin_unlock(&recall_lock);
- nfs4_lock_state();
- list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru)
+ list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
unhash_delegation(dp);
- nfs4_unlock_state();
- printk(KERN_INFO "NFSD: Forgot %d delegations", count);
+ return count;
}
-void nfsd_recall_delegations(u64 num)
+u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
- unsigned int count;
+ struct nfs4_delegation *dp, *next;
LIST_HEAD(victims);
- struct nfs4_delegation *dp, *dnext;
+ u64 count;
spin_lock(&recall_lock);
- count = nfsd_process_n_delegations(num, &victims);
- list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) {
- list_del(&dp->dl_recall_lru);
+ count = nfsd_find_all_delegations(clp, max, &victims);
+ list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
nfsd_break_one_deleg(dp);
- }
spin_unlock(&recall_lock);
- printk(KERN_INFO "NFSD: Recalled %d delegations", count);
+ return count;
+}
+
+u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
+{
+ u64 count = 0;
+
+ spin_lock(&recall_lock);
+ count = nfsd_find_all_delegations(clp, max, NULL);
+ spin_unlock(&recall_lock);
+
+ nfsd_print_count(clp, count, "delegations");
+ return count;
+}
+
+u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
+{
+ struct nfs4_client *clp, *next;
+ u64 count = 0;
+ struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
+
+ if (!nfsd_netns_ready(nn))
+ return 0;
+
+ list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
+ count += func(clp, max - count);
+ if ((max != 0) && (count >= max))
+ break;
+ }
+
+ return count;
+}
+
+struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
+{
+ struct nfs4_client *clp;
+ struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
+
+ if (!nfsd_netns_ready(nn))
+ return NULL;
+
+ list_for_each_entry(clp, &nn->client_lru, cl_lru) {
+ if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
+ return clp;
+ }
+ return NULL;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */
@@ -4686,27 +4789,10 @@ nfs4_state_init(void)
{
int i;
- for (i = 0; i < CLIENT_HASH_SIZE; i++) {
- INIT_LIST_HEAD(&conf_id_hashtbl[i]);
- INIT_LIST_HEAD(&conf_str_hashtbl[i]);
- INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
- INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
- INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
- }
- for (i = 0; i < SESSION_HASH_SIZE; i++)
- INIT_LIST_HEAD(&sessionid_hashtbl[i]);
for (i = 0; i < FILE_HASH_SIZE; i++) {
INIT_LIST_HEAD(&file_hashtbl[i]);
}
- for (i = 0; i < OWNER_HASH_SIZE; i++) {
- INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
- }
- for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
- INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]);
- INIT_LIST_HEAD(&close_lru);
- INIT_LIST_HEAD(&client_lru);
INIT_LIST_HEAD(&del_recall_lru);
- reclaim_str_hashtbl_size = 0;
}
/*
@@ -4730,12 +4816,100 @@ set_max_delegations(void)
max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
-/* initialization to perform when the nfsd service is started: */
+static int nfs4_state_create_net(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int i;
+
+ nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
+ CLIENT_HASH_SIZE, GFP_KERNEL);
+ if (!nn->conf_id_hashtbl)
+ goto err;
+ nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
+ CLIENT_HASH_SIZE, GFP_KERNEL);
+ if (!nn->unconf_id_hashtbl)
+ goto err_unconf_id;
+ nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
+ OWNER_HASH_SIZE, GFP_KERNEL);
+ if (!nn->ownerstr_hashtbl)
+ goto err_ownerstr;
+ nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) *
+ LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL);
+ if (!nn->lockowner_ino_hashtbl)
+ goto err_lockowner_ino;
+ nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
+ SESSION_HASH_SIZE, GFP_KERNEL);
+ if (!nn->sessionid_hashtbl)
+ goto err_sessionid;
+
+ for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+ INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
+ INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
+ }
+ for (i = 0; i < OWNER_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
+ for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]);
+ for (i = 0; i < SESSION_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
+ nn->conf_name_tree = RB_ROOT;
+ nn->unconf_name_tree = RB_ROOT;
+ INIT_LIST_HEAD(&nn->client_lru);
+ INIT_LIST_HEAD(&nn->close_lru);
+ spin_lock_init(&nn->client_lock);
+
+ INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
+ get_net(net);
+
+ return 0;
+
+err_sessionid:
+ kfree(nn->lockowner_ino_hashtbl);
+err_lockowner_ino:
+ kfree(nn->ownerstr_hashtbl);
+err_ownerstr:
+ kfree(nn->unconf_id_hashtbl);
+err_unconf_id:
+ kfree(nn->conf_id_hashtbl);
+err:
+ return -ENOMEM;
+}
+
+static void
+nfs4_state_destroy_net(struct net *net)
+{
+ int i;
+ struct nfs4_client *clp = NULL;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct rb_node *node, *tmp;
+
+ for (i = 0; i < CLIENT_HASH_SIZE; i++) {
+ while (!list_empty(&nn->conf_id_hashtbl[i])) {
+ clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
+ destroy_client(clp);
+ }
+ }
+
+ node = rb_first(&nn->unconf_name_tree);
+ while (node != NULL) {
+ tmp = node;
+ node = rb_next(tmp);
+ clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
+ rb_erase(tmp, &nn->unconf_name_tree);
+ destroy_client(clp);
+ }
+
+ kfree(nn->sessionid_hashtbl);
+ kfree(nn->lockowner_ino_hashtbl);
+ kfree(nn->ownerstr_hashtbl);
+ kfree(nn->unconf_id_hashtbl);
+ kfree(nn->conf_id_hashtbl);
+ put_net(net);
+}
int
-nfs4_state_start(void)
+nfs4_state_start_net(struct net *net)
{
- struct net *net = &init_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
@@ -4746,18 +4920,32 @@ nfs4_state_start(void)
* to that instead and then do most of the rest of this on a per-net
* basis.
*/
- get_net(net);
+ if (net != &init_net)
+ return -EINVAL;
+
+ ret = nfs4_state_create_net(net);
+ if (ret)
+ return ret;
nfsd4_client_tracking_init(net);
nn->boot_time = get_seconds();
locks_start_grace(net, &nn->nfsd4_manager);
nn->grace_ended = false;
- printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
- nfsd4_grace);
+ printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
+ nn->nfsd4_grace, net);
+ queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
+ return 0;
+}
+
+/* initialization to perform when the nfsd service is started: */
+
+int
+nfs4_state_start(void)
+{
+ int ret;
+
ret = set_callback_cred();
- if (ret) {
- ret = -ENOMEM;
- goto out_recovery;
- }
+ if (ret)
+ return -ENOMEM;
laundry_wq = create_singlethread_workqueue("nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
@@ -4766,39 +4954,34 @@ nfs4_state_start(void)
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
- queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
+
set_max_delegations();
+
return 0;
+
out_free_laundry:
destroy_workqueue(laundry_wq);
out_recovery:
- nfsd4_client_tracking_exit(net);
- put_net(net);
return ret;
}
-static void
-__nfs4_state_shutdown(void)
+/* should be called with the state lock held */
+void
+nfs4_state_shutdown_net(struct net *net)
{
- int i;
- struct nfs4_client *clp = NULL;
struct nfs4_delegation *dp = NULL;
struct list_head *pos, *next, reaplist;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ cancel_delayed_work_sync(&nn->laundromat_work);
+ locks_end_grace(&nn->nfsd4_manager);
- for (i = 0; i < CLIENT_HASH_SIZE; i++) {
- while (!list_empty(&conf_id_hashtbl[i])) {
- clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
- destroy_client(clp);
- }
- while (!list_empty(&unconf_str_hashtbl[i])) {
- clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
- destroy_client(clp);
- }
- }
INIT_LIST_HEAD(&reaplist);
spin_lock(&recall_lock);
list_for_each_safe(pos, next, &del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+ if (dp->dl_stid.sc_client->net != net)
+ continue;
list_move(&dp->dl_recall_lru, &reaplist);
}
spin_unlock(&recall_lock);
@@ -4807,22 +4990,14 @@ __nfs4_state_shutdown(void)
unhash_delegation(dp);
}
- nfsd4_client_tracking_exit(&init_net);
- put_net(&init_net);
+ nfsd4_client_tracking_exit(net);
+ nfs4_state_destroy_net(net);
}
void
nfs4_state_shutdown(void)
{
- struct net *net = &init_net;
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- cancel_delayed_work_sync(&laundromat_work);
destroy_workqueue(laundry_wq);
- locks_end_grace(&nn->nfsd4_manager);
- nfs4_lock_state();
- __nfs4_state_shutdown();
- nfs4_unlock_state();
nfsd4_destroy_callback_queue();
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index fd548d15508..0dc11586682 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -53,6 +53,7 @@
#include "vfs.h"
#include "state.h"
#include "cache.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -65,17 +66,17 @@
#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
static __be32
-check_filename(char *str, int len, __be32 err)
+check_filename(char *str, int len)
{
int i;
if (len == 0)
return nfserr_inval;
if (isdotent(str, len))
- return err;
+ return nfserr_badname;
for (i = 0; i < len; i++)
if (str[i] == '/')
- return err;
+ return nfserr_badname;
return 0;
}
@@ -422,6 +423,86 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access
DECODE_TAIL;
}
+static __be32 nfsd4_decode_cb_sec(struct nfsd4_compoundargs *argp, struct nfsd4_cb_sec *cbs)
+{
+ DECODE_HEAD;
+ u32 dummy, uid, gid;
+ char *machine_name;
+ int i;
+ int nr_secflavs;
+
+ /* callback_sec_params4 */
+ READ_BUF(4);
+ READ32(nr_secflavs);
+ cbs->flavor = (u32)(-1);
+ for (i = 0; i < nr_secflavs; ++i) {
+ READ_BUF(4);
+ READ32(dummy);
+ switch (dummy) {
+ case RPC_AUTH_NULL:
+ /* Nothing to read */
+ if (cbs->flavor == (u32)(-1))
+ cbs->flavor = RPC_AUTH_NULL;
+ break;
+ case RPC_AUTH_UNIX:
+ READ_BUF(8);
+ /* stamp */
+ READ32(dummy);
+
+ /* machine name */
+ READ32(dummy);
+ READ_BUF(dummy);
+ SAVEMEM(machine_name, dummy);
+
+ /* uid, gid */
+ READ_BUF(8);
+ READ32(uid);
+ READ32(gid);
+
+ /* more gids */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy * 4);
+ if (cbs->flavor == (u32)(-1)) {
+ cbs->uid = uid;
+ cbs->gid = gid;
+ cbs->flavor = RPC_AUTH_UNIX;
+ }
+ break;
+ case RPC_AUTH_GSS:
+ dprintk("RPC_AUTH_GSS callback secflavor "
+ "not supported!\n");
+ READ_BUF(8);
+ /* gcbp_service */
+ READ32(dummy);
+ /* gcbp_handle_from_server */
+ READ32(dummy);
+ READ_BUF(dummy);
+ p += XDR_QUADLEN(dummy);
+ /* gcbp_handle_from_client */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+ break;
+ default:
+ dprintk("Illegal callback secflavor\n");
+ return nfserr_inval;
+ }
+ }
+ DECODE_TAIL;
+}
+
+static __be32 nfsd4_decode_backchannel_ctl(struct nfsd4_compoundargs *argp, struct nfsd4_backchannel_ctl *bc)
+{
+ DECODE_HEAD;
+
+ READ_BUF(4);
+ READ32(bc->bc_cb_program);
+ nfsd4_decode_cb_sec(argp, &bc->bc_cb_sec);
+
+ DECODE_TAIL;
+}
+
static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
{
DECODE_HEAD;
@@ -490,7 +571,7 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
READ32(create->cr_namelen);
READ_BUF(create->cr_namelen);
SAVEMEM(create->cr_name, create->cr_namelen);
- if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
+ if ((status = check_filename(create->cr_name, create->cr_namelen)))
return status;
status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
@@ -522,7 +603,7 @@ nfsd4_decode_link(struct nfsd4_compoundargs *argp, struct nfsd4_link *link)
READ32(link->li_namelen);
READ_BUF(link->li_namelen);
SAVEMEM(link->li_name, link->li_namelen);
- if ((status = check_filename(link->li_name, link->li_namelen, nfserr_inval)))
+ if ((status = check_filename(link->li_name, link->li_namelen)))
return status;
DECODE_TAIL;
@@ -616,7 +697,7 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
READ32(lookup->lo_len);
READ_BUF(lookup->lo_len);
SAVEMEM(lookup->lo_name, lookup->lo_len);
- if ((status = check_filename(lookup->lo_name, lookup->lo_len, nfserr_noent)))
+ if ((status = check_filename(lookup->lo_name, lookup->lo_len)))
return status;
DECODE_TAIL;
@@ -780,7 +861,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
READ32(open->op_fname.len);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
- if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
+ if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
@@ -795,7 +876,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
READ32(open->op_fname.len);
READ_BUF(open->op_fname.len);
SAVEMEM(open->op_fname.data, open->op_fname.len);
- if ((status = check_filename(open->op_fname.data, open->op_fname.len, nfserr_inval)))
+ if ((status = check_filename(open->op_fname.data, open->op_fname.len)))
return status;
break;
case NFS4_OPEN_CLAIM_FH:
@@ -907,7 +988,7 @@ nfsd4_decode_remove(struct nfsd4_compoundargs *argp, struct nfsd4_remove *remove
READ32(remove->rm_namelen);
READ_BUF(remove->rm_namelen);
SAVEMEM(remove->rm_name, remove->rm_namelen);
- if ((status = check_filename(remove->rm_name, remove->rm_namelen, nfserr_noent)))
+ if ((status = check_filename(remove->rm_name, remove->rm_namelen)))
return status;
DECODE_TAIL;
@@ -925,9 +1006,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
READ32(rename->rn_tnamelen);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
- if ((status = check_filename(rename->rn_sname, rename->rn_snamelen, nfserr_noent)))
+ if ((status = check_filename(rename->rn_sname, rename->rn_snamelen)))
return status;
- if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen, nfserr_inval)))
+ if ((status = check_filename(rename->rn_tname, rename->rn_tnamelen)))
return status;
DECODE_TAIL;
@@ -954,8 +1035,7 @@ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
READ32(secinfo->si_namelen);
READ_BUF(secinfo->si_namelen);
SAVEMEM(secinfo->si_name, secinfo->si_namelen);
- status = check_filename(secinfo->si_name, secinfo->si_namelen,
- nfserr_noent);
+ status = check_filename(secinfo->si_name, secinfo->si_namelen);
if (status)
return status;
DECODE_TAIL;
@@ -1026,31 +1106,14 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
static __be32
nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify)
{
-#if 0
- struct nfsd4_compoundargs save = {
- .p = argp->p,
- .end = argp->end,
- .rqstp = argp->rqstp,
- };
- u32 ve_bmval[2];
- struct iattr ve_iattr; /* request */
- struct nfs4_acl *ve_acl; /* request */
-#endif
DECODE_HEAD;
if ((status = nfsd4_decode_bitmap(argp, verify->ve_bmval)))
goto out;
/* For convenience's sake, we compare raw xdr'd attributes in
- * nfsd4_proc_verify; however we still decode here just to return
- * correct error in case of bad xdr. */
-#if 0
- status = nfsd4_decode_fattr(ve_bmval, &ve_iattr, &ve_acl);
- if (status == nfserr_inval) {
- status = nfserrno(status);
- goto out;
- }
-#endif
+ * nfsd4_proc_verify */
+
READ_BUF(4);
READ32(verify->ve_attrlen);
READ_BUF(verify->ve_attrlen);
@@ -1063,7 +1126,6 @@ static __be32
nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
{
int avail;
- int v;
int len;
DECODE_HEAD;
@@ -1087,27 +1149,26 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
__FILE__, __LINE__);
goto xdr_error;
}
- argp->rqstp->rq_vec[0].iov_base = p;
- argp->rqstp->rq_vec[0].iov_len = avail;
- v = 0;
- len = write->wr_buflen;
- while (len > argp->rqstp->rq_vec[v].iov_len) {
- len -= argp->rqstp->rq_vec[v].iov_len;
- v++;
- argp->rqstp->rq_vec[v].iov_base = page_address(argp->pagelist[0]);
- argp->pagelist++;
- if (argp->pagelen >= PAGE_SIZE) {
- argp->rqstp->rq_vec[v].iov_len = PAGE_SIZE;
- argp->pagelen -= PAGE_SIZE;
- } else {
- argp->rqstp->rq_vec[v].iov_len = argp->pagelen;
- argp->pagelen -= len;
- }
+ write->wr_head.iov_base = p;
+ write->wr_head.iov_len = avail;
+ WARN_ON(avail != (XDR_QUADLEN(avail) << 2));
+ write->wr_pagelist = argp->pagelist;
+
+ len = XDR_QUADLEN(write->wr_buflen) << 2;
+ if (len >= avail) {
+ int pages;
+
+ len -= avail;
+
+ pages = len >> PAGE_SHIFT;
+ argp->pagelist += pages;
+ argp->pagelen -= pages * PAGE_SIZE;
+ len -= pages * PAGE_SIZE;
+
+ argp->p = (__be32 *)page_address(argp->pagelist[0]);
+ argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
}
- argp->end = (__be32*) (argp->rqstp->rq_vec[v].iov_base + argp->rqstp->rq_vec[v].iov_len);
- argp->p = (__be32*) (argp->rqstp->rq_vec[v].iov_base + (XDR_QUADLEN(len) << 2));
- argp->rqstp->rq_vec[v].iov_len = len;
- write->wr_vlen = v+1;
+ argp->p += XDR_QUADLEN(len);
DECODE_TAIL;
}
@@ -1237,11 +1298,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
struct nfsd4_create_session *sess)
{
DECODE_HEAD;
-
u32 dummy;
- char *machine_name;
- int i;
- int nr_secflavs;
READ_BUF(16);
COPYMEM(&sess->clientid, 8);
@@ -1282,58 +1339,9 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
goto xdr_error;
}
- READ_BUF(8);
+ READ_BUF(4);
READ32(sess->callback_prog);
-
- /* callback_sec_params4 */
- READ32(nr_secflavs);
- for (i = 0; i < nr_secflavs; ++i) {
- READ_BUF(4);
- READ32(dummy);
- switch (dummy) {
- case RPC_AUTH_NULL:
- /* Nothing to read */
- break;
- case RPC_AUTH_UNIX:
- READ_BUF(8);
- /* stamp */
- READ32(dummy);
-
- /* machine name */
- READ32(dummy);
- READ_BUF(dummy);
- SAVEMEM(machine_name, dummy);
-
- /* uid, gid */
- READ_BUF(8);
- READ32(sess->uid);
- READ32(sess->gid);
-
- /* more gids */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy * 4);
- break;
- case RPC_AUTH_GSS:
- dprintk("RPC_AUTH_GSS callback secflavor "
- "not supported!\n");
- READ_BUF(8);
- /* gcbp_service */
- READ32(dummy);
- /* gcbp_handle_from_server */
- READ32(dummy);
- READ_BUF(dummy);
- p += XDR_QUADLEN(dummy);
- /* gcbp_handle_from_client */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
- break;
- default:
- dprintk("Illegal callback secflavor\n");
- return nfserr_inval;
- }
- }
+ nfsd4_decode_cb_sec(argp, &sess->cb_sec);
DECODE_TAIL;
}
@@ -1528,7 +1536,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
- [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_backchannel_ctl,
[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
[OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
@@ -1568,12 +1576,6 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
bool cachethis = false;
int i;
- /*
- * XXX: According to spec, we should check the tag
- * for UTF-8 compliance. I'm postponing this for
- * now because it seems that some clients do use
- * binary tags.
- */
READ_BUF(4);
READ32(argp->taglen);
READ_BUF(argp->taglen + 8);
@@ -1603,38 +1605,8 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
op = &argp->ops[i];
op->replay = NULL;
- /*
- * We can't use READ_BUF() here because we need to handle
- * a missing opcode as an OP_WRITE + 1. So we need to check
- * to see if we're truly at the end of our buffer or if there
- * is another page we need to flip to.
- */
-
- if (argp->p == argp->end) {
- if (argp->pagelen < 4) {
- /* There isn't an opcode still on the wire */
- op->opnum = OP_WRITE + 1;
- op->status = nfserr_bad_xdr;
- argp->opcnt = i+1;
- break;
- }
-
- /*
- * False alarm. We just hit a page boundary, but there
- * is still data available. Move pointer across page
- * boundary. *snip from READ_BUF*
- */
- argp->p = page_address(argp->pagelist[0]);
- argp->pagelist++;
- if (argp->pagelen < PAGE_SIZE) {
- argp->end = argp->p + (argp->pagelen>>2);
- argp->pagelen = 0;
- } else {
- argp->end = argp->p + (PAGE_SIZE>>2);
- argp->pagelen -= PAGE_SIZE;
- }
- }
- op->opnum = ntohl(*argp->p++);
+ READ_BUF(4);
+ READ32(op->opnum);
if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
op->status = ops->decoders[op->opnum](argp, &op->u);
@@ -2014,6 +1986,22 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
return 0;
}
+
+static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
+{
+ struct path path = exp->ex_path;
+ int err;
+
+ path_get(&path);
+ while (follow_up(&path)) {
+ if (path.dentry != path.mnt->mnt_root)
+ break;
+ }
+ err = vfs_getattr(path.mnt, path.dentry, stat);
+ path_put(&path);
+ return err;
+}
+
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
@@ -2048,6 +2036,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
@@ -2208,7 +2197,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(nfsd4_lease);
+ WRITE32(nn->nfsd4_lease);
}
if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
if ((buflen -= 4) < 0)
@@ -2430,18 +2419,8 @@ out_acl:
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
- dentry == exp->ex_path.mnt->mnt_root) {
- struct path path = exp->ex_path;
- path_get(&path);
- while (follow_up(&path)) {
- if (path.dentry != path.mnt->mnt_root)
- break;
- }
- err = vfs_getattr(path.mnt, path.dentry, &stat);
- path_put(&path);
- if (err)
- goto out_nfserr;
- }
+ dentry == exp->ex_path.mnt->mnt_root)
+ get_parent_attributes(exp, &stat);
WRITE64(stat.ino);
}
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
@@ -2927,7 +2906,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_read *read)
{
u32 eof;
- int v, pn;
+ int v;
+ struct page *page;
unsigned long maxcount;
long len;
__be32 *p;
@@ -2946,11 +2926,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
len = maxcount;
v = 0;
while (len > 0) {
- pn = resp->rqstp->rq_resused++;
- resp->rqstp->rq_vec[v].iov_base =
- page_address(resp->rqstp->rq_respages[pn]);
+ page = *(resp->rqstp->rq_next_page);
+ if (!page) { /* ran out of pages */
+ maxcount -= len;
+ break;
+ }
+ resp->rqstp->rq_vec[v].iov_base = page_address(page);
resp->rqstp->rq_vec[v].iov_len =
len < PAGE_SIZE ? len : PAGE_SIZE;
+ resp->rqstp->rq_next_page++;
v++;
len -= PAGE_SIZE;
}
@@ -2996,8 +2980,10 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
return nfserr;
if (resp->xbuf->page_len)
return nfserr_resource;
+ if (!*resp->rqstp->rq_next_page)
+ return nfserr_resource;
- page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
+ page = page_address(*(resp->rqstp->rq_next_page++));
maxcount = PAGE_SIZE;
RESERVE_SPACE(4);
@@ -3045,6 +3031,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
return nfserr;
if (resp->xbuf->page_len)
return nfserr_resource;
+ if (!*resp->rqstp->rq_next_page)
+ return nfserr_resource;
RESERVE_SPACE(NFS4_VERIFIER_SIZE);
savep = p;
@@ -3071,7 +3059,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
goto err_no_verf;
}
- page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
+ page = page_address(*(resp->rqstp->rq_next_page++));
readdir->common.err = 0;
readdir->buflen = maxcount;
readdir->buffer = page;
@@ -3094,8 +3082,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
p = readdir->buffer;
*p++ = 0; /* no more entries */
*p++ = htonl(readdir->common.err == nfserr_eof);
- resp->xbuf->page_len = ((char*)p) - (char*)page_address(
- resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
+ resp->xbuf->page_len = ((char*)p) -
+ (char*)page_address(*(resp->rqstp->rq_next_page-1));
/* Use rest of head for padding and remaining ops: */
resp->xbuf->tail[0].iov_base = tailbase;
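The rq_resused-to-rq_next_page conversion running through the read, readlink and readdir encoders above follows one shape: an integer index into rq_respages[] becomes a cursor pointer whose NULL terminator also signals that the response ran out of pages. A minimal user-space sketch of that shape (editorial illustration, not part of the patch; every name below is hypothetical):

/* Sketch only: toy analogue of the rq_next_page cursor; all names hypothetical. */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ 4096

struct fake_rqst {
	char *pages[4];		/* NULL-terminated stand-in for rq_respages */
	char **next_page;	/* cursor, plays the role of rq_next_page   */
};

static size_t fill_vec(struct fake_rqst *rq, size_t len)
{
	size_t filled = 0;

	while (len > 0) {
		char *page = *rq->next_page;
		size_t chunk;

		if (!page)		/* ran out of pages */
			break;
		chunk = len < PAGE_SZ ? len : PAGE_SZ;
		/* a real encoder would record the page address in an iovec here */
		rq->next_page++;
		filled += chunk;
		len -= chunk;
	}
	return filled;
}

int main(void)
{
	static char a[PAGE_SZ], b[PAGE_SZ];
	struct fake_rqst rq = { .pages = { a, b, NULL, NULL } };

	rq.next_page = rq.pages;
	printf("filled %zu bytes of a 3-page request\n", fill_vec(&rq, 3 * PAGE_SZ));
	return 0;
}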
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index dab350dfc37..74934284d9a 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -19,7 +19,7 @@
#include "idmap.h"
#include "nfsd.h"
#include "cache.h"
-#include "fault_inject.h"
+#include "state.h"
#include "netns.h"
/*
@@ -186,9 +186,6 @@ static struct file_operations supported_enctypes_ops = {
};
#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
-extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
-extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
-
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
@@ -399,6 +396,8 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
int rv;
+ struct net *net = &init_net;
+
if (size > 0) {
int newthreads;
rv = get_int(&mesg, &newthreads);
@@ -406,11 +405,11 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return rv;
if (newthreads < 0)
return -EINVAL;
- rv = nfsd_svc(newthreads);
+ rv = nfsd_svc(newthreads, net);
if (rv < 0)
return rv;
} else
- rv = nfsd_nrthreads();
+ rv = nfsd_nrthreads(net);
return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
@@ -448,9 +447,10 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
int len;
int npools;
int *nthreads;
+ struct net *net = &init_net;
mutex_lock(&nfsd_mutex);
- npools = nfsd_nrpools();
+ npools = nfsd_nrpools(net);
if (npools == 0) {
/*
* NFS is shut down. The admin can start it by
@@ -478,12 +478,12 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
if (nthreads[i] < 0)
goto out_free;
}
- rv = nfsd_set_nrthreads(i, nthreads);
+ rv = nfsd_set_nrthreads(i, nthreads, net);
if (rv)
goto out_free;
}
- rv = nfsd_get_nrthreads(npools, nthreads);
+ rv = nfsd_get_nrthreads(npools, nthreads, net);
if (rv)
goto out_free;
@@ -510,11 +510,13 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
unsigned minor;
ssize_t tlen = 0;
char *sep;
+ struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (size>0) {
- if (nfsd_serv)
+ if (nn->nfsd_serv)
/* Cannot change versions without updating
- * nfsd_serv->sv_xdrsize, and reallocing
+ * nn->nfsd_serv->sv_xdrsize, and reallocing
* rq_argp and rq_resp
*/
return -EBUSY;
@@ -645,11 +647,13 @@ static ssize_t write_versions(struct file *file, char *buf, size_t size)
* Zero-length write. Return a list of NFSD's current listener
* transports.
*/
-static ssize_t __write_ports_names(char *buf)
+static ssize_t __write_ports_names(char *buf, struct net *net)
{
- if (nfsd_serv == NULL)
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ if (nn->nfsd_serv == NULL)
return 0;
- return svc_xprt_names(nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT);
+ return svc_xprt_names(nn->nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT);
}
/*
@@ -657,28 +661,28 @@ static ssize_t __write_ports_names(char *buf)
* a socket of a supported family/protocol, and we use it as an
* nfsd listener.
*/
-static ssize_t __write_ports_addfd(char *buf)
+static ssize_t __write_ports_addfd(char *buf, struct net *net)
{
char *mesg = buf;
int fd, err;
- struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
err = get_int(&mesg, &fd);
if (err != 0 || fd < 0)
return -EINVAL;
- err = nfsd_create_serv();
+ err = nfsd_create_serv(net);
if (err != 0)
return err;
- err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
+ err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
if (err < 0) {
nfsd_destroy(net);
return err;
}
/* Decrease the count, but don't shut down the service */
- nfsd_serv->sv_nrthreads--;
+ nn->nfsd_serv->sv_nrthreads--;
return err;
}
@@ -686,12 +690,12 @@ static ssize_t __write_ports_addfd(char *buf)
* A transport listener is added by writing its transport name and
* a port number.
*/
-static ssize_t __write_ports_addxprt(char *buf)
+static ssize_t __write_ports_addxprt(char *buf, struct net *net)
{
char transport[16];
struct svc_xprt *xprt;
int port, err;
- struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
if (sscanf(buf, "%15s %5u", transport, &port) != 2)
return -EINVAL;
@@ -699,25 +703,25 @@ static ssize_t __write_ports_addxprt(char *buf)
if (port < 1 || port > USHRT_MAX)
return -EINVAL;
- err = nfsd_create_serv();
+ err = nfsd_create_serv(net);
if (err != 0)
return err;
- err = svc_create_xprt(nfsd_serv, transport, net,
+ err = svc_create_xprt(nn->nfsd_serv, transport, net,
PF_INET, port, SVC_SOCK_ANONYMOUS);
if (err < 0)
goto out_err;
- err = svc_create_xprt(nfsd_serv, transport, net,
+ err = svc_create_xprt(nn->nfsd_serv, transport, net,
PF_INET6, port, SVC_SOCK_ANONYMOUS);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
/* Decrease the count, but don't shut down the service */
- nfsd_serv->sv_nrthreads--;
+ nn->nfsd_serv->sv_nrthreads--;
return 0;
out_close:
- xprt = svc_find_xprt(nfsd_serv, transport, net, PF_INET, port);
+ xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
if (xprt != NULL) {
svc_close_xprt(xprt);
svc_xprt_put(xprt);
@@ -727,16 +731,17 @@ out_err:
return err;
}
-static ssize_t __write_ports(struct file *file, char *buf, size_t size)
+static ssize_t __write_ports(struct file *file, char *buf, size_t size,
+ struct net *net)
{
if (size == 0)
- return __write_ports_names(buf);
+ return __write_ports_names(buf, net);
if (isdigit(buf[0]))
- return __write_ports_addfd(buf);
+ return __write_ports_addfd(buf, net);
if (isalpha(buf[0]))
- return __write_ports_addxprt(buf);
+ return __write_ports_addxprt(buf, net);
return -EINVAL;
}
@@ -787,9 +792,10 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size)
static ssize_t write_ports(struct file *file, char *buf, size_t size)
{
ssize_t rv;
+ struct net *net = &init_net;
mutex_lock(&nfsd_mutex);
- rv = __write_ports(file, buf, size);
+ rv = __write_ports(file, buf, size, net);
mutex_unlock(&nfsd_mutex);
return rv;
}
@@ -821,6 +827,9 @@ int nfsd_max_blksize;
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
+ struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
if (size > 0) {
int bsize;
int rv = get_int(&mesg, &bsize);
@@ -835,7 +844,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
bsize = NFSSVC_MAXBLKSIZE;
bsize &= ~(1024-1);
mutex_lock(&nfsd_mutex);
- if (nfsd_serv) {
+ if (nn->nfsd_serv) {
mutex_unlock(&nfsd_mutex);
return -EBUSY;
}
@@ -848,13 +857,14 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
}
#ifdef CONFIG_NFSD_V4
-static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
+static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
+ time_t *time, struct nfsd_net *nn)
{
char *mesg = buf;
int rv, i;
if (size > 0) {
- if (nfsd_serv)
+ if (nn->nfsd_serv)
return -EBUSY;
rv = get_int(&mesg, &i);
if (rv)
@@ -879,12 +889,13 @@ static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, tim
return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
}
-static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
+static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
+ time_t *time, struct nfsd_net *nn)
{
ssize_t rv;
mutex_lock(&nfsd_mutex);
- rv = __nfsd4_write_time(file, buf, size, time);
+ rv = __nfsd4_write_time(file, buf, size, time, nn);
mutex_unlock(&nfsd_mutex);
return rv;
}
@@ -912,7 +923,8 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time_
*/
static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
{
- return nfsd4_write_time(file, buf, size, &nfsd4_lease);
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
}
/**
@@ -927,17 +939,19 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
*/
static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
{
- return nfsd4_write_time(file, buf, size, &nfsd4_grace);
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+ return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn);
}
-static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size)
+static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
+ struct nfsd_net *nn)
{
char *mesg = buf;
char *recdir;
int len, status;
if (size > 0) {
- if (nfsd_serv)
+ if (nn->nfsd_serv)
return -EBUSY;
if (size > PATH_MAX || buf[size-1] != '\n')
return -EINVAL;
@@ -981,9 +995,10 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size)
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
{
ssize_t rv;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
mutex_lock(&nfsd_mutex);
- rv = __write_recoverydir(file, buf, size);
+ rv = __write_recoverydir(file, buf, size, nn);
mutex_unlock(&nfsd_mutex);
return rv;
}
@@ -1063,6 +1078,7 @@ int nfsd_net_id;
static __net_init int nfsd_init_net(struct net *net)
{
int retval;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
retval = nfsd_export_init(net);
if (retval)
@@ -1070,6 +1086,8 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
+ nn->nfsd4_lease = 90; /* default lease time */
+ nn->nfsd4_grace = 90;
return 0;
out_idmap_error:
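Most of the nfsctl.c churn above is one mechanical step of the containerisation work: module-wide state (the server pointer, nfsd4_lease, nfsd4_grace) moves into struct nfsd_net, and every user first looks that structure up with net_generic(net, nfsd_net_id). A self-contained user-space analogue of that lookup-by-id pattern (editorial sketch, not part of the patch; all names hypothetical):

/* Sketch only: toy analogue of per-net data reached via net_generic(). */
#include <stdio.h>

struct net { void *gen[8]; };			/* per-namespace slot array */

static int nfsd_net_id;				/* slot handed out at init  */

struct nfsd_net { int nfsd4_lease; int nfsd4_grace; };

static void *net_generic(struct net *net, int id)
{
	return net->gen[id];
}

int main(void)
{
	static struct nfsd_net nn0 = { .nfsd4_lease = 90, .nfsd4_grace = 90 };
	struct net init_net = { .gen = { [0] = &nn0 } };
	struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);

	printf("lease=%d grace=%d\n", nn->nfsd4_lease, nn->nfsd4_grace);
	return 0;
}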
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 80d5ce40aad..de23db255c6 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -55,7 +55,6 @@ extern struct svc_version nfsd_version2, nfsd_version3,
nfsd_version4;
extern u32 nfsd_supported_minorversion;
extern struct mutex nfsd_mutex;
-extern struct svc_serv *nfsd_serv;
extern spinlock_t nfsd_drc_lock;
extern unsigned int nfsd_drc_max_mem;
extern unsigned int nfsd_drc_mem_used;
@@ -65,26 +64,17 @@ extern const struct seq_operations nfs_exports_op;
/*
* Function prototypes.
*/
-int nfsd_svc(int nrservs);
+int nfsd_svc(int nrservs, struct net *net);
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
-int nfsd_nrthreads(void);
-int nfsd_nrpools(void);
-int nfsd_get_nrthreads(int n, int *);
-int nfsd_set_nrthreads(int n, int *);
+int nfsd_nrthreads(struct net *);
+int nfsd_nrpools(struct net *);
+int nfsd_get_nrthreads(int n, int *, struct net *);
+int nfsd_set_nrthreads(int n, int *, struct net *);
int nfsd_pool_stats_open(struct inode *, struct file *);
int nfsd_pool_stats_release(struct inode *, struct file *);
-static inline void nfsd_destroy(struct net *net)
-{
- int destroy = (nfsd_serv->sv_nrthreads == 1);
-
- if (destroy)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv);
- if (destroy)
- nfsd_serv = NULL;
-}
+void nfsd_destroy(struct net *net);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
@@ -103,7 +93,7 @@ enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
int nfsd_vers(int vers, enum vers_op change);
int nfsd_minorversion(u32 minorversion, enum vers_op change);
void nfsd_reset_versions(void);
-int nfsd_create_serv(void);
+int nfsd_create_serv(struct net *net);
extern int nfsd_max_blksize;
@@ -121,7 +111,9 @@ void nfs4_state_init(void);
int nfsd4_init_slabs(void);
void nfsd4_free_slabs(void);
int nfs4_state_start(void);
+int nfs4_state_start_net(struct net *net);
void nfs4_state_shutdown(void);
+void nfs4_state_shutdown_net(struct net *net);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
char * nfs4_recoverydir(void);
@@ -130,7 +122,9 @@ static inline void nfs4_state_init(void) { }
static inline int nfsd4_init_slabs(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
static inline int nfs4_state_start(void) { return 0; }
+static inline int nfs4_state_start_net(struct net *net) { return 0; }
static inline void nfs4_state_shutdown(void) { }
+static inline void nfs4_state_shutdown_net(struct net *net) { }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
static inline char * nfs4_recoverydir(void) {return NULL; }
@@ -265,16 +259,8 @@ void nfsd_lockd_shutdown(void);
/* Check for dir entries '.' and '..' */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
-/*
- * Time of server startup
- */
-extern struct timeval nfssvc_boot;
-
#ifdef CONFIG_NFSD_V4
-extern time_t nfsd4_lease;
-extern time_t nfsd4_grace;
-
/* before processing a COMPOUND operation, we have to check that there
* is enough space in the buffer for XDR encode to succeed. otherwise,
* we might process an operation with side effects, and be unable to
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 032af381b3a..814afaa4458 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -572,7 +572,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
if (inode)
_fh_update(fhp, exp, dentry);
- if (fhp->fh_handle.fh_fileid_type == 255) {
+ if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
return nfserr_opnotsupp;
}
@@ -603,7 +603,7 @@ fh_update(struct svc_fh *fhp)
goto out;
_fh_update(fhp, fhp->fh_export, dentry);
- if (fhp->fh_handle.fh_fileid_type == 255)
+ if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
return nfserr_opnotsupp;
}
out:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 2013aa001da..cee62ab9d4a 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
-#include <linux/nsproxy.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
@@ -22,19 +21,19 @@
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_SVC
extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
-struct timeval nfssvc_boot;
/*
- * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members
+ * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
* of the svc_serv struct. In particular, ->sv_nrthreads but also to some
* extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt
*
- * If (out side the lock) nfsd_serv is non-NULL, then it must point to a
+ * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
of nfsd threads must exist and each must be listed in ->sp_all_threads in each
* entry of ->sv_pools[].
@@ -52,7 +51,6 @@ struct timeval nfssvc_boot;
* nfsd_versions
*/
DEFINE_MUTEX(nfsd_mutex);
-struct svc_serv *nfsd_serv;
/*
* nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
@@ -173,28 +171,32 @@ int nfsd_minorversion(u32 minorversion, enum vers_op change)
*/
#define NFSD_MAXSERVS 8192
-int nfsd_nrthreads(void)
+int nfsd_nrthreads(struct net *net)
{
int rv = 0;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
mutex_lock(&nfsd_mutex);
- if (nfsd_serv)
- rv = nfsd_serv->sv_nrthreads;
+ if (nn->nfsd_serv)
+ rv = nn->nfsd_serv->sv_nrthreads;
mutex_unlock(&nfsd_mutex);
return rv;
}
-static int nfsd_init_socks(void)
+static int nfsd_init_socks(struct net *net)
{
int error;
- if (!list_empty(&nfsd_serv->sv_permsocks))
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ if (!list_empty(&nn->nfsd_serv->sv_permsocks))
return 0;
- error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, NFS_PORT,
+ error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
- error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, NFS_PORT,
+ error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
@@ -202,14 +204,15 @@ static int nfsd_init_socks(void)
return 0;
}
-static bool nfsd_up = false;
+static int nfsd_users = 0;
-static int nfsd_startup(int nrservs)
+static int nfsd_startup_generic(int nrservs)
{
int ret;
- if (nfsd_up)
+ if (nfsd_users++)
return 0;
+
/*
* Readahead param cache - will no-op if it already exists.
* (Note therefore results will be suboptimal if number of
@@ -218,43 +221,79 @@ static int nfsd_startup(int nrservs)
ret = nfsd_racache_init(2*nrservs);
if (ret)
return ret;
- ret = nfsd_init_socks();
+ ret = nfs4_state_start();
if (ret)
goto out_racache;
- ret = lockd_up(&init_net);
+ return 0;
+
+out_racache:
+ nfsd_racache_shutdown();
+ return ret;
+}
+
+static void nfsd_shutdown_generic(void)
+{
+ if (--nfsd_users)
+ return;
+
+ nfs4_state_shutdown();
+ nfsd_racache_shutdown();
+}
+
+static int nfsd_startup_net(int nrservs, struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int ret;
+
+ if (nn->nfsd_net_up)
+ return 0;
+
+ ret = nfsd_startup_generic(nrservs);
if (ret)
- goto out_racache;
- ret = nfs4_state_start();
+ return ret;
+ ret = nfsd_init_socks(net);
+ if (ret)
+ goto out_socks;
+ ret = lockd_up(net);
+ if (ret)
+ goto out_socks;
+ ret = nfs4_state_start_net(net);
if (ret)
goto out_lockd;
- nfsd_up = true;
+
+ nn->nfsd_net_up = true;
return 0;
+
out_lockd:
- lockd_down(&init_net);
-out_racache:
- nfsd_racache_shutdown();
+ lockd_down(net);
+out_socks:
+ nfsd_shutdown_generic();
return ret;
}
-static void nfsd_shutdown(void)
+static void nfsd_shutdown_net(struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nfs4_state_shutdown_net(net);
+ lockd_down(net);
+ nn->nfsd_net_up = false;
+ nfsd_shutdown_generic();
+}
+
+static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
/*
* write_ports can create the server without actually starting
* any threads--if we get shut down before any threads are
* started, then nfsd_last_thread will be run before any of this
* other initialization has been done.
*/
- if (!nfsd_up)
+ if (!nn->nfsd_net_up)
return;
- nfs4_state_shutdown();
- lockd_down(&init_net);
- nfsd_racache_shutdown();
- nfsd_up = false;
-}
-
-static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
-{
- nfsd_shutdown();
+ nfsd_shutdown_net(net);
svc_rpcb_cleanup(serv, net);
@@ -327,69 +366,84 @@ static int nfsd_get_default_max_blksize(void)
return ret;
}
-int nfsd_create_serv(void)
+int nfsd_create_serv(struct net *net)
{
int error;
- struct net *net = current->nsproxy->net_ns;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
WARN_ON(!mutex_is_locked(&nfsd_mutex));
- if (nfsd_serv) {
- svc_get(nfsd_serv);
+ if (nn->nfsd_serv) {
+ svc_get(nn->nfsd_serv);
return 0;
}
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions();
- nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
+ nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
nfsd_last_thread, nfsd, THIS_MODULE);
- if (nfsd_serv == NULL)
+ if (nn->nfsd_serv == NULL)
return -ENOMEM;
- error = svc_bind(nfsd_serv, net);
+ error = svc_bind(nn->nfsd_serv, net);
if (error < 0) {
- svc_destroy(nfsd_serv);
+ svc_destroy(nn->nfsd_serv);
return error;
}
set_max_drc();
- do_gettimeofday(&nfssvc_boot); /* record boot time */
+ do_gettimeofday(&nn->nfssvc_boot); /* record boot time */
return 0;
}
-int nfsd_nrpools(void)
+int nfsd_nrpools(struct net *net)
{
- if (nfsd_serv == NULL)
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ if (nn->nfsd_serv == NULL)
return 0;
else
- return nfsd_serv->sv_nrpools;
+ return nn->nfsd_serv->sv_nrpools;
}
-int nfsd_get_nrthreads(int n, int *nthreads)
+int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- if (nfsd_serv != NULL) {
- for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
- nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
+ if (nn->nfsd_serv != NULL) {
+ for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
+ nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
}
return 0;
}
-int nfsd_set_nrthreads(int n, int *nthreads)
+void nfsd_destroy(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ int destroy = (nn->nfsd_serv->sv_nrthreads == 1);
+
+ if (destroy)
+ svc_shutdown_net(nn->nfsd_serv, net);
+ svc_destroy(nn->nfsd_serv);
+ if (destroy)
+ nn->nfsd_serv = NULL;
+}
+
+int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
int tot = 0;
int err = 0;
- struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
WARN_ON(!mutex_is_locked(&nfsd_mutex));
- if (nfsd_serv == NULL || n <= 0)
+ if (nn->nfsd_serv == NULL || n <= 0)
return 0;
- if (n > nfsd_serv->sv_nrpools)
- n = nfsd_serv->sv_nrpools;
+ if (n > nn->nfsd_serv->sv_nrpools)
+ n = nn->nfsd_serv->sv_nrpools;
/* enforce a global maximum number of threads */
tot = 0;
@@ -419,9 +473,9 @@ int nfsd_set_nrthreads(int n, int *nthreads)
nthreads[0] = 1;
/* apply the new numbers */
- svc_get(nfsd_serv);
+ svc_get(nn->nfsd_serv);
for (i = 0; i < n; i++) {
- err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
+ err = svc_set_num_threads(nn->nfsd_serv, &nn->nfsd_serv->sv_pools[i],
nthreads[i]);
if (err)
break;
@@ -436,11 +490,11 @@ int nfsd_set_nrthreads(int n, int *nthreads)
* this is the first time nrservs is nonzero.
*/
int
-nfsd_svc(int nrservs)
+nfsd_svc(int nrservs, struct net *net)
{
int error;
bool nfsd_up_before;
- struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
@@ -449,29 +503,29 @@ nfsd_svc(int nrservs)
if (nrservs > NFSD_MAXSERVS)
nrservs = NFSD_MAXSERVS;
error = 0;
- if (nrservs == 0 && nfsd_serv == NULL)
+ if (nrservs == 0 && nn->nfsd_serv == NULL)
goto out;
- error = nfsd_create_serv();
+ error = nfsd_create_serv(net);
if (error)
goto out;
- nfsd_up_before = nfsd_up;
+ nfsd_up_before = nn->nfsd_net_up;
- error = nfsd_startup(nrservs);
+ error = nfsd_startup_net(nrservs, net);
if (error)
goto out_destroy;
- error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
+ error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
if (error)
goto out_shutdown;
- /* We are holding a reference to nfsd_serv which
+ /* We are holding a reference to nn->nfsd_serv which
* we don't want to count in the return value,
* so subtract 1
*/
- error = nfsd_serv->sv_nrthreads - 1;
+ error = nn->nfsd_serv->sv_nrthreads - 1;
out_shutdown:
if (error < 0 && !nfsd_up_before)
- nfsd_shutdown();
+ nfsd_shutdown_net(net);
out_destroy:
nfsd_destroy(net); /* Release server */
out:
@@ -487,6 +541,8 @@ static int
nfsd(void *vrqstp)
{
struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
+ struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
+ struct net *net = perm_sock->xpt_net;
int err;
/* Lock module and set up kernel thread */
@@ -551,7 +607,7 @@ out:
/* Release the thread */
svc_exit_thread(rqstp);
- nfsd_destroy(&init_net);
+ nfsd_destroy(net);
/* Release module */
mutex_unlock(&nfsd_mutex);
@@ -640,21 +696,24 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
}
/* Store reply in cache. */
- nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
+ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
return 1;
}
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
int ret;
+ struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
mutex_lock(&nfsd_mutex);
- if (nfsd_serv == NULL) {
+ if (nn->nfsd_serv == NULL) {
mutex_unlock(&nfsd_mutex);
return -ENODEV;
}
/* bump up the pseudo refcount while traversing */
- svc_get(nfsd_serv);
- ret = svc_pool_stats_open(nfsd_serv, file);
+ svc_get(nn->nfsd_serv);
+ ret = svc_pool_stats_open(nn->nfsd_serv, file);
mutex_unlock(&nfsd_mutex);
return ret;
}
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 65ec595e222..979b4210697 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -246,7 +246,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_readargs *args)
{
unsigned int len;
- int v,pn;
+ int v;
if (!(p = decode_fh(p, &args->fh)))
return 0;
@@ -262,8 +262,9 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
*/
v=0;
while (len > 0) {
- pn = rqstp->rq_resused++;
- rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
+ struct page *p = *(rqstp->rq_next_page++);
+
+ rqstp->rq_vec[v].iov_base = page_address(p);
rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE;
len -= rqstp->rq_vec[v].iov_len;
v++;
@@ -355,7 +356,7 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
{
if (!(p = decode_fh(p, &args->fh)))
return 0;
- args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]);
+ args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
@@ -396,7 +397,7 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
if (args->count > PAGE_SIZE)
args->count = PAGE_SIZE;
- args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]);
+ args->buffer = page_address(*(rqstp->rq_next_page++));
return xdr_argsize_check(rqstp, p);
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e036894bce5..d1c229feed5 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -150,6 +150,12 @@ struct nfsd4_channel_attrs {
u32 rdma_attrs;
};
+struct nfsd4_cb_sec {
+ u32 flavor; /* (u32)(-1) used to mean "no valid flavor" */
+ u32 uid;
+ u32 gid;
+};
+
struct nfsd4_create_session {
clientid_t clientid;
struct nfs4_sessionid sessionid;
@@ -158,8 +164,12 @@ struct nfsd4_create_session {
struct nfsd4_channel_attrs fore_channel;
struct nfsd4_channel_attrs back_channel;
u32 callback_prog;
- u32 uid;
- u32 gid;
+ struct nfsd4_cb_sec cb_sec;
+};
+
+struct nfsd4_backchannel_ctl {
+ u32 bc_cb_program;
+ struct nfsd4_cb_sec bc_cb_sec;
};
struct nfsd4_bind_conn_to_session {
@@ -192,6 +202,7 @@ struct nfsd4_session {
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_channel_attrs se_bchannel;
+ struct nfsd4_cb_sec se_cb_sec;
struct list_head se_conns;
u32 se_cb_prog;
u32 se_cb_seq_nr;
@@ -221,13 +232,12 @@ struct nfsd4_sessionid {
*/
struct nfs4_client {
struct list_head cl_idhash; /* hash by cl_clientid.id */
- struct list_head cl_strhash; /* hash by cl_name */
+ struct rb_node cl_namenode; /* link into by-name trees */
struct list_head cl_openowners;
struct idr cl_stateids; /* stateid lookup */
struct list_head cl_delegations;
struct list_head cl_lru; /* tail queue */
struct xdr_netobj cl_name; /* id generated by client */
- char cl_recdir[HEXDIR_LEN]; /* recovery dir */
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
struct sockaddr_storage cl_addr; /* client ipaddress */
@@ -242,9 +252,11 @@ struct nfs4_client {
#define NFSD4_CLIENT_CB_KILL (1)
#define NFSD4_CLIENT_STABLE (2) /* client on stable storage */
#define NFSD4_CLIENT_RECLAIM_COMPLETE (3) /* reclaim_complete done */
+#define NFSD4_CLIENT_CONFIRMED (4) /* client is confirmed */
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
unsigned long cl_flags;
+ struct rpc_cred *cl_cb_cred;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
#define NFSD4_CB_UP 0
@@ -271,6 +283,7 @@ struct nfs4_client {
unsigned long cl_cb_slot_busy;
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
+ struct net *net;
};
static inline void
@@ -292,6 +305,7 @@ is_client_expired(struct nfs4_client *clp)
*/
struct nfs4_client_reclaim {
struct list_head cr_strhash; /* hash by cr_name */
+ struct nfs4_client *cr_clp; /* pointer to associated clp */
char cr_recdir[HEXDIR_LEN]; /* recover dir */
};
@@ -452,25 +466,26 @@ extern __be32 nfs4_preprocess_stateid_op(struct net *net,
stateid_t *stateid, int flags, struct file **filp);
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
-extern int nfs4_in_grace(void);
-extern void nfs4_release_reclaim(void);
-extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct nfs4_client *crp);
-extern __be32 nfs4_check_open_reclaim(clientid_t *clid, bool sessions);
+void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
+extern void nfs4_release_reclaim(struct nfsd_net *);
+extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
+ struct nfsd_net *nn);
+extern __be32 nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn);
extern void nfs4_free_openowner(struct nfs4_openowner *);
extern void nfs4_free_lockowner(struct nfs4_lockowner *);
extern int set_callback_cred(void);
+extern void nfsd4_init_callback(struct nfsd4_callback *);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
-extern void nfsd4_do_callback_rpc(struct work_struct *);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
-extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
-extern int nfs4_client_to_reclaim(const char *name);
-extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
+extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name,
+ struct nfsd_net *nn);
+extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn);
extern void release_session_client(struct nfsd4_session *);
extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
@@ -480,5 +495,28 @@ extern void nfsd4_client_tracking_exit(struct net *net);
extern void nfsd4_client_record_create(struct nfs4_client *clp);
extern void nfsd4_client_record_remove(struct nfs4_client *clp);
extern int nfsd4_client_record_check(struct nfs4_client *clp);
-extern void nfsd4_record_grace_done(struct net *net, time_t boot_time);
+extern void nfsd4_record_grace_done(struct nfsd_net *nn, time_t boot_time);
+
+/* nfs fault injection functions */
+#ifdef CONFIG_NFSD_FAULT_INJECTION
+int nfsd_fault_inject_init(void);
+void nfsd_fault_inject_cleanup(void);
+u64 nfsd_for_n_state(u64, u64 (*)(struct nfs4_client *, u64));
+struct nfs4_client *nfsd_find_client(struct sockaddr_storage *, size_t);
+
+u64 nfsd_forget_client(struct nfs4_client *, u64);
+u64 nfsd_forget_client_locks(struct nfs4_client*, u64);
+u64 nfsd_forget_client_openowners(struct nfs4_client *, u64);
+u64 nfsd_forget_client_delegations(struct nfs4_client *, u64);
+u64 nfsd_recall_client_delegations(struct nfs4_client *, u64);
+
+u64 nfsd_print_client(struct nfs4_client *, u64);
+u64 nfsd_print_client_locks(struct nfs4_client *, u64);
+u64 nfsd_print_client_openowners(struct nfs4_client *, u64);
+u64 nfsd_print_client_delegations(struct nfs4_client *, u64);
+#else /* CONFIG_NFSD_FAULT_INJECTION */
+static inline int nfsd_fault_inject_init(void) { return 0; }
+static inline void nfsd_fault_inject_cleanup(void) {}
+#endif /* CONFIG_NFSD_FAULT_INJECTION */
+
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c120b48ec30..d586117fa94 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -886,7 +886,7 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct svc_rqst *rqstp = sd->u.data;
- struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
+ struct page **pp = rqstp->rq_next_page;
struct page *page = buf->page;
size_t size;
@@ -894,17 +894,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (rqstp->rq_res.page_len == 0) {
get_page(page);
- put_page(*pp);
- *pp = page;
- rqstp->rq_resused++;
+ put_page(*rqstp->rq_next_page);
+ *(rqstp->rq_next_page++) = page;
rqstp->rq_res.page_base = buf->offset;
rqstp->rq_res.page_len = size;
} else if (page != pp[-1]) {
get_page(page);
- if (*pp)
- put_page(*pp);
- *pp = page;
- rqstp->rq_resused++;
+ if (*rqstp->rq_next_page)
+ put_page(*rqstp->rq_next_page);
+ *(rqstp->rq_next_page++) = page;
rqstp->rq_res.page_len += size;
} else
rqstp->rq_res.page_len += size;
@@ -936,7 +934,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
.u.data = rqstp,
};
- rqstp->rq_resused = 1;
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
} else {
oldfs = get_fs();
@@ -1020,28 +1018,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
inode = dentry->d_inode;
exp = fhp->fh_export;
- /*
- * Request sync writes if
- * - the sync export option has been set, or
- * - the client requested O_SYNC behavior (NFSv3 feature).
- * - The file system doesn't support fsync().
- * When NFSv2 gathered writes have been configured for this volume,
- * flushing the data to disk is handled separately below.
- */
use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
- if (!file->f_op->fsync) {/* COMMIT3 cannot work */
- stable = 2;
- *stablep = 2; /* FILE_SYNC */
- }
-
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !use_wgather) {
- spin_lock(&file->f_lock);
- file->f_flags |= O_SYNC;
- spin_unlock(&file->f_lock);
- }
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -1057,8 +1037,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (inode->i_mode & (S_ISUID | S_ISGID))
kill_suid(dentry);
- if (stable && use_wgather)
- host_err = wait_for_concurrent_writes(file);
+ if (stable) {
+ if (use_wgather)
+ host_err = wait_for_concurrent_writes(file);
+ else
+ host_err = vfs_fsync_range(file, offset, offset+*cnt, 0);
+ }
out_nfserr:
dprintk("nfsd: write complete host_err=%d\n", host_err);
@@ -1485,13 +1469,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
case NFS3_CREATE_EXCLUSIVE:
if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
&& dchild->d_inode->i_atime.tv_sec == v_atime
- && dchild->d_inode->i_size == 0 )
+ && dchild->d_inode->i_size == 0 ) {
+ if (created)
+ *created = 1;
break;
+ }
case NFS4_CREATE_EXCLUSIVE4_1:
if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
&& dchild->d_inode->i_atime.tv_sec == v_atime
- && dchild->d_inode->i_size == 0 )
+ && dchild->d_inode->i_size == 0 ) {
+ if (created)
+ *created = 1;
goto set_attr;
+ }
/* fallthru */
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index acd127d4ee8..0889bfb43dc 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -385,7 +385,8 @@ struct nfsd4_write {
u64 wr_offset; /* request */
u32 wr_stable_how; /* request */
u32 wr_buflen; /* request */
- int wr_vlen;
+ struct kvec wr_head;
+ struct page ** wr_pagelist; /* request */
u32 wr_bytes_written; /* response */
u32 wr_how_written; /* response */
@@ -462,6 +463,7 @@ struct nfsd4_op {
/* NFSv4.1 */
struct nfsd4_exchange_id exchange_id;
+ struct nfsd4_backchannel_ctl backchannel_ctl;
struct nfsd4_bind_conn_to_session bind_conn_to_session;
struct nfsd4_create_session create_session;
struct nfsd4_destroy_session destroy_session;
@@ -526,6 +528,14 @@ static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
|| nfsd4_is_solo_sequence(resp);
}
+static inline bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+ return argp->opcnt == resp->opcnt;
+}
+
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
static inline void
@@ -566,6 +576,7 @@ extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_backchannel_ctl(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_backchannel_ctl *);
extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
extern __be32 nfsd4_create_session(struct svc_rqst *,
struct nfsd4_compound_state *,
@@ -579,7 +590,7 @@ extern __be32 nfsd4_destroy_session(struct svc_rqst *,
extern __be32 nfsd4_destroy_clientid(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_destroy_clientid *);
__be32 nfsd4_reclaim_complete(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_reclaim_complete *);
extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
- struct nfsd4_open *open);
+ struct nfsd4_open *open, struct nfsd_net *nn);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
struct svc_fh *current_fh, struct nfsd4_open *open);
extern void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status);
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 16f35f7423c..61946883025 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -167,7 +167,6 @@ const struct file_operations nilfs_file_operations = {
};
const struct inode_operations nilfs_file_inode_operations = {
- .truncate = nilfs_truncate,
.setattr = nilfs_setattr,
.permission = nilfs_permission,
.fiemap = nilfs_fiemap,
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 4d31d2cca7f..6b49f14eac8 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -213,6 +213,16 @@ static int nilfs_set_page_dirty(struct page *page)
return ret;
}
+void nilfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ nilfs_truncate(inode);
+ }
+}
+
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -227,10 +237,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
err = block_write_begin(mapping, pos, len, flags, pagep,
nilfs_get_block);
if (unlikely(err)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
-
+ nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb);
}
return err;
@@ -259,6 +266,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t offset, unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
ssize_t size;
@@ -278,7 +286,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
- vmtruncate(inode, isize);
+ nilfs_write_failed(mapping, end);
}
return size;
@@ -786,10 +794,8 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
-
- err = vmtruncate(inode, iattr->ia_size);
- if (unlikely(err))
- goto out_err;
+ truncate_setsize(inode, iattr->ia_size);
+ nilfs_truncate(inode);
}
setattr_copy(inode, iattr);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 74cece80e9a..9bc72dec3fa 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -277,6 +277,7 @@ extern void nilfs_update_inode(struct inode *, struct buffer_head *);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);
extern int nilfs_setattr(struct dentry *, struct iattr *);
+extern void nilfs_write_failed(struct address_space *mapping, loff_t to);
int nilfs_permission(struct inode *inode, int mask);
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
extern int nilfs_inode_dirty(struct inode *);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index f1626f5011c..ff00a0b7acb 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -527,7 +527,8 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
if (unlikely(err)) {
loff_t isize = inode->i_size;
if (pos + blocksize > isize)
- vmtruncate(inode, isize);
+ nilfs_write_failed(inode->i_mapping,
+ pos + blocksize);
goto failed_inode;
}
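The nilfs2 hunks above replace the old vmtruncate() error paths with nilfs_write_failed(), which cleans up after a failed write that would have extended the file: anything instantiated in the page cache beyond i_size is truncated away again (and the filesystem's own truncate undoes block allocation). A toy user-space analogue of that rollback rule (editorial sketch, not part of the patch; names hypothetical):

/* Sketch only: roll the cache back when a size-extending write fails. */
#include <stdio.h>

struct fake_mapping {
	long long i_size;	/* size the filesystem believes in          */
	long long cached_to;	/* how far page cache has been instantiated */
};

/* drop whatever the failed write instantiated beyond the real size */
static void write_failed(struct fake_mapping *m, long long to)
{
	if (to > m->i_size)
		m->cached_to = m->i_size;	/* stands in for truncate_pagecache() */
}

int main(void)
{
	struct fake_mapping m = { .i_size = 100, .cached_to = 100 };

	m.cached_to = 4096;		/* write_begin set up pages past EOF */
	write_failed(&m, 4096);		/* ...but the write itself failed    */
	printf("cache now ends at %lld (i_size %lld)\n", m.cached_to, m.i_size);
	return 0;
}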
diff --git a/fs/notify/Makefile b/fs/notify/Makefile
index ae5f33a6d86..96d3420d024 100644
--- a/fs/notify/Makefile
+++ b/fs/notify/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o \
- mark.o vfsmount_mark.o
+ mark.o vfsmount_mark.o fdinfo.o
obj-y += dnotify/
obj-y += inotify/
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 3344bdd5506..08b886f119c 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -201,7 +201,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
/* nothing else could have found us thanks to the dnotify_mark_mutex */
if (dn_mark->dn == NULL)
- fsnotify_destroy_mark(fsn_mark);
+ fsnotify_destroy_mark(fsn_mark, dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
@@ -385,7 +385,7 @@ out:
spin_unlock(&fsn_mark->lock);
if (destroy)
- fsnotify_destroy_mark(fsn_mark);
+ fsnotify_destroy_mark(fsn_mark, dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(fsn_mark);
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index 7dceff005a6..e5f911bd80d 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -4,7 +4,7 @@ config FANOTIFY
select ANON_INODES
default n
---help---
- Say Y here to enable fanotify suport. fanotify is a file access
+ Say Y here to enable fanotify support. fanotify is a file access
notification system which differs from inotify in that it sends
an open file descriptor to the userspace listener along with
the event.
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index a5063602536..0c2f9122b26 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
old->tgid == new->tgid) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_PATH):
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ /* don't merge two permission events */
+ if ((old->mask & FAN_ALL_PERM_EVENTS) &&
+ (new->mask & FAN_ALL_PERM_EVENTS))
+ return false;
+#endif
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 6fcaeb8c902..9ff4a5ee6e2 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -17,6 +17,7 @@
#include <asm/ioctls.h>
#include "../../mount.h"
+#include "../fdinfo.h"
#define FANOTIFY_DEFAULT_MAX_EVENTS 16384
#define FANOTIFY_DEFAULT_MAX_MARKS 8192
@@ -396,8 +397,12 @@ static int fanotify_release(struct inode *ignored, struct file *file)
wake_up(&group->fanotify_data.access_waitq);
#endif
+
+ if (file->f_flags & FASYNC)
+ fsnotify_fasync(-1, file, 0);
+
/* matches the fanotify_init->fsnotify_alloc_group */
- fsnotify_put_group(group);
+ fsnotify_destroy_group(group);
return 0;
}
@@ -428,6 +433,7 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
}
static const struct file_operations fanotify_fops = {
+ .show_fdinfo = fanotify_show_fdinfo,
.poll = fanotify_poll,
.read = fanotify_read,
.write = fanotify_write,
@@ -491,7 +497,8 @@ out:
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
__u32 mask,
- unsigned int flags)
+ unsigned int flags,
+ int *destroy)
{
__u32 oldmask;
@@ -505,8 +512,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
}
spin_unlock(&fsn_mark->lock);
- if (!(oldmask & ~mask))
- fsnotify_destroy_mark(fsn_mark);
+ *destroy = !(oldmask & ~mask);
return mask & oldmask;
}
@@ -517,12 +523,17 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
+ int destroy_mark;
fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
if (!fsn_mark)
return -ENOENT;
- removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+ &destroy_mark);
+ if (destroy_mark)
+ fsnotify_destroy_mark(fsn_mark, group);
+
fsnotify_put_mark(fsn_mark);
if (removed & real_mount(mnt)->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
@@ -536,12 +547,16 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
+ int destroy_mark;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
- removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+ &destroy_mark);
+ if (destroy_mark)
+ fsnotify_destroy_mark(fsn_mark, group);
/* matches the fsnotify_find_inode_mark() */
fsnotify_put_mark(fsn_mark);
if (removed & inode->i_fsnotify_mask)
@@ -708,13 +723,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
break;
default:
fd = -EINVAL;
- goto out_put_group;
+ goto out_destroy_group;
}
if (flags & FAN_UNLIMITED_QUEUE) {
fd = -EPERM;
if (!capable(CAP_SYS_ADMIN))
- goto out_put_group;
+ goto out_destroy_group;
group->max_events = UINT_MAX;
} else {
group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
@@ -723,7 +738,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if (flags & FAN_UNLIMITED_MARKS) {
fd = -EPERM;
if (!capable(CAP_SYS_ADMIN))
- goto out_put_group;
+ goto out_destroy_group;
group->fanotify_data.max_marks = UINT_MAX;
} else {
group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
@@ -731,12 +746,12 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
if (fd < 0)
- goto out_put_group;
+ goto out_destroy_group;
return fd;
-out_put_group:
- fsnotify_put_group(group);
+out_destroy_group:
+ fsnotify_destroy_group(group);
return fd;
}
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
new file mode 100644
index 00000000000..238a5930cb3
--- /dev/null
+++ b/fs/notify/fdinfo.c
@@ -0,0 +1,179 @@
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/inotify.h>
+#include <linux/fanotify.h>
+#include <linux/kernel.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/exportfs.h>
+
+#include "inotify/inotify.h"
+#include "../fs/mount.h"
+
+#if defined(CONFIG_PROC_FS)
+
+#if defined(CONFIG_INOTIFY_USER) || defined(CONFIG_FANOTIFY)
+
+static int show_fdinfo(struct seq_file *m, struct file *f,
+ int (*show)(struct seq_file *m, struct fsnotify_mark *mark))
+{
+ struct fsnotify_group *group = f->private_data;
+ struct fsnotify_mark *mark;
+ int ret = 0;
+
+ mutex_lock(&group->mark_mutex);
+ list_for_each_entry(mark, &group->marks_list, g_list) {
+ ret = show(m, mark);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&group->mark_mutex);
+ return ret;
+}
+
+#if defined(CONFIG_EXPORTFS)
+static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
+{
+ struct {
+ struct file_handle handle;
+ u8 pad[64];
+ } f;
+ int size, ret, i;
+
+ f.handle.handle_bytes = sizeof(f.pad);
+ size = f.handle.handle_bytes >> 2;
+
+ ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+ if ((ret == 255) || (ret == -ENOSPC)) {
+ WARN_ONCE(1, "Can't encode file handle for inotify: %d\n", ret);
+ return 0;
+ }
+
+ f.handle.handle_type = ret;
+ f.handle.handle_bytes = size * sizeof(u32);
+
+ ret = seq_printf(m, "fhandle-bytes:%x fhandle-type:%x f_handle:",
+ f.handle.handle_bytes, f.handle.handle_type);
+
+ for (i = 0; i < f.handle.handle_bytes; i++)
+ ret |= seq_printf(m, "%02x", (int)f.handle.f_handle[i]);
+
+ return ret;
+}
+#else
+static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_INOTIFY_USER
+
+static int inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
+{
+ struct inotify_inode_mark *inode_mark;
+ struct inode *inode;
+ int ret = 0;
+
+ if (!(mark->flags & (FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_INODE)))
+ return 0;
+
+ inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
+ inode = igrab(mark->i.inode);
+ if (inode) {
+ ret = seq_printf(m, "inotify wd:%x ino:%lx sdev:%x "
+ "mask:%x ignored_mask:%x ",
+ inode_mark->wd, inode->i_ino,
+ inode->i_sb->s_dev,
+ mark->mask, mark->ignored_mask);
+ ret |= show_mark_fhandle(m, inode);
+ ret |= seq_putc(m, '\n');
+ iput(inode);
+ }
+
+ return ret;
+}
+
+int inotify_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ return show_fdinfo(m, f, inotify_fdinfo);
+}
+
+#endif /* CONFIG_INOTIFY_USER */
+
+#ifdef CONFIG_FANOTIFY
+
+static int fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
+{
+ unsigned int mflags = 0;
+ struct inode *inode;
+ int ret = 0;
+
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE))
+ return 0;
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
+ mflags |= FAN_MARK_IGNORED_SURV_MODIFY;
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+ inode = igrab(mark->i.inode);
+ if (!inode)
+ goto out;
+ ret = seq_printf(m, "fanotify ino:%lx sdev:%x "
+ "mflags:%x mask:%x ignored_mask:%x ",
+ inode->i_ino, inode->i_sb->s_dev,
+ mflags, mark->mask, mark->ignored_mask);
+ ret |= show_mark_fhandle(m, inode);
+ ret |= seq_putc(m, '\n');
+ iput(inode);
+ } else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT) {
+ struct mount *mnt = real_mount(mark->m.mnt);
+
+ ret = seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x "
+ "ignored_mask:%x\n", mnt->mnt_id, mflags,
+ mark->mask, mark->ignored_mask);
+ }
+out:
+ return ret;
+}
+
+int fanotify_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct fsnotify_group *group = f->private_data;
+ unsigned int flags = 0;
+
+ switch (group->priority) {
+ case FS_PRIO_0:
+ flags |= FAN_CLASS_NOTIF;
+ break;
+ case FS_PRIO_1:
+ flags |= FAN_CLASS_CONTENT;
+ break;
+ case FS_PRIO_2:
+ flags |= FAN_CLASS_PRE_CONTENT;
+ break;
+ }
+
+ if (group->max_events == UINT_MAX)
+ flags |= FAN_UNLIMITED_QUEUE;
+
+ if (group->fanotify_data.max_marks == UINT_MAX)
+ flags |= FAN_UNLIMITED_MARKS;
+
+ seq_printf(m, "fanotify flags:%x event-flags:%x\n",
+ flags, group->fanotify_data.f_flags);
+
+ return show_fdinfo(m, f, fanotify_fdinfo);
+}
+
+#endif /* CONFIG_FANOTIFY */
+
+#endif /* CONFIG_INOTIFY_USER || CONFIG_FANOTIFY */
+
+#endif /* CONFIG_PROC_FS */
diff --git a/fs/notify/fdinfo.h b/fs/notify/fdinfo.h
new file mode 100644
index 00000000000..556afda990e
--- /dev/null
+++ b/fs/notify/fdinfo.h
@@ -0,0 +1,27 @@
+#ifndef __FSNOTIFY_FDINFO_H__
+#define __FSNOTIFY_FDINFO_H__
+
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+
+struct seq_file;
+struct file;
+
+#ifdef CONFIG_PROC_FS
+
+#ifdef CONFIG_INOTIFY_USER
+extern int inotify_show_fdinfo(struct seq_file *m, struct file *f);
+#endif
+
+#ifdef CONFIG_FANOTIFY
+extern int fanotify_show_fdinfo(struct seq_file *m, struct file *f);
+#endif
+
+#else /* CONFIG_PROC_FS */
+
+#define inotify_show_fdinfo NULL
+#define fanotify_show_fdinfo NULL
+
+#endif /* CONFIG_PROC_FS */
+
+#endif /* __FSNOTIFY_FDINFO_H__ */
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 63fc294a469..bd2625bd88b 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -33,9 +33,6 @@
*/
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
- /* clear the notification queue of all events */
- fsnotify_flush_notify(group);
-
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
@@ -43,23 +40,30 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
}
/*
- * Trying to get rid of a group. We need to first get rid of any outstanding
- * allocations and then free the group. Remember that fsnotify_clear_marks_by_group
- * could miss marks that are being freed by inode and those marks could still
- * hold a reference to this group (via group->num_marks) If we get into that
- * situtation, the fsnotify_final_destroy_group will get called when that final
- * mark is freed.
+ * Trying to get rid of a group. Remove all marks, flush all events and release
+ * the group reference.
+ * Note that another thread calling fsnotify_clear_marks_by_group() may still
+ * hold a ref to the group.
*/
-static void fsnotify_destroy_group(struct fsnotify_group *group)
+void fsnotify_destroy_group(struct fsnotify_group *group)
{
/* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);
synchronize_srcu(&fsnotify_mark_srcu);
- /* past the point of no return, matches the initial value of 1 */
- if (atomic_dec_and_test(&group->num_marks))
- fsnotify_final_destroy_group(group);
+ /* clear the notification queue of all events */
+ fsnotify_flush_notify(group);
+
+ fsnotify_put_group(group);
+}
+
+/*
+ * Get reference to a group.
+ */
+void fsnotify_get_group(struct fsnotify_group *group)
+{
+ atomic_inc(&group->refcnt);
}
/*
@@ -68,7 +72,7 @@ static void fsnotify_destroy_group(struct fsnotify_group *group)
void fsnotify_put_group(struct fsnotify_group *group)
{
if (atomic_dec_and_test(&group->refcnt))
- fsnotify_destroy_group(group);
+ fsnotify_final_destroy_group(group);
}
/*
@@ -84,21 +88,24 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
/* set to 0 when there are no external references to this group */
atomic_set(&group->refcnt, 1);
- /*
- * hits 0 when there are no external references AND no marks for
- * this group
- */
- atomic_set(&group->num_marks, 1);
+ atomic_set(&group->num_marks, 0);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
group->max_events = UINT_MAX;
- spin_lock_init(&group->mark_lock);
+ mutex_init(&group->mark_mutex);
INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
return group;
}
+
+int fsnotify_fasync(int fd, struct file *file, int on)
+{
+ struct fsnotify_group *group = file->private_data;
+
+ return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
+}
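The group.c changes above replace the old num_marks-based final free with plain reference counting: fsnotify_get_group()/fsnotify_put_group() pair up, and fsnotify_destroy_group() tears down marks and the event queue before dropping the allocation reference. A userspace analogue of that lifecycle using C11 atomics (all names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	atomic_int refcnt;		/* starts at 1, like fsnotify_alloc_group() */
};

static struct group *group_alloc(void)
{
	struct group *g = malloc(sizeof(*g));

	if (g)
		atomic_init(&g->refcnt, 1);
	return g;
}

static void group_get(struct group *g)
{
	atomic_fetch_add(&g->refcnt, 1);	/* cf. fsnotify_get_group() */
}

static void group_put(struct group *g)
{
	/* The last put frees, like fsnotify_final_destroy_group(). */
	if (atomic_fetch_sub(&g->refcnt, 1) == 1) {
		puts("final destroy");
		free(g);
	}
}

static void group_destroy(struct group *g)
{
	/* Mark and queue teardown would happen here, then drop the base ref. */
	group_put(g);
}

int main(void)
{
	struct group *g = group_alloc();

	group_get(g);		/* e.g. a mark pinning the group */
	group_put(g);		/* the mark is released */
	group_destroy(g);	/* matches the allocation reference */
	return 0;
}
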
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index b13c00ac48e..f31e90fc050 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
struct inode *inode = mark->i.inode;
+ BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&mark->group->mark_lock);
spin_lock(&inode->i_lock);
@@ -99,8 +99,16 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
spin_unlock(&inode->i_lock);
list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
- fsnotify_destroy_mark(mark);
+ struct fsnotify_group *group;
+
+ spin_lock(&mark->lock);
+ fsnotify_get_group(mark->group);
+ group = mark->group;
+ spin_unlock(&mark->lock);
+
+ fsnotify_destroy_mark(mark, group);
fsnotify_put_mark(mark);
+ fsnotify_put_group(group);
}
}
@@ -116,8 +124,9 @@ void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
-struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
- struct inode *inode)
+static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
+ struct fsnotify_group *group,
+ struct inode *inode)
{
struct fsnotify_mark *mark;
struct hlist_node *pos;
@@ -191,8 +200,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&group->mark_lock);
spin_lock(&inode->i_lock);
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index e3cbd746f64..871569c7d60 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -118,6 +118,7 @@ static int inotify_handle_event(struct fsnotify_group *group,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+ fsnotify_get_group(group);
fsn_event_priv->group = group;
event_priv->wd = wd;
@@ -131,7 +132,7 @@ static int inotify_handle_event(struct fsnotify_group *group,
}
if (inode_mark->mask & IN_ONESHOT)
- fsnotify_destroy_mark(inode_mark);
+ fsnotify_destroy_mark(inode_mark, group);
return ret;
}
@@ -210,6 +211,7 @@ void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
+ fsnotify_put_group(fsn_event_priv->group);
kmem_cache_free(event_priv_cachep, event_priv);
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index c311dda054a..228a2c2ad8d 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -40,6 +40,7 @@
#include <linux/wait.h>
#include "inotify.h"
+#include "../fdinfo.h"
#include <asm/ioctls.h>
@@ -264,7 +265,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
- ret = -EINTR;
+ ret = -ERESTARTSYS;
if (signal_pending(current))
break;
@@ -280,23 +281,17 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
return ret;
}
-static int inotify_fasync(int fd, struct file *file, int on)
-{
- struct fsnotify_group *group = file->private_data;
-
- return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
-}
-
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
- fsnotify_clear_marks_by_group(group);
+ if (file->f_flags & FASYNC)
+ fsnotify_fasync(-1, file, 0);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
- fsnotify_put_group(group);
+ fsnotify_destroy_group(group);
return 0;
}
@@ -335,9 +330,10 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
}
static const struct file_operations inotify_fops = {
+ .show_fdinfo = inotify_show_fdinfo,
.poll = inotify_poll,
.read = inotify_read,
- .fasync = inotify_fasync,
+ .fasync = fsnotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
@@ -519,13 +515,13 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
- return;
-
- i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+ goto skip_send_ignore;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
@@ -533,6 +529,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
+ fsnotify_get_group(group);
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
@@ -546,9 +543,9 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
}
skip_send_ignore:
-
/* matches the reference taken when the event was created */
- fsnotify_put_event(ignored_event);
+ if (ignored_event)
+ fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
@@ -707,12 +704,11 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
- group->inotify_data.fa = NULL;
group->inotify_data.user = get_current_user();
if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
inotify_max_user_instances) {
- fsnotify_put_group(group);
+ fsnotify_destroy_group(group);
return ERR_PTR(-EMFILE);
}
@@ -741,7 +737,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret < 0)
- fsnotify_put_group(group);
+ fsnotify_destroy_group(group);
return ret;
}
@@ -817,7 +813,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
ret = 0;
- fsnotify_destroy_mark(&i_mark->fsn_mark);
+ fsnotify_destroy_mark(&i_mark->fsn_mark, group);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index f104d565b68..fc6b49bf736 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -109,8 +109,11 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
- if (atomic_dec_and_test(&mark->refcnt))
+ if (atomic_dec_and_test(&mark->refcnt)) {
+ if (mark->group)
+ fsnotify_put_group(mark->group);
mark->free_mark(mark);
+ }
}
/*
@@ -118,14 +121,14 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
* The caller had better be holding a reference to this mark so we don't actually
* do the final put under the mark->lock
*/
-void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+ struct fsnotify_group *group)
{
- struct fsnotify_group *group;
struct inode *inode = NULL;
- spin_lock(&mark->lock);
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
- group = mark->group;
+ spin_lock(&mark->lock);
/* something else already called this function on this mark */
if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
@@ -135,8 +138,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
- spin_lock(&group->mark_lock);
-
if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
inode = mark->i.inode;
fsnotify_destroy_inode_mark(mark);
@@ -147,13 +148,22 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
list_del_init(&mark->g_list);
- spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
+ if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+ iput(inode);
+ /* release lock temporarily */
+ mutex_unlock(&group->mark_mutex);
+
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
+ /*
+ * We don't necessarily have a ref on mark from caller so the above destroy
+ * may have actually freed it, unless this group provides a 'freeing_mark'
+ * function which must be holding a reference.
+ */
/*
* Some groups like to know that marks are being freed. This is a
@@ -175,21 +185,17 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
* is just a lazy update (and could be a perf win...)
*/
- if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
- iput(inode);
+ atomic_dec(&group->num_marks);
- /*
- * We don't necessarily have a ref on mark from caller so the above iput
- * may have already destroyed it. Don't touch from now on.
- */
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+}
- /*
- * it's possible that this group tried to destroy itself, but this
- * this mark was simultaneously being freed by inode. If that's the
- * case, we finish freeing the group here.
- */
- if (unlikely(atomic_dec_and_test(&group->num_marks)))
- fsnotify_final_destroy_group(group);
+void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group)
+{
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ fsnotify_destroy_mark_locked(mark, group);
+ mutex_unlock(&group->mark_mutex);
}
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
@@ -214,26 +220,26 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group.
*/
-int fsnotify_add_mark(struct fsnotify_mark *mark,
- struct fsnotify_group *group, struct inode *inode,
- struct vfsmount *mnt, int allow_dups)
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups)
{
int ret = 0;
BUG_ON(inode && mnt);
BUG_ON(!inode && !mnt);
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
/*
* LOCKING ORDER!!!!
+ * group->mark_mutex
* mark->lock
- * group->mark_lock
* inode->i_lock
*/
spin_lock(&mark->lock);
- spin_lock(&group->mark_lock);
-
mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
+ fsnotify_get_group(group);
mark->group = group;
list_add(&mark->g_list, &group->marks_list);
atomic_inc(&group->num_marks);
@@ -251,11 +257,8 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
BUG();
}
- spin_unlock(&group->mark_lock);
-
/* this will pin the object if appropriate */
fsnotify_set_mark_mask_locked(mark, mark->mask);
-
spin_unlock(&mark->lock);
if (inode)
@@ -265,10 +268,10 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
err:
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
list_del_init(&mark->g_list);
+ fsnotify_put_group(group);
mark->group = NULL;
atomic_dec(&group->num_marks);
- spin_unlock(&group->mark_lock);
spin_unlock(&mark->lock);
spin_lock(&destroy_lock);
@@ -279,6 +282,16 @@ err:
return ret;
}
+int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups)
+{
+ int ret;
+ mutex_lock(&group->mark_mutex);
+ ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
+ mutex_unlock(&group->mark_mutex);
+ return ret;
+}
+
/*
* clear any marks in a group in which mark->flags & flags is true
*/
@@ -286,22 +299,16 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
- LIST_HEAD(free_list);
- spin_lock(&group->mark_lock);
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
if (mark->flags & flags) {
- list_add(&mark->free_g_list, &free_list);
- list_del_init(&mark->g_list);
fsnotify_get_mark(mark);
+ fsnotify_destroy_mark_locked(mark, group);
+ fsnotify_put_mark(mark);
}
}
- spin_unlock(&group->mark_lock);
-
- list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
- fsnotify_destroy_mark(mark);
- fsnotify_put_mark(mark);
- }
+ mutex_unlock(&group->mark_mutex);
}
/*
@@ -317,6 +324,8 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol
assert_spin_locked(&old->lock);
new->i.inode = old->i.inode;
new->m.mnt = old->m.mnt;
+ if (old->group)
+ fsnotify_get_group(old->group);
new->group = old->group;
new->mask = old->mask;
new->free_mark = old->free_mark;
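mark.c now exposes fsnotify_destroy_mark()/fsnotify_add_mark() as thin wrappers that take group->mark_mutex and call a *_locked variant, so paths that already hold the mutex (such as fsnotify_clear_marks_by_group_flags() above) can reuse the body without recursive locking. A small pthread sketch of the same wrapper pattern, with purely illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Callers of the _locked variant must already hold mark_mutex. */
static void destroy_mark_locked(int id)
{
	printf("destroying mark %d with the mutex held\n", id);
}

/* Public entry point: take the lock, delegate to the _locked body. */
static void destroy_mark(int id)
{
	pthread_mutex_lock(&mark_mutex);
	destroy_mark_locked(id);
	pthread_mutex_unlock(&mark_mutex);
}

/* Bulk path holds the lock once across many marks. */
static void clear_all_marks(int nr)
{
	pthread_mutex_lock(&mark_mutex);
	for (int i = 0; i < nr; i++)
		destroy_mark_locked(i);
	pthread_mutex_unlock(&mark_mutex);
}

int main(void)
{
	destroy_mark(42);
	clear_all_marks(3);
	return 0;
}
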
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index c887b1378f7..7b51b05f160 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -18,7 +18,7 @@
/*
* Basic idea behind the notification queue: An fsnotify group (like inotify)
- * sends the userspace notification about events asyncronously some time after
+ * sends the userspace notification about events asynchronously some time after
* the event happened. When inotify gets an event it will need to add that
* event to the group notify queue. Since a single event might need to be on
* multiple group's notification queues we can't add the event directly to each
@@ -225,6 +225,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
+ kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
return return_event;
}
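The kill_fasync() call added above, together with the shared fsnotify_fasync() handler from group.c, means an inotify or fanotify descriptor raises SIGIO as events are queued. A hedged userspace demonstration using the standard fcntl() async-notification interface:

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
	(void)sig;
	got_sigio = 1;
}

int main(void)
{
	int fd = inotify_init1(0);

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE) < 0) {
		perror("inotify");
		return 1;
	}
	signal(SIGIO, on_sigio);
	/* Route SIGIO to this process and enable async notification. */
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	puts("create a file in /tmp to trigger SIGIO...");
	while (!got_sigio)
		pause();
	puts("SIGIO received: events are pending on the inotify fd");
	return 0;
}
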
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index b7b4b0e8554..4df58b8ea64 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -46,8 +46,16 @@ void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
spin_unlock(&mnt->mnt_root->d_lock);
list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
- fsnotify_destroy_mark(mark);
+ struct fsnotify_group *group;
+
+ spin_lock(&mark->lock);
+ fsnotify_get_group(mark->group);
+ group = mark->group;
+ spin_unlock(&mark->lock);
+
+ fsnotify_destroy_mark(mark, group);
fsnotify_put_mark(mark);
+ fsnotify_put_group(group);
}
}
@@ -88,8 +96,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
struct vfsmount *mnt = mark->m.mnt;
+ BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&mark->group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);
@@ -151,8 +159,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
+ BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
- assert_spin_locked(&group->mark_lock);
spin_lock(&mnt->mnt_root->d_lock);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 1ecf46448f8..5b2d4f0853a 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1762,6 +1762,16 @@ err_out:
return err;
}
+static void ntfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ ntfs_truncate_vfs(inode);
+ }
+}
+
/**
* ntfs_file_buffered_write -
*
@@ -2022,8 +2032,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
* allocated space, which is not a disaster.
*/
i_size = i_size_read(vi);
- if (pos + bytes > i_size)
- vmtruncate(vi, i_size);
+ if (pos + bytes > i_size) {
+ ntfs_write_failed(mapping, pos + bytes);
+ }
break;
}
}
@@ -2227,7 +2238,6 @@ const struct file_operations ntfs_file_ops = {
const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
- .truncate = ntfs_truncate_vfs,
.setattr = ntfs_setattr,
#endif /* NTFS_RW */
};
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 1d27331e6fc..d3e118cc6ff 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2866,9 +2866,11 @@ conv_err_out:
*
* See ntfs_truncate() description above for details.
*/
+#ifdef NTFS_RW
void ntfs_truncate_vfs(struct inode *vi) {
ntfs_truncate(vi);
}
+#endif
/**
* ntfs_setattr - called from notify_change() when an attribute is being changed
@@ -2914,8 +2916,10 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
NInoCompressed(ni) ?
"compressed" : "encrypted");
err = -EOPNOTSUPP;
- } else
- err = vmtruncate(vi, attr->ia_size);
+ } else {
+ truncate_setsize(vi, attr->ia_size);
+ ntfs_truncate_vfs(vi);
+ }
if (err || ia_valid == ATTR_SIZE)
goto out;
} else {
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index db29695f845..76b6cfb579d 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -316,6 +316,10 @@ static inline void ntfs_commit_inode(struct inode *vi)
return;
}
+#else
+
+static inline void ntfs_truncate_vfs(struct inode *vi) {}
+
#endif /* NTFS_RW */
#endif /* _LINUX_NTFS_INODE_H */
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 70b5863a2d6..f487aa34344 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,7 +832,7 @@ out:
return ret;
}
-int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret;
@@ -843,7 +843,7 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
- BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+ BUG_ON(whence != SEEK_DATA && whence != SEEK_HOLE);
ret = ocfs2_inode_lock(inode, &di_bh, 0);
if (ret) {
@@ -859,7 +859,7 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
}
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- if (origin == SEEK_HOLE)
+ if (whence == SEEK_HOLE)
*offset = inode->i_size;
goto out_unlock;
}
@@ -888,8 +888,8 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
}
- if ((!is_data && origin == SEEK_HOLE) ||
- (is_data && origin == SEEK_DATA)) {
+ if ((!is_data && whence == SEEK_HOLE) ||
+ (is_data && whence == SEEK_DATA)) {
if (extoff > *offset)
*offset = extoff;
goto out_unlock;
@@ -899,7 +899,7 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
cpos += clen;
}
- if (origin == SEEK_HOLE) {
+ if (whence == SEEK_HOLE) {
extoff = cpos;
extoff <<= cs_bits;
extlen = clen;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index dda08980494..37d313ede15 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1218,24 +1218,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
}
- /*
- * This will intentionally not wind up calling truncate_setsize(),
- * since all the work for a size change has been done above.
- * Otherwise, we could get into problems with truncate as
- * ip_alloc_sem is used there to protect against i_size
- * changes.
- *
- * XXX: this means the conditional below can probably be removed.
- */
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode)) {
- status = vmtruncate(inode, attr->ia_size);
- if (status) {
- mlog_errno(status);
- goto bail_commit;
- }
- }
-
setattr_copy(inode, attr);
mark_inode_dirty(inode);
@@ -2637,14 +2619,14 @@ bail:
}
/* Refer generic_file_llseek_unlocked() */
-static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int ret = 0;
mutex_lock(&inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case SEEK_SET:
break;
case SEEK_END:
@@ -2659,7 +2641,7 @@ static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
break;
case SEEK_DATA:
case SEEK_HOLE:
- ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+ ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
if (ret)
goto out;
break;
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 77e3cb2962b..e0d9b3e722b 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -306,6 +306,16 @@ omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
return mpage_writepages(mapping, wbc, omfs_get_block);
}
+static void omfs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ omfs_truncate(inode);
+ }
+}
+
static int omfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -314,11 +324,8 @@ static int omfs_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep,
omfs_get_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ omfs_write_failed(mapping, pos + len);
return ret;
}
@@ -350,9 +357,11 @@ static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
+ truncate_setsize(inode, attr->ia_size);
+ omfs_truncate(inode);
}
setattr_copy(inode, attr);
@@ -362,7 +371,6 @@ static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
const struct inode_operations omfs_file_inops = {
.setattr = omfs_setattr,
- .truncate = omfs_truncate
};
const struct address_space_operations omfs_aops = {
diff --git a/fs/open.c b/fs/open.c
index 59071f55bf7..9b33c0cbfac 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -61,33 +61,22 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
return ret;
}
-static long do_sys_truncate(const char __user *pathname, loff_t length)
+long vfs_truncate(struct path *path, loff_t length)
{
- struct path path;
struct inode *inode;
- int error;
-
- error = -EINVAL;
- if (length < 0) /* sorry, but loff_t says... */
- goto out;
+ long error;
- error = user_path(pathname, &path);
- if (error)
- goto out;
- inode = path.dentry->d_inode;
+ inode = path->dentry->d_inode;
/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
- error = -EISDIR;
if (S_ISDIR(inode->i_mode))
- goto dput_and_out;
-
- error = -EINVAL;
+ return -EISDIR;
if (!S_ISREG(inode->i_mode))
- goto dput_and_out;
+ return -EINVAL;
- error = mnt_want_write(path.mnt);
+ error = mnt_want_write(path->mnt);
if (error)
- goto dput_and_out;
+ goto out;
error = inode_permission(inode, MAY_WRITE);
if (error)
@@ -111,19 +100,40 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error)
- error = security_path_truncate(&path);
+ error = security_path_truncate(path);
if (!error)
- error = do_truncate(path.dentry, length, 0, NULL);
+ error = do_truncate(path->dentry, length, 0, NULL);
put_write_and_out:
put_write_access(inode);
mnt_drop_write_and_out:
- mnt_drop_write(path.mnt);
-dput_and_out:
- path_put(&path);
+ mnt_drop_write(path->mnt);
out:
return error;
}
+EXPORT_SYMBOL_GPL(vfs_truncate);
+
+static long do_sys_truncate(const char __user *pathname, loff_t length)
+{
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+ struct path path;
+ int error;
+
+ if (length < 0) /* sorry, but loff_t says... */
+ return -EINVAL;
+
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
+ if (!error) {
+ error = vfs_truncate(&path, length);
+ path_put(&path);
+ }
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
+ return error;
+}
SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
@@ -306,6 +316,7 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
struct path path;
struct inode *inode;
int res;
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */
return -EINVAL;
@@ -328,8 +339,8 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
}
old_cred = override_creds(override_cred);
-
- res = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
+retry:
+ res = user_path_at(dfd, filename, lookup_flags, &path);
if (res)
goto out;
@@ -364,6 +375,10 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
out_path_release:
path_put(&path);
+ if (retry_estale(res, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
revert_creds(old_cred);
put_cred(override_cred);
@@ -379,8 +394,9 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
{
struct path path;
int error;
-
- error = user_path_dir(filename, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+retry:
+ error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
@@ -392,6 +408,10 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
dput_and_out:
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
return error;
}
@@ -425,8 +445,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
struct path path;
int error;
-
- error = user_path_dir(filename, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+retry:
+ error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
if (error)
goto out;
@@ -435,7 +456,7 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
goto dput_and_out;
error = -EPERM;
- if (!capable(CAP_SYS_CHROOT))
+ if (!nsown_capable(CAP_SYS_CHROOT))
goto dput_and_out;
error = security_path_chroot(&path);
if (error)
@@ -445,6 +466,10 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
error = 0;
dput_and_out:
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
return error;
}
@@ -489,11 +514,16 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode
{
struct path path;
int error;
-
- error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+retry:
+ error = user_path_at(dfd, filename, lookup_flags, &path);
if (!error) {
error = chmod_common(&path, mode);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
}
return error;
}
@@ -552,6 +582,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
+retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
@@ -562,6 +593,10 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
return error;
}
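The open.c hunks repeat one shape: perform the path lookup, and if retry_estale() reports a stale file handle, retry once with LOOKUP_REVAL. The same defensive pattern is common in userspace code running over NFS; a hedged sketch retrying truncate(2) on ESTALE:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Retry a path-based operation once if the handle went stale under us,
 * mirroring the kernel's retry_estale()/LOOKUP_REVAL loop in spirit. */
static int truncate_retry(const char *path, off_t length)
{
	int tries = 2;
	int ret;

	do {
		ret = truncate(path, length);
	} while (ret < 0 && errno == ESTALE && --tries > 0);

	return ret;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <file> <length>\n", argv[0]);
		return 1;
	}
	if (truncate_retry(argv[1], (off_t)atoll(argv[2])) < 0) {
		perror("truncate");
		return 1;
	}
	return 0;
}
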
diff --git a/fs/pnode.h b/fs/pnode.h
index 65c60979d54..19b853a3445 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -22,6 +22,7 @@
#define CL_COPY_ALL 0x04
#define CL_MAKE_SHARED 0x08
#define CL_PRIVATE 0x10
+#define CL_SHARED_TO_SLAVE 0x20
static inline void set_mnt_shared(struct mount *mnt)
{
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 99349efbbc2..981b0560193 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -21,6 +21,7 @@ proc-y += uptime.o
proc-y += version.o
proc-y += softirqs.o
proc-y += namespaces.o
+proc-y += self.o
proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o
proc-$(CONFIG_NET) += proc_net.o
proc-$(CONFIG_PROC_KCORE) += kcore.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index d3696708fc1..6a91e6ffbcb 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -162,7 +162,7 @@ static inline const char *get_task_state(struct task_struct *tsk)
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(m);
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
@@ -212,7 +212,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
group_info = cred->group_info;
task_unlock(p);
- for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
+ for (g = 0; g < group_info->ngroups; g++)
seq_printf(m, "%d ",
from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
put_cred(cred);
@@ -220,7 +220,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
seq_putc(m, '\n');
}
-static void render_sigset_t(struct seq_file *m, const char *header,
+void render_sigset_t(struct seq_file *m, const char *header,
sigset_t *set)
{
int i;
@@ -308,6 +308,10 @@ static void render_cap_t(struct seq_file *m, const char *header,
seq_putc(m, '\n');
}
+/* Remove non-existent capabilities */
+#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
+ CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
+
static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
const struct cred *cred;
@@ -321,12 +325,24 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
cap_bset = cred->cap_bset;
rcu_read_unlock();
+ NORM_CAPS(cap_inheritable);
+ NORM_CAPS(cap_permitted);
+ NORM_CAPS(cap_effective);
+ NORM_CAPS(cap_bset);
+
render_cap_t(m, "CapInh:\t", &cap_inheritable);
render_cap_t(m, "CapPrm:\t", &cap_permitted);
render_cap_t(m, "CapEff:\t", &cap_effective);
render_cap_t(m, "CapBnd:\t", &cap_bset);
}
+static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+{
+#ifdef CONFIG_SECCOMP
+ seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
+#endif
+}
+
static inline void task_context_switch_counts(struct seq_file *m,
struct task_struct *p)
{
@@ -360,6 +376,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
}
task_sig(m, task);
task_cap(m, task);
+ task_seccomp(m, task);
task_cpus_allowed(m, task);
cpuset_task_status_allowed(m, task);
task_context_switch_counts(m, task);
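With task_seccomp() added to proc_pid_status() and the capability sets normalized by NORM_CAPS, /proc/<pid>/status now carries a "Seccomp:" line (0, 1 or 2 for disabled, strict and filter mode) on CONFIG_SECCOMP kernels. A quick userspace check, assuming nothing beyond this patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Print the seccomp mode and the capability sets. */
		if (!strncmp(line, "Seccomp:", 8) || !strncmp(line, "Cap", 3))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
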
diff --git a/fs/proc/base.c b/fs/proc/base.c
index aa63d25157b..9b43ff77a51 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -542,13 +542,6 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
- if (error)
- return error;
- }
-
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
@@ -2345,146 +2338,6 @@ static const struct file_operations proc_coredump_filter_operations = {
};
#endif
-/*
- * /proc/self:
- */
-static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
- int buflen)
-{
- struct pid_namespace *ns = dentry->d_sb->s_fs_info;
- pid_t tgid = task_tgid_nr_ns(current, ns);
- char tmp[PROC_NUMBUF];
- if (!tgid)
- return -ENOENT;
- sprintf(tmp, "%d", tgid);
- return vfs_readlink(dentry,buffer,buflen,tmp);
-}
-
-static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
- struct pid_namespace *ns = dentry->d_sb->s_fs_info;
- pid_t tgid = task_tgid_nr_ns(current, ns);
- char *name = ERR_PTR(-ENOENT);
- if (tgid) {
- /* 11 for max length of signed int in decimal + NULL term */
- name = kmalloc(12, GFP_KERNEL);
- if (!name)
- name = ERR_PTR(-ENOMEM);
- else
- sprintf(name, "%d", tgid);
- }
- nd_set_link(nd, name);
- return NULL;
-}
-
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
- void *cookie)
-{
- char *s = nd_get_link(nd);
- if (!IS_ERR(s))
- kfree(s);
-}
-
-static const struct inode_operations proc_self_inode_operations = {
- .readlink = proc_self_readlink,
- .follow_link = proc_self_follow_link,
- .put_link = proc_self_put_link,
-};
-
-/*
- * proc base
- *
- * These are the directory entries in the root directory of /proc
- * that properly belong to the /proc filesystem, as they describe
- * describe something that is process related.
- */
-static const struct pid_entry proc_base_stuff[] = {
- NOD("self", S_IFLNK|S_IRWXUGO,
- &proc_self_inode_operations, NULL, {}),
-};
-
-static struct dentry *proc_base_instantiate(struct inode *dir,
- struct dentry *dentry, struct task_struct *task, const void *ptr)
-{
- const struct pid_entry *p = ptr;
- struct inode *inode;
- struct proc_inode *ei;
- struct dentry *error;
-
- /* Allocate the inode */
- error = ERR_PTR(-ENOMEM);
- inode = new_inode(dir->i_sb);
- if (!inode)
- goto out;
-
- /* Initialize the inode */
- ei = PROC_I(inode);
- inode->i_ino = get_next_ino();
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-
- /*
- * grab the reference to the task.
- */
- ei->pid = get_task_pid(task, PIDTYPE_PID);
- if (!ei->pid)
- goto out_iput;
-
- inode->i_mode = p->mode;
- if (S_ISDIR(inode->i_mode))
- set_nlink(inode, 2);
- if (S_ISLNK(inode->i_mode))
- inode->i_size = 64;
- if (p->iop)
- inode->i_op = p->iop;
- if (p->fop)
- inode->i_fop = p->fop;
- ei->op = p->op;
- d_add(dentry, inode);
- error = NULL;
-out:
- return error;
-out_iput:
- iput(inode);
- goto out;
-}
-
-static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
-{
- struct dentry *error;
- struct task_struct *task = get_proc_task(dir);
- const struct pid_entry *p, *last;
-
- error = ERR_PTR(-ENOENT);
-
- if (!task)
- goto out_no_task;
-
- /* Lookup the directory entry */
- last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
- for (p = proc_base_stuff; p <= last; p++) {
- if (p->len != dentry->d_name.len)
- continue;
- if (!memcmp(dentry->d_name.name, p->name, p->len))
- break;
- }
- if (p > last)
- goto out;
-
- error = proc_base_instantiate(dir, dentry, task, p);
-
-out:
- put_task_struct(task);
-out_no_task:
- return error;
-}
-
-static int proc_base_fill_cache(struct file *filp, void *dirent,
- filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
-{
- return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
- proc_base_instantiate, task, p);
-}
-
#ifdef CONFIG_TASK_IO_ACCOUNTING
static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
{
@@ -2839,10 +2692,6 @@ void proc_flush_task(struct task_struct *task)
proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
tgid->numbers[i].nr);
}
-
- upid = &pid->numbers[pid->level];
- if (upid->nr == 1)
- pid_ns_release_proc(upid->ns);
}
static struct dentry *proc_pid_instantiate(struct inode *dir,
@@ -2876,15 +2725,11 @@ out:
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
- struct dentry *result;
+ struct dentry *result = NULL;
struct task_struct *task;
unsigned tgid;
struct pid_namespace *ns;
- result = proc_base_lookup(dir, dentry);
- if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
- goto out;
-
tgid = name_to_int(dentry);
if (tgid == ~0U)
goto out;
@@ -2947,7 +2792,7 @@ retry:
return iter;
}
-#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
+#define TGID_OFFSET (FIRST_PROCESS_ENTRY)
static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
struct tgid_iter iter)
@@ -2967,25 +2812,12 @@ static int fake_filldir(void *buf, const char *name, int namelen,
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr;
- struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
filldir_t __filldir;
if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
- goto out_no_task;
- nr = filp->f_pos - FIRST_PROCESS_ENTRY;
-
- reaper = get_proc_task(filp->f_path.dentry->d_inode);
- if (!reaper)
- goto out_no_task;
-
- for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
- const struct pid_entry *p = &proc_base_stuff[nr];
- if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
- goto out;
- }
+ goto out;
ns = filp->f_dentry->d_sb->s_fs_info;
iter.task = NULL;
@@ -3006,8 +2838,6 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
}
filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
out:
- put_task_struct(reaper);
-out_no_task:
return 0;
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index f28a875f877..d7a4a28ef63 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -50,6 +50,8 @@ static int seq_show(struct seq_file *m, void *v)
if (!ret) {
seq_printf(m, "pos:\t%lli\nflags:\t0%o\n",
(long long)file->f_pos, f_flags);
+ if (file->f_op->show_fdinfo)
+ ret = file->f_op->show_fdinfo(m, file);
fput(file);
}
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 0d80cef4cfb..76ddae83daa 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -261,16 +261,9 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, iattr->ia_size);
- if (error)
- return error;
- }
-
setattr_copy(inode, iattr);
mark_inode_dirty(inode);
-
+
de->uid = inode->i_uid;
de->gid = inode->i_gid;
de->mode = inode->i_mode;
@@ -350,37 +343,39 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
* Return an inode number between PROC_DYNAMIC_FIRST and
* 0xffffffff, or zero on failure.
*/
-static unsigned int get_inode_number(void)
+int proc_alloc_inum(unsigned int *inum)
{
unsigned int i;
int error;
retry:
- if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
- return 0;
+ if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
+ return -ENOMEM;
- spin_lock(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
error = ida_get_new(&proc_inum_ida, &i);
- spin_unlock(&proc_inum_lock);
+ spin_unlock_irq(&proc_inum_lock);
if (error == -EAGAIN)
goto retry;
else if (error)
- return 0;
+ return error;
if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
- spin_lock(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
ida_remove(&proc_inum_ida, i);
- spin_unlock(&proc_inum_lock);
- return 0;
+ spin_unlock_irq(&proc_inum_lock);
+ return -ENOSPC;
}
- return PROC_DYNAMIC_FIRST + i;
+ *inum = PROC_DYNAMIC_FIRST + i;
+ return 0;
}
-static void release_inode_number(unsigned int inum)
+void proc_free_inum(unsigned int inum)
{
- spin_lock(&proc_inum_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&proc_inum_lock, flags);
ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
- spin_unlock(&proc_inum_lock);
+ spin_unlock_irqrestore(&proc_inum_lock, flags);
}
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
@@ -554,13 +549,12 @@ static const struct inode_operations proc_dir_inode_operations = {
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
- unsigned int i;
struct proc_dir_entry *tmp;
+ int ret;
- i = get_inode_number();
- if (i == 0)
- return -EAGAIN;
- dp->low_ino = i;
+ ret = proc_alloc_inum(&dp->low_ino);
+ if (ret)
+ return ret;
if (S_ISDIR(dp->mode)) {
if (dp->proc_iops == NULL) {
@@ -764,7 +758,7 @@ EXPORT_SYMBOL(proc_create_data);
static void free_proc_entry(struct proc_dir_entry *de)
{
- release_inode_number(de->low_ino);
+ proc_free_inum(de->low_ino);
if (S_ISLNK(de->mode))
kfree(de->data);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3b22bbdee9e..439ae688650 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -31,6 +31,7 @@ static void proc_evict_inode(struct inode *inode)
struct proc_dir_entry *de;
struct ctl_table_header *head;
const struct proc_ns_operations *ns_ops;
+ void *ns;
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
@@ -49,8 +50,9 @@ static void proc_evict_inode(struct inode *inode)
}
/* Release any associated namespace */
ns_ops = PROC_I(inode)->ns_ops;
- if (ns_ops && ns_ops->put)
- ns_ops->put(PROC_I(inode)->ns);
+ ns = PROC_I(inode)->ns;
+ if (ns_ops && ns)
+ ns_ops->put(ns);
}
static struct kmem_cache * proc_inode_cachep;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 43973b084ab..252544c0520 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -15,6 +15,7 @@ struct ctl_table_header;
struct mempolicy;
extern struct proc_dir_entry proc_root;
+extern void proc_self_init(void);
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void sysctl_head_put(struct ctl_table_header *head);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 86c67eee439..e96d4f18ca3 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -249,7 +249,7 @@ static int kcore_update_ram(void)
/* Not initialized... update now */
/* find out "max pfn" */
end_pfn = 0;
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
unsigned long node_end;
node_end = NODE_DATA(nid)->node_start_pfn +
NODE_DATA(nid)->node_spanned_pages;
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index b178ed733c3..b7a47196c8c 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -11,6 +11,7 @@
#include <net/net_namespace.h>
#include <linux/ipc_namespace.h>
#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
#include "internal.h"
@@ -24,12 +25,168 @@ static const struct proc_ns_operations *ns_entries[] = {
#ifdef CONFIG_IPC_NS
&ipcns_operations,
#endif
+#ifdef CONFIG_PID_NS
+ &pidns_operations,
+#endif
+#ifdef CONFIG_USER_NS
+ &userns_operations,
+#endif
+ &mntns_operations,
};
static const struct file_operations ns_file_operations = {
.llseek = no_llseek,
};
+static const struct inode_operations ns_inode_operations = {
+ .setattr = proc_setattr,
+};
+
+static int ns_delete_dentry(const struct dentry *dentry)
+{
+ /* Don't cache namespace inodes when not in use */
+ return 1;
+}
+
+static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
+
+ return dynamic_dname(dentry, buffer, buflen, "%s:[%lu]",
+ ns_ops->name, inode->i_ino);
+}
+
+const struct dentry_operations ns_dentry_operations =
+{
+ .d_delete = ns_delete_dentry,
+ .d_dname = ns_dname,
+};
+
+static struct dentry *proc_ns_get_dentry(struct super_block *sb,
+ struct task_struct *task, const struct proc_ns_operations *ns_ops)
+{
+ struct dentry *dentry, *result;
+ struct inode *inode;
+ struct proc_inode *ei;
+ struct qstr qname = { .name = "", };
+ void *ns;
+
+ ns = ns_ops->get(task);
+ if (!ns)
+ return ERR_PTR(-ENOENT);
+
+ dentry = d_alloc_pseudo(sb, &qname);
+ if (!dentry) {
+ ns_ops->put(ns);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ inode = iget_locked(sb, ns_ops->inum(ns));
+ if (!inode) {
+ dput(dentry);
+ ns_ops->put(ns);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ei = PROC_I(inode);
+ if (inode->i_state & I_NEW) {
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_op = &ns_inode_operations;
+ inode->i_mode = S_IFREG | S_IRUGO;
+ inode->i_fop = &ns_file_operations;
+ ei->ns_ops = ns_ops;
+ ei->ns = ns;
+ unlock_new_inode(inode);
+ } else {
+ ns_ops->put(ns);
+ }
+
+ d_set_d_op(dentry, &ns_dentry_operations);
+ result = d_instantiate_unique(dentry, inode);
+ if (result) {
+ dput(dentry);
+ dentry = result;
+ }
+
+ return dentry;
+}
+
+static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ struct proc_inode *ei = PROC_I(inode);
+ struct task_struct *task;
+ struct dentry *ns_dentry;
+ void *error = ERR_PTR(-EACCES);
+
+ task = get_proc_task(inode);
+ if (!task)
+ goto out;
+
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out_put_task;
+
+ ns_dentry = proc_ns_get_dentry(sb, task, ei->ns_ops);
+ if (IS_ERR(ns_dentry)) {
+ error = ERR_CAST(ns_dentry);
+ goto out_put_task;
+ }
+
+ dput(nd->path.dentry);
+ nd->path.dentry = ns_dentry;
+ error = NULL;
+
+out_put_task:
+ put_task_struct(task);
+out:
+ return error;
+}
+
+static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ struct proc_inode *ei = PROC_I(inode);
+ const struct proc_ns_operations *ns_ops = ei->ns_ops;
+ struct task_struct *task;
+ void *ns;
+ char name[50];
+ int len = -EACCES;
+
+ task = get_proc_task(inode);
+ if (!task)
+ goto out;
+
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out_put_task;
+
+ len = -ENOENT;
+ ns = ns_ops->get(task);
+ if (!ns)
+ goto out_put_task;
+
+ snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
+ len = strlen(name);
+
+ if (len > buflen)
+ len = buflen;
+ if (copy_to_user(buffer, name, len))
+ len = -EFAULT;
+
+ ns_ops->put(ns);
+out_put_task:
+ put_task_struct(task);
+out:
+ return len;
+}
+
+static const struct inode_operations proc_ns_link_inode_operations = {
+ .readlink = proc_ns_readlink,
+ .follow_link = proc_ns_follow_link,
+ .setattr = proc_setattr,
+};
+
static struct dentry *proc_ns_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
@@ -37,21 +194,15 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
- void *ns;
inode = proc_pid_make_inode(dir->i_sb, task);
if (!inode)
goto out;
- ns = ns_ops->get(task);
- if (!ns)
- goto out_iput;
-
ei = PROC_I(inode);
- inode->i_mode = S_IFREG|S_IRUSR;
- inode->i_fop = &ns_file_operations;
- ei->ns_ops = ns_ops;
- ei->ns = ns;
+ inode->i_mode = S_IFLNK|S_IRWXUGO;
+ inode->i_op = &proc_ns_link_inode_operations;
+ ei->ns_ops = ns_ops;
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
@@ -60,9 +211,6 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
error = NULL;
out:
return error;
-out_iput:
- iput(inode);
- goto out;
}
static int proc_ns_fill_cache(struct file *filp, void *dirent,
@@ -89,10 +237,6 @@ static int proc_ns_dir_readdir(struct file *filp, void *dirent,
if (!task)
goto out_no_task;
- ret = -EPERM;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
ret = 0;
i = filp->f_pos;
switch (i) {
@@ -152,10 +296,6 @@ static struct dentry *proc_ns_dir_lookup(struct inode *dir,
if (!task)
goto out_no_task;
- error = ERR_PTR(-EPERM);
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- goto out;
-
last = &ns_entries[ARRAY_SIZE(ns_entries)];
for (entry = ns_entries; entry < last; entry++) {
if (strlen((*entry)->name) != len)
@@ -163,7 +303,6 @@ static struct dentry *proc_ns_dir_lookup(struct inode *dir,
if (!memcmp(dentry->d_name.name, (*entry)->name, len))
break;
}
- error = ERR_PTR(-ENOENT);
if (entry == last)
goto out;
@@ -198,3 +337,7 @@ out_invalid:
return ERR_PTR(-EINVAL);
}
+bool proc_ns_inode(struct inode *inode)
+{
+ return inode->i_fop == &ns_file_operations;
+}
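After this change the /proc/<pid>/ns/* entries are symlinks whose target encodes the namespace type and inode number (the "%s:[%u]" built in proc_ns_readlink() above), which is what lets userspace tell whether two tasks share a namespace (reading another task's links may require the usual ptrace-style privileges). A hedged example comparing the mount namespaces of two pids:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read the "name:[inum]" target of /proc/<pid>/ns/<type>. */
static int ns_link(const char *pid, const char *type, char *buf, size_t len)
{
	char path[64];
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/%s/ns/%s", pid, type);
	n = readlink(path, buf, len - 1);
	if (n < 0)
		return -1;
	buf[n] = '\0';
	return 0;
}

int main(int argc, char **argv)
{
	char a[64], b[64];
	const char *p1 = argc > 1 ? argv[1] : "self";
	const char *p2 = argc > 2 ? argv[2] : "1";

	if (ns_link(p1, "mnt", a, sizeof(a)) || ns_link(p2, "mnt", b, sizeof(b))) {
		perror("readlink");
		return 1;
	}
	printf("%s -> %s\n%s -> %s\n%s\n", p1, a, p2, b,
	       strcmp(a, b) ? "different mount namespaces" : "same mount namespace");
	return 0;
}
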
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index df7dd08d439..de20ec480fa 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -195,11 +195,7 @@ void proc_device_tree_add_node(struct device_node *np,
set_node_proc_entry(np, de);
for (child = NULL; (child = of_get_next_child(np, child));) {
/* Use everything after the last slash, or the full name */
- p = strrchr(child->full_name, '/');
- if (!p)
- p = child->full_name;
- else
- ++p;
+ p = kbasename(child->full_name);
if (duplicate_name(de, p))
p = fixup_name(np, de, p);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 701580ddfcc..1827d88ad58 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -736,13 +736,6 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
- if (error)
- return error;
- }
-
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9889a92d2e0..c6e9fac26ba 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -100,14 +100,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
int err;
struct super_block *sb;
struct pid_namespace *ns;
- struct proc_inode *ei;
char *options;
if (flags & MS_KERNMOUNT) {
ns = (struct pid_namespace *)data;
options = NULL;
} else {
- ns = current->nsproxy->pid_ns;
+ ns = task_active_pid_ns(current);
options = data;
}
@@ -130,13 +129,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
sb->s_flags |= MS_ACTIVE;
}
- ei = PROC_I(sb->s_root->d_inode);
- if (!ei->pid) {
- rcu_read_lock();
- ei->pid = get_pid(find_pid_ns(1, ns));
- rcu_read_unlock();
- }
-
return dget(sb->s_root);
}
@@ -153,6 +145,7 @@ static struct file_system_type proc_fs_type = {
.name = "proc",
.mount = proc_mount,
.kill_sb = proc_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
void __init proc_root_init(void)
@@ -163,12 +156,8 @@ void __init proc_root_init(void)
err = register_filesystem(&proc_fs_type);
if (err)
return;
- err = pid_ns_prepare_proc(&init_pid_ns);
- if (err) {
- unregister_filesystem(&proc_fs_type);
- return;
- }
+ proc_self_init();
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
diff --git a/fs/proc/self.c b/fs/proc/self.c
new file mode 100644
index 00000000000..aa5cc3bff14
--- /dev/null
+++ b/fs/proc/self.c
@@ -0,0 +1,59 @@
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+
+/*
+ * /proc/self:
+ */
+static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
+ int buflen)
+{
+ struct pid_namespace *ns = dentry->d_sb->s_fs_info;
+ pid_t tgid = task_tgid_nr_ns(current, ns);
+ char tmp[PROC_NUMBUF];
+ if (!tgid)
+ return -ENOENT;
+ sprintf(tmp, "%d", tgid);
+ return vfs_readlink(dentry,buffer,buflen,tmp);
+}
+
+static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct pid_namespace *ns = dentry->d_sb->s_fs_info;
+ pid_t tgid = task_tgid_nr_ns(current, ns);
+ char *name = ERR_PTR(-ENOENT);
+ if (tgid) {
+ /* 11 for max length of signed int in decimal + NULL term */
+ name = kmalloc(12, GFP_KERNEL);
+ if (!name)
+ name = ERR_PTR(-ENOMEM);
+ else
+ sprintf(name, "%d", tgid);
+ }
+ nd_set_link(nd, name);
+ return NULL;
+}
+
+static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+}
+
+static const struct inode_operations proc_self_inode_operations = {
+ .readlink = proc_self_readlink,
+ .follow_link = proc_self_follow_link,
+ .put_link = proc_self_put_link,
+};
+
+void __init proc_self_init(void)
+{
+ struct proc_dir_entry *proc_self_symlink;
+ mode_t mode;
+
+ mode = S_IFLNK | S_IRWXUGO;
+ proc_self_symlink = proc_create("self", mode, NULL, NULL );
+ proc_self_symlink->proc_iops = &proc_self_inode_operations;
+}
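proc_self_init() registers /proc/self as an ordinary proc entry at boot instead of synthesizing it per-lookup in base.c; the behaviour visible to userspace is unchanged, and readlink still yields the caller's tgid:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n = readlink("/proc/self", buf, sizeof(buf) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	buf[n] = '\0';
	/* Should match getpid() for a single-threaded caller. */
	printf("/proc/self -> %s (getpid() = %d)\n", buf, (int)getpid());
	return 0;
}
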
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 90c63f9392a..ca5ce7f9f80 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -526,6 +526,57 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return 0;
}
+static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
+{
+ /*
+ * Don't forget to update Documentation/ on changes.
+ */
+ static const char mnemonics[BITS_PER_LONG][2] = {
+ /*
+	 * In case we meet a flag we don't know about.
+ */
+ [0 ... (BITS_PER_LONG-1)] = "??",
+
+ [ilog2(VM_READ)] = "rd",
+ [ilog2(VM_WRITE)] = "wr",
+ [ilog2(VM_EXEC)] = "ex",
+ [ilog2(VM_SHARED)] = "sh",
+ [ilog2(VM_MAYREAD)] = "mr",
+ [ilog2(VM_MAYWRITE)] = "mw",
+ [ilog2(VM_MAYEXEC)] = "me",
+ [ilog2(VM_MAYSHARE)] = "ms",
+ [ilog2(VM_GROWSDOWN)] = "gd",
+ [ilog2(VM_PFNMAP)] = "pf",
+ [ilog2(VM_DENYWRITE)] = "dw",
+ [ilog2(VM_LOCKED)] = "lo",
+ [ilog2(VM_IO)] = "io",
+ [ilog2(VM_SEQ_READ)] = "sr",
+ [ilog2(VM_RAND_READ)] = "rr",
+ [ilog2(VM_DONTCOPY)] = "dc",
+ [ilog2(VM_DONTEXPAND)] = "de",
+ [ilog2(VM_ACCOUNT)] = "ac",
+ [ilog2(VM_NORESERVE)] = "nr",
+ [ilog2(VM_HUGETLB)] = "ht",
+ [ilog2(VM_NONLINEAR)] = "nl",
+ [ilog2(VM_ARCH_1)] = "ar",
+ [ilog2(VM_DONTDUMP)] = "dd",
+ [ilog2(VM_MIXEDMAP)] = "mm",
+ [ilog2(VM_HUGEPAGE)] = "hg",
+ [ilog2(VM_NOHUGEPAGE)] = "nh",
+ [ilog2(VM_MERGEABLE)] = "mg",
+ };
+ size_t i;
+
+ seq_puts(m, "VmFlags: ");
+ for (i = 0; i < BITS_PER_LONG; i++) {
+ if (vma->vm_flags & (1UL << i)) {
+ seq_printf(m, "%c%c ",
+ mnemonics[i][0], mnemonics[i][1]);
+ }
+ }
+ seq_putc(m, '\n');
+}
+
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
struct proc_maps_private *priv = m->private;
@@ -581,6 +632,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
seq_printf(m, "Nonlinear: %8lu kB\n",
mss.nonlinear >> 10);
+ show_smap_vma_flags(m, vma);
+
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
@@ -643,7 +696,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
spinlock_t *ptl;
struct page *page;
- split_huge_page_pmd(walk->mm, pmd);
+ split_huge_page_pmd(vma, addr, pmd);
if (pmd_trans_unstable(pmd))
return 0;
@@ -1126,7 +1179,7 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return NULL;
nid = page_to_nid(page);
- if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+ if (!node_isset(nid, node_states[N_MEMORY]))
return NULL;
return page;
@@ -1225,7 +1278,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
walk.mm = mm;
pol = get_vma_policy(task, vma, vma->vm_start);
- mpol_to_str(buffer, sizeof(buffer), pol, 0);
+ mpol_to_str(buffer, sizeof(buffer), pol);
mpol_cond_put(pol);
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
@@ -1279,7 +1332,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
if (md->writeback)
seq_printf(m, " writeback=%lu", md->writeback);
- for_each_node_state(n, N_HIGH_MEMORY)
+ for_each_node_state(n, N_MEMORY)
if (md->node[n])
seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
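show_smap_vma_flags() appends a "VmFlags:" line of two-letter mnemonics to every mapping in /proc/<pid>/smaps. A small reader, assuming only the format introduced above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	int maps = 0;
	FILE *f = fopen("/proc/self/smaps", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "VmFlags:", 8)) {
			maps++;
			fputs(line, stdout);	/* e.g. "VmFlags: rd wr mr mw me ac" */
		}
	}
	fclose(f);
	printf("%d mappings reported flags\n", maps);
	return 0;
}
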
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index 2d57e1ac011..43b12807a51 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -28,7 +28,9 @@
#include "internal.h"
static void notrace pstore_ftrace_call(unsigned long ip,
- unsigned long parent_ip)
+ unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct pt_regs *regs)
{
unsigned long flags;
struct pstore_ftrace_record rec = {};
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index ed1d8c7212d..67de74ca85f 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -151,13 +151,13 @@ static int pstore_file_open(struct inode *inode, struct file *file)
return 0;
}
-static loff_t pstore_file_llseek(struct file *file, loff_t off, int origin)
+static loff_t pstore_file_llseek(struct file *file, loff_t off, int whence)
{
struct seq_file *sf = file->private_data;
if (sf->op)
- return seq_lseek(file, off, origin);
- return default_llseek(file, off, origin);
+ return seq_lseek(file, off, whence);
+ return default_llseek(file, off, whence);
}
static const struct file_operations pstore_file_operations = {
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 2bfa36e0ffe..f883e7e7430 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -188,7 +188,7 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
struct pstore_info *psi)
{
struct ramoops_context *cxt = psi->data;
- struct persistent_ram_zone *prz = cxt->przs[cxt->dump_write_cnt];
+ struct persistent_ram_zone *prz;
size_t hlen;
if (type == PSTORE_TYPE_CONSOLE) {
@@ -225,6 +225,11 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
if (part != 1)
return -ENOSPC;
+ if (!cxt->przs)
+ return -ENOSPC;
+
+ prz = cxt->przs[cxt->dump_write_cnt];
+
hlen = ramoops_write_kmsg_hdr(prz);
if (size + hlen > prz->buffer_size)
size = prz->buffer_size - hlen;
@@ -286,8 +291,9 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
kfree(cxt->przs);
}
-static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
- phys_addr_t *paddr, size_t dump_mem_sz)
+static int __devinit ramoops_init_przs(struct device *dev,
+ struct ramoops_context *cxt,
+ phys_addr_t *paddr, size_t dump_mem_sz)
{
int err = -ENOMEM;
int i;
@@ -295,6 +301,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
if (!cxt->record_size)
return 0;
+ if (*paddr + dump_mem_sz - cxt->phys_addr > cxt->size) {
+ dev_err(dev, "no room for dumps\n");
+ return -ENOMEM;
+ }
+
cxt->max_dump_cnt = dump_mem_sz / cxt->record_size;
if (!cxt->max_dump_cnt)
return -ENOMEM;
@@ -325,15 +336,20 @@ fail_prz:
return err;
}
-static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
- struct persistent_ram_zone **prz,
- phys_addr_t *paddr, size_t sz, u32 sig)
+static int __devinit ramoops_init_prz(struct device *dev,
+ struct ramoops_context *cxt,
+ struct persistent_ram_zone **prz,
+ phys_addr_t *paddr, size_t sz, u32 sig)
{
if (!sz)
return 0;
- if (*paddr + sz > *paddr + cxt->size)
+ if (*paddr + sz - cxt->phys_addr > cxt->size) {
+ dev_err(dev, "no room for mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
+ sz, (unsigned long long)*paddr,
+ cxt->size, (unsigned long long)cxt->phys_addr);
return -ENOMEM;
+ }
*prz = persistent_ram_new(*paddr, sz, sig, cxt->ecc_size);
if (IS_ERR(*prz)) {
@@ -373,10 +389,14 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
goto fail_out;
}
- pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
- pdata->record_size = rounddown_pow_of_two(pdata->record_size);
- pdata->console_size = rounddown_pow_of_two(pdata->console_size);
- pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ if (!is_power_of_2(pdata->mem_size))
+ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
+ if (!is_power_of_2(pdata->record_size))
+ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
+ if (!is_power_of_2(pdata->console_size))
+ pdata->console_size = rounddown_pow_of_two(pdata->console_size);
+ if (!is_power_of_2(pdata->ftrace_size))
+ pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
cxt->dump_read_cnt = 0;
cxt->size = pdata->mem_size;
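Both new checks in ramoops_init_prz()/ramoops_init_przs() reduce to the same question: does a candidate zone still fit inside the reserved region? A standalone sketch of that arithmetic, with hypothetical names, written the same wrap-safe way as above (the offset from the region start is compared, not the raw end address):

#include <linux/types.h>

static bool zone_fits(phys_addr_t zone_start, size_t zone_size,
		      phys_addr_t region_start, unsigned long region_size)
{
	/* equivalent to the "*paddr + sz - cxt->phys_addr > cxt->size" test */
	return zone_start + zone_size - region_start <= region_size;
}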
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index af1661f7a54..c7314f1771f 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -307,6 +307,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
}
}
+#ifdef CONFIG_BLOCK
+
/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
@@ -322,6 +324,8 @@ static int quotactl_cmd_write(int cmd)
return 1;
}
+#endif /* CONFIG_BLOCK */
+
/*
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
diff --git a/fs/read_write.c b/fs/read_write.c
index d06534857e9..bb34af31528 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -54,7 +54,7 @@ static loff_t lseek_execute(struct file *file, struct inode *inode,
* generic_file_llseek_size - generic llseek implementation for regular files
* @file: file structure to seek on
* @offset: file offset to seek to
- * @origin: type of seek
+ * @whence: type of seek
* @size: max size of this file in file system
* @eof: offset used for SEEK_END position
*
@@ -67,12 +67,12 @@ static loff_t lseek_execute(struct file *file, struct inode *inode,
* read/writes behave like SEEK_SET against seeks.
*/
loff_t
-generic_file_llseek_size(struct file *file, loff_t offset, int origin,
+generic_file_llseek_size(struct file *file, loff_t offset, int whence,
loff_t maxsize, loff_t eof)
{
struct inode *inode = file->f_mapping->host;
- switch (origin) {
+ switch (whence) {
case SEEK_END:
offset += eof;
break;
@@ -122,17 +122,17 @@ EXPORT_SYMBOL(generic_file_llseek_size);
* generic_file_llseek - generic llseek implementation for regular files
* @file: file structure to seek on
* @offset: file offset to seek to
- * @origin: type of seek
+ * @whence: type of seek
*
* This is a generic implementation of ->llseek usable for all normal local
* filesystems. It just updates the file offset to the value specified by
- * @offset and @origin under i_mutex.
+ * @offset and @whence under i_mutex.
*/
-loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
+loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
- return generic_file_llseek_size(file, offset, origin,
+ return generic_file_llseek_size(file, offset, whence,
inode->i_sb->s_maxbytes,
i_size_read(inode));
}
@@ -142,32 +142,32 @@ EXPORT_SYMBOL(generic_file_llseek);
* noop_llseek - No Operation Performed llseek implementation
* @file: file structure to seek on
* @offset: file offset to seek to
- * @origin: type of seek
+ * @whence: type of seek
*
* This is an implementation of ->llseek usable for the rare special case when
* userspace expects the seek to succeed but the (device) file is actually not
* able to perform the seek. In this case you use noop_llseek() instead of
* falling back to the default implementation of ->llseek.
*/
-loff_t noop_llseek(struct file *file, loff_t offset, int origin)
+loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);
-loff_t no_llseek(struct file *file, loff_t offset, int origin)
+loff_t no_llseek(struct file *file, loff_t offset, int whence)
{
return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);
-loff_t default_llseek(struct file *file, loff_t offset, int origin)
+loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_path.dentry->d_inode;
loff_t retval;
mutex_lock(&inode->i_mutex);
- switch (origin) {
+ switch (whence) {
case SEEK_END:
offset += i_size_read(inode);
break;
@@ -216,7 +216,7 @@ out:
}
EXPORT_SYMBOL(default_llseek);
-loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
+loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
loff_t (*fn)(struct file *, loff_t, int);
@@ -225,11 +225,11 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
if (file->f_op && file->f_op->llseek)
fn = file->f_op->llseek;
}
- return fn(file, offset, origin);
+ return fn(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);
-SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
+SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
off_t retval;
struct fd f = fdget(fd);
@@ -237,8 +237,8 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
return -EBADF;
retval = -EINVAL;
- if (origin <= SEEK_MAX) {
- loff_t res = vfs_llseek(f.file, offset, origin);
+ if (whence <= SEEK_MAX) {
+ loff_t res = vfs_llseek(f.file, offset, whence);
retval = res;
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
@@ -250,7 +250,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
- unsigned int, origin)
+ unsigned int, whence)
{
int retval;
struct fd f = fdget(fd);
@@ -260,11 +260,11 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
return -EBADF;
retval = -EINVAL;
- if (origin > SEEK_MAX)
+ if (whence > SEEK_MAX)
goto out_putf;
offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
- origin);
+ whence);
retval = (int)offset;
if (offset >= 0) {
@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
if (retval > 0) {
add_rchar(current, retval);
add_wchar(current, retval);
+ fsnotify_access(in.file);
+ fsnotify_modify(out.file);
}
inc_syscr(current);
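The origin to whence rename is purely cosmetic; callers keep working unchanged. A hedged sketch of a filesystem wiring its ->llseek through generic_file_llseek_size() with the new parameter name (myfs is hypothetical):

#include <linux/fs.h>

static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	/* same behaviour as generic_file_llseek(), spelled out explicitly */
	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}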
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 8375c922c0d..50302d6f889 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -126,7 +126,7 @@ static int reiserfs_file_open(struct inode *inode, struct file *file)
return err;
}
-static void reiserfs_vfs_truncate_file(struct inode *inode)
+void reiserfs_vfs_truncate_file(struct inode *inode)
{
mutex_lock(&(REISERFS_I(inode)->tailpack));
reiserfs_truncate_file(inode, 1);
@@ -312,7 +312,6 @@ const struct file_operations reiserfs_file_operations = {
};
const struct inode_operations reiserfs_file_inode_operations = {
- .truncate = reiserfs_vfs_truncate_file,
.setattr = reiserfs_setattr,
.setxattr = reiserfs_setxattr,
.getxattr = reiserfs_getxattr,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d83736fbc26..95d7680ead4 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3085,8 +3085,10 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
loff_t isize = i_size_read(inode);
loff_t end = offset + iov_length(iov, nr_segs);
- if (end > isize)
- vmtruncate(inode, isize);
+ if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
+ truncate_setsize(inode, isize);
+ reiserfs_vfs_truncate_file(inode);
+ }
}
return ret;
@@ -3200,8 +3202,13 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
*/
reiserfs_write_unlock_once(inode->i_sb, depth);
if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode))
- error = vmtruncate(inode, attr->ia_size);
+ attr->ia_size != i_size_read(inode)) {
+ error = inode_newsize_ok(inode, attr->ia_size);
+ if (!error) {
+ truncate_setsize(inode, attr->ia_size);
+ reiserfs_vfs_truncate_file(inode);
+ }
+ }
if (!error) {
setattr_copy(inode, attr);
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 33215f57ea0..157e474ab30 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2455,6 +2455,7 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
*,
int count);
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
+void reiserfs_vfs_truncate_file(struct inode *inode);
int reiserfs_commit_page(struct inode *inode, struct page *page,
unsigned from, unsigned to);
void reiserfs_flush_old_commits(struct super_block *);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 99dffab4c4e..9d863fb501f 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -300,14 +300,14 @@ EXPORT_SYMBOL(seq_read);
*
* Ready-made ->f_op->llseek()
*/
-loff_t seq_lseek(struct file *file, loff_t offset, int origin)
+loff_t seq_lseek(struct file *file, loff_t offset, int whence)
{
struct seq_file *m = file->private_data;
loff_t retval = -EINVAL;
mutex_lock(&m->lock);
m->version = file->f_version;
- switch (origin) {
+ switch (whence) {
case 1:
offset += file->f_pos;
case 0:
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 8bee4e57091..b5348696173 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -29,6 +29,7 @@
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
+#include <linux/proc_fs.h>
void signalfd_cleanup(struct sighand_struct *sighand)
{
@@ -227,7 +228,24 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
return total ? total: ret;
}
+#ifdef CONFIG_PROC_FS
+static int signalfd_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct signalfd_ctx *ctx = f->private_data;
+ sigset_t sigmask;
+
+ sigmask = ctx->sigmask;
+ signotset(&sigmask);
+ render_sigset_t(m, "sigmask:\t", &sigmask);
+
+ return 0;
+}
+#endif
+
static const struct file_operations signalfd_fops = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = signalfd_show_fdinfo,
+#endif
.release = signalfd_release,
.poll = signalfd_poll,
.read = signalfd_read,
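->show_fdinfo lets an fd type add lines to /proc/<pid>/fdinfo/<fd>, as signalfd now does for its sigmask. A hedged sketch of the same hook for a hypothetical anon-inode fd driver (the myfd_* names are invented):

#include <linux/fs.h>
#include <linux/seq_file.h>

struct myfd_ctx {
	unsigned long events;		/* hypothetical per-fd state */
};

#ifdef CONFIG_PROC_FS
static int myfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct myfd_ctx *ctx = f->private_data;

	seq_printf(m, "events:\t%lu\n", ctx->events);
	return 0;
}
#endif

static const struct file_operations myfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= myfd_show_fdinfo,
#endif
	/* .read, .poll, .release etc. as in any anon-inode fd driver */
};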
diff --git a/fs/stat.c b/fs/stat.c
index eae494630a3..14f45459c83 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -74,7 +74,7 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
{
struct path path;
int error = -EINVAL;
- int lookup_flags = 0;
+ unsigned int lookup_flags = 0;
if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
AT_EMPTY_PATH)) != 0)
@@ -84,13 +84,17 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
lookup_flags |= LOOKUP_FOLLOW;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
-
+retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
error = vfs_getattr(path.mnt, path.dentry, stat);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
out:
return error;
}
@@ -296,11 +300,13 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
struct path path;
int error;
int empty = 0;
+ unsigned int lookup_flags = LOOKUP_EMPTY;
if (bufsiz <= 0)
return -EINVAL;
- error = user_path_at_empty(dfd, pathname, LOOKUP_EMPTY, &path, &empty);
+retry:
+ error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
if (!error) {
struct inode *inode = path.dentry->d_inode;
@@ -314,6 +320,10 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
}
}
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
}
return error;
}
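The same lookup-retry shape recurs in statfs.c, utimes.c and xattr.c below: do the lookup without LOOKUP_REVAL first, and if the operation fails with ESTALE retry exactly once with LOOKUP_REVAL so stale (typically NFS) dentries are revalidated. A hedged, standalone sketch of that shape; the wrapper and its do_one_op callback are hypothetical:

#include <linux/fs.h>
#include <linux/namei.h>

static int do_path_op_with_retry(int dfd, const char __user *name,
				 int (*do_one_op)(const struct path *path))
{
	struct path path;
	unsigned int lookup_flags = LOOKUP_FOLLOW;
	int error;

retry:
	error = user_path_at(dfd, name, lookup_flags, &path);
	if (error)
		return error;
	error = do_one_op(&path);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		/* first ESTALE only: force revalidation and try again */
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}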
diff --git a/fs/statfs.c b/fs/statfs.c
index f8e832e6f0a..c219e733f55 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -77,10 +77,17 @@ EXPORT_SYMBOL(vfs_statfs);
int user_statfs(const char __user *pathname, struct kstatfs *st)
{
struct path path;
- int error = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
+ int error;
+ unsigned int lookup_flags = LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (!error) {
error = vfs_statfs(&path, st);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
}
return error;
}
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 71eb7e25392..db940a9be04 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -149,6 +149,7 @@ static struct file_system_type sysfs_fs_type = {
.name = "sysfs",
.mount = sysfs_mount,
.kill_sb = sysfs_kill_sb,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init sysfs_init(void)
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 0a65939508e..9d4dc683179 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -41,9 +41,11 @@ static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
+ truncate_setsize(inode, attr->ia_size);
+ sysv_truncate(inode);
}
setattr_copy(inode, attr);
@@ -52,7 +54,6 @@ static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
}
const struct inode_operations sysv_file_inode_operations = {
- .truncate = sysv_truncate,
.setattr = sysv_setattr,
.getattr = sysv_getattr,
};
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 90b54b43878..c1a591a4725 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -464,6 +464,16 @@ int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
return __block_write_begin(page, pos, len, get_block);
}
+static void sysv_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ truncate_pagecache(inode, to, inode->i_size);
+ sysv_truncate(inode);
+ }
+}
+
static int sysv_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -471,11 +481,8 @@ static int sysv_write_begin(struct file *file, struct address_space *mapping,
int ret;
ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ sysv_write_failed(mapping, pos + len);
return ret;
}
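reiserfs above, sysv here and ufs below all drop vmtruncate() the same way: check the new size, update i_size and the page cache, then call the filesystem's own block-freeing truncate. A hedged sketch of that setattr fragment for a hypothetical filesystem:

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_truncate(struct inode *inode)
{
	/* hypothetical: walk the block map and free blocks beyond i_size */
}

static int myfs_setattr_size(struct inode *inode, struct iattr *attr)
{
	int error;

	if (!(attr->ia_valid & ATTR_SIZE) ||
	    attr->ia_size == i_size_read(inode))
		return 0;

	error = inode_newsize_ok(inode, attr->ia_size);
	if (error)
		return error;

	truncate_setsize(inode, attr->ia_size);	/* i_size + page cache */
	myfs_truncate(inode);			/* on-disk blocks */
	return 0;
}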
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 62911637e12..12817ffc734 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2560,7 +2560,7 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
static int corrupt_data(const struct ubifs_info *c, const void *buf,
unsigned int len)
{
- unsigned int from, to, i, ffs = chance(1, 2);
+ unsigned int from, to, ffs = chance(1, 2);
unsigned char *p = (void *)buf;
from = random32() % (len + 1);
@@ -2571,11 +2571,9 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
ffs ? "0xFFs" : "random data");
if (ffs)
- for (i = from; i < to; i++)
- p[i] = 0xFF;
+ memset(p + from, 0xFF, to - from);
else
- for (i = from; i < to; i++)
- p[i] = random32() % 0x100;
+ prandom_bytes(p + from, to - from);
return to;
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index e271fba1651..8a574776a49 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -453,11 +453,11 @@ out:
}
/* If a seek is done on a directory, we have to free saved readdir() state */
-static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
+static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
{
kfree(file->private_data);
file->private_data = NULL;
- return generic_file_llseek(file, offset, origin);
+ return generic_file_llseek(file, offset, whence);
}
/* Free saved readdir() state when the directory is closed */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index df88b957ccf..cbae1ed0b7c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -587,7 +587,6 @@ out:
static sector_t inode_getblk(struct inode *inode, sector_t block,
int *err, int *new)
{
- static sector_t last_block;
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
struct extent_position prev_epos, cur_epos, next_epos;
int count = 0, startnum = 0, endnum = 0;
@@ -601,6 +600,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
struct udf_inode_info *iinfo = UDF_I(inode);
int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
int lastblock = 0;
+ bool isBeyondEOF;
*err = 0;
*new = 0;
@@ -676,11 +676,10 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
return newblock;
}
- last_block = block;
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
-
+ isBeyondEOF = 1;
if (count) {
if (c)
laarr[0] = laarr[1];
@@ -718,11 +717,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
memset(&laarr[c].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
count++;
- endnum++;
}
endnum = c + 1;
lastblock = 1;
} else {
+ isBeyondEOF = 0;
endnum = startnum = ((count > 2) ? 2 : count);
/* if the current extent is in position 0,
@@ -765,10 +764,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
goal, err);
if (!newblocknum) {
brelse(prev_epos.bh);
+ brelse(cur_epos.bh);
+ brelse(next_epos.bh);
*err = -ENOSPC;
return 0;
}
- iinfo->i_lenExtents += inode->i_sb->s_blocksize;
+ if (isBeyondEOF)
+ iinfo->i_lenExtents += inode->i_sb->s_blocksize;
}
/* if the extent the requested block is located in contains multiple
@@ -795,6 +797,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
brelse(prev_epos.bh);
+ brelse(cur_epos.bh);
+ brelse(next_epos.bh);
newblock = udf_get_pblock(inode->i_sb, newblocknum,
iinfo->i_location.partitionReferenceNum, 0);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index eb6d0b7dc87..ff24e4449ec 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -526,6 +526,14 @@ int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
+static void ufs_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size)
+ truncate_pagecache(inode, to, inode->i_size);
+}
+
static int ufs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -534,11 +542,8 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep,
ufs_getfrag_block);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
+ if (unlikely(ret))
+ ufs_write_failed(mapping, pos + len);
return ret;
}
diff --git a/fs/utimes.c b/fs/utimes.c
index bb0696a4173..f4fb7eca10e 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -158,13 +158,17 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times,
if (!(flags & AT_SYMLINK_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
-
+retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
goto out;
error = utimes_common(&path, times);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
}
out:
diff --git a/fs/xattr.c b/fs/xattr.c
index e21c119f4f9..3377dff1840 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -370,8 +370,9 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
{
struct path path;
int error;
-
- error = user_path(pathname, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
@@ -380,6 +381,10 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
mnt_drop_write(path.mnt);
}
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -389,8 +394,9 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
{
struct path path;
int error;
-
- error = user_lpath(pathname, &path);
+ unsigned int lookup_flags = 0;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
@@ -399,6 +405,10 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
mnt_drop_write(path.mnt);
}
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -476,12 +486,17 @@ SYSCALL_DEFINE4(getxattr, const char __user *, pathname,
{
struct path path;
ssize_t error;
-
- error = user_path(pathname, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = getxattr(path.dentry, name, value, size);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -490,12 +505,17 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
{
struct path path;
ssize_t error;
-
- error = user_lpath(pathname, &path);
+ unsigned int lookup_flags = 0;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = getxattr(path.dentry, name, value, size);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -556,12 +576,17 @@ SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list,
{
struct path path;
ssize_t error;
-
- error = user_path(pathname, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = listxattr(path.dentry, list, size);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -570,12 +595,17 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
{
struct path path;
ssize_t error;
-
- error = user_lpath(pathname, &path);
+ unsigned int lookup_flags = 0;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = listxattr(path.dentry, list, size);
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -615,8 +645,9 @@ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
{
struct path path;
int error;
-
- error = user_path(pathname, &path);
+ unsigned int lookup_flags = LOOKUP_FOLLOW;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
@@ -625,6 +656,10 @@ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
mnt_drop_write(path.mnt);
}
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
@@ -633,8 +668,9 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
{
struct path path;
int error;
-
- error = user_lpath(pathname, &path);
+ unsigned int lookup_flags = 0;
+retry:
+ error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
@@ -643,6 +679,10 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
mnt_drop_write(path.mnt);
}
path_put(&path);
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
return error;
}
diff --git a/include/Kbuild b/include/Kbuild
index 83256b64166..1dfd33e8d43 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,8 +1,5 @@
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
-header-y += linux/
-header-y += sound/
-header-y += rdma/
header-y += video/
header-y += scsi/
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index bb145e4b935..8b1d7a6a969 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -92,8 +92,8 @@ int acpi_pci_link_free_irq(acpi_handle handle);
/* ACPI PCI Interrupt Routing (pci_irq.c) */
-int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus);
-void acpi_pci_irq_del_prt(struct pci_bus *bus);
+int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus);
+void acpi_pci_irq_del_prt(int segment, int bus);
/* ACPI PCI Device Binding (pci_bind.c) */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 448303bdb85..33bbbae4ddc 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,7 +12,6 @@
#define __ASM_GENERIC_IO_H
#include <asm/page.h> /* I/O is all done through memory accesses */
-#include <asm/cacheflush.h>
#include <linux/types.h>
#ifdef CONFIG_GENERIC_IOMAP
@@ -83,19 +82,25 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
#ifdef CONFIG_64BIT
+#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
return *(const volatile u64 __force *) addr;
}
+#endif
+
#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
*(volatile u64 __force *) addr = b;
}
-#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
+#endif /* CONFIG_64BIT */
+
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *) 0)
#endif
@@ -148,7 +153,7 @@ static inline void insb(unsigned long addr, void *buffer, int count)
if (count) {
u8 *buf = buffer;
do {
- u8 x = inb(addr);
+ u8 x = __raw_readb(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -161,7 +166,7 @@ static inline void insw(unsigned long addr, void *buffer, int count)
if (count) {
u16 *buf = buffer;
do {
- u16 x = inw(addr);
+ u16 x = __raw_readw(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -174,7 +179,7 @@ static inline void insl(unsigned long addr, void *buffer, int count)
if (count) {
u32 *buf = buffer;
do {
- u32 x = inl(addr);
+ u32 x = __raw_readl(addr + PCI_IOBASE);
*buf++ = x;
} while (--count);
}
@@ -187,7 +192,7 @@ static inline void outsb(unsigned long addr, const void *buffer, int count)
if (count) {
const u8 *buf = buffer;
do {
- outb(*buf++, addr);
+ __raw_writeb(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
@@ -199,7 +204,7 @@ static inline void outsw(unsigned long addr, const void *buffer, int count)
if (count) {
const u16 *buf = buffer;
do {
- outw(*buf++, addr);
+ __raw_writew(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
@@ -211,42 +216,12 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
if (count) {
const u32 *buf = buffer;
do {
- outl(*buf++, addr);
+ __raw_writel(*buf++, addr + PCI_IOBASE);
} while (--count);
}
}
#endif
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
- insl(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
- insw(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
- insb(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
- outsl(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
- outsw(addr - PCI_IOBASE, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
- outsb(addr - PCI_IOBASE, buf, len);
-}
-
#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
@@ -286,15 +261,20 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
+#endif
#endif /* CONFIG_GENERIC_IOMAP */
/*
* Change virtual addresses to physical addresses and vv.
* These are pretty trivial
*/
+#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
@@ -304,6 +284,7 @@ static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
+#endif
/*
* Change "struct page" to physical address.
@@ -363,9 +344,16 @@ static inline void *bus_to_virt(unsigned long address)
}
#endif
+#ifndef memset_io
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#endif
+
+#ifndef memcpy_fromio
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#endif
+#ifndef memcpy_toio
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+#endif
#endif /* __KERNEL__ */
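With the new #ifndef guards an architecture can provide a single accessor and still pull in the rest of asm-generic/io.h. A hedged sketch of an arch header overriding just __raw_writeq; the architecture and any barrier requirements are hypothetical:

/* arch/myarch/include/asm/io.h (hypothetical) */
#ifndef __MYARCH_IO_H
#define __MYARCH_IO_H

#include <linux/types.h>

#ifdef CONFIG_64BIT
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	/* insert an arch-required barrier here if plain stores can reorder */
	*(volatile u64 __force *)addr = val;
}
#define __raw_writeq __raw_writeq	/* tell asm-generic/io.h it exists */
#endif

#include <asm-generic/io.h>

#endif /* __MYARCH_IO_H */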
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
index 4f4aa56d6b5..0ed3f1cfb85 100644
--- a/include/asm-generic/mmu.h
+++ b/include/asm-generic/mmu.h
@@ -7,8 +7,12 @@
*/
#ifndef __ASSEMBLY__
typedef struct {
- struct vm_list_struct *vmlist;
unsigned long end_brk;
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b36ce40bd1c..701beab27aa 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -219,6 +219,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pte_accessible
+# define pte_accessible(pte) ((void)(pte),1)
+#endif
+
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif
@@ -449,6 +453,32 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size);
#endif
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return page_to_pfn(ZERO_PAGE(addr));
+}
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+
#ifdef CONFIG_MMU
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
@@ -554,6 +584,112 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#endif
}
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * _PAGE_NUMA works identically to _PAGE_PROTNONE (it's actually the
+ * same bit too). It's set only when _PAGE_PRESENT is not set and it's
+ * never set if _PAGE_PRESENT is set.
+ *
+ * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
+ * fault triggers on those regions if pte/pmd_numa returns true
+ * (because _PAGE_PRESENT is not set).
+ */
+#ifndef pte_numa
+static inline int pte_numa(pte_t pte)
+{
+ return (pte_flags(pte) &
+ (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+#ifndef pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+ return (pmd_flags(pmd) &
+ (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+/*
+ * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
+ * because they're called by the NUMA hinting minor page fault. If we
+ * wouldn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
+ * would be forced to set it later while filling the TLB after we
+ * return to userland. That would trigger a second write to memory
+ * that we optimize away by setting _PAGE_ACCESSED here.
+ */
+#ifndef pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ pte = pte_clear_flags(pte, _PAGE_NUMA);
+ return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
+ return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ pte = pte_set_flags(pte, _PAGE_NUMA);
+ return pte_clear_flags(pte, _PAGE_PRESENT);
+}
+#endif
+
+#ifndef pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ pmd = pmd_set_flags(pmd, _PAGE_NUMA);
+ return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+#endif
+#else
+extern int pte_numa(pte_t pte);
+extern int pmd_numa(pmd_t pmd);
+extern pte_t pte_mknonnuma(pte_t pte);
+extern pmd_t pmd_mknonnuma(pmd_t pmd);
+extern pte_t pte_mknuma(pte_t pte);
+extern pmd_t pmd_mknuma(pmd_t pmd);
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#else
+static inline int pmd_numa(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pte_numa(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
#endif /* CONFIG_MMU */
#endif /* !__ASSEMBLY__ */
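A hedged sketch of how a fault path consumes these helpers: a pte previously marked with pte_mknuma() (and therefore !present) is detected with pte_numa() and made present again with pte_mknonnuma() once the NUMA hinting fault has been accounted. Locking, TLB flushing and the surrounding handler are deliberately elided, and the function name is invented:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void handle_numa_hint(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;	/* real code holds the pte lock here */

	if (!pte_numa(pte))
		return;

	pte = pte_mknonnuma(pte);	/* sets _PAGE_PRESENT|_PAGE_ACCESSED */
	set_pte_at(mm, addr, ptep, pte);
	update_mmu_cache(vma, addr, ptep);
}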
diff --git a/include/crypto/cast5.h b/include/crypto/cast5.h
index 586183a0406..14fbf39d638 100644
--- a/include/crypto/cast5.h
+++ b/include/crypto/cast5.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
+#include <crypto/cast_common.h>
#define CAST5_BLOCK_SIZE 8
#define CAST5_MIN_KEY_SIZE 5
@@ -19,9 +20,4 @@ int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
-extern const u32 cast5_s1[256];
-extern const u32 cast5_s2[256];
-extern const u32 cast5_s3[256];
-extern const u32 cast5_s4[256];
-
#endif
diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
index 157af6f342c..32b60eb8bd2 100644
--- a/include/crypto/cast6.h
+++ b/include/crypto/cast6.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
+#include <crypto/cast_common.h>
#define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16
@@ -20,9 +21,4 @@ int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
-extern const u32 cast6_s1[256];
-extern const u32 cast6_s2[256];
-extern const u32 cast6_s3[256];
-extern const u32 cast6_s4[256];
-
#endif
diff --git a/include/crypto/cast_common.h b/include/crypto/cast_common.h
new file mode 100644
index 00000000000..b7df35cd9f0
--- /dev/null
+++ b/include/crypto/cast_common.h
@@ -0,0 +1,9 @@
+#ifndef _CRYPTO_CAST_COMMON_H
+#define _CRYPTO_CAST_COMMON_H
+
+extern const u32 cast_s1[256];
+extern const u32 cast_s2[256];
+extern const u32 cast_s3[256];
+extern const u32 cast_s4[256];
+
+#endif
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
index c4467c55df1..6b700c7b2fe 100644
--- a/include/crypto/vmac.h
+++ b/include/crypto/vmac.h
@@ -56,6 +56,8 @@ typedef u64 vmac_t;
struct vmac_ctx_t {
struct crypto_cipher *child;
struct vmac_ctx __vmac_ctx;
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ int partial_size; /* size of the partial block */
};
#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3fd82809b2d..fad21c927a3 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1431,6 +1431,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1503,6 +1505,7 @@ extern unsigned int drm_debug;
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3fa18b7e949..00d78b5161c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -792,6 +792,7 @@ struct drm_mode_config {
/* output poll support */
bool poll_enabled;
+ bool poll_running;
struct delayed_work output_poll_work;
/* pointers to standard properties */
@@ -887,14 +888,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
/* for use by the fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -919,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
extern void drm_mode_connector_list_update(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t *value);
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
@@ -946,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
-extern void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val);
extern void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
@@ -1037,6 +1030,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -1053,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
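With the connector-specific property helpers gone, drivers go through the generic mode-object API instead. A hedged one-line illustration (the property itself and its creation are assumed to exist elsewhere):

#include <drm/drm_crtc.h>

static void my_attach_connector_prop(struct drm_connector *connector,
				     struct drm_property *prop)
{
	/* was: drm_connector_attach_property(connector, prop, 0); */
	drm_object_attach_property(&connector->base, prop, 0);
}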
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index e01cc80c9c3..f43d556bf40 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd);
@@ -162,6 +164,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fe061489f91..e8e1417af3d 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -311,6 +312,14 @@
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing or
+ * whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
struct i2c_algo_dp_aux_data {
bool running;
u16 address;
@@ -322,4 +331,34 @@ struct i2c_algo_dp_aux_data {
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#define DP_LINK_STATUS_SIZE 6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
#endif /* _DRM_DP_HELPER_H_ */
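A hedged sketch of a driver-side check built on the helpers declared above; it assumes the caller has already read the receiver capabilities into dpcd and the lane/channel status registers into link_status over the AUX channel:

#include <drm/drm_dp_helper.h>

static bool my_dp_link_trained(u8 dpcd[DP_RECEIVER_CAP_SIZE],
			       u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane_count = drm_dp_max_lane_count(dpcd);

	return drm_dp_clock_recovery_ok(link_status, lane_count) &&
	       drm_dp_channel_eq_ok(link_status, lane_count);
}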
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 3650d5d011e..fce2ef3fdff 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern void drm_ht_remove(struct drm_open_hash *ht);
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
#endif
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 06d7f798a08..0f4a366f6fa 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -158,12 +158,29 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
-extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment);
extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end);
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3c13a3a4b15..808dad29607 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
int (*get_hpd)(void);
};
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set 1. invert pixel clock
+ * @inv_vsync: if set 1. invert vsync signal for wb
+ * @inv_href: if set 1. invert href signal
+ * @inv_hsync: if set 1. invert hsync signal for wb
+ */
+struct exynos_drm_ipp_pol {
+ unsigned int inv_pclk;
+ unsigned int inv_vsync;
+ unsigned int inv_href;
+ unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+ struct exynos_drm_ipp_pol pol;
+ int clk_rate;
+};
+
#endif /* _EXYNOS_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 2e37e9f02e7..6eb76a1f11a 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,7 +3,7 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-const struct intel_gtt {
+struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
+ struct page *scratch_page;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
@@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e8028ade567..3cb5d848fb6 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -141,8 +141,6 @@ struct ttm_tt;
* struct ttm_buffer_object
*
* @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
@@ -172,7 +170,6 @@ struct ttm_tt;
* @seq_valid: The value of @val_seq is valid. This value is protected by
* the bo_device::lru_lock.
* @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vm_rb: Rb node for the vm rb tree.
@@ -200,7 +197,6 @@ struct ttm_buffer_object {
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- unsigned long buffer_start;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
@@ -255,7 +251,6 @@ struct ttm_buffer_object {
* checking NULL while reserved but not holding the mentioned lock.
*/
- void *sync_obj_arg;
void *sync_obj;
unsigned long priv_flags;
@@ -342,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
* Changes placement and caching policy of the buffer object
@@ -355,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu);
/**
@@ -429,8 +423,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
@@ -472,8 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -505,7 +498,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
@@ -521,8 +513,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -545,7 +535,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo);
@@ -736,4 +725,18 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication if a bo is reserved or not, and should
+ * only be used to print an error when it is not from incorrect api usage, since
+ * there's no guarantee that it is the caller that is holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+ return atomic_read(&bo->reserved);
+}
+
#endif
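Callers of ttm_bo_validate() simply drop the old no_wait_reserve argument; the new ttm_bo_is_reserved() helper is only meant for sanity checks. A hedged sketch of a caller updated to the trimmed signature (my_bo_pin is hypothetical):

#include <linux/bug.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static int my_bo_pin(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement, bool interruptible)
{
	/* caller must already hold the reservation */
	WARN_ON(!ttm_bo_is_reserved(bo));

	return ttm_bo_validate(bo, placement, interruptible,
			       false /* no_wait_gpu */);
}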
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d803b92b032..e3a43a47d78 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
*/
int (*move) (struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
* documentation.
*/
- bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
- int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+ bool (*sync_obj_signaled) (void *sync_obj);
+ int (*sync_obj_wait) (void *sync_obj,
bool lazy, bool interruptible);
- int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+ int (*sync_obj_flush) (void *sync_obj);
void (*sync_obj_unref) (void **sync_obj);
void *(*sync_obj_ref) (void *sync_obj);
@@ -521,8 +521,6 @@ struct ttm_bo_global {
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
@@ -556,7 +554,6 @@ struct ttm_bo_device {
* Protected by load / firstopen / lastclose /unload sync.
*/
- bool nice_mode;
struct address_space *dev_mapping;
/*
@@ -706,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* @proposed_placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_mem_reg.
* @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
* Allocate memory space for the buffer object pointed to by @bo, using
@@ -722,27 +718,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);
@@ -918,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -933,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -956,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
@@ -973,10 +953,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*
* @bo: A pointer to a struct ttm_buffer_object.
* @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -990,9 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- void *sync_obj_arg,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* ttm_io_prot
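A minimal sketch (not taken from any driver; bo, placement and the wait flags are assumed to come from the caller) showing the trimmed-down argument lists now that no_wait_reserve is gone from both helpers:

static int example_move(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_gpu)
{
	struct ttm_mem_reg mem = bo->mem;
	int ret;

	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_gpu);
	if (ret)
		return ret;
	/* evict == false: an ordinary move, pipelining is allowed */
	return ttm_bo_move_ttm(bo, false, no_wait_gpu, &mem);
}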
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 1926cae373b..547e19f06e5 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,8 +39,6 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
* @reserved: Indicates whether @bo has been reserved for validation.
* @removed: Indicates whether @bo has been removed from lru lists.
* @put_count: Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- void *new_sync_obj_arg;
bool reserved;
bool removed;
int put_count;
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index d6d1da468c9..72dcbe81dd0 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -60,7 +60,6 @@ struct ttm_mem_shrink {
* for the GPU, and this will otherwise block other workqueue tasks(?)
* At this point we use only a single-threaded workqueue.
* @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @zones: Array of pointers to accounting zones.
@@ -80,7 +79,6 @@ struct ttm_mem_global {
struct ttm_mem_shrink *shrink;
struct workqueue_struct *swap_queue;
struct work_struct work;
- wait_queue_head_t queue;
spinlock_t lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index b01c563b275..fc0cf064990 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -40,6 +40,7 @@
#include <linux/list.h>
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
+#include <linux/rcupdate.h>
#include <ttm/ttm_memory.h>
/**
@@ -120,6 +121,7 @@ struct ttm_object_device;
*/
struct ttm_base_object {
+ struct rcu_head rhead;
struct drm_hash_item hash;
enum ttm_object_type object_type;
bool shareable;
@@ -268,4 +270,6 @@ extern struct ttm_object_device *ttm_object_device_init
extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+#define ttm_base_object_kfree(__object, __base)\
+ kfree_rcu(__object, __base.rhead)
#endif
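The added rcu_head plus ttm_base_object_kfree() let a driver free an object that embeds a ttm_base_object only after an RCU grace period, so lookups performed under rcu_read_lock() stay safe. A minimal sketch with a made-up wrapper object:

struct my_user_object {			/* hypothetical driver wrapper */
	struct ttm_base_object base;
	/* driver-private payload would follow */
};

static void my_user_object_release(struct ttm_base_object **p_base)
{
	struct my_user_object *obj =
		container_of(*p_base, struct my_user_object, base);

	*p_base = NULL;
	/* expands to kfree_rcu(obj, base.rhead): a deferred, RCU-safe kfree() */
	ttm_base_object_kfree(obj, base);
}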
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
deleted file mode 100644
index 7fe2dae251e..00000000000
--- a/include/linux/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-header-y += dvb/
-header-y += hdlc/
-header-y += hsi/
-header-y += raid/
-header-y += usb/
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c33fa3ce9b7..3994d7790b2 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -78,6 +78,14 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+void acpi_initrd_override(void *data, size_t size);
+#else
+static inline void acpi_initrd_override(void *data, size_t size)
+{
+}
+#endif
+
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -479,6 +487,14 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_sleep(u8 sleep_state,
u32 pm1a_control, u32 pm1b_control);
+#ifdef CONFIG_X86
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
+#else
+static inline void arch_reserve_mem_area(acpi_physical_address addr,
+ size_t size)
+{
+}
+#endif /* CONFIG_X86 */
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
diff --git a/include/linux/asn1.h b/include/linux/asn1.h
index 5c3f4e4b9a2..eed6982860b 100644
--- a/include/linux/asn1.h
+++ b/include/linux/asn1.h
@@ -64,4 +64,6 @@ enum asn1_tag {
ASN1_LONG_TAG = 31 /* Long form tag */
};
+#define ASN1_INDEFINITE_LENGTH 0x80
+
#endif /* _LINUX_ASN1_H */
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index b856a2a590d..fe9989636b6 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -22,8 +22,6 @@ extern int __devinit __pata_platform_probe(struct device *dev,
unsigned int ioport_shift,
int __pio_mask);
-extern int __devexit __pata_platform_remove(struct device *dev);
-
/*
* Marvell SATA private data
*/
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index 4eb31752e2b..deb0ae58b99 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -5,10 +5,16 @@
#include <linux/list.h>
#include <linux/io.h>
+struct atmel_ssc_platform_data {
+ int use_dma;
+};
+
struct ssc_device {
struct list_head list;
+ resource_size_t phybase;
void __iomem *regs;
struct platform_device *pdev;
+ struct atmel_ssc_platform_data *pdata;
struct clk *clk;
int user;
int irq;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 2a9a9abc912..12731a19ef0 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -114,6 +114,7 @@ struct backing_dev_info {
int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
+__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 5ffc6dda467..da9a0825e00 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -134,4 +134,14 @@ struct generic_bl_info {
void (*kick_battery)(void);
};
+#ifdef CONFIG_OF
+struct backlight_device *of_find_backlight_by_node(struct device_node *node);
+#else
+static inline struct backlight_device *
+of_find_backlight_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 93b1e091b1e..e0ce311011c 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -350,6 +350,7 @@ extern void bcma_core_set_clockmode(struct bcma_device *core,
enum bcma_clkmode clkmode);
extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status,
bool on);
+extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
#define BCMA_DMA_TRANSLATION_MASK 0xC0000000
#define BCMA_DMA_TRANSLATION_NONE 0x00000000
#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index e51359180b6..9a0e3fa3ca9 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -2,6 +2,7 @@
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
+#include <linux/gpio.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
@@ -574,6 +575,12 @@ struct bcma_drv_cc {
#endif /* CONFIG_BCMA_DRIVER_MIPS */
u32 ticks_per_ms;
struct platform_device *watchdog;
+
+ /* Lock for GPIO register access. */
+ spinlock_t gpio_lock;
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+ struct gpio_chip gpio;
+#endif
};
/* Register access */
@@ -610,6 +617,8 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
extern void bcma_pmu_init(struct bcma_drv_cc *cc);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 2630c9b41a8..0530b986035 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -54,8 +54,6 @@ struct linux_binprm {
#define BINPRM_FLAGS_EXECFD_BIT 1
#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-#define BINPRM_MAX_RECURSION 4
-
/* Function parameter for binfmt->coredump */
struct coredump_params {
siginfo_t *siginfo;
@@ -114,6 +112,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
unsigned long stack_top,
int executable_stack);
extern int bprm_mm_init(struct linux_binprm *bprm);
+extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
extern int prepare_bprm_creds(struct linux_binprm *bprm);
@@ -121,8 +120,4 @@ extern void install_exec_creds(struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern void free_bprm(struct linux_binprm *);
-#ifdef __ARCH_WANT_KERNEL_EXECVE
-extern void ret_from_kernel_execve(struct pt_regs *normal) __noreturn;
-#endif
-
#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d..f94bc83011e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,12 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
+ /*
+ * Number of active block driver functions for which blk_drain_queue()
+ * must wait. Must be incremented around functions that unlock the
+ * queue_lock internally, e.g. scsi_request_fn().
+ */
+ unsigned int request_fn_active;
unsigned int rq_timeout;
struct timer_list timeout;
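A sketch of the protocol the comment above describes, as a request_fn such as scsi_request_fn() would follow it (surrounding code assumed):

	q->request_fn_active++;
	spin_unlock_irq(q->queue_lock);

	/* dispatch work that may sleep or re-enter the block layer */

	spin_lock_irq(q->queue_lock);
	q->request_fn_active--;
	/* blk_drain_queue() waits for this counter to drop back to zero */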
@@ -437,7 +443,7 @@ struct request_queue {
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
+#define QUEUE_FLAG_DYING 5 /* queue being torn down */
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
@@ -452,6 +458,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -1180,13 +1188,25 @@ static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
- unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+ unsigned int alignment, granularity, offset;
if (!lim->max_discard_sectors)
return 0;
- return (lim->discard_granularity + lim->discard_alignment - alignment)
- & (lim->discard_granularity - 1);
+ /* Why are these in bytes, not sectors? */
+ alignment = lim->discard_alignment >> 9;
+ granularity = lim->discard_granularity >> 9;
+ if (!granularity)
+ return 0;
+
+ /* Offset of the partition start in 'granularity' sectors */
+ offset = sector_div(sector, granularity);
+
+ /* And why do we do this modulus *again* in blkdev_issue_discard()? */
+ offset = (granularity + alignment - offset) % granularity;
+
+ /* Turn it back into bytes, gaah */
+ return offset << 9;
}
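A worked example of the new arithmetic, with illustrative numbers:

	/*
	 * discard_granularity = 4096 bytes -> granularity = 8 sectors
	 * discard_alignment   = 1024 bytes -> alignment   = 2 sectors
	 * partition start     = sector 13
	 *
	 * offset = 13 % 8          = 5
	 * offset = (8 + 2 - 5) % 8 = 5 sectors to the next aligned boundary (sector 18)
	 * return 5 << 9            = 2560 bytes
	 */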
static inline int bdev_discard_alignment(struct block_device *bdev)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7b74452c531..3f778c27f82 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -137,9 +137,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
- int flags);
-
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
#else
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 4d0fb3df2f4..a226652a5a6 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,6 +67,5 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
-void bsg_goose_queue(struct request_queue *q);
#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 6470792b13d..084d3c622b1 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -43,7 +43,6 @@ struct ceph_options {
struct ceph_entity_addr my_addr;
int mount_timeout;
int osd_idle_ttl;
- int osd_timeout;
int osd_keepalive_timeout;
/*
@@ -63,7 +62,6 @@ struct ceph_options {
* defaults
*/
#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
-#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
#define CEPH_OSD_KEEPALIVE_DEFAULT 5
#define CEPH_OSD_IDLE_TTL_DEFAULT 60
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e37acbe989a..10a417f9f76 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -123,6 +123,7 @@ extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
struct ceph_pg pgid);
+extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
#endif
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index de91fbdf127..2c04afeead1 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -87,6 +87,8 @@ struct ceph_pg {
*
* lpgp_num -- as above.
*/
+#define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */
+
#define CEPH_PG_TYPE_REP 1
#define CEPH_PG_TYPE_RAID4 2
#define CEPH_PG_POOL_VERSION 2
diff --git a/include/linux/clk/mvebu.h b/include/linux/clk/mvebu.h
new file mode 100644
index 00000000000..8c4ae713b06
--- /dev/null
+++ b/include/linux/clk/mvebu.h
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __CLK_MVEBU_H_
+#define __CLK_MVEBU_H_
+
+void __init mvebu_clocks_init(void);
+
+#endif
diff --git a/arch/arm/mach-vt8500/include/mach/irqs.h b/include/linux/clk/zynq.h
index a129fd1222f..56be7cd9aa8 100644
--- a/arch/arm/mach-vt8500/include/mach/irqs.h
+++ b/include/linux/clk/zynq.h
@@ -1,7 +1,5 @@
/*
- * arch/arm/mach-vt8500/include/mach/irqs.h
- *
- * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ * Copyright (C) 2012 National Instruments
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,5 +16,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* This value is just to make the core happy, never used otherwise */
-#define NR_IRQS 128
+#ifndef __LINUX_CLK_ZYNQ_H_
+#define __LINUX_CLK_ZYNQ_H_
+
+void __init xilinx_zynq_clocks_init(void __iomem *slcr);
+
+#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 784ebfe63c4..dec7e2d1887 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -23,6 +23,61 @@
#define COMPAT_USE_64BIT_TIME 0
#endif
+#ifndef __SC_DELOUSE
+#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#endif
+
+#define __SC_CCAST1(t1, a1) __SC_DELOUSE(t1,a1)
+#define __SC_CCAST2(t2, a2, ...) __SC_DELOUSE(t2,a2), __SC_CCAST1(__VA_ARGS__)
+#define __SC_CCAST3(t3, a3, ...) __SC_DELOUSE(t3,a3), __SC_CCAST2(__VA_ARGS__)
+#define __SC_CCAST4(t4, a4, ...) __SC_DELOUSE(t4,a4), __SC_CCAST3(__VA_ARGS__)
+#define __SC_CCAST5(t5, a5, ...) __SC_DELOUSE(t5,a5), __SC_CCAST4(__VA_ARGS__)
+#define __SC_CCAST6(t6, a6, ...) __SC_DELOUSE(t6,a6), __SC_CCAST5(__VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE1(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE2(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE3(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE4(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE5(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE6(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)); \
+ static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
+ asmlinkage long compat_SyS##name(__SC_LONG##x(__VA_ARGS__)) \
+ { \
+ return (long) C_SYSC##name(__SC_CCAST##x(__VA_ARGS__)); \
+ } \
+ SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \
+ static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__))
+
+#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__))
+
+#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
+#ifndef compat_user_stack_pointer
+#define compat_user_stack_pointer() current_user_stack_pointer()
+#endif
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+#ifndef compat_sigaltstack /* we'll need that for MIPS */
+typedef struct compat_sigaltstack {
+ compat_uptr_t ss_sp;
+ int ss_flags;
+ compat_size_t ss_size;
+} compat_stack_t;
+#endif
+#endif
+
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
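A minimal sketch of how the new wrappers are meant to be used (the syscall and its body are hypothetical): COMPAT_SYSCALL_DEFINE2 declares compat_sys_example(); with CONFIG_HAVE_SYSCALL_WRAPPERS it additionally emits compat_SyS_example(), which takes every argument as a long and narrows each one back through __SC_DELOUSE before calling the inline body.

COMPAT_SYSCALL_DEFINE2(example, unsigned int, fd, compat_ulong_t, len)
{
	/* fd and len arrive properly truncated/extended regardless of ABI */
	return do_example(fd, (unsigned long)len);	/* do_example() is made up */
}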
@@ -587,6 +642,16 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr);
+
+int compat_restore_altstack(const compat_stack_t __user *uss);
+int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#endif
+
+asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
+ struct compat_timespec __user *interval);
#else
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b02..662fd1b4c42 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -31,6 +31,8 @@
#define __linktime_error(message) __attribute__((__error__(message)))
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
#if __GNUC_MINOR__ >= 5
/*
* Mark a position in code as unreachable. This can be used to
@@ -63,3 +65,13 @@
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif
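A sketch of how the feature-test macros might be consumed (the helper below is made up): when the architecture selects CONFIG_ARCH_USE_BUILTIN_BSWAP and the compiler is new enough, a byte-swap helper can defer to the builtin and keep the open-coded shifts as a fallback.

#ifdef __HAVE_BUILTIN_BSWAP32__
#define my_swab32(x) __builtin_bswap32(x)
#else
#define my_swab32(x) \
	((((x) & 0x000000ffUL) << 24) | (((x) & 0x0000ff00UL) << 8) | \
	 (((x) & 0x00ff0000UL) >> 8)  | (((x) & 0xff000000UL) >> 24))
#endif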
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607..973ce10c40b 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,3 +29,10 @@
#endif
#define uninitialized_var(x) x
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f430e4162f4..dd852b73b28 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -10,6 +10,7 @@
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
+# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
@@ -33,6 +34,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
+# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
@@ -42,6 +44,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __rcu
#endif
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
#ifdef __KERNEL__
#ifdef __GNUC__
@@ -164,6 +170,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); })
#endif
+/* Not-quite-unique ID. */
+#ifndef __UNIQUE_ID
+# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
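An illustration of what the pasting buys (expansions assume __COUNTER__ starts at 0): the gcc-4 definition yields a fresh identifier on every use, while the generic __LINE__ fallback is only unique per source line.

	static int __UNIQUE_ID(foo);	/* gcc4: -> static int __UNIQUE_ID_foo0; */
	static int __UNIQUE_ID(foo);	/* gcc4: -> static int __UNIQUE_ID_foo1; */
	/* with the __LINE__ fallback, two uses expanded onto the same line collide */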
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 838320fc3d1..8c8a60d2940 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -144,7 +144,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
return node_possible_map;
}
-#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
+#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
diff --git a/include/linux/cred.h b/include/linux/cred.h
index ebbed2ce663..abb2cd50f6b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -77,21 +77,6 @@ extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
/*
- * The common credentials for a thread group
- * - shared by CLONE_THREAD
- */
-#ifdef CONFIG_KEYS
-struct thread_group_cred {
- atomic_t usage;
- pid_t tgid; /* thread group process ID */
- spinlock_t lock;
- struct key __rcu *session_keyring; /* keyring inherited over fork */
- struct key *process_keyring; /* keyring private to this process */
- struct rcu_head rcu; /* RCU deletion hook */
-};
-#endif
-
-/*
* The security context of a task
*
* The parts of the context break down into two categories:
@@ -139,6 +124,8 @@ struct cred {
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
+ struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
struct thread_group_cred *tgcred; /* thread-group shared credentials */
@@ -357,10 +344,8 @@ static inline void put_cred(const struct cred *_cred)
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
#define current_user_ns() (current_cred_xxx(user_ns))
-#define task_user_ns(task) (task_cred_xxx((task), user_ns))
#else
#define current_user_ns() (&init_user_ns)
-#define task_user_ns(task) (&init_user_ns)
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59200795482..c1754b59ddd 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -202,7 +202,6 @@ struct dentry_operations {
#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */
#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */
#define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */
-#define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
@@ -408,13 +407,6 @@ static inline bool d_mountpoint(struct dentry *dentry)
return dentry->d_flags & DCACHE_MOUNTED;
}
-static inline bool d_need_lookup(struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_NEED_LOOKUP;
-}
-
-extern void d_clear_need_lookup(struct dentry *dentry);
-
extern int sysctl_vfs_cache_pressure;
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 38d27a10aa5..bf6afa2fc43 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -23,7 +23,6 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
void *ptr;
unsigned long long ll;
- unsigned target_request_nr;
};
/*
@@ -46,8 +45,7 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
* = 1: simple remap complete
* = 2: The target wants to push back the io
*/
-typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
- union map_info *map_context);
+typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
union map_info *map_context);
@@ -60,8 +58,7 @@ typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
* 2 : The target wants to push back the io
*/
typedef int (*dm_endio_fn) (struct dm_target *ti,
- struct bio *bio, int error,
- union map_info *map_context);
+ struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
struct request *clone, int error,
union map_info *map_context);
@@ -193,18 +190,30 @@ struct dm_target {
* A number of zero-length barrier requests that will be submitted
* to the target for the purpose of flushing cache.
*
- * The request number will be placed in union map_info->target_request_nr.
+ * The request number can be accessed with dm_bio_get_target_request_nr.
* It is a responsibility of the target driver to remap these requests
* to the real underlying devices.
*/
unsigned num_flush_requests;
/*
- * The number of discard requests that will be submitted to the
- * target. map_info->request_nr is used just like num_flush_requests.
+ * The number of discard requests that will be submitted to the target.
+ * The request number can be accessed with dm_bio_get_target_request_nr.
*/
unsigned num_discard_requests;
+ /*
+ * The number of WRITE SAME requests that will be submitted to the target.
+ * The request number can be accessed with dm_bio_get_target_request_nr.
+ */
+ unsigned num_write_same_requests;
+
+ /*
+ * The minimum number of extra bytes allocated in each bio for the
+ * target to use. dm_per_bio_data returns the data location.
+ */
+ unsigned per_bio_data_size;
+
/* target specific data */
void *private;
@@ -241,6 +250,36 @@ struct dm_target_callbacks {
int (*congested_fn) (struct dm_target_callbacks *, int);
};
+/*
+ * For bio-based dm.
+ * One of these is allocated for each bio.
+ * This structure shouldn't be touched directly by target drivers.
+ * It is here so that we can inline dm_per_bio_data and
+ * dm_bio_from_per_bio_data
+ */
+struct dm_target_io {
+ struct dm_io *io;
+ struct dm_target *ti;
+ union map_info info;
+ unsigned target_request_nr;
+ struct bio clone;
+};
+
+static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+}
+
+static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
+}
+
+static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_request_nr;
+}
+
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
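A minimal sketch of a hypothetical bio-based target using the new per-bio data: the constructor reserves room through ti->per_bio_data_size, and each map call then takes its scratch space from dm_per_bio_data() instead of a separate allocation.

struct my_per_bio {			/* made-up per-bio state */
	sector_t original_sector;
};

static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->per_bio_data_size = sizeof(struct my_per_bio);
	/* parse arguments and set up ti->private here */
	return 0;
}

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));

	pb->original_sector = bio->bi_sector;
	/* remap bio->bi_bdev / bio->bi_sector to the underlying device here */
	return DM_MAPIO_REMAPPED;
}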
diff --git a/include/linux/device.h b/include/linux/device.h
index 05292e48834..43dcda937dd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -498,6 +498,10 @@ ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
@@ -507,6 +511,9 @@ ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
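A short sketch of the new bool attribute helper (driver names are made up): DEVICE_BOOL_ATTR ties a plain bool variable to device_show_bool()/device_store_bool(), giving a sysfs file without a hand-written show/store pair.

static bool my_feature_enabled;		/* hypothetical driver flag */
static DEVICE_BOOL_ATTR(feature_enabled, 0644, my_feature_enabled);

static int my_probe_extra(struct device *dev)
{
	/* the dev_ext_attribute wraps a regular device_attribute in .attr */
	return device_create_file(dev, &dev_attr_feature_enabled.attr);
}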
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 1d47dcce11e..d02da2c6fc1 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -98,7 +98,7 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
/*
* dlm_lock
*
- * Make an asyncronous request to acquire or convert a lock on a named
+ * Make an asynchronous request to acquire or convert a lock on a named
* resource.
*
* lockspace: context for the request
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index f83f793223f..c8e1831d757 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
DMA_ATTR_NON_CONSISTENT,
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
+ DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_MAX,
};
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f3816df..bd2e52ccc4f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -156,7 +156,6 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
-#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
@@ -184,103 +183,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-#else
-
-static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach)
-{
- return;
-}
-
-static inline struct dma_buf *dma_buf_export(void *priv,
- const struct dma_buf_ops *ops,
- size_t size, int flags)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-{
- return -ENODEV;
-}
-
-static inline struct dma_buf *dma_buf_get(int fd)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_put(struct dma_buf *dmabuf)
-{
- return;
-}
-
-static inline struct sg_table *dma_buf_map_attachment(
- struct dma_buf_attachment *attach, enum dma_data_direction write)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- return;
-}
-
-static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- size_t start, size_t len,
- enum dma_data_direction dir)
-{
- return -ENODEV;
-}
-
-static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- size_t start, size_t len,
- enum dma_data_direction dir)
-{
-}
-
-static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
- unsigned long pnum)
-{
- return NULL;
-}
-
-static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
- unsigned long pnum, void *vaddr)
-{
-}
-
-static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
-{
- return NULL;
-}
-
-static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
- unsigned long pnum, void *vaddr)
-{
-}
-
-static inline int dma_buf_mmap(struct dma_buf *dmabuf,
- struct vm_area_struct *vma,
- unsigned long pgoff)
-{
- return -ENODEV;
-}
-
-static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
-{
- return NULL;
-}
-
-static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
-}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 171ad8aedc8..fc0e34ce038 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -39,6 +39,8 @@ extern void debug_dma_map_page(struct device *dev, struct page *page,
int direction, dma_addr_t dma_addr,
bool map_single);
+extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction, bool map_single);
@@ -105,6 +107,11 @@ static inline void debug_dma_map_page(struct device *dev, struct page *page,
{
}
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+}
+
static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction,
bool map_single)
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 47e3d485058..0c5a18ec322 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -51,12 +51,11 @@
#endif
-
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.13"
-#define API_VERSION 88
+#define REL_VERSION "8.4.2"
+#define API_VERSION 1
#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 96
+#define PRO_VERSION_MAX 101
enum drbd_io_error_p {
@@ -66,7 +65,8 @@ enum drbd_io_error_p {
};
enum drbd_fencing_p {
- FP_DONT_CARE,
+ FP_NOT_AVAIL = -1, /* Not a policy */
+ FP_DONT_CARE = 0,
FP_RESOURCE,
FP_STONITH
};
@@ -102,6 +102,20 @@ enum drbd_on_congestion {
OC_DISCONNECT,
};
+enum drbd_read_balancing {
+ RB_PREFER_LOCAL,
+ RB_PREFER_REMOTE,
+ RB_ROUND_ROBIN,
+ RB_LEAST_PENDING,
+ RB_CONGESTED_REMOTE,
+ RB_32K_STRIPING,
+ RB_64K_STRIPING,
+ RB_128K_STRIPING,
+ RB_256K_STRIPING,
+ RB_512K_STRIPING,
+ RB_1M_STRIPING,
+};
+
/* KEEP the order, do not delete or insert. Only append. */
enum drbd_ret_code {
ERR_CODE_BASE = 100,
@@ -122,7 +136,7 @@ enum drbd_ret_code {
ERR_AUTH_ALG = 120,
ERR_AUTH_ALG_ND = 121,
ERR_NOMEM = 122,
- ERR_DISCARD = 123,
+ ERR_DISCARD_IMPOSSIBLE = 123,
ERR_DISK_CONFIGURED = 124,
ERR_NET_CONFIGURED = 125,
ERR_MANDATORY_TAG = 126,
@@ -130,8 +144,8 @@ enum drbd_ret_code {
ERR_INTR = 129, /* EINTR */
ERR_RESIZE_RESYNC = 130,
ERR_NO_PRIMARY = 131,
- ERR_SYNC_AFTER = 132,
- ERR_SYNC_AFTER_CYCLE = 133,
+ ERR_RESYNC_AFTER = 132,
+ ERR_RESYNC_AFTER_CYCLE = 133,
ERR_PAUSE_IS_SET = 134,
ERR_PAUSE_IS_CLEAR = 135,
ERR_PACKET_NR = 137,
@@ -155,6 +169,14 @@ enum drbd_ret_code {
ERR_CONG_NOT_PROTO_A = 155,
ERR_PIC_AFTER_DEP = 156,
ERR_PIC_PEER_DEP = 157,
+ ERR_RES_NOT_KNOWN = 158,
+ ERR_RES_IN_USE = 159,
+ ERR_MINOR_CONFIGURED = 160,
+ ERR_MINOR_EXISTS = 161,
+ ERR_INVALID_REQUEST = 162,
+ ERR_NEED_APV_100 = 163,
+ ERR_NEED_ALLOW_TWO_PRI = 164,
+ ERR_MD_UNCLEAN = 165,
/* insert new ones above this line */
AFTER_LAST_ERR_CODE
@@ -296,7 +318,8 @@ enum drbd_state_rv {
SS_NOT_SUPPORTED = -17, /* drbd-8.2 only */
SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */
SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! */
- SS_AFTER_LAST_ERROR = -20, /* Keep this at bottom */
+ SS_O_VOL_PEER_PRI = -20,
+ SS_AFTER_LAST_ERROR = -21, /* Keep this at bottom */
};
/* from drbd_strings.c */
@@ -313,7 +336,9 @@ extern const char *drbd_set_st_err_str(enum drbd_state_rv);
#define MDF_FULL_SYNC (1 << 3)
#define MDF_WAS_UP_TO_DATE (1 << 4)
#define MDF_PEER_OUT_DATED (1 << 5)
-#define MDF_CRASHED_PRIMARY (1 << 6)
+#define MDF_CRASHED_PRIMARY (1 << 6)
+#define MDF_AL_CLEAN (1 << 7)
+#define MDF_AL_DISABLED (1 << 8)
enum drbd_uuid_index {
UI_CURRENT,
@@ -333,37 +358,23 @@ enum drbd_timeout_flag {
#define UUID_JUST_CREATED ((__u64)4)
+/* magic numbers used in meta data and network packets */
#define DRBD_MAGIC 0x83740267
-#define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC)
#define DRBD_MAGIC_BIG 0x835a
-#define BE_DRBD_MAGIC_BIG __constant_cpu_to_be16(DRBD_MAGIC_BIG)
+#define DRBD_MAGIC_100 0x8620ec20
+
+#define DRBD_MD_MAGIC_07 (DRBD_MAGIC+3)
+#define DRBD_MD_MAGIC_08 (DRBD_MAGIC+4)
+#define DRBD_MD_MAGIC_84_UNCLEAN (DRBD_MAGIC+5)
+
+
+/* how did I come up with this magic?
+ * base64 decode "actlog==" ;) */
+#define DRBD_AL_MAGIC 0x69cb65a2
/* these are of type "int" */
#define DRBD_MD_INDEX_INTERNAL -1
#define DRBD_MD_INDEX_FLEX_EXT -2
#define DRBD_MD_INDEX_FLEX_INT -3
-/* Start of the new netlink/connector stuff */
-
-#define DRBD_NL_CREATE_DEVICE 0x01
-#define DRBD_NL_SET_DEFAULTS 0x02
-
-
-/* For searching a vacant cn_idx value */
-#define CN_IDX_STEP 6977
-
-struct drbd_nl_cfg_req {
- int packet_type;
- unsigned int drbd_minor;
- int flags;
- unsigned short tag_list[];
-};
-
-struct drbd_nl_cfg_reply {
- int packet_type;
- unsigned int minor;
- int ret_code; /* enum ret_code or set_st_err_t */
- unsigned short tag_list[]; /* only used with get_* calls */
-};
-
#endif
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
new file mode 100644
index 00000000000..d0d8fac8a6e
--- /dev/null
+++ b/include/linux/drbd_genl.h
@@ -0,0 +1,378 @@
+/*
+ * General overview:
+ * full generic netlink message:
+ * |nlmsghdr|genlmsghdr|<payload>
+ *
+ * payload:
+ * |optional fixed size family header|<sequence of netlink attributes>
+ *
+ * sequence of netlink attributes:
+ * I chose to have all "top level" attributes NLA_NESTED,
+ * corresponding to some real struct.
+ * So we have a sequence of |tla, len|<nested nla sequence>
+ *
+ * nested nla sequence:
+ * may be empty, or contain a sequence of netlink attributes
+ * representing the struct fields.
+ *
+ * The tag number of any field (regardless of containing struct)
+ * will be available as T_ ## field_name,
+ * so you cannot have the same field name in two different structs.
+ *
+ * The tag numbers themselves are per struct, though,
+ * so should always begin at 1 (not 0, that is the special "NLA_UNSPEC" type,
+ * which we won't use here).
+ * The tag numbers are used as index in the respective nla_policy array.
+ *
+ * GENL_struct(tag_name, tag_number, struct name, struct fields) - struct and policy
+ * genl_magic_struct.h
+ * generates the struct declaration,
+ * generates an entry in the tla enum,
+ * genl_magic_func.h
+ * generates an entry in the static tla policy
+ * with .type = NLA_NESTED
+ * generates the static <struct_name>_nl_policy definition,
+ * and static conversion functions
+ *
+ * genl_magic_func.h
+ *
+ * GENL_mc_group(group)
+ * genl_magic_struct.h
+ * does nothing
+ * genl_magic_func.h
+ * defines and registers the mcast group,
+ * and provides a send helper
+ *
+ * GENL_notification(op_name, op_num, mcast_group, tla list)
+ * These are notifications to userspace.
+ *
+ * genl_magic_struct.h
+ * generates an entry in the genl_ops enum,
+ * genl_magic_func.h
+ * does nothing
+ *
+ * mcast group: the name of the mcast group this notification should be
+ * expected on
+ * tla list: the list of expected top level attributes,
+ * for documentation and sanity checking.
+ *
+ * GENL_op(op_name, op_num, flags and handler, tla list) - "genl operations"
+ * These are requests from userspace.
+ *
+ * _op and _notification share the same "number space",
+ * op_nr will be assigned to "genlmsghdr->cmd"
+ *
+ * genl_magic_struct.h
+ * generates an entry in the genl_ops enum,
+ * genl_magic_func.h
+ * generates an entry in the static genl_ops array,
+ * and static register/unregister functions to
+ * genl_register_family_with_ops().
+ *
+ * flags and handler:
+ * GENL_op_init( .doit = x, .dumpit = y, .flags = something)
+ * GENL_doit(x) => .dumpit = NULL, .flags = GENL_ADMIN_PERM
+ * tla list: the list of expected top level attributes,
+ * for documentation and sanity checking.
+ */
+
+/*
+ * STRUCTS
+ */
+
+/* this is sent kernel -> userland on various error conditions, and contains
+ * informational textual info, which is supposedly human readable.
+ * The computer relevant return code is in the drbd_genlmsghdr.
+ */
+GENL_struct(DRBD_NLA_CFG_REPLY, 1, drbd_cfg_reply,
+ /* "arbitrary" size strings, nla_policy.len = 0 */
+ __str_field(1, DRBD_GENLA_F_MANDATORY, info_text, 0)
+)
+
+/* Configuration requests typically need a context to operate on.
+ * Possible keys are device minor (fits in the drbd_genlmsghdr),
+ * the replication link (aka connection) name,
+ * and/or the replication group (aka resource) name,
+ * and the volume id within the resource. */
+GENL_struct(DRBD_NLA_CFG_CONTEXT, 2, drbd_cfg_context,
+ __u32_field(1, DRBD_GENLA_F_MANDATORY, ctx_volume)
+ __str_field(2, DRBD_GENLA_F_MANDATORY, ctx_resource_name, 128)
+ __bin_field(3, DRBD_GENLA_F_MANDATORY, ctx_my_addr, 128)
+ __bin_field(4, DRBD_GENLA_F_MANDATORY, ctx_peer_addr, 128)
+)
+
+GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
+ __str_field(1, DRBD_F_REQUIRED | DRBD_F_INVARIANT, backing_dev, 128)
+ __str_field(2, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev, 128)
+ __s32_field(3, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev_idx)
+
+ /* use the resize command to try and change the disk_size */
+ __u64_field(4, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, disk_size)
+ /* we could change the max_bio_bvecs,
+ * but it won't propagate through the stack */
+ __u32_field(5, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, max_bio_bvecs)
+
+ __u32_field_def(6, DRBD_GENLA_F_MANDATORY, on_io_error, DRBD_ON_IO_ERROR_DEF)
+ __u32_field_def(7, DRBD_GENLA_F_MANDATORY, fencing, DRBD_FENCING_DEF)
+
+ __u32_field_def(8, DRBD_GENLA_F_MANDATORY, resync_rate, DRBD_RESYNC_RATE_DEF)
+ __s32_field_def(9, DRBD_GENLA_F_MANDATORY, resync_after, DRBD_MINOR_NUMBER_DEF)
+ __u32_field_def(10, DRBD_GENLA_F_MANDATORY, al_extents, DRBD_AL_EXTENTS_DEF)
+ __u32_field_def(11, DRBD_GENLA_F_MANDATORY, c_plan_ahead, DRBD_C_PLAN_AHEAD_DEF)
+ __u32_field_def(12, DRBD_GENLA_F_MANDATORY, c_delay_target, DRBD_C_DELAY_TARGET_DEF)
+ __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF)
+ __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF)
+ __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF)
+
+ __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF)
+ __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF)
+ __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
+ __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
+ __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
+ __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
+ /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */
+ __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
+)
+
+GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
+ __str_field_def(1, DRBD_GENLA_F_MANDATORY, cpu_mask, 32)
+ __u32_field_def(2, DRBD_GENLA_F_MANDATORY, on_no_data, DRBD_ON_NO_DATA_DEF)
+)
+
+GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
+ __str_field_def(1, DRBD_GENLA_F_MANDATORY | DRBD_F_SENSITIVE,
+ shared_secret, SHARED_SECRET_MAX)
+ __str_field_def(2, DRBD_GENLA_F_MANDATORY, cram_hmac_alg, SHARED_SECRET_MAX)
+ __str_field_def(3, DRBD_GENLA_F_MANDATORY, integrity_alg, SHARED_SECRET_MAX)
+ __str_field_def(4, DRBD_GENLA_F_MANDATORY, verify_alg, SHARED_SECRET_MAX)
+ __str_field_def(5, DRBD_GENLA_F_MANDATORY, csums_alg, SHARED_SECRET_MAX)
+ __u32_field_def(6, DRBD_GENLA_F_MANDATORY, wire_protocol, DRBD_PROTOCOL_DEF)
+ __u32_field_def(7, DRBD_GENLA_F_MANDATORY, connect_int, DRBD_CONNECT_INT_DEF)
+ __u32_field_def(8, DRBD_GENLA_F_MANDATORY, timeout, DRBD_TIMEOUT_DEF)
+ __u32_field_def(9, DRBD_GENLA_F_MANDATORY, ping_int, DRBD_PING_INT_DEF)
+ __u32_field_def(10, DRBD_GENLA_F_MANDATORY, ping_timeo, DRBD_PING_TIMEO_DEF)
+ __u32_field_def(11, DRBD_GENLA_F_MANDATORY, sndbuf_size, DRBD_SNDBUF_SIZE_DEF)
+ __u32_field_def(12, DRBD_GENLA_F_MANDATORY, rcvbuf_size, DRBD_RCVBUF_SIZE_DEF)
+ __u32_field_def(13, DRBD_GENLA_F_MANDATORY, ko_count, DRBD_KO_COUNT_DEF)
+ __u32_field_def(14, DRBD_GENLA_F_MANDATORY, max_buffers, DRBD_MAX_BUFFERS_DEF)
+ __u32_field_def(15, DRBD_GENLA_F_MANDATORY, max_epoch_size, DRBD_MAX_EPOCH_SIZE_DEF)
+ __u32_field_def(16, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF)
+ __u32_field_def(17, DRBD_GENLA_F_MANDATORY, after_sb_0p, DRBD_AFTER_SB_0P_DEF)
+ __u32_field_def(18, DRBD_GENLA_F_MANDATORY, after_sb_1p, DRBD_AFTER_SB_1P_DEF)
+ __u32_field_def(19, DRBD_GENLA_F_MANDATORY, after_sb_2p, DRBD_AFTER_SB_2P_DEF)
+ __u32_field_def(20, DRBD_GENLA_F_MANDATORY, rr_conflict, DRBD_RR_CONFLICT_DEF)
+ __u32_field_def(21, DRBD_GENLA_F_MANDATORY, on_congestion, DRBD_ON_CONGESTION_DEF)
+ __u32_field_def(22, DRBD_GENLA_F_MANDATORY, cong_fill, DRBD_CONG_FILL_DEF)
+ __u32_field_def(23, DRBD_GENLA_F_MANDATORY, cong_extents, DRBD_CONG_EXTENTS_DEF)
+ __flg_field_def(24, DRBD_GENLA_F_MANDATORY, two_primaries, DRBD_ALLOW_TWO_PRIMARIES_DEF)
+ __flg_field(25, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, discard_my_data)
+ __flg_field_def(26, DRBD_GENLA_F_MANDATORY, tcp_cork, DRBD_TCP_CORK_DEF)
+ __flg_field_def(27, DRBD_GENLA_F_MANDATORY, always_asbp, DRBD_ALWAYS_ASBP_DEF)
+ __flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
+ __flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
+ /* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
+)
+
+GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, assume_uptodate)
+)
+
+GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
+ __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size)
+ __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force)
+ __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync)
+)
+
+GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
+ /* the reason of the broadcast,
+ * if this is an event triggered broadcast. */
+ __u32_field(1, DRBD_GENLA_F_MANDATORY, sib_reason)
+ __u32_field(2, DRBD_F_REQUIRED, current_state)
+ __u64_field(3, DRBD_GENLA_F_MANDATORY, capacity)
+ __u64_field(4, DRBD_GENLA_F_MANDATORY, ed_uuid)
+
+ /* These are for broadcast from after state change work.
+ * prev_state and new_state are from the moment the state change took
+ * place, new_state is not necessarily the same as current_state,
+ * there may have been more state changes since. Which will be
+ * broadcasted soon, in their respective after state change work. */
+ __u32_field(5, DRBD_GENLA_F_MANDATORY, prev_state)
+ __u32_field(6, DRBD_GENLA_F_MANDATORY, new_state)
+
+ /* if we have a local disk: */
+ __bin_field(7, DRBD_GENLA_F_MANDATORY, uuids, (UI_SIZE*sizeof(__u64)))
+ __u32_field(8, DRBD_GENLA_F_MANDATORY, disk_flags)
+ __u64_field(9, DRBD_GENLA_F_MANDATORY, bits_total)
+ __u64_field(10, DRBD_GENLA_F_MANDATORY, bits_oos)
+ /* and in case resync or online verify is active */
+ __u64_field(11, DRBD_GENLA_F_MANDATORY, bits_rs_total)
+ __u64_field(12, DRBD_GENLA_F_MANDATORY, bits_rs_failed)
+
+ /* for pre and post notifications of helper execution */
+ __str_field(13, DRBD_GENLA_F_MANDATORY, helper, 32)
+ __u32_field(14, DRBD_GENLA_F_MANDATORY, helper_exit_code)
+
+ __u64_field(15, 0, send_cnt)
+ __u64_field(16, 0, recv_cnt)
+ __u64_field(17, 0, read_cnt)
+ __u64_field(18, 0, writ_cnt)
+ __u64_field(19, 0, al_writ_cnt)
+ __u64_field(20, 0, bm_writ_cnt)
+ __u32_field(21, 0, ap_bio_cnt)
+ __u32_field(22, 0, ap_pending_cnt)
+ __u32_field(23, 0, rs_pending_cnt)
+)
+
+GENL_struct(DRBD_NLA_START_OV_PARMS, 9, start_ov_parms,
+ __u64_field(1, DRBD_GENLA_F_MANDATORY, ov_start_sector)
+ __u64_field(2, DRBD_GENLA_F_MANDATORY, ov_stop_sector)
+)
+
+GENL_struct(DRBD_NLA_NEW_C_UUID_PARMS, 10, new_c_uuid_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, clear_bm)
+)
+
+GENL_struct(DRBD_NLA_TIMEOUT_PARMS, 11, timeout_parms,
+ __u32_field(1, DRBD_F_REQUIRED, timeout_type)
+)
+
+GENL_struct(DRBD_NLA_DISCONNECT_PARMS, 12, disconnect_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, force_disconnect)
+)
+
+GENL_struct(DRBD_NLA_DETACH_PARMS, 13, detach_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, force_detach)
+)
+
+/*
+ * Notifications and commands (genlmsghdr->cmd)
+ */
+GENL_mc_group(events)
+
+ /* kernel -> userspace announcement of changes */
+GENL_notification(
+ DRBD_EVENT, 1, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_STATE_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_SYNCER_CONF, DRBD_GENLA_F_MANDATORY)
+)
+
+ /* query kernel for specific or all info */
+GENL_op(
+ DRBD_ADM_GET_STATUS, 2,
+ GENL_op_init(
+ .doit = drbd_adm_get_status,
+ .dumpit = drbd_adm_get_status_all,
+ /* anyone may ask for the status,
+ * it is broadcasted anyways */
+ ),
+ /* To select the object .doit.
+ * Or a subset of objects in .dumpit. */
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+)
+
+ /* add DRBD minor devices as volumes to resources */
+GENL_op(DRBD_ADM_NEW_MINOR, 5, GENL_doit(drbd_adm_add_minor),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DEL_MINOR, 6, GENL_doit(drbd_adm_delete_minor),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+ /* add or delete resources */
+GENL_op(DRBD_ADM_NEW_RESOURCE, 7, GENL_doit(drbd_adm_new_resource),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DEL_RESOURCE, 8, GENL_doit(drbd_adm_del_resource),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+GENL_op(DRBD_ADM_RESOURCE_OPTS, 9,
+ GENL_doit(drbd_adm_resource_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_OPTS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_CONNECT, 10,
+ GENL_doit(drbd_adm_connect),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_CHG_NET_OPTS, 29,
+ GENL_doit(drbd_adm_net_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(DRBD_ADM_DISCONNECT, 11, GENL_doit(drbd_adm_disconnect),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+GENL_op(DRBD_ADM_ATTACH, 12,
+ GENL_doit(drbd_adm_attach),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(DRBD_ADM_CHG_DISK_OPTS, 28,
+ GENL_doit(drbd_adm_disk_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DISK_OPTS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_RESIZE, 13,
+ GENL_doit(drbd_adm_resize),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESIZE_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_PRIMARY, 14,
+ GENL_doit(drbd_adm_set_role),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_SECONDARY, 15,
+ GENL_doit(drbd_adm_set_role),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_NEW_C_UUID, 16,
+ GENL_doit(drbd_adm_new_c_uuid),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NEW_C_UUID_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_START_OV, 17,
+ GENL_doit(drbd_adm_start_ov),
+ GENL_tla_expected(DRBD_NLA_START_OV_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(DRBD_ADM_DETACH, 18, GENL_doit(drbd_adm_detach),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DETACH_PARMS, DRBD_GENLA_F_MANDATORY))
+
+GENL_op(DRBD_ADM_INVALIDATE, 19, GENL_doit(drbd_adm_invalidate),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_INVAL_PEER, 20, GENL_doit(drbd_adm_invalidate_peer),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_PAUSE_SYNC, 21, GENL_doit(drbd_adm_pause_sync),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_RESUME_SYNC, 22, GENL_doit(drbd_adm_resume_sync),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_SUSPEND_IO, 23, GENL_doit(drbd_adm_suspend_io),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_RESUME_IO, 24, GENL_doit(drbd_adm_resume_io),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_OUTDATE, 25, GENL_doit(drbd_adm_outdate),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_GET_TIMEOUT_TYPE, 26, GENL_doit(drbd_adm_get_timeout_type),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DOWN, 27, GENL_doit(drbd_adm_down),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
new file mode 100644
index 00000000000..9ef50d51e34
--- /dev/null
+++ b/include/linux/drbd_genl_api.h
@@ -0,0 +1,55 @@
+#ifndef DRBD_GENL_STRUCT_H
+#define DRBD_GENL_STRUCT_H
+
+/**
+ * struct drbd_genlmsghdr - DRBD specific header used in NETLINK_GENERIC requests
+ * @minor:
+ * For admin requests (user -> kernel): which minor device to operate on.
+ * For (unicast) replies or informational (broadcast) messages
+ * (kernel -> user): which minor device the information is about.
+ * If we do not operate on minors, but on connections or resources,
+ * the minor value shall be (~0), and the attribute DRBD_NLA_CFG_CONTEXT
+ * is used instead.
+ * @flags: possible operation modifiers (relevant only for user->kernel):
+ * DRBD_GENL_F_SET_DEFAULTS
+ * @volume:
+ * When creating a new minor (adding it to a resource), the resource needs
+ * to know which volume number within the resource this is supposed to be.
+ * The volume number corresponds to the same volume number on the remote side,
+ * whereas the minor number on the remote side may be different
+ * (union with flags).
+ * @ret_code: kernel->userland unicast cfg reply return code (union with flags);
+ */
+struct drbd_genlmsghdr {
+ __u32 minor;
+ union {
+ __u32 flags;
+ __s32 ret_code;
+ };
+};
+
+/* To be used in drbd_genlmsghdr.flags */
+enum {
+ DRBD_GENL_F_SET_DEFAULTS = 1,
+};
+
+enum drbd_state_info_bcast_reason {
+ SIB_GET_STATUS_REPLY = 1,
+ SIB_STATE_CHANGE = 2,
+ SIB_HELPER_PRE = 3,
+ SIB_HELPER_POST = 4,
+ SIB_SYNC_PROGRESS = 5,
+};
+
+/* hack around predefined gcc/cpp "linux=1",
+ * we cannot possibly include <1/drbd_genl.h> */
+#undef linux
+
+#include <linux/drbd.h>
+#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_FAMILY drbd
+#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
+#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
+#include <linux/genl_magic_struct.h>
+
+#endif
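A small userspace-side sketch (hypothetical values) of filling the family header for a per-minor request; replies reuse the same layout with the result delivered in ret_code, which shares storage with flags.

struct drbd_genlmsghdr hdr = {
	.minor = 0,				/* operate on minor 0 */
	.flags = DRBD_GENL_F_SET_DEFAULTS,	/* reset unspecified options to their defaults */
};
/* for resource- or connection-level commands, .minor is (~0U) and the object
 * is selected through the DRBD_NLA_CFG_CONTEXT attribute instead */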
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index fb670bf603f..1fa19c5f5e6 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -16,29 +16,37 @@
#define DEBUG_RANGE_CHECK 0
#define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 256
+#define DRBD_MINOR_COUNT_MAX 255
#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_SCALE '1'
+
+#define DRBD_VOLUME_MAX 65535
#define DRBD_DIALOG_REFRESH_MIN 0
#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_SCALE '1'
/* valid port number */
#define DRBD_PORT_MIN 1
#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_SCALE '1'
/* startup { */
/* if you want more than 3.4 days, disable */
#define DRBD_WFC_TIMEOUT_MIN 0
#define DRBD_WFC_TIMEOUT_MAX 300000
#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_SCALE '1'
#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
/* }*/
/* net { */
@@ -47,75 +55,91 @@
#define DRBD_TIMEOUT_MIN 1
#define DRBD_TIMEOUT_MAX 600
#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_SCALE '1'
/* If backing disk takes longer than disk_timeout, mark the disk as failed */
#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_SCALE '1'
/* active connection retries when C_WF_CONNECTION */
#define DRBD_CONNECT_INT_MIN 1
#define DRBD_CONNECT_INT_MAX 120
#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_SCALE '1'
/* keep-alive probes when idle */
#define DRBD_PING_INT_MIN 1
#define DRBD_PING_INT_MAX 120
#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_SCALE '1'
/* timeout for the ping packets.*/
#define DRBD_PING_TIMEO_MIN 1
#define DRBD_PING_TIMEO_MAX 300
#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_SCALE '1'
/* max number of write requests between write barriers */
#define DRBD_MAX_EPOCH_SIZE_MIN 1
#define DRBD_MAX_EPOCH_SIZE_MAX 20000
#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
/* I don't think that a tcp send buffer of more than 10M is useful */
#define DRBD_SNDBUF_SIZE_MIN 0
#define DRBD_SNDBUF_SIZE_MAX (10<<20)
#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_SCALE '1'
#define DRBD_RCVBUF_SIZE_MIN 0
#define DRBD_RCVBUF_SIZE_MAX (10<<20)
#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_SCALE '1'
/* @4k PageSize -> 128kB - 512MB */
#define DRBD_MAX_BUFFERS_MIN 32
#define DRBD_MAX_BUFFERS_MAX 131072
#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_SCALE '1'
/* @4k PageSize -> 4kB - 512MB */
#define DRBD_UNPLUG_WATERMARK_MIN 1
#define DRBD_UNPLUG_WATERMARK_MAX 131072
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
+#define DRBD_UNPLUG_WATERMARK_SCALE '1'
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
#define DRBD_KO_COUNT_MIN 0
#define DRBD_KO_COUNT_MAX 200
-#define DRBD_KO_COUNT_DEF 0
+#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_SCALE '1'
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
-#define DRBD_RATE_MIN 1
+#define DRBD_RESYNC_RATE_MIN 1
/* channel bonding 10 GbE, or other hardware */
-#define DRBD_RATE_MAX (4 << 20)
-#define DRBD_RATE_DEF 250 /* kb/second */
+#define DRBD_RESYNC_RATE_MAX (4 << 20)
+#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
/* less than 7 would hit performance unnecessarily.
- * 3833 is the largest prime that still does fit
- * into 64 sectors of activity log */
+ * 919 slots of context information per transaction,
+ * 32k activity log, 4k transaction size,
+ * one transaction in flight:
+ * 919 * 7 = 6433 */
#define DRBD_AL_EXTENTS_MIN 7
-#define DRBD_AL_EXTENTS_MAX 3833
-#define DRBD_AL_EXTENTS_DEF 127
+#define DRBD_AL_EXTENTS_MAX 6433
+#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_SCALE '1'
-#define DRBD_AFTER_MIN -1
-#define DRBD_AFTER_MAX 255
-#define DRBD_AFTER_DEF -1
+#define DRBD_MINOR_NUMBER_MIN -1
+#define DRBD_MINOR_NUMBER_MAX ((1 << 20) - 1)
+#define DRBD_MINOR_NUMBER_DEF -1
+#define DRBD_MINOR_NUMBER_SCALE '1'
/* } */
@@ -124,11 +148,12 @@
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
-#define DRBD_DISK_SIZE_SECT_MIN 0
-#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40))
-#define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_MIN 0
+#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
-#define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
+#define DRBD_ON_IO_ERROR_DEF EP_DETACH
#define DRBD_FENCING_DEF FP_DONT_CARE
#define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT
#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT
@@ -136,38 +161,59 @@
#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
+#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
#define DRBD_MAX_BIO_BVECS_MIN 0
#define DRBD_MAX_BIO_BVECS_MAX 128
#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_SCALE '1'
#define DRBD_C_PLAN_AHEAD_MIN 0
#define DRBD_C_PLAN_AHEAD_MAX 300
-#define DRBD_C_PLAN_AHEAD_DEF 0 /* RS rate controller disabled by default */
+#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_SCALE '1'
#define DRBD_C_DELAY_TARGET_MIN 1
#define DRBD_C_DELAY_TARGET_MAX 100
#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_SCALE '1'
#define DRBD_C_FILL_TARGET_MIN 0
#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
-#define DRBD_C_FILL_TARGET_DEF 0 /* By default disabled -> controlled by delay_target */
+#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
-#define DRBD_C_MAX_RATE_MIN 250 /* kByte/sec */
+#define DRBD_C_MAX_RATE_MIN 250
#define DRBD_C_MAX_RATE_MAX (4 << 20)
#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */
+#define DRBD_C_MIN_RATE_MIN 0
#define DRBD_C_MIN_RATE_MAX (4 << 20)
-#define DRBD_C_MIN_RATE_DEF 4096
+#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
#define DRBD_CONG_FILL_MIN 0
#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_SCALE 's' /* sectors */
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX
#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF
+#define DRBD_CONG_EXTENTS_SCALE DRBD_AL_EXTENTS_SCALE
+
+#define DRBD_PROTOCOL_DEF DRBD_PROT_C
+
+#define DRBD_DISK_BARRIER_DEF 0
+#define DRBD_DISK_FLUSHES_DEF 1
+#define DRBD_DISK_DRAIN_DEF 1
+#define DRBD_MD_FLUSHES_DEF 1
+#define DRBD_TCP_CORK_DEF 1
+#define DRBD_AL_UPDATES_DEF 1
+
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
+#define DRBD_ALWAYS_ASBP_DEF 0
+#define DRBD_USE_RLE_DEF 1
-#undef RANGE
#endif
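
Every tunable above now comes as a MIN/MAX/DEF triple plus a one-character SCALE ('1' for plain numbers, 'k' for kilobytes, 's' for sectors). A minimal sketch of how a consumer might clamp user input against these macros follows; the function name is invented, and DRBD's real option handling is macro-generated, so this only illustrates the intended use of the limits:

/* Hedged sketch: clamp a requested resync rate to the limits above.
 * The returned value is in DRBD_RESYNC_RATE_SCALE ('k') units. */
#include <linux/drbd_limits.h>

static inline unsigned int clamp_resync_rate(long long requested)
{
	if (requested < DRBD_RESYNC_RATE_MIN)
		return DRBD_RESYNC_RATE_MIN;
	if (requested > DRBD_RESYNC_RATE_MAX)
		return DRBD_RESYNC_RATE_MAX;
	return requested;
}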
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
deleted file mode 100644
index a8706f08ab3..00000000000
--- a/include/linux/drbd_nl.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- PAKET( name,
- TYPE ( pn, pr, member )
- ...
- )
-
- You may never reissue one of the pn arguments
-*/
-
-#if !defined(NL_PACKET) || !defined(NL_STRING) || !defined(NL_INTEGER) || !defined(NL_BIT) || !defined(NL_INT64)
-#error "The macros NL_PACKET, NL_STRING, NL_INTEGER, NL_INT64 and NL_BIT needs to be defined"
-#endif
-
-NL_PACKET(primary, 1,
- NL_BIT( 1, T_MAY_IGNORE, primary_force)
-)
-
-NL_PACKET(secondary, 2, )
-
-NL_PACKET(disk_conf, 3,
- NL_INT64( 2, T_MAY_IGNORE, disk_size)
- NL_STRING( 3, T_MANDATORY, backing_dev, 128)
- NL_STRING( 4, T_MANDATORY, meta_dev, 128)
- NL_INTEGER( 5, T_MANDATORY, meta_dev_idx)
- NL_INTEGER( 6, T_MAY_IGNORE, on_io_error)
- NL_INTEGER( 7, T_MAY_IGNORE, fencing)
- NL_BIT( 37, T_MAY_IGNORE, use_bmbv)
- NL_BIT( 53, T_MAY_IGNORE, no_disk_flush)
- NL_BIT( 54, T_MAY_IGNORE, no_md_flush)
- /* 55 max_bio_size was available in 8.2.6rc2 */
- NL_INTEGER( 56, T_MAY_IGNORE, max_bio_bvecs)
- NL_BIT( 57, T_MAY_IGNORE, no_disk_barrier)
- NL_BIT( 58, T_MAY_IGNORE, no_disk_drain)
- NL_INTEGER( 89, T_MAY_IGNORE, disk_timeout)
-)
-
-NL_PACKET(detach, 4,
- NL_BIT( 88, T_MANDATORY, detach_force)
-)
-
-NL_PACKET(net_conf, 5,
- NL_STRING( 8, T_MANDATORY, my_addr, 128)
- NL_STRING( 9, T_MANDATORY, peer_addr, 128)
- NL_STRING( 10, T_MAY_IGNORE, shared_secret, SHARED_SECRET_MAX)
- NL_STRING( 11, T_MAY_IGNORE, cram_hmac_alg, SHARED_SECRET_MAX)
- NL_STRING( 44, T_MAY_IGNORE, integrity_alg, SHARED_SECRET_MAX)
- NL_INTEGER( 14, T_MAY_IGNORE, timeout)
- NL_INTEGER( 15, T_MANDATORY, wire_protocol)
- NL_INTEGER( 16, T_MAY_IGNORE, try_connect_int)
- NL_INTEGER( 17, T_MAY_IGNORE, ping_int)
- NL_INTEGER( 18, T_MAY_IGNORE, max_epoch_size)
- NL_INTEGER( 19, T_MAY_IGNORE, max_buffers)
- NL_INTEGER( 20, T_MAY_IGNORE, unplug_watermark)
- NL_INTEGER( 21, T_MAY_IGNORE, sndbuf_size)
- NL_INTEGER( 22, T_MAY_IGNORE, ko_count)
- NL_INTEGER( 24, T_MAY_IGNORE, after_sb_0p)
- NL_INTEGER( 25, T_MAY_IGNORE, after_sb_1p)
- NL_INTEGER( 26, T_MAY_IGNORE, after_sb_2p)
- NL_INTEGER( 39, T_MAY_IGNORE, rr_conflict)
- NL_INTEGER( 40, T_MAY_IGNORE, ping_timeo)
- NL_INTEGER( 67, T_MAY_IGNORE, rcvbuf_size)
- NL_INTEGER( 81, T_MAY_IGNORE, on_congestion)
- NL_INTEGER( 82, T_MAY_IGNORE, cong_fill)
- NL_INTEGER( 83, T_MAY_IGNORE, cong_extents)
- /* 59 addr_family was available in GIT, never released */
- NL_BIT( 60, T_MANDATORY, mind_af)
- NL_BIT( 27, T_MAY_IGNORE, want_lose)
- NL_BIT( 28, T_MAY_IGNORE, two_primaries)
- NL_BIT( 41, T_MAY_IGNORE, always_asbp)
- NL_BIT( 61, T_MAY_IGNORE, no_cork)
- NL_BIT( 62, T_MANDATORY, auto_sndbuf_size)
- NL_BIT( 70, T_MANDATORY, dry_run)
-)
-
-NL_PACKET(disconnect, 6,
- NL_BIT( 84, T_MAY_IGNORE, force)
-)
-
-NL_PACKET(resize, 7,
- NL_INT64( 29, T_MAY_IGNORE, resize_size)
- NL_BIT( 68, T_MAY_IGNORE, resize_force)
- NL_BIT( 69, T_MANDATORY, no_resync)
-)
-
-NL_PACKET(syncer_conf, 8,
- NL_INTEGER( 30, T_MAY_IGNORE, rate)
- NL_INTEGER( 31, T_MAY_IGNORE, after)
- NL_INTEGER( 32, T_MAY_IGNORE, al_extents)
-/* NL_INTEGER( 71, T_MAY_IGNORE, dp_volume)
- * NL_INTEGER( 72, T_MAY_IGNORE, dp_interval)
- * NL_INTEGER( 73, T_MAY_IGNORE, throttle_th)
- * NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th)
- * feature will be reimplemented differently with 8.3.9 */
- NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX)
- NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32)
- NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)
- NL_BIT( 65, T_MAY_IGNORE, use_rle)
- NL_INTEGER( 75, T_MAY_IGNORE, on_no_data)
- NL_INTEGER( 76, T_MAY_IGNORE, c_plan_ahead)
- NL_INTEGER( 77, T_MAY_IGNORE, c_delay_target)
- NL_INTEGER( 78, T_MAY_IGNORE, c_fill_target)
- NL_INTEGER( 79, T_MAY_IGNORE, c_max_rate)
- NL_INTEGER( 80, T_MAY_IGNORE, c_min_rate)
-)
-
-NL_PACKET(invalidate, 9, )
-NL_PACKET(invalidate_peer, 10, )
-NL_PACKET(pause_sync, 11, )
-NL_PACKET(resume_sync, 12, )
-NL_PACKET(suspend_io, 13, )
-NL_PACKET(resume_io, 14, )
-NL_PACKET(outdate, 15, )
-NL_PACKET(get_config, 16, )
-NL_PACKET(get_state, 17,
- NL_INTEGER( 33, T_MAY_IGNORE, state_i)
-)
-
-NL_PACKET(get_uuids, 18,
- NL_STRING( 34, T_MAY_IGNORE, uuids, (UI_SIZE*sizeof(__u64)))
- NL_INTEGER( 35, T_MAY_IGNORE, uuids_flags)
-)
-
-NL_PACKET(get_timeout_flag, 19,
- NL_BIT( 36, T_MAY_IGNORE, use_degraded)
-)
-
-NL_PACKET(call_helper, 20,
- NL_STRING( 38, T_MAY_IGNORE, helper, 32)
-)
-
-/* Tag nr 42 already allocated in drbd-8.1 development. */
-
-NL_PACKET(sync_progress, 23,
- NL_INTEGER( 43, T_MAY_IGNORE, sync_progress)
-)
-
-NL_PACKET(dump_ee, 24,
- NL_STRING( 45, T_MAY_IGNORE, dump_ee_reason, 32)
- NL_STRING( 46, T_MAY_IGNORE, seen_digest, SHARED_SECRET_MAX)
- NL_STRING( 47, T_MAY_IGNORE, calc_digest, SHARED_SECRET_MAX)
- NL_INT64( 48, T_MAY_IGNORE, ee_sector)
- NL_INT64( 49, T_MAY_IGNORE, ee_block_id)
- NL_STRING( 50, T_MAY_IGNORE, ee_data, 32 << 10)
-)
-
-NL_PACKET(start_ov, 25,
- NL_INT64( 66, T_MAY_IGNORE, start_sector)
-)
-
-NL_PACKET(new_c_uuid, 26,
- NL_BIT( 63, T_MANDATORY, clear_bm)
-)
-
-#ifdef NL_RESPONSE
-NL_RESPONSE(return_code_only, 27)
-#endif
-
-#undef NL_PACKET
-#undef NL_INTEGER
-#undef NL_INT64
-#undef NL_BIT
-#undef NL_STRING
-#undef NL_RESPONSE
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
deleted file mode 100644
index 82de1f9e48b..00000000000
--- a/include/linux/drbd_tag_magic.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef DRBD_TAG_MAGIC_H
-#define DRBD_TAG_MAGIC_H
-
-#define TT_END 0
-#define TT_REMOVED 0xE000
-
-/* declare packet_type enums */
-enum packet_types {
-#define NL_PACKET(name, number, fields) P_ ## name = number,
-#define NL_RESPONSE(name, number) P_ ## name = number,
-#define NL_INTEGER(pn, pr, member)
-#define NL_INT64(pn, pr, member)
-#define NL_BIT(pn, pr, member)
-#define NL_STRING(pn, pr, member, len)
-#include <linux/drbd_nl.h>
- P_nl_after_last_packet,
-};
-
-/* These struct are used to deduce the size of the tag lists: */
-#define NL_PACKET(name, number, fields) \
- struct name ## _tag_len_struct { fields };
-#define NL_INTEGER(pn, pr, member) \
- int member; int tag_and_len ## member;
-#define NL_INT64(pn, pr, member) \
- __u64 member; int tag_and_len ## member;
-#define NL_BIT(pn, pr, member) \
- unsigned char member:1; int tag_and_len ## member;
-#define NL_STRING(pn, pr, member, len) \
- unsigned char member[len]; int member ## _len; \
- int tag_and_len ## member;
-#include <linux/drbd_nl.h>
-
-/* declare tag-list-sizes */
-static const int tag_list_sizes[] = {
-#define NL_PACKET(name, number, fields) 2 fields ,
-#define NL_INTEGER(pn, pr, member) + 4 + 4
-#define NL_INT64(pn, pr, member) + 4 + 8
-#define NL_BIT(pn, pr, member) + 4 + 1
-#define NL_STRING(pn, pr, member, len) + 4 + (len)
-#include <linux/drbd_nl.h>
-};
-
-/* The two highest bits are used for the tag type */
-#define TT_MASK 0xC000
-#define TT_INTEGER 0x0000
-#define TT_INT64 0x4000
-#define TT_BIT 0x8000
-#define TT_STRING 0xC000
-/* The next bit indicates if processing of the tag is mandatory */
-#define T_MANDATORY 0x2000
-#define T_MAY_IGNORE 0x0000
-#define TN_MASK 0x1fff
-/* The remaining 13 bits are used to enumerate the tags */
-
-#define tag_type(T) ((T) & TT_MASK)
-#define tag_number(T) ((T) & TN_MASK)
-
-/* declare tag enums */
-#define NL_PACKET(name, number, fields) fields
-enum drbd_tags {
-#define NL_INTEGER(pn, pr, member) T_ ## member = pn | TT_INTEGER | pr ,
-#define NL_INT64(pn, pr, member) T_ ## member = pn | TT_INT64 | pr ,
-#define NL_BIT(pn, pr, member) T_ ## member = pn | TT_BIT | pr ,
-#define NL_STRING(pn, pr, member, len) T_ ## member = pn | TT_STRING | pr ,
-#include <linux/drbd_nl.h>
-};
-
-struct tag {
- const char *name;
- int type_n_flags;
- int max_len;
-};
-
-/* declare tag names */
-#define NL_PACKET(name, number, fields) fields
-static const struct tag tag_descriptions[] = {
-#define NL_INTEGER(pn, pr, member) [ pn ] = { #member, TT_INTEGER | pr, sizeof(int) },
-#define NL_INT64(pn, pr, member) [ pn ] = { #member, TT_INT64 | pr, sizeof(__u64) },
-#define NL_BIT(pn, pr, member) [ pn ] = { #member, TT_BIT | pr, sizeof(int) },
-#define NL_STRING(pn, pr, member, len) [ pn ] = { #member, TT_STRING | pr, (len) },
-#include <linux/drbd_nl.h>
-};
-
-#endif
diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/include/linux/dvb/Kbuild
+++ /dev/null
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
deleted file mode 100644
index 0be6d8f2b52..00000000000
--- a/include/linux/dvb/dmx.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * dmx.h
- *
- * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
- * & Ralph Metzler <ralph@convergence.de>
- * for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#ifndef _DVBDMX_H_
-#define _DVBDMX_H_
-
-#include <linux/time.h>
-#include <uapi/linux/dvb/dmx.h>
-
-#endif /*_DVBDMX_H_*/
diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h
deleted file mode 100644
index 85c20d92569..00000000000
--- a/include/linux/dvb/video.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * video.h
- *
- * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de>
- * & Ralph Metzler <ralph@convergence.de>
- * for convergence integrated media GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * as published by the Free Software Foundation; either version 2.1
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#ifndef _DVBVIDEO_H_
-#define _DVBVIDEO_H_
-
-#include <linux/compiler.h>
-#include <uapi/linux/dvb/video.h>
-
-#endif /*_DVBVIDEO_H_*/
diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h
new file mode 100644
index 00000000000..111f46d83d0
--- /dev/null
+++ b/include/linux/earlycpio.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_EARLYCPIO_H
+#define _LINUX_EARLYCPIO_H
+
+#include <linux/types.h>
+
+#define MAX_CPIO_FILE_NAME 18
+
+struct cpio_data {
+ void *data;
+ size_t size;
+ char name[MAX_CPIO_FILE_NAME];
+};
+
+struct cpio_data find_cpio_data(const char *path, void *data, size_t len,
+ long *offset);
+
+#endif /* _LINUX_EARLYCPIO_H */
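
find_cpio_data() scans an uncompressed cpio archive (such as an initrd image) for entries under a path prefix and hands back a cpio_data descriptor. The sketch below is a hedged usage example: the path string is made up, and the assumption that .data is NULL when nothing matched reflects how the early-boot callers appear to use it, not a guarantee stated in this header.

/* Hedged sketch: look for firmware-like entries inside an initrd image. */
#include <linux/earlycpio.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init scan_initrd(void *initrd, size_t initrd_size)
{
	long offset = 0;
	struct cpio_data cd = find_cpio_data("kernel/firmware/", initrd,
					     initrd_size, &offset);

	if (cd.data)	/* assumption: .data is NULL on no match */
		pr_info("found %s, %zu bytes\n", cd.name, cd.size);
}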
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c47ec36f3f3..8b84916dc67 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -29,7 +29,12 @@
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
typedef unsigned long efi_status_t;
typedef u8 efi_bool_t;
@@ -196,6 +201,77 @@ typedef struct {
void *create_event_ex;
} efi_boot_services_t;
+typedef enum {
+ EfiPciIoWidthUint8,
+ EfiPciIoWidthUint16,
+ EfiPciIoWidthUint32,
+ EfiPciIoWidthUint64,
+ EfiPciIoWidthFifoUint8,
+ EfiPciIoWidthFifoUint16,
+ EfiPciIoWidthFifoUint32,
+ EfiPciIoWidthFifoUint64,
+ EfiPciIoWidthFillUint8,
+ EfiPciIoWidthFillUint16,
+ EfiPciIoWidthFillUint32,
+ EfiPciIoWidthFillUint64,
+ EfiPciIoWidthMaximum
+} EFI_PCI_IO_PROTOCOL_WIDTH;
+
+typedef enum {
+ EfiPciIoAttributeOperationGet,
+ EfiPciIoAttributeOperationSet,
+ EfiPciIoAttributeOperationEnable,
+ EfiPciIoAttributeOperationDisable,
+ EfiPciIoAttributeOperationSupported,
+ EfiPciIoAttributeOperationMaximum
+} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+
+
+typedef struct {
+ void *read;
+ void *write;
+} efi_pci_io_protocol_access_t;
+
+typedef struct {
+ void *poll_mem;
+ void *poll_io;
+ efi_pci_io_protocol_access_t mem;
+ efi_pci_io_protocol_access_t io;
+ efi_pci_io_protocol_access_t pci;
+ void *copy_mem;
+ void *map;
+ void *unmap;
+ void *allocate_buffer;
+ void *free_buffer;
+ void *flush;
+ void *get_location;
+ void *attributes;
+ void *get_bar_attributes;
+ void *set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol;
+
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
+#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
+#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
+#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
+#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
+#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
+#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -658,6 +734,7 @@ struct efivars {
spinlock_t lock;
struct list_head list;
struct kset *kset;
+ struct kobject *kobject;
struct bin_attribute *new_var, *del_var;
const struct efivar_operations *ops;
struct efivar_entry *walk_entry;
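
All of the EFI_* error codes added in this hunk follow the existing convention of setting the top bit of efi_status_t, so a generic error test only needs that bit. A small hedged helper (the name is invented; it is not part of efi.h):

/* Hedged sketch: treat any efi_status_t with the high bit set as an error. */
#include <linux/efi.h>

static inline bool efi_status_is_error(efi_status_t status)
{
	return status & (1UL << (BITS_PER_LONG - 1));
}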
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 12291a7ee27..5b9b5b31718 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -83,6 +83,11 @@ enum fid_type {
* 64 bit parent inode number.
*/
FILEID_NILFS_WITH_PARENT = 0x62,
+
+ /*
+ * Filesystems must not use 0xff as a file ID.
+ */
+ FILEID_INVALID = 0xff,
};
struct fid {
@@ -177,6 +182,8 @@ struct export_operations {
int (*commit_metadata)(struct inode *inode);
};
+extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
+ int *max_len, struct inode *parent);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
int *max_len, int connectable);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 2c26c14cd71..fcb51c88319 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -23,7 +23,9 @@
#ifndef __LINUX_EXTCON_H__
#define __LINUX_EXTCON_H__
+#include <linux/device.h>
#include <linux/notifier.h>
+#include <linux/sysfs.h>
#define SUPPORTED_CABLE_MAX 32
#define CABLE_NAME_MAX 30
@@ -74,12 +76,12 @@ struct extcon_cable;
/**
* struct extcon_dev - An extcon device represents one external connector.
- * @name The name of this extcon device. Parent device name is used
+ * @name: The name of this extcon device. Parent device name is used
* if NULL.
- * @supported_cable Array of supported cable names ending with NULL.
+ * @supported_cable: Array of supported cable names ending with NULL.
* If supported_cable is NULL, cable name related APIs
* are disabled.
- * @mutually_exclusive Array of mutually exclusive set of cables that cannot
+ * @mutually_exclusive: Array of mutually exclusive sets of cables that cannot
* be attached simultaneously. The array should be
* ending with NULL or be NULL (no mutually exclusive
* cables). For example, if it is { 0x7, 0x30, 0}, then,
@@ -87,21 +89,21 @@ struct extcon_cable;
* be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_name An optional callback to override the method to print the
+ * @print_name: An optional callback to override the method to print the
* name of the extcon device.
- * @print_state An optional callback to override the method to print the
+ * @print_state: An optional callback to override the method to print the
* status of the extcon device.
- * @dev Device of this extcon. Do not provide at register-time.
- * @state Attach/detach state of this extcon. Do not provide at
+ * @dev: Device of this extcon. Do not provide at register-time.
+ * @state: Attach/detach state of this extcon. Do not provide at
* register-time
- * @nh Notifier for the state change events from this extcon
- * @entry To support list of extcon devices so that users can search
+ * @nh: Notifier for the state change events from this extcon
+ * @entry: To support list of extcon devices so that users can search
* for extcon devices based on the extcon name.
- * @lock
- * @max_supported Internal value to store the number of cables.
- * @extcon_dev_type Device_type struct to provide attribute_groups
+ * @lock:
+ * @max_supported: Internal value to store the number of cables.
+ * @extcon_dev_type: Device_type struct to provide attribute_groups
* customized for each extcon device.
- * @cables Sysfs subdirectories. Each represents one cable.
+ * @cables: Sysfs subdirectories. Each represents one cable.
*
* In most cases, users only need to provide "User initializing data" of
* this struct when registering an extcon. In some exceptional cases,
@@ -137,12 +139,12 @@ struct extcon_dev {
/**
* struct extcon_cable - An internal data for each cable of extcon device.
- * @edev The extcon device
- * @cable_index Index of this cable in the edev
- * @attr_g Attribute group for the cable
- * @attr_name "name" sysfs entry
- * @attr_state "state" sysfs entry
- * @attrs Array pointing to attr_name and attr_state for attr_g
+ * @edev: The extcon device
+ * @cable_index: Index of this cable in the edev
+ * @attr_g: Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attrs: Array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -158,11 +160,11 @@ struct extcon_cable {
/**
* struct extcon_specific_cable_nb - An internal data for
* extcon_register_interest().
- * @internal_nb a notifier block bridging extcon notifier and cable notifier.
- * @user_nb user provided notifier block for events from a specific cable.
- * @cable_index the target cable.
- * @edev the target extcon device.
- * @previous_value the saved previous event value.
+ * @internal_nb: a notifier block bridging extcon notifier and cable notifier.
+ * @user_nb: user provided notifier block for events from a specific cable.
+ * @cable_index: the target cable.
+ * @edev: the target extcon device.
+ * @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
struct notifier_block internal_nb;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
new file mode 100644
index 00000000000..f9a12f6243a
--- /dev/null
+++ b/include/linux/f2fs_fs.h
@@ -0,0 +1,413 @@
+/**
+ * include/linux/f2fs_fs.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_F2FS_FS_H
+#define _LINUX_F2FS_FS_H
+
+#include <linux/pagemap.h>
+#include <linux/types.h>
+
+#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
+#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */
+#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */
+#define F2FS_BLKSIZE 4096 /* support only 4KB block */
+#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+
+#define NULL_ADDR 0x0U
+#define NEW_ADDR -1U
+
+#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
+#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
+#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+
+/* This flag is used by node and meta inodes, and by recovery */
+#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
+
+/*
+ * For further optimization on multi-head logs, the on-disk layout supports a
+ * maximum of 16 logs by default, which is expected to cover all cases well
+ * enough. The implementation currently uses no more than 6 logs.
+ * Half the logs are used for nodes, and the other half are used for data.
+ */
+#define MAX_ACTIVE_LOGS 16
+#define MAX_ACTIVE_NODE_LOGS 8
+#define MAX_ACTIVE_DATA_LOGS 8
+
+/*
+ * For superblock
+ */
+struct f2fs_super_block {
+ __le32 magic; /* Magic Number */
+ __le16 major_ver; /* Major Version */
+ __le16 minor_ver; /* Minor Version */
+ __le32 log_sectorsize; /* log2 sector size in bytes */
+ __le32 log_sectors_per_block; /* log2 # of sectors per block */
+ __le32 log_blocksize; /* log2 block size in bytes */
+ __le32 log_blocks_per_seg; /* log2 # of blocks per segment */
+ __le32 segs_per_sec; /* # of segments per section */
+ __le32 secs_per_zone; /* # of sections per zone */
+ __le32 checksum_offset; /* checksum offset inside super block */
+ __le64 block_count; /* total # of user blocks */
+ __le32 section_count; /* total # of sections */
+ __le32 segment_count; /* total # of segments */
+ __le32 segment_count_ckpt; /* # of segments for checkpoint */
+ __le32 segment_count_sit; /* # of segments for SIT */
+ __le32 segment_count_nat; /* # of segments for NAT */
+ __le32 segment_count_ssa; /* # of segments for SSA */
+ __le32 segment_count_main; /* # of segments for main area */
+ __le32 segment0_blkaddr; /* start block address of segment 0 */
+ __le32 cp_blkaddr; /* start block address of checkpoint */
+ __le32 sit_blkaddr; /* start block address of SIT */
+ __le32 nat_blkaddr; /* start block address of NAT */
+ __le32 ssa_blkaddr; /* start block address of SSA */
+ __le32 main_blkaddr; /* start block address of main area */
+ __le32 root_ino; /* root inode number */
+ __le32 node_ino; /* node inode number */
+ __le32 meta_ino; /* meta inode number */
+ __u8 uuid[16]; /* 128-bit uuid for volume */
+ __le16 volume_name[512]; /* volume name */
+ __le32 extension_count; /* # of extensions below */
+ __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+} __packed;
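
The superblock stores log2-encoded geometry, and the header pins the block size to 4KB via F2FS_BLKSIZE. A hedged sketch of a basic sanity check a reader of the on-disk superblock might perform; the function name is invented, and the expected magic value is passed in because it is not defined in this header:

/* Hedged sketch: minimal consistency check on an on-disk superblock. */
#include <linux/kernel.h>
#include <linux/f2fs_fs.h>

static bool f2fs_sb_looks_sane(const struct f2fs_super_block *sb,
			       __u32 expected_magic)
{
	if (le32_to_cpu(sb->magic) != expected_magic)
		return false;
	/* the on-disk format only supports 4KB blocks */
	if ((1U << le32_to_cpu(sb->log_blocksize)) != F2FS_BLKSIZE)
		return false;
	return true;
}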
+
+/*
+ * For checkpoint
+ */
+#define CP_ERROR_FLAG 0x00000008
+#define CP_COMPACT_SUM_FLAG 0x00000004
+#define CP_ORPHAN_PRESENT_FLAG 0x00000002
+#define CP_UMOUNT_FLAG 0x00000001
+
+struct f2fs_checkpoint {
+ __le64 checkpoint_ver; /* checkpoint block version number */
+ __le64 user_block_count; /* # of user blocks */
+ __le64 valid_block_count; /* # of valid blocks in main area */
+ __le32 rsvd_segment_count; /* # of reserved segments for gc */
+ __le32 overprov_segment_count; /* # of overprovision segments */
+ __le32 free_segment_count; /* # of free segments in main area */
+
+ /* information of current node segments */
+ __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS];
+ __le16 cur_node_blkoff[MAX_ACTIVE_NODE_LOGS];
+ /* information of current data segments */
+ __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS];
+ __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS];
+ __le32 ckpt_flags; /* Flags : umount and journal_present */
+ __le32 cp_pack_total_block_count; /* total # of one cp pack */
+ __le32 cp_pack_start_sum; /* start block number of data summary */
+ __le32 valid_node_count; /* Total number of valid nodes */
+ __le32 valid_inode_count; /* Total number of valid inodes */
+ __le32 next_free_nid; /* Next free node number */
+ __le32 sit_ver_bitmap_bytesize; /* Default value 64 */
+ __le32 nat_ver_bitmap_bytesize; /* Default value 256 */
+ __le32 checksum_offset; /* checksum offset inside cp block */
+ __le64 elapsed_time; /* mounted time */
+ /* allocation type of current segment */
+ unsigned char alloc_type[MAX_ACTIVE_LOGS];
+
+ /* SIT and NAT version bitmap */
+ unsigned char sit_nat_version_bitmap[1];
+} __packed;
+
+/*
+ * For orphan inode management
+ */
+#define F2FS_ORPHANS_PER_BLOCK 1020
+
+struct f2fs_orphan_block {
+ __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
+ __le32 reserved; /* reserved */
+ __le16 blk_addr; /* block index in current CP */
+ __le16 blk_count; /* Number of orphan inode blocks in CP */
+ __le32 entry_count; /* Total number of orphan nodes in current CP */
+ __le32 check_sum; /* CRC32 for orphan inode block */
+} __packed;
+
+/*
+ * For NODE structure
+ */
+struct f2fs_extent {
+ __le32 fofs; /* start file offset of the extent */
+ __le32 blk_addr; /* start block address of the extent */
+ __le32 len; /* length of the extent */
+} __packed;
+
+#define F2FS_MAX_NAME_LEN 256
+#define ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+
+struct f2fs_inode {
+ __le16 i_mode; /* file mode */
+ __u8 i_advise; /* file hints */
+ __u8 i_reserved; /* reserved */
+ __le32 i_uid; /* user ID */
+ __le32 i_gid; /* group ID */
+ __le32 i_links; /* links count */
+ __le64 i_size; /* file size in bytes */
+ __le64 i_blocks; /* file size in blocks */
+ __le64 i_atime; /* access time */
+ __le64 i_ctime; /* change time */
+ __le64 i_mtime; /* modification time */
+ __le32 i_atime_nsec; /* access time in nano scale */
+ __le32 i_ctime_nsec; /* change time in nano scale */
+ __le32 i_mtime_nsec; /* modification time in nano scale */
+ __le32 i_generation; /* file version (for NFS) */
+ __le32 i_current_depth; /* only for directory depth */
+ __le32 i_xattr_nid; /* nid to save xattr */
+ __le32 i_flags; /* file attributes */
+ __le32 i_pino; /* parent inode number */
+ __le32 i_namelen; /* file name length */
+ __u8 i_name[F2FS_MAX_NAME_LEN]; /* file name for SPOR */
+
+ struct f2fs_extent i_ext; /* caching a largest extent */
+
+ __le32 i_addr[ADDRS_PER_INODE]; /* Pointers to data blocks */
+
+ __le32 i_nid[5]; /* direct(2), indirect(2),
+ double_indirect(1) node id */
+} __packed;
+
+struct direct_node {
+ __le32 addr[ADDRS_PER_BLOCK]; /* array of data block addresses */
+} __packed;
+
+struct indirect_node {
+ __le32 nid[NIDS_PER_BLOCK]; /* array of node ids */
+} __packed;
+
+enum {
+ COLD_BIT_SHIFT = 0,
+ FSYNC_BIT_SHIFT,
+ DENT_BIT_SHIFT,
+ OFFSET_BIT_SHIFT
+};
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+struct f2fs_node {
+ /* can be one of three types: inode, direct, and indirect types */
+ union {
+ struct f2fs_inode i;
+ struct direct_node dn;
+ struct indirect_node in;
+ };
+ struct node_footer footer;
+} __packed;
+
+/*
+ * For NAT entries
+ */
+#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+
+struct f2fs_nat_entry {
+ __u8 version; /* latest version of cached nat entry */
+ __le32 ino; /* inode number */
+ __le32 block_addr; /* block address */
+} __packed;
+
+struct f2fs_nat_block {
+ struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK];
+} __packed;
+
+/*
+ * For SIT entries
+ *
+ * Each segment is 2MB in size by default, so the bitmap recording the
+ * validity of the blocks therein occupies 64 bytes (512 bits).
+ * This cannot be changed.
+ */
+#define SIT_VBLOCK_MAP_SIZE 64
+#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+
+/*
+ * Note that f2fs_sit_entry->vblocks has the following bit-field information.
+ * [15:10] : allocation type such as CURSEG_XXXX_TYPE
+ * [9:0] : valid block count
+ */
+#define SIT_VBLOCKS_SHIFT 10
+#define SIT_VBLOCKS_MASK ((1 << SIT_VBLOCKS_SHIFT) - 1)
+#define GET_SIT_VBLOCKS(raw_sit) \
+ (le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK)
+#define GET_SIT_TYPE(raw_sit) \
+ ((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK) \
+ >> SIT_VBLOCKS_SHIFT)
+
+struct f2fs_sit_entry {
+ __le16 vblocks; /* reference above */
+ __u8 valid_map[SIT_VBLOCK_MAP_SIZE]; /* bitmap for valid blocks */
+ __le64 mtime; /* segment age for cleaning */
+} __packed;
+
+struct f2fs_sit_block {
+ struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK];
+} __packed;
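
The comment above documents vblocks as a packed bit-field ([15:10] allocation type, [9:0] valid block count), decoded by GET_SIT_VBLOCKS() and GET_SIT_TYPE(). For illustration, the inverse packing operation looks like this (the helper is invented, not part of the header):

/* Hedged sketch: pack a SIT entry's vblocks field, the inverse of the
 * GET_SIT_VBLOCKS()/GET_SIT_TYPE() macros above. */
static inline __le16 f2fs_pack_vblocks(unsigned int type,
				       unsigned int valid_blocks)
{
	return cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
			   (valid_blocks & SIT_VBLOCKS_MASK));
}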
+
+/*
+ * For segment summary
+ *
+ * One summary block contains exactly 512 summary entries, which represent
+ * exactly one 2MB segment by default. The basic units cannot be changed.
+ *
+ * NOTE: For initializing fields, you must use set_summary
+ *
+ * - If data page, nid represents dnode's nid
+ * - If node page, nid represents the node page's nid.
+ *
+ * The ofs_in_node is used only by data pages. It is the offset from the
+ * beginning of the node page at which the data block address is found.
+ * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
+ */
+#define ENTRIES_IN_SUM 512
+#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
+#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
+
+/* a summary entry for a 4KB-sized block in a segment */
+struct f2fs_summary {
+ __le32 nid; /* parent node id */
+ union {
+ __u8 reserved[3];
+ struct {
+ __u8 version; /* node version number */
+ __le16 ofs_in_node; /* block index in parent node */
+ } __packed;
+ };
+} __packed;
+
+/* summary block type, node or data, is stored to the summary_footer */
+#define SUM_TYPE_NODE (1)
+#define SUM_TYPE_DATA (0)
+
+struct summary_footer {
+ unsigned char entry_type; /* SUM_TYPE_XXX */
+ __u32 check_sum; /* summary checksum */
+} __packed;
+
+#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+ SUM_ENTRY_SIZE)
+#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
+ sizeof(struct nat_journal_entry))
+#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
+ sizeof(struct nat_journal_entry))
+#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
+ sizeof(struct sit_journal_entry))
+#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
+ sizeof(struct sit_journal_entry))
+/*
+ * frequently updated NAT/SIT entries can be stored in the spare area in
+ * summary blocks
+ */
+enum {
+ NAT_JOURNAL = 0,
+ SIT_JOURNAL
+};
+
+struct nat_journal_entry {
+ __le32 nid;
+ struct f2fs_nat_entry ne;
+} __packed;
+
+struct nat_journal {
+ struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES];
+ __u8 reserved[NAT_JOURNAL_RESERVED];
+} __packed;
+
+struct sit_journal_entry {
+ __le32 segno;
+ struct f2fs_sit_entry se;
+} __packed;
+
+struct sit_journal {
+ struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES];
+ __u8 reserved[SIT_JOURNAL_RESERVED];
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ union {
+ __le16 n_nats;
+ __le16 n_sits;
+ };
+ /* spare area is used by NAT or SIT journals */
+ union {
+ struct nat_journal nat_j;
+ struct sit_journal sit_j;
+ };
+ struct summary_footer footer;
+} __packed;
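
SUMMARY_SIZE and SUM_FOOTER_SIZE are documented above as the sizes of the packed summary and footer structs, and the whole summary block is meant to fill exactly one 4KB block (512 * 7 bytes of entries, the journal spare area, and the 5-byte footer). A hedged sketch of compile-time checks that make this arithmetic explicit:

/* Hedged sketch: assert the documented sizes at compile time. */
#include <linux/bug.h>
#include <linux/f2fs_fs.h>

static inline void f2fs_summary_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct f2fs_summary) != SUMMARY_SIZE);
	BUILD_BUG_ON(sizeof(struct summary_footer) != SUM_FOOTER_SIZE);
	BUILD_BUG_ON(sizeof(struct f2fs_summary_block) != F2FS_BLKSIZE);
}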
+
+/*
+ * For directory operations
+ */
+#define F2FS_DOT_HASH 0
+#define F2FS_DDOT_HASH F2FS_DOT_HASH
+#define F2FS_MAX_HASH (~((0x3ULL) << 62))
+#define F2FS_HASH_COL_BIT ((0x1ULL) << 63)
+
+typedef __le32 f2fs_hash_t;
+
+/* One directory entry slot covers an 8-byte-long file name */
+#define F2FS_NAME_LEN 8
+#define F2FS_NAME_LEN_BITS 3
+
+#define GET_DENTRY_SLOTS(x) ((x + F2FS_NAME_LEN - 1) >> F2FS_NAME_LEN_BITS)
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK 214
+
+/* MAX level for dir lookup */
+#define MAX_DIR_HASH_DEPTH 63
+
+#define SIZE_OF_DIR_ENTRY 11 /* by byte */
+#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
+ BITS_PER_BYTE)
+#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+ F2FS_NAME_LEN) * \
+ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+
+/* One directory entry slot representing an F2FS_NAME_LEN-sized file name */
+struct f2fs_dir_entry {
+ __le32 hash_code; /* hash code of file name */
+ __le32 ino; /* inode number */
+ __le16 name_len; /* length of file name */
+ __u8 file_type; /* file type */
+} __packed;
+
+/* 4KB-sized directory entry block */
+struct f2fs_dentry_block {
+ /* validity bitmap for directory entries in each block */
+ __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
+ __u8 reserved[SIZE_OF_RESERVED];
+ struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK];
+ __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_NAME_LEN];
+} __packed;
+
+/* file types used in inode_info->flags */
+enum {
+ F2FS_FT_UNKNOWN,
+ F2FS_FT_REG_FILE,
+ F2FS_FT_DIR,
+ F2FS_FT_CHRDEV,
+ F2FS_FT_BLKDEV,
+ F2FS_FT_FIFO,
+ F2FS_FT_SOCK,
+ F2FS_FT_SYMLINK,
+ F2FS_FT_MAX
+};
+
+#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 408fb1e77a0..7617ee04f06 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -44,6 +44,7 @@ struct vm_area_struct;
struct vfsmount;
struct cred;
struct swap_info_struct;
+struct seq_file;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -1444,10 +1445,6 @@ static inline void sb_start_intwrite(struct super_block *sb)
extern bool inode_owner_or_capable(const struct inode *inode);
-/* not quite ready to be deprecated, but... */
-extern void lock_super(struct super_block *);
-extern void unlock_super(struct super_block *);
-
/*
* VFS helper functions..
*/
@@ -1543,6 +1540,7 @@ struct file_operations {
int (*setlease)(struct file *, long, struct file_lock **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
+ int (*show_fdinfo)(struct seq_file *m, struct file *f);
};
struct inode_operations {
@@ -1563,7 +1561,6 @@ struct inode_operations {
int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
- void (*truncate) (struct inode *);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1578,8 +1575,6 @@ struct inode_operations {
umode_t create_mode, int *opened);
} ____cacheline_aligned;
-struct seq_file;
-
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
@@ -1810,6 +1805,8 @@ struct file_system_type {
#define FS_REQUIRES_DEV 1
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
+#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
+#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
@@ -1997,6 +1994,7 @@ struct filename {
bool separate; /* should "name" be freed? */
};
+extern long vfs_truncate(struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
@@ -2286,9 +2284,9 @@ extern ino_t find_inode_number(struct dentry *, struct qstr *);
#include <linux/err.h>
/* needed for stackable file system support */
-extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
@@ -2396,11 +2394,11 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
+extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
- int origin, loff_t maxsize, loff_t eof);
+ int whence, loff_t maxsize, loff_t eof);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
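
The new ->show_fdinfo() hook added to struct file_operations lets a driver dump per-descriptor state into /proc/<pid>/fdinfo/<fd> via a seq_file. A hedged sketch of a driver wiring it up; all names here (foo_*, the events field) are invented for illustration:

/* Hedged sketch: minimal ->show_fdinfo() implementation. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct foo_ctx {
	unsigned long events;
};

static int foo_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct foo_ctx *ctx = f->private_data;

	seq_printf(m, "foo-events:\t%lu\n", ctx->events);
	return 0;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.show_fdinfo	= foo_show_fdinfo,
};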
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index ce31408b1e4..5dfa0aa216b 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -75,6 +75,16 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+enum fscache_operation_state {
+ FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
+ FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
+ FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
+ FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
+ FSCACHE_OP_ST_COMPLETE, /* Op is complete */
+ FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
+ FSCACHE_OP_ST_DEAD /* Op is now dead */
+};
+
struct fscache_operation {
struct work_struct work; /* record for async ops */
struct list_head pend_link; /* link in object->pending_ops */
@@ -86,10 +96,10 @@ struct fscache_operation {
#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done by the issuing thread, not pool */
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEAD 6 /* op is now dead */
-#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
+#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
+#define FSCACHE_OP_KEEP_FLAGS 0x0070 /* flags to keep when repurposing an op */
+ enum fscache_operation_state state;
atomic_t usage;
unsigned debug_id; /* debugging ID */
@@ -106,6 +116,7 @@ extern atomic_t fscache_op_debug_id;
extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_op_complete(struct fscache_operation *, bool);
extern void fscache_put_operation(struct fscache_operation *);
/**
@@ -122,6 +133,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
{
INIT_WORK(&op->work, fscache_op_work_func);
atomic_set(&op->usage, 1);
+ op->state = FSCACHE_OP_ST_INITIALISED;
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
op->processor = processor;
op->release = release;
@@ -138,6 +150,7 @@ struct fscache_retrieval {
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
+ unsigned n_pages; /* number of pages to be retrieved */
};
typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
@@ -174,8 +187,22 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
}
/**
+ * fscache_retrieval_complete - Record (partial) completion of a retrieval
+ * @op: The retrieval operation affected
+ * @n_pages: The number of pages to account for
+ */
+static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
+ int n_pages)
+{
+ op->n_pages -= n_pages;
+ if (op->n_pages <= 0)
+ fscache_op_complete(&op->op, true);
+}
+
+/**
* fscache_put_retrieval - Drop a reference to a retrieval operation
* @op: The retrieval operation affected
+ * @n_pages: The number of pages to account for
*
* Drop a reference to a retrieval operation.
*/
@@ -227,6 +254,9 @@ struct fscache_cache_ops {
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
+ /* Invalidate an object */
+ void (*invalidate_object)(struct fscache_operation *op);
+
/* discard the resources pinned by an object and effect retirement if
* necessary */
void (*drop_object)(struct fscache_object *object);
@@ -301,11 +331,30 @@ struct fscache_cookie {
#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_WAITING_ON_READS 6 /* T if cookie is waiting on reads */
+#define FSCACHE_COOKIE_INVALIDATING 7 /* T if cookie is being invalidated */
};
extern struct fscache_cookie fscache_fsdef_index;
/*
+ * Event list for fscache_object::{event_mask,events}
+ */
+enum {
+ FSCACHE_OBJECT_EV_REQUEUE, /* T if object should be requeued */
+ FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
+ FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
+ FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
+ FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
+ FSCACHE_OBJECT_EV_RELEASE, /* T if netfs requested object release */
+ FSCACHE_OBJECT_EV_RETIRE, /* T if netfs requested object retirement */
+ FSCACHE_OBJECT_EV_WITHDRAW, /* T if cache requested object withdrawal */
+ NR_FSCACHE_OBJECT_EVENTS
+};
+
+#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+
+/*
* on-disk cache file or index handle
*/
struct fscache_object {
@@ -317,6 +366,7 @@ struct fscache_object {
/* active states */
FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
FSCACHE_OBJECT_ACTIVE, /* object is usable */
+ FSCACHE_OBJECT_INVALIDATING, /* object is invalidating */
FSCACHE_OBJECT_UPDATING, /* object is updating */
/* terminal states */
@@ -332,10 +382,10 @@ struct fscache_object {
int debug_id; /* debugging ID */
int n_children; /* number of child objects */
- int n_ops; /* number of ops outstanding on object */
+ int n_ops; /* number of extant ops on object */
int n_obj_ops; /* number of object ops outstanding on object */
int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued */
+ int n_exclusive; /* number of exclusive ops queued or in progress */
atomic_t n_reads; /* number of read ops in progress */
spinlock_t lock; /* state and operations lock */
@@ -343,14 +393,6 @@ struct fscache_object {
unsigned long event_mask; /* events this object is interested in */
unsigned long events; /* events to be processed by this object
* (order is important - using fls) */
-#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
-#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
-#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
-#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
-#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
-#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
-#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
-#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/
unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -504,6 +546,9 @@ extern void fscache_withdraw_cache(struct fscache_cache *cache);
extern void fscache_io_error(struct fscache_cache *cache);
+extern void fscache_mark_page_cached(struct fscache_retrieval *op,
+ struct page *page);
+
extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 9ec20dec335..7a086235da4 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -135,14 +135,14 @@ struct fscache_cookie_def {
*/
void (*put_context)(void *cookie_netfs_data, void *context);
- /* indicate pages that now have cache metadata retained
- * - this function should mark the specified pages as now being cached
- * - the pages will have been marked with PG_fscache before this is
+ /* indicate a page that now has cache metadata retained
+ * - this function should mark the specified page as now being cached
+ * - the page will have been marked with PG_fscache before this is
* called, so this is optional
*/
- void (*mark_pages_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct pagevec *cached_pvec);
+ void (*mark_page_cached)(void *cookie_netfs_data,
+ struct address_space *mapping,
+ struct page *page);
/* indicate the cookie is no longer cached
* - this function is called when the backing store currently caching
@@ -185,6 +185,8 @@ extern struct fscache_cookie *__fscache_acquire_cookie(
extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
extern void __fscache_update_cookie(struct fscache_cookie *);
extern int __fscache_attr_changed(struct fscache_cookie *);
+extern void __fscache_invalidate(struct fscache_cookie *);
+extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
struct page *,
fscache_rw_complete_t,
@@ -390,6 +392,42 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
}
/**
+ * fscache_invalidate - Notify cache that an object needs invalidation
+ * @cookie: The cookie representing the cache object
+ *
+ * Notify the cache that an object needs to be invalidated and that it
+ * should abort any retrievals or stores it is doing on the cache. The object
+ * is then marked non-caching until such time as the invalidation is complete.
+ *
+ * This can be called with spinlocks held.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_invalidate(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_invalidate(cookie);
+}
+
+/**
+ * fscache_wait_on_invalidate - Wait for invalidation to complete
+ * @cookie: The cookie representing the cache object
+ *
+ * Wait for the invalidation of an object to complete.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_wait_on_invalidate(cookie);
+}
+
+/**
* fscache_reserve_space - Reserve data space for a cached object
* @cookie: The cookie representing the cache object
* @i_size: The amount of space to be reserved
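
The two new wrappers above let a network filesystem kick off and then wait for cache invalidation when it learns the server's copy of an object has changed. A hedged usage sketch; the surrounding function names are invented:

/* Hedged sketch: typical call sites for the new invalidation helpers. */
#include <linux/fscache.h>

static void netfs_note_remote_change(struct fscache_cookie *cookie)
{
	/* safe with spinlocks held, per the kernel-doc above */
	fscache_invalidate(cookie);
}

static void netfs_before_cache_read(struct fscache_cookie *cookie)
{
	/* blocks until the cache has finished discarding stale data */
	fscache_wait_on_invalidate(cookie);
}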
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index 11c16a1fb9e..a1e8277120c 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -47,6 +47,15 @@ struct aoi_display_offset {
#define MFB_GET_PIXFMT _IOR('M', 8, __u32)
/*
+ * The MPC5121 BSP comes with a gamma_set utility that initializes the
+ * gamma table. Unfortunately, it uses bad values for the IOCTL commands,
+ * but there's nothing we can do about it now. These ioctls are only
+ * supported on the MPC5121.
+ */
+#define MFB_SET_GAMMA _IOW('M', 1, __u8)
+#define MFB_GET_GAMMA _IOR('M', 1, __u8)
+
+/*
* The original definitions of MFB_SET_PIXFMT and MFB_GET_PIXFMT used the
* wrong value for 'size' field of the ioctl. The current macros above use the
* right size, but we still need to provide backwards compatibility, at least
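
For completeness, a hedged userspace sketch of issuing the MPC5121-only gamma ioctls defined above. The gamma table layout and size are not described by this header, so the buffer here is just an opaque pointer supplied by the caller; whether the header is exported to userspace at this point is also an assumption.

/* Hedged sketch: pass a caller-provided gamma table to the DIU driver. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fsl-diu-fb.h>

static int set_gamma(int fb_fd, uint8_t *gamma_table)
{
	return ioctl(fb_fd, MFB_SET_GAMMA, gamma_table);
}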
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 63d966d5c2e..d5b0910d496 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -88,9 +88,10 @@ struct fsnotify_event_private_data;
* if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
- * freeing-mark - this means that a mark has been flagged to die when everything
- * finishes using it. The function is supplied with what must be a
- * valid group and inode to use to clean up.
+ * freeing_mark - called when a mark is being destroyed for some reason. The group
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
@@ -141,12 +142,14 @@ struct fsnotify_group {
unsigned int priority;
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
- spinlock_t mark_lock; /* protect marks_list */
+ struct mutex mark_mutex; /* protect marks_list */
atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
struct list_head marks_list; /* all inode marks for this group */
+ struct fasync_struct *fsn_fa; /* async notification */
+
/* groups can define private fields here or use the void *private */
union {
void *private;
@@ -155,7 +158,6 @@ struct fsnotify_group {
spinlock_t idr_lock;
struct idr idr;
u32 last_wd;
- struct fasync_struct *fa; /* async notification */
struct user_struct *user;
} inotify_data;
#endif
@@ -287,7 +289,6 @@ struct fsnotify_mark {
struct fsnotify_inode_mark i;
struct fsnotify_vfsmount_mark m;
};
- struct list_head free_g_list; /* tmp list used when freeing this mark */
__u32 ignored_mask; /* events types to ignore */
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
@@ -360,11 +361,16 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* called from fsnotify listeners, such as fanotify or dnotify */
-/* get a reference to an existing or create a new group */
+/* create a new group */
extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+/* get reference to a group */
+extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
-
+/* destroy group */
+extern void fsnotify_destroy_group(struct fsnotify_group *group);
+/* fasync handler function */
+extern int fsnotify_fasync(int fd, struct file *file, int on);
/* take a reference to an event */
extern void fsnotify_get_event(struct fsnotify_event *event);
extern void fsnotify_put_event(struct fsnotify_event *event);
@@ -405,8 +411,13 @@ extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
-/* given a mark, flag it to be freed when all references are dropped */
-extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
+extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups);
+/* given a group and a mark, flag mark to be freed when all references are dropped */
+extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
+extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
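As a hedged illustration of the freeing_mark contract described above (names assumed, not taken from this patch), a backend drops the group's reference in the callback:

	/* Hypothetical backend callback: the group holds a reference on each
	 * mark; drop it here when the mark is being destroyed. */
	static void example_freeing_mark(struct fsnotify_mark *mark,
					 struct fsnotify_group *group)
	{
		fsnotify_put_mark(mark);
	}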
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a52f2f4fe03..92691d85c32 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -394,7 +394,7 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
-loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
+loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
int ftrace_regex_release(struct inode *inode, struct file *file);
void __init
@@ -559,7 +559,7 @@ static inline ssize_t ftrace_filter_write(struct file *file, const char __user *
size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos) { return -ENODEV; }
-static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
+static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
{
return -ENODEV;
}
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 4f440b3e89f..79b8bba1936 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -88,10 +88,14 @@ struct disk_stats {
};
#define PARTITION_META_INFO_VOLNAMELTH 64
-#define PARTITION_META_INFO_UUIDLTH 16
+/*
+ * Enough for the string representation of any kind of UUID plus the trailing NUL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
+ */
+#define PARTITION_META_INFO_UUIDLTH 37
struct partition_meta_info {
- u8 uuid[PARTITION_META_INFO_UUIDLTH]; /* always big endian */
+ char uuid[PARTITION_META_INFO_UUIDLTH];
u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
new file mode 100644
index 00000000000..023bc346b87
--- /dev/null
+++ b/include/linux/genl_magic_func.h
@@ -0,0 +1,422 @@
+#ifndef GENL_MAGIC_FUNC_H
+#define GENL_MAGIC_FUNC_H
+
+#include <linux/genl_magic_struct.h>
+
+/*
+ * Magic: declare tla policy {{{1
+ * Magic: declare nested policies
+ * {{{2
+ */
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ [tag_name] = { .type = NLA_NESTED },
+
+static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static struct nla_policy s_name ## _nl_policy[] __read_mostly = \
+{ s_fields };
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, _type, __get, \
+ __put, __is_signed) \
+ [attr_nr] = { .type = nla_type },
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, _type, maxlen, \
+ __get, __put, __is_signed) \
+ [attr_nr] = { .type = nla_type, \
+ .len = maxlen - (nla_type == NLA_NUL_STRING) },
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#ifndef __KERNEL__
+#ifndef pr_info
+#define pr_info(args...) fprintf(stderr, args);
+#endif
+#endif
+
+#ifdef GENL_MAGIC_DEBUG
+static void dprint_field(const char *dir, int nla_type,
+ const char *name, void *valp)
+{
+ __u64 val = valp ? *(__u32 *)valp : 1;
+ switch (nla_type) {
+ case NLA_U8: val = (__u8)val;
+ case NLA_U16: val = (__u16)val;
+ case NLA_U32: val = (__u32)val;
+ pr_info("%s attr %s: %d 0x%08x\n", dir,
+ name, (int)val, (unsigned)val);
+ break;
+ case NLA_U64:
+ val = *(__u64*)valp;
+ pr_info("%s attr %s: %lld 0x%08llx\n", dir,
+ name, (long long)val, (unsigned long long)val);
+ break;
+ case NLA_FLAG:
+ if (val)
+ pr_info("%s attr %s: set\n", dir, name);
+ break;
+ }
+}
+
+static void dprint_array(const char *dir, int nla_type,
+ const char *name, const char *val, unsigned len)
+{
+ switch (nla_type) {
+ case NLA_NUL_STRING:
+ if (len && val[len-1] == '\0')
+ len--;
+ pr_info("%s attr %s: [len:%u] '%s'\n", dir, name, len, val);
+ break;
+ default:
+ /* we can always show 4 bytes,
+ * that's what nlattrs are aligned to. */
+ pr_info("%s attr %s: [len:%u] %02x%02x%02x%02x ...\n",
+ dir, name, len, val[0], val[1], val[2], val[3]);
+ }
+}
+
+#define DPRINT_TLA(a, op, b) pr_info("%s %s %s\n", a, op, b);
+
+/* Name is a member field name of the struct s.
+ * If s is NULL (only parsing, no copy requested in *_from_attrs()),
+ * nla is supposed to point to the attribute containing the information
+ * corresponding to that struct member. */
+#define DPRINT_FIELD(dir, nla_type, name, s, nla) \
+ do { \
+ if (s) \
+ dprint_field(dir, nla_type, #name, &s->name); \
+ else if (nla) \
+ dprint_field(dir, nla_type, #name, \
+ (nla_type == NLA_FLAG) ? NULL \
+ : nla_data(nla)); \
+ } while (0)
+
+#define DPRINT_ARRAY(dir, nla_type, name, s, nla) \
+ do { \
+ if (s) \
+ dprint_array(dir, nla_type, #name, \
+ s->name, s->name ## _len); \
+ else if (nla) \
+ dprint_array(dir, nla_type, #name, \
+ nla_data(nla), nla_len(nla)); \
+ } while (0)
+#else
+#define DPRINT_TLA(a, op, b) do {} while (0)
+#define DPRINT_FIELD(dir, nla_type, name, s, nla) do {} while (0)
+#define DPRINT_ARRAY(dir, nla_type, name, s, nla) do {} while (0)
+#endif
+
+/*
+ * Magic: provide conversion functions {{{1
+ * populate struct from attribute table:
+ * {{{2
+ */
+
+/* processing of generic netlink messages is serialized.
+ * use one static buffer for parsing of nested attributes */
+static struct nlattr *nested_attr_tb[128];
+
+#ifndef BUILD_BUG_ON
+/* Force a compilation error if condition is true */
+#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or wherever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+#endif
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+/* *_from_attrs functions are static, but potentially unused */ \
+static int __ ## s_name ## _from_attrs(struct s_name *s, \
+ struct genl_info *info, bool exclude_invariants) \
+{ \
+ const int maxtype = ARRAY_SIZE(s_name ## _nl_policy)-1; \
+ struct nlattr *tla = info->attrs[tag_number]; \
+ struct nlattr **ntb = nested_attr_tb; \
+ struct nlattr *nla; \
+ int err; \
+ BUILD_BUG_ON(ARRAY_SIZE(s_name ## _nl_policy) > ARRAY_SIZE(nested_attr_tb)); \
+ if (!tla) \
+ return -ENOMSG; \
+ DPRINT_TLA(#s_name, "<=-", #tag_name); \
+ err = drbd_nla_parse_nested(ntb, maxtype, tla, s_name ## _nl_policy); \
+ if (err) \
+ return err; \
+ \
+ s_fields \
+ return 0; \
+} __attribute__((unused)) \
+static int s_name ## _from_attrs(struct s_name *s, \
+ struct genl_info *info) \
+{ \
+ return __ ## s_name ## _from_attrs(s, info, false); \
+} __attribute__((unused)) \
+static int s_name ## _from_attrs_for_change(struct s_name *s, \
+ struct genl_info *info) \
+{ \
+ return __ ## s_name ## _from_attrs(s, info, true); \
+} __attribute__((unused)) \
+
+#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
+ nla = ntb[attr_nr]; \
+ if (nla) { \
+ if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
+ pr_info("<< must not change invariant attr: %s\n", #name); \
+ return -EEXIST; \
+ } \
+ assignment; \
+ } else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \
+ /* attribute missing from payload, */ \
+ /* which was expected */ \
+ } else if ((attr_flag) & DRBD_F_REQUIRED) { \
+ pr_info("<< missing attr: %s\n", #name); \
+ return -ENOMSG; \
+ }
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ __assign(attr_nr, attr_flag, name, nla_type, type, \
+ if (s) \
+ s->name = __get(nla); \
+ DPRINT_FIELD("<<", nla_type, name, s, nla))
+
+/* validate_nla() already checked nla_len <= maxlen appropriately. */
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ __assign(attr_nr, attr_flag, name, nla_type, type, \
+ if (s) \
+ s->name ## _len = \
+ __get(s->name, nla, maxlen); \
+ DPRINT_ARRAY("<<", nla_type, name, s, nla))
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+/*
+ * Magic: define op number to op name mapping {{{1
+ * {{{2
+ */
+const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+{
+ switch (cmd) {
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+ case op_num: return #op_name;
+#include GENL_MAGIC_INCLUDE_FILE
+ default:
+ return "unknown";
+ }
+}
+
+#ifdef __KERNEL__
+#include <linux/stringify.h>
+/*
+ * Magic: define genl_ops {{{1
+ * {{{2
+ */
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+{ \
+ handler \
+ .cmd = op_name, \
+ .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \
+},
+
+#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+/*
+ * Define the genl_family, multicast groups, {{{1
+ * and provide register/unregister functions.
+ * {{{2
+ */
+#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+static struct genl_family ZZZ_genl_family __read_mostly = {
+ .id = GENL_ID_GENERATE,
+ .name = __stringify(GENL_MAGIC_FAMILY),
+ .version = GENL_MAGIC_VERSION,
+#ifdef GENL_MAGIC_FAMILY_HDRSZ
+ .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
+#endif
+ .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+};
+
+/*
+ * Magic: define multicast groups
+ * Magic: define multicast group registration helper
+ */
+#undef GENL_mc_group
+#define GENL_mc_group(group) \
+static struct genl_multicast_group \
+CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \
+ .name = #group, \
+}; \
+static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+ struct sk_buff *skb, gfp_t flags) \
+{ \
+ unsigned int group_id = \
+ CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \
+ if (!group_id) \
+ return -EINVAL; \
+ return genlmsg_multicast(skb, 0, group_id, flags); \
+}
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+{
+ int err = genl_register_family_with_ops(&ZZZ_genl_family,
+ ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops));
+ if (err)
+ return err;
+#undef GENL_mc_group
+#define GENL_mc_group(group) \
+ err = genl_register_mc_group(&ZZZ_genl_family, \
+ &CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \
+ if (err) \
+ goto fail; \
+ else \
+ pr_info("%s: mcg %s: %u\n", #group, \
+ __stringify(GENL_MAGIC_FAMILY), \
+ CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id);
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+ return 0;
+fail:
+ genl_unregister_family(&ZZZ_genl_family);
+ return err;
+}
+
+void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+{
+ genl_unregister_family(&ZZZ_genl_family);
+}
+
+/*
+ * Magic: provide conversion functions {{{1
+ * populate skb from struct.
+ * {{{2
+ */
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static int s_name ## _to_skb(struct sk_buff *skb, struct s_name *s, \
+ const bool exclude_sensitive) \
+{ \
+ struct nlattr *tla = nla_nest_start(skb, tag_number); \
+ if (!tla) \
+ goto nla_put_failure; \
+ DPRINT_TLA(#s_name, "-=>", #tag_name); \
+ s_fields \
+ nla_nest_end(skb, tla); \
+ return 0; \
+ \
+nla_put_failure: \
+ if (tla) \
+ nla_nest_cancel(skb, tla); \
+ return -EMSGSIZE; \
+} \
+static inline int s_name ## _to_priv_skb(struct sk_buff *skb, \
+ struct s_name *s) \
+{ \
+ return s_name ## _to_skb(skb, s, 0); \
+} \
+static inline int s_name ## _to_unpriv_skb(struct sk_buff *skb, \
+ struct s_name *s) \
+{ \
+ return s_name ## _to_skb(skb, s, 1); \
+}
+
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \
+ DPRINT_FIELD(">>", nla_type, name, s, NULL); \
+ if (__put(skb, attr_nr, s->name)) \
+ goto nla_put_failure; \
+ }
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \
+ DPRINT_ARRAY(">>",nla_type, name, s, NULL); \
+ if (__put(skb, attr_nr, min_t(int, maxlen, \
+ s->name ## _len + (nla_type == NLA_NUL_STRING)),\
+ s->name)) \
+ goto nla_put_failure; \
+ }
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+
+/* Functions for initializing structs to default values. */
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed)
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed)
+#undef __u32_field_def
+#define __u32_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __s32_field_def
+#define __s32_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __flg_field_def
+#define __flg_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __str_field_def
+#define __str_field_def(attr_nr, attr_flag, name, maxlen) \
+ memset(x->name, 0, sizeof(x->name)); \
+ x->name ## _len = 0;
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static void set_ ## s_name ## _defaults(struct s_name *x) __attribute__((unused)); \
+static void set_ ## s_name ## _defaults(struct s_name *x) { \
+s_fields \
+}
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#endif /* __KERNEL__ */
+
+/* }}}1 */
+#endif /* GENL_MAGIC_FUNC_H */
+/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
new file mode 100644
index 00000000000..eecd19b3700
--- /dev/null
+++ b/include/linux/genl_magic_struct.h
@@ -0,0 +1,277 @@
+#ifndef GENL_MAGIC_STRUCT_H
+#define GENL_MAGIC_STRUCT_H
+
+#ifndef GENL_MAGIC_FAMILY
+# error "you need to define GENL_MAGIC_FAMILY before inclusion"
+#endif
+
+#ifndef GENL_MAGIC_VERSION
+# error "you need to define GENL_MAGIC_VERSION before inclusion"
+#endif
+
+#ifndef GENL_MAGIC_INCLUDE_FILE
+# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
+#endif
+
+#include <linux/genetlink.h>
+#include <linux/types.h>
+
+#define CONCAT__(a,b) a ## b
+#define CONCAT_(a,b) CONCAT__(a,b)
+
+extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+
+/*
+ * Extension of genl attribute validation policies {{{2
+ */
+
+/*
+ * @DRBD_GENLA_F_MANDATORY: By default, netlink ignores attributes it does not
+ * know about. This flag can be set in nlattr->nla_type to indicate that this
+ * attribute must not be ignored.
+ *
+ * We check and remove this flag in drbd_nla_check_mandatory() before
+ * validating the attribute types and lengths via nla_parse_nested().
+ */
+#define DRBD_GENLA_F_MANDATORY (1 << 14)
+
+/*
+ * Flags specific to drbd and not visible at the netlink layer, used in
+ * <struct>_from_attrs and <struct>_to_skb:
+ *
+ * @DRBD_F_REQUIRED: Attribute is required; a request without this attribute is
+ * invalid.
+ *
+ * @DRBD_F_SENSITIVE: Attribute includes sensitive information and must not be
+ * included in unprivileged get requests or broadcasts.
+ *
+ * @DRBD_F_INVARIANT: Attribute is set when an object is initially created, but
+ * cannot subsequently be changed.
+ */
+#define DRBD_F_REQUIRED (1 << 0)
+#define DRBD_F_SENSITIVE (1 << 1)
+#define DRBD_F_INVARIANT (1 << 2)
+
+#define __nla_type(x) ((__u16)((x) & NLA_TYPE_MASK & ~DRBD_GENLA_F_MANDATORY))
+
+/* }}}1
+ * MAGIC
+ * multi-include macro expansion magic starts here
+ */
+
+/* MAGIC helpers {{{2 */
+
+/* possible field types */
+#define __flg_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U8, char, \
+ nla_get_u8, nla_put_u8, false)
+#define __u8_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U8, unsigned char, \
+ nla_get_u8, nla_put_u8, false)
+#define __u16_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U16, __u16, \
+ nla_get_u16, nla_put_u16, false)
+#define __u32_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U32, __u32, \
+ nla_get_u32, nla_put_u32, false)
+#define __s32_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U32, __s32, \
+ nla_get_u32, nla_put_u32, true)
+#define __u64_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \
+ nla_get_u64, nla_put_u64, false)
+#define __str_field(attr_nr, attr_flag, name, maxlen) \
+ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
+ nla_strlcpy, nla_put, false)
+#define __bin_field(attr_nr, attr_flag, name, maxlen) \
+ __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
+ nla_memcpy, nla_put, false)
+
+/* fields with default values */
+#define __flg_field_def(attr_nr, attr_flag, name, default) \
+ __flg_field(attr_nr, attr_flag, name)
+#define __u32_field_def(attr_nr, attr_flag, name, default) \
+ __u32_field(attr_nr, attr_flag, name)
+#define __s32_field_def(attr_nr, attr_flag, name, default) \
+ __s32_field(attr_nr, attr_flag, name)
+#define __str_field_def(attr_nr, attr_flag, name, maxlen) \
+ __str_field(attr_nr, attr_flag, name, maxlen)
+
+#define GENL_op_init(args...) args
+#define GENL_doit(handler) \
+ .doit = handler, \
+ .flags = GENL_ADMIN_PERM,
+#define GENL_dumpit(handler) \
+ .dumpit = handler, \
+ .flags = GENL_ADMIN_PERM,
+
+/* }}}1
+ * Magic: define the enum symbols for genl_ops
+ * Magic: define the enum symbols for top level attributes
+ * Magic: define the enum symbols for nested attributes
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list) \
+ op_name = op_num,
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+ op_name = op_num,
+
+enum {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ tag_name = tag_number,
+
+enum {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+enum { \
+ s_fields \
+};
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, \
+ __get, __put, __is_signed) \
+ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, \
+ maxlen, __get, __put, __is_signed) \
+ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1
+ * Magic: compile time assert unique numbers for operations
+ * Magic: -"- unique numbers for top level attributes
+ * Magic: -"- unique numbers for nested attributes
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list) \
+ case op_name:
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list) \
+ case op_name:
+
+static inline void ct_assert_unique_operations(void)
+{
+ switch (0) {
+#include GENL_MAGIC_INCLUDE_FILE
+ ;
+ }
+}
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ case tag_number:
+
+static inline void ct_assert_unique_top_level_attributes(void)
+{
+ switch (0) {
+#include GENL_MAGIC_INCLUDE_FILE
+ ;
+ }
+}
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
+{ \
+ switch (0) { \
+ s_fields \
+ ; \
+ } \
+}
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ case attr_nr:
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ case attr_nr:
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1
+ * Magic: declare structs
+ * struct <name> {
+ * fields
+ * };
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+struct s_name { s_fields };
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ type name;
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ type name[maxlen]; \
+ __u32 name ## _len;
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+enum { \
+ s_fields \
+};
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ is_signed) \
+ F_ ## name ## _IS_SIGNED = is_signed,
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, is_signed) \
+ F_ ## name ## _IS_SIGNED = is_signed,
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1 */
+#endif /* GENL_MAGIC_STRUCT_H */
+/* vim: set foldmethod=marker nofoldenable : */
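A hedged sketch of how these multi-include headers are meant to be consumed (modelled on the DRBD usage they were written for; the family, tag and field names below are assumptions):

	/* Hypothetical consumer: the same definition file is expanded several
	 * times into nla policies, structs, enums and conversion helpers. */
	#define GENL_MAGIC_FAMILY	example
	#define GENL_MAGIC_VERSION	1
	#define GENL_MAGIC_INCLUDE_FILE	"example_genl.h"
	#include <linux/genl_magic_func.h>

	/* where example_genl.h contains, for instance: */
	GENL_struct(EXAMPLE_NLA_CONFIG, 1, example_config,
		__u32_field_def(1, DRBD_F_REQUIRED, timeout, 30)
		__str_field_def(2, DRBD_GENLA_F_MANDATORY, name, 32)
	)

	/* This yields struct example_config { __u32 timeout; char name[32];
	 * __u32 name_len; }, example_config_from_attrs(),
	 * example_config_to_skb(), and the example_genl_register() /
	 * example_genl_unregister() helpers. */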
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 31e8041274f..0f615eb23d0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,10 +30,12 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_KMEMCG 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
+/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
* GFP bitmasks..
@@ -88,6 +90,7 @@ struct vm_area_struct;
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
+#define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
/*
@@ -364,6 +367,9 @@ extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
extern void free_hot_cold_page_list(struct list_head *list, int cold);
+extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
+extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
+
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 24df9e70406..61c97ae22e0 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
- (C) 2002 William Lee Irwin III, IBM */
+ (C) 2002 Nadia Yvette Chambers, IBM */
/*
* Knuth recommends primes in approximately golden ratio to the maximum
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/include/linux/hdlc/Kbuild
+++ /dev/null
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index ca8d7e94eb3..55f277372fe 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -19,7 +19,6 @@
#ifndef _HID_SENSORS_IDS_H
#define _HID_SENSORS_IDS_H
-#define HID_UP_SENSOR 0x00200000
#define HID_MAX_PHY_DEVICES 0xFF
/* Accel 3D (200073) */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index c076041a069..7330a0fef0c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -167,6 +167,7 @@ struct hid_item {
#define HID_UP_MSVENDOR 0xff000000
#define HID_UP_CUSTOM 0x00ff0000
#define HID_UP_LOGIVENDOR 0xffbc0000
+#define HID_UP_SENSOR 0x00200000
#define HID_USAGE 0x0000ffff
@@ -292,6 +293,7 @@ struct hid_item {
*/
#define HID_GROUP_GENERIC 0x0001
#define HID_GROUP_MULTITOUCH 0x0002
+#define HID_GROUP_SENSOR_HUB 0x0003
/*
* This is the global environment of the parser. This information is
@@ -342,6 +344,7 @@ struct hid_collection {
struct hid_usage {
unsigned hid; /* hid usage code */
unsigned collection_index; /* index into collection array */
+ unsigned usage_index; /* index into usage array */
/* hidinput data */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
@@ -684,6 +687,7 @@ struct hid_ll_driver {
extern int hid_debug;
+extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
@@ -706,6 +710,7 @@ int hid_input_report(struct hid_device *, int type, u8 *, int, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
+__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
@@ -716,6 +721,7 @@ int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
+s32 hid_snto32(__u32 value, unsigned n);
/**
* hid_map_usage - map usage input bits
diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/include/linux/hsi/Kbuild
+++ /dev/null
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1af47755245..1d76f8ca90f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -31,7 +31,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot);
+ unsigned long addr, pgprot_t newprot,
+ int prot_numa);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -39,6 +40,7 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
@@ -78,6 +80,9 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
(__vma)->vm_flags & VM_HUGEPAGE))
+#define transparent_hugepage_use_zero_page() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow() \
(transparent_hugepage_flags & \
@@ -95,21 +100,25 @@ extern int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
-extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
-#define split_huge_page_pmd(__mm, __pmd) \
+extern void __split_huge_page_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd);
+#define split_huge_page_pmd(__vma, __address, __pmd) \
do { \
pmd_t *____pmd = (__pmd); \
if (unlikely(pmd_trans_huge(*____pmd))) \
- __split_huge_page_pmd(__mm, ____pmd); \
+ __split_huge_page_pmd(__vma, __address, \
+ ____pmd); \
} while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
do { \
pmd_t *____pmd = (__pmd); \
- anon_vma_lock(__anon_vma); \
+ anon_vma_lock_write(__anon_vma); \
anon_vma_unlock(__anon_vma); \
BUG_ON(pmd_trans_splitting(*____pmd) || \
pmd_trans_huge(*____pmd)); \
} while (0)
+extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd);
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
@@ -163,6 +172,10 @@ static inline struct page *compound_trans_head(struct page *page)
}
return page;
}
+
+extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -177,10 +190,12 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-#define split_huge_page_pmd(__mm, __pmd) \
+#define split_huge_page_pmd(__vma, __address, __pmd) \
do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
do { } while (0)
+#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
+ do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
@@ -199,6 +214,13 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd,
{
return 0;
}
+
+static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+ return 0;
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3e7fa1acf09..0c80d3f57a5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -87,7 +87,7 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pmd);
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -132,7 +132,11 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
-#define hugetlb_change_protection(vma, address, end, newprot)
+static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot)
+{
+ return 0;
+}
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index d73878c694b..ce8217f7b5c 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -62,7 +62,7 @@ extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page);
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
-extern int hugetlb_cgroup_file_init(int idx) __init;
+extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
struct page *newhpage);
@@ -111,9 +111,8 @@ hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
return;
}
-static inline int __init hugetlb_cgroup_file_init(int idx)
+static inline void hugetlb_cgroup_file_init(void)
{
- return 0;
}
static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
index 92a0dc75bc7..babe0cf6d56 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/i2c-omap.h
@@ -20,8 +20,6 @@
#define OMAP_I2C_FLAG_NO_FIFO BIT(0)
#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1)
#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2)
-#define OMAP_I2C_FLAG_RESET_REGS_POSTIDLE BIT(3)
-#define OMAP_I2C_FLAG_APPLY_ERRATA_I207 BIT(4)
#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5)
#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6)
/* how the CPU address bus must be translated for I2C unit access */
diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h
new file mode 100644
index 00000000000..60e411d764d
--- /dev/null
+++ b/include/linux/i2c/i2c-hid.h
@@ -0,0 +1,35 @@
+/*
+ * HID over I2C protocol implementation
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef __LINUX_I2C_HID_H
+#define __LINUX_I2C_HID_H
+
+#include <linux/types.h>
+
+/**
+ * struct i2chid_platform_data - used by hid over i2c implementation.
+ * @hid_descriptor_address: i2c register where the HID descriptor is stored.
+ *
+ * Note that it is the responsibility of the platform driver (or the acpi 5.0
+ * driver) to set up the irq related to the gpio in the struct i2c_board_info.
+ * The platform driver should also set up the gpio according to the device:

+ *
+ * A typical example is the following:
+ * irq = gpio_to_irq(intr_gpio);
+ * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
+ * gpio_request(intr_gpio, "elan-irq");
+ * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
+ */
+struct i2c_hid_platform_data {
+ u16 hid_descriptor_address;
+};
+
+#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h
index beda7081aea..06e3089795f 100644
--- a/include/linux/i2c/i2c-sh_mobile.h
+++ b/include/linux/i2c/i2c-sh_mobile.h
@@ -5,6 +5,7 @@
struct i2c_sh_mobile_platform_data {
unsigned long bus_speed;
+ unsigned int clks_per_count;
};
#endif /* __I2C_SH_MOBILE_H__ */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 9a5e2846232..1ff54b114ef 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -39,52 +39,51 @@
* address each module uses within a given i2c slave.
*/
-/* Slave 0 (i2c address 0x48) */
-#define TWL4030_MODULE_USB 0x00
-
-/* Slave 1 (i2c address 0x49) */
-#define TWL4030_MODULE_AUDIO_VOICE 0x01
-#define TWL4030_MODULE_GPIO 0x02
-#define TWL4030_MODULE_INTBR 0x03
-#define TWL4030_MODULE_PIH 0x04
-#define TWL4030_MODULE_TEST 0x05
-
-/* Slave 2 (i2c address 0x4a) */
-#define TWL4030_MODULE_KEYPAD 0x06
-#define TWL4030_MODULE_MADC 0x07
-#define TWL4030_MODULE_INTERRUPTS 0x08
-#define TWL4030_MODULE_LED 0x09
-#define TWL4030_MODULE_MAIN_CHARGE 0x0A
-#define TWL4030_MODULE_PRECHARGE 0x0B
-#define TWL4030_MODULE_PWM0 0x0C
-#define TWL4030_MODULE_PWM1 0x0D
-#define TWL4030_MODULE_PWMA 0x0E
-#define TWL4030_MODULE_PWMB 0x0F
-
-#define TWL5031_MODULE_ACCESSORY 0x10
-#define TWL5031_MODULE_INTERRUPTS 0x11
-
-/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x12
-#define TWL4030_MODULE_INT 0x13
-#define TWL4030_MODULE_PM_MASTER 0x14
-#define TWL4030_MODULE_PM_RECEIVER 0x15
-#define TWL4030_MODULE_RTC 0x16
-#define TWL4030_MODULE_SECURED_REG 0x17
+enum twl4030_module_ids {
+ TWL4030_MODULE_USB = 0, /* Slave 0 (i2c address 0x48) */
+ TWL4030_MODULE_AUDIO_VOICE, /* Slave 1 (i2c address 0x49) */
+ TWL4030_MODULE_GPIO,
+ TWL4030_MODULE_INTBR,
+ TWL4030_MODULE_PIH,
+
+ TWL4030_MODULE_TEST,
+ TWL4030_MODULE_KEYPAD, /* Slave 2 (i2c address 0x4a) */
+ TWL4030_MODULE_MADC,
+ TWL4030_MODULE_INTERRUPTS,
+ TWL4030_MODULE_LED,
+
+ TWL4030_MODULE_MAIN_CHARGE,
+ TWL4030_MODULE_PRECHARGE,
+ TWL4030_MODULE_PWM0,
+ TWL4030_MODULE_PWM1,
+ TWL4030_MODULE_PWMA,
+
+ TWL4030_MODULE_PWMB,
+ TWL5031_MODULE_ACCESSORY,
+ TWL5031_MODULE_INTERRUPTS,
+ TWL4030_MODULE_BACKUP, /* Slave 3 (i2c address 0x4b) */
+ TWL4030_MODULE_INT,
+
+ TWL4030_MODULE_PM_MASTER,
+ TWL4030_MODULE_PM_RECEIVER,
+ TWL4030_MODULE_RTC,
+ TWL4030_MODULE_SECURED_REG,
+ TWL4030_MODULE_LAST,
+};
+/* Similar functionalities implemented in TWL4030/6030 */
#define TWL_MODULE_USB TWL4030_MODULE_USB
-#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
#define TWL_MODULE_PIH TWL4030_MODULE_PIH
-#define TWL_MODULE_MADC TWL4030_MODULE_MADC
#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
#define TWL_MODULE_RTC TWL4030_MODULE_RTC
#define TWL_MODULE_PWM TWL4030_MODULE_PWM0
+#define TWL_MODULE_LED TWL4030_MODULE_LED
-#define TWL6030_MODULE_ID0 0x0D
-#define TWL6030_MODULE_ID1 0x0E
-#define TWL6030_MODULE_ID2 0x0F
+#define TWL6030_MODULE_ID0 13
+#define TWL6030_MODULE_ID1 14
+#define TWL6030_MODULE_ID2 15
#define GPIO_INTR_OFFSET 0
#define KEYPAD_INTR_OFFSET 1
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 87259a44c25..de7e190f1af 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -152,4 +152,15 @@ void ida_simple_remove(struct ida *ida, unsigned int id);
void __init idr_init_cache(void);
+/**
+ * idr_for_each_entry - iterate over an idr's elements of a given type
+ * @idp: idr handle
+ * @entry: the type * to use as cursor
+ * @id: id entry's key
+ */
+#define idr_for_each_entry(idp, entry, id) \
+ for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
+ entry != NULL; \
+ ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+
#endif /* __IDR_H__ */
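A minimal usage sketch of the new iterator (the idr instance and element type are assumed for illustration, not part of this patch):

	/* Hypothetical sketch: visit every element stored in an idr. */
	struct example_obj *obj;
	int id;

	idr_for_each_entry(&example_idr, obj, id)
		pr_info("found object with id %d\n", id);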
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 2c7223d7e73..86c361e947b 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,6 +18,7 @@ extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_module_check(struct file *file);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -40,6 +41,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_module_check(struct file *file)
+{
+ return 0;
+}
+
#endif /* CONFIG_IMA_H */
#ifdef CONFIG_IMA_APPRAISE
diff --git a/include/linux/init.h b/include/linux/init.h
index f63692d6902..a799273714a 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -182,16 +182,16 @@ extern bool initcall_debug;
* can point at the same handler without causing duplicate-symbol build errors.
*/
-#define __define_initcall(level,fn,id) \
+#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" level ".init"))) = fn
+ __attribute__((__section__(".initcall" #id ".init"))) = fn
/*
* Early initcalls run before initializing SMP.
*
* Only for built-in code, not modules.
*/
-#define early_initcall(fn) __define_initcall("early",fn,early)
+#define early_initcall(fn) __define_initcall(fn, early)
/*
* A "pure" initcall has no dependencies on anything else, and purely
@@ -200,23 +200,23 @@ extern bool initcall_debug;
* This only exists for built-in code, not for modules.
* Keep main.c:initcall_level_names[] in sync.
*/
-#define pure_initcall(fn) __define_initcall("0",fn,0)
-
-#define core_initcall(fn) __define_initcall("1",fn,1)
-#define core_initcall_sync(fn) __define_initcall("1s",fn,1s)
-#define postcore_initcall(fn) __define_initcall("2",fn,2)
-#define postcore_initcall_sync(fn) __define_initcall("2s",fn,2s)
-#define arch_initcall(fn) __define_initcall("3",fn,3)
-#define arch_initcall_sync(fn) __define_initcall("3s",fn,3s)
-#define subsys_initcall(fn) __define_initcall("4",fn,4)
-#define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s)
-#define fs_initcall(fn) __define_initcall("5",fn,5)
-#define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
-#define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
-#define device_initcall(fn) __define_initcall("6",fn,6)
-#define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
-#define late_initcall(fn) __define_initcall("7",fn,7)
-#define late_initcall_sync(fn) __define_initcall("7s",fn,7s)
+#define pure_initcall(fn) __define_initcall(fn, 0)
+
+#define core_initcall(fn) __define_initcall(fn, 1)
+#define core_initcall_sync(fn) __define_initcall(fn, 1s)
+#define postcore_initcall(fn) __define_initcall(fn, 2)
+#define postcore_initcall_sync(fn) __define_initcall(fn, 2s)
+#define arch_initcall(fn) __define_initcall(fn, 3)
+#define arch_initcall_sync(fn) __define_initcall(fn, 3s)
+#define subsys_initcall(fn) __define_initcall(fn, 4)
+#define subsys_initcall_sync(fn) __define_initcall(fn, 4s)
+#define fs_initcall(fn) __define_initcall(fn, 5)
+#define fs_initcall_sync(fn) __define_initcall(fn, 5s)
+#define rootfs_initcall(fn) __define_initcall(fn, rootfs)
+#define device_initcall(fn) __define_initcall(fn, 6)
+#define device_initcall_sync(fn) __define_initcall(fn, 6s)
+#define late_initcall(fn) __define_initcall(fn, 7)
+#define late_initcall_sync(fn) __define_initcall(fn, 7s)
#define __initcall(fn) device_initcall(fn)
diff --git a/include/linux/input.h b/include/linux/input.h
index cab994ba6d9..82ce323b998 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -112,6 +112,11 @@ struct input_value {
* @h_list: list of input handles associated with the device. When
* accessing the list dev->mutex must be held
* @node: used to place the device onto input_dev_list
+ * @num_vals: number of values queued in the current frame
+ * @max_vals: maximum number of values queued in a frame
+ * @vals: array of values queued in the current frame
+ * @devres_managed: indicates that the device is managed with the devres
+ * framework and need not be explicitly unregistered or freed.
*/
struct input_dev {
const char *name;
@@ -180,6 +185,8 @@ struct input_dev {
unsigned int num_vals;
unsigned int max_vals;
struct input_value *vals;
+
+ bool devres_managed;
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -323,7 +330,8 @@ struct input_handle {
struct list_head h_node;
};
-struct input_dev *input_allocate_device(void);
+struct input_dev __must_check *input_allocate_device(void);
+struct input_dev __must_check *devm_input_allocate_device(struct device *);
void input_free_device(struct input_dev *dev);
static inline struct input_dev *input_get_device(struct input_dev *dev)
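A hedged probe-time sketch of the new devres-managed allocator (device name and capability are assumptions, not taken from this patch):

	/* Hypothetical sketch: a devres-managed input device is unregistered
	 * and freed automatically when the driver detaches, so the error and
	 * remove paths need no input_unregister_device()/input_free_device(). */
	struct input_dev *input = devm_input_allocate_device(&pdev->dev);

	if (!input)
		return -ENOMEM;
	input->name = "example-buttons";
	input_set_capability(input, EV_KEY, KEY_POWER);
	return input_register_device(input);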
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
index 05e03284b92..6230d76bde5 100644
--- a/include/linux/input/bu21013.h
+++ b/include/linux/input/bu21013.h
@@ -9,13 +9,10 @@
/**
* struct bu21013_platform_device - Handle the platform data
- * @cs_en: pointer to the cs enable function
- * @cs_dis: pointer to the cs disable function
- * @irq_read_val: pointer to read the pen irq value function
* @touch_x_max: touch x max
* @touch_y_max: touch y max
* @cs_pin: chip select pin
- * @irq: irq pin
+ * @touch_pin: touch gpio pin
* @ext_clk: external clock flag
* @x_flip: x flip flag
* @y_flip: y flip flag
@@ -24,13 +21,10 @@
* This is used to handle the platform data
*/
struct bu21013_platform_device {
- int (*cs_en)(int reset_pin);
- int (*cs_dis)(int reset_pin);
- int (*irq_read_val)(void);
int touch_x_max;
int touch_y_max;
unsigned int cs_pin;
- unsigned int irq;
+ unsigned int touch_pin;
bool ext_clk;
bool x_flip;
bool y_flip;
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index cc5cca774ba..2e86bd0bfba 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -69,6 +69,12 @@ static inline bool input_mt_is_active(const struct input_mt_slot *slot)
return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0;
}
+static inline bool input_mt_is_used(const struct input_mt *mt,
+ const struct input_mt_slot *slot)
+{
+ return slot->frame == mt->frame;
+}
+
int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
unsigned int flags);
void input_mt_destroy_slots(struct input_dev *dev);
diff --git a/include/linux/input/ti_am335x_tsc.h b/include/linux/input/ti_am335x_tsc.h
new file mode 100644
index 00000000000..49269a2aa32
--- /dev/null
+++ b/include/linux/input/ti_am335x_tsc.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_TI_AM335X_TSC_H
+#define __LINUX_TI_AM335X_TSC_H
+
+/**
+ * struct tsc_data Touchscreen wire configuration
+ * @wires: Wires refer to application modes
+ * i.e. 4/5/8 wire touchscreen support
+ * on the platform.
+ * @x_plate_resistance: X plate resistance.
+ * @steps_to_configure: The sequencer supports a total of
+ * 16 programmable steps.
+ * A step configured to read a single
+ * coordinate value can be applied
+ * multiple times for better results.
+ */
+
+struct tsc_data {
+ int wires;
+ int x_plate_resistance;
+ int steps_to_configure;
+};
+
+#endif
diff --git a/include/linux/input/ti_tscadc.h b/include/linux/input/ti_tscadc.h
deleted file mode 100644
index b10a527a92a..00000000000
--- a/include/linux/input/ti_tscadc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __LINUX_TI_TSCADC_H
-#define __LINUX_TI_TSCADC_H
-
-/**
- * struct tsc_data Touchscreen wire configuration
- * @wires: Wires refer to application modes
- * i.e. 4/5/8 wire touchscreen support
- * on the platform.
- * @x_plate_resistance: X plate resistance.
- */
-
-struct tsc_data {
- int wires;
- int x_plate_resistance;
-};
-
-#endif
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 5499c92a915..fe771978e87 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -67,6 +67,8 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
+
+ unsigned int proc_inum;
};
extern struct ipc_namespace init_ipc_ns;
@@ -133,7 +135,8 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#if defined(CONFIG_IPC_NS)
extern struct ipc_namespace *copy_ipcs(unsigned long flags,
- struct task_struct *tsk);
+ struct user_namespace *user_ns, struct ipc_namespace *ns);
+
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
@@ -144,12 +147,12 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
- struct task_struct *tsk)
+ struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (flags & CLONE_NEWIPC)
return ERR_PTR(-EINVAL);
- return tsk->nsproxy->ipc_ns;
+ return ns;
}
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index fcb5d44ea63..8ea3fe0b975 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -216,7 +216,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf);
/*
* The lower layer reports received messages through this interface.
- * The data_size should be zero if this is an asyncronous message. If
+ * The data_size should be zero if this is an asynchronous message. If
* the lower layer gets an error sending a message, it should format
* an error response in the message response.
*/
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 526f10a637c..fdf2c4a238c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -10,9 +10,6 @@
*/
#include <linux/smp.h>
-
-#ifndef CONFIG_S390
-
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
@@ -746,8 +743,11 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
-#endif /* CONFIG_GENERIC_HARDIRQS */
+#else /* !CONFIG_GENERIC_HARDIRQS */
-#endif /* !CONFIG_S390 */
+extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* _LINUX_IRQ_H */
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/include/linux/irqchip/spear-shirq.h
index 88a7fbd2479..c8be16d213a 100644
--- a/arch/arm/plat-spear/include/plat/shirq.h
+++ b/include/linux/irqchip/spear-shirq.h
@@ -1,9 +1,7 @@
/*
- * arch/arm/plat-spear/include/plat/shirq.h
- *
* SPEAr platform shared irq layer header file
*
- * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2009-2012 ST Microelectronics
* Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
@@ -11,31 +9,15 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef __PLAT_SHIRQ_H
-#define __PLAT_SHIRQ_H
+#ifndef __SPEAR_SHIRQ_H
+#define __SPEAR_SHIRQ_H
#include <linux/irq.h>
#include <linux/types.h>
/*
- * struct shirq_dev_config: shared irq device configuration
- *
- * virq: virtual irq number of device
- * enb_mask: enable mask of device
- * status_mask: status mask of device
- * clear_mask: clear mask of device
- */
-struct shirq_dev_config {
- u32 virq;
- u32 enb_mask;
- u32 status_mask;
- u32 clear_mask;
-};
-
-/*
* struct shirq_regs: shared irq register configuration
*
- * base: base address of shared irq register
* enb_reg: enable register offset
* reset_to_enb: val 1 indicates, we need to clear bit for enabling interrupt
* status_reg: status register offset
@@ -44,11 +26,9 @@ struct shirq_dev_config {
* reset_to_clear: val 1 indicates, we need to clear bit for clearing interrupt
*/
struct shirq_regs {
- void __iomem *base;
u32 enb_reg;
u32 reset_to_enb;
u32 status_reg;
- u32 status_reg_mask;
u32 clear_reg;
u32 reset_to_clear;
};
@@ -57,17 +37,28 @@ struct shirq_regs {
* struct spear_shirq: shared irq structure
*
* irq: hardware irq number
- * dev_config: array of device config structures which are using "irq" line
- * dev_count: size of dev_config array
+ * irq_base: base irq in linux domain
+ * irq_nr: no. of shared interrupts in a particular block
+ * irq_bit_off: starting bit offset in the status register
+ * invalid_irq: irq group is currently disabled
+ * base: base address of shared irq register
* regs: register configuration for shared irq block
*/
struct spear_shirq {
u32 irq;
- struct shirq_dev_config *dev_config;
- u32 dev_count;
+ u32 irq_base;
+ u32 irq_nr;
+ u32 irq_bit_off;
+ int invalid_irq;
+ void __iomem *base;
struct shirq_regs regs;
};
-int spear_shirq_register(struct spear_shirq *shirq);
+int __init spear300_shirq_of_init(struct device_node *np,
+ struct device_node *parent);
+int __init spear310_shirq_of_init(struct device_node *np,
+ struct device_node *parent);
+int __init spear320_shirq_of_init(struct device_node *np,
+ struct device_node *parent);
-#endif /* __PLAT_SHIRQ_H */
+#endif /* __SPEAR_SHIRQ_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 3efc43f3f16..e30b6634694 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1096,10 +1096,9 @@ extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
-extern void jbd2_journal_invalidatepage(journal_t *,
+extern int jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned long);
extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int jbd2_journal_stop(handle_t *);
@@ -1303,15 +1302,21 @@ static inline int jbd_space_needed(journal_t *journal)
extern int jbd_blocks_per_page(struct inode *inode);
+/* JBD uses a CRC32 checksum */
+#define JBD_MAX_CHECKSUM_SIZE 4
+
static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
const void *address, unsigned int length)
{
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
+ char ctx[JBD_MAX_CHECKSUM_SIZE];
} desc;
int err;
+ BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
+ JBD_MAX_CHECKSUM_SIZE);
+
desc.shash.tfm = journal->j_chksum_driver;
desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d97ed589744..c566927efcb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -77,13 +77,15 @@
/*
* Divide positive or negative dividend by positive divisor and round
- * to closest integer. Result is undefined for negative divisors.
+ * to closest integer. Result is undefined for negative divisors and
+ * for negative dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
typeof(x) __x = x; \
typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || (__x) > 0) ? \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
(((__x) + ((__d) / 2)) / (__d)) : \
(((__x) - ((__d) / 2)) / (__d)); \
} \
@@ -220,6 +222,23 @@ int __must_check _kstrtol(const char *s, unsigned int base, long *res);
int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it will be
+ * parsed as an octal number; otherwise it will be parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+*/
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
/*
@@ -233,6 +252,22 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign
return _kstrtoul(s, base, res);
}
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it will be
+ * parsed as an octal number; otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtol(). Return code must
+ * be checked.
+ */
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
/*
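
The kernel-doc added above pins down the contract: 0 on success, -EINVAL on a parse error, -ERANGE on overflow, an optional leading '+', and tolerance for a single trailing newline. A userspace approximation of those semantics built on strtoul (this is not the kernel implementation, only its documented behaviour):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the documented kstrtoul() contract: 0 on success,
 * -EINVAL on parse error, -ERANGE on overflow.  Accepts '+',
 * rejects '-', tolerates one trailing newline.
 */
static int kstrtoul_like(const char *s, unsigned int base, unsigned long *res)
{
	char *end;
	unsigned long val;

	if (*s == '-')
		return -EINVAL;
	errno = 0;
	val = strtoul(s, &end, base);
	if (end == s)
		return -EINVAL;
	if (errno == ERANGE)
		return -ERANGE;
	if (*end == '\n')
		end++;
	if (*end != '\0')
		return -EINVAL;
	*res = val;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("\"0x2a\\n\" -> %d (v=%lu)\n", kstrtoul_like("0x2a\n", 0, &v), v);
	printf("\"-5\"     -> %d\n", kstrtoul_like("-5", 0, &v));
	printf("\"junk\"   -> %d\n", kstrtoul_like("junk", 0, &v));
	return 0;
}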
diff --git a/include/linux/key.h b/include/linux/key.h
index 2393b1c040b..4dfde1161c5 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -265,6 +265,7 @@ extern int key_unlink(struct key *keyring,
extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred,
+ key_perm_t perm,
unsigned long flags,
struct key *dest);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 65af6887872..4972e6e9ca9 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
}
return 0;
}
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*,
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure, and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+ return atomic_add_unless(&kref->refcount, 1, 0);
+}
#endif /* _KREF_H_ */
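
The comment above describes why the return value matters: a lookup can race with the final kref_put, so the get must refuse to resurrect an object whose count has already hit zero. The core operation is atomic_add_unless(&kref->refcount, 1, 0); a standalone C11 sketch of that compare-and-swap loop and of the two outcomes a lookup can see:

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Increment refcount unless it is zero; returns non-zero on success.
 * This mirrors atomic_add_unless(&kref->refcount, 1, 0). */
static int get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return 1;
		/* old was reloaded by the failed CAS; loop and retry */
	}
	return 0;
}

int main(void)
{
	struct obj live = { .refcount = 2 };   /* still referenced */
	struct obj dying = { .refcount = 0 };  /* destructor already ran */

	printf("live:  got ref? %d (refcount now %d)\n",
	       get_unless_zero(&live), atomic_load(&live.refcount));
	printf("dying: got ref? %d (refcount now %d)\n",
	       get_unless_zero(&dying), atomic_load(&dying.refcount));
	return 0;
}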
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d5cddd8dcc5..2c497ab0d03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -47,28 +47,40 @@
/*
* For the normal pfn, the highest 12 bits should be zero,
- * so we can mask these bits to indicate the error.
+ * so we can use bits 52-62 to indicate the error pfn and
+ * bit 63 to indicate the noslot pfn.
*/
-#define KVM_PFN_ERR_MASK (0xfffULL << 52)
+#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
+#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
+#define KVM_PFN_NOSLOT (0x1ULL << 63)
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
-#define KVM_PFN_ERR_BAD (KVM_PFN_ERR_MASK + 2)
-#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+/*
+ * error pfns indicate that the gfn is in a slot but we failed to
+ * translate it to a pfn on the host.
+ */
static inline bool is_error_pfn(pfn_t pfn)
{
return !!(pfn & KVM_PFN_ERR_MASK);
}
-static inline bool is_noslot_pfn(pfn_t pfn)
+/*
+ * error_noslot pfns indicate that the gfn cannot be
+ * translated to a pfn - either it is not in a slot or
+ * the translation to a pfn failed.
+ */
+static inline bool is_error_noslot_pfn(pfn_t pfn)
{
- return pfn == KVM_PFN_ERR_BAD;
+ return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}
-static inline bool is_invalid_pfn(pfn_t pfn)
+/* noslot pfn indicates that the gfn is not in slot. */
+static inline bool is_noslot_pfn(pfn_t pfn)
{
- return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+ return pfn == KVM_PFN_NOSLOT;
}
#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
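
With the new layout, KVM_PFN_ERR_MASK covers bits 52-62 ("in a slot, translation failed"), KVM_PFN_ERR_NOSLOT_MASK adds bit 63, and KVM_PFN_NOSLOT is bit 63 alone. A quick standalone check of the three predicates using the constants from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

/* Constants copied from the hunk above. */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)

static bool is_error_pfn(pfn_t pfn)        { return pfn & KVM_PFN_ERR_MASK; }
static bool is_error_noslot_pfn(pfn_t pfn) { return pfn & KVM_PFN_ERR_NOSLOT_MASK; }
static bool is_noslot_pfn(pfn_t pfn)       { return pfn == KVM_PFN_NOSLOT; }

static void show(const char *name, pfn_t pfn)
{
	printf("%-20s err=%d err_noslot=%d noslot=%d\n", name,
	       is_error_pfn(pfn), is_error_noslot_pfn(pfn), is_noslot_pfn(pfn));
}

int main(void)
{
	show("normal pfn 0x1234", 0x1234);	/* all three predicates false */
	show("KVM_PFN_ERR_HWPOISON", KVM_PFN_ERR_HWPOISON);
	show("KVM_PFN_NOSLOT", KVM_PFN_NOSLOT);	/* only the noslot tests hit */
	return 0;
}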
@@ -107,6 +119,9 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_IMMEDIATE_EXIT 15
#define KVM_REQ_PMU 16
#define KVM_REQ_PMI 17
+#define KVM_REQ_WATCHDOG 18
+#define KVM_REQ_MASTERCLOCK_UPDATE 19
+#define KVM_REQ_MCLOCK_INPROGRESS 20
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -516,6 +531,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_make_mclock_inprogress_request(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -569,9 +585,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
@@ -666,6 +682,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
@@ -838,9 +855,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(vcpu->kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_notifier_count))
return 1;
/*
* Ensure the read of mmu_notifier_count happens before the read
@@ -853,7 +870,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
* can't rely on kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_notifier_seq != mmu_seq)
return 1;
return 0;
}
@@ -881,10 +898,20 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
#ifdef CONFIG_HAVE_KVM_EVENTFD
void kvm_eventfd_init(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
-int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+#else
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+ return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+#endif
#else
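
When CONFIG_HAVE_KVM_IRQCHIP is off, kvm_irqfd() and kvm_irqfd_release() become static inline stubs so callers build unchanged and simply see -EINVAL. The same compile-time stub pattern in a self-contained form (HAVE_FEATURE is a made-up switch, not a kernel config symbol):

#include <errno.h>
#include <stdio.h>

/* Flip this to 1 to get the real implementation instead of the stub. */
#define HAVE_FEATURE 0

#if HAVE_FEATURE
int feature_enable(int arg)
{
	printf("feature enabled with arg %d\n", arg);
	return 0;
}
#else
/* Stub: same signature, so callers need no #ifdefs of their own. */
static inline int feature_enable(int arg)
{
	(void)arg;
	return -EINVAL;
}
#endif

int main(void)
{
	int ret = feature_enable(42);

	printf("feature_enable() returned %d%s\n", ret,
	       ret == -EINVAL ? " (-EINVAL: not compiled in)" : "");
	return 0;
}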
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6e53bb31c22..0d9b5eed714 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -179,6 +179,23 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_on,
unsigned long *delay_off,
int invert);
+/**
+ * led_trigger_rename_static - rename a trigger
+ * @name: the new trigger name
+ * @trig: the LED trigger to rename
+ *
+ * Change a LED trigger name by copying the string passed in
+ * @name into the current trigger name, which MUST be large
+ * enough for the new string.
+ *
+ * Note that name must NOT point to the same string used
+ * during LED registration, as that could lead to races.
+ *
+ * This is meant to be used on triggers with statically
+ * allocated name.
+ */
+extern void led_trigger_rename_static(const char *name,
+ struct led_trigger *trig);
#else
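
The kernel-doc above boils down to: the trigger owns writable storage for its name, the new string must fit, and the source must not be the string used at registration. A standalone sketch of that "rename into the trigger's own buffer" idea (a fixed-size array here; the kernel keeps a name pointer to caller-provided storage):

#include <stdio.h>
#include <string.h>

/* Standalone analogue of a trigger whose name lives in its own buffer. */
struct trigger {
	char name[32];
};

/* Copy the new name into the trigger's storage, as the kernel-doc requires:
 * the destination must be large enough and must not alias the source. */
static void trigger_rename(struct trigger *t, const char *name)
{
	snprintf(t->name, sizeof(t->name), "%s", name);
}

int main(void)
{
	struct trigger t = { .name = "mmc0" };

	trigger_rename(&t, "mmc1");
	printf("trigger is now \"%s\"\n", t.name);
	return 0;
}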
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 77eeeda2b6e..83ba0ab2c91 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -163,6 +163,7 @@ enum {
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
+ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -1114,6 +1115,10 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
+struct platform_device;
+
+extern int ata_platform_remove_one(struct platform_device *pdev);
+
/*
* ACPI - drivers/ata/libata-acpi.c
*/
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 6492181bcb1..460b60fa7ad 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -53,10 +53,13 @@ struct loop_device {
spinlock_t lo_lock;
struct bio_list lo_bio_list;
+ unsigned int lo_bio_count;
int lo_state;
struct mutex lo_ctl_mutex;
struct task_struct *lo_thread;
wait_queue_head_t lo_event;
+ /* wait queue for incoming requests */
+ wait_queue_head_t lo_req_wait;
struct request_queue *lo_queue;
struct gendisk *lo_disk;
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 7a71ffad037..4019013c659 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -52,8 +52,8 @@ We replicate IO (more or less synchronously) to local and remote disk.
For crash recovery after replication node failure,
we need to resync all regions that have been target of in-flight WRITE IO
- (in use, or "hot", regions), as we don't know wether or not those WRITEs have
- made it to stable storage.
+ (in use, or "hot", regions), as we don't know whether or not those WRITEs
+ have made it to stable storage.
To avoid a "full resync", we need to persistently track these regions.
@@ -166,9 +166,11 @@ struct lc_element {
/* if we want to track a larger set of objects,
* it needs to become arch independent u64 */
unsigned lc_number;
-
/* special label when on free list */
#define LC_FREE (~0U)
+
+ /* for pending changes */
+ unsigned lc_new_number;
};
struct lru_cache {
@@ -176,6 +178,7 @@ struct lru_cache {
struct list_head lru;
struct list_head free;
struct list_head in_use;
+ struct list_head to_be_changed;
/* the pre-created kmem cache to allocate the objects from */
struct kmem_cache *lc_cache;
@@ -186,7 +189,7 @@ struct lru_cache {
size_t element_off;
/* number of elements (indices) */
- unsigned int nr_elements;
+ unsigned int nr_elements;
/* Arbitrary limit on maximum tracked objects. Practical limit is much
* lower due to allocation failures, probably. For typical use cases,
* nr_elements should be a few thousand at most.
@@ -194,18 +197,19 @@ struct lru_cache {
* 8 high bits of .lc_index to be overloaded with flags in the future. */
#define LC_MAX_ACTIVE (1<<24)
+ /* allow accumulating a few (index:label) changes,
+ * but no more than max_pending_changes */
+ unsigned int max_pending_changes;
+ /* number of elements currently on to_be_changed list */
+ unsigned int pending_changes;
+
/* statistics */
- unsigned used; /* number of lelements currently on in_use list */
- unsigned long hits, misses, starving, dirty, changed;
+ unsigned used; /* number of elements currently on in_use list */
+ unsigned long hits, misses, starving, locked, changed;
/* see below: flag-bits for lru_cache */
unsigned long flags;
- /* when changing the label of an index element */
- unsigned int new_number;
-
- /* for paranoia when changing the label of an index element */
- struct lc_element *changing_element;
void *lc_private;
const char *name;
@@ -221,10 +225,15 @@ enum {
/* debugging aid, to catch concurrent access early.
* user needs to guarantee exclusive access by proper locking! */
__LC_PARANOIA,
- /* if we need to change the set, but currently there is a changing
- * transaction pending, we are "dirty", and must deferr further
- * changing requests */
+
+ /* annotate that the set is "dirty", possibly accumulating further
+ * changes, until a transaction is finally triggered */
__LC_DIRTY,
+
+ /* Locked, no further changes allowed.
+ * Also used to serialize changing transactions. */
+ __LC_LOCKED,
+
/* if we need to change the set, but currently there is no free nor
* unused element available, we are "starving", and must not give out
* further references, to guarantee that eventually some refcnt will
@@ -236,9 +245,11 @@ enum {
};
#define LC_PARANOIA (1<<__LC_PARANOIA)
#define LC_DIRTY (1<<__LC_DIRTY)
+#define LC_LOCKED (1<<__LC_LOCKED)
#define LC_STARVING (1<<__LC_STARVING)
extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+ unsigned max_pending_changes,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
@@ -249,7 +260,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
-extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
+extern void lc_committed(struct lru_cache *lc);
struct seq_file;
extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
@@ -258,32 +269,40 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
void (*detail) (struct seq_file *, struct lc_element *));
/**
- * lc_try_lock - can be used to stop lc_get() from changing the tracked set
+ * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
* @lc: the lru cache to operate on
*
- * Note that the reference counts and order on the active and lru lists may
- * still change. Returns true if we acquired the lock.
+ * Allows (expects) the set to be "dirty". Note that the reference counts and
+ * order on the active and lru lists may still change. Used to serialize
+ * changing transactions. Returns true if we acquired the lock.
*/
-static inline int lc_try_lock(struct lru_cache *lc)
+static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
- return !test_and_set_bit(__LC_DIRTY, &lc->flags);
+ return !test_and_set_bit(__LC_LOCKED, &lc->flags);
}
/**
+ * lc_try_lock - variant to stop lc_get() from changing the tracked set
+ * @lc: the lru cache to operate on
+ *
+ * Note that the reference counts and order on the active and lru lists may
+ * still change. Only works on a "clean" set. Returns true if we acquired the
+ * lock, which means there are no pending changes, and any further attempt to
+ * change the set will not succeed until the next lc_unlock().
+ */
+extern int lc_try_lock(struct lru_cache *lc);
+
+/**
* lc_unlock - unlock @lc, allow lc_get() to change the set again
* @lc: the lru cache to operate on
*/
static inline void lc_unlock(struct lru_cache *lc)
{
clear_bit(__LC_DIRTY, &lc->flags);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(__LC_LOCKED, &lc->flags);
}
-static inline int lc_is_used(struct lru_cache *lc, unsigned int enr)
-{
- struct lc_element *e = lc_find(lc, enr);
- return e && e->refcnt;
-}
+extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
#define lc_entry(ptr, type, member) \
container_of(ptr, type, member)
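
The lru_cache rework splits "the set is dirty / has pending label changes" (__LC_DIRTY) from "a changing transaction is in flight" (__LC_LOCKED), and lc_try_lock_for_transaction() is a plain test-and-set try-lock on the latter. A standalone analogue of that try-lock using a C11 atomic_flag (the kernel uses test_and_set_bit()/clear_bit_unlock() on lc->flags):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Standalone analogue of the __LC_LOCKED bit serializing transactions. */
struct cache {
	atomic_flag locked;
};

static bool try_lock_for_transaction(struct cache *c)
{
	/* test_and_set returning true means somebody else already holds it */
	return !atomic_flag_test_and_set(&c->locked);
}

static void unlock(struct cache *c)
{
	atomic_flag_clear(&c->locked);
}

int main(void)
{
	struct cache c = { .locked = ATOMIC_FLAG_INIT };

	printf("first try:    %s\n", try_lock_for_transaction(&c) ? "locked" : "busy");
	printf("second try:   %s\n", try_lock_for_transaction(&c) ? "locked" : "busy");
	unlock(&c);
	printf("after unlock: %s\n", try_lock_for_transaction(&c) ? "locked" : "busy");
	return 0;
}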
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 11ddc7ffeba..0108a56f814 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,11 +21,14 @@
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
+#include <linux/hardirq.h>
+#include <linux/jump_label.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
+struct kmem_cache;
/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
@@ -181,7 +184,14 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_count_vm_event(mm, idx);
+}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
@@ -407,5 +417,211 @@ static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
+
+#ifdef CONFIG_MEMCG_KMEM
+extern struct static_key memcg_kmem_enabled_key;
+
+extern int memcg_limited_groups_array_size;
+
+/*
+ * Helper macro to loop through all memcg-specific caches. Callers must still
+ * check if the cache is valid (it is either valid or NULL).
+ * The slab_mutex must be held when looping through those caches.
+ */
+#define for_each_memcg_cache_index(_idx) \
+ for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+
+static inline bool memcg_kmem_enabled(void)
+{
+ return static_key_false(&memcg_kmem_enabled_key);
+}
+
+/*
+ * In general, we'll do everything in our power to not incur any overhead
+ * for non-memcg users for the kmem functions. Not even a function call, if we
+ * can avoid it.
+ *
+ * Therefore, we'll inline all those functions so that in the best case, we'll
+ * see that kmemcg is off for everybody and proceed quickly. If it is on,
+ * we'll still do most of the flag checking inline. We check a lot of
+ * conditions, but because they are pretty simple, they are expected to be
+ * fast.
+ */
+bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
+ int order);
+void __memcg_kmem_commit_charge(struct page *page,
+ struct mem_cgroup *memcg, int order);
+void __memcg_kmem_uncharge_pages(struct page *page, int order);
+
+int memcg_cache_id(struct mem_cgroup *memcg);
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+ struct kmem_cache *root_cache);
+void memcg_release_cache(struct kmem_cache *cachep);
+void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
+
+int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
+void memcg_update_array_size(int num_groups);
+
+struct kmem_cache *
+__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
+
+void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
+void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+
+/**
+ * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
+ * @gfp: the gfp allocation flags.
+ * @memcg: a pointer to the memcg this was charged against.
+ * @order: allocation order.
+ *
+ * returns true if the memcg to which the current task belongs can hold this
+ * allocation.
+ *
+ * We return true automatically if this allocation is not to be accounted to
+ * any memcg.
+ */
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+ if (!memcg_kmem_enabled())
+ return true;
+
+ /*
+ * __GFP_NOFAIL allocations will move on even if charging is not
+ * possible. Therefore we don't even try, and have this allocation
+ * unaccounted. We could in theory charge it with
+ * res_counter_charge_nofail, but we hope those allocations are rare,
+ * and won't be worth the trouble.
+ */
+ if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
+ return true;
+ if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+ return true;
+
+ /* If the task is dying, just let it go. */
+ if (unlikely(fatal_signal_pending(current)))
+ return true;
+
+ return __memcg_kmem_newpage_charge(gfp, memcg, order);
+}
+
+/**
+ * memcg_kmem_uncharge_pages: uncharge pages from memcg
+ * @page: pointer to struct page being freed
+ * @order: allocation order.
+ *
+ * there is no need to specify memcg here, since it is embedded in page_cgroup
+ */
+static inline void
+memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge_pages(page, order);
+}
+
+/**
+ * memcg_kmem_commit_charge: embeds correct memcg in a page
+ * @page: pointer to struct page recently allocated
+ * @memcg: the memcg structure we charged against
+ * @order: allocation order.
+ *
+ * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
+ * failure of the allocation. If @page is NULL, this function will revert the
+ * charges. Otherwise, it will commit the memcg given by @memcg to the
+ * corresponding page_cgroup.
+ */
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+ if (memcg_kmem_enabled() && memcg)
+ __memcg_kmem_commit_charge(page, memcg, order);
+}
+
+/**
+ * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ * @gfp: allocation flags.
+ *
+ * This function assumes that the task allocating, which determines the memcg
+ * in the page allocator, belongs to the same cgroup throughout the whole
+ * process. Misaccounting can happen if the task calls memcg_kmem_get_cache()
+ * while belonging to a cgroup, and later on changes. This is considered
+ * acceptable, and should only happen upon task migration.
+ *
+ * Before the cache is created by the memcg core, there is also a possible
+ * imbalance: the task belongs to a memcg, but the cache being allocated from
+ * is the global cache, since the child cache is not yet guaranteed to be
+ * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
+ * passed and the page allocator will not attempt any cgroup accounting.
+ */
+static __always_inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+ if (!memcg_kmem_enabled())
+ return cachep;
+ if (gfp & __GFP_NOFAIL)
+ return cachep;
+ if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+ return cachep;
+ if (unlikely(fatal_signal_pending(current)))
+ return cachep;
+
+ return __memcg_kmem_get_cache(cachep, gfp);
+}
+#else
+#define for_each_memcg_cache_index(_idx) \
+ for (; NULL; )
+
+static inline bool memcg_kmem_enabled(void)
+{
+ return false;
+}
+
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+ return true;
+}
+
+static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+}
+
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+}
+
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return -1;
+}
+
+static inline int
+memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+ struct kmem_cache *root_cache)
+{
+ return 0;
+}
+
+static inline void memcg_release_cache(struct kmem_cache *cachep)
+{
+}
+
+static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
+ struct kmem_cache *s)
+{
+}
+
+static inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+ return cachep;
+}
+
+static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
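
The pattern behind the new helpers is "cheap inline guard in the header, out-of-line slow path in memcontrol.c": a static-key test plus a few flag checks weed out every allocation that needs no accounting before any real function call happens. A reduced standalone sketch of that shape (memcg_enabled and the GFP_* values below are stand-ins, not the kernel's static key or gfp flags):

#include <stdbool.h>
#include <stdio.h>

#define GFP_ACCOUNT	0x1	/* stand-in for __GFP_KMEMCG */
#define GFP_NOFAIL	0x2	/* stand-in for __GFP_NOFAIL */

/* Stand-in for the static key: false means "no kmemcg user anywhere",
 * so the fast path should cost next to nothing. */
static bool memcg_enabled;

/* Out-of-line slow path: only reached when accounting really applies. */
static bool slow_path_charge(unsigned int gfp, int order)
{
	printf("  slow path: charging order-%d allocation (gfp=%#x)\n",
	       order, gfp);
	return true;
}

/* Inline-style fast path: bail out early for every case that needs
 * no accounting, mirroring memcg_kmem_newpage_charge() above. */
static inline bool newpage_charge(unsigned int gfp, int order)
{
	if (!memcg_enabled)
		return true;
	if (!(gfp & GFP_ACCOUNT) || (gfp & GFP_NOFAIL))
		return true;
	return slow_path_charge(gfp, order);
}

int main(void)
{
	printf("kmemcg off:\n");
	newpage_charge(GFP_ACCOUNT, 0);		/* no output: fast path */

	memcg_enabled = true;
	printf("kmemcg on, unaccounted gfp:\n");
	newpage_charge(0, 0);			/* still fast path */
	printf("kmemcg on, accounted gfp:\n");
	newpage_charge(GFP_ACCOUNT, 2);		/* hits the slow path */
	return 0;
}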
diff --git a/include/linux/memory.h b/include/linux/memory.h
index a09216d0dcc..45e93b46887 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -54,6 +54,7 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
+ int status_change_nid_high;
int status_change_nid;
};
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index dbd212723b7..0d7df39a588 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -123,7 +123,7 @@ struct sp_node {
struct shared_policy {
struct rb_root root;
- struct mutex mutex;
+ spinlock_t lock;
};
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
@@ -165,11 +165,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
#ifdef CONFIG_TMPFS
-extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif
-extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
- int no_context);
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
@@ -188,6 +187,8 @@ static inline int vma_migratable(struct vm_area_struct *vma)
return 1;
}
+extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+
#else
struct mempolicy {};
@@ -294,18 +295,22 @@ static inline void check_highest_zone(int k)
}
#ifdef CONFIG_TMPFS
-static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
- int no_context)
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
return 1; /* error */
}
#endif
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
- int no_context)
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
return 0;
}
+static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
+{
+ return -1; /* no node preference */
+}
+
#endif /* CONFIG_NUMA */
#endif
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 5d5298d5602..2138bd33021 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -267,39 +267,21 @@ struct abx500_bm_data {
int gnd_lift_resistance;
const struct abx500_maxim_parameters *maxi;
const struct abx500_bm_capacity_levels *cap_levels;
- const struct abx500_battery_type *bat_type;
+ struct abx500_battery_type *bat_type;
const struct abx500_bm_charger_parameters *chg_params;
const struct abx500_fg_parameters *fg_params;
};
-struct abx500_chargalg_platform_data {
- char **supplied_to;
- size_t num_supplicants;
-};
-
-struct abx500_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- bool autopower_cfg;
-};
+extern struct abx500_bm_data ab8500_bm_data;
-struct abx500_btemp_platform_data {
- char **supplied_to;
- size_t num_supplicants;
+enum {
+ NTC_EXTERNAL = 0,
+ NTC_INTERNAL,
};
-struct abx500_fg_platform_data {
- char **supplied_to;
- size_t num_supplicants;
-};
-
-struct abx500_bm_plat_data {
- struct abx500_bm_data *battery;
- struct abx500_charger_platform_data *charger;
- struct abx500_btemp_platform_data *btemp;
- struct abx500_fg_platform_data *fg;
- struct abx500_chargalg_platform_data *chargalg;
-};
+int bmdevs_of_probe(struct device *dev,
+ struct device_node *np,
+ struct abx500_bm_data **battery);
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index dd231ac0bb1..a580363a7d2 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -78,6 +78,8 @@ enum arizona_type {
#define ARIZONA_NUM_IRQ 50
+struct snd_soc_dapm_context;
+
struct arizona {
struct regmap *regmap;
struct device *dev;
@@ -98,6 +100,8 @@ struct arizona {
struct mutex clk_lock;
int clk32k_ref;
+
+ struct snd_soc_dapm_context *dapm;
};
int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 7ab442905a5..8b1d1daaae1 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -62,6 +62,9 @@
#define ARIZONA_MAX_OUTPUT 6
+#define ARIZONA_HAP_ACT_ERM 0
+#define ARIZONA_HAP_ACT_LRA 2
+
#define ARIZONA_MAX_PDM_SPK 2
struct regulator_init_data;
@@ -114,6 +117,9 @@ struct arizona_pdata {
/** PDM speaker format */
unsigned int spk_fmt[ARIZONA_MAX_PDM_SPK];
+
+ /** Haptic actuator type */
+ unsigned int hap_act;
};
#endif
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 7671a287dfe..1f6fe31a4d5 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -76,6 +76,7 @@
#define ARIZONA_RATE_ESTIMATOR_3 0x154
#define ARIZONA_RATE_ESTIMATOR_4 0x155
#define ARIZONA_RATE_ESTIMATOR_5 0x156
+#define ARIZONA_DYNAMIC_FREQUENCY_SCALING_1 0x161
#define ARIZONA_FLL1_CONTROL_1 0x171
#define ARIZONA_FLL1_CONTROL_2 0x172
#define ARIZONA_FLL1_CONTROL_3 0x173
@@ -110,6 +111,7 @@
#define ARIZONA_FLL2_GPIO_CLOCK 0x1AA
#define ARIZONA_MIC_CHARGE_PUMP_1 0x200
#define ARIZONA_LDO1_CONTROL_1 0x210
+#define ARIZONA_LDO1_CONTROL_2 0x212
#define ARIZONA_LDO2_CONTROL_1 0x213
#define ARIZONA_MIC_BIAS_CTRL_1 0x218
#define ARIZONA_MIC_BIAS_CTRL_2 0x219
@@ -979,6 +981,7 @@
#define ARIZONA_DSP1_CLOCKING_1 0x1101
#define ARIZONA_DSP1_STATUS_1 0x1104
#define ARIZONA_DSP1_STATUS_2 0x1105
+#define ARIZONA_DSP1_STATUS_3 0x1106
#define ARIZONA_DSP2_CONTROL_1 0x1200
#define ARIZONA_DSP2_CLOCKING_1 0x1201
#define ARIZONA_DSP2_STATUS_1 0x1204
@@ -1574,6 +1577,13 @@
#define ARIZONA_SAMPLE_RATE_DETECT_D_WIDTH 5 /* SAMPLE_RATE_DETECT_D - [4:0] */
/*
+ * R353 (0x161) - Dynamic Frequency Scaling 1
+ */
+#define ARIZONA_SUBSYS_MAX_FREQ 0x0001 /* SUBSYS_MAX_FREQ */
+#define ARIZONA_SUBSYS_MAX_FREQ_SHIFT 0 /* SUBSYS_MAX_FREQ */
+#define ARIZONA_SUBSYS_MAX_FREQ_WIDTH 1 /* SUBSYS_MAX_FREQ */
+
+/*
* R369 (0x171) - FLL1 Control 1
*/
#define ARIZONA_FLL1_FREERUN 0x0002 /* FLL1_FREERUN */
@@ -1889,6 +1899,13 @@
#define ARIZONA_LDO1_ENA_WIDTH 1 /* LDO1_ENA */
/*
+ * R530 (0x212) - LDO1 Control 2
+ */
+#define ARIZONA_LDO1_HI_PWR 0x0001 /* LDO1_HI_PWR */
+#define ARIZONA_LDO1_HI_PWR_SHIFT 0 /* LDO1_HI_PWR */
+#define ARIZONA_LDO1_HI_PWR_WIDTH 1 /* LDO1_HI_PWR */
+
+/*
* R531 (0x213) - LDO2 Control 1
*/
#define ARIZONA_LDO2_VSEL_MASK 0x07E0 /* LDO2_VSEL - [10:5] */
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
new file mode 100644
index 00000000000..38452ce1e89
--- /dev/null
+++ b/include/linux/mfd/as3711.h
@@ -0,0 +1,126 @@
+/*
+ * AS3711 PMIC MFD driver header
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#ifndef MFD_AS3711_H
+#define MFD_AS3711_H
+
+/*
+ * Client data
+ */
+
+/* Register addresses */
+#define AS3711_SD_1_VOLTAGE 0 /* Digital Step-Down */
+#define AS3711_SD_2_VOLTAGE 1
+#define AS3711_SD_3_VOLTAGE 2
+#define AS3711_SD_4_VOLTAGE 3
+#define AS3711_LDO_1_VOLTAGE 4 /* Analog LDO */
+#define AS3711_LDO_2_VOLTAGE 5
+#define AS3711_LDO_3_VOLTAGE 6 /* Digital LDO */
+#define AS3711_LDO_4_VOLTAGE 7
+#define AS3711_LDO_5_VOLTAGE 8
+#define AS3711_LDO_6_VOLTAGE 9
+#define AS3711_LDO_7_VOLTAGE 0xa
+#define AS3711_LDO_8_VOLTAGE 0xb
+#define AS3711_SD_CONTROL 0x10
+#define AS3711_GPIO_SIGNAL_OUT 0x20
+#define AS3711_GPIO_SIGNAL_IN 0x21
+#define AS3711_SD_CONTROL_1 0x30
+#define AS3711_SD_CONTROL_2 0x31
+#define AS3711_CURR_CONTROL 0x40
+#define AS3711_CURR1_VALUE 0x43
+#define AS3711_CURR2_VALUE 0x44
+#define AS3711_CURR3_VALUE 0x45
+#define AS3711_STEPUP_CONTROL_1 0x50
+#define AS3711_STEPUP_CONTROL_2 0x51
+#define AS3711_STEPUP_CONTROL_4 0x53
+#define AS3711_STEPUP_CONTROL_5 0x54
+#define AS3711_REG_STATUS 0x73
+#define AS3711_INTERRUPT_STATUS_1 0x77
+#define AS3711_INTERRUPT_STATUS_2 0x78
+#define AS3711_INTERRUPT_STATUS_3 0x79
+#define AS3711_CHARGER_STATUS_1 0x86
+#define AS3711_CHARGER_STATUS_2 0x87
+#define AS3711_ASIC_ID_1 0x90
+#define AS3711_ASIC_ID_2 0x91
+
+#define AS3711_MAX_REGS 0x92
+
+/* Regulators */
+enum {
+ AS3711_REGULATOR_SD_1,
+ AS3711_REGULATOR_SD_2,
+ AS3711_REGULATOR_SD_3,
+ AS3711_REGULATOR_SD_4,
+ AS3711_REGULATOR_LDO_1,
+ AS3711_REGULATOR_LDO_2,
+ AS3711_REGULATOR_LDO_3,
+ AS3711_REGULATOR_LDO_4,
+ AS3711_REGULATOR_LDO_5,
+ AS3711_REGULATOR_LDO_6,
+ AS3711_REGULATOR_LDO_7,
+ AS3711_REGULATOR_LDO_8,
+
+ AS3711_REGULATOR_MAX,
+};
+
+struct device;
+struct regmap;
+
+struct as3711 {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#define AS3711_MAX_STEPDOWN 4
+#define AS3711_MAX_STEPUP 2
+#define AS3711_MAX_LDO 8
+
+enum as3711_su2_feedback {
+ AS3711_SU2_VOLTAGE,
+ AS3711_SU2_CURR1,
+ AS3711_SU2_CURR2,
+ AS3711_SU2_CURR3,
+ AS3711_SU2_CURR_AUTO,
+};
+
+enum as3711_su2_fbprot {
+ AS3711_SU2_LX_SD4,
+ AS3711_SU2_GPIO2,
+ AS3711_SU2_GPIO3,
+ AS3711_SU2_GPIO4,
+};
+
+/*
+ * Platform data
+ */
+
+struct as3711_regulator_pdata {
+ struct regulator_init_data *init_data[AS3711_REGULATOR_MAX];
+};
+
+struct as3711_bl_pdata {
+ const char *su1_fb;
+ int su1_max_uA;
+ const char *su2_fb;
+ int su2_max_uA;
+ enum as3711_su2_feedback su2_feedback;
+ enum as3711_su2_fbprot su2_fbprot;
+ bool su2_auto_curr1;
+ bool su2_auto_curr2;
+ bool su2_auto_curr3;
+};
+
+struct as3711_platform_data {
+ struct as3711_regulator_pdata regulator;
+ struct as3711_bl_pdata backlight;
+};
+
+#endif
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 0507c4c21a7..86dd93de6ff 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -146,4 +146,14 @@ void da9052_device_exit(struct da9052 *da9052);
extern struct regmap_config da9052_regmap_config;
+int da9052_irq_init(struct da9052 *da9052);
+int da9052_irq_exit(struct da9052 *da9052);
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+ irq_handler_t handler, void *data);
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data);
+
+int da9052_enable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq);
+
#endif /* __MFD_DA9052_DA9052_H */
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index c96ad682c59..956afa44599 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -1,4 +1,4 @@
-/*
+/*
* da9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index 147293b4471..04e092be4b0 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012 Dialog Semiconductor Ltd.
+/* Copyright (C) 2012 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,8 +25,29 @@ struct da9055_pdata {
int gpio_base;
struct regulator_init_data *regulators[DA9055_MAX_REGULATORS];
- bool reset_enable; /* Enable RTC in RESET Mode */
- enum gpio_select *gpio_rsel; /* Select regulator set thru GPIO 1/2 */
- enum gpio_select *gpio_ren; /* Enable regulator thru GPIO 1/2 */
+ /* Enable RTC in RESET Mode */
+ bool reset_enable;
+ /*
+ * GPI muxed pin to control
+ * regulator state A/B, 0 if not available.
+ */
+ int *gpio_ren;
+ /*
+ * GPI muxed pin to control
+ * regulator set, 0 if not available.
+ */
+ int *gpio_rsel;
+ /*
+ * Regulator mode control bits value (GPI offset) that
+ * controls the regulator state, 0 if not available.
+ */
+ enum gpio_select *reg_ren;
+ /*
+ * Regulator mode control bits value (GPI offset) that
+ * controls the regulator set A/B, 0 if not available.
+ */
+ enum gpio_select *reg_rsel;
+ /* GPIOs to enable regulator, 0 if not available */
+ int *ena_gpio;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
index df237ee5480..2b592e072db 100644
--- a/include/linux/mfd/da9055/reg.h
+++ b/include/linux/mfd/da9055/reg.h
@@ -1,4 +1,4 @@
-/*
+/*
* DA9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index cec364bdccf..2a32b16f79c 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -211,16 +211,16 @@ struct lp8788_chg_param {
/*
* struct lp8788_charger_platform_data
- * @vbatt_adc : adc selection id for battery voltage
- * @batt_temp_adc : adc selection id for battery temperature
+ * @adc_vbatt : adc channel name for battery voltage
+ * @adc_batt_temp : adc channel name for battery temperature
* @max_vbatt_mv : used for calculating battery capacity
* @chg_params : initial charging parameters
* @num_chg_params : numbers of charging parameters
* @charger_event : the charger event can be reported to the platform side
*/
struct lp8788_charger_platform_data {
- enum lp8788_adc_id vbatt_adc;
- enum lp8788_adc_id batt_temp_adc;
+ const char *adc_vbatt;
+ const char *adc_batt_temp;
unsigned int max_vbatt_mv;
struct lp8788_chg_param *chg_params;
int num_chg_params;
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 830152cfae3..6ae21bf47d6 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -316,6 +316,7 @@ enum max8997_irq {
#define MAX8997_NUM_GPIO 12
struct max8997_dev {
struct device *dev;
+ struct max8997_platform_data *pdata;
struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
struct i2c_client *rtc; /* slave addr 0x0c */
struct i2c_client *haptic; /* slave addr 0x90 */
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 328d8e24b53..1d4a4fe6ac3 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -75,6 +75,7 @@ enum max8998_regulators {
struct max8997_regulator_data {
int id;
struct regulator_init_data *initdata;
+ struct device_node *reg_node;
};
enum max8997_muic_usb_type {
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index 36c242e52ef..fd413ccab91 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -33,6 +33,7 @@
/* Maximum number of main interrupts */
#define MAX_MAIN_INTERRUPT 5
#define RC5T583_MAX_GPEDGE_REG 2
+#define RC5T583_MAX_INTERRUPT_EN_REGS 8
#define RC5T583_MAX_INTERRUPT_MASK_REGS 9
/* Interrupt enable register */
@@ -304,7 +305,7 @@ struct rc5t583 {
uint8_t intc_inten_reg;
/* For group interrupt bits and address */
- uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_MASK_REGS];
+ uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_EN_REGS];
/* For gpio edge */
uint8_t gpedge_reg[RC5T583_MAX_GPEDGE_REG];
diff --git a/include/linux/mfd/retu.h b/include/linux/mfd/retu.h
new file mode 100644
index 00000000000..1e2715d5b83
--- /dev/null
+++ b/include/linux/mfd/retu.h
@@ -0,0 +1,22 @@
+/*
+ * Retu MFD driver interface
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef __LINUX_MFD_RETU_H
+#define __LINUX_MFD_RETU_H
+
+struct retu_dev;
+
+int retu_read(struct retu_dev *, u8);
+int retu_write(struct retu_dev *, u8, u16);
+
+/* Registers */
+#define RETU_REG_WATCHDOG 0x17 /* Watchdog */
+#define RETU_REG_CC1 0x0d /* Common control register 1 */
+#define RETU_REG_STATUS 0x16 /* Status register */
+
+#endif /* __LINUX_MFD_RETU_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
index d179227e866..9a855ac11cb 100644
--- a/include/linux/mfd/sta2x11-mfd.h
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -26,6 +26,28 @@
#include <linux/types.h>
#include <linux/pci.h>
+enum sta2x11_mfd_plat_dev {
+ sta2x11_sctl = 0,
+ sta2x11_gpio,
+ sta2x11_scr,
+ sta2x11_time,
+ sta2x11_apbreg,
+ sta2x11_apb_soc_regs,
+ sta2x11_vic,
+ sta2x11_n_mfd_plat_devs,
+};
+
+#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl"
+#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio"
+#define STA2X11_MFD_SCR_NAME "sta2x11-scr"
+#define STA2X11_MFD_TIME_NAME "sta2x11-time"
+#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg"
+#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs"
+#define STA2X11_MFD_VIC_NAME "sta2x11-vic"
+
+extern u32
+__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
+
/*
* The MFD PCI block includes the GPIO peripherals and other register blocks.
* For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
@@ -182,7 +204,11 @@ struct sta2x11_gpio_pdata {
* The APB bridge has its own registers, needed by our users as well.
* They are accessed with the following read/mask/write function.
*/
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+static inline u32
+sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
+}
/* CAN and MLB */
#define APBREG_BSR 0x00 /* Bridge Status Reg */
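
sta2x11_apbreg_mask() and the other accessors are now thin inline wrappers that all funnel into __sta2x11_mfd_mask(), a read/mask/write primitive selected by the enum above. The underlying operation is an ordinary read-modify-write; a standalone sketch over an in-memory register file (the real code works on ioremapped registers under a lock):

#include <stdint.h>
#include <stdio.h>

/* In-memory stand-in for a register block. */
static uint32_t regs[4];

/* Read/mask/write: clear the bits in 'mask', set the bits in 'val',
 * return the new register value (the shape of __sta2x11_mfd_mask()). */
static uint32_t reg_mask(unsigned int reg, uint32_t mask, uint32_t val)
{
	uint32_t r = regs[reg];

	r &= ~mask;
	r |= val;
	regs[reg] = r;
	return r;
}

int main(void)
{
	regs[0] = 0xdead0000;
	printf("set low byte:    %#010x\n", reg_mask(0, 0x000000ff, 0x000000a5));
	printf("clear high bits: %#010x\n", reg_mask(0, 0xffff0000, 0));
	return 0;
}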
@@ -211,19 +237,45 @@ u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
* The system controller has its own registers. Some of these are accessed
* by our users as well, using the following read/mask/write function
*/
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+static inline
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
+}
#define SCTL_SCCTL 0x00 /* System controller control register */
#define SCTL_ARMCFG 0x04 /* ARM configuration register */
#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
+
+#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1)
+#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3)
+#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6)
+#define SCTL_SCPLLCTL_STRB_INPUT BIT(8)
+
#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
+
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4
+
+
#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
+
+#define SCTL_SCRESFRACT_MASK 0x0000ffff
+
+
#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
#define SCTL_SCGRST 0x28 /* Peripheral global reset */
+#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */
#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
@@ -321,4 +373,146 @@ u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
#define SCTL_SCPEREN1_I2C3 (1 << 16)
#define SCTL_SCPEREN1_USB_PHY (1 << 17)
+/*
+ * APB-SOC registers
+ */
+static inline
+u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
+}
+
+#define PCIE_EP1_FUNC3_0_INTR_REG 0x000
+#define PCIE_EP1_FUNC7_4_INTR_REG 0x004
+#define PCIE_EP2_FUNC3_0_INTR_REG 0x008
+#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c
+#define PCIE_EP3_FUNC3_0_INTR_REG 0x010
+#define PCIE_EP3_FUNC7_4_INTR_REG 0x014
+#define PCIE_EP4_FUNC3_0_INTR_REG 0x018
+#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c
+#define PCIE_INTR_ENABLE0_REG 0x020
+#define PCIE_INTR_ENABLE1_REG 0x024
+#define PCIE_EP1_FUNC_TC_REG 0x028
+#define PCIE_EP2_FUNC_TC_REG 0x02c
+#define PCIE_EP3_FUNC_TC_REG 0x030
+#define PCIE_EP4_FUNC_TC_REG 0x034
+#define PCIE_EP1_FUNC_F_REG 0x038
+#define PCIE_EP2_FUNC_F_REG 0x03c
+#define PCIE_EP3_FUNC_F_REG 0x040
+#define PCIE_EP4_FUNC_F_REG 0x044
+#define PCIE_PAB_AMBA_SW_RST_REG 0x048
+#define PCIE_PM_STATUS_0_PORT_0_4 0x04c
+#define PCIE_PM_STATUS_7_0_EP1 0x050
+#define PCIE_PM_STATUS_7_0_EP2 0x054
+#define PCIE_PM_STATUS_7_0_EP3 0x058
+#define PCIE_PM_STATUS_7_0_EP4 0x05c
+#define PCIE_DEV_ID_0_EP1_REG 0x060
+#define PCIE_CC_REV_ID_0_EP1_REG 0x064
+#define PCIE_DEV_ID_1_EP1_REG 0x068
+#define PCIE_CC_REV_ID_1_EP1_REG 0x06c
+#define PCIE_DEV_ID_2_EP1_REG 0x070
+#define PCIE_CC_REV_ID_2_EP1_REG 0x074
+#define PCIE_DEV_ID_3_EP1_REG 0x078
+#define PCIE_CC_REV_ID_3_EP1_REG 0x07c
+#define PCIE_DEV_ID_4_EP1_REG 0x080
+#define PCIE_CC_REV_ID_4_EP1_REG 0x084
+#define PCIE_DEV_ID_5_EP1_REG 0x088
+#define PCIE_CC_REV_ID_5_EP1_REG 0x08c
+#define PCIE_DEV_ID_6_EP1_REG 0x090
+#define PCIE_CC_REV_ID_6_EP1_REG 0x094
+#define PCIE_DEV_ID_7_EP1_REG 0x098
+#define PCIE_CC_REV_ID_7_EP1_REG 0x09c
+#define PCIE_DEV_ID_0_EP2_REG 0x0a0
+#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4
+#define PCIE_DEV_ID_1_EP2_REG 0x0a8
+#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac
+#define PCIE_DEV_ID_2_EP2_REG 0x0b0
+#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4
+#define PCIE_DEV_ID_3_EP2_REG 0x0b8
+#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc
+#define PCIE_DEV_ID_4_EP2_REG 0x0c0
+#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4
+#define PCIE_DEV_ID_5_EP2_REG 0x0c8
+#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc
+#define PCIE_DEV_ID_6_EP2_REG 0x0d0
+#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4
+#define PCIE_DEV_ID_7_EP2_REG 0x0d8
+#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC
+#define PCIE_DEV_ID_0_EP3_REG 0x0e0
+#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4
+#define PCIE_DEV_ID_1_EP3_REG 0x0e8
+#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec
+#define PCIE_DEV_ID_2_EP3_REG 0x0f0
+#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4
+#define PCIE_DEV_ID_3_EP3_REG 0x0f8
+#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc
+#define PCIE_DEV_ID_4_EP3_REG 0x100
+#define PCIE_CC_REV_ID_4_EP3_REG 0x104
+#define PCIE_DEV_ID_5_EP3_REG 0x108
+#define PCIE_CC_REV_ID_5_EP3_REG 0x10c
+#define PCIE_DEV_ID_6_EP3_REG 0x110
+#define PCIE_CC_REV_ID_6_EP3_REG 0x114
+#define PCIE_DEV_ID_7_EP3_REG 0x118
+#define PCIE_CC_REV_ID_7_EP3_REG 0x11c
+#define PCIE_DEV_ID_0_EP4_REG 0x120
+#define PCIE_CC_REV_ID_0_EP4_REG 0x124
+#define PCIE_DEV_ID_1_EP4_REG 0x128
+#define PCIE_CC_REV_ID_1_EP4_REG 0x12c
+#define PCIE_DEV_ID_2_EP4_REG 0x130
+#define PCIE_CC_REV_ID_2_EP4_REG 0x134
+#define PCIE_DEV_ID_3_EP4_REG 0x138
+#define PCIE_CC_REV_ID_3_EP4_REG 0x13c
+#define PCIE_DEV_ID_4_EP4_REG 0x140
+#define PCIE_CC_REV_ID_4_EP4_REG 0x144
+#define PCIE_DEV_ID_5_EP4_REG 0x148
+#define PCIE_CC_REV_ID_5_EP4_REG 0x14c
+#define PCIE_DEV_ID_6_EP4_REG 0x150
+#define PCIE_CC_REV_ID_6_EP4_REG 0x154
+#define PCIE_DEV_ID_7_EP4_REG 0x158
+#define PCIE_CC_REV_ID_7_EP4_REG 0x15c
+#define PCIE_SUBSYS_VEN_ID_REG 0x160
+#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
+#define PCIE_MIPHYP_SSC_EN_REG 0x168
+#define PCIE_MIPHYP_ADDR_REG 0x16c
+#define PCIE_L1_ASPM_READY_REG 0x170
+#define PCIE_EXT_CFG_RDY_REG 0x174
+#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
+#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
+#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
+#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
+#define DMA_IP_CTRL_REG 0x324
+#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328
+#define VIP_PU_PD_CTRL_REG 0x32c
+#define USB_MLB_PU_PD_CTRL_REG 0x330
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338
+#define UART_PU_PD_CTRL_REG 0x33c
+#define ARM_Lock 0x340
+#define SYS_IO_CHAR_REG1 0x344
+#define SYS_IO_CHAR_REG2 0x348
+#define SATA_CORE_ID_REG 0x34c
+#define SATA_CTRL_REG 0x350
+#define I2C_HSFIX_MISC_REG 0x354
+#define SPARE2_RESERVED 0x358
+#define SPARE3_RESERVED 0x35c
+#define MASTER_LOCK_REG 0x368
+#define SYSTEM_CONFIG_STATUS_REG 0x36c
+#define MSP_CLK_CTRL_REG 0x39c
+#define COMPENSATION_REG1 0x3c4
+#define COMPENSATION_REG2 0x3c8
+#define COMPENSATION_REG3 0x3cc
+#define TEST_CTL_REG 0x3d0
+
+/*
+ * SECR (OTP) registers
+ */
+#define STA2X11_SECR_CR 0x00
+#define STA2X11_SECR_FVR0 0x10
+#define STA2X11_SECR_FVR1 0x14
+
+extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
+ enum sta2x11_mfd_plat_dev index,
+ void __iomem **regs,
+ spinlock_t **lock);
+
#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index f8d5b4d5843..383ac1512a3 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -62,6 +62,7 @@ struct stmpe_client_info;
* @lock: lock protecting I/O operations
* @irq_lock: IRQ bus lock
* @dev: device, mostly for dev_dbg()
+ * @irq_domain: IRQ domain
* @client: client - i2c or spi
* @ci: client specific information
* @partnum: part number
@@ -79,6 +80,7 @@ struct stmpe {
struct mutex lock;
struct mutex irq_lock;
struct device *dev;
+ struct irq_domain *domain;
void *client;
struct stmpe_client_info *ci;
enum stmpe_partnum partnum;
@@ -188,7 +190,6 @@ struct stmpe_ts_platform_data {
* @id: device id to distinguish between multiple STMPEs on the same board
* @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
* @irq_trigger: IRQ trigger to use for the interrupt to the host
- * @irq_invert_polarity: IRQ line is connected with reversed polarity
* @autosleep: bool to enable/disable stmpe autosleep
* @autosleep_timeout: inactivity timeout in milliseconds for autosleep
* @irq_base: base IRQ number. %STMPE_NR_IRQS irqs will be used, or
@@ -205,7 +206,6 @@ struct stmpe_platform_data {
unsigned int blocks;
int irq_base;
unsigned int irq_trigger;
- bool irq_invert_polarity;
bool autosleep;
bool irq_over_gpio;
int irq_gpio;
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
new file mode 100644
index 00000000000..c79ad5d2f27
--- /dev/null
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -0,0 +1,152 @@
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/core.h>
+
+#define REG_RAWIRQSTATUS 0x024
+#define REG_IRQSTATUS 0x028
+#define REG_IRQENABLE 0x02C
+#define REG_IRQCLR 0x030
+#define REG_IRQWAKEUP 0x034
+#define REG_CTRL 0x040
+#define REG_ADCFSM 0x044
+#define REG_CLKDIV 0x04C
+#define REG_SE 0x054
+#define REG_IDLECONFIG 0x058
+#define REG_CHARGECONFIG 0x05C
+#define REG_CHARGEDELAY 0x060
+#define REG_STEPCONFIG(n) (0x64 + ((n - 1) * 8))
+#define REG_STEPDELAY(n) (0x68 + ((n - 1) * 8))
+#define REG_FIFO0CNT 0xE4
+#define REG_FIFO0THR 0xE8
+#define REG_FIFO1CNT 0xF0
+#define REG_FIFO1THR 0xF4
+#define REG_FIFO0 0x100
+#define REG_FIFO1 0x200
+
+/* Register Bitfields */
+/* IRQ wakeup enable */
+#define IRQWKUP_ENB BIT(0)
+
+/* Step Enable */
+#define STEPENB_MASK (0x1FFFF << 0)
+#define STEPENB(val) ((val) << 0)
+#define STPENB_STEPENB STEPENB(0x1FFFF)
+#define STPENB_STEPENB_TC STEPENB(0x1FFF)
+
+/* IRQ enable */
+#define IRQENB_HW_PEN BIT(0)
+#define IRQENB_FIFO0THRES BIT(2)
+#define IRQENB_FIFO1THRES BIT(5)
+#define IRQENB_PENUP BIT(9)
+
+/* Step Configuration */
+#define STEPCONFIG_MODE_MASK (3 << 0)
+#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
+#define STEPCONFIG_AVG_MASK (7 << 2)
+#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
+#define STEPCONFIG_XPP BIT(5)
+#define STEPCONFIG_XNN BIT(6)
+#define STEPCONFIG_YPP BIT(7)
+#define STEPCONFIG_YNN BIT(8)
+#define STEPCONFIG_XNP BIT(9)
+#define STEPCONFIG_YPN BIT(10)
+#define STEPCONFIG_INM_MASK (0xF << 15)
+#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
+#define STEPCONFIG_INP_MASK (0xF << 19)
+#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP_AN2 STEPCONFIG_INP(2)
+#define STEPCONFIG_INP_AN3 STEPCONFIG_INP(3)
+#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
+#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
+#define STEPCONFIG_FIFO1 BIT(26)
+
+/* Delay register */
+#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
+#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
+#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
+#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+
+/* Charge Config */
+#define STEPCHARGE_RFP_MASK (7 << 12)
+#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
+#define STEPCHARGE_INM_MASK (0xF << 15)
+#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
+#define STEPCHARGE_INP_MASK (0xF << 19)
+#define STEPCHARGE_INP(val) ((val) << 19)
+#define STEPCHARGE_INP_AN1 STEPCHARGE_INP(1)
+#define STEPCHARGE_RFM_MASK (3 << 23)
+#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
+
+/* Charge delay */
+#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
+#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(1)
+
+/* Control register */
+#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_STEPID BIT(1)
+#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_POWERDOWN BIT(4)
+#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
+#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
+#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
+#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
+#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
+#define CNTRLREG_TSCENB BIT(7)
+
+#define ADC_CLK 3000000
+#define MAX_CLK_DIV 7
+#define TOTAL_STEPS 16
+#define TOTAL_CHANNELS 8
+
+#define TSCADC_CELLS 2
+
+enum tscadc_cells {
+ TSC_CELL,
+ ADC_CELL,
+};
+
+struct mfd_tscadc_board {
+ struct tsc_data *tsc_init;
+ struct adc_data *adc_init;
+};
+
+struct ti_tscadc_dev {
+ struct device *dev;
+ struct regmap *regmap_tscadc;
+ void __iomem *tscadc_base;
+ int irq;
+ struct mfd_cell cells[TSCADC_CELLS];
+
+ /* tsc device */
+ struct titsc *tsc;
+
+ /* adc device */
+ struct adc_device *adc;
+};
+
+#endif
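
The STEPCONFIG_*/STEPDELAY_* families in the new header are bitfield builders: each macro shifts a value into its field and callers OR the pieces into one register word. A small standalone composition using definitions copied from the header above (the particular field values chosen here are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Definitions copied from the header above. */
#define BIT(n)			(1u << (n))
#define STEPCONFIG_MODE(val)	((val) << 0)
#define STEPCONFIG_MODE_HWSYNC	STEPCONFIG_MODE(2)
#define STEPCONFIG_AVG(val)	((val) << 2)
#define STEPCONFIG_AVG_16	STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP		BIT(5)
#define STEPCONFIG_INP(val)	((val) << 19)
#define STEPCONFIG_INP_AN4	STEPCONFIG_INP(4)
#define REG_STEPCONFIG(n)	(0x64 + ((n - 1) * 8))

int main(void)
{
	/* Compose one touchscreen step: hardware-synchronized, 16-sample
	 * averaging, X+ driver on, positive input on AN4. */
	uint32_t cfg = STEPCONFIG_MODE_HWSYNC | STEPCONFIG_AVG_16 |
		       STEPCONFIG_XPP | STEPCONFIG_INP_AN4;

	printf("step 1 config register: %#x\n", REG_STEPCONFIG(1));
	printf("step config value:      %#x\n", cfg);
	return 0;
}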
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 6bc31d85462..6694cf43e8b 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -23,37 +23,109 @@
#define __LINUX_MFD_TPS65090_H
#include <linux/irq.h>
+#include <linux/regmap.h>
+
+/* TPS65090 IRQs */
+enum {
+ TPS65090_IRQ_VAC_STATUS_CHANGE,
+ TPS65090_IRQ_VSYS_STATUS_CHANGE,
+ TPS65090_IRQ_BAT_STATUS_CHANGE,
+ TPS65090_IRQ_CHARGING_STATUS_CHANGE,
+ TPS65090_IRQ_CHARGING_COMPLETE,
+ TPS65090_IRQ_OVERLOAD_DCDC1,
+ TPS65090_IRQ_OVERLOAD_DCDC2,
+ TPS65090_IRQ_OVERLOAD_DCDC3,
+ TPS65090_IRQ_OVERLOAD_FET1,
+ TPS65090_IRQ_OVERLOAD_FET2,
+ TPS65090_IRQ_OVERLOAD_FET3,
+ TPS65090_IRQ_OVERLOAD_FET4,
+ TPS65090_IRQ_OVERLOAD_FET5,
+ TPS65090_IRQ_OVERLOAD_FET6,
+ TPS65090_IRQ_OVERLOAD_FET7,
+};
+
+/* TPS65090 Regulator ID */
+enum {
+ TPS65090_REGULATOR_DCDC1,
+ TPS65090_REGULATOR_DCDC2,
+ TPS65090_REGULATOR_DCDC3,
+ TPS65090_REGULATOR_FET1,
+ TPS65090_REGULATOR_FET2,
+ TPS65090_REGULATOR_FET3,
+ TPS65090_REGULATOR_FET4,
+ TPS65090_REGULATOR_FET5,
+ TPS65090_REGULATOR_FET6,
+ TPS65090_REGULATOR_FET7,
+ TPS65090_REGULATOR_LDO1,
+ TPS65090_REGULATOR_LDO2,
+
+ /* Last entry for maximum ID */
+ TPS65090_REGULATOR_MAX,
+};
struct tps65090 {
- struct mutex lock;
struct device *dev;
- struct i2c_client *client;
struct regmap *rmap;
- struct irq_chip irq_chip;
- struct mutex irq_lock;
- int irq_base;
- unsigned int id;
+ struct regmap_irq_chip_data *irq_data;
};
-struct tps65090_subdev_info {
- int id;
- const char *name;
- void *platform_data;
+/*
+ * struct tps65090_regulator_plat_data
+ *
+ * @reg_init_data: The regulator init data.
+ * @enable_ext_control: Enable external control or not. Only available for
+ * DCDC1, DCDC2 and DCDC3.
+ * @gpio: Gpio number if external control is enabled and controlled through
+ * gpio.
+ */
+struct tps65090_regulator_plat_data {
+ struct regulator_init_data *reg_init_data;
+ bool enable_ext_control;
+ int gpio;
};
struct tps65090_platform_data {
int irq_base;
- int num_subdevs;
- struct tps65090_subdev_info *subdevs;
+ struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX];
};
/*
* NOTE: the functions below are not intended for use outside
* of the TPS65090 sub-device drivers
*/
-extern int tps65090_write(struct device *dev, int reg, uint8_t val);
-extern int tps65090_read(struct device *dev, int reg, uint8_t *val);
-extern int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num);
-extern int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num);
+static inline int tps65090_write(struct device *dev, int reg, uint8_t val)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_write(tps->rmap, reg, val);
+}
+
+static inline int tps65090_read(struct device *dev, int reg, uint8_t *val)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+ unsigned int temp_val;
+ int ret;
+
+ ret = regmap_read(tps->rmap, reg, &temp_val);
+ if (!ret)
+ *val = temp_val;
+ return ret;
+}
+
+static inline int tps65090_set_bits(struct device *dev, int reg,
+ uint8_t bit_num)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
+}
+
+static inline int tps65090_clr_bits(struct device *dev, int reg,
+ uint8_t bit_num)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
+}
#endif /*__LINUX_MFD_TPS65090_H */
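With the regmap conversion above, the tps65090_* accessors become static inlines that expect the MFD parent device (whose drvdata is the struct tps65090) rather than exported symbols. A minimal sketch of how a hypothetical sub-driver might use them; the parent pointer, register and bit numbers below are placeholders, not values from the patch:

#include <linux/device.h>
#include <linux/mfd/tps65090.h>

/* Hypothetical helper: enable one FET switch by setting its enable bit.
 * "parent" must be the TPS65090 MFD device; ctrl_reg/en_bit are placeholders. */
static int example_tps65090_fet_enable(struct device *parent, int ctrl_reg,
					int en_bit)
{
	return tps65090_set_bits(parent, ctrl_reg, en_bit);
}

/* Reading back a register goes through the same regmap. */
static int example_tps65090_read_status(struct device *parent, int status_reg,
					uint8_t *val)
{
	return tps65090_read(parent, status_reg, val);
}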
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index 2dd12319495..87994542573 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -29,6 +29,7 @@ enum {
TPS6586X_ID_LDO_8,
TPS6586X_ID_LDO_9,
TPS6586X_ID_LDO_RTC,
+ TPS6586X_ID_MAX_REGULATOR,
};
enum {
@@ -79,6 +80,8 @@ struct tps6586x_platform_data {
int gpio_base;
int irq_base;
bool pm_off;
+
+ struct regulator_init_data *reg_init_data[TPS6586X_ID_MAX_REGULATOR];
};
/*
@@ -93,5 +96,6 @@ extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
uint8_t mask);
+extern int tps6586x_irq_get_virq(struct device *dev, int irq);
#endif /*__LINUX_MFD_TPS6586X_H */
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 02e894f3ff4..20e433e551e 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -572,6 +572,49 @@
#define SPARE_SPARE_MASK 0xFF
#define SPARE_SPARE_SHIFT 0
+#define TPS65910_INT_STS_RTC_PERIOD_IT_MASK 0x80
+#define TPS65910_INT_STS_RTC_PERIOD_IT_SHIFT 7
+#define TPS65910_INT_STS_RTC_ALARM_IT_MASK 0x40
+#define TPS65910_INT_STS_RTC_ALARM_IT_SHIFT 6
+#define TPS65910_INT_STS_HOTDIE_IT_MASK 0x20
+#define TPS65910_INT_STS_HOTDIE_IT_SHIFT 5
+#define TPS65910_INT_STS_PWRHOLD_F_IT_MASK 0x10
+#define TPS65910_INT_STS_PWRHOLD_F_IT_SHIFT 4
+#define TPS65910_INT_STS_PWRON_LP_IT_MASK 0x08
+#define TPS65910_INT_STS_PWRON_LP_IT_SHIFT 3
+#define TPS65910_INT_STS_PWRON_IT_MASK 0x04
+#define TPS65910_INT_STS_PWRON_IT_SHIFT 2
+#define TPS65910_INT_STS_VMBHI_IT_MASK 0x02
+#define TPS65910_INT_STS_VMBHI_IT_SHIFT 1
+#define TPS65910_INT_STS_VMBDCH_IT_MASK 0x01
+#define TPS65910_INT_STS_VMBDCH_IT_SHIFT 0
+
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK 0x20
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_SHIFT 5
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
+#define TPS65910_INT_MSK_PWRON_IT_MSK_MASK 0x04
+#define TPS65910_INT_MSK_PWRON_IT_MSK_SHIFT 2
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_MASK 0x02
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_SHIFT 1
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK 0x01
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_SHIFT 0
+
+#define TPS65910_INT_STS2_GPIO0_F_IT_SHIFT 2
+#define TPS65910_INT_STS2_GPIO0_F_IT_MASK 0x02
+#define TPS65910_INT_STS2_GPIO0_R_IT_SHIFT 1
+#define TPS65910_INT_STS2_GPIO0_R_IT_MASK 0x01
+
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_SHIFT 2
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_SHIFT 1
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
/*Register INT_STS (0x80) register.RegisterDescription */
#define INT_STS_RTC_PERIOD_IT_MASK 0x80
@@ -580,16 +623,16 @@
#define INT_STS_RTC_ALARM_IT_SHIFT 6
#define INT_STS_HOTDIE_IT_MASK 0x20
#define INT_STS_HOTDIE_IT_SHIFT 5
-#define INT_STS_PWRHOLD_IT_MASK 0x10
-#define INT_STS_PWRHOLD_IT_SHIFT 4
+#define INT_STS_PWRHOLD_R_IT_MASK 0x10
+#define INT_STS_PWRHOLD_R_IT_SHIFT 4
#define INT_STS_PWRON_LP_IT_MASK 0x08
#define INT_STS_PWRON_LP_IT_SHIFT 3
#define INT_STS_PWRON_IT_MASK 0x04
#define INT_STS_PWRON_IT_SHIFT 2
#define INT_STS_VMBHI_IT_MASK 0x02
#define INT_STS_VMBHI_IT_SHIFT 1
-#define INT_STS_VMBDCH_IT_MASK 0x01
-#define INT_STS_VMBDCH_IT_SHIFT 0
+#define INT_STS_PWRHOLD_F_IT_MASK 0x01
+#define INT_STS_PWRHOLD_F_IT_SHIFT 0
/*Register INT_MSK (0x80) register.RegisterDescription */
@@ -599,16 +642,16 @@
#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20
#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5
-#define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
-#define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
+#define INT_MSK_PWRHOLD_R_IT_MSK_MASK 0x10
+#define INT_MSK_PWRHOLD_R_IT_MSK_SHIFT 4
#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
#define INT_MSK_PWRON_IT_MSK_MASK 0x04
#define INT_MSK_PWRON_IT_MSK_SHIFT 2
#define INT_MSK_VMBHI_IT_MSK_MASK 0x02
#define INT_MSK_VMBHI_IT_MSK_SHIFT 1
-#define INT_MSK_VMBDCH_IT_MSK_MASK 0x01
-#define INT_MSK_VMBDCH_IT_MSK_SHIFT 0
+#define INT_MSK_PWRHOLD_F_IT_MSK_MASK 0x01
+#define INT_MSK_PWRHOLD_F_IT_MSK_SHIFT 0
/*Register INT_STS2 (0x80) register.RegisterDescription */
@@ -650,6 +693,14 @@
/*Register INT_STS3 (0x80) register.RegisterDescription */
+#define INT_STS3_PWRDN_IT_MASK 0x80
+#define INT_STS3_PWRDN_IT_SHIFT 7
+#define INT_STS3_VMBCH2_L_IT_MASK 0x40
+#define INT_STS3_VMBCH2_L_IT_SHIFT 6
+#define INT_STS3_VMBCH2_H_IT_MASK 0x20
+#define INT_STS3_VMBCH2_H_IT_SHIFT 5
+#define INT_STS3_WTCHDG_IT_MASK 0x10
+#define INT_STS3_WTCHDG_IT_SHIFT 4
#define INT_STS3_GPIO5_F_IT_MASK 0x08
#define INT_STS3_GPIO5_F_IT_SHIFT 3
#define INT_STS3_GPIO5_R_IT_MASK 0x04
@@ -661,6 +712,14 @@
/*Register INT_MSK3 (0x80) register.RegisterDescription */
+#define INT_MSK3_PWRDN_IT_MSK_MASK 0x80
+#define INT_MSK3_PWRDN_IT_MSK_SHIFT 7
+#define INT_MSK3_VMBCH2_L_IT_MSK_MASK 0x40
+#define INT_MSK3_VMBCH2_L_IT_MSK_SHIFT 6
+#define INT_MSK3_VMBCH2_H_IT_MSK_MASK 0x20
+#define INT_MSK3_VMBCH2_H_IT_MSK_SHIFT 5
+#define INT_MSK3_WTCHDG_IT_MSK_MASK 0x10
+#define INT_MSK3_WTCHDG_IT_MSK_SHIFT 4
#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08
#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3
#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04
@@ -721,34 +780,32 @@
#define TPS65910_IRQ_GPIO_F 9
#define TPS65910_NUM_IRQ 10
-#define TPS65911_IRQ_VBAT_VMBDCH 0
-#define TPS65911_IRQ_VBAT_VMBDCH2L 1
-#define TPS65911_IRQ_VBAT_VMBDCH2H 2
-#define TPS65911_IRQ_VBAT_VMHI 3
-#define TPS65911_IRQ_PWRON 4
-#define TPS65911_IRQ_PWRON_LP 5
-#define TPS65911_IRQ_PWRHOLD_F 6
-#define TPS65911_IRQ_PWRHOLD_R 7
-#define TPS65911_IRQ_HOTDIE 8
-#define TPS65911_IRQ_RTC_ALARM 9
-#define TPS65911_IRQ_RTC_PERIOD 10
-#define TPS65911_IRQ_GPIO0_R 11
-#define TPS65911_IRQ_GPIO0_F 12
-#define TPS65911_IRQ_GPIO1_R 13
-#define TPS65911_IRQ_GPIO1_F 14
-#define TPS65911_IRQ_GPIO2_R 15
-#define TPS65911_IRQ_GPIO2_F 16
-#define TPS65911_IRQ_GPIO3_R 17
-#define TPS65911_IRQ_GPIO3_F 18
-#define TPS65911_IRQ_GPIO4_R 19
-#define TPS65911_IRQ_GPIO4_F 20
-#define TPS65911_IRQ_GPIO5_R 21
-#define TPS65911_IRQ_GPIO5_F 22
-#define TPS65911_IRQ_WTCHDG 23
-#define TPS65911_IRQ_PWRDN 24
-
-#define TPS65911_NUM_IRQ 25
-
+#define TPS65911_IRQ_PWRHOLD_F 0
+#define TPS65911_IRQ_VBAT_VMHI 1
+#define TPS65911_IRQ_PWRON 2
+#define TPS65911_IRQ_PWRON_LP 3
+#define TPS65911_IRQ_PWRHOLD_R 4
+#define TPS65911_IRQ_HOTDIE 5
+#define TPS65911_IRQ_RTC_ALARM 6
+#define TPS65911_IRQ_RTC_PERIOD 7
+#define TPS65911_IRQ_GPIO0_R 8
+#define TPS65911_IRQ_GPIO0_F 9
+#define TPS65911_IRQ_GPIO1_R 10
+#define TPS65911_IRQ_GPIO1_F 11
+#define TPS65911_IRQ_GPIO2_R 12
+#define TPS65911_IRQ_GPIO2_F 13
+#define TPS65911_IRQ_GPIO3_R 14
+#define TPS65911_IRQ_GPIO3_F 15
+#define TPS65911_IRQ_GPIO4_R 16
+#define TPS65911_IRQ_GPIO4_F 17
+#define TPS65911_IRQ_GPIO5_R 18
+#define TPS65911_IRQ_GPIO5_F 19
+#define TPS65911_IRQ_WTCHDG 20
+#define TPS65911_IRQ_VMBCH2_H 21
+#define TPS65911_IRQ_VMBCH2_L 22
+#define TPS65911_IRQ_PWRDN 23
+
+#define TPS65911_NUM_IRQ 24
/* GPIO Register Definitions */
#define TPS65910_GPIO_DEB BIT(2)
@@ -836,7 +893,6 @@ struct tps65910 {
struct device *dev;
struct i2c_client *i2c_client;
struct regmap *regmap;
- struct mutex io_mutex;
unsigned int id;
/* Client devices */
@@ -848,12 +904,8 @@ struct tps65910 {
struct tps65910_board *of_plat_data;
/* IRQ Handling */
- struct mutex irq_lock;
int chip_irq;
- int irq_base;
- int irq_num;
- u32 irq_mask;
- struct irq_domain *domain;
+ struct regmap_irq_chip_data *irq_data;
};
struct tps65910_platform_data {
@@ -861,10 +913,6 @@ struct tps65910_platform_data {
int irq_base;
};
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
- struct tps65910_platform_data *pdata);
-int tps65910_irq_exit(struct tps65910 *tps65910);
-
static inline int tps65910_chip_id(struct tps65910 *tps65910)
{
return tps65910->id;
@@ -900,4 +948,9 @@ static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
return regmap_update_bits(tps65910->regmap, reg, mask, val);
}
+static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
+{
+ return regmap_irq_get_virq(tps65910->irq_data, irq);
+}
+
#endif /* __LINUX_MFD_TPS65910_H */
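Since IRQ handling now goes through regmap-irq, sub-drivers look up their Linux interrupt number from the chip-local index with tps65910_irq_get_virq() instead of adding an offset to irq_base. A sketch under the assumption that the caller is something like the RTC sub-driver; the handler, name string and data pointer are illustrative:

#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>

/* Hypothetical: map the RTC alarm interrupt to a virq and request it. */
static int example_request_rtc_alarm(struct tps65910 *tps,
				     irq_handler_t thread_fn, void *data)
{
	int virq = tps65910_irq_get_virq(tps, TPS65910_IRQ_RTC_ALARM);

	if (virq <= 0)
		return -ENXIO;

	return request_threaded_irq(virq, NULL, thread_fn, IRQF_ONESHOT,
				    "tps65910-rtc-alarm", data);
}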
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
new file mode 100644
index 00000000000..2c75c9c9318
--- /dev/null
+++ b/include/linux/mfd/tps80031.h
@@ -0,0 +1,637 @@
+/*
+ * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __LINUX_MFD_TPS80031_H
+#define __LINUX_MFD_TPS80031_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* Pull-ups/Pull-downs */
+#define TPS80031_CFG_INPUT_PUPD1 0xF0
+#define TPS80031_CFG_INPUT_PUPD2 0xF1
+#define TPS80031_CFG_INPUT_PUPD3 0xF2
+#define TPS80031_CFG_INPUT_PUPD4 0xF3
+#define TPS80031_CFG_LDO_PD1 0xF4
+#define TPS80031_CFG_LDO_PD2 0xF5
+#define TPS80031_CFG_SMPS_PD 0xF6
+
+/* Real Time Clock */
+#define TPS80031_SECONDS_REG 0x00
+#define TPS80031_MINUTES_REG 0x01
+#define TPS80031_HOURS_REG 0x02
+#define TPS80031_DAYS_REG 0x03
+#define TPS80031_MONTHS_REG 0x04
+#define TPS80031_YEARS_REG 0x05
+#define TPS80031_WEEKS_REG 0x06
+#define TPS80031_ALARM_SECONDS_REG 0x08
+#define TPS80031_ALARM_MINUTES_REG 0x09
+#define TPS80031_ALARM_HOURS_REG 0x0A
+#define TPS80031_ALARM_DAYS_REG 0x0B
+#define TPS80031_ALARM_MONTHS_REG 0x0C
+#define TPS80031_ALARM_YEARS_REG 0x0D
+#define TPS80031_RTC_CTRL_REG 0x10
+#define TPS80031_RTC_STATUS_REG 0x11
+#define TPS80031_RTC_INTERRUPTS_REG 0x12
+#define TPS80031_RTC_COMP_LSB_REG 0x13
+#define TPS80031_RTC_COMP_MSB_REG 0x14
+#define TPS80031_RTC_RESET_STATUS_REG 0x16
+
+/*PMC Master Module */
+#define TPS80031_PHOENIX_START_CONDITION 0x1F
+#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
+#define TPS80031_STS_HW_CONDITIONS 0x21
+#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
+#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
+#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
+#define TPS80031_PHOENIX_DEV_ON 0x25
+#define TPS80031_STS_PWR_GRP_STATE 0x27
+#define TPS80031_PH_CFG_VSYSLOW 0x28
+#define TPS80031_PH_STS_BOOT 0x29
+#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
+#define TPS80031_PHOENIX_SEQ_CFG 0x2B
+#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
+#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
+#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
+
+/* PMC Slave Module - Broadcast */
+#define TPS80031_BROADCAST_ADDR_ALL 0x31
+#define TPS80031_BROADCAST_ADDR_REF 0x32
+#define TPS80031_BROADCAST_ADDR_PROV 0x33
+#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
+
+/* PMC Slave Module SMPS Regulators */
+#define TPS80031_SMPS4_CFG_TRANS 0x41
+#define TPS80031_SMPS4_CFG_STATE 0x42
+#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
+#define TPS80031_VIO_CFG_TRANS 0x47
+#define TPS80031_VIO_CFG_STATE 0x48
+#define TPS80031_VIO_CFG_FORCE 0x49
+#define TPS80031_VIO_CFG_VOLTAGE 0x4A
+#define TPS80031_VIO_CFG_STEP 0x48
+#define TPS80031_SMPS1_CFG_TRANS 0x53
+#define TPS80031_SMPS1_CFG_STATE 0x54
+#define TPS80031_SMPS1_CFG_FORCE 0x55
+#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
+#define TPS80031_SMPS1_CFG_STEP 0x57
+#define TPS80031_SMPS2_CFG_TRANS 0x59
+#define TPS80031_SMPS2_CFG_STATE 0x5A
+#define TPS80031_SMPS2_CFG_FORCE 0x5B
+#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
+#define TPS80031_SMPS2_CFG_STEP 0x5D
+#define TPS80031_SMPS3_CFG_TRANS 0x65
+#define TPS80031_SMPS3_CFG_STATE 0x66
+#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
+
+/* PMC Slave Module LDO Regulators */
+#define TPS80031_VANA_CFG_TRANS 0x81
+#define TPS80031_VANA_CFG_STATE 0x82
+#define TPS80031_VANA_CFG_VOLTAGE 0x83
+#define TPS80031_LDO2_CFG_TRANS 0x85
+#define TPS80031_LDO2_CFG_STATE 0x86
+#define TPS80031_LDO2_CFG_VOLTAGE 0x87
+#define TPS80031_LDO4_CFG_TRANS 0x89
+#define TPS80031_LDO4_CFG_STATE 0x8A
+#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
+#define TPS80031_LDO3_CFG_TRANS 0x8D
+#define TPS80031_LDO3_CFG_STATE 0x8E
+#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
+#define TPS80031_LDO6_CFG_TRANS 0x91
+#define TPS80031_LDO6_CFG_STATE 0x92
+#define TPS80031_LDO6_CFG_VOLTAGE 0x93
+#define TPS80031_LDOLN_CFG_TRANS 0x95
+#define TPS80031_LDOLN_CFG_STATE 0x96
+#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
+#define TPS80031_LDO5_CFG_TRANS 0x99
+#define TPS80031_LDO5_CFG_STATE 0x9A
+#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
+#define TPS80031_LDO1_CFG_TRANS 0x9D
+#define TPS80031_LDO1_CFG_STATE 0x9E
+#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
+#define TPS80031_LDOUSB_CFG_TRANS 0xA1
+#define TPS80031_LDOUSB_CFG_STATE 0xA2
+#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
+#define TPS80031_LDO7_CFG_TRANS 0xA5
+#define TPS80031_LDO7_CFG_STATE 0xA6
+#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
+
+/* PMC Slave Module External Control */
+#define TPS80031_REGEN1_CFG_TRANS 0xAE
+#define TPS80031_REGEN1_CFG_STATE 0xAF
+#define TPS80031_REGEN2_CFG_TRANS 0xB1
+#define TPS80031_REGEN2_CFG_STATE 0xB2
+#define TPS80031_SYSEN_CFG_TRANS 0xB4
+#define TPS80031_SYSEN_CFG_STATE 0xB5
+
+/* PMC Slave Module Internal Control */
+#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
+#define TPS80031_NRESPWRON_CFG_STATE 0xB8
+#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
+#define TPS80031_CLK32KAO_CFG_STATE 0xBB
+#define TPS80031_CLK32KG_CFG_TRANS 0xBD
+#define TPS80031_CLK32KG_CFG_STATE 0xBE
+#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
+#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
+#define TPS80031_VRTC_CFG_TRANS 0xC3
+#define TPS80031_VRTC_CFG_STATE 0xC4
+#define TPS80031_BIAS_CFG_TRANS 0xC6
+#define TPS80031_BIAS_CFG_STATE 0xC7
+#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
+#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
+#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
+#define TPS80031_RC6MHZ_CFG_STATE 0xCD
+#define TPS80031_TMP_CFG_TRANS 0xCF
+#define TPS80031_TMP_CFG_STATE 0xD0
+
+/* PMC Slave Module resources assignment */
+#define TPS80031_PREQ1_RES_ASS_A 0xD7
+#define TPS80031_PREQ1_RES_ASS_B 0xD8
+#define TPS80031_PREQ1_RES_ASS_C 0xD9
+#define TPS80031_PREQ2_RES_ASS_A 0xDA
+#define TPS80031_PREQ2_RES_ASS_B 0xDB
+#define TPS80031_PREQ2_RES_ASS_C 0xDC
+#define TPS80031_PREQ3_RES_ASS_A 0xDD
+#define TPS80031_PREQ3_RES_ASS_B 0xDE
+#define TPS80031_PREQ3_RES_ASS_C 0xDF
+
+/* PMC Slave Module Miscellaneous */
+#define TPS80031_SMPS_OFFSET 0xE0
+#define TPS80031_SMPS_MULT 0xE3
+#define TPS80031_MISC1 0xE4
+#define TPS80031_MISC2 0xE5
+#define TPS80031_BBSPOR_CFG 0xE6
+#define TPS80031_TMP_CFG 0xE7
+
+/* Battery Charging Controller and Indicator LED */
+#define TPS80031_CONTROLLER_CTRL2 0xDA
+#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
+#define TPS80031_CHARGERUSB_VSYSREG 0xDC
+#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
+#define TPS80031_LINEAR_CHRG_STS 0xDE
+#define TPS80031_CONTROLLER_INT_MASK 0xE0
+#define TPS80031_CONTROLLER_CTRL1 0xE1
+#define TPS80031_CONTROLLER_WDG 0xE2
+#define TPS80031_CONTROLLER_STAT1 0xE3
+#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
+#define TPS80031_CHARGERUSB_INT_MASK 0xE5
+#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
+#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
+#define TPS80031_CHARGERUSB_CTRL1 0xE8
+#define TPS80031_CHARGERUSB_CTRL2 0xE9
+#define TPS80031_CHARGERUSB_CTRL3 0xEA
+#define TPS80031_CHARGERUSB_STAT1 0xEB
+#define TPS80031_CHARGERUSB_VOREG 0xEC
+#define TPS80031_CHARGERUSB_VICHRG 0xED
+#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
+#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
+#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
+#define TPS80031_LED_PWM_CTRL1 0xF4
+#define TPS80031_LED_PWM_CTRL2 0xF5
+
+/* USB On-The-Go */
+#define TPS80031_BACKUP_REG 0xFA
+#define TPS80031_USB_VENDOR_ID_LSB 0x00
+#define TPS80031_USB_VENDOR_ID_MSB 0x01
+#define TPS80031_USB_PRODUCT_ID_LSB 0x02
+#define TPS80031_USB_PRODUCT_ID_MSB 0x03
+#define TPS80031_USB_VBUS_CTRL_SET 0x04
+#define TPS80031_USB_VBUS_CTRL_CLR 0x05
+#define TPS80031_USB_ID_CTRL_SET 0x06
+#define TPS80031_USB_ID_CTRL_CLR 0x07
+#define TPS80031_USB_VBUS_INT_SRC 0x08
+#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
+#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
+#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
+#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
+#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
+#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
+#define TPS80031_USB_ID_INT_SRC 0x0F
+#define TPS80031_USB_ID_INT_LATCH_SET 0x10
+#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
+#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
+#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
+#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
+#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
+#define TPS80031_USB_OTG_ADP_CTRL 0x16
+#define TPS80031_USB_OTG_ADP_HIGH 0x17
+#define TPS80031_USB_OTG_ADP_LOW 0x18
+#define TPS80031_USB_OTG_ADP_RISE 0x19
+#define TPS80031_USB_OTG_REVISION 0x1A
+
+/* Gas Gauge */
+#define TPS80031_FG_REG_00 0xC0
+#define TPS80031_FG_REG_01 0xC1
+#define TPS80031_FG_REG_02 0xC2
+#define TPS80031_FG_REG_03 0xC3
+#define TPS80031_FG_REG_04 0xC4
+#define TPS80031_FG_REG_05 0xC5
+#define TPS80031_FG_REG_06 0xC6
+#define TPS80031_FG_REG_07 0xC7
+#define TPS80031_FG_REG_08 0xC8
+#define TPS80031_FG_REG_09 0xC9
+#define TPS80031_FG_REG_10 0xCA
+#define TPS80031_FG_REG_11 0xCB
+
+/* General Purpose ADC */
+#define TPS80031_GPADC_CTRL 0x2E
+#define TPS80031_GPADC_CTRL2 0x2F
+#define TPS80031_RTSELECT_LSB 0x32
+#define TPS80031_RTSELECT_ISB 0x33
+#define TPS80031_RTSELECT_MSB 0x34
+#define TPS80031_GPSELECT_ISB 0x35
+#define TPS80031_CTRL_P1 0x36
+#define TPS80031_RTCH0_LSB 0x37
+#define TPS80031_RTCH0_MSB 0x38
+#define TPS80031_RTCH1_LSB 0x39
+#define TPS80031_RTCH1_MSB 0x3A
+#define TPS80031_GPCH0_LSB 0x3B
+#define TPS80031_GPCH0_MSB 0x3C
+
+/* SIM, MMC and Battery Detection */
+#define TPS80031_SIMDEBOUNCING 0xEB
+#define TPS80031_SIMCTRL 0xEC
+#define TPS80031_MMCDEBOUNCING 0xED
+#define TPS80031_MMCCTRL 0xEE
+#define TPS80031_BATDEBOUNCING 0xEF
+
+/* Vibrator Driver and PWMs */
+#define TPS80031_VIBCTRL 0x9B
+#define TPS80031_VIBMODE 0x9C
+#define TPS80031_PWM1ON 0xBA
+#define TPS80031_PWM1OFF 0xBB
+#define TPS80031_PWM2ON 0xBD
+#define TPS80031_PWM2OFF 0xBE
+
+/* Control Interface */
+#define TPS80031_INT_STS_A 0xD0
+#define TPS80031_INT_STS_B 0xD1
+#define TPS80031_INT_STS_C 0xD2
+#define TPS80031_INT_MSK_LINE_A 0xD3
+#define TPS80031_INT_MSK_LINE_B 0xD4
+#define TPS80031_INT_MSK_LINE_C 0xD5
+#define TPS80031_INT_MSK_STS_A 0xD6
+#define TPS80031_INT_MSK_STS_B 0xD7
+#define TPS80031_INT_MSK_STS_C 0xD8
+#define TPS80031_TOGGLE1 0x90
+#define TPS80031_TOGGLE2 0x91
+#define TPS80031_TOGGLE3 0x92
+#define TPS80031_PWDNSTATUS1 0x93
+#define TPS80031_PWDNSTATUS2 0x94
+#define TPS80031_VALIDITY0 0x17
+#define TPS80031_VALIDITY1 0x18
+#define TPS80031_VALIDITY2 0x19
+#define TPS80031_VALIDITY3 0x1A
+#define TPS80031_VALIDITY4 0x1B
+#define TPS80031_VALIDITY5 0x1C
+#define TPS80031_VALIDITY6 0x1D
+#define TPS80031_VALIDITY7 0x1E
+
+/* Version number related register */
+#define TPS80031_JTAGVERNUM 0x87
+#define TPS80031_EPROM_REV 0xDF
+
+/* GPADC Trimming Bits. */
+#define TPS80031_GPADC_TRIM0 0xCC
+#define TPS80031_GPADC_TRIM1 0xCD
+#define TPS80031_GPADC_TRIM2 0xCE
+#define TPS80031_GPADC_TRIM3 0xCF
+#define TPS80031_GPADC_TRIM4 0xD0
+#define TPS80031_GPADC_TRIM5 0xD1
+#define TPS80031_GPADC_TRIM6 0xD2
+#define TPS80031_GPADC_TRIM7 0xD3
+#define TPS80031_GPADC_TRIM8 0xD4
+#define TPS80031_GPADC_TRIM9 0xD5
+#define TPS80031_GPADC_TRIM10 0xD6
+#define TPS80031_GPADC_TRIM11 0xD7
+#define TPS80031_GPADC_TRIM12 0xD8
+#define TPS80031_GPADC_TRIM13 0xD9
+#define TPS80031_GPADC_TRIM14 0xDA
+#define TPS80031_GPADC_TRIM15 0xDB
+#define TPS80031_GPADC_TRIM16 0xDC
+#define TPS80031_GPADC_TRIM17 0xDD
+#define TPS80031_GPADC_TRIM18 0xDE
+
+/* TPS80031_CONTROLLER_STAT1 bit fields */
+#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
+#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
+#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
+#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
+#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
+#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
+/* TPS80031_CONTROLLER_INT_MASK bit field */
+#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
+#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
+#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
+#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
+
+#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
+
+/* TPS80031_PHOENIX_DEV_ON bit field */
+#define TPS80031_DEVOFF 0x1
+
+#define TPS80031_EXT_CONTROL_CFG_TRANS 0
+#define TPS80031_EXT_CONTROL_CFG_STATE 1
+
+/* State register field */
+#define TPS80031_STATE_OFF 0x00
+#define TPS80031_STATE_ON 0x01
+#define TPS80031_STATE_MASK 0x03
+
+/* Trans register field */
+#define TPS80031_TRANS_ACTIVE_OFF 0x00
+#define TPS80031_TRANS_ACTIVE_ON 0x01
+#define TPS80031_TRANS_ACTIVE_MASK 0x03
+#define TPS80031_TRANS_SLEEP_OFF 0x00
+#define TPS80031_TRANS_SLEEP_ON 0x04
+#define TPS80031_TRANS_SLEEP_MASK 0x0C
+#define TPS80031_TRANS_OFF_OFF 0x00
+#define TPS80031_TRANS_OFF_ACTIVE 0x10
+#define TPS80031_TRANS_OFF_MASK 0x30
+
+#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
+ TPS80031_PWR_REQ_INPUT_PREQ2 | \
+ TPS80031_PWR_REQ_INPUT_PREQ3)
+
+/* TPS80031_BBSPOR_CFG bit field */
+#define TPS80031_BBSPOR_CHG_EN 0x8
+#define TPS80031_MAX_REGISTER 0xFF
+
+struct i2c_client;
+
+/* Supported chips */
+enum chips {
+ TPS80031 = 0x00000001,
+ TPS80032 = 0x00000002,
+};
+
+enum {
+ TPS80031_INT_PWRON,
+ TPS80031_INT_RPWRON,
+ TPS80031_INT_SYS_VLOW,
+ TPS80031_INT_RTC_ALARM,
+ TPS80031_INT_RTC_PERIOD,
+ TPS80031_INT_HOT_DIE,
+ TPS80031_INT_VXX_SHORT,
+ TPS80031_INT_SPDURATION,
+ TPS80031_INT_WATCHDOG,
+ TPS80031_INT_BAT,
+ TPS80031_INT_SIM,
+ TPS80031_INT_MMC,
+ TPS80031_INT_RES,
+ TPS80031_INT_GPADC_RT,
+ TPS80031_INT_GPADC_SW2_EOC,
+ TPS80031_INT_CC_AUTOCAL,
+ TPS80031_INT_ID_WKUP,
+ TPS80031_INT_VBUSS_WKUP,
+ TPS80031_INT_ID,
+ TPS80031_INT_VBUS,
+ TPS80031_INT_CHRG_CTRL,
+ TPS80031_INT_EXT_CHRG,
+ TPS80031_INT_INT_CHRG,
+ TPS80031_INT_RES2,
+ TPS80031_INT_BAT_TEMP_OVRANGE,
+ TPS80031_INT_BAT_REMOVED,
+ TPS80031_INT_VBUS_DET,
+ TPS80031_INT_VAC_DET,
+ TPS80031_INT_FAULT_WDG,
+ TPS80031_INT_LINCH_GATED,
+
+ /* Last interrupt id to get the end number */
+ TPS80031_INT_NR,
+};
+
+/* TPS80031 Slave IDs */
+#define TPS80031_NUM_SLAVES 4
+#define TPS80031_SLAVE_ID0 0
+#define TPS80031_SLAVE_ID1 1
+#define TPS80031_SLAVE_ID2 2
+#define TPS80031_SLAVE_ID3 3
+
+/* TPS80031 I2C addresses */
+#define TPS80031_I2C_ID0_ADDR 0x12
+#define TPS80031_I2C_ID1_ADDR 0x48
+#define TPS80031_I2C_ID2_ADDR 0x49
+#define TPS80031_I2C_ID3_ADDR 0x4A
+
+enum {
+ TPS80031_REGULATOR_VIO,
+ TPS80031_REGULATOR_SMPS1,
+ TPS80031_REGULATOR_SMPS2,
+ TPS80031_REGULATOR_SMPS3,
+ TPS80031_REGULATOR_SMPS4,
+ TPS80031_REGULATOR_VANA,
+ TPS80031_REGULATOR_LDO1,
+ TPS80031_REGULATOR_LDO2,
+ TPS80031_REGULATOR_LDO3,
+ TPS80031_REGULATOR_LDO4,
+ TPS80031_REGULATOR_LDO5,
+ TPS80031_REGULATOR_LDO6,
+ TPS80031_REGULATOR_LDO7,
+ TPS80031_REGULATOR_LDOLN,
+ TPS80031_REGULATOR_LDOUSB,
+ TPS80031_REGULATOR_VBUS,
+ TPS80031_REGULATOR_REGEN1,
+ TPS80031_REGULATOR_REGEN2,
+ TPS80031_REGULATOR_SYSEN,
+ TPS80031_REGULATOR_MAX,
+};
+
+/* Different configurations for the rails */
+enum {
+ /* USBLDO input selection */
+ TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
+ TPS80031_USBLDO_INPUT_PMID = 0x00000002,
+
+ /* LDO3 output mode */
+ TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
+
+ /* VBUS configuration */
+ TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
+ TPS80031_VBUS_SW_ONLY = 0x00000008,
+ TPS80031_VBUS_SW_N_ID = 0x00000010,
+};
+
+/* External controls requests */
+enum tps80031_ext_control {
+ TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
+ TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
+ TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
+ TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
+ TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
+ TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
+};
+
+enum tps80031_pupd_pins {
+ TPS80031_PREQ1 = 0,
+ TPS80031_PREQ2A,
+ TPS80031_PREQ2B,
+ TPS80031_PREQ2C,
+ TPS80031_PREQ3,
+ TPS80031_NRES_WARM,
+ TPS80031_PWM_FORCE,
+ TPS80031_CHRG_EXT_CHRG_STATZ,
+ TPS80031_SIM,
+ TPS80031_MMC,
+ TPS80031_GPADC_START,
+ TPS80031_DVSI2C_SCL,
+ TPS80031_DVSI2C_SDA,
+ TPS80031_CTLI2C_SCL,
+ TPS80031_CTLI2C_SDA,
+};
+
+enum tps80031_pupd_settings {
+ TPS80031_PUPD_NORMAL,
+ TPS80031_PUPD_PULLDOWN,
+ TPS80031_PUPD_PULLUP,
+};
+
+struct tps80031 {
+ struct device *dev;
+ unsigned long chip_info;
+ int es_version;
+ struct i2c_client *clients[TPS80031_NUM_SLAVES];
+ struct regmap *regmap[TPS80031_NUM_SLAVES];
+ struct regmap_irq_chip_data *irq_data;
+};
+
+struct tps80031_pupd_init_data {
+ int input_pin;
+ int setting;
+};
+
+/*
+ * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @ext_ctrl_flag: External control flag for sleep/power request control.
+ * @config_flags: Configuration flag to configure the rails.
+ * It should be a bitwise OR of the configuration enums above.
+ */
+
+struct tps80031_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ unsigned int ext_ctrl_flag;
+ unsigned int config_flags;
+};
+
+struct tps80031_platform_data {
+ int irq_base;
+ bool use_power_off;
+ struct tps80031_pupd_init_data *pupd_init_data;
+ int pupd_init_data_size;
+ struct tps80031_regulator_platform_data
+ *regulator_pdata[TPS80031_REGULATOR_MAX];
+};
+
+static inline int tps80031_write(struct device *dev, int sid,
+ int reg, uint8_t val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_write(tps80031->regmap[sid], reg, val);
+}
+
+static inline int tps80031_writes(struct device *dev, int sid, int reg,
+ int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_read(struct device *dev, int sid,
+ int reg, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ unsigned int ival;
+ int ret;
+
+ ret = regmap_read(tps80031->regmap[sid], reg, &ival);
+ if (ret < 0) {
+ dev_err(dev, "failed reading from reg 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = ival;
+ return ret;
+}
+
+static inline int tps80031_reads(struct device *dev, int sid,
+ int reg, int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_set_bits(struct device *dev, int sid,
+ int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg,
+ bit_mask, bit_mask);
+}
+
+static inline int tps80031_clr_bits(struct device *dev, int sid,
+ int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
+}
+
+static inline int tps80031_update(struct device *dev, int sid,
+ int reg, uint8_t val, uint8_t mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
+}
+
+static inline unsigned long tps80031_get_chip_info(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return tps80031->chip_info;
+}
+
+static inline int tps80031_get_pmu_version(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return tps80031->es_version;
+}
+
+static inline int tps80031_irq_get_virq(struct device *dev, int irq)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_irq_get_virq(tps80031->irq_data, irq);
+}
+
+extern int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add);
+#endif /*__LINUX_MFD_TPS80031_H */
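The tps80031 accessors all take a slave ID because the PMIC answers on several I2C addresses, and tps80031_update() is the read-modify-write path over the corresponding regmap. A minimal sketch; the parent device is assumed to be the MFD core device, and the slave ID and register chosen here are illustrative rather than taken from the patch:

#include <linux/mfd/tps80031.h>

/* Hypothetical: switch a rail's CFG_STATE register to ON on slave ID1. */
static int example_tps80031_rail_on(struct device *parent, int state_reg)
{
	return tps80031_update(parent, TPS80031_SLAVE_ID1, state_reg,
			       TPS80031_STATE_ON, TPS80031_STATE_MASK);
}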
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index a8eff4ad9be..94ac944d12f 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -207,10 +207,12 @@ struct twl6040_platform_data {
};
struct regmap;
+struct regmap_irq_chip_data;
struct twl6040 {
struct device *dev;
struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct mutex mutex;
struct mutex irq_mutex;
@@ -228,9 +230,8 @@ struct twl6040 {
unsigned int mclk;
unsigned int irq;
- unsigned int irq_base;
- u8 irq_masks_cur;
- u8 irq_masks_cache;
+ unsigned int irq_ready;
+ unsigned int irq_th;
};
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg);
@@ -245,8 +246,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
unsigned int freq_in, unsigned int freq_out);
int twl6040_get_pll(struct twl6040 *twl6040);
unsigned int twl6040_get_sysclk(struct twl6040 *twl6040);
-int twl6040_irq_init(struct twl6040 *twl6040);
-void twl6040_irq_exit(struct twl6040 *twl6040);
+
/* Get the combined status of the vibra control register */
int twl6040_get_vibralr_status(struct twl6040 *twl6040);
diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h
new file mode 100644
index 00000000000..193452848c0
--- /dev/null
+++ b/include/linux/mfd/viperboard.h
@@ -0,0 +1,110 @@
+/*
+ * include/linux/mfd/viperboard.h
+ *
+ * Nano River Technologies viperboard definitions
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_VIPERBOARD_H__
+#define __MFD_VIPERBOARD_H__
+
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#define VPRBRD_EP_OUT 0x02
+#define VPRBRD_EP_IN 0x86
+
+#define VPRBRD_I2C_MSG_LEN 512 /* max length of a msg on USB level */
+
+#define VPRBRD_I2C_FREQ_6MHZ 1 /* 6 MBit/s */
+#define VPRBRD_I2C_FREQ_3MHZ 2 /* 3 MBit/s */
+#define VPRBRD_I2C_FREQ_1MHZ 3 /* 1 MBit/s */
+#define VPRBRD_I2C_FREQ_FAST 4 /* 400 kbit/s */
+#define VPRBRD_I2C_FREQ_400KHZ VPRBRD_I2C_FREQ_FAST
+#define VPRBRD_I2C_FREQ_200KHZ 5 /* 200 kbit/s */
+#define VPRBRD_I2C_FREQ_STD 6 /* 100 kbit/s */
+#define VPRBRD_I2C_FREQ_100KHZ VPRBRD_I2C_FREQ_STD
+#define VPRBRD_I2C_FREQ_10KHZ 7 /* 10 kbit/s */
+
+#define VPRBRD_I2C_CMD_WRITE 0x00
+#define VPRBRD_I2C_CMD_READ 0x01
+#define VPRBRD_I2C_CMD_ADDR 0x02
+
+#define VPRBRD_USB_TYPE_OUT 0x40
+#define VPRBRD_USB_TYPE_IN 0xc0
+#define VPRBRD_USB_TIMEOUT_MS 100
+#define VPRBRD_USB_REQUEST_I2C_FREQ 0xe6
+#define VPRBRD_USB_REQUEST_I2C 0xe9
+#define VPRBRD_USB_REQUEST_MAJOR 0xea
+#define VPRBRD_USB_REQUEST_MINOR 0xeb
+#define VPRBRD_USB_REQUEST_ADC 0xec
+#define VPRBRD_USB_REQUEST_GPIOA 0xed
+#define VPRBRD_USB_REQUEST_GPIOB 0xdd
+
+struct vprbrd_i2c_write_hdr {
+ u8 cmd;
+ u16 addr;
+ u8 len1;
+ u8 len2;
+ u8 last;
+ u8 chan;
+ u16 spi;
+} __packed;
+
+struct vprbrd_i2c_read_hdr {
+ u8 cmd;
+ u16 addr;
+ u8 len0;
+ u8 len1;
+ u8 len2;
+ u8 len3;
+ u8 len4;
+ u8 len5;
+ u16 tf1; /* transfer 1 length */
+ u16 tf2; /* transfer 2 length */
+} __packed;
+
+struct vprbrd_i2c_status {
+ u8 unknown[11];
+ u8 status;
+} __packed;
+
+struct vprbrd_i2c_write_msg {
+ struct vprbrd_i2c_write_hdr header;
+ u8 data[VPRBRD_I2C_MSG_LEN
+ - sizeof(struct vprbrd_i2c_write_hdr)];
+} __packed;
+
+struct vprbrd_i2c_read_msg {
+ struct vprbrd_i2c_read_hdr header;
+ u8 data[VPRBRD_I2C_MSG_LEN
+ - sizeof(struct vprbrd_i2c_read_hdr)];
+} __packed;
+
+struct vprbrd_i2c_addr_msg {
+ u8 cmd;
+ u8 addr;
+ u8 unknown1;
+ u16 len;
+ u8 unknown2;
+ u8 unknown3;
+} __packed;
+
+/* Structure to hold all device specific stuff */
+struct vprbrd {
+ struct usb_device *usb_dev; /* the usb device for this device */
+ struct mutex lock;
+ u8 buf[sizeof(struct vprbrd_i2c_write_msg)];
+ struct platform_device pdev;
+};
+
+#endif /* __MFD_VIPERBOARD_H__ */
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 1f173306bf0..ae5c249530b 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -19,6 +19,8 @@
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/mfd/wm8994/pdata.h>
+
enum wm8994_type {
WM8994 = 0,
WM8958 = 1,
@@ -55,6 +57,8 @@ struct regulator_bulk_data;
struct wm8994 {
struct mutex irq_lock;
+ struct wm8994_pdata pdata;
+
enum wm8994_type type;
int revision;
int cust_id;
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index fc87be4fdc2..8e21a094836 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -176,6 +176,11 @@ struct wm8994_pdata {
unsigned int lineout1fb:1;
unsigned int lineout2fb:1;
+ /* Delay between detecting a jack and starting microphone
+ * detect (specified in ms)
+ */
+ int micdet_delay;
+
/* IRQ for microphone detection if brought out directly as a
* signal.
*/
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 0b5865c61ef..1e9f627967a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,6 +23,15 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page
* successful migration case.
*/
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CMA
+};
#ifdef CONFIG_MIGRATION
@@ -32,7 +41,7 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- enum migrate_mode mode);
+ enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode);
@@ -54,7 +63,7 @@ static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- enum migrate_mode mode) { return -ENOSYS; }
+ enum migrate_mode mode, int reason) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode) { return -ENOSYS; }
@@ -83,4 +92,37 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#define fail_migrate_page NULL
#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_NUMA_BALANCING
+extern int migrate_misplaced_page(struct page *page, int node);
+extern bool migrate_ratelimited(int node);
+#else
+static inline int migrate_misplaced_page(struct page *page, int node)
+{
+ return -EAGAIN; /* can't migrate now */
+}
+static inline bool migrate_ratelimited(int node)
+{
+ return false;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, pmd_t entry,
+ unsigned long address,
+ struct page *page, int node);
+#else
+static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, pmd_t entry,
+ unsigned long address,
+ struct page *page, int node)
+{
+ return -EAGAIN;
+}
+#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
#endif /* _LINUX_MIGRATE_H */
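The migrate_pages() signature grows a "reason" argument so statistics and tracepoints can attribute migrations to their origin, using the enum migrate_reason values added above. A sketch of a hypothetical caller; get_new_page is a caller-supplied new_page_t allocator and the sync mode is just an example:

#include <linux/migrate.h>

/* Hypothetical: migrate an isolated page list synchronously and tag the
 * operation as compaction-driven for accounting purposes. */
static int example_migrate_list(struct list_head *pages,
				new_page_t get_new_page, unsigned long private)
{
	return migrate_pages(pages, get_new_page, private,
			     false /* offlining */, MIGRATE_SYNC, MR_COMPACTION);
}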
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index e0deeb2cc93..09c2300ddb3 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -34,6 +34,7 @@
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
+#define MPT3SAS_MINOR 222
#define UINPUT_MINOR 223
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6d1acb04cd1..20ea939c22a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -142,6 +142,8 @@ enum {
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
+ MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61,
+ MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62
};
enum {
@@ -151,6 +153,20 @@ enum {
MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3
};
+enum {
+ MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
+ MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1
+};
+
+enum {
+ MLX4_USER_DEV_CAP_64B_CQE = 1L << 0
+};
+
+enum {
+ MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0
+};
+
+
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
@@ -419,6 +435,11 @@ struct mlx4_caps {
u32 max_counters;
u8 port_ib_mtu[MLX4_MAX_PORTS + 1];
u16 sqp_demux;
+ u32 eqe_size;
+ u32 cqe_size;
+ u8 eqe_factor;
+ u32 userspace_caps; /* userspace must be aware of these */
+ u32 function_caps; /* VFs must be aware of these */
};
struct mlx4_buf_list {
@@ -604,6 +625,7 @@ struct mlx4_dev {
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
int num_vfs;
+ int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
};
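The new capability bits together with the eqe_size/cqe_size fields let consumers discover whether the device was brought up with 64-byte EQEs/CQEs. A hypothetical check, assuming dev is an already-probed mlx4_dev:

#include <linux/mlx4/device.h>

/* Hypothetical: pick the CQE stride based on the advertised capability. */
static int example_mlx4_cqe_stride(struct mlx4_dev *dev)
{
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE)
		return 64;
	return 32;
}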
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4af4f0b1be4..63204078f72 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -693,6 +693,36 @@ static inline int page_to_nid(const struct page *page)
}
#endif
+#ifdef CONFIG_NUMA_BALANCING
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+ return xchg(&page->_last_nid, nid);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+ return page->_last_nid;
+}
+static inline void reset_page_last_nid(struct page *page)
+{
+ page->_last_nid = -1;
+}
+#else
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+ return page_to_nid(page);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+ return page_to_nid(page);
+}
+
+static inline void reset_page_last_nid(struct page *page)
+{
+}
+#endif
+
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
@@ -977,7 +1007,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
-extern int vmtruncate(struct inode *inode, loff_t offset);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1078,6 +1107,9 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
extern unsigned long do_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
+extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgprot_t newprot,
+ int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
@@ -1579,6 +1611,11 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
}
#endif
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+#endif
+
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
@@ -1600,6 +1637,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_MLOCK 0x40 /* mark page as mlocked */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
+#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
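page_xchg_last_nid()/page_last_nid() give NUMA balancing a per-page record of the node that last touched the page; without CONFIG_NUMA_BALANCING they degrade to page_to_nid(). A small sketch of the kind of heuristic they enable; the function and its policy are illustrative, not taken from the patch:

#include <linux/mm.h>

/* Hypothetical: record the current node and report whether the page was
 * last touched from somewhere else. */
static bool example_page_changed_node(struct page *page, int this_nid)
{
	int last_nid = page_xchg_last_nid(page, this_nid);

	return last_nid != -1 && last_nid != this_nid;
}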
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7ade2731b5d..f8f5162a357 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -128,10 +128,7 @@ struct page {
};
struct list_head list; /* slobs list of pages */
- struct { /* slab fields */
- struct kmem_cache *slab_cache;
- struct slab *slab_page;
- };
+ struct slab *slab_page; /* slab fields */
};
/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
#if USE_SPLIT_PTLOCKS
spinlock_t ptl;
#endif
- struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
};
@@ -175,6 +172,10 @@ struct page {
*/
void *shadow;
#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+ int _last_nid;
+#endif
}
/*
* The struct page can be forced to be double word aligned so that atomic ops
@@ -411,9 +412,36 @@ struct mm_struct {
#ifdef CONFIG_CPUMASK_OFFSTACK
struct cpumask cpumask_allocation;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /*
+ * numa_next_scan is the next time when the PTEs will be marked
+ * pte_numa to gather statistics and migrate pages to new nodes
+ * if necessary
+ */
+ unsigned long numa_next_scan;
+
+ /* numa_next_reset is when the PTE scanner period will be reset */
+ unsigned long numa_next_reset;
+
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
+
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
+
+ /*
+ * The first node a task was scheduled on. If the task later runs on
+ * a different node, the PTE scanner is started immediately
+ * (see NUMA_PTE_SCAN_ACTIVE below).
+ */
+ int first_nid;
+#endif
struct uprobes_state uprobes_state;
};
+/* first nid will either be a valid NID or one of these values */
+#define NUMA_PTE_SCAN_INIT -1
+#define NUMA_PTE_SCAN_ACTIVE -2
+
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c0b1d608a6..4bec5be82ca 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -460,17 +460,44 @@ struct zone {
unsigned long zone_start_pfn;
/*
- * zone_start_pfn, spanned_pages and present_pages are all
- * protected by span_seqlock. It is a seqlock because it has
- * to be read outside of zone->lock, and it is done in the main
- * allocator path. But, it is written quite infrequently.
+ * spanned_pages is the total pages spanned by the zone, including
+ * holes, which is calculated as:
+ * spanned_pages = zone_end_pfn - zone_start_pfn;
*
- * The lock is declared along with zone->lock because it is
+ * present_pages is physical pages existing within the zone, which
+ * is calculated as:
+ * present_pages = spanned_pages - absent_pages(pages in holes);
+ *
+ * managed_pages is present pages managed by the buddy system, which
+ * is calculated as (reserved_pages includes pages allocated by the
+ * bootmem allocator):
+ * managed_pages = present_pages - reserved_pages;
+ *
+ * So present_pages may be used by memory hotplug or memory power
+ * management logic to figure out unmanaged pages by checking
+ * (present_pages - managed_pages). And managed_pages should be used
+ * by the page allocator and VM scanner to calculate all kinds of watermarks
+ * and thresholds.
+ *
+ * Locking rules:
+ *
+ * zone_start_pfn and spanned_pages are protected by span_seqlock.
+ * It is a seqlock because it has to be read outside of zone->lock,
+ * and it is done in the main allocator path. But, it is written
+ * quite infrequently.
+ *
+ * The span_seq lock is declared along with zone->lock because it is
* frequently read in proximity to zone->lock. It's good to
* give them a chance of being in the same cacheline.
+ *
+ * Write access to present_pages and managed_pages at runtime should
+ * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
+ * Any reader who can't tolerate drift of present_pages and
+ * managed_pages should hold the memory hotplug lock to get a stable value.
*/
- unsigned long spanned_pages; /* total size, including holes */
- unsigned long present_pages; /* amount of memory (excluding holes) */
+ unsigned long spanned_pages;
+ unsigned long present_pages;
+ unsigned long managed_pages;
/*
* rarely used fields:
@@ -708,6 +735,19 @@ typedef struct pglist_data {
struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
int kswapd_max_order;
enum zone_type classzone_idx;
+#ifdef CONFIG_NUMA_BALANCING
+ /*
+ * Lock serializing the per destination node AutoNUMA memory
+ * migration rate limiting data.
+ */
+ spinlock_t numabalancing_migrate_lock;
+
+ /* Rate limiting time interval */
+ unsigned long numabalancing_migrate_next_window;
+
+ /* Number of pages migrated during the rate limiting time interval */
+ unsigned long numabalancing_migrate_nr_pages;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
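The comment block above pins down the relationship between spanned_pages, present_pages and managed_pages. As a purely illustrative worked example of that accounting:

#include <linux/mmzone.h>

/* Hypothetical: pages that exist in the zone but were never handed to the
 * buddy allocator (e.g. bootmem-reserved), per the formulas above. */
static unsigned long example_zone_unmanaged_pages(struct zone *zone)
{
	return zone->present_pages - zone->managed_pages;
}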
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 5a8e3903d77..12b2ab51032 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -4,9 +4,10 @@
struct mnt_namespace;
struct fs_struct;
+struct user_namespace;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
- struct fs_struct *);
+ struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
extern const struct file_operations proc_mounts_operations;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index d6a58065c09..137b4198fc0 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,17 +16,15 @@
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-#define ___module_cat(a,b) __mod_ ## a ## b
-#define __module_cat(a,b) ___module_cat(a,b)
#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
-static const char __module_cat(name,__LINE__)[] \
+static const char __UNIQUE_ID(name)[] \
__used __attribute__((section(".modinfo"), unused, aligned(1))) \
= __stringify(tag) "=" info
#else /* !MODULE */
/* This struct is here for syntactic coherency, it is not used */
#define __MODULE_INFO(tag, name, info) \
- struct __module_cat(name,__LINE__) {}
+ struct __UNIQUE_ID(name) {}
#endif
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index ed270bd2e4d..4eb0a50d0c5 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -43,7 +44,8 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct task_struct *thread;
+ struct workqueue_struct *wq;
+ struct work_struct work;
struct request_queue *rq;
spinlock_t queue_lock;
void *priv;
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 0f6fea73a1f..407d1e556c3 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -92,12 +92,26 @@
* Others use readb/writeb
*/
#if defined(__arm__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2))))
-#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0)
+static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg)
+{
+ return __raw_readl(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg)
+{
+ __raw_writel(data, addr + reg);
+ wmb();
+}
#define DOC_IOREMAP_LEN 0x8000
#elif defined(__ppc__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1))))
-#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0)
+static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg)
+{
+ return __raw_readw(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg)
+{
+ __raw_writew(data, addr + reg);
+ wmb();
+}
#define DOC_IOREMAP_LEN 0x4000
#else
#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg))
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index b20029221fb..d6ed61ef451 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -155,9 +155,6 @@ struct fsmc_nand_platform_data {
unsigned int width;
unsigned int bank;
- /* CLE, ALE offsets */
- unsigned int cle_off;
- unsigned int ale_off;
enum access_mode mode;
void (*select_bank)(uint32_t bank, uint32_t busw);
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
deleted file mode 100644
index ed3c4e09f3d..00000000000
--- a/include/linux/mtd/gpmi-nand.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef __MACH_MXS_GPMI_NAND_H__
-#define __MACH_MXS_GPMI_NAND_H__
-
-/* The size of the resources is fixed. */
-#define GPMI_NAND_RES_SIZE 6
-
-/* Resource names for the GPMI NAND driver. */
-#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
-#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt"
-#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
-#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
-#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels"
-#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
-
-/**
- * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
- *
- * This structure communicates platform-specific information to the GPMI NAND
- * driver that can't be expressed as resources.
- *
- * @platform_init: A pointer to a function the driver will call to
- * initialize the platform (e.g., set up the pin mux).
- * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and
- * from the NAND Flash device, in nanoseconds.
- * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and
- * from the NAND Flash device, in nanoseconds.
- * @max_chip_count: The maximum number of chips for which the driver
- * should configure the hardware. This value most
- * likely reflects the number of pins that are
- * connected to a NAND Flash device. If this is
- * greater than the SoC hardware can support, the
- * driver will print a message and fail to initialize.
- * @partitions: An optional pointer to an array of partition
- * descriptions.
- * @partition_count: The number of elements in the partitions array.
- */
-struct gpmi_nand_platform_data {
- /* SoC hardware information. */
- int (*platform_init)(void);
-
- /* NAND Flash information. */
- unsigned int min_prop_delay_in_ns;
- unsigned int max_prop_delay_in_ns;
- unsigned int max_chip_count;
-
- /* Medium information. */
- struct mtd_partition *partitions;
- unsigned partition_count;
-};
-#endif
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3595a0236b0..f6eb4332ac9 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -328,7 +328,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
- map_word r;
+ map_word r = {{0} };
if (map_bankwidth_is_1(map))
r.x[0] = *(unsigned char *)ptr;
@@ -391,7 +391,7 @@ static inline map_word map_word_ff(struct map_info *map)
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
- map_word r;
+ map_word uninitialized_var(r);
if (map_bankwidth_is_1(map))
r.x[0] = __raw_readb(map->virt + ofs);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 81d61e70459..f9ac2897b86 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,7 +98,7 @@ struct mtd_oob_ops {
};
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 448
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/*
* Internal ECC layout control structure. For historical reasons, there is a
* similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 24e915957e4..7ccb3c59ed6 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -219,6 +219,13 @@ typedef enum {
#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
+/*
+ * Autodetect nand buswidth with readid/onfi.
+ * This suppose the driver will configure the hardware in 8 bits mode
+ * when calling nand_scan_ident, and update its configuration
+ * before calling nand_scan_tail.
+ */
+#define NAND_BUSWIDTH_AUTO 0x00080000
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
@@ -471,8 +478,8 @@ struct nand_buffers {
* non 0 if ONFI supported.
* @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
* supported, 0 otherwise.
- * @onfi_set_features [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features [REPLACEABLE] get the features for ONFI nand
+ * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
+ * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
* @ecclayout: [REPLACEABLE] the default ECC placement scheme
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 01e4b15b280..1c28f8879b1 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -20,6 +20,7 @@
#ifndef __SH_FLCTL_H__
#define __SH_FLCTL_H__
+#include <linux/completion.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -107,6 +108,7 @@
#define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */
#define AC1CLR (0x1 << 19) /* ECC FIFO clear */
#define AC0CLR (0x1 << 18) /* Data FIFO clear */
+#define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */
#define ECERB (0x1 << 9) /* ECC error */
#define STERB (0x1 << 8) /* Status error */
#define STERINTE (0x1 << 4) /* Status error enable */
@@ -138,6 +140,8 @@ enum flctl_ecc_res_t {
FL_TIMEOUT
};
+struct dma_chan;
+
struct sh_flctl {
struct mtd_info mtd;
struct nand_chip chip;
@@ -147,7 +151,7 @@ struct sh_flctl {
uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
int read_bytes;
- int index;
+ unsigned int index;
int seqin_column; /* column in SEQIN cmd */
int seqin_page_addr; /* page_addr in SEQIN cmd */
uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */
@@ -161,6 +165,11 @@ struct sh_flctl {
unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */
unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
+
+ /* DMA related objects */
+ struct dma_chan *chan_fifo0_rx;
+ struct dma_chan *chan_fifo0_tx;
+ struct completion dma_complete;
};
struct sh_flctl_platform_data {
@@ -170,6 +179,9 @@ struct sh_flctl_platform_data {
unsigned has_hwecc:1;
unsigned use_holden:1;
+
+ unsigned int slave_id_fifo0_tx;
+ unsigned int slave_id_fifo0_rx;
};
static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 4bf19d8174e..e998c030061 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -65,8 +65,8 @@ extern int user_path_at_empty(int, const char __user *, unsigned, struct path *,
extern int kern_path(const char *, unsigned, struct path *);
-extern struct dentry *kern_path_create(int, const char *, struct path *, int);
-extern struct dentry *user_path_create(int, const char __user *, struct path *, int);
+extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
+extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
@@ -98,4 +98,20 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
((char *) name)[min(len, maxlen)] = '\0';
}
+/**
+ * retry_estale - determine whether the caller should retry an operation
+ * @error: the error that would currently be returned
+ * @flags: flags being used for next lookup attempt
+ *
+ * Check to see if the error code was -ESTALE, and then determine whether
+ * to retry the call based on whether "flags" already has LOOKUP_REVAL set.
+ *
+ * Returns true if the caller should try the operation again.
+ */
+static inline bool
+retry_estale(const long error, const unsigned int flags)
+{
+ return error == -ESTALE && !(flags & LOOKUP_REVAL);
+}
+
#endif /* _LINUX_NAMEI_H */
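For illustration (not part of the patch), the retry pattern retry_estale() is meant to support looks roughly like the sketch below; example_do_op() and do_something_with() are placeholders for a real path-based operation.

/* Sketch: typical ESTALE retry loop built around retry_estale(). */
static long example_do_op(const char __user *pathname)
{
	unsigned int lookup_flags = 0;
	struct path path;
	long error;

retry:
	error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
	if (error)
		return error;

	error = do_something_with(&path);	/* placeholder operation */

	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}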
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef9336c9d46..c599e4782d4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -369,7 +369,7 @@ typedef enum gro_result gro_result_t;
*
* If the rx_handler consider the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
- * are registred on exact device (ptype->dev == skb->dev).
+ * are registered on exact device (ptype->dev == skb->dev).
*
* If the rx_handler didn't changed skb->dev, but want the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
@@ -1576,7 +1576,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
-extern seqlock_t devnet_rename_seq; /* Device rename lock */
+extern seqcount_t devnet_rename_seq; /* Device rename seq */
#define for_each_netdev(net, d) \
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a9e76ee1adc..6c6ed153a9b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -198,51 +198,4 @@ struct nfs_server {
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
-
-/* maximum number of slots to use */
-#define NFS4_DEF_SLOT_TABLE_SIZE (16U)
-#define NFS4_MAX_SLOT_TABLE (256U)
-#define NFS4_NO_SLOT ((u32)-1)
-
-#if IS_ENABLED(CONFIG_NFS_V4)
-
-/* Sessions */
-#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
-struct nfs4_slot_table {
- struct nfs4_slot *slots; /* seqid per slot */
- unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
- spinlock_t slot_tbl_lock;
- struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
- u32 max_slots; /* # slots in table */
- u32 highest_used_slotid; /* sent to server on each SEQ.
- * op for dynamic resizing */
- u32 target_max_slots; /* Set by CB_RECALL_SLOT as
- * the new max_slots */
- struct completion complete;
-};
-
-static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
-{
- return sp - tbl->slots;
-}
-
-/*
- * Session related parameters
- */
-struct nfs4_session {
- struct nfs4_sessionid sess_id;
- u32 flags;
- unsigned long session_state;
- u32 hash_alg;
- u32 ssv_len;
-
- /* The fore and back channel */
- struct nfs4_channel_attrs fc_attrs;
- struct nfs4_slot_table fc_slot_table;
- struct nfs4_channel_attrs bc_attrs;
- struct nfs4_slot_table bc_slot_table;
- struct nfs_client *clp;
-};
-
-#endif /* CONFIG_NFS_V4 */
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index a73ea89789d..29adb12c7ec 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -185,23 +185,20 @@ struct nfs4_channel_attrs {
u32 max_reqs;
};
-/* nfs41 sessions slot seqid */
-struct nfs4_slot {
- u32 seq_nr;
-};
-
+struct nfs4_slot;
struct nfs4_sequence_args {
- struct nfs4_session *sa_session;
- u32 sa_slotid;
- u8 sa_cache_this;
+ struct nfs4_slot *sa_slot;
+ u8 sa_cache_this : 1,
+ sa_privileged : 1;
};
struct nfs4_sequence_res {
- struct nfs4_session *sr_session;
struct nfs4_slot *sr_slot; /* slot used to send request */
+ unsigned long sr_timestamp;
int sr_status; /* sequence operation status */
- unsigned long sr_renewal_time;
u32 sr_status_flags;
+ u32 sr_highest_slotid;
+ u32 sr_target_highest_slotid;
};
struct nfs4_get_lease_time_args {
@@ -209,8 +206,8 @@ struct nfs4_get_lease_time_args {
};
struct nfs4_get_lease_time_res {
- struct nfs_fsinfo *lr_fsinfo;
struct nfs4_sequence_res lr_seq_res;
+ struct nfs_fsinfo *lr_fsinfo;
};
#define PNFS_LAYOUT_MAXSIZE 4096
@@ -228,23 +225,23 @@ struct pnfs_layout_range {
};
struct nfs4_layoutget_args {
+ struct nfs4_sequence_args seq_args;
__u32 type;
struct pnfs_layout_range range;
__u64 minlength;
__u32 maxcount;
struct inode *inode;
struct nfs_open_context *ctx;
- struct nfs4_sequence_args seq_args;
nfs4_stateid stateid;
struct nfs4_layoutdriver_data layout;
};
struct nfs4_layoutget_res {
+ struct nfs4_sequence_res seq_res;
__u32 return_on_close;
struct pnfs_layout_range range;
__u32 type;
nfs4_stateid stateid;
- struct nfs4_sequence_res seq_res;
struct nfs4_layoutdriver_data *layoutp;
};
@@ -255,38 +252,38 @@ struct nfs4_layoutget {
};
struct nfs4_getdevicelist_args {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *fh;
u32 layoutclass;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_getdevicelist_res {
- struct pnfs_devicelist *devlist;
struct nfs4_sequence_res seq_res;
+ struct pnfs_devicelist *devlist;
};
struct nfs4_getdeviceinfo_args {
- struct pnfs_device *pdev;
struct nfs4_sequence_args seq_args;
+ struct pnfs_device *pdev;
};
struct nfs4_getdeviceinfo_res {
- struct pnfs_device *pdev;
struct nfs4_sequence_res seq_res;
+ struct pnfs_device *pdev;
};
struct nfs4_layoutcommit_args {
+ struct nfs4_sequence_args seq_args;
nfs4_stateid stateid;
__u64 lastbytewritten;
struct inode *inode;
const u32 *bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_layoutcommit_res {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr *fattr;
const struct nfs_server *server;
- struct nfs4_sequence_res seq_res;
int status;
};
@@ -300,11 +297,11 @@ struct nfs4_layoutcommit_data {
};
struct nfs4_layoutreturn_args {
+ struct nfs4_sequence_args seq_args;
struct pnfs_layout_hdr *layout;
struct inode *inode;
nfs4_stateid stateid;
__u32 layout_type;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_layoutreturn_res {
@@ -330,6 +327,7 @@ struct stateowner_id {
* Arguments to the open call.
*/
struct nfs_openargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
struct nfs_seqid * seqid;
int open_flags;
@@ -350,10 +348,10 @@ struct nfs_openargs {
const u32 * bitmask;
const u32 * open_bitmap;
__u32 claim;
- struct nfs4_sequence_args seq_args;
};
struct nfs_openres {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_fh fh;
struct nfs4_change_info cinfo;
@@ -368,7 +366,6 @@ struct nfs_openres {
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
- struct nfs4_sequence_res seq_res;
__u32 access_request;
__u32 access_supported;
__u32 access_result;
@@ -392,20 +389,20 @@ struct nfs_open_confirmres {
* Arguments to the close call.
*/
struct nfs_closeargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
nfs4_stateid * stateid;
struct nfs_seqid * seqid;
fmode_t fmode;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs_closeres {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
- struct nfs4_sequence_res seq_res;
};
/*
* * Arguments to the lock,lockt, and locku call.
@@ -417,6 +414,7 @@ struct nfs_lowner {
};
struct nfs_lock_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * lock_seqid;
@@ -427,40 +425,39 @@ struct nfs_lock_args {
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
- struct nfs4_sequence_args seq_args;
};
struct nfs_lock_res {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
- struct nfs4_sequence_res seq_res;
};
struct nfs_locku_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
- struct nfs4_sequence_args seq_args;
};
struct nfs_locku_res {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_seqid * seqid;
- struct nfs4_sequence_res seq_res;
};
struct nfs_lockt_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
- struct nfs4_sequence_args seq_args;
};
struct nfs_lockt_res {
- struct file_lock * denied; /* LOCK, LOCKT failed */
struct nfs4_sequence_res seq_res;
+ struct file_lock * denied; /* LOCK, LOCKT failed */
};
struct nfs_release_lockowner_args {
@@ -468,22 +465,23 @@ struct nfs_release_lockowner_args {
};
struct nfs4_delegreturnargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_delegreturnres {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
const struct nfs_server *server;
- struct nfs4_sequence_res seq_res;
};
/*
* Arguments to the read call.
*/
struct nfs_readargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
struct nfs_open_context *context;
struct nfs_lock_context *lock_context;
@@ -491,20 +489,20 @@ struct nfs_readargs {
__u32 count;
unsigned int pgbase;
struct page ** pages;
- struct nfs4_sequence_args seq_args;
};
struct nfs_readres {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
__u32 count;
int eof;
- struct nfs4_sequence_res seq_res;
};
/*
* Arguments to the write call.
*/
struct nfs_writeargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
struct nfs_open_context *context;
struct nfs_lock_context *lock_context;
@@ -514,7 +512,6 @@ struct nfs_writeargs {
unsigned int pgbase;
struct page ** pages;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs_write_verifier {
@@ -527,65 +524,65 @@ struct nfs_writeverf {
};
struct nfs_writeres {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
struct nfs_writeverf * verf;
__u32 count;
const struct nfs_server *server;
- struct nfs4_sequence_res seq_res;
};
/*
* Arguments to the commit call.
*/
struct nfs_commitargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh *fh;
__u64 offset;
__u32 count;
const u32 *bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs_commitres {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr *fattr;
struct nfs_writeverf *verf;
const struct nfs_server *server;
- struct nfs4_sequence_res seq_res;
};
/*
* Common arguments to the unlink call
*/
struct nfs_removeargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *fh;
struct qstr name;
- struct nfs4_sequence_args seq_args;
};
struct nfs_removeres {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server *server;
struct nfs_fattr *dir_attr;
struct nfs4_change_info cinfo;
- struct nfs4_sequence_res seq_res;
};
/*
* Common arguments to the rename call
*/
struct nfs_renameargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *old_dir;
const struct nfs_fh *new_dir;
const struct qstr *old_name;
const struct qstr *new_name;
- struct nfs4_sequence_args seq_args;
};
struct nfs_renameres {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server *server;
struct nfs4_change_info old_cinfo;
struct nfs_fattr *old_fattr;
struct nfs4_change_info new_cinfo;
struct nfs_fattr *new_fattr;
- struct nfs4_sequence_res seq_res;
};
/*
@@ -626,20 +623,20 @@ struct nfs_createargs {
};
struct nfs_setattrargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
nfs4_stateid stateid;
struct iattr * iap;
const struct nfs_server * server; /* Needed for name mapping */
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs_setaclargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
- struct nfs4_sequence_args seq_args;
};
struct nfs_setaclres {
@@ -647,27 +644,27 @@ struct nfs_setaclres {
};
struct nfs_getaclargs {
+ struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
- struct nfs4_sequence_args seq_args;
};
/* getxattr ACL interface flags */
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
+ struct nfs4_sequence_res seq_res;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
struct page * acl_scratch;
- struct nfs4_sequence_res seq_res;
};
struct nfs_setattrres {
+ struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
const struct nfs_server * server;
- struct nfs4_sequence_res seq_res;
};
struct nfs_linkargs {
@@ -832,21 +829,22 @@ struct nfs3_getaclres {
typedef u64 clientid4;
struct nfs4_accessargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const u32 * bitmask;
u32 access;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_accessres {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
u32 supported;
u32 access;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_create_arg {
+ struct nfs4_sequence_args seq_args;
u32 ftype;
union {
struct {
@@ -863,88 +861,88 @@ struct nfs4_create_arg {
const struct iattr * attrs;
const struct nfs_fh * dir_fh;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_create_res {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_fsinfo_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_fsinfo_res {
- struct nfs_fsinfo *fsinfo;
struct nfs4_sequence_res seq_res;
+ struct nfs_fsinfo *fsinfo;
};
struct nfs4_getattr_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_getattr_res {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_link_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_link_res {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_lookup_res {
+ struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_root_arg {
- const u32 * bitmask;
struct nfs4_sequence_args seq_args;
+ const u32 * bitmask;
};
struct nfs4_pathconf_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_pathconf_res {
- struct nfs_pathconf *pathconf;
struct nfs4_sequence_res seq_res;
+ struct nfs_pathconf *pathconf;
};
struct nfs4_readdir_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
u64 cookie;
nfs4_verifier verifier;
@@ -953,21 +951,20 @@ struct nfs4_readdir_arg {
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
int plus;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_readdir_res {
+ struct nfs4_sequence_res seq_res;
nfs4_verifier verifier;
unsigned int pgbase;
- struct nfs4_sequence_res seq_res;
};
struct nfs4_readlink {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
unsigned int pgbase;
unsigned int pglen; /* zero-copy data */
struct page ** pages; /* zero-copy data */
- struct nfs4_sequence_args seq_args;
};
struct nfs4_readlink_res {
@@ -993,28 +990,28 @@ struct nfs4_setclientid_res {
};
struct nfs4_statfs_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
const u32 * bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_statfs_res {
- struct nfs_fsstat *fsstat;
struct nfs4_sequence_res seq_res;
+ struct nfs_fsstat *fsstat;
};
struct nfs4_server_caps_arg {
- struct nfs_fh *fhandle;
struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fhandle;
};
struct nfs4_server_caps_res {
+ struct nfs4_sequence_res seq_res;
u32 attr_bitmask[3];
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
- struct nfs4_sequence_res seq_res;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1040,16 +1037,16 @@ struct nfs4_fs_locations {
};
struct nfs4_fs_locations_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *dir_fh;
const struct qstr *name;
struct page *page;
const u32 *bitmask;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_fs_locations_res {
- struct nfs4_fs_locations *fs_locations;
struct nfs4_sequence_res seq_res;
+ struct nfs4_fs_locations *fs_locations;
};
struct nfs4_secinfo_oid {
@@ -1074,14 +1071,14 @@ struct nfs4_secinfo_flavors {
};
struct nfs4_secinfo_arg {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh *dir_fh;
const struct qstr *name;
- struct nfs4_sequence_args seq_args;
};
struct nfs4_secinfo_res {
- struct nfs4_secinfo_flavors *flavors;
struct nfs4_sequence_res seq_res;
+ struct nfs4_secinfo_flavors *flavors;
};
#endif /* CONFIG_NFS_V4 */
@@ -1161,9 +1158,9 @@ struct nfs41_create_session_res {
};
struct nfs41_reclaim_complete_args {
+ struct nfs4_sequence_args seq_args;
/* In the future extend to include curr_fh for use with migration */
unsigned char one_fs:1;
- struct nfs4_sequence_args seq_args;
};
struct nfs41_reclaim_complete_res {
@@ -1173,28 +1170,28 @@ struct nfs41_reclaim_complete_res {
#define SECINFO_STYLE_CURRENT_FH 0
#define SECINFO_STYLE_PARENT 1
struct nfs41_secinfo_no_name_args {
- int style;
struct nfs4_sequence_args seq_args;
+ int style;
};
struct nfs41_test_stateid_args {
- nfs4_stateid *stateid;
struct nfs4_sequence_args seq_args;
+ nfs4_stateid *stateid;
};
struct nfs41_test_stateid_res {
- unsigned int status;
struct nfs4_sequence_res seq_res;
+ unsigned int status;
};
struct nfs41_free_stateid_args {
- nfs4_stateid *stateid;
struct nfs4_sequence_args seq_args;
+ nfs4_stateid *stateid;
};
struct nfs41_free_stateid_res {
- unsigned int status;
struct nfs4_sequence_res seq_res;
+ unsigned int status;
};
#else
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 7afc36334d5..4e2cbfa640b 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -380,6 +380,11 @@ enum node_states {
#else
N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ N_MEMORY, /* The node has memory(regular, high, movable) */
+#else
+ N_MEMORY = N_HIGH_MEMORY,
+#endif
N_CPU, /* The node has one or more cpus */
NR_NODE_STATES
};
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index cc37a55ad00..10e5947491c 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -67,7 +67,7 @@ void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
- struct fs_struct *);
+ struct cred *, struct fs_struct *);
int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
diff --git a/include/linux/of.h b/include/linux/of.h
index 60053bd7e79..5ebcc5c8e42 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -22,6 +22,7 @@
#include <linux/mod_devicetable.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
+#include <linux/notifier.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -273,7 +274,7 @@ extern int of_modalias_node(struct device_node *node, char *modalias, int len);
extern struct device_node *of_parse_phandle(const struct device_node *np,
const char *phandle_name,
int index);
-extern int of_parse_phandle_with_args(struct device_node *np,
+extern int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct of_phandle_args *out_args);
@@ -282,16 +283,28 @@ extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_machine_is_compatible(const char *compat);
-extern int prom_add_property(struct device_node* np, struct property* prop);
-extern int prom_remove_property(struct device_node *np, struct property *prop);
-extern int prom_update_property(struct device_node *np,
- struct property *newprop);
+extern int of_add_property(struct device_node *np, struct property *prop);
+extern int of_remove_property(struct device_node *np, struct property *prop);
+extern int of_update_property(struct device_node *np, struct property *newprop);
-#if defined(CONFIG_OF_DYNAMIC)
/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(struct device_node *);
-#endif
+#define OF_RECONFIG_ATTACH_NODE 0x0001
+#define OF_RECONFIG_DETACH_NODE 0x0002
+#define OF_RECONFIG_ADD_PROPERTY 0x0003
+#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
+#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
+
+struct of_prop_reconfig {
+ struct device_node *dn;
+ struct property *prop;
+};
+
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, void *);
+
+extern int of_attach_node(struct device_node *);
+extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
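A minimal sketch (not part of the patch) of consuming the new OF_RECONFIG_* notifications; example_of_notify() and example_of_nb are made-up names.

/* Sketch: device-tree reconfiguration notifier consumer. */
static int example_of_notify(struct notifier_block *nb,
			     unsigned long action, void *arg)
{
	struct of_prop_reconfig *pr;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
	case OF_RECONFIG_DETACH_NODE:
		/* arg is the struct device_node being added/removed */
		break;
	case OF_RECONFIG_ADD_PROPERTY:
	case OF_RECONFIG_REMOVE_PROPERTY:
	case OF_RECONFIG_UPDATE_PROPERTY:
		pr = arg;
		/* pr->dn and pr->prop describe the affected property */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_of_nb = {
	.notifier_call = example_of_notify,
};

/* Registered somewhere in init code: of_reconfig_notifier_register(&example_of_nb); */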
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 1cb775f8e66..cfb545cd86b 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -29,6 +29,18 @@ static inline void of_i2c_register_devices(struct i2c_adapter *adap)
{
return;
}
+
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+/* must call put_device() when done with returned i2c_adapter device */
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(
+ struct device_node *node)
+{
+ return NULL;
+}
#endif /* CONFIG_OF_I2C */
#endif /* __LINUX_OF_I2C_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index b47d2040c9f..3863a4dbdf1 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -100,6 +100,7 @@ extern int of_platform_populate(struct device_node *root,
#if !defined(CONFIG_OF_ADDRESS)
struct of_dev_auxdata;
+struct device;
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index eb475a8ea25..7af25a9c9c5 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -19,4 +19,370 @@ static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
}
#endif
+/*
+ * Legacy OMAP DMA handling defines and functions
+ *
+ * NOTE: Do not use these any longer.
+ *
+ * Use the generic dmaengine functions as defined in
+ * include/linux/dmaengine.h.
+ *
+ * Copyright (C) 2003 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#define INT_DMA_LCD 25
+
+#define OMAP1_DMA_TOUT_IRQ (1 << 0)
+#define OMAP_DMA_DROP_IRQ (1 << 1)
+#define OMAP_DMA_HALF_IRQ (1 << 2)
+#define OMAP_DMA_FRAME_IRQ (1 << 3)
+#define OMAP_DMA_LAST_IRQ (1 << 4)
+#define OMAP_DMA_BLOCK_IRQ (1 << 5)
+#define OMAP1_DMA_SYNC_IRQ (1 << 6)
+#define OMAP2_DMA_PKT_IRQ (1 << 7)
+#define OMAP2_DMA_TRANS_ERR_IRQ (1 << 8)
+#define OMAP2_DMA_SECURE_ERR_IRQ (1 << 9)
+#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10)
+#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
+
+#define OMAP_DMA_CCR_EN (1 << 7)
+#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
+#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
+#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
+#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
+
+#define OMAP_DMA_DATA_TYPE_S8 0x00
+#define OMAP_DMA_DATA_TYPE_S16 0x01
+#define OMAP_DMA_DATA_TYPE_S32 0x02
+
+#define OMAP_DMA_SYNC_ELEMENT 0x00
+#define OMAP_DMA_SYNC_FRAME 0x01
+#define OMAP_DMA_SYNC_BLOCK 0x02
+#define OMAP_DMA_SYNC_PACKET 0x03
+
+#define OMAP_DMA_DST_SYNC_PREFETCH 0x02
+#define OMAP_DMA_SRC_SYNC 0x01
+#define OMAP_DMA_DST_SYNC 0x00
+
+#define OMAP_DMA_PORT_EMIFF 0x00
+#define OMAP_DMA_PORT_EMIFS 0x01
+#define OMAP_DMA_PORT_OCP_T1 0x02
+#define OMAP_DMA_PORT_TIPB 0x03
+#define OMAP_DMA_PORT_OCP_T2 0x04
+#define OMAP_DMA_PORT_MPUI 0x05
+
+#define OMAP_DMA_AMODE_CONSTANT 0x00
+#define OMAP_DMA_AMODE_POST_INC 0x01
+#define OMAP_DMA_AMODE_SINGLE_IDX 0x02
+#define OMAP_DMA_AMODE_DOUBLE_IDX 0x03
+
+#define DMA_DEFAULT_FIFO_DEPTH 0x10
+#define DMA_DEFAULT_ARB_RATE 0x01
+/* Pass THREAD_RESERVE ORed with THREAD_FIFO for tparams */
+#define DMA_THREAD_RESERVE_NORM (0x00 << 12) /* Def */
+#define DMA_THREAD_RESERVE_ONET (0x01 << 12)
+#define DMA_THREAD_RESERVE_TWOT (0x02 << 12)
+#define DMA_THREAD_RESERVE_THREET (0x03 << 12)
+#define DMA_THREAD_FIFO_NONE (0x00 << 14) /* Def */
+#define DMA_THREAD_FIFO_75 (0x01 << 14)
+#define DMA_THREAD_FIFO_25 (0x02 << 14)
+#define DMA_THREAD_FIFO_50 (0x03 << 14)
+
+/* DMA4_OCP_SYSCONFIG bits */
+#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12)
+#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8)
+#define DMA_SYSCONFIG_EMUFREE (1 << 5)
+#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3)
+#define DMA_SYSCONFIG_SOFTRESET (1 << 2)
+#define DMA_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12)
+#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3)
+
+#define DMA_IDLEMODE_SMARTIDLE 0x2
+#define DMA_IDLEMODE_NO_IDLE 0x1
+#define DMA_IDLEMODE_FORCE_IDLE 0x0
+
+/* Chaining modes*/
+#ifndef CONFIG_ARCH_OMAP1
+#define OMAP_DMA_STATIC_CHAIN 0x1
+#define OMAP_DMA_DYNAMIC_CHAIN 0x2
+#define OMAP_DMA_CHAIN_ACTIVE 0x1
+#define OMAP_DMA_CHAIN_INACTIVE 0x0
+#endif
+
+#define DMA_CH_PRIO_HIGH 0x1
+#define DMA_CH_PRIO_LOW 0x0 /* Def */
+
+/* Errata handling */
+#define IS_DMA_ERRATA(id) (errata & (id))
+#define SET_DMA_ERRATA(id) (errata |= (id))
+
+#define DMA_ERRATA_IFRAME_BUFFERING BIT(0x0)
+#define DMA_ERRATA_PARALLEL_CHANNELS BIT(0x1)
+#define DMA_ERRATA_i378 BIT(0x2)
+#define DMA_ERRATA_i541 BIT(0x3)
+#define DMA_ERRATA_i88 BIT(0x4)
+#define DMA_ERRATA_3_3 BIT(0x5)
+#define DMA_ROMCODE_BUG BIT(0x6)
+
+/* Attributes for OMAP DMA Controller */
+#define DMA_LINKED_LCH BIT(0x0)
+#define GLOBAL_PRIORITY BIT(0x1)
+#define RESERVE_CHANNEL BIT(0x2)
+#define IS_CSSA_32 BIT(0x3)
+#define IS_CDSA_32 BIT(0x4)
+#define IS_RW_PRIORITY BIT(0x5)
+#define ENABLE_1510_MODE BIT(0x6)
+#define SRC_PORT BIT(0x7)
+#define DST_PORT BIT(0x8)
+#define SRC_INDEX BIT(0x9)
+#define DST_INDEX BIT(0xa)
+#define IS_BURST_ONLY4 BIT(0xb)
+#define CLEAR_CSR_ON_READ BIT(0xc)
+#define IS_WORD_16 BIT(0xd)
+#define ENABLE_16XX_MODE BIT(0xe)
+#define HS_CHANNELS_RESERVED BIT(0xf)
+
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
+
+enum omap_reg_offsets {
+
+GCR, GSCR, GRST1, HW_ID,
+PCH2_ID, PCH0_ID, PCH1_ID, PCHG_ID,
+PCHD_ID, CAPS_0, CAPS_1, CAPS_2,
+CAPS_3, CAPS_4, PCH2_SR, PCH0_SR,
+PCH1_SR, PCHD_SR, REVISION, IRQSTATUS_L0,
+IRQSTATUS_L1, IRQSTATUS_L2, IRQSTATUS_L3, IRQENABLE_L0,
+IRQENABLE_L1, IRQENABLE_L2, IRQENABLE_L3, SYSSTATUS,
+OCP_SYSCONFIG,
+
+/* omap1+ specific */
+CPC, CCR2, LCH_CTRL,
+
+/* Common registers for all omap's */
+CSDP, CCR, CICR, CSR,
+CEN, CFN, CSFI, CSEI,
+CSAC, CDAC, CDEI,
+CDFI, CLNK_CTRL,
+
+/* Channel specific registers */
+CSSA, CDSA, COLOR,
+CCEN, CCFN,
+
+/* omap3630 and omap4 specific */
+CDP, CNDP, CCDN,
+
+};
+
+enum omap_dma_burst_mode {
+ OMAP_DMA_DATA_BURST_DIS = 0,
+ OMAP_DMA_DATA_BURST_4,
+ OMAP_DMA_DATA_BURST_8,
+ OMAP_DMA_DATA_BURST_16,
+};
+
+enum end_type {
+ OMAP_DMA_LITTLE_ENDIAN = 0,
+ OMAP_DMA_BIG_ENDIAN
+};
+
+enum omap_dma_color_mode {
+ OMAP_DMA_COLOR_DIS = 0,
+ OMAP_DMA_CONSTANT_FILL,
+ OMAP_DMA_TRANSPARENT_COPY
+};
+
+enum omap_dma_write_mode {
+ OMAP_DMA_WRITE_NON_POSTED = 0,
+ OMAP_DMA_WRITE_POSTED,
+ OMAP_DMA_WRITE_LAST_NON_POSTED
+};
+
+enum omap_dma_channel_mode {
+ OMAP_DMA_LCH_2D = 0,
+ OMAP_DMA_LCH_G,
+ OMAP_DMA_LCH_P,
+ OMAP_DMA_LCH_PD
+};
+
+struct omap_dma_channel_params {
+ int data_type; /* data type 8,16,32 */
+ int elem_count; /* number of elements in a frame */
+ int frame_count; /* number of frames in a block */
+
+ int src_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int src_amode; /* constant, post increment, indexed,
+ double indexed */
+ unsigned long src_start; /* source address : physical */
+ int src_ei; /* source element index */
+ int src_fi; /* source frame index */
+
+ int dst_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int dst_amode; /* constant, post increment, indexed,
+ double indexed */
+ unsigned long dst_start; /* destination address : physical */
+ int dst_ei; /* destination element index */
+ int dst_fi; /* destination frame index */
+
+ int trigger; /* trigger attached if the channel is
+ synchronized */
+ int sync_mode; /* sync on element, frame, block or packet */
+ int src_or_dst_synch; /* source synch(1) or destination synch(0) */
+
+ int ie; /* interrupt enabled */
+
+ unsigned char read_prio;/* read priority */
+ unsigned char write_prio;/* write priority */
+
+#ifndef CONFIG_ARCH_OMAP1
+ enum omap_dma_burst_mode burst_mode; /* Burst mode 4/8/16 words */
+#endif
+};
+
+struct omap_dma_lch {
+ int next_lch;
+ int dev_id;
+ u16 saved_csr;
+ u16 enabled_irqs;
+ const char *dev_name;
+ void (*callback)(int lch, u16 ch_status, void *data);
+ void *data;
+ long flags;
+ /* required for Dynamic chaining */
+ int prev_linked_ch;
+ int next_linked_ch;
+ int state;
+ int chain_id;
+ int status;
+};
+
+struct omap_dma_dev_attr {
+ u32 dev_caps;
+ u16 lch_count;
+ u16 chan_count;
+ struct omap_dma_lch *chan;
+};
+
+/* System DMA platform data structure */
+struct omap_system_dma_plat_info {
+ struct omap_dma_dev_attr *dma_attr;
+ u32 errata;
+ void (*disable_irq_lch)(int lch);
+ void (*show_dma_caps)(void);
+ void (*clear_lch_regs)(int lch);
+ void (*clear_dma)(int lch);
+ void (*dma_write)(u32 val, int reg, int lch);
+ u32 (*dma_read)(int reg, int lch);
+};
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+#define dma_omap2plus() 1
+#else
+#define dma_omap2plus() 0
#endif
+#define dma_omap1() (!dma_omap2plus())
+#define dma_omap15xx() ((dma_omap1() && (d->dev_caps & ENABLE_1510_MODE)))
+#define dma_omap16xx() ((dma_omap1() && (d->dev_caps & ENABLE_16XX_MODE)))
+
+extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+extern int omap_request_dma(int dev_id, const char *dev_name,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data, int *dma_ch);
+extern void omap_enable_dma_irq(int ch, u16 irq_bits);
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
+extern void omap_free_dma(int ch);
+extern void omap_start_dma(int lch);
+extern void omap_stop_dma(int lch);
+extern void omap_set_dma_transfer_params(int lch, int data_type,
+ int elem_count, int frame_count,
+ int sync_mode,
+ int dma_trigger, int src_or_dst_synch);
+extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
+ u32 color);
+extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
+extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
+
+extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
+ unsigned long src_start,
+ int src_ei, int src_fi);
+extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
+extern void omap_set_dma_src_data_pack(int lch, int enable);
+extern void omap_set_dma_src_burst_mode(int lch,
+ enum omap_dma_burst_mode burst_mode);
+
+extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
+ unsigned long dest_start,
+ int dst_ei, int dst_fi);
+extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
+extern void omap_set_dma_dest_data_pack(int lch, int enable);
+extern void omap_set_dma_dest_burst_mode(int lch,
+ enum omap_dma_burst_mode burst_mode);
+
+extern void omap_set_dma_params(int lch,
+ struct omap_dma_channel_params *params);
+
+extern void omap_dma_link_lch(int lch_head, int lch_queue);
+extern void omap_dma_unlink_lch(int lch_head, int lch_queue);
+
+extern int omap_set_dma_callback(int lch,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data);
+extern dma_addr_t omap_get_dma_src_pos(int lch);
+extern dma_addr_t omap_get_dma_dst_pos(int lch);
+extern void omap_clear_dma(int lch);
+extern int omap_get_dma_active_status(int lch);
+extern int omap_dma_running(void);
+extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
+ int tparams);
+extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
+ unsigned char write_prio);
+extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype);
+extern void omap_set_dma_src_endian_type(int lch, enum end_type etype);
+extern int omap_get_dma_index(int lch, int *ei, int *fi);
+
+void omap_dma_global_context_save(void);
+void omap_dma_global_context_restore(void);
+
+extern void omap_dma_disable_irq(int lch);
+
+/* Chaining APIs */
+#ifndef CONFIG_ARCH_OMAP1
+extern int omap_request_dma_chain(int dev_id, const char *dev_name,
+ void (*callback) (int lch, u16 ch_status,
+ void *data),
+ int *chain_id, int no_of_chans,
+ int chain_mode,
+ struct omap_dma_channel_params params);
+extern int omap_free_dma_chain(int chain_id);
+extern int omap_dma_chain_a_transfer(int chain_id, int src_start,
+ int dest_start, int elem_count,
+ int frame_count, void *callbk_data);
+extern int omap_start_dma_chain_transfers(int chain_id);
+extern int omap_stop_dma_chain_transfers(int chain_id);
+extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi);
+extern int omap_get_dma_chain_dst_pos(int chain_id);
+extern int omap_get_dma_chain_src_pos(int chain_id);
+
+extern int omap_modify_dma_chain_params(int chain_id,
+ struct omap_dma_channel_params params);
+extern int omap_dma_chain_status(int chain_id);
+#endif
+
+#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_FB_OMAP)
+#include <mach/lcd_dma.h>
+#else
+static inline int omap_lcd_dma_running(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LINUX_OMAP_DMA_H */
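For reference only, the legacy call sequence these declarations keep alive looks roughly like the sketch below; the request line, addresses and names are placeholders, and as the header comment says, new code should use the generic dmaengine API instead.

/* Sketch: deprecated OMAP legacy DMA setup for a simple transfer. */
static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* transfer complete or error; inspect ch_status bits */
}

static int example_legacy_xfer(unsigned long src, unsigned long dst, int words)
{
	int lch, ret;

	ret = omap_request_dma(0 /* request line; 0 = no HW request */,
			       "example", example_dma_cb, NULL, &lch);
	if (ret)
		return ret;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
				     words, 1, OMAP_DMA_SYNC_ELEMENT, 0, 0);
	omap_set_dma_src_params(lch, 0, OMAP_DMA_AMODE_POST_INC, src, 0, 0);
	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC, dst, 0, 0);
	omap_start_dma(lch);
	return 0;
}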
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b5d13841604..70473da47b3 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page)
* pages on the LRU and/or pagecache.
*/
TESTPAGEFLAG(Compound, compound)
-__PAGEFLAG(Head, compound)
+__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
/*
* PG_reclaim is used in combination with PG_compound to mark the
@@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound)
* PG_compound & PG_reclaim => Tail page
* PG_compound & ~PG_reclaim => Head page
*/
+#define PG_head_mask ((1L << PG_compound))
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+static inline int PageHead(struct page *page)
+{
+ return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+}
+
static inline int PageTail(struct page *page)
{
return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index af8229244ee..15472d691ee 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -333,6 +333,8 @@ struct pci_dev {
};
struct pci_ats *ats; /* Address Translation Service */
#endif
+ phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
+ size_t romlen; /* Length of ROM if it's not from the BAR */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -538,6 +540,9 @@ enum pci_ers_result {
/* Device driver is fully recovered and operational */
PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
+
+ /* No AER capabilities registered for the driver */
+ PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};
/* PCI bus error event callbacks */
@@ -573,6 +578,7 @@ struct pci_driver {
int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);
+ int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
const struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
@@ -726,6 +732,8 @@ extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
extern void pci_dev_put(struct pci_dev *dev);
extern void pci_remove_bus(struct pci_bus *b);
extern void pci_stop_and_remove_bus_device(struct pci_dev *dev);
+void pci_stop_root_bus(struct pci_bus *bus);
+void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
extern void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
@@ -970,6 +978,7 @@ void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
+void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
@@ -1604,6 +1613,7 @@ void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
+int pcibios_add_device(struct pci_dev *dev);
#ifdef CONFIG_PCI_MMCONFIG
extern void __init pci_mmcfg_early_init(void);
@@ -1613,7 +1623,7 @@ static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif
-int pci_ext_cfg_avail(struct pci_dev *dev);
+int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
@@ -1622,6 +1632,8 @@ extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
extern void pci_disable_sriov(struct pci_dev *dev);
extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
extern int pci_num_vf(struct pci_dev *dev);
+extern int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
+extern int pci_sriov_get_totalvfs(struct pci_dev *dev);
#else
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
@@ -1638,6 +1650,14 @@ static inline int pci_num_vf(struct pci_dev *dev)
{
return 0;
}
+static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
+{
+ return 0;
+}
+static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
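A hedged sketch (not part of the patch) of how a PF driver might wire up the new sriov_configure callback; the example_* names are hypothetical and error handling is trimmed.

/* Sketch: PF driver implementing pci_driver.sriov_configure. */
static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}

	if (num_vfs > pci_sriov_get_totalvfs(dev))
		return -EINVAL;

	ret = pci_enable_sriov(dev, num_vfs);
	return ret ? ret : num_vfs;	/* return enabled VF count on success */
}

static struct pci_driver example_pf_driver = {
	.name			= "example_pf",
	/* .id_table, .probe, .remove ... */
	.sriov_configure	= example_sriov_configure,
};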
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0f8447376dd..0eb65796bcb 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1568,6 +1568,7 @@
#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822
#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
#define PCI_DEVICE_ID_RICOH_R5C832 0x0832
#define PCI_DEVICE_ID_RICOH_R5C843 0x0843
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index bd1e86071e5..3e88c9a7d57 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -1,83 +1,34 @@
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H
-#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
#include <linux/percpu.h>
-#include <linux/rcupdate.h>
-#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/lockdep.h>
struct percpu_rw_semaphore {
- unsigned __percpu *counters;
- bool locked;
- struct mutex mtx;
+ unsigned int __percpu *fast_read_ctr;
+ atomic_t write_ctr;
+ struct rw_semaphore rw_sem;
+ atomic_t slow_read_ctr;
+ wait_queue_head_t write_waitq;
};
-#define light_mb() barrier()
-#define heavy_mb() synchronize_sched_expedited()
+extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern void percpu_up_read(struct percpu_rw_semaphore *);
-static inline void percpu_down_read(struct percpu_rw_semaphore *p)
-{
- rcu_read_lock_sched();
- if (unlikely(p->locked)) {
- rcu_read_unlock_sched();
- mutex_lock(&p->mtx);
- this_cpu_inc(*p->counters);
- mutex_unlock(&p->mtx);
- return;
- }
- this_cpu_inc(*p->counters);
- rcu_read_unlock_sched();
- light_mb(); /* A, between read of p->locked and read of data, paired with D */
-}
+extern void percpu_down_write(struct percpu_rw_semaphore *);
+extern void percpu_up_write(struct percpu_rw_semaphore *);
-static inline void percpu_up_read(struct percpu_rw_semaphore *p)
-{
- light_mb(); /* B, between read of the data and write to p->counter, paired with C */
- this_cpu_dec(*p->counters);
-}
+extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
+ const char *, struct lock_class_key *);
+extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
-static inline unsigned __percpu_count(unsigned __percpu *counters)
-{
- unsigned total = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
-
- return total;
-}
-
-static inline void percpu_down_write(struct percpu_rw_semaphore *p)
-{
- mutex_lock(&p->mtx);
- p->locked = true;
- synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
- while (__percpu_count(p->counters))
- msleep(1);
- heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
-}
-
-static inline void percpu_up_write(struct percpu_rw_semaphore *p)
-{
- heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
- p->locked = false;
- mutex_unlock(&p->mtx);
-}
-
-static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
-{
- p->counters = alloc_percpu(unsigned);
- if (unlikely(!p->counters))
- return -ENOMEM;
- p->locked = false;
- mutex_init(&p->mtx);
- return 0;
-}
-
-static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
-{
- free_percpu(p->counters);
- p->counters = NULL; /* catch use after free bugs */
-}
+#define percpu_init_rwsem(brw) \
+({ \
+ static struct lock_class_key rwsem_key; \
+ __percpu_init_rwsem(brw, #brw, &rwsem_key); \
+})
#endif
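A minimal usage sketch (illustration only) of the reworked percpu_rw_semaphore API, showing the fast, frequently taken read side and the rare, expensive write side.

/* Sketch: per-CPU rwsem usage. */
static struct percpu_rw_semaphore example_sem;

static int __init example_init(void)
{
	return percpu_init_rwsem(&example_sem);
}

static void example_reader(void)
{
	percpu_down_read(&example_sem);
	/* lightweight read-side critical section */
	percpu_up_read(&example_sem);
}

static void example_writer(void)
{
	percpu_down_write(&example_sem);
	/* exclusive write-side critical section */
	percpu_up_write(&example_sem);
}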
diff --git a/include/linux/pid.h b/include/linux/pid.h
index b152d44fb18..2381c973d89 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -121,6 +121,7 @@ int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
+extern void disable_pid_allocation(struct pid_namespace *ns);
/*
* ns_of_pid() returns the pid namespace in which the specified pid was
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 65e3e87eacc..215e5e3dda1 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -21,6 +21,7 @@ struct pid_namespace {
struct kref kref;
struct pidmap pidmap[PIDMAP_ENTRIES];
int last_pid;
+ unsigned int nr_hashed;
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
@@ -31,13 +32,18 @@ struct pid_namespace {
#ifdef CONFIG_BSD_PROCESS_ACCT
struct bsd_acct_struct *bacct;
#endif
+ struct user_namespace *user_ns;
+ struct work_struct proc_work;
kgid_t pid_gid;
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
+ unsigned int proc_inum;
};
extern struct pid_namespace init_pid_ns;
+#define PIDNS_HASH_ADDING (1U << 31)
+
#ifdef CONFIG_PID_NS
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
@@ -46,7 +52,8 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
-extern struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *ns);
+extern struct pid_namespace *copy_pid_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
extern void put_pid_ns(struct pid_namespace *ns);
@@ -59,8 +66,8 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
-static inline struct pid_namespace *
-copy_pid_ns(unsigned long flags, struct pid_namespace *ns)
+static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct pid_namespace *ns)
{
if (flags & CLONE_NEWPID)
ns = ERR_PTR(-EINVAL);
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index aa9875f77c4..88272591a89 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -38,12 +38,6 @@ struct samsung_i2s {
#define QUIRK_NEED_RSTCLR (1 << 3)
/* Quirks of the I2S controller */
u32 quirks;
-
- /*
- * Array of clock names that can be used to generate I2S signals.
- * Also corresponds to clocks of I2SMOD[10]
- */
- const char **src_clk;
dma_addr_t idma_addr;
};
diff --git a/arch/arm/plat-nomadik/include/plat/mtu.h b/include/linux/platform_data/clocksource-nomadik-mtu.h
index 582641f3dc0..80088973b73 100644
--- a/arch/arm/plat-nomadik/include/plat/mtu.h
+++ b/include/linux/platform_data/clocksource-nomadik-mtu.h
@@ -1,7 +1,7 @@
#ifndef __PLAT_MTU_H
#define __PLAT_MTU_H
-void nmdk_timer_init(void __iomem *base);
+void nmdk_timer_init(void __iomem *base, int irq);
void nmdk_clkevt_reset(void);
void nmdk_clksrc_reset(void);
diff --git a/include/linux/platform_data/crypto-ux500.h b/include/linux/platform_data/crypto-ux500.h
index 5b2d0817e26..94df96d9a33 100644
--- a/include/linux/platform_data/crypto-ux500.h
+++ b/include/linux/platform_data/crypto-ux500.h
@@ -7,7 +7,7 @@
#ifndef _CRYPTO_UX500_H
#define _CRYPTO_UX500_H
#include <linux/dmaengine.h>
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
struct hash_platform_data {
void *mem_to_engine;
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index d0c5825876f..8db5ae03b6e 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -16,12 +16,13 @@
#ifndef __DAVINCI_ASP_H
#define __DAVINCI_ASP_H
+#include <linux/genalloc.h>
+
struct snd_platform_data {
u32 tx_dma_offset;
u32 rx_dma_offset;
int asp_chan_q; /* event queue number for ASP channel */
int ram_chan_q; /* event queue number for RAM channel */
- unsigned int codec_fmt;
/*
* Allowing this is more efficient and eliminates left and right swaps
* caused by underruns, but will swap the left and right channels
@@ -30,6 +31,7 @@ struct snd_platform_data {
unsigned enable_channel_combine:1;
unsigned sram_size_playback;
unsigned sram_size_capture;
+ struct gen_pool *sram_pool;
/*
* If McBSP peripheral gets the clock from an external pin,
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 2ba1f7d76ee..8ec18f64e39 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -10,15 +10,14 @@
#include <linux/dmaengine.h>
#include <linux/mbus.h>
-#define MV_XOR_SHARED_NAME "mv_xor_shared"
-#define MV_XOR_NAME "mv_xor"
+#define MV_XOR_NAME "mv_xor"
-struct mv_xor_platform_data {
- struct platform_device *shared;
- int hw_id;
+struct mv_xor_channel_data {
dma_cap_mask_t cap_mask;
- size_t pool_size;
};
+struct mv_xor_platform_data {
+ struct mv_xor_channel_data *channels;
+};
#endif
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/include/linux/platform_data/dma-ste-dma40.h
index 9ff93b06568..9ff93b06568 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/include/linux/platform_data/dma-ste-dma40.h
diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h
new file mode 100644
index 00000000000..6faa992a950
--- /dev/null
+++ b/include/linux/platform_data/i2c-cbus-gpio.h
@@ -0,0 +1,27 @@
+/*
+ * i2c-cbus-gpio.h - CBUS I2C platform_data definition
+ *
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Written by Felipe Balbi and Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H
+#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H
+
+struct i2c_cbus_platform_data {
+ int dat_gpio;
+ int clk_gpio;
+ int sel_gpio;
+};
+
+#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index c677b9f2fef..5b429c43a29 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/platform_device.h>
+
#define MMU_REG_SIZE 256
/**
@@ -42,8 +44,11 @@ struct omap_mmu_dev_attr {
struct iommu_platform_data {
const char *name;
- const char *clk_name;
- const int nr_tlb_entries;
+ const char *reset_name;
+ int nr_tlb_entries;
u32 da_start;
u32 da_end;
+
+ int (*assert_reset)(struct platform_device *pdev, const char *name);
+ int (*deassert_reset)(struct platform_device *pdev, const char *name);
};
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 761f3175236..e81f62d24ee 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -89,11 +89,6 @@ enum lp8556_brightness_source {
LP8556_COMBINED2, /* pwm + i2c after the shaper block */
};
-struct lp855x_pwm_data {
- void (*pwm_set_intensity) (int brightness, int max_brightness);
- int (*pwm_get_intensity) (int max_brightness);
-};
-
struct lp855x_rom_data {
u8 addr;
u8 val;
@@ -105,7 +100,7 @@ struct lp855x_rom_data {
* @mode : brightness control by pwm or lp855x register
* @device_control : value of DEVICE CONTROL register
* @initial_brightness : initial value of backlight brightness
- * @pwm_data : platform specific pwm generation functions.
+ * @period_ns : platform specific pwm period value, in nanoseconds.
Only valid when mode is PWM_BASED.
* @load_new_rom_data :
0 : use default configuration data
@@ -118,7 +113,7 @@ struct lp855x_platform_data {
enum lp855x_brightness_ctrl_mode mode;
u8 device_control;
int initial_brightness;
- struct lp855x_pwm_data pwm_data;
+ unsigned int period_ns;
u8 load_new_rom_data;
int size_program;
struct lp855x_rom_data *rom_data;
diff --git a/include/linux/platform_data/mtd-nomadik-nand.h b/include/linux/platform_data/mtd-nomadik-nand.h
deleted file mode 100644
index c3c8254c22a..00000000000
--- a/include/linux/platform_data/mtd-nomadik-nand.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __ASM_ARCH_NAND_H
-#define __ASM_ARCH_NAND_H
-
-struct nomadik_nand_platform_data {
- struct mtd_partition *parts;
- int nparts;
- int options;
- int (*init) (void);
- int (*exit) (void);
-};
-
-#define NAND_IO_DATA 0x40000000
-#define NAND_IO_CMD 0x40800000
-#define NAND_IO_ADDR 0x41000000
-
-#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index c7bef788daa..ee60ef79d79 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -25,8 +25,34 @@
#ifndef _OMAP_TWL4030_H_
#define _OMAP_TWL4030_H_
+/* To select if only one channel is connected in a stereo port */
+#define OMAP_TWL4030_LEFT (1 << 0)
+#define OMAP_TWL4030_RIGHT (1 << 1)
+
struct omap_tw4030_pdata {
const char *card_name;
+ /* Voice port is connected to McBSP3 */
+ bool voice_connected;
+
+ /* The driver will parse the connection flags if this flag is set */
+ bool custom_routing;
+ /* Flags to indicate connected audio ports. */
+ u8 has_hs;
+ u8 has_hf;
+ u8 has_predriv;
+ u8 has_carkit;
+ bool has_ear;
+
+ bool has_mainmic;
+ bool has_submic;
+ bool has_hsmic;
+ bool has_carkitmic;
+ bool has_digimic0;
+ bool has_digimic1;
+ u8 has_linein;
+
+ /* Jack detect GPIO or <= 0 if it is not implemented */
+ int jack_detect;
};
#endif /* _OMAP_TWL4030_H_ */
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/include/linux/platform_data/serial-omap.h
index ff9b0aab528..ff9b0aab528 100644
--- a/arch/arm/plat-omap/include/plat/omap-serial.h
+++ b/include/linux/platform_data/serial-omap.h
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
new file mode 100644
index 00000000000..301956e6314
--- /dev/null
+++ b/include/linux/platform_data/spi-clps711x.h
@@ -0,0 +1,21 @@
+/*
+ * CLPS711X SPI bus driver definitions
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
+#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
+
+/* Board specific platform_data */
+struct spi_clps711x_pdata {
+ int *chipselect; /* Array of GPIO-numbers */
+ int num_chipselect; /* Total count of GPIOs */
+};
+
+#endif
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index a357eb26bd2..a65572d5321 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -7,9 +7,13 @@
#define OMAP4_MCSPI_REG_OFFSET 0x100
+#define MCSPI_PINDIR_D0_IN_D1_OUT 0
+#define MCSPI_PINDIR_D0_OUT_D1_IN 1
+
struct omap2_mcspi_platform_config {
unsigned short num_cs;
unsigned int regs_offset;
+ unsigned int pin_dir:1;
};
struct omap2_mcspi_dev_attr {
diff --git a/include/linux/platform_data/ti_am335x_adc.h b/include/linux/platform_data/ti_am335x_adc.h
new file mode 100644
index 00000000000..e41d5834cb8
--- /dev/null
+++ b/include/linux/platform_data/ti_am335x_adc.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_TI_AM335X_ADC_H
+#define __LINUX_TI_AM335X_ADC_H
+
+/**
+ * struct adc_data - ADC input information
+ * @adc_channels: Number of analog inputs
+ * available for ADC.
+ */
+
+struct adc_data {
+ unsigned int adc_channels;
+};
+
+#endif
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index 8570bcfe631..ef65b67c56c 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -59,6 +59,9 @@ struct usbhs_omap_platform_data {
struct ehci_hcd_omap_platform_data *ehci_data;
struct ohci_hcd_omap_platform_data *ohci_data;
+
+ /* OMAP3 revisions <= ES2.1 have a single ULPI bypass control bit */
+ unsigned single_ulpi_bypass:1;
};
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
new file mode 100644
index 00000000000..97a1665eaea
--- /dev/null
+++ b/include/linux/power/bq2415x_charger.h
@@ -0,0 +1,95 @@
+/*
+ * bq2415x charger driver
+ *
+ * Copyright (C) 2011-2012 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef BQ2415X_CHARGER_H
+#define BQ2415X_CHARGER_H
+
+/*
+ * This is the platform data for the bq2415x chip. It contains default board
+ * voltages and currents, which can also be configured later via sysfs. If a
+ * value is -1, the default chip value (specified in the datasheet) will be
+ * used.
+ *
+ * The resistor_sense value is needed for configuring the charge and
+ * termination current. If it is less than or equal to zero, configuring the
+ * charge and termination current will not be possible.
+ *
+ * The set_mode_hook function is needed for automode (setting the correct
+ * current limit when a charger is connected/disconnected, or enabling boost
+ * mode). When it is NULL, automode is disabled. When it is not NULL, it must
+ * have this prototype:
+ *
+ * int (*set_mode_hook)(
+ * void (*hook)(enum bq2415x_mode mode, void *data),
+ * void *data)
+ *
+ * where hook is the hook function (see below) and data is a pointer to
+ * driver private data.
+ *
+ * The bq2415x driver will call it as:
+ *
+ * platform_data->set_mode_hook(bq2415x_hook_function, bq2415x_device);
+ *
+ * The board/platform set_mode_hook function returns a non-zero value when
+ * the hook function was successfully registered. Platform code should then
+ * call that hook function (with the supplied data pointer) every time the
+ * charger is connected/disconnected or boost mode needs to be enabled. The
+ * bq2415x driver will then set the correct current limit and enable/disable
+ * the charger or boost mode.
+ *
+ * Hook function has this prototype:
+ *
+ * void hook(enum bq2415x_mode mode, void *data);
+ *
+ * where mode is the bq2415x mode (charger or boost) and data is the pointer
+ * to driver private data (as passed to set_mode_hook).
+ *
+ * When the bq2415x driver is being unloaded, it calls:
+ *
+ * platform_data->set_mode_hook(NULL, NULL);
+ *
+ * (hook function and driver private data are NULL)
+ *
+ * After that, board/platform code must not call the driver hook function;
+ * the hook function pointer may no longer be valid and calling it will have
+ * undefined results.
+ */
+
+/* Supported modes with maximal current limit */
+enum bq2415x_mode {
+ BQ2415X_MODE_NONE, /* unknown or no charger (100mA) */
+ BQ2415X_MODE_HOST_CHARGER, /* usb host/hub charger (500mA) */
+ BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */
+ BQ2415X_MODE_BOOST, /* boost mode (charging disabled) */
+};
+
+struct bq2415x_platform_data {
+ int current_limit; /* mA */
+ int weak_battery_voltage; /* mV */
+ int battery_regulation_voltage; /* mV */
+ int charge_current; /* mA */
+ int termination_current; /* mA */
+ int resistor_sense; /* m ohm */
+ int (*set_mode_hook)(void (*hook)(enum bq2415x_mode mode, void *data),
+ void *data);
+};
+
+#endif
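To make the hook protocol described above concrete, here is a rough board-side sketch. The storage variables, the cable-event function and the numeric values are illustrative assumptions, not part of the driver interface:

/* Illustrative board code: store the hook registered by the bq2415x
 * driver and call it on (hypothetical) charger cable events. */
static void (*bq_hook)(enum bq2415x_mode mode, void *data);
static void *bq_hook_data;

static int board_bq2415x_set_mode_hook(
		void (*hook)(enum bq2415x_mode mode, void *data), void *data)
{
	bq_hook = hook;		/* NULL when the driver is unloaded */
	bq_hook_data = data;
	return 1;		/* non-zero: hook successfully registered */
}

/* Called from hypothetical board cable-detection code. */
static void board_charger_event(bool dedicated_charger)
{
	if (bq_hook)
		bq_hook(dedicated_charger ? BQ2415X_MODE_DEDICATED_CHARGER
					  : BQ2415X_MODE_NONE, bq_hook_data);
}

static struct bq2415x_platform_data board_bq2415x_pdata = {
	.current_limit			= 100,	/* mA */
	.weak_battery_voltage		= 3400,	/* mV */
	.battery_regulation_voltage	= 4200,	/* mV */
	.charge_current			= 650,	/* mA */
	.termination_current		= 100,	/* mA */
	.resistor_sense			= 68,	/* m ohm */
	.set_mode_hook			= board_bq2415x_set_mode_hook,
};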
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 4a496ebc7d7..c0f44c2b006 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -260,8 +260,13 @@ struct omap_sr_nvalue_table {
*
* @name: instance name
* @ip_type: Smartreflex IP type.
- * @senp_mod: SENPENABLE value for the sr
- * @senn_mod: SENNENABLE value for sr
+ * @senp_mod: SENPENABLE value of the sr CONFIG register
+ * @senn_mod: SENNENABLE value of the sr CONFIG register
+ * @err_weight: ERRWEIGHT value of the sr ERRCONFIG register
+ * @err_maxlimit: ERRMAXLIMIT value of the sr ERRCONFIG register
+ * @accum_data: ACCUMDATA value of the sr CONFIG register
+ * @senn_avgweight: SENNAVGWEIGHT value of the sr AVGWEIGHT register
+ * @senp_avgweight: SENPAVGWEIGHT value of the sr AVGWEIGHT register
* @nvalue_count: Number of distinct nvalues in the nvalue table
* @enable_on_init: whether this sr module needs to be enabled at
* boot up or not.
@@ -274,6 +279,11 @@ struct omap_sr_data {
int ip_type;
u32 senp_mod;
u32 senn_mod;
+ u32 err_weight;
+ u32 err_maxlimit;
+ u32 accum_data;
+ u32 senn_avgweight;
+ u32 senp_avgweight;
int nvalue_count;
bool enable_on_init;
struct omap_sr_nvalue_table *nvalue_table;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index e5ef45834c3..1f0ab90aff0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -114,6 +114,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
@@ -186,6 +188,7 @@ struct power_supply {
struct work_struct changed_work;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
+ struct thermal_cooling_device *tcd;
#endif
#ifdef CONFIG_LEDS_TRIGGERS
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 3fd2e871ff1..32676b35d2f 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -28,7 +28,11 @@ struct mm_struct;
*/
enum {
- PROC_ROOT_INO = 1,
+ PROC_ROOT_INO = 1,
+ PROC_IPC_INIT_INO = 0xEFFFFFFFU,
+ PROC_UTS_INIT_INO = 0xEFFFFFFEU,
+ PROC_USER_INIT_INO = 0xEFFFFFFDU,
+ PROC_PID_INIT_INO = 0xEFFFFFFCU,
};
/*
@@ -174,7 +178,10 @@ extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
struct proc_dir_entry *parent);
extern struct file *proc_ns_fget(int fd);
+extern bool proc_ns_inode(struct inode *inode);
+extern int proc_alloc_inum(unsigned int *pino);
+extern void proc_free_inum(unsigned int inum);
#else
#define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; })
@@ -229,6 +236,19 @@ static inline struct file *proc_ns_fget(int fd)
return ERR_PTR(-EINVAL);
}
+static inline bool proc_ns_inode(struct inode *inode)
+{
+ return false;
+}
+
+static inline int proc_alloc_inum(unsigned int *inum)
+{
+ *inum = 1;
+ return 0;
+}
+static inline void proc_free_inum(unsigned int inum)
+{
+}
#endif /* CONFIG_PROC_FS */
#if !defined(CONFIG_PROC_KCORE)
@@ -247,10 +267,14 @@ struct proc_ns_operations {
void *(*get)(struct task_struct *task);
void (*put)(void *ns);
int (*install)(struct nsproxy *nsproxy, void *ns);
+ unsigned int (*inum)(void *ns);
};
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
+extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations userns_operations;
+extern const struct proc_ns_operations mntns_operations;
union proc_op {
int (*proc_get_link)(struct dentry *, struct path *);
@@ -290,4 +314,7 @@ static inline struct net *PDE_NET(struct proc_dir_entry *pde)
return pde->parent->data;
}
+#include <linux/signal.h>
+
+void render_sigset_t(struct seq_file *m, const char *header, sigset_t *set);
#endif /* _LINUX_PROC_FS_H */
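As a rough illustration of how the new inode-number helpers are meant to be used, a namespace implementation could allocate a proc inum at creation time, report it through the ->inum() operation and release it on teardown. The foo_namespace type and helpers below are hypothetical:

struct foo_namespace {
	unsigned int proc_inum;
	/* ... */
};

static int foo_ns_create(struct foo_namespace *ns)
{
	return proc_alloc_inum(&ns->proc_inum);
}

static void foo_ns_destroy(struct foo_namespace *ns)
{
	proc_free_inum(ns->proc_inum);
}

static unsigned int foons_inum(void *ns)
{
	return ((struct foo_namespace *)ns)->proc_inum;
}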
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index a89ff04bddd..1693775ecfe 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -32,6 +32,8 @@
#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
+#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
@@ -342,6 +344,10 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
#define signal_pt_regs() task_pt_regs(current)
#endif
+#ifndef current_user_stack_pointer
+#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
+#endif
+
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h
new file mode 100644
index 00000000000..0ca75825b60
--- /dev/null
+++ b/include/linux/pvclock_gtod.h
@@ -0,0 +1,9 @@
+#ifndef _PVCLOCK_GTOD_H
+#define _PVCLOCK_GTOD_H
+
+#include <linux/notifier.h>
+
+extern int pvclock_gtod_register_notifier(struct notifier_block *nb);
+extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb);
+
+#endif /* _PVCLOCK_GTOD_H */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 112b3143684..6d661f32e0e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -171,6 +171,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+
struct pwm_device *pwm_get(struct device *dev, const char *consumer);
void pwm_put(struct pwm_device *pwm);
diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/include/linux/raid/Kbuild
+++ /dev/null
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 640c69ceec9..8dfaa2ce2e9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -98,6 +98,9 @@ extern const struct raid6_calls raid6_altivec1;
extern const struct raid6_calls raid6_altivec2;
extern const struct raid6_calls raid6_altivec4;
extern const struct raid6_calls raid6_altivec8;
+extern const struct raid6_calls raid6_avx2x1;
+extern const struct raid6_calls raid6_avx2x2;
+extern const struct raid6_calls raid6_avx2x4;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -109,6 +112,7 @@ struct raid6_recov_calls {
extern const struct raid6_recov_calls raid6_recov_intx1;
extern const struct raid6_recov_calls raid6_recov_ssse3;
+extern const struct raid6_recov_calls raid6_recov_avx2;
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
diff --git a/include/linux/random.h b/include/linux/random.h
index 6330ed47b38..d9846088c2c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -25,10 +25,19 @@ extern const struct file_operations random_fops, urandom_fops;
unsigned int get_random_int(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
-u32 random32(void);
-void srandom32(u32 seed);
+u32 prandom_u32(void);
+void prandom_bytes(void *buf, int nbytes);
+void prandom_seed(u32 seed);
-u32 prandom32(struct rnd_state *);
+/*
+ * These macros are preserved for backward compatibility and should be
+ * removed as soon as a transition is finished.
+ */
+#define random32() prandom_u32()
+#define srandom32(seed) prandom_seed(seed)
+
+u32 prandom_u32_state(struct rnd_state *);
+void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
/*
* Handle minimum values for seeds
@@ -39,11 +48,11 @@ static inline u32 __seed(u32 x, u32 m)
}
/**
- * prandom32_seed - set seed for prandom32().
+ * prandom_seed_state - set seed for prandom_u32_state().
* @state: pointer to state structure to receive the seed.
* @seed: arbitrary 64-bit value to use as a seed.
*/
-static inline void prandom32_seed(struct rnd_state *state, u64 seed)
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
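A short sketch of the renamed pseudo-random API, for a caller that wants either the shared stream or a private, reproducible one (the seed value is arbitrary):

struct rnd_state state;
u8 buf[16];
u32 r;

r = prandom_u32();			/* shared, globally seeded stream */

prandom_seed_state(&state, 0x123456789abcdefULL);
prandom_bytes_state(&state, buf, sizeof(buf));
r = prandom_u32_state(&state);		/* private, reproducible stream */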
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index c43cd3556b1..7bc732ce6e5 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -160,6 +160,7 @@ int regulator_bulk_force_disable(int num_consumers,
void regulator_bulk_free(int num_consumers,
struct regulator_bulk_data *consumers);
+int regulator_can_change_voltage(struct regulator *regulator);
int regulator_count_voltages(struct regulator *regulator);
int regulator_list_voltage(struct regulator *regulator, unsigned selector);
int regulator_is_supported_voltage(struct regulator *regulator,
@@ -358,6 +359,10 @@ static inline void regulator_set_drvdata(struct regulator *regulator,
{
}
+static inline int regulator_count_voltages(struct regulator *regulator)
+{
+ return 0;
+}
#endif
static inline int regulator_set_voltage_tol(struct regulator *regulator,
@@ -367,4 +372,12 @@ static inline int regulator_set_voltage_tol(struct regulator *regulator,
new_uV - tol_uV, new_uV + tol_uV);
}
+static inline int regulator_is_supported_voltage_tol(struct regulator *regulator,
+ int target_uV, int tol_uV)
+{
+ return regulator_is_supported_voltage(regulator,
+ target_uV - tol_uV,
+ target_uV + tol_uV);
+}
+
#endif
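For example, a consumer driver could combine the tolerance helpers roughly as below; the 1.8 V target and 5% tolerance are made-up values:

/* Request 1.8 V +/- 90 mV if the regulator can supply it. */
static int example_set_vcc(struct regulator *reg)
{
	int target_uV = 1800000, tol_uV = 90000;

	if (regulator_is_supported_voltage_tol(reg, target_uV, tol_uV) <= 0)
		return -EINVAL;

	return regulator_set_voltage_tol(reg, target_uV, tol_uV);
}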
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 7932a3bf21b..d10bb0f39c5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -181,10 +181,13 @@ enum regulator_type {
* @type: Indicates if the regulator is a voltage or current regulator.
* @owner: Module providing the regulator, used for refcounting.
*
+ * @continuous_voltage_range: Indicates if the regulator can set any
+ * voltage within the constraints range.
* @n_voltages: Number of selectors available for ops.list_voltage().
*
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
+ * @linear_min_sel: Minimal selector for starting linear mapping
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @volt_table: Voltage mapping table (if table based mapping)
*
@@ -199,6 +202,7 @@ struct regulator_desc {
const char *name;
const char *supply_name;
int id;
+ bool continuous_voltage_range;
unsigned n_voltages;
struct regulator_ops *ops;
int irq;
@@ -207,6 +211,7 @@ struct regulator_desc {
unsigned int min_uV;
unsigned int uV_step;
+ unsigned int linear_min_sel;
unsigned int ramp_delay;
const unsigned int *volt_table;
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
new file mode 100644
index 00000000000..f8acc052e35
--- /dev/null
+++ b/include/linux/regulator/max8973-regulator.h
@@ -0,0 +1,72 @@
+/*
+ * max8973-regulator.h -- MAXIM 8973 regulator
+ *
+ * Interface for regulator driver for MAXIM 8973 DC-DC step-down
+ * switching regulator.
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8973_H
+#define __LINUX_REGULATOR_MAX8973_H
+
+/*
+ * Control flags for configuration of the device.
+ * Clients need to pass this information as ORed flags.
+ */
+#define MAX8973_CONTROL_REMOTE_SENSE_ENABLE 0x00000001
+#define MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE 0x00000002
+#define MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE 0x00000004
+#define MAX8973_CONTROL_BIAS_ENABLE 0x00000008
+#define MAX8973_CONTROL_PULL_DOWN_ENABLE 0x00000010
+#define MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE 0x00000020
+
+#define MAX8973_CONTROL_CLKADV_TRIP_DISABLED 0x00000000
+#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US 0x00010000
+#define MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US 0x00020000
+#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS 0x00030000
+
+#define MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL 0x00000000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER 0x00100000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER 0x00200000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER 0x00300000
+
+/*
+ * struct max8973_regulator_platform_data - max8973 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @control_flags: Control flags, an ORed value of the above flags, used to
+ * configure the device.
+ * @enable_ext_control: Enable voltage enable/disable through the external
+ * control signal on the EN input pin. If false, the voltage
+ * output is enabled/disabled through the EN bit of the
+ * device register.
+ * @dvs_gpio: GPIO for DVS. It should be -1 if the DVS pin is tied to fixed logic.
+ * @dvs_def_state: Default state of DVS: 1 if it is high, else 0.
+ */
+struct max8973_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ unsigned long control_flags;
+ bool enable_ext_control;
+ int dvs_gpio;
+ unsigned dvs_def_state:1;
+};
+
+#endif /* __LINUX_REGULATOR_MAX8973_H */
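A board might describe a MAX8973 rail along these lines; the chosen flags and the init-data symbol are illustrative assumptions:

static struct max8973_regulator_platform_data board_max8973_pdata = {
	.reg_init_data		= &board_vdd_cpu_init_data,	/* hypothetical */
	.control_flags		= MAX8973_CONTROL_REMOTE_SENSE_ENABLE |
				  MAX8973_CONTROL_PULL_DOWN_ENABLE,
	.enable_ext_control	= false,
	.dvs_gpio		= -1,	/* DVS pin tied to fixed logic */
	.dvs_def_state		= 0,
};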
diff --git a/include/linux/regulator/tps51632-regulator.h b/include/linux/regulator/tps51632-regulator.h
new file mode 100644
index 00000000000..d00841e1a75
--- /dev/null
+++ b/include/linux/regulator/tps51632-regulator.h
@@ -0,0 +1,47 @@
+/*
+ * tps51632-regulator.h -- TPS51632 regulator
+ *
+ * Interface for regulator driver for TPS51632 3-2-1 Phase D-Cap Step Down
+ * Driverless Controller with serial VID control and DVFS.
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_TPS51632_H
+#define __LINUX_REGULATOR_TPS51632_H
+
+/*
+ * struct tps51632_regulator_platform_data - tps51632 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @enable_pwm_dvfs: Whether to enable PWM DVFS.
+ * @dvfs_step_20mV: True if the DVFS step is 20mV, false if it is 10mV.
+ * @max_voltage_uV: Maximum possible voltage in PWM-DVFS mode.
+ * @base_voltage_uV: Base voltage when PWM-DVFS is enabled.
+ */
+struct tps51632_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ bool enable_pwm_dvfs;
+ bool dvfs_step_20mV;
+ int max_voltage_uV;
+ int base_voltage_uV;
+};
+
+#endif /* __LINUX_REGULATOR_TPS51632_H */
diff --git a/include/linux/regulator/tps65090-regulator.h b/include/linux/regulator/tps65090-regulator.h
deleted file mode 100644
index 0fa04b64db3..00000000000
--- a/include/linux/regulator/tps65090-regulator.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Regulator driver interface for TI TPS65090 PMIC family
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
-
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
-
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __REGULATOR_TPS65090_H
-#define __REGULATOR_TPS65090_H
-
-#include <linux/regulator/machine.h>
-
-#define tps65090_rails(_name) "tps65090_"#_name
-
-enum {
- TPS65090_ID_DCDC1,
- TPS65090_ID_DCDC2,
- TPS65090_ID_DCDC3,
- TPS65090_ID_FET1,
- TPS65090_ID_FET2,
- TPS65090_ID_FET3,
- TPS65090_ID_FET4,
- TPS65090_ID_FET5,
- TPS65090_ID_FET6,
- TPS65090_ID_FET7,
-};
-
-/*
- * struct tps65090_regulator_platform_data
- *
- * @regulator: The regulator init data.
- * @slew_rate_uV_per_us: Slew rate microvolt per microsec.
- */
-
-struct tps65090_regulator_platform_data {
- struct regulator_init_data regulator;
-};
-
-#endif /* __REGULATOR_TPS65090_H */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 7d7fbe2ef78..5ae8456d967 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -74,14 +74,9 @@ ssize_t res_counter_read(struct res_counter *counter, int member,
const char __user *buf, size_t nbytes, loff_t *pos,
int (*read_strategy)(unsigned long long val, char *s));
-typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);
-
int res_counter_memparse_write_strategy(const char *buf,
unsigned long long *res);
-int res_counter_write(struct res_counter *counter, int member,
- const char *buffer, write_strategy_fn write_strategy);
-
/*
* the field descriptors. one for each member of res_counter
*/
@@ -130,14 +125,16 @@ int res_counter_charge_nofail(struct res_counter *counter,
*
* these calls check for usage underflow and show a warning on the console
* _locked call expects the counter->lock to be taken
+ *
+ * returns the total charges still present in @counter.
*/
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge_until(struct res_counter *counter,
- struct res_counter *top,
- unsigned long val);
+u64 res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val);
/**
* res_counter_margin - calculate chargeable space of a counter
* @cnt: the counter
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bfe1f478064..c20635c527a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/memcontrol.h>
/*
@@ -25,8 +25,8 @@
* pointing to this anon_vma once its vma list is empty.
*/
struct anon_vma {
- struct anon_vma *root; /* Root of this anon_vma tree */
- struct mutex mutex; /* Serialize access to vma list */
+ struct anon_vma *root; /* Root of this anon_vma tree */
+ struct rw_semaphore rwsem; /* W: modification, R: walking the list */
/*
* The refcount is taken on an anon_vma when there is no
* guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
- struct rb_node rb; /* locked by anon_vma->mutex */
+ struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
unsigned long cached_vma_start, cached_vma_last;
@@ -108,26 +108,37 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- mutex_lock(&anon_vma->root->mutex);
+ down_write(&anon_vma->root->rwsem);
}
static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- mutex_unlock(&anon_vma->root->mutex);
+ up_write(&anon_vma->root->rwsem);
}
-static inline void anon_vma_lock(struct anon_vma *anon_vma)
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
- mutex_lock(&anon_vma->root->mutex);
+ down_write(&anon_vma->root->rwsem);
}
static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
- mutex_unlock(&anon_vma->root->mutex);
+ up_write(&anon_vma->root->rwsem);
}
+static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
+{
+ down_read(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+{
+ up_read(&anon_vma->root->rwsem);
+}
+
+
/*
* anon_vma helper functions.
*/
@@ -220,8 +231,8 @@ int try_to_munlock(struct page *);
/*
* Called by memory-failure.c to kill processes.
*/
-struct anon_vma *page_lock_anon_vma(struct page *page);
-void page_unlock_anon_vma(struct anon_vma *anon_vma);
+struct anon_vma *page_lock_anon_vma_read(struct page *page);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 651b51a3671..206bb089c06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -107,6 +107,14 @@ extern unsigned long this_cpu_load(void);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
+/* Notifier for when a task gets migrated to a new CPU */
+struct task_migration_notifier {
+ struct task_struct *task;
+ int from_cpu;
+ int to_cpu;
+};
+extern void register_task_migration_notifier(struct notifier_block *n);
+
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
@@ -1519,6 +1527,14 @@ struct task_struct {
short il_next;
short pref_node_fork;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ int numa_scan_seq;
+ int numa_migrate_seq;
+ unsigned int numa_scan_period;
+ u64 node_stamp; /* migration stamp */
+ struct callback_head numa_work;
+#endif /* CONFIG_NUMA_BALANCING */
+
struct rcu_head rcu;
/*
@@ -1581,6 +1597,7 @@ struct task_struct {
unsigned long nr_pages; /* uncharged usage */
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
+ unsigned int memcg_kmem_skip_account;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
@@ -1593,6 +1610,18 @@ struct task_struct {
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+#ifdef CONFIG_NUMA_BALANCING
+extern void task_numa_fault(int node, int pages, bool migrated);
+extern void set_numabalancing_state(bool enabled);
+#else
+static inline void task_numa_fault(int node, int pages, bool migrated)
+{
+}
+static inline void set_numabalancing_state(bool enabled)
+{
+}
+#endif
+
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -1750,12 +1779,6 @@ static inline int is_global_init(struct task_struct *tsk)
return tsk->pid == 1;
}
-/*
- * is_container_init:
- * check whether in the task is init in its own pid namespace.
- */
-extern int is_container_init(struct task_struct *tsk);
-
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
@@ -2022,6 +2045,13 @@ enum sched_tunable_scaling {
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
+extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_settle_count;
+
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
@@ -2323,9 +2353,7 @@ extern int do_execve(const char *,
const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
-#ifdef CONFIG_GENERIC_KERNEL_THREAD
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-#endif
extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
diff --git a/include/linux/security.h b/include/linux/security.h
index 05e88bdcf7d..0f6afc657f7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -694,6 +694,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
+ * @kernel_module_from_file:
+ * Load a kernel module from userspace.
+ * @file contains the file structure pointing to the file containing
+ * the kernel module to load. If the module is being loaded from a blob,
+ * this argument will be NULL.
+ * Return 0 if permission is granted.
* @task_fix_setuid:
* Update the module's state after setting one or more of the user
* identity attributes of the current process. The @flags parameter
@@ -1508,6 +1514,7 @@ struct security_operations {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_module_from_file)(struct file *file);
int (*task_fix_setuid) (struct cred *new, const struct cred *old,
int flags);
int (*task_setpgid) (struct task_struct *p, pid_t pgid);
@@ -1765,6 +1772,7 @@ void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
+int security_kernel_module_from_file(struct file *file);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
@@ -2278,6 +2286,11 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
+static inline int security_kernel_module_from_file(struct file *file)
+{
+ return 0;
+}
+
static inline int security_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
diff --git a/include/linux/signal.h b/include/linux/signal.h
index e19a011b43b..0a89ffc4846 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -385,4 +385,7 @@ int unhandled_signal(struct task_struct *tsk, int sig);
void signals_init(void);
+int restore_altstack(const stack_t __user *);
+int __save_altstack(stack_t __user *, unsigned long);
+
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 83d1a1454b7..5d168d7e0a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,6 +11,8 @@
#include <linux/gfp.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
+
/*
* Flags to pass to kmem_cache_create().
@@ -116,6 +118,7 @@ struct kmem_cache {
};
#endif
+struct mem_cgroup;
/*
* struct kmem_cache related prototypes
*/
@@ -125,10 +128,12 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
+struct kmem_cache *
+kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
+ unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -176,6 +181,48 @@ unsigned int kmem_cache_size(struct kmem_cache *);
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * struct kmem_cache will hold a pointer to it, so the memory cost while
+ * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
+ * would otherwise be if that would be bundled in kmem_cache: we'll need an
+ * extra pointer chase. But the trade off clearly lays in favor of not
+ * penalizing non-users.
+ *
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system.
+ *
+ * Child caches will hold extra metadata needed for their operation. Fields are:
+ *
+ * @memcg: pointer to the memcg this cache belongs to
+ * @list: list_head for the list of all caches in this memcg
+ * @root_cache: pointer to the global root cache this cache was derived from
+ * @dead: set to true after the memcg dies; the cache may still be around.
+ * @nr_pages: number of pages that belong to this cache.
+ * @destroy: worker to be called whenever we are ready, or believe we may be
+ * ready, to destroy this cache.
+ */
+struct memcg_cache_params {
+ bool is_root_cache;
+ union {
+ struct kmem_cache *memcg_caches[0];
+ struct {
+ struct mem_cgroup *memcg;
+ struct list_head list;
+ struct kmem_cache *root_cache;
+ bool dead;
+ atomic_t nr_pages;
+ struct work_struct destroy;
+ };
+ };
+};
+
+int memcg_update_all_caches(int num_memcgs);
+
+struct seq_file;
+int cache_show(struct kmem_cache *s, struct seq_file *m);
+void print_slabinfo_header(struct seq_file *m);
/*
* Common kmalloc functions provided by all allocators
@@ -388,6 +435,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+/*
+ * Determine the size of a slab object
+ */
+static inline unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+ return s->object_size;
+}
+
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
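As a sketch of how the union in struct memcg_cache_params is intended to be read (this helper is illustrative only, not something defined by this header): a root cache uses the memcg_caches[] array, while a child cache uses the named fields:

static inline struct kmem_cache *
example_cache_for_memcg(struct kmem_cache *s, int memcg_id)
{
	struct memcg_cache_params *p = s->memcg_params;

	if (!p || !p->is_root_cache)
		return s;			/* already a per-memcg child */
	return p->memcg_caches[memcg_id];	/* child for this memcg, if any */
}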
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cc290f0bdb3..8bb6e0eaf3c 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -81,6 +81,9 @@ struct kmem_cache {
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
+#ifdef CONFIG_MEMCG_KMEM
+ struct memcg_cache_params *memcg_params;
+#endif
/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
@@ -89,9 +92,13 @@ struct kmem_cache {
* (see kmem_cache_init())
* We still use [NR_CPUS] and not [1] or [0] because cache_cache
* is statically defined, so we reserve the max number of cpus.
+ *
+ * We also need to guarantee that the list is able to accommodate a
+ * pointer for each node since "nodelists" uses the remainder of
+ * available pointers.
*/
struct kmem_list3 **nodelists;
- struct array_cache *array[NR_CPUS];
+ struct array_cache *array[NR_CPUS + MAX_NUMNODES];
/*
* Do not add fields after array[]
*/
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index df448adb728..9db4825cd39 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,6 +101,10 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ struct memcg_cache_params *memcg_params;
+ int max_attr_size; /* for propagation, maximum size of a stored attr */
+#endif
#ifdef CONFIG_NUMA
/*
@@ -222,7 +226,10 @@ void *__kmalloc(size_t size, gfp_t flags);
static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
- void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
kmemleak_alloc(ret, size, 1, flags);
return ret;
}
diff --git a/include/linux/spi/spi-tegra.h b/include/linux/spi/spi-tegra.h
new file mode 100644
index 00000000000..786932c62ed
--- /dev/null
+++ b/include/linux/spi/spi-tegra.h
@@ -0,0 +1,40 @@
+/*
+ * spi-tegra.h: SPI interface for Nvidia Tegra20 SLINK controller.
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_SPI_TEGRA_H
+#define _LINUX_SPI_TEGRA_H
+
+struct tegra_spi_platform_data {
+ int dma_req_sel;
+ unsigned int spi_max_frequency;
+};
+
+/*
+ * Controller data from the device, used to pass information such as
+ * whether a hardware-based chip select can be used and, if so, the
+ * CS hold and setup times.
+ */
+struct tegra_spi_device_controller_data {
+ bool is_hw_based_cs;
+ int cs_setup_clk_count;
+ int cs_hold_clk_count;
+};
+
+#endif /* _LINUX_SPI_TEGRA_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index fa702aeb503..f62918946d8 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -90,6 +90,7 @@ struct spi_device {
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
+ int cs_gpio; /* chip select gpio */
/*
* likely need more hooks for more protocol options affecting how
@@ -362,6 +363,8 @@ struct spi_master {
int (*transfer_one_message)(struct spi_master *master,
struct spi_message *mesg);
int (*unprepare_transfer_hardware)(struct spi_master *master);
+ /* gpio chip select */
+ int *cs_gpios;
};
static inline void *spi_master_get_devdata(struct spi_master *master)
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 1f64e3f1f22..22958d68ecf 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -435,6 +436,9 @@ struct ssb_bus {
spinlock_t gpio_lock;
struct platform_device *watchdog;
#endif /* EMBEDDED */
+#ifdef CONFIG_SSB_DRIVER_GPIO
+ struct gpio_chip gpio;
+#endif /* DRIVER_GPIO */
/* Internal-only stuff follows. Do not touch. */
struct list_head list;
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 38339fd68a5..9e492be5244 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -590,6 +590,7 @@ struct ssb_chipcommon {
u32 status;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
+ spinlock_t gpio_lock;
struct ssb_chipcommon_pmu pmu;
u32 ticks_per_ms;
u32 max_timer_ms;
@@ -645,6 +646,8 @@ u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value);
#ifdef CONFIG_SSB_SERIAL
extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
index 99511d0e931..a410e841eb9 100644
--- a/include/linux/ssb/ssb_driver_extif.h
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -161,6 +161,7 @@
struct ssb_extif {
struct ssb_device *dev;
+ spinlock_t gpio_lock;
};
static inline bool ssb_extif_available(struct ssb_extif *extif)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index de5b2f8176c..c1b3ed3fb78 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -61,7 +61,7 @@
#define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */
#define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */
-/* AXI DMA Burst length suported */
+/* AXI DMA Burst length supported */
#define DMA_AXI_BLEN_4 (1 << 1)
#define DMA_AXI_BLEN_8 (1 << 2)
#define DMA_AXI_BLEN_16 (1 << 3)
diff --git a/include/linux/string.h b/include/linux/string.h
index 630125818ca..ac889c5ea11 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -143,4 +143,15 @@ static inline bool strstarts(const char *str, const char *prefix)
extern size_t memweight(const void *ptr, size_t bytes);
+/**
+ * kbasename - return the last part of a pathname.
+ *
+ * @path: path to extract the filename from.
+ */
+static inline const char *kbasename(const char *path)
+{
+ const char *tail = strrchr(path, '/');
+ return tail ? tail + 1 : path;
+}
+
#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index f792794f663..5dc9ee4d616 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -217,6 +217,8 @@ extern int qword_get(char **bpp, char *dest, int bufsize);
static inline int get_int(char **bpp, int *anint)
{
char buf[50];
+ char *ep;
+ int rv;
int len = qword_get(bpp, buf, sizeof(buf));
if (len < 0)
@@ -224,9 +226,11 @@ static inline int get_int(char **bpp, int *anint)
if (len == 0)
return -ENOENT;
- if (kstrtoint(buf, 0, anint))
+ rv = simple_strtol(buf, &ep, 0);
+ if (*ep)
return -EINVAL;
+ *anint = rv;
return 0;
}
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index dc0c3cc3ada..b64f8eb0b97 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -192,7 +192,6 @@ struct rpc_wait_queue {
pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
- unsigned char count; /* # task groups remaining serviced so far */
unsigned char nr; /* # tasks remaining for cookie */
unsigned short qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index d83db800fe0..676ddf53b3e 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -243,6 +243,7 @@ struct svc_rqst {
struct page * rq_pages[RPCSVC_MAXPAGES];
struct page * *rq_respages; /* points into rq_pages */
int rq_resused; /* number of pages used for result */
+ struct page * *rq_next_page; /* next reply page to use */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
@@ -338,9 +339,8 @@ xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
- while (rqstp->rq_resused) {
- struct page **pp = (rqstp->rq_respages +
- --rqstp->rq_resused);
+ while (rqstp->rq_next_page != rqstp->rq_respages) {
+ struct page **pp = --rqstp->rq_next_page;
if (*pp) {
put_page(*pp);
*pp = NULL;
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 92ad02f0dcc..62fd1b756e9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -26,11 +26,28 @@ struct svc_sock {
void (*sk_owspace)(struct sock *);
/* private TCP part */
- u32 sk_reclen; /* length of record */
- u32 sk_tcplen; /* current read length */
+ /* On-the-wire fragment header: */
+ __be32 sk_reclen;
+ /* As we receive a record, this includes the length received so
+ * far (including the fragment header): */
+ u32 sk_tcplen;
+ /* Total length of the data (not including fragment headers)
+ * received so far in the fragments making up this rpc: */
+ u32 sk_datalen;
+
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
+static inline u32 svc_sock_reclen(struct svc_sock *svsk)
+{
+ return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+}
+
+static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
+{
+ return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+}
+
/*
* Function prototypes.
*/
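For reference, the fragment header that sk_reclen now stores is the standard RPC-over-TCP record marker: the top bit flags the last fragment and the low 31 bits carry the fragment length, which is what svc_sock_reclen() and svc_sock_final_rec() extract. A standalone sketch of the decoding:

static void example_decode_marker(__be32 marker)
{
	u32 host = ntohl(marker);
	u32 len = host & 0x7fffffff;	/* fragment length in bytes */
	bool last = host & 0x80000000;	/* last fragment of this rpc */

	pr_debug("fragment of %u bytes%s\n", len, last ? " (last)" : "");
}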
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8d08b3ed406..071d62c214a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,21 +34,25 @@ enum dma_sync_target {
SYNC_FOR_CPU = 0,
SYNC_FOR_DEVICE = 1,
};
-extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
- phys_addr_t phys, size_t size,
- enum dma_data_direction dir);
-extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
+/* define the last possible byte of physical address space as a mapping error */
+#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
+
+extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t phys, size_t size,
+ enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev,
+ phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir);
-extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
+extern void swiotlb_tbl_sync_single(struct device *hwdev,
+ phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
/* Accessory functions. */
-extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
- enum dma_data_direction dir);
-
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 91835e7f364..45e2db27025 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -63,6 +63,7 @@ struct getcpu_cache;
struct old_linux_dirent;
struct perf_event_attr;
struct file_handle;
+struct sigaltstack;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -299,6 +300,11 @@ asmlinkage long sys_personality(unsigned int personality);
asmlinkage long sys_sigpending(old_sigset_t __user *set);
asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
old_sigset_t __user *oset);
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss,
+ struct sigaltstack __user *uoss);
+#endif
+
asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
asmlinkage long sys_setitimer(int which,
struct itimerval __user *value,
@@ -560,10 +566,10 @@ asmlinkage long sys_utime(char __user *filename,
asmlinkage long sys_utimes(char __user *filename,
struct timeval __user *utimes);
asmlinkage long sys_lseek(unsigned int fd, off_t offset,
- unsigned int origin);
+ unsigned int whence);
asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
unsigned long offset_low, loff_t __user *result,
- unsigned int origin);
+ unsigned int whence);
asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
asmlinkage long sys_readv(unsigned long fd,
@@ -827,15 +833,6 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
const char __user *pathname);
asmlinkage long sys_syncfs(int fd);
-#ifndef CONFIG_GENERIC_KERNEL_EXECVE
-int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]);
-#else
-#define kernel_execve(filename, argv, envp) \
- do_execve(filename, \
- (const char __user *const __user *)argv, \
- (const char __user *const __user *)envp)
-#endif
-
asmlinkage long sys_fork(void);
asmlinkage long sys_vfork(void);
#ifdef CONFIG_CLONE_BACKWARDS
@@ -880,4 +877,5 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
unsigned long idx1, unsigned long idx2);
+asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ccc1899bd62..e7e04736802 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -61,6 +61,8 @@ extern long do_no_restart_syscall(struct restart_block *parm);
# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
#endif
+#define THREADINFO_GFP_ACCOUNTED (THREADINFO_GFP | __GFP_KMEMCG)
+
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/include/linux/usb/Kbuild
+++ /dev/null
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9bbeabf66c5..bd45eb7bedc 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -69,6 +69,7 @@ struct usbnet {
# define EVENT_DEV_ASLEEP 6
# define EVENT_DEV_OPEN 7
# define EVENT_DEVICE_REPORT_IDLE 8
+# define EVENT_NO_RUNTIME_PM 9
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -240,4 +241,6 @@ extern void usbnet_set_msglevel(struct net_device *, u32);
extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
extern int usbnet_nway_reset(struct net_device *net);
+extern int usbnet_manage_power(struct usbnet *, int);
+
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 95142cae446..b9bd2e6c73c 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -25,6 +25,7 @@ struct user_namespace {
struct user_namespace *parent;
kuid_t owner;
kgid_t group;
+ unsigned int proc_inum;
};
extern struct user_namespace init_user_ns;
@@ -39,6 +40,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
}
extern int create_user_ns(struct cred *new);
+extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
extern void free_user_ns(struct kref *kref);
static inline void put_user_ns(struct user_namespace *ns)
@@ -66,6 +68,14 @@ static inline int create_user_ns(struct cred *new)
return -EINVAL;
}
+static inline int unshare_userns(unsigned long unshare_flags,
+ struct cred **new_cred)
+{
+ if (unshare_flags & CLONE_NEWUSER)
+ return -EINVAL;
+ return 0;
+}
+
static inline void put_user_ns(struct user_namespace *ns)
{
}
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 2b345206722..239e27733d6 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -23,6 +23,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
+ unsigned int proc_inum;
};
extern struct uts_namespace init_uts_ns;
@@ -33,7 +34,7 @@ static inline void get_uts_ns(struct uts_namespace *ns)
}
extern struct uts_namespace *copy_utsname(unsigned long flags,
- struct task_struct *tsk);
+ struct user_namespace *user_ns, struct uts_namespace *old_ns);
extern void free_uts_ns(struct kref *kref);
static inline void put_uts_ns(struct uts_namespace *ns)
@@ -50,12 +51,12 @@ static inline void put_uts_ns(struct uts_namespace *ns)
}
static inline struct uts_namespace *copy_utsname(unsigned long flags,
- struct task_struct *tsk)
+ struct user_namespace *user_ns, struct uts_namespace *old_ns)
{
if (flags & CLONE_NEWUTS)
return ERR_PTR(-EINVAL);
- return tsk->nsproxy->uts_ns;
+ return old_ns;
}
#endif
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 0ee42d9acdc..2c02f3a8d2b 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -78,7 +78,7 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
* This function acquires VGA resources for the given
* card and mark those resources locked. If the resource requested
* are "normal" (and not legacy) resources, the arbiter will first check
- * wether the card is doing legacy decoding for that type of resource. If
+ * whether the card is doing legacy decoding for that type of resource. If
* yes, the lock is "converted" into a legacy resource lock.
* The arbiter will first look for all VGA cards that might conflict
* and disable their IOs and/or Memory access, including VGA forwarding
@@ -89,7 +89,7 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
* This function will block if some conflicting card is already locking
* one of the required resources (or any resource on a different bus
* segment, since P2P bridges don't differentiate VGA memory and IO
- * afaik). You can indicate wether this blocking should be interruptible
+ * afaik). You can indicate whether this blocking should be interruptible
* by a signal (for userland interface) or not.
* Must not be called at interrupt time or in atomic context.
* If the card already owns the resources, the function succeeds.
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 533b1157f22..cf8adb1f5b2 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -16,12 +16,20 @@
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @priv: a pointer for the virtqueue implementation to use.
+ * @index: the zero-based ordinal number for this queue.
+ * @num_free: number of elements we expect to be able to fit.
+ *
+ * A note on @num_free: with indirect buffers, each buffer needs only one
+ * element in the queue; otherwise a buffer needs one element per
+ * sg element.
*/
struct virtqueue {
struct list_head list;
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
+ unsigned int index;
+ unsigned int num_free;
void *priv;
};
@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
-int virtqueue_get_queue_index(struct virtqueue *vq);
+/* FIXME: Obsolete accessor, but required for virtio_net merge. */
+static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
+{
+ return vq->index;
+}
/**
* virtio_device - representation of a device using virtio
@@ -73,7 +85,11 @@ struct virtio_device {
void *priv;
};
-#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev)
+static inline struct virtio_device *dev_to_virtio(struct device *_dev)
+{
+ return container_of(_dev, struct virtio_device, dev);
+}
+
int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);
@@ -103,6 +119,11 @@ struct virtio_driver {
#endif
};
+static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
+{
+ return container_of(drv, struct virtio_driver, driver);
+}
+
int register_virtio_driver(struct virtio_driver *drv);
void unregister_virtio_driver(struct virtio_driver *drv);
#endif /* _LINUX_VIRTIO_H */
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index d6b4440387b..4195b97a3de 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -1,7 +1,31 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#ifndef _LINUX_VIRTIO_SCSI_H
#define _LINUX_VIRTIO_SCSI_H
-/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
#define VIRTIO_SCSI_CDB_SIZE 32
#define VIRTIO_SCSI_SENSE_SIZE 96
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 3d311459437..fce0a2799d4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -38,8 +38,18 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_NUMA_BALANCING
+ NUMA_PTE_UPDATES,
+ NUMA_HINT_FAULTS,
+ NUMA_HINT_FAULTS_LOCAL,
+ NUMA_PAGE_MIGRATE,
+#endif
+#ifdef CONFIG_MIGRATION
+ PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+#endif
#ifdef CONFIG_COMPACTION
- COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+ COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
+ COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
@@ -58,6 +68,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_COLLAPSE_ALLOC,
THP_COLLAPSE_ALLOC_FAILED,
THP_SPLIT,
+ THP_ZERO_PAGE_ALLOC,
+ THP_ZERO_PAGE_ALLOC_FAILED,
#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 92a86b2cce3..a13291f7da8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -80,6 +80,14 @@ static inline void vm_events_fold_cpu(int cpu)
#endif /* CONFIG_VM_EVENT_COUNTERS */
+#ifdef CONFIG_NUMA_BALANCING
+#define count_vm_numa_event(x) count_vm_event(x)
+#define count_vm_numa_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_numa_event(x) do {} while (0)
+#define count_vm_numa_events(x, y) do {} while (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
#define __count_zone_vm_events(item, zone, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
zone_idx(zone), delta)
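The count_vm_numa_event()/count_vm_numa_events() wrappers pair with the new NUMA_* items in vm_event_item.h: when CONFIG_NUMA_BALANCING is off, the stubs discard their argument, so both the calls and the enum values drop out of the build. A hypothetical accounting helper, just to show the intended call pattern:

/* Sketch: NUMA hinting-fault accounting; compiles to nothing without
 * CONFIG_NUMA_BALANCING because the stub macros ignore their arguments. */
static void example_account_numa_fault(int page_nid, int this_nid, int nr)
{
        count_vm_numa_event(NUMA_HINT_FAULTS);
        if (page_nid == this_nid)
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
        count_vm_numa_events(NUMA_PAGE_MIGRATE, nr);
}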
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 168dfe122dd..7cb64d4b499 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -550,6 +550,170 @@ do { \
__ret; \
})
+
+#define __wait_event_lock_irq(wq, condition, lock, cmd) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock); \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
+ * condition is checked under the lock. This
+ * is expected to be called with the lock
+ * taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before cmd
+ * and schedule() and reacquired afterwards.
+ * @cmd: a command which is invoked outside the critical section before
+ * sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before invoking the cmd and going to sleep and is reacquired
+ * afterwards.
+ */
+#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_lock_irq(wq, condition, lock, cmd); \
+} while (0)
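A hedged usage sketch for the _cmd variant (struct example_dev and its fields are assumptions made up for this example): the caller holds the lock, and the lock is dropped around both the cmd and schedule().

/* Assumed device: a consumer that must kick a producer before sleeping. */
struct example_dev {
        spinlock_t lock;
        wait_queue_head_t wq;
        struct list_head done;
        struct task_struct *producer;
};

/* Called with dev->lock held; returns with it held and the list non-empty. */
static void example_wait_done(struct example_dev *dev)
{
        wait_event_lock_irq_cmd(dev->wq, !list_empty(&dev->done), dev->lock,
                                wake_up_process(dev->producer));
}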
+
+/**
+ * wait_event_lock_irq - sleep until a condition becomes true. The
+ * condition is checked under the lock. This
+ * is expected to be called with the lock
+ * taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ */
+#define wait_event_lock_irq(wq, condition, lock) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_lock_irq(wq, condition, lock, ); \
+} while (0)
+
+
+#define __wait_event_interruptible_lock_irq(wq, condition, \
+ lock, ret, cmd) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock); \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
+ * The condition is checked under the lock. This is expected to
+ * be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before cmd and
+ * schedule() and reacquired afterwards.
+ * @cmd: a command which is invoked outside the critical section before
+ * sleep
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before invoking the cmd and going to sleep and is reacquired
+ * afterwards.
+ *
+ * The macro will return -ERESTARTSYS if it was interrupted by a signal
+ * and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
+({ \
+ int __ret = 0; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq(wq, condition, \
+ lock, __ret, cmd); \
+ __ret; \
+})
+
+/**
+ * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The macro will return -ERESTARTSYS if it was interrupted by a signal
+ * and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_lock_irq(wq, condition, lock) \
+({ \
+ int __ret = 0; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq(wq, condition, \
+ lock, __ret, ); \
+ __ret; \
+})
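And the interruptible flavour, reusing the hypothetical example_dev from the earlier sketch; the only difference for the caller is the -ERESTARTSYS return value that must be propagated:

/* Sketch: interruptible wait under dev->lock; caller handles -ERESTARTSYS. */
static int example_wait_done_interruptible(struct example_dev *dev)
{
        int ret;

        spin_lock_irq(&dev->lock);
        ret = wait_event_interruptible_lock_irq(dev->wq,
                                                !list_empty(&dev->done),
                                                dev->lock);
        spin_unlock_irq(&dev->lock);
        return ret;
}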
+
+
/*
* These are the old interfaces to sleep waiting for an event.
* They are racy. DO NOT use them, use the wait_event* interfaces above.
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index b7f45d48b2d..3a9df2f43be 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -105,7 +105,7 @@ struct watchdog_device {
#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
#endif
-/* Use the following function to check wether or not the watchdog is active */
+/* Use the following function to check whether or not the watchdog is active */
static inline bool watchdog_active(struct watchdog_device *wdd)
{
return test_bit(WDOG_ACTIVE, &wdd->status);
@@ -129,7 +129,7 @@ static inline void *watchdog_get_drvdata(struct watchdog_device *wdd)
return wdd->driver_data;
}
-/* drivers/watchdog/core/watchdog_core.c */
+/* drivers/watchdog/watchdog_core.c */
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
diff --git a/include/media/adp1653.h b/include/media/adp1653.h
index 50a1af88aed..1d9b48a3bd8 100644
--- a/include/media/adp1653.h
+++ b/include/media/adp1653.h
@@ -3,10 +3,10 @@
*
* Copyright (C) 2008--2011 Nokia Corporation
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Contributors:
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
* Tuukka Toivonen <tuukkat76@gmail.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index dbf6b37682c..8dffffedbb5 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -16,7 +16,7 @@
/* Header files */
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/davinci/vpbe_types.h>
#include <media/davinci/vpbe_osd.h>
#include <media/davinci/vpbe.h>
@@ -62,6 +62,11 @@ struct display_layer_info {
enum osd_v_exp_ratio v_exp;
};
+struct vpbe_disp_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
/* vpbe display object structure */
struct vpbe_layer {
/* number of buffers in fbuffers */
@@ -69,13 +74,15 @@ struct vpbe_layer {
/* Pointer to the vpbe_display */
struct vpbe_display *disp_dev;
/* Pointer pointing to current v4l2_buffer */
- struct videobuf_buffer *cur_frm;
+ struct vpbe_disp_buffer *cur_frm;
/* Pointer pointing to next v4l2_buffer */
- struct videobuf_buffer *next_frm;
+ struct vpbe_disp_buffer *next_frm;
/* videobuf specific parameters
* Buffer queue used in video-buf
*/
- struct videobuf_queue buffer_queue;
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
/* Queue of filled frames */
struct list_head dma_queue;
/* Used in video-buf */
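For orientation, a hedged sketch of the queue setup this videobuf-to-videobuf2 switch implies for a layer (the vb2/dma-contig symbols are real API, but the function itself and its error handling are illustrative, not the driver's actual code):

/* Sketch: per-layer vb2 setup with the dma-contig allocator. */
static int example_init_layer_queue(struct vpbe_layer *layer,
                                    struct device *dev)
{
        struct vb2_queue *q = &layer->buffer_queue;

        layer->alloc_ctx = vb2_dma_contig_init_ctx(dev);
        if (IS_ERR(layer->alloc_ctx))
                return PTR_ERR(layer->alloc_ctx);

        q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        q->io_modes = VB2_MMAP | VB2_USERPTR;
        q->mem_ops = &vb2_dma_contig_memops;
        q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
        /* the driver also sets q->ops (queue_setup, buf_queue, ...) and
         * then calls vb2_queue_init(q) */
        return 0;
}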
diff --git a/include/media/davinci/vpbe_osd.h b/include/media/davinci/vpbe_osd.h
index d7e397a444e..5ab0d8d41f6 100644
--- a/include/media/davinci/vpbe_osd.h
+++ b/include/media/davinci/vpbe_osd.h
@@ -357,7 +357,7 @@ struct osd_state {
spinlock_t lock;
struct device *dev;
dma_addr_t osd_base_phys;
- unsigned long osd_base;
+ void __iomem *osd_base;
unsigned long osd_size;
/* 1-->the isr will toggle the VID0 ping-pong buffer */
int pingpong;
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 768aa77925c..e221bc74020 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -37,7 +37,7 @@ enum ir_kbd_get_key_fn {
struct IR_i2c_init_data {
char *ir_codes;
const char *name;
- u64 type; /* RC_TYPE_RC5, etc */
+ u64 type; /* RC_BIT_RC5, etc */
u32 polling_interval; /* 0 means DEFAULT_POLLING_INTERVAL */
/*
diff --git a/include/media/mt9v022.h b/include/media/mt9v022.h
new file mode 100644
index 00000000000..40561801321
--- /dev/null
+++ b/include/media/mt9v022.h
@@ -0,0 +1,16 @@
+/*
+ * mt9v022 sensor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MT9V022_H__
+#define __MT9V022_H__
+
+struct mt9v022_platform_data {
+ unsigned short y_skip_top; /* Lines to skip at the top */
+};
+
+#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index b0c494a6907..f03445f3c76 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -50,7 +50,7 @@ enum rc_driver_type {
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
- * @allowed_protos: bitmask with the supported RC_TYPE_* protocols
+ * @allowed_protos: bitmask with the supported RC_BIT_* protocols
* @scanmask: some hardware decoders are not capable of providing the full
* scancode to the application. As this is a hardware limit, we can't do
* anything with it. Yet, as the same keycode table can be used with other
@@ -113,7 +113,7 @@ struct rc_dev {
u32 max_timeout;
u32 rx_resolution;
u32 tx_resolution;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
int (*open)(struct rc_dev *dev);
void (*close)(struct rc_dev *dev);
int (*s_tx_mask)(struct rc_dev *dev, u32 mask);
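The change_protocol signature change above is the driver-visible half of the RC_BIT_* rework: the core now passes a pointer so the callback can both reject and narrow the requested mask. A hypothetical decoder that only handles NEC and RC-5:

/* Sketch: trim *rc_type to what the hardware decoder actually enables. */
static int example_change_protocol(struct rc_dev *dev, u64 *rc_type)
{
        u64 supported = RC_BIT_NEC | RC_BIT_RC5;

        if (!(*rc_type & supported))
                return -EINVAL;

        *rc_type &= supported;
        /* ...reprogram the hardware decoder here... */
        return 0;
}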
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index cfd5163ff7f..74f55a3f14e 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -11,22 +11,54 @@
#include <linux/input.h>
-#define RC_TYPE_UNKNOWN 0
-#define RC_TYPE_RC5 (1 << 0) /* Philips RC5 protocol */
-#define RC_TYPE_NEC (1 << 1)
-#define RC_TYPE_RC6 (1 << 2) /* Philips RC6 protocol */
-#define RC_TYPE_JVC (1 << 3) /* JVC protocol */
-#define RC_TYPE_SONY (1 << 4) /* Sony12/15/20 protocol */
-#define RC_TYPE_RC5_SZ (1 << 5) /* RC5 variant used by Streamzap */
-#define RC_TYPE_SANYO (1 << 6) /* Sanyo protocol */
-#define RC_TYPE_MCE_KBD (1 << 29) /* RC6-ish MCE keyboard/mouse */
-#define RC_TYPE_LIRC (1 << 30) /* Pass raw IR to lirc userspace */
-#define RC_TYPE_OTHER (1u << 31)
+enum rc_type {
+ RC_TYPE_UNKNOWN = 0, /* Protocol not known */
+ RC_TYPE_OTHER = 1, /* Protocol known but proprietary */
+ RC_TYPE_LIRC = 2, /* Pass raw IR to lirc userspace */
+ RC_TYPE_RC5 = 3, /* Philips RC5 protocol */
+ RC_TYPE_RC5X = 4, /* Philips RC5x protocol */
+ RC_TYPE_RC5_SZ = 5, /* StreamZap variant of RC5 */
+ RC_TYPE_JVC = 6, /* JVC protocol */
+ RC_TYPE_SONY12 = 7, /* Sony 12 bit protocol */
+ RC_TYPE_SONY15 = 8, /* Sony 15 bit protocol */
+ RC_TYPE_SONY20 = 9, /* Sony 20 bit protocol */
+ RC_TYPE_NEC = 10, /* NEC protocol */
+ RC_TYPE_SANYO = 11, /* Sanyo protocol */
+ RC_TYPE_MCE_KBD = 12, /* RC6-ish MCE keyboard/mouse */
+ RC_TYPE_RC6_0 = 13, /* Philips RC6-0-16 protocol */
+ RC_TYPE_RC6_6A_20 = 14, /* Philips RC6-6A-20 protocol */
+ RC_TYPE_RC6_6A_24 = 15, /* Philips RC6-6A-24 protocol */
+ RC_TYPE_RC6_6A_32 = 16, /* Philips RC6-6A-32 protocol */
+ RC_TYPE_RC6_MCE = 17, /* MCE (Philips RC6-6A-32 subtype) protocol */
+};
+
+#define RC_BIT_NONE 0
+#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN)
+#define RC_BIT_OTHER (1 << RC_TYPE_OTHER)
+#define RC_BIT_LIRC (1 << RC_TYPE_LIRC)
+#define RC_BIT_RC5 (1 << RC_TYPE_RC5)
+#define RC_BIT_RC5X (1 << RC_TYPE_RC5X)
+#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ)
+#define RC_BIT_JVC (1 << RC_TYPE_JVC)
+#define RC_BIT_SONY12 (1 << RC_TYPE_SONY12)
+#define RC_BIT_SONY15 (1 << RC_TYPE_SONY15)
+#define RC_BIT_SONY20 (1 << RC_TYPE_SONY20)
+#define RC_BIT_NEC (1 << RC_TYPE_NEC)
+#define RC_BIT_SANYO (1 << RC_TYPE_SANYO)
+#define RC_BIT_MCE_KBD (1 << RC_TYPE_MCE_KBD)
+#define RC_BIT_RC6_0 (1 << RC_TYPE_RC6_0)
+#define RC_BIT_RC6_6A_20 (1 << RC_TYPE_RC6_6A_20)
+#define RC_BIT_RC6_6A_24 (1 << RC_TYPE_RC6_6A_24)
+#define RC_BIT_RC6_6A_32 (1 << RC_TYPE_RC6_6A_32)
+#define RC_BIT_RC6_MCE (1 << RC_TYPE_RC6_MCE)
-#define RC_TYPE_ALL (RC_TYPE_RC5 | RC_TYPE_NEC | RC_TYPE_RC6 | \
- RC_TYPE_JVC | RC_TYPE_SONY | RC_TYPE_LIRC | \
- RC_TYPE_RC5_SZ | RC_TYPE_SANYO | RC_TYPE_MCE_KBD | \
- RC_TYPE_OTHER)
+#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \
+ RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
+ RC_BIT_JVC | \
+ RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
+ RC_BIT_NEC | RC_BIT_SANYO | RC_BIT_MCE_KBD | \
+ RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)
struct rc_map_table {
u32 scancode;
@@ -38,7 +70,7 @@ struct rc_map {
unsigned int size; /* Max number of entries */
unsigned int len; /* Used number of entries */
unsigned int alloc; /* Size of *scan in bytes */
- u64 rc_type;
+ enum rc_type rc_type;
const char *name;
spinlock_t lock;
};
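To summarise the split: enum rc_type now identifies a single protocol (as stored in a keymap's rc_type field), while the RC_BIT_* masks describe capabilities. A hypothetical registration using them, reusing the sketch callback shown above:

/* Sketch: advertise supported protocols with the new bit masks. */
static int example_register_rc(struct rc_dev *dev)
{
        dev->allowed_protos = RC_BIT_NEC | RC_BIT_RC5 | RC_BIT_RC6_MCE;
        dev->change_protocol = example_change_protocol;
        return rc_register_device(dev);
}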
diff --git a/include/media/s3c_camif.h b/include/media/s3c_camif.h
new file mode 100644
index 00000000000..df96c2c789b
--- /dev/null
+++ b/include/media/s3c_camif.h
@@ -0,0 +1,45 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef MEDIA_S3C_CAMIF_
+#define MEDIA_S3C_CAMIF_
+
+#include <linux/i2c.h>
+#include <media/v4l2-mediabus.h>
+
+/**
+ * struct s3c_camif_sensor_info - an image sensor description
+ * @i2c_board_info: pointer to an I2C sensor subdevice board info
+ * @clock_frequency: frequency of the clock the host provides to a sensor
+ * @mbus_type: media bus type
+ * @i2c_bus_num: i2c control bus id the sensor is attached to
+ * @flags: the parallel bus flags defining signals polarity (V4L2_MBUS_*)
+ * @use_field: 1 if parallel bus FIELD signal is used (only s3c64xx)
+ */
+struct s3c_camif_sensor_info {
+ struct i2c_board_info i2c_board_info;
+ unsigned long clock_frequency;
+ enum v4l2_mbus_type mbus_type;
+ u16 i2c_bus_num;
+ u16 flags;
+ u8 use_field;
+};
+
+struct s3c_camif_plat_data {
+ struct s3c_camif_sensor_info sensor;
+ int (*gpio_get)(void);
+ int (*gpio_put)(void);
+};
+
+/* Platform default helper functions */
+int s3c_camif_gpio_get(void);
+int s3c_camif_gpio_put(void);
+
+#endif /* MEDIA_S3C_CAMIF_ */
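A hypothetical board file showing how this platform data is meant to be filled in (the sensor name, I2C address, clock rate and bus flags are made-up values for illustration):

/* Sketch: describing one parallel-bus sensor to the CAMIF host driver. */
static struct s3c_camif_plat_data example_camif_pdata = {
        .sensor = {
                .i2c_board_info = {
                        I2C_BOARD_INFO("ov9650", 0x30),
                },
                .i2c_bus_num     = 0,
                .clock_frequency = 24000000UL,
                .mbus_type       = V4L2_MBUS_PARALLEL,
                .flags           = V4L2_MBUS_PCLK_SAMPLE_RISING,
        },
        .gpio_get = s3c_camif_gpio_get,
        .gpio_put = s3c_camif_gpio_put,
};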
diff --git a/include/media/smiapp.h b/include/media/smiapp.h
index 9ab07fd45d5..07f96a89e18 100644
--- a/include/media/smiapp.h
+++ b/include/media/smiapp.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index e7c5d170a9c..eff85f934b2 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index 52513c225c1..a62ee18cb7b 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -6,7 +6,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index e48b571ca37..4118ad1324c 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -111,6 +111,8 @@ struct v4l2_ioctl_ops {
int (*vidioc_reqbufs) (struct file *file, void *fh, struct v4l2_requestbuffers *b);
int (*vidioc_querybuf)(struct file *file, void *fh, struct v4l2_buffer *b);
int (*vidioc_qbuf) (struct file *file, void *fh, struct v4l2_buffer *b);
+ int (*vidioc_expbuf) (struct file *file, void *fh,
+ struct v4l2_exportbuffer *e);
int (*vidioc_dqbuf) (struct file *file, void *fh, struct v4l2_buffer *b);
int (*vidioc_create_bufs)(struct file *file, void *fh, struct v4l2_create_buffers *b);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 131cc4a5367..7e82d2b193d 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -111,6 +111,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb);
+
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index e04252a9fea..9cfd4ee9e56 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/videodev2.h>
+#include <linux/dma-buf.h>
struct vb2_alloc_ctx;
struct vb2_fileio_data;
@@ -41,6 +42,24 @@ struct vb2_fileio_data;
* argument to other ops in this structure
* @put_userptr: inform the allocator that a USERPTR buffer will no longer
* be used
+ * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation;
+ * used for DMABUF memory types; alloc_ctx is the alloc context,
+ * dbuf is the shared dma_buf; returns NULL on failure or an
+ * allocator private per-buffer structure on success;
+ * this needs to be used for further accesses to the buffer
+ * @detach_dmabuf: inform the exporter of the buffer that the current DMABUF
+ * buffer is no longer used; the buf_priv argument is the
+ * allocator private per-buffer structure previously returned
+ * from the attach_dmabuf callback
+ * @map_dmabuf: request access to the dmabuf from the allocator; the allocator
+ * of dmabuf is informed that this driver is going to use the
+ * dmabuf
+ * @unmap_dmabuf: releases access control to the dmabuf - allocator is notified
+ * that this driver is done using the dmabuf for now
+ * @prepare: called every time the buffer is passed from userspace to the
+ * driver, useful for cache synchronisation, optional
+ * @finish: called every time the buffer is passed back from the driver
+ * to the userspace, also optional
* @vaddr: return a kernel virtual address to a given memory buffer
* associated with the passed private structure or NULL if no
* such mapping exists
@@ -56,15 +75,27 @@ struct vb2_fileio_data;
* Required ops for USERPTR types: get_userptr, put_userptr.
* Required ops for MMAP types: alloc, put, num_users, mmap.
* Required ops for read/write access types: alloc, put, num_users, vaddr
+ * Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf,
+ * unmap_dmabuf.
*/
struct vb2_mem_ops {
void *(*alloc)(void *alloc_ctx, unsigned long size);
void (*put)(void *buf_priv);
+ struct dma_buf *(*get_dmabuf)(void *buf_priv);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write);
void (*put_userptr)(void *buf_priv);
+ void (*prepare)(void *buf_priv);
+ void (*finish)(void *buf_priv);
+
+ void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write);
+ void (*detach_dmabuf)(void *buf_priv);
+ int (*map_dmabuf)(void *buf_priv);
+ void (*unmap_dmabuf)(void *buf_priv);
+
void *(*vaddr)(void *buf_priv);
void *(*cookie)(void *buf_priv);
@@ -75,6 +106,8 @@ struct vb2_mem_ops {
struct vb2_plane {
void *mem_priv;
+ struct dma_buf *dbuf;
+ unsigned int dbuf_mapped;
};
/**
@@ -83,12 +116,14 @@ struct vb2_plane {
* @VB2_USERPTR: driver supports USERPTR with streaming API
* @VB2_READ: driver supports read() style access
* @VB2_WRITE: driver supports write() style access
+ * @VB2_DMABUF: driver supports DMABUF with streaming API
*/
enum vb2_io_modes {
VB2_MMAP = (1 << 0),
VB2_USERPTR = (1 << 1),
VB2_READ = (1 << 2),
VB2_WRITE = (1 << 3),
+ VB2_DMABUF = (1 << 4),
};
/**
@@ -329,6 +364,7 @@ int __must_check vb2_queue_init(struct vb2_queue *q);
void vb2_queue_release(struct vb2_queue *q);
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
@@ -438,6 +474,8 @@ int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p);
int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i);
int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i);
+int vb2_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *p);
/* struct v4l2_file_operations helpers */
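Taken together with the vidioc_expbuf/v4l2_m2m_expbuf hooks added earlier, a vb2-based driver mostly has to opt in. A hedged sketch (the example_* names are placeholders, and dma-contig is just one of the allocators implementing the new dmabuf callbacks):

/* Sketch: enabling DMABUF I/O and buffer export in a vb2 driver. */
static const struct v4l2_ioctl_ops example_ioctl_ops = {
        .vidioc_reqbufs = vb2_ioctl_reqbufs,
        .vidioc_qbuf    = vb2_ioctl_qbuf,
        .vidioc_dqbuf   = vb2_ioctl_dqbuf,
        .vidioc_expbuf  = vb2_ioctl_expbuf,   /* new: export buffer as dma-buf fd */
        /* ...streamon/streamoff and format ioctls omitted... */
};

static void example_setup_queue(struct vb2_queue *q)
{
        q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        q->mem_ops = &vb2_dma_contig_memops;
}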
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 84e1f6c031c..f05444ca8c0 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -33,11 +33,6 @@ extern const struct vm_operations_struct vb2_common_vm_ops;
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
struct vm_area_struct **res_vma, dma_addr_t *res_pa);
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
- unsigned long size,
- const struct vm_operations_struct *vm_ops,
- void *priv);
-
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
void vb2_put_vma(struct vm_area_struct *vma);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ba1d3615acb..183292722f6 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -318,6 +318,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long max_rto);
extern void inet_csk_destroy_sock(struct sock *sk);
+extern void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll..
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
index fff11b7fe8a..591f78631f1 100644
--- a/include/net/irda/irlmp.h
+++ b/include/net/irda/irlmp.h
@@ -134,7 +134,7 @@ typedef struct {
} CACHE_ENTRY;
/*
- * Information about each registred IrLAP layer
+ * Information about each registered IrLAP layer
*/
struct lap_cb {
irda_queue_t queue; /* Must be first */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 7af1ea89303..23b3a7c5878 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -78,6 +78,13 @@ struct ra_msg {
__be32 retrans_timer;
};
+struct rd_msg {
+ struct icmp6hdr icmph;
+ struct in6_addr target;
+ struct in6_addr dest;
+ __u8 opt[0];
+};
+
struct nd_opt_hdr {
__u8 nd_opt_type;
__u8 nd_opt_len;
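struct rd_msg simply overlays the fixed part of an ICMPv6 Redirect (type 137) on the ICMPv6 header. A sketch of how receive code can view an already-validated packet through it (length checks and option parsing omitted; the function is illustrative):

/* Sketch: reading the target/destination addresses of a Redirect. */
static void example_handle_redirect(struct sk_buff *skb)
{
        struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);

        pr_debug("redirect: target %pI6c dest %pI6c\n",
                 &msg->target, &msg->dest);
}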
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c5a43f56b79..de644bcd861 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -56,6 +56,8 @@ struct net {
struct user_namespace *user_ns; /* Owning user namespace */
+ unsigned int proc_inum;
+
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
diff --git a/include/net/sock.h b/include/net/sock.h
index 0a9a01a5b0d..182ca99405a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -231,7 +231,7 @@ struct cg_proto;
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
- * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
+ * @sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
@@ -367,7 +367,7 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
-#ifdef CONFIG_CGROUPS
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
__u32 sk_cgrp_prioidx;
#endif
struct pid *sk_peer_pid;
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild
deleted file mode 100644
index ea56f76c0c2..00000000000
--- a/include/rdma/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-header-y += ib_user_cm.h
-header-y += ib_user_mad.h
-header-y += ib_user_sa.h
-header-y += ib_user_verbs.h
-header-y += rdma_netlink.h
-header-y += rdma_user_cm.h
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index bd3d8b24b42..e38de79eeb4 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -1,41 +1,9 @@
#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H
-#include <linux/types.h>
-
-enum {
- RDMA_NL_RDMA_CM = 1
-};
-
-#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
-#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
-#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
-
-enum {
- RDMA_NL_RDMA_CM_ID_STATS = 0,
- RDMA_NL_RDMA_CM_NUM_OPS
-};
-
-enum {
- RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
- RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
- RDMA_NL_RDMA_CM_NUM_ATTR,
-};
-
-struct rdma_cm_id_stats {
- __u32 qp_num;
- __u32 bound_dev_if;
- __u32 port_space;
- __s32 pid;
- __u8 cm_state;
- __u8 node_type;
- __u8 port_num;
- __u8 qp_type;
-};
-
-#ifdef __KERNEL__
#include <linux/netlink.h>
+#include <uapi/rdma/rdma_netlink.h>
struct ibnl_client_cbs {
int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
@@ -88,6 +56,4 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type);
-#endif /* __KERNEL__ */
-
#endif /* _RDMA_NETLINK_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 55367b04dc9..e65c62e82c5 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -137,6 +137,7 @@ struct scsi_device {
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
+ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
unsigned skip_vpd_pages:1; /* do not read VPD pages */
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 98b3a20a010..9b8e08879cf 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -36,6 +36,7 @@ enum sas_linkrate {
SAS_LINK_RATE_3_0_GBPS = 9,
SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS,
SAS_LINK_RATE_6_0_GBPS = 10,
+ SAS_LINK_RATE_12_0_GBPS = 11,
/* These are virtual to the transport class and may never
* be signalled normally since the standard defined field
* is only 4 bits */
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index 9c60ca1c08c..ff0f04ac91a 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -14,13 +14,21 @@ struct srp_rport_identifiers {
};
struct srp_rport {
+ /* for initiator and target drivers */
+
struct device dev;
u8 port_id[16];
u8 roles;
+
+ /* for initiator drivers */
+
+ void *lld_data; /* LLD private data */
};
struct srp_function_template {
+ /* for initiator drivers */
+ void (*rport_delete)(struct srp_rport *rport);
/* for target drivers */
int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
int (* it_nexus_response)(struct Scsi_Host *, u64, int);
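The new initiator-side pieces (lld_data and rport_delete) let the transport class ask an initiator driver to tear down a remote port, e.g. on a user-triggered removal. A hypothetical template (example_target and its teardown helper are assumptions standing in for an LLD's real types):

struct example_target;

static void example_remove_target(struct example_target *target)
{
        /* LLD-specific teardown would go here */
}

/* Sketch: the transport class asks the initiator LLD to drop this rport. */
static void example_rport_delete(struct srp_rport *rport)
{
        struct example_target *target = rport->lld_data;

        example_remove_target(target);
}

static struct srp_function_template example_srp_ft = {
        .rport_delete = example_rport_delete,
};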
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
deleted file mode 100644
index 6df30ed1581..00000000000
--- a/include/sound/Kbuild
+++ /dev/null
@@ -1,10 +0,0 @@
-header-y += asequencer.h
-header-y += asound.h
-header-y += asound_fm.h
-header-y += emu10k1.h
-header-y += hdsp.h
-header-y += hdspm.h
-header-y += sb16_csp.h
-header-y += sfnt_info.h
-header-y += compress_params.h
-header-y += compress_offload.h
diff --git a/include/sound/asequencer.h b/include/sound/asequencer.h
index 1505e6d5ef8..75935ce739c 100644
--- a/include/sound/asequencer.h
+++ b/include/sound/asequencer.h
@@ -22,294 +22,9 @@
#ifndef __SOUND_ASEQUENCER_H
#define __SOUND_ASEQUENCER_H
-#ifdef __KERNEL__
#include <linux/ioctl.h>
#include <sound/asound.h>
-#endif
-
-/** version of the sequencer */
-#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1)
-
-/**
- * definition of sequencer event types
- */
-
-/** system messages
- * event data type = #snd_seq_result
- */
-#define SNDRV_SEQ_EVENT_SYSTEM 0
-#define SNDRV_SEQ_EVENT_RESULT 1
-
-/** note messages (channel specific)
- * event data type = #snd_seq_ev_note
- */
-#define SNDRV_SEQ_EVENT_NOTE 5
-#define SNDRV_SEQ_EVENT_NOTEON 6
-#define SNDRV_SEQ_EVENT_NOTEOFF 7
-#define SNDRV_SEQ_EVENT_KEYPRESS 8
-
-/** control messages (channel specific)
- * event data type = #snd_seq_ev_ctrl
- */
-#define SNDRV_SEQ_EVENT_CONTROLLER 10
-#define SNDRV_SEQ_EVENT_PGMCHANGE 11
-#define SNDRV_SEQ_EVENT_CHANPRESS 12
-#define SNDRV_SEQ_EVENT_PITCHBEND 13 /**< from -8192 to 8191 */
-#define SNDRV_SEQ_EVENT_CONTROL14 14 /**< 14 bit controller value */
-#define SNDRV_SEQ_EVENT_NONREGPARAM 15 /**< 14 bit NRPN address + 14 bit unsigned value */
-#define SNDRV_SEQ_EVENT_REGPARAM 16 /**< 14 bit RPN address + 14 bit unsigned value */
-
-/** synchronisation messages
- * event data type = #snd_seq_ev_ctrl
- */
-#define SNDRV_SEQ_EVENT_SONGPOS 20 /* Song Position Pointer with LSB and MSB values */
-#define SNDRV_SEQ_EVENT_SONGSEL 21 /* Song Select with song ID number */
-#define SNDRV_SEQ_EVENT_QFRAME 22 /* midi time code quarter frame */
-#define SNDRV_SEQ_EVENT_TIMESIGN 23 /* SMF Time Signature event */
-#define SNDRV_SEQ_EVENT_KEYSIGN 24 /* SMF Key Signature event */
-
-/** timer messages
- * event data type = snd_seq_ev_queue_control
- */
-#define SNDRV_SEQ_EVENT_START 30 /* midi Real Time Start message */
-#define SNDRV_SEQ_EVENT_CONTINUE 31 /* midi Real Time Continue message */
-#define SNDRV_SEQ_EVENT_STOP 32 /* midi Real Time Stop message */
-#define SNDRV_SEQ_EVENT_SETPOS_TICK 33 /* set tick queue position */
-#define SNDRV_SEQ_EVENT_SETPOS_TIME 34 /* set realtime queue position */
-#define SNDRV_SEQ_EVENT_TEMPO 35 /* (SMF) Tempo event */
-#define SNDRV_SEQ_EVENT_CLOCK 36 /* midi Real Time Clock message */
-#define SNDRV_SEQ_EVENT_TICK 37 /* midi Real Time Tick message */
-#define SNDRV_SEQ_EVENT_QUEUE_SKEW 38 /* skew queue tempo */
-
-/** others
- * event data type = none
- */
-#define SNDRV_SEQ_EVENT_TUNE_REQUEST 40 /* tune request */
-#define SNDRV_SEQ_EVENT_RESET 41 /* reset to power-on state */
-#define SNDRV_SEQ_EVENT_SENSING 42 /* "active sensing" event */
-
-/** echo back, kernel private messages
- * event data type = any type
- */
-#define SNDRV_SEQ_EVENT_ECHO 50 /* echo event */
-#define SNDRV_SEQ_EVENT_OSS 51 /* OSS raw event */
-
-/** system status messages (broadcast for subscribers)
- * event data type = snd_seq_addr
- */
-#define SNDRV_SEQ_EVENT_CLIENT_START 60 /* new client has connected */
-#define SNDRV_SEQ_EVENT_CLIENT_EXIT 61 /* client has left the system */
-#define SNDRV_SEQ_EVENT_CLIENT_CHANGE 62 /* client status/info has changed */
-#define SNDRV_SEQ_EVENT_PORT_START 63 /* new port was created */
-#define SNDRV_SEQ_EVENT_PORT_EXIT 64 /* port was deleted from system */
-#define SNDRV_SEQ_EVENT_PORT_CHANGE 65 /* port status/info has changed */
-
-/** port connection changes
- * event data type = snd_seq_connect
- */
-#define SNDRV_SEQ_EVENT_PORT_SUBSCRIBED 66 /* ports connected */
-#define SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED 67 /* ports disconnected */
-
-/* 70-89: synthesizer events - obsoleted */
-
-/** user-defined events with fixed length
- * event data type = any
- */
-#define SNDRV_SEQ_EVENT_USR0 90
-#define SNDRV_SEQ_EVENT_USR1 91
-#define SNDRV_SEQ_EVENT_USR2 92
-#define SNDRV_SEQ_EVENT_USR3 93
-#define SNDRV_SEQ_EVENT_USR4 94
-#define SNDRV_SEQ_EVENT_USR5 95
-#define SNDRV_SEQ_EVENT_USR6 96
-#define SNDRV_SEQ_EVENT_USR7 97
-#define SNDRV_SEQ_EVENT_USR8 98
-#define SNDRV_SEQ_EVENT_USR9 99
-
-/* 100-118: instrument layer - obsoleted */
-/* 119-129: reserved */
-
-/* 130-139: variable length events
- * event data type = snd_seq_ev_ext
- * (SNDRV_SEQ_EVENT_LENGTH_VARIABLE must be set)
- */
-#define SNDRV_SEQ_EVENT_SYSEX 130 /* system exclusive data (variable length) */
-#define SNDRV_SEQ_EVENT_BOUNCE 131 /* error event */
-/* 132-134: reserved */
-#define SNDRV_SEQ_EVENT_USR_VAR0 135
-#define SNDRV_SEQ_EVENT_USR_VAR1 136
-#define SNDRV_SEQ_EVENT_USR_VAR2 137
-#define SNDRV_SEQ_EVENT_USR_VAR3 138
-#define SNDRV_SEQ_EVENT_USR_VAR4 139
-
-/* 150-151: kernel events with quote - DO NOT use in user clients */
-#define SNDRV_SEQ_EVENT_KERNEL_ERROR 150
-#define SNDRV_SEQ_EVENT_KERNEL_QUOTE 151 /* obsolete */
-
-/* 152-191: reserved */
-
-/* 192-254: hardware specific events */
-
-/* 255: special event */
-#define SNDRV_SEQ_EVENT_NONE 255
-
-
-typedef unsigned char snd_seq_event_type_t;
-
-/** event address */
-struct snd_seq_addr {
- unsigned char client; /**< Client number: 0..255, 255 = broadcast to all clients */
- unsigned char port; /**< Port within client: 0..255, 255 = broadcast to all ports */
-};
-
-/** port connection */
-struct snd_seq_connect {
- struct snd_seq_addr sender;
- struct snd_seq_addr dest;
-};
-
-
-#define SNDRV_SEQ_ADDRESS_UNKNOWN 253 /* unknown source */
-#define SNDRV_SEQ_ADDRESS_SUBSCRIBERS 254 /* send event to all subscribed ports */
-#define SNDRV_SEQ_ADDRESS_BROADCAST 255 /* send event to all queues/clients/ports/channels */
-#define SNDRV_SEQ_QUEUE_DIRECT 253 /* direct dispatch */
-
- /* event mode flag - NOTE: only 8 bits available! */
-#define SNDRV_SEQ_TIME_STAMP_TICK (0<<0) /* timestamp in clock ticks */
-#define SNDRV_SEQ_TIME_STAMP_REAL (1<<0) /* timestamp in real time */
-#define SNDRV_SEQ_TIME_STAMP_MASK (1<<0)
-
-#define SNDRV_SEQ_TIME_MODE_ABS (0<<1) /* absolute timestamp */
-#define SNDRV_SEQ_TIME_MODE_REL (1<<1) /* relative to current time */
-#define SNDRV_SEQ_TIME_MODE_MASK (1<<1)
-
-#define SNDRV_SEQ_EVENT_LENGTH_FIXED (0<<2) /* fixed event size */
-#define SNDRV_SEQ_EVENT_LENGTH_VARIABLE (1<<2) /* variable event size */
-#define SNDRV_SEQ_EVENT_LENGTH_VARUSR (2<<2) /* variable event size - user memory space */
-#define SNDRV_SEQ_EVENT_LENGTH_MASK (3<<2)
-
-#define SNDRV_SEQ_PRIORITY_NORMAL (0<<4) /* normal priority */
-#define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
-#define SNDRV_SEQ_PRIORITY_MASK (1<<4)
-
-
- /* note event */
-struct snd_seq_ev_note {
- unsigned char channel;
- unsigned char note;
- unsigned char velocity;
- unsigned char off_velocity; /* only for SNDRV_SEQ_EVENT_NOTE */
- unsigned int duration; /* only for SNDRV_SEQ_EVENT_NOTE */
-};
-
- /* controller event */
-struct snd_seq_ev_ctrl {
- unsigned char channel;
- unsigned char unused1, unused2, unused3; /* pad */
- unsigned int param;
- signed int value;
-};
-
- /* generic set of bytes (12x8 bit) */
-struct snd_seq_ev_raw8 {
- unsigned char d[12]; /* 8 bit value */
-};
-
- /* generic set of integers (3x32 bit) */
-struct snd_seq_ev_raw32 {
- unsigned int d[3]; /* 32 bit value */
-};
-
- /* external stored data */
-struct snd_seq_ev_ext {
- unsigned int len; /* length of data */
- void *ptr; /* pointer to data (note: maybe 64-bit) */
-} __attribute__((packed));
-
-struct snd_seq_result {
- int event; /* processed event type */
- int result;
-};
-
-
-struct snd_seq_real_time {
- unsigned int tv_sec; /* seconds */
- unsigned int tv_nsec; /* nanoseconds */
-};
-
-typedef unsigned int snd_seq_tick_time_t; /* midi ticks */
-
-union snd_seq_timestamp {
- snd_seq_tick_time_t tick;
- struct snd_seq_real_time time;
-};
-
-struct snd_seq_queue_skew {
- unsigned int value;
- unsigned int base;
-};
-
- /* queue timer control */
-struct snd_seq_ev_queue_control {
- unsigned char queue; /* affected queue */
- unsigned char pad[3]; /* reserved */
- union {
- signed int value; /* affected value (e.g. tempo) */
- union snd_seq_timestamp time; /* time */
- unsigned int position; /* sync position */
- struct snd_seq_queue_skew skew;
- unsigned int d32[2];
- unsigned char d8[8];
- } param;
-};
-
- /* quoted event - inside the kernel only */
-struct snd_seq_ev_quote {
- struct snd_seq_addr origin; /* original sender */
- unsigned short value; /* optional data */
- struct snd_seq_event *event; /* quoted event */
-} __attribute__((packed));
-
-
- /* sequencer event */
-struct snd_seq_event {
- snd_seq_event_type_t type; /* event type */
- unsigned char flags; /* event flags */
- char tag;
-
- unsigned char queue; /* schedule queue */
- union snd_seq_timestamp time; /* schedule time */
-
-
- struct snd_seq_addr source; /* source address */
- struct snd_seq_addr dest; /* destination address */
-
- union { /* event data... */
- struct snd_seq_ev_note note;
- struct snd_seq_ev_ctrl control;
- struct snd_seq_ev_raw8 raw8;
- struct snd_seq_ev_raw32 raw32;
- struct snd_seq_ev_ext ext;
- struct snd_seq_ev_queue_control queue;
- union snd_seq_timestamp time;
- struct snd_seq_addr addr;
- struct snd_seq_connect connect;
- struct snd_seq_result result;
- struct snd_seq_ev_quote quote;
- } data;
-};
-
-
-/*
- * bounce event - stored as variable size data
- */
-struct snd_seq_event_bounce {
- int err;
- struct snd_seq_event event;
- /* external data follows here. */
-};
-
-#ifdef __KERNEL__
+#include <uapi/sound/asequencer.h>
/* helper macro */
#define snd_seq_event_bounce_ext_data(ev) ((void*)((char *)(ev)->data.ext.ptr + sizeof(struct snd_seq_event_bounce)))
@@ -368,311 +83,4 @@ struct snd_seq_event_bounce {
/* queue sync port */
#define snd_seq_queue_sync_port(q) ((q) + 16)
-#endif /* __KERNEL__ */
-
- /* system information */
-struct snd_seq_system_info {
- int queues; /* maximum queues count */
- int clients; /* maximum clients count */
- int ports; /* maximum ports per client */
- int channels; /* maximum channels per port */
- int cur_clients; /* current clients */
- int cur_queues; /* current queues */
- char reserved[24];
-};
-
-
- /* system running information */
-struct snd_seq_running_info {
- unsigned char client; /* client id */
- unsigned char big_endian; /* 1 = big-endian */
- unsigned char cpu_mode; /* 4 = 32bit, 8 = 64bit */
- unsigned char pad; /* reserved */
- unsigned char reserved[12];
-};
-
-
- /* known client numbers */
-#define SNDRV_SEQ_CLIENT_SYSTEM 0
- /* internal client numbers */
-#define SNDRV_SEQ_CLIENT_DUMMY 14 /* midi through */
-#define SNDRV_SEQ_CLIENT_OSS 15 /* oss sequencer emulator */
-
-
- /* client types */
-typedef int __bitwise snd_seq_client_type_t;
-#define NO_CLIENT ((__force snd_seq_client_type_t) 0)
-#define USER_CLIENT ((__force snd_seq_client_type_t) 1)
-#define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
-
- /* event filter flags */
-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
-
-struct snd_seq_client_info {
- int client; /* client number to inquire */
- snd_seq_client_type_t type; /* client type */
- char name[64]; /* client name */
- unsigned int filter; /* filter flags */
- unsigned char multicast_filter[8]; /* multicast filter bitmap */
- unsigned char event_filter[32]; /* event filter bitmap */
- int num_ports; /* RO: number of ports */
- int event_lost; /* number of lost events */
- char reserved[64]; /* for future use */
-};
-
-
-/* client pool size */
-struct snd_seq_client_pool {
- int client; /* client number to inquire */
- int output_pool; /* outgoing (write) pool size */
- int input_pool; /* incoming (read) pool size */
- int output_room; /* minimum free pool size for select/blocking mode */
- int output_free; /* unused size */
- int input_free; /* unused size */
- char reserved[64];
-};
-
-
-/* Remove events by specified criteria */
-
-#define SNDRV_SEQ_REMOVE_INPUT (1<<0) /* Flush input queues */
-#define SNDRV_SEQ_REMOVE_OUTPUT (1<<1) /* Flush output queues */
-#define SNDRV_SEQ_REMOVE_DEST (1<<2) /* Restrict by destination q:client:port */
-#define SNDRV_SEQ_REMOVE_DEST_CHANNEL (1<<3) /* Restrict by channel */
-#define SNDRV_SEQ_REMOVE_TIME_BEFORE (1<<4) /* Restrict to before time */
-#define SNDRV_SEQ_REMOVE_TIME_AFTER (1<<5) /* Restrict to time or after */
-#define SNDRV_SEQ_REMOVE_TIME_TICK (1<<6) /* Time is in ticks */
-#define SNDRV_SEQ_REMOVE_EVENT_TYPE (1<<7) /* Restrict to event type */
-#define SNDRV_SEQ_REMOVE_IGNORE_OFF (1<<8) /* Do not flush off events */
-#define SNDRV_SEQ_REMOVE_TAG_MATCH (1<<9) /* Restrict to events with given tag */
-
-struct snd_seq_remove_events {
- unsigned int remove_mode; /* Flags that determine what gets removed */
-
- union snd_seq_timestamp time;
-
- unsigned char queue; /* Queue for REMOVE_DEST */
- struct snd_seq_addr dest; /* Address for REMOVE_DEST */
- unsigned char channel; /* Channel for REMOVE_DEST */
-
- int type; /* For REMOVE_EVENT_TYPE */
- char tag; /* Tag for REMOVE_TAG */
-
- int reserved[10]; /* To allow for future binary compatibility */
-
-};
-
-
- /* known port numbers */
-#define SNDRV_SEQ_PORT_SYSTEM_TIMER 0
-#define SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE 1
-
- /* port capabilities (32 bits) */
-#define SNDRV_SEQ_PORT_CAP_READ (1<<0) /* readable from this port */
-#define SNDRV_SEQ_PORT_CAP_WRITE (1<<1) /* writable to this port */
-
-#define SNDRV_SEQ_PORT_CAP_SYNC_READ (1<<2)
-#define SNDRV_SEQ_PORT_CAP_SYNC_WRITE (1<<3)
-
-#define SNDRV_SEQ_PORT_CAP_DUPLEX (1<<4)
-
-#define SNDRV_SEQ_PORT_CAP_SUBS_READ (1<<5) /* allow read subscription */
-#define SNDRV_SEQ_PORT_CAP_SUBS_WRITE (1<<6) /* allow write subscription */
-#define SNDRV_SEQ_PORT_CAP_NO_EXPORT (1<<7) /* routing not allowed */
-
- /* port type */
-#define SNDRV_SEQ_PORT_TYPE_SPECIFIC (1<<0) /* hardware specific */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC (1<<1) /* generic MIDI device */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_GM (1<<2) /* General MIDI compatible device */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_GS (1<<3) /* GS compatible device */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_XG (1<<4) /* XG compatible device */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_MT32 (1<<5) /* MT-32 compatible device */
-#define SNDRV_SEQ_PORT_TYPE_MIDI_GM2 (1<<6) /* General MIDI 2 compatible device */
-
-/* other standards...*/
-#define SNDRV_SEQ_PORT_TYPE_SYNTH (1<<10) /* Synth device (no MIDI compatible - direct wavetable) */
-#define SNDRV_SEQ_PORT_TYPE_DIRECT_SAMPLE (1<<11) /* Sampling device (support sample download) */
-#define SNDRV_SEQ_PORT_TYPE_SAMPLE (1<<12) /* Sampling device (sample can be downloaded at any time) */
-/*...*/
-#define SNDRV_SEQ_PORT_TYPE_HARDWARE (1<<16) /* driver for a hardware device */
-#define SNDRV_SEQ_PORT_TYPE_SOFTWARE (1<<17) /* implemented in software */
-#define SNDRV_SEQ_PORT_TYPE_SYNTHESIZER (1<<18) /* generates sound */
-#define SNDRV_SEQ_PORT_TYPE_PORT (1<<19) /* connects to other device(s) */
-#define SNDRV_SEQ_PORT_TYPE_APPLICATION (1<<20) /* application (sequencer/editor) */
-
-/* misc. conditioning flags */
-#define SNDRV_SEQ_PORT_FLG_GIVEN_PORT (1<<0)
-#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
-#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
-
-struct snd_seq_port_info {
- struct snd_seq_addr addr; /* client/port numbers */
- char name[64]; /* port name */
-
- unsigned int capability; /* port capability bits */
- unsigned int type; /* port type bits */
- int midi_channels; /* channels per MIDI port */
- int midi_voices; /* voices per MIDI port */
- int synth_voices; /* voices per SYNTH port */
-
- int read_use; /* R/O: subscribers for output (from this port) */
- int write_use; /* R/O: subscribers for input (to this port) */
-
- void *kernel; /* reserved for kernel use (must be NULL) */
- unsigned int flags; /* misc. conditioning */
- unsigned char time_queue; /* queue # for timestamping */
- char reserved[59]; /* for future use */
-};
-
-
-/* queue flags */
-#define SNDRV_SEQ_QUEUE_FLG_SYNC (1<<0) /* sync enabled */
-
-/* queue information */
-struct snd_seq_queue_info {
- int queue; /* queue id */
-
- /*
- * security settings, only owner of this queue can start/stop timer
- * etc. if the queue is locked for other clients
- */
- int owner; /* client id for owner of the queue */
- unsigned locked:1; /* timing queue locked for other queues */
- char name[64]; /* name of this queue */
- unsigned int flags; /* flags */
- char reserved[60]; /* for future use */
-
-};
-
-/* queue info/status */
-struct snd_seq_queue_status {
- int queue; /* queue id */
- int events; /* read-only - queue size */
- snd_seq_tick_time_t tick; /* current tick */
- struct snd_seq_real_time time; /* current time */
- int running; /* running state of queue */
- int flags; /* various flags */
- char reserved[64]; /* for the future */
-};
-
-
-/* queue tempo */
-struct snd_seq_queue_tempo {
- int queue; /* sequencer queue */
- unsigned int tempo; /* current tempo, us/tick */
- int ppq; /* time resolution, ticks/quarter */
- unsigned int skew_value; /* queue skew */
- unsigned int skew_base; /* queue skew base */
- char reserved[24]; /* for the future */
-};
-
-
-/* sequencer timer sources */
-#define SNDRV_SEQ_TIMER_ALSA 0 /* ALSA timer */
-#define SNDRV_SEQ_TIMER_MIDI_CLOCK 1 /* Midi Clock (CLOCK event) */
-#define SNDRV_SEQ_TIMER_MIDI_TICK 2 /* Midi Timer Tick (TICK event) */
-
-/* queue timer info */
-struct snd_seq_queue_timer {
- int queue; /* sequencer queue */
- int type; /* source timer type */
- union {
- struct {
- struct snd_timer_id id; /* ALSA's timer ID */
- unsigned int resolution; /* resolution in Hz */
- } alsa;
- } u;
- char reserved[64]; /* for the future use */
-};
-
-
-struct snd_seq_queue_client {
- int queue; /* sequencer queue */
- int client; /* sequencer client */
- int used; /* queue is used with this client
- (must be set for accepting events) */
- /* per client watermarks */
- char reserved[64]; /* for future use */
-};
-
-
-#define SNDRV_SEQ_PORT_SUBS_EXCLUSIVE (1<<0) /* exclusive connection */
-#define SNDRV_SEQ_PORT_SUBS_TIMESTAMP (1<<1)
-#define SNDRV_SEQ_PORT_SUBS_TIME_REAL (1<<2)
-
-struct snd_seq_port_subscribe {
- struct snd_seq_addr sender; /* sender address */
- struct snd_seq_addr dest; /* destination address */
- unsigned int voices; /* number of voices to be allocated (0 = don't care) */
- unsigned int flags; /* modes */
- unsigned char queue; /* input time-stamp queue (optional) */
- unsigned char pad[3]; /* reserved */
- char reserved[64];
-};
-
-/* type of query subscription */
-#define SNDRV_SEQ_QUERY_SUBS_READ 0
-#define SNDRV_SEQ_QUERY_SUBS_WRITE 1
-
-struct snd_seq_query_subs {
- struct snd_seq_addr root; /* client/port id to be searched */
- int type; /* READ or WRITE */
- int index; /* 0..N-1 */
- int num_subs; /* R/O: number of subscriptions on this port */
- struct snd_seq_addr addr; /* R/O: result */
- unsigned char queue; /* R/O: result */
- unsigned int flags; /* R/O: result */
- char reserved[64]; /* for future use */
-};
-
-
-/*
- * IOCTL commands
- */
-
-#define SNDRV_SEQ_IOCTL_PVERSION _IOR ('S', 0x00, int)
-#define SNDRV_SEQ_IOCTL_CLIENT_ID _IOR ('S', 0x01, int)
-#define SNDRV_SEQ_IOCTL_SYSTEM_INFO _IOWR('S', 0x02, struct snd_seq_system_info)
-#define SNDRV_SEQ_IOCTL_RUNNING_MODE _IOWR('S', 0x03, struct snd_seq_running_info)
-
-#define SNDRV_SEQ_IOCTL_GET_CLIENT_INFO _IOWR('S', 0x10, struct snd_seq_client_info)
-#define SNDRV_SEQ_IOCTL_SET_CLIENT_INFO _IOW ('S', 0x11, struct snd_seq_client_info)
-
-#define SNDRV_SEQ_IOCTL_CREATE_PORT _IOWR('S', 0x20, struct snd_seq_port_info)
-#define SNDRV_SEQ_IOCTL_DELETE_PORT _IOW ('S', 0x21, struct snd_seq_port_info)
-#define SNDRV_SEQ_IOCTL_GET_PORT_INFO _IOWR('S', 0x22, struct snd_seq_port_info)
-#define SNDRV_SEQ_IOCTL_SET_PORT_INFO _IOW ('S', 0x23, struct snd_seq_port_info)
-
-#define SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT _IOW ('S', 0x30, struct snd_seq_port_subscribe)
-#define SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT _IOW ('S', 0x31, struct snd_seq_port_subscribe)
-
-#define SNDRV_SEQ_IOCTL_CREATE_QUEUE _IOWR('S', 0x32, struct snd_seq_queue_info)
-#define SNDRV_SEQ_IOCTL_DELETE_QUEUE _IOW ('S', 0x33, struct snd_seq_queue_info)
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_INFO _IOWR('S', 0x34, struct snd_seq_queue_info)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_INFO _IOWR('S', 0x35, struct snd_seq_queue_info)
-#define SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE _IOWR('S', 0x36, struct snd_seq_queue_info)
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS _IOWR('S', 0x40, struct snd_seq_queue_status)
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO _IOWR('S', 0x41, struct snd_seq_queue_tempo)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO _IOW ('S', 0x42, struct snd_seq_queue_tempo)
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_OWNER _IOWR('S', 0x43, struct snd_seq_queue_owner)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_OWNER _IOW ('S', 0x44, struct snd_seq_queue_owner)
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER _IOWR('S', 0x45, struct snd_seq_queue_timer)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER _IOW ('S', 0x46, struct snd_seq_queue_timer)
-/* XXX
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_SYNC _IOWR('S', 0x53, struct snd_seq_queue_sync)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_SYNC _IOW ('S', 0x54, struct snd_seq_queue_sync)
-*/
-#define SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT _IOWR('S', 0x49, struct snd_seq_queue_client)
-#define SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT _IOW ('S', 0x4a, struct snd_seq_queue_client)
-#define SNDRV_SEQ_IOCTL_GET_CLIENT_POOL _IOWR('S', 0x4b, struct snd_seq_client_pool)
-#define SNDRV_SEQ_IOCTL_SET_CLIENT_POOL _IOW ('S', 0x4c, struct snd_seq_client_pool)
-#define SNDRV_SEQ_IOCTL_REMOVE_EVENTS _IOW ('S', 0x4e, struct snd_seq_remove_events)
-#define SNDRV_SEQ_IOCTL_QUERY_SUBS _IOWR('S', 0x4f, struct snd_seq_query_subs)
-#define SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION _IOWR('S', 0x50, struct snd_seq_port_subscribe)
-#define SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT _IOWR('S', 0x51, struct snd_seq_client_info)
-#define SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT _IOWR('S', 0x52, struct snd_seq_port_info)
-
#endif /* __SOUND_ASEQUENCER_H */
diff --git a/include/sound/asound.h b/include/sound/asound.h
index dfe7d441748..c2dff5369d3 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -19,13 +19,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-
#ifndef __SOUND_ASOUND_H
#define __SOUND_ASOUND_H
-#include <linux/types.h>
-
-#ifdef __KERNEL__
#include <linux/ioctl.h>
#include <linux/time.h>
#include <asm/byteorder.h>
@@ -40,934 +36,5 @@
#endif
#endif
-#endif /* __KERNEL__ **/
-
-/*
- * protocol version
- */
-
-#define SNDRV_PROTOCOL_VERSION(major, minor, subminor) (((major)<<16)|((minor)<<8)|(subminor))
-#define SNDRV_PROTOCOL_MAJOR(version) (((version)>>16)&0xffff)
-#define SNDRV_PROTOCOL_MINOR(version) (((version)>>8)&0xff)
-#define SNDRV_PROTOCOL_MICRO(version) ((version)&0xff)
-#define SNDRV_PROTOCOL_INCOMPATIBLE(kversion, uversion) \
- (SNDRV_PROTOCOL_MAJOR(kversion) != SNDRV_PROTOCOL_MAJOR(uversion) || \
- (SNDRV_PROTOCOL_MAJOR(kversion) == SNDRV_PROTOCOL_MAJOR(uversion) && \
- SNDRV_PROTOCOL_MINOR(kversion) != SNDRV_PROTOCOL_MINOR(uversion)))
-
-/****************************************************************************
- * *
- * Digital audio interface *
- * *
- ****************************************************************************/
-
-struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
- unsigned char subcode[147]; /* AES/IEC958 subcode bits */
- unsigned char pad; /* nothing */
- unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
-};
-
-/****************************************************************************
- * *
- * CEA-861 Audio InfoFrame. Used in HDMI and DisplayPort *
- * *
- ****************************************************************************/
-
-struct snd_cea_861_aud_if {
- unsigned char db1_ct_cc; /* coding type and channel count */
- unsigned char db2_sf_ss; /* sample frequency and size */
- unsigned char db3; /* not used, all zeros */
- unsigned char db4_ca; /* channel allocation code */
- unsigned char db5_dminh_lsv; /* downmix inhibit & level-shit values */
-};
-
-/****************************************************************************
- * *
- * Section for driver hardware dependent interface - /dev/snd/hw? *
- * *
- ****************************************************************************/
-
-#define SNDRV_HWDEP_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 1)
-
-enum {
- SNDRV_HWDEP_IFACE_OPL2 = 0,
- SNDRV_HWDEP_IFACE_OPL3,
- SNDRV_HWDEP_IFACE_OPL4,
- SNDRV_HWDEP_IFACE_SB16CSP, /* Creative Signal Processor */
- SNDRV_HWDEP_IFACE_EMU10K1, /* FX8010 processor in EMU10K1 chip */
- SNDRV_HWDEP_IFACE_YSS225, /* Yamaha FX processor */
- SNDRV_HWDEP_IFACE_ICS2115, /* Wavetable synth */
- SNDRV_HWDEP_IFACE_SSCAPE, /* Ensoniq SoundScape ISA card (MC68EC000) */
- SNDRV_HWDEP_IFACE_VX, /* Digigram VX cards */
- SNDRV_HWDEP_IFACE_MIXART, /* Digigram miXart cards */
- SNDRV_HWDEP_IFACE_USX2Y, /* Tascam US122, US224 & US428 usb */
- SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */
- SNDRV_HWDEP_IFACE_BLUETOOTH, /* Bluetooth audio */
- SNDRV_HWDEP_IFACE_USX2Y_PCM, /* Tascam US122, US224 & US428 rawusb pcm */
- SNDRV_HWDEP_IFACE_PCXHR, /* Digigram PCXHR */
- SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */
- SNDRV_HWDEP_IFACE_HDA, /* HD-audio */
- SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */
-
- /* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_USB_STREAM
-};
-
-struct snd_hwdep_info {
- unsigned int device; /* WR: device number */
- int card; /* R: card number */
- unsigned char id[64]; /* ID (user selectable) */
- unsigned char name[80]; /* hwdep name */
- int iface; /* hwdep interface */
- unsigned char reserved[64]; /* reserved for future */
-};
-
-/* generic DSP loader */
-struct snd_hwdep_dsp_status {
- unsigned int version; /* R: driver-specific version */
- unsigned char id[32]; /* R: driver-specific ID string */
- unsigned int num_dsps; /* R: number of DSP images to transfer */
- unsigned int dsp_loaded; /* R: bit flags indicating the loaded DSPs */
- unsigned int chip_ready; /* R: 1 = initialization finished */
- unsigned char reserved[16]; /* reserved for future use */
-};
-
-struct snd_hwdep_dsp_image {
- unsigned int index; /* W: DSP index */
- unsigned char name[64]; /* W: ID (e.g. file name) */
- unsigned char __user *image; /* W: binary image */
- size_t length; /* W: size of image in bytes */
- unsigned long driver_data; /* W: driver-specific data */
-};
-
-#define SNDRV_HWDEP_IOCTL_PVERSION _IOR ('H', 0x00, int)
-#define SNDRV_HWDEP_IOCTL_INFO _IOR ('H', 0x01, struct snd_hwdep_info)
-#define SNDRV_HWDEP_IOCTL_DSP_STATUS _IOR('H', 0x02, struct snd_hwdep_dsp_status)
-#define SNDRV_HWDEP_IOCTL_DSP_LOAD _IOW('H', 0x03, struct snd_hwdep_dsp_image)
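/*
 * A minimal sketch of the generic DSP loader handshake above, assuming a
 * hwdep node such as "/dev/snd/hwC0D0" (illustrative path): query the
 * status and report how many DSP images still have to be transferred.
 */
#include <sound/asound.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int hwdep_dsps_pending(const char *dev)
{
	struct snd_hwdep_dsp_status st;
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&st, 0, sizeof(st));
	ret = ioctl(fd, SNDRV_HWDEP_IOCTL_DSP_STATUS, &st);
	close(fd);
	if (ret < 0)
		return -1;
	/* each bit set in dsp_loaded marks an image already transferred */
	return (int)st.num_dsps - __builtin_popcount(st.dsp_loaded);
}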
-
-/*****************************************************************************
- * *
- * Digital Audio (PCM) interface - /dev/snd/pcm?? *
- * *
- *****************************************************************************/
-
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 10)
-
-typedef unsigned long snd_pcm_uframes_t;
-typedef signed long snd_pcm_sframes_t;
-
-enum {
- SNDRV_PCM_CLASS_GENERIC = 0, /* standard mono or stereo device */
- SNDRV_PCM_CLASS_MULTI, /* multichannel device */
- SNDRV_PCM_CLASS_MODEM, /* software modem class */
- SNDRV_PCM_CLASS_DIGITIZER, /* digitizer class */
- /* Don't forget to change the following: */
- SNDRV_PCM_CLASS_LAST = SNDRV_PCM_CLASS_DIGITIZER,
-};
-
-enum {
- SNDRV_PCM_SUBCLASS_GENERIC_MIX = 0, /* mono or stereo subdevices are mixed together */
- SNDRV_PCM_SUBCLASS_MULTI_MIX, /* multichannel subdevices are mixed together */
- /* Don't forget to change the following: */
- SNDRV_PCM_SUBCLASS_LAST = SNDRV_PCM_SUBCLASS_MULTI_MIX,
-};
-
-enum {
- SNDRV_PCM_STREAM_PLAYBACK = 0,
- SNDRV_PCM_STREAM_CAPTURE,
- SNDRV_PCM_STREAM_LAST = SNDRV_PCM_STREAM_CAPTURE,
-};
-
-typedef int __bitwise snd_pcm_access_t;
-#define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
-#define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap */
-#define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
-#define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
-#define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
-#define SNDRV_PCM_ACCESS_LAST SNDRV_PCM_ACCESS_RW_NONINTERLEAVED
-
-typedef int __bitwise snd_pcm_format_t;
-#define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
-#define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
-#define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
-#define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
-#define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
-#define SNDRV_PCM_FORMAT_U16_BE ((__force snd_pcm_format_t) 5)
-#define SNDRV_PCM_FORMAT_S24_LE ((__force snd_pcm_format_t) 6) /* low three bytes */
-#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
-#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
-#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
-#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
-#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
-#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
-#define SNDRV_PCM_FORMAT_U32_BE ((__force snd_pcm_format_t) 13)
-#define SNDRV_PCM_FORMAT_FLOAT_LE ((__force snd_pcm_format_t) 14) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */
-#define SNDRV_PCM_FORMAT_FLOAT_BE ((__force snd_pcm_format_t) 15) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */
-#define SNDRV_PCM_FORMAT_FLOAT64_LE ((__force snd_pcm_format_t) 16) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */
-#define SNDRV_PCM_FORMAT_FLOAT64_BE ((__force snd_pcm_format_t) 17) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */
-#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE ((__force snd_pcm_format_t) 18) /* IEC-958 subframe, Little Endian */
-#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE ((__force snd_pcm_format_t) 19) /* IEC-958 subframe, Big Endian */
-#define SNDRV_PCM_FORMAT_MU_LAW ((__force snd_pcm_format_t) 20)
-#define SNDRV_PCM_FORMAT_A_LAW ((__force snd_pcm_format_t) 21)
-#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
-#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
-#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
-#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
-#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
-#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U24_3LE ((__force snd_pcm_format_t) 34) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U24_3BE ((__force snd_pcm_format_t) 35) /* in three bytes */
-#define SNDRV_PCM_FORMAT_S20_3LE ((__force snd_pcm_format_t) 36) /* in three bytes */
-#define SNDRV_PCM_FORMAT_S20_3BE ((__force snd_pcm_format_t) 37) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U20_3LE ((__force snd_pcm_format_t) 38) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U20_3BE ((__force snd_pcm_format_t) 39) /* in three bytes */
-#define SNDRV_PCM_FORMAT_S18_3LE ((__force snd_pcm_format_t) 40) /* in three bytes */
-#define SNDRV_PCM_FORMAT_S18_3BE ((__force snd_pcm_format_t) 41) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U18_3LE ((__force snd_pcm_format_t) 42) /* in three bytes */
-#define SNDRV_PCM_FORMAT_U18_3BE ((__force snd_pcm_format_t) 43) /* in three bytes */
-#define SNDRV_PCM_FORMAT_G723_24 ((__force snd_pcm_format_t) 44) /* 8 samples in 3 bytes */
-#define SNDRV_PCM_FORMAT_G723_24_1B ((__force snd_pcm_format_t) 45) /* 1 sample in 1 byte */
-#define SNDRV_PCM_FORMAT_G723_40 ((__force snd_pcm_format_t) 46) /* 8 Samples in 5 bytes */
-#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
-#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_G723_40_1B
-
-#ifdef SNDRV_LITTLE_ENDIAN
-#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
-#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_LE
-#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_LE
-#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_LE
-#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_LE
-#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_LE
-#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
-#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
-#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
-#endif
-#ifdef SNDRV_BIG_ENDIAN
-#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
-#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_BE
-#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_BE
-#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_BE
-#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_BE
-#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_BE
-#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
-#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
-#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
-#endif
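/*
 * A sketch of the frames<->bytes arithmetic implied by the format codes
 * above, covering only a handful of common formats; it assumes a normal
 * user-space build where snd_pcm_format_t degrades to a plain int.
 */
static int format_phys_bytes(int fmt)
{
	switch (fmt) {
	case SNDRV_PCM_FORMAT_S8:
	case SNDRV_PCM_FORMAT_U8:
		return 1;
	case SNDRV_PCM_FORMAT_S16_LE:
	case SNDRV_PCM_FORMAT_S16_BE:
	case SNDRV_PCM_FORMAT_U16_LE:
	case SNDRV_PCM_FORMAT_U16_BE:
		return 2;
	case SNDRV_PCM_FORMAT_S24_3LE:	/* packed in three bytes */
	case SNDRV_PCM_FORMAT_S24_3BE:
		return 3;
	case SNDRV_PCM_FORMAT_S24_LE:	/* 24 valid bits stored in 4 bytes */
	case SNDRV_PCM_FORMAT_S24_BE:
	case SNDRV_PCM_FORMAT_S32_LE:
	case SNDRV_PCM_FORMAT_S32_BE:
	case SNDRV_PCM_FORMAT_FLOAT_LE:
	case SNDRV_PCM_FORMAT_FLOAT_BE:
		return 4;
	default:
		return -1;	/* not covered by this sketch */
	}
}

/* bytes occupied by 'frames' interleaved frames; no error handling here */
static unsigned long frames_to_bytes(unsigned int channels, int fmt,
				     snd_pcm_uframes_t frames)
{
	return (unsigned long)frames * channels * format_phys_bytes(fmt);
}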
-
-typedef int __bitwise snd_pcm_subformat_t;
-#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0)
-#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD
-
-#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */
-#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
-#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
-#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
-#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
-#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
-#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
-#define SNDRV_PCM_INFO_BLOCK_TRANSFER 0x00010000 /* hardware transfers blocks of samples */
-#define SNDRV_PCM_INFO_OVERRANGE 0x00020000 /* hardware supports ADC (capture) overrange detection */
-#define SNDRV_PCM_INFO_RESUME 0x00040000 /* hardware supports stream resume after suspend */
-#define SNDRV_PCM_INFO_PAUSE 0x00080000 /* pause ioctl is supported */
-#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */
-#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture streams are somewhat correlated */
-#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm supports some kind of synchronized start */
-#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
-#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
-
-typedef int __bitwise snd_pcm_state_t;
-#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
-#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */
-#define SNDRV_PCM_STATE_PREPARED ((__force snd_pcm_state_t) 2) /* stream is ready to start */
-#define SNDRV_PCM_STATE_RUNNING ((__force snd_pcm_state_t) 3) /* stream is running */
-#define SNDRV_PCM_STATE_XRUN ((__force snd_pcm_state_t) 4) /* stream reached an xrun */
-#define SNDRV_PCM_STATE_DRAINING ((__force snd_pcm_state_t) 5) /* stream is draining */
-#define SNDRV_PCM_STATE_PAUSED ((__force snd_pcm_state_t) 6) /* stream is paused */
-#define SNDRV_PCM_STATE_SUSPENDED ((__force snd_pcm_state_t) 7) /* hardware is suspended */
-#define SNDRV_PCM_STATE_DISCONNECTED ((__force snd_pcm_state_t) 8) /* hardware is disconnected */
-#define SNDRV_PCM_STATE_LAST SNDRV_PCM_STATE_DISCONNECTED
-
-enum {
- SNDRV_PCM_MMAP_OFFSET_DATA = 0x00000000,
- SNDRV_PCM_MMAP_OFFSET_STATUS = 0x80000000,
- SNDRV_PCM_MMAP_OFFSET_CONTROL = 0x81000000,
-};
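/*
 * A minimal sketch of how the magic mmap offsets above are used: map the
 * read-only status page and the read/write control page of an open PCM fd.
 * It assumes the driver supports these mappings (otherwise the SYNC_PTR
 * ioctl further below is the fallback) and omits most error handling.
 */
#include <sys/mman.h>
#include <unistd.h>

static struct snd_pcm_mmap_status *map_pcm_status(int pcm_fd)
{
	void *p = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ, MAP_SHARED,
		       pcm_fd, SNDRV_PCM_MMAP_OFFSET_STATUS);
	return p == MAP_FAILED ? NULL : (struct snd_pcm_mmap_status *)p;
}

static struct snd_pcm_mmap_control *map_pcm_control(int pcm_fd)
{
	void *p = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE,
		       MAP_SHARED, pcm_fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);
	return p == MAP_FAILED ? NULL : (struct snd_pcm_mmap_control *)p;
}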
-
-union snd_pcm_sync_id {
- unsigned char id[16];
- unsigned short id16[8];
- unsigned int id32[4];
-};
-
-struct snd_pcm_info {
- unsigned int device; /* RO/WR (control): device number */
- unsigned int subdevice; /* RO/WR (control): subdevice number */
- int stream; /* RO/WR (control): stream direction */
- int card; /* R: card number */
- unsigned char id[64]; /* ID (user selectable) */
- unsigned char name[80]; /* name of this device */
- unsigned char subname[32]; /* subdevice name */
- int dev_class; /* SNDRV_PCM_CLASS_* */
- int dev_subclass; /* SNDRV_PCM_SUBCLASS_* */
- unsigned int subdevices_count;
- unsigned int subdevices_avail;
- union snd_pcm_sync_id sync; /* hardware synchronization ID */
- unsigned char reserved[64]; /* reserved for future... */
-};
-
-typedef int snd_pcm_hw_param_t;
-#define SNDRV_PCM_HW_PARAM_ACCESS 0 /* Access type */
-#define SNDRV_PCM_HW_PARAM_FORMAT 1 /* Format */
-#define SNDRV_PCM_HW_PARAM_SUBFORMAT 2 /* Subformat */
-#define SNDRV_PCM_HW_PARAM_FIRST_MASK SNDRV_PCM_HW_PARAM_ACCESS
-#define SNDRV_PCM_HW_PARAM_LAST_MASK SNDRV_PCM_HW_PARAM_SUBFORMAT
-
-#define SNDRV_PCM_HW_PARAM_SAMPLE_BITS 8 /* Bits per sample */
-#define SNDRV_PCM_HW_PARAM_FRAME_BITS 9 /* Bits per frame */
-#define SNDRV_PCM_HW_PARAM_CHANNELS 10 /* Channels */
-#define SNDRV_PCM_HW_PARAM_RATE 11 /* Approx rate */
-#define SNDRV_PCM_HW_PARAM_PERIOD_TIME 12 /* Approx distance between
- * interrupts in us
- */
-#define SNDRV_PCM_HW_PARAM_PERIOD_SIZE 13 /* Approx frames between
- * interrupts
- */
-#define SNDRV_PCM_HW_PARAM_PERIOD_BYTES 14 /* Approx bytes between
- * interrupts
- */
-#define SNDRV_PCM_HW_PARAM_PERIODS 15 /* Approx interrupts per
- * buffer
- */
-#define SNDRV_PCM_HW_PARAM_BUFFER_TIME 16 /* Approx duration of buffer
- * in us
- */
-#define SNDRV_PCM_HW_PARAM_BUFFER_SIZE 17 /* Size of buffer in frames */
-#define SNDRV_PCM_HW_PARAM_BUFFER_BYTES 18 /* Size of buffer in bytes */
-#define SNDRV_PCM_HW_PARAM_TICK_TIME 19 /* Approx tick duration in us */
-#define SNDRV_PCM_HW_PARAM_FIRST_INTERVAL SNDRV_PCM_HW_PARAM_SAMPLE_BITS
-#define SNDRV_PCM_HW_PARAM_LAST_INTERVAL SNDRV_PCM_HW_PARAM_TICK_TIME
-
-#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */
-#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */
-#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
-
-struct snd_interval {
- unsigned int min, max;
- unsigned int openmin:1,
- openmax:1,
- integer:1,
- empty:1;
-};
-
-#define SNDRV_MASK_MAX 256
-
-struct snd_mask {
- __u32 bits[(SNDRV_MASK_MAX+31)/32];
-};
-
-struct snd_pcm_hw_params {
- unsigned int flags;
- struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK -
- SNDRV_PCM_HW_PARAM_FIRST_MASK + 1];
- struct snd_mask mres[5]; /* reserved masks */
- struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL -
- SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1];
- struct snd_interval ires[9]; /* reserved intervals */
- unsigned int rmask; /* W: requested masks */
- unsigned int cmask; /* R: changed masks */
- unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
- unsigned int rate_num; /* R: rate numerator */
- unsigned int rate_den; /* R: rate denominator */
- snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
- unsigned char reserved[64]; /* reserved for future */
-};
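/*
 * A sketch of the usual "wide open" initialisation done before
 * SNDRV_PCM_IOCTL_HW_REFINE, in the spirit of alsa-lib's
 * snd_pcm_hw_params_any(): every mask fully set, every interval unbounded,
 * and rmask asking the driver to refine all parameters.
 */
#include <string.h>
#include <limits.h>

static void hw_params_any(struct snd_pcm_hw_params *p)
{
	unsigned int i;

	memset(p, 0, sizeof(*p));
	for (i = 0; i <= SNDRV_PCM_HW_PARAM_LAST_MASK -
			SNDRV_PCM_HW_PARAM_FIRST_MASK; i++)
		memset(p->masks[i].bits, 0xff, sizeof(p->masks[i].bits));
	for (i = 0; i <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL -
			SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; i++) {
		p->intervals[i].min = 0;
		p->intervals[i].max = UINT_MAX;
	}
	p->rmask = ~0U;		/* refine everything */
	p->cmask = 0;		/* driver reports what it changed here */
	p->info = ~0U;
}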
-
-enum {
- SNDRV_PCM_TSTAMP_NONE = 0,
- SNDRV_PCM_TSTAMP_ENABLE,
- SNDRV_PCM_TSTAMP_LAST = SNDRV_PCM_TSTAMP_ENABLE,
-};
-
-struct snd_pcm_sw_params {
- int tstamp_mode; /* timestamp mode */
- unsigned int period_step;
- unsigned int sleep_min; /* min ticks to sleep */
- snd_pcm_uframes_t avail_min; /* min avail frames for wakeup */
- snd_pcm_uframes_t xfer_align; /* obsolete: xfer size needs to be a multiple */
- snd_pcm_uframes_t start_threshold; /* min hw_avail frames for automatic start */
- snd_pcm_uframes_t stop_threshold; /* min avail frames for automatic stop */
- snd_pcm_uframes_t silence_threshold; /* min distance from noise for silence filling */
- snd_pcm_uframes_t silence_size; /* silence block size */
- snd_pcm_uframes_t boundary; /* pointers wrap point */
- unsigned char reserved[64]; /* reserved for future */
-};
-
-struct snd_pcm_channel_info {
- unsigned int channel;
- __kernel_off_t offset; /* mmap offset */
- unsigned int first; /* offset to first sample in bits */
- unsigned int step; /* distance between samples in bits */
-};
-
-struct snd_pcm_status {
- snd_pcm_state_t state; /* stream state */
- struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
- struct timespec tstamp; /* reference timestamp */
- snd_pcm_uframes_t appl_ptr; /* appl ptr */
- snd_pcm_uframes_t hw_ptr; /* hw ptr */
- snd_pcm_sframes_t delay; /* current delay in frames */
- snd_pcm_uframes_t avail; /* number of frames available */
- snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
- snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
- snd_pcm_state_t suspended_state; /* suspended stream state */
- unsigned char reserved[60]; /* must be filled with zero */
-};
-
-struct snd_pcm_mmap_status {
- snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
- int pad1; /* Needed for 64 bit alignment */
- snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
- struct timespec tstamp; /* Timestamp */
- snd_pcm_state_t suspended_state; /* RO: suspended stream state */
-};
-
-struct snd_pcm_mmap_control {
- snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
- snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
-};
-
-#define SNDRV_PCM_SYNC_PTR_HWSYNC (1<<0) /* execute hwsync */
-#define SNDRV_PCM_SYNC_PTR_APPL (1<<1) /* get appl_ptr from driver (r/w op) */
-#define SNDRV_PCM_SYNC_PTR_AVAIL_MIN (1<<2) /* get avail_min from driver */
-
-struct snd_pcm_sync_ptr {
- unsigned int flags;
- union {
- struct snd_pcm_mmap_status status;
- unsigned char reserved[64];
- } s;
- union {
- struct snd_pcm_mmap_control control;
- unsigned char reserved[64];
- } c;
-};
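/*
 * A minimal sketch of keeping the kernel and user views in sync without
 * mmap: issue SNDRV_PCM_IOCTL_SYNC_PTR with flags requesting a hwsync and
 * asking the driver for its appl_ptr/avail_min instead of pushing ours.
 */
#include <sys/ioctl.h>

static int pcm_sync_ptr(int pcm_fd, struct snd_pcm_sync_ptr *sync)
{
	sync->flags = SNDRV_PCM_SYNC_PTR_HWSYNC |
		      SNDRV_PCM_SYNC_PTR_APPL |
		      SNDRV_PCM_SYNC_PTR_AVAIL_MIN;
	return ioctl(pcm_fd, SNDRV_PCM_IOCTL_SYNC_PTR, sync);
}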
-
-struct snd_xferi {
- snd_pcm_sframes_t result;
- void __user *buf;
- snd_pcm_uframes_t frames;
-};
-
-struct snd_xfern {
- snd_pcm_sframes_t result;
- void __user * __user *bufs;
- snd_pcm_uframes_t frames;
-};
-
-enum {
- SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY = 0, /* gettimeofday equivalent */
- SNDRV_PCM_TSTAMP_TYPE_MONOTONIC, /* posix_clock_monotonic equivalent */
- SNDRV_PCM_TSTAMP_TYPE_LAST = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC,
-};
-
-/* channel positions */
-enum {
- SNDRV_CHMAP_UNKNOWN = 0,
- SNDRV_CHMAP_NA, /* N/A, silent */
- SNDRV_CHMAP_MONO, /* mono stream */
- /* this follows the alsa-lib mixer channel value + 3 */
- SNDRV_CHMAP_FL, /* front left */
- SNDRV_CHMAP_FR, /* front right */
- SNDRV_CHMAP_RL, /* rear left */
- SNDRV_CHMAP_RR, /* rear right */
- SNDRV_CHMAP_FC, /* front center */
- SNDRV_CHMAP_LFE, /* LFE */
- SNDRV_CHMAP_SL, /* side left */
- SNDRV_CHMAP_SR, /* side right */
- SNDRV_CHMAP_RC, /* rear center */
- /* new definitions */
- SNDRV_CHMAP_FLC, /* front left center */
- SNDRV_CHMAP_FRC, /* front right center */
- SNDRV_CHMAP_RLC, /* rear left center */
- SNDRV_CHMAP_RRC, /* rear right center */
- SNDRV_CHMAP_FLW, /* front left wide */
- SNDRV_CHMAP_FRW, /* front right wide */
- SNDRV_CHMAP_FLH, /* front left high */
- SNDRV_CHMAP_FCH, /* front center high */
- SNDRV_CHMAP_FRH, /* front right high */
- SNDRV_CHMAP_TC, /* top center */
- SNDRV_CHMAP_TFL, /* top front left */
- SNDRV_CHMAP_TFR, /* top front right */
- SNDRV_CHMAP_TFC, /* top front center */
- SNDRV_CHMAP_TRL, /* top rear left */
- SNDRV_CHMAP_TRR, /* top rear right */
- SNDRV_CHMAP_TRC, /* top rear center */
- SNDRV_CHMAP_LAST = SNDRV_CHMAP_TRC,
-};
-
-#define SNDRV_CHMAP_POSITION_MASK 0xffff
-#define SNDRV_CHMAP_PHASE_INVERSE (0x01 << 16)
-#define SNDRV_CHMAP_DRIVER_SPEC (0x02 << 16)
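/*
 * A small sketch of decoding one channel-map entry (as read from a PCM
 * channel-map control) into its position code and modifier bits using the
 * masks above.
 */
static void decode_chmap_entry(unsigned int val,
			       unsigned int *pos, int *phase_inverted)
{
	*pos = val & SNDRV_CHMAP_POSITION_MASK;	/* one of the SNDRV_CHMAP_* values */
	*phase_inverted = !!(val & SNDRV_CHMAP_PHASE_INVERSE);
	/* SNDRV_CHMAP_DRIVER_SPEC would mark a driver-specific position */
}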
-
-#define SNDRV_PCM_IOCTL_PVERSION _IOR('A', 0x00, int)
-#define SNDRV_PCM_IOCTL_INFO _IOR('A', 0x01, struct snd_pcm_info)
-#define SNDRV_PCM_IOCTL_TSTAMP _IOW('A', 0x02, int)
-#define SNDRV_PCM_IOCTL_TTSTAMP _IOW('A', 0x03, int)
-#define SNDRV_PCM_IOCTL_HW_REFINE _IOWR('A', 0x10, struct snd_pcm_hw_params)
-#define SNDRV_PCM_IOCTL_HW_PARAMS _IOWR('A', 0x11, struct snd_pcm_hw_params)
-#define SNDRV_PCM_IOCTL_HW_FREE _IO('A', 0x12)
-#define SNDRV_PCM_IOCTL_SW_PARAMS _IOWR('A', 0x13, struct snd_pcm_sw_params)
-#define SNDRV_PCM_IOCTL_STATUS _IOR('A', 0x20, struct snd_pcm_status)
-#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
-#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
-#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
-#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
-#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40)
-#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41)
-#define SNDRV_PCM_IOCTL_START _IO('A', 0x42)
-#define SNDRV_PCM_IOCTL_DROP _IO('A', 0x43)
-#define SNDRV_PCM_IOCTL_DRAIN _IO('A', 0x44)
-#define SNDRV_PCM_IOCTL_PAUSE _IOW('A', 0x45, int)
-#define SNDRV_PCM_IOCTL_REWIND _IOW('A', 0x46, snd_pcm_uframes_t)
-#define SNDRV_PCM_IOCTL_RESUME _IO('A', 0x47)
-#define SNDRV_PCM_IOCTL_XRUN _IO('A', 0x48)
-#define SNDRV_PCM_IOCTL_FORWARD _IOW('A', 0x49, snd_pcm_uframes_t)
-#define SNDRV_PCM_IOCTL_WRITEI_FRAMES _IOW('A', 0x50, struct snd_xferi)
-#define SNDRV_PCM_IOCTL_READI_FRAMES _IOR('A', 0x51, struct snd_xferi)
-#define SNDRV_PCM_IOCTL_WRITEN_FRAMES _IOW('A', 0x52, struct snd_xfern)
-#define SNDRV_PCM_IOCTL_READN_FRAMES _IOR('A', 0x53, struct snd_xfern)
-#define SNDRV_PCM_IOCTL_LINK _IOW('A', 0x60, int)
-#define SNDRV_PCM_IOCTL_UNLINK _IO('A', 0x61)
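/*
 * A minimal sketch of the interleaved write path, assuming the stream has
 * already been configured (HW_PARAMS/SW_PARAMS) and prepared: 'frames'
 * counts frames, not bytes, and 'result' reports how many were accepted.
 */
#include <sys/ioctl.h>

static snd_pcm_sframes_t pcm_writei(int pcm_fd, void *buf,
				    snd_pcm_uframes_t frames)
{
	struct snd_xferi xfer;

	xfer.result = 0;
	xfer.buf = buf;
	xfer.frames = frames;
	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_WRITEI_FRAMES, &xfer) < 0)
		return -1;	/* errno is e.g. EPIPE after an xrun */
	return xfer.result;
}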
-
-/*****************************************************************************
- * *
- * MIDI v1.0 interface *
- * *
- *****************************************************************************/
-
-/*
- * Raw MIDI section - /dev/snd/midi??
- */
-
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 0)
-
-enum {
- SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
- SNDRV_RAWMIDI_STREAM_INPUT,
- SNDRV_RAWMIDI_STREAM_LAST = SNDRV_RAWMIDI_STREAM_INPUT,
-};
-
-#define SNDRV_RAWMIDI_INFO_OUTPUT 0x00000001
-#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
-#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
-
-struct snd_rawmidi_info {
- unsigned int device; /* RO/WR (control): device number */
- unsigned int subdevice; /* RO/WR (control): subdevice number */
- int stream; /* WR: stream */
- int card; /* R: card number */
- unsigned int flags; /* SNDRV_RAWMIDI_INFO_XXXX */
- unsigned char id[64]; /* ID (user selectable) */
- unsigned char name[80]; /* name of device */
- unsigned char subname[32]; /* name of active or selected subdevice */
- unsigned int subdevices_count;
- unsigned int subdevices_avail;
- unsigned char reserved[64]; /* reserved for future use */
-};
-
-struct snd_rawmidi_params {
- int stream;
- size_t buffer_size; /* queue size in bytes */
- size_t avail_min; /* minimum avail bytes for wakeup */
- unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
-};
-
-struct snd_rawmidi_status {
- int stream;
- struct timespec tstamp; /* Timestamp */
- size_t avail; /* available bytes */
- size_t xruns; /* count of overruns since last status (in bytes) */
- unsigned char reserved[16]; /* reserved for future use */
-};
-
-#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
-#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
-#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
-#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
-#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
-#define SNDRV_RAWMIDI_IOCTL_DRAIN _IOW('W', 0x31, int)
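/*
 * A minimal sketch of driving an output rawmidi substream: the ioctls only
 * negotiate buffering, the MIDI bytes themselves go through plain write(2).
 * The buffer size here is illustrative.
 */
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>

static int rawmidi_send_note_on(int midi_fd)
{
	struct snd_rawmidi_params params;
	static const unsigned char note_on[3] = { 0x90, 60, 100 };

	memset(&params, 0, sizeof(params));
	params.stream = SNDRV_RAWMIDI_STREAM_OUTPUT;
	params.buffer_size = 4096;
	params.avail_min = 1;
	if (ioctl(midi_fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params) < 0)
		return -1;
	if (write(midi_fd, note_on, sizeof(note_on)) != sizeof(note_on))
		return -1;
	return ioctl(midi_fd, SNDRV_RAWMIDI_IOCTL_DRAIN, &params.stream);
}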
-
-/*
- * Timer section - /dev/snd/timer
- */
-
-#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 6)
-
-enum {
- SNDRV_TIMER_CLASS_NONE = -1,
- SNDRV_TIMER_CLASS_SLAVE = 0,
- SNDRV_TIMER_CLASS_GLOBAL,
- SNDRV_TIMER_CLASS_CARD,
- SNDRV_TIMER_CLASS_PCM,
- SNDRV_TIMER_CLASS_LAST = SNDRV_TIMER_CLASS_PCM,
-};
-
-/* slave timer classes */
-enum {
- SNDRV_TIMER_SCLASS_NONE = 0,
- SNDRV_TIMER_SCLASS_APPLICATION,
- SNDRV_TIMER_SCLASS_SEQUENCER, /* alias */
- SNDRV_TIMER_SCLASS_OSS_SEQUENCER, /* alias */
- SNDRV_TIMER_SCLASS_LAST = SNDRV_TIMER_SCLASS_OSS_SEQUENCER,
-};
-
-/* global timers (device member) */
-#define SNDRV_TIMER_GLOBAL_SYSTEM 0
-#define SNDRV_TIMER_GLOBAL_RTC 1
-#define SNDRV_TIMER_GLOBAL_HPET 2
-#define SNDRV_TIMER_GLOBAL_HRTIMER 3
-
-/* info flags */
-#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */
-
-struct snd_timer_id {
- int dev_class;
- int dev_sclass;
- int card;
- int device;
- int subdevice;
-};
-
-struct snd_timer_ginfo {
- struct snd_timer_id tid; /* requested timer ID */
- unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */
- int card; /* card number */
- unsigned char id[64]; /* timer identification */
- unsigned char name[80]; /* timer name */
- unsigned long reserved0; /* reserved for future use */
- unsigned long resolution; /* average period resolution in ns */
- unsigned long resolution_min; /* minimal period resolution in ns */
- unsigned long resolution_max; /* maximal period resolution in ns */
- unsigned int clients; /* active timer clients */
- unsigned char reserved[32];
-};
-
-struct snd_timer_gparams {
- struct snd_timer_id tid; /* requested timer ID */
- unsigned long period_num; /* requested precise period duration (in seconds) - numerator */
- unsigned long period_den; /* requested precise period duration (in seconds) - denominator */
- unsigned char reserved[32];
-};
-
-struct snd_timer_gstatus {
- struct snd_timer_id tid; /* requested timer ID */
- unsigned long resolution; /* current period resolution in ns */
- unsigned long resolution_num; /* precise current period resolution (in seconds) - numerator */
- unsigned long resolution_den; /* precise current period resolution (in seconds) - denominator */
- unsigned char reserved[32];
-};
-
-struct snd_timer_select {
- struct snd_timer_id id; /* bind to timer ID */
- unsigned char reserved[32]; /* reserved */
-};
-
-struct snd_timer_info {
- unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */
- int card; /* card number */
- unsigned char id[64]; /* timer identification */
- unsigned char name[80]; /* timer name */
- unsigned long reserved0; /* reserved for future use */
- unsigned long resolution; /* average period resolution in ns */
- unsigned char reserved[64]; /* reserved */
-};
-
-#define SNDRV_TIMER_PSFLG_AUTO (1<<0) /* auto start, otherwise one-shot */
-#define SNDRV_TIMER_PSFLG_EXCLUSIVE (1<<1) /* exclusive use, precise start/stop/pause/continue */
-#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
-
-struct snd_timer_params {
- unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
- unsigned int ticks; /* requested resolution in ticks */
- unsigned int queue_size; /* total size of queue (32-1024) */
- unsigned int reserved0; /* reserved, was: failure locations */
- unsigned int filter; /* event filter (bitmask of SNDRV_TIMER_EVENT_*) */
- unsigned char reserved[60]; /* reserved */
-};
-
-struct snd_timer_status {
- struct timespec tstamp; /* Timestamp - last update */
- unsigned int resolution; /* current period resolution in ns */
- unsigned int lost; /* count of master ticks lost */
- unsigned int overrun; /* count of read queue overruns */
- unsigned int queue; /* used queue size */
- unsigned char reserved[64]; /* reserved */
-};
-
-#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
-#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
-#define SNDRV_TIMER_IOCTL_TREAD _IOW('T', 0x02, int)
-#define SNDRV_TIMER_IOCTL_GINFO _IOWR('T', 0x03, struct snd_timer_ginfo)
-#define SNDRV_TIMER_IOCTL_GPARAMS _IOW('T', 0x04, struct snd_timer_gparams)
-#define SNDRV_TIMER_IOCTL_GSTATUS _IOWR('T', 0x05, struct snd_timer_gstatus)
-#define SNDRV_TIMER_IOCTL_SELECT _IOW('T', 0x10, struct snd_timer_select)
-#define SNDRV_TIMER_IOCTL_INFO _IOR('T', 0x11, struct snd_timer_info)
-#define SNDRV_TIMER_IOCTL_PARAMS _IOW('T', 0x12, struct snd_timer_params)
-#define SNDRV_TIMER_IOCTL_STATUS _IOR('T', 0x14, struct snd_timer_status)
-/* The following four ioctls are changed since 1.0.9 due to an ioctl number conflict */
-#define SNDRV_TIMER_IOCTL_START _IO('T', 0xa0)
-#define SNDRV_TIMER_IOCTL_STOP _IO('T', 0xa1)
-#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
-#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
-
-struct snd_timer_read {
- unsigned int resolution;
- unsigned int ticks;
-};
-
-enum {
- SNDRV_TIMER_EVENT_RESOLUTION = 0, /* val = resolution in ns */
- SNDRV_TIMER_EVENT_TICK, /* val = ticks */
- SNDRV_TIMER_EVENT_START, /* val = resolution in ns */
- SNDRV_TIMER_EVENT_STOP, /* val = 0 */
- SNDRV_TIMER_EVENT_CONTINUE, /* val = resolution in ns */
- SNDRV_TIMER_EVENT_PAUSE, /* val = 0 */
- SNDRV_TIMER_EVENT_EARLY, /* val = 0, early event */
- SNDRV_TIMER_EVENT_SUSPEND, /* val = 0 */
- SNDRV_TIMER_EVENT_RESUME, /* val = resolution in ns */
- /* master timer events for slave timer instances */
- SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
- SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,
- SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
- SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10,
- SNDRV_TIMER_EVENT_MSUSPEND = SNDRV_TIMER_EVENT_SUSPEND + 10,
- SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
-};
-
-struct snd_timer_tread {
- int event;
- struct timespec tstamp;
- unsigned int val;
-};
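/*
 * A minimal sketch of consuming timer events in "extended read" mode,
 * assuming a timer was already bound with SNDRV_TIMER_IOCTL_SELECT and
 * started; TREAD has to be enabled before SELECT for the records to be
 * delivered as struct snd_timer_tread.
 */
#include <sys/ioctl.h>
#include <unistd.h>

static int timer_enable_tread(int timer_fd)
{
	int tread = 1;

	return ioctl(timer_fd, SNDRV_TIMER_IOCTL_TREAD, &tread);
}

static int timer_read_events(int timer_fd)
{
	struct snd_timer_tread ev[16];
	ssize_t n = read(timer_fd, ev, sizeof(ev));

	if (n < 0)
		return -1;
	return n / sizeof(ev[0]);	/* number of queued events consumed */
}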
-
-/****************************************************************************
- * *
- * Section for driver control interface - /dev/snd/control? *
- * *
- ****************************************************************************/
-
-#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
-
-struct snd_ctl_card_info {
- int card; /* card number */
- int pad; /* reserved for future (was type) */
- unsigned char id[16]; /* ID of card (user selectable) */
- unsigned char driver[16]; /* Driver name */
- unsigned char name[32]; /* Short name of soundcard */
- unsigned char longname[80]; /* name + info text about soundcard */
- unsigned char reserved_[16]; /* reserved for future (was ID of mixer) */
- unsigned char mixername[80]; /* visual mixer identification */
- unsigned char components[128]; /* card components / fine identification, delimited with one space (AC97 etc..) */
-};
-
-typedef int __bitwise snd_ctl_elem_type_t;
-#define SNDRV_CTL_ELEM_TYPE_NONE ((__force snd_ctl_elem_type_t) 0) /* invalid */
-#define SNDRV_CTL_ELEM_TYPE_BOOLEAN ((__force snd_ctl_elem_type_t) 1) /* boolean type */
-#define SNDRV_CTL_ELEM_TYPE_INTEGER ((__force snd_ctl_elem_type_t) 2) /* integer type */
-#define SNDRV_CTL_ELEM_TYPE_ENUMERATED ((__force snd_ctl_elem_type_t) 3) /* enumerated type */
-#define SNDRV_CTL_ELEM_TYPE_BYTES ((__force snd_ctl_elem_type_t) 4) /* byte array */
-#define SNDRV_CTL_ELEM_TYPE_IEC958 ((__force snd_ctl_elem_type_t) 5) /* IEC958 (S/PDIF) setup */
-#define SNDRV_CTL_ELEM_TYPE_INTEGER64 ((__force snd_ctl_elem_type_t) 6) /* 64-bit integer type */
-#define SNDRV_CTL_ELEM_TYPE_LAST SNDRV_CTL_ELEM_TYPE_INTEGER64
-
-typedef int __bitwise snd_ctl_elem_iface_t;
-#define SNDRV_CTL_ELEM_IFACE_CARD ((__force snd_ctl_elem_iface_t) 0) /* global control */
-#define SNDRV_CTL_ELEM_IFACE_HWDEP ((__force snd_ctl_elem_iface_t) 1) /* hardware dependent device */
-#define SNDRV_CTL_ELEM_IFACE_MIXER ((__force snd_ctl_elem_iface_t) 2) /* virtual mixer device */
-#define SNDRV_CTL_ELEM_IFACE_PCM ((__force snd_ctl_elem_iface_t) 3) /* PCM device */
-#define SNDRV_CTL_ELEM_IFACE_RAWMIDI ((__force snd_ctl_elem_iface_t) 4) /* RawMidi device */
-#define SNDRV_CTL_ELEM_IFACE_TIMER ((__force snd_ctl_elem_iface_t) 5) /* timer device */
-#define SNDRV_CTL_ELEM_IFACE_SEQUENCER ((__force snd_ctl_elem_iface_t) 6) /* sequencer client */
-#define SNDRV_CTL_ELEM_IFACE_LAST SNDRV_CTL_ELEM_IFACE_SEQUENCER
-
-#define SNDRV_CTL_ELEM_ACCESS_READ (1<<0)
-#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
-#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
-#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-#define SNDRV_CTL_ELEM_ACCESS_TIMESTAMP (1<<3) /* when was control changed */
-#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
-#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
-#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
-#define SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND (1<<6) /* TLV command is possible */
-#define SNDRV_CTL_ELEM_ACCESS_INACTIVE (1<<8) /* control does nothing, but may be updated */
-#define SNDRV_CTL_ELEM_ACCESS_LOCK (1<<9) /* write lock */
-#define SNDRV_CTL_ELEM_ACCESS_OWNER (1<<10) /* write lock owner */
-#define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK (1<<28) /* kernel use a TLV callback */
-#define SNDRV_CTL_ELEM_ACCESS_USER (1<<29) /* user space element */
-/* bits 30 and 31 are obsoleted (for indirect access) */
-
-/* for further details see the ACPI and PCI power management specification */
-#define SNDRV_CTL_POWER_D0 0x0000 /* full On */
-#define SNDRV_CTL_POWER_D1 0x0100 /* partial On */
-#define SNDRV_CTL_POWER_D2 0x0200 /* partial On */
-#define SNDRV_CTL_POWER_D3 0x0300 /* Off */
-#define SNDRV_CTL_POWER_D3hot (SNDRV_CTL_POWER_D3|0x0000) /* Off, with power */
-#define SNDRV_CTL_POWER_D3cold (SNDRV_CTL_POWER_D3|0x0001) /* Off, without power */
-
-struct snd_ctl_elem_id {
- unsigned int numid; /* numeric identifier, zero = invalid */
- snd_ctl_elem_iface_t iface; /* interface identifier */
- unsigned int device; /* device/client number */
- unsigned int subdevice; /* subdevice (substream) number */
- unsigned char name[44]; /* ASCII name of item */
- unsigned int index; /* index of item */
-};
-
-struct snd_ctl_elem_list {
- unsigned int offset; /* W: first element ID to get */
- unsigned int space; /* W: count of element IDs to get */
- unsigned int used; /* R: count of element IDs set */
- unsigned int count; /* R: count of all elements */
- struct snd_ctl_elem_id __user *pids; /* R: IDs */
- unsigned char reserved[50];
-};
-
-struct snd_ctl_elem_info {
- struct snd_ctl_elem_id id; /* W: element ID */
- snd_ctl_elem_type_t type; /* R: value type - SNDRV_CTL_ELEM_TYPE_* */
- unsigned int access; /* R: value access (bitmask) - SNDRV_CTL_ELEM_ACCESS_* */
- unsigned int count; /* count of values */
- __kernel_pid_t owner; /* owner's PID of this control */
- union {
- struct {
- long min; /* R: minimum value */
- long max; /* R: maximum value */
- long step; /* R: step (0 variable) */
- } integer;
- struct {
- long long min; /* R: minimum value */
- long long max; /* R: maximum value */
- long long step; /* R: step (0 variable) */
- } integer64;
- struct {
- unsigned int items; /* R: number of items */
- unsigned int item; /* W: item number */
- char name[64]; /* R: value name */
- __u64 names_ptr; /* W: names list (ELEM_ADD only) */
- unsigned int names_length;
- } enumerated;
- unsigned char reserved[128];
- } value;
- union {
- unsigned short d[4]; /* dimensions */
- unsigned short *d_ptr; /* indirect - obsoleted */
- } dimen;
- unsigned char reserved[64-4*sizeof(unsigned short)];
-};
-
-struct snd_ctl_elem_value {
- struct snd_ctl_elem_id id; /* W: element ID */
- unsigned int indirect: 1; /* W: indirect access - obsoleted */
- union {
- union {
- long value[128];
- long *value_ptr; /* obsoleted */
- } integer;
- union {
- long long value[64];
- long long *value_ptr; /* obsoleted */
- } integer64;
- union {
- unsigned int item[128];
- unsigned int *item_ptr; /* obsoleted */
- } enumerated;
- union {
- unsigned char data[512];
- unsigned char *data_ptr; /* obsoleted */
- } bytes;
- struct snd_aes_iec958 iec958;
- } value; /* RO */
- struct timespec tstamp;
- unsigned char reserved[128-sizeof(struct timespec)];
-};
-
-struct snd_ctl_tlv {
- unsigned int numid; /* control element numeric identification */
- unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
-};
-
-#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
-#define SNDRV_CTL_IOCTL_CARD_INFO _IOR('U', 0x01, struct snd_ctl_card_info)
-#define SNDRV_CTL_IOCTL_ELEM_LIST _IOWR('U', 0x10, struct snd_ctl_elem_list)
-#define SNDRV_CTL_IOCTL_ELEM_INFO _IOWR('U', 0x11, struct snd_ctl_elem_info)
-#define SNDRV_CTL_IOCTL_ELEM_READ _IOWR('U', 0x12, struct snd_ctl_elem_value)
-#define SNDRV_CTL_IOCTL_ELEM_WRITE _IOWR('U', 0x13, struct snd_ctl_elem_value)
-#define SNDRV_CTL_IOCTL_ELEM_LOCK _IOW('U', 0x14, struct snd_ctl_elem_id)
-#define SNDRV_CTL_IOCTL_ELEM_UNLOCK _IOW('U', 0x15, struct snd_ctl_elem_id)
-#define SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS _IOWR('U', 0x16, int)
-#define SNDRV_CTL_IOCTL_ELEM_ADD _IOWR('U', 0x17, struct snd_ctl_elem_info)
-#define SNDRV_CTL_IOCTL_ELEM_REPLACE _IOWR('U', 0x18, struct snd_ctl_elem_info)
-#define SNDRV_CTL_IOCTL_ELEM_REMOVE _IOWR('U', 0x19, struct snd_ctl_elem_id)
-#define SNDRV_CTL_IOCTL_TLV_READ _IOWR('U', 0x1a, struct snd_ctl_tlv)
-#define SNDRV_CTL_IOCTL_TLV_WRITE _IOWR('U', 0x1b, struct snd_ctl_tlv)
-#define SNDRV_CTL_IOCTL_TLV_COMMAND _IOWR('U', 0x1c, struct snd_ctl_tlv)
-#define SNDRV_CTL_IOCTL_HWDEP_NEXT_DEVICE _IOWR('U', 0x20, int)
-#define SNDRV_CTL_IOCTL_HWDEP_INFO _IOR('U', 0x21, struct snd_hwdep_info)
-#define SNDRV_CTL_IOCTL_PCM_NEXT_DEVICE _IOR('U', 0x30, int)
-#define SNDRV_CTL_IOCTL_PCM_INFO _IOWR('U', 0x31, struct snd_pcm_info)
-#define SNDRV_CTL_IOCTL_PCM_PREFER_SUBDEVICE _IOW('U', 0x32, int)
-#define SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE _IOWR('U', 0x40, int)
-#define SNDRV_CTL_IOCTL_RAWMIDI_INFO _IOWR('U', 0x41, struct snd_rawmidi_info)
-#define SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE _IOW('U', 0x42, int)
-#define SNDRV_CTL_IOCTL_POWER _IOWR('U', 0xd0, int)
-#define SNDRV_CTL_IOCTL_POWER_STATE _IOR('U', 0xd1, int)
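/*
 * A sketch of the usual two-pass element enumeration on a control node:
 * the first SNDRV_CTL_IOCTL_ELEM_LIST call only returns the total count,
 * the second fills a caller-allocated ID array.
 */
#include <sys/ioctl.h>
#include <stdlib.h>
#include <string.h>

static struct snd_ctl_elem_id *ctl_list_ids(int ctl_fd, unsigned int *count)
{
	struct snd_ctl_elem_list list;

	memset(&list, 0, sizeof(list));
	if (ioctl(ctl_fd, SNDRV_CTL_IOCTL_ELEM_LIST, &list) < 0)
		return NULL;			/* first pass: space == 0 */
	list.pids = calloc(list.count, sizeof(*list.pids));
	if (!list.pids)
		return NULL;
	list.offset = 0;
	list.space = list.count;
	if (ioctl(ctl_fd, SNDRV_CTL_IOCTL_ELEM_LIST, &list) < 0) {
		free(list.pids);
		return NULL;
	}
	*count = list.used;
	return list.pids;			/* caller frees */
}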
-
-/*
- * Read interface.
- */
-
-enum sndrv_ctl_event_type {
- SNDRV_CTL_EVENT_ELEM = 0,
- SNDRV_CTL_EVENT_LAST = SNDRV_CTL_EVENT_ELEM,
-};
-
-#define SNDRV_CTL_EVENT_MASK_VALUE (1<<0) /* element value was changed */
-#define SNDRV_CTL_EVENT_MASK_INFO (1<<1) /* element info was changed */
-#define SNDRV_CTL_EVENT_MASK_ADD (1<<2) /* element was added */
-#define SNDRV_CTL_EVENT_MASK_TLV (1<<3) /* element TLV tree was changed */
-#define SNDRV_CTL_EVENT_MASK_REMOVE (~0U) /* element was removed */
-
-struct snd_ctl_event {
- int type; /* event type - SNDRV_CTL_EVENT_* */
- union {
- struct {
- unsigned int mask;
- struct snd_ctl_elem_id id;
- } elem;
- unsigned char data8[60];
- } data;
-};
-
-/*
- * Control names
- */
-
-#define SNDRV_CTL_NAME_NONE ""
-#define SNDRV_CTL_NAME_PLAYBACK "Playback "
-#define SNDRV_CTL_NAME_CAPTURE "Capture "
-
-#define SNDRV_CTL_NAME_IEC958_NONE ""
-#define SNDRV_CTL_NAME_IEC958_SWITCH "Switch"
-#define SNDRV_CTL_NAME_IEC958_VOLUME "Volume"
-#define SNDRV_CTL_NAME_IEC958_DEFAULT "Default"
-#define SNDRV_CTL_NAME_IEC958_MASK "Mask"
-#define SNDRV_CTL_NAME_IEC958_CON_MASK "Con Mask"
-#define SNDRV_CTL_NAME_IEC958_PRO_MASK "Pro Mask"
-#define SNDRV_CTL_NAME_IEC958_PCM_STREAM "PCM Stream"
-#define SNDRV_CTL_NAME_IEC958(expl,direction,what) "IEC958 " expl SNDRV_CTL_NAME_##direction SNDRV_CTL_NAME_IEC958_##what
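/*
 * A short illustration of what the string-concatenating macro above yields
 * for a typical S/PDIF control name.
 */
static const char spdif_default_name[] =
	SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT);
	/* == "IEC958 " "" "Playback " "Default" == "IEC958 Playback Default" */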
-
+#include <uapi/sound/asound.h>
#endif /* __SOUND_ASOUND_H */
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
index 50a059e7d11..6d9e15ed1dc 100644
--- a/include/sound/cs4271.h
+++ b/include/sound/cs4271.h
@@ -19,6 +19,7 @@
struct cs4271_platform_data {
int gpio_nreset; /* GPIO driving Reset pin, if any */
+ int amutec_eq_bmutec:1; /* flag to enable AMUTEC=BMUTEC */
};
#endif /* __CS4271_H */
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 1a33f48ebe7..f841ba4bacb 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1,8 +1,3 @@
-#ifndef __SOUND_EMU10K1_H
-#define __SOUND_EMU10K1_H
-
-#include <linux/types.h>
-
/*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>,
* Creative Labs, Inc.
@@ -24,8 +19,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
+#ifndef __SOUND_EMU10K1_H
+#define __SOUND_EMU10K1_H
-#ifdef __KERNEL__
#include <sound/pcm.h>
#include <sound/rawmidi.h>
@@ -36,8 +32,10 @@
#include <sound/timer.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/firmware.h>
#include <asm/io.h>
+#include <uapi/sound/emu10k1.h>
/* ------------------- DEFINES -------------------- */
@@ -1788,6 +1786,8 @@ struct snd_emu10k1 {
unsigned int efx_voices_mask[2];
unsigned int next_free_voice;
+ const struct firmware *firmware;
+
#ifdef CONFIG_PM_SLEEP
unsigned int *saved_ptr;
unsigned int *saved_gpr;
@@ -1796,6 +1796,7 @@ struct snd_emu10k1 {
unsigned int *saved_icode;
unsigned int *p16v_saved;
unsigned int saved_a_iocfg, saved_hcfg;
+ bool suspend;
#endif
};
@@ -1899,350 +1900,4 @@ int snd_emu10k1_fx8010_register_irq_handler(struct snd_emu10k1 *emu,
int snd_emu10k1_fx8010_unregister_irq_handler(struct snd_emu10k1 *emu,
struct snd_emu10k1_fx8010_irq *irq);
-#endif /* __KERNEL__ */
-
-/*
- * ---- FX8010 ----
- */
-
-#define EMU10K1_CARD_CREATIVE 0x00000000
-#define EMU10K1_CARD_EMUAPS 0x00000001
-
-#define EMU10K1_FX8010_PCM_COUNT 8
-
-/* instruction set */
-#define iMAC0 0x00 /* R = A + (X * Y >> 31) ; saturation */
-#define iMAC1 0x01 /* R = A + (-X * Y >> 31) ; saturation */
-#define iMAC2 0x02 /* R = A + (X * Y >> 31) ; wraparound */
-#define iMAC3 0x03 /* R = A + (-X * Y >> 31) ; wraparound */
-#define iMACINT0 0x04 /* R = A + X * Y ; saturation */
-#define iMACINT1 0x05 /* R = A + X * Y ; wraparound (31-bit) */
-#define iACC3 0x06 /* R = A + X + Y ; saturation */
-#define iMACMV 0x07 /* R = A, acc += X * Y >> 31 */
-#define iANDXOR 0x08 /* R = (A & X) ^ Y */
-#define iTSTNEG 0x09 /* R = (A >= Y) ? X : ~X */
-#define iLIMITGE 0x0a /* R = (A >= Y) ? X : Y */
-#define iLIMITLT 0x0b /* R = (A < Y) ? X : Y */
-#define iLOG 0x0c /* R = linear_data, A (log_data), X (max_exp), Y (format_word) */
-#define iEXP 0x0d /* R = log_data, A (linear_data), X (max_exp), Y (format_word) */
-#define iINTERP 0x0e /* R = A + (X * (Y - A) >> 31) ; saturation */
-#define iSKIP 0x0f /* R = A (cc_reg), X (count), Y (cc_test) */
-
-/* GPRs */
-#define FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x0f */
-#define EXTIN(x) (0x10 + (x)) /* x = 0x00 - 0x0f */
-#define EXTOUT(x) (0x20 + (x)) /* x = 0x00 - 0x0f physical outs -> FXWC low 16 bits */
-#define FXBUS2(x) (0x30 + (x)) /* x = 0x00 - 0x0f copies of fx buses for capture -> FXWC high 16 bits */
- /* NB: 0x31 and 0x32 are shared with Center/LFE on SB live 5.1 */
-
-#define C_00000000 0x40
-#define C_00000001 0x41
-#define C_00000002 0x42
-#define C_00000003 0x43
-#define C_00000004 0x44
-#define C_00000008 0x45
-#define C_00000010 0x46
-#define C_00000020 0x47
-#define C_00000100 0x48
-#define C_00010000 0x49
-#define C_00080000 0x4a
-#define C_10000000 0x4b
-#define C_20000000 0x4c
-#define C_40000000 0x4d
-#define C_80000000 0x4e
-#define C_7fffffff 0x4f
-#define C_ffffffff 0x50
-#define C_fffffffe 0x51
-#define C_c0000000 0x52
-#define C_4f1bbcdc 0x53
-#define C_5a7ef9db 0x54
-#define C_00100000 0x55 /* ?? */
-#define GPR_ACCU 0x56 /* ACCUM, accumulator */
-#define GPR_COND 0x57 /* CCR, condition register */
-#define GPR_NOISE0 0x58 /* noise source */
-#define GPR_NOISE1 0x59 /* noise source */
-#define GPR_IRQ 0x5a /* IRQ register */
-#define GPR_DBAC 0x5b /* TRAM Delay Base Address Counter */
-#define GPR(x) (FXGPREGBASE + (x)) /* free GPRs: x = 0x00 - 0xff */
-#define ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
-#define ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
-#define ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
-#define ETRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
-
-#define A_ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
-#define A_ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
-#define A_ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
-#define A_ETRAM_ADDR(x) (TANKMEMADDRREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
-#define A_ITRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
-#define A_ETRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
-
-#define A_FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x3f FX buses */
-#define A_EXTIN(x) (0x40 + (x)) /* x = 0x00 - 0x0f physical ins */
-#define A_P16VIN(x) (0x50 + (x)) /* x = 0x00 - 0x0f p16v ins (A2 only) "EMU32 inputs" */
-#define A_EXTOUT(x) (0x60 + (x)) /* x = 0x00 - 0x1f physical outs -> A_FXWC1 0x79-7f unknown */
-#define A_FXBUS2(x) (0x80 + (x)) /* x = 0x00 - 0x1f extra outs used for EFX capture -> A_FXWC2 */
-#define A_EMU32OUTH(x) (0xa0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_10 - _1F" - ??? */
-#define A_EMU32OUTL(x) (0xb0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_1 - _F" - ??? */
-#define A3_EMU32IN(x) (0x160 + (x)) /* x = 0x00 - 0x3f "EMU32_IN_00 - _3F" - Only when .device = 0x0008 */
-#define A3_EMU32OUT(x) (0x1E0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_00 - _3F" - Only when .device = 0x0008 */
-#define A_GPR(x) (A_FXGPREGBASE + (x))
-
-/* cc_reg constants */
-#define CC_REG_NORMALIZED C_00000001
-#define CC_REG_BORROW C_00000002
-#define CC_REG_MINUS C_00000004
-#define CC_REG_ZERO C_00000008
-#define CC_REG_SATURATE C_00000010
-#define CC_REG_NONZERO C_00000100
-
-/* FX buses */
-#define FXBUS_PCM_LEFT 0x00
-#define FXBUS_PCM_RIGHT 0x01
-#define FXBUS_PCM_LEFT_REAR 0x02
-#define FXBUS_PCM_RIGHT_REAR 0x03
-#define FXBUS_MIDI_LEFT 0x04
-#define FXBUS_MIDI_RIGHT 0x05
-#define FXBUS_PCM_CENTER 0x06
-#define FXBUS_PCM_LFE 0x07
-#define FXBUS_PCM_LEFT_FRONT 0x08
-#define FXBUS_PCM_RIGHT_FRONT 0x09
-#define FXBUS_MIDI_REVERB 0x0c
-#define FXBUS_MIDI_CHORUS 0x0d
-#define FXBUS_PCM_LEFT_SIDE 0x0e
-#define FXBUS_PCM_RIGHT_SIDE 0x0f
-#define FXBUS_PT_LEFT 0x14
-#define FXBUS_PT_RIGHT 0x15
-
-/* Inputs */
-#define EXTIN_AC97_L 0x00 /* AC'97 capture channel - left */
-#define EXTIN_AC97_R 0x01 /* AC'97 capture channel - right */
-#define EXTIN_SPDIF_CD_L 0x02 /* internal S/PDIF CD - onboard - left */
-#define EXTIN_SPDIF_CD_R 0x03 /* internal S/PDIF CD - onboard - right */
-#define EXTIN_ZOOM_L 0x04 /* Zoom Video I2S - left */
-#define EXTIN_ZOOM_R 0x05 /* Zoom Video I2S - right */
-#define EXTIN_TOSLINK_L 0x06 /* LiveDrive - TOSLink Optical - left */
-#define EXTIN_TOSLINK_R 0x07 /* LiveDrive - TOSLink Optical - right */
-#define EXTIN_LINE1_L 0x08 /* LiveDrive - Line/Mic 1 - left */
-#define EXTIN_LINE1_R 0x09 /* LiveDrive - Line/Mic 1 - right */
-#define EXTIN_COAX_SPDIF_L 0x0a /* LiveDrive - Coaxial S/PDIF - left */
-#define EXTIN_COAX_SPDIF_R 0x0b /* LiveDrive - Coaxial S/PDIF - right */
-#define EXTIN_LINE2_L 0x0c /* LiveDrive - Line/Mic 2 - left */
-#define EXTIN_LINE2_R 0x0d /* LiveDrive - Line/Mic 2 - right */
-
-/* Outputs */
-#define EXTOUT_AC97_L 0x00 /* AC'97 playback channel - left */
-#define EXTOUT_AC97_R 0x01 /* AC'97 playback channel - right */
-#define EXTOUT_TOSLINK_L 0x02 /* LiveDrive - TOSLink Optical - left */
-#define EXTOUT_TOSLINK_R 0x03 /* LiveDrive - TOSLink Optical - right */
-#define EXTOUT_AC97_CENTER 0x04 /* SB Live 5.1 - center */
-#define EXTOUT_AC97_LFE 0x05 /* SB Live 5.1 - LFE */
-#define EXTOUT_HEADPHONE_L 0x06 /* LiveDrive - Headphone - left */
-#define EXTOUT_HEADPHONE_R 0x07 /* LiveDrive - Headphone - right */
-#define EXTOUT_REAR_L 0x08 /* Rear channel - left */
-#define EXTOUT_REAR_R 0x09 /* Rear channel - right */
-#define EXTOUT_ADC_CAP_L 0x0a /* ADC Capture buffer - left */
-#define EXTOUT_ADC_CAP_R 0x0b /* ADC Capture buffer - right */
-#define EXTOUT_MIC_CAP 0x0c /* MIC Capture buffer */
-#define EXTOUT_AC97_REAR_L 0x0d /* SB Live 5.1 (c) 2003 - Rear Left */
-#define EXTOUT_AC97_REAR_R 0x0e /* SB Live 5.1 (c) 2003 - Rear Right */
-#define EXTOUT_ACENTER 0x11 /* Analog Center */
-#define EXTOUT_ALFE 0x12 /* Analog LFE */
-
-/* Audigy Inputs */
-#define A_EXTIN_AC97_L 0x00 /* AC'97 capture channel - left */
-#define A_EXTIN_AC97_R 0x01 /* AC'97 capture channel - right */
-#define A_EXTIN_SPDIF_CD_L 0x02 /* digital CD left */
-#define A_EXTIN_SPDIF_CD_R 0x03 /* digital CD right */
-#define A_EXTIN_OPT_SPDIF_L 0x04 /* audigy drive Optical SPDIF - left */
-#define A_EXTIN_OPT_SPDIF_R 0x05 /* right */
-#define A_EXTIN_LINE2_L 0x08 /* audigy drive line2/mic2 - left */
-#define A_EXTIN_LINE2_R 0x09 /* right */
-#define A_EXTIN_ADC_L 0x0a /* Philips ADC - left */
-#define A_EXTIN_ADC_R 0x0b /* right */
-#define A_EXTIN_AUX2_L 0x0c /* audigy drive aux2 - left */
-#define A_EXTIN_AUX2_R 0x0d /* - right */
-
-/* Audigy Outputs */
-#define A_EXTOUT_FRONT_L 0x00 /* digital front left */
-#define A_EXTOUT_FRONT_R 0x01 /* right */
-#define A_EXTOUT_CENTER 0x02 /* digital front center */
-#define A_EXTOUT_LFE 0x03 /* digital front lfe */
-#define A_EXTOUT_HEADPHONE_L 0x04 /* headphone audigy drive left */
-#define A_EXTOUT_HEADPHONE_R 0x05 /* right */
-#define A_EXTOUT_REAR_L 0x06 /* digital rear left */
-#define A_EXTOUT_REAR_R 0x07 /* right */
-#define A_EXTOUT_AFRONT_L 0x08 /* analog front left */
-#define A_EXTOUT_AFRONT_R 0x09 /* right */
-#define A_EXTOUT_ACENTER 0x0a /* analog center */
-#define A_EXTOUT_ALFE 0x0b /* analog LFE */
-#define A_EXTOUT_ASIDE_L 0x0c /* analog side left - Audigy 2 ZS */
-#define A_EXTOUT_ASIDE_R 0x0d /* right - Audigy 2 ZS */
-#define A_EXTOUT_AREAR_L 0x0e /* analog rear left */
-#define A_EXTOUT_AREAR_R 0x0f /* right */
-#define A_EXTOUT_AC97_L 0x10 /* AC97 left (front) */
-#define A_EXTOUT_AC97_R 0x11 /* right */
-#define A_EXTOUT_ADC_CAP_L 0x16 /* ADC capture buffer left */
-#define A_EXTOUT_ADC_CAP_R 0x17 /* right */
-#define A_EXTOUT_MIC_CAP 0x18 /* Mic capture buffer */
-
-/* Audigy constants */
-#define A_C_00000000 0xc0
-#define A_C_00000001 0xc1
-#define A_C_00000002 0xc2
-#define A_C_00000003 0xc3
-#define A_C_00000004 0xc4
-#define A_C_00000008 0xc5
-#define A_C_00000010 0xc6
-#define A_C_00000020 0xc7
-#define A_C_00000100 0xc8
-#define A_C_00010000 0xc9
-#define A_C_00000800 0xca
-#define A_C_10000000 0xcb
-#define A_C_20000000 0xcc
-#define A_C_40000000 0xcd
-#define A_C_80000000 0xce
-#define A_C_7fffffff 0xcf
-#define A_C_ffffffff 0xd0
-#define A_C_fffffffe 0xd1
-#define A_C_c0000000 0xd2
-#define A_C_4f1bbcdc 0xd3
-#define A_C_5a7ef9db 0xd4
-#define A_C_00100000 0xd5
-#define A_GPR_ACCU 0xd6 /* ACCUM, accumulator */
-#define A_GPR_COND 0xd7 /* CCR, condition register */
-#define A_GPR_NOISE0 0xd8 /* noise source */
-#define A_GPR_NOISE1 0xd9 /* noise source */
-#define A_GPR_IRQ 0xda /* IRQ register */
-#define A_GPR_DBAC 0xdb /* TRAM Delay Base Address Counter - internal */
-#define A_GPR_DBACE 0xde /* TRAM Delay Base Address Counter - external */
-
-/* definitions for debug register */
-#define EMU10K1_DBG_ZC 0x80000000 /* zero tram counter */
-#define EMU10K1_DBG_SATURATION_OCCURED 0x02000000 /* saturation control */
-#define EMU10K1_DBG_SATURATION_ADDR 0x01ff0000 /* saturation address */
-#define EMU10K1_DBG_SINGLE_STEP 0x00008000 /* single step mode */
-#define EMU10K1_DBG_STEP 0x00004000 /* start single step */
-#define EMU10K1_DBG_CONDITION_CODE 0x00003e00 /* condition code */
-#define EMU10K1_DBG_SINGLE_STEP_ADDR 0x000001ff /* single step address */
-
-/* tank memory address line */
-#ifndef __KERNEL__
-#define TANKMEMADDRREG_ADDR_MASK 0x000fffff /* 20 bit tank address field */
-#define TANKMEMADDRREG_CLEAR 0x00800000 /* Clear tank memory */
-#define TANKMEMADDRREG_ALIGN 0x00400000 /* Align read or write relative to tank access */
-#define TANKMEMADDRREG_WRITE 0x00200000 /* Write to tank memory */
-#define TANKMEMADDRREG_READ 0x00100000 /* Read from tank memory */
-#endif
-
-struct snd_emu10k1_fx8010_info {
- unsigned int internal_tram_size; /* in samples */
- unsigned int external_tram_size; /* in samples */
- char fxbus_names[16][32]; /* names of FXBUSes */
- char extin_names[16][32]; /* names of external inputs */
- char extout_names[32][32]; /* names of external outputs */
- unsigned int gpr_controls; /* count of GPR controls */
-};
-
-#define EMU10K1_GPR_TRANSLATION_NONE 0
-#define EMU10K1_GPR_TRANSLATION_TABLE100 1
-#define EMU10K1_GPR_TRANSLATION_BASS 2
-#define EMU10K1_GPR_TRANSLATION_TREBLE 3
-#define EMU10K1_GPR_TRANSLATION_ONOFF 4
-
-struct snd_emu10k1_fx8010_control_gpr {
- struct snd_ctl_elem_id id; /* full control ID definition */
- unsigned int vcount; /* visible count */
- unsigned int count; /* count of GPR (1..16) */
- unsigned short gpr[32]; /* GPR number(s) */
- unsigned int value[32]; /* initial values */
- unsigned int min; /* minimum range */
- unsigned int max; /* maximum range */
- unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
- const unsigned int *tlv;
-};
-
-/* old ABI without TLV support */
-struct snd_emu10k1_fx8010_control_old_gpr {
- struct snd_ctl_elem_id id;
- unsigned int vcount;
- unsigned int count;
- unsigned short gpr[32];
- unsigned int value[32];
- unsigned int min;
- unsigned int max;
- unsigned int translation;
-};
-
-struct snd_emu10k1_fx8010_code {
- char name[128];
-
- DECLARE_BITMAP(gpr_valid, 0x200); /* bitmask of valid initializers */
- __u32 __user *gpr_map; /* initializers */
-
- unsigned int gpr_add_control_count; /* count of GPR controls to add/replace */
- struct snd_emu10k1_fx8010_control_gpr __user *gpr_add_controls; /* GPR controls to add/replace */
-
- unsigned int gpr_del_control_count; /* count of GPR controls to remove */
- struct snd_ctl_elem_id __user *gpr_del_controls; /* IDs of GPR controls to remove */
-
- unsigned int gpr_list_control_count; /* count of GPR controls to list */
- unsigned int gpr_list_control_total; /* total count of GPR controls */
- struct snd_emu10k1_fx8010_control_gpr __user *gpr_list_controls; /* listed GPR controls */
-
- DECLARE_BITMAP(tram_valid, 0x100); /* bitmask of valid initializers */
- __u32 __user *tram_data_map; /* data initializers */
- __u32 __user *tram_addr_map; /* map initializers */
-
- DECLARE_BITMAP(code_valid, 1024); /* bitmask of valid instructions */
- __u32 __user *code; /* one instruction - 64 bits */
-};
-
-struct snd_emu10k1_fx8010_tram {
- unsigned int address; /* bit 31 == 1 -> external TRAM */
- unsigned int size; /* size in samples (4 bytes) */
- unsigned int *samples; /* pointer to samples (20-bit) */
- /* NULL->clear memory */
-};
-
-struct snd_emu10k1_fx8010_pcm_rec {
- unsigned int substream; /* substream number */
- unsigned int res1; /* reserved */
- unsigned int channels; /* 16-bit channels count, zero = remove this substream */
- unsigned int tram_start; /* ring buffer position in TRAM (in samples) */
- unsigned int buffer_size; /* count of buffered samples */
- unsigned short gpr_size; /* GPR containing size of ringbuffer in samples (host) */
- unsigned short gpr_ptr; /* GPR containing current pointer in the ring buffer (host = reset, FX8010) */
- unsigned short gpr_count; /* GPR containing count of samples between two interrupts (host) */
- unsigned short gpr_tmpcount; /* GPR containing current count of samples to interrupt (host = set, FX8010) */
- unsigned short gpr_trigger; /* GPR containing trigger (activate) information (host) */
- unsigned short gpr_running; /* GPR containing info if PCM is running (FX8010) */
- unsigned char pad; /* reserved */
- unsigned char etram[32]; /* external TRAM address & data (one per channel) */
- unsigned int res2; /* reserved */
-};
-
-#define SNDRV_EMU10K1_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 1)
-
-#define SNDRV_EMU10K1_IOCTL_INFO _IOR ('H', 0x10, struct snd_emu10k1_fx8010_info)
-#define SNDRV_EMU10K1_IOCTL_CODE_POKE _IOW ('H', 0x11, struct snd_emu10k1_fx8010_code)
-#define SNDRV_EMU10K1_IOCTL_CODE_PEEK _IOWR('H', 0x12, struct snd_emu10k1_fx8010_code)
-#define SNDRV_EMU10K1_IOCTL_TRAM_SETUP _IOW ('H', 0x20, int)
-#define SNDRV_EMU10K1_IOCTL_TRAM_POKE _IOW ('H', 0x21, struct snd_emu10k1_fx8010_tram)
-#define SNDRV_EMU10K1_IOCTL_TRAM_PEEK _IOWR('H', 0x22, struct snd_emu10k1_fx8010_tram)
-#define SNDRV_EMU10K1_IOCTL_PCM_POKE _IOW ('H', 0x30, struct snd_emu10k1_fx8010_pcm_rec)
-#define SNDRV_EMU10K1_IOCTL_PCM_PEEK _IOWR('H', 0x31, struct snd_emu10k1_fx8010_pcm_rec)
-#define SNDRV_EMU10K1_IOCTL_PVERSION _IOR ('H', 0x40, int)
-#define SNDRV_EMU10K1_IOCTL_STOP _IO ('H', 0x80)
-#define SNDRV_EMU10K1_IOCTL_CONTINUE _IO ('H', 0x81)
-#define SNDRV_EMU10K1_IOCTL_ZERO_TRAM_COUNTER _IO ('H', 0x82)
-#define SNDRV_EMU10K1_IOCTL_SINGLE_STEP _IOW ('H', 0x83, int)
-#define SNDRV_EMU10K1_IOCTL_DBG_READ _IOR ('H', 0x84, int)
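/*
 * A minimal sketch of querying the FX8010 state through the hwdep node of
 * an EMU10K1 card: how many GPR controls the currently loaded DSP code
 * exposes.  The fd is assumed to be an already opened hwdep device.
 */
#include <sys/ioctl.h>
#include <string.h>

static int fx8010_gpr_control_count(int hwdep_fd)
{
	struct snd_emu10k1_fx8010_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(hwdep_fd, SNDRV_EMU10K1_IOCTL_INFO, &info) < 0)
		return -1;
	return info.gpr_controls;
}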
-
-/* typedefs for compatibility to user-space */
-typedef struct snd_emu10k1_fx8010_info emu10k1_fx8010_info_t;
-typedef struct snd_emu10k1_fx8010_control_gpr emu10k1_fx8010_control_gpr_t;
-typedef struct snd_emu10k1_fx8010_code emu10k1_fx8010_code_t;
-typedef struct snd_emu10k1_fx8010_tram emu10k1_fx8010_tram_t;
-typedef struct snd_emu10k1_fx8010_pcm_rec emu10k1_fx8010_pcm_t;
-
#endif /* __SOUND_EMU10K1_H */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6268a4192d5..45c1981c9ca 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -71,6 +71,8 @@ struct snd_pcm_ops {
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
+ int (*wall_clock)(struct snd_pcm_substream *substream,
+ struct timespec *audio_ts);
int (*copy)(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos,
void __user *buf, snd_pcm_uframes_t count);
@@ -281,6 +283,7 @@ struct snd_pcm_runtime {
unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */
snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */
+ u64 hw_ptr_wrap; /* offset for hw_ptr due to boundary wrap-around */
/* -- HW params -- */
snd_pcm_access_t access; /* access mode */
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index 7e950560e59..c7c7788005e 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -1,6 +1,3 @@
-#ifndef __SOUND_SB16_CSP_H
-#define __SOUND_SB16_CSP_H
-
/*
* Copyright (c) 1999 by Uros Bizjak <uros@kss-loka.si>
* Takashi Iwai <tiwai@suse.de>
@@ -22,106 +19,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
+#ifndef __SOUND_SB16_CSP_H
+#define __SOUND_SB16_CSP_H
-/* CSP modes */
-#define SNDRV_SB_CSP_MODE_NONE 0x00
-#define SNDRV_SB_CSP_MODE_DSP_READ 0x01 /* Record from DSP */
-#define SNDRV_SB_CSP_MODE_DSP_WRITE 0x02 /* Play to DSP */
-#define SNDRV_SB_CSP_MODE_QSOUND 0x04 /* QSound */
-
-/* CSP load flags */
-#define SNDRV_SB_CSP_LOAD_FROMUSER 0x01
-#define SNDRV_SB_CSP_LOAD_INITBLOCK 0x02
-
-/* CSP sample width */
-#define SNDRV_SB_CSP_SAMPLE_8BIT 0x01
-#define SNDRV_SB_CSP_SAMPLE_16BIT 0x02
-
-/* CSP channels */
-#define SNDRV_SB_CSP_MONO 0x01
-#define SNDRV_SB_CSP_STEREO 0x02
-
-/* CSP rates */
-#define SNDRV_SB_CSP_RATE_8000 0x01
-#define SNDRV_SB_CSP_RATE_11025 0x02
-#define SNDRV_SB_CSP_RATE_22050 0x04
-#define SNDRV_SB_CSP_RATE_44100 0x08
-#define SNDRV_SB_CSP_RATE_ALL 0x0f
-
-/* CSP running state */
-#define SNDRV_SB_CSP_ST_IDLE 0x00
-#define SNDRV_SB_CSP_ST_LOADED 0x01
-#define SNDRV_SB_CSP_ST_RUNNING 0x02
-#define SNDRV_SB_CSP_ST_PAUSED 0x04
-#define SNDRV_SB_CSP_ST_AUTO 0x08
-#define SNDRV_SB_CSP_ST_QSOUND 0x10
-
-/* maximum QSound value (180 degrees right) */
-#define SNDRV_SB_CSP_QSOUND_MAX_RIGHT 0x20
-
-/* maximum microcode RIFF file size */
-#define SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE 0x3000
-
-/* microcode header */
-struct snd_sb_csp_mc_header {
- char codec_name[16]; /* id name of codec */
- unsigned short func_req; /* requested function */
-};
-
-/* microcode to be loaded */
-struct snd_sb_csp_microcode {
- struct snd_sb_csp_mc_header info;
- unsigned char data[SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE];
-};
-
-/* start CSP with sample_width in mono/stereo */
-struct snd_sb_csp_start {
- int sample_width; /* sample width, look above */
- int channels; /* channels, look above */
-};
-
-/* CSP information */
-struct snd_sb_csp_info {
- char codec_name[16]; /* id name of codec */
- unsigned short func_nr; /* function number */
- unsigned int acc_format; /* accepted PCM formats */
- unsigned short acc_channels; /* accepted channels */
- unsigned short acc_width; /* accepted sample width */
- unsigned short acc_rates; /* accepted sample rates */
- unsigned short csp_mode; /* CSP mode, see above */
- unsigned short run_channels; /* current channels */
- unsigned short run_width; /* current sample width */
- unsigned short version; /* version id: 0x10 - 0x1f */
- unsigned short state; /* state bits */
-};
-
-/* HWDEP controls */
-/* get CSP information */
-#define SNDRV_SB_CSP_IOCTL_INFO _IOR('H', 0x10, struct snd_sb_csp_info)
-/* load microcode to CSP */
-/* NOTE: struct snd_sb_csp_microcode overflows the max size (13 bits)
- * defined for some architectures like MIPS, and it leads to build errors.
- * (x86 and co have 14-bit size, thus it's valid, though.)
- * As a workaround for skipping the size-limit check, here we don't use the
- * normal _IOW() macro but _IOC() with the manual argument.
- */
-#define SNDRV_SB_CSP_IOCTL_LOAD_CODE \
- _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
-/* unload microcode from CSP */
-#define SNDRV_SB_CSP_IOCTL_UNLOAD_CODE _IO('H', 0x12)
-/* start CSP */
-#define SNDRV_SB_CSP_IOCTL_START _IOW('H', 0x13, struct snd_sb_csp_start)
-/* stop CSP */
-#define SNDRV_SB_CSP_IOCTL_STOP _IO('H', 0x14)
-/* pause CSP and DMA transfer */
-#define SNDRV_SB_CSP_IOCTL_PAUSE _IO('H', 0x15)
-/* restart CSP and DMA transfer */
-#define SNDRV_SB_CSP_IOCTL_RESTART _IO('H', 0x16)
-
-#ifdef __KERNEL__
#include <sound/sb.h>
#include <sound/hwdep.h>
#include <linux/firmware.h>
+#include <uapi/sound/sb16_csp.h>
struct snd_sb_csp;
@@ -183,6 +87,4 @@ struct snd_sb_csp {
};
int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep);
-#endif
-
#endif /* __SOUND_SB16_CSP */
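
For the user-space side that now consumes the split-out uapi header, a hypothetical microcode load through the SB16 hwdep node could look like the following; the device path, codec name and function id are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/sb16_csp.h>	/* installed from include/uapi/sound/ */

int main(void)
{
	static struct snd_sb_csp_microcode mc;	/* ~12 KB, keep it static */
	int fd = open("/dev/snd/hwC0D0", O_RDWR);

	if (fd < 0) {
		perror("open hwdep");
		return 1;
	}
	strcpy(mc.info.codec_name, "example");
	mc.info.func_req = 1;	/* placeholder function id */
	/* mc.data would be filled from a RIFF microcode image here */
	if (ioctl(fd, SNDRV_SB_CSP_IOCTL_LOAD_CODE, &mc) < 0)
		perror("SNDRV_SB_CSP_IOCTL_LOAD_CODE");
	close(fd);
	return 0;
}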
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index 906010344dd..cc1c919c643 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -26,6 +26,7 @@
* A: inversion
* B: format mode
* C: chip specific
+ * D: clock selector for master mode
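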
*/
/* A: clock inversion */
@@ -44,6 +45,11 @@
#define SH_FSI_OPTION_MASK 0x00000F00
#define SH_FSI_ENABLE_STREAM_MODE (1 << 8) /* for 16bit data */
+/* D: clock selector for master mode */
+#define SH_FSI_CLK_MASK 0x0000F000
+#define SH_FSI_CLK_EXTERNAL (0 << 12)
+#define SH_FSI_CLK_CPG (1 << 12) /* FSIxCK + FSI-DIV */
+
/*
* set_rate return value
*
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 628db7bca4f..3953cea0ecf 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -242,7 +242,6 @@ struct snd_soc_dai {
unsigned int symmetric_rates:1;
struct snd_pcm_runtime *runtime;
unsigned int active;
- unsigned char pop_wait:1;
unsigned char probed:1;
struct snd_soc_dapm_widget *playback_widget;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 91244a096c1..769e27c774a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1039,6 +1039,7 @@ struct snd_soc_pcm_runtime {
struct snd_soc_dpcm_runtime dpcm[2];
long pmdown_time;
+ unsigned char pop_wait:1;
/* runtime devices */
struct snd_pcm *pcm;
diff --git a/include/sound/tlv320aic32x4.h b/include/sound/tlv320aic32x4.h
index c009f70b402..24e5d991f14 100644
--- a/include/sound/tlv320aic32x4.h
+++ b/include/sound/tlv320aic32x4.h
@@ -26,6 +26,7 @@ struct aic32x4_pdata {
u32 power_cfg;
u32 micpga_routing;
bool swapdacs;
+ int rstn_gpio;
};
#endif
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 4f67c762cd7..f634f8f85db 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -27,12 +27,6 @@
#include <sound/hwdep.h>
#include <linux/interrupt.h>
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
-#if !defined(CONFIG_USE_VXLOADER) && !defined(CONFIG_SND_VX_LIB) /* built-in kernel */
-#define SND_VX_FW_LOADER /* use the standard firmware loader */
-#endif
-#endif
-
struct firmware;
struct device;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 2acd54018b6..507910992c5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -9,6 +9,8 @@ struct se_subsystem_api {
struct list_head sub_api_list;
char name[16];
+ char inquiry_prod[16];
+ char inquiry_rev[4];
struct module *owner;
u8 transport_type;
@@ -16,46 +18,45 @@ struct se_subsystem_api {
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
int (*pmode_enable_hba)(struct se_hba *, unsigned long);
- void *(*allocate_virtdevice)(struct se_hba *, const char *);
- struct se_device *(*create_virtdevice)(struct se_hba *,
- struct se_subsystem_dev *, void *);
- void (*free_device)(void *);
+
+ struct se_device *(*alloc_device)(struct se_hba *, const char *);
+ int (*configure_device)(struct se_device *);
+ void (*free_device)(struct se_device *device);
+
+ ssize_t (*set_configfs_dev_params)(struct se_device *,
+ const char *, ssize_t);
+ ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
+
void (*transport_complete)(struct se_cmd *cmd,
struct scatterlist *,
unsigned char *);
- int (*parse_cdb)(struct se_cmd *cmd);
- ssize_t (*check_configfs_dev_params)(struct se_hba *,
- struct se_subsystem_dev *);
- ssize_t (*set_configfs_dev_params)(struct se_hba *,
- struct se_subsystem_dev *, const char *, ssize_t);
- ssize_t (*show_configfs_dev_params)(struct se_hba *,
- struct se_subsystem_dev *, char *);
- u32 (*get_device_rev)(struct se_device *);
+ sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_cmd *);
};
-struct spc_ops {
- int (*execute_rw)(struct se_cmd *cmd);
- int (*execute_sync_cache)(struct se_cmd *cmd);
- int (*execute_write_same)(struct se_cmd *cmd);
- int (*execute_unmap)(struct se_cmd *cmd);
+struct sbc_ops {
+ sense_reason_t (*execute_rw)(struct se_cmd *cmd);
+ sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
+ sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
+ sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
+ sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
};
int transport_subsystem_register(struct se_subsystem_api *);
void transport_subsystem_release(struct se_subsystem_api *);
-struct se_device *transport_add_device_to_core_hba(struct se_hba *,
- struct se_subsystem_api *, struct se_subsystem_dev *, u32,
- void *, struct se_dev_limits *, const char *, const char *);
-
void target_complete_cmd(struct se_cmd *, u8);
-int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops);
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
-int spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
+sector_t spc_get_write_same_sectors(struct se_cmd *cmd);
+
+sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+u32 sbc_get_device_rev(struct se_device *dev);
+u32 sbc_get_device_type(struct se_device *dev);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
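
A minimal sketch of a backend adapted to the reworked se_subsystem_api; the "example" backend and its helpers are hypothetical, and only the ops touched by this change are shown (the alloc/configure/free trio replacing the virtdevice callbacks, plus parse_cdb returning the new sense_reason_t through sbc_parse_cdb()).

#include <linux/module.h>
#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

struct example_dev {
	struct se_device dev;	/* embedded, returned to the core */
	/* backend-private state would follow */
};

static sense_reason_t example_execute_rw(struct se_cmd *cmd)
{
	/* a real backend issues the I/O and calls target_complete_cmd() */
	return 0;
}

static struct sbc_ops example_sbc_ops = {
	.execute_rw	= example_execute_rw,
};

static struct se_device *example_alloc_device(struct se_hba *hba,
					      const char *name)
{
	struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);

	return edev ? &edev->dev : NULL;
}

static int example_configure_device(struct se_device *dev)
{
	/* validate configfs parameters and fill dev->dev_attrib here */
	return 0;
}

static void example_free_device(struct se_device *dev)
{
	kfree(container_of(dev, struct example_dev, dev));
}

static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &example_sbc_ops);
}

static struct se_subsystem_api example_template = {
	.name			= "example",
	.inquiry_prod		= "EXAMPLE",
	.inquiry_rev		= "4.0",
	.owner			= THIS_MODULE,
	.alloc_device		= example_alloc_device,
	.configure_device	= example_configure_device,
	.free_device		= example_free_device,
	.parse_cdb		= example_parse_cdb,
};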
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5be89373cea..7cae2360221 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -62,20 +62,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-/*
- * struct se_subsystem_dev->su_dev_flags
-*/
-#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
-#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
-#define SDF_USING_UDEV_PATH 0x00000004
-#define SDF_USING_ALIAS 0x00000008
-
-/*
- * struct se_device->dev_flags
- */
-#define DF_SPC2_RESERVATIONS 0x00000001
-#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000002
-
/* struct se_dev_attrib sanity values */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
@@ -85,6 +71,8 @@
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
+/* Default max_write_same_len, disabled by default */
+#define DA_MAX_WRITE_SAME_LEN 0
/* Default max transfer length */
#define DA_FABRIC_MAX_SECTORS 8192
/* Emulation for Direct Page Out */
@@ -107,8 +95,6 @@
*/
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
-#define DA_EMULATE_RESERVATIONS 0
-/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
@@ -160,8 +146,6 @@ enum se_cmd_flags_table {
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_CDB = 0x00000008,
SCF_SCSI_TMR_CDB = 0x00000010,
- SCF_SCSI_CDB_EXCEPTION = 0x00000020,
- SCF_SCSI_RESERVATION_CONFLICT = 0x00000040,
SCF_FUA = 0x00000080,
SCF_SE_LUN_CMD = 0x00000100,
SCF_BIDI = 0x00000400,
@@ -182,38 +166,33 @@ enum transport_lunflags_table {
TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
};
-/* struct se_device->dev_status */
-enum transport_device_status_table {
- TRANSPORT_DEVICE_ACTIVATED = 0x01,
- TRANSPORT_DEVICE_DEACTIVATED = 0x02,
- TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
- TRANSPORT_DEVICE_SHUTDOWN = 0x08,
- TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
- TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
-};
-
/*
- * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * Used by transport_send_check_condition_and_sense()
* to signal which ASC/ASCQ sense payload should be built.
*/
+typedef unsigned __bitwise__ sense_reason_t;
+
enum tcm_sense_reason_table {
- TCM_NON_EXISTENT_LUN = 0x01,
- TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
- TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
- TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
- TCM_SERVICE_CRC_ERROR = 0x05,
- TCM_SNACK_REJECTED = 0x06,
- TCM_SECTOR_COUNT_TOO_MANY = 0x07,
- TCM_INVALID_CDB_FIELD = 0x08,
- TCM_INVALID_PARAMETER_LIST = 0x09,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
- TCM_UNKNOWN_MODE_PAGE = 0x0b,
- TCM_WRITE_PROTECTED = 0x0c,
- TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
- TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
- TCM_CHECK_CONDITION_NOT_READY = 0x0f,
- TCM_RESERVATION_CONFLICT = 0x10,
- TCM_ADDRESS_OUT_OF_RANGE = 0x11,
+#define R(x) (__force sense_reason_t )(x)
+ TCM_NON_EXISTENT_LUN = R(0x01),
+ TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02),
+ TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03),
+ TCM_UNEXPECTED_UNSOLICITED_DATA = R(0x04),
+ TCM_SERVICE_CRC_ERROR = R(0x05),
+ TCM_SNACK_REJECTED = R(0x06),
+ TCM_SECTOR_COUNT_TOO_MANY = R(0x07),
+ TCM_INVALID_CDB_FIELD = R(0x08),
+ TCM_INVALID_PARAMETER_LIST = R(0x09),
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = R(0x0a),
+ TCM_UNKNOWN_MODE_PAGE = R(0x0b),
+ TCM_WRITE_PROTECTED = R(0x0c),
+ TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d),
+ TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e),
+ TCM_CHECK_CONDITION_NOT_READY = R(0x0f),
+ TCM_RESERVATION_CONFLICT = R(0x10),
+ TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
+ TCM_OUT_OF_RESOURCES = R(0x12),
+#undef R
};
enum target_sc_flags_table {
@@ -246,30 +225,6 @@ enum tcm_tmrsp_table {
TMR_FUNCTION_REJECTED = 255,
};
-struct se_obj {
- atomic_t obj_access_count;
-};
-
-/*
- * Used by TCM Core internally to signal if ALUA emulation is enabled or
- * disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
- SPC_ALUA_PASSTHROUGH,
- SPC2_ALUA_DISABLED,
- SPC3_ALUA_EMULATED
-} t10_alua_index_t;
-
-/*
- * Used by TCM Core internally to signal if SAM Task Attribute emulation
- * is enabled or disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
- SAM_TASK_ATTR_PASSTHROUGH,
- SAM_TASK_ATTR_UNTAGGED,
- SAM_TASK_ATTR_EMULATED
-} t10_task_attr_index_t;
-
/*
* Used for target SCSI statistics
*/
@@ -283,17 +238,15 @@ typedef enum {
struct se_cmd;
struct t10_alua {
- t10_alua_index_t alua_type;
/* ALUA Target Port Group ID */
u16 alua_tg_pt_gps_counter;
u32 alua_tg_pt_gps_count;
spinlock_t tg_pt_gps_lock;
- struct se_subsystem_dev *t10_sub_dev;
+ struct se_device *t10_dev;
/* Used for default ALUA Target Port Group */
struct t10_alua_tg_pt_gp *default_tg_pt_gp;
/* Used for default ALUA Target Port Group ConfigFS group */
struct config_group alua_tg_pt_gps_group;
- int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
struct list_head tg_pt_gps_list;
};
@@ -335,7 +288,7 @@ struct t10_alua_tg_pt_gp {
atomic_t tg_pt_gp_ref_cnt;
spinlock_t tg_pt_gp_lock;
struct mutex tg_pt_gp_md_mutex;
- struct se_subsystem_dev *tg_pt_gp_su_dev;
+ struct se_device *tg_pt_gp_dev;
struct config_group tg_pt_gp_group;
struct list_head tg_pt_gp_list;
struct list_head tg_pt_gp_mem_list;
@@ -366,23 +319,11 @@ struct t10_wwn {
char revision[4];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
- struct se_subsystem_dev *t10_sub_dev;
+ struct se_device *t10_dev;
struct config_group t10_wwn_group;
struct list_head t10_vpd_list;
};
-
-/*
- * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
- * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough
- * mode
- */
-typedef enum {
- SPC_PASSTHROUGH,
- SPC2_RESERVATIONS,
- SPC3_PERSISTENT_RESERVATIONS
-} t10_reservations_index_t;
-
struct t10_pr_registration {
/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN 16
@@ -424,18 +365,6 @@ struct t10_pr_registration {
struct list_head pr_reg_atp_mem_list;
};
-/*
- * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
- * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
- * core_setup_reservations()
- */
-struct t10_reservation_ops {
- int (*t10_reservation_check)(struct se_cmd *, u32 *);
- int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
- int (*t10_pr_register)(struct se_cmd *);
- int (*t10_pr_clear)(struct se_cmd *);
-};
-
struct t10_reservation {
/* Reservation effects all target ports */
int pr_all_tg_pt;
@@ -446,7 +375,6 @@ struct t10_reservation {
#define PR_APTPL_BUF_LEN 8192
u32 pr_aptpl_buf_len;
u32 pr_generation;
- t10_reservations_index_t res_type;
spinlock_t registration_lock;
spinlock_t aptpl_reg_lock;
/*
@@ -462,7 +390,6 @@ struct t10_reservation {
struct se_node_acl *pr_res_holder;
struct list_head registration_list;
struct list_head aptpl_reg_list;
- struct t10_reservation_ops pr_ops;
};
struct se_tmr_req {
@@ -485,7 +412,6 @@ struct se_cmd {
u8 scsi_status;
u8 scsi_asc;
u8 scsi_ascq;
- u8 scsi_sense_reason;
u16 scsi_sense_length;
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
@@ -523,7 +449,7 @@ struct se_cmd {
struct completion cmd_wait_comp;
struct kref cmd_kref;
struct target_core_fabric_ops *se_tfo;
- int (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
void (*transport_complete_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
@@ -581,6 +507,8 @@ struct se_node_acl {
bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
+#define MAX_ACL_TAG_SIZE 64
+ char acl_tag[MAX_ACL_TAG_SIZE];
u64 num_cmds;
u64 read_bytes;
u64 write_bytes;
@@ -662,15 +590,6 @@ struct se_dev_entry {
struct list_head ua_list;
};
-struct se_dev_limits {
- /* Max supported HW queue depth */
- u32 hw_queue_depth;
- /* Max supported virtual queue depth */
- u32 queue_depth;
- /* From include/linux/blkdev.h for the other HW/SW limits. */
- struct queue_limits limits;
-};
-
struct se_dev_attrib {
int emulate_dpo;
int emulate_fua_write;
@@ -680,8 +599,6 @@ struct se_dev_attrib {
int emulate_tas;
int emulate_tpu;
int emulate_tpws;
- int emulate_reservations;
- int emulate_alua;
int enforce_pr_isids;
int is_nonrot;
int emulate_rest_reord;
@@ -696,7 +613,8 @@ struct se_dev_attrib {
u32 max_unmap_block_desc_count;
u32 unmap_granularity;
u32 unmap_granularity_alignment;
- struct se_subsystem_dev *da_sub_dev;
+ u32 max_write_same_len;
+ struct se_device *da_dev;
struct config_group da_group;
};
@@ -707,48 +625,25 @@ struct se_dev_stat_grps {
struct config_group scsi_lu_group;
};
-struct se_subsystem_dev {
-/* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
-#define SE_DEV_ALIAS_LEN 512
- unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
-/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
-#define SE_UDEV_PATH_LEN 512
- unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
- u32 su_dev_flags;
- struct se_hba *se_dev_hba;
- struct se_device *se_dev_ptr;
- struct se_dev_attrib se_dev_attrib;
- /* T10 Asymmetric Logical Unit Assignment for Target Ports */
- struct t10_alua t10_alua;
- /* T10 Inquiry and VPD WWN Information */
- struct t10_wwn t10_wwn;
- /* T10 SPC-2 + SPC-3 Reservations */
- struct t10_reservation t10_pr;
- spinlock_t se_dev_lock;
- void *se_dev_su_ptr;
- struct config_group se_dev_group;
- /* For T10 Reservations */
- struct config_group se_dev_pr_group;
- /* For target_core_stat.c groups */
- struct se_dev_stat_grps dev_stat_grps;
-};
-
struct se_device {
+#define SE_DEV_LINK_MAGIC 0xfeeddeef
+ u32 dev_link_magic;
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
u32 dev_cur_ordered_id;
u32 dev_flags;
+#define DF_CONFIGURED 0x00000001
+#define DF_FIRMWARE_VPD_UNIT_SERIAL 0x00000002
+#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
+#define DF_USING_UDEV_PATH 0x00000008
+#define DF_USING_ALIAS 0x00000010
u32 dev_port_count;
- /* See transport_device_status_table */
- u32 dev_status;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
u64 dev_res_bin_isid;
- t10_task_attr_index_t dev_task_attr_type;
/* Pointer to transport specific device structure */
- void *dev_ptr;
u32 dev_index;
u64 creation_time;
u32 num_resets;
@@ -761,13 +656,13 @@ struct se_device {
atomic_t dev_ordered_id;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
- struct se_obj dev_obj;
- struct se_obj dev_access_obj;
- struct se_obj dev_export_obj;
+ int export_count;
spinlock_t delayed_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_status_lock;
+ unsigned int dev_reservation_flags;
+#define DRF_SPC2_RESERVATIONS 0x00000001
+#define DRF_SPC2_RESERVATIONS_WITH_ISID 0x00000002
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@@ -786,7 +681,20 @@ struct se_device {
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
- struct se_subsystem_dev *se_sub_dev;
+ /* T10 Inquiry and VPD WWN Information */
+ struct t10_wwn t10_wwn;
+ /* T10 Asymmetric Logical Unit Assignment for Target Ports */
+ struct t10_alua t10_alua;
+ /* T10 SPC-2 + SPC-3 Reservations */
+ struct t10_reservation t10_pr;
+ struct se_dev_attrib dev_attrib;
+ struct config_group dev_group;
+ struct config_group dev_pr_group;
+ struct se_dev_stat_grps dev_stat_grps;
+#define SE_DEV_ALIAS_LEN 512 /* must be less than PAGE_SIZE */
+ unsigned char dev_alias[SE_DEV_ALIAS_LEN];
+#define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */
+ unsigned char udev_path[SE_UDEV_PATH_LEN];
/* Pointer to template of function pointers for transport */
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
@@ -803,8 +711,6 @@ struct se_hba {
u32 hba_index;
/* Pointer to transport specific host structure. */
void *hba_ptr;
- /* Linked list for struct se_device */
- struct list_head hba_dev_list;
struct list_head hba_node;
spinlock_t device_lock;
struct config_group hba_group;
@@ -820,6 +726,8 @@ struct se_port_stat_grps {
};
struct se_lun {
+#define SE_LUN_LINK_MAGIC 0xffff7771
+ u32 lun_link_magic;
/* See transport_lun_status_table */
enum transport_lun_status_table lun_status;
u32 lun_access;
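
A small illustration of how the new sense_reason_t is meant to flow (the example_ functions are hypothetical): a handler returns a TCM_* reason instead of stashing it in the removed scsi_sense_reason field, and the caller hands it to transport_generic_request_failure(), whose signature gains the extra argument in target_core_fabric.h below.

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

static sense_reason_t example_check_write_same(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sector_t sectors = spc_get_write_same_sectors(cmd);

	if (dev->dev_attrib.max_write_same_len &&
	    sectors > dev->dev_attrib.max_write_same_len)
		return TCM_INVALID_CDB_FIELD;
	return 0;
}

static void example_submit(struct se_cmd *cmd)
{
	sense_reason_t ret = example_check_write_same(cmd);

	if (ret)
		transport_generic_request_failure(cmd, ret);
}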
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 81ddb4ae6c3..aaa1ee6ab39 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -98,8 +98,8 @@ void transport_deregister_session(struct se_session *);
void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
-int transport_lookup_cmd_lun(struct se_cmd *, u32);
-int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
+sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
unsigned char *, unsigned char *, u32, u32, int, int, int,
struct scatterlist *, u32, struct scatterlist *, u32);
@@ -110,9 +110,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *);
-int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
- struct scatterlist *, u32, struct scatterlist *, u32);
-int transport_generic_new_cmd(struct se_cmd *);
+sense_reason_t transport_generic_new_cmd(struct se_cmd *);
void target_execute_cmd(struct se_cmd *cmd);
@@ -120,7 +118,8 @@ void transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
-int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+int transport_send_check_condition_and_sense(struct se_cmd *,
+ sense_reason_t, int);
int target_put_sess_cmd(struct se_session *, struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
@@ -131,7 +130,7 @@ int core_alua_check_nonop_delay(struct se_cmd *);
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
-void transport_generic_request_failure(struct se_cmd *);
+void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *, u32);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
@@ -143,6 +142,8 @@ int core_tpg_del_initiator_node_acl(struct se_portal_group *,
struct se_node_acl *, int);
int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
+int core_tpg_set_initiator_node_tag(struct se_portal_group *,
+ struct se_node_acl *, const char *);
int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
struct se_portal_group *, void *, int);
int core_tpg_deregister(struct se_portal_group *);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 54fab041b22..ea546a4e960 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -45,7 +45,8 @@ struct extent_buffer;
#define show_root_type(obj) \
obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
- (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
+ (obj >= BTRFS_ROOT_TREE_OBJECTID && \
+ obj <= BTRFS_CSUM_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d49b285385e..7e8c36bc708 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -15,6 +15,7 @@ struct ext4_inode_info;
struct mpage_da_data;
struct ext4_map_blocks;
struct ext4_extent;
+struct extent_status;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -450,7 +451,7 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
TP_ARGS(page)
);
-TRACE_EVENT(ext4_invalidatepage,
+DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
TP_PROTO(struct page *page, unsigned long offset),
TP_ARGS(page, offset),
@@ -476,6 +477,18 @@ TRACE_EVENT(ext4_invalidatepage,
(unsigned long) __entry->index, __entry->offset)
);
+DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
+ TP_PROTO(struct page *page, unsigned long offset),
+
+ TP_ARGS(page, offset)
+);
+
+DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
+ TP_PROTO(struct page *page, unsigned long offset),
+
+ TP_ARGS(page, offset)
+);
+
TRACE_EVENT(ext4_discard_blocks,
TP_PROTO(struct super_block *sb, unsigned long long blk,
unsigned long long count),
@@ -1519,10 +1532,9 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
);
DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, unsigned int len, int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, lblk, pblk, len, ret),
+ TP_ARGS(inode, map, ret),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -1530,37 +1542,37 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
__field( ext4_fsblk_t, pblk )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
+ __field( unsigned int, flags )
__field( int, ret )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->pblk = pblk;
- __entry->lblk = lblk;
- __entry->len = len;
+ __entry->pblk = map->m_pblk;
+ __entry->lblk = map->m_lblk;
+ __entry->len = map->m_len;
+ __entry->flags = map->m_flags;
__entry->ret = ret;
),
- TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
+ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->pblk,
- __entry->len, __entry->ret)
+ __entry->len, __entry->flags, __entry->ret)
);
DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, unsigned len, int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, lblk, pblk, len, ret)
+ TP_ARGS(inode, map, ret)
);
DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
- TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, unsigned len, int ret),
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
- TP_ARGS(inode, lblk, pblk, len, ret)
+ TP_ARGS(inode, map, ret)
);
TRACE_EVENT(ext4_ext_load_extent,
@@ -1680,10 +1692,10 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
);
TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
- TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
unsigned int allocated, ext4_fsblk_t newblock),
- TP_ARGS(inode, map, allocated, newblock),
+ TP_ARGS(inode, map, flags, allocated, newblock),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -1699,7 +1711,7 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->flags = map->m_flags;
+ __entry->flags = flags;
__entry->lblk = map->m_lblk;
__entry->pblk = map->m_pblk;
__entry->len = map->m_len;
@@ -1707,7 +1719,7 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
__entry->newblk = newblock;
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x "
"allocated %d newblock %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
@@ -2055,6 +2067,106 @@ TRACE_EVENT(ext4_ext_remove_space_done,
(unsigned short) __entry->eh_entries)
);
+TRACE_EVENT(ext4_es_insert_extent,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+
+ TP_ARGS(inode, start, len),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start )
+ __field( loff_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->start, __entry->len)
+);
+
+TRACE_EVENT(ext4_es_remove_extent,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+
+ TP_ARGS(inode, start, len),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start )
+ __field( loff_t, len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->start, __entry->len)
+);
+
+TRACE_EVENT(ext4_es_find_extent_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t start),
+
+ TP_ARGS(inode, start),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = start;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->start)
+);
+
+TRACE_EVENT(ext4_es_find_extent_exit,
+ TP_PROTO(struct inode *inode, struct extent_status *es,
+ ext4_lblk_t ret),
+
+ TP_ARGS(inode, es, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, start )
+ __field( ext4_lblk_t, len )
+ __field( ext4_lblk_t, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start = es->start;
+ __entry->len = es->len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu es [%u/%u) ret %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->start, __entry->len, __entry->ret)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index d6fd8e5b14b..1eddbf1557f 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -34,6 +34,7 @@
{(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
+ {(unsigned long)__GFP_KMEMCG, "GFP_KMEMCG"}, \
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
{(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
{(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
new file mode 100644
index 00000000000..ec2a6ccfd7e
--- /dev/null
+++ b/include/trace/events/migrate.h
@@ -0,0 +1,51 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM migrate
+
+#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MIGRATE_H
+
+#define MIGRATE_MODE \
+ {MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
+ {MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
+ {MIGRATE_SYNC, "MIGRATE_SYNC"}
+
+#define MIGRATE_REASON \
+ {MR_COMPACTION, "compaction"}, \
+ {MR_MEMORY_FAILURE, "memory_failure"}, \
+ {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
+ {MR_SYSCALL, "syscall_or_cpuset"}, \
+ {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
+ {MR_CMA, "cma"}
+
+TRACE_EVENT(mm_migrate_pages,
+
+ TP_PROTO(unsigned long succeeded, unsigned long failed,
+ enum migrate_mode mode, int reason),
+
+ TP_ARGS(succeeded, failed, mode, reason),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, succeeded)
+ __field( unsigned long, failed)
+ __field( enum migrate_mode, mode)
+ __field( int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->succeeded = succeeded;
+ __entry->failed = failed;
+ __entry->mode = mode;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
+ __entry->succeeded,
+ __entry->failed,
+ __print_symbolic(__entry->mode, MIGRATE_MODE),
+ __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
+#endif /* _TRACE_MIGRATE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
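
A sketch of a call site for the new tracepoint; in-tree the caller lives in mm/migrate.c, and the wrapper function here is only illustrative of the generated trace_mm_migrate_pages() signature.

#include <linux/migrate.h>
#include <linux/migrate_mode.h>
#include <trace/events/migrate.h>

static void example_report_migration(unsigned long nr_succeeded,
				     unsigned long nr_failed)
{
	/* mode and reason symbols resolve via MIGRATE_MODE / MIGRATE_REASON */
	trace_mm_migrate_pages(nr_succeeded, nr_failed,
			       MIGRATE_SYNC, MR_COMPACTION);
}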
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 0a78028984d..6fae30fd16a 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -80,12 +80,6 @@
* SA_RESTORER 0x04000000
*/
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6e595ba545f..2c531f47841 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -690,9 +690,11 @@ __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
compat_sys_process_vm_writev)
#define __NR_kcmp 272
__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 273
+__SYSCALL(__NR_finit_module, sys_finit_module)
#undef __NR_syscalls
-#define __NR_syscalls 273
+#define __NR_syscalls 274
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 1e3481edf06..8d1e2bbee83 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,6 +778,7 @@ struct drm_event_vblank {
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
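
User space can probe the new capability through libdrm's drmGetCap() (assuming headers that carry the new define); when it reads back 1, vblank timestamps are taken from CLOCK_MONOTONIC rather than CLOCK_REALTIME.

#include <stdint.h>
#include <xf86drm.h>

static int example_has_monotonic_timestamps(int drm_fd)
{
	uint64_t cap = 0;

	if (drmGetCap(drm_fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) != 0)
		return 0;
	return cap == 1;
}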
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e2..e7f52c33400 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
__u32 data;
};
+enum drm_exynos_g2d_buf_type {
+ G2D_BUF_USERPTR = 1 << 31,
+};
+
enum drm_exynos_g2d_event_type {
G2D_EVENT_NOT,
G2D_EVENT_NONSTOP,
G2D_EVENT_STOP, /* not yet */
};
+struct drm_exynos_g2d_userptr {
+ unsigned long userptr;
+ unsigned long size;
+};
+
struct drm_exynos_g2d_set_cmdlist {
__u64 cmd;
- __u64 cmd_gem;
+ __u64 cmd_buf;
__u32 cmd_nr;
- __u32 cmd_gem_nr;
+ __u32 cmd_buf_nr;
/* for g2d event */
__u64 event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
__u64 async;
};
+enum drm_exynos_ops_id {
+ EXYNOS_DRM_OPS_SRC,
+ EXYNOS_DRM_OPS_DST,
+ EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+ __u32 hsize;
+ __u32 vsize;
+};
+
+struct drm_exynos_pos {
+ __u32 x;
+ __u32 y;
+ __u32 w;
+ __u32 h;
+};
+
+enum drm_exynos_flip {
+ EXYNOS_DRM_FLIP_NONE = (0 << 0),
+ EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+ EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+ EXYNOS_DRM_DEGREE_0,
+ EXYNOS_DRM_DEGREE_90,
+ EXYNOS_DRM_DEGREE_180,
+ EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+ EXYNOS_DRM_PLANAR_Y,
+ EXYNOS_DRM_PLANAR_CB,
+ EXYNOS_DRM_PLANAR_CR,
+ EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of ipp driver.
+ * @count: count of ipp driver.
+ * @writeback: flag of writeback supporting.
+ * @flip: flag of flip supporting.
+ * @degree: flag of degree information.
+ * @csc: flag of csc supporting.
+ * @crop: flag of crop supporting.
+ * @scale: flag of scale supporting.
+ * @refresh_min: min hz of refresh.
+ * @refresh_max: max hz of refresh.
+ * @crop_min: crop min resolution.
+ * @crop_max: crop max resolution.
+ * @scale_min: scale min resolution.
+ * @scale_max: scale max resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+ __u32 version;
+ __u32 ipp_id;
+ __u32 count;
+ __u32 writeback;
+ __u32 flip;
+ __u32 degree;
+ __u32 csc;
+ __u32 crop;
+ __u32 scale;
+ __u32 refresh_min;
+ __u32 refresh_max;
+ __u32 reserved;
+ struct drm_exynos_sz crop_min;
+ struct drm_exynos_sz crop_max;
+ struct drm_exynos_sz scale_min;
+ struct drm_exynos_sz scale_max;
+};
+
+/**
+ * A structure for ipp config.
+ *
+ * @ops_id: property of operation directions.
+ * @flip: property of mirror, flip.
+ * @degree: property of rotation degree.
+ * @fmt: property of image format.
+ * @sz: property of image size.
+ * @pos: property of image position (cropped for src, scaled for dst).
+ */
+struct drm_exynos_ipp_config {
+ enum drm_exynos_ops_id ops_id;
+ enum drm_exynos_flip flip;
+ enum drm_exynos_degree degree;
+ __u32 fmt;
+ struct drm_exynos_sz sz;
+ struct drm_exynos_pos pos;
+};
+
+enum drm_exynos_ipp_cmd {
+ IPP_CMD_NONE,
+ IPP_CMD_M2M,
+ IPP_CMD_WB,
+ IPP_CMD_OUTPUT,
+ IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source, destination config.
+ * @cmd: definition of command.
+ * @ipp_id: id of ipp driver.
+ * @prop_id: id of property.
+ * @refresh_rate: refresh rate.
+ */
+struct drm_exynos_ipp_property {
+ struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
+ enum drm_exynos_ipp_cmd cmd;
+ __u32 ipp_id;
+ __u32 prop_id;
+ __u32 refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+ IPP_BUF_ENQUEUE,
+ IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation directions.
+ * @buf_type: definition of buffer.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @handle: Y, Cb, Cr each planar handle.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+ enum drm_exynos_ops_id ops_id;
+ enum drm_exynos_ipp_buf_type buf_type;
+ __u32 prop_id;
+ __u32 buf_id;
+ __u32 handle[EXYNOS_DRM_PLANAR_MAX];
+ __u32 reserved;
+ __u64 user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+ IPP_CTRL_PLAY,
+ IPP_CTRL_STOP,
+ IPP_CTRL_PAUSE,
+ IPP_CTRL_RESUME,
+ IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of property.
+ * @ctrl: definition of control.
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+ __u32 prop_id;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
#define DRM_EXYNOS_G2D_EXEC 0x22
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY 0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY 0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF 0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL 0x33
+
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
+#define DRM_EXYNOS_IPP_EVENT 0x80000001
struct drm_exynos_g2d_event {
struct drm_event base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
__u32 reserved;
};
+struct drm_exynos_ipp_event {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 prop_id;
+ __u32 reserved;
+ __u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
#endif /* _UAPI_EXYNOS_DRM_H_ */
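
A hypothetical user-space sketch of the new IPP interface: configure a memory-to-memory property and start it. The format and sizes are placeholders, buffer queueing is elided, and error handling is trimmed.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>

static int example_ipp_m2m_start(int drm_fd)
{
	struct drm_exynos_ipp_property prop;
	struct drm_exynos_ipp_cmd_ctrl ctrl;

	memset(&prop, 0, sizeof(prop));
	prop.cmd = IPP_CMD_M2M;
	prop.config[EXYNOS_DRM_OPS_SRC].ops_id = EXYNOS_DRM_OPS_SRC;
	prop.config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
	prop.config[EXYNOS_DRM_OPS_SRC].sz.hsize = 1280;
	prop.config[EXYNOS_DRM_OPS_SRC].sz.vsize = 720;
	prop.config[EXYNOS_DRM_OPS_SRC].pos.w = 1280;
	prop.config[EXYNOS_DRM_OPS_SRC].pos.h = 720;
	prop.config[EXYNOS_DRM_OPS_DST] = prop.config[EXYNOS_DRM_OPS_SRC];
	prop.config[EXYNOS_DRM_OPS_DST].ops_id = EXYNOS_DRM_OPS_DST;

	if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop))
		return -1;

	/* buffers would be attached with DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF here */

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.prop_id = prop.prop_id;	/* written back by the driver above */
	ctrl.ctrl = IPP_CTRL_PLAY;
	return ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
}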
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 4322b1e7d2e..c4d2e9c7400 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -306,6 +306,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
typedef struct drm_i915_getparam {
int param;
@@ -671,6 +673,20 @@ struct drm_i915_gem_execbuffer2 {
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
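
A sketch of how user space (built against libdrm's include path) might probe the new parameter and set the new execbuffer2 flags; the surrounding execbuffer setup is assumed to exist already.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void example_flag_batch(int drm_fd, struct drm_i915_gem_execbuffer2 *eb)
{
	int has_secure = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SECURE_BATCHES,
		.value = &has_secure,
	};

	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (has_secure)
		eb->flags |= I915_EXEC_SECURE;	/* DRM master/root only */
	eb->flags |= I915_EXEC_IS_PINNED;	/* batch stays pinned, skip workaround */
}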
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 4766c0f6a83..eeda91774c8 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#define RADEON_CS_USE_VM 0x02
+#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX 0
#define RADEON_CS_RING_COMPUTE 1
+#define RADEON_CS_RING_DMA 2
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
/* 0 = normal, + = higher priority, - = lower priority */
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE 0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE 0x13
struct drm_radeon_info {
uint32_t request;
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 91e3a360f61..539b179b349 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -268,8 +268,8 @@ enum {
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 23
-#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2012-07-25)"
+#define DM_VERSION_PATCHLEVEL 1
+#define DM_VERSION_EXTRA "-ioctl (2012-12-18)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index be8c41e2dc1..0c9b44871df 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -503,9 +503,20 @@ union ethtool_flow_union {
__u8 hdata[52];
};
+/**
+ * struct ethtool_flow_ext - additional RX flow fields
+ * @h_dest: destination MAC address
+ * @vlan_etype: VLAN EtherType
+ * @vlan_tci: VLAN tag control information
+ * @data: user defined data
+ *
+ * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
+ * is set in &struct ethtool_rx_flow_spec @flow_type.
+ * @h_dest is valid if %FLOW_MAC_EXT is set.
+ */
struct ethtool_flow_ext {
__u8 padding[2];
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_dest[ETH_ALEN];
__be16 vlan_etype;
__be16 vlan_tci;
__be32 data[2];
@@ -519,7 +530,8 @@ struct ethtool_flow_ext {
* @m_u: Masks for flow field bits to be matched
* @m_ext: Masks for additional field bits to be matched
* Note, all additional fields must be ignored unless @flow_type
- * includes the %FLOW_EXT flag.
+ * includes the %FLOW_EXT or %FLOW_MAC_EXT flag
+ * (see &struct ethtool_flow_ext description).
* @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
* if packets should be discarded
* @location: Location of rule in the table. Locations must be
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index afbb18a0227..5db297514ae 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -163,6 +163,9 @@ struct br_port_msg {
struct br_mdb_entry {
__u32 ifindex;
+#define MDB_TEMPORARY 0
+#define MDB_PERMANENT 1
+ __u8 state;
struct {
union {
__be32 ip4;
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 558828590a6..935119c698a 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -851,6 +851,7 @@ struct input_keymap_entry {
#define MSC_GESTURE 0x02
#define MSC_RAW 0x03
#define MSC_SCAN 0x04
+#define MSC_TIMESTAMP 0x05
#define MSC_MAX 0x07
#define MSC_CNT (MSC_MAX+1)
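
A hypothetical evdev consumer of the new MSC_TIMESTAMP code; the value is a driver-provided hardware timestamp whose unit is driver-specific (typically microseconds).

#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

static void example_drain_events(int evdev_fd)
{
	struct input_event ev;

	while (read(evdev_fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_MSC && ev.code == MSC_TIMESTAMP)
			printf("hw timestamp: %d\n", ev.value);
	}
}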
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 0a6d6ba44c8..e6e5d4b1370 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -167,10 +167,15 @@ struct kvm_pit_config {
#define KVM_EXIT_OSI 18
#define KVM_EXIT_PAPR_HCALL 19
#define KVM_EXIT_S390_UCONTROL 20
+#define KVM_EXIT_WATCHDOG 21
/* For KVM_EXIT_INTERNAL_ERROR */
-#define KVM_INTERNAL_ERROR_EMULATION 1
-#define KVM_INTERNAL_ERROR_SIMUL_EX 2
+/* Emulate instruction failed. */
+#define KVM_INTERNAL_ERROR_EMULATION 1
+/* Encounter unexpected simultaneous exceptions. */
+#define KVM_INTERNAL_ERROR_SIMUL_EX 2
+/* Encounter unexpected vm-exit due to delivery event. */
+#define KVM_INTERNAL_ERROR_DELIVERY_EV 3
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -477,6 +482,8 @@ struct kvm_ppc_smmu_info {
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -626,6 +633,8 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_READONLY_MEM 81
#endif
#define KVM_CAP_IRQFD_RESAMPLE 82
+#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
+#define KVM_CAP_PPC_HTAB_FD 84
#ifdef KVM_CAP_IRQ_ROUTING
@@ -848,6 +857,11 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
/* Available with KVM_CAP_PPC_ALLOC_HTAB */
#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
+#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
+/* Available with KVM_CAP_RMA */
+#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
+/* Available with KVM_CAP_PPC_HTAB_FD */
+#define KVM_PPC_GET_HTAB_FD _IOW(KVMIO, 0xaa, struct kvm_get_htab_fd)
/*
* ioctls for vcpu fds
@@ -911,9 +925,6 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_XCRS */
#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
-#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
-/* Available with KVM_CAP_RMA */
-#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
/* Available with KVM_CAP_SW_TLB */
#define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb)
/* Available with KVM_CAP_ONE_REG */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index e15192cb9cf..873e086ce3a 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -23,10 +23,12 @@
#define EXT4_SUPER_MAGIC 0xEF53
#define BTRFS_SUPER_MAGIC 0x9123683E
#define NILFS_SUPER_MAGIC 0x3434
+#define F2FS_SUPER_MAGIC 0xF2F52010
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
#define PSTOREFS_MAGIC 0x6165676C
+#define EFIVARFS_MAGIC 0xde5e81e4
#define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */
#define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 23e62e0537e..0d11c3dcd3a 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -20,6 +20,7 @@ enum {
MPOL_PREFERRED,
MPOL_BIND,
MPOL_INTERLEAVE,
+ MPOL_LOCAL,
MPOL_MAX, /* always last member of enum */
};
@@ -47,9 +48,15 @@ enum mpol_rebind_step {
/* Flags for mbind */
#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
-#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
-#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
+#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform
+ to policy */
+#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */
+#define MPOL_MF_LAZY (1<<3) /* Modifies '_MOVE: lazy migrate on fault */
+#define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */
+
+#define MPOL_MF_VALID (MPOL_MF_STRICT | \
+ MPOL_MF_MOVE | \
+ MPOL_MF_MOVE_ALL)
/*
* Internal flags that share the struct mempolicy flags word with
@@ -59,6 +66,8 @@ enum mpol_rebind_step {
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
+#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
+#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */
#endif /* _UAPI_LINUX_MEMPOLICY_H */
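
A small user-space sketch of mbind() with the reworded flags, using libnuma's numaif.h declaration; note MPOL_MF_LAZY is defined above but left out of MPOL_MF_VALID, so the sketch sticks to MPOL_MF_MOVE.

#include <numaif.h>

static int example_bind_to_node0(void *addr, unsigned long len)
{
	unsigned long nodemask = 1UL;	/* node 0 only */

	/* move the caller's existing pages so they conform to the policy */
	return mbind(addr, len, MPOL_BIND, &nodemask,
		     sizeof(nodemask) * 8, MPOL_MF_MOVE);
}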
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
new file mode 100644
index 00000000000..38da4258b12
--- /dev/null
+++ b/include/uapi/linux/module.h
@@ -0,0 +1,8 @@
+#ifndef _UAPI_LINUX_MODULE_H
+#define _UAPI_LINUX_MODULE_H
+
+/* Flags for sys_finit_module: */
+#define MODULE_INIT_IGNORE_MODVERSIONS 1
+#define MODULE_INIT_IGNORE_VERMAGIC 2
+
+#endif /* _UAPI_LINUX_MODULE_H */
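
A hypothetical user-space sketch of the new finit_module(2) entry point added in the unistd.h hunk above; glibc had no wrapper at the time, so the raw syscall is used and the asm-generic number is provided as a fallback.

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/module.h>

#ifndef __NR_finit_module
#define __NR_finit_module 273	/* asm-generic number from the hunk above */
#endif

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* flags could also be MODULE_INIT_IGNORE_VERMAGIC, etc. */
	if (syscall(__NR_finit_module, fd, "", 0) != 0) {
		perror("finit_module");
		return 1;
	}
	return 0;
}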
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 20ae747ddf3..ebfadc56d1b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -349,7 +349,7 @@
#define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
-/* PCI-X registers */
+/* PCI-X registers (Type 0 (non-bridge) devices) */
#define PCI_X_CMD 2 /* Modes & Features */
#define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */
@@ -389,6 +389,19 @@
#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
+/* PCI-X registers (Type 1 (bridge) devices) */
+
+#define PCI_X_BRIDGE_SSTATUS 2 /* Secondary Status */
+#define PCI_X_SSTATUS_64BIT 0x0001 /* Secondary AD interface is 64 bits */
+#define PCI_X_SSTATUS_133MHZ 0x0002 /* 133 MHz capable */
+#define PCI_X_SSTATUS_FREQ 0x03c0 /* Secondary Bus Mode and Frequency */
+#define PCI_X_SSTATUS_VERS 0x3000 /* PCI-X Capability Version */
+#define PCI_X_SSTATUS_V1 0x1000 /* Mode 2, not Mode 1 */
+#define PCI_X_SSTATUS_V2 0x2000 /* Mode 1 or Modes 1 and 2 */
+#define PCI_X_SSTATUS_266MHZ 0x4000 /* 266 MHz capable */
+#define PCI_X_SSTATUS_533MHZ 0x8000 /* 533 MHz capable */
+#define PCI_X_BRIDGE_STATUS 4 /* Bridge Status */
+
/* PCI Bridge Subsystem ID registers */
#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */
@@ -445,6 +458,8 @@
#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x1 /* LNKCAP2 SLS Vector bit 0 (2.5GT/s) */
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x2 /* LNKCAP2 SLS Vector bit 1 (5.0GT/s) */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
@@ -456,6 +471,8 @@
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
#define PCI_EXP_LNKCTL 16 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */
#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */
#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */
@@ -544,9 +561,9 @@
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
-#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
-#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
-#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index 1ef6c056a9e..022ab186a81 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -73,7 +73,10 @@
#define PTRACE_O_TRACEEXIT (1 << PTRACE_EVENT_EXIT)
#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
-#define PTRACE_O_MASK 0x000000ff
+/* eventless options */
+#define PTRACE_O_EXITKILL (1 << 20)
+
+#define PTRACE_O_MASK (0x000000ff | PTRACE_O_EXITKILL)
#include <asm/ptrace.h>
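
A hypothetical tracer sketch using the new PTRACE_O_EXITKILL option so the tracee is killed if the tracer goes away; the fallback define mirrors the value introduced above for toolchains without the updated header.

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_O_EXITKILL
#define PTRACE_O_EXITKILL (1 << 20)	/* value from the hunk above */
#endif

static int example_attach(pid_t pid)
{
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
		return -1;
	waitpid(pid, NULL, 0);
	/* ask the kernel to kill the tracee if this tracer exits */
	return ptrace(PTRACE_SETOPTIONS, pid, NULL,
		      (void *)PTRACE_O_EXITKILL);
}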
diff --git a/include/uapi/linux/signal.h b/include/uapi/linux/signal.h
index dff452ed6d0..e1bd50c29de 100644
--- a/include/uapi/linux/signal.h
+++ b/include/uapi/linux/signal.h
@@ -4,5 +4,7 @@
#include <asm/signal.h>
#include <asm/siginfo.h>
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
#endif /* _UAPI_LINUX_SIGNAL_H */
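
The SS_* constants merely move here from asm-generic/signal.h; user-space usage is unchanged, as in this minimal sigaltstack() example.

#include <signal.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),
		.ss_size  = SIGSTKSZ,
		.ss_flags = 0,
	};

	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
		return 1;
	/* SS_DISABLE later turns the alternate stack off again */
	ss.ss_flags = SS_DISABLE;
	return sigaltstack(&ss, NULL) == -1;
}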
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c..0e011eb91b5 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
-#ifdef __arch_swab16
+#ifdef __HAVE_BUILTIN_BSWAP16__
+ return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
-#ifdef __arch_swab32
+#ifdef __HAVE_BUILTIN_BSWAP32__
+ return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
-#ifdef __arch_swab64
+#ifdef __HAVE_BUILTIN_BSWAP64__
+ return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 57bfa59cda7..3cf3e946e33 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -186,6 +186,7 @@ enum v4l2_memory {
V4L2_MEMORY_MMAP = 1,
V4L2_MEMORY_USERPTR = 2,
V4L2_MEMORY_OVERLAY = 3,
+ V4L2_MEMORY_DMABUF = 4,
};
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
@@ -602,6 +603,8 @@ struct v4l2_requestbuffers {
* should be passed to mmap() called on the video node)
* @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
* pointing to this plane
+ * @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
+ * descriptor associated with this plane
* @data_offset: offset in the plane to the start of data; usually 0,
* unless there is a header in front of the data
*
@@ -616,6 +619,7 @@ struct v4l2_plane {
union {
__u32 mem_offset;
unsigned long userptr;
+ __s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
@@ -640,6 +644,8 @@ struct v4l2_plane {
* (or a "cookie" that should be passed to mmap() as offset)
* @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
* a userspace pointer pointing to this buffer
+ * @fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
+ * a userspace file descriptor associated with this buffer
* @planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
* @length: size in bytes of the buffer (NOT its payload) for single-plane
@@ -666,6 +672,7 @@ struct v4l2_buffer {
__u32 offset;
unsigned long userptr;
struct v4l2_plane *planes;
+ __s32 fd;
} m;
__u32 length;
__u32 reserved2;
@@ -687,6 +694,33 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x0800
#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x1000
+/**
+ * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
+ *
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
+ * multiplanar buffers);
+ * @plane: index of the plane to be exported, 0 for single plane queues
+ * @flags: flags for newly created file, currently only O_CLOEXEC is
+ * supported, refer to manual of open syscall for more details
+ * @fd: file descriptor associated with DMABUF (set by driver)
+ *
+ * Contains data used for exporting a video buffer as DMABUF file descriptor.
+ * The buffer is identified by its @index, @type and @plane fields (the same
+ * index and type that are passed to VIDIOC_QUERYBUF). All reserved fields
+ * must be set to zero; they are kept for future extensions of this interface.
+ */
+struct v4l2_exportbuffer {
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 index;
+ __u32 plane;
+ __u32 flags;
+ __s32 fd;
+ __u32 reserved[11];
+};
+
/*
* O V E R L A Y P R E V I E W
*/
@@ -737,7 +771,7 @@ struct v4l2_window {
struct v4l2_captureparm {
__u32 capability; /* Supported modes */
__u32 capturemode; /* Current mode */
- struct v4l2_fract timeperframe; /* Time per frame in .1us units */
+ struct v4l2_fract timeperframe; /* Time per frame in seconds */
__u32 extendedmode; /* Driver-specific extensions */
__u32 readbuffers; /* # of buffers for read */
__u32 reserved[4];
@@ -1888,6 +1922,7 @@ struct v4l2_create_buffers {
#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer)
#define VIDIOC_OVERLAY _IOW('V', 14, int)
#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer)
+#define VIDIOC_EXPBUF _IOWR('V', 16, struct v4l2_exportbuffer)
#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer)
#define VIDIOC_STREAMON _IOW('V', 18, int)
#define VIDIOC_STREAMOFF _IOW('V', 19, int)
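Taken together, V4L2_MEMORY_DMABUF, the new fd member of struct v4l2_buffer /
struct v4l2_plane and the VIDIOC_EXPBUF ioctl let applications export driver
buffers as DMABUF file descriptors. A hedged sketch that exports the first MMAP
buffer of a capture queue; /dev/video0 and driver support for DMABUF export are
illustrative assumptions:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/video0");
		return 1;
	}

	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) == -1) {
		perror("VIDIOC_REQBUFS");
		return 1;
	}

	struct v4l2_exportbuffer expbuf;
	memset(&expbuf, 0, sizeof(expbuf));	/* reserved fields must be zero */
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = 0;			/* first buffer, plane 0 */
	expbuf.flags = O_CLOEXEC;
	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) == -1) {
		perror("VIDIOC_EXPBUF");
		return 1;
	}

	printf("buffer 0 exported as DMABUF fd %d\n", expbuf.fd);
	close(expbuf.fd);
	close(fd);
	return 0;
}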
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 270fb22c581..a7630d04029 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -37,5 +37,6 @@
#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
#define VIRTIO_ID_SCSI 8 /* virtio scsi */
#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index aafaa5aa54d..687ae332200 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -1 +1,7 @@
# UAPI Header export list
+header-y += ib_user_cm.h
+header-y += ib_user_mad.h
+header-y += ib_user_sa.h
+header-y += ib_user_verbs.h
+header-y += rdma_netlink.h
+header-y += rdma_user_cm.h
diff --git a/include/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h
index f79014aa28f..f79014aa28f 100644
--- a/include/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
diff --git a/include/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index d6fce1cbdb9..d6fce1cbdb9 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
diff --git a/include/rdma/ib_user_sa.h b/include/uapi/rdma/ib_user_sa.h
index cfc7c9ba781..cfc7c9ba781 100644
--- a/include/rdma/ib_user_sa.h
+++ b/include/uapi/rdma/ib_user_sa.h
diff --git a/include/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 81aba3a73aa..81aba3a73aa 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
new file mode 100644
index 00000000000..8297285b628
--- /dev/null
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -0,0 +1,37 @@
+#ifndef _UAPI_RDMA_NETLINK_H
+#define _UAPI_RDMA_NETLINK_H
+
+#include <linux/types.h>
+
+enum {
+ RDMA_NL_RDMA_CM = 1
+};
+
+#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
+#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
+#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
+
+enum {
+ RDMA_NL_RDMA_CM_ID_STATS = 0,
+ RDMA_NL_RDMA_CM_NUM_OPS
+};
+
+enum {
+ RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
+ RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
+ RDMA_NL_RDMA_CM_NUM_ATTR,
+};
+
+struct rdma_cm_id_stats {
+ __u32 qp_num;
+ __u32 bound_dev_if;
+ __u32 port_space;
+ __s32 pid;
+ __u8 cm_state;
+ __u8 node_type;
+ __u8 port_num;
+ __u8 qp_type;
+};
+
+
+#endif /* _UAPI_RDMA_NETLINK_H */
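The macros in the new rdma_netlink.h pack the netlink client id into bits 10-15
of the message type and the operation into bits 0-9. A tiny sketch of the
packing and unpacking; the macros are copied locally so the snippet builds even
without the exported header:

#include <stdio.h>

#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)

int main(void)
{
	/* client 1 (RDMA_NL_RDMA_CM), op 0 (RDMA_NL_RDMA_CM_ID_STATS) */
	unsigned int type = RDMA_NL_GET_TYPE(1, 0);

	/* prints type=0x400 client=1 op=0 */
	printf("type=0x%x client=%u op=%u\n",
	       type, RDMA_NL_GET_CLIENT(type), RDMA_NL_GET_OP(type));
	return 0;
}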
diff --git a/include/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 1ee9239ff8c..1ee9239ff8c 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index aafaa5aa54d..0f7d279ebde 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -1 +1,11 @@
# UAPI Header export list
+header-y += asequencer.h
+header-y += asound.h
+header-y += asound_fm.h
+header-y += compress_offload.h
+header-y += compress_params.h
+header-y += emu10k1.h
+header-y += hdsp.h
+header-y += hdspm.h
+header-y += sb16_csp.h
+header-y += sfnt_info.h
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
new file mode 100644
index 00000000000..09c8a00ea50
--- /dev/null
+++ b/include/uapi/sound/asequencer.h
@@ -0,0 +1,614 @@
+/*
+ * Main header file for the ALSA sequencer
+ * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
+ * (c) 1998-1999 by Jaroslav Kysela <perex@perex.cz>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _UAPI__SOUND_ASEQUENCER_H
+#define _UAPI__SOUND_ASEQUENCER_H
+
+
+/** version of the sequencer */
+#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1)
+
+/**
+ * definition of sequencer event types
+ */
+
+/** system messages
+ * event data type = #snd_seq_result
+ */
+#define SNDRV_SEQ_EVENT_SYSTEM 0
+#define SNDRV_SEQ_EVENT_RESULT 1
+
+/** note messages (channel specific)
+ * event data type = #snd_seq_ev_note
+ */
+#define SNDRV_SEQ_EVENT_NOTE 5
+#define SNDRV_SEQ_EVENT_NOTEON 6
+#define SNDRV_SEQ_EVENT_NOTEOFF 7
+#define SNDRV_SEQ_EVENT_KEYPRESS 8
+
+/** control messages (channel specific)
+ * event data type = #snd_seq_ev_ctrl
+ */
+#define SNDRV_SEQ_EVENT_CONTROLLER 10
+#define SNDRV_SEQ_EVENT_PGMCHANGE 11
+#define SNDRV_SEQ_EVENT_CHANPRESS 12
+#define SNDRV_SEQ_EVENT_PITCHBEND 13 /**< from -8192 to 8191 */
+#define SNDRV_SEQ_EVENT_CONTROL14 14 /**< 14 bit controller value */
+#define SNDRV_SEQ_EVENT_NONREGPARAM 15 /**< 14 bit NRPN address + 14 bit unsigned value */
+#define SNDRV_SEQ_EVENT_REGPARAM 16 /**< 14 bit RPN address + 14 bit unsigned value */
+
+/** synchronisation messages
+ * event data type = #snd_seq_ev_ctrl
+ */
+#define SNDRV_SEQ_EVENT_SONGPOS 20 /* Song Position Pointer with LSB and MSB values */
+#define SNDRV_SEQ_EVENT_SONGSEL 21 /* Song Select with song ID number */
+#define SNDRV_SEQ_EVENT_QFRAME 22 /* midi time code quarter frame */
+#define SNDRV_SEQ_EVENT_TIMESIGN 23 /* SMF Time Signature event */
+#define SNDRV_SEQ_EVENT_KEYSIGN 24 /* SMF Key Signature event */
+
+/** timer messages
+ * event data type = snd_seq_ev_queue_control
+ */
+#define SNDRV_SEQ_EVENT_START 30 /* midi Real Time Start message */
+#define SNDRV_SEQ_EVENT_CONTINUE 31 /* midi Real Time Continue message */
+#define SNDRV_SEQ_EVENT_STOP 32 /* midi Real Time Stop message */
+#define SNDRV_SEQ_EVENT_SETPOS_TICK 33 /* set tick queue position */
+#define SNDRV_SEQ_EVENT_SETPOS_TIME 34 /* set realtime queue position */
+#define SNDRV_SEQ_EVENT_TEMPO 35 /* (SMF) Tempo event */
+#define SNDRV_SEQ_EVENT_CLOCK 36 /* midi Real Time Clock message */
+#define SNDRV_SEQ_EVENT_TICK 37 /* midi Real Time Tick message */
+#define SNDRV_SEQ_EVENT_QUEUE_SKEW 38 /* skew queue tempo */
+
+/** others
+ * event data type = none
+ */
+#define SNDRV_SEQ_EVENT_TUNE_REQUEST 40 /* tune request */
+#define SNDRV_SEQ_EVENT_RESET 41 /* reset to power-on state */
+#define SNDRV_SEQ_EVENT_SENSING 42 /* "active sensing" event */
+
+/** echo back, kernel private messages
+ * event data type = any type
+ */
+#define SNDRV_SEQ_EVENT_ECHO 50 /* echo event */
+#define SNDRV_SEQ_EVENT_OSS 51 /* OSS raw event */
+
+/** system status messages (broadcast for subscribers)
+ * event data type = snd_seq_addr
+ */
+#define SNDRV_SEQ_EVENT_CLIENT_START 60 /* new client has connected */
+#define SNDRV_SEQ_EVENT_CLIENT_EXIT 61 /* client has left the system */
+#define SNDRV_SEQ_EVENT_CLIENT_CHANGE 62 /* client status/info has changed */
+#define SNDRV_SEQ_EVENT_PORT_START 63 /* new port was created */
+#define SNDRV_SEQ_EVENT_PORT_EXIT 64 /* port was deleted from system */
+#define SNDRV_SEQ_EVENT_PORT_CHANGE 65 /* port status/info has changed */
+
+/** port connection changes
+ * event data type = snd_seq_connect
+ */
+#define SNDRV_SEQ_EVENT_PORT_SUBSCRIBED 66 /* ports connected */
+#define SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED 67 /* ports disconnected */
+
+/* 70-89: synthesizer events - obsoleted */
+
+/** user-defined events with fixed length
+ * event data type = any
+ */
+#define SNDRV_SEQ_EVENT_USR0 90
+#define SNDRV_SEQ_EVENT_USR1 91
+#define SNDRV_SEQ_EVENT_USR2 92
+#define SNDRV_SEQ_EVENT_USR3 93
+#define SNDRV_SEQ_EVENT_USR4 94
+#define SNDRV_SEQ_EVENT_USR5 95
+#define SNDRV_SEQ_EVENT_USR6 96
+#define SNDRV_SEQ_EVENT_USR7 97
+#define SNDRV_SEQ_EVENT_USR8 98
+#define SNDRV_SEQ_EVENT_USR9 99
+
+/* 100-118: instrument layer - obsoleted */
+/* 119-129: reserved */
+
+/* 130-139: variable length events
+ * event data type = snd_seq_ev_ext
+ * (SNDRV_SEQ_EVENT_LENGTH_VARIABLE must be set)
+ */
+#define SNDRV_SEQ_EVENT_SYSEX 130 /* system exclusive data (variable length) */
+#define SNDRV_SEQ_EVENT_BOUNCE 131 /* error event */
+/* 132-134: reserved */
+#define SNDRV_SEQ_EVENT_USR_VAR0 135
+#define SNDRV_SEQ_EVENT_USR_VAR1 136
+#define SNDRV_SEQ_EVENT_USR_VAR2 137
+#define SNDRV_SEQ_EVENT_USR_VAR3 138
+#define SNDRV_SEQ_EVENT_USR_VAR4 139
+
+/* 150-151: kernel events with quote - DO NOT use in user clients */
+#define SNDRV_SEQ_EVENT_KERNEL_ERROR 150
+#define SNDRV_SEQ_EVENT_KERNEL_QUOTE 151 /* obsolete */
+
+/* 152-191: reserved */
+
+/* 192-254: hardware specific events */
+
+/* 255: special event */
+#define SNDRV_SEQ_EVENT_NONE 255
+
+
+typedef unsigned char snd_seq_event_type_t;
+
+/** event address */
+struct snd_seq_addr {
+ unsigned char client; /**< Client number: 0..255, 255 = broadcast to all clients */
+ unsigned char port; /**< Port within client: 0..255, 255 = broadcast to all ports */
+};
+
+/** port connection */
+struct snd_seq_connect {
+ struct snd_seq_addr sender;
+ struct snd_seq_addr dest;
+};
+
+
+#define SNDRV_SEQ_ADDRESS_UNKNOWN 253 /* unknown source */
+#define SNDRV_SEQ_ADDRESS_SUBSCRIBERS 254 /* send event to all subscribed ports */
+#define SNDRV_SEQ_ADDRESS_BROADCAST 255 /* send event to all queues/clients/ports/channels */
+#define SNDRV_SEQ_QUEUE_DIRECT 253 /* direct dispatch */
+
+ /* event mode flag - NOTE: only 8 bits available! */
+#define SNDRV_SEQ_TIME_STAMP_TICK (0<<0) /* timestamp in clock ticks */
+#define SNDRV_SEQ_TIME_STAMP_REAL (1<<0) /* timestamp in real time */
+#define SNDRV_SEQ_TIME_STAMP_MASK (1<<0)
+
+#define SNDRV_SEQ_TIME_MODE_ABS (0<<1) /* absolute timestamp */
+#define SNDRV_SEQ_TIME_MODE_REL (1<<1) /* relative to current time */
+#define SNDRV_SEQ_TIME_MODE_MASK (1<<1)
+
+#define SNDRV_SEQ_EVENT_LENGTH_FIXED (0<<2) /* fixed event size */
+#define SNDRV_SEQ_EVENT_LENGTH_VARIABLE (1<<2) /* variable event size */
+#define SNDRV_SEQ_EVENT_LENGTH_VARUSR (2<<2) /* variable event size - user memory space */
+#define SNDRV_SEQ_EVENT_LENGTH_MASK (3<<2)
+
+#define SNDRV_SEQ_PRIORITY_NORMAL (0<<4) /* normal priority */
+#define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
+#define SNDRV_SEQ_PRIORITY_MASK (1<<4)
+
+
+ /* note event */
+struct snd_seq_ev_note {
+ unsigned char channel;
+ unsigned char note;
+ unsigned char velocity;
+ unsigned char off_velocity; /* only for SNDRV_SEQ_EVENT_NOTE */
+ unsigned int duration; /* only for SNDRV_SEQ_EVENT_NOTE */
+};
+
+ /* controller event */
+struct snd_seq_ev_ctrl {
+ unsigned char channel;
+ unsigned char unused1, unused2, unused3; /* pad */
+ unsigned int param;
+ signed int value;
+};
+
+ /* generic set of bytes (12x8 bit) */
+struct snd_seq_ev_raw8 {
+ unsigned char d[12]; /* 8 bit value */
+};
+
+ /* generic set of integers (3x32 bit) */
+struct snd_seq_ev_raw32 {
+ unsigned int d[3]; /* 32 bit value */
+};
+
+ /* external stored data */
+struct snd_seq_ev_ext {
+ unsigned int len; /* length of data */
+ void *ptr; /* pointer to data (note: may be 64-bit) */
+} __attribute__((packed));
+
+struct snd_seq_result {
+ int event; /* processed event type */
+ int result;
+};
+
+
+struct snd_seq_real_time {
+ unsigned int tv_sec; /* seconds */
+ unsigned int tv_nsec; /* nanoseconds */
+};
+
+typedef unsigned int snd_seq_tick_time_t; /* midi ticks */
+
+union snd_seq_timestamp {
+ snd_seq_tick_time_t tick;
+ struct snd_seq_real_time time;
+};
+
+struct snd_seq_queue_skew {
+ unsigned int value;
+ unsigned int base;
+};
+
+ /* queue timer control */
+struct snd_seq_ev_queue_control {
+ unsigned char queue; /* affected queue */
+ unsigned char pad[3]; /* reserved */
+ union {
+ signed int value; /* affected value (e.g. tempo) */
+ union snd_seq_timestamp time; /* time */
+ unsigned int position; /* sync position */
+ struct snd_seq_queue_skew skew;
+ unsigned int d32[2];
+ unsigned char d8[8];
+ } param;
+};
+
+ /* quoted event - inside the kernel only */
+struct snd_seq_ev_quote {
+ struct snd_seq_addr origin; /* original sender */
+ unsigned short value; /* optional data */
+ struct snd_seq_event *event; /* quoted event */
+} __attribute__((packed));
+
+
+ /* sequencer event */
+struct snd_seq_event {
+ snd_seq_event_type_t type; /* event type */
+ unsigned char flags; /* event flags */
+ char tag;
+
+ unsigned char queue; /* schedule queue */
+ union snd_seq_timestamp time; /* schedule time */
+
+
+ struct snd_seq_addr source; /* source address */
+ struct snd_seq_addr dest; /* destination address */
+
+ union { /* event data... */
+ struct snd_seq_ev_note note;
+ struct snd_seq_ev_ctrl control;
+ struct snd_seq_ev_raw8 raw8;
+ struct snd_seq_ev_raw32 raw32;
+ struct snd_seq_ev_ext ext;
+ struct snd_seq_ev_queue_control queue;
+ union snd_seq_timestamp time;
+ struct snd_seq_addr addr;
+ struct snd_seq_connect connect;
+ struct snd_seq_result result;
+ struct snd_seq_ev_quote quote;
+ } data;
+};
+
+
+/*
+ * bounce event - stored as variable size data
+ */
+struct snd_seq_event_bounce {
+ int err;
+ struct snd_seq_event event;
+ /* external data follows here. */
+};
+
+
+ /* system information */
+struct snd_seq_system_info {
+ int queues; /* maximum queues count */
+ int clients; /* maximum clients count */
+ int ports; /* maximum ports per client */
+ int channels; /* maximum channels per port */
+ int cur_clients; /* current clients */
+ int cur_queues; /* current queues */
+ char reserved[24];
+};
+
+
+ /* system running information */
+struct snd_seq_running_info {
+ unsigned char client; /* client id */
+ unsigned char big_endian; /* 1 = big-endian */
+ unsigned char cpu_mode; /* 4 = 32bit, 8 = 64bit */
+ unsigned char pad; /* reserved */
+ unsigned char reserved[12];
+};
+
+
+ /* known client numbers */
+#define SNDRV_SEQ_CLIENT_SYSTEM 0
+ /* internal client numbers */
+#define SNDRV_SEQ_CLIENT_DUMMY 14 /* midi through */
+#define SNDRV_SEQ_CLIENT_OSS 15 /* oss sequencer emulator */
+
+
+ /* client types */
+typedef int __bitwise snd_seq_client_type_t;
+#define NO_CLIENT ((__force snd_seq_client_type_t) 0)
+#define USER_CLIENT ((__force snd_seq_client_type_t) 1)
+#define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
+
+ /* event filter flags */
+#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
+#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
+#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
+#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
+
+struct snd_seq_client_info {
+ int client; /* client number to inquire */
+ snd_seq_client_type_t type; /* client type */
+ char name[64]; /* client name */
+ unsigned int filter; /* filter flags */
+ unsigned char multicast_filter[8]; /* multicast filter bitmap */
+ unsigned char event_filter[32]; /* event filter bitmap */
+ int num_ports; /* RO: number of ports */
+ int event_lost; /* number of lost events */
+ char reserved[64]; /* for future use */
+};
+
+
+/* client pool size */
+struct snd_seq_client_pool {
+ int client; /* client number to inquire */
+ int output_pool; /* outgoing (write) pool size */
+ int input_pool; /* incoming (read) pool size */
+ int output_room; /* minimum free pool size for select/blocking mode */
+ int output_free; /* unused size */
+ int input_free; /* unused size */
+ char reserved[64];
+};
+
+
+/* Remove events by specified criteria */
+
+#define SNDRV_SEQ_REMOVE_INPUT (1<<0) /* Flush input queues */
+#define SNDRV_SEQ_REMOVE_OUTPUT (1<<1) /* Flush output queues */
+#define SNDRV_SEQ_REMOVE_DEST (1<<2) /* Restrict by destination q:client:port */
+#define SNDRV_SEQ_REMOVE_DEST_CHANNEL (1<<3) /* Restrict by channel */
+#define SNDRV_SEQ_REMOVE_TIME_BEFORE (1<<4) /* Restrict to before time */
+#define SNDRV_SEQ_REMOVE_TIME_AFTER (1<<5) /* Restrict to time or after */
+#define SNDRV_SEQ_REMOVE_TIME_TICK (1<<6) /* Time is in ticks */
+#define SNDRV_SEQ_REMOVE_EVENT_TYPE (1<<7) /* Restrict to event type */
+#define SNDRV_SEQ_REMOVE_IGNORE_OFF (1<<8) /* Do not flush off events */
+#define SNDRV_SEQ_REMOVE_TAG_MATCH (1<<9) /* Restrict to events with given tag */
+
+struct snd_seq_remove_events {
+ unsigned int remove_mode; /* Flags that determine what gets removed */
+
+ union snd_seq_timestamp time;
+
+ unsigned char queue; /* Queue for REMOVE_DEST */
+ struct snd_seq_addr dest; /* Address for REMOVE_DEST */
+ unsigned char channel; /* Channel for REMOVE_DEST */
+
+ int type; /* For REMOVE_EVENT_TYPE */
+ char tag; /* Tag for REMOVE_TAG */
+
+ int reserved[10]; /* To allow for future binary compatibility */
+
+};
+
+
+ /* known port numbers */
+#define SNDRV_SEQ_PORT_SYSTEM_TIMER 0
+#define SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE 1
+
+ /* port capabilities (32 bits) */
+#define SNDRV_SEQ_PORT_CAP_READ (1<<0) /* readable from this port */
+#define SNDRV_SEQ_PORT_CAP_WRITE (1<<1) /* writable to this port */
+
+#define SNDRV_SEQ_PORT_CAP_SYNC_READ (1<<2)
+#define SNDRV_SEQ_PORT_CAP_SYNC_WRITE (1<<3)
+
+#define SNDRV_SEQ_PORT_CAP_DUPLEX (1<<4)
+
+#define SNDRV_SEQ_PORT_CAP_SUBS_READ (1<<5) /* allow read subscription */
+#define SNDRV_SEQ_PORT_CAP_SUBS_WRITE (1<<6) /* allow write subscription */
+#define SNDRV_SEQ_PORT_CAP_NO_EXPORT (1<<7) /* routing not allowed */
+
+ /* port type */
+#define SNDRV_SEQ_PORT_TYPE_SPECIFIC (1<<0) /* hardware specific */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC (1<<1) /* generic MIDI device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_GM (1<<2) /* General MIDI compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_GS (1<<3) /* GS compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_XG (1<<4) /* XG compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_MT32 (1<<5) /* MT-32 compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_GM2 (1<<6) /* General MIDI 2 compatible device */
+
+/* other standards...*/
+#define SNDRV_SEQ_PORT_TYPE_SYNTH (1<<10) /* Synth device (not MIDI compatible - direct wavetable) */
+#define SNDRV_SEQ_PORT_TYPE_DIRECT_SAMPLE (1<<11) /* Sampling device (support sample download) */
+#define SNDRV_SEQ_PORT_TYPE_SAMPLE (1<<12) /* Sampling device (sample can be downloaded at any time) */
+/*...*/
+#define SNDRV_SEQ_PORT_TYPE_HARDWARE (1<<16) /* driver for a hardware device */
+#define SNDRV_SEQ_PORT_TYPE_SOFTWARE (1<<17) /* implemented in software */
+#define SNDRV_SEQ_PORT_TYPE_SYNTHESIZER (1<<18) /* generates sound */
+#define SNDRV_SEQ_PORT_TYPE_PORT (1<<19) /* connects to other device(s) */
+#define SNDRV_SEQ_PORT_TYPE_APPLICATION (1<<20) /* application (sequencer/editor) */
+
+/* misc. conditioning flags */
+#define SNDRV_SEQ_PORT_FLG_GIVEN_PORT (1<<0)
+#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
+#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
+
+struct snd_seq_port_info {
+ struct snd_seq_addr addr; /* client/port numbers */
+ char name[64]; /* port name */
+
+ unsigned int capability; /* port capability bits */
+ unsigned int type; /* port type bits */
+ int midi_channels; /* channels per MIDI port */
+ int midi_voices; /* voices per MIDI port */
+ int synth_voices; /* voices per SYNTH port */
+
+ int read_use; /* R/O: subscribers for output (from this port) */
+ int write_use; /* R/O: subscribers for input (to this port) */
+
+ void *kernel; /* reserved for kernel use (must be NULL) */
+ unsigned int flags; /* misc. conditioning */
+ unsigned char time_queue; /* queue # for timestamping */
+ char reserved[59]; /* for future use */
+};
+
+
+/* queue flags */
+#define SNDRV_SEQ_QUEUE_FLG_SYNC (1<<0) /* sync enabled */
+
+/* queue information */
+struct snd_seq_queue_info {
+ int queue; /* queue id */
+
+ /*
+ * security settings, only owner of this queue can start/stop timer
+ * etc. if the queue is locked for other clients
+ */
+ int owner; /* client id for owner of the queue */
+ unsigned locked:1; /* timing queue locked for other queues */
+ char name[64]; /* name of this queue */
+ unsigned int flags; /* flags */
+ char reserved[60]; /* for future use */
+
+};
+
+/* queue info/status */
+struct snd_seq_queue_status {
+ int queue; /* queue id */
+ int events; /* read-only - queue size */
+ snd_seq_tick_time_t tick; /* current tick */
+ struct snd_seq_real_time time; /* current time */
+ int running; /* running state of queue */
+ int flags; /* various flags */
+ char reserved[64]; /* for the future */
+};
+
+
+/* queue tempo */
+struct snd_seq_queue_tempo {
+ int queue; /* sequencer queue */
+ unsigned int tempo; /* current tempo, us/tick */
+ int ppq; /* time resolution, ticks/quarter */
+ unsigned int skew_value; /* queue skew */
+ unsigned int skew_base; /* queue skew base */
+ char reserved[24]; /* for the future */
+};
+
+
+/* sequencer timer sources */
+#define SNDRV_SEQ_TIMER_ALSA 0 /* ALSA timer */
+#define SNDRV_SEQ_TIMER_MIDI_CLOCK 1 /* Midi Clock (CLOCK event) */
+#define SNDRV_SEQ_TIMER_MIDI_TICK 2 /* Midi Timer Tick (TICK event) */
+
+/* queue timer info */
+struct snd_seq_queue_timer {
+ int queue; /* sequencer queue */
+ int type; /* source timer type */
+ union {
+ struct {
+ struct snd_timer_id id; /* ALSA's timer ID */
+ unsigned int resolution; /* resolution in Hz */
+ } alsa;
+ } u;
+ char reserved[64]; /* for the future use */
+};
+
+
+struct snd_seq_queue_client {
+ int queue; /* sequencer queue */
+ int client; /* sequencer client */
+ int used; /* queue is used with this client
+ (must be set for accepting events) */
+ /* per client watermarks */
+ char reserved[64]; /* for future use */
+};
+
+
+#define SNDRV_SEQ_PORT_SUBS_EXCLUSIVE (1<<0) /* exclusive connection */
+#define SNDRV_SEQ_PORT_SUBS_TIMESTAMP (1<<1)
+#define SNDRV_SEQ_PORT_SUBS_TIME_REAL (1<<2)
+
+struct snd_seq_port_subscribe {
+ struct snd_seq_addr sender; /* sender address */
+ struct snd_seq_addr dest; /* destination address */
+ unsigned int voices; /* number of voices to be allocated (0 = don't care) */
+ unsigned int flags; /* modes */
+ unsigned char queue; /* input time-stamp queue (optional) */
+ unsigned char pad[3]; /* reserved */
+ char reserved[64];
+};
+
+/* type of query subscription */
+#define SNDRV_SEQ_QUERY_SUBS_READ 0
+#define SNDRV_SEQ_QUERY_SUBS_WRITE 1
+
+struct snd_seq_query_subs {
+ struct snd_seq_addr root; /* client/port id to be searched */
+ int type; /* READ or WRITE */
+ int index; /* 0..N-1 */
+ int num_subs; /* R/O: number of subscriptions on this port */
+ struct snd_seq_addr addr; /* R/O: result */
+ unsigned char queue; /* R/O: result */
+ unsigned int flags; /* R/O: result */
+ char reserved[64]; /* for future use */
+};
+
+
+/*
+ * IOCTL commands
+ */
+
+#define SNDRV_SEQ_IOCTL_PVERSION _IOR ('S', 0x00, int)
+#define SNDRV_SEQ_IOCTL_CLIENT_ID _IOR ('S', 0x01, int)
+#define SNDRV_SEQ_IOCTL_SYSTEM_INFO _IOWR('S', 0x02, struct snd_seq_system_info)
+#define SNDRV_SEQ_IOCTL_RUNNING_MODE _IOWR('S', 0x03, struct snd_seq_running_info)
+
+#define SNDRV_SEQ_IOCTL_GET_CLIENT_INFO _IOWR('S', 0x10, struct snd_seq_client_info)
+#define SNDRV_SEQ_IOCTL_SET_CLIENT_INFO _IOW ('S', 0x11, struct snd_seq_client_info)
+
+#define SNDRV_SEQ_IOCTL_CREATE_PORT _IOWR('S', 0x20, struct snd_seq_port_info)
+#define SNDRV_SEQ_IOCTL_DELETE_PORT _IOW ('S', 0x21, struct snd_seq_port_info)
+#define SNDRV_SEQ_IOCTL_GET_PORT_INFO _IOWR('S', 0x22, struct snd_seq_port_info)
+#define SNDRV_SEQ_IOCTL_SET_PORT_INFO _IOW ('S', 0x23, struct snd_seq_port_info)
+
+#define SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT _IOW ('S', 0x30, struct snd_seq_port_subscribe)
+#define SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT _IOW ('S', 0x31, struct snd_seq_port_subscribe)
+
+#define SNDRV_SEQ_IOCTL_CREATE_QUEUE _IOWR('S', 0x32, struct snd_seq_queue_info)
+#define SNDRV_SEQ_IOCTL_DELETE_QUEUE _IOW ('S', 0x33, struct snd_seq_queue_info)
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_INFO _IOWR('S', 0x34, struct snd_seq_queue_info)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_INFO _IOWR('S', 0x35, struct snd_seq_queue_info)
+#define SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE _IOWR('S', 0x36, struct snd_seq_queue_info)
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS _IOWR('S', 0x40, struct snd_seq_queue_status)
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO _IOWR('S', 0x41, struct snd_seq_queue_tempo)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO _IOW ('S', 0x42, struct snd_seq_queue_tempo)
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_OWNER _IOWR('S', 0x43, struct snd_seq_queue_owner)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_OWNER _IOW ('S', 0x44, struct snd_seq_queue_owner)
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER _IOWR('S', 0x45, struct snd_seq_queue_timer)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER _IOW ('S', 0x46, struct snd_seq_queue_timer)
+/* XXX
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_SYNC _IOWR('S', 0x53, struct snd_seq_queue_sync)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_SYNC _IOW ('S', 0x54, struct snd_seq_queue_sync)
+*/
+#define SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT _IOWR('S', 0x49, struct snd_seq_queue_client)
+#define SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT _IOW ('S', 0x4a, struct snd_seq_queue_client)
+#define SNDRV_SEQ_IOCTL_GET_CLIENT_POOL _IOWR('S', 0x4b, struct snd_seq_client_pool)
+#define SNDRV_SEQ_IOCTL_SET_CLIENT_POOL _IOW ('S', 0x4c, struct snd_seq_client_pool)
+#define SNDRV_SEQ_IOCTL_REMOVE_EVENTS _IOW ('S', 0x4e, struct snd_seq_remove_events)
+#define SNDRV_SEQ_IOCTL_QUERY_SUBS _IOWR('S', 0x4f, struct snd_seq_query_subs)
+#define SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION _IOWR('S', 0x50, struct snd_seq_port_subscribe)
+#define SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT _IOWR('S', 0x51, struct snd_seq_client_info)
+#define SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT _IOWR('S', 0x52, struct snd_seq_port_info)
+
+#endif /* _UAPI__SOUND_ASEQUENCER_H */
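With asequencer.h exported to uapi, the raw sequencer ABI above can be used
without alsa-lib. A hedged sketch that opens /dev/snd/seq and reads the protocol
version and the caller's client id; it assumes installed uapi headers (with
<sound/asound.h> providing SNDRV_PROTOCOL_VERSION) and a kernel built with the
sequencer:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>
#include <sound/asequencer.h>

int main(void)
{
	int fd = open("/dev/snd/seq", O_RDWR);
	if (fd < 0) {
		perror("open /dev/snd/seq");
		return 1;
	}

	int version = 0, client = -1;
	if (ioctl(fd, SNDRV_SEQ_IOCTL_PVERSION, &version) == -1)
		perror("SNDRV_SEQ_IOCTL_PVERSION");
	if (ioctl(fd, SNDRV_SEQ_IOCTL_CLIENT_ID, &client) == -1)
		perror("SNDRV_SEQ_IOCTL_CLIENT_ID");

	printf("sequencer protocol %d.%d.%d, client id %d\n",
	       (version >> 16) & 0xffff, (version >> 8) & 0xff,
	       version & 0xff, client);
	close(fd);
	return 0;
}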
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
new file mode 100644
index 00000000000..1774a5c3ef1
--- /dev/null
+++ b/include/uapi/sound/asound.h
@@ -0,0 +1,971 @@
+/*
+ * Advanced Linux Sound Architecture - ALSA - Driver
+ * Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
+ * Abramo Bagnara <abramo@alsa-project.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _UAPI__SOUND_ASOUND_H
+#define _UAPI__SOUND_ASOUND_H
+
+#include <linux/types.h>
+
+
+/*
+ * protocol version
+ */
+
+#define SNDRV_PROTOCOL_VERSION(major, minor, subminor) (((major)<<16)|((minor)<<8)|(subminor))
+#define SNDRV_PROTOCOL_MAJOR(version) (((version)>>16)&0xffff)
+#define SNDRV_PROTOCOL_MINOR(version) (((version)>>8)&0xff)
+#define SNDRV_PROTOCOL_MICRO(version) ((version)&0xff)
+#define SNDRV_PROTOCOL_INCOMPATIBLE(kversion, uversion) \
+ (SNDRV_PROTOCOL_MAJOR(kversion) != SNDRV_PROTOCOL_MAJOR(uversion) || \
+ (SNDRV_PROTOCOL_MAJOR(kversion) == SNDRV_PROTOCOL_MAJOR(uversion) && \
+ SNDRV_PROTOCOL_MINOR(kversion) != SNDRV_PROTOCOL_MINOR(uversion)))
+
+/****************************************************************************
+ * *
+ * Digital audio interface *
+ * *
+ ****************************************************************************/
+
+struct snd_aes_iec958 {
+ unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char subcode[147]; /* AES/IEC958 subcode bits */
+ unsigned char pad; /* nothing */
+ unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
+};
+
+/****************************************************************************
+ * *
+ * CEA-861 Audio InfoFrame. Used in HDMI and DisplayPort *
+ * *
+ ****************************************************************************/
+
+struct snd_cea_861_aud_if {
+ unsigned char db1_ct_cc; /* coding type and channel count */
+ unsigned char db2_sf_ss; /* sample frequency and size */
+ unsigned char db3; /* not used, all zeros */
+ unsigned char db4_ca; /* channel allocation code */
+ unsigned char db5_dminh_lsv; /* downmix inhibit & level-shift values */
+};
+
+/****************************************************************************
+ * *
+ * Section for driver hardware dependent interface - /dev/snd/hw? *
+ * *
+ ****************************************************************************/
+
+#define SNDRV_HWDEP_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 1)
+
+enum {
+ SNDRV_HWDEP_IFACE_OPL2 = 0,
+ SNDRV_HWDEP_IFACE_OPL3,
+ SNDRV_HWDEP_IFACE_OPL4,
+ SNDRV_HWDEP_IFACE_SB16CSP, /* Creative Signal Processor */
+ SNDRV_HWDEP_IFACE_EMU10K1, /* FX8010 processor in EMU10K1 chip */
+ SNDRV_HWDEP_IFACE_YSS225, /* Yamaha FX processor */
+ SNDRV_HWDEP_IFACE_ICS2115, /* Wavetable synth */
+ SNDRV_HWDEP_IFACE_SSCAPE, /* Ensoniq SoundScape ISA card (MC68EC000) */
+ SNDRV_HWDEP_IFACE_VX, /* Digigram VX cards */
+ SNDRV_HWDEP_IFACE_MIXART, /* Digigram miXart cards */
+ SNDRV_HWDEP_IFACE_USX2Y, /* Tascam US122, US224 & US428 usb */
+ SNDRV_HWDEP_IFACE_EMUX_WAVETABLE, /* EmuX wavetable */
+ SNDRV_HWDEP_IFACE_BLUETOOTH, /* Bluetooth audio */
+ SNDRV_HWDEP_IFACE_USX2Y_PCM, /* Tascam US122, US224 & US428 rawusb pcm */
+ SNDRV_HWDEP_IFACE_PCXHR, /* Digigram PCXHR */
+ SNDRV_HWDEP_IFACE_SB_RC, /* SB Extigy/Audigy2NX remote control */
+ SNDRV_HWDEP_IFACE_HDA, /* HD-audio */
+ SNDRV_HWDEP_IFACE_USB_STREAM, /* direct access to usb stream */
+
+ /* Don't forget to change the following: */
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_USB_STREAM
+};
+
+struct snd_hwdep_info {
+ unsigned int device; /* WR: device number */
+ int card; /* R: card number */
+ unsigned char id[64]; /* ID (user selectable) */
+ unsigned char name[80]; /* hwdep name */
+ int iface; /* hwdep interface */
+ unsigned char reserved[64]; /* reserved for future */
+};
+
+/* generic DSP loader */
+struct snd_hwdep_dsp_status {
+ unsigned int version; /* R: driver-specific version */
+ unsigned char id[32]; /* R: driver-specific ID string */
+ unsigned int num_dsps; /* R: number of DSP images to transfer */
+ unsigned int dsp_loaded; /* R: bit flags indicating the loaded DSPs */
+ unsigned int chip_ready; /* R: 1 = initialization finished */
+ unsigned char reserved[16]; /* reserved for future use */
+};
+
+struct snd_hwdep_dsp_image {
+ unsigned int index; /* W: DSP index */
+ unsigned char name[64]; /* W: ID (e.g. file name) */
+ unsigned char __user *image; /* W: binary image */
+ size_t length; /* W: size of image in bytes */
+ unsigned long driver_data; /* W: driver-specific data */
+};
+
+#define SNDRV_HWDEP_IOCTL_PVERSION _IOR ('H', 0x00, int)
+#define SNDRV_HWDEP_IOCTL_INFO _IOR ('H', 0x01, struct snd_hwdep_info)
+#define SNDRV_HWDEP_IOCTL_DSP_STATUS _IOR('H', 0x02, struct snd_hwdep_dsp_status)
+#define SNDRV_HWDEP_IOCTL_DSP_LOAD _IOW('H', 0x03, struct snd_hwdep_dsp_image)
+
+/*****************************************************************************
+ * *
+ * Digital Audio (PCM) interface - /dev/snd/pcm?? *
+ * *
+ *****************************************************************************/
+
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 11)
+
+typedef unsigned long snd_pcm_uframes_t;
+typedef signed long snd_pcm_sframes_t;
+
+enum {
+ SNDRV_PCM_CLASS_GENERIC = 0, /* standard mono or stereo device */
+ SNDRV_PCM_CLASS_MULTI, /* multichannel device */
+ SNDRV_PCM_CLASS_MODEM, /* software modem class */
+ SNDRV_PCM_CLASS_DIGITIZER, /* digitizer class */
+ /* Don't forget to change the following: */
+ SNDRV_PCM_CLASS_LAST = SNDRV_PCM_CLASS_DIGITIZER,
+};
+
+enum {
+ SNDRV_PCM_SUBCLASS_GENERIC_MIX = 0, /* mono or stereo subdevices are mixed together */
+ SNDRV_PCM_SUBCLASS_MULTI_MIX, /* multichannel subdevices are mixed together */
+ /* Don't forget to change the following: */
+ SNDRV_PCM_SUBCLASS_LAST = SNDRV_PCM_SUBCLASS_MULTI_MIX,
+};
+
+enum {
+ SNDRV_PCM_STREAM_PLAYBACK = 0,
+ SNDRV_PCM_STREAM_CAPTURE,
+ SNDRV_PCM_STREAM_LAST = SNDRV_PCM_STREAM_CAPTURE,
+};
+
+typedef int __bitwise snd_pcm_access_t;
+#define SNDRV_PCM_ACCESS_MMAP_INTERLEAVED ((__force snd_pcm_access_t) 0) /* interleaved mmap */
+#define SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED ((__force snd_pcm_access_t) 1) /* noninterleaved mmap */
+#define SNDRV_PCM_ACCESS_MMAP_COMPLEX ((__force snd_pcm_access_t) 2) /* complex mmap */
+#define SNDRV_PCM_ACCESS_RW_INTERLEAVED ((__force snd_pcm_access_t) 3) /* readi/writei */
+#define SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ((__force snd_pcm_access_t) 4) /* readn/writen */
+#define SNDRV_PCM_ACCESS_LAST SNDRV_PCM_ACCESS_RW_NONINTERLEAVED
+
+typedef int __bitwise snd_pcm_format_t;
+#define SNDRV_PCM_FORMAT_S8 ((__force snd_pcm_format_t) 0)
+#define SNDRV_PCM_FORMAT_U8 ((__force snd_pcm_format_t) 1)
+#define SNDRV_PCM_FORMAT_S16_LE ((__force snd_pcm_format_t) 2)
+#define SNDRV_PCM_FORMAT_S16_BE ((__force snd_pcm_format_t) 3)
+#define SNDRV_PCM_FORMAT_U16_LE ((__force snd_pcm_format_t) 4)
+#define SNDRV_PCM_FORMAT_U16_BE ((__force snd_pcm_format_t) 5)
+#define SNDRV_PCM_FORMAT_S24_LE ((__force snd_pcm_format_t) 6) /* low three bytes */
+#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
+#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
+#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
+#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
+#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
+#define SNDRV_PCM_FORMAT_U32_BE ((__force snd_pcm_format_t) 13)
+#define SNDRV_PCM_FORMAT_FLOAT_LE ((__force snd_pcm_format_t) 14) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */
+#define SNDRV_PCM_FORMAT_FLOAT_BE ((__force snd_pcm_format_t) 15) /* 4-byte float, IEEE-754 32-bit, range -1.0 to 1.0 */
+#define SNDRV_PCM_FORMAT_FLOAT64_LE ((__force snd_pcm_format_t) 16) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */
+#define SNDRV_PCM_FORMAT_FLOAT64_BE ((__force snd_pcm_format_t) 17) /* 8-byte float, IEEE-754 64-bit, range -1.0 to 1.0 */
+#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE ((__force snd_pcm_format_t) 18) /* IEC-958 subframe, Little Endian */
+#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE ((__force snd_pcm_format_t) 19) /* IEC-958 subframe, Big Endian */
+#define SNDRV_PCM_FORMAT_MU_LAW ((__force snd_pcm_format_t) 20)
+#define SNDRV_PCM_FORMAT_A_LAW ((__force snd_pcm_format_t) 21)
+#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
+#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
+#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
+#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
+#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U24_3LE ((__force snd_pcm_format_t) 34) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U24_3BE ((__force snd_pcm_format_t) 35) /* in three bytes */
+#define SNDRV_PCM_FORMAT_S20_3LE ((__force snd_pcm_format_t) 36) /* in three bytes */
+#define SNDRV_PCM_FORMAT_S20_3BE ((__force snd_pcm_format_t) 37) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U20_3LE ((__force snd_pcm_format_t) 38) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U20_3BE ((__force snd_pcm_format_t) 39) /* in three bytes */
+#define SNDRV_PCM_FORMAT_S18_3LE ((__force snd_pcm_format_t) 40) /* in three bytes */
+#define SNDRV_PCM_FORMAT_S18_3BE ((__force snd_pcm_format_t) 41) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U18_3LE ((__force snd_pcm_format_t) 42) /* in three bytes */
+#define SNDRV_PCM_FORMAT_U18_3BE ((__force snd_pcm_format_t) 43) /* in three bytes */
+#define SNDRV_PCM_FORMAT_G723_24 ((__force snd_pcm_format_t) 44) /* 8 samples in 3 bytes */
+#define SNDRV_PCM_FORMAT_G723_24_1B ((__force snd_pcm_format_t) 45) /* 1 sample in 1 byte */
+#define SNDRV_PCM_FORMAT_G723_40 ((__force snd_pcm_format_t) 46) /* 8 Samples in 5 bytes */
+#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
+#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_G723_40_1B
+
+#ifdef SNDRV_LITTLE_ENDIAN
+#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
+#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_LE
+#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_LE
+#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_LE
+#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_LE
+#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_LE
+#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
+#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
+#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#endif
+#ifdef SNDRV_BIG_ENDIAN
+#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
+#define SNDRV_PCM_FORMAT_U16 SNDRV_PCM_FORMAT_U16_BE
+#define SNDRV_PCM_FORMAT_S24 SNDRV_PCM_FORMAT_S24_BE
+#define SNDRV_PCM_FORMAT_U24 SNDRV_PCM_FORMAT_U24_BE
+#define SNDRV_PCM_FORMAT_S32 SNDRV_PCM_FORMAT_S32_BE
+#define SNDRV_PCM_FORMAT_U32 SNDRV_PCM_FORMAT_U32_BE
+#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
+#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
+#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#endif
+
+typedef int __bitwise snd_pcm_subformat_t;
+#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0)
+#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD
+
+#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */
+#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
+#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
+#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
+#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
+#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
+#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
+#define SNDRV_PCM_INFO_BLOCK_TRANSFER 0x00010000 /* hardware transfers blocks of samples */
+#define SNDRV_PCM_INFO_OVERRANGE 0x00020000 /* hardware supports ADC (capture) overrange detection */
+#define SNDRV_PCM_INFO_RESUME 0x00040000 /* hardware supports stream resume after suspend */
+#define SNDRV_PCM_INFO_PAUSE 0x00080000 /* pause ioctl is supported */
+#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */
+#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
+#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm supports some kind of synchronized start */
+#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
+#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
+
+typedef int __bitwise snd_pcm_state_t;
+#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
+#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */
+#define SNDRV_PCM_STATE_PREPARED ((__force snd_pcm_state_t) 2) /* stream is ready to start */
+#define SNDRV_PCM_STATE_RUNNING ((__force snd_pcm_state_t) 3) /* stream is running */
+#define SNDRV_PCM_STATE_XRUN ((__force snd_pcm_state_t) 4) /* stream reached an xrun */
+#define SNDRV_PCM_STATE_DRAINING ((__force snd_pcm_state_t) 5) /* stream is draining */
+#define SNDRV_PCM_STATE_PAUSED ((__force snd_pcm_state_t) 6) /* stream is paused */
+#define SNDRV_PCM_STATE_SUSPENDED ((__force snd_pcm_state_t) 7) /* hardware is suspended */
+#define SNDRV_PCM_STATE_DISCONNECTED ((__force snd_pcm_state_t) 8) /* hardware is disconnected */
+#define SNDRV_PCM_STATE_LAST SNDRV_PCM_STATE_DISCONNECTED
+
+enum {
+ SNDRV_PCM_MMAP_OFFSET_DATA = 0x00000000,
+ SNDRV_PCM_MMAP_OFFSET_STATUS = 0x80000000,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL = 0x81000000,
+};
+
+union snd_pcm_sync_id {
+ unsigned char id[16];
+ unsigned short id16[8];
+ unsigned int id32[4];
+};
+
+struct snd_pcm_info {
+ unsigned int device; /* RO/WR (control): device number */
+ unsigned int subdevice; /* RO/WR (control): subdevice number */
+ int stream; /* RO/WR (control): stream direction */
+ int card; /* R: card number */
+ unsigned char id[64]; /* ID (user selectable) */
+ unsigned char name[80]; /* name of this device */
+ unsigned char subname[32]; /* subdevice name */
+ int dev_class; /* SNDRV_PCM_CLASS_* */
+ int dev_subclass; /* SNDRV_PCM_SUBCLASS_* */
+ unsigned int subdevices_count;
+ unsigned int subdevices_avail;
+ union snd_pcm_sync_id sync; /* hardware synchronization ID */
+ unsigned char reserved[64]; /* reserved for future... */
+};
+
+typedef int snd_pcm_hw_param_t;
+#define SNDRV_PCM_HW_PARAM_ACCESS 0 /* Access type */
+#define SNDRV_PCM_HW_PARAM_FORMAT 1 /* Format */
+#define SNDRV_PCM_HW_PARAM_SUBFORMAT 2 /* Subformat */
+#define SNDRV_PCM_HW_PARAM_FIRST_MASK SNDRV_PCM_HW_PARAM_ACCESS
+#define SNDRV_PCM_HW_PARAM_LAST_MASK SNDRV_PCM_HW_PARAM_SUBFORMAT
+
+#define SNDRV_PCM_HW_PARAM_SAMPLE_BITS 8 /* Bits per sample */
+#define SNDRV_PCM_HW_PARAM_FRAME_BITS 9 /* Bits per frame */
+#define SNDRV_PCM_HW_PARAM_CHANNELS 10 /* Channels */
+#define SNDRV_PCM_HW_PARAM_RATE 11 /* Approx rate */
+#define SNDRV_PCM_HW_PARAM_PERIOD_TIME 12 /* Approx distance between
+ * interrupts in us
+ */
+#define SNDRV_PCM_HW_PARAM_PERIOD_SIZE 13 /* Approx frames between
+ * interrupts
+ */
+#define SNDRV_PCM_HW_PARAM_PERIOD_BYTES 14 /* Approx bytes between
+ * interrupts
+ */
+#define SNDRV_PCM_HW_PARAM_PERIODS 15 /* Approx interrupts per
+ * buffer
+ */
+#define SNDRV_PCM_HW_PARAM_BUFFER_TIME 16 /* Approx duration of buffer
+ * in us
+ */
+#define SNDRV_PCM_HW_PARAM_BUFFER_SIZE 17 /* Size of buffer in frames */
+#define SNDRV_PCM_HW_PARAM_BUFFER_BYTES 18 /* Size of buffer in bytes */
+#define SNDRV_PCM_HW_PARAM_TICK_TIME 19 /* Approx tick duration in us */
+#define SNDRV_PCM_HW_PARAM_FIRST_INTERVAL SNDRV_PCM_HW_PARAM_SAMPLE_BITS
+#define SNDRV_PCM_HW_PARAM_LAST_INTERVAL SNDRV_PCM_HW_PARAM_TICK_TIME
+
+#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */
+#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */
+#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
+
+struct snd_interval {
+ unsigned int min, max;
+ unsigned int openmin:1,
+ openmax:1,
+ integer:1,
+ empty:1;
+};
+
+#define SNDRV_MASK_MAX 256
+
+struct snd_mask {
+ __u32 bits[(SNDRV_MASK_MAX+31)/32];
+};
+
+struct snd_pcm_hw_params {
+ unsigned int flags;
+ struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK -
+ SNDRV_PCM_HW_PARAM_FIRST_MASK + 1];
+ struct snd_mask mres[5]; /* reserved masks */
+ struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL -
+ SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1];
+ struct snd_interval ires[9]; /* reserved intervals */
+ unsigned int rmask; /* W: requested masks */
+ unsigned int cmask; /* R: changed masks */
+ unsigned int info; /* R: Info flags for returned setup */
+ unsigned int msbits; /* R: used most significant bits */
+ unsigned int rate_num; /* R: rate numerator */
+ unsigned int rate_den; /* R: rate denominator */
+ snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
+ unsigned char reserved[64]; /* reserved for future */
+};
+
+enum {
+ SNDRV_PCM_TSTAMP_NONE = 0,
+ SNDRV_PCM_TSTAMP_ENABLE,
+ SNDRV_PCM_TSTAMP_LAST = SNDRV_PCM_TSTAMP_ENABLE,
+};
+
+struct snd_pcm_sw_params {
+ int tstamp_mode; /* timestamp mode */
+ unsigned int period_step;
+ unsigned int sleep_min; /* min ticks to sleep */
+ snd_pcm_uframes_t avail_min; /* min avail frames for wakeup */
+ snd_pcm_uframes_t xfer_align; /* obsolete: xfer size needs to be a multiple */
+ snd_pcm_uframes_t start_threshold; /* min hw_avail frames for automatic start */
+ snd_pcm_uframes_t stop_threshold; /* min avail frames for automatic stop */
+ snd_pcm_uframes_t silence_threshold; /* min distance from noise for silence filling */
+ snd_pcm_uframes_t silence_size; /* silence block size */
+ snd_pcm_uframes_t boundary; /* pointers wrap point */
+ unsigned char reserved[64]; /* reserved for future */
+};
+
+struct snd_pcm_channel_info {
+ unsigned int channel;
+ __kernel_off_t offset; /* mmap offset */
+ unsigned int first; /* offset to first sample in bits */
+ unsigned int step; /* samples distance in bits */
+};
+
+struct snd_pcm_status {
+ snd_pcm_state_t state; /* stream state */
+ struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
+ struct timespec tstamp; /* reference timestamp */
+ snd_pcm_uframes_t appl_ptr; /* appl ptr */
+ snd_pcm_uframes_t hw_ptr; /* hw ptr */
+ snd_pcm_sframes_t delay; /* current delay in frames */
+ snd_pcm_uframes_t avail; /* number of frames available */
+ snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
+ snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
+ __u32 reserved_alignment; /* must be filled with zero */
+ struct timespec audio_tstamp; /* from sample counter or wall clock */
+ unsigned char reserved[56-sizeof(struct timespec)]; /* must be filled with zero */
+};
+
+struct snd_pcm_mmap_status {
+ snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
+ int pad1; /* Needed for 64 bit alignment */
+ snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
+ struct timespec tstamp; /* Timestamp */
+ snd_pcm_state_t suspended_state; /* RO: suspended stream state */
+ struct timespec audio_tstamp; /* from sample counter or wall clock */
+};
+
+struct snd_pcm_mmap_control {
+ snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
+ snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
+};
+
+#define SNDRV_PCM_SYNC_PTR_HWSYNC (1<<0) /* execute hwsync */
+#define SNDRV_PCM_SYNC_PTR_APPL (1<<1) /* get appl_ptr from driver (r/w op) */
+#define SNDRV_PCM_SYNC_PTR_AVAIL_MIN (1<<2) /* get avail_min from driver */
+
+struct snd_pcm_sync_ptr {
+ unsigned int flags;
+ union {
+ struct snd_pcm_mmap_status status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct snd_pcm_mmap_control control;
+ unsigned char reserved[64];
+ } c;
+};
+
+struct snd_xferi {
+ snd_pcm_sframes_t result;
+ void __user *buf;
+ snd_pcm_uframes_t frames;
+};
+
+struct snd_xfern {
+ snd_pcm_sframes_t result;
+ void __user * __user *bufs;
+ snd_pcm_uframes_t frames;
+};
+
+enum {
+ SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY = 0, /* gettimeofday equivalent */
+ SNDRV_PCM_TSTAMP_TYPE_MONOTONIC, /* posix_clock_monotonic equivalent */
+ SNDRV_PCM_TSTAMP_TYPE_LAST = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC,
+};
+
+/* channel positions */
+enum {
+ SNDRV_CHMAP_UNKNOWN = 0,
+ SNDRV_CHMAP_NA, /* N/A, silent */
+ SNDRV_CHMAP_MONO, /* mono stream */
+ /* this follows the alsa-lib mixer channel value + 3 */
+ SNDRV_CHMAP_FL, /* front left */
+ SNDRV_CHMAP_FR, /* front right */
+ SNDRV_CHMAP_RL, /* rear left */
+ SNDRV_CHMAP_RR, /* rear right */
+ SNDRV_CHMAP_FC, /* front center */
+ SNDRV_CHMAP_LFE, /* LFE */
+ SNDRV_CHMAP_SL, /* side left */
+ SNDRV_CHMAP_SR, /* side right */
+ SNDRV_CHMAP_RC, /* rear center */
+ /* new definitions */
+ SNDRV_CHMAP_FLC, /* front left center */
+ SNDRV_CHMAP_FRC, /* front right center */
+ SNDRV_CHMAP_RLC, /* rear left center */
+ SNDRV_CHMAP_RRC, /* rear right center */
+ SNDRV_CHMAP_FLW, /* front left wide */
+ SNDRV_CHMAP_FRW, /* front right wide */
+ SNDRV_CHMAP_FLH, /* front left high */
+ SNDRV_CHMAP_FCH, /* front center high */
+ SNDRV_CHMAP_FRH, /* front right high */
+ SNDRV_CHMAP_TC, /* top center */
+ SNDRV_CHMAP_TFL, /* top front left */
+ SNDRV_CHMAP_TFR, /* top front right */
+ SNDRV_CHMAP_TFC, /* top front center */
+ SNDRV_CHMAP_TRL, /* top rear left */
+ SNDRV_CHMAP_TRR, /* top rear right */
+ SNDRV_CHMAP_TRC, /* top rear center */
+ /* new definitions for UAC2 */
+ SNDRV_CHMAP_TFLC, /* top front left center */
+ SNDRV_CHMAP_TFRC, /* top front right center */
+ SNDRV_CHMAP_TSL, /* top side left */
+ SNDRV_CHMAP_TSR, /* top side right */
+ SNDRV_CHMAP_LLFE, /* left LFE */
+ SNDRV_CHMAP_RLFE, /* right LFE */
+ SNDRV_CHMAP_BC, /* bottom center */
+ SNDRV_CHMAP_BLC, /* bottom left center */
+ SNDRV_CHMAP_BRC, /* bottom right center */
+ SNDRV_CHMAP_LAST = SNDRV_CHMAP_BRC,
+};
+
+#define SNDRV_CHMAP_POSITION_MASK 0xffff
+#define SNDRV_CHMAP_PHASE_INVERSE (0x01 << 16)
+#define SNDRV_CHMAP_DRIVER_SPEC (0x02 << 16)
+
+#define SNDRV_PCM_IOCTL_PVERSION _IOR('A', 0x00, int)
+#define SNDRV_PCM_IOCTL_INFO _IOR('A', 0x01, struct snd_pcm_info)
+#define SNDRV_PCM_IOCTL_TSTAMP _IOW('A', 0x02, int)
+#define SNDRV_PCM_IOCTL_TTSTAMP _IOW('A', 0x03, int)
+#define SNDRV_PCM_IOCTL_HW_REFINE _IOWR('A', 0x10, struct snd_pcm_hw_params)
+#define SNDRV_PCM_IOCTL_HW_PARAMS _IOWR('A', 0x11, struct snd_pcm_hw_params)
+#define SNDRV_PCM_IOCTL_HW_FREE _IO('A', 0x12)
+#define SNDRV_PCM_IOCTL_SW_PARAMS _IOWR('A', 0x13, struct snd_pcm_sw_params)
+#define SNDRV_PCM_IOCTL_STATUS _IOR('A', 0x20, struct snd_pcm_status)
+#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
+#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
+#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
+#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
+#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40)
+#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41)
+#define SNDRV_PCM_IOCTL_START _IO('A', 0x42)
+#define SNDRV_PCM_IOCTL_DROP _IO('A', 0x43)
+#define SNDRV_PCM_IOCTL_DRAIN _IO('A', 0x44)
+#define SNDRV_PCM_IOCTL_PAUSE _IOW('A', 0x45, int)
+#define SNDRV_PCM_IOCTL_REWIND _IOW('A', 0x46, snd_pcm_uframes_t)
+#define SNDRV_PCM_IOCTL_RESUME _IO('A', 0x47)
+#define SNDRV_PCM_IOCTL_XRUN _IO('A', 0x48)
+#define SNDRV_PCM_IOCTL_FORWARD _IOW('A', 0x49, snd_pcm_uframes_t)
+#define SNDRV_PCM_IOCTL_WRITEI_FRAMES _IOW('A', 0x50, struct snd_xferi)
+#define SNDRV_PCM_IOCTL_READI_FRAMES _IOR('A', 0x51, struct snd_xferi)
+#define SNDRV_PCM_IOCTL_WRITEN_FRAMES _IOW('A', 0x52, struct snd_xfern)
+#define SNDRV_PCM_IOCTL_READN_FRAMES _IOR('A', 0x53, struct snd_xfern)
+#define SNDRV_PCM_IOCTL_LINK _IOW('A', 0x60, int)
+#define SNDRV_PCM_IOCTL_UNLINK _IO('A', 0x61)
+
+/*****************************************************************************
+ * *
+ * MIDI v1.0 interface *
+ * *
+ *****************************************************************************/
+
+/*
+ * Raw MIDI section - /dev/snd/midi??
+ */
+
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 0)
+
+enum {
+ SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
+ SNDRV_RAWMIDI_STREAM_INPUT,
+ SNDRV_RAWMIDI_STREAM_LAST = SNDRV_RAWMIDI_STREAM_INPUT,
+};
+
+#define SNDRV_RAWMIDI_INFO_OUTPUT 0x00000001
+#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
+#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
+
+struct snd_rawmidi_info {
+ unsigned int device; /* RO/WR (control): device number */
+ unsigned int subdevice; /* RO/WR (control): subdevice number */
+ int stream; /* WR: stream */
+ int card; /* R: card number */
+ unsigned int flags; /* SNDRV_RAWMIDI_INFO_XXXX */
+ unsigned char id[64]; /* ID (user selectable) */
+ unsigned char name[80]; /* name of device */
+ unsigned char subname[32]; /* name of active or selected subdevice */
+ unsigned int subdevices_count;
+ unsigned int subdevices_avail;
+ unsigned char reserved[64]; /* reserved for future use */
+};
+
+struct snd_rawmidi_params {
+ int stream;
+ size_t buffer_size; /* queue size in bytes */
+ size_t avail_min; /* minimum avail bytes for wakeup */
+ unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
+ unsigned char reserved[16]; /* reserved for future use */
+};
+
+struct snd_rawmidi_status {
+ int stream;
+ struct timespec tstamp; /* Timestamp */
+ size_t avail; /* available bytes */
+ size_t xruns; /* count of overruns since last status (in bytes) */
+ unsigned char reserved[16]; /* reserved for future use */
+};
+
+#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
+#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
+#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
+#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
+#define SNDRV_RAWMIDI_IOCTL_DRAIN _IOW('W', 0x31, int)
+
+/*
+ * Timer section - /dev/snd/timer
+ */
+
+#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 6)
+
+enum {
+ SNDRV_TIMER_CLASS_NONE = -1,
+ SNDRV_TIMER_CLASS_SLAVE = 0,
+ SNDRV_TIMER_CLASS_GLOBAL,
+ SNDRV_TIMER_CLASS_CARD,
+ SNDRV_TIMER_CLASS_PCM,
+ SNDRV_TIMER_CLASS_LAST = SNDRV_TIMER_CLASS_PCM,
+};
+
+/* slave timer classes */
+enum {
+ SNDRV_TIMER_SCLASS_NONE = 0,
+ SNDRV_TIMER_SCLASS_APPLICATION,
+ SNDRV_TIMER_SCLASS_SEQUENCER, /* alias */
+ SNDRV_TIMER_SCLASS_OSS_SEQUENCER, /* alias */
+ SNDRV_TIMER_SCLASS_LAST = SNDRV_TIMER_SCLASS_OSS_SEQUENCER,
+};
+
+/* global timers (device member) */
+#define SNDRV_TIMER_GLOBAL_SYSTEM 0
+#define SNDRV_TIMER_GLOBAL_RTC 1
+#define SNDRV_TIMER_GLOBAL_HPET 2
+#define SNDRV_TIMER_GLOBAL_HRTIMER 3
+
+/* info flags */
+#define SNDRV_TIMER_FLG_SLAVE (1<<0) /* cannot be controlled */
+
+struct snd_timer_id {
+ int dev_class;
+ int dev_sclass;
+ int card;
+ int device;
+ int subdevice;
+};
+
+struct snd_timer_ginfo {
+ struct snd_timer_id tid; /* requested timer ID */
+ unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */
+ int card; /* card number */
+ unsigned char id[64]; /* timer identification */
+ unsigned char name[80]; /* timer name */
+ unsigned long reserved0; /* reserved for future use */
+ unsigned long resolution; /* average period resolution in ns */
+ unsigned long resolution_min; /* minimal period resolution in ns */
+ unsigned long resolution_max; /* maximal period resolution in ns */
+ unsigned int clients; /* active timer clients */
+ unsigned char reserved[32];
+};
+
+struct snd_timer_gparams {
+ struct snd_timer_id tid; /* requested timer ID */
+ unsigned long period_num; /* requested precise period duration (in seconds) - numerator */
+ unsigned long period_den; /* requested precise period duration (in seconds) - denominator */
+ unsigned char reserved[32];
+};
+
+struct snd_timer_gstatus {
+ struct snd_timer_id tid; /* requested timer ID */
+ unsigned long resolution; /* current period resolution in ns */
+ unsigned long resolution_num; /* precise current period resolution (in seconds) - numerator */
+ unsigned long resolution_den; /* precise current period resolution (in seconds) - denominator */
+ unsigned char reserved[32];
+};
+
+struct snd_timer_select {
+ struct snd_timer_id id; /* bind to timer ID */
+ unsigned char reserved[32]; /* reserved */
+};
+
+struct snd_timer_info {
+ unsigned int flags; /* timer flags - SNDRV_TIMER_FLG_* */
+ int card; /* card number */
+ unsigned char id[64]; /* timer identification */
+ unsigned char name[80]; /* timer name */
+ unsigned long reserved0; /* reserved for future use */
+ unsigned long resolution; /* average period resolution in ns */
+ unsigned char reserved[64]; /* reserved */
+};
+
+#define SNDRV_TIMER_PSFLG_AUTO (1<<0) /* auto start, otherwise one-shot */
+#define SNDRV_TIMER_PSFLG_EXCLUSIVE (1<<1) /* exclusive use, precise start/stop/pause/continue */
+#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
+
+struct snd_timer_params {
+ unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
+ unsigned int ticks; /* requested resolution in ticks */
+ unsigned int queue_size; /* total size of queue (32-1024) */
+ unsigned int reserved0; /* reserved, was: failure locations */
+ unsigned int filter; /* event filter (bitmask of SNDRV_TIMER_EVENT_*) */
+ unsigned char reserved[60]; /* reserved */
+};
+
+struct snd_timer_status {
+ struct timespec tstamp; /* Timestamp - last update */
+ unsigned int resolution; /* current period resolution in ns */
+ unsigned int lost; /* count of lost master ticks */
+ unsigned int overrun; /* count of read queue overruns */
+ unsigned int queue; /* used queue size */
+ unsigned char reserved[64]; /* reserved */
+};
+
+#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
+#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
+#define SNDRV_TIMER_IOCTL_TREAD _IOW('T', 0x02, int)
+#define SNDRV_TIMER_IOCTL_GINFO _IOWR('T', 0x03, struct snd_timer_ginfo)
+#define SNDRV_TIMER_IOCTL_GPARAMS _IOW('T', 0x04, struct snd_timer_gparams)
+#define SNDRV_TIMER_IOCTL_GSTATUS _IOWR('T', 0x05, struct snd_timer_gstatus)
+#define SNDRV_TIMER_IOCTL_SELECT _IOW('T', 0x10, struct snd_timer_select)
+#define SNDRV_TIMER_IOCTL_INFO _IOR('T', 0x11, struct snd_timer_info)
+#define SNDRV_TIMER_IOCTL_PARAMS _IOW('T', 0x12, struct snd_timer_params)
+#define SNDRV_TIMER_IOCTL_STATUS _IOR('T', 0x14, struct snd_timer_status)
+/* The following four ioctls were changed since 1.0.9 due to a numbering conflict */
+#define SNDRV_TIMER_IOCTL_START _IO('T', 0xa0)
+#define SNDRV_TIMER_IOCTL_STOP _IO('T', 0xa1)
+#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
+#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
+
+struct snd_timer_read {
+ unsigned int resolution;
+ unsigned int ticks;
+};
+
+enum {
+ SNDRV_TIMER_EVENT_RESOLUTION = 0, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_TICK, /* val = ticks */
+ SNDRV_TIMER_EVENT_START, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_STOP, /* val = 0 */
+ SNDRV_TIMER_EVENT_CONTINUE, /* val = resolution in ns */
+ SNDRV_TIMER_EVENT_PAUSE, /* val = 0 */
+ SNDRV_TIMER_EVENT_EARLY, /* val = 0, early event */
+ SNDRV_TIMER_EVENT_SUSPEND, /* val = 0 */
+ SNDRV_TIMER_EVENT_RESUME, /* val = resolution in ns */
+ /* master timer events for slave timer instances */
+ SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
+ SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,
+ SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
+ SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10,
+ SNDRV_TIMER_EVENT_MSUSPEND = SNDRV_TIMER_EVENT_SUSPEND + 10,
+ SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
+};
+
+struct snd_timer_tread {
+ int event;
+ struct timespec tstamp;
+ unsigned int val;
+};
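A rough sketch of how the timer read path defined above might be consumed from user space; the /dev/snd/timer node is named in the section comment, the class/global/event constants come from the enums in this hunk, and error handling is pared down:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
        struct snd_timer_select sel;
        struct snd_timer_params params;
        struct snd_timer_tread tr;
        int tread = 1;
        int fd = open("/dev/snd/timer", O_RDONLY);

        if (fd < 0)
                return 1;

        ioctl(fd, SNDRV_TIMER_IOCTL_TREAD, &tread);     /* enable enhanced reads */

        memset(&sel, 0, sizeof(sel));
        sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
        sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;
        ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);

        memset(&params, 0, sizeof(params));
        params.ticks = 1;                               /* finest available resolution */
        params.filter = (1 << SNDRV_TIMER_EVENT_TICK) |
                        (1 << SNDRV_TIMER_EVENT_RESOLUTION);
        ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params);
        ioctl(fd, SNDRV_TIMER_IOCTL_START);

        /* with TREAD enabled, each read() returns snd_timer_tread records */
        while (read(fd, &tr, sizeof(tr)) == sizeof(tr))
                printf("event=%d val=%u\n", tr.event, tr.val);
        return 0;
}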
+
+/****************************************************************************
+ * *
+ * Section for driver control interface - /dev/snd/control? *
+ * *
+ ****************************************************************************/
+
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
+
+struct snd_ctl_card_info {
+ int card; /* card number */
+ int pad; /* reserved for future (was type) */
+ unsigned char id[16]; /* ID of card (user selectable) */
+ unsigned char driver[16]; /* Driver name */
+ unsigned char name[32]; /* Short name of soundcard */
+ unsigned char longname[80]; /* name + info text about soundcard */
+ unsigned char reserved_[16]; /* reserved for future (was ID of mixer) */
+ unsigned char mixername[80]; /* visual mixer identification */
+ unsigned char components[128]; /* card components / fine identification, delimited with one space (AC97 etc..) */
+};
+
+typedef int __bitwise snd_ctl_elem_type_t;
+#define SNDRV_CTL_ELEM_TYPE_NONE ((__force snd_ctl_elem_type_t) 0) /* invalid */
+#define SNDRV_CTL_ELEM_TYPE_BOOLEAN ((__force snd_ctl_elem_type_t) 1) /* boolean type */
+#define SNDRV_CTL_ELEM_TYPE_INTEGER ((__force snd_ctl_elem_type_t) 2) /* integer type */
+#define SNDRV_CTL_ELEM_TYPE_ENUMERATED ((__force snd_ctl_elem_type_t) 3) /* enumerated type */
+#define SNDRV_CTL_ELEM_TYPE_BYTES ((__force snd_ctl_elem_type_t) 4) /* byte array */
+#define SNDRV_CTL_ELEM_TYPE_IEC958 ((__force snd_ctl_elem_type_t) 5) /* IEC958 (S/PDIF) setup */
+#define SNDRV_CTL_ELEM_TYPE_INTEGER64 ((__force snd_ctl_elem_type_t) 6) /* 64-bit integer type */
+#define SNDRV_CTL_ELEM_TYPE_LAST SNDRV_CTL_ELEM_TYPE_INTEGER64
+
+typedef int __bitwise snd_ctl_elem_iface_t;
+#define SNDRV_CTL_ELEM_IFACE_CARD ((__force snd_ctl_elem_iface_t) 0) /* global control */
+#define SNDRV_CTL_ELEM_IFACE_HWDEP ((__force snd_ctl_elem_iface_t) 1) /* hardware dependent device */
+#define SNDRV_CTL_ELEM_IFACE_MIXER ((__force snd_ctl_elem_iface_t) 2) /* virtual mixer device */
+#define SNDRV_CTL_ELEM_IFACE_PCM ((__force snd_ctl_elem_iface_t) 3) /* PCM device */
+#define SNDRV_CTL_ELEM_IFACE_RAWMIDI ((__force snd_ctl_elem_iface_t) 4) /* RawMidi device */
+#define SNDRV_CTL_ELEM_IFACE_TIMER ((__force snd_ctl_elem_iface_t) 5) /* timer device */
+#define SNDRV_CTL_ELEM_IFACE_SEQUENCER ((__force snd_ctl_elem_iface_t) 6) /* sequencer client */
+#define SNDRV_CTL_ELEM_IFACE_LAST SNDRV_CTL_ELEM_IFACE_SEQUENCER
+
+#define SNDRV_CTL_ELEM_ACCESS_READ (1<<0)
+#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
+#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
+#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
+#define SNDRV_CTL_ELEM_ACCESS_TIMESTAMP (1<<3) /* when was control changed */
+#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
+#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
+#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
+#define SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND (1<<6) /* TLV command is possible */
+#define SNDRV_CTL_ELEM_ACCESS_INACTIVE (1<<8) /* control currently does nothing, but may be updated */
+#define SNDRV_CTL_ELEM_ACCESS_LOCK (1<<9) /* write lock */
+#define SNDRV_CTL_ELEM_ACCESS_OWNER (1<<10) /* write lock owner */
+#define SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK (1<<28) /* kernel uses a TLV callback */
+#define SNDRV_CTL_ELEM_ACCESS_USER (1<<29) /* user space element */
+/* bits 30 and 31 are obsoleted (for indirect access) */
+
+/* for further details see the ACPI and PCI power management specification */
+#define SNDRV_CTL_POWER_D0 0x0000 /* full On */
+#define SNDRV_CTL_POWER_D1 0x0100 /* partial On */
+#define SNDRV_CTL_POWER_D2 0x0200 /* partial On */
+#define SNDRV_CTL_POWER_D3 0x0300 /* Off */
+#define SNDRV_CTL_POWER_D3hot (SNDRV_CTL_POWER_D3|0x0000) /* Off, with power */
+#define SNDRV_CTL_POWER_D3cold (SNDRV_CTL_POWER_D3|0x0001) /* Off, without power */
+
+struct snd_ctl_elem_id {
+ unsigned int numid; /* numeric identifier, zero = invalid */
+ snd_ctl_elem_iface_t iface; /* interface identifier */
+ unsigned int device; /* device/client number */
+ unsigned int subdevice; /* subdevice (substream) number */
+ unsigned char name[44]; /* ASCII name of item */
+ unsigned int index; /* index of item */
+};
+
+struct snd_ctl_elem_list {
+ unsigned int offset; /* W: first element ID to get */
+ unsigned int space; /* W: count of element IDs to get */
+ unsigned int used; /* R: count of element IDs set */
+ unsigned int count; /* R: count of all elements */
+ struct snd_ctl_elem_id __user *pids; /* R: IDs */
+ unsigned char reserved[50];
+};
+
+struct snd_ctl_elem_info {
+ struct snd_ctl_elem_id id; /* W: element ID */
+ snd_ctl_elem_type_t type; /* R: value type - SNDRV_CTL_ELEM_TYPE_* */
+ unsigned int access; /* R: value access (bitmask) - SNDRV_CTL_ELEM_ACCESS_* */
+ unsigned int count; /* count of values */
+ __kernel_pid_t owner; /* owner's PID of this control */
+ union {
+ struct {
+ long min; /* R: minimum value */
+ long max; /* R: maximum value */
+ long step; /* R: step (0 variable) */
+ } integer;
+ struct {
+ long long min; /* R: minimum value */
+ long long max; /* R: maximum value */
+ long long step; /* R: step (0 variable) */
+ } integer64;
+ struct {
+ unsigned int items; /* R: number of items */
+ unsigned int item; /* W: item number */
+ char name[64]; /* R: value name */
+ __u64 names_ptr; /* W: names list (ELEM_ADD only) */
+ unsigned int names_length;
+ } enumerated;
+ unsigned char reserved[128];
+ } value;
+ union {
+ unsigned short d[4]; /* dimensions */
+ unsigned short *d_ptr; /* indirect - obsoleted */
+ } dimen;
+ unsigned char reserved[64-4*sizeof(unsigned short)];
+};
+
+struct snd_ctl_elem_value {
+ struct snd_ctl_elem_id id; /* W: element ID */
+ unsigned int indirect: 1; /* W: indirect access - obsoleted */
+ union {
+ union {
+ long value[128];
+ long *value_ptr; /* obsoleted */
+ } integer;
+ union {
+ long long value[64];
+ long long *value_ptr; /* obsoleted */
+ } integer64;
+ union {
+ unsigned int item[128];
+ unsigned int *item_ptr; /* obsoleted */
+ } enumerated;
+ union {
+ unsigned char data[512];
+ unsigned char *data_ptr; /* obsoleted */
+ } bytes;
+ struct snd_aes_iec958 iec958;
+ } value; /* RO */
+ struct timespec tstamp;
+ unsigned char reserved[128-sizeof(struct timespec)];
+};
+
+struct snd_ctl_tlv {
+ unsigned int numid; /* control element numeric identification */
+ unsigned int length; /* in bytes aligned to 4 */
+ unsigned int tlv[0]; /* first TLV */
+};
+
+#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
+#define SNDRV_CTL_IOCTL_CARD_INFO _IOR('U', 0x01, struct snd_ctl_card_info)
+#define SNDRV_CTL_IOCTL_ELEM_LIST _IOWR('U', 0x10, struct snd_ctl_elem_list)
+#define SNDRV_CTL_IOCTL_ELEM_INFO _IOWR('U', 0x11, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_READ _IOWR('U', 0x12, struct snd_ctl_elem_value)
+#define SNDRV_CTL_IOCTL_ELEM_WRITE _IOWR('U', 0x13, struct snd_ctl_elem_value)
+#define SNDRV_CTL_IOCTL_ELEM_LOCK _IOW('U', 0x14, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_ELEM_UNLOCK _IOW('U', 0x15, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS _IOWR('U', 0x16, int)
+#define SNDRV_CTL_IOCTL_ELEM_ADD _IOWR('U', 0x17, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_REPLACE _IOWR('U', 0x18, struct snd_ctl_elem_info)
+#define SNDRV_CTL_IOCTL_ELEM_REMOVE _IOWR('U', 0x19, struct snd_ctl_elem_id)
+#define SNDRV_CTL_IOCTL_TLV_READ _IOWR('U', 0x1a, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_TLV_WRITE _IOWR('U', 0x1b, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_TLV_COMMAND _IOWR('U', 0x1c, struct snd_ctl_tlv)
+#define SNDRV_CTL_IOCTL_HWDEP_NEXT_DEVICE _IOWR('U', 0x20, int)
+#define SNDRV_CTL_IOCTL_HWDEP_INFO _IOR('U', 0x21, struct snd_hwdep_info)
+#define SNDRV_CTL_IOCTL_PCM_NEXT_DEVICE _IOR('U', 0x30, int)
+#define SNDRV_CTL_IOCTL_PCM_INFO _IOWR('U', 0x31, struct snd_pcm_info)
+#define SNDRV_CTL_IOCTL_PCM_PREFER_SUBDEVICE _IOW('U', 0x32, int)
+#define SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE _IOWR('U', 0x40, int)
+#define SNDRV_CTL_IOCTL_RAWMIDI_INFO _IOWR('U', 0x41, struct snd_rawmidi_info)
+#define SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE _IOW('U', 0x42, int)
+#define SNDRV_CTL_IOCTL_POWER _IOWR('U', 0xd0, int)
+#define SNDRV_CTL_IOCTL_POWER_STATE _IOR('U', 0xd1, int)
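A small sketch of the control API above, assuming the conventional "/dev/snd/controlC0" node name for the first card:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
        struct snd_ctl_card_info info;
        int fd = open("/dev/snd/controlC0", O_RDONLY);  /* assumed node name */

        if (fd < 0)
                return 1;
        if (ioctl(fd, SNDRV_CTL_IOCTL_CARD_INFO, &info) == 0)
                printf("card %d: %s (%s)\n", info.card, info.name, info.longname);
        return 0;
}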
+
+/*
+ * Read interface.
+ */
+
+enum sndrv_ctl_event_type {
+ SNDRV_CTL_EVENT_ELEM = 0,
+ SNDRV_CTL_EVENT_LAST = SNDRV_CTL_EVENT_ELEM,
+};
+
+#define SNDRV_CTL_EVENT_MASK_VALUE (1<<0) /* element value was changed */
+#define SNDRV_CTL_EVENT_MASK_INFO (1<<1) /* element info was changed */
+#define SNDRV_CTL_EVENT_MASK_ADD (1<<2) /* element was added */
+#define SNDRV_CTL_EVENT_MASK_TLV (1<<3) /* element TLV tree was changed */
+#define SNDRV_CTL_EVENT_MASK_REMOVE (~0U) /* element was removed */
+
+struct snd_ctl_event {
+ int type; /* event type - SNDRV_CTL_EVENT_* */
+ union {
+ struct {
+ unsigned int mask;
+ struct snd_ctl_elem_id id;
+ } elem;
+ unsigned char data8[60];
+ } data;
+};
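A corresponding sketch of the read interface: subscribe once, then each read() on the control node is assumed to return one struct snd_ctl_event (the node name is again an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
        struct snd_ctl_event ev;
        int subscribe = 1;
        int fd = open("/dev/snd/controlC0", O_RDONLY);  /* assumed node name */

        if (fd < 0)
                return 1;
        ioctl(fd, SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS, &subscribe);
        while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                if (ev.type == SNDRV_CTL_EVENT_ELEM)
                        printf("elem '%s' mask 0x%x\n",
                               ev.data.elem.id.name, ev.data.elem.mask);
        return 0;
}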
+
+/*
+ * Control names
+ */
+
+#define SNDRV_CTL_NAME_NONE ""
+#define SNDRV_CTL_NAME_PLAYBACK "Playback "
+#define SNDRV_CTL_NAME_CAPTURE "Capture "
+
+#define SNDRV_CTL_NAME_IEC958_NONE ""
+#define SNDRV_CTL_NAME_IEC958_SWITCH "Switch"
+#define SNDRV_CTL_NAME_IEC958_VOLUME "Volume"
+#define SNDRV_CTL_NAME_IEC958_DEFAULT "Default"
+#define SNDRV_CTL_NAME_IEC958_MASK "Mask"
+#define SNDRV_CTL_NAME_IEC958_CON_MASK "Con Mask"
+#define SNDRV_CTL_NAME_IEC958_PRO_MASK "Pro Mask"
+#define SNDRV_CTL_NAME_IEC958_PCM_STREAM "PCM Stream"
+#define SNDRV_CTL_NAME_IEC958(expl,direction,what) "IEC958 " expl SNDRV_CTL_NAME_##direction SNDRV_CTL_NAME_IEC958_##what
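For readability, one illustrative expansion of the name-building macro above:

/*   SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT)
 *     -> "IEC958 " "" "Playback " "Default"
 *     -> "IEC958 Playback Default"
 */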
+
+#endif /* _UAPI__SOUND_ASOUND_H */
diff --git a/include/sound/asound_fm.h b/include/uapi/sound/asound_fm.h
index c2a4b967d5b..c2a4b967d5b 100644
--- a/include/sound/asound_fm.h
+++ b/include/uapi/sound/asound_fm.h
diff --git a/include/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 05341a43fed..05341a43fed 100644
--- a/include/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
diff --git a/include/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 602dc6c45d1..602dc6c45d1 100644
--- a/include/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
new file mode 100644
index 00000000000..d1bbaf78457
--- /dev/null
+++ b/include/uapi/sound/emu10k1.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) by Jaroslav Kysela <perex@perex.cz>,
+ * Creative Labs, Inc.
+ * Definitions for EMU10K1 (SB Live!) chips
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _UAPI__SOUND_EMU10K1_H
+#define _UAPI__SOUND_EMU10K1_H
+
+#include <linux/types.h>
+
+
+
+/*
+ * ---- FX8010 ----
+ */
+
+#define EMU10K1_CARD_CREATIVE 0x00000000
+#define EMU10K1_CARD_EMUAPS 0x00000001
+
+#define EMU10K1_FX8010_PCM_COUNT 8
+
+/* instruction set */
+#define iMAC0 0x00 /* R = A + (X * Y >> 31) ; saturation */
+#define iMAC1 0x01 /* R = A + (-X * Y >> 31) ; saturation */
+#define iMAC2 0x02 /* R = A + (X * Y >> 31) ; wraparound */
+#define iMAC3 0x03 /* R = A + (-X * Y >> 31) ; wraparound */
+#define iMACINT0 0x04 /* R = A + X * Y ; saturation */
+#define iMACINT1 0x05 /* R = A + X * Y ; wraparound (31-bit) */
+#define iACC3 0x06 /* R = A + X + Y ; saturation */
+#define iMACMV 0x07 /* R = A, acc += X * Y >> 31 */
+#define iANDXOR 0x08 /* R = (A & X) ^ Y */
+#define iTSTNEG 0x09 /* R = (A >= Y) ? X : ~X */
+#define iLIMITGE 0x0a /* R = (A >= Y) ? X : Y */
+#define iLIMITLT 0x0b /* R = (A < Y) ? X : Y */
+#define iLOG 0x0c /* R = linear_data, A (log_data), X (max_exp), Y (format_word) */
+#define iEXP 0x0d /* R = log_data, A (linear_data), X (max_exp), Y (format_word) */
+#define iINTERP 0x0e /* R = A + (X * (Y - A) >> 31) ; saturation */
+#define iSKIP 0x0f /* R = A (cc_reg), X (count), Y (cc_test) */
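As a reading aid, a plain-C model of the first opcode's arithmetic, assuming 32-bit signed saturation as described in the comments above:

#include <stdint.h>

/* iMAC0: R = A + (X * Y >> 31), saturated to the 32-bit signed range */
static int32_t fx_imac0(int32_t a, int32_t x, int32_t y)
{
        int64_t r = (int64_t)a + (((int64_t)x * (int64_t)y) >> 31);

        if (r > INT32_MAX)
                return INT32_MAX;
        if (r < INT32_MIN)
                return INT32_MIN;
        return (int32_t)r;
}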
+
+/* GPRs */
+#define FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x0f */
+#define EXTIN(x) (0x10 + (x)) /* x = 0x00 - 0x0f */
+#define EXTOUT(x) (0x20 + (x)) /* x = 0x00 - 0x0f physical outs -> FXWC low 16 bits */
+#define FXBUS2(x) (0x30 + (x)) /* x = 0x00 - 0x0f copies of fx buses for capture -> FXWC high 16 bits */
+ /* NB: 0x31 and 0x32 are shared with Center/LFE on SB live 5.1 */
+
+#define C_00000000 0x40
+#define C_00000001 0x41
+#define C_00000002 0x42
+#define C_00000003 0x43
+#define C_00000004 0x44
+#define C_00000008 0x45
+#define C_00000010 0x46
+#define C_00000020 0x47
+#define C_00000100 0x48
+#define C_00010000 0x49
+#define C_00080000 0x4a
+#define C_10000000 0x4b
+#define C_20000000 0x4c
+#define C_40000000 0x4d
+#define C_80000000 0x4e
+#define C_7fffffff 0x4f
+#define C_ffffffff 0x50
+#define C_fffffffe 0x51
+#define C_c0000000 0x52
+#define C_4f1bbcdc 0x53
+#define C_5a7ef9db 0x54
+#define C_00100000 0x55 /* ?? */
+#define GPR_ACCU 0x56 /* ACCUM, accumulator */
+#define GPR_COND 0x57 /* CCR, condition register */
+#define GPR_NOISE0 0x58 /* noise source */
+#define GPR_NOISE1 0x59 /* noise source */
+#define GPR_IRQ 0x5a /* IRQ register */
+#define GPR_DBAC 0x5b /* TRAM Delay Base Address Counter */
+#define GPR(x) (FXGPREGBASE + (x)) /* free GPRs: x = 0x00 - 0xff */
+#define ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
+#define ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
+#define ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0x7f */
+#define ETRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x80 + (x)) /* x = 0x00 - 0x1f */
+
+#define A_ITRAM_DATA(x) (TANKMEMDATAREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
+#define A_ETRAM_DATA(x) (TANKMEMDATAREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
+#define A_ITRAM_ADDR(x) (TANKMEMADDRREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
+#define A_ETRAM_ADDR(x) (TANKMEMADDRREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
+#define A_ITRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0x00 + (x)) /* x = 0x00 - 0xbf */
+#define A_ETRAM_CTL(x) (A_TANKMEMCTLREGBASE + 0xc0 + (x)) /* x = 0x00 - 0x3f */
+
+#define A_FXBUS(x) (0x00 + (x)) /* x = 0x00 - 0x3f FX buses */
+#define A_EXTIN(x) (0x40 + (x)) /* x = 0x00 - 0x0f physical ins */
+#define A_P16VIN(x) (0x50 + (x)) /* x = 0x00 - 0x0f p16v ins (A2 only) "EMU32 inputs" */
+#define A_EXTOUT(x) (0x60 + (x)) /* x = 0x00 - 0x1f physical outs -> A_FXWC1 0x79-7f unknown */
+#define A_FXBUS2(x) (0x80 + (x)) /* x = 0x00 - 0x1f extra outs used for EFX capture -> A_FXWC2 */
+#define A_EMU32OUTH(x) (0xa0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_10 - _1F" - ??? */
+#define A_EMU32OUTL(x) (0xb0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_1 - _F" - ??? */
+#define A3_EMU32IN(x) (0x160 + (x)) /* x = 0x00 - 0x3f "EMU32_IN_00 - _3F" - Only when .device = 0x0008 */
+#define A3_EMU32OUT(x) (0x1E0 + (x)) /* x = 0x00 - 0x0f "EMU32_OUT_00 - _3F" - Only when .device = 0x0008 */
+#define A_GPR(x) (A_FXGPREGBASE + (x))
+
+/* cc_reg constants */
+#define CC_REG_NORMALIZED C_00000001
+#define CC_REG_BORROW C_00000002
+#define CC_REG_MINUS C_00000004
+#define CC_REG_ZERO C_00000008
+#define CC_REG_SATURATE C_00000010
+#define CC_REG_NONZERO C_00000100
+
+/* FX buses */
+#define FXBUS_PCM_LEFT 0x00
+#define FXBUS_PCM_RIGHT 0x01
+#define FXBUS_PCM_LEFT_REAR 0x02
+#define FXBUS_PCM_RIGHT_REAR 0x03
+#define FXBUS_MIDI_LEFT 0x04
+#define FXBUS_MIDI_RIGHT 0x05
+#define FXBUS_PCM_CENTER 0x06
+#define FXBUS_PCM_LFE 0x07
+#define FXBUS_PCM_LEFT_FRONT 0x08
+#define FXBUS_PCM_RIGHT_FRONT 0x09
+#define FXBUS_MIDI_REVERB 0x0c
+#define FXBUS_MIDI_CHORUS 0x0d
+#define FXBUS_PCM_LEFT_SIDE 0x0e
+#define FXBUS_PCM_RIGHT_SIDE 0x0f
+#define FXBUS_PT_LEFT 0x14
+#define FXBUS_PT_RIGHT 0x15
+
+/* Inputs */
+#define EXTIN_AC97_L 0x00 /* AC'97 capture channel - left */
+#define EXTIN_AC97_R 0x01 /* AC'97 capture channel - right */
+#define EXTIN_SPDIF_CD_L 0x02 /* internal S/PDIF CD - onboard - left */
+#define EXTIN_SPDIF_CD_R 0x03 /* internal S/PDIF CD - onboard - right */
+#define EXTIN_ZOOM_L 0x04 /* Zoom Video I2S - left */
+#define EXTIN_ZOOM_R 0x05 /* Zoom Video I2S - right */
+#define EXTIN_TOSLINK_L 0x06 /* LiveDrive - TOSLink Optical - left */
+#define EXTIN_TOSLINK_R 0x07 /* LiveDrive - TOSLink Optical - right */
+#define EXTIN_LINE1_L 0x08 /* LiveDrive - Line/Mic 1 - left */
+#define EXTIN_LINE1_R 0x09 /* LiveDrive - Line/Mic 1 - right */
+#define EXTIN_COAX_SPDIF_L 0x0a /* LiveDrive - Coaxial S/PDIF - left */
+#define EXTIN_COAX_SPDIF_R 0x0b /* LiveDrive - Coaxial S/PDIF - right */
+#define EXTIN_LINE2_L 0x0c /* LiveDrive - Line/Mic 2 - left */
+#define EXTIN_LINE2_R 0x0d /* LiveDrive - Line/Mic 2 - right */
+
+/* Outputs */
+#define EXTOUT_AC97_L 0x00 /* AC'97 playback channel - left */
+#define EXTOUT_AC97_R 0x01 /* AC'97 playback channel - right */
+#define EXTOUT_TOSLINK_L 0x02 /* LiveDrive - TOSLink Optical - left */
+#define EXTOUT_TOSLINK_R 0x03 /* LiveDrive - TOSLink Optical - right */
+#define EXTOUT_AC97_CENTER 0x04 /* SB Live 5.1 - center */
+#define EXTOUT_AC97_LFE 0x05 /* SB Live 5.1 - LFE */
+#define EXTOUT_HEADPHONE_L 0x06 /* LiveDrive - Headphone - left */
+#define EXTOUT_HEADPHONE_R 0x07 /* LiveDrive - Headphone - right */
+#define EXTOUT_REAR_L 0x08 /* Rear channel - left */
+#define EXTOUT_REAR_R 0x09 /* Rear channel - right */
+#define EXTOUT_ADC_CAP_L 0x0a /* ADC Capture buffer - left */
+#define EXTOUT_ADC_CAP_R 0x0b /* ADC Capture buffer - right */
+#define EXTOUT_MIC_CAP 0x0c /* MIC Capture buffer */
+#define EXTOUT_AC97_REAR_L 0x0d /* SB Live 5.1 (c) 2003 - Rear Left */
+#define EXTOUT_AC97_REAR_R 0x0e /* SB Live 5.1 (c) 2003 - Rear Right */
+#define EXTOUT_ACENTER 0x11 /* Analog Center */
+#define EXTOUT_ALFE 0x12 /* Analog LFE */
+
+/* Audigy Inputs */
+#define A_EXTIN_AC97_L 0x00 /* AC'97 capture channel - left */
+#define A_EXTIN_AC97_R 0x01 /* AC'97 capture channel - right */
+#define A_EXTIN_SPDIF_CD_L 0x02 /* digital CD left */
+#define A_EXTIN_SPDIF_CD_R 0x03 /* digital CD right */
+#define A_EXTIN_OPT_SPDIF_L 0x04 /* audigy drive Optical SPDIF - left */
+#define A_EXTIN_OPT_SPDIF_R 0x05 /* right */
+#define A_EXTIN_LINE2_L 0x08 /* audigy drive line2/mic2 - left */
+#define A_EXTIN_LINE2_R 0x09 /* right */
+#define A_EXTIN_ADC_L 0x0a /* Philips ADC - left */
+#define A_EXTIN_ADC_R 0x0b /* right */
+#define A_EXTIN_AUX2_L 0x0c /* audigy drive aux2 - left */
+#define A_EXTIN_AUX2_R 0x0d /* - right */
+
+/* Audigy Outputs */
+#define A_EXTOUT_FRONT_L 0x00 /* digital front left */
+#define A_EXTOUT_FRONT_R 0x01 /* right */
+#define A_EXTOUT_CENTER 0x02 /* digital front center */
+#define A_EXTOUT_LFE 0x03 /* digital front lfe */
+#define A_EXTOUT_HEADPHONE_L 0x04 /* headphone audigy drive left */
+#define A_EXTOUT_HEADPHONE_R 0x05 /* right */
+#define A_EXTOUT_REAR_L 0x06 /* digital rear left */
+#define A_EXTOUT_REAR_R 0x07 /* right */
+#define A_EXTOUT_AFRONT_L 0x08 /* analog front left */
+#define A_EXTOUT_AFRONT_R 0x09 /* right */
+#define A_EXTOUT_ACENTER 0x0a /* analog center */
+#define A_EXTOUT_ALFE 0x0b /* analog LFE */
+#define A_EXTOUT_ASIDE_L 0x0c /* analog side left - Audigy 2 ZS */
+#define A_EXTOUT_ASIDE_R 0x0d /* right - Audigy 2 ZS */
+#define A_EXTOUT_AREAR_L 0x0e /* analog rear left */
+#define A_EXTOUT_AREAR_R 0x0f /* right */
+#define A_EXTOUT_AC97_L 0x10 /* AC97 left (front) */
+#define A_EXTOUT_AC97_R 0x11 /* right */
+#define A_EXTOUT_ADC_CAP_L 0x16 /* ADC capture buffer left */
+#define A_EXTOUT_ADC_CAP_R 0x17 /* right */
+#define A_EXTOUT_MIC_CAP 0x18 /* Mic capture buffer */
+
+/* Audigy constants */
+#define A_C_00000000 0xc0
+#define A_C_00000001 0xc1
+#define A_C_00000002 0xc2
+#define A_C_00000003 0xc3
+#define A_C_00000004 0xc4
+#define A_C_00000008 0xc5
+#define A_C_00000010 0xc6
+#define A_C_00000020 0xc7
+#define A_C_00000100 0xc8
+#define A_C_00010000 0xc9
+#define A_C_00000800 0xca
+#define A_C_10000000 0xcb
+#define A_C_20000000 0xcc
+#define A_C_40000000 0xcd
+#define A_C_80000000 0xce
+#define A_C_7fffffff 0xcf
+#define A_C_ffffffff 0xd0
+#define A_C_fffffffe 0xd1
+#define A_C_c0000000 0xd2
+#define A_C_4f1bbcdc 0xd3
+#define A_C_5a7ef9db 0xd4
+#define A_C_00100000 0xd5
+#define A_GPR_ACCU 0xd6 /* ACCUM, accumulator */
+#define A_GPR_COND 0xd7 /* CCR, condition register */
+#define A_GPR_NOISE0 0xd8 /* noise source */
+#define A_GPR_NOISE1 0xd9 /* noise source */
+#define A_GPR_IRQ 0xda /* IRQ register */
+#define A_GPR_DBAC 0xdb /* TRAM Delay Base Address Counter - internal */
+#define A_GPR_DBACE 0xde /* TRAM Delay Base Address Counter - external */
+
+/* definitions for debug register */
+#define EMU10K1_DBG_ZC 0x80000000 /* zero tram counter */
+#define EMU10K1_DBG_SATURATION_OCCURED 0x02000000 /* saturation control */
+#define EMU10K1_DBG_SATURATION_ADDR 0x01ff0000 /* saturation address */
+#define EMU10K1_DBG_SINGLE_STEP 0x00008000 /* single step mode */
+#define EMU10K1_DBG_STEP 0x00004000 /* start single step */
+#define EMU10K1_DBG_CONDITION_CODE 0x00003e00 /* condition code */
+#define EMU10K1_DBG_SINGLE_STEP_ADDR 0x000001ff /* single step address */
+
+/* tank memory address line */
+#ifndef __KERNEL__
+#define TANKMEMADDRREG_ADDR_MASK 0x000fffff /* 20 bit tank address field */
+#define TANKMEMADDRREG_CLEAR 0x00800000 /* Clear tank memory */
+#define TANKMEMADDRREG_ALIGN 0x00400000 /* Align read or write relative to tank access */
+#define TANKMEMADDRREG_WRITE 0x00200000 /* Write to tank memory */
+#define TANKMEMADDRREG_READ 0x00100000 /* Read from tank memory */
+#endif
+
+struct snd_emu10k1_fx8010_info {
+ unsigned int internal_tram_size; /* in samples */
+ unsigned int external_tram_size; /* in samples */
+ char fxbus_names[16][32]; /* names of FXBUSes */
+ char extin_names[16][32]; /* names of external inputs */
+ char extout_names[32][32]; /* names of external outputs */
+ unsigned int gpr_controls; /* count of GPR controls */
+};
+
+#define EMU10K1_GPR_TRANSLATION_NONE 0
+#define EMU10K1_GPR_TRANSLATION_TABLE100 1
+#define EMU10K1_GPR_TRANSLATION_BASS 2
+#define EMU10K1_GPR_TRANSLATION_TREBLE 3
+#define EMU10K1_GPR_TRANSLATION_ONOFF 4
+
+struct snd_emu10k1_fx8010_control_gpr {
+ struct snd_ctl_elem_id id; /* full control ID definition */
+ unsigned int vcount; /* visible count */
+ unsigned int count; /* count of GPR (1..16) */
+ unsigned short gpr[32]; /* GPR number(s) */
+ unsigned int value[32]; /* initial values */
+ unsigned int min; /* minimum range */
+ unsigned int max; /* maximum range */
+ unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
+ const unsigned int *tlv;
+};
+
+/* old ABI without TLV support */
+struct snd_emu10k1_fx8010_control_old_gpr {
+ struct snd_ctl_elem_id id;
+ unsigned int vcount;
+ unsigned int count;
+ unsigned short gpr[32];
+ unsigned int value[32];
+ unsigned int min;
+ unsigned int max;
+ unsigned int translation;
+};
+
+struct snd_emu10k1_fx8010_code {
+ char name[128];
+
+ DECLARE_BITMAP(gpr_valid, 0x200); /* bitmask of valid initializers */
+ __u32 __user *gpr_map; /* initializers */
+
+ unsigned int gpr_add_control_count; /* count of GPR controls to add/replace */
+ struct snd_emu10k1_fx8010_control_gpr __user *gpr_add_controls; /* GPR controls to add/replace */
+
+ unsigned int gpr_del_control_count; /* count of GPR controls to remove */
+ struct snd_ctl_elem_id __user *gpr_del_controls; /* IDs of GPR controls to remove */
+
+ unsigned int gpr_list_control_count; /* count of GPR controls to list */
+ unsigned int gpr_list_control_total; /* total count of GPR controls */
+ struct snd_emu10k1_fx8010_control_gpr __user *gpr_list_controls; /* listed GPR controls */
+
+ DECLARE_BITMAP(tram_valid, 0x100); /* bitmask of valid initializers */
+ __u32 __user *tram_data_map; /* data initializers */
+ __u32 __user *tram_addr_map; /* map initializers */
+
+ DECLARE_BITMAP(code_valid, 1024); /* bitmask of valid instructions */
+ __u32 __user *code; /* one instruction - 64 bits */
+};
+
+struct snd_emu10k1_fx8010_tram {
+ unsigned int address; /* bit 31 == 1 -> external TRAM */
+ unsigned int size; /* size in samples (4 bytes) */
+ unsigned int *samples; /* pointer to samples (20-bit) */
+ /* NULL->clear memory */
+};
+
+struct snd_emu10k1_fx8010_pcm_rec {
+ unsigned int substream; /* substream number */
+ unsigned int res1; /* reserved */
+ unsigned int channels; /* count of 16-bit channels; zero = remove this substream */
+ unsigned int tram_start; /* ring buffer position in TRAM (in samples) */
+ unsigned int buffer_size; /* count of buffered samples */
+ unsigned short gpr_size; /* GPR containing size of ringbuffer in samples (host) */
+ unsigned short gpr_ptr; /* GPR containing current pointer in the ring buffer (host = reset, FX8010) */
+ unsigned short gpr_count; /* GPR containing count of samples between two interrupts (host) */
+ unsigned short gpr_tmpcount; /* GPR containing current count of samples to interrupt (host = set, FX8010) */
+ unsigned short gpr_trigger; /* GPR containing trigger (activate) information (host) */
+ unsigned short gpr_running; /* GPR containing info if PCM is running (FX8010) */
+ unsigned char pad; /* reserved */
+ unsigned char etram[32]; /* external TRAM address & data (one per channel) */
+ unsigned int res2; /* reserved */
+};
+
+#define SNDRV_EMU10K1_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 1)
+
+#define SNDRV_EMU10K1_IOCTL_INFO _IOR ('H', 0x10, struct snd_emu10k1_fx8010_info)
+#define SNDRV_EMU10K1_IOCTL_CODE_POKE _IOW ('H', 0x11, struct snd_emu10k1_fx8010_code)
+#define SNDRV_EMU10K1_IOCTL_CODE_PEEK _IOWR('H', 0x12, struct snd_emu10k1_fx8010_code)
+#define SNDRV_EMU10K1_IOCTL_TRAM_SETUP _IOW ('H', 0x20, int)
+#define SNDRV_EMU10K1_IOCTL_TRAM_POKE _IOW ('H', 0x21, struct snd_emu10k1_fx8010_tram)
+#define SNDRV_EMU10K1_IOCTL_TRAM_PEEK _IOWR('H', 0x22, struct snd_emu10k1_fx8010_tram)
+#define SNDRV_EMU10K1_IOCTL_PCM_POKE _IOW ('H', 0x30, struct snd_emu10k1_fx8010_pcm_rec)
+#define SNDRV_EMU10K1_IOCTL_PCM_PEEK _IOWR('H', 0x31, struct snd_emu10k1_fx8010_pcm_rec)
+#define SNDRV_EMU10K1_IOCTL_PVERSION _IOR ('H', 0x40, int)
+#define SNDRV_EMU10K1_IOCTL_STOP _IO ('H', 0x80)
+#define SNDRV_EMU10K1_IOCTL_CONTINUE _IO ('H', 0x81)
+#define SNDRV_EMU10K1_IOCTL_ZERO_TRAM_COUNTER _IO ('H', 0x82)
+#define SNDRV_EMU10K1_IOCTL_SINGLE_STEP _IOW ('H', 0x83, int)
+#define SNDRV_EMU10K1_IOCTL_DBG_READ _IOR ('H', 0x84, int)
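A sketch of how the info ioctl might be used from user space; the "/dev/snd/hwC0D0" hwdep node name is an assumption and header inclusion details are glossed over:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sound/asound.h>
#include <sound/emu10k1.h>

int main(void)
{
        struct snd_emu10k1_fx8010_info info;
        int fd = open("/dev/snd/hwC0D0", O_RDONLY);     /* assumed node name */

        if (fd < 0)
                return 1;
        if (ioctl(fd, SNDRV_EMU10K1_IOCTL_INFO, &info) == 0)
                printf("internal TRAM %u samples, %u GPR controls\n",
                       info.internal_tram_size, info.gpr_controls);
        return 0;
}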
+
+/* typedefs for compatibility to user-space */
+typedef struct snd_emu10k1_fx8010_info emu10k1_fx8010_info_t;
+typedef struct snd_emu10k1_fx8010_control_gpr emu10k1_fx8010_control_gpr_t;
+typedef struct snd_emu10k1_fx8010_code emu10k1_fx8010_code_t;
+typedef struct snd_emu10k1_fx8010_tram emu10k1_fx8010_tram_t;
+typedef struct snd_emu10k1_fx8010_pcm_rec emu10k1_fx8010_pcm_t;
+
+#endif /* _UAPI__SOUND_EMU10K1_H */
diff --git a/include/sound/hdsp.h b/include/uapi/sound/hdsp.h
index 0909a384347..0909a384347 100644
--- a/include/sound/hdsp.h
+++ b/include/uapi/sound/hdsp.h
diff --git a/include/sound/hdspm.h b/include/uapi/sound/hdspm.h
index 1f59ea2a4a7..1f59ea2a4a7 100644
--- a/include/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
diff --git a/include/uapi/sound/sb16_csp.h b/include/uapi/sound/sb16_csp.h
new file mode 100644
index 00000000000..3b96907e2af
--- /dev/null
+++ b/include/uapi/sound/sb16_csp.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 1999 by Uros Bizjak <uros@kss-loka.si>
+ * Takashi Iwai <tiwai@suse.de>
+ *
+ * SB16ASP/AWE32 CSP control
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _UAPI__SOUND_SB16_CSP_H
+#define _UAPI__SOUND_SB16_CSP_H
+
+
+/* CSP modes */
+#define SNDRV_SB_CSP_MODE_NONE 0x00
+#define SNDRV_SB_CSP_MODE_DSP_READ 0x01 /* Record from DSP */
+#define SNDRV_SB_CSP_MODE_DSP_WRITE 0x02 /* Play to DSP */
+#define SNDRV_SB_CSP_MODE_QSOUND 0x04 /* QSound */
+
+/* CSP load flags */
+#define SNDRV_SB_CSP_LOAD_FROMUSER 0x01
+#define SNDRV_SB_CSP_LOAD_INITBLOCK 0x02
+
+/* CSP sample width */
+#define SNDRV_SB_CSP_SAMPLE_8BIT 0x01
+#define SNDRV_SB_CSP_SAMPLE_16BIT 0x02
+
+/* CSP channels */
+#define SNDRV_SB_CSP_MONO 0x01
+#define SNDRV_SB_CSP_STEREO 0x02
+
+/* CSP rates */
+#define SNDRV_SB_CSP_RATE_8000 0x01
+#define SNDRV_SB_CSP_RATE_11025 0x02
+#define SNDRV_SB_CSP_RATE_22050 0x04
+#define SNDRV_SB_CSP_RATE_44100 0x08
+#define SNDRV_SB_CSP_RATE_ALL 0x0f
+
+/* CSP running state */
+#define SNDRV_SB_CSP_ST_IDLE 0x00
+#define SNDRV_SB_CSP_ST_LOADED 0x01
+#define SNDRV_SB_CSP_ST_RUNNING 0x02
+#define SNDRV_SB_CSP_ST_PAUSED 0x04
+#define SNDRV_SB_CSP_ST_AUTO 0x08
+#define SNDRV_SB_CSP_ST_QSOUND 0x10
+
+/* maximum QSound value (180 degrees right) */
+#define SNDRV_SB_CSP_QSOUND_MAX_RIGHT 0x20
+
+/* maximum microcode RIFF file size */
+#define SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE 0x3000
+
+/* microcode header */
+struct snd_sb_csp_mc_header {
+ char codec_name[16]; /* id name of codec */
+ unsigned short func_req; /* requested function */
+};
+
+/* microcode to be loaded */
+struct snd_sb_csp_microcode {
+ struct snd_sb_csp_mc_header info;
+ unsigned char data[SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE];
+};
+
+/* start CSP with sample_width in mono/stereo */
+struct snd_sb_csp_start {
+ int sample_width; /* sample width, see the constants above */
+ int channels; /* channels, see the constants above */
+};
+
+/* CSP information */
+struct snd_sb_csp_info {
+ char codec_name[16]; /* id name of codec */
+ unsigned short func_nr; /* function number */
+ unsigned int acc_format; /* accepted PCM formats */
+ unsigned short acc_channels; /* accepted channels */
+ unsigned short acc_width; /* accepted sample width */
+ unsigned short acc_rates; /* accepted sample rates */
+ unsigned short csp_mode; /* CSP mode, see above */
+ unsigned short run_channels; /* current channels */
+ unsigned short run_width; /* current sample width */
+ unsigned short version; /* version id: 0x10 - 0x1f */
+ unsigned short state; /* state bits */
+};
+
+/* HWDEP controls */
+/* get CSP information */
+#define SNDRV_SB_CSP_IOCTL_INFO _IOR('H', 0x10, struct snd_sb_csp_info)
+/* load microcode to CSP */
+/* NOTE: struct snd_sb_csp_microcode overflows the maximum ioctl size field
+ * (13 bits) defined for some architectures such as MIPS, which leads to
+ * build errors.  (x86 and friends have a 14-bit size field, so it is valid
+ * there.)  As a workaround to skip the size-limit check, we don't use the
+ * normal _IOW() macro here but _IOC() with the size passed manually.
+ */
+#define SNDRV_SB_CSP_IOCTL_LOAD_CODE \
+ _IOC(_IOC_WRITE, 'H', 0x11, sizeof(struct snd_sb_csp_microcode))
+/* unload microcode from CSP */
+#define SNDRV_SB_CSP_IOCTL_UNLOAD_CODE _IO('H', 0x12)
+/* start CSP */
+#define SNDRV_SB_CSP_IOCTL_START _IOW('H', 0x13, struct snd_sb_csp_start)
+/* stop CSP */
+#define SNDRV_SB_CSP_IOCTL_STOP _IO('H', 0x14)
+/* pause CSP and DMA transfer */
+#define SNDRV_SB_CSP_IOCTL_PAUSE _IO('H', 0x15)
+/* restart CSP and DMA transfer */
+#define SNDRV_SB_CSP_IOCTL_RESTART _IO('H', 0x16)
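A sketch of driving these hwdep controls, assuming the "/dev/snd/hwC0D0" node name and a codec already loaded:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sound/sb16_csp.h>

int main(void)
{
        struct snd_sb_csp_info info;
        struct snd_sb_csp_start start;
        int fd = open("/dev/snd/hwC0D0", O_RDWR);       /* assumed node name */

        if (fd < 0)
                return 1;
        if (ioctl(fd, SNDRV_SB_CSP_IOCTL_INFO, &info) == 0)
                printf("codec %.16s state 0x%x\n", info.codec_name, info.state);

        start.sample_width = SNDRV_SB_CSP_SAMPLE_16BIT;
        start.channels = SNDRV_SB_CSP_MONO;
        if (ioctl(fd, SNDRV_SB_CSP_IOCTL_START, &start) < 0)
                perror("SNDRV_SB_CSP_IOCTL_START");
        return 0;
}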
+
+
+#endif /* _UAPI__SOUND_SB16_CSP_H */
diff --git a/include/sound/sfnt_info.h b/include/uapi/sound/sfnt_info.h
index 1bce7fd1725..1bce7fd1725 100644
--- a/include/sound/sfnt_info.h
+++ b/include/uapi/sound/sfnt_info.h
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index 5a0e4f9efb5..f88825928dd 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -12,10 +12,6 @@
#ifndef DA8XX_FB_H
#define DA8XX_FB_H
-enum panel_type {
- QVGA = 0
-};
-
enum panel_shade {
MONOCHROME = 0,
COLOR_ACTIVE,
@@ -27,13 +23,6 @@ enum raster_load_mode {
LOAD_PALETTE,
};
-struct display_panel {
- enum panel_type panel_type; /* QVGA */
- int max_bpp;
- int min_bpp;
- enum panel_shade panel_shade;
-};
-
struct da8xx_lcdc_platform_data {
const char manu_name[10];
void *controller_data;
@@ -42,7 +31,7 @@ struct da8xx_lcdc_platform_data {
};
struct lcd_ctrl_config {
- const struct display_panel *p_disp_panel;
+ enum panel_shade panel_shade;
/* AC Bias Pin Frequency */
int ac_bias;
@@ -68,18 +57,9 @@ struct lcd_ctrl_config {
/* Mono 8-bit Mode: 1=D0-D7 or 0=D0-D3 */
unsigned char mono_8bit_mode;
- /* Invert line clock */
- unsigned char invert_line_clock;
-
- /* Invert frame clock */
- unsigned char invert_frm_clock;
-
/* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
unsigned char sync_edge;
- /* Horizontal and Vertical Sync: Control: 0=ignore */
- unsigned char sync_ctrl;
-
/* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
unsigned char raster_order;
@@ -103,5 +83,8 @@ struct lcd_sync_arg {
#define FBIPUT_HSYNC _IOW('F', 9, int)
#define FBIPUT_VSYNC _IOW('F', 10, int)
+/* Proprietary FB_SYNC_ flags */
+#define FB_SYNC_CLK_INVERT 0x40000000
+
#endif /* ifndef DA8XX_FB_H */
diff --git a/include/video/omap-panel-tfp410.h b/include/video/omap-panel-tfp410.h
index 68c31d79c57..aef35e48bc7 100644
--- a/include/video/omap-panel-tfp410.h
+++ b/include/video/omap-panel-tfp410.h
@@ -28,7 +28,7 @@ struct omap_dss_device;
* @power_down_gpio: gpio number for PD pin (or -1 if not available)
*/
struct tfp410_platform_data {
- u16 i2c_bus_num;
+ int i2c_bus_num;
int power_down_gpio;
};
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 88c829466fc..caefa093337 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -55,6 +56,7 @@
struct omap_dss_device;
struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
@@ -158,7 +160,6 @@ enum omap_display_caps {
enum omap_dss_display_state {
OMAP_DSS_DISPLAY_DISABLED = 0,
OMAP_DSS_DISPLAY_ACTIVE,
- OMAP_DSS_DISPLAY_SUSPENDED,
};
enum omap_dss_audio_state {
@@ -621,10 +622,6 @@ struct omap_dss_device {
struct {
struct omap_video_timings timings;
- int acbi; /* ac-bias pin transitions per interrupt */
- /* Unit: line clocks */
- int acb; /* ac-bias pin frequency */
-
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
struct omap_dss_dsi_videomode_timings dsi_vm_timings;
@@ -686,8 +683,6 @@ struct omap_dss_driver {
int (*enable)(struct omap_dss_device *display);
void (*disable)(struct omap_dss_device *display);
- int (*suspend)(struct omap_dss_device *display);
- int (*resume)(struct omap_dss_device *display);
int (*run_test)(struct omap_dss_device *display, int test);
int (*update)(struct omap_dss_device *dssdev,
@@ -745,6 +740,8 @@ struct omap_dss_driver {
};
+enum omapdss_version omapdss_get_version(void);
+
int omap_dss_register_driver(struct omap_dss_driver *);
void omap_dss_unregister_driver(struct omap_dss_driver *);
@@ -754,10 +751,19 @@ void omap_dss_put_device(struct omap_dss_device *dssdev);
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
int omap_dss_start_device(struct omap_dss_device *dssdev);
void omap_dss_stop_device(struct omap_dss_device *dssdev);
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
+enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+
+
int omap_dss_get_num_overlay_managers(void);
struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
@@ -779,9 +785,43 @@ typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
-int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
- unsigned long timeout);
+u32 dispc_read_irqstatus(void);
+void dispc_clear_irqstatus(u32 mask);
+u32 dispc_read_irqenable(void);
+void dispc_write_irqenable(u32 mask);
+
+int dispc_request_irq(irq_handler_t handler, void *dev_id);
+void dispc_free_irq(void *dev_id);
+
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
+
+void dispc_mgr_enable(enum omap_channel channel, bool enable);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
+bool dispc_mgr_go_busy(enum omap_channel channel);
+void dispc_mgr_go(enum omap_channel channel);
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(enum omap_channel channel,
+ const struct omap_video_timings *timings);
+void dispc_mgr_setup(enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+
+int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
+ const struct omap_overlay_info *oi,
+ const struct omap_video_timings *timings,
+ int *x_predecim, int *y_predecim);
+
+int dispc_ovl_enable(enum omap_plane plane, bool enable);
+bool dispc_ovl_enabled(enum omap_plane plane);
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel);
+int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
+ bool replication, const struct omap_video_timings *mgr_timings,
+ bool mem_to_mem);
#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
@@ -840,4 +880,35 @@ void omapdss_rfbi_set_data_lines(struct omap_dss_device *dssdev,
void omapdss_rfbi_set_interface_timings(struct omap_dss_device *dssdev,
struct rfbi_timings *timings);
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+struct dss_mgr_ops {
+ void (*start_update)(struct omap_overlay_manager *mgr);
+ int (*enable)(struct omap_overlay_manager *mgr);
+ void (*disable)(struct omap_overlay_manager *mgr);
+ void (*set_timings)(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings);
+ void (*set_lcd_config)(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config);
+ int (*register_framedone_handler)(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data);
+ void (*unregister_framedone_handler)(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data);
+};
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+void dss_uninstall_mgr_ops(void);
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings);
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config);
+int dss_mgr_enable(struct omap_overlay_manager *mgr);
+void dss_mgr_disable(struct omap_overlay_manager *mgr);
+void dss_mgr_start_update(struct omap_overlay_manager *mgr);
+int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data);
+void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+ void (*handler)(void *), void *data);
#endif
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 7ae6c07f2ef..e7554486a2b 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -24,12 +24,15 @@
#define VIDCON0 (0x00)
#define VIDCON0_INTERLACE (1 << 29)
-#define VIDCON0_VIDOUT_MASK (0x3 << 26)
+#define VIDCON0_VIDOUT_MASK (0x7 << 26)
#define VIDCON0_VIDOUT_SHIFT (26)
#define VIDCON0_VIDOUT_RGB (0x0 << 26)
#define VIDCON0_VIDOUT_TV (0x1 << 26)
#define VIDCON0_VIDOUT_I80_LDI0 (0x2 << 26)
#define VIDCON0_VIDOUT_I80_LDI1 (0x3 << 26)
+#define VIDCON0_VIDOUT_WB_RGB (0x4 << 26)
+#define VIDCON0_VIDOUT_WB_I80_LDI0 (0x6 << 26)
+#define VIDCON0_VIDOUT_WB_I80_LDI1 (0x7 << 26)
#define VIDCON0_L1_DATA_MASK (0x7 << 23)
#define VIDCON0_L1_DATA_SHIFT (23)
@@ -77,6 +80,7 @@
#define VIDCON1_LINECNT_MASK (0x7ff << 16)
#define VIDCON1_LINECNT_SHIFT (16)
#define VIDCON1_LINECNT_GET(_v) (((_v) >> 16) & 0x7ff)
+#define VIDCON1_FSTATUS_EVEN (1 << 15)
#define VIDCON1_VSTATUS_MASK (0x3 << 13)
#define VIDCON1_VSTATUS_SHIFT (13)
#define VIDCON1_VSTATUS_VSYNC (0x0 << 13)
@@ -116,6 +120,7 @@
/* VIDTCON0 */
+#define VIDTCON0 (0x10)
#define VIDTCON0_VBPDE_MASK (0xff << 24)
#define VIDTCON0_VBPDE_SHIFT (24)
#define VIDTCON0_VBPDE_LIMIT (0xff)
@@ -138,6 +143,7 @@
/* VIDTCON1 */
+#define VIDTCON1 (0x14)
#define VIDTCON1_VFPDE_MASK (0xff << 24)
#define VIDTCON1_VFPDE_SHIFT (24)
#define VIDTCON1_VFPDE_LIMIT (0xff)
@@ -159,6 +165,7 @@
#define VIDTCON1_HSPW(_x) ((_x) << 0)
#define VIDTCON2 (0x18)
+#define VIDTCON2 (0x18)
#define VIDTCON2_LINEVAL_E(_x) ((((_x) & 0x800) >> 11) << 23)
#define VIDTCON2_LINEVAL_MASK (0x7ff << 11)
#define VIDTCON2_LINEVAL_SHIFT (11)
@@ -173,18 +180,27 @@
/* WINCONx */
-
+#define WINCON(_win) (0x20 + ((_win) * 4))
+#define WINCONx_CSCWIDTH_MASK (0x3 << 26)
+#define WINCONx_CSCWIDTH_SHIFT (26)
+#define WINCONx_CSCWIDTH_WIDE (0x0 << 26)
+#define WINCONx_CSCWIDTH_NARROW (0x3 << 26)
+#define WINCONx_ENLOCAL (1 << 22)
+#define WINCONx_BUFSTATUS (1 << 21)
+#define WINCONx_BUFSEL (1 << 20)
+#define WINCONx_BUFAUTOEN (1 << 19)
#define WINCONx_BITSWP (1 << 18)
#define WINCONx_BYTSWP (1 << 17)
#define WINCONx_HAWSWP (1 << 16)
#define WINCONx_WSWP (1 << 15)
+#define WINCONx_YCbCr (1 << 13)
#define WINCONx_BURSTLEN_MASK (0x3 << 9)
#define WINCONx_BURSTLEN_SHIFT (9)
#define WINCONx_BURSTLEN_16WORD (0x0 << 9)
#define WINCONx_BURSTLEN_8WORD (0x1 << 9)
#define WINCONx_BURSTLEN_4WORD (0x2 << 9)
-
#define WINCONx_ENWIN (1 << 0)
+
#define WINCON0_BPPMODE_MASK (0xf << 2)
#define WINCON0_BPPMODE_SHIFT (2)
#define WINCON0_BPPMODE_1BPP (0x0 << 2)
@@ -196,9 +212,8 @@
#define WINCON0_BPPMODE_18BPP_666 (0x8 << 2)
#define WINCON0_BPPMODE_24BPP_888 (0xb << 2)
+#define WINCON1_LOCALSEL_CAMIF (1 << 23)
#define WINCON1_BLD_PIX (1 << 6)
-
-#define WINCON1_ALPHA_SEL (1 << 1)
#define WINCON1_BPPMODE_MASK (0xf << 2)
#define WINCON1_BPPMODE_SHIFT (2)
#define WINCON1_BPPMODE_1BPP (0x0 << 2)
@@ -216,6 +231,7 @@
#define WINCON1_BPPMODE_24BPP_A1887 (0xc << 2)
#define WINCON1_BPPMODE_25BPP_A1888 (0xd << 2)
#define WINCON1_BPPMODE_28BPP_A4888 (0xd << 2)
+#define WINCON1_ALPHA_SEL (1 << 1)
/* S5PV210 */
#define SHADOWCON (0x34)
@@ -225,6 +241,9 @@
/* Local input channels (windows 0-2) */
#define SHADOWCON_CHx_LOCAL_ENABLE(_win) (1 << (5 + (_win)))
+/* VIDOSDx */
+
+#define VIDOSD_BASE (0x40)
#define VIDOSDxA_TOPLEFT_X_E(_x) ((((_x) & 0x800) >> 11) << 23)
#define VIDOSDxA_TOPLEFT_X_MASK (0x7ff << 11)
#define VIDOSDxA_TOPLEFT_X_SHIFT (11)
@@ -293,6 +312,7 @@
/* Interrupt controls and status */
+#define VIDINTCON0 (0x130)
#define VIDINTCON0_FIFOINTERVAL_MASK (0x3f << 20)
#define VIDINTCON0_FIFOINTERVAL_SHIFT (20)
#define VIDINTCON0_FIFOINTERVAL_LIMIT (0x3f)
@@ -321,6 +341,9 @@
#define VIDINTCON0_FIFIOSEL_SHIFT (5)
#define VIDINTCON0_FIFIOSEL_WINDOW0 (0x1 << 5)
#define VIDINTCON0_FIFIOSEL_WINDOW1 (0x2 << 5)
+#define VIDINTCON0_FIFIOSEL_WINDOW2 (0x10 << 5)
+#define VIDINTCON0_FIFIOSEL_WINDOW3 (0x20 << 5)
+#define VIDINTCON0_FIFIOSEL_WINDOW4 (0x40 << 5)
#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 2)
#define VIDINTCON0_FIFOLEVEL_SHIFT (2)
@@ -357,16 +380,39 @@
#define WxKEYCON1_COLVAL_LIMIT (0xffffff)
#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
+/* Dithering control */
+#define DITHMODE (0x170)
+#define DITHMODE_R_POS_MASK (0x3 << 5)
+#define DITHMODE_R_POS_SHIFT (5)
+#define DITHMODE_R_POS_8BIT (0x0 << 5)
+#define DITHMODE_R_POS_6BIT (0x1 << 5)
+#define DITHMODE_R_POS_5BIT (0x2 << 5)
+#define DITHMODE_G_POS_MASK (0x3 << 3)
+#define DITHMODE_G_POS_SHIFT (3)
+#define DITHMODE_G_POS_8BIT (0x0 << 3)
+#define DITHMODE_G_POS_6BIT (0x1 << 3)
+#define DITHMODE_G_POS_5BIT (0x2 << 3)
+#define DITHMODE_B_POS_MASK (0x3 << 1)
+#define DITHMODE_B_POS_SHIFT (1)
+#define DITHMODE_B_POS_8BIT (0x0 << 1)
+#define DITHMODE_B_POS_6BIT (0x1 << 1)
+#define DITHMODE_B_POS_5BIT (0x2 << 1)
+#define DITHMODE_DITH_EN (1 << 0)
/* Window blanking (MAP) */
-
+#define WINxMAP(_win) (0x180 + ((_win) * 4))
#define WINxMAP_MAP (1 << 24)
#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
#define WINxMAP_MAP_COLOUR_SHIFT (0)
#define WINxMAP_MAP_COLOUR_LIMIT (0xffffff)
#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
+/* Window palette control */
+#define WPALCON (0x1A0)
#define WPALCON_PAL_UPDATE (1 << 9)
+#define WPALCON_W4PAL_16BPP_A555 (1 << 8)
+#define WPALCON_W3PAL_16BPP_A555 (1 << 7)
+#define WPALCON_W2PAL_16BPP_A555 (1 << 6)
#define WPALCON_W1PAL_MASK (0x7 << 3)
#define WPALCON_W1PAL_SHIFT (3)
#define WPALCON_W1PAL_25BPP_A888 (0x0 << 3)
@@ -376,7 +422,6 @@
#define WPALCON_W1PAL_18BPP (0x4 << 3)
#define WPALCON_W1PAL_16BPP_A555 (0x5 << 3)
#define WPALCON_W1PAL_16BPP_565 (0x6 << 3)
-
#define WPALCON_W0PAL_MASK (0x7 << 0)
#define WPALCON_W0PAL_SHIFT (0)
#define WPALCON_W0PAL_25BPP_A888 (0x0 << 0)
@@ -394,115 +439,6 @@
#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
#define S3C_FB_MAX_WIN (5) /* number of hardware windows available. */
-#define VIDCON1_FSTATUS_EVEN (1 << 15)
-
-/* Video timing controls */
-#define VIDTCON0 (0x10)
-#define VIDTCON1 (0x14)
-#define VIDTCON2 (0x18)
-
-/* Window position controls */
-
-#define WINCON(_win) (0x20 + ((_win) * 4))
-
-/* OSD1 and OSD4 do not have register D */
-
-#define VIDOSD_BASE (0x40)
-
-#define VIDINTCON0 (0x130)
-
-/* WINCONx */
-
-#define WINCONx_CSCWIDTH_MASK (0x3 << 26)
-#define WINCONx_CSCWIDTH_SHIFT (26)
-#define WINCONx_CSCWIDTH_WIDE (0x0 << 26)
-#define WINCONx_CSCWIDTH_NARROW (0x3 << 26)
-
-#define WINCONx_ENLOCAL (1 << 22)
-#define WINCONx_BUFSTATUS (1 << 21)
-#define WINCONx_BUFSEL (1 << 20)
-#define WINCONx_BUFAUTOEN (1 << 19)
-#define WINCONx_YCbCr (1 << 13)
-
-#define WINCON1_LOCALSEL_CAMIF (1 << 23)
-
-#define WINCON2_LOCALSEL_CAMIF (1 << 23)
-#define WINCON2_BLD_PIX (1 << 6)
-
-#define WINCON2_ALPHA_SEL (1 << 1)
-#define WINCON2_BPPMODE_MASK (0xf << 2)
-#define WINCON2_BPPMODE_SHIFT (2)
-#define WINCON2_BPPMODE_1BPP (0x0 << 2)
-#define WINCON2_BPPMODE_2BPP (0x1 << 2)
-#define WINCON2_BPPMODE_4BPP (0x2 << 2)
-#define WINCON2_BPPMODE_8BPP_1232 (0x4 << 2)
-#define WINCON2_BPPMODE_16BPP_565 (0x5 << 2)
-#define WINCON2_BPPMODE_16BPP_A1555 (0x6 << 2)
-#define WINCON2_BPPMODE_16BPP_I1555 (0x7 << 2)
-#define WINCON2_BPPMODE_18BPP_666 (0x8 << 2)
-#define WINCON2_BPPMODE_18BPP_A1665 (0x9 << 2)
-#define WINCON2_BPPMODE_19BPP_A1666 (0xa << 2)
-#define WINCON2_BPPMODE_24BPP_888 (0xb << 2)
-#define WINCON2_BPPMODE_24BPP_A1887 (0xc << 2)
-#define WINCON2_BPPMODE_25BPP_A1888 (0xd << 2)
-#define WINCON2_BPPMODE_28BPP_A4888 (0xd << 2)
-
-#define WINCON3_BLD_PIX (1 << 6)
-
-#define WINCON3_ALPHA_SEL (1 << 1)
-#define WINCON3_BPPMODE_MASK (0xf << 2)
-#define WINCON3_BPPMODE_SHIFT (2)
-#define WINCON3_BPPMODE_1BPP (0x0 << 2)
-#define WINCON3_BPPMODE_2BPP (0x1 << 2)
-#define WINCON3_BPPMODE_4BPP (0x2 << 2)
-#define WINCON3_BPPMODE_16BPP_565 (0x5 << 2)
-#define WINCON3_BPPMODE_16BPP_A1555 (0x6 << 2)
-#define WINCON3_BPPMODE_16BPP_I1555 (0x7 << 2)
-#define WINCON3_BPPMODE_18BPP_666 (0x8 << 2)
-#define WINCON3_BPPMODE_18BPP_A1665 (0x9 << 2)
-#define WINCON3_BPPMODE_19BPP_A1666 (0xa << 2)
-#define WINCON3_BPPMODE_24BPP_888 (0xb << 2)
-#define WINCON3_BPPMODE_24BPP_A1887 (0xc << 2)
-#define WINCON3_BPPMODE_25BPP_A1888 (0xd << 2)
-#define WINCON3_BPPMODE_28BPP_A4888 (0xd << 2)
-
-#define VIDINTCON0_FIFIOSEL_WINDOW2 (0x10 << 5)
-#define VIDINTCON0_FIFIOSEL_WINDOW3 (0x20 << 5)
-#define VIDINTCON0_FIFIOSEL_WINDOW4 (0x40 << 5)
-
-#define DITHMODE (0x170)
-#define WINxMAP(_win) (0x180 + ((_win) * 4))
-
-
-#define DITHMODE_R_POS_MASK (0x3 << 5)
-#define DITHMODE_R_POS_SHIFT (5)
-#define DITHMODE_R_POS_8BIT (0x0 << 5)
-#define DITHMODE_R_POS_6BIT (0x1 << 5)
-#define DITHMODE_R_POS_5BIT (0x2 << 5)
-
-#define DITHMODE_G_POS_MASK (0x3 << 3)
-#define DITHMODE_G_POS_SHIFT (3)
-#define DITHMODE_G_POS_8BIT (0x0 << 3)
-#define DITHMODE_G_POS_6BIT (0x1 << 3)
-#define DITHMODE_G_POS_5BIT (0x2 << 3)
-
-#define DITHMODE_B_POS_MASK (0x3 << 1)
-#define DITHMODE_B_POS_SHIFT (1)
-#define DITHMODE_B_POS_8BIT (0x0 << 1)
-#define DITHMODE_B_POS_6BIT (0x1 << 1)
-#define DITHMODE_B_POS_5BIT (0x2 << 1)
-
-#define DITHMODE_DITH_EN (1 << 0)
-
-#define WPALCON (0x1A0)
-
-/* Palette control */
-/* Note for S5PC100: you can still use those macros on WPALCON (aka WPALCON_L),
- * but make sure that WPALCON_H W2PAL-W4PAL entries are zeroed out */
-#define WPALCON_W4PAL_16BPP_A555 (1 << 8)
-#define WPALCON_W3PAL_16BPP_A555 (1 << 7)
-#define WPALCON_W2PAL_16BPP_A555 (1 << 6)
-
/* Notes on per-window bpp settings
*
diff --git a/include/video/sh_mipi_dsi.h b/include/video/sh_mipi_dsi.h
index 06c67fbc4ee..a01f197e6ac 100644
--- a/include/video/sh_mipi_dsi.h
+++ b/include/video/sh_mipi_dsi.h
@@ -25,8 +25,6 @@ enum sh_mipi_dsi_data_fmt {
MIPI_YUV420,
};
-struct sh_mobile_lcdc_chan_cfg;
-
#define SH_MIPI_DSI_HSABM (1 << 0)
#define SH_MIPI_DSI_HBPBM (1 << 1)
#define SH_MIPI_DSI_HFPBM (1 << 2)
@@ -47,7 +45,7 @@ struct sh_mobile_lcdc_chan_cfg;
struct sh_mipi_dsi_info {
enum sh_mipi_dsi_data_fmt data_format;
- struct sh_mobile_lcdc_chan_cfg *lcd_chan;
+ int channel;
int lane;
unsigned long flags;
u32 clksrc;
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index ff43ffc1aab..2605fa8adb9 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -163,7 +163,6 @@ struct sh_mobile_lcdc_bl_info {
const char *name;
int max_brightness;
int (*set_brightness)(int brightness);
- int (*get_brightness)(void);
};
struct sh_mobile_lcdc_overlay_cfg {
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index 2090881c365..f4942921e20 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -177,6 +177,19 @@ struct evtchn_unmask {
evtchn_port_t port;
};
+/*
+ * EVTCHNOP_reset: Close all event channels associated with specified domain.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ */
+#define EVTCHNOP_reset 10
+struct evtchn_reset {
+ /* IN parameters. */
+ domid_t dom;
+};
+typedef struct evtchn_reset evtchn_reset_t;
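A kernel-side sketch of issuing the new sub-op; HYPERVISOR_event_channel_op() and DOMID_SELF are assumed from the existing Xen interface/hypercall headers, not defined in this hunk:

#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>

/* Close every event channel owned by the calling domain. */
static int evtchn_reset_self(void)
{
        struct evtchn_reset reset = { .dom = DOMID_SELF };

        return HYPERVISOR_event_channel_op(EVTCHNOP_reset, &reset);
}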
+
struct evtchn_op {
uint32_t cmd; /* EVTCHNOP_* */
union {
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 90712e2072d..b40a4315cb8 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -153,6 +153,14 @@ struct xen_machphys_mapping {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_gmfn 2 /* GMFN */
+#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
+ * XENMEM_add_to_physmap_range only.
+ */
+
/*
* Sets the GPFN at which a particular page appears in the specified guest's
* pseudophysical address space.
@@ -167,8 +175,6 @@ struct xen_add_to_physmap {
uint16_t size;
/* Source mapping space. */
-#define XENMAPSPACE_shared_info 0 /* shared info page */
-#define XENMAPSPACE_grant_table 1 /* grant table page */
unsigned int space;
/* Index into source mapping space. */
@@ -182,6 +188,24 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/
+#define XENMEM_add_to_physmap_range 23
+struct xen_add_to_physmap_range {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+ uint16_t space; /* => enum phys_map_space */
+
+ /* Number of pages to go through */
+ uint16_t size;
+ domid_t foreign_domid; /* IFF gmfn_foreign */
+
+ /* Indexes into space being mapped. */
+ GUEST_HANDLE(xen_ulong_t) idxs;
+
+ /* GPFN in domid where the source mapping page should appear. */
+ GUEST_HANDLE(xen_pfn_t) gpfns;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
+
/*
* Returns the pseudo-physical memory map as it was when the domain
* was started (specified by XENMEM_set_memory_map).
@@ -217,4 +241,20 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
* during a driver critical region.
*/
extern spinlock_t xen_reservation_lock;
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* GPFN of the current mapping of the page. */
+ xen_pfn_t gpfn;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
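A hedged sketch of the new XENMEM_remove_from_physmap sub-op added above, issued through the standard HYPERVISOR_memory_op() wrapper; drop_gpfn_mapping() is a hypothetical helper name:
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
/* Ask the hypervisor to unmap whatever is mapped at @gpfn in our p2m. */
static int drop_gpfn_mapping(xen_pfn_t gpfn)
{
	struct xen_remove_from_physmap xrp = {
		.domid = DOMID_SELF,
		.gpfn  = gpfn,
	};
	return HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
}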
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 4755b5fac9c..5e36932ab40 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -324,6 +324,22 @@ struct xenpf_cpu_ol {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
+/*
+ * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd,
+ * which are already occupied on the Xen hypervisor side.
+ */
+#define XENPF_core_parking 60
+struct xenpf_core_parking {
+ /* IN variables */
+#define XEN_CORE_PARKING_SET 1
+#define XEN_CORE_PARKING_GET 2
+ uint32_t type;
+ /* IN variables: set cpu nums expected to be idled */
+ /* OUT variables: get cpu nums actually idled */
+ uint32_t idle_nums;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -341,6 +357,7 @@ struct xen_platform_op {
struct xenpf_set_processor_pminfo set_pminfo;
struct xenpf_pcpuinfo pcpu_info;
struct xenpf_cpu_ol cpu_ol;
+ struct xenpf_core_parking core_parking;
uint8_t pad[128];
} u;
};
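The XENPF_core_parking op above lets dom0 ask the hypervisor to park (idle) a number of cpus. A rough sketch of a caller, assuming the conventional HYPERVISOR_dom0_op() wrapper, which on Linux fills in interface_version itself; xen_park_cpus() is a hypothetical helper:
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
/* Request that @nr cpus be idled ("parked") by the hypervisor. */
static int xen_park_cpus(uint32_t nr)
{
	struct xen_platform_op op = {
		.cmd = XENPF_core_parking,
		.u.core_parking = {
			.type      = XEN_CORE_PARKING_SET,
			.idle_nums = nr,
		},
	};
	return HYPERVISOR_dom0_op(&op);
}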
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 6a198e46ab6..d6fe062cad6 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -2,6 +2,7 @@
#define INCLUDE_XEN_OPS_H
#include <linux/percpu.h>
+#include <asm/xen/interface.h>
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
@@ -26,7 +27,11 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
struct vm_area_struct;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- unsigned long mfn, int nr,
- pgprot_t prot, unsigned domid);
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages);
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages);
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
#endif /* INCLUDE_XEN_OPS_H */
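The xen_remap_domain_mfn_range() prototype now takes a struct page **pages array and gains an unmap counterpart, which matters for auto-translated guests. A sketch of the expected calling pattern; map_one_frame() is a hypothetical caller:
#include <linux/mm.h>
#include <xen/xen-ops.h>
/* Map a single foreign frame into @vma at @addr and later tear it down. */
static int map_one_frame(struct vm_area_struct *vma, unsigned long addr,
			 xen_pfn_t mfn, unsigned domid, struct page **pages)
{
	int rc = xen_remap_domain_mfn_range(vma, addr, mfn, 1,
					    vma->vm_page_prot, domid, pages);
	if (rc < 0)
		return rc;
	/* ... use the mapping; the same pages array must be handed back: */
	return xen_unmap_domain_mfn_range(vma, 1, pages);
}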
diff --git a/init/Kconfig b/init/Kconfig
index 2054e048bb9..7d30240e5bf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -717,6 +717,50 @@ config LOG_BUF_SHIFT
config HAVE_UNSTABLE_SCHED_CLOCK
bool
+#
+# For architectures that want to enable the support for NUMA-affine scheduler
+# balancing logic:
+#
+config ARCH_SUPPORTS_NUMA_BALANCING
+ bool
+
+# For architectures that (ab)use NUMA to represent different memory regions,
+# all cpu-local but of different latencies, such as SuperH.
+#
+config ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ bool
+
+#
+# For architectures that are willing to define _PAGE_NUMA as _PAGE_PROTNONE
+config ARCH_WANTS_PROT_NUMA_PROT_NONE
+ bool
+
+config ARCH_USES_NUMA_PROT_NONE
+ bool
+ default y
+ depends on ARCH_WANTS_PROT_NUMA_PROT_NONE
+ depends on NUMA_BALANCING
+
+config NUMA_BALANCING_DEFAULT_ENABLED
+ bool "Automatically enable NUMA aware memory/task placement"
+ default y
+ depends on NUMA_BALANCING
+ help
+ If set, automatic NUMA balancing will be enabled if running on a NUMA
+ machine.
+
+config NUMA_BALANCING
+ bool "Memory placement aware NUMA scheduler"
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory to
+ the node the task is running on when it is referenced.
+
+ This system will be inactive on UMA systems.
+
menuconfig CGROUPS
boolean "Control Group support"
depends on EVENTFD
@@ -838,7 +882,7 @@ config MEMCG_SWAP_ENABLED
config MEMCG_KMEM
bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
depends on MEMCG && EXPERIMENTAL
- default n
+ depends on SLUB || SLAB
help
The Kernel Memory extension for Memory Resource Controller can limit
the amount of memory used by kernel objects in the system. Those are
@@ -1025,11 +1069,9 @@ config UIDGID_CONVERTED
# Filesystems
depends on 9P_FS = n
depends on AFS_FS = n
- depends on AUTOFS4_FS = n
depends on CEPH_FS = n
depends on CIFS = n
depends on CODA_FS = n
- depends on FUSE_FS = n
depends on GFS2_FS = n
depends on NCP_FS = n
depends on NFSD = n
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f8a66424360..1d1b6348f90 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -69,23 +69,28 @@ __setup("ro", readonly);
__setup("rw", readwrite);
#ifdef CONFIG_BLOCK
+struct uuidcmp {
+ const char *uuid;
+ int len;
+};
+
/**
* match_dev_by_uuid - callback for finding a partition using its uuid
* @dev: device passed in by the caller
- * @data: opaque pointer to a 36 byte char array with a UUID
+ * @data: opaque pointer to the desired struct uuidcmp to match
*
* Returns 1 if the device matches, and 0 otherwise.
*/
static int match_dev_by_uuid(struct device *dev, void *data)
{
- u8 *uuid = data;
+ struct uuidcmp *cmp = data;
struct hd_struct *part = dev_to_part(dev);
if (!part->info)
goto no_match;
- if (memcmp(uuid, part->info->uuid, sizeof(part->info->uuid)))
- goto no_match;
+ if (strncasecmp(cmp->uuid, part->info->uuid, cmp->len))
+ goto no_match;
return 1;
no_match:
@@ -95,7 +100,7 @@ no_match:
/**
* devt_from_partuuid - looks up the dev_t of a partition by its UUID
- * @uuid: min 36 byte char array containing a hex ascii UUID
+ * @uuid: char array containing ascii UUID
*
* The function will return the first partition which contains a matching
* UUID value in its partition_meta_info struct. This does not search
@@ -106,38 +111,41 @@ no_match:
*
* Returns the matching dev_t on success or 0 on failure.
*/
-static dev_t devt_from_partuuid(char *uuid_str)
+static dev_t devt_from_partuuid(const char *uuid_str)
{
dev_t res = 0;
+ struct uuidcmp cmp;
struct device *dev = NULL;
- u8 uuid[16];
struct gendisk *disk;
struct hd_struct *part;
int offset = 0;
+ bool clear_root_wait = false;
+ char *slash;
- if (strlen(uuid_str) < 36)
- goto done;
+ cmp.uuid = uuid_str;
+ slash = strchr(uuid_str, '/');
/* Check for optional partition number offset attributes. */
- if (uuid_str[36]) {
+ if (slash) {
char c = 0;
/* Explicitly fail on poor PARTUUID syntax. */
- if (sscanf(&uuid_str[36],
- "/PARTNROFF=%d%c", &offset, &c) != 1) {
- printk(KERN_ERR "VFS: PARTUUID= is invalid.\n"
- "Expected PARTUUID=<valid-uuid-id>[/PARTNROFF=%%d]\n");
- if (root_wait)
- printk(KERN_ERR
- "Disabling rootwait; root= is invalid.\n");
- root_wait = 0;
+ if (sscanf(slash + 1,
+ "PARTNROFF=%d%c", &offset, &c) != 1) {
+ clear_root_wait = true;
goto done;
}
+ cmp.len = slash - uuid_str;
+ } else {
+ cmp.len = strlen(uuid_str);
}
- /* Pack the requested UUID in the expected format. */
- part_pack_uuid(uuid_str, uuid);
+ if (!cmp.len) {
+ clear_root_wait = true;
+ goto done;
+ }
- dev = class_find_device(&block_class, NULL, uuid, &match_dev_by_uuid);
+ dev = class_find_device(&block_class, NULL, &cmp,
+ &match_dev_by_uuid);
if (!dev)
goto done;
@@ -158,6 +166,13 @@ static dev_t devt_from_partuuid(char *uuid_str)
no_offset:
put_device(dev);
done:
+ if (clear_root_wait) {
+ pr_err("VFS: PARTUUID= is invalid.\n"
+ "Expected PARTUUID=<valid-uuid-id>[/PARTNROFF=%%d]\n");
+ if (root_wait)
+ pr_err("Disabling rootwait; root= is invalid.\n");
+ root_wait = 0;
+ }
return res;
}
#endif
@@ -174,6 +189,10 @@ done:
* used when disk name of partitioned disk ends on a digit.
* 6) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the
* unique id of a partition if the partition table provides it.
+ * The UUID may be either an EFI/GPT UUID, or refer to an MSDOS
+ * partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero-
+ * filled hex representation of the 32-bit "NT disk signature", and PP
+ * is a zero-filled hex representation of the 1-based partition number.
* 7) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to
* a partition with a known unique id.
*
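The do_mounts.c change above relaxes root=PARTUUID= handling: the value is now matched case-insensitively as a string up to an optional /PARTNROFF suffix, which also admits the shorter MSDOS SSSSSSSS-PP form documented above. An illustrative userspace sketch of the same prefix match (the sample values below are made up):
#include <stdio.h>
#include <string.h>
#include <strings.h>
/* Mirror of the kernel's uuidcmp idea: compare up to an optional '/'. */
static int partuuid_matches(const char *arg, const char *part_uuid)
{
	const char *slash = strchr(arg, '/');
	size_t len = slash ? (size_t)(slash - arg) : strlen(arg);
	return len && strncasecmp(arg, part_uuid, len) == 0;
}
int main(void)
{
	/* e.g. root=PARTUUID=0002dd75-01/PARTNROFF=1 on the command line */
	printf("%d\n", partuuid_matches("0002dd75-01/PARTNROFF=1",
					"0002dd75-01"));
	return 0;
}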
diff --git a/init/main.c b/init/main.c
index e33e09df3cb..85d69dffe86 100644
--- a/init/main.c
+++ b/init/main.c
@@ -797,7 +797,9 @@ static void __init do_pre_smp_initcalls(void)
static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
- return kernel_execve(init_filename, argv_init, envp_init);
+ return do_execve(init_filename,
+ (const char __user *const __user *)argv_init,
+ (const char __user *const __user *)envp_init);
}
static void __init kernel_init_freeable(void);
@@ -812,7 +814,6 @@ static int __ref kernel_init(void *unused)
system_state = SYSTEM_RUNNING;
numa_default_policy();
- current->signal->flags |= SIGNAL_UNKILLABLE;
flush_delayed_fput();
if (ramdisk_execute_command) {
@@ -857,7 +858,7 @@ static void __init kernel_init_freeable(void)
/*
* init can allocate pages on any node
*/
- set_mems_allowed(node_states[N_HIGH_MEMORY]);
+ set_mems_allowed(node_states[N_MEMORY]);
/*
* init can run on any cpu.
*/
diff --git a/init/version.c b/init/version.c
index 86fe0ccb997..58170f18912 100644
--- a/init/version.c
+++ b/init/version.c
@@ -12,6 +12,7 @@
#include <linux/utsname.h>
#include <generated/utsrelease.h>
#include <linux/version.h>
+#include <linux/proc_fs.h>
#ifndef CONFIG_KALLSYMS
#define version(a) Version_ ## a
@@ -34,6 +35,7 @@ struct uts_namespace init_uts_ns = {
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
+ .proc_inum = PROC_UTS_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_uts_ns);
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 26143d377c9..6471f1bdae9 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -16,6 +16,7 @@
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/utsname.h>
+#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include "util.h"
@@ -30,6 +31,7 @@ DEFINE_SPINLOCK(mq_lock);
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
.user_ns = &init_user_ns,
+ .proc_inum = PROC_IPC_INIT_INO,
};
atomic_t nr_ipc_ns = ATOMIC_INIT(1);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index f362298c5ce..7c1fa451b0b 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -16,7 +16,7 @@
#include "util.h"
-static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
+static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
struct ipc_namespace *old_ns)
{
struct ipc_namespace *ns;
@@ -26,9 +26,16 @@ static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
if (ns == NULL)
return ERR_PTR(-ENOMEM);
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err) {
+ kfree(ns);
+ return ERR_PTR(err);
+ }
+
atomic_set(&ns->count, 1);
err = mq_init_ns(ns);
if (err) {
+ proc_free_inum(ns->proc_inum);
kfree(ns);
return ERR_PTR(err);
}
@@ -46,19 +53,17 @@ static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
ipcns_notify(IPCNS_CREATED);
register_ipcns_notifier(ns);
- ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
+ ns->user_ns = get_user_ns(user_ns);
return ns;
}
struct ipc_namespace *copy_ipcs(unsigned long flags,
- struct task_struct *tsk)
+ struct user_namespace *user_ns, struct ipc_namespace *ns)
{
- struct ipc_namespace *ns = tsk->nsproxy->ipc_ns;
-
if (!(flags & CLONE_NEWIPC))
return get_ipc_ns(ns);
- return create_ipc_ns(tsk, ns);
+ return create_ipc_ns(user_ns, ns);
}
/*
@@ -113,6 +118,7 @@ static void free_ipc_ns(struct ipc_namespace *ns)
*/
ipcns_notify(IPCNS_REMOVED);
put_user_ns(ns->user_ns);
+ proc_free_inum(ns->proc_inum);
kfree(ns);
}
@@ -161,8 +167,13 @@ static void ipcns_put(void *ns)
return put_ipc_ns(ns);
}
-static int ipcns_install(struct nsproxy *nsproxy, void *ns)
+static int ipcns_install(struct nsproxy *nsproxy, void *new)
{
+ struct ipc_namespace *ns = new;
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/* Ditch state from the old ipc namespace */
exit_sem(current);
put_ipc_ns(nsproxy->ipc_ns);
@@ -170,10 +181,18 @@ static int ipcns_install(struct nsproxy *nsproxy, void *ns)
return 0;
}
+static unsigned int ipcns_inum(void *vp)
+{
+ struct ipc_namespace *ns = vp;
+
+ return ns->proc_inum;
+}
+
const struct proc_ns_operations ipcns_operations = {
.name = "ipc",
.type = CLONE_NEWIPC,
.get = ipcns_get,
.put = ipcns_put,
.install = ipcns_install,
+ .inum = ipcns_inum,
};
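With a persistent inode number attached to each IPC namespace and reported through the new .inum hook, userspace can tell whether two tasks share a namespace by comparing the st_ino of their /proc/<pid>/ns/ipc files. A hedged sketch; same_ipc_ns() is a hypothetical helper:
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
/* Returns 1 if both pids share an IPC namespace, 0 if not, -1 on error. */
static int same_ipc_ns(pid_t a, pid_t b)
{
	char pa[64], pb[64];
	struct stat sa, sb;
	snprintf(pa, sizeof(pa), "/proc/%d/ns/ipc", (int)a);
	snprintf(pb, sizeof(pb), "/proc/%d/ns/ipc", (int)b);
	if (stat(pa, &sa) || stat(pb, &sb))
		return -1;
	return sa.st_ino == sb.st_ino;
}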
diff --git a/kernel/Makefile b/kernel/Makefile
index ac0d533eb7d..6c072b6da23 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -54,7 +54,7 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o
+obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
@@ -137,10 +137,14 @@ ifeq ($(CONFIG_MODULE_SIG),y)
#
# Pull the signing certificate and any extra certificates into the kernel
#
+
+quiet_cmd_touch = TOUCH $@
+ cmd_touch = touch $@
+
extra_certificates:
- touch $@
+ $(call cmd,touch)
-kernel/modsign_pubkey.o: signing_key.x509 extra_certificates
+kernel/modsign_certificate.o: signing_key.x509 extra_certificates
###############################################################################
#
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index ed206fd88cc..e81175ef25f 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -249,7 +249,7 @@ static void untag_chunk(struct node *p)
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
- fsnotify_destroy_mark(entry);
+ fsnotify_destroy_mark(entry, audit_tree_group);
goto out;
}
@@ -291,7 +291,7 @@ static void untag_chunk(struct node *p)
owner->root = new;
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
- fsnotify_destroy_mark(entry);
+ fsnotify_destroy_mark(entry, audit_tree_group);
fsnotify_put_mark(&new->mark); /* drop initial reference */
goto out;
@@ -331,7 +331,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
chunk->dead = 1;
spin_unlock(&entry->lock);
- fsnotify_destroy_mark(entry);
+ fsnotify_destroy_mark(entry, audit_tree_group);
fsnotify_put_mark(entry);
return 0;
}
@@ -412,7 +412,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
- fsnotify_destroy_mark(chunk_entry);
+ fsnotify_destroy_mark(chunk_entry, audit_tree_group);
fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
@@ -443,7 +443,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
- fsnotify_destroy_mark(old_entry);
+ fsnotify_destroy_mark(old_entry, audit_tree_group);
fsnotify_put_mark(chunk_entry); /* drop initial reference */
fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
return 0;
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 9a9ae6e3d29..4a599f699ad 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -350,7 +350,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
}
mutex_unlock(&audit_filter_mutex);
- fsnotify_destroy_mark(&parent->mark);
+ fsnotify_destroy_mark(&parent->mark, audit_watch_group);
}
/* Get path information necessary for adding watches. */
@@ -457,7 +457,7 @@ void audit_remove_watch_rule(struct audit_krule *krule)
if (list_empty(&parent->watches)) {
audit_get_parent(parent);
- fsnotify_destroy_mark(&parent->mark);
+ fsnotify_destroy_mark(&parent->mark, audit_watch_group);
audit_put_parent(parent);
}
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f34c41bfaa3..4855892798f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1333,7 +1333,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto out_unlock;
- /* See feature-removal-schedule.txt */
if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent)
pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
task_tgid_nr(current), current->comm);
@@ -3409,7 +3408,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
{
struct cgroup_pidlist *l;
/* don't need task_nsproxy() if we're looking at ourself */
- struct pid_namespace *ns = current->nsproxy->pid_ns;
+ struct pid_namespace *ns = task_active_pid_ns(current);
/*
* We can't drop the pidlist_mutex before taking the l->mutex in case
diff --git a/kernel/compat.c b/kernel/compat.c
index c28a306ae05..f6150e92dfc 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1215,6 +1215,23 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
return 0;
}
+#ifdef __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
+asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
+ struct compat_timespec __user *interval)
+{
+ struct timespec t;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+ set_fs(old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+ return ret;
+}
+#endif /* __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL */
+
/*
* Allocate user-space memory for the duration of a single system call,
* in order to marshall parameters inside a compat thunk.
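The compat_sys_sched_rr_get_interval() wrapper added above simply round-trips through the native syscall with a kernel-space timespec. From a 32-bit process it is exercised through the ordinary libc call, for example:
#include <sched.h>
#include <stdio.h>
#include <time.h>
int main(void)
{
	struct timespec ts;
	/* pid 0 means the calling task. */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}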
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b017887d632..7bb63eea6eb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -302,10 +302,10 @@ static void guarantee_online_cpus(const struct cpuset *cs,
* are online, with memory. If none are online with memory, walk
* up the cpuset hierarchy until we find one that does have some
* online mems. If we get all the way to the top and still haven't
- * found any online mems, return node_states[N_HIGH_MEMORY].
+ * found any online mems, return node_states[N_MEMORY].
*
* One way or another, we guarantee to return some non-empty subset
- * of node_states[N_HIGH_MEMORY].
+ * of node_states[N_MEMORY].
*
* Call with callback_mutex held.
*/
@@ -313,14 +313,14 @@ static void guarantee_online_cpus(const struct cpuset *cs,
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
while (cs && !nodes_intersects(cs->mems_allowed,
- node_states[N_HIGH_MEMORY]))
+ node_states[N_MEMORY]))
cs = cs->parent;
if (cs)
nodes_and(*pmask, cs->mems_allowed,
- node_states[N_HIGH_MEMORY]);
+ node_states[N_MEMORY]);
else
- *pmask = node_states[N_HIGH_MEMORY];
- BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
+ *pmask = node_states[N_MEMORY];
+ BUG_ON(!nodes_intersects(*pmask, node_states[N_MEMORY]));
}
/*
@@ -1100,7 +1100,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
return -ENOMEM;
/*
- * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
+ * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
* it's read-only
*/
if (cs == &top_cpuset) {
@@ -1122,7 +1122,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
goto done;
if (!nodes_subset(trialcs->mems_allowed,
- node_states[N_HIGH_MEMORY])) {
+ node_states[N_MEMORY])) {
retval = -EINVAL;
goto done;
}
@@ -2026,7 +2026,7 @@ static struct cpuset *cpuset_next(struct list_head *queue)
* before dropping down to the next. It always processes a node before
* any of its children.
*
- * In the case of memory hot-unplug, it will remove nodes from N_HIGH_MEMORY
+ * In the case of memory hot-unplug, it will remove nodes from N_MEMORY
* if all present pages from a node are offlined.
*/
static void
@@ -2065,7 +2065,7 @@ scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
/* Continue past cpusets with all mems online */
if (nodes_subset(cp->mems_allowed,
- node_states[N_HIGH_MEMORY]))
+ node_states[N_MEMORY]))
continue;
oldmems = cp->mems_allowed;
@@ -2073,7 +2073,7 @@ scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
/* Remove offline mems from this cpuset. */
mutex_lock(&callback_mutex);
nodes_and(cp->mems_allowed, cp->mems_allowed,
- node_states[N_HIGH_MEMORY]);
+ node_states[N_MEMORY]);
mutex_unlock(&callback_mutex);
/* Move tasks from the empty cpuset to a parent */
@@ -2126,8 +2126,8 @@ void cpuset_update_active_cpus(bool cpu_online)
#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
- * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
+ * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
+ * Call this routine anytime after node_states[N_MEMORY] changes.
* See cpuset_update_active_cpus() for CPU hotplug handling.
*/
static int cpuset_track_online_nodes(struct notifier_block *self,
@@ -2140,7 +2140,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
case MEM_ONLINE:
oldmems = top_cpuset.mems_allowed;
mutex_lock(&callback_mutex);
- top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+ top_cpuset.mems_allowed = node_states[N_MEMORY];
mutex_unlock(&callback_mutex);
update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
break;
@@ -2169,7 +2169,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
void __init cpuset_init_smp(void)
{
cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
- top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+ top_cpuset.mems_allowed = node_states[N_MEMORY];
hotplug_memory_notifier(cpuset_track_online_nodes, 10);
@@ -2237,7 +2237,7 @@ void cpuset_init_current_mems_allowed(void)
*
* Description: Returns the nodemask_t mems_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
+ * subset of node_states[N_MEMORY], even if this means going outside the
* tasks cpuset.
**/
diff --git a/kernel/cred.c b/kernel/cred.c
index 48cea3da6d0..e0573a43c7d 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -30,17 +30,6 @@
static struct kmem_cache *cred_jar;
/*
- * The common credentials for the initial task's thread group
- */
-#ifdef CONFIG_KEYS
-static struct thread_group_cred init_tgcred = {
- .usage = ATOMIC_INIT(2),
- .tgid = 0,
- .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
-};
-#endif
-
-/*
* The initial credentials for the initial task
*/
struct cred init_cred = {
@@ -65,9 +54,6 @@ struct cred init_cred = {
.user = INIT_USER,
.user_ns = &init_user_ns,
.group_info = &init_groups,
-#ifdef CONFIG_KEYS
- .tgcred = &init_tgcred,
-#endif
};
static inline void set_cred_subscribers(struct cred *cred, int n)
@@ -96,36 +82,6 @@ static inline void alter_cred_subscribers(const struct cred *_cred, int n)
}
/*
- * Dispose of the shared task group credentials
- */
-#ifdef CONFIG_KEYS
-static void release_tgcred_rcu(struct rcu_head *rcu)
-{
- struct thread_group_cred *tgcred =
- container_of(rcu, struct thread_group_cred, rcu);
-
- BUG_ON(atomic_read(&tgcred->usage) != 0);
-
- key_put(tgcred->session_keyring);
- key_put(tgcred->process_keyring);
- kfree(tgcred);
-}
-#endif
-
-/*
- * Release a set of thread group credentials.
- */
-static void release_tgcred(struct cred *cred)
-{
-#ifdef CONFIG_KEYS
- struct thread_group_cred *tgcred = cred->tgcred;
-
- if (atomic_dec_and_test(&tgcred->usage))
- call_rcu(&tgcred->rcu, release_tgcred_rcu);
-#endif
-}
-
-/*
* The RCU callback to actually dispose of a set of credentials
*/
static void put_cred_rcu(struct rcu_head *rcu)
@@ -150,9 +106,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
#endif
security_cred_free(cred);
+ key_put(cred->session_keyring);
+ key_put(cred->process_keyring);
key_put(cred->thread_keyring);
key_put(cred->request_key_auth);
- release_tgcred(cred);
if (cred->group_info)
put_group_info(cred->group_info);
free_uid(cred->user);
@@ -246,15 +203,6 @@ struct cred *cred_alloc_blank(void)
if (!new)
return NULL;
-#ifdef CONFIG_KEYS
- new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
- if (!new->tgcred) {
- kmem_cache_free(cred_jar, new);
- return NULL;
- }
- atomic_set(&new->tgcred->usage, 1);
-#endif
-
atomic_set(&new->usage, 1);
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
@@ -308,9 +256,10 @@ struct cred *prepare_creds(void)
get_user_ns(new->user_ns);
#ifdef CONFIG_KEYS
+ key_get(new->session_keyring);
+ key_get(new->process_keyring);
key_get(new->thread_keyring);
key_get(new->request_key_auth);
- atomic_inc(&new->tgcred->usage);
#endif
#ifdef CONFIG_SECURITY
@@ -334,39 +283,20 @@ EXPORT_SYMBOL(prepare_creds);
*/
struct cred *prepare_exec_creds(void)
{
- struct thread_group_cred *tgcred = NULL;
struct cred *new;
-#ifdef CONFIG_KEYS
- tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
- if (!tgcred)
- return NULL;
-#endif
-
new = prepare_creds();
- if (!new) {
- kfree(tgcred);
+ if (!new)
return new;
- }
#ifdef CONFIG_KEYS
/* newly exec'd tasks don't get a thread keyring */
key_put(new->thread_keyring);
new->thread_keyring = NULL;
- /* create a new per-thread-group creds for all this set of threads to
- * share */
- memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred));
-
- atomic_set(&tgcred->usage, 1);
- spin_lock_init(&tgcred->lock);
-
/* inherit the session keyring; new process keyring */
- key_get(tgcred->session_keyring);
- tgcred->process_keyring = NULL;
-
- release_tgcred(new);
- new->tgcred = tgcred;
+ key_put(new->process_keyring);
+ new->process_keyring = NULL;
#endif
return new;
@@ -383,9 +313,6 @@ struct cred *prepare_exec_creds(void)
*/
int copy_creds(struct task_struct *p, unsigned long clone_flags)
{
-#ifdef CONFIG_KEYS
- struct thread_group_cred *tgcred;
-#endif
struct cred *new;
int ret;
@@ -425,22 +352,12 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
install_thread_keyring_to_cred(new);
}
- /* we share the process and session keyrings between all the threads in
- * a process - this is slightly icky as we violate COW credentials a
- * bit */
+ /* The process keyring is only shared between the threads in a process;
+ * anything outside of those threads doesn't inherit.
+ */
if (!(clone_flags & CLONE_THREAD)) {
- tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
- if (!tgcred) {
- ret = -ENOMEM;
- goto error_put;
- }
- atomic_set(&tgcred->usage, 1);
- spin_lock_init(&tgcred->lock);
- tgcred->process_keyring = NULL;
- tgcred->session_keyring = key_get(new->tgcred->session_keyring);
-
- release_tgcred(new);
- new->tgcred = tgcred;
+ key_put(new->process_keyring);
+ new->process_keyring = NULL;
}
#endif
@@ -455,6 +372,31 @@ error_put:
return ret;
}
+static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
+{
+ const struct user_namespace *set_ns = set->user_ns;
+ const struct user_namespace *subset_ns = subset->user_ns;
+
+ /* If the two credentials are in the same user namespace, see if
+ * the capabilities of subset are a subset of set.
+ */
+ if (set_ns == subset_ns)
+ return cap_issubset(subset->cap_permitted, set->cap_permitted);
+
+ /* The credentials are in different user namespaces, therefore
+ * one is a subset of the other only if set is an ancestor of
+ * subset and set->euid is the owner of subset or one of
+ * subset's ancestors.
+ */
+ for (;subset_ns != &init_user_ns; subset_ns = subset_ns->parent) {
+ if ((set_ns == subset_ns->parent) &&
+ uid_eq(subset_ns->owner, set->euid))
+ return true;
+ }
+
+ return false;
+}
+
/**
* commit_creds - Install new credentials upon the current task
* @new: The credentials to be assigned
@@ -493,7 +435,7 @@ int commit_creds(struct cred *new)
!gid_eq(old->egid, new->egid) ||
!uid_eq(old->fsuid, new->fsuid) ||
!gid_eq(old->fsgid, new->fsgid) ||
- !cap_issubset(new->cap_permitted, old->cap_permitted)) {
+ !cred_cap_issubset(old, new)) {
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
@@ -643,9 +585,6 @@ void __init cred_init(void)
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
-#ifdef CONFIG_KEYS
- struct thread_group_cred *tgcred;
-#endif
const struct cred *old;
struct cred *new;
@@ -653,14 +592,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
-#ifdef CONFIG_KEYS
- tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
- if (!tgcred) {
- kmem_cache_free(cred_jar, new);
- return NULL;
- }
-#endif
-
kdebug("prepare_kernel_cred() alloc %p", new);
if (daemon)
@@ -678,13 +609,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
- atomic_set(&tgcred->usage, 1);
- spin_lock_init(&tgcred->lock);
- tgcred->process_keyring = NULL;
- tgcred->session_keyring = NULL;
- new->tgcred = tgcred;
- new->request_key_auth = NULL;
+ new->session_keyring = NULL;
+ new->process_keyring = NULL;
new->thread_keyring = NULL;
+ new->request_key_auth = NULL;
new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f9ff5493171..301079d06f2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6155,7 +6155,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
- event->ns = get_pid_ns(current->nsproxy->pid_ns);
+ event->ns = get_pid_ns(task_active_pid_ns(current));
event->id = atomic64_inc_return(&perf_event_id);
event->state = PERF_EVENT_STATE_INACTIVE;
diff --git a/kernel/exit.c b/kernel/exit.c
index 50d2e93c36e..b4df2193721 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -72,18 +72,6 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
list_del_rcu(&p->tasks);
list_del_init(&p->sibling);
__this_cpu_dec(process_counts);
- /*
- * If we are the last child process in a pid namespace to be
- * reaped, notify the reaper sleeping zap_pid_ns_processes().
- */
- if (IS_ENABLED(CONFIG_PID_NS)) {
- struct task_struct *parent = p->real_parent;
-
- if ((task_active_pid_ns(parent)->child_reaper == parent) &&
- list_empty(&parent->children) &&
- (parent->flags & PF_EXITING))
- wake_up_process(parent);
- }
}
list_del_rcu(&p->thread_group);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 3c31e874afa..65ca6d27f24 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -146,7 +146,7 @@ void __weak arch_release_thread_info(struct thread_info *ti)
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
int node)
{
- struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+ struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
THREAD_SIZE_ORDER);
return page ? page_address(page) : NULL;
@@ -154,7 +154,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
static inline void free_thread_info(struct thread_info *ti)
{
- free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+ free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;
@@ -823,6 +823,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ mm->first_nid = NUMA_PTE_SCAN_INIT;
+#endif
if (!mm_init(mm, tsk))
goto fail_nomem;
@@ -1041,8 +1044,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
- if (clone_flags & CLONE_NEWPID)
- sig->flags |= SIGNAL_UNKILLABLE;
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
@@ -1165,6 +1166,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
+ /*
+ * If the new process will be in a different pid namespace,
+ * don't allow the creation of threads.
+ */
+ if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
+ (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+ return ERR_PTR(-EINVAL);
+
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
@@ -1435,8 +1444,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
if (thread_group_leader(p)) {
- if (is_child_reaper(pid))
- p->nsproxy->pid_ns->child_reaper = p;
+ if (is_child_reaper(pid)) {
+ ns_of_pid(pid)->child_reaper = p;
+ p->signal->flags |= SIGNAL_UNKILLABLE;
+ }
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
@@ -1470,8 +1481,6 @@ bad_fork_cleanup_io:
if (p->io_context)
exit_io_context(p);
bad_fork_cleanup_namespaces:
- if (unlikely(clone_flags & CLONE_NEWPID))
- pid_ns_release_proc(p->nsproxy->pid_ns);
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
@@ -1551,15 +1560,9 @@ long do_fork(unsigned long clone_flags,
* Do some preliminary argument and permissions checking before we
* actually start allocating stuff
*/
- if (clone_flags & CLONE_NEWUSER) {
- if (clone_flags & CLONE_THREAD)
+ if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
+ if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
return -EINVAL;
- /* hopefully this check will go away when userns support is
- * complete
- */
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
- !capable(CAP_SETGID))
- return -EPERM;
}
/*
@@ -1618,7 +1621,6 @@ long do_fork(unsigned long clone_flags,
return nr;
}
-#ifdef CONFIG_GENERIC_KERNEL_THREAD
/*
* Create a kernel thread.
*/
@@ -1627,7 +1629,6 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
(unsigned long)arg, NULL, NULL);
}
-#endif
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
@@ -1721,7 +1722,8 @@ static int check_unshare_flags(unsigned long unshare_flags)
{
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
- CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
+ CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
+ CLONE_NEWUSER|CLONE_NEWPID))
return -EINVAL;
/*
* Not implemented, but pretend it works if there is nothing to
@@ -1788,19 +1790,40 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
struct fs_struct *fs, *new_fs = NULL;
struct files_struct *fd, *new_fd = NULL;
+ struct cred *new_cred = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
int err;
- err = check_unshare_flags(unshare_flags);
- if (err)
- goto bad_unshare_out;
-
+ /*
+ * If unsharing a user namespace must also unshare the thread.
+ */
+ if (unshare_flags & CLONE_NEWUSER)
+ unshare_flags |= CLONE_THREAD;
+ /*
+ * If unsharing a pid namespace must also unshare the thread.
+ */
+ if (unshare_flags & CLONE_NEWPID)
+ unshare_flags |= CLONE_THREAD;
+ /*
+ * If unsharing a thread from a thread group, must also unshare vm.
+ */
+ if (unshare_flags & CLONE_THREAD)
+ unshare_flags |= CLONE_VM;
+ /*
+ * If unsharing vm, must also unshare signal handlers.
+ */
+ if (unshare_flags & CLONE_VM)
+ unshare_flags |= CLONE_SIGHAND;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
+
+ err = check_unshare_flags(unshare_flags);
+ if (err)
+ goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
@@ -1814,11 +1837,15 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
err = unshare_fd(unshare_flags, &new_fd);
if (err)
goto bad_unshare_cleanup_fs;
- err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
+ err = unshare_userns(unshare_flags, &new_cred);
if (err)
goto bad_unshare_cleanup_fd;
+ err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
+ new_cred, new_fs);
+ if (err)
+ goto bad_unshare_cleanup_cred;
- if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
+ if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
@@ -1851,11 +1878,20 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
}
task_unlock(current);
+
+ if (new_cred) {
+ /* Install the new user namespace */
+ commit_creds(new_cred);
+ new_cred = NULL;
+ }
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
+bad_unshare_cleanup_cred:
+ if (new_cred)
+ put_cred(new_cred);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
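With the flag implications added to sys_unshare() above (CLONE_NEWUSER and CLONE_NEWPID now pull in CLONE_THREAD, which pulls in CLONE_VM and CLONE_SIGHAND), a single-threaded process can unshare a user namespace directly, while a multi-threaded one gets -EINVAL. A small userspace sketch:
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
	/* Only works from a single-threaded process, since the implied
	 * CLONE_THREAD/CLONE_VM/CLONE_SIGHAND cannot be unshared otherwise. */
	if (unshare(CLONE_NEWUSER) == -1) {
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}
	/* uid is the overflow uid until a uid_map is written. */
	printf("in new user namespace, uid=%d\n", (int)getuid());
	return 0;
}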
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 35c70c9e24d..e49a288fa47 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -818,7 +818,7 @@ static void irq_thread_dtor(struct callback_head *unused)
action = kthread_data(tsk);
pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
- tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+ tsk->comm, tsk->pid, action->irq);
desc = irq_to_desc(action->irq);
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 30b7b225306..e30ac0fe61c 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -4,6 +4,7 @@
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
+#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 1c317e38683..0023a87e8de 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -219,9 +219,9 @@ static int ____call_usermodehelper(void *data)
commit_creds(new);
- retval = kernel_execve(sub_info->path,
- (const char *const *)sub_info->argv,
- (const char *const *)sub_info->envp);
+ retval = do_execve(sub_info->path,
+ (const char __user *const __user *)sub_info->argv,
+ (const char __user *const __user *)sub_info->envp);
if (!retval)
return 0;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 29fb60caecb..691dc2ef9ba 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -428,7 +428,7 @@ int kthreadd(void *unused)
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_cpus_allowed_ptr(tsk, cpu_all_mask);
- set_mems_allowed(node_states[N_HIGH_MEMORY]);
+ set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
diff --git a/kernel/modsign_certificate.S b/kernel/modsign_certificate.S
new file mode 100644
index 00000000000..246b4c6e613
--- /dev/null
+++ b/kernel/modsign_certificate.S
@@ -0,0 +1,19 @@
+/* SYMBOL_PREFIX defined on commandline from CONFIG_SYMBOL_PREFIX */
+#ifndef SYMBOL_PREFIX
+#define ASM_SYMBOL(sym) sym
+#else
+#define PASTE2(x,y) x##y
+#define PASTE(x,y) PASTE2(x,y)
+#define ASM_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
+#endif
+
+#define GLOBAL(name) \
+ .globl ASM_SYMBOL(name); \
+ ASM_SYMBOL(name):
+
+ .section ".init.data","aw"
+
+GLOBAL(modsign_certificate_list)
+ .incbin "signing_key.x509"
+ .incbin "extra_certificates"
+GLOBAL(modsign_certificate_list_end)
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 767e559dfb1..2b6e69909c3 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -20,12 +20,6 @@ struct key *modsign_keyring;
extern __initdata const u8 modsign_certificate_list[];
extern __initdata const u8 modsign_certificate_list_end[];
-asm(".section .init.data,\"aw\"\n"
- SYMBOL_PREFIX "modsign_certificate_list:\n"
- ".incbin \"signing_key.x509\"\n"
- ".incbin \"extra_certificates\"\n"
- SYMBOL_PREFIX "modsign_certificate_list_end:"
- );
/*
* We need to make sure ccache doesn't cache the .o file as it doesn't notice
@@ -40,18 +34,15 @@ static __init int module_verify_init(void)
{
pr_notice("Initialise module verification\n");
- modsign_keyring = key_alloc(&key_type_keyring, ".module_sign",
- KUIDT_INIT(0), KGIDT_INIT(0),
- current_cred(),
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ,
- KEY_ALLOC_NOT_IN_QUOTA);
+ modsign_keyring = keyring_alloc(".module_sign",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(modsign_keyring))
panic("Can't allocate module signing keyring\n");
- if (key_instantiate_and_link(modsign_keyring, NULL, 0, NULL, NULL) < 0)
- panic("Can't instantiate module signing keyring\n");
-
return 0;
}
diff --git a/kernel/module.c b/kernel/module.c
index 6e48c3a4359..250092c1d57 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -21,6 +21,7 @@
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
@@ -28,6 +29,7 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
@@ -59,6 +61,7 @@
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/fips.h>
+#include <uapi/linux/module.h>
#include "module-internal.h"
#define CREATE_TRACE_POINTS
@@ -372,9 +375,6 @@ static bool check_symbol(const struct symsearch *syms,
printk(KERN_WARNING "Symbol %s is being used "
"by a non-GPL module, which will not "
"be allowed in the future\n", fsa->name);
- printk(KERN_WARNING "Please see the file "
- "Documentation/feature-removal-schedule.txt "
- "in the kernel source tree for more details.\n");
}
}
@@ -2282,7 +2282,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
Elf_Shdr *symsect = info->sechdrs + info->index.sym;
Elf_Shdr *strsect = info->sechdrs + info->index.str;
const Elf_Sym *src;
- unsigned int i, nsrc, ndst, strtab_size;
+ unsigned int i, nsrc, ndst, strtab_size = 0;
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
@@ -2293,9 +2293,6 @@ static void layout_symtab(struct module *mod, struct load_info *info)
src = (void *)info->hdr + symsect->sh_offset;
nsrc = symsect->sh_size / sizeof(*src);
- /* strtab always starts with a nul, so offset 0 is the empty string. */
- strtab_size = 1;
-
/* Compute total space required for the core symbols' strtab. */
for (ndst = i = 0; i < nsrc; i++) {
if (i == 0 ||
@@ -2337,7 +2334,6 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
mod->core_symtab = dst = mod->module_core + info->symoffs;
mod->core_strtab = s = mod->module_core + info->stroffs;
src = mod->symtab;
- *s++ = 0;
for (ndst = i = 0; i < mod->num_symtab; i++) {
if (i == 0 ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
@@ -2378,7 +2374,7 @@ static void dynamic_debug_remove(struct _ddebug *debug)
void * __weak module_alloc(unsigned long size)
{
- return size == 0 ? NULL : vmalloc_exec(size);
+ return vmalloc_exec(size);
}
static void *module_alloc_update_bounds(unsigned long size)
@@ -2425,18 +2421,17 @@ static inline void kmemleak_load_module(const struct module *mod,
#endif
#ifdef CONFIG_MODULE_SIG
-static int module_sig_check(struct load_info *info,
- const void *mod, unsigned long *_len)
+static int module_sig_check(struct load_info *info)
{
int err = -ENOKEY;
- unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
- unsigned long len = *_len;
+ const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
+ const void *mod = info->hdr;
- if (len > markerlen &&
- memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
+ if (info->len > markerlen &&
+ memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
- *_len -= markerlen;
- err = mod_verify_sig(mod, _len);
+ info->len -= markerlen;
+ err = mod_verify_sig(mod, &info->len);
}
if (!err) {
@@ -2454,59 +2449,107 @@ static int module_sig_check(struct load_info *info,
return err;
}
#else /* !CONFIG_MODULE_SIG */
-static int module_sig_check(struct load_info *info,
- void *mod, unsigned long *len)
+static int module_sig_check(struct load_info *info)
{
return 0;
}
#endif /* !CONFIG_MODULE_SIG */
-/* Sets info->hdr, info->len and info->sig_ok. */
-static int copy_and_check(struct load_info *info,
- const void __user *umod, unsigned long len,
- const char __user *uargs)
+/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
+static int elf_header_check(struct load_info *info)
+{
+ if (info->len < sizeof(*(info->hdr)))
+ return -ENOEXEC;
+
+ if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
+ || info->hdr->e_type != ET_REL
+ || !elf_check_arch(info->hdr)
+ || info->hdr->e_shentsize != sizeof(Elf_Shdr))
+ return -ENOEXEC;
+
+ if (info->hdr->e_shoff >= info->len
+ || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
+ info->len - info->hdr->e_shoff))
+ return -ENOEXEC;
+
+ return 0;
+}
+
+/* Sets info->hdr and info->len. */
+static int copy_module_from_user(const void __user *umod, unsigned long len,
+ struct load_info *info)
{
int err;
- Elf_Ehdr *hdr;
- if (len < sizeof(*hdr))
+ info->len = len;
+ if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
+ err = security_kernel_module_from_file(NULL);
+ if (err)
+ return err;
+
/* Suck in entire file: we'll want most of it. */
- if ((hdr = vmalloc(len)) == NULL)
+ info->hdr = vmalloc(info->len);
+ if (!info->hdr)
return -ENOMEM;
- if (copy_from_user(hdr, umod, len) != 0) {
- err = -EFAULT;
- goto free_hdr;
+ if (copy_from_user(info->hdr, umod, info->len) != 0) {
+ vfree(info->hdr);
+ return -EFAULT;
}
- err = module_sig_check(info, hdr, &len);
+ return 0;
+}
+
+/* Sets info->hdr and info->len. */
+static int copy_module_from_fd(int fd, struct load_info *info)
+{
+ struct file *file;
+ int err;
+ struct kstat stat;
+ loff_t pos;
+ ssize_t bytes = 0;
+
+ file = fget(fd);
+ if (!file)
+ return -ENOEXEC;
+
+ err = security_kernel_module_from_file(file);
if (err)
- goto free_hdr;
+ goto out;
- /* Sanity checks against insmoding binaries or wrong arch,
- weird elf version */
- if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
- || hdr->e_type != ET_REL
- || !elf_check_arch(hdr)
- || hdr->e_shentsize != sizeof(Elf_Shdr)) {
- err = -ENOEXEC;
- goto free_hdr;
- }
+ err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
+ if (err)
+ goto out;
- if (hdr->e_shoff >= len ||
- hdr->e_shnum * sizeof(Elf_Shdr) > len - hdr->e_shoff) {
- err = -ENOEXEC;
- goto free_hdr;
+ if (stat.size > INT_MAX) {
+ err = -EFBIG;
+ goto out;
+ }
+ info->hdr = vmalloc(stat.size);
+ if (!info->hdr) {
+ err = -ENOMEM;
+ goto out;
}
- info->hdr = hdr;
- info->len = len;
- return 0;
+ pos = 0;
+ while (pos < stat.size) {
+ bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
+ stat.size - pos);
+ if (bytes < 0) {
+ vfree(info->hdr);
+ err = bytes;
+ goto out;
+ }
+ if (bytes == 0)
+ break;
+ pos += bytes;
+ }
+ info->len = pos;
-free_hdr:
- vfree(hdr);
+out:
+ fput(file);
return err;
}
@@ -2515,7 +2558,7 @@ static void free_copy(struct load_info *info)
vfree(info->hdr);
}
-static int rewrite_section_headers(struct load_info *info)
+static int rewrite_section_headers(struct load_info *info, int flags)
{
unsigned int i;
@@ -2543,7 +2586,10 @@ static int rewrite_section_headers(struct load_info *info)
}
/* Track but don't keep modinfo and version sections. */
- info->index.vers = find_sec(info, "__versions");
+ if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
+ info->index.vers = 0; /* Pretend no __versions section! */
+ else
+ info->index.vers = find_sec(info, "__versions");
info->index.info = find_sec(info, ".modinfo");
info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -2558,7 +2604,7 @@ static int rewrite_section_headers(struct load_info *info)
* Return the temporary module pointer (we'll replace it with the final
* one when we move the module sections around).
*/
-static struct module *setup_load_info(struct load_info *info)
+static struct module *setup_load_info(struct load_info *info, int flags)
{
unsigned int i;
int err;
@@ -2569,7 +2615,7 @@ static struct module *setup_load_info(struct load_info *info)
info->secstrings = (void *)info->hdr
+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
- err = rewrite_section_headers(info);
+ err = rewrite_section_headers(info, flags);
if (err)
return ERR_PTR(err);
@@ -2607,11 +2653,14 @@ static struct module *setup_load_info(struct load_info *info)
return mod;
}
-static int check_modinfo(struct module *mod, struct load_info *info)
+static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
const char *modmagic = get_modinfo(info, "vermagic");
int err;
+ if (flags & MODULE_INIT_IGNORE_VERMAGIC)
+ modmagic = NULL;
+
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
err = try_to_force_load(mod, "bad vermagic");
@@ -2741,20 +2790,23 @@ static int move_module(struct module *mod, struct load_info *info)
memset(ptr, 0, mod->core_size);
mod->module_core = ptr;
- ptr = module_alloc_update_bounds(mod->init_size);
- /*
- * The pointer to this block is stored in the module structure
- * which is inside the block. This block doesn't need to be
- * scanned as it contains data and code that will be freed
- * after the module is initialized.
- */
- kmemleak_ignore(ptr);
- if (!ptr && mod->init_size) {
- module_free(mod, mod->module_core);
- return -ENOMEM;
- }
- memset(ptr, 0, mod->init_size);
- mod->module_init = ptr;
+ if (mod->init_size) {
+ ptr = module_alloc_update_bounds(mod->init_size);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+ * scanned as it contains data and code that will be freed
+ * after the module is initialized.
+ */
+ kmemleak_ignore(ptr);
+ if (!ptr) {
+ module_free(mod, mod->module_core);
+ return -ENOMEM;
+ }
+ memset(ptr, 0, mod->init_size);
+ mod->module_init = ptr;
+ } else
+ mod->module_init = NULL;
/* Transfer each section which specifies SHF_ALLOC */
pr_debug("final section addresses:\n");
@@ -2847,18 +2899,18 @@ int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
return 0;
}
-static struct module *layout_and_allocate(struct load_info *info)
+static struct module *layout_and_allocate(struct load_info *info, int flags)
{
/* Module within temporary copy. */
struct module *mod;
Elf_Shdr *pcpusec;
int err;
- mod = setup_load_info(info);
+ mod = setup_load_info(info, flags);
if (IS_ERR(mod))
return mod;
- err = check_modinfo(mod, info);
+ err = check_modinfo(mod, info, flags);
if (err)
return ERR_PTR(err);
@@ -2945,33 +2997,124 @@ static bool finished_loading(const char *name)
return ret;
}
+/* Call module constructors. */
+static void do_mod_ctors(struct module *mod)
+{
+#ifdef CONFIG_CONSTRUCTORS
+ unsigned long i;
+
+ for (i = 0; i < mod->num_ctors; i++)
+ mod->ctors[i]();
+#endif
+}
+
+/* This is where the real work happens */
+static int do_init_module(struct module *mod)
+{
+ int ret = 0;
+
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_COMING, mod);
+
+ /* Set RO and NX regions for core */
+ set_section_ro_nx(mod->module_core,
+ mod->core_text_size,
+ mod->core_ro_size,
+ mod->core_size);
+
+ /* Set RO and NX regions for init */
+ set_section_ro_nx(mod->module_init,
+ mod->init_text_size,
+ mod->init_ro_size,
+ mod->init_size);
+
+ do_mod_ctors(mod);
+ /* Start the module */
+ if (mod->init != NULL)
+ ret = do_one_initcall(mod->init);
+ if (ret < 0) {
+ /* Init routine failed: abort. Try to protect us from
+ buggy refcounters. */
+ mod->state = MODULE_STATE_GOING;
+ synchronize_sched();
+ module_put(mod);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_GOING, mod);
+ free_module(mod);
+ wake_up_all(&module_wq);
+ return ret;
+ }
+ if (ret > 0) {
+ printk(KERN_WARNING
+"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
+"%s: loading module anyway...\n",
+ __func__, mod->name, ret,
+ __func__);
+ dump_stack();
+ }
+
+ /* Now it's a first class citizen! */
+ mod->state = MODULE_STATE_LIVE;
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_LIVE, mod);
+
+ /* We need to finish all async code before the module init sequence is done */
+ async_synchronize_full();
+
+ mutex_lock(&module_mutex);
+ /* Drop initial reference. */
+ module_put(mod);
+ trim_init_extable(mod);
+#ifdef CONFIG_KALLSYMS
+ mod->num_symtab = mod->core_num_syms;
+ mod->symtab = mod->core_symtab;
+ mod->strtab = mod->core_strtab;
+#endif
+ unset_module_init_ro_nx(mod);
+ module_free(mod, mod->module_init);
+ mod->module_init = NULL;
+ mod->init_size = 0;
+ mod->init_ro_size = 0;
+ mod->init_text_size = 0;
+ mutex_unlock(&module_mutex);
+ wake_up_all(&module_wq);
+
+ return 0;
+}
+
+static int may_init_module(void)
+{
+ if (!capable(CAP_SYS_MODULE) || modules_disabled)
+ return -EPERM;
+
+ return 0;
+}
+
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
-static struct module *load_module(void __user *umod,
- unsigned long len,
- const char __user *uargs)
+static int load_module(struct load_info *info, const char __user *uargs,
+ int flags)
{
- struct load_info info = { NULL, };
struct module *mod, *old;
long err;
- pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n",
- umod, len, uargs);
+ err = module_sig_check(info);
+ if (err)
+ goto free_copy;
- /* Copy in the blobs from userspace, check they are vaguely sane. */
- err = copy_and_check(&info, umod, len, uargs);
+ err = elf_header_check(info);
if (err)
- return ERR_PTR(err);
+ goto free_copy;
/* Figure out module layout, and allocate all the memory. */
- mod = layout_and_allocate(&info);
+ mod = layout_and_allocate(info, flags);
if (IS_ERR(mod)) {
err = PTR_ERR(mod);
goto free_copy;
}
#ifdef CONFIG_MODULE_SIG
- mod->sig_ok = info.sig_ok;
+ mod->sig_ok = info->sig_ok;
if (!mod->sig_ok)
add_taint_module(mod, TAINT_FORCED_MODULE);
#endif
@@ -2983,25 +3126,25 @@ static struct module *load_module(void __user *umod,
/* Now we've got everything in the final locations, we can
* find optional sections. */
- find_module_sections(mod, &info);
+ find_module_sections(mod, info);
err = check_module_license_and_versions(mod);
if (err)
goto free_unload;
/* Set up MODINFO_ATTR fields */
- setup_modinfo(mod, &info);
+ setup_modinfo(mod, info);
/* Fix up syms, so that st_value is a pointer to location. */
- err = simplify_symbols(mod, &info);
+ err = simplify_symbols(mod, info);
if (err < 0)
goto free_modinfo;
- err = apply_relocations(mod, &info);
+ err = apply_relocations(mod, info);
if (err < 0)
goto free_modinfo;
- err = post_relocation(mod, &info);
+ err = post_relocation(mod, info);
if (err < 0)
goto free_modinfo;
@@ -3041,14 +3184,14 @@ again:
}
/* This has to be done once we're sure module name is unique. */
- dynamic_debug_setup(info.debug, info.num_debug);
+ dynamic_debug_setup(info->debug, info->num_debug);
/* Find duplicate symbols */
err = verify_export_symbols(mod);
if (err < 0)
goto ddebug;
- module_bug_finalize(info.hdr, info.sechdrs, mod);
+ module_bug_finalize(info->hdr, info->sechdrs, mod);
list_add_rcu(&mod->list, &modules);
mutex_unlock(&module_mutex);
@@ -3059,16 +3202,17 @@ again:
goto unlink;
/* Link in to sysfs. */
- err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
+ err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
if (err < 0)
goto unlink;
/* Get rid of temporary copy. */
- free_copy(&info);
+ free_copy(info);
/* Done! */
trace_module_load(mod);
- return mod;
+
+ return do_init_module(mod);
unlink:
mutex_lock(&module_mutex);
@@ -3077,7 +3221,7 @@ again:
module_bug_cleanup(mod);
wake_up_all(&module_wq);
ddebug:
- dynamic_debug_remove(info.debug);
+ dynamic_debug_remove(info->debug);
unlock:
mutex_unlock(&module_mutex);
synchronize_sched();
@@ -3089,106 +3233,52 @@ again:
free_unload:
module_unload_free(mod);
free_module:
- module_deallocate(mod, &info);
+ module_deallocate(mod, info);
free_copy:
- free_copy(&info);
- return ERR_PTR(err);
-}
-
-/* Call module constructors. */
-static void do_mod_ctors(struct module *mod)
-{
-#ifdef CONFIG_CONSTRUCTORS
- unsigned long i;
-
- for (i = 0; i < mod->num_ctors; i++)
- mod->ctors[i]();
-#endif
+ free_copy(info);
+ return err;
}
-/* This is where the real work happens */
SYSCALL_DEFINE3(init_module, void __user *, umod,
unsigned long, len, const char __user *, uargs)
{
- struct module *mod;
- int ret = 0;
+ int err;
+ struct load_info info = { };
- /* Must have permission */
- if (!capable(CAP_SYS_MODULE) || modules_disabled)
- return -EPERM;
+ err = may_init_module();
+ if (err)
+ return err;
- /* Do all the hard work */
- mod = load_module(umod, len, uargs);
- if (IS_ERR(mod))
- return PTR_ERR(mod);
+ pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
+ umod, len, uargs);
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_COMING, mod);
+ err = copy_module_from_user(umod, len, &info);
+ if (err)
+ return err;
- /* Set RO and NX regions for core */
- set_section_ro_nx(mod->module_core,
- mod->core_text_size,
- mod->core_ro_size,
- mod->core_size);
+ return load_module(&info, uargs, 0);
+}
- /* Set RO and NX regions for init */
- set_section_ro_nx(mod->module_init,
- mod->init_text_size,
- mod->init_ro_size,
- mod->init_size);
+SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
+{
+ int err;
+ struct load_info info = { };
- do_mod_ctors(mod);
- /* Start the module */
- if (mod->init != NULL)
- ret = do_one_initcall(mod->init);
- if (ret < 0) {
- /* Init routine failed: abort. Try to protect us from
- buggy refcounters. */
- mod->state = MODULE_STATE_GOING;
- synchronize_sched();
- module_put(mod);
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_GOING, mod);
- free_module(mod);
- wake_up_all(&module_wq);
- return ret;
- }
- if (ret > 0) {
- printk(KERN_WARNING
-"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
-"%s: loading module anyway...\n",
- __func__, mod->name, ret,
- __func__);
- dump_stack();
- }
+ err = may_init_module();
+ if (err)
+ return err;
- /* Now it's a first class citizen! */
- mod->state = MODULE_STATE_LIVE;
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_LIVE, mod);
+ pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
- /* We need to finish all async code before the module init sequence is done */
- async_synchronize_full();
+ if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
+ |MODULE_INIT_IGNORE_VERMAGIC))
+ return -EINVAL;
- mutex_lock(&module_mutex);
- /* Drop initial reference. */
- module_put(mod);
- trim_init_extable(mod);
-#ifdef CONFIG_KALLSYMS
- mod->num_symtab = mod->core_num_syms;
- mod->symtab = mod->core_symtab;
- mod->strtab = mod->core_strtab;
-#endif
- unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
- mod->module_init = NULL;
- mod->init_size = 0;
- mod->init_ro_size = 0;
- mod->init_text_size = 0;
- mutex_unlock(&module_mutex);
- wake_up_all(&module_wq);
+ err = copy_module_from_fd(fd, &info);
+ if (err)
+ return err;
- return 0;
+ return load_module(&info, uargs, flags);
}
static inline int within(unsigned long addr, void *start, unsigned long size)
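
The finit_module() path above hands the kernel an open file descriptor instead of a user buffer. Below is a minimal userspace sketch of driving it; it is illustrative only (not part of the patch) and assumes headers that define __NR_finit_module. The flags argument corresponds to the MODULE_INIT_IGNORE_MODVERSIONS / MODULE_INIT_IGNORE_VERMAGIC check added to the syscall.

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* args = "", flags = 0; MODULE_INIT_* flags could relax the checks */
        if (syscall(__NR_finit_module, fd, "", 0) != 0) {
                perror("finit_module");
                return 1;
        }
        return 0;
}
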
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 7e1c3de1ce4..78e2ecb2016 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -57,7 +57,8 @@ static inline struct nsproxy *create_nsproxy(void)
* leave it to the caller to do proper locking and attach it to task.
*/
static struct nsproxy *create_new_namespaces(unsigned long flags,
- struct task_struct *tsk, struct fs_struct *new_fs)
+ struct task_struct *tsk, struct user_namespace *user_ns,
+ struct fs_struct *new_fs)
{
struct nsproxy *new_nsp;
int err;
@@ -66,31 +67,31 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
if (!new_nsp)
return ERR_PTR(-ENOMEM);
- new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
+ new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);
if (IS_ERR(new_nsp->mnt_ns)) {
err = PTR_ERR(new_nsp->mnt_ns);
goto out_ns;
}
- new_nsp->uts_ns = copy_utsname(flags, tsk);
+ new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);
if (IS_ERR(new_nsp->uts_ns)) {
err = PTR_ERR(new_nsp->uts_ns);
goto out_uts;
}
- new_nsp->ipc_ns = copy_ipcs(flags, tsk);
+ new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);
if (IS_ERR(new_nsp->ipc_ns)) {
err = PTR_ERR(new_nsp->ipc_ns);
goto out_ipc;
}
- new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
+ new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
if (IS_ERR(new_nsp->pid_ns)) {
err = PTR_ERR(new_nsp->pid_ns);
goto out_pid;
}
- new_nsp->net_ns = copy_net_ns(flags, task_cred_xxx(tsk, user_ns), tsk->nsproxy->net_ns);
+ new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);
if (IS_ERR(new_nsp->net_ns)) {
err = PTR_ERR(new_nsp->net_ns);
goto out_net;
@@ -122,6 +123,7 @@ out_ns:
int copy_namespaces(unsigned long flags, struct task_struct *tsk)
{
struct nsproxy *old_ns = tsk->nsproxy;
+ struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
struct nsproxy *new_ns;
int err = 0;
@@ -134,7 +136,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
CLONE_NEWPID | CLONE_NEWNET)))
return 0;
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN)) {
err = -EPERM;
goto out;
}
@@ -151,7 +153,8 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
goto out;
}
- new_ns = create_new_namespaces(flags, tsk, tsk->fs);
+ new_ns = create_new_namespaces(flags, tsk,
+ task_cred_xxx(tsk, user_ns), tsk->fs);
if (IS_ERR(new_ns)) {
err = PTR_ERR(new_ns);
goto out;
@@ -183,19 +186,21 @@ void free_nsproxy(struct nsproxy *ns)
* On success, returns the new nsproxy.
*/
int unshare_nsproxy_namespaces(unsigned long unshare_flags,
- struct nsproxy **new_nsp, struct fs_struct *new_fs)
+ struct nsproxy **new_nsp, struct cred *new_cred, struct fs_struct *new_fs)
{
+ struct user_namespace *user_ns;
int err = 0;
if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWNET)))
+ CLONE_NEWNET | CLONE_NEWPID)))
return 0;
- if (!capable(CAP_SYS_ADMIN))
+ user_ns = new_cred ? new_cred->user_ns : current_user_ns();
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
- *new_nsp = create_new_namespaces(unshare_flags, current,
- new_fs ? new_fs : current->fs);
+ *new_nsp = create_new_namespaces(unshare_flags, current, user_ns,
+ new_fs ? new_fs : current->fs);
if (IS_ERR(*new_nsp)) {
err = PTR_ERR(*new_nsp);
goto out;
@@ -241,9 +246,6 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
struct file *file;
int err;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
file = proc_ns_fget(fd);
if (IS_ERR(file))
return PTR_ERR(file);
@@ -254,7 +256,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
if (nstype && (ops->type != nstype))
goto out;
- new_nsproxy = create_new_namespaces(0, tsk, tsk->fs);
+ new_nsproxy = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs);
if (IS_ERR(new_nsproxy)) {
err = PTR_ERR(new_nsproxy);
goto out;
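
Handing create_new_namespaces() an explicit user namespace, and switching the permission checks to ns_capable(), is what lets the other namespace types piggy-back on CLONE_NEWUSER. A hedged userspace sketch of the combined unshare follows; it is illustrative only, assumes a kernel carrying the whole series, and keeps error handling minimal.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child;

        /* No global privilege needed: the new pid namespace is owned by
         * the user namespace created in the same call. */
        if (unshare(CLONE_NEWUSER | CLONE_NEWPID) != 0) {
                perror("unshare");
                return 1;
        }
        child = fork();         /* first child becomes PID 1 of the new ns */
        if (child == 0) {
                printf("child sees pid %d\n", getpid());
                return 0;
        }
        waitpid(child, NULL, 0);
        return 0;
}
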
diff --git a/kernel/padata.c b/kernel/padata.c
index 89fe3d1b9ef..072f4ee4eb8 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -171,7 +171,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
int cpu, num_cpus;
unsigned int next_nr, next_index;
- struct padata_parallel_queue *queue, *next_queue;
+ struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
@@ -204,8 +204,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
goto out;
}
- queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
- if (queue->cpu_index == next_queue->cpu_index) {
+ if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
goto out;
}
diff --git a/kernel/pid.c b/kernel/pid.c
index aebd4f5aaf4..de9af600006 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,8 @@
/*
* Generic pidhash and scalable, time-bounded PID allocator
*
- * (C) 2002-2003 William Irwin, IBM
- * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2003 Nadia Yvette Chambers, IBM
+ * (C) 2004 Nadia Yvette Chambers, Oracle
* (C) 2002-2004 Ingo Molnar, Red Hat
*
* pid-structures are backing objects for tasks sharing a given ID to chain
@@ -36,6 +36,7 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
+#include <linux/proc_fs.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -78,24 +79,11 @@ struct pid_namespace init_pid_ns = {
.last_pid = 0,
.level = 0,
.child_reaper = &init_task,
+ .user_ns = &init_user_ns,
+ .proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);
-int is_container_init(struct task_struct *tsk)
-{
- int ret = 0;
- struct pid *pid;
-
- rcu_read_lock();
- pid = task_pid(tsk);
- if (pid != NULL && pid->numbers[pid->level].nr == 1)
- ret = 1;
- rcu_read_unlock();
-
- return ret;
-}
-EXPORT_SYMBOL(is_container_init);
-
/*
* Note: disable interrupts while the pidmap_lock is held as an
* interrupt might come in and do read_lock(&tasklist_lock).
@@ -269,8 +257,23 @@ void free_pid(struct pid *pid)
unsigned long flags;
spin_lock_irqsave(&pidmap_lock, flags);
- for (i = 0; i <= pid->level; i++)
- hlist_del_rcu(&pid->numbers[i].pid_chain);
+ for (i = 0; i <= pid->level; i++) {
+ struct upid *upid = pid->numbers + i;
+ struct pid_namespace *ns = upid->ns;
+ hlist_del_rcu(&upid->pid_chain);
+ switch(--ns->nr_hashed) {
+ case 1:
+ /* When all that is left in the pid namespace
+ * is the reaper, wake up the reaper. The reaper
+ * may be sleeping in zap_pid_ns_processes().
+ */
+ wake_up_process(ns->child_reaper);
+ break;
+ case 0:
+ schedule_work(&ns->proc_work);
+ break;
+ }
+ }
spin_unlock_irqrestore(&pidmap_lock, flags);
for (i = 0; i <= pid->level; i++)
@@ -292,6 +295,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
goto out;
tmp = ns;
+ pid->level = ns->level;
for (i = ns->level; i >= 0; i--) {
nr = alloc_pidmap(tmp);
if (nr < 0)
@@ -302,22 +306,32 @@ struct pid *alloc_pid(struct pid_namespace *ns)
tmp = tmp->parent;
}
+ if (unlikely(is_child_reaper(pid))) {
+ if (pid_ns_prepare_proc(ns))
+ goto out_free;
+ }
+
get_pid_ns(ns);
- pid->level = ns->level;
atomic_set(&pid->count, 1);
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
upid = pid->numbers + ns->level;
spin_lock_irq(&pidmap_lock);
- for ( ; upid >= pid->numbers; --upid)
+ if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
+ goto out_unlock;
+ for ( ; upid >= pid->numbers; --upid) {
hlist_add_head_rcu(&upid->pid_chain,
&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
+ upid->ns->nr_hashed++;
+ }
spin_unlock_irq(&pidmap_lock);
out:
return pid;
+out_unlock:
+ spin_unlock_irq(&pidmap_lock);
out_free:
while (++i <= ns->level)
free_pidmap(pid->numbers + i);
@@ -327,6 +341,13 @@ out_free:
goto out;
}
+void disable_pid_allocation(struct pid_namespace *ns)
+{
+ spin_lock_irq(&pidmap_lock);
+ ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+ spin_unlock_irq(&pidmap_lock);
+}
+
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
struct hlist_node *elem;
@@ -344,7 +365,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
- return find_pid_ns(nr, current->nsproxy->pid_ns);
+ return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
@@ -428,7 +449,7 @@ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
struct task_struct *find_task_by_vpid(pid_t vnr)
{
- return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
@@ -483,7 +504,7 @@ EXPORT_SYMBOL_GPL(pid_nr_ns);
pid_t pid_vnr(struct pid *pid)
{
- return pid_nr_ns(pid, current->nsproxy->pid_ns);
+ return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);
@@ -494,7 +515,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
rcu_read_lock();
if (!ns)
- ns = current->nsproxy->pid_ns;
+ ns = task_active_pid_ns(current);
if (likely(pid_alive(task))) {
if (type != PIDTYPE_PID)
task = task->group_leader;
@@ -558,6 +579,9 @@ void __init pidhash_init(void)
void __init pidmap_init(void)
{
+ /* Verify no one has done anything silly */
+ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+
/* bump default and minimum pid_max based on number of cpus */
pid_max = min(pid_max_max, max_t(int, pid_max,
PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
@@ -569,6 +593,7 @@ void __init pidmap_init(void)
/* Reserve PID 0. We never call free_pidmap(0) */
set_bit(0, init_pid_ns.pidmap[0].page);
atomic_dec(&init_pid_ns.pidmap[0].nr_free);
+ init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;
init_pid_ns.pid_cachep = KMEM_CACHE(pid,
SLAB_HWCACHE_ALIGN | SLAB_PANIC);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 7b07cc0dfb7..c1c3dc1c602 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -10,6 +10,7 @@
#include <linux/pid.h>
#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
@@ -71,10 +72,17 @@ err_alloc:
return NULL;
}
+static void proc_cleanup_work(struct work_struct *work)
+{
+ struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
+ pid_ns_release_proc(ns);
+}
+
/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32
-static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
+static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
+ struct pid_namespace *parent_pid_ns)
{
struct pid_namespace *ns;
unsigned int level = parent_pid_ns->level + 1;
@@ -99,9 +107,16 @@ static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_p
if (ns->pid_cachep == NULL)
goto out_free_map;
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err)
+ goto out_free_map;
+
kref_init(&ns->kref);
ns->level = level;
ns->parent = get_pid_ns(parent_pid_ns);
+ ns->user_ns = get_user_ns(user_ns);
+ ns->nr_hashed = PIDNS_HASH_ADDING;
+ INIT_WORK(&ns->proc_work, proc_cleanup_work);
set_bit(0, ns->pidmap[0].page);
atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
@@ -109,14 +124,8 @@ static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_p
for (i = 1; i < PIDMAP_ENTRIES; i++)
atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
- err = pid_ns_prepare_proc(ns);
- if (err)
- goto out_put_parent_pid_ns;
-
return ns;
-out_put_parent_pid_ns:
- put_pid_ns(parent_pid_ns);
out_free_map:
kfree(ns->pidmap[0].page);
out_free:
@@ -129,18 +138,21 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
{
int i;
+ proc_free_inum(ns->proc_inum);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
+ put_user_ns(ns->user_ns);
kmem_cache_free(pid_ns_cachep, ns);
}
-struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
+struct pid_namespace *copy_pid_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
if (!(flags & CLONE_NEWPID))
return get_pid_ns(old_ns);
- if (flags & (CLONE_THREAD|CLONE_PARENT))
+ if (task_active_pid_ns(current) != old_ns)
return ERR_PTR(-EINVAL);
- return create_pid_namespace(old_ns);
+ return create_pid_namespace(user_ns, old_ns);
}
static void free_pid_ns(struct kref *kref)
@@ -170,6 +182,9 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
int rc;
struct task_struct *task, *me = current;
+ /* Don't allow any more processes into the pid namespace */
+ disable_pid_allocation(pid_ns);
+
/* Ignore SIGCHLD causing any terminated children to autoreap */
spin_lock_irq(&me->sighand->siglock);
me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
@@ -211,22 +226,15 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
/*
* sys_wait4() above can't reap the TASK_DEAD children.
- * Make sure they all go away, see __unhash_process().
+ * Make sure they all go away, see free_pid().
*/
for (;;) {
- bool need_wait = false;
-
- read_lock(&tasklist_lock);
- if (!list_empty(&current->children)) {
- __set_current_state(TASK_UNINTERRUPTIBLE);
- need_wait = true;
- }
- read_unlock(&tasklist_lock);
-
- if (!need_wait)
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (pid_ns->nr_hashed == 1)
break;
schedule();
}
+ __set_current_state(TASK_RUNNING);
if (pid_ns->reboot)
current->signal->group_exit_code = pid_ns->reboot;
@@ -239,9 +247,10 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
struct ctl_table tmp = *table;
- if (write && !capable(CAP_SYS_ADMIN))
+ if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
/*
@@ -250,7 +259,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
* it should synchronize its usage with external means.
*/
- tmp.data = &current->nsproxy->pid_ns->last_pid;
+ tmp.data = &pid_ns->last_pid;
return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}
@@ -299,6 +308,68 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
return 0;
}
+static void *pidns_get(struct task_struct *task)
+{
+ struct pid_namespace *ns;
+
+ rcu_read_lock();
+ ns = get_pid_ns(task_active_pid_ns(task));
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static void pidns_put(void *ns)
+{
+ put_pid_ns(ns);
+}
+
+static int pidns_install(struct nsproxy *nsproxy, void *ns)
+{
+ struct pid_namespace *active = task_active_pid_ns(current);
+ struct pid_namespace *ancestor, *new = ns;
+
+ if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * Only allow entering the current active pid namespace
+ * or a child of the current active pid namespace.
+ *
+ * This is required for fork to return a usable pid value and
+ * this maintains the property that processes and their
+ * children can not escape their current pid namespace.
+ */
+ if (new->level < active->level)
+ return -EINVAL;
+
+ ancestor = new;
+ while (ancestor->level > active->level)
+ ancestor = ancestor->parent;
+ if (ancestor != active)
+ return -EINVAL;
+
+ put_pid_ns(nsproxy->pid_ns);
+ nsproxy->pid_ns = get_pid_ns(new);
+ return 0;
+}
+
+static unsigned int pidns_inum(void *ns)
+{
+ struct pid_namespace *pid_ns = ns;
+ return pid_ns->proc_inum;
+}
+
+const struct proc_ns_operations pidns_operations = {
+ .name = "pid",
+ .type = CLONE_NEWPID,
+ .get = pidns_get,
+ .put = pidns_put,
+ .install = pidns_install,
+ .inum = pidns_inum,
+};
+
static __init int pid_namespaces_init(void)
{
pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
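
The pidns_operations table above is what backs /proc/<pid>/ns/pid, so setns() can now enter a pid namespace. A short userspace sketch follows (illustrative only; the caller needs CAP_SYS_ADMIN in the target namespace's owning user namespace, matching pidns_install(), and only children forked after the call are created in the target namespace).

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char path[64];
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <target-pid>\n", argv[0]);
                return 1;
        }
        snprintf(path, sizeof(path), "/proc/%s/ns/pid", argv[1]);
        fd = open(path, O_RDONLY);
        if (fd < 0 || setns(fd, CLONE_NEWPID) != 0) {
                perror("setns");
                return 1;
        }
        /* The caller keeps its own pid; a child forked from here is
         * allocated in the namespace just entered. */
        return 0;
}
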
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index d73840271dc..a278cad1d5d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -9,6 +9,7 @@
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
+#include <linux/random.h>
/*
* Called after updating RLIMIT_CPU to run cpu timer and update
@@ -470,6 +471,8 @@ static void cleanup_timers(struct list_head *head,
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+ sizeof(unsigned long long));
cleanup_timers(tsk->cpu_timers,
tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
diff --git a/kernel/printk.c b/kernel/printk.c
index 22e070f3470..19c0d7bcf24 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -747,6 +747,21 @@ void __init setup_log_buf(int early)
free, (free * 100) / __LOG_BUF_LEN);
}
+static bool __read_mostly ignore_loglevel;
+
+static int __init ignore_loglevel_setup(char *str)
+{
+ ignore_loglevel = 1;
+ printk(KERN_INFO "debug: ignoring loglevel setting.\n");
+
+ return 0;
+}
+
+early_param("ignore_loglevel", ignore_loglevel_setup);
+module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to "
+ "print all kernel messages to the console.");
+
#ifdef CONFIG_BOOT_PRINTK_DELAY
static int boot_delay; /* msecs delay after each printk during bootup */
@@ -770,13 +785,15 @@ static int __init boot_delay_setup(char *str)
}
__setup("boot_delay=", boot_delay_setup);
-static void boot_delay_msec(void)
+static void boot_delay_msec(int level)
{
unsigned long long k;
unsigned long timeout;
- if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
+ if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
+ || (level >= console_loglevel && !ignore_loglevel)) {
return;
+ }
k = (unsigned long long)loops_per_msec * boot_delay;
@@ -795,7 +812,7 @@ static void boot_delay_msec(void)
}
}
#else
-static inline void boot_delay_msec(void)
+static inline void boot_delay_msec(int level)
{
}
#endif
@@ -1238,21 +1255,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
-static bool __read_mostly ignore_loglevel;
-
-static int __init ignore_loglevel_setup(char *str)
-{
- ignore_loglevel = 1;
- printk(KERN_INFO "debug: ignoring loglevel setting.\n");
-
- return 0;
-}
-
-early_param("ignore_loglevel", ignore_loglevel_setup);
-module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
- "print all kernel messages to the console.");
-
/*
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
@@ -1498,7 +1500,7 @@ asmlinkage int vprintk_emit(int facility, int level,
int this_cpu;
int printed_len = 0;
- boot_delay_msec();
+ boot_delay_msec(level);
printk_delay();
/* This stops the holder of console_sem just where we want him */
diff --git a/kernel/profile.c b/kernel/profile.c
index 76b8e77773e..1f391819c42 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -8,9 +8,10 @@
* Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
* Red Hat, July 2004
* Consolidation of architecture support code for profiling,
- * William Irwin, Oracle, July 2004
+ * Nadia Yvette Chambers, Oracle, July 2004
* Amortized hit count accounting via per-cpu open-addressed hashtables
- * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
+ * to resolve timer interrupt livelocks, Nadia Yvette Chambers,
+ * Oracle, 2004
*/
#include <linux/export.h>
@@ -256,7 +257,7 @@ EXPORT_SYMBOL_GPL(unregister_timer_hook);
* pagetable hash functions, but uses a full hashtable full of finite
* collision chains, not just pairs of them.
*
- * -- wli
+ * -- nyc
*/
static void __profile_flip_buffers(void *unused)
{
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1f5e55dda95..1599157336a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -215,8 +215,12 @@ ok:
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
- if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode))
+ rcu_read_lock();
+ if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
+ rcu_read_unlock();
return -EPERM;
+ }
+ rcu_read_unlock();
return security_ptrace_access_check(task, mode);
}
@@ -280,8 +284,10 @@ static int ptrace_attach(struct task_struct *task, long request,
if (seize)
flags |= PT_SEIZED;
- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
+ rcu_read_lock();
+ if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
flags |= PT_PTRACE_CAP;
+ rcu_read_unlock();
task->ptrace = flags;
__ptrace_link(task, current);
@@ -457,6 +463,9 @@ void exit_ptrace(struct task_struct *tracer)
return;
list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+ if (unlikely(p->ptrace & PT_EXITKILL))
+ send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
+
if (__ptrace_detach(tracer, p))
list_add(&p->ptrace_entry, &ptrace_dead);
}
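
The PT_EXITKILL handling added to exit_ptrace() is the kernel half of an "exit with the tracer" option. The sketch below is a hedged illustration of the expected userspace pairing; the PTRACE_O_EXITKILL name and the fallback value are assumptions based on the rest of the series, not something shown in this hunk.

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_O_EXITKILL
#define PTRACE_O_EXITKILL 0x00100000    /* assumption: value used by the series */
#endif

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);         /* wait for the tracer to set options */
                pause();                /* killed here once the tracer exits */
                return 0;
        }
        waitpid(child, NULL, 0);        /* child is now stopped */
        ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)(long)PTRACE_O_EXITKILL);
        ptrace(PTRACE_CONT, child, NULL, NULL);
        return 0;                       /* tracer exits; exit_ptrace() sends SIGKILL */
}
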
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index ad581aa2369..ff55247e704 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -86,33 +86,39 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
return __res_counter_charge(counter, val, limit_fail_at, true);
}
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
if (WARN_ON(counter->usage < val))
val = counter->usage;
counter->usage -= val;
+ return counter->usage;
}
-void res_counter_uncharge_until(struct res_counter *counter,
- struct res_counter *top,
- unsigned long val)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val)
{
unsigned long flags;
struct res_counter *c;
+ u64 ret = 0;
local_irq_save(flags);
for (c = counter; c != top; c = c->parent) {
+ u64 r;
spin_lock(&c->lock);
- res_counter_uncharge_locked(c, val);
+ r = res_counter_uncharge_locked(c, val);
+ if (c == counter)
+ ret = r;
spin_unlock(&c->lock);
}
local_irq_restore(flags);
+ return ret;
}
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
- res_counter_uncharge_until(counter, NULL, val);
+ return res_counter_uncharge_until(counter, NULL, val);
}
static inline unsigned long long *
@@ -192,25 +198,3 @@ int res_counter_memparse_write_strategy(const char *buf,
*res = PAGE_ALIGN(*res);
return 0;
}
-
-int res_counter_write(struct res_counter *counter, int member,
- const char *buf, write_strategy_fn write_strategy)
-{
- char *end;
- unsigned long flags;
- unsigned long long tmp, *val;
-
- if (write_strategy) {
- if (write_strategy(buf, &tmp))
- return -EINVAL;
- } else {
- tmp = simple_strtoull(buf, &end, 10);
- if (*end != '\0')
- return -EINVAL;
- }
- spin_lock_irqsave(&counter->lock, flags);
- val = res_counter_member(counter, member);
- *val = tmp;
- spin_unlock_irqrestore(&counter->lock, flags);
- return 0;
-}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6271b89f87a..257002c13bb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -193,23 +193,10 @@ static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
{
- char buf[64];
- char *cmp;
- int neg = 0;
int i;
-
- if (cnt > 63)
- cnt = 63;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
- cmp = strstrip(buf);
+ int neg = 0;
if (strncmp(cmp, "NO_", 3) == 0) {
neg = 1;
@@ -229,6 +216,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
}
}
+ return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ char *cmp;
+ int i;
+
+ if (cnt > 63)
+ cnt = 63;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+ cmp = strstrip(buf);
+
+ i = sched_feat_set(cmp);
if (i == __SCHED_FEAT_NR)
return -EINVAL;
@@ -923,6 +931,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq->skip_clock_update = 1;
}
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -953,10 +968,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
+ struct task_migration_notifier tmn;
+
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+ tmn.task = p;
+ tmn.from_cpu = task_cpu(p);
+ tmn.to_cpu = new_cpu;
+
+ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
@@ -1545,8 +1568,41 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+ if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
+ p->mm->numa_next_scan = jiffies;
+ p->mm->numa_next_reset = jiffies;
+ p->mm->numa_scan_seq = 0;
+ }
+
+ p->node_stamp = 0ULL;
+ p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
+ p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
+ p->numa_scan_period = sysctl_numa_balancing_scan_delay;
+ p->numa_work.next = &p->numa_work;
+#endif /* CONFIG_NUMA_BALANCING */
}
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_SCHED_DEBUG
+void set_numabalancing_state(bool enabled)
+{
+ if (enabled)
+ sched_feat_set("NUMA");
+ else
+ sched_feat_set("NO_NUMA");
+}
+#else
+__read_mostly bool numabalancing_enabled;
+
+void set_numabalancing_state(bool enabled)
+{
+ numabalancing_enabled = enabled;
+}
+#endif /* CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_NUMA_BALANCING */
+
/*
* fork()/clone()-time setup:
*/
@@ -4041,8 +4097,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
goto out_free_cpus_allowed;
}
retval = -EPERM;
- if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
- goto out_unlock;
+ if (!check_same_owner(p)) {
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ goto out_unlock;
+ }
+ rcu_read_unlock();
+ }
retval = security_task_setscheduler(p);
if (retval)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 59e072b2db9..5eea8707234 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -26,6 +26,9 @@
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
+#include <linux/mempolicy.h>
+#include <linux/migrate.h>
+#include <linux/task_work.h>
#include <trace/events/sched.h>
@@ -774,6 +777,230 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Scheduling class queueing methods:
*/
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * numa task sample period in ms
+ */
+unsigned int sysctl_numa_balancing_scan_period_min = 100;
+unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
+unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
+
+/* Portion of address space to scan in MB */
+unsigned int sysctl_numa_balancing_scan_size = 256;
+
+/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
+unsigned int sysctl_numa_balancing_scan_delay = 1000;
+
+static void task_numa_placement(struct task_struct *p)
+{
+ int seq;
+
+ if (!p->mm) /* for example, ksmd faulting in a user's mm */
+ return;
+ seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+ if (p->numa_scan_seq == seq)
+ return;
+ p->numa_scan_seq = seq;
+
+ /* FIXME: Scheduling placement policy hints go here */
+}
+
+/*
+ * Got a PROT_NONE fault for a page on @node.
+ */
+void task_numa_fault(int node, int pages, bool migrated)
+{
+ struct task_struct *p = current;
+
+ if (!sched_feat_numa(NUMA))
+ return;
+
+ /* FIXME: Allocate task-specific structure for placement policy here */
+
+ /*
+ * If pages are properly placed (did not migrate) then scan slower.
+ * This is reset periodically in case of phase changes
+ */
+ if (!migrated)
+ p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
+ p->numa_scan_period + jiffies_to_msecs(10));
+
+ task_numa_placement(p);
+}
+
+static void reset_ptenuma_scan(struct task_struct *p)
+{
+ ACCESS_ONCE(p->mm->numa_scan_seq)++;
+ p->mm->numa_scan_offset = 0;
+}
+
+/*
+ * The expensive part of numa migration is done from task_work context.
+ * Triggered from task_tick_numa().
+ */
+void task_numa_work(struct callback_head *work)
+{
+ unsigned long migrate, next_scan, now = jiffies;
+ struct task_struct *p = current;
+ struct mm_struct *mm = p->mm;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ long pages;
+
+ WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
+
+ work->next = work; /* protect against double add */
+ /*
+ * Who cares about NUMA placement when they're dying.
+ *
+ * NOTE: make sure not to dereference p->mm before this check,
+ * exit_task_work() happens _after_ exit_mm() so we could be called
+ * without p->mm even though we still had it when we enqueued this
+ * work.
+ */
+ if (p->flags & PF_EXITING)
+ return;
+
+ /*
+ * We do not care about task placement until a task runs on a node
+ * other than the first one used by the address space. This is
+ * largely because migrations are driven by what CPU the task
+ * is running on. If it's never scheduled on another node, it'll
+ * not migrate so why bother trapping the fault.
+ */
+ if (mm->first_nid == NUMA_PTE_SCAN_INIT)
+ mm->first_nid = numa_node_id();
+ if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
+ /* Are we running on a new node yet? */
+ if (numa_node_id() == mm->first_nid &&
+ !sched_feat_numa(NUMA_FORCE))
+ return;
+
+ mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
+ }
+
+ /*
+ * Reset the scan period if enough time has gone by. Objective is that
+ * scanning will be reduced if pages are properly placed. As tasks
+ * can enter different phases this needs to be re-examined. Lacking
+ * proper tracking of reference behaviour, this blunt hammer is used.
+ */
+ migrate = mm->numa_next_reset;
+ if (time_after(now, migrate)) {
+ p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+ next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
+ xchg(&mm->numa_next_reset, next_scan);
+ }
+
+ /*
+ * Enforce maximal scan/migration frequency..
+ */
+ migrate = mm->numa_next_scan;
+ if (time_before(now, migrate))
+ return;
+
+ if (p->numa_scan_period == 0)
+ p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+
+ next_scan = now + msecs_to_jiffies(p->numa_scan_period);
+ if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
+ return;
+
+ /*
+ * Do not set pte_numa if the current running node is rate-limited.
+ * This loses statistics on the fault but if we are unwilling to
+ * migrate to this node, it is less likely we can do useful work
+ */
+ if (migrate_ratelimited(numa_node_id()))
+ return;
+
+ start = mm->numa_scan_offset;
+ pages = sysctl_numa_balancing_scan_size;
+ pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+ if (!pages)
+ return;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ if (!vma) {
+ reset_ptenuma_scan(p);
+ start = 0;
+ vma = mm->mmap;
+ }
+ for (; vma; vma = vma->vm_next) {
+ if (!vma_migratable(vma))
+ continue;
+
+ /* Skip small VMAs. They are not likely to be of relevance */
+ if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+ continue;
+
+ do {
+ start = max(start, vma->vm_start);
+ end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
+ end = min(end, vma->vm_end);
+ pages -= change_prot_numa(vma, start, end);
+
+ start = end;
+ if (pages <= 0)
+ goto out;
+ } while (end != vma->vm_end);
+ }
+
+out:
+ /*
+ * It is possible to reach the end of the VMA list but the last few VMAs are
+ * not guaranteed to be vma_migratable. If they are not, we would find the
+ * !migratable VMA on the next scan but not reset the scanner to the start
+ * so check it now.
+ */
+ if (vma)
+ mm->numa_scan_offset = start;
+ else
+ reset_ptenuma_scan(p);
+ up_read(&mm->mmap_sem);
+}
+
+/*
+ * Drive the periodic memory faults..
+ */
+void task_tick_numa(struct rq *rq, struct task_struct *curr)
+{
+ struct callback_head *work = &curr->numa_work;
+ u64 period, now;
+
+ /*
+ * We don't care about NUMA placement if we don't have memory.
+ */
+ if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+ return;
+
+ /*
+ * Using runtime rather than walltime has the dual advantage that
+ * we (mostly) drive the selection from busy threads and that the
+ * task needs to have done some actual work before we bother with
+ * NUMA placement.
+ */
+ now = curr->se.sum_exec_runtime;
+ period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
+
+ if (now - curr->node_stamp > period) {
+ if (!curr->node_stamp)
+ curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+ curr->node_stamp = now;
+
+ if (!time_before(jiffies, curr->mm->numa_next_scan)) {
+ init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
+ task_work_add(curr, work, true);
+ }
+ }
+}
+#else
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -1265,7 +1492,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
}
__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
- update_cfs_shares(cfs_rq);
}
static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1475,8 +1701,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- account_entity_enqueue(cfs_rq, se);
enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+ account_entity_enqueue(cfs_rq, se);
+ update_cfs_shares(cfs_rq);
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
@@ -1549,6 +1776,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
update_stats_dequeue(cfs_rq, se);
if (flags & DEQUEUE_SLEEP) {
@@ -1568,8 +1796,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
+ se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
- dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1583,7 +1811,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
return_cfs_rq_runtime(cfs_rq);
update_min_vruntime(cfs_rq);
- se->on_rq = 0;
+ update_cfs_shares(cfs_rq);
}
/*
@@ -2595,8 +2823,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
+ update_cfs_shares(cfs_rq);
update_entity_load_avg(se, 1);
- update_cfs_rq_blocked_load(cfs_rq, 0);
}
if (!se) {
@@ -2656,8 +2884,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
+ update_cfs_shares(cfs_rq);
update_entity_load_avg(se, 1);
- update_cfs_rq_blocked_load(cfs_rq, 0);
}
if (!se) {
@@ -5500,6 +5728,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
+ if (sched_feat_numa(NUMA))
+ task_tick_numa(rq, curr);
+
update_rq_runnable_avg(rq, 1);
}
@@ -5837,11 +6068,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
se = tg->se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se) {
+ for_each_sched_entity(se)
update_cfs_shares(group_cfs_rq(se));
- /* update contribution to parent */
- update_entity_load_avg(se, 1);
- }
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e68e69ab917..1ad1d2b5395 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -66,3 +66,14 @@ SCHED_FEAT(TTWU_QUEUE, true)
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
+
+/*
+ * Apply the automatic NUMA scheduling policy. Enabled automatically
+ * at runtime if running on a NUMA machine. Can be controlled via
+ * numa_balancing=. Allow PTE scanning to be forced on UMA machines
+ * for debugging the core machinery.
+ */
+#ifdef CONFIG_NUMA_BALANCING
+SCHED_FEAT(NUMA, false)
+SCHED_FEAT(NUMA_FORCE, false)
+#endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5eca173b563..fc886441436 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -663,6 +663,18 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
+#ifdef CONFIG_NUMA_BALANCING
+#define sched_feat_numa(x) sched_feat(x)
+#ifdef CONFIG_SCHED_DEBUG
+#define numabalancing_enabled sched_feat_numa(NUMA)
+#else
+extern bool numabalancing_enabled;
+#endif /* CONFIG_SCHED_DEBUG */
+#else
+#define sched_feat_numa(x) (0)
+#define numabalancing_enabled (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
static inline u64 global_rt_period(void)
{
return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ee376beedaf..5af44b59377 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -396,25 +396,29 @@ int __secure_computing(int this_syscall)
#ifdef CONFIG_SECCOMP_FILTER
case SECCOMP_MODE_FILTER: {
int data;
+ struct pt_regs *regs = task_pt_regs(current);
ret = seccomp_run_filters(this_syscall);
data = ret & SECCOMP_RET_DATA;
ret &= SECCOMP_RET_ACTION;
switch (ret) {
case SECCOMP_RET_ERRNO:
/* Set the low-order 16-bits as a errno. */
- syscall_set_return_value(current, task_pt_regs(current),
+ syscall_set_return_value(current, regs,
-data, 0);
goto skip;
case SECCOMP_RET_TRAP:
/* Show the handler the original registers. */
- syscall_rollback(current, task_pt_regs(current));
+ syscall_rollback(current, regs);
/* Let the filter pass back 16 bits of data. */
seccomp_send_sigsys(this_syscall, data);
goto skip;
case SECCOMP_RET_TRACE:
/* Skip these calls if there is no tracer. */
- if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
+ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
+ syscall_set_return_value(current, regs,
+ -ENOSYS, 0);
goto skip;
+ }
/* Allow the BPF to provide the event message */
ptrace_event(PTRACE_EVENT_SECCOMP, data);
/*
@@ -425,6 +429,9 @@ int __secure_computing(int this_syscall)
*/
if (fatal_signal_pending(current))
break;
+ if (syscall_get_nr(current, regs) < 0)
+ goto skip; /* Explicit request to skip. */
+
return 0;
case SECCOMP_RET_ALLOW:
return 0;
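
From userspace, the effect of the SECCOMP_RET_TRACE change above is that a syscall marked for tracing now fails cleanly with ENOSYS when no tracer has enabled PTRACE_EVENT_SECCOMP, instead of returning whatever happened to be in the return register. A hedged sketch follows (illustrative only; it uses the standard seccomp/BPF uapi, with a fallback definition for PR_SET_NO_NEW_PRIVS on older headers).

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38          /* assumption for older prctl headers */
#endif

int main(void)
{
        struct sock_filter filter[] = {
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                /* Trace getuid(), allow everything else. */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getuid, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRACE),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(filter) / sizeof(filter[0]),
                .filter = filter,
        };
        long ret;

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
            prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
                perror("prctl");
                return 1;
        }
        ret = syscall(__NR_getuid);     /* traced, but nobody is tracing */
        if (ret == -1 && errno == ENOSYS)
                printf("getuid skipped with ENOSYS as expected\n");
        return 0;
}
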
diff --git a/kernel/signal.c b/kernel/signal.c
index a49c7f36ceb..7aaa51d8e5b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -31,6 +31,7 @@
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
+#include <linux/compat.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
@@ -1753,7 +1754,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
* see comment in do_notify_parent() about the following 4 lines
*/
rcu_read_lock();
- info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
+ info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
rcu_read_unlock();
@@ -3094,6 +3095,79 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
out:
return error;
}
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
+{
+ return do_sigaltstack(uss, uoss, current_user_stack_pointer());
+}
+#endif
+
+int restore_altstack(const stack_t __user *uss)
+{
+ int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
+ /* squash all but EFAULT for now */
+ return err == -EFAULT ? err : 0;
+}
+
+int __save_altstack(stack_t __user *uss, unsigned long sp)
+{
+ struct task_struct *t = current;
+ return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
+ __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+ __put_user(t->sas_ss_size, &uss->ss_size);
+}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr)
+{
+ stack_t uss, uoss;
+ int ret;
+ mm_segment_t seg;
+
+ if (uss_ptr) {
+ compat_stack_t uss32;
+
+ memset(&uss, 0, sizeof(stack_t));
+ if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
+ return -EFAULT;
+ uss.ss_sp = compat_ptr(uss32.ss_sp);
+ uss.ss_flags = uss32.ss_flags;
+ uss.ss_size = uss32.ss_size;
+ }
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+ ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+ (stack_t __force __user *) &uoss,
+ compat_user_stack_pointer());
+ set_fs(seg);
+ if (ret >= 0 && uoss_ptr) {
+ if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
+ __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
+ __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
+ __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
+int compat_restore_altstack(const compat_stack_t __user *uss)
+{
+ int err = compat_sys_sigaltstack(uss, NULL);
+ /* squash all but -EFAULT for now */
+ return err == -EFAULT ? err : 0;
+}
+
+int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
+{
+ struct task_struct *t = current;
+ return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
+ __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+ __put_user(t->sas_ss_size, &uss->ss_size);
+}
+#endif
+#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING
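
The generic do_sigaltstack()/__save_altstack() plumbing above services the long-standing userspace contract; a minimal, purely illustrative program exercising it looks like this.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void handler(int sig)
{
        (void)sig;                      /* runs on the alternate stack */
}

int main(void)
{
        stack_t ss = {
                .ss_sp = malloc(SIGSTKSZ),
                .ss_size = SIGSTKSZ,
                .ss_flags = 0,
        };
        struct sigaction sa = {
                .sa_handler = handler,
                .sa_flags = SA_ONSTACK,
        };

        if (!ss.ss_sp || sigaltstack(&ss, NULL) || sigaction(SIGUSR1, &sa, NULL)) {
                perror("sigaltstack");
                return 1;
        }
        raise(SIGUSR1);                 /* handler runs on ss.ss_sp */
        return 0;
}
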
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index dbff751e408..395084d4ce1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -25,6 +25,7 @@ cond_syscall(sys_swapoff);
cond_syscall(sys_kexec_load);
cond_syscall(compat_sys_kexec_load);
cond_syscall(sys_init_module);
+cond_syscall(sys_finit_module);
cond_syscall(sys_delete_module);
cond_syscall(sys_socketpair);
cond_syscall(sys_bind);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 33f71f37267..c88878db491 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -256,9 +256,11 @@ static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+#ifdef CONFIG_SMP
static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
-#endif
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_SCHED_DEBUG */
#ifdef CONFIG_COMPACTION
static int min_extfrag_threshold;
@@ -301,6 +303,7 @@ static struct ctl_table kern_table[] = {
.extra1 = &min_wakeup_granularity_ns,
.extra2 = &max_wakeup_granularity_ns,
},
+#ifdef CONFIG_SMP
{
.procname = "sched_tunable_scaling",
.data = &sysctl_sched_tunable_scaling,
@@ -347,7 +350,45 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
-#endif
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_NUMA_BALANCING
+ {
+ .procname = "numa_balancing_scan_delay_ms",
+ .data = &sysctl_numa_balancing_scan_delay,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "numa_balancing_scan_period_min_ms",
+ .data = &sysctl_numa_balancing_scan_period_min,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "numa_balancing_scan_period_reset",
+ .data = &sysctl_numa_balancing_scan_period_reset,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "numa_balancing_scan_period_max_ms",
+ .data = &sysctl_numa_balancing_scan_period_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "numa_balancing_scan_size_mb",
+ .data = &sysctl_numa_balancing_scan_size,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif /* CONFIG_NUMA_BALANCING */
+#endif /* CONFIG_SCHED_DEBUG */
{
.procname = "sched_rt_period_us",
.data = &sysctl_sched_rt_period,
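
The numa_balancing_* entries above are plain proc_dointvec sysctls, so they surface under /proc/sys/kernel/ with the .procname shown. A small illustrative reader follows (the write-back is only hinted at in a comment).

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/numa_balancing_scan_size_mb", "r");
        unsigned int mb;

        if (!f || fscanf(f, "%u", &mb) != 1) {
                perror("numa_balancing_scan_size_mb");
                return 1;
        }
        printf("scan size: %u MB\n", mb);
        /* Tuning would mean reopening the file for writing and printing
         * a new value, e.g. fprintf(f, "%u\n", mb * 2). */
        fclose(f);
        return 0;
}
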
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 65bdcf198d4..5a638445050 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1344,7 +1344,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
goto out_putname;
}
- mnt = current->nsproxy->pid_ns->proc_mnt;
+ mnt = task_active_pid_ns(current)->proc_mnt;
file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
result = PTR_ERR(file);
if (IS_ERR(file))
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4c7de02eacd..cbc6acb0db3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
+#include <linux/pvclock_gtod.h>
static struct timekeeper timekeeper;
@@ -174,6 +175,54 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
return nsec + arch_gettimeoffset();
}
+static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
+
+static void update_pvclock_gtod(struct timekeeper *tk)
+{
+ raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
+}
+
+/**
+ * pvclock_gtod_register_notifier - register a pvclock timedata update listener
+ *
+ * Must hold write on timekeeper.lock
+ */
+int pvclock_gtod_register_notifier(struct notifier_block *nb)
+{
+ struct timekeeper *tk = &timekeeper;
+ unsigned long flags;
+ int ret;
+
+ write_seqlock_irqsave(&tk->lock, flags);
+ ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
+ /* update timekeeping data */
+ update_pvclock_gtod(tk);
+ write_sequnlock_irqrestore(&tk->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
+
+/**
+ * pvclock_gtod_unregister_notifier - unregister a pvclock
+ * timedata update listener
+ *
+ * Must hold write on timekeeper.lock
+ */
+int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
+{
+ struct timekeeper *tk = &timekeeper;
+ unsigned long flags;
+ int ret;
+
+ write_seqlock_irqsave(&tk->lock, flags);
+ ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
+ write_sequnlock_irqrestore(&tk->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
+
/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
@@ -182,6 +231,7 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
ntp_clear();
}
update_vsyscall(tk);
+ update_pvclock_gtod(tk);
}
/**
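
An in-kernel consumer of the new chain would look roughly like the sketch below. It is illustrative only, modelled on what a paravirtualized clocksource such as kvmclock is expected to do, and relies on the <linux/pvclock_gtod.h> header included above.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pvclock_gtod.h>

static int demo_gtod_update(struct notifier_block *nb, unsigned long unused,
                            void *tk)
{
        /* The timekeeper changed; refresh any cached time parameters here. */
        pr_debug("pvclock_gtod: timekeeping updated\n");
        return NOTIFY_OK;
}

static struct notifier_block demo_gtod_nb = {
        .notifier_call = demo_gtod_update,
};

static int __init demo_init(void)
{
        return pvclock_gtod_register_notifier(&demo_gtod_nb);
}

static void __exit demo_exit(void)
{
        pvclock_gtod_unregister_notifier(&demo_gtod_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
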
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7693aaf324c..3ffe4c5ad3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -10,7 +10,7 @@
* Based on code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/stop_machine.h>
@@ -2675,12 +2675,12 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
}
loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
+ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
{
loff_t ret;
if (file->f_mode & FMODE_READ)
- ret = seq_lseek(file, offset, origin);
+ ret = seq_lseek(file, offset, whence);
else
file->f_pos = ret = 1;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b69cc380322..e5125677efa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9,7 +9,7 @@
*
* Based on code from the latency_tracer, that is:
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
@@ -3034,6 +3034,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
tr->data[cpu]->entries = val;
}
+/* resize @tr's buffer to the size of @size_tr's entries */
+static int resize_buffer_duplicate_size(struct trace_array *tr,
+ struct trace_array *size_tr, int cpu_id)
+{
+ int cpu, ret = 0;
+
+ if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ for_each_tracing_cpu(cpu) {
+ ret = ring_buffer_resize(tr->buffer,
+ size_tr->data[cpu]->entries, cpu);
+ if (ret < 0)
+ break;
+ tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+ }
+ } else {
+ ret = ring_buffer_resize(tr->buffer,
+ size_tr->data[cpu_id]->entries, cpu_id);
+ if (ret == 0)
+ tr->data[cpu_id]->entries =
+ size_tr->data[cpu_id]->entries;
+ }
+
+ return ret;
+}
+
static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
{
int ret;
@@ -3058,23 +3083,8 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
ret = ring_buffer_resize(max_tr.buffer, size, cpu);
if (ret < 0) {
- int r = 0;
-
- if (cpu == RING_BUFFER_ALL_CPUS) {
- int i;
- for_each_tracing_cpu(i) {
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.data[i]->entries,
- i);
- if (r < 0)
- break;
- }
- } else {
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.data[cpu]->entries,
- cpu);
- }
-
+ int r = resize_buffer_duplicate_size(&global_trace,
+ &global_trace, cpu);
if (r < 0) {
/*
* AARGH! We are left with different
@@ -3212,17 +3222,11 @@ static int tracing_set_tracer(const char *buf)
topts = create_trace_option_files(t);
if (t->use_max_tr) {
- int cpu;
/* we need to make per cpu buffer sizes equivalent */
- for_each_tracing_cpu(cpu) {
- ret = ring_buffer_resize(max_tr.buffer,
- global_trace.data[cpu]->entries,
- cpu);
- if (ret < 0)
- goto out;
- max_tr.data[cpu]->entries =
- global_trace.data[cpu]->entries;
- }
+ ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
+ RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ goto out;
}
if (t->init) {
@@ -4271,13 +4275,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
return -ENOMEM;
if (*ppos & (PAGE_SIZE - 1)) {
- WARN_ONCE(1, "Ftrace: previous read must page-align\n");
ret = -EINVAL;
goto out;
}
if (len & (PAGE_SIZE - 1)) {
- WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
if (len < PAGE_SIZE) {
ret = -EINVAL;
goto out;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index bb227e380cb..8e3ad8082ab 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -7,7 +7,7 @@
* Based on code from the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5ffce7b0f33..713a2cac488 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
* From code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index bc64fc13755..9fe45fcefca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -7,7 +7,7 @@
* Based on code from the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/module.h>
#include <linux/fs.h>
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0c1b165778e..42ca822fc70 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -33,7 +33,6 @@ static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);
@@ -116,9 +115,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
{
int cpu;
- if (unlikely(!ftrace_enabled || stack_trace_disabled))
- return;
-
preempt_disable_notrace();
cpu = raw_smp_processor_id();
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 9614db8b0f8..c86e6d4f67f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
+#include <linux/string.h>
#include "trace_probe.h"
@@ -263,16 +264,15 @@ static int create_trace_uprobe(int argc, char **argv)
/* setup a probe */
if (!event) {
- char *tail = strrchr(filename, '/');
+ char *tail;
char *ptr;
- ptr = kstrdup((tail ? tail + 1 : filename), GFP_KERNEL);
- if (!ptr) {
+ tail = kstrdup(kbasename(filename), GFP_KERNEL);
+ if (!tail) {
ret = -ENOMEM;
goto fail_address_parse;
}
- tail = ptr;
ptr = strpbrk(tail, ".-_");
if (ptr)
*ptr = '\0';
diff --git a/kernel/user.c b/kernel/user.c
index 750acffbe9e..33acb5e53a5 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
+#include <linux/proc_fs.h>
/*
* userns count is 1 for root user, 1 for init_uts_ns,
@@ -51,6 +52,7 @@ struct user_namespace init_user_ns = {
},
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
+ .proc_inum = PROC_USER_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 456a6b9fba3..2b042c42fbc 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -9,6 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/proc_fs.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
@@ -26,6 +27,24 @@ static struct kmem_cache *user_ns_cachep __read_mostly;
static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
struct uid_gid_map *map);
+static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
+{
+ /* Start with the same capabilities as init but useless for doing
+ * anything as the capabilities are bound to the new user namespace.
+ */
+ cred->securebits = SECUREBITS_DEFAULT;
+ cred->cap_inheritable = CAP_EMPTY_SET;
+ cred->cap_permitted = CAP_FULL_SET;
+ cred->cap_effective = CAP_FULL_SET;
+ cred->cap_bset = CAP_FULL_SET;
+#ifdef CONFIG_KEYS
+ key_put(cred->request_key_auth);
+ cred->request_key_auth = NULL;
+#endif
+ /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
+ cred->user_ns = user_ns;
+}
+
/*
* Create a new user namespace, deriving the creator from the user in the
* passed credentials, and replacing that user with the new root user for the
@@ -39,6 +58,7 @@ int create_user_ns(struct cred *new)
struct user_namespace *ns, *parent_ns = new->user_ns;
kuid_t owner = new->euid;
kgid_t group = new->egid;
+ int ret;
/* The creator needs a mapping in the parent user namespace
* or else we won't be able to reasonably tell userspace who
@@ -52,38 +72,45 @@ int create_user_ns(struct cred *new)
if (!ns)
return -ENOMEM;
+ ret = proc_alloc_inum(&ns->proc_inum);
+ if (ret) {
+ kmem_cache_free(user_ns_cachep, ns);
+ return ret;
+ }
+
kref_init(&ns->kref);
+ /* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->owner = owner;
ns->group = group;
- /* Start with the same capabilities as init but useless for doing
- * anything as the capabilities are bound to the new user namespace.
- */
- new->securebits = SECUREBITS_DEFAULT;
- new->cap_inheritable = CAP_EMPTY_SET;
- new->cap_permitted = CAP_FULL_SET;
- new->cap_effective = CAP_FULL_SET;
- new->cap_bset = CAP_FULL_SET;
-#ifdef CONFIG_KEYS
- key_put(new->request_key_auth);
- new->request_key_auth = NULL;
-#endif
- /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
-
- /* Leave the new->user_ns reference with the new user namespace. */
- /* Leave the reference to our user_ns with the new cred. */
- new->user_ns = ns;
+ set_cred_user_ns(new, ns);
return 0;
}
+int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
+{
+ struct cred *cred;
+
+ if (!(unshare_flags & CLONE_NEWUSER))
+ return 0;
+
+ cred = prepare_creds();
+ if (!cred)
+ return -ENOMEM;
+
+ *new_cred = cred;
+ return create_user_ns(cred);
+}
+
void free_user_ns(struct kref *kref)
{
struct user_namespace *parent, *ns =
container_of(kref, struct user_namespace, kref);
parent = ns->parent;
+ proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
put_user_ns(parent);
}
@@ -372,7 +399,7 @@ static int uid_m_show(struct seq_file *seq, void *v)
struct user_namespace *lower_ns;
uid_t lower;
- lower_ns = current_user_ns();
+ lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
@@ -393,7 +420,7 @@ static int gid_m_show(struct seq_file *seq, void *v)
struct user_namespace *lower_ns;
gid_t lower;
- lower_ns = current_user_ns();
+ lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
@@ -669,10 +696,14 @@ ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t siz
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
+ struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
+ if ((seq_ns != ns) && (seq_ns != ns->parent))
+ return -EPERM;
+
return map_write(file, buf, size, ppos, CAP_SETUID,
&ns->uid_map, &ns->parent->uid_map);
}
@@ -681,10 +712,14 @@ ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t siz
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
+ struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
+ if ((seq_ns != ns) && (seq_ns != ns->parent))
+ return -EPERM;
+
return map_write(file, buf, size, ppos, CAP_SETGID,
&ns->gid_map, &ns->parent->gid_map);
}
@@ -709,6 +744,21 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
+ /* Allow mapping to your own filesystem ids */
+ if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
+ u32 id = new_map->extent[0].lower_first;
+ if (cap_setid == CAP_SETUID) {
+ kuid_t uid = make_kuid(ns->parent, id);
+ if (uid_eq(uid, current_fsuid()))
+ return true;
+ }
+ else if (cap_setid == CAP_SETGID) {
+ kgid_t gid = make_kgid(ns->parent, id);
+ if (gid_eq(gid, current_fsgid()))
+ return true;
+ }
+ }
+
/* Allow anyone to set a mapping that doesn't require privilege */
if (!cap_valid(cap_setid))
return true;
@@ -722,6 +772,65 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
return false;
}
+static void *userns_get(struct task_struct *task)
+{
+ struct user_namespace *user_ns;
+
+ rcu_read_lock();
+ user_ns = get_user_ns(__task_cred(task)->user_ns);
+ rcu_read_unlock();
+
+ return user_ns;
+}
+
+static void userns_put(void *ns)
+{
+ put_user_ns(ns);
+}
+
+static int userns_install(struct nsproxy *nsproxy, void *ns)
+{
+ struct user_namespace *user_ns = ns;
+ struct cred *cred;
+
+ /* Don't allow gaining capabilities by reentering
+ * the same user namespace.
+ */
+ if (user_ns == current_user_ns())
+ return -EINVAL;
+
+ /* Threaded processes may not enter a different user namespace */
+ if (atomic_read(&current->mm->mm_users) > 1)
+ return -EINVAL;
+
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ cred = prepare_creds();
+ if (!cred)
+ return -ENOMEM;
+
+ put_user_ns(cred->user_ns);
+ set_cred_user_ns(cred, get_user_ns(user_ns));
+
+ return commit_creds(cred);
+}
+
+static unsigned int userns_inum(void *ns)
+{
+ struct user_namespace *user_ns = ns;
+ return user_ns->proc_inum;
+}
+
+const struct proc_ns_operations userns_operations = {
+ .name = "user",
+ .type = CLONE_NEWUSER,
+ .get = userns_get,
+ .put = userns_put,
+ .install = userns_install,
+ .inum = userns_inum,
+};
+
static __init int user_namespaces_init(void)
{
user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
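Taken together, the user-namespace hunks above do three things: namespace creation now allocates a /proc inode number, writes to /proc/<pid>/uid_map and gid_map are only honoured from the map's own namespace or its parent, and new_idmap_permitted() gains a branch that lets an unprivileged task map exactly its current fsuid/fsgid as a single one-entry extent. A rough userspace sketch of that last rule — illustrative only, and it assumes the rest of this series so that unprivileged unshare(CLONE_NEWUSER) is permitted:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

/* Map our own uid (say 1000) to uid 0 inside a new user namespace;
 * allowed without CAP_SETUID by the single-extent "own fsuid" rule. */
int main(void)
{
	char buf[64];
	int fd, n;

	if (unshare(CLONE_NEWUSER) < 0) {
		perror("unshare");
		return 1;
	}

	n = snprintf(buf, sizeof(buf), "0 %d 1\n", (int)getuid());
	fd = open("/proc/self/uid_map", O_WRONLY);
	if (fd < 0 || write(fd, buf, n) != n) {
		perror("uid_map");
		return 1;
	}
	close(fd);

	printf("in-namespace uid: %d\n", (int)getuid());	/* typically 0 */
	return 0;
}

Inside the new namespace the process then appears as uid 0, while every access outside is still performed with the original uid.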
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 679d97a5d3f..08b197e8c48 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -32,18 +32,25 @@ static struct uts_namespace *create_uts_ns(void)
* @old_ns: namespace to clone
* Return NULL on error (failure to kmalloc), new ns otherwise
*/
-static struct uts_namespace *clone_uts_ns(struct task_struct *tsk,
+static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
struct uts_namespace *old_ns)
{
struct uts_namespace *ns;
+ int err;
ns = create_uts_ns();
if (!ns)
return ERR_PTR(-ENOMEM);
+ err = proc_alloc_inum(&ns->proc_inum);
+ if (err) {
+ kfree(ns);
+ return ERR_PTR(err);
+ }
+
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
- ns->user_ns = get_user_ns(task_cred_xxx(tsk, user_ns));
+ ns->user_ns = get_user_ns(user_ns);
up_read(&uts_sem);
return ns;
}
@@ -55,9 +62,8 @@ static struct uts_namespace *clone_uts_ns(struct task_struct *tsk,
* versa.
*/
struct uts_namespace *copy_utsname(unsigned long flags,
- struct task_struct *tsk)
+ struct user_namespace *user_ns, struct uts_namespace *old_ns)
{
- struct uts_namespace *old_ns = tsk->nsproxy->uts_ns;
struct uts_namespace *new_ns;
BUG_ON(!old_ns);
@@ -66,7 +72,7 @@ struct uts_namespace *copy_utsname(unsigned long flags,
if (!(flags & CLONE_NEWUTS))
return old_ns;
- new_ns = clone_uts_ns(tsk, old_ns);
+ new_ns = clone_uts_ns(user_ns, old_ns);
put_uts_ns(old_ns);
return new_ns;
@@ -78,6 +84,7 @@ void free_uts_ns(struct kref *kref)
ns = container_of(kref, struct uts_namespace, kref);
put_user_ns(ns->user_ns);
+ proc_free_inum(ns->proc_inum);
kfree(ns);
}
@@ -102,19 +109,32 @@ static void utsns_put(void *ns)
put_uts_ns(ns);
}
-static int utsns_install(struct nsproxy *nsproxy, void *ns)
+static int utsns_install(struct nsproxy *nsproxy, void *new)
{
+ struct uts_namespace *ns = new;
+
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
get_uts_ns(ns);
put_uts_ns(nsproxy->uts_ns);
nsproxy->uts_ns = ns;
return 0;
}
+static unsigned int utsns_inum(void *vp)
+{
+ struct uts_namespace *ns = vp;
+
+ return ns->proc_inum;
+}
+
const struct proc_ns_operations utsns_operations = {
.name = "uts",
.type = CLONE_NEWUTS,
.get = utsns_get,
.put = utsns_put,
.install = utsns_install,
+ .inum = utsns_inum,
};
-
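utsns_install() above now refuses setns() unless the caller holds CAP_SYS_ADMIN both in its own user namespace and in the user namespace owning the target UTS namespace, and utsns_inum() exposes the newly allocated proc_inum through /proc/<pid>/ns/uts. A hedged userspace sketch of the call path it guards (error handling kept minimal, path built for illustration):

#define _GNU_SOURCE
#include <sched.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Join the UTS namespace of process <pid>. Succeeds only if we are
 * sufficiently privileged in that namespace's owning user namespace. */
int join_uts_ns(pid_t pid)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/proc/%d/ns/uts", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	ret = setns(fd, CLONE_NEWUTS);	/* ends up in utsns_install() */
	close(fd);
	return ret;
}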
diff --git a/kernel/wait.c b/kernel/wait.c
index 7fdd9eaca2c..6698e0c04ea 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -1,7 +1,7 @@
/*
* Generic waiting primitives.
*
- * (C) 2004 William Irwin, Oracle
+ * (C) 2004 Nadia Yvette Chambers, Oracle
*/
#include <linux/init.h>
#include <linux/export.h>
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index c8c21be11ab..75a2ab3d0b0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,7 @@
int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
+static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -116,7 +117,7 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-static u64 get_sample_period(void)
+static void set_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
@@ -125,7 +126,7 @@ static u64 get_sample_period(void)
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
- return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+ sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
@@ -275,7 +276,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
wake_up_process(__this_cpu_read(softlockup_watchdog));
/* .. and repeat */
- hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
+ hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
if (touch_ts == 0) {
if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
@@ -343,6 +344,10 @@ static void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ /* kick off the timer for the hardlockup detector */
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
+
if (!watchdog_enabled) {
kthread_park(current);
return;
@@ -351,12 +356,8 @@ static void watchdog_enable(unsigned int cpu)
/* Enable the perf event */
watchdog_nmi_enable(cpu);
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-
/* done here because hrtimer_start can only pin to smp_processor_id() */
- hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
HRTIMER_MODE_REL_PINNED);
/* initialize timestamp */
@@ -368,9 +369,6 @@ static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
- if (!watchdog_enabled)
- return;
-
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
/* disable the perf event */
@@ -386,7 +384,7 @@ static int watchdog_should_run(unsigned int cpu)
/*
* The watchdog thread function - touches the timestamp.
*
- * It only runs once every get_sample_period() seconds (4 seconds by
+ * It only runs once every sample_period seconds (4 seconds by
* default) to reset the softlockup timestamp. If this gets delayed
* for more than 2*watchdog_thresh seconds then the debug-printout
* triggers in watchdog_timer_fn().
@@ -519,6 +517,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
if (ret || !write)
return ret;
+ set_sample_period();
if (watchdog_enabled && watchdog_thresh)
watchdog_enable_all_cpus();
else
@@ -540,6 +539,7 @@ static struct smp_hotplug_thread watchdog_threads = {
void __init lockup_detector_init(void)
{
+ set_sample_period();
if (smpboot_register_percpu_thread(&watchdog_threads)) {
pr_err("Failed to create watchdog threads, disabled\n");
watchdog_disabled = -ENODEV;
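The watchdog change is purely a caching one: the period is computed once by set_sample_period() — at lockup_detector_init() and again whenever watchdog_thresh is rewritten through the sysctl — instead of being recomputed on every hrtimer tick. The arithmetic itself is unchanged; a worked example with the defaults, assuming get_softlockup_thresh() == 2 * watchdog_thresh, which matches the "4 seconds by default" note above:

#include <linux/types.h>
#include <linux/time.h>		/* NSEC_PER_SEC */

/* Illustrative only: the default watchdog_thresh is 10 seconds. */
static u64 example_sample_period(void)
{
	unsigned int watchdog_thresh = 10;
	u64 softlockup_thresh = 2 * watchdog_thresh;		/* 20 s */

	/* 20 * 200,000,000 ns = 4,000,000,000 ns = 4 s between timer ticks */
	return softlockup_thresh * ((u64)NSEC_PER_SEC / 5);
}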
diff --git a/lib/Kconfig b/lib/Kconfig
index 4b31a46fb30..75cdb77fa49 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -42,6 +42,9 @@ config GENERIC_IO
config STMP_DEVICE
bool
+config PERCPU_RWSEM
+ boolean
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 41faf0b8df1..3a353091a90 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1115,7 +1115,7 @@ config NOTIFIER_ERROR_INJECTION
depends on DEBUG_KERNEL
select DEBUG_FS
help
- This option provides the ability to inject artifical errors to
+ This option provides the ability to inject artificial errors to
specified notifier chain callbacks. It is useful to test the error
handling of notifier call chain failures.
@@ -1126,7 +1126,7 @@ config CPU_NOTIFIER_ERROR_INJECT
depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
help
This option provides a kernel module that can be used to test
- the error handling of the cpu notifiers by injecting artifical
+ the error handling of the cpu notifiers by injecting artificial
errors to CPU notifier chain callbacks. It is controlled through
debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
@@ -1150,7 +1150,7 @@ config PM_NOTIFIER_ERROR_INJECT
depends on PM && NOTIFIER_ERROR_INJECTION
default m if PM_DEBUG
help
- This option provides the ability to inject artifical errors to
+ This option provides the ability to inject artificial errors to
PM notifier chain callbacks. It is controlled through debugfs
interface /sys/kernel/debug/notifier-error-inject/pm
@@ -1173,7 +1173,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
tristate "Memory hotplug notifier error injection module"
depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
help
- This option provides the ability to inject artifical errors to
+ This option provides the ability to inject artificial errors to
memory hotplug notifier chain callbacks. It is controlled through
debugfs interface under /sys/kernel/debug/notifier-error-inject/memory
@@ -1192,14 +1192,14 @@ config MEMORY_NOTIFIER_ERROR_INJECT
If unsure, say N.
-config PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT
- tristate "pSeries reconfig notifier error injection module"
- depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION
+config OF_RECONFIG_NOTIFIER_ERROR_INJECT
+ tristate "OF reconfig notifier error injection module"
+ depends on OF_DYNAMIC && NOTIFIER_ERROR_INJECTION
help
- This option provides the ability to inject artifical errors to
- pSeries reconfig notifier chain callbacks. It is controlled
+ This option provides the ability to inject artificial errors to
+ OF reconfig notifier chain callbacks. It is controlled
through debugfs interface under
- /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/
+ /sys/kernel/debug/notifier-error-inject/OF-reconfig/
If the notifier call chain should be failed with some events
notified, write the error code to "actions/<notifier event>/error".
diff --git a/lib/Makefile b/lib/Makefile
index e3723c7527d..02ed6c04cd7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
- is_single_threaded.o plist.o decompress.o kobject_uevent.o
+ is_single_threaded.o plist.o decompress.o kobject_uevent.o \
+ earlycpio.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
@@ -93,8 +95,8 @@ obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
-obj-$(CONFIG_PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT) += \
- pSeries-reconfig-notifier-error-inject.o
+obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
+ of-reconfig-notifier-error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 5293d243302..11b9b01fda6 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -81,7 +81,7 @@ next_tag:
goto next_tag;
}
- if (unlikely((tag & 0x1f) == 0x1f)) {
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) {
do {
if (unlikely(datalen - dp < 2))
goto data_overrun_error;
@@ -96,7 +96,7 @@ next_tag:
goto next_tag;
}
- if (unlikely(len == 0x80)) {
+ if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
goto indefinite_len_primitive;
@@ -222,7 +222,7 @@ next_op:
if (unlikely(dp >= datalen - 1))
goto data_overrun_error;
tag = data[dp++];
- if (unlikely((tag & 0x1f) == 0x1f))
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
goto long_tag_not_supported;
if (op & ASN1_OP_MATCH__ANY) {
@@ -254,7 +254,7 @@ next_op:
len = data[dp++];
if (len > 0x7f) {
- if (unlikely(len == 0x80)) {
+ if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
if (unlikely(!(tag & ASN1_CONS_BIT)))
goto indefinite_len_primitive;
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 978537809d8..08a4f068e61 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -31,7 +31,11 @@
static union {
raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+ [0 ... (NR_LOCKS - 1)] = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ },
+};
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
@@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
-
-static int init_atomic64_lock(void)
-{
- int i;
-
- for (i = 0; i < NR_LOCKS; ++i)
- raw_spin_lock_init(&atomic64_lock[i].lock);
- return 0;
-}
-
-pure_initcall(init_atomic64_lock);
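The atomic64 hunk swaps a pure_initcall() that looped over the lock array for a compile-time range-designated initializer, so the locks are valid even for atomic64 users that run before initcalls. A minimal sketch of the same idiom, with hypothetical names:

#include <linux/spinlock.h>

#define NR_EXAMPLE_LOCKS 16

/* Same idiom: every array element is statically unlocked, no init function. */
static struct {
	raw_spinlock_t lock;
} example_locks[NR_EXAMPLE_LOCKS] = {
	[0 ... (NR_EXAMPLE_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(example_locks.lock),
	},
};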
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fdfa1aeba..06f7e4fe8d2 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -353,7 +353,7 @@ again:
EXPORT_SYMBOL(bitmap_find_next_zero_area);
/*
- * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
* second version by Paul Jackson, third by Joe Korty.
*/
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d84beb994f3..5e396accd3d 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -45,6 +45,12 @@ enum {
dma_debug_coherent,
};
+enum map_err_types {
+ MAP_ERR_CHECK_NOT_APPLICABLE,
+ MAP_ERR_NOT_CHECKED,
+ MAP_ERR_CHECKED,
+};
+
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
struct dma_debug_entry {
@@ -57,6 +63,7 @@ struct dma_debug_entry {
int direction;
int sg_call_ents;
int sg_mapped_ents;
+ enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
struct stack_trace stacktrace;
unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
@@ -114,6 +121,12 @@ static struct device_driver *current_driver __read_mostly;
static DEFINE_RWLOCK(driver_name_lock);
+static const char *const maperr2str[] = {
+ [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+ [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+ [MAP_ERR_CHECKED] = "dma map error checked",
+};
+
static const char *type2name[4] = { "single", "page",
"scather-gather", "coherent" };
@@ -376,11 +389,12 @@ void debug_dma_dump_mappings(struct device *dev)
list_for_each_entry(entry, &bucket->list, list) {
if (!dev || dev == entry->dev) {
dev_info(entry->dev,
- "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
+ "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n",
type2name[entry->type], idx,
(unsigned long long)entry->paddr,
entry->dev_addr, entry->size,
- dir2name[entry->direction]);
+ dir2name[entry->direction],
+ maperr2str[entry->map_err_type]);
}
}
@@ -844,16 +858,16 @@ static void check_unmap(struct dma_debug_entry *ref)
struct hash_bucket *bucket;
unsigned long flags;
- if (dma_mapping_error(ref->dev, ref->dev_addr)) {
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free an invalid DMA memory address\n");
- return;
- }
-
bucket = get_hash_bucket(ref, &flags);
entry = bucket_find_exact(bucket, ref);
if (!entry) {
+ if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries "
+ "to free an invalid DMA memory address\n");
+ return;
+ }
err_printk(ref->dev, NULL, "DMA-API: device driver tries "
"to free DMA memory it has not allocated "
"[device address=0x%016llx] [size=%llu bytes]\n",
@@ -910,6 +924,15 @@ static void check_unmap(struct dma_debug_entry *ref)
dir2name[ref->direction]);
}
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ err_printk(ref->dev, entry,
+ "DMA-API: device driver failed to check map error"
+ "[device address=0x%016llx] [size=%llu bytes] "
+ "[mapped as %s]",
+ ref->dev_addr, ref->size,
+ type2name[entry->type]);
+ }
+
hash_bucket_del(entry);
dma_entry_free(entry);
@@ -1017,7 +1040,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
if (unlikely(global_disable))
return;
- if (unlikely(dma_mapping_error(dev, dma_addr)))
+ if (dma_mapping_error(dev, dma_addr))
return;
entry = dma_entry_alloc();
@@ -1030,6 +1053,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
entry->dev_addr = dma_addr;
entry->size = size;
entry->direction = direction;
+ entry->map_err_type = MAP_ERR_NOT_CHECKED;
if (map_single)
entry->type = dma_debug_single;
@@ -1045,6 +1069,30 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
}
EXPORT_SYMBOL(debug_dma_map_page);
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_debug_entry ref;
+ struct dma_debug_entry *entry;
+ struct hash_bucket *bucket;
+ unsigned long flags;
+
+ if (unlikely(global_disable))
+ return;
+
+ ref.dev = dev;
+ ref.dev_addr = dma_addr;
+ bucket = get_hash_bucket(&ref, &flags);
+ entry = bucket_find_exact(bucket, &ref);
+
+ if (!entry)
+ goto out;
+
+ entry->map_err_type = MAP_ERR_CHECKED;
+out:
+ put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction, bool map_single)
{
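The dma-debug additions track whether a driver ever ran the mapping through dma_mapping_error(): debug_dma_mapping_error() flips the entry to MAP_ERR_CHECKED, and check_unmap() now complains when an entry is freed while still MAP_ERR_NOT_CHECKED. The driver-side pattern this enforces looks roughly like the following (illustrative helper, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Every dma_map_*() result must be fed through dma_mapping_error() before
 * use; with DMA_API_DEBUG that is what moves the entry to MAP_ERR_CHECKED. */
static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* checked: no warning at unmap */
		return -ENOMEM;

	*handle = addr;
	return 0;
}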
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e7f7d993357..1db1fc66053 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -62,13 +62,6 @@ static LIST_HEAD(ddebug_tables);
static int verbose = 0;
module_param(verbose, int, 0644);
-/* Return the last part of a pathname */
-static inline const char *basename(const char *path)
-{
- const char *tail = strrchr(path, '/');
- return tail ? tail+1 : path;
-}
-
/* Return the path relative to source root */
static inline const char *trim_prefix(const char *path)
{
@@ -154,7 +147,7 @@ static int ddebug_change(const struct ddebug_query *query,
/* match against the source filename */
if (query->filename &&
strcmp(query->filename, dp->filename) &&
- strcmp(query->filename, basename(dp->filename)) &&
+ strcmp(query->filename, kbasename(dp->filename)) &&
strcmp(query->filename, trim_prefix(dp->filename)))
continue;
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
new file mode 100644
index 00000000000..8078ef49cb7
--- /dev/null
+++ b/lib/earlycpio.c
@@ -0,0 +1,145 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2012 Intel Corporation; author H. Peter Anvin
+ *
+ * This file is part of the Linux kernel, and is made available
+ * under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * earlycpio.c
+ *
+ * Find a specific cpio member; must precede any compressed content.
+ * This is used to locate data items in the initramfs used by the
+ * kernel itself during early boot (before the main initramfs is
+ * decompressed.) It is the responsibility of the initramfs creator
+ * to ensure that these items are uncompressed at the head of the
+ * blob. Depending on the boot loader or package tool that may be a
+ * separate file or part of the same file.
+ */
+
+#include <linux/earlycpio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+enum cpio_fields {
+ C_MAGIC,
+ C_INO,
+ C_MODE,
+ C_UID,
+ C_GID,
+ C_NLINK,
+ C_MTIME,
+ C_FILESIZE,
+ C_MAJ,
+ C_MIN,
+ C_RMAJ,
+ C_RMIN,
+ C_NAMESIZE,
+ C_CHKSUM,
+ C_NFIELDS
+};
+
+/**
+ * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * @path: The directory to search for, including a slash at the end
+ * @data: Pointer to the the cpio archive or a header inside
+ * @len: Remaining length of the cpio based on data pointer
+ * @offset: When a matching file is found, this is the offset to the
+ * beginning of the cpio. It can be used to iterate through
+ * the cpio to find all files inside of a directory path
+ *
+ * @return: struct cpio_data containing the address, length and
+ * filename (with the directory path cut off) of the found file.
+ * If you search for a filename and not for files in a directory,
+ * pass the absolute path of the filename in the cpio and make sure
+ * the match returned an empty filename string.
+ */
+
+struct cpio_data __cpuinit find_cpio_data(const char *path, void *data,
+ size_t len, long *offset)
+{
+ const size_t cpio_header_len = 8*C_NFIELDS - 2;
+ struct cpio_data cd = { NULL, 0, "" };
+ const char *p, *dptr, *nptr;
+ unsigned int ch[C_NFIELDS], *chp, v;
+ unsigned char c, x;
+ size_t mypathsize = strlen(path);
+ int i, j;
+
+ p = data;
+
+ while (len > cpio_header_len) {
+ if (!*p) {
+ /* All cpio headers need to be 4-byte aligned */
+ p += 4;
+ len -= 4;
+ continue;
+ }
+
+ j = 6; /* The magic field is only 6 characters */
+ chp = ch;
+ for (i = C_NFIELDS; i; i--) {
+ v = 0;
+ while (j--) {
+ v <<= 4;
+ c = *p++;
+
+ x = c - '0';
+ if (x < 10) {
+ v += x;
+ continue;
+ }
+
+ x = (c | 0x20) - 'a';
+ if (x < 6) {
+ v += x + 10;
+ continue;
+ }
+
+ goto quit; /* Invalid hexadecimal */
+ }
+ *chp++ = v;
+ j = 8; /* All other fields are 8 characters */
+ }
+
+ if ((ch[C_MAGIC] - 0x070701) > 1)
+ goto quit; /* Invalid magic */
+
+ len -= cpio_header_len;
+
+ dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4);
+ nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4);
+
+ if (nptr > p + len || dptr < p || nptr < dptr)
+ goto quit; /* Buffer overrun */
+
+ if ((ch[C_MODE] & 0170000) == 0100000 &&
+ ch[C_NAMESIZE] >= mypathsize &&
+ !memcmp(p, path, mypathsize)) {
+ *offset = (long)nptr - (long)data;
+ if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
+ pr_warn(
+ "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
+ p, MAX_CPIO_FILE_NAME);
+ }
+ strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
+
+ cd.data = (void *)dptr;
+ cd.size = ch[C_FILESIZE];
+ return cd; /* Found it! */
+ }
+ len -= (nptr - p);
+ p = nptr;
+ }
+
+quit:
+ return cd;
+}
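find_cpio_data() only understands uncompressed "newc" headers, so anything looked up this way has to sit at the front of the initramfs blob, ahead of the compressed payload. A hedged sketch of a caller — the path prefix below is made up for illustration:

#include <linux/kernel.h>
#include <linux/earlycpio.h>

/* Look for files under "kernel/firmware/" in an early initrd image
 * located at (data, len). */
static void example_lookup(void *data, size_t len)
{
	long offset = 0;
	struct cpio_data cd = find_cpio_data("kernel/firmware/", data, len,
					     &offset);

	if (cd.data)
		pr_info("found %s, %lu bytes\n", cd.name,
			(unsigned long)cd.size);
}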
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
index b25903987f7..245900b98c8 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test_main.c
@@ -30,7 +30,8 @@ static void init(void)
{
int i;
for (i = 0; i < NODES; i++) {
- u32 a = prandom32(&rnd), b = prandom32(&rnd);
+ u32 a = prandom_u32_state(&rnd);
+ u32 b = prandom_u32_state(&rnd);
if (a <= b) {
nodes[i].start = a;
nodes[i].last = b;
@@ -40,7 +41,7 @@ static void init(void)
}
}
for (i = 0; i < SEARCHES; i++)
- queries[i] = prandom32(&rnd);
+ queries[i] = prandom_u32_state(&rnd);
}
static int interval_tree_test_init(void)
@@ -51,7 +52,7 @@ static int interval_tree_test_init(void)
printk(KERN_ALERT "interval tree insert/remove");
- prandom32_seed(&rnd, 3141592653589793238ULL);
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index c3615eab0cc..f78ae0c0c4e 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -104,6 +104,22 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
return 0;
}
+/**
+ * kstrtoull - convert a string to an unsigned long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
if (s[0] == '+')
@@ -112,6 +128,22 @@ int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
}
EXPORT_SYMBOL(kstrtoull);
+/**
+ * kstrtoll - convert a string to a long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
int kstrtoll(const char *s, unsigned int base, long long *res)
{
unsigned long long tmp;
@@ -168,6 +200,22 @@ int _kstrtol(const char *s, unsigned int base, long *res)
}
EXPORT_SYMBOL(_kstrtol);
+/**
+ * kstrtouint - convert a string to an unsigned int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
unsigned long long tmp;
@@ -183,6 +231,22 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
}
EXPORT_SYMBOL(kstrtouint);
+/**
+ * kstrtoint - convert a string to an int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoull. Return code must
+ * be checked.
+ */
int kstrtoint(const char *s, unsigned int base, int *res)
{
long long tmp;
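The new kernel-doc pins down the kstrto* contract: strict parsing of the whole string, an optional sign, at most one trailing newline, and a return code that must be checked (unlike the old simple_strtoull()). A short usage sketch:

#include <linux/kernel.h>	/* kstrto*() declarations */

/* Illustrative only: strictly parse a sysfs-style "1234\n" buffer. */
static int parse_count(const char *buf, unsigned int *out)
{
	return kstrtouint(buf, 10, out);	/* 0, -EINVAL or -ERANGE */
}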
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index a07e7268d7e..d71d8949894 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -44,8 +44,8 @@ MODULE_LICENSE("GPL");
} while (0)
#define RETURN(x...) do { \
- clear_bit(__LC_PARANOIA, &lc->flags); \
- smp_mb__after_clear_bit(); return x ; } while (0)
+ clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
+ return x ; } while (0)
/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do { \
@@ -55,9 +55,40 @@ MODULE_LICENSE("GPL");
BUG_ON(i >= lc_->nr_elements); \
BUG_ON(lc_->lc_element[i] != e_); } while (0)
+
+/* We need to atomically
+ * - try to grab the lock (set LC_LOCKED)
+ * - only if there is no pending transaction
+ * (neither LC_DIRTY nor LC_STARVING is set)
+ * Because of PARANOIA_ENTRY() above abusing lc->flags as well,
+ * it is not sufficient to just say
+ * return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
+ */
+int lc_try_lock(struct lru_cache *lc)
+{
+ unsigned long val;
+ do {
+ val = cmpxchg(&lc->flags, 0, LC_LOCKED);
+ } while (unlikely (val == LC_PARANOIA));
+ /* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
+ return 0 == val;
+#if 0
+ /* Alternative approach, spin in case someone enters or leaves a
+ * PARANOIA_ENTRY()/RETURN() section. */
+ unsigned long old, new, val;
+ do {
+ old = lc->flags & LC_PARANOIA;
+ new = old | LC_LOCKED;
+ val = cmpxchg(&lc->flags, old, new);
+ } while (unlikely (val == (old ^ LC_PARANOIA)));
+ return old == val;
+#endif
+}
+
/**
* lc_create - prepares to track objects in an active set
* @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @max_pending_changes: maximum changes to accumulate until a transaction is required
* @e_count: number of elements allowed to be active simultaneously
* @e_size: size of the tracked objects
* @e_off: offset to the &struct lc_element member in a tracked object
@@ -66,6 +97,7 @@ MODULE_LICENSE("GPL");
* or NULL on (allocation) failure.
*/
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+ unsigned max_pending_changes,
unsigned e_count, size_t e_size, size_t e_off)
{
struct hlist_head *slot = NULL;
@@ -98,12 +130,13 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
INIT_LIST_HEAD(&lc->in_use);
INIT_LIST_HEAD(&lc->lru);
INIT_LIST_HEAD(&lc->free);
+ INIT_LIST_HEAD(&lc->to_be_changed);
lc->name = name;
lc->element_size = e_size;
lc->element_off = e_off;
lc->nr_elements = e_count;
- lc->new_number = LC_FREE;
+ lc->max_pending_changes = max_pending_changes;
lc->lc_cache = cache;
lc->lc_element = element;
lc->lc_slot = slot;
@@ -117,6 +150,7 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
e = p + e_off;
e->lc_index = i;
e->lc_number = LC_FREE;
+ e->lc_new_number = LC_FREE;
list_add(&e->list, &lc->free);
element[i] = e;
}
@@ -175,15 +209,15 @@ void lc_reset(struct lru_cache *lc)
INIT_LIST_HEAD(&lc->in_use);
INIT_LIST_HEAD(&lc->lru);
INIT_LIST_HEAD(&lc->free);
+ INIT_LIST_HEAD(&lc->to_be_changed);
lc->used = 0;
lc->hits = 0;
lc->misses = 0;
lc->starving = 0;
- lc->dirty = 0;
+ lc->locked = 0;
lc->changed = 0;
+ lc->pending_changes = 0;
lc->flags = 0;
- lc->changing_element = NULL;
- lc->new_number = LC_FREE;
memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
for (i = 0; i < lc->nr_elements; i++) {
@@ -194,6 +228,7 @@ void lc_reset(struct lru_cache *lc)
/* re-init it */
e->lc_index = i;
e->lc_number = LC_FREE;
+ e->lc_new_number = LC_FREE;
list_add(&e->list, &lc->free);
}
}
@@ -208,14 +243,14 @@ size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
/* NOTE:
* total calls to lc_get are
* (starving + hits + misses)
- * misses include "dirty" count (update from an other thread in
+ * misses include "locked" count (update from an other thread in
* progress) and "changed", when this in fact lead to an successful
* update of the cache.
*/
return seq_printf(seq, "\t%s: used:%u/%u "
- "hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
+ "hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
lc->name, lc->used, lc->nr_elements,
- lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
+ lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
}
static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
@@ -224,16 +259,8 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
}
-/**
- * lc_find - find element by label, if present in the hash table
- * @lc: The lru_cache object
- * @enr: element number
- *
- * Returns the pointer to an element, if the element with the requested
- * "label" or element number is present in the hash table,
- * or NULL if not found. Does not change the refcnt.
- */
-struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
+static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
+ bool include_changing)
{
struct hlist_node *n;
struct lc_element *e;
@@ -241,29 +268,48 @@ struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
- if (e->lc_number == enr)
+ /* "about to be changed" elements, pending transaction commit,
+ * are hashed by their "new number". "Normal" elements have
+ * lc_number == lc_new_number. */
+ if (e->lc_new_number != enr)
+ continue;
+ if (e->lc_new_number == e->lc_number || include_changing)
return e;
+ break;
}
return NULL;
}
-/* returned element will be "recycled" immediately */
-static struct lc_element *lc_evict(struct lru_cache *lc)
+/**
+ * lc_find - find element by label, if present in the hash table
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns the pointer to an element, if the element with the requested
+ * "label" or element number is present in the hash table,
+ * or NULL if not found. Does not change the refcnt.
+ * Ignores elements that are "about to be used", i.e. not yet in the active
+ * set, but still pending transaction commit.
+ */
+struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
{
- struct list_head *n;
- struct lc_element *e;
-
- if (list_empty(&lc->lru))
- return NULL;
-
- n = lc->lru.prev;
- e = list_entry(n, struct lc_element, list);
-
- PARANOIA_LC_ELEMENT(lc, e);
+ return __lc_find(lc, enr, 0);
+}
- list_del(&e->list);
- hlist_del(&e->colision);
- return e;
+/**
+ * lc_is_used - find element by label
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns true, if the element with the requested "label" or element number is
+ * present in the hash table, and is used (refcnt > 0).
+ * Also finds elements that are not _currently_ used but only "about to be
+ * used", i.e. on the "to_be_changed" list, pending transaction commit.
+ */
+bool lc_is_used(struct lru_cache *lc, unsigned int enr)
+{
+ struct lc_element *e = __lc_find(lc, enr, 1);
+ return e && e->refcnt;
}
/**
@@ -280,22 +326,34 @@ void lc_del(struct lru_cache *lc, struct lc_element *e)
PARANOIA_LC_ELEMENT(lc, e);
BUG_ON(e->refcnt);
- e->lc_number = LC_FREE;
+ e->lc_number = e->lc_new_number = LC_FREE;
hlist_del_init(&e->colision);
list_move(&e->list, &lc->free);
RETURN();
}
-static struct lc_element *lc_get_unused_element(struct lru_cache *lc)
+static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
{
struct list_head *n;
+ struct lc_element *e;
+
+ if (!list_empty(&lc->free))
+ n = lc->free.next;
+ else if (!list_empty(&lc->lru))
+ n = lc->lru.prev;
+ else
+ return NULL;
+
+ e = list_entry(n, struct lc_element, list);
+ PARANOIA_LC_ELEMENT(lc, e);
- if (list_empty(&lc->free))
- return lc_evict(lc);
+ e->lc_new_number = new_number;
+ if (!hlist_unhashed(&e->colision))
+ __hlist_del(&e->colision);
+ hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
+ list_move(&e->list, &lc->to_be_changed);
- n = lc->free.next;
- list_del(n);
- return list_entry(n, struct lc_element, list);
+ return e;
}
static int lc_unused_element_available(struct lru_cache *lc)
@@ -308,45 +366,7 @@ static int lc_unused_element_available(struct lru_cache *lc)
return 0;
}
-
-/**
- * lc_get - get element by label, maybe change the active set
- * @lc: the lru cache to operate on
- * @enr: the label to look up
- *
- * Finds an element in the cache, increases its usage count,
- * "touches" and returns it.
- *
- * In case the requested number is not present, it needs to be added to the
- * cache. Therefore it is possible that an other element becomes evicted from
- * the cache. In either case, the user is notified so he is able to e.g. keep
- * a persistent log of the cache changes, and therefore the objects in use.
- *
- * Return values:
- * NULL
- * The cache was marked %LC_STARVING,
- * or the requested label was not in the active set
- * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
- * Or no unused or free element could be recycled (@lc will be marked as
- * %LC_STARVING, blocking further lc_get() operations).
- *
- * pointer to the element with the REQUESTED element number.
- * In this case, it can be used right away
- *
- * pointer to an UNUSED element with some different element number,
- * where that different number may also be %LC_FREE.
- *
- * In this case, the cache is marked %LC_DIRTY (blocking further changes),
- * and the returned element pointer is removed from the lru list and
- * hash collision chains. The user now should do whatever housekeeping
- * is necessary.
- * Then he must call lc_changed(lc,element_pointer), to finish
- * the change.
- *
- * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
- * any cache set change.
- */
-struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change)
{
struct lc_element *e;
@@ -356,8 +376,12 @@ struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
RETURN(NULL);
}
- e = lc_find(lc, enr);
- if (e) {
+ e = __lc_find(lc, enr, 1);
+ /* if lc_new_number != lc_number,
+ * this enr is currently being pulled in already,
+ * and will be available once the pending transaction
+ * has been committed. */
+ if (e && e->lc_new_number == e->lc_number) {
++lc->hits;
if (e->refcnt++ == 0)
lc->used++;
@@ -366,6 +390,26 @@ struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
}
++lc->misses;
+ if (!may_change)
+ RETURN(NULL);
+
+ /* It has been found above, but on the "to_be_changed" list, not yet
+ * committed. Don't pull it in twice, wait for the transaction, then
+ * try again */
+ if (e)
+ RETURN(NULL);
+
+ /* To avoid races with lc_try_lock(), first, mark us dirty
+ * (using test_and_set_bit, as it implies memory barriers), ... */
+ test_and_set_bit(__LC_DIRTY, &lc->flags);
+
+ /* ... only then check if it is locked anyways. If lc_unlock clears
+ * the dirty bit again, that's not a problem, we will come here again.
+ */
+ if (test_bit(__LC_LOCKED, &lc->flags)) {
+ ++lc->locked;
+ RETURN(NULL);
+ }
/* In case there is nothing available and we can not kick out
* the LRU element, we have to wait ...
@@ -375,71 +419,109 @@ struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
RETURN(NULL);
}
- /* it was not present in the active set.
- * we are going to recycle an unused (or even "free") element.
- * user may need to commit a transaction to record that change.
- * we serialize on flags & TF_DIRTY */
- if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
- ++lc->dirty;
+ /* It was not present in the active set. We are going to recycle an
+ * unused (or even "free") element, but we won't accumulate more than
+ * max_pending_changes changes. */
+ if (lc->pending_changes >= lc->max_pending_changes)
RETURN(NULL);
- }
- e = lc_get_unused_element(lc);
+ e = lc_prepare_for_change(lc, enr);
BUG_ON(!e);
clear_bit(__LC_STARVING, &lc->flags);
BUG_ON(++e->refcnt != 1);
lc->used++;
-
- lc->changing_element = e;
- lc->new_number = enr;
+ lc->pending_changes++;
RETURN(e);
}
-/* similar to lc_get,
- * but only gets a new reference on an existing element.
- * you either get the requested element, or NULL.
- * will be consolidated into one function.
+/**
+ * lc_get - get element by label, maybe change the active set
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Finds an element in the cache, increases its usage count,
+ * "touches" and returns it.
+ *
+ * In case the requested number is not present, it needs to be added to the
+ * cache. Therefore it is possible that an other element becomes evicted from
+ * the cache. In either case, the user is notified so he is able to e.g. keep
+ * a persistent log of the cache changes, and therefore the objects in use.
+ *
+ * Return values:
+ * NULL
+ * The cache was marked %LC_STARVING,
+ * or the requested label was not in the active set
+ * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
+ * Or no unused or free element could be recycled (@lc will be marked as
+ * %LC_STARVING, blocking further lc_get() operations).
+ *
+ * pointer to the element with the REQUESTED element number.
+ * In this case, it can be used right away
+ *
+ * pointer to an UNUSED element with some different element number,
+ * where that different number may also be %LC_FREE.
+ *
+ * In this case, the cache is marked %LC_DIRTY,
+ * so lc_try_lock() will no longer succeed.
+ * The returned element pointer is moved to the "to_be_changed" list,
+ * and registered with the new element number on the hash collision chains,
+ * so it is possible to pick it up from lc_is_used().
+ * Up to "max_pending_changes" (see lc_create()) can be accumulated.
+ * The user now should do whatever housekeeping is necessary,
+ * typically serialize on lc_try_lock_for_transaction(), then call
+ * lc_committed(lc) and lc_unlock(), to finish the change.
+ *
+ * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
+ * any cache set change.
*/
-struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
+struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
- struct lc_element *e;
-
- PARANOIA_ENTRY();
- if (lc->flags & LC_STARVING) {
- ++lc->starving;
- RETURN(NULL);
- }
+ return __lc_get(lc, enr, 1);
+}
- e = lc_find(lc, enr);
- if (e) {
- ++lc->hits;
- if (e->refcnt++ == 0)
- lc->used++;
- list_move(&e->list, &lc->in_use); /* Not evictable... */
- }
- RETURN(e);
+/**
+ * lc_try_get - get element by label, if present; do not change the active set
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Finds an element in the cache, increases its usage count,
+ * "touches" and returns it.
+ *
+ * Return values:
+ * NULL
+ * The cache was marked %LC_STARVING,
+ * or the requested label was not in the active set
+ *
+ * pointer to the element with the REQUESTED element number.
+ * In this case, it can be used right away
+ */
+struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
+{
+ return __lc_get(lc, enr, 0);
}
/**
- * lc_changed - tell @lc that the change has been recorded
+ * lc_committed - tell @lc that pending changes have been recorded
* @lc: the lru cache to operate on
- * @e: the element pending label change
+ *
+ * User is expected to serialize on explicit lc_try_lock_for_transaction()
+ * before the transaction is started, and later needs to lc_unlock() explicitly
+ * as well.
*/
-void lc_changed(struct lru_cache *lc, struct lc_element *e)
+void lc_committed(struct lru_cache *lc)
{
+ struct lc_element *e, *tmp;
+
PARANOIA_ENTRY();
- BUG_ON(e != lc->changing_element);
- PARANOIA_LC_ELEMENT(lc, e);
- ++lc->changed;
- e->lc_number = lc->new_number;
- list_add(&e->list, &lc->in_use);
- hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
- lc->changing_element = NULL;
- lc->new_number = LC_FREE;
- clear_bit(__LC_DIRTY, &lc->flags);
- smp_mb__after_clear_bit();
+ list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
+ /* count number of changes, not number of transactions */
+ ++lc->changed;
+ e->lc_number = e->lc_new_number;
+ list_move(&e->list, &lc->in_use);
+ }
+ lc->pending_changes = 0;
RETURN();
}
@@ -458,13 +540,12 @@ unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
PARANOIA_ENTRY();
PARANOIA_LC_ELEMENT(lc, e);
BUG_ON(e->refcnt == 0);
- BUG_ON(e == lc->changing_element);
+ BUG_ON(e->lc_number != e->lc_new_number);
if (--e->refcnt == 0) {
/* move it to the front of LRU. */
list_move(&e->list, &lc->lru);
lc->used--;
- clear_bit(__LC_STARVING, &lc->flags);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(__LC_STARVING, &lc->flags);
}
RETURN(e->refcnt);
}
@@ -504,16 +585,24 @@ unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
struct lc_element *e;
+ struct list_head *lh;
if (index < 0 || index >= lc->nr_elements)
return;
e = lc_element_by_index(lc, index);
- e->lc_number = enr;
+ BUG_ON(e->lc_number != e->lc_new_number);
+ BUG_ON(e->refcnt != 0);
+ e->lc_number = e->lc_new_number = enr;
hlist_del_init(&e->colision);
- hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
- list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
+ if (enr == LC_FREE)
+ lh = &lc->free;
+ else {
+ hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
+ lh = &lc->lru;
+ }
+ list_move(&e->list, lh);
}
/**
@@ -553,8 +642,10 @@ EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
-EXPORT_SYMBOL(lc_changed);
+EXPORT_SYMBOL(lc_committed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
+EXPORT_SYMBOL(lc_try_lock);
+EXPORT_SYMBOL(lc_is_used);
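With the reworked lru_cache, lc_get() no longer hands back a single "changing" element; misses are queued on to_be_changed (up to max_pending_changes) and only become active once the caller records the whole batch and calls lc_committed(). In rough outline the flow looks like the sketch below; lc_try_lock_for_transaction() and lc_unlock() are the helpers named in the kernel-doc above and are assumed to come from <linux/lru_cache.h>:

#include <linux/lru_cache.h>

/* Rough outline of the batched-update flow described in the lc_get()
 * kernel-doc. Assumes lc_try_lock_for_transaction()/lc_unlock() helpers. */
static void example_activate(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e = lc_get(lc, enr);

	if (e && e->lc_number != enr) {
		/* element is still pending on to_be_changed */
		if (lc_try_lock_for_transaction(lc)) {
			/* ... persist all accumulated changes here ... */
			lc_committed(lc);	/* to_be_changed -> in_use */
			lc_unlock(lc);
		}
	}
	/* a non-NULL e holds a reference; drop it later with lc_put() */
}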
diff --git a/lib/pSeries-reconfig-notifier-error-inject.c b/lib/of-reconfig-notifier-error-inject.c
index 7f7c98dcd5c..8dc79861758 100644
--- a/lib/pSeries-reconfig-notifier-error-inject.c
+++ b/lib/of-reconfig-notifier-error-inject.c
@@ -1,20 +1,20 @@
#include <linux/kernel.h>
#include <linux/module.h>
-
-#include <asm/pSeries_reconfig.h>
+#include <linux/of.h>
#include "notifier-error-inject.h"
static int priority;
module_param(priority, int, 0);
-MODULE_PARM_DESC(priority, "specify pSeries reconfig notifier priority");
+MODULE_PARM_DESC(priority, "specify OF reconfig notifier priority");
static struct notifier_err_inject reconfig_err_inject = {
.actions = {
- { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_ADD) },
- { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_REMOVE) },
- { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_ADD) },
- { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_REMOVE) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ATTACH_NODE) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_DETACH_NODE) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ADD_PROPERTY) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_REMOVE_PROPERTY) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_UPDATE_PROPERTY) },
{}
}
};
@@ -25,12 +25,12 @@ static int err_inject_init(void)
{
int err;
- dir = notifier_err_inject_init("pSeries-reconfig",
+ dir = notifier_err_inject_init("OF-reconfig",
notifier_err_inject_dir, &reconfig_err_inject, priority);
if (IS_ERR(dir))
return PTR_ERR(dir);
- err = pSeries_reconfig_notifier_register(&reconfig_err_inject.nb);
+ err = of_reconfig_notifier_register(&reconfig_err_inject.nb);
if (err)
debugfs_remove_recursive(dir);
@@ -39,13 +39,13 @@ static int err_inject_init(void)
static void err_inject_exit(void)
{
- pSeries_reconfig_notifier_unregister(&reconfig_err_inject.nb);
+ of_reconfig_notifier_unregister(&reconfig_err_inject.nb);
debugfs_remove_recursive(dir);
}
module_init(err_inject_init);
module_exit(err_inject_exit);
-MODULE_DESCRIPTION("pSeries reconfig notifier error injection module");
+MODULE_DESCRIPTION("OF reconfig notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
new file mode 100644
index 00000000000..652a8ee8efe
--- /dev/null
+++ b/lib/percpu-rwsem.c
@@ -0,0 +1,165 @@
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/percpu.h>
+#include <linux/wait.h>
+#include <linux/lockdep.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+ const char *name, struct lock_class_key *rwsem_key)
+{
+ brw->fast_read_ctr = alloc_percpu(int);
+ if (unlikely(!brw->fast_read_ctr))
+ return -ENOMEM;
+
+ /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
+ __init_rwsem(&brw->rw_sem, name, rwsem_key);
+ atomic_set(&brw->write_ctr, 0);
+ atomic_set(&brw->slow_read_ctr, 0);
+ init_waitqueue_head(&brw->write_waitq);
+ return 0;
+}
+
+void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+{
+ free_percpu(brw->fast_read_ctr);
+ brw->fast_read_ctr = NULL; /* catch use after free bugs */
+}
+
+/*
+ * This is the fast-path for down_read/up_read, it only needs to ensure
+ * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
+ * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
+ * serialize with the preempt-disabled section below.
+ *
+ * The nontrivial part is that we should guarantee acquire/release semantics
+ * in case when
+ *
+ * R_W: down_write() comes after up_read(), the writer should see all
+ * changes done by the reader
+ * or
+ * W_R: down_read() comes after up_write(), the reader should see all
+ * changes done by the writer
+ *
+ * If this helper fails the callers rely on the normal rw_semaphore and
+ * atomic_dec_and_test(), so in this case we have the necessary barriers.
+ *
+ * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
+ * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
+ * reader inside the critical section. See the comments in down_write and
+ * up_write below.
+ */
+static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+{
+ bool success = false;
+
+ preempt_disable();
+ if (likely(!atomic_read(&brw->write_ctr))) {
+ __this_cpu_add(*brw->fast_read_ctr, val);
+ success = true;
+ }
+ preempt_enable();
+
+ return success;
+}
+
+/*
+ * Like the normal down_read() this is not recursive, the writer can
+ * come after the first percpu_down_read() and create the deadlock.
+ *
+ * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
+ * percpu_up_read() does rwsem_release(). This pairs with the usage
+ * of ->rw_sem in percpu_down/up_write().
+ */
+void percpu_down_read(struct percpu_rw_semaphore *brw)
+{
+ might_sleep();
+ if (likely(update_fast_ctr(brw, +1))) {
+ rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+ return;
+ }
+
+ down_read(&brw->rw_sem);
+ atomic_inc(&brw->slow_read_ctr);
+ /* avoid up_read()->rwsem_release() */
+ __up_read(&brw->rw_sem);
+}
+
+void percpu_up_read(struct percpu_rw_semaphore *brw)
+{
+ rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
+
+ if (likely(update_fast_ctr(brw, -1)))
+ return;
+
+ /* false-positive is possible but harmless */
+ if (atomic_dec_and_test(&brw->slow_read_ctr))
+ wake_up_all(&brw->write_waitq);
+}
+
+static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+{
+ unsigned int sum = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ sum += per_cpu(*brw->fast_read_ctr, cpu);
+ per_cpu(*brw->fast_read_ctr, cpu) = 0;
+ }
+
+ return sum;
+}
+
+/*
+ * A writer increments ->write_ctr to force the readers to switch to the
+ * slow mode, note the atomic_read() check in update_fast_ctr().
+ *
+ * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
+ * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
+ * counter it represents the number of active readers.
+ *
+ * Finally the writer takes ->rw_sem for writing and blocks the new readers,
+ * then waits until the slow counter becomes zero.
+ */
+void percpu_down_write(struct percpu_rw_semaphore *brw)
+{
+ /* tell update_fast_ctr() there is a pending writer */
+ atomic_inc(&brw->write_ctr);
+ /*
+ * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
+ * so that update_fast_ctr() can't succeed.
+ *
+ * 2. Ensures we see the result of every previous this_cpu_add() in
+ * update_fast_ctr().
+ *
+ * 3. Ensures that if any reader has exited its critical section via
+ * fast-path, it executes a full memory barrier before we return.
+ * See R_W case in the comment above update_fast_ctr().
+ */
+ synchronize_sched_expedited();
+
+ /* exclude other writers, and block the new readers completely */
+ down_write(&brw->rw_sem);
+
+ /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
+ atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+
+ /* wait for all readers to complete their percpu_up_read() */
+ wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+}
+
+void percpu_up_write(struct percpu_rw_semaphore *brw)
+{
+ /* release the lock, but the readers can't use the fast-path */
+ up_write(&brw->rw_sem);
+ /*
+ * Insert the barrier before the next fast-path in down_read,
+ * see W_R case in the comment above update_fast_ctr().
+ */
+ synchronize_sched_expedited();
+ /* the last writer unblocks update_fast_ctr() */
+ atomic_dec(&brw->write_ctr);
+}
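
The code above is easier to follow with a usage sketch (illustrative only, not part of the patch): readers pay only a preempt-disabled per-cpu increment as long as no writer has raised ->write_ctr, while a writer pays for two synchronize_sched_expedited() calls plus the underlying rw_semaphore. A minimal, hypothetical reader/writer pair, assuming the percpu_init_rwsem() helper added earlier in this file and a config_value variable invented for illustration:

	static struct percpu_rw_semaphore config_sem;	/* percpu_init_rwsem(&config_sem) at init */
	static int config_value;

	static int read_config(void)
	{
		int val;

		percpu_down_read(&config_sem);		/* usually just a per-cpu increment */
		val = config_value;
		percpu_up_read(&config_sem);

		return val;
	}

	static void write_config(int val)
	{
		percpu_down_write(&config_sem);		/* forces readers onto ->rw_sem */
		config_value = val;
		percpu_up_write(&config_sem);
	}
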
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index de06dfe165b..9f7c184725d 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -1,8 +1,11 @@
obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
-raid6_pq-y += algos.o recov.o recov_ssse3.o tables.o int1.o int2.o int4.o \
- int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \
- altivec8.o mmx.o sse1.o sse2.o
+raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
+ int8.o int16.o int32.o
+
+raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
+raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+
hostprogs-y += mktables
quiet_cmd_unroll = UNROLL $@
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 589f5f50ad2..6d7316fe9f3 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -45,11 +45,20 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_sse1x2,
&raid6_sse2x1,
&raid6_sse2x2,
+#ifdef CONFIG_AS_AVX2
+ &raid6_avx2x1,
+ &raid6_avx2x2,
+#endif
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
&raid6_sse2x1,
&raid6_sse2x2,
&raid6_sse2x4,
+#ifdef CONFIG_AS_AVX2
+ &raid6_avx2x1,
+ &raid6_avx2x2,
+ &raid6_avx2x4,
+#endif
#endif
#ifdef CONFIG_ALTIVEC
&raid6_altivec1,
@@ -72,6 +81,9 @@ EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
+#ifdef CONFIG_AS_AVX2
+ &raid6_recov_avx2,
+#endif
&raid6_recov_ssse3,
#endif
&raid6_recov_intx1,
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index b71012b756f..7cc12b532e9 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,13 +24,10 @@
#include <linux/raid/pq.h>
-#ifdef CONFIG_ALTIVEC
-
#include <altivec.h>
#ifdef __KERNEL__
# include <asm/cputable.h>
# include <asm/switch_to.h>
-#endif
/*
* This is the C data type to use. We use a vector of
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
new file mode 100644
index 00000000000..bc3b1dd436e
--- /dev/null
+++ b/lib/raid6/avx2.c
@@ -0,0 +1,251 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+ *
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 59 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * AVX2 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#ifdef CONFIG_AS_AVX2
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static const struct raid6_avx2_constants {
+ u64 x1d[4];
+} raid6_avx2_constants __aligned(32) = {
+ { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
+};
+
+static int raid6_have_avx2(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
+}
+
+/*
+ * Plain AVX2 implementation
+ */
+static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* Zero temp */
+
+ for (d = 0; d < bytes; d += 32) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
+ asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
+ for (z = z0-2; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+ asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
+ }
+ asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x1 = {
+ raid6_avx21_gen_syndrome,
+ raid6_have_avx2,
+ "avx2x1",
+ 1 /* Has cache hints */
+};
+
+/*
+ * Unrolled-by-2 AVX2 implementation
+ */
+static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
+
+ /* We uniformly assume a single prefetch covers at least 32 bytes */
+ for (d = 0; d < bytes; d += 64) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
+ asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
+ asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
+ for (z = z0-1; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+ asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x2 = {
+ raid6_avx22_gen_syndrome,
+ raid6_have_avx2,
+ "avx2x2",
+ 1 /* Has cache hints */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 AVX2 implementation
+ */
+static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2"); /* P[0] */
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* P[1] */
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4"); /* Q[0] */
+ asm volatile("vpxor %ymm6,%ymm6,%ymm6"); /* Q[1] */
+ asm volatile("vpxor %ymm10,%ymm10,%ymm10"); /* P[2] */
+ asm volatile("vpxor %ymm11,%ymm11,%ymm11"); /* P[3] */
+ asm volatile("vpxor %ymm12,%ymm12,%ymm12"); /* Q[2] */
+ asm volatile("vpxor %ymm14,%ymm14,%ymm14"); /* Q[3] */
+
+ for (d = 0; d < bytes; d += 128) {
+ for (z = z0; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
+ asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+ asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
+ asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3");
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+ asm volatile("vpxor %ymm10,%ymm10,%ymm10");
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vpxor %ymm11,%ymm11,%ymm11");
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vpxor %ymm6,%ymm6,%ymm6");
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+ asm volatile("vpxor %ymm12,%ymm12,%ymm12");
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ asm volatile("vpxor %ymm14,%ymm14,%ymm14");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x4 = {
+ raid6_avx24_gen_syndrome,
+ raid6_have_avx2,
+ "avx2x4",
+ 1 /* Has cache hints */
+};
+#endif
+
+#endif /* CONFIG_AS_AVX2 */
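
The vpcmpgtb/vpaddb/vpand/vpxor sequence in the loops above is the usual SIMD idiom for multiplying 32 GF(2^8) bytes by 2 at once under the 0x1d reduction polynomial. A scalar sketch of the same per-byte step, for orientation only:

	/* Scalar equivalent of one "multiply Q by 2 in GF(2^8)" step from the loops above. */
	static inline u8 gf_mul2(u8 w)
	{
		u8 mask = (w & 0x80) ? 0xff : 0x00;	/* vpcmpgtb against the zero register */

		return (u8)((w << 1) ^ (mask & 0x1d));	/* vpaddb, then vpand + vpxor with 0x1d */
	}

Each data block is then folded in with two plain XORs (vpxor into ymm2 and ymm4), which is the standard P ^= D, Q = 2*Q ^ D recurrence for the RAID-6 syndrome, walked from the highest data disk down.
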
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
index 279347f2309..590c71c9e20 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid6/mmx.c
@@ -16,7 +16,7 @@
* MMX implementation of RAID-6 syndrome functions
*/
-#if defined(__i386__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
#include "x86.h"
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
new file mode 100644
index 00000000000..e1eea433a49
--- /dev/null
+++ b/lib/raid6/recov_avx2.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifdef CONFIG_AS_AVX2
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_avx2(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX);
+}
+
+static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ Use the dead data pages as temporary storage for
+ delta p and delta q */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /* ymm0 = x0f[16] */
+ asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
+ asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
+ asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
+ asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
+ asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
+ asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
+ asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
+ asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));
+
+ /*
+ * 1 = dq[0] ^ q[0]
+ * 9 = dq[32] ^ q[32]
+ * 0 = dp[0] ^ p[0]
+ * 8 = dp[32] ^ p[32]
+ */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %ymm1, %ymm3");
+ asm volatile("vpsraw $4, %ymm9, %ymm12");
+ asm volatile("vpand %ymm7, %ymm1, %ymm1");
+ asm volatile("vpand %ymm7, %ymm9, %ymm9");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm12, %ymm12");
+ asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
+ asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
+ asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+ asm volatile("vpxor %ymm14, %ymm15, %ymm15");
+ asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+ /*
+ * 5 = qx[0]
+ * 15 = qx[32]
+ */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+ asm volatile("vpsraw $4, %ymm0, %ymm2");
+ asm volatile("vpsraw $4, %ymm8, %ymm6");
+ asm volatile("vpand %ymm7, %ymm0, %ymm3");
+ asm volatile("vpand %ymm7, %ymm8, %ymm14");
+ asm volatile("vpand %ymm7, %ymm2, %ymm2");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
+ asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
+ asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm12, %ymm13, %ymm13");
+
+ /*
+ * 1 = pbmul[px[0]]
+ * 13 = pbmul[px[32]]
+ */
+ asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm15, %ymm13, %ymm13");
+
+ /*
+ * 1 = db = DQ
+ * 13 = db[32] = DQ[32]
+ */
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm13,%0" : "=m" (dq[32]));
+ asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+ asm volatile("vpxor %ymm13, %ymm8, %ymm8");
+
+ asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+ asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+#else
+ asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
+ asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
+ asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
+ asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));
+
+ /* 1 = dq ^ q; 0 = dp ^ p */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+ /*
+ * 1 = dq ^ q
+ * 3 = (dq ^ q) >> 4
+ */
+ asm volatile("vpsraw $4, %ymm1, %ymm3");
+ asm volatile("vpand %ymm7, %ymm1, %ymm1");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+ asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+ /* 5 = qx */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+
+ asm volatile("vpsraw $4, %ymm0, %ymm2");
+ asm volatile("vpand %ymm7, %ymm0, %ymm3");
+ asm volatile("vpand %ymm7, %ymm2, %ymm2");
+ asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+
+ /* 1 = pbmul[px] */
+ asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+ /* 1 = db = DQ */
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+
+ asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+ asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dp += 32;
+ dq += 32;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ Use the dead data page as temporary storage for delta q */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+ asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
+ asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+ asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));
+
+ /*
+ * 3 = q[0] ^ dq[0]
+ * 8 = q[32] ^ dq[32]
+ */
+ asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+ asm volatile("vmovapd %ymm0, %ymm13");
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+ asm volatile("vmovapd %ymm1, %ymm14");
+
+ asm volatile("vpsraw $4, %ymm3, %ymm6");
+ asm volatile("vpsraw $4, %ymm8, %ymm12");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm8, %ymm8");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpand %ymm7, %ymm12, %ymm12");
+ asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+ asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+ asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
+ asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm13, %ymm14, %ymm14");
+
+ /*
+ * 1 = qmul[q[0] ^ dq[0]]
+ * 14 = qmul[q[32] ^ dq[32]]
+ */
+ asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+ asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
+ asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+ asm volatile("vpxor %ymm14, %ymm12, %ymm12");
+
+ /*
+ * 2 = p[0] ^ qmul[q[0] ^ dq[0]]
+ * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
+ */
+
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
+ asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+ asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+#else
+ asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+ asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+
+ /* 3 = q ^ dq */
+
+ asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %ymm3, %ymm6");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+
+ /* 1 = qmul[q ^ dq] */
+
+ asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+ asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+
+ /* 2 = p ^ qmul[q ^ dq] */
+
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dq += 32;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_avx2 = {
+ .data2 = raid6_2data_recov_avx2,
+ .datap = raid6_datap_recov_avx2,
+ .valid = raid6_has_avx2,
+#ifdef CONFIG_X86_64
+ .name = "avx2x2",
+#else
+ .name = "avx2x1",
+#endif
+ .priority = 2,
+};
+
+#else
+#warning "your version of binutils lacks AVX2 support"
+#endif
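
The register-level comments above ("5 = qx", "1 = pbmul[px]", "1 = db = DQ", ...) track the generic two-failure recovery loop that these AVX2 blocks vectorize 32 or 64 bytes at a time. As a hedged reference, the scalar form (essentially what the generic code in lib/raid6/recov.c does) is roughly:

	/* Scalar sketch of the 2-data recovery inner loop, for orientation only. */
	while (bytes--) {
		u8 px = *p ^ *dp;		/* P xor P'                     */
		u8 qx = qmul[*q ^ *dq];		/* qmul[Q xor Q']               */
		u8 db = pbmul[px] ^ qx;		/* reconstructed data for failb */

		*dq++ = db;
		*dp++ = db ^ px;		/* reconstructed data for faila */
		p++; q++;
	}

where pbmul and qmul stand for full 256-entry GF(2^8) multiplication tables rather than the split low/high-nibble tables consumed by the vpshufb variant above.
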
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index ecb710c0b4d..a9168328f03 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -7,8 +7,6 @@
* of the License.
*/
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -332,5 +330,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
#endif
.priority = 1,
};
-
-#endif
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
index 10dd91948c0..f7629713944 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid6/sse1.c
@@ -21,7 +21,7 @@
* worthwhile as a separate implementation.
*/
-#if defined(__i386__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
#include "x86.h"
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index bc2d57daa58..85b82c85f28 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -17,8 +17,6 @@
*
*/
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
#include <linux/raid/pq.h>
#include "x86.h"
@@ -159,9 +157,7 @@ const struct raid6_calls raid6_sse2x2 = {
1 /* Has cache hints */
};
-#endif
-
-#if defined(__x86_64__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_64
/*
* Unrolled-by-4 SSE2 implementation
@@ -259,4 +255,4 @@ const struct raid6_calls raid6_sse2x4 = {
1 /* Has cache hints */
};
-#endif
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index c76151d9476..087332dbf8a 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -10,6 +10,31 @@ LD = ld
AWK = awk -f
AR = ar
RANLIB = ranlib
+OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
+
+ARCH := $(shell uname -m 2>/dev/null | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+ CFLAGS += -DCONFIG_X86_32
+ IS_X86 = yes
+endif
+ifeq ($(ARCH),x86_64)
+ CFLAGS += -DCONFIG_X86_64
+ IS_X86 = yes
+endif
+
+ifeq ($(IS_X86),yes)
+ OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
+ CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+else
+ HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
+ gcc -c -x c - >&/dev/null && \
+ rm ./-.o && echo yes)
+ ifeq ($(HAS_ALTIVEC),yes)
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
+ endif
+endif
.c.o:
$(CC) $(CFLAGS) -c -o $@ $<
@@ -22,9 +47,7 @@ RANLIB = ranlib
all: raid6.a raid6test
-raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \
- altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o algos.o \
- tables.o
+raid6.a: $(OBJS)
rm -f $@
$(AR) cq $@ $^
$(RANLIB) $@
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
index d55d63232c5..b7595484a81 100644
--- a/lib/raid6/x86.h
+++ b/lib/raid6/x86.h
@@ -45,19 +45,23 @@ static inline void kernel_fpu_end(void)
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
/* Should work well enough on modern CPUs for testing */
static inline int boot_cpu_has(int flag)
{
- u32 eax = (flag & 0x20) ? 0x80000001 : 1;
- u32 ecx, edx;
+ u32 eax, ebx, ecx, edx;
+
+ eax = (flag & 0x100) ? 7 :
+ (flag & 0x20) ? 0x80000001 : 1;
+ ecx = 0;
asm volatile("cpuid"
- : "+a" (eax), "=d" (edx), "=c" (ecx)
- : : "ebx");
+ : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx));
- return ((flag & 0x80 ? ecx : edx) >> (flag & 31)) & 1;
+ return ((flag & 0x100 ? ebx :
+ (flag & 0x80) ? ecx : edx) >> (flag & 31)) & 1;
}
#endif /* ndef __KERNEL__ */
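
For readers of the user-space test stub above: feature flags are encoded as word*32 + bit, so X86_FEATURE_AVX2 = 9*32+5 = 0x125. The new 0x100 test therefore routes it to CPUID leaf 7 (with ECX = 0) and the EBX result word, and the low five bits select bit 5 within it. A hypothetical check in the test harness would simply be:

	/* Hypothetical harness check, using the test-only boot_cpu_has() stub above. */
	if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX))
		printf("testing the AVX2 RAID-6 routines\n");
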
diff --git a/lib/random32.c b/lib/random32.c
index 938bde5876a..52280d5526b 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -42,13 +42,13 @@
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
/**
- * prandom32 - seeded pseudo-random number generator.
+ * prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
- * For more random results, use random32().
+ * For more random results, use prandom_u32().
*/
-u32 prandom32(struct rnd_state *state)
+u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
@@ -58,32 +58,81 @@ u32 prandom32(struct rnd_state *state)
return (state->s1 ^ state->s2 ^ state->s3);
}
-EXPORT_SYMBOL(prandom32);
+EXPORT_SYMBOL(prandom_u32_state);
/**
- * random32 - pseudo random number generator
+ * prandom_u32 - pseudo random number generator
*
* A 32 bit pseudo-random number is generated using a fast
* algorithm suitable for simulation. This algorithm is NOT
* considered safe for cryptographic use.
*/
-u32 random32(void)
+u32 prandom_u32(void)
{
unsigned long r;
struct rnd_state *state = &get_cpu_var(net_rand_state);
- r = prandom32(state);
+ r = prandom_u32_state(state);
put_cpu_var(state);
return r;
}
-EXPORT_SYMBOL(random32);
+EXPORT_SYMBOL(prandom_u32);
+
+/**
+ * prandom_bytes_state - get the requested number of pseudo-random bytes
+ *
+ * @state: pointer to state structure holding seeded state.
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+ * For more random results, use prandom_bytes().
+ */
+void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+{
+ unsigned char *p = buf;
+ int i;
+
+ for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
+ u32 random = prandom_u32_state(state);
+ int j;
+
+ for (j = 0; j < sizeof(u32); j++) {
+ p[i + j] = random;
+ random >>= BITS_PER_BYTE;
+ }
+ }
+ if (i < bytes) {
+ u32 random = prandom_u32_state(state);
+
+ for (; i < bytes; i++) {
+ p[i] = random;
+ random >>= BITS_PER_BYTE;
+ }
+ }
+}
+EXPORT_SYMBOL(prandom_bytes_state);
+
+/**
+ * prandom_bytes - get the requested number of pseudo-random bytes
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, int bytes)
+{
+ struct rnd_state *state = &get_cpu_var(net_rand_state);
+
+ prandom_bytes_state(state, buf, bytes);
+ put_cpu_var(state);
+}
+EXPORT_SYMBOL(prandom_bytes);
/**
- * srandom32 - add entropy to pseudo random number generator
+ * prandom_seed - add entropy to pseudo random number generator
* @seed: seed value
*
- * Add some additional seeding to the random32() pool.
+ * Add some additional seeding to the prandom pool.
*/
-void srandom32(u32 entropy)
+void prandom_seed(u32 entropy)
{
int i;
/*
@@ -95,13 +144,13 @@ void srandom32(u32 entropy)
state->s1 = __seed(state->s1 ^ entropy, 1);
}
}
-EXPORT_SYMBOL(srandom32);
+EXPORT_SYMBOL(prandom_seed);
/*
* Generate some initially weak seeding values to allow
- * to start the random32() engine.
+ * to start the prandom_u32() engine.
*/
-static int __init random32_init(void)
+static int __init prandom_init(void)
{
int i;
@@ -114,22 +163,22 @@ static int __init random32_init(void)
state->s3 = __seed(LCG(state->s2), 15);
/* "warm it up" */
- prandom32(state);
- prandom32(state);
- prandom32(state);
- prandom32(state);
- prandom32(state);
- prandom32(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
}
return 0;
}
-core_initcall(random32_init);
+core_initcall(prandom_init);
/*
* Generate better values after random number generator
* is fully initialized.
*/
-static int __init random32_reseed(void)
+static int __init prandom_reseed(void)
{
int i;
@@ -143,8 +192,8 @@ static int __init random32_reseed(void)
state->s3 = __seed(seeds[2], 15);
/* mix it in */
- prandom32(state);
+ prandom_u32_state(state);
}
return 0;
}
-late_initcall(random32_reseed);
+late_initcall(prandom_reseed);
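
As a usage sketch of the renamed API (illustrative only, not part of the patch): the global entry points become prandom_u32()/prandom_seed()/prandom_bytes(), while the *_state variants operate on a caller-owned struct rnd_state, as in the rbtree test below. Here some_entropy stands in for whatever u32 the caller wants to feed in:

	u32 r;
	u8 cookie[16];

	prandom_seed(some_entropy);		/* mix caller-supplied entropy into the pool */
	r = prandom_u32();			/* one 32-bit pseudo-random value */
	prandom_bytes(cookie, sizeof(cookie));	/* fill an arbitrary-length buffer */
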
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 268b23951fe..af38aedbd87 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -96,8 +96,8 @@ static void init(void)
{
int i;
for (i = 0; i < NODES; i++) {
- nodes[i].key = prandom32(&rnd);
- nodes[i].val = prandom32(&rnd);
+ nodes[i].key = prandom_u32_state(&rnd);
+ nodes[i].val = prandom_u32_state(&rnd);
}
}
@@ -118,7 +118,7 @@ static void check(int nr_nodes)
{
struct rb_node *rb;
int count = 0;
- int blacks;
+ int blacks = 0;
u32 prev_key = 0;
for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -155,7 +155,7 @@ static int rbtree_test_init(void)
printk(KERN_ALERT "rbtree testing");
- prandom32_seed(&rnd, 3141592653589793238ULL);
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3675452b23c..7874b01e816 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,7 +248,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int left;
#ifndef ARCH_HAS_SG_CHAIN
- BUG_ON(nents > max_ents);
+ if (WARN_ON_ONCE(nents > max_ents))
+ return -EINVAL;
#endif
memset(table, 0, sizeof(*table));
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f114bf6a8e1..196b06984de 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -57,7 +57,7 @@ int swiotlb_force;
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
-static char *io_tlb_start, *io_tlb_end;
+static phys_addr_t io_tlb_start, io_tlb_end;
/*
* The number of IO TLB blocks (in groups of 64) between io_tlb_start and
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
*/
static unsigned long io_tlb_overflow = 32*1024;
-static void *io_tlb_overflow_buffer;
+static phys_addr_t io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
@@ -125,27 +125,38 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
- phys_addr_t pstart, pend;
+ unsigned char *vstart, *vend;
- pstart = virt_to_phys(io_tlb_start);
- pend = virt_to_phys(io_tlb_end);
+ vstart = phys_to_virt(io_tlb_start);
+ vend = phys_to_virt(io_tlb_end);
printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
- (unsigned long long)pstart, (unsigned long long)pend - 1,
- bytes >> 20, io_tlb_start, io_tlb_end - 1);
+ (unsigned long long)io_tlb_start,
+ (unsigned long long)io_tlb_end,
+ bytes >> 20, vstart, vend - 1);
}
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
+ void *v_overflow_buffer;
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
- io_tlb_start = tlb;
+ io_tlb_start = __pa(tlb);
io_tlb_end = io_tlb_start + bytes;
/*
+ * Get the overflow emergency buffer
+ */
+ v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+ if (!v_overflow_buffer)
+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+ io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+ /*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
@@ -156,12 +167,6 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_index = 0;
io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
- /*
- * Get the overflow emergency buffer
- */
- io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
- if (!io_tlb_overflow_buffer)
- panic("Cannot allocate SWIOTLB overflow buffer!\n");
if (verbose)
swiotlb_print_info();
}
@@ -173,6 +178,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
static void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
+ unsigned char *vstart;
unsigned long bytes;
if (!io_tlb_nslabs) {
@@ -185,11 +191,11 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
- if (!io_tlb_start)
+ vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
+ if (!vstart)
panic("Cannot allocate SWIOTLB buffer");
- swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+ swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
}
void __init
@@ -207,6 +213,7 @@ int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long bytes, req_nslabs = io_tlb_nslabs;
+ unsigned char *vstart = NULL;
unsigned int order;
int rc = 0;
@@ -223,14 +230,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
- order);
- if (io_tlb_start)
+ vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+ order);
+ if (vstart)
break;
order--;
}
- if (!io_tlb_start) {
+ if (!vstart) {
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
@@ -239,9 +246,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
- rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
+ rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
- free_pages((unsigned long)io_tlb_start, order);
+ free_pages((unsigned long)vstart, order);
return rc;
}
@@ -249,14 +256,25 @@ int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
unsigned long i, bytes;
+ unsigned char *v_overflow_buffer;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
- io_tlb_start = tlb;
+ io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
- memset(io_tlb_start, 0, bytes);
+ memset(tlb, 0, bytes);
+
+ /*
+ * Get the overflow emergency buffer
+ */
+ v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+ get_order(io_tlb_overflow));
+ if (!v_overflow_buffer)
+ goto cleanup2;
+
+ io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
@@ -266,7 +284,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
- goto cleanup2;
+ goto cleanup3;
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
@@ -277,18 +295,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
get_order(io_tlb_nslabs *
sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
- goto cleanup3;
+ goto cleanup4;
memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
- /*
- * Get the overflow emergency buffer
- */
- io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
- get_order(io_tlb_overflow));
- if (!io_tlb_overflow_buffer)
- goto cleanup4;
-
swiotlb_print_info();
late_alloc = 1;
@@ -296,42 +306,42 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_orig_addr,
- get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
- io_tlb_orig_addr = NULL;
-cleanup3:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
+cleanup3:
+ free_pages((unsigned long)v_overflow_buffer,
+ get_order(io_tlb_overflow));
+ io_tlb_overflow_buffer = 0;
cleanup2:
- io_tlb_end = NULL;
- io_tlb_start = NULL;
+ io_tlb_end = 0;
+ io_tlb_start = 0;
io_tlb_nslabs = 0;
return -ENOMEM;
}
void __init swiotlb_free(void)
{
- if (!io_tlb_overflow_buffer)
+ if (!io_tlb_orig_addr)
return;
if (late_alloc) {
- free_pages((unsigned long)io_tlb_overflow_buffer,
+ free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
- free_pages((unsigned long)io_tlb_start,
+ free_pages((unsigned long)phys_to_virt(io_tlb_start),
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
- free_bootmem_late(__pa(io_tlb_overflow_buffer),
+ free_bootmem_late(io_tlb_overflow_buffer,
PAGE_ALIGN(io_tlb_overflow));
free_bootmem_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
free_bootmem_late(__pa(io_tlb_list),
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
- free_bootmem_late(__pa(io_tlb_start),
+ free_bootmem_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
@@ -339,21 +349,21 @@ void __init swiotlb_free(void)
static int is_swiotlb_buffer(phys_addr_t paddr)
{
- return paddr >= virt_to_phys(io_tlb_start) &&
- paddr < virt_to_phys(io_tlb_end);
+ return paddr >= io_tlb_start && paddr < io_tlb_end;
}
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
- enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long pfn = PFN_DOWN(phys);
+ unsigned long pfn = PFN_DOWN(orig_addr);
+ unsigned char *vaddr = phys_to_virt(tlb_addr);
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
- unsigned int offset = phys & ~PAGE_MASK;
+ unsigned int offset = orig_addr & ~PAGE_MASK;
char *buffer;
unsigned int sz = 0;
unsigned long flags;
@@ -364,32 +374,31 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn));
if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, buffer + offset, sz);
+ memcpy(vaddr, buffer + offset, sz);
else
- memcpy(buffer + offset, dma_addr, sz);
+ memcpy(buffer + offset, vaddr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags);
size -= sz;
pfn++;
- dma_addr += sz;
+ vaddr += sz;
offset = 0;
}
+ } else if (dir == DMA_TO_DEVICE) {
+ memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
- if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, phys_to_virt(phys), size);
- else
- memcpy(phys_to_virt(phys), dma_addr, size);
+ memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
- phys_addr_t phys, size_t size,
- enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr, size_t size,
+ enum dma_data_direction dir)
{
unsigned long flags;
- char *dma_addr;
+ phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
@@ -453,7 +462,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
- dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+ tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
@@ -471,7 +480,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- return NULL;
+ return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -481,11 +490,11 @@ found:
* needed.
*/
for (i = 0; i < nslots; i++)
- io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+ io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
- return dma_addr;
+ return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
@@ -493,11 +502,10 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
* Allocates bounce buffer and returns its kernel virtual address.
*/
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir)
+phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
- dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+ dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}
@@ -505,20 +513,19 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
/*
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
-void
-swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
- enum dma_data_direction dir)
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t phys = io_tlb_orig_addr[index];
+ int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
- if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+ if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -547,26 +554,27 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
-void
-swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
- enum dma_data_direction dir,
- enum dma_sync_target target)
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ enum dma_sync_target target)
{
- int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t phys = io_tlb_orig_addr[index];
+ int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[index];
- phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+ orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+ swiotlb_bounce(orig_addr, tlb_addr,
+ size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+ swiotlb_bounce(orig_addr, tlb_addr,
+ size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
@@ -589,12 +597,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
- /*
- * The allocated memory isn't reachable by the device.
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
+ if (ret) {
+ dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+ if (dev_addr + size - 1 > dma_mask) {
+ /*
+ * The allocated memory isn't reachable by the device.
+ */
+ free_pages((unsigned long) ret, order);
+ ret = NULL;
+ }
}
if (!ret) {
/*
@@ -602,25 +613,29 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
- ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
- if (!ret)
+ phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+ if (paddr == SWIOTLB_MAP_ERROR)
return NULL;
- }
- memset(ret, 0, size);
- dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+ ret = phys_to_virt(paddr);
+ dev_addr = phys_to_dma(hwdev, paddr);
- /* Confirm address can be DMA'd by device */
- if (dev_addr + size - 1 > dma_mask) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dma_mask,
- (unsigned long long)dev_addr);
+ /* Confirm address can be DMA'd by device */
+ if (dev_addr + size - 1 > dma_mask) {
+ printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)dma_mask,
+ (unsigned long long)dev_addr);
- /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
- return NULL;
+ /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+ swiotlb_tbl_unmap_single(hwdev, paddr,
+ size, DMA_TO_DEVICE);
+ return NULL;
+ }
}
+
*dma_handle = dev_addr;
+ memset(ret, 0, size);
+
return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
@@ -636,7 +651,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
- swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -677,9 +692,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
+ phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
- void *map;
BUG_ON(dir == DMA_NONE);
/*
@@ -690,23 +704,19 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
return dev_addr;
- /*
- * Oh well, have to allocate and map a bounce buffer.
- */
+ /* Oh well, have to allocate and map a bounce buffer. */
map = map_single(dev, phys, size, dir);
- if (!map) {
+ if (map == SWIOTLB_MAP_ERROR) {
swiotlb_full(dev, size, dir, 1);
- map = io_tlb_overflow_buffer;
+ return phys_to_dma(dev, io_tlb_overflow_buffer);
}
- dev_addr = swiotlb_virt_to_bus(dev, map);
+ dev_addr = phys_to_dma(dev, map);
- /*
- * Ensure that the address returned is DMA'ble
- */
+ /* Ensure that the address returned is DMA'ble */
if (!dma_capable(dev, dev_addr, size)) {
swiotlb_tbl_unmap_single(dev, map, size, dir);
- dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+ return phys_to_dma(dev, io_tlb_overflow_buffer);
}
return dev_addr;
@@ -729,7 +739,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
return;
}
@@ -773,8 +783,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
- swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
- target);
+ swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
return;
}
@@ -831,9 +840,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
if (swiotlb_force ||
!dma_capable(hwdev, dev_addr, sg->length)) {
- void *map = map_single(hwdev, sg_phys(sg),
- sg->length, dir);
- if (!map) {
+ phys_addr_t map = map_single(hwdev, sg_phys(sg),
+ sg->length, dir);
+ if (map == SWIOTLB_MAP_ERROR) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
@@ -842,7 +851,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
sgl[0].dma_length = 0;
return 0;
}
- sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+ sg->dma_address = phys_to_dma(hwdev, map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
@@ -925,7 +934,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
- return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
+ return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
@@ -938,6 +947,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
+ return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
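
A hedged caller-side sketch of the converted interface (mirroring the swiotlb_map_page() and map_sg hunks above; dev, phys, size and dir are placeholders): bounce-buffer slots are now identified by physical address, and failure is signalled with SWIOTLB_MAP_ERROR instead of a NULL virtual address:

	phys_addr_t tlb_addr;

	tlb_addr = map_single(dev, phys, size, dir);		/* returned a void * before this patch */
	if (tlb_addr == SWIOTLB_MAP_ERROR)			/* was a NULL check */
		return phys_to_dma(dev, io_tlb_overflow_buffer);

	return phys_to_dma(dev, tlb_addr);			/* bus address handed to the device */
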
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 39c99fea7c0..fab33a9c531 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -23,12 +23,12 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
+#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <net/addrconf.h>
#include <asm/page.h> /* for PAGE_SIZE */
-#include <asm/div64.h>
#include <asm/sections.h> /* for dereference_function_descriptor() */
#include "kstrtox.h"
@@ -38,6 +38,8 @@
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoull instead.
*/
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
@@ -61,6 +63,8 @@ EXPORT_SYMBOL(simple_strtoull);
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
@@ -73,6 +77,8 @@ EXPORT_SYMBOL(simple_strtoul);
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
@@ -88,6 +94,8 @@ EXPORT_SYMBOL(simple_strtol);
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
+ *
+ * This function is obsolete. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
@@ -1485,7 +1493,10 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
num = va_arg(args, long);
break;
case FORMAT_TYPE_SIZE_T:
- num = va_arg(args, size_t);
+ if (spec.flags & SIGN)
+ num = va_arg(args, ssize_t);
+ else
+ num = va_arg(args, size_t);
break;
case FORMAT_TYPE_PTRDIFF:
num = va_arg(args, ptrdiff_t);
@@ -2013,7 +2024,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
char digit;
int num = 0;
u8 qualifier;
- u8 base;
+ unsigned int base;
+ union {
+ long long s;
+ unsigned long long u;
+ } val;
s16 field_width;
bool is_sign;
@@ -2053,8 +2068,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
/* get field width */
field_width = -1;
- if (isdigit(*fmt))
+ if (isdigit(*fmt)) {
field_width = skip_atoi(&fmt);
+ if (field_width <= 0)
+ break;
+ }
/* get conversion qualifier */
qualifier = -1;
@@ -2154,58 +2172,61 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
|| (base == 0 && !isdigit(digit)))
break;
+ if (is_sign)
+ val.s = qualifier != 'L' ?
+ simple_strtol(str, &next, base) :
+ simple_strtoll(str, &next, base);
+ else
+ val.u = qualifier != 'L' ?
+ simple_strtoul(str, &next, base) :
+ simple_strtoull(str, &next, base);
+
+ if (field_width > 0 && next - str > field_width) {
+ if (base == 0)
+ _parse_integer_fixup_radix(str, &base);
+ while (next - str > field_width) {
+ if (is_sign)
+ val.s = div_s64(val.s, base);
+ else
+ val.u = div_u64(val.u, base);
+ --next;
+ }
+ }
+
switch (qualifier) {
case 'H': /* that's 'hh' in format */
- if (is_sign) {
- signed char *s = (signed char *)va_arg(args, signed char *);
- *s = (signed char)simple_strtol(str, &next, base);
- } else {
- unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
- *s = (unsigned char)simple_strtoul(str, &next, base);
- }
+ if (is_sign)
+ *va_arg(args, signed char *) = val.s;
+ else
+ *va_arg(args, unsigned char *) = val.u;
break;
case 'h':
- if (is_sign) {
- short *s = (short *)va_arg(args, short *);
- *s = (short)simple_strtol(str, &next, base);
- } else {
- unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
- *s = (unsigned short)simple_strtoul(str, &next, base);
- }
+ if (is_sign)
+ *va_arg(args, short *) = val.s;
+ else
+ *va_arg(args, unsigned short *) = val.u;
break;
case 'l':
- if (is_sign) {
- long *l = (long *)va_arg(args, long *);
- *l = simple_strtol(str, &next, base);
- } else {
- unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
- *l = simple_strtoul(str, &next, base);
- }
+ if (is_sign)
+ *va_arg(args, long *) = val.s;
+ else
+ *va_arg(args, unsigned long *) = val.u;
break;
case 'L':
- if (is_sign) {
- long long *l = (long long *)va_arg(args, long long *);
- *l = simple_strtoll(str, &next, base);
- } else {
- unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
- *l = simple_strtoull(str, &next, base);
- }
+ if (is_sign)
+ *va_arg(args, long long *) = val.s;
+ else
+ *va_arg(args, unsigned long long *) = val.u;
break;
case 'Z':
case 'z':
- {
- size_t *s = (size_t *)va_arg(args, size_t *);
- *s = (size_t)simple_strtoul(str, &next, base);
- }
- break;
+ *va_arg(args, size_t *) = val.u;
+ break;
default:
- if (is_sign) {
- int *i = (int *)va_arg(args, int *);
- *i = (int)simple_strtol(str, &next, base);
- } else {
- unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
- *i = (unsigned int)simple_strtoul(str, &next, base);
- }
+ if (is_sign)
+ *va_arg(args, int *) = val.s;
+ else
+ *va_arg(args, unsigned int *) = val.u;
break;
}
num++;
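
To illustrate the new field-width behaviour (hypothetical values, assuming the hunk above): the full number is parsed once and then divided back down by the detected base until it fits the field, so a conversion like the one below stops after the requested number of digits instead of swallowing them all:

	int a = 0, b = 0;

	/* With the field-width handling above this is expected to give a == 12, b == 345. */
	if (sscanf("12345", "%2d%d", &a, &b) == 2)
		pr_info("a=%d b=%d\n", a, b);
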
diff --git a/mm/Kconfig b/mm/Kconfig
index e6651c5de14..278e3ab1f16 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -143,6 +143,25 @@ config NO_BOOTMEM
config MEMORY_ISOLATION
boolean
+config MOVABLE_NODE
+ boolean "Enable to assign a node which has only movable memory"
+ depends on HAVE_MEMBLOCK
+ depends on NO_BOOTMEM
+ depends on X86_64
+ depends on NUMA
+ default n
+ help
+ Allow a node to have only movable memory. Pages used by the kernel,
+ such as direct mapping pages cannot be migrated. So the corresponding
+ memory device cannot be hotplugged. This option allows users to
+ online all the memory of a node as movable memory so that the whole
+ node can be hotplugged. Users who don't use the memory hotplug
+ feature are fine with this option on since they don't online memory
+ as movable.
+
+ Say Y here if you want to hotplug a whole node.
+ Say N here if you want kernel to use memory on all nodes evenly.
+
# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
diff --git a/mm/bootmem.c b/mm/bootmem.c
index ecc45958ac0..1324cd74fae 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -229,6 +229,22 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
return count;
}
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+ struct zone *z;
+
+ /*
+ * In free_area_init_core(), highmem zone's managed_pages is set to
+ * present_pages, and bootmem allocator doesn't allocate from highmem
+ * zones. So there's no need to recalculate managed_pages because all
+ * highmem pages will be managed by the buddy system. Here highmem
+ * zone also includes highmem movable zone.
+ */
+ for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+ if (!is_highmem(z))
+ z->managed_pages = 0;
+}
+
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
* @pgdat: node to be released
@@ -238,6 +254,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
+ reset_node_lowmem_managed_pages(pgdat);
return free_all_bootmem_core(pgdat->bdata);
}
@@ -250,6 +267,10 @@ unsigned long __init free_all_bootmem(void)
{
unsigned long total_pages = 0;
bootmem_data_t *bdata;
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_lowmem_managed_pages(pgdat);
list_for_each_entry(bdata, &bdata_list, list)
total_pages += free_all_bootmem_core(bdata);
@@ -439,12 +460,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
- int flags)
-{
- return reserve_bootmem(phys, len, flags);
-}
-
static unsigned long __init align_idx(struct bootmem_data *bdata,
unsigned long idx, unsigned long step)
{
@@ -575,27 +590,6 @@ find_block:
return NULL;
}
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
- unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
-{
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
- {
- bootmem_data_t *p_bdata;
-
- p_bdata = bootmem_arch_preferred_node(bdata, size, align,
- goal, limit);
- if (p_bdata)
- return alloc_bootmem_bdata(p_bdata, size, align,
- goal, limit);
- }
-#endif
- return NULL;
-}
-
static void * __init alloc_bootmem_core(unsigned long size,
unsigned long align,
unsigned long goal,
@@ -604,9 +598,8 @@ static void * __init alloc_bootmem_core(unsigned long size,
bootmem_data_t *bdata;
void *region;
- region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
- if (region)
- return region;
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
list_for_each_entry(bdata, &bdata_list, list) {
if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
@@ -704,11 +697,9 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
{
void *ptr;
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc(size, GFP_NOWAIT);
again:
- ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
- align, goal, limit);
- if (ptr)
- return ptr;
/* do not panic in alloc_bootmem_bdata() */
if (limit && goal + size > limit)
diff --git a/mm/compaction.c b/mm/compaction.c
index d24dd2d7bad..6b807e46649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -17,6 +17,21 @@
#include <linux/balloon_compaction.h>
#include "internal.h"
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+ count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+ count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
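count_compact_event()/count_compact_events() above follow the usual compile-out idiom: real counter updates when CONFIG_COMPACTION is built in, empty do-while(0) statements otherwise, so callers stay free of #ifdefs. A standalone sketch of the idiom; FEATURE_STATS and the stat names are illustrative:

#include <stdio.h>

#define FEATURE_STATS 1		/* flip to 0 to compile the stubs instead */

enum stat_item { STAT_SCANNED, STAT_ISOLATED, NR_STATS };

#if FEATURE_STATS
static long stats[NR_STATS];

static inline void count_event(enum stat_item item)
{
	stats[item]++;
}

static inline void count_events(enum stat_item item, long delta)
{
	stats[item] += delta;
}
#else
/* do-while(0) keeps the stubs usable as single statements after 'if' */
#define count_event(item)		do { } while (0)
#define count_events(item, delta)	do { } while (0)
#endif

int main(void)
{
	count_event(STAT_SCANNED);
	count_events(STAT_ISOLATED, 32);
#if FEATURE_STATS
	printf("scanned=%ld isolated=%ld\n",
	       stats[STAT_SCANNED], stats[STAT_ISOLATED]);
#endif
	return 0;
}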
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
@@ -215,60 +230,6 @@ static bool suitable_migration_target(struct page *page)
return false;
}
-static void compact_capture_page(struct compact_control *cc)
-{
- unsigned long flags;
- int mtype, mtype_low, mtype_high;
-
- if (!cc->page || *cc->page)
- return;
-
- /*
- * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
- * regardless of the migratetype of the freelist is is captured from.
- * This is fine because the order for a high-order MIGRATE_MOVABLE
- * allocation is typically at least a pageblock size and overall
- * fragmentation is not impaired. Other allocation types must
- * capture pages from their own migratelist because otherwise they
- * could pollute other pageblocks like MIGRATE_MOVABLE with
- * difficult to move pages and making fragmentation worse overall.
- */
- if (cc->migratetype == MIGRATE_MOVABLE) {
- mtype_low = 0;
- mtype_high = MIGRATE_PCPTYPES;
- } else {
- mtype_low = cc->migratetype;
- mtype_high = cc->migratetype + 1;
- }
-
- /* Speculatively examine the free lists without zone lock */
- for (mtype = mtype_low; mtype < mtype_high; mtype++) {
- int order;
- for (order = cc->order; order < MAX_ORDER; order++) {
- struct page *page;
- struct free_area *area;
- area = &(cc->zone->free_area[order]);
- if (list_empty(&area->free_list[mtype]))
- continue;
-
- /* Take the lock and attempt capture of the page */
- if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
- return;
- if (!list_empty(&area->free_list[mtype])) {
- page = list_entry(area->free_list[mtype].next,
- struct page, lru);
- if (capture_free_page(page, cc->order, mtype)) {
- spin_unlock_irqrestore(&cc->zone->lock,
- flags);
- *cc->page = page;
- return;
- }
- }
- spin_unlock_irqrestore(&cc->zone->lock, flags);
- }
- }
-}
-
/*
* Isolate free pages onto a private freelist. Caller must hold zone->lock.
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -357,6 +318,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated, false);
+ count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+ if (total_isolated)
+ count_compact_events(COMPACTISOLATED, total_isolated);
return total_isolated;
}
@@ -663,6 +627,10 @@ next_pageblock:
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+ count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+ if (nr_isolated)
+ count_compact_events(COMPACTISOLATED, nr_isolated);
+
return low_pfn;
}
@@ -953,6 +921,60 @@ unsigned long compaction_suitable(struct zone *zone, int order)
return COMPACT_CONTINUE;
}
+static void compact_capture_page(struct compact_control *cc)
+{
+ unsigned long flags;
+ int mtype, mtype_low, mtype_high;
+
+ if (!cc->page || *cc->page)
+ return;
+
+ /*
+ * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
+ * regardless of the migratetype of the freelist it is captured from.
+ * This is fine because the order for a high-order MIGRATE_MOVABLE
+ * allocation is typically at least a pageblock size and overall
+ * fragmentation is not impaired. Other allocation types must
+ * capture pages from their own migratetype list because otherwise they
+ * could pollute other pageblocks, such as MIGRATE_MOVABLE ones, with
+ * difficult-to-move pages, making fragmentation worse overall.
+ */
+ if (cc->migratetype == MIGRATE_MOVABLE) {
+ mtype_low = 0;
+ mtype_high = MIGRATE_PCPTYPES;
+ } else {
+ mtype_low = cc->migratetype;
+ mtype_high = cc->migratetype + 1;
+ }
+
+ /* Speculatively examine the free lists without zone lock */
+ for (mtype = mtype_low; mtype < mtype_high; mtype++) {
+ int order;
+ for (order = cc->order; order < MAX_ORDER; order++) {
+ struct page *page;
+ struct free_area *area;
+ area = &(cc->zone->free_area[order]);
+ if (list_empty(&area->free_list[mtype]))
+ continue;
+
+ /* Take the lock and attempt capture of the page */
+ if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
+ return;
+ if (!list_empty(&area->free_list[mtype])) {
+ page = list_entry(area->free_list[mtype].next,
+ struct page, lru);
+ if (capture_free_page(page, cc->order, mtype)) {
+ spin_unlock_irqrestore(&cc->zone->lock,
+ flags);
+ *cc->page = page;
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+ }
+ }
+}
+
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
int ret;
@@ -1015,14 +1037,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
(unsigned long)cc, false,
- cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+ cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+ MR_COMPACTION);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
- count_vm_event(COMPACTBLOCKS);
- count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
- if (nr_remaining)
- count_vm_events(COMPACTPAGEFAILED, nr_remaining);
trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
nr_remaining);
@@ -1105,7 +1124,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
if (!order || !may_enter_fs || !may_perform_io)
return rc;
- count_vm_event(COMPACTSTALL);
+ count_compact_event(COMPACTSTALL);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
diff --git a/mm/highmem.c b/mm/highmem.c
index d999077431d..b32b70cdaed 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -105,6 +105,7 @@ struct page *kmap_to_page(void *vaddr)
return virt_to_page(addr);
}
+EXPORT_SYMBOL(kmap_to_page);
static void flush_all_zero_pkmaps(void)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5f902e20e8c..9e894edc781 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -12,12 +12,15 @@
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
+#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
+#include <linux/migrate.h>
+
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
@@ -37,7 +40,8 @@ unsigned long transparent_hugepage_flags __read_mostly =
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
@@ -159,6 +163,77 @@ static int start_khugepaged(void)
return err;
}
+static atomic_t huge_zero_refcount;
+static unsigned long huge_zero_pfn __read_mostly;
+
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
+ return zero_pfn && pfn == zero_pfn;
+}
+
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return is_huge_zero_pfn(pmd_pfn(pmd));
+}
+
+static unsigned long get_huge_zero_page(void)
+{
+ struct page *zero_page;
+retry:
+ if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
+ return ACCESS_ONCE(huge_zero_pfn);
+
+ zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+ HPAGE_PMD_ORDER);
+ if (!zero_page) {
+ count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
+ return 0;
+ }
+ count_vm_event(THP_ZERO_PAGE_ALLOC);
+ preempt_disable();
+ if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
+ preempt_enable();
+ __free_page(zero_page);
+ goto retry;
+ }
+
+ /* Take an additional reference here; it will be put back by the shrinker */
+ atomic_set(&huge_zero_refcount, 2);
+ preempt_enable();
+ return ACCESS_ONCE(huge_zero_pfn);
+}
+
+static void put_huge_zero_page(void)
+{
+ /*
+ * The counter should never reach zero here; only the shrinker can put
+ * the last reference.
+ */
+ BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
+}
+
+static int shrink_huge_zero_page(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ if (!sc->nr_to_scan)
+ /* we can free the zero page only if the last reference remains */
+ return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+
+ if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
+ BUG_ON(zero_pfn == 0);
+ __free_page(__pfn_to_page(zero_pfn));
+ }
+
+ return 0;
+}
+
+static struct shrinker huge_zero_page_shrinker = {
+ .shrink = shrink_huge_zero_page,
+ .seeks = DEFAULT_SEEKS,
+};
+
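The huge zero page above is allocated lazily and reference counted: readers take a reference with atomic_inc_not_zero(), the first allocator publishes the pfn with cmpxchg() and keeps one extra reference for the shrinker, and the shrinker frees the page only when that extra reference is the last one left. A self-contained user-space sketch of the same lifecycle using C11 atomics; get_resource()/put_resource()/shrink_resource() are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_long refcount;		/* 0 means "not allocated yet" */
static _Atomic(void *) resource;	/* lazily allocated shared buffer */

static void *get_resource(void)
{
	void *buf, *expected;
	long old;

retry:
	/* fast path: take a reference only if the resource already exists */
	old = atomic_load(&refcount);
	while (old > 0) {
		if (atomic_compare_exchange_weak(&refcount, &old, old + 1))
			return atomic_load(&resource);
	}

	buf = calloc(1, 4096);
	if (!buf)
		return NULL;

	expected = NULL;
	if (!atomic_compare_exchange_strong(&resource, &expected, buf)) {
		free(buf);		/* someone else won the race */
		goto retry;
	}
	/* one reference for the caller plus one kept for the "shrinker" */
	atomic_store(&refcount, 2);
	return buf;
}

static void put_resource(void)
{
	/* callers never drop the last reference; the shrinker does */
	long old = atomic_fetch_sub(&refcount, 1);
	if (old <= 1)
		abort();
}

static void shrink_resource(void)
{
	long expected = 1;

	if (atomic_compare_exchange_strong(&refcount, &expected, 0)) {
		void *buf = atomic_exchange(&resource, NULL);
		free(buf);
	}
}

int main(void)
{
	void *r = get_resource();

	printf("got %p\n", r);
	put_resource();		/* the shrinker's reference is still held */
	shrink_resource();	/* now the buffer is actually freed */
	return 0;
}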
#ifdef CONFIG_SYSFS
static ssize_t double_flag_show(struct kobject *kobj,
@@ -284,6 +359,20 @@ static ssize_t defrag_store(struct kobject *kobj,
static struct kobj_attribute defrag_attr =
__ATTR(defrag, 0644, defrag_show, defrag_store);
+static ssize_t use_zero_page_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return single_flag_show(kobj, attr, buf,
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static ssize_t use_zero_page_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ return single_flag_store(kobj, attr, buf, count,
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+}
+static struct kobj_attribute use_zero_page_attr =
+ __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -305,6 +394,7 @@ static struct kobj_attribute debug_cow_attr =
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
+ &use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
&debug_cow_attr.attr,
#endif
@@ -484,19 +574,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
- printk(KERN_ERR "hugepage: failed kobject create\n");
+ printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
return -ENOMEM;
}
err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed register hugeage group\n");
+ printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
goto delete_obj;
}
err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
if (err) {
- printk(KERN_ERR "hugepage: failed register hugeage group\n");
+ printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
goto remove_hp_group;
}
@@ -550,6 +640,8 @@ static int __init hugepage_init(void)
goto out;
}
+ register_shrinker(&huge_zero_page_shrinker);
+
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
@@ -599,7 +691,7 @@ out:
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
-static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
@@ -678,6 +770,22 @@ static inline struct page *alloc_hugepage(int defrag)
}
#endif
+static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
+ unsigned long zero_pfn)
+{
+ pmd_t entry;
+ if (!pmd_none(*pmd))
+ return false;
+ entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
+ entry = pmd_wrprotect(entry);
+ entry = pmd_mkhuge(entry);
+ set_pmd_at(mm, haddr, pmd, entry);
+ pgtable_trans_huge_deposit(mm, pgtable);
+ mm->nr_ptes++;
+ return true;
+}
+
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
unsigned int flags)
@@ -691,6 +799,30 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma)))
return VM_FAULT_OOM;
+ if (!(flags & FAULT_FLAG_WRITE) &&
+ transparent_hugepage_use_zero_page()) {
+ pgtable_t pgtable;
+ unsigned long zero_pfn;
+ bool set;
+ pgtable = pte_alloc_one(mm, haddr);
+ if (unlikely(!pgtable))
+ return VM_FAULT_OOM;
+ zero_pfn = get_huge_zero_page();
+ if (unlikely(!zero_pfn)) {
+ pte_free(mm, pgtable);
+ count_vm_event(THP_FAULT_FALLBACK);
+ goto out;
+ }
+ spin_lock(&mm->page_table_lock);
+ set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+ zero_pfn);
+ spin_unlock(&mm->page_table_lock);
+ if (!set) {
+ pte_free(mm, pgtable);
+ put_huge_zero_page();
+ }
+ return 0;
+ }
page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
if (unlikely(!page)) {
@@ -717,7 +849,8 @@ out:
* run pte_offset_map on the pmd, if an huge pmd could
* materialize from under us from a different thread.
*/
- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+ if (unlikely(pmd_none(*pmd)) &&
+ unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
@@ -755,6 +888,26 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_free(dst_mm, pgtable);
goto out_unlock;
}
+ /*
+ * mm->page_table_lock is enough to be sure that the huge zero pmd is not
+ * under splitting, since we never split the page itself, only the pmd
+ * into a page table.
+ */
+ if (is_huge_zero_pmd(pmd)) {
+ unsigned long zero_pfn;
+ bool set;
+ /*
+ * get_huge_zero_page() will never allocate a new page here,
+ * since we already have a zero page to copy. It just takes a
+ * reference.
+ */
+ zero_pfn = get_huge_zero_page();
+ set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+ zero_pfn);
+ BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
+ ret = 0;
+ goto out_unlock;
+ }
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(&src_mm->page_table_lock);
@@ -806,6 +959,80 @@ unlock:
spin_unlock(&mm->page_table_lock);
}
+static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
+{
+ pgtable_t pgtable;
+ pmd_t _pmd;
+ struct page *page;
+ int i, ret = 0;
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (!page) {
+ ret |= VM_FAULT_OOM;
+ goto out;
+ }
+
+ if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+ put_page(page);
+ ret |= VM_FAULT_OOM;
+ goto out;
+ }
+
+ clear_user_highpage(page, address);
+ __SetPageUptodate(page);
+
+ mmun_start = haddr;
+ mmun_end = haddr + HPAGE_PMD_SIZE;
+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
+ spin_lock(&mm->page_table_lock);
+ if (unlikely(!pmd_same(*pmd, orig_pmd)))
+ goto out_free_page;
+
+ pmdp_clear_flush(vma, haddr, pmd);
+ /* leave pmd empty until pte is filled */
+
+ pgtable = pgtable_trans_huge_withdraw(mm);
+ pmd_populate(mm, &_pmd, pgtable);
+
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ if (haddr == (address & PAGE_MASK)) {
+ entry = mk_pte(page, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ page_add_new_anon_rmap(page, vma, haddr);
+ } else {
+ entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+ entry = pte_mkspecial(entry);
+ }
+ pte = pte_offset_map(&_pmd, haddr);
+ VM_BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, haddr, pte, entry);
+ pte_unmap(pte);
+ }
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pgtable);
+ spin_unlock(&mm->page_table_lock);
+ put_huge_zero_page();
+ inc_mm_counter(mm, MM_ANONPAGES);
+
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+
+ ret |= VM_FAULT_WRITE;
+out:
+ return ret;
+out_free_page:
+ spin_unlock(&mm->page_table_lock);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ mem_cgroup_uncharge_page(page);
+ put_page(page);
+ goto out;
+}
+
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
@@ -912,19 +1139,21 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
int ret = 0;
- struct page *page, *new_page;
+ struct page *page = NULL, *new_page;
unsigned long haddr;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
VM_BUG_ON(!vma->anon_vma);
+ haddr = address & HPAGE_PMD_MASK;
+ if (is_huge_zero_pmd(orig_pmd))
+ goto alloc;
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_unlock;
page = pmd_page(orig_pmd);
VM_BUG_ON(!PageCompound(page) || !PageHead(page));
- haddr = address & HPAGE_PMD_MASK;
if (page_mapcount(page) == 1) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
@@ -936,7 +1165,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
get_page(page);
spin_unlock(&mm->page_table_lock);
-
+alloc:
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow())
new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
@@ -946,24 +1175,34 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!new_page)) {
count_vm_event(THP_FAULT_FALLBACK);
- ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
- pmd, orig_pmd, page, haddr);
- if (ret & VM_FAULT_OOM)
- split_huge_page(page);
- put_page(page);
+ if (is_huge_zero_pmd(orig_pmd)) {
+ ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
+ address, pmd, orig_pmd, haddr);
+ } else {
+ ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+ pmd, orig_pmd, page, haddr);
+ if (ret & VM_FAULT_OOM)
+ split_huge_page(page);
+ put_page(page);
+ }
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
- split_huge_page(page);
- put_page(page);
+ if (page) {
+ split_huge_page(page);
+ put_page(page);
+ }
ret |= VM_FAULT_OOM;
goto out;
}
- copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+ if (is_huge_zero_pmd(orig_pmd))
+ clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
+ else
+ copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
mmun_start = haddr;
@@ -971,7 +1210,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
spin_lock(&mm->page_table_lock);
- put_page(page);
+ if (page)
+ put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(new_page);
@@ -979,14 +1219,19 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_mn;
} else {
pmd_t entry;
- VM_BUG_ON(!PageHead(page));
entry = mk_huge_pmd(new_page, vma);
pmdp_clear_flush(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, pmd);
- page_remove_rmap(page);
- put_page(page);
+ if (is_huge_zero_pmd(orig_pmd)) {
+ add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ put_huge_zero_page();
+ } else {
+ VM_BUG_ON(!PageHead(page));
+ page_remove_rmap(page);
+ put_page(page);
+ }
ret |= VM_FAULT_WRITE;
}
spin_unlock(&mm->page_table_lock);
@@ -1044,6 +1289,81 @@ out:
return page;
}
+/* NUMA hinting page fault entry point for trans huge pmds */
+int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+ struct page *page;
+ unsigned long haddr = addr & HPAGE_PMD_MASK;
+ int target_nid;
+ int current_nid = -1;
+ bool migrated;
+ bool page_locked = false;
+
+ spin_lock(&mm->page_table_lock);
+ if (unlikely(!pmd_same(pmd, *pmdp)))
+ goto out_unlock;
+
+ page = pmd_page(pmd);
+ get_page(page);
+ current_nid = page_to_nid(page);
+ count_vm_numa_event(NUMA_HINT_FAULTS);
+ if (current_nid == numa_node_id())
+ count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+ target_nid = mpol_misplaced(page, vma, haddr);
+ if (target_nid == -1) {
+ put_page(page);
+ goto clear_pmdnuma;
+ }
+
+ /* Acquire the page lock to serialise THP migrations */
+ spin_unlock(&mm->page_table_lock);
+ lock_page(page);
+ page_locked = true;
+
+ /* Confirm the PMD did not change while the page_table_lock was dropped */
+ spin_lock(&mm->page_table_lock);
+ if (unlikely(!pmd_same(pmd, *pmdp))) {
+ unlock_page(page);
+ put_page(page);
+ goto out_unlock;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ /* Migrate the THP to the requested node */
+ migrated = migrate_misplaced_transhuge_page(mm, vma,
+ pmdp, pmd, addr,
+ page, target_nid);
+ if (migrated)
+ current_nid = target_nid;
+ else {
+ spin_lock(&mm->page_table_lock);
+ if (unlikely(!pmd_same(pmd, *pmdp))) {
+ unlock_page(page);
+ goto out_unlock;
+ }
+ goto clear_pmdnuma;
+ }
+
+ task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+ return 0;
+
+clear_pmdnuma:
+ pmd = pmd_mknonnuma(pmd);
+ set_pmd_at(mm, haddr, pmdp, pmd);
+ VM_BUG_ON(pmd_numa(*pmdp));
+ update_mmu_cache_pmd(vma, addr, pmdp);
+ if (page_locked)
+ unlock_page(page);
+
+out_unlock:
+ spin_unlock(&mm->page_table_lock);
+ if (current_nid != -1)
+ task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+ return 0;
+}
+
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
{
@@ -1055,15 +1375,21 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t orig_pmd;
pgtable = pgtable_trans_huge_withdraw(tlb->mm);
orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
- page = pmd_page(orig_pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
- page_remove_rmap(page);
- VM_BUG_ON(page_mapcount(page) < 0);
- add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
- VM_BUG_ON(!PageHead(page));
- tlb->mm->nr_ptes--;
- spin_unlock(&tlb->mm->page_table_lock);
- tlb_remove_page(tlb, page);
+ if (is_huge_zero_pmd(orig_pmd)) {
+ tlb->mm->nr_ptes--;
+ spin_unlock(&tlb->mm->page_table_lock);
+ put_huge_zero_page();
+ } else {
+ page = pmd_page(orig_pmd);
+ page_remove_rmap(page);
+ VM_BUG_ON(page_mapcount(page) < 0);
+ add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ VM_BUG_ON(!PageHead(page));
+ tlb->mm->nr_ptes--;
+ spin_unlock(&tlb->mm->page_table_lock);
+ tlb_remove_page(tlb, page);
+ }
pte_free(tlb->mm, pgtable);
ret = 1;
}
@@ -1126,7 +1452,7 @@ out:
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot)
+ unsigned long addr, pgprot_t newprot, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 0;
@@ -1134,7 +1460,18 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (__pmd_trans_huge_lock(pmd, vma) == 1) {
pmd_t entry;
entry = pmdp_get_and_clear(mm, addr, pmd);
- entry = pmd_modify(entry, newprot);
+ if (!prot_numa) {
+ entry = pmd_modify(entry, newprot);
+ BUG_ON(pmd_write(entry));
+ } else {
+ struct page *page = pmd_page(*pmd);
+
+ /* only check non-shared pages */
+ if (page_mapcount(page) == 1 &&
+ !pmd_numa(*pmd)) {
+ entry = pmd_mknuma(entry);
+ }
+ }
set_pmd_at(mm, addr, pmd, entry);
spin_unlock(&vma->vm_mm->page_table_lock);
ret = 1;
@@ -1224,7 +1561,7 @@ static int __split_huge_page_splitting(struct page *page,
* We can't temporarily set the pmd to null in order
* to split it, the pmd must remain marked huge at all
* times or the VM won't take the pmd_trans_huge paths
- * and it won't wait on the anon_vma->root->mutex to
+ * and it won't wait on the anon_vma->root->rwsem to
* serialize against split_huge_page*.
*/
pmdp_splitting_flush(vma, address, pmd);
@@ -1315,6 +1652,7 @@ static void __split_huge_page_refcount(struct page *page)
page_tail->mapping = page->mapping;
page_tail->index = page->index + i;
+ page_xchg_last_nid(page_tail, page_last_nid(page));
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
@@ -1382,6 +1720,8 @@ static int __split_huge_page_map(struct page *page,
BUG_ON(page_mapcount(page) != 1);
if (!pmd_young(*pmd))
entry = pte_mkold(entry);
+ if (pmd_numa(*pmd))
+ entry = pte_mknuma(entry);
pte = pte_offset_map(&_pmd, haddr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
@@ -1424,7 +1764,7 @@ static int __split_huge_page_map(struct page *page,
return ret;
}
-/* must be called with anon_vma->root->mutex hold */
+/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
struct anon_vma *anon_vma)
{
@@ -1477,8 +1817,9 @@ int split_huge_page(struct page *page)
struct anon_vma *anon_vma;
int ret = 1;
+ BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
BUG_ON(!PageAnon(page));
- anon_vma = page_lock_anon_vma(page);
+ anon_vma = page_lock_anon_vma_read(page);
if (!anon_vma)
goto out;
ret = 0;
@@ -1491,7 +1832,7 @@ int split_huge_page(struct page *page)
BUG_ON(PageCompound(page));
out_unlock:
- page_unlock_anon_vma(anon_vma);
+ page_unlock_anon_vma_read(anon_vma);
out:
return ret;
}
@@ -1983,7 +2324,7 @@ static void collapse_huge_page(struct mm_struct *mm,
if (pmd_trans_huge(*pmd))
goto out;
- anon_vma_lock(vma->anon_vma);
+ anon_vma_lock_write(vma->anon_vma);
pte = pte_offset_map(pmd, address);
ptl = pte_lockptr(mm, pmd);
@@ -2336,19 +2677,65 @@ static int khugepaged(void *none)
return 0;
}
-void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ unsigned long haddr, pmd_t *pmd)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgtable_t pgtable;
+ pmd_t _pmd;
+ int i;
+
+ pmdp_clear_flush(vma, haddr, pmd);
+ /* leave pmd empty until pte is filled */
+
+ pgtable = pgtable_trans_huge_withdraw(mm);
+ pmd_populate(mm, &_pmd, pgtable);
+
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+ entry = pte_mkspecial(entry);
+ pte = pte_offset_map(&_pmd, haddr);
+ VM_BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, haddr, pte, entry);
+ pte_unmap(pte);
+ }
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pgtable);
+ put_huge_zero_page();
+}
+
+void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd)
{
struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long haddr = address & HPAGE_PMD_MASK;
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+
+ BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
+ mmun_start = haddr;
+ mmun_end = haddr + HPAGE_PMD_SIZE;
+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(&mm->page_table_lock);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ return;
+ }
+ if (is_huge_zero_pmd(*pmd)) {
+ __split_huge_zero_page_pmd(vma, haddr, pmd);
+ spin_unlock(&mm->page_table_lock);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
return;
}
page = pmd_page(*pmd);
VM_BUG_ON(!page_count(page));
get_page(page);
spin_unlock(&mm->page_table_lock);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
split_huge_page(page);
@@ -2356,6 +2743,16 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
BUG_ON(pmd_trans_huge(*pmd));
}
+void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd)
+{
+ struct vm_area_struct *vma;
+
+ vma = find_vma(mm, address);
+ BUG_ON(vma == NULL);
+ split_huge_page_pmd(vma, address, pmd);
+}
+
static void split_huge_page_address(struct mm_struct *mm,
unsigned long address)
{
@@ -2370,7 +2767,7 @@ static void split_huge_page_address(struct mm_struct *mm,
* Caller holds the mmap_sem write mode, so a huge pmd cannot
* materialize from under us.
*/
- split_huge_page_pmd(mm, pmd);
+ split_huge_page_pmd_mm(mm, address, pmd);
}
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1ef2cd4ae3c..4f3ea0b1e57 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1,6 +1,6 @@
/*
* Generic hugetlb support.
- * (C) William Irwin, April 2004
+ * (C) Nadia Yvette Chambers, April 2004
*/
#include <linux/list.h>
#include <linux/init.h>
@@ -1057,7 +1057,7 @@ static void return_unused_surplus_pages(struct hstate *h,
* on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+ if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
break;
}
}
@@ -1180,14 +1180,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+ int nr_nodes = nodes_weight(node_states[N_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
NODE_DATA(hstate_next_node_to_alloc(h,
- &node_states[N_HIGH_MEMORY])),
+ &node_states[N_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
@@ -1259,7 +1259,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
if (!alloc_bootmem_huge_page(h))
break;
} else if (!alloc_fresh_huge_page(h,
- &node_states[N_HIGH_MEMORY]))
+ &node_states[N_MEMORY]))
break;
}
h->max_huge_pages = i;
@@ -1527,7 +1527,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
- nodes_allowed = &node_states[N_HIGH_MEMORY];
+ nodes_allowed = &node_states[N_MEMORY];
}
} else if (nodes_allowed) {
/*
@@ -1537,11 +1537,11 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
init_nodemask_of_node(nodes_allowed, nid);
} else
- nodes_allowed = &node_states[N_HIGH_MEMORY];
+ nodes_allowed = &node_states[N_MEMORY];
h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
- if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ if (nodes_allowed != &node_states[N_MEMORY])
NODEMASK_FREE(nodes_allowed);
return len;
@@ -1844,7 +1844,7 @@ static void hugetlb_register_all_nodes(void)
{
int nid;
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
struct node *node = node_devices[nid];
if (node->dev.id == nid)
hugetlb_register_node(node);
@@ -1906,14 +1906,12 @@ static int __init hugetlb_init(void)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
hugetlb_init_hstates();
-
gather_bootmem_prealloc();
-
report_hugepages();
hugetlb_sysfs_init();
-
hugetlb_register_all_nodes();
+ hugetlb_cgroup_file_init();
return 0;
}
@@ -1939,17 +1937,10 @@ void __init hugetlb_add_hstate(unsigned order)
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
INIT_LIST_HEAD(&h->hugepage_activelist);
- h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
- h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
+ h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
+ h->next_nid_to_free = first_node(node_states[N_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
- /*
- * Add cgroup control files only if the huge page consists
- * of more than two normal pages. This is because we use
- * page[2].lru.next for storing cgoup details.
- */
- if (order >= HUGETLB_CGROUP_MIN_ORDER)
- hugetlb_cgroup_file_init(hugetlb_max_hstate - 1);
parsed_hstate = h;
}
@@ -2035,11 +2026,11 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
- nodes_allowed = &node_states[N_HIGH_MEMORY];
+ nodes_allowed = &node_states[N_MEMORY];
}
h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
- if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ if (nodes_allowed != &node_states[N_MEMORY])
NODEMASK_FREE(nodes_allowed);
}
out:
@@ -2386,8 +2377,10 @@ again:
/*
* HWPoisoned hugepage is already unmapped and dropped reference
*/
- if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+ pte_clear(mm, address, ptep);
continue;
+ }
page = pte_page(pte);
/*
@@ -3014,7 +3007,7 @@ same_page:
return i ? i : -EFAULT;
}
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
@@ -3022,6 +3015,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
pte_t *ptep;
pte_t pte;
struct hstate *h = hstate_vma(vma);
+ unsigned long pages = 0;
BUG_ON(address >= end);
flush_cache_range(vma, address, end);
@@ -3032,12 +3026,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
- if (huge_pmd_unshare(mm, &address, ptep))
+ if (huge_pmd_unshare(mm, &address, ptep)) {
+ pages++;
continue;
+ }
if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
set_huge_pte_at(mm, address, ptep, pte);
+ pages++;
}
}
spin_unlock(&mm->page_table_lock);
@@ -3049,6 +3046,8 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
*/
flush_tlb_range(vma, start, end);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+
+ return pages << h->order;
}
int hugetlb_reserve_pages(struct inode *inode,
@@ -3170,7 +3169,13 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
spin_lock(&hugetlb_lock);
if (is_hugepage_on_freelist(hpage)) {
- list_del(&hpage->lru);
+ /*
+ * Hwpoisoned hugepage isn't linked to activelist or freelist,
+ * but dangling hpage->lru can trigger list-debug warnings
+ * (this happens when we call unpoison_memory() on it),
+ * so let it point to itself with list_del_init().
+ */
+ list_del_init(&hpage->lru);
set_page_refcounted(hpage);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index b5bde7a5c01..9cea7de22ff 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -333,7 +333,7 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize)
return buf;
}
-int __init hugetlb_cgroup_file_init(int idx)
+static void __init __hugetlb_cgroup_file_init(int idx)
{
char buf[32];
struct cftype *cft;
@@ -375,7 +375,22 @@ int __init hugetlb_cgroup_file_init(int idx)
WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
- return 0;
+ return;
+}
+
+void __init hugetlb_cgroup_file_init(void)
+{
+ struct hstate *h;
+
+ for_each_hstate(h) {
+ /*
+ * Add cgroup control files only if the huge page consists
+ * of more than two normal pages. This is because we use
+ * page[2].lru.next for storing cgroup details.
+ */
+ if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
+ __hugetlb_cgroup_file_init(hstate_index(h));
+ }
}
/*
diff --git a/mm/internal.h b/mm/internal.h
index 52d1fa95719..d597f94cc20 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -217,15 +217,18 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
if (TestClearPageMlocked(page)) {
unsigned long flags;
+ int nr_pages = hpage_nr_pages(page);
local_irq_save(flags);
- __dec_zone_page_state(page, NR_MLOCK);
+ __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
SetPageMlocked(newpage);
- __inc_zone_page_state(newpage, NR_MLOCK);
+ __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
local_irq_restore(flags);
}
}
+extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
struct vm_area_struct *vma);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a217cc54406..752a705c77c 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1556,7 +1556,8 @@ static int dump_str_object_info(const char *str)
struct kmemleak_object *object;
unsigned long addr;
- addr= simple_strtoul(str, NULL, 0);
+ if (kstrtoul(str, 0, &addr))
+ return -EINVAL;
object = find_and_get_object(addr, 0);
if (!object) {
pr_info("Unknown object at 0x%08lx\n", addr);
diff --git a/mm/ksm.c b/mm/ksm.c
index 382d930a0bf..51573858938 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1624,7 +1624,7 @@ again:
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_read(anon_vma);
anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
0, ULONG_MAX) {
vma = vmac->vma;
@@ -1648,7 +1648,7 @@ again:
if (!search_new_forks || !mapcount)
break;
}
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
if (!mapcount)
goto out;
}
@@ -1678,7 +1678,7 @@ again:
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_read(anon_vma);
anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
0, ULONG_MAX) {
vma = vmac->vma;
@@ -1697,11 +1697,11 @@ again:
ret = try_to_unmap_one(page, vma,
rmap_item->address, flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) {
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
goto out;
}
}
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
}
if (!search_new_forks++)
goto again;
@@ -1731,7 +1731,7 @@ again:
struct anon_vma_chain *vmac;
struct vm_area_struct *vma;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_read(anon_vma);
anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
0, ULONG_MAX) {
vma = vmac->vma;
@@ -1749,11 +1749,11 @@ again:
ret = rmap_one(page, vma, rmap_item->address, arg);
if (ret != SWAP_AGAIN) {
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
goto out;
}
}
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
}
if (!search_new_forks++)
goto again;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 12307b3838f..09255ec8159 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -10,6 +10,10 @@
* Copyright (C) 2009 Nokia Corporation
* Author: Kirill A. Shutemov
*
+ * Kernel Memory Controller
+ * Copyright (C) 2012 Parallels Inc. and Google Inc.
+ * Authors: Glauber Costa and Suleiman Souhlal
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -59,6 +63,8 @@
#include <trace/events/vmscan.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
+EXPORT_SYMBOL(mem_cgroup_subsys);
+
#define MEM_CGROUP_RECLAIM_RETRIES 5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
@@ -266,6 +272,10 @@ struct mem_cgroup {
};
/*
+ * the counter to account for kernel memory usage.
+ */
+ struct res_counter kmem;
+ /*
* Per cgroup active and inactive list, similar to the
* per zone LRU lists.
*/
@@ -280,6 +290,7 @@ struct mem_cgroup {
* Should the accounting and control be hierarchical, per subtree?
*/
bool use_hierarchy;
+ unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
bool oom_lock;
atomic_t under_oom;
@@ -330,8 +341,61 @@ struct mem_cgroup {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
struct tcp_memcontrol tcp_mem;
#endif
+#if defined(CONFIG_MEMCG_KMEM)
+ /* analogous to slab_common's slab_caches list. per-memcg */
+ struct list_head memcg_slab_caches;
+ /* Not a spinlock, we can take a lot of time walking the list */
+ struct mutex slab_caches_mutex;
+ /* Index in the kmem_cache->memcg_params->memcg_caches array */
+ int kmemcg_id;
+#endif
};
+/* internal only representation about the status of kmem accounting. */
+enum {
+ KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
+ KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
+ KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
+};
+
+/* We account when limit is on, but only after call sites are patched */
+#define KMEM_ACCOUNTED_MASK \
+ ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
+{
+ set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
+}
+
+static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
+}
+
+static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
+{
+ set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
+}
+
+static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
+{
+ clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
+}
+
+static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
+{
+ if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
+ set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
+}
+
+static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
+{
+ return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
+ &memcg->kmem_account_flags);
+}
+#endif
+
/* Stuffs for move charges at task migration. */
/*
* Types of charges to be moved. "move_charge_at_immitgrate" is treated as a
@@ -386,9 +450,13 @@ enum charge_type {
};
/* for encoding cft->private value on file */
-#define _MEM (0)
-#define _MEMSWAP (1)
-#define _OOM_TYPE (2)
+enum res_type {
+ _MEM,
+ _MEMSWAP,
+ _OOM_TYPE,
+ _KMEM,
+};
+
#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
@@ -485,6 +553,75 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
}
#endif
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
+ * There are two main reasons for not using the css_id for this:
+ * 1) this works better in sparse environments, where we have a lot of memcgs
+ *    but only a few of them kmem-limited. If, for instance, we have 200
+ *    memcgs and only the 200th is kmem-limited, using css_id would force a
+ *    200-entry array on every cache for that single group.
+ *
+ * 2) In order not to violate the cgroup API, we would like to do all memory
+ * allocation in ->create(). At that point, we haven't yet allocated the
+ * css_id. Having a separate index prevents us from messing with the cgroup
+ * core for this
+ *
+ * The current size of the caches array is stored in
+ * memcg_limited_groups_array_size. It will double each time we have to
+ * increase it.
+ */
+static DEFINE_IDA(kmem_limited_groups);
+int memcg_limited_groups_array_size;
+
+/*
+ * MIN_SIZE is different than 1, because we would like to avoid going through
+ * the alloc/free process all the time. In a small machine, 4 kmem-limited
+ * cgroups is a reasonable guess. In the future, it could be a parameter or
+ * tunable, but that is strictly not necessary.
+ *
+ * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
+ * this constant directly from cgroup, but it is understandable that this is
+ * better kept as an internal representation in cgroup.c. In any case, the
+ * css_id space is not getting any smaller, and we don't have to necessarily
+ * increase ours as well if it increases.
+ */
+#define MEMCG_CACHES_MIN_SIZE 4
+#define MEMCG_CACHES_MAX_SIZE 65535
+
+/*
+ * A lot of the calls to the cache allocation functions are expected to be
+ * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * conditional on this static branch, we'll have to allow modules that do
+ * kmem_cache_alloc and the like to see this symbol as well.
+ */
+struct static_key memcg_kmem_enabled_key;
+EXPORT_SYMBOL(memcg_kmem_enabled_key);
+
+static void disarm_kmem_keys(struct mem_cgroup *memcg)
+{
+ if (memcg_kmem_is_active(memcg)) {
+ static_key_slow_dec(&memcg_kmem_enabled_key);
+ ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
+ }
+ /*
+ * This check can't live in kmem destruction function,
+ * since the charges will outlive the cgroup
+ */
+ WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
+}
+#else
+static void disarm_kmem_keys(struct mem_cgroup *memcg)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
+static void disarm_static_keys(struct mem_cgroup *memcg)
+{
+ disarm_sock_keys(memcg);
+ disarm_kmem_keys(memcg);
+}
+
static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
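The comments above motivate giving each kmem-limited memcg its own small index instead of reusing css_id, with every root cache holding an array of per-memcg child caches indexed by it, and the array growing by doubling. A toy user-space model of that layout; all names and the allocation policy details here are illustrative, not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GROUPS_MIN 4

struct root_cache {
	void **child;		/* child[id] is the per-group clone */
	int array_size;
};

static int next_id;		/* stands in for the IDA allocator */
static int array_size = GROUPS_MIN;

static int grow(struct root_cache *c, int size)
{
	void **n = calloc(size, sizeof(*n));

	if (!n)
		return -1;
	if (c->child) {
		memcpy(n, c->child, c->array_size * sizeof(*n));
		free(c->child);
	}
	c->child = n;
	c->array_size = size;
	return 0;
}

/* Give a new kmem-limited group a small dense id and make sure the root
 * cache's child array is large enough to be indexed by it. */
static int new_group(struct root_cache *c)
{
	int id = next_id++;

	if (id >= array_size)
		array_size *= 2;	/* doubling policy */
	if (c->array_size < array_size && grow(c, array_size))
		return -1;
	return id;
}

int main(void)
{
	struct root_cache cache = { NULL, 0 };

	for (int i = 0; i < 6; i++)
		printf("group %d -> array_size %d\n",
		       new_group(&cache), cache.array_size);
	free(cache.child);
	return 0;
}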
@@ -800,7 +937,7 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
int nid;
u64 total = 0;
- for_each_node_state(nid, N_HIGH_MEMORY)
+ for_each_node_state(nid, N_MEMORY)
total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
return total;
}
@@ -1015,13 +1152,10 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
iter != NULL; \
iter = mem_cgroup_iter(NULL, iter, NULL))
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
struct mem_cgroup *memcg;
- if (!mm)
- return;
-
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (unlikely(!memcg))
@@ -1040,7 +1174,7 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
out:
rcu_read_unlock();
}
-EXPORT_SYMBOL(mem_cgroup_count_vm_event);
+EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
/**
* mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
@@ -1454,6 +1588,10 @@ done:
res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
+ printk(KERN_INFO "kmem: usage %llukB, limit %llukB, failcnt %llu\n",
+ res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
+ res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
+ res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
}
/*
@@ -1644,9 +1782,9 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
return;
/* make a nodemask where this memcg uses memory from */
- memcg->scan_nodes = node_states[N_HIGH_MEMORY];
+ memcg->scan_nodes = node_states[N_MEMORY];
- for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
+ for_each_node_mask(nid, node_states[N_MEMORY]) {
if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
node_clear(nid, memcg->scan_nodes);
@@ -1717,7 +1855,7 @@ static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
/*
* Check rest of nodes.
*/
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
if (node_isset(nid, memcg->scan_nodes))
continue;
if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
@@ -2061,20 +2199,28 @@ struct memcg_stock_pcp {
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
-/*
- * Try to consume stocked charge on this cpu. If success, one page is consumed
- * from local stock and true is returned. If the stock is 0 or charges from a
- * cgroup which is not current target, returns false. This stock will be
- * refilled.
+/**
+ * consume_stock: Try to consume stocked charge on this cpu.
+ * @memcg: memcg to consume from.
+ * @nr_pages: how many pages to charge.
+ *
+ * The charges will only happen if @memcg matches the current cpu's memcg
+ * stock, and at least @nr_pages are available in that stock. Failure to
+ * service an allocation will refill the stock.
+ *
+ * returns true if successful, false otherwise.
*/
-static bool consume_stock(struct mem_cgroup *memcg)
+static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
struct memcg_stock_pcp *stock;
bool ret = true;
+ if (nr_pages > CHARGE_BATCH)
+ return false;
+
stock = &get_cpu_var(memcg_stock);
- if (memcg == stock->cached && stock->nr_pages)
- stock->nr_pages--;
+ if (memcg == stock->cached && stock->nr_pages >= nr_pages)
+ stock->nr_pages -= nr_pages;
else /* need to call res_counter_charge */
ret = false;
put_cpu_var(memcg_stock);
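Per the kernel-doc above, consume_stock() now takes nr_pages from the per-cpu stock rather than a single page and refuses requests larger than the batch. A standalone sketch of that cached-stock pattern with one global stock instead of a per-cpu one; the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 32UL

struct stock {
	int cached_owner;	/* which group the stock was charged for */
	unsigned long nr_pages;	/* pre-charged pages available locally */
};

static struct stock stock = { .cached_owner = -1 };

/* Consume nr_pages from the local stock if it belongs to 'owner' and has
 * enough pre-charged pages; otherwise the caller must hit the real counter. */
static bool consume_stock(int owner, unsigned long nr_pages)
{
	if (nr_pages > CHARGE_BATCH)
		return false;
	if (stock.cached_owner == owner && stock.nr_pages >= nr_pages) {
		stock.nr_pages -= nr_pages;
		return true;
	}
	return false;
}

int main(void)
{
	stock.cached_owner = 1;
	stock.nr_pages = 20;

	printf("%d\n", consume_stock(1, 8));	/* 1: served from the stock */
	printf("%d\n", consume_stock(1, 64));	/* 0: larger than the batch */
	printf("%d\n", consume_stock(2, 4));	/* 0: different owner */
	return 0;
}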
@@ -2251,7 +2397,8 @@ enum {
};
static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
- unsigned int nr_pages, bool oom_check)
+ unsigned int nr_pages, unsigned int min_pages,
+ bool oom_check)
{
unsigned long csize = nr_pages * PAGE_SIZE;
struct mem_cgroup *mem_over_limit;
@@ -2274,18 +2421,18 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
} else
mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
/*
- * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
- * of regular pages (CHARGE_BATCH), or a single regular page (1).
- *
* Never reclaim on behalf of optional batching, retry with a
* single page instead.
*/
- if (nr_pages == CHARGE_BATCH)
+ if (nr_pages > min_pages)
return CHARGE_RETRY;
if (!(gfp_mask & __GFP_WAIT))
return CHARGE_WOULDBLOCK;
+ if (gfp_mask & __GFP_NORETRY)
+ return CHARGE_NOMEM;
+
ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
return CHARGE_RETRY;
@@ -2298,7 +2445,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
* unlikely to succeed so close to the limit, and we fall back
* to regular pages anyway in case of failure.
*/
- if (nr_pages == 1 && ret)
+ if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
return CHARGE_RETRY;
/*
@@ -2372,7 +2519,7 @@ again:
memcg = *ptr;
if (mem_cgroup_is_root(memcg))
goto done;
- if (nr_pages == 1 && consume_stock(memcg))
+ if (consume_stock(memcg, nr_pages))
goto done;
css_get(&memcg->css);
} else {
@@ -2397,7 +2544,7 @@ again:
rcu_read_unlock();
goto done;
}
- if (nr_pages == 1 && consume_stock(memcg)) {
+ if (consume_stock(memcg, nr_pages)) {
/*
* It seems dagerous to access memcg without css_get().
* But considering how consume_stok works, it's not
@@ -2432,7 +2579,8 @@ again:
nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
}
- ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
+ ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
+ oom_check);
switch (ret) {
case CHARGE_OK:
break;
@@ -2625,6 +2773,766 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
memcg_check_events(memcg, page);
}
+static DEFINE_MUTEX(set_limit_mutex);
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
+{
+ return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
+ (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
+}
+
+/*
+ * This is a bit cumbersome, but it is rarely used and avoids a backpointer
+ * in the memcg_cache_params struct.
+ */
+static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
+{
+ struct kmem_cache *cachep;
+
+ VM_BUG_ON(p->is_root_cache);
+ cachep = p->root_cache;
+ return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+}
+
+#ifdef CONFIG_SLABINFO
+static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct memcg_cache_params *params;
+
+ if (!memcg_can_account_kmem(memcg))
+ return -EIO;
+
+ print_slabinfo_header(m);
+
+ mutex_lock(&memcg->slab_caches_mutex);
+ list_for_each_entry(params, &memcg->memcg_slab_caches, list)
+ cache_show(memcg_params_to_cache(params), m);
+ mutex_unlock(&memcg->slab_caches_mutex);
+
+ return 0;
+}
+#endif
+
+static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
+{
+ struct res_counter *fail_res;
+ struct mem_cgroup *_memcg;
+ int ret = 0;
+ bool may_oom;
+
+ ret = res_counter_charge(&memcg->kmem, size, &fail_res);
+ if (ret)
+ return ret;
+
+ /*
+ * Conditions under which we can wait for the oom_killer. Those are
+ * the same conditions tested by the core page allocator
+ */
+ may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
+
+ _memcg = memcg;
+ ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
+ &_memcg, may_oom);
+
+ if (ret == -EINTR) {
+ /*
+ * __mem_cgroup_try_charge() chose to bypass to root due to
+ * OOM kill or fatal signal. Since our only options are to
+ * either fail the allocation or charge it to this cgroup, do
+ * it as a temporary condition. But we can't fail. From a
+ * kmem/slab perspective, the cache has already been selected,
+ * by mem_cgroup_kmem_get_cache(), so it is too late to change
+ * our minds.
+ *
+ * This condition will only trigger if the task entered
+ * memcg_charge_kmem in a sane state, but was OOM-killed during
+ * __mem_cgroup_try_charge() above. Tasks that were already
+ * dying when the allocation triggers should have been already
+ * directed to the root cgroup in memcontrol.h
+ */
+ res_counter_charge_nofail(&memcg->res, size, &fail_res);
+ if (do_swap_account)
+ res_counter_charge_nofail(&memcg->memsw, size,
+ &fail_res);
+ ret = 0;
+ } else if (ret)
+ res_counter_uncharge(&memcg->kmem, size);
+
+ return ret;
+}
+
+static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
+{
+ res_counter_uncharge(&memcg->res, size);
+ if (do_swap_account)
+ res_counter_uncharge(&memcg->memsw, size);
+
+ /* Not down to 0 */
+ if (res_counter_uncharge(&memcg->kmem, size))
+ return;
+
+ if (memcg_kmem_test_and_clear_dead(memcg))
+ mem_cgroup_put(memcg);
+}
+
+void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
+{
+ if (!memcg)
+ return;
+
+ mutex_lock(&memcg->slab_caches_mutex);
+ list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
+ mutex_unlock(&memcg->slab_caches_mutex);
+}
+
+/*
+ * Helper for accessing a memcg's index. It will be used as an index in the
+ * child cache array in kmem_cache, and also to derive its name. This function
+ * will return -1 when this is not a kmem-limited memcg.
+ */
+int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return memcg ? memcg->kmemcg_id : -1;
+}
+
+/*
+ * This ends up being protected by the set_limit mutex, during normal
+ * operation, because that is its main call site.
+ *
+ * But when we create a new cache, we can call this as well if its parent
+ * is kmem-limited. That will have to hold set_limit_mutex as well.
+ */
+int memcg_update_cache_sizes(struct mem_cgroup *memcg)
+{
+ int num, ret;
+
+ num = ida_simple_get(&kmem_limited_groups,
+ 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
+ if (num < 0)
+ return num;
+ /*
+ * After this point, kmem_accounted (which we test atomically at
+ * the beginning of this conditional) is no longer 0. This
+ * guarantees only one process will set the following boolean
+ * to true. We don't need test_and_set because we're protected
+ * by the set_limit_mutex anyway.
+ */
+ memcg_kmem_set_activated(memcg);
+
+ ret = memcg_update_all_caches(num+1);
+ if (ret) {
+ ida_simple_remove(&kmem_limited_groups, num);
+ memcg_kmem_clear_activated(memcg);
+ return ret;
+ }
+
+ memcg->kmemcg_id = num;
+ INIT_LIST_HEAD(&memcg->memcg_slab_caches);
+ mutex_init(&memcg->slab_caches_mutex);
+ return 0;
+}
+
+static size_t memcg_caches_array_size(int num_groups)
+{
+ ssize_t size;
+ if (num_groups <= 0)
+ return 0;
+
+ size = 2 * num_groups;
+ if (size < MEMCG_CACHES_MIN_SIZE)
+ size = MEMCG_CACHES_MIN_SIZE;
+ else if (size > MEMCG_CACHES_MAX_SIZE)
+ size = MEMCG_CACHES_MAX_SIZE;
+
+ return size;
+}
+
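memcg_caches_array_size() simply doubles the requested group count and clamps it between MEMCG_CACHES_MIN_SIZE and MEMCG_CACHES_MAX_SIZE. A quick standalone check of the values it produces, with the constants and logic copied from the hunk above:

#include <stdio.h>

#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535

static long memcg_caches_array_size(int num_groups)
{
	long size;

	if (num_groups <= 0)
		return 0;
	size = 2 * num_groups;
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;
	return size;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       memcg_caches_array_size(1),	/* 4: clamped up to the minimum */
	       memcg_caches_array_size(100),	/* 200: plain doubling */
	       memcg_caches_array_size(40000));	/* 65535: clamped to the maximum */
	return 0;
}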
+/*
+ * We should update the current array size iff all cache updates succeed. This
+ * can only be done from the slab side. The slab mutex needs to be held when
+ * calling this.
+ */
+void memcg_update_array_size(int num)
+{
+ if (num > memcg_limited_groups_array_size)
+ memcg_limited_groups_array_size = memcg_caches_array_size(num);
+}
+
+int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
+{
+ struct memcg_cache_params *cur_params = s->memcg_params;
+
+ VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+
+ if (num_groups > memcg_limited_groups_array_size) {
+ int i;
+ ssize_t size = memcg_caches_array_size(num_groups);
+
+ size *= sizeof(void *);
+ size += sizeof(struct memcg_cache_params);
+
+ s->memcg_params = kzalloc(size, GFP_KERNEL);
+ if (!s->memcg_params) {
+ s->memcg_params = cur_params;
+ return -ENOMEM;
+ }
+
+ s->memcg_params->is_root_cache = true;
+
+ /*
+ * There is a chance it will be bigger than
+ * memcg_limited_groups_array_size if we failed an allocation
+ * in a cache, in which case all caches updated before it will
+ * have a bigger array.
+ *
+ * But if that is the case, the data after
+ * memcg_limited_groups_array_size is certainly unused.
+ */
+ for (i = 0; i < memcg_limited_groups_array_size; i++) {
+ if (!cur_params->memcg_caches[i])
+ continue;
+ s->memcg_params->memcg_caches[i] =
+ cur_params->memcg_caches[i];
+ }
+
+ /*
+ * Ideally, we would wait until all caches succeed, and only
+ * then free the old one. But this is not worth the extra
+ * per-cache pointer we'd have to keep for this.
+ *
+ * It is not a big deal if some caches are left with a size
+ * bigger than the others. And all updates will reset this
+ * anyway.
+ */
+ kfree(cur_params);
+ }
+ return 0;
+}
+
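memcg_update_cache_size() above is essentially a grow-and-copy of the root cache's per-memcg pointer array, tolerating allocation failure by keeping the old block. A compact userspace analogue of that step, with an invented type that only models the pointer array:

#include <stdio.h>
#include <stdlib.h>

/* Toy analogue of a root cache's memcg_cache_params: header + flex array. */
struct demo_params {
	int is_root_cache;
	void *caches[];		/* one slot per kmem-limited memcg */
};

static struct demo_params *demo_grow(struct demo_params *old,
				     int old_slots, int new_slots)
{
	struct demo_params *new;
	int i;

	new = calloc(1, sizeof(*new) + new_slots * sizeof(void *));
	if (!new)
		return old;	/* keep the old block on allocation failure */

	new->is_root_cache = 1;
	/* Only the slots that were valid before the resize carry over. */
	for (i = 0; i < old_slots; i++)
		new->caches[i] = old->caches[i];

	free(old);
	return new;
}

int main(void)
{
	struct demo_params *p = calloc(1, sizeof(*p) + 2 * sizeof(void *));

	p->is_root_cache = 1;
	p->caches[0] = (void *)0x1;	/* pretend memcg 0 has a child cache */

	p = demo_grow(p, 2, 8);
	printf("slot 0 after grow: %p\n", p->caches[0]);
	printf("slot 5 after grow: %p\n", p->caches[5]);
	free(p);
	return 0;
}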
+int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+ struct kmem_cache *root_cache)
+{
+ size_t size = sizeof(struct memcg_cache_params);
+
+ if (!memcg_kmem_enabled())
+ return 0;
+
+ if (!memcg)
+ size += memcg_limited_groups_array_size * sizeof(void *);
+
+ s->memcg_params = kzalloc(size, GFP_KERNEL);
+ if (!s->memcg_params)
+ return -ENOMEM;
+
+ if (memcg) {
+ s->memcg_params->memcg = memcg;
+ s->memcg_params->root_cache = root_cache;
+ }
+ return 0;
+}
+
+void memcg_release_cache(struct kmem_cache *s)
+{
+ struct kmem_cache *root;
+ struct mem_cgroup *memcg;
+ int id;
+
+ /*
+ * This happens, for instance, when a root cache goes away before we
+ * add any memcg.
+ */
+ if (!s->memcg_params)
+ return;
+
+ if (s->memcg_params->is_root_cache)
+ goto out;
+
+ memcg = s->memcg_params->memcg;
+ id = memcg_cache_id(memcg);
+
+ root = s->memcg_params->root_cache;
+ root->memcg_params->memcg_caches[id] = NULL;
+ mem_cgroup_put(memcg);
+
+ mutex_lock(&memcg->slab_caches_mutex);
+ list_del(&s->memcg_params->list);
+ mutex_unlock(&memcg->slab_caches_mutex);
+
+out:
+ kfree(s->memcg_params);
+}
+
+/*
+ * During the creation of a new cache, we need to disable our accounting mechanism
+ * altogether. This is true even if we are not creating, but rather just
+ * enqueueing new caches to be created.
+ *
+ * This is because that process will trigger allocations; some visible, like
+ * explicit kmallocs to auxiliary data structures, name strings and internal
+ * cache structures; some well concealed, like INIT_WORK() that can allocate
+ * objects during debug.
+ *
+ * If any allocation happens during memcg_kmem_get_cache, we will recurse back
+ * to it. This may not be a bounded recursion: since the first cache creation
+ * failed to complete (waiting on the allocation), we'll just try to create the
+ * cache again, failing at the same point.
+ *
+ * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
+ * memcg_kmem_skip_account. So we enclose anything that might allocate memory
+ * inside the following two functions.
+ */
+static inline void memcg_stop_kmem_account(void)
+{
+ VM_BUG_ON(!current->mm);
+ current->memcg_kmem_skip_account++;
+}
+
+static inline void memcg_resume_kmem_account(void)
+{
+ VM_BUG_ON(!current->mm);
+ current->memcg_kmem_skip_account--;
+}
+
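The stop/resume pair is a per-task recursion guard: while the counter is non-zero, cache selection falls back to the root cache, so allocations made during cache creation cannot recurse into it. A minimal thread-local sketch of the same idea; the names are invented, the kernel keeps the counter in task_struct, and GCC's __thread is used here only for illustration.

#include <stdio.h>

/* Per-thread counter, like current->memcg_kmem_skip_account. */
static __thread int demo_skip_account;

static void demo_stop_account(void)   { demo_skip_account++; }
static void demo_resume_account(void) { demo_skip_account--; }

/* Pretend cache lookup: fall back to the "root cache" while skipping. */
static const char *demo_get_cache(void)
{
	if (demo_skip_account)
		return "root cache (accounting skipped)";
	return "per-memcg cache";
}

/* Work that itself allocates, e.g. creating a new child cache. */
static void demo_create_child_cache(void)
{
	demo_stop_account();
	/* Any allocation done here sees the guard and cannot recurse. */
	printf("  inner allocation uses: %s\n", demo_get_cache());
	demo_resume_account();
}

int main(void)
{
	printf("outer allocation uses: %s\n", demo_get_cache());
	demo_create_child_cache();
	printf("outer allocation uses: %s\n", demo_get_cache());
	return 0;
}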
+static void kmem_cache_destroy_work_func(struct work_struct *w)
+{
+ struct kmem_cache *cachep;
+ struct memcg_cache_params *p;
+
+ p = container_of(w, struct memcg_cache_params, destroy);
+
+ cachep = memcg_params_to_cache(p);
+
+ /*
+ * If we get down to 0 after shrink, we could delete right away.
+ * However, memcg_release_pages() already puts us back in the workqueue
+ * in that case. If we proceed deleting, we'll get a dangling
+ * reference, and removing the object from the workqueue in that case
+ * is unnecessary complication. We are not a fast path.
+ *
+ * Note that this case is fundamentally different from racing with
+ * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
+ * kmem_cache_shrink, not only would we be reinserting a dead cache
+ * into the queue, but doing so from inside the worker racing to
+ * destroy it.
+ *
+ * So if we aren't down to zero, we'll just schedule a worker and try
+ * again.
+ */
+ if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
+ kmem_cache_shrink(cachep);
+ if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
+ return;
+ } else
+ kmem_cache_destroy(cachep);
+}
+
+void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
+{
+ if (!cachep->memcg_params->dead)
+ return;
+
+ /*
+ * There are many ways in which we can get here.
+ *
+ * We can get to a memory-pressure situation while the delayed work is
+ * still pending to run. The vmscan shrinkers can then release all
+ * cache memory and get us to destruction. If this is the case, we'll
+ * be executed twice, which is a bug (the second time will execute over
+ * bogus data). In this case, cancelling the work should be fine.
+ *
+ * But we can also get here from the worker itself, if
+ * kmem_cache_shrink is enough to shake all the remaining objects and
+ * get the page count to 0. In this case, we'll deadlock if we try to
+ * cancel the work (the worker runs with an internal lock held, which
+ * is the same lock we would hold for cancel_work_sync().)
+ *
+ * Since we can't possibly know who got us here, just refrain from
+ * running if there is already work pending
+ */
+ if (work_pending(&cachep->memcg_params->destroy))
+ return;
+ /*
+ * We have to defer the actual destroying to a workqueue, because
+ * we might currently be in a context that cannot sleep.
+ */
+ schedule_work(&cachep->memcg_params->destroy);
+}
+
+static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
+{
+ char *name;
+ struct dentry *dentry;
+
+ rcu_read_lock();
+ dentry = rcu_dereference(memcg->css.cgroup->dentry);
+ rcu_read_unlock();
+
+ BUG_ON(dentry == NULL);
+
+ name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
+ memcg_cache_id(memcg), dentry->d_name.name);
+
+ return name;
+}
+
+static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
+ struct kmem_cache *s)
+{
+ char *name;
+ struct kmem_cache *new;
+
+ name = memcg_cache_name(memcg, s);
+ if (!name)
+ return NULL;
+
+ new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
+ (s->flags & ~SLAB_PANIC), s->ctor, s);
+
+ if (new)
+ new->allocflags |= __GFP_KMEMCG;
+
+ kfree(name);
+ return new;
+}
+
+/*
+ * This lock protects updaters, not readers. We want readers to be as fast as
+ * they can, and they will either see NULL or a valid cache value. Our model
+ * allows them to see NULL, in which case the root memcg will be selected.
+ *
+ * We need this lock because multiple allocations to the same cache may
+ * span more than one creation worker. Only one of them can create the cache.
+ */
+static DEFINE_MUTEX(memcg_cache_mutex);
+static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
+ struct kmem_cache *cachep)
+{
+ struct kmem_cache *new_cachep;
+ int idx;
+
+ BUG_ON(!memcg_can_account_kmem(memcg));
+
+ idx = memcg_cache_id(memcg);
+
+ mutex_lock(&memcg_cache_mutex);
+ new_cachep = cachep->memcg_params->memcg_caches[idx];
+ if (new_cachep)
+ goto out;
+
+ new_cachep = kmem_cache_dup(memcg, cachep);
+ if (new_cachep == NULL) {
+ new_cachep = cachep;
+ goto out;
+ }
+
+ mem_cgroup_get(memcg);
+ atomic_set(&new_cachep->memcg_params->nr_pages , 0);
+
+ cachep->memcg_params->memcg_caches[idx] = new_cachep;
+ /*
+ * The readers won't take the lock, so make sure everybody sees the updated
+ * value; otherwise they will keep putting stuff in the queue for no reason.
+ */
+ wmb();
+out:
+ mutex_unlock(&memcg_cache_mutex);
+ return new_cachep;
+}
+
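memcg_create_kmem_cache() follows a create-once pattern: readers look at the slot without any lock and may see NULL, while would-be creators serialize on a mutex, re-check the slot and publish the new pointer with a barrier. A minimal pthread rendition of that double-check, assuming GCC-style __atomic builtins and a plain pointer standing in for the cache:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t demo_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *demo_slot;	/* NULL until the "cache" has been created */

/* Readers peek locklessly and simply see NULL or a fully built object. */
static int *demo_peek(void)
{
	return __atomic_load_n(&demo_slot, __ATOMIC_ACQUIRE);
}

/* Would-be creators serialize here and re-check the slot under the mutex. */
static int *demo_create_once(void)
{
	int *cache;

	pthread_mutex_lock(&demo_cache_mutex);
	cache = demo_slot;
	if (!cache) {
		cache = malloc(sizeof(*cache));
		if (!cache)
			abort();
		*cache = 42;
		/* Publish only after the object is fully initialized. */
		__atomic_store_n(&demo_slot, cache, __ATOMIC_RELEASE);
	}
	pthread_mutex_unlock(&demo_cache_mutex);
	return cache;
}

static void *demo_worker(void *arg)
{
	int *cache = demo_peek();

	if (!cache)
		cache = demo_create_once();
	printf("thread %ld sees cache %p (value %d)\n",
	       (long)arg, (void *)cache, *cache);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, demo_worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	free(demo_slot);
	return 0;
}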
+void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+ struct kmem_cache *c;
+ int i;
+
+ if (!s->memcg_params)
+ return;
+ if (!s->memcg_params->is_root_cache)
+ return;
+
+ /*
+ * If the cache is being destroyed, we trust that there is no one else
+ * requesting objects from it. Even if there are, the sanity checks in
+ * kmem_cache_destroy should catch this ill case.
+ *
+ * Still, we don't want anyone else freeing memcg_caches under our
+ * noses, which can happen if a new memcg comes to life. As usual,
+ * we'll take the set_limit_mutex to protect ourselves against this.
+ */
+ mutex_lock(&set_limit_mutex);
+ for (i = 0; i < memcg_limited_groups_array_size; i++) {
+ c = s->memcg_params->memcg_caches[i];
+ if (!c)
+ continue;
+
+ /*
+ * We will now manually delete the caches, so to avoid races
+ * we need to cancel all pending destruction workers and
+ * proceed with destruction ourselves.
+ *
+ * kmem_cache_destroy() will call kmem_cache_shrink internally,
+ * and that could spawn the workers again: it is likely that
+ * the cache still has active pages until this very moment.
+ * This would lead us back to mem_cgroup_destroy_cache.
+ *
+ * But that will not execute at all if the "dead" flag is not
+ * set, so flip it down to guarantee we are in control.
+ */
+ c->memcg_params->dead = false;
+ cancel_work_sync(&c->memcg_params->destroy);
+ kmem_cache_destroy(c);
+ }
+ mutex_unlock(&set_limit_mutex);
+}
+
+struct create_work {
+ struct mem_cgroup *memcg;
+ struct kmem_cache *cachep;
+ struct work_struct work;
+};
+
+static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+ struct kmem_cache *cachep;
+ struct memcg_cache_params *params;
+
+ if (!memcg_kmem_is_active(memcg))
+ return;
+
+ mutex_lock(&memcg->slab_caches_mutex);
+ list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
+ cachep = memcg_params_to_cache(params);
+ cachep->memcg_params->dead = true;
+ INIT_WORK(&cachep->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
+ schedule_work(&cachep->memcg_params->destroy);
+ }
+ mutex_unlock(&memcg->slab_caches_mutex);
+}
+
+static void memcg_create_cache_work_func(struct work_struct *w)
+{
+ struct create_work *cw;
+
+ cw = container_of(w, struct create_work, work);
+ memcg_create_kmem_cache(cw->memcg, cw->cachep);
+ /* Drop the reference gotten when we enqueued. */
+ css_put(&cw->memcg->css);
+ kfree(cw);
+}
+
+/*
+ * Enqueue the creation of a per-memcg kmem_cache.
+ * Called with rcu_read_lock held.
+ */
+static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
+ struct kmem_cache *cachep)
+{
+ struct create_work *cw;
+
+ cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
+ if (cw == NULL)
+ return;
+
+ /* The corresponding put will be done in the workqueue. */
+ if (!css_tryget(&memcg->css)) {
+ kfree(cw);
+ return;
+ }
+
+ cw->memcg = memcg;
+ cw->cachep = cachep;
+
+ INIT_WORK(&cw->work, memcg_create_cache_work_func);
+ schedule_work(&cw->work);
+}
+
+static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
+ struct kmem_cache *cachep)
+{
+ /*
+ * We need to stop accounting when we kmalloc, because if the
+ * corresponding kmalloc cache is not yet created, the first allocation
+ * in __memcg_create_cache_enqueue will recurse.
+ *
+ * However, it is better to enclose the whole function. Depending on
+ * the debugging options enabled, INIT_WORK(), for instance, can
+ * trigger an allocation. This, too, will make us recurse. Because at
+ * this point we can't allow ourselves back into memcg_kmem_get_cache,
+ * the safest choice is to do it like this, wrapping the whole function.
+ */
+ memcg_stop_kmem_account();
+ __memcg_create_cache_enqueue(memcg, cachep);
+ memcg_resume_kmem_account();
+}
+/*
+ * Return the kmem_cache we're supposed to use for a slab allocation.
+ * We try to use the current memcg's version of the cache.
+ *
+ * If the cache does not exist yet and we are the first user of it,
+ * we either create it immediately, if possible, or create it asynchronously
+ * in a workqueue.
+ * In the latter case, we will let the current allocation go through with
+ * the original cache.
+ *
+ * Can't be called in interrupt context or from kernel threads.
+ * This function needs to be called with rcu_read_lock() held.
+ */
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
+ gfp_t gfp)
+{
+ struct mem_cgroup *memcg;
+ int idx;
+
+ VM_BUG_ON(!cachep->memcg_params);
+ VM_BUG_ON(!cachep->memcg_params->is_root_cache);
+
+ if (!current->mm || current->memcg_kmem_skip_account)
+ return cachep;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
+ rcu_read_unlock();
+
+ if (!memcg_can_account_kmem(memcg))
+ return cachep;
+
+ idx = memcg_cache_id(memcg);
+
+ /*
+ * barrier to make sure we're always seeing the up-to-date value. The
+ * code updating memcg_caches will issue a write barrier to match this.
+ */
+ read_barrier_depends();
+ if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
+ /*
+ * If we are in a safe context (can wait, and not in interrupt
+ * context), we could be predictable and return right away.
+ * This would guarantee that the allocation being performed
+ * already belongs in the new cache.
+ *
+ * However, there are some clashes that can arise from locking.
+ * For instance, because we acquire the slab_mutex while doing
+ * kmem_cache_dup, this means no further allocation could happen
+ * with the slab_mutex held.
+ *
+ * Also, because cache creation issues get_online_cpus(), this
+ * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+ * that ends up reversed during cpu hotplug. (cpuset allocates
+ * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+ * it is better to defer everything.
+ */
+ memcg_create_cache_enqueue(memcg, cachep);
+ return cachep;
+ }
+
+ return cachep->memcg_params->memcg_caches[idx];
+}
+EXPORT_SYMBOL(__memcg_kmem_get_cache);
+
+/*
+ * We need to verify if the allocation against current->mm->owner's memcg is
+ * possible for the given order. But the page is not allocated yet, so we'll
+ * need a further commit step to do the final arrangements.
+ *
+ * It is possible for the task to switch cgroups in the meantime, so at
+ * commit time, we can't rely on task conversion any longer. We'll then use
+ * the handle argument to return to the caller which cgroup we should commit
+ * against. We could also return the memcg directly and avoid the pointer
+ * passing, but a boolean return value gives better semantics considering
+ * the compiled-out case as well.
+ *
+ * Returning true means the allocation is possible.
+ */
+bool
+__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
+{
+ struct mem_cgroup *memcg;
+ int ret;
+
+ *_memcg = NULL;
+ memcg = try_get_mem_cgroup_from_mm(current->mm);
+
+ /*
+ * This is the very rare case described in mem_cgroup_from_task. Unfortunately
+ * there isn't much we can do without complicating this too much, and it
+ * would be gfp-dependent anyway. Just let it go.
+ */
+ if (unlikely(!memcg))
+ return true;
+
+ if (!memcg_can_account_kmem(memcg)) {
+ css_put(&memcg->css);
+ return true;
+ }
+
+ ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
+ if (!ret)
+ *_memcg = memcg;
+
+ css_put(&memcg->css);
+ return (ret == 0);
+}
+
+void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
+ int order)
+{
+ struct page_cgroup *pc;
+
+ VM_BUG_ON(mem_cgroup_is_root(memcg));
+
+ /* The page allocation failed. Revert */
+ if (!page) {
+ memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
+ return;
+ }
+
+ pc = lookup_page_cgroup(page);
+ lock_page_cgroup(pc);
+ pc->mem_cgroup = memcg;
+ SetPageCgroupUsed(pc);
+ unlock_page_cgroup(pc);
+}
+
+void __memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+ struct mem_cgroup *memcg = NULL;
+ struct page_cgroup *pc;
+
+
+ pc = lookup_page_cgroup(page);
+ /*
+ * Fast unlocked return. Theoretically the flag might have changed, so we
+ * check again after locking.
+ */
+ if (!PageCgroupUsed(pc))
+ return;
+
+ lock_page_cgroup(pc);
+ if (PageCgroupUsed(pc)) {
+ memcg = pc->mem_cgroup;
+ ClearPageCgroupUsed(pc);
+ }
+ unlock_page_cgroup(pc);
+
+ /*
+ * We trust that the allocation is a valid (accounted) one only if there
+ * is a memcg associated with the page.
+ */
+ if (!memcg)
+ return;
+
+ VM_BUG_ON(mem_cgroup_is_root(memcg));
+ memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
+}
+#else
+static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
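Taken together, __memcg_kmem_newpage_charge(), __memcg_kmem_commit_charge() and __memcg_kmem_uncharge_pages() form a reserve/commit/release handshake around the page allocator. The toy model below captures only that handshake; the structs and counter are invented, and the real code records the memcg in page_cgroup under its own locking.

#include <stdbool.h>
#include <stdio.h>

struct demo_memcg {
	const char *name;
	long kmem_usage;
	long kmem_limit;
};

struct demo_page {
	struct demo_memcg *memcg;	/* set at commit time, NULL otherwise */
};

/* Step 1: reserve the charge before the page is allocated. */
static bool demo_newpage_charge(struct demo_memcg *memcg,
				struct demo_memcg **out, long bytes)
{
	*out = NULL;
	if (memcg->kmem_usage + bytes > memcg->kmem_limit)
		return false;		/* allocation should fail */
	memcg->kmem_usage += bytes;
	*out = memcg;
	return true;
}

/* Step 2: bind the charge to the page, or revert it if allocation failed. */
static void demo_commit_charge(struct demo_page *page,
			       struct demo_memcg *memcg, long bytes)
{
	if (!page) {
		memcg->kmem_usage -= bytes;	/* the page never materialized */
		return;
	}
	page->memcg = memcg;
}

/* Step 3: at free time, uncharge whatever memcg the page was committed to. */
static void demo_uncharge_page(struct demo_page *page, long bytes)
{
	if (!page->memcg)
		return;				/* never accounted */
	page->memcg->kmem_usage -= bytes;
	page->memcg = NULL;
}

int main(void)
{
	struct demo_memcg cg = { "demo", 0, 8192 };
	struct demo_memcg *res;
	struct demo_page page = { NULL };

	if (demo_newpage_charge(&cg, &res, 4096)) {
		demo_commit_charge(&page, res, 4096);
		printf("usage after commit: %ld\n", cg.kmem_usage);
		demo_uncharge_page(&page, 4096);
	}
	printf("usage after free:   %ld\n", cg.kmem_usage);
	return 0;
}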
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
@@ -3290,15 +4198,18 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
struct mem_cgroup **memcgp)
{
struct mem_cgroup *memcg = NULL;
+ unsigned int nr_pages = 1;
struct page_cgroup *pc;
enum charge_type ctype;
*memcgp = NULL;
- VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return;
+ if (PageTransHuge(page))
+ nr_pages <<= compound_order(page);
+
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
@@ -3360,7 +4271,7 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
* charged to the res_counter since we plan on replacing the
* old one and only one page is going to be left afterwards.
*/
- __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
+ __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
}
/* remove redundant charge if migration failed*/
@@ -3484,8 +4395,6 @@ void mem_cgroup_print_bad_page(struct page *page)
}
#endif
-static DEFINE_MUTEX(set_limit_mutex);
-
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
@@ -3770,13 +4679,14 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
{
int node, zid;
+ u64 usage;
do {
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
drain_all_stock_sync(memcg);
mem_cgroup_start_move(memcg);
- for_each_node_state(node, N_HIGH_MEMORY) {
+ for_each_node_state(node, N_MEMORY) {
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
enum lru_list lru;
for_each_lru(lru) {
@@ -3790,13 +4700,20 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
cond_resched();
/*
+ * Kernel memory may not necessarily be trackable to a specific
+ * process, so it is not migrated, and therefore we can't
+ * expect its usage to drop to 0 here.
+ * Having res filled up with kmem only is enough.
+ *
* This is a safety check because mem_cgroup_force_empty_list
* could have raced with mem_cgroup_replace_page_cache callers
* so the lru seemed empty but the page could have been added
* right after the check. RES_USAGE should be safe as we always
* charge before adding to the LRU.
*/
- } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0);
+ usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
+ res_counter_read_u64(&memcg->kmem, RES_USAGE);
+ } while (usage > 0);
}
/*
@@ -3940,7 +4857,8 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
char str[64];
u64 val;
- int type, name, len;
+ int name, len;
+ enum res_type type;
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
@@ -3961,6 +4879,9 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
else
val = res_counter_read_u64(&memcg->memsw, name);
break;
+ case _KMEM:
+ val = res_counter_read_u64(&memcg->kmem, name);
+ break;
default:
BUG();
}
@@ -3968,6 +4889,125 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
+
+static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
+{
+ int ret = -EINVAL;
+#ifdef CONFIG_MEMCG_KMEM
+ bool must_inc_static_branch = false;
+
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ /*
+ * For simplicity, we won't allow this to be disabled. It also can't
+ * be changed if the cgroup has children already, or if tasks have
+ * already joined.
+ *
+ * If tasks join before we set the limit, a person looking at
+ * kmem.usage_in_bytes will have no way to determine when it took
+ * place, which makes the value quite meaningless.
+ *
+ * After it first became limited, changes in the value of the limit are
+ * of course permitted.
+ *
+ * Taking the cgroup_lock is really offensive, but it is so far the only
+ * way to guarantee that no children will appear. There are plenty of
+ * other offenders, and they should all go away. Fine grained locking
+ * is probably the way to go here. When we are fully hierarchical, we
+ * can also get rid of the use_hierarchy check.
+ */
+ cgroup_lock();
+ mutex_lock(&set_limit_mutex);
+ if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
+ if (cgroup_task_count(cont) || (memcg->use_hierarchy &&
+ !list_empty(&cont->children))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = res_counter_set_limit(&memcg->kmem, val);
+ VM_BUG_ON(ret);
+
+ ret = memcg_update_cache_sizes(memcg);
+ if (ret) {
+ res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
+ goto out;
+ }
+ must_inc_static_branch = true;
+ /*
+ * kmem charges can outlive the cgroup. In the case of slab
+ * pages, for instance, a page can contain objects from various
+ * processes, so it is unfeasible to migrate them away. We
+ * need to reference count the memcg because of that.
+ */
+ mem_cgroup_get(memcg);
+ } else
+ ret = res_counter_set_limit(&memcg->kmem, val);
+out:
+ mutex_unlock(&set_limit_mutex);
+ cgroup_unlock();
+
+ /*
+ * We are by now familiar with the fact that we can't inc the static
+ * branch inside cgroup_lock. See disarm functions for details. A
+ * worker here is overkill, but also wrong: After the limit is set, we
+ * must start accounting right away. Since this operation can't fail,
+ * we can safely defer it to here - no rollback will be needed.
+ *
+ * The boolean used to control this is also safe, because
+ * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
+ * able to set it to true.
+ */
+ if (must_inc_static_branch) {
+ static_key_slow_inc(&memcg_kmem_enabled_key);
+ /*
+ * Setting the active bit after the inc guarantees that no one
+ * starts accounting before all call sites are patched.
+ */
+ memcg_kmem_set_active(memcg);
+ }
+
+#endif
+ return ret;
+}
+
+static int memcg_propagate_kmem(struct mem_cgroup *memcg)
+{
+ int ret = 0;
+ struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+ if (!parent)
+ goto out;
+
+ memcg->kmem_account_flags = parent->kmem_account_flags;
+#ifdef CONFIG_MEMCG_KMEM
+ /*
+ * When that happens, we need to disable the static branch only on those
+ * memcgs that enabled it. To achieve this, we would be forced to
+ * complicate the code by keeping track of which memcgs were the ones
+ * that actually enabled limits, and which ones got it from their
+ * parents.
+ *
+ * It is a lot simpler just to do static_key_slow_inc() on every child
+ * that is accounted.
+ */
+ if (!memcg_kmem_is_active(memcg))
+ goto out;
+
+ /*
+ * destroy(), called if we fail, will issue static_key_slow_inc() and
+ * mem_cgroup_put() if kmem is enabled. We have to either call them
+ * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
+ * this more consistent, since it always leads to the same destroy path
+ */
+ mem_cgroup_get(memcg);
+ static_key_slow_inc(&memcg_kmem_enabled_key);
+
+ mutex_lock(&set_limit_mutex);
+ ret = memcg_update_cache_sizes(memcg);
+ mutex_unlock(&set_limit_mutex);
+#endif
+out:
+ return ret;
+}
+
/*
* The user of this function is...
* RES_LIMIT.
@@ -3976,7 +5016,8 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- int type, name;
+ enum res_type type;
+ int name;
unsigned long long val;
int ret;
@@ -3998,8 +5039,12 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
break;
if (type == _MEM)
ret = mem_cgroup_resize_limit(memcg, val);
- else
+ else if (type == _MEMSWAP)
ret = mem_cgroup_resize_memsw_limit(memcg, val);
+ else if (type == _KMEM)
+ ret = memcg_update_kmem_limit(cont, val);
+ else
+ return -EINVAL;
break;
case RES_SOFT_LIMIT:
ret = res_counter_memparse_write_strategy(buffer, &val);
@@ -4052,7 +5097,8 @@ out:
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- int type, name;
+ int name;
+ enum res_type type;
type = MEMFILE_TYPE(event);
name = MEMFILE_ATTR(event);
@@ -4064,14 +5110,22 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
case RES_MAX_USAGE:
if (type == _MEM)
res_counter_reset_max(&memcg->res);
- else
+ else if (type == _MEMSWAP)
res_counter_reset_max(&memcg->memsw);
+ else if (type == _KMEM)
+ res_counter_reset_max(&memcg->kmem);
+ else
+ return -EINVAL;
break;
case RES_FAILCNT:
if (type == _MEM)
res_counter_reset_failcnt(&memcg->res);
- else
+ else if (type == _MEMSWAP)
res_counter_reset_failcnt(&memcg->memsw);
+ else if (type == _KMEM)
+ res_counter_reset_failcnt(&memcg->kmem);
+ else
+ return -EINVAL;
break;
}
@@ -4122,7 +5176,7 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
seq_printf(m, "total=%lu", total_nr);
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
seq_printf(m, " N%d=%lu", nid, node_nr);
}
@@ -4130,7 +5184,7 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
seq_printf(m, "file=%lu", file_nr);
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
LRU_ALL_FILE);
seq_printf(m, " N%d=%lu", nid, node_nr);
@@ -4139,7 +5193,7 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
seq_printf(m, "anon=%lu", anon_nr);
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
LRU_ALL_ANON);
seq_printf(m, " N%d=%lu", nid, node_nr);
@@ -4148,7 +5202,7 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
seq_printf(m, "unevictable=%lu", unevictable_nr);
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
BIT(LRU_UNEVICTABLE));
seq_printf(m, " N%d=%lu", nid, node_nr);
@@ -4388,7 +5442,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
- int type = MEMFILE_TYPE(cft->private);
+ enum res_type type = MEMFILE_TYPE(cft->private);
u64 threshold, usage;
int i, size, ret;
@@ -4471,7 +5525,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
- int type = MEMFILE_TYPE(cft->private);
+ enum res_type type = MEMFILE_TYPE(cft->private);
u64 usage;
int i, j, size;
@@ -4549,7 +5603,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_eventfd_list *event;
- int type = MEMFILE_TYPE(cft->private);
+ enum res_type type = MEMFILE_TYPE(cft->private);
BUG_ON(type != _OOM_TYPE);
event = kmalloc(sizeof(*event), GFP_KERNEL);
@@ -4574,7 +5628,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup_eventfd_list *ev, *tmp;
- int type = MEMFILE_TYPE(cft->private);
+ enum res_type type = MEMFILE_TYPE(cft->private);
BUG_ON(type != _OOM_TYPE);
@@ -4633,12 +5687,33 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
+ int ret;
+
+ memcg->kmemcg_id = -1;
+ ret = memcg_propagate_kmem(memcg);
+ if (ret)
+ return ret;
+
return mem_cgroup_sockets_init(memcg, ss);
};
static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
mem_cgroup_sockets_destroy(memcg);
+
+ memcg_kmem_mark_dead(memcg);
+
+ if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
+ return;
+
+ /*
+ * Charges already down to 0, undo mem_cgroup_get() done in the charge
+ * path here, being careful not to race with memcg_uncharge_kmem: it is
+ * possible that the charges went down to 0 between mark_dead and the
+ * res_counter read, so in that case, we don't need the put
+ */
+ if (memcg_kmem_test_and_clear_dead(memcg))
+ mem_cgroup_put(memcg);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -4747,6 +5822,37 @@ static struct cftype mem_cgroup_files[] = {
.read = mem_cgroup_read,
},
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ {
+ .name = "kmem.limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+ .write_string = mem_cgroup_write,
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "kmem.usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "kmem.failcnt",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
+ .trigger = mem_cgroup_reset,
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "kmem.max_usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
+ .trigger = mem_cgroup_reset,
+ .read = mem_cgroup_read,
+ },
+#ifdef CONFIG_SLABINFO
+ {
+ .name = "kmem.slabinfo",
+ .read_seq_string = mem_cgroup_slabinfo_read,
+ },
+#endif
+#endif
{ }, /* terminate */
};
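With these control files in place, userspace drives kernel-memory accounting through the normal cgroupfs interface (the names appear as memory.kmem.* once the memory controller prefix is applied). A small program along these lines would set a limit and read back the usage; the mount point and group name are assumptions for the example and need adjusting locally.

#include <stdio.h>

/* Hypothetical cgroup path; adjust to the local memcg mount and group. */
#define DEMO_CG "/sys/fs/cgroup/memory/demo"

static int demo_write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* Limiting kmem for the first time turns accounting on for the group. */
	if (demo_write_file(DEMO_CG "/memory.kmem.limit_in_bytes", "67108864"))
		return 1;

	f = fopen(DEMO_CG "/memory.kmem.usage_in_bytes", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("kmem usage: %s", buf);
	if (f)
		fclose(f);
	return 0;
}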
@@ -4814,16 +5920,29 @@ out_free:
}
/*
- * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
- * but in process context. The work_freeing structure is overlaid
- * on the rcu_freeing structure, which itself is overlaid on memsw.
+ * At destroying mem_cgroup, references from swap_cgroup can remain.
+ * (scanning all at force_empty is too costly...)
+ *
+ * Instead of clearing all references at force_empty, we remember
+ * the number of reference from swap_cgroup and free mem_cgroup when
+ * it goes down to 0.
+ *
+ * Removal of cgroup itself succeeds regardless of refs from swap.
*/
-static void free_work(struct work_struct *work)
+
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
- struct mem_cgroup *memcg;
+ int node;
int size = sizeof(struct mem_cgroup);
- memcg = container_of(work, struct mem_cgroup, work_freeing);
+ mem_cgroup_remove_from_trees(memcg);
+ free_css_id(&mem_cgroup_subsys, &memcg->css);
+
+ for_each_node(node)
+ free_mem_cgroup_per_zone_info(memcg, node);
+
+ free_percpu(memcg->stat);
+
/*
* We need to make sure that (at least for now), the jump label
* destruction code runs outside of the cgroup lock. This is because
@@ -4835,45 +5954,34 @@ static void free_work(struct work_struct *work)
* to move this code around, and make sure it is outside
* the cgroup_lock.
*/
- disarm_sock_keys(memcg);
+ disarm_static_keys(memcg);
if (size < PAGE_SIZE)
kfree(memcg);
else
vfree(memcg);
}
-static void free_rcu(struct rcu_head *rcu_head)
-{
- struct mem_cgroup *memcg;
-
- memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
- INIT_WORK(&memcg->work_freeing, free_work);
- schedule_work(&memcg->work_freeing);
-}
/*
- * At destroying mem_cgroup, references from swap_cgroup can remain.
- * (scanning all at force_empty is too costly...)
- *
- * Instead of clearing all references at force_empty, we remember
- * the number of reference from swap_cgroup and free mem_cgroup when
- * it goes down to 0.
- *
- * Removal of cgroup itself succeeds regardless of refs from swap.
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
*/
-
-static void __mem_cgroup_free(struct mem_cgroup *memcg)
+static void free_work(struct work_struct *work)
{
- int node;
+ struct mem_cgroup *memcg;
- mem_cgroup_remove_from_trees(memcg);
- free_css_id(&mem_cgroup_subsys, &memcg->css);
+ memcg = container_of(work, struct mem_cgroup, work_freeing);
+ __mem_cgroup_free(memcg);
+}
- for_each_node(node)
- free_mem_cgroup_per_zone_info(memcg, node);
+static void free_rcu(struct rcu_head *rcu_head)
+{
+ struct mem_cgroup *memcg;
- free_percpu(memcg->stat);
- call_rcu(&memcg->rcu_freeing, free_rcu);
+ memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+ INIT_WORK(&memcg->work_freeing, free_work);
+ schedule_work(&memcg->work_freeing);
}
static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -4885,7 +5993,7 @@ static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
{
if (atomic_sub_and_test(count, &memcg->refcnt)) {
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- __mem_cgroup_free(memcg);
+ call_rcu(&memcg->rcu_freeing, free_rcu);
if (parent)
mem_cgroup_put(parent);
}
@@ -4982,7 +6090,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
&per_cpu(memcg_stock, cpu);
INIT_WORK(&stock->work, drain_local_stock);
}
- hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
} else {
parent = mem_cgroup_from_cont(cont->parent);
memcg->use_hierarchy = parent->use_hierarchy;
@@ -4992,6 +6099,8 @@ mem_cgroup_css_alloc(struct cgroup *cont)
if (parent && parent->use_hierarchy) {
res_counter_init(&memcg->res, &parent->res);
res_counter_init(&memcg->memsw, &parent->memsw);
+ res_counter_init(&memcg->kmem, &parent->kmem);
+
/*
* We increment refcnt of the parent to ensure that we can
* safely access it on res_counter_charge/uncharge.
@@ -5002,6 +6111,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
} else {
res_counter_init(&memcg->res, NULL);
res_counter_init(&memcg->memsw, NULL);
+ res_counter_init(&memcg->kmem, NULL);
/*
* Deeper hierachy with use_hierarchy == false doesn't make
* much sense so let cgroup subsystem know about this
@@ -5041,6 +6151,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
mem_cgroup_reparent_charges(memcg);
+ mem_cgroup_destroy_all_caches(memcg);
}
static void mem_cgroup_css_free(struct cgroup *cont)
@@ -5644,6 +6755,19 @@ struct cgroup_subsys mem_cgroup_subsys = {
.use_id = 1,
};
+/*
+ * The rest of init is performed during ->css_alloc() for root css which
+ * happens before initcalls. hotcpu_notifier() can't be done together as
+ * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
+ * dependency. Do it from a subsys_initcall().
+ */
+static int __init mem_cgroup_init(void)
+{
+ hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+ return 0;
+}
+subsys_initcall(mem_cgroup_init);
+
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 108c52fa60f..c6e4dd3e1c0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -402,7 +402,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
struct anon_vma *av;
pgoff_t pgoff;
- av = page_lock_anon_vma(page);
+ av = page_lock_anon_vma_read(page);
if (av == NULL) /* Not actually mapped anymore */
return;
@@ -423,7 +423,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
}
}
read_unlock(&tasklist_lock);
- page_unlock_anon_vma(av);
+ page_unlock_anon_vma_read(av);
}
/*
@@ -1566,7 +1566,8 @@ int soft_offline_page(struct page *page, int flags)
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
- false, MIGRATE_SYNC);
+ false, MIGRATE_SYNC,
+ MR_MEMORY_FAILURE);
if (ret) {
putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
index 76537738563..e0a9b0ce4f1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,8 @@
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
+#include <linux/migrate.h>
+#include <linux/string.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -717,20 +719,6 @@ static inline bool is_cow_mapping(vm_flags_t flags)
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
-#ifndef is_zero_pfn
-static inline int is_zero_pfn(unsigned long pfn)
-{
- return pfn == zero_pfn;
-}
-#endif
-
-#ifndef my_zero_pfn
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- return zero_pfn;
-}
-#endif
-
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
@@ -1250,7 +1238,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
BUG();
}
#endif
- split_huge_page_pmd(vma->vm_mm, pmd);
+ split_huge_page_pmd(vma, addr, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
goto next;
/* fall through */
@@ -1517,9 +1505,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
goto out;
}
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ goto no_page_table;
if (pmd_trans_huge(*pmd)) {
if (flags & FOLL_SPLIT) {
- split_huge_page_pmd(mm, pmd);
+ split_huge_page_pmd(vma, address, pmd);
goto split_fallthrough;
}
spin_lock(&mm->page_table_lock);
@@ -1546,6 +1536,8 @@ split_fallthrough:
pte = *ptep;
if (!pte_present(pte))
goto no_page;
+ if ((flags & FOLL_NUMA) && pte_numa(pte))
+ goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
@@ -1697,6 +1689,19 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+ /*
+ * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+ * would be called on PROT_NONE ranges. We must never invoke
+ * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+ * page faults would unprotect the PROT_NONE ranges if
+ * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+ * bitflag. So to avoid that, don't set FOLL_NUMA if
+ * FOLL_FORCE is set.
+ */
+ if (!(gup_flags & FOLL_FORCE))
+ gup_flags |= FOLL_NUMA;
+
i = 0;
do {
@@ -2794,13 +2799,8 @@ unlock:
oom_free_new:
page_cache_release(new_page);
oom:
- if (old_page) {
- if (page_mkwrite) {
- unlock_page(old_page);
- page_cache_release(old_page);
- }
+ if (old_page)
page_cache_release(old_page);
- }
return VM_FAULT_OOM;
unwritable_page:
@@ -3431,6 +3431,170 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
+int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int current_nid)
+{
+ get_page(page);
+
+ count_vm_numa_event(NUMA_HINT_FAULTS);
+ if (current_nid == numa_node_id())
+ count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+ return mpol_misplaced(page, vma, addr);
+}
+
+int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
+{
+ struct page *page = NULL;
+ spinlock_t *ptl;
+ int current_nid = -1;
+ int target_nid;
+ bool migrated = false;
+
+ /*
+ * The "pte" at this point cannot be used safely without
+ * validation through pte_unmap_same(). It's of NUMA type but
+ * the pfn may be screwed if the read is non atomic.
+ *
+ * ptep_modify_prot_start is not called as this is clearing
+ * the _PAGE_NUMA bit and it is not really expected that there
+ * would be concurrent hardware modifications to the PTE.
+ */
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ if (unlikely(!pte_same(*ptep, pte))) {
+ pte_unmap_unlock(ptep, ptl);
+ goto out;
+ }
+
+ pte = pte_mknonnuma(pte);
+ set_pte_at(mm, addr, ptep, pte);
+ update_mmu_cache(vma, addr, ptep);
+
+ page = vm_normal_page(vma, addr, pte);
+ if (!page) {
+ pte_unmap_unlock(ptep, ptl);
+ return 0;
+ }
+
+ current_nid = page_to_nid(page);
+ target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+ pte_unmap_unlock(ptep, ptl);
+ if (target_nid == -1) {
+ /*
+ * Account for the fault against the current node if it is not
+ * being replaced regardless of where the page is located.
+ */
+ current_nid = numa_node_id();
+ put_page(page);
+ goto out;
+ }
+
+ /* Migrate to the requested node */
+ migrated = migrate_misplaced_page(page, target_nid);
+ if (migrated)
+ current_nid = target_nid;
+
+out:
+ if (current_nid != -1)
+ task_numa_fault(current_nid, 1, migrated);
+ return 0;
+}
+
+/* NUMA hinting page fault entry point for regular pmds */
+#ifdef CONFIG_NUMA_BALANCING
+static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ pmd_t pmd;
+ pte_t *pte, *orig_pte;
+ unsigned long _addr = addr & PMD_MASK;
+ unsigned long offset;
+ spinlock_t *ptl;
+ bool numa = false;
+ int local_nid = numa_node_id();
+
+ spin_lock(&mm->page_table_lock);
+ pmd = *pmdp;
+ if (pmd_numa(pmd)) {
+ set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
+ numa = true;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ if (!numa)
+ return 0;
+
+ /* we're in a page fault so some vma must be in the range */
+ BUG_ON(!vma);
+ BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
+ offset = max(_addr, vma->vm_start) & ~PMD_MASK;
+ VM_BUG_ON(offset >= PMD_SIZE);
+ orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
+ pte += offset >> PAGE_SHIFT;
+ for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
+ pte_t pteval = *pte;
+ struct page *page;
+ int curr_nid = local_nid;
+ int target_nid;
+ bool migrated;
+ if (!pte_present(pteval))
+ continue;
+ if (!pte_numa(pteval))
+ continue;
+ if (addr >= vma->vm_end) {
+ vma = find_vma(mm, addr);
+ /* there's a pte present so there must be a vma */
+ BUG_ON(!vma);
+ BUG_ON(addr < vma->vm_start);
+ }
+ if (pte_numa(pteval)) {
+ pteval = pte_mknonnuma(pteval);
+ set_pte_at(mm, addr, pte, pteval);
+ }
+ page = vm_normal_page(vma, addr, pteval);
+ if (unlikely(!page))
+ continue;
+ /* only check non-shared pages */
+ if (unlikely(page_mapcount(page) != 1))
+ continue;
+
+ /*
+ * Note that the NUMA fault is later accounted either to
+ * the node that is currently running the task or to the
+ * node the page is migrated to.
+ */
+ curr_nid = local_nid;
+ target_nid = numa_migrate_prep(page, vma, addr,
+ page_to_nid(page));
+ if (target_nid == -1) {
+ put_page(page);
+ continue;
+ }
+
+ /* Migrate to the requested node */
+ pte_unmap_unlock(pte, ptl);
+ migrated = migrate_misplaced_page(page, target_nid);
+ if (migrated)
+ curr_nid = target_nid;
+ task_numa_fault(curr_nid, 1, migrated);
+
+ pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+ }
+ pte_unmap_unlock(orig_pte, ptl);
+
+ return 0;
+}
+#else
+static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ BUG();
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
/*
* These routines also need to handle stuff like marking pages dirty
* and/or accessed for architectures that don't do it in hardware (most
@@ -3469,6 +3633,9 @@ int handle_pte_fault(struct mm_struct *mm,
pte, pmd, flags, entry);
}
+ if (pte_numa(entry))
+ return do_numa_page(mm, vma, address, entry, pte, pmd);
+
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
@@ -3539,8 +3706,11 @@ retry:
if (pmd_trans_huge(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
- if (dirty && !pmd_write(orig_pmd) &&
- !pmd_trans_splitting(orig_pmd)) {
+ if (pmd_numa(orig_pmd))
+ return do_huge_pmd_numa_page(mm, vma, address,
+ orig_pmd, pmd);
+
+ if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
/*
@@ -3555,16 +3725,21 @@ retry:
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
}
+
return 0;
}
}
+ if (pmd_numa(*pmd))
+ return do_pmd_numa_page(mm, vma, address, pmd);
+
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
* run pte_offset_map on the pmd, if an huge pmd could
* materialize from under us from a different thread.
*/
- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
+ if (unlikely(pmd_none(*pmd)) &&
+ unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
@@ -3944,15 +4119,12 @@ void print_vma_addr(char *prefix, unsigned long ip)
struct file *f = vma->vm_file;
char *buf = (char *)__get_free_page(GFP_KERNEL);
if (buf) {
- char *p, *s;
+ char *p;
p = d_path(&f->f_path, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
- s = strrchr(p, '/');
- if (s)
- p = s+1;
- printk("%s%s[%lx+%lx]", prefix, p,
+ printk("%s%s[%lx+%lx]", prefix, kbasename(p),
vma->vm_start,
vma->vm_end - vma->vm_start);
free_page((unsigned long)buf);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index de9cb14ae75..d04ed87bfac 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -106,6 +106,7 @@ static void get_page_bootmem(unsigned long info, struct page *page,
void __ref put_page_bootmem(struct page *page)
{
unsigned long type;
+ static DEFINE_MUTEX(ppb_lock);
type = (unsigned long) page->lru.next;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
@@ -115,7 +116,14 @@ void __ref put_page_bootmem(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
+
+ /*
+ * Please refer to comment for __free_pages_bootmem()
+ * for why we serialize here.
+ */
+ mutex_lock(&ppb_lock);
__free_pages_bootmem(page, 0);
+ mutex_unlock(&ppb_lock);
}
}
@@ -581,11 +589,22 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
return 0;
}
+#ifdef CONFIG_MOVABLE_NODE
+/*
+ * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
+ * normal memory.
+ */
+static bool can_online_high_movable(struct zone *zone)
+{
+ return true;
+}
+#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
+#endif /* CONFIG_MOVABLE_NODE */
/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
@@ -595,13 +614,15 @@ static void node_states_check_changes_online(unsigned long nr_pages,
enum zone_type zone_last = ZONE_NORMAL;
/*
- * If we have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
- * which have 0...ZONE_NORMAL, set zone_last to ZONE_NORMAL.
+ * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
+ * contains nodes which have zones of 0...ZONE_NORMAL,
+ * set zone_last to ZONE_NORMAL.
*
- * If we don't have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
- * which have 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
+ * If we have neither HIGHMEM nor a movable node,
+ * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
+ * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
*/
- if (N_HIGH_MEMORY == N_NORMAL_MEMORY)
+ if (N_MEMORY == N_NORMAL_MEMORY)
zone_last = ZONE_MOVABLE;
/*
@@ -615,12 +636,34 @@ static void node_states_check_changes_online(unsigned long nr_pages,
else
arg->status_change_nid_normal = -1;
+#ifdef CONFIG_HIGHMEM
+ /*
+ * If we have movable node, node_states[N_HIGH_MEMORY]
+ * contains nodes which have zones of 0...ZONE_HIGHMEM,
+ * set zone_last to ZONE_HIGHMEM.
+ *
+ * If we don't have movable node, node_states[N_NORMAL_MEMORY]
+ * contains nodes which have zones of 0...ZONE_MOVABLE,
+ * set zone_last to ZONE_MOVABLE.
+ */
+ zone_last = ZONE_HIGHMEM;
+ if (N_MEMORY == N_HIGH_MEMORY)
+ zone_last = ZONE_MOVABLE;
+
+ if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
+ arg->status_change_nid_high = nid;
+ else
+ arg->status_change_nid_high = -1;
+#else
+ arg->status_change_nid_high = arg->status_change_nid_normal;
+#endif
+
/*
* if the node don't have memory befor online, we will need to
- * set the node to node_states[N_HIGH_MEMORY] after the memory
+ * set the node to node_states[N_MEMORY] after the memory
* is online.
*/
- if (!node_state(nid, N_HIGH_MEMORY))
+ if (!node_state(nid, N_MEMORY))
arg->status_change_nid = nid;
else
arg->status_change_nid = -1;
@@ -631,7 +674,10 @@ static void node_states_set_node(int node, struct memory_notify *arg)
if (arg->status_change_nid_normal >= 0)
node_set_state(node, N_NORMAL_MEMORY);
- node_set_state(node, N_HIGH_MEMORY);
+ if (arg->status_change_nid_high >= 0)
+ node_set_state(node, N_HIGH_MEMORY);
+
+ node_set_state(node, N_MEMORY);
}
@@ -713,6 +759,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
return ret;
}
+ zone->managed_pages += onlined_pages;
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
if (onlined_pages) {
@@ -1011,7 +1058,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* migrate_pages returns # of failed pages.
*/
ret = migrate_pages(&source, alloc_migrate_target, 0,
- true, MIGRATE_SYNC);
+ true, MIGRATE_SYNC,
+ MR_MEMORY_HOTPLUG);
if (ret)
putback_lru_pages(&source);
}
@@ -1066,6 +1114,16 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
+#ifdef CONFIG_MOVABLE_NODE
+/*
+ * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
+ * normal memory.
+ */
+static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
+{
+ return true;
+}
+#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
@@ -1089,6 +1147,7 @@ static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
*/
return present_pages == 0;
}
+#endif /* CONFIG_MOVABLE_NODE */
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
@@ -1099,13 +1158,15 @@ static void node_states_check_changes_offline(unsigned long nr_pages,
enum zone_type zt, zone_last = ZONE_NORMAL;
/*
- * If we have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
- * which have 0...ZONE_NORMAL, set zone_last to ZONE_NORMAL.
+ * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
+ * contains nodes which have zones of 0...ZONE_NORMAL,
+ * set zone_last to ZONE_NORMAL.
*
- * If we don't have HIGHMEM, node_states[N_NORMAL_MEMORY] contains nodes
- * which have 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
+ * If we have neither HIGHMEM nor a movable node,
+ * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
+ * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
*/
- if (N_HIGH_MEMORY == N_NORMAL_MEMORY)
+ if (N_MEMORY == N_NORMAL_MEMORY)
zone_last = ZONE_MOVABLE;
/*
@@ -1122,6 +1183,30 @@ static void node_states_check_changes_offline(unsigned long nr_pages,
else
arg->status_change_nid_normal = -1;
+#ifdef CONFIG_HIGHMEM
+ /*
+ * If we have movable node, node_states[N_HIGH_MEMORY]
+ * contains nodes which have zones of 0...ZONE_HIGHMEM,
+ * set zone_last to ZONE_HIGHMEM.
+ *
+ * If we don't have movable node, node_states[N_NORMAL_MEMORY]
+ * contains nodes which have zones of 0...ZONE_MOVABLE,
+ * set zone_last to ZONE_MOVABLE.
+ */
+ zone_last = ZONE_HIGHMEM;
+ if (N_MEMORY == N_HIGH_MEMORY)
+ zone_last = ZONE_MOVABLE;
+
+ for (; zt <= zone_last; zt++)
+ present_pages += pgdat->node_zones[zt].present_pages;
+ if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
+ arg->status_change_nid_high = zone_to_nid(zone);
+ else
+ arg->status_change_nid_high = -1;
+#else
+ arg->status_change_nid_high = arg->status_change_nid_normal;
+#endif
+
/*
* node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
*/
@@ -1146,9 +1231,13 @@ static void node_states_clear_node(int node, struct memory_notify *arg)
if (arg->status_change_nid_normal >= 0)
node_clear_state(node, N_NORMAL_MEMORY);
- if ((N_HIGH_MEMORY != N_NORMAL_MEMORY) &&
- (arg->status_change_nid >= 0))
+ if ((N_MEMORY != N_NORMAL_MEMORY) &&
+ (arg->status_change_nid_high >= 0))
node_clear_state(node, N_HIGH_MEMORY);
+
+ if ((N_MEMORY != N_HIGH_MEMORY) &&
+ (arg->status_change_nid >= 0))
+ node_clear_state(node, N_MEMORY);
}
static int __ref __offline_pages(unsigned long start_pfn,
@@ -1230,10 +1319,10 @@ repeat:
goto repeat;
}
}
- /* drain all zone's lru pagevec, this is asyncronous... */
+ /* drain all zone's lru pagevec, this is asynchronous... */
lru_add_drain_all();
yield();
- /* drain pcp pages , this is synchrouns. */
+ /* drain pcp pages, this is synchronous. */
drain_all_pages();
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
@@ -1242,12 +1331,13 @@ repeat:
goto failed_removal;
}
printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
- /* Ok, all of our target is islaoted.
+ /* Ok, all of our target is isolated.
We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn);
/* reset pagetype flags and makes migrate type to be MOVABLE */
undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
/* removal success */
+ zone->managed_pages -= offlined_pages;
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
totalram_pages -= offlined_pages;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 05b28361a39..e2df1c1fb41 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -90,6 +90,7 @@
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -117,6 +118,26 @@ static struct mempolicy default_policy = {
.flags = MPOL_F_LOCAL,
};
+static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+static struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ struct mempolicy *pol = p->mempolicy;
+ int node;
+
+ if (!pol) {
+ node = numa_node_id();
+ if (node != -1)
+ pol = &preferred_node_policy[node];
+
+ /* preferred_node_policy is not initialised early in boot */
+ if (!pol->mode)
+ pol = NULL;
+ }
+
+ return pol;
+}
+
static const struct mempolicy_operations {
int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
/*
@@ -212,9 +233,9 @@ static int mpol_set_nodemask(struct mempolicy *pol,
/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
if (pol == NULL)
return 0;
- /* Check N_HIGH_MEMORY */
+ /* Check N_MEMORY */
nodes_and(nsc->mask1,
- cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
+ cpuset_current_mems_allowed, node_states[N_MEMORY]);
VM_BUG_ON(!nodes);
if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
@@ -254,7 +275,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
if (mode == MPOL_DEFAULT) {
if (nodes && !nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
- return NULL; /* simply delete any existing policy */
+ return NULL;
}
VM_BUG_ON(!nodes);
@@ -269,6 +290,10 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
(flags & MPOL_F_RELATIVE_NODES)))
return ERR_PTR(-EINVAL);
}
+ } else if (mode == MPOL_LOCAL) {
+ if (!nodes_empty(*nodes))
+ return ERR_PTR(-EINVAL);
+ mode = MPOL_PREFERRED;
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -511,7 +536,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- split_huge_page_pmd(vma->vm_mm, pmd);
+ split_huge_page_pmd(vma, addr, pmd);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
if (check_pte_range(vma, pmd, addr, next, nodes,
@@ -561,6 +586,36 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
return 0;
}
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * This is used to mark a range of virtual addresses as inaccessible.
+ * These are later cleared by a NUMA hinting fault. Depending on these
+ * faults, pages may be migrated for better NUMA placement.
+ *
+ * This is assuming that NUMA faults are handled using PROT_NONE. If
+ * an architecture makes a different choice, it will need further
+ * changes to the core.
+ */
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ int nr_updated;
+ BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
+
+ nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
+ if (nr_updated)
+ count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+
+ return nr_updated;
+}
+#else
+static unsigned long change_prot_numa(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+
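A rough sketch of how a periodic scanner could drive this helper across a VMA; the wrapper name and the 16-page chunk size below are illustrative assumptions, not code from this series:

/* Hypothetical caller: arm NUMA hinting faults over a VMA in small chunks. */
static void scan_vma_for_numa_hints(struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;

	while (addr < vma->vm_end) {
		/* Assumed chunk size; a real scanner picks its own granularity. */
		unsigned long end = min(addr + (16UL << PAGE_SHIFT), vma->vm_end);

		/*
		 * Marks the PTEs _PAGE_NUMA (PROT_NONE based); the next access
		 * takes a hinting fault that clears it and may migrate the page.
		 */
		change_prot_numa(vma, addr, end);
		addr = end;
	}
}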
/*
* Check if all pages in a range are on a set of nodes.
* If pagelist != NULL then isolate pages from the LRU and
@@ -579,22 +634,32 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return ERR_PTR(-EFAULT);
prev = NULL;
for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+ unsigned long endvma = vma->vm_end;
+
+ if (endvma > end)
+ endvma = end;
+ if (vma->vm_start > start)
+ start = vma->vm_start;
+
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
if (!vma->vm_next && vma->vm_end < end)
return ERR_PTR(-EFAULT);
if (prev && prev->vm_end < vma->vm_start)
return ERR_PTR(-EFAULT);
}
- if (!is_vm_hugetlb_page(vma) &&
- ((flags & MPOL_MF_STRICT) ||
+
+ if (is_vm_hugetlb_page(vma))
+ goto next;
+
+ if (flags & MPOL_MF_LAZY) {
+ change_prot_numa(vma, start, endvma);
+ goto next;
+ }
+
+ if ((flags & MPOL_MF_STRICT) ||
((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
- vma_migratable(vma)))) {
- unsigned long endvma = vma->vm_end;
+ vma_migratable(vma))) {
- if (endvma > end)
- endvma = end;
- if (vma->vm_start > start)
- start = vma->vm_start;
err = check_pgd_range(vma, start, endvma, nodes,
flags, private);
if (err) {
@@ -602,6 +667,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
break;
}
}
+next:
prev = vma;
}
return first;
@@ -961,7 +1027,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest,
- false, MIGRATE_SYNC);
+ false, MIGRATE_SYNC,
+ MR_SYSCALL);
if (err)
putback_lru_pages(&pagelist);
}
@@ -1133,8 +1200,7 @@ static long do_mbind(unsigned long start, unsigned long len,
int err;
LIST_HEAD(pagelist);
- if (flags & ~(unsigned long)(MPOL_MF_STRICT |
- MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if (flags & ~(unsigned long)MPOL_MF_VALID)
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
@@ -1157,6 +1223,9 @@ static long do_mbind(unsigned long start, unsigned long len,
if (IS_ERR(new))
return PTR_ERR(new);
+ if (flags & MPOL_MF_LAZY)
+ new->flags |= MPOL_F_MOF;
+
/*
* If we are using the default policy then operation
* on discontinuous address spaces is okay after all
@@ -1193,21 +1262,24 @@ static long do_mbind(unsigned long start, unsigned long len,
vma = check_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
- err = PTR_ERR(vma);
- if (!IS_ERR(vma)) {
- int nr_failed = 0;
-
+ err = PTR_ERR(vma); /* maybe ... */
+ if (!IS_ERR(vma))
err = mbind_range(mm, start, end, new);
+ if (!err) {
+ int nr_failed = 0;
+
if (!list_empty(&pagelist)) {
+ WARN_ON_ONCE(flags & MPOL_MF_LAZY);
nr_failed = migrate_pages(&pagelist, new_vma_page,
(unsigned long)vma,
- false, MIGRATE_SYNC);
+ false, MIGRATE_SYNC,
+ MR_MEMPOLICY_MBIND);
if (nr_failed)
putback_lru_pages(&pagelist);
}
- if (!err && nr_failed && (flags & MPOL_MF_STRICT))
+ if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
} else
putback_lru_pages(&pagelist);
@@ -1388,7 +1460,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
goto out_put;
}
- if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
+ if (!nodes_subset(*new, node_states[N_MEMORY])) {
err = -EINVAL;
goto out_put;
}
@@ -1546,7 +1618,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
struct mempolicy *get_vma_policy(struct task_struct *task,
struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = task->mempolicy;
+ struct mempolicy *pol = get_task_policy(task);
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -1956,7 +2028,7 @@ retry_cpuset:
*/
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
- struct mempolicy *pol = current->mempolicy;
+ struct mempolicy *pol = get_task_policy(current);
struct page *page;
unsigned int cpuset_mems_cookie;
@@ -2060,7 +2132,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
*/
/* lookup first element intersecting start-end */
-/* Caller holds sp->mutex */
+/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
@@ -2124,13 +2196,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
if (!sp->root.rb_node)
return NULL;
- mutex_lock(&sp->mutex);
+ spin_lock(&sp->lock);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
- mutex_unlock(&sp->mutex);
+ spin_unlock(&sp->lock);
return pol;
}
@@ -2140,6 +2212,115 @@ static void sp_free(struct sp_node *n)
kmem_cache_free(sn_cache, n);
}
+/**
+ * mpol_misplaced - check whether current page node is valid in policy
+ *
+ * @page: page to be checked
+ * @vma: vm area where page is mapped
+ * @addr: virtual address where page is mapped
+ *
+ * Look up the current policy node id for vma,addr and compare it to the
+ * page's node id.
+ *
+ * Returns:
+ * -1 - not misplaced, page is in the right node
+ * node - node id where the page should be
+ *
+ * Policy determination "mimics" alloc_page_vma().
+ * Called from fault path where we know the vma and faulting address.
+ */
+int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
+{
+ struct mempolicy *pol;
+ struct zone *zone;
+ int curnid = page_to_nid(page);
+ unsigned long pgoff;
+ int polnid = -1;
+ int ret = -1;
+
+ BUG_ON(!vma);
+
+ pol = get_vma_policy(current, vma, addr);
+ if (!(pol->flags & MPOL_F_MOF))
+ goto out;
+
+ switch (pol->mode) {
+ case MPOL_INTERLEAVE:
+ BUG_ON(addr >= vma->vm_end);
+ BUG_ON(addr < vma->vm_start);
+
+ pgoff = vma->vm_pgoff;
+ pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
+ polnid = offset_il_node(pol, vma, pgoff);
+ break;
+
+ case MPOL_PREFERRED:
+ if (pol->flags & MPOL_F_LOCAL)
+ polnid = numa_node_id();
+ else
+ polnid = pol->v.preferred_node;
+ break;
+
+ case MPOL_BIND:
+ /*
+ * allows binding to multiple nodes.
+ * use current page if in policy nodemask,
+ * else select nearest allowed node, if any.
+ * If no allowed nodes, use current [!misplaced].
+ */
+ if (node_isset(curnid, pol->v.nodes))
+ goto out;
+ (void)first_zones_zonelist(
+ node_zonelist(numa_node_id(), GFP_HIGHUSER),
+ gfp_zone(GFP_HIGHUSER),
+ &pol->v.nodes, &zone);
+ polnid = zone->node;
+ break;
+
+ default:
+ BUG();
+ }
+
+ /* Migrate the page towards the node whose CPU is referencing it */
+ if (pol->flags & MPOL_F_MORON) {
+ int last_nid;
+
+ polnid = numa_node_id();
+
+ /*
+ * Multi-stage node selection is used in conjunction
+ * with a periodic migration fault to build a temporal
+ * task<->page relation. By using a two-stage filter we
+ * remove short/unlikely relations.
+ *
+ * Using P(p) ~ n_p / n_t as per frequentist
+ * probability, we can equate a task's usage of a
+ * particular page (n_p) per total usage of this
+ * page (n_t) (in a given time-span) to a probability.
+ *
+ * Our periodic faults will sample this probability and
+ * getting the same result twice in a row, given these
+ * samples are fully independent, is then given by
+ * P(n)^2, provided our sample period is sufficiently
+ * short compared to the usage pattern.
+ *
+ * This quadratic squishes small probabilities, making
+ * it less likely we act on an unlikely task<->page
+ * relation.
+ */
+ last_nid = page_xchg_last_nid(page, polnid);
+ if (last_nid != polnid)
+ goto out;
+ }
+
+ if (curnid != polnid)
+ ret = polnid;
+out:
+ mpol_cond_put(pol);
+
+ return ret;
+}
+
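To put numbers on the two-stage filter implemented just above (illustrative only): with P(p) = n_p/n_t = 0.1, i.e. the faulting task accounts for one in ten accesses to the page, the chance that two consecutive independent hinting faults record the same node for that task is 0.1^2 = 0.01, so the page migrates towards that node roughly 1% of the time; at P(p) = 0.9 the product is 0.81. Squaring the probability therefore suppresses weak task<->page relations while leaving strong ones mostly intact.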
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
@@ -2147,6 +2328,14 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
sp_free(n);
}
+static void sp_node_init(struct sp_node *node, unsigned long start,
+ unsigned long end, struct mempolicy *pol)
+{
+ node->start = start;
+ node->end = end;
+ node->policy = pol;
+}
+
static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
@@ -2163,10 +2352,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
return NULL;
}
newpol->flags |= MPOL_F_SHARED;
-
- n->start = start;
- n->end = end;
- n->policy = newpol;
+ sp_node_init(n, start, end, newpol);
return n;
}
@@ -2176,9 +2362,12 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
struct sp_node *n;
+ struct sp_node *n_new = NULL;
+ struct mempolicy *mpol_new = NULL;
int ret = 0;
- mutex_lock(&sp->mutex);
+restart:
+ spin_lock(&sp->lock);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
@@ -2191,14 +2380,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
- struct sp_node *new2;
- new2 = sp_alloc(end, n->end, n->policy);
- if (!new2) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!n_new)
+ goto alloc_new;
+
+ *mpol_new = *n->policy;
+ atomic_set(&mpol_new->refcnt, 1);
+ sp_node_init(n_new, n->end, end, mpol_new);
+ sp_insert(sp, n_new);
n->end = start;
- sp_insert(sp, new2);
+ n_new = NULL;
+ mpol_new = NULL;
break;
} else
n->end = start;
@@ -2209,9 +2400,27 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
}
if (new)
sp_insert(sp, new);
-out:
- mutex_unlock(&sp->mutex);
+ spin_unlock(&sp->lock);
+ ret = 0;
+
+err_out:
+ if (mpol_new)
+ mpol_put(mpol_new);
+ if (n_new)
+ kmem_cache_free(sn_cache, n_new);
+
return ret;
+
+alloc_new:
+ spin_unlock(&sp->lock);
+ ret = -ENOMEM;
+ n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ if (!n_new)
+ goto err_out;
+ mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+ if (!mpol_new)
+ goto err_out;
+ goto restart;
}
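The control flow added here (drop the spinlock, allocate with GFP_KERNEL, then retry from the top) is a general pattern for doing sleeping allocations that are needed under a spinlock; a minimal self-contained sketch of the same idea, with made-up names, might look like:

/* Sketch only: preallocate outside the lock, retry, free any unused preallocation. */
struct thing { int payload; };

static int install_thing(spinlock_t *lock, struct thing **slot)
{
	struct thing *prealloc = NULL;

restart:
	spin_lock(lock);
	if (!*slot) {
		if (!prealloc) {
			/* Cannot sleep under a spinlock, so allocate outside it. */
			spin_unlock(lock);
			prealloc = kmalloc(sizeof(*prealloc), GFP_KERNEL);
			if (!prealloc)
				return -ENOMEM;
			goto restart;	/* state may have changed meanwhile */
		}
		*slot = prealloc;
		prealloc = NULL;
	}
	spin_unlock(lock);

	kfree(prealloc);	/* no-op if the preallocation was consumed */
	return 0;
}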
/**
@@ -2229,7 +2438,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
int ret;
sp->root = RB_ROOT; /* empty tree == default mempolicy */
- mutex_init(&sp->mutex);
+ spin_lock_init(&sp->lock);
if (mpol) {
struct vm_area_struct pvma;
@@ -2295,16 +2504,60 @@ void mpol_free_shared_policy(struct shared_policy *p)
if (!p->root.rb_node)
return;
- mutex_lock(&p->mutex);
+ spin_lock(&p->lock);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
sp_delete(p, n);
}
- mutex_unlock(&p->mutex);
+ spin_unlock(&p->lock);
}
+#ifdef CONFIG_NUMA_BALANCING
+static bool __initdata numabalancing_override;
+
+static void __init check_numabalancing_enable(void)
+{
+ bool numabalancing_default = false;
+
+ if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
+ numabalancing_default = true;
+
+ if (nr_node_ids > 1 && !numabalancing_override) {
+ printk(KERN_INFO "Enabling automatic NUMA balancing. "
+ "Configure with numa_balancing= or sysctl");
+ set_numabalancing_state(numabalancing_default);
+ }
+}
+
+static int __init setup_numabalancing(char *str)
+{
+ int ret = 0;
+ if (!str)
+ goto out;
+ numabalancing_override = true;
+
+ if (!strcmp(str, "enable")) {
+ set_numabalancing_state(true);
+ ret = 1;
+ } else if (!strcmp(str, "disable")) {
+ set_numabalancing_state(false);
+ ret = 1;
+ }
+out:
+ if (!ret)
+ printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+
+ return ret;
+}
+__setup("numa_balancing=", setup_numabalancing);
+#else
+static inline void __init check_numabalancing_enable(void)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
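For reference, the only values the parser above accepts are the two literal strings it compares against: booting with numa_balancing=enable forces automatic NUMA balancing on, and numa_balancing=disable forces it off, overriding the default chosen by check_numabalancing_enable(). The log message also mentions a sysctl; in mainline that runtime knob is kernel.numa_balancing, though the sysctl itself is not part of this hunk.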
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
@@ -2320,13 +2573,22 @@ void __init numa_policy_init(void)
sizeof(struct sp_node),
0, SLAB_PANIC, NULL);
+ for_each_node(nid) {
+ preferred_node_policy[nid] = (struct mempolicy) {
+ .refcnt = ATOMIC_INIT(1),
+ .mode = MPOL_PREFERRED,
+ .flags = MPOL_F_MOF | MPOL_F_MORON,
+ .v = { .preferred_node = nid, },
+ };
+ }
+
/*
* Set interleaving policy for system init. Interleaving is only
* enabled across suitably sized nodes (default is >= 16MB), or
* fall back to the largest node if they're all smaller.
*/
nodes_clear(interleave_nodes);
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
unsigned long total_pages = node_present_pages(nid);
/* Preserve the largest node */
@@ -2346,6 +2608,8 @@ void __init numa_policy_init(void)
if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
printk("numa_policy_init: interleaving failed\n");
+
+ check_numabalancing_enable();
}
/* Reset policy of current process to default */
@@ -2359,44 +2623,34 @@ void numa_default_policy(void)
*/
/*
- * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
- * Used only for mpol_parse_str() and mpol_to_str()
+ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
*/
-#define MPOL_LOCAL MPOL_MAX
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
[MPOL_PREFERRED] = "prefer",
[MPOL_BIND] = "bind",
[MPOL_INTERLEAVE] = "interleave",
- [MPOL_LOCAL] = "local"
+ [MPOL_LOCAL] = "local",
};
#ifdef CONFIG_TMPFS
/**
- * mpol_parse_str - parse string to mempolicy
+ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
* @str: string containing mempolicy to parse
* @mpol: pointer to struct mempolicy pointer, returned on success.
- * @no_context: flag whether to "contextualize" the mempolicy
*
* Format of input:
* <mode>[=<flags>][:<nodelist>]
*
- * if @no_context is true, save the input nodemask in w.user_nodemask in
- * the returned mempolicy. This will be used to "clone" the mempolicy in
- * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
- * mount option. Note that if 'static' or 'relative' mode flags were
- * specified, the input nodemask will already have been saved. Saving
- * it again is redundant, but safe.
- *
* On success, returns 0, else 1
*/
-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+int mpol_parse_str(char *str, struct mempolicy **mpol)
{
struct mempolicy *new = NULL;
unsigned short mode;
- unsigned short uninitialized_var(mode_flags);
+ unsigned short mode_flags;
nodemask_t nodes;
char *nodelist = strchr(str, ':');
char *flags = strchr(str, '=');
@@ -2407,7 +2661,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
*nodelist++ = '\0';
if (nodelist_parse(nodelist, nodes))
goto out;
- if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
+ if (!nodes_subset(nodes, node_states[N_MEMORY]))
goto out;
} else
nodes_clear(nodes);
@@ -2415,12 +2669,12 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
if (flags)
*flags++ = '\0'; /* terminate mode string */
- for (mode = 0; mode <= MPOL_LOCAL; mode++) {
+ for (mode = 0; mode < MPOL_MAX; mode++) {
if (!strcmp(str, policy_modes[mode])) {
break;
}
}
- if (mode > MPOL_LOCAL)
+ if (mode >= MPOL_MAX)
goto out;
switch (mode) {
@@ -2441,7 +2695,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
* Default to online nodes with memory if no nodelist
*/
if (!nodelist)
- nodes = node_states[N_HIGH_MEMORY];
+ nodes = node_states[N_MEMORY];
break;
case MPOL_LOCAL:
/*
@@ -2484,24 +2738,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
if (IS_ERR(new))
goto out;
- if (no_context) {
- /* save for contextualization */
- new->w.user_nodemask = nodes;
- } else {
- int ret;
- NODEMASK_SCRATCH(scratch);
- if (scratch) {
- task_lock(current);
- ret = mpol_set_nodemask(new, &nodes, scratch);
- task_unlock(current);
- } else
- ret = -ENOMEM;
- NODEMASK_SCRATCH_FREE(scratch);
- if (ret) {
- mpol_put(new);
- goto out;
- }
- }
+ /*
+ * Save nodes for mpol_to_str() to show the tmpfs mount options
+ * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
+ */
+ if (mode != MPOL_PREFERRED)
+ new->v.nodes = nodes;
+ else if (nodelist)
+ new->v.preferred_node = first_node(nodes);
+ else
+ new->flags |= MPOL_F_LOCAL;
+
+ /*
+ * Save nodes for contextualization: this will be used to "clone"
+ * the mempolicy in a specific context [cpuset] at a later time.
+ */
+ new->w.user_nodemask = nodes;
+
err = 0;
out:
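As a usage sketch of the format documented above, this is roughly how a tmpfs "mpol=" option string reaches the parser. The wrapper function, option value and node range are illustrative assumptions (the nodelist only parses on a machine where those nodes have memory):

static void example_parse_tmpfs_mpol(void)
{
	/* The parser modifies the string in place, so it must be writable. */
	char mpol_opt[] = "bind=static:0-3";	/* as in: mount -o mpol=bind=static:0-3 */
	struct mempolicy *mpol;

	if (!mpol_parse_str(mpol_opt, &mpol))	/* returns 0 on success, 1 on error */
		pr_info("tmpfs example: MPOL_BIND with static nodes 0-3\n");
}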
@@ -2521,13 +2774,12 @@ out:
* @buffer: to contain formatted mempolicy string
* @maxlen: length of @buffer
* @pol: pointer to mempolicy to be formatted
- * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
*
* Convert a mempolicy into a string.
* Returns the number of characters in buffer (if positive)
* or an error (negative)
*/
-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
int l;
@@ -2553,7 +2805,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
case MPOL_PREFERRED:
nodes_clear(nodes);
if (flags & MPOL_F_LOCAL)
- mode = MPOL_LOCAL; /* pseudo-policy */
+ mode = MPOL_LOCAL;
else
node_set(pol->v.preferred_node, nodes);
break;
@@ -2561,10 +2813,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
- if (no_context)
- nodes = pol->w.user_nodemask;
- else
- nodes = pol->v.nodes;
+ nodes = pol->v.nodes;
break;
default:
diff --git a/mm/migrate.c b/mm/migrate.c
index 3f675ca0827..3b676b0c5c3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,9 @@
#include <asm/tlbflush.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/migrate.h>
+
#include "internal.h"
/*
@@ -293,7 +296,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
struct buffer_head *head, enum migrate_mode mode)
{
- int expected_count;
+ int expected_count = 0;
void **pslot;
if (!mapping) {
@@ -421,7 +424,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
*/
void migrate_page_copy(struct page *newpage, struct page *page)
{
- if (PageHuge(page))
+ if (PageHuge(page) || PageTransHuge(page))
copy_huge_page(newpage, page);
else
copy_highpage(newpage, page);
@@ -765,7 +768,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
*/
if (PageAnon(page)) {
/*
- * Only page_lock_anon_vma() understands the subtleties of
+ * Only page_lock_anon_vma_read() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
*/
anon_vma = page_get_anon_vma(page);
@@ -998,10 +1001,11 @@ out:
*/
int migrate_pages(struct list_head *from,
new_page_t get_new_page, unsigned long private, bool offlining,
- enum migrate_mode mode)
+ enum migrate_mode mode, int reason)
{
int retry = 1;
int nr_failed = 0;
+ int nr_succeeded = 0;
int pass = 0;
struct page *page;
struct page *page2;
@@ -1028,6 +1032,7 @@ int migrate_pages(struct list_head *from,
retry++;
break;
case MIGRATEPAGE_SUCCESS:
+ nr_succeeded++;
break;
default:
/* Permanent failure */
@@ -1038,6 +1043,12 @@ int migrate_pages(struct list_head *from,
}
rc = nr_failed + retry;
out:
+ if (nr_succeeded)
+ count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+ if (nr_failed)
+ count_vm_events(PGMIGRATE_FAIL, nr_failed);
+ trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
+
if (!swapwrite)
current->flags &= ~PF_SWAPWRITE;
@@ -1176,7 +1187,8 @@ set_status:
err = 0;
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm, 0, MIGRATE_SYNC);
+ (unsigned long)pm, 0, MIGRATE_SYNC,
+ MR_SYSCALL);
if (err)
putback_lru_pages(&pagelist);
}
@@ -1238,7 +1250,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
if (node < 0 || node >= MAX_NUMNODES)
goto out_pm;
- if (!node_state(node, N_HIGH_MEMORY))
+ if (!node_state(node, N_MEMORY))
goto out_pm;
err = -EACCES;
@@ -1440,4 +1452,317 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
}
return err;
}
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks which crude
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+ int nr_migrate_pages)
+{
+ int z;
+ for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+ struct zone *zone = pgdat->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (zone->all_unreclaimable)
+ continue;
+
+ /* Avoid waking kswapd by allocating pages_to_migrate pages. */
+ if (!zone_watermark_ok(zone, 0,
+ high_wmark_pages(zone) +
+ nr_migrate_pages,
+ 0, 0))
+ continue;
+ return true;
+ }
+ return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+ unsigned long data,
+ int **result)
+{
+ int nid = (int) data;
+ struct page *newpage;
+
+ newpage = alloc_pages_exact_node(nid,
+ (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+ __GFP_NOMEMALLOC | __GFP_NORETRY |
+ __GFP_NOWARN) &
+ ~GFP_IOFS, 0);
+ if (newpage)
+ page_xchg_last_nid(newpage, page_last_nid(page));
+
+ return newpage;
+}
+
+/*
+ * Page migration rate limiting control.
+ * Do not migrate more than ratelimit_pages in a migrate_interval_millisecs
+ * window of time. The defaults below say do not migrate more than 1280M per
+ * second. If a node is rate-limited then PTE NUMA updates are also
+ * rate-limited. However, as it is faults that reset the window, PTE updates
+ * will happen unconditionally if no fault has occurred within
+ * pteupdate_interval_millisecs of the throttle window closing.
+ */
+static unsigned int migrate_interval_millisecs __read_mostly = 100;
+static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
+static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
+
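To make the defaults above concrete: ratelimit_pages = 128 << (20 - PAGE_SHIFT) is 128MB expressed in base pages (32768 pages with 4K pages), and migrate_interval_millisecs is 100, so the ceiling works out to 128MB per 100ms window, i.e. the 1280M per second mentioned in the comment.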
+/* Returns true if NUMA migration is currently rate limited */
+bool migrate_ratelimited(int node)
+{
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
+ msecs_to_jiffies(pteupdate_interval_millisecs)))
+ return false;
+
+ if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
+ return false;
+
+ return true;
+}
+
+/* Returns true if the node is migrate rate-limited after the update */
+bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
+{
+ bool rate_limited = false;
+
+ /*
+ * Rate-limit the amount of data that is being migrated to a node.
+ * Optimal placement is no good if the memory bus is saturated and
+ * all the time is being spent migrating!
+ */
+ spin_lock(&pgdat->numabalancing_migrate_lock);
+ if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+ pgdat->numabalancing_migrate_nr_pages = 0;
+ pgdat->numabalancing_migrate_next_window = jiffies +
+ msecs_to_jiffies(migrate_interval_millisecs);
+ }
+ if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
+ rate_limited = true;
+ else
+ pgdat->numabalancing_migrate_nr_pages += nr_pages;
+ spin_unlock(&pgdat->numabalancing_migrate_lock);
+
+ return rate_limited;
+}
+
+int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+{
+ int ret = 0;
+
+ /* Avoid migrating to a node that is nearly full */
+ if (migrate_balanced_pgdat(pgdat, 1)) {
+ int page_lru;
+
+ if (isolate_lru_page(page)) {
+ put_page(page);
+ return 0;
+ }
+
+ /* Page is isolated */
+ ret = 1;
+ page_lru = page_is_file_cache(page);
+ if (!PageTransHuge(page))
+ inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+ else
+ mod_zone_page_state(page_zone(page),
+ NR_ISOLATED_ANON + page_lru,
+ HPAGE_PMD_NR);
+ }
+
+ /*
+ * Page is either isolated or there is not enough space on the target
+ * node. If isolated, then it has taken a reference count and the
+ * caller's reference can be safely dropped without the page
+ * disappearing underneath us during migration. Otherwise the page is
+ * not to be migrated but the caller's reference should still be
+ * dropped so it does not leak.
+ */
+ put_page(page);
+
+ return ret;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+ * the page that will be dropped by this function before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+ pg_data_t *pgdat = NODE_DATA(node);
+ int isolated = 0;
+ int nr_remaining;
+ LIST_HEAD(migratepages);
+
+ /*
+ * Don't migrate pages that are mapped in multiple processes.
+ * TODO: Handle false sharing detection instead of this hammer
+ */
+ if (page_mapcount(page) != 1) {
+ put_page(page);
+ goto out;
+ }
+
+ /*
+ * Rate-limit the amount of data that is being migrated to a node.
+ * Optimal placement is no good if the memory bus is saturated and
+ * all the time is being spent migrating!
+ */
+ if (numamigrate_update_ratelimit(pgdat, 1)) {
+ put_page(page);
+ goto out;
+ }
+
+ isolated = numamigrate_isolate_page(pgdat, page);
+ if (!isolated)
+ goto out;
+
+ list_add(&page->lru, &migratepages);
+ nr_remaining = migrate_pages(&migratepages,
+ alloc_misplaced_dst_page,
+ node, false, MIGRATE_ASYNC,
+ MR_NUMA_MISPLACED);
+ if (nr_remaining) {
+ putback_lru_pages(&migratepages);
+ isolated = 0;
+ } else
+ count_vm_numa_event(NUMA_PAGE_MIGRATE);
+ BUG_ON(!list_empty(&migratepages));
+out:
+ return isolated;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, pmd_t entry,
+ unsigned long address,
+ struct page *page, int node)
+{
+ unsigned long haddr = address & HPAGE_PMD_MASK;
+ pg_data_t *pgdat = NODE_DATA(node);
+ int isolated = 0;
+ struct page *new_page = NULL;
+ struct mem_cgroup *memcg = NULL;
+ int page_lru = page_is_file_cache(page);
+
+ /*
+ * Don't migrate pages that are mapped in multiple processes.
+ * TODO: Handle false sharing detection instead of this hammer
+ */
+ if (page_mapcount(page) != 1)
+ goto out_dropref;
+
+ /*
+ * Rate-limit the amount of data that is being migrated to a node.
+ * Optimal placement is no good if the memory bus is saturated and
+ * all the time is being spent migrating!
+ */
+ if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
+ goto out_dropref;
+
+ new_page = alloc_pages_node(node,
+ (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+ if (!new_page) {
+ count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+ goto out_dropref;
+ }
+ page_xchg_last_nid(new_page, page_last_nid(page));
+
+ isolated = numamigrate_isolate_page(pgdat, page);
+ if (!isolated) {
+ count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+ put_page(new_page);
+ goto out_keep_locked;
+ }
+
+ /* Prepare a page as a migration target */
+ __set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
+
+ /* anon mapping, we can simply copy page->mapping to the new page: */
+ new_page->mapping = page->mapping;
+ new_page->index = page->index;
+ migrate_page_copy(new_page, page);
+ WARN_ON(PageLRU(new_page));
+
+ /* Recheck the target PMD */
+ spin_lock(&mm->page_table_lock);
+ if (unlikely(!pmd_same(*pmd, entry))) {
+ spin_unlock(&mm->page_table_lock);
+
+ /* Reverse changes made by migrate_page_copy() */
+ if (TestClearPageActive(new_page))
+ SetPageActive(page);
+ if (TestClearPageUnevictable(new_page))
+ SetPageUnevictable(page);
+ mlock_migrate_page(page, new_page);
+
+ unlock_page(new_page);
+ put_page(new_page); /* Free it */
+
+ unlock_page(page);
+ putback_lru_page(page);
+
+ count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+ goto out;
+ }
+
+ /*
+ * Traditional migration needs to prepare the memcg charge
+ * transaction early to prevent the old page from being
+ * uncharged when installing migration entries. Here we can
+ * save the potential rollback and start the charge transfer
+ * only when migration is already known to end successfully.
+ */
+ mem_cgroup_prepare_migration(page, new_page, &memcg);
+
+ entry = mk_pmd(new_page, vma->vm_page_prot);
+ entry = pmd_mknonnuma(entry);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+ entry = pmd_mkhuge(entry);
+
+ page_add_new_anon_rmap(new_page, vma, haddr);
+
+ set_pmd_at(mm, haddr, pmd, entry);
+ update_mmu_cache_pmd(vma, address, &entry);
+ page_remove_rmap(page);
+ /*
+ * Finish the charge transaction under the page table lock to
+ * prevent split_huge_page() from dividing up the charge
+ * before it's fully transferred to the new page.
+ */
+ mem_cgroup_end_migration(memcg, page, new_page, true);
+ spin_unlock(&mm->page_table_lock);
+
+ unlock_page(new_page);
+ unlock_page(page);
+ put_page(page); /* Drop the rmap reference */
+ put_page(page); /* Drop the LRU isolation reference */
+
+ count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
+ count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
+
+out:
+ mod_zone_page_state(page_zone(page),
+ NR_ISOLATED_ANON + page_lru,
+ -HPAGE_PMD_NR);
+ return isolated;
+
+out_dropref:
+ put_page(page);
+out_keep_locked:
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_NUMA */
diff --git a/mm/mmap.c b/mm/mmap.c
index f940062c8d4..f54b235f29a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -736,7 +736,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (anon_vma) {
VM_BUG_ON(adjust_next && next->anon_vma &&
anon_vma != next->anon_vma);
- anon_vma_lock(anon_vma);
+ anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_pre_update_vma(next);
@@ -1488,7 +1488,11 @@ munmap_back:
*
* Answer: Yes, several device drivers can do it in their
* f_op->mmap method. -DaveM
+ * Bug: If addr is changed, prev, rb_link, rb_parent should
+ * be updated for vma_link()
*/
+ WARN_ON_ONCE(addr != vma->vm_start);
+
addr = vma->vm_start;
pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
@@ -2065,6 +2069,18 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
+ /*
+ * vma_gap_update() doesn't support concurrent
+ * updates, but we only hold a shared mmap_sem
+ * lock here, so we need to protect against
+ * concurrent vma expansions.
+ * vma_lock_anon_vma() doesn't help here, as
+ * we don't guarantee that all growable vmas
+ * in a mm share the same root anon vma.
+ * So, we reuse mm->page_table_lock to guard
+ * against concurrent vma expansions.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
@@ -2072,6 +2088,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
vma_gap_update(vma->vm_next);
else
vma->vm_mm->highest_vm_end = address;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
perf_event_mmap(vma);
}
}
@@ -2122,11 +2140,25 @@ int expand_downwards(struct vm_area_struct *vma,
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
+ /*
+ * vma_gap_update() doesn't support concurrent
+ * updates, but we only hold a shared mmap_sem
+ * lock here, so we need to protect against
+ * concurrent vma expansions.
+ * vma_lock_anon_vma() doesn't help here, as
+ * we don't guarantee that all growable vmas
+ * in a mm share the same root anon vma.
+ * So, we reuse mm->page_table_lock to guard
+ * against concurrent vma expansions.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_start = address;
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
perf_event_mmap(vma);
}
}
@@ -2854,15 +2886,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
- mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
+ down_write(&anon_vma->root->rwsem);
/*
* We can safely modify head.next after taking the
- * anon_vma->root->mutex. If some other vma in this mm shares
+ * anon_vma->root->rwsem. If some other vma in this mm shares
* the same anon_vma we won't take it again.
*
* No need of atomic instructions here, head.next
* can't change from under us thanks to the
- * anon_vma->root->mutex.
+ * anon_vma->root->rwsem.
*/
if (__test_and_set_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_node))
@@ -2964,7 +2996,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
*
* No need of atomic instructions here, head.next
* can't change from under us until we release the
- * anon_vma->root->mutex.
+ * anon_vma->root->rwsem.
*/
if (!__test_and_clear_bit(0, (unsigned long *)
&anon_vma->root->rb_root.rb_node))
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a40992610ab..94722a4d6b4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -35,12 +35,16 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
}
#endif
-static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable)
+ int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
+ struct mm_struct *mm = vma->vm_mm;
pte_t *pte, oldpte;
spinlock_t *ptl;
+ unsigned long pages = 0;
+ bool all_same_node = true;
+ int last_nid = -1;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
@@ -48,17 +52,43 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
oldpte = *pte;
if (pte_present(oldpte)) {
pte_t ptent;
+ bool updated = false;
ptent = ptep_modify_prot_start(mm, addr, pte);
- ptent = pte_modify(ptent, newprot);
+ if (!prot_numa) {
+ ptent = pte_modify(ptent, newprot);
+ updated = true;
+ } else {
+ struct page *page;
+
+ page = vm_normal_page(vma, addr, oldpte);
+ if (page) {
+ int this_nid = page_to_nid(page);
+ if (last_nid == -1)
+ last_nid = this_nid;
+ if (last_nid != this_nid)
+ all_same_node = false;
+
+ /* only check non-shared pages */
+ if (!pte_numa(oldpte) &&
+ page_mapcount(page) == 1) {
+ ptent = pte_mknuma(ptent);
+ updated = true;
+ }
+ }
+ }
/*
* Avoid taking write faults for pages we know to be
* dirty.
*/
- if (dirty_accountable && pte_dirty(ptent))
+ if (dirty_accountable && pte_dirty(ptent)) {
ptent = pte_mkwrite(ptent);
+ updated = true;
+ }
+ if (updated)
+ pages++;
ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -72,61 +102,101 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
set_pte_at(mm, addr, pte,
swp_entry_to_pte(entry));
}
+ pages++;
}
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
+
+ *ret_all_same_node = all_same_node;
+ return pages;
}
-static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
- unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable)
+#ifdef CONFIG_NUMA_BALANCING
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmd)
+{
+ spin_lock(&mm->page_table_lock);
+ set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
+ spin_unlock(&mm->page_table_lock);
+}
+#else
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmd)
+{
+ BUG();
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr, unsigned long end,
+ pgprot_t newprot, int dirty_accountable, int prot_numa)
{
pmd_t *pmd;
unsigned long next;
+ unsigned long pages = 0;
+ bool all_same_node;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_trans_huge(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
- split_huge_page_pmd(vma->vm_mm, pmd);
- else if (change_huge_pmd(vma, pmd, addr, newprot))
+ split_huge_page_pmd(vma, addr, pmd);
+ else if (change_huge_pmd(vma, pmd, addr, newprot,
+ prot_numa)) {
+ pages += HPAGE_PMD_NR;
continue;
+ }
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
continue;
- change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
- dirty_accountable);
+ pages += change_pte_range(vma, pmd, addr, next, newprot,
+ dirty_accountable, prot_numa, &all_same_node);
+
+ /*
+ * If we are changing protections for NUMA hinting faults then
+ * set pmd_numa if the examined pages were all on the same
+ * node. This allows a regular PMD to be handled as one fault
+ * and effectively batches the taking of the PTL
+ */
+ if (prot_numa && all_same_node)
+ change_pmd_protnuma(vma->vm_mm, addr, pmd);
} while (pmd++, addr = next, addr != end);
+
+ return pages;
}
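For scale (assuming the common x86-64 configuration of 4K base pages and 2M PMDs, so HPAGE_PMD_NR = 512): marking the whole PMD here means a single NUMA hinting fault can fix up all 512 PTEs beneath it at once, instead of taking up to 512 separate faults that each acquire the PTL.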
-static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
- unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable)
+static inline unsigned long change_pud_range(struct vm_area_struct *vma,
+ pgd_t *pgd, unsigned long addr, unsigned long end,
+ pgprot_t newprot, int dirty_accountable, int prot_numa)
{
pud_t *pud;
unsigned long next;
+ unsigned long pages = 0;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- change_pmd_range(vma, pud, addr, next, newprot,
- dirty_accountable);
+ pages += change_pmd_range(vma, pud, addr, next, newprot,
+ dirty_accountable, prot_numa);
} while (pud++, addr = next, addr != end);
+
+ return pages;
}
-static void change_protection(struct vm_area_struct *vma,
+static unsigned long change_protection_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable)
+ int dirty_accountable, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
unsigned long next;
unsigned long start = addr;
+ unsigned long pages = 0;
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
@@ -135,10 +205,32 @@ static void change_protection(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- change_pud_range(vma, pgd, addr, next, newprot,
- dirty_accountable);
+ pages += change_pud_range(vma, pgd, addr, next, newprot,
+ dirty_accountable, prot_numa);
} while (pgd++, addr = next, addr != end);
- flush_tlb_range(vma, start, end);
+
+ /* Only flush the TLB if we actually modified any entries: */
+ if (pages)
+ flush_tlb_range(vma, start, end);
+
+ return pages;
+}
+
+unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgprot_t newprot,
+ int dirty_accountable, int prot_numa)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long pages;
+
+ mmu_notifier_invalidate_range_start(mm, start, end);
+ if (is_vm_hugetlb_page(vma))
+ pages = hugetlb_change_protection(vma, start, end, newprot);
+ else
+ pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+ mmu_notifier_invalidate_range_end(mm, start, end);
+
+ return pages;
}
int
@@ -213,12 +305,9 @@ success:
dirty_accountable = 1;
}
- mmu_notifier_invalidate_range_start(mm, start, end);
- if (is_vm_hugetlb_page(vma))
- hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
- else
- change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
- mmu_notifier_invalidate_range_end(mm, start, end);
+ change_protection(vma, start, end, vma->vm_page_prot,
+ dirty_accountable, 0);
+
vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
vm_stat_account(mm, newflags, vma->vm_file, nrpages);
perf_event_mmap(vma);
@@ -274,8 +363,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
error = -EINVAL;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out;
- }
- else {
+ } else {
if (vma->vm_start > start)
goto out;
if (unlikely(grows & PROT_GROWSUP)) {
@@ -291,9 +379,10 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
for (nstart = start ; ; ) {
unsigned long newflags;
- /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
- newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+ newflags = vm_flags;
+ newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
/* newflags >> 4 shift VM_MAY% in place of VM_% */
if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 1b61c2d3307..e1031e1f6a6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
}
if (vma->anon_vma) {
anon_vma = vma->anon_vma;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_write(anon_vma);
}
}
@@ -182,7 +182,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
need_flush = true;
continue;
} else if (!err) {
- split_huge_page_pmd(vma->vm_mm, old_pmd);
+ split_huge_page_pmd(vma, old_addr, old_pmd);
}
VM_BUG_ON(pmd_trans_huge(*old_pmd));
}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index bd82f6b3141..b8294fc03df 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -137,6 +137,22 @@ unsigned long __init free_low_memory_core_early(int nodeid)
return count;
}
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+ struct zone *z;
+
+ /*
+ * In free_area_init_core(), highmem zone's managed_pages is set to
+ * present_pages, and bootmem allocator doesn't allocate from highmem
+ * zones. So there's no need to recalculate managed_pages because all
+ * highmem pages will be managed by the buddy system. Here highmem
+ * zone also includes highmem movable zone.
+ */
+ for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+ if (!is_highmem(z))
+ z->managed_pages = 0;
+}
+
/**
* free_all_bootmem_node - release a node's free pages to the buddy allocator
* @pgdat: node to be released
@@ -146,6 +162,7 @@ unsigned long __init free_low_memory_core_early(int nodeid)
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
+ reset_node_lowmem_managed_pages(pgdat);
/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
return 0;
@@ -158,6 +175,11 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
*/
unsigned long __init free_all_bootmem(void)
{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ reset_node_lowmem_managed_pages(pgdat);
+
/*
* We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
* because in some case like Node0 doesn't have RAM installed
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 18f1ae2b45d..0399f146ae4 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -215,7 +215,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
* the page allocator means a mempolicy is in effect. Cpuset policy
* is enforced in get_page_from_freelist().
*/
- if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
+ if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
*totalpages = total_swap_pages;
for_each_node_mask(nid, *nodemask)
*totalpages += node_spanned_pages(nid);
@@ -591,43 +591,6 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
spin_unlock(&zone_scan_lock);
}
-/*
- * Try to acquire the oom killer lock for all system zones. Returns zero if a
- * parallel oom killing is taking place, otherwise locks all zones and returns
- * non-zero.
- */
-static int try_set_system_oom(void)
-{
- struct zone *zone;
- int ret = 1;
-
- spin_lock(&zone_scan_lock);
- for_each_populated_zone(zone)
- if (zone_is_oom_locked(zone)) {
- ret = 0;
- goto out;
- }
- for_each_populated_zone(zone)
- zone_set_flag(zone, ZONE_OOM_LOCKED);
-out:
- spin_unlock(&zone_scan_lock);
- return ret;
-}
-
-/*
- * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
- * attempts or page faults may now recall the oom killer, if necessary.
- */
-static void clear_system_oom(void)
-{
- struct zone *zone;
-
- spin_lock(&zone_scan_lock);
- for_each_populated_zone(zone)
- zone_clear_flag(zone, ZONE_OOM_LOCKED);
- spin_unlock(&zone_scan_lock);
-}
-
/**
* out_of_memory - kill the "best" process when we run out of memory
* @zonelist: zonelist pointer
@@ -708,15 +671,16 @@ out:
/*
* The pagefault handler calls here because it is out of memory, so kill a
- * memory-hogging task. If a populated zone has ZONE_OOM_LOCKED set, a parallel
- * oom killing is already in progress so do nothing. If a task is found with
- * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
+ * memory-hogging task. If any populated zone has ZONE_OOM_LOCKED set, a
+ * parallel oom killing is already in progress so do nothing.
*/
void pagefault_out_of_memory(void)
{
- if (try_set_system_oom()) {
+ struct zonelist *zonelist = node_zonelist(first_online_node,
+ GFP_KERNEL);
+
+ if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
out_of_memory(NULL, 0, 0, NULL, false);
- clear_system_oom();
+ clear_zonelist_oom(zonelist, GFP_KERNEL);
}
- schedule_timeout_killable(1);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f427122449..0713bfbf095 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
zone_reclaimable_pages(z) - z->dirty_balance_reserve;
}
/*
+ * Unreclaimable memory (kernel memory or anonymous memory
+ * without swap) can bring down the dirtyable pages below
+ * the zone's dirty balance reserve and the above calculation
+ * will underflow. However we still want to add in nodes
+ * which are below threshold (negative values) to get a more
+ * accurate calculation but make sure that the total never
+ * underflows.
+ */
+ if ((long)x < 0)
+ x = 0;
+
+ /*
* Make sure that the number of highmem pages is never larger
* than the number of the total dirtyable memory. This can only
* occur in very strange VM situations but we want to make sure
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
- dirty_balance_reserve;
+ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+ x -= min(x, dirty_balance_reserve);
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
* highmem zone can hold its share of dirty pages, so we don't
* care about vm_highmem_is_dirtyable here.
*/
- return zone_page_state(zone, NR_FREE_PAGES) +
- zone_reclaimable_pages(zone) -
- zone->dirty_balance_reserve;
+ unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+ zone_reclaimable_pages(zone);
+
+ /* don't allow this to underflow */
+ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+ return nr_pages;
}
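The x -= min(x, reserve) form used in both hunks above is an unsigned-arithmetic guard; a minimal sketch of why the clamp matters:

/* Illustration only: unsigned subtraction wraps instead of going negative. */
static unsigned long sub_clamped(unsigned long x, unsigned long reserve)
{
	/* e.g. 100UL - 150UL would wrap to ULONG_MAX - 49, a huge bogus value */
	x -= min(x, reserve);	/* clamps to 0 instead of wrapping */
	return x;
}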
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a8d339d282..4ba5e37127f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -90,6 +90,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ [N_MEMORY] = { { [0] = 1UL } },
+#endif
[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
@@ -368,8 +371,7 @@ static int destroy_compound_page(struct page *page, unsigned long order)
int nr_pages = 1 << order;
int bad = 0;
- if (unlikely(compound_order(page) != order) ||
- unlikely(!PageHead(page))) {
+ if (unlikely(compound_order(page) != order)) {
bad_page(page);
bad++;
}
@@ -523,7 +525,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* If a block is freed, and its buddy is also free, then this
* triggers coalescing into a block of larger size.
*
- * -- wli
+ * -- nyc
*/
static inline void __free_one_page(struct page *page,
@@ -608,6 +610,7 @@ static inline int free_pages_check(struct page *page)
bad_page(page);
return 1;
}
+ reset_page_last_nid(page);
if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
return 0;
@@ -732,6 +735,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
+/*
+ * Read access to zone->managed_pages is safe because it's unsigned long,
+ * but we still need to serialize writers. Currently all callers of
+ * __free_pages_bootmem(), other than put_page_bootmem(), run only at
+ * boot time. So to keep boot time short, we shift the burden of
+ * serializing writers to put_page_bootmem().
+ */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
@@ -747,6 +757,7 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
set_page_count(p, 0);
}
+ page_zone(page)->managed_pages += 1 << order;
set_page_refcounted(page);
__free_pages(page, order);
}
@@ -782,7 +793,7 @@ void __init init_cma_reserved_pageblock(struct page *page)
* large block of memory acted on by a series of small allocations.
* This behavior is a critical factor in sglist merging's success.
*
- * -- wli
+ * -- nyc
*/
static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area,
@@ -1695,7 +1706,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
*
* If the zonelist cache is present in the passed in zonelist, then
* returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
+ * tasks mems_allowed, or node_states[N_MEMORY].)
*
* If the zonelist cache is not available for this zonelist, does
* nothing and returns NULL.
@@ -1724,7 +1735,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
&cpuset_current_mems_allowed :
- &node_states[N_HIGH_MEMORY];
+ &node_states[N_MEMORY];
return allowednodes;
}
@@ -2601,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
+ struct mem_cgroup *memcg = NULL;
gfp_mask &= gfp_allowed_mask;
@@ -2619,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
+ /*
+ * Will only have any effect when __GFP_KMEMCG is set. This is
+ * verified in the (always inline) callee
+ */
+ if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
+ return NULL;
+
retry_cpuset:
cpuset_mems_cookie = get_mems_allowed();
@@ -2654,6 +2673,8 @@ out:
if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
goto retry_cpuset;
+ memcg_kmem_commit_charge(page, memcg, order);
+
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2706,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order)
EXPORT_SYMBOL(free_pages);
+/*
+ * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
+ * pages allocated with __GFP_KMEMCG.
+ *
+ * Those pages are accounted to a particular memcg, embedded in the
+ * corresponding page_cgroup. To avoid adding a hit in the allocator to search
+ * for that information only to find out that it is NULL for users who have no
+ * interest in that whatsoever, we provide these functions.
+ *
+ * The caller knows better which flags it relies on.
+ */
+void __free_memcg_kmem_pages(struct page *page, unsigned int order)
+{
+ memcg_kmem_uncharge_pages(page, order);
+ __free_pages(page, order);
+}
+
+void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
+{
+ if (addr != 0) {
+ VM_BUG_ON(!virt_addr_valid((void *)addr));
+ __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
+ }
+}
+
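A hedged sketch of the intended alloc/free pairing for these helpers; the allocation-side wrapper below is an assumption about typical use of __GFP_KMEMCG, not code from this patch:

/* Sketch: kernel pages charged to the current task's memcg. */
static void *grab_tracked_buffer(unsigned int order)
{
	/* __GFP_KMEMCG asks the page allocator to charge the pages to a memcg. */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_KMEMCG, order);

	return page ? page_address(page) : NULL;
}

static void drop_tracked_buffer(void *addr, unsigned int order)
{
	/* Must be freed with the memcg-aware helper so the charge is undone. */
	free_memcg_kmem_pages((unsigned long)addr, order);
}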
static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
if (addr) {
@@ -2981,6 +3027,7 @@ void show_free_areas(unsigned int filter)
" isolated(anon):%lukB"
" isolated(file):%lukB"
" present:%lukB"
+ " managed:%lukB"
" mlocked:%lukB"
" dirty:%lukB"
" writeback:%lukB"
@@ -3010,6 +3057,7 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_ISOLATED_ANON)),
K(zone_page_state(zone, NR_ISOLATED_FILE)),
K(zone->present_pages),
+ K(zone->managed_pages),
K(zone_page_state(zone, NR_MLOCK)),
K(zone_page_state(zone, NR_FILE_DIRTY)),
K(zone_page_state(zone, NR_WRITEBACK)),
@@ -3238,7 +3286,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
return node;
}
- for_each_node_state(n, N_HIGH_MEMORY) {
+ for_each_node_state(n, N_MEMORY) {
/* Don't want a node to appear more than once */
if (node_isset(n, *used_node_mask))
@@ -3380,7 +3428,7 @@ static int default_zonelist_order(void)
* local memory, NODE_ORDER may be suitable.
*/
average_size = total_size /
- (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
+ (nodes_weight(node_states[N_MEMORY]) + 1);
for_each_online_node(nid) {
low_kmem_size = 0;
total_size = 0;
@@ -3870,6 +3918,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
mminit_verify_page_links(page, zone, nid, pfn);
init_page_count(page);
reset_page_mapcount(page);
+ reset_page_last_nid(page);
SetPageReserved(page);
/*
* Mark the block movable so that blocks are reserved for
@@ -4476,6 +4525,26 @@ void __init set_pageblock_order(void)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
+ unsigned long present_pages)
+{
+ unsigned long pages = spanned_pages;
+
+ /*
+ * Provide a more accurate estimation if there are holes within
+ * the zone and SPARSEMEM is in use. If there are holes within the
+ * zone, each populated memory region may cost us one or two extra
+ * memmap pages due to alignment, because the memmap pages for each
+ * populated region may not be naturally aligned on a page boundary.
+ * So the (present_pages >> 4) heuristic is a tradeoff for that.
+ */
+ if (spanned_pages > present_pages + (present_pages >> 4) &&
+ IS_ENABLED(CONFIG_SPARSEMEM))
+ pages = present_pages;
+
+ return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
+}
+
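A worked example of the heuristic above (illustrative, assuming 4K pages and a 64-byte struct page): a zone spanning 1,048,576 pages of which only 262,144 are present satisfies spanned > present + present/16 (278,528), so the estimate is based on present_pages: PAGE_ALIGN(262144 * 64) >> PAGE_SHIFT = 4096 memmap pages, instead of the 16384 the full span would suggest.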
/*
* Set up the zone data structures:
* - mark all pages reserved
@@ -4493,54 +4562,67 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
int ret;
pgdat_resize_init(pgdat);
+#ifdef CONFIG_NUMA_BALANCING
+ spin_lock_init(&pgdat->numabalancing_migrate_lock);
+ pgdat->numabalancing_migrate_nr_pages = 0;
+ pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
init_waitqueue_head(&pgdat->kswapd_wait);
init_waitqueue_head(&pgdat->pfmemalloc_wait);
pgdat_page_cgroup_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
- unsigned long size, realsize, memmap_pages;
+ unsigned long size, realsize, freesize, memmap_pages;
size = zone_spanned_pages_in_node(nid, j, zones_size);
- realsize = size - zone_absent_pages_in_node(nid, j,
+ realsize = freesize = size - zone_absent_pages_in_node(nid, j,
zholes_size);
/*
- * Adjust realsize so that it accounts for how much memory
+ * Adjust freesize so that it accounts for how much memory
* is used by this zone for memmap. This affects the watermark
* and per-cpu initialisations
*/
- memmap_pages =
- PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
- if (realsize >= memmap_pages) {
- realsize -= memmap_pages;
+ memmap_pages = calc_memmap_size(size, realsize);
+ if (freesize >= memmap_pages) {
+ freesize -= memmap_pages;
if (memmap_pages)
printk(KERN_DEBUG
" %s zone: %lu pages used for memmap\n",
zone_names[j], memmap_pages);
} else
printk(KERN_WARNING
- " %s zone: %lu pages exceeds realsize %lu\n",
- zone_names[j], memmap_pages, realsize);
+ " %s zone: %lu pages exceeds freesize %lu\n",
+ zone_names[j], memmap_pages, freesize);
/* Account for reserved pages */
- if (j == 0 && realsize > dma_reserve) {
- realsize -= dma_reserve;
+ if (j == 0 && freesize > dma_reserve) {
+ freesize -= dma_reserve;
printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
zone_names[0], dma_reserve);
}
if (!is_highmem_idx(j))
- nr_kernel_pages += realsize;
- nr_all_pages += realsize;
+ nr_kernel_pages += freesize;
+ /* Charge for highmem memmap if there are enough kernel pages */
+ else if (nr_kernel_pages > memmap_pages * 2)
+ nr_kernel_pages -= memmap_pages;
+ nr_all_pages += freesize;
zone->spanned_pages = size;
- zone->present_pages = realsize;
+ zone->present_pages = freesize;
+ /*
+ * Set an approximate value for lowmem here, it will be adjusted
+ * when the bootmem allocator frees pages into the buddy system.
+ * And all highmem pages will be managed by the buddy system.
+ */
+ zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
#ifdef CONFIG_NUMA
zone->node = nid;
- zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
+ zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
/ 100;
- zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
+ zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
#endif
zone->name = zone_names[j];
spin_lock_init(&zone->lock);
@@ -4731,7 +4813,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
/*
* early_calculate_totalpages()
* Sum pages in active regions for movable zone.
- * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ * Populate N_MEMORY for calculating usable_nodes.
*/
static unsigned long __init early_calculate_totalpages(void)
{
@@ -4744,7 +4826,7 @@ static unsigned long __init early_calculate_totalpages(void)
totalpages += pages;
if (pages)
- node_set_state(nid, N_HIGH_MEMORY);
+ node_set_state(nid, N_MEMORY);
}
return totalpages;
}
@@ -4761,9 +4843,9 @@ static void __init find_zone_movable_pfns_for_nodes(void)
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
/* save the state before borrow the nodemask */
- nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
+ nodemask_t saved_node_state = node_states[N_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
- int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+ int usable_nodes = nodes_weight(node_states[N_MEMORY]);
/*
* If movablecore was specified, calculate what size of
@@ -4798,7 +4880,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
unsigned long start_pfn, end_pfn;
/*
@@ -4890,23 +4972,27 @@ restart:
out:
/* restore the node_state */
- node_states[N_HIGH_MEMORY] = saved_node_state;
+ node_states[N_MEMORY] = saved_node_state;
}
-/* Any regular memory on that node ? */
-static void __init check_for_regular_memory(pg_data_t *pgdat)
+/* Any regular or high memory on that node ? */
+static void check_for_memory(pg_data_t *pgdat, int nid)
{
-#ifdef CONFIG_HIGHMEM
enum zone_type zone_type;
- for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+ if (N_MEMORY == N_NORMAL_MEMORY)
+ return;
+
+ for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
struct zone *zone = &pgdat->node_zones[zone_type];
if (zone->present_pages) {
- node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+ node_set_state(nid, N_HIGH_MEMORY);
+ if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
+ zone_type <= ZONE_NORMAL)
+ node_set_state(nid, N_NORMAL_MEMORY);
break;
}
}
-#endif
}
/**
@@ -4989,8 +5075,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
/* Any memory on that node */
if (pgdat->node_present_pages)
- node_set_state(nid, N_HIGH_MEMORY);
- check_for_regular_memory(pgdat);
+ node_set_state(nid, N_MEMORY);
+ check_for_memory(pgdat, nid);
}
}
@@ -5727,7 +5813,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned int tries = 0;
int ret = 0;
- migrate_prep_local();
+ migrate_prep();
while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {
@@ -5755,7 +5841,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
ret = migrate_pages(&cc->migratepages,
alloc_migrate_target,
- 0, false, MIGRATE_SYNC);
+ 0, false, MIGRATE_SYNC,
+ MR_CMA);
}
putback_movable_pages(&cc->migratepages);
@@ -5891,8 +5978,15 @@ done:
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
- for (; nr_pages--; ++pfn)
- __free_page(pfn_to_page(pfn));
+ unsigned int count = 0;
+
+ for (; nr_pages--; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ count += page_count(page) != 1;
+ __free_page(page);
+ }
+ WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 44db00e253e..6d757e3a872 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -274,7 +274,7 @@ void __init page_cgroup_init(void)
if (mem_cgroup_disabled())
return;
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
unsigned long start_pfn, end_pfn;
start_pfn = node_start_pfn(nid);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 6c118d012bb..35aa294656c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,7 +58,7 @@ again:
if (!walk->pte_entry)
continue;
- split_huge_page_pmd(walk->mm, pmd);
+ split_huge_page_pmd_mm(walk->mm, addr, pmd);
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
goto again;
err = walk_pte_range(pmd, addr, next, walk);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index e642627da6b..0c8323fe6c8 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -12,8 +12,8 @@
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
- * Only sets the access flags (dirty, accessed, and
- * writable). Furthermore, we know it always gets set to a "more
+ * Only sets the access flags (dirty, accessed), as well as write
+ * permission. Furthermore, we know it always gets set to a "more
* permissive" setting, which allows most architectures to optimize
* this. We return whether the PTE actually changed, which in turn
* instructs the caller to do things like update_mmu_cache(). This
@@ -27,7 +27,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(*ptep, entry);
if (changed) {
set_pte_at(vma->vm_mm, address, ptep, entry);
- flush_tlb_page(vma, address);
+ flush_tlb_fix_spurious_fault(vma, address);
}
return changed;
}
@@ -88,7 +88,8 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
{
pte_t pte;
pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
- flush_tlb_page(vma, address);
+ if (pte_accessible(pte))
+ flush_tlb_page(vma, address);
return pte;
}
#endif
diff --git a/mm/rmap.c b/mm/rmap.c
index cf7e99a87c3..2c78f8cadc9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,7 +24,7 @@
* mm->mmap_sem
* page->flags PG_locked (lock_page)
* mapping->i_mmap_mutex
- * anon_vma->mutex
+ * anon_vma->rwsem
* mm->page_table_lock or pte_lock
* zone->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
@@ -37,7 +37,7 @@
* in arch-dependent flush_dcache_mmap_lock,
* within bdi.wb->list_lock in __sync_single_inode)
*
- * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
* ->tasklist_lock
* pte map lock
*/
@@ -87,24 +87,24 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
- * Synchronize against page_lock_anon_vma() such that
+ * Synchronize against page_lock_anon_vma_read() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
- * mutex_trylock() from page_lock_anon_vma(). This orders:
+ * down_read_trylock() from page_lock_anon_vma_read(). This orders:
*
- * page_lock_anon_vma() VS put_anon_vma()
- * mutex_trylock() atomic_dec_and_test()
+ * page_lock_anon_vma_read() VS put_anon_vma()
+ * down_read_trylock() atomic_dec_and_test()
* LOCK MB
- * atomic_read() mutex_is_locked()
+ * atomic_read() rwsem_is_locked()
*
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
- if (mutex_is_locked(&anon_vma->root->mutex)) {
- anon_vma_lock(anon_vma);
+ if (rwsem_is_locked(&anon_vma->root->rwsem)) {
+ anon_vma_lock_write(anon_vma);
anon_vma_unlock(anon_vma);
}
@@ -146,7 +146,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma()
+ * optimistically looked up an anon_vma in page_lock_anon_vma_read()
* and that may actually touch the spinlock even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
@@ -181,7 +181,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
allocated = anon_vma;
}
- anon_vma_lock(anon_vma);
+ anon_vma_lock_write(anon_vma);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
@@ -219,9 +219,9 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct
struct anon_vma *new_root = anon_vma->root;
if (new_root != root) {
if (WARN_ON_ONCE(root))
- mutex_unlock(&root->mutex);
+ up_write(&root->rwsem);
root = new_root;
- mutex_lock(&root->mutex);
+ down_write(&root->rwsem);
}
return root;
}
@@ -229,7 +229,7 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct
static inline void unlock_anon_vma_root(struct anon_vma *root)
{
if (root)
- mutex_unlock(&root->mutex);
+ up_write(&root->rwsem);
}
/*
@@ -306,7 +306,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
get_anon_vma(anon_vma->root);
/* Mark this anon_vma as the one where our new (COWed) pages go. */
vma->anon_vma = anon_vma;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma_unlock(anon_vma);
@@ -349,7 +349,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
/*
* Iterate the list once more, it now only contains empty and unlinked
* anon_vmas, destroy them. Could not do before due to __put_anon_vma()
- * needing to acquire the anon_vma->root->mutex.
+ * needing to write-acquire the anon_vma->root->rwsem.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
@@ -365,7 +365,7 @@ static void anon_vma_ctor(void *data)
{
struct anon_vma *anon_vma = data;
- mutex_init(&anon_vma->mutex);
+ init_rwsem(&anon_vma->rwsem);
atomic_set(&anon_vma->refcount, 0);
anon_vma->rb_root = RB_ROOT;
}
@@ -442,7 +442,7 @@ out:
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
* reference like with page_get_anon_vma() and then block on the mutex.
*/
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
struct anon_vma *anon_vma = NULL;
struct anon_vma *root_anon_vma;
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
root_anon_vma = ACCESS_ONCE(anon_vma->root);
- if (mutex_trylock(&root_anon_vma->mutex)) {
+ if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
* If the page is still mapped, then this anon_vma is still
* its anon_vma, and holding the mutex ensures that it will
* not go away, see anon_vma_free().
*/
if (!page_mapped(page)) {
- mutex_unlock(&root_anon_vma->mutex);
+ up_read(&root_anon_vma->rwsem);
anon_vma = NULL;
}
goto out;
@@ -484,15 +484,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
/* we pinned the anon_vma, it's safe to sleep */
rcu_read_unlock();
- anon_vma_lock(anon_vma);
+ anon_vma_lock_read(anon_vma);
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
* and bail -- can't simply use put_anon_vma() because
- * we'll deadlock on the anon_vma_lock() recursion.
+ * we'll deadlock on the anon_vma_lock_write() recursion.
*/
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
__put_anon_vma(anon_vma);
anon_vma = NULL;
}
@@ -504,9 +504,9 @@ out:
return anon_vma;
}
-void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
}
/*
@@ -744,7 +744,7 @@ static int page_referenced_anon(struct page *page,
struct anon_vma_chain *avc;
int referenced = 0;
- anon_vma = page_lock_anon_vma(page);
+ anon_vma = page_lock_anon_vma_read(page);
if (!anon_vma)
return referenced;
@@ -766,7 +766,7 @@ static int page_referenced_anon(struct page *page,
break;
}
- page_unlock_anon_vma(anon_vma);
+ page_unlock_anon_vma_read(anon_vma);
return referenced;
}
@@ -1249,12 +1249,14 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
update_hiwater_rss(mm);
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
+ if (!PageHuge(page)) {
+ if (PageAnon(page))
+ dec_mm_counter(mm, MM_ANONPAGES);
+ else
+ dec_mm_counter(mm, MM_FILEPAGES);
+ }
set_pte_at(mm, address, pte,
- swp_entry_to_pte(make_hwpoison_entry(page)));
+ swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
@@ -1313,7 +1315,7 @@ out_mlock:
/*
* We need mmap_sem locking. Otherwise the VM_LOCKED check gives
* an unstable result and races. Plus, we can't wait here because
- * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
+ * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
* if the trylock failed, the page remains in the evictable lru and
* vmscan could later retry moving the page to the unevictable lru if
* the page is actually mlocked.
@@ -1478,7 +1480,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
- anon_vma = page_lock_anon_vma(page);
+ anon_vma = page_lock_anon_vma_read(page);
if (!anon_vma)
return ret;
@@ -1505,7 +1507,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
break;
}
- page_unlock_anon_vma(anon_vma);
+ page_unlock_anon_vma_read(anon_vma);
return ret;
}
@@ -1700,7 +1702,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
int ret = SWAP_AGAIN;
/*
- * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+ * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_sem. Users without mmap_sem are required to
* take a reference count to prevent the anon_vma disappearing
@@ -1708,7 +1710,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
anon_vma = page_anon_vma(page);
if (!anon_vma)
return ret;
- anon_vma_lock(anon_vma);
+ anon_vma_lock_read(anon_vma);
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
@@ -1716,7 +1718,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
if (ret != SWAP_AGAIN)
break;
}
- anon_vma_unlock(anon_vma);
+ anon_vma_unlock_read(anon_vma);
return ret;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 50c5b8f3a35..5dd56f6efdb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -889,7 +889,7 @@ static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
if (!mpol || mpol->mode == MPOL_DEFAULT)
return; /* show nothing */
- mpol_to_str(buffer, sizeof(buffer), mpol, 1);
+ mpol_to_str(buffer, sizeof(buffer), mpol);
seq_printf(seq, ",mpol=%s", buffer);
}
@@ -1715,6 +1715,96 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+/*
+ * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ */
+static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+ pgoff_t index, pgoff_t end, int whence)
+{
+ struct page *page;
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ bool done = false;
+ int i;
+
+ pagevec_init(&pvec, 0);
+ pvec.nr = 1; /* start small: we may be there already */
+ while (!done) {
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ pvec.nr, pvec.pages, indices);
+ if (!pvec.nr) {
+ if (whence == SEEK_DATA)
+ index = end;
+ break;
+ }
+ for (i = 0; i < pvec.nr; i++, index++) {
+ if (index < indices[i]) {
+ if (whence == SEEK_HOLE) {
+ done = true;
+ break;
+ }
+ index = indices[i];
+ }
+ page = pvec.pages[i];
+ if (page && !radix_tree_exceptional_entry(page)) {
+ if (!PageUptodate(page))
+ page = NULL;
+ }
+ if (index >= end ||
+ (page && whence == SEEK_DATA) ||
+ (!page && whence == SEEK_HOLE)) {
+ done = true;
+ break;
+ }
+ }
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
+ pvec.nr = PAGEVEC_SIZE;
+ cond_resched();
+ }
+ return index;
+}
+
+static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ pgoff_t start, end;
+ loff_t new_offset;
+
+ if (whence != SEEK_DATA && whence != SEEK_HOLE)
+ return generic_file_llseek_size(file, offset, whence,
+ MAX_LFS_FILESIZE, i_size_read(inode));
+ mutex_lock(&inode->i_mutex);
+ /* We're holding i_mutex so we can access i_size directly */
+
+ if (offset < 0)
+ offset = -EINVAL;
+ else if (offset >= inode->i_size)
+ offset = -ENXIO;
+ else {
+ start = offset >> PAGE_CACHE_SHIFT;
+ end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ new_offset = shmem_seek_hole_data(mapping, start, end, whence);
+ new_offset <<= PAGE_CACHE_SHIFT;
+ if (new_offset > offset) {
+ if (new_offset < inode->i_size)
+ offset = new_offset;
+ else if (whence == SEEK_DATA)
+ offset = -ENXIO;
+ else
+ offset = inode->i_size;
+ }
+ }
+
+ if (offset >= 0 && offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+}
+
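With shmem_file_llseek() wired into shmem_file_operations below, tmpfs files answer lseek(2) SEEK_DATA/SEEK_HOLE queries instead of falling back to generic_file_llseek(). A minimal user-space sketch of how a caller might walk the data extents of a tmpfs file (the test program and its file argument are hypothetical; SEEK_DATA/SEEK_HOLE are the standard whence values):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data = 0, hole;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* Each iteration reports one populated extent; ENXIO ends the walk. */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data %lld..%lld\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}

A hole between extents is simply skipped by the next SEEK_DATA call, mirroring the page-by-page walk in shmem_seek_hole_data() above.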
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
@@ -2373,7 +2463,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
if (!gid_valid(sbinfo->gid))
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
- if (mpol_parse_str(value, &sbinfo->mpol, 1))
+ if (mpol_parse_str(value, &sbinfo->mpol))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
@@ -2586,7 +2676,7 @@ static const struct address_space_operations shmem_aops = {
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
- .llseek = generic_file_llseek,
+ .llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
diff --git a/mm/slab.c b/mm/slab.c
index 33d3363658d..e7667a3584b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,7 +87,6 @@
*/
#include <linux/slab.h>
-#include "slab.h"
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
@@ -128,6 +127,8 @@
#include "internal.h"
+#include "slab.h"
+
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
@@ -162,23 +163,6 @@
*/
static bool pfmemalloc_active __read_mostly;
-/* Legal flag mask for kmem_cache_create(). */
-#if DEBUG
-# define CREATE_MASK (SLAB_RED_ZONE | \
- SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
- SLAB_STORE_USER | \
- SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#else
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
- SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-#endif
-
/*
* kmem_bufctl_t:
*
@@ -564,15 +548,11 @@ static struct cache_names __initdata cache_names[] = {
#undef CACHE
};
-static struct arraycache_init initarray_cache __initdata =
- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache kmem_cache_boot = {
- .nodelists = kmem_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -662,6 +642,26 @@ static void init_node_lock_keys(int q)
}
}
+static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
+{
+ struct kmem_list3 *l3;
+ l3 = cachep->nodelists[q];
+ if (!l3)
+ return;
+
+ slab_set_lock_classes(cachep, &on_slab_l3_key,
+ &on_slab_alc_key, q);
+}
+
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+ int node;
+
+ VM_BUG_ON(OFF_SLAB(cachep));
+ for_each_node(node)
+ on_slab_lock_classes_node(cachep, node);
+}
+
static inline void init_lock_keys(void)
{
int node;
@@ -678,6 +678,14 @@ static inline void init_lock_keys(void)
{
}
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+}
+
+static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}
@@ -1406,6 +1414,9 @@ static int __cpuinit cpuup_prepare(long cpu)
free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS)
slab_set_debugobj_lock_classes_node(cachep, node);
+ else if (!OFF_SLAB(cachep) &&
+ !(cachep->flags & SLAB_DESTROY_BY_RCU))
+ on_slab_lock_classes_node(cachep, node);
}
init_node_lock_keys(node);
@@ -1577,28 +1588,33 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
}
/*
+ * The memory after the last cpu cache pointer is used for
+ * the nodelists pointer.
+ */
+static void setup_nodelists_pointer(struct kmem_cache *cachep)
+{
+ cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+}
+
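setup_nodelists_pointer() relies on the node list pointers being co-located with the cache descriptor, immediately after the nr_cpu_ids per-cpu array slots; the create_boot_cache() call later in kmem_cache_init() sizes the descriptor as offsetof(struct kmem_cache, array[nr_cpu_ids]) + nr_node_ids * sizeof(struct kmem_list3 *) for exactly that reason. A stand-alone sketch of the size computation, using stub types and invented counts rather than the real kernel structures:

#include <stddef.h>
#include <stdio.h>

struct array_cache_stub;	/* stand-ins for the kernel types */
struct kmem_list3_stub;

struct kmem_cache_stub {
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;
	/* ...other descriptor fields elided... */
	struct array_cache_stub *array[];	/* nr_cpu_ids entries at runtime */
	/* nr_node_ids kmem_list3 pointers follow in the same allocation;
	 * that is where the setup helper points cachep->nodelists. */
};

int main(void)
{
	int nr_cpu_ids = 4, nr_node_ids = 2;	/* illustrative values */
	size_t size = offsetof(struct kmem_cache_stub, array) +
		      nr_cpu_ids * sizeof(struct array_cache_stub *) +
		      nr_node_ids * sizeof(struct kmem_list3_stub *);

	printf("descriptor size: %zu bytes\n", size);
	return 0;
}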
+/*
* Initialisation. Called after the page allocator have been initialised and
* before smp_init().
*/
void __init kmem_cache_init(void)
{
- size_t left_over;
struct cache_sizes *sizes;
struct cache_names *names;
int i;
- int order;
- int node;
kmem_cache = &kmem_cache_boot;
+ setup_nodelists_pointer(kmem_cache);
if (num_possible_nodes() == 1)
use_alien_caches = 0;
- for (i = 0; i < NUM_INIT_LISTS; i++) {
+ for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_list3_init(&initkmem_list3[i]);
- if (i < MAX_NUMNODES)
- kmem_cache->nodelists[i] = NULL;
- }
+
set_up_list3s(kmem_cache, CACHE_CACHE);
/*
@@ -1629,37 +1645,16 @@ void __init kmem_cache_init(void)
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
- node = numa_mem_id();
-
/* 1) create the kmem_cache */
- INIT_LIST_HEAD(&slab_caches);
- list_add(&kmem_cache->list, &slab_caches);
- kmem_cache->colour_off = cache_line_size();
- kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
- kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
- nr_node_ids * sizeof(struct kmem_list3 *);
- kmem_cache->object_size = kmem_cache->size;
- kmem_cache->size = ALIGN(kmem_cache->object_size,
- cache_line_size());
- kmem_cache->reciprocal_buffer_size =
- reciprocal_value(kmem_cache->size);
-
- for (order = 0; order < MAX_ORDER; order++) {
- cache_estimate(order, kmem_cache->size,
- cache_line_size(), 0, &left_over, &kmem_cache->num);
- if (kmem_cache->num)
- break;
- }
- BUG_ON(!kmem_cache->num);
- kmem_cache->gfporder = order;
- kmem_cache->colour = left_over / kmem_cache->colour_off;
- kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
- sizeof(struct slab), cache_line_size());
+ create_boot_cache(kmem_cache, "kmem_cache",
+ offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ nr_node_ids * sizeof(struct kmem_list3 *),
+ SLAB_HWCACHE_ALIGN);
+ list_add(&kmem_cache->list, &slab_caches);
/* 2+3) create the kmalloc caches */
sizes = malloc_sizes;
@@ -1671,23 +1666,13 @@ void __init kmem_cache_init(void)
* bug.
*/
- sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
- sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
- sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
- sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
-
- if (INDEX_AC != INDEX_L3) {
- sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
- sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
- sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
- sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
- }
+ sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+
+ if (INDEX_AC != INDEX_L3)
+ sizes[INDEX_L3].cs_cachep =
+ create_kmalloc_cache(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
slab_early_init = 0;
@@ -1699,24 +1684,14 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
- if (!sizes->cs_cachep) {
- sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes->cs_cachep->name = names->name;
- sizes->cs_cachep->size = sizes->cs_size;
- sizes->cs_cachep->object_size = sizes->cs_size;
- sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
- list_add(&sizes->cs_cachep->list, &slab_caches);
- }
+ if (!sizes->cs_cachep)
+ sizes->cs_cachep = create_kmalloc_cache(names->name,
+ sizes->cs_size, ARCH_KMALLOC_FLAGS);
+
#ifdef CONFIG_ZONE_DMA
- sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- sizes->cs_dmacachep->name = names->name_dma;
- sizes->cs_dmacachep->size = sizes->cs_size;
- sizes->cs_dmacachep->object_size = sizes->cs_size;
- sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
- __kmem_cache_create(sizes->cs_dmacachep,
- ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
- list_add(&sizes->cs_dmacachep->list, &slab_caches);
+ sizes->cs_dmacachep = create_kmalloc_cache(
+ names->name_dma, sizes->cs_size,
+ SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
#endif
sizes++;
names++;
@@ -1727,7 +1702,6 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(kmem_cache),
sizeof(struct arraycache_init));
/*
@@ -1921,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
if (page->pfmemalloc)
SetPageSlabPfmemalloc(page + i);
}
+ memcg_bind_pages(cachep, cachep->gfporder);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1957,9 +1932,11 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
__ClearPageSlab(page);
page++;
}
+
+ memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
- free_pages((unsigned long)addr, cachep->gfporder);
+ free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
@@ -2282,7 +2259,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
if (slab_state == DOWN) {
/*
- * Note: the first kmem_cache_create must create the cache
+ * Note: Creation of first cache (kmem_cache).
+ * The set_up_list3s() is taken care of
+ * by the caller of __kmem_cache_create().
+ */
+ cachep->array[smp_processor_id()] = &initarray_generic.cache;
+ slab_state = PARTIAL;
+ } else if (slab_state == PARTIAL) {
+ /*
+ * Note: the second kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
* further caches will BUG().
*/
@@ -2290,7 +2275,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
/*
* If the cache that's used by kmalloc(sizeof(kmem_list3)) is
- * the first cache, then we need to set up all its list3s,
+ * the second cache, then we need to set up all its list3s,
* otherwise the creation of further caches will BUG().
*/
set_up_list3s(cachep, SIZE_AC);
@@ -2299,6 +2284,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
else
slab_state = PARTIAL_ARRAYCACHE;
} else {
+ /* Remaining boot caches */
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
@@ -2331,11 +2317,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
/**
* __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
+ * @cachep: cache management descriptor
* @flags: SLAB flags
- * @ctor: A constructor for the objects.
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
@@ -2378,11 +2361,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
- /*
- * Always checks flags, a caller might be expecting debug support which
- * isn't available.
- */
- BUG_ON(flags & ~CREATE_MASK);
/*
* Check that size is in terms of words. This is needed to avoid
@@ -2394,22 +2372,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size &= ~(BYTES_PER_WORD - 1);
}
- /* calculate the final buffer alignment: */
-
- /* 1) arch recommendation: can be overridden for debug */
- if (flags & SLAB_HWCACHE_ALIGN) {
- /*
- * Default alignment: as specified by the arch code. Except if
- * an object is really small, then squeeze multiple objects into
- * one cacheline.
- */
- ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- } else {
- ralign = BYTES_PER_WORD;
- }
-
/*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
@@ -2426,10 +2388,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size &= ~(REDZONE_ALIGN - 1);
}
- /* 2) arch mandated alignment */
- if (ralign < ARCH_SLAB_MINALIGN) {
- ralign = ARCH_SLAB_MINALIGN;
- }
/* 3) caller mandated alignment */
if (ralign < cachep->align) {
ralign = cachep->align;
@@ -2447,7 +2405,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
else
gfp = GFP_NOWAIT;
- cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+ setup_nodelists_pointer(cachep);
#if DEBUG
/*
@@ -2566,7 +2524,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
slab_set_debugobj_lock_classes(cachep);
- }
+ } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
+ on_slab_lock_classes(cachep);
return 0;
}
@@ -3530,6 +3489,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
if (slab_should_failslab(cachep, flags))
return NULL;
+ cachep = memcg_kmem_get_cache(cachep, flags);
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
@@ -3615,6 +3576,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
if (slab_should_failslab(cachep, flags))
return NULL;
+ cachep = memcg_kmem_get_cache(cachep, flags);
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
@@ -3928,6 +3891,9 @@ EXPORT_SYMBOL(__kmalloc);
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
+ cachep = cache_from_obj(cachep, objp);
+ if (!cachep)
+ return;
local_irq_save(flags);
debug_check_no_locks_freed(objp, cachep->object_size);
@@ -3969,12 +3935,6 @@ void kfree(const void *objp)
}
EXPORT_SYMBOL(kfree);
-unsigned int kmem_cache_size(struct kmem_cache *cachep)
-{
- return cachep->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
@@ -4081,7 +4041,7 @@ static void do_ccupdate_local(void *info)
}
/* Always called with the slab_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
struct ccupdate_struct *new;
@@ -4124,12 +4084,49 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
return alloc_kmemlist(cachep, gfp);
}
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+ int batchcount, int shared, gfp_t gfp)
+{
+ int ret;
+ struct kmem_cache *c = NULL;
+ int i = 0;
+
+ ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
+
+ if (slab_state < FULL)
+ return ret;
+
+ if ((ret < 0) || !is_root_cache(cachep))
+ return ret;
+
+ VM_BUG_ON(!mutex_is_locked(&slab_mutex));
+ for_each_memcg_cache_index(i) {
+ c = cache_from_memcg(cachep, i);
+ if (c)
+ /* return value determined by the parent cache only */
+ __do_tune_cpucache(c, limit, batchcount, shared, gfp);
+ }
+
+ return ret;
+}
+
/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
- int limit, shared;
+ int limit = 0;
+ int shared = 0;
+ int batchcount = 0;
+
+ if (!is_root_cache(cachep)) {
+ struct kmem_cache *root = memcg_root_cache(cachep);
+ limit = root->limit;
+ shared = root->shared;
+ batchcount = root->batchcount;
+ }
+ if (limit && shared && batchcount)
+ goto skip_setup;
/*
* The head array serves three purposes:
* - create a LIFO ordering, i.e. return objects that are cache-warm
@@ -4171,7 +4168,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
if (limit > 32)
limit = 32;
#endif
- err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
+ batchcount = (limit + 1) / 2;
+skip_setup:
+ err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
@@ -4276,54 +4275,8 @@ out:
}
#ifdef CONFIG_SLABINFO
-
-static void print_slabinfo_header(struct seq_file *m)
-{
- /*
- * Output format version, so at least we can change it
- * without _too_ many complaints.
- */
-#if STATS
- seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
-#else
- seq_puts(m, "slabinfo - version: 2.1\n");
-#endif
- seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
- "<objperslab> <pagesperslab>");
- seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
- seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-#if STATS
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
- "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
-#endif
- seq_putc(m, '\n');
-}
-
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
- loff_t n = *pos;
-
- mutex_lock(&slab_mutex);
- if (!n)
- print_slabinfo_header(m);
-
- return seq_list_start(&slab_caches, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
- return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
- mutex_unlock(&slab_mutex);
-}
-
-static int s_show(struct seq_file *m, void *p)
-{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -4378,13 +4331,20 @@ static int s_show(struct seq_file *m, void *p)
if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
- seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->size,
- cachep->num, (1 << cachep->gfporder));
- seq_printf(m, " : tunables %4u %4u %4u",
- cachep->limit, cachep->batchcount, cachep->shared);
- seq_printf(m, " : slabdata %6lu %6lu %6lu",
- active_slabs, num_slabs, shared_avail);
+ sinfo->active_objs = active_objs;
+ sinfo->num_objs = num_objs;
+ sinfo->active_slabs = active_slabs;
+ sinfo->num_slabs = num_slabs;
+ sinfo->shared_avail = shared_avail;
+ sinfo->limit = cachep->limit;
+ sinfo->batchcount = cachep->batchcount;
+ sinfo->shared = cachep->shared;
+ sinfo->objects_per_slab = cachep->num;
+ sinfo->cache_order = cachep->gfporder;
+}
+
+void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+{
#if STATS
{ /* list3 stats */
unsigned long high = cachep->high_mark;
@@ -4414,31 +4374,8 @@ static int s_show(struct seq_file *m, void *p)
allochit, allocmiss, freehit, freemiss);
}
#endif
- seq_putc(m, '\n');
- return 0;
}
-/*
- * slabinfo_op - iterator that generates /proc/slabinfo
- *
- * Output layout:
- * cache-name
- * num-active-objs
- * total-objs
- * object size
- * num-active-slabs
- * total-slabs
- * num-pages-per-slab
- * + further values on SMP and with statistics enabled
- */
-
-static const struct seq_operations slabinfo_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
-};
-
#define MAX_SLABINFO_WRITE 128
/**
* slabinfo_write - Tuning for the slab allocator
@@ -4447,7 +4384,7 @@ static const struct seq_operations slabinfo_op = {
* @count: data length
* @ppos: unused
*/
-static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
@@ -4490,19 +4427,6 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
return res;
}
-static int slabinfo_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &slabinfo_op);
-}
-
-static const struct file_operations proc_slabinfo_operations = {
- .open = slabinfo_open,
- .read = seq_read,
- .write = slabinfo_write,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
#ifdef CONFIG_DEBUG_SLAB_LEAK
static void *leaks_start(struct seq_file *m, loff_t *pos)
@@ -4631,6 +4555,16 @@ static int leaks_show(struct seq_file *m, void *p)
return 0;
}
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&slab_mutex);
+}
+
static const struct seq_operations slabstats_op = {
.start = leaks_start,
.next = s_next,
@@ -4665,7 +4599,6 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
- proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
diff --git a/mm/slab.h b/mm/slab.h
index 7deeb449a30..34a98d64219 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -32,19 +32,201 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size);
+
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
+ unsigned long flags);
+extern void create_boot_cache(struct kmem_cache *, const char *name,
+ size_t size, unsigned long flags);
+
+struct mem_cgroup;
#ifdef CONFIG_SLUB
-struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *));
+struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *));
#else
-static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+static inline struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
+/* Legal flag mask for kmem_cache_create(), for various configurations */
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+
+#if defined(CONFIG_DEBUG_SLAB)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
+#elif defined(CONFIG_SLUB_DEBUG)
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+ SLAB_TRACE | SLAB_DEBUG_FREE)
+#else
+#define SLAB_DEBUG_FLAGS (0)
+#endif
+
+#if defined(CONFIG_SLAB)
+#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
+ SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+#elif defined(CONFIG_SLUB)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+ SLAB_TEMPORARY | SLAB_NOTRACK)
+#else
+#define SLAB_CACHE_FLAGS (0)
+#endif
+
+#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
+
int __kmem_cache_shutdown(struct kmem_cache *);
+struct seq_file;
+struct file;
+
+struct slabinfo {
+ unsigned long active_objs;
+ unsigned long num_objs;
+ unsigned long active_slabs;
+ unsigned long num_slabs;
+ unsigned long shared_avail;
+ unsigned int limit;
+ unsigned int batchcount;
+ unsigned int shared;
+ unsigned int objects_per_slab;
+ unsigned int cache_order;
+};
+
+void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
+void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos);
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline bool is_root_cache(struct kmem_cache *s)
+{
+ return !s->memcg_params || s->memcg_params->is_root_cache;
+}
+
+static inline bool cache_match_memcg(struct kmem_cache *cachep,
+ struct mem_cgroup *memcg)
+{
+ return (is_root_cache(cachep) && !memcg) ||
+ (cachep->memcg_params->memcg == memcg);
+}
+
+static inline void memcg_bind_pages(struct kmem_cache *s, int order)
+{
+ if (!is_root_cache(s))
+ atomic_add(1 << order, &s->memcg_params->nr_pages);
+}
+
+static inline void memcg_release_pages(struct kmem_cache *s, int order)
+{
+ if (is_root_cache(s))
+ return;
+
+ if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
+ mem_cgroup_destroy_cache(s);
+}
+
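The pair of helpers above keeps a per-cache count of pages bound to a memcg cache, 1 << order at a time; when the count drops back to zero, mem_cgroup_destroy_cache() is asked to tear the cache down. A toy user-space model of that counting (all names here are invented; the real destruction decision is not modeled):

#include <stdatomic.h>
#include <stdio.h>

struct toy_memcg_cache {
	atomic_int nr_pages;	/* pages currently backing this cache's slabs */
};

static void toy_bind_pages(struct toy_memcg_cache *c, int order)
{
	atomic_fetch_add(&c->nr_pages, 1 << order);
}

static void toy_release_pages(struct toy_memcg_cache *c, int order)
{
	/* fetch_sub returns the old value, so old == 1 << order means we
	 * just dropped the counter to zero (like atomic_sub_and_test). */
	if (atomic_fetch_sub(&c->nr_pages, 1 << order) == (1 << order))
		printf("last slab gone, cache can be destroyed\n");
}

int main(void)
{
	struct toy_memcg_cache c = { .nr_pages = 0 };

	toy_bind_pages(&c, 3);		/* one order-3 slab: 8 pages */
	toy_release_pages(&c, 3);	/* counter hits zero */
	return 0;
}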
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+{
+ return (p == s) ||
+ (s->memcg_params && (p == s->memcg_params->root_cache));
+}
+
+/*
+ * We add a suffix to the name of memcg caches because we can't have two
+ * caches in the system with the same name. But when we print them
+ * locally, it is better to refer to them by the base name.
+ */
+static inline const char *cache_name(struct kmem_cache *s)
+{
+ if (!is_root_cache(s))
+ return s->memcg_params->root_cache->name;
+ return s->name;
+}
+
+static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+{
+ return s->memcg_params->memcg_caches[idx];
+}
+
+static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+{
+ if (is_root_cache(s))
+ return s;
+ return s->memcg_params->root_cache;
+}
+#else
+static inline bool is_root_cache(struct kmem_cache *s)
+{
+ return true;
+}
+
+static inline bool cache_match_memcg(struct kmem_cache *cachep,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+
+static inline void memcg_bind_pages(struct kmem_cache *s, int order)
+{
+}
+
+static inline void memcg_release_pages(struct kmem_cache *s, int order)
+{
+}
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+ struct kmem_cache *p)
+{
+ return true;
+}
+
+static inline const char *cache_name(struct kmem_cache *s)
+{
+ return s->name;
+}
+
+static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
+{
+ return NULL;
+}
+
+static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+{
+ return s;
+}
+#endif
+
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+ struct kmem_cache *cachep;
+ struct page *page;
+
+ /*
+ * When kmemcg is not being used, both assignments should return the
+ * same value. But we don't want to pay the assignment price in that
+ * case. If it is not compiled in, the compiler should be smart enough
+ * not to even do the assignment. In that case, slab_equal_or_root
+ * will also be a constant.
+ */
+ if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+ return s;
+
+ page = virt_to_head_page(x);
+ cachep = page->slab_cache;
+ if (slab_equal_or_root(cachep, s))
+ return cachep;
+
+ pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+ __FUNCTION__, cachep->name, s->name);
+ WARN_ON_ONCE(1);
+ return s;
+}
#endif
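cache_from_obj() above is what lets the reworked kmem_cache_free() in mm/slab.c accept either a root cache or one of its per-memcg children: the owning cache is recovered from page->slab_cache and only has to be equal to, or a child of, what the caller passed. A toy user-space analogue of that ownership check (all names are invented for illustration; this is not kernel API):

#include <stdio.h>

struct toy_cache {
	const char *name;
	struct toy_cache *root;		/* NULL for a root cache */
};

struct toy_obj {
	struct toy_cache *owner;	/* plays the role of page->slab_cache */
	int payload;
};

/* Return the cache that really owns the object, warning on a mismatch. */
static struct toy_cache *toy_cache_from_obj(struct toy_cache *claimed,
					    struct toy_obj *obj)
{
	struct toy_cache *owner = obj->owner;

	if (owner == claimed || owner->root == claimed)
		return owner;

	fprintf(stderr, "wrong cache: freeing into %s but object is from %s\n",
		claimed->name, owner->name);
	return claimed;
}

int main(void)
{
	struct toy_cache root = { "dentry", NULL };
	struct toy_cache child = { "dentry(2:foo)", &root };
	struct toy_obj obj = { &child, 42 };

	/* Freeing via the root cache still resolves to the memcg child. */
	printf("resolved: %s\n", toy_cache_from_obj(&root, &obj)->name);
	return 0;
}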
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 069a24e6440..3f3cd97d3fd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -13,9 +13,12 @@
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
+#include <linux/memcontrol.h>
#include "slab.h"
@@ -25,7 +28,8 @@ DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
#ifdef CONFIG_DEBUG_VM
-static int kmem_cache_sanity_check(const char *name, size_t size)
+static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
+ size_t size)
{
struct kmem_cache *s = NULL;
@@ -51,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
continue;
}
- if (!strcmp(s->name, name)) {
+ /*
+ * For simplicity, we won't check this in the list of memcg
+ * caches. We have control over memcg naming, and if there
+ * aren't duplicates in the global list, there won't be any
+ * duplicates in the memcg lists as well.
+ */
+ if (!memcg && !strcmp(s->name, name)) {
pr_err("%s (%s): Cache name already exists.\n",
__func__, name);
dump_stack();
@@ -64,12 +74,69 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
return 0;
}
#else
-static inline int kmem_cache_sanity_check(const char *name, size_t size)
+static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg,
+ const char *name, size_t size)
{
return 0;
}
#endif
+#ifdef CONFIG_MEMCG_KMEM
+int memcg_update_all_caches(int num_memcgs)
+{
+ struct kmem_cache *s;
+ int ret = 0;
+ mutex_lock(&slab_mutex);
+
+ list_for_each_entry(s, &slab_caches, list) {
+ if (!is_root_cache(s))
+ continue;
+
+ ret = memcg_update_cache_size(s, num_memcgs);
+ /*
+ * See comment in memcontrol.c, memcg_update_cache_size:
+ * Instead of freeing the memory, we'll just leave the caches
+ * up to this point in an updated state.
+ */
+ if (ret)
+ goto out;
+ }
+
+ memcg_update_array_size(num_memcgs);
+out:
+ mutex_unlock(&slab_mutex);
+ return ret;
+}
+#endif
+
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+unsigned long calculate_alignment(unsigned long flags,
+ unsigned long align, unsigned long size)
+{
+ /*
+ * If the user wants hardware cache aligned objects then follow that
+ * suggestion if the object is sufficiently large.
+ *
+ * The hardware cache alignment cannot override the specified
+ * alignment though. If that is greater, then use it.
+ */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ unsigned long ralign = cache_line_size();
+ while (size <= ralign / 2)
+ ralign /= 2;
+ align = max(align, ralign);
+ }
+
+ if (align < ARCH_SLAB_MINALIGN)
+ align = ARCH_SLAB_MINALIGN;
+
+ return ALIGN(align, sizeof(void *));
+}
+
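A worked example of the rule above, with assumed constants (a 64-byte cache line and an 8-byte ARCH_SLAB_MINALIGN, neither of which is fixed by this patch): for SLAB_HWCACHE_ALIGN and a 24-byte object, ralign is halved from 64 to 32 (24 <= 32) but not to 16 (24 > 16), so a couple of objects share a cache line and the result is 32-byte alignment. A self-contained sketch of the same computation:

#include <stdio.h>

#define EX_CACHE_LINE_SIZE	64UL	/* assumed cache line size */
#define EX_ARCH_SLAB_MINALIGN	8UL	/* assumed architecture minimum */

static unsigned long ex_calculate_alignment(int hwcache_align,
					    unsigned long align,
					    unsigned long size)
{
	if (hwcache_align) {
		unsigned long ralign = EX_CACHE_LINE_SIZE;

		/* Halve while the object still fits in half the alignment. */
		while (size <= ralign / 2)
			ralign /= 2;
		if (ralign > align)
			align = ralign;
	}
	if (align < EX_ARCH_SLAB_MINALIGN)
		align = EX_ARCH_SLAB_MINALIGN;

	/* Round up to a pointer-sized multiple, as the helper above does. */
	return (align + sizeof(void *) - 1) & ~(sizeof(void *) - 1UL);
}

int main(void)
{
	printf("%lu\n", ex_calculate_alignment(1, 0, 24));	/* prints 32 */
	return 0;
}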
+
/*
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -95,8 +162,10 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
* as davem.
*/
-struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+struct kmem_cache *
+kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *),
+ struct kmem_cache *parent_cache)
{
struct kmem_cache *s = NULL;
int err = 0;
@@ -104,19 +173,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
get_online_cpus();
mutex_lock(&slab_mutex);
- if (!kmem_cache_sanity_check(name, size) == 0)
+ if (!kmem_cache_sanity_check(memcg, name, size) == 0)
goto out_locked;
+ /*
+ * Some allocators will constrain the set of valid flags to a subset
+ * of all flags. We expect them to define CACHE_CREATE_MASK in this
+ * case, and we'll just provide them with a sanitized version of the
+ * passed flags.
+ */
+ flags &= CACHE_CREATE_MASK;
- s = __kmem_cache_alias(name, size, align, flags, ctor);
+ s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
if (s)
goto out_locked;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (s) {
s->object_size = s->size = size;
- s->align = align;
+ s->align = calculate_alignment(flags, align, size);
s->ctor = ctor;
+
+ if (memcg_register_cache(memcg, s, parent_cache)) {
+ kmem_cache_free(kmem_cache, s);
+ err = -ENOMEM;
+ goto out_locked;
+ }
+
s->name = kstrdup(name, GFP_KERNEL);
if (!s->name) {
kmem_cache_free(kmem_cache, s);
@@ -126,10 +209,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
err = __kmem_cache_create(s, flags);
if (!err) {
-
s->refcount = 1;
list_add(&s->list, &slab_caches);
-
+ memcg_cache_list_add(memcg, s);
} else {
kfree(s->name);
kmem_cache_free(kmem_cache, s);
@@ -157,10 +239,20 @@ out_locked:
return s;
}
+
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
+{
+ return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
+}
EXPORT_SYMBOL(kmem_cache_create);
void kmem_cache_destroy(struct kmem_cache *s)
{
+ /* Destroy all the children caches if we aren't a memcg cache */
+ kmem_cache_destroy_memcg_children(s);
+
get_online_cpus();
mutex_lock(&slab_mutex);
s->refcount--;
@@ -172,6 +264,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
+ memcg_release_cache(s);
kfree(s->name);
kmem_cache_free(kmem_cache, s);
} else {
@@ -192,3 +285,182 @@ int slab_is_available(void)
{
return slab_state >= UP;
}
+
+#ifndef CONFIG_SLOB
+/* Create a cache during boot when no slab services are available yet */
+void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
+ unsigned long flags)
+{
+ int err;
+
+ s->name = name;
+ s->size = s->object_size = size;
+ s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+ err = __kmem_cache_create(s, flags);
+
+ if (err)
+ panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
+ name, size, err);
+
+ s->refcount = -1; /* Exempt from merging for now */
+}
+
+struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+ unsigned long flags)
+{
+ struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+
+ if (!s)
+ panic("Out of memory when creating slab %s\n", name);
+
+ create_boot_cache(s, name, size, flags);
+ list_add(&s->list, &slab_caches);
+ s->refcount = 1;
+ return s;
+}
+
+#endif /* !CONFIG_SLOB */
+
+
+#ifdef CONFIG_SLABINFO
+void print_slabinfo_header(struct seq_file *m)
+{
+ /*
+ * Output format version, so at least we can change it
+ * without _too_ many complaints.
+ */
+#ifdef CONFIG_DEBUG_SLAB
+ seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
+#else
+ seq_puts(m, "slabinfo - version: 2.1\n");
+#endif
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+#ifdef CONFIG_DEBUG_SLAB
+ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+#endif
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ mutex_lock(&slab_mutex);
+ if (!n)
+ print_slabinfo_header(m);
+
+ return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&slab_mutex);
+}
+
+static void
+memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
+{
+ struct kmem_cache *c;
+ struct slabinfo sinfo;
+ int i;
+
+ if (!is_root_cache(s))
+ return;
+
+ for_each_memcg_cache_index(i) {
+ c = cache_from_memcg(s, i);
+ if (!c)
+ continue;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ get_slabinfo(c, &sinfo);
+
+ info->active_slabs += sinfo.active_slabs;
+ info->num_slabs += sinfo.num_slabs;
+ info->shared_avail += sinfo.shared_avail;
+ info->active_objs += sinfo.active_objs;
+ info->num_objs += sinfo.num_objs;
+ }
+}
+
+int cache_show(struct kmem_cache *s, struct seq_file *m)
+{
+ struct slabinfo sinfo;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ get_slabinfo(s, &sinfo);
+
+ memcg_accumulate_slabinfo(s, &sinfo);
+
+ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
+ cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
+ sinfo.objects_per_slab, (1 << sinfo.cache_order));
+
+ seq_printf(m, " : tunables %4u %4u %4u",
+ sinfo.limit, sinfo.batchcount, sinfo.shared);
+ seq_printf(m, " : slabdata %6lu %6lu %6lu",
+ sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
+ slabinfo_show_stats(m, s);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
+
+ if (!is_root_cache(s))
+ return 0;
+ return cache_show(s, m);
+}
+
+/*
+ * slabinfo_op - iterator that generates /proc/slabinfo
+ *
+ * Output layout:
+ * cache-name
+ * num-active-objs
+ * total-objs
+ * object size
+ * num-active-slabs
+ * total-slabs
+ * num-pages-per-slab
+ * + further values on SMP and with statistics enabled
+ */
+static const struct seq_operations slabinfo_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+
+static int slabinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &slabinfo_op);
+}
+
+static const struct file_operations proc_slabinfo_operations = {
+ .open = slabinfo_open,
+ .read = seq_read,
+ .write = slabinfo_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init slab_proc_init(void)
+{
+ proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
+ return 0;
+}
+module_init(slab_proc_init);
+#endif /* CONFIG_SLABINFO */
diff --git a/mm/slob.c b/mm/slob.c
index 1e921c5e957..a99fdf7a090 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
* from kmalloc are prepended with a 4-byte header with the kmalloc size.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
* is false for them.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
@@ -59,7 +58,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include "slab.h"
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
@@ -74,6 +72,7 @@
#include <linux/atomic.h>
+#include "slab.h"
/*
* slob_block has a field 'units', which indicates size of block if +ve,
* or offset of next block if -ve (in SLOB_UNITs).
@@ -124,7 +123,6 @@ static inline void clear_slob_page_free(struct page *sp)
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -455,11 +453,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
if (likely(order))
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
- if (ret) {
- struct page *page;
- page = virt_to_page(ret);
- page->private = size;
- }
trace_kmalloc_node(caller, ret,
size, PAGE_SIZE << order, gfp, node);
@@ -506,7 +499,7 @@ void kfree(const void *block)
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else
- put_page(sp);
+ __free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);
@@ -514,37 +507,30 @@ EXPORT_SYMBOL(kfree);
size_t ksize(const void *block)
{
struct page *sp;
+ int align;
+ unsigned int *m;
BUG_ON(!block);
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
sp = virt_to_page(block);
- if (PageSlab(sp)) {
- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
- unsigned int *m = (unsigned int *)(block - align);
- return SLOB_UNITS(*m) * SLOB_UNIT;
- } else
- return sp->private;
+ if (unlikely(!PageSlab(sp)))
+ return PAGE_SIZE << compound_order(sp);
+
+ align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ m = (unsigned int *)(block - align);
+ return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);
int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
- size_t align = c->size;
-
if (flags & SLAB_DESTROY_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
c->flags = flags;
- /* ignore alignment unless it's forced */
- c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < ARCH_SLAB_MINALIGN)
- c->align = ARCH_SLAB_MINALIGN;
- if (c->align < align)
- c->align = align;
-
return 0;
}
@@ -558,12 +544,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
SLOB_UNITS(c->size) * SLOB_UNIT,
flags, node);
} else {
b = slob_new_pages(flags, get_order(c->size), node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
PAGE_SIZE << get_order(c->size),
flags, node);
}
@@ -608,12 +594,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
}
EXPORT_SYMBOL(kmem_cache_free);
-unsigned int kmem_cache_size(struct kmem_cache *c)
-{
- return c->size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
int __kmem_cache_shutdown(struct kmem_cache *c)
{
/* No way to check for remaining objects */
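A minimal sketch of the large-allocation round trip in SLOB after the change above (illustrative only; it simply strings together the patched paths from mm/slob.c shown in this hunk, with gfp, size and node standing in for the caller's values):

	/* kmalloc() with size >= PAGE_SIZE: a plain compound page, no size header
	 * (__GFP_COMP is set when order > 0, as in __do_kmalloc_node() above) */
	void *p = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);

	/* ksize(): the size is recovered from the compound order, not page->private */
	size_t sz = PAGE_SIZE << compound_order(virt_to_page(p));

	/* kfree(): PageSlab() is false for such pages, so free the whole compound page */
	__free_pages(virt_to_page(p), compound_order(virt_to_page(p)));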
diff --git a/mm/slub.c b/mm/slub.c
index 487f0bdd53c..ba2ca53f6c3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -31,6 +31,7 @@
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
+#include <linux/memcontrol.h>
#include <trace/events/kmem.h>
@@ -112,9 +113,6 @@
* the fast path and disables lockless freelists.
*/
-#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
- SLAB_TRACE | SLAB_DEBUG_FREE)
-
static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -179,8 +177,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#define __OBJECT_POISON 0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
-static int kmem_size = sizeof(struct kmem_cache);
-
#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif
@@ -205,13 +201,14 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
-
+static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
+static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -1092,11 +1089,11 @@ static noinline struct kmem_cache_node *free_debug_processing(
if (!check_object(s, page, object, SLUB_RED_ACTIVE))
goto out;
- if (unlikely(s != page->slab)) {
+ if (unlikely(s != page->slab_cache)) {
if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
- } else if (!page->slab) {
+ } else if (!page->slab_cache) {
printk(KERN_ERR
"SLUB <none>: no slab for object 0x%p.\n",
object);
@@ -1348,6 +1345,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
void *start;
void *last;
void *p;
+ int order;
BUG_ON(flags & GFP_SLAB_BUG_MASK);
@@ -1356,8 +1354,10 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
goto out;
+ order = compound_order(page);
inc_slabs_node(s, page_to_nid(page), page->objects);
- page->slab = s;
+ memcg_bind_pages(s, order);
+ page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
SetPageSlabPfmemalloc(page);
@@ -1365,7 +1365,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
start = page_address(page);
if (unlikely(s->flags & SLAB_POISON))
- memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
+ memset(start, POISON_INUSE, PAGE_SIZE << order);
last = start;
for_each_object(p, s, start, page->objects) {
@@ -1406,10 +1406,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
+
+ memcg_release_pages(s, order);
reset_page_mapcount(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- __free_pages(page, order);
+ __free_memcg_kmem_pages(page, order);
}
#define need_reserve_slab_rcu \
@@ -1424,7 +1426,7 @@ static void rcu_free_slab(struct rcu_head *h)
else
page = container_of((struct list_head *)h, struct page, lru);
- __free_slab(page->slab, page);
+ __free_slab(page->slab_cache, page);
}
static void free_slab(struct kmem_cache *s, struct page *page)
@@ -1872,12 +1874,14 @@ redo:
/*
* Unfreeze all the cpu partial slabs.
*
- * This function must be called with interrupt disabled.
+ * This function must be called with interrupts disabled
+ * for the cpu using c (or some other guarantee of no concurrent
+ * access must hold).
*/
-static void unfreeze_partials(struct kmem_cache *s)
+static void unfreeze_partials(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
{
struct kmem_cache_node *n = NULL, *n2 = NULL;
- struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
@@ -1963,7 +1967,7 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
* set to the per node partial list.
*/
local_irq_save(flags);
- unfreeze_partials(s);
+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
local_irq_restore(flags);
oldpage = NULL;
pobjects = 0;
@@ -2006,7 +2010,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
if (c->page)
flush_slab(s, c);
- unfreeze_partials(s);
+ unfreeze_partials(s, c);
}
}
@@ -2325,6 +2329,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
if (slab_pre_alloc_hook(s, gfpflags))
return NULL;
+ s = memcg_kmem_get_cache(s, gfpflags);
redo:
/*
@@ -2459,7 +2464,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
void *prior;
void **object = (void *)x;
int was_frozen;
- int inuse;
struct page new;
unsigned long counters;
struct kmem_cache_node *n = NULL;
@@ -2472,13 +2476,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
return;
do {
+ if (unlikely(n)) {
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
prior = page->freelist;
counters = page->counters;
set_freepointer(s, object, prior);
new.counters = counters;
was_frozen = new.frozen;
new.inuse--;
- if ((!new.inuse || !prior) && !was_frozen && !n) {
+ if ((!new.inuse || !prior) && !was_frozen) {
if (!kmem_cache_debug(s) && !prior)
@@ -2503,7 +2511,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
}
}
- inuse = new.inuse;
} while (!cmpxchg_double_slab(s, page,
prior, counters,
@@ -2529,25 +2536,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
return;
}
+ if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+ goto slab_empty;
+
/*
- * was_frozen may have been set after we acquired the list_lock in
- * an earlier loop. So we need to check it here again.
+ * Objects left in the slab. If it was not on the partial list before
+ * then add it.
*/
- if (was_frozen)
- stat(s, FREE_FROZEN);
- else {
- if (unlikely(!inuse && n->nr_partial > s->min_partial))
- goto slab_empty;
-
- /*
- * Objects left in the slab. If it was not on the partial list before
- * then add it.
- */
- if (unlikely(!prior)) {
- remove_full(s, page);
- add_partial(n, page, DEACTIVATE_TO_TAIL);
- stat(s, FREE_ADD_PARTIAL);
- }
+ if (kmem_cache_debug(s) && unlikely(!prior)) {
+ remove_full(s, page);
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
return;
@@ -2619,19 +2618,10 @@ redo:
void kmem_cache_free(struct kmem_cache *s, void *x)
{
- struct page *page;
-
- page = virt_to_head_page(x);
-
- if (kmem_cache_debug(s) && page->slab != s) {
- pr_err("kmem_cache_free: Wrong slab cache. %s but object"
- " is from %s\n", page->slab->name, s->name);
- WARN_ON_ONCE(1);
+ s = cache_from_obj(s, x);
+ if (!s)
return;
- }
-
- slab_free(s, page, x, _RET_IP_);
-
+ slab_free(s, virt_to_head_page(x), x, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);
@@ -2769,32 +2759,6 @@ static inline int calculate_order(int size, int reserved)
return -ENOSYS;
}
-/*
- * Figure out what the alignment of the objects will be.
- */
-static unsigned long calculate_alignment(unsigned long flags,
- unsigned long align, unsigned long size)
-{
- /*
- * If the user wants hardware cache aligned objects then follow that
- * suggestion if the object is sufficiently large.
- *
- * The hardware cache alignment cannot override the specified
- * alignment though. If that is greater then use it.
- */
- if (flags & SLAB_HWCACHE_ALIGN) {
- unsigned long ralign = cache_line_size();
- while (size <= ralign / 2)
- ralign /= 2;
- align = max(align, ralign);
- }
-
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
-
- return ALIGN(align, sizeof(void *));
-}
-
static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
@@ -2928,7 +2892,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
unsigned long flags = s->flags;
unsigned long size = s->object_size;
- unsigned long align = s->align;
int order;
/*
@@ -3000,19 +2963,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
#endif
/*
- * Determine the alignment based on various parameters that the
- * user specified and the dynamic determination of cache line size
- * on bootup.
- */
- align = calculate_alignment(flags, align, s->object_size);
- s->align = align;
-
- /*
* SLUB stores one object immediately after another beginning from
* offset 0. In order to align the objects we have to simply size
* each object to conform to the alignment.
*/
- size = ALIGN(size, align);
+ size = ALIGN(size, s->align);
s->size = size;
if (forced_order >= 0)
order = forced_order;
@@ -3041,7 +2996,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
s->max = s->oo;
return !!oo_objects(s->oo);
-
}
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
@@ -3127,15 +3081,6 @@ error:
return -EINVAL;
}
-/*
- * Determine the size of a slab object
- */
-unsigned int kmem_cache_size(struct kmem_cache *s)
-{
- return s->object_size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
@@ -3208,8 +3153,19 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
{
int rc = kmem_cache_close(s);
- if (!rc)
+ if (!rc) {
+ /*
+ * We use the same locking strategy around sysfs_slab_add; see
+ * __kmem_cache_create. Because this is pretty much the last
+ * operation we do and the lock will be released shortly after
+ * that in slab_common.c, we could just move sysfs_slab_remove
+ * to a later point in common code. We should do that when we
+ * have a common sysfs framework for all allocators.
+ */
+ mutex_unlock(&slab_mutex);
sysfs_slab_remove(s);
+ mutex_lock(&slab_mutex);
+ }
return rc;
}
@@ -3261,32 +3217,6 @@ static int __init setup_slub_nomerge(char *str)
__setup("slub_nomerge", setup_slub_nomerge);
-static struct kmem_cache *__init create_kmalloc_cache(const char *name,
- int size, unsigned int flags)
-{
- struct kmem_cache *s;
-
- s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-
- s->name = name;
- s->size = s->object_size = size;
- s->align = ARCH_KMALLOC_MINALIGN;
-
- /*
- * This function is called with IRQs disabled during early-boot on
- * single CPU so there's no need to take slab_mutex here.
- */
- if (kmem_cache_open(s, flags))
- goto panic;
-
- list_add(&s->list, &slab_caches);
- return s;
-
-panic:
- panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
- return NULL;
-}
-
/*
* Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -3372,7 +3302,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
struct page *page;
void *ptr = NULL;
- flags |= __GFP_COMP | __GFP_NOTRACK;
+ flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
page = alloc_pages_node(node, flags, get_order(size));
if (page)
ptr = page_address(page);
@@ -3424,7 +3354,7 @@ size_t ksize(const void *object)
return PAGE_SIZE << compound_order(page);
}
- return slab_ksize(page->slab);
+ return slab_ksize(page->slab_cache);
}
EXPORT_SYMBOL(ksize);
@@ -3449,8 +3379,8 @@ bool verify_mem_not_deleted(const void *x)
}
slab_lock(page);
- if (on_freelist(page->slab, page, object)) {
- object_err(page->slab, page, object, "Object is on free-list");
+ if (on_freelist(page->slab_cache, page, object)) {
+ object_err(page->slab_cache, page, object, "Object is on free-list");
rv = false;
} else {
rv = true;
@@ -3478,10 +3408,10 @@ void kfree(const void *x)
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
kmemleak_free(x);
- __free_pages(page, compound_order(page));
+ __free_memcg_kmem_pages(page, compound_order(page));
return;
}
- slab_free(page->slab, page, object, _RET_IP_);
+ slab_free(page->slab_cache, page, object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);
@@ -3676,15 +3606,16 @@ static int slab_memory_callback(struct notifier_block *self,
/*
* Used for early kmem_cache structures that were allocated using
- * the page allocator
+ * the page allocator. Allocate them properly then fix up the pointers
+ * that may be pointing to the wrong kmem_cache structure.
*/
-static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
{
int node;
+ struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
- list_add(&s->list, &slab_caches);
- s->refcount = -1;
+ memcpy(s, static_cache, kmem_cache->object_size);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
@@ -3692,78 +3623,52 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
if (n) {
list_for_each_entry(p, &n->partial, lru)
- p->slab = s;
+ p->slab_cache = s;
#ifdef CONFIG_SLUB_DEBUG
list_for_each_entry(p, &n->full, lru)
- p->slab = s;
+ p->slab_cache = s;
#endif
}
}
+ list_add(&s->list, &slab_caches);
+ return s;
}
void __init kmem_cache_init(void)
{
+ static __initdata struct kmem_cache boot_kmem_cache,
+ boot_kmem_cache_node;
int i;
- int caches = 0;
- struct kmem_cache *temp_kmem_cache;
- int order;
- struct kmem_cache *temp_kmem_cache_node;
- unsigned long kmalloc_size;
+ int caches = 2;
if (debug_guardpage_minorder())
slub_max_order = 0;
- kmem_size = offsetof(struct kmem_cache, node) +
- nr_node_ids * sizeof(struct kmem_cache_node *);
-
- /* Allocate two kmem_caches from the page allocator */
- kmalloc_size = ALIGN(kmem_size, cache_line_size());
- order = get_order(2 * kmalloc_size);
- kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
-
- /*
- * Must first have the slab cache available for the allocations of the
- * struct kmem_cache_node's. There is special bootstrap code in
- * kmem_cache_open for slab_state == DOWN.
- */
- kmem_cache_node = (void *)kmem_cache + kmalloc_size;
+ kmem_cache_node = &boot_kmem_cache_node;
+ kmem_cache = &boot_kmem_cache;
- kmem_cache_node->name = "kmem_cache_node";
- kmem_cache_node->size = kmem_cache_node->object_size =
- sizeof(struct kmem_cache_node);
- kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+ create_boot_cache(kmem_cache_node, "kmem_cache_node",
+ sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
/* Able to allocate the per node structures */
slab_state = PARTIAL;
- temp_kmem_cache = kmem_cache;
- kmem_cache->name = "kmem_cache";
- kmem_cache->size = kmem_cache->object_size = kmem_size;
- kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+ create_boot_cache(kmem_cache, "kmem_cache",
+ offsetof(struct kmem_cache, node) +
+ nr_node_ids * sizeof(struct kmem_cache_node *),
+ SLAB_HWCACHE_ALIGN);
- kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
- memcpy(kmem_cache, temp_kmem_cache, kmem_size);
+ kmem_cache = bootstrap(&boot_kmem_cache);
/*
* Allocate kmem_cache_node properly from the kmem_cache slab.
* kmem_cache_node is separately allocated so no need to
* update any list pointers.
*/
- temp_kmem_cache_node = kmem_cache_node;
-
- kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
- memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
-
- kmem_cache_bootstrap_fixup(kmem_cache_node);
-
- caches++;
- kmem_cache_bootstrap_fixup(kmem_cache);
- caches++;
- /* Free temporary boot structure */
- free_pages((unsigned long)temp_kmem_cache, order);
+ kmem_cache_node = bootstrap(&boot_kmem_cache_node);
/* Now we can use the kmem_cache to allocate kmalloc slabs */
@@ -3891,7 +3796,7 @@ static int slab_unmergeable(struct kmem_cache *s)
return 0;
}
-static struct kmem_cache *find_mergeable(size_t size,
+static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size,
size_t align, unsigned long flags, const char *name,
void (*ctor)(void *))
{
@@ -3927,17 +3832,21 @@ static struct kmem_cache *find_mergeable(size_t size,
if (s->size - size >= sizeof(void *))
continue;
+ if (!cache_match_memcg(s, memcg))
+ continue;
+
return s;
}
return NULL;
}
-struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+struct kmem_cache *
+__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
- s = find_mergeable(size, align, flags, name, ctor);
+ s = find_mergeable(memcg, size, align, flags, name, ctor);
if (s) {
s->refcount++;
/*
@@ -3964,6 +3873,11 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
if (err)
return err;
+ /* Mutex is not taken during early boot */
+ if (slab_state <= UP)
+ return 0;
+
+ memcg_propagate_slab_attrs(s);
mutex_unlock(&slab_mutex);
err = sysfs_slab_add(s);
mutex_lock(&slab_mutex);
@@ -5197,10 +5111,95 @@ static ssize_t slab_attr_store(struct kobject *kobj,
return -EIO;
err = attribute->store(s, buf, len);
+#ifdef CONFIG_MEMCG_KMEM
+ if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
+ int i;
+
+ mutex_lock(&slab_mutex);
+ if (s->max_attr_size < len)
+ s->max_attr_size = len;
+ /*
+ * This is a best effort propagation, so this function's return
+ * value will be determined by the parent cache only. This is
+ * basically because not all attributes will have a well
+ * defined semantics for rollbacks - most of the actions will
+ * have permanent effects.
+ *
+ * Returning the error value of any of the children that fail
+ * is not 100 % defined, in the sense that users seeing the
+ * error code won't be able to know anything about the state of
+ * the cache.
+ *
+ * Only returning the error code for the parent cache at least
+ * has well defined semantics: the cache being written to
+ * directly either failed or succeeded, and we then loop
+ * through the descendants with best-effort propagation.
+ */
+ for_each_memcg_cache_index(i) {
+ struct kmem_cache *c = cache_from_memcg(s, i);
+ if (c)
+ attribute->store(c, buf, len);
+ }
+ mutex_unlock(&slab_mutex);
+ }
+#endif
return err;
}
+static void memcg_propagate_slab_attrs(struct kmem_cache *s)
+{
+#ifdef CONFIG_MEMCG_KMEM
+ int i;
+ char *buffer = NULL;
+
+ if (!is_root_cache(s))
+ return;
+
+ /*
+ * This means the cache had no attribute written to it. Therefore,
+ * there is no point in copying default values around.
+ */
+ if (!s->max_attr_size)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
+ char mbuf[64];
+ char *buf;
+ struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+
+ if (!attr || !attr->store || !attr->show)
+ continue;
+
+ /*
+ * It is really bad that we have to allocate here, so we will
+ * do it only as a fallback. If we actually allocate, though,
+ * we can just use the allocated buffer until the end.
+ *
+ * Most of the slub attributes will tend to be very small in
+ * size, but sysfs allows buffers up to a page, so they can
+ * theoretically happen.
+ */
+ if (buffer)
+ buf = buffer;
+ else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+ buf = mbuf;
+ else {
+ buffer = (char *) get_zeroed_page(GFP_KERNEL);
+ if (WARN_ON(!buffer))
+ continue;
+ buf = buffer;
+ }
+
+ attr->show(s->memcg_params->root_cache, buf);
+ attr->store(s, buf, strlen(buf));
+ }
+
+ if (buffer)
+ free_page((unsigned long)buffer);
+#endif
+}
+
static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
@@ -5257,6 +5256,12 @@ static char *create_unique_id(struct kmem_cache *s)
if (p != name + 1)
*p++ = '-';
p += sprintf(p, "%07d", s->size);
+
+#ifdef CONFIG_MEMCG_KMEM
+ if (!is_root_cache(s))
+ p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+#endif
+
BUG_ON(p > name + ID_STR_LENGTH - 1);
return name;
}
@@ -5265,13 +5270,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
const char *name;
- int unmergeable;
-
- if (slab_state < FULL)
- /* Defer until later */
- return 0;
+ int unmergeable = slab_unmergeable(s);
- unmergeable = slab_unmergeable(s);
if (unmergeable) {
/*
* Slabcache can never be merged so we can use the name proper.
@@ -5405,49 +5405,14 @@ __initcall(slab_sysfs_init);
* The /proc/slabinfo ABI
*/
#ifdef CONFIG_SLABINFO
-static void print_slabinfo_header(struct seq_file *m)
-{
- seq_puts(m, "slabinfo - version: 2.1\n");
- seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
- "<objperslab> <pagesperslab>");
- seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
- seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
- seq_putc(m, '\n');
-}
-
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
- loff_t n = *pos;
-
- mutex_lock(&slab_mutex);
- if (!n)
- print_slabinfo_header(m);
-
- return seq_list_start(&slab_caches, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
- return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
- mutex_unlock(&slab_mutex);
-}
-
-static int s_show(struct seq_file *m, void *p)
+void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
unsigned long nr_partials = 0;
unsigned long nr_slabs = 0;
- unsigned long nr_inuse = 0;
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
- struct kmem_cache *s;
int node;
- s = list_entry(p, struct kmem_cache, list);
-
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
@@ -5460,41 +5425,21 @@ static int s_show(struct seq_file *m, void *p)
nr_free += count_partial(n, count_free);
}
- nr_inuse = nr_objs - nr_free;
-
- seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
- nr_objs, s->size, oo_objects(s->oo),
- (1 << oo_order(s->oo)));
- seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
- seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
- 0UL);
- seq_putc(m, '\n');
- return 0;
+ sinfo->active_objs = nr_objs - nr_free;
+ sinfo->num_objs = nr_objs;
+ sinfo->active_slabs = nr_slabs;
+ sinfo->num_slabs = nr_slabs;
+ sinfo->objects_per_slab = oo_objects(s->oo);
+ sinfo->cache_order = oo_order(s->oo);
}
-static const struct seq_operations slabinfo_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
-};
-
-static int slabinfo_open(struct inode *inode, struct file *file)
+void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
- return seq_open(file, &slabinfo_op);
}
-static const struct file_operations proc_slabinfo_operations = {
- .open = slabinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static int __init slab_proc_init(void)
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
- return 0;
+ return -EIO;
}
-module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */
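A compressed view of the write-side propagation added to slab_attr_store() above (sketch only; it condenses the hunk, spelling out the declarations that the surrounding function already provides, and only the root cache's return value is reported back to the writer):

	ssize_t err = attribute->store(s, buf, len);	/* root cache */

	if (err >= 0 && is_root_cache(s)) {
		int i;

		for_each_memcg_cache_index(i) {
			struct kmem_cache *c = cache_from_memcg(s, i);

			if (c)
				attribute->store(c, buf, len);	/* best effort, errors ignored */
		}
	}

memcg_propagate_slab_attrs() covers the other direction: when a per-memcg child cache is created later, the root's current attribute values are read back with ->show() and replayed into the child with ->store().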
diff --git a/mm/truncate.c b/mm/truncate.c
index d51ce92d6e8..c75b736e54b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -577,29 +577,6 @@ void truncate_setsize(struct inode *inode, loff_t newsize)
EXPORT_SYMBOL(truncate_setsize);
/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @newsize: file offset to start truncating
- *
- * This function is deprecated and truncate_setsize or truncate_pagecache
- * should be used instead, together with filesystem specific block truncation.
- */
-int vmtruncate(struct inode *inode, loff_t newsize)
-{
- int error;
-
- error = inode_newsize_ok(inode, newsize);
- if (error)
- return error;
-
- truncate_setsize(inode, newsize);
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-}
-EXPORT_SYMBOL(vmtruncate);
-
-/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode
* @lstart: offset of beginning of hole
diff --git a/mm/util.c b/mm/util.c
index dc3036cdcc6..c55e26b17d9 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(__krealloc);
*
* The contents of the object pointed to are preserved up to the
* lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
* %NULL pointer, the object pointed to is freed.
*/
void *krealloc(const void *p, size_t new_size, gfp_t flags)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 157bb116dec..16b42af393a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1177,7 +1177,11 @@ int isolate_lru_page(struct page *page)
}
/*
- * Are there way too many processes in the direct reclaim path already?
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get rescheduled. When there is a massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
*/
static int too_many_isolated(struct zone *zone, int file,
struct scan_control *sc)
@@ -1198,6 +1202,14 @@ static int too_many_isolated(struct zone *zone, int file,
isolated = zone_page_state(zone, NR_ISOLATED_ANON);
}
+ /*
+ * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
+ * won't get blocked by normal direct-reclaimers, forming a circular
+ * deadlock.
+ */
+ if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
+ inactive >>= 3;
+
return isolated > inactive;
}
@@ -2440,12 +2452,16 @@ static bool zone_balanced(struct zone *zone, int order,
}
/*
- * pgdat_balanced is used when checking if a node is balanced for high-order
- * allocations. Only zones that meet watermarks and are in a zone allowed
- * by the callers classzone_idx are added to balanced_pages. The total of
- * balanced pages must be at least 25% of the zones allowed by classzone_idx
- * for the node to be considered balanced. Forcing all zones to be balanced
- * for high orders can cause excessive reclaim when there are imbalanced zones.
+ * pgdat_balanced() is used when checking if a node is balanced.
+ *
+ * For order-0, all zones must be balanced!
+ *
+ * For high-order allocations only zones that meet watermarks and are in a
+ * zone allowed by the callers classzone_idx are added to balanced_pages. The
+ * total of balanced pages must be at least 25% of the zones allowed by
+ * classzone_idx for the node to be considered balanced. Forcing all zones to
+ * be balanced for high orders can cause excessive reclaim when there are
+ * imbalanced zones.
* The choice of 25% is due to
* o a 16M DMA zone that is balanced will not balance a zone on any
* reasonable sized machine
@@ -2455,17 +2471,43 @@ static bool zone_balanced(struct zone *zone, int order,
* Similarly, on x86-64 the Normal zone would need to be at least 1G
* to balance a node on its own. These seemed like reasonable ratios.
*/
-static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
- int classzone_idx)
+static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
unsigned long present_pages = 0;
+ unsigned long balanced_pages = 0;
int i;
- for (i = 0; i <= classzone_idx; i++)
- present_pages += pgdat->node_zones[i].present_pages;
+ /* Check the watermark levels */
+ for (i = 0; i <= classzone_idx; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
- /* A special case here: if zone has no page, we think it's balanced */
- return balanced_pages >= (present_pages >> 2);
+ present_pages += zone->present_pages;
+
+ /*
+ * A special case here:
+ *
+ * balance_pgdat() skips over all_unreclaimable after
+ * DEF_PRIORITY. Effectively, it considers them balanced so
+ * they must be considered balanced here as well!
+ */
+ if (zone->all_unreclaimable) {
+ balanced_pages += zone->present_pages;
+ continue;
+ }
+
+ if (zone_balanced(zone, order, 0, i))
+ balanced_pages += zone->present_pages;
+ else if (!order)
+ return false;
+ }
+
+ if (order)
+ return balanced_pages >= (present_pages >> 2);
+ else
+ return true;
}
/*
@@ -2477,10 +2519,6 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
int classzone_idx)
{
- int i;
- unsigned long balanced = 0;
- bool all_zones_ok = true;
-
/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
if (remaining)
return false;
@@ -2499,39 +2537,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
return false;
}
- /* Check the watermark levels */
- for (i = 0; i <= classzone_idx; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- if (!populated_zone(zone))
- continue;
-
- /*
- * balance_pgdat() skips over all_unreclaimable after
- * DEF_PRIORITY. Effectively, it considers them balanced so
- * they must be considered balanced here as well if kswapd
- * is to sleep
- */
- if (zone->all_unreclaimable) {
- balanced += zone->present_pages;
- continue;
- }
-
- if (!zone_balanced(zone, order, 0, i))
- all_zones_ok = false;
- else
- balanced += zone->present_pages;
- }
-
- /*
- * For high-order requests, the balanced zones must contain at least
- * 25% of the nodes pages for kswapd to sleep. For order-0, all zones
- * must be balanced
- */
- if (order)
- return pgdat_balanced(pgdat, balanced, classzone_idx);
- else
- return all_zones_ok;
+ return pgdat_balanced(pgdat, order, classzone_idx);
}
/*
@@ -2558,8 +2564,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int *classzone_idx)
{
- int all_zones_ok;
- unsigned long balanced;
+ struct zone *unbalanced_zone;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
@@ -2592,8 +2597,7 @@ loop_again:
unsigned long lru_pages = 0;
int has_under_min_watermark_zone = 0;
- all_zones_ok = 1;
- balanced = 0;
+ unbalanced_zone = NULL;
/*
* Scan in the highmem->dma direction for the highest
@@ -2731,7 +2735,7 @@ loop_again:
}
if (!zone_balanced(zone, testorder, 0, end_zone)) {
- all_zones_ok = 0;
+ unbalanced_zone = zone;
/*
* We are still under min water mark. This
* means that we have a GFP_ATOMIC allocation
@@ -2749,8 +2753,6 @@ loop_again:
* speculatively avoid congestion waits
*/
zone_clear_flag(zone, ZONE_CONGESTED);
- if (i <= *classzone_idx)
- balanced += zone->present_pages;
}
}
@@ -2764,7 +2766,7 @@ loop_again:
pfmemalloc_watermark_ok(pgdat))
wake_up(&pgdat->pfmemalloc_wait);
- if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
+ if (pgdat_balanced(pgdat, order, *classzone_idx))
break; /* kswapd: all done */
/*
* OK, kswapd is getting into trouble. Take a nap, then take
@@ -2773,8 +2775,8 @@ loop_again:
if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
if (has_under_min_watermark_zone)
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
- else
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ else if (unbalanced_zone)
+ wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
}
/*
@@ -2788,12 +2790,7 @@ loop_again:
} while (--sc.priority >= 0);
out:
- /*
- * order-0: All zones must meet high watermark for a balanced node
- * high-order: Balanced zones must make up at least 25% of the node
- * for the node to be balanced
- */
- if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
+ if (!pgdat_balanced(pgdat, order, *classzone_idx)) {
cond_resched();
try_to_freeze();
@@ -3131,7 +3128,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
int nid;
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
- for_each_node_state(nid, N_HIGH_MEMORY) {
+ for_each_node_state(nid, N_MEMORY) {
pg_data_t *pgdat = NODE_DATA(nid);
const struct cpumask *mask;
@@ -3187,7 +3184,7 @@ static int __init kswapd_init(void)
int nid;
swap_setup();
- for_each_node_state(nid, N_HIGH_MEMORY)
+ for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
hotcpu_notifier(cpu_callback, 0);
return 0;
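A worked example of the reworked pgdat_balanced() above (numbers assumed for illustration): with 1,000,000 present pages in the zones up to classzone_idx, of which 300,000 sit in zones that meet their watermarks (all_unreclaimable zones count as balanced as well), a high-order request sees the node as balanced because 300,000 >= 1,000,000 >> 2, while an order-0 request still fails the check as soon as a single populated zone misses its watermark.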
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c7370579111..9800306c819 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -774,10 +774,20 @@ const char * const vmstat_text[] = {
"pgrotated",
+#ifdef CONFIG_NUMA_BALANCING
+ "numa_pte_updates",
+ "numa_hint_faults",
+ "numa_hint_faults_local",
+ "numa_pages_migrated",
+#endif
+#ifdef CONFIG_MIGRATION
+ "pgmigrate_success",
+ "pgmigrate_fail",
+#endif
#ifdef CONFIG_COMPACTION
- "compact_blocks_moved",
- "compact_pages_moved",
- "compact_pagemigrate_failed",
+ "compact_migrate_scanned",
+ "compact_free_scanned",
+ "compact_isolated",
"compact_stall",
"compact_fail",
"compact_success",
@@ -801,6 +811,8 @@ const char * const vmstat_text[] = {
"thp_collapse_alloc",
"thp_collapse_alloc_failed",
"thp_split",
+ "thp_zero_page_alloc",
+ "thp_zero_page_alloc_failed",
#endif
#endif /* CONFIG_VM_EVENTS_COUNTERS */
@@ -930,7 +942,7 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
- if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+ if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
seq_printf(m, "Page block order: %d\n", pageblock_order);
@@ -992,14 +1004,16 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n high %lu"
"\n scanned %lu"
"\n spanned %lu"
- "\n present %lu",
+ "\n present %lu"
+ "\n managed %lu",
zone_page_state(zone, NR_FREE_PAGES),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
zone->pages_scanned,
zone->spanned_pages,
- zone->present_pages);
+ zone->present_pages,
+ zone->managed_pages);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu", vmstat_text[i],
@@ -1292,7 +1306,7 @@ static int unusable_show(struct seq_file *m, void *arg)
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
- if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+ if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
walk_zones_in_node(m, pgdat, unusable_show_print);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 35b8911b1c8..fd05c81cb34 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -39,6 +39,7 @@
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
int count = nr_pages;
while (nr_pages) {
s = rest_of_page(data);
- pages[index++] = virt_to_page(data);
+ pages[index++] = kmap_to_page(data);
data += s;
nr_pages--;
}
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f49da5814bc..350bf62b2ae 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -14,49 +14,45 @@ static ssize_t show_type(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
- return sprintf(buf, "%s\n", adev->type);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
}
static ssize_t show_address(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- char *pos = buf;
struct atm_dev *adev = to_atm_dev(cdev);
- int i;
-
- for (i = 0; i < (ESI_LEN - 1); i++)
- pos += sprintf(pos, "%02x:", adev->esi[i]);
- pos += sprintf(pos, "%02x\n", adev->esi[i]);
- return pos - buf;
+ return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
}
static ssize_t show_atmaddress(struct device *cdev,
struct device_attribute *attr, char *buf)
{
unsigned long flags;
- char *pos = buf;
struct atm_dev *adev = to_atm_dev(cdev);
struct atm_dev_addr *aaddr;
int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
- int i, j;
+ int i, j, count = 0;
spin_lock_irqsave(&adev->lock, flags);
list_for_each_entry(aaddr, &adev->local, entry) {
for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
if (j == *fmt) {
- pos += sprintf(pos, ".");
+ count += scnprintf(buf + count,
+ PAGE_SIZE - count, ".");
++fmt;
j = 0;
}
- pos += sprintf(pos, "%02x",
- aaddr->addr.sas_addr.prv[i]);
+ count += scnprintf(buf + count,
+ PAGE_SIZE - count, "%02x",
+ aaddr->addr.sas_addr.prv[i]);
}
- pos += sprintf(pos, "\n");
+ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
}
spin_unlock_irqrestore(&adev->lock, flags);
- return pos - buf;
+ return count;
}
static ssize_t show_atmindex(struct device *cdev,
@@ -64,25 +60,21 @@ static ssize_t show_atmindex(struct device *cdev,
{
struct atm_dev *adev = to_atm_dev(cdev);
- return sprintf(buf, "%d\n", adev->number);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
}
static ssize_t show_carrier(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- char *pos = buf;
struct atm_dev *adev = to_atm_dev(cdev);
- pos += sprintf(pos, "%d\n",
- adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
-
- return pos - buf;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
}
static ssize_t show_link_rate(struct device *cdev,
struct device_attribute *attr, char *buf)
{
- char *pos = buf;
struct atm_dev *adev = to_atm_dev(cdev);
int link_rate;
@@ -100,9 +92,7 @@ static ssize_t show_link_rate(struct device *cdev,
default:
link_rate = adev->link_rate * 8 * 53;
}
- pos += sprintf(pos, "%d\n", link_rate);
-
- return pos - buf;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
}
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
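The conversions above all follow the usual pattern for sysfs show callbacks, roughly (generic sketch, not taken from this file; show_foo and the value 42 are placeholders):

	static ssize_t show_foo(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		/* scnprintf() never writes more than PAGE_SIZE bytes and
		 * returns the number of characters actually stored, which
		 * is exactly what sysfs expects back. */
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}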
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 9f3925a85aa..7d02ebd11a7 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -123,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
unsigned int msecs;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
- msecs += (random32() % 2 * BATADV_JITTER);
+ msecs += random32() % (2 * BATADV_JITTER);
return jiffies + msecs_to_jiffies(msecs);
}
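The change above is an operator-precedence fix: '%' binds tighter than '*', so the old expression only ever added 0 or BATADV_JITTER, while the new one adds a value uniformly distributed in [0, 2 * BATADV_JITTER). In isolation:

	msecs += random32() % 2 * BATADV_JITTER;	/* old: (random32() % 2) * BATADV_JITTER */
	msecs += random32() % (2 * BATADV_JITTER);	/* new: full jitter range */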
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0c0028463fa..b2bcbe2dc32 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -945,6 +945,13 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->hid_get_raw_report = hidp_get_raw_report;
hid->hid_output_raw_report = hidp_output_raw_report;
+ /* True if device is blacklisted in drivers/hid/hid-core.c */
+ if (hid_ignore(hid)) {
+ hid_destroy_device(session->hid);
+ session->hid = NULL;
+ return -ENODEV;
+ }
+
return 0;
fault:
@@ -1017,7 +1024,7 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
- if (err)
+ if (err && err != -ENODEV)
goto purge;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1c8fdc3558c..37fe693471a 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -366,11 +366,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
err = netdev_set_master(dev, br->dev);
if (err)
- goto err3;
+ goto err4;
err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
- goto err4;
+ goto err5;
dev->priv_flags |= IFF_BRIDGE_PORT;
@@ -402,8 +402,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return 0;
-err4:
+err5:
netdev_set_master(dev, NULL);
+err4:
+ br_netpoll_disable(p);
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6f0a2eebcb2..acc9f4cc18f 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -83,9 +83,12 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
if (port) {
struct br_mdb_entry e;
e.ifindex = port->dev->ifindex;
- e.addr.u.ip4 = p->addr.u.ip4;
+ e.state = p->state;
+ if (p->addr.proto == htons(ETH_P_IP))
+ e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- e.addr.u.ip6 = p->addr.u.ip6;
+ if (p->addr.proto == htons(ETH_P_IPV6))
+ e.addr.u.ip6 = p->addr.u.ip6;
#endif
e.addr.proto = p->addr.proto;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
@@ -253,6 +256,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
#endif
} else
return false;
+ if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
+ return false;
return true;
}
@@ -310,7 +315,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group)
+ struct br_ip *group, unsigned char state)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -336,7 +341,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
break;
}
- p = br_multicast_new_port_group(port, group, *pp);
+ p = br_multicast_new_port_group(port, group, *pp, state);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
@@ -373,7 +378,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
#endif
spin_lock_bh(&br->multicast_lock);
- ret = br_mdb_add_group(br, p, &ip);
+ ret = br_mdb_add_group(br, p, &ip, entry->state);
spin_unlock_bh(&br->multicast_lock);
return ret;
}
@@ -479,3 +484,10 @@ void br_mdb_init(void)
rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
+
+void br_mdb_uninit(void)
+{
+ rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
+ rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
+ rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
+}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 977c3ee02e6..5391ca43336 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -279,7 +279,7 @@ static void br_multicast_port_group_expired(unsigned long data)
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
- hlist_unhashed(&pg->mglist))
+ hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
goto out;
br_multicast_del_pg(br, pg);
@@ -622,7 +622,8 @@ out:
struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
- struct net_bridge_port_group *next)
+ struct net_bridge_port_group __rcu *next,
+ unsigned char state)
{
struct net_bridge_port_group *p;
@@ -632,7 +633,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
p->addr = *group;
p->port = port;
- p->next = next;
+ p->state = state;
+ rcu_assign_pointer(p->next, next);
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
@@ -674,7 +676,7 @@ static int br_multicast_add_group(struct net_bridge *br,
break;
}
- p = br_multicast_new_port_group(port, group, *pp);
+ p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
@@ -1138,7 +1140,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
+ struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q;
struct net_bridge_port_group *p;
@@ -1172,7 +1174,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+ max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
}
if (!group)
@@ -1631,6 +1633,7 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->multicast_querier_timer);
del_timer_sync(&br->multicast_query_timer);
+ br_mdb_uninit();
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
if (!mdb)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index dead9dfe865..97ba0189c6f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -305,5 +305,4 @@ int __init br_netlink_init(void)
void __exit br_netlink_fini(void)
{
rtnl_link_unregister(&br_link_ops);
- rtnl_unregister_all(PF_BRIDGE);
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index f21a739a618..8d83be5ffed 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -83,6 +83,7 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
+ unsigned char state;
};
struct net_bridge_mdb_entry
@@ -443,8 +444,10 @@ extern void br_multicast_free_pg(struct rcu_head *head);
extern struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
- struct net_bridge_port_group *next);
+ struct net_bridge_port_group *next,
+ unsigned char state);
extern void br_mdb_init(void);
+extern void br_mdb_uninit(void);
extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type);
diff --git a/net/can/proc.c b/net/can/proc.c
index 3b6dd318049..ae566902d2b 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -397,7 +397,7 @@ static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
int i;
int all_empty = 1;
- /* check wether at least one list is non-empty */
+ /* check whether at least one list is non-empty */
for (i = 0; i < 0x800; i++)
if (!hlist_empty(&d->rx_sff[i])) {
all_empty = 0;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a8020293f34..ee71ea26777 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
/* start with defaults */
opt->flags = CEPH_OPT_DEFAULT;
- opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
/* misc */
case Opt_osdtimeout:
- opt->osd_timeout = intval;
+ pr_warning("ignoring deprecated osdtimeout option\n");
break;
case Opt_osdkeepalivetimeout:
opt->osd_keepalive_timeout = intval;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3ef1759403b..5ccf87ed8d6 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
{
/* reset connection, out_queue, msg_ and connect_seq */
/* discard existing out_queue and msg_seq */
+ dout("reset_connection %p\n", con);
ceph_msg_remove_list(&con->out_queue);
ceph_msg_remove_list(&con->out_sent);
@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
mutex_lock(&con->mutex);
dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
- BUG_ON(con->state != CON_STATE_CLOSED);
+ WARN_ON(con->state != CON_STATE_CLOSED);
con->state = CON_STATE_PREOPEN;
con->peer_name.type = (__u8) entity_type;
@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
return 0;
}
-static void fail_protocol(struct ceph_connection *con)
-{
- reset_connection(con);
- BUG_ON(con->state != CON_STATE_NEGOTIATING);
- con->state = CON_STATE_CLOSED;
-}
-
static int process_connect(struct ceph_connection *con)
{
u64 sup_feat = con->msgr->supported_features;
@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
sup_feat, server_feat, server_feat & ~sup_feat);
con->error_msg = "missing required protocol features";
- fail_protocol(con);
+ reset_connection(con);
return -1;
case CEPH_MSGR_TAG_BADPROTOVER:
@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.protocol_version),
le32_to_cpu(con->in_reply.protocol_version));
con->error_msg = "protocol version mismatch";
- fail_protocol(con);
+ reset_connection(con);
return -1;
case CEPH_MSGR_TAG_BADAUTHORIZER:
@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
req_feat, server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
- fail_protocol(con);
+ reset_connection(con);
return -1;
}
- BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ WARN_ON(con->state != CON_STATE_NEGOTIATING);
con->state = CON_STATE_OPEN;
con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
@@ -2132,7 +2126,6 @@ more:
if (ret < 0)
goto out;
- BUG_ON(con->state != CON_STATE_CONNECTING);
con->state = CON_STATE_NEGOTIATING;
/*
@@ -2160,7 +2153,7 @@ more:
goto more;
}
- BUG_ON(con->state != CON_STATE_OPEN);
+ WARN_ON(con->state != CON_STATE_OPEN);
if (con->in_base_pos < 0) {
/*
@@ -2244,22 +2237,62 @@ bad_tag:
/*
- * Atomically queue work on a connection. Bump @con reference to
- * avoid races with connection teardown.
+ * Atomically queue work on a connection after the specified delay.
+ * Bump @con reference to avoid races with connection teardown.
+ * Returns 0 if work was queued, or an error code otherwise.
*/
-static void queue_con(struct ceph_connection *con)
+static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
if (!con->ops->get(con)) {
- dout("queue_con %p ref count 0\n", con);
- return;
+ dout("%s %p ref count 0\n", __func__, con);
+
+ return -ENOENT;
}
- if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
- dout("queue_con %p - already queued\n", con);
+ if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
+ dout("%s %p - already queued\n", __func__, con);
con->ops->put(con);
- } else {
- dout("queue_con %p\n", con);
+
+ return -EBUSY;
+ }
+
+ dout("%s %p %lu\n", __func__, con, delay);
+
+ return 0;
+}
+
+static void queue_con(struct ceph_connection *con)
+{
+ (void) queue_con_delay(con, 0);
+}
+
+static bool con_sock_closed(struct ceph_connection *con)
+{
+ if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
+ return false;
+
+#define CASE(x) \
+ case CON_STATE_ ## x: \
+ con->error_msg = "socket closed (con state " #x ")"; \
+ break;
+
+ switch (con->state) {
+ CASE(CLOSED);
+ CASE(PREOPEN);
+ CASE(CONNECTING);
+ CASE(NEGOTIATING);
+ CASE(OPEN);
+ CASE(STANDBY);
+ default:
+ pr_warning("%s con %p unrecognized state %lu\n",
+ __func__, con, con->state);
+ con->error_msg = "unrecognized con state";
+ BUG();
+ break;
}
+#undef CASE
+
+ return true;
}
/*
@@ -2273,35 +2306,16 @@ static void con_work(struct work_struct *work)
mutex_lock(&con->mutex);
restart:
- if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
- switch (con->state) {
- case CON_STATE_CONNECTING:
- con->error_msg = "connection failed";
- break;
- case CON_STATE_NEGOTIATING:
- con->error_msg = "negotiation failed";
- break;
- case CON_STATE_OPEN:
- con->error_msg = "socket closed";
- break;
- default:
- dout("unrecognized con state %d\n", (int)con->state);
- con->error_msg = "unrecognized con state";
- BUG();
- }
+ if (con_sock_closed(con))
goto fault;
- }
if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
dout("con_work %p backing off\n", con);
- if (queue_delayed_work(ceph_msgr_wq, &con->work,
- round_jiffies_relative(con->delay))) {
- dout("con_work %p backoff %lu\n", con, con->delay);
- mutex_unlock(&con->mutex);
- return;
- } else {
+ ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+ if (ret) {
dout("con_work %p FAILED to back off %lu\n", con,
con->delay);
+ BUG_ON(ret == -ENOENT);
set_bit(CON_FLAG_BACKOFF, &con->flags);
}
goto done;
@@ -2356,12 +2370,12 @@ fault:
static void ceph_fault(struct ceph_connection *con)
__releases(con->mutex)
{
- pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+ pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
- BUG_ON(con->state != CON_STATE_CONNECTING &&
+ WARN_ON(con->state != CON_STATE_CONNECTING &&
con->state != CON_STATE_NEGOTIATING &&
con->state != CON_STATE_OPEN);
@@ -2398,24 +2412,8 @@ static void ceph_fault(struct ceph_connection *con)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
con->delay *= 2;
- con->ops->get(con);
- if (queue_delayed_work(ceph_msgr_wq, &con->work,
- round_jiffies_relative(con->delay))) {
- dout("fault queued %p delay %lu\n", con, con->delay);
- } else {
- con->ops->put(con);
- dout("fault failed to queue %p delay %lu, backoff\n",
- con, con->delay);
- /*
- * In many cases we see a socket state change
- * while con_work is running and end up
- * queuing (non-delayed) work, such that we
- * can't backoff with a delay. Set a flag so
- * that when con_work restarts we schedule the
- * delay then.
- */
- set_bit(CON_FLAG_BACKOFF, &con->flags);
- }
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
+ queue_con(con);
}
out_unlock:
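In short, the new queuing helper above has a small, explicit contract (summarized from this hunk):

	/*
	 * queue_con_delay(con, delay):
	 *   0        work queued; the reference taken via con->ops->get()
	 *            is owned by the queued work
	 *   -ENOENT  con->ops->get() failed (connection being torn down),
	 *            nothing was queued
	 *   -EBUSY   work was already queued; the extra reference is dropped
	 *
	 * queue_con(con) is just queue_con_delay(con, 0) with the result
	 * ignored, and ceph_fault() now only sets CON_FLAG_BACKOFF and
	 * requeues, leaving con_work() to apply the actual delay.
	 */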
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c1d756cc744..eb9a4447876 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
kref_init(&req->r_kref);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
+ RB_CLEAR_NODE(&req->r_node);
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
@@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
dout("__kick_osd_requests osd%d\n", osd->o_osd);
err = __reset_osd(osdc, osd);
- if (err == -EAGAIN)
+ if (err)
return;
list_for_each_entry(req, &osd->o_requests, r_osd_item) {
@@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
}
}
-static void kick_osd_requests(struct ceph_osd_client *osdc,
- struct ceph_osd *kickosd)
-{
- mutex_lock(&osdc->request_mutex);
- __kick_osd_requests(osdc, kickosd);
- mutex_unlock(&osdc->request_mutex);
-}
-
/*
* If the osd connection drops, we need to resubmit all requests.
*/
@@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
dout("osd_reset osd%d\n", osd->o_osd);
osdc = osd->o_osdc;
down_read(&osdc->map_sem);
- kick_osd_requests(osdc, osd);
+ mutex_lock(&osdc->request_mutex);
+ __kick_osd_requests(osdc, osd);
+ mutex_unlock(&osdc->request_mutex);
send_queued(osdc);
up_read(&osdc->map_sem);
}
@@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
atomic_set(&osd->o_ref, 1);
osd->o_osdc = osdc;
osd->o_osd = onum;
+ RB_CLEAR_NODE(&osd->o_node);
INIT_LIST_HEAD(&osd->o_requests);
INIT_LIST_HEAD(&osd->o_linger_requests);
INIT_LIST_HEAD(&osd->o_osd_lru);
@@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
if (list_empty(&osd->o_requests) &&
list_empty(&osd->o_linger_requests)) {
__remove_osd(osdc, osd);
+ ret = -ENODEV;
} else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
&osd->o_con.peer_addr,
sizeof(osd->o_con.peer_addr)) == 0 &&
@@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
req->r_osd = NULL;
}
+ list_del_init(&req->r_req_lru_item);
ceph_osdc_put_request(req);
- list_del_init(&req->r_req_lru_item);
if (osdc->num_requests == 0) {
dout(" no requests, canceling timeout\n");
__cancel_osd_timeout(osdc);
@@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req)
{
dout("__unregister_linger_request %p\n", req);
+ list_del_init(&req->r_linger_item);
if (req->r_osd) {
- list_del_init(&req->r_linger_item);
list_del_init(&req->r_linger_osd);
if (list_empty(&req->r_osd->o_requests) &&
@@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client, timeout_work.work);
- struct ceph_osd_request *req, *last_req = NULL;
+ struct ceph_osd_request *req;
struct ceph_osd *osd;
- unsigned long timeout = osdc->client->options->osd_timeout * HZ;
unsigned long keepalive =
osdc->client->options->osd_keepalive_timeout * HZ;
- unsigned long last_stamp = 0;
struct list_head slow_osds;
dout("timeout\n");
down_read(&osdc->map_sem);
@@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
mutex_lock(&osdc->request_mutex);
/*
- * reset osds that appear to be _really_ unresponsive. this
- * is a failsafe measure.. we really shouldn't be getting to
- * this point if the system is working properly. the monitors
- * should mark the osd as failed and we should find out about
- * it from an updated osd map.
- */
- while (timeout && !list_empty(&osdc->req_lru)) {
- req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
- r_req_lru_item);
-
- /* hasn't been long enough since we sent it? */
- if (time_before(jiffies, req->r_stamp + timeout))
- break;
-
- /* hasn't been long enough since it was acked? */
- if (req->r_request->ack_stamp == 0 ||
- time_before(jiffies, req->r_request->ack_stamp + timeout))
- break;
-
- BUG_ON(req == last_req && req->r_stamp == last_stamp);
- last_req = req;
- last_stamp = req->r_stamp;
-
- osd = req->r_osd;
- BUG_ON(!osd);
- pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
- req->r_tid, osd->o_osd);
- __kick_osd_requests(osdc, osd);
- }
-
- /*
* ping osds that are a bit slow. this ensures that if there
* is a break in the TCP connection we will notice, and reopen
* a connection with that osd (from the fault callback).
@@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
* Requeue requests whose mapping to an OSD has changed. If requests map to
* no osd, request a new map.
*
- * Caller should hold map_sem for read and request_mutex.
+ * Caller should hold map_sem for read.
*/
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
@@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
p = rb_next(p);
+
+ /*
+ * For linger requests that have not yet been
+ * registered, move them to the linger list; they'll
+ * be sent to the osd in the loop below. Unregister
+ * the request before re-registering it as a linger
+ * request to ensure the __map_request() below
+ * will decide it needs to be sent.
+ */
+ if (req->r_linger && list_empty(&req->r_linger_item)) {
+ dout("%p tid %llu restart on osd%d\n",
+ req, req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+ __unregister_request(osdc, req);
+ __register_linger_request(osdc, req);
+ continue;
+ }
+
err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
@@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
req->r_flags |= CEPH_OSD_FLAG_RETRY;
}
}
- if (req->r_linger && list_empty(&req->r_linger_item)) {
- /*
- * register as a linger so that we will
- * re-submit below and get a new tid
- */
- dout("%p tid %llu restart on osd%d\n",
- req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- __register_linger_request(osdc, req);
- __unregister_request(osdc, req);
- }
}
list_for_each_entry_safe(req, nreq, &osdc->req_linger,
@@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
err = __map_request(osdc, req, force_resend);
+ dout("__map_request returned %d\n", err);
if (err == 0)
continue; /* no change and no osd was specified */
if (err < 0)
@@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
req->r_osd ? req->r_osd->o_osd : -1);
- __unregister_linger_request(osdc, req);
__register_request(osdc, req);
+ __unregister_linger_request(osdc, req);
}
mutex_unlock(&osdc->request_mutex);
@@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("%d requests for down osds, need new map\n", needmap);
ceph_monc_request_next_osdmap(&osdc->client->monc);
}
+ reset_changed_osds(osdc);
}
@@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
osdc->osdmap = newmap;
}
kick_requests(osdc, 0);
- reset_changed_osds(osdc);
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
@@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
event->data = data;
event->osdc = osdc;
INIT_LIST_HEAD(&event->osd_node);
+ RB_CLEAR_NODE(&event->node);
kref_init(&event->kref); /* one ref for us */
kref_get(&event->kref); /* one ref for the caller */
init_completion(&event->completion);
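The RB_CLEAR_NODE() calls added above (for r_node, o_node and the watch-event node) initialize freshly allocated rbtree nodes to a recognizable "not linked" state, so teardown paths can tell whether a node was ever inserted before erasing it. A minimal sketch of the idiom, using hypothetical struct and field names rather than anything from this file:

	#include <linux/rbtree.h>

	struct foo {
		struct rb_node node;		/* hypothetical example field */
	};

	static void foo_init(struct foo *f)
	{
		RB_CLEAR_NODE(&f->node);	/* mark as "in no tree" */
	}

	static void foo_unlink(struct rb_root *root, struct foo *f)
	{
		if (!RB_EMPTY_NODE(&f->node)) {	/* only erase if it was inserted */
			rb_erase(&f->node, root);
			RB_CLEAR_NODE(&f->node);
		}
	}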
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 5433fb0eb3c..de73214b5d2 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
return NULL;
}
+const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
+{
+ struct ceph_pg_pool_info *pi;
+
+ if (id == CEPH_NOPOOL)
+ return NULL;
+
+ if (WARN_ON_ONCE(id > (u64) INT_MAX))
+ return NULL;
+
+ pi = __lookup_pg_pool(&map->pg_pools, (int) id);
+
+ return pi ? pi->name : NULL;
+}
+EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
+
int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
struct rb_node *rbp;
@@ -645,10 +661,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+ err = -ENOMEM;
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
pi->id = ceph_decode_32(p);
+ err = -EINVAL;
ev = ceph_decode_8(p); /* encoding version */
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
@@ -664,8 +682,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__insert_pg_pool(&map->pg_pools, pi);
}
- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
- goto bad;
+ if (version >= 5) {
+ err = __decode_pool_names(p, end, map);
+ if (err < 0) {
+ dout("fail to decode pool names");
+ goto bad;
+ }
+ }
ceph_decode_32_safe(p, end, map->pool_max, bad);
@@ -745,7 +768,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
return map;
bad:
- dout("osdmap_decode fail\n");
+ dout("osdmap_decode fail err %d\n", err);
ceph_osdmap_destroy(map);
return ERR_PTR(err);
}
@@ -839,6 +862,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
ev, CEPH_PG_POOL_VERSION);
+ err = -EINVAL;
goto bad;
}
pi = __lookup_pg_pool(&map->pg_pools, pool);
@@ -855,8 +879,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
if (err < 0)
goto bad;
}
- if (version >= 5 && __decode_pool_names(p, end, map) < 0)
- goto bad;
+ if (version >= 5) {
+ err = __decode_pool_names(p, end, map);
+ if (err < 0)
+ goto bad;
+ }
/* old_pool */
ceph_decode_32_safe(p, end, len, bad);
@@ -932,15 +959,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
(void) __remove_pg_mapping(&map->pg_temp, pgid);
/* insert */
- if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
- err = -EINVAL;
+ err = -EINVAL;
+ if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
goto bad;
- }
+ err = -ENOMEM;
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
- if (!pg) {
- err = -ENOMEM;
+ if (!pg)
goto bad;
- }
pg->pgid = pgid;
pg->len = pglen;
for (j = 0; j < pglen; j++)
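The osdmap decode hunks above consistently load err with the appropriate error code just before each step that can fail, so every goto bad exit reports something meaningful instead of whatever value was left over. A compressed sketch of the idiom; the struct and decode_u32_safe() are hypothetical stand-ins, not functions from this file:

	struct thing {
		u32 id;
	};

	static struct thing *decode_thing(void **p, void *end)
	{
		struct thing *t;
		int err;

		err = -ENOMEM;				/* next step may fail with ENOMEM */
		t = kzalloc(sizeof(*t), GFP_NOFS);
		if (!t)
			goto bad;

		err = -EINVAL;				/* from here on, failures mean bad input */
		if (!decode_u32_safe(p, end, &t->id))	/* hypothetical bounds-checked decode */
			goto bad;

		return t;
	bad:
		kfree(t);				/* kfree(NULL) is a no-op */
		return ERR_PTR(err);
	}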
diff --git a/net/core/dev.c b/net/core/dev.c
index d0cbc93fcf3..515473ee52c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly;
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
-DEFINE_SEQLOCK(devnet_rename_seq);
+seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
@@ -1093,10 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
- write_seqlock(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- write_sequnlock(&devnet_rename_seq);
+ write_seqcount_end(&devnet_rename_seq);
return 0;
}
@@ -1104,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
- write_sequnlock(&devnet_rename_seq);
+ write_seqcount_end(&devnet_rename_seq);
return err;
}
@@ -1112,11 +1112,11 @@ rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- write_sequnlock(&devnet_rename_seq);
+ write_seqcount_end(&devnet_rename_seq);
return ret;
}
- write_sequnlock(&devnet_rename_seq);
+ write_seqcount_end(&devnet_rename_seq);
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
@@ -1135,7 +1135,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- write_seqlock(&devnet_rename_seq);
+ write_seqcount_begin(&devnet_rename_seq);
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
@@ -4180,7 +4180,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
return -EFAULT;
retry:
- seq = read_seqbegin(&devnet_rename_seq);
+ seq = read_seqcount_begin(&devnet_rename_seq);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
if (!dev) {
@@ -4190,7 +4190,7 @@ retry:
strcpy(ifr.ifr_name, dev->name);
rcu_read_unlock();
- if (read_seqretry(&devnet_rename_seq, seq))
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
goto retry;
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
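devnet_rename_seq drops from a full seqlock to a bare seqcount_t here: writers of dev->name are already serialized (dev_change_name() runs under the RTNL), so only the retry counter is needed, while lockless readers keep the usual begin/retry loop. A minimal sketch of that pairing, assuming writers are serialized by some external mutex as they are in this case:

	static seqcount_t name_seq;		/* initialize with seqcount_init() */
	static char shared_name[32];

	static void set_name(const char *new)	/* caller holds the external mutex */
	{
		write_seqcount_begin(&name_seq);
		strncpy(shared_name, new, sizeof(shared_name) - 1);
		write_seqcount_end(&name_seq);
	}

	static void get_name(char *buf, size_t len)
	{
		unsigned int seq;

		do {				/* retry if a rename raced with us */
			seq = read_seqcount_begin(&name_seq);
			strncpy(buf, shared_name, len - 1);
			buf[len - 1] = '\0';
		} while (read_seqcount_retry(&name_seq, seq));
	}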
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 334efd5d67a..28c5f5aa7ca 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1334,7 +1334,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
-#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
struct net_device *dev = to_net_dev(d);
@@ -1353,7 +1352,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
exit:
return retval;
}
-#endif
/*
* netdev_release -- destroy and free a dead device.
@@ -1382,9 +1380,7 @@ static struct class net_class = {
#ifdef CONFIG_SYSFS
.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
-#ifdef CONFIG_HOTPLUG
.dev_uevent = netdev_uevent,
-#endif
.ns_type = &net_ns_type_operations,
.namespace = net_namespace,
};
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6456439cbbd..8acce01b6da 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -381,6 +381,21 @@ struct net *get_net_ns_by_pid(pid_t pid)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
+static __net_init int net_ns_net_init(struct net *net)
+{
+ return proc_alloc_inum(&net->proc_inum);
+}
+
+static __net_exit void net_ns_net_exit(struct net *net)
+{
+ proc_free_inum(net->proc_inum);
+}
+
+static struct pernet_operations __net_initdata net_ns_ops = {
+ .init = net_ns_net_init,
+ .exit = net_ns_net_exit,
+};
+
static int __init net_ns_init(void)
{
struct net_generic *ng;
@@ -412,6 +427,8 @@ static int __init net_ns_init(void)
mutex_unlock(&net_mutex);
+ register_pernet_subsys(&net_ns_ops);
+
return 0;
}
@@ -630,16 +647,29 @@ static void netns_put(void *ns)
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
+ struct net *net = ns;
+
+ if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
+ !nsown_capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
put_net(nsproxy->net_ns);
- nsproxy->net_ns = get_net(ns);
+ nsproxy->net_ns = get_net(net);
return 0;
}
+static unsigned int netns_inum(void *ns)
+{
+ struct net *net = ns;
+ return net->proc_inum;
+}
+
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
+ .inum = netns_inum,
};
#endif
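The new net_ns_ops above allocate a /proc inode number for every network namespace and free it when the namespace goes away; netns_inum then lets /proc/<pid>/ns/net report that number so userspace can tell namespaces apart. This follows the standard pernet_operations pattern, sketched below with a hypothetical subsystem name:

	static int __net_init example_net_init(struct net *net)
	{
		/* set up whatever this subsystem keeps per network namespace */
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		/* tear it down again when the namespace is destroyed */
	}

	static struct pernet_operations example_net_ops = {
		.init = example_net_init,
		.exit = example_net_exit,
	};

	/* registered once during subsystem initialization:
	 *	err = register_pernet_subsys(&example_net_ops);
	 */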
diff --git a/net/core/sock.c b/net/core/sock.c
index a692ef49c9b..bc131d41968 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -583,7 +583,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
goto out;
retry:
- seq = read_seqbegin(&devnet_rename_seq);
+ seq = read_seqcount_begin(&devnet_rename_seq);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
ret = -ENODEV;
@@ -594,7 +594,7 @@ retry:
strcpy(devname, dev->name);
rcu_read_unlock();
- if (read_seqretry(&devnet_rename_seq, seq))
+ if (read_seqcount_retry(&devnet_rename_seq, seq))
goto retry;
len = strlen(devname) + 1;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 176ecdba4a2..4f9f5eb478f 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -439,8 +439,8 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
- bh_unlock_sock(newsk);
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ dccp_done(newsk);
goto exit;
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 56840b249f3..6e05981f271 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ dccp_done(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8aa4b111538..0a69d075779 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -259,20 +259,16 @@ static int __init init_dns_resolver(void)
if (!cred)
return -ENOMEM;
- keyring = key_alloc(&key_type_keyring, ".dns_resolver",
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ,
- KEY_ALLOC_NOT_IN_QUOTA);
+ keyring = keyring_alloc(".dns_resolver",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto failed_put_cred;
}
- ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
- if (ret < 0)
- goto failed_put_key;
-
ret = register_key_type(&key_type_dns_resolver);
if (ret < 0)
goto failed_put_key;
@@ -304,3 +300,4 @@ static void __exit exit_dns_resolver(void)
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
+
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index ce6fbdfd40b..9547a273b9e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -321,7 +321,7 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 saddr = 0;
- u8 *dst_ha = NULL;
+ u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
struct net_device *dev = neigh->dev;
__be32 target = *(__be32 *)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
@@ -363,8 +363,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (probes < 0) {
if (!(neigh->nud_state & NUD_VALID))
pr_debug("trying to ucast probe in NUD_INVALID\n");
- dst_ha = neigh->ha;
- read_lock_bh(&neigh->lock);
+ neigh_ha_snapshot(dst_ha, neigh, dev);
+ dst_hw = dst_ha;
} else {
probes -= neigh->parms->app_probes;
if (probes < 0) {
@@ -376,9 +376,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
}
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
- dst_ha, dev->dev_addr, NULL);
- if (dst_ha)
- read_unlock_bh(&neigh->lock);
+ dst_hw, dev->dev_addr, NULL);
}
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2026542d683..d0670f00d52 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -710,6 +710,22 @@ void inet_csk_destroy_sock(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
+/* This function allows forcing the closure of a socket after the call to
+ * tcp/dccp_create_openreq_child().
+ */
+void inet_csk_prepare_forced_close(struct sock *sk)
+{
+ /* sk_clone_lock locked the socket and set refcnt to 2 */
+ bh_unlock_sock(sk);
+ sock_put(sk);
+
+ /* The below has to be done to allow calling inet_csk_destroy_sock */
+ sock_set_flag(sk, SOCK_DEAD);
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+ inet_sk(sk)->inet_num = 0;
+}
+EXPORT_SYMBOL(inet_csk_prepare_forced_close);
+
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
struct inet_sock *inet = inet_sk(sk);
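inet_csk_prepare_forced_close() collects the bookkeeping needed before a half-constructed child socket can be destroyed: it drops the lock and the reference taken by sk_clone_lock(), marks the socket dead and orphaned, and clears inet_num so inet_csk_destroy_sock() will accept it. The DCCP and TCP error paths converted in this series all use the same two-step shape:

	/* error path inside a *_syn_recv_sock() implementation */
	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);		/* dccp_done(newsk) for the DCCP variants */
		goto exit;
	}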
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a85ae2f7a21..303012adf9e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -750,6 +750,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
int gre_hlen;
__be32 dst;
int mtu;
+ u8 ttl;
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb))
@@ -760,7 +761,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
gre_hlen = 0;
- tiph = (const struct iphdr *)skb->data;
+ if (skb->protocol == htons(ETH_P_IP))
+ tiph = (const struct iphdr *)skb->data;
+ else
+ tiph = &tunnel->parms.iph;
} else {
gre_hlen = tunnel->hlen;
tiph = &tunnel->parms.iph;
@@ -812,6 +816,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
goto tx_error;
}
+ ttl = tiph->ttl;
tos = tiph->tos;
if (tos == 1) {
tos = 0;
@@ -904,11 +909,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
dev_kfree_skb(skb);
skb = new_skb;
old_iph = ip_hdr(skb);
+ /* Warning : tiph value might point to freed memory */
}
- skb_reset_transport_header(skb);
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(*iph));
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
@@ -927,8 +933,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
iph->daddr = fl4.daddr;
iph->saddr = fl4.saddr;
+ iph->ttl = ttl;
- if ((iph->ttl = tiph->ttl) == 0) {
+ if (ttl == 0) {
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index baf28611b33..291f2ed7cc3 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -1,7 +1,7 @@
/*
* Plugable TCP congestion control support and newReno
* congestion control.
- * Based on ideas from I/O scheduler suport and Web100.
+ * Based on ideas from I/O scheduler support and Web100.
*
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
*/
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a13692560e6..a28e4db8a95 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5543,6 +5543,9 @@ slow_path:
if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
goto csum_error;
+ if (!th->ack)
+ goto discard;
+
/*
* Standard slow path.
*/
@@ -5551,7 +5554,7 @@ slow_path:
return 0;
step5:
- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+ if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
goto discard;
/* ts_recent update must be made after we are sure that the packet
@@ -5984,11 +5987,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
goto discard;
}
+
+ if (!th->ack)
+ goto discard;
+
if (!tcp_validate_incoming(sk, skb, th, 0))
return 0;
/* step 5: check the ACK field */
- if (th->ack) {
+ if (true) {
int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
switch (sk->sk_state) {
@@ -6138,8 +6145,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
break;
}
- } else
- goto discard;
+ }
/* ts_recent update must be made after we are sure that the packet
* is in window.
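The two tcp_input.c hunks above hoist the "segment must carry an ACK" check ahead of tcp_validate_incoming() and the step-5 processing, so ACK-less segments are discarded before any connection state is touched; the old trailing else branch and the per-step th->ack tests then fall away. The reordered slow path, in outline only:

	if (!th->ack)
		goto discard;		/* nothing past this point is meaningful without an ACK */

	if (!tcp_validate_incoming(sk, skb, th, 0))
		return 0;

	/* step 5: the ACK is now known to be present */
	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
		goto discard;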
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1ed230716d5..54139fa514e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1767,10 +1767,8 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
- tcp_clear_xmit_timers(newsk);
- tcp_cleanup_congestion_control(newsk);
- bh_unlock_sock(newsk);
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ tcp_done(newsk);
goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2068ac4fbda..4ea244891b5 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -41,6 +41,6 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
obj-y += addrconf_core.o exthdrs_core.o
-obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6_offload)
+obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6fca01f136a..408cac4ae00 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -534,8 +534,7 @@ void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
+ rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
}
static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 867466c96aa..c727e471275 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -758,8 +758,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb_dst_set_noref(skb, dst);
}
- skb->transport_header = skb->network_header;
-
proto = NEXTHDR_GRE;
if (encap_limit >= 0) {
init_tel_txopt(&opt, encap_limit);
@@ -768,6 +766,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(*ipv6h));
/*
* Push down and install the IP header.
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 4c02e6ab96e..6574175795d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -151,8 +151,8 @@ static inline int ndisc_opt_addr_space(struct net_device *dev)
static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len,
unsigned short addr_type)
{
- int space = NDISC_OPT_SPACE(data_len);
int pad = ndisc_addr_option_pad(addr_type);
+ int space = NDISC_OPT_SPACE(data_len + pad);
opt[0] = type;
opt[1] = space>>3;
@@ -1314,6 +1314,12 @@ out:
static void ndisc_redirect_rcv(struct sk_buff *skb)
{
+ u8 *hdr;
+ struct ndisc_options ndopts;
+ struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
+ u32 ndoptlen = skb->tail - (skb->transport_header +
+ offsetof(struct rd_msg, opt));
+
#ifdef CONFIG_IPV6_NDISC_NODETYPE
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
@@ -1330,6 +1336,17 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
}
+ if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
+ return;
+
+ if (!ndopts.nd_opts_rh)
+ return;
+
+ hdr = (u8 *)ndopts.nd_opts_rh;
+ hdr += 8;
+ if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
+ return;
+
icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
}
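The ndisc_fill_addr_option() change above folds the link-layer padding into the option-space calculation, so the space that was reserved, the length octet that is written, and the bytes that are zero-filled all agree on devices whose hardware addresses need padding. The rounding helper below is illustrative only (it is not the kernel macro), but it shows the arithmetic:

	/* Option size in octets: 2 header bytes + pad + address, rounded up to 8. */
	static int opt_space(int data_len, int pad)
	{
		return (2 + pad + data_len + 7) & ~7;
	}

	/* Before the fix, the space reservation included pad while the fill path
	 * rounded only data_len, so the two could disagree whenever pad pushed
	 * the total across an 8-octet boundary. */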
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6565cf55eb1..93825dd3a7c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1288,7 +1288,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#endif
if (__inet_inherit_port(sk, newsk) < 0) {
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ tcp_done(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c6560cc7a9d..698dc7e6f30 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -634,7 +634,7 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
{
- int ret = 0; /* default unsuported op for less congestion */
+ int ret = 0; /* default unsupported op for less congestion */
might_sleep();
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e748aed290a..b7c7f815dea 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -224,9 +224,9 @@ void ieee802154_free_device(struct ieee802154_dev *hw)
BUG_ON(!list_empty(&priv->slaves));
- wpan_phy_free(priv->phy);
-
mutex_destroy(&priv->slaves_mtx);
+
+ wpan_phy_free(priv->phy);
}
EXPORT_SYMBOL(ieee802154_free_device);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 703fb26aa48..9e312695c81 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -32,7 +32,7 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
return NULL;
}
-/* return EEXIST if the same logger is registred, 0 on success. */
+/* return EEXIST if the same logger is registered, 0 on success. */
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
const struct nf_logger *llog;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c8a1eb6eca2..c0353d55d56 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -669,6 +669,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err;
+ if (addr_len < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if (nladdr->nl_family != AF_NETLINK)
return -EINVAL;
@@ -2059,7 +2062,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+ seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 0fa1e92ceac..fea22eb41b8 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -614,10 +614,6 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_namelen < sizeof(*addr)) {
release_sock(sk);
-
- pr_err("Invalid socket address length %d\n",
- msg->msg_namelen);
-
return -EINVAL;
}
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index a1e11627747..31b74f5e61a 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -434,12 +434,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
version = RDS_PROTOCOL_3_0;
while ((common >>= 1) != 0)
version++;
- }
- printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
- "incompatible protocol version %u.%u\n",
- &dp->dp_saddr,
- dp->dp_protocol_major,
- dp->dp_protocol_minor);
+ } else
+ printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
+ &dp->dp_saddr,
+ dp->dp_protocol_major,
+ dp->dp_protocol_minor);
return version;
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8c5bc857f04..8eb9501e3d6 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -339,8 +339,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
sge->length = sizeof(struct rds_header);
sge = &recv->r_sge[1];
- sge->addr = sg_dma_address(&recv->r_frag->f_sg);
- sge->length = sg_dma_len(&recv->r_frag->f_sg);
+ sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
+ sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
ret = 0;
out:
@@ -381,7 +381,10 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
- (long) sg_dma_address(&recv->r_frag->f_sg), ret);
+ (long) ib_sg_dma_address(
+ ic->i_cm_id->device,
+ &recv->r_frag->f_sg),
+ ret);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI4 returned %d, disconnecting and "
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a5c95274127..9b9be5279f5 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -676,7 +676,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
rfkill_set_block(rfkill, state);
mutex_unlock(&rfkill_global_mutex);
- return err ?: count;
+ return count;
}
static u8 user_state_from_blocked(unsigned long state)
@@ -721,7 +721,7 @@ static ssize_t rfkill_state_store(struct device *dev,
rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
mutex_unlock(&rfkill_global_mutex);
- return err ?: count;
+ return count;
}
static ssize_t rfkill_claim_show(struct device *dev,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d2922c0ef57..51561eafcb7 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -919,7 +919,7 @@ ok:
q->now = ktime_to_ns(ktime_get());
start_at = jiffies;
- next_event = q->now + 5 * NSEC_PER_SEC;
+ next_event = q->now + 5LLU * NSEC_PER_SEC;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index a9edd2e205f..c26210618e1 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -66,12 +66,36 @@ config SCTP_DBG_OBJCNT
'cat /proc/net/sctp/sctp_dbg_objcnt'
If unsure, say N
+choice
+ prompt "Default SCTP cookie HMAC encoding"
+ default SCTP_COOKIE_HMAC_MD5
+ help
+ This option sets the default SCTP cookie HMAC algorithm.
+ When in doubt, select 'md5'.
+
+config SCTP_DEFAULT_COOKIE_HMAC_MD5
+ bool "Enable optional MD5 hmac cookie generation"
+ help
+ Enable optional MD5 hmac based SCTP cookie generation
+ select SCTP_COOKIE_HMAC_MD5
+
+config SCTP_DEFAULT_COOKIE_HMAC_SHA1
+ bool "Enable optional SHA1 hmac cookie generation"
+ help
+ Enable optional SHA1 hmac based SCTP cookie generation
+ select SCTP_COOKIE_HMAC_SHA1
+
+config SCTP_DEFAULT_COOKIE_HMAC_NONE
+ bool "Use no hmac alg in SCTP cookie generation"
+ help
+ Use no hmac algorithm in SCTP cookie generation
+
+endchoice
config SCTP_COOKIE_HMAC_MD5
bool "Enable optional MD5 hmac cookie generation"
help
Enable optional MD5 hmac based SCTP cookie generation
- default y
select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
@@ -79,7 +103,6 @@ config SCTP_COOKIE_HMAC_SHA1
bool "Enable optional SHA1 hmac cookie generation"
help
Enable optional SHA1 hmac based SCTP cookie generation
- default y
select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 32ab55b1828..17a001bac2c 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -163,7 +163,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
list_add(&null_key->key_list, &ep->endpoint_shared_keys);
- /* Allocate and initialize transorms arrays for suported HMACs. */
+ /* Allocate and initialize transforms arrays for supported HMACs. */
err = sctp_auth_init_hmacs(ep, gfp);
if (err)
goto nomem_hmacs;
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index bc6cd75cc1d..5f7518de2fd 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -122,7 +122,8 @@ static const struct file_operations sctpprobe_fops = {
.llseek = noop_llseek,
};
-sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep,
+sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
+ const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2c7785bacf7..f898b1c58bd 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1191,9 +1191,9 @@ static int __net_init sctp_net_init(struct net *net)
net->sctp.cookie_preserve_enable = 1;
/* Default sctp sockets to use md5 as their hmac alg */
-#if defined (CONFIG_CRYPTO_MD5)
+#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
net->sctp.sctp_hmac_alg = "md5";
-#elif defined (CONFIG_CRYPTO_SHA1)
+#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
net->sctp.sctp_hmac_alg = "sha1";
#else
net->sctp.sctp_hmac_alg = NULL;
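With the new Kconfig choice, exactly one CONFIG_SCTP_DEFAULT_COOKIE_HMAC_* symbol is defined, and sctp_net_init() now derives the default cookie HMAC from that choice rather than from which crypto algorithms happen to be built in. The selection reduces to a compile-time constant; a small sketch of the same pattern with a hypothetical helper:

	static const char *sctp_default_hmac_alg(void)
	{
	#if defined(CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
		return "md5";
	#elif defined(CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
		return "sha1";
	#else
		return NULL;		/* cookies carry no HMAC at all */
	#endif
	}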
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ebcd1eedb11..618ec7e216c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4000,7 +4000,7 @@ static sctp_ierror_t sctp_sf_authenticate(struct net *net,
chunk->subh.auth_hdr = auth_hdr;
skb_pull(chunk->skb, sizeof(struct sctp_authhdr));
- /* Make sure that we suport the HMAC algorithm from the auth
+ /* Make sure that we support the HMAC algorithm from the auth
* chunk.
*/
if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 909dc0c31aa..6e5c824b040 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -192,17 +192,23 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
const void *q;
unsigned int seclen;
unsigned int timeout;
+ unsigned long now = jiffies;
u32 window_size;
int ret;
- /* First unsigned int gives the lifetime (in seconds) of the cred */
+ /* First unsigned int gives the remaining lifetime in seconds of the
+ * credential - e.g. the remaining TGT lifetime for Kerberos or
+ * the -t value passed to GSSD.
+ */
p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
if (IS_ERR(p))
goto err;
if (timeout == 0)
timeout = GSSD_MIN_TIMEOUT;
- ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
- /* Sequence number window. Determines the maximum number of simultaneous requests */
+ ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
+ /* Sequence number window. Determines the maximum number of
+ * simultaneous requests
+ */
p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
if (IS_ERR(p))
goto err;
@@ -237,9 +243,12 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
p = ERR_PTR(ret);
goto err;
}
+ dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
+ __func__, ctx->gc_expiry, now, timeout);
return q;
err:
- dprintk("RPC: %s returning %ld\n", __func__, -PTR_ERR(p));
+ dprintk("RPC: %s returns %ld gc_expiry %lu now %lu timeout %u\n",
+ __func__, -PTR_ERR(p), ctx->gc_expiry, now, timeout);
return p;
}
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index a9c0bbccad6..890a29912d5 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -59,7 +59,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
struct xdr_buf *xbufp;
dprintk("RPC: free allocations for req= %p\n", req);
- BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
xbufp = &req->rq_private_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
xbufp = &req->rq_snd_buf;
@@ -191,7 +191,9 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
dprintk("RPC: destroy backchannel transport\n");
- BUG_ON(max_reqs == 0);
+ if (max_reqs == 0)
+ goto out;
+
spin_lock_bh(&xprt->bc_pa_lock);
xprt_dec_alloc_count(xprt, max_reqs);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
@@ -202,6 +204,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
}
spin_unlock_bh(&xprt->bc_pa_lock);
+out:
dprintk("RPC: backchannel list empty= %s\n",
list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
@@ -255,7 +258,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)
dprintk("RPC: free backchannel req=%p\n", req);
smp_mb__before_clear_bit();
- BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
smp_mb__after_clear_bit();
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 0b2eb388cbd..15c7a8a1c24 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -53,7 +53,7 @@ int bc_send(struct rpc_rqst *req)
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
- BUG_ON(atomic_read(&task->tk_count) != 1);
+ WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
ret = task->tk_status;
rpc_put_task(task);
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index fc2f7aa4dca..9afa4393c21 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -775,11 +775,11 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock);
mutex_unlock(&inode->i_mutex);
- BUG_ON(rp->offset);
+ WARN_ON_ONCE(rp->offset);
return 0;
}
rq = container_of(rp->q.list.next, struct cache_request, q.list);
- BUG_ON(rq->q.reader);
+ WARN_ON_ONCE(rq->q.reader);
if (rp->offset == 0)
rq->readers++;
spin_unlock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cdc7564b451..822f020fa7f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -132,8 +132,10 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
int error;
dir = rpc_d_lookup_sb(sb, dir_name);
- if (dir == NULL)
+ if (dir == NULL) {
+ pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
return dir;
+ }
for (;;) {
q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
name[sizeof(name) - 1] = '\0';
@@ -192,7 +194,8 @@ static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
case RPC_PIPEFS_MOUNT:
dentry = rpc_setup_pipedir_sb(sb, clnt,
clnt->cl_program->pipe_dir_name);
- BUG_ON(dentry == NULL);
+ if (!dentry)
+ return -ENOENT;
if (IS_ERR(dentry))
return PTR_ERR(dentry);
clnt->cl_dentry = dentry;
@@ -234,7 +237,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
spin_lock(&sn->rpc_client_lock);
list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
if (clnt->cl_program->pipe_dir_name == NULL)
- break;
+ continue;
if (rpc_clnt_skip_event(clnt, event))
continue;
if (atomic_inc_not_zero(&clnt->cl_count) == 0)
@@ -607,6 +610,13 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks);
*/
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
+ /*
+ * To avoid deadlock, never call rpc_shutdown_client from a
+ * workqueue context!
+ */
+ WARN_ON_ONCE(current->flags & PF_WQ_WORKER);
+ might_sleep();
+
dprintk_rcu("RPC: shutting down %s client for %s\n",
clnt->cl_protname,
rcu_dereference(clnt->cl_xprt)->servername);
@@ -693,21 +703,19 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
const struct rpc_program *program,
u32 vers)
{
+ struct rpc_create_args args = {
+ .program = program,
+ .prognumber = program->number,
+ .version = vers,
+ .authflavor = old->cl_auth->au_flavor,
+ .client_name = old->cl_principal,
+ };
struct rpc_clnt *clnt;
- const struct rpc_version *version;
int err;
- BUG_ON(vers >= program->nrvers || !program->version[vers]);
- version = program->version[vers];
- clnt = rpc_clone_client(old);
+ clnt = __rpc_clone_client(&args, old);
if (IS_ERR(clnt))
goto out;
- clnt->cl_procinfo = version->procs;
- clnt->cl_maxproc = version->nrprocs;
- clnt->cl_protname = program->name;
- clnt->cl_prog = program->number;
- clnt->cl_vers = version->number;
- clnt->cl_stats = program->stats;
err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
@@ -832,7 +840,12 @@ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flag
};
int status;
- BUG_ON(flags & RPC_TASK_ASYNC);
+ WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
+ if (flags & RPC_TASK_ASYNC) {
+ rpc_release_calldata(task_setup_data.callback_ops,
+ task_setup_data.callback_data);
+ return -EINVAL;
+ }
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
@@ -908,7 +921,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count);
- BUG_ON(atomic_read(&task->tk_count) != 2);
+ WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
rpc_execute(task);
out:
@@ -1368,6 +1381,7 @@ call_refreshresult(struct rpc_task *task)
return;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
+ case -EKEYEXPIRED:
case -EAGAIN:
status = -EACCES;
if (!task->tk_cred_retry)
@@ -1654,7 +1668,6 @@ call_transmit(struct rpc_task *task)
task->tk_action = call_transmit_status;
/* Encode here so that rpcsec_gss can use correct sequence number. */
if (rpc_task_need_encode(task)) {
- BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
@@ -1738,7 +1751,6 @@ call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- BUG_ON(task->tk_status != 0);
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status == -EAGAIN) {
/*
@@ -1785,7 +1797,7 @@ call_bc_transmit(struct rpc_task *task)
* We were unable to reply and will have to drop the
* request. The server should reconnect and retransmit.
*/
- BUG_ON(task->tk_status == -EAGAIN);
+ WARN_ON_ONCE(task->tk_status == -EAGAIN);
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
break;
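A pattern repeated throughout the sunrpc hunks here and below is replacing BUG_ON() with WARN_ON_ONCE() plus a graceful bail-out, so a "should never happen" condition is logged once and degraded instead of crashing the machine. The rpc_call_sync() hunk above is representative; the shape of the conversion is roughly:

	/* was: BUG_ON(flags & RPC_TASK_ASYNC); */
	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		/* release anything set up so far, then fail the call cleanly */
		return -EINVAL;
	}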
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 80f5dd23417..fd10981ea79 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1093,7 +1093,7 @@ void rpc_put_sb_net(const struct net *net)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
- BUG_ON(sn->pipefs_sb == NULL);
+ WARN_ON(sn->pipefs_sb == NULL);
mutex_unlock(&sn->pipefs_sb_lock);
}
EXPORT_SYMBOL_GPL(rpc_put_sb_net);
@@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_lock(&sn->pipefs_sb_lock);
+ if (sn->pipefs_sb != sb) {
+ mutex_unlock(&sn->pipefs_sb_lock);
+ goto out;
+ }
sn->pipefs_sb = NULL;
mutex_unlock(&sn->pipefs_sb_lock);
- put_net(net);
dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
net, NET_NAME(net));
blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_UMOUNT,
sb);
+ put_net(net);
+out:
kill_litter_super(sb);
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index a70acae496e..795a0f4e920 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -23,7 +23,6 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/nsproxy.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
@@ -884,7 +883,10 @@ static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
u32 len;
len = strlen(string);
- BUG_ON(len > maxstrlen);
+ WARN_ON_ONCE(len > maxstrlen);
+ if (len > maxstrlen)
+ /* truncate and hope for the best */
+ len = maxstrlen;
p = xdr_reserve_space(xdr, 4 + len);
xdr_encode_opaque(p, string, len);
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6357fcb00c7..d17a704aaf5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,6 +98,23 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
+static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
+{
+ queue->priority = priority;
+}
+
+static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
+{
+ queue->owner = pid;
+ queue->nr = RPC_BATCH_COUNT;
+}
+
+static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
+{
+ rpc_set_waitqueue_priority(queue, queue->maxpriority);
+ rpc_set_waitqueue_owner(queue, 0);
+}
+
/*
* Add new request to a priority queue.
*/
@@ -109,9 +126,11 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
- q = &queue->tasks[queue_priority];
if (unlikely(queue_priority > queue->maxpriority))
- q = &queue->tasks[queue->maxpriority];
+ queue_priority = queue->maxpriority;
+ if (queue_priority > queue->priority)
+ rpc_set_waitqueue_priority(queue, queue_priority);
+ q = &queue->tasks[queue_priority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
@@ -133,7 +152,9 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
struct rpc_task *task,
unsigned char queue_priority)
{
- BUG_ON (RPC_IS_QUEUED(task));
+ WARN_ON_ONCE(RPC_IS_QUEUED(task));
+ if (RPC_IS_QUEUED(task))
+ return;
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task, queue_priority);
@@ -178,24 +199,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
task->tk_pid, queue, rpc_qname(queue));
}
-static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
-{
- queue->priority = priority;
- queue->count = 1 << (priority * 2);
-}
-
-static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
-{
- queue->owner = pid;
- queue->nr = RPC_BATCH_COUNT;
-}
-
-static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
-{
- rpc_set_waitqueue_priority(queue, queue->maxpriority);
- rpc_set_waitqueue_owner(queue, 0);
-}
-
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
@@ -334,7 +337,7 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
__rpc_add_wait_queue(q, task, queue_priority);
- BUG_ON(task->tk_callback != NULL);
+ WARN_ON_ONCE(task->tk_callback != NULL);
task->tk_callback = action;
__rpc_add_timer(q, task);
}
@@ -343,7 +346,12 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
/* We shouldn't ever put an inactive task to sleep */
- BUG_ON(!RPC_IS_ACTIVATED(task));
+ WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
+ if (!RPC_IS_ACTIVATED(task)) {
+ task->tk_status = -EIO;
+ rpc_put_task_async(task);
+ return;
+ }
/*
* Protect the queue operations.
@@ -358,7 +366,12 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, int priority)
{
/* We shouldn't ever put an inactive task to sleep */
- BUG_ON(!RPC_IS_ACTIVATED(task));
+ WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
+ if (!RPC_IS_ACTIVATED(task)) {
+ task->tk_status = -EIO;
+ rpc_put_task_async(task);
+ return;
+ }
/*
* Protect the queue operations.
@@ -367,6 +380,7 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
spin_unlock_bh(&q->lock);
}
+EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
@@ -451,8 +465,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
/*
* Check if we need to switch queues.
*/
- if (--queue->count)
- goto new_owner;
+ goto new_owner;
}
/*
@@ -697,7 +710,9 @@ static void __rpc_execute(struct rpc_task *task)
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
- BUG_ON(RPC_IS_QUEUED(task));
+ WARN_ON_ONCE(RPC_IS_QUEUED(task));
+ if (RPC_IS_QUEUED(task))
+ return;
for (;;) {
void (*do_action)(struct rpc_task *);
@@ -981,7 +996,7 @@ static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
- BUG_ON (RPC_IS_QUEUED(task));
+ WARN_ON_ONCE(RPC_IS_QUEUED(task));
rpc_release_resources_task(task);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 3ee7461926d..dbf12ac5ecb 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>
-#include <linux/nsproxy.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
@@ -324,7 +323,9 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
* The caller checks for sv_nrpools > 1, which
* implies that we've been initialized.
*/
- BUG_ON(m->count == 0);
+ WARN_ON_ONCE(m->count == 0);
+ if (m->count == 0)
+ return;
switch (m->mode) {
case SVC_POOL_PERCPU:
@@ -585,7 +586,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
* We assume one is at most one page
*/
arghi = 0;
- BUG_ON(pages > RPCSVC_MAXPAGES);
+ WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
+ if (pages > RPCSVC_MAXPAGES)
+ pages = RPCSVC_MAXPAGES;
while (pages) {
struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
if (!p)
@@ -946,7 +949,9 @@ int svc_register(const struct svc_serv *serv, struct net *net,
unsigned int i;
int error = 0;
- BUG_ON(proto == 0 && port == 0);
+ WARN_ON_ONCE(proto == 0 && port == 0);
+ if (proto == 0 && port == 0)
+ return -EINVAL;
for (progp = serv->sv_program; progp; progp = progp->pg_next) {
for (i = 0; i < progp->pg_nvers; i++) {
@@ -1035,7 +1040,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
}
/*
- * Printk the given error with the address of the client that caused it.
+ * dprintk the given error with the address of the client that caused it.
*/
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
@@ -1049,8 +1054,7 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- net_warn_ratelimited("svc: %s: %pV",
- svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
+ dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
va_end(args);
}
@@ -1299,7 +1303,7 @@ svc_process(struct svc_rqst *rqstp)
* Setup response xdr_buf.
* Initially it has just one page
*/
- rqstp->rq_resused = 1;
+ rqstp->rq_next_page = &rqstp->rq_respages[1];
resv->iov_base = page_address(rqstp->rq_respages[0]);
resv->iov_len = 0;
rqstp->rq_res.pages = rqstp->rq_respages + 1;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 194d865fae7..b8e47fac731 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -218,7 +218,9 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
*/
static void svc_xprt_received(struct svc_xprt *xprt)
{
- BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
+ WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
+ if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
+ return;
/* As soon as we clear busy, the xprt could be closed and
* 'put', so we need a reference to call svc_xprt_enqueue with:
*/
@@ -577,7 +579,10 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
/* now allocate needed pages. If we get a failure, sleep briefly */
pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
- BUG_ON(pages >= RPCSVC_MAXPAGES);
+ WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
+ if (pages >= RPCSVC_MAXPAGES)
+ /* use as many pages as possible */
+ pages = RPCSVC_MAXPAGES - 1;
for (i = 0; i < pages ; i++)
while (rqstp->rq_pages[i] == NULL) {
struct page *p = alloc_page(GFP_KERNEL);
@@ -926,7 +931,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
list_del_init(&xprt->xpt_list);
- BUG_ON(!list_empty(&xprt->xpt_ready));
+ WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 03827cef1fa..0a148c9d2a5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -84,7 +84,11 @@ static struct lock_class_key svc_slock_key[2];
static void svc_reclassify_socket(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
+
+ WARN_ON_ONCE(sock_owned_by_user(sk));
+ if (sock_owned_by_user(sk))
+ return;
+
switch (sk->sk_family) {
case AF_INET:
sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
@@ -601,6 +605,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_respages = rqstp->rq_pages + 1 +
DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
}
+ rqstp->rq_next_page = rqstp->rq_respages+1;
if (serv->sv_stats)
serv->sv_stats->netudpcnt++;
@@ -874,9 +879,9 @@ static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst
{
unsigned int i, len, npages;
- if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ if (svsk->sk_datalen == 0)
return 0;
- len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ len = svsk->sk_datalen;
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
if (rqstp->rq_pages[i] != NULL)
@@ -893,9 +898,9 @@ static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
unsigned int i, len, npages;
- if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ if (svsk->sk_datalen == 0)
return;
- len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ len = svsk->sk_datalen;
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
svsk->sk_pages[i] = rqstp->rq_pages[i];
@@ -907,9 +912,9 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
{
unsigned int i, len, npages;
- if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ if (svsk->sk_datalen == 0)
goto out;
- len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ len = svsk->sk_datalen;
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
BUG_ON(svsk->sk_pages[i] == NULL);
@@ -918,13 +923,12 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
}
out:
svsk->sk_tcplen = 0;
+ svsk->sk_datalen = 0;
}
/*
- * Receive data.
+ * Receive fragment record header.
* If we haven't gotten the record length yet, get the next four bytes.
- * Otherwise try to gobble up as much as possible up to the complete
- * record length.
*/
static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
@@ -950,32 +954,16 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
return -EAGAIN;
}
- svsk->sk_reclen = ntohl(svsk->sk_reclen);
- if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
- /* FIXME: technically, a record can be fragmented,
- * and non-terminal fragments will not have the top
- * bit set in the fragment length header.
- * But apparently no known nfs clients send fragmented
- * records. */
- net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
- goto err_delete;
- }
-
- svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
- dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
- if (svsk->sk_reclen > serv->sv_max_mesg) {
- net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
- (unsigned long)svsk->sk_reclen);
+ dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
+ if (svc_sock_reclen(svsk) + svsk->sk_datalen >
+ serv->sv_max_mesg) {
+ net_notice_ratelimited("RPC: fragment too large: %d\n",
+ svc_sock_reclen(svsk));
goto err_delete;
}
}
- if (svsk->sk_reclen < 8)
- goto err_delete; /* client is nuts. */
-
- len = svsk->sk_reclen;
-
- return len;
+ return svc_sock_reclen(svsk);
error:
dprintk("RPC: TCP recv_record got %d\n", len);
return len;
@@ -1019,7 +1007,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (dst->iov_len < src->iov_len)
return -EAGAIN; /* whatever; just giving up. */
memcpy(dst->iov_base, src->iov_base, src->iov_len);
- xprt_complete_rqst(req->rq_task, svsk->sk_reclen);
+ xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
rqstp->rq_arg.len = 0;
return 0;
}
@@ -1038,6 +1026,17 @@ static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
return i;
}
+static void svc_tcp_fragment_received(struct svc_sock *svsk)
+{
+ /* If we have more data, signal svc_xprt_enqueue() to try again */
+ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
+ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ dprintk("svc: TCP %s record (%d bytes)\n",
+ svc_sock_final_rec(svsk) ? "final" : "nonfinal",
+ svc_sock_reclen(svsk));
+ svsk->sk_tcplen = 0;
+ svsk->sk_reclen = 0;
+}
/*
* Receive data from a TCP socket.
@@ -1064,29 +1063,39 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
goto error;
base = svc_tcp_restore_pages(svsk, rqstp);
- want = svsk->sk_reclen - base;
+ want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
vec = rqstp->rq_vec;
pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
- svsk->sk_reclen);
+ svsk->sk_datalen + want);
rqstp->rq_respages = &rqstp->rq_pages[pnum];
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
/* Now receive data */
len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
- if (len >= 0)
+ if (len >= 0) {
svsk->sk_tcplen += len;
- if (len != want) {
+ svsk->sk_datalen += len;
+ }
+ if (len != want || !svc_sock_final_rec(svsk)) {
svc_tcp_save_pages(svsk, rqstp);
if (len < 0 && len != -EAGAIN)
- goto err_other;
- dprintk("svc: incomplete TCP record (%d of %d)\n",
- svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_delete;
+ if (len == want)
+ svc_tcp_fragment_received(svsk);
+ else
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
+ svc_sock_reclen(svsk));
goto err_noclose;
}
- rqstp->rq_arg.len = svsk->sk_reclen;
+ if (svc_sock_reclen(svsk) < 8)
+ goto err_delete; /* client is nuts. */
+
+ rqstp->rq_arg.len = svsk->sk_datalen;
rqstp->rq_arg.page_base = 0;
if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
@@ -1103,11 +1112,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
len = receive_cb_reply(svsk, rqstp);
/* Reset TCP read info */
- svsk->sk_reclen = 0;
- svsk->sk_tcplen = 0;
- /* If we have more data, signal svc_xprt_enqueue() to try again */
- if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ svsk->sk_datalen = 0;
+ svc_tcp_fragment_received(svsk);
if (len < 0)
goto error;
@@ -1116,15 +1122,14 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (serv->sv_stats)
serv->sv_stats->nettcpcnt++;
- dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
return rqstp->rq_arg.len;
error:
if (len != -EAGAIN)
- goto err_other;
+ goto err_delete;
dprintk("RPC: TCP recvfrom got EAGAIN\n");
return 0;
-err_other:
+err_delete:
printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
svsk->sk_xprt.xpt_server->sv_name, -len);
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1301,6 +1306,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
+ svsk->sk_datalen = 0;
memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
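The svcsock.c changes above let the server-side TCP code reassemble multi-fragment RPC records: sk_tcplen tracks progress within the current fragment, the new sk_datalen accumulates the reassembled message, and svc_tcp_fragment_received() resets the per-fragment state. Each fragment is preceded by the RFC 5531 record marker, a 4-byte big-endian word whose top bit marks the final fragment; decoding it looks roughly like:

	/* Decode an RPC-over-TCP record marker (RFC 5531 record marking). */
	static void parse_record_marker(__be32 marker, bool *final, u32 *len)
	{
		u32 v = be32_to_cpu(marker);

		*final = !!(v & 0x80000000u);	/* last-fragment bit */
		*len   = v & 0x7fffffffu;	/* fragment length in octets */
	}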
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 08f50afd5f2..56055632f15 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -318,7 +318,10 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
tail = buf->tail;
head = buf->head;
- BUG_ON (len > head->iov_len);
+
+ WARN_ON_ONCE(len > head->iov_len);
+ if (len > head->iov_len)
+ len = head->iov_len;
/* Shift the tail first */
if (tail->iov_len != 0) {
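The xdr_shrink_bufhead hunk above applies the same hardening pattern used throughout this series: a BUG_ON() that would crash the machine on bad input becomes a one-time warning plus a graceful fallback. A generic sketch of the pattern (function names invented):

#include <linux/bug.h>
#include <linux/types.h>

/* Old style: oversized input brings the whole kernel down. */
void example_clamp_old(size_t *len, size_t max)
{
	BUG_ON(*len > max);
}

/* New style: warn once, clamp, keep running. */
void example_clamp_new(size_t *len, size_t max)
{
	WARN_ON_ONCE(*len > max);
	if (*len > max)
		*len = max;
}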
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 41cb63b623d..0ce75524ed2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -521,11 +521,11 @@ next_sge:
rqstp->rq_pages[ch_no] = NULL;
/*
- * Detach res pages. svc_release must see a resused count of
- * zero or it will attempt to put them.
+ * Detach res pages. If svc_release sees any, it will attempt to
+ * put them.
*/
- while (rqstp->rq_resused)
- rqstp->rq_respages[--rqstp->rq_resused] = NULL;
+ while (rqstp->rq_next_page != rqstp->rq_respages)
+ *(--rqstp->rq_next_page) = NULL;
return err;
}
@@ -550,7 +550,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
/* rq_respages starts after the last arg page */
rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
- rqstp->rq_resused = 0;
+ rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
/* Rebuild rq_arg head and tail. */
rqstp->rq_arg.head[0] = head->arg.head[0];
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 42eb7ba0b90..c1d124dc772 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -548,6 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
int sge_no;
int sge_bytes;
int page_no;
+ int pages;
int ret;
/* Post a recv buffer to handle another request. */
@@ -611,7 +612,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
* respages array. They are our pages until the I/O
* completes.
*/
- for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
+ pages = rqstp->rq_next_page - rqstp->rq_respages;
+ for (page_no = 0; page_no < pages; page_no++) {
ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
ctxt->count++;
rqstp->rq_respages[page_no] = NULL;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75853cabf4c..68b0a81c31d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1746,7 +1746,6 @@ static inline void xs_reclassify_socketu(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
}
@@ -1755,7 +1754,6 @@ static inline void xs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
}
@@ -1764,13 +1762,16 @@ static inline void xs_reclassify_socket6(struct socket *sock)
{
struct sock *sk = sock->sk;
- BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
+ WARN_ON_ONCE(sock_owned_by_user(sock->sk));
+ if (sock_owned_by_user(sock->sk))
+ return;
+
switch (family) {
case AF_LOCAL:
xs_reclassify_socketu(sock);
@@ -1901,6 +1902,10 @@ static void xs_local_setup_socket(struct work_struct *work)
dprintk("RPC: xprt %p: socket %s does not exist\n",
xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
break;
+ case -ECONNREFUSED:
+ dprintk("RPC: xprt %p: connection refused for %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ break;
default:
printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
__func__, -status,
@@ -2329,9 +2334,11 @@ static void *bc_malloc(struct rpc_task *task, size_t size)
struct page *page;
struct rpc_buffer *buf;
- BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
- page = alloc_page(GFP_KERNEL);
+ WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
+ if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
+ return NULL;
+ page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
@@ -2393,7 +2400,6 @@ static int bc_send_request(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct svc_xprt *xprt;
- struct svc_sock *svsk;
u32 len;
dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
@@ -2401,7 +2407,6 @@ static int bc_send_request(struct rpc_task *task)
* Get the server socket associated with this callback xprt
*/
xprt = req->rq_xprt->bc_xprt;
- svsk = container_of(xprt, struct svc_sock, sk_xprt);
/*
* Grab the mutex to serialize data as the connection is shared
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6e5308998e3..82c4fc7c994 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2365,7 +2365,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
return r;
}
-#ifdef CONFIG_HOTPLUG
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
if (last_request && !last_request->processed) {
@@ -2377,12 +2376,6 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#else
-int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_HOTPLUG */
void wiphy_regulatory_register(struct wiphy *wiphy)
{
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9bf6d5e3216..1f6f01e2dc4 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -77,13 +77,11 @@ static void wiphy_dev_release(struct device *dev)
cfg80211_dev_free(rdev);
}
-#ifdef CONFIG_HOTPLUG
static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
{
/* TODO, we probably need stuff here */
return 0;
}
-#endif
static int wiphy_suspend(struct device *dev, pm_message_t state)
{
@@ -134,9 +132,7 @@ struct class ieee80211_class = {
.owner = THIS_MODULE,
.dev_release = wiphy_dev_release,
.dev_attrs = ieee80211_dev_attrs,
-#ifdef CONFIG_HOTPLUG
.dev_uevent = wiphy_uevent,
-#endif
.suspend = wiphy_suspend,
.resume = wiphy_resume,
.ns_type = &net_ns_type_operations,
diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign
new file mode 100644
index 00000000000..abfda626dba
--- /dev/null
+++ b/scripts/Makefile.modsign
@@ -0,0 +1,32 @@
+# ==========================================================================
+# Signing modules
+# ==========================================================================
+
+PHONY := __modsign
+__modsign:
+
+include scripts/Kbuild.include
+
+__modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
+modules := $(patsubst %.o,%.ko,$(wildcard $(__modules:.ko=.o)))
+
+PHONY += $(modules)
+__modsign: $(modules)
+ @:
+
+quiet_cmd_sign_ko = SIGN [M] $(2)/$(notdir $@)
+ cmd_sign_ko = $(mod_sign_cmd) $(2)/$(notdir $@)
+
+# Modules built outside the kernel source tree go into extra by default
+INSTALL_MOD_DIR ?= extra
+ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
+
+modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+
+$(modules):
+ $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+
+.PHONY: $(PHONY)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index f18750e3bd6..4d2c7dfdaab 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -33,6 +33,7 @@ my %ignore_type = ();
my @ignore = ();
my $help = 0;
my $configuration_file = ".checkpatch.conf";
+my $max_line_length = 80;
sub help {
my ($exitcode) = @_;
@@ -51,6 +52,7 @@ Options:
-f, --file treat FILE as regular source file
--subjective, --strict enable more subjective tests
--ignore TYPE(,TYPE2...) ignore various comma separated message types
+ --max-line-length=n set the maximum line length; warn if it is exceeded
--show-types show the message "types" in the output
--root=PATH PATH to the kernel tree root
--no-summary suppress the per-file summary
@@ -107,6 +109,7 @@ GetOptions(
'strict!' => \$check,
'ignore=s' => \@ignore,
'show-types!' => \$show_types,
+ 'max-line-length=i' => \$max_line_length,
'root=s' => \$root,
'summary!' => \$summary,
'mailback!' => \$mailback,
@@ -227,7 +230,11 @@ our $Inline = qr{inline|__always_inline|noinline};
our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
our $Lval = qr{$Ident(?:$Member)*};
-our $Constant = qr{(?i:(?:[0-9]+|0x[0-9a-f]+)[ul]*)};
+our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)};
+our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))};
+our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)};
+our $Float = qr{$Float_hex|$Float_dec|$Float_int};
+our $Constant = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))};
our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
our $Compare = qr{<=|>=|==|!=|<|>};
our $Operators = qr{
@@ -352,27 +359,6 @@ sub deparenthesize {
$chk_signoff = 0 if ($file);
-my @dep_includes = ();
-my @dep_functions = ();
-my $removal = "Documentation/feature-removal-schedule.txt";
-if ($tree && -f "$root/$removal") {
- open(my $REMOVE, '<', "$root/$removal") ||
- die "$P: $removal: open failed - $!\n";
- while (<$REMOVE>) {
- if (/^Check:\s+(.*\S)/) {
- for my $entry (split(/[, ]+/, $1)) {
- if ($entry =~ m@include/(.*)@) {
- push(@dep_includes, $1);
-
- } elsif ($entry !~ m@/@) {
- push(@dep_functions, $entry);
- }
- }
- }
- }
- close($REMOVE);
-}
-
my @rawlines = ();
my @lines = ();
my $vname;
@@ -1412,6 +1398,8 @@ sub process {
my %suppress_export;
my $suppress_statement = 0;
+ my %camelcase = ();
+
# Pre-scan the patch sanitizing the lines.
# Pre-scan the patch looking for any __setup documentation.
#
@@ -1757,6 +1745,13 @@ sub process {
#print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
+# discourage the addition of CONFIG_EXPERIMENTAL in Kconfig.
+ if ($realfile =~ /Kconfig/ &&
+ $line =~ /.\s*depends on\s+.*\bEXPERIMENTAL\b/) {
+ WARN("CONFIG_EXPERIMENTAL",
+ "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
+ }
+
if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
my $flag = $1;
@@ -1774,15 +1769,15 @@ sub process {
# check we are in a valid source file if not then ignore this hunk
next if ($realfile !~ /\.(h|c|s|S|pl|sh)$/);
-#80 column limit
+#line length limit
if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
$rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
!($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
$line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
- $length > 80)
+ $length > $max_line_length)
{
WARN("LONG_LINE",
- "line over 80 characters\n" . $herecurr);
+ "line over $max_line_length characters\n" . $herecurr);
}
# Check for user-visible strings broken across lines, which breaks the ability
@@ -1912,6 +1907,12 @@ sub process {
# check we are in a valid C source file if not then ignore this hunk
next if ($realfile !~ /\.(h|c)$/);
+# discourage the addition of CONFIG_EXPERIMENTAL in #if(def).
+ if ($line =~ /^\+\s*\#\s*if.*\bCONFIG_EXPERIMENTAL\b/) {
+ WARN("CONFIG_EXPERIMENTAL",
+ "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
+ }
+
# check for RCS/CVS revision markers
if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
WARN("CVS_KEYWORD",
@@ -2225,8 +2226,11 @@ sub process {
my $path = $1;
if ($path =~ m{//}) {
ERROR("MALFORMED_INCLUDE",
- "malformed #include filename\n" .
- $herecurr);
+ "malformed #include filename\n" . $herecurr);
+ }
+ if ($path =~ "^uapi/" && $realfile =~ m@\binclude/uapi/@) {
+ ERROR("UAPI_INCLUDE",
+ "No #include in ...include/uapi/... should use a uapi/ path prefix\n" . $herecurr);
}
}
@@ -2906,12 +2910,17 @@ sub process {
}
}
-#studly caps, commented out until figure out how to distinguish between use of existing and adding new
-# if (($line=~/[\w_][a-z\d]+[A-Z]/) and !($line=~/print/)) {
-# print "No studly caps, use _\n";
-# print "$herecurr";
-# $clean = 0;
-# }
+#CamelCase
+ while ($line =~ m{($Constant|$Lval)}g) {
+ my $var = $1;
+ if ($var !~ /$Constant/ &&
+ $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ &&
+ !defined $camelcase{$var}) {
+ $camelcase{$var} = 1;
+ WARN("CAMELCASE",
+ "Avoid CamelCase: <$var>\n" . $herecurr);
+ }
+ }
#no spaces allowed after \ in define
if ($line=~/\#\s*define.*\\\s$/) {
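A hypothetical illustration of what the new CAMELCASE test warns about versus what passes cleanly (all names invented):

/* These mixed-case names would now draw "Avoid CamelCase" warnings ... */
int SampleCounter;
void readSensorValue(int SensorId);

/* ... while conventional lower_case_with_underscores names do not. */
int sample_counter;
void read_sensor_value(int sensor_id);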
@@ -3013,6 +3022,17 @@ sub process {
"Macros with complex values should be enclosed in parenthesis\n" . "$herectx");
}
}
+
+# check for line continuations outside of #defines, preprocessor #, and asm
+
+ } else {
+ if ($prevline !~ /^..*\\$/ &&
+ $line !~ /^\+\s*\#.*\\$/ && # preprocessor
+ $line !~ /^\+.*\b(__asm__|asm)\b.*\\$/ && # asm
+ $line =~ /^\+.*\\$/) {
+ WARN("LINE_CONTINUATIONS",
+ "Avoid unnecessary line continuations\n" . $herecurr);
+ }
}
# do {} while (0) macro tests:
@@ -3183,20 +3203,14 @@ sub process {
}
}
-# don't include deprecated include files (uses RAW line)
- for my $inc (@dep_includes) {
- if ($rawline =~ m@^.\s*\#\s*include\s*\<$inc>@) {
- ERROR("DEPRECATED_INCLUDE",
- "Don't use <$inc>: see Documentation/feature-removal-schedule.txt\n" . $herecurr);
- }
+# check for unnecessary blank lines around braces
+ if (($line =~ /^..*}\s*$/ && $prevline =~ /^.\s*$/)) {
+ CHK("BRACES",
+ "Blank lines aren't necessary before a close brace '}'\n" . $hereprev);
}
-
-# don't use deprecated functions
- for my $func (@dep_functions) {
- if ($line =~ /\b$func\b/) {
- ERROR("DEPRECATED_FUNCTION",
- "Don't use $func(): see Documentation/feature-removal-schedule.txt\n" . $herecurr);
- }
+ if (($line =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) {
+ CHK("BRACES",
+ "Blank lines aren't necessary after an open brace '{'\n" . $hereprev);
}
# no volatiles please
@@ -3213,20 +3227,12 @@ sub process {
$herecurr);
}
-# check for needless kfree() checks
- if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
- my $expr = $1;
- if ($line =~ /\bkfree\(\Q$expr\E\);/) {
- WARN("NEEDLESS_KFREE",
- "kfree(NULL) is safe this check is probably not required\n" . $hereprev);
- }
- }
-# check for needless usb_free_urb() checks
- if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
- my $expr = $1;
- if ($line =~ /\busb_free_urb\(\Q$expr\E\);/) {
- WARN("NEEDLESS_USB_FREE_URB",
- "usb_free_urb(NULL) is safe this check is probably not required\n" . $hereprev);
+# check for needless "if (<foo>) fn(<foo>)" uses
+ if ($prevline =~ /\bif\s*\(\s*($Lval)\s*\)/) {
+ my $expr = '\s*\(\s*' . quotemeta($1) . '\s*\)\s*;';
+ if ($line =~ /\b(kfree|usb_free_urb|debugfs_remove(?:_recursive)?)$expr/) {
+ WARN('NEEDLESS_IF',
+ "$1(NULL) is safe so this check is probably not required\n" . $hereprev);
}
}
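In C, the redundant pattern the consolidated NEEDLESS_IF check now flags, next to the preferred form (names invented):

#include <linux/slab.h>

/* Flagged by NEEDLESS_IF: the NULL check adds nothing. */
static void example_release_old(char *buf)
{
	if (buf)
		kfree(buf);
}

/* Preferred: kfree(NULL), like usb_free_urb(NULL) and
 * debugfs_remove(NULL), is a safe no-op. */
static void example_release_new(char *buf)
{
	kfree(buf);
}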
@@ -3344,6 +3350,12 @@ sub process {
"Avoid line continuations in quoted strings\n" . $herecurr);
}
+# check for struct spinlock declarations
+ if ($line =~ /^.\s*\bstruct\s+spinlock\s+\w+\s*;/) {
+ WARN("USE_SPINLOCK_T",
+ "struct spinlock should be spinlock_t\n" . $herecurr);
+ }
+
# Check for misused memsets
if ($^V && $^V ge 5.10.0 &&
defined $stat &&
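For the new USE_SPINLOCK_T check, a hypothetical discouraged declaration next to the preferred form (lock name invented):

#include <linux/spinlock.h>

/* Warned: struct spinlock example_lock; */
static DEFINE_SPINLOCK(example_lock);	/* preferred: the spinlock_t typedef */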
@@ -3450,8 +3462,22 @@ sub process {
# check for multiple semicolons
if ($line =~ /;\s*;\s*$/) {
- WARN("ONE_SEMICOLON",
- "Statements terminations use 1 semicolon\n" . $herecurr);
+ WARN("ONE_SEMICOLON",
+ "Statements terminations use 1 semicolon\n" . $herecurr);
+ }
+
+# check for switch/default statements without a break;
+ if ($^V && $^V ge 5.10.0 &&
+ defined $stat &&
+ $stat =~ /^\+[$;\s]*(?:case[$;\s]+\w+[$;\s]*:[$;\s]*|)*[$;\s]*\bdefault[$;\s]*:[$;\s]*;/g) {
+ my $ctx = '';
+ my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($stat);
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
+ WARN("DEFAULT_NO_BREAK",
+ "switch default: should use break\n" . $herectx);
}
# check for gcc specific __FUNCTION__
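As I read the new test, it warns when a switch statement's default: label is not terminated by an explicit break; a hypothetical shape that keeps the check quiet:

static void example_configure(int mode)
{
	switch (mode) {
	case 0:
		/* fast path */
		break;
	default:
		/* slow path */
		break;	/* the new check wants this explicit break after default: */
	}
}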
diff --git a/scripts/coccinelle/api/d_find_alias.cocci b/scripts/coccinelle/api/d_find_alias.cocci
new file mode 100644
index 00000000000..a9694a8d3e5
--- /dev/null
+++ b/scripts/coccinelle/api/d_find_alias.cocci
@@ -0,0 +1,80 @@
+/// Make sure calls to d_find_alias() have a corresponding call to dput().
+//
+// Keywords: d_find_alias, dput
+//
+// Confidence: Moderate
+// URL: http://coccinelle.lip6.fr/
+// Options: -include_headers
+
+virtual context
+virtual org
+virtual patch
+virtual report
+
+@r exists@
+local idexpression struct dentry *dent;
+expression E, E1;
+statement S1, S2;
+position p1, p2;
+@@
+(
+ if (!(dent@p1 = d_find_alias(...))) S1
+|
+ dent@p1 = d_find_alias(...)
+)
+
+<...when != dput(dent)
+ when != if (...) { <+... dput(dent) ...+> }
+ when != true !dent || ...
+ when != dent = E
+ when != E = dent
+if (!dent || ...) S2
+...>
+(
+ return <+...dent...+>;
+|
+ return @p2 ...;
+|
+ dent@p2 = E1;
+|
+ E1 = dent;
+)
+
+@depends on context@
+local idexpression struct dentry *r.dent;
+position r.p1,r.p2;
+@@
+* dent@p1 = ...
+ ...
+(
+* return@p2 ...;
+|
+* dent@p2
+)
+
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+cocci.print_main("Missing call to dput()",p1)
+cocci.print_secs("",p2)
+
+@depends on patch@
+local idexpression struct dentry *r.dent;
+position r.p2;
+@@
+(
++ dput(dent);
+ return @p2 ...;
+|
++ dput(dent);
+ dent@p2 = ...;
+)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+msg = "Missing call to dput() at line %s."
+coccilib.report.print_report(p1[0], msg % (p2[0].line))
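The semantic patch above encodes the usual contract for this API: a dentry returned by d_find_alias() holds a reference that must be dropped with dput(). A minimal C sketch of correct usage (function name invented):

#include <linux/dcache.h>
#include <linux/fs.h>

static void example_touch_alias(struct inode *inode)
{
	struct dentry *dentry = d_find_alias(inode);

	if (!dentry)
		return;

	/* ... use the dentry ... */

	dput(dentry);	/* drop the reference d_find_alias() took */
}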
diff --git a/scripts/coccinelle/misc/warn.cocci b/scripts/coccinelle/misc/warn.cocci
new file mode 100644
index 00000000000..fda8c3558e4
--- /dev/null
+++ b/scripts/coccinelle/misc/warn.cocci
@@ -0,0 +1,109 @@
+/// Use WARN(1,...) rather than printk followed by WARN_ON(1)
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+@bad1@
+position p;
+@@
+
+printk(...);
+printk@p(...);
+WARN_ON(1);
+
+@r1 depends on context || report || org@
+position p != bad1.p;
+@@
+
+ printk@p(...);
+*WARN_ON(1);
+
+@script:python depends on org@
+p << r1.p;
+@@
+
+cocci.print_main("printk + WARN_ON can be just WARN",p)
+
+@script:python depends on report@
+p << r1.p;
+@@
+
+msg = "SUGGESTION: printk + WARN_ON can be just WARN"
+coccilib.report.print_report(p[0],msg)
+
+@ok1 depends on patch@
+expression list es;
+position p != bad1.p;
+@@
+
+-printk@p(
++WARN(1,
+ es);
+-WARN_ON(1);
+
+@depends on patch@
+expression list ok1.es;
+@@
+
+if (...)
+- {
+ WARN(1,es);
+- }
+
+// --------------------------------------------------------------------
+
+@bad2@
+position p;
+@@
+
+printk(...);
+printk@p(...);
+WARN_ON_ONCE(1);
+
+@r2 depends on context || report || org@
+position p != bad1.p;
+@@
+
+ printk@p(...);
+*WARN_ON_ONCE(1);
+
+@script:python depends on org@
+p << r2.p;
+@@
+
+cocci.print_main("printk + WARN_ON_ONCE can be just WARN_ONCE",p)
+
+@script:python depends on report@
+p << r2.p;
+@@
+
+msg = "SUGGESTION: printk + WARN_ON_ONCE can be just WARN_ONCE"
+coccilib.report.print_report(p[0],msg)
+
+@ok2 depends on patch@
+expression list es;
+position p != bad2.p;
+@@
+
+-printk@p(
++WARN_ONCE(1,
+ es);
+-WARN_ON_ONCE(1);
+
+@depends on patch@
+expression list ok2.es;
+@@
+
+if (...)
+- {
+ WARN_ONCE(1,es);
+- }
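In C terms, the transformation performed by warn.cocci looks like this (the message text is invented):

#include <linux/bug.h>
#include <linux/printk.h>

/* Before: two calls, and the message prints separately from the backtrace. */
static void example_report_old(int state)
{
	printk(KERN_WARNING "example: unexpected state %d\n", state);
	WARN_ON(1);
}

/* After the semantic patch: one WARN(1, ...) carrying the message. */
static void example_report_new(int state)
{
	WARN(1, "example: unexpected state %d\n", state);
}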
diff --git a/scripts/config b/scripts/config
index ee355394f4e..bb4d3deb6d1 100755
--- a/scripts/config
+++ b/scripts/config
@@ -101,7 +101,6 @@ while [ "$1" != "" ] ; do
case "$CMD" in
--keep-case|-k)
MUNGE_CASE=no
- shift
continue
;;
--refresh)
diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
index 6c353ae8a45..581ca99c96f 100644
--- a/scripts/headers_install.pl
+++ b/scripts/headers_install.pl
@@ -42,9 +42,9 @@ foreach my $filename (@files) {
$line =~ s/(^|\s)(inline)\b/$1__$2__/g;
$line =~ s/(^|\s)(asm)\b(\s|[(]|$)/$1__$2__$3/g;
$line =~ s/(^|\s|[(])(volatile)\b(\s|[(]|$)/$1__$2__$3/g;
- $line =~ s/#ifndef _UAPI/#ifndef /;
- $line =~ s/#define _UAPI/#define /;
- $line =~ s!#endif /[*] _UAPI!#endif /* !;
+ $line =~ s/#ifndef\s+_UAPI/#ifndef /;
+ $line =~ s/#define\s+_UAPI/#define /;
+ $line =~ s!#endif\s+/[*]\s*_UAPI!#endif /* !;
printf {$out} "%s", $line;
}
close $out;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 46e7aff80d1..28b76156781 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -137,6 +137,8 @@ use strict;
# should document the "Context:" of the function, e.g. whether the functions
# can be called from interrupts. Unlike other sections you can end it with an
# empty line.
+# A non-void function should have a "Return:" section describing the return
+# value(s).
# Example-sections should contain the string EXAMPLE so that they are marked
# appropriately in DocBook.
#
@@ -315,6 +317,7 @@ my $section_default = "Description"; # default section
my $section_intro = "Introduction";
my $section = $section_default;
my $section_context = "Context";
+my $section_return = "Return";
my $undescribed = "-- undescribed --";
@@ -2039,6 +2042,28 @@ sub check_sections($$$$$$) {
}
##
+# Checks the section describing the return value of a function.
+sub check_return_section {
+ my $file = shift;
+ my $declaration_name = shift;
+ my $return_type = shift;
+
+ # Ignore an empty return type (It's a macro)
+ # Ignore functions with a "void" return type. (But don't ignore "void *")
+ if (($return_type eq "") || ($return_type =~ /void\s*\w*\s*$/)) {
+ return;
+ }
+
+ if (!defined($sections{$section_return}) ||
+ $sections{$section_return} eq "") {
+ print STDERR "Warning(${file}:$.): " .
+ "No description found for return value of " .
+ "'$declaration_name'\n";
+ ++$warnings;
+ }
+}
+
+##
# takes a function prototype and the name of the current file being
# processed and spits out all the details stored in the global
# arrays/hashes.
@@ -2109,6 +2134,15 @@ sub dump_function($$) {
my $prms = join " ", @parameterlist;
check_sections($file, $declaration_name, "function", $sectcheck, $prms, "");
+ # This check emits a lot of warnings at the moment, because many
+ # functions don't have a 'Return' doc section. So until the number
+ # of warnings goes sufficiently down, the check is only performed in
+ # verbose mode.
+ # TODO: always perform the check.
+ if ($verbose) {
+ check_return_section($file, $declaration_name, $return_type);
+ }
+
output_declaration($declaration_name,
'function',
{'function' => $declaration_name,
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 5c113123ed9..68bb4efc5af 100644
--- a/scripts/pnmtologo.c
+++ b/scripts/pnmtologo.c
@@ -74,6 +74,7 @@ static unsigned int logo_height;
static struct color **logo_data;
static struct color logo_clut[MAX_LINUX_LOGO_COLORS];
static unsigned int logo_clutsize;
+static int is_plain_pbm = 0;
static void die(const char *fmt, ...)
__attribute__ ((noreturn)) __attribute ((format (printf, 1, 2)));
@@ -103,6 +104,11 @@ static unsigned int get_number(FILE *fp)
val = 0;
while (isdigit(c)) {
val = 10*val+c-'0';
+ /* Some PBMs are 'broken'; GIMP, for example, exports a PBM without space
+ * between the digits. This is OK because we know a PBM can only have a '1'
+ * or a '0' for the digit. */
+ if (is_plain_pbm)
+ break;
c = fgetc(fp);
if (c == EOF)
die("%s: end of file\n", filename);
@@ -167,6 +173,7 @@ static void read_image(void)
switch (magic) {
case '1':
/* Plain PBM */
+ is_plain_pbm = 1;
for (i = 0; i < logo_height; i++)
for (j = 0; j < logo_width; j++)
logo_data[i][j].red = logo_data[i][j].green =
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 79fdafb0d26..08f06c00745 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -48,13 +48,14 @@ find_arch_sources()
for i in $archincludedir; do
prune="$prune -wholename $i -prune -o"
done
- find ${tree}arch/$1 $ignore $prune -name "$2" -print;
+ find ${tree}arch/$1 $ignore $subarchprune $prune -name "$2" -print;
}
# find sources in arch/$1/include
find_arch_include_sources()
{
- include=$(find ${tree}arch/$1/ -name include -type d);
+ include=$(find ${tree}arch/$1/ $subarchprune \
+ -name include -type d -print);
if [ -n "$include" ]; then
archincludedir="$archincludedir $include"
find $include $ignore -name "$2" -print;
@@ -95,6 +96,32 @@ all_sources()
find_other_sources '*.[chS]'
}
+all_compiled_sources()
+{
+ for i in $(all_sources); do
+ case "$i" in
+ *.[cS])
+ j=${i/\.[cS]/\.o}
+ if [ -e $j ]; then
+ echo $i
+ fi
+ ;;
+ *)
+ echo $i
+ ;;
+ esac
+ done
+}
+
+all_target_sources()
+{
+ if [ -n "$COMPILED_SOURCE" ]; then
+ all_compiled_sources
+ else
+ all_sources
+ fi
+}
+
all_kconfigs()
{
for arch in $ALLSOURCE_ARCHS; do
@@ -110,18 +137,18 @@ all_defconfigs()
docscope()
{
- (echo \-k; echo \-q; all_sources) > cscope.files
+ (echo \-k; echo \-q; all_target_sources) > cscope.files
cscope -b -f cscope.out
}
dogtags()
{
- all_sources | gtags -i -f -
+ all_target_sources | gtags -i -f -
}
exuberant()
{
- all_sources | xargs $1 -a \
+ all_target_sources | xargs $1 -a \
-I __initdata,__exitdata,__acquires,__releases \
-I __read_mostly,____cacheline_aligned \
-I ____cacheline_aligned_in_smp \
@@ -173,7 +200,7 @@ exuberant()
emacs()
{
- all_sources | xargs $1 -a \
+ all_target_sources | xargs $1 -a \
--regex='/^(ENTRY|_GLOBAL)(\([^)]*\)).*/\2/' \
--regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/' \
--regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/' \
@@ -220,11 +247,10 @@ xtags()
elif $1 --version 2>&1 | grep -iq emacs; then
emacs $1
else
- all_sources | xargs $1 -a
+ all_target_sources | xargs $1 -a
fi
}
-
# Support um (which uses SUBARCH)
if [ "${ARCH}" = "um" ]; then
if [ "$SUBARCH" = "i386" ]; then
@@ -234,6 +260,21 @@ if [ "${ARCH}" = "um" ]; then
else
archinclude=${SUBARCH}
fi
+elif [ "${SRCARCH}" = "arm" -a "${SUBARCH}" != "" ]; then
+ subarchdir=$(find ${tree}arch/$SRCARCH/ -name "mach-*" -type d -o \
+ -name "plat-*" -type d);
+ for i in $subarchdir; do
+ case "$i" in
+ *"mach-"${SUBARCH})
+ ;;
+ *"plat-"${SUBARCH})
+ ;;
+ *)
+ subarchprune="$subarchprune \
+ -wholename $i -prune -o"
+ ;;
+ esac
+ done
fi
remove_structs=
diff --git a/security/capability.c b/security/capability.c
index b14a30c234b..0fe5a026aef 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -395,6 +395,11 @@ static int cap_kernel_module_request(char *kmod_name)
return 0;
}
+static int cap_kernel_module_from_file(struct file *file)
+{
+ return 0;
+}
+
static int cap_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -967,6 +972,7 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, kernel_act_as);
set_to_cap_if_null(ops, kernel_create_files_as);
set_to_cap_if_null(ops, kernel_module_request);
+ set_to_cap_if_null(ops, kernel_module_from_file);
set_to_cap_if_null(ops, task_fix_setuid);
set_to_cap_if_null(ops, task_setpgid);
set_to_cap_if_null(ops, task_getpgid);
diff --git a/security/commoncap.c b/security/commoncap.c
index 6dbae4650ab..7ee08c756d6 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -76,24 +76,33 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
int cap, int audit)
{
- for (;;) {
- /* The owner of the user namespace has all caps. */
- if (targ_ns != &init_user_ns && uid_eq(targ_ns->owner, cred->euid))
- return 0;
+ struct user_namespace *ns = targ_ns;
+ /* See if cred has the capability in the target user namespace
+ * by examining the target user namespace and all of the target
+ * user namespace's parents.
+ */
+ for (;;) {
/* Do we have the necessary capabilities? */
- if (targ_ns == cred->user_ns)
+ if (ns == cred->user_ns)
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/* Have we tried all of the parent namespaces? */
- if (targ_ns == &init_user_ns)
+ if (ns == &init_user_ns)
return -EPERM;
+ /*
+ * The owner of the user namespace in the parent of the
+ * user namespace has all caps.
+ */
+ if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ return 0;
+
/*
- *If you have a capability in a parent user ns, then you have
+ * If you have a capability in a parent user ns, then you have
* it over all children user namespaces as well.
*/
- targ_ns = targ_ns->parent;
+ ns = ns->parent;
}
/* We never get here */
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 6ee8826662c..3b2adb794f1 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -127,7 +127,7 @@ struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, POST_SETATTR };
+enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, MODULE_CHECK, POST_SETATTR };
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
int flags);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index b356884fb3e..0cea3db2165 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -100,12 +100,12 @@ err_out:
* ima_get_action - appraise & measure decision based on policy.
* @inode: pointer to inode to measure
* @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP, MODULE_CHECK)
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
- * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
+ * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP | MODULE_CHECK
* mask: contains the permission mask
* fsmagic: hex value
*
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 73c9a268253..45de18e9a6f 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -280,6 +280,27 @@ int ima_file_check(struct file *file, int mask)
}
EXPORT_SYMBOL_GPL(ima_file_check);
+/**
+ * ima_module_check - based on policy, collect/store/appraise measurement.
+ * @file: pointer to the file to be measured/appraised
+ *
+ * Measure/appraise kernel modules based on policy.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * Return code is based upon measurement appraisal.
+ */
+int ima_module_check(struct file *file)
+{
+ int rc;
+
+ if (!file)
+ rc = INTEGRITY_UNKNOWN;
+ else
+ rc = process_measurement(file, file->f_dentry->d_name.name,
+ MAY_EXEC, MODULE_CHECK);
+ return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0;
+}
+
static int __init init_ima(void)
{
int error;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c7dacd2eab7..af7d182d5a4 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -80,6 +80,7 @@ static struct ima_rule_entry default_rules[] = {
.flags = IMA_FUNC | IMA_MASK},
{.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
+ {.action = MEASURE,.func = MODULE_CHECK, .flags = IMA_FUNC},
};
static struct ima_rule_entry default_appraise_rules[] = {
@@ -401,6 +402,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
/* PATH_CHECK is for backwards compat */
else if (strcmp(args[0].from, "PATH_CHECK") == 0)
entry->func = FILE_CHECK;
+ else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+ entry->func = MODULE_CHECK;
else if (strcmp(args[0].from, "FILE_MMAP") == 0)
entry->func = FILE_MMAP;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/security/keys/key.c b/security/keys/key.c
index a15c9da8f97..8fb7c7bd465 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -854,13 +854,13 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
- perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+ perm |= KEY_USR_VIEW;
if (ktype->read)
- perm |= KEY_POS_READ | KEY_USR_READ;
+ perm |= KEY_POS_READ;
if (ktype == &key_type_keyring || ktype->update)
- perm |= KEY_USR_WRITE;
+ perm |= KEY_POS_WRITE;
}
/* allocate a new key */
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 5d34b4e827d..4b5c948eb41 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1132,12 +1132,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret < 0)
- return ret;
+ goto err;
if (ret == 0)
goto no_payload_free;
ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-
+err:
if (iov != iovstack)
kfree(iov);
return ret;
@@ -1495,7 +1495,8 @@ long keyctl_session_to_parent(void)
goto error_keyring;
newwork = &cred->rcu;
- cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
+ cred->session_keyring = key_ref_to_ptr(keyring_r);
+ keyring_r = NULL;
init_task_work(newwork, key_change_session_keyring);
me = current;
@@ -1519,7 +1520,7 @@ long keyctl_session_to_parent(void)
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
- mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
+ mycred->session_keyring == pcred->session_keyring) {
ret = 0;
goto unlock;
}
@@ -1535,9 +1536,9 @@ long keyctl_session_to_parent(void)
goto unlock;
/* the keyrings must have the same UID */
- if ((pcred->tgcred->session_keyring &&
- !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) ||
- !uid_eq(mycred->tgcred->session_keyring->uid, mycred->euid))
+ if ((pcred->session_keyring &&
+ !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->session_keyring->uid, mycred->euid))
goto unlock;
/* cancel an already pending keyring replacement */
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 6e42df15a24..6ece7f2e570 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -257,17 +257,14 @@ error:
* Allocate a keyring and link into the destination keyring.
*/
struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
- const struct cred *cred, unsigned long flags,
- struct key *dest)
+ const struct cred *cred, key_perm_t perm,
+ unsigned long flags, struct key *dest)
{
struct key *keyring;
int ret;
keyring = key_alloc(&key_type_keyring, description,
- uid, gid, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
- flags);
-
+ uid, gid, cred, perm, flags);
if (!IS_ERR(keyring)) {
ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
if (ret < 0) {
@@ -278,6 +275,7 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
return keyring;
}
+EXPORT_SYMBOL(keyring_alloc);
/**
* keyring_search_aux - Search a keyring tree for a key matching some criteria
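With the extra key_perm_t argument, callers of keyring_alloc() now spell out the keyring's permissions explicitly, as the later hunks in this patch do. A hypothetical caller, modelled on those call sites:

#include <linux/cred.h>
#include <linux/err.h>
#include <linux/key.h>

/* Illustrative only: allocate a keyring with the seven-argument keyring_alloc(). */
static struct key *example_make_keyring(const struct cred *cred)
{
	return keyring_alloc("_example", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			     cred,
			     (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
			     KEY_ALLOC_IN_QUOTA, NULL);
}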
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index a58f712605d..20e4bf57aec 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -45,10 +45,12 @@ int install_user_keyrings(void)
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
+ key_perm_t user_keyring_perm;
char buf[20];
int ret;
uid_t uid;
+ user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
uid = from_kuid(cred->user_ns, user->uid);
@@ -73,8 +75,8 @@ int install_user_keyrings(void)
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
- cred, KEY_ALLOC_IN_QUOTA,
- NULL);
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
@@ -89,7 +91,8 @@ int install_user_keyrings(void)
if (IS_ERR(session_keyring)) {
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
- cred, KEY_ALLOC_IN_QUOTA, NULL);
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
@@ -130,6 +133,7 @@ int install_thread_keyring_to_cred(struct cred *new)
struct key *keyring;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
@@ -170,27 +174,18 @@ static int install_thread_keyring(void)
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
- int ret;
- if (new->tgcred->process_keyring)
+ if (new->process_keyring)
return -EEXIST;
- keyring = keyring_alloc("_pid", new->uid, new->gid,
- new, KEY_ALLOC_QUOTA_OVERRUN, NULL);
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
- spin_lock_irq(&new->tgcred->lock);
- if (!new->tgcred->process_keyring) {
- new->tgcred->process_keyring = keyring;
- keyring = NULL;
- ret = 0;
- } else {
- ret = -EEXIST;
- }
- spin_unlock_irq(&new->tgcred->lock);
- key_put(keyring);
- return ret;
+ new->process_keyring = keyring;
+ return 0;
}
/*
@@ -231,11 +226,12 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
- if (cred->tgcred->session_keyring)
+ if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
- keyring = keyring_alloc("_ses", cred->uid, cred->gid,
- cred, flags, NULL);
+ keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
@@ -243,17 +239,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
}
/* install the keyring */
- spin_lock_irq(&cred->tgcred->lock);
- old = cred->tgcred->session_keyring;
- rcu_assign_pointer(cred->tgcred->session_keyring, keyring);
- spin_unlock_irq(&cred->tgcred->lock);
-
- /* we're using RCU on the pointer, but there's no point synchronising
- * on it if it didn't previously point to anything */
- if (old) {
- synchronize_rcu();
+ old = cred->session_keyring;
+ rcu_assign_pointer(cred->session_keyring, keyring);
+
+ if (old)
key_put(old);
- }
return 0;
}
@@ -358,8 +348,6 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
- if (ret)
- break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -370,17 +358,15 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the process keyring second */
- if (cred->tgcred->process_keyring) {
+ if (cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->tgcred->process_keyring, 1),
+ make_key_ref(cred->process_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
- if (ret)
- break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -391,12 +377,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the session keyring */
- if (cred->tgcred->session_keyring) {
+ if (cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(
- cred->tgcred->session_keyring),
- 1),
+ make_key_ref(rcu_dereference(cred->session_keyring), 1),
cred, type, description, match, no_state_check);
rcu_read_unlock();
@@ -566,7 +550,7 @@ try_again:
break;
case KEY_SPEC_PROCESS_KEYRING:
- if (!cred->tgcred->process_keyring) {
+ if (!cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -578,13 +562,13 @@ try_again:
goto reget_creds;
}
- key = cred->tgcred->process_keyring;
+ key = cred->process_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
- if (!cred->tgcred->session_keyring) {
+ if (!cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
@@ -599,7 +583,7 @@ try_again:
if (ret < 0)
goto error;
goto reget_creds;
- } else if (cred->tgcred->session_keyring ==
+ } else if (cred->session_keyring ==
cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
@@ -609,7 +593,7 @@ try_again:
}
rcu_read_lock();
- key = rcu_dereference(cred->tgcred->session_keyring);
+ key = rcu_dereference(cred->session_keyring);
atomic_inc(&key->usage);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
@@ -769,12 +753,6 @@ long join_session_keyring(const char *name)
struct key *keyring;
long ret, serial;
- /* only permit this if there's a single thread in the thread group -
- * this avoids us having to adjust the creds on all threads and risking
- * ENOMEM */
- if (!current_is_single_threaded())
- return -EMLINK;
-
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -786,7 +764,7 @@ long join_session_keyring(const char *name)
if (ret < 0)
goto error;
- serial = new->tgcred->session_keyring->serial;
+ serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
@@ -800,8 +778,10 @@ long join_session_keyring(const char *name)
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
- keyring = keyring_alloc(name, old->uid, old->gid, old,
- KEY_ALLOC_IN_QUOTA, NULL);
+ keyring = keyring_alloc(
+ name, old->uid, old->gid, old,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
@@ -809,6 +789,9 @@ long join_session_keyring(const char *name)
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
+ } else if (keyring == new->session_keyring) {
+ ret = 0;
+ goto error2;
}
/* we've got a keyring - now to install it */
@@ -865,8 +848,7 @@ void key_change_session_keyring(struct callback_head *twork)
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
- new->tgcred->tgid = old->tgcred->tgid;
- new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
+ new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 66e21184b55..4bd6bdb7419 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -126,6 +126,7 @@ static int call_sbin_request_key(struct key_construction *cons,
cred = get_current_cred();
keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
put_cred(cred);
if (IS_ERR(keyring)) {
@@ -150,12 +151,12 @@ static int call_sbin_request_key(struct key_construction *cons,
cred->thread_keyring ? cred->thread_keyring->serial : 0);
prkey = 0;
- if (cred->tgcred->process_keyring)
- prkey = cred->tgcred->process_keyring->serial;
+ if (cred->process_keyring)
+ prkey = cred->process_keyring->serial;
sprintf(keyring_str[1], "%d", prkey);
rcu_read_lock();
- session = rcu_dereference(cred->tgcred->session_keyring);
+ session = rcu_dereference(cred->session_keyring);
if (!session)
session = cred->user->session_keyring;
sskey = session->serial;
@@ -297,14 +298,14 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
- dest_keyring = key_get(cred->tgcred->process_keyring);
+ dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
- rcu_dereference(cred->tgcred->session_keyring));
+ rcu_dereference(cred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
@@ -347,6 +348,7 @@ static int construct_alloc_key(struct key_type *type,
const struct cred *cred = current_cred();
unsigned long prealloc;
struct key *key;
+ key_perm_t perm;
key_ref_t key_ref;
int ret;
@@ -355,8 +357,15 @@ static int construct_alloc_key(struct key_type *type,
*_key = NULL;
mutex_lock(&user->cons_lock);
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+ if (type->read)
+ perm |= KEY_POS_READ;
+ if (type == &key_type_keyring || type->update)
+ perm |= KEY_POS_WRITE;
+
key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
- KEY_POS_ALL, flags);
+ perm, flags);
if (IS_ERR(key))
goto alloc_failed;
diff --git a/security/security.c b/security/security.c
index 8dcd4ae10a5..daa97f4ac9d 100644
--- a/security/security.c
+++ b/security/security.c
@@ -820,6 +820,16 @@ int security_kernel_module_request(char *kmod_name)
return security_ops->kernel_module_request(kmod_name);
}
+int security_kernel_module_from_file(struct file *file)
+{
+ int ret;
+
+ ret = security_ops->kernel_module_from_file(file);
+ if (ret)
+ return ret;
+ return ima_module_check(file);
+}
+
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 370a6468b3b..855e464e92e 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -69,6 +69,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index 603b0878434..e69de9c642b 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -1,6 +1,10 @@
config SECURITY_SMACK
bool "Simplified Mandatory Access Control Kernel Support"
- depends on NETLABEL && SECURITY_NETWORK
+ depends on NET
+ depends on INET
+ depends on SECURITY
+ select NETLABEL
+ select SECURITY_NETWORK
default n
help
This selects the Simplified Mandatory Access Control Kernel.
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 99929a50093..76a5dca4640 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2063,6 +2063,19 @@ static const struct file_operations smk_revoke_subj_ops = {
.llseek = generic_file_llseek,
};
+static struct kset *smackfs_kset;
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+ smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
+ if (!smackfs_kset)
+ return -ENOMEM;
+ return 0;
+}
+
/**
* smk_fill_super - fill the /smackfs superblock
* @sb: the empty superblock
@@ -2183,6 +2196,10 @@ static int __init init_smk_fs(void)
if (!security_module_enable(&smack_ops))
return 0;
+ err = smk_init_sysfs();
+ if (err)
+ printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
err = register_filesystem(&smk_fs_type);
if (!err) {
smackfs_mount = kern_mount(&smk_fs_type);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index b4c29848b49..23414b93771 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
@@ -29,12 +30,37 @@ static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
struct ptrace_relation {
struct task_struct *tracer;
struct task_struct *tracee;
+ bool invalid;
struct list_head node;
+ struct rcu_head rcu;
};
static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ *
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+ struct ptrace_relation *relation;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid) {
+ list_del_rcu(&relation->node);
+ kfree_rcu(relation, rcu);
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+}
+
/**
* yama_ptracer_add - add/replace an exception for this tracer/tracee pair
* @tracer: the task_struct of the process doing the ptrace
@@ -48,32 +74,34 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
static int yama_ptracer_add(struct task_struct *tracer,
struct task_struct *tracee)
{
- int rc = 0;
- struct ptrace_relation *added;
- struct ptrace_relation *entry, *relation = NULL;
+ struct ptrace_relation *relation, *added;
added = kmalloc(sizeof(*added), GFP_KERNEL);
if (!added)
return -ENOMEM;
- spin_lock_bh(&ptracer_relations_lock);
- list_for_each_entry(entry, &ptracer_relations, node)
- if (entry->tracee == tracee) {
- relation = entry;
- break;
+ added->tracee = tracee;
+ added->tracer = tracer;
+ added->invalid = false;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ list_replace_rcu(&relation->node, &added->node);
+ kfree_rcu(relation, rcu);
+ goto out;
}
- if (!relation) {
- relation = added;
- relation->tracee = tracee;
- list_add(&relation->node, &ptracer_relations);
}
- relation->tracer = tracer;
- spin_unlock_bh(&ptracer_relations_lock);
- if (added != relation)
- kfree(added);
+ list_add_rcu(&added->node, &ptracer_relations);
- return rc;
+out:
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+ return 0;
}
/**
@@ -84,16 +112,23 @@ static int yama_ptracer_add(struct task_struct *tracer,
static void yama_ptracer_del(struct task_struct *tracer,
struct task_struct *tracee)
{
- struct ptrace_relation *relation, *safe;
+ struct ptrace_relation *relation;
+ bool marked = false;
- spin_lock_bh(&ptracer_relations_lock);
- list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
if (relation->tracee == tracee ||
(tracer && relation->tracer == tracer)) {
- list_del(&relation->node);
- kfree(relation);
+ relation->invalid = true;
+ marked = true;
}
- spin_unlock_bh(&ptracer_relations_lock);
+ }
+ rcu_read_unlock();
+
+ if (marked)
+ schedule_work(&yama_relation_work);
}
/**
@@ -217,21 +252,22 @@ static int ptracer_exception_found(struct task_struct *tracer,
struct task_struct *parent = NULL;
bool found = false;
- spin_lock_bh(&ptracer_relations_lock);
rcu_read_lock();
if (!thread_group_leader(tracee))
tracee = rcu_dereference(tracee->group_leader);
- list_for_each_entry(relation, &ptracer_relations, node)
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
if (relation->tracee == tracee) {
parent = relation->tracer;
found = true;
break;
}
+ }
if (found && (parent == NULL || task_is_descendant(parent, tracer)))
rc = 1;
rcu_read_unlock();
- spin_unlock_bh(&ptracer_relations_lock);
return rc;
}
@@ -262,14 +298,18 @@ int yama_ptrace_access_check(struct task_struct *child,
/* No additional restrictions. */
break;
case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
if (!task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
- !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
+ rcu_read_unlock();
break;
case YAMA_SCOPE_CAPABILITY:
- if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
+ rcu_read_unlock();
break;
case YAMA_SCOPE_NO_ATTACH:
default:
@@ -307,8 +347,10 @@ int yama_ptrace_traceme(struct task_struct *parent)
/* Only disallow PTRACE_TRACEME on more aggressive settings. */
switch (ptrace_scope) {
case YAMA_SCOPE_CAPABILITY:
- if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
+ rcu_read_unlock();
break;
case YAMA_SCOPE_NO_ATTACH:
rc = -EPERM;
diff --git a/sound/Kconfig b/sound/Kconfig
index 261a03c8a20..c710ce2c5c3 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -52,9 +52,6 @@ config SOUND_OSS_CORE_PRECLAIM
Disabling this allows alternative OSS implementations.
- Please read Documentation/feature-removal-schedule.txt for
- details.
-
If unsure, say Y.
source "sound/oss/dmasound/Kconfig"
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 5119fdabcb9..aa5d8034890 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -786,7 +786,7 @@ static int aaci_resume(struct amba_device *dev)
#endif
-static struct ac97_pcm ac97_defs[] __devinitdata = {
+static struct ac97_pcm ac97_defs[] = {
[0] = { /* Front PCM */
.exclusive = 1,
.r = {
@@ -832,7 +832,7 @@ static struct snd_ac97_bus_ops aaci_bus_ops = {
.read = aaci_ac97_read,
};
-static int __devinit aaci_probe_ac97(struct aaci *aaci)
+static int aaci_probe_ac97(struct aaci *aaci)
{
struct snd_ac97_template ac97_template;
struct snd_ac97_bus *ac97_bus;
@@ -893,7 +893,7 @@ static void aaci_free_card(struct snd_card *card)
iounmap(aaci->base);
}
-static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
+static struct aaci *aaci_init_card(struct amba_device *dev)
{
struct aaci *aaci;
struct snd_card *card;
@@ -926,7 +926,7 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
return aaci;
}
-static int __devinit aaci_init_pcm(struct aaci *aaci)
+static int aaci_init_pcm(struct aaci *aaci)
{
struct snd_pcm *pcm;
int ret;
@@ -948,7 +948,7 @@ static int __devinit aaci_init_pcm(struct aaci *aaci)
return ret;
}
-static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
+static unsigned int aaci_size_fifo(struct aaci *aaci)
{
struct aaci_runtime *aacirun = &aaci->playback;
int i;
@@ -984,8 +984,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
return i;
}
-static int __devinit aaci_probe(struct amba_device *dev,
- const struct amba_id *id)
+static int aaci_probe(struct amba_device *dev,
+ const struct amba_id *id)
{
struct aaci *aaci;
int ret, i;
@@ -1072,7 +1072,7 @@ static int __devinit aaci_probe(struct amba_device *dev,
return ret;
}
-static int __devexit aaci_remove(struct amba_device *dev)
+static int aaci_remove(struct amba_device *dev)
{
struct snd_card *card = amba_get_drvdata(dev);
@@ -1104,7 +1104,7 @@ static struct amba_driver aaci_driver = {
.name = DRIVER_NAME,
},
.probe = aaci_probe,
- .remove = __devexit_p(aaci_remove),
+ .remove = aaci_remove,
.suspend = aaci_suspend,
.resume = aaci_resume,
.id_table = aaci_ids,
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index 48d7c0aa507..6fc0ae90e5b 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -314,7 +314,7 @@ int pxa2xx_ac97_hw_resume(void)
EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_resume);
#endif
-int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
+int pxa2xx_ac97_hw_probe(struct platform_device *dev)
{
int ret;
pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 4e1fda75c1c..ec54be4efff 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -163,7 +163,7 @@ static int pxa2xx_ac97_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pxa2xx_ac97_pm_ops, pxa2xx_ac97_suspend, pxa2xx_ac97_resume);
#endif
-static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
+static int pxa2xx_ac97_probe(struct platform_device *dev)
{
struct snd_card *card;
struct snd_ac97_bus *ac97_bus;
@@ -224,7 +224,7 @@ err_dev:
return ret;
}
-static int __devexit pxa2xx_ac97_remove(struct platform_device *dev)
+static int pxa2xx_ac97_remove(struct platform_device *dev)
{
struct snd_card *card = platform_get_drvdata(dev);
@@ -239,7 +239,7 @@ static int __devexit pxa2xx_ac97_remove(struct platform_device *dev)
static struct platform_driver pxa2xx_ac97_driver = {
.probe = pxa2xx_ac97_probe,
- .remove = __devexit_p(pxa2xx_ac97_remove),
+ .remove = pxa2xx_ac97_remove,
.driver = {
.name = "pxa2xx-ac97",
.owner = THIS_MODULE,
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 277ebce23a4..071ce1b5f2b 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -309,7 +309,7 @@ static struct snd_pcm_ops atmel_abdac_ops = {
.pointer = atmel_abdac_pointer,
};
-static int __devinit atmel_abdac_pcm_new(struct atmel_abdac *dac)
+static int atmel_abdac_pcm_new(struct atmel_abdac *dac)
{
struct snd_pcm_hardware hw = atmel_abdac_hw;
struct snd_pcm *pcm;
@@ -386,7 +386,7 @@ static int set_sample_rates(struct atmel_abdac *dac)
return retval;
}
-static int __devinit atmel_abdac_probe(struct platform_device *pdev)
+static int atmel_abdac_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct atmel_abdac *dac;
@@ -567,7 +567,7 @@ static SIMPLE_DEV_PM_OPS(atmel_abdac_pm, atmel_abdac_suspend, atmel_abdac_resume
#define ATMEL_ABDAC_PM_OPS NULL
#endif
-static int __devexit atmel_abdac_remove(struct platform_device *pdev)
+static int atmel_abdac_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
struct atmel_abdac *dac = get_dac(card);
@@ -589,7 +589,7 @@ static int __devexit atmel_abdac_remove(struct platform_device *pdev)
}
static struct platform_driver atmel_abdac_driver = {
- .remove = __devexit_p(atmel_abdac_remove),
+ .remove = atmel_abdac_remove,
.driver = {
.name = "atmel_abdac",
.owner = THIS_MODULE,
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 9052aff37f6..79d6bda5875 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -728,7 +728,7 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
return retval;
}
-static struct ac97_pcm at91_ac97_pcm_defs[] __devinitdata = {
+static struct ac97_pcm at91_ac97_pcm_defs[] = {
/* Playback */
{
.exclusive = 1,
@@ -756,7 +756,7 @@ static struct ac97_pcm at91_ac97_pcm_defs[] __devinitdata = {
},
};
-static int __devinit atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
+static int atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_hardware hw = atmel_ac97c_hw;
@@ -902,7 +902,7 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
}
}
-static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
+static int atmel_ac97c_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct atmel_ac97c *chip;
@@ -1168,7 +1168,7 @@ static SIMPLE_DEV_PM_OPS(atmel_ac97c_pm, atmel_ac97c_suspend, atmel_ac97c_resume
#define ATMEL_AC97C_PM_OPS NULL
#endif
-static int __devexit atmel_ac97c_remove(struct platform_device *pdev)
+static int atmel_ac97c_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
struct atmel_ac97c *chip = get_chip(card);
@@ -1205,7 +1205,7 @@ static int __devexit atmel_ac97c_remove(struct platform_device *pdev)
}
static struct platform_driver atmel_ac97c_driver = {
- .remove = __devexit_p(atmel_ac97c_remove),
+ .remove = atmel_ac97c_remove,
.driver = {
.name = "atmel_ac97c",
.owner = THIS_MODULE,
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 71cc3ddf5c1..727ac44d39f 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -199,12 +199,13 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
- int stream = snd_pcm_plug_stream(plug);
+ int stream;
if (snd_BUG_ON(!plug))
return -ENXIO;
if (drv_frames == 0)
return 0;
+ stream = snd_pcm_plug_stream(plug);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_last(plug);
while (plugin && drv_frames > 0) {
@@ -230,13 +231,14 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
snd_pcm_sframes_t frames;
- int stream = snd_pcm_plug_stream(plug);
+ int stream;
if (snd_BUG_ON(!plug))
return -ENXIO;
if (clt_frames == 0)
return 0;
frames = clt_frames;
+ stream = snd_pcm_plug_stream(plug);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_first(plug);
while (plugin && frames > 0) {
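The two pcm_plugin.c hunks above defer snd_pcm_plug_stream(plug) until after the snd_BUG_ON(!plug) test, so the substream pointer is validated before it is ever dereferenced. The same ordering rule in a self-contained sketch (the types and helper are stand-ins, not the ALSA ones):

#include <errno.h>

struct plug { int stream; };

static int plug_stream(const struct plug *p)
{
        return p->stream;               /* dereferences p */
}

int client_size(const struct plug *p, long frames)
{
        int stream;

        if (!p)                         /* validate first ... */
                return -ENXIO;
        if (frames == 0)
                return 0;
        stream = plug_stream(p);        /* ... then dereference */
        return stream;
}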
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 030102caeee..61798f85d03 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -981,8 +981,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
kfree(runtime->hw_constraints.rules);
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
- if (runtime->hwptr_log)
- kfree(runtime->hwptr_log);
+ kfree(runtime->hwptr_log);
#endif
kfree(runtime);
substream->runtime = NULL;
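kfree() is defined to accept NULL and do nothing, so the dropped hwptr_log check was redundant. Userspace free() gives the same guarantee, as this trivial sketch shows:

#include <stdlib.h>

int main(void)
{
        char *log = NULL;               /* may or may not have been allocated */
        free(log);                      /* free(NULL) is defined to do nothing */
        return 0;
}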
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 91cdf9435fe..af49721ba0e 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -190,7 +190,9 @@ struct snd_pcm_status32 {
u32 avail_max;
u32 overrange;
s32 suspended_state;
- unsigned char reserved[60];
+ u32 reserved_alignment;
+ struct compat_timespec audio_tstamp;
+ unsigned char reserved[56-sizeof(struct compat_timespec)];
} __attribute__((packed));
@@ -205,17 +207,16 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
return err;
if (put_user(status.state, &src->state) ||
- put_user(status.trigger_tstamp.tv_sec, &src->trigger_tstamp.tv_sec) ||
- put_user(status.trigger_tstamp.tv_nsec, &src->trigger_tstamp.tv_nsec) ||
- put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
- put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
+ compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ compat_put_timespec(&status.tstamp, &src->tstamp) ||
put_user(status.appl_ptr, &src->appl_ptr) ||
put_user(status.hw_ptr, &src->hw_ptr) ||
put_user(status.delay, &src->delay) ||
put_user(status.avail, &src->avail) ||
put_user(status.avail_max, &src->avail_max) ||
put_user(status.overrange, &src->overrange) ||
- put_user(status.suspended_state, &src->suspended_state))
+ put_user(status.suspended_state, &src->suspended_state) ||
+ compat_put_timespec(&status.audio_tstamp, &src->audio_tstamp))
return -EFAULT;
return err;
@@ -364,6 +365,7 @@ struct snd_pcm_mmap_status32 {
u32 hw_ptr;
struct compat_timespec tstamp;
s32 suspended_state;
+ struct compat_timespec audio_tstamp;
} __attribute__((packed));
struct snd_pcm_mmap_control32 {
@@ -426,12 +428,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
sstatus.hw_ptr = status->hw_ptr % boundary;
sstatus.tstamp = status->tstamp;
sstatus.suspended_state = status->suspended_state;
+ sstatus.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
if (put_user(sstatus.state, &src->s.status.state) ||
put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
- put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp.tv_sec) ||
- put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp.tv_nsec) ||
+ compat_put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+ compat_put_timespec(&sstatus.audio_tstamp,
+ &src->s.status.audio_tstamp) ||
put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
put_user(scontrol.avail_min, &src->c.control.avail_min))
return -EFAULT;
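The compat layout change earlier in this file carves the old 60-byte reserved tail into a 4-byte alignment pad, the new audio_tstamp, and a correspondingly shrunk reserved array, so the 32-bit structure keeps its original size. A stand-alone sketch with stand-in types makes the arithmetic explicit (only the tail fields are modelled; the real struct is packed and has many more members):

#include <stdint.h>

struct compat_timespec { int32_t tv_sec; int32_t tv_nsec; };

struct old_tail {
        unsigned char reserved[60];
};

struct new_tail {
        uint32_t reserved_alignment;            /* keeps the timespec 4-byte aligned */
        struct compat_timespec audio_tstamp;    /* newly exported audio timestamp */
        unsigned char reserved[56 - sizeof(struct compat_timespec)];
};

/* The replacement occupies exactly the bytes the old reserved[] area used,
 * so the 32-bit ABI size does not change. */
_Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
               "compat status size preserved");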
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index f42c10a4331..c4840ff75d0 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -316,6 +316,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
unsigned long jdelta;
unsigned long curr_jiffies;
struct timespec curr_tstamp;
+ struct timespec audio_tstamp;
+ int crossed_boundary = 0;
old_hw_ptr = runtime->status->hw_ptr;
@@ -327,9 +329,14 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
*/
pos = substream->ops->pointer(substream);
curr_jiffies = jiffies;
- if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+ if ((runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK) &&
+ (substream->ops->wall_clock))
+ substream->ops->wall_clock(substream, &audio_tstamp);
+ }
+
if (pos == SNDRV_PCM_POS_XRUN) {
xrun(substream);
return -EPIPE;
@@ -360,8 +367,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
hw_base += runtime->buffer_size;
- if (hw_base >= runtime->boundary)
+ if (hw_base >= runtime->boundary) {
hw_base = 0;
+ crossed_boundary++;
+ }
new_hw_ptr = hw_base + pos;
goto __delta;
}
@@ -371,8 +380,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
/* pointer crosses the end of the ring buffer */
if (new_hw_ptr < old_hw_ptr) {
hw_base += runtime->buffer_size;
- if (hw_base >= runtime->boundary)
+ if (hw_base >= runtime->boundary) {
hw_base = 0;
+ crossed_boundary++;
+ }
new_hw_ptr = hw_base + pos;
}
__delta:
@@ -410,8 +421,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
while (hdelta > xrun_threshold) {
delta += runtime->buffer_size;
hw_base += runtime->buffer_size;
- if (hw_base >= runtime->boundary)
+ if (hw_base >= runtime->boundary) {
hw_base = 0;
+ crossed_boundary++;
+ }
new_hw_ptr = hw_base + pos;
hdelta -= runtime->hw_ptr_buffer_jiffies;
}
@@ -456,8 +469,10 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
/* the delta value is small or zero in most cases */
while (delta > 0) {
new_hw_ptr += runtime->period_size;
- if (new_hw_ptr >= runtime->boundary)
+ if (new_hw_ptr >= runtime->boundary) {
new_hw_ptr -= runtime->boundary;
+ crossed_boundary--;
+ }
delta--;
}
/* align hw_base to buffer_size */
@@ -507,9 +522,35 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
runtime->hw_ptr_jiffies = curr_jiffies;
- if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+ if (crossed_boundary) {
+ snd_BUG_ON(crossed_boundary != 1);
+ runtime->hw_ptr_wrap += runtime->boundary;
+ }
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
runtime->status->tstamp = curr_tstamp;
+ if (!(runtime->hw.info & SNDRV_PCM_INFO_HAS_WALL_CLOCK)) {
+ /*
+ * no wall clock available, provide audio timestamp
+ * derived from pointer position+delay
+ */
+ u64 audio_frames, audio_nsecs;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_frames = runtime->hw_ptr_wrap
+ + runtime->status->hw_ptr
+ - runtime->delay;
+ else
+ audio_frames = runtime->hw_ptr_wrap
+ + runtime->status->hw_ptr
+ + runtime->delay;
+ audio_nsecs = div_u64(audio_frames * 1000000000LL,
+ runtime->rate);
+ audio_tstamp = ns_to_timespec(audio_nsecs);
+ }
+ runtime->status->audio_tstamp = audio_tstamp;
+ }
+
return snd_pcm_update_state(substream, runtime);
}
@@ -1661,8 +1702,10 @@ static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
if (snd_pcm_running(substream) &&
snd_pcm_update_hw_ptr(substream) >= 0)
runtime->status->hw_ptr %= runtime->buffer_size;
- else
+ else {
runtime->status->hw_ptr = 0;
+ runtime->hw_ptr_wrap = 0;
+ }
snd_pcm_stream_unlock_irqrestore(substream, flags);
return 0;
}
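When the hardware offers no wall clock, the pcm_lib.c change above synthesizes the audio timestamp from the cumulative frame count (hw_ptr_wrap + hw_ptr, minus the queued delay for playback, plus it for capture) converted to nanoseconds at the stream rate. The arithmetic in a stand-alone sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t hw_ptr_wrap = 4096;    /* frames accumulated across boundary wraps */
        uint64_t hw_ptr      = 1200;    /* current position inside the ring buffer */
        uint64_t delay       = 256;     /* frames still queued in the hardware FIFO */
        unsigned int rate    = 48000;   /* stream rate in Hz */

        /* playback: frames actually rendered = position minus queued delay */
        uint64_t audio_frames = hw_ptr_wrap + hw_ptr - delay;
        uint64_t audio_nsecs  = audio_frames * 1000000000ULL / rate;

        printf("audio timestamp: %llu.%09llu s\n",
               (unsigned long long)(audio_nsecs / 1000000000ULL),
               (unsigned long long)(audio_nsecs % 1000000000ULL));
        return 0;
}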
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f9ddecf2f4c..09b4286c65f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -602,6 +602,8 @@ int snd_pcm_status(struct snd_pcm_substream *substream,
snd_pcm_update_hw_ptr(substream);
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
status->tstamp = runtime->status->tstamp;
+ status->audio_tstamp =
+ runtime->status->audio_tstamp;
goto _tstamp_end;
}
}
@@ -1998,7 +2000,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
if (runtime->dma_bytes) {
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
if (err < 0)
- return -EINVAL;
+ return err;
}
if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index 60e8fc1b344..040c60e1da2 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -66,7 +66,7 @@ struct ops_list {
/* operators */
struct snd_seq_dev_ops ops;
- /* registred devices */
+ /* registered devices */
struct list_head dev_list; /* list of devices */
int num_devices; /* number of associated devices */
int num_init_devices; /* number of initialized devices */
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index fe5ae09ffcc..7d02c322ed9 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -14,6 +14,7 @@ config SND_OPL4_LIB
config SND_VX_LIB
tristate
+ select FW_LOADER
select SND_HWDEP
select SND_PCM
@@ -35,7 +36,6 @@ config SND_PCSP
tristate "PC-Speaker support (READ HELP!)"
depends on PCSPKR_PLATFORM && X86 && HIGH_RES_TIMERS
depends on INPUT
- depends on EXPERIMENTAL
select SND_PCM
help
If you don't have a sound card in your computer, you can include a
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 0fe6d64ff84..3d822328d38 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -120,7 +120,6 @@ struct loopback_pcm {
unsigned int last_drift;
unsigned long last_jiffies;
struct timer_list timer;
- spinlock_t timer_lock;
};
static struct platform_device *devices[SNDRV_CARDS];
@@ -166,12 +165,12 @@ static inline unsigned int get_rate_shift(struct loopback_pcm *dpcm)
return get_setup(dpcm)->rate_shift;
}
+/* call in cable->lock */
static void loopback_timer_start(struct loopback_pcm *dpcm)
{
unsigned long tick;
unsigned int rate_shift = get_rate_shift(dpcm);
- spin_lock(&dpcm->timer_lock);
if (rate_shift != dpcm->pcm_rate_shift) {
dpcm->pcm_rate_shift = rate_shift;
dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
@@ -184,15 +183,13 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
dpcm->timer.expires = jiffies + tick;
add_timer(&dpcm->timer);
- spin_unlock(&dpcm->timer_lock);
}
+/* call in cable->lock */
static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
{
- spin_lock(&dpcm->timer_lock);
del_timer(&dpcm->timer);
dpcm->timer.expires = 0;
- spin_unlock(&dpcm->timer_lock);
}
#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
@@ -274,8 +271,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running |= stream;
cable->pause &= ~stream;
- spin_unlock(&cable->lock);
loopback_timer_start(dpcm);
+ spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
break;
@@ -283,23 +280,23 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running &= ~stream;
cable->pause &= ~stream;
- spin_unlock(&cable->lock);
loopback_timer_stop(dpcm);
+ spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
spin_lock(&cable->lock);
cable->pause |= stream;
- spin_unlock(&cable->lock);
loopback_timer_stop(dpcm);
+ spin_unlock(&cable->lock);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
spin_lock(&cable->lock);
dpcm->last_jiffies = jiffies;
cable->pause &= ~stream;
- spin_unlock(&cable->lock);
loopback_timer_start(dpcm);
+ spin_unlock(&cable->lock);
break;
default:
return -EINVAL;
@@ -477,6 +474,7 @@ static inline void bytepos_finish(struct loopback_pcm *dpcm,
dpcm->buf_pos %= dpcm->pcm_buffer_size;
}
+/* call in cable->lock */
static unsigned int loopback_pos_update(struct loopback_cable *cable)
{
struct loopback_pcm *dpcm_play =
@@ -485,9 +483,7 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
cable->streams[SNDRV_PCM_STREAM_CAPTURE];
unsigned long delta_play = 0, delta_capt = 0;
unsigned int running, count1, count2;
- unsigned long flags;
- spin_lock_irqsave(&cable->lock, flags);
running = cable->running ^ cable->pause;
if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
delta_play = jiffies - dpcm_play->last_jiffies;
@@ -529,32 +525,39 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
bytepos_finish(dpcm_play, count1);
bytepos_finish(dpcm_capt, count1);
unlock:
- spin_unlock_irqrestore(&cable->lock, flags);
return running;
}
static void loopback_timer_function(unsigned long data)
{
struct loopback_pcm *dpcm = (struct loopback_pcm *)data;
- unsigned int running;
+ unsigned long flags;
- running = loopback_pos_update(dpcm->cable);
- if (running & (1 << dpcm->substream->stream)) {
+ spin_lock_irqsave(&dpcm->cable->lock, flags);
+ if (loopback_pos_update(dpcm->cable) & (1 << dpcm->substream->stream)) {
loopback_timer_start(dpcm);
if (dpcm->period_update_pending) {
dpcm->period_update_pending = 0;
+ spin_unlock_irqrestore(&dpcm->cable->lock, flags);
+ /* need to unlock before calling below */
snd_pcm_period_elapsed(dpcm->substream);
+ return;
}
}
+ spin_unlock_irqrestore(&dpcm->cable->lock, flags);
}
static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback_pcm *dpcm = runtime->private_data;
+ snd_pcm_uframes_t pos;
+ spin_lock(&dpcm->cable->lock);
loopback_pos_update(dpcm->cable);
- return bytes_to_frames(runtime, dpcm->buf_pos);
+ pos = dpcm->buf_pos;
+ spin_unlock(&dpcm->cable->lock);
+ return bytes_to_frames(runtime, pos);
}
static struct snd_pcm_hardware loopback_pcm_hardware =
@@ -672,7 +675,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
dpcm->substream = substream;
setup_timer(&dpcm->timer, loopback_timer_function,
(unsigned long)dpcm);
- spin_lock_init(&dpcm->timer_lock);
cable = loopback->cables[substream->number][dev];
if (!cable) {
@@ -772,8 +774,8 @@ static struct snd_pcm_ops loopback_capture_ops = {
.mmap = snd_pcm_lib_mmap_vmalloc,
};
-static int __devinit loopback_pcm_new(struct loopback *loopback,
- int device, int substreams)
+static int loopback_pcm_new(struct loopback *loopback,
+ int device, int substreams)
{
struct snd_pcm *pcm;
int err;
@@ -947,7 +949,7 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new loopback_controls[] __devinitdata = {
+static struct snd_kcontrol_new loopback_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "PCM Rate Shift 100000",
@@ -996,7 +998,7 @@ static struct snd_kcontrol_new loopback_controls[] __devinitdata = {
}
};
-static int __devinit loopback_mixer_new(struct loopback *loopback, int notify)
+static int loopback_mixer_new(struct loopback *loopback, int notify)
{
struct snd_card *card = loopback->card;
struct snd_pcm *pcm;
@@ -1109,7 +1111,7 @@ static void print_cable_info(struct snd_info_entry *entry,
mutex_unlock(&loopback->cable_lock);
}
-static int __devinit loopback_proc_new(struct loopback *loopback, int cidx)
+static int loopback_proc_new(struct loopback *loopback, int cidx)
{
char name[32];
struct snd_info_entry *entry;
@@ -1130,7 +1132,7 @@ static int __devinit loopback_proc_new(struct loopback *loopback, int cidx)
#endif
-static int __devinit loopback_probe(struct platform_device *devptr)
+static int loopback_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct loopback *loopback;
@@ -1175,7 +1177,7 @@ static int __devinit loopback_probe(struct platform_device *devptr)
return err;
}
-static int __devexit loopback_remove(struct platform_device *devptr)
+static int loopback_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -1213,7 +1215,7 @@ static SIMPLE_DEV_PM_OPS(loopback_pm, loopback_suspend, loopback_resume);
static struct platform_driver loopback_driver = {
.probe = loopback_probe,
- .remove = __devexit_p(loopback_remove),
+ .remove = loopback_remove,
.driver = {
.name = SND_LOOPBACK_DRIVER,
.owner = THIS_MODULE,
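The aloop changes above fold the per-stream timer lock into cable->lock: callers now hold cable->lock around timer start/stop and position updates, and the timer function releases it before calling snd_pcm_period_elapsed(), which may take PCM locks of its own. The shape of that pattern, reduced to a pthread sketch (names and types are illustrative, not the driver's):

#include <pthread.h>

struct cable {
        pthread_mutex_t lock;
        int period_pending;
};

static void period_callback(void)
{
        /* stands in for snd_pcm_period_elapsed(); may acquire unrelated locks */
}

void timer_function(struct cable *c)
{
        int fire = 0;

        pthread_mutex_lock(&c->lock);
        if (c->period_pending) {
                c->period_pending = 0;
                fire = 1;
        }
        pthread_mutex_unlock(&c->lock); /* release before the callback */

        if (fire)
                period_callback();
}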
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 54bb6644a59..fd798f75360 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -134,6 +134,9 @@ struct snd_dummy {
spinlock_t mixer_lock;
int mixer_volume[MIXER_ADDR_LAST+1][2];
int capture_source[MIXER_ADDR_LAST+1][2];
+ int iobox;
+ struct snd_kcontrol *cd_volume_ctl;
+ struct snd_kcontrol *cd_switch_ctl;
const struct dummy_timer_ops *timer_ops;
};
@@ -685,8 +688,8 @@ static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
.page = dummy_pcm_page,
};
-static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
- int substreams)
+static int snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
+ int substreams)
{
struct snd_pcm *pcm;
struct snd_pcm_ops *ops;
@@ -817,6 +820,57 @@ static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
return change;
}
+static int snd_dummy_iobox_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ const char *const names[] = { "None", "CD Player" };
+
+ return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int snd_dummy_iobox_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
+
+ value->value.enumerated.item[0] = dummy->iobox;
+ return 0;
+}
+
+static int snd_dummy_iobox_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol);
+ int changed;
+
+ if (value->value.enumerated.item[0] > 1)
+ return -EINVAL;
+
+ changed = value->value.enumerated.item[0] != dummy->iobox;
+ if (changed) {
+ dummy->iobox = value->value.enumerated.item[0];
+
+ if (dummy->iobox) {
+ dummy->cd_volume_ctl->vd[0].access &=
+ ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ dummy->cd_switch_ctl->vd[0].access &=
+ ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ } else {
+ dummy->cd_volume_ctl->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ dummy->cd_switch_ctl->vd[0].access |=
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ }
+
+ snd_ctl_notify(dummy->card, SNDRV_CTL_EVENT_MASK_INFO,
+ &dummy->cd_volume_ctl->id);
+ snd_ctl_notify(dummy->card, SNDRV_CTL_EVENT_MASK_INFO,
+ &dummy->cd_switch_ctl->id);
+ }
+
+ return changed;
+}
+
static struct snd_kcontrol_new snd_dummy_controls[] = {
DUMMY_VOLUME("Master Volume", 0, MIXER_ADDR_MASTER),
DUMMY_CAPSRC("Master Capture Switch", 0, MIXER_ADDR_MASTER),
@@ -827,22 +881,37 @@ DUMMY_CAPSRC("Line Capture Switch", 0, MIXER_ADDR_LINE),
DUMMY_VOLUME("Mic Volume", 0, MIXER_ADDR_MIC),
DUMMY_CAPSRC("Mic Capture Switch", 0, MIXER_ADDR_MIC),
DUMMY_VOLUME("CD Volume", 0, MIXER_ADDR_CD),
-DUMMY_CAPSRC("CD Capture Switch", 0, MIXER_ADDR_CD)
+DUMMY_CAPSRC("CD Capture Switch", 0, MIXER_ADDR_CD),
+{
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "External I/O Box",
+ .info = snd_dummy_iobox_info,
+ .get = snd_dummy_iobox_get,
+ .put = snd_dummy_iobox_put,
+},
};
-static int __devinit snd_card_dummy_new_mixer(struct snd_dummy *dummy)
+static int snd_card_dummy_new_mixer(struct snd_dummy *dummy)
{
struct snd_card *card = dummy->card;
+ struct snd_kcontrol *kcontrol;
unsigned int idx;
int err;
spin_lock_init(&dummy->mixer_lock);
strcpy(card->mixername, "Dummy Mixer");
+ dummy->iobox = 1;
for (idx = 0; idx < ARRAY_SIZE(snd_dummy_controls); idx++) {
- err = snd_ctl_add(card, snd_ctl_new1(&snd_dummy_controls[idx], dummy));
+ kcontrol = snd_ctl_new1(&snd_dummy_controls[idx], dummy);
+ err = snd_ctl_add(card, kcontrol);
if (err < 0)
return err;
+ if (!strcmp(kcontrol->id.name, "CD Volume"))
+ dummy->cd_volume_ctl = kcontrol;
+ else if (!strcmp(kcontrol->id.name, "CD Capture Switch"))
+ dummy->cd_switch_ctl = kcontrol;
+
}
return 0;
}
@@ -962,7 +1031,7 @@ static void dummy_proc_write(struct snd_info_entry *entry,
}
}
-static void __devinit dummy_proc_init(struct snd_dummy *chip)
+static void dummy_proc_init(struct snd_dummy *chip)
{
struct snd_info_entry *entry;
@@ -977,7 +1046,7 @@ static void __devinit dummy_proc_init(struct snd_dummy *chip)
#define dummy_proc_init(x)
#endif /* CONFIG_SND_DEBUG && CONFIG_PROC_FS */
-static int __devinit snd_dummy_probe(struct platform_device *devptr)
+static int snd_dummy_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct snd_dummy *dummy;
@@ -1057,7 +1126,7 @@ static int __devinit snd_dummy_probe(struct platform_device *devptr)
return err;
}
-static int __devexit snd_dummy_remove(struct platform_device *devptr)
+static int snd_dummy_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -1093,7 +1162,7 @@ static SIMPLE_DEV_PM_OPS(snd_dummy_pm, snd_dummy_suspend, snd_dummy_resume);
static struct platform_driver snd_dummy_driver = {
.probe = snd_dummy_probe,
- .remove = __devexit_p(snd_dummy_remove),
+ .remove = snd_dummy_remove,
.driver = {
.name = SND_DUMMY_DRIVER,
.owner = THIS_MODULE,
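The new "External I/O Box" switch above greys the CD controls in and out by toggling SNDRV_CTL_ELEM_ACCESS_INACTIVE on their access masks and then issuing an INFO notification so mixer applications re-read the elements. The flag handling on its own, as a small sketch (the bit value is a placeholder, not the ALSA constant):

#include <stdio.h>

#define CTL_ACCESS_INACTIVE (1u << 8)   /* placeholder bit value */

static unsigned int set_active(unsigned int access, int active)
{
        return active ? (access & ~CTL_ACCESS_INACTIVE)
                      : (access | CTL_ACCESS_INACTIVE);
}

int main(void)
{
        unsigned int access = 0;

        access = set_active(access, 0); /* I/O box "None": grey the CD controls out */
        printf("inactive=%d\n", !!(access & CTL_ACCESS_INACTIVE));
        access = set_active(access, 1); /* "CD Player" attached: controls usable again */
        printf("inactive=%d\n", !!(access & CTL_ACCESS_INACTIVE));
        return 0;
}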
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index 6c83b1aed28..8125a7e95ee 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1063,7 +1063,7 @@ snd_ml403_ac97cr_codec_write(struct snd_ac97 *ac97, unsigned short reg,
return;
}
-static int __devinit
+static int
snd_ml403_ac97cr_chip_init(struct snd_ml403_ac97cr *ml403_ac97cr)
{
unsigned long end_time;
@@ -1108,7 +1108,7 @@ static int snd_ml403_ac97cr_dev_free(struct snd_device *snddev)
return snd_ml403_ac97cr_free(ml403_ac97cr);
}
-static int __devinit
+static int
snd_ml403_ac97cr_create(struct snd_card *card, struct platform_device *pfdev,
struct snd_ml403_ac97cr **rml403_ac97cr)
{
@@ -1204,7 +1204,7 @@ static void snd_ml403_ac97cr_mixer_free(struct snd_ac97 *ac97)
PDEBUG(INIT_INFO, "mixer_free(): (done)\n");
}
-static int __devinit
+static int
snd_ml403_ac97cr_mixer(struct snd_ml403_ac97cr *ml403_ac97cr)
{
struct snd_ac97_bus *bus;
@@ -1237,7 +1237,7 @@ snd_ml403_ac97cr_mixer(struct snd_ml403_ac97cr *ml403_ac97cr)
return err;
}
-static int __devinit
+static int
snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device,
struct snd_pcm **rpcm)
{
@@ -1268,7 +1268,7 @@ snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device,
return 0;
}
-static int __devinit snd_ml403_ac97cr_probe(struct platform_device *pfdev)
+static int snd_ml403_ac97cr_probe(struct platform_device *pfdev)
{
struct snd_card *card;
struct snd_ml403_ac97cr *ml403_ac97cr = NULL;
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index bc03a2046c9..da1a29bfc85 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -100,7 +100,7 @@ static int snd_mpu401_create(int dev, struct snd_card **rcard)
return err;
}
-static int __devinit snd_mpu401_probe(struct platform_device *devptr)
+static int snd_mpu401_probe(struct platform_device *devptr)
{
int dev = devptr->id;
int err;
@@ -126,7 +126,7 @@ static int __devinit snd_mpu401_probe(struct platform_device *devptr)
return 0;
}
-static int __devexit snd_mpu401_remove(struct platform_device *devptr)
+static int snd_mpu401_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -137,7 +137,7 @@ static int __devexit snd_mpu401_remove(struct platform_device *devptr)
static struct platform_driver snd_mpu401_driver = {
.probe = snd_mpu401_probe,
- .remove = __devexit_p(snd_mpu401_remove),
+ .remove = snd_mpu401_remove,
.driver = {
.name = SND_MPU401_DRIVER,
.owner = THIS_MODULE,
@@ -156,8 +156,8 @@ static struct pnp_device_id snd_mpu401_pnpids[] = {
MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids);
-static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device,
- const struct pnp_device_id *id)
+static int snd_mpu401_pnp(int dev, struct pnp_dev *device,
+ const struct pnp_device_id *id)
{
if (!pnp_port_valid(device, 0) ||
pnp_port_flags(device, 0) & IORESOURCE_DISABLED) {
@@ -182,8 +182,8 @@ static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device,
return 0;
}
-static int __devinit snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
- const struct pnp_device_id *id)
+static int snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
+ const struct pnp_device_id *id)
{
static int dev;
struct snd_card *card;
@@ -211,7 +211,7 @@ static int __devinit snd_mpu401_pnp_probe(struct pnp_dev *pnp_dev,
return -ENODEV;
}
-static void __devexit snd_mpu401_pnp_remove(struct pnp_dev *dev)
+static void snd_mpu401_pnp_remove(struct pnp_dev *dev)
{
struct snd_card *card = (struct snd_card *) pnp_get_drvdata(dev);
@@ -223,7 +223,7 @@ static struct pnp_driver snd_mpu401_pnp_driver = {
.name = "mpu401",
.id_table = snd_mpu401_pnpids,
.probe = snd_mpu401_pnp_probe,
- .remove = __devexit_p(snd_mpu401_pnp_remove),
+ .remove = snd_mpu401_pnp_remove,
};
#else
static struct pnp_driver snd_mpu401_pnp_driver;
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index cad73af3860..9f1815b99a1 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -583,7 +583,7 @@ static irqreturn_t snd_mtpav_irqh(int irq, void *dev_id)
/*
* get ISA resources
*/
-static int __devinit snd_mtpav_get_ISA(struct mtpav * mcard)
+static int snd_mtpav_get_ISA(struct mtpav *mcard)
{
if ((mcard->res_port = request_region(port, 3, "MotuMTPAV MIDI")) == NULL) {
snd_printk(KERN_ERR "MTVAP port 0x%lx is busy\n", port);
@@ -619,8 +619,8 @@ static struct snd_rawmidi_ops snd_mtpav_input = {
* get RAWMIDI resources
*/
-static void __devinit snd_mtpav_set_name(struct mtpav *chip,
- struct snd_rawmidi_substream *substream)
+static void snd_mtpav_set_name(struct mtpav *chip,
+ struct snd_rawmidi_substream *substream)
{
if (substream->number >= 0 && substream->number < chip->num_ports)
sprintf(substream->name, "MTP direct %d", (substream->number % chip->num_ports) + 1);
@@ -634,7 +634,7 @@ static void __devinit snd_mtpav_set_name(struct mtpav *chip,
strcpy(substream->name, "MTP broadcast");
}
-static int __devinit snd_mtpav_get_RAWMIDI(struct mtpav *mcard)
+static int snd_mtpav_get_RAWMIDI(struct mtpav *mcard)
{
int rval;
struct snd_rawmidi *rawmidi;
@@ -691,7 +691,7 @@ static void snd_mtpav_free(struct snd_card *card)
/*
*/
-static int __devinit snd_mtpav_probe(struct platform_device *dev)
+static int snd_mtpav_probe(struct platform_device *dev)
{
struct snd_card *card;
int err;
@@ -746,7 +746,7 @@ static int __devinit snd_mtpav_probe(struct platform_device *dev)
return err;
}
-static int __devexit snd_mtpav_remove(struct platform_device *devptr)
+static int snd_mtpav_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -757,7 +757,7 @@ static int __devexit snd_mtpav_remove(struct platform_device *devptr)
static struct platform_driver snd_mtpav_driver = {
.probe = snd_mtpav_probe,
- .remove = __devexit_p(snd_mtpav_remove),
+ .remove = snd_mtpav_remove,
.driver = {
.name = SND_MTPAV_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index 2d5514b0a29..4e0dd22ba08 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -83,9 +83,9 @@ static int snd_mts64_free(struct mts64 *mts)
return 0;
}
-static int __devinit snd_mts64_create(struct snd_card *card,
- struct pardevice *pardev,
- struct mts64 **rchip)
+static int snd_mts64_create(struct snd_card *card,
+ struct pardevice *pardev,
+ struct mts64 **rchip)
{
struct mts64 *mts;
@@ -214,7 +214,7 @@ static int mts64_device_ready(struct parport *p)
* 0 init ok
* -EIO failure
*/
-static int __devinit mts64_device_init(struct parport *p)
+static int mts64_device_init(struct parport *p)
{
int i;
@@ -290,7 +290,7 @@ static u8 mts64_map_midi_input(u8 c)
* 0 device found
* -ENODEV no device
*/
-static int __devinit mts64_probe(struct parport *p)
+static int mts64_probe(struct parport *p)
{
u8 c;
@@ -483,7 +483,7 @@ __out:
return changed;
}
-static struct snd_kcontrol_new mts64_ctl_smpte_switch __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Playback Switch",
.index = 0,
@@ -556,7 +556,7 @@ static int snd_mts64_ctl_smpte_time_put(struct snd_kcontrol *kctl,
return changed;
}
-static struct snd_kcontrol_new mts64_ctl_smpte_time_hours __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_time_hours = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Time Hours",
.index = 0,
@@ -567,7 +567,7 @@ static struct snd_kcontrol_new mts64_ctl_smpte_time_hours __devinitdata = {
.put = snd_mts64_ctl_smpte_time_put
};
-static struct snd_kcontrol_new mts64_ctl_smpte_time_minutes __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_time_minutes = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Time Minutes",
.index = 0,
@@ -578,7 +578,7 @@ static struct snd_kcontrol_new mts64_ctl_smpte_time_minutes __devinitdata = {
.put = snd_mts64_ctl_smpte_time_put
};
-static struct snd_kcontrol_new mts64_ctl_smpte_time_seconds __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_time_seconds = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Time Seconds",
.index = 0,
@@ -589,7 +589,7 @@ static struct snd_kcontrol_new mts64_ctl_smpte_time_seconds __devinitdata = {
.put = snd_mts64_ctl_smpte_time_put
};
-static struct snd_kcontrol_new mts64_ctl_smpte_time_frames __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_time_frames = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Time Frames",
.index = 0,
@@ -651,7 +651,7 @@ static int snd_mts64_ctl_smpte_fps_put(struct snd_kcontrol *kctl,
return changed;
}
-static struct snd_kcontrol_new mts64_ctl_smpte_fps __devinitdata = {
+static struct snd_kcontrol_new mts64_ctl_smpte_fps = {
.iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
.name = "SMPTE Fps",
.index = 0,
@@ -663,11 +663,11 @@ static struct snd_kcontrol_new mts64_ctl_smpte_fps __devinitdata = {
};
-static int __devinit snd_mts64_ctl_create(struct snd_card *card,
- struct mts64 *mts)
+static int snd_mts64_ctl_create(struct snd_card *card,
+ struct mts64 *mts)
{
int err, i;
- static struct snd_kcontrol_new *control[] __devinitdata = {
+ static struct snd_kcontrol_new *control[] = {
&mts64_ctl_smpte_switch,
&mts64_ctl_smpte_time_hours,
&mts64_ctl_smpte_time_minutes,
@@ -774,7 +774,7 @@ static struct snd_rawmidi_ops snd_mts64_rawmidi_input_ops = {
};
/* Create and initialize the rawmidi component */
-static int __devinit snd_mts64_rawmidi_create(struct snd_card *card)
+static int snd_mts64_rawmidi_create(struct snd_card *card)
{
struct mts64 *mts = card->private_data;
struct snd_rawmidi *rmidi;
@@ -860,7 +860,7 @@ __out:
spin_unlock(&mts->lock);
}
-static int __devinit snd_mts64_probe_port(struct parport *p)
+static int snd_mts64_probe_port(struct parport *p)
{
struct pardevice *pardev;
int res;
@@ -884,7 +884,7 @@ static int __devinit snd_mts64_probe_port(struct parport *p)
return res;
}
-static void __devinit snd_mts64_attach(struct parport *p)
+static void snd_mts64_attach(struct parport *p)
{
struct platform_device *device;
@@ -940,7 +940,7 @@ static void snd_mts64_card_private_free(struct snd_card *card)
snd_mts64_free(mts);
}
-static int __devinit snd_mts64_probe(struct platform_device *pdev)
+static int snd_mts64_probe(struct platform_device *pdev)
{
struct pardevice *pardev;
struct parport *p;
@@ -1025,7 +1025,7 @@ __err:
return err;
}
-static int __devexit snd_mts64_remove(struct platform_device *pdev)
+static int snd_mts64_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
@@ -1038,7 +1038,7 @@ static int __devexit snd_mts64_remove(struct platform_device *pdev)
static struct platform_driver snd_mts64_driver = {
.probe = snd_mts64_probe,
- .remove = __devexit_p(snd_mts64_remove),
+ .remove = snd_mts64_remove,
.driver = {
.name = PLATFORM_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index ef171295f6d..7a5fdb9b0af 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(nopcm, "Disable PC-Speaker PCM sound. Only beeps remain.");
struct snd_pcsp pcsp_chip;
-static int __devinit snd_pcsp_create(struct snd_card *card)
+static int snd_pcsp_create(struct snd_card *card)
{
static struct snd_device_ops ops = { };
struct timespec tp;
@@ -93,7 +93,7 @@ static int __devinit snd_pcsp_create(struct snd_card *card)
return 0;
}
-static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
+static int snd_card_pcsp_probe(int devnum, struct device *dev)
{
struct snd_card *card;
int err;
@@ -142,7 +142,7 @@ static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
return 0;
}
-static int __devinit alsa_card_pcsp_init(struct device *dev)
+static int alsa_card_pcsp_init(struct device *dev)
{
int err;
@@ -161,12 +161,12 @@ static int __devinit alsa_card_pcsp_init(struct device *dev)
return 0;
}
-static void __devexit alsa_card_pcsp_exit(struct snd_pcsp *chip)
+static void alsa_card_pcsp_exit(struct snd_pcsp *chip)
{
snd_card_free(chip->card);
}
-static int __devinit pcsp_probe(struct platform_device *dev)
+static int pcsp_probe(struct platform_device *dev)
{
int err;
@@ -184,7 +184,7 @@ static int __devinit pcsp_probe(struct platform_device *dev)
return 0;
}
-static int __devexit pcsp_remove(struct platform_device *dev)
+static int pcsp_remove(struct platform_device *dev)
{
struct snd_pcsp *chip = platform_get_drvdata(dev);
alsa_card_pcsp_exit(chip);
@@ -227,7 +227,7 @@ static struct platform_driver pcsp_platform_driver = {
.pm = PCSP_PM_OPS,
},
.probe = pcsp_probe,
- .remove = __devexit_p(pcsp_remove),
+ .remove = pcsp_remove,
.shutdown = pcsp_shutdown,
};
diff --git a/sound/drivers/pcsp/pcsp_input.c b/sound/drivers/pcsp/pcsp_input.c
index b5e2b54c260..b874b0ad99c 100644
--- a/sound/drivers/pcsp/pcsp_input.c
+++ b/sound/drivers/pcsp/pcsp_input.c
@@ -77,7 +77,7 @@ static int pcspkr_input_event(struct input_dev *dev, unsigned int type,
return 0;
}
-int __devinit pcspkr_input_init(struct input_dev **rdev, struct device *dev)
+int pcspkr_input_init(struct input_dev **rdev, struct device *dev)
{
int err;
diff --git a/sound/drivers/pcsp/pcsp_input.h b/sound/drivers/pcsp/pcsp_input.h
index e66738c7833..d692749b8c9 100644
--- a/sound/drivers/pcsp/pcsp_input.h
+++ b/sound/drivers/pcsp/pcsp_input.h
@@ -7,7 +7,7 @@
#ifndef __PCSP_INPUT_H__
#define __PCSP_INPUT_H__
-int __devinit pcspkr_input_init(struct input_dev **rdev, struct device *dev);
+int pcspkr_input_init(struct input_dev **rdev, struct device *dev);
int pcspkr_input_remove(struct input_dev *dev);
void pcspkr_stop_sound(void);
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 434981dd4a6..29ebaa4ec0f 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -334,7 +334,7 @@ static struct snd_pcm_ops snd_pcsp_playback_ops = {
.pointer = snd_pcsp_playback_pointer,
};
-int __devinit snd_pcsp_new_pcm(struct snd_pcsp *chip)
+int snd_pcsp_new_pcm(struct snd_pcsp *chip)
{
int err;
diff --git a/sound/drivers/pcsp/pcsp_mixer.c b/sound/drivers/pcsp/pcsp_mixer.c
index 6f633f4f3b9..f1e1defc09b 100644
--- a/sound/drivers/pcsp/pcsp_mixer.c
+++ b/sound/drivers/pcsp/pcsp_mixer.c
@@ -119,17 +119,17 @@ static int pcsp_pcspkr_put(struct snd_kcontrol *kcontrol,
.put = pcsp_##ctl_type##_put, \
}
-static struct snd_kcontrol_new __devinitdata snd_pcsp_controls_pcm[] = {
+static struct snd_kcontrol_new snd_pcsp_controls_pcm[] = {
PCSP_MIXER_CONTROL(enable, "Master Playback Switch"),
PCSP_MIXER_CONTROL(treble, "BaseFRQ Playback Volume"),
};
-static struct snd_kcontrol_new __devinitdata snd_pcsp_controls_spkr[] = {
+static struct snd_kcontrol_new snd_pcsp_controls_spkr[] = {
PCSP_MIXER_CONTROL(pcspkr, "Beep Playback Switch"),
};
-static int __devinit snd_pcsp_ctls_add(struct snd_pcsp *chip,
- struct snd_kcontrol_new *ctls, int num)
+static int snd_pcsp_ctls_add(struct snd_pcsp *chip,
+ struct snd_kcontrol_new *ctls, int num)
{
int i, err;
struct snd_card *card = chip->card;
@@ -141,7 +141,7 @@ static int __devinit snd_pcsp_ctls_add(struct snd_pcsp *chip,
return 0;
}
-int __devinit snd_pcsp_new_mixer(struct snd_pcsp *chip, int nopcm)
+int snd_pcsp_new_mixer(struct snd_pcsp *chip, int nopcm)
{
int err;
struct snd_card *card = chip->card;
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index 8364855ed14..991018df713 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -96,9 +96,9 @@ static int portman_free(struct portman *pm)
return 0;
}
-static int __devinit portman_create(struct snd_card *card,
- struct pardevice *pardev,
- struct portman **rchip)
+static int portman_create(struct snd_card *card,
+ struct pardevice *pardev,
+ struct portman **rchip)
{
struct portman *pm;
@@ -561,7 +561,7 @@ static struct snd_rawmidi_ops snd_portman_midi_input = {
};
/* Create and initialize the rawmidi component */
-static int __devinit snd_portman_rawmidi_create(struct snd_card *card)
+static int snd_portman_rawmidi_create(struct snd_card *card)
{
struct portman *pm = card->private_data;
struct snd_rawmidi *rmidi;
@@ -648,7 +648,7 @@ static void snd_portman_interrupt(void *userdata)
spin_unlock(&pm->reg_lock);
}
-static int __devinit snd_portman_probe_port(struct parport *p)
+static int snd_portman_probe_port(struct parport *p)
{
struct pardevice *pardev;
int res;
@@ -672,7 +672,7 @@ static int __devinit snd_portman_probe_port(struct parport *p)
return res ? -EIO : 0;
}
-static void __devinit snd_portman_attach(struct parport *p)
+static void snd_portman_attach(struct parport *p)
{
struct platform_device *device;
@@ -728,7 +728,7 @@ static void snd_portman_card_private_free(struct snd_card *card)
portman_free(pm);
}
-static int __devinit snd_portman_probe(struct platform_device *pdev)
+static int snd_portman_probe(struct platform_device *pdev)
{
struct pardevice *pardev;
struct parport *p;
@@ -814,7 +814,7 @@ __err:
return err;
}
-static int __devexit snd_portman_remove(struct platform_device *pdev)
+static int snd_portman_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
@@ -827,7 +827,7 @@ static int __devexit snd_portman_remove(struct platform_device *pdev)
static struct platform_driver snd_portman_driver = {
.probe = snd_portman_probe,
- .remove = __devexit_p(snd_portman_remove),
+ .remove = snd_portman_remove,
.driver = {
.name = PLATFORM_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index 86700671d1a..7425dd8c1f0 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -328,7 +328,7 @@ static void snd_uart16550_buffer_timer(unsigned long data)
* return 0 if found
* return negative error if not found
*/
-static int __devinit snd_uart16550_detect(struct snd_uart16550 *uart)
+static int snd_uart16550_detect(struct snd_uart16550 *uart)
{
unsigned long io_base = uart->base;
int ok;
@@ -783,14 +783,14 @@ static int snd_uart16550_dev_free(struct snd_device *device)
return snd_uart16550_free(uart);
}
-static int __devinit snd_uart16550_create(struct snd_card *card,
- unsigned long iobase,
- int irq,
- unsigned int speed,
- unsigned int base,
- int adaptor,
- int droponfull,
- struct snd_uart16550 **ruart)
+static int snd_uart16550_create(struct snd_card *card,
+ unsigned long iobase,
+ int irq,
+ unsigned int speed,
+ unsigned int base,
+ int adaptor,
+ int droponfull,
+ struct snd_uart16550 **ruart)
{
static struct snd_device_ops ops = {
.dev_free = snd_uart16550_dev_free,
@@ -863,7 +863,7 @@ static int __devinit snd_uart16550_create(struct snd_card *card,
return 0;
}
-static void __devinit snd_uart16550_substreams(struct snd_rawmidi_str *stream)
+static void snd_uart16550_substreams(struct snd_rawmidi_str *stream)
{
struct snd_rawmidi_substream *substream;
@@ -872,9 +872,9 @@ static void __devinit snd_uart16550_substreams(struct snd_rawmidi_str *stream)
}
}
-static int __devinit snd_uart16550_rmidi(struct snd_uart16550 *uart, int device,
- int outs, int ins,
- struct snd_rawmidi **rmidi)
+static int snd_uart16550_rmidi(struct snd_uart16550 *uart, int device,
+ int outs, int ins,
+ struct snd_rawmidi **rmidi)
{
struct snd_rawmidi *rrawmidi;
int err;
@@ -899,7 +899,7 @@ static int __devinit snd_uart16550_rmidi(struct snd_uart16550 *uart, int device,
return 0;
}
-static int __devinit snd_serial_probe(struct platform_device *devptr)
+static int snd_serial_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct snd_uart16550 *uart;
@@ -982,7 +982,7 @@ static int __devinit snd_serial_probe(struct platform_device *devptr)
return err;
}
-static int __devexit snd_serial_remove(struct platform_device *devptr)
+static int snd_serial_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -993,7 +993,7 @@ static int __devexit snd_serial_remove(struct platform_device *devptr)
static struct platform_driver snd_serial_driver = {
.probe = snd_serial_probe,
- .remove = __devexit_p( snd_serial_remove),
+ .remove = snd_serial_remove,
.driver = {
.name = SND_SERIAL_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/drivers/virmidi.c b/sound/drivers/virmidi.c
index d7d514df905..cc4be88d731 100644
--- a/sound/drivers/virmidi.c
+++ b/sound/drivers/virmidi.c
@@ -83,7 +83,7 @@ struct snd_card_virmidi {
static struct platform_device *devices[SNDRV_CARDS];
-static int __devinit snd_virmidi_probe(struct platform_device *devptr)
+static int snd_virmidi_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct snd_card_virmidi *vmidi;
@@ -129,7 +129,7 @@ static int __devinit snd_virmidi_probe(struct platform_device *devptr)
return err;
}
-static int __devexit snd_virmidi_remove(struct platform_device *devptr)
+static int snd_virmidi_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -140,7 +140,7 @@ static int __devexit snd_virmidi_remove(struct platform_device *devptr)
static struct platform_driver snd_virmidi_driver = {
.probe = snd_virmidi_probe,
- .remove = __devexit_p(snd_virmidi_remove),
+ .remove = snd_virmidi_remove,
.driver = {
.name = SND_VIRMIDI_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/drivers/vx/vx_hwdep.c b/sound/drivers/vx/vx_hwdep.c
index 4a1fae99ac5..3014b86362b 100644
--- a/sound/drivers/vx/vx_hwdep.c
+++ b/sound/drivers/vx/vx_hwdep.c
@@ -29,8 +29,6 @@
#include <sound/hwdep.h>
#include <sound/vx_core.h>
-#ifdef SND_VX_FW_LOADER
-
MODULE_FIRMWARE("vx/bx_1_vxp.b56");
MODULE_FIRMWARE("vx/bx_1_vp4.b56");
MODULE_FIRMWARE("vx/x1_1_vx2.xlx");
@@ -119,142 +117,5 @@ void snd_vx_free_firmware(struct vx_core *chip)
#endif
}
-#else /* old style firmware loading */
-
-static int vx_hwdep_dsp_status(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_status *info)
-{
- static char *type_ids[VX_TYPE_NUMS] = {
- [VX_TYPE_BOARD] = "vxboard",
- [VX_TYPE_V2] = "vx222",
- [VX_TYPE_MIC] = "vx222",
- [VX_TYPE_VXPOCKET] = "vxpocket",
- [VX_TYPE_VXP440] = "vxp440",
- };
- struct vx_core *vx = hw->private_data;
-
- if (snd_BUG_ON(!type_ids[vx->type]))
- return -EINVAL;
- strcpy(info->id, type_ids[vx->type]);
- if (vx_is_pcmcia(vx))
- info->num_dsps = 4;
- else
- info->num_dsps = 3;
- if (vx->chip_status & VX_STAT_CHIP_INIT)
- info->chip_ready = 1;
- info->version = VX_DRIVER_VERSION;
- return 0;
-}
-
-static void free_fw(const struct firmware *fw)
-{
- if (fw) {
- vfree(fw->data);
- kfree(fw);
- }
-}
-
-static int vx_hwdep_dsp_load(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_image *dsp)
-{
- struct vx_core *vx = hw->private_data;
- int index, err;
- struct firmware *fw;
-
- if (snd_BUG_ON(!vx->ops->load_dsp))
- return -ENXIO;
-
- fw = kmalloc(sizeof(*fw), GFP_KERNEL);
- if (! fw) {
- snd_printk(KERN_ERR "cannot allocate firmware\n");
- return -ENOMEM;
- }
- fw->size = dsp->length;
- fw->data = vmalloc(fw->size);
- if (! fw->data) {
- snd_printk(KERN_ERR "cannot allocate firmware image (length=%d)\n",
- (int)fw->size);
- kfree(fw);
- return -ENOMEM;
- }
- if (copy_from_user((void *)fw->data, dsp->image, dsp->length)) {
- free_fw(fw);
- return -EFAULT;
- }
-
- index = dsp->index;
- if (! vx_is_pcmcia(vx))
- index++;
- err = vx->ops->load_dsp(vx, index, fw);
- if (err < 0) {
- free_fw(fw);
- return err;
- }
-#ifdef CONFIG_PM
- vx->firmware[index] = fw;
-#else
- free_fw(fw);
-#endif
-
- if (index == 1)
- vx->chip_status |= VX_STAT_XILINX_LOADED;
- if (index < 3)
- return 0;
-
- /* ok, we reached to the last one */
- /* create the devices if not built yet */
- if (! (vx->chip_status & VX_STAT_DEVICE_INIT)) {
- if ((err = snd_vx_pcm_new(vx)) < 0)
- return err;
-
- if ((err = snd_vx_mixer_new(vx)) < 0)
- return err;
-
- if (vx->ops->add_controls)
- if ((err = vx->ops->add_controls(vx)) < 0)
- return err;
-
- if ((err = snd_card_register(vx->card)) < 0)
- return err;
-
- vx->chip_status |= VX_STAT_DEVICE_INIT;
- }
- vx->chip_status |= VX_STAT_CHIP_INIT;
- return 0;
-}
-
-
-/* exported */
-int snd_vx_setup_firmware(struct vx_core *chip)
-{
- int err;
- struct snd_hwdep *hw;
-
- if ((err = snd_hwdep_new(chip->card, SND_VX_HWDEP_ID, 0, &hw)) < 0)
- return err;
-
- hw->iface = SNDRV_HWDEP_IFACE_VX;
- hw->private_data = chip;
- hw->ops.dsp_status = vx_hwdep_dsp_status;
- hw->ops.dsp_load = vx_hwdep_dsp_load;
- hw->exclusive = 1;
- sprintf(hw->name, "VX Loader (%s)", chip->card->driver);
- chip->hwdep = hw;
-
- return snd_card_register(chip->card);
-}
-
-/* exported */
-void snd_vx_free_firmware(struct vx_core *chip)
-{
-#ifdef CONFIG_PM
- int i;
- for (i = 0; i < 4; i++)
- free_fw(chip->firmware[i]);
-#endif
-}
-
-#endif /* SND_VX_FW_LOADER */
-
EXPORT_SYMBOL(snd_vx_setup_firmware);
EXPORT_SYMBOL(snd_vx_free_firmware);
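With SND_VX_LIB now selecting FW_LOADER, the legacy hwdep DSP-image upload path is removed and the remaining code always obtains images through request_firmware(). A minimal sketch of that call pattern (the file name matches one of the MODULE_FIRMWARE() lines above; the surrounding function is hypothetical):

#include <linux/firmware.h>
#include <linux/device.h>

static int load_one_image(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "vx/bx_1_vxp.b56", dev);
        if (err < 0)
                return err;             /* image missing or loader failed */

        /* ... feed fw->data / fw->size to the DSP here ... */

        release_firmware(fw);
        return 0;
}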
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 26071489970..ea063e1f872 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -33,4 +33,17 @@ config SND_ISIGHT
To compile this driver as a module, choose M here: the module
will be called snd-isight.
+config SND_SCS1X
+ tristate "Stanton Control System 1 MIDI"
+ select SND_PCM
+ select SND_RAWMIDI
+ select SND_FIREWIRE_LIB
+ help
+ Say Y here to include support for the MIDI ports of the Stanton
+ SCS.1d/SCS.1m DJ controllers. (SCS.1m audio is still handled
+ by FFADO.)
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-scs1x.
+
endif # SND_FIREWIRE
diff --git a/sound/firewire/Makefile b/sound/firewire/Makefile
index d71ed8935f7..460179df5bb 100644
--- a/sound/firewire/Makefile
+++ b/sound/firewire/Makefile
@@ -2,7 +2,9 @@ snd-firewire-lib-objs := lib.o iso-resources.o packets-buffer.o \
fcp.o cmp.o amdtp.o
snd-firewire-speakers-objs := speakers.o
snd-isight-objs := isight.o
+snd-scs1x-objs := scs1x.o
obj-$(CONFIG_SND_FIREWIRE_LIB) += snd-firewire-lib.o
obj-$(CONFIG_SND_FIREWIRE_SPEAKERS) += snd-firewire-speakers.o
obj-$(CONFIG_SND_ISIGHT) += snd-isight.o
+obj-$(CONFIG_SND_SCS1X) += snd-scs1x.o
diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
new file mode 100644
index 00000000000..844a555c3b1
--- /dev/null
+++ b/sound/firewire/scs1x.c
@@ -0,0 +1,527 @@
+/*
+ * Stanton Control System 1 MIDI driver
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+#include "lib.h"
+
+#define OUI_STANTON 0x001260
+#define MODEL_SCS_1M 0x001000
+#define MODEL_SCS_1D 0x002000
+
+#define HSS1394_ADDRESS 0xc007dedadadaULL
+#define HSS1394_MAX_PACKET_SIZE 64
+
+#define HSS1394_TAG_USER_DATA 0x00
+#define HSS1394_TAG_CHANGE_ADDRESS 0xf1
+
+struct scs {
+ struct snd_card *card;
+ struct fw_unit *unit;
+ struct fw_address_handler hss_handler;
+ struct fw_transaction transaction;
+ bool transaction_running;
+ bool output_idle;
+ u8 output_status;
+ u8 output_bytes;
+ bool output_escaped;
+ bool output_escape_high_nibble;
+ u8 input_escape_count;
+ struct snd_rawmidi_substream *output;
+ struct snd_rawmidi_substream *input;
+ struct tasklet_struct tasklet;
+ wait_queue_head_t idle_wait;
+ u8 *buffer;
+};
+
+static const u8 sysex_escape_prefix[] = {
+ 0xf0, /* SysEx begin */
+ 0x00, 0x01, 0x60, /* Stanton DJ */
+ 0x48, 0x53, 0x53, /* "HSS" */
+};
+
+static int scs_output_open(struct snd_rawmidi_substream *stream)
+{
+ struct scs *scs = stream->rmidi->private_data;
+
+ scs->output_status = 0;
+ scs->output_bytes = 1;
+ scs->output_escaped = false;
+
+ return 0;
+}
+
+static int scs_output_close(struct snd_rawmidi_substream *stream)
+{
+ return 0;
+}
+
+static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
+{
+ struct scs *scs = stream->rmidi->private_data;
+
+ ACCESS_ONCE(scs->output) = up ? stream : NULL;
+ if (up) {
+ scs->output_idle = false;
+ tasklet_schedule(&scs->tasklet);
+ }
+}
+
+static void scs_write_callback(struct fw_card *card, int rcode,
+ void *data, size_t length, void *callback_data)
+{
+ struct scs *scs = callback_data;
+
+ if (rcode == RCODE_GENERATION) {
+ /* TODO: retry this packet */
+ }
+
+ scs->transaction_running = false;
+ tasklet_schedule(&scs->tasklet);
+}
+
+static bool is_valid_running_status(u8 status)
+{
+ return status >= 0x80 && status <= 0xef;
+}
+
+static bool is_one_byte_cmd(u8 status)
+{
+ return status == 0xf6 ||
+ status >= 0xf8;
+}
+
+static bool is_two_bytes_cmd(u8 status)
+{
+ return (status >= 0xc0 && status <= 0xdf) ||
+ status == 0xf1 ||
+ status == 0xf3;
+}
+
+static bool is_three_bytes_cmd(u8 status)
+{
+ return (status >= 0x80 && status <= 0xbf) ||
+ (status >= 0xe0 && status <= 0xef) ||
+ status == 0xf2;
+}
+
+static bool is_invalid_cmd(u8 status)
+{
+ return status == 0xf4 ||
+ status == 0xf5 ||
+ status == 0xf9 ||
+ status == 0xfd;
+}
+
+static void scs_output_tasklet(unsigned long data)
+{
+ struct scs *scs = (void *)data;
+ struct snd_rawmidi_substream *stream;
+ unsigned int i;
+ u8 byte;
+ struct fw_device *dev;
+ int generation;
+
+ if (scs->transaction_running)
+ return;
+
+ stream = ACCESS_ONCE(scs->output);
+ if (!stream) {
+ scs->output_idle = true;
+ wake_up(&scs->idle_wait);
+ return;
+ }
+
+ i = scs->output_bytes;
+ for (;;) {
+ if (snd_rawmidi_transmit(stream, &byte, 1) != 1) {
+ scs->output_bytes = i;
+ scs->output_idle = true;
+ wake_up(&scs->idle_wait);
+ return;
+ }
+ /*
+ * Convert from real MIDI to what I think the device expects (no
+ * running status, one command per packet, unescaped SysExs).
+ */
+ if (scs->output_escaped && byte < 0x80) {
+ if (scs->output_escape_high_nibble) {
+ if (i < HSS1394_MAX_PACKET_SIZE) {
+ scs->buffer[i] = byte << 4;
+ scs->output_escape_high_nibble = false;
+ }
+ } else {
+ scs->buffer[i++] |= byte & 0x0f;
+ scs->output_escape_high_nibble = true;
+ }
+ } else if (byte < 0x80) {
+ if (i == 1) {
+ if (!is_valid_running_status(scs->output_status))
+ continue;
+ scs->buffer[0] = HSS1394_TAG_USER_DATA;
+ scs->buffer[i++] = scs->output_status;
+ }
+ scs->buffer[i++] = byte;
+ if ((i == 3 && is_two_bytes_cmd(scs->output_status)) ||
+ (i == 4 && is_three_bytes_cmd(scs->output_status)))
+ break;
+ if (i == 1 + ARRAY_SIZE(sysex_escape_prefix) &&
+ !memcmp(scs->buffer + 1, sysex_escape_prefix,
+ ARRAY_SIZE(sysex_escape_prefix))) {
+ scs->output_escaped = true;
+ scs->output_escape_high_nibble = true;
+ i = 0;
+ }
+ if (i >= HSS1394_MAX_PACKET_SIZE)
+ i = 1;
+ } else if (byte == 0xf7) {
+ if (scs->output_escaped) {
+ if (i >= 1 && scs->output_escape_high_nibble &&
+ scs->buffer[0] != HSS1394_TAG_CHANGE_ADDRESS)
+ break;
+ } else {
+ if (i > 1 && scs->output_status == 0xf0) {
+ scs->buffer[i++] = 0xf7;
+ break;
+ }
+ }
+ i = 1;
+ scs->output_escaped = false;
+ } else if (!is_invalid_cmd(byte) &&
+ byte < 0xf8) {
+ i = 1;
+ scs->buffer[0] = HSS1394_TAG_USER_DATA;
+ scs->buffer[i++] = byte;
+ scs->output_status = byte;
+ scs->output_escaped = false;
+ if (is_one_byte_cmd(byte))
+ break;
+ }
+ }
+ scs->output_bytes = 1;
+ scs->output_escaped = false;
+
+ scs->transaction_running = true;
+ dev = fw_parent_device(scs->unit);
+ generation = dev->generation;
+ smp_rmb(); /* node_id vs. generation */
+ fw_send_request(dev->card, &scs->transaction, TCODE_WRITE_BLOCK_REQUEST,
+ dev->node_id, generation, dev->max_speed,
+ HSS1394_ADDRESS, scs->buffer, i,
+ scs_write_callback, scs);
+}
+
+static void scs_output_drain(struct snd_rawmidi_substream *stream)
+{
+ struct scs *scs = stream->rmidi->private_data;
+
+ wait_event(scs->idle_wait, scs->output_idle);
+}
+
+static struct snd_rawmidi_ops output_ops = {
+ .open = scs_output_open,
+ .close = scs_output_close,
+ .trigger = scs_output_trigger,
+ .drain = scs_output_drain,
+};
+
+static int scs_input_open(struct snd_rawmidi_substream *stream)
+{
+ struct scs *scs = stream->rmidi->private_data;
+
+ scs->input_escape_count = 0;
+
+ return 0;
+}
+
+static int scs_input_close(struct snd_rawmidi_substream *stream)
+{
+ return 0;
+}
+
+static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
+{
+ struct scs *scs = stream->rmidi->private_data;
+
+ ACCESS_ONCE(scs->input) = up ? stream : NULL;
+}
+
+static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
+ u8 byte)
+{
+ u8 nibbles[2];
+
+ nibbles[0] = byte >> 4;
+ nibbles[1] = byte & 0x0f;
+ snd_rawmidi_receive(stream, nibbles, 2);
+}
+
+static void scs_input_midi_byte(struct scs *scs,
+ struct snd_rawmidi_substream *stream,
+ u8 byte)
+{
+ if (scs->input_escape_count > 0) {
+ scs_input_escaped_byte(stream, byte);
+ scs->input_escape_count--;
+ if (scs->input_escape_count == 0)
+ snd_rawmidi_receive(stream, (const u8[]) { 0xf7 }, 1);
+ } else if (byte == 0xf9) {
+ snd_rawmidi_receive(stream, sysex_escape_prefix,
+ ARRAY_SIZE(sysex_escape_prefix));
+ scs_input_escaped_byte(stream, 0x00);
+ scs_input_escaped_byte(stream, 0xf9);
+ scs->input_escape_count = 3;
+ } else {
+ snd_rawmidi_receive(stream, &byte, 1);
+ }
+}
+
+static void scs_input_packet(struct scs *scs,
+ struct snd_rawmidi_substream *stream,
+ const u8 *data, unsigned int bytes)
+{
+ unsigned int i;
+
+ if (data[0] == HSS1394_TAG_USER_DATA) {
+ for (i = 1; i < bytes; ++i)
+ scs_input_midi_byte(scs, stream, data[i]);
+ } else {
+ snd_rawmidi_receive(stream, sysex_escape_prefix,
+ ARRAY_SIZE(sysex_escape_prefix));
+ for (i = 0; i < bytes; ++i)
+ scs_input_escaped_byte(stream, data[i]);
+ snd_rawmidi_receive(stream, (const u8[]) { 0xf7 }, 1);
+ }
+}
+
+static struct snd_rawmidi_ops input_ops = {
+ .open = scs_input_open,
+ .close = scs_input_close,
+ .trigger = scs_input_trigger,
+};
+
+static int scs_create_midi(struct scs *scs)
+{
+ struct snd_rawmidi *rmidi;
+ int err;
+
+ err = snd_rawmidi_new(scs->card, "SCS.1x", 0, 1, 1, &rmidi);
+ if (err < 0)
+ return err;
+ snprintf(rmidi->name, sizeof(rmidi->name),
+ "%s MIDI", scs->card->shortname);
+ rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT |
+ SNDRV_RAWMIDI_INFO_INPUT |
+ SNDRV_RAWMIDI_INFO_DUPLEX;
+ rmidi->private_data = scs;
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &output_ops);
+ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &input_ops);
+
+ return 0;
+}
+
+static void handle_hss(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ unsigned long long offset, void *data, size_t length,
+ void *callback_data)
+{
+ struct scs *scs = callback_data;
+ struct snd_rawmidi_substream *stream;
+
+ if (offset != scs->hss_handler.offset) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+ if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
+ tcode != TCODE_WRITE_BLOCK_REQUEST) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ return;
+ }
+
+ if (length >= 1) {
+ stream = ACCESS_ONCE(scs->input);
+ if (stream)
+ scs_input_packet(scs, stream, data, length);
+ }
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static int scs_init_hss_address(struct scs *scs)
+{
+ __be64 data;
+ int err;
+
+ data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
+ scs->hss_handler.offset);
+ err = snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
+ HSS1394_ADDRESS, &data, 8);
+ if (err < 0)
+ dev_err(&scs->unit->device, "HSS1394 communication failed\n");
+
+ return err;
+}
+
+static void scs_card_free(struct snd_card *card)
+{
+ struct scs *scs = card->private_data;
+
+ fw_core_remove_address_handler(&scs->hss_handler);
+ kfree(scs->buffer);
+}
+
+static int scs_probe(struct device *unit_dev)
+{
+ struct fw_unit *unit = fw_unit(unit_dev);
+ struct fw_device *fw_dev = fw_parent_device(unit);
+ struct snd_card *card;
+ struct scs *scs;
+ int err;
+
+ err = snd_card_create(-16, NULL, THIS_MODULE, sizeof(*scs), &card);
+ if (err < 0)
+ return err;
+ snd_card_set_dev(card, unit_dev);
+
+ scs = card->private_data;
+ scs->card = card;
+ scs->unit = unit;
+ tasklet_init(&scs->tasklet, scs_output_tasklet, (unsigned long)scs);
+ init_waitqueue_head(&scs->idle_wait);
+ scs->output_idle = true;
+
+ scs->buffer = kmalloc(HSS1394_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!scs->buffer)
+ goto err_card;
+
+ scs->hss_handler.length = HSS1394_MAX_PACKET_SIZE;
+ scs->hss_handler.address_callback = handle_hss;
+ scs->hss_handler.callback_data = scs;
+ err = fw_core_add_address_handler(&scs->hss_handler,
+ &fw_high_memory_region);
+ if (err < 0)
+ goto err_buffer;
+
+ card->private_free = scs_card_free;
+
+ strcpy(card->driver, "SCS.1x");
+ strcpy(card->shortname, "SCS.1x");
+ fw_csr_string(unit->directory, CSR_MODEL,
+ card->shortname, sizeof(card->shortname));
+ snprintf(card->longname, sizeof(card->longname),
+ "Stanton DJ %s (GUID %08x%08x) at %s, S%d",
+ card->shortname, fw_dev->config_rom[3], fw_dev->config_rom[4],
+ dev_name(&unit->device), 100 << fw_dev->max_speed);
+ strcpy(card->mixername, card->shortname);
+
+ err = scs_init_hss_address(scs);
+ if (err < 0)
+ goto err_card;
+
+ err = scs_create_midi(scs);
+ if (err < 0)
+ goto err_card;
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto err_card;
+
+ dev_set_drvdata(unit_dev, scs);
+
+ return 0;
+
+err_buffer:
+ kfree(scs->buffer);
+err_card:
+ snd_card_free(card);
+ return err;
+}
+
+static int scs_remove(struct device *dev)
+{
+ struct scs *scs = dev_get_drvdata(dev);
+
+ snd_card_disconnect(scs->card);
+
+ ACCESS_ONCE(scs->output) = NULL;
+ ACCESS_ONCE(scs->input) = NULL;
+
+ wait_event(scs->idle_wait, scs->output_idle);
+
+ tasklet_kill(&scs->tasklet);
+
+ snd_card_free_when_closed(scs->card);
+
+ return 0;
+}
+
+static void scs_update(struct fw_unit *unit)
+{
+ struct scs *scs = dev_get_drvdata(&unit->device);
+ __be64 data;
+
+ data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
+ scs->hss_handler.offset);
+ snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
+ HSS1394_ADDRESS, &data, 8);
+}
+
+static const struct ieee1394_device_id scs_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_STANTON,
+ .model_id = MODEL_SCS_1M,
+ },
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_STANTON,
+ .model_id = MODEL_SCS_1D,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(ieee1394, scs_id_table);
+
+MODULE_DESCRIPTION("SCS.1x MIDI driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+
+static struct fw_driver scs_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .bus = &fw_bus_type,
+ .probe = scs_probe,
+ .remove = scs_remove,
+ },
+ .update = scs_update,
+ .id_table = scs_id_table,
+};
+
+static int __init alsa_scs1x_init(void)
+{
+ return driver_register(&scs_driver.driver);
+}
+
+static void __exit alsa_scs1x_exit(void)
+{
+ driver_unregister(&scs_driver.driver);
+}
+
+module_init(alsa_scs1x_init);
+module_exit(alsa_scs1x_exit);
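
For reference, the SysEx escaping that the new scs1x.c code above implements in both directions amounts to splitting each data byte into two four-bit nibbles and folding such pairs back together again. A minimal stand-alone sketch of that round trip (the helper names here are illustrative only, not part of the driver):

#include <stdint.h>

/* Input direction, as in scs_input_escaped_byte(): one raw byte becomes
 * two nibble bytes, high nibble first. */
void escape_byte(uint8_t byte, uint8_t out[2])
{
        out[0] = byte >> 4;
        out[1] = byte & 0x0f;
}

/* Output direction, as in the escaped branch of the transmit loop:
 * "buffer[i] = byte << 4" followed by "buffer[i++] |= byte & 0x0f". */
uint8_t unescape_nibbles(uint8_t high, uint8_t low)
{
        return (uint8_t)((high << 4) | (low & 0x0f));
}

So escape_byte(0xf9, ...) yields 0x0f, 0x09 — exactly the pair that the 0xf9 branch in scs_input_midi_byte() hands to the rawmidi layer after sysex_escape_prefix, while the escaped branch of the output loop performs the inverse when an application sends such nibble pairs back towards the device.
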
diff --git a/sound/firewire/speakers.c b/sound/firewire/speakers.c
index 297244e658d..d6846557f27 100644
--- a/sound/firewire/speakers.c
+++ b/sound/firewire/speakers.c
@@ -663,7 +663,7 @@ static void fwspk_card_free(struct snd_card *card)
mutex_destroy(&fwspk->mutex);
}
-static const struct device_info *__devinit fwspk_detect(struct fw_device *dev)
+static const struct device_info *fwspk_detect(struct fw_device *dev)
{
static const struct device_info griffin_firewave = {
.driver_name = "FireWave",
@@ -699,7 +699,7 @@ static const struct device_info *__devinit fwspk_detect(struct fw_device *dev)
return NULL;
}
-static int __devinit fwspk_probe(struct device *unit_dev)
+static int fwspk_probe(struct device *unit_dev)
{
struct fw_unit *unit = fw_unit(unit_dev);
struct fw_device *fw_dev = fw_parent_device(unit);
@@ -770,7 +770,7 @@ error:
return err;
}
-static int __devexit fwspk_remove(struct device *dev)
+static int fwspk_remove(struct device *dev)
{
struct fwspk *fwspk = dev_get_drvdata(dev);
@@ -834,7 +834,7 @@ static struct fw_driver fwspk_driver = {
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
.probe = fwspk_probe,
- .remove = __devexit_p(fwspk_remove),
+ .remove = fwspk_remove,
},
.update = fwspk_bus_reset,
.id_table = fwspk_id_table,
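
The speakers.c hunk above, like every ISA hunk further down, only strips the old CONFIG_HOTPLUG-era markers: __devinit, __devexit, __devinitdata and the __devexit_p() wrapper around remove callbacks. The wrapper existed because pre-3.8 <linux/init.h> defined it approximately as:

#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x)  x       /* remove function is kept, so keep the pointer */
#else
#define __devexit_p(x)  NULL    /* function was discarded, so never reference it */
#endif

With CONFIG_HOTPLUG now always enabled these annotations are no-ops, which is why ".remove = __devexit_p(fwspk_remove)" can safely become the plain ".remove = fwspk_remove" seen here and in all the driver hunks that follow.
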
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index a38d9643e9d..affa1348065 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -425,7 +425,7 @@ config SND_WAVEFRONT
config SND_MSND_PINNACLE
tristate "Turtle Beach MultiSound Pinnacle/Fiji driver"
- depends on X86 && EXPERIMENTAL
+ depends on X86
select FW_LOADER
select SND_MPU401_UART
select SND_PCM
@@ -438,7 +438,7 @@ config SND_MSND_PINNACLE
config SND_MSND_CLASSIC
tristate "Support for Turtle Beach MultiSound Classic, Tahiti, Monterey"
- depends on X86 && EXPERIMENTAL
+ depends on X86
select FW_LOADER
select SND_MPU401_UART
select SND_PCM
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index 2c2f829c3fd..26ce26a5884 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -94,8 +94,8 @@ MODULE_DEVICE_TABLE(pnp_card, snd_ad1816a_pnpids);
#define DRIVER_NAME "snd-card-ad1816a"
-static int __devinit snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -135,8 +135,8 @@ static int __devinit snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card,
return 0;
}
-static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
int error;
struct snd_card *card;
@@ -217,10 +217,10 @@ static int __devinit snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard
return 0;
}
-static unsigned int __devinitdata ad1816a_devices;
+static unsigned int ad1816a_devices;
-static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_ad1816a_pnp_detect(struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
static int dev;
int res;
@@ -238,7 +238,7 @@ static int __devinit snd_ad1816a_pnp_detect(struct pnp_card_link *card,
return -ENODEV;
}
-static void __devexit snd_ad1816a_pnp_remove(struct pnp_card_link * pcard)
+static void snd_ad1816a_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -270,7 +270,7 @@ static struct pnp_card_driver ad1816a_pnpc_driver = {
.name = "ad1816a",
.id_table = snd_ad1816a_pnpids,
.probe = snd_ad1816a_pnp_detect,
- .remove = __devexit_p(snd_ad1816a_pnp_remove),
+ .remove = snd_ad1816a_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_ad1816a_pnp_suspend,
.resume = snd_ad1816a_pnp_resume,
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index db64df6023e..f0fd98e695e 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -537,7 +537,7 @@ void snd_ad1816a_resume(struct snd_ad1816a *chip)
}
#endif
-static int __devinit snd_ad1816a_probe(struct snd_ad1816a *chip)
+static int snd_ad1816a_probe(struct snd_ad1816a *chip)
{
unsigned long flags;
@@ -583,7 +583,7 @@ static int snd_ad1816a_dev_free(struct snd_device *device)
return snd_ad1816a_free(chip);
}
-static const char __devinit *snd_ad1816a_chip_id(struct snd_ad1816a *chip)
+static const char *snd_ad1816a_chip_id(struct snd_ad1816a *chip)
{
switch (chip->hardware) {
case AD1816A_HW_AD1816A: return "AD1816A";
@@ -596,9 +596,9 @@ static const char __devinit *snd_ad1816a_chip_id(struct snd_ad1816a *chip)
}
}
-int __devinit snd_ad1816a_create(struct snd_card *card,
- unsigned long port, int irq, int dma1, int dma2,
- struct snd_ad1816a *chip)
+int snd_ad1816a_create(struct snd_card *card,
+ unsigned long port, int irq, int dma1, int dma2,
+ struct snd_ad1816a *chip)
{
static struct snd_device_ops ops = {
.dev_free = snd_ad1816a_dev_free,
@@ -675,7 +675,7 @@ static struct snd_pcm_ops snd_ad1816a_capture_ops = {
.pointer = snd_ad1816a_capture_pointer,
};
-int __devinit snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm)
+int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm)
{
int error;
struct snd_pcm *pcm;
@@ -702,7 +702,8 @@ int __devinit snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_p
return 0;
}
-int __devinit snd_ad1816a_timer(struct snd_ad1816a *chip, int device, struct snd_timer **rtimer)
+int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
+ struct snd_timer **rtimer)
{
struct snd_timer *timer;
struct snd_timer_id tid;
@@ -923,7 +924,7 @@ static const DECLARE_TLV_DB_SCALE(db_scale_6bit, -9450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_rec_gain, 0, 150, 0);
-static struct snd_kcontrol_new snd_ad1816a_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ad1816a_controls[] = {
AD1816A_DOUBLE("Master Playback Switch", AD1816A_MASTER_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Master Playback Volume", AD1816A_MASTER_ATT, 8, 0, 31, 1,
db_scale_5bit),
@@ -969,7 +970,7 @@ AD1816A_SINGLE("3D Control - Switch", AD1816A_3D_PHAT_CTRL, 15, 1, 1),
AD1816A_SINGLE("3D Control - Level", AD1816A_3D_PHAT_CTRL, 0, 15, 0),
};
-int __devinit snd_ad1816a_mixer(struct snd_ad1816a *chip)
+int snd_ad1816a_mixer(struct snd_ad1816a *chip)
{
struct snd_card *card;
unsigned int idx;
diff --git a/sound/isa/ad1848/ad1848.c b/sound/isa/ad1848/ad1848.c
index 2af77faefbb..c214ecf4540 100644
--- a/sound/isa/ad1848/ad1848.c
+++ b/sound/isa/ad1848/ad1848.c
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(dma1, "DMA1 # for " CRD_NAME " driver.");
module_param_array(thinkpad, bool, NULL, 0444);
MODULE_PARM_DESC(thinkpad, "Enable only for the onboard CS4248 of IBM Thinkpad 360/750/755 series.");
-static int __devinit snd_ad1848_match(struct device *dev, unsigned int n)
+static int snd_ad1848_match(struct device *dev, unsigned int n)
{
if (!enable[n])
return 0;
@@ -84,7 +84,7 @@ static int __devinit snd_ad1848_match(struct device *dev, unsigned int n)
return 1;
}
-static int __devinit snd_ad1848_probe(struct device *dev, unsigned int n)
+static int snd_ad1848_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_wss *chip;
@@ -132,7 +132,7 @@ out: snd_card_free(card);
return error;
}
-static int __devexit snd_ad1848_remove(struct device *dev, unsigned int n)
+static int snd_ad1848_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -164,7 +164,7 @@ static int snd_ad1848_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_ad1848_driver = {
.match = snd_ad1848_match,
.probe = snd_ad1848_probe,
- .remove = __devexit_p(snd_ad1848_remove),
+ .remove = snd_ad1848_remove,
#ifdef CONFIG_PM
.suspend = snd_ad1848_suspend,
.resume = snd_ad1848_resume,
diff --git a/sound/isa/adlib.c b/sound/isa/adlib.c
index 4d50c69f329..d2654554373 100644
--- a/sound/isa/adlib.c
+++ b/sound/isa/adlib.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard.");
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver.");
-static int __devinit snd_adlib_match(struct device *dev, unsigned int n)
+static int snd_adlib_match(struct device *dev, unsigned int n)
{
if (!enable[n])
return 0;
@@ -47,7 +47,7 @@ static void snd_adlib_free(struct snd_card *card)
release_and_free_resource(card->private_data);
}
-static int __devinit snd_adlib_probe(struct device *dev, unsigned int n)
+static int snd_adlib_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_opl3 *opl3;
@@ -98,7 +98,7 @@ out: snd_card_free(card);
return error;
}
-static int __devexit snd_adlib_remove(struct device *dev, unsigned int n)
+static int snd_adlib_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -108,7 +108,7 @@ static int __devexit snd_adlib_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_adlib_driver = {
.match = snd_adlib_match,
.probe = snd_adlib_probe,
- .remove = __devexit_p(snd_adlib_remove),
+ .remove = snd_adlib_remove,
.driver = {
.name = DEV_NAME
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index f7cdaf51512..10f08a18fe3 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -117,9 +117,9 @@ static struct pnp_card_device_id snd_als100_pnpids[] = {
MODULE_DEVICE_TABLE(pnp_card, snd_als100_pnpids);
-static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -183,9 +183,9 @@ static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
return 0;
}
-static int __devinit snd_card_als100_probe(int dev,
- struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_card_als100_probe(int dev,
+ struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
int error;
struct snd_sb *chip;
@@ -286,10 +286,10 @@ static int __devinit snd_card_als100_probe(int dev,
return 0;
}
-static unsigned int __devinitdata als100_devices;
+static unsigned int als100_devices;
-static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_als100_pnp_detect(struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
static int dev;
int res;
@@ -307,7 +307,7 @@ static int __devinit snd_als100_pnp_detect(struct pnp_card_link *card,
return -ENODEV;
}
-static void __devexit snd_als100_pnp_remove(struct pnp_card_link * pcard)
+static void snd_als100_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -344,7 +344,7 @@ static struct pnp_card_driver als100_pnpc_driver = {
.name = "als100",
.id_table = snd_als100_pnpids,
.probe = snd_als100_pnp_detect,
- .remove = __devexit_p(snd_als100_pnp_remove),
+ .remove = snd_als100_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_als100_pnp_suspend,
.resume = snd_als100_pnp_resume,
diff --git a/sound/isa/azt2320.c b/sound/isa/azt2320.c
index 6a2c78ef1d8..db301ff94ec 100644
--- a/sound/isa/azt2320.c
+++ b/sound/isa/azt2320.c
@@ -99,9 +99,9 @@ MODULE_DEVICE_TABLE(pnp_card, snd_azt2320_pnpids);
#define DRIVER_NAME "snd-card-azt2320"
-static int __devinit snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -147,7 +147,7 @@ static int __devinit snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acar
}
/* same of snd_sbdsp_command by Jaroslav Kysela */
-static int __devinit snd_card_azt2320_command(unsigned long port, unsigned char val)
+static int snd_card_azt2320_command(unsigned long port, unsigned char val)
{
int i;
unsigned long limit;
@@ -161,7 +161,7 @@ static int __devinit snd_card_azt2320_command(unsigned long port, unsigned char
return -EBUSY;
}
-static int __devinit snd_card_azt2320_enable_wss(unsigned long port)
+static int snd_card_azt2320_enable_wss(unsigned long port)
{
int error;
@@ -174,9 +174,9 @@ static int __devinit snd_card_azt2320_enable_wss(unsigned long port)
return 0;
}
-static int __devinit snd_card_azt2320_probe(int dev,
- struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_card_azt2320_probe(int dev,
+ struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
int error;
struct snd_card *card;
@@ -264,10 +264,10 @@ static int __devinit snd_card_azt2320_probe(int dev,
return 0;
}
-static unsigned int __devinitdata azt2320_devices;
+static unsigned int azt2320_devices;
-static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_azt2320_pnp_detect(struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
static int dev;
int res;
@@ -285,7 +285,7 @@ static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card,
return -ENODEV;
}
-static void __devexit snd_azt2320_pnp_remove(struct pnp_card_link * pcard)
+static void snd_azt2320_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -320,7 +320,7 @@ static struct pnp_card_driver azt2320_pnpc_driver = {
.name = "azt2320",
.id_table = snd_azt2320_pnpids,
.probe = snd_azt2320_pnp_detect,
- .remove = __devexit_p(snd_azt2320_pnp_remove),
+ .remove = snd_azt2320_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_azt2320_pnp_suspend,
.resume = snd_azt2320_pnp_resume,
diff --git a/sound/isa/cmi8328.c b/sound/isa/cmi8328.c
index bde60139bb9..a7369fe19a6 100644
--- a/sound/isa/cmi8328.c
+++ b/sound/isa/cmi8328.c
@@ -140,7 +140,7 @@ static void snd_cmi8328_cfg_restore(u16 port, u8 cfg[])
snd_cmi8328_cfg_write(port, CFG3, cfg[2]);
}
-static int __devinit snd_cmi8328_mixer(struct snd_wss *chip)
+static int snd_cmi8328_mixer(struct snd_wss *chip)
{
struct snd_card *card;
struct snd_ctl_elem_id id1, id2;
@@ -212,7 +212,7 @@ int array_find_l(long array[], long item)
return -1;
}
-static int __devinit snd_cmi8328_probe(struct device *pdev, unsigned int ndev)
+static int snd_cmi8328_probe(struct device *pdev, unsigned int ndev)
{
struct snd_card *card;
struct snd_opl3 *opl3;
@@ -401,7 +401,7 @@ error:
return err;
}
-static int __devexit snd_cmi8328_remove(struct device *pdev, unsigned int dev)
+static int snd_cmi8328_remove(struct device *pdev, unsigned int dev)
{
struct snd_card *card = dev_get_drvdata(pdev);
struct snd_cmi8328 *cmi = card->private_data;
@@ -459,7 +459,7 @@ static int snd_cmi8328_resume(struct device *pdev, unsigned int n)
static struct isa_driver snd_cmi8328_driver = {
.probe = snd_cmi8328_probe,
- .remove = __devexit_p(snd_cmi8328_remove),
+ .remove = snd_cmi8328_remove,
#ifdef CONFIG_PM
.suspend = snd_cmi8328_suspend,
.resume = snd_cmi8328_resume,
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 7bd5e337ee9..c707c52268a 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -193,7 +193,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_cmi8330_pnpids);
#endif
-static struct snd_kcontrol_new snd_cmi8330_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmi8330_controls[] = {
WSS_DOUBLE("Master Playback Volume", 0,
CMI8330_MASTVOL, CMI8330_MASTVOL, 4, 0, 15, 0),
WSS_SINGLE("Loud Playback Switch", 0,
@@ -249,7 +249,7 @@ WSS_SINGLE(SNDRV_CTL_NAME_IEC958("Input ", PLAYBACK, SWITCH), 0,
};
#ifdef ENABLE_SB_MIXER
-static struct sbmix_elem cmi8330_sb_mixers[] __devinitdata = {
+static struct sbmix_elem cmi8330_sb_mixers[] = {
SB_DOUBLE("SB Master Playback Volume", SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31),
SB_DOUBLE("Tone Control - Bass", SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15),
SB_DOUBLE("Tone Control - Treble", SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15),
@@ -267,7 +267,7 @@ SB_DOUBLE("SB Playback Volume", SB_DSP4_OGAIN_DEV, (SB_DSP4_OGAIN_DEV + 1), 6, 6
SB_SINGLE("SB Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1),
};
-static unsigned char cmi8330_sb_init_values[][2] __devinitdata = {
+static unsigned char cmi8330_sb_init_values[][2] = {
{ SB_DSP4_MASTER_DEV + 0, 0 },
{ SB_DSP4_MASTER_DEV + 1, 0 },
{ SB_DSP4_PCM_DEV + 0, 0 },
@@ -281,7 +281,7 @@ static unsigned char cmi8330_sb_init_values[][2] __devinitdata = {
};
-static int __devinit cmi8330_add_sb_mixers(struct snd_sb *chip)
+static int cmi8330_add_sb_mixers(struct snd_sb *chip)
{
int idx, err;
unsigned long flags;
@@ -306,7 +306,7 @@ static int __devinit cmi8330_add_sb_mixers(struct snd_sb *chip)
}
#endif
-static int __devinit snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330 *acard)
+static int snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330 *acard)
{
unsigned int idx;
int err;
@@ -329,9 +329,9 @@ static int __devinit snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330
}
#ifdef CONFIG_PNP
-static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -437,7 +437,7 @@ static int snd_cmi8330_capture_open(struct snd_pcm_substream *substream)
return chip->streams[SNDRV_PCM_STREAM_CAPTURE].open(substream);
}
-static int __devinit snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *chip)
+static int snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *chip)
{
struct snd_pcm *pcm;
const struct snd_pcm_ops *ops;
@@ -532,7 +532,7 @@ static int snd_cmi8330_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
+static int snd_cmi8330_probe(struct snd_card *card, int dev)
{
struct snd_cmi8330 *acard;
int i, err;
@@ -613,8 +613,8 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
return snd_card_register(card);
}
-static int __devinit snd_cmi8330_isa_match(struct device *pdev,
- unsigned int dev)
+static int snd_cmi8330_isa_match(struct device *pdev,
+ unsigned int dev)
{
if (!enable[dev] || is_isapnp_selected(dev))
return 0;
@@ -629,8 +629,8 @@ static int __devinit snd_cmi8330_isa_match(struct device *pdev,
return 1;
}
-static int __devinit snd_cmi8330_isa_probe(struct device *pdev,
- unsigned int dev)
+static int snd_cmi8330_isa_probe(struct device *pdev,
+ unsigned int dev)
{
struct snd_card *card;
int err;
@@ -647,8 +647,8 @@ static int __devinit snd_cmi8330_isa_probe(struct device *pdev,
return 0;
}
-static int __devexit snd_cmi8330_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_cmi8330_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -673,7 +673,7 @@ static int snd_cmi8330_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_cmi8330_driver = {
.match = snd_cmi8330_isa_match,
.probe = snd_cmi8330_isa_probe,
- .remove = __devexit_p(snd_cmi8330_isa_remove),
+ .remove = snd_cmi8330_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_cmi8330_isa_suspend,
.resume = snd_cmi8330_isa_resume,
@@ -685,8 +685,8 @@ static struct isa_driver snd_cmi8330_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -717,7 +717,7 @@ static int __devinit snd_cmi8330_pnp_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_cmi8330_pnp_remove(struct pnp_card_link * pcard)
+static void snd_cmi8330_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -740,7 +740,7 @@ static struct pnp_card_driver cmi8330_pnpc_driver = {
.name = "cmi8330",
.id_table = snd_cmi8330_pnpids,
.probe = snd_cmi8330_pnp_detect,
- .remove = __devexit_p(snd_cmi8330_pnp_remove),
+ .remove = snd_cmi8330_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_cmi8330_pnp_suspend,
.resume = snd_cmi8330_pnp_resume,
diff --git a/sound/isa/cs423x/cs4231.c b/sound/isa/cs423x/cs4231.c
index 99dda45e82f..aa7a5d86e48 100644
--- a/sound/isa/cs423x/cs4231.c
+++ b/sound/isa/cs423x/cs4231.c
@@ -68,7 +68,7 @@ MODULE_PARM_DESC(dma1, "DMA1 # for " CRD_NAME " driver.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA2 # for " CRD_NAME " driver.");
-static int __devinit snd_cs4231_match(struct device *dev, unsigned int n)
+static int snd_cs4231_match(struct device *dev, unsigned int n)
{
if (!enable[n])
return 0;
@@ -88,7 +88,7 @@ static int __devinit snd_cs4231_match(struct device *dev, unsigned int n)
return 1;
}
-static int __devinit snd_cs4231_probe(struct device *dev, unsigned int n)
+static int snd_cs4231_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_wss *chip;
@@ -148,7 +148,7 @@ out: snd_card_free(card);
return error;
}
-static int __devexit snd_cs4231_remove(struct device *dev, unsigned int n)
+static int snd_cs4231_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -180,7 +180,7 @@ static int snd_cs4231_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_cs4231_driver = {
.match = snd_cs4231_match,
.probe = snd_cs4231_probe,
- .remove = __devexit_p(snd_cs4231_remove),
+ .remove = snd_cs4231_remove,
#ifdef CONFIG_PM
.suspend = snd_cs4231_suspend,
.resume = snd_cs4231_resume,
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 740c51a1ed7..252e9fb37db 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -251,7 +251,7 @@ static struct pnp_card_device_id snd_cs423x_pnpids[] = {
MODULE_DEVICE_TABLE(pnp_card, snd_cs423x_pnpids);
/* WSS initialization */
-static int __devinit snd_cs423x_pnp_init_wss(int dev, struct pnp_dev *pdev)
+static int snd_cs423x_pnp_init_wss(int dev, struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
printk(KERN_ERR IDENT " WSS PnP configure failed for WSS (out of resources?)\n");
@@ -272,7 +272,7 @@ static int __devinit snd_cs423x_pnp_init_wss(int dev, struct pnp_dev *pdev)
}
/* CTRL initialization */
-static int __devinit snd_cs423x_pnp_init_ctrl(int dev, struct pnp_dev *pdev)
+static int snd_cs423x_pnp_init_ctrl(int dev, struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
printk(KERN_ERR IDENT " CTRL PnP configure failed for WSS (out of resources?)\n");
@@ -284,7 +284,7 @@ static int __devinit snd_cs423x_pnp_init_ctrl(int dev, struct pnp_dev *pdev)
}
/* MPU initialization */
-static int __devinit snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
+static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
printk(KERN_ERR IDENT " MPU401 PnP configure failed for WSS (out of resources?)\n");
@@ -303,9 +303,9 @@ static int __devinit snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
return 0;
}
-static int __devinit snd_card_cs423x_pnp(int dev, struct snd_card_cs4236 *acard,
- struct pnp_dev *pdev,
- struct pnp_dev *cdev)
+static int snd_card_cs423x_pnp(int dev, struct snd_card_cs4236 *acard,
+ struct pnp_dev *pdev,
+ struct pnp_dev *cdev)
{
acard->wss = pdev;
if (snd_cs423x_pnp_init_wss(dev, acard->wss) < 0)
@@ -317,9 +317,9 @@ static int __devinit snd_card_cs423x_pnp(int dev, struct snd_card_cs4236 *acard,
return 0;
}
-static int __devinit snd_card_cs423x_pnpc(int dev, struct snd_card_cs4236 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_card_cs423x_pnpc(int dev, struct snd_card_cs4236 *acard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->wss == NULL)
@@ -378,7 +378,7 @@ static int snd_cs423x_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit snd_cs423x_probe(struct snd_card *card, int dev)
+static int snd_cs423x_probe(struct snd_card *card, int dev)
{
struct snd_card_cs4236 *acard;
struct snd_pcm *pcm;
@@ -456,8 +456,8 @@ static int __devinit snd_cs423x_probe(struct snd_card *card, int dev)
return snd_card_register(card);
}
-static int __devinit snd_cs423x_isa_match(struct device *pdev,
- unsigned int dev)
+static int snd_cs423x_isa_match(struct device *pdev,
+ unsigned int dev)
{
if (!enable[dev] || is_isapnp_selected(dev))
return 0;
@@ -481,8 +481,8 @@ static int __devinit snd_cs423x_isa_match(struct device *pdev,
return 1;
}
-static int __devinit snd_cs423x_isa_probe(struct device *pdev,
- unsigned int dev)
+static int snd_cs423x_isa_probe(struct device *pdev,
+ unsigned int dev)
{
struct snd_card *card;
int err;
@@ -500,8 +500,8 @@ static int __devinit snd_cs423x_isa_probe(struct device *pdev,
return 0;
}
-static int __devexit snd_cs423x_isa_remove(struct device *pdev,
- unsigned int dev)
+static int snd_cs423x_isa_remove(struct device *pdev,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -540,7 +540,7 @@ static int snd_cs423x_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver cs423x_isa_driver = {
.match = snd_cs423x_isa_match,
.probe = snd_cs423x_isa_probe,
- .remove = __devexit_p(snd_cs423x_isa_remove),
+ .remove = snd_cs423x_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_cs423x_isa_suspend,
.resume = snd_cs423x_isa_resume,
@@ -552,8 +552,8 @@ static struct isa_driver cs423x_isa_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
{
static int dev;
int err;
@@ -597,7 +597,7 @@ static int __devinit snd_cs423x_pnpbios_detect(struct pnp_dev *pdev,
return 0;
}
-static void __devexit snd_cs423x_pnp_remove(struct pnp_dev *pdev)
+static void snd_cs423x_pnp_remove(struct pnp_dev *pdev)
{
snd_card_free(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -619,15 +619,15 @@ static struct pnp_driver cs423x_pnp_driver = {
.name = "cs423x-pnpbios",
.id_table = snd_cs423x_pnpbiosids,
.probe = snd_cs423x_pnpbios_detect,
- .remove = __devexit_p(snd_cs423x_pnp_remove),
+ .remove = snd_cs423x_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_cs423x_pnp_suspend,
.resume = snd_cs423x_pnp_resume,
#endif
};
-static int __devinit snd_cs423x_pnpc_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_cs423x_pnpc_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -659,7 +659,7 @@ static int __devinit snd_cs423x_pnpc_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_cs423x_pnpc_remove(struct pnp_card_link * pcard)
+static void snd_cs423x_pnpc_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -682,7 +682,7 @@ static struct pnp_card_driver cs423x_pnpc_driver = {
.name = CS423X_ISAPNP_DRIVER,
.id_table = snd_cs423x_pnpids,
.probe = snd_cs423x_pnpc_detect,
- .remove = __devexit_p(snd_cs423x_pnpc_remove),
+ .remove = snd_cs423x_pnpc_remove,
#ifdef CONFIG_PM
.suspend = snd_cs423x_pnpc_suspend,
.resume = snd_cs423x_pnpc_resume,
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index b036e60f62d..102874a703d 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -90,13 +90,13 @@ MODULE_PARM_DESC(dma8, "8-bit DMA # for " CRD_NAME " driver.");
#define is_isapnp_selected(dev) 0
#endif
-static int __devinit snd_es1688_match(struct device *dev, unsigned int n)
+static int snd_es1688_match(struct device *dev, unsigned int n)
{
return enable[n] && !is_isapnp_selected(n);
}
-static int __devinit snd_es1688_legacy_create(struct snd_card *card,
- struct device *dev, unsigned int n)
+static int snd_es1688_legacy_create(struct snd_card *card,
+ struct device *dev, unsigned int n)
{
struct snd_es1688 *chip = card->private_data;
static long possible_ports[] = {0x220, 0x240, 0x260};
@@ -134,7 +134,7 @@ static int __devinit snd_es1688_legacy_create(struct snd_card *card,
return error;
}
-static int __devinit snd_es1688_probe(struct snd_card *card, unsigned int n)
+static int snd_es1688_probe(struct snd_card *card, unsigned int n)
{
struct snd_es1688 *chip = card->private_data;
struct snd_opl3 *opl3;
@@ -182,7 +182,7 @@ static int __devinit snd_es1688_probe(struct snd_card *card, unsigned int n)
return snd_card_register(card);
}
-static int __devinit snd_es1688_isa_probe(struct device *dev, unsigned int n)
+static int snd_es1688_isa_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
int error;
@@ -210,7 +210,7 @@ out:
return error;
}
-static int __devexit snd_es1688_isa_remove(struct device *dev, unsigned int n)
+static int snd_es1688_isa_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -220,7 +220,7 @@ static int __devexit snd_es1688_isa_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_es1688_driver = {
.match = snd_es1688_match,
.probe = snd_es1688_isa_probe,
- .remove = __devexit_p(snd_es1688_isa_remove),
+ .remove = snd_es1688_isa_remove,
#if 0 /* FIXME */
.suspend = snd_es1688_suspend,
.resume = snd_es1688_resume,
@@ -233,9 +233,9 @@ static struct isa_driver snd_es1688_driver = {
static int snd_es968_pnp_is_probed;
#ifdef CONFIG_PNP
-static int __devinit snd_card_es968_pnp(struct snd_card *card, unsigned int n,
- struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_card_es968_pnp(struct snd_card *card, unsigned int n,
+ struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
struct snd_es1688 *chip = card->private_data;
struct pnp_dev *pdev;
@@ -258,8 +258,8 @@ static int __devinit snd_card_es968_pnp(struct snd_card *card, unsigned int n,
mpu_irq[n], dma8[n], ES1688_HW_AUTO);
}
-static int __devinit snd_es968_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
struct snd_card *card;
static unsigned int dev;
@@ -295,7 +295,7 @@ static int __devinit snd_es968_pnp_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_es968_pnp_remove(struct pnp_card_link * pcard)
+static void snd_es968_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -338,7 +338,7 @@ static struct pnp_card_driver es968_pnpc_driver = {
.name = DEV_NAME " PnP",
.id_table = snd_es968_pnpids,
.probe = snd_es968_pnp_detect,
- .remove = __devexit_p(snd_es968_pnp_remove),
+ .remove = snd_es968_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_es968_pnp_suspend,
.resume = snd_es968_pnp_resume,
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index c20baafd9b7..24380efe31a 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -348,7 +348,7 @@ static inline int snd_es18xx_mixer_writable(struct snd_es18xx *chip, unsigned ch
}
-static int __devinit snd_es18xx_reset(struct snd_es18xx *chip)
+static int snd_es18xx_reset(struct snd_es18xx *chip)
{
int i;
outb(0x03, chip->port + 0x06);
@@ -1363,7 +1363,7 @@ static struct snd_kcontrol_new snd_es18xx_hw_volume_controls[] = {
ES18XX_SINGLE("Hardware Master Volume Split", 0, 0x64, 7, 1, 0),
};
-static int __devinit snd_es18xx_config_read(struct snd_es18xx *chip, unsigned char reg)
+static int snd_es18xx_config_read(struct snd_es18xx *chip, unsigned char reg)
{
int data;
@@ -1372,8 +1372,8 @@ static int __devinit snd_es18xx_config_read(struct snd_es18xx *chip, unsigned ch
return data;
}
-static void __devinit snd_es18xx_config_write(struct snd_es18xx *chip,
- unsigned char reg, unsigned char data)
+static void snd_es18xx_config_write(struct snd_es18xx *chip,
+ unsigned char reg, unsigned char data)
{
/* No need for spinlocks, this function is used only in
otherwise protected init code */
@@ -1384,9 +1384,9 @@ static void __devinit snd_es18xx_config_write(struct snd_es18xx *chip,
#endif
}
-static int __devinit snd_es18xx_initialize(struct snd_es18xx *chip,
- unsigned long mpu_port,
- unsigned long fm_port)
+static int snd_es18xx_initialize(struct snd_es18xx *chip,
+ unsigned long mpu_port,
+ unsigned long fm_port)
{
int mask = 0;
@@ -1549,7 +1549,7 @@ static int __devinit snd_es18xx_initialize(struct snd_es18xx *chip,
return 0;
}
-static int __devinit snd_es18xx_identify(struct snd_es18xx *chip)
+static int snd_es18xx_identify(struct snd_es18xx *chip)
{
int hi,lo;
@@ -1618,9 +1618,9 @@ static int __devinit snd_es18xx_identify(struct snd_es18xx *chip)
return 0;
}
-static int __devinit snd_es18xx_probe(struct snd_es18xx *chip,
- unsigned long mpu_port,
- unsigned long fm_port)
+static int snd_es18xx_probe(struct snd_es18xx *chip,
+ unsigned long mpu_port,
+ unsigned long fm_port)
{
if (snd_es18xx_identify(chip) < 0) {
snd_printk(KERN_ERR PFX "[0x%lx] ESS chip not found\n", chip->port);
@@ -1680,8 +1680,8 @@ static struct snd_pcm_ops snd_es18xx_capture_ops = {
.pointer = snd_es18xx_capture_pointer,
};
-static int __devinit snd_es18xx_pcm(struct snd_card *card, int device,
- struct snd_pcm **rpcm)
+static int snd_es18xx_pcm(struct snd_card *card, int device,
+ struct snd_pcm **rpcm)
{
struct snd_es18xx *chip = card->private_data;
struct snd_pcm *pcm;
@@ -1777,11 +1777,11 @@ static int snd_es18xx_dev_free(struct snd_device *device)
return snd_es18xx_free(device->card);
}
-static int __devinit snd_es18xx_new_device(struct snd_card *card,
- unsigned long port,
- unsigned long mpu_port,
- unsigned long fm_port,
- int irq, int dma1, int dma2)
+static int snd_es18xx_new_device(struct snd_card *card,
+ unsigned long port,
+ unsigned long mpu_port,
+ unsigned long fm_port,
+ int irq, int dma1, int dma2)
{
struct snd_es18xx *chip = card->private_data;
static struct snd_device_ops ops = {
@@ -1839,7 +1839,7 @@ static int __devinit snd_es18xx_new_device(struct snd_card *card,
return 0;
}
-static int __devinit snd_es18xx_mixer(struct snd_card *card)
+static int snd_es18xx_mixer(struct snd_card *card)
{
struct snd_es18xx *chip = card->private_data;
int err;
@@ -2016,7 +2016,7 @@ static struct pnp_device_id snd_audiodrive_pnpbiosids[] = {
MODULE_DEVICE_TABLE(pnp, snd_audiodrive_pnpbiosids);
/* PnP main device initialization */
-static int __devinit snd_audiodrive_pnp_init_main(int dev, struct pnp_dev *pdev)
+static int snd_audiodrive_pnp_init_main(int dev, struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
snd_printk(KERN_ERR PFX "PnP configure failure (out of resources?)\n");
@@ -2043,8 +2043,8 @@ static int __devinit snd_audiodrive_pnp_init_main(int dev, struct pnp_dev *pdev)
return 0;
}
-static int __devinit snd_audiodrive_pnp(int dev, struct snd_es18xx *chip,
- struct pnp_dev *pdev)
+static int snd_audiodrive_pnp(int dev, struct snd_es18xx *chip,
+ struct pnp_dev *pdev)
{
chip->dev = pdev;
if (snd_audiodrive_pnp_init_main(dev, chip->dev) < 0)
@@ -2073,9 +2073,9 @@ static struct pnp_card_device_id snd_audiodrive_pnpids[] = {
MODULE_DEVICE_TABLE(pnp_card, snd_audiodrive_pnpids);
-static int __devinit snd_audiodrive_pnpc(int dev, struct snd_es18xx *chip,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_audiodrive_pnpc(int dev, struct snd_es18xx *chip,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
chip->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
if (chip->dev == NULL)
@@ -2111,7 +2111,7 @@ static int snd_es18xx_card_new(int dev, struct snd_card **cardp)
sizeof(struct snd_es18xx), cardp);
}
-static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev)
+static int snd_audiodrive_probe(struct snd_card *card, int dev)
{
struct snd_es18xx *chip = card->private_data;
struct snd_opl3 *opl3;
@@ -2169,12 +2169,12 @@ static int __devinit snd_audiodrive_probe(struct snd_card *card, int dev)
return snd_card_register(card);
}
-static int __devinit snd_es18xx_isa_match(struct device *pdev, unsigned int dev)
+static int snd_es18xx_isa_match(struct device *pdev, unsigned int dev)
{
return enable[dev] && !is_isapnp_selected(dev);
}
-static int __devinit snd_es18xx_isa_probe1(int dev, struct device *devptr)
+static int snd_es18xx_isa_probe1(int dev, struct device *devptr)
{
struct snd_card *card;
int err;
@@ -2191,7 +2191,7 @@ static int __devinit snd_es18xx_isa_probe1(int dev, struct device *devptr)
return 0;
}
-static int __devinit snd_es18xx_isa_probe(struct device *pdev, unsigned int dev)
+static int snd_es18xx_isa_probe(struct device *pdev, unsigned int dev)
{
int err;
static int possible_irqs[] = {5, 9, 10, 7, 11, 12, -1};
@@ -2231,8 +2231,8 @@ static int __devinit snd_es18xx_isa_probe(struct device *pdev, unsigned int dev)
}
}
-static int __devexit snd_es18xx_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_es18xx_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -2257,7 +2257,7 @@ static int snd_es18xx_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_es18xx_isa_driver = {
.match = snd_es18xx_isa_match,
.probe = snd_es18xx_isa_probe,
- .remove = __devexit_p(snd_es18xx_isa_remove),
+ .remove = snd_es18xx_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_es18xx_isa_suspend,
.resume = snd_es18xx_isa_resume,
@@ -2269,8 +2269,8 @@ static struct isa_driver snd_es18xx_isa_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_audiodrive_pnp_detect(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int snd_audiodrive_pnp_detect(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
{
static int dev;
int err;
@@ -2302,7 +2302,7 @@ static int __devinit snd_audiodrive_pnp_detect(struct pnp_dev *pdev,
return 0;
}
-static void __devexit snd_audiodrive_pnp_remove(struct pnp_dev * pdev)
+static void snd_audiodrive_pnp_remove(struct pnp_dev *pdev)
{
snd_card_free(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -2323,15 +2323,15 @@ static struct pnp_driver es18xx_pnp_driver = {
.name = "es18xx-pnpbios",
.id_table = snd_audiodrive_pnpbiosids,
.probe = snd_audiodrive_pnp_detect,
- .remove = __devexit_p(snd_audiodrive_pnp_remove),
+ .remove = snd_audiodrive_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_audiodrive_pnp_suspend,
.resume = snd_audiodrive_pnp_resume,
#endif
};
-static int __devinit snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -2363,7 +2363,7 @@ static int __devinit snd_audiodrive_pnpc_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_audiodrive_pnpc_remove(struct pnp_card_link * pcard)
+static void snd_audiodrive_pnpc_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -2387,7 +2387,7 @@ static struct pnp_card_driver es18xx_pnpc_driver = {
.name = "es18xx",
.id_table = snd_audiodrive_pnpids,
.probe = snd_audiodrive_pnpc_detect,
- .remove = __devexit_p(snd_audiodrive_pnpc_remove),
+ .remove = snd_audiodrive_pnpc_remove,
#ifdef CONFIG_PM
.suspend = snd_audiodrive_pnpc_suspend,
.resume = snd_audiodrive_pnpc_resume,
diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
index 55e20782858..672184e3221 100644
--- a/sound/isa/galaxy/galaxy.c
+++ b/sound/isa/galaxy/galaxy.c
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(dma2, "Capture DMA # for " CRD_NAME " driver.");
#define DSP_COMMAND_GET_VERSION 0xe1
-static int __devinit dsp_get_byte(void __iomem *port, u8 *val)
+static int dsp_get_byte(void __iomem *port, u8 *val)
{
int loops = 1000;
@@ -97,7 +97,7 @@ static int __devinit dsp_get_byte(void __iomem *port, u8 *val)
return 0;
}
-static int __devinit dsp_reset(void __iomem *port)
+static int dsp_reset(void __iomem *port)
{
u8 val;
@@ -111,7 +111,7 @@ static int __devinit dsp_reset(void __iomem *port)
return 0;
}
-static int __devinit dsp_command(void __iomem *port, u8 cmd)
+static int dsp_command(void __iomem *port, u8 cmd)
{
int loops = 1000;
@@ -124,7 +124,7 @@ static int __devinit dsp_command(void __iomem *port, u8 cmd)
return 0;
}
-static int __devinit dsp_get_version(void __iomem *port, u8 *major, u8 *minor)
+static int dsp_get_version(void __iomem *port, u8 *major, u8 *minor)
{
int err;
@@ -161,7 +161,7 @@ static int __devinit dsp_get_version(void __iomem *port, u8 *major, u8 *minor)
#define WSS_SIGNATURE 4
-static int __devinit wss_detect(void __iomem *wss_port)
+static int wss_detect(void __iomem *wss_port)
{
if ((ioread8(wss_port + WSS_PORT_SIGNATURE) & 0x3f) != WSS_SIGNATURE)
return -ENODEV;
@@ -204,7 +204,7 @@ struct snd_galaxy {
static u32 config[SNDRV_CARDS];
static u8 wss_config[SNDRV_CARDS];
-static int __devinit snd_galaxy_match(struct device *dev, unsigned int n)
+static int snd_galaxy_match(struct device *dev, unsigned int n)
{
if (!enable[n])
return 0;
@@ -379,7 +379,7 @@ fm:
return 1;
}
-static int __devinit galaxy_init(struct snd_galaxy *galaxy, u8 *type)
+static int galaxy_init(struct snd_galaxy *galaxy, u8 *type)
{
u8 major;
u8 minor;
@@ -411,7 +411,7 @@ static int __devinit galaxy_init(struct snd_galaxy *galaxy, u8 *type)
return 0;
}
-static int __devinit galaxy_set_mode(struct snd_galaxy *galaxy, u8 mode)
+static int galaxy_set_mode(struct snd_galaxy *galaxy, u8 mode)
{
int err;
@@ -449,7 +449,7 @@ static void galaxy_set_config(struct snd_galaxy *galaxy, u32 config)
msleep(10);
}
-static void __devinit galaxy_config(struct snd_galaxy *galaxy, u32 config)
+static void galaxy_config(struct snd_galaxy *galaxy, u32 config)
{
int i;
@@ -461,7 +461,7 @@ static void __devinit galaxy_config(struct snd_galaxy *galaxy, u32 config)
galaxy_set_config(galaxy, config);
}
-static int __devinit galaxy_wss_config(struct snd_galaxy *galaxy, u8 wss_config)
+static int galaxy_wss_config(struct snd_galaxy *galaxy, u8 wss_config)
{
int err;
@@ -498,7 +498,7 @@ static void snd_galaxy_free(struct snd_card *card)
}
}
-static int __devinit snd_galaxy_probe(struct device *dev, unsigned int n)
+static int snd_galaxy_probe(struct device *dev, unsigned int n)
{
struct snd_galaxy *galaxy;
struct snd_wss *chip;
@@ -620,7 +620,7 @@ error:
return err;
}
-static int __devexit snd_galaxy_remove(struct device *dev, unsigned int n)
+static int snd_galaxy_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -630,7 +630,7 @@ static int __devexit snd_galaxy_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_galaxy_driver = {
.match = snd_galaxy_match,
.probe = snd_galaxy_probe,
- .remove = __devexit_p(snd_galaxy_remove),
+ .remove = snd_galaxy_remove,
.driver = {
.name = DEV_NAME
diff --git a/sound/isa/gus/gusclassic.c b/sound/isa/gus/gusclassic.c
index bf633367161..16bca4e96c0 100644
--- a/sound/isa/gus/gusclassic.c
+++ b/sound/isa/gus/gusclassic.c
@@ -73,13 +73,14 @@ MODULE_PARM_DESC(channels, "GF1 channels for " CRD_NAME " driver.");
module_param_array(pcm_channels, int, NULL, 0444);
MODULE_PARM_DESC(pcm_channels, "Reserved PCM channels for " CRD_NAME " driver.");
-static int __devinit snd_gusclassic_match(struct device *dev, unsigned int n)
+static int snd_gusclassic_match(struct device *dev, unsigned int n)
{
return enable[n];
}
-static int __devinit snd_gusclassic_create(struct snd_card *card,
- struct device *dev, unsigned int n, struct snd_gus_card **rgus)
+static int snd_gusclassic_create(struct snd_card *card,
+ struct device *dev, unsigned int n,
+ struct snd_gus_card **rgus)
{
static long possible_ports[] = {0x220, 0x230, 0x240, 0x250, 0x260};
static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, 4, -1};
@@ -123,7 +124,7 @@ static int __devinit snd_gusclassic_create(struct snd_card *card,
return error;
}
-static int __devinit snd_gusclassic_detect(struct snd_gus_card *gus)
+static int snd_gusclassic_detect(struct snd_gus_card *gus)
{
unsigned char d;
@@ -142,7 +143,7 @@ static int __devinit snd_gusclassic_detect(struct snd_gus_card *gus)
return 0;
}
-static int __devinit snd_gusclassic_probe(struct device *dev, unsigned int n)
+static int snd_gusclassic_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_gus_card *gus;
@@ -211,7 +212,7 @@ out: snd_card_free(card);
return error;
}
-static int __devexit snd_gusclassic_remove(struct device *dev, unsigned int n)
+static int snd_gusclassic_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -221,7 +222,7 @@ static int __devexit snd_gusclassic_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_gusclassic_driver = {
.match = snd_gusclassic_match,
.probe = snd_gusclassic_probe,
- .remove = __devexit_p(snd_gusclassic_remove),
+ .remove = snd_gusclassic_remove,
#if 0 /* FIXME */
.suspend = snd_gusclassic_suspend,
.remove = snd_gusclassic_remove,
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index bc10cc26e5f..0b9c2426b49 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -89,13 +89,14 @@ MODULE_PARM_DESC(channels, "GF1 channels for " CRD_NAME " driver.");
module_param_array(pcm_channels, int, NULL, 0444);
MODULE_PARM_DESC(pcm_channels, "Reserved PCM channels for " CRD_NAME " driver.");
-static int __devinit snd_gusextreme_match(struct device *dev, unsigned int n)
+static int snd_gusextreme_match(struct device *dev, unsigned int n)
{
return enable[n];
}
-static int __devinit snd_gusextreme_es1688_create(struct snd_card *card,
- struct snd_es1688 *chip, struct device *dev, unsigned int n)
+static int snd_gusextreme_es1688_create(struct snd_card *card,
+ struct snd_es1688 *chip,
+ struct device *dev, unsigned int n)
{
static long possible_ports[] = {0x220, 0x240, 0x260};
static int possible_irqs[] = {5, 9, 10, 7, -1};
@@ -132,8 +133,9 @@ static int __devinit snd_gusextreme_es1688_create(struct snd_card *card,
return error;
}
-static int __devinit snd_gusextreme_gus_card_create(struct snd_card *card,
- struct device *dev, unsigned int n, struct snd_gus_card **rgus)
+static int snd_gusextreme_gus_card_create(struct snd_card *card,
+ struct device *dev, unsigned int n,
+ struct snd_gus_card **rgus)
{
static int possible_irqs[] = {11, 12, 15, 9, 5, 7, 3, -1};
static int possible_dmas[] = {5, 6, 7, 3, 1, -1};
@@ -156,8 +158,8 @@ static int __devinit snd_gusextreme_gus_card_create(struct snd_card *card,
0, channels[n], pcm_channels[n], 0, rgus);
}
-static int __devinit snd_gusextreme_detect(struct snd_gus_card *gus,
- struct snd_es1688 *es1688)
+static int snd_gusextreme_detect(struct snd_gus_card *gus,
+ struct snd_es1688 *es1688)
{
unsigned long flags;
unsigned char d;
@@ -206,7 +208,7 @@ static int __devinit snd_gusextreme_detect(struct snd_gus_card *gus,
return 0;
}
-static int __devinit snd_gusextreme_mixer(struct snd_card *card)
+static int snd_gusextreme_mixer(struct snd_card *card)
{
struct snd_ctl_elem_id id1, id2;
int error;
@@ -232,7 +234,7 @@ static int __devinit snd_gusextreme_mixer(struct snd_card *card)
return 0;
}
-static int __devinit snd_gusextreme_probe(struct device *dev, unsigned int n)
+static int snd_gusextreme_probe(struct device *dev, unsigned int n)
{
struct snd_card *card;
struct snd_gus_card *gus;
@@ -339,7 +341,7 @@ out: snd_card_free(card);
return error;
}
-static int __devexit snd_gusextreme_remove(struct device *dev, unsigned int n)
+static int snd_gusextreme_remove(struct device *dev, unsigned int n)
{
snd_card_free(dev_get_drvdata(dev));
dev_set_drvdata(dev, NULL);
@@ -349,7 +351,7 @@ static int __devexit snd_gusextreme_remove(struct device *dev, unsigned int n)
static struct isa_driver snd_gusextreme_driver = {
.match = snd_gusextreme_match,
.probe = snd_gusextreme_probe,
- .remove = __devexit_p(snd_gusextreme_remove),
+ .remove = snd_gusextreme_remove,
#if 0 /* FIXME */
.suspend = snd_gusextreme_suspend,
.resume = snd_gusextreme_resume,
diff --git a/sound/isa/gus/gusmax.c b/sound/isa/gus/gusmax.c
index 41c3f448745..c309a5d0e7e 100644
--- a/sound/isa/gus/gusmax.c
+++ b/sound/isa/gus/gusmax.c
@@ -82,7 +82,7 @@ struct snd_gusmax {
#define PFX "gusmax: "
-static int __devinit snd_gusmax_detect(struct snd_gus_card * gus)
+static int snd_gusmax_detect(struct snd_gus_card *gus)
{
unsigned char d;
@@ -124,8 +124,8 @@ static irqreturn_t snd_gusmax_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static void __devinit snd_gusmax_init(int dev, struct snd_card *card,
- struct snd_gus_card * gus)
+static void snd_gusmax_init(int dev, struct snd_card *card,
+ struct snd_gus_card *gus)
{
gus->equal_irq = 1;
gus->codec_flag = 1;
@@ -140,7 +140,7 @@ static void __devinit snd_gusmax_init(int dev, struct snd_card *card,
outb(gus->max_cntrl_val, GUSP(gus, MAXCNTRLPORT));
}
-static int __devinit snd_gusmax_mixer(struct snd_wss *chip)
+static int snd_gusmax_mixer(struct snd_wss *chip)
{
struct snd_card *card = chip->card;
struct snd_ctl_elem_id id1, id2;
@@ -199,12 +199,12 @@ static void snd_gusmax_free(struct snd_card *card)
free_irq(maxcard->irq, (void *)maxcard);
}
-static int __devinit snd_gusmax_match(struct device *pdev, unsigned int dev)
+static int snd_gusmax_match(struct device *pdev, unsigned int dev)
{
return enable[dev];
}
-static int __devinit snd_gusmax_probe(struct device *pdev, unsigned int dev)
+static int snd_gusmax_probe(struct device *pdev, unsigned int dev)
{
static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, -1};
static int possible_dmas[] = {5, 6, 7, 1, 3, -1};
@@ -354,7 +354,7 @@ static int __devinit snd_gusmax_probe(struct device *pdev, unsigned int dev)
return err;
}
-static int __devexit snd_gusmax_remove(struct device *devptr, unsigned int dev)
+static int snd_gusmax_remove(struct device *devptr, unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -366,7 +366,7 @@ static int __devexit snd_gusmax_remove(struct device *devptr, unsigned int dev)
static struct isa_driver snd_gusmax_driver = {
.match = snd_gusmax_match,
.probe = snd_gusmax_probe,
- .remove = __devexit_p(snd_gusmax_remove),
+ .remove = snd_gusmax_remove,
/* FIXME: suspend/resume */
.driver = {
.name = DEV_NAME
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 3fc8b66fd16..78bc5744e89 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -207,9 +207,9 @@ static struct snd_i2c_bit_ops snd_interwave_i2c_bit_ops = {
.getdata = snd_interwave_i2c_getdataline,
};
-static int __devinit snd_interwave_detect_stb(struct snd_interwave *iwcard,
- struct snd_gus_card * gus, int dev,
- struct snd_i2c_bus **rbus)
+static int snd_interwave_detect_stb(struct snd_interwave *iwcard,
+ struct snd_gus_card *gus, int dev,
+ struct snd_i2c_bus **rbus)
{
unsigned long port;
struct snd_i2c_bus *bus;
@@ -249,11 +249,11 @@ static int __devinit snd_interwave_detect_stb(struct snd_interwave *iwcard,
}
#endif
-static int __devinit snd_interwave_detect(struct snd_interwave *iwcard,
- struct snd_gus_card * gus,
- int dev
+static int snd_interwave_detect(struct snd_interwave *iwcard,
+ struct snd_gus_card *gus,
+ int dev
#ifdef SNDRV_STB
- , struct snd_i2c_bus **rbus
+ , struct snd_i2c_bus **rbus
#endif
)
{
@@ -318,7 +318,7 @@ static irqreturn_t snd_interwave_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static void __devinit snd_interwave_reset(struct snd_gus_card * gus)
+static void snd_interwave_reset(struct snd_gus_card *gus)
{
snd_gf1_write8(gus, SNDRV_GF1_GB_RESET, 0x00);
udelay(160);
@@ -326,7 +326,7 @@ static void __devinit snd_interwave_reset(struct snd_gus_card * gus)
udelay(160);
}
-static void __devinit snd_interwave_bank_sizes(struct snd_gus_card * gus, int *sizes)
+static void snd_interwave_bank_sizes(struct snd_gus_card *gus, int *sizes)
{
unsigned int idx;
unsigned int local;
@@ -377,7 +377,7 @@ struct rom_hdr {
/* 511 */ unsigned char csum;
};
-static void __devinit snd_interwave_detect_memory(struct snd_gus_card * gus)
+static void snd_interwave_detect_memory(struct snd_gus_card *gus)
{
static unsigned int lmc[13] =
{
@@ -475,7 +475,7 @@ static void __devinit snd_interwave_detect_memory(struct snd_gus_card * gus)
snd_interwave_reset(gus);
}
-static void __devinit snd_interwave_init(int dev, struct snd_gus_card * gus)
+static void snd_interwave_init(int dev, struct snd_gus_card *gus)
{
unsigned long flags;
@@ -508,7 +508,7 @@ WSS_DOUBLE("Mic Playback Volume", 0,
CS4231_LEFT_MIC_INPUT, CS4231_RIGHT_MIC_INPUT, 0, 0, 31, 1)
};
-static int __devinit snd_interwave_mixer(struct snd_wss *chip)
+static int snd_interwave_mixer(struct snd_wss *chip)
{
struct snd_card *card = chip->card;
struct snd_ctl_elem_id id1, id2;
@@ -558,9 +558,9 @@ static int __devinit snd_interwave_mixer(struct snd_wss *chip)
#ifdef CONFIG_PNP
-static int __devinit snd_interwave_pnp(int dev, struct snd_interwave *iwcard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_interwave_pnp(int dev, struct snd_interwave *iwcard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -644,7 +644,7 @@ static int snd_interwave_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit snd_interwave_probe(struct snd_card *card, int dev)
+static int snd_interwave_probe(struct snd_card *card, int dev)
{
int xirq, xdma1, xdma2;
struct snd_interwave *iwcard = card->private_data;
@@ -775,7 +775,7 @@ static int __devinit snd_interwave_probe(struct snd_card *card, int dev)
return 0;
}
-static int __devinit snd_interwave_isa_probe1(int dev, struct device *devptr)
+static int snd_interwave_isa_probe1(int dev, struct device *devptr)
{
struct snd_card *card;
int err;
@@ -793,8 +793,8 @@ static int __devinit snd_interwave_isa_probe1(int dev, struct device *devptr)
return 0;
}
-static int __devinit snd_interwave_isa_match(struct device *pdev,
- unsigned int dev)
+static int snd_interwave_isa_match(struct device *pdev,
+ unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -805,8 +805,8 @@ static int __devinit snd_interwave_isa_match(struct device *pdev,
return 1;
}
-static int __devinit snd_interwave_isa_probe(struct device *pdev,
- unsigned int dev)
+static int snd_interwave_isa_probe(struct device *pdev,
+ unsigned int dev)
{
int err;
static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, -1};
@@ -846,7 +846,7 @@ static int __devinit snd_interwave_isa_probe(struct device *pdev,
}
}
-static int __devexit snd_interwave_isa_remove(struct device *devptr, unsigned int dev)
+static int snd_interwave_isa_remove(struct device *devptr, unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -856,7 +856,7 @@ static int __devexit snd_interwave_isa_remove(struct device *devptr, unsigned in
static struct isa_driver snd_interwave_driver = {
.match = snd_interwave_isa_match,
.probe = snd_interwave_isa_probe,
- .remove = __devexit_p(snd_interwave_isa_remove),
+ .remove = snd_interwave_isa_remove,
/* FIXME: suspend,resume */
.driver = {
.name = INTERWAVE_DRIVER
@@ -864,8 +864,8 @@ static struct isa_driver snd_interwave_driver = {
};
#ifdef CONFIG_PNP
-static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_interwave_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -896,7 +896,7 @@ static int __devinit snd_interwave_pnp_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_interwave_pnp_remove(struct pnp_card_link * pcard)
+static void snd_interwave_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -907,7 +907,7 @@ static struct pnp_card_driver interwave_pnpc_driver = {
.name = INTERWAVE_PNP_DRIVER,
.id_table = snd_interwave_pnpids,
.probe = snd_interwave_pnp_detect,
- .remove = __devexit_p(snd_interwave_pnp_remove),
+ .remove = snd_interwave_pnp_remove,
/* FIXME: suspend,resume */
};
diff --git a/sound/isa/msnd/msnd.h b/sound/isa/msnd/msnd.h
index a168ba3313a..dbac3a42347 100644
--- a/sound/isa/msnd/msnd.h
+++ b/sound/isa/msnd/msnd.h
@@ -303,6 +303,6 @@ int snd_msndmidi_new(struct snd_card *card, int device);
void snd_msndmidi_input_read(void *mpu);
void snd_msndmix_setup(struct snd_msnd *chip);
-int __devinit snd_msndmix_new(struct snd_card *card);
+int snd_msndmix_new(struct snd_card *card);
int snd_msndmix_force_recsrc(struct snd_msnd *chip, int recsrc);
#endif /* __MSND_H */
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 29cc8e162b0..ddabb406b14 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -78,7 +78,7 @@
# define LOGNAME "snd_msnd_pinnacle"
#endif
-static void __devinit set_default_audio_parameters(struct snd_msnd *chip)
+static void set_default_audio_parameters(struct snd_msnd *chip)
{
chip->play_sample_size = DEFSAMPLESIZE;
chip->play_sample_rate = DEFSAMPLERATE;
@@ -213,7 +213,7 @@ static int snd_msnd_reset_dsp(long io, unsigned char *info)
return -EIO;
}
-static int __devinit snd_msnd_probe(struct snd_card *card)
+static int snd_msnd_probe(struct snd_card *card)
{
struct snd_msnd *chip = card->private_data;
unsigned char info;
@@ -497,7 +497,7 @@ static int snd_msnd_send_dsp_cmd_chk(struct snd_msnd *chip, u8 cmd)
return snd_msnd_send_dsp_cmd(chip, cmd);
}
-static int __devinit snd_msnd_calibrate_adc(struct snd_msnd *chip, u16 srate)
+static int snd_msnd_calibrate_adc(struct snd_msnd *chip, u16 srate)
{
snd_printdd("snd_msnd_calibrate_adc(%i)\n", srate);
writew(srate, chip->SMA + SMA_wCalFreqAtoD);
@@ -535,7 +535,7 @@ static void snd_msnd_mpu401_close(struct snd_mpu401 *mpu)
static long mpu_io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
-static int __devinit snd_msnd_attach(struct snd_card *card)
+static int snd_msnd_attach(struct snd_card *card)
{
struct snd_msnd *chip = card->private_data;
int err;
@@ -634,7 +634,7 @@ err_release_region:
}
-static void __devexit snd_msnd_unload(struct snd_card *card)
+static void snd_msnd_unload(struct snd_card *card)
{
struct snd_msnd *chip = card->private_data;
@@ -649,7 +649,7 @@ static void __devexit snd_msnd_unload(struct snd_card *card)
/* Pinnacle/Fiji Logical Device Configuration */
-static int __devinit snd_msnd_write_cfg(int cfg, int reg, int value)
+static int snd_msnd_write_cfg(int cfg, int reg, int value)
{
outb(reg, cfg);
outb(value, cfg + 1);
@@ -660,7 +660,7 @@ static int __devinit snd_msnd_write_cfg(int cfg, int reg, int value)
return 0;
}
-static int __devinit snd_msnd_write_cfg_io0(int cfg, int num, u16 io)
+static int snd_msnd_write_cfg_io0(int cfg, int num, u16 io)
{
if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
return -EIO;
@@ -671,7 +671,7 @@ static int __devinit snd_msnd_write_cfg_io0(int cfg, int num, u16 io)
return 0;
}
-static int __devinit snd_msnd_write_cfg_io1(int cfg, int num, u16 io)
+static int snd_msnd_write_cfg_io1(int cfg, int num, u16 io)
{
if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
return -EIO;
@@ -682,7 +682,7 @@ static int __devinit snd_msnd_write_cfg_io1(int cfg, int num, u16 io)
return 0;
}
-static int __devinit snd_msnd_write_cfg_irq(int cfg, int num, u16 irq)
+static int snd_msnd_write_cfg_irq(int cfg, int num, u16 irq)
{
if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
return -EIO;
@@ -693,7 +693,7 @@ static int __devinit snd_msnd_write_cfg_irq(int cfg, int num, u16 irq)
return 0;
}
-static int __devinit snd_msnd_write_cfg_mem(int cfg, int num, int mem)
+static int snd_msnd_write_cfg_mem(int cfg, int num, int mem)
{
u16 wmem;
@@ -711,7 +711,7 @@ static int __devinit snd_msnd_write_cfg_mem(int cfg, int num, int mem)
return 0;
}
-static int __devinit snd_msnd_activate_logical(int cfg, int num)
+static int snd_msnd_activate_logical(int cfg, int num)
{
if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
return -EIO;
@@ -720,8 +720,8 @@ static int __devinit snd_msnd_activate_logical(int cfg, int num)
return 0;
}
-static int __devinit snd_msnd_write_cfg_logical(int cfg, int num, u16 io0,
- u16 io1, u16 irq, int mem)
+static int snd_msnd_write_cfg_logical(int cfg, int num, u16 io0,
+ u16 io1, u16 irq, int mem)
{
if (snd_msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
return -EIO;
@@ -738,7 +738,7 @@ static int __devinit snd_msnd_write_cfg_logical(int cfg, int num, u16 io0,
return 0;
}
-static int __devinit snd_msnd_pinnacle_cfg_reset(int cfg)
+static int snd_msnd_pinnacle_cfg_reset(int cfg)
{
int i;
@@ -818,7 +818,7 @@ module_param_array(joystick_io, long, NULL, S_IRUGO);
#endif
-static int __devinit snd_msnd_isa_match(struct device *pdev, unsigned int i)
+static int snd_msnd_isa_match(struct device *pdev, unsigned int i)
{
if (io[i] == SNDRV_AUTO_PORT)
return 0;
@@ -888,7 +888,7 @@ static int __devinit snd_msnd_isa_match(struct device *pdev, unsigned int i)
return 1;
}
-static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
+static int snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
{
int err;
struct snd_card *card;
@@ -1061,7 +1061,7 @@ cfg_error:
#endif
}
-static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
+static int snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
{
snd_msnd_unload(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -1073,7 +1073,7 @@ static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
static struct isa_driver snd_msnd_driver = {
.match = snd_msnd_isa_match,
.probe = snd_msnd_isa_probe,
- .remove = __devexit_p(snd_msnd_isa_remove),
+ .remove = snd_msnd_isa_remove,
/* FIXME: suspend, resume */
.driver = {
.name = DEV_NAME
@@ -1081,8 +1081,8 @@ static struct isa_driver snd_msnd_driver = {
};
#ifdef CONFIG_PNP
-static int __devinit snd_msnd_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_msnd_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int idx;
struct pnp_dev *pnp_dev;
@@ -1185,7 +1185,7 @@ _release_card:
return ret;
}
-static void __devexit snd_msnd_pnp_remove(struct pnp_card_link *pcard)
+static void snd_msnd_pnp_remove(struct pnp_card_link *pcard)
{
snd_msnd_unload(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -1207,7 +1207,7 @@ static struct pnp_card_driver msnd_pnpc_driver = {
.name = "msnd_pinnacle",
.id_table = msnd_pnpids,
.probe = snd_msnd_pnp_detect,
- .remove = __devexit_p(snd_msnd_pnp_remove),
+ .remove = snd_msnd_pnp_remove,
};
#endif /* CONFIG_PNP */
diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c
index 1de59d44142..031dc69b747 100644
--- a/sound/isa/msnd/msnd_pinnacle_mixer.c
+++ b/sound/isa/msnd/msnd_pinnacle_mixer.c
@@ -302,7 +302,7 @@ DUMMY_VOLUME("Monitor", 0, MSND_MIXER_IMIX),
};
-int __devinit snd_msndmix_new(struct snd_card *card)
+int snd_msndmix_new(struct snd_card *card)
{
struct snd_msnd *chip = card->private_data;
unsigned int idx;
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index f6cc0b917ef..075777a6cf0 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -221,7 +221,7 @@ static void snd_opl3sa2_write(struct snd_opl3sa2 *chip, unsigned char reg, unsig
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
-static int __devinit snd_opl3sa2_detect(struct snd_card *card)
+static int snd_opl3sa2_detect(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
unsigned long port;
@@ -496,7 +496,7 @@ static void snd_opl3sa2_master_free(struct snd_kcontrol *kcontrol)
chip->master_volume = NULL;
}
-static int __devinit snd_opl3sa2_mixer(struct snd_card *card)
+static int snd_opl3sa2_mixer(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
struct snd_ctl_elem_id id1, id2;
@@ -596,8 +596,8 @@ static int snd_opl3sa2_resume(struct snd_card *card)
#endif /* CONFIG_PM */
#ifdef CONFIG_PNP
-static int __devinit snd_opl3sa2_pnp(int dev, struct snd_opl3sa2 *chip,
- struct pnp_dev *pdev)
+static int snd_opl3sa2_pnp(int dev, struct snd_opl3sa2 *chip,
+ struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
snd_printk(KERN_ERR "PnP configure failure (out of resources?)\n");
@@ -647,7 +647,7 @@ static int snd_opl3sa2_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit snd_opl3sa2_probe(struct snd_card *card, int dev)
+static int snd_opl3sa2_probe(struct snd_card *card, int dev)
{
int xirq, xdma1, xdma2;
struct snd_opl3sa2 *chip;
@@ -721,8 +721,8 @@ static int __devinit snd_opl3sa2_probe(struct snd_card *card, int dev)
}
#ifdef CONFIG_PNP
-static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
{
static int dev;
int err;
@@ -754,7 +754,7 @@ static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
return 0;
}
-static void __devexit snd_opl3sa2_pnp_remove(struct pnp_dev * pdev)
+static void snd_opl3sa2_pnp_remove(struct pnp_dev *pdev)
{
snd_card_free(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -775,15 +775,15 @@ static struct pnp_driver opl3sa2_pnp_driver = {
.name = "snd-opl3sa2-pnpbios",
.id_table = snd_opl3sa2_pnpbiosids,
.probe = snd_opl3sa2_pnp_detect,
- .remove = __devexit_p(snd_opl3sa2_pnp_remove),
+ .remove = snd_opl3sa2_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_pnp_suspend,
.resume = snd_opl3sa2_pnp_resume,
#endif
};
-static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *id)
+static int snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *id)
{
static int dev;
struct pnp_dev *pdev;
@@ -820,7 +820,7 @@ static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_opl3sa2_pnp_cremove(struct pnp_card_link * pcard)
+static void snd_opl3sa2_pnp_cremove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -842,7 +842,7 @@ static struct pnp_card_driver opl3sa2_pnpc_driver = {
.name = "snd-opl3sa2-cpnp",
.id_table = snd_opl3sa2_pnpids,
.probe = snd_opl3sa2_pnp_cdetect,
- .remove = __devexit_p(snd_opl3sa2_pnp_cremove),
+ .remove = snd_opl3sa2_pnp_cremove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_pnp_csuspend,
.resume = snd_opl3sa2_pnp_cresume,
@@ -850,8 +850,8 @@ static struct pnp_card_driver opl3sa2_pnpc_driver = {
};
#endif /* CONFIG_PNP */
-static int __devinit snd_opl3sa2_isa_match(struct device *pdev,
- unsigned int dev)
+static int snd_opl3sa2_isa_match(struct device *pdev,
+ unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -878,8 +878,8 @@ static int __devinit snd_opl3sa2_isa_match(struct device *pdev,
return 1;
}
-static int __devinit snd_opl3sa2_isa_probe(struct device *pdev,
- unsigned int dev)
+static int snd_opl3sa2_isa_probe(struct device *pdev,
+ unsigned int dev)
{
struct snd_card *card;
int err;
@@ -896,8 +896,8 @@ static int __devinit snd_opl3sa2_isa_probe(struct device *pdev,
return 0;
}
-static int __devexit snd_opl3sa2_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_opl3sa2_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -922,7 +922,7 @@ static int snd_opl3sa2_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_opl3sa2_isa_driver = {
.match = snd_opl3sa2_isa_match,
.probe = snd_opl3sa2_isa_probe,
- .remove = __devexit_p(snd_opl3sa2_isa_remove),
+ .remove = snd_opl3sa2_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_isa_suspend,
.resume = snd_opl3sa2_isa_resume,
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index 4a7ff4e8985..c3da1df9371 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -587,7 +587,7 @@ static int snd_miro_put_double(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_miro_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_controls[] = {
MIRO_DOUBLE("Master Playback Volume", 0, ACI_GET_MASTER, ACI_SET_MASTER),
MIRO_DOUBLE("Mic Playback Volume", 1, ACI_GET_MIC, ACI_SET_MIC),
MIRO_DOUBLE("Line Playback Volume", 1, ACI_GET_LINE, ACI_SET_LINE),
@@ -599,7 +599,7 @@ MIRO_DOUBLE("Aux Playback Volume", 2, ACI_GET_LINE2, ACI_SET_LINE2),
/* Equalizer with seven bands (only PCM20)
from -12dB up to +12dB on each band */
-static struct snd_kcontrol_new snd_miro_eq_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_eq_controls[] = {
MIRO_DOUBLE("Tone Control - 28 Hz", 0, ACI_GET_EQ1, ACI_SET_EQ1),
MIRO_DOUBLE("Tone Control - 160 Hz", 0, ACI_GET_EQ2, ACI_SET_EQ2),
MIRO_DOUBLE("Tone Control - 400 Hz", 0, ACI_GET_EQ3, ACI_SET_EQ3),
@@ -609,15 +609,15 @@ MIRO_DOUBLE("Tone Control - 6.3 kHz", 0, ACI_GET_EQ6, ACI_SET_EQ6),
MIRO_DOUBLE("Tone Control - 16 kHz", 0, ACI_GET_EQ7, ACI_SET_EQ7),
};
-static struct snd_kcontrol_new snd_miro_radio_control[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_radio_control[] = {
MIRO_DOUBLE("Radio Playback Volume", 0, ACI_GET_LINE1, ACI_SET_LINE1),
};
-static struct snd_kcontrol_new snd_miro_line_control[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_line_control[] = {
MIRO_DOUBLE("Line Playback Volume", 2, ACI_GET_LINE1, ACI_SET_LINE1),
};
-static struct snd_kcontrol_new snd_miro_preamp_control[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_preamp_control[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Boost",
@@ -627,7 +627,7 @@ static struct snd_kcontrol_new snd_miro_preamp_control[] __devinitdata = {
.put = snd_miro_put_preamp,
}};
-static struct snd_kcontrol_new snd_miro_amp_control[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_amp_control[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Boost",
@@ -637,7 +637,7 @@ static struct snd_kcontrol_new snd_miro_amp_control[] __devinitdata = {
.put = snd_miro_put_amp,
}};
-static struct snd_kcontrol_new snd_miro_capture_control[] __devinitdata = {
+static struct snd_kcontrol_new snd_miro_capture_control[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Capture Switch",
@@ -647,7 +647,7 @@ static struct snd_kcontrol_new snd_miro_capture_control[] __devinitdata = {
.put = snd_miro_put_capture,
}};
-static unsigned char aci_init_values[][2] __devinitdata = {
+static unsigned char aci_init_values[][2] = {
{ ACI_SET_MUTE, 0x00 },
{ ACI_SET_POWERAMP, 0x00 },
{ ACI_SET_PREAMP, 0x00 },
@@ -670,7 +670,7 @@ static unsigned char aci_init_values[][2] __devinitdata = {
{ ACI_SET_MASTER + 1, 0x20 },
};
-static int __devinit snd_set_aci_init_values(struct snd_miro *miro)
+static int snd_set_aci_init_values(struct snd_miro *miro)
{
int idx, error;
struct snd_miro_aci *aci = miro->aci;
@@ -713,8 +713,8 @@ static int __devinit snd_set_aci_init_values(struct snd_miro *miro)
return 0;
}
-static int __devinit snd_miro_mixer(struct snd_card *card,
- struct snd_miro *miro)
+static int snd_miro_mixer(struct snd_card *card,
+ struct snd_miro *miro)
{
unsigned int idx;
int err;
@@ -771,8 +771,8 @@ static int __devinit snd_miro_mixer(struct snd_card *card,
return 0;
}
-static int __devinit snd_miro_init(struct snd_miro *chip,
- unsigned short hardware)
+static int snd_miro_init(struct snd_miro *chip,
+ unsigned short hardware)
{
static int opti9xx_mc_size[] = {7, 7, 10, 10, 2, 2, 2};
@@ -989,8 +989,8 @@ static void snd_miro_proc_read(struct snd_info_entry * entry,
snd_iprintf(buffer, " preamp : 0x%x\n", aci->aci_preamp);
}
-static void __devinit snd_miro_proc_init(struct snd_card *card,
- struct snd_miro *miro)
+static void snd_miro_proc_init(struct snd_card *card,
+ struct snd_miro *miro)
{
struct snd_info_entry *entry;
@@ -1002,7 +1002,7 @@ static void __devinit snd_miro_proc_init(struct snd_card *card,
* Init
*/
-static int __devinit snd_miro_configure(struct snd_miro *chip)
+static int snd_miro_configure(struct snd_miro *chip)
{
unsigned char wss_base_bits;
unsigned char irq_bits;
@@ -1162,7 +1162,7 @@ __skip_mpu:
return 0;
}
-static int __devinit snd_miro_opti_check(struct snd_miro *chip)
+static int snd_miro_opti_check(struct snd_miro *chip)
{
unsigned char value;
@@ -1182,8 +1182,8 @@ static int __devinit snd_miro_opti_check(struct snd_miro *chip)
return -ENODEV;
}
-static int __devinit snd_card_miro_detect(struct snd_card *card,
- struct snd_miro *chip)
+static int snd_card_miro_detect(struct snd_card *card,
+ struct snd_miro *chip)
{
int i, err;
@@ -1200,8 +1200,8 @@ static int __devinit snd_card_miro_detect(struct snd_card *card,
return -ENODEV;
}
-static int __devinit snd_card_miro_aci_detect(struct snd_card *card,
- struct snd_miro *miro)
+static int snd_card_miro_aci_detect(struct snd_card *card,
+ struct snd_miro *miro)
{
unsigned char regval;
int i;
@@ -1265,7 +1265,7 @@ static void snd_card_miro_free(struct snd_card *card)
release_and_free_resource(miro->res_mc_base);
}
-static int __devinit snd_miro_probe(struct snd_card *card)
+static int snd_miro_probe(struct snd_card *card)
{
int error;
struct snd_miro *miro = card->private_data;
@@ -1386,7 +1386,7 @@ static int __devinit snd_miro_probe(struct snd_card *card)
return snd_card_register(card);
}
-static int __devinit snd_miro_isa_match(struct device *devptr, unsigned int n)
+static int snd_miro_isa_match(struct device *devptr, unsigned int n)
{
#ifdef CONFIG_PNP
if (snd_miro_pnp_is_probed)
@@ -1397,7 +1397,7 @@ static int __devinit snd_miro_isa_match(struct device *devptr, unsigned int n)
return 1;
}
-static int __devinit snd_miro_isa_probe(struct device *devptr, unsigned int n)
+static int snd_miro_isa_probe(struct device *devptr, unsigned int n)
{
static long possible_ports[] = {0x530, 0xe80, 0xf40, 0x604, -1};
static long possible_mpu_ports[] = {0x330, 0x300, 0x310, 0x320, -1};
@@ -1491,8 +1491,8 @@ static int __devinit snd_miro_isa_probe(struct device *devptr, unsigned int n)
return 0;
}
-static int __devexit snd_miro_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_miro_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -1504,7 +1504,7 @@ static int __devexit snd_miro_isa_remove(struct device *devptr,
static struct isa_driver snd_miro_driver = {
.match = snd_miro_isa_match,
.probe = snd_miro_isa_probe,
- .remove = __devexit_p(snd_miro_isa_remove),
+ .remove = snd_miro_isa_remove,
/* FIXME: suspend/resume */
.driver = {
.name = DEV_NAME
@@ -1513,9 +1513,9 @@ static struct isa_driver snd_miro_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_card_miro_pnp(struct snd_miro *chip,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *pid)
+static int snd_card_miro_pnp(struct snd_miro *chip,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *pid)
{
struct pnp_dev *pdev;
int err;
@@ -1574,8 +1574,8 @@ static int __devinit snd_card_miro_pnp(struct snd_miro *chip,
return 0;
}
-static int __devinit snd_miro_pnp_probe(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_miro_pnp_probe(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
struct snd_card *card;
int err;
@@ -1624,7 +1624,7 @@ static int __devinit snd_miro_pnp_probe(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_miro_pnp_remove(struct pnp_card_link * pcard)
+static void snd_miro_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -1636,7 +1636,7 @@ static struct pnp_card_driver miro_pnpc_driver = {
.name = "miro",
.id_table = snd_miro_pnpids,
.probe = snd_miro_pnp_probe,
- .remove = __devexit_p(snd_miro_pnp_remove),
+ .remove = snd_miro_pnp_remove,
};
#endif
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 2899c9fd1ce..b41ed8661b2 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -186,8 +186,8 @@ static char * snd_opti9xx_names[] = {
"82C930", "82C931", "82C933"
};
-static int __devinit snd_opti9xx_init(struct snd_opti9xx *chip,
- unsigned short hardware)
+static int snd_opti9xx_init(struct snd_opti9xx *chip,
+ unsigned short hardware)
{
static int opti9xx_mc_size[] = {7, 7, 10, 10, 2, 2, 2};
@@ -593,7 +593,7 @@ WSS_DOUBLE_TLV("Aux Playback Volume", 0,
db_scale_4bit_12db_max),
};
-static int __devinit snd_opti93x_mixer(struct snd_wss *chip)
+static int snd_opti93x_mixer(struct snd_wss *chip)
{
struct snd_card *card;
unsigned int idx;
@@ -666,7 +666,7 @@ static irqreturn_t snd_opti93x_interrupt(int irq, void *dev_id)
#endif /* OPTi93X */
-static int __devinit snd_opti9xx_read_check(struct snd_opti9xx *chip)
+static int snd_opti9xx_read_check(struct snd_opti9xx *chip)
{
unsigned char value;
#ifdef OPTi93X
@@ -707,8 +707,8 @@ static int __devinit snd_opti9xx_read_check(struct snd_opti9xx *chip)
return -ENODEV;
}
-static int __devinit snd_card_opti9xx_detect(struct snd_card *card,
- struct snd_opti9xx *chip)
+static int snd_card_opti9xx_detect(struct snd_card *card,
+ struct snd_opti9xx *chip)
{
int i, err;
@@ -732,9 +732,9 @@ static int __devinit snd_card_opti9xx_detect(struct snd_card *card,
}
#ifdef CONFIG_PNP
-static int __devinit snd_card_opti9xx_pnp(struct snd_opti9xx *chip,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *pid)
+static int snd_card_opti9xx_pnp(struct snd_opti9xx *chip,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *pid)
{
struct pnp_dev *pdev;
int err;
@@ -817,7 +817,7 @@ static void snd_card_opti9xx_free(struct snd_card *card)
}
}
-static int __devinit snd_opti9xx_probe(struct snd_card *card)
+static int snd_opti9xx_probe(struct snd_card *card)
{
static long possible_ports[] = {0x530, 0xe80, 0xf40, 0x604, -1};
int error;
@@ -952,8 +952,8 @@ static int snd_opti9xx_card_new(struct snd_card **cardp)
return 0;
}
-static int __devinit snd_opti9xx_isa_match(struct device *devptr,
- unsigned int dev)
+static int snd_opti9xx_isa_match(struct device *devptr,
+ unsigned int dev)
{
#ifdef CONFIG_PNP
if (snd_opti9xx_pnp_is_probed)
@@ -964,8 +964,8 @@ static int __devinit snd_opti9xx_isa_match(struct device *devptr,
return 1;
}
-static int __devinit snd_opti9xx_isa_probe(struct device *devptr,
- unsigned int dev)
+static int snd_opti9xx_isa_probe(struct device *devptr,
+ unsigned int dev)
{
struct snd_card *card;
int error;
@@ -1031,8 +1031,8 @@ static int __devinit snd_opti9xx_isa_probe(struct device *devptr,
return 0;
}
-static int __devexit snd_opti9xx_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_opti9xx_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -1083,7 +1083,7 @@ static int snd_opti9xx_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_opti9xx_driver = {
.match = snd_opti9xx_isa_match,
.probe = snd_opti9xx_isa_probe,
- .remove = __devexit_p(snd_opti9xx_isa_remove),
+ .remove = snd_opti9xx_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_opti9xx_isa_suspend,
.resume = snd_opti9xx_isa_resume,
@@ -1094,8 +1094,8 @@ static struct isa_driver snd_opti9xx_driver = {
};
#ifdef CONFIG_PNP
-static int __devinit snd_opti9xx_pnp_probe(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_opti9xx_pnp_probe(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
struct snd_card *card;
int error, hw;
@@ -1146,7 +1146,7 @@ static int __devinit snd_opti9xx_pnp_probe(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard)
+static void snd_opti9xx_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -1171,7 +1171,7 @@ static struct pnp_card_driver opti9xx_pnpc_driver = {
.name = "opti9xx",
.id_table = snd_opti9xx_pnpids,
.probe = snd_opti9xx_pnp_probe,
- .remove = __devexit_p(snd_opti9xx_pnp_remove),
+ .remove = snd_opti9xx_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_opti9xx_pnp_suspend,
.resume = snd_opti9xx_pnp_resume,
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 2aae6a0efbc..45fcdff611f 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -131,7 +131,7 @@ snd_emu8000_dma_chan(struct snd_emu8000 *emu, int ch, int mode)
/*
*/
-static void __devinit
+static void
snd_emu8000_read_wait(struct snd_emu8000 *emu)
{
while ((EMU8000_SMALR_READ(emu) & 0x80000000) != 0) {
@@ -143,7 +143,7 @@ snd_emu8000_read_wait(struct snd_emu8000 *emu)
/*
*/
-static void __devinit
+static void
snd_emu8000_write_wait(struct snd_emu8000 *emu)
{
while ((EMU8000_SMALW_READ(emu) & 0x80000000) != 0) {
@@ -156,7 +156,7 @@ snd_emu8000_write_wait(struct snd_emu8000 *emu)
/*
* detect a card at the given port
*/
-static int __devinit
+static int
snd_emu8000_detect(struct snd_emu8000 *emu)
{
/* Initialise */
@@ -182,7 +182,7 @@ snd_emu8000_detect(struct snd_emu8000 *emu)
/*
 * initialize audio channels
*/
-static void __devinit
+static void
init_audio(struct snd_emu8000 *emu)
{
int ch;
@@ -223,7 +223,7 @@ init_audio(struct snd_emu8000 *emu)
/*
* initialize DMA address
*/
-static void __devinit
+static void
init_dma(struct snd_emu8000 *emu)
{
EMU8000_SMALR_WRITE(emu, 0);
@@ -235,7 +235,7 @@ init_dma(struct snd_emu8000 *emu)
/*
* initialization arrays; from ADIP
*/
-static unsigned short init1[128] /*__devinitdata*/ = {
+static unsigned short init1[128] = {
0x03ff, 0x0030, 0x07ff, 0x0130, 0x0bff, 0x0230, 0x0fff, 0x0330,
0x13ff, 0x0430, 0x17ff, 0x0530, 0x1bff, 0x0630, 0x1fff, 0x0730,
0x23ff, 0x0830, 0x27ff, 0x0930, 0x2bff, 0x0a30, 0x2fff, 0x0b30,
@@ -257,7 +257,7 @@ static unsigned short init1[128] /*__devinitdata*/ = {
0xf3ff, 0x0c30, 0xf7ff, 0x0d30, 0xfbff, 0x0e30, 0xffff, 0x0f30,
};
-static unsigned short init2[128] /*__devinitdata*/ = {
+static unsigned short init2[128] = {
0x03ff, 0x8030, 0x07ff, 0x8130, 0x0bff, 0x8230, 0x0fff, 0x8330,
0x13ff, 0x8430, 0x17ff, 0x8530, 0x1bff, 0x8630, 0x1fff, 0x8730,
0x23ff, 0x8830, 0x27ff, 0x8930, 0x2bff, 0x8a30, 0x2fff, 0x8b30,
@@ -279,7 +279,7 @@ static unsigned short init2[128] /*__devinitdata*/ = {
0xf3ff, 0x8c30, 0xf7ff, 0x8d30, 0xfbff, 0x8e30, 0xffff, 0x8f30,
};
-static unsigned short init3[128] /*__devinitdata*/ = {
+static unsigned short init3[128] = {
0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x8F7C, 0x167E, 0xF254,
0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x8BAA, 0x1B6D, 0xF234,
@@ -301,7 +301,7 @@ static unsigned short init3[128] /*__devinitdata*/ = {
0x1342, 0xD36E, 0x3EC7, 0xB3FF, 0x0000, 0x8365, 0x1420, 0x9570,
};
-static unsigned short init4[128] /*__devinitdata*/ = {
+static unsigned short init4[128] = {
0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x0F7C, 0x167E, 0x7254,
0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x0BAA, 0x1B6D, 0x7234,
@@ -327,7 +327,7 @@ static unsigned short init4[128] /*__devinitdata*/ = {
* Taken from the oss driver, not obvious from the doc how this
* is meant to work
*/
-static void __devinit
+static void
send_array(struct snd_emu8000 *emu, unsigned short *data, int size)
{
int i;
@@ -349,7 +349,7 @@ send_array(struct snd_emu8000 *emu, unsigned short *data, int size)
* Send initialization arrays to start up, this just follows the
* initialisation sequence in the adip.
*/
-static void __devinit
+static void
init_arrays(struct snd_emu8000 *emu)
{
send_array(emu, init1, ARRAY_SIZE(init1)/4);
@@ -375,7 +375,7 @@ init_arrays(struct snd_emu8000 *emu)
* seems that the only way to do this is to use the one channel and keep
* reallocating between read and write.
*/
-static void __devinit
+static void
size_dram(struct snd_emu8000 *emu)
{
int i, size, detected_size;
@@ -512,7 +512,7 @@ snd_emu8000_init_fm(struct snd_emu8000 *emu)
/*
* The main initialization routine.
*/
-static void __devinit
+static void
snd_emu8000_init_hw(struct snd_emu8000 *emu)
{
int i;
@@ -1031,7 +1031,7 @@ static struct snd_kcontrol_new *mixer_defs[EMU8000_NUM_CONTROLS] = {
/*
* create and attach mixer elements for WaveTable treble/bass controls
*/
-static int __devinit
+static int
snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
{
int i, err = 0;
@@ -1082,7 +1082,7 @@ static int snd_emu8000_dev_free(struct snd_device *device)
/*
* initialize and register emu8000 synth device.
*/
-int __devinit
+int
snd_emu8000_new(struct snd_card *card, int index, long port, int seq_ports,
struct snd_seq_device **awe_ret)
{
diff --git a/sound/isa/sb/jazz16.c b/sound/isa/sb/jazz16.c
index 410758c6809..4961da4e627 100644
--- a/sound/isa/sb/jazz16.c
+++ b/sound/isa/sb/jazz16.c
@@ -78,8 +78,8 @@ static irqreturn_t jazz16_interrupt(int irq, void *chip)
return snd_sb8dsp_interrupt(chip);
}
-static int __devinit jazz16_configure_ports(unsigned long port,
- unsigned long mpu_port, int idx)
+static int jazz16_configure_ports(unsigned long port,
+ unsigned long mpu_port, int idx)
{
unsigned char val;
@@ -99,8 +99,8 @@ static int __devinit jazz16_configure_ports(unsigned long port,
return 0;
}
-static int __devinit jazz16_detect_board(unsigned long port,
- unsigned long mpu_port)
+static int jazz16_detect_board(unsigned long port,
+ unsigned long mpu_port)
{
int err;
int val;
@@ -156,7 +156,7 @@ err_unmap:
return err;
}
-static int __devinit jazz16_configure_board(struct snd_sb *chip, int mpu_irq)
+static int jazz16_configure_board(struct snd_sb *chip, int mpu_irq)
{
static unsigned char jazz_irq_bits[] = { 0, 0, 2, 3, 0, 1, 0, 4,
0, 2, 5, 0, 0, 0, 0, 6 };
@@ -183,7 +183,7 @@ static int __devinit jazz16_configure_board(struct snd_sb *chip, int mpu_irq)
return 0;
}
-static int __devinit snd_jazz16_match(struct device *devptr, unsigned int dev)
+static int snd_jazz16_match(struct device *devptr, unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -218,7 +218,7 @@ static int __devinit snd_jazz16_match(struct device *devptr, unsigned int dev)
return 1;
}
-static int __devinit snd_jazz16_probe(struct device *devptr, unsigned int dev)
+static int snd_jazz16_probe(struct device *devptr, unsigned int dev)
{
struct snd_card *card;
struct snd_card_jazz16 *jazz16;
@@ -341,7 +341,7 @@ err_free:
return err;
}
-static int __devexit snd_jazz16_remove(struct device *devptr, unsigned int dev)
+static int snd_jazz16_remove(struct device *devptr, unsigned int dev)
{
struct snd_card *card = dev_get_drvdata(devptr);
@@ -380,7 +380,7 @@ static int snd_jazz16_resume(struct device *pdev, unsigned int n)
static struct isa_driver snd_jazz16_driver = {
.match = snd_jazz16_match,
.probe = snd_jazz16_probe,
- .remove = __devexit_p(snd_jazz16_remove),
+ .remove = snd_jazz16_remove,
#ifdef CONFIG_PM
.suspend = snd_jazz16_suspend,
.resume = snd_jazz16_resume,
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 39b8eca1521..50dbec454f9 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -250,9 +250,9 @@ MODULE_DEVICE_TABLE(pnp_card, snd_sb16_pnpids);
#ifdef CONFIG_PNP
-static int __devinit snd_card_sb16_pnp(int dev, struct snd_card_sb16 *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *id)
+static int snd_card_sb16_pnp(int dev, struct snd_card_sb16 *acard,
+ struct pnp_card_link *card,
+ const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
@@ -337,7 +337,7 @@ static int snd_sb16_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit snd_sb16_probe(struct snd_card *card, int dev)
+static int snd_sb16_probe(struct snd_card *card, int dev)
{
int xirq, xdma8, xdma16;
struct snd_sb *chip;
@@ -487,7 +487,7 @@ static int snd_sb16_resume(struct snd_card *card)
}
#endif
-static int __devinit snd_sb16_isa_probe1(int dev, struct device *pdev)
+static int snd_sb16_isa_probe1(int dev, struct device *pdev)
{
struct snd_card_sb16 *acard;
struct snd_card *card;
@@ -517,12 +517,12 @@ static int __devinit snd_sb16_isa_probe1(int dev, struct device *pdev)
}
-static int __devinit snd_sb16_isa_match(struct device *pdev, unsigned int dev)
+static int snd_sb16_isa_match(struct device *pdev, unsigned int dev)
{
return enable[dev] && !is_isapnp_selected(dev);
}
-static int __devinit snd_sb16_isa_probe(struct device *pdev, unsigned int dev)
+static int snd_sb16_isa_probe(struct device *pdev, unsigned int dev)
{
int err;
static int possible_irqs[] = {5, 9, 10, 7, -1};
@@ -563,7 +563,7 @@ static int __devinit snd_sb16_isa_probe(struct device *pdev, unsigned int dev)
}
}
-static int __devexit snd_sb16_isa_remove(struct device *pdev, unsigned int dev)
+static int snd_sb16_isa_remove(struct device *pdev, unsigned int dev)
{
snd_card_free(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -592,7 +592,7 @@ static int snd_sb16_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_sb16_isa_driver = {
.match = snd_sb16_isa_match,
.probe = snd_sb16_isa_probe,
- .remove = __devexit_p(snd_sb16_isa_remove),
+ .remove = snd_sb16_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_sb16_isa_suspend,
.resume = snd_sb16_isa_resume,
@@ -604,8 +604,8 @@ static struct isa_driver snd_sb16_isa_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_sb16_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -631,7 +631,7 @@ static int __devinit snd_sb16_pnp_detect(struct pnp_card_link *pcard,
return -ENODEV;
}
-static void __devexit snd_sb16_pnp_remove(struct pnp_card_link * pcard)
+static void snd_sb16_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -657,7 +657,7 @@ static struct pnp_card_driver sb16_pnpc_driver = {
#endif
.id_table = snd_sb16_pnpids,
.probe = snd_sb16_pnp_detect,
- .remove = __devexit_p(snd_sb16_pnp_remove),
+ .remove = snd_sb16_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_sb16_pnp_suspend,
.resume = snd_sb16_pnp_resume,
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index ab5cebea52e..237d964ff8a 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -79,7 +79,7 @@ static void snd_sb8_free(struct snd_card *card)
release_and_free_resource(acard->fm_res);
}
-static int __devinit snd_sb8_match(struct device *pdev, unsigned int dev)
+static int snd_sb8_match(struct device *pdev, unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -94,7 +94,7 @@ static int __devinit snd_sb8_match(struct device *pdev, unsigned int dev)
return 1;
}
-static int __devinit snd_sb8_probe(struct device *pdev, unsigned int dev)
+static int snd_sb8_probe(struct device *pdev, unsigned int dev)
{
struct snd_sb *chip;
struct snd_card *card;
@@ -205,7 +205,7 @@ static int __devinit snd_sb8_probe(struct device *pdev, unsigned int dev)
return err;
}
-static int __devexit snd_sb8_remove(struct device *pdev, unsigned int dev)
+static int snd_sb8_remove(struct device *pdev, unsigned int dev)
{
snd_card_free(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -244,7 +244,7 @@ static int snd_sb8_resume(struct device *dev, unsigned int n)
static struct isa_driver snd_sb8_driver = {
.match = snd_sb8_match,
.probe = snd_sb8_probe,
- .remove = __devexit_p(snd_sb8_remove),
+ .remove = snd_sb8_remove,
#ifdef CONFIG_PM
.suspend = snd_sb8_suspend,
.resume = snd_sb8_resume,
diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
index d97d0f38181..5376ebff845 100644
--- a/sound/isa/sc6000.c
+++ b/sound/isa/sc6000.c
@@ -121,7 +121,7 @@ MODULE_PARM_DESC(joystick, "Enable gameport.");
/*
* sc6000_irq_to_softcfg - Decode irq number into cfg code.
*/
-static __devinit unsigned char sc6000_irq_to_softcfg(int irq)
+static unsigned char sc6000_irq_to_softcfg(int irq)
{
unsigned char val = 0;
@@ -150,7 +150,7 @@ static __devinit unsigned char sc6000_irq_to_softcfg(int irq)
/*
* sc6000_dma_to_softcfg - Decode dma number into cfg code.
*/
-static __devinit unsigned char sc6000_dma_to_softcfg(int dma)
+static unsigned char sc6000_dma_to_softcfg(int dma)
{
unsigned char val = 0;
@@ -173,7 +173,7 @@ static __devinit unsigned char sc6000_dma_to_softcfg(int dma)
/*
* sc6000_mpu_irq_to_softcfg - Decode MPU-401 irq number into cfg code.
*/
-static __devinit unsigned char sc6000_mpu_irq_to_softcfg(int mpu_irq)
+static unsigned char sc6000_mpu_irq_to_softcfg(int mpu_irq)
{
unsigned char val = 0;
@@ -242,8 +242,8 @@ static int sc6000_write(char __iomem *vport, int cmd)
return -EIO;
}
-static int __devinit sc6000_dsp_get_answer(char __iomem *vport, int command,
- char *data, int data_len)
+static int sc6000_dsp_get_answer(char __iomem *vport, int command,
+ char *data, int data_len)
{
int len = 0;
@@ -269,7 +269,7 @@ static int __devinit sc6000_dsp_get_answer(char __iomem *vport, int command,
return len ? len : -EIO;
}
-static int __devinit sc6000_dsp_reset(char __iomem *vport)
+static int sc6000_dsp_reset(char __iomem *vport)
{
iowrite8(1, vport + DSP_RESET);
udelay(10);
@@ -281,7 +281,7 @@ static int __devinit sc6000_dsp_reset(char __iomem *vport)
}
/* detection and initialization */
-static int __devinit sc6000_hw_cfg_write(char __iomem *vport, const int *cfg)
+static int sc6000_hw_cfg_write(char __iomem *vport, const int *cfg)
{
if (sc6000_write(vport, COMMAND_6C) < 0) {
snd_printk(KERN_WARNING "CMD 0x%x: failed!\n", COMMAND_6C);
@@ -345,8 +345,8 @@ static int sc6000_setup_board(char __iomem *vport, int config)
return 0;
}
-static int __devinit sc6000_init_mss(char __iomem *vport, int config,
- char __iomem *vmss_port, int mss_config)
+static int sc6000_init_mss(char __iomem *vport, int config,
+ char __iomem *vmss_port, int mss_config)
{
if (sc6000_write(vport, DSP_INIT_MSS)) {
snd_printk(KERN_ERR "sc6000_init_mss [0x%x]: failed!\n",
@@ -364,9 +364,9 @@ static int __devinit sc6000_init_mss(char __iomem *vport, int config,
return 0;
}
-static void __devinit sc6000_hw_cfg_encode(char __iomem *vport, int *cfg,
- long xport, long xmpu,
- long xmss_port, int joystick)
+static void sc6000_hw_cfg_encode(char __iomem *vport, int *cfg,
+ long xport, long xmpu,
+ long xmss_port, int joystick)
{
cfg[0] = 0;
cfg[1] = 0;
@@ -386,8 +386,8 @@ static void __devinit sc6000_hw_cfg_encode(char __iomem *vport, int *cfg,
snd_printd("hw cfg %x, %x\n", cfg[0], cfg[1]);
}
-static int __devinit sc6000_init_board(char __iomem *vport,
- char __iomem *vmss_port, int dev)
+static int sc6000_init_board(char __iomem *vport,
+ char __iomem *vmss_port, int dev)
{
char answer[15];
char version[2];
@@ -467,7 +467,7 @@ static int __devinit sc6000_init_board(char __iomem *vport,
return 0;
}
-static int __devinit snd_sc6000_mixer(struct snd_wss *chip)
+static int snd_sc6000_mixer(struct snd_wss *chip)
{
struct snd_card *card = chip->card;
struct snd_ctl_elem_id id1, id2;
@@ -502,7 +502,7 @@ static int __devinit snd_sc6000_mixer(struct snd_wss *chip)
return 0;
}
-static int __devinit snd_sc6000_match(struct device *devptr, unsigned int dev)
+static int snd_sc6000_match(struct device *devptr, unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -545,7 +545,7 @@ static int __devinit snd_sc6000_match(struct device *devptr, unsigned int dev)
return 1;
}
-static int __devinit snd_sc6000_probe(struct device *devptr, unsigned int dev)
+static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
{
static int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
static int possible_dmas[] = { 1, 3, 0, -1 };
@@ -687,7 +687,7 @@ err_exit:
return err;
}
-static int __devexit snd_sc6000_remove(struct device *devptr, unsigned int dev)
+static int snd_sc6000_remove(struct device *devptr, unsigned int dev)
{
struct snd_card *card = dev_get_drvdata(devptr);
char __iomem **vport = card->private_data;
@@ -706,7 +706,7 @@ static int __devexit snd_sc6000_remove(struct device *devptr, unsigned int dev)
static struct isa_driver snd_sc6000_driver = {
.match = snd_sc6000_match,
.probe = snd_sc6000_probe,
- .remove = __devexit_p(snd_sc6000_remove),
+ .remove = snd_sc6000_remove,
/* FIXME: suspend/resume */
.driver = {
.name = DRV_NAME,
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 8490f59709b..42a009720b2 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -683,7 +683,7 @@ static struct snd_kcontrol_new midi_mixer_ctl = {
* These IRQs are encoded as bit patterns so that they can be
* written to the control registers.
*/
-static unsigned __devinit get_irq_config(int sscape_type, int irq)
+static unsigned get_irq_config(int sscape_type, int irq)
{
static const int valid_irq[] = { 9, 5, 7, 10 };
static const int old_irq[] = { 9, 7, 5, 15 };
@@ -706,7 +706,7 @@ static unsigned __devinit get_irq_config(int sscape_type, int irq)
* Perform certain arcane port-checks to see whether there
* is a SoundScape board lurking behind the given ports.
*/
-static int __devinit detect_sscape(struct soundscape *s, long wss_io)
+static int detect_sscape(struct soundscape *s, long wss_io)
{
unsigned long flags;
unsigned d;
@@ -817,8 +817,8 @@ static int mpu401_open(struct snd_mpu401 *mpu)
/*
 * Initialise an MPU-401 subdevice for MIDI support on the SoundScape.
*/
-static int __devinit create_mpu401(struct snd_card *card, int devnum,
- unsigned long port, int irq)
+static int create_mpu401(struct snd_card *card, int devnum,
+ unsigned long port, int irq)
{
struct soundscape *sscape = get_card_soundscape(card);
struct snd_rawmidi *rawmidi;
@@ -845,8 +845,8 @@ static int __devinit create_mpu401(struct snd_card *card, int devnum,
* try to support at least some of the extra bits by overriding
* some of the CS4231 callback.
*/
-static int __devinit create_ad1845(struct snd_card *card, unsigned port,
- int irq, int dma1, int dma2)
+static int create_ad1845(struct snd_card *card, unsigned port,
+ int irq, int dma1, int dma2)
{
register struct soundscape *sscape = get_card_soundscape(card);
struct snd_wss *chip;
@@ -937,7 +937,7 @@ _error:
* Create an ALSA soundcard entry for the SoundScape, using
* the given list of port, IRQ and DMA resources.
*/
-static int __devinit create_sscape(int dev, struct snd_card *card)
+static int create_sscape(int dev, struct snd_card *card)
{
struct soundscape *sscape = get_card_soundscape(card);
unsigned dma_cfg;
@@ -1143,7 +1143,7 @@ _release_region:
}
-static int __devinit snd_sscape_match(struct device *pdev, unsigned int i)
+static int snd_sscape_match(struct device *pdev, unsigned int i)
{
/*
* Make sure we were given ALL of the other parameters.
@@ -1163,7 +1163,7 @@ static int __devinit snd_sscape_match(struct device *pdev, unsigned int i)
return 1;
}
-static int __devinit snd_sscape_probe(struct device *pdev, unsigned int dev)
+static int snd_sscape_probe(struct device *pdev, unsigned int dev)
{
struct snd_card *card;
struct soundscape *sscape;
@@ -1197,7 +1197,7 @@ _release_card:
return ret;
}
-static int __devexit snd_sscape_remove(struct device *devptr, unsigned int dev)
+static int snd_sscape_remove(struct device *devptr, unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -1209,7 +1209,7 @@ static int __devexit snd_sscape_remove(struct device *devptr, unsigned int dev)
static struct isa_driver snd_sscape_driver = {
.match = snd_sscape_match,
.probe = snd_sscape_probe,
- .remove = __devexit_p(snd_sscape_remove),
+ .remove = snd_sscape_remove,
/* FIXME: suspend/resume */
.driver = {
.name = DEV_NAME
@@ -1217,7 +1217,7 @@ static struct isa_driver snd_sscape_driver = {
};
#ifdef CONFIG_PNP
-static inline int __devinit get_next_autoindex(int i)
+static inline int get_next_autoindex(int i)
{
while (i < SNDRV_CARDS && port[i] != SNDRV_AUTO_PORT)
++i;
@@ -1225,8 +1225,8 @@ static inline int __devinit get_next_autoindex(int i)
}
-static int __devinit sscape_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int sscape_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int idx = 0;
struct pnp_dev *dev;
@@ -1310,7 +1310,7 @@ _release_card:
return ret;
}
-static void __devexit sscape_pnp_remove(struct pnp_card_link * pcard)
+static void sscape_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -1321,7 +1321,7 @@ static struct pnp_card_driver sscape_pnpc_driver = {
.name = "sscape",
.id_table = sscape_pnpids,
.probe = sscape_pnp_detect,
- .remove = __devexit_p(sscape_pnp_remove),
+ .remove = sscape_pnp_remove,
};
#endif /* CONFIG_PNP */
diff --git a/sound/isa/wavefront/wavefront.c b/sound/isa/wavefront/wavefront.c
index e0a73271cb9..fe5dd982bd2 100644
--- a/sound/isa/wavefront/wavefront.c
+++ b/sound/isa/wavefront/wavefront.c
@@ -98,7 +98,7 @@ static struct pnp_card_device_id snd_wavefront_pnpids[] = {
MODULE_DEVICE_TABLE(pnp_card, snd_wavefront_pnpids);
-static int __devinit
+static int
snd_wavefront_pnp (int dev, snd_wavefront_card_t *acard, struct pnp_card_link *card,
const struct pnp_card_device_id *id)
{
@@ -231,10 +231,9 @@ static irqreturn_t snd_wavefront_ics2115_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct snd_hwdep * __devinit
-snd_wavefront_new_synth (struct snd_card *card,
- int hw_dev,
- snd_wavefront_card_t *acard)
+static struct snd_hwdep *snd_wavefront_new_synth(struct snd_card *card,
+ int hw_dev,
+ snd_wavefront_card_t *acard)
{
struct snd_hwdep *wavefront_synth;
@@ -257,11 +256,10 @@ snd_wavefront_new_synth (struct snd_card *card,
return wavefront_synth;
}
-static struct snd_hwdep * __devinit
-snd_wavefront_new_fx (struct snd_card *card,
- int hw_dev,
- snd_wavefront_card_t *acard,
- unsigned long port)
+static struct snd_hwdep *snd_wavefront_new_fx(struct snd_card *card,
+ int hw_dev,
+ snd_wavefront_card_t *acard,
+ unsigned long port)
{
struct snd_hwdep *fx_processor;
@@ -284,12 +282,11 @@ snd_wavefront_new_fx (struct snd_card *card,
static snd_wavefront_mpu_id internal_id = internal_mpu;
static snd_wavefront_mpu_id external_id = external_mpu;
-static struct snd_rawmidi *__devinit
-snd_wavefront_new_midi (struct snd_card *card,
- int midi_dev,
- snd_wavefront_card_t *acard,
- unsigned long port,
- snd_wavefront_mpu_id mpu)
+static struct snd_rawmidi *snd_wavefront_new_midi(struct snd_card *card,
+ int midi_dev,
+ snd_wavefront_card_t *acard,
+ unsigned long port,
+ snd_wavefront_mpu_id mpu)
{
struct snd_rawmidi *rmidi;
@@ -361,7 +358,7 @@ static int snd_wavefront_card_new(int dev, struct snd_card **cardp)
return 0;
}
-static int __devinit
+static int
snd_wavefront_probe (struct snd_card *card, int dev)
{
snd_wavefront_card_t *acard = card->private_data;
@@ -541,8 +538,8 @@ snd_wavefront_probe (struct snd_card *card, int dev)
return snd_card_register(card);
}
-static int __devinit snd_wavefront_isa_match(struct device *pdev,
- unsigned int dev)
+static int snd_wavefront_isa_match(struct device *pdev,
+ unsigned int dev)
{
if (!enable[dev])
return 0;
@@ -561,8 +558,8 @@ static int __devinit snd_wavefront_isa_match(struct device *pdev,
return 1;
}
-static int __devinit snd_wavefront_isa_probe(struct device *pdev,
- unsigned int dev)
+static int snd_wavefront_isa_probe(struct device *pdev,
+ unsigned int dev)
{
struct snd_card *card;
int err;
@@ -580,8 +577,8 @@ static int __devinit snd_wavefront_isa_probe(struct device *pdev,
return 0;
}
-static int __devexit snd_wavefront_isa_remove(struct device *devptr,
- unsigned int dev)
+static int snd_wavefront_isa_remove(struct device *devptr,
+ unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
@@ -593,7 +590,7 @@ static int __devexit snd_wavefront_isa_remove(struct device *devptr,
static struct isa_driver snd_wavefront_driver = {
.match = snd_wavefront_isa_match,
.probe = snd_wavefront_isa_probe,
- .remove = __devexit_p(snd_wavefront_isa_remove),
+ .remove = snd_wavefront_isa_remove,
/* FIXME: suspend, resume */
.driver = {
.name = DEV_NAME
@@ -602,8 +599,8 @@ static struct isa_driver snd_wavefront_driver = {
#ifdef CONFIG_PNP
-static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
- const struct pnp_card_device_id *pid)
+static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
+ const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
@@ -637,7 +634,7 @@ static int __devinit snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
return 0;
}
-static void __devexit snd_wavefront_pnp_remove(struct pnp_card_link * pcard)
+static void snd_wavefront_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
@@ -648,7 +645,7 @@ static struct pnp_card_driver wavefront_pnpc_driver = {
.name = "wavefront",
.id_table = snd_wavefront_pnpids,
.probe = snd_wavefront_pnp_detect,
- .remove = __devexit_p(snd_wavefront_pnp_remove),
+ .remove = snd_wavefront_pnp_remove,
/* FIXME: suspend,resume */
};
diff --git a/sound/isa/wavefront/wavefront_fx.c b/sound/isa/wavefront/wavefront_fx.c
index e51e0906050..b77883c7ee7 100644
--- a/sound/isa/wavefront/wavefront_fx.c
+++ b/sound/isa/wavefront/wavefront_fx.c
@@ -240,7 +240,7 @@ snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
that outputs it.
*/
-int __devinit
+int
snd_wavefront_fx_start (snd_wavefront_t *dev)
{
unsigned int i;
diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c
index 65329f3abc3..7dc99168229 100644
--- a/sound/isa/wavefront/wavefront_midi.c
+++ b/sound/isa/wavefront/wavefront_midi.c
@@ -481,7 +481,7 @@ snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *card)
spin_unlock_irqrestore (&card->wavefront.midi.virtual, flags);
}
-int __devinit
+int
snd_wavefront_midi_start (snd_wavefront_card_t *card)
{
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index b1bf8d4e649..a2f87f9488e 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1739,7 +1739,7 @@ snd_wavefront_internal_interrupt (snd_wavefront_card_t *card)
7 Unused
*/
-static int __devinit
+static int
snd_wavefront_interrupt_bits (int irq)
{
@@ -1767,7 +1767,7 @@ snd_wavefront_interrupt_bits (int irq)
return bits;
}
-static void __devinit
+static void
wavefront_should_cause_interrupt (snd_wavefront_t *dev,
int val, int port, unsigned long timeout)
@@ -1786,7 +1786,7 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev,
}
}
-static int __devinit
+static int
wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
{
@@ -1937,7 +1937,7 @@ wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
return (1);
}
-static int __devinit
+static int
wavefront_download_firmware (snd_wavefront_t *dev, char *path)
{
@@ -2010,7 +2010,7 @@ wavefront_download_firmware (snd_wavefront_t *dev, char *path)
}
-static int __devinit
+static int
wavefront_do_reset (snd_wavefront_t *dev)
{
@@ -2099,7 +2099,7 @@ wavefront_do_reset (snd_wavefront_t *dev)
return 1;
}
-int __devinit
+int
snd_wavefront_start (snd_wavefront_t *dev)
{
@@ -2141,7 +2141,7 @@ snd_wavefront_start (snd_wavefront_t *dev)
return (0);
}
-int __devinit
+int
snd_wavefront_detect (snd_wavefront_card_t *card)
{
diff --git a/sound/mips/au1x00.c b/sound/mips/au1x00.c
index 3f3ec0bec06..224f54be15a 100644
--- a/sound/mips/au1x00.c
+++ b/sound/mips/au1x00.c
@@ -439,7 +439,7 @@ static struct snd_pcm_ops snd_card_au1000_capture_ops = {
.pointer = snd_au1000_pointer,
};
-static int __devinit
+static int
snd_au1000_pcm_new(struct snd_au1000 *au1000)
{
struct snd_pcm *pcm;
@@ -552,7 +552,7 @@ get the interrupt driven case to work efficiently */
spin_unlock(&au1000->ac97_lock);
}
-static int __devinit
+static int
snd_au1000_ac97_new(struct snd_au1000 *au1000)
{
int err;
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 5f88d1f09ff..7420c59444a 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -260,7 +260,7 @@ static int hal2_gain_put(struct snd_kcontrol *kcontrol,
return old != new;
}
-static struct snd_kcontrol_new hal2_ctrl_headphone __devinitdata = {
+static struct snd_kcontrol_new hal2_ctrl_headphone = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -270,7 +270,7 @@ static struct snd_kcontrol_new hal2_ctrl_headphone __devinitdata = {
.put = hal2_gain_put,
};
-static struct snd_kcontrol_new hal2_ctrl_mic __devinitdata = {
+static struct snd_kcontrol_new hal2_ctrl_mic = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Capture Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -280,7 +280,7 @@ static struct snd_kcontrol_new hal2_ctrl_mic __devinitdata = {
.put = hal2_gain_put,
};
-static int __devinit hal2_mixer_create(struct snd_hal2 *hal2)
+static int hal2_mixer_create(struct snd_hal2 *hal2)
{
int err;
@@ -733,7 +733,7 @@ static struct snd_pcm_ops hal2_capture_ops = {
.ack = hal2_capture_ack,
};
-static int __devinit hal2_pcm_create(struct snd_hal2 *hal2)
+static int hal2_pcm_create(struct snd_hal2 *hal2)
{
struct snd_pcm *pcm;
int err;
@@ -874,7 +874,7 @@ static int hal2_create(struct snd_card *card, struct snd_hal2 **rchip)
return 0;
}
-static int __devinit hal2_probe(struct platform_device *pdev)
+static int hal2_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct snd_hal2 *chip;
@@ -917,7 +917,7 @@ static int __devinit hal2_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit hal2_remove(struct platform_device *pdev)
+static int hal2_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
@@ -928,7 +928,7 @@ static int __devexit hal2_remove(struct platform_device *pdev)
static struct platform_driver hal2_driver = {
.probe = hal2_probe,
- .remove = __devexit_p(hal2_remove),
+ .remove = hal2_remove,
.driver = {
.name = "sgihal2",
.owner = THIS_MODULE,
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index ceaa593ea4e..01a03efdc8b 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -237,7 +237,7 @@ static int sgio2audio_source_put(struct snd_kcontrol *kcontrol,
}
/* dac1/pcm0 mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 0,
@@ -249,7 +249,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 __devinitdata = {
};
/* dac2/pcm1 mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 1,
@@ -261,7 +261,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 __devinitdata = {
};
/* record level mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_reclevel __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_reclevel = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -272,7 +272,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_reclevel __devinitdata = {
};
/* record level source control */
-static struct snd_kcontrol_new sgio2audio_ctrl_recsource __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_recsource = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -282,7 +282,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_recsource __devinitdata = {
};
/* line mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_line __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_line = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Playback Volume",
.index = 0,
@@ -294,7 +294,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_line __devinitdata = {
};
/* cd mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_cd __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_cd = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Playback Volume",
.index = 1,
@@ -306,7 +306,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_cd __devinitdata = {
};
/* mic mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_mic __devinitdata = {
+static struct snd_kcontrol_new sgio2audio_ctrl_mic = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -317,7 +317,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_mic __devinitdata = {
};
-static int __devinit snd_sgio2audio_new_mixer(struct snd_sgio2audio *chip)
+static int snd_sgio2audio_new_mixer(struct snd_sgio2audio *chip)
{
int err;
@@ -726,7 +726,7 @@ static struct snd_pcm_ops snd_sgio2audio_capture_ops = {
*/
/* create a pcm device */
-static int __devinit snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
+static int snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
{
struct snd_pcm *pcm;
int err;
@@ -834,8 +834,8 @@ static struct snd_device_ops ops = {
.dev_free = snd_sgio2audio_dev_free,
};
-static int __devinit snd_sgio2audio_create(struct snd_card *card,
- struct snd_sgio2audio **rchip)
+static int snd_sgio2audio_create(struct snd_card *card,
+ struct snd_sgio2audio **rchip)
{
struct snd_sgio2audio *chip;
int i, err;
@@ -914,7 +914,7 @@ static int __devinit snd_sgio2audio_create(struct snd_card *card,
return 0;
}
-static int __devinit snd_sgio2audio_probe(struct platform_device *pdev)
+static int snd_sgio2audio_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct snd_sgio2audio *chip;
@@ -958,7 +958,7 @@ static int __devinit snd_sgio2audio_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit snd_sgio2audio_remove(struct platform_device *pdev)
+static int snd_sgio2audio_remove(struct platform_device *pdev)
{
struct snd_card *card = platform_get_drvdata(pdev);
@@ -969,7 +969,7 @@ static int __devexit snd_sgio2audio_remove(struct platform_device *pdev)
static struct platform_driver sgio2audio_driver = {
.probe = snd_sgio2audio_probe,
- .remove = __devexit_p(snd_sgio2audio_remove),
+ .remove = snd_sgio2audio_remove,
.driver = {
.name = "sgio2audio",
.owner = THIS_MODULE,
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 98d23bdcaf2..4918b7145b7 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -2864,7 +2864,7 @@ static struct {
{NULL}
};
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{ ISAPNP_VENDOR('C','M','I'), ISAPNP_DEVICE(0x0001),
ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c
index 52d06a334e8..2a44cc10645 100644
--- a/sound/oss/kahlua.c
+++ b/sound/oss/kahlua.c
@@ -43,7 +43,7 @@
* not real hardware.
*/
-static u8 __devinit mixer_read(unsigned long io, u8 reg)
+static u8 mixer_read(unsigned long io, u8 reg)
{
outb(reg, io + 4);
udelay(20);
@@ -52,7 +52,7 @@ static u8 __devinit mixer_read(unsigned long io, u8 reg)
return reg;
}
-static int __devinit probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct address_info *hw_config;
unsigned long base;
@@ -183,7 +183,7 @@ err_out_free:
return 1;
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct address_info *hw_config = pci_get_drvdata(pdev);
sb_dsp_unload(hw_config, 0);
@@ -210,7 +210,7 @@ static struct pci_driver kahlua_driver = {
.name = "kahlua",
.id_table = id_tbl,
.probe = probe_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
};
@@ -220,7 +220,7 @@ static int __init kahlua_init_module(void)
return pci_register_driver(&kahlua_driver);
}
-static void __devexit kahlua_cleanup_module(void)
+static void kahlua_cleanup_module(void)
{
pci_unregister_driver(&kahlua_driver);
}
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
index b2b3c014221..048439a1600 100644
--- a/sound/oss/sb_audio.c
+++ b/sound/oss/sb_audio.c
@@ -442,7 +442,7 @@ static int sb201_audio_set_speed(int dev, int speed)
{
sb_devc *devc = audio_devs[dev]->devc;
int tmp;
- int s = speed * devc->channels;
+ int s;
if (speed > 0)
{
@@ -452,6 +452,7 @@ static int sb201_audio_set_speed(int dev, int speed)
speed = 44100;
if (devc->opened & OPEN_READ && speed > 15000)
speed = 15000;
+ s = speed * devc->channels;
devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
tmp = 256 - devc->tconst;
speed = ((1000000 + tmp / 2) / tmp) / devc->channels;
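The hunk above moves the s = speed * devc->channels computation below the rate clamping, so the time constant is derived from the rate the hardware will actually use. A standalone userspace sketch of that arithmetic, with the 44100 and 15000 limits taken from the visible context and everything else hypothetical:

#include <stdio.h>

static int sb201_tconst(int speed, int channels, int opened_for_capture)
{
	if (speed > 44100)
		speed = 44100;
	if (opened_for_capture && speed > 15000)
		speed = 15000;

	int s = speed * channels;	/* computed only after clamping */
	return (256 - ((1000000 + s / 2) / s)) & 0xff;
}

int main(void)
{
	/* a 96000 Hz stereo request is clamped to 44100 Hz before use */
	printf("tconst = 0x%02x\n", sb201_tconst(96000, 2, 0));
	return 0;
}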
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index f47f9e226b0..0e66ba48d45 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -856,7 +856,7 @@ static struct snd_kcontrol_new snd_harmony_controls[] = {
HARMONY_GAIN_HE_SHIFT, 1, 0),
};
-static void __devinit
+static void
snd_harmony_mixer_reset(struct snd_harmony *h)
{
harmony_mute(h);
@@ -865,7 +865,7 @@ snd_harmony_mixer_reset(struct snd_harmony *h)
harmony_unmute(h);
}
-static int __devinit
+static int
snd_harmony_mixer_init(struct snd_harmony *h)
{
struct snd_card *card;
@@ -915,7 +915,7 @@ snd_harmony_dev_free(struct snd_device *dev)
return snd_harmony_free(h);
}
-static int __devinit
+static int
snd_harmony_create(struct snd_card *card,
struct parisc_device *padev,
struct snd_harmony **rchip)
@@ -972,7 +972,7 @@ free_and_ret:
return err;
}
-static int __devinit
+static int
snd_harmony_probe(struct parisc_device *padev)
{
int err;
@@ -1012,7 +1012,7 @@ free_and_ret:
return err;
}
-static int __devexit
+static int
snd_harmony_remove(struct parisc_device *padev)
{
snd_card_free(parisc_get_drvdata(padev));
@@ -1024,7 +1024,7 @@ static struct parisc_driver snd_harmony_driver = {
.name = "harmony",
.id_table = snd_harmony_devtable,
.probe = snd_harmony_probe,
- .remove = __devexit_p(snd_harmony_remove),
+ .remove = snd_harmony_remove,
};
static int __init
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index f99fa251228..947cfb4eb30 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -572,6 +572,7 @@ source "sound/pci/hda/Kconfig"
config SND_HDSP
tristate "RME Hammerfall DSP Audio"
+ select FW_LOADER
select SND_HWDEP
select SND_RAWMIDI
select SND_PCM
@@ -630,7 +631,7 @@ config SND_ICE1724
AudioTrak Prodigy 192, 7.1 (HIFI/LT/XT), HD2; Hercules
Fortissimo IV; ESI Juli@; Pontis MS300; EGO-SYS WaveTerminal
192M; Albatron K8X800 Pro II; Chaintech ZNF3-150/250, 9CJS,
- AV-710; Shuttle SN25P.
+ AV-710; Shuttle SN25P; Philips PSC724 Ultimate Edge.
To compile this driver as a module, choose M here: the module
will be called snd-ice1724.
@@ -707,6 +708,7 @@ config SND_MAESTRO3_INPUT
config SND_MIXART
tristate "Digigram miXart"
+ select FW_LOADER
select SND_HWDEP
select SND_PCM
help
@@ -727,6 +729,7 @@ config SND_NM256
config SND_PCXHR
tristate "Digigram PCXHR"
+ select FW_LOADER
select SND_PCM
select SND_HWDEP
help
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index e672ff4df2d..ad8a3117393 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -624,7 +624,7 @@ snd_ad1889_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit
+static int
snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device, struct snd_pcm **rpcm)
{
int err;
@@ -747,7 +747,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
snd_iprintf(buffer, "Resampler samplerate: %u Hz\n", reg);
}
-static void __devinit
+static void
snd_ad1889_proc_init(struct snd_ad1889 *chip)
{
struct snd_info_entry *entry;
@@ -767,7 +767,7 @@ static struct ac97_quirk ac97_quirks[] = {
{ } /* terminator */
};
-static void __devinit
+static void
snd_ad1889_ac97_xinit(struct snd_ad1889 *chip)
{
u16 reg;
@@ -805,7 +805,7 @@ snd_ad1889_ac97_free(struct snd_ac97 *ac97)
chip->ac97 = NULL;
}
-static int __devinit
+static int
snd_ad1889_ac97_init(struct snd_ad1889 *chip, const char *quirk_override)
{
int err;
@@ -878,7 +878,7 @@ snd_ad1889_dev_free(struct snd_device *device)
return snd_ad1889_free(chip);
}
-static int __devinit
+static int
snd_ad1889_init(struct snd_ad1889 *chip)
{
ad1889_writew(chip, AD_DS_CCS, AD_DS_CCS_CLKEN); /* turn on clock */
@@ -892,7 +892,7 @@ snd_ad1889_init(struct snd_ad1889 *chip)
return 0;
}
-static int __devinit
+static int
snd_ad1889_create(struct snd_card *card,
struct pci_dev *pci,
struct snd_ad1889 **rchip)
@@ -978,7 +978,7 @@ free_and_ret:
return err;
}
-static int __devinit
+static int
snd_ad1889_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -1042,7 +1042,7 @@ free_and_ret:
return err;
}
-static void __devexit
+static void
snd_ad1889_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
@@ -1059,7 +1059,7 @@ static struct pci_driver ad1889_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ad1889_ids,
.probe = snd_ad1889_probe,
- .remove = __devexit_p(snd_ad1889_remove),
+ .remove = snd_ad1889_remove,
};
module_pci_driver(ad1889_pci_driver);
diff --git a/sound/pci/ak4531_codec.c b/sound/pci/ak4531_codec.c
index cadf7b962e3..3bf0dc53360 100644
--- a/sound/pci/ak4531_codec.c
+++ b/sound/pci/ak4531_codec.c
@@ -274,7 +274,7 @@ static const DECLARE_TLV_DB_SCALE(db_scale_master, -6200, 200, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_mono, -2800, 400, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_input, -5000, 200, 0);
-static struct snd_kcontrol_new snd_ak4531_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ak4531_controls[] = {
AK4531_DOUBLE_TLV("Master Playback Switch", 0,
AK4531_LMASTER, AK4531_RMASTER, 7, 7, 1, 1,
@@ -383,9 +383,9 @@ static u8 snd_ak4531_initial_map[0x19 + 1] = {
0x01 /* 19: Mic Amp Setup */
};
-int __devinit snd_ak4531_mixer(struct snd_card *card,
- struct snd_ak4531 *_ak4531,
- struct snd_ak4531 **rak4531)
+int snd_ak4531_mixer(struct snd_card *card,
+ struct snd_ak4531 *_ak4531,
+ struct snd_ak4531 **rak4531)
{
unsigned int idx;
int err;
@@ -483,7 +483,7 @@ static void snd_ak4531_proc_read(struct snd_info_entry *entry,
ak4531->regs[AK4531_MIC_GAIN] & 1 ? "+30dB" : "+0dB");
}
-static void __devinit
+static void
snd_ak4531_proc_init(struct snd_card *card, struct snd_ak4531 *ak4531)
{
struct snd_info_entry *entry;
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index c7e3c533316..136a393b70a 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1678,8 +1678,8 @@ static void snd_ali_pcm_free(struct snd_pcm *pcm)
}
-static int __devinit snd_ali_pcm(struct snd_ali * codec, int device,
- struct ali_pcm_description *desc)
+static int snd_ali_pcm(struct snd_ali *codec, int device,
+ struct ali_pcm_description *desc)
{
struct snd_pcm *pcm;
int err;
@@ -1727,7 +1727,7 @@ static struct ali_pcm_description ali_pcms[] = {
}
};
-static int __devinit snd_ali_build_pcms(struct snd_ali *codec)
+static int snd_ali_build_pcms(struct snd_ali *codec)
{
int i, err;
for (i = 0; i < codec->num_of_codecs && i < ARRAY_SIZE(ali_pcms); i++) {
@@ -1832,7 +1832,7 @@ static int snd_ali5451_spdif_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ali5451_mixer_spdif[] __devinitdata = {
+static struct snd_kcontrol_new snd_ali5451_mixer_spdif[] = {
/* spdif aplayback switch */
/* FIXME: "IEC958 Playback Switch" may conflict with one on ac97_codec */
ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH), 0, 0),
@@ -1842,7 +1842,7 @@ static struct snd_kcontrol_new snd_ali5451_mixer_spdif[] __devinitdata = {
ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, 2)
};
-static int __devinit snd_ali_mixer(struct snd_ali * codec)
+static int snd_ali_mixer(struct snd_ali *codec)
{
struct snd_ac97_template ac97;
unsigned int idx;
@@ -2079,14 +2079,14 @@ static void snd_ali_proc_read(struct snd_info_entry *entry,
snd_iprintf(buf, "%02x: %08x\n", i, inl(ALI_REG(codec, i)));
}
-static void __devinit snd_ali_proc_init(struct snd_ali *codec)
+static void snd_ali_proc_init(struct snd_ali *codec)
{
struct snd_info_entry *entry;
if (!snd_card_proc_new(codec->card, "ali5451", &entry))
snd_info_set_text_ops(entry, codec, snd_ali_proc_read);
}
-static int __devinit snd_ali_resources(struct snd_ali *codec)
+static int snd_ali_resources(struct snd_ali *codec)
{
int err;
@@ -2112,11 +2112,11 @@ static int snd_ali_dev_free(struct snd_device *device)
return 0;
}
-static int __devinit snd_ali_create(struct snd_card *card,
- struct pci_dev *pci,
- int pcm_streams,
- int spdif_support,
- struct snd_ali ** r_ali)
+static int snd_ali_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int pcm_streams,
+ int spdif_support,
+ struct snd_ali **r_ali)
{
struct snd_ali *codec;
int i, err;
@@ -2246,8 +2246,8 @@ static int __devinit snd_ali_create(struct snd_card *card,
return 0;
}
-static int __devinit snd_ali_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_ali_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct snd_ali *codec;
@@ -2295,7 +2295,7 @@ static int __devinit snd_ali_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_ali_remove(struct pci_dev *pci)
+static void snd_ali_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2305,7 +2305,7 @@ static struct pci_driver ali5451_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ali_ids,
.probe = snd_ali_probe,
- .remove = __devexit_p(snd_ali_remove),
+ .remove = snd_ali_remove,
.driver = {
.pm = ALI_PM_OPS,
},
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 5af3cb6b0c1..864c4310366 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -278,7 +278,7 @@ static irqreturn_t snd_als300plus_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devexit snd_als300_remove(struct pci_dev *pci)
+static void snd_als300_remove(struct pci_dev *pci)
{
snd_als300_dbgcallenter();
snd_card_free(pci_get_drvdata(pci));
@@ -622,7 +622,7 @@ static struct snd_pcm_ops snd_als300_capture_ops = {
.pointer = snd_als300_pointer,
};
-static int __devinit snd_als300_new_pcm(struct snd_als300 *chip)
+static int snd_als300_new_pcm(struct snd_als300 *chip)
{
struct snd_pcm *pcm;
int err;
@@ -683,9 +683,9 @@ static void snd_als300_init(struct snd_als300 *chip)
snd_als300_dbgcallleave();
}
-static int __devinit snd_als300_create(struct snd_card *card,
- struct pci_dev *pci, int chip_type,
- struct snd_als300 **rchip)
+static int snd_als300_create(struct snd_card *card,
+ struct pci_dev *pci, int chip_type,
+ struct snd_als300 **rchip)
{
struct snd_als300 *chip;
void *irq_handler;
@@ -815,7 +815,7 @@ static SIMPLE_DEV_PM_OPS(snd_als300_pm, snd_als300_suspend, snd_als300_resume);
#define SND_ALS300_PM_OPS NULL
#endif
-static int __devinit snd_als300_probe(struct pci_dev *pci,
+static int snd_als300_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
@@ -867,7 +867,7 @@ static struct pci_driver als300_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_als300_ids,
.probe = snd_als300_probe,
- .remove = __devexit_p(snd_als300_remove),
+ .remove = snd_als300_remove,
.driver = {
.pm = SND_ALS300_PM_OPS,
},
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index feb2a143683..61efda2a4d9 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -694,7 +694,7 @@ static struct snd_pcm_ops snd_als4000_capture_ops = {
.pointer = snd_als4000_capture_pointer
};
-static int __devinit snd_als4000_pcm(struct snd_sb *chip, int device)
+static int snd_als4000_pcm(struct snd_sb *chip, int device)
{
struct snd_pcm *pcm;
int err;
@@ -770,7 +770,7 @@ static void snd_als4000_configure(struct snd_sb *chip)
}
#ifdef SUPPORT_JOYSTICK
-static int __devinit snd_als4000_create_gameport(struct snd_card_als4000 *acard, int dev)
+static int snd_als4000_create_gameport(struct snd_card_als4000 *acard, int dev)
{
struct gameport *gp;
struct resource *r;
@@ -847,8 +847,8 @@ static void snd_card_als4000_free( struct snd_card *card )
pci_disable_device(acard->pci);
}
-static int __devinit snd_card_als4000_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_card_als4000_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -981,7 +981,7 @@ out:
return err;
}
-static void __devexit snd_card_als4000_remove(struct pci_dev *pci)
+static void snd_card_als4000_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1046,7 +1046,7 @@ static struct pci_driver als4000_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_als4000_ids,
.probe = snd_card_als4000_probe,
- .remove = __devexit_p(snd_card_als4000_remove),
+ .remove = snd_card_als4000_remove,
.driver = {
.pm = SND_ALS4000_PM_OPS,
},
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index eedc017c1cd..3536b076b52 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1235,8 +1235,7 @@ static struct snd_pcm_ops snd_card_asihpi_capture_mmap_ops = {
.pointer = snd_card_asihpi_capture_pointer,
};
-static int __devinit snd_card_asihpi_pcm_new(
- struct snd_card_asihpi *asihpi, int device)
+static int snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi, int device)
{
struct snd_pcm *pcm;
int err;
@@ -1497,8 +1496,8 @@ static int snd_asihpi_volume_mute_put(struct snd_kcontrol *kcontrol,
return change;
}
-static int __devinit snd_asihpi_volume_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_volume_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -1593,8 +1592,8 @@ static int snd_asihpi_level_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(db_scale_level, -1000, 100, 0);
-static int __devinit snd_asihpi_level_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_level_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -1715,8 +1714,8 @@ static int snd_asihpi_aesebu_rxstatus_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int __devinit snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_aesebu_rx_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -1753,8 +1752,8 @@ static int snd_asihpi_aesebu_tx_format_put(struct snd_kcontrol *kcontrol,
}
-static int __devinit snd_asihpi_aesebu_tx_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_aesebu_tx_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -1996,8 +1995,8 @@ static int snd_asihpi_tuner_freq_put(struct snd_kcontrol *kcontrol,
}
/* Tuner control group initializer */
-static int __devinit snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_tuner_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -2100,8 +2099,8 @@ static int snd_asihpi_meter_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int __devinit snd_asihpi_meter_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl, int subidx)
+static int snd_asihpi_meter_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl, int subidx)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -2214,8 +2213,8 @@ static int snd_asihpi_mux_put(struct snd_kcontrol *kcontrol,
}
-static int __devinit snd_asihpi_mux_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_mux_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -2303,8 +2302,8 @@ static int snd_asihpi_cmode_put(struct snd_kcontrol *kcontrol,
}
-static int __devinit snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_cmode_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -2471,8 +2470,8 @@ static int snd_asihpi_clkrate_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
- struct hpi_control *hpi_ctl)
+static int snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
+ struct hpi_control *hpi_ctl)
{
struct snd_card *card = asihpi->card;
struct snd_kcontrol_new snd_control;
@@ -2548,7 +2547,7 @@ static int __devinit snd_asihpi_sampleclock_add(struct snd_card_asihpi *asihpi,
Mixer
------------------------------------------------------------*/
-static int __devinit snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
+static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi)
{
struct snd_card *card = asihpi->card;
unsigned int idx = 0;
@@ -2722,7 +2721,7 @@ snd_asihpi_proc_read(struct snd_info_entry *entry,
}
}
-static void __devinit snd_asihpi_proc_init(struct snd_card_asihpi *asihpi)
+static void snd_asihpi_proc_init(struct snd_card_asihpi *asihpi)
{
struct snd_info_entry *entry;
@@ -2764,8 +2763,8 @@ static int snd_asihpi_hpi_ioctl(struct snd_hwdep *hw, struct file *file,
/* results in /dev/snd/hwC#D0 file for each card with index #
also /proc/asound/hwdep will contain '#-00: asihpi (HPI) for each card'
*/
-static int __devinit snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
- int device, struct snd_hwdep **rhwdep)
+static int snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
+ int device, struct snd_hwdep **rhwdep)
{
struct snd_hwdep *hw;
int err;
@@ -2789,8 +2788,8 @@ static int __devinit snd_asihpi_hpi_new(struct snd_card_asihpi *asihpi,
/*------------------------------------------------------------
CARD
------------------------------------------------------------*/
-static int __devinit snd_asihpi_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int snd_asihpi_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
int err;
struct hpi_adapter *hpi;
@@ -2944,7 +2943,7 @@ __nodev:
}
-static void __devexit snd_asihpi_remove(struct pci_dev *pci_dev)
+static void snd_asihpi_remove(struct pci_dev *pci_dev)
{
struct hpi_adapter *hpi = pci_get_drvdata(pci_dev);
snd_card_free(hpi->snd_card);
@@ -2967,7 +2966,7 @@ static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = asihpi_pci_tbl,
.probe = snd_asihpi_probe,
- .remove = __devexit_p(snd_asihpi_remove),
+ .remove = snd_asihpi_remove,
#ifdef CONFIG_PM_SLEEP
/* .suspend = snd_asihpi_suspend,
.resume = snd_asihpi_resume, */
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index 456a758f04f..ac916377001 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -49,14 +49,12 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
err = request_firmware(&firmware, fw_name, &dev->dev);
if (err || !firmware) {
- dev_printk(KERN_ERR, &dev->dev,
- "%d, request_firmware failed for %s\n", err,
- fw_name);
+ dev_err(&dev->dev, "%d, request_firmware failed for %s\n",
+ err, fw_name);
goto error1;
}
if (firmware->size < sizeof(header)) {
- dev_printk(KERN_ERR, &dev->dev, "Header size too small %s\n",
- fw_name);
+ dev_err(&dev->dev, "Header size too small %s\n", fw_name);
goto error2;
}
memcpy(&header, firmware->data, sizeof(header));
@@ -64,7 +62,7 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
if ((header.type != 0x45444F43) || /* "CODE" */
(header.adapter != adapter)
|| (header.size != firmware->size)) {
- dev_printk(KERN_ERR, &dev->dev,
+ dev_err(&dev->dev,
"Invalid firmware header size %d != file %zd\n",
header.size, firmware->size);
goto error2;
@@ -72,17 +70,15 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
if ((header.version >> 9) != (HPI_VER >> 9)) {
/* Consider even and subsequent odd minor versions to be compatible */
- dev_printk(KERN_ERR, &dev->dev,
- "Incompatible firmware version "
- "DSP image %X != Driver %X\n", header.version,
- HPI_VER);
+ dev_err(&dev->dev, "Incompatible firmware version DSP image %X != Driver %X\n",
+ header.version, HPI_VER);
goto error2;
}
if (header.version != HPI_VER) {
- dev_printk(KERN_INFO, &dev->dev,
- "Firmware: release version mismatch DSP image %X != Driver %X\n",
- header.version, HPI_VER);
+ dev_info(&dev->dev,
+ "Firmware: release version mismatch DSP image %X != Driver %X\n",
+ header.version, HPI_VER);
}
HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
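The hpidspcd.c hunks above replace dev_printk(KERN_ERR/KERN_INFO, ...) with the dev_err()/dev_info() helpers and join the split format strings so each message stays greppable on one line. A minimal sketch of the same logging pattern, with a hypothetical caller and firmware name:

#include <linux/device.h>
#include <linux/firmware.h>

static int load_fw_example(struct device *dev, const char *fw_name)
{
	const struct firmware *fw;
	int err = request_firmware(&fw, fw_name, dev);

	if (err) {
		dev_err(dev, "%d, request_firmware failed for %s\n", err, fw_name);
		return err;
	}
	dev_info(dev, "loaded %s (%zu bytes)\n", fw_name, fw->size);
	release_firmware(fw);
	return 0;
}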
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 60915620556..ef5019fe519 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -307,8 +307,8 @@ out:
return err;
}
-int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
int idx, nm;
int adapter_index;
@@ -326,7 +326,7 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
pci_dev->subsystem_device, pci_dev->devfn);
if (pci_enable_device(pci_dev) < 0) {
- dev_printk(KERN_ERR, &pci_dev->dev,
+ dev_err(&pci_dev->dev,
"pci_enable_device failed, disabling device\n");
return -EIO;
}
@@ -398,9 +398,8 @@ int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
mutex_init(&adapters[adapter_index].mutex);
pci_set_drvdata(pci_dev, &adapters[adapter_index]);
- dev_printk(KERN_INFO, &pci_dev->dev,
- "probe succeeded for ASI%04X HPI index %d\n",
- adapter.adapter->type, adapter_index);
+ dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
+ adapter.adapter->type, adapter_index);
return 0;
@@ -421,7 +420,7 @@ err:
return -ENODEV;
}
-void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
+void asihpi_adapter_remove(struct pci_dev *pci_dev)
{
int idx;
struct hpi_message hm;
@@ -448,11 +447,11 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
if (1)
- dev_printk(KERN_INFO, &pci_dev->dev,
- "remove %04x:%04x,%04x:%04x,%04x," " HPI index %d.\n",
- pci_dev->vendor, pci_dev->device,
- pci_dev->subsystem_vendor, pci_dev->subsystem_device,
- pci_dev->devfn, pa->adapter->index);
+ dev_info(&pci_dev->dev,
+ "remove %04x:%04x,%04x:%04x,%04x, HPI index %d\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ pci_dev->devfn, pa->adapter->index);
memset(pa, 0, sizeof(*pa));
}
diff --git a/sound/pci/asihpi/hpioctl.h b/sound/pci/asihpi/hpioctl.h
index 2614aff672e..0d767e10ac4 100644
--- a/sound/pci/asihpi/hpioctl.h
+++ b/sound/pci/asihpi/hpioctl.h
@@ -19,9 +19,9 @@
Linux HPI ioctl, and shared module init functions
*******************************************************************************/
-int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id);
-void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev);
+int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id);
+void asihpi_adapter_remove(struct pci_dev *pci_dev);
void __init asihpi_init(void);
void __exit asihpi_exit(void);
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 368df8b0853..a67743183aa 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -296,7 +296,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_atiixp_ids) = {
MODULE_DEVICE_TABLE(pci, snd_atiixp_ids);
-static struct snd_pci_quirk atiixp_quirks[] __devinitdata = {
+static struct snd_pci_quirk atiixp_quirks[] = {
SND_PCI_QUIRK(0x105b, 0x0c81, "Foxconn RC4107MA-RS2", 0),
SND_PCI_QUIRK(0x15bd, 0x3100, "DFI RS482", 0),
{ } /* terminator */
@@ -561,7 +561,7 @@ static int snd_atiixp_aclink_down(struct atiixp *chip)
ATI_REG_ISR_CODEC2_NOT_READY)
#define CODEC_CHECK_BITS (ALL_CODEC_NOT_READY|ATI_REG_ISR_NEW_FRAME)
-static int __devinit ac97_probing_bugs(struct pci_dev *pci)
+static int ac97_probing_bugs(struct pci_dev *pci)
{
const struct snd_pci_quirk *q;
@@ -575,7 +575,7 @@ static int __devinit ac97_probing_bugs(struct pci_dev *pci)
return -1;
}
-static int __devinit snd_atiixp_codec_detect(struct atiixp *chip)
+static int snd_atiixp_codec_detect(struct atiixp *chip)
{
int timeout;
@@ -1183,7 +1183,7 @@ static struct snd_pcm_ops snd_atiixp_spdif_ops = {
.pointer = snd_atiixp_pcm_pointer,
};
-static struct ac97_pcm atiixp_pcm_defs[] __devinitdata = {
+static struct ac97_pcm atiixp_pcm_defs[] = {
/* front PCM */
{
.exclusive = 1,
@@ -1247,7 +1247,7 @@ static struct atiixp_dma_ops snd_atiixp_spdif_dma_ops = {
};
-static int __devinit snd_atiixp_pcm_new(struct atiixp *chip)
+static int snd_atiixp_pcm_new(struct atiixp *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_chmap *chmap;
@@ -1390,7 +1390,7 @@ static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
* ac97 mixer section
*/
-static struct ac97_quirk ac97_quirks[] __devinitdata = {
+static struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x103c,
.subdevice = 0x006b,
@@ -1412,8 +1412,8 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
{ } /* terminator */
};
-static int __devinit snd_atiixp_mixer_new(struct atiixp *chip, int clock,
- const char *quirk_override)
+static int snd_atiixp_mixer_new(struct atiixp *chip, int clock,
+ const char *quirk_override)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -1560,7 +1560,7 @@ static void snd_atiixp_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}
-static void __devinit snd_atiixp_proc_init(struct atiixp *chip)
+static void snd_atiixp_proc_init(struct atiixp *chip)
{
struct snd_info_entry *entry;
@@ -1602,9 +1602,9 @@ static int snd_atiixp_dev_free(struct snd_device *device)
/*
* constructor for chip instance
*/
-static int __devinit snd_atiixp_create(struct snd_card *card,
- struct pci_dev *pci,
- struct atiixp **r_chip)
+static int snd_atiixp_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct atiixp **r_chip)
{
static struct snd_device_ops ops = {
.dev_free = snd_atiixp_dev_free,
@@ -1661,8 +1661,8 @@ static int __devinit snd_atiixp_create(struct snd_card *card,
}
-static int __devinit snd_atiixp_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp *chip;
@@ -1710,7 +1710,7 @@ static int __devinit snd_atiixp_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_atiixp_remove(struct pci_dev *pci)
+static void snd_atiixp_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1720,7 +1720,7 @@ static struct pci_driver atiixp_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
- .remove = __devexit_p(snd_atiixp_remove),
+ .remove = snd_atiixp_remove,
.driver = {
.pm = SND_ATIIXP_PM_OPS,
},
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 6fc03d9f2cf..d0bec7ba3b0 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -988,7 +988,7 @@ static struct atiixp_dma_ops snd_atiixp_capture_dma_ops = {
.flush_dma = atiixp_in_flush_dma,
};
-static int __devinit snd_atiixp_pcm_new(struct atiixp_modem *chip)
+static int snd_atiixp_pcm_new(struct atiixp_modem *chip)
{
struct snd_pcm *pcm;
int err;
@@ -1061,7 +1061,7 @@ static irqreturn_t snd_atiixp_interrupt(int irq, void *dev_id)
* ac97 mixer section
*/
-static int __devinit snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
+static int snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -1186,7 +1186,7 @@ static void snd_atiixp_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "%02x: %08x\n", i, readl(chip->remap_addr + i));
}
-static void __devinit snd_atiixp_proc_init(struct atiixp_modem *chip)
+static void snd_atiixp_proc_init(struct atiixp_modem *chip)
{
struct snd_info_entry *entry;
@@ -1228,9 +1228,9 @@ static int snd_atiixp_dev_free(struct snd_device *device)
/*
* constructor for chip instance
*/
-static int __devinit snd_atiixp_create(struct snd_card *card,
- struct pci_dev *pci,
- struct atiixp_modem **r_chip)
+static int snd_atiixp_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct atiixp_modem **r_chip)
{
static struct snd_device_ops ops = {
.dev_free = snd_atiixp_dev_free,
@@ -1287,8 +1287,8 @@ static int __devinit snd_atiixp_create(struct snd_card *card,
}
-static int __devinit snd_atiixp_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp_modem *chip;
@@ -1331,7 +1331,7 @@ static int __devinit snd_atiixp_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_atiixp_remove(struct pci_dev *pci)
+static void snd_atiixp_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1341,7 +1341,7 @@ static struct pci_driver atiixp_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
- .remove = __devexit_p(snd_atiixp_remove),
+ .remove = snd_atiixp_remove,
.driver = {
.pm = SND_ATIIXP_PM_OPS,
},
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index ffc376f9f4e..b157e1fadd8 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -78,7 +78,7 @@ static void vortex_fix_agp_bridge(struct pci_dev *via)
}
}
-static void __devinit snd_vortex_workaround(struct pci_dev *vortex, int fix)
+static void snd_vortex_workaround(struct pci_dev *vortex, int fix)
{
struct pci_dev *via = NULL;
@@ -137,7 +137,7 @@ static int snd_vortex_dev_free(struct snd_device *device)
// chip-specific constructor
// (see "Management of Cards and Components")
-static int __devinit
+static int
snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
{
vortex_t *chip;
@@ -234,7 +234,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
}
// constructor -- see "Constructor" sub-section
-static int __devinit
+static int
snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -368,7 +368,7 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
}
// destructor -- see "Destructor" sub-section
-static void __devexit snd_vortex_remove(struct pci_dev *pci)
+static void snd_vortex_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -379,7 +379,7 @@ static struct pci_driver vortex_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_vortex_ids,
.probe = snd_vortex_probe,
- .remove = __devexit_p(snd_vortex_remove),
+ .remove = snd_vortex_remove,
};
module_pci_driver(vortex_driver);
diff --git a/sound/pci/au88x0/au88x0_a3d.c b/sound/pci/au88x0/au88x0_a3d.c
index 9ae8b3b1765..aad831acbb1 100644
--- a/sound/pci/au88x0/au88x0_a3d.c
+++ b/sound/pci/au88x0/au88x0_a3d.c
@@ -594,7 +594,7 @@ static int Vort3DRend_Initialize(vortex_t * v, unsigned short mode)
static int vortex_a3d_register_controls(vortex_t * vortex);
static void vortex_a3d_unregister_controls(vortex_t * vortex);
/* A3D base support init/shudown */
-static void __devinit vortex_Vort3D_enable(vortex_t * v)
+static void vortex_Vort3D_enable(vortex_t *v)
{
int i;
@@ -845,7 +845,7 @@ snd_vortex_a3d_filter_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new vortex_a3d_kcontrol __devinitdata = {
+static struct snd_kcontrol_new vortex_a3d_kcontrol = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Playback PCM advanced processing",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -855,7 +855,7 @@ static struct snd_kcontrol_new vortex_a3d_kcontrol __devinitdata = {
};
/* Control (un)registration. */
-static int __devinit vortex_a3d_register_controls(vortex_t * vortex)
+static int vortex_a3d_register_controls(vortex_t *vortex)
{
struct snd_kcontrol *kcontrol;
int err, i;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 525f881f040..ae59dbaa53d 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2461,7 +2461,12 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
#ifndef CHIP_AU8810
for (i = 0; i < NR_WT; i++) {
if (vortex->dma_wt[i].fifo_status == FIFO_START) {
- if (vortex_wtdma_bufshift(vortex, i)) ;
+ /* FIXME: we ignore the return value from
+ * vortex_wtdma_bufshift() below as the delta
+ * calculation seems not working for wavetable
+ * by some reason
+ */
+ vortex_wtdma_bufshift(vortex, i);
spin_unlock(&vortex->lock);
snd_pcm_period_elapsed(vortex->dma_wt[i].
substream);
@@ -2675,7 +2680,7 @@ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode)
/* Initialization */
-static int __devinit vortex_core_init(vortex_t * vortex)
+static int vortex_core_init(vortex_t *vortex)
{
printk(KERN_INFO "Vortex: init.... ");
diff --git a/sound/pci/au88x0/au88x0_eq.c b/sound/pci/au88x0/au88x0_eq.c
index 278ed8189fc..e7220533ecf 100644
--- a/sound/pci/au88x0/au88x0_eq.c
+++ b/sound/pci/au88x0/au88x0_eq.c
@@ -757,7 +757,7 @@ snd_vortex_eqtoggle_put(struct snd_kcontrol *kcontrol,
return 1; /* Allways changes */
}
-static struct snd_kcontrol_new vortex_eqtoggle_kcontrol __devinitdata = {
+static struct snd_kcontrol_new vortex_eqtoggle_kcontrol = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "EQ Enable",
.index = 0,
@@ -815,7 +815,7 @@ snd_vortex_eq_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucon
return changed;
}
-static struct snd_kcontrol_new vortex_eq_kcontrol __devinitdata = {
+static struct snd_kcontrol_new vortex_eq_kcontrol = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = " .",
.index = 0,
@@ -854,7 +854,7 @@ snd_vortex_peaks_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *u
return 0;
}
-static struct snd_kcontrol_new vortex_levels_kcontrol __devinitdata = {
+static struct snd_kcontrol_new vortex_levels_kcontrol = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "EQ Peaks",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
@@ -863,7 +863,7 @@ static struct snd_kcontrol_new vortex_levels_kcontrol __devinitdata = {
};
/* EQ band gain labels. */
-static char *EqBandLabels[10] __devinitdata = {
+static char *EqBandLabels[10] = {
"EQ0 31Hz\0",
"EQ1 63Hz\0",
"EQ2 125Hz\0",
@@ -877,7 +877,7 @@ static char *EqBandLabels[10] __devinitdata = {
};
/* ALSA driver entry points. Init and exit. */
-static int __devinit vortex_eq_init(vortex_t * vortex)
+static int vortex_eq_init(vortex_t *vortex)
{
struct snd_kcontrol *kcontrol;
int err, i;
diff --git a/sound/pci/au88x0/au88x0_game.c b/sound/pci/au88x0/au88x0_game.c
index 30a456700d8..280f86de223 100644
--- a/sound/pci/au88x0/au88x0_game.c
+++ b/sound/pci/au88x0/au88x0_game.c
@@ -92,7 +92,7 @@ static int vortex_game_open(struct gameport *gameport, int mode)
return 0;
}
-static int __devinit vortex_gameport_register(vortex_t * vortex)
+static int vortex_gameport_register(vortex_t *vortex)
{
struct gameport *gp;
diff --git a/sound/pci/au88x0/au88x0_mixer.c b/sound/pci/au88x0/au88x0_mixer.c
index fa13efbebda..a58298cfe7e 100644
--- a/sound/pci/au88x0/au88x0_mixer.c
+++ b/sound/pci/au88x0/au88x0_mixer.c
@@ -19,7 +19,7 @@ static int remove_ctl(struct snd_card *card, const char *name)
return snd_ctl_remove_id(card, &id);
}
-static int __devinit snd_vortex_mixer(vortex_t * vortex)
+static int snd_vortex_mixer(vortex_t *vortex)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
diff --git a/sound/pci/au88x0/au88x0_mpu401.c b/sound/pci/au88x0/au88x0_mpu401.c
index e6c6a0febb7..29e5945eef6 100644
--- a/sound/pci/au88x0/au88x0_mpu401.c
+++ b/sound/pci/au88x0/au88x0_mpu401.c
@@ -41,7 +41,7 @@
#define MPU401_ENTER_UART 0x3f
#define MPU401_ACK 0xfe
-static int __devinit snd_vortex_midi(vortex_t * vortex)
+static int snd_vortex_midi(vortex_t *vortex)
{
struct snd_rawmidi *rmidi;
int temp, mode;
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index b2405020284..a4184bb2776 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -516,7 +516,7 @@ static int snd_vortex_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
}
/* spdif controls */
-static struct snd_kcontrol_new snd_vortex_mixer_spdif[] __devinitdata = {
+static struct snd_kcontrol_new snd_vortex_mixer_spdif[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
@@ -598,7 +598,7 @@ static int snd_vortex_pcm_vol_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_MINMAX(vortex_pcm_vol_db_scale, -9600, 2400);
-static struct snd_kcontrol_new snd_vortex_pcm_vol __devinitdata = {
+static struct snd_kcontrol_new snd_vortex_pcm_vol = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "PCM Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -611,7 +611,7 @@ static struct snd_kcontrol_new snd_vortex_pcm_vol __devinitdata = {
};
/* create a pcm device */
-static int __devinit snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
+static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
{
struct snd_pcm *pcm;
struct snd_kcontrol *kctl;
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 0f804741825..08e9a4702cb 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -113,11 +113,11 @@ struct aw2 {
* FUNCTION DECLARATIONS
********************************/
static int snd_aw2_dev_free(struct snd_device *device);
-static int __devinit snd_aw2_create(struct snd_card *card,
- struct pci_dev *pci, struct aw2 **rchip);
-static int __devinit snd_aw2_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id);
-static void __devexit snd_aw2_remove(struct pci_dev *pci);
+static int snd_aw2_create(struct snd_card *card,
+ struct pci_dev *pci, struct aw2 **rchip);
+static int snd_aw2_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id);
+static void snd_aw2_remove(struct pci_dev *pci);
static int snd_aw2_pcm_playback_open(struct snd_pcm_substream *substream);
static int snd_aw2_pcm_playback_close(struct snd_pcm_substream *substream);
static int snd_aw2_pcm_capture_open(struct snd_pcm_substream *substream);
@@ -135,7 +135,7 @@ static snd_pcm_uframes_t snd_aw2_pcm_pointer_playback(struct snd_pcm_substream
*substream);
static snd_pcm_uframes_t snd_aw2_pcm_pointer_capture(struct snd_pcm_substream
*substream);
-static int __devinit snd_aw2_new_pcm(struct aw2 *chip);
+static int snd_aw2_new_pcm(struct aw2 *chip);
static int snd_aw2_control_switch_capture_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
@@ -173,7 +173,7 @@ static struct pci_driver aw2_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_aw2_ids,
.probe = snd_aw2_probe,
- .remove = __devexit_p(snd_aw2_remove),
+ .remove = snd_aw2_remove,
};
module_pci_driver(aw2_driver);
@@ -202,7 +202,7 @@ static struct snd_pcm_ops snd_aw2_capture_ops = {
.pointer = snd_aw2_pcm_pointer_capture,
};
-static struct snd_kcontrol_new aw2_control __devinitdata = {
+static struct snd_kcontrol_new aw2_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Capture Route",
.index = 0,
@@ -242,8 +242,8 @@ static int snd_aw2_dev_free(struct snd_device *device)
}
/* chip-specific constructor */
-static int __devinit snd_aw2_create(struct snd_card *card,
- struct pci_dev *pci, struct aw2 **rchip)
+static int snd_aw2_create(struct snd_card *card,
+ struct pci_dev *pci, struct aw2 **rchip)
{
struct aw2 *chip;
int err;
@@ -332,8 +332,8 @@ static int __devinit snd_aw2_create(struct snd_card *card,
}
/* constructor */
-static int __devinit snd_aw2_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_aw2_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -389,7 +389,7 @@ static int __devinit snd_aw2_probe(struct pci_dev *pci,
}
/* destructor */
-static void __devexit snd_aw2_remove(struct pci_dev *pci)
+static void snd_aw2_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -591,7 +591,7 @@ static snd_pcm_uframes_t snd_aw2_pcm_pointer_capture(struct snd_pcm_substream
}
/* create a pcm device */
-static int __devinit snd_aw2_new_pcm(struct aw2 *chip)
+static int snd_aw2_new_pcm(struct aw2 *chip)
{
struct snd_pcm *pcm_playback_ana;
struct snd_pcm *pcm_playback_num;
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index c03b66b784a..1204a0fa336 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -817,7 +817,7 @@ snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
snd_azf3328_mixer_ac97_map_unsupported(reg_ac97, "write");
}
-static int __devinit
+static int
snd_azf3328_mixer_new(struct snd_azf3328 *chip)
{
struct snd_ac97_bus *bus;
@@ -1171,7 +1171,7 @@ snd_azf3328_put_mixer_enum(struct snd_kcontrol *kcontrol,
return (nreg != oreg);
}
-static struct snd_kcontrol_new snd_azf3328_mixer_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_azf3328_mixer_controls[] = {
AZF3328_MIXER_SWITCH("Master Playback Switch", IDX_MIXER_PLAY_MASTER, 15, 1),
AZF3328_MIXER_VOL_STEREO("Master Playback Volume", IDX_MIXER_PLAY_MASTER, 0x1f, 1),
AZF3328_MIXER_SWITCH("PCM Playback Switch", IDX_MIXER_WAVEOUT, 15, 1),
@@ -1229,7 +1229,7 @@ static struct snd_kcontrol_new snd_azf3328_mixer_controls[] __devinitdata = {
#endif
};
-static u16 __devinitdata snd_azf3328_init_values[][2] = {
+static u16 snd_azf3328_init_values[][2] = {
{ IDX_MIXER_PLAY_MASTER, MIXER_MUTE_MASK|0x1f1f },
{ IDX_MIXER_MODEMOUT, MIXER_MUTE_MASK|0x1f1f },
{ IDX_MIXER_BASSTREBLE, 0x0000 },
@@ -1245,7 +1245,7 @@ static u16 __devinitdata snd_azf3328_init_values[][2] = {
{ IDX_MIXER_REC_VOLUME, MIXER_MUTE_MASK|0x0707 },
};
-static int __devinit
+static int
snd_azf3328_mixer_new(struct snd_azf3328 *chip)
{
struct snd_card *card;
@@ -1899,7 +1899,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport,
return 0;
}
-static int __devinit
+static int
snd_azf3328_gameport(struct snd_azf3328 *chip, int dev)
{
struct gameport *gp;
@@ -2212,7 +2212,7 @@ static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
.pointer = snd_azf3328_pcm_pointer
};
-static int __devinit
+static int
snd_azf3328_pcm(struct snd_azf3328 *chip)
{
enum { AZF_PCMDEV_STD, AZF_PCMDEV_I2S_OUT, NUM_AZF_PCMDEVS }; /* pcm devices */
@@ -2344,7 +2344,7 @@ static struct snd_timer_hardware snd_azf3328_timer_hw = {
.precise_resolution = snd_azf3328_timer_precise_resolution,
};
-static int __devinit
+static int
snd_azf3328_timer(struct snd_azf3328 *chip, int device)
{
struct snd_timer *timer = NULL;
@@ -2489,7 +2489,7 @@ snd_azf3328_debug_show_ports(const struct snd_azf3328 *chip)
#endif /* DEBUG_MISC */
}
-static int __devinit
+static int
snd_azf3328_create(struct snd_card *card,
struct pci_dev *pci,
unsigned long device_type,
@@ -2615,7 +2615,7 @@ out:
return err;
}
-static int __devinit
+static int
snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -2720,7 +2720,7 @@ out:
return err;
}
-static void __devexit
+static void
snd_azf3328_remove(struct pci_dev *pci)
{
snd_azf3328_dbgcallenter();
@@ -2872,7 +2872,7 @@ static struct pci_driver azf3328_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_azf3328_ids,
.probe = snd_azf3328_probe,
- .remove = __devexit_p(snd_azf3328_remove),
+ .remove = snd_azf3328_remove,
.driver = {
.pm = SND_AZF3328_PM_OPS,
},
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index b6a95eeca09..cdd100dae85 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -164,7 +164,7 @@ struct snd_bt87x_board {
unsigned no_digital:1; /* No digital input */
};
-static __devinitdata struct snd_bt87x_board snd_bt87x_boards[] = {
+static struct snd_bt87x_board snd_bt87x_boards[] = {
[SND_BT87X_BOARD_UNKNOWN] = {
.dig_rate = 32000, /* just a guess */
},
@@ -696,7 +696,7 @@ static int snd_bt87x_dev_free(struct snd_device *device)
return snd_bt87x_free(chip);
}
-static int __devinit snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
+static int snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
{
int err;
struct snd_pcm *pcm;
@@ -714,9 +714,9 @@ static int __devinit snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *nam
ALIGN(255 * 4092, 1024));
}
-static int __devinit snd_bt87x_create(struct snd_card *card,
- struct pci_dev *pci,
- struct snd_bt87x **rchip)
+static int snd_bt87x_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct snd_bt87x **rchip)
{
struct snd_bt87x *chip;
int err;
@@ -822,7 +822,7 @@ MODULE_DEVICE_TABLE(pci, snd_bt87x_ids);
* (DVB cards use the audio function to transfer MPEG data) */
static struct {
unsigned short subvendor, subdevice;
-} blacklist[] __devinitdata = {
+} blacklist[] = {
{0x0071, 0x0101}, /* Nebula Electronics DigiTV */
{0x11bd, 0x001c}, /* Pinnacle PCTV Sat */
{0x11bd, 0x0026}, /* Pinnacle PCTV SAT CI */
@@ -837,7 +837,7 @@ static struct {
};
/* return the id of the card, or a negative value if it's blacklisted */
-static int __devinit snd_bt87x_detect_card(struct pci_dev *pci)
+static int snd_bt87x_detect_card(struct pci_dev *pci)
{
int i;
const struct pci_device_id *supported;
@@ -862,8 +862,8 @@ static int __devinit snd_bt87x_detect_card(struct pci_dev *pci)
return SND_BT87X_BOARD_UNKNOWN;
}
-static int __devinit snd_bt87x_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_bt87x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -948,7 +948,7 @@ _error:
return err;
}
-static void __devexit snd_bt87x_remove(struct pci_dev *pci)
+static void snd_bt87x_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -966,7 +966,7 @@ static struct pci_driver bt87x_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_bt87x_ids,
.probe = snd_bt87x_probe,
- .remove = __devexit_p(snd_bt87x_remove),
+ .remove = snd_bt87x_remove,
};
module_pci_driver(bt87x_driver);
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 65c55910566..1610a570597 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1352,7 +1352,7 @@ static const struct snd_pcm_chmap_elem side_map[] = {
{ }
};
-static int __devinit snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
+static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
@@ -1650,7 +1650,7 @@ static void ca0106_stop_chip(struct snd_ca0106 *chip)
*/
}
-static int __devinit snd_ca0106_create(int dev, struct snd_card *card,
+static int snd_ca0106_create(int dev, struct snd_card *card,
struct pci_dev *pci,
struct snd_ca0106 **rchip)
{
@@ -1777,7 +1777,7 @@ static int ca0106_dev_id_port(void *dev_id)
return ((struct snd_ca0106 *)dev_id)->port;
}
-static int __devinit snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
+static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
{
struct snd_ca_midi *midi;
char *name;
@@ -1828,7 +1828,7 @@ static int __devinit snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int chann
}
-static int __devinit snd_ca0106_probe(struct pci_dev *pci,
+static int snd_ca0106_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
@@ -1893,7 +1893,7 @@ static int __devinit snd_ca0106_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_ca0106_remove(struct pci_dev *pci)
+static void snd_ca0106_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1971,7 +1971,7 @@ static struct pci_driver ca0106_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ca0106_ids,
.probe = snd_ca0106_probe,
- .remove = __devexit_p(snd_ca0106_remove),
+ .remove = snd_ca0106_remove,
.driver = {
.pm = SND_CA0106_PM_OPS,
},
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index 68eacf7002d..27de0de9001 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -325,7 +325,7 @@ static int snd_ca0106_capture_mic_line_in_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ca0106_capture_mic_line_in __devinitdata =
+static struct snd_kcontrol_new snd_ca0106_capture_mic_line_in =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Shared Mic/Line in Capture Switch",
@@ -334,7 +334,7 @@ static struct snd_kcontrol_new snd_ca0106_capture_mic_line_in __devinitdata =
.put = snd_ca0106_capture_mic_line_in_put
};
-static struct snd_kcontrol_new snd_ca0106_capture_line_in_side_out __devinitdata =
+static struct snd_kcontrol_new snd_ca0106_capture_line_in_side_out =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Shared Line in/Side out Capture Switch",
@@ -588,7 +588,7 @@ static int spi_mute_put(struct snd_kcontrol *kcontrol,
.private_value = ((chid) << 8) | (reg) \
}
-static struct snd_kcontrol_new snd_ca0106_volume_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ca0106_volume_ctls[] = {
CA_VOLUME("Analog Front Playback Volume",
CONTROL_FRONT_CHANNEL, PLAYBACK_VOLUME2),
CA_VOLUME("Analog Rear Playback Volume",
@@ -669,7 +669,7 @@ static struct snd_kcontrol_new snd_ca0106_volume_ctls[] __devinitdata = {
.private_value = chid \
}
-static struct snd_kcontrol_new snd_ca0106_volume_i2c_adc_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ca0106_volume_i2c_adc_ctls[] = {
I2C_VOLUME("Phone Capture Volume", 0),
I2C_VOLUME("Mic Capture Volume", 1),
I2C_VOLUME("Line in Capture Volume", 2),
@@ -691,7 +691,7 @@ static const int spi_dmute_bit[] = {
SPI_DMUTE4_BIT,
};
-static struct snd_kcontrol_new __devinit
+static struct snd_kcontrol_new
snd_ca0106_volume_spi_dac_ctl(struct snd_ca0106_details *details,
int channel_id)
{
@@ -735,7 +735,7 @@ snd_ca0106_volume_spi_dac_ctl(struct snd_ca0106_details *details,
return spi_switch;
}
-static int __devinit remove_ctl(struct snd_card *card, const char *name)
+static int remove_ctl(struct snd_card *card, const char *name)
{
struct snd_ctl_elem_id id;
memset(&id, 0, sizeof(id));
@@ -744,7 +744,7 @@ static int __devinit remove_ctl(struct snd_card *card, const char *name)
return snd_ctl_remove_id(card, &id);
}
-static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card, const char *name)
+static struct snd_kcontrol *ctl_find(struct snd_card *card, const char *name)
{
struct snd_ctl_elem_id sid;
memset(&sid, 0, sizeof(sid));
@@ -754,7 +754,7 @@ static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card, const char
return snd_ctl_find_id(card, &sid);
}
-static int __devinit rename_ctl(struct snd_card *card, const char *src, const char *dst)
+static int rename_ctl(struct snd_card *card, const char *src, const char *dst)
{
struct snd_kcontrol *kctl = ctl_find(card, src);
if (kctl) {
@@ -774,10 +774,10 @@ static int __devinit rename_ctl(struct snd_card *card, const char *src, const ch
} \
} while (0)
-static __devinitdata
+static
DECLARE_TLV_DB_SCALE(snd_ca0106_master_db_scale, -6375, 25, 1);
-static char *slave_vols[] __devinitdata = {
+static char *slave_vols[] = {
"Analog Front Playback Volume",
"Analog Rear Playback Volume",
"Analog Center/LFE Playback Volume",
@@ -790,7 +790,7 @@ static char *slave_vols[] __devinitdata = {
NULL
};
-static char *slave_sws[] __devinitdata = {
+static char *slave_sws[] = {
"Analog Front Playback Switch",
"Analog Rear Playback Switch",
"Analog Center/LFE Playback Switch",
@@ -799,7 +799,7 @@ static char *slave_sws[] __devinitdata = {
NULL
};
-static void __devinit add_slaves(struct snd_card *card,
+static void add_slaves(struct snd_card *card,
struct snd_kcontrol *master, char **list)
{
for (; *list; list++) {
@@ -809,7 +809,7 @@ static void __devinit add_slaves(struct snd_card *card,
}
}
-int __devinit snd_ca0106_mixer(struct snd_ca0106 *emu)
+int snd_ca0106_mixer(struct snd_ca0106 *emu)
{
int err;
struct snd_card *card = emu->card;
diff --git a/sound/pci/ca0106/ca0106_proc.c b/sound/pci/ca0106/ca0106_proc.c
index c694464b116..4f9c2821bb3 100644
--- a/sound/pci/ca0106/ca0106_proc.c
+++ b/sound/pci/ca0106/ca0106_proc.c
@@ -424,7 +424,7 @@ static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry,
}
}
-int __devinit snd_ca0106_proc_init(struct snd_ca0106 * emu)
+int snd_ca0106_proc_init(struct snd_ca0106 *emu)
{
struct snd_info_entry *entry;
diff --git a/sound/pci/ca0106/ca_midi.c b/sound/pci/ca0106/ca_midi.c
index c7885117da3..8bbdf265d11 100644
--- a/sound/pci/ca0106/ca_midi.c
+++ b/sound/pci/ca0106/ca_midi.c
@@ -286,7 +286,7 @@ static void ca_rmidi_free(struct snd_rawmidi *rmidi)
ca_midi_free(rmidi->private_data);
}
-int __devinit ca_midi_init(void *dev_id, struct snd_ca_midi *midi, int device, char *name)
+int ca_midi_init(void *dev_id, struct snd_ca_midi *midi, int device, char *name)
{
struct snd_rawmidi *rmidi;
int err;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 22122ff26e3..c617435db6e 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1045,7 +1045,7 @@ static int snd_cmipci_spdif_default_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_cmipci_spdif_default __devinitdata =
+static struct snd_kcontrol_new snd_cmipci_spdif_default =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
@@ -1072,7 +1072,7 @@ static int snd_cmipci_spdif_mask_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_cmipci_spdif_mask __devinitdata =
+static struct snd_kcontrol_new snd_cmipci_spdif_mask =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1119,7 +1119,7 @@ static int snd_cmipci_spdif_stream_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_cmipci_spdif_stream __devinitdata =
+static struct snd_kcontrol_new snd_cmipci_spdif_stream =
{
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1897,7 +1897,7 @@ static struct snd_pcm_ops snd_cmipci_capture_spdif_ops = {
/*
*/
-static int __devinit snd_cmipci_pcm_new(struct cmipci *cm, int device)
+static int snd_cmipci_pcm_new(struct cmipci *cm, int device)
{
struct snd_pcm *pcm;
int err;
@@ -1920,7 +1920,7 @@ static int __devinit snd_cmipci_pcm_new(struct cmipci *cm, int device)
return 0;
}
-static int __devinit snd_cmipci_pcm2_new(struct cmipci *cm, int device)
+static int snd_cmipci_pcm2_new(struct cmipci *cm, int device)
{
struct snd_pcm *pcm;
int err;
@@ -1942,7 +1942,7 @@ static int __devinit snd_cmipci_pcm2_new(struct cmipci *cm, int device)
return 0;
}
-static int __devinit snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
+static int snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
{
struct snd_pcm *pcm;
int err;
@@ -2290,7 +2290,7 @@ static int snd_cmipci_put_native_mixer_sensitive(struct snd_kcontrol *kcontrol,
}
-static struct snd_kcontrol_new snd_cmipci_mixers[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmipci_mixers[] = {
CMIPCI_SB_VOL_STEREO("Master Playback Volume", SB_DSP4_MASTER_DEV, 3, 31),
CMIPCI_MIXER_SW_MONO("3D Control - Switch", CM_REG_MIXER1, CM_X3DEN_SHIFT, 0),
CMIPCI_SB_VOL_STEREO("PCM Playback Volume", SB_DSP4_PCM_DEV, 3, 31),
@@ -2601,7 +2601,7 @@ static int snd_cmipci_mic_in_mode_put(struct snd_kcontrol *kcontrol,
}
/* both for CM8338/8738 */
-static struct snd_kcontrol_new snd_cmipci_mixer_switches[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmipci_mixer_switches[] = {
DEFINE_MIXER_SWITCH("Four Channel Mode", fourch),
{
.name = "Line-In Mode",
@@ -2613,11 +2613,11 @@ static struct snd_kcontrol_new snd_cmipci_mixer_switches[] __devinitdata = {
};
/* for non-multichannel chips */
-static struct snd_kcontrol_new snd_cmipci_nomulti_switch __devinitdata =
+static struct snd_kcontrol_new snd_cmipci_nomulti_switch =
DEFINE_MIXER_SWITCH("Exchange DAC", exchange_dac);
/* only for CM8738 */
-static struct snd_kcontrol_new snd_cmipci_8738_mixer_switches[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmipci_8738_mixer_switches[] = {
#if 0 /* controlled in pcm device */
DEFINE_MIXER_SWITCH("IEC958 In Record", spdif_in),
DEFINE_MIXER_SWITCH("IEC958 Out", spdif_out),
@@ -2639,14 +2639,14 @@ static struct snd_kcontrol_new snd_cmipci_8738_mixer_switches[] __devinitdata =
};
/* only for model 033/037 */
-static struct snd_kcontrol_new snd_cmipci_old_mixer_switches[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmipci_old_mixer_switches[] = {
DEFINE_MIXER_SWITCH("IEC958 Mix Analog", spdif_dac_out),
DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase),
DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel1),
};
/* only for model 039 or later */
-static struct snd_kcontrol_new snd_cmipci_extra_mixer_switches[] __devinitdata = {
+static struct snd_kcontrol_new snd_cmipci_extra_mixer_switches[] = {
DEFINE_MIXER_SWITCH("IEC958 In Select", spdif_in_sel2),
DEFINE_MIXER_SWITCH("IEC958 In Phase Inverse", spdi_phase2),
{
@@ -2659,11 +2659,11 @@ static struct snd_kcontrol_new snd_cmipci_extra_mixer_switches[] __devinitdata =
};
/* card control switches */
-static struct snd_kcontrol_new snd_cmipci_modem_switch __devinitdata =
+static struct snd_kcontrol_new snd_cmipci_modem_switch =
DEFINE_CARD_SWITCH("Modem", modem);
-static int __devinit snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
+static int snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
{
struct snd_card *card;
struct snd_kcontrol_new *sw;
@@ -2791,7 +2791,7 @@ static void snd_cmipci_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "\n");
}
-static void __devinit snd_cmipci_proc_init(struct cmipci *cm)
+static void snd_cmipci_proc_init(struct cmipci *cm)
{
struct snd_info_entry *entry;
@@ -2817,7 +2817,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_cmipci_ids) = {
* check chip version and capabilities
* driver name is modified according to the chip model
*/
-static void __devinit query_chip(struct cmipci *cm)
+static void query_chip(struct cmipci *cm)
{
unsigned int detect;
@@ -2866,7 +2866,7 @@ static void __devinit query_chip(struct cmipci *cm)
}
#ifdef SUPPORT_JOYSTICK
-static int __devinit snd_cmipci_create_gameport(struct cmipci *cm, int dev)
+static int snd_cmipci_create_gameport(struct cmipci *cm, int dev)
{
static int ports[] = { 0x201, 0x200, 0 }; /* FIXME: majority is 0x201? */
struct gameport *gp;
@@ -2959,7 +2959,7 @@ static int snd_cmipci_dev_free(struct snd_device *device)
return snd_cmipci_free(cm);
}
-static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port)
+static int snd_cmipci_create_fm(struct cmipci *cm, long fm_port)
{
long iosynth;
unsigned int val;
@@ -3012,8 +3012,8 @@ static int __devinit snd_cmipci_create_fm(struct cmipci *cm, long fm_port)
return 0;
}
-static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
- int dev, struct cmipci **rcmipci)
+static int snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
+ int dev, struct cmipci **rcmipci)
{
struct cmipci *cm;
int err;
@@ -3265,8 +3265,8 @@ static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pc
MODULE_DEVICE_TABLE(pci, snd_cmipci_ids);
-static int __devinit snd_cmipci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_cmipci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -3314,7 +3314,7 @@ static int __devinit snd_cmipci_probe(struct pci_dev *pci,
}
-static void __devexit snd_cmipci_remove(struct pci_dev *pci)
+static void snd_cmipci_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -3415,7 +3415,7 @@ static struct pci_driver cmipci_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cmipci_ids,
.probe = snd_cmipci_probe,
- .remove = __devexit_p(snd_cmipci_remove),
+ .remove = snd_cmipci_remove,
.driver = {
.pm = SND_CMIPCI_PM_OPS,
},
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 8e86ec0031f..6a869506994 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -969,8 +969,8 @@ static struct snd_pcm_ops snd_cs4281_capture_ops = {
.pointer = snd_cs4281_pointer,
};
-static int __devinit snd_cs4281_pcm(struct cs4281 * chip, int device,
- struct snd_pcm ** rpcm)
+static int snd_cs4281_pcm(struct cs4281 *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1093,7 +1093,7 @@ static void snd_cs4281_mixer_free_ac97(struct snd_ac97 *ac97)
chip->ac97 = NULL;
}
-static int __devinit snd_cs4281_mixer(struct cs4281 * chip)
+static int snd_cs4281_mixer(struct cs4281 *chip)
{
struct snd_card *card = chip->card;
struct snd_ac97_template ac97;
@@ -1171,7 +1171,7 @@ static struct snd_info_entry_ops snd_cs4281_proc_ops_BA1 = {
.read = snd_cs4281_BA1_read,
};
-static void __devinit snd_cs4281_proc_init(struct cs4281 * chip)
+static void snd_cs4281_proc_init(struct cs4281 *chip)
{
struct snd_info_entry *entry;
@@ -1259,7 +1259,7 @@ static int snd_cs4281_gameport_open(struct gameport *gameport, int mode)
return 0;
}
-static int __devinit snd_cs4281_create_gameport(struct cs4281 *chip)
+static int snd_cs4281_create_gameport(struct cs4281 *chip)
{
struct gameport *gp;
@@ -1335,10 +1335,10 @@ static int snd_cs4281_dev_free(struct snd_device *device)
static int snd_cs4281_chip_init(struct cs4281 *chip); /* defined below */
-static int __devinit snd_cs4281_create(struct snd_card *card,
- struct pci_dev *pci,
- struct cs4281 ** rchip,
- int dual_codec)
+static int snd_cs4281_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct cs4281 **rchip,
+ int dual_codec)
{
struct cs4281 *chip;
unsigned int tmp;
@@ -1779,8 +1779,8 @@ static struct snd_rawmidi_ops snd_cs4281_midi_input =
.trigger = snd_cs4281_midi_input_trigger,
};
-static int __devinit snd_cs4281_midi(struct cs4281 * chip, int device,
- struct snd_rawmidi **rrawmidi)
+static int snd_cs4281_midi(struct cs4281 *chip, int device,
+ struct snd_rawmidi **rrawmidi)
{
struct snd_rawmidi *rmidi;
int err;
@@ -1901,8 +1901,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
spin_unlock_irqrestore(&opl3->reg_lock, flags);
}
-static int __devinit snd_cs4281_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_cs4281_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1968,7 +1968,7 @@ static int __devinit snd_cs4281_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_cs4281_remove(struct pci_dev *pci)
+static void snd_cs4281_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2095,7 +2095,7 @@ static struct pci_driver cs4281_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs4281_ids,
.probe = snd_cs4281_probe,
- .remove = __devexit_p(snd_cs4281_remove),
+ .remove = snd_cs4281_remove,
.driver = {
.pm = CS4281_PM_OPS,
},
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index 575bed0836f..6b0d8b50a30 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -73,8 +73,8 @@ static DEFINE_PCI_DEVICE_TABLE(snd_cs46xx_ids) = {
MODULE_DEVICE_TABLE(pci, snd_cs46xx_ids);
-static int __devinit snd_card_cs46xx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_card_cs46xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -155,7 +155,7 @@ static int __devinit snd_card_cs46xx_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_card_cs46xx_remove(struct pci_dev *pci)
+static void snd_card_cs46xx_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -165,7 +165,7 @@ static struct pci_driver cs46xx_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs46xx_ids,
.probe = snd_card_cs46xx_probe,
- .remove = __devexit_p(snd_card_cs46xx_remove),
+ .remove = snd_card_cs46xx_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &snd_cs46xx_pm,
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index a2bb8c91ebe..1b66efd9b72 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1590,7 +1590,7 @@ static struct snd_pcm_ops snd_cs46xx_capture_indirect_ops = {
#define MAX_PLAYBACK_CHANNELS 1
#endif
-int __devinit snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm)
+int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1621,7 +1621,8 @@ int __devinit snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm
#ifdef CONFIG_SND_CS46XX_NEW_DSP
-int __devinit snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm)
+int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1650,7 +1651,8 @@ int __devinit snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct sn
return 0;
}
-int __devinit snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm)
+int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1679,7 +1681,8 @@ int __devinit snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device, str
return 0;
}
-int __devinit snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm)
+int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -2092,7 +2095,7 @@ static int snd_cs46xx_spdif_stream_put(struct snd_kcontrol *kcontrol,
#endif /* CONFIG_SND_CS46XX_NEW_DSP */
-static struct snd_kcontrol_new snd_cs46xx_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_cs46xx_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DAC Volume",
@@ -2278,7 +2281,7 @@ static void snd_cs46xx_codec_reset (struct snd_ac97 * ac97)
}
#endif
-static int __devinit cs46xx_detect_codec(struct snd_cs46xx *chip, int codec)
+static int cs46xx_detect_codec(struct snd_cs46xx *chip, int codec)
{
int idx, err;
struct snd_ac97_template ac97;
@@ -2311,7 +2314,7 @@ static int __devinit cs46xx_detect_codec(struct snd_cs46xx *chip, int codec)
return -ENXIO;
}
-int __devinit snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
+int snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
{
struct snd_card *card = chip->card;
struct snd_ctl_elem_id id;
@@ -2531,7 +2534,7 @@ static struct snd_rawmidi_ops snd_cs46xx_midi_input =
.trigger = snd_cs46xx_midi_input_trigger,
};
-int __devinit snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rrawmidi)
+int snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rrawmidi)
{
struct snd_rawmidi *rmidi;
int err;
@@ -2613,7 +2616,7 @@ static int snd_cs46xx_gameport_open(struct gameport *gameport, int mode)
return 0;
}
-int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip)
+int snd_cs46xx_gameport(struct snd_cs46xx *chip)
{
struct gameport *gp;
@@ -2649,7 +2652,7 @@ static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip)
}
}
#else
-int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip) { return -ENOSYS; }
+int snd_cs46xx_gameport(struct snd_cs46xx *chip) { return -ENOSYS; }
static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip) { }
#endif /* CONFIG_GAMEPORT */
@@ -2674,7 +2677,7 @@ static struct snd_info_entry_ops snd_cs46xx_proc_io_ops = {
.read = snd_cs46xx_io_read,
};
-static int __devinit snd_cs46xx_proc_init(struct snd_card *card, struct snd_cs46xx *chip)
+static int snd_cs46xx_proc_init(struct snd_card *card, struct snd_cs46xx *chip)
{
struct snd_info_entry *entry;
int idx;
@@ -3061,7 +3064,7 @@ static void cs46xx_enable_stream_irqs(struct snd_cs46xx *chip)
snd_cs46xx_poke(chip, BA1_CIE, tmp); /* capture interrupt enable */
}
-int __devinit snd_cs46xx_start_dsp(struct snd_cs46xx *chip)
+int snd_cs46xx_start_dsp(struct snd_cs46xx *chip)
{
unsigned int tmp;
/*
@@ -3477,7 +3480,7 @@ struct cs_card_type
void (*mixer_init)(struct snd_cs46xx *);
};
-static struct cs_card_type __devinitdata cards[] = {
+static struct cs_card_type cards[] = {
{
.vendor = 0x1489,
.id = 0x7001,
@@ -3717,10 +3720,10 @@ SIMPLE_DEV_PM_OPS(snd_cs46xx_pm, snd_cs46xx_suspend, snd_cs46xx_resume);
/*
*/
-int __devinit snd_cs46xx_create(struct snd_card *card,
- struct pci_dev * pci,
+int snd_cs46xx_create(struct snd_card *card,
+ struct pci_dev *pci,
int external_amp, int thinkpad,
- struct snd_cs46xx ** rchip)
+ struct snd_cs46xx **rchip)
{
struct snd_cs46xx *chip;
int err, idx;
diff --git a/sound/pci/cs5530.c b/sound/pci/cs5530.c
index d1cca283157..dace827b45d 100644
--- a/sound/pci/cs5530.c
+++ b/sound/pci/cs5530.c
@@ -88,13 +88,13 @@ static int snd_cs5530_dev_free(struct snd_device *device)
return snd_cs5530_free(chip);
}
-static void __devexit snd_cs5530_remove(struct pci_dev *pci)
+static void snd_cs5530_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
}
-static u8 __devinit snd_cs5530_mixer_read(unsigned long io, u8 reg)
+static u8 snd_cs5530_mixer_read(unsigned long io, u8 reg)
{
outb(reg, io + 4);
udelay(20);
@@ -103,9 +103,9 @@ static u8 __devinit snd_cs5530_mixer_read(unsigned long io, u8 reg)
return reg;
}
-static int __devinit snd_cs5530_create(struct snd_card *card,
- struct pci_dev *pci,
- struct snd_cs5530 **rchip)
+static int snd_cs5530_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct snd_cs5530 **rchip)
{
struct snd_cs5530 *chip;
unsigned long sb_base;
@@ -250,8 +250,8 @@ static int __devinit snd_cs5530_create(struct snd_card *card,
return 0;
}
-static int __devinit snd_cs5530_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_cs5530_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -294,7 +294,7 @@ static struct pci_driver cs5530_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs5530_ids,
.probe = snd_cs5530_probe,
- .remove = __devexit_p(snd_cs5530_remove),
+ .remove = snd_cs5530_remove,
};
module_pci_driver(cs5530_driver);
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 4915efa551f..7e4b13e2d12 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -43,7 +43,7 @@ static char *ac97_quirk;
module_param(ac97_quirk, charp, 0444);
MODULE_PARM_DESC(ac97_quirk, "AC'97 board specific workarounds.");
-static struct ac97_quirk ac97_quirks[] __devinitdata = {
+static struct ac97_quirk ac97_quirks[] = {
#if 0 /* Not yet confirmed if all 5536 boards are HP only */
{
.subvendor = PCI_VENDOR_ID_AMD,
@@ -144,7 +144,7 @@ static unsigned short snd_cs5535audio_ac97_codec_read(struct snd_ac97 *ac97,
return snd_cs5535audio_codec_read(cs5535au, reg);
}
-static int __devinit snd_cs5535audio_mixer(struct cs5535audio *cs5535au)
+static int snd_cs5535audio_mixer(struct cs5535audio *cs5535au)
{
struct snd_card *card = cs5535au->card;
struct snd_ac97_bus *pbus;
@@ -270,9 +270,9 @@ static int snd_cs5535audio_dev_free(struct snd_device *device)
return snd_cs5535audio_free(cs5535au);
}
-static int __devinit snd_cs5535audio_create(struct snd_card *card,
- struct pci_dev *pci,
- struct cs5535audio **rcs5535au)
+static int snd_cs5535audio_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct cs5535audio **rcs5535au)
{
struct cs5535audio *cs5535au;
@@ -338,8 +338,8 @@ pcifail:
return err;
}
-static int __devinit snd_cs5535audio_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_cs5535audio_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -387,7 +387,7 @@ probefail_out:
return err;
}
-static void __devexit snd_cs5535audio_remove(struct pci_dev *pci)
+static void snd_cs5535audio_remove(struct pci_dev *pci)
{
olpc_quirks_cleanup();
snd_card_free(pci_get_drvdata(pci));
@@ -398,7 +398,7 @@ static struct pci_driver cs5535audio_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs5535audio_ids,
.probe = snd_cs5535audio_probe,
- .remove = __devexit_p(snd_cs5535audio_remove),
+ .remove = snd_cs5535audio_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &snd_cs5535audio_pm,
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index bb3cc641130..0579daa6221 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -97,10 +97,10 @@ struct cs5535audio {
extern const struct dev_pm_ops snd_cs5535audio_pm;
#ifdef CONFIG_OLPC
-void __devinit olpc_prequirks(struct snd_card *card,
- struct snd_ac97_template *ac97);
-int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97);
-void __devexit olpc_quirks_cleanup(void);
+void olpc_prequirks(struct snd_card *card,
+ struct snd_ac97_template *ac97);
+int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97);
+void olpc_quirks_cleanup(void);
void olpc_analog_input(struct snd_ac97 *ac97, int on);
void olpc_mic_bias(struct snd_ac97 *ac97, int on);
@@ -133,7 +133,7 @@ static inline void olpc_capture_open(struct snd_ac97 *ac97) { }
static inline void olpc_capture_close(struct snd_ac97 *ac97) { }
#endif
-int __devinit snd_cs5535audio_pcm(struct cs5535audio *cs5535audio);
+int snd_cs5535audio_pcm(struct cs5535audio *cs5535audio);
#endif /* __SOUND_CS5535AUDIO_H */
diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c b/sound/pci/cs5535audio/cs5535audio_olpc.c
index 50da49be9ae..da1cb9c4c76 100644
--- a/sound/pci/cs5535audio/cs5535audio_olpc.c
+++ b/sound/pci/cs5535audio/cs5535audio_olpc.c
@@ -114,7 +114,7 @@ static int olpc_mic_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v)
return 1;
}
-static struct snd_kcontrol_new olpc_cs5535audio_ctls[] __devinitdata = {
+static struct snd_kcontrol_new olpc_cs5535audio_ctls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DC Mode Enable",
@@ -133,8 +133,8 @@ static struct snd_kcontrol_new olpc_cs5535audio_ctls[] __devinitdata = {
},
};
-void __devinit olpc_prequirks(struct snd_card *card,
- struct snd_ac97_template *ac97)
+void olpc_prequirks(struct snd_card *card,
+ struct snd_ac97_template *ac97)
{
if (!machine_is_olpc())
return;
@@ -144,7 +144,7 @@ void __devinit olpc_prequirks(struct snd_card *card,
ac97->scaps |= AC97_SCAP_INV_EAPD;
}
-int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
+int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
{
struct snd_ctl_elem_id elem;
int i, err;
@@ -185,7 +185,7 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
return 0;
}
-void __devexit olpc_quirks_cleanup(void)
+void olpc_quirks_cleanup(void)
{
gpio_free(OLPC_GPIO_MIC_AC);
}
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index dbf94b189e7..9ab01a7047c 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -422,7 +422,7 @@ static struct cs5535audio_dma_ops snd_cs5535audio_capture_dma_ops = {
.read_dma_pntr = cs5535audio_capture_read_dma_pntr,
};
-int __devinit snd_cs5535audio_pcm(struct cs5535audio *cs5535au)
+int snd_cs5535audio_pcm(struct cs5535audio *cs5535au)
{
struct snd_pcm *pcm;
int err;
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index a2f997a9977..b5fa583a239 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -38,7 +38,7 @@
| (0x10 << 16) \
| ((IEC958_AES3_CON_FS_48000) << 24))
-static struct snd_pci_quirk __devinitdata subsys_20k1_list[] = {
+static struct snd_pci_quirk subsys_20k1_list[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
@@ -48,7 +48,7 @@ static struct snd_pci_quirk __devinitdata subsys_20k1_list[] = {
{ } /* terminator */
};
-static struct snd_pci_quirk __devinitdata subsys_20k2_list[] = {
+static struct snd_pci_quirk subsys_20k2_list[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB0760,
"SB0760", CTSB0760),
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, PCI_SUBDEVICE_ID_CREATIVE_SB1270,
@@ -1249,7 +1249,7 @@ static int atc_dev_free(struct snd_device *dev)
return ct_atc_destroy(atc);
}
-static int __devinit atc_identify_card(struct ct_atc *atc, unsigned int ssid)
+static int atc_identify_card(struct ct_atc *atc, unsigned int ssid)
{
const struct snd_pci_quirk *p;
const struct snd_pci_quirk *list;
@@ -1296,7 +1296,7 @@ static int __devinit atc_identify_card(struct ct_atc *atc, unsigned int ssid)
return 0;
}
-int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc)
+int ct_atc_create_alsa_devs(struct ct_atc *atc)
{
enum CTALSADEVS i;
int err;
@@ -1319,7 +1319,7 @@ int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc)
return 0;
}
-static int __devinit atc_create_hw_devs(struct ct_atc *atc)
+static int atc_create_hw_devs(struct ct_atc *atc)
{
struct hw *hw;
struct card_conf info = {0};
@@ -1614,7 +1614,7 @@ static int atc_resume(struct ct_atc *atc)
}
#endif
-static struct ct_atc atc_preset __devinitdata = {
+static struct ct_atc atc_preset = {
.map_audio_buffer = ct_map_audio_buffer,
.unmap_audio_buffer = ct_unmap_audio_buffer,
.pcm_playback_prepare = atc_pcm_playback_prepare,
@@ -1665,10 +1665,10 @@ static struct ct_atc atc_preset __devinitdata = {
* Returns 0 if succeeds, or negative error code if fails.
*/
-int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
- unsigned int rsr, unsigned int msr,
- int chip_type, unsigned int ssid,
- struct ct_atc **ratc)
+int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
+ unsigned int rsr, unsigned int msr,
+ int chip_type, unsigned int ssid,
+ struct ct_atc **ratc)
{
struct ct_atc *atc;
static struct snd_device_ops ops = {
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
index 69b51f9d345..5f11ca22fcd 100644
--- a/sound/pci/ctxfi/ctatc.h
+++ b/sound/pci/ctxfi/ctatc.h
@@ -152,9 +152,9 @@ struct ct_atc {
};
-int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
- unsigned int rsr, unsigned int msr, int chip_type,
- unsigned int subsysid, struct ct_atc **ratc);
-int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc);
+int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
+ unsigned int rsr, unsigned int msr, int chip_type,
+ unsigned int subsysid, struct ct_atc **ratc);
+int ct_atc_create_alsa_devs(struct ct_atc *atc);
#endif /* CTATC_H */
diff --git a/sound/pci/ctxfi/cthardware.c b/sound/pci/ctxfi/cthardware.c
index 8e64f4862e8..110b8ace6d8 100644
--- a/sound/pci/ctxfi/cthardware.c
+++ b/sound/pci/ctxfi/cthardware.c
@@ -20,8 +20,8 @@
#include "cthw20k2.h"
#include <linux/bug.h>
-int __devinit create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type,
- enum CTCARDS model, struct hw **rhw)
+int create_hw_obj(struct pci_dev *pci, enum CHIPTYP chip_type,
+ enum CTCARDS model, struct hw **rhw)
{
int err;
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index 4507f7088b2..6ac40beb49d 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -2171,7 +2171,7 @@ static void hw_write_pci(struct hw *hw, u32 reg, u32 data)
&container_of(hw, struct hw20k1, hw)->reg_pci_lock, flags);
}
-static struct hw ct20k1_preset __devinitdata = {
+static struct hw ct20k1_preset = {
.irq = -1,
.card_init = hw_card_init,
@@ -2275,7 +2275,7 @@ static struct hw ct20k1_preset __devinitdata = {
.get_wc = get_wc,
};
-int __devinit create_20k1_hw_obj(struct hw **rhw)
+int create_20k1_hw_obj(struct hw **rhw)
{
struct hw20k1 *hw20k1;
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index b9c9349058b..b1438861d38 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -2237,7 +2237,7 @@ static void hw_write_20kx(struct hw *hw, u32 reg, u32 data)
writel(data, (void *)(hw->mem_base + reg));
}
-static struct hw ct20k2_preset __devinitdata = {
+static struct hw ct20k2_preset = {
.irq = -1,
.card_init = hw_card_init,
@@ -2345,7 +2345,7 @@ static struct hw ct20k2_preset __devinitdata = {
.get_wc = get_wc,
};
-int __devinit create_20k2_hw_obj(struct hw **rhw)
+int create_20k2_hw_obj(struct hw **rhw)
{
struct hw20k2 *hw20k2;
diff --git a/sound/pci/ctxfi/xfi.c b/sound/pci/ctxfi/xfi.c
index 07c07d752fd..d01ffcb2b2f 100644
--- a/sound/pci/ctxfi/xfi.c
+++ b/sound/pci/ctxfi/xfi.c
@@ -56,7 +56,7 @@ static DEFINE_PCI_DEVICE_TABLE(ct_pci_dev_ids) = {
};
MODULE_DEVICE_TABLE(pci, ct_pci_dev_ids);
-static int __devinit
+static int
ct_card_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -119,7 +119,7 @@ error:
return err;
}
-static void __devexit ct_card_remove(struct pci_dev *pci)
+static void ct_card_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -152,7 +152,7 @@ static struct pci_driver ct_driver = {
.name = KBUILD_MODNAME,
.id_table = ct_pci_dev_ids,
.probe = ct_card_probe,
- .remove = __devexit_p(ct_card_remove),
+ .remove = ct_card_remove,
.driver = {
.pm = CT_CARD_PM_OPS,
},
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index abb0b86c41c..760cbff5321 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -907,7 +907,7 @@ static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
/*<--snd_echo_probe() */
-static int __devinit snd_echo_new_pcm(struct echoaudio *chip)
+static int snd_echo_new_pcm(struct echoaudio *chip)
{
struct snd_pcm *pcm;
int err;
@@ -1050,7 +1050,7 @@ static int snd_echo_output_gain_put(struct snd_kcontrol *kcontrol,
#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
/* On the Mia this one controls the line-out volume */
-static struct snd_kcontrol_new snd_echo_line_output_gain __devinitdata = {
+static struct snd_kcontrol_new snd_echo_line_output_gain = {
.name = "Line Playback Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -1061,7 +1061,7 @@ static struct snd_kcontrol_new snd_echo_line_output_gain __devinitdata = {
.tlv = {.p = db_scale_output_gain},
};
#else
-static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = {
+static struct snd_kcontrol_new snd_echo_pcm_output_gain = {
.name = "PCM Playback Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
@@ -1131,7 +1131,7 @@ static int snd_echo_input_gain_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(db_scale_input_gain, -2500, 50, 0);
-static struct snd_kcontrol_new snd_echo_line_input_gain __devinitdata = {
+static struct snd_kcontrol_new snd_echo_line_input_gain = {
.name = "Line Capture Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
@@ -1195,7 +1195,7 @@ static int snd_echo_output_nominal_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_output_nominal_level __devinitdata = {
+static struct snd_kcontrol_new snd_echo_output_nominal_level = {
.name = "Line Playback Switch (-10dBV)",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_echo_output_nominal_info,
@@ -1261,7 +1261,7 @@ static int snd_echo_input_nominal_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_intput_nominal_level __devinitdata = {
+static struct snd_kcontrol_new snd_echo_intput_nominal_level = {
.name = "Line Capture Switch (-10dBV)",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_echo_input_nominal_info,
@@ -1327,7 +1327,7 @@ static int snd_echo_mixer_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_monitor_mixer __devinitdata = {
+static struct snd_kcontrol_new snd_echo_monitor_mixer = {
.name = "Monitor Mixer Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
@@ -1395,7 +1395,7 @@ static int snd_echo_vmixer_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_vmixer __devinitdata = {
+static struct snd_kcontrol_new snd_echo_vmixer = {
.name = "VMixer Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
@@ -1490,7 +1490,7 @@ static int snd_echo_digital_mode_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_digital_mode_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_digital_mode_switch = {
.name = "Digital mode Switch",
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.info = snd_echo_digital_mode_info,
@@ -1547,7 +1547,7 @@ static int snd_echo_spdif_mode_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_echo_spdif_mode_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_spdif_mode_switch = {
.name = "S/PDIF mode Switch",
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.info = snd_echo_spdif_mode_info,
@@ -1626,7 +1626,7 @@ static int snd_echo_clock_source_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_clock_source_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_clock_source_switch = {
.name = "Sample Clock Source",
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.info = snd_echo_clock_source_info,
@@ -1669,7 +1669,7 @@ static int snd_echo_phantom_power_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_phantom_power_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_phantom_power_switch = {
.name = "Phantom power Switch",
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.info = snd_echo_phantom_power_info,
@@ -1712,7 +1712,7 @@ static int snd_echo_automute_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_echo_automute_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_automute_switch = {
.name = "Digital Capture Switch (automute)",
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.info = snd_echo_automute_info,
@@ -1739,7 +1739,7 @@ static int snd_echo_vumeters_switch_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new snd_echo_vumeters_switch __devinitdata = {
+static struct snd_kcontrol_new snd_echo_vumeters_switch = {
.name = "VU-meters Switch",
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.access = SNDRV_CTL_ELEM_ACCESS_WRITE,
@@ -1780,7 +1780,7 @@ static int snd_echo_vumeters_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_echo_vumeters __devinitdata = {
+static struct snd_kcontrol_new snd_echo_vumeters = {
.name = "VU-meters",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READ |
@@ -1836,7 +1836,7 @@ static int snd_echo_channels_info_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_echo_channels_info __devinitdata = {
+static struct snd_kcontrol_new snd_echo_channels_info = {
.name = "Channels info",
.iface = SNDRV_CTL_ELEM_IFACE_HWDEP,
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
@@ -1940,9 +1940,9 @@ static int snd_echo_dev_free(struct snd_device *device)
/* <--snd_echo_probe() */
-static __devinit int snd_echo_create(struct snd_card *card,
- struct pci_dev *pci,
- struct echoaudio **rchip)
+static int snd_echo_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct echoaudio **rchip)
{
struct echoaudio *chip;
int err;
@@ -2040,8 +2040,8 @@ static __devinit int snd_echo_create(struct snd_card *card,
/* constructor */
-static int __devinit snd_echo_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_echo_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2316,7 +2316,7 @@ static SIMPLE_DEV_PM_OPS(snd_echo_pm, snd_echo_suspend, snd_echo_resume);
#endif /* CONFIG_PM_SLEEP */
-static void __devexit snd_echo_remove(struct pci_dev *pci)
+static void snd_echo_remove(struct pci_dev *pci)
{
struct echoaudio *chip;
@@ -2337,7 +2337,7 @@ static struct pci_driver echo_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_echo_ids,
.probe = snd_echo_probe,
- .remove = __devexit_p(snd_echo_remove),
+ .remove = snd_echo_remove,
.driver = {
.pm = SND_ECHO_PM_OPS,
},
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index e158369f5fa..b86b88da81c 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -475,8 +475,8 @@ static int enable_midi_input(struct echoaudio *chip, char enable);
static void snd_echo_midi_output_trigger(
struct snd_rawmidi_substream *substream, int up);
static int midi_service_irq(struct echoaudio *chip);
-static int __devinit snd_echo_midi_create(struct snd_card *card,
- struct echoaudio *chip);
+static int snd_echo_midi_create(struct snd_card *card,
+ struct echoaudio *chip);
#endif
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c
index a953d142cb4..abfd51c2530 100644
--- a/sound/pci/echoaudio/midi.c
+++ b/sound/pci/echoaudio/midi.c
@@ -307,8 +307,8 @@ static struct snd_rawmidi_ops snd_echo_midi_output = {
/* <--snd_echo_probe() */
-static int __devinit snd_echo_midi_create(struct snd_card *card,
- struct echoaudio *chip)
+static int snd_echo_midi_create(struct snd_card *card,
+ struct echoaudio *chip)
{
int err;
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index b7c1875ba90..8c5010f7889 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -99,8 +99,8 @@ static DEFINE_PCI_DEVICE_TABLE(snd_emu10k1_ids) = {
MODULE_DEVICE_TABLE(pci, snd_emu10k1_ids);
-static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_card_emu10k1_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -199,7 +199,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_card_emu10k1_remove(struct pci_dev *pci)
+static void snd_card_emu10k1_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -215,6 +215,8 @@ static int snd_emu10k1_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ emu->suspend = 1;
+
snd_pcm_suspend_all(emu->pcm);
snd_pcm_suspend_all(emu->pcm_mic);
snd_pcm_suspend_all(emu->pcm_efx);
@@ -260,6 +262,8 @@ static int snd_emu10k1_resume(struct device *dev)
if (emu->card_capabilities->ca0151_chip)
snd_p16v_resume(emu);
+ emu->suspend = 0;
+
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
@@ -274,7 +278,7 @@ static struct pci_driver emu10k1_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_emu10k1_ids,
.probe = snd_card_emu10k1_probe,
- .remove = __devexit_p(snd_card_emu10k1_remove),
+ .remove = snd_card_emu10k1_remove,
.driver = {
.pm = SND_EMU10K1_PM_OPS,
},
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index c21adb6ef1d..a7c296a36a1 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -657,22 +657,17 @@ static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
return 0;
}
-static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, const char *filename)
+static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu)
{
- int err;
int n, i;
int reg;
int value;
unsigned int write_post;
unsigned long flags;
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry = emu->firmware;
- err = request_firmware(&fw_entry, filename, &emu->pci->dev);
- if (err != 0) {
- snd_printk(KERN_ERR "firmware: %s not found. Err = %d\n", filename, err);
- return err;
- }
- snd_printk(KERN_INFO "firmware size = 0x%zx\n", fw_entry->size);
+ if (!fw_entry)
+ return -EIO;
/* The FPGA is a Xilinx Spartan IIE XC2S50E */
/* GPIO7 -> FPGA PGMN
@@ -705,7 +700,6 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, const char *filena
write_post = inl(emu->port + A_IOCFG);
spin_unlock_irqrestore(&emu->emu_lock, flags);
- release_firmware(fw_entry);
return 0;
}
@@ -720,6 +714,10 @@ static int emu1010_firmware_thread(void *data)
msleep_interruptible(1000);
if (kthread_should_stop())
break;
+#ifdef CONFIG_PM_SLEEP
+ if (emu->suspend)
+ continue;
+#endif
snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
@@ -727,22 +725,9 @@ static int emu1010_firmware_thread(void *data)
/* Return to Audio Dock programming mode */
snd_printk(KERN_INFO "emu1010: Loading Audio Dock Firmware\n");
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, EMU_HANA_FPGA_CONFIG_AUDIODOCK);
- if (emu->card_capabilities->emu_model ==
- EMU_MODEL_EMU1010) {
- err = snd_emu1010_load_firmware(emu, DOCK_FILENAME);
- if (err != 0)
- continue;
- } else if (emu->card_capabilities->emu_model ==
- EMU_MODEL_EMU1010B) {
- err = snd_emu1010_load_firmware(emu, MICRO_DOCK_FILENAME);
- if (err != 0)
- continue;
- } else if (emu->card_capabilities->emu_model ==
- EMU_MODEL_EMU1616) {
- err = snd_emu1010_load_firmware(emu, MICRO_DOCK_FILENAME);
- if (err != 0)
- continue;
- }
+ err = snd_emu1010_load_firmware(emu);
+ if (err != 0)
+ continue;
snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
@@ -807,7 +792,6 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
unsigned int i;
u32 tmp, tmp2, reg;
int err;
- const char *filename = NULL;
snd_printk(KERN_INFO "emu1010: Special config.\n");
/* AC97 2.1, Any 16Meg of 4Gig address, Auto-Mute, EMU32 Slave,
@@ -849,31 +833,33 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
return -ENODEV;
}
snd_printk(KERN_INFO "emu1010: EMU_HANA_ID = 0x%x\n", reg);
- switch (emu->card_capabilities->emu_model) {
- case EMU_MODEL_EMU1010:
- filename = HANA_FILENAME;
- break;
- case EMU_MODEL_EMU1010B:
- filename = EMU1010B_FILENAME;
- break;
- case EMU_MODEL_EMU1616:
- filename = EMU1010_NOTEBOOK_FILENAME;
- break;
- case EMU_MODEL_EMU0404:
- filename = EMU0404_FILENAME;
- break;
- default:
- filename = NULL;
- return -ENODEV;
- break;
- }
- snd_printk(KERN_INFO "emu1010: filename %s testing\n", filename);
- err = snd_emu1010_load_firmware(emu, filename);
- if (err != 0) {
- snd_printk(
- KERN_INFO "emu1010: Loading Firmware file %s failed\n",
- filename);
- return err;
+
+ if (!emu->firmware) {
+ const char *filename;
+ switch (emu->card_capabilities->emu_model) {
+ case EMU_MODEL_EMU1010:
+ filename = HANA_FILENAME;
+ break;
+ case EMU_MODEL_EMU1010B:
+ filename = EMU1010B_FILENAME;
+ break;
+ case EMU_MODEL_EMU1616:
+ filename = EMU1010_NOTEBOOK_FILENAME;
+ break;
+ case EMU_MODEL_EMU0404:
+ filename = EMU0404_FILENAME;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ err = request_firmware(&emu->firmware, filename, &emu->pci->dev);
+ if (err != 0) {
+ snd_printk(KERN_ERR "emu1010: firmware: %s not found. Err = %d\n", filename, err);
+ return err;
+ }
+ snd_printk(KERN_INFO "emu1010: firmware file = %s, size = 0x%zx\n",
+ filename, emu->firmware->size);
}
/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
@@ -1259,6 +1245,8 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
}
if (emu->emu1010.firmware_thread)
kthread_stop(emu->emu1010.firmware_thread);
+ if (emu->firmware)
+ release_firmware(emu->firmware);
if (emu->irq >= 0)
free_irq(emu->irq, emu);
/* remove reserved page */
@@ -1738,7 +1726,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
{ } /* terminator */
};
-int __devinit snd_emu10k1_create(struct snd_card *card,
+int snd_emu10k1_create(struct snd_card *card,
struct pci_dev *pci,
unsigned short extin_mask,
unsigned short extout_mask,
@@ -2025,7 +2013,7 @@ static unsigned char saved_regs_audigy[] = {
0xff /* end */
};
-static int __devinit alloc_pm_buffer(struct snd_emu10k1 *emu)
+static int alloc_pm_buffer(struct snd_emu10k1 *emu)
{
int size;
diff --git a/sound/pci/emu10k1/emu10k1_patch.c b/sound/pci/emu10k1/emu10k1_patch.c
index e10f027bde0..662a45876a8 100644
--- a/sound/pci/emu10k1/emu10k1_patch.c
+++ b/sound/pci/emu10k1/emu10k1_patch.c
@@ -123,7 +123,7 @@ snd_emu10k1_sample_new(struct snd_emux *rec, struct snd_sf_sample *sp,
offset += size;
data += size;
-#if 0 /* not suppported yet */
+#if 0 /* not supported yet */
/* handle reverse (or bidirectional) loop */
if (sp->v.mode_flags & (SNDRV_SFNT_SAMPLE_BIDIR_LOOP|SNDRV_SFNT_SAMPLE_REVERSE_LOOP)) {
/* copy loop in reverse */
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 556fd6f456e..cdff11d48eb 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -842,7 +842,7 @@ static const struct snd_pcm_chmap_elem clfe_map[] = {
{ }
};
-static int __devinit snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct snd_pcm **rpcm)
+static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
const struct snd_pcm_chmap_elem *map = NULL;
@@ -902,9 +902,9 @@ static int __devinit snd_emu10k1x_pcm(struct emu10k1x *emu, int device, struct s
return 0;
}
-static int __devinit snd_emu10k1x_create(struct snd_card *card,
- struct pci_dev *pci,
- struct emu10k1x **rchip)
+static int snd_emu10k1x_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct emu10k1x **rchip)
{
struct emu10k1x *chip;
int err;
@@ -1066,7 +1066,7 @@ static void snd_emu10k1x_proc_reg_write(struct snd_info_entry *entry,
}
}
-static int __devinit snd_emu10k1x_proc_init(struct emu10k1x * emu)
+static int snd_emu10k1x_proc_init(struct emu10k1x *emu)
{
struct snd_info_entry *entry;
@@ -1115,7 +1115,7 @@ static int snd_emu10k1x_shared_spdif_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_emu10k1x_shared_spdif __devinitdata =
+static struct snd_kcontrol_new snd_emu10k1x_shared_spdif =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog/Digital Output Jack",
@@ -1194,7 +1194,7 @@ static struct snd_kcontrol_new snd_emu10k1x_spdif_control =
.put = snd_emu10k1x_spdif_put
};
-static int __devinit snd_emu10k1x_mixer(struct emu10k1x *emu)
+static int snd_emu10k1x_mixer(struct emu10k1x *emu)
{
int err;
struct snd_kcontrol *kctl;
@@ -1507,8 +1507,9 @@ static void snd_emu10k1x_midi_free(struct snd_rawmidi *rmidi)
midi->rmidi = NULL;
}
-static int __devinit emu10k1x_midi_init(struct emu10k1x *emu,
- struct emu10k1x_midi *midi, int device, char *name)
+static int emu10k1x_midi_init(struct emu10k1x *emu,
+ struct emu10k1x_midi *midi, int device,
+ char *name)
{
struct snd_rawmidi *rmidi;
int err;
@@ -1531,7 +1532,7 @@ static int __devinit emu10k1x_midi_init(struct emu10k1x *emu,
return 0;
}
-static int __devinit snd_emu10k1x_midi(struct emu10k1x *emu)
+static int snd_emu10k1x_midi(struct emu10k1x *emu)
{
struct emu10k1x_midi *midi = &emu->midi;
int err;
@@ -1548,8 +1549,8 @@ static int __devinit snd_emu10k1x_midi(struct emu10k1x *emu)
return 0;
}
-static int __devinit snd_emu10k1x_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_emu10k1x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1619,7 +1620,7 @@ static int __devinit snd_emu10k1x_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_emu10k1x_remove(struct pci_dev *pci)
+static void snd_emu10k1x_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1637,7 +1638,7 @@ static struct pci_driver emu10k1x_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_emu10k1x_ids,
.probe = snd_emu10k1x_probe,
- .remove = __devexit_p(snd_emu10k1x_remove),
+ .remove = snd_emu10k1x_remove,
};
module_pci_driver(emu10k1x_driver);
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 52419959178..0275209ca82 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -1073,7 +1073,7 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
#define SND_EMU10K1_PLAYBACK_CHANNELS 8
#define SND_EMU10K1_CAPTURE_CHANNELS 4
-static void __devinit
+static void
snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
const char *name, int gpr, int defval)
{
@@ -1094,7 +1094,7 @@ snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
}
}
-static void __devinit
+static void
snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
const char *name, int gpr, int defval)
{
@@ -1116,7 +1116,7 @@ snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
}
}
-static void __devinit
+static void
snd_emu10k1_init_mono_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
const char *name, int gpr, int defval)
{
@@ -1129,7 +1129,7 @@ snd_emu10k1_init_mono_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
ctl->translation = EMU10K1_GPR_TRANSLATION_ONOFF;
}
-static void __devinit
+static void
snd_emu10k1_init_stereo_onoff_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
const char *name, int gpr, int defval)
{
@@ -1168,7 +1168,7 @@ static int snd_emu10k1_audigy_dsp_convert_32_to_2x16(
* initial DSP configuration for Audigy
*/
-static int __devinit _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
+static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
{
int err, i, z, gpr, nctl;
int bit_shifter16;
@@ -1757,14 +1757,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
/* when volume = max, then copy only to avoid volume modification */
/* with iMAC0 (negative values) */
-static void __devinit _volume(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
+static void _volume(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
OP(icode, ptr, iMAC0, dst, C_00000000, src, vol);
OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000001);
OP(icode, ptr, iACC3, dst, src, C_00000000, C_00000000);
}
-static void __devinit _volume_add(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
+static void _volume_add(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000002);
@@ -1772,7 +1772,7 @@ static void __devinit _volume_add(struct snd_emu10k1_fx8010_code *icode, u32 *pt
OP(icode, ptr, iSKIP, C_00000000, C_7fffffff, C_7fffffff, C_00000001);
OP(icode, ptr, iMAC0, dst, dst, src, vol);
}
-static void __devinit _volume_out(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
+static void _volume_out(struct snd_emu10k1_fx8010_code *icode, u32 *ptr, u32 dst, u32 src, u32 vol)
{
OP(icode, ptr, iANDXOR, C_00000000, vol, C_ffffffff, C_7fffffff);
OP(icode, ptr, iSKIP, GPR_COND, GPR_COND, CC_REG_NONZERO, C_00000002);
@@ -1803,7 +1803,7 @@ static void __devinit _volume_out(struct snd_emu10k1_fx8010_code *icode, u32 *pt
_SWITCH_NEG(icode, ptr, GPR(dst), GPR(src))
-static int __devinit _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
+static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
{
int err, i, z, gpr, tmp, playback, capture;
u32 ptr;
@@ -2373,7 +2373,7 @@ static int __devinit _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
return err;
}
-int __devinit snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
+int snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
{
spin_lock_init(&emu->fx8010.irq_lock);
INIT_LIST_HEAD(&emu->fx8010.gpr_ctl);
@@ -2626,7 +2626,8 @@ static int snd_emu10k1_fx8010_release(struct snd_hwdep * hw, struct file *file)
return 0;
}
-int __devinit snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device, struct snd_hwdep ** rhwdep)
+int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device,
+ struct snd_hwdep **rhwdep)
{
struct snd_hwdep *hw;
int err;
@@ -2647,7 +2648,7 @@ int __devinit snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device, struct
}
#ifdef CONFIG_PM_SLEEP
-int __devinit snd_emu10k1_efx_alloc_pm_buffer(struct snd_emu10k1 *emu)
+int snd_emu10k1_efx_alloc_pm_buffer(struct snd_emu10k1 *emu)
{
int len;
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 9d890a5aec5..f6c3da0d377 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -510,7 +510,7 @@ static int snd_emu1010_input_source_put(struct snd_kcontrol *kcontrol,
.private_value = chid \
}
-static struct snd_kcontrol_new snd_emu1010_output_enum_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_emu1010_output_enum_ctls[] = {
EMU1010_SOURCE_OUTPUT("Dock DAC1 Left Playback Enum", 0),
EMU1010_SOURCE_OUTPUT("Dock DAC1 Right Playback Enum", 1),
EMU1010_SOURCE_OUTPUT("Dock DAC2 Left Playback Enum", 2),
@@ -539,7 +539,7 @@ static struct snd_kcontrol_new snd_emu1010_output_enum_ctls[] __devinitdata = {
/* 1616(m) cardbus */
-static struct snd_kcontrol_new snd_emu1616_output_enum_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_emu1616_output_enum_ctls[] = {
EMU1010_SOURCE_OUTPUT("Dock DAC1 Left Playback Enum", 0),
EMU1010_SOURCE_OUTPUT("Dock DAC1 Right Playback Enum", 1),
EMU1010_SOURCE_OUTPUT("Dock DAC2 Left Playback Enum", 2),
@@ -571,7 +571,7 @@ static struct snd_kcontrol_new snd_emu1616_output_enum_ctls[] __devinitdata = {
.private_value = chid \
}
-static struct snd_kcontrol_new snd_emu1010_input_enum_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_emu1010_input_enum_ctls[] = {
EMU1010_SOURCE_INPUT("DSP 0 Capture Enum", 0),
EMU1010_SOURCE_INPUT("DSP 1 Capture Enum", 1),
EMU1010_SOURCE_INPUT("DSP 2 Capture Enum", 2),
@@ -639,7 +639,7 @@ static int snd_emu1010_adc_pads_put(struct snd_kcontrol *kcontrol, struct snd_ct
.private_value = chid \
}
-static struct snd_kcontrol_new snd_emu1010_adc_pads[] __devinitdata = {
+static struct snd_kcontrol_new snd_emu1010_adc_pads[] = {
EMU1010_ADC_PADS("ADC1 14dB PAD Audio Dock Capture Switch", EMU_HANA_DOCK_ADC_PAD1),
EMU1010_ADC_PADS("ADC2 14dB PAD Audio Dock Capture Switch", EMU_HANA_DOCK_ADC_PAD2),
EMU1010_ADC_PADS("ADC3 14dB PAD Audio Dock Capture Switch", EMU_HANA_DOCK_ADC_PAD3),
@@ -687,7 +687,7 @@ static int snd_emu1010_dac_pads_put(struct snd_kcontrol *kcontrol, struct snd_ct
.private_value = chid \
}
-static struct snd_kcontrol_new snd_emu1010_dac_pads[] __devinitdata = {
+static struct snd_kcontrol_new snd_emu1010_dac_pads[] = {
EMU1010_DAC_PADS("DAC1 Audio Dock 14dB PAD Playback Switch", EMU_HANA_DOCK_DAC_PAD1),
EMU1010_DAC_PADS("DAC2 Audio Dock 14dB PAD Playback Switch", EMU_HANA_DOCK_DAC_PAD2),
EMU1010_DAC_PADS("DAC3 Audio Dock 14dB PAD Playback Switch", EMU_HANA_DOCK_DAC_PAD3),
@@ -989,7 +989,7 @@ static int snd_audigy_i2c_volume_put(struct snd_kcontrol *kcontrol,
}
-static struct snd_kcontrol_new snd_audigy_i2c_volume_ctls[] __devinitdata = {
+static struct snd_kcontrol_new snd_audigy_i2c_volume_ctls[] = {
I2C_VOLUME("Mic Capture Volume", 0),
I2C_VOLUME("Line Capture Volume", 0)
};
@@ -1621,7 +1621,7 @@ static int snd_emu10k1_shared_spdif_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_emu10k1_shared_spdif __devinitdata =
+static struct snd_kcontrol_new snd_emu10k1_shared_spdif =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "SB Live Analog/Digital Output Jack",
@@ -1630,7 +1630,7 @@ static struct snd_kcontrol_new snd_emu10k1_shared_spdif __devinitdata =
.put = snd_emu10k1_shared_spdif_put
};
-static struct snd_kcontrol_new snd_audigy_shared_spdif __devinitdata =
+static struct snd_kcontrol_new snd_audigy_shared_spdif =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Audigy Analog/Digital Output Jack",
@@ -1668,7 +1668,7 @@ static int snd_audigy_capture_boost_put(struct snd_kcontrol *kcontrol,
return snd_ac97_update(emu->ac97, AC97_REC_GAIN, val);
}
-static struct snd_kcontrol_new snd_audigy_capture_boost __devinitdata =
+static struct snd_kcontrol_new snd_audigy_capture_boost =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog Capture Boost",
@@ -1716,8 +1716,8 @@ static int rename_ctl(struct snd_card *card, const char *src, const char *dst)
return -ENOENT;
}
-int __devinit snd_emu10k1_mixer(struct snd_emu10k1 *emu,
- int pcm_device, int multi_device)
+int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
+ int pcm_device, int multi_device)
{
int err, pcm;
struct snd_kcontrol *kctl;
diff --git a/sound/pci/emu10k1/emumpu401.c b/sound/pci/emu10k1/emumpu401.c
index bab564824ef..1ec91246dfe 100644
--- a/sound/pci/emu10k1/emumpu401.c
+++ b/sound/pci/emu10k1/emumpu401.c
@@ -326,7 +326,7 @@ static void snd_emu10k1_midi_free(struct snd_rawmidi *rmidi)
midi->rmidi = NULL;
}
-static int __devinit emu10k1_midi_init(struct snd_emu10k1 *emu, struct snd_emu10k1_midi *midi, int device, char *name)
+static int emu10k1_midi_init(struct snd_emu10k1 *emu, struct snd_emu10k1_midi *midi, int device, char *name)
{
struct snd_rawmidi *rmidi;
int err;
@@ -349,7 +349,7 @@ static int __devinit emu10k1_midi_init(struct snd_emu10k1 *emu, struct snd_emu10
return 0;
}
-int __devinit snd_emu10k1_midi(struct snd_emu10k1 *emu)
+int snd_emu10k1_midi(struct snd_emu10k1 *emu)
{
struct snd_emu10k1_midi *midi = &emu->midi;
int err;
@@ -366,7 +366,7 @@ int __devinit snd_emu10k1_midi(struct snd_emu10k1 *emu)
return 0;
}
-int __devinit snd_emu10k1_audigy_midi(struct snd_emu10k1 *emu)
+int snd_emu10k1_audigy_midi(struct snd_emu10k1 *emu)
{
struct snd_emu10k1_midi *midi;
int err;
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 0e6664fa6cd..748a286277e 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1391,7 +1391,7 @@ static struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-int __devinit snd_emu10k1_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm)
+int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
@@ -1426,7 +1426,8 @@ int __devinit snd_emu10k1_pcm(struct snd_emu10k1 * emu, int device, struct snd_p
return 0;
}
-int __devinit snd_emu10k1_pcm_multi(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm)
+int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
@@ -1469,7 +1470,8 @@ static struct snd_pcm_ops snd_emu10k1_capture_mic_ops = {
.pointer = snd_emu10k1_capture_pointer,
};
-int __devinit snd_emu10k1_pcm_mic(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm)
+int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1810,7 +1812,8 @@ static struct snd_pcm_ops snd_emu10k1_fx8010_playback_ops = {
.ack = snd_emu10k1_fx8010_playback_transfer,
};
-int __devinit snd_emu10k1_pcm_efx(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm)
+int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
struct snd_kcontrol *kctl;
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index bc38dd4d071..2ca9f2e9313 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -577,7 +577,7 @@ static struct snd_info_entry_ops snd_emu10k1_proc_ops_fx8010 = {
.read = snd_emu10k1_fx8010_read,
};
-int __devinit snd_emu10k1_proc_init(struct snd_emu10k1 * emu)
+int snd_emu10k1_proc_init(struct snd_emu10k1 *emu)
{
struct snd_info_entry *entry;
#ifdef CONFIG_SND_DEBUG
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index 88cec6b7dd4..7e2025cd6d9 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -637,7 +637,7 @@ int snd_p16v_free(struct snd_emu10k1 *chip)
return 0;
}
-int __devinit snd_p16v_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
+int snd_p16v_pcm(struct snd_emu10k1 *emu, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
@@ -854,7 +854,7 @@ static const DECLARE_TLV_DB_SCALE(snd_p16v_db_scale1, -5175, 25, 1);
.private_value = ((xreg) | ((xhl) << 8)) \
}
-static struct snd_kcontrol_new p16v_mixer_controls[] __devinitdata = {
+static struct snd_kcontrol_new p16v_mixer_controls[] = {
P16V_VOL("HD Analog Front Playback Volume", PLAYBACK_VOLUME_MIXER9, 0),
P16V_VOL("HD Analog Rear Playback Volume", PLAYBACK_VOLUME_MIXER10, 1),
P16V_VOL("HD Analog Center/LFE Playback Volume", PLAYBACK_VOLUME_MIXER9, 1),
@@ -880,7 +880,7 @@ static struct snd_kcontrol_new p16v_mixer_controls[] __devinitdata = {
};
-int __devinit snd_p16v_mixer(struct snd_emu10k1 *emu)
+int snd_p16v_mixer(struct snd_emu10k1 *emu)
{
int i, err;
struct snd_card *card = emu->card;
@@ -897,7 +897,7 @@ int __devinit snd_p16v_mixer(struct snd_emu10k1 *emu)
#define NUM_CHS 1 /* up to 4, but only first channel is used */
-int __devinit snd_p16v_alloc_pm_buffer(struct snd_emu10k1 *emu)
+int snd_p16v_alloc_pm_buffer(struct snd_emu10k1 *emu)
{
emu->p16v_saved = vmalloc(NUM_CHS * 4 * 0x80);
if (! emu->p16v_saved)
diff --git a/sound/pci/emu10k1/timer.c b/sound/pci/emu10k1/timer.c
index 72321e946cc..b69a7f8a216 100644
--- a/sound/pci/emu10k1/timer.c
+++ b/sound/pci/emu10k1/timer.c
@@ -75,7 +75,7 @@ static struct snd_timer_hardware snd_emu10k1_timer_hw = {
.precise_resolution = snd_emu10k1_timer_precise_resolution,
};
-int __devinit snd_emu10k1_timer(struct snd_emu10k1 *emu, int device)
+int snd_emu10k1_timer(struct snd_emu10k1 *emu, int device)
{
struct snd_timer *timer = NULL;
struct snd_timer_id tid;
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 5674cc31653..db2dc835171 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1268,8 +1268,8 @@ static const struct snd_pcm_chmap_elem surround_map[] = {
{ }
};
-static int __devinit snd_ensoniq_pcm(struct ensoniq * ensoniq, int device,
- struct snd_pcm ** rpcm)
+static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1310,8 +1310,8 @@ static int __devinit snd_ensoniq_pcm(struct ensoniq * ensoniq, int device,
return 0;
}
-static int __devinit snd_ensoniq_pcm2(struct ensoniq * ensoniq, int device,
- struct snd_pcm ** rpcm)
+static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1484,7 +1484,7 @@ static int snd_es1371_spdif_put(struct snd_kcontrol *kcontrol,
/* spdif controls */
-static struct snd_kcontrol_new snd_es1371_mixer_spdif[] __devinitdata = {
+static struct snd_kcontrol_new snd_es1371_mixer_spdif[] = {
ES1371_SPDIF(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH)),
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1546,7 +1546,7 @@ static int snd_es1373_rear_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ens1373_rear __devinitdata =
+static struct snd_kcontrol_new snd_ens1373_rear =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "AC97 2ch->4ch Copy Switch",
@@ -1591,7 +1591,7 @@ static int snd_es1373_line_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_ens1373_line __devinitdata =
+static struct snd_kcontrol_new snd_ens1373_line =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line In->Rear Out Switch",
@@ -1625,7 +1625,7 @@ static int es1371_quirk_lookup(struct ensoniq *ensoniq,
return 0;
}
-static struct es1371_quirk es1371_spdif_present[] __devinitdata = {
+static struct es1371_quirk es1371_spdif_present[] = {
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_C },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_D },
{ .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_E },
@@ -1634,14 +1634,14 @@ static struct es1371_quirk es1371_spdif_present[] __devinitdata = {
{ .vid = PCI_ANY_ID, .did = PCI_ANY_ID }
};
-static struct snd_pci_quirk ens1373_line_quirk[] __devinitdata = {
+static struct snd_pci_quirk ens1373_line_quirk[] = {
SND_PCI_QUIRK_ID(0x1274, 0x2000), /* GA-7DXR */
SND_PCI_QUIRK_ID(0x1458, 0xa000), /* GA-8IEXP */
{ } /* end */
};
-static int __devinit snd_ensoniq_1371_mixer(struct ensoniq *ensoniq,
- int has_spdif, int has_line)
+static int snd_ensoniq_1371_mixer(struct ensoniq *ensoniq,
+ int has_spdif, int has_line)
{
struct snd_card *card = ensoniq->card;
struct snd_ac97_bus *pbus;
@@ -1749,7 +1749,7 @@ static int snd_ensoniq_control_put(struct snd_kcontrol *kcontrol,
* ENS1370 mixer
*/
-static struct snd_kcontrol_new snd_es1370_controls[2] __devinitdata = {
+static struct snd_kcontrol_new snd_es1370_controls[2] = {
ENSONIQ_CONTROL("PCM 0 Output also on Line-In Jack", ES_1370_XCTL0),
ENSONIQ_CONTROL("Mic +5V bias", ES_1370_XCTL1)
};
@@ -1762,7 +1762,7 @@ static void snd_ensoniq_mixer_free_ak4531(struct snd_ak4531 *ak4531)
ensoniq->u.es1370.ak4531 = NULL;
}
-static int __devinit snd_ensoniq_1370_mixer(struct ensoniq * ensoniq)
+static int snd_ensoniq_1370_mixer(struct ensoniq *ensoniq)
{
struct snd_card *card = ensoniq->card;
struct snd_ak4531 ak4531;
@@ -1796,7 +1796,7 @@ static int __devinit snd_ensoniq_1370_mixer(struct ensoniq * ensoniq)
#ifdef SUPPORT_JOYSTICK
#ifdef CHIP1371
-static int __devinit snd_ensoniq_get_joystick_port(int dev)
+static int snd_ensoniq_get_joystick_port(int dev)
{
switch (joystick_port[dev]) {
case 0: /* disabled */
@@ -1819,7 +1819,7 @@ static inline int snd_ensoniq_get_joystick_port(int dev)
}
#endif
-static int __devinit snd_ensoniq_create_gameport(struct ensoniq *ensoniq, int dev)
+static int snd_ensoniq_create_gameport(struct ensoniq *ensoniq, int dev)
{
struct gameport *gp;
int io_port;
@@ -1913,7 +1913,7 @@ static void snd_ensoniq_proc_read(struct snd_info_entry *entry,
#endif
}
-static void __devinit snd_ensoniq_proc_init(struct ensoniq * ensoniq)
+static void snd_ensoniq_proc_init(struct ensoniq *ensoniq)
{
struct snd_info_entry *entry;
@@ -1960,7 +1960,7 @@ static int snd_ensoniq_dev_free(struct snd_device *device)
}
#ifdef CHIP1371
-static struct snd_pci_quirk es1371_amplifier_hack[] __devinitdata = {
+static struct snd_pci_quirk es1371_amplifier_hack[] = {
SND_PCI_QUIRK_ID(0x107b, 0x2150), /* Gateway Solo 2150 */
SND_PCI_QUIRK_ID(0x13bd, 0x100c), /* EV1938 on Mebius PC-MJ100V */
SND_PCI_QUIRK_ID(0x1102, 0x5938), /* Targa Xtender300 */
@@ -2106,9 +2106,9 @@ static SIMPLE_DEV_PM_OPS(snd_ensoniq_pm, snd_ensoniq_suspend, snd_ensoniq_resume
#define SND_ENSONIQ_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
-static int __devinit snd_ensoniq_create(struct snd_card *card,
- struct pci_dev *pci,
- struct ensoniq ** rensoniq)
+static int snd_ensoniq_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct ensoniq **rensoniq)
{
struct ensoniq *ensoniq;
int err;
@@ -2361,8 +2361,8 @@ static struct snd_rawmidi_ops snd_ensoniq_midi_input =
.trigger = snd_ensoniq_midi_input_trigger,
};
-static int __devinit snd_ensoniq_midi(struct ensoniq * ensoniq, int device,
- struct snd_rawmidi **rrawmidi)
+static int snd_ensoniq_midi(struct ensoniq *ensoniq, int device,
+ struct snd_rawmidi **rrawmidi)
{
struct snd_rawmidi *rmidi;
int err;
@@ -2422,8 +2422,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit snd_audiopci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_audiopci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2494,7 +2494,7 @@ static int __devinit snd_audiopci_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_audiopci_remove(struct pci_dev *pci)
+static void snd_audiopci_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2504,7 +2504,7 @@ static struct pci_driver ens137x_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_audiopci_ids,
.probe = snd_audiopci_probe,
- .remove = __devexit_p(snd_audiopci_remove),
+ .remove = snd_audiopci_remove,
.driver = {
.pm = SND_ENSONIQ_PM_OPS,
},
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 394c5d41353..8423403954a 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1027,7 +1027,7 @@ static struct snd_pcm_ops snd_es1938_capture_ops = {
.copy = snd_es1938_capture_copy,
};
-static int __devinit snd_es1938_new_pcm(struct es1938 *chip, int device)
+static int snd_es1938_new_pcm(struct es1938 *chip, int device)
{
struct snd_pcm *pcm;
int err;
@@ -1539,7 +1539,7 @@ static SIMPLE_DEV_PM_OPS(es1938_pm, es1938_suspend, es1938_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef SUPPORT_JOYSTICK
-static int __devinit snd_es1938_create_gameport(struct es1938 *chip)
+static int snd_es1938_create_gameport(struct es1938 *chip)
{
struct gameport *gp;
@@ -1594,9 +1594,9 @@ static int snd_es1938_dev_free(struct snd_device *device)
return snd_es1938_free(chip);
}
-static int __devinit snd_es1938_create(struct snd_card *card,
- struct pci_dev * pci,
- struct es1938 ** rchip)
+static int snd_es1938_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct es1938 **rchip)
{
struct es1938 *chip;
int err;
@@ -1754,7 +1754,7 @@ static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id)
#define ES1938_DMA_SIZE 64
-static int __devinit snd_es1938_mixer(struct es1938 *chip)
+static int snd_es1938_mixer(struct es1938 *chip)
{
struct snd_card *card;
unsigned int idx;
@@ -1792,8 +1792,8 @@ static int __devinit snd_es1938_mixer(struct es1938 *chip)
}
-static int __devinit snd_es1938_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_es1938_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1878,7 +1878,7 @@ static int __devinit snd_es1938_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_es1938_remove(struct pci_dev *pci)
+static void snd_es1938_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1888,7 +1888,7 @@ static struct pci_driver es1938_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_es1938_ids,
.probe = snd_es1938_probe,
- .remove = __devexit_p(snd_es1938_remove),
+ .remove = snd_es1938_remove,
.driver = {
.pm = ES1938_PM_OPS,
},
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 7266020c16c..a1f32b5ae0d 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1429,7 +1429,7 @@ static void snd_es1968_free_dmabuf(struct es1968 *chip)
}
}
-static int __devinit
+static int
snd_es1968_init_dmabuf(struct es1968 *chip)
{
int err;
@@ -1704,7 +1704,7 @@ static struct snd_pcm_ops snd_es1968_capture_ops = {
*/
#define CLOCK_MEASURE_BUFSIZE 16768 /* enough large for a single shot */
-static void __devinit es1968_measure_clock(struct es1968 *chip)
+static void es1968_measure_clock(struct es1968 *chip)
{
int i, apu;
unsigned int pa, offset, t;
@@ -1806,7 +1806,7 @@ static void snd_es1968_pcm_free(struct snd_pcm *pcm)
esm->pcm = NULL;
}
-static int __devinit
+static int
snd_es1968_pcm(struct es1968 *chip, int device)
{
struct snd_pcm *pcm;
@@ -2016,7 +2016,7 @@ static irqreturn_t snd_es1968_interrupt(int irq, void *dev_id)
* Mixer stuff
*/
-static int __devinit
+static int
snd_es1968_mixer(struct es1968 *chip)
{
struct snd_ac97_bus *pbus;
@@ -2291,7 +2291,7 @@ static void snd_es1968_chip_init(struct es1968 *chip)
outb(0x88, iobase+0x1f);
/* it appears some maestros (dell 7500) only work if these are set,
- regardless of wether we use the assp or not. */
+ regardless of whether we use the assp or not. */
outb(0, iobase + ASSP_CONTROL_B);
outb(3, iobase + ASSP_CONTROL_A); /* M: Reserved bits... */
@@ -2465,7 +2465,7 @@ static SIMPLE_DEV_PM_OPS(es1968_pm, es1968_suspend, es1968_resume);
#ifdef SUPPORT_JOYSTICK
#define JOYSTICK_ADDR 0x200
-static int __devinit snd_es1968_create_gameport(struct es1968 *chip, int dev)
+static int snd_es1968_create_gameport(struct es1968 *chip, int dev)
{
struct gameport *gp;
struct resource *r;
@@ -2516,7 +2516,7 @@ static inline void snd_es1968_free_gameport(struct es1968 *chip) { }
#endif
#ifdef CONFIG_SND_ES1968_INPUT
-static int __devinit snd_es1968_input_register(struct es1968 *chip)
+static int snd_es1968_input_register(struct es1968 *chip)
{
struct input_dev *input_dev;
int err;
@@ -2653,7 +2653,7 @@ struct ess_device_list {
unsigned short vendor; /* subsystem vendor id */
};
-static struct ess_device_list pm_whitelist[] __devinitdata = {
+static struct ess_device_list pm_whitelist[] = {
{ TYPE_MAESTRO2E, 0x0e11 }, /* Compaq Armada */
{ TYPE_MAESTRO2E, 0x1028 },
{ TYPE_MAESTRO2E, 0x103c },
@@ -2664,19 +2664,19 @@ static struct ess_device_list pm_whitelist[] __devinitdata = {
{ TYPE_MAESTRO2, 0x125d }, /* a PCI card, e.g. SF64-PCE2 */
};
-static struct ess_device_list mpu_blacklist[] __devinitdata = {
+static struct ess_device_list mpu_blacklist[] = {
{ TYPE_MAESTRO2, 0x125d },
};
-static int __devinit snd_es1968_create(struct snd_card *card,
- struct pci_dev *pci,
- int total_bufsize,
- int play_streams,
- int capt_streams,
- int chip_type,
- int do_pm,
- int radio_nr,
- struct es1968 **chip_ret)
+static int snd_es1968_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int total_bufsize,
+ int play_streams,
+ int capt_streams,
+ int chip_type,
+ int do_pm,
+ int radio_nr,
+ struct es1968 **chip_ret)
{
static struct snd_device_ops ops = {
.dev_free = snd_es1968_dev_free,
@@ -2795,8 +2795,8 @@ static int __devinit snd_es1968_create(struct snd_card *card,
/*
*/
-static int __devinit snd_es1968_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_es1968_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2906,7 +2906,7 @@ static int __devinit snd_es1968_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_es1968_remove(struct pci_dev *pci)
+static void snd_es1968_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2916,7 +2916,7 @@ static struct pci_driver es1968_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_es1968_ids,
.probe = snd_es1968_probe,
- .remove = __devexit_p(snd_es1968_remove),
+ .remove = snd_es1968_remove,
.driver = {
.pm = ES1968_PM_OPS,
},
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index c5806f89be1..4f07fda5adf 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -689,7 +689,7 @@ static struct snd_pcm_ops snd_fm801_capture_ops = {
.pointer = snd_fm801_capture_pointer,
};
-static int __devinit snd_fm801_pcm(struct fm801 *chip, int device, struct snd_pcm ** rpcm)
+static int snd_fm801_pcm(struct fm801 *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -984,7 +984,7 @@ static const DECLARE_TLV_DB_SCALE(db_scale_dsp, -3450, 150, 0);
#define FM801_CONTROLS ARRAY_SIZE(snd_fm801_controls)
-static struct snd_kcontrol_new snd_fm801_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_fm801_controls[] = {
FM801_DOUBLE_TLV("Wave Playback Volume", FM801_PCM_VOL, 0, 8, 31, 1,
db_scale_dsp),
FM801_SINGLE("Wave Playback Switch", FM801_PCM_VOL, 15, 1, 1),
@@ -1005,7 +1005,7 @@ FM801_SINGLE("FM Playback Switch", FM801_FM_VOL, 15, 1, 1),
#define FM801_CONTROLS_MULTI ARRAY_SIZE(snd_fm801_controls_multi)
-static struct snd_kcontrol_new snd_fm801_controls_multi[] __devinitdata = {
+static struct snd_kcontrol_new snd_fm801_controls_multi[] = {
FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0),
FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0),
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), FM801_I2S_MODE, 8, 1, 0),
@@ -1030,7 +1030,7 @@ static void snd_fm801_mixer_free_ac97(struct snd_ac97 *ac97)
}
}
-static int __devinit snd_fm801_mixer(struct fm801 *chip)
+static int snd_fm801_mixer(struct fm801 *chip)
{
struct snd_ac97_template ac97;
unsigned int i;
@@ -1191,11 +1191,11 @@ static int snd_fm801_dev_free(struct snd_device *device)
return snd_fm801_free(chip);
}
-static int __devinit snd_fm801_create(struct snd_card *card,
- struct pci_dev * pci,
- int tea575x_tuner,
- int radio_nr,
- struct fm801 ** rchip)
+static int snd_fm801_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int tea575x_tuner,
+ int radio_nr,
+ struct fm801 **rchip)
{
struct fm801 *chip;
int err;
@@ -1296,8 +1296,8 @@ static int __devinit snd_fm801_create(struct snd_card *card,
return 0;
}
-static int __devinit snd_card_fm801_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_card_fm801_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1367,7 +1367,7 @@ static int __devinit snd_card_fm801_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_card_fm801_remove(struct pci_dev *pci)
+static void snd_card_fm801_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1439,7 +1439,7 @@ static struct pci_driver fm801_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_fm801_ids,
.probe = snd_card_fm801_probe,
- .remove = __devexit_p(snd_card_fm801_remove),
+ .remove = snd_card_fm801_remove,
.driver = {
.pm = SND_FM801_PM_OPS,
},
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7105c3de1bc..6eeb8897624 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -37,8 +37,8 @@ config SND_HDA_HWDEP
with codecs for debugging purposes.
config SND_HDA_RECONFIG
- bool "Allow dynamic codec reconfiguration (EXPERIMENTAL)"
- depends on SND_HDA_HWDEP && EXPERIMENTAL
+ bool "Allow dynamic codec reconfiguration"
+ depends on SND_HDA_HWDEP
help
Say Y here to enable the HD-audio codec re-configuration feature.
This adds the sysfs interfaces to allow user to clear the whole
@@ -72,7 +72,6 @@ config SND_HDA_INPUT_JACK
config SND_HDA_PATCH_LOADER
bool "Support initialization patch loading for HD-audio"
- depends on EXPERIMENTAL
select FW_LOADER
select SND_HDA_HWDEP
select SND_HDA_RECONFIG
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index bd4149f1aaf..24a251497a1 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -8,6 +8,7 @@ snd-hda-codec-$(CONFIG_SND_HDA_INPUT_BEEP) += hda_beep.o
# for trace-points
CFLAGS_hda_codec.o := -I$(src)
+CFLAGS_hda_intel.o := -I$(src)
snd-hda-codec-realtek-objs := patch_realtek.o
snd-hda-codec-cmedia-objs := patch_cmedia.o
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 4ec6dc88b7f..7da883a464e 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/sort.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -30,29 +31,30 @@ static int is_in_nid_list(hda_nid_t nid, const hda_nid_t *list)
return 0;
}
+/* a pair of input pin and its sequence */
+struct auto_out_pin {
+ hda_nid_t pin;
+ short seq;
+};
+
+static int compare_seq(const void *ap, const void *bp)
+{
+ const struct auto_out_pin *a = ap;
+ const struct auto_out_pin *b = bp;
+ return (int)(a->seq - b->seq);
+}
/*
* Sort an associated group of pins according to their sequence numbers.
+ * then store it to a pin array.
*/
-static void sort_pins_by_sequence(hda_nid_t *pins, short *sequences,
+static void sort_pins_by_sequence(hda_nid_t *pins, struct auto_out_pin *list,
int num_pins)
{
- int i, j;
- short seq;
- hda_nid_t nid;
-
- for (i = 0; i < num_pins; i++) {
- for (j = i + 1; j < num_pins; j++) {
- if (sequences[i] > sequences[j]) {
- seq = sequences[i];
- sequences[i] = sequences[j];
- sequences[j] = seq;
- nid = pins[i];
- pins[i] = pins[j];
- pins[j] = nid;
- }
- }
- }
+ int i;
+ sort(list, num_pins, sizeof(list[0]), compare_seq, NULL);
+ for (i = 0; i < num_pins; i++)
+ pins[i] = list[i].pin;
}
@@ -67,21 +69,11 @@ static void add_auto_cfg_input_pin(struct auto_pin_cfg *cfg, hda_nid_t nid,
}
}
-/* sort inputs in the order of AUTO_PIN_* type */
-static void sort_autocfg_input_pins(struct auto_pin_cfg *cfg)
+static int compare_input_type(const void *ap, const void *bp)
{
- int i, j;
-
- for (i = 0; i < cfg->num_inputs; i++) {
- for (j = i + 1; j < cfg->num_inputs; j++) {
- if (cfg->inputs[i].type > cfg->inputs[j].type) {
- struct auto_pin_cfg_item tmp;
- tmp = cfg->inputs[i];
- cfg->inputs[i] = cfg->inputs[j];
- cfg->inputs[j] = tmp;
- }
- }
- }
+ const struct auto_pin_cfg_item *a = ap;
+ const struct auto_pin_cfg_item *b = bp;
+ return (int)(a->type - b->type);
}
/* Reorder the surround channels
@@ -129,16 +121,16 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
{
hda_nid_t nid, end_nid;
short seq, assoc_line_out;
- short sequences_line_out[ARRAY_SIZE(cfg->line_out_pins)];
- short sequences_speaker[ARRAY_SIZE(cfg->speaker_pins)];
- short sequences_hp[ARRAY_SIZE(cfg->hp_pins)];
+ struct auto_out_pin line_out[ARRAY_SIZE(cfg->line_out_pins)];
+ struct auto_out_pin speaker_out[ARRAY_SIZE(cfg->speaker_pins)];
+ struct auto_out_pin hp_out[ARRAY_SIZE(cfg->hp_pins)];
int i;
memset(cfg, 0, sizeof(*cfg));
- memset(sequences_line_out, 0, sizeof(sequences_line_out));
- memset(sequences_speaker, 0, sizeof(sequences_speaker));
- memset(sequences_hp, 0, sizeof(sequences_hp));
+ memset(line_out, 0, sizeof(line_out));
+ memset(speaker_out, 0, sizeof(speaker_out));
+ memset(hp_out, 0, sizeof(hp_out));
assoc_line_out = 0;
end_nid = codec->start_nid + codec->num_nodes;
@@ -184,8 +176,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
continue;
if (cfg->line_outs >= ARRAY_SIZE(cfg->line_out_pins))
continue;
- cfg->line_out_pins[cfg->line_outs] = nid;
- sequences_line_out[cfg->line_outs] = seq;
+ line_out[cfg->line_outs].pin = nid;
+ line_out[cfg->line_outs].seq = seq;
cfg->line_outs++;
break;
case AC_JACK_SPEAKER:
@@ -193,8 +185,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
assoc = get_defcfg_association(def_conf);
if (cfg->speaker_outs >= ARRAY_SIZE(cfg->speaker_pins))
continue;
- cfg->speaker_pins[cfg->speaker_outs] = nid;
- sequences_speaker[cfg->speaker_outs] = (assoc << 4) | seq;
+ speaker_out[cfg->speaker_outs].pin = nid;
+ speaker_out[cfg->speaker_outs].seq = (assoc << 4) | seq;
cfg->speaker_outs++;
break;
case AC_JACK_HP_OUT:
@@ -202,8 +194,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
assoc = get_defcfg_association(def_conf);
if (cfg->hp_outs >= ARRAY_SIZE(cfg->hp_pins))
continue;
- cfg->hp_pins[cfg->hp_outs] = nid;
- sequences_hp[cfg->hp_outs] = (assoc << 4) | seq;
+ hp_out[cfg->hp_outs].pin = nid;
+ hp_out[cfg->hp_outs].seq = (assoc << 4) | seq;
cfg->hp_outs++;
break;
case AC_JACK_MIC_IN:
@@ -248,34 +240,28 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
int i = 0;
while (i < cfg->hp_outs) {
/* The real HPs should have the sequence 0x0f */
- if ((sequences_hp[i] & 0x0f) == 0x0f) {
+ if ((hp_out[i].seq & 0x0f) == 0x0f) {
i++;
continue;
}
/* Move it to the line-out table */
- cfg->line_out_pins[cfg->line_outs] = cfg->hp_pins[i];
- sequences_line_out[cfg->line_outs] = sequences_hp[i];
- cfg->line_outs++;
+ line_out[cfg->line_outs++] = hp_out[i];
cfg->hp_outs--;
- memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
- sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
- memmove(sequences_hp + i, sequences_hp + i + 1,
- sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
+ memmove(hp_out + i, hp_out + i + 1,
+ sizeof(hp_out[0]) * (cfg->hp_outs - i));
}
- memset(cfg->hp_pins + cfg->hp_outs, 0,
- sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
+ memset(hp_out + cfg->hp_outs, 0,
+ sizeof(hp_out[0]) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
if (!cfg->hp_outs)
cfg->line_out_type = AUTO_PIN_HP_OUT;
}
/* sort by sequence */
- sort_pins_by_sequence(cfg->line_out_pins, sequences_line_out,
- cfg->line_outs);
- sort_pins_by_sequence(cfg->speaker_pins, sequences_speaker,
+ sort_pins_by_sequence(cfg->line_out_pins, line_out, cfg->line_outs);
+ sort_pins_by_sequence(cfg->speaker_pins, speaker_out,
cfg->speaker_outs);
- sort_pins_by_sequence(cfg->hp_pins, sequences_hp,
- cfg->hp_outs);
+ sort_pins_by_sequence(cfg->hp_pins, hp_out, cfg->hp_outs);
/*
* FIX-UP: if no line-outs are detected, try to use speaker or HP pin
@@ -304,7 +290,9 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
reorder_outputs(cfg->hp_outs, cfg->hp_pins);
reorder_outputs(cfg->speaker_outs, cfg->speaker_pins);
- sort_autocfg_input_pins(cfg);
+ /* sort inputs in the order of AUTO_PIN_* type */
+ sort(cfg->inputs, cfg->num_inputs, sizeof(cfg->inputs[0]),
+ compare_input_type, NULL);
/*
* debug prints of the parsed results
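
The open-coded bubble sorts are replaced by the kernel's sort() helper from <linux/sort.h>, which takes the element count, element size and a qsort-style comparator; a NULL swap callback selects the generic byte-wise swap. A self-contained sketch of the same pin/sequence ordering, written as a throwaway demo module (hypothetical names; the in-tree call is sort(list, num_pins, sizeof(list[0]), compare_seq, NULL)):

/* Demo: ordering (pin, seq) pairs with the kernel sort() helper. */
#include <linux/module.h>
#include <linux/sort.h>

struct auto_out_pin {
	u16 pin;	/* hda_nid_t is a 16-bit NID */
	short seq;
};

static int compare_seq(const void *ap, const void *bp)
{
	const struct auto_out_pin *a = ap, *b = bp;
	return (int)(a->seq - b->seq);
}

static int __init sort_demo_init(void)
{
	struct auto_out_pin pins[] = {
		{ .pin = 0x14, .seq = 2 },
		{ .pin = 0x15, .seq = 0 },
		{ .pin = 0x16, .seq = 1 },
	};
	int i;

	/* NULL swap function => generic byte-wise swap */
	sort(pins, ARRAY_SIZE(pins), sizeof(pins[0]), compare_seq, NULL);
	for (i = 0; i < ARRAY_SIZE(pins); i++)
		pr_info("pin 0x%02x seq %d\n", pins[i].pin, pins[i].seq);
	return 0;
}

static void __exit sort_demo_exit(void) { }

module_init(sort_demo_init);
module_exit(sort_demo_exit);
MODULE_LICENSE("GPL");
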
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index d010de12335..8353c77536a 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -738,7 +738,7 @@ static int snd_hda_bus_dev_register(struct snd_device *device)
*
* Returns 0 if successful, or a negative error code.
*/
-int /*__devinit*/ snd_hda_bus_new(struct snd_card *card,
+int snd_hda_bus_new(struct snd_card *card,
const struct hda_bus_template *temp,
struct hda_bus **busp)
{
@@ -908,7 +908,7 @@ static int get_codec_name(struct hda_codec *codec)
/*
* look for an AFG and MFG nodes
*/
-static void /*__devinit*/ setup_fg_nodes(struct hda_codec *codec)
+static void setup_fg_nodes(struct hda_codec *codec)
{
int i, total_nodes, function_id;
hda_nid_t nid;
@@ -993,19 +993,6 @@ static struct hda_pincfg *look_up_pincfg(struct hda_codec *codec,
return NULL;
}
-/* write a config value for the given NID */
-static void set_pincfg(struct hda_codec *codec, hda_nid_t nid,
- unsigned int cfg)
-{
- int i;
- for (i = 0; i < 4; i++) {
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CONFIG_DEFAULT_BYTES_0 + i,
- cfg & 0xff);
- cfg >>= 8;
- }
-}
-
/* set the current pin config value for the given NID.
* the value is cached, and read via snd_hda_codec_get_pincfg()
*/
@@ -1013,12 +1000,10 @@ int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list,
hda_nid_t nid, unsigned int cfg)
{
struct hda_pincfg *pin;
- unsigned int oldcfg;
if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
return -EINVAL;
- oldcfg = snd_hda_codec_get_pincfg(codec, nid);
pin = look_up_pincfg(codec, list, nid);
if (!pin) {
pin = snd_array_new(list);
@@ -1027,13 +1012,6 @@ int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list,
pin->nid = nid;
}
pin->cfg = cfg;
-
- /* change only when needed; e.g. if the pincfg is already present
- * in user_pins[], don't write it
- */
- cfg = snd_hda_codec_get_pincfg(codec, nid);
- if (oldcfg != cfg)
- set_pincfg(codec, nid, cfg);
return 0;
}
@@ -1082,17 +1060,6 @@ unsigned int snd_hda_codec_get_pincfg(struct hda_codec *codec, hda_nid_t nid)
}
EXPORT_SYMBOL_HDA(snd_hda_codec_get_pincfg);
-/* restore all current pin configs */
-static void restore_pincfgs(struct hda_codec *codec)
-{
- int i;
- for (i = 0; i < codec->init_pins.used; i++) {
- struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
- set_pincfg(codec, pin->nid,
- snd_hda_codec_get_pincfg(codec, pin->nid));
- }
-}
-
/**
* snd_hda_shutup_pins - Shut up all pins
* @codec: the HDA codec
@@ -1137,21 +1104,30 @@ static void restore_shutup_pins(struct hda_codec *codec)
}
#endif
+static void hda_jackpoll_work(struct work_struct *work)
+{
+ struct hda_codec *codec =
+ container_of(work, struct hda_codec, jackpoll_work.work);
+ if (!codec->jackpoll_interval)
+ return;
+
+ snd_hda_jack_set_dirty_all(codec);
+ snd_hda_jack_poll_all(codec);
+ queue_delayed_work(codec->bus->workq, &codec->jackpoll_work,
+ codec->jackpoll_interval);
+}
+
static void init_hda_cache(struct hda_cache_rec *cache,
unsigned int record_size);
static void free_hda_cache(struct hda_cache_rec *cache);
-/* restore the initial pin cfgs and release all pincfg lists */
-static void restore_init_pincfgs(struct hda_codec *codec)
+/* release all pincfg lists */
+static void free_init_pincfgs(struct hda_codec *codec)
{
- /* first free driver_pins and user_pins, then call restore_pincfg
- * so that only the values in init_pins are restored
- */
snd_array_free(&codec->driver_pins);
#ifdef CONFIG_SND_HDA_HWDEP
snd_array_free(&codec->user_pins);
#endif
- restore_pincfgs(codec);
snd_array_free(&codec->init_pins);
}
@@ -1192,8 +1168,9 @@ static void snd_hda_codec_free(struct hda_codec *codec)
{
if (!codec)
return;
+ cancel_delayed_work_sync(&codec->jackpoll_work);
snd_hda_jack_tbl_clear(codec);
- restore_init_pincfgs(codec);
+ free_init_pincfgs(codec);
#ifdef CONFIG_PM
cancel_delayed_work(&codec->power_work);
flush_workqueue(codec->bus->workq);
@@ -1235,7 +1212,7 @@ static unsigned int hda_set_power_state(struct hda_codec *codec,
*
* Returns 0 if successful, or a negative error code.
*/
-int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
+int snd_hda_codec_new(struct hda_bus *bus,
unsigned int codec_addr,
struct hda_codec **codecp)
{
@@ -1275,6 +1252,8 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
snd_array_init(&codec->cvt_setups, sizeof(struct hda_cvt_setup), 8);
snd_array_init(&codec->conn_lists, sizeof(hda_nid_t), 64);
snd_array_init(&codec->spdif_out, sizeof(struct hda_spdif_out), 16);
+ snd_array_init(&codec->jacktbl, sizeof(struct hda_jack_tbl), 16);
+ INIT_DELAYED_WORK(&codec->jackpoll_work, hda_jackpoll_work);
#ifdef CONFIG_PM
spin_lock_init(&codec->power_lock);
@@ -1588,7 +1567,7 @@ static void hda_cleanup_all_streams(struct hda_codec *codec)
#define INFO_AMP_VOL(ch) (1 << (1 + (ch)))
/* initialize the hash table */
-static void /*__devinit*/ init_hda_cache(struct hda_cache_rec *cache,
+static void init_hda_cache(struct hda_cache_rec *cache,
unsigned int record_size)
{
memset(cache, 0, sizeof(*cache));
@@ -2153,12 +2132,12 @@ EXPORT_SYMBOL_HDA(snd_hda_set_vmaster_tlv);
/* find a mixer control element with the given name */
static struct snd_kcontrol *
-_snd_hda_find_mixer_ctl(struct hda_codec *codec,
- const char *name, int idx)
+find_mixer_ctl(struct hda_codec *codec, const char *name, int dev, int idx)
{
struct snd_ctl_elem_id id;
memset(&id, 0, sizeof(id));
id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ id.device = dev;
id.index = idx;
if (snd_BUG_ON(strlen(name) >= sizeof(id.name)))
return NULL;
@@ -2176,15 +2155,16 @@ _snd_hda_find_mixer_ctl(struct hda_codec *codec,
struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
const char *name)
{
- return _snd_hda_find_mixer_ctl(codec, name, 0);
+ return find_mixer_ctl(codec, name, 0, 0);
}
EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl);
-static int find_empty_mixer_ctl_idx(struct hda_codec *codec, const char *name)
+static int find_empty_mixer_ctl_idx(struct hda_codec *codec, const char *name,
+ int dev)
{
int idx;
for (idx = 0; idx < 16; idx++) { /* 16 ctlrs should be large enough */
- if (!_snd_hda_find_mixer_ctl(codec, name, idx))
+ if (!find_mixer_ctl(codec, name, dev, idx))
return idx;
}
return -EBUSY;
@@ -2351,7 +2331,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
return -EBUSY;
/* OK, let it free */
-
+ cancel_delayed_work_sync(&codec->jackpoll_work);
#ifdef CONFIG_PM
cancel_delayed_work_sync(&codec->power_work);
codec->power_on = 0;
@@ -2380,7 +2360,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
/* free only driver_pins so that init_pins + user_pins are restored */
snd_array_free(&codec->driver_pins);
- restore_pincfgs(codec);
snd_array_free(&codec->cvt_setups);
snd_array_free(&codec->spdif_out);
codec->num_pcms = 0;
@@ -3135,26 +3114,48 @@ static struct snd_kcontrol_new dig_mixes[] = {
};
/**
- * snd_hda_create_spdif_out_ctls - create Output SPDIF-related controls
+ * snd_hda_create_dig_out_ctls - create Output SPDIF-related controls
* @codec: the HDA codec
- * @nid: audio out widget NID
- *
- * Creates controls related with the SPDIF output.
- * Called from each patch supporting the SPDIF out.
+ * @associated_nid: NID that new ctls associated with
+ * @cvt_nid: converter NID
+ * @type: HDA_PCM_TYPE_*
+ * Creates controls related with the digital output.
+ * Called from each patch supporting the digital out.
*
* Returns 0 if successful, or a negative error code.
*/
-int snd_hda_create_spdif_out_ctls(struct hda_codec *codec,
- hda_nid_t associated_nid,
- hda_nid_t cvt_nid)
+int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
+ hda_nid_t associated_nid,
+ hda_nid_t cvt_nid,
+ int type)
{
int err;
struct snd_kcontrol *kctl;
struct snd_kcontrol_new *dig_mix;
- int idx;
+ int idx, dev = 0;
+ const int spdif_pcm_dev = 1;
struct hda_spdif_out *spdif;
- idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch");
+ if (codec->primary_dig_out_type == HDA_PCM_TYPE_HDMI &&
+ type == HDA_PCM_TYPE_SPDIF) {
+ dev = spdif_pcm_dev;
+ } else if (codec->primary_dig_out_type == HDA_PCM_TYPE_SPDIF &&
+ type == HDA_PCM_TYPE_HDMI) {
+ for (idx = 0; idx < codec->spdif_out.used; idx++) {
+ spdif = snd_array_elem(&codec->spdif_out, idx);
+ for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
+ kctl = find_mixer_ctl(codec, dig_mix->name, 0, idx);
+ if (!kctl)
+ break;
+ kctl->id.device = spdif_pcm_dev;
+ }
+ }
+ codec->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
+ }
+ if (!codec->primary_dig_out_type)
+ codec->primary_dig_out_type = type;
+
+ idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch", dev);
if (idx < 0) {
printk(KERN_ERR "hda_codec: too many IEC958 outputs\n");
return -EBUSY;
@@ -3164,6 +3165,7 @@ int snd_hda_create_spdif_out_ctls(struct hda_codec *codec,
kctl = snd_ctl_new1(dig_mix, codec);
if (!kctl)
return -ENOMEM;
+ kctl->id.device = dev;
kctl->id.index = idx;
kctl->private_value = codec->spdif_out.used - 1;
err = snd_hda_ctl_add(codec, associated_nid, kctl);
@@ -3176,7 +3178,7 @@ int snd_hda_create_spdif_out_ctls(struct hda_codec *codec,
spdif->status = convert_to_spdif_status(spdif->ctls);
return 0;
}
-EXPORT_SYMBOL_HDA(snd_hda_create_spdif_out_ctls);
+EXPORT_SYMBOL_HDA(snd_hda_create_dig_out_ctls);
/* get the hda_spdif_out entry from the given NID
* call within spdif_mutex lock
@@ -3351,7 +3353,7 @@ int snd_hda_create_spdif_in_ctls(struct hda_codec *codec, hda_nid_t nid)
struct snd_kcontrol_new *dig_mix;
int idx;
- idx = find_empty_mixer_ctl_idx(codec, "IEC958 Capture Switch");
+ idx = find_empty_mixer_ctl_idx(codec, "IEC958 Capture Switch", 0);
if (idx < 0) {
printk(KERN_ERR "hda_codec: too many IEC958 inputs\n");
return -EBUSY;
@@ -3650,10 +3652,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)
*/
hda_keep_power_on(codec);
hda_set_power_state(codec, AC_PWRST_D0);
- restore_pincfgs(codec); /* restore all current pin configs */
restore_shutup_pins(codec);
hda_exec_init_verbs(codec);
- snd_hda_jack_set_dirty_all(codec);
if (codec->patch_ops.resume)
codec->patch_ops.resume(codec);
else {
@@ -3662,7 +3662,13 @@ static void hda_call_codec_resume(struct hda_codec *codec)
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
}
- snd_hda_jack_report_sync(codec);
+
+ if (codec->jackpoll_interval)
+ hda_jackpoll_work(&codec->jackpoll_work.work);
+ else {
+ snd_hda_jack_set_dirty_all(codec);
+ snd_hda_jack_report_sync(codec);
+ }
codec->in_pm = 0;
snd_hda_power_down(codec); /* flag down before returning */
@@ -3678,7 +3684,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
*
* Returns 0 if successful, otherwise a negative error code.
*/
-int /*__devinit*/ snd_hda_build_controls(struct hda_bus *bus)
+int snd_hda_build_controls(struct hda_bus *bus)
{
struct hda_codec *codec;
@@ -3712,13 +3718,14 @@ static int add_std_chmaps(struct hda_codec *codec)
struct hda_pcm_stream *hinfo =
&codec->pcm_info[i].stream[str];
struct snd_pcm_chmap *chmap;
+ const struct snd_pcm_chmap_elem *elem;
if (codec->pcm_info[i].own_chmap)
continue;
if (!pcm || !hinfo->substreams)
continue;
- err = snd_pcm_add_chmap_ctls(pcm, str,
- snd_pcm_std_chmaps,
+ elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
+ err = snd_pcm_add_chmap_ctls(pcm, str, elem,
hinfo->channels_max,
0, &chmap);
if (err < 0)
@@ -3729,6 +3736,19 @@ static int add_std_chmaps(struct hda_codec *codec)
return 0;
}
+/* default channel maps for 2.1 speakers;
+ * since HD-audio supports only stereo, odd number channels are omitted
+ */
+const struct snd_pcm_chmap_elem snd_pcm_2_1_chmaps[] = {
+ { .channels = 2,
+ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
+ { .channels = 4,
+ .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
+ SNDRV_CHMAP_LFE, SNDRV_CHMAP_LFE } },
+ { }
+};
+EXPORT_SYMBOL_GPL(snd_pcm_2_1_chmaps);
+
int snd_hda_codec_build_controls(struct hda_codec *codec)
{
int err = 0;
@@ -3746,7 +3766,10 @@ int snd_hda_codec_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
- snd_hda_jack_report_sync(codec); /* call at the last init point */
+ if (codec->jackpoll_interval)
+ hda_jackpoll_work(&codec->jackpoll_work.work);
+ else
+ snd_hda_jack_report_sync(codec); /* call at the last init point */
return 0;
}
@@ -4458,7 +4481,7 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
addr = codec->addr;
else if (!idx && !knew->index) {
idx = find_empty_mixer_ctl_idx(codec,
- knew->name);
+ knew->name, 0);
if (idx <= 0)
return err;
} else
@@ -4771,6 +4794,34 @@ EXPORT_SYMBOL_HDA(snd_hda_input_mux_put);
/*
+ * process kcontrol info callback of a simple string enum array
+ * when @num_items is 0 or @texts is NULL, assume a boolean enum array
+ */
+int snd_hda_enum_helper_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo,
+ int num_items, const char * const *texts)
+{
+ static const char * const texts_default[] = {
+ "Disabled", "Enabled"
+ };
+
+ if (!texts || !num_items) {
+ num_items = 2;
+ texts = texts_default;
+ }
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = num_items;
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+ return 0;
+}
+EXPORT_SYMBOL_HDA(snd_hda_enum_helper_info);
+
+/*
* Multi-channel / digital-out PCM helper functions
*/
@@ -4778,10 +4829,20 @@ EXPORT_SYMBOL_HDA(snd_hda_input_mux_put);
static void setup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid,
unsigned int stream_tag, unsigned int format)
{
- struct hda_spdif_out *spdif = snd_hda_spdif_out_of_nid(codec, nid);
-
- /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */
- if (codec->spdif_status_reset && (spdif->ctls & AC_DIG1_ENABLE))
+ struct hda_spdif_out *spdif;
+ unsigned int curr_fmt;
+ bool reset;
+
+ spdif = snd_hda_spdif_out_of_nid(codec, nid);
+ curr_fmt = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_STREAM_FORMAT, 0);
+ reset = codec->spdif_status_reset &&
+ (spdif->ctls & AC_DIG1_ENABLE) &&
+ curr_fmt != format;
+
+ /* turn off SPDIF if needed; otherwise the IEC958 bits won't be
+ updated */
+ if (reset)
set_dig_out_convert(codec, nid,
spdif->ctls & ~AC_DIG1_ENABLE & 0xff,
-1);
@@ -4793,7 +4854,7 @@ static void setup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid,
format);
}
/* turn on again (if needed) */
- if (codec->spdif_status_reset && (spdif->ctls & AC_DIG1_ENABLE))
+ if (reset)
set_dig_out_convert(codec, nid,
spdif->ctls & 0xff, -1);
}
@@ -5137,6 +5198,7 @@ int snd_hda_suspend(struct hda_bus *bus)
struct hda_codec *codec;
list_for_each_entry(codec, &bus->codec_list, list) {
+ cancel_delayed_work_sync(&codec->jackpoll_work);
if (hda_codec_is_power_on(codec))
hda_call_codec_suspend(codec, false);
}
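
Jack polling is driven by a delayed_work that re-queues itself as long as jackpoll_interval is non-zero, and is torn down with cancel_delayed_work_sync() on codec free, reset and suspend. A compact sketch of that self-rearming pattern, using the system workqueue instead of the bus workqueue for brevity (hypothetical names, not from the patch):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poller {
	struct delayed_work work;
	unsigned long interval;		/* in jiffies; 0 = polling disabled */
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);

	if (!p->interval)
		return;
	/* ... check the hardware state here ... */
	schedule_delayed_work(&p->work, p->interval);	/* re-arm */
}

static struct poller demo;

static int __init poll_demo_init(void)
{
	demo.interval = msecs_to_jiffies(1000);
	INIT_DELAYED_WORK(&demo.work, poll_fn);
	schedule_delayed_work(&demo.work, demo.interval);
	return 0;
}

static void __exit poll_demo_exit(void)
{
	/* nothing may be running or pending after unload */
	cancel_delayed_work_sync(&demo.work);
}

module_init(poll_demo_init);
module_exit(poll_demo_exit);
MODULE_LICENSE("GPL");
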
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 4f4e545c0f4..8665540e55a 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -757,6 +757,7 @@ struct hda_pcm_stream {
u32 rates; /* supported rates */
u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */
unsigned int maxbps; /* supported max. bit per sample */
+ const struct snd_pcm_chmap_elem *chmap; /* chmap to override */
struct hda_pcm_ops ops;
};
@@ -836,6 +837,7 @@ struct hda_codec {
struct mutex hash_mutex;
struct snd_array spdif_out;
unsigned int spdif_in_enable; /* SPDIF input enable? */
+ int primary_dig_out_type; /* primary digital out PCM type */
const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
struct snd_array init_pins; /* initial (BIOS) pin configurations */
struct snd_array driver_pins; /* pin configs set by codec parser */
@@ -885,6 +887,8 @@ struct hda_codec {
/* jack detection */
struct snd_array jacktbl;
+ unsigned long jackpoll_interval; /* In jiffies. Zero means no poll, rely on unsol events */
+ struct delayed_work jackpoll_work;
#ifdef CONFIG_SND_HDA_INPUT_JACK
/* jack detection */
@@ -1024,6 +1028,8 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
int snd_hda_is_supported_format(struct hda_codec *codec, hda_nid_t nid,
unsigned int format);
+extern const struct snd_pcm_chmap_elem snd_pcm_2_1_chmaps[];
+
/*
* Misc
*/
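
The new per-stream chmap pointer lets a codec patch override the default stereo channel maps that add_std_chmaps() would otherwise install, for example with the exported snd_pcm_2_1_chmaps table. A rough sketch of how a codec patch might fill it in (hypothetical stream, field list abbreviated):

/* Sketch: overriding the default channel map for one PCM stream via
 * the new .chmap field (hypothetical values, not a real codec patch). */
#include "hda_codec.h"

static struct hda_pcm_stream my_analog_playback = {
	.substreams   = 1,
	.channels_min = 2,
	.channels_max = 4,
	.chmap        = snd_pcm_2_1_chmaps,	/* 2.1: FL/FR plus doubled LFE */
	/* .nid, .rates, .formats, .ops etc. filled in as usual */
};
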
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 1af86d40eb2..a5c9411bb36 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -125,7 +125,7 @@ static void hwdep_free(struct snd_hwdep *hwdep)
clear_hwdep_elements(hwdep->private_data);
}
-int /*__devinit*/ snd_hda_create_hwdep(struct hda_codec *codec)
+int snd_hda_create_hwdep(struct hda_codec *codec)
{
char hwname[16];
struct snd_hwdep *hwdep;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f9d870e554d..cca87277baf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -47,6 +47,10 @@
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/clocksource.h>
+#include <linux/time.h>
+#include <linux/completion.h>
+
#ifdef CONFIG_X86
/* for snoop control */
#include <asm/pgtable.h>
@@ -68,6 +72,7 @@ static int position_fix[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int bdl_pos_adj[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_only[SNDRV_CARDS];
+static int jackpoll_ms[SNDRV_CARDS];
static bool single_cmd;
static int enable_msi = -1;
#ifdef CONFIG_SND_HDA_PATCH_LOADER
@@ -95,6 +100,8 @@ module_param_array(probe_mask, int, NULL, 0444);
MODULE_PARM_DESC(probe_mask, "Bitmask to probe codecs (default = -1).");
module_param_array(probe_only, int, NULL, 0444);
MODULE_PARM_DESC(probe_only, "Only probing and no codec initialization.");
+module_param_array(jackpoll_ms, int, NULL, 0444);
+MODULE_PARM_DESC(jackpoll_ms, "Ms between polling for jack events (default = 0, using unsol events only)");
module_param(single_cmd, bool, 0444);
MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
"(for debugging only).");
@@ -185,7 +192,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
#ifdef CONFIG_SND_VERBOSE_PRINTK
#define SFX /* nop */
#else
-#define SFX "hda-intel: "
+#define SFX "hda-intel "
#endif
#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
@@ -416,6 +423,9 @@ struct azx_dev {
unsigned int insufficient :1;
unsigned int wc_marked:1;
unsigned int no_period_wakeup:1;
+
+ struct timecounter azx_tc;
+ struct cyclecounter azx_cc;
};
/* CORB/RIRB */
@@ -460,6 +470,7 @@ struct azx {
/* locks */
spinlock_t reg_lock;
struct mutex open_mutex;
+ struct completion probe_wait;
/* streams (x num_streams) */
struct azx_dev *azx_dev;
@@ -518,6 +529,9 @@ struct azx {
struct list_head list;
};
+#define CREATE_TRACE_POINTS
+#include "hda_intel_trace.h"
+
/* driver types */
enum {
AZX_DRIVER_ICH,
@@ -589,15 +603,7 @@ enum {
#define use_vga_switcheroo(chip) 0
#endif
-#if defined(SUPPORT_VGA_SWITCHEROO) || defined(CONFIG_SND_HDA_PATCH_LOADER)
-#define DELAYED_INIT_MARK
-#define DELAYED_INITDATA_MARK
-#else
-#define DELAYED_INIT_MARK __devinit
-#define DELAYED_INITDATA_MARK __devinitdata
-#endif
-
-static char *driver_short_names[] DELAYED_INITDATA_MARK = {
+static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -703,7 +709,7 @@ static int azx_alloc_cmd_io(struct azx *chip)
snd_dma_pci_data(chip->pci),
PAGE_SIZE, &chip->rb);
if (err < 0) {
- snd_printk(KERN_ERR SFX "cannot allocate CORB/RIRB\n");
+ snd_printk(KERN_ERR SFX "%s: cannot allocate CORB/RIRB\n", pci_name(chip->pci));
return err;
}
mark_pages_wc(chip, &chip->rb, true);
@@ -793,7 +799,12 @@ static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
spin_lock_irq(&chip->reg_lock);
/* add command to corb */
- wp = azx_readb(chip, CORBWP);
+ wp = azx_readw(chip, CORBWP);
+ if (wp == 0xffff) {
+ /* something wrong, controller likely turned to D3 */
+ spin_unlock_irq(&chip->reg_lock);
+ return -1;
+ }
wp++;
wp %= ICH6_MAX_CORB_ENTRIES;
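
The CORBWP/RIRBWP checks above rely on the fact that an MMIO read from a PCI device that has dropped to D3 (or disappeared) returns all ones, so 0xffff is treated as "do not trust this pointer". A small illustrative helper for the same pattern (hypothetical, not part of the patch):

/* Hypothetical helper mirroring the checks above: an all-ones read from
 * a PCI MMIO register usually means the device is in D3 or gone, so the
 * value must not be used as a ring-buffer pointer. */
#include <linux/io.h>
#include <linux/errno.h>

static int read_ring_ptr(void __iomem *reg, u16 *wp)
{
	u16 val = readw(reg);

	if (val == 0xffff)		/* device not responding */
		return -ENODEV;
	*wp = val;
	return 0;
}
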
@@ -815,7 +826,12 @@ static void azx_update_rirb(struct azx *chip)
unsigned int addr;
u32 res, res_ex;
- wp = azx_readb(chip, RIRBWP);
+ wp = azx_readw(chip, RIRBWP);
+ if (wp == 0xffff) {
+ /* something wrong, controller likely turned to D3 */
+ return;
+ }
+
if (wp == chip->rirb.wp)
return;
chip->rirb.wp = wp;
@@ -835,8 +851,9 @@ static void azx_update_rirb(struct azx *chip)
smp_wmb();
chip->rirb.cmds[addr]--;
} else
- snd_printk(KERN_ERR SFX "spurious response %#x:%#x, "
+ snd_printk(KERN_ERR SFX "%s: spurious response %#x:%#x, "
"last cmd=%#08x\n",
+ pci_name(chip->pci),
res, res_ex,
chip->last_cmd[addr]);
}
@@ -879,9 +896,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
}
if (!chip->polling_mode && chip->poll_count < 2) {
- snd_printdd(SFX "azx_get_response timeout, "
+ snd_printdd(SFX "%s: azx_get_response timeout, "
"polling the codec once: last cmd=0x%08x\n",
- chip->last_cmd[addr]);
+ pci_name(chip->pci), chip->last_cmd[addr]);
do_poll = 1;
chip->poll_count++;
goto again;
@@ -889,17 +906,17 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
if (!chip->polling_mode) {
- snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
+ snd_printk(KERN_WARNING SFX "%s: azx_get_response timeout, "
"switching to polling mode: last cmd=0x%08x\n",
- chip->last_cmd[addr]);
+ pci_name(chip->pci), chip->last_cmd[addr]);
chip->polling_mode = 1;
goto again;
}
if (chip->msi) {
- snd_printk(KERN_WARNING SFX "No response from codec, "
+ snd_printk(KERN_WARNING SFX "%s: No response from codec, "
"disabling MSI: last cmd=0x%08x\n",
- chip->last_cmd[addr]);
+ pci_name(chip->pci), chip->last_cmd[addr]);
free_irq(chip->irq, chip);
chip->irq = -1;
pci_disable_msi(chip->pci);
@@ -965,8 +982,8 @@ static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
udelay(1);
}
if (printk_ratelimit())
- snd_printd(SFX "get_response timeout: IRS=0x%x\n",
- azx_readw(chip, IRS));
+ snd_printd(SFX "%s: get_response timeout: IRS=0x%x\n",
+ pci_name(chip->pci), azx_readw(chip, IRS));
chip->rirb.res[addr] = -1;
return -EIO;
}
@@ -993,8 +1010,8 @@ static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
udelay(1);
}
if (printk_ratelimit())
- snd_printd(SFX "send_cmd timeout: IRS=0x%x, val=0x%x\n",
- azx_readw(chip, IRS), val);
+ snd_printd(SFX "%s: send_cmd timeout: IRS=0x%x, val=0x%x\n",
+ pci_name(chip->pci), azx_readw(chip, IRS), val);
return -EIO;
}
@@ -1047,7 +1064,7 @@ static void azx_power_notify(struct hda_bus *bus, bool power_up);
/* reset codec link */
static int azx_reset(struct azx *chip, int full_reset)
{
- int count;
+ unsigned long timeout;
if (!full_reset)
goto __skip;
@@ -1058,29 +1075,31 @@ static int azx_reset(struct azx *chip, int full_reset)
/* reset controller */
azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
- count = 50;
- while (azx_readb(chip, GCTL) && --count)
- msleep(1);
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (azx_readb(chip, GCTL) &&
+ time_before(jiffies, timeout))
+ usleep_range(500, 1000);
/* delay for >= 100us for codec PLL to settle per spec
* Rev 0.9 section 5.5.1
*/
- msleep(1);
+ usleep_range(500, 1000);
/* Bring controller out of reset */
azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
- count = 50;
- while (!azx_readb(chip, GCTL) && --count)
- msleep(1);
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (!azx_readb(chip, GCTL) &&
+ time_before(jiffies, timeout))
+ usleep_range(500, 1000);
/* Brent Chartrand said to wait >= 540us for codecs to initialize */
- msleep(1);
+ usleep_range(1000, 1200);
__skip:
/* check to see if controller is ready */
if (!azx_readb(chip, GCTL)) {
- snd_printd(SFX "azx_reset: controller not ready!\n");
+ snd_printd(SFX "%s: azx_reset: controller not ready!\n", pci_name(chip->pci));
return -EBUSY;
}
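
The link-reset polling now uses a jiffies deadline with usleep_range() instead of a fixed count of msleep(1) iterations, which bounds the wait in wall-clock time while allowing sub-millisecond sleeps. A generic sketch of that polling idiom (device_ready() is a placeholder for the real register test, e.g. the GCTL reads above):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* placeholder for the real hardware condition */
static bool device_ready(void)
{
	return true;
}

/* Poll for at most 100 ms, sleeping 0.5-1 ms between tries */
static int wait_until_ready(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (!device_ready()) {
		if (!time_before(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(500, 1000);
	}
	return 0;
}
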
@@ -1092,7 +1111,7 @@ static int azx_reset(struct azx *chip, int full_reset)
/* detect codecs */
if (!chip->codec_mask) {
chip->codec_mask = azx_readw(chip, STATESTS);
- snd_printdd(SFX "codec_mask = 0x%x\n", chip->codec_mask);
+ snd_printdd(SFX "%s: codec_mask = 0x%x\n", pci_name(chip->pci), chip->codec_mask);
}
return 0;
@@ -1236,7 +1255,7 @@ static void azx_init_pci(struct azx *chip)
* The PCI register TCSEL is defined in the Intel manuals.
*/
if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
- snd_printdd(SFX "Clearing TCSEL\n");
+ snd_printdd(SFX "%s: Clearing TCSEL\n", pci_name(chip->pci));
update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
}
@@ -1244,7 +1263,7 @@ static void azx_init_pci(struct azx *chip)
* we need to enable snoop.
*/
if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
- snd_printdd(SFX "Setting ATI snoop: %d\n", azx_snoop(chip));
+ snd_printdd(SFX "%s: Setting ATI snoop: %d\n", pci_name(chip->pci), azx_snoop(chip));
update_pci_byte(chip->pci,
ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 0x07,
azx_snoop(chip) ? ATI_SB450_HDAUDIO_ENABLE_SNOOP : 0);
@@ -1252,7 +1271,7 @@ static void azx_init_pci(struct azx *chip)
/* For NVIDIA HDA, enable snoop */
if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
- snd_printdd(SFX "Setting Nvidia snoop: %d\n", azx_snoop(chip));
+ snd_printdd(SFX "%s: Setting Nvidia snoop: %d\n", pci_name(chip->pci), azx_snoop(chip));
update_pci_byte(chip->pci,
NVIDIA_HDA_TRANSREG_ADDR,
0x0f, NVIDIA_HDA_ENABLE_COHBITS);
@@ -1277,8 +1296,8 @@ static void azx_init_pci(struct azx *chip)
pci_read_config_word(chip->pci,
INTEL_SCH_HDA_DEVC, &snoop);
}
- snd_printdd(SFX "SCH snoop: %s\n",
- (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
+ snd_printdd(SFX "%s: SCH snoop: %s\n",
+ pci_name(chip->pci), (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
? "Disabled" : "Enabled");
}
}
@@ -1438,8 +1457,8 @@ static int azx_setup_periods(struct azx *chip,
pos_align;
pos_adj = frames_to_bytes(runtime, pos_adj);
if (pos_adj >= period_bytes) {
- snd_printk(KERN_WARNING SFX "Too big adjustment %d\n",
- bdl_pos_adj[chip->dev_index]);
+ snd_printk(KERN_WARNING SFX "%s: Too big adjustment %d\n",
+ pci_name(chip->pci), bdl_pos_adj[chip->dev_index]);
pos_adj = 0;
} else {
ofs = setup_bdle(chip, substream, azx_dev,
@@ -1463,8 +1482,8 @@ static int azx_setup_periods(struct azx *chip,
return 0;
error:
- snd_printk(KERN_ERR SFX "Too many BDL entries: buffer=%d, period=%d\n",
- azx_dev->bufsize, period_bytes);
+ snd_printk(KERN_ERR SFX "%s: Too many BDL entries: buffer=%d, period=%d\n",
+ pci_name(chip->pci), azx_dev->bufsize, period_bytes);
return -EINVAL;
}
@@ -1561,7 +1580,7 @@ static int probe_codec(struct azx *chip, int addr)
mutex_unlock(&chip->bus->cmd_mutex);
if (res == -1)
return -EIO;
- snd_printdd(SFX "codec #%d probed OK\n", addr);
+ snd_printdd(SFX "%s: codec #%d probed OK\n", pci_name(chip->pci), addr);
return 0;
}
@@ -1588,17 +1607,33 @@ static void azx_bus_reset(struct hda_bus *bus)
bus->in_reset = 0;
}
+static int get_jackpoll_interval(struct azx *chip)
+{
+ int i = jackpoll_ms[chip->dev_index];
+ unsigned int j;
+ if (i == 0)
+ return 0;
+ if (i < 50 || i > 60000)
+ j = 0;
+ else
+ j = msecs_to_jiffies(i);
+ if (j == 0)
+ snd_printk(KERN_WARNING SFX
+ "jackpoll_ms value out of range: %d\n", i);
+ return j;
+}
+
/*
* Codec initialization
*/
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
-static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
+static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
[AZX_DRIVER_TERA] = 1,
};
-static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
+static int azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
@@ -1622,7 +1657,7 @@ static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *mode
return err;
if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
- snd_printd(SFX "Enable delay in RIRB handling\n");
+ snd_printd(SFX "%s: Enable delay in RIRB handling\n", pci_name(chip->pci));
chip->bus->needs_damn_long_delay = 1;
}
@@ -1639,8 +1674,8 @@ static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *mode
* that don't exist
*/
snd_printk(KERN_WARNING SFX
- "Codec #%d probe error; "
- "disabling it...\n", c);
+ "%s: Codec #%d probe error; "
+ "disabling it...\n", pci_name(chip->pci), c);
chip->codec_mask &= ~(1 << c);
/* More badly, accessing to a non-existing
* codec often screws up the controller chip,
@@ -1660,7 +1695,8 @@ static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *mode
* access works around the stall. Grrr...
*/
if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
- snd_printd(SFX "Enable sync_write for stable communication\n");
+ snd_printd(SFX "%s: Enable sync_write for stable communication\n",
+ pci_name(chip->pci));
chip->bus->sync_write = 1;
chip->bus->allow_bus_reset = 1;
}
@@ -1672,19 +1708,20 @@ static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *mode
err = snd_hda_codec_new(chip->bus, c, &codec);
if (err < 0)
continue;
+ codec->jackpoll_interval = get_jackpoll_interval(chip);
codec->beep_mode = chip->beep_mode;
codecs++;
}
}
if (!codecs) {
- snd_printk(KERN_ERR SFX "no codecs initialized\n");
+ snd_printk(KERN_ERR SFX "%s: no codecs initialized\n", pci_name(chip->pci));
return -ENXIO;
}
return 0;
}
/* configure each codec instance */
-static int __devinit azx_codec_configure(struct azx *chip)
+static int azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec;
list_for_each_entry(codec, &chip->bus->codec_list, list) {
@@ -1734,6 +1771,64 @@ static inline void azx_release_device(struct azx_dev *azx_dev)
azx_dev->opened = 0;
}
+static cycle_t azx_cc_read(const struct cyclecounter *cc)
+{
+ struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
+ struct snd_pcm_substream *substream = azx_dev->substream;
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx *chip = apcm->chip;
+
+ return azx_readl(chip, WALLCLK);
+}
+
+static void azx_timecounter_init(struct snd_pcm_substream *substream,
+ bool force, cycle_t last)
+{
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ struct timecounter *tc = &azx_dev->azx_tc;
+ struct cyclecounter *cc = &azx_dev->azx_cc;
+ u64 nsec;
+
+ cc->read = azx_cc_read;
+ cc->mask = CLOCKSOURCE_MASK(32);
+
+ /*
+ * Converting from 24 MHz to ns means applying a 125/3 factor.
+ * To avoid any saturation issues in intermediate operations,
+ * the 125 factor is applied first. The division is applied
+ * last after reading the timecounter value.
+ * Applying the 1/3 factor as part of the multiplication
+ * requires at least 20 bits for a decent precision, however
+ * overflows occur after about 4 hours or less, not an option.
+ */
+
+ cc->mult = 125; /* saturation after 195 years */
+ cc->shift = 0;
+
+ nsec = 0; /* audio time is elapsed time since trigger */
+ timecounter_init(tc, cc, nsec);
+ if (force)
+ /*
+ * force timecounter to use predefined value,
+ * used for synchronized starts
+ */
+ tc->cycle_last = last;
+}
+
+static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
+ struct timespec *ts)
+{
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ u64 nsec;
+
+ nsec = timecounter_read(&azx_dev->azx_tc);
+ nsec = div_u64(nsec, 3); /* can be optimized */
+
+ *ts = ns_to_timespec(nsec);
+
+ return 0;
+}
+
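The wall-clock timestamp code above converts the 24 MHz WALLCLK counter to nanoseconds by splitting the 125/3 ns tick period: the cyclecounter multiplies by 125 (mult = 125, shift = 0) and azx_get_wallclock_tstamp() divides the accumulated value by 3. With mult = 125 the 64-bit accumulator saturates after 2^64 / (125 * 24e6) s, roughly 195 years, matching the comment; the 32-bit counter itself wraps after 2^32 / 24e6, about 179 s, which is why azx_pcm_open() below caps the buffer time at 178,000,000 us. A small stand-alone check of the arithmetic (plain user-space C, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ticks = 24000000;		/* one second worth of 24 MHz ticks */
		uint64_t ns = ticks * 125 / 3;		/* -> 1,000,000,000 ns */
		double wrap_s = 4294967296.0 / 24e6;	/* 32-bit counter wrap: ~178.96 s */

		printf("%llu ns per second of ticks, wrap after %.2f s\n",
		       (unsigned long long)ns, wrap_s);
		return 0;
	}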
static struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
@@ -1743,6 +1838,7 @@ static struct snd_pcm_hardware azx_pcm_hw = {
/* SNDRV_PCM_INFO_RESUME |*/
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_HAS_WALL_CLOCK |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
@@ -1782,6 +1878,12 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
runtime->hw.rates = hinfo->rates;
snd_pcm_limit_hw_rates(runtime);
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+ /* avoid wrap-around with wall-clock */
+ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+ 20,
+ 178000000);
+
if (chip->align_buffer_size)
/* constrain buffer sizes to be multiple of 128
bytes. This is more efficient in terms of memory
@@ -1821,6 +1923,12 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
mutex_unlock(&chip->open_mutex);
return -EINVAL;
}
+
+ /* disable WALLCLOCK timestamps for capture streams
+ until we figure out how to handle digital inputs */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
+
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = substream;
azx_dev->running = 0;
@@ -1916,16 +2024,16 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
ctls);
if (!format_val) {
snd_printk(KERN_ERR SFX
- "invalid format_val, rate=%d, ch=%d, format=%d\n",
- runtime->rate, runtime->channels, runtime->format);
+ "%s: invalid format_val, rate=%d, ch=%d, format=%d\n",
+ pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format);
return -EINVAL;
}
bufsize = snd_pcm_lib_buffer_bytes(substream);
period_bytes = snd_pcm_lib_period_bytes(substream);
- snd_printdd(SFX "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
- bufsize, format_val);
+ snd_printdd(SFX "%s: azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
+ pci_name(chip->pci), bufsize, format_val);
if (bufsize != azx_dev->bufsize ||
period_bytes != azx_dev->period_bytes ||
@@ -1967,6 +2075,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
int rstart = 0, start, nsync = 0, sbits = 0;
int nwait, timeout;
+ azx_dev = get_azx_dev(substream);
+ trace_azx_pcm_trigger(chip, azx_dev, cmd);
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
rstart = 1;
@@ -2057,6 +2168,22 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
azx_readl(chip, OLD_SSYNC) & ~sbits);
else
azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
+ if (start) {
+ azx_timecounter_init(substream, 0, 0);
+ if (nsync > 1) {
+ cycle_t cycle_last;
+
+ /* same start cycle for master and group */
+ azx_dev = get_azx_dev(substream);
+ cycle_last = azx_dev->azx_tc.cycle_last;
+
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (s->pcm->card != substream->pcm->card)
+ continue;
+ azx_timecounter_init(s, 1, cycle_last);
+ }
+ }
+ }
spin_unlock(&chip->reg_lock);
return 0;
}
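The trigger path above gives every substream of a linked start the same time origin: the master is initialized normally, its cycle_last is read back, and azx_timecounter_init(s, 1, cycle_last) forces that value into each grouped stream on the same card. A minimal sketch of the forcing step, assuming a cyclecounter like the one set up above (function and parameter names are illustrative):

	#include <linux/clocksource.h>	/* cyclecounter/timecounter in this kernel generation */

	static void start_slave_at(struct timecounter *slave,
				   const struct cyclecounter *cc,
				   u64 master_cycle_last)
	{
		timecounter_init(slave, cc, 0);		/* elapsed time starts at 0 ns */
		slave->cycle_last = master_cycle_last;	/* share the master's start cycle */
	}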
@@ -2123,6 +2250,7 @@ static unsigned int azx_get_position(struct azx *chip,
{
unsigned int pos;
int stream = azx_dev->substream->stream;
+ int delay = 0;
switch (chip->position_fix[stream]) {
case POS_FIX_LPIB:
@@ -2156,7 +2284,6 @@ static unsigned int azx_get_position(struct azx *chip,
chip->position_fix[stream] == POS_FIX_POSBUF &&
(chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
unsigned int lpib_pos = azx_sd_readl(azx_dev, SD_LPIB);
- int delay;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
delay = pos - lpib_pos;
else
@@ -2165,15 +2292,16 @@ static unsigned int azx_get_position(struct azx *chip,
delay += azx_dev->bufsize;
if (delay >= azx_dev->period_bytes) {
snd_printk(KERN_WARNING SFX
- "Unstable LPIB (%d >= %d); "
+ "%s: Unstable LPIB (%d >= %d); "
"disabling LPIB delay counting\n",
- delay, azx_dev->period_bytes);
+ pci_name(chip->pci), delay, azx_dev->period_bytes);
delay = 0;
chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
}
azx_dev->substream->runtime->delay =
bytes_to_frames(azx_dev->substream->runtime, delay);
}
+ trace_azx_get_position(chip, azx_dev, pos, delay);
return pos;
}
@@ -2199,13 +2327,11 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
{
u32 wallclk;
unsigned int pos;
- int stream;
wallclk = azx_readl(chip, WALLCLK) - azx_dev->start_wallclk;
if (wallclk < (azx_dev->period_wallclk * 2) / 3)
return -1; /* bogus (too early) interrupt */
- stream = azx_dev->substream->stream;
pos = azx_get_position(chip, azx_dev, true);
if (WARN_ONCE(!azx_dev->period_bytes,
@@ -2296,6 +2422,7 @@ static struct snd_pcm_ops azx_pcm_ops = {
.prepare = azx_pcm_prepare,
.trigger = azx_pcm_trigger,
.pointer = azx_pcm_pointer,
+ .wall_clock = azx_get_wallclock_tstamp,
.mmap = azx_pcm_mmap,
.page = snd_pcm_sgbuf_ops_page,
};
@@ -2324,7 +2451,8 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
list_for_each_entry(apcm, &chip->pcm_list, list) {
if (apcm->pcm->device == pcm_dev) {
- snd_printk(KERN_ERR SFX "PCM %d already exists\n", pcm_dev);
+ snd_printk(KERN_ERR SFX "%s: PCM %d already exists\n",
+ pci_name(chip->pci), pcm_dev);
return -EBUSY;
}
}
@@ -2365,7 +2493,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
/*
* mixer creation - all stuff is implemented in hda module
*/
-static int __devinit azx_mixer_create(struct azx *chip)
+static int azx_mixer_create(struct azx *chip)
{
return snd_hda_build_controls(chip->bus);
}
@@ -2374,7 +2502,7 @@ static int __devinit azx_mixer_create(struct azx *chip)
/*
* initialize SD streams
*/
-static int __devinit azx_init_stream(struct azx *chip)
+static int azx_init_stream(struct azx *chip)
{
int i;
@@ -2502,6 +2630,9 @@ static int azx_suspend(struct device *dev)
struct azx *chip = card->private_data;
struct azx_pcm *p;
+ if (chip->disabled)
+ return 0;
+
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_clear_irq_pending(chip);
list_for_each_entry(p, &chip->pcm_list, list)
@@ -2527,6 +2658,9 @@ static int azx_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
+ if (chip->disabled)
+ return 0;
+
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
@@ -2557,10 +2691,6 @@ static int azx_runtime_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (!power_save_controller ||
- !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
- return -EAGAIN;
-
azx_stop_chip(chip);
azx_clear_irq_pending(chip);
return 0;
@@ -2575,12 +2705,25 @@ static int azx_runtime_resume(struct device *dev)
azx_init_chip(chip, 1);
return 0;
}
+
+static int azx_runtime_idle(struct device *dev)
+{
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
+
+ if (!power_save_controller ||
+ !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ return -EBUSY;
+
+ return 0;
+}
+
#endif /* CONFIG_PM_RUNTIME */
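The new azx_runtime_idle() moves the power_save_controller / AZX_DCAPS_PM_RUNTIME check out of the suspend callback: returning -EBUSY from ->runtime_idle tells the PM core not to start a runtime suspend at all, instead of letting azx_runtime_suspend() bail out half-way with -EAGAIN. A generic sketch of the pattern; the mydev names are invented, only the dev_pm_ops wiring mirrors the hunk above:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	struct mydev { bool allow_runtime_pm; };	/* invented for the sketch */

	static int mydev_runtime_suspend(struct device *dev) { return 0; }
	static int mydev_runtime_resume(struct device *dev)  { return 0; }

	static int mydev_runtime_idle(struct device *dev)
	{
		struct mydev *md = dev_get_drvdata(dev);

		if (!md->allow_runtime_pm)
			return -EBUSY;	/* veto: the PM core leaves the device active */
		return 0;		/* ok: the PM core may go on to runtime-suspend */
	}

	static const struct dev_pm_ops mydev_pm = {
		SET_RUNTIME_PM_OPS(mydev_runtime_suspend, mydev_runtime_resume,
				   mydev_runtime_idle)
	};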
#ifdef CONFIG_PM
static const struct dev_pm_ops azx_pm = {
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
- SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
};
#define AZX_PM_OPS &azx_pm
@@ -2612,11 +2755,11 @@ static void azx_notifier_unregister(struct azx *chip)
unregister_reboot_notifier(&chip->reboot_notifier);
}
-static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
-static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
+static int azx_first_init(struct azx *chip);
+static int azx_probe_continue(struct azx *chip);
#ifdef SUPPORT_VGA_SWITCHEROO
-static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
+static struct pci_dev *get_bound_vga(struct pci_dev *pci);
static void azx_vs_set_state(struct pci_dev *pci,
enum vga_switcheroo_state state)
@@ -2625,6 +2768,7 @@ static void azx_vs_set_state(struct pci_dev *pci,
struct azx *chip = card->private_data;
bool disabled;
+ wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return;
@@ -2648,15 +2792,14 @@ static void azx_vs_set_state(struct pci_dev *pci,
}
} else {
snd_printk(KERN_INFO SFX
- "%s %s via VGA-switcheroo\n",
- disabled ? "Disabling" : "Enabling",
- pci_name(chip->pci));
+ "%s: %s via VGA-switcheroo\n", pci_name(chip->pci),
+ disabled ? "Disabling" : "Enabling");
if (disabled) {
azx_suspend(&pci->dev);
chip->disabled = true;
if (snd_hda_lock_devices(chip->bus))
- snd_printk(KERN_WARNING SFX
- "Cannot lock devices!\n");
+ snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n",
+ pci_name(chip->pci));
} else {
snd_hda_unlock_devices(chip->bus);
chip->disabled = false;
@@ -2670,6 +2813,7 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
+ wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return false;
if (chip->disabled || !chip->bus)
@@ -2680,7 +2824,7 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
return true;
}
-static void __devinit init_vga_switcheroo(struct azx *chip)
+static void init_vga_switcheroo(struct azx *chip)
{
struct pci_dev *p = get_bound_vga(chip->pci);
if (p) {
@@ -2697,7 +2841,7 @@ static const struct vga_switcheroo_client_ops azx_vs_ops = {
.can_switch = azx_vs_can_switch,
};
-static int __devinit register_vga_switcheroo(struct azx *chip)
+static int register_vga_switcheroo(struct azx *chip)
{
int err;
@@ -2731,6 +2875,9 @@ static int azx_free(struct azx *chip)
azx_notifier_unregister(chip);
+ chip->init_failed = 1; /* to be sure */
+ complete_all(&chip->probe_wait);
+
if (use_vga_switcheroo(chip)) {
if (chip->disabled && chip->bus)
snd_hda_unlock_devices(chip->bus);
@@ -2789,7 +2936,7 @@ static int azx_dev_free(struct snd_device *device)
/*
* Check of disabled HDMI controller by vga-switcheroo
*/
-static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
+static struct pci_dev *get_bound_vga(struct pci_dev *pci)
{
struct pci_dev *p;
@@ -2812,7 +2959,7 @@ static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
return NULL;
}
-static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
+static bool check_hdmi_disabled(struct pci_dev *pci)
{
bool vga_inactive = false;
struct pci_dev *p = get_bound_vga(pci);
@@ -2829,7 +2976,7 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
/*
* white/black-listing for position_fix
*/
-static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+static struct snd_pci_quirk position_fix_list[] = {
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
@@ -2847,7 +2994,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
{}
};
-static int __devinit check_position_fix(struct azx *chip, int fix)
+static int check_position_fix(struct azx *chip, int fix)
{
const struct snd_pci_quirk *q;
@@ -2871,11 +3018,11 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
/* Check VIA/ATI HD Audio Controller exist */
if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
- snd_printd(SFX "Using VIACOMBO position fix\n");
+ snd_printd(SFX "%s: Using VIACOMBO position fix\n", pci_name(chip->pci));
return POS_FIX_VIACOMBO;
}
if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
- snd_printd(SFX "Using LPIB position fix\n");
+ snd_printd(SFX "%s: Using LPIB position fix\n", pci_name(chip->pci));
return POS_FIX_LPIB;
}
return POS_FIX_AUTO;
@@ -2884,7 +3031,7 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
/*
* black-lists for probe_mask
*/
-static struct snd_pci_quirk probe_mask_list[] __devinitdata = {
+static struct snd_pci_quirk probe_mask_list[] = {
/* Thinkpad often breaks the controller communication when accessing
* to the non-working (or non-existing) modem codec slot.
*/
@@ -2905,7 +3052,7 @@ static struct snd_pci_quirk probe_mask_list[] __devinitdata = {
#define AZX_FORCE_CODEC_MASK 0x100
-static void __devinit check_probe_mask(struct azx *chip, int dev)
+static void check_probe_mask(struct azx *chip, int dev)
{
const struct snd_pci_quirk *q;
@@ -2933,7 +3080,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
/*
* white/black-list for enable_msi
*/
-static struct snd_pci_quirk msi_black_list[] __devinitdata = {
+static struct snd_pci_quirk msi_black_list[] = {
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
@@ -2942,7 +3089,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
{}
};
-static void __devinit check_msi(struct azx *chip)
+static void check_msi(struct azx *chip)
{
const struct snd_pci_quirk *q;
@@ -2968,7 +3115,7 @@ static void __devinit check_msi(struct azx *chip)
}
/* check the snoop mode availability */
-static void __devinit azx_check_snoop_available(struct azx *chip)
+static void azx_check_snoop_available(struct azx *chip)
{
bool snoop = chip->snoop;
@@ -2991,8 +3138,8 @@ static void __devinit azx_check_snoop_available(struct azx *chip)
}
if (snoop != chip->snoop) {
- snd_printk(KERN_INFO SFX "Force to %s mode\n",
- snoop ? "snoop" : "non-snoop");
+ snd_printk(KERN_INFO SFX "%s: Force to %s mode\n",
+ pci_name(chip->pci), snoop ? "snoop" : "non-snoop");
chip->snoop = snoop;
}
}
@@ -3000,9 +3147,9 @@ static void __devinit azx_check_snoop_available(struct azx *chip)
/*
* constructor
*/
-static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
- int dev, unsigned int driver_caps,
- struct azx **rchip)
+static int azx_create(struct snd_card *card, struct pci_dev *pci,
+ int dev, unsigned int driver_caps,
+ struct azx **rchip)
{
static struct snd_device_ops ops = {
.dev_free = azx_dev_free,
@@ -3018,7 +3165,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip) {
- snd_printk(KERN_ERR SFX "cannot allocate chip\n");
+ snd_printk(KERN_ERR SFX "%s: Cannot allocate chip\n", pci_name(pci));
pci_disable_device(pci);
return -ENOMEM;
}
@@ -3036,6 +3183,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
INIT_LIST_HEAD(&chip->pcm_list);
INIT_LIST_HEAD(&chip->list);
init_vga_switcheroo(chip);
+ init_completion(&chip->probe_wait);
chip->position_fix[0] = chip->position_fix[1] =
check_position_fix(chip, position_fix[dev]);
@@ -3063,29 +3211,10 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
}
}
- if (check_hdmi_disabled(pci)) {
- snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
- pci_name(pci));
- if (use_vga_switcheroo(chip)) {
- snd_printk(KERN_INFO SFX "Delaying initialization\n");
- chip->disabled = true;
- goto ok;
- }
- kfree(chip);
- pci_disable_device(pci);
- return -ENXIO;
- }
-
- err = azx_first_init(chip);
- if (err < 0) {
- azx_free(chip);
- return err;
- }
-
- ok:
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0) {
- snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+ snd_printk(KERN_ERR SFX "%s: Error creating device [card]!\n",
+ pci_name(chip->pci));
azx_free(chip);
return err;
}
@@ -3094,7 +3223,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
return 0;
}
-static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
+static int azx_first_init(struct azx *chip)
{
int dev = chip->dev_index;
struct pci_dev *pci = chip->pci;
@@ -3120,7 +3249,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pci_ioremap_bar(pci, 0);
if (chip->remap_addr == NULL) {
- snd_printk(KERN_ERR SFX "ioremap error\n");
+ snd_printk(KERN_ERR SFX "%s: ioremap error\n", pci_name(chip->pci));
return -ENXIO;
}
@@ -3135,7 +3264,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
synchronize_irq(chip->irq);
gcap = azx_readw(chip, GCAP);
- snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
+ snd_printdd(SFX "%s: chipset global capabilities = 0x%x\n", pci_name(chip->pci), gcap);
/* disable SB600 64bit support for safety */
if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
@@ -3152,7 +3281,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
/* disable 64bit DMA address on some devices */
if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
- snd_printd(SFX "Disabling 64bit DMA\n");
+ snd_printd(SFX "%s: Disabling 64bit DMA\n", pci_name(chip->pci));
gcap &= ~ICH6_GCAP_64OK;
}
@@ -3207,7 +3336,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev),
GFP_KERNEL);
if (!chip->azx_dev) {
- snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
+ snd_printk(KERN_ERR SFX "%s: cannot malloc azx_dev\n", pci_name(chip->pci));
return -ENOMEM;
}
@@ -3217,7 +3346,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
snd_dma_pci_data(chip->pci),
BDL_SIZE, &chip->azx_dev[i].bdl);
if (err < 0) {
- snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
+ snd_printk(KERN_ERR SFX "%s: cannot allocate BDL\n", pci_name(chip->pci));
return -ENOMEM;
}
mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
@@ -3227,7 +3356,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
snd_dma_pci_data(chip->pci),
chip->num_streams * 8, &chip->posbuf);
if (err < 0) {
- snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
+ snd_printk(KERN_ERR SFX "%s: cannot allocate posbuf\n", pci_name(chip->pci));
return -ENOMEM;
}
mark_pages_wc(chip, &chip->posbuf, true);
@@ -3245,7 +3374,7 @@ static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
/* codec detection */
if (!chip->codec_mask) {
- snd_printk(KERN_ERR SFX "no codecs found!\n");
+ snd_printk(KERN_ERR SFX "%s: no codecs found!\n", pci_name(chip->pci));
return -ENODEV;
}
@@ -3281,7 +3410,8 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
struct pci_dev *pci = chip->pci;
if (!fw) {
- snd_printk(KERN_ERR SFX "Cannot load firmware, aborting\n");
+ snd_printk(KERN_ERR SFX "%s: Cannot load firmware, aborting\n",
+ pci_name(chip->pci));
goto error;
}
@@ -3299,8 +3429,8 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
}
#endif
-static int __devinit azx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int azx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -3317,7 +3447,7 @@ static int __devinit azx_probe(struct pci_dev *pci,
err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
if (err < 0) {
- snd_printk(KERN_ERR SFX "Error creating card!\n");
+ snd_printk(KERN_ERR "hda-intel: Error creating card!\n");
return err;
}
@@ -3327,12 +3457,34 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
card->private_data = chip;
+
+ pci_set_drvdata(pci, card);
+
+ err = register_vga_switcheroo(chip);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX
+ "%s: Error registering VGA-switcheroo client\n", pci_name(pci));
+ goto out_free;
+ }
+
+ if (check_hdmi_disabled(pci)) {
+ snd_printk(KERN_INFO SFX "%s: VGA controller is disabled\n",
+ pci_name(pci));
+ snd_printk(KERN_INFO SFX "%s: Delaying initialization\n", pci_name(pci));
+ chip->disabled = true;
+ }
+
probe_now = !chip->disabled;
+ if (probe_now) {
+ err = azx_first_init(chip);
+ if (err < 0)
+ goto out_free;
+ }
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (patch[dev] && *patch[dev]) {
- snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
- patch[dev]);
+ snd_printk(KERN_ERR SFX "%s: Applying patch firmware '%s'\n",
+ pci_name(pci), patch[dev]);
err = request_firmware_nowait(THIS_MODULE, true, patch[dev],
&pci->dev, GFP_KERNEL, card,
azx_firmware_cb);
@@ -3348,27 +3500,20 @@ static int __devinit azx_probe(struct pci_dev *pci,
goto out_free;
}
- pci_set_drvdata(pci, card);
-
if (pci_dev_run_wake(pci))
pm_runtime_put_noidle(&pci->dev);
- err = register_vga_switcheroo(chip);
- if (err < 0) {
- snd_printk(KERN_ERR SFX
- "Error registering VGA-switcheroo client\n");
- goto out_free;
- }
-
dev++;
+ complete_all(&chip->probe_wait);
return 0;
out_free:
snd_card_free(card);
+ pci_set_drvdata(pci, NULL);
return err;
}
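azx_probe() now registers the VGA-switcheroo client and may defer azx_first_init() before the card is usable, so the switcheroo callbacks wait on chip->probe_wait; the completion is signalled on the success path here and also in azx_free() (after forcing init_failed), so waiters can never hang. A condensed sketch of that completion handshake, with mydrv names invented for the example:

	#include <linux/completion.h>

	struct mydrv {				/* invented names */
		struct completion probe_wait;
		bool init_failed;
	};

	static int mydrv_probe(struct mydrv *drv)
	{
		init_completion(&drv->probe_wait);
		/* ... possibly long or deferred initialization ... */
		complete_all(&drv->probe_wait);	/* wakes current and future waiters */
		return 0;
	}

	static void mydrv_external_callback(struct mydrv *drv)
	{
		wait_for_completion(&drv->probe_wait);	/* never races with probe */
		if (drv->init_failed)
			return;
		/* safe to use fully initialized state here */
	}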
-static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
+static int azx_probe_continue(struct azx *chip)
{
int dev = chip->dev_index;
int err;
@@ -3387,8 +3532,10 @@ static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
chip->fw->data);
if (err < 0)
goto out_free;
+#ifndef CONFIG_PM
release_firmware(chip->fw); /* no longer needed */
chip->fw = NULL;
+#endif
}
#endif
if ((probe_only[dev] & 1) == 0) {
@@ -3423,7 +3570,7 @@ out_free:
return err;
}
-static void __devexit azx_remove(struct pci_dev *pci)
+static void azx_remove(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
@@ -3610,7 +3757,7 @@ static struct pci_driver azx_driver = {
.name = KBUILD_MODNAME,
.id_table = azx_ids,
.probe = azx_probe,
- .remove = __devexit_p(azx_remove),
+ .remove = azx_remove,
.driver = {
.pm = AZX_PM_OPS,
},
diff --git a/sound/pci/hda/hda_intel_trace.h b/sound/pci/hda/hda_intel_trace.h
new file mode 100644
index 00000000000..7b5e4c2cf9d
--- /dev/null
+++ b/sound/pci/hda/hda_intel_trace.h
@@ -0,0 +1,62 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hda_intel
+#define TRACE_INCLUDE_FILE hda_intel_trace
+
+#if !defined(_TRACE_HDA_INTEL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HDA_INTEL_H
+
+#include <linux/tracepoint.h>
+
+struct azx;
+struct azx_dev;
+
+TRACE_EVENT(azx_pcm_trigger,
+
+ TP_PROTO(struct azx *chip, struct azx_dev *dev, int cmd),
+
+ TP_ARGS(chip, dev, cmd),
+
+ TP_STRUCT__entry(
+ __field( int, card )
+ __field( int, idx )
+ __field( int, cmd )
+ ),
+
+ TP_fast_assign(
+ __entry->card = (chip)->card->number;
+ __entry->idx = (dev)->index;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk("[%d:%d] cmd=%d", __entry->card, __entry->idx, __entry->cmd)
+);
+
+TRACE_EVENT(azx_get_position,
+
+ TP_PROTO(struct azx *chip, struct azx_dev *dev, unsigned int pos, unsigned int delay),
+
+ TP_ARGS(chip, dev, pos, delay),
+
+ TP_STRUCT__entry(
+ __field( int, card )
+ __field( int, idx )
+ __field( unsigned int, pos )
+ __field( unsigned int, delay )
+ ),
+
+ TP_fast_assign(
+ __entry->card = (chip)->card->number;
+ __entry->idx = (dev)->index;
+ __entry->pos = pos;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("[%d:%d] pos=%u, delay=%u", __entry->card, __entry->idx, __entry->pos, __entry->delay)
+);
+
+#endif /* _TRACE_HDA_INTEL_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
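The new hda_intel_trace.h follows the standard TRACE_EVENT() recipe: TP_PROTO/TP_ARGS declare the tracepoint's C signature, TP_STRUCT__entry and TP_fast_assign copy the interesting fields into the ring buffer, and TP_printk formats them. Once enabled, the events typically appear under /sys/kernel/debug/tracing/events/hda_intel/ (from TRACE_SYSTEM). The hunk that instantiates the tracepoints in hda_intel.c is not shown in this excerpt, so the following is the generic consumption pattern rather than a quote of it:

	/* In exactly one .c file of the driver, before the include: */
	#define CREATE_TRACE_POINTS
	#include "hda_intel_trace.h"

	/* At the call sites (as in the hunks above), inside driver functions: */
		trace_azx_pcm_trigger(chip, azx_dev, cmd);
		trace_azx_get_position(chip, azx_dev, pos, delay);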
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 5c690cb873d..6e9f57bbe66 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -95,7 +95,6 @@ snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
if (jack)
return jack;
- snd_array_init(&codec->jacktbl, sizeof(*jack), 16);
jack = snd_array_new(&codec->jacktbl);
if (!jack)
return NULL;
@@ -122,6 +121,8 @@ void snd_hda_jack_tbl_clear(struct hda_codec *codec)
snd_array_free(&codec->jacktbl);
}
+#define get_jack_plug_state(sense) !!(sense & AC_PINSENSE_PRESENCE)
+
/* update the cached value and notification flag if needed */
static void jack_detect_update(struct hda_codec *codec,
struct hda_jack_tbl *jack)
@@ -134,7 +135,21 @@ static void jack_detect_update(struct hda_codec *codec,
else
jack->pin_sense = read_pin_sense(codec, jack->nid);
+ /* A gating jack indicates the jack is invalid if gating is unplugged */
+ if (jack->gating_jack && !snd_hda_jack_detect(codec, jack->gating_jack))
+ jack->pin_sense &= ~AC_PINSENSE_PRESENCE;
+
jack->jack_dirty = 0;
+
+ /* If a jack is gated by this one update it. */
+ if (jack->gated_jack) {
+ struct hda_jack_tbl *gated =
+ snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ if (gated) {
+ gated->jack_dirty = 1;
+ jack_detect_update(codec, gated);
+ }
+ }
}
/**
@@ -173,8 +188,6 @@ u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
}
EXPORT_SYMBOL_HDA(snd_hda_pin_sense);
-#define get_jack_plug_state(sense) !!(sense & AC_PINSENSE_PRESENCE)
-
/**
* snd_hda_jack_detect - query pin Presence Detect status
* @codec: the CODEC to sense
@@ -206,6 +219,8 @@ int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
jack->action = action;
if (cb)
jack->callback = cb;
+ if (codec->jackpoll_interval > 0)
+ return 0; /* No unsol if we're polling instead */
return snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_UNSOLICITED_ENABLE,
AC_USRSP_EN | jack->tag);
@@ -220,16 +235,46 @@ int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
EXPORT_SYMBOL_HDA(snd_hda_jack_detect_enable);
/**
+ * snd_hda_jack_set_gating_jack - Set gating jack.
+ *
+ * Indicates the gated jack is only valid when the gating jack is plugged.
+ */
+int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
+ hda_nid_t gating_nid)
+{
+ struct hda_jack_tbl *gated = snd_hda_jack_tbl_get(codec, gated_nid);
+ struct hda_jack_tbl *gating = snd_hda_jack_tbl_get(codec, gating_nid);
+
+ if (!gated || !gating)
+ return -EINVAL;
+
+ gated->gating_jack = gating_nid;
+ gating->gated_jack = gated_nid;
+
+ return 0;
+}
+EXPORT_SYMBOL_HDA(snd_hda_jack_set_gating_jack);
+
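snd_hda_jack_set_gating_jack() links two existing jack-table entries so that the gated pin only reports presence while the gating pin is plugged; both NIDs must already have hda_jack_tbl entries, otherwise -EINVAL is returned. A hypothetical call site (the NIDs below are invented for the example):

	static int wire_dock_gating(struct hda_codec *codec)
	{
		/* pin 0x1c only counts as plugged while pin 0x1a is plugged */
		return snd_hda_jack_set_gating_jack(codec, 0x1c /* gated */,
						    0x1a /* gating */);
	}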
+/**
* snd_hda_jack_report_sync - sync the states of all jacks and report if changed
*/
void snd_hda_jack_report_sync(struct hda_codec *codec)
{
- struct hda_jack_tbl *jack = codec->jacktbl.list;
+ struct hda_jack_tbl *jack;
int i, state;
+ /* update all jacks at first */
+ jack = codec->jacktbl.list;
for (i = 0; i < codec->jacktbl.used; i++, jack++)
- if (jack->nid) {
+ if (jack->nid)
jack_detect_update(codec, jack);
+
+ /* report the updated jacks; it's done after updating all jacks
+ * to make sure that all gating jacks properly have been set
+ */
+ jack = codec->jacktbl.list;
+ for (i = 0; i < codec->jacktbl.used; i++, jack++)
+ if (jack->nid) {
if (!jack->kctl)
continue;
state = get_jack_plug_state(jack->pin_sense);
@@ -422,6 +467,19 @@ int snd_hda_jack_add_kctls(struct hda_codec *codec,
}
EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctls);
+static void call_jack_callback(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ if (jack->callback)
+ jack->callback(codec, jack);
+ if (jack->gated_jack) {
+ struct hda_jack_tbl *gated =
+ snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ if (gated && gated->callback)
+ gated->callback(codec, gated);
+ }
+}
+
void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
{
struct hda_jack_tbl *event;
@@ -432,10 +490,29 @@ void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
return;
event->jack_dirty = 1;
- if (event->callback)
- event->callback(codec, event);
-
+ call_jack_callback(codec, event);
snd_hda_jack_report_sync(codec);
}
EXPORT_SYMBOL_HDA(snd_hda_jack_unsol_event);
+void snd_hda_jack_poll_all(struct hda_codec *codec)
+{
+ struct hda_jack_tbl *jack = codec->jacktbl.list;
+ int i, changes = 0;
+
+ for (i = 0; i < codec->jacktbl.used; i++, jack++) {
+ unsigned int old_sense;
+ if (!jack->nid || !jack->jack_dirty || jack->phantom_jack)
+ continue;
+ old_sense = get_jack_plug_state(jack->pin_sense);
+ jack_detect_update(codec, jack);
+ if (old_sense == get_jack_plug_state(jack->pin_sense))
+ continue;
+ changes = 1;
+ call_jack_callback(codec, jack);
+ }
+ if (changes)
+ snd_hda_jack_report_sync(codec);
+}
+EXPORT_SYMBOL_HDA(snd_hda_jack_poll_all);
+
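snd_hda_jack_poll_all() re-reads every dirty, non-phantom jack and fires callbacks plus a report sync only when a plug state actually changed; it is meant to be driven periodically for codecs whose jackpoll_interval is non-zero (those codecs also skip unsolicited-event enablement in the hunk above). The scheduling itself lives outside this excerpt; a generic shape of such a polling loop, with my_poll_ctx and its fields invented for the sketch:

	#include <linux/workqueue.h>
	#include "hda_codec.h"
	#include "hda_jack.h"

	struct my_poll_ctx {			/* invented for the sketch */
		struct hda_codec *codec;
		struct delayed_work work;
	};

	static void jackpoll_work_fn(struct work_struct *work)
	{
		struct my_poll_ctx *ctx =
			container_of(to_delayed_work(work), struct my_poll_ctx, work);

		snd_hda_jack_poll_all(ctx->codec);
		if (ctx->codec->jackpoll_interval > 0)
			schedule_delayed_work(&ctx->work,
					      ctx->codec->jackpoll_interval);
	}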
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index af8dd4724da..ec12abd4526 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -28,6 +28,8 @@ struct hda_jack_tbl {
unsigned int jack_detect:1; /* capable of jack-detection? */
unsigned int jack_dirty:1; /* needs to update? */
unsigned int phantom_jack:1; /* a fixed, always present port? */
+ hda_nid_t gating_jack; /* valid when gating jack plugged */
+ hda_nid_t gated_jack; /* gated is dependent on this jack */
struct snd_kcontrol *kctl; /* assigned kctl for jack-detection */
#ifdef CONFIG_SND_HDA_INPUT_JACK
int type;
@@ -69,6 +71,8 @@ int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
unsigned char action,
hda_jack_callback cb);
+int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
+ hda_nid_t gating_nid);
u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
@@ -84,4 +88,6 @@ void snd_hda_jack_report_sync(struct hda_codec *codec);
void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res);
+void snd_hda_jack_poll_all(struct hda_codec *codec);
+
#endif /* __SOUND_HDA_JACK_H */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 09dbdc37f78..4b40a5e7a8f 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -240,9 +240,11 @@ int snd_hda_mixer_bind_tlv(struct snd_kcontrol *kcontrol, int op_flag,
/*
* SPDIF I/O
*/
-int snd_hda_create_spdif_out_ctls(struct hda_codec *codec,
- hda_nid_t associated_nid,
- hda_nid_t cvt_nid);
+int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
+ hda_nid_t associated_nid,
+ hda_nid_t cvt_nid, int type);
+#define snd_hda_create_spdif_out_ctls(codec, anid, cnid) \
+ snd_hda_create_dig_out_ctls(codec, anid, cnid, HDA_PCM_TYPE_SPDIF)
int snd_hda_create_spdif_in_ctls(struct hda_codec *codec, hda_nid_t nid);
/*
@@ -599,6 +601,15 @@ int snd_hda_check_amp_list_power(struct hda_codec *codec,
#define get_amp_min_mute(kc) (((kc)->private_value >> 29) & 0x1)
/*
+ * enum control helper
+ */
+int snd_hda_enum_helper_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo,
+ int num_entries, const char * const *texts);
+#define snd_hda_enum_bool_helper_info(kcontrol, uinfo) \
+ snd_hda_enum_helper_info(kcontrol, uinfo, 0, NULL)
+
+/*
* CEA Short Audio Descriptor data
*/
struct cea_sad {
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 1eeba738666..89fc5030ec7 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -636,7 +636,6 @@ static void ad198x_free(struct hda_codec *codec)
if (!spec)
return;
- ad198x_shutup(codec);
ad198x_free_kctls(codec);
kfree(spec);
snd_hda_detach_beep_device(codec);
@@ -1247,16 +1246,27 @@ static int is_jack_available(struct hda_codec *codec, hda_nid_t nid)
return get_defcfg_connect(conf) != AC_JACK_PORT_NONE;
}
-static int patch_ad1986a(struct hda_codec *codec)
+static int alloc_ad_spec(struct hda_codec *codec)
{
struct ad198x_spec *spec;
- int err, board_config;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
+ if (!spec)
return -ENOMEM;
-
codec->spec = spec;
+ snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
+ return 0;
+}
+
+static int patch_ad1986a(struct hda_codec *codec)
+{
+ struct ad198x_spec *spec;
+ int err, board_config;
+
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x19);
if (err < 0) {
@@ -1549,11 +1559,10 @@ static int patch_ad1983(struct hda_codec *codec)
struct ad198x_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x10);
if (err < 0) {
@@ -1955,11 +1964,10 @@ static int patch_ad1981(struct hda_codec *codec)
struct ad198x_spec *spec;
int err, board_config;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
+ err = alloc_ad_spec(codec);
+ if (err < 0)
return -ENOMEM;
-
- codec->spec = spec;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x10);
if (err < 0) {
@@ -2837,7 +2845,6 @@ static int add_control(struct ad198x_spec *spec, int type, const char *name,
{
struct snd_kcontrol_new *knew;
- snd_array_init(&spec->kctls, sizeof(*knew), 32);
knew = snd_array_new(&spec->kctls);
if (!knew)
return -ENOMEM;
@@ -3255,11 +3262,10 @@ static int patch_ad1988(struct hda_codec *codec)
struct ad198x_spec *spec;
int err, board_config;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
if (is_rev2(codec))
snd_printk(KERN_INFO "patch_analog: AD1988A rev.2 is detected, enable workarounds\n");
@@ -3575,11 +3581,10 @@ static int patch_ad1884(struct hda_codec *codec)
struct ad198x_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x10);
if (err < 0) {
@@ -4575,11 +4580,10 @@ static int patch_ad1884a(struct hda_codec *codec)
struct ad198x_spec *spec;
int err, board_config;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x10);
if (err < 0) {
@@ -4988,11 +4992,10 @@ static int patch_ad1882(struct hda_codec *codec)
struct ad198x_spec *spec;
int err, board_config;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
+ err = alloc_ad_spec(codec);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
err = snd_hda_attach_beep_device(codec, 0x10);
if (err < 0) {
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 3bcb6717235..a2537b2f872 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -68,6 +68,7 @@ struct cs_spec {
unsigned int hp_detect:1;
unsigned int mic_detect:1;
+ unsigned int speaker_2_1:1;
/* CS421x */
unsigned int spdif_detect:1;
unsigned int sense_b:1;
@@ -84,7 +85,7 @@ enum {
CS420X_GPIO_13,
CS420X_GPIO_23,
CS420X_MBP101,
- CS420X_MBP101_COEF,
+ CS420X_MBP81,
CS420X_AUTO,
/* aliases */
CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -343,6 +344,9 @@ static int cs_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dac_nid[0];
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
spec->multiout.max_channels;
+ if (spec->speaker_2_1)
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap =
+ snd_pcm_2_1_chmaps;
info->stream[SNDRV_PCM_STREAM_CAPTURE] = cs_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
spec->adc_nid[spec->cur_input];
@@ -443,6 +447,9 @@ static int parse_output(struct hda_codec *codec)
spec->multiout.dac_nids = spec->dac_nid;
spec->multiout.max_channels = i * 2;
+ if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT && i == 2)
+ spec->speaker_2_1 = 1; /* assume 2.1 speakers */
+
/* add HP and speakers */
extra_nids = 0;
for (i = 0; i < cfg->hp_outs; i++) {
@@ -633,7 +640,9 @@ static int add_output(struct hda_codec *codec, hda_nid_t dac, int idx,
index = idx;
break;
case AUTO_PIN_SPEAKER_OUT:
- if (num_ctls > 1)
+ if (spec->speaker_2_1)
+ name = idx ? "Bass Speaker" : "Speaker";
+ else if (num_ctls > 1)
name = speakers[idx];
else
name = "Speaker";
@@ -874,8 +883,9 @@ static int build_digital_output(struct hda_codec *codec)
if (!spec->multiout.dig_out_nid)
return 0;
- err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid,
- spec->multiout.dig_out_nid);
+ err = snd_hda_create_dig_out_ctls(codec, spec->multiout.dig_out_nid,
+ spec->multiout.dig_out_nid,
+ spec->pcm_rec[1].pcm_type);
if (err < 0)
return err;
err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
@@ -1079,9 +1089,6 @@ static void init_input(struct hda_codec *codec)
if (spec->mic_detect)
cs_automic(codec, NULL);
- coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
- cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
-
coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG);
if (is_active_pin(codec, CS_DMIC2_PIN_NID))
coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */
@@ -1111,6 +1118,9 @@ static const struct hda_verb cs_coef_init_verbs[] = {
| 0x1000 /* Enable DACs High Pass Filter */
| 0x0400 /* Disable Coefficient Auto increment */
)},
+ /* ADC1/2 - Digital and Analog Soft Ramp */
+ {0x11, AC_VERB_SET_COEF_INDEX, IDX_ADC_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x000a},
/* Beep */
{0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG},
{0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
@@ -1167,14 +1177,6 @@ static const struct hda_verb cs_errata_init_verbs[] = {
{} /* terminator */
};
-static const struct hda_verb mbp101_init_verbs[] = {
- {0x11, AC_VERB_SET_COEF_INDEX, 0x0002},
- {0x11, AC_VERB_SET_PROC_COEF, 0x100a},
- {0x11, AC_VERB_SET_COEF_INDEX, 0x0004},
- {0x11, AC_VERB_SET_PROC_COEF, 0x000f},
- {}
-};
-
/* SPDIF setup */
static void init_digital(struct hda_codec *codec)
{
@@ -1199,6 +1201,8 @@ static int cs_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, cs_coef_init_verbs);
+ snd_hda_gen_apply_verbs(codec);
+
if (spec->gpio_mask) {
snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
spec->gpio_mask);
@@ -1291,6 +1295,7 @@ static const struct hda_model_fixup cs420x_models[] = {
{ .id = CS420X_IMAC27_122, .name = "imac27_122" },
{ .id = CS420X_APPLE, .name = "apple" },
{ .id = CS420X_MBP101, .name = "mbp101" },
+ { .id = CS420X_MBP81, .name = "mbp81" },
{}
};
@@ -1303,6 +1308,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
/* codec SSID */
+ SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
@@ -1413,11 +1419,16 @@ static const struct hda_fixup cs420x_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = mbp101_pincfgs,
.chained = true,
- .chain_id = CS420X_MBP101_COEF,
+ .chain_id = CS420X_GPIO_13,
},
- [CS420X_MBP101_COEF] = {
+ [CS420X_MBP81] = {
.type = HDA_FIXUP_VERBS,
- .v.verbs = mbp101_init_verbs,
+ .v.verbs = (const struct hda_verb[]) {
+ /* internal mic ADC2: right only, single ended */
+ {0x11, AC_VERB_SET_COEF_INDEX, IDX_ADC_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x102a},
+ {}
+ },
.chained = true,
.chain_id = CS420X_GPIO_13,
},
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 03b1dc317ff..60890bfecc1 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -337,6 +337,8 @@ static const struct hda_pcm_stream cx5051_pcm_analog_capture = {
},
};
+static bool is_2_1_speaker(struct conexant_spec *spec);
+
static int conexant_build_pcms(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
@@ -351,6 +353,9 @@ static int conexant_build_pcms(struct hda_codec *codec)
spec->multiout.max_channels;
info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
spec->multiout.dac_nids[0];
+ if (is_2_1_speaker(spec))
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap =
+ snd_pcm_2_1_chmaps;
if (spec->capture_stream)
info->stream[SNDRV_PCM_STREAM_CAPTURE] = *spec->capture_stream;
else {
@@ -472,7 +477,7 @@ static const struct snd_kcontrol_new cxt_beep_mixer[] = {
#endif
static const char * const slave_pfxs[] = {
- "Headphone", "Speaker", "Front", "Surround", "CLFE",
+ "Headphone", "Speaker", "Bass Speaker", "Front", "Surround", "CLFE",
NULL
};
@@ -3430,28 +3435,13 @@ static int cx_automute_mode_info(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct conexant_spec *spec = codec->spec;
- static const char * const texts2[] = {
- "Disabled", "Enabled"
- };
static const char * const texts3[] = {
"Disabled", "Speaker Only", "Line Out+Speaker"
};
- const char * const *texts;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- if (spec->automute_hp_lo) {
- uinfo->value.enumerated.items = 3;
- texts = texts3;
- } else {
- uinfo->value.enumerated.items = 2;
- texts = texts2;
- }
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ if (spec->automute_hp_lo)
+ return snd_hda_enum_helper_info(kcontrol, uinfo, 3, texts3);
+ return snd_hda_enum_bool_helper_info(kcontrol, uinfo);
}
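cx_automute_mode_info() above is one of the first users of the new snd_hda_enum_helper_info()/snd_hda_enum_bool_helper_info() helpers added to hda_local.h, which fold the repetitive SNDRV_CTL_ELEM_TYPE_ENUMERATED boilerplate into one call. A sketch of an info callback built the same way (control texts are illustrative; assumes "hda_local.h" is included):

	static int my_mode_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
	{
		static const char * const texts[] = {
			"Off", "Low", "High"	/* illustrative labels */
		};
		return snd_hda_enum_helper_info(kcontrol, uinfo,
						ARRAY_SIZE(texts), texts);
	}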
static int cx_automute_mode_get(struct snd_kcontrol *kcontrol,
@@ -4116,11 +4106,26 @@ static int try_add_pb_volume(struct hda_codec *codec, hda_nid_t dac,
return 0;
}
+static bool is_2_1_speaker(struct conexant_spec *spec)
+{
+ int i, type, num_spk = 0;
+
+ for (i = 0; i < spec->dac_info_filled; i++) {
+ type = spec->dac_info[i].type;
+ if (type == AUTO_PIN_LINE_OUT)
+ type = spec->autocfg.line_out_type;
+ if (type == AUTO_PIN_SPEAKER_OUT)
+ num_spk++;
+ }
+ return (num_spk == 2 && spec->autocfg.line_out_type != AUTO_PIN_LINE_OUT);
+}
+
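is_2_1_speaker() lets the Conexant driver publish snd_pcm_2_1_chmaps and name the second output "Bass Speaker" when exactly two speaker DACs and no line-outs are present, mirroring the Cirrus change earlier in this diff. From user space the resulting channel map is visible through the alsa-lib chmap API; a minimal query sketch (the "hw:0,0" device string is a placeholder):

	#include <stdio.h>
	#include <alsa/asoundlib.h>

	int main(void)
	{
		snd_pcm_t *pcm;
		snd_pcm_chmap_query_t **maps;
		int i;

		if (snd_pcm_open(&pcm, "hw:0,0", SND_PCM_STREAM_PLAYBACK, 0) < 0)
			return 1;
		maps = snd_pcm_query_chmaps(pcm);
		for (i = 0; maps && maps[i]; i++)
			printf("chmap %d: %u channels\n", i, maps[i]->map.channels);
		snd_pcm_free_chmaps(maps);
		snd_pcm_close(pcm);
		return 0;
	}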
static int cx_auto_build_output_controls(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
int i, err;
int num_line = 0, num_hp = 0, num_spk = 0;
+ bool speaker_2_1;
static const char * const texts[3] = { "Front", "Surround", "CLFE" };
if (spec->dac_info_filled == 1)
@@ -4128,6 +4133,8 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
spec->dac_info[0].pin,
"Master", 0);
+ speaker_2_1 = is_2_1_speaker(spec);
+
for (i = 0; i < spec->dac_info_filled; i++) {
const char *label;
int idx, type;
@@ -4146,8 +4153,13 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
idx = num_hp++;
break;
case AUTO_PIN_SPEAKER_OUT:
- label = "Speaker";
- idx = num_spk++;
+ if (speaker_2_1) {
+ label = num_spk++ ? "Bass Speaker" : "Speaker";
+ idx = 0;
+ } else {
+ label = "Speaker";
+ idx = num_spk++;
+ }
break;
}
err = try_add_pb_volume(codec, dac,
@@ -4405,7 +4417,10 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
enum {
CXT_PINCFG_LENOVO_X200,
CXT_PINCFG_LENOVO_TP410,
+ CXT_PINCFG_LEMOTE_A1004,
+ CXT_PINCFG_LEMOTE_A1205,
CXT_FIXUP_STEREO_DMIC,
+ CXT_FIXUP_INC_MIC_BOOST,
};
static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -4415,6 +4430,19 @@ static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
spec->fixup_stereo_dmic = 1;
}
+static void cxt5066_increase_mic_boost(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_override_amp_caps(codec, 0x17, HDA_OUTPUT,
+ (0x3 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x4 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x27 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (0 << AC_AMPCAP_MUTE_SHIFT));
+}
+
/* ThinkPad X200 & co with cxt5051 */
static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
@@ -4432,6 +4460,18 @@ static const struct hda_pintbl cxt_pincfg_lenovo_tp410[] = {
{}
};
+/* Lemote A1004/A1205 with cxt5066 */
+static const struct hda_pintbl cxt_pincfg_lemote[] = {
+ { 0x1a, 0x90a10020 }, /* Internal mic */
+ { 0x1b, 0x03a11020 }, /* External mic */
+ { 0x1d, 0x400101f0 }, /* Not used */
+ { 0x1e, 0x40a701f0 }, /* Not used */
+ { 0x20, 0x404501f0 }, /* Not used */
+ { 0x22, 0x404401f0 }, /* Not used */
+ { 0x23, 0x40a701f0 }, /* Not used */
+ {}
+};
+
static const struct hda_fixup cxt_fixups[] = {
[CXT_PINCFG_LENOVO_X200] = {
.type = HDA_FIXUP_PINS,
@@ -4441,10 +4481,24 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_lenovo_tp410,
},
+ [CXT_PINCFG_LEMOTE_A1004] = {
+ .type = HDA_FIXUP_PINS,
+ .chained = true,
+ .chain_id = CXT_FIXUP_INC_MIC_BOOST,
+ .v.pins = cxt_pincfg_lemote,
+ },
+ [CXT_PINCFG_LEMOTE_A1205] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = cxt_pincfg_lemote,
+ },
[CXT_FIXUP_STEREO_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_stereo_dmic,
},
+ [CXT_FIXUP_INC_MIC_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt5066_increase_mic_boost,
+ },
};
static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -4453,6 +4507,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
};
static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
@@ -4461,6 +4516,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+ SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
{}
};
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 71555cc54db..b6c21ea187c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -431,9 +431,11 @@ static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid)
if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin_nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
- /* Disable pin out until stream is active*/
+ /* Enable pin out: some machines with GM965 get broken output when
+ * the pin is disabled or changed while using with HDMI
+ */
snd_hda_codec_write(codec, pin_nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
}
static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid)
@@ -1193,12 +1195,11 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
struct hdmi_spec_per_pin *per_pin;
int err;
- caps = snd_hda_param_read(codec, pin_nid, AC_PAR_PIN_CAP);
+ caps = snd_hda_query_pin_caps(codec, pin_nid);
if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
return 0;
- config = snd_hda_codec_read(codec, pin_nid, 0,
- AC_VERB_GET_CONFIG_DEFAULT, 0);
+ config = snd_hda_codec_get_pincfg(codec, pin_nid);
if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
return 0;
@@ -1272,7 +1273,7 @@ static int hdmi_parse_codec(struct hda_codec *codec)
unsigned int caps;
unsigned int type;
- caps = snd_hda_param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP);
+ caps = get_wcaps(codec, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL))
@@ -1288,13 +1289,17 @@ static int hdmi_parse_codec(struct hda_codec *codec)
}
}
+#ifdef CONFIG_PM
+ /* We're seeing some problems with unsolicited hot plug events on
+ * PantherPoint after S3, if this is not enabled */
+ if (codec->vendor_id == 0x80862806)
+ codec->bus->power_keep_link_on = 1;
/*
* G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event
* can be lost and presence sense verb will become inaccurate if the
* HDA link is powered off at hot plug or hw initialization time.
*/
-#ifdef CONFIG_PM
- if (!(snd_hda_param_read(codec, codec->afg, AC_PAR_POWER_STATE) &
+ else if (!(snd_hda_param_read(codec, codec->afg, AC_PAR_POWER_STATE) &
AC_PWRST_EPSS))
codec->bus->power_keep_link_on = 1;
#endif
@@ -1338,7 +1343,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hdmi_spec *spec = codec->spec;
int pin_idx = hinfo_to_pin_index(spec, hinfo);
hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
- int pinctl;
bool non_pcm;
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
@@ -1347,11 +1351,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
- pinctl = snd_hda_codec_read(codec, pin_nid, 0,
- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
- snd_hda_codec_write(codec, pin_nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
-
return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
@@ -1371,7 +1370,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
int cvt_idx, pin_idx;
struct hdmi_spec_per_cvt *per_cvt;
struct hdmi_spec_per_pin *per_pin;
- int pinctl;
if (hinfo->nid) {
cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid);
@@ -1388,11 +1386,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
return -EINVAL;
per_pin = &spec->pins[pin_idx];
- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL,
- pinctl & ~PIN_OUT);
snd_hda_spdif_ctls_unassign(codec, pin_idx);
per_pin->chmap_set = false;
memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
@@ -1589,9 +1582,10 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
- err = snd_hda_create_spdif_out_ctls(codec,
- per_pin->pin_nid,
- per_pin->mux_nids[0]);
+ err = snd_hda_create_dig_out_ctls(codec,
+ per_pin->pin_nid,
+ per_pin->mux_nids[0],
+ HDA_PCM_TYPE_HDMI);
if (err < 0)
return err;
snd_hda_spdif_ctls_unassign(codec, pin_idx);
@@ -1687,6 +1681,30 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
.unsol_event = hdmi_unsol_event,
};
+static void intel_haswell_fixup_connect_list(struct hda_codec *codec)
+{
+ unsigned int vendor_param;
+ hda_nid_t list[3] = {0x2, 0x3, 0x4};
+
+ vendor_param = snd_hda_codec_read(codec, 0x08, 0, 0xf81, 0);
+ if (vendor_param == -1 || vendor_param & 0x02)
+ return;
+
+ /* enable DP1.2 mode */
+ vendor_param |= 0x02;
+ snd_hda_codec_read(codec, 0x08, 0, 0x781, vendor_param);
+
+ vendor_param = snd_hda_codec_read(codec, 0x08, 0, 0xf81, 0);
+ if (vendor_param == -1 || !(vendor_param & 0x02))
+ return;
+
+ /* override 3 pins connection list */
+ snd_hda_override_conn_list(codec, 0x05, 3, list);
+ snd_hda_override_conn_list(codec, 0x06, 3, list);
+ snd_hda_override_conn_list(codec, 0x07, 3, list);
+}
+
+
static int patch_generic_hdmi(struct hda_codec *codec)
{
struct hdmi_spec *spec;
@@ -1696,6 +1714,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
return -ENOMEM;
codec->spec = spec;
+
+ if (codec->vendor_id == 0x80862807)
+ intel_haswell_fixup_connect_list(codec);
+
if (hdmi_parse_codec(codec) < 0) {
codec->spec = NULL;
kfree(spec);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ad68d223f8a..6ee34593774 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -153,8 +153,8 @@ struct alc_spec {
const struct hda_channel_mode *channel_mode;
int num_channel_mode;
int need_dac_fix;
- int const_channel_count;
- int ext_channel_count;
+ int const_channel_count; /* min. channel count (for speakers) */
+ int ext_channel_count; /* current channel count for multi-io */
/* PCM information */
struct hda_pcm pcm_rec[3]; /* used in alc_build_pcms() */
@@ -815,28 +815,13 @@ static int alc_automute_mode_info(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct alc_spec *spec = codec->spec;
- static const char * const texts2[] = {
- "Disabled", "Enabled"
- };
static const char * const texts3[] = {
"Disabled", "Speaker Only", "Line Out+Speaker"
};
- const char * const *texts;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- if (spec->automute_speaker_possible && spec->automute_lo_possible) {
- uinfo->value.enumerated.items = 3;
- texts = texts3;
- } else {
- uinfo->value.enumerated.items = 2;
- texts = texts2;
- }
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ if (spec->automute_speaker_possible && spec->automute_lo_possible)
+ return snd_hda_enum_helper_info(kcontrol, uinfo, 3, texts3);
+ return snd_hda_enum_bool_helper_info(kcontrol, uinfo);
}
static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
@@ -903,23 +888,25 @@ static const struct snd_kcontrol_new alc_automute_mode_enum = {
.put = alc_automute_mode_put,
};
-static struct snd_kcontrol_new *alc_kcontrol_new(struct alc_spec *spec)
+static struct snd_kcontrol_new *
+alc_kcontrol_new(struct alc_spec *spec, const char *name,
+ const struct snd_kcontrol_new *temp)
{
- snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
- return snd_array_new(&spec->kctls);
+ struct snd_kcontrol_new *knew = snd_array_new(&spec->kctls);
+ if (!knew)
+ return NULL;
+ *knew = *temp;
+ knew->name = kstrdup(name, GFP_KERNEL);
+ if (!knew->name)
+ return NULL;
+ return knew;
}
static int alc_add_automute_mode_enum(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- struct snd_kcontrol_new *knew;
- knew = alc_kcontrol_new(spec);
- if (!knew)
- return -ENOMEM;
- *knew = alc_automute_mode_enum;
- knew->name = kstrdup("Auto-Mute Mode", GFP_KERNEL);
- if (!knew->name)
+ if (!alc_kcontrol_new(spec, "Auto-Mute Mode", &alc_automute_mode_enum))
return -ENOMEM;
return 0;
}
@@ -928,12 +915,12 @@ static int alc_add_automute_mode_enum(struct hda_codec *codec)
* Check the availability of HP/line-out auto-mute;
* Set up appropriately if really supported
*/
-static void alc_init_automute(struct hda_codec *codec)
+static int alc_init_automute(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
int present = 0;
- int i;
+ int i, err;
if (cfg->hp_pins[0])
present++;
@@ -942,7 +929,7 @@ static void alc_init_automute(struct hda_codec *codec)
if (cfg->speaker_pins[0])
present++;
if (present < 2) /* need two different output types */
- return;
+ return 0;
if (!cfg->speaker_pins[0] &&
cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
@@ -992,9 +979,13 @@ static void alc_init_automute(struct hda_codec *codec)
spec->automute_lo = spec->automute_lo_possible;
spec->automute_speaker = spec->automute_speaker_possible;
- if (spec->automute_speaker_possible || spec->automute_lo_possible)
+ if (spec->automute_speaker_possible || spec->automute_lo_possible) {
/* create a control for automute mode */
- alc_add_automute_mode_enum(codec);
+ err = alc_add_automute_mode_enum(codec);
+ if (err < 0)
+ return err;
+ }
+ return 0;
}
/* return the position of NID in the list, or -1 if not found */
@@ -1094,7 +1085,7 @@ static bool alc_auto_mic_check_imux(struct hda_codec *codec)
* Check the availability of auto-mic switch;
* Set up if really supported
*/
-static void alc_init_auto_mic(struct hda_codec *codec)
+static int alc_init_auto_mic(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
@@ -1102,7 +1093,7 @@ static void alc_init_auto_mic(struct hda_codec *codec)
int i;
if (spec->shared_mic_hp)
- return; /* no auto-mic for the shared I/O */
+ return 0; /* no auto-mic for the shared I/O */
spec->ext_mic_idx = spec->int_mic_idx = spec->dock_mic_idx = -1;
@@ -1114,25 +1105,25 @@ static void alc_init_auto_mic(struct hda_codec *codec)
switch (snd_hda_get_input_pin_attr(defcfg)) {
case INPUT_PIN_ATTR_INT:
if (fixed)
- return; /* already occupied */
+ return 0; /* already occupied */
if (cfg->inputs[i].type != AUTO_PIN_MIC)
- return; /* invalid type */
+ return 0; /* invalid type */
fixed = nid;
break;
case INPUT_PIN_ATTR_UNUSED:
- return; /* invalid entry */
+ return 0; /* invalid entry */
case INPUT_PIN_ATTR_DOCK:
if (dock)
- return; /* already occupied */
+ return 0; /* already occupied */
if (cfg->inputs[i].type > AUTO_PIN_LINE_IN)
- return; /* invalid type */
+ return 0; /* invalid type */
dock = nid;
break;
default:
if (ext)
- return; /* already occupied */
+ return 0; /* already occupied */
if (cfg->inputs[i].type != AUTO_PIN_MIC)
- return; /* invalid type */
+ return 0; /* invalid type */
ext = nid;
break;
}
@@ -1142,11 +1133,11 @@ static void alc_init_auto_mic(struct hda_codec *codec)
dock = 0;
}
if (!ext || !fixed)
- return;
+ return 0;
if (!is_jack_detectable(codec, ext))
- return; /* no unsol support */
+ return 0; /* no unsol support */
if (dock && !is_jack_detectable(codec, dock))
- return; /* no unsol support */
+ return 0; /* no unsol support */
/* check imux indices */
spec->ext_mic_pin = ext;
@@ -1155,17 +1146,26 @@ static void alc_init_auto_mic(struct hda_codec *codec)
spec->auto_mic = 1;
if (!alc_auto_mic_check_imux(codec))
- return;
+ return 0;
snd_printdd("realtek: Enable auto-mic switch on NID 0x%x/0x%x/0x%x\n",
ext, fixed, dock);
+
+ return 0;
}
/* check the availabilities of auto-mute and auto-mic switches */
-static void alc_auto_check_switches(struct hda_codec *codec)
+static int alc_auto_check_switches(struct hda_codec *codec)
{
- alc_init_automute(codec);
- alc_init_auto_mic(codec);
+ int err;
+
+ err = alc_init_automute(codec);
+ if (err < 0)
+ return err;
+ err = alc_init_auto_mic(codec);
+ if (err < 0)
+ return err;
+ return 0;
}
/*
@@ -1757,12 +1757,9 @@ static const struct snd_kcontrol_new alc_inv_dmic_sw = {
static int alc_add_inv_dmic_mixer(struct hda_codec *codec, hda_nid_t nid)
{
struct alc_spec *spec = codec->spec;
- struct snd_kcontrol_new *knew = alc_kcontrol_new(spec);
- if (!knew)
- return -ENOMEM;
- *knew = alc_inv_dmic_sw;
- knew->name = kstrdup("Inverted Internal Mic Capture Switch", GFP_KERNEL);
- if (!knew->name)
+
+ if (!alc_kcontrol_new(spec, "Inverted Internal Mic Capture Switch",
+ &alc_inv_dmic_sw))
return -ENOMEM;
spec->inv_dmic_fixup = 1;
spec->inv_dmic_muted = 0;
@@ -1836,9 +1833,10 @@ static int __alc_build_controls(struct hda_codec *codec)
return err;
}
if (spec->multiout.dig_out_nid) {
- err = snd_hda_create_spdif_out_ctls(codec,
- spec->multiout.dig_out_nid,
- spec->multiout.dig_out_nid);
+ err = snd_hda_create_dig_out_ctls(codec,
+ spec->multiout.dig_out_nid,
+ spec->multiout.dig_out_nid,
+ spec->pcm_rec[1].pcm_type);
if (err < 0)
return err;
if (!spec->no_analog) {
@@ -2259,6 +2257,10 @@ static int alc_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0];
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
spec->multiout.max_channels;
+ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT &&
+ spec->autocfg.line_outs == 2)
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap =
+ snd_pcm_2_1_chmaps;
}
if (spec->adc_nids) {
p = spec->stream_analog_capture;
@@ -2399,7 +2401,6 @@ static void alc_free(struct hda_codec *codec)
if (!spec)
return;
- alc_shutup(codec);
alc_free_kctls(codec);
alc_free_bind_ctls(codec);
snd_hda_gen_free(&spec->gen);
@@ -2534,13 +2535,9 @@ static int add_control(struct alc_spec *spec, int type, const char *name,
{
struct snd_kcontrol_new *knew;
- knew = alc_kcontrol_new(spec);
+ knew = alc_kcontrol_new(spec, name, &alc_control_templates[type]);
if (!knew)
return -ENOMEM;
- *knew = alc_control_templates[type];
- knew->name = kstrdup(name, GFP_KERNEL);
- if (!knew->name)
- return -ENOMEM;
knew->index = cidx;
if (get_amp_nid_(val))
knew->subdevice = HDA_SUBDEV_AMP_FLAG;
@@ -3601,7 +3598,6 @@ static struct hda_bind_ctls *new_bind_ctl(struct hda_codec *codec,
{
struct alc_spec *spec = codec->spec;
struct hda_bind_ctls **ctlp, *ctl;
- snd_array_init(&spec->bind_ctls, sizeof(ctl), 8);
ctlp = snd_array_new(&spec->bind_ctls);
if (!ctlp)
return NULL;
@@ -3965,8 +3961,9 @@ static int alc_auto_ch_mode_put(struct snd_kcontrol *kcontrol,
spec->ext_channel_count = (ch + 1) * 2;
for (i = 0; i < spec->multi_ios; i++)
alc_set_multi_io(codec, i, i < ch);
- spec->multiout.max_channels = spec->ext_channel_count;
- if (spec->need_dac_fix && !spec->const_channel_count)
+ spec->multiout.max_channels = max(spec->ext_channel_count,
+ spec->const_channel_count);
+ if (spec->need_dac_fix)
spec->multiout.num_dacs = spec->multiout.max_channels / 2;
return 1;
}
@@ -3984,14 +3981,8 @@ static int alc_auto_add_multi_channel_mode(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
if (spec->multi_ios > 0) {
- struct snd_kcontrol_new *knew;
-
- knew = alc_kcontrol_new(spec);
- if (!knew)
- return -ENOMEM;
- *knew = alc_auto_channel_mode_enum;
- knew->name = kstrdup("Channel Mode", GFP_KERNEL);
- if (!knew->name)
+ if (!alc_kcontrol_new(spec, "Channel Mode",
+ &alc_auto_channel_mode_enum))
return -ENOMEM;
}
return 0;
@@ -4334,7 +4325,17 @@ static int alc_parse_auto_config(struct hda_codec *codec,
if (err < 0)
return err;
- spec->multiout.max_channels = spec->multiout.num_dacs * 2;
+ /* check the multiple speaker pins */
+ if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ spec->const_channel_count = cfg->line_outs * 2;
+ else
+ spec->const_channel_count = cfg->speaker_outs * 2;
+
+ if (spec->multi_ios > 0)
+ spec->multiout.max_channels = max(spec->ext_channel_count,
+ spec->const_channel_count);
+ else
+ spec->multiout.max_channels = spec->multiout.num_dacs * 2;
dig_only:
alc_auto_parse_digital(codec);
@@ -4346,7 +4347,9 @@ static int alc_parse_auto_config(struct hda_codec *codec,
alc_ssid_check(codec, ssid_nids);
if (!spec->no_analog) {
- alc_auto_check_switches(codec);
+ err = alc_auto_check_switches(codec);
+ if (err < 0)
+ return err;
err = alc_auto_add_mic_boost(codec);
if (err < 0)
return err;
@@ -4370,8 +4373,11 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
if (!spec)
return -ENOMEM;
codec->spec = spec;
+ codec->single_adc_amp = 1;
spec->mixer_nid = mixer_nid;
snd_hda_gen_init(&spec->gen);
+ snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
+ snd_array_init(&spec->bind_ctls, sizeof(struct hda_bind_ctls *), 8);
err = alc_codec_rename_from_preset(codec);
if (err < 0) {
@@ -6009,6 +6015,16 @@ static void alc269_fixup_mic2_mute(struct hda_codec *codec,
}
}
+static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
+ const struct alc_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == ALC_FIXUP_ACT_PROBE)
+ snd_hda_jack_set_gating_jack(codec, spec->ext_mic_pin,
+ spec->autocfg.hp_pins[0]);
+}
enum {
ALC269_FIXUP_SONY_VAIO,
@@ -6031,6 +6047,8 @@ enum {
ALC269_FIXUP_INV_DMIC,
ALC269_FIXUP_LENOVO_DOCK,
ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ ALC271_FIXUP_AMIC_MIC2,
+ ALC271_FIXUP_HP_GATE_MIC_JACK,
};
static const struct alc_fixup alc269_fixups[] = {
@@ -6175,6 +6193,22 @@ static const struct alc_fixup alc269_fixups[] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
},
+ [ALC271_FIXUP_AMIC_MIC2] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x99130110 }, /* speaker */
+ { 0x19, 0x01a19c20 }, /* mic */
+ { 0x1b, 0x99a7012f }, /* int-mic */
+ { 0x21, 0x0121401f }, /* HP out */
+ { }
+ },
+ },
+ [ALC271_FIXUP_HP_GATE_MIC_JACK] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc271_hp_gate_mic_jack,
+ .chained = true,
+ .chain_id = ALC271_FIXUP_AMIC_MIC2,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6195,6 +6229,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
@@ -6535,8 +6570,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec,
const struct alc_fixup *fix, int action)
{
if (action == ALC_FIXUP_ACT_PRE_PROBE) {
- snd_hda_override_pin_caps(codec, 0x18, 0x00001714);
- snd_hda_override_pin_caps(codec, 0x19, 0x0000171c);
+ snd_hda_override_pin_caps(codec, 0x18, 0x00000734);
+ snd_hda_override_pin_caps(codec, 0x19, 0x0000073c);
}
}
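
Most of the Realtek churn above is one refactor applied repeatedly: the open-coded "grab a kcontrol slot, copy a template, kstrdup the name" sequence becomes a single alc_kcontrol_new(spec, name, template) helper, and the snd_array_init() calls migrate to alc_alloc_spec() so they run exactly once. The standalone sketch below shows the same template-copy idiom over a minimal growable array; the structures and helpers are simplified stand-ins, not the ALSA snd_array/snd_kcontrol_new API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* trimmed-down stand-ins for snd_kcontrol_new and snd_array */
struct ctl_template {
    const char *name;
    int index;
};

struct ctl_array {
    struct ctl_template *list;
    size_t used, alloced;
};

static struct ctl_template *ctl_array_new(struct ctl_array *a)
{
    if (a->used == a->alloced) {
        size_t n = a->alloced ? a->alloced * 2 : 8;
        void *p = realloc(a->list, n * sizeof(*a->list));
        if (!p)
            return NULL;
        a->list = p;
        a->alloced = n;
    }
    return &a->list[a->used++];
}

/* copy a template into a fresh slot and give it its own name string,
 * mirroring the new alc_kcontrol_new(spec, name, temp) helper */
static struct ctl_template *ctl_new(struct ctl_array *a, const char *name,
                                    const struct ctl_template *temp)
{
    struct ctl_template *knew = ctl_array_new(a);

    if (!knew)
        return NULL;
    *knew = *temp;
    knew->name = strdup(name);
    return knew->name ? knew : NULL;
}

int main(void)
{
    static const struct ctl_template automute_template = { "template", 0 };
    struct ctl_array kctls = { NULL, 0, 0 };

    if (!ctl_new(&kctls, "Auto-Mute Mode", &automute_template))
        return 1;
    printf("added control: %s\n", kctls.list[0].name);
    free((char *)kctls.list[0].name);
    free(kctls.list);
    return 0;
}

The companion change of making alc_init_automute()/alc_init_auto_mic() return int lets an allocation failure in alc_add_automute_mode_enum() finally propagate back to the caller instead of being dropped.
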
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ba8af05617..a86547ca17c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1081,7 +1081,7 @@ static struct snd_kcontrol_new stac_smux_mixer = {
static const char * const slave_pfxs[] = {
"Front", "Surround", "Center", "LFE", "Side",
- "Headphone", "Speaker", "IEC958", "PCM",
+ "Headphone", "Speaker", "Bass Speaker", "IEC958", "PCM",
NULL
};
@@ -1136,9 +1136,10 @@ static int stac92xx_build_controls(struct hda_codec *codec)
}
if (spec->multiout.dig_out_nid) {
- err = snd_hda_create_spdif_out_ctls(codec,
- spec->multiout.dig_out_nid,
- spec->multiout.dig_out_nid);
+ err = snd_hda_create_dig_out_ctls(codec,
+ spec->multiout.dig_out_nid,
+ spec->multiout.dig_out_nid,
+ spec->autocfg.dig_out_type[0]);
if (err < 0)
return err;
err = snd_hda_create_spdif_share_sw(codec,
@@ -1724,7 +1725,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
"HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
- "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ "HP Pavilion dv7", STAC_HP_DV7_4000),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
"HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
@@ -2515,6 +2516,11 @@ static int stac92xx_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = stac92xx_pcm_analog_playback;
info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
spec->multiout.dac_nids[0];
+ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT &&
+ spec->autocfg.line_outs == 2)
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap =
+ snd_pcm_2_1_chmaps;
+
info->stream[SNDRV_PCM_STREAM_CAPTURE] = stac92xx_pcm_analog_capture;
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adcs;
@@ -2805,7 +2811,6 @@ stac_control_new(struct sigmatel_spec *spec,
{
struct snd_kcontrol_new *knew;
- snd_array_init(&spec->kctls, sizeof(*knew), 32);
knew = snd_array_new(&spec->kctls);
if (!knew)
return NULL;
@@ -3268,9 +3273,9 @@ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
idx = i;
break;
case AUTO_PIN_SPEAKER_OUT:
- if (num_outs <= 1) {
- name = "Speaker";
- idx = i;
+ if (num_outs <= 2) {
+ name = i ? "Bass Speaker" : "Speaker";
+ idx = 0;
break;
}
/* Fall through in case of multi speaker outs */
@@ -4569,8 +4574,6 @@ static void stac92xx_free(struct hda_codec *codec)
if (! spec)
return;
- stac92xx_shutup(codec);
-
kfree(spec);
snd_hda_detach_beep_device(codec);
}
@@ -5155,20 +5158,34 @@ static const struct hda_codec_ops stac92xx_patch_ops = {
.reboot_notify = stac92xx_shutup,
};
+static int alloc_stac_spec(struct hda_codec *codec, int num_pins,
+ const hda_nid_t *pin_nids)
+{
+ struct sigmatel_spec *spec;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ codec->spec = spec;
+ codec->no_trigger_sense = 1; /* seems common with STAC/IDT codecs */
+ spec->num_pins = num_pins;
+ spec->pin_nids = pin_nids;
+ snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
+ return 0;
+}
+
static int patch_stac9200(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac9200_pin_nids),
+ stac9200_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 1;
- spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
- spec->pin_nids = stac9200_pin_nids;
spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS,
stac9200_models,
stac9200_cfg_tbl);
@@ -5224,15 +5241,13 @@ static int patch_stac925x(struct hda_codec *codec)
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac925x_pin_nids),
+ stac925x_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 1;
- spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
- spec->pin_nids = stac925x_pin_nids;
/* Check first for codec ID */
spec->board_config = snd_hda_check_board_codec_sid_config(codec,
@@ -5307,19 +5322,17 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
hda_nid_t conn[STAC92HD73_DAC_COUNT + 2];
- int err = 0;
+ int err;
int num_dacs;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac92hd73xx_pin_nids),
+ stac92hd73xx_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 0;
codec->slave_dig_outs = stac92hd73xx_slave_dig_outs;
- spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids);
- spec->pin_nids = stac92hd73xx_pin_nids;
spec->board_config = snd_hda_check_board_config(codec,
STAC_92HD73XX_MODELS,
stac92hd73xx_models,
@@ -5596,9 +5609,9 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
int default_polarity = -1; /* no default cfg */
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, 0, NULL); /* pins filled later */
+ if (err < 0)
+ return err;
if (hp_bnb2011_with_dock(codec)) {
snd_hda_codec_set_pincfg(codec, 0xa, 0x2101201f);
@@ -5606,11 +5619,9 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
}
codec->epss = 0; /* longer delay needed for D3 */
- codec->no_trigger_sense = 1;
- codec->spec = spec;
-
stac92hd8x_fill_auto_spec(codec);
+ spec = codec->spec;
spec->linear_tone_beep = 0;
codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
spec->digbeep_nid = 0x21;
@@ -5779,21 +5790,19 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
struct sigmatel_spec *spec;
const struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init;
unsigned int pin_cfg;
- int err = 0;
+ int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, STAC92HD71BXX_NUM_PINS,
+ stac92hd71bxx_pin_nids_4port);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 0;
codec->patch_ops = stac92xx_patch_ops;
- spec->num_pins = STAC92HD71BXX_NUM_PINS;
switch (codec->vendor_id) {
case 0x111d76b6:
case 0x111d76b7:
- spec->pin_nids = stac92hd71bxx_pin_nids_4port;
break;
case 0x111d7603:
case 0x111d7608:
@@ -6024,15 +6033,13 @@ static int patch_stac922x(struct hda_codec *codec)
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac922x_pin_nids),
+ stac922x_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 1;
- spec->num_pins = ARRAY_SIZE(stac922x_pin_nids);
- spec->pin_nids = stac922x_pin_nids;
spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS,
stac922x_models,
stac922x_cfg_tbl);
@@ -6129,16 +6136,14 @@ static int patch_stac927x(struct hda_codec *codec)
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac927x_pin_nids),
+ stac927x_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 1;
codec->slave_dig_outs = stac927x_slave_dig_outs;
- spec->num_pins = ARRAY_SIZE(stac927x_pin_nids);
- spec->pin_nids = stac927x_pin_nids;
spec->board_config = snd_hda_check_board_config(codec, STAC_927X_MODELS,
stac927x_models,
stac927x_cfg_tbl);
@@ -6265,15 +6270,13 @@ static int patch_stac9205(struct hda_codec *codec)
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac9205_pin_nids),
+ stac9205_pin_nids);
+ if (err < 0)
+ return err;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ spec = codec->spec;
spec->linear_tone_beep = 1;
- spec->num_pins = ARRAY_SIZE(stac9205_pin_nids);
- spec->pin_nids = stac9205_pin_nids;
spec->board_config = snd_hda_check_board_config(codec, STAC_9205_MODELS,
stac9205_models,
stac9205_cfg_tbl);
@@ -6421,14 +6424,13 @@ static int patch_stac9872(struct hda_codec *codec)
struct sigmatel_spec *spec;
int err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
- codec->no_trigger_sense = 1;
- codec->spec = spec;
+ err = alloc_stac_spec(codec, ARRAY_SIZE(stac9872_pin_nids),
+ stac9872_pin_nids);
+ if (err < 0)
+ return err;
+
+ spec = codec->spec;
spec->linear_tone_beep = 1;
- spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
- spec->pin_nids = stac9872_pin_nids;
spec->board_config = snd_hda_check_board_config(codec, STAC_9872_MODELS,
stac9872_models,
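
patch_sigmatel.c gets the same treatment: every patch_stac*() entry used to open-code kzalloc(), the no_trigger_sense flag, the pin table and the kctls array, and all of that now funnels through alloc_stac_spec(). A minimal user-space analogue of that consolidation is sketched below; the field names are simplified and plain calloc() stands in for kzalloc(), so treat it as the shape of the refactor rather than the driver code.

#include <stdio.h>
#include <stdlib.h>

struct spec {
    int num_pins;
    const int *pin_nids;
    int linear_tone_beep;
};

struct codec {
    struct spec *spec;
    int no_trigger_sense;
};

/* shared allocator: each patch_xxx() used to repeat this sequence */
static int alloc_spec(struct codec *codec, int num_pins, const int *pin_nids)
{
    struct spec *spec = calloc(1, sizeof(*spec));

    if (!spec)
        return -1; /* stands in for -ENOMEM */
    codec->spec = spec;
    codec->no_trigger_sense = 1; /* common to all these codecs */
    spec->num_pins = num_pins;
    spec->pin_nids = pin_nids;
    return 0;
}

static const int demo_pin_nids[] = { 0x0a, 0x0b, 0x0c };

int main(void)
{
    struct codec codec = { 0 };

    if (alloc_spec(&codec, 3, demo_pin_nids) < 0)
        return 1;
    codec.spec->linear_tone_beep = 1; /* per-codec tweaks stay in the caller */
    printf("pins: %d\n", codec.spec->num_pins);
    free(codec.spec);
    return 0;
}

The stac92hd83xxx and stac92hd71bxx variants pass 0/NULL or a default pin table and adjust the details afterwards, which is why the helper only carries the truly common part.
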
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 019e1a00414..09bb64996d7 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -76,6 +76,8 @@ enum VIA_HDA_CODEC {
VT2002P,
VT1812,
VT1802,
+ VT1705CF,
+ VT1808,
CODEC_TYPES,
};
@@ -220,6 +222,7 @@ struct via_spec {
int vt1708_hp_present;
void (*set_widgets_power_state)(struct hda_codec *codec);
+ unsigned int dac_stream_tag[4];
struct hda_loopback_check loopback;
int num_loopbacks;
@@ -241,6 +244,7 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
if (spec == NULL)
return NULL;
+ snd_array_init(&spec->kctls, sizeof(struct snd_kcontrol_new), 32);
mutex_init(&spec->config_mutex);
codec->spec = spec;
spec->codec = codec;
@@ -295,6 +299,10 @@ static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec)
codec_type = VT1708S;
else if ((dev_id & 0xfff) == 0x446)
codec_type = VT1802;
+ else if (dev_id == 0x4760)
+ codec_type = VT1705CF;
+ else if (dev_id == 0x4761 || dev_id == 0x4762)
+ codec_type = VT1808;
else
codec_type = UNKNOWN;
return codec_type;
@@ -387,7 +395,6 @@ static struct snd_kcontrol_new *__via_clone_ctl(struct via_spec *spec,
{
struct snd_kcontrol_new *knew;
- snd_array_init(&spec->kctls, sizeof(*knew), 32);
knew = snd_array_new(&spec->kctls);
if (!knew)
return NULL;
@@ -711,6 +718,28 @@ static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
}
+static void update_conv_power_state(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int parm, unsigned int index)
+{
+ struct via_spec *spec = codec->spec;
+ unsigned int format;
+ if (snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_POWER_STATE, 0) == parm)
+ return;
+ format = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+ if (format && (spec->dac_stream_tag[index] != format))
+ spec->dac_stream_tag[index] = format;
+
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
+ if (parm == AC_PWRST_D0) {
+ format = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+ if (!format && (spec->dac_stream_tag[index] != format))
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_CHANNEL_STREAMID,
+ spec->dac_stream_tag[index]);
+ }
+}
+
static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
unsigned int *affected_parm)
{
@@ -739,18 +768,7 @@ static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static const char * const texts[] = {
- "Disabled", "Enabled"
- };
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
- return 0;
+ return snd_hda_enum_bool_helper_info(kcontrol, uinfo);
}
static int via_pin_power_ctl_get(struct snd_kcontrol *kcontrol,
@@ -1096,6 +1114,11 @@ static void __analog_low_current_mode(struct hda_codec *codec, bool force)
verb = 0xf93;
parm = enable ? 0x00 : 0xe0; /* 0x00: 4/40x, 0xe0: 1x */
break;
+ case VT1705CF:
+ case VT1808:
+ verb = 0xf82;
+ parm = enable ? 0x00 : 0xe0; /* 0x00: 4/40x, 0xe0: 1x */
+ break;
default:
return; /* other codecs are not supported */
}
@@ -1454,7 +1477,7 @@ static const struct hda_pcm_stream via_pcm_digital_capture = {
*/
static const char * const via_slave_pfxs[] = {
"Front", "Surround", "Center", "LFE", "Side",
- "Headphone", "Speaker",
+ "Headphone", "Speaker", "Bass Speaker",
NULL,
};
@@ -1555,6 +1578,10 @@ static int via_build_pcms(struct hda_codec *codec)
spec->multiout.dac_nids[0];
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
spec->multiout.max_channels;
+ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT
+ && spec->autocfg.line_outs == 2)
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap =
+ snd_pcm_2_1_chmaps;
}
if (!spec->stream_analog_capture) {
@@ -1934,7 +1961,7 @@ static int via_auto_create_multi_out_ctls(struct hda_codec *codec)
struct auto_pin_cfg *cfg = &spec->autocfg;
struct nid_path *path;
static const char * const chname[4] = {
- "Front", "Surround", "C/LFE", "Side"
+ "Front", "Surround", NULL /* "CLFE" */, "Side"
};
int i, idx, err;
int old_line_outs;
@@ -1969,8 +1996,8 @@ static int via_auto_create_multi_out_ctls(struct hda_codec *codec)
} else {
const char *pfx = chname[i];
if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
- cfg->line_outs == 1)
- pfx = "Speaker";
+ cfg->line_outs <= 2)
+ pfx = i ? "Bass Speaker" : "Speaker";
err = create_ch_ctls(codec, pfx, 3, true, path);
if (err < 0)
return err;
@@ -3824,6 +3851,125 @@ static int patch_vt1812(struct hda_codec *codec)
return 0;
}
+/* patch for vt3476 */
+
+static const struct hda_verb vt3476_init_verbs[] = {
+ /* Enable DMic 8/16/32K */
+ {0x1, 0xF7B, 0x30},
+ /* Enable Boost Volume backdoor */
+ {0x1, 0xFB9, 0x20},
+ /* Enable AOW-MW9 path */
+ {0x1, 0xFB8, 0x10},
+ { }
+};
+
+static void set_widgets_power_state_vt3476(struct hda_codec *codec)
+{
+ struct via_spec *spec = codec->spec;
+ int imux_is_smixer;
+ unsigned int parm, parm2;
+ /* MUX10 (1eh) = stereo mixer */
+ imux_is_smixer =
+ snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 4;
+ /* inputs */
+ /* PW 5/6/7 (29h/2ah/2bh) */
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x29, &parm);
+ set_pin_power_state(codec, 0x2a, &parm);
+ set_pin_power_state(codec, 0x2b, &parm);
+ if (imux_is_smixer)
+ parm = AC_PWRST_D0;
+ /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */
+ update_power_state(codec, 0x1e, parm);
+ update_power_state(codec, 0x1f, parm);
+ update_power_state(codec, 0x10, parm);
+ update_power_state(codec, 0x11, parm);
+
+ /* outputs */
+ /* PW3 (27h), MW3(37h), AOW3 (bh) */
+ if (spec->codec_type == VT1705CF) {
+ parm = AC_PWRST_D3;
+ update_power_state(codec, 0x27, parm);
+ update_power_state(codec, 0x37, parm);
+ } else {
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x27, &parm);
+ update_power_state(codec, 0x37, parm);
+ }
+
+ /* PW2 (26h), MW2(36h), AOW2 (ah) */
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x26, &parm);
+ update_power_state(codec, 0x36, parm);
+ if (spec->smart51_enabled) {
+ /* PW7(2bh), MW7(3bh), MUX7(1Bh) */
+ set_pin_power_state(codec, 0x2b, &parm);
+ update_power_state(codec, 0x3b, parm);
+ update_power_state(codec, 0x1b, parm);
+ }
+ update_conv_power_state(codec, 0xa, parm, 2);
+
+ /* PW1 (25h), MW1(35h), AOW1 (9h) */
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x25, &parm);
+ update_power_state(codec, 0x35, parm);
+ if (spec->smart51_enabled) {
+ /* PW6(2ah), MW6(3ah), MUX6(1ah) */
+ set_pin_power_state(codec, 0x2a, &parm);
+ update_power_state(codec, 0x3a, parm);
+ update_power_state(codec, 0x1a, parm);
+ }
+ update_conv_power_state(codec, 0x9, parm, 1);
+
+ /* PW4 (28h), MW4 (38h), MUX4(18h), AOW3(bh)/AOW0(8h) */
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x28, &parm);
+ update_power_state(codec, 0x38, parm);
+ update_power_state(codec, 0x18, parm);
+ if (spec->hp_independent_mode)
+ update_conv_power_state(codec, 0xb, parm, 3);
+ parm2 = parm; /* for pin 0x0b */
+
+ /* PW0 (24h), MW0(34h), MW9(3fh), AOW0 (8h) */
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x24, &parm);
+ update_power_state(codec, 0x34, parm);
+ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
+ parm = parm2;
+ update_conv_power_state(codec, 0x8, parm, 0);
+ /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
+ update_power_state(codec, 0x3f, imux_is_smixer ? AC_PWRST_D0 : parm);
+}
+
+static int patch_vt3476(struct hda_codec *codec)
+{
+ struct via_spec *spec;
+ int err;
+
+ /* create a codec specific record */
+ spec = via_new_spec(codec);
+ if (spec == NULL)
+ return -ENOMEM;
+
+ spec->aa_mix_nid = 0x3f;
+ add_secret_dac_path(codec);
+
+ /* automatic parse from the BIOS config */
+ err = via_parse_auto_config(codec);
+ if (err < 0) {
+ via_free(codec);
+ return err;
+ }
+
+ spec->init_verbs[spec->num_iverbs++] = vt3476_init_verbs;
+
+ codec->patch_ops = via_patch_ops;
+
+ spec->set_widgets_power_state = set_widgets_power_state_vt3476;
+
+ return 0;
+}
+
/*
* patch entries
*/
@@ -3917,6 +4063,12 @@ static const struct hda_codec_preset snd_hda_preset_via[] = {
.patch = patch_vt2002P},
{ .id = 0x11068446, .name = "VT1802",
.patch = patch_vt2002P},
+ { .id = 0x11064760, .name = "VT1705CF",
+ .patch = patch_vt3476},
+ { .id = 0x11064761, .name = "VT1708SCE",
+ .patch = patch_vt3476},
+ { .id = 0x11064762, .name = "VT1808",
+ .patch = patch_vt3476},
{} /* terminator */
};
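
The VT1705CF/VT1808 support above also adds update_conv_power_state(), which works around converters that lose their stream tag across a power-state change: the tag is cached in spec->dac_stream_tag[] before the widget is powered down and written back if it reads as zero once the widget is back in D0. The sketch below reproduces that save-and-restore idea against a mocked converter; the types and helpers are invented for the illustration and are not the HD-audio verb interface.

#include <stdio.h>

enum pwr { D0, D3 };

/* mocked converter that forgets its stream tag while in D3 */
static struct {
    enum pwr state;
    unsigned int stream_tag;
} conv;

static void conv_set_power(enum pwr p)
{
    conv.state = p;
    if (p == D3)
        conv.stream_tag = 0; /* the quirk being worked around */
}

static unsigned int saved_tag; /* analogue of spec->dac_stream_tag[] */

static void update_conv_power_state(enum pwr p)
{
    if (conv.state == p)
        return; /* already in the requested state */
    if (conv.stream_tag)
        saved_tag = conv.stream_tag; /* remember it before it is lost */

    conv_set_power(p);

    if (p == D0 && !conv.stream_tag && saved_tag)
        conv.stream_tag = saved_tag; /* restore after wake-up */
}

int main(void)
{
    conv.state = D0;
    conv.stream_tag = 0x5;
    update_conv_power_state(D3);
    update_conv_power_state(D0);
    printf("stream tag after resume: 0x%x\n", conv.stream_tag);
    return 0;
}
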
diff --git a/sound/pci/ice1712/Makefile b/sound/pci/ice1712/Makefile
index f7ce33f00ea..7e50c132455 100644
--- a/sound/pci/ice1712/Makefile
+++ b/sound/pci/ice1712/Makefile
@@ -5,7 +5,7 @@
snd-ice17xx-ak4xxx-objs := ak4xxx.o
snd-ice1712-objs := ice1712.o delta.o hoontech.o ews.o
-snd-ice1724-objs := ice1724.o amp.o revo.o aureon.o vt1720_mobo.o pontis.o prodigy192.o prodigy_hifi.o juli.o phase.o wtm.o se.o maya44.o quartet.o
+snd-ice1724-objs := ice1724.o amp.o revo.o aureon.o vt1720_mobo.o pontis.o prodigy192.o prodigy_hifi.o juli.o phase.o wtm.o se.o maya44.o quartet.o psc724.o wm8766.o wm8776.o
# Toplevel Module Dependency
obj-$(CONFIG_SND_ICE1712) += snd-ice1712.o snd-ice17xx-ak4xxx.o
diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
index e525da2673b..2f9b9346786 100644
--- a/sound/pci/ice1712/amp.c
+++ b/sound/pci/ice1712/amp.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -38,7 +37,7 @@ static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val)
snd_vt1724_write_i2c(ice, WM_DEV, cval >> 8, cval & 0xff);
}
-static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
+static int snd_vt1724_amp_init(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits[] = {
WM_ATTEN_L, 0x0000, /* 0 db */
@@ -66,7 +65,7 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
+static int snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
{
if (ice->ac97)
/* we use pins 39 and 41 of the VT1616 for left and right
@@ -78,7 +77,7 @@ static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_amp_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_amp_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_AV710,
.name = "Chaintech AV-710",
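
From amp.c onwards the ice1712 changes are mostly mechanical: the __devinit/__devinitdata section annotations are dropped from probe-time functions and card/EEPROM tables, alongside a few unused <asm/io.h>/<linux/io.h> includes. With hotplug support always built in, those markers had effectively stopped moving anything into a discardable section, so deleting them changes no behaviour. As a rough illustration of what such a marker amounts to, the toy macro below either tags a function with a named section or expands to nothing; it is not the kernel's actual definition of __devinit.

#include <stdio.h>

#ifdef USE_INIT_SECTION
#define my_devinit __attribute__((section(".devinit.text")))
#else
#define my_devinit /* expands to nothing, as after the kernel cleanup */
#endif

static int my_devinit probe_demo(void)
{
    return 42; /* pretend probe result */
}

int main(void)
{
    printf("probe: %d\n", probe_demo());
    return 0;
}
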
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 20bcddea2ea..55902ec4034 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -46,7 +46,6 @@
* on mixer switch and other coll stuff.
* on mixer switch and other cool stuff.
*/
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -203,7 +202,8 @@ static void aureon_pca9554_write(struct snd_ice1712 *ice, unsigned char reg,
static int aureon_universe_inmux_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- char *texts[3] = {"Internal Aux", "Wavetable", "Rear Line-In"};
+ static const char * const texts[3] =
+ {"Internal Aux", "Wavetable", "Rear Line-In"};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -1433,7 +1433,7 @@ static int aureon_oversampling_put(struct snd_kcontrol *kcontrol, struct snd_ctl
* mixers
*/
-static struct snd_kcontrol_new aureon_dac_controls[] __devinitdata = {
+static struct snd_kcontrol_new aureon_dac_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -1548,7 +1548,7 @@ static struct snd_kcontrol_new aureon_dac_controls[] __devinitdata = {
}
};
-static struct snd_kcontrol_new wm_controls[] __devinitdata = {
+static struct snd_kcontrol_new wm_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
@@ -1614,7 +1614,7 @@ static struct snd_kcontrol_new wm_controls[] __devinitdata = {
}
};
-static struct snd_kcontrol_new ac97_controls[] __devinitdata = {
+static struct snd_kcontrol_new ac97_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "AC97 Playback Switch",
@@ -1719,7 +1719,7 @@ static struct snd_kcontrol_new ac97_controls[] __devinitdata = {
}
};
-static struct snd_kcontrol_new universe_ac97_controls[] __devinitdata = {
+static struct snd_kcontrol_new universe_ac97_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "AC97 Playback Switch",
@@ -1851,7 +1851,7 @@ static struct snd_kcontrol_new universe_ac97_controls[] __devinitdata = {
};
-static struct snd_kcontrol_new cs8415_controls[] __devinitdata = {
+static struct snd_kcontrol_new cs8415_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, SWITCH),
@@ -1896,7 +1896,7 @@ static struct snd_kcontrol_new cs8415_controls[] __devinitdata = {
}
};
-static int __devinit aureon_add_controls(struct snd_ice1712 *ice)
+static int aureon_add_controls(struct snd_ice1712 *ice)
{
unsigned int i, counts;
int err;
@@ -2124,7 +2124,7 @@ static int aureon_resume(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit aureon_init(struct snd_ice1712 *ice)
+static int aureon_init(struct snd_ice1712 *ice)
{
struct aureon_spec *spec;
int i, err;
@@ -2174,7 +2174,7 @@ static int __devinit aureon_init(struct snd_ice1712 *ice)
* hence the driver needs to sets up it properly.
* hence the driver needs to set it up properly.
*/
-static unsigned char aureon51_eeprom[] __devinitdata = {
+static unsigned char aureon51_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x0a, /* clock 512, spdif-in/ADC, 3DACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xfc, /* vol, 96k, 24bit, 192k */
@@ -2190,7 +2190,7 @@ static unsigned char aureon51_eeprom[] __devinitdata = {
[ICE_EEP2_GPIO_STATE2] = 0x00,
};
-static unsigned char aureon71_eeprom[] __devinitdata = {
+static unsigned char aureon71_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x0b, /* clock 512, spdif-in/ADC, 4DACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xfc, /* vol, 96k, 24bit, 192k */
@@ -2207,7 +2207,7 @@ static unsigned char aureon71_eeprom[] __devinitdata = {
};
#define prodigy71_eeprom aureon71_eeprom
-static unsigned char aureon71_universe_eeprom[] __devinitdata = {
+static unsigned char aureon71_universe_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x2b, /* clock 512, mpu401, spdif-in/ADC,
* 4DACs
*/
@@ -2225,7 +2225,7 @@ static unsigned char aureon71_universe_eeprom[] __devinitdata = {
[ICE_EEP2_GPIO_STATE2] = 0x00,
};
-static unsigned char prodigy71lt_eeprom[] __devinitdata = {
+static unsigned char prodigy71lt_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x4b, /* clock 384, spdif-in/ADC, 4DACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xfc, /* vol, 96k, 24bit, 192k */
@@ -2243,7 +2243,7 @@ static unsigned char prodigy71lt_eeprom[] __devinitdata = {
#define prodigy71xt_eeprom prodigy71lt_eeprom
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_aureon_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_aureon_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_AUREON51_SKY,
.name = "Terratec Aureon 5.1-Sky",
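
Several *_info callbacks touched in this series do the same job: clamp the requested item index against a const table of texts and copy the chosen string into the element info. The open-coded versions removed from patch_realtek.c and patch_via.c earlier in this diff show the pattern in full, and the aureon/ews changes here merely constify their text tables. A generic user-space rendering of that helper is below; struct elem_info is a cut-down stand-in for snd_ctl_elem_info, not the ALSA definition, and the helper name is invented.

#include <stdio.h>
#include <string.h>

/* cut-down stand-in for the enumerated part of snd_ctl_elem_info */
struct elem_info {
    unsigned int items; /* number of enum entries */
    unsigned int item;  /* index requested by the caller */
    char name[64];      /* text reported back */
};

/* clamp the index and copy the text -- the common body of these callbacks */
static int enum_helper_info(struct elem_info *uinfo, unsigned int num_items,
                            const char *const texts[])
{
    uinfo->items = num_items;
    if (uinfo->item >= num_items)
        uinfo->item = num_items - 1;
    strcpy(uinfo->name, texts[uinfo->item]);
    return 0;
}

int main(void)
{
    static const char *const texts[] = {
        "Internal Aux", "Wavetable", "Rear Line-In"
    };
    struct elem_info info = { .item = 5 }; /* deliberately out of range */

    enum_helper_info(&info, 3, texts);
    printf("clamped to: %s\n", info.name);
    return 0;
}

Folding this body into shared helpers (snd_hda_enum_helper_info()/snd_hda_enum_bool_helper_info() in the HDA hunks) is what lets the per-driver callbacks shrink to one or two lines.
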
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 20c6b079d0d..9e28cc12969 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -22,7 +22,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -432,7 +431,7 @@ static int snd_ice1712_delta1010lt_wordclock_status_get(struct snd_kcontrol *kco
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_status __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_status =
{
.access = (SNDRV_CTL_ELEM_ACCESS_READ),
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -445,7 +444,7 @@ static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_status __devini
* initialize the chips on M-Audio cards
*/
-static struct snd_akm4xxx akm_audiophile __devinitdata = {
+static struct snd_akm4xxx akm_audiophile = {
.type = SND_AK4528,
.num_adcs = 2,
.num_dacs = 2,
@@ -454,7 +453,7 @@ static struct snd_akm4xxx akm_audiophile __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_audiophile_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_audiophile_priv = {
.caddr = 2,
.cif = 0,
.data_mask = ICE1712_DELTA_AP_DOUT,
@@ -466,7 +465,7 @@ static struct snd_ak4xxx_private akm_audiophile_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_delta410 __devinitdata = {
+static struct snd_akm4xxx akm_delta410 = {
.type = SND_AK4529,
.num_adcs = 2,
.num_dacs = 8,
@@ -475,7 +474,7 @@ static struct snd_akm4xxx akm_delta410 __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_delta410_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_delta410_priv = {
.caddr = 0,
.cif = 0,
.data_mask = ICE1712_DELTA_AP_DOUT,
@@ -487,7 +486,7 @@ static struct snd_ak4xxx_private akm_delta410_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_delta1010lt __devinitdata = {
+static struct snd_akm4xxx akm_delta1010lt = {
.type = SND_AK4524,
.num_adcs = 8,
.num_dacs = 8,
@@ -497,7 +496,7 @@ static struct snd_akm4xxx akm_delta1010lt __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_delta1010lt_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_delta1010lt_priv = {
.caddr = 2,
.cif = 0, /* the default level of the CIF pin from AK4524 */
.data_mask = ICE1712_DELTA_1010LT_DOUT,
@@ -509,7 +508,7 @@ static struct snd_ak4xxx_private akm_delta1010lt_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_delta66e __devinitdata = {
+static struct snd_akm4xxx akm_delta66e = {
.type = SND_AK4524,
.num_adcs = 4,
.num_dacs = 4,
@@ -519,7 +518,7 @@ static struct snd_akm4xxx akm_delta66e __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_delta66e_priv = {
.caddr = 2,
.cif = 0, /* the default level of the CIF pin from AK4524 */
.data_mask = ICE1712_DELTA_66E_DOUT,
@@ -532,7 +531,7 @@ static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = {
};
-static struct snd_akm4xxx akm_delta44 __devinitdata = {
+static struct snd_akm4xxx akm_delta44 = {
.type = SND_AK4524,
.num_adcs = 4,
.num_dacs = 4,
@@ -542,7 +541,7 @@ static struct snd_akm4xxx akm_delta44 __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_delta44_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_delta44_priv = {
.caddr = 2,
.cif = 0, /* the default level of the CIF pin from AK4524 */
.data_mask = ICE1712_DELTA_CODEC_SERIAL_DATA,
@@ -554,7 +553,7 @@ static struct snd_ak4xxx_private akm_delta44_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_vx442 __devinitdata = {
+static struct snd_akm4xxx akm_vx442 = {
.type = SND_AK4524,
.num_adcs = 4,
.num_dacs = 4,
@@ -564,7 +563,7 @@ static struct snd_akm4xxx akm_vx442 __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_vx442_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_vx442_priv = {
.caddr = 2,
.cif = 0,
.data_mask = ICE1712_VX442_DOUT,
@@ -576,7 +575,7 @@ static struct snd_ak4xxx_private akm_vx442_priv __devinitdata = {
.mask_flags = 0,
};
-static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice)
+static int snd_ice1712_delta_init(struct snd_ice1712 *ice)
{
int err;
struct snd_akm4xxx *ak;
@@ -617,7 +616,7 @@ static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice)
ice->num_total_dacs = 4; /* two AK4324 codecs */
break;
case ICE1712_SUBDEVICE_VX442:
- case ICE1712_SUBDEVICE_DELTA66E: /* omni not suported yet */
+ case ICE1712_SUBDEVICE_DELTA66E: /* omni not supported yet */
ice->num_total_dacs = 4;
ice->num_total_adcs = 4;
break;
@@ -714,19 +713,19 @@ static int __devinit snd_ice1712_delta_init(struct snd_ice1712 *ice)
* additional controls for M-Audio cards
*/
-static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_select __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_select =
ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0);
-static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_select __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_delta1010lt_wordclock_select =
ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 0, 0);
-static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_status __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_delta1010_wordclock_status =
ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
-static struct snd_kcontrol_new snd_ice1712_deltadio2496_spdif_in_select __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_deltadio2496_spdif_in_select =
ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0);
-static struct snd_kcontrol_new snd_ice1712_delta_spdif_in_status __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_delta_spdif_in_status =
ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
-static int __devinit snd_ice1712_delta_add_controls(struct snd_ice1712 *ice)
+static int snd_ice1712_delta_add_controls(struct snd_ice1712 *ice)
{
int err;
@@ -802,7 +801,7 @@ static int __devinit snd_ice1712_delta_add_controls(struct snd_ice1712 *ice)
/* entry point */
-struct snd_ice1712_card_info snd_ice1712_delta_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_ice1712_delta_cards[] = {
{
.subvendor = ICE1712_SUBDEVICE_DELTA1010,
.name = "M Audio Delta 1010",
diff --git a/sound/pci/ice1712/ews.c b/sound/pci/ice1712/ews.c
index 6fe35b81204..bc2e7011c55 100644
--- a/sound/pci/ice1712/ews.c
+++ b/sound/pci/ice1712/ews.c
@@ -22,7 +22,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -344,7 +343,7 @@ static void ews88_setup_spdif(struct snd_ice1712 *ice, int rate)
/*
*/
-static struct snd_akm4xxx akm_ews88mt __devinitdata = {
+static struct snd_akm4xxx akm_ews88mt = {
.num_adcs = 8,
.num_dacs = 8,
.type = SND_AK4524,
@@ -354,7 +353,7 @@ static struct snd_akm4xxx akm_ews88mt __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_ews88mt_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_ews88mt_priv = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_EWS88_SERIAL_DATA,
@@ -366,7 +365,7 @@ static struct snd_ak4xxx_private akm_ews88mt_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_ewx2496 __devinitdata = {
+static struct snd_akm4xxx akm_ewx2496 = {
.num_adcs = 2,
.num_dacs = 2,
.type = SND_AK4524,
@@ -375,7 +374,7 @@ static struct snd_akm4xxx akm_ewx2496 __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_ewx2496_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_ewx2496_priv = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_EWS88_SERIAL_DATA,
@@ -387,7 +386,7 @@ static struct snd_ak4xxx_private akm_ewx2496_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_6fire __devinitdata = {
+static struct snd_akm4xxx akm_6fire = {
.num_adcs = 6,
.num_dacs = 6,
.type = SND_AK4524,
@@ -396,7 +395,7 @@ static struct snd_akm4xxx akm_6fire __devinitdata = {
}
};
-static struct snd_ak4xxx_private akm_6fire_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_6fire_priv = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_6FIRE_SERIAL_DATA,
@@ -420,7 +419,7 @@ static struct snd_ak4xxx_private akm_6fire_priv __devinitdata = {
static int snd_ice1712_6fire_write_pca(struct snd_ice1712 *ice, unsigned char reg, unsigned char data);
-static int __devinit snd_ice1712_ews_init(struct snd_ice1712 *ice)
+static int snd_ice1712_ews_init(struct snd_ice1712 *ice)
{
int err;
struct snd_akm4xxx *ak;
@@ -576,7 +575,7 @@ static int __devinit snd_ice1712_ews_init(struct snd_ice1712 *ice)
/* i/o sensitivity - this callback is shared among other devices, too */
static int snd_ice1712_ewx_io_sense_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo){
- static char *texts[2] = {
+ static const char * const texts[2] = {
"+4dBu", "-10dBV",
};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
@@ -616,7 +615,7 @@ static int snd_ice1712_ewx_io_sense_put(struct snd_kcontrol *kcontrol, struct sn
return val != nval;
}
-static struct snd_kcontrol_new snd_ice1712_ewx2496_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_ewx2496_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input Sensitivity Switch",
@@ -724,7 +723,7 @@ static int snd_ice1712_ews88mt_input_sense_put(struct snd_kcontrol *kcontrol, st
return ndata != data;
}
-static struct snd_kcontrol_new snd_ice1712_ews88mt_input_sense __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_ews88mt_input_sense = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input Sensitivity Switch",
.info = snd_ice1712_ewx_io_sense_info,
@@ -733,7 +732,7 @@ static struct snd_kcontrol_new snd_ice1712_ews88mt_input_sense __devinitdata = {
.count = 8,
};
-static struct snd_kcontrol_new snd_ice1712_ews88mt_output_sense __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_ews88mt_output_sense = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Output Sensitivity Switch",
.info = snd_ice1712_ewx_io_sense_info,
@@ -811,7 +810,7 @@ static int snd_ice1712_ews88d_control_put(struct snd_kcontrol *kcontrol, struct
.private_value = xshift | (xinvert << 8),\
}
-static struct snd_kcontrol_new snd_ice1712_ews88d_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_ews88d_controls[] = {
EWS88D_CONTROL(SNDRV_CTL_ELEM_IFACE_MIXER, "IEC958 Input Optical", 0, 1, 0), /* inverted */
EWS88D_CONTROL(SNDRV_CTL_ELEM_IFACE_MIXER, "ADAT Output Optical", 1, 0, 0),
EWS88D_CONTROL(SNDRV_CTL_ELEM_IFACE_MIXER, "ADAT External Master Clock", 2, 0, 0),
@@ -899,7 +898,7 @@ static int snd_ice1712_6fire_control_put(struct snd_kcontrol *kcontrol, struct s
static int snd_ice1712_6fire_select_input_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
- static char *texts[4] = {
+ static const char * const texts[4] = {
"Internal", "Front Input", "Rear Input", "Wave Table"
};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
@@ -948,7 +947,7 @@ static int snd_ice1712_6fire_select_input_put(struct snd_kcontrol *kcontrol, str
.private_value = xshift | (xinvert << 8),\
}
-static struct snd_kcontrol_new snd_ice1712_6fire_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_6fire_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog Input Select",
@@ -964,7 +963,7 @@ static struct snd_kcontrol_new snd_ice1712_6fire_controls[] __devinitdata = {
};
-static int __devinit snd_ice1712_ews_add_controls(struct snd_ice1712 *ice)
+static int snd_ice1712_ews_add_controls(struct snd_ice1712 *ice)
{
unsigned int idx;
int err;
@@ -1030,7 +1029,7 @@ static int __devinit snd_ice1712_ews_add_controls(struct snd_ice1712 *ice)
/* entry point */
-struct snd_ice1712_card_info snd_ice1712_ews_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_ice1712_ews_cards[] = {
{
.subvendor = ICE1712_SUBDEVICE_EWX2496,
.name = "TerraTec EWX24/96",
diff --git a/sound/pci/ice1712/hoontech.c b/sound/pci/ice1712/hoontech.c
index 6914189073a..59e37c58169 100644
--- a/sound/pci/ice1712/hoontech.c
+++ b/sound/pci/ice1712/hoontech.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -40,7 +39,7 @@ struct hoontech_spec {
unsigned short boxconfig[4];
};
-static void __devinit snd_ice1712_stdsp24_gpio_write(struct snd_ice1712 *ice, unsigned char byte)
+static void snd_ice1712_stdsp24_gpio_write(struct snd_ice1712 *ice, unsigned char byte)
{
byte |= ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
@@ -53,7 +52,7 @@ static void __devinit snd_ice1712_stdsp24_gpio_write(struct snd_ice1712 *ice, un
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
}
-static void __devinit snd_ice1712_stdsp24_darear(struct snd_ice1712 *ice, int activate)
+static void snd_ice1712_stdsp24_darear(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
@@ -62,7 +61,7 @@ static void __devinit snd_ice1712_stdsp24_darear(struct snd_ice1712 *ice, int ac
mutex_unlock(&ice->gpio_mutex);
}
-static void __devinit snd_ice1712_stdsp24_mute(struct snd_ice1712 *ice, int activate)
+static void snd_ice1712_stdsp24_mute(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
@@ -71,7 +70,7 @@ static void __devinit snd_ice1712_stdsp24_mute(struct snd_ice1712 *ice, int acti
mutex_unlock(&ice->gpio_mutex);
}
-static void __devinit snd_ice1712_stdsp24_insel(struct snd_ice1712 *ice, int activate)
+static void snd_ice1712_stdsp24_insel(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
@@ -80,7 +79,7 @@ static void __devinit snd_ice1712_stdsp24_insel(struct snd_ice1712 *ice, int act
mutex_unlock(&ice->gpio_mutex);
}
-static void __devinit snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, int box, int chn, int activate)
+static void snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, int box, int chn, int activate)
{
struct hoontech_spec *spec = ice->spec;
@@ -130,7 +129,7 @@ static void __devinit snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, i
mutex_unlock(&ice->gpio_mutex);
}
-static void __devinit snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int box, int master)
+static void snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int box, int master)
{
struct hoontech_spec *spec = ice->spec;
@@ -158,7 +157,7 @@ static void __devinit snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int
mutex_unlock(&ice->gpio_mutex);
}
-static void __devinit snd_ice1712_stdsp24_midi2(struct snd_ice1712 *ice, int activate)
+static void snd_ice1712_stdsp24_midi2(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
@@ -167,7 +166,7 @@ static void __devinit snd_ice1712_stdsp24_midi2(struct snd_ice1712 *ice, int act
mutex_unlock(&ice->gpio_mutex);
}
-static int __devinit snd_ice1712_hoontech_init(struct snd_ice1712 *ice)
+static int snd_ice1712_hoontech_init(struct snd_ice1712 *ice)
{
struct hoontech_spec *spec;
int box, chn;
@@ -267,10 +266,10 @@ static void stdsp24_ak4524_lock(struct snd_akm4xxx *ak, int chip)
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, ~tmp);
}
-static int __devinit snd_ice1712_value_init(struct snd_ice1712 *ice)
+static int snd_ice1712_value_init(struct snd_ice1712 *ice)
{
/* Hoontech STDSP24 with modified hardware */
- static struct snd_akm4xxx akm_stdsp24_mv __devinitdata = {
+ static struct snd_akm4xxx akm_stdsp24_mv = {
.num_adcs = 2,
.num_dacs = 2,
.type = SND_AK4524,
@@ -279,7 +278,7 @@ static int __devinit snd_ice1712_value_init(struct snd_ice1712 *ice)
}
};
- static struct snd_ak4xxx_private akm_stdsp24_mv_priv __devinitdata = {
+ static struct snd_ak4xxx_private akm_stdsp24_mv_priv = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_STDSP24_SERIAL_DATA,
@@ -317,7 +316,7 @@ static int __devinit snd_ice1712_value_init(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit snd_ice1712_ez8_init(struct snd_ice1712 *ice)
+static int snd_ice1712_ez8_init(struct snd_ice1712 *ice)
{
ice->gpio.write_mask = ice->eeprom.gpiomask;
ice->gpio.direction = ice->eeprom.gpiodir;
@@ -329,7 +328,7 @@ static int __devinit snd_ice1712_ez8_init(struct snd_ice1712 *ice)
/* entry point */
-struct snd_ice1712_card_info snd_ice1712_hoontech_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_ice1712_hoontech_cards[] = {
{
.subvendor = ICE1712_SUBDEVICE_STDSP24,
.name = "Hoontech SoundTrack Audio DSP24",
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 5be2e120a14..2ffdc35d5ff 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -47,7 +47,6 @@
*/
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -280,7 +279,7 @@ static int snd_ice1712_digmix_route_ac97_put(struct snd_kcontrol *kcontrol, stru
return val != nval;
}
-static struct snd_kcontrol_new snd_ice1712_mixer_digmix_route_ac97 __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_mixer_digmix_route_ac97 = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Digital Mixer To AC97",
.info = snd_ice1712_digmix_route_ac97_info,
@@ -388,7 +387,7 @@ static void setup_cs8427(struct snd_ice1712 *ice, int rate)
/*
* create and initialize callbacks for cs8427 interface
*/
-int __devinit snd_ice1712_init_cs8427(struct snd_ice1712 *ice, int addr)
+int snd_ice1712_init_cs8427(struct snd_ice1712 *ice, int addr)
{
int err;
@@ -879,7 +878,7 @@ static struct snd_pcm_ops snd_ice1712_capture_ops = {
.pointer = snd_ice1712_capture_pointer,
};
-static int __devinit snd_ice1712_pcm(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -909,7 +908,7 @@ static int __devinit snd_ice1712_pcm(struct snd_ice1712 *ice, int device, struct
return 0;
}
-static int __devinit snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1254,7 +1253,7 @@ static struct snd_pcm_ops snd_ice1712_capture_pro_ops = {
.pointer = snd_ice1712_capture_pro_pointer,
};
-static int __devinit snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
+static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1388,7 +1387,7 @@ static int snd_ice1712_pro_mixer_volume_put(struct snd_kcontrol *kcontrol, struc
static const DECLARE_TLV_DB_SCALE(db_scale_playback, -14400, 150, 0);
-static struct snd_kcontrol_new snd_ice1712_multi_playback_ctrls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_multi_playback_ctrls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Playback Switch",
@@ -1412,7 +1411,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_playback_ctrls[] __devinitdata
},
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_switch __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Multi Capture Switch",
.info = snd_ice1712_pro_mixer_switch_info,
@@ -1421,7 +1420,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_switch __devinit
.private_value = 10,
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_switch __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("Multi ", CAPTURE, SWITCH),
.info = snd_ice1712_pro_mixer_switch_info,
@@ -1431,7 +1430,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_switch __devinitd
.count = 2,
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_volume __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -1443,7 +1442,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_volume __devinit
.tlv = { .p = db_scale_playback }
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_volume __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("Multi ", CAPTURE, VOLUME),
.info = snd_ice1712_pro_mixer_volume_info,
@@ -1453,7 +1452,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_volume __devinitd
.count = 2,
};
-static int __devinit snd_ice1712_build_pro_mixer(struct snd_ice1712 *ice)
+static int snd_ice1712_build_pro_mixer(struct snd_ice1712 *ice)
{
struct snd_card *card = ice->card;
unsigned int idx;
@@ -1512,7 +1511,7 @@ static void snd_ice1712_mixer_free_ac97(struct snd_ac97 *ac97)
ice->ac97 = NULL;
}
-static int __devinit snd_ice1712_ac97_mixer(struct snd_ice1712 *ice)
+static int snd_ice1712_ac97_mixer(struct snd_ice1712 *ice)
{
int err, bus_num = 0;
struct snd_ac97_template ac97;
@@ -1611,7 +1610,7 @@ static void snd_ice1712_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, " GPIO_DIRECTION : 0x%02x\n", (unsigned)snd_ice1712_read(ice, ICE1712_IREG_GPIO_DIRECTION));
}
-static void __devinit snd_ice1712_proc_init(struct snd_ice1712 *ice)
+static void snd_ice1712_proc_init(struct snd_ice1712 *ice)
{
struct snd_info_entry *entry;
@@ -1640,7 +1639,7 @@ static int snd_ice1712_eeprom_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_eeprom __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_eeprom = {
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "ICE1712 EEPROM",
.access = SNDRV_CTL_ELEM_ACCESS_READ,
@@ -1676,7 +1675,7 @@ static int snd_ice1712_spdif_default_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_spdif_default __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_spdif_default =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
@@ -1727,7 +1726,7 @@ static int snd_ice1712_spdif_maskp_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_spdif_maskc __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_spdif_maskc =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1736,7 +1735,7 @@ static struct snd_kcontrol_new snd_ice1712_spdif_maskc __devinitdata =
.get = snd_ice1712_spdif_maskc_get,
};
-static struct snd_kcontrol_new snd_ice1712_spdif_maskp __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_spdif_maskp =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1763,7 +1762,7 @@ static int snd_ice1712_spdif_stream_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_spdif_stream __devinitdata =
+static struct snd_kcontrol_new snd_ice1712_spdif_stream =
{
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_INACTIVE),
@@ -1894,7 +1893,7 @@ static int snd_ice1712_pro_internal_clock_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_pro_internal_clock __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_pro_internal_clock = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Internal Clock",
.info = snd_ice1712_pro_internal_clock_info,
@@ -1965,7 +1964,7 @@ static int snd_ice1712_pro_internal_clock_default_put(struct snd_kcontrol *kcont
return change;
}
-static struct snd_kcontrol_new snd_ice1712_pro_internal_clock_default __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_pro_internal_clock_default = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Internal Clock Default",
.info = snd_ice1712_pro_internal_clock_default_info,
@@ -1996,7 +1995,7 @@ static int snd_ice1712_pro_rate_locking_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_pro_rate_locking __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_pro_rate_locking = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Rate Locking",
.info = snd_ice1712_pro_rate_locking_info,
@@ -2027,7 +2026,7 @@ static int snd_ice1712_pro_rate_reset_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_pro_rate_reset __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_pro_rate_reset = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Rate Reset",
.info = snd_ice1712_pro_rate_reset_info,
@@ -2194,7 +2193,7 @@ static int snd_ice1712_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_mixer_pro_analog_route __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_mixer_pro_analog_route = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Playback Route",
.info = snd_ice1712_pro_route_info,
@@ -2202,7 +2201,7 @@ static struct snd_kcontrol_new snd_ice1712_mixer_pro_analog_route __devinitdata
.put = snd_ice1712_pro_route_analog_put,
};
-static struct snd_kcontrol_new snd_ice1712_mixer_pro_spdif_route __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_mixer_pro_spdif_route = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, NONE) "Route",
.info = snd_ice1712_pro_route_info,
@@ -2244,7 +2243,7 @@ static int snd_ice1712_pro_volume_rate_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_mixer_pro_volume_rate __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_mixer_pro_volume_rate = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Volume Rate",
.info = snd_ice1712_pro_volume_rate_info,
@@ -2277,7 +2276,7 @@ static int snd_ice1712_pro_peak_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = {
+static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Multi Track Peak",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
@@ -2292,16 +2291,16 @@ static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = {
/*
* list of available boards
*/
-static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
+static struct snd_ice1712_card_info *card_tables[] = {
snd_ice1712_hoontech_cards,
snd_ice1712_delta_cards,
snd_ice1712_ews_cards,
NULL,
};
-static unsigned char __devinit snd_ice1712_read_i2c(struct snd_ice1712 *ice,
- unsigned char dev,
- unsigned char addr)
+static unsigned char snd_ice1712_read_i2c(struct snd_ice1712 *ice,
+ unsigned char dev,
+ unsigned char addr)
{
long t = 0x10000;
@@ -2311,8 +2310,8 @@ static unsigned char __devinit snd_ice1712_read_i2c(struct snd_ice1712 *ice,
return inb(ICEREG(ice, I2C_DATA));
}
-static int __devinit snd_ice1712_read_eeprom(struct snd_ice1712 *ice,
- const char *modelname)
+static int snd_ice1712_read_eeprom(struct snd_ice1712 *ice,
+ const char *modelname)
{
int dev = 0xa0; /* EEPROM device address */
unsigned int i, size;
@@ -2386,7 +2385,7 @@ static int __devinit snd_ice1712_read_eeprom(struct snd_ice1712 *ice,
-static int __devinit snd_ice1712_chip_init(struct snd_ice1712 *ice)
+static int snd_ice1712_chip_init(struct snd_ice1712 *ice)
{
outb(ICE1712_RESET | ICE1712_NATIVE, ICEREG(ice, CONTROL));
udelay(200);
@@ -2433,7 +2432,7 @@ static int __devinit snd_ice1712_chip_init(struct snd_ice1712 *ice)
return 0;
}
-int __devinit snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice)
+int snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice)
{
int err;
struct snd_kcontrol *kctl;
@@ -2461,7 +2460,7 @@ int __devinit snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice)
}
-static int __devinit snd_ice1712_build_controls(struct snd_ice1712 *ice)
+static int snd_ice1712_build_controls(struct snd_ice1712 *ice)
{
int err;
@@ -2531,13 +2530,13 @@ static int snd_ice1712_dev_free(struct snd_device *device)
return snd_ice1712_free(ice);
}
-static int __devinit snd_ice1712_create(struct snd_card *card,
- struct pci_dev *pci,
- const char *modelname,
- int omni,
- int cs8427_timeout,
- int dxr_enable,
- struct snd_ice1712 **r_ice1712)
+static int snd_ice1712_create(struct snd_card *card,
+ struct pci_dev *pci,
+ const char *modelname,
+ int omni,
+ int cs8427_timeout,
+ int dxr_enable,
+ struct snd_ice1712 **r_ice1712)
{
struct snd_ice1712 *ice;
int err;
@@ -2651,10 +2650,10 @@ static int __devinit snd_ice1712_create(struct snd_card *card,
*
*/
-static struct snd_ice1712_card_info no_matched __devinitdata;
+static struct snd_ice1712_card_info no_matched;
-static int __devinit snd_ice1712_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_ice1712_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2686,6 +2685,7 @@ static int __devinit snd_ice1712_probe(struct pci_dev *pci,
for (tbl = card_tables; *tbl; tbl++) {
for (c = *tbl; c->subvendor; c++) {
if (c->subvendor == ice->eeprom.subvendor) {
+ ice->card_info = c;
strcpy(card->shortname, c->name);
if (c->driver) /* specific driver? */
strcpy(card->driver, c->driver);
@@ -2797,9 +2797,14 @@ static int __devinit snd_ice1712_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_ice1712_remove(struct pci_dev *pci)
+static void snd_ice1712_remove(struct pci_dev *pci)
{
- snd_card_free(pci_get_drvdata(pci));
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (ice->card_info && ice->card_info->chip_exit)
+ ice->card_info->chip_exit(ice);
+ snd_card_free(card);
pci_set_drvdata(pci, NULL);
}
@@ -2807,7 +2812,7 @@ static struct pci_driver ice1712_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ice1712_ids,
.probe = snd_ice1712_probe,
- .remove = __devexit_p(snd_ice1712_remove),
+ .remove = snd_ice1712_remove,
};
module_pci_driver(ice1712_driver);
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index d0e7d87f09f..b209fc30b33 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -22,6 +22,7 @@
*
*/
+#include <linux/io.h>
#include <sound/control.h>
#include <sound/ac97_codec.h>
#include <sound/rawmidi.h>
@@ -288,6 +289,7 @@ struct snd_ice1712_spdif {
} ops;
};
+struct snd_ice1712_card_info;
struct snd_ice1712 {
unsigned long conp_dma_size;
@@ -324,6 +326,7 @@ struct snd_ice1712 {
struct snd_info_entry *proc_entry;
struct snd_ice1712_eeprom eeprom;
+ struct snd_ice1712_card_info *card_info;
unsigned int pro_volumes[20];
unsigned int omni:1; /* Delta Omni I/O */
@@ -381,7 +384,7 @@ struct snd_ice1712 {
unsigned char (*set_mclk)(struct snd_ice1712 *ice, unsigned int rate);
int (*set_spdif_clock)(struct snd_ice1712 *ice, int type);
int (*get_spdif_master_type)(struct snd_ice1712 *ice);
- char **ext_clock_names;
+ const char * const *ext_clock_names;
int ext_clock_count;
void (*pro_open)(struct snd_ice1712 *, struct snd_pcm_substream *);
#ifdef CONFIG_PM_SLEEP
@@ -513,10 +516,11 @@ static inline u8 snd_ice1712_read(struct snd_ice1712 *ice, u8 addr)
struct snd_ice1712_card_info {
unsigned int subvendor;
- char *name;
- char *model;
- char *driver;
+ const char *name;
+ const char *model;
+ const char *driver;
int (*chip_init)(struct snd_ice1712 *);
+ void (*chip_exit)(struct snd_ice1712 *);
int (*build_controls)(struct snd_ice1712 *);
unsigned int no_mpu401:1;
unsigned int mpu401_1_info_flags;
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 245d874891b..ce70e7f113e 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -22,7 +22,6 @@
*
*/
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -54,6 +53,7 @@
#include "wtm.h"
#include "se.h"
#include "quartet.h"
+#include "psc724.h"
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)");
@@ -106,7 +106,7 @@ static int PRO_RATE_LOCKED;
static int PRO_RATE_RESET = 1;
static unsigned int PRO_RATE_DEFAULT = 44100;
-static char *ext_clock_names[1] = { "IEC958 In" };
+static const char * const ext_clock_names[1] = { "IEC958 In" };
/*
* Basic I/O
@@ -1135,7 +1135,7 @@ static struct snd_pcm_ops snd_vt1724_capture_pro_ops = {
.pointer = snd_vt1724_pcm_pointer,
};
-static int __devinit snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
+static int snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
{
struct snd_pcm *pcm;
int capt, err;
@@ -1315,7 +1315,7 @@ static struct snd_pcm_ops snd_vt1724_capture_spdif_ops = {
};
-static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
+static int snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
{
char *name;
struct snd_pcm *pcm;
@@ -1449,7 +1449,7 @@ static struct snd_pcm_ops snd_vt1724_playback_indep_ops = {
};
-static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
+static int snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
{
struct snd_pcm *pcm;
int play;
@@ -1484,7 +1484,7 @@ static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
* Mixer section
*/
-static int __devinit snd_vt1724_ac97_mixer(struct snd_ice1712 *ice)
+static int snd_vt1724_ac97_mixer(struct snd_ice1712 *ice)
{
int err;
@@ -1570,7 +1570,7 @@ static void snd_vt1724_proc_read(struct snd_info_entry *entry,
idx, inb(ice->profi_port+idx));
}
-static void __devinit snd_vt1724_proc_init(struct snd_ice1712 *ice)
+static void snd_vt1724_proc_init(struct snd_ice1712 *ice)
{
struct snd_info_entry *entry;
@@ -1599,7 +1599,7 @@ static int snd_vt1724_eeprom_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_vt1724_eeprom __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_eeprom = {
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "ICE1724 EEPROM",
.access = SNDRV_CTL_ELEM_ACCESS_READ,
@@ -1712,7 +1712,7 @@ static int snd_vt1724_spdif_default_put(struct snd_kcontrol *kcontrol,
return val != old;
}
-static struct snd_kcontrol_new snd_vt1724_spdif_default __devinitdata =
+static struct snd_kcontrol_new snd_vt1724_spdif_default =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
@@ -1744,7 +1744,7 @@ static int snd_vt1724_spdif_maskp_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_vt1724_spdif_maskc __devinitdata =
+static struct snd_kcontrol_new snd_vt1724_spdif_maskc =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1753,7 +1753,7 @@ static struct snd_kcontrol_new snd_vt1724_spdif_maskc __devinitdata =
.get = snd_vt1724_spdif_maskc_get,
};
-static struct snd_kcontrol_new snd_vt1724_spdif_maskp __devinitdata =
+static struct snd_kcontrol_new snd_vt1724_spdif_maskp =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1790,7 +1790,7 @@ static int snd_vt1724_spdif_sw_put(struct snd_kcontrol *kcontrol,
return old != val;
}
-static struct snd_kcontrol_new snd_vt1724_spdif_switch __devinitdata =
+static struct snd_kcontrol_new snd_vt1724_spdif_switch =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
/* FIXME: the following conflict with IEC958 Playback Route */
@@ -1965,7 +1965,7 @@ static int snd_vt1724_pro_internal_clock_put(struct snd_kcontrol *kcontrol,
return old_rate != new_rate;
}
-static struct snd_kcontrol_new snd_vt1724_pro_internal_clock __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_pro_internal_clock = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Internal Clock",
.info = snd_vt1724_pro_internal_clock_info,
@@ -1996,7 +1996,7 @@ static int snd_vt1724_pro_rate_locking_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_vt1724_pro_rate_locking __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_pro_rate_locking = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Rate Locking",
.info = snd_vt1724_pro_rate_locking_info,
@@ -2027,7 +2027,7 @@ static int snd_vt1724_pro_rate_reset_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_vt1724_pro_rate_reset __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_pro_rate_reset = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Multi Track Rate Reset",
.info = snd_vt1724_pro_rate_reset_info,
@@ -2042,7 +2042,7 @@ static struct snd_kcontrol_new snd_vt1724_pro_rate_reset __devinitdata = {
static int snd_vt1724_pro_route_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {
+ static const char * const texts[] = {
"PCM Out", /* 0 */
"H/W In 0", "H/W In 1", /* 1-2 */
"IEC958 In L", "IEC958 In R", /* 3-4 */
@@ -2149,7 +2149,7 @@ static int snd_vt1724_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
digital_route_shift(idx));
}
-static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata =
+static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Playback Route",
@@ -2158,7 +2158,7 @@ static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata =
.put = snd_vt1724_pro_route_analog_put,
};
-static struct snd_kcontrol_new snd_vt1724_mixer_pro_spdif_route __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_mixer_pro_spdif_route = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, NONE) "Route",
.info = snd_vt1724_pro_route_info,
@@ -2194,7 +2194,7 @@ static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = {
+static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "Multi Track Peak",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
@@ -2206,13 +2206,13 @@ static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = {
*
*/
-static struct snd_ice1712_card_info no_matched __devinitdata;
+static struct snd_ice1712_card_info no_matched;
/*
ooAoo cards with no controls
*/
-static unsigned char ooaoo_sq210_eeprom[] __devinitdata = {
+static unsigned char ooaoo_sq210_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x4c, /* 49MHz crystal, no mpu401, no ADC,
1xDACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
@@ -2232,7 +2232,7 @@ static unsigned char ooaoo_sq210_eeprom[] __devinitdata = {
};
-struct snd_ice1712_card_info snd_vt1724_ooaoo_cards[] __devinitdata = {
+static struct snd_ice1712_card_info snd_vt1724_ooaoo_cards[] = {
{
.name = "ooAoo SQ210a",
.model = "sq210a",
@@ -2242,7 +2242,7 @@ struct snd_ice1712_card_info snd_vt1724_ooaoo_cards[] __devinitdata = {
{ } /* terminator */
};
-static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
+static struct snd_ice1712_card_info *card_tables[] = {
snd_vt1724_revo_cards,
snd_vt1724_amp_cards,
snd_vt1724_aureon_cards,
@@ -2257,6 +2257,7 @@ static struct snd_ice1712_card_info *card_tables[] __devinitdata = {
snd_vt1724_se_cards,
snd_vt1724_qtet_cards,
snd_vt1724_ooaoo_cards,
+ snd_vt1724_psc724_cards,
NULL,
};
@@ -2306,8 +2307,8 @@ void snd_vt1724_write_i2c(struct snd_ice1712 *ice,
mutex_unlock(&ice->i2c_mutex);
}
-static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
- const char *modelname)
+static int snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
+ const char *modelname)
{
const int dev = 0xa0; /* EEPROM device address */
unsigned int i, size;
@@ -2348,6 +2349,7 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
ice->eeprom.subvendor = c->subvendor;
} else if (c->subvendor != ice->eeprom.subvendor)
continue;
+ ice->card_info = c;
if (!c->eeprom_size || !c->eeprom_data)
goto found;
/* if the EEPROM is given by the driver, use it */
@@ -2360,6 +2362,10 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
}
printk(KERN_WARNING "ice1724: No matching model found for ID 0x%x\n",
ice->eeprom.subvendor);
+#ifdef CONFIG_PM_SLEEP
+ /* assume AC97-only card which can suspend without additional code */
+ ice->pm_suspend_enabled = 1;
+#endif
found:
ice->eeprom.size = snd_vt1724_read_i2c(ice, dev, 0x04);
@@ -2371,7 +2377,7 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
return -EIO;
}
ice->eeprom.version = snd_vt1724_read_i2c(ice, dev, 0x05);
- if (ice->eeprom.version != 2)
+ if (ice->eeprom.version != 1 && ice->eeprom.version != 2)
printk(KERN_WARNING "ice1724: Invalid EEPROM version %i\n",
ice->eeprom.version);
size = ice->eeprom.size - 6;
@@ -2424,7 +2430,7 @@ static int snd_vt1724_chip_init(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
+static int snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
{
int err;
struct snd_kcontrol *kctl;
@@ -2466,7 +2472,7 @@ static int __devinit snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
}
-static int __devinit snd_vt1724_build_controls(struct snd_ice1712 *ice)
+static int snd_vt1724_build_controls(struct snd_ice1712 *ice)
{
int err;
@@ -2526,10 +2532,10 @@ static int snd_vt1724_dev_free(struct snd_device *device)
return snd_vt1724_free(ice);
}
-static int __devinit snd_vt1724_create(struct snd_card *card,
- struct pci_dev *pci,
- const char *modelname,
- struct snd_ice1712 **r_ice1712)
+static int snd_vt1724_create(struct snd_card *card,
+ struct pci_dev *pci,
+ const char *modelname,
+ struct snd_ice1712 **r_ice1712)
{
struct snd_ice1712 *ice;
int err;
@@ -2616,8 +2622,8 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
*
*/
-static int __devinit snd_vt1724_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_vt1724_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2786,9 +2792,14 @@ __found:
return 0;
}
-static void __devexit snd_vt1724_remove(struct pci_dev *pci)
+static void snd_vt1724_remove(struct pci_dev *pci)
{
- snd_card_free(pci_get_drvdata(pci));
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (ice->card_info && ice->card_info->chip_exit)
+ ice->card_info->chip_exit(ice);
+ snd_card_free(card);
pci_set_drvdata(pci, NULL);
}
@@ -2889,7 +2900,7 @@ static struct pci_driver vt1724_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_vt1724_ids,
.probe = snd_vt1724_probe,
- .remove = __devexit_p(snd_vt1724_remove),
+ .remove = snd_vt1724_remove,
.driver = {
.pm = SND_VT1724_PM_OPS,
},
diff --git a/sound/pci/ice1712/juli.c b/sound/pci/ice1712/juli.c
index 14fd536b645..8855933e710 100644
--- a/sound/pci/ice1712/juli.c
+++ b/sound/pci/ice1712/juli.c
@@ -23,7 +23,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -283,7 +282,7 @@ static const struct snd_akm4xxx_dac_channel juli_dac[] = {
};
-static struct snd_akm4xxx akm_juli_dac __devinitdata = {
+static struct snd_akm4xxx akm_juli_dac = {
.type = SND_AK4358,
.num_dacs = 8, /* DAC1 - analog out
DAC2 - analog in monitor
@@ -358,7 +357,7 @@ static int juli_mute_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new juli_mute_controls[] __devinitdata = {
+static struct snd_kcontrol_new juli_mute_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -412,7 +411,7 @@ static struct snd_kcontrol_new juli_mute_controls[] __devinitdata = {
},
};
-static char *slave_vols[] __devinitdata = {
+static char *slave_vols[] = {
PCM_VOLUME,
MONITOR_AN_IN_VOLUME,
MONITOR_DIG_IN_VOLUME,
@@ -420,11 +419,11 @@ static char *slave_vols[] __devinitdata = {
NULL
};
-static __devinitdata
+static
DECLARE_TLV_DB_SCALE(juli_master_db_scale, -6350, 50, 1);
-static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
- const char *name)
+static struct snd_kcontrol *ctl_find(struct snd_card *card,
+ const char *name)
{
struct snd_ctl_elem_id sid;
memset(&sid, 0, sizeof(sid));
@@ -434,8 +433,9 @@ static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
return snd_ctl_find_id(card, &sid);
}
-static void __devinit add_slaves(struct snd_card *card,
- struct snd_kcontrol *master, char **list)
+static void add_slaves(struct snd_card *card,
+ struct snd_kcontrol *master,
+ char * const *list)
{
for (; *list; list++) {
struct snd_kcontrol *slave = ctl_find(card, *list);
@@ -447,7 +447,7 @@ static void __devinit add_slaves(struct snd_card *card,
}
}
-static int __devinit juli_add_controls(struct snd_ice1712 *ice)
+static int juli_add_controls(struct snd_ice1712 *ice)
{
struct juli_spec *spec = ice->spec;
int err;
@@ -579,7 +579,7 @@ static void juli_ak4114_change(struct ak4114 *ak4114, unsigned char c0,
}
}
-static int __devinit juli_init(struct snd_ice1712 *ice)
+static int juli_init(struct snd_ice1712 *ice)
{
static const unsigned char ak4114_init_vals[] = {
/* AK4117_REG_PWRDN */ AK4114_RST | AK4114_PWN |
@@ -667,7 +667,7 @@ static int __devinit juli_init(struct snd_ice1712 *ice)
 * hence the driver needs to set it up properly.
*/
-static unsigned char juli_eeprom[] __devinitdata = {
+static unsigned char juli_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x2b, /* clock 512, mpu401, 1xADC, 1xDACs,
SPDIF in */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
@@ -686,7 +686,7 @@ static unsigned char juli_eeprom[] __devinitdata = {
};
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_juli_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_juli_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_JULI,
.name = "ESI Juli@",
diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
index 726fd4b92e1..63aa39f06f0 100644
--- a/sound/pci/ice1712/maya44.c
+++ b/sound/pci/ice1712/maya44.c
@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/io.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
@@ -358,7 +357,7 @@ static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
static int maya_rec_src_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Line", "Mic" };
+ static const char * const texts[] = { "Line", "Mic" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -407,7 +406,7 @@ static int maya_rec_src_put(struct snd_kcontrol *kcontrol,
static int maya_pb_route_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {
+ static const char * const texts[] = {
"PCM Out", /* 0 */
"Input 1", "Input 2", "Input 3", "Input 4"
};
@@ -455,7 +454,7 @@ static int maya_pb_route_put(struct snd_kcontrol *kcontrol,
* controls to be added
*/
-static struct snd_kcontrol_new maya_controls[] __devinitdata = {
+static struct snd_kcontrol_new maya_controls[] = {
{
.name = "Crossmix Playback Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -545,7 +544,7 @@ static struct snd_kcontrol_new maya_controls[] __devinitdata = {
},
};
-static int __devinit maya44_add_controls(struct snd_ice1712 *ice)
+static int maya44_add_controls(struct snd_ice1712 *ice)
{
int err, i;
@@ -562,8 +561,8 @@ static int __devinit maya44_add_controls(struct snd_ice1712 *ice)
/*
* initialize a wm8776 chip
*/
-static void __devinit wm8776_init(struct snd_ice1712 *ice,
- struct snd_wm8776 *wm, unsigned int addr)
+static void wm8776_init(struct snd_ice1712 *ice,
+ struct snd_wm8776 *wm, unsigned int addr)
{
static const unsigned short inits_wm8776[] = {
0x02, 0x100, /* R2: headphone L+R muted + update */
@@ -693,14 +692,14 @@ static struct snd_pcm_hw_constraint_list dac_rates = {
/*
* chip addresses on I2C bus
*/
-static unsigned char wm8776_addr[2] __devinitdata = {
+static unsigned char wm8776_addr[2] = {
0x34, 0x36, /* codec 0 & 1 */
};
/*
* initialize the chip
*/
-static int __devinit maya44_init(struct snd_ice1712 *ice)
+static int maya44_init(struct snd_ice1712 *ice)
{
int i;
struct snd_maya44 *chip;
@@ -743,7 +742,7 @@ static int __devinit maya44_init(struct snd_ice1712 *ice)
 * hence the driver needs to set it up properly.
*/
-static unsigned char maya44_eeprom[] __devinitdata = {
+static unsigned char maya44_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x45,
/* clock xin1=49.152MHz, mpu401, 2 stereo ADCs+DACs */
[ICE_EEP2_ACLINK] = 0x80,
@@ -765,7 +764,7 @@ static unsigned char maya44_eeprom[] __devinitdata = {
};
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_maya44_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_maya44_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_MAYA44,
.name = "ESI Maya44",
diff --git a/sound/pci/ice1712/phase.c b/sound/pci/ice1712/phase.c
index de29be8c965..0011e04f36a 100644
--- a/sound/pci/ice1712/phase.c
+++ b/sound/pci/ice1712/phase.c
@@ -42,7 +42,6 @@
* Digital receiver: CS8414-CS (supported in this release)
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -103,13 +102,13 @@ static const unsigned char wm_vol[256] = {
#define WM_VOL_MAX (sizeof(wm_vol) - 1)
#define WM_VOL_MUTE 0x8000
-static struct snd_akm4xxx akm_phase22 __devinitdata = {
+static struct snd_akm4xxx akm_phase22 = {
.type = SND_AK4524,
.num_dacs = 2,
.num_adcs = 2,
};
-static struct snd_ak4xxx_private akm_phase22_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_phase22_priv = {
.caddr = 2,
.cif = 1,
.data_mask = 1 << 4,
@@ -121,7 +120,7 @@ static struct snd_ak4xxx_private akm_phase22_priv __devinitdata = {
.mask_flags = 0,
};
-static int __devinit phase22_init(struct snd_ice1712 *ice)
+static int phase22_init(struct snd_ice1712 *ice)
{
struct snd_akm4xxx *ak;
int err;
@@ -158,7 +157,7 @@ static int __devinit phase22_init(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit phase22_add_controls(struct snd_ice1712 *ice)
+static int phase22_add_controls(struct snd_ice1712 *ice)
{
int err = 0;
@@ -172,7 +171,7 @@ static int __devinit phase22_add_controls(struct snd_ice1712 *ice)
return 0;
}
-static unsigned char phase22_eeprom[] __devinitdata = {
+static unsigned char phase22_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x28, /* clock 512, mpu 401,
spdif-in/1xADC, 1xDACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
@@ -189,7 +188,7 @@ static unsigned char phase22_eeprom[] __devinitdata = {
[ICE_EEP2_GPIO_STATE2] = 0x00,
};
-static unsigned char phase28_eeprom[] __devinitdata = {
+static unsigned char phase28_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x2b, /* clock 512, mpu401,
spdif-in/1xADC, 4xDACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
@@ -379,7 +378,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol,
return change;
}
-static int __devinit phase28_init(struct snd_ice1712 *ice)
+static int phase28_init(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits_phase28[] = {
/* These come first to reduce init pop noise */
@@ -722,7 +721,7 @@ static int phase28_deemp_put(struct snd_kcontrol *kcontrol,
static int phase28_oversampling_info(struct snd_kcontrol *k,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "128x", "64x" };
+ static const char * const texts[2] = { "128x", "64x" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -770,7 +769,7 @@ static int phase28_oversampling_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
static const DECLARE_TLV_DB_SCALE(db_scale_wm_pcm, -6400, 50, 1);
-static struct snd_kcontrol_new phase28_dac_controls[] __devinitdata = {
+static struct snd_kcontrol_new phase28_dac_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -885,7 +884,7 @@ static struct snd_kcontrol_new phase28_dac_controls[] __devinitdata = {
}
};
-static struct snd_kcontrol_new wm_controls[] __devinitdata = {
+static struct snd_kcontrol_new wm_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
@@ -919,7 +918,7 @@ static struct snd_kcontrol_new wm_controls[] __devinitdata = {
}
};
-static int __devinit phase28_add_controls(struct snd_ice1712 *ice)
+static int phase28_add_controls(struct snd_ice1712 *ice)
{
unsigned int i, counts;
int err;
@@ -943,7 +942,7 @@ static int __devinit phase28_add_controls(struct snd_ice1712 *ice)
return 0;
}
-struct snd_ice1712_card_info snd_vt1724_phase_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_phase_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_PHASE22,
.name = "Terratec PHASE 22",
diff --git a/sound/pci/ice1712/pontis.c b/sound/pci/ice1712/pontis.c
index 92c1160d7ab..5555eb4b240 100644
--- a/sound/pci/ice1712/pontis.c
+++ b/sound/pci/ice1712/pontis.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -550,7 +549,7 @@ static const DECLARE_TLV_DB_SCALE(db_scale_volume, -6400, 50, 1);
* mixers
*/
-static struct snd_kcontrol_new pontis_controls[] __devinitdata = {
+static struct snd_kcontrol_new pontis_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -697,7 +696,7 @@ static void cs_proc_init(struct snd_ice1712 *ice)
}
-static int __devinit pontis_add_controls(struct snd_ice1712 *ice)
+static int pontis_add_controls(struct snd_ice1712 *ice)
{
unsigned int i;
int err;
@@ -718,7 +717,7 @@ static int __devinit pontis_add_controls(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit pontis_init(struct snd_ice1712 *ice)
+static int pontis_init(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits[] = {
/* These come first to reduce init pop noise */
@@ -805,7 +804,7 @@ static int __devinit pontis_init(struct snd_ice1712 *ice)
 * hence the driver needs to set it up properly.
*/
-static unsigned char pontis_eeprom[] __devinitdata = {
+static unsigned char pontis_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x08, /* clock 256, mpu401, spdif-in/ADC, 1DAC */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xf8, /* vol, 96k, 24bit, 192k */
@@ -822,7 +821,7 @@ static unsigned char pontis_eeprom[] __devinitdata = {
};
/* entry point */
-struct snd_ice1712_card_info snd_vt1720_pontis_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1720_pontis_cards[] = {
{
.subvendor = VT1720_SUBDEVICE_PONTIS_MS300,
.name = "Pontis MS300",
diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
index e36ddb94c38..e610339f760 100644
--- a/sound/pci/ice1712/prodigy192.c
+++ b/sound/pci/ice1712/prodigy192.c
@@ -54,7 +54,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -283,7 +282,7 @@ static int stac9460_adc_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
static int stac9460_mic_sw_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "Line In", "Mic" };
+ static const char * const texts[2] = { "Line In", "Mic" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -369,7 +368,7 @@ static const DECLARE_TLV_DB_SCALE(db_scale_adc, 0, 150, 0);
* mixers
*/
-static struct snd_kcontrol_new stac_controls[] __devinitdata = {
+static struct snd_kcontrol_new stac_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -562,7 +561,7 @@ static unsigned char prodigy192_ak4114_read(void *private_data,
static int ak4114_input_sw_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[2] = { "Toslink", "Coax" };
+ static const char * const texts[2] = { "Toslink", "Coax" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -607,7 +606,7 @@ static int ak4114_input_sw_put(struct snd_kcontrol *kcontrol,
}
-static struct snd_kcontrol_new ak4114_controls[] __devinitdata = {
+static struct snd_kcontrol_new ak4114_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "MIODIO IEC958 Capture Input",
@@ -672,7 +671,7 @@ static void stac9460_proc_init(struct snd_ice1712 *ice)
}
-static int __devinit prodigy192_add_controls(struct snd_ice1712 *ice)
+static int prodigy192_add_controls(struct snd_ice1712 *ice)
{
struct prodigy192_spec *spec = ice->spec;
unsigned int i;
@@ -728,7 +727,7 @@ static int prodigy192_miodio_exists(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit prodigy192_init(struct snd_ice1712 *ice)
+static int prodigy192_init(struct snd_ice1712 *ice)
{
static const unsigned short stac_inits_prodigy[] = {
STAC946X_RESET, 0,
@@ -784,7 +783,7 @@ static int __devinit prodigy192_init(struct snd_ice1712 *ice)
 * hence the driver needs to set it up properly.
*/
-static unsigned char prodigy71_eeprom[] __devinitdata = {
+static unsigned char prodigy71_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x6a, /* 49MHz crystal, mpu401,
* spdif-in+ 1 stereo ADC,
* 3 stereo DACs
@@ -808,7 +807,7 @@ static unsigned char prodigy71_eeprom[] __devinitdata = {
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_prodigy192_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_prodigy192_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_PRODIGY192VE,
.name = "Audiotrak Prodigy 192",
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 7bf093c51ce..2261d1e4915 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -25,7 +25,6 @@
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -299,7 +298,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
-static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
+static struct snd_kcontrol_new prodigy_hd2_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -782,7 +781,7 @@ static int wm_chswap_put(struct snd_kcontrol *kcontrol,
* mixers
*/
-static struct snd_kcontrol_new prodigy_hifi_controls[] __devinitdata = {
+static struct snd_kcontrol_new prodigy_hifi_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -939,7 +938,7 @@ static void wm_proc_init(struct snd_ice1712 *ice)
}
}
-static int __devinit prodigy_hifi_add_controls(struct snd_ice1712 *ice)
+static int prodigy_hifi_add_controls(struct snd_ice1712 *ice)
{
unsigned int i;
int err;
@@ -956,7 +955,7 @@ static int __devinit prodigy_hifi_add_controls(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit prodigy_hd2_add_controls(struct snd_ice1712 *ice)
+static int prodigy_hd2_add_controls(struct snd_ice1712 *ice)
{
unsigned int i;
int err;
@@ -977,7 +976,7 @@ static int __devinit prodigy_hd2_add_controls(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit prodigy_hifi_init(struct snd_ice1712 *ice)
+static int prodigy_hifi_init(struct snd_ice1712 *ice)
{
static unsigned short wm_inits[] = {
/* These come first to reduce init pop noise */
@@ -1115,7 +1114,7 @@ static int prodigy_hd2_resume(struct snd_ice1712 *ice)
}
#endif
-static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
+static int prodigy_hd2_init(struct snd_ice1712 *ice)
{
struct prodigy_hifi_spec *spec;
@@ -1152,7 +1151,7 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
}
-static unsigned char prodigy71hifi_eeprom[] __devinitdata = {
+static unsigned char prodigy71hifi_eeprom[] = {
0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */
0x80, /* ACLINK: I2S */
0xfc, /* I2S: vol, 96k, 24bit, 192k */
@@ -1168,7 +1167,7 @@ static unsigned char prodigy71hifi_eeprom[] __devinitdata = {
0x00, /* GPIO_STATE2 */
};
-static unsigned char prodigyhd2_eeprom[] __devinitdata = {
+static unsigned char prodigyhd2_eeprom[] = {
0x4b, /* SYSCONF: clock 512, spdif-in/ADC, 4DACs */
0x80, /* ACLINK: I2S */
0xfc, /* I2S: vol, 96k, 24bit, 192k */
@@ -1184,7 +1183,7 @@ static unsigned char prodigyhd2_eeprom[] __devinitdata = {
0x00, /* GPIO_STATE2 */
};
-static unsigned char fortissimo4_eeprom[] __devinitdata = {
+static unsigned char fortissimo4_eeprom[] = {
0x43, /* SYSCONF: clock 512, ADC, 4DACs */
0x80, /* ACLINK: I2S */
0xfc, /* I2S: vol, 96k, 24bit, 192k */
@@ -1201,7 +1200,7 @@ static unsigned char fortissimo4_eeprom[] __devinitdata = {
};
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_prodigy_hifi_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_prodigy_hifi_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_PRODIGY_HIFI,
.name = "Audiotrak Prodigy 7.1 HiFi",
diff --git a/sound/pci/ice1712/psc724.c b/sound/pci/ice1712/psc724.c
new file mode 100644
index 00000000000..302ac6ddd54
--- /dev/null
+++ b/sound/pci/ice1712/psc724.c
@@ -0,0 +1,464 @@
+/*
+ * ALSA driver for ICEnsemble VT1724 (Envy24HT)
+ *
+ * Lowlevel functions for Philips PSC724 Ultimate Edge
+ *
+ * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+
+#include "ice1712.h"
+#include "envy24ht.h"
+#include "psc724.h"
+#include "wm8766.h"
+#include "wm8776.h"
+
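+/*
+ * per-card private data: state of the two codec chips, the mute and
+ * jack-detection flags, and the delayed work that polls the headphone jack
+ */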
+struct psc724_spec {
+ struct snd_wm8766 wm8766;
+ struct snd_wm8776 wm8776;
+ bool mute_all, jack_detect;
+ struct snd_ice1712 *ice;
+ struct delayed_work hp_work;
+ bool hp_connected;
+};
+
+/****************************************************************************/
+/* PHILIPS PSC724 ULTIMATE EDGE */
+/****************************************************************************/
+/*
+ * VT1722 (Envy24GT) - 6 outputs, 4 inputs (only 2 used), 24-bit/96kHz
+ *
+ * system configuration ICE_EEP2_SYSCONF=0x42
+ * XIN1 49.152MHz
+ * no MPU401
+ * one stereo ADC, no S/PDIF receiver
+ * three stereo DACs (FRONT, REAR, CENTER+LFE)
+ *
+ * AC-Link configuration ICE_EEP2_ACLINK=0x80
+ * use I2S, not AC97
+ *
+ * I2S converters feature ICE_EEP2_I2S=0x30
+ * I2S codec has no volume/mute control feature (bug!)
+ * I2S codec does not support 96KHz or 192KHz (bug!)
+ * I2S codec 24bits
+ *
+ * S/PDIF configuration ICE_EEP2_SPDIF=0xc1
+ * Enable integrated S/PDIF transmitter
+ * internal S/PDIF out implemented
+ * No S/PDIF input
+ * External S/PDIF out implemented
+ *
+ *
+ * ** connected chips **
+ *
+ * WM8776
+ * 2-channel DAC used for main output and stereo ADC (with 10-channel MUX)
+ * AIN1: LINE IN, AIN2: CD/VIDEO, AIN3: AUX, AIN4: Front MIC, AIN5: Rear MIC
+ * Controlled by I2C using VT1722 I2C interface:
+ * MODE (pin16) -- GND
+ * CE (pin17) -- GND I2C mode (address=0x34)
+ * DI (pin18) -- SDA (VT1722 pin70)
+ * CL (pin19) -- SCLK (VT1722 pin71)
+ *
+ * WM8766
+ * 6-channel DAC used for rear & center/LFE outputs (only 4 channels used)
+ * Controlled by SPI using VT1722 GPIO pins:
+ * MODE (pin 1) -- GPIO19 (VT1722 pin99)
+ * ML/I2S (pin11) -- GPIO18 (VT1722 pin98)
+ * MC/IWL (pin12) -- GPIO17 (VT1722 pin97)
+ * MD/DM (pin13) -- GPIO16 (VT1722 pin96)
+ * MUTE (pin14) -- GPIO20 (VT1722 pin101)
+ *
+ * GPIO14 is used as input for headphone jack detection (1 = connected)
+ * GPIO22 is used as MUTE ALL output, grounding all 6 channels
+ *
+ * ** output pins and device names **
+ *
+ * 5.1ch name -- output connector color -- device (-D option)
+ *
+ * FRONT 2ch -- green -- plughw:0,0
+ * CENTER(Lch) SUBWOOFER(Rch) -- orange -- plughw:0,2,0
+ * REAR 2ch -- black -- plughw:0,2,1
+ */
+ */
+
+/* codec access low-level functions */
+
+#define GPIO_HP_JACK (1 << 14)
+#define GPIO_MUTE_SUR (1 << 20)
+#define GPIO_MUTE_ALL (1 << 22)
+
+#define JACK_INTERVAL 1000
+
+#define PSC724_SPI_DELAY 1
+
+#define PSC724_SPI_DATA (1 << 16)
+#define PSC724_SPI_CLK (1 << 17)
+#define PSC724_SPI_LOAD (1 << 18)
+#define PSC724_SPI_MASK (PSC724_SPI_DATA | PSC724_SPI_CLK | PSC724_SPI_LOAD)
+
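+/*
+ * Bit-bang one 16-bit SPI frame (7-bit register address + 9-bit data) to the
+ * WM8766 over the GPIO pins: each bit is set up with the clock low, clocked
+ * in by raising CLK, and the whole word is committed by pulsing LOAD high.
+ */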
+static void psc724_wm8766_write(struct snd_wm8766 *wm, u16 addr, u16 data)
+{
+ struct psc724_spec *spec = container_of(wm, struct psc724_spec, wm8766);
+ struct snd_ice1712 *ice = spec->ice;
+ u32 st, bits;
+ int i;
+
+ snd_ice1712_save_gpio_status(ice);
+
+ st = ((addr & 0x7f) << 9) | (data & 0x1ff);
+ snd_ice1712_gpio_set_dir(ice, ice->gpio.direction | PSC724_SPI_MASK);
+ snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask & ~PSC724_SPI_MASK);
+ bits = snd_ice1712_gpio_read(ice) & ~PSC724_SPI_MASK;
+ snd_ice1712_gpio_write(ice, bits);
+
+ for (i = 0; i < 16; i++) {
+ udelay(PSC724_SPI_DELAY);
+ bits &= ~PSC724_SPI_CLK;
+ /* MSB first */
+ st <<= 1;
+ if (st & 0x10000)
+ bits |= PSC724_SPI_DATA;
+ else
+ bits &= ~PSC724_SPI_DATA;
+ snd_ice1712_gpio_write(ice, bits);
+ /* CLOCK high */
+ udelay(PSC724_SPI_DELAY);
+ bits |= PSC724_SPI_CLK;
+ snd_ice1712_gpio_write(ice, bits);
+ }
+ /* LOAD high */
+ udelay(PSC724_SPI_DELAY);
+ bits |= PSC724_SPI_LOAD;
+ snd_ice1712_gpio_write(ice, bits);
+ /* LOAD low, DATA and CLOCK high */
+ udelay(PSC724_SPI_DELAY);
+ bits |= (PSC724_SPI_DATA | PSC724_SPI_CLK);
+ snd_ice1712_gpio_write(ice, bits);
+
+ snd_ice1712_restore_gpio_status(ice);
+}
+
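+/* the WM8776 sits on the VT1722 I2C bus at address 0x34 (see wiring above) */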
+static void psc724_wm8776_write(struct snd_wm8776 *wm, u8 addr, u8 data)
+{
+ struct psc724_spec *spec = container_of(wm, struct psc724_spec, wm8776);
+
+ snd_vt1724_write_i2c(spec->ice, 0x34, addr, data);
+}
+
+/* mute all */
+
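+/*
+ * "on" means unmuted: clearing GPIO22 (MUTE_ALL) and GPIO20 (WM8766 MUTE)
+ * enables all six output channels, setting them silences the card
+ */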
+static void psc724_set_master_switch(struct snd_ice1712 *ice, bool on)
+{
+ unsigned int bits = snd_ice1712_gpio_read(ice);
+ struct psc724_spec *spec = ice->spec;
+
+ spec->mute_all = !on;
+ if (on)
+ bits &= ~(GPIO_MUTE_ALL | GPIO_MUTE_SUR);
+ else
+ bits |= GPIO_MUTE_ALL | GPIO_MUTE_SUR;
+ snd_ice1712_gpio_write(ice, bits);
+}
+
+static bool psc724_get_master_switch(struct snd_ice1712 *ice)
+{
+ struct psc724_spec *spec = ice->spec;
+
+ return !spec->mute_all;
+}
+
+/* jack detection */
+
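+/*
+ * when headphones are plugged in, mute the speaker outputs and power up the
+ * WM8776 headphone output (clear WM8776_PWR_HPPD); on unplug do the opposite,
+ * then notify both mute controls so user space sees the change
+ */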
+static void psc724_set_jack_state(struct snd_ice1712 *ice, bool hp_connected)
+{
+ struct psc724_spec *spec = ice->spec;
+ struct snd_ctl_elem_id elem_id;
+ struct snd_kcontrol *kctl;
+ u16 power = spec->wm8776.regs[WM8776_REG_PWRDOWN] & ~WM8776_PWR_HPPD;
+
+ psc724_set_master_switch(ice, !hp_connected);
+ if (!hp_connected)
+ power |= WM8776_PWR_HPPD;
+ snd_wm8776_set_power(&spec->wm8776, power);
+ spec->hp_connected = hp_connected;
+ /* notify about master speaker mute change */
+ memset(&elem_id, 0, sizeof(elem_id));
+ elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ strncpy(elem_id.name, "Master Speakers Playback Switch",
+ sizeof(elem_id.name));
+ kctl = snd_ctl_find_id(ice->card, &elem_id);
+ snd_ctl_notify(ice->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+ /* and headphone mute change */
+ strncpy(elem_id.name, spec->wm8776.ctl[WM8776_CTL_HP_SW].name,
+ sizeof(elem_id.name));
+ kctl = snd_ctl_find_id(ice->card, &elem_id);
+ snd_ctl_notify(ice->card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id);
+}
+
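+/*
+ * delayed work handler: poll the jack-detect GPIO every JACK_INTERVAL ms and
+ * reconfigure the outputs only when the state actually changes
+ */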
+static void psc724_update_hp_jack_state(struct work_struct *work)
+{
+ struct psc724_spec *spec = container_of(work, struct psc724_spec,
+ hp_work.work);
+ struct snd_ice1712 *ice = spec->ice;
+ bool hp_connected = snd_ice1712_gpio_read(ice) & GPIO_HP_JACK;
+
+ schedule_delayed_work(&spec->hp_work, msecs_to_jiffies(JACK_INTERVAL));
+ if (hp_connected == spec->hp_connected)
+ return;
+ psc724_set_jack_state(ice, hp_connected);
+}
+
+static void psc724_set_jack_detection(struct snd_ice1712 *ice, bool on)
+{
+ struct psc724_spec *spec = ice->spec;
+
+ if (spec->jack_detect == on)
+ return;
+
+ spec->jack_detect = on;
+ if (on) {
+ bool hp_connected = snd_ice1712_gpio_read(ice) & GPIO_HP_JACK;
+ psc724_set_jack_state(ice, hp_connected);
+ schedule_delayed_work(&spec->hp_work,
+ msecs_to_jiffies(JACK_INTERVAL));
+ } else
+ cancel_delayed_work_sync(&spec->hp_work);
+}
+
+static bool psc724_get_jack_detection(struct snd_ice1712 *ice)
+{
+ struct psc724_spec *spec = ice->spec;
+
+ return spec->jack_detect;
+}
+
+/* mixer controls */
+
+struct psc724_control {
+ const char *name;
+ void (*set)(struct snd_ice1712 *ice, bool on);
+ bool (*get)(struct snd_ice1712 *ice);
+};
+
+static const struct psc724_control psc724_cont[] = {
+ {
+ .name = "Master Speakers Playback Switch",
+ .set = psc724_set_master_switch,
+ .get = psc724_get_master_switch,
+ },
+ {
+ .name = "Headphone Jack Detection Playback Switch",
+ .set = psc724_set_jack_detection,
+ .get = psc724_get_jack_detection,
+ },
+};
+
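+/* common get/put handlers for the switches above, indexed by private_value */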
+static int psc724_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ ucontrol->value.integer.value[0] = psc724_cont[n].get(ice);
+
+ return 0;
+}
+
+static int psc724_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ psc724_cont[n].set(ice, ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static const char *front_volume = "Front Playback Volume";
+static const char *front_switch = "Front Playback Switch";
+static const char *front_zc = "Front Zero Cross Detect Playback Switch";
+static const char *front_izd = "Front Infinite Zero Detect Playback Switch";
+static const char *front_phase = "Front Phase Invert Playback Switch";
+static const char *front_deemph = "Front Deemphasis Playback Switch";
+static const char *ain1_switch = "Line Capture Switch";
+static const char *ain2_switch = "CD Capture Switch";
+static const char *ain3_switch = "AUX Capture Switch";
+static const char *ain4_switch = "Front Mic Capture Switch";
+static const char *ain5_switch = "Rear Mic Capture Switch";
+static const char *rear_volume = "Surround Playback Volume";
+static const char *clfe_volume = "CLFE Playback Volume";
+static const char *rear_switch = "Surround Playback Switch";
+static const char *clfe_switch = "CLFE Playback Switch";
+static const char *rear_phase = "Surround Phase Invert Playback Switch";
+static const char *clfe_phase = "CLFE Phase Invert Playback Switch";
+static const char *rear_deemph = "Surround Deemphasis Playback Switch";
+static const char *clfe_deemph = "CLFE Deemphasis Playback Switch";
+static const char *rear_clfe_izd = "Rear Infinite Zero Detect Playback Switch";
+static const char *rear_clfe_zc = "Rear Zero Cross Detect Playback Switch";
+
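+/*
+ * rename the generic WM8776/WM8766 control templates to the card-specific
+ * names above (a NULL name drops that control), then register the two
+ * GPIO-backed switches from psc724_cont[]
+ */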
+static int psc724_add_controls(struct snd_ice1712 *ice)
+{
+ struct snd_kcontrol_new cont;
+ struct snd_kcontrol *ctl;
+ int err, i;
+ struct psc724_spec *spec = ice->spec;
+
+ spec->wm8776.ctl[WM8776_CTL_DAC_VOL].name = front_volume;
+ spec->wm8776.ctl[WM8776_CTL_DAC_SW].name = front_switch;
+ spec->wm8776.ctl[WM8776_CTL_DAC_ZC_SW].name = front_zc;
+ spec->wm8776.ctl[WM8776_CTL_AUX_SW].name = NULL;
+ spec->wm8776.ctl[WM8776_CTL_DAC_IZD_SW].name = front_izd;
+ spec->wm8776.ctl[WM8776_CTL_PHASE_SW].name = front_phase;
+ spec->wm8776.ctl[WM8776_CTL_DEEMPH_SW].name = front_deemph;
+ spec->wm8776.ctl[WM8776_CTL_INPUT1_SW].name = ain1_switch;
+ spec->wm8776.ctl[WM8776_CTL_INPUT2_SW].name = ain2_switch;
+ spec->wm8776.ctl[WM8776_CTL_INPUT3_SW].name = ain3_switch;
+ spec->wm8776.ctl[WM8776_CTL_INPUT4_SW].name = ain4_switch;
+ spec->wm8776.ctl[WM8776_CTL_INPUT5_SW].name = ain5_switch;
+ snd_wm8776_build_controls(&spec->wm8776);
+ spec->wm8766.ctl[WM8766_CTL_CH1_VOL].name = rear_volume;
+ spec->wm8766.ctl[WM8766_CTL_CH2_VOL].name = clfe_volume;
+ spec->wm8766.ctl[WM8766_CTL_CH3_VOL].name = NULL;
+ spec->wm8766.ctl[WM8766_CTL_CH1_SW].name = rear_switch;
+ spec->wm8766.ctl[WM8766_CTL_CH2_SW].name = clfe_switch;
+ spec->wm8766.ctl[WM8766_CTL_CH3_SW].name = NULL;
+ spec->wm8766.ctl[WM8766_CTL_PHASE1_SW].name = rear_phase;
+ spec->wm8766.ctl[WM8766_CTL_PHASE2_SW].name = clfe_phase;
+ spec->wm8766.ctl[WM8766_CTL_PHASE3_SW].name = NULL;
+ spec->wm8766.ctl[WM8766_CTL_DEEMPH1_SW].name = rear_deemph;
+ spec->wm8766.ctl[WM8766_CTL_DEEMPH2_SW].name = clfe_deemph;
+ spec->wm8766.ctl[WM8766_CTL_DEEMPH3_SW].name = NULL;
+ spec->wm8766.ctl[WM8766_CTL_IZD_SW].name = rear_clfe_izd;
+ spec->wm8766.ctl[WM8766_CTL_ZC_SW].name = rear_clfe_zc;
+ snd_wm8766_build_controls(&spec->wm8766);
+
+ memset(&cont, 0, sizeof(cont));
+ cont.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ for (i = 0; i < ARRAY_SIZE(psc724_cont); i++) {
+ cont.private_value = i;
+ cont.name = psc724_cont[i].name;
+ cont.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ cont.info = snd_ctl_boolean_mono_info;
+ cont.get = psc724_ctl_get;
+ cont.put = psc724_ctl_put;
+ ctl = snd_ctl_new1(&cont, ice);
+ if (!ctl)
+ return -ENOMEM;
+ err = snd_ctl_add(ice->card, ctl);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void psc724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate)
+{
+ struct psc724_spec *spec = ice->spec;
+ /* restore codec volume settings after rate change (PMCLK stop) */
+ snd_wm8776_volume_restore(&spec->wm8776);
+ snd_wm8766_volume_restore(&spec->wm8766);
+}
+
+/* power management */
+
+#ifdef CONFIG_PM_SLEEP
+static int psc724_resume(struct snd_ice1712 *ice)
+{
+ struct psc724_spec *spec = ice->spec;
+
+ snd_wm8776_resume(&spec->wm8776);
+ snd_wm8766_resume(&spec->wm8766);
+
+ return 0;
+}
+#endif
+
+/* init */
+
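+/*
+ * chip_init: allocate the card-private data, hook up the codec write ops,
+ * enable suspend/resume support and start headphone jack detection
+ */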
+static int psc724_init(struct snd_ice1712 *ice)
+{
+ struct psc724_spec *spec;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ ice->spec = spec;
+ spec->ice = ice;
+
+ ice->num_total_dacs = 6;
+ ice->num_total_adcs = 2;
+ spec->wm8776.ops.write = psc724_wm8776_write;
+ spec->wm8776.card = ice->card;
+ snd_wm8776_init(&spec->wm8776);
+ spec->wm8766.ops.write = psc724_wm8766_write;
+ spec->wm8766.card = ice->card;
+#ifdef CONFIG_PM_SLEEP
+ ice->pm_resume = psc724_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
+ snd_wm8766_init(&spec->wm8766);
+ snd_wm8766_set_if(&spec->wm8766,
+ WM8766_IF_FMT_I2S | WM8766_IF_IWL_24BIT);
+ ice->gpio.set_pro_rate = psc724_set_pro_rate;
+ INIT_DELAYED_WORK(&spec->hp_work, psc724_update_hp_jack_state);
+ psc724_set_jack_detection(ice, true);
+ return 0;
+}
+
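+/* chip_exit callback: cancel the jack-poll work before the card is freed */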
+static void psc724_exit(struct snd_ice1712 *ice)
+{
+ struct psc724_spec *spec = ice->spec;
+
+ cancel_delayed_work_sync(&spec->hp_work);
+}
+
+/* PSC724 has buggy EEPROM (no 96&192kHz, all FFh GPIOs), so override it here */
+static unsigned char psc724_eeprom[] = {
+ [ICE_EEP2_SYSCONF] = 0x42, /* 49.152MHz, 1 ADC, 3 DACs */
+ [ICE_EEP2_ACLINK] = 0x80, /* I2S */
+ [ICE_EEP2_I2S] = 0xf0, /* I2S volume, 96kHz, 24bit */
+ [ICE_EEP2_SPDIF] = 0xc1, /* spdif out-en, out-int, no input */
+ /* GPIO outputs */
+ [ICE_EEP2_GPIO_DIR2] = 0x5f, /* MUTE_ALL,WM8766 MUTE/MODE/ML/MC/MD */
+ /* GPIO write enable */
+ [ICE_EEP2_GPIO_MASK] = 0xff, /* read-only */
+ [ICE_EEP2_GPIO_MASK1] = 0xff, /* read-only */
+ [ICE_EEP2_GPIO_MASK2] = 0xa0, /* MUTE_ALL,WM8766 MUTE/MODE/ML/MC/MD */
+ /* GPIO initial state */
+ [ICE_EEP2_GPIO_STATE2] = 0x20, /* unmuted, all WM8766 pins low */
+};
+
+struct snd_ice1712_card_info snd_vt1724_psc724_cards[] = {
+ {
+ .subvendor = VT1724_SUBDEVICE_PSC724,
+ .name = "Philips PSC724 Ultimate Edge",
+ .model = "psc724",
+ .chip_init = psc724_init,
+ .chip_exit = psc724_exit,
+ .build_controls = psc724_add_controls,
+ .eeprom_size = sizeof(psc724_eeprom),
+ .eeprom_data = psc724_eeprom,
+ },
+ {} /*terminator*/
+};
diff --git a/sound/pci/ice1712/psc724.h b/sound/pci/ice1712/psc724.h
new file mode 100644
index 00000000000..858e5fd0eeb
--- /dev/null
+++ b/sound/pci/ice1712/psc724.h
@@ -0,0 +1,13 @@
+#ifndef __SOUND_PSC724_H
+#define __SOUND_PSC724_H
+
+/* ID */
+#define PSC724_DEVICE_DESC \
+ "{Philips,PSC724 Ultimate Edge},"
+
+#define VT1724_SUBDEVICE_PSC724 0xab170619
+
+/* entry struct */
+extern struct snd_ice1712_card_info snd_vt1724_psc724_cards[];
+
+#endif /* __SOUND_PSC724_H */
diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c
index 1948632787e..975e0357bd5 100644
--- a/sound/pci/ice1712/quartet.c
+++ b/sound/pci/ice1712/quartet.c
@@ -22,7 +22,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -47,7 +46,7 @@ struct qtet_kcontrol_private {
unsigned int bit;
void (*set_register)(struct snd_ice1712 *ice, unsigned int val);
unsigned int (*get_register)(struct snd_ice1712 *ice);
- unsigned char *texts[2];
+ unsigned char * const texts[2];
};
enum {
@@ -63,7 +62,7 @@ enum {
OUT34_MON12,
};
-static char *ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS",
+static const char * const ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS",
"Word Clock 256xFS"};
/* chip address on I2C bus */
@@ -387,7 +386,7 @@ static const struct snd_akm4xxx_adc_channel qtet_adc[] = {
AK_CONTROL(PCM_34_CAPTURE_VOLUME, 2),
};
-static struct snd_akm4xxx akm_qtet_dac __devinitdata = {
+static struct snd_akm4xxx akm_qtet_dac = {
.type = SND_AK4620,
.num_dacs = 4, /* DAC1 - Output 12
*/
@@ -551,7 +550,8 @@ static int qtet_mute_put(struct snd_kcontrol *kcontrol,
static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[3] = {"Line In 1/2", "Mic", "Mic + Low-cut"};
+ static const char * const texts[3] =
+ {"Line In 1/2", "Mic", "Mic + Low-cut"};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = ARRAY_SIZE(texts);
@@ -758,7 +758,7 @@ static int qtet_sw_put(struct snd_kcontrol *kcontrol,
.put = qtet_sw_put,\
.private_value = xpriv }
-static struct snd_kcontrol_new qtet_controls[] __devinitdata = {
+static struct snd_kcontrol_new qtet_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -795,17 +795,17 @@ static struct snd_kcontrol_new qtet_controls[] __devinitdata = {
QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12),
};
-static char *slave_vols[] __devinitdata = {
+static char *slave_vols[] = {
PCM_12_PLAYBACK_VOLUME,
PCM_34_PLAYBACK_VOLUME,
NULL
};
-static __devinitdata
+static
DECLARE_TLV_DB_SCALE(qtet_master_db_scale, -6350, 50, 1);
-static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
- const char *name)
+static struct snd_kcontrol *ctl_find(struct snd_card *card,
+ const char *name)
{
struct snd_ctl_elem_id sid;
memset(&sid, 0, sizeof(sid));
@@ -815,8 +815,8 @@ static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
return snd_ctl_find_id(card, &sid);
}
-static void __devinit add_slaves(struct snd_card *card,
- struct snd_kcontrol *master, char **list)
+static void add_slaves(struct snd_card *card,
+ struct snd_kcontrol *master, char * const *list)
{
for (; *list; list++) {
struct snd_kcontrol *slave = ctl_find(card, *list);
@@ -825,7 +825,7 @@ static void __devinit add_slaves(struct snd_card *card,
}
}
-static int __devinit qtet_add_controls(struct snd_ice1712 *ice)
+static int qtet_add_controls(struct snd_ice1712 *ice)
{
struct qtet_spec *spec = ice->spec;
int err, i;
@@ -1007,7 +1007,7 @@ static void qtet_spdif_in_open(struct snd_ice1712 *ice,
/*
* initialize the chip
*/
-static int __devinit qtet_init(struct snd_ice1712 *ice)
+static int qtet_init(struct snd_ice1712 *ice)
{
static const unsigned char ak4113_init_vals[] = {
/* AK4113_REG_PWRDN */ AK4113_RST | AK4113_PWN |
@@ -1095,7 +1095,7 @@ static int __devinit qtet_init(struct snd_ice1712 *ice)
return 0;
}
-static unsigned char qtet_eeprom[] __devinitdata = {
+static unsigned char qtet_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x28, /* clock 256(24MHz), mpu401, 1xADC,
1xDACs, SPDIF in */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
@@ -1116,7 +1116,7 @@ static unsigned char qtet_eeprom[] __devinitdata = {
};
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_qtet_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_qtet_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_QTET,
.name = "Infrasonic Quartet",
diff --git a/sound/pci/ice1712/revo.c b/sound/pci/ice1712/revo.c
index b508bb360b9..7641080a9b5 100644
--- a/sound/pci/ice1712/revo.c
+++ b/sound/pci/ice1712/revo.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -235,7 +234,7 @@ static const struct snd_akm4xxx_adc_channel revo51_adc[] = {
},
};
-static struct snd_akm4xxx akm_revo_front __devinitdata = {
+static struct snd_akm4xxx akm_revo_front = {
.type = SND_AK4381,
.num_dacs = 2,
.ops = {
@@ -244,7 +243,7 @@ static struct snd_akm4xxx akm_revo_front __devinitdata = {
.dac_info = revo71_front,
};
-static struct snd_ak4xxx_private akm_revo_front_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_revo_front_priv = {
.caddr = 1,
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
@@ -256,7 +255,7 @@ static struct snd_ak4xxx_private akm_revo_front_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_revo_surround __devinitdata = {
+static struct snd_akm4xxx akm_revo_surround = {
.type = SND_AK4355,
.idx_offset = 1,
.num_dacs = 6,
@@ -266,7 +265,7 @@ static struct snd_akm4xxx akm_revo_surround __devinitdata = {
.dac_info = revo71_surround,
};
-static struct snd_ak4xxx_private akm_revo_surround_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_revo_surround_priv = {
.caddr = 3,
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
@@ -278,7 +277,7 @@ static struct snd_ak4xxx_private akm_revo_surround_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_revo51 __devinitdata = {
+static struct snd_akm4xxx akm_revo51 = {
.type = SND_AK4358,
.num_dacs = 8,
.ops = {
@@ -287,7 +286,7 @@ static struct snd_akm4xxx akm_revo51 __devinitdata = {
.dac_info = revo51_dac,
};
-static struct snd_ak4xxx_private akm_revo51_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_revo51_priv = {
.caddr = 2,
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
@@ -299,13 +298,13 @@ static struct snd_ak4xxx_private akm_revo51_priv __devinitdata = {
.mask_flags = 0,
};
-static struct snd_akm4xxx akm_revo51_adc __devinitdata = {
+static struct snd_akm4xxx akm_revo51_adc = {
.type = SND_AK5365,
.num_adcs = 2,
.adc_info = revo51_adc,
};
-static struct snd_ak4xxx_private akm_revo51_adc_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_revo51_adc_priv = {
.caddr = 2,
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
@@ -346,7 +345,7 @@ static const struct snd_akm4xxx_dac_channel ap192_dac[] = {
AK_DAC("PCM Playback Volume", 2)
};
-static struct snd_akm4xxx akm_ap192 __devinitdata = {
+static struct snd_akm4xxx akm_ap192 = {
.type = SND_AK4358,
.num_dacs = 2,
.ops = {
@@ -355,7 +354,7 @@ static struct snd_akm4xxx akm_ap192 __devinitdata = {
.dac_info = ap192_dac,
};
-static struct snd_ak4xxx_private akm_ap192_priv __devinitdata = {
+static struct snd_ak4xxx_private akm_ap192_priv = {
.caddr = 2,
.cif = 0,
.data_mask = VT1724_REVO_CDOUT,
@@ -468,7 +467,7 @@ static unsigned char ap192_ak4114_read(void *private_data, unsigned char addr)
return data;
}
-static int __devinit ap192_ak4114_init(struct snd_ice1712 *ice)
+static int ap192_ak4114_init(struct snd_ice1712 *ice)
{
static const unsigned char ak4114_init_vals[] = {
AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1,
@@ -496,7 +495,7 @@ static int __devinit ap192_ak4114_init(struct snd_ice1712 *ice)
return 0; /* error ignored; it's no fatal error */
}
-static int __devinit revo_init(struct snd_ice1712 *ice)
+static int revo_init(struct snd_ice1712 *ice)
{
struct snd_akm4xxx *ak;
int err;
@@ -574,7 +573,7 @@ static int __devinit revo_init(struct snd_ice1712 *ice)
}
-static int __devinit revo_add_controls(struct snd_ice1712 *ice)
+static int revo_add_controls(struct snd_ice1712 *ice)
{
struct revo51_spec *spec;
int err;
@@ -607,7 +606,7 @@ static int __devinit revo_add_controls(struct snd_ice1712 *ice)
}
/* entry point */
-struct snd_ice1712_card_info snd_vt1724_revo_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_revo_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_REVOLUTION71,
.name = "M Audio Revolution-7.1",
diff --git a/sound/pci/ice1712/se.c b/sound/pci/ice1712/se.c
index 69673b95869..ffd894bb450 100644
--- a/sound/pci/ice1712/se.c
+++ b/sound/pci/ice1712/se.c
@@ -22,7 +22,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -114,7 +113,7 @@ struct se_spec {
/* WM8740 interface */
/****************************************************************************/
-static void __devinit se200pci_WM8740_init(struct snd_ice1712 *ice)
+static void se200pci_WM8740_init(struct snd_ice1712 *ice)
{
/* nothing to do */
}
@@ -196,7 +195,7 @@ static void se200pci_WM8766_set_volume(struct snd_ice1712 *ice, int ch,
}
}
-static void __devinit se200pci_WM8766_init(struct snd_ice1712 *ice)
+static void se200pci_WM8766_init(struct snd_ice1712 *ice)
{
se200pci_WM8766_write(ice, 0x1f, 0x000); /* RESET ALL */
udelay(10);
@@ -253,7 +252,7 @@ static void se200pci_WM8776_set_input_volume(struct snd_ice1712 *ice,
se200pci_WM8776_write(ice, 0x0f, vol2 | 0x100);
}
-static const char *se200pci_sel[] = {
+static const char * const se200pci_sel[] = {
"LINE-IN", "CD-IN", "MIC-IN", "ALL-MIX", NULL
};
@@ -278,7 +277,7 @@ static void se200pci_WM8776_set_afl(struct snd_ice1712 *ice, unsigned int afl)
se200pci_WM8776_write(ice, 0x16, 0x001);
}
-static const char *se200pci_agc[] = {
+static const char * const se200pci_agc[] = {
"Off", "LimiterMode", "ALCMode", NULL
};
@@ -300,10 +299,10 @@ static void se200pci_WM8776_set_agc(struct snd_ice1712 *ice, unsigned int agc)
}
}
-static void __devinit se200pci_WM8776_init(struct snd_ice1712 *ice)
+static void se200pci_WM8776_init(struct snd_ice1712 *ice)
{
int i;
- static unsigned short __devinitdata default_values[] = {
+ static unsigned short default_values[] = {
0x100, 0x100, 0x100,
0x100, 0x100, 0x100,
0x000, 0x090, 0x000, 0x000,
@@ -352,7 +351,7 @@ static void se200pci_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate)
}
struct se200pci_control {
- char *name;
+ const char *name;
enum {
WM8766,
WM8776in,
@@ -363,7 +362,7 @@ struct se200pci_control {
} target;
enum { VOLUME1, VOLUME2, BOOLEAN, ENUM } type;
int ch;
- const char **member;
+ const char * const *member;
const char *comment;
};
@@ -421,7 +420,7 @@ static const struct se200pci_control se200pci_cont[] = {
static int se200pci_get_enum_count(int n)
{
- const char **member;
+ const char * const *member;
int c;
member = se200pci_cont[n].member;
@@ -600,7 +599,7 @@ static int se200pci_cont_enum_put(struct snd_kcontrol *kc,
static const DECLARE_TLV_DB_SCALE(db_scale_gain1, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(db_scale_gain2, -10350, 50, 1);
-static int __devinit se200pci_add_controls(struct snd_ice1712 *ice)
+static int se200pci_add_controls(struct snd_ice1712 *ice)
{
int i;
struct snd_kcontrol_new cont;
@@ -678,7 +677,7 @@ static int __devinit se200pci_add_controls(struct snd_ice1712 *ice)
/* probe/initialize/setup */
/****************************************************************************/
-static int __devinit se_init(struct snd_ice1712 *ice)
+static int se_init(struct snd_ice1712 *ice)
{
struct se_spec *spec;
@@ -706,7 +705,7 @@ static int __devinit se_init(struct snd_ice1712 *ice)
return -ENOENT;
}
-static int __devinit se_add_controls(struct snd_ice1712 *ice)
+static int se_add_controls(struct snd_ice1712 *ice)
{
int err;
@@ -723,7 +722,7 @@ static int __devinit se_add_controls(struct snd_ice1712 *ice)
/* entry point */
/****************************************************************************/
-static unsigned char se200pci_eeprom[] __devinitdata = {
+static unsigned char se200pci_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x4b, /* 49.152Hz, spdif-in/ADC, 4DACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0x78, /* 96k-ok, 24bit, 192k-ok */
@@ -742,7 +741,7 @@ static unsigned char se200pci_eeprom[] __devinitdata = {
[ICE_EEP2_GPIO_STATE2] = 0x07, /* WM8766 ML/MC/MD */
};
-static unsigned char se90pci_eeprom[] __devinitdata = {
+static unsigned char se90pci_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x4b, /* 49.152Hz, spdif-in/ADC, 4DACs */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0x78, /* 96k-ok, 24bit, 192k-ok */
@@ -751,7 +750,7 @@ static unsigned char se90pci_eeprom[] __devinitdata = {
/* ALL GPIO bits are in input mode */
};
-struct snd_ice1712_card_info snd_vt1724_se_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_se_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_SE200PCI,
.name = "ONKYO SE200PCI",
diff --git a/sound/pci/ice1712/vt1720_mobo.c b/sound/pci/ice1712/vt1720_mobo.c
index 4c551e147c0..5dbb867e642 100644
--- a/sound/pci/ice1712/vt1720_mobo.c
+++ b/sound/pci/ice1712/vt1720_mobo.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -32,7 +31,7 @@
#include "vt1720_mobo.h"
-static int __devinit k8x800_init(struct snd_ice1712 *ice)
+static int k8x800_init(struct snd_ice1712 *ice)
{
ice->vt1720 = 1;
@@ -46,7 +45,7 @@ static int __devinit k8x800_init(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit k8x800_add_controls(struct snd_ice1712 *ice)
+static int k8x800_add_controls(struct snd_ice1712 *ice)
{
/* FIXME: needs some quirks for VT1616? */
return 0;
@@ -54,7 +53,7 @@ static int __devinit k8x800_add_controls(struct snd_ice1712 *ice)
/* EEPROM image */
-static unsigned char k8x800_eeprom[] __devinitdata = {
+static unsigned char k8x800_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x01, /* clock 256, 1ADC, 2DACs */
[ICE_EEP2_ACLINK] = 0x02, /* ACLINK, packed */
[ICE_EEP2_I2S] = 0x00, /* - */
@@ -70,7 +69,7 @@ static unsigned char k8x800_eeprom[] __devinitdata = {
[ICE_EEP2_GPIO_STATE2] = 0x00, /* - */
};
-static unsigned char sn25p_eeprom[] __devinitdata = {
+static unsigned char sn25p_eeprom[] = {
[ICE_EEP2_SYSCONF] = 0x01, /* clock 256, 1ADC, 2DACs */
[ICE_EEP2_ACLINK] = 0x02, /* ACLINK, packed */
[ICE_EEP2_I2S] = 0x00, /* - */
@@ -88,7 +87,7 @@ static unsigned char sn25p_eeprom[] __devinitdata = {
/* entry point */
-struct snd_ice1712_card_info snd_vt1720_mobo_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1720_mobo_cards[] = {
{
.subvendor = VT1720_SUBDEVICE_K8X800,
.name = "Albatron K8X800 Pro II",
diff --git a/sound/pci/ice1712/wm8766.c b/sound/pci/ice1712/wm8766.c
new file mode 100644
index 00000000000..8072adeecf6
--- /dev/null
+++ b/sound/pci/ice1712/wm8766.c
@@ -0,0 +1,361 @@
+/*
+ * ALSA driver for ICEnsemble VT17xx
+ *
+ * Lowlevel functions for WM8766 codec
+ *
+ * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+#include "wm8766.h"
+
+/* low-level access */
+
+static void snd_wm8766_write(struct snd_wm8766 *wm, u16 addr, u16 data)
+{
+ if (addr < WM8766_REG_RESET)
+ wm->regs[addr] = data;
+ wm->ops.write(wm, addr, data);
+}
+
+/* mixer controls */
+
+static const DECLARE_TLV_DB_SCALE(wm8766_tlv, -12750, 50, 1);
+
+static struct snd_wm8766_ctl snd_wm8766_default_ctl[WM8766_CTL_COUNT] = {
+ [WM8766_CTL_CH1_VOL] = {
+ .name = "Channel 1 Playback Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8766_tlv,
+ .reg1 = WM8766_REG_DACL1,
+ .reg2 = WM8766_REG_DACR1,
+ .mask1 = WM8766_VOL_MASK,
+ .mask2 = WM8766_VOL_MASK,
+ .max = 0xff,
+ .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE,
+ },
+ [WM8766_CTL_CH2_VOL] = {
+ .name = "Channel 2 Playback Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8766_tlv,
+ .reg1 = WM8766_REG_DACL2,
+ .reg2 = WM8766_REG_DACR2,
+ .mask1 = WM8766_VOL_MASK,
+ .mask2 = WM8766_VOL_MASK,
+ .max = 0xff,
+ .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE,
+ },
+ [WM8766_CTL_CH3_VOL] = {
+ .name = "Channel 3 Playback Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8766_tlv,
+ .reg1 = WM8766_REG_DACL3,
+ .reg2 = WM8766_REG_DACR3,
+ .mask1 = WM8766_VOL_MASK,
+ .mask2 = WM8766_VOL_MASK,
+ .max = 0xff,
+ .flags = WM8766_FLAG_STEREO | WM8766_FLAG_VOL_UPDATE,
+ },
+ [WM8766_CTL_CH1_SW] = {
+ .name = "Channel 1 Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_MUTE1,
+ .flags = WM8766_FLAG_INVERT,
+ },
+ [WM8766_CTL_CH2_SW] = {
+ .name = "Channel 2 Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_MUTE2,
+ .flags = WM8766_FLAG_INVERT,
+ },
+ [WM8766_CTL_CH3_SW] = {
+ .name = "Channel 3 Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_MUTE3,
+ .flags = WM8766_FLAG_INVERT,
+ },
+ [WM8766_CTL_PHASE1_SW] = {
+ .name = "Channel 1 Phase Invert Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_IFCTRL,
+ .mask1 = WM8766_PHASE_INVERT1,
+ },
+ [WM8766_CTL_PHASE2_SW] = {
+ .name = "Channel 2 Phase Invert Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_IFCTRL,
+ .mask1 = WM8766_PHASE_INVERT2,
+ },
+ [WM8766_CTL_PHASE3_SW] = {
+ .name = "Channel 3 Phase Invert Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_IFCTRL,
+ .mask1 = WM8766_PHASE_INVERT3,
+ },
+ [WM8766_CTL_DEEMPH1_SW] = {
+ .name = "Channel 1 Deemphasis Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_DEEMP1,
+ },
+ [WM8766_CTL_DEEMPH2_SW] = {
+ .name = "Channel 2 Deemphasis Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_DEEMP2,
+ },
+ [WM8766_CTL_DEEMPH3_SW] = {
+ .name = "Channel 3 Deemphasis Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_DEEMP3,
+ },
+ [WM8766_CTL_IZD_SW] = {
+ .name = "Infinite Zero Detect Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL1,
+ .mask1 = WM8766_DAC_IZD,
+ },
+ [WM8766_CTL_ZC_SW] = {
+ .name = "Zero Cross Detect Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8766_REG_DACCTRL2,
+ .mask1 = WM8766_DAC2_ZCD,
+ .flags = WM8766_FLAG_INVERT,
+ },
+};
+
+/* exported functions */
+
+void snd_wm8766_init(struct snd_wm8766 *wm)
+{
+ int i;
+ static const u16 default_values[] = {
+ 0x000, 0x100,
+ 0x120, 0x000,
+ 0x000, 0x100, 0x000, 0x100, 0x000,
+ 0x000, 0x080,
+ };
+
+ memcpy(wm->ctl, snd_wm8766_default_ctl, sizeof(wm->ctl));
+
+ snd_wm8766_write(wm, WM8766_REG_RESET, 0x00); /* reset */
+ udelay(10);
+ /* load defaults */
+ for (i = 0; i < ARRAY_SIZE(default_values); i++)
+ snd_wm8766_write(wm, i, default_values[i]);
+}
+
+void snd_wm8766_resume(struct snd_wm8766 *wm)
+{
+ int i;
+
+ for (i = 0; i < WM8766_REG_COUNT; i++)
+ snd_wm8766_write(wm, i, wm->regs[i]);
+}
+
+void snd_wm8766_set_if(struct snd_wm8766 *wm, u16 dac)
+{
+ u16 val = wm->regs[WM8766_REG_IFCTRL] & ~WM8766_IF_MASK;
+
+ dac &= WM8766_IF_MASK;
+ snd_wm8766_write(wm, WM8766_REG_IFCTRL, val | dac);
+}
+
+void snd_wm8766_set_master_mode(struct snd_wm8766 *wm, u16 mode)
+{
+ u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_MSTR_MASK;
+
+ mode &= WM8766_DAC3_MSTR_MASK;
+ snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | mode);
+}
+
+void snd_wm8766_set_power(struct snd_wm8766 *wm, u16 power)
+{
+ u16 val = wm->regs[WM8766_REG_DACCTRL3] & ~WM8766_DAC3_POWER_MASK;
+
+ power &= WM8766_DAC3_POWER_MASK;
+ snd_wm8766_write(wm, WM8766_REG_DACCTRL3, val | power);
+}
+
+void snd_wm8766_volume_restore(struct snd_wm8766 *wm)
+{
+ u16 val = wm->regs[WM8766_REG_DACR1];
+ /* restore volume after MCLK stopped */
+ snd_wm8766_write(wm, WM8766_REG_DACR1, val | WM8766_VOL_UPDATE);
+}
+
+/* mixer callbacks */
+
+static int snd_wm8766_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = (wm->ctl[n].flags & WM8766_FLAG_STEREO) ? 2 : 1;
+ uinfo->value.integer.min = wm->ctl[n].min;
+ uinfo->value.integer.max = wm->ctl[n].max;
+
+ return 0;
+}
+
+static int snd_wm8766_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ return snd_ctl_enum_info(uinfo, 1, wm->ctl[n].max,
+ wm->ctl[n].enum_names);
+}
+
+static int snd_wm8766_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+ u16 val1, val2;
+
+ if (wm->ctl[n].get)
+ wm->ctl[n].get(wm, &val1, &val2);
+ else {
+ val1 = wm->regs[wm->ctl[n].reg1] & wm->ctl[n].mask1;
+ val1 >>= __ffs(wm->ctl[n].mask1);
+ if (wm->ctl[n].flags & WM8766_FLAG_STEREO) {
+ val2 = wm->regs[wm->ctl[n].reg2] & wm->ctl[n].mask2;
+ val2 >>= __ffs(wm->ctl[n].mask2);
+ if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE)
+ val2 &= ~WM8766_VOL_UPDATE;
+ }
+ }
+ if (wm->ctl[n].flags & WM8766_FLAG_INVERT) {
+ val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min);
+ val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
+ }
+ ucontrol->value.integer.value[0] = val1;
+ if (wm->ctl[n].flags & WM8766_FLAG_STEREO)
+ ucontrol->value.integer.value[1] = val2;
+
+ return 0;
+}
+
+static int snd_wm8766_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_wm8766 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+ u16 val, regval1, regval2;
+
+ /* this also works for enums because value is a union */
+ regval1 = ucontrol->value.integer.value[0];
+ regval2 = ucontrol->value.integer.value[1];
+ if (wm->ctl[n].flags & WM8766_FLAG_INVERT) {
+ regval1 = wm->ctl[n].max - (regval1 - wm->ctl[n].min);
+ regval2 = wm->ctl[n].max - (regval2 - wm->ctl[n].min);
+ }
+ if (wm->ctl[n].set)
+ wm->ctl[n].set(wm, regval1, regval2);
+ else {
+ val = wm->regs[wm->ctl[n].reg1] & ~wm->ctl[n].mask1;
+ val |= regval1 << __ffs(wm->ctl[n].mask1);
+ /* both stereo controls in one register */
+ if (wm->ctl[n].flags & WM8766_FLAG_STEREO &&
+ wm->ctl[n].reg1 == wm->ctl[n].reg2) {
+ val &= ~wm->ctl[n].mask2;
+ val |= regval2 << __ffs(wm->ctl[n].mask2);
+ }
+ snd_wm8766_write(wm, wm->ctl[n].reg1, val);
+ /* stereo controls in different registers */
+ if (wm->ctl[n].flags & WM8766_FLAG_STEREO &&
+ wm->ctl[n].reg1 != wm->ctl[n].reg2) {
+ val = wm->regs[wm->ctl[n].reg2] & ~wm->ctl[n].mask2;
+ val |= regval2 << __ffs(wm->ctl[n].mask2);
+ if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE)
+ val |= WM8766_VOL_UPDATE;
+ snd_wm8766_write(wm, wm->ctl[n].reg2, val);
+ }
+ }
+
+ return 0;
+}
+
+static int snd_wm8766_add_control(struct snd_wm8766 *wm, int num)
+{
+ struct snd_kcontrol_new cont;
+ struct snd_kcontrol *ctl;
+
+ memset(&cont, 0, sizeof(cont));
+ cont.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ cont.private_value = num;
+ cont.name = wm->ctl[num].name;
+ cont.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ if (wm->ctl[num].flags & WM8766_FLAG_LIM ||
+ wm->ctl[num].flags & WM8766_FLAG_ALC)
+ cont.access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ cont.tlv.p = NULL;
+ cont.get = snd_wm8766_ctl_get;
+ cont.put = snd_wm8766_ctl_put;
+
+ switch (wm->ctl[num].type) {
+ case SNDRV_CTL_ELEM_TYPE_INTEGER:
+ cont.info = snd_wm8766_volume_info;
+ cont.access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ cont.tlv.p = wm->ctl[num].tlv;
+ break;
+ case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
+ wm->ctl[num].max = 1;
+ if (wm->ctl[num].flags & WM8766_FLAG_STEREO)
+ cont.info = snd_ctl_boolean_stereo_info;
+ else
+ cont.info = snd_ctl_boolean_mono_info;
+ break;
+ case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
+ cont.info = snd_wm8766_enum_info;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctl = snd_ctl_new1(&cont, wm);
+ if (!ctl)
+ return -ENOMEM;
+ wm->ctl[num].kctl = ctl;
+
+ return snd_ctl_add(wm->card, ctl);
+}
+
+int snd_wm8766_build_controls(struct snd_wm8766 *wm)
+{
+ int err, i;
+
+ for (i = 0; i < WM8766_CTL_COUNT; i++)
+ if (wm->ctl[i].name) {
+ err = snd_wm8766_add_control(wm, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
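
A minimal usage sketch of the new WM8766 helper, mirroring what psc724_init() above does: the card driver supplies a low-level write callback, initializes the codec, picks an interface format, and builds the mixer controls. my_wm8766_write() and my_card_wm8766_setup() are hypothetical names, and the I2S/24-bit format is only an example.

static void my_wm8766_write(struct snd_wm8766 *wm, u16 addr, u16 data)
{
	/* card-specific: clock out one 16-bit frame, e.g. (addr << 9) | data */
}

static int my_card_wm8766_setup(struct snd_card *card, struct snd_wm8766 *wm)
{
	wm->card = card;
	wm->ops.write = my_wm8766_write;
	snd_wm8766_init(wm);			/* reset codec, load register defaults */
	snd_wm8766_set_if(wm, WM8766_IF_FMT_I2S | WM8766_IF_IWL_24BIT);
	return snd_wm8766_build_controls(wm);	/* create the ALSA mixer controls */
}
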
diff --git a/sound/pci/ice1712/wm8766.h b/sound/pci/ice1712/wm8766.h
new file mode 100644
index 00000000000..c119f84bd2c
--- /dev/null
+++ b/sound/pci/ice1712/wm8766.h
@@ -0,0 +1,163 @@
+#ifndef __SOUND_WM8766_H
+#define __SOUND_WM8766_H
+
+/*
+ * ALSA driver for ICEnsemble VT17xx
+ *
+ * Lowlevel functions for WM8766 codec
+ *
+ * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define WM8766_REG_DACL1 0x00
+#define WM8766_REG_DACR1 0x01
+#define WM8766_VOL_MASK 0x1ff /* incl. update bit */
+#define WM8766_VOL_UPDATE (1 << 8) /* update volume */
+#define WM8766_REG_DACCTRL1 0x02
+#define WM8766_DAC_MUTEALL (1 << 0)
+#define WM8766_DAC_DEEMPALL (1 << 1)
+#define WM8766_DAC_PDWN (1 << 2)
+#define WM8766_DAC_ATC (1 << 3)
+#define WM8766_DAC_IZD (1 << 4)
+#define WM8766_DAC_PL_MASK 0x1e0
+#define WM8766_DAC_PL_LL (1 << 5) /* L chan: L signal */
+#define WM8766_DAC_PL_LR (2 << 5) /* L chan: R signal */
+#define WM8766_DAC_PL_LB (3 << 5) /* L chan: both */
+#define WM8766_DAC_PL_RL (1 << 7) /* R chan: L signal */
+#define WM8766_DAC_PL_RR (2 << 7) /* R chan: R signal */
+#define WM8766_DAC_PL_RB (3 << 7) /* R chan: both */
+#define WM8766_REG_IFCTRL 0x03
+#define WM8766_IF_FMT_RIGHTJ (0 << 0)
+#define WM8766_IF_FMT_LEFTJ (1 << 0)
+#define WM8766_IF_FMT_I2S (2 << 0)
+#define WM8766_IF_FMT_DSP (3 << 0)
+#define WM8766_IF_DSP_LATE (1 << 2) /* in DSP mode */
+#define WM8766_IF_LRC_INVERTED (1 << 2) /* in other modes */
+#define WM8766_IF_BCLK_INVERTED (1 << 3)
+#define WM8766_IF_IWL_16BIT (0 << 4)
+#define WM8766_IF_IWL_20BIT (1 << 4)
+#define WM8766_IF_IWL_24BIT (2 << 4)
+#define WM8766_IF_IWL_32BIT (3 << 4)
+#define WM8766_IF_MASK 0x3f
+#define WM8766_PHASE_INVERT1 (1 << 6)
+#define WM8766_PHASE_INVERT2 (1 << 7)
+#define WM8766_PHASE_INVERT3 (1 << 8)
+#define WM8766_REG_DACL2 0x04
+#define WM8766_REG_DACR2 0x05
+#define WM8766_REG_DACL3 0x06
+#define WM8766_REG_DACR3 0x07
+#define WM8766_REG_MASTDA 0x08
+#define WM8766_REG_DACCTRL2 0x09
+#define WM8766_DAC2_ZCD (1 << 0)
+#define WM8766_DAC2_ZFLAG_ALL (0 << 1)
+#define WM8766_DAC2_ZFLAG_1 (1 << 1)
+#define WM8766_DAC2_ZFLAG_2 (2 << 1)
+#define WM8766_DAC2_ZFLAG_3 (3 << 1)
+#define WM8766_DAC2_MUTE1 (1 << 3)
+#define WM8766_DAC2_MUTE2 (1 << 4)
+#define WM8766_DAC2_MUTE3 (1 << 5)
+#define WM8766_DAC2_DEEMP1 (1 << 6)
+#define WM8766_DAC2_DEEMP2 (1 << 7)
+#define WM8766_DAC2_DEEMP3 (1 << 8)
+#define WM8766_REG_DACCTRL3 0x0a
+#define WM8766_DAC3_DACPD1 (1 << 1)
+#define WM8766_DAC3_DACPD2 (1 << 2)
+#define WM8766_DAC3_DACPD3 (1 << 3)
+#define WM8766_DAC3_PWRDNALL (1 << 4)
+#define WM8766_DAC3_POWER_MASK 0x1e
+#define WM8766_DAC3_MASTER (1 << 5)
+#define WM8766_DAC3_DAC128FS (0 << 6)
+#define WM8766_DAC3_DAC192FS (1 << 6)
+#define WM8766_DAC3_DAC256FS (2 << 6)
+#define WM8766_DAC3_DAC384FS (3 << 6)
+#define WM8766_DAC3_DAC512FS (4 << 6)
+#define WM8766_DAC3_DAC768FS (5 << 6)
+#define WM8766_DAC3_MSTR_MASK 0x1e0
+#define WM8766_REG_MUTE1 0x0c
+#define WM8766_MUTE1_MPD (1 << 6)
+#define WM8766_REG_MUTE2 0x0f
+#define WM8766_MUTE2_MPD (1 << 5)
+#define WM8766_REG_RESET 0x1f
+
+#define WM8766_REG_COUNT 0x10 /* don't cache the RESET register */
+
+struct snd_wm8766;
+
+struct snd_wm8766_ops {
+ void (*write)(struct snd_wm8766 *wm, u16 addr, u16 data);
+};
+
+enum snd_wm8766_ctl_id {
+ WM8766_CTL_CH1_VOL,
+ WM8766_CTL_CH2_VOL,
+ WM8766_CTL_CH3_VOL,
+ WM8766_CTL_CH1_SW,
+ WM8766_CTL_CH2_SW,
+ WM8766_CTL_CH3_SW,
+ WM8766_CTL_PHASE1_SW,
+ WM8766_CTL_PHASE2_SW,
+ WM8766_CTL_PHASE3_SW,
+ WM8766_CTL_DEEMPH1_SW,
+ WM8766_CTL_DEEMPH2_SW,
+ WM8766_CTL_DEEMPH3_SW,
+ WM8766_CTL_IZD_SW,
+ WM8766_CTL_ZC_SW,
+
+ WM8766_CTL_COUNT,
+};
+
+#define WM8766_ENUM_MAX 16
+
+#define WM8766_FLAG_STEREO (1 << 0)
+#define WM8766_FLAG_VOL_UPDATE (1 << 1)
+#define WM8766_FLAG_INVERT (1 << 2)
+#define WM8766_FLAG_LIM (1 << 3)
+#define WM8766_FLAG_ALC (1 << 4)
+
+struct snd_wm8766_ctl {
+ struct snd_kcontrol *kctl;
+ const char *name;
+ snd_ctl_elem_type_t type;
+ const char *const enum_names[WM8766_ENUM_MAX];
+ const unsigned int *tlv;
+ u16 reg1, reg2, mask1, mask2, min, max, flags;
+ void (*set)(struct snd_wm8766 *wm, u16 ch1, u16 ch2);
+ void (*get)(struct snd_wm8766 *wm, u16 *ch1, u16 *ch2);
+};
+
+enum snd_wm8766_agc_mode { WM8766_AGC_OFF, WM8766_AGC_LIM, WM8766_AGC_ALC };
+
+struct snd_wm8766 {
+ struct snd_card *card;
+ struct snd_wm8766_ctl ctl[WM8766_CTL_COUNT];
+ enum snd_wm8766_agc_mode agc_mode;
+ struct snd_wm8766_ops ops;
+ u16 regs[WM8766_REG_COUNT]; /* 9-bit registers */
+};
+
+
+
+void snd_wm8766_init(struct snd_wm8766 *wm);
+void snd_wm8766_resume(struct snd_wm8766 *wm);
+void snd_wm8766_set_if(struct snd_wm8766 *wm, u16 dac);
+void snd_wm8766_set_master_mode(struct snd_wm8766 *wm, u16 mode);
+void snd_wm8766_set_power(struct snd_wm8766 *wm, u16 power);
+void snd_wm8766_volume_restore(struct snd_wm8766 *wm);
+int snd_wm8766_build_controls(struct snd_wm8766 *wm);
+
+#endif /* __SOUND_WM8766_H */
diff --git a/sound/pci/ice1712/wm8776.c b/sound/pci/ice1712/wm8776.c
new file mode 100644
index 00000000000..a3c05fe5daf
--- /dev/null
+++ b/sound/pci/ice1712/wm8776.c
@@ -0,0 +1,633 @@
+/*
+ * ALSA driver for ICEnsemble VT17xx
+ *
+ * Lowlevel functions for WM8776 codec
+ *
+ * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
+#include "wm8776.h"
+
+/* low-level access */
+
+static void snd_wm8776_write(struct snd_wm8776 *wm, u16 addr, u16 data)
+{
+ u8 bus_addr = addr << 1 | data >> 8; /* addr + 9th data bit */
+ u8 bus_data = data & 0xff; /* remaining 8 data bits */
+
+ if (addr < WM8776_REG_RESET)
+ wm->regs[addr] = data;
+ wm->ops.write(wm, bus_addr, bus_data);
+}
+
+/* register-level functions */
+
+static void snd_wm8776_activate_ctl(struct snd_wm8776 *wm,
+ const char *ctl_name,
+ bool active)
+{
+ struct snd_card *card = wm->card;
+ struct snd_kcontrol *kctl;
+ struct snd_kcontrol_volatile *vd;
+ struct snd_ctl_elem_id elem_id;
+ unsigned int index_offset;
+
+ memset(&elem_id, 0, sizeof(elem_id));
+ strncpy(elem_id.name, ctl_name, sizeof(elem_id.name));
+ elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ kctl = snd_ctl_find_id(card, &elem_id);
+ if (!kctl)
+ return;
+ index_offset = snd_ctl_get_ioff(kctl, &kctl->id);
+ vd = &kctl->vd[index_offset];
+ if (active)
+ vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ else
+ vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
+}
+
+static void snd_wm8776_update_agc_ctl(struct snd_wm8776 *wm)
+{
+ int i, flags_on = 0, flags_off = 0;
+
+ switch (wm->agc_mode) {
+ case WM8776_AGC_OFF:
+ flags_off = WM8776_FLAG_LIM | WM8776_FLAG_ALC;
+ break;
+ case WM8776_AGC_LIM:
+ flags_off = WM8776_FLAG_ALC;
+ flags_on = WM8776_FLAG_LIM;
+ break;
+ case WM8776_AGC_ALC_R:
+ case WM8776_AGC_ALC_L:
+ case WM8776_AGC_ALC_STEREO:
+ flags_off = WM8776_FLAG_LIM;
+ flags_on = WM8776_FLAG_ALC;
+ break;
+ }
+
+ for (i = 0; i < WM8776_CTL_COUNT; i++)
+ if (wm->ctl[i].flags & flags_off)
+ snd_wm8776_activate_ctl(wm, wm->ctl[i].name, false);
+ else if (wm->ctl[i].flags & flags_on)
+ snd_wm8776_activate_ctl(wm, wm->ctl[i].name, true);
+}
+
+static void snd_wm8776_set_agc(struct snd_wm8776 *wm, u16 agc, u16 nothing)
+{
+ u16 alc1 = wm->regs[WM8776_REG_ALCCTRL1] & ~WM8776_ALC1_LCT_MASK;
+ u16 alc2 = wm->regs[WM8776_REG_ALCCTRL2] & ~WM8776_ALC2_LCEN;
+
+ switch (agc) {
+ case 0: /* Off */
+ wm->agc_mode = WM8776_AGC_OFF;
+ break;
+ case 1: /* Limiter */
+ alc2 |= WM8776_ALC2_LCEN;
+ wm->agc_mode = WM8776_AGC_LIM;
+ break;
+ case 2: /* ALC Right */
+ alc1 |= WM8776_ALC1_LCSEL_ALCR;
+ alc2 |= WM8776_ALC2_LCEN;
+ wm->agc_mode = WM8776_AGC_ALC_R;
+ break;
+ case 3: /* ALC Left */
+ alc1 |= WM8776_ALC1_LCSEL_ALCL;
+ alc2 |= WM8776_ALC2_LCEN;
+ wm->agc_mode = WM8776_AGC_ALC_L;
+ break;
+ case 4: /* ALC Stereo */
+ alc1 |= WM8776_ALC1_LCSEL_ALCSTEREO;
+ alc2 |= WM8776_ALC2_LCEN;
+ wm->agc_mode = WM8776_AGC_ALC_STEREO;
+ break;
+ }
+ snd_wm8776_write(wm, WM8776_REG_ALCCTRL1, alc1);
+ snd_wm8776_write(wm, WM8776_REG_ALCCTRL2, alc2);
+ snd_wm8776_update_agc_ctl(wm);
+}
+
+static void snd_wm8776_get_agc(struct snd_wm8776 *wm, u16 *mode, u16 *nothing)
+{
+ *mode = wm->agc_mode;
+}
+
+/* mixer controls */
+
+static const DECLARE_TLV_DB_SCALE(wm8776_hp_tlv, -7400, 100, 1);
+static const DECLARE_TLV_DB_SCALE(wm8776_dac_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(wm8776_adc_tlv, -10350, 50, 1);
+static const DECLARE_TLV_DB_SCALE(wm8776_lct_tlv, -1600, 100, 0);
+static const DECLARE_TLV_DB_SCALE(wm8776_maxgain_tlv, 0, 400, 0);
+static const DECLARE_TLV_DB_SCALE(wm8776_ngth_tlv, -7800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_lim_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(wm8776_maxatten_alc_tlv, -2100, 400, 0);
+
+static struct snd_wm8776_ctl snd_wm8776_default_ctl[WM8776_CTL_COUNT] = {
+ [WM8776_CTL_DAC_VOL] = {
+ .name = "Master Playback Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_dac_tlv,
+ .reg1 = WM8776_REG_DACLVOL,
+ .reg2 = WM8776_REG_DACRVOL,
+ .mask1 = WM8776_DACVOL_MASK,
+ .mask2 = WM8776_DACVOL_MASK,
+ .max = 0xff,
+ .flags = WM8776_FLAG_STEREO | WM8776_FLAG_VOL_UPDATE,
+ },
+ [WM8776_CTL_DAC_SW] = {
+ .name = "Master Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_DACCTRL1,
+ .reg2 = WM8776_REG_DACCTRL1,
+ .mask1 = WM8776_DAC_PL_LL,
+ .mask2 = WM8776_DAC_PL_RR,
+ .flags = WM8776_FLAG_STEREO,
+ },
+ [WM8776_CTL_DAC_ZC_SW] = {
+ .name = "Master Zero Cross Detect Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_DACCTRL1,
+ .mask1 = WM8776_DAC_DZCEN,
+ },
+ [WM8776_CTL_HP_VOL] = {
+ .name = "Headphone Playback Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_hp_tlv,
+ .reg1 = WM8776_REG_HPLVOL,
+ .reg2 = WM8776_REG_HPRVOL,
+ .mask1 = WM8776_HPVOL_MASK,
+ .mask2 = WM8776_HPVOL_MASK,
+ .min = 0x2f,
+ .max = 0x7f,
+ .flags = WM8776_FLAG_STEREO | WM8776_FLAG_VOL_UPDATE,
+ },
+ [WM8776_CTL_HP_SW] = {
+ .name = "Headphone Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_PWRDOWN,
+ .mask1 = WM8776_PWR_HPPD,
+ .flags = WM8776_FLAG_INVERT,
+ },
+ [WM8776_CTL_HP_ZC_SW] = {
+ .name = "Headphone Zero Cross Detect Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_HPLVOL,
+ .reg2 = WM8776_REG_HPRVOL,
+ .mask1 = WM8776_VOL_HPZCEN,
+ .mask2 = WM8776_VOL_HPZCEN,
+ .flags = WM8776_FLAG_STEREO,
+ },
+ [WM8776_CTL_AUX_SW] = {
+ .name = "AUX Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_OUTMUX,
+ .mask1 = WM8776_OUTMUX_AUX,
+ },
+ [WM8776_CTL_BYPASS_SW] = {
+ .name = "Bypass Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_OUTMUX,
+ .mask1 = WM8776_OUTMUX_BYPASS,
+ },
+ [WM8776_CTL_DAC_IZD_SW] = {
+ .name = "Infinite Zero Detect Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_DACCTRL1,
+ .mask1 = WM8776_DAC_IZD,
+ },
+ [WM8776_CTL_PHASE_SW] = {
+ .name = "Phase Invert Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_PHASESWAP,
+ .reg2 = WM8776_REG_PHASESWAP,
+ .mask1 = WM8776_PHASE_INVERTL,
+ .mask2 = WM8776_PHASE_INVERTR,
+ .flags = WM8776_FLAG_STEREO,
+ },
+ [WM8776_CTL_DEEMPH_SW] = {
+ .name = "Deemphasis Playback Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_DACCTRL2,
+ .mask1 = WM8776_DAC2_DEEMPH,
+ },
+ [WM8776_CTL_ADC_VOL] = {
+ .name = "Input Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_adc_tlv,
+ .reg1 = WM8776_REG_ADCLVOL,
+ .reg2 = WM8776_REG_ADCRVOL,
+ .mask1 = WM8776_ADC_GAIN_MASK,
+ .mask2 = WM8776_ADC_GAIN_MASK,
+ .max = 0xff,
+ .flags = WM8776_FLAG_STEREO | WM8776_FLAG_VOL_UPDATE,
+ },
+ [WM8776_CTL_ADC_SW] = {
+ .name = "Input Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .reg2 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUTEL,
+ .mask2 = WM8776_ADC_MUTER,
+ .flags = WM8776_FLAG_STEREO | WM8776_FLAG_INVERT,
+ },
+ [WM8776_CTL_INPUT1_SW] = {
+ .name = "AIN1 Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUX_AIN1,
+ },
+ [WM8776_CTL_INPUT2_SW] = {
+ .name = "AIN2 Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUX_AIN2,
+ },
+ [WM8776_CTL_INPUT3_SW] = {
+ .name = "AIN3 Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUX_AIN3,
+ },
+ [WM8776_CTL_INPUT4_SW] = {
+ .name = "AIN4 Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUX_AIN4,
+ },
+ [WM8776_CTL_INPUT5_SW] = {
+ .name = "AIN5 Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_ADCMUX,
+ .mask1 = WM8776_ADC_MUX_AIN5,
+ },
+ [WM8776_CTL_AGC_SEL] = {
+ .name = "AGC Select Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "Off", "Limiter", "ALC Right", "ALC Left",
+ "ALC Stereo" },
+ .max = 5, /* .enum_names item count */
+ .set = snd_wm8776_set_agc,
+ .get = snd_wm8776_get_agc,
+ },
+ [WM8776_CTL_LIM_THR] = {
+ .name = "Limiter Threshold Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_lct_tlv,
+ .reg1 = WM8776_REG_ALCCTRL1,
+ .mask1 = WM8776_ALC1_LCT_MASK,
+ .max = 15,
+ .flags = WM8776_FLAG_LIM,
+ },
+ [WM8776_CTL_LIM_ATK] = {
+ .name = "Limiter Attack Time Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "0.25 ms", "0.5 ms", "1 ms", "2 ms", "4 ms",
+ "8 ms", "16 ms", "32 ms", "64 ms", "128 ms", "256 ms" },
+ .max = 11, /* .enum_names item count */
+ .reg1 = WM8776_REG_ALCCTRL3,
+ .mask1 = WM8776_ALC3_ATK_MASK,
+ .flags = WM8776_FLAG_LIM,
+ },
+ [WM8776_CTL_LIM_DCY] = {
+ .name = "Limiter Decay Time Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "1.2 ms", "2.4 ms", "4.8 ms", "9.6 ms",
+ "19.2 ms", "38.4 ms", "76.8 ms", "154 ms", "307 ms",
+ "614 ms", "1.23 s" },
+ .max = 11, /* .enum_names item count */
+ .reg1 = WM8776_REG_ALCCTRL3,
+ .mask1 = WM8776_ALC3_DCY_MASK,
+ .flags = WM8776_FLAG_LIM,
+ },
+ [WM8776_CTL_LIM_TRANWIN] = {
+ .name = "Limiter Transient Window Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "0 us", "62.5 us", "125 us", "250 us", "500 us",
+ "1 ms", "2 ms", "4 ms" },
+ .max = 8, /* .enum_names item count */
+ .reg1 = WM8776_REG_LIMITER,
+ .mask1 = WM8776_LIM_TRANWIN_MASK,
+ .flags = WM8776_FLAG_LIM,
+ },
+ [WM8776_CTL_LIM_MAXATTN] = {
+ .name = "Limiter Maximum Attenuation Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_maxatten_lim_tlv,
+ .reg1 = WM8776_REG_LIMITER,
+ .mask1 = WM8776_LIM_MAXATTEN_MASK,
+ .min = 3,
+ .max = 12,
+ .flags = WM8776_FLAG_LIM | WM8776_FLAG_INVERT,
+ },
+ [WM8776_CTL_ALC_TGT] = {
+ .name = "ALC Target Level Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_lct_tlv,
+ .reg1 = WM8776_REG_ALCCTRL1,
+ .mask1 = WM8776_ALC1_LCT_MASK,
+ .max = 15,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_ALC_ATK] = {
+ .name = "ALC Attack Time Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "8.40 ms", "16.8 ms", "33.6 ms", "67.2 ms",
+ "134 ms", "269 ms", "538 ms", "1.08 s", "2.15 s",
+ "4.3 s", "8.6 s" },
+ .max = 11, /* .enum_names item count */
+ .reg1 = WM8776_REG_ALCCTRL3,
+ .mask1 = WM8776_ALC3_ATK_MASK,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_ALC_DCY] = {
+ .name = "ALC Decay Time Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "33.5 ms", "67.0 ms", "134 ms", "268 ms",
+ "536 ms", "1.07 s", "2.14 s", "4.29 s", "8.58 s",
+ "17.2 s", "34.3 s" },
+ .max = 11, /* .enum_names item count */
+ .reg1 = WM8776_REG_ALCCTRL3,
+ .mask1 = WM8776_ALC3_DCY_MASK,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_ALC_MAXGAIN] = {
+ .name = "ALC Maximum Gain Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_maxgain_tlv,
+ .reg1 = WM8776_REG_ALCCTRL1,
+ .mask1 = WM8776_ALC1_MAXGAIN_MASK,
+ .min = 1,
+ .max = 7,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_ALC_MAXATTN] = {
+ .name = "ALC Maximum Attenuation Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_maxatten_alc_tlv,
+ .reg1 = WM8776_REG_LIMITER,
+ .mask1 = WM8776_LIM_MAXATTEN_MASK,
+ .min = 10,
+ .max = 15,
+ .flags = WM8776_FLAG_ALC | WM8776_FLAG_INVERT,
+ },
+ [WM8776_CTL_ALC_HLD] = {
+ .name = "ALC Hold Time Capture Enum",
+ .type = SNDRV_CTL_ELEM_TYPE_ENUMERATED,
+ .enum_names = { "0 ms", "2.67 ms", "5.33 ms", "10.6 ms",
+ "21.3 ms", "42.7 ms", "85.3 ms", "171 ms", "341 ms",
+ "683 ms", "1.37 s", "2.73 s", "5.46 s", "10.9 s",
+ "21.8 s", "43.7 s" },
+ .max = 16, /* .enum_names item count */
+ .reg1 = WM8776_REG_ALCCTRL2,
+ .mask1 = WM8776_ALC2_HOLD_MASK,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_NGT_SW] = {
+ .name = "Noise Gate Capture Switch",
+ .type = SNDRV_CTL_ELEM_TYPE_BOOLEAN,
+ .reg1 = WM8776_REG_NOISEGATE,
+ .mask1 = WM8776_NGAT_ENABLE,
+ .flags = WM8776_FLAG_ALC,
+ },
+ [WM8776_CTL_NGT_THR] = {
+ .name = "Noise Gate Threshold Capture Volume",
+ .type = SNDRV_CTL_ELEM_TYPE_INTEGER,
+ .tlv = wm8776_ngth_tlv,
+ .reg1 = WM8776_REG_NOISEGATE,
+ .mask1 = WM8776_NGAT_THR_MASK,
+ .max = 7,
+ .flags = WM8776_FLAG_ALC,
+ },
+};
+
+/* exported functions */
+
+void snd_wm8776_init(struct snd_wm8776 *wm)
+{
+ int i;
+ static const u16 default_values[] = {
+ 0x000, 0x100, 0x000,
+ 0x000, 0x100, 0x000,
+ 0x000, 0x090, 0x000, 0x000,
+ 0x022, 0x022, 0x022,
+ 0x008, 0x0cf, 0x0cf, 0x07b, 0x000,
+ 0x032, 0x000, 0x0a6, 0x001, 0x001
+ };
+
+ memcpy(wm->ctl, snd_wm8776_default_ctl, sizeof(wm->ctl));
+
+ snd_wm8776_write(wm, WM8776_REG_RESET, 0x00); /* reset */
+ udelay(10);
+ /* load defaults */
+ for (i = 0; i < ARRAY_SIZE(default_values); i++)
+ snd_wm8776_write(wm, i, default_values[i]);
+}
+
+void snd_wm8776_resume(struct snd_wm8776 *wm)
+{
+ int i;
+
+ for (i = 0; i < WM8776_REG_COUNT; i++)
+ snd_wm8776_write(wm, i, wm->regs[i]);
+}
+
+void snd_wm8776_set_dac_if(struct snd_wm8776 *wm, u16 dac)
+{
+ snd_wm8776_write(wm, WM8776_REG_DACIFCTRL, dac);
+}
+
+void snd_wm8776_set_adc_if(struct snd_wm8776 *wm, u16 adc)
+{
+ snd_wm8776_write(wm, WM8776_REG_ADCIFCTRL, adc);
+}
+
+void snd_wm8776_set_master_mode(struct snd_wm8776 *wm, u16 mode)
+{
+ snd_wm8776_write(wm, WM8776_REG_MSTRCTRL, mode);
+}
+
+void snd_wm8776_set_power(struct snd_wm8776 *wm, u16 power)
+{
+ snd_wm8776_write(wm, WM8776_REG_PWRDOWN, power);
+}
+
+void snd_wm8776_volume_restore(struct snd_wm8776 *wm)
+{
+ u16 val = wm->regs[WM8776_REG_DACRVOL];
+ /* restore volume after MCLK stopped */
+ snd_wm8776_write(wm, WM8776_REG_DACRVOL, val | WM8776_VOL_UPDATE);
+}
+
+/* mixer callbacks */
+
+static int snd_wm8776_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_wm8776 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = (wm->ctl[n].flags & WM8776_FLAG_STEREO) ? 2 : 1;
+ uinfo->value.integer.min = wm->ctl[n].min;
+ uinfo->value.integer.max = wm->ctl[n].max;
+
+ return 0;
+}
+
+static int snd_wm8776_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_wm8776 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+
+ return snd_ctl_enum_info(uinfo, 1, wm->ctl[n].max,
+ wm->ctl[n].enum_names);
+}
+
+static int snd_wm8776_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_wm8776 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+ u16 val1, val2;
+
+ if (wm->ctl[n].get)
+ wm->ctl[n].get(wm, &val1, &val2);
+ else {
+ val1 = wm->regs[wm->ctl[n].reg1] & wm->ctl[n].mask1;
+ val1 >>= __ffs(wm->ctl[n].mask1);
+ if (wm->ctl[n].flags & WM8776_FLAG_STEREO) {
+ val2 = wm->regs[wm->ctl[n].reg2] & wm->ctl[n].mask2;
+ val2 >>= __ffs(wm->ctl[n].mask2);
+ if (wm->ctl[n].flags & WM8776_FLAG_VOL_UPDATE)
+ val2 &= ~WM8776_VOL_UPDATE;
+ }
+ }
+ if (wm->ctl[n].flags & WM8776_FLAG_INVERT) {
+ val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min);
+ val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min);
+ }
+ ucontrol->value.integer.value[0] = val1;
+ if (wm->ctl[n].flags & WM8776_FLAG_STEREO)
+ ucontrol->value.integer.value[1] = val2;
+
+ return 0;
+}
+
+static int snd_wm8776_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_wm8776 *wm = snd_kcontrol_chip(kcontrol);
+ int n = kcontrol->private_value;
+ u16 val, regval1, regval2;
+
+ /* this also works for enums because value is a union */
+ regval1 = ucontrol->value.integer.value[0];
+ regval2 = ucontrol->value.integer.value[1];
+ if (wm->ctl[n].flags & WM8776_FLAG_INVERT) {
+ regval1 = wm->ctl[n].max - (regval1 - wm->ctl[n].min);
+ regval2 = wm->ctl[n].max - (regval2 - wm->ctl[n].min);
+ }
+ if (wm->ctl[n].set)
+ wm->ctl[n].set(wm, regval1, regval2);
+ else {
+ val = wm->regs[wm->ctl[n].reg1] & ~wm->ctl[n].mask1;
+ val |= regval1 << __ffs(wm->ctl[n].mask1);
+ /* both stereo controls in one register */
+ if (wm->ctl[n].flags & WM8776_FLAG_STEREO &&
+ wm->ctl[n].reg1 == wm->ctl[n].reg2) {
+ val &= ~wm->ctl[n].mask2;
+ val |= regval2 << __ffs(wm->ctl[n].mask2);
+ }
+ snd_wm8776_write(wm, wm->ctl[n].reg1, val);
+ /* stereo controls in different registers */
+ if (wm->ctl[n].flags & WM8776_FLAG_STEREO &&
+ wm->ctl[n].reg1 != wm->ctl[n].reg2) {
+ val = wm->regs[wm->ctl[n].reg2] & ~wm->ctl[n].mask2;
+ val |= regval2 << __ffs(wm->ctl[n].mask2);
+ if (wm->ctl[n].flags & WM8776_FLAG_VOL_UPDATE)
+ val |= WM8776_VOL_UPDATE;
+ snd_wm8776_write(wm, wm->ctl[n].reg2, val);
+ }
+ }
+
+ return 0;
+}
+
+static int snd_wm8776_add_control(struct snd_wm8776 *wm, int num)
+{
+ struct snd_kcontrol_new cont;
+ struct snd_kcontrol *ctl;
+
+ memset(&cont, 0, sizeof(cont));
+ cont.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ cont.private_value = num;
+ cont.name = wm->ctl[num].name;
+ cont.access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ if (wm->ctl[num].flags & WM8776_FLAG_LIM ||
+ wm->ctl[num].flags & WM8776_FLAG_ALC)
+ cont.access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ cont.tlv.p = NULL;
+ cont.get = snd_wm8776_ctl_get;
+ cont.put = snd_wm8776_ctl_put;
+
+ switch (wm->ctl[num].type) {
+ case SNDRV_CTL_ELEM_TYPE_INTEGER:
+ cont.info = snd_wm8776_volume_info;
+ cont.access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ cont.tlv.p = wm->ctl[num].tlv;
+ break;
+ case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
+ wm->ctl[num].max = 1;
+ if (wm->ctl[num].flags & WM8776_FLAG_STEREO)
+ cont.info = snd_ctl_boolean_stereo_info;
+ else
+ cont.info = snd_ctl_boolean_mono_info;
+ break;
+ case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
+ cont.info = snd_wm8776_enum_info;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctl = snd_ctl_new1(&cont, wm);
+ if (!ctl)
+ return -ENOMEM;
+
+ return snd_ctl_add(wm->card, ctl);
+}
+
+int snd_wm8776_build_controls(struct snd_wm8776 *wm)
+{
+ int err, i;
+
+ for (i = 0; i < WM8776_CTL_COUNT; i++)
+ if (wm->ctl[i].name) {
+ err = snd_wm8776_add_control(wm, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
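
A short worked example of the register framing done by snd_wm8776_write() above: the 7-bit register address and 9-bit value are split into an address byte (address plus data bit 8) and a data byte for the codec's control interface. The helper below is illustrative only and uses just the symbols defined in this patch.

static void wm8776_write_example(struct snd_wm8776 *wm)
{
	/*
	 * WM8776_REG_DACRVOL = 0x04, value 0x1ff (max volume + update bit):
	 * bus_addr = 0x04 << 1 | 0x1ff >> 8 = 0x09, bus_data = 0xff
	 */
	snd_wm8776_write(wm, WM8776_REG_DACRVOL, WM8776_DACVOL_MASK);
}
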
diff --git a/sound/pci/ice1712/wm8776.h b/sound/pci/ice1712/wm8776.h
new file mode 100644
index 00000000000..93a2d697115
--- /dev/null
+++ b/sound/pci/ice1712/wm8776.h
@@ -0,0 +1,226 @@
+#ifndef __SOUND_WM8776_H
+#define __SOUND_WM8776_H
+
+/*
+ * ALSA driver for ICEnsemble VT17xx
+ *
+ * Lowlevel functions for WM8776 codec
+ *
+ * Copyright (c) 2012 Ondrej Zary <linux@rainbow-software.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define WM8776_REG_HPLVOL 0x00
+#define WM8776_REG_HPRVOL 0x01
+#define WM8776_REG_HPMASTER 0x02
+#define WM8776_HPVOL_MASK 0x17f /* incl. update bit */
+#define WM8776_VOL_HPZCEN (1 << 7) /* zero cross detect */
+#define WM8776_VOL_UPDATE (1 << 8) /* update volume */
+#define WM8776_REG_DACLVOL 0x03
+#define WM8776_REG_DACRVOL 0x04
+#define WM8776_REG_DACMASTER 0x05
+#define WM8776_DACVOL_MASK 0x1ff /* incl. update bit */
+#define WM8776_REG_PHASESWAP 0x06
+#define WM8776_PHASE_INVERTL (1 << 0)
+#define WM8776_PHASE_INVERTR (1 << 1)
+#define WM8776_REG_DACCTRL1 0x07
+#define WM8776_DAC_DZCEN (1 << 0)
+#define WM8776_DAC_ATC (1 << 1)
+#define WM8776_DAC_IZD (1 << 2)
+#define WM8776_DAC_TOD (1 << 3)
+#define WM8776_DAC_PL_MASK 0xf0
+#define WM8776_DAC_PL_LL (1 << 4) /* L chan: L signal */
+#define WM8776_DAC_PL_LR (2 << 4) /* L chan: R signal */
+#define WM8776_DAC_PL_LB (3 << 4) /* L chan: both */
+#define WM8776_DAC_PL_RL (1 << 6) /* R chan: L signal */
+#define WM8776_DAC_PL_RR (2 << 6) /* R chan: R signal */
+#define WM8776_DAC_PL_RB (3 << 6) /* R chan: both */
+#define WM8776_REG_DACMUTE 0x08
+#define WM8776_DACMUTE (1 << 0)
+#define WM8776_REG_DACCTRL2 0x09
+#define WM8776_DAC2_DEEMPH (1 << 0)
+#define WM8776_DAC2_ZFLAG_DISABLE (0 << 1)
+#define WM8776_DAC2_ZFLAG_OWN (1 << 1)
+#define WM8776_DAC2_ZFLAG_BOTH (2 << 1)
+#define WM8776_DAC2_ZFLAG_EITHER (3 << 1)
+#define WM8776_REG_DACIFCTRL 0x0a
+#define WM8776_FMT_RIGHTJ (0 << 0)
+#define WM8776_FMT_LEFTJ (1 << 0)
+#define WM8776_FMT_I2S (2 << 0)
+#define WM8776_FMT_DSP (3 << 0)
+#define WM8776_FMT_DSP_LATE (1 << 2) /* in DSP mode */
+#define WM8776_FMT_LRC_INVERTED (1 << 2) /* in other modes */
+#define WM8776_FMT_BCLK_INVERTED (1 << 3)
+#define WM8776_FMT_16BIT (0 << 4)
+#define WM8776_FMT_20BIT (1 << 4)
+#define WM8776_FMT_24BIT (2 << 4)
+#define WM8776_FMT_32BIT (3 << 4)
+#define WM8776_REG_ADCIFCTRL 0x0b
+#define WM8776_FMT_ADCMCLK_INVERTED (1 << 6)
+#define WM8776_FMT_ADCHPD (1 << 8)
+#define WM8776_REG_MSTRCTRL 0x0c
+#define WM8776_IF_ADC256FS (2 << 0)
+#define WM8776_IF_ADC384FS (3 << 0)
+#define WM8776_IF_ADC512FS (4 << 0)
+#define WM8776_IF_ADC768FS (5 << 0)
+#define WM8776_IF_OVERSAMP64 (1 << 3)
+#define WM8776_IF_DAC128FS (0 << 4)
+#define WM8776_IF_DAC192FS (1 << 4)
+#define WM8776_IF_DAC256FS (2 << 4)
+#define WM8776_IF_DAC384FS (3 << 4)
+#define WM8776_IF_DAC512FS (4 << 4)
+#define WM8776_IF_DAC768FS (5 << 4)
+#define WM8776_IF_DAC_MASTER (1 << 7)
+#define WM8776_IF_ADC_MASTER (1 << 8)
+#define WM8776_REG_PWRDOWN 0x0d
+#define WM8776_PWR_PDWN (1 << 0)
+#define WM8776_PWR_ADCPD (1 << 1)
+#define WM8776_PWR_DACPD (1 << 2)
+#define WM8776_PWR_HPPD (1 << 3)
+#define WM8776_PWR_AINPD (1 << 6)
+#define WM8776_REG_ADCLVOL 0x0e
+#define WM8776_REG_ADCRVOL 0x0f
+#define WM8776_ADC_GAIN_MASK 0xff
+#define WM8776_ADC_ZCEN (1 << 8)
+#define WM8776_REG_ALCCTRL1 0x10
+#define WM8776_ALC1_LCT_MASK 0x0f /* 0=-16dB, 1=-15dB..15=-1dB */
+#define WM8776_ALC1_MAXGAIN_MASK 0x70 /* 0,1=0dB, 2=+4dB...7=+24dB */
+#define WM8776_ALC1_LCSEL_MASK 0x180
+#define WM8776_ALC1_LCSEL_LIMITER (0 << 7)
+#define WM8776_ALC1_LCSEL_ALCR (1 << 7)
+#define WM8776_ALC1_LCSEL_ALCL (2 << 7)
+#define WM8776_ALC1_LCSEL_ALCSTEREO (3 << 7)
+#define WM8776_REG_ALCCTRL2 0x11
+#define WM8776_ALC2_HOLD_MASK 0x0f /*0=0ms, 1=2.67ms, 2=5.33ms.. */
+#define WM8776_ALC2_ZCEN (1 << 7)
+#define WM8776_ALC2_LCEN (1 << 8)
+#define WM8776_REG_ALCCTRL3 0x12
+#define WM8776_ALC3_ATK_MASK 0x0f
+#define WM8776_ALC3_DCY_MASK 0xf0
+#define WM8776_ALC3_FDECAY (1 << 8)
+#define WM8776_REG_NOISEGATE 0x13
+#define WM8776_NGAT_ENABLE (1 << 0)
+#define WM8776_NGAT_THR_MASK 0x1c /*0=-78dB, 1=-72dB...7=-36dB */
+#define WM8776_REG_LIMITER 0x14
+#define WM8776_LIM_MAXATTEN_MASK 0x0f
+#define WM8776_LIM_TRANWIN_MASK 0x70 /*0=0us, 1=62.5us, 2=125us.. */
+#define WM8776_REG_ADCMUX 0x15
+#define WM8776_ADC_MUX_AIN1 (1 << 0)
+#define WM8776_ADC_MUX_AIN2 (1 << 1)
+#define WM8776_ADC_MUX_AIN3 (1 << 2)
+#define WM8776_ADC_MUX_AIN4 (1 << 3)
+#define WM8776_ADC_MUX_AIN5 (1 << 4)
+#define WM8776_ADC_MUTER (1 << 6)
+#define WM8776_ADC_MUTEL (1 << 7)
+#define WM8776_ADC_LRBOTH (1 << 8)
+#define WM8776_REG_OUTMUX 0x16
+#define WM8776_OUTMUX_DAC (1 << 0)
+#define WM8776_OUTMUX_AUX (1 << 1)
+#define WM8776_OUTMUX_BYPASS (1 << 2)
+#define WM8776_REG_RESET 0x17
+
+#define WM8776_REG_COUNT 0x17 /* don't cache the RESET register */
+
+struct snd_wm8776;
+
+struct snd_wm8776_ops {
+ void (*write)(struct snd_wm8776 *wm, u8 addr, u8 data);
+};
+
+enum snd_wm8776_ctl_id {
+ WM8776_CTL_DAC_VOL,
+ WM8776_CTL_DAC_SW,
+ WM8776_CTL_DAC_ZC_SW,
+ WM8776_CTL_HP_VOL,
+ WM8776_CTL_HP_SW,
+ WM8776_CTL_HP_ZC_SW,
+ WM8776_CTL_AUX_SW,
+ WM8776_CTL_BYPASS_SW,
+ WM8776_CTL_DAC_IZD_SW,
+ WM8776_CTL_PHASE_SW,
+ WM8776_CTL_DEEMPH_SW,
+ WM8776_CTL_ADC_VOL,
+ WM8776_CTL_ADC_SW,
+ WM8776_CTL_INPUT1_SW,
+ WM8776_CTL_INPUT2_SW,
+ WM8776_CTL_INPUT3_SW,
+ WM8776_CTL_INPUT4_SW,
+ WM8776_CTL_INPUT5_SW,
+ WM8776_CTL_AGC_SEL,
+ WM8776_CTL_LIM_THR,
+ WM8776_CTL_LIM_ATK,
+ WM8776_CTL_LIM_DCY,
+ WM8776_CTL_LIM_TRANWIN,
+ WM8776_CTL_LIM_MAXATTN,
+ WM8776_CTL_ALC_TGT,
+ WM8776_CTL_ALC_ATK,
+ WM8776_CTL_ALC_DCY,
+ WM8776_CTL_ALC_MAXGAIN,
+ WM8776_CTL_ALC_MAXATTN,
+ WM8776_CTL_ALC_HLD,
+ WM8776_CTL_NGT_SW,
+ WM8776_CTL_NGT_THR,
+
+ WM8776_CTL_COUNT,
+};
+
+#define WM8776_ENUM_MAX 16
+
+#define WM8776_FLAG_STEREO (1 << 0)
+#define WM8776_FLAG_VOL_UPDATE (1 << 1)
+#define WM8776_FLAG_INVERT (1 << 2)
+#define WM8776_FLAG_LIM (1 << 3)
+#define WM8776_FLAG_ALC (1 << 4)
+
+struct snd_wm8776_ctl {
+ const char *name;
+ snd_ctl_elem_type_t type;
+ const char *const enum_names[WM8776_ENUM_MAX];
+ const unsigned int *tlv;
+ u16 reg1, reg2, mask1, mask2, min, max, flags;
+ void (*set)(struct snd_wm8776 *wm, u16 ch1, u16 ch2);
+ void (*get)(struct snd_wm8776 *wm, u16 *ch1, u16 *ch2);
+};
+
+enum snd_wm8776_agc_mode {
+ WM8776_AGC_OFF,
+ WM8776_AGC_LIM,
+ WM8776_AGC_ALC_R,
+ WM8776_AGC_ALC_L,
+ WM8776_AGC_ALC_STEREO
+};
+
+struct snd_wm8776 {
+ struct snd_card *card;
+ struct snd_wm8776_ctl ctl[WM8776_CTL_COUNT];
+ enum snd_wm8776_agc_mode agc_mode;
+ struct snd_wm8776_ops ops;
+ u16 regs[WM8776_REG_COUNT]; /* 9-bit registers */
+};
+
+
+
+void snd_wm8776_init(struct snd_wm8776 *wm);
+void snd_wm8776_resume(struct snd_wm8776 *wm);
+void snd_wm8776_set_dac_if(struct snd_wm8776 *wm, u16 dac);
+void snd_wm8776_set_adc_if(struct snd_wm8776 *wm, u16 adc);
+void snd_wm8776_set_master_mode(struct snd_wm8776 *wm, u16 mode);
+void snd_wm8776_set_power(struct snd_wm8776 *wm, u16 power);
+void snd_wm8776_volume_restore(struct snd_wm8776 *wm);
+int snd_wm8776_build_controls(struct snd_wm8776 *wm);
+
+#endif /* __SOUND_WM8776_H */
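
As an illustration of the defines above, a hypothetical card driver could configure the WM8776 interfaces as shown below; the I2S, 24-bit and 256fs choices are examples rather than values required by the patch.

static void my_wm8776_configure(struct snd_wm8776 *wm)
{
	snd_wm8776_set_dac_if(wm, WM8776_FMT_I2S | WM8776_FMT_24BIT);
	snd_wm8776_set_adc_if(wm, WM8776_FMT_I2S | WM8776_FMT_24BIT);
	snd_wm8776_set_master_mode(wm, WM8776_IF_DAC256FS | WM8776_IF_ADC256FS);
}
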
diff --git a/sound/pci/ice1712/wtm.c b/sound/pci/ice1712/wtm.c
index e618f789026..bcf30a387b8 100644
--- a/sound/pci/ice1712/wtm.c
+++ b/sound/pci/ice1712/wtm.c
@@ -25,7 +25,6 @@
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -384,7 +383,7 @@ static int stac9460_mic_sw_put(struct snd_kcontrol *kcontrol,
/*
* Control tabs
*/
-static struct snd_kcontrol_new stac9640_controls[] __devinitdata = {
+static struct snd_kcontrol_new stac9640_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
@@ -448,7 +447,7 @@ static struct snd_kcontrol_new stac9640_controls[] __devinitdata = {
/*INIT*/
-static int __devinit wtm_add_controls(struct snd_ice1712 *ice)
+static int wtm_add_controls(struct snd_ice1712 *ice)
{
unsigned int i;
int err;
@@ -462,7 +461,7 @@ static int __devinit wtm_add_controls(struct snd_ice1712 *ice)
return 0;
}
-static int __devinit wtm_init(struct snd_ice1712 *ice)
+static int wtm_init(struct snd_ice1712 *ice)
{
static unsigned short stac_inits_prodigy[] = {
STAC946X_RESET, 0,
@@ -485,7 +484,7 @@ static int __devinit wtm_init(struct snd_ice1712 *ice)
}
-static unsigned char wtm_eeprom[] __devinitdata = {
+static unsigned char wtm_eeprom[] = {
0x47, /*SYSCONF: clock 192KHz, 4ADC, 8DAC */
0x80, /* ACLINK : I2S */
0xf8, /* I2S: vol; 96k, 24bit, 192k */
@@ -503,7 +502,7 @@ static unsigned char wtm_eeprom[] __devinitdata = {
/*entry point*/
-struct snd_ice1712_card_info snd_vt1724_wtm_cards[] __devinitdata = {
+struct snd_ice1712_card_info snd_vt1724_wtm_cards[] = {
{
.subvendor = VT1724_SUBDEVICE_WTM,
.name = "ESI Waveterminal 192M",
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index ea4b706c8d6..3b9be752f3e 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -592,8 +592,8 @@ static unsigned short snd_intel8x0_codec_read(struct snd_ac97 *ac97,
return res;
}
-static void __devinit snd_intel8x0_codec_read_test(struct intel8x0 *chip,
- unsigned int codec)
+static void snd_intel8x0_codec_read_test(struct intel8x0 *chip,
+ unsigned int codec)
{
unsigned int tmp;
@@ -1507,8 +1507,8 @@ struct ich_pcm_table {
int ac97_idx;
};
-static int __devinit snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
- struct ich_pcm_table *rec)
+static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
+ struct ich_pcm_table *rec)
{
struct snd_pcm *pcm;
int err;
@@ -1564,7 +1564,7 @@ static int __devinit snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
return 0;
}
-static struct ich_pcm_table intel_pcms[] __devinitdata = {
+static struct ich_pcm_table intel_pcms[] = {
{
.playback_ops = &snd_intel8x0_playback_ops,
.capture_ops = &snd_intel8x0_capture_ops,
@@ -1601,7 +1601,7 @@ static struct ich_pcm_table intel_pcms[] __devinitdata = {
},
};
-static struct ich_pcm_table nforce_pcms[] __devinitdata = {
+static struct ich_pcm_table nforce_pcms[] = {
{
.playback_ops = &snd_intel8x0_playback_ops,
.capture_ops = &snd_intel8x0_capture_ops,
@@ -1624,7 +1624,7 @@ static struct ich_pcm_table nforce_pcms[] __devinitdata = {
},
};
-static struct ich_pcm_table ali_pcms[] __devinitdata = {
+static struct ich_pcm_table ali_pcms[] = {
{
.playback_ops = &snd_intel8x0_ali_playback_ops,
.capture_ops = &snd_intel8x0_ali_capture_ops,
@@ -1656,7 +1656,7 @@ static struct ich_pcm_table ali_pcms[] __devinitdata = {
#endif
};
-static int __devinit snd_intel8x0_pcm(struct intel8x0 *chip)
+static int snd_intel8x0_pcm(struct intel8x0 *chip)
{
int i, tblsize, device, err;
struct ich_pcm_table *tbl, *rec;
@@ -1719,7 +1719,7 @@ static void snd_intel8x0_mixer_free_ac97(struct snd_ac97 *ac97)
chip->ac97[ac97->num] = NULL;
}
-static struct ac97_pcm ac97_pcm_defs[] __devinitdata = {
+static struct ac97_pcm ac97_pcm_defs[] = {
/* front PCM */
{
.exclusive = 1,
@@ -1789,7 +1789,7 @@ static struct ac97_pcm ac97_pcm_defs[] __devinitdata = {
},
};
-static struct ac97_quirk ac97_quirks[] __devinitdata = {
+static struct ac97_quirk ac97_quirks[] = {
{
.subvendor = 0x0e11,
.subdevice = 0x000e,
@@ -2196,8 +2196,8 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
{ } /* terminator */
};
-static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
- const char *quirk_override)
+static int snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
+ const char *quirk_override)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -2765,7 +2765,7 @@ static SIMPLE_DEV_PM_OPS(intel8x0_pm, intel8x0_suspend, intel8x0_resume);
#define INTEL8X0_TESTBUF_SIZE 32768 /* enough large for one shot */
-static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
+static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
{
struct snd_pcm_substream *subs;
struct ichdev *ichdev;
@@ -2883,7 +2883,7 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
snd_ac97_update_power(chip->ac97[0], AC97_PCM_FRONT_DAC_RATE, 0);
}
-static struct snd_pci_quirk intel8x0_clock_list[] __devinitdata = {
+static struct snd_pci_quirk intel8x0_clock_list[] = {
SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
@@ -2892,7 +2892,7 @@ static struct snd_pci_quirk intel8x0_clock_list[] __devinitdata = {
{ } /* terminator */
};
-static int __devinit intel8x0_in_clock_list(struct intel8x0 *chip)
+static int intel8x0_in_clock_list(struct intel8x0 *chip)
{
struct pci_dev *pci = chip->pci;
const struct snd_pci_quirk *wl;
@@ -2941,7 +2941,7 @@ static void snd_intel8x0_proc_read(struct snd_info_entry * entry,
chip->ac97_sdin[2]);
}
-static void __devinit snd_intel8x0_proc_init(struct intel8x0 * chip)
+static void snd_intel8x0_proc_init(struct intel8x0 *chip)
{
struct snd_info_entry *entry;
@@ -2970,7 +2970,7 @@ static unsigned int sis_codec_bits[3] = {
ICH_PCR, ICH_SCR, ICH_SIS_TCR
};
-static int __devinit snd_intel8x0_inside_vm(struct pci_dev *pci)
+static int snd_intel8x0_inside_vm(struct pci_dev *pci)
{
int result = inside_vm;
char *msg = NULL;
@@ -3009,10 +3009,10 @@ fini:
return result;
}
-static int __devinit snd_intel8x0_create(struct snd_card *card,
- struct pci_dev *pci,
- unsigned long device_type,
- struct intel8x0 ** r_intel8x0)
+static int snd_intel8x0_create(struct snd_card *card,
+ struct pci_dev *pci,
+ unsigned long device_type,
+ struct intel8x0 **r_intel8x0)
{
struct intel8x0 *chip;
int err;
@@ -3227,7 +3227,7 @@ static int __devinit snd_intel8x0_create(struct snd_card *card,
static struct shortname_table {
unsigned int id;
const char *s;
-} shortnames[] __devinitdata = {
+} shortnames[] = {
{ PCI_DEVICE_ID_INTEL_82801AA_5, "Intel 82801AA-ICH" },
{ PCI_DEVICE_ID_INTEL_82801AB_5, "Intel 82901AB-ICH0" },
{ PCI_DEVICE_ID_INTEL_82801BA_4, "Intel 82801BA-ICH2" },
@@ -3253,13 +3253,13 @@ static struct shortname_table {
{ 0, NULL },
};
-static struct snd_pci_quirk spdif_aclink_defaults[] __devinitdata = {
+static struct snd_pci_quirk spdif_aclink_defaults[] = {
SND_PCI_QUIRK(0x147b, 0x1c1a, "ASUS KN8", 1),
{ } /* end */
};
/* look up white/black list for SPDIF over ac-link */
-static int __devinit check_default_spdif_aclink(struct pci_dev *pci)
+static int check_default_spdif_aclink(struct pci_dev *pci)
{
const struct snd_pci_quirk *w;
@@ -3276,8 +3276,8 @@ static int __devinit check_default_spdif_aclink(struct pci_dev *pci)
return 0;
}
-static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_intel8x0_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct intel8x0 *chip;
@@ -3359,7 +3359,7 @@ static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_intel8x0_remove(struct pci_dev *pci)
+static void snd_intel8x0_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -3369,7 +3369,7 @@ static struct pci_driver intel8x0_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_intel8x0_ids,
.probe = snd_intel8x0_probe,
- .remove = __devexit_p(snd_intel8x0_remove),
+ .remove = snd_intel8x0_remove,
.driver = {
.pm = INTEL8X0_PM_OPS,
},
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 4d551736531..fea09e8ea60 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -710,8 +710,8 @@ struct ich_pcm_table {
int ac97_idx;
};
-static int __devinit snd_intel8x0m_pcm1(struct intel8x0m *chip, int device,
- struct ich_pcm_table *rec)
+static int snd_intel8x0m_pcm1(struct intel8x0m *chip, int device,
+ struct ich_pcm_table *rec)
{
struct snd_pcm *pcm;
int err;
@@ -749,7 +749,7 @@ static int __devinit snd_intel8x0m_pcm1(struct intel8x0m *chip, int device,
return 0;
}
-static struct ich_pcm_table intel_pcms[] __devinitdata = {
+static struct ich_pcm_table intel_pcms[] = {
{
.suffix = "Modem",
.playback_ops = &snd_intel8x0m_playback_ops,
@@ -759,7 +759,7 @@ static struct ich_pcm_table intel_pcms[] __devinitdata = {
},
};
-static int __devinit snd_intel8x0m_pcm(struct intel8x0m *chip)
+static int snd_intel8x0m_pcm(struct intel8x0m *chip)
{
int i, tblsize, device, err;
struct ich_pcm_table *tbl, *rec;
@@ -819,7 +819,7 @@ static void snd_intel8x0m_mixer_free_ac97(struct snd_ac97 *ac97)
}
-static int __devinit snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock)
+static int snd_intel8x0m_mixer(struct intel8x0m *chip, int ac97_clock)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -1090,7 +1090,7 @@ static void snd_intel8x0m_proc_read(struct snd_info_entry * entry,
(tmp & (ICH_PCR | ICH_SCR | ICH_TCR)) == 0 ? " none" : "");
}
-static void __devinit snd_intel8x0m_proc_init(struct intel8x0m * chip)
+static void snd_intel8x0m_proc_init(struct intel8x0m *chip)
{
struct snd_info_entry *entry;
@@ -1113,10 +1113,10 @@ struct ich_reg_info {
unsigned int offset;
};
-static int __devinit snd_intel8x0m_create(struct snd_card *card,
- struct pci_dev *pci,
- unsigned long device_type,
- struct intel8x0m **r_intel8x0m)
+static int snd_intel8x0m_create(struct snd_card *card,
+ struct pci_dev *pci,
+ unsigned long device_type,
+ struct intel8x0m **r_intel8x0m)
{
struct intel8x0m *chip;
int err;
@@ -1252,7 +1252,7 @@ static int __devinit snd_intel8x0m_create(struct snd_card *card,
static struct shortname_table {
unsigned int id;
const char *s;
-} shortnames[] __devinitdata = {
+} shortnames[] = {
{ PCI_DEVICE_ID_INTEL_82801AA_6, "Intel 82801AA-ICH" },
{ PCI_DEVICE_ID_INTEL_82801AB_6, "Intel 82901AB-ICH0" },
{ PCI_DEVICE_ID_INTEL_82801BA_6, "Intel 82801BA-ICH2" },
@@ -1275,8 +1275,8 @@ static struct shortname_table {
{ 0 },
};
-static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_intel8x0m_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct intel8x0m *chip;
@@ -1325,7 +1325,7 @@ static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_intel8x0m_remove(struct pci_dev *pci)
+static void snd_intel8x0m_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1335,7 +1335,7 @@ static struct pci_driver intel8x0m_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_intel8x0m_ids,
.probe = snd_intel8x0m_probe,
- .remove = __devexit_p(snd_intel8x0m_remove),
+ .remove = snd_intel8x0m_remove,
.driver = {
.pm = INTEL8X0M_PM_OPS,
},
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 8a67ce95f24..43b4228d9af 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2083,7 +2083,7 @@ static void snd_korg1212_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, " Error count: %ld\n", korg1212->totalerrorcnt);
}
-static void __devinit snd_korg1212_proc_init(struct snd_korg1212 *korg1212)
+static void snd_korg1212_proc_init(struct snd_korg1212 *korg1212)
{
struct snd_info_entry *entry;
@@ -2154,8 +2154,8 @@ static int snd_korg1212_dev_free(struct snd_device *device)
return snd_korg1212_free(korg1212);
}
-static int __devinit snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
- struct snd_korg1212 ** rchip)
+static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
+ struct snd_korg1212 **rchip)
{
int err, rc;
@@ -2429,7 +2429,7 @@ static int __devinit snd_korg1212_create(struct snd_card *card, struct pci_dev *
* Card initialisation
*/
-static int __devinit
+static int
snd_korg1212_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -2470,7 +2470,7 @@ snd_korg1212_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_korg1212_remove(struct pci_dev *pci)
+static void snd_korg1212_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2480,7 +2480,7 @@ static struct pci_driver korg1212_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_korg1212_ids,
.probe = snd_korg1212_probe,
- .remove = __devexit_p(snd_korg1212_remove),
+ .remove = snd_korg1212_remove,
};
module_pci_driver(korg1212_driver);
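Most hunks in this part of the series are the same mechanical conversion: the __devinit/__devinitdata/__devexit section annotations are being retired from the tree (they only ever saved memory when CONFIG_HOTPLUG was disabled), so probe/remove paths become ordinary functions and .remove drops the __devexit_p() wrapper. Purely as an illustration (not code from the patch, device ID is a placeholder), a converted PCI driver skeleton looks like this:

/* Illustration only: the shape of an ALSA PCI driver after the
 * __devinit/__devexit annotations are removed. */
#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pci,
			 const struct pci_device_id *id)
{
	/* allocate the card, map resources, register ALSA devices */
	return 0;
}

static void example_remove(struct pci_dev *pci)
{
	/* free the card and clear drvdata, as in the hunks above */
}

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name = KBUILD_MODNAME,
	.id_table = example_ids,
	.probe = example_probe,
	.remove = example_remove,	/* was __devexit_p(example_remove) */
};
module_pci_driver(example_driver);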
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index ac15166bee6..322b638e8ec 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
}
-static int __devinit lola_parse_tree(struct lola *chip)
+static int lola_parse_tree(struct lola *chip)
{
unsigned int val;
int nid, err;
@@ -568,8 +568,8 @@ static int lola_dev_free(struct snd_device *device)
return 0;
}
-static int __devinit lola_create(struct snd_card *card, struct pci_dev *pci,
- int dev, struct lola **rchip)
+static int lola_create(struct snd_card *card, struct pci_dev *pci,
+ int dev, struct lola **rchip)
{
struct lola *chip;
int err;
@@ -702,8 +702,8 @@ static int __devinit lola_create(struct snd_card *card, struct pci_dev *pci,
return err;
}
-static int __devinit lola_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int lola_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -756,7 +756,7 @@ out_free:
return err;
}
-static void __devexit lola_remove(struct pci_dev *pci)
+static void lola_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -774,7 +774,7 @@ static struct pci_driver lola_driver = {
.name = KBUILD_MODNAME,
.id_table = lola_ids,
.probe = lola_probe,
- .remove = __devexit_p(lola_remove),
+ .remove = lola_remove,
};
module_pci_driver(lola_driver);
diff --git a/sound/pci/lola/lola_clock.c b/sound/pci/lola/lola_clock.c
index 72f8ef0ac86..eb1d6b97df1 100644
--- a/sound/pci/lola/lola_clock.c
+++ b/sound/pci/lola/lola_clock.c
@@ -120,7 +120,7 @@ int lola_set_granularity(struct lola *chip, unsigned int val, bool force)
* Clock widget handling
*/
-int __devinit lola_init_clock_widget(struct lola *chip, int nid)
+int lola_init_clock_widget(struct lola *chip, int nid)
{
unsigned int val;
int i, j, nitems, nb_verbs, idx, idx_list;
diff --git a/sound/pci/lola/lola_mixer.c b/sound/pci/lola/lola_mixer.c
index 6b8d6481295..52c8d6b0f39 100644
--- a/sound/pci/lola/lola_mixer.c
+++ b/sound/pci/lola/lola_mixer.c
@@ -28,8 +28,8 @@
#include <sound/tlv.h>
#include "lola.h"
-static int __devinit lola_init_pin(struct lola *chip, struct lola_pin *pin,
- int dir, int nid)
+static int lola_init_pin(struct lola *chip, struct lola_pin *pin,
+ int dir, int nid)
{
unsigned int val;
int err;
@@ -91,7 +91,7 @@ static int __devinit lola_init_pin(struct lola *chip, struct lola_pin *pin,
return 0;
}
-int __devinit lola_init_pins(struct lola *chip, int dir, int *nidp)
+int lola_init_pins(struct lola *chip, int dir, int *nidp)
{
int i, err, nid;
nid = *nidp;
@@ -112,7 +112,7 @@ void lola_free_mixer(struct lola *chip)
vfree(chip->mixer.array_saved);
}
-int __devinit lola_init_mixer_widget(struct lola *chip, int nid)
+int lola_init_mixer_widget(struct lola *chip, int nid)
{
unsigned int val;
int err;
@@ -579,7 +579,7 @@ static int lola_analog_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
return 0;
}
-static struct snd_kcontrol_new lola_analog_mixer __devinitdata = {
+static struct snd_kcontrol_new lola_analog_mixer = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
@@ -590,7 +590,7 @@ static struct snd_kcontrol_new lola_analog_mixer __devinitdata = {
.tlv.c = lola_analog_vol_tlv,
};
-static int __devinit create_analog_mixer(struct lola *chip, int dir, char *name)
+static int create_analog_mixer(struct lola *chip, int dir, char *name)
{
if (!chip->pin[dir].num_pins)
return 0;
@@ -644,7 +644,7 @@ static int lola_input_src_put(struct snd_kcontrol *kcontrol,
return lola_set_src_config(chip, mask, true);
}
-static struct snd_kcontrol_new lola_input_src_mixer __devinitdata = {
+static struct snd_kcontrol_new lola_input_src_mixer = {
.name = "Digital SRC Capture Switch",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = lola_input_src_info,
@@ -656,7 +656,7 @@ static struct snd_kcontrol_new lola_input_src_mixer __devinitdata = {
* Lola16161 or Lola881 can have Hardware sample rate converters
* on its digital input pins
*/
-static int __devinit create_input_src_mixer(struct lola *chip)
+static int create_input_src_mixer(struct lola *chip)
{
if (!chip->input_src_caps_mask)
return 0;
@@ -726,7 +726,7 @@ static int lola_src_gain_put(struct snd_kcontrol *kcontrol,
/* raw value: 0 = -84dB, 336 = 0dB, 408=18dB, incremented 1 for mute */
static const DECLARE_TLV_DB_SCALE(lola_src_gain_tlv, -8425, 25, 1);
-static struct snd_kcontrol_new lola_src_gain_mixer __devinitdata = {
+static struct snd_kcontrol_new lola_src_gain_mixer = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -736,8 +736,8 @@ static struct snd_kcontrol_new lola_src_gain_mixer __devinitdata = {
.tlv.p = lola_src_gain_tlv,
};
-static int __devinit create_src_gain_mixer(struct lola *chip,
- int num, int ofs, char *name)
+static int create_src_gain_mixer(struct lola *chip,
+ int num, int ofs, char *name)
{
lola_src_gain_mixer.name = name;
lola_src_gain_mixer.private_value = ofs + (num << 8);
@@ -813,7 +813,7 @@ static int lola_dest_gain_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(lola_dest_gain_tlv, -8425, 25, 1);
-static struct snd_kcontrol_new lola_dest_gain_mixer __devinitdata = {
+static struct snd_kcontrol_new lola_dest_gain_mixer = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -823,9 +823,9 @@ static struct snd_kcontrol_new lola_dest_gain_mixer __devinitdata = {
.tlv.p = lola_dest_gain_tlv,
};
-static int __devinit create_dest_gain_mixer(struct lola *chip,
- int src_num, int src_ofs,
- int num, int ofs, char *name)
+static int create_dest_gain_mixer(struct lola *chip,
+ int src_num, int src_ofs,
+ int num, int ofs, char *name)
{
lola_dest_gain_mixer.count = num;
lola_dest_gain_mixer.name = name;
@@ -838,7 +838,7 @@ static int __devinit create_dest_gain_mixer(struct lola *chip,
/*
*/
-int __devinit lola_create_mixer(struct lola *chip)
+int lola_create_mixer(struct lola *chip)
{
int err;
diff --git a/sound/pci/lola/lola_pcm.c b/sound/pci/lola/lola_pcm.c
index c44db68eecb..5ea85e8b83a 100644
--- a/sound/pci/lola/lola_pcm.c
+++ b/sound/pci/lola/lola_pcm.c
@@ -597,7 +597,7 @@ static struct snd_pcm_ops lola_pcm_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-int __devinit lola_create_pcm(struct lola *chip)
+int lola_create_pcm(struct lola *chip)
{
struct snd_pcm *pcm;
int i, err;
@@ -690,7 +690,7 @@ static int lola_init_stream(struct lola *chip, struct lola_stream *str,
return 0;
}
-int __devinit lola_init_pcm(struct lola *chip, int dir, int *nidp)
+int lola_init_pcm(struct lola *chip, int dir, int *nidp)
{
struct lola_pcm *pcm = &chip->pcm[dir];
int i, nid, err;
diff --git a/sound/pci/lola/lola_proc.c b/sound/pci/lola/lola_proc.c
index 9d7daf897c9..04df83defc0 100644
--- a/sound/pci/lola/lola_proc.c
+++ b/sound/pci/lola/lola_proc.c
@@ -206,7 +206,7 @@ static void lola_proc_regs_read(struct snd_info_entry *entry,
}
}
-void __devinit lola_proc_debug_new(struct lola *chip)
+void lola_proc_debug_new(struct lola *chip)
{
struct snd_info_entry *entry;
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 5579b08bb35..298bc9b7299 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -578,7 +578,7 @@ static int snd_lx6464es_dev_free(struct snd_device *device)
}
/* reset the dsp during initialization */
-static int __devinit lx_init_xilinx_reset(struct lx6464es *chip)
+static int lx_init_xilinx_reset(struct lx6464es *chip)
{
int i;
u32 plx_reg = lx_plx_reg_read(chip, ePLX_CHIPSC);
@@ -620,7 +620,7 @@ static int __devinit lx_init_xilinx_reset(struct lx6464es *chip)
return 0;
}
-static int __devinit lx_init_xilinx_test(struct lx6464es *chip)
+static int lx_init_xilinx_test(struct lx6464es *chip)
{
u32 reg;
@@ -650,7 +650,7 @@ static int __devinit lx_init_xilinx_test(struct lx6464es *chip)
}
/* initialize ethersound */
-static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
+static int lx_init_ethersound_config(struct lx6464es *chip)
{
int i;
u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);
@@ -690,7 +690,7 @@ static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
return 0;
}
-static int __devinit lx_init_get_version_features(struct lx6464es *chip)
+static int lx_init_get_version_features(struct lx6464es *chip)
{
u32 dsp_version;
@@ -759,7 +759,7 @@ static int lx_set_granularity(struct lx6464es *chip, u32 gran)
}
/* initialize and test the xilinx dsp chip */
-static int __devinit lx_init_dsp(struct lx6464es *chip)
+static int lx_init_dsp(struct lx6464es *chip)
{
int err;
int i;
@@ -835,7 +835,7 @@ static struct snd_pcm_ops lx_ops_capture = {
.pointer = lx_pcm_stream_pointer,
};
-static int __devinit lx_pcm_create(struct lx6464es *chip)
+static int lx_pcm_create(struct lx6464es *chip)
{
int err;
struct snd_pcm *pcm;
@@ -907,7 +907,7 @@ static int lx_control_playback_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new lx_control_playback_switch __devinitdata = {
+static struct snd_kcontrol_new lx_control_playback_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.index = 0,
@@ -954,7 +954,7 @@ static void lx_proc_levels_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "\n");
}
-static int __devinit lx_proc_create(struct snd_card *card, struct lx6464es *chip)
+static int lx_proc_create(struct snd_card *card, struct lx6464es *chip)
{
struct snd_info_entry *entry;
int err = snd_card_proc_new(card, "levels", &entry);
@@ -966,9 +966,9 @@ static int __devinit lx_proc_create(struct snd_card *card, struct lx6464es *chip
}
-static int __devinit snd_lx6464es_create(struct snd_card *card,
- struct pci_dev *pci,
- struct lx6464es **rchip)
+static int snd_lx6464es_create(struct snd_card *card,
+ struct pci_dev *pci,
+ struct lx6464es **rchip)
{
struct lx6464es *chip;
int err;
@@ -1082,8 +1082,8 @@ alloc_failed:
return err;
}
-static int __devinit snd_lx6464es_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_lx6464es_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1136,7 +1136,7 @@ out_free:
}
-static void __devexit snd_lx6464es_remove(struct pci_dev *pci)
+static void snd_lx6464es_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1147,7 +1147,7 @@ static struct pci_driver lx6464es_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_lx6464es_ids,
.probe = snd_lx6464es_probe,
- .remove = __devexit_p(snd_lx6464es_remove),
+ .remove = snd_lx6464es_remove,
};
module_pci_driver(lx6464es_driver);
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 8c3e7fcefd9..633c8607d05 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -385,7 +385,7 @@ polling_successful:
/* low-level dsp access */
-int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
+int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
u16 ret;
unsigned long flags;
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 4d7ff797a64..5ec5e04da1a 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -109,7 +109,7 @@ struct lx_rmh {
/* low-level dsp access */
-int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version);
+int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version);
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq);
int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran);
int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data);
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index eb3cd3a4315..9387533f70d 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -822,7 +822,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = {
MODULE_DEVICE_TABLE(pci, snd_m3_ids);
-static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+static struct snd_pci_quirk m3_amp_quirk_list[] = {
SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
@@ -831,7 +831,7 @@ static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
{ } /* END */
};
-static struct snd_pci_quirk m3_irda_quirk_list[] __devinitdata = {
+static struct snd_pci_quirk m3_irda_quirk_list[] = {
SND_PCI_QUIRK(0x1028, 0x00b0, "Dell Inspiron 4000", 1),
SND_PCI_QUIRK(0x1028, 0x00a4, "Dell Inspiron 8000", 1),
SND_PCI_QUIRK(0x1028, 0x00e6, "Dell Inspiron 8100", 1),
@@ -839,7 +839,7 @@ static struct snd_pci_quirk m3_irda_quirk_list[] __devinitdata = {
};
/* hardware volume quirks */
-static struct snd_pci_quirk m3_hv_quirk_list[] __devinitdata = {
+static struct snd_pci_quirk m3_hv_quirk_list[] = {
/* Allegro chips */
SND_PCI_QUIRK(0x0E11, 0x002E, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
SND_PCI_QUIRK(0x0E11, 0x0094, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
@@ -917,7 +917,7 @@ static struct snd_pci_quirk m3_hv_quirk_list[] __devinitdata = {
};
/* HP Omnibook quirks */
-static struct snd_pci_quirk m3_omnibook_quirk_list[] __devinitdata = {
+static struct snd_pci_quirk m3_omnibook_quirk_list[] = {
SND_PCI_QUIRK_ID(0x103c, 0x0010), /* HP OmniBook 6000 */
SND_PCI_QUIRK_ID(0x103c, 0x0011), /* HP OmniBook 500 */
{ } /* END */
@@ -1856,7 +1856,7 @@ static struct snd_pcm_ops snd_m3_capture_ops = {
.pointer = snd_m3_pcm_pointer,
};
-static int __devinit
+static int
snd_m3_pcm(struct snd_m3 * chip, int device)
{
struct snd_pcm *pcm;
@@ -2031,7 +2031,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
#endif
}
-static int __devinit snd_m3_mixer(struct snd_m3 *chip)
+static int snd_m3_mixer(struct snd_m3 *chip)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -2173,7 +2173,7 @@ static void snd_m3_assp_init(struct snd_m3 *chip)
}
-static int __devinit snd_m3_assp_client_init(struct snd_m3 *chip, struct m3_dma *s, int index)
+static int snd_m3_assp_client_init(struct snd_m3 *chip, struct m3_dma *s, int index)
{
int data_bytes = 2 * ( MINISRC_TMP_BUFFER_SIZE / 2 +
MINISRC_IN_BUFFER_SIZE / 2 +
@@ -2488,7 +2488,7 @@ static SIMPLE_DEV_PM_OPS(m3_pm, m3_suspend, m3_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SND_MAESTRO3_INPUT
-static int __devinit snd_m3_input_register(struct snd_m3 *chip)
+static int snd_m3_input_register(struct snd_m3 *chip)
{
struct input_dev *input_dev;
int err;
@@ -2532,7 +2532,7 @@ static int snd_m3_dev_free(struct snd_device *device)
return snd_m3_free(chip);
}
-static int __devinit
+static int
snd_m3_create(struct snd_card *card, struct pci_dev *pci,
int enable_amp,
int amp_gpio,
@@ -2700,7 +2700,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
/*
*/
-static int __devinit
+static int
snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -2770,7 +2770,7 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
-static void __devexit snd_m3_remove(struct pci_dev *pci)
+static void snd_m3_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2780,7 +2780,7 @@ static struct pci_driver m3_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_m3_ids,
.probe = snd_m3_probe,
- .remove = __devexit_p(snd_m3_remove),
+ .remove = snd_m3_remove,
.driver = {
.pm = M3_PM_OPS,
},
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 0762610c99c..01f7f37a841 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1004,7 +1004,7 @@ static int snd_mixart_chip_dev_free(struct snd_device *device)
/*
*/
-static int __devinit snd_mixart_create(struct mixart_mgr *mgr, struct snd_card *card, int idx)
+static int snd_mixart_create(struct mixart_mgr *mgr, struct snd_card *card, int idx)
{
int err;
struct snd_mixart *chip;
@@ -1180,7 +1180,7 @@ static void snd_mixart_proc_read(struct snd_info_entry *entry,
} /* endif elf loaded */
}
-static void __devinit snd_mixart_proc_init(struct snd_mixart *chip)
+static void snd_mixart_proc_init(struct snd_mixart *chip)
{
struct snd_info_entry *entry;
@@ -1209,8 +1209,8 @@ static void __devinit snd_mixart_proc_init(struct snd_mixart *chip)
/*
* probe function - creates the card manager
*/
-static int __devinit snd_mixart_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_mixart_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct mixart_mgr *mgr;
@@ -1374,7 +1374,7 @@ static int __devinit snd_mixart_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_mixart_remove(struct pci_dev *pci)
+static void snd_mixart_remove(struct pci_dev *pci)
{
snd_mixart_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1384,7 +1384,7 @@ static struct pci_driver mixart_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_mixart_ids,
.probe = snd_mixart_probe,
- .remove = __devexit_p(snd_mixart_remove),
+ .remove = snd_mixart_remove,
};
module_pci_driver(mixart_driver);
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index e0f4d87555a..ece1f831c16 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -546,14 +546,6 @@ static int mixart_dsp_load(struct mixart_mgr* mgr, int index, const struct firmw
}
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
-#if !defined(CONFIG_USE_MIXARTLOADER) && !defined(CONFIG_SND_MIXART) /* built-in kernel */
-#define SND_MIXART_FW_LOADER /* use the standard firmware loader */
-#endif
-#endif
-
-#ifdef SND_MIXART_FW_LOADER
-
int snd_mixart_setup_firmware(struct mixart_mgr *mgr)
{
static char *fw_files[3] = {
@@ -583,71 +575,3 @@ int snd_mixart_setup_firmware(struct mixart_mgr *mgr)
MODULE_FIRMWARE("mixart/miXart8.xlx");
MODULE_FIRMWARE("mixart/miXart8.elf");
MODULE_FIRMWARE("mixart/miXart8AES.xlx");
-
-#else /* old style firmware loading */
-
-/* miXart hwdep interface id string */
-#define SND_MIXART_HWDEP_ID "miXart Loader"
-
-static int mixart_hwdep_dsp_status(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_status *info)
-{
- struct mixart_mgr *mgr = hw->private_data;
-
- strcpy(info->id, "miXart");
- info->num_dsps = MIXART_HARDW_FILES_MAX_INDEX;
-
- if (mgr->dsp_loaded & (1 << MIXART_MOTHERBOARD_ELF_INDEX))
- info->chip_ready = 1;
-
- info->version = MIXART_DRIVER_VERSION;
- return 0;
-}
-
-static int mixart_hwdep_dsp_load(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_image *dsp)
-{
- struct mixart_mgr* mgr = hw->private_data;
- struct firmware fw;
- int err;
-
- fw.size = dsp->length;
- fw.data = vmalloc(dsp->length);
- if (! fw.data) {
- snd_printk(KERN_ERR "miXart: cannot allocate image size %d\n",
- (int)dsp->length);
- return -ENOMEM;
- }
- if (copy_from_user((void *) fw.data, dsp->image, dsp->length)) {
- vfree(fw.data);
- return -EFAULT;
- }
- err = mixart_dsp_load(mgr, dsp->index, &fw);
- vfree(fw.data);
- if (err < 0)
- return err;
- mgr->dsp_loaded |= 1 << dsp->index;
- return err;
-}
-
-int snd_mixart_setup_firmware(struct mixart_mgr *mgr)
-{
- int err;
- struct snd_hwdep *hw;
-
- /* only create hwdep interface for first cardX (see "index" module parameter)*/
- if ((err = snd_hwdep_new(mgr->chip[0]->card, SND_MIXART_HWDEP_ID, 0, &hw)) < 0)
- return err;
-
- hw->iface = SNDRV_HWDEP_IFACE_MIXART;
- hw->private_data = mgr;
- hw->ops.dsp_status = mixart_hwdep_dsp_status;
- hw->ops.dsp_load = mixart_hwdep_dsp_load;
- hw->exclusive = 1;
- sprintf(hw->name, SND_MIXART_HWDEP_ID);
- mgr->dsp_loaded = 0;
-
- return snd_card_register(mgr->chip[0]->card);
-}
-
-#endif /* SND_MIXART_FW_LOADER */
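With the old hwdep-based DSP upload path deleted, the miXart driver keeps only the request_firmware() route, now compiled unconditionally. As a reminder of the shape of that route, here is a generic sketch (not the driver's actual code; helper name invented):

/* Generic request_firmware() pattern, shown in isolation. */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_dsp(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err < 0)
		return err;	/* image not found in /lib/firmware */

	/* ... feed fw->data / fw->size to the DSP here ... */

	release_firmware(fw);
	return 0;
}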
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index e80e9a1e84a..563a193e36a 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -928,7 +928,7 @@ static struct snd_pcm_ops snd_nm256_capture_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static int __devinit
+static int
snd_nm256_pcm(struct nm256 *chip, int device)
{
struct snd_pcm *pcm;
@@ -1295,7 +1295,7 @@ snd_nm256_ac97_reset(struct snd_ac97 *ac97)
}
/* create an ac97 mixer interface */
-static int __devinit
+static int
snd_nm256_mixer(struct nm256 *chip)
{
struct snd_ac97_bus *pbus;
@@ -1336,7 +1336,7 @@ snd_nm256_mixer(struct nm256 *chip)
* RAM.
*/
-static int __devinit
+static int
snd_nm256_peek_for_sig(struct nm256 *chip)
{
/* The signature is located 1K below the end of video RAM. */
@@ -1472,7 +1472,7 @@ static int snd_nm256_dev_free(struct snd_device *device)
return snd_nm256_free(chip);
}
-static int __devinit
+static int
snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
struct nm256 **chip_ret)
{
@@ -1639,7 +1639,7 @@ __error:
enum { NM_BLACKLISTED, NM_RESET_WORKAROUND, NM_RESET_WORKAROUND_2 };
-static struct snd_pci_quirk nm256_quirks[] __devinitdata = {
+static struct snd_pci_quirk nm256_quirks[] = {
/* HP omnibook 4150 has cs4232 codec internally */
SND_PCI_QUIRK(0x103c, 0x0007, "HP omnibook 4150", NM_BLACKLISTED),
/* Reset workarounds to avoid lock-ups */
@@ -1650,8 +1650,8 @@ static struct snd_pci_quirk nm256_quirks[] __devinitdata = {
};
-static int __devinit snd_nm256_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_nm256_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct nm256 *chip;
@@ -1742,7 +1742,7 @@ static int __devinit snd_nm256_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_nm256_remove(struct pci_dev *pci)
+static void snd_nm256_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1753,7 +1753,7 @@ static struct pci_driver nm256_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_nm256_ids,
.probe = snd_nm256_probe,
- .remove = __devexit_p(snd_nm256_remove),
+ .remove = snd_nm256_remove,
.driver = {
.pm = NM256_PM_OPS,
},
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 2becae155a4..ada6c256378 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -756,8 +756,8 @@ static const struct oxygen_model model_generic = {
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
};
-static int __devinit get_oxygen_model(struct oxygen *chip,
- const struct pci_device_id *id)
+static int get_oxygen_model(struct oxygen *chip,
+ const struct pci_device_id *id)
{
static const char *const names[] = {
[MODEL_MERIDIAN] = "AuzenTech X-Meridian",
@@ -848,8 +848,8 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
return 0;
}
-static int __devinit generic_oxygen_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int generic_oxygen_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
int err;
@@ -871,7 +871,7 @@ static struct pci_driver oxygen_driver = {
.name = KBUILD_MODNAME,
.id_table = oxygen_ids,
.probe = generic_oxygen_probe,
- .remove = __devexit_p(oxygen_pci_remove),
+ .remove = oxygen_pci_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &oxygen_pci_pm,
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index 3d71423b23b..64b9fda5f04 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -52,13 +52,14 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = {
{ OXYGEN_PCI_SUBID(0x1043, 0x835d) },
{ OXYGEN_PCI_SUBID(0x1043, 0x835e) },
{ OXYGEN_PCI_SUBID(0x1043, 0x838e) },
+ { OXYGEN_PCI_SUBID(0x1043, 0x8522) },
{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
{ }
};
MODULE_DEVICE_TABLE(pci, xonar_ids);
-static int __devinit get_xonar_model(struct oxygen *chip,
- const struct pci_device_id *id)
+static int get_xonar_model(struct oxygen *chip,
+ const struct pci_device_id *id)
{
if (get_xonar_pcm179x_model(chip, id) >= 0)
return 0;
@@ -69,8 +70,8 @@ static int __devinit get_xonar_model(struct oxygen *chip,
return -EINVAL;
}
-static int __devinit xonar_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int xonar_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
int err;
@@ -92,7 +93,7 @@ static struct pci_driver xonar_driver = {
.name = KBUILD_MODNAME,
.id_table = xonar_ids,
.probe = xonar_probe,
- .remove = __devexit_p(oxygen_pci_remove),
+ .remove = oxygen_pci_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &oxygen_pci_pm,
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
index c8febf4b9bd..d231b93d6ab 100644
--- a/sound/pci/oxygen/xonar_cs43xx.c
+++ b/sound/pci/oxygen/xonar_cs43xx.c
@@ -431,8 +431,8 @@ static const struct oxygen_model model_xonar_d1 = {
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
};
-int __devinit get_xonar_cs43xx_model(struct oxygen *chip,
- const struct pci_device_id *id)
+int get_xonar_cs43xx_model(struct oxygen *chip,
+ const struct pci_device_id *id)
{
switch (id->subdevice) {
case 0x834f:
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index 8433aa7c3d7..c8c7f2c9b35 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -1087,8 +1087,8 @@ static const struct oxygen_model model_xonar_st = {
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
};
-int __devinit get_xonar_pcm179x_model(struct oxygen *chip,
- const struct pci_device_id *id)
+int get_xonar_pcm179x_model(struct oxygen *chip,
+ const struct pci_device_id *id)
{
switch (id->subdevice) {
case 0x8269:
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index 63cff90706b..6ce68604c25 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -1255,7 +1255,6 @@ static void dump_wm87x6_registers(struct oxygen *chip,
}
static const struct oxygen_model model_xonar_ds = {
- .shortname = "Xonar DS",
.longname = "Asus Virtuoso 66",
.chip = "AV200",
.init = xonar_ds_init,
@@ -1321,12 +1320,17 @@ static const struct oxygen_model model_xonar_hdav_slim = {
.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
};
-int __devinit get_xonar_wm87x6_model(struct oxygen *chip,
- const struct pci_device_id *id)
+int get_xonar_wm87x6_model(struct oxygen *chip,
+ const struct pci_device_id *id)
{
switch (id->subdevice) {
case 0x838e:
chip->model = model_xonar_ds;
+ chip->model.shortname = "Xonar DS";
+ break;
+ case 0x8522:
+ chip->model = model_xonar_ds;
+ chip->model.shortname = "Xonar DSX";
break;
case 0x835e:
chip->model = model_xonar_hdav_slim;
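The Xonar DSX (PCI subdevice 0x8522) is added by reusing the Xonar DS model, so the shortname moves out of the shared template and is filled in per subdevice after the template is copied. The pattern in isolation, with types and names invented for illustration:

/* Sketch of the specialise-after-copy pattern used above. */
struct example_model {
	const char *shortname;
	const char *longname;
};

static const struct example_model model_template = {
	.longname = "Asus Virtuoso 66",
};

static int example_get_model(struct example_model *model,
			     unsigned short subdevice)
{
	*model = model_template;	/* copy, then specialise */
	switch (subdevice) {
	case 0x838e:
		model->shortname = "Xonar DS";
		return 0;
	case 0x8522:
		model->shortname = "Xonar DSX";
		return 0;
	}
	return -EINVAL;
}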
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index be4f1456009..b97384ad946 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1203,8 +1203,8 @@ static int pcxhr_chip_dev_free(struct snd_device *device)
/*
*/
-static int __devinit pcxhr_create(struct pcxhr_mgr *mgr,
- struct snd_card *card, int idx)
+static int pcxhr_create(struct pcxhr_mgr *mgr,
+ struct snd_card *card, int idx)
{
int err;
struct snd_pcxhr *chip;
@@ -1453,7 +1453,7 @@ static void pcxhr_proc_ltc(struct snd_info_entry *entry,
}
}
-static void __devinit pcxhr_proc_init(struct snd_pcxhr *chip)
+static void pcxhr_proc_init(struct snd_pcxhr *chip)
{
struct snd_info_entry *entry;
@@ -1513,8 +1513,8 @@ static int pcxhr_free(struct pcxhr_mgr *mgr)
/*
* probe function - creates the card manager
*/
-static int __devinit pcxhr_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int pcxhr_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct pcxhr_mgr *mgr;
@@ -1688,7 +1688,7 @@ static int __devinit pcxhr_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit pcxhr_remove(struct pci_dev *pci)
+static void pcxhr_remove(struct pci_dev *pci)
{
pcxhr_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1698,7 +1698,7 @@ static struct pci_driver pcxhr_driver = {
.name = KBUILD_MODNAME,
.id_table = pcxhr_ids,
.probe = pcxhr_probe,
- .remove = __devexit_p(pcxhr_remove),
+ .remove = pcxhr_remove,
};
module_pci_driver(pcxhr_driver);
diff --git a/sound/pci/pcxhr/pcxhr_hwdep.c b/sound/pci/pcxhr/pcxhr_hwdep.c
index bf207e317f7..d995175c1c4 100644
--- a/sound/pci/pcxhr/pcxhr_hwdep.c
+++ b/sound/pci/pcxhr/pcxhr_hwdep.c
@@ -35,13 +35,6 @@
#include "pcxhr_mix22.h"
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
-#if !defined(CONFIG_USE_PCXHRLOADER) && !defined(CONFIG_SND_PCXHR) /* built-in kernel */
-#define SND_PCXHR_FW_LOADER /* use the standard firmware loader */
-#endif
-#endif
-
-
static int pcxhr_sub_init(struct pcxhr_mgr *mgr);
/*
* get basic information and init pcxhr card
@@ -362,8 +355,6 @@ static int pcxhr_dsp_load(struct pcxhr_mgr *mgr, int index,
/*
* fw loader entry
*/
-#ifdef SND_PCXHR_FW_LOADER
-
int pcxhr_setup_firmware(struct pcxhr_mgr *mgr)
{
static char *fw_files[][5] = {
@@ -424,80 +415,3 @@ MODULE_FIRMWARE("pcxhr/xlxc924.dat");
MODULE_FIRMWARE("pcxhr/dspe924.e56");
MODULE_FIRMWARE("pcxhr/dspb924.b56");
MODULE_FIRMWARE("pcxhr/dspd222.d56");
-
-
-#else /* old style firmware loading */
-
-/* pcxhr hwdep interface id string */
-#define PCXHR_HWDEP_ID "pcxhr loader"
-
-
-static int pcxhr_hwdep_dsp_status(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_status *info)
-{
- struct pcxhr_mgr *mgr = hw->private_data;
- sprintf(info->id, "pcxhr%d", mgr->fw_file_set);
- info->num_dsps = PCXHR_FIRMWARE_FILES_MAX_INDEX;
-
- if (hw->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX))
- info->chip_ready = 1;
-
- info->version = PCXHR_DRIVER_VERSION;
- return 0;
-}
-
-static int pcxhr_hwdep_dsp_load(struct snd_hwdep *hw,
- struct snd_hwdep_dsp_image *dsp)
-{
- struct pcxhr_mgr *mgr = hw->private_data;
- int err;
- struct firmware fw;
-
- fw.size = dsp->length;
- fw.data = vmalloc(fw.size);
- if (! fw.data) {
- snd_printk(KERN_ERR "pcxhr: cannot allocate dsp image "
- "(%lu bytes)\n", (unsigned long)fw.size);
- return -ENOMEM;
- }
- if (copy_from_user((void *)fw.data, dsp->image, dsp->length)) {
- vfree(fw.data);
- return -EFAULT;
- }
- err = pcxhr_dsp_load(mgr, dsp->index, &fw);
- vfree(fw.data);
- if (err < 0)
- return err;
- mgr->dsp_loaded |= 1 << dsp->index;
- return 0;
-}
-
-int pcxhr_setup_firmware(struct pcxhr_mgr *mgr)
-{
- int err;
- struct snd_hwdep *hw;
-
- /* only create hwdep interface for first cardX
- * (see "index" module parameter)
- */
- err = snd_hwdep_new(mgr->chip[0]->card, PCXHR_HWDEP_ID, 0, &hw);
- if (err < 0)
- return err;
-
- hw->iface = SNDRV_HWDEP_IFACE_PCXHR;
- hw->private_data = mgr;
- hw->ops.dsp_status = pcxhr_hwdep_dsp_status;
- hw->ops.dsp_load = pcxhr_hwdep_dsp_load;
- hw->exclusive = 1;
- /* stereo cards don't need fw_file_0 -> dsp_loaded = 1 */
- hw->dsp_loaded = mgr->is_hr_stereo ? 1 : 0;
- mgr->dsp_loaded = 0;
- sprintf(hw->name, PCXHR_HWDEP_ID);
-
- err = snd_card_register(mgr->chip[0]->card);
- if (err < 0)
- return err;
- return 0;
-}
-
-#endif /* SND_PCXHR_FW_LOADER */
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 7d291542c5b..63c1c804155 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1706,7 +1706,7 @@ static struct snd_pcm_ops snd_riptide_capture_ops = {
.pointer = snd_riptide_pointer,
};
-static int __devinit
+static int
snd_riptide_pcm(struct snd_riptide *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
@@ -1857,7 +1857,7 @@ static int snd_riptide_dev_free(struct snd_device *device)
return snd_riptide_free(chip);
}
-static int __devinit
+static int
snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
struct snd_riptide **rchip)
{
@@ -1993,7 +1993,7 @@ snd_riptide_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "\n");
}
-static void __devinit snd_riptide_proc_init(struct snd_riptide *chip)
+static void snd_riptide_proc_init(struct snd_riptide *chip)
{
struct snd_info_entry *entry;
@@ -2001,7 +2001,7 @@ static void __devinit snd_riptide_proc_init(struct snd_riptide *chip)
snd_info_set_text_ops(entry, chip, snd_riptide_proc_read);
}
-static int __devinit snd_riptide_mixer(struct snd_riptide *chip)
+static int snd_riptide_mixer(struct snd_riptide *chip)
{
struct snd_ac97_bus *pbus;
struct snd_ac97_template ac97;
@@ -2027,7 +2027,7 @@ static int __devinit snd_riptide_mixer(struct snd_riptide *chip)
#ifdef SUPPORT_JOYSTICK
-static int __devinit
+static int
snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
static int dev;
@@ -2060,7 +2060,7 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
}
-static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci)
+static void snd_riptide_joystick_remove(struct pci_dev *pci)
{
struct gameport *gameport = pci_get_drvdata(pci);
if (gameport) {
@@ -2071,7 +2071,7 @@ static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci)
}
#endif
-static int __devinit
+static int
snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -2176,7 +2176,7 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return err;
}
-static void __devexit snd_card_riptide_remove(struct pci_dev *pci)
+static void snd_card_riptide_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2186,7 +2186,7 @@ static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_riptide_ids,
.probe = snd_card_riptide_probe,
- .remove = __devexit_p(snd_card_riptide_remove),
+ .remove = snd_card_riptide_remove,
.driver = {
.pm = RIPTIDE_PM_OPS,
},
@@ -2197,7 +2197,7 @@ static struct pci_driver joystick_driver = {
.name = KBUILD_MODNAME "-joystick",
.id_table = snd_riptide_joystick_ids,
.probe = snd_riptide_joystick_probe,
- .remove = __devexit_p(snd_riptide_joystick_remove),
+ .remove = snd_riptide_joystick_remove,
};
#endif
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 46b3629dda2..2450663e1a1 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1332,7 +1332,7 @@ snd_rme32_free_adat_pcm(struct snd_pcm *pcm)
rme32->adat_pcm = NULL;
}
-static int __devinit snd_rme32_create(struct rme32 * rme32)
+static int snd_rme32_create(struct rme32 *rme32)
{
struct pci_dev *pci = rme32->pci;
int err;
@@ -1554,7 +1554,7 @@ snd_rme32_proc_read(struct snd_info_entry * entry, struct snd_info_buffer *buffe
}
}
-static void __devinit snd_rme32_proc_init(struct rme32 * rme32)
+static void snd_rme32_proc_init(struct rme32 *rme32)
{
struct snd_info_entry *entry;
@@ -1922,7 +1922,7 @@ static void snd_rme32_card_free(struct snd_card *card)
snd_rme32_free(card->private_data);
}
-static int __devinit
+static int
snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
@@ -1978,7 +1978,7 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
-static void __devexit snd_rme32_remove(struct pci_dev *pci)
+static void snd_rme32_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1988,7 +1988,7 @@ static struct pci_driver rme32_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_rme32_ids,
.probe = snd_rme32_probe,
- .remove = __devexit_p(snd_rme32_remove),
+ .remove = snd_rme32_remove,
};
module_pci_driver(rme32_driver);
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 9b98dc40698..5fb88ac82aa 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -270,8 +270,7 @@ snd_rme96_playback_pointer(struct snd_pcm_substream *substream);
static snd_pcm_uframes_t
snd_rme96_capture_pointer(struct snd_pcm_substream *substream);
-static void __devinit
-snd_rme96_proc_init(struct rme96 *rme96);
+static void snd_rme96_proc_init(struct rme96 *rme96);
static int
snd_rme96_create_switches(struct snd_card *card,
@@ -1538,7 +1537,7 @@ snd_rme96_free_adat_pcm(struct snd_pcm *pcm)
rme96->adat_pcm = NULL;
}
-static int __devinit
+static int
snd_rme96_create(struct rme96 *rme96)
{
struct pci_dev *pci = rme96->pci;
@@ -1786,8 +1785,7 @@ snd_rme96_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer
}
}
-static void __devinit
-snd_rme96_proc_init(struct rme96 *rme96)
+static void snd_rme96_proc_init(struct rme96 *rme96)
{
struct snd_info_entry *entry;
@@ -2326,7 +2324,7 @@ static void snd_rme96_card_free(struct snd_card *card)
snd_rme96_free(card->private_data);
}
-static int __devinit
+static int
snd_rme96_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -2389,7 +2387,7 @@ snd_rme96_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_rme96_remove(struct pci_dev *pci)
+static void snd_rme96_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2399,7 +2397,7 @@ static struct pci_driver rme96_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_rme96_ids,
.probe = snd_rme96_probe,
- .remove = __devexit_p(snd_rme96_remove),
+ .remove = snd_rme96_remove,
};
module_pci_driver(rme96_driver);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 0d6930c4f4b..4fae81f21ef 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -28,6 +28,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/math64.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -59,13 +60,11 @@ MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP},"
"{RME HDSP-9652},"
"{RME HDSP-9632}}");
-#ifdef HDSP_FW_LOADER
MODULE_FIRMWARE("rpm_firmware.bin");
MODULE_FIRMWARE("multiface_firmware.bin");
MODULE_FIRMWARE("multiface_firmware_rev11.bin");
MODULE_FIRMWARE("digiface_firmware.bin");
MODULE_FIRMWARE("digiface_firmware_rev11.bin");
-#endif
#define HDSP_MAX_CHANNELS 26
#define HDSP_MAX_DS_CHANNELS 14
@@ -423,12 +422,7 @@ MODULE_FIRMWARE("digiface_firmware_rev11.bin");
#define HDSP_DMA_AREA_BYTES ((HDSP_MAX_CHANNELS+1) * HDSP_CHANNEL_BUFFER_BYTES)
#define HDSP_DMA_AREA_KILOBYTES (HDSP_DMA_AREA_BYTES/1024)
-/* use hotplug firmware loader? */
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
-#if !defined(HDSP_USE_HWDEP_LOADER)
-#define HDSP_FW_LOADER
-#endif
-#endif
+#define HDSP_FIRMWARE_SIZE (24413 * 4)
struct hdsp_9632_meters {
u32 input_peak[16];
@@ -475,7 +469,8 @@ struct hdsp {
enum HDSP_IO_Type io_type; /* ditto, but for code use */
unsigned short firmware_rev;
unsigned short state; /* stores state bits */
- u32 firmware_cache[24413]; /* this helps recover from accidental iobox power failure */
+ const struct firmware *firmware;
+ u32 *fw_uploaded;
size_t period_bytes; /* guess what this is */
unsigned char max_channels;
unsigned char qs_in_channels; /* quad speed mode for H9632 */
@@ -712,6 +707,17 @@ static int snd_hdsp_load_firmware_from_cache(struct hdsp *hdsp) {
int i;
unsigned long flags;
+ const u32 *cache;
+
+ if (hdsp->fw_uploaded)
+ cache = hdsp->fw_uploaded;
+ else {
+ if (!hdsp->firmware)
+ return -ENODEV;
+ cache = (u32 *)hdsp->firmware->data;
+ if (!cache)
+ return -ENODEV;
+ }
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
@@ -727,8 +733,8 @@ static int snd_hdsp_load_firmware_from_cache(struct hdsp *hdsp) {
hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
- for (i = 0; i < 24413; ++i) {
- hdsp_write(hdsp, HDSP_fifoData, hdsp->firmware_cache[i]);
+ for (i = 0; i < HDSP_FIRMWARE_SIZE / 4; ++i) {
+ hdsp_write(hdsp, HDSP_fifoData, cache[i]);
if (hdsp_fifo_wait (hdsp, 127, HDSP_LONG_WAIT)) {
snd_printk ("Hammerfall-DSP: timeout during firmware loading\n");
return -EIO;
@@ -798,9 +804,7 @@ static int hdsp_get_iobox_version (struct hdsp *hdsp)
}
-#ifdef HDSP_FW_LOADER
static int hdsp_request_fw_loader(struct hdsp *hdsp);
-#endif
static int hdsp_check_for_firmware (struct hdsp *hdsp, int load_on_demand)
{
@@ -813,10 +817,8 @@ static int hdsp_check_for_firmware (struct hdsp *hdsp, int load_on_demand)
snd_printk(KERN_ERR "Hammerfall-DSP: firmware not present.\n");
/* try to load firmware */
if (! (hdsp->state & HDSP_FirmwareCached)) {
-#ifdef HDSP_FW_LOADER
if (! hdsp_request_fw_loader(hdsp))
return 0;
-#endif
snd_printk(KERN_ERR
"Hammerfall-DSP: No firmware loaded nor "
"cached, please upload firmware.\n");
@@ -3673,9 +3675,7 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
}
} else {
int err = -EINVAL;
-#ifdef HDSP_FW_LOADER
err = hdsp_request_fw_loader(hdsp);
-#endif
if (err < 0) {
snd_iprintf(buffer,
"No firmware loaded nor cached, "
@@ -4020,7 +4020,7 @@ static void snd_hdsp_free_buffers(struct hdsp *hdsp)
snd_hammerfall_free_buffer(&hdsp->playback_dma_buf, hdsp->pci);
}
-static int __devinit snd_hdsp_initialize_memory(struct hdsp *hdsp)
+static int snd_hdsp_initialize_memory(struct hdsp *hdsp)
{
unsigned long pb_bus, cb_bus;
@@ -5100,8 +5100,18 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
if (hdsp_check_for_iobox (hdsp))
return -EIO;
- if (copy_from_user(hdsp->firmware_cache, firmware_data, sizeof(hdsp->firmware_cache)) != 0)
+ if (!hdsp->fw_uploaded) {
+ hdsp->fw_uploaded = vmalloc(HDSP_FIRMWARE_SIZE);
+ if (!hdsp->fw_uploaded)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(hdsp->fw_uploaded, firmware_data,
+ HDSP_FIRMWARE_SIZE)) {
+ vfree(hdsp->fw_uploaded);
+ hdsp->fw_uploaded = NULL;
return -EFAULT;
+ }
hdsp->state |= HDSP_FirmwareCached;
@@ -5330,7 +5340,6 @@ static int snd_hdsp_create_alsa_devices(struct snd_card *card, struct hdsp *hdsp
return 0;
}
-#ifdef HDSP_FW_LOADER
/* load firmware via hotplug fw loader */
static int hdsp_request_fw_loader(struct hdsp *hdsp)
{
@@ -5373,16 +5382,13 @@ static int hdsp_request_fw_loader(struct hdsp *hdsp)
snd_printk(KERN_ERR "Hammerfall-DSP: cannot load firmware %s\n", fwfile);
return -ENOENT;
}
- if (fw->size < sizeof(hdsp->firmware_cache)) {
+ if (fw->size < HDSP_FIRMWARE_SIZE) {
snd_printk(KERN_ERR "Hammerfall-DSP: too short firmware size %d (expected %d)\n",
- (int)fw->size, (int)sizeof(hdsp->firmware_cache));
- release_firmware(fw);
+ (int)fw->size, HDSP_FIRMWARE_SIZE);
return -EINVAL;
}
- memcpy(hdsp->firmware_cache, fw->data, sizeof(hdsp->firmware_cache));
-
- release_firmware(fw);
+ hdsp->firmware = fw;
hdsp->state |= HDSP_FirmwareCached;
@@ -5406,10 +5412,9 @@ static int hdsp_request_fw_loader(struct hdsp *hdsp)
}
return 0;
}
-#endif
-static int __devinit snd_hdsp_create(struct snd_card *card,
- struct hdsp *hdsp)
+static int snd_hdsp_create(struct snd_card *card,
+ struct hdsp *hdsp)
{
struct pci_dev *pci = hdsp->pci;
int err;
@@ -5504,7 +5509,6 @@ static int __devinit snd_hdsp_create(struct snd_card *card,
return err;
if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
-#ifdef HDSP_FW_LOADER
if ((err = hdsp_request_fw_loader(hdsp)) < 0)
/* we don't fail as this can happen
if userspace is not ready for
@@ -5514,7 +5518,6 @@ static int __devinit snd_hdsp_create(struct snd_card *card,
else
/* init is complete, we return */
return 0;
-#endif
/* we defer initialization */
snd_printk(KERN_INFO "Hammerfall-DSP: card initialization pending : waiting for firmware\n");
if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
@@ -5568,6 +5571,10 @@ static int snd_hdsp_free(struct hdsp *hdsp)
snd_hdsp_free_buffers(hdsp);
+ if (hdsp->firmware)
+ release_firmware(hdsp->firmware);
+ vfree(hdsp->fw_uploaded);
+
if (hdsp->iobase)
iounmap(hdsp->iobase);
@@ -5586,8 +5593,8 @@ static void snd_hdsp_card_free(struct snd_card *card)
snd_hdsp_free(hdsp);
}
-static int __devinit snd_hdsp_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_hdsp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct hdsp *hdsp;
@@ -5630,7 +5637,7 @@ static int __devinit snd_hdsp_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_hdsp_remove(struct pci_dev *pci)
+static void snd_hdsp_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -5640,7 +5647,7 @@ static struct pci_driver hdsp_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_hdsp_ids,
.probe = snd_hdsp_probe,
- .remove = __devexit_p(snd_hdsp_remove),
+ .remove = snd_hdsp_remove,
};
module_pci_driver(hdsp_driver);
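The hdsp.c hunks above change how firmware is cached: instead of copying user data into a fixed firmware_cache array, the driver now keeps either a vmalloc'd user upload (fw_uploaded) or the struct firmware handed back by the kernel firmware loader (hdsp->firmware), and releases whichever it holds in snd_hdsp_free(). A minimal sketch of that ownership pattern follows; the value of HDSP_FIRMWARE_SIZE and struct my_dev are assumptions for illustration, while the field names and calls mirror the diff.

/* Sketch of the firmware-ownership pattern from the hdsp.c hunks above.
 * Field and constant names follow the diff; struct my_dev is illustrative.
 */
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#define HDSP_FIRMWARE_SIZE	(24413 * 4)	/* assumption: real value is in hdsp.c */

struct my_dev {
	void *fw_uploaded;		/* user-space upload, vmalloc'd on demand */
	const struct firmware *firmware; /* firmware-loader result, kept until free */
};

static int my_dev_cache_user_firmware(struct my_dev *chip,
				      const void __user *data)
{
	if (!chip->fw_uploaded) {
		chip->fw_uploaded = vmalloc(HDSP_FIRMWARE_SIZE);
		if (!chip->fw_uploaded)
			return -ENOMEM;
	}
	if (copy_from_user(chip->fw_uploaded, data, HDSP_FIRMWARE_SIZE)) {
		/* drop the half-filled buffer so a later retry starts clean */
		vfree(chip->fw_uploaded);
		chip->fw_uploaded = NULL;
		return -EFAULT;
	}
	return 0;
}

static void my_dev_free_firmware(struct my_dev *chip)
{
	if (chip->firmware)
		release_firmware(chip->firmware);
	vfree(chip->fw_uploaded);	/* vfree(NULL) is a no-op */
}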
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 748e36c6660..6e02e064d7b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -962,10 +962,10 @@ static DEFINE_PCI_DEVICE_TABLE(snd_hdspm_ids) = {
MODULE_DEVICE_TABLE(pci, snd_hdspm_ids);
/* prototypes */
-static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
- struct hdspm * hdspm);
-static int __devinit snd_hdspm_create_pcm(struct snd_card *card,
- struct hdspm * hdspm);
+static int snd_hdspm_create_alsa_devices(struct snd_card *card,
+ struct hdspm *hdspm);
+static int snd_hdspm_create_pcm(struct snd_card *card,
+ struct hdspm *hdspm);
static inline void snd_hdspm_initialize_midi_flush(struct hdspm *hdspm);
static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
@@ -1845,8 +1845,8 @@ static struct snd_rawmidi_ops snd_hdspm_midi_input =
.trigger = snd_hdspm_midi_input_trigger,
};
-static int __devinit snd_hdspm_create_midi (struct snd_card *card,
- struct hdspm *hdspm, int id)
+static int snd_hdspm_create_midi(struct snd_card *card,
+ struct hdspm *hdspm, int id)
{
int err;
char buf[32];
@@ -2887,330 +2887,50 @@ static int snd_hdspm_get_autosync_ref(struct snd_kcontrol *kcontrol,
return 0;
}
-
-#define HDSPM_LINE_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_line_out, \
- .get = snd_hdspm_get_line_out, \
- .put = snd_hdspm_put_line_out \
-}
-
-static int hdspm_line_out(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_LineOut) ? 1 : 0;
-}
-
-
-static int hdspm_set_line_output(struct hdspm * hdspm, int out)
-{
- if (out)
- hdspm->control_register |= HDSPM_LineOut;
- else
- hdspm->control_register &= ~HDSPM_LineOut;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_line_out snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_line_out(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
-
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.integer.value[0] = hdspm_line_out(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_line_out(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_line_out(hdspm);
- hdspm_set_line_output(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_TX_64(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_tx_64, \
- .get = snd_hdspm_get_tx_64, \
- .put = snd_hdspm_put_tx_64 \
-}
-
-static int hdspm_tx_64(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_TX_64ch) ? 1 : 0;
-}
-
-static int hdspm_set_tx_64(struct hdspm * hdspm, int out)
-{
- if (out)
- hdspm->control_register |= HDSPM_TX_64ch;
- else
- hdspm->control_register &= ~HDSPM_TX_64ch;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_tx_64 snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_tx_64(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
-
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.integer.value[0] = hdspm_tx_64(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_tx_64(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_tx_64(hdspm);
- hdspm_set_tx_64(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_C_TMS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_c_tms, \
- .get = snd_hdspm_get_c_tms, \
- .put = snd_hdspm_put_c_tms \
-}
-
-static int hdspm_c_tms(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_clr_tms) ? 1 : 0;
-}
-
-static int hdspm_set_c_tms(struct hdspm * hdspm, int out)
-{
- if (out)
- hdspm->control_register |= HDSPM_clr_tms;
- else
- hdspm->control_register &= ~HDSPM_clr_tms;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_c_tms snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_c_tms(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
-
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.integer.value[0] = hdspm_c_tms(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_c_tms(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_c_tms(hdspm);
- hdspm_set_c_tms(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_SAFE_MODE(xname, xindex) \
+#define HDSPM_TOGGLE_SETTING(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_safe_mode, \
- .get = snd_hdspm_get_safe_mode, \
- .put = snd_hdspm_put_safe_mode \
+ .private_value = xindex, \
+ .info = snd_hdspm_info_toggle_setting, \
+ .get = snd_hdspm_get_toggle_setting, \
+ .put = snd_hdspm_put_toggle_setting \
}
-static int hdspm_safe_mode(struct hdspm * hdspm)
+static int hdspm_toggle_setting(struct hdspm *hdspm, u32 regmask)
{
- return (hdspm->control_register & HDSPM_AutoInp) ? 1 : 0;
+ return (hdspm->control_register & regmask) ? 1 : 0;
}
-static int hdspm_set_safe_mode(struct hdspm * hdspm, int out)
+static int hdspm_set_toggle_setting(struct hdspm *hdspm, u32 regmask, int out)
{
if (out)
- hdspm->control_register |= HDSPM_AutoInp;
- else
- hdspm->control_register &= ~HDSPM_AutoInp;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_safe_mode snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_safe_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
-
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.integer.value[0] = hdspm_safe_mode(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_safe_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_safe_mode(hdspm);
- hdspm_set_safe_mode(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_EMPHASIS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_emphasis, \
- .get = snd_hdspm_get_emphasis, \
- .put = snd_hdspm_put_emphasis \
-}
-
-static int hdspm_emphasis(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_Emphasis) ? 1 : 0;
-}
-
-static int hdspm_set_emphasis(struct hdspm * hdspm, int emp)
-{
- if (emp)
- hdspm->control_register |= HDSPM_Emphasis;
+ hdspm->control_register |= regmask;
else
- hdspm->control_register &= ~HDSPM_Emphasis;
+ hdspm->control_register &= ~regmask;
hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
return 0;
}
-#define snd_hdspm_info_emphasis snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_emphasis(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+#define snd_hdspm_info_toggle_setting snd_ctl_boolean_mono_info
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.enumerated.item[0] = hdspm_emphasis(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_emphasis(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_emphasis(hdspm);
- hdspm_set_emphasis(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_DOLBY(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_dolby, \
- .get = snd_hdspm_get_dolby, \
- .put = snd_hdspm_put_dolby \
-}
-
-static int hdspm_dolby(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_Dolby) ? 1 : 0;
-}
-
-static int hdspm_set_dolby(struct hdspm * hdspm, int dol)
-{
- if (dol)
- hdspm->control_register |= HDSPM_Dolby;
- else
- hdspm->control_register &= ~HDSPM_Dolby;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_dolby snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_dolby(struct snd_kcontrol *kcontrol,
+static int snd_hdspm_get_toggle_setting(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ u32 regmask = kcontrol->private_value;
spin_lock_irq(&hdspm->lock);
- ucontrol->value.enumerated.item[0] = hdspm_dolby(hdspm);
+ ucontrol->value.integer.value[0] = hdspm_toggle_setting(hdspm, regmask);
spin_unlock_irq(&hdspm->lock);
return 0;
}
-static int snd_hdspm_put_dolby(struct snd_kcontrol *kcontrol,
+static int snd_hdspm_put_toggle_setting(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ u32 regmask = kcontrol->private_value;
int change;
unsigned int val;
@@ -3218,64 +2938,8 @@ static int snd_hdspm_put_dolby(struct snd_kcontrol *kcontrol,
return -EBUSY;
val = ucontrol->value.integer.value[0] & 1;
spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_dolby(hdspm);
- hdspm_set_dolby(hdspm, val);
- spin_unlock_irq(&hdspm->lock);
- return change;
-}
-
-
-#define HDSPM_PROFESSIONAL(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = xindex, \
- .info = snd_hdspm_info_professional, \
- .get = snd_hdspm_get_professional, \
- .put = snd_hdspm_put_professional \
-}
-
-static int hdspm_professional(struct hdspm * hdspm)
-{
- return (hdspm->control_register & HDSPM_Professional) ? 1 : 0;
-}
-
-static int hdspm_set_professional(struct hdspm * hdspm, int dol)
-{
- if (dol)
- hdspm->control_register |= HDSPM_Professional;
- else
- hdspm->control_register &= ~HDSPM_Professional;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
-
- return 0;
-}
-
-#define snd_hdspm_info_professional snd_ctl_boolean_mono_info
-
-static int snd_hdspm_get_professional(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
-
- spin_lock_irq(&hdspm->lock);
- ucontrol->value.enumerated.item[0] = hdspm_professional(hdspm);
- spin_unlock_irq(&hdspm->lock);
- return 0;
-}
-
-static int snd_hdspm_put_professional(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- int change;
- unsigned int val;
-
- if (!snd_hdspm_use_is_exclusive(hdspm))
- return -EBUSY;
- val = ucontrol->value.integer.value[0] & 1;
- spin_lock_irq(&hdspm->lock);
- change = (int) val != hdspm_professional(hdspm);
- hdspm_set_professional(hdspm, val);
+ change = (int) val != hdspm_toggle_setting(hdspm, regmask);
+ hdspm_set_toggle_setting(hdspm, regmask, val);
spin_unlock_irq(&hdspm->lock);
return change;
}
@@ -4476,10 +4140,10 @@ static struct snd_kcontrol_new snd_hdspm_controls_madi[] = {
HDSPM_SYNC_CHECK("MADI SyncCheck", 1),
HDSPM_SYNC_CHECK("TCO SyncCheck", 2),
HDSPM_SYNC_CHECK("SYNC IN SyncCheck", 3),
- HDSPM_LINE_OUT("Line Out", 0),
- HDSPM_TX_64("TX 64 channels mode", 0),
- HDSPM_C_TMS("Clear Track Marker", 0),
- HDSPM_SAFE_MODE("Safe Mode", 0),
+ HDSPM_TOGGLE_SETTING("Line Out", HDSPM_LineOut),
+ HDSPM_TOGGLE_SETTING("TX 64 channels mode", HDSPM_TX_64ch),
+ HDSPM_TOGGLE_SETTING("Clear Track Marker", HDSPM_clr_tms),
+ HDSPM_TOGGLE_SETTING("Safe Mode", HDSPM_AutoInp),
HDSPM_INPUT_SELECT("Input Select", 0),
HDSPM_MADI_SPEEDMODE("MADI Speed Mode", 0)
};
@@ -4492,9 +4156,9 @@ static struct snd_kcontrol_new snd_hdspm_controls_madiface[] = {
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
HDSPM_SYNC_CHECK("MADI SyncCheck", 0),
- HDSPM_TX_64("TX 64 channels mode", 0),
- HDSPM_C_TMS("Clear Track Marker", 0),
- HDSPM_SAFE_MODE("Safe Mode", 0),
+ HDSPM_TOGGLE_SETTING("TX 64 channels mode", HDSPM_TX_64ch),
+ HDSPM_TOGGLE_SETTING("Clear Track Marker", HDSPM_clr_tms),
+ HDSPM_TOGGLE_SETTING("Safe Mode", HDSPM_AutoInp),
HDSPM_MADI_SPEEDMODE("MADI Speed Mode", 0)
};
@@ -4587,11 +4251,11 @@ static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
HDSPM_AUTOSYNC_SAMPLE_RATE("AES8 Frequency", 8),
HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 9),
HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 10),
- HDSPM_LINE_OUT("Line Out", 0),
- HDSPM_EMPHASIS("Emphasis", 0),
- HDSPM_DOLBY("Non Audio", 0),
- HDSPM_PROFESSIONAL("Professional", 0),
- HDSPM_C_TMS("Clear Track Marker", 0),
+ HDSPM_TOGGLE_SETTING("Line Out", HDSPM_LineOut),
+ HDSPM_TOGGLE_SETTING("Emphasis", HDSPM_Emphasis),
+ HDSPM_TOGGLE_SETTING("Non Audio", HDSPM_Dolby),
+ HDSPM_TOGGLE_SETTING("Professional", HDSPM_Professional),
+ HDSPM_TOGGLE_SETTING("Clear Track Marker", HDSPM_clr_tms),
HDSPM_DS_WIRE("Double Speed Wire Mode", 0),
HDSPM_QS_WIRE("Quad Speed Wire Mode", 0),
};
@@ -5233,7 +4897,7 @@ static void snd_hdspm_proc_ports_out(struct snd_info_entry *entry,
}
-static void __devinit snd_hdspm_proc_init(struct hdspm *hdspm)
+static void snd_hdspm_proc_init(struct hdspm *hdspm)
{
struct snd_info_entry *entry;
@@ -6266,7 +5930,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
info.system_clock_mode = hdspm_system_clock_mode(hdspm);
info.clock_source = hdspm_clock_source(hdspm);
info.autosync_ref = hdspm_autosync_ref(hdspm);
- info.line_out = hdspm_line_out(hdspm);
+ info.line_out = hdspm_toggle_setting(hdspm, HDSPM_LineOut);
info.passthru = 0;
spin_unlock_irq(&hdspm->lock);
if (copy_to_user(argp, &info, sizeof(info)))
@@ -6369,8 +6033,8 @@ static struct snd_pcm_ops snd_hdspm_capture_ops = {
.page = snd_pcm_sgbuf_ops_page,
};
-static int __devinit snd_hdspm_create_hwdep(struct snd_card *card,
- struct hdspm * hdspm)
+static int snd_hdspm_create_hwdep(struct snd_card *card,
+ struct hdspm *hdspm)
{
struct snd_hwdep *hw;
int err;
@@ -6395,7 +6059,7 @@ static int __devinit snd_hdspm_create_hwdep(struct snd_card *card,
/*------------------------------------------------------------
memory interface
------------------------------------------------------------*/
-static int __devinit snd_hdspm_preallocate_memory(struct hdspm *hdspm)
+static int snd_hdspm_preallocate_memory(struct hdspm *hdspm)
{
int err;
struct snd_pcm *pcm;
@@ -6436,8 +6100,8 @@ static void hdspm_set_sgbuf(struct hdspm *hdspm,
/* ------------- ALSA Devices ---------------------------- */
-static int __devinit snd_hdspm_create_pcm(struct snd_card *card,
- struct hdspm *hdspm)
+static int snd_hdspm_create_pcm(struct snd_card *card,
+ struct hdspm *hdspm)
{
struct snd_pcm *pcm;
int err;
@@ -6472,8 +6136,8 @@ static inline void snd_hdspm_initialize_midi_flush(struct hdspm * hdspm)
snd_hdspm_flush_midi_input(hdspm, i);
}
-static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
- struct hdspm * hdspm)
+static int snd_hdspm_create_alsa_devices(struct snd_card *card,
+ struct hdspm *hdspm)
{
int err, i;
@@ -6531,8 +6195,9 @@ static int __devinit snd_hdspm_create_alsa_devices(struct snd_card *card,
return 0;
}
-static int __devinit snd_hdspm_create(struct snd_card *card,
- struct hdspm *hdspm) {
+static int snd_hdspm_create(struct snd_card *card,
+ struct hdspm *hdspm)
+{
struct pci_dev *pci = hdspm->pci;
int err;
@@ -6905,8 +6570,8 @@ static void snd_hdspm_card_free(struct snd_card *card)
}
-static int __devinit snd_hdspm_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_hdspm_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct hdspm *hdspm;
@@ -6964,7 +6629,7 @@ static int __devinit snd_hdspm_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_hdspm_remove(struct pci_dev *pci)
+static void snd_hdspm_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -6974,7 +6639,7 @@ static struct pci_driver hdspm_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_hdspm_ids,
.probe = snd_hdspm_probe,
- .remove = __devexit_p(snd_hdspm_remove),
+ .remove = snd_hdspm_remove,
};
module_pci_driver(hdspm_driver);
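The bulk of the hdspm.c change above collapses seven near-identical boolean mixer controls (Line Out, TX 64 channels mode, Clear Track Marker, Safe Mode, Emphasis, Non Audio, Professional) into one HDSPM_TOGGLE_SETTING macro that carries the control-register bit in kcontrol->private_value, so a single get/put pair serves them all. Below is a self-contained sketch of that pattern; only the private_value trick and the ALSA control calls are taken from the diff, while struct my_chip, the handler names, and the mask values are illustrative.

/* One handler, many masks: each control stores its register bit in
 * ->private_value, replacing a copy-pasted get/put pair per control.
 */
#include <linux/spinlock.h>
#include <sound/core.h>
#include <sound/control.h>

struct my_chip {
	spinlock_t lock;
	u32 control_register;
};

static int my_toggle_get(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct my_chip *chip = snd_kcontrol_chip(kcontrol);
	u32 regmask = kcontrol->private_value;

	spin_lock_irq(&chip->lock);
	ucontrol->value.integer.value[0] =
		(chip->control_register & regmask) ? 1 : 0;
	spin_unlock_irq(&chip->lock);
	return 0;
}

static int my_toggle_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct my_chip *chip = snd_kcontrol_chip(kcontrol);
	u32 regmask = kcontrol->private_value;
	int on = ucontrol->value.integer.value[0] & 1;
	int change;

	spin_lock_irq(&chip->lock);
	change = on != ((chip->control_register & regmask) ? 1 : 0);
	if (on)
		chip->control_register |= regmask;
	else
		chip->control_register &= ~regmask;
	/* the driver writes the register back here, e.g. hdspm_write(...) */
	spin_unlock_irq(&chip->lock);
	return change;
}

#define MY_TOGGLE_SETTING(xname, xmask) {		\
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,		\
	.name = xname,					\
	.private_value = xmask,				\
	.info = snd_ctl_boolean_mono_info,		\
	.get = my_toggle_get,				\
	.put = my_toggle_put,				\
}

/* Usage: one table entry per toggle, no per-control boilerplate.
 * Mask values are placeholders standing in for HDSPM_LineOut etc.
 */
static struct snd_kcontrol_new my_controls[] = {
	MY_TOGGLE_SETTING("Line Out", 0x01),
	MY_TOGGLE_SETTING("Safe Mode", 0x02),
};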
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index a15fc100ab0..773a67fff4c 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -1757,7 +1757,7 @@ snd_rme9652_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buff
snd_iprintf(buffer, "\n");
}
-static void __devinit snd_rme9652_proc_init(struct snd_rme9652 *rme9652)
+static void snd_rme9652_proc_init(struct snd_rme9652 *rme9652)
{
struct snd_info_entry *entry;
@@ -1788,7 +1788,7 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
return 0;
}
-static int __devinit snd_rme9652_initialize_memory(struct snd_rme9652 *rme9652)
+static int snd_rme9652_initialize_memory(struct snd_rme9652 *rme9652)
{
unsigned long pb_bus, cb_bus;
@@ -2414,8 +2414,8 @@ static struct snd_pcm_ops snd_rme9652_capture_ops = {
.copy = snd_rme9652_capture_copy,
};
-static int __devinit snd_rme9652_create_pcm(struct snd_card *card,
- struct snd_rme9652 *rme9652)
+static int snd_rme9652_create_pcm(struct snd_card *card,
+ struct snd_rme9652 *rme9652)
{
struct snd_pcm *pcm;
int err;
@@ -2438,9 +2438,9 @@ static int __devinit snd_rme9652_create_pcm(struct snd_card *card,
return 0;
}
-static int __devinit snd_rme9652_create(struct snd_card *card,
- struct snd_rme9652 *rme9652,
- int precise_ptr)
+static int snd_rme9652_create(struct snd_card *card,
+ struct snd_rme9652 *rme9652,
+ int precise_ptr)
{
struct pci_dev *pci = rme9652->pci;
int err;
@@ -2578,8 +2578,8 @@ static void snd_rme9652_card_free(struct snd_card *card)
snd_rme9652_free(rme9652);
}
-static int __devinit snd_rme9652_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_rme9652_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_rme9652 *rme9652;
@@ -2625,7 +2625,7 @@ static int __devinit snd_rme9652_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_rme9652_remove(struct pci_dev *pci)
+static void snd_rme9652_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2635,7 +2635,7 @@ static struct pci_driver rme9652_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_rme9652_ids,
.probe = snd_rme9652_probe,
- .remove = __devexit_p(snd_rme9652_remove),
+ .remove = snd_rme9652_remove,
};
module_pci_driver(rme9652_driver);
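Most of the remaining hunks in this commit are the same mechanical cleanup already seen in hdsp.c, hdspm.c, and rme9652.c: the __devinit/__devinitdata/__devexit annotations and the __devexit_p() wrapper are dropped, leaving plain probe/remove callbacks registered through module_pci_driver(). A minimal sketch of the resulting driver skeleton, assuming illustrative names and a made-up PCI ID (the annotations were being removed tree-wide around this time, so no markers remain on the callbacks):

/* Skeleton of a PCI sound driver after the annotation cleanup; names and
 * IDs are illustrative, the overall shape matches the diffs above.
 */
#include <linux/module.h>
#include <linux/pci.h>

static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	/* card allocation and device setup would go here */
	return 0;
}

static void my_remove(struct pci_dev *pci)
{
	/* card teardown would go here; no __devexit marker any more */
}

static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* illustrative vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, my_ids);

static struct pci_driver my_driver = {
	.name = KBUILD_MODNAME,
	.id_table = my_ids,
	.probe = my_probe,
	.remove = my_remove,	/* no __devexit_p() wrapper */
};
module_pci_driver(my_driver);

MODULE_LICENSE("GPL");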
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 51e43407ebc..d59abe1682c 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -894,7 +894,7 @@ static struct snd_pcm_ops sis_capture_ops = {
.pointer = sis_pcm_pointer,
};
-static int __devinit sis_pcm_create(struct sis7019 *sis)
+static int sis_pcm_create(struct sis7019 *sis)
{
struct snd_pcm *pcm;
int rc;
@@ -1013,7 +1013,7 @@ static unsigned short sis_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
(reg << 8) | cmd[ac97->num]);
}
-static int __devinit sis_mixer_create(struct sis7019 *sis)
+static int sis_mixer_create(struct sis7019 *sis)
{
struct snd_ac97_bus *bus;
struct snd_ac97_template ac97;
@@ -1171,7 +1171,7 @@ static int sis_chip_init(struct sis7019 *sis)
outl(SIS_DMA_CSR_PCI_SETTINGS, io + SIS_DMA_CSR);
/* Reset the synchronization groups for all of the channels
- * to be asyncronous. If we start doing SPDIF or 5.1 sound, etc.
+ * to be asynchronous. If we start doing SPDIF or 5.1 sound, etc.
* we'll need to change how we handle these. Until then, we just
* assign sub-mixer 0 to all playback channels, and avoid any
* attenuation on the audio.
@@ -1326,8 +1326,8 @@ static int sis_alloc_suspend(struct sis7019 *sis)
return 0;
}
-static int __devinit sis_chip_create(struct snd_card *card,
- struct pci_dev *pci)
+static int sis_chip_create(struct snd_card *card,
+ struct pci_dev *pci)
{
struct sis7019 *sis = card->private_data;
struct voice *voice;
@@ -1417,8 +1417,8 @@ error_out:
return rc;
}
-static int __devinit snd_sis7019_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_sis7019_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct sis7019 *sis;
@@ -1478,7 +1478,7 @@ error_out:
return rc;
}
-static void __devexit snd_sis7019_remove(struct pci_dev *pci)
+static void snd_sis7019_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1488,7 +1488,7 @@ static struct pci_driver sis7019_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_sis7019_ids,
.probe = snd_sis7019_probe,
- .remove = __devexit_p(snd_sis7019_remove),
+ .remove = snd_sis7019_remove,
.driver = {
.pm = SIS_PM_OPS,
},
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index baa9946bedf..a2e7686e7ae 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -877,7 +877,8 @@ static struct snd_pcm_ops snd_sonicvibes_capture_ops = {
.pointer = snd_sonicvibes_capture_pointer,
};
-static int __devinit snd_sonicvibes_pcm(struct sonicvibes * sonic, int device, struct snd_pcm ** rpcm)
+static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1087,7 +1088,7 @@ static int snd_sonicvibes_put_double(struct snd_kcontrol *kcontrol, struct snd_c
return change;
}
-static struct snd_kcontrol_new snd_sonicvibes_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_sonicvibes_controls[] = {
SONICVIBES_DOUBLE("Capture Volume", 0, SV_IREG_LEFT_ADC, SV_IREG_RIGHT_ADC, 0, 0, 15, 0),
SONICVIBES_DOUBLE("Aux Playback Switch", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 7, 7, 1, 1),
SONICVIBES_DOUBLE("Aux Playback Volume", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 0, 0, 31, 1),
@@ -1118,7 +1119,7 @@ static void snd_sonicvibes_master_free(struct snd_kcontrol *kcontrol)
sonic->master_volume = NULL;
}
-static int __devinit snd_sonicvibes_mixer(struct sonicvibes * sonic)
+static int snd_sonicvibes_mixer(struct sonicvibes *sonic)
{
struct snd_card *card;
struct snd_kcontrol *kctl;
@@ -1175,7 +1176,7 @@ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", tmp & 0x04 ? "on" : "off");
}
-static void __devinit snd_sonicvibes_proc_init(struct sonicvibes * sonic)
+static void snd_sonicvibes_proc_init(struct sonicvibes *sonic)
{
struct snd_info_entry *entry;
@@ -1188,10 +1189,10 @@ static void __devinit snd_sonicvibes_proc_init(struct sonicvibes * sonic)
*/
#ifdef SUPPORT_JOYSTICK
-static struct snd_kcontrol_new snd_sonicvibes_game_control __devinitdata =
+static struct snd_kcontrol_new snd_sonicvibes_game_control =
SONICVIBES_SINGLE("Joystick Speed", 0, SV_IREG_GAME_PORT, 1, 15, 0);
-static int __devinit snd_sonicvibes_create_gameport(struct sonicvibes *sonic)
+static int snd_sonicvibes_create_gameport(struct sonicvibes *sonic)
{
struct gameport *gp;
@@ -1246,11 +1247,11 @@ static int snd_sonicvibes_dev_free(struct snd_device *device)
return snd_sonicvibes_free(sonic);
}
-static int __devinit snd_sonicvibes_create(struct snd_card *card,
- struct pci_dev *pci,
- int reverb,
- int mge,
- struct sonicvibes ** rsonic)
+static int snd_sonicvibes_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int reverb,
+ int mge,
+ struct sonicvibes **rsonic)
{
struct sonicvibes *sonic;
unsigned int dmaa, dmac;
@@ -1401,7 +1402,7 @@ static int __devinit snd_sonicvibes_create(struct snd_card *card,
* MIDI section
*/
-static struct snd_kcontrol_new snd_sonicvibes_midi_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_sonicvibes_midi_controls[] = {
SONICVIBES_SINGLE("SonicVibes Wave Source RAM", 0, SV_IREG_WAVE_SOURCE, 0, 1, 0),
SONICVIBES_SINGLE("SonicVibes Wave Source RAM+ROM", 0, SV_IREG_WAVE_SOURCE, 1, 1, 0),
SONICVIBES_SINGLE("SonicVibes Onboard Synth", 0, SV_IREG_MPU401, 0, 1, 0),
@@ -1422,8 +1423,8 @@ static void snd_sonicvibes_midi_input_close(struct snd_mpu401 * mpu)
outb(sonic->irqmask |= SV_MIDI_MASK, SV_REG(sonic, IRQMASK));
}
-static int __devinit snd_sonicvibes_midi(struct sonicvibes * sonic,
- struct snd_rawmidi *rmidi)
+static int snd_sonicvibes_midi(struct sonicvibes *sonic,
+ struct snd_rawmidi *rmidi)
{
struct snd_mpu401 * mpu = rmidi->private_data;
struct snd_card *card = sonic->card;
@@ -1441,8 +1442,8 @@ static int __devinit snd_sonicvibes_midi(struct sonicvibes * sonic,
return 0;
}
-static int __devinit snd_sonic_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_sonic_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1524,7 +1525,7 @@ static int __devinit snd_sonic_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_sonic_remove(struct pci_dev *pci)
+static void snd_sonic_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1534,7 +1535,7 @@ static struct pci_driver sonicvibes_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_sonic_ids,
.probe = snd_sonic_probe,
- .remove = __devexit_p(snd_sonic_remove),
+ .remove = snd_sonic_remove,
};
module_pci_driver(sonicvibes_driver);
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index 8a6f1f76e87..1aefd6204a6 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -73,8 +73,8 @@ static DEFINE_PCI_DEVICE_TABLE(snd_trident_ids) = {
MODULE_DEVICE_TABLE(pci, snd_trident_ids);
-static int __devinit snd_trident_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_trident_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -166,7 +166,7 @@ static int __devinit snd_trident_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_trident_remove(struct pci_dev *pci)
+static void snd_trident_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -176,7 +176,7 @@ static struct pci_driver trident_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_trident_ids,
.probe = snd_trident_probe,
- .remove = __devexit_p(snd_trident_remove),
+ .remove = snd_trident_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &snd_trident_pm,
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 06b10d1a76e..fb0e1586a6f 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -2171,8 +2171,8 @@ static struct snd_pcm_ops snd_trident_spdif_7018_ops = {
---------------------------------------------------------------------------*/
-int __devinit snd_trident_pcm(struct snd_trident * trident,
- int device, struct snd_pcm ** rpcm)
+int snd_trident_pcm(struct snd_trident *trident,
+ int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -2229,8 +2229,8 @@ int __devinit snd_trident_pcm(struct snd_trident * trident,
---------------------------------------------------------------------------*/
-int __devinit snd_trident_foldback_pcm(struct snd_trident * trident,
- int device, struct snd_pcm ** rpcm)
+int snd_trident_foldback_pcm(struct snd_trident *trident,
+ int device, struct snd_pcm **rpcm)
{
struct snd_pcm *foldback;
int err;
@@ -2286,8 +2286,8 @@ int __devinit snd_trident_foldback_pcm(struct snd_trident * trident,
---------------------------------------------------------------------------*/
-int __devinit snd_trident_spdif_pcm(struct snd_trident * trident,
- int device, struct snd_pcm ** rpcm)
+int snd_trident_spdif_pcm(struct snd_trident *trident,
+ int device, struct snd_pcm **rpcm)
{
struct snd_pcm *spdif;
int err;
@@ -2371,7 +2371,7 @@ static int snd_trident_spdif_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_spdif_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_spdif_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),
@@ -2434,7 +2434,7 @@ static int snd_trident_spdif_default_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_spdif_default __devinitdata =
+static struct snd_kcontrol_new snd_trident_spdif_default =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
@@ -2467,7 +2467,7 @@ static int snd_trident_spdif_mask_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_trident_spdif_mask __devinitdata =
+static struct snd_kcontrol_new snd_trident_spdif_mask =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -2529,7 +2529,7 @@ static int snd_trident_spdif_stream_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_spdif_stream __devinitdata =
+static struct snd_kcontrol_new snd_trident_spdif_stream =
{
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -2579,7 +2579,7 @@ static int snd_trident_ac97_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_ac97_rear_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_ac97_rear_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Rear Path",
@@ -2637,7 +2637,7 @@ static int snd_trident_vol_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_vol_music_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_vol_music_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Music Playback Volume",
@@ -2648,7 +2648,7 @@ static struct snd_kcontrol_new snd_trident_vol_music_control __devinitdata =
.tlv = { .p = db_scale_gvol },
};
-static struct snd_kcontrol_new snd_trident_vol_wave_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_vol_wave_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Wave Playback Volume",
@@ -2715,7 +2715,7 @@ static int snd_trident_pcm_vol_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_pcm_vol_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_pcm_vol_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Front Playback Volume",
@@ -2779,7 +2779,7 @@ static int snd_trident_pcm_pan_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_pcm_pan_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_pcm_pan_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Pan Playback Control",
@@ -2836,7 +2836,7 @@ static int snd_trident_pcm_rvol_control_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(db_scale_crvol, -3175, 25, 1);
-static struct snd_kcontrol_new snd_trident_pcm_rvol_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_pcm_rvol_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Reverb Playback Volume",
@@ -2892,7 +2892,7 @@ static int snd_trident_pcm_cvol_control_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_trident_pcm_cvol_control __devinitdata =
+static struct snd_kcontrol_new snd_trident_pcm_cvol_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Chorus Playback Volume",
@@ -2972,7 +2972,7 @@ static int snd_trident_pcm_mixer_free(struct snd_trident *trident, struct snd_tr
---------------------------------------------------------------------------*/
-static int __devinit snd_trident_mixer(struct snd_trident * trident, int pcm_spdif_device)
+static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
{
struct snd_ac97_template _ac97;
struct snd_card *card = trident->card;
@@ -3191,7 +3191,7 @@ static int snd_trident_gameport_open(struct gameport *gameport, int mode)
}
}
-int __devinit snd_trident_create_gameport(struct snd_trident *chip)
+int snd_trident_create_gameport(struct snd_trident *chip)
{
struct gameport *gp;
@@ -3225,7 +3225,7 @@ static inline void snd_trident_free_gameport(struct snd_trident *chip)
}
}
#else
-int __devinit snd_trident_create_gameport(struct snd_trident *chip) { return -ENOSYS; }
+int snd_trident_create_gameport(struct snd_trident *chip) { return -ENOSYS; }
static inline void snd_trident_free_gameport(struct snd_trident *chip) { }
#endif /* CONFIG_GAMEPORT */
@@ -3329,7 +3329,7 @@ static void snd_trident_proc_read(struct snd_info_entry *entry,
}
}
-static void __devinit snd_trident_proc_init(struct snd_trident * trident)
+static void snd_trident_proc_init(struct snd_trident *trident)
{
struct snd_info_entry *entry;
const char *s = "trident";
@@ -3358,7 +3358,7 @@ static int snd_trident_dev_free(struct snd_device *device)
---------------------------------------------------------------------------*/
-static int __devinit snd_trident_tlb_alloc(struct snd_trident *trident)
+static int snd_trident_tlb_alloc(struct snd_trident *trident)
{
int i;
@@ -3539,7 +3539,7 @@ static int snd_trident_sis_init(struct snd_trident *trident)
---------------------------------------------------------------------------*/
-int __devinit snd_trident_create(struct snd_card *card,
+int snd_trident_create(struct snd_card *card,
struct pci_dev *pci,
int pcm_streams,
int pcm_spdif_device,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index f0b4efdb483..6442f611a07 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1437,7 +1437,7 @@ static void init_viadev(struct via82xx *chip, int idx, unsigned int reg_offset,
/*
* create pcm instances for VIA8233, 8233C and 8235 (not 8233A)
*/
-static int __devinit snd_via8233_pcm_new(struct via82xx *chip)
+static int snd_via8233_pcm_new(struct via82xx *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_chmap *chmap;
@@ -1505,7 +1505,7 @@ static int __devinit snd_via8233_pcm_new(struct via82xx *chip)
/*
* create pcm instances for VIA8233A
*/
-static int __devinit snd_via8233a_pcm_new(struct via82xx *chip)
+static int snd_via8233a_pcm_new(struct via82xx *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_chmap *chmap;
@@ -1566,7 +1566,7 @@ static int __devinit snd_via8233a_pcm_new(struct via82xx *chip)
/*
* create a pcm instance for via686a/b
*/
-static int __devinit snd_via686_pcm_new(struct via82xx *chip)
+static int snd_via686_pcm_new(struct via82xx *chip)
{
struct snd_pcm *pcm;
int err;
@@ -1643,7 +1643,7 @@ static int snd_via8233_capture_source_put(struct snd_kcontrol *kcontrol,
return val != oval;
}
-static struct snd_kcontrol_new snd_via8233_capture_source __devinitdata = {
+static struct snd_kcontrol_new snd_via8233_capture_source = {
.name = "Input Source Select",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_via8233_capture_source_info,
@@ -1683,7 +1683,7 @@ static int snd_via8233_dxs3_spdif_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_via8233_dxs3_spdif_control __devinitdata = {
+static struct snd_kcontrol_new snd_via8233_dxs3_spdif_control = {
.name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_via8233_dxs3_spdif_info,
@@ -1772,7 +1772,7 @@ static int snd_via8233_pcmdxs_volume_put(struct snd_kcontrol *kcontrol,
static const DECLARE_TLV_DB_SCALE(db_scale_dxs, -4650, 150, 1);
-static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata = {
+static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control = {
.name = "PCM Playback Volume",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -1783,7 +1783,7 @@ static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata =
.tlv = { .p = db_scale_dxs }
};
-static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = {
+static struct snd_kcontrol_new snd_via8233_dxs_volume_control = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.device = 0,
/* .subdevice set later */
@@ -1895,7 +1895,7 @@ static struct ac97_quirk ac97_quirks[] = {
{ } /* terminator */
};
-static int __devinit snd_via82xx_mixer_new(struct via82xx *chip, const char *quirk_override)
+static int snd_via82xx_mixer_new(struct via82xx *chip, const char *quirk_override)
{
struct snd_ac97_template ac97;
int err;
@@ -1930,7 +1930,7 @@ static int __devinit snd_via82xx_mixer_new(struct via82xx *chip, const char *qui
#ifdef SUPPORT_JOYSTICK
#define JOYSTICK_ADDR 0x200
-static int __devinit snd_via686_create_gameport(struct via82xx *chip, unsigned char *legacy)
+static int snd_via686_create_gameport(struct via82xx *chip, unsigned char *legacy)
{
struct gameport *gp;
struct resource *r;
@@ -1990,7 +1990,7 @@ static inline void snd_via686_free_gameport(struct via82xx *chip) { }
*
*/
-static int __devinit snd_via8233_init_misc(struct via82xx *chip)
+static int snd_via8233_init_misc(struct via82xx *chip)
{
int i, err, caps;
unsigned char val;
@@ -2047,7 +2047,7 @@ static int __devinit snd_via8233_init_misc(struct via82xx *chip)
return 0;
}
-static int __devinit snd_via686_init_misc(struct via82xx *chip)
+static int snd_via686_init_misc(struct via82xx *chip)
{
unsigned char legacy, legacy_cfg;
int rev_h = 0;
@@ -2137,7 +2137,7 @@ static void snd_via82xx_proc_read(struct snd_info_entry *entry,
}
}
-static void __devinit snd_via82xx_proc_init(struct via82xx *chip)
+static void snd_via82xx_proc_init(struct via82xx *chip)
{
struct snd_info_entry *entry;
@@ -2370,12 +2370,12 @@ static int snd_via82xx_dev_free(struct snd_device *device)
return snd_via82xx_free(chip);
}
-static int __devinit snd_via82xx_create(struct snd_card *card,
- struct pci_dev *pci,
- int chip_type,
- int revision,
- unsigned int ac97_clock,
- struct via82xx ** r_via)
+static int snd_via82xx_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int chip_type,
+ int revision,
+ unsigned int ac97_clock,
+ struct via82xx **r_via)
{
struct via82xx *chip;
int err;
@@ -2452,7 +2452,7 @@ struct via823x_info {
char *name;
int type;
};
-static struct via823x_info via823x_cards[] __devinitdata = {
+static struct via823x_info via823x_cards[] = {
{ VIA_REV_PRE_8233, "VIA 8233-Pre", TYPE_VIA8233 },
{ VIA_REV_8233C, "VIA 8233C", TYPE_VIA8233 },
{ VIA_REV_8233, "VIA 8233", TYPE_VIA8233 },
@@ -2466,7 +2466,7 @@ static struct via823x_info via823x_cards[] __devinitdata = {
* auto detection of DXS channel supports.
*/
-static struct snd_pci_quirk dxs_whitelist[] __devinitdata = {
+static struct snd_pci_quirk dxs_whitelist[] = {
SND_PCI_QUIRK(0x1005, 0x4710, "Avance Logic Mobo", VIA_DXS_ENABLE),
SND_PCI_QUIRK(0x1019, 0x0996, "ESC Mobo", VIA_DXS_48K),
SND_PCI_QUIRK(0x1019, 0x0a81, "ECS K7VTA3 v8.0", VIA_DXS_NO_VRA),
@@ -2510,7 +2510,7 @@ static struct snd_pci_quirk dxs_whitelist[] __devinitdata = {
{ } /* terminator */
};
-static int __devinit check_dxs_list(struct pci_dev *pci, int revision)
+static int check_dxs_list(struct pci_dev *pci, int revision)
{
const struct snd_pci_quirk *w;
@@ -2535,8 +2535,8 @@ static int __devinit check_dxs_list(struct pci_dev *pci, int revision)
return VIA_DXS_48K;
};
-static int __devinit snd_via82xx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct via82xx *chip;
@@ -2643,7 +2643,7 @@ static int __devinit snd_via82xx_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_via82xx_remove(struct pci_dev *pci)
+static void snd_via82xx_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -2653,7 +2653,7 @@ static struct pci_driver via82xx_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_via82xx_ids,
.probe = snd_via82xx_probe,
- .remove = __devexit_p(snd_via82xx_remove),
+ .remove = snd_via82xx_remove,
.driver = {
.pm = SND_VIA82XX_PM_OPS,
},
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 8e0efc416f2..4f5fd80b7e5 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -836,7 +836,7 @@ static void init_viadev(struct via82xx_modem *chip, int idx, unsigned int reg_of
/*
* create a pcm instance for via686a/b
*/
-static int __devinit snd_via686_pcm_new(struct via82xx_modem *chip)
+static int snd_via686_pcm_new(struct via82xx_modem *chip)
{
struct snd_pcm *pcm;
int err;
@@ -885,7 +885,7 @@ static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97)
}
-static int __devinit snd_via82xx_mixer_new(struct via82xx_modem *chip)
+static int snd_via82xx_mixer_new(struct via82xx_modem *chip)
{
struct snd_ac97_template ac97;
int err;
@@ -928,7 +928,7 @@ static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_
}
}
-static void __devinit snd_via82xx_proc_init(struct via82xx_modem *chip)
+static void snd_via82xx_proc_init(struct via82xx_modem *chip)
{
struct snd_info_entry *entry;
@@ -1103,12 +1103,12 @@ static int snd_via82xx_dev_free(struct snd_device *device)
return snd_via82xx_free(chip);
}
-static int __devinit snd_via82xx_create(struct snd_card *card,
- struct pci_dev *pci,
- int chip_type,
- int revision,
- unsigned int ac97_clock,
- struct via82xx_modem ** r_via)
+static int snd_via82xx_create(struct snd_card *card,
+ struct pci_dev *pci,
+ int chip_type,
+ int revision,
+ unsigned int ac97_clock,
+ struct via82xx_modem **r_via)
{
struct via82xx_modem *chip;
int err;
@@ -1168,8 +1168,8 @@ static int __devinit snd_via82xx_create(struct snd_card *card,
}
-static int __devinit snd_via82xx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct via82xx_modem *chip;
@@ -1224,7 +1224,7 @@ static int __devinit snd_via82xx_probe(struct pci_dev *pci,
return err;
}
-static void __devexit snd_via82xx_remove(struct pci_dev *pci)
+static void snd_via82xx_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -1234,7 +1234,7 @@ static struct pci_driver via82xx_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_via82xx_modem_ids,
.probe = snd_via82xx_probe,
- .remove = __devexit_p(snd_via82xx_remove),
+ .remove = snd_via82xx_remove,
.driver = {
.pm = SND_VIA82XX_PM_OPS,
},
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index fdfbaf85723..e2f1ab37e15 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -134,9 +134,9 @@ static int snd_vx222_dev_free(struct snd_device *device)
}
-static int __devinit snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
- struct snd_vx_hardware *hw,
- struct snd_vx222 **rchip)
+static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
+ struct snd_vx_hardware *hw,
+ struct snd_vx222 **rchip)
{
struct vx_core *chip;
struct snd_vx222 *vx;
@@ -188,8 +188,8 @@ static int __devinit snd_vx222_create(struct snd_card *card, struct pci_dev *pci
}
-static int __devinit snd_vx222_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_vx222_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -251,7 +251,7 @@ static int __devinit snd_vx222_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_vx222_remove(struct pci_dev *pci)
+static void snd_vx222_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -300,7 +300,7 @@ static struct pci_driver vx222_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_vx222_ids,
.probe = snd_vx222_probe,
- .remove = __devexit_p(snd_vx222_remove),
+ .remove = snd_vx222_remove,
.driver = {
.pm = SND_VX222_PM_OPS,
},
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index e01fe34db9e..01c49655a3c 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -79,8 +79,8 @@ static DEFINE_PCI_DEVICE_TABLE(snd_ymfpci_ids) = {
MODULE_DEVICE_TABLE(pci, snd_ymfpci_ids);
#ifdef SUPPORT_JOYSTICK
-static int __devinit snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev,
- int legacy_ctrl, int legacy_ctrl2)
+static int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev,
+ int legacy_ctrl, int legacy_ctrl2)
{
struct gameport *gp;
struct resource *r = NULL;
@@ -167,8 +167,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i
void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { }
#endif /* SUPPORT_JOYSTICK */
-static int __devinit snd_card_ymfpci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -344,7 +344,7 @@ static int __devinit snd_card_ymfpci_probe(struct pci_dev *pci,
return 0;
}
-static void __devexit snd_card_ymfpci_remove(struct pci_dev *pci)
+static void snd_card_ymfpci_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
@@ -354,7 +354,7 @@ static struct pci_driver ymfpci_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ymfpci_ids,
.probe = snd_card_ymfpci_probe,
- .remove = __devexit_p(snd_card_ymfpci_remove),
+ .remove = snd_card_ymfpci_remove,
#ifdef CONFIG_PM_SLEEP
.driver = {
.pm = &snd_ymfpci_pm,
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 3a6f03f9b02..22056c50fe3 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -25,7 +25,6 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -598,7 +597,7 @@ static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int
}
}
-static int __devinit snd_ymfpci_ac3_init(struct snd_ymfpci *chip)
+static int snd_ymfpci_ac3_init(struct snd_ymfpci *chip)
{
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
4096, &chip->ac3_tmp_base) < 0)
@@ -1144,7 +1143,7 @@ static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = {
.pointer = snd_ymfpci_capture_pointer,
};
-int __devinit snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
+int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1187,7 +1186,7 @@ static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = {
.pointer = snd_ymfpci_capture_pointer,
};
-int __devinit snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
+int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1225,7 +1224,8 @@ static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = {
.pointer = snd_ymfpci_playback_pointer,
};
-int __devinit snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
+int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1270,7 +1270,8 @@ static const struct snd_pcm_chmap_elem surround_map[] = {
{ }
};
-int __devinit snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm)
+int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device,
+ struct snd_pcm **rpcm)
{
struct snd_pcm *pcm;
int err;
@@ -1339,7 +1340,7 @@ static int snd_ymfpci_spdif_default_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ymfpci_spdif_default __devinitdata =
+static struct snd_kcontrol_new snd_ymfpci_spdif_default =
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
@@ -1367,7 +1368,7 @@ static int snd_ymfpci_spdif_mask_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ymfpci_spdif_mask __devinitdata =
+static struct snd_kcontrol_new snd_ymfpci_spdif_mask =
{
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1414,7 +1415,7 @@ static int snd_ymfpci_spdif_stream_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ymfpci_spdif_stream __devinitdata =
+static struct snd_kcontrol_new snd_ymfpci_spdif_stream =
{
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -1462,7 +1463,7 @@ static int snd_ymfpci_drec_source_put(struct snd_kcontrol *kcontrol, struct snd_
return reg != old_reg;
}
-static struct snd_kcontrol_new snd_ymfpci_drec_source __devinitdata = {
+static struct snd_kcontrol_new snd_ymfpci_drec_source = {
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Direct Recording Source",
@@ -1632,7 +1633,7 @@ static int snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_e
return change;
}
-static struct snd_kcontrol_new snd_ymfpci_dup4ch __devinitdata = {
+static struct snd_kcontrol_new snd_ymfpci_dup4ch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "4ch Duplication",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -1641,7 +1642,7 @@ static struct snd_kcontrol_new snd_ymfpci_dup4ch __devinitdata = {
.put = snd_ymfpci_put_dup4ch,
};
-static struct snd_kcontrol_new snd_ymfpci_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_ymfpci_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Wave Playback Volume",
@@ -1735,7 +1736,7 @@ static int snd_ymfpci_gpio_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
return 0;
}
-static struct snd_kcontrol_new snd_ymfpci_rear_shared __devinitdata = {
+static struct snd_kcontrol_new snd_ymfpci_rear_shared = {
.name = "Shared Rear/Line-In Switch",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_ymfpci_gpio_sw_info,
@@ -1799,7 +1800,7 @@ static int snd_ymfpci_pcm_vol_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_ymfpci_pcm_volume __devinitdata = {
+static struct snd_kcontrol_new snd_ymfpci_pcm_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "PCM Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -1826,7 +1827,7 @@ static void snd_ymfpci_mixer_free_ac97(struct snd_ac97 *ac97)
chip->ac97 = NULL;
}
-int __devinit snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
+int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
{
struct snd_ac97_template ac97;
struct snd_kcontrol *kctl;
@@ -1970,7 +1971,7 @@ static struct snd_timer_hardware snd_ymfpci_timer_hw = {
.precise_resolution = snd_ymfpci_timer_precise_resolution,
};
-int __devinit snd_ymfpci_timer(struct snd_ymfpci *chip, int device)
+int snd_ymfpci_timer(struct snd_ymfpci *chip, int device)
{
struct snd_timer *timer = NULL;
struct snd_timer_id tid;
@@ -2006,7 +2007,7 @@ static void snd_ymfpci_proc_read(struct snd_info_entry *entry,
snd_iprintf(buffer, "%04x: %04x\n", i, snd_ymfpci_readl(chip, i));
}
-static int __devinit snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip)
+static int snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip)
{
struct snd_info_entry *entry;
@@ -2128,7 +2129,7 @@ static void snd_ymfpci_download_image(struct snd_ymfpci *chip)
snd_ymfpci_enable_dsp(chip);
}
-static int __devinit snd_ymfpci_memalloc(struct snd_ymfpci *chip)
+static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
{
long size, playback_ctrl_size;
int voice, bank, reg;
@@ -2261,7 +2262,7 @@ static int snd_ymfpci_free(struct snd_ymfpci *chip)
#endif
#ifdef CONFIG_PM_SLEEP
- vfree(chip->saved_regs);
+ kfree(chip->saved_regs);
#endif
if (chip->irq >= 0)
free_irq(chip->irq, chip);
@@ -2394,10 +2395,10 @@ static int snd_ymfpci_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(snd_ymfpci_pm, snd_ymfpci_suspend, snd_ymfpci_resume);
#endif /* CONFIG_PM_SLEEP */
-int __devinit snd_ymfpci_create(struct snd_card *card,
- struct pci_dev * pci,
- unsigned short old_legacy_ctrl,
- struct snd_ymfpci ** rchip)
+int snd_ymfpci_create(struct snd_card *card,
+ struct pci_dev *pci,
+ unsigned short old_legacy_ctrl,
+ struct snd_ymfpci **rchip)
{
struct snd_ymfpci *chip;
int err;
@@ -2471,7 +2472,8 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
}
#ifdef CONFIG_PM_SLEEP
- chip->saved_regs = vmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32));
+ chip->saved_regs = kmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32),
+ GFP_KERNEL);
if (chip->saved_regs == NULL) {
snd_ymfpci_free(chip);
return -ENOMEM;
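The ymfpci_main.c hunks above also swap the suspend register save area from vmalloc/vfree to kmalloc/kfree and drop the now-unneeded linux/vmalloc.h include: YDSXGR_NUM_SAVED_REGS u32 values are far too small to justify vmalloc. A short sketch of the pairing, with an assumed register count and illustrative wrappers; the key point is that the allocator and the matching free call must change together:

/* Small, fixed-size save area: kmalloc/kfree instead of vmalloc/vfree.
 * The register count is an assumption; the real one lives in ymfpci.h.
 */
#include <linux/slab.h>
#include <linux/types.h>

#define YDSXGR_NUM_SAVED_REGS	32	/* assumption for illustration */

struct my_chip {
	u32 *saved_regs;
};

static int my_alloc_suspend_area(struct my_chip *chip)
{
	chip->saved_regs = kmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32),
				   GFP_KERNEL);
	return chip->saved_regs ? 0 : -ENOMEM;
}

static void my_free_suspend_area(struct my_chip *chip)
{
	kfree(chip->saved_regs);	/* must pair with kmalloc, not vfree */
	chip->saved_regs = NULL;
}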
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index b36679384b2..5fbf5db2543 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -477,7 +477,7 @@ static int snd_pmac_awacs_put_master_amp(struct snd_kcontrol *kcontrol,
#define AMP_CH_SPK 0
#define AMP_CH_HD 1
-static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Playback Volume",
.info = snd_pmac_awacs_info_volume_amp,
@@ -514,7 +514,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] __devinitdata = {
},
};
-static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Switch",
.info = snd_pmac_boolean_stereo_info,
@@ -523,7 +523,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw __devinitdata = {
.private_value = AMP_CH_HD,
};
-static struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Playback Switch",
.info = snd_pmac_boolean_stereo_info,
@@ -595,46 +595,46 @@ static int snd_pmac_screamer_mic_boost_put(struct snd_kcontrol *kcontrol,
/*
* lists of mixer elements
*/
-static struct snd_kcontrol_new snd_pmac_awacs_mixers[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers[] = {
AWACS_SWITCH("Master Capture Switch", 1, SHIFT_LOOPTHRU, 0),
AWACS_VOLUME("Master Capture Volume", 0, 4, 0),
/* AWACS_SWITCH("Unknown Playback Switch", 6, SHIFT_PAROUT0, 0), */
};
-static struct snd_kcontrol_new snd_pmac_screamer_mixers_beige[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mixers_beige[] = {
AWACS_VOLUME("Master Playback Volume", 2, 6, 1),
AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1),
AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_LINE, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mixers_lo[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mixers_lo[] = {
AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] = {
AWACS_VOLUME("Play-through Playback Volume", 5, 6, 1),
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] = {
AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
AWACS_VOLUME("Master Playback Volume", 5, 6, 1),
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
};
-static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] = {
AWACS_VOLUME("Line out Playback Volume", 2, 6, 1),
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
};
-static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac5500[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac5500[] = {
AWACS_VOLUME("Headphone Playback Volume", 2, 6, 1),
};
-static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] = {
AWACS_VOLUME("Master Playback Volume", 2, 6, 1),
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
};
@@ -642,34 +642,34 @@ static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac[] __devinitdata = {
/* FIXME: is this correct order?
* screamer (powerbook G3 pismo) seems to have different bits...
*/
-static struct snd_kcontrol_new snd_pmac_awacs_mixers2[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers2[] = {
AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_LINE, 0),
AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_MIC, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mixers2[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mixers2[] = {
AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0),
AWACS_SWITCH("Mic Capture Switch", 0, SHIFT_MUX_LINE, 0),
};
-static struct snd_kcontrol_new snd_pmac_awacs_mixers2_pmac5500[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mixers2_pmac5500[] = {
AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0),
};
-static struct snd_kcontrol_new snd_pmac_awacs_master_sw __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_master_sw =
AWACS_SWITCH("Master Playback Switch", 1, SHIFT_HDMUTE, 1);
-static struct snd_kcontrol_new snd_pmac_awacs_master_sw_imac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_master_sw_imac =
AWACS_SWITCH("Line out Playback Switch", 1, SHIFT_HDMUTE, 1);
-static struct snd_kcontrol_new snd_pmac_awacs_master_sw_pmac5500 __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_master_sw_pmac5500 =
AWACS_SWITCH("Headphone Playback Switch", 1, SHIFT_HDMUTE, 1);
-static struct snd_kcontrol_new snd_pmac_awacs_mic_boost[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_mic_boost[] = {
AWACS_SWITCH("Mic Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Boost Capture Volume",
.info = snd_pmac_screamer_mic_boost_info,
@@ -678,34 +678,34 @@ static struct snd_kcontrol_new snd_pmac_screamer_mic_boost[] __devinitdata = {
},
};
-static struct snd_kcontrol_new snd_pmac_awacs_mic_boost_pmac7500[] __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_mic_boost_pmac7500[] =
{
AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_beige[] __devinitdata =
+static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_beige[] =
{
AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
AWACS_SWITCH("CD Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0),
};
-static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_imac[] __devinitdata =
+static struct snd_kcontrol_new snd_pmac_screamer_mic_boost_imac[] =
{
AWACS_SWITCH("Line Boost Capture Switch", 0, SHIFT_GAINLINE, 0),
AWACS_SWITCH("Mic Boost Capture Switch", 6, SHIFT_MIC_BOOST, 0),
};
-static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] = {
AWACS_VOLUME("Speaker Playback Volume", 4, 6, 1),
};
-static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw =
AWACS_SWITCH("Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1);
-static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 =
AWACS_SWITCH("Speaker Playback Switch", 1, SHIFT_PAROUT1, 1);
-static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __devinitdata =
+static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 =
AWACS_SWITCH("Speaker Playback Switch", 1, SHIFT_PAROUT1, 0);
@@ -872,7 +872,7 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify)
/*
* initialize chip
*/
-int __devinit
+int
snd_pmac_awacs_init(struct snd_pmac *chip)
{
int pm7500 = IS_PM7500;
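Most of the sound/ppc and sound/sh changes in this series follow one mechanical pattern: the __devinit/__devinitdata/__devexit annotations and the __devexit_p() wrapper are dropped, since with CONFIG_HOTPLUG on its way out they no longer move anything into discardable sections; the mixer tables and probe helpers simply become ordinary code and data. A sketch of the before/after shape, using a hypothetical control array in place of the many real ones touched above:

#include <sound/control.h>

/* before: static struct snd_kcontrol_new example_mixers[] __devinitdata = { ... };
 * after:  plain static data, kept for the lifetime of the module
 */
static struct snd_kcontrol_new example_mixers[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name  = "Example Playback Switch",	/* hypothetical control */
		.info  = snd_ctl_boolean_mono_info,	/* generic ALSA info helper */
	},
};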
diff --git a/sound/ppc/beep.c b/sound/ppc/beep.c
index a9d350789f5..0040f048221 100644
--- a/sound/ppc/beep.c
+++ b/sound/ppc/beep.c
@@ -215,7 +215,7 @@ static struct snd_kcontrol_new snd_pmac_beep_mixer = {
};
/* Initialize beep stuff */
-int __devinit snd_pmac_attach_beep(struct snd_pmac *chip)
+int snd_pmac_attach_beep(struct snd_pmac *chip)
{
struct pmac_beep *beep;
struct input_dev *input_dev;
diff --git a/sound/ppc/burgundy.c b/sound/ppc/burgundy.c
index 00e2d5166d0..cb4f0a5e984 100644
--- a/sound/ppc/burgundy.c
+++ b/sound/ppc/burgundy.c
@@ -467,7 +467,7 @@ static int snd_pmac_burgundy_put_switch_b(struct snd_kcontrol *kcontrol,
/*
* Burgundy mixers
*/
-static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] = {
BURGUNDY_VOLUME_W("Master Playback Volume", 0,
MASK_ADDR_BURGUNDY_MASTER_VOLUME, 8),
BURGUNDY_VOLUME_W("CD Capture Volume", 0,
@@ -495,7 +495,7 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers[] __devinitdata = {
*/ BURGUNDY_SWITCH_B("PCM Capture Switch", 0,
MASK_ADDR_BURGUNDY_HOSTIFEH, 0x01, 0, 0)
};
-static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] = {
BURGUNDY_VOLUME_W("Line in Capture Volume", 0,
MASK_ADDR_BURGUNDY_VOLLINE, 16),
BURGUNDY_VOLUME_W("Mic Capture Volume", 0,
@@ -521,7 +521,7 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers_imac[] __devinitdata = {
BURGUNDY_SWITCH_B("Mic Boost Capture Switch", 0,
MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1)
};
-static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __devinitdata = {
+static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] = {
BURGUNDY_VOLUME_W("Line in Capture Volume", 0,
MASK_ADDR_BURGUNDY_VOLMIC, 16),
BURGUNDY_VOLUME_B("Line in Gain Capture Volume", 0,
@@ -537,33 +537,33 @@ static struct snd_kcontrol_new snd_pmac_burgundy_mixers_pmac[] __devinitdata = {
/* BURGUNDY_SWITCH_B("Line in Boost Capture Switch", 0,
* MASK_ADDR_BURGUNDY_INPBOOST, 0x40, 0x80, 1) */
};
-static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_imac =
BURGUNDY_SWITCH_B("Master Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_OUTPUT_LEFT | BURGUNDY_LINEOUT_LEFT | BURGUNDY_HP_LEFT,
BURGUNDY_OUTPUT_RIGHT | BURGUNDY_LINEOUT_RIGHT | BURGUNDY_HP_RIGHT, 1);
-static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_master_sw_pmac =
BURGUNDY_SWITCH_B("Master Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_OUTPUT_INTERN
| BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
-static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_imac =
BURGUNDY_SWITCH_B("Speaker Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
-static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_speaker_sw_pmac =
BURGUNDY_SWITCH_B("Speaker Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_OUTPUT_INTERN, 0, 0);
-static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_imac =
BURGUNDY_SWITCH_B("Line out Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_LINEOUT_LEFT, BURGUNDY_LINEOUT_RIGHT, 1);
-static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_line_sw_pmac =
BURGUNDY_SWITCH_B("Line out Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_OUTPUT_LEFT, BURGUNDY_OUTPUT_RIGHT, 1);
-static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac __devinitdata =
+static struct snd_kcontrol_new snd_pmac_burgundy_hp_sw_imac =
BURGUNDY_SWITCH_B("Headphone Playback Switch", 0,
MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES,
BURGUNDY_HP_LEFT, BURGUNDY_HP_RIGHT, 1);
@@ -617,7 +617,7 @@ static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_noti
/*
* initialize burgundy
*/
-int __devinit snd_pmac_burgundy_init(struct snd_pmac *chip)
+int snd_pmac_burgundy_init(struct snd_pmac *chip)
{
int imac = of_machine_is_compatible("iMac");
int i, err;
diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c
index 24200b7bdac..b86526223e4 100644
--- a/sound/ppc/daca.c
+++ b/sound/ppc/daca.c
@@ -244,7 +244,7 @@ static void daca_cleanup(struct snd_pmac *chip)
}
/* exported */
-int __devinit snd_pmac_daca_init(struct snd_pmac *chip)
+int snd_pmac_daca_init(struct snd_pmac *chip)
{
int i, err;
struct pmac_daca *mix;
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 4080becf4ce..01aecc2b507 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -115,7 +115,7 @@ void snd_pmac_keywest_cleanup(struct pmac_keywest *i2c)
}
}
-int __devinit snd_pmac_tumbler_post_init(void)
+int snd_pmac_tumbler_post_init(void)
{
int err;
@@ -130,7 +130,7 @@ int __devinit snd_pmac_tumbler_post_init(void)
}
/* exported */
-int __devinit snd_pmac_keywest_init(struct pmac_keywest *i2c)
+int snd_pmac_keywest_init(struct pmac_keywest *i2c)
{
int err;
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index ab96cde7417..c93fbbb201f 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -702,7 +702,7 @@ static struct snd_pcm_ops snd_pmac_capture_ops = {
.pointer = snd_pmac_capture_pointer,
};
-int __devinit snd_pmac_pcm_new(struct snd_pmac *chip)
+int snd_pmac_pcm_new(struct snd_pmac *chip)
{
struct snd_pcm *pcm;
int err;
@@ -907,7 +907,7 @@ static int snd_pmac_dev_free(struct snd_device *device)
* check the machine support byteswap (little-endian)
*/
-static void __devinit detect_byte_swap(struct snd_pmac *chip)
+static void detect_byte_swap(struct snd_pmac *chip)
{
struct device_node *mio;
@@ -933,7 +933,7 @@ static void __devinit detect_byte_swap(struct snd_pmac *chip)
/*
* detect a sound chip
*/
-static int __devinit snd_pmac_detect(struct snd_pmac *chip)
+static int snd_pmac_detect(struct snd_pmac *chip)
{
struct device_node *sound;
struct device_node *dn;
@@ -1146,7 +1146,7 @@ static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = {
+static struct snd_kcontrol_new auto_mute_controls[] = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Auto Mute Switch",
.info = snd_pmac_boolean_mono_info,
@@ -1161,7 +1161,7 @@ static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = {
},
};
-int __devinit snd_pmac_add_automute(struct snd_pmac *chip)
+int snd_pmac_add_automute(struct snd_pmac *chip)
{
int err;
chip->auto_mute = 1;
@@ -1178,7 +1178,7 @@ int __devinit snd_pmac_add_automute(struct snd_pmac *chip)
/*
* create and detect a pmac chip record
*/
-int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
+int snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
{
struct snd_pmac *chip;
struct device_node *np;
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index 210cafe0489..09fc848d32e 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -51,7 +51,7 @@ static struct platform_device *device;
/*
*/
-static int __devinit snd_pmac_probe(struct platform_device *devptr)
+static int snd_pmac_probe(struct platform_device *devptr)
{
struct snd_card *card;
struct snd_pmac *chip;
@@ -136,7 +136,7 @@ __error:
}
-static int __devexit snd_pmac_remove(struct platform_device *devptr)
+static int snd_pmac_remove(struct platform_device *devptr)
{
snd_card_free(platform_get_drvdata(devptr));
platform_set_drvdata(devptr, NULL);
@@ -168,7 +168,7 @@ static SIMPLE_DEV_PM_OPS(snd_pmac_pm, snd_pmac_driver_suspend, snd_pmac_driver_r
static struct platform_driver snd_pmac_driver = {
.probe = snd_pmac_probe,
- .remove = __devexit_p(snd_pmac_remove),
+ .remove = snd_pmac_remove,
.driver = {
.name = SND_PMAC_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 9b18b5243a5..8c7dcbe0118 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -786,7 +786,7 @@ static struct snd_pcm_ops snd_ps3_pcm_spdif_ops = {
};
-static int __devinit snd_ps3_map_mmio(void)
+static int snd_ps3_map_mmio(void)
{
the_card.mapped_mmio_vaddr =
ioremap(the_card.ps3_dev->m_region->bus_addr,
@@ -808,7 +808,7 @@ static void snd_ps3_unmap_mmio(void)
the_card.mapped_mmio_vaddr = NULL;
}
-static int __devinit snd_ps3_allocate_irq(void)
+static int snd_ps3_allocate_irq(void)
{
int ret;
u64 lpar_addr, lpar_size;
@@ -866,7 +866,7 @@ static void snd_ps3_free_irq(void)
ps3_irq_plug_destroy(the_card.irq_no);
}
-static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
+static void snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
{
uint64_t val;
int ret;
@@ -882,7 +882,7 @@ static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
ret);
}
-static void __devinit snd_ps3_audio_fixup(struct snd_ps3_card_info *card)
+static void snd_ps3_audio_fixup(struct snd_ps3_card_info *card)
{
/*
* avsetting driver seems to never change the followings
@@ -906,7 +906,7 @@ static void __devinit snd_ps3_audio_fixup(struct snd_ps3_card_info *card)
PS3_AUDIO_AO_3WMCTRL_ASOPLRCK_DEFAULT);
}
-static int __devinit snd_ps3_init_avsetting(struct snd_ps3_card_info *card)
+static int snd_ps3_init_avsetting(struct snd_ps3_card_info *card)
{
int ret;
pr_debug("%s: start\n", __func__);
@@ -928,7 +928,7 @@ static int __devinit snd_ps3_init_avsetting(struct snd_ps3_card_info *card)
return ret;
}
-static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
+static int snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
{
int i, ret;
u64 lpar_addr, lpar_size;
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 9cea84c3e0c..b23354a4cec 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -844,7 +844,7 @@ static int snapper_put_capture_source(struct snd_kcontrol *kcontrol,
/*
*/
-static struct snd_kcontrol_new tumbler_mixers[] __devinitdata = {
+static struct snd_kcontrol_new tumbler_mixers[] = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Volume",
.info = tumbler_info_master_volume,
@@ -868,7 +868,7 @@ static struct snd_kcontrol_new tumbler_mixers[] __devinitdata = {
},
};
-static struct snd_kcontrol_new snapper_mixers[] __devinitdata = {
+static struct snd_kcontrol_new snapper_mixers[] = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Volume",
.info = tumbler_info_master_volume,
@@ -901,7 +901,7 @@ static struct snd_kcontrol_new snapper_mixers[] __devinitdata = {
},
};
-static struct snd_kcontrol_new tumbler_hp_sw __devinitdata = {
+static struct snd_kcontrol_new tumbler_hp_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -909,7 +909,7 @@ static struct snd_kcontrol_new tumbler_hp_sw __devinitdata = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_HP,
};
-static struct snd_kcontrol_new tumbler_speaker_sw __devinitdata = {
+static struct snd_kcontrol_new tumbler_speaker_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -917,7 +917,7 @@ static struct snd_kcontrol_new tumbler_speaker_sw __devinitdata = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_AMP,
};
-static struct snd_kcontrol_new tumbler_lineout_sw __devinitdata = {
+static struct snd_kcontrol_new tumbler_lineout_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Out Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -925,7 +925,7 @@ static struct snd_kcontrol_new tumbler_lineout_sw __devinitdata = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_LINE,
};
-static struct snd_kcontrol_new tumbler_drc_sw __devinitdata = {
+static struct snd_kcontrol_new tumbler_drc_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DRC Switch",
.info = snd_pmac_boolean_mono_info,
@@ -1276,7 +1276,7 @@ static void tumbler_resume(struct snd_pmac *chip)
#endif
/* initialize tumbler */
-static int __devinit tumbler_init(struct snd_pmac *chip)
+static int tumbler_init(struct snd_pmac *chip)
{
int irq;
struct pmac_tumbler *mix = chip->mixer_data;
@@ -1349,7 +1349,7 @@ static void tumbler_cleanup(struct snd_pmac *chip)
}
/* exported */
-int __devinit snd_pmac_tumbler_init(struct snd_pmac *chip)
+int snd_pmac_tumbler_init(struct snd_pmac *chip)
{
int i, err;
struct pmac_tumbler *mix;
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index d48b523207e..e59a73a9bc4 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -540,7 +540,7 @@ static int aica_pcmvolume_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new snd_aica_pcmswitch_control __devinitdata = {
+static struct snd_kcontrol_new snd_aica_pcmswitch_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.index = 0,
@@ -549,7 +549,7 @@ static struct snd_kcontrol_new snd_aica_pcmswitch_control __devinitdata = {
.put = aica_pcmswitch_put
};
-static struct snd_kcontrol_new snd_aica_pcmvolume_control __devinitdata = {
+static struct snd_kcontrol_new snd_aica_pcmvolume_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 0,
@@ -574,8 +574,7 @@ static int load_aica_firmware(void)
return err;
}
-static int __devinit add_aicamixer_controls(struct snd_card_aica
- *dreamcastcard)
+static int add_aicamixer_controls(struct snd_card_aica *dreamcastcard)
{
int err;
err = snd_ctl_add
@@ -591,7 +590,7 @@ static int __devinit add_aicamixer_controls(struct snd_card_aica
return 0;
}
-static int __devexit snd_aica_remove(struct platform_device *devptr)
+static int snd_aica_remove(struct platform_device *devptr)
{
struct snd_card_aica *dreamcastcard;
dreamcastcard = platform_get_drvdata(devptr);
@@ -603,7 +602,7 @@ static int __devexit snd_aica_remove(struct platform_device *devptr)
return 0;
}
-static int __devinit snd_aica_probe(struct platform_device *devptr)
+static int snd_aica_probe(struct platform_device *devptr)
{
int err;
struct snd_card_aica *dreamcastcard;
@@ -652,7 +651,7 @@ static int __devinit snd_aica_probe(struct platform_device *devptr)
static struct platform_driver snd_aica_driver = {
.probe = snd_aica_probe,
- .remove = __devexit_p(snd_aica_remove),
+ .remove = snd_aica_remove,
.driver = {
.name = SND_AICA_DRIVER,
.owner = THIS_MODULE,
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index 0a3394751ed..e68c4fc91a0 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -261,7 +261,7 @@ static struct snd_pcm_ops snd_sh_dac_pcm_ops = {
.mmap = snd_pcm_lib_mmap_iomem,
};
-static int __devinit snd_sh_dac_pcm(struct snd_sh_dac *chip, int device)
+static int snd_sh_dac_pcm(struct snd_sh_dac *chip, int device)
{
int err;
struct snd_pcm *pcm;
@@ -346,9 +346,9 @@ static enum hrtimer_restart sh_dac_audio_timer(struct hrtimer *handle)
}
/* create -- chip-specific constructor for the cards components */
-static int __devinit snd_sh_dac_create(struct snd_card *card,
- struct platform_device *devptr,
- struct snd_sh_dac **rchip)
+static int snd_sh_dac_create(struct snd_card *card,
+ struct platform_device *devptr,
+ struct snd_sh_dac **rchip)
{
struct snd_sh_dac *chip;
int err;
@@ -392,7 +392,7 @@ static int __devinit snd_sh_dac_create(struct snd_card *card,
}
/* driver .probe -- constructor */
-static int __devinit snd_sh_dac_probe(struct platform_device *devptr)
+static int snd_sh_dac_probe(struct platform_device *devptr)
{
struct snd_sh_dac *chip;
struct snd_card *card;
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 72b09cfd3dc..d1b691bf8e2 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -6,6 +6,14 @@ config SND_ATMEL_SOC
the ATMEL SSC interface. You will also need
to select the audio interfaces to support below.
+config SND_ATMEL_SOC_PDC
+ tristate
+ depends on SND_ATMEL_SOC
+
+config SND_ATMEL_SOC_DMA
+ tristate
+ depends on SND_ATMEL_SOC
+
config SND_ATMEL_SOC_SSC
tristate
depends on SND_ATMEL_SOC
@@ -16,8 +24,8 @@ config SND_ATMEL_SOC_SSC
config SND_AT91_SOC_SAM9G20_WM8731
tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board"
- depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \
- AT91_PROGRAMMABLE_CLOCKS
+ depends on ATMEL_SSC && SND_ATMEL_SOC && AT91_PROGRAMMABLE_CLOCKS
+ select SND_ATMEL_SOC_PDC
select SND_ATMEL_SOC_SSC
select SND_SOC_WM8731
help
@@ -27,6 +35,7 @@ config SND_AT91_SOC_SAM9G20_WM8731
config SND_AT91_SOC_AFEB9260
tristate "SoC Audio support for AFEB9260 board"
depends on ATMEL_SSC && ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
+ select SND_ATMEL_SOC_PDC
select SND_ATMEL_SOC_SSC
select SND_SOC_TLV320AIC23
help
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index a5c0bf19da7..41967ccb6f4 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -1,8 +1,12 @@
# AT91 Platform Support
snd-soc-atmel-pcm-objs := atmel-pcm.o
+snd-soc-atmel-pcm-pdc-objs := atmel-pcm-pdc.o
+snd-soc-atmel-pcm-dma-objs := atmel-pcm-dma.o
snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
obj-$(CONFIG_SND_ATMEL_SOC) += snd-soc-atmel-pcm.o
+obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
+obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
# AT91 Machine Support
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
new file mode 100644
index 00000000000..30184a4a147
--- /dev/null
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -0,0 +1,240 @@
+/*
+ * atmel-pcm-dma.c -- ALSA PCM DMA support for the Atmel SoC.
+ *
+ * Copyright (C) 2012 Atmel
+ *
+ * Author: Bo Shen <voice.shen@atmel.com>
+ *
+ * Based on atmel-pcm by:
+ * Sedji Gaouaou <sedji.gaouaou@atmel.com>
+ * Copyright 2008 Atmel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/atmel-ssc.h>
+#include <linux/platform_data/dma-atmel.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "atmel-pcm.h"
+
+/*--------------------------------------------------------------------------*\
+ * Hardware definition
+\*--------------------------------------------------------------------------*/
+static const struct snd_pcm_hardware atmel_pcm_dma_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = 256, /* keep DMA overhead light */
+ .period_bytes_max = 2 * 0xffff, /* if 2 bytes format */
+ .periods_min = 8,
+ .periods_max = 1024, /* no limit */
+ .buffer_bytes_max = ATMEL_SSC_DMABUF_SIZE,
+};
+
+/**
+ * atmel_pcm_dma_irq: SSC interrupt handler for DMAENGINE-enabled SSC
+ *
+ * DMAENGINE moves the data to/from the SSC, so this ISR only checks
+ * whether an overrun or underrun occurred.
+ */
+static void atmel_pcm_dma_irq(u32 ssc_sr,
+ struct snd_pcm_substream *substream)
+{
+ struct atmel_pcm_dma_params *prtd;
+
+ prtd = snd_dmaengine_pcm_get_data(substream);
+
+ if (ssc_sr & prtd->mask->ssc_error) {
+ if (snd_pcm_running(substream))
+ pr_warn("atmel-pcm: buffer %s on %s (SSC_SR=%#x)\n",
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+ ? "underrun" : "overrun", prtd->name,
+ ssc_sr);
+
+ /* stop RX and capture: will be enabled again at restart */
+ ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+
+ /* now drain RHR and read status to remove xrun condition */
+ ssc_readx(prtd->ssc->regs, SSC_RHR);
+ ssc_readx(prtd->ssc->regs, SSC_SR);
+ }
+}
+
+/*--------------------------------------------------------------------------*\
+ * DMAENGINE operations
+\*--------------------------------------------------------------------------*/
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct atmel_pcm_dma_params *prtd;
+ struct ssc_device *ssc;
+ struct dma_chan *dma_chan;
+ struct dma_slave_config slave_config;
+ int ret;
+
+ prtd = snd_dmaengine_pcm_get_data(substream);
+ ssc = prtd->ssc;
+
+ ret = snd_hwparams_to_dma_slave_config(substream, params,
+ &slave_config);
+ if (ret) {
+ pr_err("atmel-pcm: hwparams to dma slave configure failed\n");
+ return ret;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config.dst_addr = (dma_addr_t)ssc->phybase + SSC_THR;
+ slave_config.dst_maxburst = 1;
+ } else {
+ slave_config.src_addr = (dma_addr_t)ssc->phybase + SSC_RHR;
+ slave_config.src_maxburst = 1;
+ }
+
+ slave_config.device_fc = false;
+
+ dma_chan = snd_dmaengine_pcm_get_chan(substream);
+ if (dmaengine_slave_config(dma_chan, &slave_config)) {
+ pr_err("atmel-pcm: failed to configure dma channel\n");
+ ret = -EBUSY;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct atmel_pcm_dma_params *prtd;
+ struct ssc_device *ssc;
+ struct at_dma_slave *sdata = NULL;
+ int ret;
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ ssc = prtd->ssc;
+ if (ssc->pdev)
+ sdata = ssc->pdev->dev.platform_data;
+
+ ret = snd_dmaengine_pcm_open(substream, filter, sdata);
+ if (ret) {
+ pr_err("atmel-pcm: dmaengine pcm open failed\n");
+ return -EINVAL;
+ }
+
+ snd_dmaengine_pcm_set_data(substream, prtd);
+
+ ret = atmel_pcm_configure_dma(substream, params);
+ if (ret) {
+ pr_err("atmel-pcm: failed to configure dmai\n");
+ goto err;
+ }
+
+ prtd->dma_intr_handler = atmel_pcm_dma_irq;
+
+ return 0;
+err:
+ snd_dmaengine_pcm_close(substream);
+ return ret;
+}
+
+static int atmel_pcm_dma_prepare(struct snd_pcm_substream *substream)
+{
+ struct atmel_pcm_dma_params *prtd;
+
+ prtd = snd_dmaengine_pcm_get_data(substream);
+
+ ssc_writex(prtd->ssc->regs, SSC_IER, prtd->mask->ssc_error);
+ ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_enable);
+
+ return 0;
+}
+
+static int atmel_pcm_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &atmel_pcm_dma_hardware);
+
+ return 0;
+}
+
+static int atmel_pcm_close(struct snd_pcm_substream *substream)
+{
+ snd_dmaengine_pcm_close(substream);
+
+ return 0;
+}
+
+static struct snd_pcm_ops atmel_pcm_ops = {
+ .open = atmel_pcm_open,
+ .close = atmel_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = atmel_pcm_hw_params,
+ .prepare = atmel_pcm_dma_prepare,
+ .trigger = snd_dmaengine_pcm_trigger,
+ .pointer = snd_dmaengine_pcm_pointer_no_residue,
+ .mmap = atmel_pcm_mmap,
+};
+
+static struct snd_soc_platform_driver atmel_soc_platform = {
+ .ops = &atmel_pcm_ops,
+ .pcm_new = atmel_pcm_new,
+ .pcm_free = atmel_pcm_free,
+};
+
+int atmel_pcm_dma_platform_register(struct device *dev)
+{
+ return snd_soc_register_platform(dev, &atmel_soc_platform);
+}
+EXPORT_SYMBOL(atmel_pcm_dma_platform_register);
+
+void atmel_pcm_dma_platform_unregister(struct device *dev)
+{
+ snd_soc_unregister_platform(dev);
+}
+EXPORT_SYMBOL(atmel_pcm_dma_platform_unregister);
+
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("Atmel DMA based PCM module");
+MODULE_LICENSE("GPL");
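Note that the new DMA backend does not register a platform driver of its own; it exports atmel_pcm_dma_platform_register()/atmel_pcm_dma_platform_unregister() so the SSC DAI driver can attach the PCM platform to its own device. A hypothetical caller could look like this (the probe/remove names and surrounding driver are illustrative, not part of the patch):

#include <linux/platform_device.h>

#include "atmel-pcm.h"

static int example_ssc_audio_probe(struct platform_device *pdev)
{
	/* register the DAI itself first (omitted), then the DMA PCM backend */
	return atmel_pcm_dma_platform_register(&pdev->dev);
}

static int example_ssc_audio_remove(struct platform_device *pdev)
{
	atmel_pcm_dma_platform_unregister(&pdev->dev);
	return 0;
}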
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
new file mode 100644
index 00000000000..6a293c713a3
--- /dev/null
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -0,0 +1,401 @@
+/*
+ * atmel-pcm-pdc.c -- ALSA PCM (PDC) interface for the Atmel SoC.
+ *
+ * Copyright (C) 2005 SAN People
+ * Copyright (C) 2008 Atmel
+ *
+ * Authors: Sedji Gaouaou <sedji.gaouaou@atmel.com>
+ *
+ * Based on at91-pcm by:
+ * Frank Mandarino <fmandarino@endrelia.com>
+ * Copyright 2006 Endrelia Technologies Inc.
+ *
+ * Based on pxa2xx-pcm.c by:
+ *
+ * Author: Nicolas Pitre
+ * Created: Nov 30, 2004
+ * Copyright: (C) 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/atmel_pdc.h>
+#include <linux/atmel-ssc.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "atmel-pcm.h"
+
+
+/*--------------------------------------------------------------------------*\
+ * Hardware definition
+\*--------------------------------------------------------------------------*/
+/* TODO: These values were taken from the AT91 platform driver, check
+ * them against real values for AT32
+ */
+static const struct snd_pcm_hardware atmel_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = 32,
+ .period_bytes_max = 8192,
+ .periods_min = 2,
+ .periods_max = 1024,
+ .buffer_bytes_max = ATMEL_SSC_DMABUF_SIZE,
+};
+
+
+/*--------------------------------------------------------------------------*\
+ * Data types
+\*--------------------------------------------------------------------------*/
+struct atmel_runtime_data {
+ struct atmel_pcm_dma_params *params;
+ dma_addr_t dma_buffer; /* physical address of dma buffer */
+ dma_addr_t dma_buffer_end; /* first address beyond DMA buffer */
+ size_t period_size;
+
+ dma_addr_t period_ptr; /* physical address of next period */
+
+ /* PDC register save */
+ u32 pdc_xpr_save;
+ u32 pdc_xcr_save;
+ u32 pdc_xnpr_save;
+ u32 pdc_xncr_save;
+};
+
+/*--------------------------------------------------------------------------*\
+ * ISR
+\*--------------------------------------------------------------------------*/
+static void atmel_pcm_dma_irq(u32 ssc_sr,
+ struct snd_pcm_substream *substream)
+{
+ struct atmel_runtime_data *prtd = substream->runtime->private_data;
+ struct atmel_pcm_dma_params *params = prtd->params;
+ static int count;
+
+ count++;
+
+ if (ssc_sr & params->mask->ssc_endbuf) {
+ pr_warn("atmel-pcm: buffer %s on %s (SSC_SR=%#x, count=%d)\n",
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+ ? "underrun" : "overrun",
+ params->name, ssc_sr, count);
+
+ /* re-start the PDC */
+ ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
+ params->mask->pdc_disable);
+ prtd->period_ptr += prtd->period_size;
+ if (prtd->period_ptr >= prtd->dma_buffer_end)
+ prtd->period_ptr = prtd->dma_buffer;
+
+ ssc_writex(params->ssc->regs, params->pdc->xpr,
+ prtd->period_ptr);
+ ssc_writex(params->ssc->regs, params->pdc->xcr,
+ prtd->period_size / params->pdc_xfer_size);
+ ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
+ params->mask->pdc_enable);
+ }
+
+ if (ssc_sr & params->mask->ssc_endx) {
+ /* Load the PDC next pointer and counter registers */
+ prtd->period_ptr += prtd->period_size;
+ if (prtd->period_ptr >= prtd->dma_buffer_end)
+ prtd->period_ptr = prtd->dma_buffer;
+
+ ssc_writex(params->ssc->regs, params->pdc->xnpr,
+ prtd->period_ptr);
+ ssc_writex(params->ssc->regs, params->pdc->xncr,
+ prtd->period_size / params->pdc_xfer_size);
+ }
+
+ snd_pcm_period_elapsed(substream);
+}
+
+
+/*--------------------------------------------------------------------------*\
+ * PCM operations
+\*--------------------------------------------------------------------------*/
+static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct atmel_runtime_data *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* this may get called several times by oss emulation
+ * with different params */
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ runtime->dma_bytes = params_buffer_bytes(params);
+
+ prtd->params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ prtd->params->dma_intr_handler = atmel_pcm_dma_irq;
+
+ prtd->dma_buffer = runtime->dma_addr;
+ prtd->dma_buffer_end = runtime->dma_addr + runtime->dma_bytes;
+ prtd->period_size = params_period_bytes(params);
+
+ pr_debug("atmel-pcm: "
+ "hw_params: DMA for %s initialized "
+ "(dma_bytes=%u, period_size=%u)\n",
+ prtd->params->name,
+ runtime->dma_bytes,
+ prtd->period_size);
+ return 0;
+}
+
+static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct atmel_runtime_data *prtd = substream->runtime->private_data;
+ struct atmel_pcm_dma_params *params = prtd->params;
+
+ if (params != NULL) {
+ ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
+ params->mask->pdc_disable);
+ prtd->params->dma_intr_handler = NULL;
+ }
+
+ return 0;
+}
+
+static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct atmel_runtime_data *prtd = substream->runtime->private_data;
+ struct atmel_pcm_dma_params *params = prtd->params;
+
+ ssc_writex(params->ssc->regs, SSC_IDR,
+ params->mask->ssc_endx | params->mask->ssc_endbuf);
+ ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
+ params->mask->pdc_disable);
+ return 0;
+}
+
+static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
+ int cmd)
+{
+ struct snd_pcm_runtime *rtd = substream->runtime;
+ struct atmel_runtime_data *prtd = rtd->private_data;
+ struct atmel_pcm_dma_params *params = prtd->params;
+ int ret = 0;
+
+ pr_debug("atmel-pcm:buffer_size = %ld,"
+ "dma_area = %p, dma_bytes = %u\n",
+ rtd->buffer_size, rtd->dma_area, rtd->dma_bytes);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ prtd->period_ptr = prtd->dma_buffer;
+
+ ssc_writex(params->ssc->regs, params->pdc->xpr,
+ prtd->period_ptr);
+ ssc_writex(params->ssc->regs, params->pdc->xcr,
+ prtd->period_size / params->pdc_xfer_size);
+
+ prtd->period_ptr += prtd->period_size;
+ ssc_writex(params->ssc->regs, params->pdc->xnpr,
+ prtd->period_ptr);
+ ssc_writex(params->ssc->regs, params->pdc->xncr,
+ prtd->period_size / params->pdc_xfer_size);
+
+ pr_debug("atmel-pcm: trigger: "
+ "period_ptr=%lx, xpr=%u, "
+ "xcr=%u, xnpr=%u, xncr=%u\n",
+ (unsigned long)prtd->period_ptr,
+ ssc_readx(params->ssc->regs, params->pdc->xpr),
+ ssc_readx(params->ssc->regs, params->pdc->xcr),
+ ssc_readx(params->ssc->regs, params->pdc->xnpr),
+ ssc_readx(params->ssc->regs, params->pdc->xncr));
+
+ ssc_writex(params->ssc->regs, SSC_IER,
+ params->mask->ssc_endx | params->mask->ssc_endbuf);
+ ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
+ params->mask->pdc_enable);
+
+ pr_debug("sr=%u imr=%u\n",
+ ssc_readx(params->ssc->regs, SSC_SR),
+ ssc_readx(params->ssc->regs, SSC_IER));
+ break; /* SNDRV_PCM_TRIGGER_START */
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
+ params->mask->pdc_disable);
+ break;
+
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
+ params->mask->pdc_enable);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t atmel_pcm_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct atmel_runtime_data *prtd = runtime->private_data;
+ struct atmel_pcm_dma_params *params = prtd->params;
+ dma_addr_t ptr;
+ snd_pcm_uframes_t x;
+
+ ptr = (dma_addr_t) ssc_readx(params->ssc->regs, params->pdc->xpr);
+ x = bytes_to_frames(runtime, ptr - prtd->dma_buffer);
+
+ if (x == runtime->buffer_size)
+ x = 0;
+
+ return x;
+}
+
+static int atmel_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct atmel_runtime_data *prtd;
+ int ret = 0;
+
+ snd_soc_set_runtime_hwparams(substream, &atmel_pcm_hardware);
+
+ /* ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ goto out;
+
+ prtd = kzalloc(sizeof(struct atmel_runtime_data), GFP_KERNEL);
+ if (prtd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ runtime->private_data = prtd;
+
+ out:
+ return ret;
+}
+
+static int atmel_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct atmel_runtime_data *prtd = substream->runtime->private_data;
+
+ kfree(prtd);
+ return 0;
+}
+
+static struct snd_pcm_ops atmel_pcm_ops = {
+ .open = atmel_pcm_open,
+ .close = atmel_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = atmel_pcm_hw_params,
+ .hw_free = atmel_pcm_hw_free,
+ .prepare = atmel_pcm_prepare,
+ .trigger = atmel_pcm_trigger,
+ .pointer = atmel_pcm_pointer,
+ .mmap = atmel_pcm_mmap,
+};
+
+
+/*--------------------------------------------------------------------------*\
+ * ASoC platform driver
+\*--------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
+static int atmel_pcm_suspend(struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = dai->runtime;
+ struct atmel_runtime_data *prtd;
+ struct atmel_pcm_dma_params *params;
+
+ if (!runtime)
+ return 0;
+
+ prtd = runtime->private_data;
+ params = prtd->params;
+
+ /* disable the PDC and save the PDC registers */
+
+ ssc_writel(params->ssc->regs, PDC_PTCR, params->mask->pdc_disable);
+
+ prtd->pdc_xpr_save = ssc_readx(params->ssc->regs, params->pdc->xpr);
+ prtd->pdc_xcr_save = ssc_readx(params->ssc->regs, params->pdc->xcr);
+ prtd->pdc_xnpr_save = ssc_readx(params->ssc->regs, params->pdc->xnpr);
+ prtd->pdc_xncr_save = ssc_readx(params->ssc->regs, params->pdc->xncr);
+
+ return 0;
+}
+
+static int atmel_pcm_resume(struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = dai->runtime;
+ struct atmel_runtime_data *prtd;
+ struct atmel_pcm_dma_params *params;
+
+ if (!runtime)
+ return 0;
+
+ prtd = runtime->private_data;
+ params = prtd->params;
+
+ /* restore the PDC registers and enable the PDC */
+ ssc_writex(params->ssc->regs, params->pdc->xpr, prtd->pdc_xpr_save);
+ ssc_writex(params->ssc->regs, params->pdc->xcr, prtd->pdc_xcr_save);
+ ssc_writex(params->ssc->regs, params->pdc->xnpr, prtd->pdc_xnpr_save);
+ ssc_writex(params->ssc->regs, params->pdc->xncr, prtd->pdc_xncr_save);
+
+ ssc_writel(params->ssc->regs, PDC_PTCR, params->mask->pdc_enable);
+ return 0;
+}
+#else
+#define atmel_pcm_suspend NULL
+#define atmel_pcm_resume NULL
+#endif
+
+static struct snd_soc_platform_driver atmel_soc_platform = {
+ .ops = &atmel_pcm_ops,
+ .pcm_new = atmel_pcm_new,
+ .pcm_free = atmel_pcm_free,
+ .suspend = atmel_pcm_suspend,
+ .resume = atmel_pcm_resume,
+};
+
+int atmel_pcm_pdc_platform_register(struct device *dev)
+{
+ return snd_soc_register_platform(dev, &atmel_soc_platform);
+}
+EXPORT_SYMBOL(atmel_pcm_pdc_platform_register);
+
+void atmel_pcm_pdc_platform_unregister(struct device *dev)
+{
+ snd_soc_unregister_platform(dev);
+}
+EXPORT_SYMBOL(atmel_pcm_pdc_platform_unregister);
+
+MODULE_AUTHOR("Sedji Gaouaou <sedji.gaouaou@atmel.com>");
+MODULE_DESCRIPTION("Atmel PCM module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c
index 9b84f985770..e99f1811300 100644
--- a/sound/soc/atmel/atmel-pcm.c
+++ b/sound/soc/atmel/atmel-pcm.c
@@ -32,80 +32,25 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <linux/atmel_pdc.h>
-#include <linux/atmel-ssc.h>
-
-#include <sound/core.h>
#include <sound/pcm.h>
-#include <sound/pcm_params.h>
#include <sound/soc.h>
-
#include "atmel-pcm.h"
-
-/*--------------------------------------------------------------------------*\
- * Hardware definition
-\*--------------------------------------------------------------------------*/
-/* TODO: These values were taken from the AT91 platform driver, check
- * them against real values for AT32
- */
-static const struct snd_pcm_hardware atmel_pcm_hardware = {
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_PAUSE,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .period_bytes_min = 32,
- .period_bytes_max = 8192,
- .periods_min = 2,
- .periods_max = 1024,
- .buffer_bytes_max = 32 * 1024,
-};
-
-
-/*--------------------------------------------------------------------------*\
- * Data types
-\*--------------------------------------------------------------------------*/
-struct atmel_runtime_data {
- struct atmel_pcm_dma_params *params;
- dma_addr_t dma_buffer; /* physical address of dma buffer */
- dma_addr_t dma_buffer_end; /* first address beyond DMA buffer */
- size_t period_size;
-
- dma_addr_t period_ptr; /* physical address of next period */
-
- /* PDC register save */
- u32 pdc_xpr_save;
- u32 pdc_xcr_save;
- u32 pdc_xnpr_save;
- u32 pdc_xncr_save;
-};
-
-
-/*--------------------------------------------------------------------------*\
- * Helper functions
-\*--------------------------------------------------------------------------*/
static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
- size_t size = atmel_pcm_hardware.buffer_bytes_max;
+ size_t size = ATMEL_SSC_DMABUF_SIZE;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_coherent(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
- pr_debug("atmel-pcm:"
- "preallocate_dma_buffer: area=%p, addr=%p, size=%d\n",
- (void *) buf->area,
- (void *) buf->addr,
- size);
+ &buf->addr, GFP_KERNEL);
+ pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%d\n",
+ (void *)buf->area, (void *)buf->addr, size);
if (!buf->area)
return -ENOMEM;
@@ -113,258 +58,19 @@ static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
buf->bytes = size;
return 0;
}
-/*--------------------------------------------------------------------------*\
- * ISR
-\*--------------------------------------------------------------------------*/
-static void atmel_pcm_dma_irq(u32 ssc_sr,
- struct snd_pcm_substream *substream)
-{
- struct atmel_runtime_data *prtd = substream->runtime->private_data;
- struct atmel_pcm_dma_params *params = prtd->params;
- static int count;
-
- count++;
-
- if (ssc_sr & params->mask->ssc_endbuf) {
- pr_warning("atmel-pcm: buffer %s on %s"
- " (SSC_SR=%#x, count=%d)\n",
- substream->stream == SNDRV_PCM_STREAM_PLAYBACK
- ? "underrun" : "overrun",
- params->name, ssc_sr, count);
-
- /* re-start the PDC */
- ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
- params->mask->pdc_disable);
- prtd->period_ptr += prtd->period_size;
- if (prtd->period_ptr >= prtd->dma_buffer_end)
- prtd->period_ptr = prtd->dma_buffer;
-
- ssc_writex(params->ssc->regs, params->pdc->xpr,
- prtd->period_ptr);
- ssc_writex(params->ssc->regs, params->pdc->xcr,
- prtd->period_size / params->pdc_xfer_size);
- ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
- params->mask->pdc_enable);
- }
-
- if (ssc_sr & params->mask->ssc_endx) {
- /* Load the PDC next pointer and counter registers */
- prtd->period_ptr += prtd->period_size;
- if (prtd->period_ptr >= prtd->dma_buffer_end)
- prtd->period_ptr = prtd->dma_buffer;
-
- ssc_writex(params->ssc->regs, params->pdc->xnpr,
- prtd->period_ptr);
- ssc_writex(params->ssc->regs, params->pdc->xncr,
- prtd->period_size / params->pdc_xfer_size);
- }
-
- snd_pcm_period_elapsed(substream);
-}
-
-
-/*--------------------------------------------------------------------------*\
- * PCM operations
-\*--------------------------------------------------------------------------*/
-static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct atmel_runtime_data *prtd = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
-
- /* this may get called several times by oss emulation
- * with different params */
-
- snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
- runtime->dma_bytes = params_buffer_bytes(params);
-
- prtd->params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- prtd->params->dma_intr_handler = atmel_pcm_dma_irq;
-
- prtd->dma_buffer = runtime->dma_addr;
- prtd->dma_buffer_end = runtime->dma_addr + runtime->dma_bytes;
- prtd->period_size = params_period_bytes(params);
-
- pr_debug("atmel-pcm: "
- "hw_params: DMA for %s initialized "
- "(dma_bytes=%u, period_size=%u)\n",
- prtd->params->name,
- runtime->dma_bytes,
- prtd->period_size);
- return 0;
-}
-
-static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_runtime_data *prtd = substream->runtime->private_data;
- struct atmel_pcm_dma_params *params = prtd->params;
-
- if (params != NULL) {
- ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
- params->mask->pdc_disable);
- prtd->params->dma_intr_handler = NULL;
- }
-
- return 0;
-}
-
-static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct atmel_runtime_data *prtd = substream->runtime->private_data;
- struct atmel_pcm_dma_params *params = prtd->params;
-
- ssc_writex(params->ssc->regs, SSC_IDR,
- params->mask->ssc_endx | params->mask->ssc_endbuf);
- ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
- params->mask->pdc_disable);
- return 0;
-}
-
-static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_pcm_runtime *rtd = substream->runtime;
- struct atmel_runtime_data *prtd = rtd->private_data;
- struct atmel_pcm_dma_params *params = prtd->params;
- int ret = 0;
-
- pr_debug("atmel-pcm:buffer_size = %ld,"
- "dma_area = %p, dma_bytes = %u\n",
- rtd->buffer_size, rtd->dma_area, rtd->dma_bytes);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- prtd->period_ptr = prtd->dma_buffer;
-
- ssc_writex(params->ssc->regs, params->pdc->xpr,
- prtd->period_ptr);
- ssc_writex(params->ssc->regs, params->pdc->xcr,
- prtd->period_size / params->pdc_xfer_size);
-
- prtd->period_ptr += prtd->period_size;
- ssc_writex(params->ssc->regs, params->pdc->xnpr,
- prtd->period_ptr);
- ssc_writex(params->ssc->regs, params->pdc->xncr,
- prtd->period_size / params->pdc_xfer_size);
-
- pr_debug("atmel-pcm: trigger: "
- "period_ptr=%lx, xpr=%u, "
- "xcr=%u, xnpr=%u, xncr=%u\n",
- (unsigned long)prtd->period_ptr,
- ssc_readx(params->ssc->regs, params->pdc->xpr),
- ssc_readx(params->ssc->regs, params->pdc->xcr),
- ssc_readx(params->ssc->regs, params->pdc->xnpr),
- ssc_readx(params->ssc->regs, params->pdc->xncr));
-
- ssc_writex(params->ssc->regs, SSC_IER,
- params->mask->ssc_endx | params->mask->ssc_endbuf);
- ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
- params->mask->pdc_enable);
-
- pr_debug("sr=%u imr=%u\n",
- ssc_readx(params->ssc->regs, SSC_SR),
- ssc_readx(params->ssc->regs, SSC_IER));
- break; /* SNDRV_PCM_TRIGGER_START */
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
- params->mask->pdc_disable);
- break;
-
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
- params->mask->pdc_enable);
- break;
-
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static snd_pcm_uframes_t atmel_pcm_pointer(
- struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct atmel_runtime_data *prtd = runtime->private_data;
- struct atmel_pcm_dma_params *params = prtd->params;
- dma_addr_t ptr;
- snd_pcm_uframes_t x;
-
- ptr = (dma_addr_t) ssc_readx(params->ssc->regs, params->pdc->xpr);
- x = bytes_to_frames(runtime, ptr - prtd->dma_buffer);
-
- if (x == runtime->buffer_size)
- x = 0;
-
- return x;
-}
-
-static int atmel_pcm_open(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct atmel_runtime_data *prtd;
- int ret = 0;
-
- snd_soc_set_runtime_hwparams(substream, &atmel_pcm_hardware);
-
- /* ensure that buffer size is a multiple of period size */
- ret = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
- if (ret < 0)
- goto out;
-
- prtd = kzalloc(sizeof(struct atmel_runtime_data), GFP_KERNEL);
- if (prtd == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- runtime->private_data = prtd;
-
- out:
- return ret;
-}
-
-static int atmel_pcm_close(struct snd_pcm_substream *substream)
-{
- struct atmel_runtime_data *prtd = substream->runtime->private_data;
-
- kfree(prtd);
- return 0;
-}
-
-static int atmel_pcm_mmap(struct snd_pcm_substream *substream,
+int atmel_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
return remap_pfn_range(vma, vma->vm_start,
substream->dma_buffer.addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
+EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
-static struct snd_pcm_ops atmel_pcm_ops = {
- .open = atmel_pcm_open,
- .close = atmel_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = atmel_pcm_hw_params,
- .hw_free = atmel_pcm_hw_free,
- .prepare = atmel_pcm_prepare,
- .trigger = atmel_pcm_trigger,
- .pointer = atmel_pcm_pointer,
- .mmap = atmel_pcm_mmap,
-};
-
-
-/*--------------------------------------------------------------------------*\
- * ASoC platform driver
-\*--------------------------------------------------------------------------*/
static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
+int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -376,6 +82,7 @@ static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
ret = atmel_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
@@ -383,8 +90,7 @@ static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
- pr_debug("atmel-pcm:"
- "Allocating PCM capture DMA buffer\n");
+ pr_debug("atmel-pcm: allocating PCM capture DMA buffer\n");
ret = atmel_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
@@ -393,8 +99,9 @@ static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(atmel_pcm_new);
-static void atmel_pcm_free_dma_buffers(struct snd_pcm *pcm)
+void atmel_pcm_free(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -413,89 +120,5 @@ static void atmel_pcm_free_dma_buffers(struct snd_pcm *pcm)
buf->area = NULL;
}
}
+EXPORT_SYMBOL_GPL(atmel_pcm_free);
-#ifdef CONFIG_PM
-static int atmel_pcm_suspend(struct snd_soc_dai *dai)
-{
- struct snd_pcm_runtime *runtime = dai->runtime;
- struct atmel_runtime_data *prtd;
- struct atmel_pcm_dma_params *params;
-
- if (!runtime)
- return 0;
-
- prtd = runtime->private_data;
- params = prtd->params;
-
- /* disable the PDC and save the PDC registers */
-
- ssc_writel(params->ssc->regs, PDC_PTCR, params->mask->pdc_disable);
-
- prtd->pdc_xpr_save = ssc_readx(params->ssc->regs, params->pdc->xpr);
- prtd->pdc_xcr_save = ssc_readx(params->ssc->regs, params->pdc->xcr);
- prtd->pdc_xnpr_save = ssc_readx(params->ssc->regs, params->pdc->xnpr);
- prtd->pdc_xncr_save = ssc_readx(params->ssc->regs, params->pdc->xncr);
-
- return 0;
-}
-
-static int atmel_pcm_resume(struct snd_soc_dai *dai)
-{
- struct snd_pcm_runtime *runtime = dai->runtime;
- struct atmel_runtime_data *prtd;
- struct atmel_pcm_dma_params *params;
-
- if (!runtime)
- return 0;
-
- prtd = runtime->private_data;
- params = prtd->params;
-
- /* restore the PDC registers and enable the PDC */
- ssc_writex(params->ssc->regs, params->pdc->xpr, prtd->pdc_xpr_save);
- ssc_writex(params->ssc->regs, params->pdc->xcr, prtd->pdc_xcr_save);
- ssc_writex(params->ssc->regs, params->pdc->xnpr, prtd->pdc_xnpr_save);
- ssc_writex(params->ssc->regs, params->pdc->xncr, prtd->pdc_xncr_save);
-
- ssc_writel(params->ssc->regs, PDC_PTCR, params->mask->pdc_enable);
- return 0;
-}
-#else
-#define atmel_pcm_suspend NULL
-#define atmel_pcm_resume NULL
-#endif
-
-static struct snd_soc_platform_driver atmel_soc_platform = {
- .ops = &atmel_pcm_ops,
- .pcm_new = atmel_pcm_new,
- .pcm_free = atmel_pcm_free_dma_buffers,
- .suspend = atmel_pcm_suspend,
- .resume = atmel_pcm_resume,
-};
-
-static int __devinit atmel_soc_platform_probe(struct platform_device *pdev)
-{
- return snd_soc_register_platform(&pdev->dev, &atmel_soc_platform);
-}
-
-static int __devexit atmel_soc_platform_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_platform(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver atmel_pcm_driver = {
- .driver = {
- .name = "atmel-pcm-audio",
- .owner = THIS_MODULE,
- },
-
- .probe = atmel_soc_platform_probe,
- .remove = __devexit_p(atmel_soc_platform_remove),
-};
-
-module_platform_driver(atmel_pcm_driver);
-
-MODULE_AUTHOR("Sedji Gaouaou <sedji.gaouaou@atmel.com>");
-MODULE_DESCRIPTION("Atmel PCM module");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/atmel/atmel-pcm.h b/sound/soc/atmel/atmel-pcm.h
index 5e0a95e6432..bb45d20e725 100644
--- a/sound/soc/atmel/atmel-pcm.h
+++ b/sound/soc/atmel/atmel-pcm.h
@@ -36,6 +36,8 @@
#include <linux/atmel-ssc.h>
+#define ATMEL_SSC_DMABUF_SIZE (64 * 1024)
+
/*
* Registers and status bits that are required by the PCM driver.
*/
@@ -50,6 +52,7 @@ struct atmel_pdc_regs {
struct atmel_ssc_mask {
u32 ssc_enable; /* SSC recv/trans enable */
u32 ssc_disable; /* SSC recv/trans disable */
+ u32 ssc_error; /* SSC error conditions */
u32 ssc_endx; /* SSC ENDTX or ENDRX */
u32 ssc_endbuf; /* SSC TXBUFE or RXBUFF */
u32 pdc_enable; /* PDC recv/trans enable */
@@ -80,4 +83,35 @@ struct atmel_pcm_dma_params {
#define ssc_readx(base, reg) (__raw_readl((base) + (reg)))
#define ssc_writex(base, reg, value) __raw_writel((value), (base) + (reg))
+int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd);
+void atmel_pcm_free(struct snd_pcm *pcm);
+int atmel_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
+
+#ifdef CONFIG_SND_ATMEL_SOC_PDC
+int atmel_pcm_pdc_platform_register(struct device *dev);
+void atmel_pcm_pdc_platform_unregister(struct device *dev);
+#else
+static inline int atmel_pcm_pdc_platform_register(struct device *dev)
+{
+ return 0;
+}
+static inline void atmel_pcm_pdc_platform_unregister(struct device *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_SND_ATMEL_SOC_DMA
+int atmel_pcm_dma_platform_register(struct device *dev);
+void atmel_pcm_dma_platform_unregister(struct device *dev);
+#else
+static inline int atmel_pcm_dma_platform_register(struct device *dev)
+{
+ return 0;
+}
+static inline void atmel_pcm_dma_platform_unregister(struct device *dev)
+{
+}
+#endif
+
#endif /* _ATMEL_PCM_H */
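The #ifdef stubs above let shared code call either backend's register/unregister helpers unconditionally: whichever backend is not enabled in Kconfig collapses to an inline no-op that reports success. The same pattern in isolation, with hypothetical names:

struct device;	/* a forward declaration is all the stubs need */

#ifdef CONFIG_EXAMPLE_PCM_BACKEND
int example_pcm_backend_register(struct device *dev);
void example_pcm_backend_unregister(struct device *dev);
#else
static inline int example_pcm_backend_register(struct device *dev)
{
	return 0;	/* backend not built in: pretend success */
}
static inline void example_pcm_backend_unregister(struct device *dev)
{
}
#endif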
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 354341ec0f4..1c766342205 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -48,11 +48,7 @@
#include "atmel_ssc_dai.h"
-#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20)
-#define NUM_SSC_DEVICES 1
-#else
#define NUM_SSC_DEVICES 3
-#endif
/*
* SSC PDC registers required by the PCM DMA engine.
@@ -107,7 +103,6 @@ static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
-#if NUM_SSC_DEVICES == 3
{{
.name = "SSC1 PCM out",
.pdc = &pdc_tx_reg,
@@ -128,7 +123,6 @@ static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
-#endif
};
@@ -139,7 +133,6 @@ static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
-#if NUM_SSC_DEVICES == 3
{
.name = "ssc1",
.lock = __SPIN_LOCK_UNLOCKED(ssc_info[1].lock),
@@ -152,7 +145,6 @@ static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
-#endif
};
@@ -690,27 +682,9 @@ static int atmel_ssc_resume(struct snd_soc_dai *cpu_dai)
static int atmel_ssc_probe(struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
- int ret = 0;
snd_soc_dai_set_drvdata(dai, ssc_p);
- /*
- * Request SSC device
- */
- ssc_p->ssc = ssc_request(dai->id);
- if (IS_ERR(ssc_p->ssc)) {
- printk(KERN_ERR "ASoC: Failed to request SSC %d\n", dai->id);
- ret = PTR_ERR(ssc_p->ssc);
- }
-
- return ret;
-}
-
-static int atmel_ssc_remove(struct snd_soc_dai *dai)
-{
- struct atmel_ssc_info *ssc_p = snd_soc_dai_get_drvdata(dai);
-
- ssc_free(ssc_p->ssc);
return 0;
}
@@ -728,30 +702,8 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
.set_clkdiv = atmel_ssc_set_dai_clkdiv,
};
-static struct snd_soc_dai_driver atmel_ssc_dai[NUM_SSC_DEVICES] = {
- {
- .name = "atmel-ssc-dai.0",
- .probe = atmel_ssc_probe,
- .remove = atmel_ssc_remove,
- .suspend = atmel_ssc_suspend,
- .resume = atmel_ssc_resume,
- .playback = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = ATMEL_SSC_RATES,
- .formats = ATMEL_SSC_FORMATS,},
- .capture = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = ATMEL_SSC_RATES,
- .formats = ATMEL_SSC_FORMATS,},
- .ops = &atmel_ssc_dai_ops,
- },
-#if NUM_SSC_DEVICES == 3
- {
- .name = "atmel-ssc-dai.1",
+static struct snd_soc_dai_driver atmel_ssc_dai = {
.probe = atmel_ssc_probe,
- .remove = atmel_ssc_remove,
.suspend = atmel_ssc_suspend,
.resume = atmel_ssc_resume,
.playback = {
@@ -765,50 +717,50 @@ static struct snd_soc_dai_driver atmel_ssc_dai[NUM_SSC_DEVICES] = {
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
- },
- {
- .name = "atmel-ssc-dai.2",
- .probe = atmel_ssc_probe,
- .remove = atmel_ssc_remove,
- .suspend = atmel_ssc_suspend,
- .resume = atmel_ssc_resume,
- .playback = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = ATMEL_SSC_RATES,
- .formats = ATMEL_SSC_FORMATS,},
- .capture = {
- .channels_min = 1,
- .channels_max = 2,
- .rates = ATMEL_SSC_RATES,
- .formats = ATMEL_SSC_FORMATS,},
- .ops = &atmel_ssc_dai_ops,
- },
-#endif
};
-static __devinit int asoc_ssc_probe(struct platform_device *pdev)
+static int asoc_ssc_init(struct device *dev)
{
- BUG_ON(pdev->id < 0);
- BUG_ON(pdev->id >= ARRAY_SIZE(atmel_ssc_dai));
- return snd_soc_register_dai(&pdev->dev, &atmel_ssc_dai[pdev->id]);
-}
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssc_device *ssc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = snd_soc_register_dai(dev, &atmel_ssc_dai);
+ if (ret) {
+ dev_err(dev, "Could not register DAI: %d\n", ret);
+ goto err;
+ }
+
+ if (ssc->pdata->use_dma)
+ ret = atmel_pcm_dma_platform_register(dev);
+ else
+ ret = atmel_pcm_pdc_platform_register(dev);
+
+ if (ret) {
+ dev_err(dev, "Could not register PCM: %d\n", ret);
+ goto err_unregister_dai;
+	}
-static int __devexit asoc_ssc_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_dai(&pdev->dev);
return 0;
+
+err_unregister_dai:
+ snd_soc_unregister_dai(dev);
+err:
+ return ret;
}
-static struct platform_driver asoc_ssc_driver = {
- .driver = {
- .name = "atmel-ssc-dai",
- .owner = THIS_MODULE,
- },
+static void asoc_ssc_exit(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ssc_device *ssc = platform_get_drvdata(pdev);
- .probe = asoc_ssc_probe,
- .remove = __devexit_p(asoc_ssc_remove),
-};
+ if (ssc->pdata->use_dma)
+ atmel_pcm_dma_platform_unregister(dev);
+ else
+ atmel_pcm_pdc_platform_unregister(dev);
+
+ snd_soc_unregister_dai(dev);
+}
/**
* atmel_ssc_set_audio - Allocate the specified SSC for audio use.
@@ -816,50 +768,32 @@ static struct platform_driver asoc_ssc_driver = {
int atmel_ssc_set_audio(int ssc_id)
{
struct ssc_device *ssc;
- static struct platform_device *dma_pdev;
- struct platform_device *ssc_pdev;
int ret;
- if (ssc_id < 0 || ssc_id >= ARRAY_SIZE(atmel_ssc_dai))
- return -EINVAL;
-
- /* Allocate a dummy device for DMA if we don't have one already */
- if (!dma_pdev) {
- dma_pdev = platform_device_alloc("atmel-pcm-audio", -1);
- if (!dma_pdev)
- return -ENOMEM;
-
- ret = platform_device_add(dma_pdev);
- if (ret < 0) {
- platform_device_put(dma_pdev);
- dma_pdev = NULL;
- return ret;
- }
- }
-
- ssc_pdev = platform_device_alloc("atmel-ssc-dai", ssc_id);
- if (!ssc_pdev)
- return -ENOMEM;
-
/* If we can grab the SSC briefly to parent the DAI device off it */
ssc = ssc_request(ssc_id);
- if (IS_ERR(ssc))
- pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
+ if (IS_ERR(ssc)) {
+ pr_err("Unable to parent ASoC SSC DAI on SSC: %ld\n",
PTR_ERR(ssc));
- else {
- ssc_pdev->dev.parent = &(ssc->pdev->dev);
- ssc_free(ssc);
+ return PTR_ERR(ssc);
+ } else {
+ ssc_info[ssc_id].ssc = ssc;
}
- ret = platform_device_add(ssc_pdev);
- if (ret < 0)
- platform_device_put(ssc_pdev);
+ ret = asoc_ssc_init(&ssc->pdev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(atmel_ssc_set_audio);
-module_platform_driver(asoc_ssc_driver);
+void atmel_ssc_put_audio(int ssc_id)
+{
+ struct ssc_device *ssc = ssc_info[ssc_id].ssc;
+
+ ssc_free(ssc);
+ asoc_ssc_exit(&ssc->pdev->dev);
+}
+EXPORT_SYMBOL_GPL(atmel_ssc_put_audio);
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou, sedji.gaouaou@atmel.com, www.atmel.com");
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index 5d4f0f9b4d9..b1f08d51149 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -117,6 +117,7 @@ struct atmel_ssc_info {
struct atmel_ssc_state ssc_state;
};
-int atmel_ssc_set_audio(int ssc);
+int atmel_ssc_set_audio(int ssc_id);
+void atmel_ssc_put_audio(int ssc_id);
#endif /* _AT91_SSC_DAI_H */
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index c88351488f4..da976291da9 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -38,6 +38,8 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/pinctrl/consumer.h>
+
#include <linux/atmel-ssc.h>
#include <sound/core.h>
@@ -179,10 +181,10 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
static struct snd_soc_dai_link at91sam9g20ek_dai = {
.name = "WM8731",
.stream_name = "WM8731 PCM",
- .cpu_dai_name = "atmel-ssc-dai.0",
+ .cpu_dai_name = "at91rm9200_ssc.0",
.codec_dai_name = "wm8731-hifi",
.init = at91sam9g20ek_wm8731_init,
- .platform_name = "atmel-pcm-audio",
+ .platform_name = "at91rm9200_ssc.0",
.codec_name = "wm8731.0-001b",
.ops = &at91sam9g20ek_ops,
};
@@ -195,20 +197,31 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
.set_bias_level = at91sam9g20ek_set_bias_level,
};
-static struct platform_device *at91sam9g20ek_snd_device;
-
-static int __init at91sam9g20ek_init(void)
+static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
struct clk *pllb;
+ struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
+ struct pinctrl *pinctrl;
int ret;
- if (!(machine_is_at91sam9g20ek() || machine_is_at91sam9g20ek_2mmc()))
- return -ENODEV;
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "Failed to request pinctrl for mck\n");
+ return PTR_ERR(pinctrl);
+ }
+
+ if (!np) {
+ if (!(machine_is_at91sam9g20ek() ||
+ machine_is_at91sam9g20ek_2mmc()))
+ return -ENODEV;
+ }
ret = atmel_ssc_set_audio(0);
- if (ret != 0) {
- pr_err("Failed to set SSC 0 for audio: %d\n", ret);
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "ssc channel is not valid\n");
+ return -EINVAL;
}
/*
@@ -236,45 +249,92 @@ static int __init at91sam9g20ek_init(void)
clk_set_rate(mclk, MCLK_RATE);
- at91sam9g20ek_snd_device = platform_device_alloc("soc-audio", -1);
- if (!at91sam9g20ek_snd_device) {
- printk(KERN_ERR "ASoC: Platform device allocation failed\n");
- ret = -ENOMEM;
- goto err_mclk;
+ card->dev = &pdev->dev;
+
+ /* Parse device node info */
+ if (np) {
+ ret = snd_soc_of_parse_card_name(card, "atmel,model");
+ if (ret)
+ goto err;
+
+ ret = snd_soc_of_parse_audio_routing(card,
+ "atmel,audio-routing");
+ if (ret)
+ goto err;
+
+ /* Parse codec info */
+ at91sam9g20ek_dai.codec_name = NULL;
+ codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "codec info missing\n");
+ return -EINVAL;
+ }
+ at91sam9g20ek_dai.codec_of_node = codec_np;
+
+ /* Parse dai and platform info */
+ at91sam9g20ek_dai.cpu_dai_name = NULL;
+ at91sam9g20ek_dai.platform_name = NULL;
+ cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "dai and pcm info missing\n");
+ return -EINVAL;
+ }
+ at91sam9g20ek_dai.cpu_of_node = cpu_np;
+ at91sam9g20ek_dai.platform_of_node = cpu_np;
+
+ of_node_put(codec_np);
+ of_node_put(cpu_np);
}
- platform_set_drvdata(at91sam9g20ek_snd_device,
- &snd_soc_at91sam9g20ek);
-
- ret = platform_device_add(at91sam9g20ek_snd_device);
+ ret = snd_soc_register_card(card);
if (ret) {
- printk(KERN_ERR "ASoC: Platform device allocation failed\n");
- goto err_device_add;
+ printk(KERN_ERR "ASoC: snd_soc_register_card() failed\n");
}
return ret;
-err_device_add:
- platform_device_put(at91sam9g20ek_snd_device);
err_mclk:
clk_put(mclk);
mclk = NULL;
err:
+ atmel_ssc_put_audio(0);
return ret;
}
-static void __exit at91sam9g20ek_exit(void)
+static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
{
- platform_device_unregister(at91sam9g20ek_snd_device);
- at91sam9g20ek_snd_device = NULL;
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ atmel_ssc_put_audio(0);
+ snd_soc_unregister_card(card);
clk_put(mclk);
mclk = NULL;
+
+ return 0;
}
-module_init(at91sam9g20ek_init);
-module_exit(at91sam9g20ek_exit);
+#ifdef CONFIG_OF
+static const struct of_device_id at91sam9g20ek_wm8731_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g20ek-wm8731-audio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, at91sam9g20ek_wm8731_dt_ids);
+#endif
+
+static struct platform_driver at91sam9g20ek_audio_driver = {
+ .driver = {
+ .name = "at91sam9g20ek-audio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91sam9g20ek_wm8731_dt_ids),
+ },
+ .probe = at91sam9g20ek_audio_probe,
+ .remove = at91sam9g20ek_audio_remove,
+};
+
+module_platform_driver(at91sam9g20ek_audio_driver);
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou <sedji.gaouaou@atmel.com>");
MODULE_DESCRIPTION("ALSA SoC AT91SAM9G20EK_WM8731");
+MODULE_ALIAS("platform:at91sam9g20ek-audio");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index c5ac2449563..ea7d9d15702 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -223,7 +223,7 @@ static struct snd_soc_dai_driver au1xac97c_dai_driver = {
.ops = &alchemy_ac97c_ops,
};
-static int __devinit au1xac97c_drvprobe(struct platform_device *pdev)
+static int au1xac97c_drvprobe(struct platform_device *pdev)
{
int ret;
struct resource *iores, *dmares;
@@ -276,7 +276,7 @@ static int __devinit au1xac97c_drvprobe(struct platform_device *pdev)
return 0;
}
-static int __devexit au1xac97c_drvremove(struct platform_device *pdev)
+static int au1xac97c_drvremove(struct platform_device *pdev)
{
struct au1xpsc_audio_data *ctx = platform_get_drvdata(pdev);
@@ -330,7 +330,7 @@ static struct platform_driver au1xac97c_driver = {
.pm = AU1XPSCAC97_PMOPS,
},
.probe = au1xac97c_drvprobe,
- .remove = __devexit_p(au1xac97c_drvremove),
+ .remove = au1xac97c_drvremove,
};
static int __init au1xac97c_load(void)
diff --git a/sound/soc/au1x/db1000.c b/sound/soc/au1x/db1000.c
index 511d83c11a9..376d976bcc2 100644
--- a/sound/soc/au1x/db1000.c
+++ b/sound/soc/au1x/db1000.c
@@ -34,14 +34,14 @@ static struct snd_soc_card db1000_ac97 = {
.num_links = 1,
};
-static int __devinit db1000_audio_probe(struct platform_device *pdev)
+static int db1000_audio_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &db1000_ac97;
card->dev = &pdev->dev;
return snd_soc_register_card(card);
}
-static int __devexit db1000_audio_remove(struct platform_device *pdev)
+static int db1000_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
@@ -55,7 +55,7 @@ static struct platform_driver db1000_audio_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = db1000_audio_probe,
- .remove = __devexit_p(db1000_audio_remove),
+ .remove = db1000_audio_remove,
};
module_platform_driver(db1000_audio_driver);
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index 30ea513d81d..a497a0cfeba 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -167,7 +167,7 @@ static struct snd_soc_card db1550_i2s_machine = {
/*------------------------- COMMON PART ---------------------------*/
-static struct snd_soc_card *db1200_cards[] __devinitdata = {
+static struct snd_soc_card *db1200_cards[] = {
&db1200_ac97_machine,
&db1200_i2s_machine,
&db1300_ac97_machine,
@@ -176,7 +176,7 @@ static struct snd_soc_card *db1200_cards[] __devinitdata = {
&db1550_i2s_machine,
};
-static int __devinit db1200_audio_probe(struct platform_device *pdev)
+static int db1200_audio_probe(struct platform_device *pdev)
{
const struct platform_device_id *pid = platform_get_device_id(pdev);
struct snd_soc_card *card;
@@ -186,7 +186,7 @@ static int __devinit db1200_audio_probe(struct platform_device *pdev)
return snd_soc_register_card(card);
}
-static int __devexit db1200_audio_remove(struct platform_device *pdev)
+static int db1200_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
@@ -201,7 +201,7 @@ static struct platform_driver db1200_audio_driver = {
},
.id_table = db1200_pids,
.probe = db1200_audio_probe,
- .remove = __devexit_p(db1200_audio_remove),
+ .remove = db1200_audio_remove,
};
module_platform_driver(db1200_audio_driver);
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 8372cd35f0d..3b4eafaf30d 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -347,7 +347,7 @@ static struct snd_soc_platform_driver au1xpsc_soc_platform = {
.pcm_free = au1xpsc_pcm_free_dma_buffers,
};
-static int __devinit au1xpsc_pcm_drvprobe(struct platform_device *pdev)
+static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
{
struct au1xpsc_audio_dmadata *dmadata;
@@ -362,7 +362,7 @@ static int __devinit au1xpsc_pcm_drvprobe(struct platform_device *pdev)
return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform);
}
-static int __devexit au1xpsc_pcm_drvremove(struct platform_device *pdev)
+static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
@@ -375,7 +375,7 @@ static struct platform_driver au1xpsc_pcm_driver = {
.owner = THIS_MODULE,
},
.probe = au1xpsc_pcm_drvprobe,
- .remove = __devexit_p(au1xpsc_pcm_drvremove),
+ .remove = au1xpsc_pcm_drvremove,
};
module_platform_driver(au1xpsc_pcm_driver);
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 0a91b186a86..befd1074f9b 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -322,7 +322,7 @@ static struct snd_soc_platform_driver alchemy_pcm_soc_platform = {
.pcm_free = alchemy_pcm_free_dma_buffers,
};
-static int __devinit alchemy_pcm_drvprobe(struct platform_device *pdev)
+static int alchemy_pcm_drvprobe(struct platform_device *pdev)
{
struct alchemy_pcm_ctx *ctx;
@@ -335,7 +335,7 @@ static int __devinit alchemy_pcm_drvprobe(struct platform_device *pdev)
return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform);
}
-static int __devexit alchemy_pcm_drvremove(struct platform_device *pdev)
+static int alchemy_pcm_drvremove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
@@ -348,7 +348,7 @@ static struct platform_driver alchemy_pcmdma_driver = {
.owner = THIS_MODULE,
},
.probe = alchemy_pcm_drvprobe,
- .remove = __devexit_p(alchemy_pcm_drvremove),
+ .remove = alchemy_pcm_drvremove,
};
module_platform_driver(alchemy_pcmdma_driver);
diff --git a/sound/soc/au1x/i2sc.c b/sound/soc/au1x/i2sc.c
index d4b9e364a47..072448afc21 100644
--- a/sound/soc/au1x/i2sc.c
+++ b/sound/soc/au1x/i2sc.c
@@ -225,7 +225,7 @@ static struct snd_soc_dai_driver au1xi2s_dai_driver = {
.ops = &au1xi2s_dai_ops,
};
-static int __devinit au1xi2s_drvprobe(struct platform_device *pdev)
+static int au1xi2s_drvprobe(struct platform_device *pdev)
{
struct resource *iores, *dmares;
struct au1xpsc_audio_data *ctx;
@@ -263,7 +263,7 @@ static int __devinit au1xi2s_drvprobe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &au1xi2s_dai_driver);
}
-static int __devexit au1xi2s_drvremove(struct platform_device *pdev)
+static int au1xi2s_drvremove(struct platform_device *pdev)
{
struct au1xpsc_audio_data *ctx = platform_get_drvdata(pdev);
@@ -309,7 +309,7 @@ static struct platform_driver au1xi2s_driver = {
.pm = AU1XI2SC_PMOPS,
},
.probe = au1xi2s_drvprobe,
- .remove = __devexit_p(au1xi2s_drvremove),
+ .remove = au1xi2s_drvremove,
};
module_platform_driver(au1xi2s_driver);
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 476b79a1c11..6ba07e36596 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -361,7 +361,7 @@ static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
.ops = &au1xpsc_ac97_dai_ops,
};
-static int __devinit au1xpsc_ac97_drvprobe(struct platform_device *pdev)
+static int au1xpsc_ac97_drvprobe(struct platform_device *pdev)
{
int ret;
struct resource *iores, *dmares;
@@ -427,7 +427,7 @@ static int __devinit au1xpsc_ac97_drvprobe(struct platform_device *pdev)
return 0;
}
-static int __devexit au1xpsc_ac97_drvremove(struct platform_device *pdev)
+static int au1xpsc_ac97_drvremove(struct platform_device *pdev)
{
struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev);
@@ -495,7 +495,7 @@ static struct platform_driver au1xpsc_ac97_driver = {
.pm = AU1XPSCAC97_PMOPS,
},
.probe = au1xpsc_ac97_drvprobe,
- .remove = __devexit_p(au1xpsc_ac97_drvremove),
+ .remove = au1xpsc_ac97_drvremove,
};
static int __init au1xpsc_ac97_load(void)
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 0607ba3d925..360b4e50d7c 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -288,7 +288,7 @@ static const struct snd_soc_dai_driver au1xpsc_i2s_dai_template = {
.ops = &au1xpsc_i2s_dai_ops,
};
-static int __devinit au1xpsc_i2s_drvprobe(struct platform_device *pdev)
+static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
{
struct resource *iores, *dmares;
unsigned long sel;
@@ -353,7 +353,7 @@ static int __devinit au1xpsc_i2s_drvprobe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &wd->dai_drv);
}
-static int __devexit au1xpsc_i2s_drvremove(struct platform_device *pdev)
+static int au1xpsc_i2s_drvremove(struct platform_device *pdev)
{
struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev);
@@ -418,7 +418,7 @@ static struct platform_driver au1xpsc_i2s_driver = {
.pm = AU1XPSCI2S_PMOPS,
},
.probe = au1xpsc_i2s_drvprobe,
- .remove = __devexit_p(au1xpsc_i2s_drvremove),
+ .remove = au1xpsc_i2s_drvremove,
};
module_platform_driver(au1xpsc_i2s_driver);
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index d7dc9bde097..7e2f36004a5 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -453,12 +453,12 @@ static struct snd_soc_platform_driver bf5xx_ac97_soc_platform = {
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
-static int __devinit bf5xx_soc_platform_probe(struct platform_device *pdev)
+static int bf5xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
}
-static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev)
+static int bf5xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -471,7 +471,7 @@ static struct platform_driver bf5xx_pcm_driver = {
},
.probe = bf5xx_soc_platform_probe,
- .remove = __devexit_p(bf5xx_soc_platform_remove),
+ .remove = bf5xx_soc_platform_remove,
};
module_platform_driver(bf5xx_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index f4e9dc4e262..8e41bcb020e 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -282,7 +282,7 @@ static struct snd_soc_dai_driver bfin_ac97_dai = {
.formats = SNDRV_PCM_FMTBIT_S16_LE, },
};
-static int __devinit asoc_bfin_ac97_probe(struct platform_device *pdev)
+static int asoc_bfin_ac97_probe(struct platform_device *pdev)
{
struct sport_device *sport_handle;
int ret;
@@ -352,7 +352,7 @@ gpio_err:
return ret;
}
-static int __devexit asoc_bfin_ac97_remove(struct platform_device *pdev)
+static int asoc_bfin_ac97_remove(struct platform_device *pdev)
{
struct sport_device *sport_handle = platform_get_drvdata(pdev);
@@ -372,7 +372,7 @@ static struct platform_driver asoc_bfin_ac97_driver = {
},
.probe = asoc_bfin_ac97_probe,
- .remove = __devexit_p(asoc_bfin_ac97_remove),
+ .remove = asoc_bfin_ac97_remove,
};
module_platform_driver(asoc_bfin_ac97_driver);
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index 16b9c9efd19..d23f4b0ea54 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -75,7 +75,7 @@ static struct snd_soc_card bf5xx_ad1836 = {
.num_links = 1,
};
-static __devinit int bf5xx_ad1836_driver_probe(struct platform_device *pdev)
+static int bf5xx_ad1836_driver_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &bf5xx_ad1836;
const char **link_name;
@@ -98,7 +98,7 @@ static __devinit int bf5xx_ad1836_driver_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit bf5xx_ad1836_driver_remove(struct platform_device *pdev)
+static int bf5xx_ad1836_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -113,7 +113,7 @@ static struct platform_driver bf5xx_ad1836_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = bf5xx_ad1836_driver_probe,
- .remove = __devexit_p(bf5xx_ad1836_driver_remove),
+ .remove = bf5xx_ad1836_driver_remove,
};
module_platform_driver(bf5xx_ad1836_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 63205d723ea..262c1de364d 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -292,12 +292,12 @@ static struct snd_soc_platform_driver bf5xx_i2s_soc_platform = {
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
-static int __devinit bfin_i2s_soc_platform_probe(struct platform_device *pdev)
+static int bfin_i2s_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform);
}
-static int __devexit bfin_i2s_soc_platform_remove(struct platform_device *pdev)
+static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -310,7 +310,7 @@ static struct platform_driver bfin_i2s_pcm_driver = {
},
.probe = bfin_i2s_soc_platform_probe,
- .remove = __devexit_p(bfin_i2s_soc_platform_remove),
+ .remove = bfin_i2s_soc_platform_remove,
};
module_platform_driver(bfin_i2s_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index 4dccf0374fe..168d88bccb4 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -245,7 +245,7 @@ static struct snd_soc_dai_driver bf5xx_i2s_dai = {
.ops = &bf5xx_i2s_dai_ops,
};
-static int __devinit bf5xx_i2s_probe(struct platform_device *pdev)
+static int bf5xx_i2s_probe(struct platform_device *pdev)
{
struct sport_device *sport_handle;
int ret;
@@ -267,7 +267,7 @@ static int __devinit bf5xx_i2s_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bf5xx_i2s_remove(struct platform_device *pdev)
+static int bf5xx_i2s_remove(struct platform_device *pdev)
{
struct sport_device *sport_handle = platform_get_drvdata(pdev);
@@ -281,7 +281,7 @@ static int __devexit bf5xx_i2s_remove(struct platform_device *pdev)
static struct platform_driver bfin_i2s_driver = {
.probe = bf5xx_i2s_probe,
- .remove = __devexit_p(bf5xx_i2s_remove),
+ .remove = bf5xx_i2s_remove,
.driver = {
.name = "bfin-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.c b/sound/soc/blackfin/bf5xx-tdm-pcm.c
index 254490cf187..0e6b888bb4c 100644
--- a/sound/soc/blackfin/bf5xx-tdm-pcm.c
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.c
@@ -317,12 +317,12 @@ static struct snd_soc_platform_driver bf5xx_tdm_soc_platform = {
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
-static int __devinit bf5xx_soc_platform_probe(struct platform_device *pdev)
+static int bf5xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_tdm_soc_platform);
}
-static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev)
+static int bf5xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -335,7 +335,7 @@ static struct platform_driver bfin_tdm_driver = {
},
.probe = bf5xx_soc_platform_probe,
- .remove = __devexit_p(bf5xx_soc_platform_remove),
+ .remove = bf5xx_soc_platform_remove,
};
module_platform_driver(bfin_tdm_driver);
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
index 594f88217c7..c1e516ec53a 100644
--- a/sound/soc/blackfin/bf5xx-tdm.c
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -249,7 +249,7 @@ static struct snd_soc_dai_driver bf5xx_tdm_dai = {
.ops = &bf5xx_tdm_dai_ops,
};
-static int __devinit bfin_tdm_probe(struct platform_device *pdev)
+static int bfin_tdm_probe(struct platform_device *pdev)
{
struct sport_device *sport_handle;
int ret;
@@ -295,7 +295,7 @@ sport_config_err:
return ret;
}
-static int __devexit bfin_tdm_remove(struct platform_device *pdev)
+static int bfin_tdm_remove(struct platform_device *pdev)
{
struct sport_device *sport_handle = platform_get_drvdata(pdev);
@@ -307,7 +307,7 @@ static int __devexit bfin_tdm_remove(struct platform_device *pdev)
static struct platform_driver bfin_tdm_driver = {
.probe = bfin_tdm_probe,
- .remove = __devexit_p(bfin_tdm_remove),
+ .remove = bfin_tdm_remove,
.driver = {
.name = "bfin-tdm",
.owner = THIS_MODULE,
diff --git a/sound/soc/blackfin/bf6xx-i2s.c b/sound/soc/blackfin/bf6xx-i2s.c
index c3c2466d3a4..8f337972f43 100644
--- a/sound/soc/blackfin/bf6xx-i2s.c
+++ b/sound/soc/blackfin/bf6xx-i2s.c
@@ -186,7 +186,7 @@ static struct snd_soc_dai_driver bfin_i2s_dai = {
.ops = &bfin_i2s_dai_ops,
};
-static int __devinit bfin_i2s_probe(struct platform_device *pdev)
+static int bfin_i2s_probe(struct platform_device *pdev)
{
struct sport_device *sport;
struct device *dev = &pdev->dev;
@@ -208,7 +208,7 @@ static int __devinit bfin_i2s_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bfin_i2s_remove(struct platform_device *pdev)
+static int bfin_i2s_remove(struct platform_device *pdev)
{
struct sport_device *sport = platform_get_drvdata(pdev);
@@ -220,7 +220,7 @@ static int __devexit bfin_i2s_remove(struct platform_device *pdev)
static struct platform_driver bfin_i2s_driver = {
.probe = bfin_i2s_probe,
- .remove = __devexit_p(bfin_i2s_remove),
+ .remove = bfin_i2s_remove,
.driver = {
.name = "bfin-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/blackfin/bfin-eval-adau1373.c b/sound/soc/blackfin/bfin-eval-adau1373.c
index f3adbdbdd5e..4ef9683bcad 100644
--- a/sound/soc/blackfin/bfin-eval-adau1373.c
+++ b/sound/soc/blackfin/bfin-eval-adau1373.c
@@ -157,7 +157,7 @@ static int bfin_eval_adau1373_probe(struct platform_device *pdev)
return snd_soc_register_card(&bfin_eval_adau1373);
}
-static int __devexit bfin_eval_adau1373_remove(struct platform_device *pdev)
+static int bfin_eval_adau1373_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -173,7 +173,7 @@ static struct platform_driver bfin_eval_adau1373_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1373_probe,
- .remove = __devexit_p(bfin_eval_adau1373_remove),
+ .remove = bfin_eval_adau1373_remove,
};
module_platform_driver(bfin_eval_adau1373_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adau1701.c b/sound/soc/blackfin/bfin-eval-adau1701.c
index b0531fc9d81..3b55081a96c 100644
--- a/sound/soc/blackfin/bfin-eval-adau1701.c
+++ b/sound/soc/blackfin/bfin-eval-adau1701.c
@@ -97,7 +97,7 @@ static int bfin_eval_adau1701_probe(struct platform_device *pdev)
return snd_soc_register_card(&bfin_eval_adau1701);
}
-static int __devexit bfin_eval_adau1701_remove(struct platform_device *pdev)
+static int bfin_eval_adau1701_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -113,7 +113,7 @@ static struct platform_driver bfin_eval_adau1701_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1701_probe,
- .remove = __devexit_p(bfin_eval_adau1701_remove),
+ .remove = bfin_eval_adau1701_remove,
};
module_platform_driver(bfin_eval_adau1701_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adav80x.c b/sound/soc/blackfin/bfin-eval-adav80x.c
index 84b09987b7f..3b1b61a4481 100644
--- a/sound/soc/blackfin/bfin-eval-adav80x.c
+++ b/sound/soc/blackfin/bfin-eval-adav80x.c
@@ -122,7 +122,7 @@ static int bfin_eval_adav80x_probe(struct platform_device *pdev)
return snd_soc_register_card(&bfin_eval_adav80x);
}
-static int __devexit bfin_eval_adav80x_remove(struct platform_device *pdev)
+static int bfin_eval_adav80x_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -145,7 +145,7 @@ static struct platform_driver bfin_eval_adav80x_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adav80x_probe,
- .remove = __devexit_p(bfin_eval_adav80x_remove),
+ .remove = bfin_eval_adav80x_remove,
.id_table = bfin_eval_adav80x_ids,
};
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
index e01cb02abd3..5db68cf7b28 100644
--- a/sound/soc/cirrus/edb93xx.c
+++ b/sound/soc/cirrus/edb93xx.c
@@ -80,7 +80,7 @@ static struct snd_soc_card snd_soc_edb93xx = {
.num_links = 1,
};
-static int __devinit edb93xx_probe(struct platform_device *pdev)
+static int edb93xx_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_edb93xx;
int ret;
@@ -101,7 +101,7 @@ static int __devinit edb93xx_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit edb93xx_remove(struct platform_device *pdev)
+static int edb93xx_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -117,7 +117,7 @@ static struct platform_driver edb93xx_driver = {
.owner = THIS_MODULE,
},
.probe = edb93xx_probe,
- .remove = __devexit_p(edb93xx_remove),
+ .remove = edb93xx_remove,
};
module_platform_driver(edb93xx_driver);
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index c3521653cfd..f3f50e6fd6e 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -352,7 +352,7 @@ static struct snd_soc_dai_driver ep93xx_ac97_dai = {
.ops = &ep93xx_ac97_dai_ops,
};
-static int __devinit ep93xx_ac97_probe(struct platform_device *pdev)
+static int ep93xx_ac97_probe(struct platform_device *pdev)
{
struct ep93xx_ac97_info *info;
struct resource *res;
@@ -402,7 +402,7 @@ fail:
return ret;
}
-static int __devexit ep93xx_ac97_remove(struct platform_device *pdev)
+static int ep93xx_ac97_remove(struct platform_device *pdev)
{
struct ep93xx_ac97_info *info = platform_get_drvdata(pdev);
@@ -420,7 +420,7 @@ static int __devexit ep93xx_ac97_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_ac97_driver = {
.probe = ep93xx_ac97_probe,
- .remove = __devexit_p(ep93xx_ac97_remove),
+ .remove = ep93xx_ac97_remove,
.driver = {
.name = "ep93xx-ac97",
.owner = THIS_MODULE,
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index ac4a7515e7b..3365d4e843b 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -422,7 +422,7 @@ fail:
return err;
}
-static int __devexit ep93xx_i2s_remove(struct platform_device *pdev)
+static int ep93xx_i2s_remove(struct platform_device *pdev)
{
struct ep93xx_i2s_info *info = dev_get_drvdata(&pdev->dev);
@@ -436,7 +436,7 @@ static int __devexit ep93xx_i2s_remove(struct platform_device *pdev)
static struct platform_driver ep93xx_i2s_driver = {
.probe = ep93xx_i2s_probe,
- .remove = __devexit_p(ep93xx_i2s_remove),
+ .remove = ep93xx_i2s_remove,
.driver = {
.name = "ep93xx-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/cirrus/ep93xx-pcm.c b/sound/soc/cirrus/ep93xx-pcm.c
index 665d9c94cc1..72eb7a49e16 100644
--- a/sound/soc/cirrus/ep93xx-pcm.c
+++ b/sound/soc/cirrus/ep93xx-pcm.c
@@ -213,12 +213,12 @@ static struct snd_soc_platform_driver ep93xx_soc_platform = {
.pcm_free = &ep93xx_pcm_free_dma_buffers,
};
-static int __devinit ep93xx_soc_platform_probe(struct platform_device *pdev)
+static int ep93xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &ep93xx_soc_platform);
}
-static int __devexit ep93xx_soc_platform_remove(struct platform_device *pdev)
+static int ep93xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -231,7 +231,7 @@ static struct platform_driver ep93xx_pcm_driver = {
},
.probe = ep93xx_soc_platform_probe,
- .remove = __devexit_p(ep93xx_soc_platform_remove),
+ .remove = ep93xx_soc_platform_remove,
};
module_platform_driver(ep93xx_pcm_driver);
diff --git a/sound/soc/cirrus/simone.c b/sound/soc/cirrus/simone.c
index dd997094eb3..a397bb0d817 100644
--- a/sound/soc/cirrus/simone.c
+++ b/sound/soc/cirrus/simone.c
@@ -41,7 +41,7 @@ static struct snd_soc_card snd_soc_simone = {
static struct platform_device *simone_snd_ac97_device;
-static int __devinit simone_probe(struct platform_device *pdev)
+static int simone_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_simone;
int ret;
@@ -63,7 +63,7 @@ static int __devinit simone_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit simone_remove(struct platform_device *pdev)
+static int simone_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -79,7 +79,7 @@ static struct platform_driver simone_driver = {
.owner = THIS_MODULE,
},
.probe = simone_probe,
- .remove = __devexit_p(simone_remove),
+ .remove = simone_remove,
};
module_platform_driver(simone_driver);
diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
index a193cea3cf3..9d77fe28dfc 100644
--- a/sound/soc/cirrus/snappercl15.c
+++ b/sound/soc/cirrus/snappercl15.c
@@ -98,7 +98,7 @@ static struct snd_soc_card snd_soc_snappercl15 = {
.num_links = 1,
};
-static int __devinit snappercl15_probe(struct platform_device *pdev)
+static int snappercl15_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_snappercl15;
int ret;
@@ -119,7 +119,7 @@ static int __devinit snappercl15_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit snappercl15_remove(struct platform_device *pdev)
+static int snappercl15_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -135,7 +135,7 @@ static struct platform_driver snappercl15_driver = {
.owner = THIS_MODULE,
},
.probe = snappercl15_probe,
- .remove = __devexit_p(snappercl15_remove),
+ .remove = snappercl15_remove,
};
module_platform_driver(snappercl15_driver);
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 9fd3b6827bb..60159c07448 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1423,7 +1423,7 @@ static struct snd_soc_codec_driver soc_codec_dev_pm860x = {
.num_dapm_routes = ARRAY_SIZE(pm860x_dapm_routes),
};
-static int __devinit pm860x_codec_probe(struct platform_device *pdev)
+static int pm860x_codec_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_priv *pm860x;
@@ -1463,7 +1463,7 @@ out:
return -EINVAL;
}
-static int __devexit pm860x_codec_remove(struct platform_device *pdev)
+static int pm860x_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
platform_set_drvdata(pdev, NULL);
@@ -1476,7 +1476,7 @@ static struct platform_driver pm860x_codec_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_codec_probe,
- .remove = __devexit_p(pm860x_codec_remove),
+ .remove = pm860x_codec_remove,
};
module_platform_driver(pm860x_codec_driver);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b92759a3936..3a847828932 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -44,6 +44,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_LM4857 if I2C
select SND_SOC_LM49453 if I2C
select SND_SOC_MAX98088 if I2C
+ select SND_SOC_MAX98090 if I2C
select SND_SOC_MAX98095 if I2C
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9768 if I2C
@@ -54,6 +55,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM3008
select SND_SOC_RT5631 if I2C
select SND_SOC_SGTL5000 if I2C
+ select SND_SOC_SI476X if MFD_SI476X_CORE
select SND_SOC_SN95031 if INTEL_SCU_IPC
select SND_SOC_SPDIF
select SND_SOC_SSM2602 if SND_SOC_I2C_AND_SPI
@@ -146,6 +148,13 @@ config SND_SOC_WM_HUBS
default y if SND_SOC_WM8993=y || SND_SOC_WM8994=y
default m if SND_SOC_WM8993=m || SND_SOC_WM8994=m
+config SND_SOC_WM_ADSP
+ tristate
+ default y if SND_SOC_WM5102=y
+ default y if SND_SOC_WM2200=y
+ default m if SND_SOC_WM5102=m
+ default m if SND_SOC_WM2200=m
+
config SND_SOC_AB8500_CODEC
tristate
@@ -229,6 +238,7 @@ config SND_SOC_CX20442
tristate
config SND_SOC_JZ4740_CODEC
+ select REGMAP_MMIO
tristate
config SND_SOC_L3
@@ -258,6 +268,9 @@ config SND_SOC_LM49453
config SND_SOC_MAX98088
tristate
+config SND_SOC_MAX98090
+ tristate
+
config SND_SOC_MAX98095
tristate
@@ -277,6 +290,9 @@ config SND_SOC_RT5631
config SND_SOC_SGTL5000
tristate
+config SND_SOC_SI476X
+ tristate
+
config SND_SOC_SIGMADSP
tristate
select CRC32
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 9bd4d95aab4..f6e8e36cceb 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -34,6 +34,7 @@ snd-soc-lm4857-objs := lm4857.o
snd-soc-lm49453-objs := lm49453.o
snd-soc-max9768-objs := max9768.o
snd-soc-max98088-objs := max98088.o
+snd-soc-max98090-objs := max98090.o
snd-soc-max98095-objs := max98095.o
snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
@@ -45,6 +46,7 @@ snd-soc-sgtl5000-objs := sgtl5000.o
snd-soc-alc5623-objs := alc5623.o
snd-soc-alc5632-objs := alc5632.o
snd-soc-sigmadsp-objs := sigmadsp.o
+snd-soc-si476x-objs := si476x.o
snd-soc-sn95031-objs := sn95031.o
snd-soc-spdif-tx-objs := spdif_transciever.o
snd-soc-spdif-rx-objs := spdif_receiver.o
@@ -62,6 +64,7 @@ snd-soc-twl6040-objs := twl6040.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
snd-soc-wl1273-objs := wl1273.o
+snd-soc-wm-adsp-objs := wm_adsp.o
snd-soc-wm0010-objs := wm0010.o
snd-soc-wm1250-ev1-objs := wm1250-ev1.o
snd-soc-wm2000-objs := wm2000.o
@@ -155,6 +158,7 @@ obj-$(CONFIG_SND_SOC_LM4857) += snd-soc-lm4857.o
obj-$(CONFIG_SND_SOC_LM49453) += snd-soc-lm49453.o
obj-$(CONFIG_SND_SOC_MAX9768) += snd-soc-max9768.o
obj-$(CONFIG_SND_SOC_MAX98088) += snd-soc-max98088.o
+obj-$(CONFIG_SND_SOC_MAX98090) += snd-soc-max98090.o
obj-$(CONFIG_SND_SOC_MAX98095) += snd-soc-max98095.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
@@ -164,6 +168,7 @@ obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
+obj-$(CONFIG_SND_SOC_SI476X) += snd-soc-si476x.o
obj-$(CONFIG_SND_SOC_SN95031) +=snd-soc-sn95031.o
obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif-rx.o snd-soc-spdif-tx.o
obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
@@ -229,6 +234,7 @@ obj-$(CONFIG_SND_SOC_WM9090) += snd-soc-wm9090.o
obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
+obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o
obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
# Amp
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index af547490b4f..6c12ac206ee 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2356,7 +2356,7 @@ static int ab8500_codec_set_dai_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
-struct snd_soc_dai_driver ab8500_codec_dai[] = {
+static struct snd_soc_dai_driver ab8500_codec_dai[] = {
{
.name = "ab8500-codec-dai.0",
.id = 0,
@@ -2554,7 +2554,7 @@ static struct snd_soc_codec_driver ab8500_codec_driver = {
.num_dapm_routes = ARRAY_SIZE(ab8500_dapm_routes),
};
-static int __devinit ab8500_codec_driver_probe(struct platform_device *pdev)
+static int ab8500_codec_driver_probe(struct platform_device *pdev)
{
int status;
struct ab8500_codec_drvdata *drvdata;
@@ -2580,7 +2580,7 @@ static int __devinit ab8500_codec_driver_probe(struct platform_device *pdev)
return status;
}
-static int __devexit ab8500_codec_driver_remove(struct platform_device *pdev)
+static int ab8500_codec_driver_remove(struct platform_device *pdev)
{
dev_info(&pdev->dev, "%s Enter.\n", __func__);
@@ -2595,7 +2595,7 @@ static struct platform_driver ab8500_codec_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_codec_driver_probe,
- .remove = __devexit_p(ab8500_codec_driver_remove),
+ .remove = ab8500_codec_driver_remove,
.suspend = NULL,
.resume = NULL,
};
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index ea06b834a7d..ef2ae32ffc6 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -118,13 +118,13 @@ static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
.resume = ac97_soc_resume,
};
-static __devinit int ac97_probe(struct platform_device *pdev)
+static int ac97_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_ac97, &ac97_dai, 1);
}
-static int __devexit ac97_remove(struct platform_device *pdev)
+static int ac97_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -137,7 +137,7 @@ static struct platform_driver ac97_codec_driver = {
},
.probe = ac97_probe,
- .remove = __devexit_p(ac97_remove),
+ .remove = ac97_remove,
};
module_platform_driver(ac97_codec_driver);
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index dce6ebeef45..9a92b7962f4 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -360,7 +360,7 @@ static const struct regmap_config ad1836_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit ad1836_spi_probe(struct spi_device *spi)
+static int ad1836_spi_probe(struct spi_device *spi)
{
struct ad1836_priv *ad1836;
int ret;
@@ -383,7 +383,7 @@ static int __devinit ad1836_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit ad1836_spi_remove(struct spi_device *spi)
+static int ad1836_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -405,7 +405,7 @@ static struct spi_driver ad1836_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad1836_spi_probe,
- .remove = __devexit_p(ad1836_spi_remove),
+ .remove = ad1836_spi_remove,
.id_table = ad1836_ids,
};
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 2f752660f67..aea7e52cf71 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -378,7 +378,7 @@ static const struct regmap_config ad193x_spi_regmap_config = {
.volatile_reg = adau193x_reg_volatile,
};
-static int __devinit ad193x_spi_probe(struct spi_device *spi)
+static int ad193x_spi_probe(struct spi_device *spi)
{
struct ad193x_priv *ad193x;
@@ -397,7 +397,7 @@ static int __devinit ad193x_spi_probe(struct spi_device *spi)
&ad193x_dai, 1);
}
-static int __devexit ad193x_spi_remove(struct spi_device *spi)
+static int ad193x_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -409,7 +409,7 @@ static struct spi_driver ad193x_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad193x_spi_probe,
- .remove = __devexit_p(ad193x_spi_remove),
+ .remove = ad193x_spi_remove,
};
#endif
@@ -430,8 +430,8 @@ static const struct i2c_device_id ad193x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad193x_id);
-static int __devinit ad193x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ad193x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct ad193x_priv *ad193x;
@@ -450,7 +450,7 @@ static int __devinit ad193x_i2c_probe(struct i2c_client *client,
&ad193x_dai, 1);
}
-static int __devexit ad193x_i2c_remove(struct i2c_client *client)
+static int ad193x_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -461,7 +461,7 @@ static struct i2c_driver ad193x_i2c_driver = {
.name = "ad193x",
},
.probe = ad193x_i2c_probe,
- .remove = __devexit_p(ad193x_i2c_remove),
+ .remove = ad193x_i2c_remove,
.id_table = ad193x_id,
};
#endif
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 8c39dddd7d0..f385342947d 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -255,13 +255,13 @@ static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
.read = ac97_read,
};
-static __devinit int ad1980_probe(struct platform_device *pdev)
+static int ad1980_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_ad1980, &ad1980_dai, 1);
}
-static int __devexit ad1980_remove(struct platform_device *pdev)
+static int ad1980_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -274,7 +274,7 @@ static struct platform_driver ad1980_codec_driver = {
},
.probe = ad1980_probe,
- .remove = __devexit_p(ad1980_remove),
+ .remove = ad1980_remove,
};
module_platform_driver(ad1980_codec_driver);
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index ee7a68dcefd..b1f2baf42b4 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -47,7 +47,7 @@ static int ad73311_probe(struct platform_device *pdev)
&soc_codec_dev_ad73311, &ad73311_dai, 1);
}
-static int __devexit ad73311_remove(struct platform_device *pdev)
+static int ad73311_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -60,7 +60,7 @@ static struct platform_driver ad73311_codec_driver = {
},
.probe = ad73311_probe,
- .remove = __devexit_p(ad73311_remove),
+ .remove = ad73311_remove,
};
module_platform_driver(ad73311_codec_driver);
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 704544bfc90..068b3ae56a1 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -1353,8 +1353,8 @@ static struct snd_soc_codec_driver adau1373_codec_driver = {
.num_dapm_routes = ARRAY_SIZE(adau1373_dapm_routes),
};
-static int __devinit adau1373_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adau1373_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adau1373 *adau1373;
int ret;
@@ -1370,7 +1370,7 @@ static int __devinit adau1373_i2c_probe(struct i2c_client *client,
return ret;
}
-static int __devexit adau1373_i2c_remove(struct i2c_client *client)
+static int adau1373_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1388,7 +1388,7 @@ static struct i2c_driver adau1373_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = adau1373_i2c_probe,
- .remove = __devexit_p(adau1373_i2c_remove),
+ .remove = adau1373_i2c_remove,
.id_table = adau1373_i2c_id,
};
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index 51f2f3cd813..dafdbe87ede 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -489,8 +489,8 @@ static struct snd_soc_codec_driver adau1701_codec_drv = {
.set_sysclk = adau1701_set_sysclk,
};
-static __devinit int adau1701_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adau1701_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adau1701 *adau1701;
int ret;
@@ -505,7 +505,7 @@ static __devinit int adau1701_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int adau1701_i2c_remove(struct i2c_client *client)
+static int adau1701_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -523,7 +523,7 @@ static struct i2c_driver adau1701_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = adau1701_i2c_probe,
- .remove = __devexit_p(adau1701_i2c_remove),
+ .remove = adau1701_i2c_remove,
.id_table = adau1701_i2c_id,
};
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index ebd7b37b902..3c839cc4e00 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -839,8 +839,8 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
.num_dapm_routes = ARRAY_SIZE(adav80x_dapm_routes),
};
-static int __devinit adav80x_bus_probe(struct device *dev,
- enum snd_soc_control_type control_type)
+static int adav80x_bus_probe(struct device *dev,
+ enum snd_soc_control_type control_type)
{
struct adav80x *adav80x;
int ret;
@@ -860,7 +860,7 @@ static int __devinit adav80x_bus_probe(struct device *dev,
return ret;
}
-static int __devexit adav80x_bus_remove(struct device *dev)
+static int adav80x_bus_remove(struct device *dev)
{
snd_soc_unregister_codec(dev);
kfree(dev_get_drvdata(dev));
@@ -868,12 +868,12 @@ static int __devexit adav80x_bus_remove(struct device *dev)
}
#if defined(CONFIG_SPI_MASTER)
-static int __devinit adav80x_spi_probe(struct spi_device *spi)
+static int adav80x_spi_probe(struct spi_device *spi)
{
return adav80x_bus_probe(&spi->dev, SND_SOC_SPI);
}
-static int __devexit adav80x_spi_remove(struct spi_device *spi)
+static int adav80x_spi_remove(struct spi_device *spi)
{
return adav80x_bus_remove(&spi->dev);
}
@@ -884,7 +884,7 @@ static struct spi_driver adav80x_spi_driver = {
.owner = THIS_MODULE,
},
.probe = adav80x_spi_probe,
- .remove = __devexit_p(adav80x_spi_remove),
+ .remove = adav80x_spi_remove,
};
#endif
@@ -895,13 +895,13 @@ static const struct i2c_device_id adav80x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adav80x_id);
-static int __devinit adav80x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adav80x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
return adav80x_bus_probe(&client->dev, SND_SOC_I2C);
}
-static int __devexit adav80x_i2c_remove(struct i2c_client *client)
+static int adav80x_i2c_remove(struct i2c_client *client)
{
return adav80x_bus_remove(&client->dev);
}
@@ -912,7 +912,7 @@ static struct i2c_driver adav80x_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = adav80x_i2c_probe,
- .remove = __devexit_p(adav80x_i2c_remove),
+ .remove = adav80x_i2c_remove,
.id_table = adav80x_id,
};
#endif
diff --git a/sound/soc/codecs/ads117x.c b/sound/soc/codecs/ads117x.c
index 8103b938b8c..506d474c4d2 100644
--- a/sound/soc/codecs/ads117x.c
+++ b/sound/soc/codecs/ads117x.c
@@ -36,13 +36,13 @@ static struct snd_soc_dai_driver ads117x_dai = {
static struct snd_soc_codec_driver soc_codec_dev_ads117x;
-static __devinit int ads117x_probe(struct platform_device *pdev)
+static int ads117x_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_ads117x, &ads117x_dai, 1);
}
-static int __devexit ads117x_remove(struct platform_device *pdev)
+static int ads117x_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -55,7 +55,7 @@ static struct platform_driver ads117x_codec_driver = {
},
.probe = ads117x_probe,
- .remove = __devexit_p(ads117x_remove),
+ .remove = ads117x_remove,
};
module_platform_driver(ads117x_codec_driver);
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index 31d4483245d..6f6c335a5ba 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -15,6 +15,8 @@
#include <sound/soc.h>
#include <sound/initval.h>
#include <linux/spi/spi.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <sound/asoundef.h>
/* AK4104 registers addresses */
@@ -98,14 +100,32 @@ static int ak4104_hw_params(struct snd_pcm_substream *substream,
val = 0;
switch (params_rate(params)) {
+ case 22050:
+ val |= IEC958_AES3_CON_FS_22050;
+ break;
+ case 24000:
+ val |= IEC958_AES3_CON_FS_24000;
+ break;
+ case 32000:
+ val |= IEC958_AES3_CON_FS_32000;
+ break;
case 44100:
val |= IEC958_AES3_CON_FS_44100;
break;
case 48000:
val |= IEC958_AES3_CON_FS_48000;
break;
- case 32000:
- val |= IEC958_AES3_CON_FS_32000;
+ case 88200:
+ val |= IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ val |= IEC958_AES3_CON_FS_96000;
+ break;
+ case 176400:
+ val |= IEC958_AES3_CON_FS_176400;
+ break;
+ case 192000:
+ val |= IEC958_AES3_CON_FS_192000;
break;
default:
dev_err(codec->dev, "unsupported sampling rate\n");
@@ -186,6 +206,7 @@ static const struct regmap_config ak4104_regmap = {
static int ak4104_spi_probe(struct spi_device *spi)
{
+ struct device_node *np = spi->dev.of_node;
struct ak4104_private *ak4104;
unsigned int val;
int ret;
@@ -201,52 +222,62 @@ static int ak4104_spi_probe(struct spi_device *spi)
if (ak4104 == NULL)
return -ENOMEM;
- ak4104->regmap = regmap_init_spi(spi, &ak4104_regmap);
+ ak4104->regmap = devm_regmap_init_spi(spi, &ak4104_regmap);
if (IS_ERR(ak4104->regmap)) {
ret = PTR_ERR(ak4104->regmap);
return ret;
}
+ if (np) {
+ enum of_gpio_flags flags;
+ int gpio = of_get_named_gpio_flags(np, "reset-gpio", 0, &flags);
+
+ if (gpio_is_valid(gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, gpio,
+ flags & OF_GPIO_ACTIVE_LOW ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+ "ak4104 reset");
+ if (ret < 0)
+ return ret;
+ }
+ }
+
/* read the 'reserved' register - according to the datasheet, it
* should contain 0x5b. Not a good way to verify the presence of
* the device, but there is no hardware ID register. */
ret = regmap_read(ak4104->regmap, AK4104_REG_RESERVED, &val);
if (ret != 0)
- goto err;
- if (val != AK4104_RESERVED_VAL) {
- ret = -ENODEV;
- goto err;
- }
+ return ret;
+ if (val != AK4104_RESERVED_VAL)
+ return -ENODEV;
spi_set_drvdata(spi, ak4104);
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_device_ak4104, &ak4104_dai, 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regmap_exit(ak4104->regmap);
return ret;
}
-static int __devexit ak4104_spi_remove(struct spi_device *spi)
+static int ak4104_spi_remove(struct spi_device *spi)
{
- struct ak4104_private *ak4101 = spi_get_drvdata(spi);
- regmap_exit(ak4101->regmap);
snd_soc_unregister_codec(&spi->dev);
return 0;
}
+static const struct of_device_id ak4104_of_match[] = {
+ { .compatible = "asahi-kasei,ak4104", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ak4104_of_match);
+
static struct spi_driver ak4104_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = ak4104_of_match,
},
.probe = ak4104_spi_probe,
- .remove = __devexit_p(ak4104_spi_remove),
+ .remove = ak4104_spi_remove,
};
module_spi_driver(ak4104_spi_driver);
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 618fdc30f73..684fe910669 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -436,8 +436,8 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4535 = {
.num_dapm_routes = ARRAY_SIZE(ak4535_audio_map),
};
-static __devinit int ak4535_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ak4535_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct ak4535_priv *ak4535;
int ret;
@@ -447,7 +447,7 @@ static __devinit int ak4535_i2c_probe(struct i2c_client *i2c,
if (ak4535 == NULL)
return -ENOMEM;
- ak4535->regmap = regmap_init_i2c(i2c, &ak4535_regmap);
+ ak4535->regmap = devm_regmap_init_i2c(i2c, &ak4535_regmap);
if (IS_ERR(ak4535->regmap)) {
ret = PTR_ERR(ak4535->regmap);
dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
@@ -458,18 +458,13 @@ static __devinit int ak4535_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_ak4535, &ak4535_dai, 1);
- if (ret != 0)
- regmap_exit(ak4535->regmap);
return ret;
}
-static __devexit int ak4535_i2c_remove(struct i2c_client *client)
+static int ak4535_i2c_remove(struct i2c_client *client)
{
- struct ak4535_priv *ak4535 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(ak4535->regmap);
return 0;
}
@@ -485,7 +480,7 @@ static struct i2c_driver ak4535_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ak4535_i2c_probe,
- .remove = __devexit_p(ak4535_i2c_remove),
+ .remove = ak4535_i2c_remove,
.id_table = ak4535_i2c_id,
};
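The ak4535 change above is one instance of a pattern repeated throughout this series: switching to device-managed regmaps so the explicit regmap_exit() calls and the error-unwind labels disappear from probe() and remove(). A minimal sketch, with illustrative names and register sizes:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct example_priv {
	struct regmap *regmap;
};

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 8,		/* sizes are placeholders */
	.val_bits = 8,
};

static int example_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Released automatically on unbind; no regmap_exit() needed. */
	priv->regmap = devm_regmap_init_i2c(i2c, &example_regmap_cfg);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	i2c_set_clientdata(i2c, priv);
	return 0;
}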
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 543a12f471b..5f9af1fb76e 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -557,8 +557,8 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
};
-static int __devinit ak4641_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ak4641_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct ak4641_platform_data *pdata = i2c->dev.platform_data;
struct ak4641_priv *ak4641;
@@ -610,7 +610,7 @@ err_out:
return ret;
}
-static int __devexit ak4641_i2c_remove(struct i2c_client *i2c)
+static int ak4641_i2c_remove(struct i2c_client *i2c)
{
struct ak4641_platform_data *pdata = i2c->dev.platform_data;
@@ -640,7 +640,7 @@ static struct i2c_driver ak4641_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ak4641_i2c_probe,
- .remove = __devexit_p(ak4641_i2c_remove),
+ .remove = ak4641_i2c_remove,
.id_table = ak4641_i2c_id,
};
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index b3e24f28942..1f0cdab0329 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -194,12 +194,6 @@ static const struct snd_soc_dapm_route ak4642_intercon[] = {
{"LINEOUT Mixer", "DACL", "DAC"},
};
-/* codec private data */
-struct ak4642_priv {
- unsigned int sysclk;
- enum snd_soc_control_type control_type;
-};
-
/*
* ak4642 register cache
*/
@@ -468,10 +462,9 @@ static int ak4642_resume(struct snd_soc_codec *codec)
static int ak4642_probe(struct snd_soc_codec *codec)
{
- struct ak4642_priv *ak4642 = snd_soc_codec_get_drvdata(codec);
int ret;
- ret = snd_soc_codec_set_cache_io(codec, 8, 8, ak4642->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
@@ -520,27 +513,15 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4648 = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int ak4642_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ak4642_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
- struct ak4642_priv *ak4642;
- int ret;
-
- ak4642 = devm_kzalloc(&i2c->dev, sizeof(struct ak4642_priv),
- GFP_KERNEL);
- if (!ak4642)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, ak4642);
- ak4642->control_type = SND_SOC_I2C;
-
- ret = snd_soc_register_codec(&i2c->dev,
+ return snd_soc_register_codec(&i2c->dev,
(struct snd_soc_codec_driver *)id->driver_data,
&ak4642_dai, 1);
- return ret;
}
-static __devexit int ak4642_i2c_remove(struct i2c_client *client)
+static int ak4642_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -560,7 +541,7 @@ static struct i2c_driver ak4642_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ak4642_i2c_probe,
- .remove = __devexit_p(ak4642_i2c_remove),
+ .remove = ak4642_i2c_remove,
.id_table = ak4642_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 2b457976a7b..25bdf6ad4a5 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -655,8 +655,8 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4671 = {
.num_dapm_routes = ARRAY_SIZE(ak4671_intercon),
};
-static int __devinit ak4671_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ak4671_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct ak4671_priv *ak4671;
int ret;
@@ -674,7 +674,7 @@ static int __devinit ak4671_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int ak4671_i2c_remove(struct i2c_client *client)
+static int ak4671_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -692,7 +692,7 @@ static struct i2c_driver ak4671_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ak4671_i2c_probe,
- .remove = __devexit_p(ak4671_i2c_remove),
+ .remove = ak4671_i2c_remove,
.id_table = ak4671_i2c_id,
};
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 1960478ce6b..256c364193a 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -991,8 +991,8 @@ static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
* low = 0x1a
* high = 0x1b
*/
-static __devinit int alc5623_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int alc5623_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct alc5623_platform_data *pdata;
struct alc5623_priv *alc5623;
@@ -1058,7 +1058,7 @@ static __devinit int alc5623_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int alc5623_i2c_remove(struct i2c_client *client)
+static int alc5623_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1079,7 +1079,7 @@ static struct i2c_driver alc5623_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = alc5623_i2c_probe,
- .remove = __devexit_p(alc5623_i2c_remove),
+ .remove = alc5623_i2c_remove,
.id_table = alc5623_i2c_table,
};
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 7dd02420b36..f2e62e45f91 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1116,8 +1116,8 @@ static struct regmap_config alc5632_regmap = {
* low = 0x1a
* high = 0x1b
*/
-static __devinit int alc5632_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int alc5632_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct alc5632_priv *alc5632;
int ret, ret1, ret2;
@@ -1179,7 +1179,7 @@ static __devinit int alc5632_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int alc5632_i2c_remove(struct i2c_client *client)
+static int alc5632_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1198,7 +1198,7 @@ static struct i2c_driver alc5632_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = alc5632_i2c_probe,
- .remove = __devexit_p(alc5632_i2c_remove),
+ .remove = alc5632_i2c_remove,
.id_table = alc5632_i2c_table,
};
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 054967d8bac..adf397b9d0e 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -226,6 +226,31 @@ EXPORT_SYMBOL_GPL(arizona_mixer_values);
const DECLARE_TLV_DB_SCALE(arizona_mixer_tlv, -3200, 100, 0);
EXPORT_SYMBOL_GPL(arizona_mixer_tlv);
+static const char *arizona_vol_ramp_text[] = {
+ "0ms/6dB", "0.5ms/6dB", "1ms/6dB", "2ms/6dB", "4ms/6dB", "8ms/6dB",
+ "15ms/6dB", "30ms/6dB",
+};
+
+const struct soc_enum arizona_in_vd_ramp =
+ SOC_ENUM_SINGLE(ARIZONA_INPUT_VOLUME_RAMP,
+ ARIZONA_IN_VD_RAMP_SHIFT, 7, arizona_vol_ramp_text);
+EXPORT_SYMBOL_GPL(arizona_in_vd_ramp);
+
+const struct soc_enum arizona_in_vi_ramp =
+ SOC_ENUM_SINGLE(ARIZONA_INPUT_VOLUME_RAMP,
+ ARIZONA_IN_VI_RAMP_SHIFT, 7, arizona_vol_ramp_text);
+EXPORT_SYMBOL_GPL(arizona_in_vi_ramp);
+
+const struct soc_enum arizona_out_vd_ramp =
+ SOC_ENUM_SINGLE(ARIZONA_OUTPUT_VOLUME_RAMP,
+ ARIZONA_OUT_VD_RAMP_SHIFT, 7, arizona_vol_ramp_text);
+EXPORT_SYMBOL_GPL(arizona_out_vd_ramp);
+
+const struct soc_enum arizona_out_vi_ramp =
+ SOC_ENUM_SINGLE(ARIZONA_OUTPUT_VOLUME_RAMP,
+ ARIZONA_OUT_VI_RAMP_SHIFT, 7, arizona_vol_ramp_text);
+EXPORT_SYMBOL_GPL(arizona_out_vi_ramp);
+
static const char *arizona_lhpf_mode_text[] = {
"Low-pass", "High-pass"
};
@@ -380,6 +405,18 @@ int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
case 49152000:
val |= 3 << ARIZONA_SYSCLK_FREQ_SHIFT;
break;
+ case 67737600:
+ case 73728000:
+ val |= 4 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
+ case 90316800:
+ case 98304000:
+ val |= 5 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
+ case 135475200:
+ case 147456000:
+ val |= 6 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
default:
return -EINVAL;
}
@@ -737,6 +774,9 @@ static int arizona_dai_set_sysclk(struct snd_soc_dai *dai,
return -EBUSY;
}
+ dev_dbg(codec->dev, "Setting AIF%d to %s\n", dai->id + 1,
+ arizona_dai_clk_str(clk_id));
+
memset(&routes, 0, sizeof(routes));
routes[0].sink = dai->driver->capture.stream_name;
routes[1].sink = dai->driver->playback.stream_name;
@@ -749,6 +789,8 @@ static int arizona_dai_set_sysclk(struct snd_soc_dai *dai,
routes[1].source = arizona_dai_clk_str(clk_id);
snd_soc_dapm_add_routes(&codec->dapm, routes, ARRAY_SIZE(routes));
+ dai_priv->clk = clk_id;
+
return snd_soc_dapm_sync(&codec->dapm);
}
@@ -925,6 +967,9 @@ int arizona_set_fll(struct arizona_fll *fll, int source,
bool ena;
int ret;
+ if (fll->fref == Fref && fll->fout == Fout)
+ return 0;
+
ret = regmap_read(arizona->regmap, fll->base + 1, &reg);
if (ret != 0) {
arizona_fll_err(fll, "Failed to read current state: %d\n",
@@ -970,6 +1015,9 @@ int arizona_set_fll(struct arizona_fll *fll, int source,
if (ena)
pm_runtime_put_autosuspend(arizona->dev);
+ fll->fref = Fref;
+ fll->fout = Fout;
+
return 0;
}
@@ -998,10 +1046,13 @@ int arizona_set_fll(struct arizona_fll *fll, int source,
ARIZONA_FLL1_SYNC_ENA);
ret = wait_for_completion_timeout(&fll->ok,
- msecs_to_jiffies(25));
+ msecs_to_jiffies(250));
if (ret == 0)
arizona_fll_warn(fll, "Timed out waiting for lock\n");
+ fll->fref = Fref;
+ fll->fout = Fout;
+
return 0;
}
EXPORT_SYMBOL_GPL(arizona_set_fll);
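A reduced sketch of the caching added to arizona_set_fll() above: remember the last programmed Fref/Fout and return early when nothing changes, so the FLL is not needlessly stopped and reprogrammed. Structure and names are illustrative:

struct example_fll {
	unsigned int fref;
	unsigned int fout;
};

static int example_set_fll(struct example_fll *fll,
			   unsigned int fref, unsigned int fout)
{
	if (fll->fref == fref && fll->fout == fout)
		return 0;		/* already configured as requested */

	/* ... reprogram and re-enable the FLL here ... */

	fll->fref = fref;		/* record what is now in hardware */
	fll->fout = fout;
	return 0;
}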
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 36ec6494612..41dae1ed3b7 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -17,6 +17,8 @@
#include <sound/soc.h>
+#include "wm_adsp.h"
+
#define ARIZONA_CLK_SYSCLK 1
#define ARIZONA_CLK_ASYNCCLK 2
#define ARIZONA_CLK_OPCLK 3
@@ -46,15 +48,18 @@
#define ARIZONA_MIXER_VOL_SHIFT 1
#define ARIZONA_MIXER_VOL_WIDTH 7
-#define ARIZONA_MAX_DAI 3
+#define ARIZONA_MAX_DAI 4
+#define ARIZONA_MAX_ADSP 4
struct arizona;
+struct wm_adsp;
struct arizona_dai_priv {
int clk;
};
struct arizona_priv {
+ struct wm_adsp adsp[ARIZONA_MAX_ADSP];
struct arizona *arizona;
int sysclk;
int asyncclk;
@@ -89,19 +94,30 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
const struct snd_kcontrol_new name##_mux = \
SOC_DAPM_VALUE_ENUM("Route", name##_enum)
+#define ARIZONA_MUX_ENUMS(name, base_reg) \
+ static ARIZONA_MUX_ENUM_DECL(name##_enum, base_reg); \
+ static ARIZONA_MUX_CTL_DECL(name)
+
#define ARIZONA_MIXER_ENUMS(name, base_reg) \
- static ARIZONA_MUX_ENUM_DECL(name##_in1_enum, base_reg); \
- static ARIZONA_MUX_ENUM_DECL(name##_in2_enum, base_reg + 2); \
- static ARIZONA_MUX_ENUM_DECL(name##_in3_enum, base_reg + 4); \
- static ARIZONA_MUX_ENUM_DECL(name##_in4_enum, base_reg + 6); \
- static ARIZONA_MUX_CTL_DECL(name##_in1); \
- static ARIZONA_MUX_CTL_DECL(name##_in2); \
- static ARIZONA_MUX_CTL_DECL(name##_in3); \
- static ARIZONA_MUX_CTL_DECL(name##_in4)
+ ARIZONA_MUX_ENUMS(name##_in1, base_reg); \
+ ARIZONA_MUX_ENUMS(name##_in2, base_reg + 2); \
+ ARIZONA_MUX_ENUMS(name##_in3, base_reg + 4); \
+ ARIZONA_MUX_ENUMS(name##_in4, base_reg + 6)
+
+#define ARIZONA_DSP_AUX_ENUMS(name, base_reg) \
+ ARIZONA_MUX_ENUMS(name##_aux1, base_reg); \
+ ARIZONA_MUX_ENUMS(name##_aux2, base_reg + 8); \
+ ARIZONA_MUX_ENUMS(name##_aux3, base_reg + 16); \
+ ARIZONA_MUX_ENUMS(name##_aux4, base_reg + 24); \
+ ARIZONA_MUX_ENUMS(name##_aux5, base_reg + 32); \
+ ARIZONA_MUX_ENUMS(name##_aux6, base_reg + 40)
#define ARIZONA_MUX(name, ctrl) \
SND_SOC_DAPM_VALUE_MUX(name, SND_SOC_NOPM, 0, 0, ctrl)
+#define ARIZONA_MUX_WIDGETS(name, name_str) \
+ ARIZONA_MUX(name_str " Input", &name##_mux)
+
#define ARIZONA_MIXER_WIDGETS(name, name_str) \
ARIZONA_MUX(name_str " Input 1", &name##_in1_mux), \
ARIZONA_MUX(name_str " Input 2", &name##_in2_mux), \
@@ -109,6 +125,19 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
ARIZONA_MUX(name_str " Input 4", &name##_in4_mux), \
SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)
+#define ARIZONA_DSP_WIDGETS(name, name_str) \
+ ARIZONA_MIXER_WIDGETS(name##L, name_str "L"), \
+ ARIZONA_MIXER_WIDGETS(name##R, name_str "R"), \
+ ARIZONA_MUX(name_str " Aux 1", &name##_aux1_mux), \
+ ARIZONA_MUX(name_str " Aux 2", &name##_aux2_mux), \
+ ARIZONA_MUX(name_str " Aux 3", &name##_aux3_mux), \
+ ARIZONA_MUX(name_str " Aux 4", &name##_aux4_mux), \
+ ARIZONA_MUX(name_str " Aux 5", &name##_aux5_mux), \
+ ARIZONA_MUX(name_str " Aux 6", &name##_aux6_mux)
+
+#define ARIZONA_MUX_ROUTES(name) \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Input")
+
#define ARIZONA_MIXER_ROUTES(widget, name) \
{ widget, NULL, name " Mixer" }, \
{ name " Mixer", NULL, name " Input 1" }, \
@@ -120,6 +149,28 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
ARIZONA_MIXER_INPUT_ROUTES(name " Input 3"), \
ARIZONA_MIXER_INPUT_ROUTES(name " Input 4")
+#define ARIZONA_DSP_ROUTES(name) \
+ { name, NULL, name " Aux 1" }, \
+ { name, NULL, name " Aux 2" }, \
+ { name, NULL, name " Aux 3" }, \
+ { name, NULL, name " Aux 4" }, \
+ { name, NULL, name " Aux 5" }, \
+ { name, NULL, name " Aux 6" }, \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 1"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 2"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 3"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 4"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 5"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Aux 6"), \
+ ARIZONA_MIXER_ROUTES(name, name "L"), \
+ ARIZONA_MIXER_ROUTES(name, name "R")
+
+extern const struct soc_enum arizona_in_vi_ramp;
+extern const struct soc_enum arizona_in_vd_ramp;
+
+extern const struct soc_enum arizona_out_vi_ramp;
+extern const struct soc_enum arizona_out_vd_ramp;
+
extern const struct soc_enum arizona_lhpf1_mode;
extern const struct soc_enum arizona_lhpf2_mode;
extern const struct soc_enum arizona_lhpf3_mode;
@@ -146,6 +197,8 @@ struct arizona_fll {
unsigned int vco_mult;
struct completion lock;
struct completion ok;
+ unsigned int fref;
+ unsigned int fout;
char lock_name[ARIZONA_FLL_NAME_LEN];
char clock_ok_name[ARIZONA_FLL_NAME_LEN];
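The arizona.h refactor above folds the repeated enum/control declarations into a single helper that higher-level macros stamp out once per input with a derived name and register offset. A self-contained illustration of the token-pasting idea (not the arizona macros themselves):

#define EXAMPLE_MUX_DECLS(name, reg) \
	static const int name##_reg = (reg); \
	static const char *name##_label = #name

#define EXAMPLE_MIXER_DECLS(name, base) \
	EXAMPLE_MUX_DECLS(name##_in1, (base)); \
	EXAMPLE_MUX_DECLS(name##_in2, (base) + 2)

/* Expands to pcm_in1_reg/pcm_in1_label and pcm_in2_reg/pcm_in2_label. */
EXAMPLE_MIXER_DECLS(pcm, 0x40);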
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 064cd6a9351..23316c887b1 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -201,7 +201,7 @@ static struct platform_driver cq93vc_codec_driver = {
},
.probe = cq93vc_platform_probe,
- .remove = __devexit_p(cq93vc_platform_remove),
+ .remove = cq93vc_platform_remove,
};
module_platform_driver(cq93vc_codec_driver);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index e3f0a7f3131..4f1127935fd 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -474,15 +474,25 @@ static int cs4271_probe(struct snd_soc_codec *codec)
struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
int ret;
int gpio_nreset = -EINVAL;
+ int amutec_eq_bmutec = 0;
#ifdef CONFIG_OF
- if (of_match_device(cs4271_dt_ids, codec->dev))
+ if (of_match_device(cs4271_dt_ids, codec->dev)) {
gpio_nreset = of_get_named_gpio(codec->dev->of_node,
"reset-gpio", 0);
+
+ if (!of_get_property(codec->dev->of_node,
+ "cirrus,amutec-eq-bmutec", NULL))
+ amutec_eq_bmutec = 1;
+ }
#endif
- if (cs4271plat && gpio_is_valid(cs4271plat->gpio_nreset))
- gpio_nreset = cs4271plat->gpio_nreset;
+ if (cs4271plat) {
+ if (gpio_is_valid(cs4271plat->gpio_nreset))
+ gpio_nreset = cs4271plat->gpio_nreset;
+
+ amutec_eq_bmutec = cs4271plat->amutec_eq_bmutec;
+ }
if (gpio_nreset >= 0)
if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset"))
@@ -528,6 +538,11 @@ static int cs4271_probe(struct snd_soc_codec *codec)
/* Power-up sequence requires 85 uS */
udelay(85);
+ if (amutec_eq_bmutec)
+ snd_soc_update_bits(codec, CS4271_MODE2,
+ CS4271_MODE2_MUTECAEQUB,
+ CS4271_MODE2_MUTECAEQUB);
+
return snd_soc_add_codec_controls(codec, cs4271_snd_controls,
ARRAY_SIZE(cs4271_snd_controls));
}
@@ -555,7 +570,7 @@ static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit cs4271_spi_probe(struct spi_device *spi)
+static int cs4271_spi_probe(struct spi_device *spi)
{
struct cs4271_private *cs4271;
@@ -570,7 +585,7 @@ static int __devinit cs4271_spi_probe(struct spi_device *spi)
&cs4271_dai, 1);
}
-static int __devexit cs4271_spi_remove(struct spi_device *spi)
+static int cs4271_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -583,7 +598,7 @@ static struct spi_driver cs4271_spi_driver = {
.of_match_table = of_match_ptr(cs4271_dt_ids),
},
.probe = cs4271_spi_probe,
- .remove = __devexit_p(cs4271_spi_remove),
+ .remove = cs4271_spi_remove,
};
#endif /* defined(CONFIG_SPI_MASTER) */
@@ -594,8 +609,8 @@ static const struct i2c_device_id cs4271_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
-static int __devinit cs4271_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cs4271_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct cs4271_private *cs4271;
@@ -610,7 +625,7 @@ static int __devinit cs4271_i2c_probe(struct i2c_client *client,
&cs4271_dai, 1);
}
-static int __devexit cs4271_i2c_remove(struct i2c_client *client)
+static int cs4271_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -624,7 +639,7 @@ static struct i2c_driver cs4271_i2c_driver = {
},
.id_table = cs4271_i2c_id,
.probe = cs4271_i2c_probe,
- .remove = __devexit_p(cs4271_i2c_remove),
+ .remove = cs4271_i2c_remove,
};
#endif /* defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) */
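The cs4271 hunk above reads an optional, valueless DT property alongside the existing platform-data field. A hedged sketch of the presence test in isolation; of_property_read_bool() is an equivalent spelling of the of_get_property(..., NULL) != NULL check, and the helper name is illustrative:

#include <linux/of.h>

static bool example_amutec_eq_bmutec(struct device_node *np)
{
	/* The property carries no value; its presence is the whole flag. */
	return of_property_read_bool(np, "cirrus,amutec-eq-bmutec");
}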
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 97a81051e88..99bb1c69499 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1271,7 +1271,7 @@ static struct i2c_driver cs42l52_i2c_driver = {
},
.id_table = cs42l52_id,
.probe = cs42l52_i2c_probe,
- .remove = __devexit_p(cs42l52_i2c_remove),
+ .remove = cs42l52_i2c_remove,
};
module_i2c_driver(cs42l52_i2c_driver);
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 2c08c4cb465..6361dab48bd 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -40,6 +40,7 @@ struct cs42l73_private {
u32 sysclk;
u8 mclksel;
u32 mclk;
+ int shutdwn_delay;
};
static const struct reg_default cs42l73_reg_defaults[] = {
@@ -588,7 +589,60 @@ static const struct snd_kcontrol_new cs42l73_snd_controls[] = {
SOC_ENUM("XSPOUT Mono/Stereo Select", xsp_output_mux_enum),
};
+static int cs42l73_spklo_spk_amp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ /* 150 ms delay between setting PDN and MCLKDIS */
+ priv->shutdwn_delay = 150;
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ }
+ return 0;
+}
+
+static int cs42l73_ear_amp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ /* 50 ms delay between setting PDN and MCLKDIS */
+ if (priv->shutdwn_delay < 50)
+ priv->shutdwn_delay = 50;
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ }
+ return 0;
+}
+
+
+static int cs42l73_hp_amp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ /* 30 ms delay between setting PDN and MCLKDIS */
+ if (priv->shutdwn_delay < 30)
+ priv->shutdwn_delay = 30;
+ break;
+ default:
+ pr_err("Invalid event = 0x%x\n", event);
+ }
+ return 0;
+}
+
static const struct snd_soc_dapm_widget cs42l73_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("DMICA"),
+ SND_SOC_DAPM_INPUT("DMICB"),
SND_SOC_DAPM_INPUT("LINEINA"),
SND_SOC_DAPM_INPUT("LINEINB"),
SND_SOC_DAPM_INPUT("MIC1"),
@@ -604,9 +658,7 @@ static const struct snd_soc_dapm_widget cs42l73_dapm_widgets[] = {
CS42L73_PWRCTL2, 3, 1),
SND_SOC_DAPM_AIF_OUT("ASPOUTR", NULL, 0,
CS42L73_PWRCTL2, 3, 1),
- SND_SOC_DAPM_AIF_OUT("VSPOUTL", NULL, 0,
- CS42L73_PWRCTL2, 4, 1),
- SND_SOC_DAPM_AIF_OUT("VSPOUTR", NULL, 0,
+ SND_SOC_DAPM_AIF_OUT("VSPINOUT", NULL, 0,
CS42L73_PWRCTL2, 4, 1),
SND_SOC_DAPM_PGA("PGA Left", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -632,8 +684,7 @@ static const struct snd_soc_dapm_widget cs42l73_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("ASPR Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("XSPL Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("XSPR Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("VSPL Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("VSPR Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("VSP Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_AIF_IN("XSPINL", NULL, 0,
CS42L73_PWRCTL2, 0, 1),
@@ -649,7 +700,7 @@ static const struct snd_soc_dapm_widget cs42l73_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN("ASPINM", NULL, 0,
CS42L73_PWRCTL2, 2, 1),
- SND_SOC_DAPM_AIF_IN("VSPIN", NULL, 0,
+ SND_SOC_DAPM_AIF_IN("VSPINOUT", NULL, 0,
CS42L73_PWRCTL2, 4, 1),
SND_SOC_DAPM_MIXER("HL Left Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -674,16 +725,20 @@ static const struct snd_soc_dapm_widget cs42l73_dapm_widgets[] = {
SND_SOC_DAPM_PGA("SPK DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("ESL DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_SWITCH("HP Amp", CS42L73_PWRCTL3, 0, 1,
- &hp_amp_ctl),
+ SND_SOC_DAPM_SWITCH_E("HP Amp", CS42L73_PWRCTL3, 0, 1,
+ &hp_amp_ctl, cs42l73_hp_amp_event,
+ SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SWITCH("LO Amp", CS42L73_PWRCTL3, 1, 1,
&lo_amp_ctl),
- SND_SOC_DAPM_SWITCH("SPK Amp", CS42L73_PWRCTL3, 2, 1,
- &spk_amp_ctl),
- SND_SOC_DAPM_SWITCH("EAR Amp", CS42L73_PWRCTL3, 3, 1,
- &ear_amp_ctl),
- SND_SOC_DAPM_SWITCH("SPKLO Amp", CS42L73_PWRCTL3, 4, 1,
- &spklo_amp_ctl),
+ SND_SOC_DAPM_SWITCH_E("SPK Amp", CS42L73_PWRCTL3, 2, 1,
+ &spk_amp_ctl, cs42l73_spklo_spk_amp_event,
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SWITCH_E("EAR Amp", CS42L73_PWRCTL3, 3, 1,
+ &ear_amp_ctl, cs42l73_ear_amp_event,
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SWITCH_E("SPKLO Amp", CS42L73_PWRCTL3, 4, 1,
+ &spklo_amp_ctl, cs42l73_spklo_spk_amp_event,
+ SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_OUTPUT("HPOUTA"),
SND_SOC_DAPM_OUTPUT("HPOUTB"),
@@ -705,7 +760,7 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"ESL DAC", "ESL-ASP Mono Volume", "ESL Mixer"},
{"ESL DAC", "ESL-XSP Mono Volume", "ESL Mixer"},
- {"ESL DAC", "ESL-VSP Mono Volume", "VSPIN"},
+ {"ESL DAC", "ESL-VSP Mono Volume", "VSPINOUT"},
/* Loopback */
{"ESL DAC", "ESL-IP Mono Volume", "Input Left Capture"},
{"ESL DAC", "ESL-IP Mono Volume", "Input Right Capture"},
@@ -727,7 +782,7 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"SPK DAC", "SPK-ASP Mono Volume", "SPK Mixer"},
{"SPK DAC", "SPK-XSP Mono Volume", "SPK Mixer"},
- {"SPK DAC", "SPK-VSP Mono Volume", "VSPIN"},
+ {"SPK DAC", "SPK-VSP Mono Volume", "VSPINOUT"},
/* Loopback */
{"SPK DAC", "SPK-IP Mono Volume", "Input Left Capture"},
{"SPK DAC", "SPK-IP Mono Volume", "Input Right Capture"},
@@ -770,8 +825,8 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"HL Right Mixer", NULL, "ASPINR"},
{"HL Left Mixer", NULL, "XSPINL"},
{"HL Right Mixer", NULL, "XSPINR"},
- {"HL Left Mixer", NULL, "VSPIN"},
- {"HL Right Mixer", NULL, "VSPIN"},
+ {"HL Left Mixer", NULL, "VSPINOUT"},
+ {"HL Right Mixer", NULL, "VSPINOUT"},
{"ASPINL", NULL, "ASP Playback"},
{"ASPINM", NULL, "ASP Playback"},
@@ -779,7 +834,7 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"XSPINL", NULL, "XSP Playback"},
{"XSPINM", NULL, "XSP Playback"},
{"XSPINR", NULL, "XSP Playback"},
- {"VSPIN", NULL, "VSP Playback"},
+ {"VSPINOUT", NULL, "VSP Playback"},
/* Capture Paths */
{"MIC1", NULL, "MIC1 Bias"},
@@ -795,6 +850,8 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"ADC Left", NULL, "PGA Left"},
{"ADC Right", NULL, "PGA Right"},
+ {"DMIC Left", NULL, "DMICA"},
+ {"DMIC Right", NULL, "DMICB"},
{"Input Left Capture", "ADC Left Input", "ADC Left"},
{"Input Right Capture", "ADC Right Input", "ADC Right"},
@@ -819,21 +876,18 @@ static const struct snd_soc_dapm_route cs42l73_audio_map[] = {
{"XSPOUTR", NULL, "XSPR Output Mixer"},
/* Voice Capture */
- {"VSPL Output Mixer", NULL, "Input Left Capture"},
- {"VSPR Output Mixer", NULL, "Input Left Capture"},
+ {"VSP Output Mixer", NULL, "Input Left Capture"},
+ {"VSP Output Mixer", NULL, "Input Right Capture"},
- {"VSPOUTL", "VSP-IP Volume", "VSPL Output Mixer"},
- {"VSPOUTR", "VSP-IP Volume", "VSPR Output Mixer"},
+ {"VSPINOUT", "VSP-IP Volume", "VSP Output Mixer"},
- {"VSPOUTL", NULL, "VSPL Output Mixer"},
- {"VSPOUTR", NULL, "VSPR Output Mixer"},
+ {"VSPINOUT", NULL, "VSP Output Mixer"},
{"ASP Capture", NULL, "ASPOUTL"},
{"ASP Capture", NULL, "ASPOUTR"},
{"XSP Capture", NULL, "XSPOUTL"},
{"XSP Capture", NULL, "XSPOUTR"},
- {"VSP Capture", NULL, "VSPOUTL"},
- {"VSP Capture", NULL, "VSPOUTR"},
+ {"VSP Capture", NULL, "VSPINOUT"},
};
struct cs42l73_mclk_div {
@@ -1167,6 +1221,14 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 1);
+ if (cs42l73->shutdwn_delay > 0) {
+ mdelay(cs42l73->shutdwn_delay);
+ cs42l73->shutdwn_delay = 0;
+ } else {
+ mdelay(15); /* Min amount of time required to power
+ * down.
+ */
+ }
snd_soc_update_bits(codec, CS42L73_DMMCC, MCLKDIS, 1);
break;
}
@@ -1345,8 +1407,8 @@ static struct regmap_config cs42l73_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int cs42l73_i2c_probe(struct i2c_client *i2c_client,
- const struct i2c_device_id *id)
+static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
{
struct cs42l73_private *cs42l73;
int ret;
@@ -1406,7 +1468,7 @@ static __devinit int cs42l73_i2c_probe(struct i2c_client *i2c_client,
return 0;
}
-static __devexit int cs42l73_i2c_remove(struct i2c_client *client)
+static int cs42l73_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1426,7 +1488,7 @@ static struct i2c_driver cs42l73_i2c_driver = {
},
.id_table = cs42l73_id,
.probe = cs42l73_i2c_probe,
- .remove = __devexit_p(cs42l73_i2c_remove),
+ .remove = cs42l73_i2c_remove,
};
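A reduced sketch of the cs42l73 DAPM event handlers added above: each amplifier's POST_PMD event records the longest PDN-to-MCLKDIS delay its path needs, and set_bias_level() consumes the value before disabling MCLK. Names are illustrative, and the widget->codec pointer is the ~3.8-era field used in the hunk:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

struct example_priv {
	int shutdown_delay_ms;
};

static int example_amp_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol, int event)
{
	struct example_priv *priv = snd_soc_codec_get_drvdata(w->codec);

	/* Keep the largest delay any powered-down path has asked for. */
	if (event == SND_SOC_DAPM_POST_PMD && priv->shutdown_delay_ms < 50)
		priv->shutdown_delay_ms = 50;

	return 0;
}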
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index af5db708051..9c123145650 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -1218,8 +1218,8 @@ static const struct regmap_config da7210_regmap_config_i2c = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit da7210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da7210_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct da7210_priv *da7210;
int ret;
@@ -1231,7 +1231,7 @@ static int __devinit da7210_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, da7210);
- da7210->regmap = regmap_init_i2c(i2c, &da7210_regmap_config_i2c);
+ da7210->regmap = devm_regmap_init_i2c(i2c, &da7210_regmap_config_i2c);
if (IS_ERR(da7210->regmap)) {
ret = PTR_ERR(da7210->regmap);
dev_err(&i2c->dev, "regmap_init() failed: %d\n", ret);
@@ -1245,24 +1245,15 @@ static int __devinit da7210_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_da7210, &da7210_dai, 1);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
- goto err_regmap;
- }
- return ret;
-
-err_regmap:
- regmap_exit(da7210->regmap);
return ret;
}
-static int __devexit da7210_i2c_remove(struct i2c_client *client)
+static int da7210_i2c_remove(struct i2c_client *client)
{
- struct da7210_priv *da7210 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(da7210->regmap);
return 0;
}
@@ -1279,7 +1270,7 @@ static struct i2c_driver da7210_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = da7210_i2c_probe,
- .remove = __devexit_p(da7210_i2c_remove),
+ .remove = da7210_i2c_remove,
.id_table = da7210_i2c_id,
};
#endif
@@ -1323,7 +1314,7 @@ static const struct regmap_config da7210_regmap_config_spi = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit da7210_spi_probe(struct spi_device *spi)
+static int da7210_spi_probe(struct spi_device *spi)
{
struct da7210_priv *da7210;
int ret;
@@ -1346,24 +1337,15 @@ static int __devinit da7210_spi_probe(struct spi_device *spi)
if (ret != 0)
dev_warn(&spi->dev, "Failed to apply regmap patch: %d\n", ret);
- ret = snd_soc_register_codec(&spi->dev,
+ ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_da7210, &da7210_dai, 1);
- if (ret < 0)
- goto err_regmap;
-
- return ret;
-
-err_regmap:
- regmap_exit(da7210->regmap);
return ret;
}
-static int __devexit da7210_spi_remove(struct spi_device *spi)
+static int da7210_spi_remove(struct spi_device *spi)
{
- struct da7210_priv *da7210 = spi_get_drvdata(spi);
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(da7210->regmap);
return 0;
}
@@ -1373,7 +1355,7 @@ static struct spi_driver da7210_spi_driver = {
.owner = THIS_MODULE,
},
.probe = da7210_spi_probe,
- .remove = __devexit_p(da7210_spi_remove)
+ .remove = da7210_spi_remove
};
#endif
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 01be2a320e2..dc0284dc9e6 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1557,8 +1557,8 @@ static struct snd_soc_codec_driver soc_codec_dev_da732x = {
.reg_cache_size = ARRAY_SIZE(da732x_reg_cache),
};
-static __devinit int da732x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da732x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct da732x_priv *da732x;
unsigned int reg;
@@ -1596,7 +1596,7 @@ err:
return ret;
}
-static __devexit int da732x_i2c_remove(struct i2c_client *client)
+static int da732x_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -1615,7 +1615,7 @@ static struct i2c_driver da732x_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = da732x_i2c_probe,
- .remove = __devexit_p(da732x_i2c_remove),
+ .remove = da732x_i2c_remove,
.id_table = da732x_i2c_id,
};
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index f379b085c39..fc9802d1281 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -173,6 +173,7 @@
#define DA9055_AIF_FORMAT_I2S_MODE (0 << 0)
#define DA9055_AIF_FORMAT_LEFT_J (1 << 0)
#define DA9055_AIF_FORMAT_RIGHT_J (2 << 0)
+#define DA9055_AIF_FORMAT_DSP (3 << 0)
#define DA9055_AIF_WORD_S16_LE (0 << 2)
#define DA9055_AIF_WORD_S20_3LE (1 << 2)
#define DA9055_AIF_WORD_S24_LE (2 << 2)
@@ -752,6 +753,17 @@ static const struct snd_kcontrol_new da9055_dapm_mixoutr_controls[] = {
6, 1, 0),
};
+/* Headphone Output Enable */
+static const struct snd_kcontrol_new da9055_dapm_hp_l_control =
+SOC_DAPM_SINGLE("Switch", DA9055_HP_L_CTRL, 3, 1, 0);
+
+static const struct snd_kcontrol_new da9055_dapm_hp_r_control =
+SOC_DAPM_SINGLE("Switch", DA9055_HP_R_CTRL, 3, 1, 0);
+
+/* Lineout Output Enable */
+static const struct snd_kcontrol_new da9055_dapm_lineout_control =
+SOC_DAPM_SINGLE("Switch", DA9055_LINE_CTRL, 3, 1, 0);
+
/* DAPM widgets */
static const struct snd_soc_dapm_widget da9055_dapm_widgets[] = {
/* Input Side */
@@ -816,6 +828,14 @@ static const struct snd_soc_dapm_widget da9055_dapm_widgets[] = {
&da9055_dapm_mixoutr_controls[0],
ARRAY_SIZE(da9055_dapm_mixoutr_controls)),
+ /* Output Enable Switches */
+ SND_SOC_DAPM_SWITCH("Headphone Left Enable", SND_SOC_NOPM, 0, 0,
+ &da9055_dapm_hp_l_control),
+ SND_SOC_DAPM_SWITCH("Headphone Right Enable", SND_SOC_NOPM, 0, 0,
+ &da9055_dapm_hp_r_control),
+ SND_SOC_DAPM_SWITCH("Lineout Enable", SND_SOC_NOPM, 0, 0,
+ &da9055_dapm_lineout_control),
+
/* Output PGAs */
SND_SOC_DAPM_PGA("MIXOUT Left", DA9055_MIXOUT_L_CTRL, 7, 0, NULL, 0),
SND_SOC_DAPM_PGA("MIXOUT Right", DA9055_MIXOUT_R_CTRL, 7, 0, NULL, 0),
@@ -901,17 +921,20 @@ static const struct snd_soc_dapm_route da9055_audio_map[] = {
{"Out Mixer Right", "DAC Right Switch", "DAC Right"},
{"MIXOUT Left", NULL, "Out Mixer Left"},
- {"Headphone Left", NULL, "MIXOUT Left"},
+ {"Headphone Left Enable", "Switch", "MIXOUT Left"},
+ {"Headphone Left", NULL, "Headphone Left Enable"},
{"Headphone Left", NULL, "Charge Pump"},
{"HPL", NULL, "Headphone Left"},
{"MIXOUT Right", NULL, "Out Mixer Right"},
- {"Headphone Right", NULL, "MIXOUT Right"},
+ {"Headphone Right Enable", "Switch", "MIXOUT Right"},
+ {"Headphone Right", NULL, "Headphone Right Enable"},
{"Headphone Right", NULL, "Charge Pump"},
{"HPR", NULL, "Headphone Right"},
{"MIXOUT Right", NULL, "Out Mixer Right"},
- {"Lineout", NULL, "MIXOUT Right"},
+ {"Lineout Enable", "Switch", "MIXOUT Right"},
+ {"Lineout", NULL, "Lineout Enable"},
{"LINE", NULL, "Lineout"},
};
@@ -1175,6 +1198,9 @@ static int da9055_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
case SND_SOC_DAIFMT_RIGHT_J:
aif_ctrl = DA9055_AIF_FORMAT_RIGHT_J;
break;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif_ctrl = DA9055_AIF_FORMAT_DSP;
+ break;
default:
return -EINVAL;
}
@@ -1390,8 +1416,7 @@ static int da9055_probe(struct snd_soc_codec *codec)
DA9055_GAIN_RAMPING_EN, DA9055_GAIN_RAMPING_EN);
/*
- * There are two separate control bits for input and output mixers as
- * well as headphone and line outs.
+ * There are two separate control bits for input and output mixers.
* One to enable corresponding amplifier and other to enable its
* output. As amplifier bits are related to power control, they are
* being managed by DAPM while other (non power related) bits are
@@ -1407,14 +1432,6 @@ static int da9055_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, DA9055_MIXOUT_R_CTRL,
DA9055_MIXOUT_R_MIX_EN, DA9055_MIXOUT_R_MIX_EN);
- snd_soc_update_bits(codec, DA9055_HP_L_CTRL,
- DA9055_HP_L_AMP_OE, DA9055_HP_L_AMP_OE);
- snd_soc_update_bits(codec, DA9055_HP_R_CTRL,
- DA9055_HP_R_AMP_OE, DA9055_HP_R_AMP_OE);
-
- snd_soc_update_bits(codec, DA9055_LINE_CTRL,
- DA9055_LINE_AMP_OE, DA9055_LINE_AMP_OE);
-
/* Set this as per your system configuration */
snd_soc_write(codec, DA9055_PLL_CTRL, DA9055_PLL_INDIV_10_20_MHZ);
@@ -1467,8 +1484,8 @@ static const struct regmap_config da9055_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit da9055_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9055_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct da9055_priv *da9055;
struct da9055_platform_data *pdata = dev_get_platdata(&i2c->dev);
@@ -1500,7 +1517,7 @@ static int __devinit da9055_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit da9055_remove(struct i2c_client *client)
+static int da9055_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1519,7 +1536,7 @@ static struct i2c_driver da9055_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = da9055_i2c_probe,
- .remove = __devexit_p(da9055_remove),
+ .remove = da9055_remove,
.id_table = da9055_i2c_id,
};
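The da9055 change above moves the amplifier output-enable bits out of probe() and behind DAPM switches sitting between the mixer PGA and the physical output. A sketch of that wiring; EXAMPLE_HP_L_CTRL is a placeholder register, not a real define:

#define EXAMPLE_HP_L_CTRL	0x48	/* placeholder register address */

static const struct snd_kcontrol_new example_hp_l_enable =
	SOC_DAPM_SINGLE("Switch", EXAMPLE_HP_L_CTRL, 3, 1, 0);

static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_SWITCH("Headphone Left Enable", SND_SOC_NOPM, 0, 0,
			    &example_hp_l_enable),
};

static const struct snd_soc_dapm_route example_routes[] = {
	/* PGA -> enable switch -> headphone driver, as in the hunk. */
	{ "Headphone Left Enable", "Switch", "MIXOUT Left" },
	{ "Headphone Left", NULL, "Headphone Left Enable" },
};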
diff --git a/sound/soc/codecs/dfbmcs320.c b/sound/soc/codecs/dfbmcs320.c
index bfe46aa9036..4f4f7f41a7d 100644
--- a/sound/soc/codecs/dfbmcs320.c
+++ b/sound/soc/codecs/dfbmcs320.c
@@ -33,13 +33,13 @@ static struct snd_soc_dai_driver dfbmcs320_dai = {
static struct snd_soc_codec_driver soc_codec_dev_dfbmcs320;
-static int __devinit dfbmcs320_probe(struct platform_device *pdev)
+static int dfbmcs320_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_dfbmcs320,
&dfbmcs320_dai, 1);
}
-static int __devexit dfbmcs320_remove(struct platform_device *pdev)
+static int dfbmcs320_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
@@ -52,7 +52,7 @@ static struct platform_driver dfmcs320_driver = {
.owner = THIS_MODULE,
},
.probe = dfbmcs320_probe,
- .remove = __devexit_p(dfbmcs320_remove),
+ .remove = dfbmcs320_remove,
};
module_platform_driver(dfmcs320_driver);
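Most hunks in this series are the same mechanical cleanup shown for dfbmcs320: with CONFIG_HOTPLUG gone, __devinit/__devexit and the __devexit_p() wrapper are dropped, leaving plain probe/remove functions referenced directly. A minimal skeleton, with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
	},
	.probe  = example_probe,
	.remove = example_remove,	/* no __devexit_p() wrapper */
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");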
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index 3e929f079a1..66967ba6f75 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -66,13 +66,13 @@ static struct snd_soc_codec_driver soc_dmic = {
.probe = dmic_probe,
};
-static int __devinit dmic_dev_probe(struct platform_device *pdev)
+static int dmic_dev_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_dmic, &dmic_dai, 1);
}
-static int __devexit dmic_dev_remove(struct platform_device *pdev)
+static int dmic_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -86,7 +86,7 @@ static struct platform_driver dmic_driver = {
.owner = THIS_MODULE,
},
.probe = dmic_dev_probe,
- .remove = __devexit_p(dmic_dev_remove),
+ .remove = dmic_dev_remove,
};
module_platform_driver(dmic_driver);
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index 1bf55602c9e..53b455b8c07 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -1119,8 +1119,8 @@ static const struct regmap_config isabelle_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit isabelle_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int isabelle_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct regmap *isabelle_regmap;
int ret = 0;
@@ -1145,7 +1145,7 @@ static int __devinit isabelle_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit isabelle_i2c_remove(struct i2c_client *client)
+static int isabelle_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1163,7 +1163,7 @@ static struct i2c_driver isabelle_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = isabelle_i2c_probe,
- .remove = __devexit_p(isabelle_i2c_remove),
+ .remove = isabelle_i2c_remove,
.id_table = isabelle_i2c_id,
};
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 85d9cabe6d5..d991529e1af 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/regmap.h>
#include <linux/delay.h>
@@ -24,9 +25,10 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/tlv.h>
#define JZ4740_REG_CODEC_1 0x0
-#define JZ4740_REG_CODEC_2 0x1
+#define JZ4740_REG_CODEC_2 0x4
#define JZ4740_CODEC_1_LINE_ENABLE BIT(29)
#define JZ4740_CODEC_1_MIC_ENABLE BIT(28)
@@ -67,43 +69,36 @@
#define JZ4740_CODEC_2_MIC_BOOST_GAIN_OFFSET 4
#define JZ4740_CODEC_2_HEADPHONE_VOLUME_OFFSET 0
-static const uint32_t jz4740_codec_regs[] = {
- 0x021b2302, 0x00170803,
+static const struct reg_default jz4740_codec_reg_defaults[] = {
+ { JZ4740_REG_CODEC_1, 0x021b2302 },
+ { JZ4740_REG_CODEC_2, 0x00170803 },
};
struct jz4740_codec {
- void __iomem *base;
- struct resource *mem;
+ struct regmap *regmap;
};
-static unsigned int jz4740_codec_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);
- return readl(jz4740_codec->base + (reg << 2));
-}
-
-static int jz4740_codec_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);
- u32 *cache = codec->reg_cache;
-
- cache[reg] = val;
- writel(val, jz4740_codec->base + (reg << 2));
+static const unsigned int jz4740_mic_tlv[] = {
+ TLV_DB_RANGE_HEAD(2),
+ 0, 2, TLV_DB_SCALE_ITEM(0, 600, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0),
+};
- return 0;
-}
+static const DECLARE_TLV_DB_SCALE(jz4740_out_tlv, 0, 200, 0);
+static const DECLARE_TLV_DB_SCALE(jz4740_in_tlv, -3450, 150, 0);
static const struct snd_kcontrol_new jz4740_codec_controls[] = {
- SOC_SINGLE("Master Playback Volume", JZ4740_REG_CODEC_2,
- JZ4740_CODEC_2_HEADPHONE_VOLUME_OFFSET, 3, 0),
- SOC_SINGLE("Master Capture Volume", JZ4740_REG_CODEC_2,
- JZ4740_CODEC_2_INPUT_VOLUME_OFFSET, 31, 0),
+ SOC_SINGLE_TLV("Master Playback Volume", JZ4740_REG_CODEC_2,
+ JZ4740_CODEC_2_HEADPHONE_VOLUME_OFFSET, 3, 0,
+ jz4740_out_tlv),
+ SOC_SINGLE_TLV("Master Capture Volume", JZ4740_REG_CODEC_2,
+ JZ4740_CODEC_2_INPUT_VOLUME_OFFSET, 31, 0,
+ jz4740_in_tlv),
SOC_SINGLE("Master Playback Switch", JZ4740_REG_CODEC_1,
JZ4740_CODEC_1_HEADPHONE_DISABLE_OFFSET, 1, 1),
- SOC_SINGLE("Mic Capture Volume", JZ4740_REG_CODEC_2,
- JZ4740_CODEC_2_MIC_BOOST_GAIN_OFFSET, 3, 0),
+ SOC_SINGLE_TLV("Mic Capture Volume", JZ4740_REG_CODEC_2,
+ JZ4740_CODEC_2_MIC_BOOST_GAIN_OFFSET, 3, 0,
+ jz4740_mic_tlv),
};
static const struct snd_kcontrol_new jz4740_codec_output_controls[] = {
@@ -163,8 +158,8 @@ static const struct snd_soc_dapm_route jz4740_codec_dapm_routes[] = {
static int jz4740_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
+ struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(dai->codec);
uint32_t val;
- struct snd_soc_codec *codec = dai->codec;
switch (params_rate(params)) {
case 8000:
@@ -200,7 +195,7 @@ static int jz4740_codec_hw_params(struct snd_pcm_substream *substream,
val <<= JZ4740_CODEC_2_SAMPLE_RATE_OFFSET;
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_2,
+ regmap_update_bits(jz4740_codec->regmap, JZ4740_REG_CODEC_2,
JZ4740_CODEC_2_SAMPLE_RATE_MASK, val);
return 0;
@@ -230,25 +225,23 @@ static struct snd_soc_dai_driver jz4740_codec_dai = {
.symmetric_rates = 1,
};
-static void jz4740_codec_wakeup(struct snd_soc_codec *codec)
+static void jz4740_codec_wakeup(struct regmap *regmap)
{
- int i;
- uint32_t *cache = codec->reg_cache;
-
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
+ regmap_update_bits(regmap, JZ4740_REG_CODEC_1,
JZ4740_CODEC_1_RESET, JZ4740_CODEC_1_RESET);
udelay(2);
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
+ regmap_update_bits(regmap, JZ4740_REG_CODEC_1,
JZ4740_CODEC_1_SUSPEND | JZ4740_CODEC_1_RESET, 0);
- for (i = 0; i < ARRAY_SIZE(jz4740_codec_regs); ++i)
- jz4740_codec_write(codec, i, cache[i]);
+ regcache_sync(regmap);
}
static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);
+ struct regmap *regmap = jz4740_codec->regmap;
unsigned int mask;
unsigned int value;
@@ -261,12 +254,12 @@ static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec,
JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M;
value = 0;
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1, mask, value);
+ regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
break;
case SND_SOC_BIAS_STANDBY:
/* The only way to clear the suspend flag is to reset the codec */
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
- jz4740_codec_wakeup(codec);
+ jz4740_codec_wakeup(regmap);
mask = JZ4740_CODEC_1_VREF_DISABLE |
JZ4740_CODEC_1_VREF_AMP_DISABLE |
@@ -275,13 +268,14 @@ static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec,
JZ4740_CODEC_1_VREF_AMP_DISABLE |
JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M;
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1, mask, value);
+ regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
break;
case SND_SOC_BIAS_OFF:
mask = JZ4740_CODEC_1_SUSPEND;
value = JZ4740_CODEC_1_SUSPEND;
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1, mask, value);
+ regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
+ regcache_mark_dirty(regmap);
break;
default:
break;
@@ -294,7 +288,9 @@ static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec,
static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
{
- snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
+ struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);
+
+ regmap_update_bits(jz4740_codec->regmap, JZ4740_REG_CODEC_1,
JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);
jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -331,12 +327,7 @@ static struct snd_soc_codec_driver soc_codec_dev_jz4740_codec = {
.remove = jz4740_codec_dev_remove,
.suspend = jz4740_codec_suspend,
.resume = jz4740_codec_resume,
- .read = jz4740_codec_read,
- .write = jz4740_codec_write,
.set_bias_level = jz4740_codec_set_bias_level,
- .reg_cache_default = jz4740_codec_regs,
- .reg_word_size = sizeof(u32),
- .reg_cache_size = 2,
.controls = jz4740_codec_controls,
.num_controls = ARRAY_SIZE(jz4740_codec_controls),
@@ -346,11 +337,23 @@ static struct snd_soc_codec_driver soc_codec_dev_jz4740_codec = {
.num_dapm_routes = ARRAY_SIZE(jz4740_codec_dapm_routes),
};
-static int __devinit jz4740_codec_probe(struct platform_device *pdev)
+static const struct regmap_config jz4740_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = JZ4740_REG_CODEC_2,
+
+ .reg_defaults = jz4740_codec_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(jz4740_codec_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int jz4740_codec_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_codec *jz4740_codec;
struct resource *mem;
+ void __iomem *base;
jz4740_codec = devm_kzalloc(&pdev->dev, sizeof(*jz4740_codec),
GFP_KERNEL);
@@ -358,56 +361,29 @@ static int __devinit jz4740_codec_probe(struct platform_device *pdev)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "Failed to get mmio memory resource\n");
- ret = -ENOENT;
- goto err_out;
- }
-
- mem = request_mem_region(mem->start, resource_size(mem), pdev->name);
- if (!mem) {
- dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- ret = -EBUSY;
- goto err_out;
- }
+ base = devm_request_and_ioremap(&pdev->dev, mem);
+ if (!base)
+ return -EBUSY;
- jz4740_codec->base = ioremap(mem->start, resource_size(mem));
- if (!jz4740_codec->base) {
- dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
- ret = -EBUSY;
- goto err_release_mem_region;
- }
- jz4740_codec->mem = mem;
+ jz4740_codec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &jz4740_codec_regmap_config);
+ if (IS_ERR(jz4740_codec->regmap))
+ return PTR_ERR(jz4740_codec->regmap);
platform_set_drvdata(pdev, jz4740_codec);
ret = snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_jz4740_codec, &jz4740_codec_dai, 1);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Failed to register codec\n");
- goto err_iounmap;
- }
- return 0;
-
-err_iounmap:
- iounmap(jz4740_codec->base);
-err_release_mem_region:
- release_mem_region(mem->start, resource_size(mem));
-err_out:
return ret;
}
-static int __devexit jz4740_codec_remove(struct platform_device *pdev)
+static int jz4740_codec_remove(struct platform_device *pdev)
{
- struct jz4740_codec *jz4740_codec = platform_get_drvdata(pdev);
- struct resource *mem = jz4740_codec->mem;
-
snd_soc_unregister_codec(&pdev->dev);
- iounmap(jz4740_codec->base);
- release_mem_region(mem->start, resource_size(mem));
-
platform_set_drvdata(pdev, NULL);
return 0;
@@ -415,7 +391,7 @@ static int __devexit jz4740_codec_remove(struct platform_device *pdev)
static struct platform_driver jz4740_codec_driver = {
.probe = jz4740_codec_probe,
- .remove = __devexit_p(jz4740_codec_remove),
+ .remove = jz4740_codec_remove,
.driver = {
.name = "jz4740-codec",
.owner = THIS_MODULE,
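A sketch of the MMIO regmap configuration the jz4740 conversion above relies on: two 32-bit registers spaced four bytes apart, cached so regcache_sync() can replay them after the wakeup reset. The values mirror the hunk; the helper name is illustrative:

#include <linux/device.h>
#include <linux/regmap.h>

static const struct regmap_config example_mmio_cfg = {
	.reg_bits	= 32,
	.reg_stride	= 4,	/* valid register addresses are multiples of 4 */
	.val_bits	= 32,
	.max_register	= 0x4,
	.cache_type	= REGCACHE_RBTREE,
};

static struct regmap *example_regmap_init(struct device *dev,
					   void __iomem *base)
{
	return devm_regmap_init_mmio(dev, base, &example_mmio_cfg);
}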
diff --git a/sound/soc/codecs/lm4857.c b/sound/soc/codecs/lm4857.c
index 81a328c7883..9f9f59573f7 100644
--- a/sound/soc/codecs/lm4857.c
+++ b/sound/soc/codecs/lm4857.c
@@ -209,8 +209,8 @@ static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
.set_bias_level = lm4857_set_bias_level,
};
-static int __devinit lm4857_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lm4857_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct lm4857 *lm4857;
int ret;
@@ -228,7 +228,7 @@ static int __devinit lm4857_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit lm4857_i2c_remove(struct i2c_client *i2c)
+static int lm4857_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
return 0;
@@ -246,7 +246,7 @@ static struct i2c_driver lm4857_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lm4857_i2c_probe,
- .remove = __devexit_p(lm4857_i2c_remove),
+ .remove = lm4857_i2c_remove,
.id_table = lm4857_i2c_id,
};
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 99b0a9dcff3..d75257d40a4 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -1483,8 +1483,8 @@ static const struct regmap_config lm49453_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int lm49453_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lm49453_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct lm49453_priv *lm49453;
int ret = 0;
@@ -1497,7 +1497,7 @@ static __devinit int lm49453_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, lm49453);
- lm49453->regmap = regmap_init_i2c(i2c, &lm49453_regmap_config);
+ lm49453->regmap = devm_regmap_init_i2c(i2c, &lm49453_regmap_config);
if (IS_ERR(lm49453->regmap)) {
ret = PTR_ERR(lm49453->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1508,21 +1508,15 @@ static __devinit int lm49453_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_lm49453,
lm49453_dai, ARRAY_SIZE(lm49453_dai));
- if (ret < 0) {
+ if (ret < 0)
dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
- regmap_exit(lm49453->regmap);
- return ret;
- }
return ret;
}
-static int __devexit lm49453_i2c_remove(struct i2c_client *client)
+static int lm49453_i2c_remove(struct i2c_client *client)
{
- struct lm49453_priv *lm49453 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(lm49453->regmap);
return 0;
}
@@ -1538,7 +1532,7 @@ static struct i2c_driver lm49453_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = lm49453_i2c_probe,
- .remove = __devexit_p(lm49453_i2c_remove),
+ .remove = lm49453_i2c_remove,
.id_table = lm49453_i2c_id,
};
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index 17b3ec2d05c..a6ac2313047 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -159,8 +159,8 @@ static const struct regmap_config max9768_i2c_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit max9768_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max9768_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct max9768 *max9768;
struct max9768_pdata *pdata = client->dev.platform_data;
@@ -187,7 +187,7 @@ static int __devinit max9768_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, max9768);
- max9768->regmap = regmap_init_i2c(client, &max9768_i2c_regmap_config);
+ max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
if (IS_ERR(max9768->regmap)) {
err = PTR_ERR(max9768->regmap);
goto err_gpio_free;
@@ -195,12 +195,10 @@ static int __devinit max9768_i2c_probe(struct i2c_client *client,
err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
if (err)
- goto err_regmap_free;
+ goto err_gpio_free;
return 0;
- err_regmap_free:
- regmap_exit(max9768->regmap);
err_gpio_free:
if (gpio_is_valid(max9768->shdn_gpio))
gpio_free(max9768->shdn_gpio);
@@ -210,12 +208,11 @@ static int __devinit max9768_i2c_probe(struct i2c_client *client,
return err;
}
-static int __devexit max9768_i2c_remove(struct i2c_client *client)
+static int max9768_i2c_remove(struct i2c_client *client)
{
struct max9768 *max9768 = i2c_get_clientdata(client);
snd_soc_unregister_codec(&client->dev);
- regmap_exit(max9768->regmap);
if (gpio_is_valid(max9768->shdn_gpio))
gpio_free(max9768->shdn_gpio);
@@ -237,7 +234,7 @@ static struct i2c_driver max9768_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max9768_i2c_probe,
- .remove = __devexit_p(max9768_i2c_remove),
+ .remove = max9768_i2c_remove,
.id_table = max9768_i2c_id,
};
module_i2c_driver(max9768_i2c_driver);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3264a516930..a4c16fd70f7 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -2084,7 +2084,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit max98088_i2c_remove(struct i2c_client *client)
+static int max98088_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -2098,13 +2098,13 @@ static const struct i2c_device_id max98088_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
static struct i2c_driver max98088_i2c_driver = {
- .driver = {
- .name = "max98088",
- .owner = THIS_MODULE,
- },
- .probe = max98088_i2c_probe,
- .remove = __devexit_p(max98088_i2c_remove),
- .id_table = max98088_i2c_id,
+ .driver = {
+ .name = "max98088",
+ .owner = THIS_MODULE,
+ },
+ .probe = max98088_i2c_probe,
+ .remove = max98088_i2c_remove,
+ .id_table = max98088_i2c_id,
};
module_i2c_driver(max98088_i2c_driver);
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
new file mode 100644
index 00000000000..c9772ca3da4
--- /dev/null
+++ b/sound/soc/codecs/max98090.c
@@ -0,0 +1,577 @@
+/*
+ * max98090.c -- MAX98090 ALSA SoC Audio driver
+ * based on Rev0p8 datasheet
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on
+ *
+ * max98095.c
+ * Copyright 2011 Maxim Integrated Products
+ *
+ * https://github.com/hardkernel/linux/commit/\
+ * 3417d7166b17113b3b33b0a337c74d1c7cc313df#sound/soc/codecs/max98090.c
+ * Copyright 2011 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+/*
+ *
+ * MAX98090 Registers Definition
+ *
+ */
+
+/* RESET / STATUS / INTERRUPT REGISTERS */
+#define MAX98090_0x00_SW_RESET 0x00
+#define MAX98090_0x01_INT_STS 0x01
+#define MAX98090_0x02_JACK_STS 0x02
+#define MAX98090_0x03_INT_MASK 0x03
+
+/* QUICK SETUP REGISTERS */
+#define MAX98090_0x04_SYS_CLK 0x04
+#define MAX98090_0x05_SAMPLE_RATE 0x05
+#define MAX98090_0x06_DAI_IF 0x06
+#define MAX98090_0x07_DAC_PATH 0x07
+#define MAX98090_0x08_MIC_TO_ADC 0x08
+#define MAX98090_0x09_LINE_TO_ADC 0x09
+#define MAX98090_0x0A_ANALOG_MIC_LOOP 0x0A
+#define MAX98090_0x0B_ANALOG_LINE_LOOP 0x0B
+
+/* ANALOG INPUT CONFIGURATION REGISTERS */
+#define MAX98090_0x0D_INPUT_CONFIG 0x0D
+#define MAX98090_0x0E_LINE_IN_LVL 0x0E
+#define MAX98090_0x0F_LINI_IN_CFG 0x0F
+#define MAX98090_0x10_MIC1_IN_LVL 0x10
+#define MAX98090_0x11_MIC2_IN_LVL 0x11
+
+/* MICROPHONE CONFIGURATION REGISTERS */
+#define MAX98090_0x12_MIC_BIAS_VOL 0x12
+#define MAX98090_0x13_DIGITAL_MIC_CFG 0x13
+#define MAX98090_0x14_DIGITAL_MIC_MODE 0x14
+
+/* ADC PATH AND CONFIGURATION REGISTERS */
+#define MAX98090_0x15_L_ADC_MIX 0x15
+#define MAX98090_0x16_R_ADC_MIX 0x16
+#define MAX98090_0x17_L_ADC_LVL 0x17
+#define MAX98090_0x18_R_ADC_LVL 0x18
+#define MAX98090_0x19_ADC_BIQUAD_LVL 0x19
+#define MAX98090_0x1A_ADC_SIDETONE 0x1A
+
+/* CLOCK CONFIGURATION REGISTERS */
+#define MAX98090_0x1B_SYS_CLK 0x1B
+#define MAX98090_0x1C_CLK_MODE 0x1C
+#define MAX98090_0x1D_ANY_CLK1 0x1D
+#define MAX98090_0x1E_ANY_CLK2 0x1E
+#define MAX98090_0x1F_ANY_CLK3 0x1F
+#define MAX98090_0x20_ANY_CLK4 0x20
+#define MAX98090_0x21_MASTER_MODE 0x21
+
+/* INTERFACE CONTROL REGISTERS */
+#define MAX98090_0x22_DAI_IF_FMT 0x22
+#define MAX98090_0x23_DAI_TDM_FMT1 0x23
+#define MAX98090_0x24_DAI_TDM_FMT2 0x24
+#define MAX98090_0x25_DAI_IO_CFG 0x25
+#define MAX98090_0x26_FILTER_CFG 0x26
+#define MAX98090_0x27_DAI_PLAYBACK_LVL 0x27
+#define MAX98090_0x28_EQ_PLAYBACK_LVL 0x28
+
+/* HEADPHONE CONTROL REGISTERS */
+#define MAX98090_0x29_L_HP_MIX 0x29
+#define MAX98090_0x2A_R_HP_MIX 0x2A
+#define MAX98090_0x2B_HP_CTR 0x2B
+#define MAX98090_0x2C_L_HP_VOL 0x2C
+#define MAX98090_0x2D_R_HP_VOL 0x2D
+
+/* SPEAKER CONFIGURATION REGISTERS */
+#define MAX98090_0x2E_L_SPK_MIX 0x2E
+#define MAX98090_0x2F_R_SPK_MIX 0x2F
+#define MAX98090_0x30_SPK_CTR 0x30
+#define MAX98090_0x31_L_SPK_VOL 0x31
+#define MAX98090_0x32_R_SPK_VOL 0x32
+
+/* ALC CONFIGURATION REGISTERS */
+#define MAX98090_0x33_ALC_TIMING 0x33
+#define MAX98090_0x34_ALC_COMPRESSOR 0x34
+#define MAX98090_0x35_ALC_EXPANDER 0x35
+#define MAX98090_0x36_ALC_GAIN 0x36
+
+/* RECEIVER AND LINE_OUTPUT REGISTERS */
+#define MAX98090_0x37_RCV_LOUT_L_MIX 0x37
+#define MAX98090_0x38_RCV_LOUT_L_CNTL 0x38
+#define MAX98090_0x39_RCV_LOUT_L_VOL 0x39
+#define MAX98090_0x3A_LOUT_R_MIX 0x3A
+#define MAX98090_0x3B_LOUT_R_CNTL 0x3B
+#define MAX98090_0x3C_LOUT_R_VOL 0x3C
+
+/* JACK DETECT AND ENABLE REGISTERS */
+#define MAX98090_0x3D_JACK_DETECT 0x3D
+#define MAX98090_0x3E_IN_ENABLE 0x3E
+#define MAX98090_0x3F_OUT_ENABLE 0x3F
+#define MAX98090_0x40_LVL_CTR 0x40
+#define MAX98090_0x41_DSP_FILTER_ENABLE 0x41
+
+/* BIAS AND POWER MODE CONFIGURATION REGISTERS */
+#define MAX98090_0x42_BIAS_CTR 0x42
+#define MAX98090_0x43_DAC_CTR 0x43
+#define MAX98090_0x44_ADC_CTR 0x44
+#define MAX98090_0x45_DEV_SHUTDOWN 0x45
+
+/* REVISION ID REGISTER */
+#define MAX98090_0xFF_REV_ID 0xFF
+
+#define MAX98090_REG_MAX_CACHED 0x45
+#define MAX98090_REG_END 0xFF
+
+/*
+ *
+ * MAX98090 Registers Bit Fields
+ *
+ */
+
+/* MAX98090_0x06_DAI_IF */
+#define MAX98090_DAI_IF_MASK 0x3F
+#define MAX98090_RJ_M (1 << 5)
+#define MAX98090_RJ_S (1 << 4)
+#define MAX98090_LJ_M (1 << 3)
+#define MAX98090_LJ_S (1 << 2)
+#define MAX98090_I2S_M (1 << 1)
+#define MAX98090_I2S_S (1 << 0)
+
+/* MAX98090_0x45_DEV_SHUTDOWN */
+#define MAX98090_SHDNRUN (1 << 7)
+
+/* codec private data */
+struct max98090_priv {
+ struct regmap *regmap;
+};
+
+static const struct reg_default max98090_reg_defaults[] = {
+ /* RESET / STATUS / INTERRUPT REGISTERS */
+ {MAX98090_0x00_SW_RESET, 0x00},
+ {MAX98090_0x01_INT_STS, 0x00},
+ {MAX98090_0x02_JACK_STS, 0x00},
+ {MAX98090_0x03_INT_MASK, 0x04},
+
+ /* QUICK SETUP REGISTERS */
+ {MAX98090_0x04_SYS_CLK, 0x00},
+ {MAX98090_0x05_SAMPLE_RATE, 0x00},
+ {MAX98090_0x06_DAI_IF, 0x00},
+ {MAX98090_0x07_DAC_PATH, 0x00},
+ {MAX98090_0x08_MIC_TO_ADC, 0x00},
+ {MAX98090_0x09_LINE_TO_ADC, 0x00},
+ {MAX98090_0x0A_ANALOG_MIC_LOOP, 0x00},
+ {MAX98090_0x0B_ANALOG_LINE_LOOP, 0x00},
+
+ /* ANALOG INPUT CONFIGURATION REGISTERS */
+ {MAX98090_0x0D_INPUT_CONFIG, 0x00},
+ {MAX98090_0x0E_LINE_IN_LVL, 0x1B},
+ {MAX98090_0x0F_LINI_IN_CFG, 0x00},
+ {MAX98090_0x10_MIC1_IN_LVL, 0x11},
+ {MAX98090_0x11_MIC2_IN_LVL, 0x11},
+
+ /* MICROPHONE CONFIGURATION REGISTERS */
+ {MAX98090_0x12_MIC_BIAS_VOL, 0x00},
+ {MAX98090_0x13_DIGITAL_MIC_CFG, 0x00},
+ {MAX98090_0x14_DIGITAL_MIC_MODE, 0x00},
+
+ /* ADC PATH AND CONFIGURATION REGISTERS */
+ {MAX98090_0x15_L_ADC_MIX, 0x00},
+ {MAX98090_0x16_R_ADC_MIX, 0x00},
+ {MAX98090_0x17_L_ADC_LVL, 0x03},
+ {MAX98090_0x18_R_ADC_LVL, 0x03},
+ {MAX98090_0x19_ADC_BIQUAD_LVL, 0x00},
+ {MAX98090_0x1A_ADC_SIDETONE, 0x00},
+
+ /* CLOCK CONFIGURATION REGISTERS */
+ {MAX98090_0x1B_SYS_CLK, 0x00},
+ {MAX98090_0x1C_CLK_MODE, 0x00},
+ {MAX98090_0x1D_ANY_CLK1, 0x00},
+ {MAX98090_0x1E_ANY_CLK2, 0x00},
+ {MAX98090_0x1F_ANY_CLK3, 0x00},
+ {MAX98090_0x20_ANY_CLK4, 0x00},
+ {MAX98090_0x21_MASTER_MODE, 0x00},
+
+ /* INTERFACE CONTROL REGISTERS */
+ {MAX98090_0x22_DAI_IF_FMT, 0x00},
+ {MAX98090_0x23_DAI_TDM_FMT1, 0x00},
+ {MAX98090_0x24_DAI_TDM_FMT2, 0x00},
+ {MAX98090_0x25_DAI_IO_CFG, 0x00},
+ {MAX98090_0x26_FILTER_CFG, 0x80},
+ {MAX98090_0x27_DAI_PLAYBACK_LVL, 0x00},
+ {MAX98090_0x28_EQ_PLAYBACK_LVL, 0x00},
+
+ /* HEADPHONE CONTROL REGISTERS */
+ {MAX98090_0x29_L_HP_MIX, 0x00},
+ {MAX98090_0x2A_R_HP_MIX, 0x00},
+ {MAX98090_0x2B_HP_CTR, 0x00},
+ {MAX98090_0x2C_L_HP_VOL, 0x1A},
+ {MAX98090_0x2D_R_HP_VOL, 0x1A},
+
+ /* SPEAKER CONFIGURATION REGISTERS */
+ {MAX98090_0x2E_L_SPK_MIX, 0x00},
+ {MAX98090_0x2F_R_SPK_MIX, 0x00},
+ {MAX98090_0x30_SPK_CTR, 0x00},
+ {MAX98090_0x31_L_SPK_VOL, 0x2C},
+ {MAX98090_0x32_R_SPK_VOL, 0x2C},
+
+ /* ALC CONFIGURATION REGISTERS */
+ {MAX98090_0x33_ALC_TIMING, 0x00},
+ {MAX98090_0x34_ALC_COMPRESSOR, 0x00},
+ {MAX98090_0x35_ALC_EXPANDER, 0x00},
+ {MAX98090_0x36_ALC_GAIN, 0x00},
+
+ /* RECEIVER AND LINE_OUTPUT REGISTERS */
+ {MAX98090_0x37_RCV_LOUT_L_MIX, 0x00},
+ {MAX98090_0x38_RCV_LOUT_L_CNTL, 0x00},
+ {MAX98090_0x39_RCV_LOUT_L_VOL, 0x15},
+ {MAX98090_0x3A_LOUT_R_MIX, 0x00},
+ {MAX98090_0x3B_LOUT_R_CNTL, 0x00},
+ {MAX98090_0x3C_LOUT_R_VOL, 0x15},
+
+ /* JACK DETECT AND ENABLE REGISTERS */
+ {MAX98090_0x3D_JACK_DETECT, 0x00},
+ {MAX98090_0x3E_IN_ENABLE, 0x00},
+ {MAX98090_0x3F_OUT_ENABLE, 0x00},
+ {MAX98090_0x40_LVL_CTR, 0x00},
+ {MAX98090_0x41_DSP_FILTER_ENABLE, 0x00},
+
+ /* BIAS AND POWER MODE CONFIGURATION REGISTERS */
+ {MAX98090_0x42_BIAS_CTR, 0x00},
+ {MAX98090_0x43_DAC_CTR, 0x00},
+ {MAX98090_0x44_ADC_CTR, 0x06},
+ {MAX98090_0x45_DEV_SHUTDOWN, 0x00},
+};
+
+static const unsigned int max98090_hp_tlv[] = {
+ TLV_DB_RANGE_HEAD(5),
+ 0x0, 0x6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
+ 0x7, 0xE, TLV_DB_SCALE_ITEM(-4000, 300, 0),
+ 0xF, 0x15, TLV_DB_SCALE_ITEM(-1700, 200, 0),
+ 0x16, 0x1B, TLV_DB_SCALE_ITEM(-400, 100, 0),
+ 0x1C, 0x1F, TLV_DB_SCALE_ITEM(150, 50, 0),
+};
+
+static struct snd_kcontrol_new max98090_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Headphone Volume", MAX98090_0x2C_L_HP_VOL,
+ MAX98090_0x2D_R_HP_VOL, 0, 31, 0, max98090_hp_tlv),
+};
+
+/* Left HeadPhone Mixer Switch */
+static struct snd_kcontrol_new max98090_left_hp_mixer_controls[] = {
+ SOC_DAPM_SINGLE("DACR Switch", MAX98090_0x29_L_HP_MIX, 1, 1, 0),
+ SOC_DAPM_SINGLE("DACL Switch", MAX98090_0x29_L_HP_MIX, 0, 1, 0),
+};
+
+/* Right HeadPhone Mixer Switch */
+static struct snd_kcontrol_new max98090_right_hp_mixer_controls[] = {
+ SOC_DAPM_SINGLE("DACR Switch", MAX98090_0x2A_R_HP_MIX, 1, 1, 0),
+ SOC_DAPM_SINGLE("DACL Switch", MAX98090_0x2A_R_HP_MIX, 0, 1, 0),
+};
+
+static struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
+ /* Output */
+ SND_SOC_DAPM_OUTPUT("HPL"),
+ SND_SOC_DAPM_OUTPUT("HPR"),
+
+ /* PGA */
+ SND_SOC_DAPM_PGA("HPL Out", MAX98090_0x3F_OUT_ENABLE, 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HPR Out", MAX98090_0x3F_OUT_ENABLE, 6, 0, NULL, 0),
+
+ /* Mixer */
+ SND_SOC_DAPM_MIXER("HPL Mixer", SND_SOC_NOPM, 0, 0,
+ max98090_left_hp_mixer_controls,
+ ARRAY_SIZE(max98090_left_hp_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("HPR Mixer", SND_SOC_NOPM, 0, 0,
+ max98090_right_hp_mixer_controls,
+ ARRAY_SIZE(max98090_right_hp_mixer_controls)),
+
+ /* DAC */
+ SND_SOC_DAPM_DAC("DACL", "Hifi Playback", MAX98090_0x3F_OUT_ENABLE, 0, 0),
+ SND_SOC_DAPM_DAC("DACR", "Hifi Playback", MAX98090_0x3F_OUT_ENABLE, 1, 0),
+};
+
+static struct snd_soc_dapm_route max98090_audio_map[] = {
+ /* Output */
+ {"HPL", NULL, "HPL Out"},
+ {"HPR", NULL, "HPR Out"},
+
+ /* PGA */
+ {"HPL Out", NULL, "HPL Mixer"},
+ {"HPR Out", NULL, "HPR Mixer"},
+
+ /* Mixer*/
+ {"HPL Mixer", "DACR Switch", "DACR"},
+ {"HPL Mixer", "DACL Switch", "DACL"},
+
+ {"HPR Mixer", "DACR Switch", "DACR"},
+ {"HPR Mixer", "DACL Switch", "DACL"},
+};
+
+static bool max98090_volatile(struct device *dev, unsigned int reg)
+{
+ if ((reg == MAX98090_0x01_INT_STS) ||
+ (reg == MAX98090_0x02_JACK_STS) ||
+ (reg > MAX98090_REG_MAX_CACHED))
+ return true;
+
+ return false;
+}
+
+static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val;
+
+ switch (params_rate(params)) {
+ case 96000:
+ val = 1 << 5;
+ break;
+ case 32000:
+ val = 1 << 4;
+ break;
+ case 48000:
+ val = 1 << 3;
+ break;
+ case 44100:
+ val = 1 << 2;
+ break;
+ case 16000:
+ val = 1 << 1;
+ break;
+ case 8000:
+ val = 1 << 0;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported rate\n");
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, MAX98090_0x05_SAMPLE_RATE, 0x03F, val);
+
+ return 0;
+}
+
+static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val;
+
+ snd_soc_update_bits(codec, MAX98090_0x45_DEV_SHUTDOWN,
+ MAX98090_SHDNRUN, 0);
+
+ switch (freq) {
+ case 26000000:
+ val = 1 << 7;
+ break;
+ case 19200000:
+ val = 1 << 6;
+ break;
+ case 13000000:
+ val = 1 << 5;
+ break;
+ case 12288000:
+ val = 1 << 4;
+ break;
+ case 12000000:
+ val = 1 << 3;
+ break;
+ case 11289600:
+ val = 1 << 2;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid master clock frequency\n");
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, MAX98090_0x04_SYS_CLK, 0xFD, val);
+
+ snd_soc_update_bits(codec, MAX98090_0x45_DEV_SHUTDOWN,
+ MAX98090_SHDNRUN, MAX98090_SHDNRUN);
+
+ dev_dbg(dai->dev, "sysclk is %uHz\n", freq);
+
+ return 0;
+}
+
+static int max98090_dai_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int is_master;
+ u8 val;
+
+ /* master/slave mode */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ is_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ is_master = 0;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported clock\n");
+ return -EINVAL;
+ }
+
+ /* format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val = (is_master) ? MAX98090_I2S_M : MAX98090_I2S_S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = (is_master) ? MAX98090_RJ_M : MAX98090_RJ_S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = (is_master) ? MAX98090_LJ_M : MAX98090_LJ_S;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported format\n");
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, MAX98090_0x06_DAI_IF,
+ MAX98090_DAI_IF_MASK, val);
+
+ return 0;
+}
+
+#define MAX98090_RATES SNDRV_PCM_RATE_8000_96000
+#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops max98090_dai_ops = {
+ .set_sysclk = max98090_dai_set_sysclk,
+ .set_fmt = max98090_dai_set_fmt,
+ .hw_params = max98090_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver max98090_dai = {
+ .name = "max98090-Hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98090_RATES,
+ .formats = MAX98090_FORMATS,
+ },
+ .ops = &max98090_dai_ops,
+};
+
+static int max98090_probe(struct snd_soc_codec *codec)
+{
+ struct max98090_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct device *dev = codec->dev;
+ int ret;
+
+ codec->control_data = priv->regmap;
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ /* Device active */
+ snd_soc_update_bits(codec, MAX98090_0x45_DEV_SHUTDOWN,
+ MAX98090_SHDNRUN, MAX98090_SHDNRUN);
+
+ return 0;
+}
+
+static int max98090_remove(struct snd_soc_codec *codec)
+{
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_max98090 = {
+ .probe = max98090_probe,
+ .remove = max98090_remove,
+ .controls = max98090_snd_controls,
+ .num_controls = ARRAY_SIZE(max98090_snd_controls),
+ .dapm_widgets = max98090_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98090_dapm_widgets),
+ .dapm_routes = max98090_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98090_audio_map),
+};
+
+static const struct regmap_config max98090_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX98090_REG_END,
+ .volatile_reg = max98090_volatile,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = max98090_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max98090_reg_defaults),
+};
+
+static int max98090_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max98090_priv *priv;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct max98090_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &max98090_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(dev, "Failed to init regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = regmap_read(priv->regmap, MAX98090_0xFF_REV_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read device revision: %d\n", ret);
+ return ret;
+ }
+ dev_info(dev, "revision 0x%02x\n", val);
+
+ ret = snd_soc_register_codec(dev,
+ &soc_codec_dev_max98090,
+ &max98090_dai, 1);
+
+ return ret;
+}
+
+static int max98090_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98090_i2c_id[] = {
+ { "max98090", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
+
+static struct i2c_driver max98090_i2c_driver = {
+ .driver = {
+ .name = "max98090",
+ .owner = THIS_MODULE,
+ },
+ .probe = max98090_i2c_probe,
+ .remove = max98090_i2c_remove,
+ .id_table = max98090_i2c_id,
+};
+module_i2c_driver(max98090_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98090 driver");
+MODULE_AUTHOR("Peter Hsiang, Kuninori Morimoto");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 38d43c59d3f..41cdd164297 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2511,7 +2511,7 @@ static int max98095_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit max98095_i2c_remove(struct i2c_client *client)
+static int max98095_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -2529,7 +2529,7 @@ static struct i2c_driver max98095_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max98095_i2c_probe,
- .remove = __devexit_p(max98095_i2c_remove),
+ .remove = max98095_i2c_remove,
.id_table = max98095_i2c_id,
};
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index efe535c37b3..58c38a5b481 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -329,8 +329,8 @@ static struct snd_soc_codec_driver soc_codec_dev_max9850 = {
.num_dapm_routes = ARRAY_SIZE(max9850_dapm_routes),
};
-static int __devinit max9850_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int max9850_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct max9850_priv *max9850;
int ret;
@@ -347,7 +347,7 @@ static int __devinit max9850_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int max9850_i2c_remove(struct i2c_client *client)
+static int max9850_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -365,7 +365,7 @@ static struct i2c_driver max9850_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max9850_i2c_probe,
- .remove = __devexit_p(max9850_i2c_remove),
+ .remove = max9850_i2c_remove,
.id_table = max9850_i2c_id,
};
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index d15e5943c85..6b6c74cd83e 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -258,8 +258,8 @@ int max9877_add_controls(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(max9877_add_controls);
-static int __devinit max9877_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max9877_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
i2c = client;
@@ -268,7 +268,7 @@ static int __devinit max9877_i2c_probe(struct i2c_client *client,
return 0;
}
-static __devexit int max9877_i2c_remove(struct i2c_client *client)
+static int max9877_i2c_remove(struct i2c_client *client)
{
i2c = NULL;
@@ -287,7 +287,7 @@ static struct i2c_driver max9877_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = max9877_i2c_probe,
- .remove = __devexit_p(max9877_i2c_remove),
+ .remove = max9877_i2c_remove,
.id_table = max9877_i2c_id,
};
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index bc955999c8a..5402dfbbb71 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -779,7 +779,7 @@ static struct platform_driver mc13783_codec_driver = {
.owner = THIS_MODULE,
},
.probe = mc13783_codec_probe,
- .remove = __devexit_p(mc13783_codec_remove),
+ .remove = mc13783_codec_remove,
};
module_platform_driver(mc13783_codec_driver);
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index 96aa5fa0516..26118828782 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -626,8 +626,8 @@ static const struct regmap_config ml26124_i2c_regmap = {
.write_flag_mask = 0x01,
};
-static __devinit int ml26124_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ml26124_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct ml26124_priv *priv;
int ret;
@@ -649,7 +649,7 @@ static __devinit int ml26124_i2c_probe(struct i2c_client *i2c,
&soc_codec_dev_ml26124, &ml26124_dai, 1);
}
-static __devexit int ml26124_i2c_remove(struct i2c_client *client)
+static int ml26124_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -667,7 +667,7 @@ static struct i2c_driver ml26124_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ml26124_i2c_probe,
- .remove = __devexit_p(ml26124_i2c_remove),
+ .remove = ml26124_i2c_remove,
.id_table = ml26124_i2c_id,
};
diff --git a/sound/soc/codecs/omap-hdmi.c b/sound/soc/codecs/omap-hdmi.c
index 1bf5c74f5f9..529d06444c5 100644
--- a/sound/soc/codecs/omap-hdmi.c
+++ b/sound/soc/codecs/omap-hdmi.c
@@ -39,13 +39,13 @@ static struct snd_soc_dai_driver omap_hdmi_codec_dai = {
},
};
-static __devinit int omap_hdmi_codec_probe(struct platform_device *pdev)
+static int omap_hdmi_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &omap_hdmi_codec,
&omap_hdmi_codec_dai, 1);
}
-static __devexit int omap_hdmi_codec_remove(struct platform_device *pdev)
+static int omap_hdmi_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -58,7 +58,7 @@ static struct platform_driver omap_hdmi_codec_driver = {
},
.probe = omap_hdmi_codec_probe,
- .remove = __devexit_p(omap_hdmi_codec_remove),
+ .remove = omap_hdmi_codec_remove,
};
module_platform_driver(omap_hdmi_codec_driver);
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index edcaa7ea548..f2a6282b41f 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -149,13 +149,13 @@ static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
.resume = pcm3008_soc_resume,
};
-static int __devinit pcm3008_codec_probe(struct platform_device *pdev)
+static int pcm3008_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_pcm3008, &pcm3008_dai, 1);
}
-static int __devexit pcm3008_codec_remove(struct platform_device *pdev)
+static int pcm3008_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -165,7 +165,7 @@ MODULE_ALIAS("platform:pcm3008-codec");
static struct platform_driver pcm3008_codec_driver = {
.probe = pcm3008_codec_probe,
- .remove = __devexit_p(pcm3008_codec_remove),
+ .remove = pcm3008_codec_remove,
.driver = {
.name = "pcm3008-codec",
.owner = THIS_MODULE,
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 960d0e93cce..912c9cbc272 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -1382,7 +1382,7 @@ static int rt5631_hifi_pcm_params(struct snd_pcm_substream *substream,
timesofbclk);
if (coeff < 0) {
dev_err(codec->dev, "Fail to get coeff\n");
- return -EINVAL;
+ return coeff;
}
switch (params_format(params)) {
@@ -1748,7 +1748,7 @@ static int rt5631_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int rt5631_i2c_remove(struct i2c_client *client)
+static int rt5631_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1760,7 +1760,7 @@ static struct i2c_driver rt5631_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = rt5631_i2c_probe,
- .remove = __devexit_p(rt5631_i2c_remove),
+ .remove = rt5631_i2c_remove,
.id_table = rt5631_i2c_id,
};
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index df2f99d1d42..cb1675cd8e1 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1404,8 +1404,8 @@ static struct snd_soc_codec_driver sgtl5000_driver = {
.num_dapm_routes = ARRAY_SIZE(sgtl5000_dapm_routes),
};
-static __devinit int sgtl5000_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sgtl5000_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct sgtl5000_priv *sgtl5000;
int ret;
@@ -1422,7 +1422,7 @@ static __devinit int sgtl5000_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int sgtl5000_i2c_remove(struct i2c_client *client)
+static int sgtl5000_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -1449,7 +1449,7 @@ static struct i2c_driver sgtl5000_i2c_driver = {
.of_match_table = sgtl5000_dt_ids,
},
.probe = sgtl5000_i2c_probe,
- .remove = __devexit_p(sgtl5000_i2c_remove),
+ .remove = sgtl5000_i2c_remove,
.id_table = sgtl5000_id,
};
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
new file mode 100644
index 00000000000..f2d61a18783
--- /dev/null
+++ b/sound/soc/codecs/si476x.c
@@ -0,0 +1,256 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+
+#include <linux/i2c.h>
+
+#include <linux/mfd/si476x-core.h>
+
+enum si476x_audio_registers {
+ SI476X_DIGITAL_IO_OUTPUT_FORMAT = 0x0203,
+ SI476X_DIGITAL_IO_OUTPUT_SAMPLE_RATE = 0x0202,
+};
+
+enum si476x_digital_io_output_format {
+ SI476X_DIGITAL_IO_SLOT_SIZE_SHIFT = 11,
+ SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT = 8,
+};
+
+#define SI476X_DIGITAL_IO_OUTPUT_WIDTH_MASK ((0b111 << SI476X_DIGITAL_IO_SLOT_SIZE_SHIFT) | \
+ (0b111 << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT))
+#define SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK (0b1111110)
+
+enum si476x_daudio_formats {
+ SI476X_DAUDIO_MODE_I2S = (0x0 << 1),
+ SI476X_DAUDIO_MODE_DSP_A = (0x6 << 1),
+ SI476X_DAUDIO_MODE_DSP_B = (0x7 << 1),
+ SI476X_DAUDIO_MODE_LEFT_J = (0x8 << 1),
+ SI476X_DAUDIO_MODE_RIGHT_J = (0x9 << 1),
+
+ SI476X_DAUDIO_MODE_IB = (1 << 5),
+ SI476X_DAUDIO_MODE_IF = (1 << 6),
+};
+
+enum si476x_pcm_format {
+ SI476X_PCM_FORMAT_S8 = 2,
+ SI476X_PCM_FORMAT_S16_LE = 4,
+ SI476X_PCM_FORMAT_S20_3LE = 5,
+ SI476X_PCM_FORMAT_S24_LE = 6,
+};
+
+static unsigned int si476x_codec_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ int err;
+ struct si476x_core *core = codec->control_data;
+
+ si476x_core_lock(core);
+ err = si476x_core_cmd_get_property(core, reg);
+ si476x_core_unlock(core);
+
+ return err;
+}
+
+static int si476x_codec_write(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int val)
+{
+ int err;
+ struct si476x_core *core = codec->control_data;
+
+ si476x_core_lock(core);
+ err = si476x_core_cmd_set_property(core, reg, val);
+ si476x_core_unlock(core);
+
+ return err;
+}
+
+static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ int err;
+ u16 format = 0;
+
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
+ return -EINVAL;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ format |= SI476X_DAUDIO_MODE_DSP_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format |= SI476X_DAUDIO_MODE_DSP_B;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ format |= SI476X_DAUDIO_MODE_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ format |= SI476X_DAUDIO_MODE_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ format |= SI476X_DAUDIO_MODE_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ format |= SI476X_DAUDIO_MODE_IB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_LEFT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ format |= SI476X_DAUDIO_MODE_IB |
+ SI476X_DAUDIO_MODE_IF;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ format |= SI476X_DAUDIO_MODE_IB;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ format |= SI476X_DAUDIO_MODE_IF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = snd_soc_update_bits(codec_dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
+ SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK,
+ format);
+ if (err < 0) {
+ dev_err(codec_dai->codec->dev, "Failed to set output format\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int rate, width, err;
+
+ rate = params_rate(params);
+ if (rate < 32000 || rate > 48000) {
+ dev_err(dai->codec->dev, "Rate: %d is not supported\n", rate);
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+		width = SI476X_PCM_FORMAT_S8;
+		break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ width = SI476X_PCM_FORMAT_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ width = SI476X_PCM_FORMAT_S20_3LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ width = SI476X_PCM_FORMAT_S24_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = snd_soc_write(dai->codec, SI476X_DIGITAL_IO_OUTPUT_SAMPLE_RATE,
+ rate);
+ if (err < 0) {
+ dev_err(dai->codec->dev, "Failed to set sample rate\n");
+ return err;
+ }
+
+ err = snd_soc_update_bits(dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
+ SI476X_DIGITAL_IO_OUTPUT_WIDTH_MASK,
+ (width << SI476X_DIGITAL_IO_SLOT_SIZE_SHIFT) |
+ (width << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT));
+ if (err < 0) {
+ dev_err(dai->codec->dev, "Failed to set output width\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int si476x_codec_probe(struct snd_soc_codec *codec)
+{
+ codec->control_data = i2c_mfd_cell_to_core(codec->dev);
+ return 0;
+}
+
+static struct snd_soc_dai_ops si476x_dai_ops = {
+ .hw_params = si476x_codec_hw_params,
+ .set_fmt = si476x_codec_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver si476x_dai = {
+ .name = "si476x-codec",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+
+ .rates = SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE
+ },
+ .ops = &si476x_dai_ops,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_si476x = {
+ .probe = si476x_codec_probe,
+ .read = si476x_codec_read,
+ .write = si476x_codec_write,
+};
+
+static int si476x_platform_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_si476x,
+ &si476x_dai, 1);
+}
+
+static int si476x_platform_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+MODULE_ALIAS("platform:si476x-codec");
+
+static struct platform_driver si476x_platform_driver = {
+ .driver = {
+ .name = "si476x-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = si476x_platform_probe,
+ .remove = si476x_platform_remove,
+};
+module_platform_driver(si476x_platform_driver);
+
+MODULE_AUTHOR("Andrey Smirnov <andrey.smirnov@convergeddevices.net>");
+MODULE_DESCRIPTION("ASoC Si4761/64 codec driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index 5be42bf5699..4068f249123 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(process_sigma_firmware);
static int sigma_action_write_regmap(void *control_data,
const struct sigma_action *sa, size_t len)
{
- return regmap_raw_write(control_data, le16_to_cpu(sa->addr),
+ return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
sa->payload, len - 2);
}
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index 50dbdb9357e..d1ae869d318 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -896,14 +896,14 @@ struct snd_soc_codec_driver sn95031_codec = {
.num_dapm_routes = ARRAY_SIZE(sn95031_audio_map),
};
-static int __devinit sn95031_device_probe(struct platform_device *pdev)
+static int sn95031_device_probe(struct platform_device *pdev)
{
pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
sn95031_dais, ARRAY_SIZE(sn95031_dais));
}
-static int __devexit sn95031_device_remove(struct platform_device *pdev)
+static int sn95031_device_remove(struct platform_device *pdev)
{
pr_debug("codec device remove called\n");
snd_soc_unregister_codec(&pdev->dev);
@@ -916,7 +916,7 @@ static struct platform_driver sn95031_codec_driver = {
.owner = THIS_MODULE,
},
.probe = sn95031_device_probe,
- .remove = __devexit_p(sn95031_device_remove),
+ .remove = sn95031_device_remove,
};
module_platform_driver(sn95031_codec_driver);
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 079066fef42..f8d30e5f637 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -691,7 +691,7 @@ static const struct regmap_config ssm2602_regmap_config = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit ssm2602_spi_probe(struct spi_device *spi)
+static int ssm2602_spi_probe(struct spi_device *spi)
{
struct ssm2602_priv *ssm2602;
int ret;
@@ -713,7 +713,7 @@ static int __devinit ssm2602_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit ssm2602_spi_remove(struct spi_device *spi)
+static int ssm2602_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -725,7 +725,7 @@ static struct spi_driver ssm2602_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ssm2602_spi_probe,
- .remove = __devexit_p(ssm2602_spi_remove),
+ .remove = ssm2602_spi_remove,
};
#endif
@@ -736,7 +736,7 @@ static struct spi_driver ssm2602_spi_driver = {
* low = 0x1a
* high = 0x1b
*/
-static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
+static int ssm2602_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct ssm2602_priv *ssm2602;
@@ -759,7 +759,7 @@ static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
+static int ssm2602_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -780,7 +780,7 @@ static struct i2c_driver ssm2602_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ssm2602_i2c_probe,
- .remove = __devexit_p(ssm2602_i2c_remove),
+ .remove = ssm2602_i2c_remove,
.id_table = ssm2602_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 0935bfe6247..cfb55fe35e9 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -995,8 +995,8 @@ static const struct regmap_config sta32x_regmap = {
.volatile_reg = sta32x_reg_is_volatile,
};
-static __devinit int sta32x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sta32x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct sta32x_priv *sta32x;
int ret, i;
@@ -1033,7 +1033,7 @@ static __devinit int sta32x_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int sta32x_i2c_remove(struct i2c_client *client)
+static int sta32x_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1053,7 +1053,7 @@ static struct i2c_driver sta32x_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = sta32x_i2c_probe,
- .remove = __devexit_p(sta32x_i2c_remove),
+ .remove = sta32x_i2c_remove,
.id_table = sta32x_i2c_id,
};
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 9e314486238..ab355c4f0b2 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -380,8 +380,8 @@ static const struct regmap_config sta529_regmap = {
.num_reg_defaults = ARRAY_SIZE(sta529_reg_defaults),
};
-static __devinit int sta529_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sta529_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct sta529 *sta529;
int ret;
@@ -412,7 +412,7 @@ static __devinit int sta529_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit sta529_i2c_remove(struct i2c_client *client)
+static int sta529_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -431,7 +431,7 @@ static struct i2c_driver sta529_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = sta529_i2c_probe,
- .remove = __devexit_p(sta529_i2c_remove),
+ .remove = sta529_i2c_remove,
.id_table = sta529_i2c_id,
};
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 982e437799a..2eda85ba79a 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -385,13 +385,13 @@ static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
.reg_cache_default = stac9766_reg,
};
-static __devinit int stac9766_probe(struct platform_device *pdev)
+static int stac9766_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_stac9766, stac9766_dai, ARRAY_SIZE(stac9766_dai));
}
-static int __devexit stac9766_remove(struct platform_device *pdev)
+static int stac9766_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -404,7 +404,7 @@ static struct platform_driver stac9766_codec_driver = {
},
.probe = stac9766_probe,
- .remove = __devexit_p(stac9766_remove),
+ .remove = stac9766_remove,
};
module_platform_driver(stac9766_codec_driver);
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index f230292ba96..17df4e32fea 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/cdev.h>
#include <linux/slab.h>
@@ -65,6 +66,7 @@ struct aic32x4_priv {
u32 power_cfg;
u32 micpga_routing;
bool swapdacs;
+ int rstn_gpio;
};
/* 0dB min, 1dB steps */
@@ -627,10 +629,20 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
u32 tmp_reg;
+ int ret;
codec->hw_write = (hw_write_t) i2c_master_send;
codec->control_data = aic32x4->control_data;
+ if (aic32x4->rstn_gpio >= 0) {
+ ret = devm_gpio_request_one(codec->dev, aic32x4->rstn_gpio,
+ GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
+ if (ret != 0)
+ return ret;
+ ndelay(10);
+ gpio_set_value(aic32x4->rstn_gpio, 1);
+ }
+
snd_soc_write(codec, AIC32X4_RESET, 0x01);
/* Power platform configuration */
@@ -675,6 +687,16 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
ARRAY_SIZE(aic32x4_snd_controls));
aic32x4_add_widgets(codec);
+ /*
+ * Workaround: for an unknown reason, the ADC needs to be powered up
+ * and down for the first capture to work properly. It seems related to
+ * a HW BUG or some kind of behavior not documented in the datasheet.
+ */
+ tmp_reg = snd_soc_read(codec, AIC32X4_ADCSETUP);
+ snd_soc_write(codec, AIC32X4_ADCSETUP, tmp_reg |
+ AIC32X4_LADC_EN | AIC32X4_RADC_EN);
+ snd_soc_write(codec, AIC32X4_ADCSETUP, tmp_reg);
+
return 0;
}
@@ -694,8 +716,8 @@ static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
.set_bias_level = aic32x4_set_bias_level,
};
-static __devinit int aic32x4_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int aic32x4_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct aic32x4_pdata *pdata = i2c->dev.platform_data;
struct aic32x4_priv *aic32x4;
@@ -713,10 +735,12 @@ static __devinit int aic32x4_i2c_probe(struct i2c_client *i2c,
aic32x4->power_cfg = pdata->power_cfg;
aic32x4->swapdacs = pdata->swapdacs;
aic32x4->micpga_routing = pdata->micpga_routing;
+ aic32x4->rstn_gpio = pdata->rstn_gpio;
} else {
aic32x4->power_cfg = 0;
aic32x4->swapdacs = false;
aic32x4->micpga_routing = 0;
+ aic32x4->rstn_gpio = -1;
}
ret = snd_soc_register_codec(&i2c->dev,
@@ -724,7 +748,7 @@ static __devinit int aic32x4_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int aic32x4_i2c_remove(struct i2c_client *client)
+static int aic32x4_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -742,7 +766,7 @@ static struct i2c_driver aic32x4_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = aic32x4_i2c_probe,
- .remove = __devexit_p(aic32x4_i2c_remove),
+ .remove = aic32x4_i2c_remove,
.id_table = aic32x4_i2c_id,
};
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index aae2b244039..35774223fd9 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -94,6 +94,9 @@
#define AIC32X4_WORD_LEN_24BITS 0x02
#define AIC32X4_WORD_LEN_32BITS 0x03
+#define AIC32X4_LADC_EN (1 << 7)
+#define AIC32X4_RADC_EN (1 << 6)
+
#define AIC32X4_I2S_MODE 0x00
#define AIC32X4_DSP_MODE 0x01
#define AIC32X4_RIGHT_JUSTIFIED_MODE 0x02
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d2e16c5d7d1..782b0cded2e 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1514,8 +1514,8 @@ static struct snd_soc_dai_driver dac33_dai = {
.ops = &dac33_dai_ops,
};
-static int __devinit dac33_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int dac33_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tlv320dac33_platform_data *pdata;
struct tlv320dac33_priv *dac33;
@@ -1586,7 +1586,7 @@ err_gpio:
return ret;
}
-static int __devexit dac33_i2c_remove(struct i2c_client *client)
+static int dac33_i2c_remove(struct i2c_client *client)
{
struct tlv320dac33_priv *dac33 = i2c_get_clientdata(client);
@@ -1617,7 +1617,7 @@ static struct i2c_driver tlv320dac33_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = dac33_i2c_probe,
- .remove = __devexit_p(dac33_i2c_remove),
+ .remove = dac33_i2c_remove,
.id_table = tlv320dac33_i2c_id,
};
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 565ff39ad3a..c58bee8346c 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -359,8 +359,8 @@ int tpa6130a2_add_controls(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
-static int __devinit tpa6130a2_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tpa6130a2_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct device *dev;
struct tpa6130a2_data *data;
@@ -398,7 +398,8 @@ static int __devinit tpa6130a2_probe(struct i2c_client *client,
TPA6130A2_MUTE_L;
if (data->power_gpio >= 0) {
- ret = gpio_request(data->power_gpio, "tpa6130a2 enable");
+ ret = devm_gpio_request(dev, data->power_gpio,
+ "tpa6130a2 enable");
if (ret < 0) {
dev_err(dev, "Failed to request power GPIO (%d)\n",
data->power_gpio);
@@ -419,16 +420,16 @@ static int __devinit tpa6130a2_probe(struct i2c_client *client,
break;
}
- data->supply = regulator_get(dev, regulator);
+ data->supply = devm_regulator_get(dev, regulator);
if (IS_ERR(data->supply)) {
ret = PTR_ERR(data->supply);
dev_err(dev, "Failed to request supply: %d\n", ret);
- goto err_regulator;
+ goto err_gpio;
}
ret = tpa6130a2_power(1);
if (ret != 0)
- goto err_power;
+ goto err_gpio;
/* Read version */
@@ -440,31 +441,19 @@ static int __devinit tpa6130a2_probe(struct i2c_client *client,
/* Disable the chip */
ret = tpa6130a2_power(0);
if (ret != 0)
- goto err_power;
+ goto err_gpio;
return 0;
-err_power:
- regulator_put(data->supply);
-err_regulator:
- if (data->power_gpio >= 0)
- gpio_free(data->power_gpio);
err_gpio:
tpa6130a2_client = NULL;
return ret;
}
-static int __devexit tpa6130a2_remove(struct i2c_client *client)
+static int tpa6130a2_remove(struct i2c_client *client)
{
- struct tpa6130a2_data *data = i2c_get_clientdata(client);
-
tpa6130a2_power(0);
-
- if (data->power_gpio >= 0)
- gpio_free(data->power_gpio);
-
- regulator_put(data->supply);
tpa6130a2_client = NULL;
return 0;
@@ -483,7 +472,7 @@ static struct i2c_driver tpa6130a2_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tpa6130a2_probe,
- .remove = __devexit_p(tpa6130a2_remove),
+ .remove = tpa6130a2_remove,
.id_table = tpa6130a2_id,
};
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index e7f608996c4..63b280b0603 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -2334,13 +2334,13 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
.num_dapm_routes = ARRAY_SIZE(intercon),
};
-static int __devinit twl4030_codec_probe(struct platform_device *pdev)
+static int twl4030_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_twl4030,
twl4030_dai, ARRAY_SIZE(twl4030_dai));
}
-static int __devexit twl4030_codec_remove(struct platform_device *pdev)
+static int twl4030_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -2350,7 +2350,7 @@ MODULE_ALIAS("platform:twl4030-codec");
static struct platform_driver twl4030_codec_driver = {
.probe = twl4030_codec_probe,
- .remove = __devexit_p(twl4030_codec_remove),
+ .remove = twl4030_codec_remove,
.driver = {
.name = "twl4030-codec",
.owner = THIS_MODULE,
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 00b85cc1b9a..3fc3fc64dd8 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -1229,13 +1229,13 @@ static struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
.num_dapm_routes = ARRAY_SIZE(intercon),
};
-static int __devinit twl6040_codec_probe(struct platform_device *pdev)
+static int twl6040_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_twl6040,
twl6040_dai, ARRAY_SIZE(twl6040_dai));
}
-static int __devexit twl6040_codec_remove(struct platform_device *pdev)
+static int twl6040_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -1247,7 +1247,7 @@ static struct platform_driver twl6040_codec_driver = {
.owner = THIS_MODULE,
},
.probe = twl6040_codec_probe,
- .remove = __devexit_p(twl6040_codec_remove),
+ .remove = twl6040_codec_remove,
};
module_platform_driver(twl6040_codec_driver);
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 6c3d43b8ee8..6d0aa44c375 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -601,13 +601,13 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
.set_bias_level = uda134x_set_bias_level,
};
-static int __devinit uda134x_codec_probe(struct platform_device *pdev)
+static int uda134x_codec_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_uda134x, &uda134x_dai, 1);
}
-static int __devexit uda134x_codec_remove(struct platform_device *pdev)
+static int uda134x_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -619,7 +619,7 @@ static struct platform_driver uda134x_codec_driver = {
.owner = THIS_MODULE,
},
.probe = uda134x_codec_probe,
- .remove = __devexit_p(uda134x_codec_remove),
+ .remove = uda134x_codec_remove,
};
module_platform_driver(uda134x_codec_driver);
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 2502214b84a..fd0a314bc20 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -795,8 +795,8 @@ static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int uda1380_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int uda1380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct uda1380_priv *uda1380;
int ret;
@@ -814,7 +814,7 @@ static __devinit int uda1380_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int __devexit uda1380_i2c_remove(struct i2c_client *i2c)
+static int uda1380_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
return 0;
@@ -832,7 +832,7 @@ static struct i2c_driver uda1380_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = uda1380_i2c_probe,
- .remove = __devexit_p(uda1380_i2c_remove),
+ .remove = uda1380_i2c_remove,
.id_table = uda1380_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 7b24d6d192e..54cd3da09ab 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -485,13 +485,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wl1273 = {
.remove = wl1273_remove,
};
-static int __devinit wl1273_platform_probe(struct platform_device *pdev)
+static int wl1273_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wl1273,
&wl1273_dai, 1);
}
-static int __devexit wl1273_platform_remove(struct platform_device *pdev)
+static int wl1273_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -505,7 +505,7 @@ static struct platform_driver wl1273_platform_driver = {
.owner = THIS_MODULE,
},
.probe = wl1273_platform_probe,
- .remove = __devexit_p(wl1273_platform_remove),
+ .remove = wl1273_platform_remove,
};
module_platform_driver(wl1273_platform_driver);
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 99afc003a08..ad2fee4bb4c 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -31,6 +31,9 @@
#define DEVICE_ID_WM0010 10
+/* We only support v1 of the .dfw INFO record */
+#define INFO_VERSION 1
+
enum dfw_cmd {
DFW_CMD_FUSE = 0x01,
DFW_CMD_CODE_HDR,
@@ -46,6 +49,13 @@ struct dfw_binrec {
uint8_t data[0];
} __packed;
+struct dfw_inforec {
+ u8 info_version;
+ u8 tool_major_version;
+ u8 tool_minor_version;
+ u8 dsp_target;
+};
+
struct dfw_pllrec {
u8 command;
u32 length:24;
@@ -97,7 +107,6 @@ struct wm0010_priv {
enum wm0010_state state;
bool boot_failed;
- int boot_done;
bool ready;
bool pll_running;
int max_spi_freq;
@@ -234,7 +243,7 @@ static void wm0010_boot_xfer_complete(void *data)
break;
case 0x55555555:
- if (wm0010->boot_done == 0)
+ if (wm0010->state < WM0010_STAGE2)
break;
dev_err(codec->dev,
"%d: ROM bootloader running in stage 2\n", i);
@@ -321,7 +330,6 @@ static void wm0010_boot_xfer_complete(void *data)
break;
}
- wm0010->boot_done++;
if (xfer->done)
complete(xfer->done);
}
@@ -334,94 +342,198 @@ static void byte_swap_64(u64 *data_in, u64 *data_out, u32 len)
data_out[i] = cpu_to_be64(le64_to_cpu(data_in[i]));
}
-static int wm0010_boot(struct snd_soc_codec *codec)
+static int wm0010_firmware_load(char *name, struct snd_soc_codec *codec)
{
struct spi_device *spi = to_spi_device(codec->dev);
struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
- unsigned long flags;
struct list_head xfer_list;
struct wm0010_boot_xfer *xfer;
int ret;
struct completion done;
const struct firmware *fw;
const struct dfw_binrec *rec;
- struct spi_message m;
- struct spi_transfer t;
- struct dfw_pllrec pll_rec;
- u32 *img, *p;
- u64 *img_swap;
- u8 *out;
+ const struct dfw_inforec *inforec;
+ u64 *img;
+ u8 *out, dsp;
u32 len, offset;
- int i;
- spin_lock_irqsave(&wm0010->irq_lock, flags);
- if (wm0010->state != WM0010_POWER_OFF)
- dev_warn(wm0010->dev, "DSP already powered up!\n");
- spin_unlock_irqrestore(&wm0010->irq_lock, flags);
+ INIT_LIST_HEAD(&xfer_list);
- if (wm0010->sysclk > 26000000) {
- dev_err(codec->dev, "Max DSP clock frequency is 26MHz\n");
- ret = -ECANCELED;
- goto err;
+ ret = request_firmware(&fw, name, codec->dev);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request application: %d\n",
+ ret);
+ return ret;
}
- INIT_LIST_HEAD(&xfer_list);
+ rec = (const struct dfw_binrec *)fw->data;
+ inforec = (const struct dfw_inforec *)rec->data;
+ offset = 0;
+ dsp = inforec->dsp_target;
+ wm0010->boot_failed = false;
+ BUG_ON(!list_empty(&xfer_list));
+ init_completion(&done);
- mutex_lock(&wm0010->lock);
- wm0010->pll_running = false;
+ /* First record should be INFO */
+ if (rec->command != DFW_CMD_INFO) {
+ dev_err(codec->dev, "First record not INFO\r\n");
+ ret = -EINVAL;
+ goto abort;
+ }
- dev_dbg(codec->dev, "max_spi_freq: %d\n", wm0010->max_spi_freq);
+ if (inforec->info_version != INFO_VERSION) {
+ dev_err(codec->dev,
+ "Unsupported version (%02d) of INFO record\r\n",
+ inforec->info_version);
+ ret = -EINVAL;
+ goto abort;
+ }
- ret = regulator_bulk_enable(ARRAY_SIZE(wm0010->core_supplies),
- wm0010->core_supplies);
- if (ret != 0) {
- dev_err(&spi->dev, "Failed to enable core supplies: %d\n",
- ret);
- mutex_unlock(&wm0010->lock);
- goto err;
+ dev_dbg(codec->dev, "Version v%02d INFO record found\r\n",
+ inforec->info_version);
+
+ /* Check it's a DSP file */
+ if (dsp != DEVICE_ID_WM0010) {
+ dev_err(codec->dev, "Not a WM0010 firmware file.\r\n");
+ ret = -EINVAL;
+ goto abort;
}
- ret = regulator_enable(wm0010->dbvdd);
- if (ret != 0) {
- dev_err(&spi->dev, "Failed to enable DBVDD: %d\n", ret);
- goto err_core;
+ /* Skip the info record as we don't need to send it */
+ offset += ((rec->length) + 8);
+ rec = (void *)&rec->data[rec->length];
+
+ while (offset < fw->size) {
+ dev_dbg(codec->dev,
+ "Packet: command %d, data length = 0x%x\r\n",
+ rec->command, rec->length);
+ len = rec->length + 8;
+
+ out = kzalloc(len, GFP_KERNEL);
+ if (!out) {
+ dev_err(codec->dev,
+ "Failed to allocate RX buffer\n");
+ ret = -ENOMEM;
+ goto abort1;
+ }
+
+ img = kzalloc(len, GFP_KERNEL);
+ if (!img) {
+ dev_err(codec->dev,
+ "Failed to allocate image buffer\n");
+ ret = -ENOMEM;
+ goto abort1;
+ }
+
+ byte_swap_64((u64 *)&rec->command, img, len);
+
+ xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
+ if (!xfer) {
+ dev_err(codec->dev, "Failed to allocate xfer\n");
+ ret = -ENOMEM;
+ goto abort1;
+ }
+
+ xfer->codec = codec;
+ list_add_tail(&xfer->list, &xfer_list);
+
+ spi_message_init(&xfer->m);
+ xfer->m.complete = wm0010_boot_xfer_complete;
+ xfer->m.context = xfer;
+ xfer->t.tx_buf = img;
+ xfer->t.rx_buf = out;
+ xfer->t.len = len;
+ xfer->t.bits_per_word = 8;
+
+ if (!wm0010->pll_running) {
+ xfer->t.speed_hz = wm0010->sysclk / 6;
+ } else {
+ xfer->t.speed_hz = wm0010->max_spi_freq;
+
+ if (wm0010->board_max_spi_speed &&
+ (wm0010->board_max_spi_speed < wm0010->max_spi_freq))
+ xfer->t.speed_hz = wm0010->board_max_spi_speed;
+ }
+
+ /* Store max usable spi frequency for later use */
+ wm0010->max_spi_freq = xfer->t.speed_hz;
+
+ spi_message_add_tail(&xfer->t, &xfer->m);
+
+ offset += ((rec->length) + 8);
+ rec = (void *)&rec->data[rec->length];
+
+ if (offset >= fw->size) {
+ dev_dbg(codec->dev, "All transfers scheduled\n");
+ xfer->done = &done;
+ }
+
+ ret = spi_async(spi, &xfer->m);
+ if (ret != 0) {
+ dev_err(codec->dev, "Write failed: %d\n", ret);
+ goto abort1;
+ }
+
+ if (wm0010->boot_failed) {
+ dev_dbg(codec->dev, "Boot fail!\n");
+ ret = -EINVAL;
+ goto abort1;
+ }
}
- /* Release reset */
- gpio_set_value_cansleep(wm0010->gpio_reset, !wm0010->gpio_reset_value);
- spin_lock_irqsave(&wm0010->irq_lock, flags);
- wm0010->state = WM0010_OUT_OF_RESET;
- spin_unlock_irqrestore(&wm0010->irq_lock, flags);
+ wait_for_completion(&done);
+
+ ret = 0;
+
+abort1:
+ while (!list_empty(&xfer_list)) {
+ xfer = list_first_entry(&xfer_list, struct wm0010_boot_xfer,
+ list);
+ kfree(xfer->t.rx_buf);
+ kfree(xfer->t.tx_buf);
+ list_del(&xfer->list);
+ kfree(xfer);
+ }
+
+abort:
+ release_firmware(fw);
+ return ret;
+}
+
+static int wm0010_stage2_load(struct snd_soc_codec *codec)
+{
+ struct spi_device *spi = to_spi_device(codec->dev);
+ struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
+ const struct firmware *fw;
+ struct spi_message m;
+ struct spi_transfer t;
+ u32 *img;
+ u8 *out;
+ int i;
+ int ret = 0;
- /* First the bootloader */
ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
if (ret != 0) {
dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
ret);
- goto abort;
+ return ret;
}
- if (!wait_for_completion_timeout(&wm0010->boot_completion,
- msecs_to_jiffies(10)))
- dev_err(codec->dev, "Failed to get interrupt from DSP\n");
-
- spin_lock_irqsave(&wm0010->irq_lock, flags);
- wm0010->state = WM0010_BOOTROM;
- spin_unlock_irqrestore(&wm0010->irq_lock, flags);
-
dev_dbg(codec->dev, "Downloading %zu byte stage 2 loader\n", fw->size);
/* Copy to local buffer first as vmalloc causes problems for dma */
img = kzalloc(fw->size, GFP_KERNEL);
if (!img) {
dev_err(codec->dev, "Failed to allocate image buffer\n");
- goto abort;
+ ret = -ENOMEM;
+ goto abort2;
}
out = kzalloc(fw->size, GFP_KERNEL);
if (!out) {
dev_err(codec->dev, "Failed to allocate output buffer\n");
- goto abort;
+ ret = -ENOMEM;
+ goto abort1;
}
memcpy(img, &fw->data[0], fw->size);
@@ -447,20 +559,97 @@ static int wm0010_boot(struct snd_soc_codec *codec)
/* Look for errors from the boot ROM */
for (i = 0; i < fw->size; i++) {
if (out[i] != 0x55) {
- ret = -EBUSY;
dev_err(codec->dev, "Boot ROM error: %x in %d\n",
out[i], i);
wm0010_mark_boot_failure(wm0010);
+ ret = -EBUSY;
goto abort;
}
}
-
- release_firmware(fw);
- kfree(img);
+abort:
kfree(out);
+abort1:
+ kfree(img);
+abort2:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int wm0010_boot(struct snd_soc_codec *codec)
+{
+ struct spi_device *spi = to_spi_device(codec->dev);
+ struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
+ unsigned long flags;
+ int ret;
+ const struct firmware *fw;
+ struct spi_message m;
+ struct spi_transfer t;
+ struct dfw_pllrec pll_rec;
+ u32 *p, len;
+ u64 *img_swap;
+ u8 *out;
+ int i;
+
+ spin_lock_irqsave(&wm0010->irq_lock, flags);
+ if (wm0010->state != WM0010_POWER_OFF)
+ dev_warn(wm0010->dev, "DSP already powered up!\n");
+ spin_unlock_irqrestore(&wm0010->irq_lock, flags);
+
+ if (wm0010->sysclk > 26000000) {
+ dev_err(codec->dev, "Max DSP clock frequency is 26MHz\n");
+ ret = -ECANCELED;
+ goto err;
+ }
+
+ mutex_lock(&wm0010->lock);
+ wm0010->pll_running = false;
+
+ dev_dbg(codec->dev, "max_spi_freq: %d\n", wm0010->max_spi_freq);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm0010->core_supplies),
+ wm0010->core_supplies);
+ if (ret != 0) {
+ dev_err(&spi->dev, "Failed to enable core supplies: %d\n",
+ ret);
+ mutex_unlock(&wm0010->lock);
+ goto err;
+ }
+
+ ret = regulator_enable(wm0010->dbvdd);
+ if (ret != 0) {
+ dev_err(&spi->dev, "Failed to enable DBVDD: %d\n", ret);
+ goto err_core;
+ }
+
+ /* Release reset */
+ gpio_set_value_cansleep(wm0010->gpio_reset, !wm0010->gpio_reset_value);
+ spin_lock_irqsave(&wm0010->irq_lock, flags);
+ wm0010->state = WM0010_OUT_OF_RESET;
+ spin_unlock_irqrestore(&wm0010->irq_lock, flags);
+
+ /* First the bootloader */
+ ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
+ ret);
+ goto abort;
+ }
+
+ if (!wait_for_completion_timeout(&wm0010->boot_completion,
+ msecs_to_jiffies(20)))
+ dev_err(codec->dev, "Failed to get interrupt from DSP\n");
+
+ spin_lock_irqsave(&wm0010->irq_lock, flags);
+ wm0010->state = WM0010_BOOTROM;
+ spin_unlock_irqrestore(&wm0010->irq_lock, flags);
+
+ ret = wm0010_stage2_load(codec);
+ if (ret)
+ goto abort;
if (!wait_for_completion_timeout(&wm0010->boot_completion,
- msecs_to_jiffies(10)))
+ msecs_to_jiffies(20)))
dev_err(codec->dev, "Failed to get interrupt from DSP loader.\n");
spin_lock_irqsave(&wm0010->irq_lock, flags);
@@ -535,110 +724,10 @@ static int wm0010_boot(struct snd_soc_codec *codec)
} else
dev_dbg(codec->dev, "Not enabling DSP PLL.");
- ret = request_firmware(&fw, "wm0010.dfw", codec->dev);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request application: %d\n",
- ret);
- goto abort;
- }
-
- rec = (const struct dfw_binrec *)fw->data;
- offset = 0;
- wm0010->boot_done = 0;
- wm0010->boot_failed = false;
- BUG_ON(!list_empty(&xfer_list));
- init_completion(&done);
+ ret = wm0010_firmware_load("wm0010.dfw", codec);
- /* First record should be INFO */
- if (rec->command != DFW_CMD_INFO) {
- dev_err(codec->dev, "First record not INFO\r\n");
- goto abort;
- }
-
- /* Check it's a 0010 file */
- if (rec->data[0] != DEVICE_ID_WM0010) {
- dev_err(codec->dev, "Not a WM0010 firmware file.\r\n");
+ if (ret != 0)
goto abort;
- }
-
- /* Skip the info record as we don't need to send it */
- offset += ((rec->length) + 8);
- rec = (void *)&rec->data[rec->length];
-
- while (offset < fw->size) {
- dev_dbg(codec->dev,
- "Packet: command %d, data length = 0x%x\r\n",
- rec->command, rec->length);
- len = rec->length + 8;
-
- out = kzalloc(len, GFP_KERNEL);
- if (!out) {
- dev_err(codec->dev,
- "Failed to allocate RX buffer\n");
- goto abort;
- }
-
- img_swap = kzalloc(len, GFP_KERNEL);
- if (!img_swap) {
- dev_err(codec->dev,
- "Failed to allocate image buffer\n");
- goto abort;
- }
-
- /* We need to re-order for 0010 */
- byte_swap_64((u64 *)&rec->command, img_swap, len);
-
- xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
- if (!xfer) {
- dev_err(codec->dev, "Failed to allocate xfer\n");
- goto abort;
- }
-
- xfer->codec = codec;
- list_add_tail(&xfer->list, &xfer_list);
-
- spi_message_init(&xfer->m);
- xfer->m.complete = wm0010_boot_xfer_complete;
- xfer->m.context = xfer;
- xfer->t.tx_buf = img_swap;
- xfer->t.rx_buf = out;
- xfer->t.len = len;
- xfer->t.bits_per_word = 8;
-
- if (!wm0010->pll_running) {
- xfer->t.speed_hz = wm0010->sysclk / 6;
- } else {
- xfer->t.speed_hz = wm0010->max_spi_freq;
-
- if (wm0010->board_max_spi_speed &&
- (wm0010->board_max_spi_speed < wm0010->max_spi_freq))
- xfer->t.speed_hz = wm0010->board_max_spi_speed;
- }
-
- /* Store max usable spi frequency for later use */
- wm0010->max_spi_freq = xfer->t.speed_hz;
-
- spi_message_add_tail(&xfer->t, &xfer->m);
-
- offset += ((rec->length) + 8);
- rec = (void *)&rec->data[rec->length];
-
- if (offset >= fw->size) {
- dev_dbg(codec->dev, "All transfers scheduled\n");
- xfer->done = &done;
- }
-
- ret = spi_async(spi, &xfer->m);
- if (ret != 0) {
- dev_err(codec->dev, "Write failed: %d\n", ret);
- goto abort;
- }
-
- if (wm0010->boot_failed)
- goto abort;
- }
-
- wait_for_completion(&done);
spin_lock_irqsave(&wm0010->irq_lock, flags);
wm0010->state = WM0010_FIRMWARE;
@@ -646,17 +735,6 @@ static int wm0010_boot(struct snd_soc_codec *codec)
mutex_unlock(&wm0010->lock);
- release_firmware(fw);
-
- while (!list_empty(&xfer_list)) {
- xfer = list_first_entry(&xfer_list, struct wm0010_boot_xfer,
- list);
- kfree(xfer->t.rx_buf);
- kfree(xfer->t.tx_buf);
- list_del(&xfer->list);
- kfree(xfer);
- }
-
return 0;
abort:
@@ -784,7 +862,6 @@ static irqreturn_t wm0010_irq(int irq, void *data)
struct wm0010_priv *wm0010 = data;
switch (wm0010->state) {
- case WM0010_POWER_OFF:
case WM0010_OUT_OF_RESET:
case WM0010_BOOTROM:
case WM0010_STAGE2:
@@ -808,7 +885,7 @@ static int wm0010_probe(struct snd_soc_codec *codec)
return 0;
}
-static int __devinit wm0010_spi_probe(struct spi_device *spi)
+static int wm0010_spi_probe(struct spi_device *spi)
{
unsigned long gpio_flags;
int ret;
@@ -908,7 +985,7 @@ static int __devinit wm0010_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit wm0010_spi_remove(struct spi_device *spi)
+static int wm0010_spi_remove(struct spi_device *spi)
{
struct wm0010_priv *wm0010 = spi_get_drvdata(spi);
@@ -930,7 +1007,7 @@ static struct spi_driver wm0010_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm0010_spi_probe,
- .remove = __devexit_p(wm0010_spi_remove),
+ .remove = wm0010_spi_remove,
};
module_spi_driver(wm0010_spi_driver);
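The hunks above replace the open-coded .dfw parser in wm0010_boot() with a
wm0010_firmware_load() helper. Below is a minimal sketch (not part of the
patch) of the request_firmware()/release_firmware() pattern such a helper is
built around; the function name example_load_dfw and the send() callback are
hypothetical, and the real helper additionally byte-swaps each record and
queues it as an asynchronous SPI transfer:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>

static int example_load_dfw(struct device *dev, const char *name,
			    int (*send)(struct device *dev,
					const u8 *buf, size_t len))
{
	const struct firmware *fw;
	size_t offset = 0;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret != 0) {
		dev_err(dev, "Failed to request %s: %d\n", name, ret);
		return ret;
	}

	/* Walk the image in fixed-size chunks and push each one out */
	while (offset < fw->size) {
		size_t chunk = min_t(size_t, fw->size - offset, 4096);

		ret = send(dev, fw->data + offset, chunk);
		if (ret != 0)
			break;
		offset += chunk;
	}

	release_firmware(fw);
	return ret;
}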
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 951d7b49476..6e6b93d4696 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -153,7 +153,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm1250_ev1 = {
.idle_bias_off = true,
};
-static int __devinit wm1250_ev1_pdata(struct i2c_client *i2c)
+static int wm1250_ev1_pdata(struct i2c_client *i2c)
{
struct wm1250_ev1_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm1250_priv *wm1250;
@@ -199,8 +199,8 @@ static void wm1250_ev1_free(struct i2c_client *i2c)
gpio_free_array(wm1250->gpios, ARRAY_SIZE(wm1250->gpios));
}
-static int __devinit wm1250_ev1_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int wm1250_ev1_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
{
int id, board, rev, ret;
@@ -237,7 +237,7 @@ static int __devinit wm1250_ev1_probe(struct i2c_client *i2c,
return 0;
}
-static int __devexit wm1250_ev1_remove(struct i2c_client *i2c)
+static int wm1250_ev1_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
wm1250_ev1_free(i2c);
@@ -257,7 +257,7 @@ static struct i2c_driver wm1250_ev1_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm1250_ev1_probe,
- .remove = __devexit_p(wm1250_ev1_remove),
+ .remove = wm1250_ev1_remove,
.id_table = wm1250_ev1_i2c_id,
};
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 683dc43b1d8..1cbe88f01d6 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -646,7 +646,7 @@ static const struct snd_kcontrol_new wm2000_controls[] = {
static int wm2000_anc_power_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = w->codec;
struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
if (SND_SOC_DAPM_EVENT_ON(event))
@@ -764,8 +764,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm2000 = {
.num_controls = ARRAY_SIZE(wm2000_controls),
};
-static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int wm2000_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
{
struct wm2000_priv *wm2000;
struct wm2000_platform_data *pdata;
@@ -871,7 +871,7 @@ out:
return ret;
}
-static __devexit int wm2000_i2c_remove(struct i2c_client *i2c)
+static int wm2000_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
@@ -890,7 +890,7 @@ static struct i2c_driver wm2000_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm2000_i2c_probe,
- .remove = __devexit_p(wm2000_i2c_remove),
+ .remove = wm2000_i2c_remove,
.id_table = wm2000_i2c_id,
};
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index eab64a19398..afcf31df77e 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
@@ -32,6 +33,40 @@
#include <sound/wm2200.h>
#include "wm2200.h"
+#include "wmfw.h"
+#include "wm_adsp.h"
+
+#define WM2200_DSP_CONTROL_1 0x00
+#define WM2200_DSP_CONTROL_2 0x02
+#define WM2200_DSP_CONTROL_3 0x03
+#define WM2200_DSP_CONTROL_4 0x04
+#define WM2200_DSP_CONTROL_5 0x06
+#define WM2200_DSP_CONTROL_6 0x07
+#define WM2200_DSP_CONTROL_7 0x08
+#define WM2200_DSP_CONTROL_8 0x09
+#define WM2200_DSP_CONTROL_9 0x0A
+#define WM2200_DSP_CONTROL_10 0x0B
+#define WM2200_DSP_CONTROL_11 0x0C
+#define WM2200_DSP_CONTROL_12 0x0D
+#define WM2200_DSP_CONTROL_13 0x0F
+#define WM2200_DSP_CONTROL_14 0x10
+#define WM2200_DSP_CONTROL_15 0x11
+#define WM2200_DSP_CONTROL_16 0x12
+#define WM2200_DSP_CONTROL_17 0x13
+#define WM2200_DSP_CONTROL_18 0x14
+#define WM2200_DSP_CONTROL_19 0x16
+#define WM2200_DSP_CONTROL_20 0x17
+#define WM2200_DSP_CONTROL_21 0x18
+#define WM2200_DSP_CONTROL_22 0x1A
+#define WM2200_DSP_CONTROL_23 0x1B
+#define WM2200_DSP_CONTROL_24 0x1C
+#define WM2200_DSP_CONTROL_25 0x1E
+#define WM2200_DSP_CONTROL_26 0x20
+#define WM2200_DSP_CONTROL_27 0x21
+#define WM2200_DSP_CONTROL_28 0x22
+#define WM2200_DSP_CONTROL_29 0x23
+#define WM2200_DSP_CONTROL_30 0x24
+#define WM2200_DSP_CONTROL_31 0x26
/* The code assumes DCVDD is generated internally */
#define WM2200_NUM_CORE_SUPPLIES 2
@@ -49,6 +84,7 @@ struct wm2200_fll {
/* codec private data */
struct wm2200_priv {
+ struct wm_adsp dsp[2];
struct regmap *regmap;
struct device *dev;
struct snd_soc_codec *codec;
@@ -64,6 +100,72 @@ struct wm2200_priv {
int sysclk;
};
+#define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1)
+#define WM2200_DSP_SPACING 12288
+
+#define WM2200_DSP1_DM_BASE (WM2200_DSP_RANGE_BASE + (0 * WM2200_DSP_SPACING))
+#define WM2200_DSP1_PM_BASE (WM2200_DSP_RANGE_BASE + (1 * WM2200_DSP_SPACING))
+#define WM2200_DSP1_ZM_BASE (WM2200_DSP_RANGE_BASE + (2 * WM2200_DSP_SPACING))
+#define WM2200_DSP2_DM_BASE (WM2200_DSP_RANGE_BASE + (3 * WM2200_DSP_SPACING))
+#define WM2200_DSP2_PM_BASE (WM2200_DSP_RANGE_BASE + (4 * WM2200_DSP_SPACING))
+#define WM2200_DSP2_ZM_BASE (WM2200_DSP_RANGE_BASE + (5 * WM2200_DSP_SPACING))
+
+static const struct regmap_range_cfg wm2200_ranges[] = {
+ { .name = "DSP1DM", .range_min = WM2200_DSP1_DM_BASE,
+ .range_max = WM2200_DSP1_DM_BASE + 12287,
+ .selector_reg = WM2200_DSP1_CONTROL_3,
+ .selector_mask = WM2200_DSP1_PAGE_BASE_DM_0_MASK,
+ .selector_shift = WM2200_DSP1_PAGE_BASE_DM_0_SHIFT,
+ .window_start = WM2200_DSP1_DM_0, .window_len = 2048, },
+
+ { .name = "DSP1PM", .range_min = WM2200_DSP1_PM_BASE,
+ .range_max = WM2200_DSP1_PM_BASE + 12287,
+ .selector_reg = WM2200_DSP1_CONTROL_2,
+ .selector_mask = WM2200_DSP1_PAGE_BASE_PM_0_MASK,
+ .selector_shift = WM2200_DSP1_PAGE_BASE_PM_0_SHIFT,
+ .window_start = WM2200_DSP1_PM_0, .window_len = 768, },
+
+ { .name = "DSP1ZM", .range_min = WM2200_DSP1_ZM_BASE,
+ .range_max = WM2200_DSP1_ZM_BASE + 2047,
+ .selector_reg = WM2200_DSP1_CONTROL_4,
+ .selector_mask = WM2200_DSP1_PAGE_BASE_ZM_0_MASK,
+ .selector_shift = WM2200_DSP1_PAGE_BASE_ZM_0_SHIFT,
+ .window_start = WM2200_DSP1_ZM_0, .window_len = 1024, },
+
+ { .name = "DSP2DM", .range_min = WM2200_DSP2_DM_BASE,
+ .range_max = WM2200_DSP2_DM_BASE + 4095,
+ .selector_reg = WM2200_DSP2_CONTROL_3,
+ .selector_mask = WM2200_DSP2_PAGE_BASE_DM_0_MASK,
+ .selector_shift = WM2200_DSP2_PAGE_BASE_DM_0_SHIFT,
+ .window_start = WM2200_DSP2_DM_0, .window_len = 2048, },
+
+ { .name = "DSP2PM", .range_min = WM2200_DSP2_PM_BASE,
+ .range_max = WM2200_DSP2_PM_BASE + 11287,
+ .selector_reg = WM2200_DSP2_CONTROL_2,
+ .selector_mask = WM2200_DSP2_PAGE_BASE_PM_0_MASK,
+ .selector_shift = WM2200_DSP2_PAGE_BASE_PM_0_SHIFT,
+ .window_start = WM2200_DSP2_PM_0, .window_len = 768, },
+
+ { .name = "DSP2ZM", .range_min = WM2200_DSP2_ZM_BASE,
+ .range_max = WM2200_DSP2_ZM_BASE + 2047,
+ .selector_reg = WM2200_DSP2_CONTROL_4,
+ .selector_mask = WM2200_DSP2_PAGE_BASE_ZM_0_MASK,
+ .selector_shift = WM2200_DSP2_PAGE_BASE_ZM_0_SHIFT,
+ .window_start = WM2200_DSP2_ZM_0, .window_len = 1024, },
+};
+
+static const struct wm_adsp_region wm2200_dsp1_regions[] = {
+ { .type = WMFW_ADSP1_PM, .base = WM2200_DSP1_PM_BASE },
+ { .type = WMFW_ADSP1_DM, .base = WM2200_DSP1_DM_BASE },
+ { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP1_ZM_BASE },
+};
+
+static const struct wm_adsp_region wm2200_dsp2_regions[] = {
+ { .type = WMFW_ADSP1_PM, .base = WM2200_DSP2_PM_BASE },
+ { .type = WMFW_ADSP1_DM, .base = WM2200_DSP2_DM_BASE },
+ { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE },
+};
+
static struct reg_default wm2200_reg_defaults[] = {
{ 0x000B, 0x0000 }, /* R11 - Tone Generator 1 */
{ 0x0102, 0x0000 }, /* R258 - Clocking 3 */
@@ -407,6 +509,16 @@ static struct reg_default wm2200_reg_defaults[] = {
static bool wm2200_volatile_register(struct device *dev, unsigned int reg)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm2200_ranges); i++)
+ if ((reg >= wm2200_ranges[i].window_start &&
+ reg <= wm2200_ranges[i].window_start +
+ wm2200_ranges[i].window_len) ||
+ (reg >= wm2200_ranges[i].range_min &&
+ reg <= wm2200_ranges[i].range_max))
+ return true;
+
switch (reg) {
case WM2200_SOFTWARE_RESET:
case WM2200_DEVICE_REVISION:
@@ -423,6 +535,16 @@ static bool wm2200_volatile_register(struct device *dev, unsigned int reg)
static bool wm2200_readable_register(struct device *dev, unsigned int reg)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm2200_ranges); i++)
+ if ((reg >= wm2200_ranges[i].window_start &&
+ reg <= wm2200_ranges[i].window_start +
+ wm2200_ranges[i].window_len) ||
+ (reg >= wm2200_ranges[i].range_min &&
+ reg <= wm2200_ranges[i].range_max))
+ return true;
+
switch (reg) {
case WM2200_SOFTWARE_RESET:
case WM2200_DEVICE_REVISION:
@@ -880,7 +1002,7 @@ static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);
static const char *wm2200_mixer_texts[] = {
"None",
"Tone Generator",
- "AEC loopback",
+ "AEC Loopback",
"IN1L",
"IN1R",
"IN2L",
@@ -976,6 +1098,20 @@ static int wm2200_mixer_values[] = {
static WM2200_MUX_CTL_DECL(name##_in3); \
static WM2200_MUX_CTL_DECL(name##_in4)
+#define WM2200_DSP_ENUMS(name, base_reg) \
+ static WM2200_MUX_ENUM_DECL(name##_aux1_enum, base_reg); \
+ static WM2200_MUX_ENUM_DECL(name##_aux2_enum, base_reg + 1); \
+ static WM2200_MUX_ENUM_DECL(name##_aux3_enum, base_reg + 2); \
+ static WM2200_MUX_ENUM_DECL(name##_aux4_enum, base_reg + 3); \
+ static WM2200_MUX_ENUM_DECL(name##_aux5_enum, base_reg + 4); \
+ static WM2200_MUX_ENUM_DECL(name##_aux6_enum, base_reg + 5); \
+ static WM2200_MUX_CTL_DECL(name##_aux1); \
+ static WM2200_MUX_CTL_DECL(name##_aux2); \
+ static WM2200_MUX_CTL_DECL(name##_aux3); \
+ static WM2200_MUX_CTL_DECL(name##_aux4); \
+ static WM2200_MUX_CTL_DECL(name##_aux5); \
+ static WM2200_MUX_CTL_DECL(name##_aux6)
+
static const struct snd_kcontrol_new wm2200_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", WM2200_IN1L_CONTROL,
WM2200_IN1_OSR_SHIFT, 1, 0),
@@ -1051,6 +1187,9 @@ WM2200_MIXER_ENUMS(DSP1R, WM2200_DSP1RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2L, WM2200_DSP2LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2R, WM2200_DSP2RMIX_INPUT_1_SOURCE);
+WM2200_DSP_ENUMS(DSP1, WM2200_DSP1AUX1MIX_INPUT_1_SOURCE);
+WM2200_DSP_ENUMS(DSP2, WM2200_DSP2AUX1MIX_INPUT_1_SOURCE);
+
WM2200_MIXER_ENUMS(LHPF1, WM2200_LHPF1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(LHPF2, WM2200_LHPF2MIX_INPUT_1_SOURCE);
@@ -1064,8 +1203,19 @@ WM2200_MIXER_ENUMS(LHPF2, WM2200_LHPF2MIX_INPUT_1_SOURCE);
WM2200_MUX(name_str " Input 4", &name##_in4_mux), \
SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)
+#define WM2200_DSP_WIDGETS(name, name_str) \
+ WM2200_MIXER_WIDGETS(name##L, name_str "L"), \
+ WM2200_MIXER_WIDGETS(name##R, name_str "R"), \
+ WM2200_MUX(name_str " Aux 1", &name##_aux1_mux), \
+ WM2200_MUX(name_str " Aux 2", &name##_aux2_mux), \
+ WM2200_MUX(name_str " Aux 3", &name##_aux3_mux), \
+ WM2200_MUX(name_str " Aux 4", &name##_aux4_mux), \
+ WM2200_MUX(name_str " Aux 5", &name##_aux5_mux), \
+ WM2200_MUX(name_str " Aux 6", &name##_aux6_mux)
+
#define WM2200_MIXER_INPUT_ROUTES(name) \
{ name, "Tone Generator", "Tone Generator" }, \
+ { name, "AEC Loopback", "AEC Loopback" }, \
{ name, "IN1L", "IN1L PGA" }, \
{ name, "IN1R", "IN1R PGA" }, \
{ name, "IN2L", "IN2L PGA" }, \
@@ -1106,6 +1256,33 @@ WM2200_MIXER_ENUMS(LHPF2, WM2200_LHPF2MIX_INPUT_1_SOURCE);
WM2200_MIXER_INPUT_ROUTES(name " Input 3"), \
WM2200_MIXER_INPUT_ROUTES(name " Input 4")
+#define WM2200_DSP_AUX_ROUTES(name) \
+ { name, NULL, name " Aux 1" }, \
+ { name, NULL, name " Aux 2" }, \
+ { name, NULL, name " Aux 3" }, \
+ { name, NULL, name " Aux 4" }, \
+ { name, NULL, name " Aux 5" }, \
+ { name, NULL, name " Aux 6" }, \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 1"), \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 2"), \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 3"), \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 4"), \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 5"), \
+ WM2200_MIXER_INPUT_ROUTES(name " Aux 6")
+
+static const char *wm2200_aec_loopback_texts[] = {
+ "OUT1L", "OUT1R", "OUT2L", "OUT2R",
+};
+
+static const struct soc_enum wm2200_aec_loopback =
+ SOC_ENUM_SINGLE(WM2200_DAC_AEC_CONTROL_1,
+ WM2200_AEC_LOOPBACK_SRC_SHIFT,
+ ARRAY_SIZE(wm2200_aec_loopback_texts),
+ wm2200_aec_loopback_texts);
+
+static const struct snd_kcontrol_new wm2200_aec_loopback_mux =
+ SOC_DAPM_ENUM("AEC Loopback", wm2200_aec_loopback);
+
static const struct snd_soc_dapm_widget wm2200_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", WM2200_CLOCKING_3, WM2200_SYSCLK_ENA_SHIFT, 0,
NULL, 0),
@@ -1165,8 +1342,8 @@ SND_SOC_DAPM_PGA("LHPF1", WM2200_HPLPF1_1, WM2200_LHPF1_ENA_SHIFT, 0,
SND_SOC_DAPM_PGA("LHPF2", WM2200_HPLPF2_1, WM2200_LHPF2_ENA_SHIFT, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_E("DSP1", SND_SOC_NOPM, 0, 0, NULL, 0, NULL, 0),
-SND_SOC_DAPM_PGA_E("DSP2", SND_SOC_NOPM, 1, 0, NULL, 0, NULL, 0),
+WM_ADSP1("DSP1", 0),
+WM_ADSP1("DSP2", 1),
SND_SOC_DAPM_AIF_OUT("AIF1TX1", "Capture", 0,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX1_ENA_SHIFT, 0),
@@ -1181,6 +1358,9 @@ SND_SOC_DAPM_AIF_OUT("AIF1TX5", "Capture", 4,
SND_SOC_DAPM_AIF_OUT("AIF1TX6", "Capture", 5,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_MUX("AEC Loopback", WM2200_DAC_AEC_CONTROL_1,
+ WM2200_AEC_LOOPBACK_ENA_SHIFT, 0, &wm2200_aec_loopback_mux),
+
SND_SOC_DAPM_PGA_S("OUT1L", 0, WM2200_OUTPUT_ENABLES,
WM2200_OUT1L_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("OUT1R", 0, WM2200_OUTPUT_ENABLES,
@@ -1231,10 +1411,8 @@ WM2200_MIXER_WIDGETS(EQR, "EQR"),
WM2200_MIXER_WIDGETS(LHPF1, "LHPF1"),
WM2200_MIXER_WIDGETS(LHPF2, "LHPF2"),
-WM2200_MIXER_WIDGETS(DSP1L, "DSP1L"),
-WM2200_MIXER_WIDGETS(DSP1R, "DSP1R"),
-WM2200_MIXER_WIDGETS(DSP2L, "DSP2L"),
-WM2200_MIXER_WIDGETS(DSP2R, "DSP2R"),
+WM2200_DSP_WIDGETS(DSP1, "DSP1"),
+WM2200_DSP_WIDGETS(DSP2, "DSP2"),
WM2200_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
WM2200_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
@@ -1326,11 +1504,19 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
{ "SPK", NULL, "OUT2L" },
{ "SPK", NULL, "OUT2R" },
+ { "AEC Loopback", "OUT1L", "OUT1L" },
+ { "AEC Loopback", "OUT1R", "OUT1R" },
+ { "AEC Loopback", "OUT2L", "OUT2L" },
+ { "AEC Loopback", "OUT2R", "OUT2R" },
+
WM2200_MIXER_ROUTES("DSP1", "DSP1L"),
WM2200_MIXER_ROUTES("DSP1", "DSP1R"),
WM2200_MIXER_ROUTES("DSP2", "DSP2L"),
WM2200_MIXER_ROUTES("DSP2", "DSP2R"),
+ WM2200_DSP_AUX_ROUTES("DSP1"),
+ WM2200_DSP_AUX_ROUTES("DSP2"),
+
WM2200_MIXER_ROUTES("OUT1L", "OUT1L"),
WM2200_MIXER_ROUTES("OUT1R", "OUT1R"),
WM2200_MIXER_ROUTES("OUT2L", "OUT2L"),
@@ -1968,12 +2154,15 @@ static const struct regmap_config wm2200_regmap = {
.reg_bits = 16,
.val_bits = 16,
- .max_register = WM2200_MAX_REGISTER,
+ .max_register = WM2200_MAX_REGISTER + (ARRAY_SIZE(wm2200_ranges) *
+ WM2200_DSP_SPACING),
.reg_defaults = wm2200_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm2200_reg_defaults),
.volatile_reg = wm2200_volatile_register,
.readable_reg = wm2200_readable_register,
.cache_type = REGCACHE_RBTREE,
+ .ranges = wm2200_ranges,
+ .num_ranges = ARRAY_SIZE(wm2200_ranges),
};
static const unsigned int wm2200_dig_vu[] = {
@@ -1995,8 +2184,8 @@ static const unsigned int wm2200_mic_ctrl_reg[] = {
WM2200_IN3L_CONTROL,
};
-static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm2200_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm2200_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm2200_priv *wm2200;
@@ -2011,14 +2200,30 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
wm2200->dev = &i2c->dev;
init_completion(&wm2200->fll_lock);
- wm2200->regmap = regmap_init_i2c(i2c, &wm2200_regmap);
+ wm2200->regmap = devm_regmap_init_i2c(i2c, &wm2200_regmap);
if (IS_ERR(wm2200->regmap)) {
ret = PTR_ERR(wm2200->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
+ }
+
+ for (i = 0; i < 2; i++) {
+ wm2200->dsp[i].type = WMFW_ADSP1;
+ wm2200->dsp[i].part = "wm2200";
+ wm2200->dsp[i].num = i + 1;
+ wm2200->dsp[i].dev = &i2c->dev;
+ wm2200->dsp[i].regmap = wm2200->regmap;
}
+ wm2200->dsp[0].base = WM2200_DSP1_CONTROL_1;
+ wm2200->dsp[0].mem = wm2200_dsp1_regions;
+ wm2200->dsp[0].num_mems = ARRAY_SIZE(wm2200_dsp1_regions);
+
+ wm2200->dsp[1].base = WM2200_DSP2_CONTROL_1;
+ wm2200->dsp[1].mem = wm2200_dsp2_regions;
+ wm2200->dsp[1].num_mems = ARRAY_SIZE(wm2200_dsp2_regions);
+
if (pdata)
wm2200->pdata = *pdata;
@@ -2027,12 +2232,13 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
for (i = 0; i < ARRAY_SIZE(wm2200->core_supplies); i++)
wm2200->core_supplies[i].supply = wm2200_core_supply_names[i];
- ret = regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm2200->core_supplies),
- wm2200->core_supplies);
+ ret = devm_regulator_bulk_get(&i2c->dev,
+ ARRAY_SIZE(wm2200->core_supplies),
+ wm2200->core_supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request core supplies: %d\n",
ret);
- goto err_regmap;
+ return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm2200->core_supplies),
@@ -2040,12 +2246,13 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
if (ret != 0) {
dev_err(&i2c->dev, "Failed to enable core supplies: %d\n",
ret);
- goto err_core;
+ return ret;
}
if (wm2200->pdata.ldo_ena) {
- ret = gpio_request_one(wm2200->pdata.ldo_ena,
- GPIOF_OUT_INIT_HIGH, "WM2200 LDOENA");
+ ret = devm_gpio_request_one(&i2c->dev, wm2200->pdata.ldo_ena,
+ GPIOF_OUT_INIT_HIGH,
+ "WM2200 LDOENA");
if (ret < 0) {
dev_err(&i2c->dev, "Failed to request LDOENA %d: %d\n",
wm2200->pdata.ldo_ena, ret);
@@ -2055,8 +2262,9 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
}
if (wm2200->pdata.reset) {
- ret = gpio_request_one(wm2200->pdata.reset,
- GPIOF_OUT_INIT_HIGH, "WM2200 /RESET");
+ ret = devm_gpio_request_one(&i2c->dev, wm2200->pdata.reset,
+ GPIOF_OUT_INIT_HIGH,
+ "WM2200 /RESET");
if (ret < 0) {
dev_err(&i2c->dev, "Failed to request /RESET %d: %d\n",
wm2200->pdata.reset, ret);
@@ -2166,45 +2374,28 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
err_pm_runtime:
pm_runtime_disable(&i2c->dev);
err_reset:
- if (wm2200->pdata.reset) {
+ if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
- gpio_free(wm2200->pdata.reset);
- }
err_ldo:
- if (wm2200->pdata.ldo_ena) {
+ if (wm2200->pdata.ldo_ena)
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
- gpio_free(wm2200->pdata.ldo_ena);
- }
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
-err_core:
- regulator_bulk_free(ARRAY_SIZE(wm2200->core_supplies),
- wm2200->core_supplies);
-err_regmap:
- regmap_exit(wm2200->regmap);
-err:
return ret;
}
-static __devexit int wm2200_i2c_remove(struct i2c_client *i2c)
+static int wm2200_i2c_remove(struct i2c_client *i2c)
{
struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
snd_soc_unregister_codec(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm2200);
- if (wm2200->pdata.reset) {
+ if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
- gpio_free(wm2200->pdata.reset);
- }
- if (wm2200->pdata.ldo_ena) {
+ if (wm2200->pdata.ldo_ena)
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
- gpio_free(wm2200->pdata.ldo_ena);
- }
- regulator_bulk_free(ARRAY_SIZE(wm2200->core_supplies),
- wm2200->core_supplies);
- regmap_exit(wm2200->regmap);
return 0;
}
@@ -2267,7 +2458,7 @@ static struct i2c_driver wm2200_i2c_driver = {
.pm = &wm2200_pm,
},
.probe = wm2200_i2c_probe,
- .remove = __devexit_p(wm2200_i2c_remove),
+ .remove = wm2200_i2c_remove,
.id_table = wm2200_i2c_id,
};
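The DSP memory support above relies on regmap's paged-window ranges. As a
minimal sketch (not part of the patch) of how a regmap_range_cfg maps a large
virtual register range onto a small hardware window; all register numbers
here are invented:

#include <linux/regmap.h>

#define EX_VIRT_BASE	0x10000	/* start of the virtual range */
#define EX_WINDOW_BASE	0x0100	/* physical window registers */
#define EX_WINDOW_LEN	256
#define EX_PAGE_REG	0x00ff	/* page selector register */

static const struct regmap_range_cfg ex_ranges[] = {
	{
		.name = "EXAMPLE",
		.range_min = EX_VIRT_BASE,
		.range_max = EX_VIRT_BASE + 4095,
		.selector_reg = EX_PAGE_REG,
		.selector_mask = 0xffff,
		.selector_shift = 0,
		.window_start = EX_WINDOW_BASE,
		.window_len = EX_WINDOW_LEN,
	},
};

/*
 * With .ranges/.num_ranges set (and .max_register covering the virtual
 * space, as the wm2200_regmap change above does), a call such as
 * regmap_write(map, EX_VIRT_BASE + 300, val) makes the core write page 1
 * to EX_PAGE_REG and then write val to EX_WINDOW_BASE + 44; the DSP
 * DM/PM/ZM ranges use exactly this indirection.
 */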
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 7f567585832..5a5f3693623 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1233,7 +1233,7 @@ static const struct snd_soc_dapm_route wm5100_dapm_routes[] = {
{ "PWM2", NULL, "PWM2 Driver" },
};
-static const __devinitconst struct reg_default wm5100_reva_patches[] = {
+static const struct reg_default wm5100_reva_patches[] = {
{ WM5100_AUDIO_IF_1_10, 0 },
{ WM5100_AUDIO_IF_1_11, 1 },
{ WM5100_AUDIO_IF_1_12, 2 },
@@ -2414,8 +2414,8 @@ static const unsigned int wm5100_mic_ctrl_reg[] = {
WM5100_IN4L_CONTROL,
};
-static __devinit int wm5100_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm5100_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm5100_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm5100_priv *wm5100;
@@ -2639,7 +2639,7 @@ err:
return ret;
}
-static __devexit int wm5100_i2c_remove(struct i2c_client *i2c)
+static int wm5100_i2c_remove(struct i2c_client *i2c)
{
struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
@@ -2717,7 +2717,7 @@ static struct i2c_driver wm5100_i2c_driver = {
.pm = &wm5100_pm,
},
.probe = wm5100_i2c_probe,
- .remove = __devexit_p(wm5100_i2c_remove),
+ .remove = wm5100_i2c_remove,
.id_table = wm5100_i2c_id,
};
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 7394e73fa43..688ade08058 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -31,6 +31,7 @@
#include "arizona.h"
#include "wm5102.h"
+#include "wm_adsp.h"
struct wm5102_priv {
struct arizona_priv core;
@@ -42,6 +43,13 @@ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static const struct wm_adsp_region wm5102_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+
static const struct reg_default wm5102_sysclk_reva_patch[] = {
{ 0x3000, 0x2225 },
{ 0x3001, 0x3a03 },
@@ -627,11 +635,23 @@ SOC_DOUBLE_R_TLV("IN3 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3L,
ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3L_DIG_VOL_SHIFT,
0xbf, 0, digital_tlv),
+SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp),
+SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp),
+
ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+SND_SOC_BYTES_MASK("EQ1 Coefficients", ARIZONA_EQ1_1, 21,
+ ARIZONA_EQ1_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ2 Coefficients", ARIZONA_EQ2_1, 21,
+ ARIZONA_EQ2_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ3 Coefficients", ARIZONA_EQ3_1, 21,
+ ARIZONA_EQ3_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ4 Coefficients", ARIZONA_EQ4_1, 21,
+ ARIZONA_EQ4_ENA_MASK),
+
SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -687,6 +707,14 @@ ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
+SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
+SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
+SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+
+ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),
+
SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
@@ -708,14 +736,6 @@ ARIZONA_MIXER_CONTROLS("SPKOUTR", ARIZONA_OUT4RMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
-SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
- ARIZONA_OUT1_OSR_SHIFT, 1, 0),
-SOC_SINGLE("OUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
- ARIZONA_OUT2_OSR_SHIFT, 1, 0),
-SOC_SINGLE("EPOUT High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
- ARIZONA_OUT3_OSR_SHIFT, 1, 0),
-SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
- ARIZONA_OUT4_OSR_SHIFT, 1, 0),
SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
ARIZONA_OUT5_OSR_SHIFT, 1, 0),
@@ -745,16 +765,8 @@ SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
0xbf, 0, digital_tlv),
-SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
- ARIZONA_OUTPUT_PATH_CONFIG_1R,
- ARIZONA_OUT1L_PGA_VOL_SHIFT,
- 0x34, 0x40, 0, ana_tlv),
-SOC_DOUBLE_R_RANGE_TLV("OUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
- ARIZONA_OUTPUT_PATH_CONFIG_2R,
- ARIZONA_OUT2L_PGA_VOL_SHIFT,
- 0x34, 0x40, 0, ana_tlv),
-SOC_SINGLE_RANGE_TLV("EPOUT Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
- ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
+SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
+SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
@@ -819,11 +831,15 @@ ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(DSP1L, ARIZONA_DSP1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DSP1R, ARIZONA_DSP1RMIX_INPUT_1_SOURCE);
+ARIZONA_DSP_AUX_ENUMS(DSP1, ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE);
static const char *wm5102_aec_loopback_texts[] = {
"HPOUT1L", "HPOUT1R", "HPOUT2L", "HPOUT2R", "EPOUT",
@@ -864,6 +880,7 @@ SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0, 0),
SND_SOC_DAPM_SIGGEN("TONE"),
SND_SOC_DAPM_SIGGEN("NOISE"),
+SND_SOC_DAPM_SIGGEN("HAPTICS"),
SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
@@ -894,9 +911,9 @@ SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT,
SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
- ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_MICB2_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
- ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+ ARIZONA_MICB3_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),
@@ -996,6 +1013,8 @@ SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
+ARIZONA_DSP_WIDGETS(DSP1, "DSP1"),
+
SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
ARIZONA_AEC_LOOPBACK_ENA, 0, &wm5102_aec_loopback_mux),
@@ -1071,10 +1090,12 @@ ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"),
ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"),
-ARIZONA_MIXER_WIDGETS(ASRC1L, "ASRC1L"),
-ARIZONA_MIXER_WIDGETS(ASRC1R, "ASRC1R"),
-ARIZONA_MIXER_WIDGETS(ASRC2L, "ASRC2L"),
-ARIZONA_MIXER_WIDGETS(ASRC2R, "ASRC2R"),
+ARIZONA_MUX_WIDGETS(ASRC1L, "ASRC1L"),
+ARIZONA_MUX_WIDGETS(ASRC1R, "ASRC1R"),
+ARIZONA_MUX_WIDGETS(ASRC2L, "ASRC2L"),
+ARIZONA_MUX_WIDGETS(ASRC2R, "ASRC2R"),
+
+WM_ADSP2("DSP1", 0),
SND_SOC_DAPM_OUTPUT("HPOUT1L"),
SND_SOC_DAPM_OUTPUT("HPOUT1R"),
@@ -1094,6 +1115,7 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
{ name, "Noise Generator", "Noise Generator" }, \
{ name, "Tone Generator 1", "Tone Generator 1" }, \
{ name, "Tone Generator 2", "Tone Generator 2" }, \
+ { name, "Haptics", "HAPTICS" }, \
{ name, "AEC", "AEC Loopback" }, \
{ name, "IN1L", "IN1L PGA" }, \
{ name, "IN1R", "IN1R PGA" }, \
@@ -1127,7 +1149,13 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
{ name, "ASRC1L", "ASRC1L" }, \
{ name, "ASRC1R", "ASRC1R" }, \
{ name, "ASRC2L", "ASRC2L" }, \
- { name, "ASRC2R", "ASRC2R" }
+ { name, "ASRC2R", "ASRC2R" }, \
+ { name, "DSP1.1", "DSP1" }, \
+ { name, "DSP1.2", "DSP1" }, \
+ { name, "DSP1.3", "DSP1" }, \
+ { name, "DSP1.4", "DSP1" }, \
+ { name, "DSP1.5", "DSP1" }, \
+ { name, "DSP1.6", "DSP1" }
static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "AIF2 Capture", NULL, "DBVDD2" },
@@ -1213,6 +1241,11 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "IN3L PGA", NULL, "IN3L" },
{ "IN3R PGA", NULL, "IN3R" },
+ { "ASRC1L", NULL, "ASRC1L Input" },
+ { "ASRC1R", NULL, "ASRC1R Input" },
+ { "ASRC2L", NULL, "ASRC2L Input" },
+ { "ASRC2R", NULL, "ASRC2R Input" },
+
ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -1255,10 +1288,12 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
- ARIZONA_MIXER_ROUTES("ASRC1L", "ASRC1L"),
- ARIZONA_MIXER_ROUTES("ASRC1R", "ASRC1R"),
- ARIZONA_MIXER_ROUTES("ASRC2L", "ASRC2L"),
- ARIZONA_MIXER_ROUTES("ASRC2R", "ASRC2R"),
+ ARIZONA_MUX_ROUTES("ASRC1L"),
+ ARIZONA_MUX_ROUTES("ASRC1R"),
+ ARIZONA_MUX_ROUTES("ASRC2L"),
+ ARIZONA_MUX_ROUTES("ASRC2R"),
+
+ ARIZONA_DSP_ROUTES("DSP1"),
{ "AEC Loopback", "HPOUT1L", "OUT1L" },
{ "AEC Loopback", "HPOUT1R", "OUT1R" },
@@ -1377,9 +1412,28 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
static int wm5102_codec_probe(struct snd_soc_codec *codec)
{
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
codec->control_data = priv->core.arizona->regmap;
- return snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+
+ ret = snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+ if (ret != 0)
+ return ret;
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
+
+ priv->core.arizona->dapm = &codec->dapm;
+
+ return 0;
+}
+
+static int wm5102_codec_remove(struct snd_soc_codec *codec)
+{
+ struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->core.arizona->dapm = NULL;
+
+ return 0;
}
#define WM5102_DIG_VU 0x0200
@@ -1406,6 +1460,7 @@ static unsigned int wm5102_digital_vu[] = {
static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.probe = wm5102_codec_probe,
+ .remove = wm5102_codec_remove,
.idle_bias_off = true,
@@ -1420,11 +1475,11 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
};
-static int __devinit wm5102_probe(struct platform_device *pdev)
+static int wm5102_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct wm5102_priv *wm5102;
- int i;
+ int i, ret;
wm5102 = devm_kzalloc(&pdev->dev, sizeof(struct wm5102_priv),
GFP_KERNEL);
@@ -1434,6 +1489,19 @@ static int __devinit wm5102_probe(struct platform_device *pdev)
wm5102->core.arizona = arizona;
+ wm5102->core.adsp[0].part = "wm5102";
+ wm5102->core.adsp[0].num = 1;
+ wm5102->core.adsp[0].type = WMFW_ADSP2;
+ wm5102->core.adsp[0].base = ARIZONA_DSP1_CONTROL_1;
+ wm5102->core.adsp[0].dev = arizona->dev;
+ wm5102->core.adsp[0].regmap = arizona->regmap;
+ wm5102->core.adsp[0].mem = wm5102_dsp1_regions;
+ wm5102->core.adsp[0].num_mems = ARRAY_SIZE(wm5102_dsp1_regions);
+
+ ret = wm_adsp2_init(&wm5102->core.adsp[0], true);
+ if (ret != 0)
+ return ret;
+
for (i = 0; i < ARRAY_SIZE(wm5102->fll); i++)
wm5102->fll[i].vco_mult = 1;
@@ -1459,7 +1527,7 @@ static int __devinit wm5102_probe(struct platform_device *pdev)
wm5102_dai, ARRAY_SIZE(wm5102_dai));
}
-static int __devexit wm5102_remove(struct platform_device *pdev)
+static int wm5102_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1473,7 +1541,7 @@ static struct platform_driver wm5102_codec_driver = {
.owner = THIS_MODULE,
},
.probe = wm5102_probe,
- .remove = __devexit_p(wm5102_remove),
+ .remove = wm5102_remove,
};
module_platform_driver(wm5102_codec_driver);
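A minimal sketch (not part of the patch) of what a wm_adsp_region table
expresses: each entry maps a WMFW firmware section type to the base of the
corresponding DSP memory in the codec register map, so the generic ADSP
loader can place sections without codec-specific code. The addresses below
are invented; the real WM5102 table is in the hunk above:

#include "wm_adsp.h"

static const struct wm_adsp_region ex_dsp_regions[] = {
	{ .type = WMFW_ADSP2_PM, .base = 0x200000 },	/* program memory */
	{ .type = WMFW_ADSP2_ZM, .base = 0x280000 },	/* Z data memory */
	{ .type = WMFW_ADSP2_XM, .base = 0x290000 },	/* X data memory */
	{ .type = WMFW_ADSP2_YM, .base = 0x2a8000 },	/* Y data memory */
};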
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 9211e4192f7..ae80c8c2853 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -84,11 +84,23 @@ SOC_DOUBLE_R_TLV("IN4 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_4L,
ARIZONA_ADC_DIGITAL_VOLUME_4R, ARIZONA_IN4L_DIG_VOL_SHIFT,
0xbf, 0, digital_tlv),
+SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp),
+SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp),
+
ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+SND_SOC_BYTES_MASK("EQ1 Coefficients", ARIZONA_EQ1_1, 21,
+ ARIZONA_EQ1_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ2 Coefficients", ARIZONA_EQ2_1, 21,
+ ARIZONA_EQ2_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ3 Coefficients", ARIZONA_EQ3_1, 21,
+ ARIZONA_EQ3_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ4 Coefficients", ARIZONA_EQ4_1, 21,
+ ARIZONA_EQ4_ENA_MASK),
+
SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -148,6 +160,11 @@ ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
+SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
+SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
+SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+
SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
@@ -243,6 +260,9 @@ SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
ARIZONA_SPK2R_MUTE_SHIFT, 1, 1),
+SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
+SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
+
ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
@@ -308,10 +328,10 @@ ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
static const char *wm5110_aec_loopback_texts[] = {
"HPOUT1L", "HPOUT1R", "HPOUT2L", "HPOUT2R", "HPOUT3L", "HPOUT3R",
@@ -352,6 +372,7 @@ SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0, 0),
SND_SOC_DAPM_SIGGEN("TONE"),
SND_SOC_DAPM_SIGGEN("NOISE"),
+SND_SOC_DAPM_SIGGEN("HAPTICS"),
SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
@@ -585,10 +606,10 @@ ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"),
ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"),
-ARIZONA_MIXER_WIDGETS(ASRC1L, "ASRC1L"),
-ARIZONA_MIXER_WIDGETS(ASRC1R, "ASRC1R"),
-ARIZONA_MIXER_WIDGETS(ASRC2L, "ASRC2L"),
-ARIZONA_MIXER_WIDGETS(ASRC2R, "ASRC2R"),
+ARIZONA_MUX_WIDGETS(ASRC1L, "ASRC1L"),
+ARIZONA_MUX_WIDGETS(ASRC1R, "ASRC1R"),
+ARIZONA_MUX_WIDGETS(ASRC2L, "ASRC2L"),
+ARIZONA_MUX_WIDGETS(ASRC2R, "ASRC2R"),
SND_SOC_DAPM_OUTPUT("HPOUT1L"),
SND_SOC_DAPM_OUTPUT("HPOUT1R"),
@@ -610,6 +631,7 @@ SND_SOC_DAPM_OUTPUT("SPKDAT2R"),
{ name, "Noise Generator", "Noise Generator" }, \
{ name, "Tone Generator 1", "Tone Generator 1" }, \
{ name, "Tone Generator 2", "Tone Generator 2" }, \
+ { name, "Haptics", "HAPTICS" }, \
{ name, "AEC", "AEC Loopback" }, \
{ name, "IN1L", "IN1L PGA" }, \
{ name, "IN1R", "IN1R PGA" }, \
@@ -786,10 +808,10 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
- ARIZONA_MIXER_ROUTES("ASRC1L", "ASRC1L"),
- ARIZONA_MIXER_ROUTES("ASRC1R", "ASRC1R"),
- ARIZONA_MIXER_ROUTES("ASRC2L", "ASRC2L"),
- ARIZONA_MIXER_ROUTES("ASRC2R", "ASRC2R"),
+ ARIZONA_MUX_ROUTES("ASRC1L"),
+ ARIZONA_MUX_ROUTES("ASRC1R"),
+ ARIZONA_MUX_ROUTES("ASRC2L"),
+ ARIZONA_MUX_ROUTES("ASRC2R"),
{ "HPOUT1L", NULL, "OUT1L" },
{ "HPOUT1R", NULL, "OUT1R" },
@@ -902,9 +924,29 @@ static struct snd_soc_dai_driver wm5110_dai[] = {
static int wm5110_codec_probe(struct snd_soc_codec *codec)
{
struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
codec->control_data = priv->core.arizona->regmap;
- return snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+
+ ret = snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+ if (ret != 0)
+ return ret;
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
+
+ priv->core.arizona->dapm = &codec->dapm;
+
+ return 0;
+}
+
+static int wm5110_codec_remove(struct snd_soc_codec *codec)
+{
+ struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->core.arizona->dapm = NULL;
+
+ return 0;
}
#define WM5110_DIG_VU 0x0200
@@ -935,6 +977,7 @@ static unsigned int wm5110_digital_vu[] = {
static struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.probe = wm5110_codec_probe,
+ .remove = wm5110_codec_remove,
.idle_bias_off = true,
@@ -949,7 +992,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes),
};
-static int __devinit wm5110_probe(struct platform_device *pdev)
+static int wm5110_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct wm5110_priv *wm5110;
@@ -988,7 +1031,7 @@ static int __devinit wm5110_probe(struct platform_device *pdev)
wm5110_dai, ARRAY_SIZE(wm5110_dai));
}
-static int __devexit wm5110_remove(struct platform_device *pdev)
+static int wm5110_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1002,7 +1045,7 @@ static struct platform_driver wm5110_codec_driver = {
.owner = THIS_MODULE,
},
.probe = wm5110_probe,
- .remove = __devexit_p(wm5110_remove),
+ .remove = wm5110_remove,
};
module_platform_driver(wm5110_codec_driver);
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index a4cae060bf2..fb92fb47d63 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1500,7 +1500,7 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
for (i = 0; i < ARRAY_SIZE(supply_names); i++)
priv->supplies[i].supply = supply_names[i];
- ret = regulator_bulk_get(wm8350->dev, ARRAY_SIZE(priv->supplies),
+ ret = devm_regulator_bulk_get(wm8350->dev, ARRAY_SIZE(priv->supplies),
priv->supplies);
if (ret != 0)
return ret;
@@ -1607,8 +1607,6 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
- regulator_bulk_free(ARRAY_SIZE(priv->supplies), priv->supplies);
-
return 0;
}
@@ -1627,13 +1625,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.num_dapm_routes = ARRAY_SIZE(wm8350_dapm_routes),
};
-static int __devinit wm8350_probe(struct platform_device *pdev)
+static int wm8350_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8350,
&wm8350_dai, 1);
}
-static int __devexit wm8350_remove(struct platform_device *pdev)
+static int wm8350_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -1645,7 +1643,7 @@ static struct platform_driver wm8350_codec_driver = {
.owner = THIS_MODULE,
},
.probe = wm8350_probe,
- .remove = __devexit_p(wm8350_remove),
+ .remove = wm8350_remove,
};
module_platform_driver(wm8350_codec_driver);
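The wm8350 (and wm8400, below) changes switch to device-managed resource
acquisition. A minimal sketch (not part of the patch) of the pattern follows;
the supply names are invented. Because devm_regulator_bulk_get() ties the
regulators to the struct device lifetime, the regulator_bulk_free() calls in
the remove and error paths can simply be dropped:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const ex_supply_names[] = { "AVDD", "DBVDD" };

static int ex_get_supplies(struct device *dev,
			   struct regulator_bulk_data *supplies)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(ex_supply_names); i++)
		supplies[i].supply = ex_supply_names[i];

	/* Released automatically when the device is unbound */
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ex_supply_names),
				      supplies);
	if (ret != 0)
		dev_err(dev, "Failed to request supplies: %d\n", ret);

	return ret;
}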
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 5d277a915f8..af6d227e67b 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1373,7 +1373,7 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
codec->control_data = priv->wm8400 = wm8400;
priv->codec = codec;
- ret = regulator_bulk_get(wm8400->dev,
+ ret = devm_regulator_bulk_get(wm8400->dev,
ARRAY_SIZE(power), &power[0]);
if (ret != 0) {
dev_err(codec->dev, "Failed to get regulators: %d\n", ret);
@@ -1398,15 +1398,9 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8400_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
snd_soc_write(codec, WM8400_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
- if (!schedule_work(&priv->work)) {
- ret = -EINVAL;
- goto err_regulator;
- }
+ if (!schedule_work(&priv->work))
+ return -EINVAL;
return 0;
-
-err_regulator:
- regulator_bulk_free(ARRAY_SIZE(power), power);
- return ret;
}
static int wm8400_codec_remove(struct snd_soc_codec *codec)
@@ -1417,8 +1411,6 @@ static int wm8400_codec_remove(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8400_POWER_MANAGEMENT_1,
reg & (~WM8400_CODEC_ENA));
- regulator_bulk_free(ARRAY_SIZE(power), power);
-
return 0;
}
@@ -1439,13 +1431,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.num_dapm_routes = ARRAY_SIZE(wm8400_dapm_routes),
};
-static int __devinit wm8400_probe(struct platform_device *pdev)
+static int wm8400_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8400,
&wm8400_dai, 1);
}
-static int __devexit wm8400_remove(struct platform_device *pdev)
+static int wm8400_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -1457,7 +1449,7 @@ static struct platform_driver wm8400_codec_driver = {
.owner = THIS_MODULE,
},
.probe = wm8400_probe,
- .remove = __devexit_p(wm8400_remove),
+ .remove = wm8400_remove,
};
module_platform_driver(wm8400_codec_driver);
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index c12a54e72e8..6ed5433943e 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -608,10 +608,7 @@ static int wm8510_probe(struct snd_soc_codec *codec)
/* power down chip */
static int wm8510_remove(struct snd_soc_codec *codec)
{
- struct wm8510_priv *wm8510 = snd_soc_codec_get_drvdata(codec);
-
wm8510_set_bias_level(codec, SND_SOC_BIAS_OFF);
- kfree(wm8510);
return 0;
}
@@ -648,7 +645,7 @@ static const struct regmap_config wm8510_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8510_spi_probe(struct spi_device *spi)
+static int wm8510_spi_probe(struct spi_device *spi)
{
struct wm8510_priv *wm8510;
int ret;
@@ -670,7 +667,7 @@ static int __devinit wm8510_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8510_spi_remove(struct spi_device *spi)
+static int wm8510_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -683,13 +680,13 @@ static struct spi_driver wm8510_spi_driver = {
.of_match_table = wm8510_of_match,
},
.probe = wm8510_spi_probe,
- .remove = __devexit_p(wm8510_spi_remove),
+ .remove = wm8510_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8510_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8510_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8510_priv *wm8510;
int ret;
@@ -711,7 +708,7 @@ static __devinit int wm8510_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8510_i2c_remove(struct i2c_client *client)
+static int wm8510_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -730,7 +727,7 @@ static struct i2c_driver wm8510_i2c_driver = {
.of_match_table = wm8510_of_match,
},
.probe = wm8510_i2c_probe,
- .remove = __devexit_p(wm8510_i2c_remove),
+ .remove = wm8510_i2c_remove,
.id_table = wm8510_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 8d5c2767350..139bf9ac940 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -453,8 +453,8 @@ static const struct regmap_config wm8523_regmap = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8523_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8523_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8523_priv *wm8523;
unsigned int val;
@@ -528,7 +528,7 @@ err_enable:
return ret;
}
-static __devexit int wm8523_i2c_remove(struct i2c_client *client)
+static int wm8523_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -547,7 +547,7 @@ static struct i2c_driver wm8523_i2c_driver = {
.of_match_table = wm8523_of_match,
},
.probe = wm8523_i2c_probe,
- .remove = __devexit_p(wm8523_i2c_remove),
+ .remove = wm8523_i2c_remove,
.id_table = wm8523_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 8b8bb70f1eb..5b428b060d4 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -429,7 +429,7 @@ static const struct regmap_config wm8711_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8711_spi_probe(struct spi_device *spi)
+static int wm8711_spi_probe(struct spi_device *spi)
{
struct wm8711_priv *wm8711;
int ret;
@@ -451,7 +451,7 @@ static int __devinit wm8711_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8711_spi_remove(struct spi_device *spi)
+static int wm8711_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
@@ -465,13 +465,13 @@ static struct spi_driver wm8711_spi_driver = {
.of_match_table = wm8711_of_match,
},
.probe = wm8711_spi_probe,
- .remove = __devexit_p(wm8711_spi_remove),
+ .remove = wm8711_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8711_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int wm8711_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wm8711_priv *wm8711;
int ret;
@@ -493,7 +493,7 @@ static __devinit int wm8711_i2c_probe(struct i2c_client *client,
return ret;
}
-static __devexit int wm8711_i2c_remove(struct i2c_client *client)
+static int wm8711_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -512,7 +512,7 @@ static struct i2c_driver wm8711_i2c_driver = {
.of_match_table = wm8711_of_match,
},
.probe = wm8711_i2c_probe,
- .remove = __devexit_p(wm8711_i2c_remove),
+ .remove = wm8711_i2c_remove,
.id_table = wm8711_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index e8170562071..462f5e4d5c0 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -45,13 +45,13 @@ static struct snd_soc_dai_driver wm8727_dai = {
static struct snd_soc_codec_driver soc_codec_dev_wm8727;
-static __devinit int wm8727_probe(struct platform_device *pdev)
+static int wm8727_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm8727, &wm8727_dai, 1);
}
-static int __devexit wm8727_remove(struct platform_device *pdev)
+static int wm8727_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -64,7 +64,7 @@ static struct platform_driver wm8727_codec_driver = {
},
.probe = wm8727_probe,
- .remove = __devexit_p(wm8727_remove),
+ .remove = wm8727_remove,
};
module_platform_driver(wm8727_codec_driver);
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 00a12a0c391..c6a292dcded 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -280,7 +280,7 @@ static const struct regmap_config wm8728_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8728_spi_probe(struct spi_device *spi)
+static int wm8728_spi_probe(struct spi_device *spi)
{
struct wm8728_priv *wm8728;
int ret;
@@ -302,7 +302,7 @@ static int __devinit wm8728_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8728_spi_remove(struct spi_device *spi)
+static int wm8728_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
@@ -316,13 +316,13 @@ static struct spi_driver wm8728_spi_driver = {
.of_match_table = wm8728_of_match,
},
.probe = wm8728_spi_probe,
- .remove = __devexit_p(wm8728_spi_remove),
+ .remove = wm8728_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8728_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8728_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8728_priv *wm8728;
int ret;
@@ -344,7 +344,7 @@ static __devinit int wm8728_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8728_i2c_remove(struct i2c_client *client)
+static int wm8728_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -363,7 +363,7 @@ static struct i2c_driver wm8728_i2c_driver = {
.of_match_table = wm8728_of_match,
},
.probe = wm8728_i2c_probe,
- .remove = __devexit_p(wm8728_i2c_remove),
+ .remove = wm8728_i2c_remove,
.id_table = wm8728_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index bb1d26919b1..5276062d6c7 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -631,7 +631,7 @@ static const struct regmap_config wm8731_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8731_spi_probe(struct spi_device *spi)
+static int wm8731_spi_probe(struct spi_device *spi)
{
struct wm8731_priv *wm8731;
int ret;
@@ -661,7 +661,7 @@ static int __devinit wm8731_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit wm8731_spi_remove(struct spi_device *spi)
+static int wm8731_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -674,13 +674,13 @@ static struct spi_driver wm8731_spi_driver = {
.of_match_table = wm8731_of_match,
},
.probe = wm8731_spi_probe,
- .remove = __devexit_p(wm8731_spi_remove),
+ .remove = wm8731_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8731_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8731_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8731_priv *wm8731;
int ret;
@@ -710,7 +710,7 @@ static __devinit int wm8731_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static __devexit int wm8731_i2c_remove(struct i2c_client *client)
+static int wm8731_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -729,7 +729,7 @@ static struct i2c_driver wm8731_i2c_driver = {
.of_match_table = wm8731_of_match,
},
.probe = wm8731_i2c_probe,
- .remove = __devexit_p(wm8731_i2c_remove),
+ .remove = wm8731_i2c_remove,
.id_table = wm8731_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 5c9634f4c1f..2f167a8ca01 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -645,8 +645,8 @@ static const struct regmap_config wm8737_regmap = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8737_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8737_priv *wm8737;
int ret, i;
@@ -679,7 +679,7 @@ static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
}
-static __devexit int wm8737_i2c_remove(struct i2c_client *client)
+static int wm8737_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -699,13 +699,13 @@ static struct i2c_driver wm8737_i2c_driver = {
.of_match_table = wm8737_of_match,
},
.probe = wm8737_i2c_probe,
- .remove = __devexit_p(wm8737_i2c_remove),
+ .remove = wm8737_i2c_remove,
.id_table = wm8737_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8737_spi_probe(struct spi_device *spi)
+static int wm8737_spi_probe(struct spi_device *spi)
{
struct wm8737_priv *wm8737;
int ret, i;
@@ -737,7 +737,7 @@ static int __devinit wm8737_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8737_spi_remove(struct spi_device *spi)
+static int wm8737_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
@@ -751,7 +751,7 @@ static struct spi_driver wm8737_spi_driver = {
.of_match_table = wm8737_of_match,
},
.probe = wm8737_spi_probe,
- .remove = __devexit_p(wm8737_spi_remove),
+ .remove = wm8737_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 4281a080213..b18813cc7ba 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -522,7 +522,7 @@ static int wm8741_i2c_probe(struct i2c_client *i2c,
return ret;
}
- wm8741->regmap = regmap_init_i2c(i2c, &wm8741_regmap);
+ wm8741->regmap = devm_regmap_init_i2c(i2c, &wm8741_regmap);
if (IS_ERR(wm8741->regmap)) {
ret = PTR_ERR(wm8741->regmap);
dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
@@ -562,7 +562,7 @@ static struct i2c_driver wm8741_i2c_driver = {
#endif
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8741_spi_probe(struct spi_device *spi)
+static int wm8741_spi_probe(struct spi_device *spi)
{
struct wm8741_priv *wm8741;
int ret, i;
@@ -582,7 +582,7 @@ static int __devinit wm8741_spi_probe(struct spi_device *spi)
return ret;
}
- wm8741->regmap = regmap_init_spi(spi, &wm8741_regmap);
+ wm8741->regmap = devm_regmap_init_spi(spi, &wm8741_regmap);
if (IS_ERR(wm8741->regmap)) {
ret = PTR_ERR(wm8741->regmap);
dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
@@ -596,7 +596,7 @@ static int __devinit wm8741_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8741_spi_remove(struct spi_device *spi)
+static int wm8741_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -609,7 +609,7 @@ static struct spi_driver wm8741_spi_driver = {
.of_match_table = wm8741_of_match,
},
.probe = wm8741_spi_probe,
- .remove = __devexit_p(wm8741_spi_remove),
+ .remove = wm8741_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
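
wm8741 switches from regmap_init_i2c()/regmap_init_spi() to the device-managed devm_regmap_init_*() variants, so the map is released automatically on probe failure or unbind and the explicit regmap_exit() calls disappear from the error paths and remove() in the drivers below. A sketch of the resulting probe, again with hypothetical wm_example names and a placeholder regmap_config:

/*
 * Illustrative sketch only: device-managed regmap allocation.  The regmap
 * is freed by devres, so no regmap_exit() is needed in error paths or in
 * remove().  All wm_example identifiers are placeholders.
 */
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/soc.h>

struct wm_example_priv {
	struct regmap *regmap;
};

static const struct regmap_config wm_example_regmap = {
	.reg_bits = 7,
	.val_bits = 9,
};

static struct snd_soc_codec_driver soc_codec_dev_wm_example;
static struct snd_soc_dai_driver wm_example_dai = { .name = "wm_example-hifi" };

static int wm_example_i2c_probe(struct i2c_client *i2c,
				const struct i2c_device_id *id)
{
	struct wm_example_priv *priv;
	int ret;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	priv->regmap = devm_regmap_init_i2c(i2c, &wm_example_regmap);
	if (IS_ERR(priv->regmap)) {
		ret = PTR_ERR(priv->regmap);
		dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
		return ret;	/* nothing to unwind by hand */
	}

	i2c_set_clientdata(i2c, priv);

	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm_example,
				      &wm_example_dai, 1);
}
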
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 89151ca5e77..50d5ff61623 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of_device.h>
@@ -34,24 +35,55 @@
* We can't read the WM8750 register space when we
* are using 2 wire for device control, so we cache them instead.
*/
-static const u16 wm8750_reg[] = {
- 0x0097, 0x0097, 0x0079, 0x0079, /* 0 */
- 0x0000, 0x0008, 0x0000, 0x000a, /* 4 */
- 0x0000, 0x0000, 0x00ff, 0x00ff, /* 8 */
- 0x000f, 0x000f, 0x0000, 0x0000, /* 12 */
- 0x0000, 0x007b, 0x0000, 0x0032, /* 16 */
- 0x0000, 0x00c3, 0x00c3, 0x00c0, /* 20 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 24 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 28 */
- 0x0000, 0x0000, 0x0050, 0x0050, /* 32 */
- 0x0050, 0x0050, 0x0050, 0x0050, /* 36 */
- 0x0079, 0x0079, 0x0079, /* 40 */
+static const struct reg_default wm8750_reg_defaults[] = {
+ { 0, 0x0097 },
+ { 1, 0x0097 },
+ { 2, 0x0079 },
+ { 3, 0x0079 },
+ { 4, 0x0000 },
+ { 5, 0x0008 },
+ { 6, 0x0000 },
+ { 7, 0x000a },
+ { 8, 0x0000 },
+ { 9, 0x0000 },
+ { 10, 0x00ff },
+ { 11, 0x00ff },
+ { 12, 0x000f },
+ { 13, 0x000f },
+ { 14, 0x0000 },
+ { 15, 0x0000 },
+ { 16, 0x0000 },
+ { 17, 0x007b },
+ { 18, 0x0000 },
+ { 19, 0x0032 },
+ { 20, 0x0000 },
+ { 21, 0x00c3 },
+ { 22, 0x00c3 },
+ { 23, 0x00c0 },
+ { 24, 0x0000 },
+ { 25, 0x0000 },
+ { 26, 0x0000 },
+ { 27, 0x0000 },
+ { 28, 0x0000 },
+ { 29, 0x0000 },
+ { 30, 0x0000 },
+ { 31, 0x0000 },
+ { 32, 0x0000 },
+ { 33, 0x0000 },
+ { 34, 0x0050 },
+ { 35, 0x0050 },
+ { 36, 0x0050 },
+ { 37, 0x0050 },
+ { 38, 0x0050 },
+ { 39, 0x0050 },
+ { 40, 0x0079 },
+ { 41, 0x0079 },
+ { 42, 0x0079 },
};
/* codec private data */
struct wm8750_priv {
unsigned int sysclk;
- enum snd_soc_control_type control_type;
};
#define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0)
@@ -668,10 +700,9 @@ static int wm8750_resume(struct snd_soc_codec *codec)
static int wm8750_probe(struct snd_soc_codec *codec)
{
- struct wm8750_priv *wm8750 = snd_soc_codec_get_drvdata(codec);
int ret;
- ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8750->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
if (ret < 0) {
printk(KERN_ERR "wm8750: failed to set cache I/O: %d\n", ret);
return ret;
@@ -711,9 +742,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8750 = {
.suspend = wm8750_suspend,
.resume = wm8750_resume,
.set_bias_level = wm8750_set_bias_level,
- .reg_cache_size = ARRAY_SIZE(wm8750_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = wm8750_reg,
.controls = wm8750_snd_controls,
.num_controls = ARRAY_SIZE(wm8750_snd_controls),
@@ -730,10 +758,21 @@ static const struct of_device_id wm8750_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wm8750_of_match);
+static const struct regmap_config wm8750_regmap = {
+ .reg_bits = 7,
+ .val_bits = 9,
+ .max_register = WM8750_MOUTV,
+
+ .reg_defaults = wm8750_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8750_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8750_spi_probe(struct spi_device *spi)
+static int wm8750_spi_probe(struct spi_device *spi)
{
struct wm8750_priv *wm8750;
+ struct regmap *regmap;
int ret;
wm8750 = devm_kzalloc(&spi->dev, sizeof(struct wm8750_priv),
@@ -741,7 +780,10 @@ static int __devinit wm8750_spi_probe(struct spi_device *spi)
if (wm8750 == NULL)
return -ENOMEM;
- wm8750->control_type = SND_SOC_SPI;
+ regmap = devm_regmap_init_spi(spi, &wm8750_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
spi_set_drvdata(spi, wm8750);
ret = snd_soc_register_codec(&spi->dev,
@@ -749,7 +791,7 @@ static int __devinit wm8750_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8750_spi_remove(struct spi_device *spi)
+static int wm8750_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -770,15 +812,16 @@ static struct spi_driver wm8750_spi_driver = {
},
.id_table = wm8750_spi_ids,
.probe = wm8750_spi_probe,
- .remove = __devexit_p(wm8750_spi_remove),
+ .remove = wm8750_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8750_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8750_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8750_priv *wm8750;
+ struct regmap *regmap;
int ret;
wm8750 = devm_kzalloc(&i2c->dev, sizeof(struct wm8750_priv),
@@ -787,14 +830,17 @@ static __devinit int wm8750_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
i2c_set_clientdata(i2c, wm8750);
- wm8750->control_type = SND_SOC_I2C;
+
+ regmap = devm_regmap_init_i2c(i2c, &wm8750_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8750, &wm8750_dai, 1);
return ret;
}
-static __devexit int wm8750_i2c_remove(struct i2c_client *client)
+static int wm8750_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -814,7 +860,7 @@ static struct i2c_driver wm8750_i2c_driver = {
.of_match_table = wm8750_of_match,
},
.probe = wm8750_i2c_probe,
- .remove = __devexit_p(wm8750_i2c_remove),
+ .remove = wm8750_i2c_remove,
.id_table = wm8750_i2c_id,
};
#endif
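
wm8750 drops its flat u16 register cache in favour of a struct reg_default table consumed by regmap, with the cache type and defaults declared in the regmap_config and the map created in the I2C/SPI probe; the reg_cache_size/reg_word_size/reg_cache_default fields therefore leave the snd_soc_codec_driver. A condensed sketch of that configuration; the register numbers, values and WM_EXAMPLE_MAX_REGISTER are placeholders, not WM8750 data:

/*
 * Illustrative sketch only: register defaults as a reg_default table,
 * cached by regmap rather than by the ASoC flat cache.
 */
#include <linux/kernel.h>
#include <linux/regmap.h>

#define WM_EXAMPLE_MAX_REGISTER 42	/* placeholder last register */

static const struct reg_default wm_example_reg_defaults[] = {
	{ 0, 0x0097 },		/* { register, power-on default } */
	{ 1, 0x0097 },
	{ 2, 0x0079 },
	/* ... one entry per cached register, gaps are allowed ... */
};

static const struct regmap_config wm_example_regmap = {
	.reg_bits = 7,		/* 7-bit register addresses ... */
	.val_bits = 9,		/* ... with 9-bit values */
	.max_register = WM_EXAMPLE_MAX_REGISTER,

	.reg_defaults = wm_example_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm_example_reg_defaults),
	.cache_type = REGCACHE_RBTREE,	/* cache kept by regmap, not ASoC */
};
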
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 2e4a775ae56..0a4ab4c423d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1550,7 +1550,7 @@ static const struct regmap_config wm8753_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8753_spi_probe(struct spi_device *spi)
+static int wm8753_spi_probe(struct spi_device *spi)
{
struct wm8753_priv *wm8753;
int ret;
@@ -1562,36 +1562,25 @@ static int __devinit wm8753_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, wm8753);
- wm8753->regmap = regmap_init_spi(spi, &wm8753_regmap);
+ wm8753->regmap = devm_regmap_init_spi(spi, &wm8753_regmap);
if (IS_ERR(wm8753->regmap)) {
ret = PTR_ERR(wm8753->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8753,
wm8753_dai, ARRAY_SIZE(wm8753_dai));
- if (ret != 0) {
+ if (ret != 0)
dev_err(&spi->dev, "Failed to register CODEC: %d\n", ret);
- goto err_regmap;
- }
- return 0;
-
-err_regmap:
- regmap_exit(wm8753->regmap);
-err:
return ret;
}
-static int __devexit wm8753_spi_remove(struct spi_device *spi)
+static int wm8753_spi_remove(struct spi_device *spi)
{
- struct wm8753_priv *wm8753 = spi_get_drvdata(spi);
-
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(wm8753->regmap);
- kfree(wm8753);
return 0;
}
@@ -1602,13 +1591,13 @@ static struct spi_driver wm8753_spi_driver = {
.of_match_table = wm8753_of_match,
},
.probe = wm8753_spi_probe,
- .remove = __devexit_p(wm8753_spi_remove),
+ .remove = wm8753_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8753_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8753_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8753_priv *wm8753;
int ret;
@@ -1620,35 +1609,25 @@ static __devinit int wm8753_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8753);
- wm8753->regmap = regmap_init_i2c(i2c, &wm8753_regmap);
+ wm8753->regmap = devm_regmap_init_i2c(i2c, &wm8753_regmap);
if (IS_ERR(wm8753->regmap)) {
ret = PTR_ERR(wm8753->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8753,
wm8753_dai, ARRAY_SIZE(wm8753_dai));
- if (ret != 0) {
+ if (ret != 0)
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
- goto err_regmap;
- }
- return 0;
-
-err_regmap:
- regmap_exit(wm8753->regmap);
-err:
return ret;
}
-static __devexit int wm8753_i2c_remove(struct i2c_client *client)
+static int wm8753_i2c_remove(struct i2c_client *client)
{
- struct wm8753_priv *wm8753 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8753->regmap);
return 0;
}
@@ -1665,7 +1644,7 @@ static struct i2c_driver wm8753_i2c_driver = {
.of_match_table = wm8753_of_match,
},
.probe = wm8753_i2c_probe,
- .remove = __devexit_p(wm8753_i2c_remove),
+ .remove = wm8753_i2c_remove,
.id_table = wm8753_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index c7c0034d396..89a18d82f30 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -17,6 +17,7 @@
#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -35,19 +36,52 @@ static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = {
"DVDD"
};
-static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = {
- 0x7f, 0x7f, 0x7f, 0x7f,
- 0x7f, 0x7f, 0x7f, 0x7f,
- 0x7f, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0, 0x90, 0,
- 0, 0x22, 0x22, 0x3e,
- 0xc, 0xc, 0x100, 0x189,
- 0x189, 0x8770
+static const struct reg_default wm8770_reg_defaults[] = {
+ { 0, 0x7f },
+ { 1, 0x7f },
+ { 2, 0x7f },
+ { 3, 0x7f },
+ { 4, 0x7f },
+ { 5, 0x7f },
+ { 6, 0x7f },
+ { 7, 0x7f },
+ { 8, 0x7f },
+ { 9, 0xff },
+ { 10, 0xff },
+ { 11, 0xff },
+ { 12, 0xff },
+ { 13, 0xff },
+ { 14, 0xff },
+ { 15, 0xff },
+ { 16, 0xff },
+ { 17, 0xff },
+ { 18, 0 },
+ { 19, 0x90 },
+ { 20, 0 },
+ { 21, 0 },
+ { 22, 0x22 },
+ { 23, 0x22 },
+ { 24, 0x3e },
+ { 25, 0xc },
+ { 26, 0xc },
+ { 27, 0x100 },
+ { 28, 0x189 },
+ { 29, 0x189 },
+ { 30, 0x8770 },
};
+static bool wm8770_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8770_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct wm8770_priv {
- enum snd_soc_control_type control_type;
+ struct regmap *regmap;
struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
struct snd_soc_codec *codec;
@@ -71,7 +105,7 @@ static int wm8770_regulator_event_##n(struct notifier_block *nb, \
struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \
disable_nb[n]); \
if (event & REGULATOR_EVENT_DISABLE) { \
- wm8770->codec->cache_sync = 1; \
+ regcache_mark_dirty(wm8770->regmap); \
} \
return 0; \
}
@@ -466,24 +500,6 @@ static int wm8770_set_sysclk(struct snd_soc_dai *dai,
return 0;
}
-static void wm8770_sync_cache(struct snd_soc_codec *codec)
-{
- int i;
- u16 *cache;
-
- if (!codec->cache_sync)
- return;
-
- codec->cache_only = 0;
- cache = codec->reg_cache;
- for (i = 0; i < codec->driver->reg_cache_size; i++) {
- if (i == WM8770_RESET || cache[i] == wm8770_reg_defs[i])
- continue;
- snd_soc_write(codec, i, cache[i]);
- }
- codec->cache_sync = 0;
-}
-
static int wm8770_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -507,7 +523,9 @@ static int wm8770_set_bias_level(struct snd_soc_codec *codec,
ret);
return ret;
}
- wm8770_sync_cache(codec);
+
+ regcache_sync(wm8770->regmap);
+
/* global powerup */
snd_soc_write(codec, WM8770_PWDNCTRL, 0);
}
@@ -554,68 +572,25 @@ static struct snd_soc_dai_driver wm8770_dai = {
.symmetric_rates = 1
};
-#ifdef CONFIG_PM
-static int wm8770_suspend(struct snd_soc_codec *codec)
-{
- wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8770_resume(struct snd_soc_codec *codec)
-{
- wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8770_suspend NULL
-#define wm8770_resume NULL
-#endif
-
static int wm8770_probe(struct snd_soc_codec *codec)
{
struct wm8770_priv *wm8770;
int ret;
- int i;
wm8770 = snd_soc_codec_get_drvdata(codec);
wm8770->codec = codec;
- ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8770->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
- for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
- wm8770->supplies[i].supply = wm8770_supply_names[i];
-
- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8770->supplies),
- wm8770->supplies);
- if (ret) {
- dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
- return ret;
- }
-
- wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
- wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
- wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;
-
- /* This should really be moved into the regulator core */
- for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
- ret = regulator_register_notifier(wm8770->supplies[i].consumer,
- &wm8770->disable_nb[i]);
- if (ret) {
- dev_err(codec->dev,
- "Failed to register regulator notifier: %d\n",
- ret);
- }
- }
-
ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
wm8770->supplies);
if (ret) {
dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
- goto err_reg_get;
+ return ret;
}
ret = wm8770_reset(codec);
@@ -624,8 +599,6 @@ static int wm8770_probe(struct snd_soc_codec *codec)
goto err_reg_enable;
}
- wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* latch the volume update bits */
snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100);
snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100);
@@ -641,46 +614,22 @@ static int wm8770_probe(struct snd_soc_codec *codec)
/* mute all DACs */
snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10);
- snd_soc_add_codec_controls(codec, wm8770_snd_controls,
- ARRAY_SIZE(wm8770_snd_controls));
- snd_soc_dapm_new_controls(&codec->dapm, wm8770_dapm_widgets,
- ARRAY_SIZE(wm8770_dapm_widgets));
- snd_soc_dapm_add_routes(&codec->dapm, wm8770_intercon,
- ARRAY_SIZE(wm8770_intercon));
- return 0;
-
err_reg_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
-err_reg_get:
- regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
return ret;
}
-static int wm8770_remove(struct snd_soc_codec *codec)
-{
- struct wm8770_priv *wm8770;
- int i;
-
- wm8770 = snd_soc_codec_get_drvdata(codec);
- wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
- regulator_unregister_notifier(wm8770->supplies[i].consumer,
- &wm8770->disable_nb[i]);
- regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
.probe = wm8770_probe,
- .remove = wm8770_remove,
- .suspend = wm8770_suspend,
- .resume = wm8770_resume,
.set_bias_level = wm8770_set_bias_level,
.idle_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(wm8770_reg_defs),
- .reg_word_size = sizeof (u16),
- .reg_cache_default = wm8770_reg_defs
+
+ .controls = wm8770_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8770_snd_controls),
+ .dapm_widgets = wm8770_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8770_dapm_widgets),
+ .dapm_routes = wm8770_intercon,
+ .num_dapm_routes = ARRAY_SIZE(wm8770_intercon),
};
static const struct of_device_id wm8770_of_match[] = {
@@ -689,17 +638,57 @@ static const struct of_device_id wm8770_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wm8770_of_match);
-static int __devinit wm8770_spi_probe(struct spi_device *spi)
+static const struct regmap_config wm8770_regmap = {
+ .reg_bits = 7,
+ .val_bits = 9,
+ .max_register = WM8770_RESET,
+
+ .reg_defaults = wm8770_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8770_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = wm8770_volatile_reg,
+};
+
+static int wm8770_spi_probe(struct spi_device *spi)
{
struct wm8770_priv *wm8770;
- int ret;
+ int ret, i;
wm8770 = devm_kzalloc(&spi->dev, sizeof(struct wm8770_priv),
GFP_KERNEL);
if (!wm8770)
return -ENOMEM;
- wm8770->control_type = SND_SOC_SPI;
+ for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
+ wm8770->supplies[i].supply = wm8770_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(wm8770->supplies),
+ wm8770->supplies);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
+ wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
+ wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;
+
+ /* This should really be moved into the regulator core */
+ for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
+ ret = regulator_register_notifier(wm8770->supplies[i].consumer,
+ &wm8770->disable_nb[i]);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to register regulator notifier: %d\n",
+ ret);
+ }
+ }
+
+ wm8770->regmap = devm_regmap_init_spi(spi, &wm8770_regmap);
+ if (IS_ERR(wm8770->regmap))
+ return PTR_ERR(wm8770->regmap);
+
spi_set_drvdata(spi, wm8770);
ret = snd_soc_register_codec(&spi->dev,
@@ -708,9 +697,17 @@ static int __devinit wm8770_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8770_spi_remove(struct spi_device *spi)
+static int wm8770_spi_remove(struct spi_device *spi)
{
+ struct wm8770_priv *wm8770 = spi_get_drvdata(spi);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
+ regulator_unregister_notifier(wm8770->supplies[i].consumer,
+ &wm8770->disable_nb[i]);
+
snd_soc_unregister_codec(&spi->dev);
+
return 0;
}
@@ -721,7 +718,7 @@ static struct spi_driver wm8770_spi_driver = {
.of_match_table = wm8770_of_match,
},
.probe = wm8770_spi_probe,
- .remove = __devexit_p(wm8770_spi_remove)
+ .remove = wm8770_spi_remove
};
module_spi_driver(wm8770_spi_driver);
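
wm8770 replaces its hand-rolled cache sync with regmap's: the regulator-disable notifier now only marks the cache dirty with regcache_mark_dirty(), the bias-level path replays it with regcache_sync() once the supplies are back, and the controls, DAPM widgets and routes move into the snd_soc_codec_driver declaration instead of being added by hand in probe. A sketch of the regcache half, assuming a hypothetical wm_example_priv carrying the regmap and a single notifier:

/*
 * Illustrative sketch only: mark the cache dirty when a supply drops and
 * write it back when the part is powered up again.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <sound/soc.h>

struct wm_example_priv {
	struct regmap *regmap;
	struct notifier_block disable_nb;
};

static int wm_example_regulator_event(struct notifier_block *nb,
				      unsigned long event, void *data)
{
	struct wm_example_priv *priv =
		container_of(nb, struct wm_example_priv, disable_nb);

	if (event & REGULATOR_EVENT_DISABLE)
		regcache_mark_dirty(priv->regmap);	/* contents lost */

	return 0;
}

static int wm_example_set_bias_level(struct snd_soc_codec *codec,
				     enum snd_soc_bias_level level)
{
	struct wm_example_priv *priv = snd_soc_codec_get_drvdata(codec);

	if (level == SND_SOC_BIAS_STANDBY &&
	    codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
		/* supplies re-enabled by the caller; replay the cache */
		regcache_sync(priv->regmap);
	}

	codec->dapm.bias_level = level;
	return 0;
}
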
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index c32249ddb2e..f31017ed138 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -492,7 +492,7 @@ static const struct regmap_config wm8776_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8776_spi_probe(struct spi_device *spi)
+static int wm8776_spi_probe(struct spi_device *spi)
{
struct wm8776_priv *wm8776;
int ret;
@@ -514,7 +514,7 @@ static int __devinit wm8776_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8776_spi_remove(struct spi_device *spi)
+static int wm8776_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -527,13 +527,13 @@ static struct spi_driver wm8776_spi_driver = {
.of_match_table = wm8776_of_match,
},
.probe = wm8776_spi_probe,
- .remove = __devexit_p(wm8776_spi_remove),
+ .remove = wm8776_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8776_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8776_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8776_priv *wm8776;
int ret;
@@ -555,7 +555,7 @@ static __devinit int wm8776_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8776_i2c_remove(struct i2c_client *client)
+static int wm8776_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -575,7 +575,7 @@ static struct i2c_driver wm8776_i2c_driver = {
.of_match_table = wm8776_of_match,
},
.probe = wm8776_i2c_probe,
- .remove = __devexit_p(wm8776_i2c_remove),
+ .remove = wm8776_i2c_remove,
.id_table = wm8776_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index 3fdea98f732..f1fdbf63abb 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -42,13 +42,13 @@ static struct snd_soc_dai_driver wm8782_dai = {
static struct snd_soc_codec_driver soc_codec_dev_wm8782;
-static __devinit int wm8782_probe(struct platform_device *pdev)
+static int wm8782_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm8782, &wm8782_dai, 1);
}
-static int __devexit wm8782_remove(struct platform_device *pdev)
+static int wm8782_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -60,7 +60,7 @@ static struct platform_driver wm8782_codec_driver = {
.owner = THIS_MODULE,
},
.probe = wm8782_probe,
- .remove = __devexit_p(wm8782_remove),
+ .remove = wm8782_remove,
};
module_platform_driver(wm8782_codec_driver);
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index c088020172a..d321a875b02 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -702,7 +702,7 @@ static struct regmap_config wm8804_regmap_config = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8804_spi_probe(struct spi_device *spi)
+static int wm8804_spi_probe(struct spi_device *spi)
{
struct wm8804_priv *wm8804;
int ret;
@@ -711,7 +711,7 @@ static int __devinit wm8804_spi_probe(struct spi_device *spi)
if (!wm8804)
return -ENOMEM;
- wm8804->regmap = regmap_init_spi(spi, &wm8804_regmap_config);
+ wm8804->regmap = devm_regmap_init_spi(spi, &wm8804_regmap_config);
if (IS_ERR(wm8804->regmap)) {
ret = PTR_ERR(wm8804->regmap);
return ret;
@@ -725,11 +725,9 @@ static int __devinit wm8804_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8804_spi_remove(struct spi_device *spi)
+static int wm8804_spi_remove(struct spi_device *spi)
{
- struct wm8804_priv *wm8804 = spi_get_drvdata(spi);
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(wm8804->regmap);
return 0;
}
@@ -740,13 +738,13 @@ static struct spi_driver wm8804_spi_driver = {
.of_match_table = wm8804_of_match,
},
.probe = wm8804_spi_probe,
- .remove = __devexit_p(wm8804_spi_remove)
+ .remove = wm8804_spi_remove
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8804_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8804_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8804_priv *wm8804;
int ret;
@@ -755,7 +753,7 @@ static __devinit int wm8804_i2c_probe(struct i2c_client *i2c,
if (!wm8804)
return -ENOMEM;
- wm8804->regmap = regmap_init_i2c(i2c, &wm8804_regmap_config);
+ wm8804->regmap = devm_regmap_init_i2c(i2c, &wm8804_regmap_config);
if (IS_ERR(wm8804->regmap)) {
ret = PTR_ERR(wm8804->regmap);
return ret;
@@ -765,23 +763,12 @@ static __devinit int wm8804_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8804, &wm8804_dai, 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regmap_exit(wm8804->regmap);
return ret;
}
-static __devexit int wm8804_i2c_remove(struct i2c_client *i2c)
+static int wm8804_i2c_remove(struct i2c_client *i2c)
{
- struct wm8804_priv *wm8804 = i2c_get_clientdata(i2c);
-
snd_soc_unregister_codec(&i2c->dev);
- regmap_exit(wm8804->regmap);
-
return 0;
}
@@ -798,7 +785,7 @@ static struct i2c_driver wm8804_i2c_driver = {
.of_match_table = wm8804_of_match,
},
.probe = wm8804_i2c_probe,
- .remove = __devexit_p(wm8804_i2c_remove),
+ .remove = wm8804_i2c_remove,
.id_table = wm8804_i2c_id
};
#endif
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index e781f865e5d..7c8257c5a17 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1247,7 +1247,7 @@ static const struct regmap_config wm8900_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8900_spi_probe(struct spi_device *spi)
+static int wm8900_spi_probe(struct spi_device *spi)
{
struct wm8900_priv *wm8900;
int ret;
@@ -1269,7 +1269,7 @@ static int __devinit wm8900_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8900_spi_remove(struct spi_device *spi)
+static int wm8900_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -1281,13 +1281,13 @@ static struct spi_driver wm8900_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8900_spi_probe,
- .remove = __devexit_p(wm8900_spi_remove),
+ .remove = wm8900_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8900_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8900_priv *wm8900;
int ret;
@@ -1309,7 +1309,7 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8900_i2c_remove(struct i2c_client *client)
+static int wm8900_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1327,7 +1327,7 @@ static struct i2c_driver wm8900_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8900_i2c_probe,
- .remove = __devexit_p(wm8900_i2c_remove),
+ .remove = wm8900_i2c_remove,
.id_table = wm8900_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 839414f9e2e..134e41c870b 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2020,8 +2020,8 @@ static int wm8903_set_pdata_from_of(struct i2c_client *i2c,
return 0;
}
-static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8903_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8903_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct wm8903_priv *wm8903;
@@ -2206,7 +2206,7 @@ err:
return ret;
}
-static __devexit int wm8903_i2c_remove(struct i2c_client *client)
+static int wm8903_i2c_remove(struct i2c_client *client)
{
struct wm8903_priv *wm8903 = i2c_get_clientdata(client);
@@ -2237,7 +2237,7 @@ static struct i2c_driver wm8903_i2c_driver = {
.of_match_table = wm8903_of_match,
},
.probe = wm8903_i2c_probe,
- .remove = __devexit_p(wm8903_i2c_remove),
+ .remove = wm8903_i2c_remove,
.id_table = wm8903_i2c_id,
};
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 7c8df52a8d9..3ff195c541d 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2111,8 +2111,8 @@ static const struct regmap_config wm8904_regmap = {
.num_reg_defaults = ARRAY_SIZE(wm8904_reg_defaults),
};
-static __devinit int wm8904_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8904_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8904_priv *wm8904;
unsigned int val;
@@ -2247,7 +2247,7 @@ err_enable:
return ret;
}
-static __devexit int wm8904_i2c_remove(struct i2c_client *client)
+static int wm8904_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -2267,7 +2267,7 @@ static struct i2c_driver wm8904_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8904_i2c_probe,
- .remove = __devexit_p(wm8904_i2c_remove),
+ .remove = wm8904_i2c_remove,
.id_table = wm8904_i2c_id,
};
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index b20aa4e7c3f..b1591c61c25 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -742,8 +742,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8940 = {
.volatile_register = wm8940_volatile_register,
};
-static __devinit int wm8940_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8940_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8940_priv *wm8940;
int ret;
@@ -762,7 +762,7 @@ static __devinit int wm8940_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8940_i2c_remove(struct i2c_client *client)
+static int wm8940_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -781,7 +781,7 @@ static struct i2c_driver wm8940_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8940_i2c_probe,
- .remove = __devexit_p(wm8940_i2c_remove),
+ .remove = wm8940_i2c_remove,
.id_table = wm8940_i2c_id,
};
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2f1c075755b..82c8ba97572 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -1012,8 +1012,8 @@ static const struct regmap_config wm8955_regmap = {
.num_reg_defaults = ARRAY_SIZE(wm8955_reg_defaults),
};
-static __devinit int wm8955_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8955_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8955_priv *wm8955;
int ret;
@@ -1023,7 +1023,7 @@ static __devinit int wm8955_i2c_probe(struct i2c_client *i2c,
if (wm8955 == NULL)
return -ENOMEM;
- wm8955->regmap = regmap_init_i2c(i2c, &wm8955_regmap);
+ wm8955->regmap = devm_regmap_init_i2c(i2c, &wm8955_regmap);
if (IS_ERR(wm8955->regmap)) {
ret = PTR_ERR(wm8955->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1035,22 +1035,13 @@ static __devinit int wm8955_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8955, &wm8955_dai, 1);
- if (ret != 0)
- goto err;
return ret;
-
-err:
- regmap_exit(wm8955->regmap);
- return ret;
}
-static __devexit int wm8955_i2c_remove(struct i2c_client *client)
+static int wm8955_i2c_remove(struct i2c_client *client)
{
- struct wm8955_priv *wm8955 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8955->regmap);
return 0;
}
@@ -1067,7 +1058,7 @@ static struct i2c_driver wm8955_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8955_i2c_probe,
- .remove = __devexit_p(wm8955_i2c_remove),
+ .remove = wm8955_i2c_remove,
.id_table = wm8955_i2c_id,
};
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 00121ba3659..b0710d817a6 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -195,7 +195,7 @@ ok:
static void wm8958_dsp_start_mbc(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int i;
/* If the DSP is already running then noop */
@@ -210,9 +210,9 @@ static void wm8958_dsp_start_mbc(struct snd_soc_codec *codec, int path)
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied MBC settings use them */
- if (pdata && pdata->num_mbc_cfgs) {
+ if (control->pdata.num_mbc_cfgs) {
struct wm8958_mbc_cfg *cfg
- = &pdata->mbc_cfgs[wm8994->mbc_cfg];
+ = &control->pdata.mbc_cfgs[wm8994->mbc_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++)
snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1,
@@ -239,7 +239,7 @@ static void wm8958_dsp_start_mbc(struct snd_soc_codec *codec, int path)
static void wm8958_dsp_start_vss(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int i, ena;
if (wm8994->mbc_vss)
@@ -249,26 +249,26 @@ static void wm8958_dsp_start_vss(struct snd_soc_codec *codec, int path)
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied settings use them */
- if (pdata && pdata->num_mbc_cfgs) {
+ if (control->pdata.num_mbc_cfgs) {
struct wm8958_mbc_cfg *cfg
- = &pdata->mbc_cfgs[wm8994->mbc_cfg];
+ = &control->pdata.mbc_cfgs[wm8994->mbc_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->combined_regs); i++)
snd_soc_write(codec, i + 0x2800,
cfg->combined_regs[i]);
}
- if (pdata && pdata->num_vss_cfgs) {
+ if (control->pdata.num_vss_cfgs) {
struct wm8958_vss_cfg *cfg
- = &pdata->vss_cfgs[wm8994->vss_cfg];
+ = &control->pdata.vss_cfgs[wm8994->vss_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2600, cfg->regs[i]);
}
- if (pdata && pdata->num_vss_hpf_cfgs) {
+ if (control->pdata.num_vss_hpf_cfgs) {
struct wm8958_vss_hpf_cfg *cfg
- = &pdata->vss_hpf_cfgs[wm8994->vss_hpf_cfg];
+ = &control->pdata.vss_hpf_cfgs[wm8994->vss_hpf_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2400, cfg->regs[i]);
@@ -300,7 +300,7 @@ static void wm8958_dsp_start_vss(struct snd_soc_codec *codec, int path)
static void wm8958_dsp_start_enh_eq(struct snd_soc_codec *codec, int path)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int i;
wm8958_dsp2_fw(codec, "ENH_EQ", wm8994->enh_eq, false);
@@ -309,9 +309,9 @@ static void wm8958_dsp_start_enh_eq(struct snd_soc_codec *codec, int path)
WM8958_DSP2_ENA, WM8958_DSP2_ENA);
/* If we've got user supplied settings use them */
- if (pdata && pdata->num_enh_eq_cfgs) {
+ if (control->pdata.num_enh_eq_cfgs) {
struct wm8958_enh_eq_cfg *cfg
- = &pdata->enh_eq_cfgs[wm8994->enh_eq_cfg];
+ = &control->pdata.enh_eq_cfgs[wm8994->enh_eq_cfg];
for (i = 0; i < ARRAY_SIZE(cfg->regs); i++)
snd_soc_write(codec, i + 0x2200,
@@ -458,7 +458,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
@@ -467,7 +467,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
- if (value >= pdata->num_mbc_cfgs)
+ if (value >= control->pdata.num_mbc_cfgs)
return -EINVAL;
wm8994->mbc_cfg = value;
@@ -548,7 +548,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
@@ -557,7 +557,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
- if (value >= pdata->num_vss_cfgs)
+ if (value >= control->pdata.num_vss_cfgs)
return -EINVAL;
wm8994->vss_cfg = value;
@@ -581,7 +581,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
@@ -590,7 +590,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
- if (value >= pdata->num_vss_hpf_cfgs)
+ if (value >= control->pdata.num_vss_hpf_cfgs)
return -EINVAL;
wm8994->vss_hpf_cfg = value;
@@ -748,7 +748,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
int value = ucontrol->value.integer.value[0];
int reg;
@@ -757,7 +757,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
return -EBUSY;
- if (value >= pdata->num_enh_eq_cfgs)
+ if (value >= control->pdata.num_enh_eq_cfgs)
return -EINVAL;
wm8994->enh_eq_cfg = value;
@@ -883,13 +883,6 @@ static void wm8958_mbc_vss_loaded(const struct firmware *fw, void *context)
wm8994->mbc_vss = fw;
mutex_unlock(&codec->mutex);
}
-
- /* We can't have more than one request outstanding at once so
- * we daisy chain.
- */
- request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- "wm8958_enh_eq.wfw", codec->dev, GFP_KERNEL,
- codec, wm8958_enh_eq_loaded);
}
static void wm8958_mbc_loaded(const struct firmware *fw, void *context)
@@ -897,25 +890,18 @@ static void wm8958_mbc_loaded(const struct firmware *fw, void *context)
struct snd_soc_codec *codec = context;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- if (wm8958_dsp2_fw(codec, "MBC", fw, true) != 0)
- return;
-
- mutex_lock(&codec->mutex);
- wm8994->mbc = fw;
- mutex_unlock(&codec->mutex);
-
- /* We can't have more than one request outstanding at once so
- * we daisy chain.
- */
- request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- "wm8958_mbc_vss.wfw", codec->dev, GFP_KERNEL,
- codec, wm8958_mbc_vss_loaded);
+ if (fw && (wm8958_dsp2_fw(codec, "MBC", fw, true) == 0)) {
+ mutex_lock(&codec->mutex);
+ wm8994->mbc = fw;
+ mutex_unlock(&codec->mutex);
+ }
}
void wm8958_dsp2_init(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int ret, i;
wm8994->dsp_active = -1;
@@ -932,9 +918,12 @@ void wm8958_dsp2_init(struct snd_soc_codec *codec)
request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
"wm8958_mbc.wfw", codec->dev, GFP_KERNEL,
codec, wm8958_mbc_loaded);
-
- if (!pdata)
- return;
+ request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ "wm8958_mbc_vss.wfw", codec->dev, GFP_KERNEL,
+ codec, wm8958_mbc_vss_loaded);
+ request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ "wm8958_enh_eq.wfw", codec->dev, GFP_KERNEL,
+ codec, wm8958_enh_eq_loaded);
if (pdata->num_mbc_cfgs) {
struct snd_kcontrol_new control[] = {
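
wm8958-dsp2 now reads platform data through the parent wm8994 MFD (control->pdata) rather than a possibly-NULL wm8994->pdata pointer, and the three DSP firmware images are requested independently from wm8958_dsp2_init() instead of being daisy-chained out of each other's completion callbacks, so each callback must cope with a NULL firmware pointer. A sketch of that non-blocking request pattern; the firmware name and wm_example_priv are made up, and the caller is assumed to release_firmware() on teardown:

/*
 * Illustrative sketch only: independent non-blocking firmware requests
 * whose completion callback tolerates a failed load (fw == NULL).
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <sound/soc.h>

struct wm_example_priv {
	const struct firmware *fw;
};

static void wm_example_fw_loaded(const struct firmware *fw, void *context)
{
	struct snd_soc_codec *codec = context;
	struct wm_example_priv *priv = snd_soc_codec_get_drvdata(codec);

	if (!fw)
		return;		/* request failed or file missing */

	mutex_lock(&codec->mutex);
	priv->fw = fw;		/* kept for later use; release_firmware() on teardown */
	mutex_unlock(&codec->mutex);
}

static void wm_example_dsp_init(struct snd_soc_codec *codec)
{
	/* issue the request up front; no daisy-chaining between callbacks */
	request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				"wm_example_dsp.wfw", codec->dev, GFP_KERNEL,
				codec, wm_example_fw_loaded);
}
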
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index f0f6f660178..9bb92732599 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -1028,8 +1028,8 @@ static const struct regmap_config wm8960_regmap = {
.volatile_reg = wm8960_volatile,
};
-static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8960_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8960_data *pdata = dev_get_platdata(&i2c->dev);
struct wm8960_priv *wm8960;
@@ -1040,7 +1040,7 @@ static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
if (wm8960 == NULL)
return -ENOMEM;
- wm8960->regmap = regmap_init_i2c(i2c, &wm8960_regmap);
+ wm8960->regmap = devm_regmap_init_i2c(i2c, &wm8960_regmap);
if (IS_ERR(wm8960->regmap))
return PTR_ERR(wm8960->regmap);
@@ -1062,7 +1062,7 @@ static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8960_i2c_remove(struct i2c_client *client)
+static int wm8960_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1080,7 +1080,7 @@ static struct i2c_driver wm8960_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8960_i2c_probe,
- .remove = __devexit_p(wm8960_i2c_remove),
+ .remove = wm8960_i2c_remove,
.id_table = wm8960_i2c_id,
};
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index f387670d0d7..900328e28a1 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -937,8 +937,8 @@ static const struct regmap_config wm8961_regmap = {
.readable_reg = wm8961_readable,
};
-static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8961_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8961_priv *wm8961;
unsigned int val;
@@ -993,7 +993,7 @@ static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8961_i2c_remove(struct i2c_client *client)
+static int wm8961_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -1012,7 +1012,7 @@ static struct i2c_driver wm8961_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8961_i2c_probe,
- .remove = __devexit_p(wm8961_i2c_remove),
+ .remove = wm8961_i2c_remove,
.id_table = wm8961_i2c_id,
};
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index ce672007379..bd4b0db4cda 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3588,8 +3588,8 @@ static const struct regmap_config wm8962_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8962_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8962_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm8962_priv *wm8962;
@@ -3610,7 +3610,7 @@ static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
for (i = 0; i < ARRAY_SIZE(wm8962->supplies); i++)
wm8962->supplies[i].supply = wm8962_supply_names[i];
- ret = regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8962->supplies),
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8962->supplies),
wm8962->supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
@@ -3621,10 +3621,10 @@ static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
wm8962->supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
- goto err_get;
+ return ret;
}
- wm8962->regmap = regmap_init_i2c(i2c, &wm8962_regmap);
+ wm8962->regmap = devm_regmap_init_i2c(i2c, &wm8962_regmap);
if (IS_ERR(wm8962->regmap)) {
ret = PTR_ERR(wm8962->regmap);
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
@@ -3641,20 +3641,20 @@ static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
ret = regmap_read(wm8962->regmap, WM8962_SOFTWARE_RESET, &reg);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read ID register\n");
- goto err_regmap;
+ goto err_enable;
}
if (reg != 0x6243) {
dev_err(&i2c->dev,
"Device is not a WM8962, ID %x != 0x6243\n", reg);
ret = -EINVAL;
- goto err_regmap;
+ goto err_enable;
}
ret = regmap_read(wm8962->regmap, WM8962_RIGHT_INPUT_VOLUME, &reg);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read device revision: %d\n",
ret);
- goto err_regmap;
+ goto err_enable;
}
dev_info(&i2c->dev, "customer id %x revision %c\n",
@@ -3667,7 +3667,7 @@ static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
ret = wm8962_reset(wm8962);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to issue reset\n");
- goto err_regmap;
+ goto err_enable;
}
if (pdata && pdata->in4_dc_measure) {
@@ -3686,30 +3686,22 @@ static __devinit int wm8962_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8962, &wm8962_dai, 1);
if (ret < 0)
- goto err_regmap;
+ goto err_enable;
/* The drivers should power up as needed */
regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
return 0;
-err_regmap:
- regmap_exit(wm8962->regmap);
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
-err_get:
- regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
err:
return ret;
}
-static __devexit int wm8962_i2c_remove(struct i2c_client *client)
+static int wm8962_i2c_remove(struct i2c_client *client)
{
- struct wm8962_priv *wm8962 = dev_get_drvdata(&client->dev);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8962->regmap);
- regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
return 0;
}
@@ -3773,7 +3765,7 @@ static struct i2c_driver wm8962_i2c_driver = {
.pm = &wm8962_pm,
},
.probe = wm8962_i2c_probe,
- .remove = __devexit_p(wm8962_i2c_remove),
+ .remove = wm8962_i2c_remove,
.id_table = wm8962_i2c_id,
};
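
wm8962 moves to devm_regulator_bulk_get() and devm_regmap_init_i2c(), collapsing the err_get/err_regmap labels into a single err_enable path that only has to undo regulator_bulk_enable(). A sketch of that shape; the supply names, register sizes and wm_example identifiers are placeholders:

/*
 * Illustrative sketch only: managed regulator and regmap acquisition with
 * one remaining manual cleanup step (disabling the supplies).
 */
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/soc.h>

#define WM_EXAMPLE_NUM_SUPPLIES 3

static const char *wm_example_supply_names[WM_EXAMPLE_NUM_SUPPLIES] = {
	"DCVDD", "DBVDD", "AVDD",
};

struct wm_example_priv {
	struct regmap *regmap;
	struct regulator_bulk_data supplies[WM_EXAMPLE_NUM_SUPPLIES];
};

static const struct regmap_config wm_example_regmap = {
	.reg_bits = 16,
	.val_bits = 16,
};

static struct snd_soc_codec_driver soc_codec_dev_wm_example;
static struct snd_soc_dai_driver wm_example_dai = { .name = "wm_example" };

static int wm_example_i2c_probe(struct i2c_client *i2c,
				const struct i2c_device_id *id)
{
	struct wm_example_priv *priv;
	int ret, i;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
		priv->supplies[i].supply = wm_example_supply_names[i];

	ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(priv->supplies),
				      priv->supplies);
	if (ret != 0)
		return ret;	/* devres releases anything it acquired */

	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
				    priv->supplies);
	if (ret != 0)
		return ret;

	priv->regmap = devm_regmap_init_i2c(i2c, &wm_example_regmap);
	if (IS_ERR(priv->regmap)) {
		ret = PTR_ERR(priv->regmap);
		goto err_enable;	/* only the enable must be undone */
	}

	i2c_set_clientdata(i2c, priv);

	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm_example,
				     &wm_example_dai, 1);
	if (ret < 0)
		goto err_enable;

	/* the driver powers the part back up on demand */
	regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);

	return 0;

err_enable:
	regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
	return ret;
}
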
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 5ce64775844..67aba78a7ca 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -34,7 +35,6 @@ static struct workqueue_struct *wm8971_workq = NULL;
/* codec private data */
struct wm8971_priv {
- enum snd_soc_control_type control_type;
unsigned int sysclk;
};
@@ -43,18 +43,50 @@ struct wm8971_priv {
* We can't read the WM8971 register space when we
* are using 2 wire for device control, so we cache them instead.
*/
-static const u16 wm8971_reg[] = {
- 0x0097, 0x0097, 0x0079, 0x0079, /* 0 */
- 0x0000, 0x0008, 0x0000, 0x000a, /* 4 */
- 0x0000, 0x0000, 0x00ff, 0x00ff, /* 8 */
- 0x000f, 0x000f, 0x0000, 0x0000, /* 12 */
- 0x0000, 0x007b, 0x0000, 0x0032, /* 16 */
- 0x0000, 0x00c3, 0x00c3, 0x00c0, /* 20 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 24 */
- 0x0000, 0x0000, 0x0000, 0x0000, /* 28 */
- 0x0000, 0x0000, 0x0050, 0x0050, /* 32 */
- 0x0050, 0x0050, 0x0050, 0x0050, /* 36 */
- 0x0079, 0x0079, 0x0079, /* 40 */
+static const struct reg_default wm8971_reg_defaults[] = {
+ { 0, 0x0097 },
+ { 1, 0x0097 },
+ { 2, 0x0079 },
+ { 3, 0x0079 },
+ { 4, 0x0000 },
+ { 5, 0x0008 },
+ { 6, 0x0000 },
+ { 7, 0x000a },
+ { 8, 0x0000 },
+ { 9, 0x0000 },
+ { 10, 0x00ff },
+ { 11, 0x00ff },
+ { 12, 0x000f },
+ { 13, 0x000f },
+ { 14, 0x0000 },
+ { 15, 0x0000 },
+ { 16, 0x0000 },
+ { 17, 0x007b },
+ { 18, 0x0000 },
+ { 19, 0x0032 },
+ { 20, 0x0000 },
+ { 21, 0x00c3 },
+ { 22, 0x00c3 },
+ { 23, 0x00c0 },
+ { 24, 0x0000 },
+ { 25, 0x0000 },
+ { 26, 0x0000 },
+ { 27, 0x0000 },
+ { 28, 0x0000 },
+ { 29, 0x0000 },
+ { 30, 0x0000 },
+ { 31, 0x0000 },
+ { 32, 0x0000 },
+ { 33, 0x0000 },
+ { 34, 0x0050 },
+ { 35, 0x0050 },
+ { 36, 0x0050 },
+ { 37, 0x0050 },
+ { 38, 0x0050 },
+ { 39, 0x0050 },
+ { 40, 0x0079 },
+ { 41, 0x0079 },
+ { 42, 0x0079 },
};
#define wm8971_reset(c) snd_soc_write(c, WM8971_RESET, 0)
@@ -613,11 +645,10 @@ static int wm8971_resume(struct snd_soc_codec *codec)
static int wm8971_probe(struct snd_soc_codec *codec)
{
- struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
u16 reg;
- ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8971->control_type);
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
if (ret < 0) {
printk(KERN_ERR "wm8971: failed to set cache I/O: %d\n", ret);
return ret;
@@ -667,9 +698,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8971 = {
.suspend = wm8971_suspend,
.resume = wm8971_resume,
.set_bias_level = wm8971_set_bias_level,
- .reg_cache_size = ARRAY_SIZE(wm8971_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = wm8971_reg,
.controls = wm8971_snd_controls,
.num_controls = ARRAY_SIZE(wm8971_snd_controls),
@@ -679,10 +707,21 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8971 = {
.num_dapm_routes = ARRAY_SIZE(wm8971_dapm_routes),
};
-static __devinit int wm8971_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static const struct regmap_config wm8971_regmap = {
+ .reg_bits = 7,
+ .val_bits = 9,
+ .max_register = WM8971_MOUTV,
+
+ .reg_defaults = wm8971_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8971_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int wm8971_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8971_priv *wm8971;
+ struct regmap *regmap;
int ret;
wm8971 = devm_kzalloc(&i2c->dev, sizeof(struct wm8971_priv),
@@ -690,7 +729,10 @@ static __devinit int wm8971_i2c_probe(struct i2c_client *i2c,
if (wm8971 == NULL)
return -ENOMEM;
- wm8971->control_type = SND_SOC_I2C;
+ regmap = devm_regmap_init_i2c(i2c, &wm8971_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
i2c_set_clientdata(i2c, wm8971);
ret = snd_soc_register_codec(&i2c->dev,
@@ -699,7 +741,7 @@ static __devinit int wm8971_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8971_i2c_remove(struct i2c_client *client)
+static int wm8971_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -717,7 +759,7 @@ static struct i2c_driver wm8971_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8971_i2c_probe,
- .remove = __devexit_p(wm8971_i2c_remove),
+ .remove = wm8971_i2c_remove,
.id_table = wm8971_i2c_id,
};
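
wm8971 follows the same regmap conversion as wm8750 above; with the cache owned by regmap, the codec-level probe only has to point ASoC's register I/O at it via SND_SOC_REGMAP, and the mixer controls stay declared in the snd_soc_codec_driver. A minimal sketch of such a probe, with a placeholder control and register address:

/*
 * Illustrative sketch only: a codec-level probe that routes ASoC register
 * I/O through the regmap created at I2C/SPI probe time.
 */
#include <linux/kernel.h>
#include <sound/soc.h>

#define WM_EXAMPLE_DACVOL 0x0a	/* placeholder register address */

static const struct snd_kcontrol_new wm_example_snd_controls[] = {
	SOC_SINGLE("DAC Playback Volume", WM_EXAMPLE_DACVOL, 0, 255, 0),
};

static int wm_example_codec_probe(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	/* further init uses snd_soc_write()/snd_soc_update_bits() and is
	 * cached by regmap automatically */
	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_wm_example = {
	.probe = wm_example_codec_probe,
	/* no reg_cache_size / reg_word_size / reg_cache_default: the
	 * defaults and cache now live in the regmap_config */
	.controls = wm_example_snd_controls,
	.num_controls = ARRAY_SIZE(wm_example_snd_controls),
};
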
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 9a39511af52..ea58b73e86b 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -625,8 +625,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
.num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
};
-static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8974_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
int ret;
@@ -636,7 +636,7 @@ static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8974_i2c_remove(struct i2c_client *client)
+static int wm8974_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -655,7 +655,7 @@ static struct i2c_driver wm8974_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8974_i2c_probe,
- .remove = __devexit_p(wm8974_i2c_remove),
+ .remove = wm8974_i2c_remove,
.id_table = wm8974_i2c_id,
};
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 4c0a8e49613..f347af3a67c 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -527,9 +527,6 @@ static int wm8978_configure_pll(struct snd_soc_codec *codec)
return idx;
wm8978->mclk_idx = idx;
-
- /* GPIO1 into default mode as input - before configuring PLL */
- snd_soc_update_bits(codec, WM8978_GPIO_CONTROL, 7, 0);
} else {
return -EINVAL;
}
@@ -1038,8 +1035,8 @@ static const struct regmap_config wm8978_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(wm8978_reg_defaults),
};
-static __devinit int wm8978_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8978_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8978_priv *wm8978;
int ret;
@@ -1049,7 +1046,7 @@ static __devinit int wm8978_i2c_probe(struct i2c_client *i2c,
if (wm8978 == NULL)
return -ENOMEM;
- wm8978->regmap = regmap_init_i2c(i2c, &wm8978_regmap_config);
+ wm8978->regmap = devm_regmap_init_i2c(i2c, &wm8978_regmap_config);
if (IS_ERR(wm8978->regmap)) {
ret = PTR_ERR(wm8978->regmap);
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
@@ -1062,29 +1059,22 @@ static __devinit int wm8978_i2c_probe(struct i2c_client *i2c,
ret = regmap_write(wm8978->regmap, WM8978_RESET, 0);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
- goto err;
+ return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8978, &wm8978_dai, 1);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
- goto err;
+ return ret;
}
return 0;
-
-err:
- regmap_exit(wm8978->regmap);
- return ret;
}
-static __devexit int wm8978_i2c_remove(struct i2c_client *client)
+static int wm8978_i2c_remove(struct i2c_client *client)
{
- struct wm8978_priv *wm8978 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8978->regmap);
return 0;
}
@@ -1101,7 +1091,7 @@ static struct i2c_driver wm8978_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8978_i2c_probe,
- .remove = __devexit_p(wm8978_i2c_remove),
+ .remove = wm8978_i2c_remove,
.id_table = wm8978_i2c_id,
};
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index d8879f262d2..9fe1e041da4 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -1087,7 +1087,7 @@ static const struct regmap_config wm8983_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8983_spi_probe(struct spi_device *spi)
+static int wm8983_spi_probe(struct spi_device *spi)
{
struct wm8983_priv *wm8983;
int ret;
@@ -1110,7 +1110,7 @@ static int __devinit wm8983_spi_probe(struct spi_device *spi)
return ret;
}
-static int __devexit wm8983_spi_remove(struct spi_device *spi)
+static int wm8983_spi_remove(struct spi_device *spi)
{
snd_soc_unregister_codec(&spi->dev);
return 0;
@@ -1122,13 +1122,13 @@ static struct spi_driver wm8983_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8983_spi_probe,
- .remove = __devexit_p(wm8983_spi_remove)
+ .remove = wm8983_spi_remove
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8983_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8983_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8983_priv *wm8983;
int ret;
@@ -1152,7 +1152,7 @@ static __devinit int wm8983_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8983_i2c_remove(struct i2c_client *client)
+static int wm8983_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
@@ -1170,7 +1170,7 @@ static struct i2c_driver wm8983_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8983_i2c_probe,
- .remove = __devexit_p(wm8983_i2c_remove),
+ .remove = wm8983_i2c_remove,
.id_table = wm8983_i2c_id
};
#endif
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 14f666398d0..ab3782657ac 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1111,7 +1111,7 @@ static const struct regmap_config wm8985_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8985_spi_probe(struct spi_device *spi)
+static int wm8985_spi_probe(struct spi_device *spi)
{
struct wm8985_priv *wm8985;
int ret;
@@ -1122,33 +1122,22 @@ static int __devinit wm8985_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, wm8985);
- wm8985->regmap = regmap_init_spi(spi, &wm8985_regmap);
+ wm8985->regmap = devm_regmap_init_spi(spi, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
ret = PTR_ERR(wm8985->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8985, &wm8985_dai, 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regmap_exit(wm8985->regmap);
return ret;
}
-static int __devexit wm8985_spi_remove(struct spi_device *spi)
+static int wm8985_spi_remove(struct spi_device *spi)
{
- struct wm8985_priv *wm8985 = spi_get_drvdata(spi);
-
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(wm8985->regmap);
-
return 0;
}
@@ -1158,13 +1147,13 @@ static struct spi_driver wm8985_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8985_spi_probe,
- .remove = __devexit_p(wm8985_spi_remove)
+ .remove = wm8985_spi_remove
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8985_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8985_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8985_priv *wm8985;
int ret;
@@ -1175,33 +1164,22 @@ static __devinit int wm8985_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8985);
- wm8985->regmap = regmap_init_i2c(i2c, &wm8985_regmap);
+ wm8985->regmap = devm_regmap_init_i2c(i2c, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
ret = PTR_ERR(wm8985->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8985, &wm8985_dai, 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regmap_exit(wm8985->regmap);
return ret;
}
-static __devexit int wm8985_i2c_remove(struct i2c_client *i2c)
+static int wm8985_i2c_remove(struct i2c_client *i2c)
{
- struct wm8985_priv *wm8985 = i2c_get_clientdata(i2c);
-
snd_soc_unregister_codec(&i2c->dev);
- regmap_exit(wm8985->regmap);
-
return 0;
}
@@ -1217,7 +1195,7 @@ static struct i2c_driver wm8985_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8985_i2c_probe,
- .remove = __devexit_p(wm8985_i2c_remove),
+ .remove = wm8985_i2c_remove,
.id_table = wm8985_i2c_id
};
#endif
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 1d4c5cf47b0..39b9acceb59 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -872,7 +872,7 @@ static struct regmap_config wm8988_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8988_spi_probe(struct spi_device *spi)
+static int wm8988_spi_probe(struct spi_device *spi)
{
struct wm8988_priv *wm8988;
int ret;
@@ -882,7 +882,7 @@ static int __devinit wm8988_spi_probe(struct spi_device *spi)
if (wm8988 == NULL)
return -ENOMEM;
- wm8988->regmap = regmap_init_spi(spi, &wm8988_regmap);
+ wm8988->regmap = devm_regmap_init_spi(spi, &wm8988_regmap);
if (IS_ERR(wm8988->regmap)) {
ret = PTR_ERR(wm8988->regmap);
dev_err(&spi->dev, "Failed to init regmap: %d\n", ret);
@@ -893,17 +893,12 @@ static int __devinit wm8988_spi_probe(struct spi_device *spi)
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8988, &wm8988_dai, 1);
- if (ret != 0)
- regmap_exit(wm8988->regmap);
-
return ret;
}
-static int __devexit wm8988_spi_remove(struct spi_device *spi)
+static int wm8988_spi_remove(struct spi_device *spi)
{
- struct wm8988_priv *wm8988 = spi_get_drvdata(spi);
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(wm8988->regmap);
return 0;
}
@@ -913,13 +908,13 @@ static struct spi_driver wm8988_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8988_spi_probe,
- .remove = __devexit_p(wm8988_spi_remove),
+ .remove = wm8988_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8988_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8988_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8988_priv *wm8988;
int ret;
@@ -931,7 +926,7 @@ static __devinit int wm8988_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8988);
- wm8988->regmap = regmap_init_i2c(i2c, &wm8988_regmap);
+ wm8988->regmap = devm_regmap_init_i2c(i2c, &wm8988_regmap);
if (IS_ERR(wm8988->regmap)) {
ret = PTR_ERR(wm8988->regmap);
dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret);
@@ -940,17 +935,12 @@ static __devinit int wm8988_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8988, &wm8988_dai, 1);
- if (ret != 0)
- regmap_exit(wm8988->regmap);
-
return ret;
}
-static __devexit int wm8988_i2c_remove(struct i2c_client *client)
+static int wm8988_i2c_remove(struct i2c_client *client)
{
- struct wm8988_priv *wm8988 = i2c_get_clientdata(client);
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8988->regmap);
return 0;
}
@@ -966,7 +956,7 @@ static struct i2c_driver wm8988_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8988_i2c_probe,
- .remove = __devexit_p(wm8988_i2c_remove),
+ .remove = wm8988_i2c_remove,
.id_table = wm8988_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index c28c83e5395..837978e16e9 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1382,8 +1382,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8990 = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8990_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8990_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8990_priv *wm8990;
int ret;
@@ -1401,7 +1401,7 @@ static __devinit int wm8990_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8990_i2c_remove(struct i2c_client *client)
+static int wm8990_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -1420,7 +1420,7 @@ static struct i2c_driver wm8990_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8990_i2c_probe,
- .remove = __devexit_p(wm8990_i2c_remove),
+ .remove = wm8990_i2c_remove,
.id_table = wm8990_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index fe439f027e1..3a39df7a382 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -1357,8 +1357,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8991 = {
.reg_cache_default = wm8991_reg_defs
};
-static __devinit int wm8991_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8991_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8991_priv *wm8991;
int ret;
@@ -1376,7 +1376,7 @@ static __devinit int wm8991_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static __devexit int wm8991_i2c_remove(struct i2c_client *client)
+static int wm8991_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
@@ -1395,7 +1395,7 @@ static struct i2c_driver wm8991_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8991_i2c_probe,
- .remove = __devexit_p(wm8991_i2c_remove),
+ .remove = wm8991_i2c_remove,
.id_table = wm8991_i2c_id,
};
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 94737a30716..433d59a0f3e 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1645,8 +1645,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8993 = {
.set_bias_level = wm8993_set_bias_level,
};
-static __devinit int wm8993_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8993_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8993_priv *wm8993;
unsigned int reg;
@@ -1660,7 +1660,7 @@ static __devinit int wm8993_i2c_probe(struct i2c_client *i2c,
wm8993->dev = &i2c->dev;
init_completion(&wm8993->fll_lock);
- wm8993->regmap = regmap_init_i2c(i2c, &wm8993_regmap);
+ wm8993->regmap = devm_regmap_init_i2c(i2c, &wm8993_regmap);
if (IS_ERR(wm8993->regmap)) {
ret = PTR_ERR(wm8993->regmap);
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
@@ -1672,18 +1672,18 @@ static __devinit int wm8993_i2c_probe(struct i2c_client *i2c,
for (i = 0; i < ARRAY_SIZE(wm8993->supplies); i++)
wm8993->supplies[i].supply = wm8993_supply_names[i];
- ret = regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8993->supplies),
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8993->supplies),
wm8993->supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
- goto err;
+ return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm8993->supplies),
wm8993->supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
- goto err_get;
+ return ret;
}
ret = regmap_read(wm8993->regmap, WM8993_SOFTWARE_RESET, &reg);
@@ -1742,23 +1742,17 @@ err_irq:
free_irq(i2c->irq, wm8993);
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
-err_get:
- regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
-err:
- regmap_exit(wm8993->regmap);
return ret;
}
-static __devexit int wm8993_i2c_remove(struct i2c_client *i2c)
+static int wm8993_i2c_remove(struct i2c_client *i2c)
{
struct wm8993_priv *wm8993 = i2c_get_clientdata(i2c);
snd_soc_unregister_codec(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm8993);
- regmap_exit(wm8993->regmap);
regulator_bulk_disable(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
- regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
return 0;
}
@@ -1775,7 +1769,7 @@ static struct i2c_driver wm8993_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8993_i2c_probe,
- .remove = __devexit_p(wm8993_i2c_remove),
+ .remove = wm8993_i2c_remove,
.id_table = wm8993_i2c_id,
};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index b2b2b37131b..3b269fa226b 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -91,8 +91,6 @@ static int wm8994_retune_mobile_base[] = {
WM8994_AIF2_EQ_GAINS_1,
};
-static void wm8958_default_micdet(u16 status, void *data);
-
static const struct wm8958_micd_rate micdet_rates[] = {
{ 32768, true, 1, 4 },
{ 32768, false, 1, 1 },
@@ -110,15 +108,12 @@ static const struct wm8958_micd_rate jackdet_rates[] = {
static void wm8958_micd_set_rate(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ struct wm8994 *control = wm8994->wm8994;
int best, i, sysclk, val;
bool idle;
const struct wm8958_micd_rate *rates;
int num_rates;
- if (!(wm8994->pdata && wm8994->pdata->micd_rates) &&
- wm8994->jack_cb != wm8958_default_micdet)
- return;
-
idle = !wm8994->jack_mic;
sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
@@ -127,9 +122,9 @@ static void wm8958_micd_set_rate(struct snd_soc_codec *codec)
else
sysclk = wm8994->aifclk[0];
- if (wm8994->pdata && wm8994->pdata->micd_rates) {
- rates = wm8994->pdata->micd_rates;
- num_rates = wm8994->pdata->num_micd_rates;
+ if (control->pdata.micd_rates) {
+ rates = control->pdata.micd_rates;
+ num_rates = control->pdata.num_micd_rates;
} else if (wm8994->jackdet) {
rates = jackdet_rates;
num_rates = ARRAY_SIZE(jackdet_rates);
@@ -326,7 +321,8 @@ static int wm8994_put_drc_sw(struct snd_kcontrol *kcontrol,
static void wm8994_set_drc(struct snd_soc_codec *codec, int drc)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int base = wm8994_drc_base[drc];
int cfg = wm8994->drc_cfg[drc];
int save, i;
@@ -362,7 +358,8 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int drc = wm8994_get_drc(kcontrol->id.name);
int value = ucontrol->value.integer.value[0];
@@ -394,7 +391,8 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
static void wm8994_set_retune_mobile(struct snd_soc_codec *codec, int block)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int base = wm8994_retune_mobile_base[block];
int iface, best, best_val, save, i, cfg;
@@ -465,7 +463,8 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
int value = ucontrol->value.integer.value[0];
@@ -736,7 +735,7 @@ static void wm1811_jackdet_set_mode(struct snd_soc_codec *codec, u16 mode)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- if (!wm8994->jackdet || !wm8994->jack_cb)
+ if (!wm8994->jackdet || !wm8994->micdet[0].jack)
return;
if (wm8994->active_refcount)
@@ -862,7 +861,7 @@ static void vmid_reference(struct snd_soc_codec *codec)
WM8994_BIAS_SRC |
WM8994_STARTUP_BIAS_ENA |
WM8994_VMID_BUF_ENA |
- (0x3 << WM8994_VMID_RAMP_SHIFT));
+ (0x2 << WM8994_VMID_RAMP_SHIFT));
/* Main bias enable, VMID=2x40k */
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
@@ -870,7 +869,7 @@ static void vmid_reference(struct snd_soc_codec *codec)
WM8994_VMID_SEL_MASK,
WM8994_BIAS_ENA | 0x2);
- msleep(50);
+ msleep(300);
snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
WM8994_VMID_RAMP_MASK |
@@ -939,16 +938,10 @@ static void vmid_dereference(struct snd_soc_codec *codec)
WM8994_BIAS_SRC |
WM8994_VMID_DISCH);
- switch (wm8994->vmid_mode) {
- case WM8994_VMID_FORCE:
- msleep(350);
- break;
- default:
- break;
- }
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
+ WM8994_VMID_SEL_MASK, 0);
- snd_soc_update_bits(codec, WM8994_ADDITIONAL_CONTROL,
- WM8994_VROI, WM8994_VROI);
+ msleep(400);
/* Active discharge */
snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
@@ -957,17 +950,12 @@ static void vmid_dereference(struct snd_soc_codec *codec)
WM8994_LINEOUT1_DISCH |
WM8994_LINEOUT2_DISCH);
- msleep(150);
-
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_3,
WM8994_LINEOUT1N_ENA |
WM8994_LINEOUT1P_ENA |
WM8994_LINEOUT2N_ENA |
WM8994_LINEOUT2P_ENA, 0);
- snd_soc_update_bits(codec, WM8994_ADDITIONAL_CONTROL,
- WM8994_VROI, 0);
-
/* Switch off startup biases */
snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
WM8994_BIAS_SRC |
@@ -976,10 +964,7 @@ static void vmid_dereference(struct snd_soc_codec *codec)
WM8994_VMID_RAMP_MASK, 0);
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
- WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0);
-
- snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
- WM8994_VMID_RAMP_MASK, 0);
+ WM8994_VMID_SEL_MASK, 0);
}
pm_runtime_put(codec->dev);
@@ -2277,6 +2262,18 @@ out:
configure_clock(codec);
+ /*
+ * If SYSCLK will be less than 50kHz adjust AIFnCLK dividers
+ * for detection.
+ */
+ if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000) {
+ dev_dbg(codec->dev, "Configuring AIFs for 128fs\n");
+ snd_soc_update_bits(codec, WM8994_AIF1_RATE,
+ WM8994_AIF1CLK_RATE_MASK, 0x1);
+ snd_soc_update_bits(codec, WM8994_AIF2_RATE,
+ WM8994_AIF2CLK_RATE_MASK, 0x1);
+ }
+
return 0;
}
@@ -2365,6 +2362,18 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
configure_clock(codec);
+ /*
+ * If SYSCLK will be less than 50kHz adjust AIFnCLK dividers
+ * for detection.
+ */
+ if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000) {
+ dev_dbg(codec->dev, "Configuring AIFs for 128fs\n");
+ snd_soc_update_bits(codec, WM8994_AIF1_RATE,
+ WM8994_AIF1CLK_RATE_MASK, 0x1);
+ snd_soc_update_bits(codec, WM8994_AIF2_RATE,
+ WM8994_AIF2CLK_RATE_MASK, 0x1);
+ }
+
return 0;
}
@@ -3082,7 +3091,8 @@ static int wm8994_codec_resume(struct snd_soc_codec *codec)
static void wm8994_handle_retune_mobile_pdata(struct wm8994_priv *wm8994)
{
struct snd_soc_codec *codec = wm8994->hubs.codec;
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
struct snd_kcontrol_new controls[] = {
SOC_ENUM_EXT("AIF1.1 EQ Mode",
wm8994->retune_mobile_enum,
@@ -3149,7 +3159,8 @@ static void wm8994_handle_retune_mobile_pdata(struct wm8994_priv *wm8994)
static void wm8994_handle_pdata(struct wm8994_priv *wm8994)
{
struct snd_soc_codec *codec = wm8994->hubs.codec;
- struct wm8994_pdata *pdata = wm8994->pdata;
+ struct wm8994 *control = wm8994->wm8994;
+ struct wm8994_pdata *pdata = &control->pdata;
int ret, i;
if (!pdata)
@@ -3389,38 +3400,80 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
return IRQ_HANDLED;
}
-/* Default microphone detection handler for WM8958 - the user can
- * override this if they wish.
- */
-static void wm8958_default_micdet(u16 status, void *data)
+static void wm1811_micd_stop(struct snd_soc_codec *codec)
+{
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+ if (!wm8994->jackdet)
+ return;
+
+ mutex_lock(&wm8994->accdet_lock);
+
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
+
+ wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
+
+ mutex_unlock(&wm8994->accdet_lock);
+
+ if (wm8994->wm8994->pdata.jd_ext_cap)
+ snd_soc_dapm_disable_pin(&codec->dapm,
+ "MICBIAS2");
+}
+
+static void wm8958_button_det(struct snd_soc_codec *codec, u16 status)
{
- struct snd_soc_codec *codec = data;
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
int report;
- dev_dbg(codec->dev, "MICDET %x\n", status);
+ report = 0;
+ if (status & 0x4)
+ report |= SND_JACK_BTN_0;
+
+ if (status & 0x8)
+ report |= SND_JACK_BTN_1;
+
+ if (status & 0x10)
+ report |= SND_JACK_BTN_2;
+
+ if (status & 0x20)
+ report |= SND_JACK_BTN_3;
+
+ if (status & 0x40)
+ report |= SND_JACK_BTN_4;
+
+ if (status & 0x80)
+ report |= SND_JACK_BTN_5;
+
+ snd_soc_jack_report(wm8994->micdet[0].jack, report,
+ wm8994->btn_mask);
+}
+
+static void wm8958_mic_id(void *data, u16 status)
+{
+ struct snd_soc_codec *codec = data;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
/* Either nothing present or just starting detection */
if (!(status & WM8958_MICD_STS)) {
- if (!wm8994->jackdet) {
- /* If nothing present then clear our statuses */
- dev_dbg(codec->dev, "Detected open circuit\n");
- wm8994->jack_mic = false;
- wm8994->mic_detecting = true;
+ /* If nothing present then clear our statuses */
+ dev_dbg(codec->dev, "Detected open circuit\n");
+ wm8994->jack_mic = false;
+ wm8994->mic_detecting = true;
- wm8958_micd_set_rate(codec);
+ wm1811_micd_stop(codec);
- snd_soc_jack_report(wm8994->micdet[0].jack, 0,
- wm8994->btn_mask |
- SND_JACK_HEADSET);
- }
+ wm8958_micd_set_rate(codec);
+
+ snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+ wm8994->btn_mask |
+ SND_JACK_HEADSET);
return;
}
/* If the measurement is showing a high impedence we've got a
* microphone.
*/
- if (wm8994->mic_detecting && (status & 0x600)) {
+ if (status & 0x600) {
dev_dbg(codec->dev, "Detected microphone\n");
wm8994->mic_detecting = false;
@@ -3433,64 +3486,67 @@ static void wm8958_default_micdet(u16 status, void *data)
}
- if (wm8994->mic_detecting && status & 0xfc) {
+ if (status & 0xfc) {
dev_dbg(codec->dev, "Detected headphone\n");
wm8994->mic_detecting = false;
wm8958_micd_set_rate(codec);
/* If we have jackdet that will detect removal */
- if (wm8994->jackdet) {
- mutex_lock(&wm8994->accdet_lock);
-
- snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
- WM8958_MICD_ENA, 0);
-
- wm1811_jackdet_set_mode(codec,
- WM1811_JACKDET_MODE_JACK);
-
- mutex_unlock(&wm8994->accdet_lock);
-
- if (wm8994->pdata->jd_ext_cap)
- snd_soc_dapm_disable_pin(&codec->dapm,
- "MICBIAS2");
- }
+ wm1811_micd_stop(codec);
snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
SND_JACK_HEADSET);
}
+}
- /* Report short circuit as a button */
- if (wm8994->jack_mic) {
- report = 0;
- if (status & 0x4)
- report |= SND_JACK_BTN_0;
+/* Deferred mic detection to allow for extra settling time */
+static void wm1811_mic_work(struct work_struct *work)
+{
+ struct wm8994_priv *wm8994 = container_of(work, struct wm8994_priv,
+ mic_work.work);
+ struct wm8994 *control = wm8994->wm8994;
+ struct snd_soc_codec *codec = wm8994->hubs.codec;
- if (status & 0x8)
- report |= SND_JACK_BTN_1;
+ pm_runtime_get_sync(codec->dev);
- if (status & 0x10)
- report |= SND_JACK_BTN_2;
+ /* If required for an external cap force MICBIAS on */
+ if (control->pdata.jd_ext_cap) {
+ snd_soc_dapm_force_enable_pin(&codec->dapm,
+ "MICBIAS2");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
- if (status & 0x20)
- report |= SND_JACK_BTN_3;
+ mutex_lock(&wm8994->accdet_lock);
- if (status & 0x40)
- report |= SND_JACK_BTN_4;
+ dev_dbg(codec->dev, "Starting mic detection\n");
- if (status & 0x80)
- report |= SND_JACK_BTN_5;
+ /* Use a user-supplied callback if we have one */
+ if (wm8994->micd_cb) {
+ wm8994->micd_cb(wm8994->micd_cb_data);
+ } else {
+ /*
+		 * Start off measurement of microphone impedance to find out
+ * what's actually there.
+ */
+ wm8994->mic_detecting = true;
+ wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_MIC);
- snd_soc_jack_report(wm8994->micdet[0].jack, report,
- wm8994->btn_mask);
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+ WM8958_MICD_ENA, WM8958_MICD_ENA);
}
+
+ mutex_unlock(&wm8994->accdet_lock);
+
+ pm_runtime_put(codec->dev);
}
static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
{
struct wm8994_priv *wm8994 = data;
+ struct wm8994 *control = wm8994->wm8994;
struct snd_soc_codec *codec = wm8994->hubs.codec;
- int reg;
+ int reg, delay;
bool present;
pm_runtime_get_sync(codec->dev);
@@ -3521,18 +3577,14 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
snd_soc_update_bits(codec, WM1811_JACKDET_CTRL,
WM1811_JACKDET_DB, 0);
- /*
- * Start off measument of microphone impedence to find
- * out what's actually there.
- */
- wm8994->mic_detecting = true;
- wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_MIC);
-
- snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
- WM8958_MICD_ENA, WM8958_MICD_ENA);
+ delay = control->pdata.micdet_delay;
+ schedule_delayed_work(&wm8994->mic_work,
+ msecs_to_jiffies(delay));
} else {
dev_dbg(codec->dev, "Jack not detected\n");
+ cancel_delayed_work_sync(&wm8994->mic_work);
+
snd_soc_update_bits(codec, WM8958_MICBIAS2,
WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
@@ -3549,14 +3601,9 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
mutex_unlock(&wm8994->accdet_lock);
- /* If required for an external cap force MICBIAS on */
- if (wm8994->pdata->jd_ext_cap) {
- if (present)
- snd_soc_dapm_force_enable_pin(&codec->dapm,
- "MICBIAS2");
- else
- snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS2");
- }
+ /* Turn off MICBIAS if it was on for an external cap */
+ if (control->pdata.jd_ext_cap && !present)
+ snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS2");
if (present)
snd_soc_jack_report(wm8994->micdet[0].jack,
@@ -3599,7 +3646,8 @@ static void wm1811_jackdet_bootstrap(struct work_struct *work)
* detection algorithm.
*/
int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
- wm8958_micdet_cb cb, void *cb_data)
+ wm1811_micdet_cb det_cb, void *det_cb_data,
+ wm1811_mic_id_cb id_cb, void *id_cb_data)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
@@ -3614,27 +3662,32 @@ int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
}
if (jack) {
- if (!cb) {
- dev_dbg(codec->dev, "Using default micdet callback\n");
- cb = wm8958_default_micdet;
- cb_data = codec;
- }
-
snd_soc_dapm_force_enable_pin(&codec->dapm, "CLK_SYS");
snd_soc_dapm_sync(&codec->dapm);
wm8994->micdet[0].jack = jack;
- wm8994->jack_cb = cb;
- wm8994->jack_cb_data = cb_data;
- wm8994->mic_detecting = true;
- wm8994->jack_mic = false;
+ if (det_cb) {
+ wm8994->micd_cb = det_cb;
+ wm8994->micd_cb_data = det_cb_data;
+ } else {
+ wm8994->mic_detecting = true;
+ wm8994->jack_mic = false;
+ }
+
+ if (id_cb) {
+ wm8994->mic_id_cb = id_cb;
+ wm8994->mic_id_cb_data = id_cb_data;
+ } else {
+ wm8994->mic_id_cb = wm8958_mic_id;
+ wm8994->mic_id_cb_data = codec;
+ }
wm8958_micd_set_rate(codec);
/* Detect microphones and short circuits by default */
- if (wm8994->pdata->micd_lvl_sel)
- micd_lvl_sel = wm8994->pdata->micd_lvl_sel;
+ if (control->pdata.micd_lvl_sel)
+ micd_lvl_sel = control->pdata.micd_lvl_sel;
else
micd_lvl_sel = 0x41;
@@ -3728,10 +3781,22 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
trace_snd_soc_jack_irq(dev_name(codec->dev));
#endif
- if (wm8994->jack_cb)
- wm8994->jack_cb(reg, wm8994->jack_cb_data);
+ /* Avoid a transient report when the accessory is being removed */
+ if (wm8994->jackdet) {
+ reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
+ if (reg < 0) {
+ dev_err(codec->dev, "Failed to read jack status: %d\n",
+ reg);
+ } else if (!(reg & WM1811_JACKDET_LVL)) {
+ dev_dbg(codec->dev, "Ignoring removed jack\n");
+ return IRQ_HANDLED;
+ }
+ }
+
+ if (wm8994->mic_detecting)
+ wm8994->mic_id_cb(wm8994->mic_id_cb_data, reg);
else
- dev_warn(codec->dev, "Accessory detection with no callback\n");
+ wm8958_button_det(codec, reg);
out:
pm_runtime_put(codec->dev);
@@ -3779,15 +3844,24 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
mutex_init(&wm8994->accdet_lock);
- INIT_DELAYED_WORK(&wm8994->mic_work, wm8994_mic_work);
INIT_DELAYED_WORK(&wm8994->jackdet_bootstrap,
wm1811_jackdet_bootstrap);
+ switch (control->type) {
+ case WM8994:
+ INIT_DELAYED_WORK(&wm8994->mic_work, wm8994_mic_work);
+ break;
+ case WM1811:
+ INIT_DELAYED_WORK(&wm8994->mic_work, wm1811_mic_work);
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
init_completion(&wm8994->fll_locked[i]);
- if (wm8994->pdata && wm8994->pdata->micdet_irq)
- wm8994->micdet_irq = wm8994->pdata->micdet_irq;
+ wm8994->micdet_irq = control->pdata.micdet_irq;
pm_runtime_enable(codec->dev);
pm_runtime_idle(codec->dev);
@@ -3800,8 +3874,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
switch (control->type) {
case WM8994:
/* Single ended line outputs should have VMID on. */
- if (!wm8994->pdata->lineout1_diff ||
- !wm8994->pdata->lineout2_diff)
+ if (!control->pdata.lineout1_diff ||
+ !control->pdata.lineout2_diff)
codec->dapm.idle_bias_off = 0;
switch (wm8994->revision) {
@@ -3839,20 +3913,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
wm8994->hubs.no_cache_dac_hp_direct = true;
wm8994->fll_byp = true;
- switch (control->cust_id) {
- case 0:
- case 2:
- wm8994->hubs.dcs_codes_l = -9;
- wm8994->hubs.dcs_codes_r = -7;
- break;
- case 1:
- case 3:
- wm8994->hubs.dcs_codes_l = -8;
- wm8994->hubs.dcs_codes_r = -7;
- break;
- default:
- break;
- }
+ wm8994->hubs.dcs_codes_l = -9;
+ wm8994->hubs.dcs_codes_r = -7;
snd_soc_update_bits(codec, WM8994_ANALOGUE_HP_1,
WM1811_HPOUT1_ATTN, WM1811_HPOUT1_ATTN);
@@ -4225,7 +4287,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
.set_bias_level = wm8994_set_bias_level,
};
-static int __devinit wm8994_probe(struct platform_device *pdev)
+static int wm8994_probe(struct platform_device *pdev)
{
struct wm8994_priv *wm8994;
@@ -4236,13 +4298,12 @@ static int __devinit wm8994_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wm8994);
wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
- wm8994->pdata = dev_get_platdata(pdev->dev.parent);
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8994,
wm8994_dai, ARRAY_SIZE(wm8994_dai));
}
-static int __devexit wm8994_remove(struct platform_device *pdev)
+static int wm8994_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -4266,7 +4327,7 @@ static int wm8994_resume(struct device *dev)
{
struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
- if (wm8994->jackdet && wm8994->jack_cb)
+ if (wm8994->jackdet && wm8994->jackdet_mode)
regmap_update_bits(wm8994->wm8994->regmap, WM8994_ANTIPOP_2,
WM1811_JACKDET_MODE_MASK,
WM1811_JACKDET_MODE_AUDIO);
@@ -4286,7 +4347,7 @@ static struct platform_driver wm8994_codec_driver = {
.pm = &wm8994_pm_ops,
},
.probe = wm8994_probe,
- .remove = __devexit_p(wm8994_remove),
+ .remove = wm8994_remove,
};
module_platform_driver(wm8994_codec_driver);
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index ccbce5791e9..45f19270202 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -39,12 +39,14 @@ enum wm8994_vmid_mode {
WM8994_VMID_FORCE,
};
-typedef void (*wm8958_micdet_cb)(u16 status, void *data);
+typedef void (*wm1811_micdet_cb)(void *data);
+typedef void (*wm1811_mic_id_cb)(void *data, u16 status);
int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
int micbias);
int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
- wm8958_micdet_cb cb, void *cb_data);
+ wm1811_micdet_cb cb, void *det_cb_data,
+ wm1811_mic_id_cb id_cb, void *id_cb_data);
int wm8994_vmid_mode(struct snd_soc_codec *codec, enum wm8994_vmid_mode mode);
@@ -138,12 +140,13 @@ struct wm8994_priv {
int jackdet_mode;
struct delayed_work jackdet_bootstrap;
- wm8958_micdet_cb jack_cb;
- void *jack_cb_data;
int micdet_irq;
+ wm1811_micdet_cb micd_cb;
+ void *micd_cb_data;
+ wm1811_mic_id_cb mic_id_cb;
+ void *mic_id_cb_data;
int revision;
- struct wm8994_pdata *pdata;
unsigned int aif1clk_enable:1;
unsigned int aif2clk_enable:1;
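
The wm8958_mic_detect() prototype above now takes a detection-start callback and a separate identification callback in place of the old single jack_cb. A hypothetical machine-driver sketch of the new call (the my_* names and include paths are invented; passing NULL for either callback keeps the codec driver's built-in handling, and the status decoding below simply mirrors wm8958_mic_id() from the patch):

#include <linux/mfd/wm8994/registers.h>
#include <sound/jack.h>
#include <sound/soc.h>

#include "../codecs/wm8994.h"

static struct snd_soc_jack headset_jack;

/* Invoked when jack detection wants accessory identification started;
 * a board with its own detection hardware would kick that off here. */
static void my_start_micd(void *data)
{
	struct snd_soc_codec *codec = data;

	dev_dbg(codec->dev, "starting board-specific mic detection\n");
}

/* Invoked with the raw MICDET status word; translate it into jack events. */
static void my_mic_id(void *data, u16 status)
{
	struct snd_soc_codec *codec = data;

	if (!(status & WM8958_MICD_STS)) {
		dev_dbg(codec->dev, "open circuit\n");
		snd_soc_jack_report(&headset_jack, 0, SND_JACK_HEADSET);
	} else if (status & 0x600) {
		/* High impedance: a microphone is present */
		snd_soc_jack_report(&headset_jack, SND_JACK_HEADSET,
				    SND_JACK_HEADSET);
	} else {
		snd_soc_jack_report(&headset_jack, SND_JACK_HEADPHONE,
				    SND_JACK_HEADSET);
	}
}

/* Typically called from the machine driver's dai_link .init callback */
static int my_headset_init(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_jack_new(codec, "Headset", SND_JACK_HEADSET,
			       &headset_jack);
	if (ret)
		return ret;

	return wm8958_mic_detect(codec, &headset_jack,
				 my_start_micd, codec,
				 my_mic_id, codec);
}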
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 28c89b094c6..90a65c42754 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2256,46 +2256,33 @@ static struct regmap_config wm8995_regmap = {
};
#if defined(CONFIG_SPI_MASTER)
-static int __devinit wm8995_spi_probe(struct spi_device *spi)
+static int wm8995_spi_probe(struct spi_device *spi)
{
struct wm8995_priv *wm8995;
int ret;
- wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+ wm8995 = devm_kzalloc(&spi->dev, sizeof(*wm8995), GFP_KERNEL);
if (!wm8995)
return -ENOMEM;
spi_set_drvdata(spi, wm8995);
- wm8995->regmap = regmap_init_spi(spi, &wm8995_regmap);
+ wm8995->regmap = devm_regmap_init_spi(spi, &wm8995_regmap);
if (IS_ERR(wm8995->regmap)) {
ret = PTR_ERR(wm8995->regmap);
dev_err(&spi->dev, "Failed to register regmap: %d\n", ret);
- goto err_alloc;
+ return ret;
}
ret = snd_soc_register_codec(&spi->dev,
&soc_codec_dev_wm8995, wm8995_dai,
ARRAY_SIZE(wm8995_dai));
- if (ret < 0)
- goto err_regmap;
-
- return ret;
-
-err_regmap:
- regmap_exit(wm8995->regmap);
-err_alloc:
- kfree(wm8995);
-
return ret;
}
-static int __devexit wm8995_spi_remove(struct spi_device *spi)
+static int wm8995_spi_remove(struct spi_device *spi)
{
- struct wm8995_priv *wm8995 = spi_get_drvdata(spi);
snd_soc_unregister_codec(&spi->dev);
- regmap_exit(wm8995->regmap);
- kfree(wm8995);
return 0;
}
@@ -2305,55 +2292,42 @@ static struct spi_driver wm8995_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8995_spi_probe,
- .remove = __devexit_p(wm8995_spi_remove)
+ .remove = wm8995_spi_remove
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm8995_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8995_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8995_priv *wm8995;
int ret;
- wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+ wm8995 = devm_kzalloc(&i2c->dev, sizeof(*wm8995), GFP_KERNEL);
if (!wm8995)
return -ENOMEM;
i2c_set_clientdata(i2c, wm8995);
- wm8995->regmap = regmap_init_i2c(i2c, &wm8995_regmap);
+ wm8995->regmap = devm_regmap_init_i2c(i2c, &wm8995_regmap);
if (IS_ERR(wm8995->regmap)) {
ret = PTR_ERR(wm8995->regmap);
dev_err(&i2c->dev, "Failed to register regmap: %d\n", ret);
- goto err_alloc;
+ return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8995, wm8995_dai,
ARRAY_SIZE(wm8995_dai));
- if (ret < 0) {
+ if (ret < 0)
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
- goto err_regmap;
- }
-
- return ret;
-
-err_regmap:
- regmap_exit(wm8995->regmap);
-err_alloc:
- kfree(wm8995);
return ret;
}
-static __devexit int wm8995_i2c_remove(struct i2c_client *client)
+static int wm8995_i2c_remove(struct i2c_client *client)
{
- struct wm8995_priv *wm8995 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8995->regmap);
- kfree(wm8995);
return 0;
}
@@ -2370,7 +2344,7 @@ static struct i2c_driver wm8995_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8995_i2c_probe,
- .remove = __devexit_p(wm8995_i2c_remove),
+ .remove = wm8995_i2c_remove,
.id_table = wm8995_i2c_id
};
#endif
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 6dcb02c3666..46fe83d2b22 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2765,8 +2765,8 @@ static struct snd_soc_dai_driver wm8996_dai[] = {
},
};
-static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm8996_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm8996_priv *wm8996;
int ret, i;
@@ -3077,7 +3077,7 @@ err:
return ret;
}
-static __devexit int wm8996_i2c_remove(struct i2c_client *client)
+static int wm8996_i2c_remove(struct i2c_client *client)
{
struct wm8996_priv *wm8996 = i2c_get_clientdata(client);
int i;
@@ -3107,7 +3107,7 @@ static struct i2c_driver wm8996_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm8996_i2c_probe,
- .remove = __devexit_p(wm8996_i2c_remove),
+ .remove = wm8996_i2c_remove,
.id_table = wm8996_i2c_id,
};
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 2de74e1ea22..630b3d776ec 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1327,8 +1327,8 @@ static const struct regmap_config wm9081_regmap = {
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int wm9081_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct wm9081_priv *wm9081;
unsigned int reg;
@@ -1341,28 +1341,27 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm9081);
- wm9081->regmap = regmap_init_i2c(i2c, &wm9081_regmap);
+ wm9081->regmap = devm_regmap_init_i2c(i2c, &wm9081_regmap);
if (IS_ERR(wm9081->regmap)) {
ret = PTR_ERR(wm9081->regmap);
dev_err(&i2c->dev, "regmap_init() failed: %d\n", ret);
- goto err;
+ return ret;
}
ret = regmap_read(wm9081->regmap, WM9081_SOFTWARE_RESET, &reg);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to read chip ID: %d\n", ret);
- goto err_regmap;
+ return ret;
}
if (reg != 0x9081) {
dev_err(&i2c->dev, "Device is not a WM9081: ID=0x%x\n", reg);
- ret = -EINVAL;
- goto err_regmap;
+ return -EINVAL;
}
ret = wm9081_reset(wm9081->regmap);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to issue reset\n");
- goto err_regmap;
+ return ret;
}
if (dev_get_platdata(&i2c->dev))
@@ -1382,23 +1381,14 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm9081, &wm9081_dai, 1);
if (ret < 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(wm9081->regmap);
-err:
-
- return ret;
}
-static __devexit int wm9081_i2c_remove(struct i2c_client *client)
+static int wm9081_i2c_remove(struct i2c_client *client)
{
- struct wm9081_priv *wm9081 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm9081->regmap);
return 0;
}
@@ -1414,7 +1404,7 @@ static struct i2c_driver wm9081_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm9081_i2c_probe,
- .remove = __devexit_p(wm9081_i2c_remove),
+ .remove = wm9081_i2c_remove,
.id_table = wm9081_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index c7ddc56175d..a07fe1618ee 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -628,7 +628,7 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
}
- wm9090->regmap = regmap_init_i2c(i2c, &wm9090_regmap);
+ wm9090->regmap = devm_regmap_init_i2c(i2c, &wm9090_regmap);
if (IS_ERR(wm9090->regmap)) {
ret = PTR_ERR(wm9090->regmap);
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
@@ -637,16 +637,16 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
ret = regmap_read(wm9090->regmap, WM9090_SOFTWARE_RESET, &reg);
if (ret < 0)
- goto err;
+ return ret;
+
if (reg != 0x9093) {
dev_err(&i2c->dev, "Device is not a WM9090, ID=%x\n", reg);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
ret = regmap_write(wm9090->regmap, WM9090_SOFTWARE_RESET, 0);
if (ret < 0)
- goto err;
+ return ret;
if (i2c->dev.platform_data)
memcpy(&wm9090->pdata, i2c->dev.platform_data,
@@ -658,23 +658,15 @@ static int wm9090_i2c_probe(struct i2c_client *i2c,
&soc_codec_dev_wm9090, NULL, 0);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
- goto err;
+ return ret;
}
return 0;
-
-err:
- regmap_exit(wm9090->regmap);
- return ret;
}
-static int __devexit wm9090_i2c_remove(struct i2c_client *i2c)
+static int wm9090_i2c_remove(struct i2c_client *i2c)
{
- struct wm9090_priv *wm9090 = i2c_get_clientdata(i2c);
-
snd_soc_unregister_codec(&i2c->dev);
- regmap_exit(wm9090->regmap);
-
return 0;
}
@@ -691,7 +683,7 @@ static struct i2c_driver wm9090_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = wm9090_i2c_probe,
- .remove = __devexit_p(wm9090_i2c_remove),
+ .remove = wm9090_i2c_remove,
.id_table = wm9090_id,
};
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index e8e782a0c78..05b1f346695 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -382,13 +382,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
.num_dapm_routes = ARRAY_SIZE(wm9705_audio_map),
};
-static __devinit int wm9705_probe(struct platform_device *pdev)
+static int wm9705_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9705, wm9705_dai, ARRAY_SIZE(wm9705_dai));
}
-static int __devexit wm9705_remove(struct platform_device *pdev)
+static int wm9705_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -401,7 +401,7 @@ static struct platform_driver wm9705_codec_driver = {
},
.probe = wm9705_probe,
- .remove = __devexit_p(wm9705_remove),
+ .remove = wm9705_remove,
};
module_platform_driver(wm9705_codec_driver);
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 4dd73ea08d0..8e9a6a3eeb1 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -685,13 +685,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
.num_dapm_routes = ARRAY_SIZE(wm9712_audio_map),
};
-static __devinit int wm9712_probe(struct platform_device *pdev)
+static int wm9712_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9712, wm9712_dai, ARRAY_SIZE(wm9712_dai));
}
-static int __devexit wm9712_remove(struct platform_device *pdev)
+static int wm9712_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -704,7 +704,7 @@ static struct platform_driver wm9712_codec_driver = {
},
.probe = wm9712_probe,
- .remove = __devexit_p(wm9712_remove),
+ .remove = wm9712_remove,
};
module_platform_driver(wm9712_codec_driver);
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 3eb19fb71d1..f7afa68d8c7 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1254,13 +1254,13 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
.num_dapm_routes = ARRAY_SIZE(wm9713_audio_map),
};
-static __devinit int wm9713_probe(struct platform_device *pdev)
+static int wm9713_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9713, wm9713_dai, ARRAY_SIZE(wm9713_dai));
}
-static int __devexit wm9713_remove(struct platform_device *pdev)
+static int wm9713_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
@@ -1273,7 +1273,7 @@ static struct platform_driver wm9713_codec_driver = {
},
.probe = wm9713_probe,
- .remove = __devexit_p(wm9713_remove),
+ .remove = wm9713_remove,
};
module_platform_driver(wm9713_codec_driver);
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
new file mode 100644
index 00000000000..ffc89fab96f
--- /dev/null
+++ b/sound/soc/codecs/wm_adsp.c
@@ -0,0 +1,699 @@
+/*
+ * wm_adsp.c -- Wolfson ADSP support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/registers.h>
+
+#include "wm_adsp.h"
+
+#define adsp_crit(_dsp, fmt, ...) \
+ dev_crit(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+#define adsp_err(_dsp, fmt, ...) \
+ dev_err(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+#define adsp_warn(_dsp, fmt, ...) \
+ dev_warn(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+#define adsp_info(_dsp, fmt, ...) \
+ dev_info(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+#define adsp_dbg(_dsp, fmt, ...) \
+ dev_dbg(_dsp->dev, "DSP%d: " fmt, _dsp->num, ##__VA_ARGS__)
+
+#define ADSP1_CONTROL_1 0x00
+#define ADSP1_CONTROL_2 0x02
+#define ADSP1_CONTROL_3 0x03
+#define ADSP1_CONTROL_4 0x04
+#define ADSP1_CONTROL_5 0x06
+#define ADSP1_CONTROL_6 0x07
+#define ADSP1_CONTROL_7 0x08
+#define ADSP1_CONTROL_8 0x09
+#define ADSP1_CONTROL_9 0x0A
+#define ADSP1_CONTROL_10 0x0B
+#define ADSP1_CONTROL_11 0x0C
+#define ADSP1_CONTROL_12 0x0D
+#define ADSP1_CONTROL_13 0x0F
+#define ADSP1_CONTROL_14 0x10
+#define ADSP1_CONTROL_15 0x11
+#define ADSP1_CONTROL_16 0x12
+#define ADSP1_CONTROL_17 0x13
+#define ADSP1_CONTROL_18 0x14
+#define ADSP1_CONTROL_19 0x16
+#define ADSP1_CONTROL_20 0x17
+#define ADSP1_CONTROL_21 0x18
+#define ADSP1_CONTROL_22 0x1A
+#define ADSP1_CONTROL_23 0x1B
+#define ADSP1_CONTROL_24 0x1C
+#define ADSP1_CONTROL_25 0x1E
+#define ADSP1_CONTROL_26 0x20
+#define ADSP1_CONTROL_27 0x21
+#define ADSP1_CONTROL_28 0x22
+#define ADSP1_CONTROL_29 0x23
+#define ADSP1_CONTROL_30 0x24
+#define ADSP1_CONTROL_31 0x26
+
+/*
+ * ADSP1 Control 19
+ */
+#define ADSP1_WDMA_BUFFER_LENGTH_MASK 0x00FF /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_SHIFT 0 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_WIDTH 8 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+
+
+/*
+ * ADSP1 Control 30
+ */
+#define ADSP1_DBG_CLK_ENA 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_MASK 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_SHIFT 3 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_WIDTH 1 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP1_START 0x0001 /* DSP1_START */
+#define ADSP1_START_MASK 0x0001 /* DSP1_START */
+#define ADSP1_START_SHIFT 0 /* DSP1_START */
+#define ADSP1_START_WIDTH 1 /* DSP1_START */
+
+#define ADSP2_CONTROL 0
+#define ADSP2_CLOCKING 1
+#define ADSP2_STATUS1 4
+
+/*
+ * ADSP2 Control
+ */
+
+#define ADSP2_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ADSP2_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP2_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP2_START 0x0001 /* DSP1_START */
+#define ADSP2_START_MASK 0x0001 /* DSP1_START */
+#define ADSP2_START_SHIFT 0 /* DSP1_START */
+#define ADSP2_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP2 clocking
+ */
+#define ADSP2_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+/*
+ * ADSP2 Status 1
+ */
+#define ADSP2_RAM_RDY 0x0001
+#define ADSP2_RAM_RDY_MASK 0x0001
+#define ADSP2_RAM_RDY_SHIFT 0
+#define ADSP2_RAM_RDY_WIDTH 1
+
+
+static struct wm_adsp_region const *wm_adsp_find_region(struct wm_adsp *dsp,
+ int type)
+{
+ int i;
+
+ for (i = 0; i < dsp->num_mems; i++)
+ if (dsp->mem[i].type == type)
+ return &dsp->mem[i];
+
+ return NULL;
+}
+
+static int wm_adsp_load(struct wm_adsp *dsp)
+{
+ const struct firmware *firmware;
+ struct regmap *regmap = dsp->regmap;
+ unsigned int pos = 0;
+ const struct wmfw_header *header;
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+ const struct wmfw_adsp2_sizes *adsp2_sizes;
+ const struct wmfw_footer *footer;
+ const struct wmfw_region *region;
+ const struct wm_adsp_region *mem;
+ const char *region_name;
+ char *file, *text;
+ unsigned int reg;
+ int regions = 0;
+ int ret, offset, type, sizes;
+
+ file = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (file == NULL)
+ return -ENOMEM;
+
+ snprintf(file, PAGE_SIZE, "%s-dsp%d.wmfw", dsp->part, dsp->num);
+ file[PAGE_SIZE - 1] = '\0';
+
+ ret = request_firmware(&firmware, file, dsp->dev);
+ if (ret != 0) {
+ adsp_err(dsp, "Failed to request '%s'\n", file);
+ goto out;
+ }
+ ret = -EINVAL;
+
+ pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+ if (pos >= firmware->size) {
+ adsp_err(dsp, "%s: file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+	header = (void *)&firmware->data[0];
+
+ if (memcmp(&header->magic[0], "WMFW", 4) != 0) {
+ adsp_err(dsp, "%s: invalid magic\n", file);
+ goto out_fw;
+ }
+
+ if (header->ver != 0) {
+ adsp_err(dsp, "%s: unknown file format %d\n",
+ file, header->ver);
+ goto out_fw;
+ }
+
+ if (header->core != dsp->type) {
+ adsp_err(dsp, "%s: invalid core %d != %d\n",
+ file, header->core, dsp->type);
+ goto out_fw;
+ }
+
+ switch (dsp->type) {
+ case WMFW_ADSP1:
+ pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+ adsp1_sizes = (void *)&(header[1]);
+ footer = (void *)&(adsp1_sizes[1]);
+ sizes = sizeof(*adsp1_sizes);
+
+ adsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n",
+ file, le32_to_cpu(adsp1_sizes->dm),
+ le32_to_cpu(adsp1_sizes->pm),
+ le32_to_cpu(adsp1_sizes->zm));
+ break;
+
+ case WMFW_ADSP2:
+ pos = sizeof(*header) + sizeof(*adsp2_sizes) + sizeof(*footer);
+ adsp2_sizes = (void *)&(header[1]);
+ footer = (void *)&(adsp2_sizes[1]);
+ sizes = sizeof(*adsp2_sizes);
+
+		adsp_dbg(dsp, "%s: %d XM, %d YM, %d PM, %d ZM\n",
+ file, le32_to_cpu(adsp2_sizes->xm),
+ le32_to_cpu(adsp2_sizes->ym),
+ le32_to_cpu(adsp2_sizes->pm),
+ le32_to_cpu(adsp2_sizes->zm));
+ break;
+
+ default:
+ BUG_ON(NULL == "Unknown DSP type");
+ goto out_fw;
+ }
+
+ if (le32_to_cpu(header->len) != sizeof(*header) +
+ sizes + sizeof(*footer)) {
+ adsp_err(dsp, "%s: unexpected header length %d\n",
+ file, le32_to_cpu(header->len));
+ goto out_fw;
+ }
+
+ adsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ le64_to_cpu(footer->timestamp));
+
+ while (pos < firmware->size &&
+	       firmware->size - pos > sizeof(*region)) {
+ region = (void *)&(firmware->data[pos]);
+ region_name = "Unknown";
+ reg = 0;
+ text = NULL;
+ offset = le32_to_cpu(region->offset) & 0xffffff;
+ type = be32_to_cpu(region->type) & 0xff;
+ mem = wm_adsp_find_region(dsp, type);
+
+ switch (type) {
+ case WMFW_NAME_TEXT:
+ region_name = "Firmware name";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_INFO_TEXT:
+ region_name = "Information";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ABSOLUTE:
+ region_name = "Absolute";
+ reg = offset;
+ break;
+ case WMFW_ADSP1_PM:
+ BUG_ON(!mem);
+ region_name = "PM";
+ reg = mem->base + (offset * 3);
+ break;
+ case WMFW_ADSP1_DM:
+ BUG_ON(!mem);
+ region_name = "DM";
+ reg = mem->base + (offset * 2);
+ break;
+ case WMFW_ADSP2_XM:
+ BUG_ON(!mem);
+ region_name = "XM";
+ reg = mem->base + (offset * 2);
+ break;
+ case WMFW_ADSP2_YM:
+ BUG_ON(!mem);
+ region_name = "YM";
+ reg = mem->base + (offset * 2);
+ break;
+ case WMFW_ADSP1_ZM:
+ BUG_ON(!mem);
+ region_name = "ZM";
+ reg = mem->base + (offset * 2);
+ break;
+ default:
+ adsp_warn(dsp,
+ "%s.%d: Unknown region type %x at %d(%x)\n",
+ file, regions, type, pos, pos);
+ break;
+ }
+
+ adsp_dbg(dsp, "%s.%d: %d bytes at %d in %s\n", file,
+ regions, le32_to_cpu(region->len), offset,
+ region_name);
+
+ if (text) {
+ memcpy(text, region->data, le32_to_cpu(region->len));
+ adsp_info(dsp, "%s: %s\n", file, text);
+ kfree(text);
+ }
+
+ if (reg) {
+ ret = regmap_raw_write(regmap, reg, region->data,
+ le32_to_cpu(region->len));
+ if (ret != 0) {
+ adsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+ file, regions,
+ le32_to_cpu(region->len), offset,
+ region_name, ret);
+ goto out_fw;
+ }
+ }
+
+ pos += le32_to_cpu(region->len) + sizeof(*region);
+ regions++;
+ }
+
+ if (pos > firmware->size)
+ adsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, regions, pos - firmware->size);
+
+out_fw:
+ release_firmware(firmware);
+out:
+ kfree(file);
+
+ return ret;
+}
+
+static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+{
+ struct regmap *regmap = dsp->regmap;
+ struct wmfw_coeff_hdr *hdr;
+ struct wmfw_coeff_item *blk;
+ const struct firmware *firmware;
+ const char *region_name;
+ int ret, pos, blocks, type, offset, reg;
+ char *file;
+
+ file = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (file == NULL)
+ return -ENOMEM;
+
+ snprintf(file, PAGE_SIZE, "%s-dsp%d.bin", dsp->part, dsp->num);
+ file[PAGE_SIZE - 1] = '\0';
+
+ ret = request_firmware(&firmware, file, dsp->dev);
+ if (ret != 0) {
+ adsp_warn(dsp, "Failed to request '%s'\n", file);
+ ret = 0;
+ goto out;
+ }
+ ret = -EINVAL;
+
+ if (sizeof(*hdr) >= firmware->size) {
+ adsp_err(dsp, "%s: file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+	hdr = (void *)&firmware->data[0];
+ if (memcmp(hdr->magic, "WMDR", 4) != 0) {
+ adsp_err(dsp, "%s: invalid magic\n", file);
+		goto out_fw;
+ }
+
+ adsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
+ (le32_to_cpu(hdr->ver) >> 16) & 0xff,
+ (le32_to_cpu(hdr->ver) >> 8) & 0xff,
+ le32_to_cpu(hdr->ver) & 0xff);
+
+ pos = le32_to_cpu(hdr->len);
+
+ blocks = 0;
+ while (pos < firmware->size &&
+	       firmware->size - pos > sizeof(*blk)) {
+		blk = (void *)(&firmware->data[pos]);
+
+ type = be32_to_cpu(blk->type) & 0xff;
+ offset = le32_to_cpu(blk->offset) & 0xffffff;
+
+ adsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
+ file, blocks, le32_to_cpu(blk->id),
+ (le32_to_cpu(blk->ver) >> 16) & 0xff,
+ (le32_to_cpu(blk->ver) >> 8) & 0xff,
+ le32_to_cpu(blk->ver) & 0xff);
+ adsp_dbg(dsp, "%s.%d: %d bytes at 0x%x in %x\n",
+ file, blocks, le32_to_cpu(blk->len), offset, type);
+
+ reg = 0;
+ region_name = "Unknown";
+ switch (type) {
+ case WMFW_NAME_TEXT:
+ case WMFW_INFO_TEXT:
+ break;
+ case WMFW_ABSOLUTE:
+ region_name = "register";
+ reg = offset;
+ break;
+ default:
+ adsp_err(dsp, "Unknown region type %x\n", type);
+ break;
+ }
+
+ if (reg) {
+ ret = regmap_raw_write(regmap, reg, blk->data,
+ le32_to_cpu(blk->len));
+ if (ret != 0) {
+ adsp_err(dsp,
+ "%s.%d: Failed to write to %x in %s\n",
+ file, blocks, reg, region_name);
+ }
+ }
+
+ pos += le32_to_cpu(blk->len) + sizeof(*blk);
+ blocks++;
+ }
+
+ if (pos > firmware->size)
+ adsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, blocks, pos - firmware->size);
+
+out_fw:
+ release_firmware(firmware);
+out:
+ kfree(file);
+ return 0;
+}
+
+int wm_adsp1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
+ struct wm_adsp *dsp = &dsps[w->shift];
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, ADSP1_SYS_ENA);
+
+ ret = wm_adsp_load(dsp);
+ if (ret != 0)
+ goto err;
+
+ ret = wm_adsp_load_coeff(dsp);
+ if (ret != 0)
+ goto err;
+
+ /* Start the core running */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START,
+ ADSP1_CORE_ENA | ADSP1_START);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Halt the core */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_19,
+ ADSP1_WDMA_BUFFER_LENGTH_MASK, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+
+err:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm_adsp1_event);
+
+static int wm_adsp2_ena(struct wm_adsp *dsp)
+{
+ unsigned int val;
+ int ret, count;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ if (ret != 0)
+ return ret;
+
+ /* Wait for the RAM to start, should be near instantaneous */
+ count = 0;
+ do {
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
+ &val);
+ if (ret != 0)
+ return ret;
+ } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+ if (!(val & ADSP2_RAM_RDY)) {
+ adsp_err(dsp, "Failed to start DSP RAM\n");
+ return -EBUSY;
+ }
+
+ adsp_dbg(dsp, "RAM ready after %d polls\n", count);
+ adsp_info(dsp, "RAM ready after %d polls\n", count);
+
+ return 0;
+}
+
+int wm_adsp2_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
+ struct wm_adsp *dsp = &dsps[w->shift];
+ unsigned int val;
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * For simplicity set the DSP clock rate to be the
+ * SYSCLK rate rather than making it configurable.
+ */
+ ret = regmap_read(dsp->regmap, ARIZONA_SYSTEM_CLOCK_1, &val);
+ if (ret != 0) {
+ adsp_err(dsp, "Failed to read SYSCLK state: %d\n",
+ ret);
+ return ret;
+ }
+ val = (val & ARIZONA_SYSCLK_FREQ_MASK)
+ >> ARIZONA_SYSCLK_FREQ_SHIFT;
+
+ ret = regmap_update_bits(dsp->regmap,
+ dsp->base + ADSP2_CLOCKING,
+ ADSP2_CLK_SEL_MASK, val);
+ if (ret != 0) {
+ adsp_err(dsp, "Failed to set clock rate: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (dsp->dvfs) {
+ ret = regmap_read(dsp->regmap,
+ dsp->base + ADSP2_CLOCKING, &val);
+ if (ret != 0) {
+ dev_err(dsp->dev,
+ "Failed to read clocking: %d\n", ret);
+ return ret;
+ }
+
+ if ((val & ADSP2_CLK_SEL_MASK) >= 3) {
+ ret = regulator_enable(dsp->dvfs);
+ if (ret != 0) {
+ dev_err(dsp->dev,
+ "Failed to enable supply: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regulator_set_voltage(dsp->dvfs,
+ 1800000,
+ 1800000);
+ if (ret != 0) {
+ dev_err(dsp->dev,
+ "Failed to raise supply: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ ret = wm_adsp2_ena(dsp);
+ if (ret != 0)
+ return ret;
+
+ ret = wm_adsp_load(dsp);
+ if (ret != 0)
+ goto err;
+
+ ret = wm_adsp_load_coeff(dsp);
+ if (ret != 0)
+ goto err;
+
+ ret = regmap_update_bits(dsp->regmap,
+ dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START,
+ ADSP2_CORE_ENA | ADSP2_START);
+ if (ret != 0)
+ goto err;
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA | ADSP2_CORE_ENA |
+ ADSP2_START, 0);
+
+ if (dsp->dvfs) {
+ ret = regulator_set_voltage(dsp->dvfs, 1200000,
+ 1800000);
+ if (ret != 0)
+ dev_warn(dsp->dev,
+ "Failed to lower supply: %d\n",
+ ret);
+
+ ret = regulator_disable(dsp->dvfs);
+ if (ret != 0)
+ dev_err(dsp->dev,
+					"Failed to disable supply: %d\n",
+ ret);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+err:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA | ADSP2_CORE_ENA | ADSP2_START, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm_adsp2_event);
+
+int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
+{
+ int ret;
+
+ /*
+ * Disable the DSP memory by default when in reset for a small
+ * power saving.
+ */
+ ret = regmap_update_bits(adsp->regmap, adsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+ if (ret != 0) {
+ adsp_err(adsp, "Failed to clear memory retention: %d\n", ret);
+ return ret;
+ }
+
+ if (dvfs) {
+ adsp->dvfs = devm_regulator_get(adsp->dev, "DCVDD");
+ if (IS_ERR(adsp->dvfs)) {
+ ret = PTR_ERR(adsp->dvfs);
+ dev_err(adsp->dev, "Failed to get DCVDD: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(adsp->dvfs);
+ if (ret != 0) {
+ dev_err(adsp->dev, "Failed to enable DCVDD: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regulator_set_voltage(adsp->dvfs, 1200000, 1800000);
+ if (ret != 0) {
+ dev_err(adsp->dev, "Failed to initialise DVFS: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regulator_disable(adsp->dvfs);
+ if (ret != 0) {
+ dev_err(adsp->dev, "Failed to disable DCVDD: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm_adsp2_init);
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
new file mode 100644
index 00000000000..ffd29a4609e
--- /dev/null
+++ b/sound/soc/codecs/wm_adsp.h
@@ -0,0 +1,59 @@
+/*
+ * wm_adsp.h -- Wolfson ADSP support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __WM_ADSP_H
+#define __WM_ADSP_H
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include "wmfw.h"
+
+struct regulator;
+
+struct wm_adsp_region {
+ int type;
+ unsigned int base;
+};
+
+struct wm_adsp {
+ const char *part;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ int base;
+
+ const struct wm_adsp_region *mem;
+ int num_mems;
+
+ struct regulator *dvfs;
+};
+
+#define WM_ADSP1(wname, num) \
+ { .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, \
+ .shift = num, .event = wm_adsp1_event, \
+ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD }
+
+#define WM_ADSP2(wname, num) \
+{ .id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, \
+ .shift = num, .event = wm_adsp2_event, \
+ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD }
+
+int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs);
+int wm_adsp1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+int wm_adsp2_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
+
+#endif
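
A codec driver hooks this support up by embedding struct wm_adsp instances in its private data, initialising each core with wm_adsp2_init(), and exposing it through the WM_ADSP2() widget so that wm_adsp2_event() powers the core and loads firmware when an audio path through it goes active. A rough sketch under those assumptions (the my_codec_* names, the "wm5102" firmware prefix and the 0x1100 register base are illustrative only; the mem/num_mems tables describing the PM/XM/YM/ZM windows would also need filling in before firmware can be loaded):

#include <linux/regmap.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

#include "wm_adsp.h"

/* wm_adsp1/2_event() cast the codec drvdata to a struct wm_adsp array and
 * index it with the widget's .shift, so the array is placed first here. */
struct my_codec_priv {
	struct wm_adsp adsp[1];
	/* ... rest of the codec driver's private state ... */
};

/* "DSP1" routes like any other DAPM PGA; firmware loads on power-up */
static const struct snd_soc_dapm_widget my_codec_dapm_widgets[] = {
	WM_ADSP2("DSP1", 0),
};

/* Call once at device-probe time, before registering the codec; dev/regmap/
 * base must already describe the control registers because wm_adsp2_init()
 * clears ADSP2_MEM_ENA through them. */
static int my_codec_dsp_init(struct my_codec_priv *priv, struct device *dev,
			     struct regmap *regmap, bool have_dvfs)
{
	priv->adsp[0].part = "wm5102";	/* firmware name prefix, illustrative */
	priv->adsp[0].num = 1;		/* requests wm5102-dsp1.wmfw/.bin */
	priv->adsp[0].type = WMFW_ADSP2;
	priv->adsp[0].dev = dev;
	priv->adsp[0].regmap = regmap;
	priv->adsp[0].base = 0x1100;	/* ADSP2 control register base */

	return wm_adsp2_init(&priv->adsp[0], have_dvfs);
}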
diff --git a/sound/soc/codecs/wmfw.h b/sound/soc/codecs/wmfw.h
new file mode 100644
index 00000000000..5632ded67fd
--- /dev/null
+++ b/sound/soc/codecs/wmfw.h
@@ -0,0 +1,128 @@
+/*
+ * wmfw.h - Wolfson firmware format information
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __WMFW_H
+#define __WMFW_H
+
+#include <linux/types.h>
+
+struct wmfw_header {
+ char magic[4];
+ __le32 len;
+ __le16 rev;
+ u8 core;
+ u8 ver;
+} __packed;
+
+struct wmfw_footer {
+ __le64 timestamp;
+ __le32 checksum;
+} __packed;
+
+struct wmfw_adsp1_sizes {
+ __le32 dm;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_adsp2_sizes {
+ __le32 xm;
+ __le32 ym;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_region {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_id_hdr {
+ __be32 core_id;
+ __be32 core_rev;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 dm;
+ __be32 algs;
+} __packed;
+
+struct wmfw_adsp2_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+ __be32 algs;
+} __packed;
+
+struct wmfw_alg_hdr {
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 dm;
+} __packed;
+
+struct wmfw_adsp2_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+} __packed;
+
+struct wmfw_coeff_hdr {
+ u8 magic[4];
+ __le32 len;
+ __le32 ver;
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_item {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 id;
+ __le32 ver;
+ __le32 sr;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+#define WMFW_ADSP1 1
+#define WMFW_ADSP2 2
+
+#define WMFW_ABSOLUTE 0xf0
+#define WMFW_NAME_TEXT 0xfe
+#define WMFW_INFO_TEXT 0xff
+
+#define WMFW_ADSP1_PM 2
+#define WMFW_ADSP1_DM 3
+#define WMFW_ADSP1_ZM 4
+
+#define WMFW_ADSP2_PM 2
+#define WMFW_ADSP2_ZM 4
+#define WMFW_ADSP2_XM 5
+#define WMFW_ADSP2_YM 6
+
+#endif
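
All multi-byte fields in these structures are fixed-endian on the wire (__le* in the container headers, __be* in the ID blocks), so a loader has to convert explicitly. A rough sketch of header validation under those assumptions (the helper name, return codes and exact checks are illustrative, not part of the patch):

static int example_check_wmfw(const u8 *data, size_t size)
{
	const struct wmfw_header *hdr = (const struct wmfw_header *)data;

	if (size < sizeof(*hdr))
		return -EINVAL;

	/* the container starts with the literal magic "WMFW" */
	if (memcmp(hdr->magic, "WMFW", sizeof(hdr->magic)) != 0)
		return -EINVAL;

	/* len is little-endian and must at least fit inside the image */
	if (le32_to_cpu(hdr->len) > size)
		return -EINVAL;

	/* single-byte core field selects the target DSP architecture */
	if (hdr->core != WMFW_ADSP1 && hdr->core != WMFW_ADSP2)
		return -EINVAL;

	return 0;
}
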
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 6fac5af1329..d55e6477bff 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -71,6 +71,11 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
+ /* set the CPU system clock */
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, sysclk, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
return 0;
}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 714e51e5be5..55e2bf652be 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -199,6 +199,7 @@
#define ACLKXE BIT(5)
#define TX_ASYNC BIT(6)
#define ACLKXPOL BIT(7)
+#define ACLKXDIV_MASK 0x1f
/*
* DAVINCI_MCASP_ACLKRCTL_REG Receive Clock Control Register Bits
@@ -207,6 +208,7 @@
#define ACLKRE BIT(5)
#define RX_ASYNC BIT(6)
#define ACLKRPOL BIT(7)
+#define ACLKRDIV_MASK 0x1f
/*
* DAVINCI_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
@@ -215,6 +217,7 @@
#define AHCLKXDIV(val) (val)
#define AHCLKXPOL BIT(14)
#define AHCLKXE BIT(15)
+#define AHCLKXDIV_MASK 0xfff
/*
* DAVINCI_MCASP_AHCLKRCTL_REG - High Frequency Receive Clock Control
@@ -223,6 +226,7 @@
#define AHCLKRDIV(val) (val)
#define AHCLKRPOL BIT(14)
#define AHCLKRE BIT(15)
+#define AHCLKRDIV_MASK 0xfff
/*
* DAVINCI_MCASP_XRSRCTL_BASE_REG - Serializer Control Register Bits
@@ -473,6 +477,23 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
void __iomem *base = dev->base;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+ case SND_SOC_DAIFMT_AC97:
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
+ break;
+ default:
+ /* configure a full-word SYNC pulse (LRCLK) */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
+
+ /* make 1st data bit occur one ACLK cycle after the frame sync */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, FSXDLY(1));
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, FSRDLY(1));
+ break;
+ }
+
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
/* codec is clock and frame slave */
@@ -482,8 +503,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
- ACLKX | AHCLKX | AFSX);
+ mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
break;
case SND_SOC_DAIFMT_CBM_CFS:
/* codec is clock master and frame slave */
@@ -554,59 +574,75 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return 0;
}
-static int davinci_config_channel_size(struct davinci_audio_dev *dev,
- int channel_size)
+static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
{
- u32 fmt = 0;
- u32 mask, rotate;
-
- switch (channel_size) {
- case DAVINCI_AUDIO_WORD_8:
- fmt = 0x03;
- rotate = 6;
- mask = 0x000000ff;
- break;
+ struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai);
- case DAVINCI_AUDIO_WORD_12:
- fmt = 0x05;
- rotate = 5;
- mask = 0x00000fff;
+ switch (div_id) {
+ case 0: /* MCLK divider */
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
+ AHCLKXDIV(div - 1), AHCLKXDIV_MASK);
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
+ AHCLKRDIV(div - 1), AHCLKRDIV_MASK);
break;
- case DAVINCI_AUDIO_WORD_16:
- fmt = 0x07;
- rotate = 4;
- mask = 0x0000ffff;
+ case 1: /* BCLK divider */
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
+ ACLKXDIV(div - 1), ACLKXDIV_MASK);
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_ACLKRCTL_REG,
+ ACLKRDIV(div - 1), ACLKRDIV_MASK);
break;
- case DAVINCI_AUDIO_WORD_20:
- fmt = 0x09;
- rotate = 3;
- mask = 0x000fffff;
+ case 2: /* BCLK/LRCLK ratio */
+ dev->bclk_lrclk_ratio = div;
break;
- case DAVINCI_AUDIO_WORD_24:
- fmt = 0x0B;
- rotate = 2;
- mask = 0x00ffffff;
- break;
+ default:
+ return -EINVAL;
+ }
- case DAVINCI_AUDIO_WORD_28:
- fmt = 0x0D;
- rotate = 1;
- mask = 0x0fffffff;
- break;
+ return 0;
+}
- case DAVINCI_AUDIO_WORD_32:
- fmt = 0x0F;
- rotate = 0;
- mask = 0xffffffff;
- break;
+static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai);
- default:
- return -EINVAL;
+ if (dir == SND_SOC_CLOCK_OUT) {
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AHCLKX);
+ } else {
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AHCLKX);
}
+ return 0;
+}
+
+static int davinci_config_channel_size(struct davinci_audio_dev *dev,
+ int word_length)
+{
+ u32 fmt;
+ u32 rotate = (32 - word_length) / 4;
+ u32 mask = (1ULL << word_length) - 1;
+
+ /*
+ * If a BCLK-to-LRCLK ratio has been configured via the set_clkdiv()
+ * callback, take it into account here. That allows us, for example, to
+ * send 32 bits per channel to the codec while only 16 of them carry
+ * audio payload.
+ * The clock ratio is given for a full period of data (both left and
+ * right channels), so it has to be divided by 2.
+ */
+ if (dev->bclk_lrclk_ratio)
+ word_length = dev->bclk_lrclk_ratio / 2;
+
+ /* mapping of the XSSZ bit-field as described in the datasheet */
+ fmt = (word_length >> 1) - 1;
+
mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
RXSSZ(fmt), RXSSZ(0x0F));
mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
@@ -709,8 +745,6 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* bit stream is MSB first with no delay */
/* DSP_B mode */
- mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
- AHCLKXE);
mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
@@ -720,14 +754,10 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
else
printk(KERN_ERR "playback tdm slot %d not supported\n",
dev->tdm_slots);
-
- mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
} else {
/* bit stream is MSB first with no delay */
/* DSP_B mode */
mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXORD);
- mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
- AHCLKRE);
mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
@@ -736,8 +766,6 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
else
printk(KERN_ERR "capture tdm slot %d not supported\n",
dev->tdm_slots);
-
- mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
}
}
@@ -800,19 +828,27 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
case SNDRV_PCM_FORMAT_U8:
case SNDRV_PCM_FORMAT_S8:
dma_params->data_type = 1;
- word_length = DAVINCI_AUDIO_WORD_8;
+ word_length = 8;
break;
case SNDRV_PCM_FORMAT_U16_LE:
case SNDRV_PCM_FORMAT_S16_LE:
dma_params->data_type = 2;
- word_length = DAVINCI_AUDIO_WORD_16;
+ word_length = 16;
+ break;
+
+ case SNDRV_PCM_FORMAT_U24_3LE:
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ dma_params->data_type = 3;
+ word_length = 24;
break;
+ case SNDRV_PCM_FORMAT_U24_LE:
+ case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_U32_LE:
case SNDRV_PCM_FORMAT_S32_LE:
dma_params->data_type = 4;
- word_length = DAVINCI_AUDIO_WORD_32;
+ word_length = 32;
break;
default:
@@ -880,13 +916,18 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
.trigger = davinci_mcasp_trigger,
.hw_params = davinci_mcasp_hw_params,
.set_fmt = davinci_mcasp_set_dai_fmt,
-
+ .set_clkdiv = davinci_mcasp_set_clkdiv,
+ .set_sysclk = davinci_mcasp_set_sysclk,
};
#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_U8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_U16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_U24_3LE | \
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_U32_LE)
@@ -1089,7 +1130,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dev->tdm_slots = pdata->tdm_slots;
dev->num_serializer = pdata->num_serializer;
dev->serial_dir = pdata->serial_dir;
- dev->codec_fmt = pdata->codec_fmt;
dev->version = pdata->version;
dev->txnumevt = pdata->txnumevt;
dev->rxnumevt = pdata->rxnumevt;
@@ -1098,6 +1138,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
+ dma_data->sram_pool = pdata->sram_pool;
dma_data->sram_size = pdata->sram_size_playback;
dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
mem->start);
@@ -1115,6 +1156,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
dma_data->asp_chan_q = pdata->asp_chan_q;
dma_data->ram_chan_q = pdata->ram_chan_q;
+ dma_data->sram_pool = pdata->sram_pool;
dma_data->sram_size = pdata->sram_size_capture;
dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
mem->start);
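
With the McASP now exposing set_sysclk() and set_clkdiv(), clock setup moves into the machine driver. A sketch of a hw_params() hook under the divider numbering used above (0 = AHCLK/MCLK divider, 1 = ACLK/BCLK divider, 2 = BCLK-to-LRCLK ratio); the AUXCLK rate, 32-bit slot layout and helper name are assumptions for the example:

static int example_mcasp_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	unsigned int sysclk = 24576000;			/* assumed AUXCLK rate */
	unsigned int bclk = 64 * params_rate(params);	/* 2 x 32-bit slots */
	int ret;

	/* McASP drives AHCLKX/AHCLKR out of its AUXCLK */
	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, sysclk, SND_SOC_CLOCK_OUT);
	if (ret < 0)
		return ret;

	/* keep MCLK = AUXCLK, then divide it down to the bit clock */
	ret = snd_soc_dai_set_clkdiv(cpu_dai, 0, 1);
	if (ret < 0)
		return ret;
	ret = snd_soc_dai_set_clkdiv(cpu_dai, 1, sysclk / bclk);
	if (ret < 0)
		return ret;

	/* 64 BCLKs per LRCLK period, so 16-bit samples ride in 32-bit slots */
	return snd_soc_dai_set_clkdiv(cpu_dai, 2, 64);
}
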
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 0de9ed6ce03..0edd3b5a37f 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -23,26 +23,14 @@
#include "davinci-pcm.h"
-#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_96000
+#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000
#define DAVINCI_MCASP_I2S_DAI 0
#define DAVINCI_MCASP_DIT_DAI 1
-enum {
- DAVINCI_AUDIO_WORD_8 = 0,
- DAVINCI_AUDIO_WORD_12,
- DAVINCI_AUDIO_WORD_16,
- DAVINCI_AUDIO_WORD_20,
- DAVINCI_AUDIO_WORD_24,
- DAVINCI_AUDIO_WORD_32,
- DAVINCI_AUDIO_WORD_28, /* This is only valid for McASP */
-};
-
struct davinci_audio_dev {
struct davinci_pcm_dma_params dma_params[2];
void __iomem *base;
- int sample_rate;
struct device *dev;
- unsigned int codec_fmt;
/* McASP specific data */
int tdm_slots;
@@ -50,6 +38,7 @@ struct davinci_audio_dev {
u8 num_serializer;
u8 *serial_dir;
u8 version;
+ u8 bclk_lrclk_ratio;
/* McASP FIFO related */
u8 txnumevt;
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 93ea3bf567e..afab81f844a 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
+#include <linux/genalloc.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -23,7 +24,6 @@
#include <sound/soc.h>
#include <asm/dma.h>
-#include <mach/sram.h>
#include "davinci-pcm.h"
@@ -67,13 +67,9 @@ static struct snd_pcm_hardware pcm_hardware_playback = {
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME|
SNDRV_PCM_INFO_BATCH),
.formats = DAVINCI_PCM_FMTBITS,
- .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_KNOT),
+ .rates = SNDRV_PCM_RATE_8000_192000 | SNDRV_PCM_RATE_KNOT,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
.channels_min = 2,
.channels_max = 384,
.buffer_bytes_max = 128 * 1024,
@@ -90,13 +86,9 @@ static struct snd_pcm_hardware pcm_hardware_capture = {
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_BATCH),
.formats = DAVINCI_PCM_FMTBITS,
- .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
- SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_KNOT),
+ .rates = SNDRV_PCM_RATE_8000_192000 | SNDRV_PCM_RATE_KNOT,
.rate_min = 8000,
- .rate_max = 96000,
+ .rate_max = 192000,
.channels_min = 2,
.channels_max = 384,
.buffer_bytes_max = 128 * 1024,
@@ -259,7 +251,9 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
}
}
-static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static int allocate_sram(struct snd_pcm_substream *substream,
+ struct gen_pool *sram_pool, unsigned size,
struct snd_pcm_hardware *ppcm)
{
struct snd_dma_buffer *buf = &substream->dma_buffer;
@@ -271,9 +265,10 @@ static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
return 0;
ppcm->period_bytes_max = size;
- iram_virt = sram_alloc(size, &iram_phys);
+ iram_virt = (void *)gen_pool_alloc(sram_pool, size);
if (!iram_virt)
goto exit1;
+ iram_phys = gen_pool_virt_to_phys(sram_pool, (unsigned)iram_virt);
iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
if (!iram_dma)
goto exit2;
@@ -285,11 +280,33 @@ static int allocate_sram(struct snd_pcm_substream *substream, unsigned size,
return 0;
exit2:
if (iram_virt)
- sram_free(iram_virt, size);
+ gen_pool_free(sram_pool, (unsigned)iram_virt, size);
exit1:
return -ENOMEM;
}
+static void davinci_free_sram(struct snd_pcm_substream *substream,
+ struct snd_dma_buffer *iram_dma)
+{
+ struct davinci_runtime_data *prtd = substream->runtime->private_data;
+ struct gen_pool *sram_pool = prtd->params->sram_pool;
+
+ gen_pool_free(sram_pool, (unsigned) iram_dma->area, iram_dma->bytes);
+}
+#else
+static int allocate_sram(struct snd_pcm_substream *substream,
+ struct gen_pool *sram_pool, unsigned size,
+ struct snd_pcm_hardware *ppcm)
+{
+ return 0;
+}
+
+static void davinci_free_sram(struct snd_pcm_substream *substream,
+ struct snd_dma_buffer *iram_dma)
+{
+}
+#endif
+
/*
* Only used with ping/pong.
* This is called after runtime->dma_addr, period_bytes and data_type are valid
@@ -676,7 +693,7 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
ppcm = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
&pcm_hardware_playback : &pcm_hardware_capture;
- allocate_sram(substream, params->sram_size, ppcm);
+ allocate_sram(substream, params->sram_pool, params->sram_size, ppcm);
snd_soc_set_runtime_hwparams(substream, ppcm);
/* ensure that buffer size is a multiple of period size */
ret = snd_pcm_hw_constraint_integer(runtime,
@@ -819,7 +836,7 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
buf->area = NULL;
iram_dma = buf->private_data;
if (iram_dma) {
- sram_free(iram_dma->area, iram_dma->bytes);
+ davinci_free_sram(substream, iram_dma);
kfree(iram_dma);
}
}
diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
index fc4d01cdd8c..b6ef7039dd0 100644
--- a/sound/soc/davinci/davinci-pcm.h
+++ b/sound/soc/davinci/davinci-pcm.h
@@ -12,6 +12,7 @@
#ifndef _DAVINCI_PCM_H
#define _DAVINCI_PCM_H
+#include <linux/genalloc.h>
#include <linux/platform_data/davinci_asp.h>
#include <mach/edma.h>
@@ -20,6 +21,7 @@ struct davinci_pcm_dma_params {
unsigned short acnt;
dma_addr_t dma_addr; /* device physical address for DMA */
unsigned sram_size;
+ struct gen_pool *sram_pool; /* SRAM gen_pool for ping pong */
enum dma_event_q asp_chan_q; /* event queue number for ASP channel */
enum dma_event_q ram_chan_q; /* event queue number for RAM channel */
unsigned char data_type; /* xfer data type */
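
Because allocate_sram() now draws ping-pong buffers from a gen_pool rather than the old <mach/sram.h> interface, the platform code that owns the SRAM has to hand that pool in through davinci_pcm_dma_params. A sketch of such wiring, assuming the SRAM mapping, size and helper name (none of which come from this patch):

static int example_attach_sram_pool(struct davinci_pcm_dma_params *params,
				    void __iomem *sram_virt,
				    phys_addr_t sram_phys, size_t sram_size)
{
	struct gen_pool *pool;

	/* page-granular pool covering the on-chip SRAM */
	pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool)
		return -ENOMEM;

	if (gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
			      sram_size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	/* sram_size (the ping-pong buffer size) still comes from the
	 * platform data as before; only the pool is new here */
	params->sram_pool = pool;
	return 0;
}
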
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 4563b28bd62..3b98159d964 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -46,6 +46,20 @@ config SND_SOC_P1022_DS
This will also include the Wolfson Microelectronics WM8776 codec
driver.
+config SND_SOC_P1022_RDK
+ tristate "ALSA SoC support for the Freescale / iVeia P1022 RDK board"
+ # I2C is necessary for the WM8960 driver
+ depends on P1022_RDK && I2C
+ select SND_SOC_FSL_SSI
+ select SND_SOC_FSL_UTILS
+ select SND_SOC_POWERPC_DMA
+ select SND_SOC_WM8960
+ default y if P1022_RDK
+ help
+ Say Y if you want to enable audio on the Freescale / iVeia
+ P1022 RDK board. This will also include the Wolfson
+ Microelectronics WM8960 codec driver.
+
config SND_SOC_MPC5200_I2S
tristate "Freescale MPC5200 PSC in I2S mode driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -98,12 +112,12 @@ config SND_SOC_IMX_PCM
tristate
config SND_SOC_IMX_PCM_FIQ
- tristate
+ bool
select FIQ
select SND_SOC_IMX_PCM
config SND_SOC_IMX_PCM_DMA
- tristate
+ bool
select SND_SOC_DMAENGINE_PCM
select SND_SOC_IMX_PCM
@@ -112,7 +126,7 @@ config SND_SOC_IMX_AUDMUX
config SND_MXC_SOC_WM1133_EV1
tristate "Audio on the i.MX31ADS with WM1133-EV1 fitted"
- depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
+ depends on MACH_MX31ADS_WM1133_EV1
select SND_SOC_WM8350
select SND_SOC_IMX_PCM_FIQ
select SND_SOC_IMX_AUDMUX
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index 5f3cf3f52ea..afd34794db5 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -6,6 +6,10 @@ obj-$(CONFIG_SND_SOC_MPC8610_HPCD) += snd-soc-mpc8610-hpcd.o
snd-soc-p1022-ds-objs := p1022_ds.o
obj-$(CONFIG_SND_SOC_P1022_DS) += snd-soc-p1022-ds.o
+# P1022 RDK Machine Support
+snd-soc-p1022-rdk-objs := p1022_rdk.o
+obj-$(CONFIG_SND_SOC_P1022_RDK) += snd-soc-p1022-rdk.o
+
# Freescale PowerPC SSI/DMA Platform Support
snd-soc-fsl-ssi-objs := fsl_ssi.o
snd-soc-fsl-utils-objs := fsl_utils.o
@@ -26,14 +30,18 @@ obj-$(CONFIG_SND_MPC52xx_SOC_EFIKA) += efika-audio-fabric.o
# i.MX Platform Support
snd-soc-imx-ssi-objs := imx-ssi.o
snd-soc-imx-audmux-objs := imx-audmux.o
+snd-soc-imx-pcm-objs := imx-pcm.o
+ifneq ($(CONFIG_SND_SOC_IMX_PCM_FIQ),)
+ snd-soc-imx-pcm-objs += imx-pcm-fiq.o
+endif
+ifneq ($(CONFIG_SND_SOC_IMX_PCM_DMA),)
+ snd-soc-imx-pcm-objs += imx-pcm-dma.o
+endif
obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o
obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o
obj-$(CONFIG_SND_SOC_IMX_PCM) += snd-soc-imx-pcm.o
-snd-soc-imx-pcm-y := imx-pcm.o
-snd-soc-imx-pcm-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += imx-pcm-fiq.o
-snd-soc-imx-pcm-$(CONFIG_SND_SOC_IMX_PCM_DMA) += imx-pcm-dma.o
# i.MX Machine Support
snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 267d5b4b63c..75ffdf0e2aa 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -93,7 +93,7 @@ static struct snd_soc_card eukrea_tlv320 = {
.num_links = 1,
};
-static int __devinit eukrea_tlv320_probe(struct platform_device *pdev)
+static int eukrea_tlv320_probe(struct platform_device *pdev)
{
int ret;
int int_port = 0, ext_port;
@@ -142,7 +142,7 @@ static int __devinit eukrea_tlv320_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit eukrea_tlv320_remove(struct platform_device *pdev)
+static int eukrea_tlv320_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&eukrea_tlv320);
@@ -155,7 +155,7 @@ static struct platform_driver eukrea_tlv320_driver = {
.owner = THIS_MODULE,
},
.probe = eukrea_tlv320_probe,
- .remove = __devexit_p(eukrea_tlv320_remove),
+ .remove = eukrea_tlv320_remove,
 };
module_platform_driver(eukrea_tlv320_driver);
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 6feb2650058..9cc5c1f82f0 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -894,7 +894,7 @@ static struct snd_pcm_ops fsl_dma_ops = {
.pointer = fsl_dma_pointer,
};
-static int __devinit fsl_soc_dma_probe(struct platform_device *pdev)
+static int fsl_soc_dma_probe(struct platform_device *pdev)
{
struct dma_object *dma;
struct device_node *np = pdev->dev.of_node;
@@ -958,7 +958,7 @@ static int __devinit fsl_soc_dma_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit fsl_soc_dma_remove(struct platform_device *pdev)
+static int fsl_soc_dma_remove(struct platform_device *pdev)
{
struct dma_object *dma = dev_get_drvdata(&pdev->dev);
@@ -983,7 +983,7 @@ static struct platform_driver fsl_soc_dma_driver = {
.of_match_table = fsl_soc_dma_ids,
},
.probe = fsl_soc_dma_probe,
- .remove = __devexit_p(fsl_soc_dma_remove),
+ .remove = fsl_soc_dma_remove,
};
module_platform_driver(fsl_soc_dma_driver);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 4ed2afd4778..7decbd9b234 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -639,7 +639,7 @@ static void make_lowercase(char *s)
}
}
-static int __devinit fsl_ssi_probe(struct platform_device *pdev)
+static int fsl_ssi_probe(struct platform_device *pdev)
{
struct fsl_ssi_private *ssi_private;
int ret = 0;
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 524ce6210ce..251f4d981e0 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -162,7 +162,7 @@ static void __init audmux_debugfs_init(void)
}
}
-static void __devexit audmux_debugfs_remove(void)
+static void audmux_debugfs_remove(void)
{
debugfs_remove_recursive(audmux_debugfs_root);
}
@@ -244,7 +244,7 @@ int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
}
EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
-static int __devinit imx_audmux_probe(struct platform_device *pdev)
+static int imx_audmux_probe(struct platform_device *pdev)
{
struct resource *res;
struct pinctrl *pinctrl;
@@ -278,7 +278,7 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit imx_audmux_remove(struct platform_device *pdev)
+static int imx_audmux_remove(struct platform_device *pdev)
{
if (audmux_type == IMX31_AUDMUX)
audmux_debugfs_remove();
@@ -289,7 +289,7 @@ static int __devexit imx_audmux_remove(struct platform_device *pdev)
static struct platform_driver imx_audmux_driver = {
.probe = imx_audmux_probe,
- .remove = __devexit_p(imx_audmux_remove),
+ .remove = imx_audmux_remove,
.id_table = imx_audmux_ids,
.driver = {
.name = DRIVER_NAME,
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 549b31fdc9d..4ae30f21fdb 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -98,7 +98,7 @@ static struct snd_soc_card imx_mc13783 = {
.num_dapm_routes = ARRAY_SIZE(imx_mc13783_routes),
};
-static int __devinit imx_mc13783_probe(struct platform_device *pdev)
+static int imx_mc13783_probe(struct platform_device *pdev)
{
int ret;
@@ -148,7 +148,7 @@ static int __devinit imx_mc13783_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit imx_mc13783_remove(struct platform_device *pdev)
+static int imx_mc13783_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&imx_mc13783);
@@ -161,7 +161,7 @@ static struct platform_driver imx_mc13783_audio_driver = {
.owner = THIS_MODULE,
},
.probe = imx_mc13783_probe,
- .remove = __devexit_p(imx_mc13783_remove)
+ .remove = imx_mc13783_remove
};
module_platform_driver(imx_mc13783_audio_driver);
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index d85929b79c3..bf363d8d044 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -154,12 +154,12 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {
.pcm_free = imx_pcm_free,
};
-static int __devinit imx_soc_platform_probe(struct platform_device *pdev)
+static int imx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);
}
-static int __devexit imx_soc_platform_remove(struct platform_device *pdev)
+static int imx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -171,7 +171,7 @@ static struct platform_driver imx_pcm_driver = {
.owner = THIS_MODULE,
},
.probe = imx_soc_platform_probe,
- .remove = __devexit_p(imx_soc_platform_remove),
+ .remove = imx_soc_platform_remove,
};
module_platform_driver(imx_pcm_driver);
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 9ffc9e66308..5ec362ae4d0 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
.pcm_free = imx_pcm_fiq_free,
};
-static int __devinit imx_soc_platform_probe(struct platform_device *pdev)
+static int imx_soc_platform_probe(struct platform_device *pdev)
{
struct imx_ssi *ssi = platform_get_drvdata(pdev);
int ret;
@@ -315,7 +315,7 @@ failed_register:
return ret;
}
-static int __devexit imx_soc_platform_remove(struct platform_device *pdev)
+static int imx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -328,7 +328,7 @@ static struct platform_driver imx_pcm_driver = {
},
.probe = imx_soc_platform_probe,
- .remove = __devexit_p(imx_soc_platform_remove),
+ .remove = imx_soc_platform_remove,
};
module_platform_driver(imx_pcm_driver);
diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c
index 93dc360b177..d5cd9eff3b4 100644
--- a/sound/soc/fsl/imx-pcm.c
+++ b/sound/soc/fsl/imx-pcm.c
@@ -103,3 +103,7 @@ void imx_pcm_free(struct snd_pcm *pcm)
}
}
EXPORT_SYMBOL_GPL(imx_pcm_free);
+
+MODULE_DESCRIPTION("Freescale i.MX PCM driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 199408ec426..424347e9b2d 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -56,7 +56,7 @@ static const struct snd_soc_dapm_widget imx_sgtl5000_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", NULL),
};
-static int __devinit imx_sgtl5000_probe(struct platform_device *pdev)
+static int imx_sgtl5000_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *ssi_np, *codec_np;
@@ -162,6 +162,7 @@ static int __devinit imx_sgtl5000_probe(struct platform_device *pdev)
if (ret)
goto clk_fail;
data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
data->card.dai_link = &data->dai;
data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
@@ -184,7 +185,7 @@ fail:
return ret;
}
-static int __devexit imx_sgtl5000_remove(struct platform_device *pdev)
+static int imx_sgtl5000_remove(struct platform_device *pdev)
{
struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
@@ -210,7 +211,7 @@ static struct platform_driver imx_sgtl5000_driver = {
.of_match_table = imx_sgtl5000_dt_ids,
},
.probe = imx_sgtl5000_probe,
- .remove = __devexit_p(imx_sgtl5000_remove),
+ .remove = imx_sgtl5000_remove,
};
module_platform_driver(imx_sgtl5000_driver);
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index dd566444e3c..3b480423747 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -638,7 +638,7 @@ failed_clk:
return ret;
}
-static int __devexit imx_ssi_remove(struct platform_device *pdev)
+static int imx_ssi_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct imx_ssi *ssi = platform_get_drvdata(pdev);
@@ -659,7 +659,7 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
static struct platform_driver imx_ssi_driver = {
.probe = imx_ssi_probe,
- .remove = __devexit_p(imx_ssi_remove),
+ .remove = imx_ssi_remove,
.driver = {
.name = "imx-ssi",
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index a313c0ae36d..a4aec0488dd 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -277,7 +277,7 @@ static struct snd_soc_dai_driver psc_ac97_dai[] = {
* - Probe/remove operations
* - OF device match table
*/
-static int __devinit psc_ac97_of_probe(struct platform_device *op)
+static int psc_ac97_of_probe(struct platform_device *op)
{
int rc;
struct snd_ac97 ac97;
@@ -310,7 +310,7 @@ static int __devinit psc_ac97_of_probe(struct platform_device *op)
return 0;
}
-static int __devexit psc_ac97_of_remove(struct platform_device *op)
+static int psc_ac97_of_remove(struct platform_device *op)
{
mpc5200_audio_dma_destroy(op);
snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_ac97_dai));
@@ -318,7 +318,7 @@ static int __devexit psc_ac97_of_remove(struct platform_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id psc_ac97_match[] __devinitdata = {
+static struct of_device_id psc_ac97_match[] = {
{ .compatible = "fsl,mpc5200-psc-ac97", },
{ .compatible = "fsl,mpc5200b-psc-ac97", },
{}
@@ -327,7 +327,7 @@ MODULE_DEVICE_TABLE(of, psc_ac97_match);
static struct platform_driver psc_ac97_driver = {
.probe = psc_ac97_of_probe,
- .remove = __devexit_p(psc_ac97_of_remove),
+ .remove = psc_ac97_of_remove,
.driver = {
.name = "mpc5200-psc-ac97",
.owner = THIS_MODULE,
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index ba1f0a66358..b95b966f25a 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -153,7 +153,7 @@ static struct snd_soc_dai_driver psc_i2s_dai[] = {{
* - Probe/remove operations
* - OF device match table
*/
-static int __devinit psc_i2s_of_probe(struct platform_device *op)
+static int psc_i2s_of_probe(struct platform_device *op)
{
int rc;
struct psc_dma *psc_dma;
@@ -205,7 +205,7 @@ static int __devinit psc_i2s_of_probe(struct platform_device *op)
}
-static int __devexit psc_i2s_of_remove(struct platform_device *op)
+static int psc_i2s_of_remove(struct platform_device *op)
{
mpc5200_audio_dma_destroy(op);
snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_i2s_dai));
@@ -213,7 +213,7 @@ static int __devexit psc_i2s_of_remove(struct platform_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id psc_i2s_match[] __devinitdata = {
+static struct of_device_id psc_i2s_match[] = {
{ .compatible = "fsl,mpc5200-psc-i2s", },
{ .compatible = "fsl,mpc5200b-psc-i2s", },
{}
@@ -222,7 +222,7 @@ MODULE_DEVICE_TABLE(of, psc_i2s_match);
static struct platform_driver psc_i2s_driver = {
.probe = psc_i2s_of_probe,
- .remove = __devexit_p(psc_i2s_of_remove),
+ .remove = psc_i2s_of_remove,
.driver = {
.name = "mpc5200-psc-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 9ff9318c52b..228c52e7144 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -368,7 +368,7 @@ error_alloc:
*
* This function is called when the platform device is removed.
*/
-static int __devexit mpc8610_hpcd_remove(struct platform_device *pdev)
+static int mpc8610_hpcd_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct mpc8610_hpcd_data *machine_data =
@@ -382,7 +382,7 @@ static int __devexit mpc8610_hpcd_remove(struct platform_device *pdev)
static struct platform_driver mpc8610_hpcd_driver = {
.probe = mpc8610_hpcd_probe,
- .remove = __devexit_p(mpc8610_hpcd_remove),
+ .remove = mpc8610_hpcd_remove,
.driver = {
/* The name must match 'compatible' property in the device tree,
* in lowercase letters.
diff --git a/sound/soc/fsl/mx27vis-aic32x4.c b/sound/soc/fsl/mx27vis-aic32x4.c
index 2b76877b178..3d107417905 100644
--- a/sound/soc/fsl/mx27vis-aic32x4.c
+++ b/sound/soc/fsl/mx27vis-aic32x4.c
@@ -180,7 +180,7 @@ static struct snd_soc_card mx27vis_aic32x4 = {
.num_dapm_routes = ARRAY_SIZE(aic32x4_dapm_routes),
};
-static int __devinit mx27vis_aic32x4_probe(struct platform_device *pdev)
+static int mx27vis_aic32x4_probe(struct platform_device *pdev)
{
struct snd_mx27vis_platform_data *pdata = pdev->dev.platform_data;
int ret;
@@ -219,7 +219,7 @@ static int __devinit mx27vis_aic32x4_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit mx27vis_aic32x4_remove(struct platform_device *pdev)
+static int mx27vis_aic32x4_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&mx27vis_aic32x4);
@@ -232,7 +232,7 @@ static struct platform_driver mx27vis_aic32x4_audio_driver = {
.owner = THIS_MODULE,
},
.probe = mx27vis_aic32x4_probe,
- .remove = __devexit_p(mx27vis_aic32x4_remove),
+ .remove = mx27vis_aic32x4_remove,
};
module_platform_driver(mx27vis_aic32x4_audio_driver);
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 144d4960363..ba59c23a137 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -376,7 +376,7 @@ error_put:
*
* This function is called when the platform device is removed.
*/
-static int __devexit p1022_ds_remove(struct platform_device *pdev)
+static int p1022_ds_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct machine_data *mdata =
@@ -390,7 +390,7 @@ static int __devexit p1022_ds_remove(struct platform_device *pdev)
static struct platform_driver p1022_ds_driver = {
.probe = p1022_ds_probe,
- .remove = __devexit_p(p1022_ds_remove),
+ .remove = p1022_ds_remove,
.driver = {
/*
* The name must match 'compatible' property in the device tree,
diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c
new file mode 100644
index 00000000000..f2155191153
--- /dev/null
+++ b/sound/soc/fsl/p1022_rdk.c
@@ -0,0 +1,392 @@
+/**
+ * Freescale P1022RDK ALSA SoC Machine driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Note: in order for audio to work correctly, the output controls need
+ * to be enabled, because they control the clock. So for playback, for
+ * example:
+ *
+ * amixer sset 'Left Output Mixer PCM' on
+ * amixer sset 'Right Output Mixer PCM' on
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <asm/fsl_guts.h>
+
+#include "fsl_dma.h"
+#include "fsl_ssi.h"
+#include "fsl_utils.h"
+
+/* P1022-specific PMUXCR and DMUXCR bit definitions */
+
+#define CCSR_GUTS_PMUXCR_UART0_I2C1_MASK 0x0001c000
+#define CCSR_GUTS_PMUXCR_UART0_I2C1_UART0_SSI 0x00010000
+#define CCSR_GUTS_PMUXCR_UART0_I2C1_SSI 0x00018000
+
+#define CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK 0x00000c00
+#define CCSR_GUTS_PMUXCR_SSI_DMA_TDM_SSI 0x00000000
+
+#define CCSR_GUTS_DMUXCR_PAD 1 /* DMA controller/channel set to pad */
+#define CCSR_GUTS_DMUXCR_SSI 2 /* DMA controller/channel set to SSI */
+
+/*
+ * Set the DMUXCR register in the GUTS
+ *
+ * The DMUXCR register determines the source of initiated transfers for each
+ * channel on each DMA controller. Rather than have a bunch of repetitive
+ * macros for the bit patterns, we just have a function that calculates
+ * them.
+ *
+ * guts: Pointer to GUTS structure
+ * co: The DMA controller (0 or 1)
+ * ch: The channel on the DMA controller (0, 1, 2, or 3)
+ * device: The device to set as the target (CCSR_GUTS_DMUXCR_xxx)
+ */
+static inline void guts_set_dmuxcr(struct ccsr_guts __iomem *guts,
+ unsigned int co, unsigned int ch, unsigned int device)
+{
+ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
+
+ clrsetbits_be32(&guts->dmuxcr, 3 << shift, device << shift);
+}
+
+/* There's only one global utilities register */
+static phys_addr_t guts_phys;
+
+/**
+ * machine_data: machine-specific ASoC device data
+ *
+ * This structure contains data for a single sound platform device on a
+ * P1022 RDK. Some of the data is taken from the device tree.
+ */
+struct machine_data {
+ struct snd_soc_dai_link dai[2];
+ struct snd_soc_card card;
+ unsigned int dai_format;
+ unsigned int codec_clk_direction;
+ unsigned int cpu_clk_direction;
+ unsigned int clk_frequency;
+ unsigned int dma_id[2]; /* 0 = DMA1, 1 = DMA2, etc */
+ unsigned int dma_channel_id[2]; /* 0 = ch 0, 1 = ch 1, etc*/
+ char platform_name[2][DAI_NAME_SIZE]; /* One for each DMA channel */
+};
+
+/**
+ * p1022_rdk_machine_probe: initialize the board
+ *
+ * This function is used to initialize the board-specific hardware.
+ *
+ * Here we program the DMUXCR and PMUXCR registers.
+ */
+static int p1022_rdk_machine_probe(struct snd_soc_card *card)
+{
+ struct machine_data *mdata =
+ container_of(card, struct machine_data, card);
+ struct ccsr_guts __iomem *guts;
+
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
+ if (!guts) {
+ dev_err(card->dev, "could not map global utilities\n");
+ return -ENOMEM;
+ }
+
+ /* Enable SSI Tx signal */
+ clrsetbits_be32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_UART0_I2C1_MASK,
+ CCSR_GUTS_PMUXCR_UART0_I2C1_UART0_SSI);
+
+ /* Enable SSI Rx signal */
+ clrsetbits_be32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK,
+ CCSR_GUTS_PMUXCR_SSI_DMA_TDM_SSI);
+
+ /* Enable DMA Channel for SSI */
+ guts_set_dmuxcr(guts, mdata->dma_id[0], mdata->dma_channel_id[0],
+ CCSR_GUTS_DMUXCR_SSI);
+
+ guts_set_dmuxcr(guts, mdata->dma_id[1], mdata->dma_channel_id[1],
+ CCSR_GUTS_DMUXCR_SSI);
+
+ iounmap(guts);
+
+ return 0;
+}
+
+/**
+ * p1022_rdk_startup: program the board with various hardware parameters
+ *
+ * This function takes board-specific information, like clock frequencies
+ * and serial data formats, and passes that information to the codec and
+ * transport drivers.
+ */
+static int p1022_rdk_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct machine_data *mdata =
+ container_of(rtd->card, struct machine_data, card);
+ struct device *dev = rtd->card->dev;
+ int ret = 0;
+
+ /* Tell the codec driver what the serial protocol is. */
+ ret = snd_soc_dai_set_fmt(rtd->codec_dai, mdata->dai_format);
+ if (ret < 0) {
+ dev_err(dev, "could not set codec driver audio format (ret=%i)\n",
+ ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(rtd->codec_dai, 0, 0, mdata->clk_frequency,
+ mdata->clk_frequency);
+ if (ret < 0) {
+ dev_err(dev, "could not set codec PLL frequency (ret=%i)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * p1022_rdk_machine_remove: Remove the sound device
+ *
+ * This function is called to remove the sound device for one SSI. We
+ * de-program the DMUXCR and PMUXCR registers.
+ */
+static int p1022_rdk_machine_remove(struct snd_soc_card *card)
+{
+ struct machine_data *mdata =
+ container_of(card, struct machine_data, card);
+ struct ccsr_guts __iomem *guts;
+
+ guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
+ if (!guts) {
+ dev_err(card->dev, "could not map global utilities\n");
+ return -ENOMEM;
+ }
+
+ /* Restore the signal routing */
+ clrbits32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_UART0_I2C1_MASK);
+ clrbits32(&guts->pmuxcr, CCSR_GUTS_PMUXCR_SSI_DMA_TDM_MASK);
+ guts_set_dmuxcr(guts, mdata->dma_id[0], mdata->dma_channel_id[0], 0);
+ guts_set_dmuxcr(guts, mdata->dma_id[1], mdata->dma_channel_id[1], 0);
+
+ iounmap(guts);
+
+ return 0;
+}
+
+/**
+ * p1022_rdk_ops: ASoC machine driver operations
+ */
+static struct snd_soc_ops p1022_rdk_ops = {
+ .startup = p1022_rdk_startup,
+};
+
+/**
+ * p1022_rdk_probe: platform probe function for the machine driver
+ *
+ * Although this is a machine driver, the SSI node is the "master" node with
+ * respect to audio hardware connections. Therefore, we create a new ASoC
+ * device for each new SSI node that has a codec attached.
+ */
+static int p1022_rdk_probe(struct platform_device *pdev)
+{
+ struct device *dev = pdev->dev.parent;
+ /* ssi_pdev is the platform device for the SSI node that probed us */
+ struct platform_device *ssi_pdev =
+ container_of(dev, struct platform_device, dev);
+ struct device_node *np = ssi_pdev->dev.of_node;
+ struct device_node *codec_np = NULL;
+ struct machine_data *mdata;
+ const u32 *iprop;
+ int ret;
+
+ /* Find the codec node for this SSI. */
+ codec_np = of_parse_phandle(np, "codec-handle", 0);
+ if (!codec_np) {
+ dev_err(dev, "could not find codec node\n");
+ return -EINVAL;
+ }
+
+ mdata = kzalloc(sizeof(struct machine_data), GFP_KERNEL);
+ if (!mdata) {
+ ret = -ENOMEM;
+ goto error_put;
+ }
+
+ mdata->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev);
+ mdata->dai[0].ops = &p1022_rdk_ops;
+
+ /* ASoC core can match codec with device node */
+ mdata->dai[0].codec_of_node = codec_np;
+
+ /*
+ * We register two DAIs per SSI, one for playback and the other for
+ * capture. We support codecs that have separate DAIs for both playback
+ * and capture.
+ */
+ memcpy(&mdata->dai[1], &mdata->dai[0], sizeof(struct snd_soc_dai_link));
+
+ /* The DAI names from the codec (snd_soc_dai_driver.name) */
+ mdata->dai[0].codec_dai_name = "wm8960-hifi";
+ mdata->dai[1].codec_dai_name = mdata->dai[0].codec_dai_name;
+
+ /*
+ * Configure the SSI for I2S slave mode. Older device trees have
+ * an fsl,mode property, but we ignore that since there's really
+ * only one way to configure the SSI.
+ */
+ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_OUT;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_IN;
+
+ /*
+ * In i2s-slave mode, the codec has its own clock source, so we
+ * need to get the frequency from the device tree and pass it to
+ * the codec driver.
+ */
+ iprop = of_get_property(codec_np, "clock-frequency", NULL);
+ if (!iprop || !*iprop) {
+ dev_err(&pdev->dev, "codec bus-frequency property is missing or invalid\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ mdata->clk_frequency = be32_to_cpup(iprop);
+
+ if (!mdata->clk_frequency) {
+ dev_err(&pdev->dev, "unknown clock frequency\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Find the playback DMA channel to use. */
+ mdata->dai[0].platform_name = mdata->platform_name[0];
+ ret = fsl_asoc_get_dma_channel(np, "fsl,playback-dma", &mdata->dai[0],
+ &mdata->dma_channel_id[0],
+ &mdata->dma_id[0]);
+ if (ret) {
+ dev_err(&pdev->dev, "missing/invalid playback DMA phandle (ret=%i)\n",
+ ret);
+ goto error;
+ }
+
+ /* Find the capture DMA channel to use. */
+ mdata->dai[1].platform_name = mdata->platform_name[1];
+ ret = fsl_asoc_get_dma_channel(np, "fsl,capture-dma", &mdata->dai[1],
+ &mdata->dma_channel_id[1],
+ &mdata->dma_id[1]);
+ if (ret) {
+ dev_err(&pdev->dev, "missing/invalid capture DMA phandle (ret=%i)\n",
+ ret);
+ goto error;
+ }
+
+ /* Initialize our DAI data structure. */
+ mdata->dai[0].stream_name = "playback";
+ mdata->dai[1].stream_name = "capture";
+ mdata->dai[0].name = mdata->dai[0].stream_name;
+ mdata->dai[1].name = mdata->dai[1].stream_name;
+
+ mdata->card.probe = p1022_rdk_machine_probe;
+ mdata->card.remove = p1022_rdk_machine_remove;
+ mdata->card.name = pdev->name; /* The platform driver name */
+ mdata->card.owner = THIS_MODULE;
+ mdata->card.dev = &pdev->dev;
+ mdata->card.num_links = 2;
+ mdata->card.dai_link = mdata->dai;
+
+ /* Register with ASoC */
+ ret = snd_soc_register_card(&mdata->card);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register card (ret=%i)\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ kfree(mdata);
+error_put:
+ of_node_put(codec_np);
+ return ret;
+}
+
+/**
+ * p1022_rdk_remove: remove the platform device
+ *
+ * This function is called when the platform device is removed.
+ */
+static int p1022_rdk_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct machine_data *mdata =
+ container_of(card, struct machine_data, card);
+
+ snd_soc_unregister_card(card);
+ kfree(mdata);
+
+ return 0;
+}
+
+static struct platform_driver p1022_rdk_driver = {
+ .probe = p1022_rdk_probe,
+ .remove = p1022_rdk_remove,
+ .driver = {
+ /*
+ * The name must match 'compatible' property in the device tree,
+ * in lowercase letters.
+ */
+ .name = "snd-soc-p1022rdk",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**
+ * p1022_rdk_init: machine driver initialization.
+ *
+ * This function is called when this module is loaded.
+ */
+static int __init p1022_rdk_init(void)
+{
+ struct device_node *guts_np;
+ struct resource res;
+
+ /* Get the physical address of the global utilities registers */
+ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
+ if (of_address_to_resource(guts_np, 0, &res)) {
+ pr_err("snd-soc-p1022rdk: missing/invalid global utils node\n");
+ of_node_put(guts_np);
+ return -EINVAL;
+ }
+ guts_phys = res.start;
+ of_node_put(guts_np);
+
+ return platform_driver_register(&p1022_rdk_driver);
+}
+
+/**
+ * p1022_rdk_exit: machine driver exit
+ *
+ * This function is called when this driver is unloaded.
+ */
+static void __exit p1022_rdk_exit(void)
+{
+ platform_driver_unregister(&p1022_rdk_driver);
+}
+
+late_initcall(p1022_rdk_init);
+module_exit(p1022_rdk_exit);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Freescale / iVeia P1022 RDK ALSA SoC machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 4b63ec8eb37..8e52c1485df 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -29,14 +29,14 @@ struct pcm030_audio_data {
static struct snd_soc_dai_link pcm030_fabric_dai[] = {
{
- .name = "AC97",
+ .name = "AC97.0",
.stream_name = "AC97 Analog",
.codec_dai_name = "wm9712-hifi",
.cpu_dai_name = "mpc5200-psc-ac97.0",
.codec_name = "wm9712-codec",
},
{
- .name = "AC97",
+ .name = "AC97.1",
.stream_name = "AC97 IEC958",
.codec_dai_name = "wm9712-aux",
.cpu_dai_name = "mpc5200-psc-ac97.1",
@@ -101,7 +101,7 @@ static int __init pcm030_fabric_probe(struct platform_device *op)
return ret;
}
-static int __devexit pcm030_fabric_remove(struct platform_device *op)
+static int pcm030_fabric_remove(struct platform_device *op)
{
struct pcm030_audio_data *pdata = platform_get_drvdata(op);
int ret;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(of, pcm030_audio_match);
static struct platform_driver pcm030_fabric_driver = {
.probe = pcm030_fabric_probe,
- .remove = __devexit_p(pcm030_fabric_remove),
+ .remove = pcm030_fabric_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 41349670ada..6cef491f482 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -425,7 +425,7 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = {
.resume = jz4740_i2s_resume,
};
-static int __devinit jz4740_i2s_dev_probe(struct platform_device *pdev)
+static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
struct jz4740_i2s *i2s;
int ret;
@@ -492,7 +492,7 @@ err_free:
return ret;
}
-static int __devexit jz4740_i2s_dev_remove(struct platform_device *pdev)
+static int jz4740_i2s_dev_remove(struct platform_device *pdev)
{
struct jz4740_i2s *i2s = platform_get_drvdata(pdev);
@@ -512,7 +512,7 @@ static int __devexit jz4740_i2s_dev_remove(struct platform_device *pdev)
static struct platform_driver jz4740_i2s_driver = {
.probe = jz4740_i2s_dev_probe,
- .remove = __devexit_p(jz4740_i2s_dev_remove),
+ .remove = jz4740_i2s_dev_remove,
.driver = {
.name = "jz4740-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/jz4740/jz4740-pcm.c b/sound/soc/jz4740/jz4740-pcm.c
index 9b8cf256847..71005929231 100644
--- a/sound/soc/jz4740/jz4740-pcm.c
+++ b/sound/soc/jz4740/jz4740-pcm.c
@@ -335,12 +335,12 @@ static struct snd_soc_platform_driver jz4740_soc_platform = {
.pcm_free = jz4740_pcm_free,
};
-static int __devinit jz4740_pcm_probe(struct platform_device *pdev)
+static int jz4740_pcm_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &jz4740_soc_platform);
}
-static int __devexit jz4740_pcm_remove(struct platform_device *pdev)
+static int jz4740_pcm_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -348,7 +348,7 @@ static int __devexit jz4740_pcm_remove(struct platform_device *pdev)
static struct platform_driver jz4740_pcm_driver = {
.probe = jz4740_pcm_probe,
- .remove = __devexit_p(jz4740_pcm_remove),
+ .remove = jz4740_pcm_remove,
.driver = {
.name = "jz4740-pcm-audio",
.owner = THIS_MODULE,
diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c
index e8aaff18d7c..55fd6b5df55 100644
--- a/sound/soc/jz4740/qi_lb60.c
+++ b/sound/soc/jz4740/qi_lb60.c
@@ -96,7 +96,7 @@ static const struct gpio qi_lb60_gpios[] = {
{ QI_LB60_AMP_GPIO, GPIOF_OUT_INIT_LOW, "AMP" },
};
-static int __devinit qi_lb60_probe(struct platform_device *pdev)
+static int qi_lb60_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &qi_lb60;
int ret;
@@ -116,7 +116,7 @@ static int __devinit qi_lb60_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit qi_lb60_remove(struct platform_device *pdev)
+static int qi_lb60_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -131,7 +131,7 @@ static struct platform_driver qi_lb60_driver = {
.owner = THIS_MODULE,
},
.probe = qi_lb60_probe,
- .remove = __devexit_p(qi_lb60_remove),
+ .remove = qi_lb60_remove,
};
module_platform_driver(qi_lb60_driver);
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 2ba08148655..d3d4bdca1cc 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -22,12 +22,16 @@
#include "kirkwood.h"
#define KIRKWOOD_RATES \
- (SNDRV_PCM_RATE_44100 | \
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)
+ (SNDRV_PCM_RATE_8000_192000 | \
+ SNDRV_PCM_RATE_CONTINUOUS | \
+ SNDRV_PCM_RATE_KNOT)
+
#define KIRKWOOD_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE)
+ SNDRV_PCM_FMTBIT_S32_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
struct kirkwood_dma_priv {
struct snd_pcm_substream *play_stream;
@@ -43,10 +47,10 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
SNDRV_PCM_INFO_PAUSE),
.formats = KIRKWOOD_FORMATS,
.rates = KIRKWOOD_RATES,
- .rate_min = 44100,
- .rate_max = 96000,
+ .rate_min = 8000,
+ .rate_max = 384000,
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 8,
.buffer_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS,
.period_bytes_min = KIRKWOOD_SND_MIN_PERIOD_BYTES,
.period_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES,
@@ -368,12 +372,12 @@ static struct snd_soc_platform_driver kirkwood_soc_platform = {
.pcm_free = kirkwood_dma_free_dma_buffers,
};
-static int __devinit kirkwood_soc_platform_probe(struct platform_device *pdev)
+static int kirkwood_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
}
-static int __devexit kirkwood_soc_platform_remove(struct platform_device *pdev)
+static int kirkwood_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -386,7 +390,7 @@ static struct platform_driver kirkwood_pcm_driver = {
},
.probe = kirkwood_soc_platform_probe,
- .remove = __devexit_p(kirkwood_soc_platform_remove),
+ .remove = kirkwood_soc_platform_remove,
};
module_platform_driver(kirkwood_pcm_driver);
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 1d5db484d2d..282d8b1163b 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -99,6 +99,29 @@ static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate)
} while (value == 0);
}
+static void kirkwood_set_rate(struct snd_soc_dai *dai,
+ struct kirkwood_dma_data *priv, unsigned long rate)
+{
+ uint32_t clks_ctrl;
+
+ if (rate == 44100 || rate == 48000 || rate == 96000) {
+ /* use internal dco for supported rates */
+ dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
+ __func__, rate);
+ kirkwood_set_dco(priv->io, rate);
+
+ clks_ctrl = KIRKWOOD_MCLK_SOURCE_DCO;
+ } else if (!IS_ERR(priv->extclk)) {
+ /* use optional external clk for other rates */
+ dev_dbg(dai->dev, "%s: extclk set rate = %lu -> %lu\n",
+ __func__, rate, 256 * rate);
+ clk_set_rate(priv->extclk, 256 * rate);
+
+ clks_ctrl = KIRKWOOD_MCLK_SOURCE_EXTCLK;
+ }
+ writel(clks_ctrl, priv->io + KIRKWOOD_CLOCKS_CTRL);
+}
+
static int kirkwood_i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -113,26 +136,21 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
- unsigned int i2s_reg, reg;
- unsigned long i2s_value, value;
+ uint32_t ctl_play, ctl_rec;
+ unsigned int i2s_reg;
+ unsigned long i2s_value;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
i2s_reg = KIRKWOOD_I2S_PLAYCTL;
- reg = KIRKWOOD_PLAYCTL;
} else {
i2s_reg = KIRKWOOD_I2S_RECCTL;
- reg = KIRKWOOD_RECCTL;
}
- /* set dco conf */
- kirkwood_set_dco(priv->io, params_rate(params));
+ kirkwood_set_rate(dai, priv, params_rate(params));
i2s_value = readl(priv->io+i2s_reg);
i2s_value &= ~KIRKWOOD_I2S_CTL_SIZE_MASK;
- value = readl(priv->io+reg);
- value &= ~KIRKWOOD_PLAYCTL_SIZE_MASK;
-
/*
* Size settings in play/rec i2s control regs and play/rec control
* regs must be the same.
@@ -140,38 +158,57 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_16;
- value |= KIRKWOOD_PLAYCTL_SIZE_16_C;
+ ctl_play = KIRKWOOD_PLAYCTL_SIZE_16_C |
+ KIRKWOOD_PLAYCTL_I2S_EN;
+ ctl_rec = KIRKWOOD_RECCTL_SIZE_16_C |
+ KIRKWOOD_RECCTL_I2S_EN;
break;
/*
* doesn't work... S20_3LE != kirkwood 20bit format ?
*
case SNDRV_PCM_FORMAT_S20_3LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_20;
- value |= KIRKWOOD_PLAYCTL_SIZE_20;
+ ctl_play = KIRKWOOD_PLAYCTL_SIZE_20 |
+ KIRKWOOD_PLAYCTL_I2S_EN;
+ ctl_rec = KIRKWOOD_RECCTL_SIZE_20 |
+ KIRKWOOD_RECCTL_I2S_EN;
break;
*/
case SNDRV_PCM_FORMAT_S24_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_24;
- value |= KIRKWOOD_PLAYCTL_SIZE_24;
+ ctl_play = KIRKWOOD_PLAYCTL_SIZE_24 |
+ KIRKWOOD_PLAYCTL_I2S_EN;
+ ctl_rec = KIRKWOOD_RECCTL_SIZE_24 |
+ KIRKWOOD_RECCTL_I2S_EN;
break;
case SNDRV_PCM_FORMAT_S32_LE:
i2s_value |= KIRKWOOD_I2S_CTL_SIZE_32;
- value |= KIRKWOOD_PLAYCTL_SIZE_32;
+ ctl_play = KIRKWOOD_PLAYCTL_SIZE_32 |
+ KIRKWOOD_PLAYCTL_I2S_EN;
+ ctl_rec = KIRKWOOD_RECCTL_SIZE_32 |
+ KIRKWOOD_RECCTL_I2S_EN;
break;
default:
return -EINVAL;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- value &= ~KIRKWOOD_PLAYCTL_MONO_MASK;
if (params_channels(params) == 1)
- value |= KIRKWOOD_PLAYCTL_MONO_BOTH;
+ ctl_play |= KIRKWOOD_PLAYCTL_MONO_BOTH;
else
- value |= KIRKWOOD_PLAYCTL_MONO_OFF;
+ ctl_play |= KIRKWOOD_PLAYCTL_MONO_OFF;
+
+ priv->ctl_play &= ~(KIRKWOOD_PLAYCTL_MONO_MASK |
+ KIRKWOOD_PLAYCTL_I2S_EN |
+ KIRKWOOD_PLAYCTL_SPDIF_EN |
+ KIRKWOOD_PLAYCTL_SIZE_MASK);
+ priv->ctl_play |= ctl_play;
+ } else {
+ priv->ctl_rec &= ~KIRKWOOD_RECCTL_SIZE_MASK;
+ priv->ctl_rec |= ctl_rec;
}
writel(i2s_value, priv->io+i2s_reg);
- writel(value, priv->io+reg);
return 0;
}
@@ -205,20 +242,18 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
+ /* configure */
+ ctl = priv->ctl_play;
+ value = ctl & ~(KIRKWOOD_PLAYCTL_I2S_EN |
+ KIRKWOOD_PLAYCTL_SPDIF_EN);
+ writel(value, priv->io + KIRKWOOD_PLAYCTL);
+
+ /* enable interrupts */
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
- /* configure audio & enable i2s playback */
- ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
- ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
- | KIRKWOOD_PLAYCTL_SPDIF_EN);
-
- if (priv->burst == 32)
- ctl |= KIRKWOOD_PLAYCTL_BURST_32;
- else
- ctl |= KIRKWOOD_PLAYCTL_BURST_128;
- ctl |= KIRKWOOD_PLAYCTL_I2S_EN;
+ /* enable playback */
writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
@@ -259,30 +294,24 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
- unsigned long value;
+ uint32_t ctl, value;
value = readl(priv->io + KIRKWOOD_RECCTL);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
+ /* configure */
+ ctl = priv->ctl_rec;
+ value = ctl & ~KIRKWOOD_RECCTL_I2S_EN;
+ writel(value, priv->io + KIRKWOOD_RECCTL);
+
+ /* enable interrupts */
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
- /* configure audio & enable i2s record */
- value = readl(priv->io + KIRKWOOD_RECCTL);
- value &= ~KIRKWOOD_RECCTL_BURST_MASK;
- value &= ~KIRKWOOD_RECCTL_MONO;
- value &= ~(KIRKWOOD_RECCTL_PAUSE | KIRKWOOD_RECCTL_MUTE
- | KIRKWOOD_RECCTL_SPDIF_EN);
-
- if (priv->burst == 32)
- value |= KIRKWOOD_RECCTL_BURST_32;
- else
- value |= KIRKWOOD_RECCTL_BURST_128;
- value |= KIRKWOOD_RECCTL_I2S_EN;
-
- writel(value, priv->io + KIRKWOOD_RECCTL);
+ /* enable record */
+ writel(ctl, priv->io + KIRKWOOD_RECCTL);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -389,112 +418,146 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai = {
.channels_min = 1,
.channels_max = 2,
.rates = KIRKWOOD_I2S_RATES,
- .formats = KIRKWOOD_I2S_FORMATS,},
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = KIRKWOOD_I2S_RATES,
- .formats = KIRKWOOD_I2S_FORMATS,},
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
.ops = &kirkwood_i2s_dai_ops,
};
-static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
+static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
+ .probe = kirkwood_i2s_probe,
+ .remove = kirkwood_i2s_remove,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000 |
+ SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_KNOT,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000 |
+ SNDRV_PCM_RATE_CONTINUOUS |
+ SNDRV_PCM_RATE_KNOT,
+ .formats = KIRKWOOD_I2S_FORMATS,
+ },
+ .ops = &kirkwood_i2s_dai_ops,
+};
+
+static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
{
- struct resource *mem;
- struct kirkwood_asoc_platform_data *data =
- pdev->dev.platform_data;
+ struct kirkwood_asoc_platform_data *data = pdev->dev.platform_data;
+ struct snd_soc_dai_driver *soc_dai = &kirkwood_i2s_dai;
struct kirkwood_dma_data *priv;
+ struct resource *mem;
int err;
- priv = kzalloc(sizeof(struct kirkwood_dma_data), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "allocation failed\n");
- err = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
dev_set_drvdata(&pdev->dev, priv);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "platform_get_resource failed\n");
- err = -ENXIO;
- goto err_alloc;
- }
-
- priv->mem = request_mem_region(mem->start, SZ_16K, DRV_NAME);
- if (!priv->mem) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -EBUSY;
- goto err_alloc;
+ return -ENXIO;
}
- priv->io = ioremap(priv->mem->start, SZ_16K);
+ priv->io = devm_request_and_ioremap(&pdev->dev, mem);
if (!priv->io) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -ENOMEM;
- goto err_iomem;
+ dev_err(&pdev->dev, "devm_request_and_ioremap failed\n");
+ return -ENOMEM;
}
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq <= 0) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- err = -ENXIO;
- goto err_ioremap;
+ return -ENXIO;
}
if (!data) {
dev_err(&pdev->dev, "no platform data ?!\n");
- err = -EINVAL;
- goto err_ioremap;
+ return -EINVAL;
}
priv->burst = data->burst;
- priv->clk = clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "no clock\n");
- err = PTR_ERR(priv->clk);
- goto err_ioremap;
+ return PTR_ERR(priv->clk);
+ }
+
+ err = clk_prepare_enable(priv->clk);
+ if (err < 0)
+ return err;
+
+ priv->extclk = clk_get(&pdev->dev, "extclk");
+ if (!IS_ERR(priv->extclk)) {
+ if (priv->extclk == priv->clk) {
+ clk_put(priv->extclk);
+ priv->extclk = ERR_PTR(-EINVAL);
+ } else {
+ dev_info(&pdev->dev, "found external clock\n");
+ clk_prepare_enable(priv->extclk);
+ soc_dai = &kirkwood_i2s_dai_extclk;
+ }
+ }
+
+ /* Some sensible defaults - this reflects the powerup values */
+ priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
+ priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
+
+ /* Select the burst size */
+ if (data->burst == 32) {
+ priv->ctl_play |= KIRKWOOD_PLAYCTL_BURST_32;
+ priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_32;
+ } else {
+ priv->ctl_play |= KIRKWOOD_PLAYCTL_BURST_128;
+ priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128;
}
- clk_prepare_enable(priv->clk);
- err = snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
+ err = snd_soc_register_dai(&pdev->dev, soc_dai);
if (!err)
return 0;
dev_err(&pdev->dev, "snd_soc_register_dai failed\n");
+ if (!IS_ERR(priv->extclk)) {
+ clk_disable_unprepare(priv->extclk);
+ clk_put(priv->extclk);
+ }
clk_disable_unprepare(priv->clk);
- clk_put(priv->clk);
-
-err_ioremap:
- iounmap(priv->io);
-err_iomem:
- release_mem_region(priv->mem->start, SZ_16K);
-err_alloc:
- kfree(priv);
-error:
+
return err;
}
-static __devexit int kirkwood_i2s_dev_remove(struct platform_device *pdev)
+static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
{
struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
+ if (!IS_ERR(priv->extclk)) {
+ clk_disable_unprepare(priv->extclk);
+ clk_put(priv->extclk);
+ }
clk_disable_unprepare(priv->clk);
- clk_put(priv->clk);
-
- iounmap(priv->io);
- release_mem_region(priv->mem->start, SZ_16K);
- kfree(priv);
return 0;
}
static struct platform_driver kirkwood_i2s_driver = {
.probe = kirkwood_i2s_dev_probe,
- .remove = __devexit_p(kirkwood_i2s_dev_remove),
+ .remove = kirkwood_i2s_dev_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index c28540aeea2..b979c715471 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -71,7 +71,7 @@ static struct snd_soc_card openrd_client = {
.num_links = ARRAY_SIZE(openrd_client_dai),
};
-static int __devinit openrd_probe(struct platform_device *pdev)
+static int openrd_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &openrd_client;
int ret;
@@ -85,7 +85,7 @@ static int __devinit openrd_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit openrd_remove(struct platform_device *pdev)
+static int openrd_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -99,7 +99,7 @@ static struct platform_driver openrd_driver = {
.owner = THIS_MODULE,
},
.probe = openrd_probe,
- .remove = __devexit_p(openrd_remove),
+ .remove = openrd_remove,
};
module_platform_driver(openrd_driver);
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
index c67bbc57498..1d0ed6f8add 100644
--- a/sound/soc/kirkwood/kirkwood-t5325.c
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -92,7 +92,7 @@ static struct snd_soc_card t5325 = {
.num_dapm_routes = ARRAY_SIZE(t5325_route),
};
-static int __devinit t5325_probe(struct platform_device *pdev)
+static int t5325_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &t5325;
int ret;
@@ -106,7 +106,7 @@ static int __devinit t5325_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit t5325_remove(struct platform_device *pdev)
+static int t5325_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -120,7 +120,7 @@ static struct platform_driver t5325_driver = {
.owner = THIS_MODULE,
},
.probe = t5325_probe,
- .remove = __devexit_p(t5325_remove),
+ .remove = t5325_remove,
};
module_platform_driver(t5325_driver);
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index f9084d83e6b..4d92637ddb3 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -77,6 +77,11 @@
#define KIRKWOOD_DCO_SPCR_STATUS 0x120c
#define KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK (1<<16)
+#define KIRKWOOD_CLOCKS_CTRL 0x1230
+#define KIRKWOOD_MCLK_SOURCE_MASK (3<<0)
+#define KIRKWOOD_MCLK_SOURCE_DCO (0<<0)
+#define KIRKWOOD_MCLK_SOURCE_EXTCLK (3<<0)
+
#define KIRKWOOD_ERR_CAUSE 0x1300
#define KIRKWOOD_ERR_MASK 0x1304
@@ -119,11 +124,13 @@
#define KIRKWOOD_SND_MAX_PERIOD_BYTES 0x4000
struct kirkwood_dma_data {
- struct resource *mem;
void __iomem *io;
+ struct clk *clk;
+ struct clk *extclk;
+ uint32_t ctl_play;
+ uint32_t ctl_rec;
int irq;
int burst;
- struct clk *clk;
};
#endif
diff --git a/sound/soc/mid-x86/mfld_machine.c b/sound/soc/mid-x86/mfld_machine.c
index 2cc7782714b..4139116c33b 100644
--- a/sound/soc/mid-x86/mfld_machine.c
+++ b/sound/soc/mid-x86/mfld_machine.c
@@ -358,7 +358,7 @@ static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit snd_mfld_mc_probe(struct platform_device *pdev)
+static int snd_mfld_mc_probe(struct platform_device *pdev)
{
int ret_val = 0, irq;
struct mfld_mc_private *mc_drv_ctx;
@@ -417,7 +417,7 @@ unalloc:
return ret_val;
}
-static int __devexit snd_mfld_mc_remove(struct platform_device *pdev)
+static int snd_mfld_mc_remove(struct platform_device *pdev)
{
struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev);
@@ -435,7 +435,7 @@ static struct platform_driver snd_mfld_mc_driver = {
.name = "msic_audio",
},
.probe = snd_mfld_mc_probe,
- .remove = __devexit_p(snd_mfld_mc_remove),
+ .remove = snd_mfld_mc_remove,
};
module_platform_driver(snd_mfld_mc_driver);
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index f82d766cbf9..564b5b60319 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -220,13 +220,13 @@ static struct snd_soc_platform_driver mxs_soc_platform = {
.pcm_free = mxs_pcm_free,
};
-int __devinit mxs_pcm_platform_register(struct device *dev)
+int mxs_pcm_platform_register(struct device *dev)
{
return snd_soc_register_platform(dev, &mxs_soc_platform);
}
EXPORT_SYMBOL_GPL(mxs_pcm_platform_register);
-void __devexit mxs_pcm_platform_unregister(struct device *dev)
+void mxs_pcm_platform_unregister(struct device *dev)
{
snd_soc_unregister_platform(dev);
}
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index c294fbb523f..365d9d27a32 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -229,6 +229,7 @@ int mxs_saif_put_mclk(unsigned int saif_id)
saif->mclk_in_use = 0;
return 0;
}
+EXPORT_SYMBOL_GPL(mxs_saif_put_mclk);
/*
* Get MCLK and set clock rate, then enable it
@@ -282,6 +283,7 @@ int mxs_saif_get_mclk(unsigned int saif_id, unsigned int mclk,
return 0;
}
+EXPORT_SYMBOL_GPL(mxs_saif_get_mclk);
/*
* SAIF DAI format configuration.
@@ -655,7 +657,7 @@ static irqreturn_t mxs_saif_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mxs_saif_probe(struct platform_device *pdev)
+static int mxs_saif_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct resource *iores, *dmares;
@@ -790,7 +792,7 @@ failed_pdev_alloc:
return ret;
}
-static int __devexit mxs_saif_remove(struct platform_device *pdev)
+static int mxs_saif_remove(struct platform_device *pdev)
{
mxs_pcm_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
@@ -806,7 +808,7 @@ MODULE_DEVICE_TABLE(of, mxs_saif_dt_ids);
static struct platform_driver mxs_saif_driver = {
.probe = mxs_saif_probe,
- .remove = __devexit_p(mxs_saif_remove),
+ .remove = mxs_saif_remove,
.driver = {
.name = "mxs-saif",
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 215113b05f7..b1d9b5ebeee 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -112,7 +112,7 @@ static struct snd_soc_card mxs_sgtl5000 = {
.num_links = ARRAY_SIZE(mxs_sgtl5000_dai),
};
-static int __devinit mxs_sgtl5000_probe_dt(struct platform_device *pdev)
+static int mxs_sgtl5000_probe_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *saif_np[2], *codec_np;
@@ -145,7 +145,7 @@ static int __devinit mxs_sgtl5000_probe_dt(struct platform_device *pdev)
return ret;
}
-static int __devinit mxs_sgtl5000_probe(struct platform_device *pdev)
+static int mxs_sgtl5000_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mxs_sgtl5000;
int ret;
@@ -176,7 +176,7 @@ static int __devinit mxs_sgtl5000_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit mxs_sgtl5000_remove(struct platform_device *pdev)
+static int mxs_sgtl5000_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -200,7 +200,7 @@ static struct platform_driver mxs_sgtl5000_audio_driver = {
.of_match_table = mxs_sgtl5000_dt_ids,
},
.probe = mxs_sgtl5000_probe,
- .remove = __devexit_p(mxs_sgtl5000_remove),
+ .remove = mxs_sgtl5000_remove,
};
module_platform_driver(mxs_sgtl5000_audio_driver);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 946020a647d..0418467a484 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -314,7 +314,7 @@ static struct snd_soc_dai_driver nuc900_ac97_dai = {
.ops = &nuc900_ac97_dai_ops,
};
-static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
+static int nuc900_ac97_drvprobe(struct platform_device *pdev)
{
struct nuc900_audio *nuc900_audio;
int ret;
@@ -382,7 +382,7 @@ out0:
return ret;
}
-static int __devexit nuc900_ac97_drvremove(struct platform_device *pdev)
+static int nuc900_ac97_drvremove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
@@ -403,7 +403,7 @@ static struct platform_driver nuc900_ac97_driver = {
.owner = THIS_MODULE,
},
.probe = nuc900_ac97_drvprobe,
- .remove = __devexit_p(nuc900_ac97_drvremove),
+ .remove = nuc900_ac97_drvremove,
};
module_platform_driver(nuc900_ac97_driver);
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 37585b47f4e..c894ff0f258 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -337,12 +337,12 @@ static struct snd_soc_platform_driver nuc900_soc_platform = {
.pcm_free = nuc900_dma_free_dma_buffers,
};
-static int __devinit nuc900_soc_platform_probe(struct platform_device *pdev)
+static int nuc900_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
}
-static int __devexit nuc900_soc_platform_remove(struct platform_device *pdev)
+static int nuc900_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -355,7 +355,7 @@ static struct platform_driver nuc900_pcm_driver = {
},
.probe = nuc900_soc_platform_probe,
- .remove = __devexit_p(nuc900_soc_platform_remove),
+ .remove = nuc900_soc_platform_remove,
};
module_platform_driver(nuc900_pcm_driver);
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index d8e96b2cd03..2600447fa74 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -575,7 +575,7 @@ static struct snd_soc_card ams_delta_audio_card = {
};
/* Module init/exit */
-static __devinit int ams_delta_probe(struct platform_device *pdev)
+static int ams_delta_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &ams_delta_audio_card;
int ret;
@@ -591,7 +591,7 @@ static __devinit int ams_delta_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ams_delta_remove(struct platform_device *pdev)
+static int ams_delta_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -616,7 +616,7 @@ static struct platform_driver ams_delta_driver = {
.owner = THIS_MODULE,
},
.probe = ams_delta_probe,
- .remove = __devexit_p(ams_delta_remove),
+ .remove = ams_delta_remove,
};
module_platform_driver(ams_delta_driver);
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index afb8d4f1bed..285c8368cb4 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -28,8 +28,6 @@
#include <linux/platform_data/asoc-ti-mcbsp.h>
-#include <plat/cpu.h>
-
#include "mcbsp.h"
static void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
@@ -612,7 +610,7 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
* system will refuse to enter idle if the CLKS pin source is not reset
* back to internal source.
*/
- if (!cpu_class_is_omap1())
+ if (!mcbsp_omap1())
omap2_mcbsp_set_clks_src(mcbsp, MCBSP_CLKS_PRCM_SRC);
spin_lock(&mcbsp->lock);
@@ -932,8 +930,7 @@ static const struct attribute_group sidetone_attr_group = {
.attrs = (struct attribute **)sidetone_attrs,
};
-static int __devinit omap_st_add(struct omap_mcbsp *mcbsp,
- struct resource *res)
+static int omap_st_add(struct omap_mcbsp *mcbsp, struct resource *res)
{
struct omap_mcbsp_st_data *st_data;
int err;
@@ -959,7 +956,7 @@ static int __devinit omap_st_add(struct omap_mcbsp *mcbsp,
* McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
* 730 has only 2 McBSP, and both of them are MPU peripherals.
*/
-int __devinit omap_mcbsp_init(struct platform_device *pdev)
+int omap_mcbsp_init(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
struct resource *res;
@@ -1087,7 +1084,7 @@ err_thres:
return ret;
}
-void __devexit omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp)
+void omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp)
{
if (mcbsp->pdata->buffer_size)
sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index 49a67259ce5..f93e0b0af30 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -26,6 +26,12 @@
#include "omap-pcm.h"
+#ifdef CONFIG_ARCH_OMAP1
+#define mcbsp_omap1() 1
+#else
+#define mcbsp_omap1() 0
+#endif
+
/* McBSP register numbers. Register address offset = num * reg_step */
enum {
/* Common registers */
@@ -341,7 +347,7 @@ int omap_st_enable(struct omap_mcbsp *mcbsp);
int omap_st_disable(struct omap_mcbsp *mcbsp);
int omap_st_is_enabled(struct omap_mcbsp *mcbsp);
-int __devinit omap_mcbsp_init(struct platform_device *pdev);
-void __devexit omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp);
+int omap_mcbsp_init(struct platform_device *pdev);
+void omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp);
#endif /* __ASOC_MCBSP_H */
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index a57a4e68dcc..e7d93fa412a 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -273,7 +273,7 @@ static struct snd_soc_card omap_abe_card = {
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
-static __devinit int omap_abe_probe(struct platform_device *pdev)
+static int omap_abe_probe(struct platform_device *pdev)
{
struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
@@ -331,8 +331,8 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
num_links = 1;
}
- of_property_read_u32(node, "ti,jack-detection",
- &priv->jack_detection);
+ priv->jack_detection = of_property_read_bool(node,
+ "ti,jack-detection");
of_property_read_u32(node, "ti,mclk-freq",
&priv->mclk_freq);
if (!priv->mclk_freq) {
@@ -390,7 +390,7 @@ err_unregister:
return ret;
}
-static int __devexit omap_abe_remove(struct platform_device *pdev)
+static int omap_abe_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
@@ -417,7 +417,7 @@ static struct platform_driver omap_abe_driver = {
.of_match_table = omap_abe_of_match,
},
.probe = omap_abe_probe,
- .remove = __devexit_p(omap_abe_remove),
+ .remove = omap_abe_remove,
};
module_platform_driver(omap_abe_driver);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 5a6aeaf552a..ba49ccd9eed 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -448,7 +448,7 @@ static struct snd_soc_dai_driver omap_dmic_dai = {
.ops = &omap_dmic_dai_ops,
};
-static __devinit int asoc_dmic_probe(struct platform_device *pdev)
+static int asoc_dmic_probe(struct platform_device *pdev)
{
struct omap_dmic *dmic;
struct resource *res;
@@ -518,7 +518,7 @@ err_put_clk:
return ret;
}
-static int __devexit asoc_dmic_remove(struct platform_device *pdev)
+static int asoc_dmic_remove(struct platform_device *pdev)
{
struct omap_dmic *dmic = platform_get_drvdata(pdev);
@@ -541,7 +541,7 @@ static struct platform_driver asoc_dmic_driver = {
.of_match_table = omap_dmic_of_match,
},
.probe = asoc_dmic_probe,
- .remove = __devexit_p(asoc_dmic_remove),
+ .remove = asoc_dmic_remove,
};
module_platform_driver(asoc_dmic_driver);
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
index eaa2ea0e3f8..d4eaa92e518 100644
--- a/sound/soc/omap/omap-hdmi-card.c
+++ b/sound/soc/omap/omap-hdmi-card.c
@@ -45,7 +45,7 @@ static struct snd_soc_card snd_soc_omap_hdmi = {
.num_links = 1,
};
-static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+static int omap_hdmi_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_omap_hdmi;
int ret;
@@ -61,7 +61,7 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+static int omap_hdmi_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -76,7 +76,7 @@ static struct platform_driver omap_hdmi_driver = {
.owner = THIS_MODULE,
},
.probe = omap_hdmi_probe,
- .remove = __devexit_p(omap_hdmi_remove),
+ .remove = omap_hdmi_remove,
};
module_platform_driver(omap_hdmi_driver);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
index f59c69fb400..7ea24819d57 100644
--- a/sound/soc/omap/omap-hdmi.c
+++ b/sound/soc/omap/omap-hdmi.c
@@ -262,7 +262,7 @@ static struct snd_soc_dai_driver omap_hdmi_dai = {
.ops = &omap_hdmi_dai_ops,
};
-static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+static int omap_hdmi_probe(struct platform_device *pdev)
{
int ret;
struct resource *hdmi_rsrc;
@@ -324,7 +324,7 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+static int omap_hdmi_remove(struct platform_device *pdev)
{
struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
@@ -345,7 +345,7 @@ static struct platform_driver hdmi_dai_driver = {
.owner = THIS_MODULE,
},
.probe = omap_hdmi_probe,
- .remove = __devexit_p(omap_hdmi_remove),
+ .remove = omap_hdmi_remove,
};
module_platform_driver(hdmi_dai_driver);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index a6ee1574785..8d2defd6fdb 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -34,7 +34,6 @@
#include <sound/initval.h>
#include <sound/soc.h>
-#include <plat/cpu.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "mcbsp.h"
#include "omap-mcbsp.h"
@@ -512,7 +511,7 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
regs->srgr2 |= CLKSM;
break;
case OMAP_MCBSP_SYSCLK_CLKS_FCLK:
- if (cpu_class_is_omap1()) {
+ if (mcbsp_omap1()) {
err = -EINVAL;
break;
}
@@ -520,7 +519,7 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
MCBSP_CLKS_PRCM_SRC);
break;
case OMAP_MCBSP_SYSCLK_CLKS_EXT:
- if (cpu_class_is_omap1()) {
+ if (mcbsp_omap1()) {
err = 0;
break;
}
@@ -758,7 +757,7 @@ static const struct of_device_id omap_mcbsp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcbsp_of_match);
-static __devinit int asoc_mcbsp_probe(struct platform_device *pdev)
+static int asoc_mcbsp_probe(struct platform_device *pdev)
{
struct omap_mcbsp_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_mcbsp *mcbsp;
@@ -799,7 +798,7 @@ static __devinit int asoc_mcbsp_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit asoc_mcbsp_remove(struct platform_device *pdev)
+static int asoc_mcbsp_remove(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
@@ -825,7 +824,7 @@ static struct platform_driver asoc_mcbsp_driver = {
},
.probe = asoc_mcbsp_probe,
- .remove = __devexit_p(asoc_mcbsp_remove),
+ .remove = asoc_mcbsp_remove,
};
module_platform_driver(asoc_mcbsp_driver);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 56965bb3275..2fe8be20945 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -429,7 +429,7 @@ void omap_mcpdm_configure_dn_offsets(struct snd_soc_pcm_runtime *rtd,
}
EXPORT_SYMBOL_GPL(omap_mcpdm_configure_dn_offsets);
-static __devinit int asoc_mcpdm_probe(struct platform_device *pdev)
+static int asoc_mcpdm_probe(struct platform_device *pdev)
{
struct omap_mcpdm *mcpdm;
struct resource *res;
@@ -487,7 +487,7 @@ static __devinit int asoc_mcpdm_probe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &omap_mcpdm_dai);
}
-static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
+static int asoc_mcpdm_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
@@ -507,7 +507,7 @@ static struct platform_driver asoc_mcpdm_driver = {
},
.probe = asoc_mcpdm_probe,
- .remove = __devexit_p(asoc_mcpdm_remove),
+ .remove = asoc_mcpdm_remove,
};
module_platform_driver(asoc_mcpdm_driver);
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 52977aa3035..47bdbd415ad 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -302,13 +302,13 @@ static struct snd_soc_platform_driver omap_soc_platform = {
.pcm_free = omap_pcm_free_dma_buffers,
};
-static __devinit int omap_pcm_probe(struct platform_device *pdev)
+static int omap_pcm_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev,
&omap_soc_platform);
}
-static int __devexit omap_pcm_remove(struct platform_device *pdev)
+static int omap_pcm_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -321,7 +321,7 @@ static struct platform_driver omap_pcm_driver = {
},
.probe = omap_pcm_probe,
- .remove = __devexit_p(omap_pcm_remove),
+ .remove = omap_pcm_remove,
};
module_platform_driver(omap_pcm_driver);
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
index 3b97b87971f..4541d28b531 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/omap/omap-twl4030.c
@@ -107,7 +107,7 @@ static struct snd_soc_card omap_twl4030_card = {
.num_links = ARRAY_SIZE(omap_twl4030_dai_links),
};
-static __devinit int omap_twl4030_probe(struct platform_device *pdev)
+static int omap_twl4030_probe(struct platform_device *pdev)
{
struct omap_tw4030_pdata *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
@@ -154,7 +154,7 @@ static __devinit int omap_twl4030_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit omap_twl4030_remove(struct platform_device *pdev)
+static int omap_twl4030_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -177,7 +177,7 @@ static struct platform_driver omap_twl4030_driver = {
.of_match_table = omap_twl4030_of_match,
},
.probe = omap_twl4030_probe,
- .remove = __devexit_p(omap_twl4030_remove),
+ .remove = omap_twl4030_remove,
};
module_platform_driver(omap_twl4030_driver);
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
index 1ff6bb9ade5..771bff27ac3 100644
--- a/sound/soc/omap/zoom2.c
+++ b/sound/soc/omap/zoom2.c
@@ -37,8 +37,6 @@
#include "omap-mcbsp.h"
#include "omap-pcm.h"
-#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
-
static int zoom2_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -187,9 +185,6 @@ static int __init zoom2_soc_init(void)
if (ret)
goto err1;
- BUG_ON(gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux") < 0);
- gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);
-
return 0;
err1:
@@ -202,8 +197,6 @@ module_init(zoom2_soc_init);
static void __exit zoom2_soc_exit(void)
{
- gpio_free(ZOOM2_HEADSET_MUX_GPIO);
-
platform_device_unregister(zoom2_snd_device);
}
module_exit(zoom2_soc_exit);
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 5e666e03d33..4ad76099dd4 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -140,7 +140,7 @@ static struct snd_soc_card brownstone = {
.num_dapm_routes = ARRAY_SIZE(brownstone_audio_map),
};
-static int __devinit brownstone_probe(struct platform_device *pdev)
+static int brownstone_probe(struct platform_device *pdev)
{
int ret;
@@ -152,7 +152,7 @@ static int __devinit brownstone_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit brownstone_remove(struct platform_device *pdev)
+static int brownstone_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&brownstone);
return 0;
@@ -164,7 +164,7 @@ static struct platform_driver mmp_driver = {
.owner = THIS_MODULE,
},
.probe = brownstone_probe,
- .remove = __devexit_p(brownstone_remove),
+ .remove = brownstone_remove,
};
module_platform_driver(mmp_driver);
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index 863367ad89c..f4cce1e8011 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -303,7 +303,7 @@ static struct snd_soc_card corgi = {
.num_dapm_routes = ARRAY_SIZE(corgi_audio_map),
};
-static int __devinit corgi_probe(struct platform_device *pdev)
+static int corgi_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &corgi;
int ret;
@@ -317,7 +317,7 @@ static int __devinit corgi_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit corgi_remove(struct platform_device *pdev)
+static int corgi_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -331,7 +331,7 @@ static struct platform_driver corgi_driver = {
.owner = THIS_MODULE,
},
.probe = corgi_probe,
- .remove = __devexit_p(corgi_remove),
+ .remove = corgi_remove,
};
module_platform_driver(corgi_driver);
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 7b1bc239003..70d799b13f0 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -144,7 +144,7 @@ static struct gpio e740_audio_gpios[] = {
{ GPIO_E740_WM9705_nAVDD2, GPIOF_OUT_INIT_HIGH, "Audio power" },
};
-static int __devinit e740_probe(struct platform_device *pdev)
+static int e740_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e740;
int ret;
@@ -165,7 +165,7 @@ static int __devinit e740_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit e740_remove(struct platform_device *pdev)
+static int e740_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -180,7 +180,7 @@ static struct platform_driver e740_driver = {
.owner = THIS_MODULE,
},
.probe = e740_probe,
- .remove = __devexit_p(e740_remove),
+ .remove = e740_remove,
};
module_platform_driver(e740_driver);
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 47b89d71e28..f94d2ab5135 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -126,7 +126,7 @@ static struct gpio e750_audio_gpios[] = {
{ GPIO_E750_SPK_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
};
-static int __devinit e750_probe(struct platform_device *pdev)
+static int e750_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e750;
int ret;
@@ -147,7 +147,7 @@ static int __devinit e750_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit e750_remove(struct platform_device *pdev)
+static int e750_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -162,7 +162,7 @@ static struct platform_driver e750_driver = {
.owner = THIS_MODULE,
},
.probe = e750_probe,
- .remove = __devexit_p(e750_remove),
+ .remove = e750_remove,
};
module_platform_driver(e750_driver);
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index ea9707ec6f2..8768a640dd7 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -116,7 +116,7 @@ static struct gpio e800_audio_gpios[] = {
{ GPIO_E800_HP_AMP_OFF, GPIOF_OUT_INIT_HIGH, "Speaker amp" },
};
-static int __devinit e800_probe(struct platform_device *pdev)
+static int e800_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &e800;
int ret;
@@ -137,7 +137,7 @@ static int __devinit e800_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit e800_remove(struct platform_device *pdev)
+static int e800_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -152,7 +152,7 @@ static struct platform_driver e800_driver = {
.owner = THIS_MODULE,
},
.probe = e800_probe,
- .remove = __devexit_p(e800_remove),
+ .remove = e800_remove,
};
module_platform_driver(e800_driver);
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 2a342c92d82..dcc9b04bd92 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -183,7 +183,7 @@ static struct gpio hx4700_audio_gpios[] = {
{ GPIO92_HX4700_HP_DRIVER, GPIOF_OUT_INIT_LOW, "EP_POWER" },
};
-static int __devinit hx4700_audio_probe(struct platform_device *pdev)
+static int hx4700_audio_probe(struct platform_device *pdev)
{
int ret;
@@ -204,7 +204,7 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit hx4700_audio_remove(struct platform_device *pdev)
+static int hx4700_audio_remove(struct platform_device *pdev)
{
snd_soc_jack_free_gpios(&hs_jack, 1, &hs_jack_gpio);
snd_soc_unregister_card(&snd_soc_card_hx4700);
@@ -223,7 +223,7 @@ static struct platform_driver hx4700_audio_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = hx4700_audio_probe,
- .remove = __devexit_p(hx4700_audio_remove),
+ .remove = hx4700_audio_remove,
};
module_platform_driver(hx4700_audio_driver);
diff --git a/sound/soc/pxa/imote2.c b/sound/soc/pxa/imote2.c
index b93dafd32b8..eef1f7b7b38 100644
--- a/sound/soc/pxa/imote2.c
+++ b/sound/soc/pxa/imote2.c
@@ -65,7 +65,7 @@ static struct snd_soc_card imote2 = {
.num_links = 1,
};
-static int __devinit imote2_probe(struct platform_device *pdev)
+static int imote2_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &imote2;
int ret;
@@ -79,7 +79,7 @@ static int __devinit imote2_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit imote2_remove(struct platform_device *pdev)
+static int imote2_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -93,7 +93,7 @@ static struct platform_driver imote2_driver = {
.owner = THIS_MODULE,
},
.probe = imote2_probe,
- .remove = __devexit_p(imote2_remove),
+ .remove = imote2_remove,
};
module_platform_driver(imote2_driver);
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 8687c1c65d2..97b711e1282 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -186,7 +186,7 @@ static struct snd_soc_card mioa701 = {
.num_links = ARRAY_SIZE(mioa701_dai),
};
-static int __devinit mioa701_wm9713_probe(struct platform_device *pdev)
+static int mioa701_wm9713_probe(struct platform_device *pdev)
{
int rc;
@@ -202,7 +202,7 @@ static int __devinit mioa701_wm9713_probe(struct platform_device *pdev)
return rc;
}
-static int __devexit mioa701_wm9713_remove(struct platform_device *pdev)
+static int mioa701_wm9713_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -212,7 +212,7 @@ static int __devexit mioa701_wm9713_remove(struct platform_device *pdev)
static struct platform_driver mioa701_wm9713_driver = {
.probe = mioa701_wm9713_probe,
- .remove = __devexit_p(mioa701_wm9713_remove),
+ .remove = mioa701_wm9713_remove,
.driver = {
.name = "mioa701-wm9713",
.owner = THIS_MODULE,
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index e834faf859f..190eb0bccf5 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -257,7 +257,7 @@ struct snd_soc_platform_driver mmp_soc_platform = {
.pcm_free = mmp_pcm_free_dma_buffers,
};
-static __devinit int mmp_pcm_probe(struct platform_device *pdev)
+static int mmp_pcm_probe(struct platform_device *pdev)
{
struct mmp_audio_platdata *pdata = pdev->dev.platform_data;
@@ -274,7 +274,7 @@ static __devinit int mmp_pcm_probe(struct platform_device *pdev)
return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
}
-static int __devexit mmp_pcm_remove(struct platform_device *pdev)
+static int mmp_pcm_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -287,7 +287,7 @@ static struct platform_driver mmp_pcm_driver = {
},
.probe = mmp_pcm_probe,
- .remove = __devexit_p(mmp_pcm_remove),
+ .remove = mmp_pcm_remove,
};
module_platform_driver(mmp_pcm_driver);
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 4d6cb8a30fc..41c3a09b53e 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -405,7 +405,7 @@ struct snd_soc_dai_driver mmp_sspa_dai = {
.ops = &mmp_sspa_dai_ops,
};
-static __devinit int asoc_mmp_sspa_probe(struct platform_device *pdev)
+static int asoc_mmp_sspa_probe(struct platform_device *pdev)
{
struct sspa_priv *priv;
struct resource *res;
@@ -453,7 +453,7 @@ static __devinit int asoc_mmp_sspa_probe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &mmp_sspa_dai);
}
-static int __devexit asoc_mmp_sspa_remove(struct platform_device *pdev)
+static int asoc_mmp_sspa_remove(struct platform_device *pdev)
{
struct sspa_priv *priv = platform_get_drvdata(pdev);
@@ -470,7 +470,7 @@ static struct platform_driver asoc_mmp_sspa_driver = {
.owner = THIS_MODULE,
},
.probe = asoc_mmp_sspa_probe,
- .remove = __devexit_p(asoc_mmp_sspa_remove),
+ .remove = asoc_mmp_sspa_remove,
};
module_platform_driver(asoc_mmp_sspa_driver);
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index aa3da91907c..2074e2daf9c 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -187,7 +187,7 @@ put_device:
return ret;
}
-static int __devexit palm27x_asoc_remove(struct platform_device *pdev)
+static int palm27x_asoc_remove(struct platform_device *pdev)
{
platform_device_unregister(palm27x_snd_device);
return 0;
@@ -195,7 +195,7 @@ static int __devexit palm27x_asoc_remove(struct platform_device *pdev)
static struct platform_driver palm27x_wm9712_driver = {
.probe = palm27x_asoc_probe,
- .remove = __devexit_p(palm27x_asoc_remove),
+ .remove = palm27x_asoc_remove,
.driver = {
.name = "palm27x-asoc",
.owner = THIS_MODULE,
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index d2cc8173503..fafe46355c3 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -269,7 +269,7 @@ static struct snd_soc_card poodle = {
.num_dapm_routes = ARRAY_SIZE(poodle_audio_map),
};
-static int __devinit poodle_probe(struct platform_device *pdev)
+static int poodle_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &poodle;
int ret;
@@ -291,7 +291,7 @@ static int __devinit poodle_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit poodle_remove(struct platform_device *pdev)
+static int poodle_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -305,7 +305,7 @@ static struct platform_driver poodle_driver = {
.owner = THIS_MODULE,
},
.probe = poodle_probe,
- .remove = __devexit_p(poodle_remove),
+ .remove = poodle_remove,
};
module_platform_driver(poodle_driver);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 4da5fc55c7e..d3eb0c2eec7 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -794,12 +794,12 @@ static struct snd_soc_dai_driver pxa_ssp_dai = {
.ops = &pxa_ssp_dai_ops,
};
-static __devinit int asoc_ssp_probe(struct platform_device *pdev)
+static int asoc_ssp_probe(struct platform_device *pdev)
{
return snd_soc_register_dai(&pdev->dev, &pxa_ssp_dai);
}
-static int __devexit asoc_ssp_remove(struct platform_device *pdev)
+static int asoc_ssp_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
@@ -812,7 +812,7 @@ static struct platform_driver asoc_ssp_driver = {
},
.probe = asoc_ssp_probe,
- .remove = __devexit_p(asoc_ssp_remove),
+ .remove = asoc_ssp_remove,
};
module_platform_driver(asoc_ssp_driver);
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 06ea2744cc8..4b0a009bd68 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -104,7 +104,7 @@ static int pxa2xx_ac97_resume(struct snd_soc_dai *dai)
#define pxa2xx_ac97_resume NULL
#endif
-static int __devinit pxa2xx_ac97_probe(struct snd_soc_dai *dai)
+static int pxa2xx_ac97_probe(struct snd_soc_dai *dai)
{
return pxa2xx_ac97_hw_probe(to_platform_device(dai->dev));
}
@@ -234,7 +234,7 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
EXPORT_SYMBOL_GPL(soc_ac97_ops);
-static __devinit int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
+static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
{
if (pdev->id != -1) {
dev_err(&pdev->dev, "PXA2xx has only one AC97 port.\n");
@@ -249,7 +249,7 @@ static __devinit int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
ARRAY_SIZE(pxa_ac97_dai_driver));
}
-static int __devexit pxa2xx_ac97_dev_remove(struct platform_device *pdev)
+static int pxa2xx_ac97_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(pxa_ac97_dai_driver));
return 0;
@@ -257,7 +257,7 @@ static int __devexit pxa2xx_ac97_dev_remove(struct platform_device *pdev)
static struct platform_driver pxa2xx_ac97_driver = {
.probe = pxa2xx_ac97_dev_probe,
- .remove = __devexit_p(pxa2xx_ac97_dev_remove),
+ .remove = pxa2xx_ac97_dev_remove,
.driver = {
.name = "pxa2xx-ac97",
.owner = THIS_MODULE,
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 3075a426124..6b1a06f6756 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -365,7 +365,7 @@ static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &pxa_i2s_dai);
}
-static int __devexit pxa2xx_i2s_drv_remove(struct platform_device *pdev)
+static int pxa2xx_i2s_drv_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
@@ -373,7 +373,7 @@ static int __devexit pxa2xx_i2s_drv_remove(struct platform_device *pdev)
static struct platform_driver pxa2xx_i2s_driver = {
.probe = pxa2xx_i2s_drv_probe,
- .remove = __devexit_p(pxa2xx_i2s_drv_remove),
+ .remove = pxa2xx_i2s_drv_remove,
.driver = {
.name = "pxa2xx-i2s",
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index fdd6bedef9b..ecff116cb7b 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -120,12 +120,12 @@ static struct snd_soc_platform_driver pxa2xx_soc_platform = {
.pcm_free = pxa2xx_pcm_free_dma_buffers,
};
-static int __devinit pxa2xx_soc_platform_probe(struct platform_device *pdev)
+static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
}
-static int __devexit pxa2xx_soc_platform_remove(struct platform_device *pdev)
+static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -138,7 +138,7 @@ static struct platform_driver pxa_pcm_driver = {
},
.probe = pxa2xx_soc_platform_probe,
- .remove = __devexit_p(pxa2xx_soc_platform_remove),
+ .remove = pxa2xx_soc_platform_remove,
};
module_platform_driver(pxa_pcm_driver);
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 2aec63f3706..a3fe19123f0 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -241,7 +241,7 @@ static struct snd_soc_card tosa = {
.num_links = ARRAY_SIZE(tosa_dai),
};
-static int __devinit tosa_probe(struct platform_device *pdev)
+static int tosa_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &tosa;
int ret;
@@ -262,7 +262,7 @@ static int __devinit tosa_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit tosa_remove(struct platform_device *pdev)
+static int tosa_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -277,7 +277,7 @@ static struct platform_driver tosa_driver = {
.owner = THIS_MODULE,
},
.probe = tosa_probe,
- .remove = __devexit_p(tosa_remove),
+ .remove = tosa_remove,
};
module_platform_driver(tosa_driver);
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index 935491a8a77..f4ea4f6663a 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -131,7 +131,7 @@ static struct snd_soc_card ttc_dkb_card = {
.num_dapm_routes = ARRAY_SIZE(ttc_audio_map),
};
-static int __devinit ttc_dkb_probe(struct platform_device *pdev)
+static int ttc_dkb_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &ttc_dkb_card;
int ret;
@@ -146,7 +146,7 @@ static int __devinit ttc_dkb_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit ttc_dkb_remove(struct platform_device *pdev)
+static int ttc_dkb_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -161,7 +161,7 @@ static struct platform_driver ttc_dkb_driver = {
.owner = THIS_MODULE,
},
.probe = ttc_dkb_probe,
- .remove = __devexit_p(ttc_dkb_remove),
+ .remove = ttc_dkb_remove,
};
module_platform_driver(ttc_dkb_driver);
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c
index aaabdbaec19..fee4d477a49 100644
--- a/sound/soc/s6000/s6000-i2s.c
+++ b/sound/soc/s6000/s6000-i2s.c
@@ -436,7 +436,7 @@ static struct snd_soc_dai_driver s6000_i2s_dai = {
.ops = &s6000_i2s_dai_ops,
};
-static int __devinit s6000_i2s_probe(struct platform_device *pdev)
+static int s6000_i2s_probe(struct platform_device *pdev)
{
struct s6000_i2s_dev *dev;
struct resource *scbmem, *sifmem, *region, *dma1, *dma2;
@@ -566,7 +566,7 @@ err_release_none:
return ret;
}
-static void __devexit s6000_i2s_remove(struct platform_device *pdev)
+static void s6000_i2s_remove(struct platform_device *pdev)
{
struct s6000_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
struct resource *region;
@@ -597,7 +597,7 @@ static void __devexit s6000_i2s_remove(struct platform_device *pdev)
static struct platform_driver s6000_i2s_driver = {
.probe = s6000_i2s_probe,
- .remove = __devexit_p(s6000_i2s_remove),
+ .remove = s6000_i2s_remove,
.driver = {
.name = "s6000-i2s",
.owner = THIS_MODULE,
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 716da861c62..1358c7de252 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -500,12 +500,12 @@ static struct snd_soc_platform_driver s6000_soc_platform = {
.pcm_free = s6000_pcm_free,
};
-static int __devinit s6000_soc_platform_probe(struct platform_device *pdev)
+static int s6000_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
}
-static int __devexit s6000_soc_platform_remove(struct platform_device *pdev)
+static int s6000_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -518,7 +518,7 @@ static struct platform_driver s6000_pcm_driver = {
},
.probe = s6000_soc_platform_probe,
- .remove = __devexit_p(s6000_soc_platform_remove),
+ .remove = s6000_soc_platform_remove,
};
module_platform_driver(s6000_pcm_driver);
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 14fbcd30cae..0df3c5644cf 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -370,7 +370,7 @@ static struct snd_soc_dai_driver s3c_ac97_dai[] = {
},
};
-static __devinit int s3c_ac97_probe(struct platform_device *pdev)
+static int s3c_ac97_probe(struct platform_device *pdev)
{
struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
struct s3c_audio_pdata *ac97_pdata;
@@ -442,7 +442,7 @@ static __devinit int s3c_ac97_probe(struct platform_device *pdev)
ret = -ENODEV;
goto err2;
}
- clk_enable(s3c_ac97.ac97_clk);
+ clk_prepare_enable(s3c_ac97.ac97_clk);
if (ac97_pdata->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to configure gpio\n");
@@ -462,13 +462,20 @@ static __devinit int s3c_ac97_probe(struct platform_device *pdev)
if (ret)
goto err5;
- return 0;
+ ret = asoc_dma_platform_register(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
+ goto err6;
+ }
+ return 0;
+err6:
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c_ac97_dai));
err5:
free_irq(irq_res->start, NULL);
err4:
err3:
- clk_disable(s3c_ac97.ac97_clk);
+ clk_disable_unprepare(s3c_ac97.ac97_clk);
clk_put(s3c_ac97.ac97_clk);
err2:
iounmap(s3c_ac97.regs);
@@ -478,17 +485,18 @@ err1:
return ret;
}
-static __devexit int s3c_ac97_remove(struct platform_device *pdev)
+static int s3c_ac97_remove(struct platform_device *pdev)
{
struct resource *mem_res, *irq_res;
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c_ac97_dai));
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res)
free_irq(irq_res->start, NULL);
- clk_disable(s3c_ac97.ac97_clk);
+ clk_disable_unprepare(s3c_ac97.ac97_clk);
clk_put(s3c_ac97.ac97_clk);
iounmap(s3c_ac97.regs);
@@ -502,7 +510,7 @@ static __devexit int s3c_ac97_remove(struct platform_device *pdev)
static struct platform_driver s3c_ac97_driver = {
.probe = s3c_ac97_probe,
- .remove = __devexit_p(s3c_ac97_remove),
+ .remove = s3c_ac97_remove,
.driver = {
.name = "samsung-ac97",
.owner = THIS_MODULE,
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index a2ca1567b9e..ceed466af9f 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -18,15 +18,6 @@
#include "../codecs/wm5102.h"
#include "../codecs/wm9081.h"
-/*
- * 44.1kHz based clocks for the SYSCLK domain, use a very high clock
- * to allow all the DSP functionality to be enabled if desired.
- */
-#define SYSCLK_RATE (44100 * 1024)
-
-/* 48kHz based clocks for the ASYNC domain */
-#define ASYNCCLK_RATE (48000 * 512)
-
/* BCLK2 is fixed at this currently */
#define BCLK2_RATE (64 * 8000)
@@ -36,15 +27,40 @@
*/
#define MCLK_RATE 24576000
-#define WM9081_AUDIO_RATE 44100
-#define WM9081_MCLK_RATE (WM9081_AUDIO_RATE * 256)
+#define SYS_AUDIO_RATE 44100
+#define SYS_MCLK_RATE (SYS_AUDIO_RATE * 512)
+
+#define DAI_AP_DSP 0
+#define DAI_DSP_CODEC 1
+#define DAI_CODEC_CP 2
+#define DAI_CODEC_SUB 3
+
+struct bells_drvdata {
+ int sysclk_rate;
+ int asyncclk_rate;
+};
+
+static struct bells_drvdata wm2200_drvdata = {
+ .sysclk_rate = 22579200,
+};
+
+static struct bells_drvdata wm5102_drvdata = {
+ .sysclk_rate = 45158400,
+ .asyncclk_rate = 49152000,
+};
+
+static struct bells_drvdata wm5110_drvdata = {
+ .sysclk_rate = 135475200,
+ .asyncclk_rate = 147456000,
+};
static int bells_set_bias_level(struct snd_soc_card *card,
struct snd_soc_dapm_context *dapm,
enum snd_soc_bias_level level)
{
- struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+ struct snd_soc_dai *codec_dai = card->rtd[DAI_DSP_CODEC].codec_dai;
struct snd_soc_codec *codec = codec_dai->codec;
+ struct bells_drvdata *bells = card->drvdata;
int ret;
if (dapm->dev != codec_dai->dev)
@@ -52,18 +68,21 @@ static int bells_set_bias_level(struct snd_soc_card *card,
switch (level) {
case SND_SOC_BIAS_PREPARE:
- if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
- ARIZONA_FLL_SRC_MCLK1,
- MCLK_RATE,
- SYSCLK_RATE);
- if (ret < 0)
- pr_err("Failed to start FLL: %d\n", ret);
+ if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
+ break;
+ ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
+ ARIZONA_FLL_SRC_MCLK1,
+ MCLK_RATE,
+ bells->sysclk_rate);
+ if (ret < 0)
+ pr_err("Failed to start FLL: %d\n", ret);
+
+ if (bells->asyncclk_rate) {
ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
ARIZONA_FLL_SRC_AIF2BCLK,
BCLK2_RATE,
- ASYNCCLK_RATE);
+ bells->asyncclk_rate);
if (ret < 0)
pr_err("Failed to start FLL: %d\n", ret);
}
@@ -80,8 +99,9 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
struct snd_soc_dapm_context *dapm,
enum snd_soc_bias_level level)
{
- struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+ struct snd_soc_dai *codec_dai = card->rtd[DAI_DSP_CODEC].codec_dai;
struct snd_soc_codec *codec = codec_dai->codec;
+ struct bells_drvdata *bells = card->drvdata;
int ret;
if (dapm->dev != codec_dai->dev)
@@ -95,10 +115,13 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
return ret;
}
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL2, 0, 0, 0);
- if (ret < 0) {
- pr_err("Failed to stop FLL: %d\n", ret);
- return ret;
+ if (bells->asyncclk_rate) {
+ ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+ 0, 0, 0);
+ if (ret < 0) {
+ pr_err("Failed to stop FLL: %d\n", ret);
+ return ret;
+ }
}
break;
@@ -113,56 +136,73 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
static int bells_late_probe(struct snd_soc_card *card)
{
- struct snd_soc_codec *codec = card->rtd[0].codec;
- struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
- struct snd_soc_dai *aif2_dai = card->rtd[1].cpu_dai;
- struct snd_soc_dai *aif3_dai = card->rtd[2].cpu_dai;
- struct snd_soc_dai *wm9081_dai = card->rtd[2].codec_dai;
+ struct bells_drvdata *bells = card->drvdata;
+ struct snd_soc_codec *wm0010 = card->rtd[DAI_AP_DSP].codec;
+ struct snd_soc_codec *codec = card->rtd[DAI_DSP_CODEC].codec;
+ struct snd_soc_dai *aif1_dai = card->rtd[DAI_DSP_CODEC].codec_dai;
+ struct snd_soc_dai *aif2_dai;
+ struct snd_soc_dai *aif3_dai;
+ struct snd_soc_dai *wm9081_dai;
int ret;
- ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0);
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+ ARIZONA_CLK_SRC_FLL1,
+ bells->sysclk_rate,
+ SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
+ dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
return ret;
}
- ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
+ ret = snd_soc_codec_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
if (ret != 0) {
- dev_err(aif2_dai->dev, "Failed to set AIF2 clock: %d\n", ret);
+ dev_err(wm0010->dev, "Failed to set WM0010 clock: %d\n", ret);
return ret;
}
- ret = snd_soc_dai_set_sysclk(aif3_dai, ARIZONA_CLK_SYSCLK, 0, 0);
- if (ret != 0) {
+ ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0);
+ if (ret != 0)
dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
- return ret;
- }
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
- ARIZONA_CLK_SRC_FLL1, SYSCLK_RATE,
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
+ SYS_MCLK_RATE, SND_SOC_CLOCK_OUT);
+ if (ret != 0)
+ dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+
+ if (card->num_rtd == DAI_CODEC_CP)
+ return 0;
+
+ ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+ ARIZONA_CLK_SRC_FLL2,
+ bells->asyncclk_rate,
SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ dev_err(codec->dev, "Failed to set ASYNCCLK: %d\n", ret);
return ret;
}
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
- WM9081_MCLK_RATE, SND_SOC_CLOCK_OUT);
+ aif2_dai = card->rtd[DAI_CODEC_CP].cpu_dai;
+
+ ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+ dev_err(aif2_dai->dev, "Failed to set AIF2 clock: %d\n", ret);
return ret;
}
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
- ARIZONA_CLK_SRC_FLL2, ASYNCCLK_RATE,
- SND_SOC_CLOCK_IN);
+ if (card->num_rtd == DAI_CODEC_SUB)
+ return 0;
+
+ aif3_dai = card->rtd[DAI_CODEC_SUB].cpu_dai;
+ wm9081_dai = card->rtd[DAI_CODEC_SUB].codec_dai;
+
+ ret = snd_soc_dai_set_sysclk(aif3_dai, ARIZONA_CLK_SYSCLK, 0, 0);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
return ret;
}
ret = snd_soc_codec_set_sysclk(wm9081_dai->codec, WM9081_SYSCLK_MCLK,
- 0, WM9081_MCLK_RATE, 0);
+ 0, SYS_MCLK_RATE, 0);
if (ret != 0) {
dev_err(wm9081_dai->dev, "Failed to set MCLK: %d\n", ret);
return ret;
@@ -181,22 +221,57 @@ static const struct snd_soc_pcm_stream baseband_params = {
static const struct snd_soc_pcm_stream sub_params = {
.formats = SNDRV_PCM_FMTBIT_S32_LE,
- .rate_min = WM9081_AUDIO_RATE,
- .rate_max = WM9081_AUDIO_RATE,
+ .rate_min = SYS_AUDIO_RATE,
+ .rate_max = SYS_AUDIO_RATE,
.channels_min = 2,
.channels_max = 2,
};
+static struct snd_soc_dai_link bells_dai_wm2200[] = {
+ {
+ .name = "CPU-DSP",
+ .stream_name = "CPU-DSP",
+ .cpu_dai_name = "samsung-i2s.0",
+ .codec_dai_name = "wm0010-sdi1",
+ .platform_name = "samsung-i2s.0",
+ .codec_name = "spi0.0",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ },
+ {
+ .name = "DSP-CODEC",
+ .stream_name = "DSP-CODEC",
+ .cpu_dai_name = "wm0010-sdi2",
+ .codec_dai_name = "wm2200",
+ .codec_name = "wm2200.1-003a",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .params = &sub_params,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link bells_dai_wm5102[] = {
{
- .name = "CPU",
- .stream_name = "CPU",
+ .name = "CPU-DSP",
+ .stream_name = "CPU-DSP",
.cpu_dai_name = "samsung-i2s.0",
+ .codec_dai_name = "wm0010-sdi1",
+ .platform_name = "samsung-i2s.0",
+ .codec_name = "spi0.0",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ },
+ {
+ .name = "DSP-CODEC",
+ .stream_name = "DSP-CODEC",
+ .cpu_dai_name = "wm0010-sdi2",
.codec_dai_name = "wm5102-aif1",
- .platform_name = "samsung-audio",
.codec_name = "wm5102-codec",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
+ .params = &sub_params,
+ .ignore_suspend = 1,
},
{
.name = "Baseband",
@@ -224,14 +299,25 @@ static struct snd_soc_dai_link bells_dai_wm5102[] = {
static struct snd_soc_dai_link bells_dai_wm5110[] = {
{
- .name = "CPU",
- .stream_name = "CPU",
+ .name = "CPU-DSP",
+ .stream_name = "CPU-DSP",
.cpu_dai_name = "samsung-i2s.0",
+ .codec_dai_name = "wm0010-sdi1",
+ .platform_name = "samsung-i2s.0",
+ .codec_name = "spi0.0",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ },
+ {
+ .name = "DSP-CODEC",
+ .stream_name = "DSP-CODEC",
+ .cpu_dai_name = "wm0010-sdi2",
.codec_dai_name = "wm5110-aif1",
- .platform_name = "samsung-audio",
.codec_name = "wm5110-codec",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
+ .params = &sub_params,
+ .ignore_suspend = 1,
},
{
.name = "Baseband",
@@ -270,6 +356,24 @@ static struct snd_soc_dapm_route bells_routes[] = {
static struct snd_soc_card bells_cards[] = {
{
+ .name = "Bells WM2200",
+ .owner = THIS_MODULE,
+ .dai_link = bells_dai_wm2200,
+ .num_links = ARRAY_SIZE(bells_dai_wm2200),
+ .codec_conf = bells_codec_conf,
+ .num_configs = ARRAY_SIZE(bells_codec_conf),
+
+ .late_probe = bells_late_probe,
+
+ .dapm_routes = bells_routes,
+ .num_dapm_routes = ARRAY_SIZE(bells_routes),
+
+ .set_bias_level = bells_set_bias_level,
+ .set_bias_level_post = bells_set_bias_level_post,
+
+ .drvdata = &wm2200_drvdata,
+ },
+ {
.name = "Bells WM5102",
.owner = THIS_MODULE,
.dai_link = bells_dai_wm5102,
@@ -284,6 +388,8 @@ static struct snd_soc_card bells_cards[] = {
.set_bias_level = bells_set_bias_level,
.set_bias_level_post = bells_set_bias_level_post,
+
+ .drvdata = &wm5102_drvdata,
},
{
.name = "Bells WM5110",
@@ -300,11 +406,13 @@ static struct snd_soc_card bells_cards[] = {
.set_bias_level = bells_set_bias_level,
.set_bias_level_post = bells_set_bias_level_post,
+
+ .drvdata = &wm5110_drvdata,
},
};
-static __devinit int bells_probe(struct platform_device *pdev)
+static int bells_probe(struct platform_device *pdev)
{
int ret;
@@ -321,7 +429,7 @@ static __devinit int bells_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bells_remove(struct platform_device *pdev)
+static int bells_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&bells_cards[pdev->id]);
@@ -335,7 +443,7 @@ static struct platform_driver bells_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = bells_probe,
- .remove = __devexit_p(bells_remove),
+ .remove = bells_remove,
};
module_platform_driver(bells_driver);
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index b70964ea448..db87628d763 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -432,30 +432,18 @@ static struct snd_soc_platform_driver samsung_asoc_platform = {
.pcm_free = dma_free_dma_buffers,
};
-static int __devinit samsung_asoc_platform_probe(struct platform_device *pdev)
+int asoc_dma_platform_register(struct device *dev)
{
- return snd_soc_register_platform(&pdev->dev, &samsung_asoc_platform);
+ return snd_soc_register_platform(dev, &samsung_asoc_platform);
}
+EXPORT_SYMBOL_GPL(asoc_dma_platform_register);
-static int __devexit samsung_asoc_platform_remove(struct platform_device *pdev)
+void asoc_dma_platform_unregister(struct device *dev)
{
- snd_soc_unregister_platform(&pdev->dev);
- return 0;
+ snd_soc_unregister_platform(dev);
}
-
-static struct platform_driver asoc_dma_driver = {
- .driver = {
- .name = "samsung-audio",
- .owner = THIS_MODULE,
- },
-
- .probe = samsung_asoc_platform_probe,
- .remove = __devexit_p(samsung_asoc_platform_remove),
-};
-
-module_platform_driver(asoc_dma_driver);
+EXPORT_SYMBOL_GPL(asoc_dma_platform_unregister);
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-audio");
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 7d1ead77ef2..73d8c7c8a1e 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -21,4 +21,7 @@ struct s3c_dma_params {
struct samsung_dma_ops *ops;
};
+int asoc_dma_platform_register(struct device *dev);
+void asoc_dma_platform_unregister(struct device *dev);
+
#endif
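
With these two helpers exported, the pattern used by the rest of the series is that each CPU DAI driver registers the DMA platform on its own device in probe and unregisters it in remove. A minimal sketch of that pairing for a hypothetical CPU DAI driver (driver and DAI names are illustrative, not part of this patch; error handling is trimmed to the essentials):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>
#include "dma.h"

/* Hypothetical CPU DAI driver: register the DMA platform on the same
 * device as the DAI in probe, tear both down in reverse order. */
static struct snd_soc_dai_driver foo_i2s_dai;	/* contents omitted */

static int foo_i2s_probe(struct platform_device *pdev)
{
	int ret;

	ret = snd_soc_register_dai(&pdev->dev, &foo_i2s_dai);
	if (ret)
		return ret;

	ret = asoc_dma_platform_register(&pdev->dev);
	if (ret)
		snd_soc_unregister_dai(&pdev->dev);

	return ret;
}

static int foo_i2s_remove(struct platform_device *pdev)
{
	asoc_dma_platform_unregister(&pdev->dev);
	snd_soc_unregister_dai(&pdev->dev);
	return 0;
}

The hunks for i2s.c, pcm.c, spdif.c and the s3c24xx drivers below follow exactly this shape inside their existing probe/remove paths.
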
diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
index c23c2ae91f5..d37ede58e0a 100644
--- a/sound/soc/samsung/goni_wm8994.c
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -228,7 +228,7 @@ static struct snd_soc_dai_link goni_dai[] = {
.stream_name = "WM8994 HiFi",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8994-codec.0-001a",
.init = goni_wm8994_init,
.ops = &goni_hifi_ops,
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
index 6e3257717c5..3870e9678b5 100644
--- a/sound/soc/samsung/h1940_uda1380.c
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -207,7 +207,7 @@ static struct snd_soc_dai_link h1940_uda1380_dai[] = {
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "uda1380-hifi",
.init = h1940_uda1380_init,
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
.codec_name = "uda1380-codec.0-001a",
.ops = &h1940_ops,
},
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 40b00a13dcd..d2d124f1dd1 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -49,8 +49,6 @@ struct i2s_dai {
struct clk *clk;
/* Clock for generating I2S signals */
struct clk *op_clk;
- /* Array of clock names for op_clk */
- const char **src_clk;
/* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */
struct i2s_dai *pri_dai;
/* Pointer to the Secondary_Fifo if it has one, NULL otherwise */
@@ -423,7 +421,7 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
if (i2s->op_clk) {
if ((clk_id && !(mod & MOD_IMS_SYSMUX)) ||
(!clk_id && (mod & MOD_IMS_SYSMUX))) {
- clk_disable(i2s->op_clk);
+ clk_disable_unprepare(i2s->op_clk);
clk_put(i2s->op_clk);
} else {
i2s->rclk_srcrate =
@@ -432,9 +430,13 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
}
}
- i2s->op_clk = clk_get(&i2s->pdev->dev,
- i2s->src_clk[clk_id]);
- clk_enable(i2s->op_clk);
+ if (clk_id)
+ i2s->op_clk = clk_get(&i2s->pdev->dev,
+ "i2s_opclk1");
+ else
+ i2s->op_clk = clk_get(&i2s->pdev->dev,
+ "i2s_opclk0");
+ clk_prepare_enable(i2s->op_clk);
i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
/* Over-ride the other's */
@@ -880,7 +882,7 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
iounmap(i2s->addr);
return -ENOENT;
}
- clk_enable(i2s->clk);
+ clk_prepare_enable(i2s->clk);
if (other) {
other->addr = i2s->addr;
@@ -922,7 +924,7 @@ static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
if (i2s->quirks & QUIRK_NEED_RSTCLR)
writel(0, i2s->addr + I2SCON);
- clk_disable(i2s->clk);
+ clk_disable_unprepare(i2s->clk);
clk_put(i2s->clk);
iounmap(i2s->addr);
@@ -950,8 +952,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE)
-static __devinit
-struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
+static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
{
struct i2s_dai *i2s;
@@ -992,7 +993,7 @@ struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
return i2s;
}
-static __devinit int samsung_i2s_probe(struct platform_device *pdev)
+static int samsung_i2s_probe(struct platform_device *pdev)
{
u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan;
struct i2s_dai *pri_dai, *sec_dai = NULL;
@@ -1007,6 +1008,7 @@ static __devinit int samsung_i2s_probe(struct platform_device *pdev)
sec_dai = dev_get_drvdata(&pdev->dev);
snd_soc_register_dai(&sec_dai->pdev->dev,
&sec_dai->i2s_dai_drv);
+ asoc_dma_platform_register(&pdev->dev);
return 0;
}
@@ -1067,7 +1069,6 @@ static __devinit int samsung_i2s_probe(struct platform_device *pdev)
(struct s3c2410_dma_client *)&pri_dai->dma_capture;
pri_dai->dma_playback.channel = dma_pl_chan;
pri_dai->dma_capture.channel = dma_cp_chan;
- pri_dai->src_clk = i2s_cfg->src_clk;
pri_dai->dma_playback.dma_size = 4;
pri_dai->dma_capture.dma_size = 4;
pri_dai->base = regs_base;
@@ -1088,7 +1089,6 @@ static __devinit int samsung_i2s_probe(struct platform_device *pdev)
(struct s3c2410_dma_client *)&sec_dai->dma_playback;
/* Use iDMA always if SysDMA not provided */
sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1;
- sec_dai->src_clk = i2s_cfg->src_clk;
sec_dai->dma_playback.dma_size = 4;
sec_dai->base = regs_base;
sec_dai->quirks = quirks;
@@ -1107,6 +1107,8 @@ static __devinit int samsung_i2s_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
+ asoc_dma_platform_register(&pdev->dev);
+
return 0;
err:
release_mem_region(regs_base, resource_size(res));
@@ -1114,7 +1116,7 @@ err:
return ret;
}
-static __devexit int samsung_i2s_remove(struct platform_device *pdev)
+static int samsung_i2s_remove(struct platform_device *pdev)
{
struct i2s_dai *i2s, *other;
struct resource *res;
@@ -1135,6 +1137,7 @@ static __devexit int samsung_i2s_remove(struct platform_device *pdev)
i2s->pri_dai = NULL;
i2s->sec_dai = NULL;
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
return 0;
@@ -1142,7 +1145,7 @@ static __devexit int samsung_i2s_remove(struct platform_device *pdev)
static struct platform_driver samsung_i2s_driver = {
.probe = samsung_i2s_probe,
- .remove = __devexit_p(samsung_i2s_remove),
+ .remove = samsung_i2s_remove,
.driver = {
.name = "samsung-i2s",
.owner = THIS_MODULE,
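
The clk_enable()/clk_disable() calls in the hunks above become clk_prepare_enable()/clk_disable_unprepare() because, under the common clock framework, a clock must first be prepared (which may sleep) before it can be enabled (which must not). A sketch of the pairing, using the fixed "i2s_opclk0" lookup name introduced above (the helper names are illustrative only):

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative helpers showing the prepare+enable / disable+unprepare
 * pairing used throughout this series. */
static int example_opclk_on(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "i2s_opclk0");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*out = clk;
	return 0;
}

static void example_opclk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* drops enable count, then prepare count */
	clk_put(clk);
}
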
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index c227c3163ca..a07950b0c8c 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -416,12 +416,12 @@ static struct snd_soc_platform_driver asoc_idma_platform = {
.pcm_free = idma_free,
};
-static int __devinit asoc_idma_platform_probe(struct platform_device *pdev)
+static int asoc_idma_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
}
-static int __devexit asoc_idma_platform_remove(struct platform_device *pdev)
+static int asoc_idma_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -434,7 +434,7 @@ static struct platform_driver asoc_idma_driver = {
},
.probe = asoc_idma_platform_probe,
- .remove = __devexit_p(asoc_idma_platform_remove),
+ .remove = asoc_idma_platform_remove,
};
module_platform_driver(asoc_idma_driver);
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1578663a1fa..b5f6abd9d22 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -118,7 +118,7 @@ static struct snd_soc_dai_link jive_dai = {
.stream_name = "WM8750",
.cpu_dai_name = "s3c2412-i2s",
.codec_dai_name = "wm8750-hifi",
- .platform_name = "samsung-audio",
+ .platform_name = "s3c2412-i2s",
.codec_name = "wm8750.0-001a",
.init = jive_wm8750_init,
.ops = &jive_ops,
diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c
index ee52c8a0077..bfb91f34a22 100644
--- a/sound/soc/samsung/littlemill.c
+++ b/sound/soc/samsung/littlemill.c
@@ -145,7 +145,7 @@ static struct snd_soc_dai_link littlemill_dai[] = {
.stream_name = "CPU",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8994-codec",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
@@ -270,7 +270,7 @@ static int littlemill_late_probe(struct snd_soc_card *card)
return ret;
/* This will check device compatibility itself */
- wm8958_mic_detect(codec, &littlemill_headset, NULL, NULL);
+ wm8958_mic_detect(codec, &littlemill_headset, NULL, NULL, NULL, NULL);
/* As will this */
wm8994_mic_detect(codec, &littlemill_headset, 1);
@@ -297,7 +297,7 @@ static struct snd_soc_card littlemill = {
.late_probe = littlemill_late_probe,
};
-static __devinit int littlemill_probe(struct platform_device *pdev)
+static int littlemill_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &littlemill;
int ret;
@@ -314,7 +314,7 @@ static __devinit int littlemill_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit littlemill_remove(struct platform_device *pdev)
+static int littlemill_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -330,7 +330,7 @@ static struct platform_driver littlemill_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = littlemill_probe,
- .remove = __devexit_p(littlemill_remove),
+ .remove = littlemill_remove,
};
module_platform_driver(littlemill_driver);
diff --git a/sound/soc/samsung/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
index 69c4a5934a4..9342fc270c2 100644
--- a/sound/soc/samsung/ln2440sbc_alc650.c
+++ b/sound/soc/samsung/ln2440sbc_alc650.c
@@ -28,7 +28,7 @@ static struct snd_soc_dai_link ln2440sbc_dai[] = {
.cpu_dai_name = "samsung-ac97",
.codec_dai_name = "ac97-hifi",
.codec_name = "ac97-codec",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-ac97",
},
};
diff --git a/sound/soc/samsung/lowland.c b/sound/soc/samsung/lowland.c
index 6abf341c4a2..570cf522950 100644
--- a/sound/soc/samsung/lowland.c
+++ b/sound/soc/samsung/lowland.c
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link lowland_dai[] = {
.stream_name = "CPU",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm5100-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm5100.1-001a",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
@@ -180,7 +180,7 @@ static struct snd_soc_card lowland = {
.num_dapm_routes = ARRAY_SIZE(audio_paths),
};
-static __devinit int lowland_probe(struct platform_device *pdev)
+static int lowland_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &lowland;
int ret;
@@ -197,7 +197,7 @@ static __devinit int lowland_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lowland_remove(struct platform_device *pdev)
+static int lowland_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -213,7 +213,7 @@ static struct platform_driver lowland_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = lowland_probe,
- .remove = __devexit_p(lowland_remove),
+ .remove = lowland_remove,
};
module_platform_driver(lowland_driver);
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index 321d51134e4..c7e965f80d2 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -364,7 +364,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
{ /* Hifi Playback - for simultaneous use with voice below */
.name = "WM8753",
.stream_name = "WM8753 HiFi",
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "wm8753-hifi",
.codec_name = "wm8753.0-001a",
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index c86081992df..13bab79ad93 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -490,7 +490,7 @@ static struct snd_soc_dai_driver s3c_pcm_dai[] = {
},
};
-static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
+static int s3c_pcm_dev_probe(struct platform_device *pdev)
{
struct s3c_pcm_info *pcm;
struct resource *mem_res, *dmatx_res, *dmarx_res;
@@ -543,7 +543,7 @@ static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
ret = PTR_ERR(pcm->cclk);
goto err1;
}
- clk_enable(pcm->cclk);
+ clk_prepare_enable(pcm->cclk);
/* record our pcm structure for later use in the callbacks */
dev_set_drvdata(&pdev->dev, pcm);
@@ -568,7 +568,7 @@ static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
ret = -ENOENT;
goto err4;
}
- clk_enable(pcm->pclk);
+ clk_prepare_enable(pcm->pclk);
s3c_pcm_stereo_in[pdev->id].dma_addr = mem_res->start
+ S3C_PCM_RXFIFO;
@@ -589,27 +589,36 @@ static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
goto err5;
}
+ ret = asoc_dma_platform_register(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
+ goto err6;
+ }
+
return 0;
+err6:
+ snd_soc_unregister_dai(&pdev->dev);
err5:
- clk_disable(pcm->pclk);
+ clk_disable_unprepare(pcm->pclk);
clk_put(pcm->pclk);
err4:
iounmap(pcm->regs);
err3:
release_mem_region(mem_res->start, resource_size(mem_res));
err2:
- clk_disable(pcm->cclk);
+ clk_disable_unprepare(pcm->cclk);
clk_put(pcm->cclk);
err1:
return ret;
}
-static __devexit int s3c_pcm_dev_remove(struct platform_device *pdev)
+static int s3c_pcm_dev_remove(struct platform_device *pdev)
{
struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
struct resource *mem_res;
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -619,8 +628,8 @@ static __devexit int s3c_pcm_dev_remove(struct platform_device *pdev)
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem_res->start, resource_size(mem_res));
- clk_disable(pcm->cclk);
- clk_disable(pcm->pclk);
+ clk_disable_unprepare(pcm->cclk);
+ clk_disable_unprepare(pcm->pclk);
clk_put(pcm->pclk);
clk_put(pcm->cclk);
@@ -629,7 +638,7 @@ static __devexit int s3c_pcm_dev_remove(struct platform_device *pdev)
static struct platform_driver s3c_pcm_driver = {
.probe = s3c_pcm_dev_probe,
- .remove = __devexit_p(s3c_pcm_dev_remove),
+ .remove = s3c_pcm_dev_remove,
.driver = {
.name = "samsung-pcm",
.owner = THIS_MODULE,
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
index 21e12361a9c..a5826ea9cad 100644
--- a/sound/soc/samsung/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -85,7 +85,7 @@ static struct snd_soc_dai_link rx1950_uda1380_dai[] = {
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "uda1380-hifi",
.init = rx1950_uda1380_init,
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
.codec_name = "uda1380-codec.0-001a",
.ops = &rx1950_ops,
},
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index ac7701b3c5d..22133771639 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -160,20 +160,38 @@ static struct snd_soc_dai_driver s3c2412_i2s_dai = {
.ops = &s3c2412_i2s_dai_ops,
};
-static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev)
+static int s3c2412_iis_dev_probe(struct platform_device *pdev)
{
- return s3c_i2sv2_register_dai(&pdev->dev, -1, &s3c2412_i2s_dai);
+ int ret = 0;
+
+ ret = s3c_i2sv2_register_dai(&pdev->dev, -1, &s3c2412_i2s_dai);
+ if (ret) {
+ pr_err("failed to register the dai\n");
+ return ret;
+ }
+
+ ret = asoc_dma_platform_register(&pdev->dev);
+ if (ret) {
+ pr_err("failed to register the DMA: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ snd_soc_unregister_dai(&pdev->dev);
+ return ret;
}
-static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev)
+static int s3c2412_iis_dev_remove(struct platform_device *pdev)
{
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver s3c2412_iis_driver = {
.probe = s3c2412_iis_dev_probe,
- .remove = __devexit_p(s3c2412_iis_dev_remove),
+ .remove = s3c2412_iis_dev_remove,
.driver = {
.name = "s3c2412-iis",
.owner = THIS_MODULE,
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 0aae3a3883d..ee10e8704e9 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -465,20 +465,38 @@ static struct snd_soc_dai_driver s3c24xx_i2s_dai = {
.ops = &s3c24xx_i2s_dai_ops,
};
-static __devinit int s3c24xx_iis_dev_probe(struct platform_device *pdev)
+static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
{
- return snd_soc_register_dai(&pdev->dev, &s3c24xx_i2s_dai);
+ int ret = 0;
+
+ ret = snd_soc_register_dai(&pdev->dev, &s3c24xx_i2s_dai);
+ if (ret) {
+ pr_err("failed to register the dai\n");
+ return ret;
+ }
+
+ ret = asoc_dma_platform_register(&pdev->dev);
+ if (ret) {
+ pr_err("failed to register the dma: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ snd_soc_unregister_dai(&pdev->dev);
+ return ret;
}
-static __devexit int s3c24xx_iis_dev_remove(struct platform_device *pdev)
+static int s3c24xx_iis_dev_remove(struct platform_device *pdev)
{
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver s3c24xx_iis_driver = {
.probe = s3c24xx_iis_dev_probe,
- .remove = __devexit_p(s3c24xx_iis_dev_remove),
+ .remove = s3c24xx_iis_dev_remove,
.driver = {
.name = "s3c24xx-iis",
.owner = THIS_MODULE,
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
index 335a7d8a4a8..2c015f62ead 100644
--- a/sound/soc/samsung/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -313,8 +313,8 @@ const struct dev_pm_ops simtec_audio_pmops = {
EXPORT_SYMBOL_GPL(simtec_audio_pmops);
#endif
-int __devinit simtec_audio_core_probe(struct platform_device *pdev,
- struct snd_soc_card *card)
+int simtec_audio_core_probe(struct platform_device *pdev,
+ struct snd_soc_card *card)
{
struct platform_device *snd_dev;
int ret;
@@ -371,7 +371,7 @@ err_clk:
}
EXPORT_SYMBOL_GPL(simtec_audio_core_probe);
-int __devexit simtec_audio_remove(struct platform_device *pdev)
+int simtec_audio_remove(struct platform_device *pdev)
{
struct platform_device *snd_dev = platform_get_drvdata(pdev);
diff --git a/sound/soc/samsung/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
index 7ace6a87f41..d8a0543cae5 100644
--- a/sound/soc/samsung/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -82,7 +82,7 @@ static struct snd_soc_dai_link simtec_dai_aic33 = {
.codec_name = "tlv320aic3x-codec.0-001a",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "tlv320aic3x-hifi",
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
.init = simtec_hermes_init,
};
@@ -99,7 +99,7 @@ static struct snd_soc_card snd_soc_machine_simtec_aic33 = {
.num_dapm_routes = ARRAY_SIZE(base_map),
};
-static int __devinit simtec_audio_hermes_probe(struct platform_device *pd)
+static int simtec_audio_hermes_probe(struct platform_device *pd)
{
dev_info(&pd->dev, "probing....\n");
return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic33);
@@ -112,7 +112,7 @@ static struct platform_driver simtec_audio_hermes_platdrv = {
.pm = simtec_audio_pm,
},
.probe = simtec_audio_hermes_probe,
- .remove = __devexit_p(simtec_audio_remove),
+ .remove = simtec_audio_remove,
};
module_platform_driver(simtec_audio_hermes_platdrv);
diff --git a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index c42d5f00b0e..1ac0d7a63a3 100644
--- a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -71,7 +71,7 @@ static struct snd_soc_dai_link simtec_dai_aic23 = {
.codec_name = "tlv320aic3x-codec.0-001a",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "tlv320aic3x-hifi",
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
.init = simtec_tlv320aic23_init,
};
@@ -88,7 +88,7 @@ static struct snd_soc_card snd_soc_machine_simtec_aic23 = {
.num_dapm_routes = ARRAY_SIZE(base_map),
};
-static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd)
+static int simtec_audio_tlv320aic23_probe(struct platform_device *pd)
{
return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23);
}
@@ -100,7 +100,7 @@ static struct platform_driver simtec_audio_tlv320aic23_driver = {
.pm = simtec_audio_pm,
},
.probe = simtec_audio_tlv320aic23_probe,
- .remove = __devexit_p(simtec_audio_remove),
+ .remove = simtec_audio_remove,
};
module_platform_driver(simtec_audio_tlv320aic23_driver);
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index d731042e51b..333e1b7f06c 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -224,7 +224,7 @@ static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
.codec_dai_name = "uda134x-hifi",
.cpu_dai_name = "s3c24xx-iis",
.ops = &s3c24xx_uda134x_ops,
- .platform_name = "samsung-audio",
+ .platform_name = "s3c24xx-iis",
};
static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
index f2dcb424ea2..58ae3237ef6 100644
--- a/sound/soc/samsung/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -189,7 +189,7 @@ static struct snd_soc_dai_link smartq_dai[] = {
.stream_name = "SmartQ Hi-Fi",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8750-hifi",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8750.0-0x1a",
.init = smartq_wm8987_init,
.ops = &smartq_hifi_ops,
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 720ba29bb7e..c390aad68cf 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -24,7 +24,7 @@ static struct snd_soc_dai_link smdk2443_dai[] = {
.cpu_dai_name = "samsung-ac97",
.codec_dai_name = "ac97-hifi",
.codec_name = "ac97-codec",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-ac97",
},
};
diff --git a/sound/soc/samsung/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
index beaa9c15d69..a2f2363fe1c 100644
--- a/sound/soc/samsung/smdk_spdif.c
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -151,7 +151,7 @@ static struct snd_soc_ops smdk_spdif_ops = {
static struct snd_soc_dai_link smdk_dai = {
.name = "S/PDIF",
.stream_name = "S/PDIF PCM Playback",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-spdif",
.cpu_dai_name = "samsung-spdif",
.codec_dai_name = "dit-hifi",
.codec_name = "spdif-dit",
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index ade2809cf39..7e2b710763b 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -176,7 +176,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Playback",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8580-hifi-playback",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
.ops = &smdk_ops,
},
@@ -185,7 +185,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Capture",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8580-hifi-capture",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8580.0-001b",
.init = smdk_wm8580_init_paiftx,
.ops = &smdk_ops,
@@ -195,7 +195,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Playback",
.cpu_dai_name = "samsung-i2s.x",
.codec_dai_name = "wm8580-hifi-playback",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.x",
.codec_name = "wm8580.0-001b",
.ops = &smdk_ops,
},
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
index fab5322e9f0..e43bd4294f9 100644
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ b/sound/soc/samsung/smdk_wm8580pcm.c
@@ -135,7 +135,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Capture",
.cpu_dai_name = "samsung-pcm.0",
.codec_dai_name = "wm8580-hifi-capture",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-pcm.0",
.codec_name = "wm8580.0-001b",
.ops = &smdk_wm8580_pcm_ops,
},
@@ -153,7 +153,7 @@ static struct snd_soc_card smdk_pcm = {
* is absent (or not connected), so we connect EXT_VOICE_CLK(OSC4),
* 2.0484Mhz, directly with MCLK both Codec and SoC.
*/
-static int __devinit snd_smdk_probe(struct platform_device *pdev)
+static int snd_smdk_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -173,7 +173,7 @@ static int __devinit snd_smdk_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit snd_smdk_remove(struct platform_device *pdev)
+static int snd_smdk_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&smdk_pcm);
platform_set_drvdata(pdev, NULL);
@@ -186,7 +186,7 @@ static struct platform_driver snd_smdk_driver = {
.name = "samsung-smdk-pcm",
},
.probe = snd_smdk_probe,
- .remove = __devexit_p(snd_smdk_remove),
+ .remove = snd_smdk_remove,
};
module_platform_driver(snd_smdk_driver);
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 48dd4dd9ee0..b0d0ab8bff5 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -127,7 +127,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Pri_Dai",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8994-codec",
.init = smdk_wm8994_init_paiftx,
.ops = &smdk_ops,
@@ -136,7 +136,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Sec_Dai",
.cpu_dai_name = "samsung-i2s.4",
.codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.4",
.codec_name = "wm8994-codec",
.ops = &smdk_ops,
},
@@ -150,7 +150,7 @@ static struct snd_soc_card smdk = {
};
-static int __devinit smdk_audio_probe(struct platform_device *pdev)
+static int smdk_audio_probe(struct platform_device *pdev)
{
int ret;
struct snd_soc_card *card = &smdk;
@@ -164,7 +164,7 @@ static int __devinit smdk_audio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit smdk_audio_remove(struct platform_device *pdev)
+static int smdk_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -179,7 +179,7 @@ static struct platform_driver smdk_audio_driver = {
.owner = THIS_MODULE,
},
.probe = smdk_audio_probe,
- .remove = __devexit_p(smdk_audio_remove),
+ .remove = smdk_audio_remove,
};
module_platform_driver(smdk_audio_driver);
diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c
index 77ecba93511..3688a32000a 100644
--- a/sound/soc/samsung/smdk_wm8994pcm.c
+++ b/sound/soc/samsung/smdk_wm8994pcm.c
@@ -116,7 +116,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
.stream_name = "Primary PCM",
.cpu_dai_name = "samsung-pcm.0",
.codec_dai_name = "wm8994-aif1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-pcm.0",
.codec_name = "wm8994-codec",
.ops = &smdk_wm8994_pcm_ops,
},
@@ -129,7 +129,7 @@ static struct snd_soc_card smdk_pcm = {
.num_links = 1,
};
-static int __devinit snd_smdk_probe(struct platform_device *pdev)
+static int snd_smdk_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -143,7 +143,7 @@ static int __devinit snd_smdk_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit snd_smdk_remove(struct platform_device *pdev)
+static int snd_smdk_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&smdk_pcm);
platform_set_drvdata(pdev, NULL);
@@ -156,7 +156,7 @@ static struct platform_driver snd_smdk_driver = {
.name = "samsung-smdk-pcm",
},
.probe = snd_smdk_probe,
- .remove = __devexit_p(snd_smdk_remove),
+ .remove = snd_smdk_remove,
};
module_platform_driver(snd_smdk_driver);
diff --git a/sound/soc/samsung/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
index 55b2ca7f329..0d20e4ed27a 100644
--- a/sound/soc/samsung/smdk_wm9713.c
+++ b/sound/soc/samsung/smdk_wm9713.c
@@ -42,7 +42,7 @@ static struct snd_soc_card smdk;
static struct snd_soc_dai_link smdk_dai = {
.name = "AC97",
.stream_name = "AC97 PCM",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-ac97",
.cpu_dai_name = "samsung-ac97",
.codec_dai_name = "wm9713-hifi",
.codec_name = "wm9713-codec",
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index bc24c7af02b..5008e5bd6ed 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -357,7 +357,7 @@ static struct snd_soc_dai_driver samsung_spdif_dai = {
.resume = spdif_resume,
};
-static __devinit int spdif_probe(struct platform_device *pdev)
+static int spdif_probe(struct platform_device *pdev)
{
struct s3c_audio_pdata *spdif_pdata;
struct resource *mem_res, *dma_res;
@@ -397,7 +397,7 @@ static __devinit int spdif_probe(struct platform_device *pdev)
ret = -ENOENT;
goto err0;
}
- clk_enable(spdif->pclk);
+ clk_prepare_enable(spdif->pclk);
spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
if (IS_ERR(spdif->sclk)) {
@@ -405,7 +405,7 @@ static __devinit int spdif_probe(struct platform_device *pdev)
ret = -ENOENT;
goto err1;
}
- clk_enable(spdif->sclk);
+ clk_prepare_enable(spdif->sclk);
/* Request S/PDIF Register's memory region */
if (!request_mem_region(mem_res->start,
@@ -437,27 +437,35 @@ static __devinit int spdif_probe(struct platform_device *pdev)
spdif->dma_playback = &spdif_stereo_out;
- return 0;
+ ret = asoc_dma_platform_register(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
+ goto err5;
+ }
+ return 0;
+err5:
+ snd_soc_unregister_dai(&pdev->dev);
err4:
iounmap(spdif->regs);
err3:
release_mem_region(mem_res->start, resource_size(mem_res));
err2:
- clk_disable(spdif->sclk);
+ clk_disable_unprepare(spdif->sclk);
clk_put(spdif->sclk);
err1:
- clk_disable(spdif->pclk);
+ clk_disable_unprepare(spdif->pclk);
clk_put(spdif->pclk);
err0:
return ret;
}
-static __devexit int spdif_remove(struct platform_device *pdev)
+static int spdif_remove(struct platform_device *pdev)
{
struct samsung_spdif_info *spdif = &spdif_info;
struct resource *mem_res;
+ asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
iounmap(spdif->regs);
@@ -466,9 +474,9 @@ static __devexit int spdif_remove(struct platform_device *pdev)
if (mem_res)
release_mem_region(mem_res->start, resource_size(mem_res));
- clk_disable(spdif->sclk);
+ clk_disable_unprepare(spdif->sclk);
clk_put(spdif->sclk);
- clk_disable(spdif->pclk);
+ clk_disable_unprepare(spdif->pclk);
clk_put(spdif->pclk);
return 0;
@@ -476,7 +484,7 @@ static __devexit int spdif_remove(struct platform_device *pdev)
static struct platform_driver samsung_spdif_driver = {
.probe = spdif_probe,
- .remove = __devexit_p(spdif_remove),
+ .remove = spdif_remove,
.driver = {
.name = "samsung-spdif",
.owner = THIS_MODULE,
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index c7e1c28528a..57df90d6b7c 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -198,7 +198,7 @@ static struct snd_soc_dai_link speyside_dai[] = {
.stream_name = "CPU-DSP",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm0010-sdi1",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "spi0.0",
.init = speyside_wm0010_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
@@ -320,7 +320,7 @@ static struct snd_soc_card speyside = {
.late_probe = speyside_late_probe,
};
-static __devinit int speyside_probe(struct platform_device *pdev)
+static int speyside_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &speyside;
int ret;
@@ -337,7 +337,7 @@ static __devinit int speyside_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit speyside_remove(struct platform_device *pdev)
+static int speyside_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -353,7 +353,7 @@ static struct platform_driver speyside_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = speyside_probe,
- .remove = __devexit_p(speyside_remove),
+ .remove = speyside_remove,
};
module_platform_driver(speyside_driver);
diff --git a/sound/soc/samsung/tobermory.c b/sound/soc/samsung/tobermory.c
index 9199649bf78..f21ff608a81 100644
--- a/sound/soc/samsung/tobermory.c
+++ b/sound/soc/samsung/tobermory.c
@@ -110,7 +110,7 @@ static struct snd_soc_dai_link tobermory_dai[] = {
.stream_name = "CPU",
.cpu_dai_name = "samsung-i2s.0",
.codec_dai_name = "wm8962",
- .platform_name = "samsung-audio",
+ .platform_name = "samsung-i2s.0",
.codec_name = "wm8962.1-001a",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM,
@@ -214,7 +214,7 @@ static struct snd_soc_card tobermory = {
.late_probe = tobermory_late_probe,
};
-static __devinit int tobermory_probe(struct platform_device *pdev)
+static int tobermory_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &tobermory;
int ret;
@@ -231,7 +231,7 @@ static __devinit int tobermory_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tobermory_remove(struct platform_device *pdev)
+static int tobermory_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -247,7 +247,7 @@ static struct platform_driver tobermory_driver = {
.pm = &snd_soc_pm_ops,
},
.probe = tobermory_probe,
- .remove = __devexit_p(tobermory_remove),
+ .remove = tobermory_remove,
};
module_platform_driver(tobermory_driver);
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 7da20186b19..19eff8fc4fd 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -348,12 +348,12 @@ static struct snd_soc_platform sh7760_soc_platform = {
.pcm_free = camelot_pcm_free,
};
-static int __devinit sh7760_soc_platform_probe(struct platform_device *pdev)
+static int sh7760_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
}
-static int __devexit sh7760_soc_platform_remove(struct platform_device *pdev)
+static int sh7760_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -366,7 +366,7 @@ static struct platform_driver sh7760_pcm_driver = {
},
.probe = sh7760_soc_platform_probe,
- .remove = __devexit_p(sh7760_soc_platform_remove),
+ .remove = sh7760_soc_platform_remove,
};
module_platform_driver(sh7760_pcm_driver);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 9d7f30774a4..a606d0f93d1 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/workqueue.h>
#include <sound/soc.h>
+#include <sound/pcm_params.h>
#include <sound/sh_fsi.h>
/* PortA/PortB register */
@@ -189,6 +190,14 @@ typedef int (*set_rate_func)(struct device *dev, int rate, int enable);
*/
/*
+ * FSI clock
+ *
+ * FSIxCLK [CPG] (ick) -------> |
+ * |-> FSI_DIV (div)-> FSI2
+ * FSIxCK [external] (xck) ---> |
+ */
+
+/*
* struct
*/
@@ -228,6 +237,20 @@ struct fsi_stream {
dma_addr_t dma;
};
+struct fsi_clk {
+ /* see [FSI clock] */
+ struct clk *own;
+ struct clk *xck;
+ struct clk *ick;
+ struct clk *div;
+ int (*set_rate)(struct device *dev,
+ struct fsi_priv *fsi,
+ unsigned long rate);
+
+ unsigned long rate;
+ unsigned int count;
+};
+
struct fsi_priv {
void __iomem *base;
struct fsi_master *master;
@@ -236,11 +259,17 @@ struct fsi_priv {
struct fsi_stream playback;
struct fsi_stream capture;
+ struct fsi_clk clock;
+
u32 fmt;
int chan_num:16;
int clk_master:1;
+ int clk_cpg:1;
int spdif:1;
+ int enable_stream:1;
+ int bit_clk_inv:1;
+ int lr_clk_inv:1;
long rate;
};
@@ -370,6 +399,11 @@ static int fsi_is_spdif(struct fsi_priv *fsi)
return fsi->spdif;
}
+static int fsi_is_enable_stream(struct fsi_priv *fsi)
+{
+ return fsi->enable_stream;
+}
+
static int fsi_is_play(struct snd_pcm_substream *substream)
{
return substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
@@ -717,14 +751,335 @@ static void fsi_spdif_clk_ctrl(struct fsi_priv *fsi, int enable)
/*
* clock function
*/
+static int fsi_clk_init(struct device *dev,
+ struct fsi_priv *fsi,
+ int xck,
+ int ick,
+ int div,
+ int (*set_rate)(struct device *dev,
+ struct fsi_priv *fsi,
+ unsigned long rate))
+{
+ struct fsi_clk *clock = &fsi->clock;
+ int is_porta = fsi_is_port_a(fsi);
+
+ clock->xck = NULL;
+ clock->ick = NULL;
+ clock->div = NULL;
+ clock->rate = 0;
+ clock->count = 0;
+ clock->set_rate = set_rate;
+
+ clock->own = devm_clk_get(dev, NULL);
+ if (IS_ERR(clock->own))
+ return -EINVAL;
+
+ /* external clock */
+ if (xck) {
+ clock->xck = devm_clk_get(dev, is_porta ? "xcka" : "xckb");
+ if (IS_ERR(clock->xck)) {
+ dev_err(dev, "can't get xck clock\n");
+ return -EINVAL;
+ }
+ if (clock->xck == clock->own) {
+ dev_err(dev, "cpu doesn't support xck clock\n");
+ return -EINVAL;
+ }
+ }
+
+ /* FSIACLK/FSIBCLK */
+ if (ick) {
+ clock->ick = devm_clk_get(dev, is_porta ? "icka" : "ickb");
+ if (IS_ERR(clock->ick)) {
+ dev_err(dev, "can't get ick clock\n");
+ return -EINVAL;
+ }
+ if (clock->ick == clock->own) {
+ dev_err(dev, "cpu doesn't support ick clock\n");
+ return -EINVAL;
+ }
+ }
+
+ /* FSI-DIV */
+ if (div) {
+ clock->div = devm_clk_get(dev, is_porta ? "diva" : "divb");
+ if (IS_ERR(clock->div)) {
+ dev_err(dev, "can't get div clock\n");
+ return -EINVAL;
+ }
+ if (clock->div == clock->own) {
+ dev_err(dev, "cpu doens't support div clock\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#define fsi_clk_invalid(fsi) fsi_clk_valid(fsi, 0)
+static void fsi_clk_valid(struct fsi_priv *fsi, unsigned long rate)
+{
+ fsi->clock.rate = rate;
+}
+
+static int fsi_clk_is_valid(struct fsi_priv *fsi)
+{
+ return fsi->clock.set_rate &&
+ fsi->clock.rate;
+}
+
+static int fsi_clk_enable(struct device *dev,
+ struct fsi_priv *fsi,
+ unsigned long rate)
+{
+ struct fsi_clk *clock = &fsi->clock;
+ int ret = -EINVAL;
+
+ if (!fsi_clk_is_valid(fsi))
+ return ret;
+
+ if (0 == clock->count) {
+ ret = clock->set_rate(dev, fsi, rate);
+ if (ret < 0) {
+ fsi_clk_invalid(fsi);
+ return ret;
+ }
+
+ if (clock->xck)
+ clk_enable(clock->xck);
+ if (clock->ick)
+ clk_enable(clock->ick);
+ if (clock->div)
+ clk_enable(clock->div);
+
+ clock->count++;
+ }
+
+ return ret;
+}
+
+static int fsi_clk_disable(struct device *dev,
+ struct fsi_priv *fsi)
+{
+ struct fsi_clk *clock = &fsi->clock;
+
+ if (!fsi_clk_is_valid(fsi))
+ return -EINVAL;
+
+ if (1 == clock->count--) {
+ if (clock->xck)
+ clk_disable(clock->xck);
+ if (clock->ick)
+ clk_disable(clock->ick);
+ if (clock->div)
+ clk_disable(clock->div);
+ }
+
+ return 0;
+}
+
+static int fsi_clk_set_ackbpf(struct device *dev,
+ struct fsi_priv *fsi,
+ int ackmd, int bpfmd)
+{
+ u32 data = 0;
+
+ /* check ackmd/bpfmd relationship */
+ if (bpfmd > ackmd) {
+ dev_err(dev, "unsupported rate (%d/%d)\n", ackmd, bpfmd);
+ return -EINVAL;
+ }
+
+ /* ACKMD */
+ switch (ackmd) {
+ case 512:
+ data |= (0x0 << 12);
+ break;
+ case 256:
+ data |= (0x1 << 12);
+ break;
+ case 128:
+ data |= (0x2 << 12);
+ break;
+ case 64:
+ data |= (0x3 << 12);
+ break;
+ case 32:
+ data |= (0x4 << 12);
+ break;
+ default:
+ dev_err(dev, "unsupported ackmd (%d)\n", ackmd);
+ return -EINVAL;
+ }
+
+ /* BPFMD */
+ switch (bpfmd) {
+ case 32:
+ data |= (0x0 << 8);
+ break;
+ case 64:
+ data |= (0x1 << 8);
+ break;
+ case 128:
+ data |= (0x2 << 8);
+ break;
+ case 256:
+ data |= (0x3 << 8);
+ break;
+ case 512:
+ data |= (0x4 << 8);
+ break;
+ case 16:
+ data |= (0x7 << 8);
+ break;
+ default:
+ dev_err(dev, "unsupported bpfmd (%d)\n", bpfmd);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "ACKMD/BPFMD = %d/%d\n", ackmd, bpfmd);
+
+ fsi_reg_mask_set(fsi, CKG1, (ACKMD_MASK | BPFMD_MASK), data);
+ udelay(10);
+
+ return 0;
+}
+
+static int fsi_clk_set_rate_external(struct device *dev,
+ struct fsi_priv *fsi,
+ unsigned long rate)
+{
+ struct clk *xck = fsi->clock.xck;
+ struct clk *ick = fsi->clock.ick;
+ unsigned long xrate;
+ int ackmd, bpfmd;
+ int ret = 0;
+
+ /* check clock rate */
+ xrate = clk_get_rate(xck);
+ if (xrate % rate) {
+ dev_err(dev, "unsupported clock rate\n");
+ return -EINVAL;
+ }
+
+ clk_set_parent(ick, xck);
+ clk_set_rate(ick, xrate);
+
+ bpfmd = fsi->chan_num * 32;
+ ackmd = xrate / rate;
+
+ dev_dbg(dev, "external/rate = %ld/%ld\n", xrate, rate);
+
+ ret = fsi_clk_set_ackbpf(dev, fsi, ackmd, bpfmd);
+ if (ret < 0)
+ dev_err(dev, "%s failed", __func__);
+
+ return ret;
+}
+
+static int fsi_clk_set_rate_cpg(struct device *dev,
+ struct fsi_priv *fsi,
+ unsigned long rate)
+{
+ struct clk *ick = fsi->clock.ick;
+ struct clk *div = fsi->clock.div;
+ unsigned long target = 0; /* 12288000 or 11289600 */
+ unsigned long actual, cout;
+ unsigned long diff, min;
+ unsigned long best_cout, best_act;
+ int adj;
+ int ackmd, bpfmd;
+ int ret = -EINVAL;
+
+ if (!(12288000 % rate))
+ target = 12288000;
+ if (!(11289600 % rate))
+ target = 11289600;
+ if (!target) {
+ dev_err(dev, "unsupported rate\n");
+ return ret;
+ }
+
+ bpfmd = fsi->chan_num * 32;
+ ackmd = target / rate;
+ ret = fsi_clk_set_ackbpf(dev, fsi, ackmd, bpfmd);
+ if (ret < 0) {
+ dev_err(dev, "%s failed", __func__);
+ return ret;
+ }
+
+ /*
+ * The clock flow is
+ *
+ * [CPG] = cout => [FSI_DIV] = audio => [FSI] => [codec]
+ *
+ * But it needs to find the best match of the CPG and FSI_DIV
+ * combination, since it is difficult to generate the exact
+ * audio clock frequency from the ick clock alone, because
+ * ick is derived from its parent clock.
+ *
+ * target = rate x [512/256/128/64]fs
+ * cout = round(target x adjustment)
+ * actual = cout / adjustment (by FSI-DIV) ~= target
+ * audio = actual
+ */
+ min = ~0;
+ best_cout = 0;
+ best_act = 0;
+ for (adj = 1; adj < 0xffff; adj++) {
+
+ cout = target * adj;
+ if (cout > 100000000) /* max clock = 100MHz */
+ break;
+
+ /* cout/actual audio clock */
+ cout = clk_round_rate(ick, cout);
+ actual = cout / adj;
+
+ /* find best frequency */
+ diff = abs(actual - target);
+ if (diff < min) {
+ min = diff;
+ best_cout = cout;
+ best_act = actual;
+ }
+ }
+
+ ret = clk_set_rate(ick, best_cout);
+ if (ret < 0) {
+ dev_err(dev, "ick clock failed\n");
+ return -EIO;
+ }
+
+ ret = clk_set_rate(div, clk_round_rate(div, best_act));
+ if (ret < 0) {
+ dev_err(dev, "div clock failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(dev, "ick/div = %ld/%ld\n",
+ clk_get_rate(ick), clk_get_rate(div));
+
+ return ret;
+}
+
static int fsi_set_master_clk(struct device *dev, struct fsi_priv *fsi,
long rate, int enable)
{
set_rate_func set_rate = fsi_get_info_set_rate(fsi);
int ret;
- if (!set_rate)
- return 0;
+ /*
+ * CAUTION
+ *
+ * set_rate will be deleted
+ */
+ if (!set_rate) {
+ if (enable)
+ return fsi_clk_enable(dev, fsi, rate);
+ else
+ return fsi_clk_disable(dev, fsi);
+ }
ret = set_rate(dev, rate, enable);
if (ret < 0) /* error */
@@ -792,10 +1147,9 @@ static int fsi_set_master_clk(struct device *dev, struct fsi_priv *fsi,
*/
static void fsi_pio_push16(struct fsi_priv *fsi, u8 *_buf, int samples)
{
- u32 enable_stream = fsi_get_info_flags(fsi) & SH_FSI_ENABLE_STREAM_MODE;
int i;
- if (enable_stream) {
+ if (fsi_is_enable_stream(fsi)) {
/*
* stream mode
* see
@@ -953,8 +1307,6 @@ static void fsi_pio_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
static int fsi_pio_push_init(struct fsi_priv *fsi, struct fsi_stream *io)
{
- u32 enable_stream = fsi_get_info_flags(fsi) & SH_FSI_ENABLE_STREAM_MODE;
-
/*
* we can use 16bit stream mode
* when "playback" and "16bit data"
@@ -962,7 +1314,7 @@ static int fsi_pio_push_init(struct fsi_priv *fsi, struct fsi_stream *io)
* see
* fsi_pio_push16()
*/
- if (enable_stream)
+ if (fsi_is_enable_stream(fsi))
io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) |
BUSOP_SET(16, PACKAGE_16BITBUS_STREAM);
else
@@ -1296,6 +1648,16 @@ static int fsi_hw_startup(struct fsi_priv *fsi,
/* clock inversion (CKG2) */
data = 0;
+ if (fsi->bit_clk_inv)
+ data |= (1 << 0);
+ if (fsi->lr_clk_inv)
+ data |= (1 << 4);
+ if (fsi_is_clk_master(fsi))
+ data <<= 8;
+ /* FIXME
+ *
+ * SH_FSI_xxx_INV style will be removed
+ */
if (SH_FSI_LRM_INV & flags)
data |= 1 << 12;
if (SH_FSI_BRM_INV & flags)
@@ -1334,14 +1696,21 @@ static int fsi_hw_startup(struct fsi_priv *fsi,
/* fifo init */
fsi_fifo_init(fsi, io, dev);
+ /* start master clock */
+ if (fsi_is_clk_master(fsi))
+ return fsi_set_master_clk(dev, fsi, fsi->rate, 1);
+
return 0;
}
-static void fsi_hw_shutdown(struct fsi_priv *fsi,
+static int fsi_hw_shutdown(struct fsi_priv *fsi,
struct device *dev)
{
+ /* stop master clock */
if (fsi_is_clk_master(fsi))
- fsi_set_master_clk(dev, fsi, fsi->rate, 0);
+ return fsi_set_master_clk(dev, fsi, fsi->rate, 0);
+
+ return 0;
}
static int fsi_dai_startup(struct snd_pcm_substream *substream,
@@ -1349,6 +1718,7 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
{
struct fsi_priv *fsi = fsi_get_priv(substream);
+ fsi_clk_invalid(fsi);
fsi->rate = 0;
return 0;
@@ -1359,6 +1729,7 @@ static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
{
struct fsi_priv *fsi = fsi_get_priv(substream);
+ fsi_clk_invalid(fsi);
fsi->rate = 0;
}
@@ -1372,13 +1743,16 @@ static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
fsi_stream_init(fsi, io, substream);
- fsi_hw_startup(fsi, io, dai->dev);
- ret = fsi_stream_transfer(io);
- if (0 == ret)
+ if (!ret)
+ ret = fsi_hw_startup(fsi, io, dai->dev);
+ if (!ret)
+ ret = fsi_stream_transfer(io);
+ if (!ret)
fsi_stream_start(fsi, io);
break;
case SNDRV_PCM_TRIGGER_STOP:
- fsi_hw_shutdown(fsi, dai->dev);
+ if (!ret)
+ ret = fsi_hw_shutdown(fsi, dai->dev);
fsi_stream_stop(fsi, io);
fsi_stream_quit(fsi, io);
break;
@@ -1414,7 +1788,6 @@ static int fsi_set_fmt_spdif(struct fsi_priv *fsi)
fsi->fmt = CR_DTMD_SPDIF_PCM | CR_PCM;
fsi->chan_num = 2;
- fsi->spdif = 1;
return 0;
}
@@ -1423,7 +1796,6 @@ static int fsi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct fsi_priv *fsi = fsi_get_priv_frm_dai(dai);
set_rate_func set_rate = fsi_get_info_set_rate(fsi);
- u32 flags = fsi_get_info_flags(fsi);
int ret;
/* set master/slave audio interface */
@@ -1437,23 +1809,50 @@ static int fsi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- if (fsi_is_clk_master(fsi) && !set_rate) {
- dev_err(dai->dev, "platform doesn't have set_rate\n");
- return -EINVAL;
- }
-
- /* set format */
- switch (flags & SH_FSI_FMT_MASK) {
- case SH_FSI_FMT_DAI:
- ret = fsi_set_fmt_dai(fsi, fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ /* set clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_IF:
+ fsi->bit_clk_inv = 0;
+ fsi->lr_clk_inv = 1;
break;
- case SH_FSI_FMT_SPDIF:
- ret = fsi_set_fmt_spdif(fsi);
+ case SND_SOC_DAIFMT_IB_NF:
+ fsi->bit_clk_inv = 1;
+ fsi->lr_clk_inv = 0;
break;
+ case SND_SOC_DAIFMT_IB_IF:
+ fsi->bit_clk_inv = 1;
+ fsi->lr_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
default:
- ret = -EINVAL;
+ fsi->bit_clk_inv = 0;
+ fsi->lr_clk_inv = 0;
+ break;
+ }
+
+ if (fsi_is_clk_master(fsi)) {
+ /*
+ * CAUTION
+ *
+ * set_rate will be deleted
+ */
+ if (set_rate)
+ dev_warn(dai->dev, "set_rate will be removed soon\n");
+
+ if (fsi->clk_cpg)
+ fsi_clk_init(dai->dev, fsi, 0, 1, 1,
+ fsi_clk_set_rate_cpg);
+ else
+ fsi_clk_init(dai->dev, fsi, 1, 1, 0,
+ fsi_clk_set_rate_external);
}
+ /* set format */
+ if (fsi_is_spdif(fsi))
+ ret = fsi_set_fmt_spdif(fsi);
+ else
+ ret = fsi_set_fmt_dai(fsi, fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+
return ret;
}
@@ -1462,19 +1861,13 @@ static int fsi_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsi_priv *fsi = fsi_get_priv(substream);
- long rate = params_rate(params);
- int ret;
- if (!fsi_is_clk_master(fsi))
- return 0;
-
- ret = fsi_set_master_clk(dai->dev, fsi, rate, 1);
- if (ret < 0)
- return ret;
-
- fsi->rate = rate;
+ if (fsi_is_clk_master(fsi)) {
+ fsi->rate = params_rate(params);
+ fsi_clk_valid(fsi, fsi->rate);
+ }
- return ret;
+ return 0;
}
static const struct snd_soc_dai_ops fsi_dai_ops = {
@@ -1498,7 +1891,7 @@ static struct snd_pcm_hardware fsi_pcm_hardware = {
.rates = FSI_RATES,
.rate_min = 8000,
.rate_max = 192000,
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 32,
@@ -1586,14 +1979,14 @@ static struct snd_soc_dai_driver fsi_soc_dai[] = {
.playback = {
.rates = FSI_RATES,
.formats = FSI_FMTS,
- .channels_min = 1,
- .channels_max = 8,
+ .channels_min = 2,
+ .channels_max = 2,
},
.capture = {
.rates = FSI_RATES,
.formats = FSI_FMTS,
- .channels_min = 1,
- .channels_max = 8,
+ .channels_min = 2,
+ .channels_max = 2,
},
.ops = &fsi_dai_ops,
},
@@ -1602,14 +1995,14 @@ static struct snd_soc_dai_driver fsi_soc_dai[] = {
.playback = {
.rates = FSI_RATES,
.formats = FSI_FMTS,
- .channels_min = 1,
- .channels_max = 8,
+ .channels_min = 2,
+ .channels_max = 2,
},
.capture = {
.rates = FSI_RATES,
.formats = FSI_FMTS,
- .channels_min = 1,
- .channels_max = 8,
+ .channels_min = 2,
+ .channels_max = 2,
},
.ops = &fsi_dai_ops,
},
@@ -1624,15 +2017,29 @@ static struct snd_soc_platform_driver fsi_soc_platform = {
/*
* platform function
*/
-static void fsi_handler_init(struct fsi_priv *fsi)
+static void fsi_port_info_init(struct fsi_priv *fsi,
+ struct sh_fsi_port_info *info)
+{
+ if (info->flags & SH_FSI_FMT_SPDIF)
+ fsi->spdif = 1;
+
+ if (info->flags & SH_FSI_CLK_CPG)
+ fsi->clk_cpg = 1;
+
+ if (info->flags & SH_FSI_ENABLE_STREAM_MODE)
+ fsi->enable_stream = 1;
+}
+
+static void fsi_handler_init(struct fsi_priv *fsi,
+ struct sh_fsi_port_info *info)
{
fsi->playback.handler = &fsi_pio_push_handler; /* default PIO */
fsi->playback.priv = fsi;
fsi->capture.handler = &fsi_pio_pop_handler; /* default PIO */
fsi->capture.priv = fsi;
- if (fsi->info->tx_id) {
- fsi->playback.slave.shdma_slave.slave_id = fsi->info->tx_id;
+ if (info->tx_id) {
+ fsi->playback.slave.shdma_slave.slave_id = info->tx_id;
fsi->playback.handler = &fsi_dma_push_handler;
}
}
@@ -1642,10 +2049,16 @@ static int fsi_probe(struct platform_device *pdev)
struct fsi_master *master;
const struct platform_device_id *id_entry;
struct sh_fsi_platform_info *info = pdev->dev.platform_data;
+ struct sh_fsi_port_info nul_info, *pinfo;
+ struct fsi_priv *fsi;
struct resource *res;
unsigned int irq;
int ret;
+ nul_info.flags = 0;
+ nul_info.tx_id = 0;
+ nul_info.rx_id = 0;
+
id_entry = pdev->id_entry;
if (!id_entry) {
dev_err(&pdev->dev, "unknown fsi device\n");
@@ -1678,22 +2091,28 @@ static int fsi_probe(struct platform_device *pdev)
spin_lock_init(&master->lock);
/* FSI A setting */
- master->fsia.base = master->base;
- master->fsia.master = master;
- master->fsia.info = &info->port_a;
- fsi_handler_init(&master->fsia);
- ret = fsi_stream_probe(&master->fsia, &pdev->dev);
+ pinfo = (info) ? &info->port_a : &nul_info;
+ fsi = &master->fsia;
+ fsi->base = master->base;
+ fsi->master = master;
+ fsi->info = pinfo;
+ fsi_port_info_init(fsi, pinfo);
+ fsi_handler_init(fsi, pinfo);
+ ret = fsi_stream_probe(fsi, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "FSIA stream probe failed\n");
return ret;
}
/* FSI B setting */
- master->fsib.base = master->base + 0x40;
- master->fsib.master = master;
- master->fsib.info = &info->port_b;
- fsi_handler_init(&master->fsib);
- ret = fsi_stream_probe(&master->fsib, &pdev->dev);
+ pinfo = (info) ? &info->port_b : &nul_info;
+ fsi = &master->fsib;
+ fsi->base = master->base + 0x40;
+ fsi->master = master;
+ fsi->info = pinfo;
+ fsi_port_info_init(fsi, pinfo);
+ fsi_handler_init(fsi, pinfo);
+ ret = fsi_stream_probe(fsi, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "FSIB stream probe failed\n");
goto exit_fsia;
@@ -1702,7 +2121,7 @@ static int fsi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
dev_set_drvdata(&pdev->dev, master);
- ret = request_irq(irq, &fsi_interrupt, 0,
+ ret = devm_request_irq(&pdev->dev, irq, &fsi_interrupt, 0,
id_entry->name, master);
if (ret) {
dev_err(&pdev->dev, "irq request err\n");
@@ -1712,7 +2131,7 @@ static int fsi_probe(struct platform_device *pdev)
ret = snd_soc_register_platform(&pdev->dev, &fsi_soc_platform);
if (ret < 0) {
dev_err(&pdev->dev, "cannot snd soc register\n");
- goto exit_free_irq;
+ goto exit_fsib;
}
ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai,
@@ -1726,8 +2145,6 @@ static int fsi_probe(struct platform_device *pdev)
exit_snd_soc:
snd_soc_unregister_platform(&pdev->dev);
-exit_free_irq:
- free_irq(irq, master);
exit_fsib:
pm_runtime_disable(&pdev->dev);
fsi_stream_remove(&master->fsib);
@@ -1743,7 +2160,6 @@ static int fsi_remove(struct platform_device *pdev)
master = dev_get_drvdata(&pdev->dev);
- free_irq(master->irq, master);
pm_runtime_disable(&pdev->dev);
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
@@ -1774,10 +2190,6 @@ static void __fsi_resume(struct fsi_priv *fsi,
return;
fsi_hw_startup(fsi, io, dev);
-
- if (fsi_is_clk_master(fsi) && fsi->rate)
- fsi_set_master_clk(dev, fsi, fsi->rate, 1);
-
fsi_stream_start(fsi, io);
}
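
The best-match loop in fsi_clk_set_rate_cpg() above is easier to follow with concrete numbers. The standalone simulation below replays the search for a 44100 Hz stream, whose target is 11289600 Hz (256fs). The round_rate() stub standing in for clk_round_rate() is purely an assumption (it pretends the CPG parent quantizes to 500 kHz steps), so the figures only illustrate how the scan trades a larger CPG rate against a larger FSI-DIV ratio:

#include <stdio.h>

/* Stand-in for clk_round_rate(): pretend the CPG parent can only
 * produce multiples of 500 kHz.  Purely illustrative. */
static unsigned long round_rate(unsigned long rate)
{
	return (rate / 500000) * 500000;
}

int main(void)
{
	unsigned long rate = 44100;
	unsigned long target = 11289600;	/* 256fs; 11289600 % 44100 == 0 */
	unsigned long min = ~0UL, best_adj = 0, best_cout = 0, best_act = 0;
	unsigned long adj, cout, actual, diff;

	for (adj = 1; adj < 0xffff; adj++) {
		cout = target * adj;
		if (cout > 100000000)		/* max clock = 100MHz */
			break;

		cout = round_rate(cout);	/* what the CPG can really do */
		actual = cout / adj;		/* after FSI-DIV divides by adj */

		diff = (actual > target) ? actual - target : target - actual;
		if (diff < min) {
			min = diff;
			best_adj = adj;
			best_cout = cout;
			best_act = actual;
		}
	}

	/* With this stub the scan settles on adj=7: cout=79000000,
	 * actual=11285714, i.e. about 3.9 kHz short of the target. */
	printf("adj=%lu cout=%lu actual=%lu (off by %lu Hz)\n",
	       best_adj, best_cout, best_act, min);
	return 0;
}
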
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index 3474d7befe5..4cc2d64ef47 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -310,13 +310,13 @@ static struct snd_soc_dai_driver sh4_hac_dai[] = {
#endif
};
-static int __devinit hac_soc_platform_probe(struct platform_device *pdev)
+static int hac_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_dais(&pdev->dev, sh4_hac_dai,
ARRAY_SIZE(sh4_hac_dai));
}
-static int __devexit hac_soc_platform_remove(struct platform_device *pdev)
+static int hac_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(sh4_hac_dai));
return 0;
@@ -329,7 +329,7 @@ static struct platform_driver hac_pcm_driver = {
},
.probe = hac_soc_platform_probe,
- .remove = __devexit_p(hac_soc_platform_remove),
+ .remove = hac_soc_platform_remove,
};
module_platform_driver(hac_pcm_driver);
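
hac.c, and siu_dai.c and ssi.c below, all receive the same mechanical change: the __devinit/__devexit section annotations and the __devexit_p() wrapper are dropped, since with CONFIG_HOTPLUG going away the annotations no longer discard any code. A hedged sketch of the pattern; bar_* is an invented name, and the old form shown in the comment only built on kernels that still defined those macros:

#include <linux/module.h>
#include <linux/platform_device.h>

/*
 * Old form, for contrast (before the tree-wide __dev* removal):
 *
 *	static int __devinit bar_probe(struct platform_device *pdev);
 *	static int __devexit bar_remove(struct platform_device *pdev);
 *	...
 *	.remove = __devexit_p(bar_remove),   (NULL if hotplug was off)
 */

/* New form: plain functions and a plain function pointer. */
static int bar_probe(struct platform_device *pdev)
{
	return 0;
}

static int bar_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver bar_driver = {
	.driver	= { .name = "bar" },
	.probe	= bar_probe,
	.remove	= bar_remove,
};
module_platform_driver(bar_driver);
MODULE_LICENSE("GPL");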
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index 52d4c17b123..34facdc9e4a 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -726,7 +726,7 @@ static struct snd_soc_dai_driver siu_i2s_dai = {
.ops = &siu_dai_ops,
};
-static int __devinit siu_probe(struct platform_device *pdev)
+static int siu_probe(struct platform_device *pdev)
{
const struct firmware *fw_entry;
struct resource *res, *region;
@@ -815,7 +815,7 @@ ereqfw:
return ret;
}
-static int __devexit siu_remove(struct platform_device *pdev)
+static int siu_remove(struct platform_device *pdev)
{
struct siu_info *info = dev_get_drvdata(&pdev->dev);
struct resource *res;
@@ -843,7 +843,7 @@ static struct platform_driver siu_driver = {
.name = "siu-pcm-audio",
},
.probe = siu_probe,
- .remove = __devexit_p(siu_remove),
+ .remove = siu_remove,
};
module_platform_driver(siu_driver);
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index ff82b56a886..c8e73a70393 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -379,13 +379,13 @@ static struct snd_soc_dai_driver sh4_ssi_dai[] = {
#endif
};
-static int __devinit sh4_soc_dai_probe(struct platform_device *pdev)
+static int sh4_soc_dai_probe(struct platform_device *pdev)
{
return snd_soc_register_dais(&pdev->dev, sh4_ssi_dai,
ARRAY_SIZE(sh4_ssi_dai));
}
-static int __devexit sh4_soc_dai_remove(struct platform_device *pdev)
+static int sh4_soc_dai_remove(struct platform_device *pdev)
{
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(sh4_ssi_dai));
return 0;
@@ -398,7 +398,7 @@ static struct platform_driver sh4_ssi_driver = {
},
.probe = sh4_soc_dai_probe,
- .remove = __devexit_p(sh4_soc_dai_remove),
+ .remove = sh4_soc_dai_remove,
};
module_platform_driver(sh4_ssi_driver);
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 9d56f0218f4..e72f55428f0 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -88,7 +88,7 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
ret = snd_soc_write(codec, i, val);
if (ret)
return ret;
- dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+ dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
i, val);
}
return 0;
@@ -156,7 +156,7 @@ int snd_soc_cache_init(struct snd_soc_codec *codec)
/* Fall back to flat compression */
if (i == ARRAY_SIZE(cache_types)) {
- dev_warn(codec->dev, "Could not match compress type: %d\n",
+ dev_warn(codec->dev, "ASoC: Could not match compress type: %d\n",
codec->compress_type);
i = 0;
}
@@ -166,7 +166,7 @@ int snd_soc_cache_init(struct snd_soc_codec *codec)
if (codec->cache_ops->init) {
if (codec->cache_ops->name)
- dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
+ dev_dbg(codec->dev, "ASoC: Initializing %s cache for %s codec\n",
codec->cache_ops->name, codec->name);
return codec->cache_ops->init(codec);
}
@@ -181,7 +181,7 @@ int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
if (codec->cache_ops && codec->cache_ops->exit) {
if (codec->cache_ops->name)
- dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
+ dev_dbg(codec->dev, "ASoC: Destroying %s cache for %s codec\n",
codec->cache_ops->name, codec->name);
return codec->cache_ops->exit(codec);
}
@@ -265,7 +265,7 @@ int snd_soc_cache_sync(struct snd_soc_codec *codec)
name = "unknown";
if (codec->cache_ops->name)
- dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
+ dev_dbg(codec->dev, "ASoC: Syncing %s cache for %s codec\n",
codec->cache_ops->name, codec->name);
trace_snd_soc_cache_sync(codec, name, "start");
ret = codec->cache_ops->sync(codec);
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 967d0e173e1..5fbfb06e808 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -113,7 +113,7 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_STOP);
} else
- codec_dai->pop_wait = 1;
+ rtd->pop_wait = 1;
schedule_delayed_work(&rtd->delayed_work,
msecs_to_jiffies(rtd->pmdown_time));
} else {
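
The one-line change above is part of moving the pop_wait flag from the CODEC DAI into struct snd_soc_pcm_runtime (rtd): the deferred power-down it guards is a property of the PCM link, so the per-link runtime next to the delayed work is its natural home. A minimal sketch of the flag-plus-delayed-work pattern with per-link state; struct my_link and the my_* helpers are illustrative only, not ASoC API:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_link {
	unsigned int		pop_wait:1;
	unsigned int		pmdown_time;	/* ms */
	struct delayed_work	delayed_work;
};

static void my_link_power_down(struct work_struct *work)
{
	struct my_link *link =
		container_of(work, struct my_link, delayed_work.work);

	if (link->pop_wait) {
		link->pop_wait = 0;
		/* power down the playback path here */
	}
}

static void my_link_init(struct my_link *link, unsigned int pmdown_ms)
{
	link->pop_wait = 0;
	link->pmdown_time = pmdown_ms;
	INIT_DELAYED_WORK(&link->delayed_work, my_link_power_down);
}

static void my_link_stream_stop(struct my_link *link)
{
	/* defer the power-down so a quick restart does not click */
	link->pop_wait = 1;
	schedule_delayed_work(&link->delayed_work,
			      msecs_to_jiffies(link->pmdown_time));
}

static void my_link_stream_restart(struct my_link *link)
{
	/* cancel a pending power-down before starting again */
	if (link->pop_wait) {
		link->pop_wait = 0;
		cancel_delayed_work(&link->delayed_work);
	}
}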
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 10d21be383f..91d592ff67b 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -271,7 +271,8 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
codec->debugfs_codec_root = debugfs_create_dir(codec->name,
debugfs_card_root);
if (!codec->debugfs_codec_root) {
- dev_warn(codec->dev, "Failed to create codec debugfs directory\n");
+ dev_warn(codec->dev, "ASoC: Failed to create codec debugfs"
+ " directory\n");
return;
}
@@ -284,7 +285,8 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
codec->debugfs_codec_root,
codec, &codec_reg_fops);
if (!codec->debugfs_reg)
- dev_warn(codec->dev, "Failed to create codec register debugfs file\n");
+ dev_warn(codec->dev, "ASoC: Failed to create codec register"
+ " debugfs file\n");
snd_soc_dapm_debugfs_init(&codec->dapm, codec->debugfs_codec_root);
}
@@ -302,7 +304,7 @@ static void soc_init_platform_debugfs(struct snd_soc_platform *platform)
debugfs_card_root);
if (!platform->debugfs_platform_root) {
dev_warn(platform->dev,
- "Failed to create platform debugfs directory\n");
+ "ASoC: Failed to create platform debugfs directory\n");
return;
}
@@ -430,7 +432,7 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
&card->pop_time);
if (!card->debugfs_pop_time)
dev_warn(card->dev,
- "Failed to create pop time debugfs file\n");
+ "ASoC: Failed to create pop time debugfs file\n");
}
static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
@@ -475,7 +477,7 @@ struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
!strcmp(card->rtd[i].dai_link->name, dai_link))
return card->rtd[i].pcm->streams[stream].substream;
}
- dev_dbg(card->dev, "failed to find dai link %s\n", dai_link);
+ dev_dbg(card->dev, "ASoC: failed to find dai link %s\n", dai_link);
return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);
@@ -489,7 +491,7 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
if (!strcmp(card->rtd[i].dai_link->name, dai_link))
return &card->rtd[i];
}
- dev_dbg(card->dev, "failed to find rtd %s\n", dai_link);
+ dev_dbg(card->dev, "ASoC: failed to find rtd %s\n", dai_link);
return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
@@ -519,7 +521,7 @@ static int soc_ac97_dev_register(struct snd_soc_codec *codec)
codec->card->snd_card->number, 0, codec->name);
err = device_register(&codec->ac97->dev);
if (err < 0) {
- snd_printk(KERN_ERR "Can't register ac97 bus\n");
+ dev_err(codec->dev, "ASoC: Can't register ac97 bus\n");
codec->ac97->dev.bus = NULL;
return err;
}
@@ -628,7 +630,8 @@ int snd_soc_suspend(struct device *dev)
*/
if (codec->dapm.idle_bias_off) {
dev_dbg(codec->dev,
- "idle_bias_off CODEC on over suspend\n");
+ "ASoC: idle_bias_off CODEC on"
+ " over suspend\n");
break;
}
case SND_SOC_BIAS_OFF:
@@ -639,7 +642,8 @@ int snd_soc_suspend(struct device *dev)
regcache_mark_dirty(codec->control_data);
break;
default:
- dev_dbg(codec->dev, "CODEC is on over suspend\n");
+ dev_dbg(codec->dev, "ASoC: CODEC is on"
+ " over suspend\n");
break;
}
}
@@ -676,7 +680,7 @@ static void soc_resume_deferred(struct work_struct *work)
* so userspace apps are blocked from touching us
*/
- dev_dbg(card->dev, "starting resume work\n");
+ dev_dbg(card->dev, "ASoC: starting resume work\n");
/* Bring us up into D2 so that DAPM starts enabling things */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);
@@ -708,7 +712,8 @@ static void soc_resume_deferred(struct work_struct *work)
codec->suspended = 0;
break;
default:
- dev_dbg(codec->dev, "CODEC was on over suspend\n");
+ dev_dbg(codec->dev, "ASoC: CODEC was on over"
+ " suspend\n");
break;
}
}
@@ -758,7 +763,7 @@ static void soc_resume_deferred(struct work_struct *work)
if (card->resume_post)
card->resume_post(card);
- dev_dbg(card->dev, "resume work completed\n");
+ dev_dbg(card->dev, "ASoC: resume work completed\n");
/* userspace can access us now we are back as we were before */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
@@ -790,12 +795,12 @@ int snd_soc_resume(struct device *dev)
ac97_control |= cpu_dai->driver->ac97_control;
}
if (ac97_control) {
- dev_dbg(dev, "Resuming AC97 immediately\n");
+ dev_dbg(dev, "ASoC: Resuming AC97 immediately\n");
soc_resume_deferred(&card->deferred_resume_work);
} else {
- dev_dbg(dev, "Scheduling resume work\n");
+ dev_dbg(dev, "ASoC: Scheduling resume work\n");
if (!schedule_work(&card->deferred_resume_work))
- dev_err(dev, "resume work item may be lost\n");
+ dev_err(dev, "ASoC: resume work item may be lost\n");
}
return 0;
@@ -818,7 +823,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
struct snd_soc_dai *codec_dai, *cpu_dai;
const char *platform_name;
- dev_dbg(card->dev, "binding %s at idx %d\n", dai_link->name, num);
+ dev_dbg(card->dev, "ASoC: binding %s at idx %d\n", dai_link->name, num);
/* Find CPU DAI from registered DAIs*/
list_for_each_entry(cpu_dai, &dai_list, list) {
@@ -836,7 +841,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
}
if (!rtd->cpu_dai) {
- dev_err(card->dev, "CPU DAI %s not registered\n",
+ dev_err(card->dev, "ASoC: CPU DAI %s not registered\n",
dai_link->cpu_dai_name);
return -EPROBE_DEFER;
}
@@ -867,14 +872,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
}
if (!rtd->codec_dai) {
- dev_err(card->dev, "CODEC DAI %s not registered\n",
+ dev_err(card->dev, "ASoC: CODEC DAI %s not registered\n",
dai_link->codec_dai_name);
return -EPROBE_DEFER;
}
}
if (!rtd->codec) {
- dev_err(card->dev, "CODEC %s not registered\n",
+ dev_err(card->dev, "ASoC: CODEC %s not registered\n",
dai_link->codec_name);
return -EPROBE_DEFER;
}
@@ -898,7 +903,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
rtd->platform = platform;
}
if (!rtd->platform) {
- dev_err(card->dev, "platform %s not registered\n",
+ dev_err(card->dev, "ASoC: platform %s not registered\n",
dai_link->platform_name);
return -EPROBE_DEFER;
}
@@ -915,8 +920,8 @@ static int soc_remove_platform(struct snd_soc_platform *platform)
if (platform->driver->remove) {
ret = platform->driver->remove(platform);
if (ret < 0)
- pr_err("asoc: failed to remove %s: %d\n",
- platform->name, ret);
+ dev_err(platform->dev, "ASoC: failed to remove: %d\n",
+ ret);
}
/* Make sure all DAPM widgets are freed */
@@ -937,9 +942,7 @@ static void soc_remove_codec(struct snd_soc_codec *codec)
if (codec->driver->remove) {
err = codec->driver->remove(codec);
if (err < 0)
- dev_err(codec->dev,
- "asoc: failed to remove %s: %d\n",
- codec->name, err);
+ dev_err(codec->dev, "ASoC: failed to remove: %d\n", err);
}
/* Make sure all DAPM widgets are freed */
@@ -971,8 +974,9 @@ static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
if (codec_dai->driver->remove) {
err = codec_dai->driver->remove(codec_dai);
if (err < 0)
- pr_err("asoc: failed to remove %s: %d\n",
- codec_dai->name, err);
+ dev_err(codec_dai->dev,
+ "ASoC: failed to remove %s: %d\n",
+ codec_dai->name, err);
}
codec_dai->probed = 0;
list_del(&codec_dai->card_list);
@@ -984,8 +988,9 @@ static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
if (cpu_dai->driver->remove) {
err = cpu_dai->driver->remove(cpu_dai);
if (err < 0)
- pr_err("asoc: failed to remove %s: %d\n",
- cpu_dai->name, err);
+ dev_err(cpu_dai->dev,
+ "ASoC: failed to remove %s: %d\n",
+ cpu_dai->name, err);
}
cpu_dai->probed = 0;
list_del(&cpu_dai->card_list);
@@ -1099,8 +1104,7 @@ static int soc_probe_codec(struct snd_soc_card *card,
ret = driver->probe(codec);
if (ret < 0) {
dev_err(codec->dev,
- "asoc: failed to probe CODEC %s: %d\n",
- codec->name, ret);
+ "ASoC: failed to probe CODEC %d\n", ret);
goto err_probe;
}
}
@@ -1163,8 +1167,7 @@ static int soc_probe_platform(struct snd_soc_card *card,
ret = driver->probe(platform);
if (ret < 0) {
dev_err(platform->dev,
- "asoc: failed to probe platform %s: %d\n",
- platform->name, ret);
+ "ASoC: failed to probe platform %d\n", ret);
goto err_probe;
}
}
@@ -1229,7 +1232,7 @@ static int soc_post_component_init(struct snd_soc_card *card,
else if (dailess && aux_dev->init)
ret = aux_dev->init(&codec->dapm);
if (ret < 0) {
- dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret);
+ dev_err(card->dev, "ASoC: failed to init %s: %d\n", name, ret);
return ret;
}
codec->name_prefix = temp;
@@ -1253,7 +1256,7 @@ static int soc_post_component_init(struct snd_soc_card *card,
ret = device_add(rtd->dev);
if (ret < 0) {
dev_err(card->dev,
- "asoc: failed to register runtime device: %d\n", ret);
+ "ASoC: failed to register runtime device: %d\n", ret);
return ret;
}
rtd->dev_registered = 1;
@@ -1262,14 +1265,13 @@ static int soc_post_component_init(struct snd_soc_card *card,
ret = snd_soc_dapm_sys_add(rtd->dev);
if (ret < 0)
dev_err(codec->dev,
- "asoc: failed to add codec dapm sysfs entries: %d\n",
- ret);
+ "ASoC: failed to add codec dapm sysfs entries: %d\n", ret);
/* add codec sysfs entries */
ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
if (ret < 0)
dev_err(codec->dev,
- "asoc: failed to add codec sysfs files: %d\n", ret);
+ "ASoC: failed to add codec sysfs files: %d\n", ret);
#ifdef CONFIG_DEBUG_FS
/* add DPCM sysfs entries */
@@ -1278,7 +1280,7 @@ static int soc_post_component_init(struct snd_soc_card *card,
ret = soc_dpcm_debugfs_add(rtd);
if (ret < 0)
- dev_err(rtd->dev, "asoc: failed to add dpcm sysfs entries: %d\n", ret);
+ dev_err(rtd->dev, "ASoC: failed to add dpcm sysfs entries: %d\n", ret);
out:
#endif
@@ -1333,7 +1335,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
struct snd_soc_dapm_widget *play_w, *capture_w;
int ret;
- dev_dbg(card->dev, "probe %s dai link %d late %d\n",
+ dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
card->name, num, order);
/* config components */
@@ -1359,8 +1361,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
if (cpu_dai->driver->probe) {
ret = cpu_dai->driver->probe(cpu_dai);
if (ret < 0) {
- pr_err("asoc: failed to probe CPU DAI %s: %d\n",
- cpu_dai->name, ret);
+ dev_err(cpu_dai->dev,
+ "ASoC: failed to probe CPU DAI %s: %d\n",
+ cpu_dai->name, ret);
module_put(cpu_dai->dev->driver->owner);
return ret;
}
@@ -1375,8 +1378,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
if (codec_dai->driver->probe) {
ret = codec_dai->driver->probe(codec_dai);
if (ret < 0) {
- pr_err("asoc: failed to probe CODEC DAI %s: %d\n",
- codec_dai->name, ret);
+ dev_err(codec_dai->dev,
+ "ASoC: failed to probe CODEC DAI %s: %d\n",
+ codec_dai->name, ret);
return ret;
}
}
@@ -1396,13 +1400,14 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
ret = device_create_file(rtd->dev, &dev_attr_pmdown_time);
if (ret < 0)
- pr_warn("asoc: failed to add pmdown_time sysfs:%d\n", ret);
+ dev_warn(rtd->dev, "ASoC: failed to add pmdown_time sysfs: %d\n",
+ ret);
if (cpu_dai->driver->compress_dai) {
/*create compress_device"*/
ret = soc_new_compress(rtd, num);
if (ret < 0) {
- pr_err("asoc: can't create compress %s\n",
+ dev_err(card->dev, "ASoC: can't create compress %s\n",
dai_link->stream_name);
return ret;
}
@@ -1412,7 +1417,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
/* create the pcm */
ret = soc_new_pcm(rtd, num);
if (ret < 0) {
- pr_err("asoc: can't create pcm %s :%d\n",
+ dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
dai_link->stream_name, ret);
return ret;
}
@@ -1424,7 +1429,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
ret = snd_soc_dapm_new_pcm(card, dai_link->params,
capture_w, play_w);
if (ret != 0) {
- dev_err(card->dev, "Can't link %s to %s: %d\n",
+ dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
play_w->name, capture_w->name, ret);
return ret;
}
@@ -1436,7 +1441,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
ret = snd_soc_dapm_new_pcm(card, dai_link->params,
capture_w, play_w);
if (ret != 0) {
- dev_err(card->dev, "Can't link %s to %s: %d\n",
+ dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
play_w->name, capture_w->name, ret);
return ret;
}
@@ -1473,7 +1478,8 @@ static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
ret = soc_ac97_dev_register(rtd->codec);
if (ret < 0) {
- pr_err("asoc: AC97 device register failed:%d\n", ret);
+ dev_err(rtd->codec->dev,
+ "ASoC: AC97 device register failed: %d\n", ret);
return ret;
}
@@ -1502,7 +1508,7 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num)
return 0;
}
- dev_err(card->dev, "%s not registered\n", aux_dev->codec_name);
+ dev_err(card->dev, "ASoC: %s not registered\n", aux_dev->codec_name);
return -EPROBE_DEFER;
}
@@ -1518,7 +1524,7 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
if (!strcmp(codec->name, aux_dev->codec_name)) {
if (codec->probed) {
dev_err(codec->dev,
- "asoc: codec already probed");
+ "ASoC: codec already probed");
ret = -EBUSY;
goto out;
}
@@ -1526,7 +1532,7 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
}
}
/* codec not found */
- dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name);
+ dev_err(card->dev, "ASoC: codec %s not found", aux_dev->codec_name);
return -EPROBE_DEFER;
found:
@@ -1569,8 +1575,8 @@ static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
codec->compress_type = compress_type;
ret = snd_soc_cache_init(codec);
if (ret < 0) {
- dev_err(codec->dev, "Failed to set cache compression type: %d\n",
- ret);
+ dev_err(codec->dev, "ASoC: Failed to set cache compression"
+ " type: %d\n", ret);
return ret;
}
codec->cache_init = 1;
@@ -1626,8 +1632,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
card->owner, 0, &card->snd_card);
if (ret < 0) {
- pr_err("asoc: can't create sound card for card %s: %d\n",
- card->name, ret);
+ dev_err(card->dev, "ASoC: can't create sound card for"
+ " card %s: %d\n", card->name, ret);
goto base_error;
}
card->snd_card->dev = card->dev;
@@ -1663,8 +1669,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
for (i = 0; i < card->num_links; i++) {
ret = soc_probe_link_components(card, i, order);
if (ret < 0) {
- pr_err("asoc: failed to instantiate card %s: %d\n",
- card->name, ret);
+ dev_err(card->dev,
+ "ASoC: failed to instantiate card %d\n",
+ ret);
goto probe_dai_err;
}
}
@@ -1676,8 +1683,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
for (i = 0; i < card->num_links; i++) {
ret = soc_probe_link_dais(card, i, order);
if (ret < 0) {
- pr_err("asoc: failed to instantiate card %s: %d\n",
- card->name, ret);
+ dev_err(card->dev,
+ "ASoC: failed to instantiate card %d\n",
+ ret);
goto probe_dai_err;
}
}
@@ -1686,8 +1694,9 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
for (i = 0; i < card->num_aux_devs; i++) {
ret = soc_probe_aux_dev(card, i);
if (ret < 0) {
- pr_err("asoc: failed to add auxiliary devices %s: %d\n",
- card->name, ret);
+ dev_err(card->dev,
+ "ASoC: failed to add auxiliary devices %d\n",
+ ret);
goto probe_aux_dev_err;
}
}
@@ -1712,7 +1721,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dai_fmt);
if (ret != 0 && ret != -ENOTSUPP)
dev_warn(card->rtd[i].codec_dai->dev,
- "Failed to set DAI format: %d\n",
+ "ASoC: Failed to set DAI format: %d\n",
ret);
}
@@ -1723,7 +1732,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dai_fmt);
if (ret != 0 && ret != -ENOTSUPP)
dev_warn(card->rtd[i].cpu_dai->dev,
- "Failed to set DAI format: %d\n",
+ "ASoC: Failed to set DAI format: %d\n",
ret);
} else if (dai_fmt) {
/* Flip the polarity for the "CPU" end */
@@ -1748,7 +1757,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dai_fmt);
if (ret != 0 && ret != -ENOTSUPP)
dev_warn(card->rtd[i].cpu_dai->dev,
- "Failed to set DAI format: %d\n",
+ "ASoC: Failed to set DAI format: %d\n",
ret);
}
}
@@ -1775,7 +1784,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (card->late_probe) {
ret = card->late_probe(card);
if (ret < 0) {
- dev_err(card->dev, "%s late_probe() failed: %d\n",
+ dev_err(card->dev, "ASoC: %s late_probe() failed: %d\n",
card->name, ret);
goto probe_aux_dev_err;
}
@@ -1789,8 +1798,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = snd_card_register(card->snd_card);
if (ret < 0) {
- pr_err("asoc: failed to register soundcard for %s: %d\n",
- card->name, ret);
+ dev_err(card->dev, "ASoC: failed to register soundcard %d\n",
+ ret);
goto probe_aux_dev_err;
}
@@ -1799,8 +1808,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
for (i = 0; i < card->num_rtd; i++) {
ret = soc_register_ac97_dai_link(&card->rtd[i]);
if (ret < 0) {
- pr_err("asoc: failed to register AC97 %s: %d\n",
- card->name, ret);
+ dev_err(card->dev, "ASoC: failed to register AC97:"
+ " %d\n", ret);
while (--i >= 0)
soc_unregister_ac97_dai_link(card->rtd[i].codec);
goto probe_aux_dev_err;
@@ -1846,7 +1855,7 @@ static int soc_probe(struct platform_device *pdev)
return -EINVAL;
dev_warn(&pdev->dev,
- "ASoC machine %s should use snd_soc_register_card()\n",
+ "ASoC: machine %s should use snd_soc_register_card()\n",
card->name);
/* Bodge while we unpick instantiation */
@@ -1996,7 +2005,7 @@ int snd_soc_platform_read(struct snd_soc_platform *platform,
unsigned int ret;
if (!platform->driver->read) {
- dev_err(platform->dev, "platform has no read back\n");
+ dev_err(platform->dev, "ASoC: platform has no read back\n");
return -1;
}
@@ -2012,7 +2021,7 @@ int snd_soc_platform_write(struct snd_soc_platform *platform,
unsigned int reg, unsigned int val)
{
if (!platform->driver->write) {
- dev_err(platform->dev, "platform has no write back\n");
+ dev_err(platform->dev, "ASoC: platform has no write back\n");
return -1;
}
@@ -2283,7 +2292,8 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
err = snd_ctl_add(card, snd_soc_cnew(control, data,
control->name, prefix));
if (err < 0) {
- dev_err(dev, "Failed to add %s: %d\n", control->name, err);
+ dev_err(dev, "ASoC: Failed to add %s: %d\n",
+ control->name, err);
return err;
}
}
@@ -3534,15 +3544,14 @@ int snd_soc_register_card(struct snd_soc_card *card)
* not both or neither.
*/
if (!!link->codec_name == !!link->codec_of_node) {
- dev_err(card->dev,
- "Neither/both codec name/of_node are set for %s\n",
- link->name);
+ dev_err(card->dev, "ASoC: Neither/both codec"
+ " name/of_node are set for %s\n", link->name);
return -EINVAL;
}
/* Codec DAI name must be specified */
if (!link->codec_dai_name) {
- dev_err(card->dev, "codec_dai_name not set for %s\n",
- link->name);
+ dev_err(card->dev, "ASoC: codec_dai_name not"
+ " set for %s\n", link->name);
return -EINVAL;
}
@@ -3551,8 +3560,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
* can be left unspecified, and a dummy platform will be used.
*/
if (link->platform_name && link->platform_of_node) {
- dev_err(card->dev,
- "Both platform name/of_node are set for %s\n", link->name);
+ dev_err(card->dev, "ASoC: Both platform name/of_node"
+ " are set for %s\n", link->name);
return -EINVAL;
}
@@ -3562,9 +3571,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
* name alone..
*/
if (link->cpu_name && link->cpu_of_node) {
- dev_err(card->dev,
- "Neither/both cpu name/of_node are set for %s\n",
- link->name);
+ dev_err(card->dev, "ASoC: Neither/both "
+ "cpu name/of_node are set for %s\n",link->name);
return -EINVAL;
}
/*
@@ -3573,9 +3581,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
*/
if (!link->cpu_dai_name &&
!(link->cpu_name || link->cpu_of_node)) {
- dev_err(card->dev,
- "Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
- link->name);
+ dev_err(card->dev, "ASoC: Neither cpu_dai_name nor "
+ "cpu_name/of_node are set for %s\n", link->name);
return -EINVAL;
}
}
@@ -3622,7 +3629,7 @@ int snd_soc_unregister_card(struct snd_soc_card *card)
{
if (card->instantiated)
soc_cleanup_card_resources(card);
- dev_dbg(card->dev, "Unregistered card '%s'\n", card->name);
+ dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
return 0;
}
@@ -3679,8 +3686,8 @@ static inline char *fmt_multiple_name(struct device *dev,
struct snd_soc_dai_driver *dai_drv)
{
if (dai_drv->name == NULL) {
- pr_err("asoc: error - multiple DAI %s registered with no name\n",
- dev_name(dev));
+ dev_err(dev, "ASoC: error - multiple DAI %s registered with"
+ " no name\n", dev_name(dev));
return NULL;
}
@@ -3698,7 +3705,7 @@ int snd_soc_register_dai(struct device *dev,
struct snd_soc_codec *codec;
struct snd_soc_dai *dai;
- dev_dbg(dev, "dai register %s\n", dev_name(dev));
+ dev_dbg(dev, "ASoC: dai register %s\n", dev_name(dev));
dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
if (dai == NULL)
@@ -3721,7 +3728,7 @@ int snd_soc_register_dai(struct device *dev,
list_for_each_entry(codec, &codec_list, list) {
if (codec->dev == dev) {
- dev_dbg(dev, "Mapped DAI %s to CODEC %s\n",
+ dev_dbg(dev, "ASoC: Mapped DAI %s to CODEC %s\n",
dai->name, codec->name);
dai->codec = codec;
break;
@@ -3735,7 +3742,7 @@ int snd_soc_register_dai(struct device *dev,
mutex_unlock(&client_mutex);
- pr_debug("Registered DAI '%s'\n", dai->name);
+ dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
return 0;
}
@@ -3761,7 +3768,7 @@ found:
list_del(&dai->list);
mutex_unlock(&client_mutex);
- pr_debug("Unregistered DAI '%s'\n", dai->name);
+ dev_dbg(dev, "ASoC: Unregistered DAI '%s'\n", dai->name);
kfree(dai->name);
kfree(dai);
}
@@ -3780,7 +3787,7 @@ int snd_soc_register_dais(struct device *dev,
struct snd_soc_dai *dai;
int i, ret = 0;
- dev_dbg(dev, "dai register %s #%Zu\n", dev_name(dev), count);
+ dev_dbg(dev, "ASoC: dai register %s #%Zu\n", dev_name(dev), count);
for (i = 0; i < count; i++) {
@@ -3812,8 +3819,8 @@ int snd_soc_register_dais(struct device *dev,
list_for_each_entry(codec, &codec_list, list) {
if (codec->dev == dev) {
- dev_dbg(dev, "Mapped DAI %s to CODEC %s\n",
- dai->name, codec->name);
+ dev_dbg(dev, "ASoC: Mapped DAI %s to "
+ "CODEC %s\n", dai->name, codec->name);
dai->codec = codec;
break;
}
@@ -3826,7 +3833,7 @@ int snd_soc_register_dais(struct device *dev,
mutex_unlock(&client_mutex);
- pr_debug("Registered DAI '%s'\n", dai->name);
+ dev_dbg(dai->dev, "ASoC: Registered DAI '%s'\n", dai->name);
}
return 0;
@@ -3864,7 +3871,7 @@ int snd_soc_register_platform(struct device *dev,
{
struct snd_soc_platform *platform;
- dev_dbg(dev, "platform register %s\n", dev_name(dev));
+ dev_dbg(dev, "ASoC: platform register %s\n", dev_name(dev));
platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL);
if (platform == NULL)
@@ -3888,7 +3895,7 @@ int snd_soc_register_platform(struct device *dev,
list_add(&platform->list, &platform_list);
mutex_unlock(&client_mutex);
- pr_debug("Registered platform '%s'\n", platform->name);
+ dev_dbg(dev, "ASoC: Registered platform '%s'\n", platform->name);
return 0;
}
@@ -3914,7 +3921,7 @@ found:
list_del(&platform->list);
mutex_unlock(&client_mutex);
- pr_debug("Unregistered platform '%s'\n", platform->name);
+ dev_dbg(dev, "ASoC: Unregistered platform '%s'\n", platform->name);
kfree(platform->name);
kfree(platform);
}
@@ -4007,7 +4014,7 @@ int snd_soc_register_codec(struct device *dev,
codec->reg_size = reg_size;
/* it is necessary to make a copy of the default register cache
* because in the case of using a compression type that requires
- * the default register cache to be marked as __devinitconst the
+ * the default register cache to be marked as initialization data, the
* kernel might have freed the array by the time we initialize
* the cache.
*/
@@ -4043,11 +4050,11 @@ int snd_soc_register_codec(struct device *dev,
if (num_dai) {
ret = snd_soc_register_dais(dev, dai_drv, num_dai);
if (ret < 0)
- dev_err(codec->dev, "Failed to regster DAIs: %d\n",
- ret);
+ dev_err(codec->dev, "ASoC: Failed to regster"
+ " DAIs: %d\n", ret);
}
- pr_debug("Registered codec '%s'\n", codec->name);
+ dev_dbg(codec->dev, "ASoC: Registered codec '%s'\n", codec->name);
return 0;
fail:
@@ -4082,7 +4089,7 @@ found:
list_del(&codec->list);
mutex_unlock(&client_mutex);
- pr_debug("Unregistered codec '%s'\n", codec->name);
+ dev_dbg(codec->dev, "ASoC: Unregistered codec '%s'\n", codec->name);
snd_soc_cache_exit(codec);
kfree(codec->reg_def_copy);
@@ -4106,7 +4113,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
*/
if (ret < 0 && ret != -EINVAL) {
dev_err(card->dev,
- "Property '%s' could not be read: %d\n",
+ "ASoC: Property '%s' could not be read: %d\n",
propname, ret);
return ret;
}
@@ -4125,15 +4132,13 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
num_routes = of_property_count_strings(np, propname);
if (num_routes < 0 || num_routes & 1) {
- dev_err(card->dev,
- "Property '%s' does not exist or its length is not even\n",
- propname);
+ dev_err(card->dev, "ASoC: Property '%s' does not exist or its"
+ " length is not even\n", propname);
return -EINVAL;
}
num_routes /= 2;
if (!num_routes) {
- dev_err(card->dev,
- "Property '%s's length is zero\n",
+ dev_err(card->dev, "ASoC: Property '%s's length is zero\n",
propname);
return -EINVAL;
}
@@ -4142,7 +4147,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
GFP_KERNEL);
if (!routes) {
dev_err(card->dev,
- "Could not allocate DAPM route table\n");
+ "ASoC: Could not allocate DAPM route table\n");
return -EINVAL;
}
@@ -4151,7 +4156,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
2 * i, &routes[i].sink);
if (ret) {
dev_err(card->dev,
- "Property '%s' index %d could not be read: %d\n",
+ "ASoC: Property '%s' index %d could not be read: %d\n",
propname, 2 * i, ret);
kfree(routes);
return -EINVAL;
@@ -4160,7 +4165,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
(2 * i) + 1, &routes[i].source);
if (ret) {
dev_err(card->dev,
- "Property '%s' index %d could not be read: %d\n",
+ "ASoC: Property '%s' index %d could not be read: %d\n",
propname, (2 * i) + 1, ret);
kfree(routes);
return -EINVAL;
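
Most of the soc-core.c churn above is a logging conversion: bare pr_err()/pr_debug()/printk() calls become dev_err()/dev_dbg() on the relevant struct device, and every message gains an "ASoC: " prefix so the subsystem is easy to grep in dmesg. Since dev_err() already prints the driver and device name, several messages also drop the card or codec name they used to repeat by hand. A small sketch of the difference; report_probe_failure() is an invented helper, not ASoC API:

#include <linux/device.h>
#include <linux/printk.h>

static void report_probe_failure(struct device *dev, const char *card_name,
				 int ret)
{
	/* old style: no device context unless it is spelled out by hand */
	pr_err("asoc: failed to instantiate card %s: %d\n", card_name, ret);

	/* new style: "<driver> <device>: ASoC: ..." comes for free */
	dev_err(dev, "ASoC: failed to instantiate card: %d\n", ret);
}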
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 6e35bcae02d..1e36bc81e5a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -220,7 +220,7 @@ static int soc_widget_read(struct snd_soc_dapm_widget *w, int reg)
else if (w->platform)
return snd_soc_platform_read(w->platform, reg);
- dev_err(w->dapm->dev, "no valid widget read method\n");
+ dev_err(w->dapm->dev, "ASoC: no valid widget read method\n");
return -1;
}
@@ -231,7 +231,7 @@ static int soc_widget_write(struct snd_soc_dapm_widget *w, int reg, int val)
else if (w->platform)
return snd_soc_platform_write(w->platform, reg, val);
- dev_err(w->dapm->dev, "no valid widget write method\n");
+ dev_err(w->dapm->dev, "ASoC: no valid widget write method\n");
return -1;
}
@@ -546,7 +546,7 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
wlist = kzalloc(wlistsize, GFP_KERNEL);
if (wlist == NULL) {
dev_err(dapm->dev,
- "asoc: can't allocate widget list for %s\n",
+ "ASoC: can't allocate widget list for %s\n",
w->name);
return -ENOMEM;
}
@@ -595,9 +595,9 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
prefix);
ret = snd_ctl_add(card, path->kcontrol);
if (ret < 0) {
- dev_err(dapm->dev,
- "asoc: failed to add dapm kcontrol %s: %d\n",
- path->long_name, ret);
+ dev_err(dapm->dev, "ASoC: failed to add widget"
+ " %s dapm kcontrol %s: %d\n",
+ w->name, path->long_name, ret);
kfree(wlist);
kfree(path->long_name);
path->long_name = NULL;
@@ -626,7 +626,7 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
if (w->num_kcontrols != 1) {
dev_err(dapm->dev,
- "asoc: mux %s has incorrect number of controls\n",
+ "ASoC: mux %s has incorrect number of controls\n",
w->name);
return -EINVAL;
}
@@ -645,7 +645,7 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
wlist = krealloc(wlist, wlistsize, GFP_KERNEL);
if (wlist == NULL) {
dev_err(dapm->dev,
- "asoc: can't allocate widget list for %s\n", w->name);
+ "ASoC: can't allocate widget list for %s\n", w->name);
return -ENOMEM;
}
wlist->num_widgets = wlistentries;
@@ -677,7 +677,7 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
name + prefix_len, prefix);
ret = snd_ctl_add(card, kcontrol);
if (ret < 0) {
- dev_err(dapm->dev, "failed to add kcontrol %s: %d\n",
+ dev_err(dapm->dev, "ASoC: failed to add kcontrol %s: %d\n",
w->name, ret);
kfree(wlist);
return ret;
@@ -699,7 +699,7 @@ static int dapm_new_pga(struct snd_soc_dapm_widget *w)
{
if (w->num_kcontrols)
dev_err(w->dapm->dev,
- "asoc: PGA controls not supported: '%s'\n", w->name);
+ "ASoC: PGA controls not supported: '%s'\n", w->name);
return 0;
}
@@ -725,7 +725,7 @@ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
case SNDRV_CTL_POWER_D3hot:
case SNDRV_CTL_POWER_D3cold:
if (widget->ignore_suspend)
- dev_dbg(widget->dapm->dev, "%s ignoring suspend\n",
+ dev_dbg(widget->dapm->dev, "ASoC: %s ignoring suspend\n",
widget->name);
return widget->ignore_suspend;
default:
@@ -757,14 +757,14 @@ static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list,
wlistentries * sizeof(struct snd_soc_dapm_widget *);
*list = krealloc(wlist, wlistsize, GFP_KERNEL);
if (*list == NULL) {
- dev_err(w->dapm->dev, "can't allocate widget list for %s\n",
+ dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n",
w->name);
return -ENOMEM;
}
wlist = *list;
/* insert the widget */
- dev_dbg(w->dapm->dev, "added %s in widget list pos %d\n",
+ dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n",
w->name, wlist->num_widgets);
wlist->widgets[wlist->num_widgets] = w;
@@ -844,7 +844,8 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
int err;
err = dapm_list_add_widget(list, path->sink);
if (err < 0) {
- dev_err(widget->dapm->dev, "could not add widget %s\n",
+ dev_err(widget->dapm->dev,
+ "ASoC: could not add widget %s\n",
widget->name);
return con;
}
@@ -943,7 +944,8 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
int err;
err = dapm_list_add_widget(list, path->source);
if (err < 0) {
- dev_err(widget->dapm->dev, "could not add widget %s\n",
+ dev_err(widget->dapm->dev,
+ "ASoC: could not add widget %s\n",
widget->name);
return con;
}
@@ -1024,7 +1026,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
- "Failed to bypass %s: %d\n",
+ "ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
@@ -1034,7 +1036,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
ret = regulator_allow_bypass(w->regulator, false);
if (ret != 0)
dev_warn(w->dapm->dev,
- "Failed to unbypass %s: %d\n",
+ "ASoC: Failed to unbypass %s: %d\n",
w->name, ret);
}
@@ -1253,7 +1255,7 @@ static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
ret = w->event(w, NULL, event);
trace_snd_soc_dapm_widget_event_done(w, event);
if (ret < 0)
- pr_err("%s: %s event failed: %d\n",
+ dev_err(dapm->dev, "ASoC: %s: %s event failed: %d\n",
ev_name, w->name, ret);
}
}
@@ -1402,7 +1404,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
if (ret < 0)
dev_err(w->dapm->dev,
- "Failed to apply widget power: %d\n", ret);
+ "ASoC: Failed to apply widget power: %d\n", ret);
}
if (!list_empty(&pending))
@@ -1431,20 +1433,21 @@ static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
(w->event_flags & SND_SOC_DAPM_PRE_REG)) {
ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
if (ret != 0)
- pr_err("%s DAPM pre-event failed: %d\n",
+ dev_err(dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
w->name, ret);
}
ret = soc_widget_update_bits_locked(w, update->reg, update->mask,
update->val);
if (ret < 0)
- pr_err("%s DAPM update failed: %d\n", w->name, ret);
+ dev_err(dapm->dev, "ASoC: %s DAPM update failed: %d\n",
+ w->name, ret);
if (w->event &&
(w->event_flags & SND_SOC_DAPM_POST_REG)) {
ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
if (ret != 0)
- pr_err("%s DAPM post-event failed: %d\n",
+ dev_err(dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
w->name, ret);
}
}
@@ -1466,7 +1469,7 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
if (ret != 0)
dev_err(d->dev,
- "Failed to turn on bias: %d\n", ret);
+ "ASoC: Failed to turn on bias: %d\n", ret);
}
/* Prepare for a STADDBY->ON or ON->STANDBY transition */
@@ -1474,7 +1477,7 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
if (ret != 0)
dev_err(d->dev,
- "Failed to prepare bias: %d\n", ret);
+ "ASoC: Failed to prepare bias: %d\n", ret);
}
}
@@ -1492,7 +1495,7 @@ static void dapm_post_sequence_async(void *data, async_cookie_t cookie)
d->target_bias_level == SND_SOC_BIAS_OFF)) {
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
if (ret != 0)
- dev_err(d->dev, "Failed to apply standby bias: %d\n",
+ dev_err(d->dev, "ASoC: Failed to apply standby bias: %d\n",
ret);
}
@@ -1501,7 +1504,8 @@ static void dapm_post_sequence_async(void *data, async_cookie_t cookie)
d->target_bias_level == SND_SOC_BIAS_OFF) {
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_OFF);
if (ret != 0)
- dev_err(d->dev, "Failed to turn off bias: %d\n", ret);
+ dev_err(d->dev, "ASoC: Failed to turn off bias: %d\n",
+ ret);
if (d->dev)
pm_runtime_put(d->dev);
@@ -1512,7 +1516,7 @@ static void dapm_post_sequence_async(void *data, async_cookie_t cookie)
d->target_bias_level == SND_SOC_BIAS_ON) {
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_ON);
if (ret != 0)
- dev_err(d->dev, "Failed to apply active bias: %d\n",
+ dev_err(d->dev, "ASoC: Failed to apply active bias: %d\n",
ret);
}
}
@@ -1838,7 +1842,7 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
if (!dapm->debugfs_dapm) {
dev_warn(dapm->dev,
- "Failed to create DAPM debugfs directory\n");
+ "ASoC: Failed to create DAPM debugfs directory\n");
return;
}
@@ -2123,7 +2127,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
if (!w) {
- dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
+ dev_err(dapm->dev, "ASoC: DAPM unknown pin %s\n", pin);
return -EINVAL;
}
@@ -2212,8 +2216,16 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
if (!wsource)
wsource = wtsource;
- if (wsource == NULL || wsink == NULL)
+ if (wsource == NULL) {
+ dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
+ route->source);
return -ENODEV;
+ }
+ if (wsink == NULL) {
+ dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
+ route->sink);
+ return -ENODEV;
+ }
path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
if (!path)
@@ -2308,7 +2320,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
return 0;
err:
- dev_warn(dapm->dev, "asoc: no dapm match for %s --> %s --> %s\n",
+ dev_warn(dapm->dev, "ASoC: no dapm match for %s --> %s --> %s\n",
source, control, sink);
kfree(path);
return ret;
@@ -2325,7 +2337,7 @@ static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
if (route->control) {
dev_err(dapm->dev,
- "Removal of routes with controls not supported\n");
+ "ASoC: Removal of routes with controls not supported\n");
return -EINVAL;
}
@@ -2360,7 +2372,7 @@ static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
list_del(&path->list_source);
kfree(path);
} else {
- dev_warn(dapm->dev, "Route %s->%s does not exist\n",
+ dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n",
source, sink);
}
@@ -2389,8 +2401,10 @@ int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
for (i = 0; i < num; i++) {
r = snd_soc_dapm_add_route(dapm, route);
if (r < 0) {
- dev_err(dapm->dev, "Failed to add route %s->%s\n",
- route->source, route->sink);
+ dev_err(dapm->dev, "ASoC: Failed to add route %s -> %s -> %s\n",
+ route->source,
+ route->control ? route->control : "direct",
+ route->sink);
ret = r;
}
route++;
@@ -2438,19 +2452,19 @@ static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm,
int count = 0;
if (!source) {
- dev_err(dapm->dev, "Unable to find source %s for weak route\n",
+ dev_err(dapm->dev, "ASoC: Unable to find source %s for weak route\n",
route->source);
return -ENODEV;
}
if (!sink) {
- dev_err(dapm->dev, "Unable to find sink %s for weak route\n",
+ dev_err(dapm->dev, "ASoC: Unable to find sink %s for weak route\n",
route->sink);
return -ENODEV;
}
if (route->control || route->connected)
- dev_warn(dapm->dev, "Ignoring control for weak route %s->%s\n",
+ dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
route->source, route->sink);
list_for_each_entry(path, &source->sinks, list_source) {
@@ -2461,10 +2475,10 @@ static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm,
}
if (count == 0)
- dev_err(dapm->dev, "No path found for weak route %s->%s\n",
+ dev_err(dapm->dev, "ASoC: No path found for weak route %s->%s\n",
route->source, route->sink);
if (count > 1)
- dev_warn(dapm->dev, "%d paths found for weak route %s->%s\n",
+ dev_warn(dapm->dev, "ASoC: %d paths found for weak route %s->%s\n",
count, route->source, route->sink);
return 0;
@@ -2601,7 +2615,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
if (snd_soc_volsw_is_stereo(mc))
dev_warn(widget->dapm->dev,
- "Control '%s' is stereo, which is not supported\n",
+ "ASoC: Control '%s' is stereo, which is not supported\n",
kcontrol->id.name);
ucontrol->value.integer.value[0] =
@@ -2644,7 +2658,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
if (snd_soc_volsw_is_stereo(mc))
dev_warn(widget->dapm->dev,
- "Control '%s' is stereo, which is not supported\n",
+ "ASoC: Control '%s' is stereo, which is not supported\n",
kcontrol->id.name);
val = (ucontrol->value.integer.value[0] & mask);
@@ -3021,7 +3035,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
w->regulator = devm_regulator_get(dapm->dev, w->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
- dev_err(dapm->dev, "Failed to request %s: %d\n",
+ dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
}
@@ -3031,7 +3045,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
w->clk = devm_clk_get(dapm->dev, w->name);
if (IS_ERR(w->clk)) {
ret = PTR_ERR(w->clk);
- dev_err(dapm->dev, "Failed to request %s: %d\n",
+ dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
}
@@ -3182,7 +3196,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
if (config->formats) {
fmt = ffs(config->formats) - 1;
} else {
- dev_warn(w->dapm->dev, "Invalid format %llx specified\n",
+ dev_warn(w->dapm->dev, "ASoC: Invalid format %llx specified\n",
config->formats);
fmt = 0;
}
@@ -3215,7 +3229,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
params, source);
if (ret != 0) {
dev_err(source->dev,
- "hw_params() failed: %d\n", ret);
+ "ASoC: hw_params() failed: %d\n", ret);
goto out;
}
}
@@ -3226,7 +3240,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
sink);
if (ret != 0) {
dev_err(sink->dev,
- "hw_params() failed: %d\n", ret);
+ "ASoC: hw_params() failed: %d\n", ret);
goto out;
}
}
@@ -3235,14 +3249,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMU:
ret = snd_soc_dai_digital_mute(sink, 0);
if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "Failed to unmute: %d\n", ret);
+ dev_warn(sink->dev, "ASoC: Failed to unmute: %d\n", ret);
ret = 0;
break;
case SND_SOC_DAPM_PRE_PMD:
ret = snd_soc_dai_digital_mute(sink, 1);
if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "Failed to mute: %d\n", ret);
+ dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret);
ret = 0;
break;
@@ -3281,11 +3295,11 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD;
- dev_dbg(card->dev, "adding %s widget\n", link_name);
+ dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name);
w = snd_soc_dapm_new_control(&card->dapm, &template);
if (!w) {
- dev_err(card->dev, "Failed to create %s widget\n",
+ dev_err(card->dev, "ASoC: Failed to create %s widget\n",
link_name);
return -ENOMEM;
}
@@ -3319,12 +3333,12 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name = dai->driver->playback.stream_name;
template.sname = dai->driver->playback.stream_name;
- dev_dbg(dai->dev, "adding %s widget\n",
+ dev_dbg(dai->dev, "ASoC: adding %s widget\n",
template.name);
w = snd_soc_dapm_new_control(dapm, &template);
if (!w) {
- dev_err(dapm->dev, "Failed to create %s widget\n",
+ dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->playback.stream_name);
}
@@ -3337,12 +3351,12 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name = dai->driver->capture.stream_name;
template.sname = dai->driver->capture.stream_name;
- dev_dbg(dai->dev, "adding %s widget\n",
+ dev_dbg(dai->dev, "ASoC: adding %s widget\n",
template.name);
w = snd_soc_dapm_new_control(dapm, &template);
if (!w) {
- dev_err(dapm->dev, "Failed to create %s widget\n",
+ dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->capture.stream_name);
}
@@ -3518,11 +3532,11 @@ int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
if (!w) {
- dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
+ dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
return -EINVAL;
}
- dev_dbg(w->dapm->dev, "dapm: force enable pin %s\n", pin);
+ dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
w->connected = 1;
w->force = 1;
dapm_mark_dirty(w, "force enable");
@@ -3605,7 +3619,7 @@ int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, false);
if (!w) {
- dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
+ dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
return -EINVAL;
}
@@ -3664,7 +3678,7 @@ void snd_soc_dapm_auto_nc_codec_pins(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_dapm_widget *w;
- dev_dbg(codec->dev, "Auto NC: DAPMs: card:%p codec:%p\n",
+ dev_dbg(codec->dev, "ASoC: Auto NC: DAPMs: card:%p codec:%p\n",
&card->dapm, &codec->dapm);
list_for_each_entry(w, &card->widgets, list) {
@@ -3674,7 +3688,7 @@ void snd_soc_dapm_auto_nc_codec_pins(struct snd_soc_codec *codec)
case snd_soc_dapm_input:
case snd_soc_dapm_output:
case snd_soc_dapm_micbias:
- dev_dbg(codec->dev, "Auto NC: Checking widget %s\n",
+ dev_dbg(codec->dev, "ASoC: Auto NC: Checking widget %s\n",
w->name);
if (!snd_soc_dapm_widget_in_card_paths(card, w)) {
dev_dbg(codec->dev,
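
Two of the soc-dapm.c hunks above improve route diagnostics: the combined NULL test in snd_soc_dapm_add_route() is split so the error names exactly which end of the route is missing, and the snd_soc_dapm_add_routes() failure message now prints the full source -> control -> sink triple, falling back to "direct" when there is no control. That triple mirrors how machine drivers write snd_soc_dapm_route tables, which are initialised in { sink, control, source } order; the widget names below are invented for illustration:

#include <sound/soc.h>

static const struct snd_soc_dapm_route example_routes[] = {
	/* { sink, control, source }: a direct path has a NULL control */
	{ "Headphone Jack", NULL, "HPOUT" },
	/* reported as "MICIN -> Mic Switch -> Mixer" if adding it fails */
	{ "Mixer", "Mic Switch", "MICIN" },
};

Such a table is normally passed to snd_soc_dapm_add_routes(dapm, example_routes, ARRAY_SIZE(example_routes)), which is where the improved failure message is printed.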
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index bbc125748a3..111b7d921e8 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -317,3 +317,5 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+
+MODULE_LICENSE("GPL");
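
soc-dmaengine-pcm.c gains the MODULE_LICENSE() tag it was missing; without one, loading the file as a module taints the kernel and it cannot resolve GPL-only exports. A skeleton showing where the tag normally sits; the example_* names are placeholders:

#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
	return 0;	/* nothing to set up in this skeleton */
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");	/* omitting this taints the kernel on load */
MODULE_DESCRIPTION("Minimal module skeleton");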
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 1ab5fe04bfc..0bb5cccd776 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -66,7 +66,6 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack_pin *pin;
int enable;
- int oldstatus;
trace_snd_soc_jack_report(jack, mask, status);
@@ -78,8 +77,6 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
mutex_lock(&jack->mutex);
- oldstatus = jack->status;
-
jack->status &= ~mask;
jack->status |= status & mask;
@@ -172,12 +169,13 @@ int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
for (i = 0; i < count; i++) {
if (!pins[i].pin) {
- printk(KERN_ERR "No name for pin %d\n", i);
+ dev_err(jack->codec->dev, "ASoC: No name for pin %d\n",
+ i);
return -EINVAL;
}
if (!pins[i].mask) {
- printk(KERN_ERR "No mask for pin %d (%s)\n", i,
- pins[i].pin);
+ dev_err(jack->codec->dev, "ASoC: No mask for pin %d"
+ " (%s)\n", i, pins[i].pin);
return -EINVAL;
}
@@ -297,13 +295,13 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
for (i = 0; i < count; i++) {
if (!gpio_is_valid(gpios[i].gpio)) {
- printk(KERN_ERR "Invalid gpio %d\n",
+ dev_err(jack->codec->dev, "ASoC: Invalid gpio %d\n",
gpios[i].gpio);
ret = -EINVAL;
goto undo;
}
if (!gpios[i].name) {
- printk(KERN_ERR "No name for gpio %d\n",
+ dev_err(jack->codec->dev, "ASoC: No name for gpio %d\n",
gpios[i].gpio);
ret = -EINVAL;
goto undo;
@@ -332,7 +330,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
if (gpios[i].wake) {
ret = irq_set_irq_wake(gpio_to_irq(gpios[i].gpio), 1);
if (ret != 0)
- printk(KERN_ERR
+ dev_err(jack->codec->dev, "ASoC: "
"Failed to mark GPIO %d as wake source: %d\n",
gpios[i].gpio, ret);
}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index ef22d0bd9e9..d7711fce119 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -43,7 +43,7 @@ static int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
struct snd_soc_pcm_runtime *be = dpcm->be;
- dev_dbg(be->dev, "pm: BE %s event %d dir %d\n",
+ dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
be->dai_link->name, event, dir);
snd_soc_dapm_stream_event(be, dir, event);
@@ -70,18 +70,19 @@ static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream,
*/
if (!soc_dai->rate) {
dev_warn(soc_dai->dev,
- "Not enforcing symmetric_rates due to race\n");
+ "ASoC: Not enforcing symmetric_rates due to race\n");
return 0;
}
- dev_dbg(soc_dai->dev, "Symmetry forces %dHz rate\n", soc_dai->rate);
+ dev_dbg(soc_dai->dev, "ASoC: Symmetry forces %dHz rate\n", soc_dai->rate);
ret = snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_RATE,
soc_dai->rate, soc_dai->rate);
if (ret < 0) {
dev_err(soc_dai->dev,
- "Unable to apply rate symmetry constraint: %d\n", ret);
+ "ASoC: Unable to apply rate symmetry constraint: %d\n",
+ ret);
return ret;
}
@@ -118,7 +119,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream,
sample_sizes[i], bits);
if (ret != 0)
dev_warn(dai->dev,
- "Failed to set MSB %d/%d: %d\n",
+ "ASoC: Failed to set MSB %d/%d: %d\n",
bits, sample_sizes[i], ret);
}
}
@@ -149,8 +150,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->startup) {
ret = cpu_dai->driver->ops->startup(substream, cpu_dai);
if (ret < 0) {
- dev_err(cpu_dai->dev, "can't open interface %s: %d\n",
- cpu_dai->name, ret);
+ dev_err(cpu_dai->dev, "ASoC: can't open interface"
+ " %s: %d\n", cpu_dai->name, ret);
goto out;
}
}
@@ -158,8 +159,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (platform->driver->ops && platform->driver->ops->open) {
ret = platform->driver->ops->open(substream);
if (ret < 0) {
- dev_err(platform->dev, "can't open platform %s: %d\n",
- platform->name, ret);
+ dev_err(platform->dev, "ASoC: can't open platform"
+ " %s: %d\n", platform->name, ret);
goto platform_err;
}
}
@@ -167,8 +168,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (codec_dai->driver->ops->startup) {
ret = codec_dai->driver->ops->startup(substream, codec_dai);
if (ret < 0) {
- dev_err(codec_dai->dev, "can't open codec %s: %d\n",
- codec_dai->name, ret);
+ dev_err(codec_dai->dev, "ASoC: can't open codec"
+ " %s: %d\n", codec_dai->name, ret);
goto codec_dai_err;
}
}
@@ -176,7 +177,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (rtd->dai_link->ops && rtd->dai_link->ops->startup) {
ret = rtd->dai_link->ops->startup(substream);
if (ret < 0) {
- pr_err("asoc: %s startup failed: %d\n",
+ pr_err("ASoC: %s startup failed: %d\n",
rtd->dai_link->name, ret);
goto machine_err;
}
@@ -238,18 +239,18 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
ret = -EINVAL;
snd_pcm_limit_hw_rates(runtime);
if (!runtime->hw.rates) {
- printk(KERN_ERR "asoc: %s <-> %s No matching rates\n",
+ printk(KERN_ERR "ASoC: %s <-> %s No matching rates\n",
codec_dai->name, cpu_dai->name);
goto config_err;
}
if (!runtime->hw.formats) {
- printk(KERN_ERR "asoc: %s <-> %s No matching formats\n",
+ printk(KERN_ERR "ASoC: %s <-> %s No matching formats\n",
codec_dai->name, cpu_dai->name);
goto config_err;
}
if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
runtime->hw.channels_min > runtime->hw.channels_max) {
- printk(KERN_ERR "asoc: %s <-> %s No matching channels\n",
+ printk(KERN_ERR "ASoC: %s <-> %s No matching channels\n",
codec_dai->name, cpu_dai->name);
goto config_err;
}
@@ -270,12 +271,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
goto config_err;
}
- pr_debug("asoc: %s <-> %s info:\n",
+ pr_debug("ASoC: %s <-> %s info:\n",
codec_dai->name, cpu_dai->name);
- pr_debug("asoc: rate mask 0x%x\n", runtime->hw.rates);
- pr_debug("asoc: min ch %d max ch %d\n", runtime->hw.channels_min,
+ pr_debug("ASoC: rate mask 0x%x\n", runtime->hw.rates);
+ pr_debug("ASoC: min ch %d max ch %d\n", runtime->hw.channels_min,
runtime->hw.channels_max);
- pr_debug("asoc: min rate %d max rate %d\n", runtime->hw.rate_min,
+ pr_debug("ASoC: min rate %d max rate %d\n", runtime->hw.rate_min,
runtime->hw.rate_max);
dynamic:
@@ -330,14 +331,14 @@ static void close_delayed_work(struct work_struct *work)
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- pr_debug("pop wq checking: %s status: %s waiting: %s\n",
+ dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
codec_dai->driver->playback.stream_name,
codec_dai->playback_active ? "active" : "inactive",
- codec_dai->pop_wait ? "yes" : "no");
+ rtd->pop_wait ? "yes" : "no");
/* are we waiting on this codec DAI stream */
- if (codec_dai->pop_wait == 1) {
- codec_dai->pop_wait = 0;
+ if (rtd->pop_wait == 1) {
+ rtd->pop_wait = 0;
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_STOP);
}
@@ -407,7 +408,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
SND_SOC_DAPM_STREAM_STOP);
} else {
/* start delayed pop wq here for playback streams */
- codec_dai->pop_wait = 1;
+ rtd->pop_wait = 1;
schedule_delayed_work(&rtd->delayed_work,
msecs_to_jiffies(rtd->pmdown_time));
}
@@ -444,7 +445,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
ret = rtd->dai_link->ops->prepare(substream);
if (ret < 0) {
- pr_err("asoc: machine prepare error: %d\n", ret);
+ dev_err(rtd->card->dev, "ASoC: machine prepare error:"
+ " %d\n", ret);
goto out;
}
}
@@ -452,8 +454,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
if (platform->driver->ops && platform->driver->ops->prepare) {
ret = platform->driver->ops->prepare(substream);
if (ret < 0) {
- dev_err(platform->dev, "platform prepare error: %d\n",
- ret);
+ dev_err(platform->dev, "ASoC: platform prepare error:"
+ " %d\n", ret);
goto out;
}
}
@@ -461,7 +463,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
if (codec_dai->driver->ops->prepare) {
ret = codec_dai->driver->ops->prepare(substream, codec_dai);
if (ret < 0) {
- dev_err(codec_dai->dev, "DAI prepare error: %d\n",
+ dev_err(codec_dai->dev, "ASoC: DAI prepare error: %d\n",
ret);
goto out;
}
@@ -470,7 +472,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->prepare) {
ret = cpu_dai->driver->ops->prepare(substream, cpu_dai);
if (ret < 0) {
- dev_err(cpu_dai->dev, "DAI prepare error: %d\n",
+ dev_err(cpu_dai->dev, "ASoC: DAI prepare error: %d\n",
ret);
goto out;
}
@@ -478,8 +480,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
/* cancel any delayed stream shutdown that is pending */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- codec_dai->pop_wait) {
- codec_dai->pop_wait = 0;
+ rtd->pop_wait) {
+ rtd->pop_wait = 0;
cancel_delayed_work(&rtd->delayed_work);
}
@@ -512,7 +514,8 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
ret = rtd->dai_link->ops->hw_params(substream, params);
if (ret < 0) {
- pr_err("asoc: machine hw_params failed: %d\n", ret);
+ dev_err(rtd->card->dev, "ASoC: machine hw_params"
+ " failed: %d\n", ret);
goto out;
}
}
@@ -520,8 +523,8 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (codec_dai->driver->ops->hw_params) {
ret = codec_dai->driver->ops->hw_params(substream, params, codec_dai);
if (ret < 0) {
- dev_err(codec_dai->dev, "can't set %s hw params: %d\n",
- codec_dai->name, ret);
+ dev_err(codec_dai->dev, "ASoC: can't set %s hw params:"
+ " %d\n", codec_dai->name, ret);
goto codec_err;
}
}
@@ -529,7 +532,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (cpu_dai->driver->ops->hw_params) {
ret = cpu_dai->driver->ops->hw_params(substream, params, cpu_dai);
if (ret < 0) {
- dev_err(cpu_dai->dev, "%s hw params failed: %d\n",
+ dev_err(cpu_dai->dev, "ASoC: %s hw params failed: %d\n",
cpu_dai->name, ret);
goto interface_err;
}
@@ -538,7 +541,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (platform->driver->ops && platform->driver->ops->hw_params) {
ret = platform->driver->ops->hw_params(substream, params);
if (ret < 0) {
- dev_err(platform->dev, "%s hw params failed: %d\n",
+ dev_err(platform->dev, "ASoC: %s hw params failed: %d\n",
platform->name, ret);
goto platform_err;
}
@@ -760,7 +763,7 @@ static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm, *d;
list_for_each_entry_safe(dpcm, d, &fe->dpcm[stream].be_clients, list_be) {
- dev_dbg(fe->dev, "BE %s disconnect check for %s\n",
+ dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
stream ? "capture" : "playback",
dpcm->be->dai_link->name);
@@ -815,7 +818,7 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
}
}
- dev_err(card->dev, "can't get %s BE for %s\n",
+ dev_err(card->dev, "ASoC: can't get %s BE for %s\n",
stream ? "capture" : "playback", widget->name);
return NULL;
}
@@ -866,7 +869,7 @@ static int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
/* get number of valid DAI paths and their widgets */
paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, &list);
- dev_dbg(fe->dev, "found %d audio %s paths\n", paths,
+ dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
stream ? "capture" : "playback");
*list_ = list;
@@ -903,7 +906,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
if (widget && widget_in_list(list, widget))
continue;
- dev_dbg(fe->dev, "pruning %s BE %s for %s\n",
+ dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
stream ? "capture" : "playback",
dpcm->be->dai_link->name, fe->dai_link->name);
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
@@ -911,7 +914,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
prune++;
}
- dev_dbg(fe->dev, "found %d old BE paths for pruning\n", prune);
+ dev_dbg(fe->dev, "ASoC: found %d old BE paths for pruning\n", prune);
return prune;
}
@@ -932,7 +935,7 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
/* is there a valid BE rtd for this widget */
be = dpcm_get_be(card, list->widgets[i], stream);
if (!be) {
- dev_err(fe->dev, "no BE found for %s\n",
+ dev_err(fe->dev, "ASoC: no BE found for %s\n",
list->widgets[i]->name);
continue;
}
@@ -948,7 +951,7 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
/* newly connected FE and BE */
err = dpcm_be_connect(fe, be, stream);
if (err < 0) {
- dev_err(fe->dev, "can't connect %s\n",
+ dev_err(fe->dev, "ASoC: can't connect %s\n",
list->widgets[i]->name);
break;
} else if (err == 0) /* already connected */
@@ -959,7 +962,7 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
new++;
}
- dev_dbg(fe->dev, "found %d new BE paths\n", new);
+ dev_dbg(fe->dev, "ASoC: found %d new BE paths\n", new);
return new;
}
@@ -998,7 +1001,7 @@ static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
snd_soc_dpcm_get_substream(be, stream);
if (be->dpcm[stream].users == 0)
- dev_err(be->dev, "no users %s at close - state %d\n",
+ dev_err(be->dev, "ASoC: no users %s at close - state %d\n",
stream ? "capture" : "playback",
be->dpcm[stream].state);
@@ -1032,7 +1035,7 @@ static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
/* first time the dpcm is open ? */
if (be->dpcm[stream].users == DPCM_MAX_BE_USERS)
- dev_err(be->dev, "too many users %s at open %d\n",
+ dev_err(be->dev, "ASoC: too many users %s at open %d\n",
stream ? "capture" : "playback",
be->dpcm[stream].state);
@@ -1043,15 +1046,15 @@ static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
continue;
- dev_dbg(be->dev, "dpcm: open BE %s\n", be->dai_link->name);
+ dev_dbg(be->dev, "ASoC: open BE %s\n", be->dai_link->name);
be_substream->runtime = be->dpcm[stream].runtime;
err = soc_pcm_open(be_substream);
if (err < 0) {
- dev_err(be->dev, "BE open failed %d\n", err);
+ dev_err(be->dev, "ASoC: BE open failed %d\n", err);
be->dpcm[stream].users--;
if (be->dpcm[stream].users < 0)
- dev_err(be->dev, "no users %s at unwind %d\n",
+ dev_err(be->dev, "ASoC: no users %s at unwind %d\n",
stream ? "capture" : "playback",
be->dpcm[stream].state);
@@ -1076,7 +1079,7 @@ unwind:
continue;
if (be->dpcm[stream].users == 0)
- dev_err(be->dev, "no users %s at close %d\n",
+ dev_err(be->dev, "ASoC: no users %s at close %d\n",
stream ? "capture" : "playback",
be->dpcm[stream].state);
@@ -1128,16 +1131,16 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
ret = dpcm_be_dai_startup(fe, fe_substream->stream);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: failed to start some BEs %d\n", ret);
+ dev_err(fe->dev,"ASoC: failed to start some BEs %d\n", ret);
goto be_err;
}
- dev_dbg(fe->dev, "dpcm: open FE %s\n", fe->dai_link->name);
+ dev_dbg(fe->dev, "ASoC: open FE %s\n", fe->dai_link->name);
/* start the DAI frontend */
ret = soc_pcm_open(fe_substream);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: failed to start FE %d\n", ret);
+ dev_err(fe->dev,"ASoC: failed to start FE %d\n", ret);
goto unwind;
}
@@ -1172,7 +1175,7 @@ static int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
continue;
if (be->dpcm[stream].users == 0)
- dev_err(be->dev, "no users %s at close - state %d\n",
+ dev_err(be->dev, "ASoC: no users %s at close - state %d\n",
stream ? "capture" : "playback",
be->dpcm[stream].state);
@@ -1183,7 +1186,7 @@ static int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN))
continue;
- dev_dbg(be->dev, "dpcm: close BE %s\n",
+ dev_dbg(be->dev, "ASoC: close BE %s\n",
dpcm->fe->dai_link->name);
soc_pcm_close(be_substream);
@@ -1204,7 +1207,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
/* shutdown the BEs */
dpcm_be_dai_shutdown(fe, substream->stream);
- dev_dbg(fe->dev, "dpcm: close FE %s\n", fe->dai_link->name);
+ dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
/* now shutdown the frontend */
soc_pcm_close(substream);
@@ -1243,7 +1246,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
continue;
- dev_dbg(be->dev, "dpcm: hw_free BE %s\n",
+ dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
dpcm->fe->dai_link->name);
soc_pcm_hw_free(be_substream);
@@ -1262,12 +1265,12 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
- dev_dbg(fe->dev, "dpcm: hw_free FE %s\n", fe->dai_link->name);
+ dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
/* call hw_free on the frontend */
err = soc_pcm_hw_free(substream);
if (err < 0)
- dev_err(fe->dev,"dpcm: hw_free FE %s failed\n",
+ dev_err(fe->dev,"ASoC: hw_free FE %s failed\n",
fe->dai_link->name);
/* only hw_params backends that are either sinks or sources
@@ -1305,7 +1308,7 @@ static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
continue;
- dev_dbg(be->dev, "dpcm: hw_params BE %s\n",
+ dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
dpcm->fe->dai_link->name);
/* copy params for each dpcm */
@@ -1318,7 +1321,7 @@ static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
&dpcm->hw_params);
if (ret < 0) {
dev_err(be->dev,
- "dpcm: hw_params BE fixup failed %d\n",
+ "ASoC: hw_params BE fixup failed %d\n",
ret);
goto unwind;
}
@@ -1327,7 +1330,7 @@ static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
ret = soc_pcm_hw_params(be_substream, &dpcm->hw_params);
if (ret < 0) {
dev_err(dpcm->be->dev,
- "dpcm: hw_params BE failed %d\n", ret);
+ "ASoC: hw_params BE failed %d\n", ret);
goto unwind;
}
@@ -1374,18 +1377,18 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
sizeof(struct snd_pcm_hw_params));
ret = dpcm_be_dai_hw_params(fe, substream->stream);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: hw_params BE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: hw_params BE failed %d\n", ret);
goto out;
}
- dev_dbg(fe->dev, "dpcm: hw_params FE %s rate %d chan %x fmt %d\n",
+ dev_dbg(fe->dev, "ASoC: hw_params FE %s rate %d chan %x fmt %d\n",
fe->dai_link->name, params_rate(params),
params_channels(params), params_format(params));
/* call hw_params on the frontend */
ret = soc_pcm_hw_params(substream, params);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: hw_params FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: hw_params FE failed %d\n", ret);
dpcm_be_dai_hw_free(fe, stream);
} else
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
@@ -1401,12 +1404,12 @@ static int dpcm_do_trigger(struct snd_soc_dpcm *dpcm,
{
int ret;
- dev_dbg(dpcm->be->dev, "dpcm: trigger BE %s cmd %d\n",
+ dev_dbg(dpcm->be->dev, "ASoC: trigger BE %s cmd %d\n",
dpcm->fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
if (ret < 0)
- dev_err(dpcm->be->dev,"dpcm: trigger BE failed %d\n", ret);
+ dev_err(dpcm->be->dev,"ASoC: trigger BE failed %d\n", ret);
return ret;
}
@@ -1517,12 +1520,12 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
case SND_SOC_DPCM_TRIGGER_PRE:
/* call trigger on the frontend before the backend. */
- dev_dbg(fe->dev, "dpcm: pre trigger FE %s cmd %d\n",
+ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
}
@@ -1533,11 +1536,11 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
}
- dev_dbg(fe->dev, "dpcm: post trigger FE %s cmd %d\n",
+ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
@@ -1545,17 +1548,17 @@ static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
- dev_dbg(fe->dev, "dpcm: bespoke trigger FE %s cmd %d\n",
+ dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
}
break;
default:
- dev_err(fe->dev, "dpcm: invalid trigger cmd %d for %s\n", cmd,
+ dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
fe->dai_link->name);
ret = -EINVAL;
goto out;
@@ -1598,12 +1601,12 @@ static int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
continue;
- dev_dbg(be->dev, "dpcm: prepare BE %s\n",
+ dev_dbg(be->dev, "ASoC: prepare BE %s\n",
dpcm->fe->dai_link->name);
ret = soc_pcm_prepare(be_substream);
if (ret < 0) {
- dev_err(be->dev, "dpcm: backend prepare failed %d\n",
+ dev_err(be->dev, "ASoC: backend prepare failed %d\n",
ret);
break;
}
@@ -1620,13 +1623,13 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
- dev_dbg(fe->dev, "dpcm: prepare FE %s\n", fe->dai_link->name);
+ dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
/* there is no point preparing this FE if there are no BEs */
if (list_empty(&fe->dpcm[stream].be_clients)) {
- dev_err(fe->dev, "dpcm: no backend DAIs enabled for %s\n",
+ dev_err(fe->dev, "ASoC: no backend DAIs enabled for %s\n",
fe->dai_link->name);
ret = -EINVAL;
goto out;
@@ -1639,7 +1642,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
/* call prepare on the frontend */
ret = soc_pcm_prepare(substream);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: prepare FE %s failed\n",
+ dev_err(fe->dev,"ASoC: prepare FE %s failed\n",
fe->dai_link->name);
goto out;
}
@@ -1673,33 +1676,33 @@ static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
int err;
- dev_dbg(fe->dev, "runtime %s close on FE %s\n",
+ dev_dbg(fe->dev, "ASoC: runtime %s close on FE %s\n",
stream ? "capture" : "playback", fe->dai_link->name);
if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) {
/* call bespoke trigger - FE takes care of all BE triggers */
- dev_dbg(fe->dev, "dpcm: bespoke trigger FE %s cmd stop\n",
+ dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd stop\n",
fe->dai_link->name);
err = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
if (err < 0)
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", err);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", err);
} else {
- dev_dbg(fe->dev, "dpcm: trigger FE %s cmd stop\n",
+ dev_dbg(fe->dev, "ASoC: trigger FE %s cmd stop\n",
fe->dai_link->name);
err = dpcm_be_dai_trigger(fe, stream, SNDRV_PCM_TRIGGER_STOP);
if (err < 0)
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", err);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", err);
}
err = dpcm_be_dai_hw_free(fe, stream);
if (err < 0)
- dev_err(fe->dev,"dpcm: hw_free FE failed %d\n", err);
+ dev_err(fe->dev,"ASoC: hw_free FE failed %d\n", err);
err = dpcm_be_dai_shutdown(fe, stream);
if (err < 0)
- dev_err(fe->dev,"dpcm: shutdown FE failed %d\n", err);
+ dev_err(fe->dev,"ASoC: shutdown FE failed %d\n", err);
/* run the stream event for each BE */
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_NOP);
@@ -1715,7 +1718,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
int ret;
- dev_dbg(fe->dev, "runtime %s open on FE %s\n",
+ dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
stream ? "capture" : "playback", fe->dai_link->name);
/* Only start the BE if the FE is ready */
@@ -1761,22 +1764,22 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) {
/* call trigger on the frontend - FE takes care of all BE triggers */
- dev_dbg(fe->dev, "dpcm: bespoke trigger FE %s cmd start\n",
+ dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd start\n",
fe->dai_link->name);
ret = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: bespoke trigger FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: bespoke trigger FE failed %d\n", ret);
goto hw_free;
}
} else {
- dev_dbg(fe->dev, "dpcm: trigger FE %s cmd start\n",
+ dev_dbg(fe->dev, "ASoC: trigger FE %s cmd start\n",
fe->dai_link->name);
ret = dpcm_be_dai_trigger(fe, stream,
SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
- dev_err(fe->dev,"dpcm: trigger FE failed %d\n", ret);
+ dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto hw_free;
}
}
@@ -1805,7 +1808,7 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
ret = dpcm_run_update_startup(fe, stream);
if (ret < 0)
- dev_err(fe->dev, "failed to startup some BEs\n");
+ dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
return ret;
@@ -1818,7 +1821,7 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
ret = dpcm_run_update_shutdown(fe, stream);
if (ret < 0)
- dev_err(fe->dev, "failed to shutdown some BEs\n");
+ dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
return ret;
@@ -1853,7 +1856,7 @@ int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
continue;
/* DAPM sync will call this to update DSP paths */
- dev_dbg(fe->dev, "DPCM runtime update for FE %s\n",
+ dev_dbg(fe->dev, "ASoC: DPCM runtime update for FE %s\n",
fe->dai_link->name);
/* skip if FE doesn't have playback capability */
@@ -1862,7 +1865,7 @@ int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
if (paths < 0) {
- dev_warn(fe->dev, "%s no valid %s path\n",
+ dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "playback");
mutex_unlock(&card->mutex);
return paths;
@@ -1891,7 +1894,7 @@ capture:
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
if (paths < 0) {
- dev_warn(fe->dev, "%s no valid %s path\n",
+ dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "capture");
mutex_unlock(&card->mutex);
return paths;
@@ -1934,7 +1937,7 @@ int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
if (be->dai_link->ignore_suspend)
continue;
- dev_dbg(be->dev, "BE digital mute %s\n", be->dai_link->name);
+ dev_dbg(be->dev, "ASoC: BE digital mute %s\n", be->dai_link->name);
if (drv->ops->digital_mute && dai->playback_active)
drv->ops->digital_mute(dai, mute);
@@ -1955,7 +1958,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
fe->dpcm[stream].runtime = fe_substream->runtime;
if (dpcm_path_get(fe, stream, &list) <= 0) {
- dev_dbg(fe->dev, "asoc: %s no valid %s route\n",
+ dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
fe->dai_link->name, stream ? "capture" : "playback");
}
@@ -2039,11 +2042,11 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
capture, &pcm);
}
if (ret < 0) {
- dev_err(rtd->card->dev, "can't create pcm for %s\n",
+ dev_err(rtd->card->dev, "ASoC: can't create pcm for %s\n",
rtd->dai_link->name);
return ret;
}
- dev_dbg(rtd->card->dev, "registered pcm #%d %s\n",num, new_name);
+ dev_dbg(rtd->card->dev, "ASoC: registered pcm #%d %s\n",num, new_name);
/* DAPM dai link stream work */
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
@@ -2097,7 +2100,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (platform->driver->pcm_new) {
ret = platform->driver->pcm_new(rtd);
if (ret < 0) {
- dev_err(platform->dev, "pcm constructor failed\n");
+ dev_err(platform->dev,
+ "ASoC: pcm constructor failed: %d\n",
+ ret);
return ret;
}
}
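
The soc-pcm.c hunks above do two things: every log message gains an "ASoC:" prefix and moves from pr_* to dev_* logging, and the pop_wait flag is moved from the codec DAI onto the runtime (rtd), since the deferred power-down work is per-link rather than per-DAI. As a rough illustration of the pop-wait pattern those hunks touch (a minimal sketch with simplified, made-up struct and field names, not the actual soc-pcm.c code), the close path arms a delayed work item and the next prepare cancels it:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Stand-in for snd_soc_pcm_runtime; delayed_work is assumed to have been
 * set up elsewhere with INIT_DELAYED_WORK(). */
struct pcm_runtime_sketch {
	struct delayed_work delayed_work;
	unsigned int pop_wait:1;	/* flag now lives on the runtime, not the DAI */
	int pmdown_time;		/* power-down delay in ms */
};

/* On playback close: do not power down immediately, just arm the timer. */
static void sketch_close(struct pcm_runtime_sketch *rtd)
{
	rtd->pop_wait = 1;
	schedule_delayed_work(&rtd->delayed_work,
			      msecs_to_jiffies(rtd->pmdown_time));
}

/* On the next prepare: a new stream is starting, so cancel the pending
 * power-down instead of letting it fire mid-playback. */
static void sketch_prepare(struct pcm_runtime_sketch *rtd)
{
	if (rtd->pop_wait) {
		rtd->pop_wait = 0;
		cancel_delayed_work(&rtd->delayed_work);
	}
}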
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 60053709e41..fe4541df498 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -94,7 +94,7 @@ static struct snd_soc_dai_driver dummy_dai = {
.name = "snd-soc-dummy-dai",
};
-static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
+static int snd_soc_dummy_probe(struct platform_device *pdev)
{
int ret;
@@ -111,7 +111,7 @@ static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
return ret;
}
-static __devexit int snd_soc_dummy_remove(struct platform_device *pdev)
+static int snd_soc_dummy_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
snd_soc_unregister_codec(&pdev->dev);
@@ -125,7 +125,7 @@ static struct platform_driver soc_dummy_driver = {
.owner = THIS_MODULE,
},
.probe = snd_soc_dummy_probe,
- .remove = __devexit_p(snd_soc_dummy_remove),
+ .remove = snd_soc_dummy_remove,
};
static struct platform_device *soc_dummy_dev;
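
From soc-utils.c onward, most of the remaining hunks are mechanical: the __devinit/__devexit/__devexit_p and __devinitdata/__devinitconst annotations are dropped because CONFIG_HOTPLUG is going away, so probe/remove code can no longer be discarded after init. After the change, a driver's registration boilerplate takes the plain form sketched below (generic, made-up names; not any of the drivers in this diff):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* no __devinit: probe code now always stays resident */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* no __devexit, and no __devexit_p() wrapper around .remove */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-sketch",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");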
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index 8c7f2372944..9b76cc5a114 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -184,12 +184,12 @@ struct snd_soc_platform_driver spear_soc_platform = {
.pcm_free = spear_pcm_free,
};
-static int __devinit spear_soc_platform_probe(struct platform_device *pdev)
+static int spear_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &spear_soc_platform);
}
-static int __devexit spear_soc_platform_remove(struct platform_device *pdev)
+static int spear_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
@@ -203,7 +203,7 @@ static struct platform_driver spear_pcm_driver = {
},
.probe = spear_soc_platform_probe,
- .remove = __devexit_p(spear_soc_platform_remove),
+ .remove = spear_soc_platform_remove,
};
module_platform_driver(spear_pcm_driver);
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index bf99296bce9..65431848387 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -131,7 +131,7 @@ static const struct regmap_config tegra20_das_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit tegra20_das_probe(struct platform_device *pdev)
+static int tegra20_das_probe(struct platform_device *pdev)
{
struct resource *res, *region;
void __iomem *regs;
@@ -200,7 +200,7 @@ err:
return ret;
}
-static int __devexit tegra20_das_remove(struct platform_device *pdev)
+static int tegra20_das_remove(struct platform_device *pdev)
{
if (!das)
return -ENODEV;
@@ -210,14 +210,14 @@ static int __devexit tegra20_das_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra20_das_of_match[] __devinitconst = {
+static const struct of_device_id tegra20_das_of_match[] = {
{ .compatible = "nvidia,tegra20-das", },
{},
};
static struct platform_driver tegra20_das_driver = {
.probe = tegra20_das_probe,
- .remove = __devexit_p(tegra20_das_remove),
+ .remove = tegra20_das_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 0832e8afd73..caa772de5a1 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -331,7 +331,7 @@ static const struct regmap_config tegra20_i2s_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int tegra20_i2s_platform_probe(struct platform_device *pdev)
+static int tegra20_i2s_platform_probe(struct platform_device *pdev)
{
struct tegra20_i2s *i2s;
struct resource *mem, *memregion, *dmareq;
@@ -447,7 +447,7 @@ err:
return ret;
}
-static int __devexit tegra20_i2s_platform_remove(struct platform_device *pdev)
+static int tegra20_i2s_platform_remove(struct platform_device *pdev)
{
struct tegra20_i2s *i2s = dev_get_drvdata(&pdev->dev);
@@ -463,12 +463,12 @@ static int __devexit tegra20_i2s_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra20_i2s_of_match[] __devinitconst = {
+static const struct of_device_id tegra20_i2s_of_match[] = {
{ .compatible = "nvidia,tegra20-i2s", },
{},
};
-static const struct dev_pm_ops tegra20_i2s_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra20_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(tegra20_i2s_runtime_suspend,
tegra20_i2s_runtime_resume, NULL)
};
@@ -481,7 +481,7 @@ static struct platform_driver tegra20_i2s_driver = {
.pm = &tegra20_i2s_pm_ops,
},
.probe = tegra20_i2s_platform_probe,
- .remove = __devexit_p(tegra20_i2s_platform_remove),
+ .remove = tegra20_i2s_platform_remove,
};
module_platform_driver(tegra20_i2s_driver);
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 3ebc8670ba0..04771d14d34 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -257,7 +257,7 @@ static const struct regmap_config tegra20_spdif_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int tegra20_spdif_platform_probe(struct platform_device *pdev)
+static int tegra20_spdif_platform_probe(struct platform_device *pdev)
{
struct tegra20_spdif *spdif;
struct resource *mem, *memregion, *dmareq;
@@ -357,7 +357,7 @@ err:
return ret;
}
-static int __devexit tegra20_spdif_platform_remove(struct platform_device *pdev)
+static int tegra20_spdif_platform_remove(struct platform_device *pdev)
{
struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
@@ -373,7 +373,7 @@ static int __devexit tegra20_spdif_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct dev_pm_ops tegra20_spdif_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra20_spdif_pm_ops = {
SET_RUNTIME_PM_OPS(tegra20_spdif_runtime_suspend,
tegra20_spdif_runtime_resume, NULL)
};
@@ -385,7 +385,7 @@ static struct platform_driver tegra20_spdif_driver = {
.pm = &tegra20_spdif_pm_ops,
},
.probe = tegra20_spdif_platform_probe,
- .remove = __devexit_p(tegra20_spdif_platform_remove),
+ .remove = tegra20_spdif_platform_remove,
};
module_platform_driver(tegra20_spdif_driver);
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 64b67a30919..f354dc390a0 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -287,7 +287,7 @@ int tegra30_ahub_unset_rx_cif_source(enum tegra30_ahub_rxcif rxcif)
}
EXPORT_SYMBOL_GPL(tegra30_ahub_unset_rx_cif_source);
-static const char * const configlink_clocks[] __devinitconst = {
+static const char * const configlink_clocks[] = {
"i2s0",
"i2s1",
"i2s2",
@@ -299,7 +299,7 @@ static const char * const configlink_clocks[] __devinitconst = {
"spdif_in",
};
-struct of_dev_auxdata ahub_auxdata[] __devinitdata = {
+struct of_dev_auxdata ahub_auxdata[] = {
OF_DEV_AUXDATA("nvidia,tegra30-i2s", 0x70080300, "tegra30-i2s.0", NULL),
OF_DEV_AUXDATA("nvidia,tegra30-i2s", 0x70080400, "tegra30-i2s.1", NULL),
OF_DEV_AUXDATA("nvidia,tegra30-i2s", 0x70080500, "tegra30-i2s.2", NULL),
@@ -433,7 +433,7 @@ static const struct regmap_config tegra30_ahub_ahub_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit tegra30_ahub_probe(struct platform_device *pdev)
+static int tegra30_ahub_probe(struct platform_device *pdev)
{
struct clk *clk;
int i;
@@ -585,7 +585,7 @@ err:
return ret;
}
-static int __devexit tegra30_ahub_remove(struct platform_device *pdev)
+static int tegra30_ahub_remove(struct platform_device *pdev)
{
if (!ahub)
return -ENODEV;
@@ -602,19 +602,19 @@ static int __devexit tegra30_ahub_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra30_ahub_of_match[] __devinitconst = {
+static const struct of_device_id tegra30_ahub_of_match[] = {
{ .compatible = "nvidia,tegra30-ahub", },
{},
};
-static const struct dev_pm_ops tegra30_ahub_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra30_ahub_pm_ops = {
SET_RUNTIME_PM_OPS(tegra30_ahub_runtime_suspend,
tegra30_ahub_runtime_resume, NULL)
};
static struct platform_driver tegra30_ahub_driver = {
.probe = tegra30_ahub_probe,
- .remove = __devexit_p(tegra30_ahub_remove),
+ .remove = tegra30_ahub_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 44184228d1f..27e91dd0b91 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -391,7 +391,7 @@ static const struct regmap_config tegra30_i2s_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static __devinit int tegra30_i2s_platform_probe(struct platform_device *pdev)
+static int tegra30_i2s_platform_probe(struct platform_device *pdev)
{
struct tegra30_i2s *i2s;
u32 cif_ids[2];
@@ -492,7 +492,7 @@ err:
return ret;
}
-static int __devexit tegra30_i2s_platform_remove(struct platform_device *pdev)
+static int tegra30_i2s_platform_remove(struct platform_device *pdev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(&pdev->dev);
@@ -508,12 +508,12 @@ static int __devexit tegra30_i2s_platform_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra30_i2s_of_match[] __devinitconst = {
+static const struct of_device_id tegra30_i2s_of_match[] = {
{ .compatible = "nvidia,tegra30-i2s", },
{},
};
-static const struct dev_pm_ops tegra30_i2s_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra30_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
tegra30_i2s_runtime_resume, NULL)
};
@@ -526,7 +526,7 @@ static struct platform_driver tegra30_i2s_driver = {
.pm = &tegra30_i2s_pm_ops,
},
.probe = tegra30_i2s_platform_probe,
- .remove = __devexit_p(tegra30_i2s_platform_remove),
+ .remove = tegra30_i2s_platform_remove,
};
module_platform_driver(tegra30_i2s_driver);
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 76cb1b363b7..c80adb9da47 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -150,7 +150,7 @@ static struct snd_soc_card snd_soc_tegra_alc5632 = {
.fully_routed = true,
};
-static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
+static int tegra_alc5632_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &snd_soc_tegra_alc5632;
@@ -227,7 +227,7 @@ err:
return ret;
}
-static int __devexit tegra_alc5632_remove(struct platform_device *pdev)
+static int tegra_alc5632_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(card);
@@ -242,7 +242,7 @@ static int __devexit tegra_alc5632_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_alc5632_of_match[] __devinitconst = {
+static const struct of_device_id tegra_alc5632_of_match[] = {
{ .compatible = "nvidia,tegra-audio-alc5632", },
{},
};
@@ -255,7 +255,7 @@ static struct platform_driver tegra_alc5632_driver = {
.of_match_table = tegra_alc5632_of_match,
},
.probe = tegra_alc5632_probe,
- .remove = __devexit_p(tegra_alc5632_remove),
+ .remove = tegra_alc5632_remove,
};
module_platform_driver(tegra_alc5632_driver);
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index e18733963cb..c925ab0adeb 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -253,13 +253,13 @@ static struct snd_soc_platform_driver tegra_pcm_platform = {
.pcm_free = tegra_pcm_free,
};
-int __devinit tegra_pcm_platform_register(struct device *dev)
+int tegra_pcm_platform_register(struct device *dev)
{
return snd_soc_register_platform(dev, &tegra_pcm_platform);
}
EXPORT_SYMBOL_GPL(tegra_pcm_platform_register);
-void __devexit tegra_pcm_platform_unregister(struct device *dev)
+void tegra_pcm_platform_unregister(struct device *dev)
{
snd_soc_unregister_platform(dev);
}
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index ea9166d5c4e..c8ef88a67c5 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -122,7 +122,7 @@ static struct snd_soc_card snd_soc_tegra_wm8753 = {
.fully_routed = true,
};
-static __devinit int tegra_wm8753_driver_probe(struct platform_device *pdev)
+static int tegra_wm8753_driver_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_tegra_wm8753;
struct tegra_wm8753 *machine;
@@ -188,7 +188,7 @@ err:
return ret;
}
-static int __devexit tegra_wm8753_driver_remove(struct platform_device *pdev)
+static int tegra_wm8753_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
@@ -200,7 +200,7 @@ static int __devexit tegra_wm8753_driver_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_wm8753_of_match[] __devinitconst = {
+static const struct of_device_id tegra_wm8753_of_match[] = {
{ .compatible = "nvidia,tegra-audio-wm8753", },
{},
};
@@ -213,7 +213,7 @@ static struct platform_driver tegra_wm8753_driver = {
.of_match_table = tegra_wm8753_of_match,
},
.probe = tegra_wm8753_driver_probe,
- .remove = __devexit_p(tegra_wm8753_driver_remove),
+ .remove = tegra_wm8753_driver_remove,
};
module_platform_driver(tegra_wm8753_driver);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index cee13b7bfb9..bbd79bf5630 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -252,7 +252,7 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
.fully_routed = true,
};
-static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
+static int tegra_wm8903_driver_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &snd_soc_tegra_wm8903;
@@ -402,7 +402,7 @@ err:
return ret;
}
-static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
+static int tegra_wm8903_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
@@ -417,7 +417,7 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_wm8903_of_match[] __devinitconst = {
+static const struct of_device_id tegra_wm8903_of_match[] = {
{ .compatible = "nvidia,tegra-audio-wm8903", },
{},
};
@@ -430,7 +430,7 @@ static struct platform_driver tegra_wm8903_driver = {
.of_match_table = tegra_wm8903_of_match,
},
.probe = tegra_wm8903_driver_probe,
- .remove = __devexit_p(tegra_wm8903_driver_remove),
+ .remove = tegra_wm8903_driver_remove,
};
module_platform_driver(tegra_wm8903_driver);
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index e69a4f7000d..7fcf6c2297d 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -120,7 +120,7 @@ static struct snd_soc_card snd_soc_trimslice = {
.fully_routed = true,
};
-static __devinit int tegra_snd_trimslice_probe(struct platform_device *pdev)
+static int tegra_snd_trimslice_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_trimslice;
struct tegra_trimslice *trimslice;
@@ -183,7 +183,7 @@ err:
return ret;
}
-static int __devexit tegra_snd_trimslice_remove(struct platform_device *pdev)
+static int tegra_snd_trimslice_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card);
@@ -195,7 +195,7 @@ static int __devexit tegra_snd_trimslice_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id trimslice_of_match[] __devinitconst = {
+static const struct of_device_id trimslice_of_match[] = {
{ .compatible = "nvidia,tegra-audio-trimslice", },
{},
};
@@ -208,7 +208,7 @@ static struct platform_driver tegra_snd_trimslice_driver = {
.of_match_table = trimslice_of_match,
},
.probe = tegra_snd_trimslice_probe,
- .remove = __devexit_p(tegra_snd_trimslice_remove),
+ .remove = tegra_snd_trimslice_remove,
};
module_platform_driver(tegra_snd_trimslice_driver);
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index 28db4ca997c..16ab69635e2 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -170,7 +170,7 @@ static struct snd_soc_dai_driver txx9aclc_ac97_dai = {
},
};
-static int __devinit txx9aclc_ac97_dev_probe(struct platform_device *pdev)
+static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
{
struct txx9aclc_plat_drvdata *drvdata;
struct resource *r;
@@ -208,7 +208,7 @@ static int __devinit txx9aclc_ac97_dev_probe(struct platform_device *pdev)
return snd_soc_register_dai(&pdev->dev, &txx9aclc_ac97_dai);
}
-static int __devexit txx9aclc_ac97_dev_remove(struct platform_device *pdev)
+static int txx9aclc_ac97_dev_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
@@ -216,7 +216,7 @@ static int __devexit txx9aclc_ac97_dev_remove(struct platform_device *pdev)
static struct platform_driver txx9aclc_ac97_driver = {
.probe = txx9aclc_ac97_dev_probe,
- .remove = __devexit_p(txx9aclc_ac97_dev_remove),
+ .remove = txx9aclc_ac97_dev_remove,
.driver = {
.name = "txx9aclc-ac97",
.owner = THIS_MODULE,
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index b609d2c64c5..45a6428cba8 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -417,12 +417,12 @@ static struct snd_soc_platform_driver txx9aclc_soc_platform = {
.pcm_free = txx9aclc_pcm_free_dma_buffers,
};
-static int __devinit txx9aclc_soc_platform_probe(struct platform_device *pdev)
+static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &txx9aclc_soc_platform);
}
-static int __devexit txx9aclc_soc_platform_remove(struct platform_device *pdev)
+static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
@@ -435,7 +435,7 @@ static struct platform_driver txx9aclc_pcm_driver = {
},
.probe = txx9aclc_soc_platform_probe,
- .remove = __devexit_p(txx9aclc_soc_platform_remove),
+ .remove = txx9aclc_soc_platform_remove,
};
module_platform_driver(txx9aclc_pcm_driver);
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 54f7e25b6f7..ae699073878 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -33,7 +33,7 @@ struct snd_soc_dai_link mop500_dai_links[] = {
.stream_name = "ab8500_0",
.cpu_dai_name = "ux500-msp-i2s.1",
.codec_dai_name = "ab8500-codec-dai.0",
- .platform_name = "ux500-pcm.0",
+ .platform_name = "ux500-msp-i2s.1",
.codec_name = "ab8500-codec.0",
.init = mop500_ab8500_machine_init,
.ops = mop500_ab8500_ops,
@@ -43,7 +43,7 @@ struct snd_soc_dai_link mop500_dai_links[] = {
.stream_name = "ab8500_1",
.cpu_dai_name = "ux500-msp-i2s.3",
.codec_dai_name = "ab8500-codec-dai.1",
- .platform_name = "ux500-pcm.0",
+ .platform_name = "ux500-msp-i2s.3",
.codec_name = "ab8500-codec.0",
.init = NULL,
.ops = mop500_ab8500_ops,
@@ -71,8 +71,8 @@ static void mop500_of_node_put(void)
}
}
-static int __devinit mop500_of_probe(struct platform_device *pdev,
- struct device_node *np)
+static int mop500_of_probe(struct platform_device *pdev,
+ struct device_node *np)
{
struct device_node *codec_np, *msp_np[2];
int i;
@@ -99,7 +99,7 @@ static int __devinit mop500_of_probe(struct platform_device *pdev,
return 0;
}
-static int __devinit mop500_probe(struct platform_device *pdev)
+static int mop500_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int ret;
@@ -136,7 +136,7 @@ static int __devinit mop500_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit mop500_remove(struct platform_device *pdev)
+static int mop500_remove(struct platform_device *pdev)
{
struct snd_soc_card *mop500_card = platform_get_drvdata(pdev);
@@ -161,7 +161,7 @@ static struct platform_driver snd_soc_mop500_driver = {
.of_match_table = snd_soc_mop500_match,
},
.probe = mop500_probe,
- .remove = __devexit_p(mop500_remove),
+ .remove = mop500_remove,
};
module_platform_driver(snd_soc_mop500_driver);
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index be94bf9bf94..94a3e5705aa 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -28,6 +28,7 @@
#include "ux500_msp_i2s.h"
#include "ux500_msp_dai.h"
+#include "ux500_pcm.h"
static int setup_pcm_multichan(struct snd_soc_dai *dai,
struct ux500_msp_config *msp_config)
@@ -398,11 +399,28 @@ static int ux500_msp_dai_startup(struct snd_pcm_substream *substream,
return ret;
}
- /* Enable clock */
- dev_dbg(dai->dev, "%s: Enabling MSP-clock.\n", __func__);
- clk_enable(drvdata->clk);
+ /* Prepare and enable clocks */
+ dev_dbg(dai->dev, "%s: Enabling MSP-clocks.\n", __func__);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret) {
+ dev_err(drvdata->msp->dev,
+ "%s: Failed to prepare/enable pclk!\n", __func__);
+ goto err_pclk;
+ }
- return 0;
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret) {
+ dev_err(drvdata->msp->dev,
+ "%s: Failed to prepare/enable clk!\n", __func__);
+ goto err_clk;
+ }
+
+ return ret;
+err_clk:
+ clk_disable_unprepare(drvdata->pclk);
+err_pclk:
+ regulator_disable(drvdata->reg_vape);
+ return ret;
}
static void ux500_msp_dai_shutdown(struct snd_pcm_substream *substream,
@@ -428,8 +446,9 @@ static void ux500_msp_dai_shutdown(struct snd_pcm_substream *substream,
__func__, dai->id, snd_pcm_stream_str(substream));
}
- /* Disable clock */
- clk_disable(drvdata->clk);
+ /* Disable and unprepare clocks */
+ clk_disable_unprepare(drvdata->clk);
+ clk_disable_unprepare(drvdata->pclk);
/* Disable regulator */
ret = regulator_disable(drvdata->reg_vape);
@@ -749,7 +768,7 @@ static struct snd_soc_dai_driver ux500_msp_dai_drv[UX500_NBR_OF_DAI] = {
},
};
-static int __devinit ux500_msp_drv_probe(struct platform_device *pdev)
+static int ux500_msp_drv_probe(struct platform_device *pdev)
{
struct ux500_msp_i2s_drvdata *drvdata;
int ret = 0;
@@ -780,6 +799,14 @@ static int __devinit ux500_msp_drv_probe(struct platform_device *pdev)
}
prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50);
+ drvdata->pclk = clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(drvdata->pclk)) {
+ ret = (int)PTR_ERR(drvdata->pclk);
+ dev_err(&pdev->dev, "%s: ERROR: clk_get of pclk failed (%d)!\n",
+ __func__, ret);
+ goto err_pclk;
+ }
+
drvdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(drvdata->clk)) {
ret = (int)PTR_ERR(drvdata->clk);
@@ -806,27 +833,41 @@ static int __devinit ux500_msp_drv_probe(struct platform_device *pdev)
goto err_init_msp;
}
+ ret = ux500_pcm_register_platform(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Error: %s: Failed to register PCM platform device!\n",
+ __func__);
+ goto err_reg_plat;
+ }
+
return 0;
+err_reg_plat:
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(ux500_msp_dai_drv));
err_init_msp:
clk_put(drvdata->clk);
-
err_clk:
+ clk_put(drvdata->pclk);
+err_pclk:
devm_regulator_put(drvdata->reg_vape);
return ret;
}
-static int __devexit ux500_msp_drv_remove(struct platform_device *pdev)
+static int ux500_msp_drv_remove(struct platform_device *pdev)
{
struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+ ux500_pcm_unregister_platform(pdev);
+
snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(ux500_msp_dai_drv));
devm_regulator_put(drvdata->reg_vape);
prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s");
clk_put(drvdata->clk);
+ clk_put(drvdata->pclk);
ux500_msp_i2s_cleanup_msp(pdev, drvdata->msp);
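
Beyond the annotation removal, the ux500_msp_dai.c hunks switch from bare clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() and take an additional bus clock ("apb_pclk") alongside the functional clock, unwinding each step on failure. The general shape of that pattern, sketched with placeholder names rather than the driver's actual fields, is:

#include <linux/clk.h>
#include <linux/device.h>

/* Enable a bus clock and a functional clock, unwinding on failure. */
static int sketch_clks_on(struct device *dev, struct clk *pclk, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(pclk);	/* prepare + enable in one call */
	if (ret) {
		dev_err(dev, "failed to enable pclk: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "failed to enable clk: %d\n", ret);
		clk_disable_unprepare(pclk);	/* undo the first clock */
		return ret;
	}

	return 0;
}

static void sketch_clks_off(struct clk *pclk, struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_disable_unprepare(pclk);
}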
diff --git a/sound/soc/ux500/ux500_msp_dai.h b/sound/soc/ux500/ux500_msp_dai.h
index 98202a34a5d..9c778d9c383 100644
--- a/sound/soc/ux500/ux500_msp_dai.h
+++ b/sound/soc/ux500/ux500_msp_dai.h
@@ -69,6 +69,7 @@ struct ux500_msp_i2s_drvdata {
/* Clocks */
unsigned int master_clk;
struct clk *clk;
+ struct clk *pclk;
/* Regulators */
int vape_opp_constraint;
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index 1a04e248453..846fa82a58d 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -18,8 +18,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
-
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -282,7 +281,7 @@ static struct snd_soc_platform_driver ux500_pcm_soc_drv = {
.pcm_new = ux500_pcm_new,
};
-static int __devexit ux500_pcm_drv_probe(struct platform_device *pdev)
+int ux500_pcm_register_platform(struct platform_device *pdev)
{
int ret;
@@ -296,23 +295,12 @@ static int __devexit ux500_pcm_drv_probe(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(ux500_pcm_register_platform);
-static int __devinit ux500_pcm_drv_remove(struct platform_device *pdev)
+int ux500_pcm_unregister_platform(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
-
-static struct platform_driver ux500_pcm_driver = {
- .driver = {
- .name = "ux500-pcm",
- .owner = THIS_MODULE,
- },
-
- .probe = ux500_pcm_drv_probe,
- .remove = __devexit_p(ux500_pcm_drv_remove),
-};
-module_platform_driver(ux500_pcm_driver);
-
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
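
The ux500_pcm.c change drops the standalone "ux500-pcm" platform driver and instead exports ux500_pcm_register_platform()/ux500_pcm_unregister_platform(), which the MSP DAI driver calls from its own probe/remove; this is also why the mop500 DAI links earlier in the diff now name the MSP devices ("ux500-msp-i2s.1"/".3") as their platform instead of "ux500-pcm.0". In sketch form (simplified placeholder names, not the actual ux500 code), the helpers simply register the PCM platform against the caller's device:

#include <linux/platform_device.h>
#include <sound/soc.h>

/* Assumed to be defined elsewhere in the real driver. */
extern struct snd_soc_platform_driver sketch_pcm_soc_drv;

int sketch_pcm_register_platform(struct platform_device *pdev)
{
	/* Register the PCM (DMA) platform on the DAI driver's own device. */
	return snd_soc_register_platform(&pdev->dev, &sketch_pcm_soc_drv);
}

void sketch_pcm_unregister_platform(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
}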
diff --git a/sound/soc/ux500/ux500_pcm.h b/sound/soc/ux500/ux500_pcm.h
index 77ed44d371e..76d344476af 100644
--- a/sound/soc/ux500/ux500_pcm.h
+++ b/sound/soc/ux500/ux500_pcm.h
@@ -32,4 +32,7 @@
#define UX500_PLATFORM_PERIODS_MAX 48
#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE)
+int ux500_pcm_register_platform(struct platform_device *pdev);
+int ux500_pcm_unregister_platform(struct platform_device *pdev);
+
#endif
diff --git a/sound/sound_core.c b/sound/sound_core.c
index fb9255cca21..bb23009edc8 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -146,8 +146,7 @@ extern int msnd_pinnacle_init(void);
* devices only the standard chrdev aliases are requested.
*
* All these clutters are scheduled to be removed along with
- * sound-slot/service-* module aliases. Please take a look at
- * feature-removal-schedule.txt for details.
+ * sound-slot/service-* module aliases.
*/
#ifdef CONFIG_SOUND_OSS_CORE_PRECLAIM
static int preclaim_oss = 1;
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 5701787c0e6..174d21fb56e 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -755,7 +755,7 @@ static struct snd_pcm_ops snd_amd7930_capture_ops = {
.pointer = snd_amd7930_capture_pointer,
};
-static int __devinit snd_amd7930_pcm(struct snd_amd7930 *amd)
+static int snd_amd7930_pcm(struct snd_amd7930 *amd)
{
struct snd_pcm *pcm;
int err;
@@ -854,7 +854,7 @@ static int snd_amd7930_put_volume(struct snd_kcontrol *kctl, struct snd_ctl_elem
return change;
}
-static struct snd_kcontrol_new amd7930_controls[] __devinitdata = {
+static struct snd_kcontrol_new amd7930_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Monitor Volume",
@@ -884,7 +884,7 @@ static struct snd_kcontrol_new amd7930_controls[] __devinitdata = {
},
};
-static int __devinit snd_amd7930_mixer(struct snd_amd7930 *amd)
+static int snd_amd7930_mixer(struct snd_amd7930 *amd)
{
struct snd_card *card;
int idx, err;
@@ -933,10 +933,10 @@ static struct snd_device_ops snd_amd7930_dev_ops = {
.dev_free = snd_amd7930_dev_free,
};
-static int __devinit snd_amd7930_create(struct snd_card *card,
- struct platform_device *op,
- int irq, int dev,
- struct snd_amd7930 **ramd)
+static int snd_amd7930_create(struct snd_card *card,
+ struct platform_device *op,
+ int irq, int dev,
+ struct snd_amd7930 **ramd)
{
struct snd_amd7930 *amd;
unsigned long flags;
@@ -1002,7 +1002,7 @@ static int __devinit snd_amd7930_create(struct snd_card *card,
return 0;
}
-static int __devinit amd7930_sbus_probe(struct platform_device *op)
+static int amd7930_sbus_probe(struct platform_device *op)
{
struct resource *rp = &op->resource[0];
static int dev_num;
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index f2eabd3f22f..54aaad2a10f 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -702,7 +702,7 @@ static int snd_cs4231_timer_stop(struct snd_timer *timer)
return 0;
}
-static void __devinit snd_cs4231_init(struct snd_cs4231 *chip)
+static void snd_cs4231_init(struct snd_cs4231 *chip)
{
unsigned long flags;
@@ -1019,7 +1019,7 @@ static snd_pcm_uframes_t snd_cs4231_capture_pointer(
return bytes_to_frames(substream->runtime, ptr);
}
-static int __devinit snd_cs4231_probe(struct snd_cs4231 *chip)
+static int snd_cs4231_probe(struct snd_cs4231 *chip)
{
unsigned long flags;
int i;
@@ -1218,7 +1218,7 @@ static struct snd_pcm_ops snd_cs4231_capture_ops = {
.pointer = snd_cs4231_capture_pointer,
};
-static int __devinit snd_cs4231_pcm(struct snd_card *card)
+static int snd_cs4231_pcm(struct snd_card *card)
{
struct snd_cs4231 *chip = card->private_data;
struct snd_pcm *pcm;
@@ -1247,7 +1247,7 @@ static int __devinit snd_cs4231_pcm(struct snd_card *card)
return 0;
}
-static int __devinit snd_cs4231_timer(struct snd_card *card)
+static int snd_cs4231_timer(struct snd_card *card)
{
struct snd_cs4231 *chip = card->private_data;
struct snd_timer *timer;
@@ -1498,7 +1498,7 @@ static int snd_cs4231_put_double(struct snd_kcontrol *kcontrol,
.private_value = (left_reg) | ((right_reg) << 8) | ((shift_left) << 16) | \
((shift_right) << 19) | ((mask) << 24) | ((invert) << 22) }
-static struct snd_kcontrol_new snd_cs4231_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_cs4231_controls[] = {
CS4231_DOUBLE("PCM Playback Switch", 0, CS4231_LEFT_OUTPUT,
CS4231_RIGHT_OUTPUT, 7, 7, 1, 1),
CS4231_DOUBLE("PCM Playback Volume", 0, CS4231_LEFT_OUTPUT,
@@ -1537,7 +1537,7 @@ CS4231_SINGLE("Line Out Switch", 0, CS4231_PIN_CTRL, 6, 1, 1),
CS4231_SINGLE("Headphone Out Switch", 0, CS4231_PIN_CTRL, 7, 1, 1)
};
-static int __devinit snd_cs4231_mixer(struct snd_card *card)
+static int snd_cs4231_mixer(struct snd_card *card)
{
struct snd_cs4231 *chip = card->private_data;
int err, idx;
@@ -1558,7 +1558,7 @@ static int __devinit snd_cs4231_mixer(struct snd_card *card)
static int dev;
-static int __devinit cs4231_attach_begin(struct snd_card **rcard)
+static int cs4231_attach_begin(struct snd_card **rcard)
{
struct snd_card *card;
struct snd_cs4231 *chip;
@@ -1589,7 +1589,7 @@ static int __devinit cs4231_attach_begin(struct snd_card **rcard)
return 0;
}
-static int __devinit cs4231_attach_finish(struct snd_card *card)
+static int cs4231_attach_finish(struct snd_card *card)
{
struct snd_cs4231 *chip = card->private_data;
int err;
@@ -1793,9 +1793,9 @@ static struct snd_device_ops snd_cs4231_sbus_dev_ops = {
.dev_free = snd_cs4231_sbus_dev_free,
};
-static int __devinit snd_cs4231_sbus_create(struct snd_card *card,
- struct platform_device *op,
- int dev)
+static int snd_cs4231_sbus_create(struct snd_card *card,
+ struct platform_device *op,
+ int dev)
{
struct snd_cs4231 *chip = card->private_data;
int err;
@@ -1856,7 +1856,7 @@ static int __devinit snd_cs4231_sbus_create(struct snd_card *card,
return 0;
}
-static int __devinit cs4231_sbus_probe(struct platform_device *op)
+static int cs4231_sbus_probe(struct platform_device *op)
{
struct resource *rp = &op->resource[0];
struct snd_card *card;
@@ -1959,9 +1959,9 @@ static struct snd_device_ops snd_cs4231_ebus_dev_ops = {
.dev_free = snd_cs4231_ebus_dev_free,
};
-static int __devinit snd_cs4231_ebus_create(struct snd_card *card,
- struct platform_device *op,
- int dev)
+static int snd_cs4231_ebus_create(struct snd_card *card,
+ struct platform_device *op,
+ int dev)
{
struct snd_cs4231 *chip = card->private_data;
int err;
@@ -2048,7 +2048,7 @@ static int __devinit snd_cs4231_ebus_create(struct snd_card *card,
return 0;
}
-static int __devinit cs4231_ebus_probe(struct platform_device *op)
+static int cs4231_ebus_probe(struct platform_device *op)
{
struct snd_card *card;
int err;
@@ -2072,7 +2072,7 @@ static int __devinit cs4231_ebus_probe(struct platform_device *op)
}
#endif
-static int __devinit cs4231_probe(struct platform_device *op)
+static int cs4231_probe(struct platform_device *op)
{
#ifdef EBUS_SUPPORT
if (!strcmp(op->dev.of_node->parent->name, "ebus"))
@@ -2086,7 +2086,7 @@ static int __devinit cs4231_probe(struct platform_device *op)
return -ENODEV;
}
-static int __devexit cs4231_remove(struct platform_device *op)
+static int cs4231_remove(struct platform_device *op)
{
struct snd_cs4231 *chip = dev_get_drvdata(&op->dev);
@@ -2115,7 +2115,7 @@ static struct platform_driver cs4231_driver = {
.of_match_table = cs4231_match,
},
.probe = cs4231_probe,
- .remove = __devexit_p(cs4231_remove),
+ .remove = cs4231_remove,
};
module_platform_driver(cs4231_driver);
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index ae35f5342e1..75e6016d3ef 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -745,7 +745,7 @@ static void dbri_reset(struct snd_dbri *dbri)
}
/* Lock must not be held before calling this */
-static void __devinit dbri_initialize(struct snd_dbri *dbri)
+static void dbri_initialize(struct snd_dbri *dbri)
{
s32 *cmd;
u32 dma_addr;
@@ -1305,7 +1305,7 @@ to the DBRI via the CHI interface and few of the DBRI's PIO pins.
* Lock must not be held before calling it.
*/
-static __devinit void cs4215_setup_pipes(struct snd_dbri *dbri)
+static void cs4215_setup_pipes(struct snd_dbri *dbri)
{
unsigned long flags;
@@ -1338,7 +1338,7 @@ static __devinit void cs4215_setup_pipes(struct snd_dbri *dbri)
dbri_cmdwait(dbri);
}
-static __devinit int cs4215_init_data(struct cs4215 *mm)
+static int cs4215_init_data(struct cs4215 *mm)
{
/*
* No action, memory resetting only.
@@ -1630,7 +1630,7 @@ static int cs4215_prepare(struct snd_dbri *dbri, unsigned int rate,
/*
*
*/
-static __devinit int cs4215_init(struct snd_dbri *dbri)
+static int cs4215_init(struct snd_dbri *dbri)
{
u32 reg2 = sbus_readl(dbri->regs + REG2);
dprintk(D_MM, "cs4215_init: reg2=0x%x\n", reg2);
@@ -2217,7 +2217,7 @@ static struct snd_pcm_ops snd_dbri_ops = {
.pointer = snd_dbri_pointer,
};
-static int __devinit snd_dbri_pcm(struct snd_card *card)
+static int snd_dbri_pcm(struct snd_card *card)
{
struct snd_pcm *pcm;
int err;
@@ -2409,7 +2409,7 @@ static int snd_cs4215_put_single(struct snd_kcontrol *kcontrol,
.private_value = (entry) | ((shift) << 8) | ((mask) << 16) | \
((invert) << 24) },
-static struct snd_kcontrol_new dbri_controls[] __devinitdata = {
+static struct snd_kcontrol_new dbri_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Playback Volume",
@@ -2436,7 +2436,7 @@ static struct snd_kcontrol_new dbri_controls[] __devinitdata = {
CS4215_SINGLE("Mic boost", 4, 4, 1, 1)
};
-static int __devinit snd_dbri_mixer(struct snd_card *card)
+static int snd_dbri_mixer(struct snd_card *card)
{
int idx, err;
struct snd_dbri *dbri;
@@ -2500,7 +2500,7 @@ static void dbri_debug_read(struct snd_info_entry *entry,
}
#endif
-static void __devinit snd_dbri_proc(struct snd_card *card)
+static void snd_dbri_proc(struct snd_card *card)
{
struct snd_dbri *dbri = card->private_data;
struct snd_info_entry *entry;
@@ -2523,9 +2523,9 @@ static void __devinit snd_dbri_proc(struct snd_card *card)
*/
static void snd_dbri_free(struct snd_dbri *dbri);
-static int __devinit snd_dbri_create(struct snd_card *card,
- struct platform_device *op,
- int irq, int dev)
+static int snd_dbri_create(struct snd_card *card,
+ struct platform_device *op,
+ int irq, int dev)
{
struct snd_dbri *dbri = card->private_data;
int err;
@@ -2593,7 +2593,7 @@ static void snd_dbri_free(struct snd_dbri *dbri)
(void *)dbri->dma, dbri->dma_dvma);
}
-static int __devinit dbri_probe(struct platform_device *op)
+static int dbri_probe(struct platform_device *op)
{
struct snd_dbri *dbri;
struct resource *rp;
@@ -2663,7 +2663,7 @@ _err:
return err;
}
-static int __devexit dbri_remove(struct platform_device *op)
+static int dbri_remove(struct platform_device *op)
{
struct snd_card *card = dev_get_drvdata(&op->dev);
@@ -2694,7 +2694,7 @@ static struct platform_driver dbri_sbus_driver = {
.of_match_table = dbri_match,
},
.probe = dbri_probe,
- .remove = __devexit_p(dbri_remove),
+ .remove = dbri_remove,
};
module_platform_driver(dbri_sbus_driver);
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c
index c6500d00053..4dd60d8a488 100644
--- a/sound/spi/at73c213.c
+++ b/sound/spi/at73c213.c
@@ -330,7 +330,7 @@ static struct snd_pcm_ops at73c213_playback_ops = {
.pointer = snd_at73c213_pcm_pointer,
};
-static int __devinit snd_at73c213_pcm_new(struct snd_at73c213 *chip, int device)
+static int snd_at73c213_pcm_new(struct snd_at73c213 *chip, int device)
{
struct snd_pcm *pcm;
int retval;
@@ -665,7 +665,7 @@ static int snd_at73c213_aux_capture_volume_info(
| (mask << 24) | (invert << 22)) \
}
-static struct snd_kcontrol_new snd_at73c213_controls[] __devinitdata = {
+static struct snd_kcontrol_new snd_at73c213_controls[] = {
AT73C213_STEREO("Master Playback Volume", 0, DAC_LMPG, DAC_RMPG, 0, 0, 0x1f, 1),
AT73C213_STEREO("Master Playback Switch", 0, DAC_LMPG, DAC_RMPG, 5, 5, 1, 1),
AT73C213_STEREO("PCM Playback Volume", 0, DAC_LLOG, DAC_RLOG, 0, 0, 0x1f, 1),
@@ -709,7 +709,7 @@ AT73C213_MONO_SWITCH("Aux Capture Switch", 0, DAC_CTRL, DAC_CTRL_ONAUXIN,
AT73C213_MONO_SWITCH("Line Capture Switch", 0, DAC_CTRL, 0, 0x03, 0),
};
-static int __devinit snd_at73c213_mixer(struct snd_at73c213 *chip)
+static int snd_at73c213_mixer(struct snd_at73c213 *chip)
{
struct snd_card *card;
int errval, idx;
@@ -744,7 +744,7 @@ cleanup:
/*
* Device functions
*/
-static int __devinit snd_at73c213_ssc_init(struct snd_at73c213 *chip)
+static int snd_at73c213_ssc_init(struct snd_at73c213 *chip)
{
/*
* Continuous clock output.
@@ -774,7 +774,7 @@ static int __devinit snd_at73c213_ssc_init(struct snd_at73c213 *chip)
return 0;
}
-static int __devinit snd_at73c213_chip_init(struct snd_at73c213 *chip)
+static int snd_at73c213_chip_init(struct snd_at73c213 *chip)
{
int retval;
unsigned char dac_ctrl = 0;
@@ -879,8 +879,8 @@ static int snd_at73c213_dev_free(struct snd_device *device)
return 0;
}
-static int __devinit snd_at73c213_dev_init(struct snd_card *card,
- struct spi_device *spi)
+static int snd_at73c213_dev_init(struct snd_card *card,
+ struct spi_device *spi)
{
static struct snd_device_ops ops = {
.dev_free = snd_at73c213_dev_free,
@@ -940,7 +940,7 @@ out:
return retval;
}
-static int __devinit snd_at73c213_probe(struct spi_device *spi)
+static int snd_at73c213_probe(struct spi_device *spi)
{
struct snd_card *card;
struct snd_at73c213 *chip;
@@ -1007,7 +1007,7 @@ out:
return retval;
}
-static int __devexit snd_at73c213_remove(struct spi_device *spi)
+static int snd_at73c213_remove(struct spi_device *spi)
{
struct snd_card *card = dev_get_drvdata(&spi->dev);
struct snd_at73c213 *chip = card->private_data;
@@ -1109,7 +1109,7 @@ static struct spi_driver at73c213_driver = {
.probe = snd_at73c213_probe,
.suspend = snd_at73c213_suspend,
.resume = snd_at73c213_resume,
- .remove = __devexit_p(snd_at73c213_remove),
+ .remove = snd_at73c213_remove,
};
module_spi_driver(at73c213_driver);
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
index fc8cc823e43..4394ae79635 100644
--- a/sound/usb/6fire/chip.c
+++ b/sound/usb/6fire/chip.c
@@ -82,8 +82,8 @@ static void usb6fire_chip_destroy(struct sfire_chip *chip)
}
}
-static int __devinit usb6fire_chip_probe(struct usb_interface *intf,
- const struct usb_device_id *usb_id)
+static int usb6fire_chip_probe(struct usb_interface *intf,
+ const struct usb_device_id *usb_id)
{
int ret;
int i;
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 6c3d531a250..9e6e3ffd86b 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -125,16 +125,17 @@ static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
}
-int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+int usb6fire_comm_init(struct sfire_chip *chip)
{
struct comm_runtime *rt = kzalloc(sizeof(struct comm_runtime),
GFP_KERNEL);
- struct urb *urb = &rt->receiver;
+ struct urb *urb;
int ret;
if (!rt)
return -ENOMEM;
+ urb = &rt->receiver;
rt->serial = 1;
rt->chip = chip;
usb_init_urb(urb);
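The small reordering above is more than cosmetic: the old code computed &rt->receiver before checking whether kzalloc() had succeeded, which static checkers flag and which is formally invalid pointer arithmetic on a null pointer. A minimal sketch of the corrected pattern, reusing the names from the hunk:

    struct comm_runtime *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
    struct urb *urb;

    if (!rt)
            return -ENOMEM;
    urb = &rt->receiver;    /* rt is known to be non-NULL here */
    usb_init_urb(urb);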
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index d2af0a5ddcf..6a0840b0dcf 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -36,7 +36,7 @@ struct comm_runtime {
u8 vh, u8 vl);
};
-int __devinit usb6fire_comm_init(struct sfire_chip *chip);
+int usb6fire_comm_init(struct sfire_chip *chip);
void usb6fire_comm_abort(struct sfire_chip *chip);
void usb6fire_comm_destroy(struct sfire_chip *chip);
#endif /* USB6FIRE_COMM_H */
diff --git a/sound/usb/6fire/control.c b/sound/usb/6fire/control.c
index 07ed914d5e7..f6434c24572 100644
--- a/sound/usb/6fire/control.c
+++ b/sound/usb/6fire/control.c
@@ -411,7 +411,7 @@ static int usb6fire_control_digital_thru_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct __devinitdata snd_kcontrol_new vol_elements[] = {
+static struct snd_kcontrol_new vol_elements[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog Playback Volume",
@@ -451,7 +451,7 @@ static struct __devinitdata snd_kcontrol_new vol_elements[] = {
{}
};
-static struct __devinitdata snd_kcontrol_new mute_elements[] = {
+static struct snd_kcontrol_new mute_elements[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog Playback Switch",
@@ -485,7 +485,7 @@ static struct __devinitdata snd_kcontrol_new mute_elements[] = {
{}
};
-static struct __devinitdata snd_kcontrol_new elements[] = {
+static struct snd_kcontrol_new elements[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line/Phono Capture Route",
@@ -561,7 +561,7 @@ static int usb6fire_control_add_virtual(
return 0;
}
-int __devinit usb6fire_control_init(struct sfire_chip *chip)
+int usb6fire_control_init(struct sfire_chip *chip)
{
int i;
int ret;
diff --git a/sound/usb/6fire/control.h b/sound/usb/6fire/control.h
index 9a596d95474..5a40ba14348 100644
--- a/sound/usb/6fire/control.h
+++ b/sound/usb/6fire/control.h
@@ -50,7 +50,7 @@ struct control_runtime {
u8 ivol_updated;
};
-int __devinit usb6fire_control_init(struct sfire_chip *chip);
+int usb6fire_control_init(struct sfire_chip *chip);
void usb6fire_control_abort(struct sfire_chip *chip);
void usb6fire_control_destroy(struct sfire_chip *chip);
#endif /* USB6FIRE_CONTROL_H */
diff --git a/sound/usb/6fire/firmware.h b/sound/usb/6fire/firmware.h
index 00856989538..c109c4f75ab 100644
--- a/sound/usb/6fire/firmware.h
+++ b/sound/usb/6fire/firmware.h
@@ -22,6 +22,6 @@ enum /* firmware state of device */
FW_NOT_READY = 1
};
-int __devinit usb6fire_fw_init(struct usb_interface *intf);
+int usb6fire_fw_init(struct usb_interface *intf);
#endif /* USB6FIRE_FIRMWARE_H */
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index f0e5179b242..26722423330 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -146,7 +146,7 @@ static struct snd_rawmidi_ops in_ops = {
.trigger = usb6fire_midi_in_trigger
};
-int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+int usb6fire_midi_init(struct sfire_chip *chip)
{
int ret;
struct midi_runtime *rt = kzalloc(sizeof(struct midi_runtime),
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index 5114eccc1d8..c321006e543 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -38,7 +38,7 @@ struct midi_runtime {
void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
};
-int __devinit usb6fire_midi_init(struct sfire_chip *chip);
+int usb6fire_midi_init(struct sfire_chip *chip);
void usb6fire_midi_abort(struct sfire_chip *chip);
void usb6fire_midi_destroy(struct sfire_chip *chip);
#endif /* USB6FIRE_MIDI_H */
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index c97d05f0e96..e2ca12fe92e 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -135,6 +135,9 @@ static void usb6fire_pcm_stream_stop(struct pcm_runtime *rt)
struct control_runtime *ctrl_rt = rt->chip->control;
if (rt->stream_state != STREAM_DISABLED) {
+
+ rt->stream_state = STREAM_STOPPING;
+
for (i = 0; i < PCM_N_URBS; i++) {
usb_kill_urb(&rt->in_urbs[i].instance);
usb_kill_urb(&rt->out_urbs[i].instance);
@@ -559,9 +562,9 @@ static struct snd_pcm_ops pcm_ops = {
.pointer = usb6fire_pcm_pointer,
};
-static void __devinit usb6fire_pcm_init_urb(struct pcm_urb *urb,
- struct sfire_chip *chip, bool in, int ep,
- void (*handler)(struct urb *))
+static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
+ struct sfire_chip *chip, bool in, int ep,
+ void (*handler)(struct urb *))
{
urb->chip = chip;
usb_init_urb(&urb->instance);
@@ -578,7 +581,7 @@ static void __devinit usb6fire_pcm_init_urb(struct pcm_urb *urb,
urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
}
-int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+int usb6fire_pcm_init(struct sfire_chip *chip)
{
int i;
int ret;
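Marking the stream STREAM_STOPPING before the usb_kill_urb() loop matters because the URB completion handlers otherwise race with the stop path and may resubmit a URB that was just killed. A minimal sketch of the handler-side check this relies on (handler name and field layout are illustrative, not copied from the driver):

    static void pcm_urb_complete(struct urb *urb)       /* illustrative handler */
    {
            struct pcm_runtime *rt = urb->context;      /* field layout is a sketch */

            if (rt->stream_state == STREAM_STOPPING)
                    return;                         /* the stop path owns the URBs now */
            /* ... move audio data ... */
            usb_submit_urb(urb, GFP_ATOMIC);        /* resubmit only while still running */
    }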
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 3104301b257..9b01133ee3f 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -69,7 +69,7 @@ struct pcm_runtime {
bool stream_wait_cond;
};
-int __devinit usb6fire_pcm_init(struct sfire_chip *chip);
+int usb6fire_pcm_init(struct sfire_chip *chip);
void usb6fire_pcm_abort(struct sfire_chip *chip);
void usb6fire_pcm_destroy(struct sfire_chip *chip);
#endif /* USB6FIRE_PCM_H */
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index ff77b28f3da..225dfd73726 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -90,7 +90,7 @@ config SND_USB_CAIAQ_INPUT
config SND_USB_US122L
tristate "Tascam US-122L USB driver"
- depends on X86 && EXPERIMENTAL
+ depends on X86
select SND_HWDEP
select SND_RAWMIDI
help
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c
index 00e5d0a469e..adb8d03267a 100644
--- a/sound/usb/caiaq/control.c
+++ b/sound/usb/caiaq/control.c
@@ -137,7 +137,7 @@ static int control_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new kcontrol_template __devinitdata = {
+static struct snd_kcontrol_new kcontrol_template = {
.iface = SNDRV_CTL_ELEM_IFACE_HWDEP,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.index = 0,
@@ -489,8 +489,8 @@ static struct caiaq_controller kontrols4_controller[] = {
{ "LED: FX2: Mode", 133 | CNT_INTVAL },
};
-static int __devinit add_controls(struct caiaq_controller *c, int num,
- struct snd_usb_caiaqdev *dev)
+static int add_controls(struct caiaq_controller *c, int num,
+ struct snd_usb_caiaqdev *dev)
{
int i, ret;
struct snd_kcontrol *kc;
@@ -507,7 +507,7 @@ static int __devinit add_controls(struct caiaq_controller *c, int num,
return 0;
}
-int __devinit snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *dev)
+int snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *dev)
{
int ret = 0;
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 7da0d0aa72c..c828f8189c2 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -289,7 +289,7 @@ int snd_usb_caiaq_set_auto_msg(struct snd_usb_caiaqdev *dev,
tmp, sizeof(tmp));
}
-static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
+static void setup_card(struct snd_usb_caiaqdev *dev)
{
int ret;
char val[4];
@@ -407,7 +407,7 @@ static int create_card(struct usb_device *usb_dev,
return 0;
}
-static int __devinit init_card(struct snd_usb_caiaqdev *dev)
+static int init_card(struct snd_usb_caiaqdev *dev)
{
char *c, usbpath[32];
struct usb_device *usb_dev = dev->chip.dev;
@@ -481,7 +481,7 @@ static int __devinit init_card(struct snd_usb_caiaqdev *dev)
return 0;
}
-static int __devinit snd_probe(struct usb_interface *intf,
+static int snd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int ret;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index dbf7999d18b..ccf95cfe186 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -25,9 +25,6 @@
*
* NOTES:
*
- * - async unlink should be used for avoiding the sleep inside lock.
- * 2.4.22 usb-uhci seems buggy for async unlinking and results in
- * oops. in such a cse, pass async_unlink=0 option.
* - the linked URBs would be preferred but not used so far because of
* the instability of unlinking.
* - type II is not supported properly. there is no device which supports
@@ -83,7 +80,6 @@ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card *
static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
static int nrpacks = 8; /* max. number of packets per urb */
-static bool async_unlink = 1;
static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
static bool ignore_ctl_error;
@@ -99,8 +95,6 @@ module_param_array(pid, int, NULL, 0444);
MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
module_param(nrpacks, int, 0644);
MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
-module_param(async_unlink, bool, 0444);
-MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
module_param_array(device_setup, int, NULL, 0444);
MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
module_param(ignore_ctl_error, bool, 0444);
@@ -345,7 +339,6 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
chip->card = card;
chip->setup = device_setup[idx];
chip->nrpacks = nrpacks;
- chip->async_unlink = async_unlink;
chip->probing = 1;
chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 814cb357ff8..8a751b4887e 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -27,6 +27,7 @@ struct audioformat {
unsigned int nr_rates; /* number of rate table entries */
unsigned int *rate_table; /* rate table */
unsigned char clock; /* associated clock */
+ struct snd_pcm_chmap_elem *chmap; /* (optional) channel map */
};
struct snd_usb_substream;
@@ -109,6 +110,7 @@ struct snd_usb_substream {
struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */
snd_pcm_format_t pcm_format; /* current audio format (for hw_params callback) */
unsigned int channels; /* current number of channels (for hw_params callback) */
+ unsigned int channels_max; /* max channels in the all audiofmts */
unsigned int cur_rate; /* current rate (for hw_params callback) */
unsigned int period_bytes; /* current period bytes (for hw_params callback) */
unsigned int altset_idx; /* USB data format: index of alternate setting */
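The new chmap member hangs a channel-map description off each audioformat so the PCM layer can expose it through the ALSA chmap controls. For reference, a minimal sketch of how such an element is typically filled in (a generic stereo map, not taken from this driver):

    static const struct snd_pcm_chmap_elem stereo_chmap[] = {
            { .channels = 2,
              .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
            { }     /* terminator */
    };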
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 34de6f2faf6..21049b882ee 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -485,15 +485,10 @@ __exit_unlock:
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
unsigned long end_time = jiffies + msecs_to_jiffies(1000);
- unsigned int i;
int alive;
do {
- alive = 0;
- for (i = 0; i < ep->nurbs; i++)
- if (test_bit(i, &ep->active_mask))
- alive++;
-
+ alive = bitmap_weight(&ep->active_mask, ep->nurbs);
if (!alive)
break;
@@ -520,33 +515,24 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
/*
* unlink active urbs.
*/
-static int deactivate_urbs(struct snd_usb_endpoint *ep, int force, int can_sleep)
+static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
{
unsigned int i;
- int async;
if (!force && ep->chip->shutdown) /* to be sure... */
return -EBADFD;
- async = !can_sleep && ep->chip->async_unlink;
-
clear_bit(EP_FLAG_RUNNING, &ep->flags);
INIT_LIST_HEAD(&ep->ready_playback_urbs);
ep->next_packet_read_pos = 0;
ep->next_packet_write_pos = 0;
- if (!async && in_interrupt())
- return 0;
-
for (i = 0; i < ep->nurbs; i++) {
if (test_bit(i, &ep->active_mask)) {
if (!test_and_set_bit(i, &ep->unlink_mask)) {
struct urb *u = ep->urb[i].urb;
- if (async)
- usb_unlink_urb(u);
- else
- usb_kill_urb(u);
+ usb_unlink_urb(u);
}
}
}
@@ -566,7 +552,7 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force)
ep->prepare_data_urb = NULL;
/* stop urbs */
- deactivate_urbs(ep, force, 1);
+ deactivate_urbs(ep, force);
wait_clear_urbs(ep);
for (i = 0; i < ep->nurbs; i++)
@@ -829,7 +815,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
*
* Returns an error if the URB submission failed, 0 in all other cases.
*/
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
{
int err;
unsigned int i;
@@ -842,7 +828,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
return 0;
/* just to be sure */
- deactivate_urbs(ep, 0, can_sleep);
+ deactivate_urbs(ep, false);
if (can_sleep)
wait_clear_urbs(ep);
@@ -896,7 +882,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
__error:
clear_bit(EP_FLAG_RUNNING, &ep->flags);
ep->use_count--;
- deactivate_urbs(ep, 0, 0);
+ deactivate_urbs(ep, false);
return -EPIPE;
}
@@ -910,9 +896,11 @@ __error:
* actually be deactivated.
*
* Must be balanced to calls of snd_usb_endpoint_start().
+ *
+ * The caller needs to synchronize the pending stop operation via
+ * snd_usb_endpoint_sync_pending_stop().
*/
-void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
- int force, int can_sleep, int wait)
+void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
{
if (!ep)
return;
@@ -921,16 +909,12 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
return;
if (--ep->use_count == 0) {
- deactivate_urbs(ep, force, can_sleep);
+ deactivate_urbs(ep, false);
ep->data_subs = NULL;
ep->sync_slave = NULL;
ep->retire_data_urb = NULL;
ep->prepare_data_urb = NULL;
-
- if (wait)
- wait_clear_urbs(ep);
- else
- set_bit(EP_FLAG_STOPPING, &ep->flags);
+ set_bit(EP_FLAG_STOPPING, &ep->flags);
}
}
@@ -952,7 +936,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
if (!ep)
return -EINVAL;
- deactivate_urbs(ep, 1, 1);
+ deactivate_urbs(ep, true);
wait_clear_urbs(ep);
if (ep->use_count != 0)
@@ -1034,15 +1018,18 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
/*
* Iterate through the inbound packet and prepare the lengths
* for the output packet. The OUT packet we are about to send
- * will have the same amount of payload bytes than the IN
- * packet we just received.
+ * will have the same amount of payload bytes per stride as the
+ * IN packet we just received. Since the actual size is scaled
+ * by the stride, use the sender stride to calculate the length
+ * in case the number of channels differ between the implicitly
+ * fed-back endpoint and the synchronizing endpoint.
*/
out_packet->packets = in_ctx->packets;
for (i = 0; i < in_ctx->packets; i++) {
if (urb->iso_frame_desc[i].status == 0)
out_packet->packet_size[i] =
- urb->iso_frame_desc[i].actual_length / ep->stride;
+ urb->iso_frame_desc[i].actual_length / sender->stride;
else
out_packet->packet_size[i] = 0;
}
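Dividing by sender->stride instead of ep->stride is the actual fix here: iso_frame_desc[].actual_length is a byte count on the endpoint the data arrived on, so converting it to frames must use that endpoint's stride; the resulting frame count is then valid for the feedback consumer even when its channel count differs. A small worked example with illustrative numbers (not from the patch):

    /* S24_3LE audio, so 3 bytes per sample */
    unsigned int in_channels = 4, out_channels = 6, sample_bytes = 3;
    unsigned int sender_stride = in_channels * sample_bytes;        /* 12 bytes per frame */
    unsigned int actual_length = 576;                               /* bytes received in one packet */
    unsigned int frames = actual_length / sender_stride;            /* 48 frames */
    unsigned int out_bytes = frames * out_channels * sample_bytes;  /* 864 bytes to send back */

Dividing 576 by the receiver's 18-byte stride instead would have yielded 32 "frames", silently shortening the output packets.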
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 3d4c9705041..447902dd8a4 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -16,9 +16,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep);
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
-void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
- int force, int can_sleep, int wait);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
+void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
diff --git a/sound/usb/format.c b/sound/usb/format.c
index ddfef57c4c9..e831ee4238b 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -155,7 +155,7 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
if (fmt[0] < offset + 1 + 3 * (nr_rates ? nr_rates : 2)) {
snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
chip->dev->devnum, fp->iface, fp->altsetting);
- return -1;
+ return -EINVAL;
}
if (nr_rates) {
@@ -167,7 +167,7 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
if (fp->rate_table == NULL) {
snd_printk(KERN_ERR "cannot malloc\n");
- return -1;
+ return -ENOMEM;
}
fp->nr_rates = 0;
@@ -198,7 +198,7 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
}
if (!fp->nr_rates) {
hwc_debug("All rates were zero. Skipping format!\n");
- return -1;
+ return -EINVAL;
}
} else {
/* continuous rates */
@@ -383,7 +383,7 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
fp->formats = parse_audio_format_i_type(chip, fp, format,
fmt, protocol);
if (!fp->formats)
- return -1;
+ return -EINVAL;
}
/* gather possible sample rates */
@@ -409,7 +409,7 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
if (fp->channels < 1) {
snd_printk(KERN_ERR "%d:%u:%d : invalid channels %d\n",
chip->dev->devnum, fp->iface, fp->altsetting, fp->channels);
- return -1;
+ return -EINVAL;
}
return ret;
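Switching the format parser's bare -1 returns to -EINVAL and -ENOMEM is not cosmetic: in the kernel convention a negative return is read as an errno value, and -1 happens to be -EPERM ("Operation not permitted"), which is misleading for a malformed descriptor or a failed allocation. A short illustration of the values involved (standard errno numbers, not defined by this patch):

    #include <linux/errno.h>

    int bad   = -1;         /* same value as -EPERM  (EPERM  == 1)  */
    int inval = -EINVAL;    /* "Invalid argument"    (EINVAL == 22) */
    int nomem = -ENOMEM;    /* "Out of memory"       (ENOMEM == 12) */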
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index eeefbce3873..c183d34842a 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -116,6 +116,7 @@ struct snd_usb_midi {
struct list_head list;
struct timer_list error_timer;
spinlock_t disc_lock;
+ struct rw_semaphore disc_rwsem;
struct mutex mutex;
u32 usb_id;
int next_midi_device;
@@ -125,8 +126,10 @@ struct snd_usb_midi {
struct snd_usb_midi_in_endpoint *in;
} endpoints[MIDI_MAX_ENDPOINTS];
unsigned long input_triggered;
- unsigned int opened;
+ bool autopm_reference;
+ unsigned int opened[2];
unsigned char disconnected;
+ unsigned char input_running;
struct snd_kcontrol *roland_load_ctl;
};
@@ -148,7 +151,6 @@ struct snd_usb_midi_out_endpoint {
struct snd_usb_midi_out_endpoint* ep;
struct snd_rawmidi_substream *substream;
int active;
- bool autopm_reference;
uint8_t cable; /* cable number << 4 */
uint8_t state;
#define STATE_UNKNOWN 0
@@ -1033,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi)
snd_usbmidi_input_start(&umidi->list);
}
-static void substream_open(struct snd_rawmidi_substream *substream, int open)
+static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ int open)
{
struct snd_usb_midi* umidi = substream->rmidi->private_data;
struct snd_kcontrol *ctl;
+ int err;
+
+ down_read(&umidi->disc_rwsem);
+ if (umidi->disconnected) {
+ up_read(&umidi->disc_rwsem);
+ return open ? -ENODEV : 0;
+ }
mutex_lock(&umidi->mutex);
if (open) {
- if (umidi->opened++ == 0 && umidi->roland_load_ctl) {
- ctl = umidi->roland_load_ctl;
- ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
- snd_ctl_notify(umidi->card,
+ if (!umidi->opened[0] && !umidi->opened[1]) {
+ err = usb_autopm_get_interface(umidi->iface);
+ umidi->autopm_reference = err >= 0;
+ if (err < 0 && err != -EACCES) {
+ mutex_unlock(&umidi->mutex);
+ up_read(&umidi->disc_rwsem);
+ return -EIO;
+ }
+ if (umidi->roland_load_ctl) {
+ ctl = umidi->roland_load_ctl;
+ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ snd_ctl_notify(umidi->card,
SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
- update_roland_altsetting(umidi);
+ update_roland_altsetting(umidi);
+ }
}
+ umidi->opened[dir]++;
+ if (umidi->opened[1])
+ snd_usbmidi_input_start(&umidi->list);
} else {
- if (--umidi->opened == 0 && umidi->roland_load_ctl) {
- ctl = umidi->roland_load_ctl;
- ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
- snd_ctl_notify(umidi->card,
+ umidi->opened[dir]--;
+ if (!umidi->opened[1])
+ snd_usbmidi_input_stop(&umidi->list);
+ if (!umidi->opened[0] && !umidi->opened[1]) {
+ if (umidi->roland_load_ctl) {
+ ctl = umidi->roland_load_ctl;
+ ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+ snd_ctl_notify(umidi->card,
SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+ }
+ if (umidi->autopm_reference)
+ usb_autopm_put_interface(umidi->iface);
}
}
mutex_unlock(&umidi->mutex);
+ up_read(&umidi->disc_rwsem);
+ return 0;
}
static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
@@ -1063,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
struct snd_usb_midi* umidi = substream->rmidi->private_data;
struct usbmidi_out_port* port = NULL;
int i, j;
- int err;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
if (umidi->endpoints[i].out)
@@ -1076,25 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
snd_BUG();
return -ENXIO;
}
- err = usb_autopm_get_interface(umidi->iface);
- port->autopm_reference = err >= 0;
- if (err < 0 && err != -EACCES)
- return -EIO;
+
substream->runtime->private_data = port;
port->state = STATE_UNKNOWN;
- substream_open(substream, 1);
- return 0;
+ return substream_open(substream, 0, 1);
}
static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
- struct snd_usb_midi* umidi = substream->rmidi->private_data;
- struct usbmidi_out_port *port = substream->runtime->private_data;
-
- substream_open(substream, 0);
- if (port->autopm_reference)
- usb_autopm_put_interface(umidi->iface);
- return 0;
+ return substream_open(substream, 0, 0);
}
static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
@@ -1147,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
{
- substream_open(substream, 1);
- return 0;
+ return substream_open(substream, 1, 1);
}
static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream)
{
- substream_open(substream, 0);
- return 0;
+ return substream_open(substream, 1, 0);
}
static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
@@ -1403,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p)
* a timer may submit an URB. To reliably break the cycle
* a flag under lock must be used
*/
+ down_write(&umidi->disc_rwsem);
spin_lock_irq(&umidi->disc_lock);
umidi->disconnected = 1;
spin_unlock_irq(&umidi->disc_lock);
+ up_write(&umidi->disc_rwsem);
+
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
if (ep->out)
@@ -2060,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p)
unsigned int i, j;
umidi = list_entry(p, struct snd_usb_midi, list);
+ if (!umidi->input_running)
+ return;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
if (ep->in)
for (j = 0; j < INPUT_URBS; ++j)
usb_kill_urb(ep->in->urbs[j]);
}
+ umidi->input_running = 0;
}
static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
@@ -2090,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p)
int i;
umidi = list_entry(p, struct snd_usb_midi, list);
+ if (umidi->input_running || !umidi->opened[1])
+ return;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+ umidi->input_running = 1;
}
/*
@@ -2117,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card,
umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
init_timer(&umidi->error_timer);
spin_lock_init(&umidi->disc_lock);
+ init_rwsem(&umidi->disc_rwsem);
mutex_init(&umidi->mutex);
umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
le16_to_cpu(umidi->dev->descriptor.idProduct));
@@ -2153,6 +2181,10 @@ int snd_usbmidi_create(struct snd_card *card,
umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
+ case QUIRK_MIDI_MBOX2:
+ umidi->usb_protocol_ops = &snd_usbmidi_midiman_ops;
+ err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+ break;
case QUIRK_MIDI_RAW_BYTES:
umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
/*
@@ -2229,9 +2261,6 @@ int snd_usbmidi_create(struct snd_card *card,
}
list_add_tail(&umidi->list, midi_list);
-
- for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
- snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
return 0;
}
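The rework above replaces the single umidi->opened counter with per-direction counters (index 0 for output, 1 for input), takes the autopm reference once when the first substream of either direction opens, and keeps input URBs submitted only while an input substream is actually open; disc_rwsem additionally keeps open/close from racing with disconnect. A compact sketch of the bookkeeping invariant, with a hypothetical helper name:

    static bool usbmidi_any_open(struct snd_usb_midi *umidi)    /* hypothetical helper */
    {
            /* opened[0] counts output substreams, opened[1] counts input substreams */
            return umidi->opened[0] || umidi->opened[1];
    }

    /*
     * open:  the first opener (either direction) takes usb_autopm_get_interface();
     * close: the last closer drops it with usb_autopm_put_interface();
     * input URBs run only while umidi->opened[1] != 0.
     */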
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 298070e8f2d..ed4d89c8b52 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -382,6 +382,8 @@ error:
static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
+ validx += cval->idx_off;
+
return (cval->mixer->protocol == UAC_VERSION_1) ?
get_ctl_value_v1(cval, request, validx, value_ret) :
get_ctl_value_v2(cval, request, validx, value_ret);
@@ -432,6 +434,8 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
unsigned char buf[2];
int idx = 0, val_len, err, timeout = 10;
+ validx += cval->idx_off;
+
if (cval->mixer->protocol == UAC_VERSION_1) {
val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
} else { /* UAC_VERSION_2 */
@@ -719,8 +723,19 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
return 0;
}
case UAC1_PROCESSING_UNIT:
- case UAC1_EXTENSION_UNIT: {
+ case UAC1_EXTENSION_UNIT:
+ /* UAC2_PROCESSING_UNIT_V2 */
+ /* UAC2_EFFECT_UNIT */ {
struct uac_processing_unit_descriptor *d = p1;
+
+ if (state->mixer->protocol == UAC_VERSION_2 &&
+ hdr[2] == UAC2_EFFECT_UNIT) {
+ /* UAC2/UAC1 unit IDs overlap here in an
+ * incompatible way. Ignore this unit for now.
+ */
+ return 0;
+ }
+
if (d->bNrInPins) {
id = d->baSourceID[0];
break; /* continue to parse */
@@ -791,6 +806,33 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
switch (cval->mixer->chip->usb_id) {
+ case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ if (strcmp(kctl->id.name, "Effect Duration") == 0) {
+ cval->min = 0x0000;
+ cval->max = 0xffff;
+ cval->res = 0x00e6;
+ break;
+ }
+ if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
+ strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
+ cval->min = 0x00;
+ cval->max = 0xff;
+ break;
+ }
+ if (strstr(kctl->id.name, "Effect Return") != NULL) {
+ cval->min = 0xb706;
+ cval->max = 0xff7b;
+ cval->res = 0x0073;
+ break;
+ }
+ if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
+ (strstr(kctl->id.name, "Effect Send") != NULL)) {
+ cval->min = 0xb5fb; /* -73 dB = 0xb6ff */
+ cval->max = 0xfcfe;
+ cval->res = 0x0073;
+ }
+ break;
+
case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
if (strcmp(kctl->id.name, "Effect Duration") == 0) {
@@ -1094,6 +1136,32 @@ static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}
+/* A lot of headsets/headphones have a "Speaker" mixer. Make sure we
+ rename it to "Headphone". We determine if something is a headphone
+ similarly to how udev determines form factor. */
+static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
+ struct snd_card *card)
+{
+ const char *names_to_check[] = {
+ "Headset", "headset", "Headphone", "headphone", NULL};
+ const char **s;
+ bool found = 0;
+
+ if (strcmp("Speaker", kctl->id.name))
+ return;
+
+ for (s = names_to_check; *s; s++)
+ if (strstr(card->shortname, *s)) {
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ return;
+
+ strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
+}
+
static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
unsigned int ctl_mask, int control,
struct usb_audio_term *iterm, int unitid,
@@ -1180,6 +1248,10 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
len = snprintf(kctl->id.name, sizeof(kctl->id.name),
"Feature %d", unitid);
}
+
+ if (!mapped_name)
+ check_no_speaker_on_headset(kctl, state->mixer->chip->card);
+
/* determine the stream direction:
* if the connected output is USB stream, then it's likely a
* capture stream. otherwise it should be playback (hopefully :)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index a7f3d45a8ac..aab80df201b 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -43,6 +43,7 @@ struct usb_mixer_elem_info {
unsigned int id;
unsigned int control; /* CS or ICN (high byte) */
unsigned int cmask; /* channel mask bitmap: 0 = master */
+ unsigned int idx_off; /* Control index offset */
unsigned int ch_readonly;
unsigned int master_readonly;
int channels;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index ae2b7143522..0422b1360af 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -63,11 +63,12 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
* Since there doesn't seem to be a device that needs a multichannel
* version, we keep it mono for simplicity.
*/
-static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
+static int snd_create_std_mono_ctl_offset(struct usb_mixer_interface *mixer,
unsigned int unitid,
unsigned int control,
unsigned int cmask,
int val_type,
+ unsigned int idx_off,
const char *name,
snd_kcontrol_tlv_rw_t *tlv_callback)
{
@@ -85,6 +86,7 @@ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
cval->channels = 1;
cval->control = control;
cval->cmask = cmask;
+ cval->idx_off = idx_off;
/* get_min_max() is called only for integer volumes later,
* so provide a short-cut for booleans */
@@ -120,6 +122,18 @@ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
return 0;
}
+static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
+ unsigned int unitid,
+ unsigned int control,
+ unsigned int cmask,
+ int val_type,
+ const char *name,
+ snd_kcontrol_tlv_rw_t *tlv_callback)
+{
+ return snd_create_std_mono_ctl_offset(mixer, unitid, control, cmask,
+ val_type, 0 /* Offset */, name, tlv_callback);
+}
+
/*
* Create a set of standard UAC controls from a table
*/
@@ -416,6 +430,8 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
}
}
+/* ASUS Xonar U1 / U3 controls */
+
static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -621,11 +637,13 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
}
/* M-Audio FastTrack Ultra quirks */
-/* FTU Effect switch */
+/* FTU Effect switch (also used by C400) */
struct snd_ftu_eff_switch_priv_val {
struct usb_mixer_interface *mixer;
int cached_value;
int is_cached;
+ int bUnitID;
+ int validx;
};
static int snd_ftu_eff_switch_info(struct snd_kcontrol *kcontrol,
@@ -660,9 +678,8 @@ static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl,
struct snd_ftu_eff_switch_priv_val *pval;
int err;
unsigned char value[2];
+ int id, validx;
- const int id = 6;
- const int validx = 1;
const int val_len = 2;
value[0] = 0x00;
@@ -684,6 +701,8 @@ static int snd_ftu_eff_switch_get(struct snd_kcontrol *kctl,
if (snd_BUG_ON(!chip))
return -EINVAL;
+ id = pval->bUnitID;
+ validx = pval->validx;
down_read(&mixer->chip->shutdown_rwsem);
if (mixer->chip->shutdown)
@@ -714,10 +733,8 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
struct usb_mixer_interface *mixer;
int changed, cur_val, err, new_val;
unsigned char value[2];
+ int id, validx;
-
- const int id = 6;
- const int validx = 1;
const int val_len = 2;
changed = 0;
@@ -735,6 +752,9 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
if (snd_BUG_ON(!chip))
return -EINVAL;
+ id = pval->bUnitID;
+ validx = pval->validx;
+
if (!pval->is_cached) {
/* Read current value */
down_read(&mixer->chip->shutdown_rwsem);
@@ -779,7 +799,8 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
return changed;
}
-static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer)
+static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
+ int validx, int bUnitID)
{
static struct snd_kcontrol_new template = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -802,6 +823,8 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer)
pval->cached_value = 0;
pval->is_cached = 0;
pval->mixer = mixer;
+ pval->bUnitID = bUnitID;
+ pval->validx = validx;
template.private_value = (unsigned long) pval;
kctl = snd_ctl_new1(&template, mixer->chip);
@@ -960,9 +983,10 @@ static int snd_ftu_create_mixer(struct usb_mixer_interface *mixer)
if (err < 0)
return err;
- err = snd_ftu_create_effect_switch(mixer);
+ err = snd_ftu_create_effect_switch(mixer, 1, 6);
if (err < 0)
return err;
+
err = snd_ftu_create_effect_volume_ctl(mixer);
if (err < 0)
return err;
@@ -1005,6 +1029,178 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
}
}
+/* M-Audio Fast Track C400 */
+/* C400 volume controls, this control needs a volume quirk, see mixer.c */
+static int snd_c400_create_vol_ctls(struct usb_mixer_interface *mixer)
+{
+ char name[64];
+ unsigned int cmask, offset;
+ int out, chan, err;
+
+ const unsigned int id = 0x40;
+ const int val_type = USB_MIXER_S16;
+ const int control = 1;
+
+ for (chan = 0; chan < 10; chan++) {
+ for (out = 0; out < 6; out++) {
+ if (chan < 6) {
+ snprintf(name, sizeof(name),
+ "PCM%d-Out%d Playback Volume",
+ chan + 1, out + 1);
+ } else {
+ snprintf(name, sizeof(name),
+ "In%d-Out%d Playback Volume",
+ chan - 5, out + 1);
+ }
+
+ cmask = (out == 0) ? 0 : 1 << (out - 1);
+ offset = chan * 6;
+ err = snd_create_std_mono_ctl_offset(mixer, id, control,
+ cmask, val_type, offset, name,
+ &snd_usb_mixer_vol_tlv);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* This control needs a volume quirk, see mixer.c */
+static int snd_c400_create_effect_volume_ctl(struct usb_mixer_interface *mixer)
+{
+ static const char name[] = "Effect Volume";
+ const unsigned int id = 0x43;
+ const int val_type = USB_MIXER_U8;
+ const unsigned int control = 3;
+ const unsigned int cmask = 0;
+
+ return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+ name, snd_usb_mixer_vol_tlv);
+}
+
+/* This control needs a volume quirk, see mixer.c */
+static int snd_c400_create_effect_duration_ctl(struct usb_mixer_interface *mixer)
+{
+ static const char name[] = "Effect Duration";
+ const unsigned int id = 0x43;
+ const int val_type = USB_MIXER_S16;
+ const unsigned int control = 4;
+ const unsigned int cmask = 0;
+
+ return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+ name, snd_usb_mixer_vol_tlv);
+}
+
+/* This control needs a volume quirk, see mixer.c */
+static int snd_c400_create_effect_feedback_ctl(struct usb_mixer_interface *mixer)
+{
+ static const char name[] = "Effect Feedback Volume";
+ const unsigned int id = 0x43;
+ const int val_type = USB_MIXER_U8;
+ const unsigned int control = 5;
+ const unsigned int cmask = 0;
+
+ return snd_create_std_mono_ctl(mixer, id, control, cmask, val_type,
+ name, NULL);
+}
+
+static int snd_c400_create_effect_vol_ctls(struct usb_mixer_interface *mixer)
+{
+ char name[64];
+ unsigned int cmask;
+ int chan, err;
+
+ const unsigned int id = 0x42;
+ const int val_type = USB_MIXER_S16;
+ const int control = 1;
+
+ for (chan = 0; chan < 10; chan++) {
+ if (chan < 6) {
+ snprintf(name, sizeof(name),
+ "Effect Send DOut%d",
+ chan + 1);
+ } else {
+ snprintf(name, sizeof(name),
+ "Effect Send AIn%d",
+ chan - 5);
+ }
+
+ cmask = (chan == 0) ? 0 : 1 << (chan - 1);
+ err = snd_create_std_mono_ctl(mixer, id, control,
+ cmask, val_type, name,
+ &snd_usb_mixer_vol_tlv);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int snd_c400_create_effect_ret_vol_ctls(struct usb_mixer_interface *mixer)
+{
+ char name[64];
+ unsigned int cmask;
+ int chan, err;
+
+ const unsigned int id = 0x40;
+ const int val_type = USB_MIXER_S16;
+ const int control = 1;
+ const int chan_id[6] = { 0, 7, 2, 9, 4, 0xb };
+ const unsigned int offset = 0x3c;
+ /* { 0x3c, 0x43, 0x3e, 0x45, 0x40, 0x47 } */
+
+ for (chan = 0; chan < 6; chan++) {
+ snprintf(name, sizeof(name),
+ "Effect Return %d",
+ chan + 1);
+
+ cmask = (chan_id[chan] == 0) ? 0 : 1 << (chan_id[chan] - 1);
+ err = snd_create_std_mono_ctl_offset(mixer, id, control,
+ cmask, val_type, offset, name,
+ &snd_usb_mixer_vol_tlv);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int snd_c400_create_mixer(struct usb_mixer_interface *mixer)
+{
+ int err;
+
+ err = snd_c400_create_vol_ctls(mixer);
+ if (err < 0)
+ return err;
+
+ err = snd_c400_create_effect_vol_ctls(mixer);
+ if (err < 0)
+ return err;
+
+ err = snd_c400_create_effect_ret_vol_ctls(mixer);
+ if (err < 0)
+ return err;
+
+ err = snd_ftu_create_effect_switch(mixer, 2, 0x43);
+ if (err < 0)
+ return err;
+
+ err = snd_c400_create_effect_volume_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ err = snd_c400_create_effect_duration_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ err = snd_c400_create_effect_feedback_ctl(mixer);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
/*
* The mixer units for Ebox-44 are corrupt, and even where they
* are valid they presents mono controls as L and R channels of
@@ -1102,13 +1298,18 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
snd_audigy2nx_proc_read);
break;
+ case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ err = snd_c400_create_mixer(mixer);
+ break;
+
case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
err = snd_ftu_create_mixer(mixer);
break;
- case USB_ID(0x0b05, 0x1739):
- case USB_ID(0x0b05, 0x1743):
+ case USB_ID(0x0b05, 0x1739): /* ASUS Xonar U1 */
+ case USB_ID(0x0b05, 0x1743): /* ASUS Xonar U1 (2) */
+ case USB_ID(0x0b05, 0x17a0): /* ASUS Xonar U3 */
err = snd_xonar_u1_controls_create(mixer);
break;
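The idx_off plumbing added to the generic mono-control helper is what makes the C400 matrix mixer possible: unit 0x40 exposes one long run of volume controls, and each kcontrol picks its slot by adding a fixed offset to the control index while the channel mask selects the output. A small worked example following the loop in snd_c400_create_vol_ctls() above:

    /* "In2-Out4 Playback Volume": chan = 7, out = 3 (0-based loop indices) */
    unsigned int offset = 7 * 6;            /* idx_off = 42, selects the In2 row   */
    unsigned int cmask  = 1 << (3 - 1);     /* 0x4, selects output 4 as the column */
    /* get/set_ctl_value() then add idx_off to validx before issuing the request */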
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index ef6fa24fc47..c6593101c04 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -46,6 +46,9 @@ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
int frame_diff;
int est_delay;
+ if (!subs->last_delay)
+ return 0; /* short path */
+
current_frame_number = usb_get_current_frame_number(subs->dev);
/*
* HCD implementations use different widths, use lower 8 bits.
@@ -75,7 +78,8 @@ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream
return SNDRV_PCM_POS_XRUN;
spin_lock(&subs->lock);
hwptr_done = subs->hwptr_done;
- substream->runtime->delay = snd_usb_pcm_delay(subs,
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ substream->runtime->delay = snd_usb_pcm_delay(subs,
substream->runtime->rate);
spin_unlock(&subs->lock);
return hwptr_done / (substream->runtime->frame_bits >> 3);
@@ -173,11 +177,8 @@ static int init_pitch_v2(struct snd_usb_audio *chip, int iface,
{
struct usb_device *dev = chip->dev;
unsigned char data[1];
- unsigned int ep;
int err;
- ep = get_endpoint(alts, 0)->bEndpointAddress;
-
data[0] = 1;
if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
@@ -214,7 +215,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
}
}
-static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
+static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
{
int err;
@@ -266,16 +267,18 @@ static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
return 0;
}
-static void stop_endpoints(struct snd_usb_substream *subs,
- int force, int can_sleep, int wait)
+static void stop_endpoints(struct snd_usb_substream *subs, bool wait)
{
if (test_and_clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags))
- snd_usb_endpoint_stop(subs->sync_endpoint,
- force, can_sleep, wait);
+ snd_usb_endpoint_stop(subs->sync_endpoint);
if (test_and_clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags))
- snd_usb_endpoint_stop(subs->data_endpoint,
- force, can_sleep, wait);
+ snd_usb_endpoint_stop(subs->data_endpoint);
+
+ if (wait) {
+ snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint);
+ snd_usb_endpoint_sync_pending_stop(subs->data_endpoint);
+ }
}
static int deactivate_endpoints(struct snd_usb_substream *subs)
@@ -359,6 +362,19 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
switch (subs->stream->chip->usb_id) {
+ case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ if (is_playback) {
+ implicit_fb = 1;
+ ep = 0x81;
+ iface = usb_ifnum_to_if(dev, 3);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+ }
+ break;
case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
case USB_ID(0x0763, 0x2081):
if (is_playback) {
@@ -381,7 +397,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
/* ... and check descriptor size before accessing bSynchAddress
because there is a version of the SB Audigy 2 NX firmware lacking
the audio fields in the endpoint descriptors */
- if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 0x01 ||
+ if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC ||
(get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
get_endpoint(alts, 1)->bSynchAddress != 0 &&
!implicit_fb)) {
@@ -438,6 +454,103 @@ add_sync_ep:
}
/*
+ * Return the score of matching two audioformats.
+ * Veto the audioformat if:
+ * - It has no channels for some reason.
+ * - Requested PCM format is not supported.
+ * - Requested sample rate is not supported.
+ */
+static int match_endpoint_audioformats(struct audioformat *fp,
+ struct audioformat *match, int rate,
+ snd_pcm_format_t pcm_format)
+{
+ int i;
+ int score = 0;
+
+ if (fp->channels < 1) {
+ snd_printdd("%s: (fmt @%p) no channels\n", __func__, fp);
+ return 0;
+ }
+
+ if (!(fp->formats & (1ULL << pcm_format))) {
+ snd_printdd("%s: (fmt @%p) no match for format %d\n", __func__,
+ fp, pcm_format);
+ return 0;
+ }
+
+ for (i = 0; i < fp->nr_rates; i++) {
+ if (fp->rate_table[i] == rate) {
+ score++;
+ break;
+ }
+ }
+ if (!score) {
+ snd_printdd("%s: (fmt @%p) no match for rate %d\n", __func__,
+ fp, rate);
+ return 0;
+ }
+
+ if (fp->channels == match->channels)
+ score++;
+
+ snd_printdd("%s: (fmt @%p) score %d\n", __func__, fp, score);
+
+ return score;
+}
+
+/*
+ * Configure the sync ep using the rate and pcm format of the data ep.
+ */
+static int configure_sync_endpoint(struct snd_usb_substream *subs)
+{
+ int ret;
+ struct audioformat *fp;
+ struct audioformat *sync_fp = NULL;
+ int cur_score = 0;
+ int sync_period_bytes = subs->period_bytes;
+ struct snd_usb_substream *sync_subs =
+ &subs->stream->substream[subs->direction ^ 1];
+
+ /* Try to find the best matching audioformat. */
+ list_for_each_entry(fp, &sync_subs->fmt_list, list) {
+ int score = match_endpoint_audioformats(fp, subs->cur_audiofmt,
+ subs->cur_rate, subs->pcm_format);
+
+ if (score > cur_score) {
+ sync_fp = fp;
+ cur_score = score;
+ }
+ }
+
+ if (unlikely(sync_fp == NULL)) {
+ snd_printk(KERN_ERR "%s: no valid audioformat for sync ep %x found\n",
+ __func__, sync_subs->ep_num);
+ return -EINVAL;
+ }
+
+ /*
+ * Recalculate the period bytes if the channel count differs between
+ * the data and sync ep audioformats.
+ */
+ if (sync_fp->channels != subs->channels) {
+ sync_period_bytes = (subs->period_bytes / subs->channels) *
+ sync_fp->channels;
+ snd_printdd("%s: adjusted sync ep period bytes (%d -> %d)\n",
+ __func__, subs->period_bytes, sync_period_bytes);
+ }
+
+ ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
+ subs->pcm_format,
+ sync_fp->channels,
+ sync_period_bytes,
+ subs->cur_rate,
+ sync_fp,
+ NULL);
+
+ return ret;
+}
+
+/*
* configure endpoint params
*
* called during initial setup and upon resume
@@ -447,7 +560,7 @@ static int configure_endpoint(struct snd_usb_substream *subs)
int ret;
/* format changed */
- stop_endpoints(subs, 0, 0, 0);
+ stop_endpoints(subs, true);
ret = snd_usb_endpoint_set_params(subs->data_endpoint,
subs->pcm_format,
subs->channels,
@@ -459,13 +572,8 @@ static int configure_endpoint(struct snd_usb_substream *subs)
return ret;
if (subs->sync_endpoint)
- ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
- subs->pcm_format,
- subs->channels,
- subs->period_bytes,
- subs->cur_rate,
- subs->cur_audiofmt,
- NULL);
+ ret = configure_sync_endpoint(subs);
+
return ret;
}
@@ -533,7 +641,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
subs->period_bytes = 0;
down_read(&subs->stream->chip->shutdown_rwsem);
if (!subs->stream->chip->shutdown) {
- stop_endpoints(subs, 0, 1, 1);
+ stop_endpoints(subs, true);
deactivate_endpoints(subs);
}
up_read(&subs->stream->chip->shutdown_rwsem);
@@ -608,7 +716,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
/* for playback, submit the URBs now; otherwise, the first hwptr_done
* updates for all URBs would happen at the same time when starting */
if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- ret = start_endpoints(subs, 1);
+ ret = start_endpoints(subs, true);
unlock:
up_read(&subs->stream->chip->shutdown_rwsem);
@@ -1013,7 +1121,7 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
struct snd_usb_substream *subs = &as->substream[direction];
- stop_endpoints(subs, 0, 0, 0);
+ stop_endpoints(subs, true);
if (!as->chip->shutdown && subs->interface >= 0) {
usb_set_interface(subs->dev, subs->interface, 0);
@@ -1195,6 +1303,9 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
return;
spin_lock_irqsave(&subs->lock, flags);
+ if (!subs->last_delay)
+ goto out; /* short path */
+
est_delay = snd_usb_pcm_delay(subs, runtime->rate);
/* update delay with exact number of samples played */
if (processed > subs->last_delay)
@@ -1212,6 +1323,15 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
est_delay, subs->last_delay);
+ if (!subs->running) {
+ /* update last_frame_number for delay counting here since
+ * prepare_playback_urb won't be called during pause
+ */
+ subs->last_frame_number =
+ usb_get_current_frame_number(subs->dev) & 0xff;
+ }
+
+ out:
spin_unlock_irqrestore(&subs->lock, flags);
}
@@ -1248,12 +1368,13 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
subs->running = 1;
return 0;
case SNDRV_PCM_TRIGGER_STOP:
- stop_endpoints(subs, 0, 0, 0);
+ stop_endpoints(subs, false);
subs->running = 0;
return 0;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
subs->data_endpoint->prepare_data_urb = NULL;
- subs->data_endpoint->retire_data_urb = NULL;
+ /* keep retire_data_urb for delay calculation */
+ subs->data_endpoint->retire_data_urb = retire_playback_urb;
subs->running = 0;
return 0;
}
@@ -1269,7 +1390,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err = start_endpoints(subs, 0);
+ err = start_endpoints(subs, false);
if (err < 0)
return err;
@@ -1277,7 +1398,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
subs->running = 1;
return 0;
case SNDRV_PCM_TRIGGER_STOP:
- stop_endpoints(subs, 0, 0, 0);
+ stop_endpoints(subs, false);
subs->running = 0;
return 0;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
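Keeping retire_playback_urb() installed across PAUSE_PUSH (instead of clearing it as before) means completed URBs keep shrinking last_delay and, via the new !subs->running branch, keep refreshing last_frame_number, so snd_usb_pcm_delay() stays meaningful while the stream is paused. A rough worked example of the estimate, assuming the usual 1 ms per full-speed USB frame:

    /* Illustrative numbers: 48 kHz playback, 10 USB frames since the last update */
    unsigned int rate = 48000, frame_diff = 10;
    int last_delay = 2048;                                  /* frames queued at the last update */
    int est_delay = last_delay - frame_diff * rate / 1000;  /* 2048 - 480 = 1568 frames */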
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 88d8cebbb24..cdcf6b45e8a 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -99,6 +99,42 @@
},
/*
+ * HP Wireless Audio
+ * When not ignored, causes instability issues for some users, forcing them to
+ * blacklist the entire module.
+ */
+{
+ USB_DEVICE(0x0424, 0xb832),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Standard Microsystems Corp.",
+ .product_name = "HP Wireless Audio",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ /* Mixer */
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Capture */
+ {
+ .ifnum = 2,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* HID Device, .ifnum = 3 */
+ {
+ .ifnum = -1,
+ }
+ }
+ }
+},
+
+/*
* Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface
* class matches do not take effect without an explicit ID match.
*/
@@ -1457,6 +1493,40 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ /* Advanced mode of the Roland VG-99, with MIDI and 24-bit PCM at 44.1
+ * kHz. In standard mode, the device has ID 0582:00b3, and offers
+ * 16-bit PCM at 44.1 kHz with no MIDI.
+ */
+ USB_DEVICE(0x0582, 0x00b2),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Roland",
+ .product_name = "VG-99",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
/* Roland SonicCell */
USB_DEVICE(0x0582, 0x00c2),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
@@ -2163,6 +2233,77 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "M-Audio", */
+ /* .product_name = "Fast Track C400", */
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 6,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x01,
+ .ep_attr = 0x09,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 44100,
+ .rate_max = 96000,
+ .nr_rates = 4,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 88200, 96000
+ },
+ .clock = 0x81,
+ }
+ },
+ /* Capture */
+ {
+ .ifnum = 3,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 3,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x81,
+ .ep_attr = 0x05,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 44100,
+ .rate_max = 96000,
+ .nr_rates = 4,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 88200, 96000
+ },
+ .clock = 0x81,
+ }
+ },
+ /* MIDI */
+ {
+ .ifnum = -1 /* Interface = 4 */
+ }
+ }
+ }
+},
+{
USB_DEVICE_VENDOR_SPEC(0x0763, 0x2080),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
/* .vendor_name = "M-Audio", */
@@ -2780,6 +2921,93 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+
+/* DIGIDESIGN MBOX 2 */
+{
+ USB_DEVICE(0x0dba, 0x3000),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Digidesign",
+ .product_name = "Mbox 2",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 2,
+ .altset_idx = 1,
+ .attributes = 0x00,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_SYNC_ASYNC,
+ .maxpacksize = 0x128,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 4,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3BE,
+ .channels = 2,
+ .iface = 4,
+ .altsetting = 2,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x85,
+ .ep_attr = USB_ENDPOINT_SYNC_SYNC,
+ .maxpacksize = 0x128,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ {
+ .ifnum = 5,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 6,
+ .type = QUIRK_MIDI_MBOX2,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_ep = 0x02,
+ .out_cables = 0x0001,
+ .in_ep = 0x81,
+ .in_interval = 0x01,
+ .in_cables = 0x0001
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
{
/* Tascam US122 MKII - playback-only support */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
@@ -2880,6 +3108,99 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Reloop Play */
+{
+ USB_DEVICE(0x200c, 0x100b),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 4,
+ .iface = 1,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_SYNC_ADAPTIVE,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .nr_rates = 2,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000
+ }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
+{
+ /*
+ * Focusrite Scarlett 18i6
+ *
+ * Avoid mixer creation, which otherwise fails because some of
+ * the interface descriptor subtypes for interface 0 are
+ * unknown. That should be fixed or worked-around but this at
+ * least allows the device to be used successfully with a DAW
+ * and an external mixer. See comments below about other
+ * ignored interfaces.
+ */
+ USB_DEVICE(0x1235, 0x8004),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Focusrite",
+ .product_name = "Scarlett 18i6",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = & (const struct snd_usb_audio_quirk[]) {
+ {
+ /* InterfaceSubClass 1 (Control Device) */
+ .ifnum = 0,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ /* InterfaceSubClass 1 (Control Device) */
+ .ifnum = 3,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = 4,
+ .type = QUIRK_MIDI_STANDARD_INTERFACE
+ },
+ {
+ /* InterfaceSubClass 1 (Device Firmware Update) */
+ .ifnum = 5,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
{
/*
* Some USB MIDI devices don't have an audio control interface,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 0f58b4b6d70..f104c68fe1e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -306,6 +306,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
[QUIRK_MIDI_YAMAHA] = create_any_midi_quirk,
[QUIRK_MIDI_MIDIMAN] = create_any_midi_quirk,
[QUIRK_MIDI_NOVATION] = create_any_midi_quirk,
+ [QUIRK_MIDI_MBOX2] = create_any_midi_quirk,
[QUIRK_MIDI_RAW_BYTES] = create_any_midi_quirk,
[QUIRK_MIDI_EMAGIC] = create_any_midi_quirk,
[QUIRK_MIDI_CME] = create_any_midi_quirk,
@@ -497,6 +498,92 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
return -EAGAIN;
}
+static void mbox2_setup_48_24_magic(struct usb_device *dev)
+{
+ u8 srate[3];
+ u8 temp[12];
+
+ /* Choose 48000Hz permanently */
+ srate[0] = 0x80;
+ srate[1] = 0xbb;
+ srate[2] = 0x00;
+
+ /* Send the magic! */
+ snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x01, 0x22, 0x0100, 0x0085, &temp, 0x0003);
+ snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x81, 0xa2, 0x0100, 0x0085, &srate, 0x0003);
+ snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x81, 0xa2, 0x0100, 0x0086, &srate, 0x0003);
+ snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x81, 0xa2, 0x0100, 0x0003, &srate, 0x0003);
+ return;
+}
+
+/* Digidesign Mbox 2 needs to load its onboard firmware,
+ * and the driver must wait a few seconds for initialisation.
+ */
+
+#define MBOX2_FIRMWARE_SIZE 646
+#define MBOX2_BOOT_LOADING 0x01 /* Hard coded into the device */
+#define MBOX2_BOOT_READY 0x02 /* Hard coded into the device */
+
+int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
+{
+ struct usb_host_config *config = dev->actconfig;
+ int err;
+ u8 bootresponse;
+ int fwsize;
+ int count;
+
+ fwsize = le16_to_cpu(get_cfg_desc(config)->wTotalLength);
+
+ if (fwsize != MBOX2_FIRMWARE_SIZE) {
+ snd_printk(KERN_ERR "usb-audio: Invalid firmware size=%d.\n", fwsize);
+ return -ENODEV;
+ }
+
+ snd_printd("usb-audio: Sending Digidesign Mbox 2 boot sequence...\n");
+
+ count = 0;
+ bootresponse = MBOX2_BOOT_LOADING;
+ while ((bootresponse == MBOX2_BOOT_LOADING) && (count < 10)) {
+ msleep(500); /* 0.5 second delay */
+ snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ /* Control magic - load onboard firmware */
+ 0x85, 0xc0, 0x0001, 0x0000, &bootresponse, 0x0012);
+ if (bootresponse == MBOX2_BOOT_READY)
+ break;
+ snd_printd("usb-audio: device not ready, resending boot sequence...\n");
+ count++;
+ }
+
+ if (bootresponse != MBOX2_BOOT_READY) {
+ snd_printk(KERN_ERR "usb-audio: Unknown bootresponse=%d, or timed out, ignoring device.\n", bootresponse);
+ return -ENODEV;
+ }
+
+ snd_printdd("usb-audio: device initialised!\n");
+
+ err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
+ &dev->descriptor, sizeof(dev->descriptor));
+ config = dev->actconfig;
+ if (err < 0)
+ snd_printd("error usb_get_descriptor: %d\n", err);
+
+ err = usb_reset_configuration(dev);
+ if (err < 0)
+ snd_printd("error usb_reset_configuration: %d\n", err);
+ snd_printdd("mbox2_boot: new boot length = %d\n",
+ le16_to_cpu(get_cfg_desc(config)->wTotalLength));
+
+ mbox2_setup_48_24_magic(dev);
+
+ snd_printk(KERN_INFO "usb-audio: Digidesign Mbox 2: 24bit 48kHz\n");
+
+ return 0; /* Successful boot */
+}
+
/*
* Setup quirks
*/
@@ -655,6 +742,10 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
case USB_ID(0x0ccd, 0x00b1): /* Terratec Aureon 7.1 USB */
return snd_usb_cm6206_boot_quirk(dev);
+ case USB_ID(0x0dba, 0x3000):
+ /* Digidesign Mbox 2 */
+ return snd_usb_mbox2_boot_quirk(dev);
+
case USB_ID(0x133e, 0x0815):
/* Access Music VirusTI Desktop */
return snd_usb_accessmusic_boot_quirk(dev);
@@ -675,7 +766,7 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
*/
int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp)
{
- /* it depends on altsetting wether the device is big-endian or not */
+ /* it depends on altsetting whether the device is big-endian or not */
switch (chip->usb_id) {
case USB_ID(0x0763, 0x2001): /* M-Audio Quattro: captured data only */
if (fp->altsetting == 2 || fp->altsetting == 3 ||
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 1de0c8c002a..ad181d538bd 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -23,6 +23,8 @@
#include <sound/core.h>
#include <sound/pcm.h>
+#include <sound/control.h>
+#include <sound/tlv.h>
#include "usbaudio.h"
#include "card.h"
@@ -47,6 +49,7 @@ static void free_substream(struct snd_usb_substream *subs)
list_for_each_safe(p, n, &subs->fmt_list) {
struct audioformat *fp = list_entry(p, struct audioformat, list);
kfree(fp->rate_table);
+ kfree(fp->chmap);
kfree(fp);
}
kfree(subs->rate_list.list);
@@ -99,6 +102,206 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
subs->num_formats++;
subs->fmt_type = fp->fmt_type;
subs->ep_num = fp->endpoint;
+ if (fp->channels > subs->channels_max)
+ subs->channels_max = fp->channels;
+}
+
+/* kctl callbacks for usb-audio channel maps */
+static int usb_chmap_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_substream *subs = info->private_data;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = subs->channels_max;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = SNDRV_CHMAP_LAST;
+ return 0;
+}
+
+/* check whether a duplicated entry exists in the audiofmt list */
+static bool have_dup_chmap(struct snd_usb_substream *subs,
+ struct audioformat *fp)
+{
+ struct list_head *p;
+
+ for (p = fp->list.prev; p != &subs->fmt_list; p = p->prev) {
+ struct audioformat *prev;
+ prev = list_entry(p, struct audioformat, list);
+ if (prev->chmap &&
+ !memcmp(prev->chmap, fp->chmap, sizeof(*fp->chmap)))
+ return true;
+ }
+ return false;
+}
+
+static int usb_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ unsigned int size, unsigned int __user *tlv)
+{
+ struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_substream *subs = info->private_data;
+ struct audioformat *fp;
+ unsigned int __user *dst;
+ int count = 0;
+
+ if (size < 8)
+ return -ENOMEM;
+ if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
+ return -EFAULT;
+ size -= 8;
+ dst = tlv + 2;
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ int i, ch_bytes;
+
+ if (!fp->chmap)
+ continue;
+ if (have_dup_chmap(subs, fp))
+ continue;
+ /* copy the entry */
+ ch_bytes = fp->chmap->channels * 4;
+ if (size < 8 + ch_bytes)
+ return -ENOMEM;
+ if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
+ put_user(ch_bytes, dst + 1))
+ return -EFAULT;
+ dst += 2;
+ for (i = 0; i < fp->chmap->channels; i++, dst++) {
+ if (put_user(fp->chmap->map[i], dst))
+ return -EFAULT;
+ }
+
+ count += 8 + ch_bytes;
+ size -= 8 + ch_bytes;
+ }
+ if (put_user(count, tlv + 1))
+ return -EFAULT;
+ return 0;
+}
+
+static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_substream *subs = info->private_data;
+ struct snd_pcm_chmap_elem *chmap = NULL;
+ int i;
+
+ memset(ucontrol->value.integer.value, 0,
+ sizeof(ucontrol->value.integer.value));
+ if (subs->cur_audiofmt)
+ chmap = subs->cur_audiofmt->chmap;
+ if (chmap) {
+ for (i = 0; i < chmap->channels; i++)
+ ucontrol->value.integer.value[i] = chmap->map[i];
+ }
+ return 0;
+}
+
+/* create a chmap kctl assigned to the given USB substream */
+static int add_chmap(struct snd_pcm *pcm, int stream,
+ struct snd_usb_substream *subs)
+{
+ struct audioformat *fp;
+ struct snd_pcm_chmap *chmap;
+ struct snd_kcontrol *kctl;
+ int err;
+
+ list_for_each_entry(fp, &subs->fmt_list, list)
+ if (fp->chmap)
+ goto ok;
+ /* no chmap is found */
+ return 0;
+
+ ok:
+ err = snd_pcm_add_chmap_ctls(pcm, stream, NULL, 0, 0, &chmap);
+ if (err < 0)
+ return err;
+
+ /* override handlers */
+ chmap->private_data = subs;
+ kctl = chmap->kctl;
+ kctl->info = usb_chmap_ctl_info;
+ kctl->get = usb_chmap_ctl_get;
+ kctl->tlv.c = usb_chmap_ctl_tlv;
+
+ return 0;
+}
+
+/* convert from USB ChannelConfig bits to ALSA chmap element */
+static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ int protocol)
+{
+ static unsigned int uac1_maps[] = {
+ SNDRV_CHMAP_FL, /* left front */
+ SNDRV_CHMAP_FR, /* right front */
+ SNDRV_CHMAP_FC, /* center front */
+ SNDRV_CHMAP_LFE, /* LFE */
+ SNDRV_CHMAP_SL, /* left surround */
+ SNDRV_CHMAP_SR, /* right surround */
+ SNDRV_CHMAP_FLC, /* left of center */
+ SNDRV_CHMAP_FRC, /* right of center */
+ SNDRV_CHMAP_RC, /* surround */
+ SNDRV_CHMAP_SL, /* side left */
+ SNDRV_CHMAP_SR, /* side right */
+ SNDRV_CHMAP_TC, /* top */
+ 0 /* terminator */
+ };
+ static unsigned int uac2_maps[] = {
+ SNDRV_CHMAP_FL, /* front left */
+ SNDRV_CHMAP_FR, /* front right */
+ SNDRV_CHMAP_FC, /* front center */
+ SNDRV_CHMAP_LFE, /* LFE */
+ SNDRV_CHMAP_RL, /* back left */
+ SNDRV_CHMAP_RR, /* back right */
+ SNDRV_CHMAP_FLC, /* front left of center */
+ SNDRV_CHMAP_FRC, /* front right of center */
+ SNDRV_CHMAP_RC, /* back center */
+ SNDRV_CHMAP_SL, /* side left */
+ SNDRV_CHMAP_SR, /* side right */
+ SNDRV_CHMAP_TC, /* top center */
+ SNDRV_CHMAP_TFL, /* top front left */
+ SNDRV_CHMAP_TFC, /* top front center */
+ SNDRV_CHMAP_TFR, /* top front right */
+ SNDRV_CHMAP_TRL, /* top back left */
+ SNDRV_CHMAP_TRC, /* top back center */
+ SNDRV_CHMAP_TRR, /* top back right */
+ SNDRV_CHMAP_TFLC, /* top front left of center */
+ SNDRV_CHMAP_TFRC, /* top front right of center */
+ SNDRV_CHMAP_LLFE, /* left LFE */
+ SNDRV_CHMAP_RLFE, /* right LFE */
+ SNDRV_CHMAP_TSL, /* top side left */
+ SNDRV_CHMAP_TSR, /* top side right */
+ SNDRV_CHMAP_BC, /* bottom center */
+ SNDRV_CHMAP_BLC, /* bottom left center */
+ SNDRV_CHMAP_BRC, /* bottom right center */
+ 0 /* terminator */
+ };
+ struct snd_pcm_chmap_elem *chmap;
+ const unsigned int *maps;
+ int c;
+
+ if (!bits)
+ return NULL;
+ if (channels > ARRAY_SIZE(chmap->map))
+ return NULL;
+
+ chmap = kzalloc(sizeof(*chmap), GFP_KERNEL);
+ if (!chmap)
+ return NULL;
+
+ maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
+ chmap->channels = channels;
+ c = 0;
+ for (; bits && *maps; maps++, bits >>= 1) {
+ if (bits & 1)
+ chmap->map[c++] = *maps;
+ }
+
+ for (; c < channels; c++)
+ chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+
+ return chmap;
}
/*
@@ -140,7 +343,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
if (err < 0)
return err;
snd_usb_init_substream(as, stream, fp);
- return 0;
+ return add_chmap(as->pcm, stream, subs);
}
/* create a new pcm */
@@ -174,7 +377,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
snd_usb_proc_pcm_format_add(as);
- return 0;
+ return add_chmap(pcm, stream, &as->substream[stream]);
}
static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
@@ -218,8 +421,11 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
return attributes;
}
-static struct uac2_input_terminal_descriptor *
- snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
+/* find an input terminal descriptor (either UAC1 or UAC2) with the given
+ * terminal id
+ */
+static void *
+snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
int terminal_id)
{
struct uac2_input_terminal_descriptor *term = NULL;
@@ -261,6 +467,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
struct audioformat *fp = NULL;
int num, protocol, clock = 0;
struct uac_format_type_i_continuous_descriptor *fmt;
+ unsigned int chconfig;
dev = chip->dev;
@@ -300,6 +507,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
if (snd_usb_apply_interface_quirk(chip, iface_no, altno))
continue;
+ chconfig = 0;
/* get audio formats */
switch (protocol) {
default:
@@ -311,6 +519,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
case UAC_VERSION_1: {
struct uac1_as_header_descriptor *as =
snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+ struct uac_input_terminal_descriptor *iterm;
if (!as) {
snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n",
@@ -325,6 +534,14 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
}
format = le16_to_cpu(as->wFormatTag); /* remember the format value */
+
+ iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
+ as->bTerminalLink);
+ if (iterm) {
+ num_channels = iterm->bNrChannels;
+ chconfig = le16_to_cpu(iterm->wChannelConfig);
+ }
+
break;
}
@@ -355,6 +572,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
as->bTerminalLink);
if (input_term) {
clock = input_term->bCSourceID;
+ chconfig = le32_to_cpu(input_term->bmChannelConfig);
break;
}
@@ -413,13 +631,13 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
fp->datainterval = snd_usb_parse_datainterval(chip, alts);
fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
- /* num_channels is only set for v2 interfaces */
fp->channels = num_channels;
if (snd_usb_get_speed(dev) == USB_SPEED_HIGH)
fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1)
* (fp->maxpacksize & 0x7ff);
fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
fp->clock = clock;
+ fp->chmap = convert_chmap(num_channels, chconfig, protocol);
/* some quirks for attributes here */
@@ -455,6 +673,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
/* ok, let's parse further... */
if (snd_usb_parse_audio_format(chip, fp, format, fmt, stream, alts) < 0) {
kfree(fp->rate_table);
+ kfree(fp->chmap);
kfree(fp);
fp = NULL;
continue;
@@ -464,6 +683,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
err = snd_usb_add_audio_stream(chip, stream, fp);
if (err < 0) {
kfree(fp->rate_table);
+ kfree(fp->chmap);
kfree(fp);
return err;
}
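
For reference, the convert_chmap() helper added in the stream.c hunk above walks the USB ChannelConfig bitmask from the least significant bit up, assigns one ALSA position per set bit, and pads any remaining channels with SNDRV_CHMAP_UNKNOWN; with a zero bitmask it returns NULL and no chmap control is created. A minimal standalone sketch of that bit-walking logic, with the ALSA position macros replaced by illustrative stand-in values (this is not the driver code, only the mapping idea):

    #include <stdio.h>

    /* Illustrative stand-ins for the ALSA SNDRV_CHMAP_* position macros. */
    enum { POS_UNKNOWN = 1, POS_FL = 2, POS_FR = 3, POS_FC = 4, POS_LFE = 5 };

    /* Truncated UAC1-style table: bit N of wChannelConfig -> table[N]. */
    static const unsigned int pos_table[] = { POS_FL, POS_FR, POS_FC, POS_LFE, 0 };

    /* Same loop shape as convert_chmap(): one position per set bit, in order. */
    static void map_channels(int channels, unsigned int bits, unsigned int *out)
    {
            const unsigned int *maps = pos_table;
            int c = 0;

            for (; bits && *maps; maps++, bits >>= 1)
                    if (bits & 1)
                            out[c++] = *maps;
            for (; c < channels; c++)
                    out[c] = POS_UNKNOWN;   /* descriptor gave fewer positions */
    }

    int main(void)
    {
            unsigned int map[8];
            int i;

            map_channels(2, 0x0003, map);   /* wChannelConfig: front left | front right */
            for (i = 0; i < 2; i++)
                    printf("channel %d -> position %u\n", i, map[i]);
            return 0;
    }
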
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index ef42797f56f..a8172c11979 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -56,7 +56,6 @@ struct snd_usb_audio {
int setup; /* from the 'device_setup' module param */
int nrpacks; /* from the 'nrpacks' module param */
- int async_unlink; /* from the 'async_unlink' module param */
struct usb_host_interface *ctrl_intf; /* the audio control interface */
};
@@ -77,6 +76,7 @@ enum quirk_type {
QUIRK_MIDI_YAMAHA,
QUIRK_MIDI_MIDIMAN,
QUIRK_MIDI_NOVATION,
+ QUIRK_MIDI_MBOX2,
QUIRK_MIDI_RAW_BYTES,
QUIRK_MIDI_EMAGIC,
QUIRK_MIDI_CME,
diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c
index f93b776370b..3179c711bd6 100644
--- a/tools/firewire/nosy-dump.c
+++ b/tools/firewire/nosy-dump.c
@@ -150,6 +150,8 @@ subaction_create(uint32_t *data, size_t length)
/* we put the ack in the subaction struct for easy access. */
sa = malloc(sizeof *sa - sizeof sa->packet + length);
+ if (!sa)
+ exit(EXIT_FAILURE);
sa->ack = data[length / 4 - 1];
sa->length = length;
memcpy(&sa->packet, data, length);
@@ -180,6 +182,8 @@ link_transaction_lookup(int request_node, int response_node, int tlabel)
}
t = malloc(sizeof *t);
+ if (!t)
+ exit(EXIT_FAILURE);
t->request_node = request_node;
t->response_node = response_node;
t->tlabel = tlabel;
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index fd2f9221b24..07a03452c22 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -179,29 +179,6 @@ static struct termios orig_term;
#define wmb() __asm__ __volatile__("" : : : "memory")
#define mb() __asm__ __volatile__("" : : : "memory")
-/*
- * Convert an iovec element to the given type.
- *
- * This is a fairly ugly trick: we need to know the size of the type and
- * alignment requirement to check the pointer is kosher. It's also nice to
- * have the name of the type in case we report failure.
- *
- * Typing those three things all the time is cumbersome and error prone, so we
- * have a macro which sets them all up and passes to the real function.
- */
-#define convert(iov, type) \
- ((type *)_convert((iov), sizeof(type), __alignof__(type), #type))
-
-static void *_convert(struct iovec *iov, size_t size, size_t align,
- const char *name)
-{
- if (iov->iov_len != size)
- errx(1, "Bad iovec size %zu for %s", iov->iov_len, name);
- if ((unsigned long)iov->iov_base % align != 0)
- errx(1, "Bad alignment %p for %s", iov->iov_base, name);
- return iov->iov_base;
-}
-
/* Wrapper for the last available index. Makes it easier to change. */
#define lg_last_avail(vq) ((vq)->last_avail_idx)
@@ -228,7 +205,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
}
/* Take len bytes from the front of this iovec. */
-static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
+static void iov_consume(struct iovec iov[], unsigned num_iov,
+ void *dest, unsigned len)
{
unsigned int i;
@@ -236,11 +214,16 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
unsigned int used;
used = iov[i].iov_len < len ? iov[i].iov_len : len;
+ if (dest) {
+ memcpy(dest, iov[i].iov_base, used);
+ dest += used;
+ }
iov[i].iov_base += used;
iov[i].iov_len -= used;
len -= used;
}
- assert(len == 0);
+ if (len != 0)
+ errx(1, "iovec too short!");
}
/* The device virtqueue descriptors are followed by feature bitmasks. */
@@ -864,7 +847,7 @@ static void console_output(struct virtqueue *vq)
warn("Write to stdout gave %i (%d)", len, errno);
break;
}
- iov_consume(iov, out, len);
+ iov_consume(iov, out, NULL, len);
}
/*
@@ -1591,9 +1574,9 @@ static void blk_request(struct virtqueue *vq)
{
struct vblk_info *vblk = vq->dev->priv;
unsigned int head, out_num, in_num, wlen;
- int ret;
+ int ret, i;
u8 *in;
- struct virtio_blk_outhdr *out;
+ struct virtio_blk_outhdr out;
struct iovec iov[vq->vring.num];
off64_t off;
@@ -1603,32 +1586,36 @@ static void blk_request(struct virtqueue *vq)
*/
head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
- /*
- * Every block request should contain at least one output buffer
- * (detailing the location on disk and the type of request) and one
- * input buffer (to hold the result).
- */
- if (out_num == 0 || in_num == 0)
- errx(1, "Bad virtblk cmd %u out=%u in=%u",
- head, out_num, in_num);
+ /* Copy the output header from the front of the iov (adjusts iov) */
+ iov_consume(iov, out_num, &out, sizeof(out));
+
+ /* Find and trim end of iov input array, for our status byte. */
+ in = NULL;
+ for (i = out_num + in_num - 1; i >= out_num; i--) {
+ if (iov[i].iov_len > 0) {
+ in = iov[i].iov_base + iov[i].iov_len - 1;
+ iov[i].iov_len--;
+ break;
+ }
+ }
+ if (!in)
+ errx(1, "Bad virtblk cmd with no room for status");
- out = convert(&iov[0], struct virtio_blk_outhdr);
- in = convert(&iov[out_num+in_num-1], u8);
/*
* For historical reasons, block operations are expressed in 512 byte
* "sectors".
*/
- off = out->sector * 512;
+ off = out.sector * 512;
/*
* In general the virtio block driver is allowed to try SCSI commands.
* It'd be nice if we supported eject, for example, but we don't.
*/
- if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
+ if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
fprintf(stderr, "Scsi commands unsupported\n");
*in = VIRTIO_BLK_S_UNSUPP;
wlen = sizeof(*in);
- } else if (out->type & VIRTIO_BLK_T_OUT) {
+ } else if (out.type & VIRTIO_BLK_T_OUT) {
/*
* Write
*
@@ -1636,10 +1623,10 @@ static void blk_request(struct virtqueue *vq)
* if they try to write past end.
*/
if (lseek64(vblk->fd, off, SEEK_SET) != off)
- err(1, "Bad seek to sector %llu", out->sector);
+ err(1, "Bad seek to sector %llu", out.sector);
- ret = writev(vblk->fd, iov+1, out_num-1);
- verbose("WRITE to sector %llu: %i\n", out->sector, ret);
+ ret = writev(vblk->fd, iov, out_num);
+ verbose("WRITE to sector %llu: %i\n", out.sector, ret);
/*
* Grr... Now we know how long the descriptor they sent was, we
@@ -1655,7 +1642,7 @@ static void blk_request(struct virtqueue *vq)
wlen = sizeof(*in);
*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
- } else if (out->type & VIRTIO_BLK_T_FLUSH) {
+ } else if (out.type & VIRTIO_BLK_T_FLUSH) {
/* Flush */
ret = fdatasync(vblk->fd);
verbose("FLUSH fdatasync: %i\n", ret);
@@ -1669,10 +1656,9 @@ static void blk_request(struct virtqueue *vq)
* if they try to read past end.
*/
if (lseek64(vblk->fd, off, SEEK_SET) != off)
- err(1, "Bad seek to sector %llu", out->sector);
+ err(1, "Bad seek to sector %llu", out.sector);
- ret = readv(vblk->fd, iov+1, in_num-1);
- verbose("READ from sector %llu: %i\n", out->sector, ret);
+ ret = readv(vblk->fd, iov + out_num, in_num);
if (ret >= 0) {
wlen = sizeof(*in) + ret;
*in = VIRTIO_BLK_S_OK;
@@ -1758,7 +1744,7 @@ static void rng_input(struct virtqueue *vq)
len = readv(rng_info->rfd, iov, in_num);
if (len <= 0)
err(1, "Read from /dev/random gave %i", len);
- iov_consume(iov, in_num, len);
+ iov_consume(iov, in_num, NULL, len);
totlen += len;
}
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index b38a1f9ad46..938e8904f64 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -175,7 +175,7 @@ following filters are defined:
+
The option requires at least one branch type among any, any_call, any_ret, ind_call.
-The privilege levels may be ommitted, in which case, the privilege levels of the associated
+The privilege levels may be omitted, in which case, the privilege levels of the associated
event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege
levels are subject to permissions. When sampling on multiple events, branch stack sampling
is enabled for all the sampling events. The sampled branch type is the same for all events.
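
The behaviour described in the corrected sentence is easiest to see with an example invocation (the workload name is a placeholder):

    perf record -j any_call,u -- ./workload              # privilege level given explicitly
    perf record -e cycles:u -j any_call -- ./workload    # level omitted, inherited from the event modifier
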
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index f8564955419..f09641da40d 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -1,9 +1,22 @@
+CC = $(CROSS_COMPILE)gcc
+BUILD_OUTPUT := $(PWD)
+PREFIX := /usr
+DESTDIR :=
+
turbostat : turbostat.c
CFLAGS += -Wall
+CFLAGS += -I../../../../arch/x86/include/uapi/
+
+%: %.c
+ @mkdir -p $(BUILD_OUTPUT)
+ $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@
+.PHONY : clean
clean :
- rm -f turbostat
+ @rm -f $(BUILD_OUTPUT)/turbostat
-install :
- install turbostat /usr/bin/turbostat
- install turbostat.8 /usr/share/man/man8
+install : turbostat
+ install -d $(DESTDIR)$(PREFIX)/bin
+ install $(BUILD_OUTPUT)/turbostat $(DESTDIR)$(PREFIX)/bin/turbostat
+ install -d $(DESTDIR)$(PREFIX)/share/man/man8
+ install turbostat.8 $(DESTDIR)$(PREFIX)/share/man/man8
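
The new DESTDIR, PREFIX and BUILD_OUTPUT variables allow staged and packaged installs; for example (the staging path is illustrative):

    make
    make install DESTDIR=/tmp/stage PREFIX=/usr
    # binary lands in /tmp/stage/usr/bin, man page in /tmp/stage/usr/share/man/man8
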
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index e4d0690cccf..0d7dc2cfefb 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -11,16 +11,16 @@ turbostat \- Report processor frequency and idle statistics
.RB [ Options ]
.RB [ "\-i interval_sec" ]
.SH DESCRIPTION
-\fBturbostat \fP reports processor topology, frequency
-and idle power state statistics on modern X86 processors.
+\fBturbostat \fP reports processor topology, frequency,
+idle power-state statistics, temperature and power on modern X86 processors.
Either \fBcommand\fP is forked and statistics are printed
upon its completion, or statistics are printed periodically.
\fBturbostat \fP
-requires that the processor
+must be run as root, and
+minimally requires that the processor
supports an "invariant" TSC, plus the APERF and MPERF MSRs.
-\fBturbostat \fP will report idle cpu power state residency
-on processors that additionally support C-state residency counters.
+Additional information is reported depending on hardware counter support.
.SS Options
The \fB-p\fP option limits output to the 1st thread in 1st core of each package.
@@ -57,7 +57,15 @@ Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading T
\fBGHz\fP average clock rate while the CPU was in c0 state.
\fBTSC\fP average GHz that the TSC ran during the entire interval.
\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
+\fBCTMP\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
+\fBPTMP\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
+\fBPkg_W\fP Watts consumed by the whole package.
+\fBCor_W\fP Watts consumed by the core part of the package.
+\fBGFX_W\fP Watts consumed by the Graphics part of the package -- available only on client processors.
+\fBRAM_W\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
+\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.
+\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
.fi
.PP
.SH EXAMPLE
@@ -66,50 +74,73 @@ Without any parameters, turbostat prints out counters ever 5 seconds.
for turbostat to fork).
The first row of statistics is a summary for the entire system.
-Note that the summary is a weighted average.
+For residency % columns, the summary is a weighted average.
+For Temperature columns, the summary is the column maximum.
+For Watts columns, the summary is a system total.
Subsequent rows show per-CPU statistics.
.nf
-[root@x980]# ./turbostat
-cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 0.09 1.62 3.38 1.83 0.32 97.76 1.26 83.61
- 0 0 0.15 1.62 3.38 10.23 0.05 89.56 1.26 83.61
- 0 6 0.05 1.62 3.38 10.34
- 1 2 0.03 1.62 3.38 0.07 0.05 99.86
- 1 8 0.03 1.62 3.38 0.06
- 2 4 0.21 1.62 3.38 0.10 1.49 98.21
- 2 10 0.02 1.62 3.38 0.29
- 8 1 0.04 1.62 3.38 0.04 0.08 99.84
- 8 7 0.01 1.62 3.38 0.06
- 9 3 0.53 1.62 3.38 0.10 0.20 99.17
- 9 9 0.02 1.62 3.38 0.60
- 10 5 0.01 1.62 3.38 0.02 0.04 99.92
- 10 11 0.02 1.62 3.38 0.02
+[root@sandy]# ./turbostat
+cor CPU %c0 GHz TSC %c1 %c3 %c6 %c7 CTMP PTMP %pc2 %pc3 %pc6 %pc7 Pkg_W Cor_W GFX_W
+ 0.06 0.80 2.29 0.11 0.00 0.00 99.83 47 40 0.26 0.01 0.44 98.78 3.49 0.12 0.14
+ 0 0 0.07 0.80 2.29 0.07 0.00 0.00 99.86 40 40 0.26 0.01 0.44 98.78 3.49 0.12 0.14
+ 0 4 0.03 0.80 2.29 0.12
+ 1 1 0.04 0.80 2.29 0.25 0.01 0.00 99.71 40
+ 1 5 0.16 0.80 2.29 0.13
+ 2 2 0.05 0.80 2.29 0.06 0.01 0.00 99.88 40
+ 2 6 0.03 0.80 2.29 0.08
+ 3 3 0.05 0.80 2.29 0.08 0.00 0.00 99.87 47
+ 3 7 0.04 0.84 2.29 0.09
.fi
.SH SUMMARY EXAMPLE
The "-s" option prints the column headers just once,
and then the one line system summary for each sample interval.
.nf
-[root@x980]# ./turbostat -s
- %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 0.23 1.67 3.38 2.00 0.30 97.47 1.07 82.12
- 0.10 1.62 3.38 1.87 2.25 95.77 12.02 72.60
- 0.20 1.64 3.38 1.98 0.11 97.72 0.30 83.36
- 0.11 1.70 3.38 1.86 1.81 96.22 9.71 74.90
+[root@wsm]# turbostat -S
+ %c0 GHz TSC %c1 %c3 %c6 CTMP %pc3 %pc6
+ 1.40 2.81 3.38 10.78 43.47 44.35 42 13.67 2.09
+ 1.34 2.90 3.38 11.48 58.96 28.23 41 19.89 0.15
+ 1.55 2.72 3.38 26.73 37.66 34.07 42 2.53 2.80
+ 1.37 2.83 3.38 16.95 60.05 21.63 42 5.76 0.20
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
.nf
-GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
-12 * 133 = 1600 MHz max efficiency
-25 * 133 = 3333 MHz TSC frequency
-26 * 133 = 3467 MHz max turbo 4 active cores
-26 * 133 = 3467 MHz max turbo 3 active cores
-27 * 133 = 3600 MHz max turbo 2 active cores
-27 * 133 = 3600 MHz max turbo 1 active cores
-
+[root@ivy]# turbostat -v
+turbostat v3.0 November 23, 2012 - Len Brown <lenb@kernel.org>
+CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9)
+CPUID(6): APERF, DTS, PTM, EPB
+RAPL: 851 sec. Joule Counter Range
+cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300
+16 * 100 = 1600 MHz max efficiency
+35 * 100 = 3500 MHz TSC frequency
+cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6-noret)
+cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727
+37 * 100 = 3700 MHz max turbo 4 active cores
+38 * 100 = 3800 MHz max turbo 3 active cores
+39 * 100 = 3900 MHz max turbo 2 active cores
+39 * 100 = 3900 MHz max turbo 1 active cores
+cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
+cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.)
+cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.)
+cpu0: MSR_PKG_POWER_LIMIT: 0x830000148268 (UNlocked)
+cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled)
+cpu0: PKG Limit #2: ENabled (96.000000 Watts, 0.000977* sec, clamp DISabled)
+cpu0: MSR_PP0_POLICY: 0
+cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
+cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
+cpu0: MSR_PP1_POLICY: 0
+cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
+cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
+cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C)
+cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C)
+cpu0: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
+cpu1: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1)
+cpu2: MSR_IA32_THERM_STATUS: 0x88540000 (21 C +/- 1)
+cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
+ ...
.fi
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
available at the minimum package voltage. The \fBTSC frequency\fP is the nominal
@@ -142,7 +173,7 @@ cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
10 5 1.42 3.43 3.38 2.14 30.99 65.44
10 11 0.16 2.88 3.38 3.40
.fi
-Above the cycle soaker drives cpu7 up its 3.6 Ghz turbo limit
+Above the cycle soaker drives cpu7 up its 3.6 GHz turbo limit
while the other processors are generally in various states of idle.
Note that cpu1 and cpu7 are HT siblings within core8.
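
The MSR_RAPL_POWER_UNIT value 0x000a1003 in the verbose output above decodes to exactly the units printed: each field is an exponent N and the unit is 2^-N. A small standalone check of that arithmetic, using the same bit fields as rapl_probe() in the turbostat.c diff below (a verification sketch, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long msr = 0x000a1003ULL;  /* value from the example above */

            /* Same field extraction as rapl_probe() */
            double power_units  = 1.0 / (1 << (msr & 0xF));          /* bits 3:0   */
            double energy_units = 1.0 / (1 << ((msr >> 8) & 0x1F));  /* bits 12:8  */
            double time_units   = 1.0 / (1 << ((msr >> 16) & 0xF));  /* bits 19:16 */

            /* Prints: 0.125000 Watts, 0.000015 Joules, 0.000977 sec. */
            printf("%f Watts, %f Joules, %f sec.\n",
                   power_units, energy_units, time_units);
            return 0;
    }
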
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ea095abbe97..ce6d46038f7 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -20,6 +20,7 @@
*/
#define _GNU_SOURCE
+#include <asm/msr.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
@@ -35,28 +36,18 @@
#include <ctype.h>
#include <sched.h>
-#define MSR_NEHALEM_PLATFORM_INFO 0xCE
-#define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1AD
-#define MSR_IVT_TURBO_RATIO_LIMIT 0x1AE
-#define MSR_APERF 0xE8
-#define MSR_MPERF 0xE7
-#define MSR_PKG_C2_RESIDENCY 0x60D /* SNB only */
-#define MSR_PKG_C3_RESIDENCY 0x3F8
-#define MSR_PKG_C6_RESIDENCY 0x3F9
-#define MSR_PKG_C7_RESIDENCY 0x3FA /* SNB only */
-#define MSR_CORE_C3_RESIDENCY 0x3FC
-#define MSR_CORE_C6_RESIDENCY 0x3FD
-#define MSR_CORE_C7_RESIDENCY 0x3FE /* SNB only */
-
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5; /* set with -i interval_sec */
unsigned int verbose; /* set with -v */
+unsigned int rapl_verbose; /* set with -R */
+unsigned int thermal_verbose; /* set with -T */
unsigned int summary_only; /* set with -s */
unsigned int skip_c0;
unsigned int skip_c1;
unsigned int do_nhm_cstates;
unsigned int do_snb_cstates;
unsigned int has_aperf;
+unsigned int has_epb;
unsigned int units = 1000000000; /* Ghz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
@@ -74,6 +65,23 @@ unsigned int show_cpu;
unsigned int show_pkg_only;
unsigned int show_core_only;
char *output_buffer, *outp;
+unsigned int do_rapl;
+unsigned int do_dts;
+unsigned int do_ptm;
+unsigned int tcc_activation_temp;
+unsigned int tcc_activation_temp_override;
+double rapl_power_units, rapl_energy_units, rapl_time_units;
+double rapl_joule_counter_range;
+
+#define RAPL_PKG (1 << 0)
+#define RAPL_CORES (1 << 1)
+#define RAPL_GFX (1 << 2)
+#define RAPL_DRAM (1 << 3)
+#define RAPL_PKG_PERF_STATUS (1 << 4)
+#define RAPL_DRAM_PERF_STATUS (1 << 5)
+#define TJMAX_DEFAULT 100
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
int aperf_mperf_unstable;
int backwards_count;
@@ -101,6 +109,7 @@ struct core_data {
unsigned long long c3;
unsigned long long c6;
unsigned long long c7;
+ unsigned int core_temp_c;
unsigned int core_id;
} *core_even, *core_odd;
@@ -110,6 +119,14 @@ struct pkg_data {
unsigned long long pc6;
unsigned long long pc7;
unsigned int package_id;
+ unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
+ unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
+ unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
+ unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
+ unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
+ unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
+ unsigned int pkg_temp_c;
+
} *package_even, *package_odd;
#define ODD_COUNTERS thread_odd, core_odd, package_odd
@@ -247,6 +264,12 @@ void print_header(void)
outp += sprintf(outp, " %%c6");
if (do_snb_cstates)
outp += sprintf(outp, " %%c7");
+
+ if (do_dts)
+ outp += sprintf(outp, " CTMP");
+ if (do_ptm)
+ outp += sprintf(outp, " PTMP");
+
if (do_snb_cstates)
outp += sprintf(outp, " %%pc2");
if (do_nhm_cstates)
@@ -256,6 +279,19 @@ void print_header(void)
if (do_snb_cstates)
outp += sprintf(outp, " %%pc7");
+ if (do_rapl & RAPL_PKG)
+ outp += sprintf(outp, " Pkg_W");
+ if (do_rapl & RAPL_CORES)
+ outp += sprintf(outp, " Cor_W");
+ if (do_rapl & RAPL_GFX)
+ outp += sprintf(outp, " GFX_W");
+ if (do_rapl & RAPL_DRAM)
+ outp += sprintf(outp, " RAM_W");
+ if (do_rapl & RAPL_PKG_PERF_STATUS)
+ outp += sprintf(outp, " PKG_%%");
+ if (do_rapl & RAPL_DRAM_PERF_STATUS)
+ outp += sprintf(outp, " RAM_%%");
+
outp += sprintf(outp, "\n");
}
@@ -285,6 +321,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
fprintf(stderr, "c3: %016llX\n", c->c3);
fprintf(stderr, "c6: %016llX\n", c->c6);
fprintf(stderr, "c7: %016llX\n", c->c7);
+ fprintf(stderr, "DTS: %dC\n", c->core_temp_c);
}
if (p) {
@@ -293,6 +330,13 @@ int dump_counters(struct thread_data *t, struct core_data *c,
fprintf(stderr, "pc3: %016llX\n", p->pc3);
fprintf(stderr, "pc6: %016llX\n", p->pc6);
fprintf(stderr, "pc7: %016llX\n", p->pc7);
+ fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
+ fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
+ fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
+ fprintf(stderr, "Joules RAM: %0X\n", p->energy_dram);
+ fprintf(stderr, "Throttle PKG: %0X\n", p->rapl_pkg_perf_status);
+ fprintf(stderr, "Throttle RAM: %0X\n", p->rapl_dram_perf_status);
+ fprintf(stderr, "PTM: %dC\n", p->pkg_temp_c);
}
return 0;
}
@@ -302,14 +346,21 @@ int dump_counters(struct thread_data *t, struct core_data *c,
* package: "pk" 2 columns %2d
* core: "cor" 3 columns %3d
* CPU: "CPU" 3 columns %3d
+ * Pkg_W: %6.2
+ * Cor_W: %6.2
+ * GFX_W: %5.2
+ * RAM_W: %5.2
* GHz: "GHz" 3 columns %3.2
* TSC: "TSC" 3 columns %3.2
* percentage " %pc3" %6.2
+ * Perf Status percentage: %5.2
+ * "CTMP" 4 columns %4d
*/
int format_counters(struct thread_data *t, struct core_data *c,
struct pkg_data *p)
{
double interval_float;
+ char *fmt5, *fmt6;
/* if showing only 1st thread in core and this isn't one, bail out */
if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
@@ -349,7 +400,6 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (show_cpu)
outp += sprintf(outp, " %3d", t->cpu_id);
}
-
/* %c0 */
if (do_nhm_cstates) {
if (show_pkg || show_core || show_cpu)
@@ -414,10 +464,16 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
+ if (do_dts)
+ outp += sprintf(outp, " %4d", c->core_temp_c);
+
/* print per-package data only for 1st core in package */
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
goto done;
+ if (do_ptm)
+ outp += sprintf(outp, " %4d", p->pkg_temp_c);
+
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
if (do_nhm_cstates)
@@ -426,6 +482,32 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
if (do_snb_cstates)
outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+
+ /*
+ * If measurement interval exceeds minimum RAPL Joule Counter range,
+ * indicate that results are suspect by printing "**" in fraction place.
+ */
+ if (interval_float < rapl_joule_counter_range) {
+ fmt5 = " %5.2f";
+ fmt6 = " %6.2f";
+ } else {
+ fmt5 = " %3.0f**";
+ fmt6 = " %4.0f**";
+ }
+
+ if (do_rapl & RAPL_PKG)
+ outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
+ if (do_rapl & RAPL_CORES)
+ outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
+ if (do_rapl & RAPL_GFX)
+ outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
+ if (do_rapl & RAPL_DRAM)
+ outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
+ if (do_rapl & RAPL_PKG_PERF_STATUS)
+ outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+ if (do_rapl & RAPL_DRAM_PERF_STATUS)
+ outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+
done:
outp += sprintf(outp, "\n");
@@ -435,6 +517,7 @@ done:
void flush_stdout()
{
fputs(output_buffer, stdout);
+ fflush(stdout);
outp = output_buffer;
}
void flush_stderr()
@@ -461,6 +544,13 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
for_all_cpus(format_counters, t, c, p);
}
+#define DELTA_WRAP32(new, old) \
+ if (new > old) { \
+ old = new - old; \
+ } else { \
+ old = 0x100000000 + new - old; \
+ }
+
void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
@@ -468,6 +558,14 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
old->pc3 = new->pc3 - old->pc3;
old->pc6 = new->pc6 - old->pc6;
old->pc7 = new->pc7 - old->pc7;
+ old->pkg_temp_c = new->pkg_temp_c;
+
+ DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
+ DELTA_WRAP32(new->energy_cores, old->energy_cores);
+ DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
+ DELTA_WRAP32(new->energy_dram, old->energy_dram);
+ DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
+ DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
}
void
@@ -476,6 +574,7 @@ delta_core(struct core_data *new, struct core_data *old)
old->c3 = new->c3 - old->c3;
old->c6 = new->c6 - old->c6;
old->c7 = new->c7 - old->c7;
+ old->core_temp_c = new->core_temp_c;
}
/*
@@ -582,11 +681,20 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->c3 = 0;
c->c6 = 0;
c->c7 = 0;
+ c->core_temp_c = 0;
p->pc2 = 0;
p->pc3 = 0;
p->pc6 = 0;
p->pc7 = 0;
+
+ p->energy_pkg = 0;
+ p->energy_dram = 0;
+ p->energy_cores = 0;
+ p->energy_gfx = 0;
+ p->rapl_pkg_perf_status = 0;
+ p->rapl_dram_perf_status = 0;
+ p->pkg_temp_c = 0;
}
int sum_counters(struct thread_data *t, struct core_data *c,
struct pkg_data *p)
@@ -607,6 +715,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
average.cores.c6 += c->c6;
average.cores.c7 += c->c7;
+ average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
+
/* sum per-pkg values only for 1st core in pkg */
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
@@ -616,6 +726,15 @@ int sum_counters(struct thread_data *t, struct core_data *c,
average.packages.pc6 += p->pc6;
average.packages.pc7 += p->pc7;
+ average.packages.energy_pkg += p->energy_pkg;
+ average.packages.energy_dram += p->energy_dram;
+ average.packages.energy_cores += p->energy_cores;
+ average.packages.energy_gfx += p->energy_gfx;
+
+ average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
+
+ average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
+ average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
return 0;
}
/*
@@ -667,23 +786,26 @@ static unsigned long long rdtsc(void)
int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
int cpu = t->cpu_id;
+ unsigned long long msr;
- if (cpu_migrate(cpu))
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
return -1;
+ }
t->tsc = rdtsc(); /* we are running on local CPU of interest */
if (has_aperf) {
- if (get_msr(cpu, MSR_APERF, &t->aperf))
+ if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
return -3;
- if (get_msr(cpu, MSR_MPERF, &t->mperf))
+ if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
return -4;
}
if (extra_delta_offset32) {
- if (get_msr(cpu, extra_delta_offset32, &t->extra_delta32))
+ if (get_msr(cpu, extra_delta_offset32, &msr))
return -5;
- t->extra_delta32 &= 0xFFFFFFFF;
+ t->extra_delta32 = msr & 0xFFFFFFFF;
}
if (extra_delta_offset64)
@@ -691,9 +813,9 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return -5;
if (extra_msr_offset32) {
- if (get_msr(cpu, extra_msr_offset32, &t->extra_msr32))
+ if (get_msr(cpu, extra_msr_offset32, &msr))
return -5;
- t->extra_msr32 &= 0xFFFFFFFF;
+ t->extra_msr32 = msr & 0xFFFFFFFF;
}
if (extra_msr_offset64)
@@ -715,6 +837,13 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
return -8;
+ if (do_dts) {
+ if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
+ return -9;
+ c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
+ }
+
+
/* collect package counters only for 1st core in package */
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
return 0;
@@ -731,6 +860,41 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
return -12;
}
+ if (do_rapl & RAPL_PKG) {
+ if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
+ return -13;
+ p->energy_pkg = msr & 0xFFFFFFFF;
+ }
+ if (do_rapl & RAPL_CORES) {
+ if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
+ return -14;
+ p->energy_cores = msr & 0xFFFFFFFF;
+ }
+ if (do_rapl & RAPL_DRAM) {
+ if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
+ return -15;
+ p->energy_dram = msr & 0xFFFFFFFF;
+ }
+ if (do_rapl & RAPL_GFX) {
+ if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
+ return -16;
+ p->energy_gfx = msr & 0xFFFFFFFF;
+ }
+ if (do_rapl & RAPL_PKG_PERF_STATUS) {
+ if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
+ return -16;
+ p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
+ }
+ if (do_rapl & RAPL_DRAM_PERF_STATUS) {
+ if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
+ return -16;
+ p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
+ }
+ if (do_ptm) {
+ if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
+ return -17;
+ p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
+ }
return 0;
}
@@ -742,10 +906,10 @@ void print_verbose_header(void)
if (!do_nehalem_platform_info)
return;
- get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
+ get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
- if (verbose > 1)
- fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
+ if (verbose)
+ fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
ratio = (msr >> 40) & 0xFF;
fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
@@ -760,8 +924,8 @@ void print_verbose_header(void)
get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr);
- if (verbose > 1)
- fprintf(stderr, "MSR_IVT_TURBO_RATIO_LIMIT: 0x%llx\n", msr);
+ if (verbose)
+ fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
ratio = (msr >> 56) & 0xFF;
if (ratio)
@@ -804,14 +968,56 @@ void print_verbose_header(void)
ratio, bclk, ratio * bclk);
print_nhm_turbo_ratio_limits:
+ get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+
+#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
+#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
+
+ fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
+
+ fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
+ (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
+ (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
+ (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
+ (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
+ (msr & (1 << 15)) ? "" : "UN",
+ (unsigned int)msr & 7);
+
+
+ switch(msr & 0x7) {
+ case 0:
+ fprintf(stderr, "pc0");
+ break;
+ case 1:
+ fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
+ break;
+ case 2:
+ fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
+ break;
+ case 3:
+ fprintf(stderr, "pc6");
+ break;
+ case 4:
+ fprintf(stderr, "pc7");
+ break;
+ case 5:
+ fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
+ break;
+ case 7:
+ fprintf(stderr, "unlimited");
+ break;
+ default:
+ fprintf(stderr, "invalid");
+ }
+ fprintf(stderr, ")\n");
if (!do_nehalem_turbo_ratio_limit)
return;
- get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
+ get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
- if (verbose > 1)
- fprintf(stderr, "MSR_NEHALEM_TURBO_RATIO_LIMIT: 0x%llx\n", msr);
+ if (verbose)
+ fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
ratio = (msr >> 56) & 0xFF;
if (ratio)
@@ -1100,15 +1306,22 @@ int mark_cpu_present(int cpu)
void turbostat_loop()
{
int retval;
+ int restarted = 0;
restart:
+ restarted++;
+
retval = for_all_cpus(get_counters, EVEN_COUNTERS);
if (retval < -1) {
exit(retval);
} else if (retval == -1) {
+ if (restarted > 1) {
+ exit(retval);
+ }
re_initialize();
goto restart;
}
+ restarted = 0;
gettimeofday(&tv_even, (struct timezone *)NULL);
while (1) {
@@ -1207,6 +1420,299 @@ int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
}
}
+/*
+ * print_epb()
+ * Decode the ENERGY_PERF_BIAS MSR
+ */
+int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long msr;
+ char *epb_string;
+ int cpu;
+
+ if (!has_epb)
+ return 0;
+
+ cpu = t->cpu_id;
+
+ /* EPB is per-package */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+ return -1;
+ }
+
+ if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
+ return 0;
+
+ switch (msr & 0x7) {
+ case ENERGY_PERF_BIAS_PERFORMANCE:
+ epb_string = "performance";
+ break;
+ case ENERGY_PERF_BIAS_NORMAL:
+ epb_string = "balanced";
+ break;
+ case ENERGY_PERF_BIAS_POWERSAVE:
+ epb_string = "powersave";
+ break;
+ default:
+ epb_string = "custom";
+ break;
+ }
+ fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
+
+ return 0;
+}
+
+#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
+#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+ unsigned long long msr;
+ double tdp;
+
+ if (!genuine_intel)
+ return;
+
+ if (family != 6)
+ return;
+
+ switch (model) {
+ case 0x2A:
+ case 0x3A:
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
+ break;
+ case 0x2D:
+ case 0x3E:
+ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
+ break;
+ default:
+ return;
+ }
+
+ /* units on package 0, verify later other packages match */
+ if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
+ return;
+
+ rapl_power_units = 1.0 / (1 << (msr & 0xF));
+ rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
+ rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
+
+ /* get TDP to determine energy counter range */
+ if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
+ return;
+
+ tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+
+ if (verbose)
+ fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+
+ return;
+}
+
+int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long msr;
+ unsigned int dts;
+ int cpu;
+
+ if (!(do_dts || do_ptm))
+ return 0;
+
+ cpu = t->cpu_id;
+
+ /* DTS is per-core, no need to print for each thread */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ return 0;
+
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+ return -1;
+ }
+
+ if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
+ if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
+ return 0;
+
+ dts = (msr >> 16) & 0x7F;
+ fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
+ cpu, msr, tcc_activation_temp - dts);
+
+#ifdef THERM_DEBUG
+ if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
+ return 0;
+
+ dts = (msr >> 16) & 0x7F;
+ dts2 = (msr >> 8) & 0x7F;
+ fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
+ cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
+#endif
+ }
+
+
+ if (do_dts) {
+ unsigned int resolution;
+
+ if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
+ return 0;
+
+ dts = (msr >> 16) & 0x7F;
+ resolution = (msr >> 27) & 0xF;
+ fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
+ cpu, msr, tcc_activation_temp - dts, resolution);
+
+#ifdef THERM_DEBUG
+ if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
+ return 0;
+
+ dts = (msr >> 16) & 0x7F;
+ dts2 = (msr >> 8) & 0x7F;
+ fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
+ cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
+#endif
+ }
+
+ return 0;
+}
+
+void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
+{
+ fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
+ cpu, label,
+ ((msr >> 15) & 1) ? "EN" : "DIS",
+ ((msr >> 0) & 0x7FFF) * rapl_power_units,
+ (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
+ (((msr >> 16) & 1) ? "EN" : "DIS"));
+
+ return;
+}
+
+int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long msr;
+ int cpu;
+ double local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units;
+
+ if (!do_rapl)
+ return 0;
+
+ /* RAPL counters are per package, so print only for 1st thread/package */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ cpu = t->cpu_id;
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+ return -1;
+ }
+
+ if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+ return -1;
+
+ local_rapl_power_units = 1.0 / (1 << (msr & 0xF));
+ local_rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
+ local_rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
+
+ if (local_rapl_power_units != rapl_power_units)
+ fprintf(stderr, "cpu%d, ERROR: Power units mis-match\n", cpu);
+ if (local_rapl_energy_units != rapl_energy_units)
+ fprintf(stderr, "cpu%d, ERROR: Energy units mis-match\n", cpu);
+ if (local_rapl_time_units != rapl_time_units)
+ fprintf(stderr, "cpu%d, ERROR: Time units mis-match\n", cpu);
+
+ if (verbose) {
+ fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
+ "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+ local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
+ }
+ if (do_rapl & RAPL_PKG) {
+ if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
+ return -5;
+
+
+ fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
+ cpu, msr,
+ ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+
+ if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
+ return -9;
+
+ fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
+ cpu, msr, (msr >> 63) & 1 ? "": "UN");
+
+ print_power_limit_msr(cpu, msr, "PKG Limit #1");
+ fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
+ cpu,
+ ((msr >> 47) & 1) ? "EN" : "DIS",
+ ((msr >> 32) & 0x7FFF) * rapl_power_units,
+ (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
+ ((msr >> 48) & 1) ? "EN" : "DIS");
+ }
+
+ if (do_rapl & RAPL_DRAM) {
+ if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
+ return -6;
+
+
+ fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
+ cpu, msr,
+ ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
+ ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
+
+
+ if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
+ return -9;
+ fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
+ cpu, msr, (msr >> 31) & 1 ? "": "UN");
+
+ print_power_limit_msr(cpu, msr, "DRAM Limit");
+ }
+ if (do_rapl & RAPL_CORES) {
+ if (verbose) {
+ if (get_msr(cpu, MSR_PP0_POLICY, &msr))
+ return -7;
+
+ fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
+
+ if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
+ return -9;
+ fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
+ cpu, msr, (msr >> 31) & 1 ? "": "UN");
+ print_power_limit_msr(cpu, msr, "Cores Limit");
+ }
+ }
+ if (do_rapl & RAPL_GFX) {
+ if (verbose) {
+ if (get_msr(cpu, MSR_PP1_POLICY, &msr))
+ return -8;
+
+ fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
+
+ if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
+ return -9;
+ fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
+ cpu, msr, (msr >> 31) & 1 ? "": "UN");
+ print_power_limit_msr(cpu, msr, "GFX Limit");
+ }
+ }
+ return 0;
+}
+
int is_snb(unsigned int family, unsigned int model)
{
@@ -1231,6 +1737,72 @@ double discover_bclk(unsigned int family, unsigned int model)
return 133.33;
}
+/*
+ * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
+ * the Thermal Control Circuit (TCC) activates.
+ * This is usually equal to tjMax.
+ *
+ * Older processors do not have this MSR, so in that case we guess,
+ * but also allow a command-line override with -T.
+ *
+ * Several MSR temperature values are in units of degrees-C
+ * below this value, including the Digital Thermal Sensor (DTS),
+ * Package Thermal Management Sensor (PTM), and thermal event thresholds.
+ */
+int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ unsigned long long msr;
+ unsigned int target_c_local;
+ int cpu;
+
+ /* tcc_activation_temp is used only for dts or ptm */
+ if (!(do_dts || do_ptm))
+ return 0;
+
+ /* this is a per-package concept */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ cpu = t->cpu_id;
+ if (cpu_migrate(cpu)) {
+ fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+ return -1;
+ }
+
+ if (tcc_activation_temp_override != 0) {
+ tcc_activation_temp = tcc_activation_temp_override;
+ fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
+ cpu, tcc_activation_temp);
+ return 0;
+ }
+
+ /* Temperature Target MSR is Nehalem and newer only */
+ if (!do_nehalem_platform_info)
+ goto guess;
+
+ if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
+ goto guess;
+
+ target_c_local = (msr >> 16) & 0x7F;
+
+ if (verbose)
+ fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
+ cpu, msr, target_c_local);
+
+ if (target_c_local < 85 || target_c_local > 120)
+ goto guess;
+
+ tcc_activation_temp = target_c_local;
+
+ return 0;
+
+guess:
+ tcc_activation_temp = TJMAX_DEFAULT;
+ fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
+ cpu, tcc_activation_temp);
+
+ return 0;
+}
void check_cpuid()
{
unsigned int eax, ebx, ecx, edx, max_level;
@@ -1244,7 +1816,7 @@ void check_cpuid()
genuine_intel = 1;
if (verbose)
- fprintf(stderr, "%.4s%.4s%.4s ",
+ fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
@@ -1295,10 +1867,19 @@ void check_cpuid()
asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
has_aperf = ecx & (1 << 0);
- if (!has_aperf) {
- fprintf(stderr, "No APERF MSR\n");
- exit(1);
- }
+ do_dts = eax & (1 << 0);
+ do_ptm = eax & (1 << 6);
+ has_epb = ecx & (1 << 3);
+
+ if (verbose)
+ fprintf(stderr, "CPUID(6): %s%s%s%s\n",
+ has_aperf ? "APERF" : "No APERF!",
+ do_dts ? ", DTS" : "",
+ do_ptm ? ", PTM": "",
+ has_epb ? ", EPB": "");
+
+ if (!has_aperf)
+ exit(-1);
do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
@@ -1307,12 +1888,15 @@ void check_cpuid()
do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
+ rapl_probe(family, model);
+
+ return;
}
void usage()
{
- fprintf(stderr, "%s: [-v][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
+ fprintf(stderr, "%s: [-v][-R][-T][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
progname);
exit(1);
}
@@ -1548,6 +2132,17 @@ void turbostat_init()
if (verbose)
print_verbose_header();
+
+ if (verbose)
+ for_all_cpus(print_epb, ODD_COUNTERS);
+
+ if (verbose)
+ for_all_cpus(print_rapl, ODD_COUNTERS);
+
+ for_all_cpus(set_temperature_target, ODD_COUNTERS);
+
+ if (verbose)
+ for_all_cpus(print_thermal, ODD_COUNTERS);
}
int fork_it(char **argv)
@@ -1604,7 +2199,7 @@ void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) {
+ while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:RT:")) != -1) {
switch (opt) {
case 'p':
show_core_only++;
@@ -1636,6 +2231,12 @@ void cmdline(int argc, char **argv)
case 'M':
sscanf(optarg, "%x", &extra_msr_offset64);
break;
+ case 'R':
+ rapl_verbose++;
+ break;
+ case 'T':
+ tcc_activation_temp_override = atoi(optarg);
+ break;
default:
usage();
}
@@ -1646,8 +2247,8 @@ int main(int argc, char **argv)
{
cmdline(argc, argv);
- if (verbose > 1)
- fprintf(stderr, "turbostat v2.1 October 6, 2012"
+ if (verbose)
+ fprintf(stderr, "turbostat v3.0 November 23, 2012"
" - Len Brown <lenb@kernel.org>\n");
turbostat_init();
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index f458237fdd7..971c9ffdcb5 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -1,8 +1,10 @@
+DESTDIR ?=
+
x86_energy_perf_policy : x86_energy_perf_policy.c
clean :
rm -f x86_energy_perf_policy
install :
- install x86_energy_perf_policy /usr/bin/
- install x86_energy_perf_policy.8 /usr/share/man/man8/
+ install x86_energy_perf_policy ${DESTDIR}/usr/bin/
+ install x86_energy_perf_policy.8 ${DESTDIR}/usr/share/man/man8/
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 33c5c7ee148..40b3e5482f8 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -289,7 +289,7 @@ void for_every_cpu(void (func)(int))
"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
&cpu);
if (retval != 1)
- return;
+ break;
func(cpu);
}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index c7ba7614061..35fc584a4ff 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -53,6 +53,9 @@ my %default = (
"STOP_AFTER_FAILURE" => 60,
"STOP_TEST_AFTER" => 600,
"MAX_MONITOR_WAIT" => 1800,
+ "GRUB_REBOOT" => "grub2-reboot",
+ "SYSLINUX" => "extlinux",
+ "SYSLINUX_PATH" => "/boot/extlinux",
# required, and we will ask users if they don't have them but we keep the default
# value something that is common.
@@ -105,7 +108,12 @@ my $scp_to_target;
my $scp_to_target_install;
my $power_off;
my $grub_menu;
+my $grub_file;
my $grub_number;
+my $grub_reboot;
+my $syslinux;
+my $syslinux_path;
+my $syslinux_label;
my $target;
my $make;
my $pre_install;
@@ -232,6 +240,11 @@ my %option_map = (
"ADD_CONFIG" => \$addconfig,
"REBOOT_TYPE" => \$reboot_type,
"GRUB_MENU" => \$grub_menu,
+ "GRUB_FILE" => \$grub_file,
+ "GRUB_REBOOT" => \$grub_reboot,
+ "SYSLINUX" => \$syslinux,
+ "SYSLINUX_PATH" => \$syslinux_path,
+ "SYSLINUX_LABEL" => \$syslinux_label,
"PRE_INSTALL" => \$pre_install,
"POST_INSTALL" => \$post_install,
"NO_INSTALL" => \$no_install,
@@ -368,7 +381,7 @@ EOF
;
$config_help{"REBOOT_TYPE"} = << "EOF"
Way to reboot the box to the test kernel.
- Only valid options so far are "grub" and "script".
+ Only valid options so far are "grub", "grub2", "syslinux", and "script".
If you specify grub, it will assume grub version 1
and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
@@ -378,11 +391,19 @@ $config_help{"REBOOT_TYPE"} = << "EOF"
The entry in /boot/grub/menu.lst must be entered in manually.
The test will not modify that file.
+
+ If you specify grub2, then you also need to specify both \$GRUB_MENU
+ and \$GRUB_FILE.
+
+ If you specify syslinux, then you may use SYSLINUX to define the syslinux
+ command (defaults to extlinux), and SYSLINUX_PATH to specify the path to
+ the syslinux install (defaults to /boot/extlinux). You must also specify
+ SYSLINUX_LABEL to define the label that boots the test kernel.
EOF
;
$config_help{"GRUB_MENU"} = << "EOF"
The grub title name for the test kernel to boot
- (Only mandatory if REBOOT_TYPE = grub)
+ (Only mandatory if REBOOT_TYPE = grub or grub2)
Note, ktest.pl will not update the grub menu.lst, you need to
manually add an option for the test. ktest.pl will search
@@ -393,6 +414,22 @@ $config_help{"GRUB_MENU"} = << "EOF"
title Test Kernel
kernel vmlinuz-test
GRUB_MENU = Test Kernel
+
+ For grub2, a search of \$GRUB_FILE is performed for the lines
+ that begin with "menuentry". Submenus are not detected, so the
+ entry must be a top-level (non-nested) menuentry. Include the quotes
+ used in the menu to pin down your selection, as the first menuentry
+ containing \$GRUB_MENU that is found will be used.
+EOF
+ ;
+$config_help{"GRUB_FILE"} = << "EOF"
+ If grub2 is used, the full path for the grub.cfg file is placed
+ here. Use something like /boot/grub2/grub.cfg to search.
+EOF
+ ;
+$config_help{"SYSLINUX_LABEL"} = << "EOF"
+ If syslinux is used, the label that boots the target kernel must
+ be specified with SYSLINUX_LABEL.
EOF
;
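
For illustration only (not part of this patch), a ktest config fragment that
satisfies the grub2 and syslinux requirements documented above might look like
this; the menu title, label, and grub.cfg path are placeholders:

	REBOOT_TYPE = grub2
	GRUB_MENU = 'Test Kernel'
	GRUB_FILE = /boot/grub2/grub.cfg

	# or, for a syslinux/extlinux target:
	# REBOOT_TYPE = syslinux
	# SYSLINUX_LABEL = test-kernel

The quoting of GRUB_MENU matters because the first matching menuentry wins, as
described above.
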
$config_help{"REBOOT_SCRIPT"} = << "EOF"
@@ -521,6 +558,15 @@ sub get_ktest_configs {
if ($rtype eq "grub") {
get_ktest_config("GRUB_MENU");
}
+
+ if ($rtype eq "grub2") {
+ get_ktest_config("GRUB_MENU");
+ get_ktest_config("GRUB_FILE");
+ }
+
+ if ($rtype eq "syslinux") {
+ get_ktest_config("SYSLINUX_LABEL");
+ }
}
sub process_variables {
@@ -1123,6 +1169,9 @@ sub wait_for_monitor;
sub reboot {
my ($time) = @_;
+ # Make sure everything has been written to disk
+ run_ssh("sync");
+
if (defined($time)) {
start_monitor;
# flush out current monitor
@@ -1452,8 +1501,44 @@ sub run_scp_mod {
return run_scp($src, $dst, $cp_scp);
}
+sub get_grub2_index {
+
+ return if (defined($grub_number));
+
+ doprint "Find grub2 menu ... ";
+ $grub_number = -1;
+
+ my $ssh_grub = $ssh_exec;
+ $ssh_grub =~ s,\$SSH_COMMAND,cat $grub_file,g;
+
+ open(IN, "$ssh_grub |")
+ or die "unable to get $grub_file";
+
+ my $found = 0;
+
+ while (<IN>) {
+ if (/^menuentry.*$grub_menu/) {
+ $grub_number++;
+ $found = 1;
+ last;
+ } elsif (/^menuentry\s/) {
+ $grub_number++;
+ }
+ }
+ close(IN);
+
+ die "Could not find '$grub_menu' in $grub_file on $machine"
+ if (!$found);
+ doprint "$grub_number\n";
+}
+
sub get_grub_index {
+ if ($reboot_type eq "grub2") {
+ get_grub2_index;
+ return;
+ }
+
if ($reboot_type ne "grub") {
return;
}
@@ -1524,6 +1609,10 @@ sub reboot_to {
if ($reboot_type eq "grub") {
run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+ } elsif ($reboot_type eq "grub2") {
+ run_ssh "$grub_reboot $grub_number";
+ } elsif ($reboot_type eq "syslinux") {
+ run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
} elsif (defined $reboot_script) {
run_command "$reboot_script";
}
@@ -1718,6 +1807,14 @@ sub do_post_install {
dodie "Failed to run post install";
}
+# Sometimes the reboot fails and the box hangs. We try to ssh to the
+# box, and if that fails, we force another reboot, which should power
+# cycle it.
+sub test_booted {
+ if (!run_ssh "echo testing connection") {
+ reboot $sleep_time;
+ }
+}
+
sub install {
return if ($no_install);
@@ -1730,6 +1827,8 @@ sub install {
my $cp_target = eval_kernel_version $target_image;
+ test_booted;
+
run_scp_install "$outputdir/$build_target", "$cp_target" or
dodie "failed to copy image";
@@ -1877,10 +1976,14 @@ sub make_oldconfig {
if (!run_command "$make olddefconfig") {
# Perhaps olddefconfig doesn't exist in this version of the kernel
- # try a yes '' | oldconfig
- doprint "olddefconfig failed, trying yes '' | make oldconfig\n";
- run_command "yes '' | $make oldconfig" or
- dodie "failed make config oldconfig";
+ # try oldnoconfig
+ doprint "olddefconfig failed, trying make oldnoconfig\n";
+ if (!run_command "$make oldnoconfig") {
+ doprint "oldnoconfig failed, trying yes '' | make oldconfig\n";
+ # try a yes '' | oldconfig
+ run_command "yes '' | $make oldconfig" or
+ dodie "failed make config oldconfig";
+ }
}
}
@@ -3700,6 +3803,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$target = "$ssh_user\@$machine";
if ($reboot_type eq "grub") {
dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+ } elsif ($reboot_type eq "grub2") {
+ dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+ dodie "GRUB_FILE not defined" if (!defined($grub_file));
+ } elsif ($reboot_type eq "syslinux") {
+ dodie "SYSLINUX_LABEL not defined" if (!defined($syslinux_label));
}
}
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index de28a0a3b8f..4012e933034 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -332,8 +332,18 @@
# from other linux builds on the system.
#LOCALVERSION = -test
+# For REBOOT_TYPE = grub2, you must specify where the grub.cfg
+# file is. This is the file that is searched to find the menu
+# option to boot into with GRUB_REBOOT.
+#GRUB_FILE = /boot/grub2/grub.cfg
+
+# The tool used by REBOOT_TYPE = grub2 to set the kernel to boot
+# into on the next reboot (one-shot mode).
+# (default grub2-reboot)
+#GRUB_REBOOT = grub2-reboot
+
# The grub title name for the test kernel to boot
-# (Only mandatory if REBOOT_TYPE = grub)
+# (Only mandatory if REBOOT_TYPE = grub or grub2)
#
# Note, ktest.pl will not update the grub menu.lst, you need to
# manually add an option for the test. ktest.pl will search
@@ -343,8 +353,33 @@
# For example, if in the /boot/grub/menu.lst the test kernel title has:
# title Test Kernel
# kernel vmlinuz-test
+#
+# For grub2, a search of the top-level "menuentry" lines is done. No
+# submenu is searched. The menu is found by searching for the
+# contents of GRUB_MENU in the line that starts with "menuentry".
+# You may want to include the quotes around the option. For example:
+# for: menuentry 'Test Kernel'
+# do a: GRUB_MENU = 'Test Kernel'
+# For customizing, add your entry in /etc/grub.d/40_custom.
+#
#GRUB_MENU = Test Kernel
+# For REBOOT_TYPE = syslinux, the name of the syslinux executable
+# (on the target) to use to set up the next reboot to boot the
+# test kernel.
+# (default extlinux)
+#SYSLINUX = syslinux
+
+# For REBOOT_TYPE = syslinux, the path that is passed to the
+# syslinux command where syslinux is installed.
+# (default /boot/extlinux)
+#SYSLINUX_PATH = /boot/syslinux
+
+# For REBOOT_TYPE = syslinux, the syslinux label that references the
+# test kernel in the syslinux config file.
+# (default undefined)
+#SYSLINUX_LABEL = "test-kernel"
+
# A script to reboot the target into the test kernel
# This and SWITCH_TO_TEST are about the same, except
# SWITCH_TO_TEST is run even for REBOOT_TYPE = grub.
@@ -497,7 +532,7 @@
#POST_BUILD_DIE = 1
# Way to reboot the box to the test kernel.
-# Only valid options so far are "grub" and "script"
+# Only valid options so far are "grub", "grub2", "syslinux", and "script"
# (default grub)
# If you specify grub, it will assume grub version 1
# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
@@ -505,6 +540,13 @@
# your setup, then specify "script" and have a command or script
# specified in REBOOT_SCRIPT to boot to the target.
#
+# For REBOOT_TYPE = grub2, you must define both GRUB_MENU and
+# GRUB_FILE.
+#
+# For REBOOT_TYPE = syslinux, you must define SYSLINUX_LABEL, and
+# perhaps modify SYSLINUX (default extlinux) and SYSLINUX_PATH
+# (default /boot/extlinux)
+#
# The entry in /boot/grub/menu.lst must be entered in manually.
# The test will not modify that file.
#REBOOT_TYPE = grub
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 931278035f5..e18b42b254a 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -17,7 +17,7 @@ else
endif
run_tests:
- ./breakpoint_test
+ @./breakpoint_test || echo "breakpoints selftests: [FAIL]"
clean:
rm -fr breakpoint_test
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
index 7c9c20ff578..12657a5e4bf 100644
--- a/tools/testing/selftests/cpu-hotplug/Makefile
+++ b/tools/testing/selftests/cpu-hotplug/Makefile
@@ -1,6 +1,6 @@
all:
run_tests:
- ./on-off-test.sh
+ @./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
clean:
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
index dc79b86ea65..56eb5523dbb 100644
--- a/tools/testing/selftests/kcmp/Makefile
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -16,13 +16,13 @@ CFLAGS += -I../../../../arch/x86/include/
all:
ifeq ($(ARCH),X86)
- gcc $(CFLAGS) kcmp_test.c -o run_test
+ gcc $(CFLAGS) kcmp_test.c -o kcmp_test
else
echo "Not an x86 target, can't build kcmp selftest"
endif
-run-tests: all
- ./kcmp_test
+run_tests: all
+ @./kcmp_test || echo "kcmp_test: [FAIL]"
clean:
rm -fr ./run_test
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index 358cc6bfa35..fa4f1b37e04 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -72,7 +72,8 @@ int main(int argc, char **argv)
/* This one should return same fd */
ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
if (ret) {
- printf("FAIL: 0 expected but %d returned\n", ret);
+ printf("FAIL: 0 expected but %d returned (%s)\n",
+ ret, strerror(errno));
ret = -1;
} else
printf("PASS: 0 returned as expected\n");
@@ -80,7 +81,8 @@ int main(int argc, char **argv)
/* Compare with self */
ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
if (ret) {
- printf("FAIL: 0 expected but %li returned\n", ret);
+ printf("FAIL: 0 expected but %li returned (%s)\n",
+ ret, strerror(errno));
ret = -1;
} else
printf("PASS: 0 returned as expected\n");
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index 7c9c20ff578..0f49c3f5f58 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -1,6 +1,6 @@
all:
run_tests:
- ./on-off-test.sh
+ @./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
clean:
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 54c0aad2b47..218a122c795 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -3,8 +3,8 @@ all:
gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
run_tests:
- ./mq_open_tests /test1
- ./mq_perf_tests
+ @./mq_open_tests /test1 || echo "mq_open_tests: [FAIL]"
+ @./mq_perf_tests || echo "mq_perf_tests: [FAIL]"
clean:
rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 7300d0702ef..436d2e81868 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -8,7 +8,7 @@ all: hugepage-mmap hugepage-shm map_hugetlb thuge-gen
$(CC) $(CFLAGS) -o $@ $^
run_tests: all
- /bin/sh ./run_vmtests
+ @/bin/sh ./run_vmtests || echo "vmtests: [FAIL]"
clean:
$(RM) hugepage-mmap hugepage-shm map_hugetlb
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 6d25dcd2e97..fcc9aa25fd0 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -164,7 +164,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
dev->buf + started,
GFP_ATOMIC);
- if (likely(r >= 0)) {
+ if (likely(r == 0)) {
++started;
virtqueue_kick(vq->vq);
}
@@ -177,7 +177,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
r = 0;
}
- } while (r >= 0);
+ } while (r == 0);
if (completed == completed_before)
++spurious;
assert(completed <= bufs);
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index aca6edcbbc6..af8c925e93e 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -405,7 +405,6 @@ static char *cpio_replace_env(char *new_location)
return new_location;
}
-
static int cpio_mkfile_line(const char *line)
{
char name[PATH_MAX + 1];
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 23a41a9f8db..3642239252b 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -105,6 +105,15 @@ static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
}
#ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+ int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
+ assigned_dev->irq_source_id,
+ assigned_dev->guest_irq, 1);
+ return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
{
struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -117,6 +126,23 @@ static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
#endif
#ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+ int index = find_index_from_host_irq(assigned_dev, irq);
+ u32 vector;
+ int ret = 0;
+
+ if (index >= 0) {
+ vector = assigned_dev->guest_msix_entries[index].vector;
+ ret = kvm_set_irq_inatomic(assigned_dev->kvm,
+ assigned_dev->irq_source_id,
+ vector, 1);
+ }
+
+ return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
{
struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
@@ -334,11 +360,6 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
}
#ifdef __KVM_HAVE_MSI
-static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
-{
- return IRQ_WAKE_THREAD;
-}
-
static int assigned_device_enable_host_msi(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
@@ -363,11 +384,6 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
#endif
#ifdef __KVM_HAVE_MSIX
-static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
-{
- return IRQ_WAKE_THREAD;
-}
-
static int assigned_device_enable_host_msix(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 9718e98d6d2..b6eea5cc7b3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -35,6 +35,7 @@
#include "iodev.h"
+#ifdef __KVM_HAVE_IOAPIC
/*
* --------------------------------------------------------------------
* irqfd: Allows an fd to be used to inject an interrupt to the guest
@@ -332,7 +333,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
mutex_lock(&kvm->irqfds.resampler_lock);
list_for_each_entry(resampler,
- &kvm->irqfds.resampler_list, list) {
+ &kvm->irqfds.resampler_list, link) {
if (resampler->notifier.gsi == irqfd->gsi) {
irqfd->resampler = resampler;
break;
@@ -425,17 +426,21 @@ fail:
kfree(irqfd);
return ret;
}
+#endif
void
kvm_eventfd_init(struct kvm *kvm)
{
+#ifdef __KVM_HAVE_IOAPIC
spin_lock_init(&kvm->irqfds.lock);
INIT_LIST_HEAD(&kvm->irqfds.items);
INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
mutex_init(&kvm->irqfds.resampler_lock);
+#endif
INIT_LIST_HEAD(&kvm->ioeventfds);
}
+#ifdef __KVM_HAVE_IOAPIC
/*
* shutdown any irqfd's that match fd+gsi
*/
@@ -555,6 +560,7 @@ static void __exit irqfd_module_exit(void)
module_init(irqfd_module_init);
module_exit(irqfd_module_exit);
+#endif
/*
* --------------------------------------------------------------------
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 037cb6730e6..4a340cb2301 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -52,7 +52,7 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
end_gfn = gfn + (size >> PAGE_SHIFT);
gfn += 1;
- if (is_error_pfn(pfn))
+ if (is_error_noslot_pfn(pfn))
return pfn;
while (gfn < end_gfn)
@@ -106,7 +106,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
* important because we unmap and unpin in 4kb steps later.
*/
pfn = kvm_pin_pages(slot, gfn, page_size);
- if (is_error_pfn(pfn)) {
+ if (is_error_noslot_pfn(pfn)) {
gfn += 1;
continue;
}
@@ -168,11 +168,7 @@ int kvm_assign_device(struct kvm *kvm,
r = iommu_attach_device(domain, &pdev->dev);
if (r) {
- printk(KERN_ERR "assign device %x:%x:%x.%x failed",
- pci_domain_nr(pdev->bus),
- pdev->bus->number,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
return r;
}
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 2eb58af7ee9..656fa455e15 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -102,6 +102,23 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
return r;
}
+static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm_lapic_irq *irq)
+{
+ trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+
+ irq->dest_id = (e->msi.address_lo &
+ MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ irq->vector = (e->msi.data &
+ MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+ irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+ irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+ irq->delivery_mode = e->msi.data & 0x700;
+ irq->level = 1;
+ irq->shorthand = 0;
+ /* TODO Deal with RH bit of MSI message address */
+}
+
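
For illustration, a hand-worked decode of one MSI message using the standard
x86 address/data layout that kvm_set_msi_irq() assumes (a user-space sketch,
not kernel code; the example address and data values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int address_lo = 0xfee01000;	/* example MSI address */
		unsigned int data       = 0x0041;	/* example MSI data */

		printf("dest_id   %#x\n", (address_lo >> 12) & 0xff);	/* 0x1  */
		printf("dest_mode %u\n",  (address_lo >> 2) & 0x1);	/* 0: physical */
		printf("vector    %#x\n", data & 0xff);			/* 0x41 */
		printf("delivery  %u\n",  (data >> 8) & 0x7);		/* 0: fixed */
		printf("trig      %u\n",  (data >> 15) & 0x1);		/* 0: edge */
		return 0;
	}

Note that the kernel code stores dest_mode and trig_mode as the raw masked bits
rather than shifting them down; the shifts here are only for printing.
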
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level)
{
@@ -110,22 +127,26 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
if (!level)
return -1;
- trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+ kvm_set_msi_irq(e, &irq);
- irq.dest_id = (e->msi.address_lo &
- MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
- irq.vector = (e->msi.data &
- MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
- irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
- irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
- irq.delivery_mode = e->msi.data & 0x700;
- irq.level = 1;
- irq.shorthand = 0;
-
- /* TODO Deal with RH bit of MSI message address */
return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}
+
+static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm)
+{
+ struct kvm_lapic_irq irq;
+ int r;
+
+ kvm_set_msi_irq(e, &irq);
+
+ if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+ return r;
+ else
+ return -EWOULDBLOCK;
+}
+
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
struct kvm_kernel_irq_routing_entry route;
@@ -178,6 +199,44 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
return ret;
}
+/*
+ * Deliver an IRQ in an atomic context if we can, or return a failure;
+ * the user can retry in a process context.
+ * Return value:
+ * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
+ * Other values - No need to retry.
+ */
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+{
+ struct kvm_kernel_irq_routing_entry *e;
+ int ret = -EINVAL;
+ struct kvm_irq_routing_table *irq_rt;
+ struct hlist_node *n;
+
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ /*
+ * Injection into either PIC or IOAPIC might need to scan all CPUs,
+ * which would have to be retried from thread context; and when the
+ * same GSI is connected to both PIC and IOAPIC, we'd have to report
+ * a partial failure here.
+ * Since there's no easy way to do this, we only support injecting MSI,
+ * which is limited to a 1:1 GSI mapping.
+ */
+ rcu_read_lock();
+ irq_rt = rcu_dereference(kvm->irq_routing);
+ if (irq < irq_rt->nr_rt_entries)
+ hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+ if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+ ret = kvm_set_msi_inatomic(e, kvm);
+ else
+ ret = -EWOULDBLOCK;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
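
A minimal sketch of the retry contract described in the comment above, mirroring
how the assigned-dev MSI/MSI-X handlers earlier in this patch use the return
value (hypothetical driver code; struct example_dev and its fields are
placeholders): the hard-IRQ handler tries the atomic path and wakes its threaded
handler only on -EWOULDBLOCK.

	static irqreturn_t example_hardirq(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;	/* hypothetical per-device state */
		int ret = kvm_set_irq_inatomic(dev->kvm, dev->irq_source_id,
					       dev->guest_irq, 1);

		return ret == -EWOULDBLOCK ? IRQ_WAKE_THREAD : IRQ_HANDLED;
	}

	static irqreturn_t example_thread(int irq, void *dev_id)
	{
		struct example_dev *dev = dev_id;

		/* process context: the blocking path may scan all VCPUs */
		kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 1);
		return IRQ_HANDLED;
	}
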
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
struct kvm_irq_ack_notifier *kian;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index be70035fd42..1cd693a76a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -212,6 +212,11 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
+void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+{
+ make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
+}
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
@@ -709,8 +714,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
int r;
gfn_t base_gfn;
unsigned long npages;
- unsigned long i;
- struct kvm_memory_slot *memslot;
+ struct kvm_memory_slot *memslot, *slot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
@@ -761,13 +765,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
-
- if (s == memslot || !s->npages)
+ kvm_for_each_memslot(slot, kvm->memslots) {
+ if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
continue;
- if (!((base_gfn + npages <= s->base_gfn) ||
- (base_gfn >= s->base_gfn + s->npages)))
+ if (!((base_gfn + npages <= slot->base_gfn) ||
+ (base_gfn >= slot->base_gfn + slot->npages)))
goto out_free;
}
@@ -1208,7 +1210,7 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
return KVM_PFN_ERR_RO_FAULT;
if (kvm_is_error_hva(addr))
- return KVM_PFN_ERR_BAD;
+ return KVM_PFN_NOSLOT;
/* Do not map writable pfn in the readonly memslot. */
if (writable && memslot_is_readonly(slot)) {
@@ -1290,7 +1292,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
static struct page *kvm_pfn_to_page(pfn_t pfn)
{
- if (is_error_pfn(pfn))
+ if (is_error_noslot_pfn(pfn))
return KVM_ERR_PTR_BAD_PAGE;
if (kvm_is_mmio_pfn(pfn)) {
@@ -1322,7 +1324,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(pfn_t pfn)
{
- if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1848,6 +1850,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
atomic_inc(&kvm->online_vcpus);
mutex_unlock(&kvm->lock);
+ kvm_arch_vcpu_postcreate(vcpu);
return r;
unlock_vcpu_destroy:
@@ -1929,10 +1932,6 @@ out_free1:
goto out;
}
r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
- if (r)
- goto out_free2;
- r = 0;
-out_free2:
kfree(kvm_regs);
break;
}
@@ -1954,12 +1953,10 @@ out_free2:
kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
if (IS_ERR(kvm_sregs)) {
r = PTR_ERR(kvm_sregs);
+ kvm_sregs = NULL;
goto out;
}
r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_GET_MP_STATE: {
@@ -1981,9 +1978,6 @@ out_free2:
if (copy_from_user(&mp_state, argp, sizeof mp_state))
goto out;
r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_TRANSLATE: {
@@ -2008,9 +2002,6 @@ out_free2:
if (copy_from_user(&dbg, argp, sizeof dbg))
goto out;
r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_SET_SIGNAL_MASK: {
@@ -2054,12 +2045,10 @@ out_free2:
fpu = memdup_user(argp, sizeof(*fpu));
if (IS_ERR(fpu)) {
r = PTR_ERR(fpu);
+ fpu = NULL;
goto out;
}
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
- if (r)
- goto out;
- r = 0;
break;
}
default:
@@ -2129,8 +2118,6 @@ static long kvm_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_VCPU:
r = kvm_vm_ioctl_create_vcpu(kvm, arg);
- if (r < 0)
- goto out;
break;
case KVM_SET_USER_MEMORY_REGION: {
struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -2141,8 +2128,6 @@ static long kvm_vm_ioctl(struct file *filp,
goto out;
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
- if (r)
- goto out;
break;
}
case KVM_GET_DIRTY_LOG: {
@@ -2152,8 +2137,6 @@ static long kvm_vm_ioctl(struct file *filp,
if (copy_from_user(&log, argp, sizeof log))
goto out;
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
- if (r)
- goto out;
break;
}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -2163,9 +2146,6 @@ static long kvm_vm_ioctl(struct file *filp,
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
- if (r)
- goto out;
- r = 0;
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
@@ -2174,9 +2154,6 @@ static long kvm_vm_ioctl(struct file *filp,
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
- if (r)
- goto out;
- r = 0;
break;
}
#endif
@@ -2285,8 +2262,6 @@ static long kvm_vm_compat_ioctl(struct file *filp,
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
- if (r)
- goto out;
break;
}
default: